/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This gives the final feature bits for the device: it can change
 *	the dev->feature bits if it wants.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
};
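
/*
 * Illustrative sketch only (not part of this header): a transport is
 * expected to provide an instance of virtio_config_ops and point
 * vdev->config at it before registering the device.  The names below
 * (foo_get, foo_set, foo_config_ops and friends) are hypothetical.
 *
 *	static void foo_get(struct virtio_device *vdev, unsigned offset,
 *			    void *buf, unsigned len)
 *	{
 *		// Copy 'len' bytes of device config space, starting at
 *		// 'offset', into 'buf' using the transport's own mechanism.
 *	}
 *
 *	static const struct virtio_config_ops foo_config_ops = {
 *		.get			= foo_get,
 *		.set			= foo_set,
 *		.get_status		= foo_get_status,
 *		.set_status		= foo_set_status,
 *		.reset			= foo_reset,
 *		.find_vqs		= foo_find_vqs,
 *		.del_vqs		= foo_del_vqs,
 *		.get_features		= foo_get_features,
 *		.finalize_features	= foo_finalize_features,
 *	};
 */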
/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
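
/*
 * Illustrative sketch only (not part of this header): a driver probe
 * routine typically gates optional behaviour on feature bits it listed
 * in its feature table.  VIRTIO_NET_F_MQ is used purely as an example
 * of a device feature bit here; the driver must have advertised it for
 * virtio_has_feature() to be valid (see
 * virtio_check_driver_offered_feature() above).
 *
 *	static int foo_probe(struct virtio_device *vdev)
 *	{
 *		bool has_mq = virtio_has_feature(vdev, VIRTIO_NET_F_MQ);
 *
 *		// Transports, by contrast, poke the bits directly, e.g.
 *		// __virtio_test_bit(vdev, VIRTIO_F_VERSION_1).
 *		return has_mq ? foo_setup_mq(vdev) : foo_setup_single(vdev);
 *	}
 */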
/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		    struct virtqueue *vqs[], vq_callback_t *callbacks[],
		    const char * const names[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
				      desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
				      desc);
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @vdev: the device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)
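
/*
 * Illustrative sketch only (not part of this header): a typical probe
 * sequence finds its virtqueues through the helpers above and then calls
 * virtio_device_ready() if it needs the vqs before probe returns.  The
 * callback and queue names (foo_rx_done, foo_tx_done, "rx", "tx") are
 * hypothetical.
 *
 *	static int foo_init_vqs(struct virtio_device *vdev,
 *				struct virtqueue *vqs[2])
 *	{
 *		vq_callback_t *callbacks[] = { foo_rx_done, foo_tx_done };
 *		static const char * const names[] = { "rx", "tx" };
 *		int err;
 *
 *		err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *		if (err)
 *			return err;
 *
 *		virtio_device_ready(vdev);	// allow vq use during probe
 *		return 0;
 *	}
 */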
#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)		\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
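
/*
 * Illustrative sketch only (not part of this header): reading typed config
 * space fields with the accessors above.  The struct and field names
 * (struct foo_config, max_queues, status) are hypothetical; real drivers
 * use the layouts from their uapi headers, e.g. struct virtio_net_config.
 *
 *	struct foo_config {
 *		__le16		max_queues;	// modern, always little-endian
 *		__virtio32	status;		// byte order depends on VIRTIO_F_VERSION_1
 *	};
 *
 *	static void foo_read_config(struct virtio_device *vdev)
 *	{
 *		u16 max_queues;
 *		u32 status;
 *
 *		virtio_cread_le(vdev, struct foo_config, max_queues, &max_queues);
 *		virtio_cread(vdev, struct foo_config, status, &status);
 *	}
 */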
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})

#endif /* _LINUX_VIRTIO_CONFIG_H */
-rw-r--r--fs/resctrl/monitor.c1811
-rw-r--r--fs/resctrl/monitor_trace.h33
-rw-r--r--fs/resctrl/pseudo_lock.c1099
-rw-r--r--fs/resctrl/rdtgroup.c4584
-rw-r--r--fs/romfs/Kconfig1
-rw-r--r--fs/romfs/mmap-nommu.c8
-rw-r--r--fs/romfs/super.c51
-rw-r--r--fs/select.c158
-rw-r--r--fs/seq_file.c53
-rw-r--r--fs/signalfd.c72
-rw-r--r--fs/smb/Kconfig11
-rw-r--r--fs/smb/Makefile5
-rw-r--r--fs/smb/client/Kconfig (renamed from fs/cifs/Kconfig)89
-rw-r--r--fs/smb/client/Makefile (renamed from fs/cifs/Makefile)17
-rw-r--r--fs/smb/client/asn1.c (renamed from fs/cifs/asn1.c)2
-rw-r--r--fs/smb/client/cached_dir.c862
-rw-r--r--fs/smb/client/cached_dir.h98
-rw-r--r--fs/smb/client/cifs_debug.c (renamed from fs/cifs/cifs_debug.c)610
-rw-r--r--fs/smb/client/cifs_debug.h (renamed from fs/cifs/cifs_debug.h)24
-rw-r--r--fs/smb/client/cifs_fs_sb.h (renamed from fs/cifs/cifs_fs_sb.h)8
-rw-r--r--fs/smb/client/cifs_ioctl.h (renamed from fs/cifs/cifs_ioctl.h)18
-rw-r--r--fs/smb/client/cifs_spnego.c (renamed from fs/cifs/cifs_spnego.c)80
-rw-r--r--fs/smb/client/cifs_spnego.h (renamed from fs/cifs/cifs_spnego.h)7
-rw-r--r--fs/smb/client/cifs_spnego_negtokeninit.asn1 (renamed from fs/cifs/cifs_spnego_negtokeninit.asn1)0
-rw-r--r--fs/smb/client/cifs_swn.c (renamed from fs/cifs/cifs_swn.c)57
-rw-r--r--fs/smb/client/cifs_swn.h (renamed from fs/cifs/cifs_swn.h)0
-rw-r--r--fs/smb/client/cifs_unicode.c (renamed from fs/cifs/cifs_unicode.c)21
-rw-r--r--fs/smb/client/cifs_unicode.h73
-rw-r--r--fs/smb/client/cifsacl.c (renamed from fs/cifs/cifsacl.c)539
-rw-r--r--fs/smb/client/cifsacl.h (renamed from fs/cifs/cifsacl.h)101
-rw-r--r--fs/smb/client/cifsencrypt.c660
-rw-r--r--fs/smb/client/cifsfs.c (renamed from fs/cifs/cifsfs.c)804
-rw-r--r--fs/smb/client/cifsfs.h (renamed from fs/cifs/cifsfs.h)46
-rw-r--r--fs/smb/client/cifsglob.h (renamed from fs/cifs/cifsglob.h)1193
-rw-r--r--fs/smb/client/cifspdu.h (renamed from fs/cifs/cifspdu.h)809
-rw-r--r--fs/smb/client/cifsproto.h (renamed from fs/cifs/cifsproto.h)567
-rw-r--r--fs/smb/client/cifsroot.c (renamed from fs/cifs/cifsroot.c)2
-rw-r--r--fs/smb/client/cifssmb.c (renamed from fs/cifs/cifssmb.c)2708
-rw-r--r--fs/smb/client/cifstransport.c263
-rw-r--r--fs/smb/client/compress.c372
-rw-r--r--fs/smb/client/compress.h75
-rw-r--r--fs/smb/client/compress/lz77.c235
-rw-r--r--fs/smb/client/compress/lz77.h15
-rw-r--r--fs/smb/client/connect.c (renamed from fs/cifs/connect.c)3203
-rw-r--r--fs/smb/client/dfs.c467
-rw-r--r--fs/smb/client/dfs.h198
-rw-r--r--fs/smb/client/dfs_cache.c (renamed from fs/cifs/dfs_cache.c)936
-rw-r--r--fs/smb/client/dfs_cache.h (renamed from fs/cifs/dfs_cache.h)28
-rw-r--r--fs/smb/client/dir.c (renamed from fs/cifs/dir.c)250
-rw-r--r--fs/smb/client/dns_resolve.c96
-rw-r--r--fs/smb/client/dns_resolve.h37
-rw-r--r--fs/smb/client/export.c (renamed from fs/cifs/export.c)11
-rw-r--r--fs/smb/client/file.c3357
-rw-r--r--fs/smb/client/fs_context.c (renamed from fs/cifs/fs_context.c)761
-rw-r--r--fs/smb/client/fs_context.h (renamed from fs/cifs/fs_context.h)158
-rw-r--r--fs/smb/client/fscache.c172
-rw-r--r--fs/smb/client/fscache.h101
-rw-r--r--fs/smb/client/inode.c (renamed from fs/cifs/inode.c)1481
-rw-r--r--fs/smb/client/ioctl.c (renamed from fs/cifs/ioctl.c)151
-rw-r--r--fs/smb/client/link.c (renamed from fs/cifs/link.c)319
-rw-r--r--fs/smb/client/misc.c (renamed from fs/cifs/misc.c)774
-rw-r--r--fs/smb/client/namespace.c297
-rw-r--r--fs/smb/client/netlink.c (renamed from fs/cifs/netlink.c)1
-rw-r--r--fs/smb/client/netlink.h (renamed from fs/cifs/netlink.h)0
-rw-r--r--fs/smb/client/netmisc.c (renamed from fs/cifs/netmisc.c)34
-rw-r--r--fs/smb/client/nterr.c (renamed from fs/cifs/nterr.c)9
-rw-r--r--fs/smb/client/nterr.h (renamed from fs/cifs/nterr.h)1
-rw-r--r--fs/smb/client/ntlmssp.h (renamed from fs/cifs/ntlmssp.h)52
-rw-r--r--fs/smb/client/readdir.c (renamed from fs/cifs/readdir.c)472
-rw-r--r--fs/smb/client/reparse.c1273
-rw-r--r--fs/smb/client/reparse.h140
-rw-r--r--fs/smb/client/rfc1002pdu.h (renamed from fs/cifs/rfc1002pdu.h)14
-rw-r--r--fs/smb/client/sess.c (renamed from fs/cifs/sess.c)1220
-rw-r--r--fs/smb/client/smb1ops.c (renamed from fs/cifs/smb1ops.c)809
-rw-r--r--fs/smb/client/smb2file.c (renamed from fs/cifs/smb2file.c)202
-rw-r--r--fs/smb/client/smb2glob.h (renamed from fs/cifs/smb2glob.h)40
-rw-r--r--fs/smb/client/smb2inode.c1647
-rw-r--r--fs/smb/client/smb2maperror.c (renamed from fs/cifs/smb2maperror.c)60
-rw-r--r--fs/smb/client/smb2misc.c (renamed from fs/cifs/smb2misc.c)327
-rw-r--r--fs/smb/client/smb2ops.c (renamed from fs/cifs/smb2ops.c)3554
-rw-r--r--fs/smb/client/smb2pdu.c (renamed from fs/cifs/smb2pdu.c)2409
-rw-r--r--fs/smb/client/smb2pdu.h340
-rw-r--r--fs/smb/client/smb2proto.h (renamed from fs/cifs/smb2proto.h)147
-rw-r--r--fs/smb/client/smb2transport.c (renamed from fs/cifs/smb2transport.c)452
-rw-r--r--fs/smb/client/smbdirect.c3033
-rw-r--r--fs/smb/client/smbdirect.h76
-rw-r--r--fs/smb/client/smbencrypt.c (renamed from fs/cifs/smbencrypt.c)9
-rw-r--r--fs/smb/client/smberr.h (renamed from fs/cifs/smberr.h)0
-rw-r--r--fs/smb/client/trace.c (renamed from fs/cifs/trace.c)2
-rw-r--r--fs/smb/client/trace.h1819
-rw-r--r--fs/smb/client/transport.c (renamed from fs/cifs/transport.c)1130
-rw-r--r--fs/smb/client/unc.c (renamed from fs/cifs/unc.c)0
-rw-r--r--fs/smb/client/winucase.c (renamed from fs/cifs/winucase.c)0
-rw-r--r--fs/smb/client/xattr.c (renamed from fs/cifs/xattr.c)128
-rw-r--r--fs/smb/common/Makefile (renamed from fs/smbfs_common/Makefile)3
-rw-r--r--fs/smb/common/cifs_md4.c (renamed from fs/smbfs_common/cifs_md4.c)1
-rw-r--r--fs/smb/common/fscc.h174
-rw-r--r--fs/smb/common/md4.h (renamed from fs/smbfs_common/md4.h)0
-rw-r--r--fs/smb/common/smb2pdu.h2158
-rw-r--r--fs/smb/common/smb2status.h (renamed from fs/cifs/smb2status.h)9
-rw-r--r--fs/smb/common/smbacl.h122
-rw-r--r--fs/smb/common/smbdirect/smbdirect.h44
-rw-r--r--fs/smb/common/smbdirect/smbdirect_pdu.h55
-rw-r--r--fs/smb/common/smbdirect/smbdirect_socket.h547
-rw-r--r--fs/smb/common/smbfsctl.h (renamed from fs/smbfs_common/smbfsctl.h)22
-rw-r--r--fs/smb/common/smbglob.h71
-rw-r--r--fs/smb/server/Kconfig (renamed from fs/ksmbd/Kconfig)23
-rw-r--r--fs/smb/server/Makefile (renamed from fs/ksmbd/Makefile)0
-rw-r--r--fs/smb/server/asn1.c (renamed from fs/ksmbd/asn1.c)178
-rw-r--r--fs/smb/server/asn1.h (renamed from fs/ksmbd/asn1.h)0
-rw-r--r--fs/smb/server/auth.c (renamed from fs/ksmbd/auth.c)593
-rw-r--r--fs/smb/server/auth.h (renamed from fs/ksmbd/auth.h)28
-rw-r--r--fs/smb/server/connection.c (renamed from fs/ksmbd/connection.c)299
-rw-r--r--fs/smb/server/connection.h259
-rw-r--r--fs/smb/server/crypto_ctx.c (renamed from fs/ksmbd/crypto_ctx.c)38
-rw-r--r--fs/smb/server/crypto_ctx.h (renamed from fs/ksmbd/crypto_ctx.h)19
-rw-r--r--fs/smb/server/glob.h (renamed from fs/ksmbd/glob.h)4
-rw-r--r--fs/smb/server/ksmbd_netlink.h (renamed from fs/ksmbd/ksmbd_netlink.h)80
-rw-r--r--fs/smb/server/ksmbd_spnego_negtokeninit.asn1 (renamed from fs/ksmbd/ksmbd_spnego_negtokeninit.asn1)8
-rw-r--r--fs/smb/server/ksmbd_spnego_negtokentarg.asn1 (renamed from fs/ksmbd/ksmbd_spnego_negtokentarg.asn1)7
-rw-r--r--fs/smb/server/ksmbd_work.c174
-rw-r--r--fs/smb/server/ksmbd_work.h (renamed from fs/ksmbd/ksmbd_work.h)41
-rw-r--r--fs/smb/server/mgmt/ksmbd_ida.c (renamed from fs/ksmbd/mgmt/ksmbd_ida.c)22
-rw-r--r--fs/smb/server/mgmt/ksmbd_ida.h (renamed from fs/ksmbd/mgmt/ksmbd_ida.h)0
-rw-r--r--fs/smb/server/mgmt/share_config.c (renamed from fs/ksmbd/mgmt/share_config.c)90
-rw-r--r--fs/smb/server/mgmt/share_config.h (renamed from fs/ksmbd/mgmt/share_config.h)40
-rw-r--r--fs/smb/server/mgmt/tree_connect.c (renamed from fs/ksmbd/mgmt/tree_connect.c)83
-rw-r--r--fs/smb/server/mgmt/tree_connect.h (renamed from fs/ksmbd/mgmt/tree_connect.h)16
-rw-r--r--fs/smb/server/mgmt/user_config.c108
-rw-r--r--fs/smb/server/mgmt/user_config.h (renamed from fs/ksmbd/mgmt/user_config.h)7
-rw-r--r--fs/smb/server/mgmt/user_session.c492
-rw-r--r--fs/smb/server/mgmt/user_session.h (renamed from fs/ksmbd/mgmt/user_session.h)28
-rw-r--r--fs/smb/server/misc.c (renamed from fs/ksmbd/misc.c)105
-rw-r--r--fs/smb/server/misc.h (renamed from fs/ksmbd/misc.h)6
-rw-r--r--fs/smb/server/ndr.c (renamed from fs/ksmbd/ndr.c)30
-rw-r--r--fs/smb/server/ndr.h (renamed from fs/ksmbd/ndr.h)2
-rw-r--r--fs/smb/server/nterr.h (renamed from fs/ksmbd/nterr.h)0
-rw-r--r--fs/smb/server/ntlmssp.h (renamed from fs/ksmbd/ntlmssp.h)6
-rw-r--r--fs/smb/server/oplock.c (renamed from fs/ksmbd/oplock.c)567
-rw-r--r--fs/smb/server/oplock.h (renamed from fs/ksmbd/oplock.h)26
-rw-r--r--fs/smb/server/server.c (renamed from fs/ksmbd/server.c)134
-rw-r--r--fs/smb/server/server.h (renamed from fs/ksmbd/server.h)5
-rw-r--r--fs/smb/server/smb2misc.c (renamed from fs/ksmbd/smb2misc.c)156
-rw-r--r--fs/smb/server/smb2ops.c (renamed from fs/ksmbd/smb2ops.c)87
-rw-r--r--fs/smb/server/smb2pdu.c (renamed from fs/ksmbd/smb2pdu.c)3930
-rw-r--r--fs/smb/server/smb2pdu.h405
-rw-r--r--fs/smb/server/smb_common.c (renamed from fs/ksmbd/smb_common.c)261
-rw-r--r--fs/smb/server/smb_common.h206
-rw-r--r--fs/smb/server/smbacl.c (renamed from fs/ksmbd/smbacl.c)358
-rw-r--r--fs/smb/server/smbacl.h129
-rw-r--r--fs/smb/server/smbfsctl.h (renamed from fs/ksmbd/smbfsctl.h)2
-rw-r--r--fs/smb/server/transport_ipc.c (renamed from fs/ksmbd/transport_ipc.c)188
-rw-r--r--fs/smb/server/transport_ipc.h (renamed from fs/ksmbd/transport_ipc.h)4
-rw-r--r--fs/smb/server/transport_rdma.c2718
-rw-r--r--fs/smb/server/transport_rdma.h30
-rw-r--r--fs/smb/server/transport_tcp.c (renamed from fs/ksmbd/transport_tcp.c)317
-rw-r--r--fs/smb/server/transport_tcp.h (renamed from fs/ksmbd/transport_tcp.h)2
-rw-r--r--fs/smb/server/unicode.c (renamed from fs/ksmbd/unicode.c)200
-rw-r--r--fs/smb/server/unicode.h41
-rw-r--r--fs/smb/server/vfs.c (renamed from fs/ksmbd/vfs.c)1032
-rw-r--r--fs/smb/server/vfs.h (renamed from fs/ksmbd/vfs.h)126
-rw-r--r--fs/smb/server/vfs_cache.c (renamed from fs/ksmbd/vfs_cache.c)518
-rw-r--r--fs/smb/server/vfs_cache.h (renamed from fs/ksmbd/vfs_cache.h)48
-rw-r--r--fs/smb/server/xattr.h (renamed from fs/ksmbd/xattr.h)4
-rw-r--r--fs/smbfs_common/arc4.h23
-rw-r--r--fs/smbfs_common/cifs_arc4.c87
-rw-r--r--fs/smbfs_common/smb2pdu.h989
-rw-r--r--fs/splice.c875
-rw-r--r--fs/squashfs/Kconfig78
-rw-r--r--fs/squashfs/Makefile4
-rw-r--r--fs/squashfs/block.c197
-rw-r--r--fs/squashfs/cache.c12
-rw-r--r--fs/squashfs/decompressor.c3
-rw-r--r--fs/squashfs/decompressor.h1
-rw-r--r--fs/squashfs/decompressor_multi.c20
-rw-r--r--fs/squashfs/decompressor_multi_percpu.c28
-rw-r--r--fs/squashfs/decompressor_single.c15
-rw-r--r--fs/squashfs/export.c1
-rw-r--r--fs/squashfs/file.c411
-rw-r--r--fs/squashfs/file_cache.c6
-rw-r--r--fs/squashfs/file_direct.c118
-rw-r--r--fs/squashfs/inode.c59
-rw-r--r--fs/squashfs/lz4_wrapper.c7
-rw-r--r--fs/squashfs/lzo_wrapper.c7
-rw-r--r--fs/squashfs/namei.c14
-rw-r--r--fs/squashfs/page_actor.c61
-rw-r--r--fs/squashfs/page_actor.h68
-rw-r--r--fs/squashfs/squashfs.h39
-rw-r--r--fs/squashfs/squashfs_fs.h3
-rw-r--r--fs/squashfs/squashfs_fs_i.h2
-rw-r--r--fs/squashfs/squashfs_fs_sb.h7
-rw-r--r--fs/squashfs/super.c147
-rw-r--r--fs/squashfs/symlink.c38
-rw-r--r--fs/squashfs/xattr.c2
-rw-r--r--fs/squashfs/xattr.h4
-rw-r--r--fs/squashfs/xattr_id.c2
-rw-r--r--fs/squashfs/xz_wrapper.c11
-rw-r--r--fs/squashfs/zlib_wrapper.c12
-rw-r--r--fs/squashfs/zstd_wrapper.c28
-rw-r--r--fs/stack.c10
-rw-r--r--fs/stat.c399
-rw-r--r--fs/statfs.c16
-rw-r--r--fs/super.c1673
-rw-r--r--fs/sync.c79
-rw-r--r--fs/sysctls.c37
-rw-r--r--fs/sysfs/dir.c4
-rw-r--r--fs/sysfs/file.c93
-rw-r--r--fs/sysfs/group.c114
-rw-r--r--fs/sysfs/mount.c2
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/sysv/Kconfig37
-rw-r--r--fs/sysv/Makefile9
-rw-r--r--fs/sysv/balloc.c240
-rw-r--r--fs/sysv/dir.c368
-rw-r--r--fs/sysv/file.c59
-rw-r--r--fs/sysv/ialloc.c235
-rw-r--r--fs/sysv/inode.c357
-rw-r--r--fs/sysv/itree.c504
-rw-r--r--fs/sysv/namei.c278
-rw-r--r--fs/sysv/super.c592
-rw-r--r--fs/sysv/sysv.h245
-rw-r--r--fs/tests/binfmt_elf_kunit.c64
-rw-r--r--fs/tests/exec_kunit.c141
-rw-r--r--fs/timerfd.c88
-rw-r--r--fs/tracefs/Makefile1
-rw-r--r--fs/tracefs/event_inode.c914
-rw-r--r--fs/tracefs/inode.c576
-rw-r--r--fs/tracefs/internal.h79
-rw-r--r--fs/ubifs/Makefile2
-rw-r--r--fs/ubifs/auth.c24
-rw-r--r--fs/ubifs/budget.c16
-rw-r--r--fs/ubifs/commit.c13
-rw-r--r--fs/ubifs/compress.c246
-rw-r--r--fs/ubifs/crypto.c18
-rw-r--r--fs/ubifs/debug.c60
-rw-r--r--fs/ubifs/debug.h7
-rw-r--r--fs/ubifs/dir.c501
-rw-r--r--fs/ubifs/file.c689
-rw-r--r--fs/ubifs/find.c40
-rw-r--r--fs/ubifs/gc.c19
-rw-r--r--fs/ubifs/io.c77
-rw-r--r--fs/ubifs/ioctl.c16
-rw-r--r--fs/ubifs/journal.c312
-rw-r--r--fs/ubifs/lprops.c8
-rw-r--r--fs/ubifs/lpt.c13
-rw-r--r--fs/ubifs/lpt_commit.c20
-rw-r--r--fs/ubifs/master.c5
-rw-r--r--fs/ubifs/orphan.c157
-rw-r--r--fs/ubifs/recovery.c4
-rw-r--r--fs/ubifs/replay.c6
-rw-r--r--fs/ubifs/super.c504
-rw-r--r--fs/ubifs/sysfs.c156
-rw-r--r--fs/ubifs/tnc.c164
-rw-r--r--fs/ubifs/tnc_commit.c4
-rw-r--r--fs/ubifs/tnc_misc.c31
-rw-r--r--fs/ubifs/ubifs.h104
-rw-r--r--fs/ubifs/xattr.c61
-rw-r--r--fs/udf/Kconfig2
-rw-r--r--fs/udf/balloc.c149
-rw-r--r--fs/udf/dir.c198
-rw-r--r--fs/udf/directory.c594
-rw-r--r--fs/udf/ecma_167.h2
-rw-r--r--fs/udf/file.c192
-rw-r--r--fs/udf/ialloc.c45
-rw-r--r--fs/udf/inode.c964
-rw-r--r--fs/udf/lowlevel.c13
-rw-r--r--fs/udf/misc.c24
-rw-r--r--fs/udf/namei.c1183
-rw-r--r--fs/udf/partition.c21
-rw-r--r--fs/udf/super.c686
-rw-r--r--fs/udf/symlink.c65
-rw-r--r--fs/udf/truncate.c97
-rw-r--r--fs/udf/udf_i.h3
-rw-r--r--fs/udf/udf_sb.h12
-rw-r--r--fs/udf/udfdecl.h72
-rw-r--r--fs/udf/udftime.c29
-rw-r--r--fs/udf/unicode.c8
-rw-r--r--fs/ufs/Kconfig1
-rw-r--r--fs/ufs/balloc.c137
-rw-r--r--fs/ufs/cylinder.c31
-rw-r--r--fs/ufs/dir.c298
-rw-r--r--fs/ufs/file.c5
-rw-r--r--fs/ufs/ialloc.c4
-rw-r--r--fs/ufs/inode.c312
-rw-r--r--fs/ufs/namei.c90
-rw-r--r--fs/ufs/super.c363
-rw-r--r--fs/ufs/ufs.h39
-rw-r--r--fs/ufs/ufs_fs.h4
-rw-r--r--fs/ufs/util.c91
-rw-r--r--fs/ufs/util.h83
-rw-r--r--fs/unicode/.gitignore2
-rw-r--r--fs/unicode/Kconfig12
-rw-r--r--fs/unicode/Makefile35
-rw-r--r--fs/unicode/README.utf8data8
-rw-r--r--fs/unicode/mkutf8data.c27
-rw-r--r--fs/unicode/tests/.kunitconfig3
-rw-r--r--fs/unicode/tests/utf8_kunit.c (renamed from fs/unicode/utf8-selftest.c)187
-rw-r--r--fs/unicode/utf8-core.c136
-rw-r--r--fs/unicode/utf8-norm.c262
-rw-r--r--fs/unicode/utf8data.c_shipped (renamed from fs/unicode/utf8data.h_shipped)23
-rw-r--r--fs/unicode/utf8n.h81
-rw-r--r--fs/userfaultfd.c1048
-rw-r--r--fs/utimes.c19
-rw-r--r--fs/vboxsf/Kconfig2
-rw-r--r--fs/vboxsf/dir.c45
-rw-r--r--fs/vboxsf/file.c110
-rw-r--r--fs/vboxsf/shfl_hostintf.h6
-rw-r--r--fs/vboxsf/super.c36
-rw-r--r--fs/vboxsf/utils.c29
-rw-r--r--fs/vboxsf/vboxsf_wrappers.c2
-rw-r--r--fs/vboxsf/vfsmod.h4
-rw-r--r--fs/verity/Kconfig47
-rw-r--r--fs/verity/enable.c324
-rw-r--r--fs/verity/fsverity_private.h109
-rw-r--r--fs/verity/hash_algs.c306
-rw-r--r--fs/verity/init.c52
-rw-r--r--fs/verity/measure.c129
-rw-r--r--fs/verity/open.c260
-rw-r--r--fs/verity/read_metadata.c16
-rw-r--r--fs/verity/signature.c108
-rw-r--r--fs/verity/verify.c518
-rw-r--r--fs/xattr.c1112
-rw-r--r--fs/xfs/Kconfig99
-rw-r--r--fs/xfs/Makefile104
-rw-r--r--fs/xfs/kmem.c31
-rw-r--r--fs/xfs/kmem.h83
-rw-r--r--fs/xfs/libxfs/xfs_ag.c502
-rw-r--r--fs/xfs/libxfs/xfs_ag.h330
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c56
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.h21
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c2017
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h183
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c357
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.h13
-rw-r--r--fs/xfs/libxfs/xfs_attr.c1852
-rw-r--r--fs/xfs/libxfs/xfs_attr.h229
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c547
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h17
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c188
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.h14
-rw-r--r--fs/xfs/libxfs/xfs_attr_sf.h25
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c2511
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h127
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c434
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.h216
-rw-r--r--fs/xfs/libxfs/xfs_btree.c1931
-rw-r--r--fs/xfs/libxfs/xfs_btree.h505
-rw-r--r--fs/xfs/libxfs/xfs_btree_mem.c346
-rw-r--r--fs/xfs/libxfs/xfs_btree_mem.h75
-rw-r--r--fs/xfs/libxfs/xfs_btree_staging.c228
-rw-r--r--fs/xfs/libxfs/xfs_btree_staging.h43
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c350
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h50
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h163
-rw-r--r--fs/xfs/libxfs/xfs_defer.c576
-rw-r--r--fs/xfs/libxfs/xfs_defer.h73
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c1045
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h125
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c62
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c52
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c112
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c51
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h30
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c117
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c190
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h130
-rw-r--r--fs/xfs/libxfs/xfs_exchmaps.c1237
-rw-r--r--fs/xfs/libxfs/xfs_exchmaps.h124
-rw-r--r--fs/xfs/libxfs/xfs_format.h562
-rw-r--r--fs/xfs/libxfs/xfs_fs.h348
-rw-r--r--fs/xfs/libxfs/xfs_group.c230
-rw-r--r--fs/xfs/libxfs/xfs_group.h192
-rw-r--r--fs/xfs/libxfs/xfs_health.h182
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c971
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h45
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c273
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.h22
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c79
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c372
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h3
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c447
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h131
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.c741
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.h62
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h412
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h62
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c125
-rw-r--r--fs/xfs/libxfs/xfs_metadir.c485
-rw-r--r--fs/xfs/libxfs/xfs_metadir.h47
-rw-r--r--fs/xfs/libxfs/xfs_metafile.c322
-rw-r--r--fs/xfs/libxfs/xfs_metafile.h44
-rw-r--r--fs/xfs/libxfs/xfs_ondisk.h306
-rw-r--r--fs/xfs/libxfs/xfs_parent.c379
-rw-r--r--fs/xfs/libxfs/xfs_parent.h110
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h97
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c1087
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h103
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c180
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h5
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c1198
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h105
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c405
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h11
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c1293
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.h453
-rw-r--r--fs/xfs/libxfs/xfs_rtgroup.c750
-rw-r--r--fs/xfs/libxfs/xfs_rtgroup.h376
-rw-r--r--fs/xfs/libxfs/xfs_rtrefcount_btree.c757
-rw-r--r--fs/xfs/libxfs/xfs_rtrefcount_btree.h189
-rw-r--r--fs/xfs/libxfs/xfs_rtrmap_btree.c1033
-rw-r--r--fs/xfs/libxfs/xfs_rtrmap_btree.h212
-rw-r--r--fs/xfs/libxfs/xfs_sb.c647
-rw-r--r--fs/xfs/libxfs/xfs_sb.h16
-rw-r--r--fs/xfs/libxfs/xfs_shared.h130
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c223
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.h28
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c129
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c878
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.h41
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.c121
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.h42
-rw-r--r--fs/xfs/libxfs/xfs_types.c119
-rw-r--r--fs/xfs/libxfs/xfs_types.h155
-rw-r--r--fs/xfs/libxfs/xfs_zones.c187
-rw-r--r--fs/xfs/libxfs/xfs_zones.h42
-rw-r--r--fs/xfs/mrlock.h78
-rw-r--r--fs/xfs/scrub/agb_bitmap.c103
-rw-r--r--fs/xfs/scrub/agb_bitmap.h73
-rw-r--r--fs/xfs/scrub/agheader.c307
-rw-r--r--fs/xfs/scrub/agheader_repair.c1224
-rw-r--r--fs/xfs/scrub/agino_bitmap.h49
-rw-r--r--fs/xfs/scrub/alloc.c120
-rw-r--r--fs/xfs/scrub/alloc_repair.c934
-rw-r--r--fs/xfs/scrub/attr.c456
-rw-r--r--fs/xfs/scrub/attr.h67
-rw-r--r--fs/xfs/scrub/attr_repair.c1663
-rw-r--r--fs/xfs/scrub/attr_repair.h15
-rw-r--r--fs/xfs/scrub/bitmap.c720
-rw-r--r--fs/xfs/scrub/bitmap.h76
-rw-r--r--fs/xfs/scrub/bmap.c756
-rw-r--r--fs/xfs/scrub/bmap_repair.c996
-rw-r--r--fs/xfs/scrub/btree.c176
-rw-r--r--fs/xfs/scrub/btree.h18
-rw-r--r--fs/xfs/scrub/common.c1163
-rw-r--r--fs/xfs/scrub/common.h223
-rw-r--r--fs/xfs/scrub/cow_repair.c757
-rw-r--r--fs/xfs/scrub/dab_bitmap.h37
-rw-r--r--fs/xfs/scrub/dabtree.c37
-rw-r--r--fs/xfs/scrub/dabtree.h9
-rw-r--r--fs/xfs/scrub/dir.c658
-rw-r--r--fs/xfs/scrub/dir_repair.c1964
-rw-r--r--fs/xfs/scrub/dirtree.c1009
-rw-r--r--fs/xfs/scrub/dirtree.h168
-rw-r--r--fs/xfs/scrub/dirtree_repair.c821
-rw-r--r--fs/xfs/scrub/dqiterate.c211
-rw-r--r--fs/xfs/scrub/findparent.c470
-rw-r--r--fs/xfs/scrub/findparent.h56
-rw-r--r--fs/xfs/scrub/fsb_bitmap.h37
-rw-r--r--fs/xfs/scrub/fscounters.c343
-rw-r--r--fs/xfs/scrub/fscounters.h21
-rw-r--r--fs/xfs/scrub/fscounters_repair.c85
-rw-r--r--fs/xfs/scrub/health.c226
-rw-r--r--fs/xfs/scrub/health.h13
-rw-r--r--fs/xfs/scrub/ialloc.c375
-rw-r--r--fs/xfs/scrub/ialloc_repair.c877
-rw-r--r--fs/xfs/scrub/ino_bitmap.h37
-rw-r--r--fs/xfs/scrub/inode.c339
-rw-r--r--fs/xfs/scrub/inode_repair.c2090
-rw-r--r--fs/xfs/scrub/iscan.c826
-rw-r--r--fs/xfs/scrub/iscan.h100
-rw-r--r--fs/xfs/scrub/listxattr.c320
-rw-r--r--fs/xfs/scrub/listxattr.h19
-rw-r--r--fs/xfs/scrub/metapath.c677
-rw-r--r--fs/xfs/scrub/newbt.c613
-rw-r--r--fs/xfs/scrub/newbt.h76
-rw-r--r--fs/xfs/scrub/nlinks.c1073
-rw-r--r--fs/xfs/scrub/nlinks.h109
-rw-r--r--fs/xfs/scrub/nlinks_repair.c349
-rw-r--r--fs/xfs/scrub/off_bitmap.h37
-rw-r--r--fs/xfs/scrub/orphanage.c626
-rw-r--r--fs/xfs/scrub/orphanage.h86
-rw-r--r--fs/xfs/scrub/parent.c998
-rw-r--r--fs/xfs/scrub/parent_repair.c1633
-rw-r--r--fs/xfs/scrub/quota.c149
-rw-r--r--fs/xfs/scrub/quota.h36
-rw-r--r--fs/xfs/scrub/quota_repair.c566
-rw-r--r--fs/xfs/scrub/quotacheck.c867
-rw-r--r--fs/xfs/scrub/quotacheck.h76
-rw-r--r--fs/xfs/scrub/quotacheck_repair.c248
-rw-r--r--fs/xfs/scrub/rcbag.c307
-rw-r--r--fs/xfs/scrub/rcbag.h28
-rw-r--r--fs/xfs/scrub/rcbag_btree.c352
-rw-r--r--fs/xfs/scrub/rcbag_btree.h81
-rw-r--r--fs/xfs/scrub/readdir.c421
-rw-r--r--fs/xfs/scrub/readdir.h22
-rw-r--r--fs/xfs/scrub/reap.c1695
-rw-r--r--fs/xfs/scrub/reap.h47
-rw-r--r--fs/xfs/scrub/refcount.c279
-rw-r--r--fs/xfs/scrub/refcount_repair.c750
-rw-r--r--fs/xfs/scrub/repair.c1130
-rw-r--r--fs/xfs/scrub/repair.h219
-rw-r--r--fs/xfs/scrub/rgb_bitmap.h37
-rw-r--r--fs/xfs/scrub/rgsuper.c88
-rw-r--r--fs/xfs/scrub/rmap.c579
-rw-r--r--fs/xfs/scrub/rmap_repair.c1737
-rw-r--r--fs/xfs/scrub/rtb_bitmap.h37
-rw-r--r--fs/xfs/scrub/rtbitmap.c271
-rw-r--r--fs/xfs/scrub/rtbitmap.h77
-rw-r--r--fs/xfs/scrub/rtbitmap_repair.c635
-rw-r--r--fs/xfs/scrub/rtrefcount.c661
-rw-r--r--fs/xfs/scrub/rtrefcount_repair.c761
-rw-r--r--fs/xfs/scrub/rtrmap.c323
-rw-r--r--fs/xfs/scrub/rtrmap_repair.c981
-rw-r--r--fs/xfs/scrub/rtsummary.c373
-rw-r--r--fs/xfs/scrub/rtsummary.h37
-rw-r--r--fs/xfs/scrub/rtsummary_repair.c186
-rw-r--r--fs/xfs/scrub/scrub.c621
-rw-r--r--fs/xfs/scrub/scrub.h251
-rw-r--r--fs/xfs/scrub/stats.c415
-rw-r--r--fs/xfs/scrub/stats.h59
-rw-r--r--fs/xfs/scrub/symlink.c50
-rw-r--r--fs/xfs/scrub/symlink_repair.c510
-rw-r--r--fs/xfs/scrub/tempexch.h22
-rw-r--r--fs/xfs/scrub/tempfile.c980
-rw-r--r--fs/xfs/scrub/tempfile.h51
-rw-r--r--fs/xfs/scrub/trace.c31
-rw-r--r--fs/xfs/scrub/trace.h3242
-rw-r--r--fs/xfs/scrub/xfarray.c1073
-rw-r--r--fs/xfs/scrub/xfarray.h193
-rw-r--r--fs/xfs/scrub/xfblob.c168
-rw-r--r--fs/xfs/scrub/xfblob.h50
-rw-r--r--fs/xfs/scrub/xfile.c324
-rw-r--r--fs/xfs/scrub/xfile.h35
-rw-r--r--fs/xfs/scrub/xfs_scrub.h12
-rw-r--r--fs/xfs/xfs.h4
-rw-r--r--fs/xfs/xfs_acl.c31
-rw-r--r--fs/xfs/xfs_acl.h10
-rw-r--r--fs/xfs/xfs_aops.c529
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_attr_inactive.c33
-rw-r--r--fs/xfs/xfs_attr_item.c1257
-rw-r--r--fs/xfs/xfs_attr_item.h64
-rw-r--r--fs/xfs/xfs_attr_list.c162
-rw-r--r--fs/xfs/xfs_bio_io.c71
-rw-r--r--fs/xfs/xfs_bmap_item.c499
-rw-r--r--fs/xfs/xfs_bmap_item.h7
-rw-r--r--fs/xfs/xfs_bmap_util.c497
-rw-r--r--fs/xfs/xfs_bmap_util.h18
-rw-r--r--fs/xfs/xfs_buf.c1899
-rw-r--r--fs/xfs/xfs_buf.h152
-rw-r--r--fs/xfs/xfs_buf_item.c549
-rw-r--r--fs/xfs/xfs_buf_item.h37
-rw-r--r--fs/xfs/xfs_buf_item_recover.c286
-rw-r--r--fs/xfs/xfs_buf_mem.c247
-rw-r--r--fs/xfs/xfs_buf_mem.h32
-rw-r--r--fs/xfs/xfs_dahash_test.c673
-rw-r--r--fs/xfs/xfs_dahash_test.h12
-rw-r--r--fs/xfs/xfs_dir2_readdir.c75
-rw-r--r--fs/xfs/xfs_discard.c849
-rw-r--r--fs/xfs/xfs_discard.h6
-rw-r--r--fs/xfs/xfs_dquot.c635
-rw-r--r--fs/xfs/xfs_dquot.h66
-rw-r--r--fs/xfs/xfs_dquot_item.c72
-rw-r--r--fs/xfs/xfs_dquot_item.h7
-rw-r--r--fs/xfs/xfs_dquot_item_recover.c41
-rw-r--r--fs/xfs/xfs_drain.c152
-rw-r--r--fs/xfs/xfs_drain.h87
-rw-r--r--fs/xfs/xfs_error.c251
-rw-r--r--fs/xfs/xfs_error.h92
-rw-r--r--fs/xfs/xfs_exchmaps_item.c614
-rw-r--r--fs/xfs/xfs_exchmaps_item.h64
-rw-r--r--fs/xfs/xfs_exchrange.c923
-rw-r--r--fs/xfs/xfs_exchrange.h52
-rw-r--r--fs/xfs/xfs_export.c18
-rw-r--r--fs/xfs/xfs_export.h2
-rw-r--r--fs/xfs/xfs_extent_busy.c340
-rw-r--r--fs/xfs/xfs_extent_busy.h83
-rw-r--r--fs/xfs/xfs_extfree_item.c820
-rw-r--r--fs/xfs/xfs_extfree_item.h29
-rw-r--r--fs/xfs/xfs_file.c1479
-rw-r--r--fs/xfs/xfs_file.h15
-rw-r--r--fs/xfs/xfs_filestream.c448
-rw-r--r--fs/xfs/xfs_filestream.h6
-rw-r--r--fs/xfs/xfs_fsmap.c1089
-rw-r--r--fs/xfs/xfs_fsmap.h19
-rw-r--r--fs/xfs/xfs_fsops.c285
-rw-r--r--fs/xfs/xfs_fsops.h15
-rw-r--r--fs/xfs/xfs_globals.c20
-rw-r--r--fs/xfs/xfs_handle.c922
-rw-r--r--fs/xfs/xfs_handle.h33
-rw-r--r--fs/xfs/xfs_health.c367
-rw-r--r--fs/xfs/xfs_hooks.c52
-rw-r--r--fs/xfs/xfs_hooks.h65
-rw-r--r--fs/xfs/xfs_icache.c621
-rw-r--r--fs/xfs/xfs_icache.h21
-rw-r--r--fs/xfs/xfs_icreate_item.c3
-rw-r--r--fs/xfs/xfs_inode.c2890
-rw-r--r--fs/xfs/xfs_inode.h323
-rw-r--r--fs/xfs/xfs_inode_item.c537
-rw-r--r--fs/xfs/xfs_inode_item.h16
-rw-r--r--fs/xfs/xfs_inode_item_recover.c273
-rw-r--r--fs/xfs/xfs_ioctl.c1316
-rw-r--r--fs/xfs/xfs_ioctl.h39
-rw-r--r--fs/xfs/xfs_ioctl32.c34
-rw-r--r--fs/xfs/xfs_ioctl32.h24
-rw-r--r--fs/xfs/xfs_iomap.c1410
-rw-r--r--fs/xfs/xfs_iomap.h19
-rw-r--r--fs/xfs/xfs_iops.c574
-rw-r--r--fs/xfs/xfs_iops.h16
-rw-r--r--fs/xfs/xfs_itable.c127
-rw-r--r--fs/xfs/xfs_itable.h12
-rw-r--r--fs/xfs/xfs_iunlink_item.c177
-rw-r--r--fs/xfs/xfs_iunlink_item.h27
-rw-r--r--fs/xfs/xfs_iwalk.c176
-rw-r--r--fs/xfs/xfs_iwalk.h9
-rw-r--r--fs/xfs/xfs_linux.h48
-rw-r--r--fs/xfs/xfs_log.c1916
-rw-r--r--fs/xfs/xfs_log.h138
-rw-r--r--fs/xfs/xfs_log_cil.c1160
-rw-r--r--fs/xfs/xfs_log_priv.h284
-rw-r--r--fs/xfs/xfs_log_recover.c820
-rw-r--r--fs/xfs/xfs_message.c97
-rw-r--r--fs/xfs/xfs_message.h66
-rw-r--r--fs/xfs/xfs_mount.c699
-rw-r--r--fs/xfs/xfs_mount.h432
-rw-r--r--fs/xfs/xfs_mru_cache.c51
-rw-r--r--fs/xfs/xfs_notify_failure.c386
-rw-r--r--fs/xfs/xfs_notify_failure.h11
-rw-r--r--fs/xfs/xfs_ondisk.h177
-rw-r--r--fs/xfs/xfs_pnfs.c54
-rw-r--r--fs/xfs/xfs_qm.c810
-rw-r--r--fs/xfs/xfs_qm.h31
-rw-r--r--fs/xfs/xfs_qm_bhv.c119
-rw-r--r--fs/xfs/xfs_qm_syscalls.c82
-rw-r--r--fs/xfs/xfs_quota.h83
-rw-r--r--fs/xfs/xfs_quotaops.c65
-rw-r--r--fs/xfs/xfs_refcount_item.c695
-rw-r--r--fs/xfs/xfs_refcount_item.h8
-rw-r--r--fs/xfs/xfs_reflink.c828
-rw-r--r--fs/xfs/xfs_reflink.h34
-rw-r--r--fs/xfs/xfs_rmap_item.c753
-rw-r--r--fs/xfs/xfs_rmap_item.h7
-rw-r--r--fs/xfs/xfs_rtalloc.c2677
-rw-r--r--fs/xfs/xfs_rtalloc.h129
-rw-r--r--fs/xfs/xfs_stats.c18
-rw-r--r--fs/xfs/xfs_stats.h5
-rw-r--r--fs/xfs/xfs_super.c1262
-rw-r--r--fs/xfs/xfs_super.h3
-rw-r--r--fs/xfs/xfs_symlink.c312
-rw-r--r--fs/xfs/xfs_symlink.h3
-rw-r--r--fs/xfs/xfs_sysctl.c59
-rw-r--r--fs/xfs/xfs_sysctl.h8
-rw-r--r--fs/xfs/xfs_sysfs.c261
-rw-r--r--fs/xfs/xfs_sysfs.h20
-rw-r--r--fs/xfs/xfs_trace.c17
-rw-r--r--fs/xfs/xfs_trace.h3086
-rw-r--r--fs/xfs/xfs_trans.c840
-rw-r--r--fs/xfs/xfs_trans.h91
-rw-r--r--fs/xfs/xfs_trans_ail.c328
-rw-r--r--fs/xfs/xfs_trans_buf.c75
-rw-r--r--fs/xfs/xfs_trans_dquot.c256
-rw-r--r--fs/xfs/xfs_trans_priv.h74
-rw-r--r--fs/xfs/xfs_xattr.c153
-rw-r--r--fs/xfs/xfs_xattr.h14
-rw-r--r--fs/xfs/xfs_zone_alloc.c1328
-rw-r--r--fs/xfs/xfs_zone_alloc.h70
-rw-r--r--fs/xfs/xfs_zone_gc.c1214
-rw-r--r--fs/xfs/xfs_zone_info.c105
-rw-r--r--fs/xfs/xfs_zone_priv.h122
-rw-r--r--fs/xfs/xfs_zone_space_resv.c262
-rw-r--r--fs/zonefs/Makefile2
-rw-r--r--fs/zonefs/file.c873
-rw-r--r--fs/zonefs/super.c2005
-rw-r--r--fs/zonefs/sysfs.c135
-rw-r--r--fs/zonefs/trace.h22
-rw-r--r--fs/zonefs/zonefs.h124
2163 files changed, 428567 insertions, 321798 deletions
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index d7bc93447c85..0c63df574ee7 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config 9P_FS
tristate "Plan 9 Resource Sharing Support (9P2000)"
- depends on INET && NET_9P
+ depends on NET_9P
select NETFS_SUPPORT
help
If you say Y here, you will get experimental support for
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 4dac4a0dc5f4..633da5e37299 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/fs_struct.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <linux/slab.h>
@@ -17,34 +18,64 @@
#include "v9fs_vfs.h"
#include "fid.h"
-static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
+static struct posix_acl *v9fs_fid_get_acl(struct p9_fid *fid, const char *name)
{
ssize_t size;
void *value = NULL;
struct posix_acl *acl = NULL;
size = v9fs_fid_xattr_get(fid, name, NULL, 0);
- if (size > 0) {
- value = kzalloc(size, GFP_NOFS);
- if (!value)
- return ERR_PTR(-ENOMEM);
- size = v9fs_fid_xattr_get(fid, name, value, size);
- if (size > 0) {
- acl = posix_acl_from_xattr(&init_user_ns, value, size);
- if (IS_ERR(acl))
- goto err_out;
- }
- } else if (size == -ENODATA || size == 0 ||
- size == -ENOSYS || size == -EOPNOTSUPP) {
- acl = NULL;
- } else
- acl = ERR_PTR(-EIO);
-
-err_out:
+ if (size < 0)
+ return ERR_PTR(size);
+ if (size == 0)
+ return ERR_PTR(-ENODATA);
+
+ value = kzalloc(size, GFP_NOFS);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+
+ size = v9fs_fid_xattr_get(fid, name, value, size);
+ if (size < 0)
+ acl = ERR_PTR(size);
+ else if (size == 0)
+ acl = ERR_PTR(-ENODATA);
+ else
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
kfree(value);
return acl;
}
+static struct posix_acl *v9fs_acl_get(struct dentry *dentry, const char *name)
+{
+ struct p9_fid *fid;
+ struct posix_acl *acl = NULL;
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return ERR_CAST(fid);
+
+ acl = v9fs_fid_get_acl(fid, name);
+ p9_fid_put(fid);
+ return acl;
+}
+
+static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, const char *name)
+{
+ int retval;
+ struct posix_acl *acl = NULL;
+
+ acl = v9fs_fid_get_acl(fid, name);
+ if (!IS_ERR(acl))
+ return acl;
+
+ retval = PTR_ERR(acl);
+ if (retval == -ENODATA || retval == -ENOSYS || retval == -EOPNOTSUPP)
+ return NULL;
+
+ /* map everything else to -EIO */
+ return ERR_PTR(-EIO);
+}
+
int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
{
int retval = 0;
@@ -89,7 +120,7 @@ static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type)
return acl;
}
-struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type, bool rcu)
+struct posix_acl *v9fs_iop_get_inode_acl(struct inode *inode, int type, bool rcu)
{
struct v9fs_session_info *v9ses;
@@ -109,6 +140,112 @@ struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type, bool rcu)
}
+struct posix_acl *v9fs_iop_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type)
+{
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_dentry2v9ses(dentry);
+ /* We allow set/get/list of acl when access=client is not specified. */
+ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
+ return v9fs_acl_get(dentry, posix_acl_xattr_name(type));
+ return v9fs_get_cached_acl(d_inode(dentry), type);
+}
+
+int v9fs_iop_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct posix_acl *acl, int type)
+{
+ int retval;
+ size_t size = 0;
+ void *value = NULL;
+ const char *acl_name;
+ struct v9fs_session_info *v9ses;
+ struct inode *inode = d_inode(dentry);
+
+ if (acl) {
+ retval = posix_acl_valid(inode->i_sb->s_user_ns, acl);
+ if (retval)
+ goto err_out;
+
+ size = posix_acl_xattr_size(acl->a_count);
+
+ value = kzalloc(size, GFP_NOFS);
+ if (!value) {
+ retval = -ENOMEM;
+ goto err_out;
+ }
+
+ retval = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+ if (retval < 0)
+ goto err_out;
+ }
+
+ /*
+ * set the attribute on the remote. Without even looking at the
+ * xattr value. We leave it to the server to validate
+ */
+ acl_name = posix_acl_xattr_name(type);
+ v9ses = v9fs_dentry2v9ses(dentry);
+ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
+ retval = v9fs_xattr_set(dentry, acl_name, value, size, 0);
+ goto err_out;
+ }
+
+ if (S_ISLNK(inode->i_mode)) {
+ retval = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ if (!inode_owner_or_capable(&nop_mnt_idmap, inode)) {
+ retval = -EPERM;
+ goto err_out;
+ }
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ if (acl) {
+ struct iattr iattr = {};
+ struct posix_acl *acl_mode = acl;
+
+ retval = posix_acl_update_mode(&nop_mnt_idmap, inode,
+ &iattr.ia_mode,
+ &acl_mode);
+ if (retval)
+ goto err_out;
+ if (!acl_mode) {
+ /*
+ * ACL can be represented by the mode bits.
+ * So don't update ACL below.
+ */
+ kfree(value);
+ value = NULL;
+ size = 0;
+ }
+ iattr.ia_valid = ATTR_MODE;
+ /*
+ * FIXME should we update ctime ?
+ * What is the following setxattr update the mode ?
+ */
+ v9fs_vfs_setattr_dotl(&nop_mnt_idmap, dentry, &iattr);
+ }
+ break;
+ case ACL_TYPE_DEFAULT:
+ if (!S_ISDIR(inode->i_mode)) {
+ retval = acl ? -EINVAL : 0;
+ goto err_out;
+ }
+ break;
+ }
+
+ retval = v9fs_xattr_set(dentry, acl_name, value, size, 0);
+ if (!retval)
+ set_cached_acl(inode, type, acl);
+
+err_out:
+ kfree(value);
+ return retval;
+}
+
static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl)
{
int retval;
@@ -207,124 +344,3 @@ int v9fs_acl_mode(struct inode *dir, umode_t *modep,
*modep = mode;
return 0;
}
-
-static int v9fs_xattr_get_acl(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- struct v9fs_session_info *v9ses;
- struct posix_acl *acl;
- int error;
-
- v9ses = v9fs_dentry2v9ses(dentry);
- /*
- * We allow set/get/list of acl when access=client is not specified
- */
- if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
- return v9fs_xattr_get(dentry, handler->name, buffer, size);
-
- acl = v9fs_get_cached_acl(inode, handler->flags);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- if (acl == NULL)
- return -ENODATA;
- error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- posix_acl_release(acl);
-
- return error;
-}
-
-static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- int retval;
- struct posix_acl *acl;
- struct v9fs_session_info *v9ses;
-
- v9ses = v9fs_dentry2v9ses(dentry);
- /*
- * set the attribute on the remote. Without even looking at the
- * xattr value. We leave it to the server to validate
- */
- if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
- return v9fs_xattr_set(dentry, handler->name, value, size,
- flags);
-
- if (S_ISLNK(inode->i_mode))
- return -EOPNOTSUPP;
- if (!inode_owner_or_capable(&init_user_ns, inode))
- return -EPERM;
- if (value) {
- /* update the cached acl value */
- acl = posix_acl_from_xattr(&init_user_ns, value, size);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- else if (acl) {
- retval = posix_acl_valid(inode->i_sb->s_user_ns, acl);
- if (retval)
- goto err_out;
- }
- } else
- acl = NULL;
-
- switch (handler->flags) {
- case ACL_TYPE_ACCESS:
- if (acl) {
- struct iattr iattr = { 0 };
- struct posix_acl *old_acl = acl;
-
- retval = posix_acl_update_mode(&init_user_ns, inode,
- &iattr.ia_mode, &acl);
- if (retval)
- goto err_out;
- if (!acl) {
- /*
- * ACL can be represented
- * by the mode bits. So don't
- * update ACL.
- */
- posix_acl_release(old_acl);
- value = NULL;
- size = 0;
- }
- iattr.ia_valid = ATTR_MODE;
- /* FIXME should we update ctime ?
- * What is the following setxattr update the
- * mode ?
- */
- v9fs_vfs_setattr_dotl(&init_user_ns, dentry, &iattr);
- }
- break;
- case ACL_TYPE_DEFAULT:
- if (!S_ISDIR(inode->i_mode)) {
- retval = acl ? -EINVAL : 0;
- goto err_out;
- }
- break;
- default:
- BUG();
- }
- retval = v9fs_xattr_set(dentry, handler->name, value, size, flags);
- if (!retval)
- set_cached_acl(inode, handler->flags, acl);
-err_out:
- posix_acl_release(acl);
- return retval;
-}
-
-const struct xattr_handler v9fs_xattr_acl_access_handler = {
- .name = XATTR_NAME_POSIX_ACL_ACCESS,
- .flags = ACL_TYPE_ACCESS,
- .get = v9fs_xattr_get_acl,
- .set = v9fs_xattr_set_acl,
-};
-
-const struct xattr_handler v9fs_xattr_acl_default_handler = {
- .name = XATTR_NAME_POSIX_ACL_DEFAULT,
- .flags = ACL_TYPE_DEFAULT,
- .get = v9fs_xattr_get_acl,
- .set = v9fs_xattr_set_acl,
-};
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
index ce5175d463dd..333cfcc281da 100644
--- a/fs/9p/acl.h
+++ b/fs/9p/acl.h
@@ -8,8 +8,12 @@
#ifdef CONFIG_9P_FS_POSIX_ACL
int v9fs_get_acl(struct inode *inode, struct p9_fid *fid);
-struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type,
+struct posix_acl *v9fs_iop_get_inode_acl(struct inode *inode, int type,
bool rcu);
+struct posix_acl *v9fs_iop_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type);
+int v9fs_iop_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct posix_acl *acl, int type);
int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid);
int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid,
struct posix_acl *dacl, struct posix_acl *acl);
@@ -17,7 +21,9 @@ int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl);
void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl);
#else
+#define v9fs_iop_get_inode_acl NULL
#define v9fs_iop_get_acl NULL
+#define v9fs_iop_set_acl NULL
static inline int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
{
return 0;
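
[Illustrative note, not part of the patch: with the split declared above, the inode-only hook goes into ->get_inode_acl while the new dentry/idmap-aware pair goes into ->get_acl/->set_acl. A minimal sketch of how the 9p dotl inode_operations would reference them, assuming the standard VFS field names; the real tables live in fs/9p/vfs_inode_dotl.c and are not shown in this diff.]

	/* sketch only -- table name and elided fields are illustrative */
	const struct inode_operations v9fs_dir_inode_operations_dotl_sketch = {
		/* create/lookup/mkdir/... elided */
		.get_inode_acl	= v9fs_iop_get_inode_acl,	/* RCU-walk capable, inode only */
		.get_acl	= v9fs_iop_get_acl,		/* dentry + mnt_idmap variant */
		.set_acl	= v9fs_iop_set_acl,
	};
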
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index f2ba131cede1..12c0ae29f185 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -16,186 +16,61 @@
#include "v9fs.h"
#include "cache.h"
-#define CACHETAG_LEN 11
-
-struct fscache_netfs v9fs_cache_netfs = {
- .name = "9p",
- .version = 0,
-};
-
-/*
- * v9fs_random_cachetag - Generate a random tag to be associated
- * with a new cache session.
- *
- * The value of jiffies is used for a fairly randomly cache tag.
- */
-
-static
-int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
+int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
+ const char *dev_name)
{
- v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
- if (!v9ses->cachetag)
- return -ENOMEM;
+ struct fscache_volume *vcookie;
+ char *name, *p;
- return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
-}
-
-const struct fscache_cookie_def v9fs_cache_session_index_def = {
- .name = "9P.session",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
+ name = kasprintf(GFP_KERNEL, "9p,%s,%s",
+ dev_name, v9ses->cachetag ?: v9ses->aname);
+ if (!name)
+ return -ENOMEM;
-void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
-{
- /* If no cache session tag was specified, we generate a random one. */
- if (!v9ses->cachetag) {
- if (v9fs_random_cachetag(v9ses) < 0) {
- v9ses->fscache = NULL;
- kfree(v9ses->cachetag);
- v9ses->cachetag = NULL;
- return;
+ for (p = name; *p; p++)
+ if (*p == '/')
+ *p = ';';
+
+ vcookie = fscache_acquire_volume(name, NULL, NULL, 0);
+ p9_debug(P9_DEBUG_FSC, "session %p get volume %p (%s)\n",
+ v9ses, vcookie, name);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ kfree(name);
+ return PTR_ERR(vcookie);
}
+ pr_err("Cache volume key already in use (%s)\n", name);
+ vcookie = NULL;
}
-
- v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
- &v9fs_cache_session_index_def,
- v9ses->cachetag,
- strlen(v9ses->cachetag),
- NULL, 0,
- v9ses, 0, true);
- p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
- v9ses, v9ses->fscache);
-}
-
-void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
-{
- p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
- v9ses, v9ses->fscache);
- fscache_relinquish_cookie(v9ses->fscache, NULL, false);
- v9ses->fscache = NULL;
+ v9ses->fscache = vcookie;
+ kfree(name);
+ return 0;
}
-static enum
-fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
- const void *buffer,
- uint16_t buflen,
- loff_t object_size)
-{
- const struct v9fs_inode *v9inode = cookie_netfs_data;
-
- if (buflen != sizeof(v9inode->qid.version))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- if (memcmp(buffer, &v9inode->qid.version,
- sizeof(v9inode->qid.version)))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-const struct fscache_cookie_def v9fs_cache_inode_index_def = {
- .name = "9p.inode",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = v9fs_cache_inode_check_aux,
-};
-
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
- struct v9fs_inode *v9inode;
+ struct v9fs_inode *v9inode = V9FS_I(inode);
struct v9fs_session_info *v9ses;
+ __le32 version;
+ __le64 path;
if (!S_ISREG(inode->i_mode))
return;
-
- v9inode = V9FS_I(inode);
- if (v9inode->fscache)
+ if (WARN_ON(v9fs_inode_cookie(v9inode)))
return;
+ version = cpu_to_le32(v9inode->qid.version);
+ path = cpu_to_le64(v9inode->qid.path);
v9ses = v9fs_inode2v9ses(inode);
- v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
- &v9fs_cache_inode_index_def,
- &v9inode->qid.path,
- sizeof(v9inode->qid.path),
- &v9inode->qid.version,
- sizeof(v9inode->qid.version),
- v9inode,
- i_size_read(&v9inode->vfs_inode),
- true);
+ v9inode->netfs.cache =
+ fscache_acquire_cookie(v9fs_session_cache(v9ses),
+ 0,
+ &path, sizeof(path),
+ &version, sizeof(version),
+ i_size_read(&v9inode->netfs.inode));
+ if (v9inode->netfs.cache)
+ mapping_set_release_always(inode->i_mapping);
p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
- inode, v9inode->fscache);
-}
-
-void v9fs_cache_inode_put_cookie(struct inode *inode)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
-
- if (!v9inode->fscache)
- return;
- p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
- inode, v9inode->fscache);
-
- fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
- false);
- v9inode->fscache = NULL;
-}
-
-void v9fs_cache_inode_flush_cookie(struct inode *inode)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
-
- if (!v9inode->fscache)
- return;
- p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
- inode, v9inode->fscache);
-
- fscache_relinquish_cookie(v9inode->fscache, NULL, true);
- v9inode->fscache = NULL;
-}
-
-void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
-
- if (!v9inode->fscache)
- return;
-
- mutex_lock(&v9inode->fscache_lock);
-
- if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
- v9fs_cache_inode_flush_cookie(inode);
- else
- v9fs_cache_inode_get_cookie(inode);
-
- mutex_unlock(&v9inode->fscache_lock);
-}
-
-void v9fs_cache_inode_reset_cookie(struct inode *inode)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
- struct v9fs_session_info *v9ses;
- struct fscache_cookie *old;
-
- if (!v9inode->fscache)
- return;
-
- old = v9inode->fscache;
-
- mutex_lock(&v9inode->fscache_lock);
- fscache_relinquish_cookie(v9inode->fscache, NULL, true);
-
- v9ses = v9fs_inode2v9ses(inode);
- v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
- &v9fs_cache_inode_index_def,
- &v9inode->qid.path,
- sizeof(v9inode->qid.path),
- &v9inode->qid.version,
- sizeof(v9inode->qid.version),
- v9inode,
- i_size_read(&v9inode->vfs_inode),
- true);
- p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
- inode, old, v9inode->fscache);
-
- mutex_unlock(&v9inode->fscache_lock);
+ inode, v9fs_inode_cookie(v9inode));
}
diff --git a/fs/9p/cache.h b/fs/9p/cache.h
index 7480b4b49fea..ee1b6b06a2fd 100644
--- a/fs/9p/cache.h
+++ b/fs/9p/cache.h
@@ -7,26 +7,14 @@
#ifndef _9P_CACHE_H
#define _9P_CACHE_H
-#define FSCACHE_USE_NEW_IO_API
-#include <linux/fscache.h>
#ifdef CONFIG_9P_FSCACHE
+#include <linux/fscache.h>
-extern struct fscache_netfs v9fs_cache_netfs;
-extern const struct fscache_cookie_def v9fs_cache_session_index_def;
-extern const struct fscache_cookie_def v9fs_cache_inode_index_def;
-
-extern void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses);
-extern void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses);
+extern int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
+ const char *dev_name);
extern void v9fs_cache_inode_get_cookie(struct inode *inode);
-extern void v9fs_cache_inode_put_cookie(struct inode *inode);
-extern void v9fs_cache_inode_flush_cookie(struct inode *inode);
-extern void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp);
-extern void v9fs_cache_inode_reset_cookie(struct inode *inode);
-
-extern int __v9fs_cache_register(void);
-extern void __v9fs_cache_unregister(void);
#else /* CONFIG_9P_FSCACHE */
@@ -34,13 +22,5 @@ static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
{
}
-static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
-{
-}
-
-static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
-{
-}
-
#endif /* CONFIG_9P_FSCACHE */
#endif /* _9P_CACHE_H */
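
[Illustrative note, not part of the patch: the reworked v9fs_cache_session_get_cookie() now takes the mount's device name and returns an error, so session setup can fail cleanly if the fscache volume cannot be acquired. A rough sketch of the expected call site in v9fs_session_init(), assuming the cache mode is a flag set containing CACHE_FSCACHE and that an err_clnt cleanup label exists.]

#ifdef CONFIG_9P_FSCACHE
	/* register the session for caching, keyed by dev_name and cachetag/aname */
	if (v9ses->cache & CACHE_FSCACHE) {
		rc = v9fs_cache_session_get_cookie(v9ses, dev_name);
		if (rc < 0)
			goto err_clnt;	/* label name assumed for illustration */
	}
#endif
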
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 6aab046c98e2..f84412290a30 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -11,7 +11,6 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/idr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -28,24 +27,38 @@ static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
/**
* v9fs_fid_add - add a fid to a dentry
* @dentry: dentry that the fid is being added to
- * @fid: fid to add
+ * @pfid: fid to add, NULLed out
*
*/
-void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
+void v9fs_fid_add(struct dentry *dentry, struct p9_fid **pfid)
{
+ struct p9_fid *fid = *pfid;
+
spin_lock(&dentry->d_lock);
__add_fid(dentry, fid);
spin_unlock(&dentry->d_lock);
+
+ *pfid = NULL;
+}
+
+static bool v9fs_is_writeable(int mode)
+{
+ if (mode & (P9_OWRITE|P9_ORDWR))
+ return true;
+ else
+ return false;
}
/**
* v9fs_fid_find_inode - search for an open fid off of the inode list
* @inode: return a fid pointing to a specific inode
+ * @want_writeable: only consider fids which are writeable
* @uid: return a fid belonging to the specified user
+ * @any: ignore uid as a selection criteria
*
*/
-
-static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
+struct p9_fid *v9fs_fid_find_inode(struct inode *inode, bool want_writeable,
+ kuid_t uid, bool any)
{
struct hlist_head *h;
struct p9_fid *fid, *ret = NULL;
@@ -55,8 +68,13 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
spin_lock(&inode->i_lock);
h = (struct hlist_head *)&inode->i_private;
hlist_for_each_entry(fid, h, ilist) {
- if (uid_eq(fid->uid, uid)) {
- refcount_inc(&fid->count);
+ if (any || uid_eq(fid->uid, uid)) {
+ if (want_writeable && !v9fs_is_writeable(fid->mode)) {
+ p9_debug(P9_DEBUG_VFS, " mode: %x not writeable?\n",
+ fid->mode);
+ continue;
+ }
+ p9_fid_get(fid);
ret = fid;
break;
}
@@ -68,15 +86,19 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
/**
* v9fs_open_fid_add - add an open fid to an inode
* @inode: inode that the fid is being added to
- * @fid: fid to add
+ * @pfid: fid to add, NULLed out
*
*/
-void v9fs_open_fid_add(struct inode *inode, struct p9_fid *fid)
+void v9fs_open_fid_add(struct inode *inode, struct p9_fid **pfid)
{
+ struct p9_fid *fid = *pfid;
+
spin_lock(&inode->i_lock);
hlist_add_head(&fid->ilist, (struct hlist_head *)&inode->i_private);
spin_unlock(&inode->i_lock);
+
+ *pfid = NULL;
}
@@ -96,24 +118,22 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
dentry, dentry, from_kuid(&init_user_ns, uid),
any);
ret = NULL;
-
- if (d_inode(dentry))
- ret = v9fs_fid_find_inode(d_inode(dentry), uid);
-
/* we'll recheck under lock if there's anything to look in */
- if (!ret && dentry->d_fsdata) {
+ if (dentry->d_fsdata) {
struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata;
spin_lock(&dentry->d_lock);
hlist_for_each_entry(fid, h, dlist) {
if (any || uid_eq(fid->uid, uid)) {
ret = fid;
- refcount_inc(&ret->count);
+ p9_fid_get(ret);
break;
}
}
spin_unlock(&dentry->d_lock);
}
+ if (!ret && dentry->d_inode)
+ ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any);
return ret;
}
@@ -151,9 +171,9 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
{
struct dentry *ds;
const unsigned char **wnames, *uname;
- int i, n, l, clone, access;
+ int i, n, l, access;
struct v9fs_session_info *v9ses;
- struct p9_fid *fid, *old_fid = NULL;
+ struct p9_fid *fid, *root_fid, *old_fid;
v9ses = v9fs_dentry2v9ses(dentry);
access = v9ses->flags & V9FS_ACCESS_MASK;
@@ -170,17 +190,17 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
fid = v9fs_fid_find(ds, uid, any);
if (fid) {
/* Found the parent fid do a lookup with that */
- struct p9_fid *ofid = fid;
+ old_fid = fid;
- fid = p9_client_walk(ofid, 1, &dentry->d_name.name, 1);
- p9_client_clunk(ofid);
+ fid = p9_client_walk(old_fid, 1, &dentry->d_name.name, 1);
+ p9_fid_put(old_fid);
goto fid_out;
}
up_read(&v9ses->rename_sem);
/* start from the root and try to do a lookup */
- fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any);
- if (!fid) {
+ root_fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any);
+ if (!root_fid) {
/* the user is not attached to the fs yet */
if (access == V9FS_ACCESS_SINGLE)
return ERR_PTR(-EPERM);
@@ -195,13 +215,13 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
if (IS_ERR(fid))
return fid;
- v9fs_fid_add(dentry->d_sb->s_root, fid);
+ root_fid = p9_fid_get(fid);
+ v9fs_fid_add(dentry->d_sb->s_root, &fid);
}
/* If we are root ourself just return that */
- if (dentry->d_sb->s_root == dentry) {
- refcount_inc(&fid->count);
- return fid;
- }
+ if (dentry->d_sb->s_root == dentry)
+ return root_fid;
+
/*
* Do a multipath walk with attached root.
* When walking parent we need to make sure we
@@ -213,30 +233,27 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
fid = ERR_PTR(n);
goto err_out;
}
- clone = 1;
+ fid = root_fid;
+ old_fid = root_fid;
i = 0;
while (i < n) {
l = min(n - i, P9_MAXWELEM);
/*
* We need to hold rename lock when doing a multipath
- * walk to ensure none of the patch component change
+ * walk to ensure none of the path components change
*/
- fid = p9_client_walk(fid, l, &wnames[i], clone);
+ fid = p9_client_walk(old_fid, l, &wnames[i],
+ old_fid == root_fid /* clone */);
+ /* non-cloning walk will return the same fid */
+ if (fid != old_fid) {
+ p9_fid_put(old_fid);
+ old_fid = fid;
+ }
if (IS_ERR(fid)) {
- if (old_fid) {
- /*
- * If we fail, clunk fid which are mapping
- * to path component and not the last component
- * of the path.
- */
- p9_client_clunk(old_fid);
- }
kfree(wnames);
goto err_out;
}
- old_fid = fid;
i += l;
- clone = 0;
}
kfree(wnames);
fid_out:
@@ -244,11 +261,11 @@ fid_out:
spin_lock(&dentry->d_lock);
if (d_unhashed(dentry)) {
spin_unlock(&dentry->d_lock);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
fid = ERR_PTR(-ENOENT);
} else {
__add_fid(dentry, fid);
- refcount_inc(&fid->count);
+ p9_fid_get(fid);
spin_unlock(&dentry->d_lock);
}
}
@@ -296,28 +313,3 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
return v9fs_fid_lookup_with_uid(dentry, uid, any);
}
-struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
-{
- int err;
- struct p9_fid *fid, *ofid;
-
- ofid = v9fs_fid_lookup_with_uid(dentry, GLOBAL_ROOT_UID, 0);
- fid = clone_fid(ofid);
- if (IS_ERR(fid))
- goto error_out;
- p9_client_clunk(ofid);
- /*
- * writeback fid will only be used to write back the
- * dirty pages. We always request for the open fid in read-write
- * mode so that a partial page write which result in page
- * read can work.
- */
- err = p9_client_open(fid, O_RDWR);
- if (err < 0) {
- p9_client_clunk(fid);
- fid = ERR_PTR(err);
- goto error_out;
- }
-error_out:
- return fid;
-}
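
[Illustrative note, not part of the patch: changing v9fs_fid_add()/v9fs_open_fid_add() to take a struct p9_fid ** makes the ownership transfer explicit; the callee consumes the caller's reference and NULLs the pointer. A minimal caller sketch:]

	/* fid: a reference the caller obtained from a walk/lookup/open */
	v9fs_fid_add(dentry, &fid);	/* consumes the reference; fid is now NULL */
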
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index f7f33509e169..0d6138bee2a3 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -7,15 +7,17 @@
#ifndef FS_9P_FID_H
#define FS_9P_FID_H
#include <linux/list.h>
+#include "v9fs.h"
+struct p9_fid *v9fs_fid_find_inode(struct inode *inode, bool want_writeable,
+ kuid_t uid, bool any);
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
static inline struct p9_fid *v9fs_parent_fid(struct dentry *dentry)
{
return v9fs_fid_lookup(dentry->d_parent);
}
-void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
-struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
-void v9fs_open_fid_add(struct inode *inode, struct p9_fid *fid);
+void v9fs_fid_add(struct dentry *dentry, struct p9_fid **fid);
+void v9fs_open_fid_add(struct inode *inode, struct p9_fid **fid);
static inline struct p9_fid *clone_fid(struct p9_fid *fid)
{
return IS_ERR(fid) ? fid : p9_client_walk(fid, 0, NULL, 1);
@@ -29,7 +31,31 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
return fid;
nfid = clone_fid(fid);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return nfid;
}
+/**
+ * v9fs_fid_add_modes - add cache flags to fid mode (for client use only)
+ * @fid: fid to augment
+ * @s_flags: session info mount flags
+ * @s_cache: session info cache flags
+ * @f_flags: unix open flags
+ *
+ * Make sure the fid's mode reflects the flags of the underlying mount;
+ * a qid.version of 0 indicates a synthetic or legacy file system.
+ * NOTE: these are set after open, so they only reflect the 9p client,
+ * not the underlying file system on the server.
+ */
+static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags,
+ unsigned int s_cache, unsigned int f_flags)
+{
+ if ((!s_cache) ||
+ ((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
+ (s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
+ fid->mode |= P9L_DIRECT; /* no read or write cache */
+ } else if ((!(s_cache & CACHE_WRITEBACK)) ||
+ (f_flags & O_DSYNC) || (s_flags & V9FS_SYNC)) {
+ fid->mode |= P9L_NOWRITECACHE;
+ }
+}
#endif
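
For reference, the open path later in this series applies these mode hints to a
freshly opened fid before publishing it on the inode, roughly as follows
(abridged from the vfs_file.c hunk further below):

	v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags);
	v9fs_open_fid_add(inode, &fid);
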
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index e32dd5f7721b..057487efaaeb 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -13,8 +13,8 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/cred.h>
-#include <linux/parser.h>
-#include <linux/idr.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
@@ -34,46 +34,85 @@ struct kmem_cache *v9fs_inode_cache;
*/
enum {
+ /* Mount-point source. We need to handle this explicitly because
+ * the code below accepts unknown args, and the VFS layer only handles
+ * source if we reject it as EINVAL. */
+ Opt_source,
/* Options that take integer arguments */
Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
/* String options */
Opt_uname, Opt_remotename, Opt_cache, Opt_cachetag,
/* Options that take no arguments */
- Opt_nodevmap,
- /* Cache options */
- Opt_cache_loose, Opt_fscache, Opt_mmap,
+ Opt_nodevmap, Opt_noxattr, Opt_directio, Opt_ignoreqv,
/* Access options */
Opt_access, Opt_posixacl,
/* Lock timeout option */
Opt_locktimeout,
- /* Error token */
- Opt_err
+
+ /* Client options */
+ Opt_msize, Opt_trans, Opt_legacy, Opt_version,
+
+ /* fd transport options */
+ /* Options that take integer arguments */
+ Opt_rfdno, Opt_wfdno,
+ /* Options that take no arguments */
+
+ /* rdma transport options */
+ /* Options that take integer arguments */
+ Opt_rq_depth, Opt_sq_depth, Opt_timeout,
+
+ /* Options for both fd and rdma transports */
+ Opt_port, Opt_privport,
};
-static const match_table_t tokens = {
- {Opt_debug, "debug=%x"},
- {Opt_dfltuid, "dfltuid=%u"},
- {Opt_dfltgid, "dfltgid=%u"},
- {Opt_afid, "afid=%u"},
- {Opt_uname, "uname=%s"},
- {Opt_remotename, "aname=%s"},
- {Opt_nodevmap, "nodevmap"},
- {Opt_cache, "cache=%s"},
- {Opt_cache_loose, "loose"},
- {Opt_fscache, "fscache"},
- {Opt_mmap, "mmap"},
- {Opt_cachetag, "cachetag=%s"},
- {Opt_access, "access=%s"},
- {Opt_posixacl, "posixacl"},
- {Opt_locktimeout, "locktimeout=%u"},
- {Opt_err, NULL}
+static const struct constant_table p9_versions[] = {
+ { "9p2000", p9_proto_legacy },
+ { "9p2000.u", p9_proto_2000u },
+ { "9p2000.L", p9_proto_2000L },
+ {}
};
-static const char *const v9fs_cache_modes[nr__p9_cache_modes] = {
- [CACHE_NONE] = "none",
- [CACHE_MMAP] = "mmap",
- [CACHE_LOOSE] = "loose",
- [CACHE_FSCACHE] = "fscache",
+/*
+ * This structure contains all parameters used for the core code,
+ * the client, and all the transports.
+ */
+const struct fs_parameter_spec v9fs_param_spec[] = {
+ fsparam_string ("source", Opt_source),
+ fsparam_u32hex ("debug", Opt_debug),
+ fsparam_uid ("dfltuid", Opt_dfltuid),
+ fsparam_gid ("dfltgid", Opt_dfltgid),
+ fsparam_u32 ("afid", Opt_afid),
+ fsparam_string ("uname", Opt_uname),
+ fsparam_string ("aname", Opt_remotename),
+ fsparam_flag ("nodevmap", Opt_nodevmap),
+ fsparam_flag ("noxattr", Opt_noxattr),
+ fsparam_flag ("directio", Opt_directio),
+ fsparam_flag ("ignoreqv", Opt_ignoreqv),
+ fsparam_string ("cache", Opt_cache),
+ fsparam_string ("cachetag", Opt_cachetag),
+ fsparam_string ("access", Opt_access),
+ fsparam_flag ("posixacl", Opt_posixacl),
+ fsparam_u32 ("locktimeout", Opt_locktimeout),
+
+ /* client options */
+ fsparam_u32 ("msize", Opt_msize),
+ fsparam_flag ("noextend", Opt_legacy),
+ fsparam_string ("trans", Opt_trans),
+ fsparam_enum ("version", Opt_version, p9_versions),
+
+ /* fd transport options */
+ fsparam_u32 ("rfdno", Opt_rfdno),
+ fsparam_u32 ("wfdno", Opt_wfdno),
+
+ /* rdma transport options */
+ fsparam_u32 ("sq", Opt_sq_depth),
+ fsparam_u32 ("rq", Opt_rq_depth),
+ fsparam_u32 ("timeout", Opt_timeout),
+
+ /* fd and rdma transport options */
+ fsparam_u32 ("port", Opt_port),
+ fsparam_flag ("privport", Opt_privport),
+ {}
};
/* Interpret mount options for cache mode */
@@ -82,19 +121,24 @@ static int get_cache_mode(char *s)
int version = -EINVAL;
if (!strcmp(s, "loose")) {
- version = CACHE_LOOSE;
+ version = CACHE_SC_LOOSE;
p9_debug(P9_DEBUG_9P, "Cache mode: loose\n");
} else if (!strcmp(s, "fscache")) {
- version = CACHE_FSCACHE;
+ version = CACHE_SC_FSCACHE;
p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n");
} else if (!strcmp(s, "mmap")) {
- version = CACHE_MMAP;
+ version = CACHE_SC_MMAP;
p9_debug(P9_DEBUG_9P, "Cache mode: mmap\n");
+ } else if (!strcmp(s, "readahead")) {
+ version = CACHE_SC_READAHEAD;
+ p9_debug(P9_DEBUG_9P, "Cache mode: readahead\n");
} else if (!strcmp(s, "none")) {
- version = CACHE_NONE;
+ version = CACHE_SC_NONE;
p9_debug(P9_DEBUG_9P, "Cache mode: none\n");
- } else
- pr_info("Unknown Cache mode %s\n", s);
+ } else if (kstrtoint(s, 0, &version) != 0) {
+ version = -EINVAL;
+ pr_info("Unknown Cache mode or invalid value %s\n", s);
+ }
return version;
}
@@ -106,7 +150,7 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
struct v9fs_session_info *v9ses = root->d_sb->s_fs_info;
if (v9ses->debug)
- seq_printf(m, ",debug=%x", v9ses->debug);
+ seq_printf(m, ",debug=%#x", v9ses->debug);
if (!uid_eq(v9ses->dfltuid, V9FS_DEFUID))
seq_printf(m, ",dfltuid=%u",
from_kuid_munged(&init_user_ns, v9ses->dfltuid));
@@ -122,9 +166,9 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
if (v9ses->nodev)
seq_puts(m, ",nodevmap");
if (v9ses->cache)
- seq_printf(m, ",%s", v9fs_cache_modes[v9ses->cache]);
+ seq_printf(m, ",cache=%#x", v9ses->cache);
#ifdef CONFIG_9P_FSCACHE
- if (v9ses->cachetag && v9ses->cache == CACHE_FSCACHE)
+ if (v9ses->cachetag && (v9ses->cache & CACHE_FSCACHE))
seq_printf(m, ",cachetag=%s", v9ses->cachetag);
#endif
@@ -144,274 +188,268 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
break;
}
+ if (v9ses->flags & V9FS_IGNORE_QV)
+ seq_puts(m, ",ignoreqv");
+ if (v9ses->flags & V9FS_DIRECT_IO)
+ seq_puts(m, ",directio");
if (v9ses->flags & V9FS_POSIX_ACL)
seq_puts(m, ",posixacl");
+ if (v9ses->flags & V9FS_NO_XATTR)
+ seq_puts(m, ",noxattr");
+
return p9_show_client_options(m, v9ses->clnt);
}
/**
- * v9fs_parse_options - parse mount options into session structure
- * @v9ses: existing v9fs session information
- * @opts: The mount option string
+ * v9fs_parse_param - parse a mount option into the filesystem context
+ * @fc: the filesystem context
+ * @param: the parameter to parse
*
* Return 0 upon success, -ERRNO upon failure.
*/
-
-static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
+int v9fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *options, *tmp_options;
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int option = 0;
+ struct v9fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
char *s;
- int ret = 0;
-
- /* setup defaults */
- v9ses->afid = ~0;
- v9ses->debug = 0;
- v9ses->cache = CACHE_NONE;
-#ifdef CONFIG_9P_FSCACHE
- v9ses->cachetag = NULL;
-#endif
- v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
-
- if (!opts)
- return 0;
+ int r;
+ int opt;
+ struct p9_client_opts *clnt = &ctx->client_opts;
+ struct p9_fd_opts *fd_opts = &ctx->fd_opts;
+ struct p9_rdma_opts *rdma_opts = &ctx->rdma_opts;
+ struct p9_session_opts *session_opts = &ctx->session_opts;
+
+ opt = fs_parse(fc, v9fs_param_spec, param, &result);
+ if (opt < 0) {
+ /*
+ * We might like to report bad mount options here, but
+ * traditionally 9p has ignored unknown mount options
+ */
+ if (opt == -ENOPARAM)
+ return 0;
- tmp_options = kstrdup(opts, GFP_KERNEL);
- if (!tmp_options) {
- ret = -ENOMEM;
- goto fail_option_alloc;
+ return opt;
}
- options = tmp_options;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token, r;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_debug:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- } else {
- v9ses->debug = option;
+
+ switch (opt) {
+ case Opt_source:
+ if (fc->source) {
+ pr_info("p9: multiple sources not supported\n");
+ return -EINVAL;
+ }
+ fc->source = param->string;
+ param->string = NULL;
+ break;
+ case Opt_debug:
+ session_opts->debug = result.uint_32;
#ifdef CONFIG_NET_9P_DEBUG
- p9_debug_level = option;
+ p9_debug_level = result.uint_32;
#endif
- }
- break;
-
- case Opt_dfltuid:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
- }
- v9ses->dfltuid = make_kuid(current_user_ns(), option);
- if (!uid_valid(v9ses->dfltuid)) {
- p9_debug(P9_DEBUG_ERROR,
- "uid field, but not a uid?\n");
- ret = -EINVAL;
- }
- break;
- case Opt_dfltgid:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
- }
- v9ses->dfltgid = make_kgid(current_user_ns(), option);
- if (!gid_valid(v9ses->dfltgid)) {
- p9_debug(P9_DEBUG_ERROR,
- "gid field, but not a gid?\n");
- ret = -EINVAL;
- }
- break;
- case Opt_afid:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- } else {
- v9ses->afid = option;
- }
- break;
- case Opt_uname:
- kfree(v9ses->uname);
- v9ses->uname = match_strdup(&args[0]);
- if (!v9ses->uname) {
- ret = -ENOMEM;
- goto free_and_return;
- }
- break;
- case Opt_remotename:
- kfree(v9ses->aname);
- v9ses->aname = match_strdup(&args[0]);
- if (!v9ses->aname) {
- ret = -ENOMEM;
- goto free_and_return;
- }
- break;
- case Opt_nodevmap:
- v9ses->nodev = 1;
- break;
- case Opt_cache_loose:
- v9ses->cache = CACHE_LOOSE;
- break;
- case Opt_fscache:
- v9ses->cache = CACHE_FSCACHE;
- break;
- case Opt_mmap:
- v9ses->cache = CACHE_MMAP;
- break;
- case Opt_cachetag:
+ break;
+
+ case Opt_dfltuid:
+ session_opts->dfltuid = result.uid;
+ break;
+ case Opt_dfltgid:
+ session_opts->dfltgid = result.gid;
+ break;
+ case Opt_afid:
+ session_opts->afid = result.uint_32;
+ break;
+ case Opt_uname:
+ kfree(session_opts->uname);
+ session_opts->uname = param->string;
+ param->string = NULL;
+ break;
+ case Opt_remotename:
+ kfree(session_opts->aname);
+ session_opts->aname = param->string;
+ param->string = NULL;
+ break;
+ case Opt_nodevmap:
+ session_opts->nodev = 1;
+ break;
+ case Opt_noxattr:
+ session_opts->flags |= V9FS_NO_XATTR;
+ break;
+ case Opt_directio:
+ session_opts->flags |= V9FS_DIRECT_IO;
+ break;
+ case Opt_ignoreqv:
+ session_opts->flags |= V9FS_IGNORE_QV;
+ break;
+ case Opt_cachetag:
#ifdef CONFIG_9P_FSCACHE
- kfree(v9ses->cachetag);
- v9ses->cachetag = match_strdup(&args[0]);
- if (!v9ses->cachetag) {
- ret = -ENOMEM;
- goto free_and_return;
- }
+ kfree(session_opts->cachetag);
+ session_opts->cachetag = param->string;
+ param->string = NULL;
#endif
- break;
- case Opt_cache:
- s = match_strdup(&args[0]);
- if (!s) {
- ret = -ENOMEM;
- p9_debug(P9_DEBUG_ERROR,
- "problem allocating copy of cache arg\n");
- goto free_and_return;
- }
- r = get_cache_mode(s);
- if (r < 0)
- ret = r;
- else
- v9ses->cache = r;
-
- kfree(s);
- break;
-
- case Opt_access:
- s = match_strdup(&args[0]);
- if (!s) {
- ret = -ENOMEM;
- p9_debug(P9_DEBUG_ERROR,
- "problem allocating copy of access arg\n");
- goto free_and_return;
+ break;
+ case Opt_cache:
+ r = get_cache_mode(param->string);
+ if (r < 0)
+ return r;
+ session_opts->cache = r;
+ break;
+ case Opt_access:
+ s = param->string;
+ session_opts->flags &= ~V9FS_ACCESS_MASK;
+ if (strcmp(s, "user") == 0) {
+ session_opts->flags |= V9FS_ACCESS_USER;
+ } else if (strcmp(s, "any") == 0) {
+ session_opts->flags |= V9FS_ACCESS_ANY;
+ } else if (strcmp(s, "client") == 0) {
+ session_opts->flags |= V9FS_ACCESS_CLIENT;
+ } else {
+ uid_t uid;
+
+ session_opts->flags |= V9FS_ACCESS_SINGLE;
+ r = kstrtouint(s, 10, &uid);
+ if (r) {
+ pr_info("Unknown access argument %s: %d\n",
+ param->string, r);
+ return r;
}
-
- v9ses->flags &= ~V9FS_ACCESS_MASK;
- if (strcmp(s, "user") == 0)
- v9ses->flags |= V9FS_ACCESS_USER;
- else if (strcmp(s, "any") == 0)
- v9ses->flags |= V9FS_ACCESS_ANY;
- else if (strcmp(s, "client") == 0) {
- v9ses->flags |= V9FS_ACCESS_CLIENT;
- } else {
- uid_t uid;
-
- v9ses->flags |= V9FS_ACCESS_SINGLE;
- r = kstrtouint(s, 10, &uid);
- if (r) {
- ret = r;
- pr_info("Unknown access argument %s: %d\n",
- s, r);
- kfree(s);
- continue;
- }
- v9ses->uid = make_kuid(current_user_ns(), uid);
- if (!uid_valid(v9ses->uid)) {
- ret = -EINVAL;
- pr_info("Unknown uid %s\n", s);
- }
+ session_opts->uid = make_kuid(current_user_ns(), uid);
+ if (!uid_valid(session_opts->uid)) {
+ pr_info("Unknown uid %s\n", s);
+ return -EINVAL;
}
+ }
+ break;
- kfree(s);
- break;
-
- case Opt_posixacl:
+ case Opt_posixacl:
#ifdef CONFIG_9P_FS_POSIX_ACL
- v9ses->flags |= V9FS_POSIX_ACL;
+ session_opts->flags |= V9FS_POSIX_ACL;
#else
- p9_debug(P9_DEBUG_ERROR,
- "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
#endif
- break;
-
- case Opt_locktimeout:
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
- }
- if (option < 1) {
- p9_debug(P9_DEBUG_ERROR,
- "locktimeout must be a greater than zero integer.\n");
- ret = -EINVAL;
- continue;
- }
- v9ses->session_lock_timeout = (long)option * HZ;
- break;
+ break;
- default:
- continue;
+ case Opt_locktimeout:
+ if (result.uint_32 < 1) {
+ p9_debug(P9_DEBUG_ERROR,
+ "locktimeout must be a greater than zero integer.\n");
+ return -EINVAL;
}
+ session_opts->session_lock_timeout = (long)result.uint_32 * HZ;
+ break;
+
+ /* Options for client */
+ case Opt_msize:
+ if (result.uint_32 < 4096) {
+ p9_debug(P9_DEBUG_ERROR, "msize should be at least 4k\n");
+ return -EINVAL;
+ }
+ if (result.uint_32 > INT_MAX) {
+ p9_debug(P9_DEBUG_ERROR, "msize too big\n");
+ return -EINVAL;
+ }
+ clnt->msize = result.uint_32;
+ break;
+ case Opt_trans:
+ v9fs_put_trans(clnt->trans_mod);
+ clnt->trans_mod = v9fs_get_trans_by_name(param->string);
+ if (!clnt->trans_mod) {
+ pr_info("Could not find request transport: %s\n",
+ param->string);
+ return -EINVAL;
+ }
+ break;
+ case Opt_legacy:
+ clnt->proto_version = p9_proto_legacy;
+ break;
+ case Opt_version:
+ clnt->proto_version = result.uint_32;
+ p9_debug(P9_DEBUG_9P, "Protocol version: %s\n", param->string);
+ break;
+ /* Options for fd transport */
+ case Opt_rfdno:
+ fd_opts->rfd = result.uint_32;
+ break;
+ case Opt_wfdno:
+ fd_opts->wfd = result.uint_32;
+ break;
+ /* Options for rdma transport */
+ case Opt_sq_depth:
+ rdma_opts->sq_depth = result.uint_32;
+ break;
+ case Opt_rq_depth:
+ rdma_opts->rq_depth = result.uint_32;
+ break;
+ case Opt_timeout:
+ rdma_opts->timeout = result.uint_32;
+ break;
+ /* Options for both fd and rdma transports */
+ case Opt_port:
+ fd_opts->port = result.uint_32;
+ rdma_opts->port = result.uint_32;
+ break;
+ case Opt_privport:
+ fd_opts->privport = true;
+ rdma_opts->privport = true;
+ break;
}
-free_and_return:
- kfree(tmp_options);
-fail_option_alloc:
- return ret;
+ return 0;
+}
+
+static void v9fs_apply_options(struct v9fs_session_info *v9ses,
+ struct fs_context *fc)
+{
+ struct v9fs_context *ctx = fc->fs_private;
+
+ v9ses->debug = ctx->session_opts.debug;
+ v9ses->dfltuid = ctx->session_opts.dfltuid;
+ v9ses->dfltgid = ctx->session_opts.dfltgid;
+ v9ses->afid = ctx->session_opts.afid;
+ v9ses->uname = ctx->session_opts.uname;
+ ctx->session_opts.uname = NULL;
+ v9ses->aname = ctx->session_opts.aname;
+ ctx->session_opts.aname = NULL;
+ v9ses->nodev = ctx->session_opts.nodev;
+ /*
+ * Note that we must |= flags here as session_init already
+ * set basic flags. This adds in flags from parsed options.
+ */
+ v9ses->flags |= ctx->session_opts.flags;
+#ifdef CONFIG_9P_FSCACHE
+ v9ses->cachetag = ctx->session_opts.cachetag;
+ ctx->session_opts.cachetag = NULL;
+#endif
+ v9ses->cache = ctx->session_opts.cache;
+ v9ses->uid = ctx->session_opts.uid;
+ v9ses->session_lock_timeout = ctx->session_opts.session_lock_timeout;
}
/**
* v9fs_session_init - initialize session
* @v9ses: session information structure
- * @dev_name: device being mounted
- * @data: options
+ * @fc: the filesystem mount context
*
*/
struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
- const char *dev_name, char *data)
+ struct fs_context *fc)
{
struct p9_fid *fid;
int rc = -ENOMEM;
- v9ses->uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL);
- if (!v9ses->uname)
- goto err_names;
-
- v9ses->aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL);
- if (!v9ses->aname)
- goto err_names;
init_rwsem(&v9ses->rename_sem);
- v9ses->uid = INVALID_UID;
- v9ses->dfltuid = V9FS_DEFUID;
- v9ses->dfltgid = V9FS_DEFGID;
-
- v9ses->clnt = p9_client_create(dev_name, data);
+ v9ses->clnt = p9_client_create(fc);
if (IS_ERR(v9ses->clnt)) {
rc = PTR_ERR(v9ses->clnt);
p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n");
goto err_names;
}
+ /*
+ * Initialize flags on the real v9ses. v9fs_apply_options below
+ * will |= the additional flags from parsed options.
+ */
v9ses->flags = V9FS_ACCESS_USER;
if (p9_is_proto_dotl(v9ses->clnt)) {
@@ -421,9 +459,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags |= V9FS_PROTO_2000U;
}
- rc = v9fs_parse_options(v9ses, data);
- if (rc < 0)
- goto err_clnt;
+ v9fs_apply_options(v9ses, fc);
v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
@@ -436,8 +472,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags &= ~V9FS_ACCESS_MASK;
v9ses->flags |= V9FS_ACCESS_USER;
}
- /*FIXME !! */
- /* for legacy mode, fall back to V9FS_ACCESS_ANY */
+ /* FIXME: for legacy mode, fall back to V9FS_ACCESS_ANY */
if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) &&
((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
@@ -448,7 +483,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if (!v9fs_proto_dotl(v9ses) ||
!((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
/*
- * We support ACL checks on clinet only if the protocol is
+ * We support ACL checks on client only if the protocol is
* 9P2000.L and access is V9FS_ACCESS_CLIENT.
*/
v9ses->flags &= ~V9FS_ACL_MASK;
@@ -469,7 +504,11 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
#ifdef CONFIG_9P_FSCACHE
/* register the session for caching */
- v9fs_cache_session_get_cookie(v9ses);
+ if (v9ses->cache & CACHE_FSCACHE) {
+ rc = v9fs_cache_session_get_cookie(v9ses, fc->source);
+ if (rc < 0)
+ goto err_clnt;
+ }
#endif
spin_lock(&v9fs_sessionlist_lock);
list_add(&v9ses->slist, &v9fs_sessionlist);
@@ -502,8 +541,7 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
}
#ifdef CONFIG_9P_FSCACHE
- if (v9ses->fscache)
- v9fs_cache_session_put_cookie(v9ses);
+ fscache_relinquish_volume(v9fs_session_cache(v9ses), NULL, false);
kfree(v9ses->cachetag);
#endif
kfree(v9ses->uname);
@@ -540,8 +578,6 @@ void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses)
p9_client_begin_disconnect(v9ses->clnt);
}
-extern int v9fs_error_init(void);
-
static struct kobject *v9fs_kobj;
#ifdef CONFIG_9P_FSCACHE
@@ -558,7 +594,7 @@ static ssize_t caches_show(struct kobject *kobj,
spin_lock(&v9fs_sessionlist_lock);
list_for_each_entry(v9ses, &v9fs_sessionlist, slist) {
if (v9ses->cachetag) {
- n = snprintf(buf, limit, "%s\n", v9ses->cachetag);
+ n = snprintf(buf + count, limit, "%s\n", v9ses->cachetag);
if (n < 0) {
count = n;
break;
@@ -594,13 +630,16 @@ static const struct attribute_group v9fs_attr_group = {
static int __init v9fs_sysfs_init(void)
{
+ int ret;
+
v9fs_kobj = kobject_create_and_add("9p", fs_kobj);
if (!v9fs_kobj)
return -ENOMEM;
- if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) {
+ ret = sysfs_create_group(v9fs_kobj, &v9fs_attr_group);
+ if (ret) {
kobject_put(v9fs_kobj);
- return -ENOMEM;
+ return ret;
}
return 0;
@@ -620,11 +659,9 @@ static void v9fs_sysfs_cleanup(void)
static void v9fs_inode_init_once(void *foo)
{
struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
-#ifdef CONFIG_9P_FSCACHE
- v9inode->fscache = NULL;
-#endif
+
memset(&v9inode->qid, 0, sizeof(v9inode->qid));
- inode_init_once(&v9inode->vfs_inode);
+ inode_init_once(&v9inode->netfs.inode);
}
/**
@@ -636,7 +673,7 @@ static int v9fs_init_inode_cache(void)
v9fs_inode_cache = kmem_cache_create("v9fs_inode_cache",
sizeof(struct v9fs_inode),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
v9fs_inode_init_once);
if (!v9fs_inode_cache)
return -ENOMEM;
@@ -658,29 +695,6 @@ static void v9fs_destroy_inode_cache(void)
kmem_cache_destroy(v9fs_inode_cache);
}
-static int v9fs_cache_register(void)
-{
- int ret;
-
- ret = v9fs_init_inode_cache();
- if (ret < 0)
- return ret;
-#ifdef CONFIG_9P_FSCACHE
- ret = fscache_register_netfs(&v9fs_cache_netfs);
- if (ret < 0)
- v9fs_destroy_inode_cache();
-#endif
- return ret;
-}
-
-static void v9fs_cache_unregister(void)
-{
- v9fs_destroy_inode_cache();
-#ifdef CONFIG_9P_FSCACHE
- fscache_unregister_netfs(&v9fs_cache_netfs);
-#endif
-}
-
/**
* init_v9fs - Initialize module
*
@@ -691,9 +705,9 @@ static int __init init_v9fs(void)
int err;
pr_info("Installing v9fs 9p2000 file system support\n");
- /* TODO: Setup list of registered trasnport modules */
+ /* TODO: Setup list of registered transport modules */
- err = v9fs_cache_register();
+ err = v9fs_init_inode_cache();
if (err < 0) {
pr_err("Failed to register v9fs for caching\n");
return err;
@@ -716,7 +730,7 @@ out_sysfs_cleanup:
v9fs_sysfs_cleanup();
out_cache:
- v9fs_cache_unregister();
+ v9fs_destroy_inode_cache();
return err;
}
@@ -729,7 +743,7 @@ out_cache:
static void __exit exit_v9fs(void)
{
v9fs_sysfs_cleanup();
- v9fs_cache_unregister();
+ v9fs_destroy_inode_cache();
unregister_filesystem(&v9fs_fs_type);
}
@@ -739,4 +753,5 @@ module_exit(exit_v9fs)
MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
+MODULE_DESCRIPTION("9P Client File System");
MODULE_LICENSE("GPL");
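
With core, client, and transport options consolidated into the single
v9fs_param_spec[] table, wiring up a new option now means one fsparam_*() entry
plus one switch case in v9fs_parse_param(). A minimal sketch, where Opt_example
and V9FS_EXAMPLE are purely hypothetical names used only for illustration:

	/* entry added to v9fs_param_spec[] (hypothetical): */
	fsparam_flag	("example",	Opt_example),

	/* matching case in v9fs_parse_param() (hypothetical): */
	case Opt_example:
		session_opts->flags |= V9FS_EXAMPLE;
		break;
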
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 1647a8e63671..6a12445d3858 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -9,6 +9,10 @@
#define FS_9P_V9FS_H
#include <linux/backing-dev.h>
+#include <linux/netfs.h>
+#include <linux/fs_parser.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
/**
* enum p9_session_flags - option flags for each 9P session
@@ -30,29 +34,54 @@
#define V9FS_ACL_MASK V9FS_POSIX_ACL
enum p9_session_flags {
- V9FS_PROTO_2000U = 0x01,
- V9FS_PROTO_2000L = 0x02,
- V9FS_ACCESS_SINGLE = 0x04,
- V9FS_ACCESS_USER = 0x08,
- V9FS_ACCESS_CLIENT = 0x10,
- V9FS_POSIX_ACL = 0x20
+ V9FS_PROTO_2000U = 0x01,
+ V9FS_PROTO_2000L = 0x02,
+ V9FS_ACCESS_SINGLE = 0x04,
+ V9FS_ACCESS_USER = 0x08,
+ V9FS_ACCESS_CLIENT = 0x10,
+ V9FS_POSIX_ACL = 0x20,
+ V9FS_NO_XATTR = 0x40,
+ V9FS_IGNORE_QV = 0x80, /* ignore qid.version for cache hints */
+ V9FS_DIRECT_IO = 0x100,
+ V9FS_SYNC = 0x200
};
-/* possible values of ->cache */
/**
- * enum p9_cache_modes - user specified cache preferences
- * @CACHE_NONE: do not cache data, dentries, or directory contents (default)
- * @CACHE_LOOSE: cache data, dentries, and directory contents w/no consistency
+ * enum p9_cache_shortcuts - human readable cache preferences
+ * @CACHE_SC_NONE: disable all caches
+ * @CACHE_SC_READAHEAD: only provide caching for readahead
+ * @CACHE_SC_MMAP: provide caching to enable mmap
+ * @CACHE_SC_LOOSE: non-coherent caching for files and meta data
+ * @CACHE_SC_FSCACHE: persistent non-coherent caching for files and meta-data
*
- * eventually support loose, tight, time, session, default always none
*/
-enum p9_cache_modes {
- CACHE_NONE,
- CACHE_MMAP,
- CACHE_LOOSE,
- CACHE_FSCACHE,
- nr__p9_cache_modes
+enum p9_cache_shortcuts {
+ CACHE_SC_NONE = 0b00000000,
+ CACHE_SC_READAHEAD = 0b00000001,
+ CACHE_SC_MMAP = 0b00000101,
+ CACHE_SC_LOOSE = 0b00001111,
+ CACHE_SC_FSCACHE = 0b10001111,
+};
+
+/**
+ * enum p9_cache_bits - possible values of ->cache
+ * @CACHE_NONE: caches disabled
+ * @CACHE_FILE: file caching (open to close)
+ * @CACHE_META: meta-data and directory caching
+ * @CACHE_WRITEBACK: write-back caching for files
+ * @CACHE_LOOSE: don't check cache consistency
+ * @CACHE_FSCACHE: local persistent caches
+ *
+ */
+
+enum p9_cache_bits {
+ CACHE_NONE = 0b00000000,
+ CACHE_FILE = 0b00000001,
+ CACHE_META = 0b00000010,
+ CACHE_WRITEBACK = 0b00000100,
+ CACHE_LOOSE = 0b00001000,
+ CACHE_FSCACHE = 0b10000000,
};
/**
@@ -61,7 +90,7 @@ enum p9_cache_modes {
* @nodev: set to 1 to disable device mapping
* @debug: debug level
* @afid: authentication handle
- * @cache: cache mode of type &p9_cache_modes
+ * @cache: cache mode of type &p9_cache_bits
* @cachetag: the tag of the cache associated with this session
* @fscache: session cookie associated with FS-Cache
* @uname: string user name to mount hierarchy as
@@ -82,14 +111,14 @@ enum p9_cache_modes {
struct v9fs_session_info {
/* options */
- unsigned char flags;
+ unsigned int flags;
unsigned char nodev;
unsigned short debug;
unsigned int afid;
unsigned int cache;
#ifdef CONFIG_9P_FSCACHE
char *cachetag;
- struct fscache_cookie *fscache;
+ struct fscache_volume *fscache;
#endif
char *uname; /* user name to mount as */
@@ -108,35 +137,42 @@ struct v9fs_session_info {
#define V9FS_INO_INVALID_ATTR 0x01
struct v9fs_inode {
-#ifdef CONFIG_9P_FSCACHE
- struct mutex fscache_lock;
- struct fscache_cookie *fscache;
-#endif
+ struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct p9_qid qid;
unsigned int cache_validity;
- struct p9_fid *writeback_fid;
struct mutex v_mutex;
- struct inode vfs_inode;
};
static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
{
- return container_of(inode, struct v9fs_inode, vfs_inode);
+ return container_of(inode, struct v9fs_inode, netfs.inode);
}
static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
{
#ifdef CONFIG_9P_FSCACHE
- return v9inode->fscache;
+ return netfs_i_cookie(&v9inode->netfs);
#else
return NULL;
#endif
}
+static inline struct fscache_volume *v9fs_session_cache(struct v9fs_session_info *v9ses)
+{
+#ifdef CONFIG_9P_FSCACHE
+ return v9ses->fscache;
+#else
+ return NULL;
+#endif
+}
+
+extern const struct fs_parameter_spec v9fs_param_spec[];
+
+extern int v9fs_parse_param(struct fs_context *fc, struct fs_parameter *param);
extern int v9fs_show_options(struct seq_file *m, struct dentry *root);
struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
- const char *dev_name, char *data);
+ struct fs_context *fc);
extern void v9fs_session_close(struct v9fs_session_info *v9ses);
extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
@@ -144,7 +180,7 @@ extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
-extern int v9fs_vfs_rename(struct user_namespace *mnt_userns,
+extern int v9fs_vfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
@@ -154,6 +190,7 @@ extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
extern const struct inode_operations v9fs_dir_inode_operations_dotl;
extern const struct inode_operations v9fs_file_inode_operations_dotl;
extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern const struct netfs_request_ops v9fs_req_ops;
extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
struct p9_fid *fid,
struct super_block *sb, int new);
@@ -170,7 +207,7 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
return inode->i_sb->s_fs_info;
}
-static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(const struct dentry *dentry)
{
return dentry->d_sb->s_fs_info;
}
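
The human-readable shortcuts above are preset combinations of the individual
p9_cache_bits; the decomposition can be spelled out with compile-time checks,
shown here only as a sketch of the bit layout (not part of the patch):

	static_assert(CACHE_SC_READAHEAD == CACHE_FILE);
	static_assert(CACHE_SC_MMAP == (CACHE_FILE | CACHE_WRITEBACK));
	static_assert(CACHE_SC_LOOSE == (CACHE_FILE | CACHE_META |
					 CACHE_WRITEBACK | CACHE_LOOSE));
	static_assert(CACHE_SC_FSCACHE == (CACHE_SC_LOOSE | CACHE_FSCACHE));
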
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index bc417da7e9c1..d3aefbec4de6 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -36,31 +36,30 @@ extern const struct file_operations v9fs_dir_operations;
extern const struct file_operations v9fs_dir_operations_dotl;
extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
-extern const struct file_operations v9fs_cached_file_operations;
-extern const struct file_operations v9fs_cached_file_operations_dotl;
-extern const struct file_operations v9fs_mmap_file_operations;
-extern const struct file_operations v9fs_mmap_file_operations_dotl;
extern struct kmem_cache *v9fs_inode_cache;
struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_free_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode,
- dev_t rdev);
+void v9fs_set_netfs_context(struct inode *inode);
int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, umode_t mode, dev_t rdev);
void v9fs_evict_inode(struct inode *inode);
-ino_t v9fs_qid2ino(struct p9_qid *qid);
+#if (BITS_PER_LONG == 32)
+#define QID2INO(q) ((ino_t) (((q)->path+2) ^ (((q)->path) >> 32)))
+#else
+#define QID2INO(q) ((ino_t) ((q)->path+2))
+#endif
+
void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct super_block *sb, unsigned int flags);
void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
unsigned int flags);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
-void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);
void v9fs_blank_wstat(struct p9_wstat *wstat);
-int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
+int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr);
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
int datasync);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index fac918ccb305..862164181bac 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -12,335 +12,158 @@
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
-#include <linux/inet.h>
#include <linux/pagemap.h>
-#include <linux/idr.h>
#include <linux/sched.h>
+#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
+#include <trace/events/netfs.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"
-/**
- * v9fs_req_issue_op - Issue a read from 9P
- * @subreq: The read to make
+/*
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
*/
-static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void v9fs_begin_writeback(struct netfs_io_request *wreq)
{
- struct netfs_read_request *rreq = subreq->rreq;
- struct p9_fid *fid = rreq->netfs_priv;
- struct iov_iter to;
- loff_t pos = subreq->start + subreq->transferred;
- size_t len = subreq->len - subreq->transferred;
- int total, err;
+ struct p9_fid *fid;
- iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);
+ fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
+ if (!fid) {
+ WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+ wreq->inode->i_ino);
+ return;
+ }
- total = p9_client_read(fid, pos, &to, &err);
- netfs_subreq_terminated(subreq, err ?: total, false);
+ wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+ if (fid->iounit)
+ wreq->wsize = min(wreq->wsize, fid->iounit);
+ wreq->netfs_priv = fid;
+ wreq->io_streams[0].avail = true;
}
-/**
- * v9fs_init_rreq - Initialise a read request
- * @rreq: The read request
- * @file: The file being read from
+/*
+ * Issue a subrequest to write to the server.
*/
-static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
{
- struct p9_fid *fid = file->private_data;
+ struct p9_fid *fid = subreq->rreq->netfs_priv;
+ int err, len;
- refcount_inc(&fid->count);
- rreq->netfs_priv = fid;
+ len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+ if (len > 0)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ netfs_write_subrequest_terminated(subreq, len ?: err);
}
/**
- * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
- * @mapping: unused mapping of request to cleanup
- * @priv: private data to cleanup, a fid, guaranted non-null.
+ * v9fs_issue_read - Issue a read from 9P
+ * @subreq: The read to make
*/
-static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
+static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
- struct p9_fid *fid = priv;
-
- p9_client_clunk(fid);
-}
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct p9_fid *fid = rreq->netfs_priv;
+ unsigned long long pos = subreq->start + subreq->transferred;
+ int total, err;
-/**
- * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
- * @inode: The inode to check
- */
-static bool v9fs_is_cache_enabled(struct inode *inode)
-{
- struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));
+ total = p9_client_read(fid, pos, &subreq->io_iter, &err);
+
+ /* if we just extended the file size, any portion not in
+ * cache won't be on server and is zeroes */
+ if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
+ subreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (pos + total >= i_size_read(rreq->inode))
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
+ if (!err && total) {
+ subreq->transferred += total;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
- return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
+ subreq->error = err;
+ netfs_read_subreq_terminated(subreq);
}
/**
- * v9fs_begin_cache_operation - Begin a cache operation for a read
+ * v9fs_init_request - Initialise a request
* @rreq: The read request
+ * @file: The file being read from
*/
-static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
-{
- struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
-
- return fscache_begin_read_operation(rreq, cookie);
-}
-
-static const struct netfs_read_request_ops v9fs_req_ops = {
- .init_rreq = v9fs_init_rreq,
- .is_cache_enabled = v9fs_is_cache_enabled,
- .begin_cache_operation = v9fs_begin_cache_operation,
- .issue_op = v9fs_req_issue_op,
- .cleanup = v9fs_req_cleanup,
-};
-
-/**
- * v9fs_vfs_readpage - read an entire page in from 9P
- * @file: file being read
- * @page: structure to page
- *
- */
-static int v9fs_vfs_readpage(struct file *file, struct page *page)
-{
- struct folio *folio = page_folio(page);
-
- return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
-}
-
-/**
- * v9fs_vfs_readahead - read a set of pages from 9P
- * @ractl: The readahead parameters
- */
-static void v9fs_vfs_readahead(struct readahead_control *ractl)
-{
- netfs_readahead(ractl, &v9fs_req_ops, NULL);
-}
-
-/**
- * v9fs_release_page - release the private state associated with a page
- * @page: The page to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns 1 if the page can be released, false otherwise.
- */
-
-static int v9fs_release_page(struct page *page, gfp_t gfp)
+static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
- struct folio *folio = page_folio(page);
-
- if (folio_test_private(folio))
- return 0;
-#ifdef CONFIG_9P_FSCACHE
- if (folio_test_fscache(folio)) {
- if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
- return 0;
- folio_wait_fscache(folio);
+ struct p9_fid *fid;
+ bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
+ rreq->origin == NETFS_WRITETHROUGH ||
+ rreq->origin == NETFS_UNBUFFERED_WRITE ||
+ rreq->origin == NETFS_DIO_WRITE);
+
+ if (rreq->origin == NETFS_WRITEBACK)
+ return 0; /* We don't get the write handle until we find we
+ * have actually dirty data and not just
+ * copy-to-cache data.
+ */
+
+ if (file) {
+ fid = file->private_data;
+ if (!fid)
+ goto no_fid;
+ p9_fid_get(fid);
+ } else {
+ fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
+ if (!fid)
+ goto no_fid;
}
-#endif
- return 1;
-}
-/**
- * v9fs_invalidate_page - Invalidate a page completely or partially
- * @page: The page to be invalidated
- * @offset: offset of the invalidated region
- * @length: length of the invalidated region
- */
-
-static void v9fs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
-{
- struct folio *folio = page_folio(page);
-
- folio_wait_fscache(folio);
-}
+ rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+ if (fid->iounit)
+ rreq->wsize = min(rreq->wsize, fid->iounit);
-static int v9fs_vfs_write_folio_locked(struct folio *folio)
-{
- struct inode *inode = folio_inode(folio);
- struct v9fs_inode *v9inode = V9FS_I(inode);
- loff_t start = folio_pos(folio);
- loff_t i_size = i_size_read(inode);
- struct iov_iter from;
- size_t len = folio_size(folio);
- int err;
-
- if (start >= i_size)
- return 0; /* Simultaneous truncation occurred */
-
- len = min_t(loff_t, i_size - start, len);
-
- iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
-
- /* We should have writeback_fid always set */
- BUG_ON(!v9inode->writeback_fid);
-
- folio_start_writeback(folio);
-
- p9_client_write(v9inode->writeback_fid, start, &from, &err);
-
- folio_end_writeback(folio);
- return err;
-}
-
-static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
- int retval;
-
- p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
-
- retval = v9fs_vfs_write_folio_locked(folio);
- if (retval < 0) {
- if (retval == -EAGAIN) {
- folio_redirty_for_writepage(wbc, folio);
- retval = 0;
- } else {
- mapping_set_error(folio_mapping(folio), retval);
- }
- } else
- retval = 0;
+ /* we might need to read from a fid that was opened write-only
+ * for read-modify-write of the page cache; use the writeback fid
+ * for that */
+ WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
+ rreq->netfs_priv = fid;
+ return 0;
- folio_unlock(folio);
- return retval;
+no_fid:
+ WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+ rreq->inode->i_ino);
+ return -EINVAL;
}
/**
- * v9fs_launder_page - Writeback a dirty page
- * @page: The page to be cleaned up
- *
- * Returns 0 on success.
+ * v9fs_free_request - Clean up a request initialized by v9fs_init_request
+ * @rreq: The I/O request to clean up
*/
-
-static int v9fs_launder_page(struct page *page)
+static void v9fs_free_request(struct netfs_io_request *rreq)
{
- struct folio *folio = page_folio(page);
- int retval;
-
- if (folio_clear_dirty_for_io(folio)) {
- retval = v9fs_vfs_write_folio_locked(folio);
- if (retval)
- return retval;
- }
- folio_wait_fscache(folio);
- return 0;
-}
-
-/**
- * v9fs_direct_IO - 9P address space operation for direct I/O
- * @iocb: target I/O control block
- * @iter: The data/buffer to use
- *
- * The presence of v9fs_direct_IO() in the address space ops vector
- * allowes open() O_DIRECT flags which would have failed otherwise.
- *
- * In the non-cached mode, we shunt off direct read and write requests before
- * the VFS gets them, so this method should never be called.
- *
- * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
- *
- */
-static ssize_t
-v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- loff_t pos = iocb->ki_pos;
- ssize_t n;
- int err = 0;
-
- if (iov_iter_rw(iter) == WRITE) {
- n = p9_client_write(file->private_data, pos, iter, &err);
- if (n) {
- struct inode *inode = file_inode(file);
- loff_t i_size = i_size_read(inode);
-
- if (pos + n > i_size)
- inode_add_bytes(inode, pos + n - i_size);
- }
- } else {
- n = p9_client_read(file->private_data, pos, iter, &err);
- }
- return n ? n : err;
-}
-
-static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
- struct page **subpagep, void **fsdata)
-{
- int retval;
- struct folio *folio;
- struct v9fs_inode *v9inode = V9FS_I(mapping->host);
-
- p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
- BUG_ON(!v9inode->writeback_fid);
-
- /* Prefetch area to be written into the cache if we're caching this
- * file. We need to do this before we get a lock on the page in case
- * there's more than one writer competing for the same cache block.
- */
- retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
- &v9fs_req_ops, NULL);
- if (retval < 0)
- return retval;
-
- *subpagep = &folio->page;
- return retval;
-}
-
-static int v9fs_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *subpage, void *fsdata)
-{
- loff_t last_pos = pos + copied;
- struct folio *folio = page_folio(subpage);
- struct inode *inode = mapping->host;
-
- p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
- if (!folio_test_uptodate(folio)) {
- if (unlikely(copied < len)) {
- copied = 0;
- goto out;
- }
-
- folio_mark_uptodate(folio);
- }
-
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_mutex.
- */
- if (last_pos > inode->i_size) {
- inode_add_bytes(inode, last_pos - inode->i_size);
- i_size_write(inode, last_pos);
- }
- folio_mark_dirty(folio);
-out:
- folio_unlock(folio);
- folio_put(folio);
+ struct p9_fid *fid = rreq->netfs_priv;
- return copied;
+ p9_fid_put(fid);
}
+const struct netfs_request_ops v9fs_req_ops = {
+ .init_request = v9fs_init_request,
+ .free_request = v9fs_free_request,
+ .issue_read = v9fs_issue_read,
+ .begin_writeback = v9fs_begin_writeback,
+ .issue_write = v9fs_issue_write,
+};
const struct address_space_operations v9fs_addr_operations = {
- .readpage = v9fs_vfs_readpage,
- .readahead = v9fs_vfs_readahead,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .writepage = v9fs_vfs_writepage,
- .write_begin = v9fs_write_begin,
- .write_end = v9fs_write_end,
- .releasepage = v9fs_release_page,
- .invalidatepage = v9fs_invalidate_page,
- .launder_page = v9fs_launder_page,
- .direct_IO = v9fs_direct_IO,
+ .read_folio = netfs_read_folio,
+ .readahead = netfs_readahead,
+ .dirty_folio = netfs_dirty_folio,
+ .release_folio = netfs_release_folio,
+ .invalidate_folio = netfs_invalidate_folio,
+ .direct_IO = noop_direct_IO,
+ .writepages = netfs_writepages,
+ .migrate_folio = filemap_migrate_folio,
};
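
With the bespoke readpage/writepage machinery removed, every address_space
operation above delegates to netfslib, which reaches the 9p callbacks through
the per-inode netfs context. v9fs_set_netfs_context(), declared in v9fs_vfs.h
above, presumably amounts to something like the following sketch (assuming
netfs_inode_init()'s three-argument form):

	void v9fs_set_netfs_context(struct inode *inode)
	{
		struct v9fs_inode *v9inode = V9FS_I(inode);

		/* attach v9fs_req_ops so netfs_read_folio() and friends can
		 * issue I/O via v9fs_issue_read()/v9fs_issue_write() */
		netfs_inode_init(&v9inode->netfs, &v9fs_req_ops, true);
	}
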
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 1c609e99d280..c5bf74d547e8 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -13,9 +13,7 @@
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
-#include <linux/inet.h>
#include <linux/namei.h>
-#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
@@ -50,15 +48,20 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
static void v9fs_dentry_release(struct dentry *dentry)
{
struct hlist_node *p, *n;
+ struct hlist_head head;
p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
dentry, dentry);
- hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
- p9_client_clunk(hlist_entry(p, struct p9_fid, dlist));
- dentry->d_fsdata = NULL;
+
+ spin_lock(&dentry->d_lock);
+ hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
+ spin_unlock(&dentry->d_lock);
+
+ hlist_for_each_safe(p, n, &head)
+ p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
}
-static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
struct p9_fid *fid;
struct inode *inode;
@@ -77,33 +80,72 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct v9fs_session_info *v9ses;
fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
+ if (IS_ERR(fid)) {
+ p9_debug(
+ P9_DEBUG_VFS,
+ "v9fs_fid_lookup: dentry = %pd (%p), got error %pe\n",
+ dentry, dentry, fid);
return PTR_ERR(fid);
+ }
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
retval = v9fs_refresh_inode_dotl(fid, inode);
else
retval = v9fs_refresh_inode(fid, inode);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
- if (retval == -ENOENT)
+ if (retval == -ENOENT) {
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) invalidated due to ENOENT\n",
+ dentry, dentry);
+ return 0;
+ }
+ if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) invalidated due to type change\n",
+ dentry, dentry);
return 0;
- if (retval < 0)
+ }
+ if (retval < 0) {
+ p9_debug(P9_DEBUG_VFS,
+ "refresh inode: dentry = %pd (%p), got error %pe\n",
+ dentry, dentry, ERR_PTR(retval));
return retval;
+ }
}
out_valid:
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) is valid\n", dentry, dentry);
return 1;
}
+static int v9fs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
+{
+ return __v9fs_lookup_revalidate(dentry, flags);
+}
+
+static bool v9fs_dentry_unalias_trylock(const struct dentry *dentry)
+{
+ struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+ return down_write_trylock(&v9ses->rename_sem);
+}
+
+static void v9fs_dentry_unalias_unlock(const struct dentry *dentry)
+{
+ struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+ up_write(&v9ses->rename_sem);
+}
+
const struct dentry_operations v9fs_cached_dentry_operations = {
.d_revalidate = v9fs_lookup_revalidate,
- .d_weak_revalidate = v9fs_lookup_revalidate,
+ .d_weak_revalidate = __v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
+ .d_unalias_trylock = v9fs_dentry_unalias_trylock,
+ .d_unalias_unlock = v9fs_dentry_unalias_unlock,
};
const struct dentry_operations v9fs_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_release = v9fs_dentry_release,
+ .d_unalias_trylock = v9fs_dentry_unalias_trylock,
+ .d_unalias_unlock = v9fs_dentry_unalias_unlock,
};
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 8c854d8cb0cd..e0d34e4e9076 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -13,10 +13,9 @@
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sched.h>
-#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/uio.h>
+#include <linux/fscache.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -108,7 +107,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
struct iov_iter to;
int n;
- iov_iter_kvec(&to, READ, &kvec, 1, buflen);
+ iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buflen);
n = p9_client_read(file->private_data, ctx->pos, &to,
&err);
if (err)
@@ -128,7 +127,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
}
over = !dir_emit(ctx, st.name, strlen(st.name),
- v9fs_qid2ino(&st.qid), dt_type(&st));
+ QID2INO(&st.qid), dt_type(&st));
p9stat_free(&st);
if (over)
return 0;
@@ -185,7 +184,7 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
if (!dir_emit(ctx, curdirent.d_name,
strlen(curdirent.d_name),
- v9fs_qid2ino(&curdirent.qid),
+ QID2INO(&curdirent.qid),
curdirent.d_type))
return 0;
@@ -197,26 +196,44 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
/**
- * v9fs_dir_release - close a directory
- * @inode: inode of the directory
- * @filp: file pointer to a directory
+ * v9fs_dir_release - close a directory or a file
+ * @inode: inode of the directory or file
+ * @filp: file pointer to a directory or file
*
*/
int v9fs_dir_release(struct inode *inode, struct file *filp)
{
+ struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_fid *fid;
+ __le32 version;
+ loff_t i_size;
+ int retval = 0, put_err;
fid = filp->private_data;
p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
inode, filp, fid ? fid->fid : -1);
+
if (fid) {
+ if ((S_ISREG(inode->i_mode)) && (filp->f_mode & FMODE_WRITE))
+ retval = filemap_fdatawrite(inode->i_mapping);
+
spin_lock(&inode->i_lock);
hlist_del(&fid->ilist);
spin_unlock(&inode->i_lock);
- p9_client_clunk(fid);
+ put_err = p9_fid_put(fid);
+ retval = retval < 0 ? retval : put_err;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE)) {
+ version = cpu_to_le32(v9inode->qid.version);
+ i_size = i_size_read(inode);
+ fscache_unuse_cookie(v9fs_inode_cookie(v9inode),
+ &version, &i_size);
+ } else {
+ fscache_unuse_cookie(v9fs_inode_cookie(v9inode), NULL, NULL);
}
- return 0;
+ return retval;
}
const struct file_operations v9fs_dir_operations = {
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 612e297f3763..6f3880208587 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -9,16 +9,15 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
-#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
-#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
@@ -29,7 +28,6 @@
#include "fid.h"
#include "cache.h"
-static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
/**
@@ -42,64 +40,60 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
int v9fs_file_open(struct inode *inode, struct file *file)
{
int err;
- struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
- struct p9_fid *fid, *writeback_fid;
+ struct p9_fid *fid;
int omode;
+ int o_append;
p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
- v9inode = V9FS_I(inode);
v9ses = v9fs_inode2v9ses(inode);
- if (v9fs_proto_dotl(v9ses))
+ if (v9fs_proto_dotl(v9ses)) {
omode = v9fs_open_to_dotl_flags(file->f_flags);
- else
+ o_append = P9_DOTL_APPEND;
+ } else {
omode = v9fs_uflags2omode(file->f_flags,
v9fs_proto_dotu(v9ses));
+ o_append = P9_OAPPEND;
+ }
fid = file->private_data;
if (!fid) {
fid = v9fs_fid_clone(file_dentry(file));
if (IS_ERR(fid))
return PTR_ERR(fid);
- err = p9_client_open(fid, omode);
+ if ((v9ses->cache & CACHE_WRITEBACK) && (omode & P9_OWRITE)) {
+ int writeback_omode = (omode & ~(P9_OWRITE | o_append)) | P9_ORDWR;
+
+ p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, try opening O_RDWR\n");
+
+ err = p9_client_open(fid, writeback_omode);
+ if (err < 0) {
+ p9_debug(P9_DEBUG_CACHE, "could not open O_RDWR, disabling caches\n");
+ err = p9_client_open(fid, omode);
+ fid->mode |= P9L_DIRECT;
+ }
+ } else {
+ err = p9_client_open(fid, omode);
+ }
if (err < 0) {
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return err;
}
if ((file->f_flags & O_APPEND) &&
(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
generic_file_llseek(file, 0, SEEK_END);
- }
- file->private_data = fid;
- mutex_lock(&v9inode->v_mutex);
- if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
- !v9inode->writeback_fid &&
- ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
- /*
- * clone a fid and add it to writeback_fid
- * we do it during open time instead of
- * page dirty time via write_begin/page_mkwrite
- * because we want write after unlink usecase
- * to work.
- */
- writeback_fid = v9fs_writeback_fid(file_dentry(file));
- if (IS_ERR(writeback_fid)) {
- err = PTR_ERR(writeback_fid);
- mutex_unlock(&v9inode->v_mutex);
- goto out_error;
- }
- v9inode->writeback_fid = (void *) writeback_fid;
+ file->private_data = fid;
}
- mutex_unlock(&v9inode->v_mutex);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(inode, file);
- v9fs_open_fid_add(inode, fid);
+
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache & CACHE_FSCACHE)
+ fscache_use_cookie(v9fs_inode_cookie(V9FS_I(inode)),
+ file->f_mode & FMODE_WRITE);
+#endif
+ v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags);
+ v9fs_open_fid_add(inode, &fid);
return 0;
-out_error:
- p9_client_clunk(file->private_data);
- file->private_data = NULL;
- return err;
}
/**
@@ -114,17 +108,16 @@ out_error:
static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
- int res = 0;
struct inode *inode = file_inode(filp);
p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
- if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+ if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) {
filemap_write_and_wait(inode->i_mapping);
invalidate_mapping_pages(&inode->i_data, 0, -1);
}
- return res;
+ return 0;
}
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
@@ -133,14 +126,12 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
struct p9_fid *fid;
uint8_t status = P9_LOCK_ERROR;
int res = 0;
- unsigned char fl_type;
struct v9fs_session_info *v9ses;
fid = filp->private_data;
BUG_ON(fid == NULL);
- if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
- BUG();
+ BUG_ON((fl->c.flc_flags & FL_POSIX) != FL_POSIX);
res = locks_lock_file_wait(filp, fl);
if (res < 0)
@@ -149,7 +140,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
/* convert posix lock to p9 tlock args */
memset(&flock, 0, sizeof(flock));
/* map the lock type */
- switch (fl->fl_type) {
+ switch (fl->c.flc_type) {
case F_RDLCK:
flock.type = P9_LOCK_TYPE_RDLCK;
break;
@@ -165,7 +156,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
flock.length = 0;
else
flock.length = fl->fl_end - fl->fl_start + 1;
- flock.proc_id = fl->fl_pid;
+ flock.proc_id = fl->c.flc_pid;
flock.client_id = fid->clnt->name;
if (IS_SETLKW(cmd))
flock.flags = P9_LOCK_FLAGS_BLOCK;
@@ -220,12 +211,13 @@ out_unlock:
* incase server returned error for lock request, revert
* it locally
*/
- if (res < 0 && fl->fl_type != F_UNLCK) {
- fl_type = fl->fl_type;
- fl->fl_type = F_UNLCK;
+ if (res < 0 && fl->c.flc_type != F_UNLCK) {
+ unsigned char type = fl->c.flc_type;
+
+ fl->c.flc_type = F_UNLCK;
/* Even if this fails we want to return the remote error */
locks_lock_file_wait(filp, fl);
- fl->fl_type = fl_type;
+ fl->c.flc_type = type;
}
if (flock.client_id != fid->clnt->name)
kfree(flock.client_id);
@@ -247,7 +239,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
* if we have a conflicting lock locally, no need to validate
* with server
*/
- if (fl->fl_type != F_UNLCK)
+ if (fl->c.flc_type != F_UNLCK)
return res;
/* convert posix lock to p9 tgetlock args */
@@ -258,7 +250,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
glock.length = 0;
else
glock.length = fl->fl_end - fl->fl_start + 1;
- glock.proc_id = fl->fl_pid;
+ glock.proc_id = fl->c.flc_pid;
glock.client_id = fid->clnt->name;
res = p9_client_getlock_dotl(fid, &glock);
@@ -267,13 +259,13 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
/* map 9p lock type to os lock type */
switch (glock.type) {
case P9_LOCK_TYPE_RDLCK:
- fl->fl_type = F_RDLCK;
+ fl->c.flc_type = F_RDLCK;
break;
case P9_LOCK_TYPE_WRLCK:
- fl->fl_type = F_WRLCK;
+ fl->c.flc_type = F_WRLCK;
break;
case P9_LOCK_TYPE_UNLCK:
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
break;
}
if (glock.type != P9_LOCK_TYPE_UNLCK) {
@@ -282,7 +274,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
fl->fl_end = OFFSET_MAX;
else
fl->fl_end = glock.start + glock.length - 1;
- fl->fl_pid = -glock.proc_id;
+ fl->c.flc_pid = -glock.proc_id;
}
out:
if (glock.client_id != fid->clnt->name)
@@ -306,7 +298,7 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
filp, cmd, fl, filp);
- if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+ if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) {
filemap_write_and_wait(inode->i_mapping);
invalidate_mapping_pages(&inode->i_data, 0, -1);
}
@@ -337,16 +329,16 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
filp, cmd, fl, filp);
- if (!(fl->fl_flags & FL_FLOCK))
+ if (!(fl->c.flc_flags & FL_FLOCK))
goto out_err;
- if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+ if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) {
filemap_write_and_wait(inode->i_mapping);
invalidate_mapping_pages(&inode->i_data, 0, -1);
}
/* Convert flock to posix lock */
- fl->fl_flags |= FL_POSIX;
- fl->fl_flags ^= FL_FLOCK;
+ fl->c.flc_flags |= FL_POSIX;
+ fl->c.flc_flags ^= FL_FLOCK;
if (IS_SETLK(cmd) | IS_SETLKW(cmd))
ret = v9fs_file_do_lock(filp, cmd, fl);
@@ -366,20 +358,37 @@ static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct p9_fid *fid = iocb->ki_filp->private_data;
- int ret, err = 0;
- p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
- iov_iter_count(to), iocb->ki_pos);
+ p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
+ fid->fid, iov_iter_count(to), iocb->ki_pos);
- if (iocb->ki_filp->f_flags & O_NONBLOCK)
- ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
- else
- ret = p9_client_read(fid, iocb->ki_pos, to, &err);
- if (!ret)
- return err;
+ if (fid->mode & P9L_DIRECT)
+ return netfs_unbuffered_read_iter(iocb, to);
- iocb->ki_pos += ret;
- return ret;
+ p9_debug(P9_DEBUG_VFS, "(cached)\n");
+ return netfs_file_read_iter(iocb, to);
+}
+
+/*
+ * v9fs_file_splice_read - splice-read from a file
+ * @in: The 9p file to read from
+ * @ppos: Where to find/update the file position
+ * @pipe: The pipe to splice into
+ * @len: The maximum amount of data to splice
+ * @flags: SPLICE_F_* flags
+ */
+static ssize_t v9fs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct p9_fid *fid = in->private_data;
+
+ p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
+ fid->fid, len, *ppos);
+
+ if (fid->mode & P9L_DIRECT)
+ return copy_splice_read(in, ppos, pipe, len, flags);
+ return filemap_splice_read(in, ppos, pipe, len, flags);
}
/**
@@ -392,39 +401,15 @@ static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- ssize_t retval;
- loff_t origin;
- int err = 0;
+ struct p9_fid *fid = file->private_data;
- retval = generic_write_checks(iocb, from);
- if (retval <= 0)
- return retval;
+ p9_debug(P9_DEBUG_VFS, "fid %d\n", fid->fid);
- origin = iocb->ki_pos;
- retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
- if (retval > 0) {
- struct inode *inode = file_inode(file);
- loff_t i_size;
- unsigned long pg_start, pg_end;
-
- pg_start = origin >> PAGE_SHIFT;
- pg_end = (origin + retval - 1) >> PAGE_SHIFT;
- if (inode->i_mapping && inode->i_mapping->nrpages)
- invalidate_inode_pages2_range(inode->i_mapping,
- pg_start, pg_end);
- iocb->ki_pos += retval;
- i_size = i_size_read(inode);
- if (iocb->ki_pos > i_size) {
- inode_add_bytes(inode, iocb->ki_pos - i_size);
- /*
- * Need to serialize against i_size_write() in
- * v9fs_stat2inode()
- */
- v9fs_i_size_write(inode, iocb->ki_pos);
- }
- return retval;
- }
- return err;
+ if (fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))
+ return netfs_unbuffered_write_iter(iocb, from);
+
+ p9_debug(P9_DEBUG_CACHE, "(cached)\n");
+ return netfs_file_write_iter(iocb, from);
}
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
@@ -474,52 +459,23 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
}
static int
-v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
+v9fs_file_mmap_prepare(struct vm_area_desc *desc)
{
int retval;
+ struct file *filp = desc->file;
+ struct inode *inode = file_inode(filp);
+ struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
+ p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp);
- retval = generic_file_mmap(filp, vma);
- if (!retval)
- vma->vm_ops = &v9fs_file_vm_ops;
-
- return retval;
-}
-
-static int
-v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int retval;
- struct inode *inode;
- struct v9fs_inode *v9inode;
- struct p9_fid *fid;
-
- inode = file_inode(filp);
- v9inode = V9FS_I(inode);
- mutex_lock(&v9inode->v_mutex);
- if (!v9inode->writeback_fid &&
- (vma->vm_flags & VM_SHARED) &&
- (vma->vm_flags & VM_WRITE)) {
- /*
- * clone a fid and add it to writeback_fid
- * we do it during mmap instead of
- * page dirty time via write_begin/page_mkwrite
- * because we want write after unlink usecase
- * to work.
- */
- fid = v9fs_writeback_fid(file_dentry(filp));
- if (IS_ERR(fid)) {
- retval = PTR_ERR(fid);
- mutex_unlock(&v9inode->v_mutex);
- return retval;
- }
- v9inode->writeback_fid = (void *) fid;
+ if (!(v9ses->cache & CACHE_WRITEBACK)) {
+ p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
+ return generic_file_readonly_mmap_prepare(desc);
}
- mutex_unlock(&v9inode->v_mutex);
- retval = generic_file_mmap(filp, vma);
+ retval = generic_file_mmap_prepare(desc);
if (!retval)
- vma->vm_ops = &v9fs_mmap_file_vm_ops;
+ desc->vm_ops = &v9fs_mmap_file_vm_ops;
return retval;
}
@@ -527,100 +483,22 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct v9fs_inode *v9inode;
- struct folio *folio = page_folio(vmf->page);
- struct file *filp = vmf->vma->vm_file;
- struct inode *inode = file_inode(filp);
-
-
- p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
- folio, (unsigned long)filp->private_data);
-
- v9inode = V9FS_I(inode);
-
- /* Wait for the page to be written to the cache before we allow it to
- * be modified. We then assume the entire page will need writing back.
- */
-#ifdef CONFIG_9P_FSCACHE
- if (folio_test_fscache(folio) &&
- folio_wait_fscache_killable(folio) < 0)
- return VM_FAULT_NOPAGE;
-#endif
-
- /* Update file times before taking page lock */
- file_update_time(filp);
-
- BUG_ON(!v9inode->writeback_fid);
- if (folio_lock_killable(folio) < 0)
- return VM_FAULT_RETRY;
- if (folio_mapping(folio) != inode->i_mapping)
- goto out_unlock;
- folio_wait_stable(folio);
-
- return VM_FAULT_LOCKED;
-out_unlock:
- folio_unlock(folio);
- return VM_FAULT_NOPAGE;
-}
-
-/**
- * v9fs_mmap_file_read_iter - read from a file
- * @iocb: The operation parameters
- * @to: The buffer to read into
- *
- */
-static ssize_t
-v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- /* TODO: Check if there are dirty pages */
- return v9fs_file_read_iter(iocb, to);
-}
-
-/**
- * v9fs_mmap_file_write_iter - write to a file
- * @iocb: The operation parameters
- * @from: The data to write
- *
- */
-static ssize_t
-v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- /*
- * TODO: invalidate mmaps on filp's inode between
- * offset and offset+count
- */
- return v9fs_file_write_iter(iocb, from);
+ return netfs_page_mkwrite(vmf, NULL);
}
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
- struct inode *inode;
-
- struct writeback_control wbc = {
- .nr_to_write = LONG_MAX,
- .sync_mode = WB_SYNC_ALL,
- .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
- /* absolute end, byte at end included */
- .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
- (vma->vm_end - vma->vm_start - 1),
- };
-
if (!(vma->vm_flags & VM_SHARED))
return;
p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
- inode = file_inode(vma->vm_file);
- filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
+ filemap_fdatawrite_range(file_inode(vma->vm_file)->i_mapping,
+ (loff_t)vma->vm_pgoff * PAGE_SIZE,
+ (loff_t)vma->vm_pgoff * PAGE_SIZE +
+ (vma->vm_end - vma->vm_start - 1));
}
-
-static const struct vm_operations_struct v9fs_file_vm_ops = {
- .fault = filemap_fault,
- .map_pages = filemap_map_pages,
- .page_mkwrite = v9fs_vm_page_mkwrite,
-};
-
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.close = v9fs_mmap_vm_close,
.fault = filemap_fault,
@@ -628,34 +506,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.page_mkwrite = v9fs_vm_page_mkwrite,
};
-
-const struct file_operations v9fs_cached_file_operations = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .open = v9fs_file_open,
- .release = v9fs_dir_release,
- .lock = v9fs_file_lock,
- .mmap = v9fs_file_mmap,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .fsync = v9fs_file_fsync,
-};
-
-const struct file_operations v9fs_cached_file_operations_dotl = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .open = v9fs_file_open,
- .release = v9fs_dir_release,
- .lock = v9fs_file_lock_dotl,
- .flock = v9fs_file_flock_dotl,
- .mmap = v9fs_file_mmap,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .fsync = v9fs_file_fsync_dotl,
-};
-
const struct file_operations v9fs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = v9fs_file_read_iter,
@@ -663,10 +513,11 @@ const struct file_operations v9fs_file_operations = {
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
- .mmap = generic_file_readonly_mmap,
- .splice_read = generic_file_splice_read,
+ .mmap_prepare = generic_file_readonly_mmap_prepare,
+ .splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync,
+ .setlease = simple_nosetlease,
};
const struct file_operations v9fs_file_operations_dotl = {
@@ -677,35 +528,9 @@ const struct file_operations v9fs_file_operations_dotl = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
- .mmap = generic_file_readonly_mmap,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .fsync = v9fs_file_fsync_dotl,
-};
-
-const struct file_operations v9fs_mmap_file_operations = {
- .llseek = generic_file_llseek,
- .read_iter = v9fs_mmap_file_read_iter,
- .write_iter = v9fs_mmap_file_write_iter,
- .open = v9fs_file_open,
- .release = v9fs_dir_release,
- .lock = v9fs_file_lock,
- .mmap = v9fs_mmap_file_mmap,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .fsync = v9fs_file_fsync,
-};
-
-const struct file_operations v9fs_mmap_file_operations_dotl = {
- .llseek = generic_file_llseek,
- .read_iter = v9fs_mmap_file_read_iter,
- .write_iter = v9fs_mmap_file_write_iter,
- .open = v9fs_file_open,
- .release = v9fs_dir_release,
- .lock = v9fs_file_lock_dotl,
- .flock = v9fs_file_flock_dotl,
- .mmap = v9fs_mmap_file_mmap,
- .splice_read = generic_file_splice_read,
+ .mmap_prepare = v9fs_file_mmap_prepare,
+ .splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
+ .setlease = simple_nosetlease,
};
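/*
 * Illustrative sketch of the range arithmetic used in the
 * v9fs_mmap_vm_close() hunk above: the flush covers exactly the bytes
 * backed by the VMA.  With vm_pgoff holding the file offset in pages,
 * and assuming the usual PAGE_SIZE and vm_area_struct fields:
 *
 *	loff_t start = (loff_t)vma->vm_pgoff * PAGE_SIZE;
 *	loff_t end   = start + (vma->vm_end - vma->vm_start - 1);
 *
 * filemap_fdatawrite_range() takes an inclusive end offset, which is why
 * the last byte of the mapping (length minus one) is passed rather than
 * the mapping length itself.
 */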
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 328c338ff304..97abe65bf7c1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -15,9 +15,7 @@
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
-#include <linux/inet.h>
#include <linux/namei.h>
-#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/xattr.h>
@@ -85,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses,
int res;
int mode = stat->mode;
- res = mode & S_IALLUGO;
+ res = mode & 0777; /* S_IRWXUGO */
if (v9fs_proto_dotu(v9ses)) {
if ((mode & P9_DMSETUID) == P9_DMSETUID)
res |= S_ISUID;
@@ -165,7 +163,6 @@ int v9fs_uflags2omode(int uflags, int extended)
{
int ret;
- ret = 0;
switch (uflags&3) {
default:
case O_RDONLY:
@@ -181,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended)
break;
}
+ if (uflags & O_TRUNC)
+ ret |= P9_OTRUNC;
+
if (extended) {
if (uflags & O_EXCL)
ret |= P9_OEXCL;
@@ -228,17 +228,12 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
{
struct v9fs_inode *v9inode;
- v9inode = kmem_cache_alloc(v9fs_inode_cache, GFP_KERNEL);
+ v9inode = alloc_inode_sb(sb, v9fs_inode_cache, GFP_KERNEL);
if (!v9inode)
return NULL;
-#ifdef CONFIG_9P_FSCACHE
- v9inode->fscache = NULL;
- mutex_init(&v9inode->fscache_lock);
-#endif
- v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
mutex_init(&v9inode->v_mutex);
- return &v9inode->vfs_inode;
+ return &v9inode->netfs.inode;
}
/**
@@ -251,15 +246,24 @@ void v9fs_free_inode(struct inode *inode)
kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
}
+/*
+ * Set parameters for the netfs library
+ */
+void v9fs_set_netfs_context(struct inode *inode)
+{
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ netfs_inode_init(&v9inode->netfs, &v9fs_req_ops, true);
+}
+
int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, umode_t mode, dev_t rdev)
{
int err = 0;
- inode_init_owner(&init_user_ns, inode, NULL, mode);
+ inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_blocks = 0;
inode->i_rdev = rdev;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->a_ops = &v9fs_addr_operations;
inode->i_private = NULL;
@@ -283,24 +287,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
case S_IFREG:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
- if (v9ses->cache == CACHE_LOOSE ||
- v9ses->cache == CACHE_FSCACHE)
- inode->i_fop =
- &v9fs_cached_file_operations_dotl;
- else if (v9ses->cache == CACHE_MMAP)
- inode->i_fop = &v9fs_mmap_file_operations_dotl;
- else
- inode->i_fop = &v9fs_file_operations_dotl;
+ inode->i_fop = &v9fs_file_operations_dotl;
} else {
inode->i_op = &v9fs_file_inode_operations;
- if (v9ses->cache == CACHE_LOOSE ||
- v9ses->cache == CACHE_FSCACHE)
- inode->i_fop =
- &v9fs_cached_file_operations;
- else if (v9ses->cache == CACHE_MMAP)
- inode->i_fop = &v9fs_mmap_file_operations;
- else
- inode->i_fop = &v9fs_file_operations;
+ inode->i_fop = &v9fs_file_operations;
}
break;
@@ -345,53 +335,31 @@ error:
}
/**
- * v9fs_get_inode - helper function to setup an inode
- * @sb: superblock
- * @mode: mode to setup inode with
- * @rdev: The device numbers to set
- */
-
-struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
-{
- int err;
- struct inode *inode;
- struct v9fs_session_info *v9ses = sb->s_fs_info;
-
- p9_debug(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode);
-
- inode = new_inode(sb);
- if (!inode) {
- pr_warn("%s (%d): Problem allocating inode\n",
- __func__, task_pid_nr(current));
- return ERR_PTR(-ENOMEM);
- }
- err = v9fs_init_inode(v9ses, inode, mode, rdev);
- if (err) {
- iput(inode);
- return ERR_PTR(err);
- }
- return inode;
-}
-
-/**
* v9fs_evict_inode - Remove an inode from the inode cache
* @inode: inode to release
*
*/
void v9fs_evict_inode(struct inode *inode)
{
- struct v9fs_inode *v9inode = V9FS_I(inode);
+ struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
+ __le32 __maybe_unused version;
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
- filemap_fdatawrite(&inode->i_data);
+ if (!is_bad_inode(inode)) {
+ netfs_wait_for_outstanding_io(inode);
+ truncate_inode_pages_final(&inode->i_data);
- v9fs_cache_inode_put_cookie(inode);
- /* clunk the fid stashed in writeback_fid */
- if (v9inode->writeback_fid) {
- p9_client_clunk(v9inode->writeback_fid);
- v9inode->writeback_fid = NULL;
- }
+ version = cpu_to_le32(v9inode->qid.version);
+ netfs_clear_inode_writeback(inode, &version);
+
+ clear_inode(inode);
+ filemap_fdatawrite(&inode->i_data);
+
+#ifdef CONFIG_9P_FSCACHE
+ if (v9fs_inode_cookie(v9inode))
+ fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
+#endif
+ } else
+ clear_inode(inode);
}
static int v9fs_test_inode(struct inode *inode, void *data)
@@ -442,7 +410,6 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
dev_t rdev;
int retval;
umode_t umode;
- unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
int (*test)(struct inode *inode, void *data);
@@ -452,24 +419,24 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
else
test = v9fs_test_inode;
- i_ino = v9fs_qid2ino(qid);
- inode = iget5_locked(sb, i_ino, test, v9fs_set_inode, st);
+ inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode, st);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
/*
* initialize the inode with the stat info
* FIXME!! we may need support for stale inodes
* later.
*/
- inode->i_ino = i_ino;
+ inode->i_ino = QID2INO(qid);
umode = p9mode2unixmode(v9ses, st, &rdev);
retval = v9fs_init_inode(v9ses, inode, umode, rdev);
if (retval)
goto error;
v9fs_stat2inode(st, inode, sb, 0);
+ v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
return inode;
@@ -558,7 +525,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
if (v9fs_proto_dotl(v9ses))
retval = p9_client_unlinkat(dfid, dentry->d_name.name,
v9fs_at_to_dotl_flags(flags));
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
if (retval == -EOPNOTSUPP) {
/* Try the one based on path */
v9fid = v9fs_fid_clone(dentry);
@@ -608,7 +575,6 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- err = 0;
name = dentry->d_name.name;
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
@@ -622,14 +588,12 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- p9_client_clunk(dfid);
- return ERR_PTR(err);
+ goto error;
}
err = p9_client_fcreate(ofid, name, perm, mode, extension);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
- p9_client_clunk(dfid);
goto error;
}
@@ -640,8 +604,6 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS,
"p9_client_walk failed %d\n", err);
- fid = NULL;
- p9_client_clunk(dfid);
goto error;
}
/*
@@ -652,27 +614,23 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS,
"inode creation failed %d\n", err);
- p9_client_clunk(dfid);
goto error;
}
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
}
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return ofid;
error:
- if (ofid)
- p9_client_clunk(ofid);
-
- if (fid)
- p9_client_clunk(fid);
-
+ p9_fid_put(dfid);
+ p9_fid_put(ofid);
+ p9_fid_put(fid);
return ERR_PTR(err);
}
/**
* v9fs_vfs_create - VFS hook to create a regular file
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @dir: The parent directory
* @dentry: The name of file to be created
* @mode: The UNIX file mode to set
@@ -684,7 +642,7 @@ error:
*/
static int
-v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
@@ -697,22 +655,22 @@ v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return 0;
}
/**
* v9fs_vfs_mkdir - VFS mkdir hook to create a directory
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @dir: inode that is being unlinked
* @dentry: dentry that is being unlinked
* @mode: mode for new directory
*
*/
-static int v9fs_vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int err;
u32 perm;
@@ -733,9 +691,8 @@ static int v9fs_vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
if (fid)
- p9_client_clunk(fid);
-
- return err;
+ p9_fid_put(fid);
+ return ERR_PTR(err);
}
/**
@@ -774,12 +731,12 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
*/
name = dentry->d_name.name;
fid = p9_client_walk(dfid, 1, &name, 1);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
if (fid == ERR_PTR(-ENOENT))
inode = NULL;
else if (IS_ERR(fid))
inode = ERR_CAST(fid);
- else if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+ else if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
else
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
@@ -793,11 +750,11 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
res = d_splice_alias(inode, dentry);
if (!IS_ERR(fid)) {
if (!res)
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
else if (!IS_ERR(res))
- v9fs_fid_add(res, fid);
+ v9fs_fid_add(res, &fid);
else
- p9_client_clunk(fid);
+ p9_fid_put(fid);
}
return res;
}
@@ -808,79 +765,56 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
{
int err;
u32 perm;
- struct v9fs_inode *v9inode;
+ struct v9fs_inode __maybe_unused *v9inode;
struct v9fs_session_info *v9ses;
- struct p9_fid *fid, *inode_fid;
- struct dentry *res = NULL;
+ struct p9_fid *fid;
struct inode *inode;
+ int p9_omode;
if (d_in_lookup(dentry)) {
- res = v9fs_vfs_lookup(dir, dentry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- dentry = res;
+ struct dentry *res = v9fs_vfs_lookup(dir, dentry, 0);
+ if (res || d_really_is_positive(dentry))
+ return finish_no_open(file, res);
}
/* Only creates */
- if (!(flags & O_CREAT) || d_really_is_positive(dentry))
- return finish_no_open(file, res);
-
- err = 0;
+ if (!(flags & O_CREAT))
+ return finish_no_open(file, NULL);
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode);
- fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
- v9fs_uflags2omode(flags,
- v9fs_proto_dotu(v9ses)));
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- fid = NULL;
- goto error;
+ p9_omode = v9fs_uflags2omode(flags, v9fs_proto_dotu(v9ses));
+
+ if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) {
+ p9_omode = (p9_omode & ~(P9_OWRITE | P9_OAPPEND)) | P9_ORDWR;
+ p9_debug(P9_DEBUG_CACHE,
+ "write-only file with writeback enabled, creating w/ O_RDWR\n");
}
+ fid = v9fs_create(v9ses, dir, dentry, NULL, perm, p9_omode);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
inode = d_inode(dentry);
v9inode = V9FS_I(inode);
- mutex_lock(&v9inode->v_mutex);
- if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
- !v9inode->writeback_fid &&
- ((flags & O_ACCMODE) != O_RDONLY)) {
- /*
- * clone a fid and add it to writeback_fid
- * we do it during open time instead of
- * page dirty time via write_begin/page_mkwrite
- * because we want write after unlink usecase
- * to work.
- */
- inode_fid = v9fs_writeback_fid(dentry);
- if (IS_ERR(inode_fid)) {
- err = PTR_ERR(inode_fid);
- mutex_unlock(&v9inode->v_mutex);
- goto error;
- }
- v9inode->writeback_fid = (void *) inode_fid;
- }
- mutex_unlock(&v9inode->v_mutex);
err = finish_open(file, dentry, generic_file_open);
- if (err)
- goto error;
+ if (unlikely(err)) {
+ p9_fid_put(fid);
+ return err;
+ }
file->private_data = fid;
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(d_inode(dentry), file);
- v9fs_open_fid_add(inode, fid);
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache & CACHE_FSCACHE)
+ fscache_use_cookie(v9fs_inode_cookie(v9inode),
+ file->f_mode & FMODE_WRITE);
+#endif
- file->f_mode |= FMODE_CREATED;
-out:
- dput(res);
- return err;
+ v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags);
+ v9fs_open_fid_add(inode, &fid);
-error:
- if (fid)
- p9_client_clunk(fid);
- goto out;
+ file->f_mode |= FMODE_CREATED;
+ return 0;
}
/**
@@ -909,7 +843,7 @@ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
/**
* v9fs_vfs_rename - VFS hook to rename an inode
- * @mnt_userns: The user namespace of the mount
+ * @idmap: The idmap of the mount
* @old_dir: old dir inode
* @old_dentry: old dentry
* @new_dir: new dir inode
@@ -919,7 +853,7 @@ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
*/
int
-v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+v9fs_vfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -927,16 +861,15 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct inode *old_inode;
struct inode *new_inode;
struct v9fs_session_info *v9ses;
- struct p9_fid *oldfid, *dfid;
- struct p9_fid *olddirfid;
- struct p9_fid *newdirfid;
+ struct p9_fid *oldfid = NULL, *dfid = NULL;
+ struct p9_fid *olddirfid = NULL;
+ struct p9_fid *newdirfid = NULL;
struct p9_wstat wstat;
if (flags)
return -EINVAL;
p9_debug(P9_DEBUG_VFS, "\n");
- retval = 0;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
v9ses = v9fs_inode2v9ses(old_inode);
@@ -946,21 +879,22 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
dfid = v9fs_parent_fid(old_dentry);
olddirfid = clone_fid(dfid);
- if (dfid && !IS_ERR(dfid))
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
+ dfid = NULL;
if (IS_ERR(olddirfid)) {
retval = PTR_ERR(olddirfid);
- goto done;
+ goto error;
}
dfid = v9fs_parent_fid(new_dentry);
newdirfid = clone_fid(dfid);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
+ dfid = NULL;
if (IS_ERR(newdirfid)) {
retval = PTR_ERR(newdirfid);
- goto clunk_olddir;
+ goto error;
}
down_write(&v9ses->rename_sem);
@@ -971,7 +905,7 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
retval = p9_client_rename(oldfid, newdirfid,
new_dentry->d_name.name);
if (retval != -EOPNOTSUPP)
- goto clunk_newdir;
+ goto error_locked;
}
if (old_dentry->d_parent != new_dentry->d_parent) {
/*
@@ -980,14 +914,14 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n");
retval = -EXDEV;
- goto clunk_newdir;
+ goto error_locked;
}
v9fs_blank_wstat(&wstat);
wstat.muid = v9ses->uname;
wstat.name = new_dentry->d_name.name;
retval = p9_client_wstat(oldfid, &wstat);
-clunk_newdir:
+error_locked:
if (!retval) {
if (new_inode) {
if (S_ISDIR(new_inode->i_mode))
@@ -1008,19 +942,17 @@ clunk_newdir:
d_move(old_dentry, new_dentry);
}
up_write(&v9ses->rename_sem);
- p9_client_clunk(newdirfid);
-clunk_olddir:
- p9_client_clunk(olddirfid);
-
-done:
- p9_client_clunk(oldfid);
+error:
+ p9_fid_put(newdirfid);
+ p9_fid_put(olddirfid);
+ p9_fid_put(oldfid);
return retval;
}
/**
* v9fs_vfs_getattr - retrieve file metadata
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @path: Object to query
* @stat: metadata structure to populate
* @request_mask: Mask of STATX_xxx flags indicating the caller's interests
@@ -1029,31 +961,40 @@ done:
*/
static int
-v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
+ struct inode *inode = d_inode(dentry);
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_wstat *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- generic_fillattr(&init_user_ns, d_inode(dentry), stat);
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
+ } else if (v9ses->cache & CACHE_WRITEBACK) {
+ if (S_ISREG(inode->i_mode)) {
+ int retval = filemap_fdatawrite(inode->i_mapping);
+
+ if (retval)
+ p9_debug(P9_DEBUG_ERROR,
+ "flushing writeback during getattr returned %d\n", retval);
+ }
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
st = p9_client_stat(fid);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
- generic_fillattr(&init_user_ns, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
p9stat_free(st);
kfree(st);
@@ -1062,26 +1003,26 @@ v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
/**
* v9fs_vfs_setattr - set file metadata
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @dentry: file whose metadata to set
* @iattr: metadata assignment structure
*
*/
-static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
+static int v9fs_vfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr)
{
int retval, use_dentry = 0;
+ struct inode *inode = d_inode(dentry);
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL;
struct p9_wstat wstat;
p9_debug(P9_DEBUG_VFS, "\n");
- retval = setattr_prepare(&init_user_ns, dentry, iattr);
+ retval = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (retval)
return retval;
- retval = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (iattr->ia_valid & ATTR_FILE) {
fid = iattr->ia_file->private_data;
@@ -1116,25 +1057,39 @@ static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
}
/* Write all dirty data */
- if (d_is_reg(dentry))
- filemap_write_and_wait(d_inode(dentry)->i_mapping);
+ if (d_is_reg(dentry)) {
+ retval = filemap_fdatawrite(inode->i_mapping);
+ if (retval)
+ p9_debug(P9_DEBUG_ERROR,
+ "flushing writeback during setattr returned %d\n", retval);
+ }
retval = p9_client_wstat(fid, &wstat);
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(d_inode(dentry)))
- truncate_setsize(d_inode(dentry), iattr->ia_size);
+ iattr->ia_size != i_size_read(inode)) {
+ truncate_setsize(inode, iattr->ia_size);
+ netfs_resize_file(netfs_inode(inode), iattr->ia_size, true);
- v9fs_invalidate_inode_attr(d_inode(dentry));
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache & CACHE_FSCACHE) {
+ struct v9fs_inode *v9inode = V9FS_I(inode);
- setattr_copy(&init_user_ns, d_inode(dentry), iattr);
- mark_inode_dirty(d_inode(dentry));
+ fscache_resize_cookie(v9fs_inode_cookie(v9inode), iattr->ia_size);
+ }
+#endif
+ }
+
+ v9fs_invalidate_inode_attr(inode);
+
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
+ mark_inode_dirty(inode);
return 0;
}
@@ -1155,11 +1110,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct v9fs_session_info *v9ses = sb->s_fs_info;
struct v9fs_inode *v9inode = V9FS_I(inode);
- set_nlink(inode, 1);
-
- inode->i_atime.tv_sec = stat->atime;
- inode->i_mtime.tv_sec = stat->mtime;
- inode->i_ctime.tv_sec = stat->mtime;
+ inode_set_atime(inode, stat->atime, 0);
+ inode_set_mtime(inode, stat->mtime, 0);
+ inode_set_ctime(inode, stat->mtime, 0);
inode->i_uid = v9ses->dfltuid;
inode->i_gid = v9ses->dfltgid;
@@ -1188,6 +1141,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
+ v9inode->netfs.remote_i_size = stat->length;
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
v9fs_i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
@@ -1196,26 +1150,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
}
/**
- * v9fs_qid2ino - convert qid into inode number
- * @qid: qid to hash
- *
- * BUG: potential for inode number collisions?
- */
-
-ino_t v9fs_qid2ino(struct p9_qid *qid)
-{
- u64 path = qid->path + 2;
- ino_t i = 0;
-
- if (sizeof(ino_t) == sizeof(path))
- memcpy(&i, &path, sizeof(ino_t));
- else
- i = (ino_t) (path ^ (path >> 32));
-
- return i;
-}
-
-/**
* v9fs_vfs_get_link - follow a symlink path
* @dentry: dentry for symlink
* @inode: inode for symlink
@@ -1235,17 +1169,17 @@ static const char *v9fs_vfs_get_link(struct dentry *dentry,
return ERR_PTR(-ECHILD);
v9ses = v9fs_dentry2v9ses(dentry);
- fid = v9fs_fid_lookup(dentry);
+ if (!v9fs_proto_dotu(v9ses))
+ return ERR_PTR(-EBADF);
+
p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
+ fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return ERR_CAST(fid);
- if (!v9fs_proto_dotu(v9ses))
- return ERR_PTR(-EBADF);
-
st = p9_client_stat(fid);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (IS_ERR(st))
return ERR_CAST(st);
@@ -1292,13 +1226,13 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return 0;
}
/**
* v9fs_vfs_symlink - helper function to create symlinks
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @dir: directory inode containing symlink
* @dentry: dentry for symlink
* @symname: symlink data
@@ -1308,7 +1242,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
*/
static int
-v9fs_vfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
p9_debug(P9_DEBUG_VFS, " %lu,%pd,%s\n",
@@ -1348,13 +1282,13 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
v9fs_refresh_inode(oldfid, d_inode(old_dentry));
v9fs_invalidate_inode_attr(dir);
}
- p9_client_clunk(oldfid);
+ p9_fid_put(oldfid);
return retval;
}
/**
* v9fs_vfs_mknod - create a special file
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @dir: inode destination for new link
* @dentry: dentry for file
* @mode: mode for creation
@@ -1363,7 +1297,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
*/
static int
-v9fs_vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
@@ -1412,7 +1346,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
- flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ flags = (v9ses->cache & CACHE_LOOSE) ?
V9FS_STAT2INODE_KEEP_ISIZE : 0;
v9fs_stat2inode(st, inode, inode->i_sb, flags);
out:
@@ -1459,4 +1393,3 @@ static const struct inode_operations v9fs_symlink_inode_operations = {
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
};
-
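/*
 * Illustrative sketch: the iget5_locked() calls now use a QID2INO()
 * macro in place of the deleted v9fs_qid2ino() helper.  The macro itself
 * is defined in a 9p header not shown in this patch; assuming it keeps
 * the behaviour of the removed helper, the qid-path to inode-number
 * mapping looks roughly like this (the helper name is only for
 * illustration):
 *
 *	static inline unsigned long qid_to_ino(const struct p9_qid *qid)
 *	{
 *		u64 path = qid->path + 2;	// skip reserved ino 0 and 1
 *
 *		if (sizeof(ino_t) == sizeof(u64))
 *			return (unsigned long)path;
 *		return (unsigned long)(path ^ (path >> 32));	// fold to 32 bits
 *	}
 *
 * The exact QID2INO() definition may differ from this sketch.
 */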
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 7dee89ba32e7..643e759eacb2 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -13,9 +13,7 @@
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
-#include <linux/inet.h>
#include <linux/namei.h>
-#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/xattr.h>
@@ -31,7 +29,7 @@
#include "acl.h"
static int
-v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t omode, dev_t rdev);
/**
@@ -102,7 +100,6 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
int new)
{
int retval;
- unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
int (*test)(struct inode *inode, void *data);
@@ -112,24 +109,24 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
else
test = v9fs_test_inode_dotl;
- i_ino = v9fs_qid2ino(qid);
- inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st);
+ inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
/*
* initialize the inode with the stat info
* FIXME!! we may need support for stale inodes
* later.
*/
- inode->i_ino = i_ino;
+ inode->i_ino = QID2INO(qid);
retval = v9fs_init_inode(v9ses, inode,
st->st_mode, new_decode_dev(st->st_rdev));
if (retval)
goto error;
v9fs_stat2inode_dotl(st, inode, 0);
+ v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
if (retval)
@@ -212,7 +209,7 @@ int v9fs_open_to_dotl_flags(int flags)
/**
* v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
- * @mnt_userns: The user namespace of the mount
+ * @idmap: The idmap of the mount
* @dir: directory inode that is being created
* @dentry: dentry that is being deleted
* @omode: create permissions
@@ -220,10 +217,10 @@ int v9fs_open_to_dotl_flags(int flags)
*
*/
static int
-v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_create_dotl(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t omode, bool excl)
{
- return v9fs_vfs_mknod_dotl(mnt_userns, dir, dentry, omode, 0);
+ return v9fs_vfs_mknod_dotl(idmap, dir, dentry, omode, 0);
}
static int
@@ -233,28 +230,24 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
int err = 0;
kgid_t gid;
umode_t mode;
+ int p9_omode = v9fs_open_to_dotl_flags(flags);
const unsigned char *name = NULL;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *fid = NULL;
- struct v9fs_inode *v9inode;
- struct p9_fid *dfid, *ofid, *inode_fid;
+ struct p9_fid *dfid = NULL, *ofid = NULL;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
- struct dentry *res = NULL;
if (d_in_lookup(dentry)) {
- res = v9fs_vfs_lookup(dir, dentry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- dentry = res;
+ struct dentry *res = v9fs_vfs_lookup(dir, dentry, 0);
+ if (res || d_really_is_positive(dentry))
+ return finish_no_open(file, res);
}
/* Only creates */
- if (!(flags & O_CREAT) || d_really_is_positive(dentry))
- return finish_no_open(file, res);
+ if (!(flags & O_CREAT))
+ return finish_no_open(file, NULL);
v9ses = v9fs_inode2v9ses(dir);
@@ -283,96 +276,78 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
- p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
+ p9_debug(P9_DEBUG_VFS, "Failed to get acl values in create %d\n",
err);
- goto error;
+ goto out;
+ }
+
+ if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) {
+ p9_omode = (p9_omode & ~(P9_OWRITE | P9_DOTL_APPEND)) | P9_ORDWR;
+ p9_debug(P9_DEBUG_CACHE,
+ "write-only file with writeback enabled, creating w/ O_RDWR\n");
}
- err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
- mode, gid, &qid);
+ err = p9_client_create_dotl(ofid, name, p9_omode, mode, gid, &qid);
if (err < 0) {
- p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
+ p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in create %d\n",
err);
- goto error;
+ goto out;
}
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
fid = p9_client_walk(dfid, 1, &name, 1);
- p9_client_clunk(dfid);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- fid = NULL;
- goto error;
+ goto out;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
- goto error;
+ goto out;
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(inode, fid, dacl, pacl);
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
- v9inode = V9FS_I(inode);
- mutex_lock(&v9inode->v_mutex);
- if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
- !v9inode->writeback_fid &&
- ((flags & O_ACCMODE) != O_RDONLY)) {
- /*
- * clone a fid and add it to writeback_fid
- * we do it during open time instead of
- * page dirty time via write_begin/page_mkwrite
- * because we want write after unlink usecase
- * to work.
- */
- inode_fid = v9fs_writeback_fid(dentry);
- if (IS_ERR(inode_fid)) {
- err = PTR_ERR(inode_fid);
- mutex_unlock(&v9inode->v_mutex);
- goto err_clunk_old_fid;
- }
- v9inode->writeback_fid = (void *) inode_fid;
- }
- mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
err = finish_open(file, dentry, generic_file_open);
if (err)
- goto err_clunk_old_fid;
+ goto out;
file->private_data = ofid;
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(inode, file);
- v9fs_open_fid_add(inode, ofid);
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache & CACHE_FSCACHE) {
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ fscache_use_cookie(v9fs_inode_cookie(v9inode),
+ file->f_mode & FMODE_WRITE);
+ }
+#endif
+ v9fs_fid_add_modes(ofid, v9ses->flags, v9ses->cache, flags);
+ v9fs_open_fid_add(inode, &ofid);
file->f_mode |= FMODE_CREATED;
out:
+ p9_fid_put(dfid);
+ p9_fid_put(ofid);
+ p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
- dput(res);
return err;
-
-error:
- if (fid)
- p9_client_clunk(fid);
-err_clunk_old_fid:
- if (ofid)
- p9_client_clunk(ofid);
- goto out;
}
/**
* v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
- * @mnt_userns: The user namespace of the mount
+ * @idmap: The idmap of the mount
* @dir: inode that is being unlinked
* @dentry: dentry that is being unlinked
* @omode: mode for new directory
*
*/
-static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
- struct inode *dir, struct dentry *dentry,
- umode_t omode)
+static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
@@ -385,7 +360,6 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- err = 0;
v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
@@ -396,7 +370,6 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- dfid = NULL;
goto error;
}
@@ -418,63 +391,54 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
- fid = NULL;
goto error;
}
/* instantiate inode and assign the unopened fid to the dentry */
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- v9fs_fid_add(dentry, fid);
- v9fs_set_create_acl(inode, fid, dacl, pacl);
- d_instantiate(dentry, inode);
- fid = NULL;
- err = 0;
- } else {
- /*
- * Not in cached mode. No need to populate
- * inode with stat. We need to get an inode
- * so that we can set the acl with dentry
- */
- inode = v9fs_get_inode(dir->i_sb, mode, 0);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- v9fs_set_create_acl(inode, fid, dacl, pacl);
- d_instantiate(dentry, inode);
+ inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
}
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
+ v9fs_fid_add(dentry, &fid);
+ d_instantiate(dentry, inode);
+ err = 0;
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
error:
- if (fid)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
- p9_client_clunk(dfid);
- return err;
+ p9_fid_put(dfid);
+ return ERR_PTR(err);
}
static int
-v9fs_vfs_getattr_dotl(struct user_namespace *mnt_userns,
+v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
+ struct inode *inode = d_inode(dentry);
struct p9_stat_dotl *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- generic_fillattr(&init_user_ns, d_inode(dentry), stat);
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
+ } else if (v9ses->cache) {
+ if (S_ISREG(inode->i_mode)) {
+ int retval = filemap_fdatawrite(inode->i_mapping);
+
+ if (retval)
+ p9_debug(P9_DEBUG_ERROR,
+ "flushing writeback during getattr returned %d\n", retval);
+ }
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
@@ -485,12 +449,12 @@ v9fs_vfs_getattr_dotl(struct user_namespace *mnt_userns,
*/
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
- generic_fillattr(&init_user_ns, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@@ -540,35 +504,49 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
/**
* v9fs_vfs_setattr_dotl - set file metadata
- * @mnt_userns: The user namespace of the mount
+ * @idmap: idmap of the mount
* @dentry: file whose metadata to set
* @iattr: metadata assignment structure
*
*/
-int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
+int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr)
{
int retval, use_dentry = 0;
- struct p9_fid *fid = NULL;
- struct p9_iattr_dotl p9attr;
struct inode *inode = d_inode(dentry);
+ struct v9fs_session_info __maybe_unused *v9ses;
+ struct p9_fid *fid = NULL;
+ struct p9_iattr_dotl p9attr = {
+ .uid = INVALID_UID,
+ .gid = INVALID_GID,
+ };
p9_debug(P9_DEBUG_VFS, "\n");
- retval = setattr_prepare(&init_user_ns, dentry, iattr);
+ retval = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (retval)
return retval;
+ v9ses = v9fs_dentry2v9ses(dentry);
+
p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
- p9attr.mode = iattr->ia_mode;
- p9attr.uid = iattr->ia_uid;
- p9attr.gid = iattr->ia_gid;
- p9attr.size = iattr->ia_size;
- p9attr.atime_sec = iattr->ia_atime.tv_sec;
- p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
- p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
- p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+ if (iattr->ia_valid & ATTR_MODE)
+ p9attr.mode = iattr->ia_mode;
+ if (iattr->ia_valid & ATTR_UID)
+ p9attr.uid = iattr->ia_uid;
+ if (iattr->ia_valid & ATTR_GID)
+ p9attr.gid = iattr->ia_gid;
+ if (iattr->ia_valid & ATTR_SIZE)
+ p9attr.size = iattr->ia_size;
+ if (iattr->ia_valid & ATTR_ATIME_SET) {
+ p9attr.atime_sec = iattr->ia_atime.tv_sec;
+ p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+ }
+ if (iattr->ia_valid & ATTR_MTIME_SET) {
+ p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+ p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+ }
if (iattr->ia_valid & ATTR_FILE) {
fid = iattr->ia_file->private_data;
@@ -582,34 +560,46 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
return PTR_ERR(fid);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode))
- filemap_write_and_wait(inode->i_mapping);
+ if (S_ISREG(inode->i_mode)) {
+ retval = filemap_fdatawrite(inode->i_mapping);
+ if (retval < 0)
+ p9_debug(P9_DEBUG_ERROR,
+ "Flushing file prior to setattr failed: %d\n", retval);
+ }
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0) {
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return retval;
}
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(inode))
+ if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size !=
+ i_size_read(inode)) {
truncate_setsize(inode, iattr->ia_size);
+ netfs_resize_file(netfs_inode(inode), iattr->ia_size, true);
+
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache & CACHE_FSCACHE)
+ fscache_resize_cookie(v9fs_inode_cookie(V9FS_I(inode)),
+ iattr->ia_size);
+#endif
+ }
v9fs_invalidate_inode_attr(inode);
- setattr_copy(&init_user_ns, inode, iattr);
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE) {
/* We also want to update ACL when we update mode bits */
retval = v9fs_acl_chmod(inode, fid);
if (retval < 0) {
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return retval;
}
}
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return 0;
}
@@ -630,12 +620,12 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
- inode->i_ctime.tv_sec = stat->st_ctime_sec;
- inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ inode_set_atime(inode, stat->st_atime_sec,
+ stat->st_atime_nsec);
+ inode_set_mtime(inode, stat->st_mtime_sec,
+ stat->st_mtime_nsec);
+ inode_set_ctime(inode, stat->st_ctime_sec,
+ stat->st_ctime_nsec);
inode->i_uid = stat->st_uid;
inode->i_gid = stat->st_gid;
set_nlink(inode, stat->st_nlink);
@@ -644,21 +634,22 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
+ v9inode->netfs.remote_i_size = stat->st_size;
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
v9fs_i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ inode_set_atime(inode, stat->st_atime_sec,
+ stat->st_atime_nsec);
}
if (stat->st_result_mask & P9_STATS_MTIME) {
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode_set_mtime(inode, stat->st_mtime_sec,
+ stat->st_mtime_nsec);
}
if (stat->st_result_mask & P9_STATS_CTIME) {
- inode->i_ctime.tv_sec = stat->st_ctime_sec;
- inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ inode_set_ctime(inode, stat->st_ctime_sec,
+ stat->st_ctime_nsec);
}
if (stat->st_result_mask & P9_STATS_UID)
inode->i_uid = stat->st_uid;
@@ -672,8 +663,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
inode->i_mode = mode;
}
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
- stat->st_result_mask & P9_STATS_SIZE)
+ stat->st_result_mask & P9_STATS_SIZE) {
+ v9inode->netfs.remote_i_size = stat->st_size;
v9fs_i_size_write(inode, stat->st_size);
+ }
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
@@ -687,21 +680,18 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
}
static int
-v9fs_vfs_symlink_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_symlink_dotl(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
int err;
kgid_t gid;
const unsigned char *name;
struct p9_qid qid;
- struct inode *inode;
struct p9_fid *dfid;
struct p9_fid *fid = NULL;
- struct v9fs_session_info *v9ses;
name = dentry->d_name.name;
p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname);
- v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
@@ -721,44 +711,10 @@ v9fs_vfs_symlink_dotl(struct user_namespace *mnt_userns, struct inode *dir,
}
v9fs_invalidate_inode_attr(dir);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- /* Now walk from the parent so we can get an unopened fid. */
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
- /* instantiate inode and assign the unopened fid to dentry */
- inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- v9fs_fid_add(dentry, fid);
- d_instantiate(dentry, inode);
- fid = NULL;
- err = 0;
- } else {
- /* Not in cached mode. No need to populate inode with stat */
- inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- d_instantiate(dentry, inode);
- }
error:
- if (fid)
- p9_client_clunk(fid);
-
- p9_client_clunk(dfid);
+ p9_fid_put(fid);
+ p9_fid_put(dfid);
return err;
}
@@ -788,21 +744,21 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid)) {
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return PTR_ERR(oldfid);
}
err = p9_client_link(dfid, oldfid, dentry->d_name.name);
- p9_client_clunk(dfid);
- p9_client_clunk(oldfid);
+ p9_fid_put(dfid);
+ p9_fid_put(oldfid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
return err;
}
v9fs_invalidate_inode_attr(dir);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
/* Get the latest stat info from server. */
struct p9_fid *fid;
@@ -811,7 +767,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
return PTR_ERR(fid);
v9fs_refresh_inode_dotl(fid, d_inode(old_dentry));
- p9_client_clunk(fid);
+ p9_fid_put(fid);
}
ihold(d_inode(old_dentry));
d_instantiate(dentry, d_inode(old_dentry));
@@ -821,7 +777,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
/**
* v9fs_vfs_mknod_dotl - create a special file
- * @mnt_userns: The user namespace of the mount
+ * @idmap: The idmap of the mount
* @dir: inode destination for new link
* @dentry: dentry for file
* @omode: mode for creation
@@ -829,7 +785,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
*
*/
static int
-v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t omode, dev_t rdev)
{
int err;
@@ -851,7 +807,6 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- dfid = NULL;
goto error;
}
@@ -876,42 +831,23 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
- fid = NULL;
goto error;
}
-
- /* instantiate inode and assign the unopened fid to the dentry */
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- v9fs_set_create_acl(inode, fid, dacl, pacl);
- v9fs_fid_add(dentry, fid);
- d_instantiate(dentry, inode);
- fid = NULL;
- err = 0;
- } else {
- /*
- * Not in cached mode. No need to populate inode with stat.
- * socket syscall returns a fd, so we need instantiate
- */
- inode = v9fs_get_inode(dir->i_sb, mode, rdev);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- v9fs_set_create_acl(inode, fid, dacl, pacl);
- d_instantiate(dentry, inode);
+ inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
}
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
+ v9fs_fid_add(dentry, &fid);
+ d_instantiate(dentry, inode);
+ err = 0;
error:
- if (fid)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return err;
}
@@ -941,7 +877,7 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
if (IS_ERR(fid))
return ERR_CAST(fid);
retval = p9_client_readlink(fid, &target);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (retval)
return ERR_PTR(retval);
set_delayed_call(done, kfree_link, target);
@@ -968,7 +904,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
- flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ flags = (v9ses->cache & CACHE_LOOSE) ?
V9FS_STAT2INODE_KEEP_ISIZE : 0;
v9fs_stat2inode_dotl(st, inode, flags);
out:
@@ -990,14 +926,18 @@ const struct inode_operations v9fs_dir_inode_operations_dotl = {
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.listxattr = v9fs_listxattr,
+ .get_inode_acl = v9fs_iop_get_inode_acl,
.get_acl = v9fs_iop_get_acl,
+ .set_acl = v9fs_iop_set_acl,
};
const struct inode_operations v9fs_file_inode_operations_dotl = {
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.listxattr = v9fs_listxattr,
+ .get_inode_acl = v9fs_iop_get_inode_acl,
.get_acl = v9fs_iop_get_acl,
+ .set_acl = v9fs_iop_set_acl,
};
const struct inode_operations v9fs_symlink_inode_operations_dotl = {
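/*
 * Illustrative sketch: both atomic_open paths in this patch
 * (v9fs_vfs_atomic_open and v9fs_vfs_atomic_open_dotl) rewrite the 9p
 * open mode when writeback caching is enabled, along the lines of:
 *
 *	if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE))
 *		p9_omode = (p9_omode & ~(P9_OWRITE | P9_OAPPEND)) | P9_ORDWR;
 *
 * (the dotl variant masks P9_DOTL_APPEND instead of P9_OAPPEND).  In
 * other words, a write-only create is promoted to read/write on the
 * wire.  The likely reason, inferred rather than stated in the patch,
 * is that a writeback cache may need to read data back from the server
 * (for example to fill a partially written page), which a write-only
 * fid could not service.
 */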
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index b739e02f5ef7..315336de6f02 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -12,14 +12,14 @@
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
-#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
-#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/magic.h>
+#include <linux/fscache.h>
+#include <linux/fs_context.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -31,32 +31,10 @@
static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl;
-/**
- * v9fs_set_super - set the superblock
- * @s: super block
- * @data: file system specific data
- *
- */
-
-static int v9fs_set_super(struct super_block *s, void *data)
-{
- s->s_fs_info = data;
- return set_anon_super(s, data);
-}
-
-/**
- * v9fs_fill_super - populate superblock with info
- * @sb: superblock
- * @v9ses: session information
- * @flags: flags propagated from v9fs_mount()
- *
- */
-
-static int
-v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
- int flags)
+static int v9fs_fill_super(struct super_block *sb)
{
int ret;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
@@ -64,7 +42,8 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_magic = V9FS_MAGIC;
if (v9fs_proto_dotl(v9ses)) {
sb->s_op = &v9fs_super_ops_dotl;
- sb->s_xattr = v9fs_xattr_handlers;
+ if (!(v9ses->flags & V9FS_NO_XATTR))
+ sb->s_xattr = v9fs_xattr_handlers;
} else {
sb->s_op = &v9fs_super_ops;
sb->s_time_max = U32_MAX;
@@ -84,9 +63,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_bdi->io_pages = v9ses->maxdata >> PAGE_SHIFT;
}
- sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
- if (!v9ses->cache)
- sb->s_flags |= SB_SYNCHRONOUS;
+ sb->s_flags |= SB_ACTIVE;
#ifdef CONFIG_9P_FS_POSIX_ACL
if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
@@ -97,22 +74,17 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
}
/**
- * v9fs_mount - mount a superblock
- * @fs_type: file system type
- * @flags: mount flags
- * @dev_name: device name that was mounted
- * @data: mount options
+ * v9fs_get_tree - create the mountable root and superblock
+ * @fc: the filesystem context
*
*/
-static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int v9fs_get_tree(struct fs_context *fc)
{
struct super_block *sb = NULL;
struct inode *inode = NULL;
struct dentry *root = NULL;
struct v9fs_session_info *v9ses = NULL;
- umode_t mode = 0777 | S_ISVTX;
struct p9_fid *fid;
int retval = 0;
@@ -120,29 +92,32 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- fid = v9fs_session_init(v9ses, dev_name, data);
+ fid = v9fs_session_init(v9ses, fc);
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
goto free_session;
}
- sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses);
+ fc->s_fs_info = v9ses;
+ sb = sget_fc(fc, NULL, set_anon_super_fc);
if (IS_ERR(sb)) {
retval = PTR_ERR(sb);
goto clunk_fid;
}
- retval = v9fs_fill_super(sb, v9ses, flags);
+ retval = v9fs_fill_super(sb);
if (retval)
goto release_sb;
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- sb->s_d_op = &v9fs_cached_dentry_operations;
- else
- sb->s_d_op = &v9fs_dentry_operations;
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
+ set_default_d_op(sb, &v9fs_cached_dentry_operations);
+ } else {
+ set_default_d_op(sb, &v9fs_dentry_operations);
+ sb->s_d_flags |= DCACHE_DONTCACHE;
+ }
- inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
+ inode = v9fs_get_new_inode_from_fid(v9ses, fid, sb);
if (IS_ERR(inode)) {
retval = PTR_ERR(inode);
goto release_sb;
@@ -154,46 +129,21 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
goto release_sb;
}
sb->s_root = root;
- if (v9fs_proto_dotl(v9ses)) {
- struct p9_stat_dotl *st = NULL;
-
- st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
- if (IS_ERR(st)) {
- retval = PTR_ERR(st);
- goto release_sb;
- }
- d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode_dotl(st, d_inode(root), 0);
- kfree(st);
- } else {
- struct p9_wstat *st = NULL;
-
- st = p9_client_stat(fid);
- if (IS_ERR(st)) {
- retval = PTR_ERR(st);
- goto release_sb;
- }
-
- d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode(st, d_inode(root), sb, 0);
-
- p9stat_free(st);
- kfree(st);
- }
retval = v9fs_get_acl(inode, fid);
if (retval)
goto release_sb;
- v9fs_fid_add(root, fid);
+ v9fs_fid_add(root, &fid);
p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n");
- return dget(sb->s_root);
+ fc->root = dget(sb->s_root);
+ return 0;
clunk_fid:
- p9_client_clunk(fid);
+ p9_fid_put(fid);
v9fs_session_close(v9ses);
free_session:
kfree(v9ses);
- return ERR_PTR(retval);
+ return retval;
release_sb:
/*
@@ -202,9 +152,9 @@ release_sb:
* attached the fid to dentry so it won't get clunked
* automatically.
*/
- p9_client_clunk(fid);
+ p9_fid_put(fid);
deactivate_locked_super(sb);
- return ERR_PTR(retval);
+ return retval;
}
/**
@@ -269,7 +219,7 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
res = simple_statfs(dentry, buf);
done:
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return res;
}
@@ -278,8 +228,8 @@ static int v9fs_drop_inode(struct inode *inode)
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- return generic_drop_inode(inode);
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
+ return inode_generic_drop(inode);
/*
* in case of non cached mode always drop the
* inode because we want the inode attribute
@@ -291,54 +241,28 @@ static int v9fs_drop_inode(struct inode *inode)
static int v9fs_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
- int ret;
- struct p9_wstat wstat;
- struct v9fs_inode *v9inode;
/*
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
- v9inode = V9FS_I(inode);
- if (!v9inode->writeback_fid)
- return 0;
- v9fs_blank_wstat(&wstat);
-
- ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
- if (ret < 0) {
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- return ret;
- }
- return 0;
+ return netfs_unpin_writeback(inode, wbc);
}
static int v9fs_write_inode_dotl(struct inode *inode,
struct writeback_control *wbc)
{
- int ret;
- struct v9fs_inode *v9inode;
- /*
- * send an fsync request to server irrespective of
- * wbc->sync_mode.
- */
- v9inode = V9FS_I(inode);
- p9_debug(P9_DEBUG_VFS, "%s: inode %p, writeback_fid %p\n",
- __func__, inode, v9inode->writeback_fid);
- if (!v9inode->writeback_fid)
- return 0;
-
- ret = p9_client_fsync(v9inode->writeback_fid, 0);
- if (ret < 0) {
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- return ret;
- }
- return 0;
+
+ p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+
+ return netfs_unpin_writeback(inode, wbc);
}
static const struct super_operations v9fs_super_ops = {
.alloc_inode = v9fs_alloc_inode,
.free_inode = v9fs_free_inode,
.statfs = simple_statfs,
+ .drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
@@ -356,11 +280,86 @@ static const struct super_operations v9fs_super_ops_dotl = {
.write_inode = v9fs_write_inode_dotl,
};
+static void v9fs_free_fc(struct fs_context *fc)
+{
+ struct v9fs_context *ctx = fc->fs_private;
+
+ if (!ctx)
+ return;
+
+ /* These should be NULL by now but guard against leaks */
+ kfree(ctx->session_opts.uname);
+ kfree(ctx->session_opts.aname);
+#ifdef CONFIG_9P_FSCACHE
+ kfree(ctx->session_opts.cachetag);
+#endif
+ if (ctx->client_opts.trans_mod)
+ v9fs_put_trans(ctx->client_opts.trans_mod);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations v9fs_context_ops = {
+ .parse_param = v9fs_parse_param,
+ .get_tree = v9fs_get_tree,
+ .free = v9fs_free_fc,
+};
+
+static int v9fs_init_fs_context(struct fs_context *fc)
+{
+ struct v9fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* initialize core options */
+ ctx->session_opts.afid = ~0;
+ ctx->session_opts.cache = CACHE_NONE;
+ ctx->session_opts.session_lock_timeout = P9_LOCK_TIMEOUT;
+ ctx->session_opts.uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL);
+ if (!ctx->session_opts.uname)
+ goto error;
+
+ ctx->session_opts.aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL);
+ if (!ctx->session_opts.aname)
+ goto error;
+
+ ctx->session_opts.uid = INVALID_UID;
+ ctx->session_opts.dfltuid = V9FS_DEFUID;
+ ctx->session_opts.dfltgid = V9FS_DEFGID;
+
+ /* initialize client options */
+ ctx->client_opts.proto_version = p9_proto_2000L;
+ ctx->client_opts.msize = DEFAULT_MSIZE;
+
+ /* initialize fd transport options */
+ ctx->fd_opts.port = P9_FD_PORT;
+ ctx->fd_opts.rfd = ~0;
+ ctx->fd_opts.wfd = ~0;
+ ctx->fd_opts.privport = false;
+
+ /* initialize rdma transport options */
+ ctx->rdma_opts.port = P9_RDMA_PORT;
+ ctx->rdma_opts.sq_depth = P9_RDMA_SQ_DEPTH;
+ ctx->rdma_opts.rq_depth = P9_RDMA_RQ_DEPTH;
+ ctx->rdma_opts.timeout = P9_RDMA_TIMEOUT;
+ ctx->rdma_opts.privport = false;
+
+ fc->ops = &v9fs_context_ops;
+ fc->fs_private = ctx;
+
+ return 0;
+error:
+ kfree(ctx->session_opts.uname);
+ kfree(ctx->session_opts.aname);
+ kfree(ctx);
+ return -ENOMEM;
+}
+
struct file_system_type v9fs_fs_type = {
.name = "9p",
- .mount = v9fs_mount,
.kill_sb = v9fs_kill_super,
.owner = THIS_MODULE,
.fs_flags = FS_RENAME_DOES_D_MOVE,
+ .init_fs_context = v9fs_init_fs_context,
+ .parameters = v9fs_param_spec,
};
MODULE_ALIAS_FS("9p");
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index a824441b95a2..8604e3377ee7 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uio.h>
+#include <linux/posix_acl_xattr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -24,7 +25,7 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
struct iov_iter to;
int err;
- iov_iter_kvec(&to, READ, &kvec, 1, buffer_size);
+ iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buffer_size);
attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
if (IS_ERR(attr_fid)) {
@@ -34,17 +35,19 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
return retval;
}
if (attr_size > buffer_size) {
- if (!buffer_size) /* request to get the attr_size */
- retval = attr_size;
- else
+ if (buffer_size)
retval = -ERANGE;
+ else if (attr_size > SSIZE_MAX)
+ retval = -EOVERFLOW;
+ else /* request to get the attr_size */
+ retval = attr_size;
} else {
iov_iter_truncate(&to, attr_size);
retval = p9_client_read(attr_fid, 0, &to, &err);
if (err)
retval = err;
}
- p9_client_clunk(attr_fid);
+ p9_fid_put(attr_fid);
return retval;
}
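/*
 * Editor's note (not part of the patch): the reworked size check keeps the
 * usual getxattr(2) contract -- a zero-length buffer probes the size, a
 * too-small buffer fails with -ERANGE, and sizes that cannot be represented
 * as a ssize_t now fail with -EOVERFLOW instead of being truncated. The
 * matching two-call pattern in userspace looks like this (illustrative only):
 */
#include <stdlib.h>
#include <sys/xattr.h>

static void *read_xattr(const char *path, const char *name, ssize_t *lenp)
{
	ssize_t size = getxattr(path, name, NULL, 0);	/* size probe */
	void *buf;

	if (size < 0)
		return NULL;
	buf = malloc(size);
	if (!buf)
		return NULL;
	size = getxattr(path, name, buf, size);		/* actual read */
	if (size < 0) {
		free(buf);
		return NULL;
	}
	*lenp = size;
	return buf;
}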
@@ -65,13 +68,13 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
struct p9_fid *fid;
int ret;
- p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
+ p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
name, buffer_size);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
ret = v9fs_fid_xattr_get(fid, name, buffer, buffer_size);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return ret;
}
@@ -98,7 +101,7 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
if (IS_ERR(fid))
return PTR_ERR(fid);
ret = v9fs_fid_xattr_set(fid, name, value, value_len, flags);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return ret;
}
@@ -109,7 +112,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
struct iov_iter from;
int retval, err;
- iov_iter_kvec(&from, WRITE, &kvec, 1, value_len);
+ iov_iter_kvec(&from, ITER_SOURCE, &kvec, 1, value_len);
p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
name, value_len, flags);
@@ -128,7 +131,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
retval);
else
p9_client_write(fid, 0, &from, &retval);
- err = p9_client_clunk(fid);
+ err = p9_fid_put(fid);
if (!retval && err)
retval = err;
return retval;
@@ -136,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
+ /* Txattrwalk with an empty string lists xattrs instead */
+ return v9fs_xattr_get(dentry, "", buffer, buffer_size);
}
static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
@@ -149,7 +153,7 @@ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
}
static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -159,33 +163,29 @@ static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
return v9fs_xattr_set(dentry, full_name, value, size, flags);
}
-static struct xattr_handler v9fs_xattr_user_handler = {
+static const struct xattr_handler v9fs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
-static struct xattr_handler v9fs_xattr_trusted_handler = {
+static const struct xattr_handler v9fs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
#ifdef CONFIG_9P_FS_SECURITY
-static struct xattr_handler v9fs_xattr_security_handler = {
+static const struct xattr_handler v9fs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
#endif
-const struct xattr_handler *v9fs_xattr_handlers[] = {
+const struct xattr_handler * const v9fs_xattr_handlers[] = {
&v9fs_xattr_user_handler,
&v9fs_xattr_trusted_handler,
-#ifdef CONFIG_9P_FS_POSIX_ACL
- &v9fs_xattr_acl_access_handler,
- &v9fs_xattr_acl_default_handler,
-#endif
#ifdef CONFIG_9P_FS_SECURITY
&v9fs_xattr_security_handler,
#endif
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h
index 3e11fc3331eb..3ad5a802352a 100644
--- a/fs/9p/xattr.h
+++ b/fs/9p/xattr.h
@@ -10,9 +10,7 @@
#include <net/9p/9p.h>
#include <net/9p/client.h>
-extern const struct xattr_handler *v9fs_xattr_handlers[];
-extern const struct xattr_handler v9fs_xattr_acl_access_handler;
-extern const struct xattr_handler v9fs_xattr_acl_default_handler;
+extern const struct xattr_handler * const v9fs_xattr_handlers[];
ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
void *buffer, size_t buffer_size);
diff --git a/fs/Kconfig b/fs/Kconfig
index a6313a969bc5..0bfdaecaa877 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -15,11 +15,23 @@ config VALIDATE_FS_PARSER
Enable this to perform validation of the parameter description for a
filesystem when it is registered.
-if BLOCK
-
config FS_IOMAP
bool
+# Stackable filesystems
+config FS_STACK
+ bool
+
+config BUFFER_HEAD
+ bool
+
+# old blockdev_direct_IO implementation. Use iomap for new code instead
+config LEGACY_DIRECT_IO
+ depends on BUFFER_HEAD
+ bool
+
+if BLOCK
+
source "fs/ext2/Kconfig"
source "fs/ext4/Kconfig"
source "fs/jbd2/Kconfig"
@@ -31,7 +43,6 @@ config FS_MBCACHE
default y if EXT4_FS=y
default m if EXT2_FS_XATTR || EXT4_FS
-source "fs/reiserfs/Kconfig"
source "fs/jfs/Kconfig"
source "fs/xfs/Kconfig"
@@ -42,11 +53,12 @@ source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"
source "fs/zonefs/Kconfig"
+endif # BLOCK
+
config FS_DAX
bool "File system based Direct Access (DAX) support"
depends on MMU
- depends on !(ARM || MIPS || SPARC)
- select DEV_PAGEMAP_OPS if (ZONE_DEVICE && !FS_DAX_LIMITED)
+ depends on ZONE_DEVICE
select FS_IOMAP
select DAX
help
@@ -82,15 +94,6 @@ config FS_DAX_PMD
depends on ZONE_DEVICE
depends on TRANSPARENT_HUGEPAGE
-# Selected by DAX drivers that do not expect filesystem DAX to support
-# get_user_pages() of DAX mappings. I.e. "limited" indicates no support
-# for fork() of processes with MAP_SHARED mappings or support for
-# direct-I/O to a DAX mapping.
-config FS_DAX_LIMITED
- bool
-
-endif # BLOCK
-
# Posix ACL utility routines
#
# Note: Posix ACLs can be implemented without these helpers. Never use
@@ -131,7 +134,6 @@ source "fs/overlayfs/Kconfig"
menu "Caches"
source "fs/netfs/Kconfig"
-source "fs/fscache/Kconfig"
source "fs/cachefiles/Kconfig"
endmenu
@@ -150,7 +152,6 @@ menu "DOS/FAT/EXFAT/NT Filesystems"
source "fs/fat/Kconfig"
source "fs/exfat/Kconfig"
-source "fs/ntfs/Kconfig"
source "fs/ntfs3/Kconfig"
endmenu
@@ -165,6 +166,7 @@ source "fs/sysfs/Kconfig"
config TMPFS
bool "Tmpfs virtual memory file system support (former shm fs)"
depends on SHMEM
+ select MEMFD_CREATE
help
Tmpfs is a file system which keeps all files in virtual memory.
@@ -201,8 +203,8 @@ config TMPFS_XATTR
Extended attributes are name:value pairs associated with inodes by
the kernel or by users (see the attr(5) manual page for details).
- Currently this enables support for the trusted.* and
- security.* namespaces.
+ This enables support for the trusted.*, security.* and user.*
+ namespaces.
You need this for POSIX ACL support on tmpfs.
@@ -229,12 +231,26 @@ config TMPFS_INODE64
If unsure, say N.
+config TMPFS_QUOTA
+ bool "Tmpfs quota support"
+ depends on TMPFS
+ select QUOTA
+ help
+ Quota support allows setting per-user and per-group limits for tmpfs
+ usage. Say Y to enable quota support. Once enabled, you can control
+ user and group quota enforcement with the quota, usrquota and grpquota
+ mount options.
+
+ If unsure, say N.
+
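/*
 * Editor's sketch (not part of the patch): a quota-enforcing tmpfs instance
 * is created by passing the options named in the help text above at mount
 * time. The mount point and size below are placeholders.
 */
#include <sys/mount.h>

int mount_tmpfs_with_quota(void)
{
	return mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
		     "size=1G,usrquota,grpquota");
}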
config ARCH_SUPPORTS_HUGETLBFS
def_bool n
-config HUGETLBFS
+menuconfig HUGETLBFS
bool "HugeTLB file system support"
- depends on X86 || IA64 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
+ depends on ARCH_SUPPORTS_HUGETLBFS
+ select MEMFD_CREATE
+ select PADATA if SMP
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read
@@ -242,26 +258,30 @@ config HUGETLBFS
If unsure, say N.
+if HUGETLBFS
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
+ bool "HugeTLB Vmemmap Optimization (HVO) defaults to on"
+ default n
+ depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+ help
+ The HugeTLB Vmemmap Optimization (HVO) defaults to off. Say Y here to
+ enable HVO by default. It can be disabled via hugetlb_free_vmemmap=off
+ (boot command line) or hugetlb_optimize_vmemmap (sysctl).
+endif # HUGETLBFS
+
config HUGETLB_PAGE
def_bool HUGETLBFS
+ select XARRAY_MULTI
-config HUGETLB_PAGE_FREE_VMEMMAP
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
- depends on X86_64
+ depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+ select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
-config HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON
- bool "Default freeing vmemmap pages of HugeTLB to on"
- default n
- depends on HUGETLB_PAGE_FREE_VMEMMAP
- help
- When using HUGETLB_PAGE_FREE_VMEMMAP, the freeing unused vmemmap
- pages associated with each HugeTLB page is default off. Say Y here
- to enable freeing vmemmap pages of HugeTLB by default. It can then
- be disabled on the command line via hugetlb_free_vmemmap=off.
-
-config MEMFD_CREATE
- def_bool TMPFS || HUGETLBFS
+config HUGETLB_PMD_PAGE_TABLE_SHARING
+ def_bool HUGETLB_PAGE
+ depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
config ARCH_HAS_GIGANTIC_PAGE
bool
@@ -306,9 +326,9 @@ source "fs/omfs/Kconfig"
source "fs/hpfs/Kconfig"
source "fs/qnx4/Kconfig"
source "fs/qnx6/Kconfig"
+source "fs/resctrl/Kconfig"
source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
-source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/erofs/Kconfig"
source "fs/vboxsf/Kconfig"
@@ -340,11 +360,12 @@ config GRACE_PERIOD
config LOCKD
tristate
depends on FILE_LOCKING
+ select CRC32
select GRACE_PERIOD
config LOCKD_V4
bool
- depends on NFSD_V3 || NFS_V3
+ depends on NFSD || NFS_V3
depends on FILE_LOCKING
default y
@@ -357,6 +378,29 @@ config NFS_COMMON
depends on NFSD || NFS_FS || LOCKD
default y
+config NFS_COMMON_LOCALIO_SUPPORT
+ tristate
+ depends on NFS_LOCALIO
+ default y if NFSD=y || NFS_FS=y
+ default m if NFSD=m && NFS_FS=m
+ select SUNRPC
+
+config NFS_LOCALIO
+ bool "NFS client and server support for LOCALIO auxiliary protocol"
+ depends on NFSD && NFS_FS
+ select NFS_COMMON_LOCALIO_SUPPORT
+ default n
+ help
+ Some NFS servers support an auxiliary NFS LOCALIO protocol
+ that is not an official part of the NFS protocol.
+
+ This option enables support for the LOCALIO protocol in the
+ kernel's NFS server and client. Enable this to permit local
+ NFS clients to bypass the network when issuing reads and
+ writes to the local NFS server.
+
+ If unsure, say N.
+
config NFS_V4_2_SSC_HELPER
bool
default y if NFS_V4_2
@@ -364,14 +408,7 @@ config NFS_V4_2_SSC_HELPER
source "net/sunrpc/Kconfig"
source "fs/ceph/Kconfig"
-source "fs/cifs/Kconfig"
-source "fs/ksmbd/Kconfig"
-
-config SMBFS_COMMON
- tristate
- default y if CIFS=y
- default m if CIFS=m
-
+source "fs/smb/Kconfig"
source "fs/coda/Kconfig"
source "fs/afs/Kconfig"
source "fs/9p/Kconfig"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 4d5ae61580aa..1949e25c7741 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -28,6 +28,16 @@ config BINFMT_ELF
ld.so (check the file <file:Documentation/Changes> for location and
latest version).
+config BINFMT_ELF_KUNIT_TEST
+ bool "Build KUnit tests for ELF binary support" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y && BINFMT_ELF=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the ELF loader KUnit tests, which try to gather
+ prior bug fixes into a regression test collection. This is really
+ only needed for debugging. Note that with CONFIG_COMPAT=y, the
+ compat_binfmt_elf KUnit test is also created.
+
config COMPAT_BINFMT_ELF
def_bool y
depends on COMPAT && BINFMT_ELF
@@ -36,6 +46,9 @@ config COMPAT_BINFMT_ELF
config ARCH_BINFMT_ELF_STATE
bool
+config ARCH_BINFMT_ELF_EXTRA_PHDRS
+ bool
+
config ARCH_HAVE_ELF_PROT
bool
@@ -45,7 +58,7 @@ config ARCH_USE_GNU_PROPERTY
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
- depends on (ARM || (SUPERH && !MMU))
+ depends on ARM || ((M68K || RISCV || SUPERH || XTENSA) && !MMU)
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
@@ -129,45 +142,6 @@ config BINFMT_ZFLAT
help
Support FLAT format compressed binaries
-config BINFMT_SHARED_FLAT
- bool "Enable shared FLAT support"
- depends on BINFMT_FLAT
- help
- Support FLAT shared libraries
-
-config HAVE_AOUT
- def_bool n
-
-config BINFMT_AOUT
- tristate "Kernel support for a.out and ECOFF binaries"
- depends on HAVE_AOUT
- help
- A.out (Assembler.OUTput) is a set of formats for libraries and
- executables used in the earliest versions of UNIX. Linux used
- the a.out formats QMAGIC and ZMAGIC until they were replaced
- with the ELF format.
-
- The conversion to ELF started in 1995. This option is primarily
- provided for historical interest and for the benefit of those
- who need to run binaries from that era.
-
- Most people should answer N here. If you think you may have
- occasional use for this format, enable module support above
- and answer M here to compile this support as a module called
- binfmt_aout.
-
- If any crucial components of your system (such as /sbin/init
- or /lib/ld.so) are still in a.out format, you will have to
- say Y here.
-
-config OSF4_COMPAT
- bool "OSF/1 v4 readv/writev compatibility"
- depends on ALPHA && BINFMT_AOUT
- help
- Say Y if you are using OSF/1 binaries (like Netscape and Acrobat)
- with v4 shared libraries freely available from Compaq. If you're
- going to use shared libraries from Tru64 version 5.0 or later, say N.
-
config BINFMT_MISC
tristate "Kernel support for MISC binaries"
help
@@ -202,4 +176,21 @@ config COREDUMP
certainly want to say Y here. Not necessary on systems that never
need debugging or only ever run flawless code.
+config EXEC_KUNIT_TEST
+ bool "Build execve tests" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the exec KUnit tests, which test boundary conditions
+ of various aspects of the exec internals.
+
+config ARCH_HAS_ELF_CORE_EFLAGS
+ bool
+ depends on BINFMT_ELF && ELF_CORE
+ default n
+ help
+ Select this option if the architecture makes use of the e_flags
+ field in the ELF header to store ABI or other architecture-specific
+ information that should be preserved in core dumps.
+
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index 84c5e4cdfee5..a04274a3c854 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -6,6 +6,7 @@
# Rewritten to use lists instead of if-statements.
#
+
obj-y := open.o read_write.o file_table.o super.o \
char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
ioctl.o readdir.o select.o dcache.o inode.o \
@@ -13,17 +14,13 @@ obj-y := open.o read_write.o file_table.o super.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o d_path.o \
stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \
- fs_types.o fs_context.o fs_parser.o fsopen.o init.o \
- kernel_read_file.o remap_range.o
-
-ifeq ($(CONFIG_BLOCK),y)
-obj-y += buffer.o direct-io.o mpage.o
-else
-obj-y += no-block.o
-endif
-
-obj-$(CONFIG_PROC_FS) += proc_namespace.o
+ fs_dirent.o fs_context.o fs_parser.o fsopen.o init.o \
+ kernel_read_file.o mnt_idmapping.o remap_range.o pidfs.o \
+ file_attr.o
+obj-$(CONFIG_BUFFER_HEAD) += buffer.o mpage.o
+obj-$(CONFIG_PROC_FS) += proc_namespace.o
+obj-$(CONFIG_LEGACY_DIRECT_IO) += direct-io.o
obj-y += notify/
obj-$(CONFIG_EPOLL) += eventpoll.o
obj-y += anon_inodes.o
@@ -32,13 +29,10 @@ obj-$(CONFIG_TIMERFD) += timerfd.o
obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
-obj-$(CONFIG_IO_URING) += io_uring.o
-obj-$(CONFIG_IO_WQ) += io-wq.o
obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FS_VERITY) += verity/
obj-$(CONFIG_FILE_LOCKING) += locks.o
-obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
obj-$(CONFIG_BINFMT_SCRIPT) += binfmt_script.o
obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
@@ -46,11 +40,12 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o
obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o
obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
+obj-$(CONFIG_FS_STACK) += backing-file.o
obj-$(CONFIG_FS_MBCACHE) += mbcache.o
obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_COREDUMP) += coredump.o
-obj-$(CONFIG_SYSCTL) += drop_caches.o
+obj-$(CONFIG_SYSCTL) += drop_caches.o sysctls.o
obj-$(CONFIG_FHANDLE) += fhandle.o
obj-y += iomap/
@@ -67,8 +62,6 @@ obj-$(CONFIG_DLM) += dlm/
# Do not add any filesystems before this line
obj-$(CONFIG_NETFS_SUPPORT) += netfs/
-obj-$(CONFIG_FSCACHE) += fscache/
-obj-$(CONFIG_REISERFS_FS) += reiserfs/
obj-$(CONFIG_EXT4_FS) += ext4/
# We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the
# ext2 driver, which doesn't know about journalling! Explicitly request ext2
@@ -94,13 +87,9 @@ obj-$(CONFIG_EXPORTFS) += exportfs/
obj-$(CONFIG_NFSD) += nfsd/
obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
-obj-$(CONFIG_UNICODE) += unicode/
-obj-$(CONFIG_SYSV_FS) += sysv/
-obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/
-obj-$(CONFIG_CIFS) += cifs/
-obj-$(CONFIG_SMB_SERVER) += ksmbd/
+obj-y += unicode/
+obj-$(CONFIG_SMBFS) += smb/
obj-$(CONFIG_HPFS_FS) += hpfs/
-obj-$(CONFIG_NTFS_FS) += ntfs/
obj-$(CONFIG_NTFS3_FS) += ntfs3/
obj-$(CONFIG_UFS_FS) += ufs/
obj-$(CONFIG_EFS_FS) += efs/
@@ -124,7 +113,7 @@ obj-$(CONFIG_9P_FS) += 9p/
obj-$(CONFIG_AFS_FS) += afs/
obj-$(CONFIG_NILFS2_FS) += nilfs2/
obj-$(CONFIG_BEFS_FS) += befs/
-obj-$(CONFIG_HOSTFS) += hostfs/
+obj-y += hostfs/
obj-$(CONFIG_CACHEFILES) += cachefiles/
obj-$(CONFIG_DEBUG_FS) += debugfs/
obj-$(CONFIG_TRACING) += tracefs/
@@ -138,3 +127,5 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_VBOXSF_FS) += vboxsf/
obj-$(CONFIG_ZONEFS_FS) += zonefs/
+obj-$(CONFIG_BPF_LSM) += bpf_fs_kfuncs.o
+obj-$(CONFIG_RESCTRL_FS) += resctrl/
diff --git a/fs/adfs/Kconfig b/fs/adfs/Kconfig
index 44738fed6625..1b97058f0c4a 100644
--- a/fs/adfs/Kconfig
+++ b/fs/adfs/Kconfig
@@ -2,6 +2,7 @@
config ADFS_FS
tristate "ADFS file system support"
depends on BLOCK
+ select BUFFER_HEAD
help
The Acorn Disc Filing System is the standard file system of the
RiscOS operating system which runs on Acorn's ARM-based Risc PC
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 06b7c92343ad..223f0283d20f 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -144,7 +144,7 @@ struct adfs_discmap {
/* Inode stuff */
struct inode *adfs_iget(struct super_block *sb, struct object_info *obj);
int adfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-int adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+int adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
/* map.c */
diff --git a/fs/adfs/dir_f.h b/fs/adfs/dir_f.h
index a5393e6cf9f4..4e6c53d59ebd 100644
--- a/fs/adfs/dir_f.h
+++ b/fs/adfs/dir_f.h
@@ -58,9 +58,4 @@ struct adfs_newdirtail {
__u8 dircheckbyte;
} __attribute__((packed));
-union adfs_dirtail {
- struct adfs_olddirtail old;
- struct adfs_newdirtail new;
-};
-
#endif
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index 754afb14a6ff..cd13165fd904 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -25,10 +25,10 @@
const struct file_operations adfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.fsync = generic_file_fsync,
.write_iter = generic_file_write_iter,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
};
const struct inode_operations adfs_file_inode_operations = {
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index adbb3a1edcbf..6830f8bc8d4e 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -5,6 +5,7 @@
* Copyright (C) 1997-1999 Russell King
*/
#include <linux/buffer_head.h>
+#include <linux/mpage.h>
#include <linux/writeback.h>
#include "adfs.h"
@@ -33,14 +34,15 @@ abort_toobig:
return 0;
}
-static int adfs_writepage(struct page *page, struct writeback_control *wbc)
+static int adfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, adfs_get_block, wbc);
+ return mpage_writepages(mapping, wbc, adfs_get_block);
}
-static int adfs_readpage(struct file *file, struct page *page)
+static int adfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, adfs_get_block);
+ return block_read_full_folio(folio, adfs_get_block);
}
static void adfs_write_failed(struct address_space *mapping, loff_t to)
@@ -51,14 +53,14 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)
truncate_pagecache(inode, inode->i_size);
}
-static int adfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int adfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
adfs_get_block,
&ADFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -73,12 +75,14 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations adfs_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = adfs_readpage,
- .writepage = adfs_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = adfs_read_folio,
+ .writepages = adfs_writepages,
.write_begin = adfs_write_begin,
.write_end = generic_write_end,
- .bmap = _adfs_bmap
+ .migrate_folio = buffer_migrate_folio,
+ .bmap = _adfs_bmap,
};
/*
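/*
 * Editor's sketch (not part of the patch): the aops conversion above follows
 * a pattern that recurs again in the affs hunks further down -- ->readpage
 * and ->writepage become ->read_folio and ->writepages, backed by the
 * folio-aware block helpers. Reduced to a generic skeleton (all "demo_"
 * names are placeholders, not in-tree code):
 */
#include <linux/buffer_head.h>
#include <linux/mpage.h>

/* placeholder block-mapping routine; a real filesystem fills bh here */
static int demo_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh, int create)
{
	return -EIO;
}

static int demo_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, demo_get_block);
}

static int demo_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, demo_get_block);
}

static const struct address_space_operations demo_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= demo_read_folio,
	.writepages		= demo_writepages,
	.migrate_folio		= buffer_migrate_folio,
};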
@@ -241,6 +245,7 @@ struct inode *
adfs_iget(struct super_block *sb, struct object_info *obj)
{
struct inode *inode;
+ struct timespec64 ts;
inode = new_inode(sb);
if (!inode)
@@ -267,9 +272,10 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->attr = obj->attr;
inode->i_mode = adfs_atts2mode(sb, inode);
- adfs_adfs2unix_time(&inode->i_mtime, inode);
- inode->i_atime = inode->i_mtime;
- inode->i_ctime = inode->i_mtime;
+ adfs_adfs2unix_time(&ts, inode);
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
+ inode_set_ctime_to_ts(inode, ts);
if (S_ISDIR(inode->i_mode)) {
inode->i_op = &adfs_dir_inode_operations;
@@ -293,7 +299,7 @@ out:
* later.
*/
int
-adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -301,7 +307,7 @@ adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
unsigned int ia_valid = attr->ia_valid;
int error;
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
/*
* we can't change the UID or GID of any file -
@@ -320,7 +326,8 @@ adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
if (ia_valid & ATTR_MTIME && adfs_inode_is_stamped(inode)) {
adfs_unix2adfs_time(inode, &attr->ia_mtime);
- adfs_adfs2unix_time(&inode->i_mtime, inode);
+ adfs_adfs2unix_time(&attr->ia_mtime, inode);
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
}
/*
@@ -328,9 +335,9 @@ adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
* have the ability to represent them in our filesystem?
*/
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
ADFS_I(inode)->attr = adfs_mode2atts(sb, inode, attr->ia_mode);
inode->i_mode = adfs_atts2mode(sb, inode);
@@ -355,7 +362,6 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct super_block *sb = inode->i_sb;
struct object_info obj;
- int ret;
obj.indaddr = ADFS_I(inode)->indaddr;
obj.name_len = 0;
@@ -365,6 +371,5 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
obj.attr = ADFS_I(inode)->attr;
obj.size = inode->i_size;
- ret = adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
- return ret;
+ return adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
}
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index a81de80c45c1..a0ce272b4098 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -6,7 +6,7 @@
*/
#include <linux/slab.h>
#include <linux/statfs.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "adfs.h"
/*
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index bdbd26e571ed..fdccdbbfc213 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -6,7 +6,8 @@
*/
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -115,87 +116,61 @@ static int adfs_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix, Opt_err};
+enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_ownmask, "ownmask=%o"},
- {Opt_othmask, "othmask=%o"},
- {Opt_ftsuffix, "ftsuffix=%u"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec adfs_param_spec[] = {
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_u32oct ("ownmask", Opt_ownmask),
+ fsparam_u32oct ("othmask", Opt_othmask),
+ fsparam_u32 ("ftsuffix", Opt_ftsuffix),
+ {}
};
-static int parse_options(struct super_block *sb, struct adfs_sb_info *asb,
- char *options)
+static int adfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- int option;
-
- if (!options)
- return 0;
-
- while ((p = strsep(&options, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(args, &option))
- return -EINVAL;
- asb->s_uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(asb->s_uid))
- return -EINVAL;
- break;
- case Opt_gid:
- if (match_int(args, &option))
- return -EINVAL;
- asb->s_gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(asb->s_gid))
- return -EINVAL;
- break;
- case Opt_ownmask:
- if (match_octal(args, &option))
- return -EINVAL;
- asb->s_owner_mask = option;
- break;
- case Opt_othmask:
- if (match_octal(args, &option))
- return -EINVAL;
- asb->s_other_mask = option;
- break;
- case Opt_ftsuffix:
- if (match_int(args, &option))
- return -EINVAL;
- asb->s_ftsuffix = option;
- break;
- default:
- adfs_msg(sb, KERN_ERR,
- "unrecognised mount option \"%s\" or missing value",
- p);
- return -EINVAL;
- }
+ struct adfs_sb_info *asb = fc->s_fs_info;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, adfs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ asb->s_uid = result.uid;
+ break;
+ case Opt_gid:
+ asb->s_gid = result.gid;
+ break;
+ case Opt_ownmask:
+ asb->s_owner_mask = result.uint_32;
+ break;
+ case Opt_othmask:
+ asb->s_other_mask = result.uint_32;
+ break;
+ case Opt_ftsuffix:
+ asb->s_ftsuffix = result.uint_32;
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
-static int adfs_remount(struct super_block *sb, int *flags, char *data)
+static int adfs_reconfigure(struct fs_context *fc)
{
- struct adfs_sb_info temp_asb;
- int ret;
+ struct adfs_sb_info *new_asb = fc->s_fs_info;
+ struct adfs_sb_info *asb = ADFS_SB(fc->root->d_sb);
- sync_filesystem(sb);
- *flags |= ADFS_SB_FLAGS;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= ADFS_SB_FLAGS;
- temp_asb = *ADFS_SB(sb);
- ret = parse_options(sb, &temp_asb, data);
- if (ret == 0)
- *ADFS_SB(sb) = temp_asb;
+ /* Structure copy newly parsed options */
+ *asb = *new_asb;
- return ret;
+ return 0;
}
static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -220,7 +195,7 @@ static struct kmem_cache *adfs_inode_cachep;
static struct inode *adfs_alloc_inode(struct super_block *sb)
{
struct adfs_inode_info *ei;
- ei = kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, adfs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
@@ -249,7 +224,7 @@ static int __init init_inodecache(void)
adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
sizeof(struct adfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (adfs_inode_cachep == NULL)
return -ENOMEM;
@@ -273,7 +248,6 @@ static const struct super_operations adfs_sops = {
.write_inode = adfs_write_inode,
.put_super = adfs_put_super,
.statfs = adfs_statfs,
- .remount_fs = adfs_remount,
.show_options = adfs_show_options,
};
@@ -361,34 +335,21 @@ static int adfs_validate_dr0(struct super_block *sb, struct buffer_head *bh,
return 0;
}
-static int adfs_fill_super(struct super_block *sb, void *data, int silent)
+static int adfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct adfs_discrecord *dr;
struct object_info root_obj;
- struct adfs_sb_info *asb;
+ struct adfs_sb_info *asb = sb->s_fs_info;
struct inode *root;
int ret = -EINVAL;
+ int silent = fc->sb_flags & SB_SILENT;
sb->s_flags |= ADFS_SB_FLAGS;
- asb = kzalloc(sizeof(*asb), GFP_KERNEL);
- if (!asb)
- return -ENOMEM;
-
sb->s_fs_info = asb;
sb->s_magic = ADFS_SUPER_MAGIC;
sb->s_time_gran = 10000000;
- /* set default options */
- asb->s_uid = GLOBAL_ROOT_UID;
- asb->s_gid = GLOBAL_ROOT_GID;
- asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
- asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
- asb->s_ftsuffix = 0;
-
- if (parse_options(sb, asb, data))
- goto error;
-
/* Try to probe the filesystem boot block */
ret = adfs_probe(sb, ADFS_DISCRECORD, 1, adfs_validate_bblk);
if (ret == -EILSEQ)
@@ -436,7 +397,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
if (asb->s_ftsuffix)
asb->s_namelen += 4;
- sb->s_d_op = &adfs_dentry_operations;
+ set_default_d_op(sb, &adfs_dentry_operations);
root = adfs_iget(sb, &root_obj);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
@@ -453,18 +414,61 @@ error:
return ret;
}
-static struct dentry *adfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int adfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, adfs_fill_super);
+}
+
+static void adfs_free_fc(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, adfs_fill_super);
+ struct adfs_sb_info *asb = fc->s_fs_info;
+
+ kfree(asb);
+}
+
+static const struct fs_context_operations adfs_context_ops = {
+ .parse_param = adfs_parse_param,
+ .get_tree = adfs_get_tree,
+ .reconfigure = adfs_reconfigure,
+ .free = adfs_free_fc,
+};
+
+static int adfs_init_fs_context(struct fs_context *fc)
+{
+ struct adfs_sb_info *asb;
+
+ asb = kzalloc(sizeof(struct adfs_sb_info), GFP_KERNEL);
+ if (!asb)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+ struct adfs_sb_info *old_asb = ADFS_SB(sb);
+
+ /* structure copy existing options before parsing */
+ *asb = *old_asb;
+ } else {
+ /* set default options */
+ asb->s_uid = GLOBAL_ROOT_UID;
+ asb->s_gid = GLOBAL_ROOT_GID;
+ asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
+ asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
+ asb->s_ftsuffix = 0;
+ }
+
+ fc->ops = &adfs_context_ops;
+ fc->s_fs_info = asb;
+
+ return 0;
}
static struct file_system_type adfs_fs_type = {
.owner = THIS_MODULE,
.name = "adfs",
- .mount = adfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = adfs_init_fs_context,
+ .parameters = adfs_param_spec,
};
MODULE_ALIAS_FS("adfs");
@@ -491,4 +495,5 @@ static void __exit exit_adfs_fs(void)
module_init(init_adfs_fs)
module_exit(exit_adfs_fs)
+MODULE_DESCRIPTION("Acorn Disc Filing System");
MODULE_LICENSE("GPL");
diff --git a/fs/affs/Kconfig b/fs/affs/Kconfig
index eb9d0ab850cb..1ae432d266c3 100644
--- a/fs/affs/Kconfig
+++ b/fs/affs/Kconfig
@@ -2,6 +2,8 @@
config AFFS_FS
tristate "Amiga FFS file system support"
depends on BLOCK
+ select BUFFER_HEAD
+ select LEGACY_DIRECT_IO
help
The Fast File System (FFS) is the common file system used on hard
disks by Amiga(tm) systems since AmigaOS Version 1.3 (34.20). Say Y
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index bfa89e131ead..ac4e9a02910b 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -14,8 +14,6 @@
/* Ugly macros make the code more pretty. */
-#define GET_END_PTR(st,p,sz) ((st *)((char *)(p)+((sz)-sizeof(st))))
-#define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
#define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
#define AFFS_HEAD(bh) ((struct affs_head *)(bh)->b_data)
@@ -105,6 +103,7 @@ struct affs_sb_info {
int work_queued; /* non-zero delayed work is queued */
struct delayed_work sb_work; /* superblock flush delayed work */
spinlock_t work_lock; /* protects sb_work and work_queued */
+ struct rcu_head rcu;
};
#define AFFS_MOUNT_SF_INTL 0x0001 /* International filesystem. */
@@ -167,17 +166,17 @@ extern const struct export_operations affs_export_ops;
extern int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len);
extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int);
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
-extern int affs_create(struct user_namespace *mnt_userns, struct inode *dir,
+extern int affs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool);
-extern int affs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+extern struct dentry *affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
struct dentry *dentry);
-extern int affs_symlink(struct user_namespace *mnt_userns,
+extern int affs_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname);
-extern int affs_rename2(struct user_namespace *mnt_userns,
+extern int affs_rename2(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
@@ -185,7 +184,7 @@ extern int affs_rename2(struct user_namespace *mnt_userns,
/* inode.c */
extern struct inode *affs_new_inode(struct inode *dir);
-extern int affs_notify_change(struct user_namespace *mnt_userns,
+extern int affs_notify_change(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr);
extern void affs_evict_inode(struct inode *inode);
extern struct inode *affs_iget(struct super_block *sb,
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 29f11e10a7c7..fd669daa4e7b 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -60,7 +60,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh)
mark_buffer_dirty_inode(dir_bh, dir);
affs_brelse(dir_bh);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
mark_inode_dirty(dir);
@@ -114,7 +114,7 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
affs_brelse(bh);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
mark_inode_dirty(dir);
@@ -315,7 +315,7 @@ affs_remove_header(struct dentry *dentry)
else
clear_nlink(inode);
affs_unlock_link(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
done:
diff --git a/fs/affs/amigaffs.h b/fs/affs/amigaffs.h
index 81fb396d4dfa..da3217ab6adb 100644
--- a/fs/affs/amigaffs.h
+++ b/fs/affs/amigaffs.h
@@ -49,12 +49,13 @@ struct affs_short_date {
struct affs_root_head {
__be32 ptype;
+ /* The following fields are not used, but kept as documentation. */
__be32 spare1;
__be32 spare2;
__be32 hash_size;
__be32 spare3;
__be32 checksum;
- __be32 hashtable[1];
+ __be32 hashtable[];
};
struct affs_root_tail {
@@ -80,7 +81,7 @@ struct affs_head {
__be32 spare1;
__be32 first_data;
__be32 checksum;
- __be32 table[1];
+ __be32 table[];
};
struct affs_tail {
@@ -108,7 +109,7 @@ struct slink_front
__be32 key;
__be32 spare1[3];
__be32 checksum;
- u8 symname[1]; /* depends on block size */
+ u8 symname[]; /* depends on block size */
};
struct affs_data_head
@@ -119,7 +120,7 @@ struct affs_data_head
__be32 size;
__be32 next;
__be32 checksum;
- u8 data[1]; /* depends on block size */
+ u8 data[]; /* depends on block size */
};
/* Permission bits */
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index b2bf7016e1b3..bd40d5f08810 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -17,13 +17,44 @@
#include <linux/iversion.h>
#include "affs.h"
+struct affs_dir_data {
+ unsigned long ino;
+ u64 cookie;
+};
+
static int affs_readdir(struct file *, struct dir_context *);
+static loff_t affs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct affs_dir_data *data = file->private_data;
+
+ return generic_llseek_cookie(file, offset, whence, &data->cookie);
+}
+
+static int affs_dir_open(struct inode *inode, struct file *file)
+{
+ struct affs_dir_data *data;
+
+ data = kzalloc(sizeof(struct affs_dir_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ file->private_data = data;
+ return 0;
+}
+
+static int affs_dir_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
const struct file_operations affs_dir_operations = {
+ .open = affs_dir_open,
.read = generic_read_dir,
- .llseek = generic_file_llseek,
+ .llseek = affs_dir_llseek,
.iterate_shared = affs_readdir,
.fsync = affs_file_fsync,
+ .release = affs_dir_release,
};
/*
@@ -45,6 +76,7 @@ static int
affs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
+ struct affs_dir_data *data = file->private_data;
struct super_block *sb = inode->i_sb;
struct buffer_head *dir_bh = NULL;
struct buffer_head *fh_bh = NULL;
@@ -59,7 +91,7 @@ affs_readdir(struct file *file, struct dir_context *ctx)
pr_debug("%s(ino=%lu,f_pos=%llx)\n", __func__, inode->i_ino, ctx->pos);
if (ctx->pos < 2) {
- file->private_data = (void *)0;
+ data->ino = 0;
if (!dir_emit_dots(file, ctx))
return 0;
}
@@ -80,8 +112,8 @@ affs_readdir(struct file *file, struct dir_context *ctx)
/* If the directory hasn't changed since the last call to readdir(),
* we can jump directly to where we left off.
*/
- ino = (u32)(long)file->private_data;
- if (ino && inode_eq_iversion(inode, file->f_version)) {
+ ino = data->ino;
+ if (ino && inode_eq_iversion(inode, data->cookie)) {
pr_debug("readdir() left off=%d\n", ino);
goto inside;
}
@@ -131,8 +163,8 @@ inside:
} while (ino);
}
done:
- file->f_version = inode_query_iversion(inode);
- file->private_data = (void *)(long)ino;
+ data->cookie = inode_query_iversion(inode);
+ data->ino = ino;
affs_brelse(fh_bh);
out_brelse_dir:
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 75ebd2b576ca..765c3443663e 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -15,6 +15,7 @@
#include <linux/uio.h>
#include <linux/blkdev.h>
+#include <linux/mpage.h>
#include "affs.h"
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
@@ -370,14 +371,15 @@ err_alloc:
return -ENOSPC;
}
-static int affs_writepage(struct page *page, struct writeback_control *wbc)
+static int affs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, affs_get_block, wbc);
+ return mpage_writepages(mapping, wbc, affs_get_block);
}
-static int affs_readpage(struct file *file, struct page *page)
+static int affs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, affs_get_block);
+ return block_read_full_folio(folio, affs_get_block);
}
static void affs_write_failed(struct address_space *mapping, loff_t to)
@@ -413,14 +415,14 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return ret;
}
-static int affs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int affs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
affs_get_block,
&AFFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -429,14 +431,15 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int affs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
+static int affs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping, loff_t pos,
+ unsigned int len, unsigned int copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
/* Clear Archived bit on file writes, as AmigaOS would do */
if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
@@ -453,12 +456,14 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations affs_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = affs_readpage,
- .writepage = affs_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = affs_read_folio,
+ .writepages = affs_writepages,
.write_begin = affs_write_begin,
.write_end = affs_write_end,
.direct_IO = affs_direct_IO,
+ .migrate_folio = buffer_migrate_folio,
.bmap = _affs_bmap
};
@@ -519,22 +524,20 @@ affs_getemptyblk_ino(struct inode *inode, int block)
return ERR_PTR(err);
}
-static int
-affs_do_readpage_ofs(struct page *page, unsigned to, int create)
+static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct buffer_head *bh;
- char *data;
- unsigned pos = 0;
- u32 bidx, boff, bsize;
+ size_t pos = 0;
+ size_t bidx, boff, bsize;
u32 tmp;
- pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
- page->index, to);
- BUG_ON(to > PAGE_SIZE);
+ pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino,
+ folio->index, to);
+ BUG_ON(to > folio_size(folio));
bsize = AFFS_SB(sb)->s_data_blksize;
- tmp = page->index << PAGE_SHIFT;
+ tmp = folio_pos(folio);
bidx = tmp / bsize;
boff = tmp % bsize;
@@ -544,15 +547,12 @@ affs_do_readpage_ofs(struct page *page, unsigned to, int create)
return PTR_ERR(bh);
tmp = min(bsize - boff, to - pos);
BUG_ON(pos + tmp > to || tmp > bsize);
- data = kmap_atomic(page);
- memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
- kunmap_atomic(data);
+ memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp);
affs_brelse(bh);
bidx++;
pos += tmp;
boff = 0;
}
- flush_dcache_page(page);
return 0;
}
@@ -598,7 +598,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
BUG_ON(tmp > bsize);
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
affs_fix_checksum(sb, bh);
bh->b_state &= ~(1UL << BH_New);
@@ -627,33 +627,33 @@ out:
return PTR_ERR(bh);
}
-static int
-affs_readpage_ofs(struct file *file, struct page *page)
+static int affs_read_folio_ofs(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- u32 to;
+ struct inode *inode = folio->mapping->host;
+ size_t to;
int err;
- pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
- to = PAGE_SIZE;
- if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
- to = inode->i_size & ~PAGE_MASK;
- memset(page_address(page) + to, 0, PAGE_SIZE - to);
+ pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
+ to = folio_size(folio);
+ if (folio_pos(folio) + to > inode->i_size) {
+ to = inode->i_size - folio_pos(folio);
+ folio_zero_segment(folio, to, folio_size(folio));
}
- err = affs_do_readpage_ofs(page, to, 0);
+ err = affs_do_read_folio_ofs(folio, to, 0);
if (!err)
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return err;
}
-static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int affs_write_begin_ofs(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
- struct page *page;
+ struct folio *folio;
pgoff_t index;
int err = 0;
@@ -669,26 +669,28 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
}
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ *foliop = folio;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
/* XXX: inefficient but safe in the face of short writes */
- err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
+ err = affs_do_read_folio_ofs(folio, folio_size(folio), 1);
if (err) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
return err;
}
-static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+static int affs_write_end_ofs(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
struct super_block *sb = inode->i_sb;
@@ -703,18 +705,18 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
to = from + len;
/*
* XXX: not sure if this can handle short copies (len < copied), but
- * we don't have to, because the page should always be uptodate here,
+ * we don't have to, because the folio should always be uptodate here,
* due to write_begin.
*/
pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
pos + len);
bsize = AFFS_SB(sb)->s_data_blksize;
- data = page_address(page);
+ data = folio_address(folio);
bh = NULL;
written = 0;
- tmp = (page->index << PAGE_SHIFT) + from;
+ tmp = (folio->index << PAGE_SHIFT) + from;
bidx = tmp / bsize;
boff = tmp % bsize;
if (boff) {
@@ -726,7 +728,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
tmp = min(bsize - boff, to - from);
BUG_ON(boff + tmp > bsize || tmp > bsize);
memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
- be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
+ AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
+ max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
affs_fix_checksum(sb, bh);
mark_buffer_dirty_inode(bh, inode);
written += tmp;
@@ -748,7 +751,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
@@ -782,7 +785,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
@@ -806,11 +809,11 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
from += tmp;
bidx++;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
done:
affs_brelse(bh);
- tmp = (page->index << PAGE_SHIFT) + from;
+ tmp = (folio->index << PAGE_SHIFT) + from;
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
@@ -821,8 +824,8 @@ done:
}
err_first_bh:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return written;
@@ -834,11 +837,13 @@ err_bh:
}
const struct address_space_operations affs_aops_ofs = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = affs_readpage_ofs,
- //.writepage = affs_writepage_ofs,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = affs_read_folio_ofs,
+ //.writepages = affs_writepages_ofs,
.write_begin = affs_write_begin_ofs,
- .write_end = affs_write_end_ofs
+ .write_end = affs_write_end_ofs,
+ .migrate_folio = filemap_migrate_folio,
};
/* Free any preallocated blocks. */
@@ -880,14 +885,14 @@ affs_truncate(struct inode *inode)
if (inode->i_size > AFFS_I(inode)->mmu_private) {
struct address_space *mapping = inode->i_mapping;
- struct page *page;
- void *fsdata;
+ struct folio *folio;
+ void *fsdata = NULL;
loff_t isize = inode->i_size;
int res;
- res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
+ res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
if (!res)
- res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
+ res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
else
inode->i_size = AFFS_I(inode)->mmu_private;
mark_inode_dirty(inode);
@@ -998,11 +1003,11 @@ const struct file_operations affs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.open = affs_file_open,
.release = affs_file_release,
.fsync = affs_file_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
};
const struct inode_operations affs_file_inode_operations = {
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 2352a75bd9d6..0bfc7d151dcd 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -29,7 +29,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
pr_debug("affs_iget(%lu)\n", inode->i_ino);
@@ -149,13 +149,9 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
break;
}
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec
- = (be32_to_cpu(tail->change.days) * 86400LL +
- be32_to_cpu(tail->change.mins) * 60 +
- be32_to_cpu(tail->change.ticks) / 50 +
- AFFS_EPOCH_DELTA) +
- sys_tz.tz_minuteswest * 60;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode,
+ inode_set_atime(inode, inode_set_ctime(inode, (be32_to_cpu(tail->change.days) * 86400LL + be32_to_cpu(tail->change.mins) * 60 + be32_to_cpu(tail->change.ticks) / 50 + AFFS_EPOCH_DELTA) + sys_tz.tz_minuteswest * 60, 0).tv_sec, 0).tv_sec,
+ 0);
affs_brelse(bh);
unlock_new_inode(inode);
return inode;
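
The timestamp hunk above leans on the fact that inode_set_ctime()/inode_set_atime()/inode_set_mtime() each return the struct timespec64 they just stored, which is why the three assignments collapse into one nested expression. An equivalent, easier-to-read form using the *_to_ts() accessors would look roughly like the sketch below (illustrative only; the patch keeps the nested form):

	struct timespec64 ts = {
		.tv_sec  = (be32_to_cpu(tail->change.days) * 86400LL +
			    be32_to_cpu(tail->change.mins) * 60 +
			    be32_to_cpu(tail->change.ticks) / 50 +
			    AFFS_EPOCH_DELTA) +
			   sys_tz.tz_minuteswest * 60,
		.tv_nsec = 0,
	};

	inode_set_mtime_to_ts(inode, ts);
	inode_set_atime_to_ts(inode, ts);
	inode_set_ctime_to_ts(inode, ts);
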
@@ -187,12 +183,13 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
}
tail = AFFS_TAIL(sb, bh);
if (tail->stype == cpu_to_be32(ST_ROOT)) {
- affs_secs_to_datestamp(inode->i_mtime.tv_sec,
+ affs_secs_to_datestamp(inode_get_mtime_sec(inode),
&AFFS_ROOT_TAIL(sb, bh)->root_change);
} else {
tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect);
tail->size = cpu_to_be32(inode->i_size);
- affs_secs_to_datestamp(inode->i_mtime.tv_sec, &tail->change);
+ affs_secs_to_datestamp(inode_get_mtime_sec(inode),
+ &tail->change);
if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) {
uid = i_uid_read(inode);
gid = i_gid_read(inode);
@@ -216,7 +213,7 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
}
int
-affs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+affs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -224,7 +221,7 @@ affs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid);
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
goto out;
@@ -250,7 +247,7 @@ affs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
affs_truncate(inode);
}
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
if (attr->ia_valid & ATTR_MODE)
@@ -314,7 +311,7 @@ affs_new_inode(struct inode *dir)
inode->i_gid = current_fsgid();
inode->i_ino = block;
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
atomic_set(&AFFS_I(inode)->i_opencnt, 0);
AFFS_I(inode)->i_blkcnt = 0;
AFFS_I(inode)->i_lc = NULL;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index bcab18956b4f..f883be50db12 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -43,7 +43,7 @@ affs_get_toupper(struct super_block *sb)
* Note: the dentry argument is the parent dentry.
*/
static inline int
-__affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t toupper, bool notruncate)
+__affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t fn, bool notruncate)
{
const u8 *name = qstr->name;
unsigned long hash;
@@ -57,7 +57,7 @@ __affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t tou
hash = init_name_hash(dentry);
len = min(qstr->len, AFFSNAMEMAX);
for (; len > 0; name++, len--)
- hash = partial_name_hash(toupper(*name), hash);
+ hash = partial_name_hash(fn(*name), hash);
qstr->hash = end_name_hash(hash);
return 0;
@@ -80,7 +80,7 @@ affs_intl_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
}
static inline int __affs_compare_dentry(unsigned int len,
- const char *str, const struct qstr *name, toupper_t toupper,
+ const char *str, const struct qstr *name, toupper_t fn,
bool notruncate)
{
const u8 *aname = str;
@@ -106,7 +106,7 @@ static inline int __affs_compare_dentry(unsigned int len,
return 1;
for (; len > 0; len--)
- if (toupper(*aname++) != toupper(*bname++))
+ if (fn(*aname++) != fn(*bname++))
return 1;
return 0;
@@ -135,7 +135,7 @@ affs_intl_compare_dentry(const struct dentry *dentry,
*/
static inline int
-affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
+affs_match(struct dentry *dentry, const u8 *name2, toupper_t fn)
{
const u8 *name = dentry->d_name.name;
int len = dentry->d_name.len;
@@ -148,7 +148,7 @@ affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
return 0;
for (name2++; len > 0; len--)
- if (toupper(*name++) != toupper(*name2++))
+ if (fn(*name++) != fn(*name2++))
return 0;
return 1;
}
@@ -156,12 +156,12 @@ affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
int
affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len)
{
- toupper_t toupper = affs_get_toupper(sb);
+ toupper_t fn = affs_get_toupper(sb);
u32 hash;
hash = len = min(len, AFFSNAMEMAX);
for (; len > 0; len--)
- hash = (hash * 13 + toupper(*name++)) & 0x7ff;
+ hash = (hash * 13 + fn(*name++)) & 0x7ff;
return hash % AFFS_SB(sb)->s_hashsize;
}
@@ -171,7 +171,7 @@ affs_find_entry(struct inode *dir, struct dentry *dentry)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
- toupper_t toupper = affs_get_toupper(sb);
+ toupper_t fn = affs_get_toupper(sb);
u32 key;
pr_debug("%s(\"%pd\")\n", __func__, dentry);
@@ -189,7 +189,7 @@ affs_find_entry(struct inode *dir, struct dentry *dentry)
bh = affs_bread(sb, key);
if (!bh)
return ERR_PTR(-EIO);
- if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper))
+ if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, fn))
return bh;
key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
}
@@ -242,7 +242,7 @@ affs_unlink(struct inode *dir, struct dentry *dentry)
}
int
-affs_create(struct user_namespace *mnt_userns, struct inode *dir,
+affs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
@@ -273,8 +273,8 @@ affs_create(struct user_namespace *mnt_userns, struct inode *dir,
return 0;
}
-int
-affs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+struct dentry *
+affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct inode *inode;
@@ -285,7 +285,7 @@ affs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
inode = affs_new_inode(dir);
if (!inode)
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
inode->i_mode = S_IFDIR | mode;
affs_mode_to_prot(inode);
@@ -298,9 +298,9 @@ affs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
clear_nlink(inode);
mark_inode_dirty(inode);
iput(inode);
- return error;
+ return ERR_PTR(error);
}
- return 0;
+ return NULL;
}
int
@@ -313,7 +313,7 @@ affs_rmdir(struct inode *dir, struct dentry *dentry)
}
int
-affs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+affs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct super_block *sb = dir->i_sb;
@@ -503,7 +503,7 @@ done:
return retval;
}
-int affs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
+int affs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -532,9 +532,6 @@ static struct dentry *affs_get_parent(struct dentry *child)
parent = affs_iget(child->d_sb,
be32_to_cpu(AFFS_TAIL(child->d_sb, bh)->parent));
brelse(bh);
- if (IS_ERR(parent))
- return ERR_CAST(parent);
-
return d_obtain_alias(parent);
}
@@ -568,6 +565,7 @@ static struct dentry *affs_fh_to_parent(struct super_block *sb, struct fid *fid,
}
const struct export_operations affs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = affs_fh_to_dentry,
.fh_to_parent = affs_fh_to_parent,
.get_parent = affs_get_parent,
diff --git a/fs/affs/super.c b/fs/affs/super.c
index c609005a9eaa..44f8aa883100 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -14,7 +14,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/cred.h>
@@ -27,7 +28,6 @@
static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int affs_show_options(struct seq_file *m, struct dentry *root);
-static int affs_remount (struct super_block *sb, int *flags, char *data);
static void
affs_commit_super(struct super_block *sb, int wait)
@@ -100,7 +100,7 @@ static struct inode *affs_alloc_inode(struct super_block *sb)
{
struct affs_inode_info *i;
- i = kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
+ i = alloc_inode_sb(sb, affs_inode_cachep, GFP_KERNEL);
if (!i)
return NULL;
@@ -130,8 +130,7 @@ static int __init init_inodecache(void)
{
affs_inode_cachep = kmem_cache_create("affs_inode_cache",
sizeof(struct affs_inode_info),
- 0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ 0, (SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT),
init_once);
if (affs_inode_cachep == NULL)
return -ENOMEM;
@@ -156,140 +155,114 @@ static const struct super_operations affs_sops = {
.put_super = affs_put_super,
.sync_fs = affs_sync_fs,
.statfs = affs_statfs,
- .remount_fs = affs_remount,
.show_options = affs_show_options,
};
enum {
Opt_bs, Opt_mode, Opt_mufs, Opt_notruncate, Opt_prefix, Opt_protect,
Opt_reserved, Opt_root, Opt_setgid, Opt_setuid,
- Opt_verbose, Opt_volume, Opt_ignore, Opt_err,
+ Opt_verbose, Opt_volume, Opt_ignore,
};
-static const match_table_t tokens = {
- {Opt_bs, "bs=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_mufs, "mufs"},
- {Opt_notruncate, "nofilenametruncate"},
- {Opt_prefix, "prefix=%s"},
- {Opt_protect, "protect"},
- {Opt_reserved, "reserved=%u"},
- {Opt_root, "root=%u"},
- {Opt_setgid, "setgid=%u"},
- {Opt_setuid, "setuid=%u"},
- {Opt_verbose, "verbose"},
- {Opt_volume, "volume=%s"},
- {Opt_ignore, "grpquota"},
- {Opt_ignore, "noquota"},
- {Opt_ignore, "quota"},
- {Opt_ignore, "usrquota"},
- {Opt_err, NULL},
+struct affs_context {
+ kuid_t uid; /* uid to override */
+ kgid_t gid; /* gid to override */
+ unsigned int mode; /* mode to override */
+ unsigned int reserved; /* Number of reserved blocks */
+ int root_block; /* FFS root block number */
+ int blocksize; /* Initial device blksize */
+ char *prefix; /* Prefix for volumes and assigns */
+ char volume[32]; /* Vol. prefix for absolute symlinks */
+ unsigned long mount_flags; /* Options */
};
-static int
-parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, s32 *root,
- int *blocksize, char **prefix, char *volume, unsigned long *mount_opts)
+static const struct fs_parameter_spec affs_param_spec[] = {
+ fsparam_u32 ("bs", Opt_bs),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_flag ("mufs", Opt_mufs),
+ fsparam_flag ("nofilenametruncate", Opt_notruncate),
+ fsparam_string ("prefix", Opt_prefix),
+ fsparam_flag ("protect", Opt_protect),
+ fsparam_u32 ("reserved", Opt_reserved),
+ fsparam_u32 ("root", Opt_root),
+ fsparam_gid ("setgid", Opt_setgid),
+ fsparam_uid ("setuid", Opt_setuid),
+ fsparam_flag ("verbose", Opt_verbose),
+ fsparam_string ("volume", Opt_volume),
+ fsparam_flag ("grpquota", Opt_ignore),
+ fsparam_flag ("noquota", Opt_ignore),
+ fsparam_flag ("quota", Opt_ignore),
+ fsparam_flag ("usrquota", Opt_ignore),
+ {},
+};
+
+static int affs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
-
- /* Fill in defaults */
-
- *uid = current_uid();
- *gid = current_gid();
- *reserved = 2;
- *root = -1;
- *blocksize = -1;
- volume[0] = ':';
- volume[1] = 0;
- *mount_opts = 0;
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token, n, option;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_bs:
- if (match_int(&args[0], &n))
- return 0;
- if (n != 512 && n != 1024 && n != 2048
- && n != 4096) {
- pr_warn("Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
- return 0;
- }
- *blocksize = n;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return 0;
- *mode = option & 0777;
- affs_set_opt(*mount_opts, SF_SETMODE);
- break;
- case Opt_mufs:
- affs_set_opt(*mount_opts, SF_MUFS);
- break;
- case Opt_notruncate:
- affs_set_opt(*mount_opts, SF_NO_TRUNCATE);
- break;
- case Opt_prefix:
- kfree(*prefix);
- *prefix = match_strdup(&args[0]);
- if (!*prefix)
- return 0;
- affs_set_opt(*mount_opts, SF_PREFIX);
- break;
- case Opt_protect:
- affs_set_opt(*mount_opts, SF_IMMUTABLE);
- break;
- case Opt_reserved:
- if (match_int(&args[0], reserved))
- return 0;
- break;
- case Opt_root:
- if (match_int(&args[0], root))
- return 0;
- break;
- case Opt_setgid:
- if (match_int(&args[0], &option))
- return 0;
- *gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(*gid))
- return 0;
- affs_set_opt(*mount_opts, SF_SETGID);
- break;
- case Opt_setuid:
- if (match_int(&args[0], &option))
- return 0;
- *uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(*uid))
- return 0;
- affs_set_opt(*mount_opts, SF_SETUID);
- break;
- case Opt_verbose:
- affs_set_opt(*mount_opts, SF_VERBOSE);
- break;
- case Opt_volume: {
- char *vol = match_strdup(&args[0]);
- if (!vol)
- return 0;
- strlcpy(volume, vol, 32);
- kfree(vol);
- break;
- }
- case Opt_ignore:
- /* Silently ignore the quota options */
- break;
- default:
- pr_warn("Unrecognized mount option \"%s\" or missing value\n",
- p);
- return 0;
+ struct affs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int n;
+ int opt;
+
+ opt = fs_parse(fc, affs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_bs:
+ n = result.uint_32;
+ if (n != 512 && n != 1024 && n != 2048
+ && n != 4096) {
+ pr_warn("Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
+ return -EINVAL;
}
+ ctx->blocksize = n;
+ break;
+ case Opt_mode:
+ ctx->mode = result.uint_32 & 0777;
+ affs_set_opt(ctx->mount_flags, SF_SETMODE);
+ break;
+ case Opt_mufs:
+ affs_set_opt(ctx->mount_flags, SF_MUFS);
+ break;
+ case Opt_notruncate:
+ affs_set_opt(ctx->mount_flags, SF_NO_TRUNCATE);
+ break;
+ case Opt_prefix:
+ kfree(ctx->prefix);
+ ctx->prefix = param->string;
+ param->string = NULL;
+ affs_set_opt(ctx->mount_flags, SF_PREFIX);
+ break;
+ case Opt_protect:
+ affs_set_opt(ctx->mount_flags, SF_IMMUTABLE);
+ break;
+ case Opt_reserved:
+ ctx->reserved = result.uint_32;
+ break;
+ case Opt_root:
+ ctx->root_block = result.uint_32;
+ break;
+ case Opt_setgid:
+ ctx->gid = result.gid;
+ affs_set_opt(ctx->mount_flags, SF_SETGID);
+ break;
+ case Opt_setuid:
+ ctx->uid = result.uid;
+ affs_set_opt(ctx->mount_flags, SF_SETUID);
+ break;
+ case Opt_verbose:
+ affs_set_opt(ctx->mount_flags, SF_VERBOSE);
+ break;
+ case Opt_volume:
+ strscpy(ctx->volume, param->string, 32);
+ break;
+ case Opt_ignore:
+ /* Silently ignore the quota options */
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int affs_show_options(struct seq_file *m, struct dentry *root)
@@ -330,27 +303,22 @@ static int affs_show_options(struct seq_file *m, struct dentry *root)
* hopefully have the guts to do so. Until then: sorry for the mess.
*/
-static int affs_fill_super(struct super_block *sb, void *data, int silent)
+static int affs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct affs_sb_info *sbi;
+ struct affs_context *ctx = fc->fs_private;
struct buffer_head *root_bh = NULL;
struct buffer_head *boot_bh;
struct inode *root_inode = NULL;
- s32 root_block;
+ int silent = fc->sb_flags & SB_SILENT;
int size, blocksize;
u32 chksum;
int num_bm;
int i, j;
- kuid_t uid;
- kgid_t gid;
- int reserved;
- unsigned long mount_flags;
int tmp_flags; /* fix remount prototype... */
u8 sig[4];
int ret;
- pr_debug("read_super(%s)\n", data ? (const char *)data : "no options");
-
sb->s_magic = AFFS_SUPER_MAGIC;
sb->s_op = &affs_sops;
sb->s_flags |= SB_NODIRATIME;
@@ -370,19 +338,16 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
spin_lock_init(&sbi->work_lock);
INIT_DELAYED_WORK(&sbi->sb_work, flush_superblock);
- if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
- &blocksize,&sbi->s_prefix,
- sbi->s_volume, &mount_flags)) {
- pr_err("Error parsing options\n");
- return -EINVAL;
- }
- /* N.B. after this point s_prefix must be released */
+ sbi->s_flags = ctx->mount_flags;
+ sbi->s_mode = ctx->mode;
+ sbi->s_uid = ctx->uid;
+ sbi->s_gid = ctx->gid;
+ sbi->s_reserved = ctx->reserved;
+ sbi->s_prefix = ctx->prefix;
+ ctx->prefix = NULL;
+ memcpy(sbi->s_volume, ctx->volume, 32);
- sbi->s_flags = mount_flags;
- sbi->s_mode = i;
- sbi->s_uid = uid;
- sbi->s_gid = gid;
- sbi->s_reserved= reserved;
+ /* N.B. after this point s_prefix must be released */
/* Get the size of the device in 512-byte blocks.
* If we later see that the partition uses bigger
@@ -397,15 +362,16 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
i = bdev_logical_block_size(sb->s_bdev);
j = PAGE_SIZE;
+ blocksize = ctx->blocksize;
if (blocksize > 0) {
i = j = blocksize;
size = size / (blocksize / 512);
}
for (blocksize = i; blocksize <= j; blocksize <<= 1, size >>= 1) {
- sbi->s_root_block = root_block;
- if (root_block < 0)
- sbi->s_root_block = (reserved + size - 1) / 2;
+ sbi->s_root_block = ctx->root_block;
+ if (ctx->root_block < 0)
+ sbi->s_root_block = (ctx->reserved + size - 1) / 2;
pr_debug("setting blocksize to %d\n", blocksize);
affs_set_blocksize(sb, blocksize);
sbi->s_partition_size = size;
@@ -425,7 +391,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
"size=%d, reserved=%d\n",
sb->s_id,
sbi->s_root_block + num_bm,
- blocksize, size, reserved);
+ ctx->blocksize, size, ctx->reserved);
root_bh = affs_bread(sb, sbi->s_root_block + num_bm);
if (!root_bh)
continue;
@@ -448,7 +414,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
got_root:
/* Keep super block in cache */
sbi->s_root_bh = root_bh;
- root_block = sbi->s_root_block;
+ ctx->root_block = sbi->s_root_block;
/* Find out which kind of FS we have */
boot_bh = sb_bread(sb, 0);
@@ -507,7 +473,7 @@ got_root:
return -EINVAL;
}
- if (affs_test_opt(mount_flags, SF_VERBOSE)) {
+ if (affs_test_opt(ctx->mount_flags, SF_VERBOSE)) {
u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0];
pr_notice("Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n",
len > 31 ? 31 : len,
@@ -529,14 +495,14 @@ got_root:
/* set up enough so that it can read an inode */
- root_inode = affs_iget(sb, root_block);
+ root_inode = affs_iget(sb, ctx->root_block);
if (IS_ERR(root_inode))
return PTR_ERR(root_inode);
if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL))
- sb->s_d_op = &affs_intl_dentry_operations;
+ set_default_d_op(sb, &affs_intl_dentry_operations);
else
- sb->s_d_op = &affs_dentry_operations;
+ set_default_d_op(sb, &affs_dentry_operations);
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
@@ -549,56 +515,43 @@ got_root:
return 0;
}
-static int
-affs_remount(struct super_block *sb, int *flags, char *data)
+static int affs_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+ struct affs_context *ctx = fc->fs_private;
struct affs_sb_info *sbi = AFFS_SB(sb);
- int blocksize;
- kuid_t uid;
- kgid_t gid;
- int mode;
- int reserved;
- int root_block;
- unsigned long mount_flags;
int res = 0;
- char volume[32];
- char *prefix = NULL;
-
- pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
sync_filesystem(sb);
- *flags |= SB_NODIRATIME;
-
- memcpy(volume, sbi->s_volume, 32);
- if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
- &blocksize, &prefix, volume,
- &mount_flags)) {
- kfree(prefix);
- return -EINVAL;
- }
+ fc->sb_flags |= SB_NODIRATIME;
flush_delayed_work(&sbi->sb_work);
- sbi->s_flags = mount_flags;
- sbi->s_mode = mode;
- sbi->s_uid = uid;
- sbi->s_gid = gid;
+ /*
+ * NB: Historically, only mount_flags, mode, uid, gid, prefix,
+ * and volume are accepted during remount.
+ */
+ sbi->s_flags = ctx->mount_flags;
+ sbi->s_mode = ctx->mode;
+ sbi->s_uid = ctx->uid;
+ sbi->s_gid = ctx->gid;
/* protect against readers */
spin_lock(&sbi->symlink_lock);
- if (prefix) {
+ if (ctx->prefix) {
kfree(sbi->s_prefix);
- sbi->s_prefix = prefix;
+ sbi->s_prefix = ctx->prefix;
+ ctx->prefix = NULL;
}
- memcpy(sbi->s_volume, volume, 32);
+ memcpy(sbi->s_volume, ctx->volume, 32);
spin_unlock(&sbi->symlink_lock);
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & SB_RDONLY)
+ if (fc->sb_flags & SB_RDONLY)
affs_free_bitmap(sb);
else
- res = affs_init_bitmap(sb, flags);
+ res = affs_init_bitmap(sb, &fc->sb_flags);
return res;
}
@@ -625,10 +578,9 @@ affs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static struct dentry *affs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int affs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, affs_fill_super);
+ return get_tree_bdev(fc, affs_fill_super);
}
static void affs_kill_sb(struct super_block *sb)
@@ -640,16 +592,65 @@ static void affs_kill_sb(struct super_block *sb)
affs_brelse(sbi->s_root_bh);
kfree(sbi->s_prefix);
mutex_destroy(&sbi->s_bmlock);
- kfree(sbi);
+ kfree_rcu(sbi, rcu);
}
}
+static void affs_free_fc(struct fs_context *fc)
+{
+ struct affs_context *ctx = fc->fs_private;
+
+ kfree(ctx->prefix);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations affs_context_ops = {
+ .parse_param = affs_parse_param,
+ .get_tree = affs_get_tree,
+ .reconfigure = affs_reconfigure,
+ .free = affs_free_fc,
+};
+
+static int affs_init_fs_context(struct fs_context *fc)
+{
+ struct affs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct affs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+ struct affs_sb_info *sbi = AFFS_SB(sb);
+
+ /*
+ * NB: historically, no options other than volume were
+ * preserved across a remount unless they were explicitly
+ * passed in.
+ */
+ memcpy(ctx->volume, sbi->s_volume, 32);
+ } else {
+ ctx->uid = current_uid();
+ ctx->gid = current_gid();
+ ctx->reserved = 2;
+ ctx->root_block = -1;
+ ctx->blocksize = -1;
+ ctx->volume[0] = ':';
+ }
+
+ fc->ops = &affs_context_ops;
+ fc->fs_private = ctx;
+
+ return 0;
+}
+
static struct file_system_type affs_fs_type = {
.owner = THIS_MODULE,
.name = "affs",
- .mount = affs_mount,
.kill_sb = affs_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = affs_init_fs_context,
+ .parameters = affs_param_spec,
};
MODULE_ALIAS_FS("affs");
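
With the conversion above, affs options are described by affs_param_spec, parsed one at a time by affs_parse_param() into a per-mount affs_context, and applied to the superblock only in affs_fill_super()/affs_reconfigure(). One practical consequence is that the filesystem can now also be driven through the new mount API syscalls, with each option configured (and rejected) individually. A hedged userspace sketch, using raw syscall() since libc wrappers are not universally available; the option values are purely illustrative:

	#include <sys/syscall.h>
	#include <linux/mount.h>
	#include <unistd.h>

	static int mount_affs_detached(const char *dev)
	{
		int fsfd, mfd;

		fsfd = syscall(SYS_fsopen, "affs", FSOPEN_CLOEXEC);
		if (fsfd < 0)
			return -1;

		/* Each call below ends up in affs_parse_param(). */
		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", dev, 0);
		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "volume", "Work", 0);
		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "protect", NULL, 0);
		syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

		/* Returns a detached mount fd; attach it with move_mount(2). */
		mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
		close(fsfd);
		return mfd;
	}
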
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index a7531b26e8f0..094aec8d17b8 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -11,11 +11,11 @@
#include "affs.h"
-static int affs_symlink_readpage(struct file *file, struct page *page)
+static int affs_symlink_read_folio(struct file *file, struct folio *folio)
{
struct buffer_head *bh;
- struct inode *inode = page->mapping->host;
- char *link = page_address(page);
+ struct inode *inode = folio->mapping->host;
+ char *link = folio_address(folio);
struct slink_front *lf;
int i, j;
char c;
@@ -57,17 +57,16 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
}
link[i] = '\0';
affs_brelse(bh);
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
fail:
- SetPageError(page);
- unlock_page(page);
+ folio_unlock(folio);
return -EIO;
}
const struct address_space_operations affs_symlink_aops = {
- .readpage = affs_symlink_readpage,
+ .read_folio = affs_symlink_read_folio,
};
const struct inode_operations affs_symlink_inode_operations = {
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
index fc8ba9142f2f..682bd8ec2c10 100644
--- a/fs/afs/Kconfig
+++ b/fs/afs/Kconfig
@@ -5,6 +5,7 @@ config AFS_FS
select AF_RXRPC
select DNS_RESOLVER
select NETFS_SUPPORT
+ select CRYPTO_KRB5
help
If you say Y here, you will get an experimental Andrew File System
driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 75c4e4043d1d..b49b8fe682f3 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -3,16 +3,16 @@
# Makefile for Red Hat Linux AFS client.
#
-afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o
-
kafs-y := \
- $(afs-cache-y) \
addr_list.o \
+ addr_prefs.o \
callback.o \
cell.o \
+ cm_security.o \
cmservice.o \
dir.o \
dir_edit.o \
+ dir_search.o \
dir_silly.o \
dynroot.o \
file.o \
@@ -30,6 +30,7 @@ kafs-y := \
server.o \
server_list.o \
super.o \
+ validation.o \
vlclient.o \
vl_alias.o \
vl_list.o \
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index de1ae0bead3b..e941da5b6dd9 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -13,26 +13,55 @@
#include "internal.h"
#include "afs_fs.h"
+static void afs_free_addrlist(struct rcu_head *rcu)
+{
+ struct afs_addr_list *alist = container_of(rcu, struct afs_addr_list, rcu);
+ unsigned int i;
+
+ for (i = 0; i < alist->nr_addrs; i++)
+ rxrpc_kernel_put_peer(alist->addrs[i].peer);
+ trace_afs_alist(alist->debug_id, refcount_read(&alist->usage), afs_alist_trace_free);
+ kfree(alist);
+}
+
/*
* Release an address list.
*/
-void afs_put_addrlist(struct afs_addr_list *alist)
+void afs_put_addrlist(struct afs_addr_list *alist, enum afs_alist_trace reason)
{
- if (alist && refcount_dec_and_test(&alist->usage))
- kfree_rcu(alist, rcu);
+ unsigned int debug_id;
+ bool dead;
+ int r;
+
+ if (!alist)
+ return;
+ debug_id = alist->debug_id;
+ dead = __refcount_dec_and_test(&alist->usage, &r);
+ trace_afs_alist(debug_id, r - 1, reason);
+ if (dead)
+ call_rcu(&alist->rcu, afs_free_addrlist);
+}
+
+struct afs_addr_list *afs_get_addrlist(struct afs_addr_list *alist, enum afs_alist_trace reason)
+{
+ int r;
+
+ if (alist) {
+ __refcount_inc(&alist->usage, &r);
+ trace_afs_alist(alist->debug_id, r + 1, reason);
+ }
+ return alist;
}
/*
* Allocate an address list.
*/
-struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
- unsigned short service,
- unsigned short port)
+struct afs_addr_list *afs_alloc_addrlist(unsigned int nr)
{
struct afs_addr_list *alist;
- unsigned int i;
+ static atomic_t debug_id;
- _enter("%u,%u,%u", nr, service, port);
+ _enter("%u", nr);
if (nr > AFS_MAX_ADDRESSES)
nr = AFS_MAX_ADDRESSES;
@@ -43,17 +72,8 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
refcount_set(&alist->usage, 1);
alist->max_addrs = nr;
-
- for (i = 0; i < nr; i++) {
- struct sockaddr_rxrpc *srx = &alist->addrs[i];
- srx->srx_family = AF_RXRPC;
- srx->srx_service = service;
- srx->transport_type = SOCK_DGRAM;
- srx->transport_len = sizeof(srx->transport.sin6);
- srx->transport.sin6.sin6_family = AF_INET6;
- srx->transport.sin6.sin6_port = htons(port);
- }
-
+ alist->debug_id = atomic_inc_return(&debug_id);
+ trace_afs_alist(alist->debug_id, 1, afs_alist_trace_alloc);
return alist;
}
@@ -126,7 +146,7 @@ struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net,
if (!vllist->servers[0].server)
goto error_vl;
- alist = afs_alloc_addrlist(nr, service, AFS_VL_PORT);
+ alist = afs_alloc_addrlist(nr);
if (!alist)
goto error;
@@ -197,9 +217,11 @@ struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net,
}
if (family == AF_INET)
- afs_merge_fs_addr4(alist, x[0], xport);
+ ret = afs_merge_fs_addr4(net, alist, x[0], xport);
else
- afs_merge_fs_addr6(alist, x, xport);
+ ret = afs_merge_fs_addr6(net, alist, x, xport);
+ if (ret < 0)
+ goto error;
} while (p < end);
@@ -216,26 +238,13 @@ bad_address:
problem, p - text, (int)len, (int)len, text);
ret = -EINVAL;
error:
- afs_put_addrlist(alist);
+ afs_put_addrlist(alist, afs_alist_trace_put_parse_error);
error_vl:
afs_put_vlserverlist(net, vllist);
return ERR_PTR(ret);
}
/*
- * Compare old and new address lists to see if there's been any change.
- * - How to do this in better than O(Nlog(N)) time?
- * - We don't really want to sort the address list, but would rather take the
- * list as we got it so as not to undo record rotation by the DNS server.
- */
-#if 0
-static int afs_cmp_addr_list(const struct afs_addr_list *a1,
- const struct afs_addr_list *a2)
-{
-}
-#endif
-
-/*
* Perform a DNS query for VL servers and build a up an address list.
*/
struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
@@ -271,25 +280,33 @@ struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry
/*
* Merge an IPv4 entry into a fileserver address list.
*/
-void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
+int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *alist,
+ __be32 xdr, u16 port)
{
- struct sockaddr_rxrpc *srx;
- u32 addr = ntohl(xdr);
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_peer *peer;
int i;
if (alist->nr_addrs >= alist->max_addrs)
- return;
+ return 0;
- for (i = 0; i < alist->nr_ipv4; i++) {
- struct sockaddr_in *a = &alist->addrs[i].transport.sin;
- u32 a_addr = ntohl(a->sin_addr.s_addr);
- u16 a_port = ntohs(a->sin_port);
+ srx.srx_family = AF_RXRPC;
+ srx.transport_type = SOCK_DGRAM;
+ srx.transport_len = sizeof(srx.transport.sin);
+ srx.transport.sin.sin_family = AF_INET;
+ srx.transport.sin.sin_port = htons(port);
+ srx.transport.sin.sin_addr.s_addr = xdr;
- if (addr == a_addr && port == a_port)
- return;
- if (addr == a_addr && port < a_port)
- break;
- if (addr < a_addr)
+ peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ for (i = 0; i < alist->nr_ipv4; i++) {
+ if (peer == alist->addrs[i].peer) {
+ rxrpc_kernel_put_peer(peer);
+ return 0;
+ }
+ if (peer <= alist->addrs[i].peer)
break;
}
@@ -298,38 +315,42 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
alist->addrs + i,
sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
- srx = &alist->addrs[i];
- srx->srx_family = AF_RXRPC;
- srx->transport_type = SOCK_DGRAM;
- srx->transport_len = sizeof(srx->transport.sin);
- srx->transport.sin.sin_family = AF_INET;
- srx->transport.sin.sin_port = htons(port);
- srx->transport.sin.sin_addr.s_addr = xdr;
+ alist->addrs[i].peer = peer;
alist->nr_ipv4++;
alist->nr_addrs++;
+ return 0;
}
/*
* Merge an IPv6 entry into a fileserver address list.
*/
-void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
+int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *alist,
+ __be32 *xdr, u16 port)
{
- struct sockaddr_rxrpc *srx;
- int i, diff;
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_peer *peer;
+ int i;
if (alist->nr_addrs >= alist->max_addrs)
- return;
+ return 0;
- for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
- struct sockaddr_in6 *a = &alist->addrs[i].transport.sin6;
- u16 a_port = ntohs(a->sin6_port);
+ srx.srx_family = AF_RXRPC;
+ srx.transport_type = SOCK_DGRAM;
+ srx.transport_len = sizeof(srx.transport.sin6);
+ srx.transport.sin6.sin6_family = AF_INET6;
+ srx.transport.sin6.sin6_port = htons(port);
+ memcpy(&srx.transport.sin6.sin6_addr, xdr, 16);
- diff = memcmp(xdr, &a->sin6_addr, 16);
- if (diff == 0 && port == a_port)
- return;
- if (diff == 0 && port < a_port)
- break;
- if (diff < 0)
+ peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
+ if (peer == alist->addrs[i].peer) {
+ rxrpc_kernel_put_peer(peer);
+ return 0;
+ }
+ if (peer <= alist->addrs[i].peer)
break;
}
@@ -337,68 +358,57 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
memmove(alist->addrs + i + 1,
alist->addrs + i,
sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
-
- srx = &alist->addrs[i];
- srx->srx_family = AF_RXRPC;
- srx->transport_type = SOCK_DGRAM;
- srx->transport_len = sizeof(srx->transport.sin6);
- srx->transport.sin6.sin6_family = AF_INET6;
- srx->transport.sin6.sin6_port = htons(port);
- memcpy(&srx->transport.sin6.sin6_addr, xdr, 16);
+ alist->addrs[i].peer = peer;
alist->nr_addrs++;
+ return 0;
}
/*
- * Get an address to try.
+ * Set the app data on the rxrpc peers an address list points to
*/
-bool afs_iterate_addresses(struct afs_addr_cursor *ac)
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist)
{
- unsigned long set, failed;
- int index;
-
- if (!ac->alist)
- return false;
-
- set = ac->alist->responded;
- failed = ac->alist->failed;
- _enter("%lx-%lx-%lx,%d", set, failed, ac->tried, ac->index);
-
- ac->nr_iterations++;
-
- set &= ~(failed | ac->tried);
-
- if (!set)
- return false;
-
- index = READ_ONCE(ac->alist->preferred);
- if (test_bit(index, &set))
- goto selected;
+ unsigned long data = (unsigned long)server;
+ int n = 0, o = 0;
- index = __ffs(set);
-
-selected:
- ac->index = index;
- set_bit(index, &ac->tried);
- ac->responded = false;
- return true;
-}
+ if (!old_alist) {
+ /* New server. Just set all. */
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ return;
+ }
+ if (!new_alist) {
+ /* Dead server. Just remove all. */
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+ return;
+ }
-/*
- * Release an address list cursor.
- */
-int afs_end_cursor(struct afs_addr_cursor *ac)
-{
- struct afs_addr_list *alist;
+ /* Walk through the two lists simultaneously, setting new peers and
+ * clearing old ones. The two lists are ordered by pointer to peer
+ * record.
+ */
+ while (n < new_alist->nr_addrs && o < old_alist->nr_addrs) {
+ struct rxrpc_peer *pn = new_alist->addrs[n].peer;
+ struct rxrpc_peer *po = old_alist->addrs[o].peer;
- alist = ac->alist;
- if (alist) {
- if (ac->responded &&
- ac->index != alist->preferred &&
- test_bit(ac->alist->preferred, &ac->tried))
- WRITE_ONCE(alist->preferred, ac->index);
- afs_put_addrlist(alist);
- ac->alist = NULL;
+ if (pn == po)
+ continue;
+ if (pn < po) {
+ rxrpc_kernel_set_peer_data(pn, data);
+ n++;
+ } else {
+ rxrpc_kernel_set_peer_data(po, 0);
+ o++;
+ }
}
- return ac->error;
+ if (n < new_alist->nr_addrs)
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ if (o < old_alist->nr_addrs)
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
}
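
afs_set_peer_appdata() above depends on both address lists being kept in ascending peer-pointer order (afs_merge_fs_addr4()/afs_merge_fs_addr6() insert in that order), which lets the old and new lists be reconciled in a single linear pass. The same two-cursor merge, reduced to a self-contained generic sketch (plain C, not kernel code, with integer keys standing in for peer pointers):

	#include <stddef.h>

	/* Walk two ascending arrays once: apply set() to keys only in 'newk',
	 * clear() to keys only in 'oldk', and skip keys present in both. */
	static void reconcile(const int *oldk, size_t n_old,
			      const int *newk, size_t n_new,
			      void (*set)(int), void (*clear)(int))
	{
		size_t o = 0, n = 0;

		while (o < n_old && n < n_new) {
			if (oldk[o] == newk[n]) {
				o++;
				n++;			/* in both lists: leave as is */
			} else if (newk[n] < oldk[o]) {
				set(newk[n++]);		/* only in the new list */
			} else {
				clear(oldk[o++]);	/* only in the old list */
			}
		}
		while (n < n_new)
			set(newk[n++]);
		while (o < n_old)
			clear(oldk[o++]);
	}
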
diff --git a/fs/afs/addr_prefs.c b/fs/afs/addr_prefs.c
new file mode 100644
index 000000000000..133736412c3d
--- /dev/null
+++ b/fs/afs/addr_prefs.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Address preferences management
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": addr_prefs: " fmt
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
+#include <linux/seq_file.h>
+#include <keys/rxrpc-type.h>
+#include "internal.h"
+
+static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
+{
+ return afs_net(seq_file_single_net(m));
+}
+
+/*
+ * Split a NUL-terminated string up to the first newline around spaces. The
+ * source string will be modified to have NUL-terminations inserted.
+ */
+static int afs_split_string(char **pbuf, char *strv[], unsigned int maxstrv)
+{
+ unsigned int count = 0;
+ char *p = *pbuf;
+
+ maxstrv--; /* Allow for terminal NULL */
+ for (;;) {
+ /* Skip over spaces */
+ while (isspace(*p)) {
+ if (*p == '\n') {
+ p++;
+ break;
+ }
+ p++;
+ }
+ if (!*p)
+ break;
+
+ /* Mark start of word */
+ if (count >= maxstrv) {
+ pr_warn("Too many elements in string\n");
+ return -EINVAL;
+ }
+ strv[count++] = p;
+
+ /* Skip over word */
+ while (!isspace(*p) && *p)
+ p++;
+ if (!*p)
+ break;
+
+ /* Mark end of word */
+ if (*p == '\n') {
+ *p++ = 0;
+ break;
+ }
+ *p++ = 0;
+ }
+
+ *pbuf = p;
+ strv[count] = NULL;
+ return count;
+}
+
+/*
+ * Parse an address with an optional subnet mask.
+ */
+static int afs_parse_address(char *p, struct afs_addr_preference *pref)
+{
+ const char *stop;
+ unsigned long mask, tmp;
+ char *end = p + strlen(p);
+ bool bracket = false;
+
+ if (*p == '[') {
+ p++;
+ bracket = true;
+ }
+
+#if 0
+ if (*p == '[') {
+ p++;
+ q = memchr(p, ']', end - p);
+ if (!q) {
+ pr_warn("Can't find closing ']'\n");
+ return -EINVAL;
+ }
+ } else {
+ for (q = p; q < end; q++)
+ if (*q == '/')
+ break;
+ }
+#endif
+
+ if (in4_pton(p, end - p, (u8 *)&pref->ipv4_addr, -1, &stop)) {
+ pref->family = AF_INET;
+ mask = 32;
+ } else if (in6_pton(p, end - p, (u8 *)&pref->ipv6_addr, -1, &stop)) {
+ pref->family = AF_INET6;
+ mask = 128;
+ } else {
+ pr_warn("Can't determine address family\n");
+ return -EINVAL;
+ }
+
+ p = (char *)stop;
+ if (bracket) {
+ if (*p != ']') {
+ pr_warn("Can't find closing ']'\n");
+ return -EINVAL;
+ }
+ p++;
+ }
+
+ if (*p == '/') {
+ p++;
+ tmp = simple_strtoul(p, &p, 10);
+ if (tmp > mask) {
+ pr_warn("Subnet mask too large\n");
+ return -EINVAL;
+ }
+ if (tmp == 0) {
+ pr_warn("Subnet mask too small\n");
+ return -EINVAL;
+ }
+ mask = tmp;
+ }
+
+ if (*p) {
+ pr_warn("Invalid address\n");
+ return -EINVAL;
+ }
+
+ pref->subnet_mask = mask;
+ return 0;
+}
+
+enum cmp_ret {
+ CONTINUE_SEARCH,
+ INSERT_HERE,
+ EXACT_MATCH,
+ SUBNET_MATCH,
+};
+
+/*
+ * See if a candidate address matches a listed address.
+ */
+static enum cmp_ret afs_cmp_address_pref(const struct afs_addr_preference *a,
+ const struct afs_addr_preference *b)
+{
+ int subnet = min(a->subnet_mask, b->subnet_mask);
+ const __be32 *pa, *pb;
+ u32 mask, na, nb;
+ int diff;
+
+ if (a->family != b->family)
+ return INSERT_HERE;
+
+ switch (a->family) {
+ case AF_INET6:
+ pa = a->ipv6_addr.s6_addr32;
+ pb = b->ipv6_addr.s6_addr32;
+ break;
+ case AF_INET:
+ pa = &a->ipv4_addr.s_addr;
+ pb = &b->ipv4_addr.s_addr;
+ break;
+ }
+
+ while (subnet > 32) {
+ diff = ntohl(*pa++) - ntohl(*pb++);
+ if (diff < 0)
+ return INSERT_HERE; /* a<b */
+ if (diff > 0)
+ return CONTINUE_SEARCH; /* a>b */
+ subnet -= 32;
+ }
+
+ if (subnet == 0)
+ return EXACT_MATCH;
+
+ mask = 0xffffffffU << (32 - subnet);
+ na = ntohl(*pa);
+ nb = ntohl(*pb);
+ diff = (na & mask) - (nb & mask);
+ //kdebug("diff %08x %08x %08x %d", na, nb, mask, diff);
+ if (diff < 0)
+ return INSERT_HERE; /* a<b */
+ if (diff > 0)
+ return CONTINUE_SEARCH; /* a>b */
+ if (a->subnet_mask == b->subnet_mask)
+ return EXACT_MATCH;
+ if (a->subnet_mask > b->subnet_mask)
+ return SUBNET_MATCH; /* a binds tighter than b */
+ return CONTINUE_SEARCH; /* b binds tighter than a */
+}
+
+/*
+ * Insert an address preference.
+ */
+static int afs_insert_address_pref(struct afs_addr_preference_list **_preflist,
+ struct afs_addr_preference *pref,
+ int index)
+{
+ struct afs_addr_preference_list *preflist = *_preflist, *old = preflist;
+ size_t size, max_prefs;
+
+ _enter("{%u/%u/%u},%u", preflist->ipv6_off, preflist->nr, preflist->max_prefs, index);
+
+ if (preflist->nr == 255)
+ return -ENOSPC;
+ if (preflist->nr >= preflist->max_prefs) {
+ max_prefs = preflist->max_prefs + 1;
+ size = struct_size(preflist, prefs, max_prefs);
+ size = roundup_pow_of_two(size);
+ max_prefs = min_t(size_t, (size - sizeof(*preflist)) / sizeof(*pref), 255);
+ preflist = kmalloc(size, GFP_KERNEL);
+ if (!preflist)
+ return -ENOMEM;
+ *preflist = **_preflist;
+ preflist->max_prefs = max_prefs;
+ *_preflist = preflist;
+
+ if (index < preflist->nr)
+ memcpy(preflist->prefs + index + 1, old->prefs + index,
+ sizeof(*pref) * (preflist->nr - index));
+ if (index > 0)
+ memcpy(preflist->prefs, old->prefs, sizeof(*pref) * index);
+ } else {
+ if (index < preflist->nr)
+ memmove(preflist->prefs + index + 1, preflist->prefs + index,
+ sizeof(*pref) * (preflist->nr - index));
+ }
+
+ preflist->prefs[index] = *pref;
+ preflist->nr++;
+ if (pref->family == AF_INET)
+ preflist->ipv6_off++;
+ return 0;
+}
+
+/*
+ * Add an address preference.
+ * echo "add <proto> <IP>[/<mask>] <prior>" >/proc/fs/afs/addr_prefs
+ */
+static int afs_add_address_pref(struct afs_net *net, struct afs_addr_preference_list **_preflist,
+ int argc, char **argv)
+{
+ struct afs_addr_preference_list *preflist = *_preflist;
+ struct afs_addr_preference pref;
+ enum cmp_ret cmp;
+ int ret, i, stop;
+
+ if (argc != 3) {
+ pr_warn("Wrong number of params\n");
+ return -EINVAL;
+ }
+
+ if (strcmp(argv[0], "udp") != 0) {
+ pr_warn("Unsupported protocol\n");
+ return -EINVAL;
+ }
+
+ ret = afs_parse_address(argv[1], &pref);
+ if (ret < 0)
+ return ret;
+
+ ret = kstrtou16(argv[2], 10, &pref.prio);
+ if (ret < 0) {
+ pr_warn("Invalid priority\n");
+ return ret;
+ }
+
+ if (pref.family == AF_INET) {
+ i = 0;
+ stop = preflist->ipv6_off;
+ } else {
+ i = preflist->ipv6_off;
+ stop = preflist->nr;
+ }
+
+ for (; i < stop; i++) {
+ cmp = afs_cmp_address_pref(&pref, &preflist->prefs[i]);
+ switch (cmp) {
+ case CONTINUE_SEARCH:
+ continue;
+ case INSERT_HERE:
+ case SUBNET_MATCH:
+ return afs_insert_address_pref(_preflist, &pref, i);
+ case EXACT_MATCH:
+ preflist->prefs[i].prio = pref.prio;
+ return 0;
+ }
+ }
+
+ return afs_insert_address_pref(_preflist, &pref, i);
+}
+
+/*
+ * Delete an address preference.
+ */
+static int afs_delete_address_pref(struct afs_addr_preference_list **_preflist,
+ int index)
+{
+ struct afs_addr_preference_list *preflist = *_preflist;
+
+ _enter("{%u/%u/%u},%u", preflist->ipv6_off, preflist->nr, preflist->max_prefs, index);
+
+ if (preflist->nr == 0)
+ return -ENOENT;
+
+ if (index < preflist->nr - 1)
+ memmove(preflist->prefs + index, preflist->prefs + index + 1,
+ sizeof(preflist->prefs[0]) * (preflist->nr - index - 1));
+
+ if (index < preflist->ipv6_off)
+ preflist->ipv6_off--;
+ preflist->nr--;
+ return 0;
+}
+
+/*
+ * Delete an address preference.
+ * echo "del <proto> <IP>[/<mask>]" >/proc/fs/afs/addr_prefs
+ */
+static int afs_del_address_pref(struct afs_net *net, struct afs_addr_preference_list **_preflist,
+ int argc, char **argv)
+{
+ struct afs_addr_preference_list *preflist = *_preflist;
+ struct afs_addr_preference pref;
+ enum cmp_ret cmp;
+ int ret, i, stop;
+
+ if (argc != 2) {
+ pr_warn("Wrong number of params\n");
+ return -EINVAL;
+ }
+
+ if (strcmp(argv[0], "udp") != 0) {
+ pr_warn("Unsupported protocol\n");
+ return -EINVAL;
+ }
+
+ ret = afs_parse_address(argv[1], &pref);
+ if (ret < 0)
+ return ret;
+
+ if (pref.family == AF_INET) {
+ i = 0;
+ stop = preflist->ipv6_off;
+ } else {
+ i = preflist->ipv6_off;
+ stop = preflist->nr;
+ }
+
+ for (; i < stop; i++) {
+ cmp = afs_cmp_address_pref(&pref, &preflist->prefs[i]);
+ switch (cmp) {
+ case CONTINUE_SEARCH:
+ continue;
+ case INSERT_HERE:
+ case SUBNET_MATCH:
+ return 0;
+ case EXACT_MATCH:
+ return afs_delete_address_pref(_preflist, i);
+ }
+ }
+
+ return -ENOANO;
+}
+
+/*
+ * Handle writes to /proc/fs/afs/addr_prefs
+ */
+int afs_proc_addr_prefs_write(struct file *file, char *buf, size_t size)
+{
+ struct afs_addr_preference_list *preflist, *old;
+ struct seq_file *m = file->private_data;
+ struct afs_net *net = afs_seq2net_single(m);
+ size_t psize;
+ char *argv[5];
+ int ret, argc, max_prefs;
+
+ inode_lock(file_inode(file));
+
+ /* Allocate a candidate new list and initialise it from the old. */
+ old = rcu_dereference_protected(net->address_prefs,
+ lockdep_is_held(&file_inode(file)->i_rwsem));
+
+ if (old)
+ max_prefs = old->nr + 1;
+ else
+ max_prefs = 1;
+
+ psize = struct_size(old, prefs, max_prefs);
+ psize = roundup_pow_of_two(psize);
+ max_prefs = min_t(size_t, (psize - sizeof(*old)) / sizeof(old->prefs[0]), 255);
+
+ ret = -ENOMEM;
+ preflist = kmalloc(struct_size(preflist, prefs, max_prefs), GFP_KERNEL);
+ if (!preflist)
+ goto done;
+
+ if (old)
+ memcpy(preflist, old, struct_size(preflist, prefs, old->nr));
+ else
+ memset(preflist, 0, sizeof(*preflist));
+ preflist->max_prefs = max_prefs;
+
+ do {
+ argc = afs_split_string(&buf, argv, ARRAY_SIZE(argv));
+ if (argc < 0) {
+ ret = argc;
+ goto done;
+ }
+ if (argc < 2)
+ goto inval;
+
+ if (strcmp(argv[0], "add") == 0)
+ ret = afs_add_address_pref(net, &preflist, argc - 1, argv + 1);
+ else if (strcmp(argv[0], "del") == 0)
+ ret = afs_del_address_pref(net, &preflist, argc - 1, argv + 1);
+ else
+ goto inval;
+ if (ret < 0)
+ goto done;
+ } while (*buf);
+
+ preflist->version++;
+ rcu_assign_pointer(net->address_prefs, preflist);
+ /* Store prefs before version */
+ smp_store_release(&net->address_pref_version, preflist->version);
+ kfree_rcu(old, rcu);
+ preflist = NULL;
+ ret = 0;
+
+done:
+ kfree(preflist);
+ inode_unlock(file_inode(file));
+ _leave(" = %d", ret);
+ return ret;
+
+inval:
+ pr_warn("Invalid Command\n");
+ ret = -EINVAL;
+ goto done;
+}
+
+/*
+ * Mark the priorities on an address list if the address preferences table has
+ * changed. The caller must hold the RCU read lock.
+ */
+void afs_get_address_preferences_rcu(struct afs_net *net, struct afs_addr_list *alist)
+{
+ const struct afs_addr_preference_list *preflist =
+ rcu_dereference(net->address_prefs);
+ const struct sockaddr_in6 *sin6;
+ const struct sockaddr_in *sin;
+ const struct sockaddr *sa;
+ struct afs_addr_preference test;
+ enum cmp_ret cmp;
+ int i, j;
+
+ if (!preflist || !preflist->nr || !alist->nr_addrs ||
+ smp_load_acquire(&alist->addr_pref_version) == preflist->version)
+ return;
+
+ test.family = AF_INET;
+ test.subnet_mask = 32;
+ test.prio = 0;
+ for (i = 0; i < alist->nr_ipv4; i++) {
+ sa = rxrpc_kernel_remote_addr(alist->addrs[i].peer);
+ sin = (const struct sockaddr_in *)sa;
+ test.ipv4_addr = sin->sin_addr;
+ for (j = 0; j < preflist->ipv6_off; j++) {
+ cmp = afs_cmp_address_pref(&test, &preflist->prefs[j]);
+ switch (cmp) {
+ case CONTINUE_SEARCH:
+ continue;
+ case INSERT_HERE:
+ break;
+ case EXACT_MATCH:
+ case SUBNET_MATCH:
+ WRITE_ONCE(alist->addrs[i].prio, preflist->prefs[j].prio);
+ break;
+ }
+ }
+ }
+
+ test.family = AF_INET6;
+ test.subnet_mask = 128;
+ test.prio = 0;
+ for (; i < alist->nr_addrs; i++) {
+ sa = rxrpc_kernel_remote_addr(alist->addrs[i].peer);
+ sin6 = (const struct sockaddr_in6 *)sa;
+ test.ipv6_addr = sin6->sin6_addr;
+ for (j = preflist->ipv6_off; j < preflist->nr; j++) {
+ cmp = afs_cmp_address_pref(&test, &preflist->prefs[j]);
+ switch (cmp) {
+ case CONTINUE_SEARCH:
+ continue;
+ case INSERT_HERE:
+ break;
+ case EXACT_MATCH:
+ case SUBNET_MATCH:
+ WRITE_ONCE(alist->addrs[i].prio, preflist->prefs[j].prio);
+ break;
+ }
+ }
+ }
+
+ smp_store_release(&alist->addr_pref_version, preflist->version);
+}
+
+/*
+ * Mark the priorities on an address list if the address preferences table has
+ * changed. Avoid taking the RCU read lock if we can.
+ */
+void afs_get_address_preferences(struct afs_net *net, struct afs_addr_list *alist)
+{
+ if (!net->address_prefs ||
+ /* Load version before prefs */
+ smp_load_acquire(&net->address_pref_version) == alist->addr_pref_version)
+ return;
+
+ rcu_read_lock();
+ afs_get_address_preferences_rcu(net, alist);
+ rcu_read_unlock();
+}
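
The new addr_prefs.c lets userspace bias fileserver address selection through /proc/fs/afs/addr_prefs using the grammar documented in the comments above, e.g. echo "add udp 192.168.0.0/16 100" >/proc/fs/afs/addr_prefs (address and priority purely illustrative). When priorities are later stamped onto an address list, afs_cmp_address_pref() matches candidates against stored prefixes by masking the leading subnet bits; the IPv4 case boils down to roughly this standalone sketch (host byte order):

	#include <stdint.h>
	#include <stdbool.h>

	/* True if 'addr' falls within 'prefix'/'subnet_bits'. */
	static bool ipv4_prefix_matches(uint32_t addr, uint32_t prefix,
					unsigned int subnet_bits)
	{
		uint32_t mask = subnet_bits ? 0xffffffffU << (32 - subnet_bits) : 0;

		return (addr & mask) == (prefix & mask);
	}
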
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index 432cb4b23961..ec3db00bd081 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
-#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */
+#define AFS_MAXCELLNAME 253 /* Maximum length of a cell name (DNS limited) */
#define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */
#define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */
@@ -19,8 +19,8 @@
#define AFSPATHMAX 1024 /* Maximum length of a pathname plus NUL */
#define AFSOPAQUEMAX 1024 /* Maximum length of an opaque field */
-#define AFS_VL_MAX_LIFESPAN (120 * HZ)
-#define AFS_PROBE_MAX_LIFESPAN (30 * HZ)
+#define AFS_VL_MAX_LIFESPAN 120
+#define AFS_PROBE_MAX_LIFESPAN 30
typedef u64 afs_volid_t;
typedef u64 afs_vnodeid_t;
@@ -165,7 +165,8 @@ struct afs_status_cb {
* AFS volume synchronisation information
*/
struct afs_volsync {
- time64_t creation; /* volume creation time */
+ time64_t creation; /* Volume creation time (or TIME64_MIN) */
+ time64_t update; /* Volume update time (or TIME64_MIN) */
};
/*
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index 9c65ffb8a523..b835e25a2c02 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -13,6 +13,7 @@
#define AFS_VL_PORT 7003 /* volume location service port */
#define VL_SERVICE 52 /* RxRPC service ID for the Volume Location service */
#define YFS_VL_SERVICE 2503 /* Service ID for AuriStor upgraded VL service */
+#define YFS_VL_MAXCELLNAME 256 /* Maximum length of a cell name in YFS protocol */
enum AFSVL_Operations {
VLGETENTRYBYID = 503, /* AFS Get VLDB entry by ID */
@@ -134,13 +135,4 @@ struct afs_uvldbentry__xdr {
__be32 spares9;
};
-struct afs_address_list {
- refcount_t usage;
- unsigned int version;
- unsigned int nr_addrs;
- struct sockaddr_rxrpc addrs[];
-};
-
-extern void afs_put_address_list(struct afs_address_list *alist);
-
#endif /* AFS_VL_H */
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
deleted file mode 100644
index 037af93e3aba..000000000000
--- a/fs/afs/cache.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* AFS caching stuff
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/sched.h>
-#include "internal.h"
-
-static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
- const void *buffer,
- uint16_t buflen,
- loff_t object_size);
-
-struct fscache_netfs afs_cache_netfs = {
- .name = "afs",
- .version = 2,
-};
-
-struct fscache_cookie_def afs_cell_cache_index_def = {
- .name = "AFS.cell",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-struct fscache_cookie_def afs_volume_cache_index_def = {
- .name = "AFS.volume",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-struct fscache_cookie_def afs_vnode_cache_index_def = {
- .name = "AFS.vnode",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = afs_vnode_cache_check_aux,
-};
-
-/*
- * check that the auxiliary data indicates that the entry is still valid
- */
-static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
- const void *buffer,
- uint16_t buflen,
- loff_t object_size)
-{
- struct afs_vnode *vnode = cookie_netfs_data;
- struct afs_vnode_cache_aux aux;
-
- _enter("{%llx,%x,%llx},%p,%u",
- vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
- buffer, buflen);
-
- memcpy(&aux, buffer, sizeof(aux));
-
- /* check the size of the data is what we're expecting */
- if (buflen != sizeof(aux)) {
- _leave(" = OBSOLETE [len %hx != %zx]", buflen, sizeof(aux));
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- if (vnode->status.data_version != aux.data_version) {
- _leave(" = OBSOLETE [vers %llx != %llx]",
- aux.data_version, vnode->status.data_version);
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- _leave(" = SUCCESS");
- return FSCACHE_CHECKAUX_OKAY;
-}
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 1b4d5809808d..894d2bad6b6c 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -30,25 +30,23 @@ void afs_invalidate_mmap_work(struct work_struct *work)
{
struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
- unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+ unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
}
-void afs_server_init_callback_work(struct work_struct *work)
+static void afs_volume_init_callback(struct afs_volume *volume)
{
- struct afs_server *server = container_of(work, struct afs_server, initcb_work);
struct afs_vnode *vnode;
- struct afs_cell *cell = server->cell;
- down_read(&cell->fs_open_mmaps_lock);
+ down_read(&volume->open_mmaps_lock);
- list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
- if (vnode->cb_server == server) {
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- queue_work(system_unbound_wq, &vnode->cb_work);
+ list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
+ if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
+ queue_work(system_dfl_wq, &vnode->cb_work);
}
}
- up_read(&cell->fs_open_mmaps_lock);
+ up_read(&volume->open_mmaps_lock);
}
/*
@@ -57,15 +55,20 @@ void afs_server_init_callback_work(struct work_struct *work)
*/
void afs_init_callback_state(struct afs_server *server)
{
- rcu_read_lock();
- do {
- server->cb_s_break++;
- atomic_inc(&server->cell->fs_s_break);
- if (!list_empty(&server->cell->fs_open_mmaps))
- queue_work(system_unbound_wq, &server->initcb_work);
+ struct afs_server_entry *se;
- } while ((server = rcu_dereference(server->uuid_next)));
- rcu_read_unlock();
+ down_read(&server->cell->vs_lock);
+
+ list_for_each_entry(se, &server->volumes, slink) {
+ se->cb_expires_at = AFS_NO_CB_PROMISE;
+ se->volume->cb_expires_at = AFS_NO_CB_PROMISE;
+ trace_afs_cb_v_break(se->volume->vid, atomic_read(&se->volume->cb_v_break),
+ afs_cb_break_for_s_reinit);
+ if (!list_empty(&se->volume->open_mmaps))
+ afs_volume_init_callback(se->volume);
+ }
+
+ up_read(&server->cell->vs_lock);
}
/*
@@ -76,9 +79,9 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
_enter("");
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_cb_break)) {
vnode->cb_break++;
- vnode->cb_v_break = vnode->volume->cb_v_break;
+ vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
afs_clear_permits(vnode);
if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
@@ -87,7 +90,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
if (reason != afs_cb_break_for_deleted &&
vnode->status.type == AFS_FTYPE_FILE &&
atomic_read(&vnode->cb_nr_mmap))
- queue_work(system_unbound_wq, &vnode->cb_work);
+ queue_work(system_dfl_wq, &vnode->cb_work);
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
} else {
@@ -110,13 +113,14 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
{
struct afs_volume *volume = NULL;
struct rb_node *p;
- int seq = 0;
+ int seq = 1;
- do {
+ for (;;) {
/* Unfortunately, rbtree walking doesn't give reliable results
* under just the RCU read lock, so we have to check for
* changes.
*/
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
read_seqbegin_or_lock(&cell->volume_lock, &seq);
p = rcu_dereference_raw(cell->volumes.rb_node);
@@ -132,35 +136,63 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
volume = NULL;
}
- } while (need_seqretry(&cell->volume_lock, seq));
+ if (volume && afs_try_get_volume(volume, afs_volume_trace_get_callback))
+ break;
+ if (!need_seqretry(&cell->volume_lock, seq))
+ break;
+ seq |= 1; /* Want a lock next time */
+ }
done_seqretry(&cell->volume_lock, seq);
return volume;
}
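/*
 * Illustrative sketch (not part of this patch) of the seqlock retry pattern
 * used by afs_lookup_volume_rcu() above: the first pass (even sequence
 * number) walks the tree locklessly under RCU; if that raced with an update,
 * need_seqretry() requests a retry, and forcing the sequence odd makes
 * read_seqbegin_or_lock() take the lock on the next pass.  All names other
 * than the seqlock helpers are hypothetical.
 */
struct example_obj;

struct example_tree {
	seqlock_t	lock;
	struct rb_root	root;
};

static struct example_obj *example_lookup(struct example_tree *t, u64 key)
{
	struct example_obj *found;
	int seq = 0;

	rcu_read_lock();
	for (;;) {
		read_seqbegin_or_lock(&t->lock, &seq);
		found = example_walk_rcu(&t->root, key);	/* hypothetical walker */
		if (!need_seqretry(&t->lock, seq))
			break;
		seq = 1;	/* Want the lock next time */
	}
	done_seqretry(&t->lock, seq);
	rcu_read_unlock();
	return found;
}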
/*
+ * Allow the fileserver to break callbacks at the volume-level. This is
+ * typically done when, for example, a R/W volume is snapshotted to a R/O
+ * volume (the only way to change an R/O volume). It may also, however, happen
+ * when a volserver takes control of a volume (offlining it, moving it, etc.).
+ *
+ * Every file in that volume will need to be reevaluated.
+ */
+static void afs_break_volume_callback(struct afs_server *server,
+ struct afs_volume *volume)
+ __releases(RCU)
+{
+ struct afs_server_list *slist = rcu_dereference(volume->servers);
+ unsigned int i, cb_v_break;
+
+ write_lock(&volume->cb_v_break_lock);
+
+ for (i = 0; i < slist->nr_servers; i++)
+ if (slist->servers[i].server == server)
+ slist->servers[i].cb_expires_at = AFS_NO_CB_PROMISE;
+ volume->cb_expires_at = AFS_NO_CB_PROMISE;
+
+ cb_v_break = atomic_inc_return_release(&volume->cb_v_break);
+ trace_afs_cb_v_break(volume->vid, cb_v_break, afs_cb_break_for_volume_callback);
+
+ write_unlock(&volume->cb_v_break_lock);
+ rcu_read_unlock();
+
+ if (!list_empty(&volume->open_mmaps))
+ afs_volume_init_callback(volume);
+}
+
+/*
* allow the fileserver to explicitly break one callback
* - happens when
* - the backing file is changed
* - a lock is released
*/
-static void afs_break_one_callback(struct afs_volume *volume,
+static void afs_break_one_callback(struct afs_server *server,
+ struct afs_volume *volume,
struct afs_fid *fid)
{
struct super_block *sb;
struct afs_vnode *vnode;
struct inode *inode;
- if (fid->vnode == 0 && fid->unique == 0) {
- /* The callback break applies to an entire volume. */
- write_lock(&volume->cb_v_break_lock);
- volume->cb_v_break++;
- trace_afs_cb_break(fid, volume->cb_v_break,
- afs_cb_break_for_volume_callback, false);
- write_unlock(&volume->cb_v_break_lock);
- return;
- }
-
/* See if we can find a matching inode - even an I_NEW inode needs to
* be marked as it can have its callback broken before we finish
* setting up the local inode.
@@ -187,25 +219,35 @@ static void afs_break_some_callbacks(struct afs_server *server,
afs_volid_t vid = cbb->fid.vid;
size_t i;
+ rcu_read_lock();
volume = afs_lookup_volume_rcu(server->cell, vid);
+ if (cbb->fid.vnode == 0 && cbb->fid.unique == 0) {
+ afs_break_volume_callback(server, volume);
+ *_count -= 1;
+ if (*_count)
+ memmove(cbb, cbb + 1, sizeof(*cbb) * *_count);
+ } else {
+ /* TODO: Find all matching volumes if we couldn't match the server and
+ * break them anyway.
+ */
- /* TODO: Find all matching volumes if we couldn't match the server and
- * break them anyway.
- */
-
- for (i = *_count; i > 0; cbb++, i--) {
- if (cbb->fid.vid == vid) {
- _debug("- Fid { vl=%08llx n=%llu u=%u }",
- cbb->fid.vid,
- cbb->fid.vnode,
- cbb->fid.unique);
- --*_count;
- if (volume)
- afs_break_one_callback(volume, &cbb->fid);
- } else {
- *residue++ = *cbb;
+ for (i = *_count; i > 0; cbb++, i--) {
+ if (cbb->fid.vid == vid) {
+ _debug("- Fid { vl=%08llx n=%llu u=%u }",
+ cbb->fid.vid,
+ cbb->fid.vnode,
+ cbb->fid.unique);
+ --*_count;
+ if (volume)
+ afs_break_one_callback(server, volume, &cbb->fid);
+ } else {
+ *residue++ = *cbb;
+ }
}
+ rcu_read_unlock();
}
+
+ afs_put_volume(volume, afs_volume_trace_put_callback);
}
/*
@@ -218,11 +260,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
ASSERT(server != NULL);
- rcu_read_lock();
-
while (count > 0)
afs_break_some_callbacks(server, callbacks, &count);
-
- rcu_read_unlock();
- return;
}
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index d88407fb9bc0..71c10a05cebe 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -20,8 +20,9 @@ static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;
-static void afs_queue_cell_manager(struct afs_net *);
-static void afs_manage_cell_work(struct work_struct *);
+static void afs_cell_timer(struct timer_list *timer);
+static void afs_destroy_cell_work(struct work_struct *work);
+static void afs_manage_cell_work(struct work_struct *work);
static void afs_dec_cells_outstanding(struct afs_net *net)
{
@@ -29,19 +30,11 @@ static void afs_dec_cells_outstanding(struct afs_net *net)
wake_up_var(&net->cells_outstanding);
}
-/*
- * Set the cell timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
+static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
- if (net->live) {
- atomic_inc(&net->cells_outstanding);
- if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
- afs_dec_cells_outstanding(net);
- } else {
- afs_queue_cell_manager(net);
- }
+ smp_store_release(&cell->state, state); /* Commit cell changes before state */
+ smp_wmb(); /* Set cell state before task state */
+ wake_up_var(&cell->state);
}
/*
@@ -64,7 +57,8 @@ static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
return ERR_PTR(-ENAMETOOLONG);
if (!name) {
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell,
+ lockdep_is_held(&net->cells_lock));
if (!cell)
return ERR_PTR(-EDESTADDRREQ);
goto found;
@@ -115,7 +109,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
const char *addresses)
{
- struct afs_vlserver_list *vllist;
+ struct afs_vlserver_list *vllist = NULL;
struct afs_cell *cell;
int i, ret;
@@ -146,28 +140,37 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
return ERR_PTR(-ENOMEM);
}
- cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+ /* Allocate the cell name and the key name in one go. */
+ cell->name = kmalloc(1 + namelen + 1 +
+ 4 + namelen + 1, GFP_KERNEL);
if (!cell->name) {
kfree(cell);
return ERR_PTR(-ENOMEM);
}
- cell->net = net;
+ cell->name[0] = '.';
+ cell->name++;
cell->name_len = namelen;
for (i = 0; i < namelen; i++)
cell->name[i] = tolower(name[i]);
- cell->name[i] = 0;
+ cell->name[i++] = 0;
+
+ cell->key_desc = cell->name + i;
+ memcpy(cell->key_desc, "afs@", 4);
+ memcpy(cell->key_desc + 4, cell->name, cell->name_len + 1);
- atomic_set(&cell->ref, 1);
+ cell->net = net;
+ refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
+ INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
INIT_WORK(&cell->manager, afs_manage_cell_work);
+ timer_setup(&cell->management_timer, afs_cell_timer, 0);
+ init_rwsem(&cell->vs_lock);
cell->volumes = RB_ROOT;
INIT_HLIST_HEAD(&cell->proc_volumes);
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
- seqlock_init(&cell->fs_lock);
- INIT_LIST_HEAD(&cell->fs_open_mmaps);
- init_rwsem(&cell->fs_open_mmaps_lock);
+ init_rwsem(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
@@ -180,6 +183,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
VL_SERVICE, AFS_VL_PORT);
if (IS_ERR(vllist)) {
ret = PTR_ERR(vllist);
+ vllist = NULL;
goto parse_failed;
}
@@ -202,7 +206,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->dns_status = vllist->status;
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
atomic_inc(&net->cells_outstanding);
+ ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
+ 2, INT_MAX / 2, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cell->dynroot_ino = ret;
cell->debug_id = atomic_inc_return(&cell_debug_id);
+
trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
_leave(" = %p", cell);
@@ -212,7 +222,8 @@ parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
- kfree(cell->name);
+ afs_put_vlserverlist(cell->net, vllist);
+ kfree(cell->name - 1);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
@@ -224,7 +235,8 @@ error:
* @name: The name of the cell.
* @namesz: The strlen of the cell name.
* @vllist: A colon/comma separated list of numeric IP addresses or NULL.
- * @excl: T if an error should be given if the cell name already exists.
+ * @reason: The reason we're doing the lookup
+ * @trace: The reason to be logged if the lookup is successful.
*
* Look up a cell record by name and query the DNS for VL server addresses if
* needed. Note that the actual DNS query is punted off to the manager thread
@@ -233,19 +245,27 @@ error:
*/
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
- const char *vllist, bool excl)
+ const char *vllist,
+ enum afs_lookup_cell_for reason,
+ enum afs_cell_trace trace)
{
struct afs_cell *cell, *candidate, *cursor;
struct rb_node *parent, **pp;
enum afs_cell_state state;
int ret, n;
- _enter("%s,%s", name, vllist);
+ _enter("%s,%s,%u", name, vllist, reason);
- if (!excl) {
- cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
- if (!IS_ERR(cell))
+ if (reason != AFS_LOOKUP_CELL_PRELOAD) {
+ cell = afs_find_cell(net, name, namesz, trace);
+ if (!IS_ERR(cell)) {
+ if (reason == AFS_LOOKUP_CELL_DYNROOT)
+ goto no_wait;
+ if (cell->state == AFS_CELL_SETTING_UP ||
+ cell->state == AFS_CELL_UNLOOKED)
+ goto lookup_cell;
goto wait_for_cell;
+ }
}
/* Assume we're probably going to create a cell and preallocate and
@@ -286,29 +306,74 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
cell = candidate;
candidate = NULL;
- atomic_set(&cell->active, 2);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 2, afs_cell_trace_insert);
+ afs_use_cell(cell, trace);
rb_link_node_rcu(&cell->net_node, parent, pp);
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
- afs_queue_cell(cell, afs_cell_trace_get_queue_new);
+lookup_cell:
+ if (reason != AFS_LOOKUP_CELL_PRELOAD &&
+ reason != AFS_LOOKUP_CELL_ROOTCELL) {
+ set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
+ afs_queue_cell(cell, afs_cell_trace_queue_new);
+ }
wait_for_cell:
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), atomic_read(&cell->active),
- afs_cell_trace_wait);
- _debug("wait_for_cell");
- wait_var_event(&cell->state,
- ({
- state = smp_load_acquire(&cell->state); /* vs error */
- state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
- }));
+ state = smp_load_acquire(&cell->state); /* vs error */
+ switch (state) {
+ case AFS_CELL_ACTIVE:
+ case AFS_CELL_DEAD:
+ break;
+ case AFS_CELL_UNLOOKED:
+ default:
+ if (reason == AFS_LOOKUP_CELL_PRELOAD ||
+ reason == AFS_LOOKUP_CELL_ROOTCELL)
+ break;
+ _debug("wait_for_cell");
+ afs_see_cell(cell, afs_cell_trace_wait);
+ wait_var_event(&cell->state,
+ ({
+ state = smp_load_acquire(&cell->state); /* vs error */
+ state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
+ }));
+ _debug("waited_for_cell %d %d", cell->state, cell->error);
+ }
+no_wait:
/* Check the state obtained from the wait check. */
- if (state == AFS_CELL_REMOVED) {
+ state = smp_load_acquire(&cell->state); /* vs error */
+ if (state == AFS_CELL_DEAD) {
ret = cell->error;
goto error;
}
+ if (state == AFS_CELL_ACTIVE) {
+ switch (cell->dns_status) {
+ case DNS_LOOKUP_NOT_DONE:
+ if (cell->dns_source == DNS_RECORD_FROM_CONFIG) {
+ ret = 0;
+ break;
+ }
+ fallthrough;
+ default:
+ ret = -EIO;
+ goto error;
+ case DNS_LOOKUP_GOOD:
+ case DNS_LOOKUP_GOOD_WITH_BAD:
+ ret = 0;
+ break;
+ case DNS_LOOKUP_GOT_NOT_FOUND:
+ ret = -ENOENT;
+ goto error;
+ case DNS_LOOKUP_BAD:
+ ret = -EREMOTEIO;
+ goto error;
+ case DNS_LOOKUP_GOT_LOCAL_FAILURE:
+ case DNS_LOOKUP_GOT_TEMP_FAILURE:
+ case DNS_LOOKUP_GOT_NS_FAILURE:
+ ret = -EDESTADDRREQ;
+ goto error;
+ }
+ }
_leave(" = %p [cell]", cell);
return cell;
@@ -316,10 +381,10 @@ wait_for_cell:
cell_already_exists:
_debug("cell exists");
cell = cursor;
- if (excl) {
+ if (reason == AFS_LOOKUP_CELL_PRELOAD) {
ret = -EEXIST;
} else {
- afs_use_cell(cursor, afs_cell_trace_use_lookup);
+ afs_use_cell(cursor, trace);
ret = 0;
}
up_write(&net->cells_lock);
@@ -329,7 +394,7 @@ cell_already_exists:
goto wait_for_cell;
goto error_noput;
error:
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
_leave(" = %d [error]", ret);
return ERR_PTR(ret);
@@ -366,8 +431,18 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
len = cp - rootcell;
}
- /* allocate a cell record for the root cell */
- new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
+ if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
+ return -EINVAL;
+ if (memchr(rootcell, '/', len))
+ return -EINVAL;
+ cp = strstr(rootcell, "..");
+ if (cp && cp < rootcell + len)
+ return -EINVAL;
+
+ /* allocate a cell record for the root/workstation cell */
+ new_root = afs_lookup_cell(net, rootcell, len, vllist,
+ AFS_LOOKUP_CELL_ROOTCELL,
+ afs_cell_trace_use_lookup_ws);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
@@ -378,12 +453,11 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
/* install the new cell */
down_write(&net->cells_lock);
- afs_see_cell(new_root, afs_cell_trace_see_ws);
- old_root = net->ws_cell;
- net->ws_cell = new_root;
+ old_root = rcu_replace_pointer(net->ws_cell, new_root,
+ lockdep_is_held(&net->cells_lock));
up_write(&net->cells_lock);
- afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
+ afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
_leave(" = 0");
return 0;
}
@@ -409,10 +483,12 @@ static int afs_update_cell(struct afs_cell *cell)
if (ret == -ENOMEM)
goto out_wake;
- ret = -ENOMEM;
vllist = afs_alloc_vlserver_list(0);
- if (!vllist)
+ if (!vllist) {
+ if (ret >= 0)
+ ret = -ENOMEM;
goto out_wake;
+ }
switch (ret) {
case -ENODATA:
@@ -490,48 +566,33 @@ static void afs_cell_destroy(struct rcu_head *rcu)
{
struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
struct afs_net *net = cell->net;
- int u;
+ int r;
_enter("%p{%s}", cell, cell->name);
- u = atomic_read(&cell->ref);
- ASSERTCMP(u, ==, 0);
- trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), afs_cell_trace_free);
+ r = refcount_read(&cell->ref);
+ ASSERTCMP(r, ==, 0);
+ trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
- afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
+ afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
key_put(cell->anonymous_key);
- kfree(cell->name);
+ idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
+ kfree(cell->name - 1);
kfree(cell);
afs_dec_cells_outstanding(net);
_leave(" [destroyed]");
}
-/*
- * Queue the cell manager.
- */
-static void afs_queue_cell_manager(struct afs_net *net)
-{
- int outstanding = atomic_inc_return(&net->cells_outstanding);
-
- _enter("%d", outstanding);
-
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
-}
-
-/*
- * Cell management timer. We have an increment on cells_outstanding that we
- * need to pass along to the work item.
- */
-void afs_cells_timer(struct timer_list *timer)
+static void afs_destroy_cell_work(struct work_struct *work)
{
- struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
+ struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);
- _enter("");
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
+ afs_see_cell(cell, afs_cell_trace_destroy);
+ timer_delete_sync(&cell->management_timer);
+ cancel_work_sync(&cell->manager);
+ call_rcu(&cell->rcu, afs_cell_destroy);
}
/*
@@ -539,13 +600,10 @@ void afs_cells_timer(struct timer_list *timer)
*/
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- int u;
+ int r;
- if (atomic_read(&cell->ref) <= 0)
- BUG();
-
- u = atomic_inc_return(&cell->ref);
- trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), reason);
+ __refcount_inc(&cell->ref, &r);
+ trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
return cell;
}
@@ -556,15 +614,17 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
if (cell) {
unsigned int debug_id = cell->debug_id;
- unsigned int u, a;
+ unsigned int a;
+ bool zero;
+ int r;
a = atomic_read(&cell->active);
- u = atomic_dec_return(&cell->ref);
- trace_afs_cell(debug_id, u, a, reason);
- if (u == 0) {
+ zero = __refcount_dec_and_test(&cell->ref, &r);
+ trace_afs_cell(debug_id, r - 1, a, reason);
+ if (zero) {
a = atomic_read(&cell->active);
WARN(a != 0, "Cell active count %u > 0\n", a);
- call_rcu(&cell->rcu, afs_cell_destroy);
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
}
}
@@ -574,14 +634,11 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
*/
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- int u, a;
-
- if (atomic_read(&cell->ref) <= 0)
- BUG();
+ int r, a;
- u = atomic_read(&cell->ref);
+ __refcount_inc(&cell->ref, &r);
a = atomic_inc_return(&cell->active);
- trace_afs_cell(cell->debug_id, u, a, reason);
+ trace_afs_cell(cell->debug_id, r + 1, a, reason);
return cell;
}
@@ -589,11 +646,12 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
* Record a cell becoming less active. When the active counter reaches 1, it
* is scheduled for destruction, but may get reactivated.
*/
-void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
unsigned int debug_id;
time64_t now, expire_delay;
- int u, a;
+ bool zero;
+ int r, a;
if (!cell)
return;
@@ -607,13 +665,15 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
expire_delay = afs_cell_gc_delay;
debug_id = cell->debug_id;
- u = atomic_read(&cell->ref);
a = atomic_dec_return(&cell->active);
- trace_afs_cell(debug_id, u, a, reason);
- WARN_ON(a == 0);
- if (a == 1)
+ if (!a)
/* 'cell' may now be garbage collected. */
- afs_set_cell_timer(net, expire_delay);
+ afs_set_cell_timer(cell, expire_delay);
+
+ zero = __refcount_dec_and_test(&cell->ref, &r);
+ trace_afs_cell(debug_id, r - 1, a, reason);
+ if (zero)
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
/*
@@ -621,11 +681,11 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
*/
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- int u, a;
+ int r, a;
- u = atomic_read(&cell->ref);
+ r = refcount_read(&cell->ref);
a = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, u, a, reason);
+ trace_afs_cell(cell->debug_id, r, a, reason);
}
/*
@@ -633,36 +693,27 @@ void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
*/
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- afs_get_cell(cell, reason);
- if (!queue_work(afs_wq, &cell->manager))
- afs_put_cell(cell, afs_cell_trace_put_queue_fail);
+ queue_work(afs_wq, &cell->manager);
}
/*
- * Allocate a key to use as a placeholder for anonymous user security.
+ * Cell-specific management timer.
*/
-static int afs_alloc_anon_key(struct afs_cell *cell)
+static void afs_cell_timer(struct timer_list *timer)
{
- struct key *key;
- char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
-
- /* Create a key to represent an anonymous user. */
- memcpy(keyname, "afs@", 4);
- dp = keyname + 4;
- cp = cell->name;
- do {
- *dp++ = tolower(*cp);
- } while (*cp++);
+ struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);
- key = rxrpc_get_null_key(keyname);
- if (IS_ERR(key))
- return PTR_ERR(key);
-
- cell->anonymous_key = key;
+ afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
+ if (refcount_read(&cell->ref) > 0 && cell->net->live)
+ queue_work(afs_wq, &cell->manager);
+}
- _debug("anon key %p{%x}",
- cell->anonymous_key, key_serial(cell->anonymous_key));
- return 0;
+/*
+ * Set/reduce the cell timer.
+ */
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
+{
+ timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}
/*
@@ -674,19 +725,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
struct afs_cell *pcell;
int ret;
- if (!cell->anonymous_key) {
- ret = afs_alloc_anon_key(cell);
- if (ret < 0)
- return ret;
- }
-
-#ifdef CONFIG_AFS_FSCACHE
- cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
- &afs_cell_cache_index_def,
- cell->name, strlen(cell->name),
- NULL, 0,
- cell, 0, true);
-#endif
ret = afs_proc_cell_setup(cell);
if (ret < 0)
return ret;
@@ -704,7 +742,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
if (cell->proc_link.next)
cell->proc_link.next->pprev = &cell->proc_link.next;
- afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
}
@@ -719,247 +756,167 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
- hlist_del_rcu(&cell->proc_link);
- afs_dynroot_rmdir(net, cell);
+ if (!hlist_unhashed(&cell->proc_link))
+ hlist_del_rcu(&cell->proc_link);
mutex_unlock(&net->proc_cells_lock);
-#ifdef CONFIG_AFS_FSCACHE
- fscache_relinquish_cookie(cell->cache, NULL, false);
- cell->cache = NULL;
-#endif
-
_leave("");
}
+static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
+{
+ const struct afs_vlserver_list *vllist;
+ time64_t expire_at = cell->last_inactive;
+ time64_t now = ktime_get_real_seconds();
+
+ if (atomic_read(&cell->active))
+ return false;
+ if (!cell->net->live)
+ return true;
+
+ vllist = rcu_dereference_protected(cell->vl_servers, true);
+ if (vllist && vllist->nr_servers > 0)
+ expire_at += afs_cell_gc_delay;
+
+ if (expire_at <= now)
+ return true;
+ if (expire_at < *_next_manage)
+ *_next_manage = expire_at;
+ return false;
+}
+
/*
* Manage a cell record, initialising and destroying it, maintaining its DNS
* records.
*/
-static void afs_manage_cell(struct afs_cell *cell)
+static bool afs_manage_cell(struct afs_cell *cell)
{
struct afs_net *net = cell->net;
- int ret, active;
+ time64_t next_manage = TIME64_MAX;
+ int ret;
_enter("%s", cell->name);
-again:
_debug("state %u", cell->state);
switch (cell->state) {
- case AFS_CELL_INACTIVE:
- case AFS_CELL_FAILED:
- down_write(&net->cells_lock);
- active = 1;
- if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
- rb_erase(&cell->net_node, &net->cells);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 0,
- afs_cell_trace_unuse_delete);
- smp_store_release(&cell->state, AFS_CELL_REMOVED);
- }
- up_write(&net->cells_lock);
- if (cell->state == AFS_CELL_REMOVED) {
- wake_up_var(&cell->state);
- goto final_destruction;
- }
- if (cell->state == AFS_CELL_FAILED)
- goto done;
- smp_store_release(&cell->state, AFS_CELL_UNSET);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_UNSET:
- smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_ACTIVATING:
- ret = afs_activate_cell(net, cell);
- if (ret < 0)
- goto activation_failed;
+ case AFS_CELL_SETTING_UP:
+ goto set_up_cell;
+ case AFS_CELL_UNLOOKED:
+ case AFS_CELL_ACTIVE:
+ goto cell_is_active;
+ case AFS_CELL_REMOVING:
+ WARN_ON_ONCE(1);
+ return false;
+ case AFS_CELL_DEAD:
+ return false;
+ default:
+ _debug("bad state %u", cell->state);
+ WARN_ON_ONCE(1); /* Unhandled state */
+ return false;
+ }
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- goto again;
+set_up_cell:
+ ret = afs_activate_cell(net, cell);
+ if (ret < 0) {
+ cell->error = ret;
+ goto remove_cell;
+ }
- case AFS_CELL_ACTIVE:
- if (atomic_read(&cell->active) > 1) {
- if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
- ret = afs_update_cell(cell);
- if (ret < 0)
- cell->error = ret;
- }
- goto done;
- }
- smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
- wake_up_var(&cell->state);
- goto again;
+ afs_set_cell_state(cell, AFS_CELL_UNLOOKED);
- case AFS_CELL_DEACTIVATING:
- if (atomic_read(&cell->active) > 1)
- goto reverse_deactivation;
- afs_deactivate_cell(net, cell);
- smp_store_release(&cell->state, AFS_CELL_INACTIVE);
- wake_up_var(&cell->state);
- goto again;
+cell_is_active:
+ if (afs_has_cell_expired(cell, &next_manage))
+ goto remove_cell;
- case AFS_CELL_REMOVED:
- goto done;
+ if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
+ ret = afs_update_cell(cell);
+ if (ret < 0)
+ cell->error = ret;
+ if (cell->state == AFS_CELL_UNLOOKED)
+ afs_set_cell_state(cell, AFS_CELL_ACTIVE);
+ }
- default:
- break;
+ if (next_manage < TIME64_MAX && cell->net->live) {
+ time64_t now = ktime_get_real_seconds();
+
+ if (next_manage - now <= 0)
+ afs_queue_cell(cell, afs_cell_trace_queue_again);
+ else
+ afs_set_cell_timer(cell, next_manage - now);
}
- _debug("bad state %u", cell->state);
- BUG(); /* Unhandled state */
+ _leave(" [done %u]", cell->state);
+ return false;
-activation_failed:
- cell->error = ret;
- afs_deactivate_cell(net, cell);
+remove_cell:
+ down_write(&net->cells_lock);
- smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
- wake_up_var(&cell->state);
- goto again;
+ if (atomic_read(&cell->active)) {
+ up_write(&net->cells_lock);
+ goto cell_is_active;
+ }
-reverse_deactivation:
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- _leave(" [deact->act]");
- return;
+ /* Make sure that the expiring server records are going to see the fact
+ * that the cell is kaput.
+ */
+ afs_set_cell_state(cell, AFS_CELL_REMOVING);
-done:
- _leave(" [done %u]", cell->state);
- return;
+ afs_deactivate_cell(net, cell);
+ afs_purge_servers(cell);
+
+ rb_erase(&cell->net_node, &net->cells);
+ afs_see_cell(cell, afs_cell_trace_unuse_delete);
+ up_write(&net->cells_lock);
-final_destruction:
/* The root volume is pinning the cell */
- afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
+ afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
cell->root_volume = NULL;
- afs_put_cell(cell, afs_cell_trace_put_destroy);
+
+ afs_set_cell_state(cell, AFS_CELL_DEAD);
+ return true;
}
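/*
 * Descriptive note (not part of this patch): the state flow implemented
 * above is, in summary:
 *
 *	AFS_CELL_SETTING_UP --activation ok-----> AFS_CELL_UNLOOKED
 *	AFS_CELL_UNLOOKED --DNS lookup done-----> AFS_CELL_ACTIVE
 *	AFS_CELL_SETTING_UP --activation failed-------------------+
 *	AFS_CELL_UNLOOKED/ACTIVE --expired and unused--------------+--> AFS_CELL_REMOVING
 *	AFS_CELL_REMOVING --deactivated, servers purged---------------> AFS_CELL_DEAD
 *
 * afs_manage_cell() returns true only on the transition to AFS_CELL_DEAD,
 * which tells afs_manage_cell_work() below to drop the final reference.
 */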
static void afs_manage_cell_work(struct work_struct *work)
{
struct afs_cell *cell = container_of(work, struct afs_cell, manager);
+ bool final_put;
- afs_manage_cell(cell);
- afs_put_cell(cell, afs_cell_trace_put_queue_work);
+ afs_see_cell(cell, afs_cell_trace_manage);
+ final_put = afs_manage_cell(cell);
+ afs_see_cell(cell, afs_cell_trace_managed);
+ if (final_put)
+ afs_put_cell(cell, afs_cell_trace_put_final);
}
/*
- * Manage the records of cells known to a network namespace. This includes
- * updating the DNS records and garbage collecting unused cells that were
- * automatically added.
- *
- * Note that constructed cell records may only be removed from net->cells by
- * this work item, so it is safe for this work item to stash a cursor pointing
- * into the tree and then return to caller (provided it skips cells that are
- * still under construction).
- *
- * Note also that we were given an increment on net->cells_outstanding by
- * whoever queued us that we need to deal with before returning.
+ * Purge in-memory cell database.
*/
-void afs_manage_cells(struct work_struct *work)
+void afs_cell_purge(struct afs_net *net)
{
- struct afs_net *net = container_of(work, struct afs_net, cells_manager);
+ struct afs_cell *ws;
struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
_enter("");
- /* Trawl the cell database looking for cells that have expired from
- * lack of use and cells whose DNS results have expired and dispatch
- * their managers.
- */
- down_read(&net->cells_lock);
+ down_write(&net->cells_lock);
+ ws = rcu_replace_pointer(net->ws_cell, NULL,
+ lockdep_is_held(&net->cells_lock));
+ up_write(&net->cells_lock);
+ afs_unuse_cell(ws, afs_cell_trace_unuse_ws);
+ _debug("kick cells");
+ down_read(&net->cells_lock);
for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
- struct afs_cell *cell =
- rb_entry(cursor, struct afs_cell, net_node);
- unsigned active;
- bool sched_cell = false;
-
- active = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref),
- active, afs_cell_trace_manage);
-
- ASSERTCMP(active, >=, 1);
-
- if (purging) {
- if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
- active = atomic_dec_return(&cell->active);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref),
- active, afs_cell_trace_unuse_pin);
- }
- }
+ struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);
- if (active == 1) {
- struct afs_vlserver_list *vllist;
- time64_t expire_at = cell->last_inactive;
-
- read_lock(&cell->vl_servers_lock);
- vllist = rcu_dereference_protected(
- cell->vl_servers,
- lockdep_is_held(&cell->vl_servers_lock));
- if (vllist->nr_servers > 0)
- expire_at += afs_cell_gc_delay;
- read_unlock(&cell->vl_servers_lock);
- if (purging || expire_at <= now)
- sched_cell = true;
- else if (expire_at < next_manage)
- next_manage = expire_at;
- }
+ afs_see_cell(cell, afs_cell_trace_purge);
- if (!purging) {
- if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
- sched_cell = true;
- }
+ if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+ afs_unuse_cell(cell, afs_cell_trace_unuse_pin);
- if (sched_cell)
- afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
+ afs_queue_cell(cell, afs_cell_trace_queue_purge);
}
-
up_read(&net->cells_lock);
- /* Update the timer on the way out. We have to pass an increment on
- * cells_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
-
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->cells_manager))
- atomic_inc(&net->cells_outstanding);
- } else {
- afs_set_cell_timer(net, next_manage - now);
- }
- }
-
- afs_dec_cells_outstanding(net);
- _leave(" [%d]", atomic_read(&net->cells_outstanding));
-}
-
-/*
- * Purge in-memory cell database.
- */
-void afs_cell_purge(struct afs_net *net)
-{
- struct afs_cell *ws;
-
- _enter("");
-
- down_write(&net->cells_lock);
- ws = net->ws_cell;
- net->ws_cell = NULL;
- up_write(&net->cells_lock);
- afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);
-
- _debug("del timer");
- if (del_timer_sync(&net->cells_timer))
- atomic_dec(&net->cells_outstanding);
-
- _debug("kick mgr");
- afs_queue_cell_manager(net);
-
_debug("wait");
wait_var_event(&net->cells_outstanding,
!atomic_read(&net->cells_outstanding));
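/*
 * Illustrative sketch (not part of this patch) of the refcount_t tracing
 * pattern adopted above: the __refcount_*() variants report the value seen
 * before the operation, so the tracepoint can log the post-op count without
 * a separate, racy read.  trace_example(), example_free() and
 * struct example_obj are hypothetical.
 */
struct example_obj {
	refcount_t	ref;
	unsigned int	debug_id;
};

static void example_get(struct example_obj *obj)
{
	int r;

	__refcount_inc(&obj->ref, &r);		/* r = count before the inc */
	trace_example(obj->debug_id, r + 1);	/* log the new count */
}

static void example_put(struct example_obj *obj)
{
	unsigned int debug_id = obj->debug_id;
	bool zero;
	int r;

	zero = __refcount_dec_and_test(&obj->ref, &r);
	trace_example(debug_id, r - 1);		/* log the new count */
	if (zero)
		example_free(obj);
}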
diff --git a/fs/afs/cm_security.c b/fs/afs/cm_security.c
new file mode 100644
index 000000000000..edcbd249d202
--- /dev/null
+++ b/fs/afs/cm_security.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Cache manager security.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <crypto/krb5.h>
+#include "internal.h"
+#include "afs_cm.h"
+#include "afs_fs.h"
+#include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
+
+#define RXGK_SERVER_ENC_TOKEN 1036U // 0x40c
+#define xdr_round_up(x) (round_up((x), sizeof(__be32)))
+#define xdr_len_object(x) (4 + round_up((x), sizeof(__be32)))
+
+#ifdef CONFIG_RXGK
+static int afs_create_yfs_cm_token(struct sk_buff *challenge,
+ struct afs_server *server);
+#endif
+
+/*
+ * Respond to an RxGK challenge, adding appdata.
+ */
+static int afs_respond_to_challenge(struct sk_buff *challenge)
+{
+#ifdef CONFIG_RXGK
+ struct krb5_buffer appdata = {};
+ struct afs_server *server;
+#endif
+ struct rxrpc_peer *peer;
+ unsigned long peer_data;
+ u16 service_id;
+ u8 security_index;
+
+ rxrpc_kernel_query_challenge(challenge, &peer, &peer_data,
+ &service_id, &security_index);
+
+ _enter("%u,%u", service_id, security_index);
+
+ switch (service_id) {
+ /* We don't send CM_SERVICE RPCs, so don't expect a challenge
+ * therefrom.
+ */
+ case FS_SERVICE:
+ case VL_SERVICE:
+ case YFS_FS_SERVICE:
+ case YFS_VL_SERVICE:
+ break;
+ default:
+ pr_warn("Can't respond to unknown challenge %u:%u",
+ service_id, security_index);
+ return rxrpc_kernel_reject_challenge(challenge, RX_USER_ABORT, -EPROTO,
+ afs_abort_unsupported_sec_class);
+ }
+
+ switch (security_index) {
+#ifdef CONFIG_RXKAD
+ case RXRPC_SECURITY_RXKAD:
+ return rxkad_kernel_respond_to_challenge(challenge);
+#endif
+
+#ifdef CONFIG_RXGK
+ case RXRPC_SECURITY_RXGK:
+ return rxgk_kernel_respond_to_challenge(challenge, &appdata);
+
+ case RXRPC_SECURITY_YFS_RXGK:
+ switch (service_id) {
+ case FS_SERVICE:
+ case YFS_FS_SERVICE:
+ server = (struct afs_server *)peer_data;
+ if (!server->cm_rxgk_appdata.data) {
+ mutex_lock(&server->cm_token_lock);
+ if (!server->cm_rxgk_appdata.data)
+ afs_create_yfs_cm_token(challenge, server);
+ mutex_unlock(&server->cm_token_lock);
+ }
+ if (server->cm_rxgk_appdata.data)
+ appdata = server->cm_rxgk_appdata;
+ break;
+ }
+ return rxgk_kernel_respond_to_challenge(challenge, &appdata);
+#endif
+
+ default:
+ return rxrpc_kernel_reject_challenge(challenge, RX_USER_ABORT, -EPROTO,
+ afs_abort_unsupported_sec_class);
+ }
+}
+
+/*
+ * Process the OOB message queue, processing challenge packets.
+ */
+void afs_process_oob_queue(struct work_struct *work)
+{
+ struct afs_net *net = container_of(work, struct afs_net, rx_oob_work);
+ struct sk_buff *oob;
+ enum rxrpc_oob_type type;
+
+ while ((oob = rxrpc_kernel_dequeue_oob(net->socket, &type))) {
+ switch (type) {
+ case RXRPC_OOB_CHALLENGE:
+ afs_respond_to_challenge(oob);
+ break;
+ }
+ rxrpc_kernel_free_oob(oob);
+ }
+}
+
+#ifdef CONFIG_RXGK
+/*
+ * Create a securities keyring for the cache manager and attach a key to it for
+ * the RxGK tokens we want to use to secure the callback connection back from
+ * the fileserver.
+ */
+int afs_create_token_key(struct afs_net *net, struct socket *socket)
+{
+ const struct krb5_enctype *krb5;
+ struct key *ring;
+ key_ref_t key;
+ char K0[32], *desc;
+ int ret;
+
+ ring = keyring_alloc("kafs",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+ KEY_POS_SEARCH | KEY_POS_WRITE |
+ KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA,
+ NULL, NULL);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+
+ ret = rxrpc_sock_set_security_keyring(socket->sk, ring);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENOPKG;
+ krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96);
+ if (!krb5)
+ goto out;
+
+ if (WARN_ON_ONCE(krb5->key_len > sizeof(K0)))
+ goto out;
+
+ ret = -ENOMEM;
+ desc = kasprintf(GFP_KERNEL, "%u:%u:%u:%u",
+ YFS_CM_SERVICE, RXRPC_SECURITY_YFS_RXGK, 1, krb5->etype);
+ if (!desc)
+ goto out;
+
+ wait_for_random_bytes();
+ get_random_bytes(K0, krb5->key_len);
+
+ key = key_create(make_key_ref(ring, true),
+ "rxrpc_s", desc,
+ K0, krb5->key_len,
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_USR_VIEW,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ kfree(desc);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto out;
+ }
+
+ net->fs_cm_token_key = key_ref_to_ptr(key);
+ ret = 0;
+out:
+ key_put(ring);
+ return ret;
+}
+
+/*
+ * Create a YFS RxGK GSS token to use as a ticket to the specified fileserver.
+ */
+static int afs_create_yfs_cm_token(struct sk_buff *challenge,
+ struct afs_server *server)
+{
+ const struct krb5_enctype *conn_krb5, *token_krb5;
+ const struct krb5_buffer *token_key;
+ struct crypto_aead *aead;
+ struct scatterlist sg;
+ struct afs_net *net = server->cell->net;
+ const struct key *key = net->fs_cm_token_key;
+ size_t keysize, uuidsize, authsize, toksize, encsize, contsize, adatasize, offset;
+ __be32 caps[1] = {
+ [0] = htonl(AFS_CAP_ERROR_TRANSLATION),
+ };
+ __be32 *xdr;
+ void *appdata, *K0, *encbase;
+ u32 enctype;
+ int ret;
+
+ if (!key)
+ return -ENOKEY;
+
+ /* Assume that the fileserver is happy to use the same encoding type as
+ * we were told to use by the token obtained by the user.
+ */
+ enctype = rxgk_kernel_query_challenge(challenge);
+
+ conn_krb5 = crypto_krb5_find_enctype(enctype);
+ if (!conn_krb5)
+ return -ENOPKG;
+ token_krb5 = key->payload.data[0];
+ token_key = (const struct krb5_buffer *)&key->payload.data[2];
+
+ /* struct rxgk_key {
+ * afs_uint32 enctype;
+ * opaque key<>;
+ * };
+ */
+ keysize = 4 + xdr_len_object(conn_krb5->key_len);
+
+ /* struct RXGK_AuthName {
+ * afs_int32 kind;
+ * opaque data<AUTHDATAMAX>;
+ * opaque display<AUTHPRINTABLEMAX>;
+ * };
+ */
+ uuidsize = sizeof(server->uuid);
+ authsize = 4 + xdr_len_object(uuidsize) + xdr_len_object(0);
+
+ /* struct RXGK_Token {
+ * rxgk_key K0;
+ * RXGK_Level level;
+ * rxgkTime starttime;
+ * afs_int32 lifetime;
+ * afs_int32 bytelife;
+ * rxgkTime expirationtime;
+ * struct RXGK_AuthName identities<>;
+ * };
+ */
+ toksize = keysize + 8 + 4 + 4 + 8 + xdr_len_object(authsize);
+
+ offset = 0;
+ encsize = crypto_krb5_how_much_buffer(token_krb5, KRB5_ENCRYPT_MODE, toksize, &offset);
+
+ /* struct RXGK_TokenContainer {
+ * afs_int32 kvno;
+ * afs_int32 enctype;
+ * opaque encrypted_token<>;
+ * };
+ */
+ contsize = 4 + 4 + xdr_len_object(encsize);
+
+ /* struct YFSAppData {
+ * opr_uuid initiatorUuid;
+ * opr_uuid acceptorUuid;
+ * Capabilities caps;
+ * afs_int32 enctype;
+ * opaque callbackKey<>;
+ * opaque callbackToken<>;
+ * };
+ */
+ adatasize = 16 + 16 +
+ xdr_len_object(sizeof(caps)) +
+ 4 +
+ xdr_len_object(conn_krb5->key_len) +
+ xdr_len_object(contsize);
+
+ ret = -ENOMEM;
+ appdata = kzalloc(adatasize, GFP_KERNEL);
+ if (!appdata)
+ goto out;
+ xdr = appdata;
+
+ memcpy(xdr, &net->uuid, 16); /* appdata.initiatorUuid */
+ xdr += 16 / 4;
+ memcpy(xdr, &server->uuid, 16); /* appdata.acceptorUuid */
+ xdr += 16 / 4;
+ *xdr++ = htonl(ARRAY_SIZE(caps)); /* appdata.caps.len */
+ memcpy(xdr, &caps, sizeof(caps)); /* appdata.caps */
+ xdr += ARRAY_SIZE(caps);
+ *xdr++ = htonl(conn_krb5->etype); /* appdata.enctype */
+
+ *xdr++ = htonl(conn_krb5->key_len); /* appdata.callbackKey.len */
+ K0 = xdr;
+ get_random_bytes(K0, conn_krb5->key_len); /* appdata.callbackKey.data */
+ xdr += xdr_round_up(conn_krb5->key_len) / 4;
+
+ *xdr++ = htonl(contsize); /* appdata.callbackToken.len */
+ *xdr++ = htonl(1); /* cont.kvno */
+ *xdr++ = htonl(token_krb5->etype); /* cont.enctype */
+ *xdr++ = htonl(encsize); /* cont.encrypted_token.len */
+
+ encbase = xdr;
+ xdr += offset / 4;
+ *xdr++ = htonl(conn_krb5->etype); /* token.K0.enctype */
+ *xdr++ = htonl(conn_krb5->key_len); /* token.K0.key.len */
+ memcpy(xdr, K0, conn_krb5->key_len); /* token.K0.key.data */
+ xdr += xdr_round_up(conn_krb5->key_len) / 4;
+
+ *xdr++ = htonl(RXRPC_SECURITY_ENCRYPT); /* token.level */
+ *xdr++ = htonl(0); /* token.starttime */
+ *xdr++ = htonl(0); /* " */
+ *xdr++ = htonl(0); /* token.lifetime */
+ *xdr++ = htonl(0); /* token.bytelife */
+ *xdr++ = htonl(0); /* token.expirationtime */
+ *xdr++ = htonl(0); /* " */
+ *xdr++ = htonl(1); /* token.identities.count */
+ *xdr++ = htonl(0); /* token.identities[0].kind */
+ *xdr++ = htonl(uuidsize); /* token.identities[0].data.len */
+ memcpy(xdr, &server->uuid, uuidsize);
+ xdr += xdr_round_up(uuidsize) / 4;
+ *xdr++ = htonl(0); /* token.identities[0].display.len */
+
+ xdr = encbase + xdr_round_up(encsize);
+
+ if ((unsigned long)xdr - (unsigned long)appdata != adatasize)
+ pr_err("Appdata size incorrect %lx != %zx\n",
+ (unsigned long)xdr - (unsigned long)appdata, adatasize);
+
+ aead = crypto_krb5_prepare_encryption(token_krb5, token_key, RXGK_SERVER_ENC_TOKEN,
+ GFP_KERNEL);
+ if (IS_ERR(aead)) {
+ ret = PTR_ERR(aead);
+ goto out_token;
+ }
+
+ sg_init_one(&sg, encbase, encsize);
+ ret = crypto_krb5_encrypt(token_krb5, aead, &sg, 1, encsize, offset, toksize, false);
+ if (ret < 0)
+ goto out_aead;
+
+ server->cm_rxgk_appdata.len = adatasize;
+ server->cm_rxgk_appdata.data = appdata;
+ appdata = NULL;
+
+out_aead:
+ crypto_free_aead(aead);
+out_token:
+ kfree(appdata);
+out:
+ return ret;
+}
+#endif /* CONFIG_RXGK */
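/*
 * Worked example (descriptive note, not part of this patch) of the XDR size
 * arithmetic above, assuming a 16-byte connection key (aes128-cts) and a
 * 16-byte UUID:
 *
 *	xdr_len_object(16) = 4-byte length word + round_up(16, 4) = 20
 *	keysize  = 4 (enctype) + xdr_len_object(16)               = 24
 *	authsize = 4 (kind) + xdr_len_object(16) + xdr_len_object(0)
 *	         = 4 + 20 + 4                                      = 28
 *
 * toksize, contsize and adatasize are built the same way; encsize further
 * adds the enctype's confounder/checksum overhead via
 * crypto_krb5_how_much_buffer() before being wrapped in the token container.
 */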
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index a3f5de28be79..1a906805a9e3 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -13,6 +13,8 @@
#include "internal.h"
#include "afs_cm.h"
#include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
static int afs_deliver_cb_init_call_back_state(struct afs_call *);
static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
@@ -137,48 +139,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
}
/*
- * Find the server record by peer address and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_peer(struct afs_call *call)
-{
- struct sockaddr_rxrpc srx;
- struct afs_server *server;
-
- rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
-
- server = afs_find_server(call->net, &srx);
- if (!server) {
- trace_afs_cm_no_server(call, &srx);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
- * Find the server record by server UUID and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_uuid(struct afs_call *call,
- struct afs_uuid *uuid)
-{
- struct afs_server *server;
-
- rcu_read_lock();
- server = afs_find_server_by_uuid(call->net, call->request);
- rcu_read_unlock();
- if (!server) {
- trace_afs_cm_no_server_u(call, call->request);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
* Clean up a cache manager call.
*/
static void afs_cm_destructor(struct afs_call *call)
@@ -191,7 +151,7 @@ static void afs_cm_destructor(struct afs_call *call)
* Abort a service call from within an action function.
*/
static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
- const char *why)
+ enum rxrpc_abort_reason why)
{
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, error, why);
@@ -212,8 +172,8 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
* to maintain cache coherency.
*/
if (call->server) {
- trace_afs_server(call->server,
- atomic_read(&call->server->ref),
+ trace_afs_server(call->server->debug_id,
+ refcount_read(&call->server->ref),
atomic_read(&call->server->active),
afs_server_trace_callback);
afs_break_callbacks(call->server, call->count, call->request);
@@ -298,7 +258,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (call->count2 != call->count && call->count2 != 0)
return afs_protocol_error(call, afs_eproto_cb_count);
call->iter = &call->def_iter;
- iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
+ iov_iter_discard(&call->def_iter, ITER_DEST, call->count2 * 3 * 4);
call->unmarshall++;
fallthrough;
@@ -319,10 +279,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -346,18 +303,10 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
*/
static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
- int ret;
-
_enter("");
afs_extract_discard(call, 0);
- ret = afs_extract_data(call, false);
- if (ret < 0)
- return ret;
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return afs_extract_data(call, false);
}
/*
@@ -370,8 +319,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
__be32 *b;
int ret;
- _enter("");
-
_enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
@@ -418,9 +365,13 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_uuid(call, call->request);
+ if (memcmp(call->request, &call->server->_uuid, sizeof(call->server->_uuid)) != 0) {
+ pr_notice("Callback UUID does not match fileserver UUID\n");
+ trace_afs_cm_no_server_u(call, call->request);
+ return 0;
+ }
+
+ return 0;
}
/*
@@ -452,7 +403,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -469,7 +420,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
afs_send_empty_reply(call);
else
- afs_abort_service_call(call, 1, 1, "K-1");
+ afs_abort_service_call(call, 1, 1, afs_abort_probeuuid_negative);
afs_put_call(call);
_leave("");
@@ -530,7 +481,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -590,7 +541,7 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -664,9 +615,5 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* We'll need the file server record as that tells us which set of
- * vnodes to operate upon.
- */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index da9b4f8577a1..f4e9e12373ac 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -12,6 +12,8 @@
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/sched.h>
+#include <linux/iversion.h>
+#include <linux/iov_iter.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
#include "afs_fs.h"
@@ -21,34 +23,27 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
static int afs_readdir(struct file *file, struct dir_context *ctx);
-static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
+static int afs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
static int afs_d_delete(const struct dentry *dentry);
static void afs_d_iput(struct dentry *dentry, struct inode *inode);
-static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen,
+static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen,
loff_t fpos, u64 ino, unsigned dtype);
-static int afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen,
+static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen,
loff_t fpos, u64 ino, unsigned dtype);
-static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl);
-static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode);
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
static int afs_link(struct dentry *from, struct inode *dir,
struct dentry *dentry);
-static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *content);
-static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags);
-static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
-static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-
-static int afs_dir_set_page_dirty(struct page *page)
-{
- BUG(); /* This should never happen. */
-}
const struct file_operations afs_dir_file_operations = {
.open = afs_dir_open,
@@ -73,9 +68,7 @@ const struct inode_operations afs_dir_inode_operations = {
};
const struct address_space_operations afs_dir_aops = {
- .set_page_dirty = afs_dir_set_page_dirty,
- .releasepage = afs_dir_releasepage,
- .invalidatepage = afs_dir_invalidatepage,
+ .writepages = afs_single_writepages,
};
const struct dentry_operations afs_fs_dentry_operations = {
@@ -96,152 +89,124 @@ struct afs_lookup_one_cookie {
struct afs_lookup_cookie {
struct dir_context ctx;
struct qstr name;
- bool found;
- bool one_only;
unsigned short nr_fids;
struct afs_fid fids[50];
};
+static void afs_dir_unuse_cookie(struct afs_vnode *dvnode, int ret)
+{
+ if (ret == 0) {
+ struct afs_vnode_cache_aux aux;
+ loff_t i_size = i_size_read(&dvnode->netfs.inode);
+
+ afs_set_cache_aux(dvnode, &aux);
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), &aux, &i_size);
+ } else {
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL);
+ }
+}
+
/*
- * Drop the refs that we're holding on the folios we were reading into. We've
- * got refs on the first nr_pages pages.
+ * Iterate through a kmapped directory segment, dumping a summary of
+ * the contents.
*/
-static void afs_dir_read_cleanup(struct afs_read *req)
+static size_t afs_dir_dump_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- struct address_space *mapping = req->vnode->vfs_inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
+ do {
+ union afs_xdr_dir_block *block = iter_base;
- XA_STATE(xas, &mapping->i_pages, 0);
+ pr_warn("[%05zx] %32phN\n", progress, block);
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ progress += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
- if (unlikely(!req->nr_pages))
- return;
+ return len;
+}
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
- BUG_ON(xa_is_value(folio));
- ASSERTCMP(folio_file_mapping(folio), ==, mapping);
+/*
+ * Dump the contents of a directory.
+ */
+static void afs_dir_dump(struct afs_vnode *dvnode)
+{
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(&dvnode->netfs.inode);
- folio_put(folio);
- }
+ pr_warn("DIR %llx:%llx is=%llx\n",
+ dvnode->fid.vid, dvnode->fid.vnode, i_size);
- rcu_read_unlock();
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ iterate_folioq(&iter, iov_iter_count(&iter), NULL, NULL,
+ afs_dir_dump_step);
}
/*
* check that a directory folio is valid
*/
-static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
- loff_t i_size)
+static bool afs_dir_check_block(struct afs_vnode *dvnode, size_t progress,
+ union afs_xdr_dir_block *block)
{
- union afs_xdr_dir_block *block;
- size_t offset, size;
- loff_t pos;
+ if (block->hdr.magic != AFS_DIR_MAGIC) {
+ pr_warn("%s(%lx): [%zx] bad magic %04x\n",
+ __func__, dvnode->netfs.inode.i_ino,
+ progress, ntohs(block->hdr.magic));
+ trace_afs_dir_check_failed(dvnode, progress);
+ trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
+ return false;
+ }
- /* Determine how many magic numbers there should be in this folio, but
- * we must take care because the directory may change size under us.
+ /* Make sure each block is NUL terminated so we can reasonably
+ * use string functions on it. The filenames in the folio
+ * *should* be NUL-terminated anyway.
*/
- pos = folio_pos(folio);
- if (i_size <= pos)
- goto checked;
-
- size = min_t(loff_t, folio_size(folio), i_size - pos);
- for (offset = 0; offset < size; offset += sizeof(*block)) {
- block = kmap_local_folio(folio, offset);
- if (block->hdr.magic != AFS_DIR_MAGIC) {
- printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
- __func__, dvnode->vfs_inode.i_ino,
- pos, offset, size, ntohs(block->hdr.magic));
- trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
- kunmap_local(block);
- trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
- goto error;
- }
-
- /* Make sure each block is NUL terminated so we can reasonably
- * use string functions on it. The filenames in the folio
- * *should* be NUL-terminated anyway.
- */
- ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
-
- kunmap_local(block);
- }
-checked:
+ ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
afs_stat_v(dvnode, n_read_dir);
return true;
-
-error:
- return false;
}
/*
- * Dump the contents of a directory.
+ * Iterate through a kmapped directory segment, checking the content.
*/
-static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
+static size_t afs_dir_check_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- union afs_xdr_dir_block *block;
- struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
- size_t offset, size;
-
- XA_STATE(xas, &mapping->i_pages, 0);
-
- pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n",
- dvnode->fid.vid, dvnode->fid.vnode,
- req->file_size, req->len, req->actual_len);
- pr_warn("DIR %llx %x %zx %zx\n",
- req->pos, req->nr_pages,
- req->iter->iov_offset, iov_iter_count(req->iter));
-
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
+ struct afs_vnode *dvnode = priv;
- BUG_ON(folio_file_mapping(folio) != mapping);
+ if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE ||
+ len % AFS_DIR_BLOCK_SIZE))
+ return len;
- size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
- for (offset = 0; offset < size; offset += sizeof(*block)) {
- block = kmap_local_folio(folio, offset);
- pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
- kunmap_local(block);
- }
- }
+ do {
+ if (!afs_dir_check_block(dvnode, progress, iter_base))
+ break;
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
+
+ return len;
}
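/*
 * Descriptive note (not part of this patch): the step callbacks above follow
 * the iterate_folioq() contract - each call is handed a kmapped span of the
 * folio_queue buffer and returns the number of bytes it did *not* process.
 * Returning a non-zero remainder (as afs_dir_check_step() does on a bad
 * block) stops the iteration early, so afs_dir_check() can detect corruption
 * by comparing the total processed against i_size.
 */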
/*
- * Check all the blocks in a directory. All the folios are held pinned.
+ * Check all the blocks in a directory.
*/
-static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
+static int afs_dir_check(struct afs_vnode *dvnode)
{
- struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
- int ret = 0;
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(&dvnode->netfs.inode);
+ size_t checked = 0;
- XA_STATE(xas, &mapping->i_pages, 0);
-
- if (unlikely(!req->nr_pages))
+ if (unlikely(!i_size))
return 0;
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
-
- BUG_ON(folio_file_mapping(folio) != mapping);
-
- if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
- afs_dir_dump(dvnode, req);
- ret = -EIO;
- break;
- }
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ checked = iterate_folioq(&iter, iov_iter_count(&iter), dvnode, NULL,
+ afs_dir_check_step);
+ if (checked != i_size) {
+ afs_dir_dump(dvnode);
+ return -EIO;
}
-
- rcu_read_unlock();
- return ret;
+ return 0;
}
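
For reference, the check above reduces to a fixed-stride walk over a buffer made up of 2048-byte directory blocks, each of which must begin with the directory magic. Below is a standalone, userspace sketch of that walk; the constants and the header struct are simplified stand-ins, not the kernel's AFS_DIR_BLOCK_SIZE/AFS_DIR_MAGIC or union afs_xdr_dir_block:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EX_DIR_BLOCK_SIZE 2048		/* stand-in for AFS_DIR_BLOCK_SIZE */
#define EX_DIR_MAGIC 0x1234		/* stand-in for the directory magic */

struct ex_dir_block_hdr {
	uint16_t magic;			/* must match EX_DIR_MAGIC */
	uint8_t reserved[6];
};

/* Walk the buffer block by block; return the number of unchecked bytes
 * (0 means every block had a valid header). */
static size_t ex_check_blocks(const uint8_t *base, size_t len)
{
	while (len >= EX_DIR_BLOCK_SIZE) {
		const struct ex_dir_block_hdr *hdr = (const void *)base;

		if (hdr->magic != EX_DIR_MAGIC)
			break;		/* bad block: stop; caller sees len > 0 */
		base += EX_DIR_BLOCK_SIZE;
		len -= EX_DIR_BLOCK_SIZE;
	}
	return len;
}

int main(void)
{
	static uint8_t buf[2 * EX_DIR_BLOCK_SIZE];
	struct ex_dir_block_hdr *hdr;

	hdr = (void *)buf;
	hdr->magic = EX_DIR_MAGIC;
	hdr = (void *)(buf + EX_DIR_BLOCK_SIZE);
	hdr->magic = EX_DIR_MAGIC;

	printf("unchecked bytes: %zu\n", ex_check_blocks(buf, sizeof(buf)));
	return 0;
}

As in the patch, a non-zero return signals how much of the buffer was left unvalidated, which is the condition the caller uses to decide the directory is bad.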
/*
@@ -261,130 +226,140 @@ static int afs_dir_open(struct inode *inode, struct file *file)
}
/*
- * Read the directory into the pagecache in one go, scrubbing the previous
- * contents. The list of folios is returned, pinning them so that they don't
- * get reclaimed during the iteration.
+ * Read a file in a single download.
*/
-static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
- __acquires(&dvnode->validate_lock)
+static ssize_t afs_do_read_single(struct afs_vnode *dvnode, struct file *file)
{
- struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct afs_read *req;
+ struct iov_iter iter;
+ ssize_t ret;
loff_t i_size;
- int nr_pages, i;
- int ret;
+ bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) &&
+ !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags));
+
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (is_dir) {
+ if (i_size < AFS_DIR_BLOCK_SIZE)
+ return afs_bad(dvnode, afs_file_error_dir_small);
+ if (i_size > AFS_DIR_BLOCK_SIZE * 1024) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
+ } else {
+ if (i_size > AFSPATHMAX) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
+ }
- _enter("");
+ /* Expand the storage. TODO: Shrink the storage too. */
+ if (dvnode->directory_size < i_size) {
+ size_t cur_size = dvnode->directory_size;
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
+ ret = netfs_alloc_folioq_buffer(NULL,
+ &dvnode->directory, &cur_size, i_size,
+ mapping_gfp_mask(dvnode->netfs.inode.i_mapping));
+ dvnode->directory_size = cur_size;
+ if (ret < 0)
+ return ret;
+ }
- refcount_set(&req->usage, 1);
- req->vnode = dvnode;
- req->key = key_get(key);
- req->cleanup = afs_dir_read_cleanup;
+ iov_iter_folio_queue(&iter, ITER_DEST, dvnode->directory, 0, 0, dvnode->directory_size);
-expand:
- i_size = i_size_read(&dvnode->vfs_inode);
- if (i_size < 2048) {
- ret = afs_bad(dvnode, afs_file_error_dir_small);
- goto error;
- }
- if (i_size > 2048 * 1024) {
- trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
- ret = -EFBIG;
- goto error;
+ /* AFS requires us to perform the read of a directory synchronously as
+ * a single unit to avoid issues with the directory contents being
+ * changed between reads.
+ */
+ ret = netfs_read_single(&dvnode->netfs.inode, file, &iter);
+ if (ret >= 0) {
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size > ret) {
+ /* The content has grown, so we need to expand the
+ * buffer.
+ */
+ ret = -ESTALE;
+ } else if (is_dir) {
+ int ret2 = afs_dir_check(dvnode);
+
+ if (ret2 < 0)
+ ret = ret2;
+ } else if (i_size < folioq_folio_size(dvnode->directory, 0)) {
+ /* NUL-terminate a symlink. */
+ char *symlink = kmap_local_folio(folioq_folio(dvnode->directory, 0), 0);
+
+ symlink[i_size] = 0;
+ kunmap_local(symlink);
+ }
}
- _enter("%llu", i_size);
+ return ret;
+}
- nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file)
+{
+ ssize_t ret;
- req->actual_len = i_size; /* May change */
- req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
- req->data_version = dvnode->status.data_version; /* May change */
- iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages,
- 0, i_size);
- req->iter = &req->def_iter;
+ fscache_use_cookie(afs_vnode_cache(dvnode), false);
+ ret = afs_do_read_single(dvnode, file);
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL);
+ return ret;
+}
- /* Fill in any gaps that we might find where the memory reclaimer has
- * been at work and pin all the folios. If there are any gaps, we will
- * need to reread the entire directory contents.
- */
- i = req->nr_pages;
- while (i < nr_pages) {
- struct folio *folio;
-
- folio = filemap_get_folio(mapping, i);
- if (!folio) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_inval);
-
- ret = -ENOMEM;
- folio = __filemap_get_folio(mapping,
- i, FGP_LOCK | FGP_CREAT,
- mapping->gfp_mask);
- if (!folio)
- goto error;
- folio_attach_private(folio, (void *)1);
- folio_unlock(folio);
- }
+/*
+ * Read the directory into a folio_queue buffer in one go, scrubbing the
+ * previous contents. We return -ESTALE if the caller needs to call us again.
+ */
+ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
+ __acquires(&dvnode->validate_lock)
+{
+ ssize_t ret;
+ loff_t i_size;
- req->nr_pages += folio_nr_pages(folio);
- i += folio_nr_pages(folio);
- }
+ i_size = i_size_read(&dvnode->netfs.inode);
- /* If we're going to reload, we need to lock all the pages to prevent
- * races.
- */
ret = -ERESTARTSYS;
if (down_read_killable(&dvnode->validate_lock) < 0)
goto error;
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- goto success;
+ /* We only need to reread the data if it became invalid - or if we
+ * haven't read it yet.
+ */
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
+ ret = i_size;
+ goto valid;
+ }
up_read(&dvnode->validate_lock);
if (down_write_killable(&dvnode->validate_lock) < 0)
goto error;
- if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
- trace_afs_reload_dir(dvnode);
- ret = afs_fetch_data(dvnode, req);
- if (ret < 0)
- goto error_unlock;
-
- task_io_account_read(PAGE_SIZE * req->nr_pages);
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_invalidate_cache(dvnode, 0);
- if (req->len < req->file_size) {
- /* The content has grown, so we need to expand the
- * buffer.
- */
- up_write(&dvnode->validate_lock);
- goto expand;
- }
-
- /* Validate the data we just read. */
- ret = afs_dir_check(dvnode, req);
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) ||
+ !test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
+ trace_afs_reload_dir(dvnode);
+ ret = afs_read_single(dvnode, file);
if (ret < 0)
goto error_unlock;
// TODO: Trim excess pages
set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
+ } else {
+ ret = i_size;
}
downgrade_write(&dvnode->validate_lock);
-success:
- return req;
+valid:
+ return ret;
error_unlock:
up_write(&dvnode->validate_lock);
error:
- afs_put_read(req);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ _leave(" = %zd", ret);
+ return ret;
}
/*
@@ -392,78 +367,71 @@ error:
*/
static int afs_dir_iterate_block(struct afs_vnode *dvnode,
struct dir_context *ctx,
- union afs_xdr_dir_block *block,
- unsigned blkoff)
+ union afs_xdr_dir_block *block)
{
union afs_xdr_dirent *dire;
- unsigned offset, next, curr, nr_slots;
+ unsigned int blknum, base, hdr, pos, next, nr_slots;
size_t nlen;
int tmp;
- _enter("%llx,%x", ctx->pos, blkoff);
+ blknum = ctx->pos / AFS_DIR_BLOCK_SIZE;
+ base = blknum * AFS_DIR_SLOTS_PER_BLOCK;
+ hdr = (blknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+ pos = DIV_ROUND_UP(ctx->pos, AFS_DIR_DIRENT_SIZE) - base;
- curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
+ _enter("%llx,%x", ctx->pos, blknum);
/* walk through the block, an entry at a time */
- for (offset = (blkoff == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
- offset < AFS_DIR_SLOTS_PER_BLOCK;
- offset = next
- ) {
+ for (unsigned int slot = hdr; slot < AFS_DIR_SLOTS_PER_BLOCK; slot = next) {
/* skip entries marked unused in the bitmap */
- if (!(block->hdr.bitmap[offset / 8] &
- (1 << (offset % 8)))) {
- _debug("ENT[%zu.%u]: unused",
- blkoff / sizeof(union afs_xdr_dir_block), offset);
- next = offset + 1;
- if (offset >= curr)
- ctx->pos = blkoff +
- next * sizeof(union afs_xdr_dirent);
+ if (!(block->hdr.bitmap[slot / 8] &
+ (1 << (slot % 8)))) {
+ _debug("ENT[%x]: Unused", base + slot);
+ next = slot + 1;
+ if (next >= pos)
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
continue;
}
/* got a valid entry */
- dire = &block->dirents[offset];
+ dire = &block->dirents[slot];
nlen = strnlen(dire->u.name,
- sizeof(*block) -
- offset * sizeof(union afs_xdr_dirent));
+ (unsigned long)(block + 1) - (unsigned long)dire->u.name - 1);
if (nlen > AFSNAMEMAX - 1) {
- _debug("ENT[%zu]: name too long (len %u/%zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, nlen);
+ _debug("ENT[%x]: Name too long (len %zx)",
+ base + slot, nlen);
return afs_bad(dvnode, afs_file_error_dir_name_too_long);
}
- _debug("ENT[%zu.%u]: %s %zu \"%s\"",
- blkoff / sizeof(union afs_xdr_dir_block), offset,
- (offset < curr ? "skip" : "fill"),
+ _debug("ENT[%x]: %s %zx \"%s\"",
+ base + slot, (slot < pos ? "skip" : "fill"),
nlen, dire->u.name);
nr_slots = afs_dir_calc_slots(nlen);
- next = offset + nr_slots;
+ next = slot + nr_slots;
if (next > AFS_DIR_SLOTS_PER_BLOCK) {
- _debug("ENT[%zu.%u]:"
- " %u extends beyond end dir block"
- " (len %zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, next, nlen);
+ _debug("ENT[%x]: extends beyond end dir block (len %zx)",
+ base + slot, nlen);
return afs_bad(dvnode, afs_file_error_dir_over_end);
}
/* Check that the name-extension dirents are all allocated */
for (tmp = 1; tmp < nr_slots; tmp++) {
- unsigned int ix = offset + tmp;
- if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) {
- _debug("ENT[%zu.u]:"
- " %u unmarked extension (%u/%u)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, tmp, nr_slots);
+ unsigned int xslot = slot + tmp;
+
+ if (!(block->hdr.bitmap[xslot / 8] & (1 << (xslot % 8)))) {
+ _debug("ENT[%x]: Unmarked extension (%x/%x)",
+ base + slot, tmp, nr_slots);
return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
}
}
/* skip if starts before the current position */
- if (offset < curr)
+ if (slot < pos) {
+ if (next > pos)
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
continue;
+ }
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
@@ -475,75 +443,110 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
return 0;
}
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
}
_leave(" = 1 [more]");
return 1;
}
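
The slot arithmetic at the top of the function can be checked by hand. A small standalone example follows, assuming the usual on-the-wire sizes (2048-byte blocks, 32-byte dirents, hence 64 slots per block); the defines are local stand-ins rather than the kernel constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EX_BLOCK_SIZE 2048u		/* AFS_DIR_BLOCK_SIZE equivalent */
#define EX_DIRENT_SIZE 32u		/* AFS_DIR_DIRENT_SIZE equivalent */
#define EX_SLOTS_PER_BLOCK (EX_BLOCK_SIZE / EX_DIRENT_SIZE)	/* 64 */

int main(void)
{
	uint64_t fpos = 4128;	/* a byte offset as carried in ctx->pos */

	unsigned int blknum = fpos / EX_BLOCK_SIZE;		/* block 2 */
	unsigned int base = blknum * EX_SLOTS_PER_BLOCK;	/* slot 128 */
	/* Round up to the next dirent boundary, then make it block-relative. */
	unsigned int pos = (fpos + EX_DIRENT_SIZE - 1) / EX_DIRENT_SIZE - base;

	assert(blknum == 2 && base == 128 && pos == 1);
	printf("blknum=%u base=%u pos=%u\n", blknum, base, pos);
	return 0;
}

So a resume position of 4128 bytes lands in block 2, and iteration resumes at slot 1 of that block, with base + slot giving the global slot number used in the debug output.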
+struct afs_dir_iteration_ctx {
+ struct dir_context *dir_ctx;
+ int error;
+};
+
/*
- * iterate through the data blob that lists the contents of an AFS directory
+ * Iterate through a kmapped directory segment.
*/
-static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key, afs_dataversion_t *_dir_version)
+static size_t afs_dir_iterate_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- struct afs_vnode *dvnode = AFS_FS_I(dir);
- union afs_xdr_dir_block *dblock;
- struct afs_read *req;
- struct folio *folio;
- unsigned offset, size;
+ struct afs_dir_iteration_ctx *ctx = priv2;
+ struct afs_vnode *dvnode = priv;
int ret;
- _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
-
- if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
- _leave(" = -ESTALE");
- return -ESTALE;
+ if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE ||
+ len % AFS_DIR_BLOCK_SIZE)) {
+ pr_err("Mis-iteration prog=%zx len=%zx\n",
+ progress % AFS_DIR_BLOCK_SIZE,
+ len % AFS_DIR_BLOCK_SIZE);
+ return len;
}
- req = afs_read_dir(dvnode, key);
- if (IS_ERR(req))
- return PTR_ERR(req);
- *_dir_version = req->data_version;
+ do {
+ ret = afs_dir_iterate_block(dvnode, ctx->dir_ctx, iter_base);
+ if (ret != 1) {
+ if (ret < 0)
+ ctx->error = ret;
+ break;
+ }
- /* round the file position up to the next entry boundary */
- ctx->pos += sizeof(union afs_xdr_dirent) - 1;
- ctx->pos &= ~(sizeof(union afs_xdr_dirent) - 1);
+ ctx->dir_ctx->pos = round_up(ctx->dir_ctx->pos, AFS_DIR_BLOCK_SIZE);
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
- /* walk through the blocks in sequence */
- ret = 0;
- while (ctx->pos < req->actual_len) {
- /* Fetch the appropriate folio from the directory and re-add it
- * to the LRU. We have all the pages pinned with an extra ref.
- */
- folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
- FGP_ACCESSED, 0);
- if (!folio) {
- ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
- break;
- }
+ return len;
+}
- offset = round_down(ctx->pos, sizeof(*dblock)) - folio_file_pos(folio);
- size = min_t(loff_t, folio_size(folio),
- req->actual_len - folio_file_pos(folio));
+/*
+ * Iterate through the directory folios.
+ */
+static int afs_dir_iterate_contents(struct inode *dir, struct dir_context *dir_ctx)
+{
+ struct afs_dir_iteration_ctx ctx = { .dir_ctx = dir_ctx };
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(dir);
- do {
- dblock = kmap_local_folio(folio, offset);
- ret = afs_dir_iterate_block(dvnode, ctx, dblock,
- folio_file_pos(folio) + offset);
- kunmap_local(dblock);
- if (ret != 1)
- goto out;
+ /* Round the file position up to the next entry boundary */
+ dir_ctx->pos = round_up(dir_ctx->pos, sizeof(union afs_xdr_dirent));
- } while (offset += sizeof(*dblock), offset < size);
+ if (i_size <= 0 || dir_ctx->pos >= i_size)
+ return 0;
- ret = 0;
- }
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ iov_iter_advance(&iter, round_down(dir_ctx->pos, AFS_DIR_BLOCK_SIZE));
+
+ iterate_folioq(&iter, iov_iter_count(&iter), dvnode, &ctx,
+ afs_dir_iterate_step);
+
+ if (ctx.error == -ESTALE)
+ afs_invalidate_dir(dvnode, afs_dir_invalid_iter_stale);
+ return ctx.error;
+}
+
+/*
+ * iterate through the data blob that lists the contents of an AFS directory
+ */
+static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
+ struct file *file, afs_dataversion_t *_dir_version)
+{
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ int retry_limit = 100;
+ int ret;
+
+ _enter("{%lu},%llx,,", dir->i_ino, ctx->pos);
+
+ do {
+ if (--retry_limit < 0) {
+ pr_warn("afs_read_dir(): Too many retries\n");
+ ret = -ESTALE;
+ break;
+ }
+ ret = afs_read_dir(dvnode, file);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ break;
+ if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
+ ret = -ESTALE;
+ break;
+ }
+ continue;
+ }
+ *_dir_version = inode_peek_iversion_raw(dir);
+
+ ret = afs_dir_iterate_contents(dir, ctx);
+ up_read(&dvnode->validate_lock);
+ } while (ret == -ESTALE);
-out:
- up_read(&dvnode->validate_lock);
- afs_put_read(req);
_leave(" = %d", ret);
return ret;
}
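
The loop above amounts to a bounded retry: a refill that reports -ESTALE (the directory changed or grew while it was being fetched) triggers another pass, capped so that a constantly changing directory cannot spin forever. A self-contained sketch of that shape, with stub helpers standing in for afs_read_dir() and the content walk (the names and behaviour of the stubs are purely illustrative):

#include <errno.h>
#include <stdio.h>

static int ex_calls;

static int ex_refill(void)
{
	return (ex_calls++ == 0) ? -ESTALE : 0;	/* stale once, then fine */
}

static int ex_consume(void)
{
	return 0;				/* pretend iteration succeeded */
}

static int ex_iterate(void)
{
	int retry_limit = 100;
	int ret;

	do {
		if (--retry_limit < 0)
			return -ESTALE;		/* give up rather than livelock */
		ret = ex_refill();
		if (ret == -ESTALE)
			continue;		/* buffer went stale: retry */
		if (ret < 0)
			return ret;
		ret = ex_consume();
	} while (ret == -ESTALE);

	return ret;
}

int main(void)
{
	printf("ex_iterate() = %d\n", ex_iterate());
	return 0;
}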
@@ -555,8 +558,7 @@ static int afs_readdir(struct file *file, struct dir_context *ctx)
{
afs_dataversion_t dir_version;
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
- &dir_version);
+ return afs_dir_iterate(file_inode(file), ctx, file, &dir_version);
}
/*
@@ -564,7 +566,7 @@ static int afs_readdir(struct file *file, struct dir_context *ctx)
* - if afs_dir_iterate_block() spots this function, it'll pass the FID
* uniquifier through dtype
*/
-static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
+static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
int nlen, loff_t fpos, u64 ino, unsigned dtype)
{
struct afs_lookup_one_cookie *cookie =
@@ -580,38 +582,38 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
if (cookie->name.len != nlen ||
memcmp(cookie->name.name, name, nlen) != 0) {
- _leave(" = 0 [no]");
- return 0;
+ _leave(" = true [keep looking]");
+ return true;
}
cookie->fid.vnode = ino;
cookie->fid.unique = dtype;
cookie->found = 1;
- _leave(" = -1 [found]");
- return -1;
+ _leave(" = false [found]");
+ return false;
}
/*
* Do a lookup of a single name in a directory
* - just returns the FID the dentry name maps to if found
*/
-static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key,
+static int afs_do_lookup_one(struct inode *dir, const struct qstr *name,
+ struct afs_fid *fid,
afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
.ctx.actor = afs_lookup_one_filldir,
- .name = dentry->d_name,
+ .name = *name,
.fid.vid = as->volume->vid
};
int ret;
- _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
+ _enter("{%lu},{%.*s},", dir->i_ino, name->len, name->name);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
+ ret = afs_dir_iterate(dir, &cookie.ctx, NULL, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
@@ -632,12 +634,11 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
* - if afs_dir_iterate_block() spots this function, it'll pass the FID
* uniquifier through dtype
*/
-static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
+static bool afs_lookup_filldir(struct dir_context *ctx, const char *name,
int nlen, loff_t fpos, u64 ino, unsigned dtype)
{
struct afs_lookup_cookie *cookie =
container_of(ctx, struct afs_lookup_cookie, ctx);
- int ret;
_enter("{%s,%u},%s,%u,,%llu,%u",
cookie->name.name, cookie->name.len, name, nlen,
@@ -647,24 +648,13 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048);
BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32);
- if (cookie->found) {
- if (cookie->nr_fids < 50) {
- cookie->fids[cookie->nr_fids].vnode = ino;
- cookie->fids[cookie->nr_fids].unique = dtype;
- cookie->nr_fids++;
- }
- } else if (cookie->name.len == nlen &&
- memcmp(cookie->name.name, name, nlen) == 0) {
- cookie->fids[1].vnode = ino;
- cookie->fids[1].unique = dtype;
- cookie->found = 1;
- if (cookie->one_only)
- return -1;
+ if (cookie->nr_fids < 50) {
+ cookie->fids[cookie->nr_fids].vnode = ino;
+ cookie->fids[cookie->nr_fids].unique = dtype;
+ cookie->nr_fids++;
}
- ret = cookie->nr_fids >= 50 ? -1 : 0;
- _leave(" = %d", ret);
- return ret;
+ return cookie->nr_fids < 50;
}
/*
@@ -687,8 +677,9 @@ static void afs_do_lookup_success(struct afs_operation *op)
vp = &op->file[0];
abort_code = vp->scb.status.abort_code;
if (abort_code != 0) {
- op->ac.abort_code = abort_code;
- op->error = afs_abort_to_error(abort_code);
+ op->call_abort_code = abort_code;
+ afs_op_set_error(op, afs_abort_to_error(abort_code));
+ op->cumul_error.abort_code = abort_code;
}
break;
@@ -701,6 +692,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
break;
}
+ if (vp->scb.status.abort_code)
+ trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
if (!vp->scb.have_status && !vp->scb.have_error)
continue;
@@ -778,8 +771,7 @@ static bool afs_server_supports_ibulk(struct afs_vnode *dvnode)
* files in one go and create inodes for them. The inode of the file we were
* asked for is returned.
*/
-static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
- struct key *key)
+static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry)
{
struct afs_lookup_cookie *cookie;
struct afs_vnode_param *vp;
@@ -787,6 +779,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
+ bool supports_ibulk, isnew;
long ret;
int i;
@@ -800,22 +793,22 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->fids[i].vid = dvnode->fid.vid;
cookie->ctx.actor = afs_lookup_filldir;
cookie->name = dentry->d_name;
- cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want
- * and slot 1 for the directory */
+ cookie->nr_fids = 2; /* slot 1 is saved for the fid we actually want
+ * and slot 0 for the directory */
- if (!afs_server_supports_ibulk(dvnode))
- cookie->one_only = true;
-
- /* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
+ /* Search the directory for the named entry using the hash table... */
+ ret = afs_dir_search(dvnode, &dentry->d_name, &cookie->fids[1], &data_version);
if (ret < 0)
goto out;
- dentry->d_fsdata = (void *)(unsigned long)data_version;
+ supports_ibulk = afs_server_supports_ibulk(dvnode);
+ if (supports_ibulk) {
+ /* ...then scan linearly from that point for entries to look up ahead of time. */
+ cookie->ctx.pos = (ret + 1) * AFS_DIR_DIRENT_SIZE;
+ afs_dir_iterate(dir, &cookie->ctx, NULL, &data_version);
+ }
- ret = -ENOENT;
- if (!cookie->found)
- goto out;
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
/* Check to see if we already have an inode for the primary fid. */
inode = ilookup5(dir->i_sb, cookie->fids[1].vnode,
@@ -840,13 +833,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
_debug("nr_files %u", op->nr_files);
/* Need space for examining all the selected files */
- op->error = -ENOMEM;
if (op->nr_files > 2) {
op->more_files = kvcalloc(op->nr_files - 2,
sizeof(struct afs_vnode_param),
GFP_KERNEL);
- if (!op->more_files)
+ if (!op->more_files) {
+ afs_op_nomem(op);
goto out_op;
+ }
for (i = 2; i < op->nr_files; i++) {
vp = &op->more_files[i - 2];
@@ -856,7 +850,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* callback counters.
*/
ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode,
- afs_ilookup5_test_by_fid, &vp->fid);
+ afs_ilookup5_test_by_fid, &vp->fid, &isnew);
if (!IS_ERR_OR_NULL(ti)) {
vnode = AFS_FS_I(ti);
vp->dv_before = vnode->status.data_version;
@@ -872,14 +866,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* lookups contained therein are stored in the reply without aborting
* the whole operation.
*/
- op->error = -ENOTSUPP;
- if (!cookie->one_only) {
+ afs_op_set_error(op, -ENOTSUPP);
+ if (supports_ibulk) {
op->ops = &afs_inline_bulk_status_operation;
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
}
- if (op->error == -ENOTSUPP) {
+ if (afs_op_error(op) == -ENOTSUPP) {
/* We could try FS.BulkStatus next, but this aborts the entire
* op if any of the lookups fails - so, for the moment, revert
* to FS.FetchStatus for op->file[1].
@@ -889,12 +883,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
}
- inode = ERR_PTR(op->error);
out_op:
- if (op->error == 0) {
- inode = &op->file[1].vnode->vfs_inode;
- op->file[1].vnode = NULL;
+ if (!afs_op_error(op)) {
+ if (op->file[1].scb.status.abort_code) {
+ afs_op_accumulate_error(op, -ECONNABORTED,
+ op->file[1].scb.status.abort_code);
+ } else {
+ inode = &op->file[1].vnode->netfs.inode;
+ op->file[1].vnode = NULL;
+ }
}
if (op->file[0].scb.have_status)
@@ -911,8 +909,7 @@ out:
/*
* Look up an entry in a directory with @sys substitution.
*/
-static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry,
- struct key *key)
+static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry)
{
struct afs_sysnames *subs;
struct afs_net *net = afs_i2net(dir);
@@ -946,7 +943,7 @@ static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry,
}
strcpy(p, name);
- ret = lookup_one_len(buf, dentry->d_parent, len);
+ ret = lookup_noperm(&QSTR(buf), dentry->d_parent);
if (IS_ERR(ret) || d_is_positive(ret))
goto out_s;
dput(ret);
@@ -960,7 +957,6 @@ out_s:
afs_put_sysnames(subs);
kfree(buf);
out_p:
- key_put(key);
return ret;
}
@@ -974,7 +970,6 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
struct afs_fid fid = {};
struct inode *inode;
struct dentry *d;
- struct key *key;
int ret;
_enter("{%llx:%llu},%p{%pd},",
@@ -992,15 +987,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ESTALE);
}
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- _leave(" = %ld [key]", PTR_ERR(key));
- return ERR_CAST(key);
- }
-
- ret = afs_validate(dvnode, key);
+ ret = afs_validate(dvnode, NULL);
if (ret < 0) {
- key_put(key);
+ afs_dir_unuse_cookie(dvnode, ret);
_leave(" = %d [val]", ret);
return ERR_PTR(ret);
}
@@ -1010,15 +999,13 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
dentry->d_name.name[dentry->d_name.len - 3] == 's' &&
dentry->d_name.name[dentry->d_name.len - 2] == 'y' &&
dentry->d_name.name[dentry->d_name.len - 1] == 's')
- return afs_lookup_atsys(dir, dentry, key);
+ return afs_lookup_atsys(dir, dentry);
afs_stat_v(dvnode, n_lookup);
- inode = afs_do_lookup(dir, dentry, key);
- key_put(key);
+ inode = afs_do_lookup(dir, dentry);
if (inode == ERR_PTR(-ENOENT))
- inode = afs_try_auto_mntpt(dentry, dir);
-
- if (!IS_ERR_OR_NULL(inode))
+ inode = NULL;
+ else if (!IS_ERR_OR_NULL(inode))
fid = AFS_FS_I(inode)->fid;
_debug("splice %p", dentry->d_inode);
@@ -1036,21 +1023,12 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
/*
* Check the validity of a dentry under RCU conditions.
*/
-static int afs_d_revalidate_rcu(struct dentry *dentry)
+static int afs_d_revalidate_rcu(struct afs_vnode *dvnode, struct dentry *dentry)
{
- struct afs_vnode *dvnode;
- struct dentry *parent;
- struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
- /* Check the parent directory is still valid first. */
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- dvnode = AFS_FS_I(dir);
if (test_bit(AFS_VNODE_DELETED, &dvnode->flags))
return -ECHILD;
@@ -1078,11 +1056,11 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
*/
-static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int afs_d_revalidate(struct inode *parent_dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct afs_vnode *vnode, *dir;
+ struct afs_vnode *vnode, *dir = AFS_FS_I(parent_dir);
struct afs_fid fid;
- struct dentry *parent;
struct inode *inode;
struct key *key;
afs_dataversion_t dir_version, invalid_before;
@@ -1090,7 +1068,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
int ret;
if (flags & LOOKUP_RCU)
- return afs_d_revalidate_rcu(dentry);
+ return afs_d_revalidate_rcu(dir, dentry);
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
@@ -1105,12 +1083,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (IS_ERR(key))
key = NULL;
- /* Hold the parent dentry so we can peer at it */
- parent = dget_parent(dentry);
- dir = AFS_FS_I(d_inode(parent));
-
/* validate the parent directory */
- afs_validate(dir, key);
+ ret = afs_validate(dir, key);
+ if (ret == -ERESTARTSYS) {
+ key_put(key);
+ return ret;
+ }
if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
_debug("%pd: parent dir deleted", dentry);
@@ -1135,7 +1113,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
+ ret = afs_do_lookup_one(&dir->netfs.inode, name, &fid, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1166,7 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
_debug("%pd: file deleted (uq %u -> %u I:%u)",
dentry, fid.unique,
vnode->fid.unique,
- vnode->vfs_inode.i_generation);
+ vnode->netfs.inode.i_generation);
goto not_found;
}
goto out_valid;
@@ -1179,22 +1157,19 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out_valid;
default:
- _debug("failed to iterate dir %pd: %d",
- parent, ret);
+ _debug("failed to iterate parent %pd2: %d", dentry, ret);
goto not_found;
}
out_valid:
dentry->d_fsdata = (void *)(unsigned long)dir_version;
out_valid_noupdate:
- dput(parent);
key_put(key);
_leave(" = 1 [valid]");
return 1;
not_found:
_debug("dropping dentry %pd2", dentry);
- dput(parent);
key_put(key);
_leave(" = 0 [bad]");
@@ -1249,9 +1224,10 @@ void afs_check_for_remote_deletion(struct afs_operation *op)
{
struct afs_vnode *vnode = op->file[0].vnode;
- switch (op->ac.abort_code) {
+ switch (afs_op_abort_code(op)) {
case VNOVNODE:
set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ clear_nlink(&vnode->netfs.inode);
afs_break_callback(vnode, afs_cb_break_for_deleted);
}
}
@@ -1261,26 +1237,31 @@ void afs_check_for_remote_deletion(struct afs_operation *op)
*/
static void afs_vnode_new_inode(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *vnode;
struct inode *inode;
_enter("");
- ASSERTCMP(op->error, ==, 0);
+ ASSERTCMP(afs_op_error(op), ==, 0);
inode = afs_iget(op, vp);
if (IS_ERR(inode)) {
/* ENOMEM or EINTR at a really inconvenient time - just abandon
* the new directory on the server.
*/
- op->error = PTR_ERR(inode);
+ afs_op_accumulate_error(op, PTR_ERR(inode), 0);
return;
}
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- if (!op->error)
+ if (S_ISDIR(inode->i_mode))
+ afs_mkdir_init_dir(vnode, dvp->vnode);
+ else if (S_ISLNK(inode->i_mode))
+ afs_init_new_symlink(vnode, op);
+ if (!afs_op_error(op))
afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb);
d_instantiate(op->dentry, inode);
}
@@ -1296,25 +1277,28 @@ static void afs_create_success(struct afs_operation *op)
static void afs_create_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid,
op->create.reason);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_create_put(struct afs_operation *op)
{
_enter("op=%08x", op->debug_id);
- if (op->error)
+ if (afs_op_error(op))
d_drop(op->dentry);
}
@@ -1330,11 +1314,12 @@ static const struct afs_operation_ops afs_mkdir_operation = {
/*
* create a directory on an AFS filesystem
*/
-static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
+ int ret;
_enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
@@ -1342,9 +1327,11 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
op = afs_alloc_operation(NULL, dvnode->volume);
if (IS_ERR(op)) {
d_drop(dentry);
- return PTR_ERR(op);
+ return ERR_CAST(op);
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1352,8 +1339,11 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
op->dentry = dentry;
op->create.mode = S_IFDIR | mode;
op->create.reason = afs_edit_dir_for_mkdir;
+ op->mtime = current_time(dir);
op->ops = &afs_mkdir_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ERR_PTR(ret);
}
/*
@@ -1364,10 +1354,10 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
if (d_really_is_positive(dentry)) {
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- clear_nlink(&vnode->vfs_inode);
+ clear_nlink(&vnode->netfs.inode);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_rmdir);
+ afs_invalidate_dir(vnode, afs_dir_invalid_subdir_removed);
}
}
@@ -1381,18 +1371,21 @@ static void afs_rmdir_success(struct afs_operation *op)
static void afs_rmdir_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
afs_dir_remove_subdir(op->dentry);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_remove(dvnode, &op->dentry->d_name,
afs_edit_dir_for_rmdir);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_rmdir_put(struct afs_operation *op)
@@ -1427,6 +1420,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1450,10 +1445,18 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
op->file[1].vnode = vnode;
}
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+
+ /* Not all systems that can host afs servers have ENOTEMPTY. */
+ if (ret == -EEXIST)
+ ret = -ENOTEMPTY;
+out:
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
- return afs_put_operation(op);
+ ret = afs_put_operation(op);
+ goto out;
}
/*
@@ -1473,7 +1476,7 @@ static void afs_dir_remove_link(struct afs_operation *op)
struct dentry *dentry = op->dentry;
int ret;
- if (op->error != 0 ||
+ if (afs_op_error(op) ||
(op->file[1].scb.have_status && op->file[1].scb.have_error))
return;
if (d_really_is_positive(dentry))
@@ -1483,8 +1486,8 @@ static void afs_dir_remove_link(struct afs_operation *op)
/* Already done */
} else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
write_seqlock(&vnode->cb_lock);
- drop_nlink(&vnode->vfs_inode);
- if (vnode->vfs_inode.i_nlink == 0) {
+ drop_nlink(&vnode->netfs.inode);
+ if (vnode->netfs.inode.i_nlink == 0) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
__afs_break_callback(vnode, afs_cb_break_for_unlink);
}
@@ -1497,10 +1500,10 @@ static void afs_dir_remove_link(struct afs_operation *op)
ret = afs_validate(vnode, op->key);
if (ret != -ESTALE)
- op->error = ret;
+ afs_op_set_error(op, ret);
}
- _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error);
+ _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, afs_op_error(op));
}
static void afs_unlink_success(struct afs_operation *op)
@@ -1516,22 +1519,25 @@ static void afs_unlink_success(struct afs_operation *op)
static void afs_unlink_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_remove(dvnode, &op->dentry->d_name,
afs_edit_dir_for_unlink);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_unlink_put(struct afs_operation *op)
{
_enter("op=%08x", op->debug_id);
- if (op->unlink.need_rehash && op->error < 0 && op->error != -ENOENT)
+ if (op->unlink.need_rehash && afs_op_error(op) < 0 && afs_op_error(op) != -ENOENT)
d_rehash(op->dentry);
}
@@ -1564,6 +1570,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1572,7 +1580,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
/* Try to make sure we have a callback promise on the victim. */
ret = afs_validate(vnode, op->key);
if (ret < 0) {
- op->error = ret;
+ afs_op_set_error(op, ret);
goto error;
}
@@ -1581,7 +1589,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
spin_unlock(&dentry->d_lock);
/* Start asynchronous writeout of the inode */
write_inode_now(d_inode(dentry), 0);
- op->error = afs_sillyrename(dvnode, vnode, dentry, op->key);
+ afs_op_set_error(op, afs_sillyrename(dvnode, vnode, dentry, op->key));
goto error;
}
if (!d_unhashed(dentry)) {
@@ -1602,7 +1610,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
/* If there was a conflict with a third party, check the status of the
* unlinked vnode.
*/
- if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+ if (afs_op_error(op) == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
op->file[1].update_ctime = false;
op->fetch_status.which = 1;
op->ops = &afs_fetch_status_operation;
@@ -1610,10 +1618,10 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
afs_wait_for_operation(op);
}
- return afs_put_operation(op);
-
error:
- return afs_put_operation(op);
+ ret = afs_put_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
}
static const struct afs_operation_ops afs_create_operation = {
@@ -1628,7 +1636,7 @@ static const struct afs_operation_ops afs_create_operation = {
/*
* create a regular file on an AFS filesystem
*/
-static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct afs_operation *op;
@@ -1647,6 +1655,8 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1655,8 +1665,11 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
op->dentry = dentry;
op->create.mode = S_IFREG | mode;
op->create.reason = afs_edit_dir_for_create;
+ op->mtime = current_time(dir);
op->ops = &afs_create_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
d_drop(dentry);
@@ -1676,14 +1689,14 @@ static void afs_link_success(struct afs_operation *op)
afs_update_dentry_version(op, dvp, op->dentry);
if (op->dentry_2->d_parent == op->dentry->d_parent)
afs_update_dentry_version(op, dvp, op->dentry_2);
- ihold(&vp->vnode->vfs_inode);
- d_instantiate(op->dentry, &vp->vnode->vfs_inode);
+ ihold(&vp->vnode->netfs.inode);
+ d_instantiate(op->dentry, &vp->vnode->netfs.inode);
}
static void afs_link_put(struct afs_operation *op)
{
_enter("op=%08x", op->debug_id);
- if (op->error)
+ if (afs_op_error(op))
d_drop(op->dentry);
}
@@ -1721,6 +1734,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
ret = afs_validate(vnode, op->key);
if (ret < 0)
goto error_op;
@@ -1736,10 +1751,13 @@ static int afs_link(struct dentry *from, struct inode *dir,
op->dentry_2 = from;
op->ops = &afs_link_operation;
op->create.reason = afs_edit_dir_for_link;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error_op:
afs_put_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1758,7 +1776,7 @@ static const struct afs_operation_ops afs_symlink_operation = {
/*
* create a symlink in an AFS filesystem
*/
-static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *content)
{
struct afs_operation *op;
@@ -1783,6 +1801,8 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
@@ -1790,7 +1810,10 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
op->ops = &afs_symlink_operation;
op->create.reason = afs_edit_dir_for_symlink;
op->create.symlink = content;
- return afs_do_sync_operation(op);
+ op->mtime = current_time(dir);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
d_drop(dentry);
@@ -1800,6 +1823,9 @@ error:
static void afs_rename_success(struct afs_operation *op)
{
+ struct afs_vnode *vnode = op->more_files[0].vnode;
+ struct afs_vnode *new_vnode = op->more_files[1].vnode;
+
_enter("op=%08x", op->debug_id);
op->ctime = op->file[0].scb.status.mtime_client;
@@ -1809,10 +1835,46 @@ static void afs_rename_success(struct afs_operation *op)
op->ctime = op->file[1].scb.status.mtime_client;
afs_vnode_commit_status(op, &op->file[1]);
}
+ if (op->more_files[0].scb.have_status)
+ afs_vnode_commit_status(op, &op->more_files[0]);
+ if (op->more_files[1].scb.have_status)
+ afs_vnode_commit_status(op, &op->more_files[1]);
+
+ /* If we're moving a subdir between dirs, we need to update
+ * its DV counter too as the ".." will be altered.
+ */
+ if (op->file[0].vnode != op->file[1].vnode) {
+ if (S_ISDIR(vnode->netfs.inode.i_mode)) {
+ u64 new_dv;
+
+ write_seqlock(&vnode->cb_lock);
+
+ new_dv = vnode->status.data_version + 1;
+ trace_afs_set_dv(vnode, new_dv);
+ vnode->status.data_version = new_dv;
+ inode_set_iversion_raw(&vnode->netfs.inode, new_dv);
+
+ write_sequnlock(&vnode->cb_lock);
+ }
+
+ if ((op->rename.rename_flags & RENAME_EXCHANGE) &&
+ S_ISDIR(new_vnode->netfs.inode.i_mode)) {
+ u64 new_dv;
+
+ write_seqlock(&new_vnode->cb_lock);
+
+ new_dv = new_vnode->status.data_version + 1;
+ new_vnode->status.data_version = new_dv;
+ inode_set_iversion_raw(&new_vnode->netfs.inode, new_dv);
+
+ write_sequnlock(&new_vnode->cb_lock);
+ }
+ }
}
static void afs_rename_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources orig_cres = {}, new_cres = {};
struct afs_vnode_param *orig_dvp = &op->file[0];
struct afs_vnode_param *new_dvp = &op->file[1];
struct afs_vnode *orig_dvnode = orig_dvp->vnode;
@@ -1829,6 +1891,10 @@ static void afs_rename_edit_dir(struct afs_operation *op)
op->rename.rehash = NULL;
}
+ fscache_begin_write_operation(&orig_cres, afs_vnode_cache(orig_dvnode));
+ if (new_dvnode != orig_dvnode)
+ fscache_begin_write_operation(&new_cres, afs_vnode_cache(new_dvnode));
+
down_write(&orig_dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
@@ -1850,6 +1916,12 @@ static void afs_rename_edit_dir(struct afs_operation *op)
&vnode->fid, afs_edit_dir_for_rename_2);
}
+ if (S_ISDIR(vnode->netfs.inode.i_mode) &&
+ new_dvnode != orig_dvnode &&
+ test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ afs_edit_dir_update(vnode, &dotdot_name, new_dvnode,
+ afs_edit_dir_for_rename_sub);
+
new_inode = d_inode(new_dentry);
if (new_inode) {
spin_lock(&new_inode->i_lock);
@@ -1862,9 +1934,6 @@ static void afs_rename_edit_dir(struct afs_operation *op)
/* Now we can update d_fsdata on the dentries to reflect their
* new parent's data_version.
- *
- * Note that if we ever implement RENAME_EXCHANGE, we'll have
- * to update both dentries with opposing dir versions.
*/
afs_update_dentry_version(op, new_dvp, op->dentry);
afs_update_dentry_version(op, new_dvp, op->dentry_2);
@@ -1872,6 +1941,70 @@ static void afs_rename_edit_dir(struct afs_operation *op)
d_move(old_dentry, new_dentry);
up_write(&new_dvnode->validate_lock);
+ fscache_end_operation(&orig_cres);
+ if (new_dvnode != orig_dvnode)
+ fscache_end_operation(&new_cres);
+}
+
+static void afs_rename_exchange_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode *orig_dvnode = orig_dvp->vnode;
+ struct afs_vnode *new_dvnode = new_dvp->vnode;
+ struct afs_vnode *old_vnode = op->more_files[0].vnode;
+ struct afs_vnode *new_vnode = op->more_files[1].vnode;
+ struct dentry *old_dentry = op->dentry;
+ struct dentry *new_dentry = op->dentry_2;
+
+ _enter("op=%08x", op->debug_id);
+
+ if (new_dvnode == orig_dvnode) {
+ down_write(&orig_dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+ orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta) {
+ afs_edit_dir_update(orig_dvnode, &old_dentry->d_name,
+ new_vnode, afs_edit_dir_for_rename_0);
+ afs_edit_dir_update(orig_dvnode, &new_dentry->d_name,
+ old_vnode, afs_edit_dir_for_rename_1);
+ }
+
+ d_exchange(old_dentry, new_dentry);
+ up_write(&orig_dvnode->validate_lock);
+ } else {
+ down_write(&orig_dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+ orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
+ afs_edit_dir_update(orig_dvnode, &old_dentry->d_name,
+ new_vnode, afs_edit_dir_for_rename_0);
+
+ up_write(&orig_dvnode->validate_lock);
+ down_write(&new_dvnode->validate_lock);
+
+ if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
+ new_dvnode->status.data_version == new_dvp->dv_before + new_dvp->dv_delta)
+ afs_edit_dir_update(new_dvnode, &new_dentry->d_name,
+ old_vnode, afs_edit_dir_for_rename_1);
+
+ if (S_ISDIR(old_vnode->netfs.inode.i_mode) &&
+ test_bit(AFS_VNODE_DIR_VALID, &old_vnode->flags))
+ afs_edit_dir_update(old_vnode, &dotdot_name, new_dvnode,
+ afs_edit_dir_for_rename_sub);
+
+ if (S_ISDIR(new_vnode->netfs.inode.i_mode) &&
+ test_bit(AFS_VNODE_DIR_VALID, &new_vnode->flags))
+ afs_edit_dir_update(new_vnode, &dotdot_name, orig_dvnode,
+ afs_edit_dir_for_rename_sub);
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parents' data_version.
+ */
+ afs_update_dentry_version(op, new_dvp, old_dentry);
+ afs_update_dentry_version(op, orig_dvp, new_dentry);
+
+ d_exchange(old_dentry, new_dentry);
+ up_write(&new_dvnode->validate_lock);
+ }
}
static void afs_rename_put(struct afs_operation *op)
@@ -1880,7 +2013,7 @@ static void afs_rename_put(struct afs_operation *op)
if (op->rename.rehash)
d_rehash(op->rename.rehash);
dput(op->rename.tmp);
- if (op->error)
+ if (afs_op_error(op))
d_rehash(op->dentry);
}
@@ -1892,18 +2025,44 @@ static const struct afs_operation_ops afs_rename_operation = {
.put = afs_rename_put,
};
+#if 0 /* Autoswitched in yfs_fs_rename_replace(). */
+static const struct afs_operation_ops afs_rename_replace_operation = {
+ .issue_afs_rpc = NULL,
+ .issue_yfs_rpc = yfs_fs_rename_replace,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_edit_dir,
+ .put = afs_rename_put,
+};
+#endif
+
+static const struct afs_operation_ops afs_rename_noreplace_operation = {
+ .issue_afs_rpc = NULL,
+ .issue_yfs_rpc = yfs_fs_rename_noreplace,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_edit_dir,
+ .put = afs_rename_put,
+};
+
+static const struct afs_operation_ops afs_rename_exchange_operation = {
+ .issue_afs_rpc = NULL,
+ .issue_yfs_rpc = yfs_fs_rename_exchange,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_exchange_edit_dir,
+ .put = afs_rename_put,
+};
+
/*
* rename a file in an AFS filesystem and/or move it between directories
*/
-static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct afs_operation *op;
- struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
+ struct afs_vnode *orig_dvnode, *new_dvnode, *vnode, *new_vnode = NULL;
int ret;
- if (flags)
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
/* Don't allow silly-rename files be moved around. */
@@ -1913,6 +2072,8 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
vnode = AFS_FS_I(d_inode(old_dentry));
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
+ if (d_is_positive(new_dentry))
+ new_vnode = AFS_FS_I(d_inode(new_dentry));
_enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}",
orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
@@ -1924,11 +2085,20 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(orig_dvnode), true);
+ if (new_dvnode != orig_dvnode)
+ fscache_use_cookie(afs_vnode_cache(new_dvnode), true);
+
ret = afs_validate(vnode, op->key);
- op->error = ret;
+ afs_op_set_error(op, ret);
if (ret < 0)
goto error;
+ op->more_files = kvcalloc(2, sizeof(struct afs_vnode_param), GFP_KERNEL);
+ if (!op->more_files) {
+ afs_op_nomem(op);
+ goto error;
+ }
+
afs_op_set_vnode(op, 0, orig_dvnode);
afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
op->file[0].dv_delta = 1;
@@ -1937,46 +2107,63 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
op->file[1].modification = true;
op->file[0].update_ctime = true;
op->file[1].update_ctime = true;
+ op->more_files[0].vnode = vnode;
+ op->more_files[0].speculative = true;
+ op->more_files[1].vnode = new_vnode;
+ op->more_files[1].speculative = true;
+ op->nr_files = 4;
op->dentry = old_dentry;
op->dentry_2 = new_dentry;
+ op->rename.rename_flags = flags;
op->rename.new_negative = d_is_negative(new_dentry);
- op->ops = &afs_rename_operation;
- /* For non-directories, check whether the target is busy and if so,
- * make a copy of the dentry and then do a silly-rename. If the
- * silly-rename succeeds, the copied dentry is hashed and becomes the
- * new target.
- */
- if (d_is_positive(new_dentry) && !d_is_dir(new_dentry)) {
- /* To prevent any new references to the target during the
- * rename, we unhash the dentry in advance.
+ if (flags & RENAME_NOREPLACE) {
+ op->ops = &afs_rename_noreplace_operation;
+ } else if (flags & RENAME_EXCHANGE) {
+ op->ops = &afs_rename_exchange_operation;
+ d_drop(new_dentry);
+ } else {
+ /* If we might displace the target, we might need to do a
+ * silly-rename.
+ */
- if (!d_unhashed(new_dentry)) {
- d_drop(new_dentry);
- op->rename.rehash = new_dentry;
- }
+ op->ops = &afs_rename_operation;
- if (d_count(new_dentry) > 2) {
- /* copy the target dentry's name */
- op->rename.tmp = d_alloc(new_dentry->d_parent,
- &new_dentry->d_name);
- if (!op->rename.tmp) {
- op->error = -ENOMEM;
- goto error;
+ /* For non-directories, check whether the target is busy and if
+ * so, make a copy of the dentry and then do a silly-rename.
+ * If the silly-rename succeeds, the copied dentry is hashed
+ * and becomes the new target.
+ */
+ if (d_is_positive(new_dentry) && !d_is_dir(new_dentry)) {
+ /* To prevent any new references to the target during
+ * the rename, we unhash the dentry in advance.
+ */
+ if (!d_unhashed(new_dentry)) {
+ d_drop(new_dentry);
+ op->rename.rehash = new_dentry;
}
- ret = afs_sillyrename(new_dvnode,
- AFS_FS_I(d_inode(new_dentry)),
- new_dentry, op->key);
- if (ret) {
- op->error = ret;
- goto error;
+ if (d_count(new_dentry) > 2) {
+ /* copy the target dentry's name */
+ op->rename.tmp = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!op->rename.tmp) {
+ afs_op_nomem(op);
+ goto error;
+ }
+
+ ret = afs_sillyrename(new_dvnode,
+ AFS_FS_I(d_inode(new_dentry)),
+ new_dentry, op->key);
+ if (ret) {
+ afs_op_set_error(op, ret);
+ goto error;
+ }
+
+ op->dentry_2 = op->rename.tmp;
+ op->rename.rehash = NULL;
+ op->rename.new_negative = true;
}
-
- op->dentry_2 = op->rename.tmp;
- op->rename.rehash = NULL;
- op->rename.new_negative = true;
}
}
@@ -1991,49 +2178,45 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
*/
d_drop(old_dentry);
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ if (ret == -ENOTSUPP)
+ ret = -EINVAL;
+out:
+ afs_dir_unuse_cookie(orig_dvnode, ret);
+ if (new_dvnode != orig_dvnode)
+ afs_dir_unuse_cookie(new_dvnode, ret);
+ return ret;
error:
- return afs_put_operation(op);
-}
-
-/*
- * Release a directory folio and clean up its private state if it's not busy
- * - return true if the folio can now be released, false if not
- */
-static int afs_dir_releasepage(struct page *subpage, gfp_t gfp_flags)
-{
- struct folio *folio = page_folio(subpage);
- struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
-
- folio_detach_private(folio);
-
- /* The directory will need reloading. */
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_relpg);
- return true;
+ ret = afs_put_operation(op);
+ goto out;
}
/*
- * Invalidate part or all of a folio.
+ * Write the file contents to the cache as a single blob.
*/
-static void afs_dir_invalidatepage(struct page *subpage, unsigned int offset,
- unsigned int length)
+int afs_single_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- struct folio *folio = page_folio(subpage);
- struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{%lu},%u,%u", folio_index(folio), offset, length);
-
- BUG_ON(!folio_test_locked(folio));
+ struct afs_vnode *dvnode = AFS_FS_I(mapping->host);
+ struct iov_iter iter;
+ bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) &&
+ !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags));
+ int ret = 0;
- /* The directory will need reloading. */
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_inval);
+ /* Need to lock to prevent the folio queue and folios from being thrown
+ * away.
+ */
+ down_read(&dvnode->validate_lock);
+
+ if (is_dir ?
+ test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) :
+ atomic64_read(&dvnode->cb_expires_at) != AFS_NO_CB_PROMISE) {
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0,
+ i_size_read(&dvnode->netfs.inode));
+ ret = netfs_writeback_single(mapping, wbc, &iter);
+ }
- /* we clean up only if the entire folio is being invalidated */
- if (offset == 0 && length == folio_size(folio))
- folio_detach_private(folio);
+ up_read(&dvnode->validate_lock);
+ return ret;
}
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index d98e109ecee9..fd3aa9f97ce6 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -10,6 +10,7 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/iversion.h>
+#include <linux/folio_queue.h>
#include "internal.h"
#include "xdr_fs.h"
@@ -105,31 +106,66 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
}
/*
- * Get a new directory folio.
+ * Get a specific block, extending the directory storage to cover it as needed.
*/
-static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
+static union afs_xdr_dir_block *afs_dir_get_block(struct afs_dir_iter *iter, size_t block)
{
- struct address_space *mapping = vnode->vfs_inode.i_mapping;
+ struct folio_queue *fq;
+ struct afs_vnode *dvnode = iter->dvnode;
struct folio *folio;
+ size_t blpos = block * AFS_DIR_BLOCK_SIZE;
+ size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
+ int ret;
+
+ if (dvnode->directory_size < blend) {
+ size_t cur_size = dvnode->directory_size;
+
+ ret = netfs_alloc_folioq_buffer(
+ NULL, &dvnode->directory, &cur_size, blend,
+ mapping_gfp_mask(dvnode->netfs.inode.i_mapping));
+ dvnode->directory_size = cur_size;
+ if (ret < 0)
+ goto fail;
+ }
- folio = __filemap_get_folio(mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
- mapping->gfp_mask);
- if (!folio)
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- else if (folio && !folio_test_private(folio))
- folio_attach_private(folio, (void *)1);
+ fq = iter->fq;
+ if (!fq)
+ fq = dvnode->directory;
+
+ /* Search the folio queue for the folio containing the block... */
+ for (; fq; fq = fq->next) {
+ for (int s = iter->fq_slot; s < folioq_count(fq); s++) {
+ size_t fsize = folioq_folio_size(fq, s);
+
+ if (blend <= fpos + fsize) {
+ /* ... and then return the mapped block. */
+ folio = folioq_folio(fq, s);
+ if (WARN_ON_ONCE(folio_pos(folio) != fpos))
+ goto fail;
+ iter->fq = fq;
+ iter->fq_slot = s;
+ iter->fpos = fpos;
+ return kmap_local_folio(folio, blpos - fpos);
+ }
+ fpos += fsize;
+ }
+ iter->fq_slot = 0;
+ }
- return folio;
+fail:
+ iter->fq = NULL;
+ iter->fq_slot = 0;
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block);
+ return NULL;
}
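
The folio-queue search above is a plain positional walk: keep a running file position, and stop at the first segment whose extent wholly covers the requested block (blocks are assumed never to straddle a segment boundary, since each folio is a multiple of the block size). A standalone model with placeholder types, not the kernel's struct folio_queue:

#include <stddef.h>
#include <stdio.h>

struct ex_segment {
	size_t size;			/* bytes held by this segment */
	struct ex_segment *next;
};

/* Return the segment containing [blpos, blend), or NULL; *offset receives
 * the offset of blpos within that segment.  Assumes blocks never straddle
 * segment boundaries. */
static struct ex_segment *ex_find_segment(struct ex_segment *seg,
					  size_t blpos, size_t blend,
					  size_t *offset)
{
	size_t fpos = 0;		/* file position of seg's first byte */

	for (; seg; fpos += seg->size, seg = seg->next) {
		if (blend <= fpos + seg->size) {
			*offset = blpos - fpos;
			return seg;
		}
	}
	return NULL;
}

int main(void)
{
	struct ex_segment s2 = { .size = 8192, .next = NULL };
	struct ex_segment s1 = { .size = 4096, .next = &s2 };
	size_t offset;
	struct ex_segment *seg = ex_find_segment(&s1, 6144, 8192, &offset);

	printf("found=%d offset=%zu\n", seg == &s2, offset);	/* found=1 offset=2048 */
	return 0;
}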
/*
* Scan a directory block looking for a dirent of the right name.
*/
-static int afs_dir_scan_block(union afs_xdr_dir_block *block, struct qstr *name,
+static int afs_dir_scan_block(const union afs_xdr_dir_block *block, const struct qstr *name,
unsigned int blocknum)
{
- union afs_xdr_dirent *de;
+ const union afs_xdr_dirent *de;
u64 bitmap;
int d, len, n;
@@ -203,36 +239,32 @@ static void afs_edit_init_block(union afs_xdr_dir_block *meta,
* The caller must hold the inode locked.
*/
void afs_edit_dir_add(struct afs_vnode *vnode,
- struct qstr *name, struct afs_fid *new_fid,
+ const struct qstr *name, struct afs_fid *new_fid,
enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
- struct folio *folio0, *folio;
- unsigned int need_slots, nr_blocks, b;
- pgoff_t index;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ unsigned int nr_blocks, b, entry;
loff_t i_size;
int slot;
_enter(",,{%d,%s},", name->len, name->name);
- i_size = i_size_read(&vnode->vfs_inode);
+ i_size = i_size_read(&vnode->netfs.inode);
if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_bad_size);
return;
}
- folio0 = afs_dir_get_folio(vnode, 0);
- if (!folio0) {
- _leave(" [fgp]");
+ meta = afs_dir_get_block(&iter, 0);
+ if (!meta)
return;
- }
/* Work out how many slots we're going to need. */
- need_slots = afs_dir_calc_slots(name->len);
+ iter.nr_slots = afs_dir_calc_slots(name->len);
- meta = kmap_local_folio(folio0, 0);
if (i_size == 0)
goto new_directory;
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
@@ -244,22 +276,21 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
/* If the directory extended into a new folio, then we need to
* tack a new folio on the end.
*/
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
- goto error;
- if (index >= folio_nr_pages(folio0)) {
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
- goto error;
- } else {
- folio = folio0;
- }
+ goto error_too_many_blocks;
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio));
+ /* Lower dir blocks have a counter in the header we can check. */
+ if (b < AFS_DIR_BLOCKS_WITH_CTR &&
+ meta->meta.alloc_ctrs[b] < iter.nr_slots)
+ continue;
+
+ block = afs_dir_get_block(&iter, b);
+ if (!block)
+ goto error;
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
+ goto already_invalidated;
_debug("block %u: %2u %3u %u",
b,
@@ -274,31 +305,23 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
- /* Only lower dir blocks have a counter in the header. */
- if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
- meta->meta.alloc_ctrs[b] >= need_slots) {
- /* We need to try and find one or more consecutive
- * slots to hold the entry.
- */
- slot = afs_find_contig_bits(block, need_slots);
- if (slot >= 0) {
- _debug("slot %u", slot);
- goto found_space;
- }
+ /* We need to try and find one or more consecutive slots to
+ * hold the entry.
+ */
+ slot = afs_find_contig_bits(block, iter.nr_slots);
+ if (slot >= 0) {
+ _debug("slot %u", slot);
+ goto found_space;
}
kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
}
/* There are no spare slots of sufficient size, yet the operation
* succeeded. Download the directory again.
*/
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_no_slots);
goto out_unmap;
new_directory:
@@ -306,8 +329,7 @@ new_directory:
i_size = AFS_DIR_BLOCK_SIZE;
afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
- folio = folio0;
- block = kmap_local_folio(folio, 0);
+ block = afs_dir_get_block(&iter, 0);
nr_blocks = 1;
b = 0;
@@ -325,41 +347,39 @@ found_space:
de->u.name[name->len] = 0;
/* Adjust the bitmap. */
- afs_set_contig_bits(block, slot, need_slots);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ afs_set_contig_bits(block, slot, iter.nr_slots);
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
- meta->meta.alloc_ctrs[b] -= need_slots;
+ meta->meta.alloc_ctrs[b] -= iter.nr_slots;
- inode_inc_iversion_raw(&vnode->vfs_inode);
+ /* Adjust the hash chain. */
+ entry = b * AFS_DIR_SLOTS_PER_BLOCK + slot;
+ iter.bucket = afs_dir_hash_name(name);
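+ /* The new entry is pushed onto the head of its bucket's chain. */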
+ de->u.hash_next = meta->meta.hashtable[iter.bucket];
+ meta->meta.hashtable[iter.bucket] = htons(entry);
+ kunmap_local(block);
+
+ inode_inc_iversion_raw(&vnode->netfs.inode);
afs_stat_v(vnode, n_dir_cr);
_debug("Insert %s in %u[%u]", name->name, b, slot);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+
out_unmap:
kunmap_local(meta);
- folio_unlock(folio0);
- folio_put(folio0);
_leave("");
return;
-invalidated:
+already_invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
goto out_unmap;
+error_too_many_blocks:
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_too_many_blocks);
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
@@ -371,123 +391,258 @@ error:
* The caller must hold the inode locked.
*/
void afs_edit_dir_remove(struct afs_vnode *vnode,
- struct qstr *name, enum afs_edit_dir_reason why)
+ const struct qstr *name, enum afs_edit_dir_reason why)
{
- union afs_xdr_dir_block *meta, *block;
- union afs_xdr_dirent *de;
- struct folio *folio0, *folio;
- unsigned int need_slots, nr_blocks, b;
- pgoff_t index;
+ union afs_xdr_dir_block *meta, *block, *pblock;
+ union afs_xdr_dirent *de, *pde;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ struct afs_fid fid;
+ unsigned int b, slot, entry;
loff_t i_size;
- int slot;
+ __be16 next;
+ int found;
_enter(",,{%d,%s},", name->len, name->name);
- i_size = i_size_read(&vnode->vfs_inode);
+ i_size = i_size_read(&vnode->netfs.inode);
if (i_size < AFS_DIR_BLOCK_SIZE ||
i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_bad_size);
return;
}
- nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- folio0 = afs_dir_get_folio(vnode, 0);
- if (!folio0) {
- _leave(" [fgp]");
+ if (!afs_dir_init_iter(&iter, name))
return;
- }
-
- /* Work out how many slots we're going to discard. */
- need_slots = afs_dir_calc_slots(name->len);
-
- meta = kmap_local_folio(folio0, 0);
-
- /* Find a block that has sufficient slots available. Each folio
- * contains two or more directory blocks.
- */
- for (b = 0; b < nr_blocks; b++) {
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index >= folio_nr_pages(folio0)) {
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
- goto error;
- } else {
- folio = folio0;
- }
-
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio));
-
- /* Abandon the edit if we got a callback break. */
- if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
- if (b > AFS_DIR_BLOCKS_WITH_CTR ||
- meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
- slot = afs_dir_scan_block(block, name, b);
- if (slot >= 0)
- goto found_dirent;
- }
+ meta = afs_dir_find_block(&iter, 0);
+ if (!meta)
+ return;
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ /* Find the entry in the blob. */
+ found = afs_dir_search_bucket(&iter, name, &fid);
+ if (found < 0) {
+ /* Didn't find the dirent to clobber. Re-download. */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
+ 0, 0, 0, 0, name->name);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_wrong_name);
+ goto out_unmap;
}
- /* Didn't find the dirent to clobber. Download the directory again. */
- trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
- 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- goto out_unmap;
+ entry = found;
+ b = entry / AFS_DIR_SLOTS_PER_BLOCK;
+ slot = entry % AFS_DIR_SLOTS_PER_BLOCK;
-found_dirent:
+ block = afs_dir_find_block(&iter, b);
+ if (!block)
+ goto error;
+ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ goto already_invalidated;
+
+ /* Check and clear the entry. */
de = &block->dirents[slot];
+ if (de->u.valid != 1)
+ goto error_unmap;
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot,
ntohl(de->u.vnode), ntohl(de->u.unique),
name->name);
- memset(de, 0, sizeof(*de) * need_slots);
-
/* Adjust the bitmap. */
- afs_clear_contig_bits(block, slot, need_slots);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ afs_clear_contig_bits(block, slot, iter.nr_slots);
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
- meta->meta.alloc_ctrs[b] += need_slots;
+ meta->meta.alloc_ctrs[b] += iter.nr_slots;
+
+ /* Clear the constituent entries. */
+ next = de->u.hash_next;
+ memset(de, 0, sizeof(*de) * iter.nr_slots);
+ kunmap_local(block);
+
+ /* Adjust the hash chain: if iter.prev_entry is 0, the previous link is
+ * the hashtable head; otherwise it's the slot number of the previous
+ * entry.
+ */
+ if (!iter.prev_entry) {
+ __be16 prev_next = meta->meta.hashtable[iter.bucket];
+
+ if (unlikely(prev_next != htons(entry))) {
+ pr_warn("%llx:%llx:%x: not head of chain b=%x p=%x,%x e=%x %*s",
+ vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
+ iter.bucket, iter.prev_entry, prev_next, entry,
+ name->len, name->name);
+ goto error;
+ }
+ meta->meta.hashtable[iter.bucket] = next;
+ } else {
+ unsigned int pb = iter.prev_entry / AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int ps = iter.prev_entry % AFS_DIR_SLOTS_PER_BLOCK;
+ __be16 prev_next;
+
+ pblock = afs_dir_find_block(&iter, pb);
+ if (!pblock)
+ goto error;
+ pde = &pblock->dirents[ps];
+ prev_next = pde->u.hash_next;
+ if (prev_next != htons(entry)) {
+ kunmap_local(pblock);
+ pr_warn("%llx:%llx:%x: not prev in chain b=%x p=%x,%x e=%x %*s",
+ vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
+ iter.bucket, iter.prev_entry, prev_next, entry,
+ name->len, name->name);
+ goto error;
+ }
+ pde->u.hash_next = next;
+ kunmap_local(pblock);
+ }
+
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
- inode_set_iversion_raw(&vnode->vfs_inode, vnode->status.data_version);
+ inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
afs_stat_v(vnode, n_dir_rm);
_debug("Remove %s from %u[%u]", name->name, b, slot);
out_unmap:
kunmap_local(meta);
- folio_unlock(folio0);
- folio_put(folio0);
_leave("");
return;
-invalidated:
+already_invalidated:
+ kunmap_local(block);
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
goto out_unmap;
+error_unmap:
+ kunmap_local(block);
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error,
0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
+
+/*
+ * Edit an entry in a directory to update the vnode it refers to. This is also
+ * used to update the ".." entry in a directory.
+ */
+void afs_edit_dir_update(struct afs_vnode *vnode, const struct qstr *name,
+ struct afs_vnode *new_dvnode, enum afs_edit_dir_reason why)
+{
+ union afs_xdr_dir_block *block;
+ union afs_xdr_dirent *de;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ unsigned int nr_blocks, b;
+ loff_t i_size;
+ int slot;
+
+ _enter("");
+
+ i_size = i_size_read(&vnode->netfs.inode);
+ if (i_size < AFS_DIR_BLOCK_SIZE) {
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_bad_size);
+ return;
+ }
+
+ nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
+
+ /* Scan the directory blocks for the entry that needs updating. */
+ for (b = 0; b < nr_blocks; b++) {
+ block = afs_dir_get_block(&iter, b);
+ if (!block)
+ goto error;
+
+ /* Abandon the edit if we got a callback break. */
+ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ goto already_invalidated;
+
+ slot = afs_dir_scan_block(block, name, b);
+ if (slot >= 0)
+ goto found_dirent;
+
+ kunmap_local(block);
+ }
+
+ /* Didn't find the dirent to clobber. Download the directory again. */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_update_nodd,
+ 0, 0, 0, 0, name->name);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_no_dd);
+ goto out;
+
+found_dirent:
+ de = &block->dirents[slot];
+ de->u.vnode = htonl(new_dvnode->fid.vnode);
+ de->u.unique = htonl(new_dvnode->fid.unique);
+
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_update_dd, b, slot,
+ ntohl(de->u.vnode), ntohl(de->u.unique), name->name);
+
+ kunmap_local(block);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+ inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
+
+out:
+ _leave("");
+ return;
+
+already_invalidated:
+ kunmap_local(block);
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_update_inval,
+ 0, 0, 0, 0, name->name);
+ goto out;
+
+error:
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_update_error,
+ 0, 0, 0, 0, name->name);
+ goto out;
+}
+
+/*
+ * Initialise a new directory. We need to fill in the "." and ".." entries.
+ */
+void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_dvnode)
+{
+ union afs_xdr_dir_block *meta;
+ struct afs_dir_iter iter = { .dvnode = dvnode };
+ union afs_xdr_dirent *de;
+ unsigned int slot = AFS_DIR_RESV_BLOCKS0;
+ loff_t i_size;
+
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size != AFS_DIR_BLOCK_SIZE) {
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_add_bad_size);
+ return;
+ }
+
+ meta = afs_dir_get_block(&iter, 0);
+ if (!meta)
+ return;
+
+ afs_edit_init_block(meta, meta, 0);
+
+ de = &meta->dirents[slot];
+ de->u.valid = 1;
+ de->u.vnode = htonl(dvnode->fid.vnode);
+ de->u.unique = htonl(dvnode->fid.unique);
+ memcpy(de->u.name, ".", 2);
+ trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot,
+ dvnode->fid.vnode, dvnode->fid.unique, ".");
+ slot++;
+
+ de = &meta->dirents[slot];
+ de->u.valid = 1;
+ de->u.vnode = htonl(parent_dvnode->fid.vnode);
+ de->u.unique = htonl(parent_dvnode->fid.unique);
+ memcpy(de->u.name, "..", 3);
+ trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot,
+ parent_dvnode->fid.vnode, parent_dvnode->fid.unique, "..");
+
+ afs_set_contig_bits(meta, AFS_DIR_RESV_BLOCKS0, 2);
+ meta->meta.alloc_ctrs[0] -= 2;
+ kunmap_local(meta);
+
+ netfs_single_mark_inode_dirty(&dvnode->netfs.inode);
+ set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
+}
diff --git a/fs/afs/dir_search.c b/fs/afs/dir_search.c
new file mode 100644
index 000000000000..d2516e55b5ed
--- /dev/null
+++ b/fs/afs/dir_search.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Search a directory's hash table.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * https://tools.ietf.org/html/draft-keiser-afs3-directory-object-00
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/iversion.h>
+#include "internal.h"
+#include "afs_fs.h"
+#include "xdr_fs.h"
+
+/*
+ * Calculate the name hash.
+ */
+unsigned int afs_dir_hash_name(const struct qstr *name)
+{
+ const unsigned char *p = name->name;
+ unsigned int hash = 0, i;
+ int bucket;
+
+ for (i = 0; i < name->len; i++)
+ hash = (hash * 173) + p[i];
+ bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1);
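+ /* The reference AFS3 hash uses signed arithmetic; if the 32-bit hash
+ * would be negative, reflect the bucket back into range.
+ */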
+ if (hash > INT_MAX) {
+ bucket = AFS_DIR_HASHTBL_SIZE - bucket;
+ bucket &= (AFS_DIR_HASHTBL_SIZE - 1);
+ }
+ return bucket;
+}
+
+/*
+ * Reset a directory iterator.
+ */
+static bool afs_dir_reset_iter(struct afs_dir_iter *iter)
+{
+ unsigned long long i_size = i_size_read(&iter->dvnode->netfs.inode);
+ unsigned int nblocks;
+
+ /* Work out the maximum number of steps we can take. */
+ nblocks = umin(i_size / AFS_DIR_BLOCK_SIZE, AFS_DIR_MAX_BLOCKS);
+ if (!nblocks)
+ return false;
+ iter->loop_check = nblocks * (AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS);
+ iter->prev_entry = 0; /* Hash head is previous */
+ return true;
+}
+
+/*
+ * Initialise a directory iterator for looking up a name.
+ */
+bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name)
+{
+ iter->nr_slots = afs_dir_calc_slots(name->len);
+ iter->bucket = afs_dir_hash_name(name);
+ return afs_dir_reset_iter(iter);
+}
+
+/*
+ * Get a specific block.
+ */
+union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block)
+{
+ struct folio_queue *fq = iter->fq;
+ struct afs_vnode *dvnode = iter->dvnode;
+ struct folio *folio;
+ size_t blpos = block * AFS_DIR_BLOCK_SIZE;
+ size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
+ int slot = iter->fq_slot;
+
+ _enter("%zx,%d", block, slot);
+
+ if (iter->block) {
+ kunmap_local(iter->block);
+ iter->block = NULL;
+ }
+
+ if (dvnode->directory_size < blend)
+ goto fail;
+
+ if (!fq || blpos < fpos) {
+ fq = dvnode->directory;
+ slot = 0;
+ fpos = 0;
+ }
+
+ /* Search the folio queue for the folio containing the block... */
+ for (; fq; fq = fq->next) {
+ for (; slot < folioq_count(fq); slot++) {
+ size_t fsize = folioq_folio_size(fq, slot);
+
+ if (blend <= fpos + fsize) {
+ /* ... and then return the mapped block. */
+ folio = folioq_folio(fq, slot);
+ if (WARN_ON_ONCE(folio_pos(folio) != fpos))
+ goto fail;
+ iter->fq = fq;
+ iter->fq_slot = slot;
+ iter->fpos = fpos;
+ iter->block = kmap_local_folio(folio, blpos - fpos);
+ return iter->block;
+ }
+ fpos += fsize;
+ }
+ slot = 0;
+ }
+
+fail:
+ iter->fq = NULL;
+ iter->fq_slot = 0;
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block);
+ return NULL;
+}
+
+/*
+ * Search through a directory bucket.
+ */
+int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
+ struct afs_fid *_fid)
+{
+ const union afs_xdr_dir_block *meta;
+ unsigned int entry;
+ int ret = -ESTALE;
+
+ meta = afs_dir_find_block(iter, 0);
+ if (!meta)
+ return -ESTALE;
+
+ entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]);
+ _enter("%x,%x", iter->bucket, entry);
+
+ while (entry) {
+ const union afs_xdr_dir_block *block;
+ const union afs_xdr_dirent *dire;
+ unsigned int blnum = entry / AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int slot = entry % AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int resv = (blnum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+
+ _debug("search %x", entry);
+
+ if (slot < resv) {
+ kdebug("slot out of range h=%x rs=%2x sl=%2x-%2x",
+ iter->bucket, resv, slot, slot + iter->nr_slots - 1);
+ goto bad;
+ }
+
+ block = afs_dir_find_block(iter, blnum);
+ if (!block)
+ goto bad;
+ dire = &block->dirents[slot];
+
+ if (slot + iter->nr_slots <= AFS_DIR_SLOTS_PER_BLOCK &&
+ memcmp(dire->u.name, name->name, name->len) == 0 &&
+ dire->u.name[name->len] == '\0') {
+ _fid->vnode = ntohl(dire->u.vnode);
+ _fid->unique = ntohl(dire->u.unique);
+ ret = entry;
+ goto found;
+ }
+
+ iter->prev_entry = entry;
+ entry = ntohs(dire->u.hash_next);
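+ /* Guard against a corrupt or circular hash chain. */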
+ if (!--iter->loop_check) {
+ kdebug("dir chain loop h=%x", iter->bucket);
+ goto bad;
+ }
+ }
+
+ ret = -ENOENT;
+found:
+ if (iter->block) {
+ kunmap_local(iter->block);
+ iter->block = NULL;
+ }
+
+bad:
+ if (ret == -ESTALE)
+ afs_invalidate_dir(iter->dvnode, afs_dir_invalid_iter_stale);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Search the appropriate hash chain in the contents of an AFS directory.
+ */
+int afs_dir_search(struct afs_vnode *dvnode, const struct qstr *name,
+ struct afs_fid *_fid, afs_dataversion_t *_dir_version)
+{
+ struct afs_dir_iter iter = { .dvnode = dvnode, };
+ int ret, retry_limit = 3;
+
+ _enter("{%lu},,,", dvnode->netfs.inode.i_ino);
+
+ if (!afs_dir_init_iter(&iter, name))
+ return -ENOENT;
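+ /* Bring the directory contents up to date and retry a limited number
+ * of times if they go stale under us mid-search.
+ */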
+ do {
+ if (--retry_limit < 0) {
+ pr_warn("afs_read_dir(): Too many retries\n");
+ ret = -ESTALE;
+ break;
+ }
+ ret = afs_read_dir(dvnode, NULL);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ break;
+ if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) {
+ ret = -ESTALE;
+ break;
+ }
+ continue;
+ }
+ *_dir_version = inode_peek_iversion_raw(&dvnode->netfs.inode);
+
+ ret = afs_dir_search_bucket(&iter, name, _fid);
+ up_read(&dvnode->validate_lock);
+ if (ret == -ESTALE)
+ afs_dir_reset_iter(&iter);
+ } while (ret == -ESTALE);
+
+ _leave(" = %d", ret);
+ return ret;
+}
diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
index 45cfd50a9521..014495d4b868 100644
--- a/fs/afs/dir_silly.c
+++ b/fs/afs/dir_silly.c
@@ -69,6 +69,12 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
if (IS_ERR(op))
return PTR_ERR(op);
+ op->more_files = kvcalloc(2, sizeof(struct afs_vnode_param), GFP_KERNEL);
+ if (!op->more_files) {
+ afs_put_operation(op);
+ return -ENOMEM;
+ }
+
afs_op_set_vnode(op, 0, dvnode);
afs_op_set_vnode(op, 1, dvnode);
op->file[0].dv_delta = 1;
@@ -77,6 +83,11 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
op->file[1].modification = true;
op->file[0].update_ctime = true;
op->file[1].update_ctime = true;
+ op->more_files[0].vnode = AFS_FS_I(d_inode(old));
+ op->more_files[0].speculative = true;
+ op->more_files[1].vnode = AFS_FS_I(d_inode(new));
+ op->more_files[1].speculative = true;
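+ /* file[0] and file[1] plus the two speculative entries in more_files. */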
+ op->nr_files = 4;
op->dentry = old;
op->dentry_2 = new;
@@ -113,16 +124,14 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
sdentry = NULL;
do {
- int slen;
-
dput(sdentry);
sillycounter++;
/* Create a silly name. Note that the ".__afs" prefix is
* understood by the salvager and must not be changed.
*/
- slen = scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter);
- sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+ scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter);
+ sdentry = lookup_noperm(&QSTR(silly), dentry->d_parent);
/* N.B. Better to return EBUSY here ... it could be dangerous
* to delete the file while it's in use.
@@ -131,7 +140,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
goto out;
} while (!d_is_negative(sdentry));
- ihold(&vnode->vfs_inode);
+ ihold(&vnode->netfs.inode);
ret = afs_do_silly_rename(dvnode, vnode, dentry, sdentry, key);
switch (ret) {
@@ -148,7 +157,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
d_drop(sdentry);
}
- iput(&vnode->vfs_inode);
+ iput(&vnode->netfs.inode);
dput(sdentry);
out:
_leave(" = %d", ret);
@@ -218,7 +227,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
/* If there was a conflict with a third party, check the status of the
* unlinked vnode.
*/
- if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+ if (op->cumul_error.error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
op->file[1].update_ctime = false;
op->fetch_status.which = 1;
op->ops = &afs_fetch_status_operation;
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index db832cc931c8..aa56e8951e03 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,16 +10,19 @@
#include <linux/dns_resolver.h>
#include "internal.h"
-static atomic_t afs_autocell_ino;
+#define AFS_MIN_DYNROOT_CELL_INO 4 /* Allow for ., .., @cell, .@cell */
+#define AFS_MAX_DYNROOT_CELL_INO ((unsigned int)INT_MAX)
+
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino);
/*
* iget5() comparator for inode created by autocell operations
- *
- * These pseudo inodes don't match anything.
*/
static int afs_iget5_pseudo_test(struct inode *inode, void *opaque)
{
- return 0;
+ struct afs_fid *fid = opaque;
+
+ return inode->i_ino == fid->vnode;
}
/*
@@ -39,28 +42,16 @@ static int afs_iget5_pseudo_set(struct inode *inode, void *opaque)
}
/*
- * Create an inode for a dynamic root directory or an autocell dynamic
- * automount dir.
+ * Create an inode for an autocell dynamic automount dir.
*/
-struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
+static struct inode *afs_iget_pseudo_dir(struct super_block *sb, ino_t ino)
{
- struct afs_super_info *as = AFS_FS_S(sb);
struct afs_vnode *vnode;
struct inode *inode;
- struct afs_fid fid = {};
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
_enter("");
- if (as->volume)
- fid.vid = as->volume->vid;
- if (root) {
- fid.vnode = 1;
- fid.unique = 1;
- } else {
- fid.vnode = atomic_inc_return(&afs_autocell_ino);
- fid.unique = 0;
- }
-
inode = iget5_locked(sb, fid.vnode,
afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
if (!inode) {
@@ -73,148 +64,76 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
vnode = AFS_FS_I(inode);
- /* there shouldn't be an existing inode */
- BUG_ON(!(inode->i_state & I_NEW));
-
- inode->i_size = 0;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (root) {
- inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &simple_dir_operations;
- } else {
- inode->i_op = &afs_autocell_inode_operations;
- }
- set_nlink(inode, 2);
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
- inode->i_blocks = 0;
- inode->i_generation = 0;
-
- set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- if (!root) {
+ if (inode_state_read_once(inode) & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_autocell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT;
- }
- inode->i_flags |= S_NOATIME;
- unlock_new_inode(inode);
+ unlock_new_inode(inode);
+ }
_leave(" = %p", inode);
return inode;
}
/*
- * Probe to see if a cell may exist. This prevents positive dentries from
- * being created unnecessarily.
+ * Try to automount the mountpoint with a pseudo directory, if the autocell
+ * option is set.
*/
-static int afs_probe_cell_name(struct dentry *dentry)
+static struct dentry *afs_dynroot_lookup_cell(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
- struct afs_cell *cell;
+ struct afs_cell *cell = NULL;
struct afs_net *net = afs_d2net(dentry);
+ struct inode *inode = NULL;
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
- int ret;
+ bool dotted = false;
+ int ret = -ENOENT;
/* Names prefixed with a dot are R/W mounts. */
if (name[0] == '.') {
- if (len == 1)
- return -EINVAL;
name++;
len--;
+ dotted = true;
}
- cell = afs_find_cell(net, name, len, afs_cell_trace_use_probe);
- if (!IS_ERR(cell)) {
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_probe);
- return 0;
+ cell = afs_lookup_cell(net, name, len, NULL,
+ AFS_LOOKUP_CELL_DYNROOT,
+ afs_cell_trace_use_lookup_dynroot);
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ goto out_no_cell;
}
- ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- NULL, NULL, false);
- if (ret == -ENODATA)
- ret = -EDESTADDRREQ;
- return ret;
-}
-
-/*
- * Try to auto mount the mountpoint with pseudo directory, if the autocell
- * operation is setted.
- */
-struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
-{
- struct afs_vnode *vnode = AFS_FS_I(dir);
- struct inode *inode;
- int ret = -ENOENT;
-
- _enter("%p{%pd}, {%llx:%llu}",
- dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
-
- if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
- goto out;
-
- ret = afs_probe_cell_name(dentry);
- if (ret < 0)
- goto out;
-
- inode = afs_iget_pseudo_dir(dir->i_sb, false);
+ inode = afs_iget_pseudo_dir(dir->i_sb, cell->dynroot_ino * 2 + dotted);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto out;
}
- _leave("= %p", inode);
- return inode;
+ dentry->d_fsdata = cell;
+ return d_splice_alias(inode, dentry);
out:
- _leave("= %d", ret);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_dynroot);
+out_no_cell:
+ if (!inode)
+ return d_splice_alias(inode, dentry);
return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
/*
- * Look up @cell in a dynroot directory. This is a substitution for the
- * local cell name for the net namespace.
- */
-static struct dentry *afs_lookup_atcell(struct dentry *dentry)
-{
- struct afs_cell *cell;
- struct afs_net *net = afs_d2net(dentry);
- struct dentry *ret;
- char *name;
- int len;
-
- if (!net->ws_cell)
- return ERR_PTR(-ENOENT);
-
- ret = ERR_PTR(-ENOMEM);
- name = kmalloc(AFS_MAXCELLNAME + 1, GFP_KERNEL);
- if (!name)
- goto out_p;
-
- down_read(&net->cells_lock);
- cell = net->ws_cell;
- if (cell) {
- len = cell->name_len;
- memcpy(name, cell->name, len + 1);
- }
- up_read(&net->cells_lock);
-
- ret = ERR_PTR(-ENOENT);
- if (!cell)
- goto out_n;
-
- ret = lookup_one_len(name, dentry->d_parent, len);
-
- /* We don't want to d_add() the @cell dentry here as we don't want to
- * the cached dentry to hide changes to the local cell name.
- */
-
-out_n:
- kfree(name);
-out_p:
- return ret;
-}
-
-/*
* Look up an entry in a dynroot directory.
*/
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
@@ -222,8 +141,6 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
{
_enter("%pd", dentry);
- ASSERTCMP(d_inode(dentry), ==, NULL);
-
if (flags & LOOKUP_CREATE)
return ERR_PTR(-EOPNOTSUPP);
@@ -234,160 +151,256 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
if (dentry->d_name.len == 5 &&
memcmp(dentry->d_name.name, "@cell", 5) == 0)
- return afs_lookup_atcell(dentry);
+ return afs_lookup_atcell(dir, dentry, 2);
+
+ if (dentry->d_name.len == 6 &&
+ memcmp(dentry->d_name.name, ".@cell", 6) == 0)
+ return afs_lookup_atcell(dir, dentry, 3);
- return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
+ return afs_dynroot_lookup_cell(dir, dentry, flags);
}
const struct inode_operations afs_dynroot_inode_operations = {
.lookup = afs_dynroot_lookup,
};
-/*
- * Dirs in the dynamic root don't need revalidation.
- */
-static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
+static void afs_dynroot_d_release(struct dentry *dentry)
{
- return 1;
+ struct afs_cell *cell = dentry->d_fsdata;
+
+ afs_unuse_cell(cell, afs_cell_trace_unuse_dynroot_mntpt);
}
/*
- * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
- * sleep)
- * - called from dput() when d_count is going to 0.
- * - return 1 to request dentry be unhashed, 0 otherwise
+ * Keep @cell symlink dentries around, but only keep cell autodirs when they're
+ * being used.
*/
-static int afs_dynroot_d_delete(const struct dentry *dentry)
+static int afs_dynroot_delete_dentry(const struct dentry *dentry)
{
- return d_really_is_positive(dentry);
+ const struct qstr *name = &dentry->d_name;
+
+ if (name->len == 5 && memcmp(name->name, "@cell", 5) == 0)
+ return 0;
+ if (name->len == 6 && memcmp(name->name, ".@cell", 6) == 0)
+ return 0;
+ return 1;
}
const struct dentry_operations afs_dynroot_dentry_operations = {
- .d_revalidate = afs_dynroot_d_revalidate,
- .d_delete = afs_dynroot_d_delete,
- .d_release = afs_d_release,
+ .d_delete = afs_dynroot_delete_dentry,
+ .d_release = afs_dynroot_d_release,
.d_automount = afs_d_automount,
};
+static void afs_atcell_delayed_put_cell(void *arg)
+{
+ struct afs_cell *cell = arg;
+
+ afs_put_cell(cell, afs_cell_trace_put_atcell);
+}
+
/*
- * Create a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
+ * Read @cell or .@cell symlinks.
*/
-int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell)
+static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir;
- int ret;
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_cell *cell;
+ struct afs_net *net = afs_i2net(inode);
+ const char *name;
+ bool dotted = vnode->fid.vnode == 3;
- if (!sb || atomic_read(&sb->s_active) == 0)
- return 0;
+ if (!rcu_access_pointer(net->ws_cell))
+ return ERR_PTR(-ENOENT);
- /* Let the ->lookup op do the creation */
- root = sb->s_root;
- inode_lock(root->d_inode);
- subdir = lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR(subdir)) {
- ret = PTR_ERR(subdir);
- goto unlock;
+ if (!dentry) {
+ /* We're in RCU-pathwalk. */
+ cell = rcu_dereference(net->ws_cell);
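+ /* The cell name is stored with a '.' immediately preceding it, so the
+ * dotted name is the same string starting one byte earlier.
+ */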
+ if (dotted)
+ name = cell->name - 1;
+ else
+ name = cell->name;
+ /* Shouldn't need to set a delayed call. */
+ return name;
}
- /* Note that we're retaining an extra ref on the dentry */
- subdir->d_fsdata = (void *)1UL;
- ret = 0;
-unlock:
- inode_unlock(root->d_inode);
- return ret;
+ down_read(&net->cells_lock);
+
+ cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
+ if (dotted)
+ name = cell->name - 1;
+ else
+ name = cell->name;
+ afs_get_cell(cell, afs_cell_trace_get_atcell);
+ set_delayed_call(done, afs_atcell_delayed_put_cell, cell);
+
+ up_read(&net->cells_lock);
+ return name;
}
+static const struct inode_operations afs_atcell_inode_operations = {
+ .get_link = afs_atcell_get_link,
+};
+
/*
- * Remove a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
+ * Create an inode for the @cell or .@cell symlinks.
*/
-void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell)
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino)
{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir;
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
- if (!sb || atomic_read(&sb->s_active) == 0)
- return;
+ inode = iget5_locked(dir->i_sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
- root = sb->s_root;
- inode_lock(root->d_inode);
+ vnode = AFS_FS_I(inode);
- /* Don't want to trigger a lookup call, which will re-add the cell */
- subdir = try_lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR_OR_NULL(subdir)) {
- _debug("lookup %ld", PTR_ERR(subdir));
- goto no_dentry;
+ if (inode_state_read_once(inode) & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 1);
+ inode->i_size = 0;
+ inode->i_mode = S_IFLNK | 0555;
+ inode->i_op = &afs_atcell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ unlock_new_inode(inode);
}
+ return d_splice_alias(inode, dentry);
+}
+
+/*
+ * Transcribe the cell database into readdir content. The caller must hold
+ * net->cells_lock for reading.
+ * Each cell produces two entries, one prefixed with a dot and one not.
+ */
+static int afs_dynroot_readdir_cells(struct afs_net *net, struct dir_context *ctx)
+{
+ const struct afs_cell *cell;
+ loff_t newpos;
+
+ _enter("%llu", ctx->pos);
+
+ for (;;) {
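+ /* ctx->pos encodes the cell index in its upper bits; the bottom bit
+ * selects between the plain and the dotted name.
+ */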
+ unsigned int ix = ctx->pos >> 1;
+
+ cell = idr_get_next(&net->cells_dyn_ino, &ix);
+ if (!cell)
+ return 0;
+ if (READ_ONCE(cell->state) == AFS_CELL_REMOVING ||
+ READ_ONCE(cell->state) == AFS_CELL_DEAD) {
+ ctx->pos += 2;
+ ctx->pos &= ~1;
+ continue;
+ }
- _debug("rmdir %pd %u", subdir, d_count(subdir));
+ newpos = ix << 1;
+ if (newpos > ctx->pos)
+ ctx->pos = newpos;
- if (subdir->d_fsdata) {
- _debug("unpin %u", d_count(subdir));
- subdir->d_fsdata = NULL;
- dput(subdir);
+ _debug("pos %llu -> cell %u", ctx->pos, cell->dynroot_ino);
+
+ if ((ctx->pos & 1) == 0) {
+ if (!dir_emit(ctx, cell->name, cell->name_len,
+ cell->dynroot_ino, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+ if ((ctx->pos & 1) == 1) {
+ if (!dir_emit(ctx, cell->name - 1, cell->name_len + 1,
+ cell->dynroot_ino + 1, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
}
- dput(subdir);
-no_dentry:
- inode_unlock(root->d_inode);
- _leave("");
+ return 0;
}
/*
- * Populate a newly created dynamic root with cell names.
+ * Read the AFS dynamic root directory. This produces a list of cellnames,
+ * dotted and undotted, along with @cell and .@cell links if configured.
*/
-int afs_dynroot_populate(struct super_block *sb)
+static int afs_dynroot_readdir(struct file *file, struct dir_context *ctx)
{
- struct afs_cell *cell;
- struct afs_net *net = afs_sb2net(sb);
- int ret;
+ struct afs_net *net = afs_d2net(file->f_path.dentry);
+ int ret = 0;
- mutex_lock(&net->proc_cells_lock);
+ if (!dir_emit_dots(file, ctx))
+ return 0;
- net->dynroot_sb = sb;
- hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
- ret = afs_dynroot_mkdir(net, cell);
- if (ret < 0)
- goto error;
+ if (ctx->pos == 2) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, "@cell", 5, 2, DT_LNK))
+ return 0;
+ ctx->pos = 3;
+ }
+ if (ctx->pos == 3) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, ".@cell", 6, 3, DT_LNK))
+ return 0;
+ ctx->pos = 4;
}
- ret = 0;
-out:
- mutex_unlock(&net->proc_cells_lock);
+ if ((unsigned long long)ctx->pos <= AFS_MAX_DYNROOT_CELL_INO) {
+ down_read(&net->cells_lock);
+ ret = afs_dynroot_readdir_cells(net, ctx);
+ up_read(&net->cells_lock);
+ }
return ret;
-
-error:
- net->dynroot_sb = NULL;
- goto out;
}
+static const struct file_operations afs_dynroot_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = afs_dynroot_readdir,
+ .fsync = noop_fsync,
+};
+
/*
- * When a dynamic root that's in the process of being destroyed, depopulate it
- * of pinned directories.
+ * Create an inode for a dynamic root directory.
*/
-void afs_dynroot_depopulate(struct super_block *sb)
+struct inode *afs_dynroot_iget_root(struct super_block *sb)
{
- struct afs_net *net = afs_sb2net(sb);
- struct dentry *root = sb->s_root, *subdir, *tmp;
-
- /* Prevent more subdirs from being created */
- mutex_lock(&net->proc_cells_lock);
- if (net->dynroot_sb == sb)
- net->dynroot_sb = NULL;
- mutex_unlock(&net->proc_cells_lock);
-
- if (root) {
- inode_lock(root->d_inode);
-
- /* Remove all the pins for dirs created for manually added cells */
- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
- if (subdir->d_fsdata) {
- subdir->d_fsdata = NULL;
- dput(subdir);
- }
- }
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = { .vid = 0, .vnode = 1, .unique = 1,};
+
+ if (as->volume)
+ fid.vid = as->volume->vid;
+
+ inode = iget5_locked(sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ vnode = AFS_FS_I(inode);
- inode_unlock(root->d_inode);
+ /* Only initialise the inode if it wasn't already extant. */
+ if (inode_state_read_once(inode) & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &afs_dynroot_file_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ unlock_new_inode(inode);
}
+ _leave(" = %p", inode);
+ return inode;
}
diff --git a/fs/afs/file.c b/fs/afs/file.c
index cb6ad61eec3b..f66a92294284 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -14,18 +14,17 @@
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "internal.h"
-static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_readpage(struct file *file, struct page *page);
-static int afs_symlink_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-static int afs_releasepage(struct page *page, gfp_t gfp_flags);
+static int afs_file_mmap_prepare(struct vm_area_desc *desc);
-static void afs_readahead(struct readahead_control *ractl);
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
static void afs_vm_open(struct vm_area_struct *area);
static void afs_vm_close(struct vm_area_struct *area);
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
@@ -35,9 +34,9 @@ const struct file_operations afs_file_operations = {
.release = afs_release,
.llseek = generic_file_llseek,
.read_iter = afs_file_read_iter,
- .write_iter = afs_file_write,
- .mmap = afs_file_mmap,
- .splice_read = generic_file_splice_read,
+ .write_iter = netfs_file_write_iter,
+ .mmap_prepare = afs_file_mmap_prepare,
+ .splice_read = afs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = afs_fsync,
.lock = afs_lock,
@@ -51,24 +50,16 @@ const struct inode_operations afs_file_inode_operations = {
};
const struct address_space_operations afs_file_aops = {
- .readpage = afs_readpage,
- .readahead = afs_readahead,
- .set_page_dirty = afs_set_page_dirty,
- .launder_page = afs_launder_page,
- .releasepage = afs_releasepage,
- .invalidatepage = afs_invalidatepage,
- .write_begin = afs_write_begin,
- .write_end = afs_write_end,
- .writepage = afs_writepage,
+ .direct_IO = noop_direct_IO,
+ .read_folio = netfs_read_folio,
+ .readahead = netfs_readahead,
+ .dirty_folio = netfs_dirty_folio,
+ .release_folio = netfs_release_folio,
+ .invalidate_folio = netfs_invalidate_folio,
+ .migrate_folio = filemap_migrate_folio,
.writepages = afs_writepages,
};
-const struct address_space_operations afs_symlink_aops = {
- .readpage = afs_symlink_readpage,
- .releasepage = afs_releasepage,
- .invalidatepage = afs_invalidatepage,
-};
-
static const struct vm_operations_struct afs_vm_ops = {
.open = afs_vm_open,
.close = afs_vm_close,
@@ -158,7 +149,9 @@ int afs_open(struct inode *inode, struct file *file)
if (file->f_flags & O_TRUNC)
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
-
+
+ fscache_use_cookie(afs_vnode_cache(vnode), file->f_mode & FMODE_WRITE);
+
file->private_data = af;
_leave(" = 0");
return 0;
@@ -177,8 +170,10 @@ error:
*/
int afs_release(struct inode *inode, struct file *file)
{
+ struct afs_vnode_cache_aux aux;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = file->private_data;
+ loff_t i_size;
int ret = 0;
_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
@@ -189,6 +184,15 @@ int afs_release(struct inode *inode, struct file *file)
file->private_data = NULL;
if (af->wb)
afs_put_wb_key(af->wb);
+
+ if ((file->f_mode & FMODE_WRITE)) {
+ i_size = i_size_read(&vnode->netfs.inode);
+ afs_set_cache_aux(vnode, &aux);
+ fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size);
+ } else {
+ fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
+ }
+
key_put(af->key);
kfree(af);
afs_prune_wb_keys(vnode);
@@ -196,50 +200,12 @@ int afs_release(struct inode *inode, struct file *file)
return ret;
}
-/*
- * Allocate a new read record.
- */
-struct afs_read *afs_alloc_read(gfp_t gfp)
-{
- struct afs_read *req;
-
- req = kzalloc(sizeof(struct afs_read), gfp);
- if (req)
- refcount_set(&req->usage, 1);
-
- return req;
-}
-
-/*
- * Dispose of a ref to a read record.
- */
-void afs_put_read(struct afs_read *req)
-{
- if (refcount_dec_and_test(&req->usage)) {
- if (req->cleanup)
- req->cleanup(req);
- key_put(req->key);
- kfree(req);
- }
-}
-
static void afs_fetch_data_notify(struct afs_operation *op)
{
- struct afs_read *req = op->fetch.req;
- struct netfs_read_subrequest *subreq = req->subreq;
- int error = op->error;
-
- if (error == -ECONNABORTED)
- error = afs_abort_to_error(op->ac.abort_code);
- req->error = error;
-
- if (subreq) {
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
- req->subreq = NULL;
- } else if (req->done) {
- req->done(req);
- }
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
+
+ subreq->error = afs_op_error(op);
+ netfs_read_subreq_terminated(subreq);
}
static void afs_fetch_data_success(struct afs_operation *op)
@@ -249,305 +215,293 @@ static void afs_fetch_data_success(struct afs_operation *op)
_enter("op=%08x", op->debug_id);
afs_vnode_commit_status(op, &op->file[0]);
afs_stat_v(vnode, n_fetches);
- atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+ atomic_long_add(op->fetch.subreq->transferred, &op->net->n_fetch_bytes);
afs_fetch_data_notify(op);
}
-static void afs_fetch_data_put(struct afs_operation *op)
+static void afs_fetch_data_aborted(struct afs_operation *op)
{
- op->fetch.req->error = op->error;
- afs_put_read(op->fetch.req);
+ afs_check_for_remote_deletion(op);
+ afs_fetch_data_notify(op);
}
-static const struct afs_operation_ops afs_fetch_data_operation = {
+const struct afs_operation_ops afs_fetch_data_operation = {
.issue_afs_rpc = afs_fs_fetch_data,
.issue_yfs_rpc = yfs_fs_fetch_data,
.success = afs_fetch_data_success,
- .aborted = afs_check_for_remote_deletion,
+ .aborted = afs_fetch_data_aborted,
.failed = afs_fetch_data_notify,
- .put = afs_fetch_data_put,
};
-/*
- * Fetch file data from the volume.
- */
-int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
-{
- struct afs_operation *op;
-
- _enter("%s{%llx:%llu.%u},%x,,,",
- vnode->volume->name,
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- key_serial(req->key));
-
- op = afs_alloc_operation(req->key, vnode->volume);
- if (IS_ERR(op)) {
- if (req->subreq)
- netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
- return PTR_ERR(op);
- }
-
- afs_op_set_vnode(op, 0, vnode);
-
- op->fetch.req = afs_get_read(req);
- op->ops = &afs_fetch_data_operation;
- return afs_do_sync_operation(op);
-}
-
-static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void afs_issue_read_call(struct afs_operation *op)
{
- struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
- struct afs_read *fsreq;
-
- fsreq = afs_alloc_read(GFP_NOFS);
- if (!fsreq)
- return netfs_subreq_terminated(subreq, -ENOMEM, false);
-
- fsreq->subreq = subreq;
- fsreq->pos = subreq->start + subreq->transferred;
- fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = key_get(subreq->rreq->netfs_priv);
- fsreq->vnode = vnode;
- fsreq->iter = &fsreq->def_iter;
-
- iov_iter_xarray(&fsreq->def_iter, READ,
- &fsreq->vnode->vfs_inode.i_mapping->i_pages,
- fsreq->pos, fsreq->len);
-
- afs_fetch_data(fsreq->vnode, fsreq);
- afs_put_read(fsreq);
+ op->call_responded = false;
+ op->call_error = 0;
+ op->call_abort_code = 0;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags))
+ yfs_fs_fetch_data(op);
+ else
+ afs_fs_fetch_data(op);
}
-static int afs_symlink_readpage(struct file *file, struct page *page)
+static void afs_end_read(struct afs_operation *op)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
- struct afs_read *fsreq;
- struct folio *folio = page_folio(page);
- int ret;
-
- fsreq = afs_alloc_read(GFP_NOFS);
- if (!fsreq)
- return -ENOMEM;
-
- fsreq->pos = folio_pos(folio);
- fsreq->len = folio_size(folio);
- fsreq->vnode = vnode;
- fsreq->iter = &fsreq->def_iter;
- iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
- fsreq->pos, fsreq->len);
+ if (op->call_responded && op->server)
+ set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
- ret = afs_fetch_data(fsreq->vnode, fsreq);
- if (ret == 0)
- SetPageUptodate(page);
- unlock_page(page);
- return ret;
-}
+ if (!afs_op_error(op))
+ afs_fetch_data_success(op);
+ else if (op->cumul_error.aborted)
+ afs_fetch_data_aborted(op);
+ else
+ afs_fetch_data_notify(op);
-static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
-{
- rreq->netfs_priv = key_get(afs_file_key(file));
+ afs_end_vnode_operation(op);
+ afs_put_operation(op);
}
-static bool afs_is_cache_enabled(struct inode *inode)
+/*
+ * Perform I/O processing on an asynchronous call. The work item carries a ref
+ * to the call struct that we either need to release or to pass on.
+ */
+static void afs_read_receive(struct afs_call *call)
{
- struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
+ struct afs_operation *op = call->op;
+ enum afs_call_state state;
- return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
-}
+ _enter("");
-static int afs_begin_cache_operation(struct netfs_read_request *rreq)
-{
- struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
+ state = READ_ONCE(call->state);
+ if (state == AFS_CALL_COMPLETE)
+ return;
+ trace_afs_read_recv(op, call);
- return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode));
-}
+ while (state < AFS_CALL_COMPLETE && READ_ONCE(call->need_attention)) {
+ WRITE_ONCE(call->need_attention, false);
+ afs_deliver_to_call(call);
+ state = READ_ONCE(call->state);
+ }
-static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
- struct folio *folio, void **_fsdata)
-{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ if (state < AFS_CALL_COMPLETE) {
+ netfs_read_subreq_progress(op->fetch.subreq);
+ if (rxrpc_kernel_check_life(call->net->socket, call->rxcall))
+ return;
+ /* rxrpc terminated the call. */
+ afs_set_call_complete(call, call->error, call->abort_code);
+ }
- return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
-}
+ op->call_abort_code = call->abort_code;
+ op->call_error = call->error;
+ op->call_responded = call->responded;
+ op->call = NULL;
+ call->op = NULL;
+ afs_put_call(call);
+
+ /* If the call failed, then we need to crank the server rotation
+ * handle and try the next.
+ */
+ if (afs_select_fileserver(op)) {
+ afs_issue_read_call(op);
+ return;
+ }
-static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
-{
- key_put(netfs_priv);
+ afs_end_read(op);
}
-const struct netfs_read_request_ops afs_req_ops = {
- .init_rreq = afs_init_rreq,
- .is_cache_enabled = afs_is_cache_enabled,
- .begin_cache_operation = afs_begin_cache_operation,
- .check_write_begin = afs_check_write_begin,
- .issue_op = afs_req_issue_op,
- .cleanup = afs_priv_cleanup,
-};
-
-static int afs_readpage(struct file *file, struct page *page)
+void afs_fetch_data_async_rx(struct work_struct *work)
{
- struct folio *folio = page_folio(page);
+ struct afs_call *call = container_of(work, struct afs_call, async_work);
- return netfs_readpage(file, folio, &afs_req_ops, NULL);
+ afs_read_receive(call);
+ afs_put_call(call);
}
-static void afs_readahead(struct readahead_control *ractl)
+void afs_fetch_data_immediate_cancel(struct afs_call *call)
{
- netfs_readahead(ractl, &afs_req_ops, NULL);
+ if (call->async) {
+ afs_get_call(call, afs_call_trace_wake);
+ if (!queue_work(afs_async_calls, &call->async_work))
+ afs_deferred_put_call(call);
+ flush_work(&call->async_work);
+ }
}
/*
- * Adjust the dirty region of the page on truncation or full invalidation,
- * getting rid of the markers altogether if the region is entirely invalidated.
+ * Fetch file data from the volume.
*/
-static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
- unsigned int length)
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
{
- struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
- unsigned long priv;
- unsigned int f, t, end = offset + length;
-
- priv = (unsigned long)folio_get_private(folio);
+ struct afs_operation *op;
+ struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+ struct key *key = subreq->rreq->netfs_priv;
- /* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == folio_size(folio))
- goto full_invalidate;
+ _enter("%s{%llx:%llu.%u},%x,,,",
+ vnode->volume->name,
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ key_serial(key));
- /* If the page was dirtied by page_mkwrite(), the PTE stays writable
- * and we don't get another notification to tell us to expand it
- * again.
- */
- if (afs_is_folio_dirty_mmapped(priv))
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op)) {
+ subreq->error = PTR_ERR(op);
+ netfs_read_subreq_terminated(subreq);
return;
+ }
- /* We may need to shorten the dirty region */
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
+ afs_op_set_vnode(op, 0, vnode);
- if (t <= offset || f >= end)
- return; /* Doesn't overlap */
+ op->fetch.subreq = subreq;
+ op->ops = &afs_fetch_data_operation;
- if (f < offset && t > end)
- return; /* Splits the dirty region - just absorb it */
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- if (f >= offset && t <= end)
- goto undirty;
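+ /* Readahead and reads with a completion iocb are driven asynchronously;
+ * other reads use the synchronous operation path.
+ */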
+ if (subreq->rreq->origin == NETFS_READAHEAD ||
+ subreq->rreq->iocb) {
+ op->flags |= AFS_OPERATION_ASYNC;
- if (f < offset)
- t = offset;
- else
- f = end;
- if (f == t)
- goto undirty;
-
- priv = afs_folio_dirty(folio, f, t);
- folio_change_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
- return;
-
-undirty:
- trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
- folio_clear_dirty_for_io(folio);
-full_invalidate:
- trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
- folio_detach_private(folio);
+ if (!afs_begin_vnode_operation(op)) {
+ subreq->error = afs_put_operation(op);
+ netfs_read_subreq_terminated(subreq);
+ return;
+ }
+
+ if (!afs_select_fileserver(op)) {
+ afs_end_read(op);
+ return;
+ }
+
+ afs_issue_read_call(op);
+ } else {
+ afs_do_sync_operation(op);
+ }
}
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- * the entire page)
- */
-static void afs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
{
- struct folio *folio = page_folio(page);
+ struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
- _enter("{%lu},%u,%u", folio_index(folio), offset, length);
+ if (file)
+ rreq->netfs_priv = key_get(afs_file_key(file));
+ rreq->rsize = 256 * 1024;
+ rreq->wsize = 256 * 1024 * 1024;
+
+ switch (rreq->origin) {
+ case NETFS_READ_SINGLE:
+ if (!file) {
+ struct key *key = afs_request_key(vnode->volume->cell);
+
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+ rreq->netfs_priv = key;
+ }
+ break;
+ case NETFS_WRITEBACK:
+ case NETFS_WRITETHROUGH:
+ case NETFS_UNBUFFERED_WRITE:
+ case NETFS_DIO_WRITE:
+ if (S_ISREG(rreq->inode->i_mode))
+ rreq->io_streams[0].avail = true;
+ break;
+ case NETFS_WRITEBACK_SINGLE:
+ default:
+ break;
+ }
+ return 0;
+}
- BUG_ON(!PageLocked(page));
+static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
+ struct folio **foliop, void **_fsdata)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- if (PagePrivate(page))
- afs_invalidate_dirty(folio, offset, length);
+ return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
+}
- folio_wait_fscache(folio);
- _leave("");
+static void afs_free_request(struct netfs_io_request *rreq)
+{
+ key_put(rreq->netfs_priv);
+ afs_put_wb_key(rreq->netfs_priv2);
}
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static int afs_releasepage(struct page *page, gfp_t gfp_flags)
+static void afs_update_i_size(struct inode *inode, loff_t new_i_size)
{
- struct folio *folio = page_folio(page);
- struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{{%llx:%llu}[%lu],%lx},%x",
- vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
- gfp_flags);
-
- /* deny if page is being written to the cache and the caller hasn't
- * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
- if (folio_test_fscache(folio)) {
- if (!(gfp_flags & __GFP_DIRECT_RECLAIM) || !(gfp_flags & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
-#endif
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ loff_t i_size;
- if (folio_test_private(folio)) {
- trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
- folio_detach_private(folio);
+ write_seqlock(&vnode->cb_lock);
+ i_size = i_size_read(&vnode->netfs.inode);
+ if (new_i_size > i_size) {
+ i_size_write(&vnode->netfs.inode, new_i_size);
+ inode_set_bytes(&vnode->netfs.inode, new_i_size);
}
+ write_sequnlock(&vnode->cb_lock);
+ fscache_update_cookie(afs_vnode_cache(vnode), NULL, &new_i_size);
+}
- /* Indicate that the folio can be released */
- _leave(" = T");
- return true;
+static void afs_netfs_invalidate_cache(struct netfs_io_request *wreq)
+{
+ struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+
+ afs_invalidate_cache(vnode, 0);
}
+const struct netfs_request_ops afs_req_ops = {
+ .init_request = afs_init_request,
+ .free_request = afs_free_request,
+ .check_write_begin = afs_check_write_begin,
+ .issue_read = afs_issue_read,
+ .update_i_size = afs_update_i_size,
+ .invalidate_cache = afs_netfs_invalidate_cache,
+ .begin_writeback = afs_begin_writeback,
+ .prepare_write = afs_prepare_write,
+ .issue_write = afs_issue_write,
+ .retry_request = afs_retry_request,
+};
+
static void afs_add_open_mmap(struct afs_vnode *vnode)
{
if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
- down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ down_write(&vnode->volume->open_mmaps_lock);
- list_add_tail(&vnode->cb_mmap_link,
- &vnode->volume->cell->fs_open_mmaps);
+ if (list_empty(&vnode->cb_mmap_link))
+ list_add_tail(&vnode->cb_mmap_link, &vnode->volume->open_mmaps);
- up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ up_write(&vnode->volume->open_mmaps_lock);
}
}
static void afs_drop_open_mmap(struct afs_vnode *vnode)
{
- if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+ if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
return;
- down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ down_write(&vnode->volume->open_mmaps_lock);
- if (atomic_read(&vnode->cb_nr_mmap) == 0)
+ read_seqlock_excl(&vnode->cb_lock);
+ /* The only place where ->cb_nr_mmap may hit 0;
+ * see __afs_break_callback() for the other side. */
+ if (atomic_dec_and_test(&vnode->cb_nr_mmap))
list_del_init(&vnode->cb_mmap_link);
+ read_sequnlock_excl(&vnode->cb_lock);
- up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ up_write(&vnode->volume->open_mmaps_lock);
flush_work(&vnode->cb_work);
}
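Not part of the patch, but a note on the pattern above: atomic_add_unless(&vnode->cb_nr_mmap, -1, 1) only performs the decrement while at least one other mapping remains, so the final 1-to-0 transition is always taken further down, under open_mmaps_lock and cb_lock, where the list removal can be done race-free against __afs_break_callback(). A minimal user-space sketch of that "decrement unless last" primitive using C11 atomics follows; the helper name dec_unless_last() is invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *v unless it currently equals 1 (i.e. unless this would drop the
 * last reference).  Returns true if the decrement was performed.  This mirrors
 * the kernel's atomic_add_unless(v, -1, 1) as used above. */
static bool dec_unless_last(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 1)
			return false;	/* last reference: caller must take the lock */
	} while (!atomic_compare_exchange_weak(v, &old, old - 1));
	return true;
}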
/*
* Handle setting up a memory mapping on an AFS file.
*/
-static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int afs_file_mmap_prepare(struct vm_area_desc *desc)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(desc->file));
int ret;
afs_add_open_mmap(vnode);
- ret = generic_file_mmap(file, vma);
+ ret = generic_file_mmap_prepare(desc);
if (ret == 0)
- vma->vm_ops = &afs_vm_ops;
+ desc->vm_ops = &afs_vm_ops;
else
afs_drop_open_mmap(vnode);
return ret;
@@ -566,31 +520,47 @@ static void afs_vm_close(struct vm_area_struct *vma)
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
- struct afs_file *af = vmf->vma->vm_file->private_data;
- switch (afs_validate(vnode, af->key)) {
- case 0:
+ if (afs_check_validity(vnode))
return filemap_map_pages(vmf, start_pgoff, end_pgoff);
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -EINTR:
- case -ERESTARTSYS:
- return VM_FAULT_RETRY;
- case -ESTALE:
- default:
- return VM_FAULT_SIGBUS;
- }
+ return 0;
}
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = iocb->ki_filp->private_data;
- int ret;
+ ssize_t ret;
- ret = afs_validate(vnode, af->key);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return netfs_unbuffered_read_iter(iocb, iter);
+
+ ret = netfs_start_io_read(inode);
if (ret < 0)
return ret;
+ ret = afs_validate(vnode, af->key);
+ if (ret == 0)
+ ret = filemap_read(iocb, iter, 0);
+ netfs_end_io_read(inode);
+ return ret;
+}
- return generic_file_read_iter(iocb, iter);
+static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct inode *inode = file_inode(in);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_file *af = in->private_data;
+ ssize_t ret;
+
+ ret = netfs_start_io_read(inode);
+ if (ret < 0)
+ return ret;
+ ret = afs_validate(vnode, af->key);
+ if (ret == 0)
+ ret = filemap_splice_read(in, ppos, pipe, len, flags);
+ netfs_end_io_read(inode);
+ return ret;
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index c4210a3964d8..f0e96a35093f 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
if (call->error == 0) {
spin_lock(&vnode->lock);
trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
- vnode->locked_at = call->reply_time;
+ vnode->locked_at = call->issue_time;
afs_schedule_lock_extension(vnode);
spin_unlock(&vnode->lock);
}
@@ -93,13 +93,13 @@ static void afs_grant_locks(struct afs_vnode *vnode)
bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
- if (!exclusive && p->fl_type == F_WRLCK)
+ if (!exclusive && lock_is_write(p))
continue;
list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
p->fl_u.afs.state = AFS_LOCK_GRANTED;
trace_afs_flock_op(vnode, p, afs_flock_op_grant);
- wake_up(&p->fl_wait);
+ locks_wake_up(p);
}
}
@@ -112,25 +112,24 @@ static void afs_next_locker(struct afs_vnode *vnode, int error)
{
struct file_lock *p, *_p, *next = NULL;
struct key *key = vnode->lock_key;
- unsigned int fl_type = F_RDLCK;
+ unsigned int type = F_RDLCK;
_enter("");
if (vnode->lock_type == AFS_LOCK_WRITE)
- fl_type = F_WRLCK;
+ type = F_WRLCK;
list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
if (error &&
- p->fl_type == fl_type &&
- afs_file_key(p->fl_file) == key) {
+ p->c.flc_type == type &&
+ afs_file_key(p->c.flc_file) == key) {
list_del_init(&p->fl_u.afs.link);
p->fl_u.afs.state = error;
- wake_up(&p->fl_wait);
+ locks_wake_up(p);
}
/* Select the next locker to hand off to. */
- if (next &&
- (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
+ if (next && (lock_is_write(next) || lock_is_read(p)))
continue;
next = p;
}
@@ -142,7 +141,7 @@ static void afs_next_locker(struct afs_vnode *vnode, int error)
afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
trace_afs_flock_op(vnode, next, afs_flock_op_wake);
- wake_up(&next->fl_wait);
+ locks_wake_up(next);
} else {
afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
@@ -166,7 +165,7 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
struct file_lock, fl_u.afs.link);
list_del_init(&p->fl_u.afs.link);
p->fl_u.afs.state = -ENOENT;
- wake_up(&p->fl_wait);
+ locks_wake_up(p);
}
key_put(vnode->lock_key);
@@ -451,7 +450,7 @@ static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
*/
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
- struct inode *inode = locks_inode(file);
+ struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
afs_lock_type_t type;
@@ -464,14 +463,14 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
_enter("{%llx:%llu},%llu-%llu,%u,%u",
vnode->fid.vid, vnode->fid.vnode,
- fl->fl_start, fl->fl_end, fl->fl_type, mode);
+ fl->fl_start, fl->fl_end, fl->c.flc_type, mode);
fl->fl_ops = &afs_lock_ops;
INIT_LIST_HEAD(&fl->fl_u.afs.link);
fl->fl_u.afs.state = AFS_LOCK_PENDING;
partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
- type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ type = lock_is_read(fl) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
if (mode == afs_flock_mode_write && partial)
type = AFS_LOCK_WRITE;
@@ -524,7 +523,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
}
if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
- !(fl->fl_flags & FL_SLEEP)) {
+ !(fl->c.flc_flags & FL_SLEEP)) {
ret = -EAGAIN;
if (type == AFS_LOCK_READ) {
if (vnode->status.lock_count == -1)
@@ -621,7 +620,7 @@ skip_server_lock:
return 0;
lock_is_contended:
- if (!(fl->fl_flags & FL_SLEEP)) {
+ if (!(fl->c.flc_flags & FL_SLEEP)) {
list_del_init(&fl->fl_u.afs.link);
afs_next_locker(vnode, 0);
ret = -EAGAIN;
@@ -641,7 +640,7 @@ need_to_wait:
spin_unlock(&vnode->lock);
trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
- ret = wait_event_interruptible(fl->fl_wait,
+ ret = wait_event_interruptible(fl->c.flc_wait,
fl->fl_u.afs.state != AFS_LOCK_PENDING);
trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
@@ -701,10 +700,11 @@ error:
*/
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
int ret;
- _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+ _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode,
+ fl->c.flc_type);
trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);
@@ -721,7 +721,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
*/
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
struct key *key = afs_file_key(file);
int ret, lock_count;
@@ -730,11 +730,11 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
return -ENOENT;
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
/* check local lock records first */
posix_test_lock(file, fl);
- if (fl->fl_type == F_UNLCK) {
+ if (lock_is_unlock(fl)) {
/* no local locks; consult the server */
ret = afs_fetch_status(vnode, key, false, NULL);
if (ret < 0)
@@ -743,18 +743,18 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
lock_count = READ_ONCE(vnode->status.lock_count);
if (lock_count != 0) {
if (lock_count > 0)
- fl->fl_type = F_RDLCK;
+ fl->c.flc_type = F_RDLCK;
else
- fl->fl_type = F_WRLCK;
+ fl->c.flc_type = F_WRLCK;
fl->fl_start = 0;
fl->fl_end = OFFSET_MAX;
- fl->fl_pid = 0;
+ fl->c.flc_pid = 0;
}
}
ret = 0;
error:
- _leave(" = %d [%hd]", ret, fl->fl_type);
+ _leave(" = %d [%hd]", ret, fl->c.flc_type);
return ret;
}
@@ -763,13 +763,13 @@ error:
*/
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
enum afs_flock_operation op;
int ret;
_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
vnode->fid.vid, vnode->fid.vnode, cmd,
- fl->fl_type, fl->fl_flags,
+ fl->c.flc_type, fl->c.flc_flags,
(long long) fl->fl_start, (long long) fl->fl_end);
if (IS_GETLK(cmd))
@@ -778,7 +778,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
trace_afs_flock_op(vnode, fl, afs_flock_op_lock);
- if (fl->fl_type == F_UNLCK)
+ if (lock_is_unlock(fl))
ret = afs_do_unlk(file, fl);
else
ret = afs_do_setlk(file, fl);
@@ -798,13 +798,13 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
*/
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
enum afs_flock_operation op;
int ret;
_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
vnode->fid.vid, vnode->fid.vnode, cmd,
- fl->fl_type, fl->fl_flags);
+ fl->c.flc_type, fl->c.flc_flags);
/*
* No BSD flocks over NFS allowed.
@@ -813,14 +813,14 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
* Not sure whether that would be unique, though, or whether
* that would break in other places.
*/
- if (!(fl->fl_flags & FL_FLOCK))
+ if (!(fl->c.flc_flags & FL_FLOCK))
return -ENOLCK;
fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
trace_afs_flock_op(vnode, fl, afs_flock_op_flock);
/* we're simulating flock() locks using posix locks on the server */
- if (fl->fl_type == F_UNLCK)
+ if (lock_is_unlock(fl))
ret = afs_do_unlk(file, fl);
else
ret = afs_do_setlk(file, fl);
@@ -843,7 +843,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
*/
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));
_enter("");
@@ -861,7 +861,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
*/
static void afs_fl_release_private(struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));
_enter("");
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index d222dfbe976b..8418813ee043 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -35,18 +35,119 @@ struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *vo
key_get(key);
}
- op->key = key;
- op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op);
- op->net = volume->cell->net;
- op->cb_v_break = volume->cb_v_break;
- op->debug_id = atomic_inc_return(&afs_operation_debug_counter);
- op->error = -EDESTADDRREQ;
- op->ac.error = SHRT_MAX;
+ op->key = key;
+ op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op);
+ op->net = volume->cell->net;
+ op->cb_v_break = atomic_read(&volume->cb_v_break);
+ op->pre_volsync.creation = volume->creation_time;
+ op->pre_volsync.update = volume->update_time;
+ op->debug_id = atomic_inc_return(&afs_operation_debug_counter);
+ op->nr_iterations = -1;
+ afs_op_set_error(op, -EDESTADDRREQ);
_leave(" = [op=%08x]", op->debug_id);
return op;
}
+struct afs_io_locker {
+ struct list_head link;
+ struct task_struct *task;
+ unsigned long have_lock;
+};
+
+/*
+ * Unlock the I/O lock on a vnode.
+ */
+static void afs_unlock_for_io(struct afs_vnode *vnode)
+{
+ struct afs_io_locker *locker;
+
+ spin_lock(&vnode->lock);
+ locker = list_first_entry_or_null(&vnode->io_lock_waiters,
+ struct afs_io_locker, link);
+ if (locker) {
+ list_del(&locker->link);
+ smp_store_release(&locker->have_lock, 1); /* The unlock barrier. */
+ smp_mb__after_atomic(); /* Store have_lock before task state */
+ wake_up_process(locker->task);
+ } else {
+ clear_bit(AFS_VNODE_IO_LOCK, &vnode->flags);
+ }
+ spin_unlock(&vnode->lock);
+}
+
+/*
+ * Lock the I/O lock on a vnode uninterruptibly. We can't use an ordinary
+ * mutex as lockdep will complain if we unlock it in the wrong thread.
+ */
+static void afs_lock_for_io(struct afs_vnode *vnode)
+{
+ struct afs_io_locker myself = { .task = current, };
+
+ spin_lock(&vnode->lock);
+
+ if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
+ spin_unlock(&vnode->lock);
+ return;
+ }
+
+ list_add_tail(&myself.link, &vnode->io_lock_waiters);
+ spin_unlock(&vnode->lock);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (smp_load_acquire(&myself.have_lock)) /* The lock barrier */
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+}
+
+/*
+ * Lock the I/O lock on a vnode interruptibly. We can't use an ordinary mutex
+ * as lockdep will complain if we unlock it in the wrong thread.
+ */
+static int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
+{
+ struct afs_io_locker myself = { .task = current, };
+ int ret = 0;
+
+ spin_lock(&vnode->lock);
+
+ if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
+ spin_unlock(&vnode->lock);
+ return 0;
+ }
+
+ list_add_tail(&myself.link, &vnode->io_lock_waiters);
+ spin_unlock(&vnode->lock);
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (smp_load_acquire(&myself.have_lock) || /* The lock barrier */
+ signal_pending(current))
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ /* If we got a signal, try to transfer the lock onto the next
+ * waiter.
+ */
+ if (unlikely(signal_pending(current))) {
+ spin_lock(&vnode->lock);
+ if (myself.have_lock) {
+ spin_unlock(&vnode->lock);
+ afs_unlock_for_io(vnode);
+ } else {
+ list_del(&myself.link);
+ spin_unlock(&vnode->lock);
+ }
+ ret = -ERESTARTSYS;
+ }
+ return ret;
+}
+
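Not part of the patch: a rough user-space analogue, using pthreads, of the hand-off lock implemented by afs_lock_for_io() and afs_unlock_for_io() above. The point of the pattern is that ownership is passed directly to the first queued waiter and may be released from a different thread than acquired it, which an ordinary mutex (and lockdep) cannot express, as the comments above note. All names below are invented; this is a compile-only sketch, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct io_waiter {
	struct io_waiter *next;
	bool have_lock;
};

struct io_lock {
	pthread_mutex_t guard;		/* protects everything below */
	pthread_cond_t wakeup;
	bool locked;			/* like AFS_VNODE_IO_LOCK */
	struct io_waiter *head, *tail;	/* like vnode->io_lock_waiters */
};

#define IO_LOCK_INIT { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, NULL, NULL }

static void io_lock_acquire(struct io_lock *l)
{
	struct io_waiter self = { .next = NULL, .have_lock = false };

	pthread_mutex_lock(&l->guard);
	if (!l->locked) {			/* fast path: uncontended */
		l->locked = true;
		pthread_mutex_unlock(&l->guard);
		return;
	}
	if (l->tail)				/* queue ourselves at the tail */
		l->tail->next = &self;
	else
		l->head = &self;
	l->tail = &self;
	while (!self.have_lock)			/* sleep until handed the lock */
		pthread_cond_wait(&l->wakeup, &l->guard);
	pthread_mutex_unlock(&l->guard);
}

static void io_lock_release(struct io_lock *l)
{
	struct io_waiter *w;

	pthread_mutex_lock(&l->guard);
	w = l->head;
	if (w) {				/* hand ownership straight over */
		l->head = w->next;
		if (!l->head)
			l->tail = NULL;
		w->have_lock = true;
		pthread_cond_broadcast(&l->wakeup);
	} else {
		l->locked = false;		/* nobody waiting: drop the lock */
	}
	pthread_mutex_unlock(&l->guard);
}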
/*
* Lock the vnode(s) being operated upon.
*/
@@ -58,7 +159,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
_enter("");
if (op->flags & AFS_OPERATION_UNINTR) {
- mutex_lock(&vnode->io_lock);
+ afs_lock_for_io(vnode);
op->flags |= AFS_OPERATION_LOCK_0;
_leave(" = t [1]");
return true;
@@ -70,8 +171,8 @@ static bool afs_get_io_locks(struct afs_operation *op)
if (vnode2 > vnode)
swap(vnode, vnode2);
- if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
- op->error = -ERESTARTSYS;
+ if (afs_lock_for_io_interruptible(vnode) < 0) {
+ afs_op_set_error(op, -ERESTARTSYS);
op->flags |= AFS_OPERATION_STOP;
_leave(" = f [I 0]");
return false;
@@ -79,10 +180,10 @@ static bool afs_get_io_locks(struct afs_operation *op)
op->flags |= AFS_OPERATION_LOCK_0;
if (vnode2) {
- if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
- op->error = -ERESTARTSYS;
+ if (afs_lock_for_io_interruptible(vnode2) < 0) {
+ afs_op_set_error(op, -ERESTARTSYS);
op->flags |= AFS_OPERATION_STOP;
- mutex_unlock(&vnode->io_lock);
+ afs_unlock_for_io(vnode);
op->flags &= ~AFS_OPERATION_LOCK_0;
_leave(" = f [I 1]");
return false;
@@ -102,9 +203,9 @@ static void afs_drop_io_locks(struct afs_operation *op)
_enter("");
if (op->flags & AFS_OPERATION_LOCK_1)
- mutex_unlock(&vnode2->io_lock);
+ afs_unlock_for_io(vnode2);
if (op->flags & AFS_OPERATION_LOCK_0)
- mutex_unlock(&vnode->io_lock);
+ afs_unlock_for_io(vnode);
}
static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
@@ -147,7 +248,7 @@ bool afs_begin_vnode_operation(struct afs_operation *op)
afs_prepare_vnode(op, &op->file[0], 0);
afs_prepare_vnode(op, &op->file[1], 1);
- op->cb_v_break = op->volume->cb_v_break;
+ op->cb_v_break = atomic_read(&op->volume->cb_v_break);
_leave(" = true");
return true;
}
@@ -155,20 +256,20 @@ bool afs_begin_vnode_operation(struct afs_operation *op)
/*
* Tidy up a filesystem cursor and unlock the vnode.
*/
-static void afs_end_vnode_operation(struct afs_operation *op)
+void afs_end_vnode_operation(struct afs_operation *op)
{
_enter("");
- if (op->error == -EDESTADDRREQ ||
- op->error == -EADDRNOTAVAIL ||
- op->error == -ENETUNREACH ||
- op->error == -EHOSTUNREACH)
+ switch (afs_op_error(op)) {
+ case -EDESTADDRREQ:
+ case -EADDRNOTAVAIL:
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
afs_dump_edestaddrreq(op);
+ break;
+ }
afs_drop_io_locks(op);
-
- if (op->error == -ECONNABORTED)
- op->error = afs_abort_to_error(op->ac.abort_code);
}
/*
@@ -179,37 +280,43 @@ void afs_wait_for_operation(struct afs_operation *op)
_enter("");
while (afs_select_fileserver(op)) {
- op->cb_s_break = op->server->cb_s_break;
+ op->call_responded = false;
+ op->call_error = 0;
+ op->call_abort_code = 0;
if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
op->ops->issue_yfs_rpc)
op->ops->issue_yfs_rpc(op);
else if (op->ops->issue_afs_rpc)
op->ops->issue_afs_rpc(op);
else
- op->ac.error = -ENOTSUPP;
-
- if (op->call)
- op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
+ op->call_error = -ENOTSUPP;
+
+ if (op->call) {
+ afs_wait_for_call_to_complete(op->call);
+ op->call_abort_code = op->call->abort_code;
+ op->call_error = op->call->error;
+ op->call_responded = op->call->responded;
+ afs_put_call(op->call);
+ }
}
- switch (op->error) {
- case 0:
+ if (op->call_responded && op->server)
+ set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
+
+ if (!afs_op_error(op)) {
_debug("success");
op->ops->success(op);
- break;
- case -ECONNABORTED:
+ } else if (op->cumul_error.aborted) {
if (op->ops->aborted)
op->ops->aborted(op);
- fallthrough;
- default:
+ } else {
if (op->ops->failed)
op->ops->failed(op);
- break;
}
afs_end_vnode_operation(op);
- if (op->error == 0 && op->ops->edit_dir) {
+ if (!afs_op_error(op) && op->ops->edit_dir) {
_debug("edit_dir");
op->ops->edit_dir(op);
}
@@ -221,7 +328,8 @@ void afs_wait_for_operation(struct afs_operation *op)
*/
int afs_put_operation(struct afs_operation *op)
{
- int i, ret = op->error;
+ struct afs_addr_list *alist;
+ int i, ret = afs_op_error(op);
_enter("op=%08x,%d", op->debug_id, ret);
@@ -232,20 +340,30 @@ int afs_put_operation(struct afs_operation *op)
if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
if (op->file[0].put_vnode)
- iput(&op->file[0].vnode->vfs_inode);
+ iput(&op->file[0].vnode->netfs.inode);
if (op->file[1].put_vnode)
- iput(&op->file[1].vnode->vfs_inode);
+ iput(&op->file[1].vnode->netfs.inode);
if (op->more_files) {
for (i = 0; i < op->nr_files - 2; i++)
if (op->more_files[i].put_vnode)
- iput(&op->more_files[i].vnode->vfs_inode);
+ iput(&op->more_files[i].vnode->netfs.inode);
kfree(op->more_files);
}
- afs_end_cursor(&op->ac);
+ if (op->estate) {
+ alist = op->estate->addresses;
+ if (alist) {
+ if (op->call_responded &&
+ op->addr_index != alist->preferred &&
+ test_bit(alist->preferred, &op->addr_tried))
+ WRITE_ONCE(alist->preferred, op->addr_index);
+ }
+ }
+
+ afs_clear_server_states(op);
afs_put_serverlist(op->net, op->server_list);
- afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
+ afs_put_volume(op->volume, afs_volume_trace_put_put_op);
key_put(op->key);
kfree(op);
return ret;
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index c0031a3ab42f..e0030ac74ea0 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -15,6 +15,42 @@
static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
static unsigned int afs_fs_probe_slow_poll_interval = 5 * 60 * HZ;
+struct afs_endpoint_state *afs_get_endpoint_state(struct afs_endpoint_state *estate,
+ enum afs_estate_trace where)
+{
+ if (estate) {
+ int r;
+
+ __refcount_inc(&estate->ref, &r);
+ trace_afs_estate(estate->server_id, estate->probe_seq, r, where);
+ }
+ return estate;
+}
+
+static void afs_endpoint_state_rcu(struct rcu_head *rcu)
+{
+ struct afs_endpoint_state *estate = container_of(rcu, struct afs_endpoint_state, rcu);
+
+ trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
+ afs_estate_trace_free);
+ afs_put_addrlist(estate->addresses, afs_alist_trace_put_estate);
+ kfree(estate);
+}
+
+void afs_put_endpoint_state(struct afs_endpoint_state *estate, enum afs_estate_trace where)
+{
+ if (estate) {
+ unsigned int server_id = estate->server_id, probe_seq = estate->probe_seq;
+ bool dead;
+ int r;
+
+ dead = __refcount_dec_and_test(&estate->ref, &r);
+ trace_afs_estate(server_id, probe_seq, r, where);
+ if (dead)
+ call_rcu(&estate->rcu, afs_endpoint_state_rcu);
+ }
+}
+
/*
* Start the probe polling timer. We have to supply it with an inc on the
* outstanding server count.
@@ -38,9 +74,10 @@ static void afs_schedule_fs_probe(struct afs_net *net,
/*
* Handle the completion of a set of probes.
*/
-static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server)
+static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server,
+ struct afs_endpoint_state *estate)
{
- bool responded = server->probe.responded;
+ bool responded = test_bit(AFS_ESTATE_RESPONDED, &estate->flags);
write_seqlock(&net->fs_lock);
if (responded) {
@@ -50,6 +87,7 @@ static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server
clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
list_add_tail(&server->probe_link, &net->fs_probe_fast);
}
+
write_sequnlock(&net->fs_lock);
afs_schedule_fs_probe(net, server, !responded);
@@ -58,12 +96,13 @@ static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server
/*
* Handle the completion of a probe.
*/
-static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server)
+static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server,
+ struct afs_endpoint_state *estate)
{
_enter("");
- if (atomic_dec_and_test(&server->probe_outstanding))
- afs_finished_fs_probe(net, server);
+ if (atomic_dec_and_test(&estate->nr_probing))
+ afs_finished_fs_probe(net, server, estate);
wake_up_all(&server->probe_wq);
}
@@ -74,24 +113,22 @@ static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server
*/
static void afs_fs_probe_not_done(struct afs_net *net,
struct afs_server *server,
- struct afs_addr_cursor *ac)
+ struct afs_endpoint_state *estate,
+ int index)
{
- struct afs_addr_list *alist = ac->alist;
- unsigned int index = ac->index;
-
_enter("");
trace_afs_io_error(0, -ENOMEM, afs_io_error_fs_probe_fail);
spin_lock(&server->probe_lock);
- server->probe.local_failure = true;
- if (server->probe.error == 0)
- server->probe.error = -ENOMEM;
+ set_bit(AFS_ESTATE_LOCAL_FAILURE, &estate->flags);
+ if (estate->error == 0)
+ estate->error = -ENOMEM;
- set_bit(index, &alist->failed);
+ set_bit(index, &estate->failed_set);
spin_unlock(&server->probe_lock);
- return afs_done_one_fs_probe(net, server);
+ return afs_done_one_fs_probe(net, server, estate);
}
/*
@@ -100,30 +137,34 @@ static void afs_fs_probe_not_done(struct afs_net *net,
*/
void afs_fileserver_probe_result(struct afs_call *call)
{
- struct afs_addr_list *alist = call->alist;
+ struct afs_endpoint_state *estate = call->probe;
+ struct afs_addr_list *alist = estate->addresses;
+ struct afs_address *addr = &alist->addrs[call->probe_index];
struct afs_server *server = call->server;
- unsigned int index = call->addr_ix;
- unsigned int rtt_us = 0, cap0;
+ unsigned int index = call->probe_index;
+ unsigned int rtt_us = -1, cap0;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
+ WRITE_ONCE(addr->last_error, ret);
+
spin_lock(&server->probe_lock);
switch (ret) {
case 0:
- server->probe.error = 0;
+ estate->error = 0;
goto responded;
case -ECONNABORTED:
- if (!server->probe.responded) {
- server->probe.abort_code = call->abort_code;
- server->probe.error = ret;
+ if (!test_bit(AFS_ESTATE_RESPONDED, &estate->flags)) {
+ estate->abort_code = call->abort_code;
+ estate->error = ret;
}
goto responded;
case -ENOMEM:
case -ENONET:
- clear_bit(index, &alist->responded);
- server->probe.local_failure = true;
+ clear_bit(index, &estate->responsive_set);
+ set_bit(AFS_ESTATE_LOCAL_FAILURE, &estate->flags);
trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
@@ -136,29 +177,29 @@ void afs_fileserver_probe_result(struct afs_call *call)
case -ETIMEDOUT:
case -ETIME:
default:
- clear_bit(index, &alist->responded);
- set_bit(index, &alist->failed);
- if (!server->probe.responded &&
- (server->probe.error == 0 ||
- server->probe.error == -ETIMEDOUT ||
- server->probe.error == -ETIME))
- server->probe.error = ret;
+ clear_bit(index, &estate->responsive_set);
+ set_bit(index, &estate->failed_set);
+ if (!test_bit(AFS_ESTATE_RESPONDED, &estate->flags) &&
+ (estate->error == 0 ||
+ estate->error == -ETIMEDOUT ||
+ estate->error == -ETIME))
+ estate->error = ret;
trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
goto out;
}
responded:
- clear_bit(index, &alist->failed);
+ clear_bit(index, &estate->failed_set);
if (call->service_id == YFS_FS_SERVICE) {
- server->probe.is_yfs = true;
+ set_bit(AFS_ESTATE_IS_YFS, &estate->flags);
set_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
- alist->addrs[index].srx_service = call->service_id;
+ server->service_id = call->service_id;
} else {
- server->probe.not_yfs = true;
- if (!server->probe.is_yfs) {
+ set_bit(AFS_ESTATE_NOT_YFS, &estate->flags);
+ if (!test_bit(AFS_ESTATE_IS_YFS, &estate->flags)) {
clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
- alist->addrs[index].srx_service = call->service_id;
+ server->service_id = call->service_id;
}
cap0 = ntohl(call->tmp);
if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
@@ -167,116 +208,148 @@ responded:
clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
}
- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
- rtt_us < server->probe.rtt) {
- server->probe.rtt = rtt_us;
+ rtt_us = rxrpc_kernel_get_srtt(addr->peer);
+ if (rtt_us < estate->rtt) {
+ estate->rtt = rtt_us;
server->rtt = rtt_us;
alist->preferred = index;
}
smp_wmb(); /* Set rtt before responded. */
- server->probe.responded = true;
- set_bit(index, &alist->responded);
+ set_bit(AFS_ESTATE_RESPONDED, &estate->flags);
+ set_bit(index, &estate->responsive_set);
set_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
out:
spin_unlock(&server->probe_lock);
- _debug("probe %pU [%u] %pISpc rtt=%u ret=%d",
- &server->uuid, index, &alist->addrs[index].transport,
+ trace_afs_fs_probe(server, false, estate, index, call->error, call->abort_code, rtt_us);
+ _debug("probe[%x] %pU [%u] %pISpc rtt=%d ret=%d",
+ estate->probe_seq, &server->uuid, index,
+ rxrpc_kernel_remote_addr(alist->addrs[index].peer),
rtt_us, ret);
- return afs_done_one_fs_probe(call->net, server);
+ return afs_done_one_fs_probe(call->net, server, estate);
}
/*
- * Probe one or all of a fileserver's addresses to find out the best route and
- * to query its capabilities.
+ * Probe all of a fileserver's addresses to find out the best route and to
+ * query its capabilities.
*/
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct key *key, bool all)
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key)
{
- struct afs_addr_cursor ac = {
- .index = 0,
- };
+ struct afs_endpoint_state *estate, *old;
+ struct afs_addr_list *old_alist = NULL, *alist;
+ unsigned long unprobed;
_enter("%pU", &server->uuid);
- read_lock(&server->fs_lock);
- ac.alist = rcu_dereference_protected(server->addresses,
- lockdep_is_held(&server->fs_lock));
- afs_get_addrlist(ac.alist);
- read_unlock(&server->fs_lock);
+ estate = kzalloc(sizeof(*estate), GFP_KERNEL);
+ if (!estate)
+ return -ENOMEM;
+
+ refcount_set(&estate->ref, 2);
+ estate->server_id = server->debug_id;
+ estate->rtt = UINT_MAX;
+
+ write_lock(&server->fs_lock);
+
+ old = rcu_dereference_protected(server->endpoint_state,
+ lockdep_is_held(&server->fs_lock));
+ if (old) {
+ estate->responsive_set = old->responsive_set;
+ if (!new_alist)
+ new_alist = old->addresses;
+ }
+
+ if (old_alist != new_alist)
+ afs_set_peer_appdata(server, old_alist, new_alist);
+
+ estate->addresses = afs_get_addrlist(new_alist, afs_alist_trace_get_estate);
+ alist = estate->addresses;
+ estate->probe_seq = ++server->probe_counter;
+ atomic_set(&estate->nr_probing, alist->nr_addrs);
+
+ if (new_alist)
+ server->addr_version = new_alist->version;
+ rcu_assign_pointer(server->endpoint_state, estate);
+ write_unlock(&server->fs_lock);
+ if (old)
+ set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
+
+ trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
+ afs_estate_trace_alloc_probe);
+
+ afs_get_address_preferences(net, new_alist);
server->probed_at = jiffies;
- atomic_set(&server->probe_outstanding, all ? ac.alist->nr_addrs : 1);
- memset(&server->probe, 0, sizeof(server->probe));
- server->probe.rtt = UINT_MAX;
-
- ac.index = ac.alist->preferred;
- if (ac.index < 0 || ac.index >= ac.alist->nr_addrs)
- all = true;
-
- if (all) {
- for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++)
- if (!afs_fs_get_capabilities(net, server, &ac, key))
- afs_fs_probe_not_done(net, server, &ac);
- } else {
- if (!afs_fs_get_capabilities(net, server, &ac, key))
- afs_fs_probe_not_done(net, server, &ac);
+ unprobed = (1UL << alist->nr_addrs) - 1;
+ while (unprobed) {
+ unsigned int index = 0, i;
+ int best_prio = -1;
+
+ for (i = 0; i < alist->nr_addrs; i++) {
+ if (test_bit(i, &unprobed) &&
+ alist->addrs[i].prio > best_prio) {
+ index = i;
+ best_prio = alist->addrs[i].prio;
+ }
+ }
+ __clear_bit(index, &unprobed);
+
+ trace_afs_fs_probe(server, true, estate, index, 0, 0, 0);
+ if (!afs_fs_get_capabilities(net, server, estate, index, key))
+ afs_fs_probe_not_done(net, server, estate, index);
}
- afs_put_addrlist(ac.alist);
+ afs_put_endpoint_state(old, afs_estate_trace_put_probe);
+ afs_put_endpoint_state(estate, afs_estate_trace_put_probe);
+ return 0;
}
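Not part of the patch: a standalone sketch of the address-selection loop above. Every address gets probed, but in descending priority order, with an "unprobed" bitmask tracking which ones remain; ties go to the lowest index because the comparison is strictly greater-than. The priority values below are made up.

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-address priorities, as alist->addrs[i].prio might be. */
	int prio[] = { 10, 50, 30, 50, 20 };
	unsigned int nr_addrs = sizeof(prio) / sizeof(prio[0]);
	unsigned long unprobed = (1UL << nr_addrs) - 1;

	while (unprobed) {
		unsigned int index = 0, i;
		int best_prio = -1;

		/* Pick the highest-priority address not yet probed. */
		for (i = 0; i < nr_addrs; i++) {
			if ((unprobed & (1UL << i)) && prio[i] > best_prio) {
				index = i;
				best_prio = prio[i];
			}
		}
		unprobed &= ~(1UL << index);
		printf("probe addr[%u] (prio %d)\n", index, best_prio);
	}
	return 0;
}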
/*
- * Wait for the first as-yet untried fileserver to respond.
+ * Wait for the first as-yet untried fileserver to respond, for the probe state
+ * to be superseded or for all probes to finish.
*/
-int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
+int afs_wait_for_fs_probes(struct afs_operation *op, struct afs_server_state *states, bool intr)
{
- struct wait_queue_entry *waits;
- struct afs_server *server;
- unsigned int rtt = UINT_MAX, rtt_s;
- bool have_responders = false;
- int pref = -1, i;
+ struct afs_endpoint_state *estate;
+ struct afs_server_list *slist = op->server_list;
+ bool still_probing = false;
+ int ret = 0, i;
- _enter("%u,%lx", slist->nr_servers, untried);
+ _enter("%u", slist->nr_servers);
- /* Only wait for servers that have a probe outstanding. */
for (i = 0; i < slist->nr_servers; i++) {
- if (test_bit(i, &untried)) {
- server = slist->servers[i].server;
- if (!atomic_read(&server->probe_outstanding))
- __clear_bit(i, &untried);
- if (server->probe.responded)
- have_responders = true;
- }
+ estate = states[i].endpoint_state;
+ if (test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags))
+ return 2;
+ if (atomic_read(&estate->nr_probing))
+ still_probing = true;
+ if (estate->responsive_set & states[i].untried_addrs)
+ return 1;
}
- if (have_responders || !untried)
+ if (!still_probing)
return 0;
- waits = kmalloc(array_size(slist->nr_servers, sizeof(*waits)), GFP_KERNEL);
- if (!waits)
- return -ENOMEM;
-
- for (i = 0; i < slist->nr_servers; i++) {
- if (test_bit(i, &untried)) {
- server = slist->servers[i].server;
- init_waitqueue_entry(&waits[i], current);
- add_wait_queue(&server->probe_wq, &waits[i]);
- }
- }
+ for (i = 0; i < slist->nr_servers; i++)
+ add_wait_queue(&slist->servers[i].server->probe_wq, &states[i].probe_waiter);
for (;;) {
- bool still_probing = false;
+ still_probing = false;
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
for (i = 0; i < slist->nr_servers; i++) {
- if (test_bit(i, &untried)) {
- server = slist->servers[i].server;
- if (server->probe.responded)
- goto stop;
- if (atomic_read(&server->probe_outstanding))
- still_probing = true;
+ estate = states[i].endpoint_state;
+ if (test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags)) {
+ ret = 2;
+ goto stop;
+ }
+ if (atomic_read(&estate->nr_probing))
+ still_probing = true;
+ if (estate->responsive_set & states[i].untried_addrs) {
+ ret = 1;
+ goto stop;
}
}
@@ -288,28 +361,12 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
stop:
set_current_state(TASK_RUNNING);
- for (i = 0; i < slist->nr_servers; i++) {
- if (test_bit(i, &untried)) {
- server = slist->servers[i].server;
- rtt_s = READ_ONCE(server->rtt);
- if (test_bit(AFS_SERVER_FL_RESPONDING, &server->flags) &&
- rtt_s < rtt) {
- pref = i;
- rtt = rtt_s;
- }
-
- remove_wait_queue(&server->probe_wq, &waits[i]);
- }
- }
-
- kfree(waits);
-
- if (pref == -1 && signal_pending(current))
- return -ERESTARTSYS;
+ for (i = 0; i < slist->nr_servers; i++)
+ remove_wait_queue(&slist->servers[i].server->probe_wq, &states[i].probe_waiter);
- if (pref >= 0)
- slist->preferred = pref;
- return 0;
+ if (!ret && signal_pending(current))
+ ret = -ERESTARTSYS;
+ return ret;
}
/*
@@ -327,7 +384,7 @@ void afs_fs_probe_timer(struct timer_list *timer)
/*
* Dispatch a probe to a server.
*/
-static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server, bool all)
+static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server)
__releases(&net->fs_lock)
{
struct key *key = NULL;
@@ -340,7 +397,7 @@ static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server
afs_get_server(server, afs_server_trace_get_probe);
write_sequnlock(&net->fs_lock);
- afs_fs_probe_fileserver(net, server, key, all);
+ afs_fs_probe_fileserver(net, server, NULL, key);
afs_put_server(net, server, afs_server_trace_put_probe);
}
@@ -352,7 +409,7 @@ void afs_probe_fileserver(struct afs_net *net, struct afs_server *server)
{
write_seqlock(&net->fs_lock);
if (!list_empty(&server->probe_link))
- return afs_dispatch_fs_probe(net, server, true);
+ return afs_dispatch_fs_probe(net, server);
write_sequnlock(&net->fs_lock);
}
@@ -366,12 +423,15 @@ void afs_fs_probe_dispatcher(struct work_struct *work)
unsigned long nowj, timer_at, poll_at;
bool first_pass = true, set_timer = false;
- if (!net->live)
+ if (!net->live) {
+ afs_dec_servers_outstanding(net);
return;
+ }
_enter("");
if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) {
+ afs_dec_servers_outstanding(net);
_leave(" [none]");
return;
}
@@ -409,7 +469,7 @@ again:
_debug("probe %pU", &server->uuid);
if (server && (first_pass || !need_resched())) {
- afs_dispatch_fs_probe(net, server, server == fast);
+ afs_dispatch_fs_probe(net, server);
first_pass = false;
goto again;
}
@@ -433,12 +493,13 @@ again:
/*
* Wait for a probe on a particular fileserver to complete for 2s.
*/
-int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr)
+int afs_wait_for_one_fs_probe(struct afs_server *server, struct afs_endpoint_state *estate,
+ unsigned long exclude, bool is_intr)
{
struct wait_queue_entry wait;
unsigned long timo = 2 * HZ;
- if (atomic_read(&server->probe_outstanding) == 0)
+ if (atomic_read(&estate->nr_probing) == 0)
goto dont_wait;
init_wait_entry(&wait, 0);
@@ -446,8 +507,9 @@ int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr)
prepare_to_wait_event(&server->probe_wq, &wait,
is_intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (timo == 0 ||
- server->probe.responded ||
- atomic_read(&server->probe_outstanding) == 0 ||
+ test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags) ||
+ (estate->responsive_set & ~exclude) ||
+ atomic_read(&estate->nr_probing) == 0 ||
(is_intr && signal_pending(current)))
break;
timo = schedule_timeout(timo);
@@ -456,8 +518,10 @@ int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr)
finish_wait(&server->probe_wq, &wait);
dont_wait:
- if (server->probe.responded)
+ if (test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags))
return 0;
+ if (estate->responsive_set & ~exclude)
+ return 1;
if (is_intr && signal_pending(current))
return -ERESTARTSYS;
if (timo == 0)
@@ -470,6 +534,6 @@ dont_wait:
*/
void afs_fs_probe_cleanup(struct afs_net *net)
{
- if (del_timer_sync(&net->fs_probe_timer))
+ if (timer_delete_sync(&net->fs_probe_timer))
afs_dec_servers_outstanding(net);
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 4943413d9c5f..bc9556991d7c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -131,7 +131,7 @@ bad:
static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
{
- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
+ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
}
static void xdr_decode_AFSCallBack(const __be32 **_bp,
@@ -290,6 +290,7 @@ void afs_fs_fetch_status(struct afs_operation *op)
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -300,18 +301,19 @@ void afs_fs_fetch_status(struct afs_operation *op)
static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
struct afs_operation *op = call->op;
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
const __be32 *bp;
+ size_t count_before;
int ret;
_enter("{%u,%zu,%zu/%llu}",
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
- req->actual_len);
+ call->remaining);
switch (call->unmarshall) {
case 0:
- req->actual_len = 0;
+ call->remaining = 0;
call->unmarshall++;
if (call->operation_ID == FSFETCHDATA64) {
afs_extract_to_tmp64(call);
@@ -321,8 +323,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
}
fallthrough;
- /* Extract the returned data length into
- * ->actual_len. This may indicate more or less data than was
+ /* Extract the returned data length into ->remaining.
+ * This may indicate more or less data than was
* requested will be returned.
*/
case 1:
@@ -331,38 +333,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (ret < 0)
return ret;
- req->actual_len = be64_to_cpu(call->tmp64);
- _debug("DATA length: %llu", req->actual_len);
+ call->remaining = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", call->remaining);
- if (req->actual_len == 0)
+ if (call->remaining == 0)
goto no_more_data;
- call->iter = req->iter;
- call->iov_len = min(req->actual_len, req->len);
+ call->iter = &subreq->io_iter;
+ call->iov_len = umin(call->remaining, subreq->len - subreq->transferred);
call->unmarshall++;
fallthrough;
/* extract the returned data */
case 2:
- _debug("extract data %zu/%llu",
- iov_iter_count(call->iter), req->actual_len);
+ count_before = call->iov_len;
+ _debug("extract data %zu/%llu", count_before, call->remaining);
ret = afs_extract_data(call, true);
+ subreq->transferred += count_before - call->iov_len;
+ call->remaining -= count_before - call->iov_len;
if (ret < 0)
return ret;
call->iter = &call->def_iter;
- if (req->actual_len <= req->len)
+ if (!call->remaining)
goto no_more_data;
/* Discard any excess data the server gave us */
- afs_extract_discard(call, req->actual_len - req->len);
+ afs_extract_discard(call, call->remaining);
call->unmarshall = 3;
fallthrough;
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), call->remaining);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -384,8 +388,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
xdr_decode_AFSCallBack(&bp, call, &vp->scb);
xdr_decode_AFSVolSync(&bp, &op->volsync);
- req->data_version = vp->scb.status.data_version;
- req->file_size = vp->scb.status.size;
+ if (subreq->start + subreq->transferred >= vp->scb.status.size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
call->unmarshall++;
fallthrough;
@@ -404,14 +408,18 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
static const struct afs_call_type afs_RXFSFetchData = {
.name = "FS.FetchData",
.op = afs_FS_FetchData,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = afs_deliver_fs_fetch_data,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSFetchData64 = {
.name = "FS.FetchData64",
.op = afs_FS_FetchData64,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = afs_deliver_fs_fetch_data,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
@@ -420,8 +428,8 @@ static const struct afs_call_type afs_RXFSFetchData64 = {
*/
static void afs_fs_fetch_data64(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
struct afs_call *call;
__be32 *bp;
@@ -431,17 +439,21 @@ static void afs_fs_fetch_data64(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
+ if (op->flags & AFS_OPERATION_ASYNC)
+ call->async = true;
+
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA64);
bp[1] = htonl(vp->fid.vid);
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
- bp[4] = htonl(upper_32_bits(req->pos));
- bp[5] = htonl(lower_32_bits(req->pos));
+ bp[4] = htonl(upper_32_bits(subreq->start + subreq->transferred));
+ bp[5] = htonl(lower_32_bits(subreq->start + subreq->transferred));
bp[6] = 0;
- bp[7] = htonl(lower_32_bits(req->len));
+ bp[7] = htonl(lower_32_bits(subreq->len - subreq->transferred));
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -451,9 +463,9 @@ static void afs_fs_fetch_data64(struct afs_operation *op)
*/
void afs_fs_fetch_data(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_read *req = op->fetch.req;
__be32 *bp;
if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
@@ -465,17 +477,16 @@ void afs_fs_fetch_data(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
- req->call_debug_id = call->debug_id;
-
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA);
bp[1] = htonl(vp->fid.vid);
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
- bp[4] = htonl(lower_32_bits(req->pos));
- bp[5] = htonl(lower_32_bits(req->len));
+ bp[4] = htonl(lower_32_bits(subreq->start + subreq->transferred));
+ bp[5] = htonl(lower_32_bits(subreq->len - subreq->transferred));
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
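Not part of the patch: the FS.FetchData request marshalled above, written out as a packed struct purely for clarity. The field names are invented; on the wire this is simply six big-endian 32-bit words in the order shown, as the bp[0]..bp[5] assignments build them.

#include <stdint.h>

struct afs_fetchdata_request {
	uint32_t opcode;	/* FSFETCHDATA */
	uint32_t vid;		/* volume ID */
	uint32_t vnode;		/* vnode number */
	uint32_t unique;	/* uniquifier */
	uint32_t pos;		/* low 32 bits of start + transferred */
	uint32_t len;		/* low 32 bits of bytes still to fetch */
} __attribute__((packed));	/* every field sent big-endian (htonl) */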
@@ -559,6 +570,7 @@ void afs_fs_create_file(struct afs_operation *op)
*bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */
*bp++ = 0; /* segment size */
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -612,6 +624,7 @@ void afs_fs_make_dir(struct afs_operation *op)
*bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */
*bp++ = 0; /* segment size */
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -685,6 +698,7 @@ void afs_fs_remove_file(struct afs_operation *op)
bp = (void *) bp + padsz;
}
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -732,6 +746,7 @@ void afs_fs_remove_dir(struct afs_operation *op)
bp = (void *) bp + padsz;
}
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -812,6 +827,7 @@ void afs_fs_link(struct afs_operation *op)
*bp++ = htonl(vp->fid.vnode);
*bp++ = htonl(vp->fid.unique);
+ call->fid = vp->fid;
trace_afs_make_fs_call1(call, &vp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -907,6 +923,7 @@ void afs_fs_symlink(struct afs_operation *op)
*bp++ = htonl(S_IRWXUGO); /* unix mode */
*bp++ = 0; /* segment size */
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1003,6 +1020,7 @@ void afs_fs_rename(struct afs_operation *op)
bp = (void *) bp + n_padsz;
}
+ call->fid = orig_dvp->fid;
trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1090,6 +1108,7 @@ static void afs_fs_store_data64(struct afs_operation *op)
*bp++ = htonl(upper_32_bits(op->store.i_size));
*bp++ = htonl(lower_32_bits(op->store.i_size));
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1140,6 +1159,7 @@ void afs_fs_store_data(struct afs_operation *op)
*bp++ = htonl(lower_32_bits(op->store.size));
*bp++ = htonl(lower_32_bits(op->store.i_size));
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1206,6 +1226,7 @@ static void afs_fs_setattr_size64(struct afs_operation *op)
*bp++ = htonl(upper_32_bits(attr->ia_size)); /* new file length */
*bp++ = htonl(lower_32_bits(attr->ia_size));
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1247,6 +1268,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
*bp++ = 0; /* size of write */
*bp++ = htonl(attr->ia_size); /* new file length */
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1283,6 +1305,7 @@ void afs_fs_setattr(struct afs_operation *op)
xdr_encode_AFS_StoreStatus(&bp, op->setattr.attr);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1446,6 +1469,7 @@ void afs_fs_get_volume_status(struct afs_operation *op)
bp[0] = htonl(FSGETVOLUMESTATUS);
bp[1] = htonl(vp->fid.vid);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1528,6 +1552,7 @@ void afs_fs_set_lock(struct afs_operation *op)
*bp++ = htonl(vp->fid.unique);
*bp++ = htonl(op->lock.type);
+ call->fid = vp->fid;
trace_afs_make_fs_calli(call, &vp->fid, op->lock.type);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1554,6 +1579,7 @@ void afs_fs_extend_lock(struct afs_operation *op)
*bp++ = htonl(vp->fid.vnode);
*bp++ = htonl(vp->fid.unique);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1580,6 +1606,7 @@ void afs_fs_release_lock(struct afs_operation *op)
*bp++ = htonl(vp->fid.vnode);
*bp++ = htonl(vp->fid.unique);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1605,13 +1632,12 @@ static const struct afs_call_type afs_RXFSGiveUpAllCallBacks = {
/*
* Flush all the callbacks we have on a server.
*/
-int afs_fs_give_up_all_callbacks(struct afs_net *net,
- struct afs_server *server,
- struct afs_addr_cursor *ac,
- struct key *key)
+int afs_fs_give_up_all_callbacks(struct afs_net *net, struct afs_server *server,
+ struct afs_address *addr, struct key *key)
{
struct afs_call *call;
__be32 *bp;
+ int ret;
_enter("");
@@ -1619,15 +1645,22 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net,
if (!call)
return -ENOMEM;
- call->key = key;
+ call->key = key;
+ call->peer = rxrpc_kernel_get_peer(addr->peer);
+ call->service_id = server->service_id;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSGIVEUPALLCALLBACKS);
- call->server = afs_use_server(server, afs_server_trace_give_up_cb);
- afs_make_call(ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, ac);
+ call->server = afs_use_server(server, false, afs_server_trace_use_give_up_cb);
+ afs_make_call(call, GFP_NOFS);
+ afs_wait_for_call_to_complete(call);
+ ret = call->error;
+ if (call->responded)
+ set_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
+ afs_put_call(call);
+ return ret;
}
/*
@@ -1689,6 +1722,12 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
return 0;
}
+static void afs_fs_get_capabilities_destructor(struct afs_call *call)
+{
+ afs_put_endpoint_state(call->probe, afs_estate_trace_put_getcaps);
+ afs_flat_call_destructor(call);
+}
+
/*
* FS.GetCapabilities operation type
*/
@@ -1697,7 +1736,8 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
.op = afs_FS_GetCapabilities,
.deliver = afs_deliver_fs_get_capabilities,
.done = afs_fileserver_probe_result,
- .destructor = afs_flat_call_destructor,
+ .immediate_cancel = afs_fileserver_probe_result,
+ .destructor = afs_fs_get_capabilities_destructor,
};
/*
@@ -1707,7 +1747,8 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
* ->done() - otherwise we return false to indicate we didn't even try.
*/
bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
- struct afs_addr_cursor *ac, struct key *key)
+ struct afs_endpoint_state *estate, unsigned int addr_index,
+ struct key *key)
{
struct afs_call *call;
__be32 *bp;
@@ -1718,10 +1759,14 @@ bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
if (!call)
return false;
- call->key = key;
- call->server = afs_use_server(server, afs_server_trace_get_caps);
- call->upgrade = true;
- call->async = true;
+ call->key = key;
+ call->server = afs_use_server(server, false, afs_server_trace_use_get_caps);
+ call->peer = rxrpc_kernel_get_peer(estate->addresses->addrs[addr_index].peer);
+ call->probe = afs_get_endpoint_state(estate, afs_estate_trace_get_getcaps);
+ call->probe_index = addr_index;
+ call->service_id = server->service_id;
+ call->upgrade = true;
+ call->async = true;
call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
/* marshall the parameters */
@@ -1729,7 +1774,7 @@ bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
*bp++ = htonl(FSGETCAPABILITIES);
trace_afs_make_fs_call(call, NULL);
- afs_make_call(ac, call, GFP_NOFS);
+ afs_make_call(call, GFP_NOFS);
afs_put_call(call);
return true;
}
@@ -1853,7 +1898,10 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSVolSync(&bp, &op->volsync);
+ /* Unfortunately, prior to OpenAFS-1.6, volsync here is filled
+ * with rubbish.
+ */
+ xdr_decode_AFSVolSync(&bp, NULL);
call->unmarshall++;
fallthrough;
@@ -1899,7 +1947,7 @@ void afs_fs_inline_bulk_status(struct afs_operation *op)
int i;
if (test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags)) {
- op->error = -ENOTSUPP;
+ afs_op_set_error(op, -ENOTSUPP);
return;
}
@@ -1928,6 +1976,7 @@ void afs_fs_inline_bulk_status(struct afs_operation *op)
*bp++ = htonl(op->more_files[i].fid.unique);
}
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -2033,6 +2082,7 @@ void afs_fs_fetch_acl(struct afs_operation *op)
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_KERNEL);
}
@@ -2078,6 +2128,7 @@ void afs_fs_store_acl(struct afs_operation *op)
if (acl->size != size)
memset((void *)&bp[5] + acl->size, 0, size - acl->size);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_KERNEL);
}
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 16906eb592d9..dde1857fcabb 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -25,8 +25,94 @@
#include "internal.h"
#include "afs_fs.h"
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op)
+{
+ size_t size = strlen(op->create.symlink) + 1;
+ size_t dsize = 0;
+ char *p;
+
+ if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size,
+ mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0)
+ return;
+
+ vnode->directory_size = dsize;
+ p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0);
+ memcpy(p, op->create.symlink, size);
+ kunmap_local(p);
+ set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+}
+
+static void afs_put_link(void *arg)
+{
+ struct folio *folio = virt_to_folio(arg);
+
+ kunmap_local(arg);
+ folio_put(folio);
+}
+
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct folio *folio;
+ char *content;
+ ssize_t ret;
+
+ if (!dentry) {
+ /* RCU pathwalk. */
+ if (!test_bit(AFS_VNODE_DIR_READ, &vnode->flags) || !afs_check_validity(vnode))
+ return ERR_PTR(-ECHILD);
+ goto good;
+ }
+
+ if (test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
+ goto fetch;
+
+ ret = afs_validate(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
+ test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
+ goto good;
+
+fetch:
+ ret = afs_read_single(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
+
+good:
+ folio = folioq_folio(vnode->directory, 0);
+ folio_get(folio);
+ content = kmap_local_folio(folio, 0);
+ set_delayed_call(callback, afs_put_link, content);
+ return content;
+}
+
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+ DEFINE_DELAYED_CALL(done);
+ const char *content;
+ int len;
+
+ content = afs_get_link(dentry, d_inode(dentry), &done);
+ if (IS_ERR(content)) {
+ do_delayed_call(&done);
+ return PTR_ERR(content);
+ }
+
+ len = umin(strlen(content), buflen);
+ if (copy_to_user(buffer, content, len))
+ len = -EFAULT;
+ do_delayed_call(&done);
+ return len;
+}
+
static const struct inode_operations afs_symlink_inode_operations = {
- .get_link = page_get_link,
+ .get_link = afs_get_link,
+ .readlink = afs_readlink,
};
static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
@@ -54,6 +140,14 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
}
/*
+ * Set parameters for the netfs library
+ */
+static void afs_set_netfs_context(struct afs_vnode *vnode)
+{
+ netfs_inode_init(&vnode->netfs, &afs_req_ops, true);
+}
+
+/*
* Initialise an inode from the vnode status.
*/
static int afs_inode_init_from_status(struct afs_operation *op,
@@ -77,18 +171,17 @@ static int afs_inode_init_from_status(struct afs_operation *op,
write_seqlock(&vnode->cb_lock);
- vnode->cb_v_break = op->cb_v_break;
- vnode->cb_s_break = op->cb_s_break;
+ vnode->cb_v_check = op->cb_v_break;
vnode->status = *status;
t = status->mtime_client;
- inode->i_ctime = t;
- inode->i_mtime = t;
- inode->i_atime = t;
+ inode_set_ctime_to_ts(inode, t);
+ inode_set_mtime_to_ts(inode, t);
+ inode_set_atime_to_ts(inode, t);
inode->i_flags |= S_NOATIME;
inode->i_uid = make_kuid(&init_user_ns, status->owner);
inode->i_gid = make_kgid(&init_user_ns, status->group);
- set_nlink(&vnode->vfs_inode, status->nlink);
+ set_nlink(&vnode->netfs.inode, status->nlink);
switch (status->type) {
case AFS_FTYPE_FILE:
@@ -96,12 +189,16 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_op = &afs_file_inode_operations;
inode->i_fop = &afs_file_operations;
inode->i_mapping->a_ops = &afs_file_aops;
+ mapping_set_large_folios(inode->i_mapping);
break;
case AFS_FTYPE_DIR:
inode->i_mode = S_IFDIR | (status->mode & S_IALLUGO);
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
inode->i_mapping->a_ops = &afs_dir_aops;
+ __set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &vnode->netfs.flags);
+ /* Assume locally cached directory data will be valid. */
+ __set_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
@@ -113,13 +210,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_mode = S_IFDIR | 0555;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
- inode->i_mapping->a_ops = &afs_symlink_aops;
} else {
inode->i_mode = S_IFLNK | status->mode;
inode->i_op = &afs_symlink_inode_operations;
- inode->i_mapping->a_ops = &afs_symlink_aops;
}
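+ /* Symlink and mountpoint content is read and cached as a single object,
+ * using the same address space ops as directory content.
+ */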
+ inode->i_mapping->a_ops = &afs_dir_aops;
inode_nohighmem(inode);
+ mapping_set_release_always(inode->i_mapping);
break;
default:
dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL);
@@ -128,18 +225,20 @@ static int afs_inode_init_from_status(struct afs_operation *op,
}
afs_set_i_size(vnode, status->size);
+ afs_set_netfs_context(vnode);
vnode->invalid_before = status->data_version;
- inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
+ trace_afs_set_dv(vnode, status->data_version);
+ inode_set_iversion_raw(&vnode->netfs.inode, status->data_version);
if (!vp->scb.have_cb) {
/* it's a symlink we just created (the fileserver
* didn't give us a callback) */
- vnode->cb_expires_at = ktime_get_real_seconds();
+ afs_clear_cb_promise(vnode, afs_cb_promise_set_new_symlink);
} else {
- vnode->cb_expires_at = vp->scb.callback.expires_at;
vnode->cb_server = op->server;
- set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ afs_set_cb_promise(vnode, vp->scb.callback.expires_at,
+ afs_cb_promise_set_new_inode);
}
write_sequnlock(&vnode->cb_lock);
@@ -154,9 +253,10 @@ static void afs_apply_status(struct afs_operation *op,
{
struct afs_file_status *status = &vp->scb.status;
struct afs_vnode *vnode = vp->vnode;
- struct inode *inode = &vnode->vfs_inode;
+ struct inode *inode = &vnode->netfs.inode;
struct timespec64 t;
umode_t mode;
+ bool unexpected_jump = false;
bool data_changed = false;
bool change_size = vp->set_size;
@@ -193,17 +293,23 @@ static void afs_apply_status(struct afs_operation *op,
}
t = status->mtime_client;
- inode->i_mtime = t;
+ inode_set_mtime_to_ts(inode, t);
if (vp->update_ctime)
- inode->i_ctime = op->ctime;
+ inode_set_ctime_to_ts(inode, op->ctime);
- if (vnode->status.data_version != status->data_version)
+ if (vnode->status.data_version != status->data_version) {
+ trace_afs_set_dv(vnode, status->data_version);
data_changed = true;
+ }
vnode->status = *status;
if (vp->dv_before + vp->dv_delta != status->data_version) {
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
+ trace_afs_dv_mismatch(vnode, vp->dv_before, vp->dv_delta,
+ status->data_version);
+
+ if (vnode->cb_ro_snapshot == atomic_read(&vnode->volume->cb_ro_snapshot) &&
+ atomic64_read(&vnode->cb_expires_at) != AFS_NO_CB_PROMISE)
pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n",
vnode->fid.vid, vnode->fid.vnode,
(unsigned long long)vp->dv_before + vp->dv_delta,
@@ -212,13 +318,13 @@ static void afs_apply_status(struct afs_operation *op,
op->debug_id);
vnode->invalid_before = status->data_version;
- if (vnode->status.type == AFS_FTYPE_DIR) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- afs_stat_v(vnode, n_inval);
- } else {
+ if (vnode->status.type == AFS_FTYPE_DIR)
+ afs_invalidate_dir(vnode, afs_dir_invalid_dv_mismatch);
+ else
set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
- }
change_size = true;
+ data_changed = true;
+ unexpected_jump = true;
} else if (vnode->status.type == AFS_FTYPE_DIR) {
/* Expected directory change is handled elsewhere so
* that we can locally edit the directory and save on a
@@ -237,11 +343,16 @@ static void afs_apply_status(struct afs_operation *op,
* idea of what the size should be that's not the same as
* what's on the server.
*/
- if (change_size) {
+ vnode->netfs.remote_i_size = status->size;
+ if (change_size || status->size > i_size_read(inode)) {
afs_set_i_size(vnode, status->size);
- inode->i_ctime = t;
- inode->i_atime = t;
+ if (unexpected_jump)
+ vnode->netfs.zero_point = status->size;
+ inode_set_ctime_to_ts(inode, t);
+ inode_set_atime_to_ts(inode, t);
}
+ if (op->ops == &afs_fetch_data_operation)
+ op->fetch.subreq->rreq->i_size = status->size;
}
}
@@ -255,9 +366,9 @@ static void afs_apply_callback(struct afs_operation *op,
struct afs_vnode *vnode = vp->vnode;
if (!afs_cb_is_broken(vp->cb_break_before, vnode)) {
- vnode->cb_expires_at = cb->expires_at;
- vnode->cb_server = op->server;
- set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ if (op->volume->type == AFSVL_RWVOL)
+ vnode->cb_server = op->server;
+ afs_set_cb_promise(vnode, cb->expires_at, afs_cb_promise_set_apply_cb);
}
}
@@ -279,7 +390,7 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
*/
if (vp->scb.status.abort_code == VNOVNODE) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
- clear_nlink(&vnode->vfs_inode);
+ clear_nlink(&vnode->netfs.inode);
__afs_break_callback(vnode, afs_cb_break_for_deleted);
op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
}
@@ -296,8 +407,8 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
if (vp->scb.have_cb)
afs_apply_callback(op, vp);
} else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) {
- drop_nlink(&vnode->vfs_inode);
- if (vnode->vfs_inode.i_nlink == 0) {
+ drop_nlink(&vnode->netfs.inode);
+ if (vnode->netfs.inode.i_nlink == 0) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
__afs_break_callback(vnode, afs_cb_break_for_deleted);
}
@@ -316,9 +427,9 @@ static void afs_fetch_status_success(struct afs_operation *op)
struct afs_vnode *vnode = vp->vnode;
int ret;
- if (vnode->vfs_inode.i_state & I_NEW) {
+ if (inode_state_read_once(&vnode->netfs.inode) & I_NEW) {
ret = afs_inode_init_from_status(op, vp, vnode);
- op->error = ret;
+ afs_op_set_error(op, ret);
if (ret == 0)
afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
} else {
@@ -413,28 +524,33 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
{
#ifdef CONFIG_AFS_FSCACHE
struct {
- u32 vnode_id;
- u32 unique;
- u32 vnode_id_ext[2]; /* Allow for a 96-bit key */
+ __be32 vnode_id;
+ __be32 unique;
+ __be32 vnode_id_ext[2]; /* Allow for a 96-bit key */
} __packed key;
struct afs_vnode_cache_aux aux;
- if (vnode->status.type != AFS_FTYPE_FILE) {
- vnode->cache = NULL;
+ if (vnode->status.type != AFS_FTYPE_FILE &&
+ vnode->status.type != AFS_FTYPE_DIR &&
+ vnode->status.type != AFS_FTYPE_SYMLINK) {
+ vnode->netfs.cache = NULL;
return;
}
- key.vnode_id = vnode->fid.vnode;
- key.unique = vnode->fid.unique;
- key.vnode_id_ext[0] = vnode->fid.vnode >> 32;
- key.vnode_id_ext[1] = vnode->fid.vnode_hi;
- aux.data_version = vnode->status.data_version;
-
- vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
- &afs_vnode_cache_index_def,
- &key, sizeof(key),
- &aux, sizeof(aux),
- vnode, vnode->status.size, true);
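+ /* Encode the cache key big-endian so that it doesn't depend on host
+ * byte order.
+ */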
+ key.vnode_id = htonl(vnode->fid.vnode);
+ key.unique = htonl(vnode->fid.unique);
+ key.vnode_id_ext[0] = htonl(vnode->fid.vnode >> 32);
+ key.vnode_id_ext[1] = htonl(vnode->fid.vnode_hi);
+ afs_set_cache_aux(vnode, &aux);
+
+ afs_vnode_set_cache(vnode,
+ fscache_acquire_cookie(
+ vnode->volume->cache,
+ vnode->status.type == AFS_FTYPE_FILE ?
+ 0 : FSCACHE_ADV_SINGLE_CHUNK,
+ &key, sizeof(key),
+ &aux, sizeof(aux),
+ i_size_read(&vnode->netfs.inode)));
#endif
}
@@ -444,7 +560,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
{
struct afs_vnode_param *dvp = &op->file[0];
- struct super_block *sb = dvp->vnode->vfs_inode.i_sb;
+ struct super_block *sb = dvp->vnode->netfs.inode.i_sb;
struct afs_vnode *vnode;
struct inode *inode;
int ret;
@@ -463,7 +579,7 @@ struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
/* deal with an existing inode */
- if (!(inode->i_state & I_NEW)) {
+ if (!(inode_state_read_once(inode) & I_NEW)) {
_leave(" = %p", inode);
return inode;
}
@@ -493,7 +609,7 @@ static int afs_iget5_set_root(struct inode *inode, void *opaque)
struct afs_vnode *vnode = AFS_FS_I(inode);
vnode->volume = as->volume;
- vnode->fid.vid = as->volume->vid,
+ vnode->fid.vid = as->volume->vid;
vnode->fid.vnode = 1;
vnode->fid.unique = 1;
inode->i_ino = 1;
@@ -523,10 +639,11 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
_debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid);
- BUG_ON(!(inode->i_state & I_NEW));
+ BUG_ON(!(inode_state_read_once(inode) & I_NEW));
vnode = AFS_FS_I(inode);
- vnode->cb_v_break = as->volume->cb_v_break,
+ vnode->cb_v_check = atomic_read(&as->volume->cb_v_break);
+ afs_set_netfs_context(vnode);
op = afs_alloc_operation(key, as->volume);
if (IS_ERR(op)) {
@@ -556,191 +673,45 @@ error:
}
/*
- * mark the data attached to an inode as obsolete due to a write on the server
- * - might also want to ditch all the outstanding writes and dirty pages
- */
-static void afs_zap_data(struct afs_vnode *vnode)
-{
- _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
-
-#ifdef CONFIG_AFS_FSCACHE
- fscache_invalidate(vnode->cache);
-#endif
-
- /* nuke all the non-dirty pages that aren't locked, mapped or being
- * written back in a regular file and completely discard the pages in a
- * directory or symlink */
- if (S_ISREG(vnode->vfs_inode.i_mode))
- invalidate_remote_inode(&vnode->vfs_inode);
- else
- invalidate_inode_pages2(vnode->vfs_inode.i_mapping);
-}
-
-/*
- * Check to see if we have a server currently serving this volume and that it
- * hasn't been reinitialised or dropped from the list.
- */
-static bool afs_check_server_good(struct afs_vnode *vnode)
-{
- struct afs_server_list *slist;
- struct afs_server *server;
- bool good;
- int i;
-
- if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
- return true;
-
- rcu_read_lock();
-
- slist = rcu_dereference(vnode->volume->servers);
- for (i = 0; i < slist->nr_servers; i++) {
- server = slist->servers[i].server;
- if (server == vnode->cb_server) {
- good = (vnode->cb_s_break == server->cb_s_break);
- rcu_read_unlock();
- return good;
- }
- }
-
- rcu_read_unlock();
- return false;
-}
-
-/*
- * Check the validity of a vnode/inode.
- */
-bool afs_check_validity(struct afs_vnode *vnode)
-{
- enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
- time64_t now = ktime_get_real_seconds();
- unsigned int cb_break;
- int seq = 0;
-
- do {
- read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- cb_break = vnode->cb_break;
-
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
- if (vnode->cb_v_break != vnode->volume->cb_v_break)
- need_clear = afs_cb_break_for_v_break;
- else if (!afs_check_server_good(vnode))
- need_clear = afs_cb_break_for_s_reinit;
- else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
- need_clear = afs_cb_break_for_zap;
- else if (vnode->cb_expires_at - 10 <= now)
- need_clear = afs_cb_break_for_lapsed;
- } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- ;
- } else {
- need_clear = afs_cb_break_no_promise;
- }
-
- } while (need_seqretry(&vnode->cb_lock, seq));
-
- done_seqretry(&vnode->cb_lock, seq);
-
- if (need_clear == afs_cb_break_no_break)
- return true;
-
- write_seqlock(&vnode->cb_lock);
- if (need_clear == afs_cb_break_no_promise)
- vnode->cb_v_break = vnode->volume->cb_v_break;
- else if (cb_break == vnode->cb_break)
- __afs_break_callback(vnode, need_clear);
- else
- trace_afs_cb_miss(&vnode->fid, need_clear);
- write_sequnlock(&vnode->cb_lock);
- return false;
-}
-
-/*
- * validate a vnode/inode
- * - there are several things we need to check
- * - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
- * symlink)
- * - parent dir metadata changed (security changes)
- * - dentry data changed (write, truncate)
- * - dentry metadata changed (security changes)
- */
-int afs_validate(struct afs_vnode *vnode, struct key *key)
-{
- int ret;
-
- _enter("{v={%llx:%llu} fl=%lx},%x",
- vnode->fid.vid, vnode->fid.vnode, vnode->flags,
- key_serial(key));
-
- if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
- if (vnode->vfs_inode.i_nlink)
- clear_nlink(&vnode->vfs_inode);
- goto valid;
- }
-
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
- afs_check_validity(vnode))
- goto valid;
-
- down_write(&vnode->validate_lock);
-
- /* if the promise has expired, we need to check the server again to get
- * a new promise - note that if the (parent) directory's metadata was
- * changed then the security may be different and we may no longer have
- * access */
- if (!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
- _debug("not promised");
- ret = afs_fetch_status(vnode, key, false, NULL);
- if (ret < 0) {
- if (ret == -ENOENT) {
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- ret = -ESTALE;
- }
- goto error_unlock;
- }
- _debug("new promise [fl=%lx]", vnode->flags);
- }
-
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- _debug("file already deleted");
- ret = -ESTALE;
- goto error_unlock;
- }
-
- /* if the vnode's data version number changed then its contents are
- * different */
- if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
- afs_zap_data(vnode);
- up_write(&vnode->validate_lock);
-valid:
- _leave(" = 0");
- return 0;
-
-error_unlock:
- up_write(&vnode->validate_lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* read the attributes of an inode
*/
-int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int afs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct afs_vnode *vnode = AFS_FS_I(inode);
- int seq = 0;
+ struct key *key;
+ int ret, seq;
_enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
+ if (vnode->volume &&
+ !(query_flags & AT_STATX_DONT_SYNC) &&
+ atomic64_read(&vnode->cb_expires_at) == AFS_NO_CB_PROMISE) {
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+ ret = afs_validate(vnode, key);
+ key_put(key);
+ if (ret < 0)
+ return ret;
+ }
+
do {
- read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- generic_fillattr(&init_user_ns, inode, stat);
+ seq = read_seqbegin(&vnode->cb_lock);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
stat->nlink > 0)
stat->nlink -= 1;
- } while (need_seqretry(&vnode->cb_lock, seq));
- done_seqretry(&vnode->cb_lock, seq);
+ /* Lie about the size of directories. We maintain a locally
+ * edited copy and may make different allocation decisions on
+ * it, but we need to give userspace the server's size.
+ */
+ if (S_ISDIR(inode->i_mode))
+ stat->size = vnode->netfs.remote_i_size;
+ } while (read_seqretry(&vnode->cb_lock, seq));
+
return 0;
}
@@ -752,9 +723,9 @@ int afs_drop_inode(struct inode *inode)
_enter("");
if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
- return generic_delete_inode(inode);
+ return inode_just_drop(inode);
else
- return generic_drop_inode(inode);
+ return inode_generic_drop(inode);
}
/*
@@ -762,9 +733,9 @@ int afs_drop_inode(struct inode *inode)
*/
void afs_evict_inode(struct inode *inode)
{
- struct afs_vnode *vnode;
-
- vnode = AFS_FS_I(inode);
+ struct afs_vnode_cache_aux aux;
+ struct afs_super_info *sbi = AFS_FS_S(inode->i_sb);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
_enter("{%llx:%llu.%d}",
vnode->fid.vid,
@@ -775,7 +746,25 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
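+
+ /* Write out any dirty locally-held directory or symlink content before
+ * the inode is discarded.
+ */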
+ if ((S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) &&
+ (inode_state_read_once(inode) & I_DIRTY) &&
+ !sbi->dyn_root) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .for_sync = true,
+ .range_end = LLONG_MAX,
+ };
+
+ afs_single_writepages(inode->i_mapping, &wbc);
+ }
+
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
+ netfs_free_folioq_buffer(vnode->directory);
+
+ afs_set_cache_aux(vnode, &aux);
+ netfs_clear_inode_writeback(inode, &aux);
clear_inode(inode);
while (!list_empty(&vnode->wb_keys)) {
@@ -785,16 +774,8 @@ void afs_evict_inode(struct inode *inode)
afs_put_wb_key(wbk);
}
-#ifdef CONFIG_AFS_FSCACHE
- {
- struct afs_vnode_cache_aux aux;
-
- aux.data_version = vnode->status.data_version;
- fscache_relinquish_cookie(vnode->cache, &aux,
- test_bit(AFS_VNODE_DELETED, &vnode->flags));
- vnode->cache = NULL;
- }
-#endif
+ fscache_relinquish_cookie(afs_vnode_cache(vnode),
+ test_bit(AFS_VNODE_DELETED, &vnode->flags));
afs_prune_wb_keys(vnode);
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
@@ -808,7 +789,7 @@ void afs_evict_inode(struct inode *inode)
static void afs_setattr_success(struct afs_operation *op)
{
struct afs_vnode_param *vp = &op->file[0];
- struct inode *inode = &vp->vnode->vfs_inode;
+ struct inode *inode = &vp->vnode->netfs.inode;
loff_t old_i_size = i_size_read(inode);
op->setattr.old_i_size = old_i_size;
@@ -825,14 +806,22 @@ static void afs_setattr_success(struct afs_operation *op)
static void afs_setattr_edit_file(struct afs_operation *op)
{
struct afs_vnode_param *vp = &op->file[0];
- struct inode *inode = &vp->vnode->vfs_inode;
+ struct afs_vnode *vnode = vp->vnode;
+ struct inode *inode = &vnode->netfs.inode;
if (op->setattr.attr->ia_valid & ATTR_SIZE) {
loff_t size = op->setattr.attr->ia_size;
- loff_t i_size = op->setattr.old_i_size;
+ loff_t old = op->setattr.old_i_size;
+
+ /* Note: inode->i_size was updated by afs_apply_status() inside
+ * the I/O and callback locks.
+ */
- if (size < i_size)
+ if (size != old) {
truncate_pagecache(inode, size);
+ netfs_resize_file(&vnode->netfs, size, true);
+ fscache_resize_cookie(afs_vnode_cache(vnode), size);
+ }
}
}
@@ -846,43 +835,70 @@ static const struct afs_operation_ops afs_setattr_operation = {
/*
* set the attributes of an inode
*/
-int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int afs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
+ const unsigned int supported =
+ ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
+ ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH;
struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
+ struct inode *inode = &vnode->netfs.inode;
+ loff_t i_size;
int ret;
_enter("{%llx:%llu},{n=%pd},%x",
vnode->fid.vid, vnode->fid.vnode, dentry,
attr->ia_valid);
- if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
- ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET |
- ATTR_TOUCH))) {
+ if (!(attr->ia_valid & supported)) {
_leave(" = 0 [unsupported]");
return 0;
}
+ i_size = i_size_read(inode);
if (attr->ia_valid & ATTR_SIZE) {
- if (!S_ISREG(vnode->vfs_inode.i_mode))
+ if (!S_ISREG(inode->i_mode))
return -EISDIR;
- ret = inode_newsize_ok(&vnode->vfs_inode, attr->ia_size);
+ ret = inode_newsize_ok(inode, attr->ia_size);
if (ret)
return ret;
- if (attr->ia_size == i_size_read(&vnode->vfs_inode))
+ if (attr->ia_size == i_size)
attr->ia_valid &= ~ATTR_SIZE;
}
- /* flush any dirty data outstanding on a regular file */
- if (S_ISREG(vnode->vfs_inode.i_mode))
- filemap_write_and_wait(vnode->vfs_inode.i_mapping);
+ fscache_use_cookie(afs_vnode_cache(vnode), true);
/* Prevent any new writebacks from starting whilst we do this. */
down_write(&vnode->validate_lock);
+ if ((attr->ia_valid & ATTR_SIZE) && S_ISREG(inode->i_mode)) {
+ loff_t size = attr->ia_size;
+
+ /* Wait for any outstanding writes to the server to complete */
+ loff_t from = min(size, i_size);
+ loff_t to = max(size, i_size);
+ ret = filemap_fdatawait_range(inode->i_mapping, from, to);
+ if (ret < 0)
+ goto out_unlock;
+
+ /* Don't talk to the server if we're just shortening in-memory
+ * writes that haven't gone to the server yet.
+ */
+ if (!(attr->ia_valid & (supported & ~ATTR_SIZE & ~ATTR_MTIME)) &&
+ attr->ia_size < i_size &&
+ attr->ia_size > vnode->netfs.remote_i_size) {
+ truncate_setsize(inode, attr->ia_size);
+ netfs_resize_file(&vnode->netfs, size, false);
+ fscache_resize_cookie(afs_vnode_cache(vnode),
+ attr->ia_size);
+ ret = 0;
+ goto out_unlock;
+ }
+ }
+
op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ?
afs_file_key(attr->ia_file) : NULL),
vnode->volume);
@@ -907,6 +923,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
out_unlock:
up_write(&vnode->validate_lock);
+ fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index aa4c0d6c9780..009064b8d661 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -9,17 +9,18 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/pagemap.h>
#include <linux/rxrpc.h>
#include <linux/key.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
-#define FSCACHE_USE_NEW_IO_API
#include <linux/fscache.h>
#include <linux/backing-dev.h>
#include <linux/uuid.h>
#include <linux/mm_types.h>
#include <linux/dns_resolver.h>
+#include <crypto/krb5.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
@@ -33,6 +34,7 @@
struct pagevec;
struct afs_call;
struct afs_vnode;
+struct afs_server_probe;
/*
* Partial file-locking emulation mode. (The problem being that AFS3 only
@@ -73,21 +75,51 @@ enum afs_call_state {
};
/*
+ * Address preferences.
+ */
+struct afs_addr_preference {
+ union {
+ struct in_addr ipv4_addr; /* AF_INET address to compare against */
+ struct in6_addr ipv6_addr; /* AF_INET6 address to compare against */
+ };
+ sa_family_t family; /* Which address to use */
+ u16 prio; /* Priority */
+ u8 subnet_mask; /* How many bits to compare */
+};
+
+struct afs_addr_preference_list {
+ struct rcu_head rcu;
+ u16 version; /* Incremented when prefs list changes */
+ u8 ipv6_off; /* Offset of IPv6 addresses */
+ u8 nr; /* Number of addresses in total */
+ u8 max_prefs; /* Number of prefs allocated */
+ struct afs_addr_preference prefs[] __counted_by(max_prefs);
+};
+
+struct afs_address {
+ struct rxrpc_peer *peer;
+ short last_error; /* Last error from this address */
+ u16 prio; /* Address priority */
+};
+
+/*
* List of server addresses.
*/
struct afs_addr_list {
struct rcu_head rcu;
refcount_t usage;
u32 version; /* Version */
+ unsigned int debug_id;
+ unsigned int addr_pref_version; /* Version of address preference list */
unsigned char max_addrs;
unsigned char nr_addrs;
unsigned char preferred; /* Preferred address */
unsigned char nr_ipv4; /* Number of IPv4 addresses */
enum dns_record_source source:8;
enum dns_lookup_status status:8;
- unsigned long failed; /* Mask of addrs that failed locally/ICMP */
+ unsigned long probe_failed; /* Mask of addrs that failed locally/ICMP */
unsigned long responded; /* Mask of addrs that responded */
- struct sockaddr_rxrpc addrs[];
+ struct afs_address addrs[] __counted_by(max_addrs);
#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
};
@@ -96,11 +128,12 @@ struct afs_addr_list {
*/
struct afs_call {
const struct afs_call_type *type; /* type of call */
- struct afs_addr_list *alist; /* Address is alist[addr_ix] */
wait_queue_head_t waitq; /* processes awaiting completion */
struct work_struct async_work; /* async I/O processor */
struct work_struct work; /* actual work processor */
+ struct work_struct free_work; /* Deferred free processor */
struct rxrpc_call *rxcall; /* RxRPC call handle */
+ struct rxrpc_peer *peer; /* Remote endpoint */
struct key *key; /* security for this call */
struct afs_net *net; /* The network namespace */
struct afs_server *server; /* The fileserver record if fs op (pins ref) */
@@ -116,33 +149,38 @@ struct afs_call {
};
void *buffer; /* reply receive buffer */
union {
- long ret0; /* Value to reply with instead of 0 */
+ struct afs_endpoint_state *probe;
+ struct afs_addr_list *vl_probe;
struct afs_addr_list *ret_alist;
struct afs_vldb_entry *ret_vldb;
char *ret_str;
};
+ struct afs_fid fid; /* Primary vnode ID (or all zeroes) */
+ unsigned char probe_index; /* Address in ->probe_alist */
struct afs_operation *op;
unsigned int server_index;
- atomic_t usage;
+ refcount_t ref;
enum afs_call_state state;
spinlock_t state_lock;
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
- unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */
+ unsigned long long remaining; /* How much is left to receive */
+ unsigned int max_lifespan; /* Maximum lifespan in secs to set if not 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
unsigned count2; /* count used in unmarshalling */
unsigned char unmarshall; /* unmarshalling phase */
- unsigned char addr_ix; /* Address in ->alist */
bool drop_ref; /* T if need to drop ref for incoming call */
bool need_attention; /* T if RxRPC poked us */
bool async; /* T if asynchronous */
bool upgrade; /* T to request service upgrade */
- bool have_reply_time; /* T if have got reply_time */
bool intr; /* T if interruptible */
bool unmarshalling_error; /* T if an unmarshalling error occurred */
+ bool responded; /* Got a response from the call (may be abort) */
+ u8 security_ix; /* Security class */
u16 service_id; /* Actual service ID (after upgrade) */
unsigned int debug_id; /* Trace ID */
+ u32 enctype; /* Security encoding type */
u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
union { /* place to extract temporary data */
@@ -152,7 +190,7 @@ struct afs_call {
} __attribute__((packed));
__be64 tmp64;
};
- ktime_t reply_time; /* Time of first reply packet */
+ ktime_t issue_time; /* Time of issue of operation */
};
struct afs_call_type {
@@ -167,11 +205,17 @@ struct afs_call_type {
/* clean up a call */
void (*destructor)(struct afs_call *call);
+ /* Async receive processing function */
+ void (*async_rx)(struct work_struct *work);
+
/* Work function */
void (*work)(struct work_struct *work);
/* Call done function (gets called immediately on success or failure) */
void (*done)(struct afs_call *call);
+
+ /* Handle a call being immediately cancelled. */
+ void (*immediate_cancel)(struct afs_call *call);
};
/*
@@ -199,28 +243,6 @@ static inline struct key *afs_file_key(struct file *file)
}
/*
- * Record of an outstanding read operation on a vnode.
- */
-struct afs_read {
- loff_t pos; /* Where to start reading */
- loff_t len; /* How much we're asking for */
- loff_t actual_len; /* How much we're actually getting */
- loff_t file_size; /* File size returned by server */
- struct key *key; /* The key to use to reissue the read */
- struct afs_vnode *vnode; /* The file being read into. */
- struct netfs_read_subrequest *subreq; /* Fscache helper read request this belongs to */
- afs_dataversion_t data_version; /* Version number returned by server */
- refcount_t usage;
- unsigned int call_debug_id;
- unsigned int nr_pages;
- int error;
- void (*done)(struct afs_read *);
- void (*cleanup)(struct afs_read *);
- struct iov_iter *iter; /* Iterator representing the buffer */
- struct iov_iter def_iter; /* Default iterator */
-};
-
-/*
* AFS superblock private data
* - there's one superblock per volume
*/
@@ -262,15 +284,15 @@ struct afs_net {
struct socket *socket;
struct afs_call *spare_incoming_call;
struct work_struct charge_preallocation_work;
+ struct work_struct rx_oob_work;
struct mutex socket_mutex;
atomic_t nr_outstanding_calls;
atomic_t nr_superblocks;
/* Cell database */
struct rb_root cells;
- struct afs_cell *ws_cell;
- struct work_struct cells_manager;
- struct timer_list cells_timer;
+ struct idr cells_dyn_ino; /* cell->dynroot_ino mapping */
+ struct afs_cell __rcu *ws_cell;
atomic_t cells_outstanding;
struct rw_semaphore cells_lock;
struct mutex cells_alias_lock;
@@ -282,19 +304,12 @@ struct afs_net {
* cell, but in practice, people create aliases and subsets and there's
* no easy way to distinguish them.
*/
- seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */
- struct rb_root fs_servers; /* afs_server (by server UUID or address) */
+ seqlock_t fs_lock; /* For fs_probe_*, fs_proc */
struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */
struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
- struct hlist_head fs_addresses4; /* afs_server (by lowest IPv4 addr) */
- struct hlist_head fs_addresses6; /* afs_server (by lowest IPv6 addr) */
- seqlock_t fs_addr_lock; /* For fs_addresses[46] */
-
- struct work_struct fs_manager;
- struct timer_list fs_timer;
-
+ struct key *fs_cm_token_key; /* Key for creating CM tokens */
struct work_struct fs_prober;
struct timer_list fs_probe_timer;
atomic_t servers_outstanding;
@@ -307,12 +322,14 @@ struct afs_net {
struct proc_dir_entry *proc_afs; /* /proc/net/afs directory */
struct afs_sysnames *sysnames;
rwlock_t sysnames_lock;
+ struct afs_addr_preference_list __rcu *address_prefs;
+ u16 address_pref_version;
/* Statistics counters */
atomic_t n_lookup; /* Number of lookups done */
atomic_t n_reval; /* Number of dentries needing revalidation */
atomic_t n_inval; /* Number of invalidations by the server */
- atomic_t n_relpg; /* Number of invalidations by releasepage */
+ atomic_t n_relpg; /* Number of invalidations by release_folio */
atomic_t n_read_dir; /* Number of directory pages read */
atomic_t n_dir_cr; /* Number of directory entry creation edits */
atomic_t n_dir_rm; /* Number of directory entry removal edits */
@@ -325,13 +342,11 @@ struct afs_net {
extern const char afs_init_sysname[];
enum afs_cell_state {
- AFS_CELL_UNSET,
- AFS_CELL_ACTIVATING,
+ AFS_CELL_SETTING_UP,
+ AFS_CELL_UNLOOKED,
AFS_CELL_ACTIVE,
- AFS_CELL_DEACTIVATING,
- AFS_CELL_INACTIVE,
- AFS_CELL_FAILED,
- AFS_CELL_REMOVED,
+ AFS_CELL_REMOVING,
+ AFS_CELL_DEAD,
};
/*
@@ -362,14 +377,13 @@ struct afs_cell {
struct afs_cell *alias_of; /* The cell this is an alias of */
struct afs_volume *root_volume; /* The root.cell volume if there is one */
struct key *anonymous_key; /* anonymous user key for this cell */
+ struct work_struct destroyer; /* Destroyer for cell */
struct work_struct manager; /* Manager for init/deinit/dns */
+ struct timer_list management_timer; /* General management timer */
struct hlist_node proc_link; /* /proc cell list link */
-#ifdef CONFIG_AFS_FSCACHE
- struct fscache_cookie *cache; /* caching cookie */
-#endif
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
time64_t last_inactive; /* Time of last drop of usage count */
- atomic_t ref; /* Struct refcount */
+ refcount_t ref; /* Struct refcount */
atomic_t active; /* Active usage counter */
unsigned long flags;
#define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
@@ -381,18 +395,17 @@ struct afs_cell {
enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
unsigned int dns_lookup_count; /* Counter of DNS lookups */
unsigned int debug_id;
+ unsigned int dynroot_ino; /* Inode numbers for dynroot (a pair) */
/* The volumes belonging to this cell */
+ struct rw_semaphore vs_lock; /* Lock for server->volumes */
struct rb_root volumes; /* Tree of volumes on this server */
struct hlist_head proc_volumes; /* procfs volume list */
seqlock_t volume_lock; /* For volumes */
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
- seqlock_t fs_lock; /* For fs_servers */
- struct rw_semaphore fs_open_mmaps_lock;
- struct list_head fs_open_mmaps; /* List of vnodes that are mmapped */
- atomic_t fs_s_break; /* Counter of CB.InitCallBackState messages */
+ struct rw_semaphore fs_lock; /* For fs_servers */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -400,6 +413,7 @@ struct afs_cell {
u8 name_len; /* Length of name */
char *name; /* Cell name, case-flattened and NUL-padded */
+ char *key_desc; /* Authentication key description */
};
/*
@@ -414,15 +428,16 @@ struct afs_vlserver {
#define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */
#define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */
rwlock_t lock; /* Lock on addresses */
- atomic_t usage;
+ refcount_t ref;
unsigned int rtt; /* Server's current RTT in uS */
+ unsigned int debug_id;
/* Probe state */
wait_queue_head_t probe_wq;
atomic_t probe_outstanding;
spinlock_t probe_lock;
struct {
- unsigned int rtt; /* RTT in uS */
+ unsigned int rtt; /* Best RTT in uS (or UINT_MAX) */
u32 abort_code;
short error;
unsigned short flags;
@@ -432,6 +447,7 @@ struct afs_vlserver {
#define AFS_VLSERVER_PROBE_LOCAL_FAILURE 0x08 /* A local failure prevented a probe */
} probe;
+ u16 service_id; /* Service ID we're using */
u16 port;
u16 name_len; /* Length of name */
char name[]; /* Server name, case-flattened */
@@ -450,7 +466,7 @@ struct afs_vlserver_entry {
struct afs_vlserver_list {
struct rcu_head rcu;
- atomic_t usage;
+ refcount_t ref;
u8 nr_servers;
u8 index; /* Server currently in use */
u8 preferred; /* Preferred server */
@@ -481,6 +497,7 @@ struct afs_vldb_entry {
#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
#define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */
+ u8 vlsf_flags[AFS_NMAXNSERVERS];
short error;
u8 nr_servers; /* Number of server records */
u8 name_len;
@@ -488,6 +505,32 @@ struct afs_vldb_entry {
};
/*
+ * Fileserver endpoint state. This records the addresses of a fileserver's
+ * endpoints and the state and result of a round of probing on them. This
+ * allows the rotation algorithm to access those results without them being
+ * erased by a subsequent round of probing.
+ */
+struct afs_endpoint_state {
+ struct rcu_head rcu;
+ struct afs_addr_list *addresses; /* The addresses being probed */
+ unsigned long responsive_set; /* Bitset of responsive endpoints */
+ unsigned long failed_set; /* Bitset of endpoints we failed to probe */
+ refcount_t ref;
+ unsigned int server_id; /* Debug ID of server */
+ unsigned int probe_seq; /* Probe sequence (from server::probe_counter) */
+ atomic_t nr_probing; /* Number of outstanding probes */
+ unsigned int rtt; /* Best RTT in uS (or UINT_MAX) */
+ s32 abort_code;
+ short error;
+ unsigned long flags;
+#define AFS_ESTATE_RESPONDED 0 /* Set if the server responded */
+#define AFS_ESTATE_SUPERSEDED 1 /* Set if this record has been superseded */
+#define AFS_ESTATE_IS_YFS 2 /* Set if probe upgraded to YFS */
+#define AFS_ESTATE_NOT_YFS 3 /* Set if probe didn't upgrade to YFS */
+#define AFS_ESTATE_LOCAL_FAILURE 4 /* Set if there was a local failure (eg. ENOMEM) */
+};
+
+/*
* Record of fileserver with which we're actively communicating.
*/
struct afs_server {
@@ -497,70 +540,76 @@ struct afs_server {
struct afs_uuid _uuid;
};
- struct afs_addr_list __rcu *addresses;
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
- struct rb_node uuid_rb; /* Link in net->fs_servers */
- struct afs_server __rcu *uuid_next; /* Next server with same UUID */
- struct afs_server *uuid_prev; /* Previous server with same UUID */
- struct list_head probe_link; /* Link in net->fs_probe_list */
- struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
- struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
+ struct rb_node uuid_rb; /* Link in cell->fs_servers */
+ struct list_head probe_link; /* Link in net->fs_probe_* */
struct hlist_node proc_link; /* Link in net->fs_proc */
- struct work_struct initcb_work; /* Work for CB.InitCallBackState* */
- struct afs_server *gc_next; /* Next server in manager's list */
+ struct list_head volumes; /* RCU list of afs_server_entry objects */
+ struct work_struct destroyer; /* Work item to try and destroy a server */
+ struct timer_list timer; /* Management timer */
+ struct mutex cm_token_lock; /* Lock governing creation of appdata */
+ struct krb5_buffer cm_rxgk_appdata; /* Appdata to be included in RESPONSE packet */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */
#define AFS_SERVER_FL_UPDATING 1
#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */
-#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */
-#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */
-#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */
+#define AFS_SERVER_FL_UNCREATED 3 /* The record needs creating */
+#define AFS_SERVER_FL_CREATING 4 /* The record is being created */
+#define AFS_SERVER_FL_EXPIRED 5 /* The record has expired */
+#define AFS_SERVER_FL_NOT_FOUND 6 /* VL server says no such server */
+#define AFS_SERVER_FL_VL_FAIL 7 /* Failed to access VL server */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
- atomic_t ref; /* Object refcount */
+#define AFS_SERVER_FL_NO_RENAME2 20 /* YFS Fileserver doesn't support enhanced rename */
+ refcount_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
+ u16 service_id; /* Service ID we're using. */
+ short create_error; /* Creation error */
unsigned int rtt; /* Server's current RTT in uS */
unsigned int debug_id; /* Debugging ID for traces */
/* file service access */
rwlock_t fs_lock; /* access lock */
- /* callback promise management */
- unsigned cb_s_break; /* Break-everything counter. */
-
/* Probe state */
+ struct afs_endpoint_state __rcu *endpoint_state; /* Latest endpoint/probe state */
unsigned long probed_at; /* Time last probe was dispatched (jiffies) */
wait_queue_head_t probe_wq;
- atomic_t probe_outstanding;
+ unsigned int probe_counter; /* Number of probes issued */
spinlock_t probe_lock;
- struct {
- unsigned int rtt; /* RTT in uS */
- u32 abort_code;
- short error;
- bool responded:1;
- bool is_yfs:1;
- bool not_yfs:1;
- bool local_failure:1;
- } probe;
};
+enum afs_ro_replicating {
+ AFS_RO_NOT_REPLICATING, /* Not doing replication */
+ AFS_RO_REPLICATING_USE_OLD, /* Replicating; use old version */
+ AFS_RO_REPLICATING_USE_NEW, /* Replicating; switch to new version */
+} __mode(byte);
+
/*
* Replaceable volume server list.
*/
struct afs_server_entry {
struct afs_server *server;
+ struct afs_volume *volume;
+ struct list_head slink; /* Link in server->volumes */
+ time64_t cb_expires_at; /* Time at which volume-level callback expires */
+ unsigned long flags;
+#define AFS_SE_EXCLUDED 0 /* Set if server is to be excluded in rotation */
+#define AFS_SE_VOLUME_OFFLINE 1 /* Set if volume offline notice given */
+#define AFS_SE_VOLUME_BUSY 2 /* Set if volume busy notice given */
};
struct afs_server_list {
- afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
+ struct rcu_head rcu;
refcount_t usage;
+ bool attached; /* T if attached to servers */
+ enum afs_ro_replicating ro_replicating; /* RW->RO update (probably) in progress */
unsigned char nr_servers;
- unsigned char preferred; /* Preferred server */
unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */
unsigned int seq; /* Set to ->servers_seq when installed */
rwlock_t lock;
@@ -571,33 +620,46 @@ struct afs_server_list {
* Live AFS volume management.
*/
struct afs_volume {
- union {
- struct rcu_head rcu;
- afs_volid_t vid; /* volume ID */
- };
- atomic_t usage;
+ struct rcu_head rcu;
+ afs_volid_t vid; /* The volume ID of this volume */
+ afs_volid_t vids[AFS_MAXTYPES]; /* All associated volume IDs */
+ refcount_t ref;
+ unsigned int debug_id; /* Debugging ID for traces */
time64_t update_at; /* Time at which to next update */
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
struct rb_node cell_node; /* Link in cell->volumes */
struct hlist_node proc_link; /* Link in cell->proc_volumes */
struct super_block __rcu *sb; /* Superblock on which inodes reside */
+ struct work_struct destructor; /* Deferred destructor */
unsigned long flags;
#define AFS_VOLUME_NEEDS_UPDATE 0 /* - T if an update needs performing */
#define AFS_VOLUME_UPDATING 1 /* - T if an update is in progress */
#define AFS_VOLUME_WAIT 2 /* - T if users must wait for update */
#define AFS_VOLUME_DELETED 3 /* - T if volume appears deleted */
-#define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
-#define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
-#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
+#define AFS_VOLUME_MAYBE_NO_IBULK 4 /* - T if some servers don't have InlineBulkStatus */
+#define AFS_VOLUME_RM_TREE 5 /* - Set if volume removed from cell->volumes */
#ifdef CONFIG_AFS_FSCACHE
- struct fscache_cookie *cache; /* caching cookie */
+ struct fscache_volume *cache; /* Caching cookie */
#endif
struct afs_server_list __rcu *servers; /* List of servers on which volume resides */
rwlock_t servers_lock; /* Lock for ->servers */
unsigned int servers_seq; /* Incremented each time ->servers changes */
- unsigned cb_v_break; /* Break-everything counter. */
+ /* RO release tracking */
+ struct mutex volsync_lock; /* Time/state evaluation lock */
+ time64_t creation_time; /* Volume creation time (or TIME64_MIN) */
+ time64_t update_time; /* Volume update time (or TIME64_MIN) */
+
+ /* Callback management */
+ struct mutex cb_check_lock; /* Lock to control race to check after v_break */
+ time64_t cb_expires_at; /* Earliest volume callback expiry time */
+ atomic_t cb_ro_snapshot; /* RO volume update-from-snapshot counter */
+ atomic_t cb_v_break; /* Volume-break event counter. */
+ atomic_t cb_v_check; /* Volume-break has-been-checked counter. */
+ atomic_t cb_scrub; /* Scrub-all-data event counter. */
rwlock_t cb_v_break_lock;
+ struct rw_semaphore open_mmaps_lock;
+ struct list_head open_mmaps; /* List of vnodes that are mmapped */
afs_voltype_t type; /* type of volume */
char type_force; /* force volume type (suppress R/O -> R/W) */
@@ -623,35 +685,32 @@ enum afs_lock_state {
* leak from one inode to another.
*/
struct afs_vnode {
- struct inode vfs_inode; /* the VFS's inode record */
-
+ struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct afs_volume *volume; /* volume on which vnode resides */
struct afs_fid fid; /* the file identifier for this inode */
struct afs_file_status status; /* AFS status info for this file */
afs_dataversion_t invalid_before; /* Child dentries are invalid before this */
-#ifdef CONFIG_AFS_FSCACHE
- struct fscache_cookie *cache; /* caching cookie */
-#endif
struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
- struct mutex io_lock; /* Lock for serialising I/O on this mutex */
+ struct list_head io_lock_waiters; /* Threads waiting for the I/O lock */
struct rw_semaphore validate_lock; /* lock for validating this vnode */
struct rw_semaphore rmdir_lock; /* Lock for rmdir vs sillyrename */
struct key *silly_key; /* Silly rename key */
spinlock_t wb_lock; /* lock for wb_keys */
spinlock_t lock; /* waitqueue/flags lock */
unsigned long flags;
-#define AFS_VNODE_CB_PROMISED 0 /* Set if vnode has a callback promise */
+#define AFS_VNODE_IO_LOCK 0 /* Set if the I/O serialisation lock is held */
#define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */
#define AFS_VNODE_DIR_VALID 2 /* Set if dir contents are valid */
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
#define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
#define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */
#define AFS_VNODE_MODIFYING 10 /* Set if we're performing a modification op */
+#define AFS_VNODE_DIR_READ 11 /* Set if we've read a dir's contents */
+ struct folio_queue *directory; /* Directory contents */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
struct list_head granted_locks; /* locks granted on this file */
@@ -660,30 +719,42 @@ struct afs_vnode {
ktime_t locked_at; /* Time at which lock obtained */
enum afs_lock_state lock_state : 8;
afs_lock_type_t lock_type : 8;
+ unsigned int directory_size; /* Amount of space in ->directory */
/* outstanding callback notification on this file */
struct work_struct cb_work; /* Work for mmap'd files */
struct list_head cb_mmap_link; /* Link in cell->fs_open_mmaps */
void *cb_server; /* Server with callback/filelock */
atomic_t cb_nr_mmap; /* Number of mmaps */
- unsigned int cb_fs_s_break; /* Mass server break counter (cell->fs_s_break) */
- unsigned int cb_s_break; /* Mass break counter on ->server */
- unsigned int cb_v_break; /* Mass break counter on ->volume */
+ unsigned int cb_ro_snapshot; /* RO volume release counter on ->volume */
+ unsigned int cb_scrub; /* Scrub counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
+ unsigned int cb_v_check; /* Break check counter on ->volume */
seqlock_t cb_lock; /* Lock for ->cb_server, ->status, ->cb_*break */
- time64_t cb_expires_at; /* time at which callback expires */
+ atomic64_t cb_expires_at; /* time at which callback expires */
+#define AFS_NO_CB_PROMISE TIME64_MIN
};
static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
{
#ifdef CONFIG_AFS_FSCACHE
- return vnode->cache;
+ return netfs_i_cookie(&vnode->netfs);
#else
return NULL;
#endif
}
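+
+/*
+ * Set the cache cookie on a vnode.  If a cookie is attached, the mapping is
+ * marked so that ->release_folio() is always called and the cache can see
+ * folios being discarded.
+ */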
+static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
+ struct fscache_cookie *cookie)
+{
+#ifdef CONFIG_AFS_FSCACHE
+ vnode->netfs.cache = cookie;
+ if (cookie)
+ mapping_set_release_always(vnode->netfs.inode.i_mapping);
+#endif
+}
+
/*
* cached security record for one user's attempt to access a vnode
*/
@@ -703,47 +774,56 @@ struct afs_permits {
refcount_t usage;
unsigned short nr_permits; /* Number of records */
bool invalidated; /* Invalidated due to key change */
- struct afs_permit permits[]; /* List of permits sorted by key pointer */
+ struct afs_permit permits[] __counted_by(nr_permits); /* List of permits sorted by key pointer */
};
/*
* Error prioritisation and accumulation.
*/
struct afs_error {
- short error; /* Accumulated error */
+ s32 abort_code; /* Cumulative abort code */
+ short error; /* Cumulative error */
bool responded; /* T if server responded */
-};
-
-/*
- * Cursor for iterating over a server's address list.
- */
-struct afs_addr_cursor {
- struct afs_addr_list *alist; /* Current address list (pins ref) */
- unsigned long tried; /* Tried addresses */
- signed char index; /* Current address */
- bool responded; /* T if the current address responded */
- unsigned short nr_iterations; /* Number of address iterations */
- short error;
- u32 abort_code;
+ bool aborted; /* T if ->error is from an abort */
};
/*
* Cursor for iterating over a set of volume location servers.
*/
struct afs_vl_cursor {
- struct afs_addr_cursor ac;
struct afs_cell *cell; /* The cell we're querying */
struct afs_vlserver_list *server_list; /* Current server list (pins ref) */
struct afs_vlserver *server; /* Server on which this resides */
+ struct afs_addr_list *alist; /* Current address list (pins ref) */
struct key *key; /* Key for the server */
- unsigned long untried; /* Bitmask of untried servers */
- short index; /* Current server */
- short error;
+ unsigned long untried_servers; /* Bitmask of untried servers */
+ unsigned long addr_tried; /* Tried addresses */
+ struct afs_error cumul_error; /* Cumulative error */
+ unsigned int debug_id;
+ s32 call_abort_code;
+ short call_error; /* Error from single call */
+ short server_index; /* Current server */
+ signed char addr_index; /* Current address */
unsigned short flags;
#define AFS_VL_CURSOR_STOP 0x0001 /* Set to cease iteration */
#define AFS_VL_CURSOR_RETRY 0x0002 /* Set to do a retry */
#define AFS_VL_CURSOR_RETRIED 0x0004 /* Set if started a retry */
- unsigned short nr_iterations; /* Number of server iterations */
+ short nr_iterations; /* Number of server iterations */
+ bool call_responded; /* T if the current address responded */
+};
+
+/*
+ * Fileserver state tracking for an operation. An array of these is kept,
+ * indexed by server index.
+ */
+struct afs_server_state {
+ /* Tracking of fileserver probe state. Other operations may interfere
+ * by probing a fileserver when accessing other volumes.
+ */
+ unsigned int probe_seq;
+ unsigned long untried_addrs; /* Addresses we haven't tried yet */
+ struct wait_queue_entry probe_waiter;
+ struct afs_endpoint_state *endpoint_state; /* Endpoint state being monitored */
};
/*
@@ -764,7 +844,7 @@ struct afs_vnode_param {
struct afs_fid fid; /* Fid to access */
struct afs_status_cb scb; /* Returned status and callback promise */
afs_dataversion_t dv_before; /* Data version before the call */
- unsigned int cb_break_before; /* cb_break + cb_s_break before the call */
+ unsigned int cb_break_before; /* cb_break before the call */
u8 dv_delta; /* Expected change in data version */
bool put_vnode:1; /* T if we have a ref on the vnode */
bool need_io_lock:1; /* T if we need the I/O lock on this */
@@ -789,17 +869,17 @@ struct afs_operation {
struct afs_volume *volume; /* Volume being accessed */
struct afs_vnode_param file[2];
struct afs_vnode_param *more_files;
- struct afs_volsync volsync;
+ struct afs_volsync pre_volsync; /* Volsync before op */
+ struct afs_volsync volsync; /* Volsync returned by op */
struct dentry *dentry; /* Dentry to be altered */
struct dentry *dentry_2; /* Second dentry to be altered */
struct timespec64 mtime; /* Modification time to record */
struct timespec64 ctime; /* Change time to set */
+ struct afs_error cumul_error; /* Cumulative error */
short nr_files; /* Number of entries in file[], more_files */
- short error;
unsigned int debug_id;
unsigned int cb_v_break; /* Volume break counter before op */
- unsigned int cb_s_break; /* Server break counter before op */
union {
struct {
@@ -814,12 +894,13 @@ struct afs_operation {
bool need_rehash;
} unlink;
struct {
- struct dentry *rehash;
- struct dentry *tmp;
- bool new_negative;
+ struct dentry *rehash;
+ struct dentry *tmp;
+ unsigned int rename_flags;
+ bool new_negative;
} rename;
struct {
- struct afs_read *req;
+ struct netfs_io_subrequest *subreq;
} fetch;
struct {
afs_lock_type_t type;
@@ -829,7 +910,6 @@ struct afs_operation {
loff_t pos;
loff_t size;
loff_t i_size;
- bool laundering; /* Laundering page, PG_writeback not set */
} store;
struct {
struct iattr *attr;
@@ -844,13 +924,19 @@ struct afs_operation {
};
/* Fileserver iteration state */
- struct afs_addr_cursor ac;
struct afs_server_list *server_list; /* Current server list (pins ref) */
struct afs_server *server; /* Server we're using (ref pinned by server_list) */
+ struct afs_endpoint_state *estate; /* Current endpoint state (doesn't pin ref) */
+ struct afs_server_state *server_states; /* States of the servers involved */
struct afs_call *call;
- unsigned long untried; /* Bitmask of untried servers */
- short index; /* Current server */
- unsigned short nr_iterations; /* Number of server iterations */
+ unsigned long untried_servers; /* Bitmask of untried servers */
+ unsigned long addr_tried; /* Tried addresses */
+ s32 call_abort_code; /* Abort code from single call */
+ short call_error; /* Error from single call */
+ short server_index; /* Current server */
+ short nr_iterations; /* Number of server iterations */
+ signed char addr_index; /* Current address */
+ bool call_responded; /* T if the current address responded */
unsigned int flags;
#define AFS_OPERATION_STOP 0x0001 /* Set to cease iteration */
@@ -866,70 +952,45 @@ struct afs_operation {
#define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */
#define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */
#define AFS_OPERATION_DIR_CONFLICT 0x1000 /* Set if we detected a 3rd-party dir change */
+#define AFS_OPERATION_ASYNC 0x2000 /* Set if should run asynchronously */
};
/*
* Cache auxiliary data.
*/
struct afs_vnode_cache_aux {
- u64 data_version;
+ __be64 data_version;
} __packed;
-/*
- * We use folio->private to hold the amount of the folio that we've written to,
- * splitting the field into two parts. However, we need to represent a range
- * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
- * exceeds what we can encode.
- */
-#ifdef CONFIG_64BIT
-#define __AFS_FOLIO_PRIV_MASK 0x7fffffffUL
-#define __AFS_FOLIO_PRIV_SHIFT 32
-#define __AFS_FOLIO_PRIV_MMAPPED 0x80000000UL
-#else
-#define __AFS_FOLIO_PRIV_MASK 0x7fffUL
-#define __AFS_FOLIO_PRIV_SHIFT 16
-#define __AFS_FOLIO_PRIV_MMAPPED 0x8000UL
-#endif
-
-static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
-{
- int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
- return (shift > 0) ? shift : 0;
-}
-
-static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
+static inline void afs_set_cache_aux(struct afs_vnode *vnode,
+ struct afs_vnode_cache_aux *aux)
{
- unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;
-
- /* The lower bound is inclusive */
- return x << afs_folio_dirty_resolution(folio);
+ aux->data_version = cpu_to_be64(vnode->status.data_version);
}
-static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
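+/*
+ * Invalidate the cached data for a vnode, passing the current data version
+ * as the new coherency information.
+ */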
+static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int flags)
{
- unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;
+ struct afs_vnode_cache_aux aux;
- /* The upper bound is immediately beyond the region */
- return (x + 1) << afs_folio_dirty_resolution(folio);
+ afs_set_cache_aux(vnode, &aux);
+ fscache_invalidate(afs_vnode_cache(vnode), &aux,
+ i_size_read(&vnode->netfs.inode), flags);
}
-static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
-{
- unsigned int res = afs_folio_dirty_resolution(folio);
- from >>= res;
- to = (to - 1) >> res;
- return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
-}
-
-static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
-{
- return priv | __AFS_FOLIO_PRIV_MMAPPED;
-}
-
-static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
-{
- return priv & __AFS_FOLIO_PRIV_MMAPPED;
-}
+/*
+ * Directory iteration management.
+ */
+struct afs_dir_iter {
+ struct afs_vnode *dvnode;
+ union afs_xdr_dir_block *block;
+ struct folio_queue *fq;
+ unsigned int fpos;
+ int fq_slot;
+ unsigned int loop_check;
+ u8 nr_slots;
+ u8 bucket;
+ unsigned int prev_entry;
+};
#include <trace/events/afs.h>
@@ -937,45 +998,35 @@ static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
/*
* addr_list.c
*/
-static inline struct afs_addr_list *afs_get_addrlist(struct afs_addr_list *alist)
-{
- if (alist)
- refcount_inc(&alist->usage);
- return alist;
-}
-extern struct afs_addr_list *afs_alloc_addrlist(unsigned int,
- unsigned short,
- unsigned short);
-extern void afs_put_addrlist(struct afs_addr_list *);
+struct afs_addr_list *afs_get_addrlist(struct afs_addr_list *alist, enum afs_alist_trace reason);
+extern struct afs_addr_list *afs_alloc_addrlist(unsigned int nr);
+extern void afs_put_addrlist(struct afs_addr_list *alist, enum afs_alist_trace reason);
extern struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *,
const char *, size_t, char,
unsigned short, unsigned short);
+bool afs_addr_list_same(const struct afs_addr_list *a,
+ const struct afs_addr_list *b);
extern struct afs_vlserver_list *afs_dns_query(struct afs_cell *, time64_t *);
-extern bool afs_iterate_addresses(struct afs_addr_cursor *);
-extern int afs_end_cursor(struct afs_addr_cursor *);
-extern void afs_merge_fs_addr4(struct afs_addr_list *, __be32, u16);
-extern void afs_merge_fs_addr6(struct afs_addr_list *, __be32 *, u16);
+extern int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *addr,
+ __be32 xdr, u16 port);
+extern int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *addr,
+ __be32 *xdr, u16 port);
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist);
/*
- * cache.c
+ * addr_prefs.c
*/
-#ifdef CONFIG_AFS_FSCACHE
-extern struct fscache_netfs afs_cache_netfs;
-extern struct fscache_cookie_def afs_cell_cache_index_def;
-extern struct fscache_cookie_def afs_volume_cache_index_def;
-extern struct fscache_cookie_def afs_vnode_cache_index_def;
-#else
-#define afs_cell_cache_index_def (*(struct fscache_cookie_def *) NULL)
-#define afs_volume_cache_index_def (*(struct fscache_cookie_def *) NULL)
-#define afs_vnode_cache_index_def (*(struct fscache_cookie_def *) NULL)
-#endif
+int afs_proc_addr_prefs_write(struct file *file, char *buf, size_t size);
+void afs_get_address_preferences_rcu(struct afs_net *net, struct afs_addr_list *alist);
+void afs_get_address_preferences(struct afs_net *net, struct afs_addr_list *alist);
/*
* callback.c
*/
extern void afs_invalidate_mmap_work(struct work_struct *);
-extern void afs_server_init_callback_work(struct work_struct *work);
extern void afs_init_callback_state(struct afs_server *);
extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -983,13 +1034,15 @@ extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback
static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
{
- return vnode->cb_break + vnode->cb_v_break;
+ return vnode->cb_break + vnode->cb_ro_snapshot + vnode->cb_scrub;
}
static inline bool afs_cb_is_broken(unsigned int cb_break,
const struct afs_vnode *vnode)
{
- return cb_break != (vnode->cb_break + vnode->volume->cb_v_break);
+ return cb_break != (vnode->cb_break +
+ atomic_read(&vnode->volume->cb_ro_snapshot) +
+ atomic_read(&vnode->volume->cb_scrub));
}
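
The two helpers above implement a simple generation-counter scheme: a caller records the summed break counters before issuing an operation and treats any change afterwards as a broken callback promise. A minimal standalone sketch of the same idea follows (the toy_* names are invented, and the volume-level counters are folded into one struct for brevity):

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model of the generation-counter check above: snapshot the summed
     * break counters before an operation and treat any change afterwards as
     * "the callback promise was broken meanwhile".
     */
    struct toy_vnode {
        unsigned int cb_break;
        unsigned int cb_ro_snapshot;
        unsigned int cb_scrub;
    };

    static unsigned int calc_cb_break(const struct toy_vnode *v)
    {
        return v->cb_break + v->cb_ro_snapshot + v->cb_scrub;
    }

    static bool cb_is_broken(unsigned int snapshot, const struct toy_vnode *v)
    {
        return snapshot != calc_cb_break(v);
    }

    int main(void)
    {
        struct toy_vnode v = { .cb_break = 1 };
        unsigned int snap = calc_cb_break(&v);

        v.cb_ro_snapshot++;             /* e.g. a new RO snapshot appeared */
        printf("broken=%d\n", cb_is_broken(snap, &v));  /* prints 1 */
        return 0;
    }
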
/*
@@ -998,16 +1051,26 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
extern int afs_cell_init(struct afs_net *, const char *);
extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned,
enum afs_cell_trace);
-extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
- const char *, bool);
+enum afs_lookup_cell_for {
+ AFS_LOOKUP_CELL_DYNROOT,
+ AFS_LOOKUP_CELL_MOUNTPOINT,
+ AFS_LOOKUP_CELL_DIRECT_MOUNT,
+ AFS_LOOKUP_CELL_PRELOAD,
+ AFS_LOOKUP_CELL_ROOTCELL,
+ AFS_LOOKUP_CELL_ALIAS_CHECK,
+};
+struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ const char *name, unsigned int namesz,
+ const char *vllist,
+ enum afs_lookup_cell_for reason,
+ enum afs_cell_trace trace);
extern struct afs_cell *afs_use_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_unuse_cell(struct afs_net *, struct afs_cell *, enum afs_cell_trace);
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason);
extern struct afs_cell *afs_get_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_see_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_put_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_queue_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_manage_cells(struct work_struct *);
-extern void afs_cells_timer(struct timer_list *);
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs);
extern void __net_exit afs_cell_purge(struct afs_net *);
/*
@@ -1016,6 +1079,19 @@ extern void __net_exit afs_cell_purge(struct afs_net *);
extern bool afs_cm_incoming_call(struct afs_call *);
/*
+ * cm_security.c
+ */
+void afs_process_oob_queue(struct work_struct *work);
+#ifdef CONFIG_RXGK
+int afs_create_token_key(struct afs_net *net, struct socket *socket);
+#else
+static inline int afs_create_token_key(struct afs_net *net, struct socket *socket)
+{
+ return 0;
+}
+#endif
+
+/*
* dir.c
*/
extern const struct file_operations afs_dir_file_operations;
@@ -1023,15 +1099,34 @@ extern const struct inode_operations afs_dir_inode_operations;
extern const struct address_space_operations afs_dir_aops;
extern const struct dentry_operations afs_fs_dentry_operations;
+ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file);
+ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
+ __acquires(&dvnode->validate_lock);
extern void afs_d_release(struct dentry *);
extern void afs_check_for_remote_deletion(struct afs_operation *);
+int afs_single_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
/*
* dir_edit.c
*/
-extern void afs_edit_dir_add(struct afs_vnode *, struct qstr *, struct afs_fid *,
+extern void afs_edit_dir_add(struct afs_vnode *, const struct qstr *, struct afs_fid *,
enum afs_edit_dir_reason);
-extern void afs_edit_dir_remove(struct afs_vnode *, struct qstr *, enum afs_edit_dir_reason);
+extern void afs_edit_dir_remove(struct afs_vnode *, const struct qstr *, enum afs_edit_dir_reason);
+void afs_edit_dir_update(struct afs_vnode *vnode, const struct qstr *name,
+ struct afs_vnode *new_dvnode, enum afs_edit_dir_reason why);
+void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_vnode);
+
+/*
+ * dir_search.c
+ */
+unsigned int afs_dir_hash_name(const struct qstr *name);
+bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name);
+union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block);
+int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
+ struct afs_fid *_fid);
+int afs_dir_search(struct afs_vnode *dvnode, const struct qstr *name,
+ struct afs_fid *_fid, afs_dataversion_t *_dir_version);
/*
* dir_silly.c
@@ -1046,34 +1141,23 @@ extern int afs_silly_iput(struct dentry *, struct inode *);
extern const struct inode_operations afs_dynroot_inode_operations;
extern const struct dentry_operations afs_dynroot_dentry_operations;
-extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *);
-extern int afs_dynroot_mkdir(struct afs_net *, struct afs_cell *);
-extern void afs_dynroot_rmdir(struct afs_net *, struct afs_cell *);
-extern int afs_dynroot_populate(struct super_block *);
-extern void afs_dynroot_depopulate(struct super_block *);
+struct inode *afs_dynroot_iget_root(struct super_block *sb);
/*
* file.c
*/
extern const struct address_space_operations afs_file_aops;
-extern const struct address_space_operations afs_symlink_aops;
extern const struct inode_operations afs_file_inode_operations;
extern const struct file_operations afs_file_operations;
-extern const struct netfs_read_request_ops afs_req_ops;
+extern const struct afs_operation_ops afs_fetch_data_operation;
+extern const struct netfs_request_ops afs_req_ops;
extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
extern void afs_put_wb_key(struct afs_wb_key *);
extern int afs_open(struct inode *, struct file *);
extern int afs_release(struct inode *, struct file *);
-extern int afs_fetch_data(struct afs_vnode *, struct afs_read *);
-extern struct afs_read *afs_alloc_read(gfp_t);
-extern void afs_put_read(struct afs_read *);
-
-static inline struct afs_read *afs_get_read(struct afs_read *req)
-{
- refcount_inc(&req->usage);
- return req;
-}
+void afs_fetch_data_async_rx(struct work_struct *work);
+void afs_fetch_data_immediate_cancel(struct afs_call *call);
/*
* flock.c
@@ -1104,15 +1188,16 @@ extern void afs_fs_get_volume_status(struct afs_operation *);
extern void afs_fs_set_lock(struct afs_operation *);
extern void afs_fs_extend_lock(struct afs_operation *);
extern void afs_fs_release_lock(struct afs_operation *);
-extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
- struct afs_addr_cursor *, struct key *);
-extern bool afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
- struct afs_addr_cursor *, struct key *);
+int afs_fs_give_up_all_callbacks(struct afs_net *net, struct afs_server *server,
+ struct afs_address *addr, struct key *key);
+bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
+ struct afs_endpoint_state *estate, unsigned int addr_index,
+ struct key *key);
extern void afs_fs_inline_bulk_status(struct afs_operation *);
struct afs_acl {
u32 size;
- u8 data[];
+ u8 data[] __counted_by(size);
};
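
The __counted_by(size) annotation added above tells toolchains that support it that size counts the elements of data[], so bounds checking can cover the flexible array. Roughly, the pattern looks like this outside the kernel (the fallback macro and the acl_blob type are illustrative stand-ins, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #ifndef __counted_by
    #define __counted_by(m)     /* no-op where the attribute is unavailable */
    #endif

    /* acl_blob is an invented stand-in for struct afs_acl. */
    struct acl_blob {
        unsigned int size;
        unsigned char data[] __counted_by(size);
    };

    static struct acl_blob *acl_alloc(const void *src, unsigned int len)
    {
        struct acl_blob *acl = malloc(sizeof(*acl) + len);

        if (!acl)
            return NULL;
        acl->size = len;        /* set the counter before touching data[] */
        memcpy(acl->data, src, len);
        return acl;
    }

    int main(void)
    {
        struct acl_blob *acl = acl_alloc("example", 7);

        if (acl) {
            printf("%u bytes of ACL data\n", acl->size);
            free(acl);
        }
        return 0;
    }
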
extern void afs_fs_fetch_acl(struct afs_operation *);
@@ -1124,14 +1209,10 @@ extern void afs_fs_store_acl(struct afs_operation *);
extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *);
extern int afs_put_operation(struct afs_operation *);
extern bool afs_begin_vnode_operation(struct afs_operation *);
+extern void afs_end_vnode_operation(struct afs_operation *op);
extern void afs_wait_for_operation(struct afs_operation *);
extern int afs_do_sync_operation(struct afs_operation *);
-static inline void afs_op_nomem(struct afs_operation *op)
-{
- op->error = -ENOMEM;
-}
-
static inline void afs_op_set_vnode(struct afs_operation *op, unsigned int n,
struct afs_vnode *vnode)
{
@@ -1148,12 +1229,17 @@ static inline void afs_op_set_fid(struct afs_operation *op, unsigned int n,
/*
* fs_probe.c
*/
+struct afs_endpoint_state *afs_get_endpoint_state(struct afs_endpoint_state *estate,
+ enum afs_estate_trace where);
+void afs_put_endpoint_state(struct afs_endpoint_state *estate, enum afs_estate_trace where);
extern void afs_fileserver_probe_result(struct afs_call *);
-extern void afs_fs_probe_fileserver(struct afs_net *, struct afs_server *, struct key *, bool);
-extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key);
+int afs_wait_for_fs_probes(struct afs_operation *op, struct afs_server_state *states, bool intr);
extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
extern void afs_fs_probe_dispatcher(struct work_struct *);
-extern int afs_wait_for_one_fs_probe(struct afs_server *, bool);
+int afs_wait_for_one_fs_probe(struct afs_server *server, struct afs_endpoint_state *estate,
+ unsigned long exclude, bool is_intr);
extern void afs_fs_probe_cleanup(struct afs_net *);
/*
@@ -1161,17 +1247,18 @@ extern void afs_fs_probe_cleanup(struct afs_net *);
*/
extern const struct afs_operation_ops afs_fetch_status_operation;
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op);
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback);
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_ilookup5_test_by_fid(struct inode *, void *);
-extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
extern struct inode *afs_root_iget(struct super_block *, struct key *);
-extern bool afs_check_validity(struct afs_vnode *);
-extern int afs_validate(struct afs_vnode *, struct key *);
-extern int afs_getattr(struct user_namespace *mnt_userns, const struct path *,
+extern int afs_getattr(struct mnt_idmap *idmap, const struct path *,
struct kstat *, u32, unsigned int);
-extern int afs_setattr(struct user_namespace *mnt_userns, struct dentry *, struct iattr *);
+extern int afs_setattr(struct mnt_idmap *idmap, struct dentry *, struct iattr *);
extern void afs_evict_inode(struct inode *);
extern int afs_drop_inode(struct inode *);
@@ -1203,7 +1290,7 @@ static inline struct afs_net *afs_i2net(struct inode *inode)
static inline struct afs_net *afs_v2net(struct afs_vnode *vnode)
{
- return afs_i2net(&vnode->vfs_inode);
+ return afs_i2net(&vnode->netfs.inode);
}
static inline struct afs_net *afs_sock2net(struct sock *sk)
@@ -1224,6 +1311,31 @@ static inline void __afs_stat(atomic_t *s)
extern int afs_abort_to_error(u32);
extern void afs_prioritise_error(struct afs_error *, int, u32);
+static inline void afs_op_nomem(struct afs_operation *op)
+{
+ op->cumul_error.error = -ENOMEM;
+}
+
+static inline int afs_op_error(const struct afs_operation *op)
+{
+ return op->cumul_error.error;
+}
+
+static inline s32 afs_op_abort_code(const struct afs_operation *op)
+{
+ return op->cumul_error.abort_code;
+}
+
+static inline int afs_op_set_error(struct afs_operation *op, int error)
+{
+ return op->cumul_error.error = error;
+}
+
+static inline void afs_op_accumulate_error(struct afs_operation *op, int error, s32 abort_code)
+{
+ afs_prioritise_error(&op->cumul_error, error, abort_code);
+}
+
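
These wrappers funnel every per-call result through op->cumul_error rather than overwriting a single error field, so the most informative failure survives a rotation across several servers. A rough standalone sketch of that accumulate-don't-clobber idea follows; the policy and names below are a simplified stand-in for afs_prioritise_error(), not its actual rules:

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    /* Keep one representative error across several attempts, preferring a
     * server abort over a mere transport failure.
     */
    struct cumul_error {
        int  error;
        bool responded;
        bool aborted;
    };

    static void accumulate(struct cumul_error *e, int error, bool from_abort)
    {
        if (error == 0) {
            e->error = 0;
            e->aborted = false;
            return;
        }
        /* Once a server has responded, don't let a later network error
         * overwrite the more meaningful result.
         */
        if (e->responded && !from_abort)
            return;
        e->error = error;
        e->aborted = from_abort;
        if (from_abort)
            e->responded = true;
    }

    int main(void)
    {
        struct cumul_error e = { .error = -ETIMEDOUT };

        accumulate(&e, -ECONNREFUSED, false);   /* transport failure */
        accumulate(&e, -EACCES, true);          /* server abort: keep this */
        accumulate(&e, -ENETUNREACH, false);    /* ignored: already responded */
        printf("final error %d, aborted=%d\n", e.error, e.aborted);
        return 0;
    }
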
/*
* mntpt.c
*/
@@ -1254,6 +1366,7 @@ static inline void afs_put_sysnames(struct afs_sysnames *sysnames) {}
/*
* rotate.c
*/
+void afs_clear_server_states(struct afs_operation *op);
extern bool afs_select_fileserver(struct afs_operation *);
extern void afs_dump_edestaddrreq(const struct afs_operation *);
@@ -1266,8 +1379,10 @@ extern int __net_init afs_open_socket(struct afs_net *);
extern void __net_exit afs_close_socket(struct afs_net *);
extern void afs_charge_preallocation(struct work_struct *);
extern void afs_put_call(struct afs_call *);
-extern void afs_make_call(struct afs_addr_cursor *, struct afs_call *, gfp_t);
-extern long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
+void afs_deferred_put_call(struct afs_call *call);
+void afs_make_call(struct afs_call *call, gfp_t gfp);
+void afs_deliver_to_call(struct afs_call *call);
+void afs_wait_for_call_to_complete(struct afs_call *call);
extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
const struct afs_call_type *,
size_t, size_t);
@@ -1277,15 +1392,41 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, bool);
extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
+static inline struct afs_call *afs_get_call(struct afs_call *call,
+ enum afs_call_trace why)
+{
+ int r;
+
+ __refcount_inc(&call->ref, &r);
+
+ trace_afs_call(call->debug_id, why, r + 1,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+ return call;
+}
+
+static inline void afs_see_call(struct afs_call *call, enum afs_call_trace why)
+{
+ int r = refcount_read(&call->ref);
+
+ trace_afs_call(call->debug_id, why, r,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+}
+
static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
gfp_t gfp)
{
- op->call = call;
- op->type = call->type;
- call->op = op;
- call->key = op->key;
- call->intr = !(op->flags & AFS_OPERATION_UNINTR);
- afs_make_call(&op->ac, call, gfp);
+ struct afs_addr_list *alist = op->estate->addresses;
+
+ op->call = call;
+ op->type = call->type;
+ call->op = op;
+ call->key = op->key;
+ call->intr = !(op->flags & AFS_OPERATION_UNINTR);
+ call->peer = rxrpc_kernel_get_peer(alist->addrs[op->addr_index].peer);
+ call->service_id = op->server->service_id;
+ afs_make_call(call, gfp);
}
static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
@@ -1293,7 +1434,7 @@ static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t si
call->iov_len = size;
call->kvec[0].iov_base = buf;
call->kvec[0].iov_len = size;
- iov_iter_kvec(&call->def_iter, READ, call->kvec, 1, size);
+ iov_iter_kvec(&call->def_iter, ITER_DEST, call->kvec, 1, size);
}
static inline void afs_extract_to_tmp(struct afs_call *call)
@@ -1311,7 +1452,7 @@ static inline void afs_extract_to_tmp64(struct afs_call *call)
static inline void afs_extract_discard(struct afs_call *call, size_t size)
{
call->iov_len = size;
- iov_iter_discard(&call->def_iter, READ, size);
+ iov_iter_discard(&call->def_iter, ITER_DEST, size);
}
static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
@@ -1383,11 +1524,10 @@ extern void afs_put_permits(struct afs_permits *);
extern void afs_clear_permits(struct afs_vnode *);
extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int,
struct afs_status_cb *);
-extern void afs_zap_permits(struct rcu_head *);
extern struct key *afs_request_key(struct afs_cell *);
extern struct key *afs_request_key_rcu(struct afs_cell *);
extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
-extern int afs_permission(struct user_namespace *, struct inode *, int);
+extern int afs_permission(struct mnt_idmap *, struct inode *, int);
extern void __exit afs_clean_up_permit_cache(void);
/*
@@ -1395,20 +1535,29 @@ extern void __exit afs_clean_up_permit_cache(void);
*/
extern spinlock_t afs_server_peer_lock;
-extern struct afs_server *afs_find_server(struct afs_net *,
- const struct sockaddr_rxrpc *);
-extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer);
extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
-extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace);
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason);
+void afs_unuse_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
+void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_manage_servers(struct work_struct *);
-extern void afs_servers_timer(struct timer_list *);
+void afs_purge_servers(struct afs_cell *cell);
extern void afs_fs_probe_timer(struct timer_list *);
-extern void __net_exit afs_purge_servers(struct afs_net *);
-extern bool afs_check_server_record(struct afs_operation *, struct afs_server *);
+void __net_exit afs_wait_for_servers(struct afs_net *net);
+bool afs_check_server_record(struct afs_operation *op, struct afs_server *server, struct key *key);
+
+static inline void afs_see_server(struct afs_server *server, enum afs_server_trace trace)
+{
+ int r = refcount_read(&server->ref);
+ int a = atomic_read(&server->active);
+
+ trace_afs_server(server->debug_id, r, a, trace);
+}

static inline void afs_inc_servers_outstanding(struct afs_net *net)
{
@@ -1436,10 +1585,14 @@ static inline struct afs_server_list *afs_get_serverlist(struct afs_server_list
}
extern void afs_put_serverlist(struct afs_net *, struct afs_server_list *);
-extern struct afs_server_list *afs_alloc_server_list(struct afs_cell *, struct key *,
- struct afs_vldb_entry *,
- u8);
+struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
+ struct key *key,
+ struct afs_vldb_entry *vldb);
extern bool afs_annotate_server_list(struct afs_server_list *, struct afs_server_list *);
+void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist);
+void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist,
+ struct afs_server_list *old);
+void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist);
/*
* super.c
@@ -1448,13 +1601,24 @@ extern int __init afs_fs_init(void);
extern void afs_fs_exit(void);
/*
+ * validation.c
+ */
+bool afs_check_validity(const struct afs_vnode *vnode);
+int afs_update_volume_state(struct afs_operation *op);
+int afs_validate(struct afs_vnode *vnode, struct key *key);
+
+/*
* vlclient.c
*/
extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *,
const char *, int);
extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uuid_t *);
-extern struct afs_call *afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *,
- struct key *, struct afs_vlserver *, unsigned int);
+struct afs_call *afs_vl_get_capabilities(struct afs_net *net,
+ struct afs_addr_list *alist,
+ unsigned int addr_index,
+ struct key *key,
+ struct afs_vlserver *server,
+ unsigned int server_index);
extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *);
extern char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *);
@@ -1484,14 +1648,14 @@ extern int afs_end_vlserver_operation(struct afs_vl_cursor *);
*/
static inline struct afs_vlserver *afs_get_vlserver(struct afs_vlserver *vlserver)
{
- atomic_inc(&vlserver->usage);
+ refcount_inc(&vlserver->ref);
return vlserver;
}
static inline struct afs_vlserver_list *afs_get_vlserverlist(struct afs_vlserver_list *vllist)
{
if (vllist)
- atomic_inc(&vllist->usage);
+ refcount_inc(&vllist->ref);
return vllist;
}
@@ -1506,34 +1670,29 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
* volume.c
*/
extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
-extern void afs_activate_volume(struct afs_volume *);
+extern int afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
+bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason);
extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
-extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
+void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason);
extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
/*
* write.c
*/
-extern int afs_set_page_dirty(struct page *);
-extern int afs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-extern int afs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-extern int afs_writepage(struct page *, struct writeback_control *);
+void afs_prepare_write(struct netfs_io_subrequest *subreq);
+void afs_issue_write(struct netfs_io_subrequest *subreq);
+void afs_begin_writeback(struct netfs_io_request *wreq);
+void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
extern int afs_writepages(struct address_space *, struct writeback_control *);
-extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
extern void afs_prune_wb_keys(struct afs_vnode *);
-extern int afs_launder_page(struct page *);
/*
* xattr.c
*/
-extern const struct xattr_handler *afs_xattr_handlers[];
+extern const struct xattr_handler * const afs_xattr_handlers[];
/*
* yfsclient.c
@@ -1547,6 +1706,9 @@ extern void yfs_fs_remove_dir(struct afs_operation *);
extern void yfs_fs_link(struct afs_operation *);
extern void yfs_fs_symlink(struct afs_operation *);
extern void yfs_fs_rename(struct afs_operation *);
+void yfs_fs_rename_replace(struct afs_operation *op);
+void yfs_fs_rename_noreplace(struct afs_operation *op);
+void yfs_fs_rename_exchange(struct afs_operation *op);
extern void yfs_fs_store_data(struct afs_operation *);
extern void yfs_fs_setattr(struct afs_operation *);
extern void yfs_fs_get_volume_status(struct afs_operation *);
@@ -1575,12 +1737,12 @@ extern void yfs_fs_store_opaque_acl2(struct afs_operation *);
*/
static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
{
- return container_of(inode, struct afs_vnode, vfs_inode);
+ return container_of(inode, struct afs_vnode, netfs.inode);
}
static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
{
- return &vnode->vfs_inode;
+ return &vnode->netfs.inode;
}
/*
@@ -1592,7 +1754,7 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
struct afs_vnode_param *dir_vp,
struct dentry *dentry)
{
- if (!op->error)
+ if (!op->cumul_error.error)
dentry->d_fsdata =
(void *)(unsigned long)dir_vp->scb.status.data_version;
}
@@ -1603,8 +1765,8 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
*/
static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
{
- i_size_write(&vnode->vfs_inode, size);
- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+ i_size_write(&vnode->netfs.inode, size);
+ vnode->netfs.inode.i_blocks = ((size + 1023) >> 10) << 1;
}
/*
@@ -1631,6 +1793,38 @@ static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where)
return -EIO;
}
+/*
+ * Set the callback promise on a vnode.
+ */
+static inline void afs_set_cb_promise(struct afs_vnode *vnode, time64_t expires_at,
+ enum afs_cb_promise_trace trace)
+{
+ atomic64_set(&vnode->cb_expires_at, expires_at);
+ trace_afs_cb_promise(vnode, trace);
+}
+
+/*
+ * Clear the callback promise on a vnode, returning true if it was promised.
+ */
+static inline bool afs_clear_cb_promise(struct afs_vnode *vnode,
+ enum afs_cb_promise_trace trace)
+{
+ trace_afs_cb_promise(vnode, trace);
+ return atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE;
+}
+
+/*
+ * Mark a directory as being invalid.
+ */
+static inline void afs_invalidate_dir(struct afs_vnode *dvnode,
+ enum afs_dir_invalid_trace trace)
+{
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ trace_afs_dir_invalid(dvnode, trace);
+ afs_stat_v(dvnode, n_inval);
+ }
+}
+
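
afs_clear_cb_promise() above relies on atomic64_xchg() so that exactly one clearer observes the transition from "promised" to AFS_NO_CB_PROMISE. The same clear-and-test shape in plain C11 (the field name and the NO_PROMISE stand-in are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NO_PROMISE  (-1LL)      /* stand-in for AFS_NO_CB_PROMISE */

    /* Atomically swap in the "no promise" marker and report whether a
     * promise was present, so only one clearer sees the transition.
     */
    static _Atomic long long cb_expires_at = 1234567;

    static bool clear_promise(void)
    {
        return atomic_exchange(&cb_expires_at, NO_PROMISE) != NO_PROMISE;
    }

    int main(void)
    {
        printf("first clear: %d\n", clear_promise());   /* 1: was promised */
        printf("second clear: %d\n", clear_promise());  /* 0: already clear */
        return 0;
    }
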
/*****************************************************************************/
/*
* debug tracing
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 179004b15566..e6bb8237db98 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -41,8 +41,6 @@ const char afs_init_sysname[] = "arm_linux26";
const char afs_init_sysname[] = "aarch64_linux26";
#elif defined(CONFIG_X86_32)
const char afs_init_sysname[] = "i386_linux26";
-#elif defined(CONFIG_IA64)
-const char afs_init_sysname[] = "ia64_linux26";
#elif defined(CONFIG_PPC64)
const char afs_init_sysname[] = "ppc64_linux26";
#elif defined(CONFIG_PPC32)
@@ -75,29 +73,21 @@ static int __net_init afs_net_init(struct net *net_ns)
generate_random_uuid((unsigned char *)&net->uuid);
INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation);
+ INIT_WORK(&net->rx_oob_work, afs_process_oob_queue);
mutex_init(&net->socket_mutex);
net->cells = RB_ROOT;
+ idr_init(&net->cells_dyn_ino);
init_rwsem(&net->cells_lock);
- INIT_WORK(&net->cells_manager, afs_manage_cells);
- timer_setup(&net->cells_timer, afs_cells_timer, 0);
-
mutex_init(&net->cells_alias_lock);
mutex_init(&net->proc_cells_lock);
INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
- net->fs_servers = RB_ROOT;
INIT_LIST_HEAD(&net->fs_probe_fast);
INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
- INIT_HLIST_HEAD(&net->fs_addresses4);
- INIT_HLIST_HEAD(&net->fs_addresses6);
- seqlock_init(&net->fs_addr_lock);
-
- INIT_WORK(&net->fs_manager, afs_manage_servers);
- timer_setup(&net->fs_timer, afs_servers_timer, 0);
INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
atomic_set(&net->servers_outstanding, 1);
@@ -133,13 +123,14 @@ error_open_socket:
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
error_cell_init:
net->live = false;
afs_proc_cleanup(net);
error_proc:
afs_put_sysnames(net->sysnames);
error_sysnames:
+ idr_destroy(&net->cells_dyn_ino);
net->live = false;
return ret;
}
@@ -154,10 +145,12 @@ static void __net_exit afs_net_exit(struct net *net_ns)
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
afs_close_socket(net);
afs_proc_cleanup(net);
afs_put_sysnames(net->sysnames);
+ idr_destroy(&net->cells_dyn_ino);
+ kfree_rcu(rcu_access_pointer(net->address_prefs), rcu);
}
static struct pernet_operations afs_net_ops = {
@@ -176,23 +169,16 @@ static int __init afs_init(void)
printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
- afs_wq = alloc_workqueue("afs", 0, 0);
+ afs_wq = alloc_workqueue("afs", WQ_PERCPU, 0);
if (!afs_wq)
goto error_afs_wq;
- afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
+ afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!afs_async_calls)
goto error_async;
- afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0);
+ afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!afs_lock_manager)
goto error_lockmgr;
-#ifdef CONFIG_AFS_FSCACHE
- /* we want to be able to cache */
- ret = fscache_register_netfs(&afs_cache_netfs);
- if (ret < 0)
- goto error_cache;
-#endif
-
ret = register_pernet_device(&afs_net_ops);
if (ret < 0)
goto error_net;
@@ -215,10 +201,6 @@ error_proc:
error_fs:
unregister_pernet_device(&afs_net_ops);
error_net:
-#ifdef CONFIG_AFS_FSCACHE
- fscache_unregister_netfs(&afs_cache_netfs);
-error_cache:
-#endif
destroy_workqueue(afs_lock_manager);
error_lockmgr:
destroy_workqueue(afs_async_calls);
@@ -245,9 +227,6 @@ static void __exit afs_exit(void)
proc_remove(afs_proc_symlink);
afs_fs_exit();
unregister_pernet_device(&afs_net_ops);
-#ifdef CONFIG_AFS_FSCACHE
- fscache_unregister_netfs(&afs_cache_netfs);
-#endif
destroy_workqueue(afs_lock_manager);
destroy_workqueue(afs_async_calls);
destroy_workqueue(afs_wq);
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 1d1a8debe472..c8a7f266080d 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <crypto/krb5.h>
#include "internal.h"
#include "afs_fs.h"
#include "protocol_uae.h"
@@ -69,6 +70,7 @@ int afs_abort_to_error(u32 abort_code)
/* Unified AFS error table */
case UAEPERM: return -EPERM;
case UAENOENT: return -ENOENT;
+ case UAEAGAIN: return -EAGAIN;
case UAEACCES: return -EACCES;
case UAEBUSY: return -EBUSY;
case UAEEXIST: return -EEXIST;
@@ -102,7 +104,34 @@ int afs_abort_to_error(u32 abort_code)
case RXKADDATALEN: return -EKEYREJECTED;
case RXKADILLEGALLEVEL: return -EKEYREJECTED;
+ case RXGK_INCONSISTENCY: return -EPROTO;
+ case RXGK_PACKETSHORT: return -EPROTO;
+ case RXGK_BADCHALLENGE: return -EPROTO;
+ case RXGK_SEALEDINCON: return -EKEYREJECTED;
+ case RXGK_NOTAUTH: return -EKEYREJECTED;
+ case RXGK_EXPIRED: return -EKEYEXPIRED;
+ case RXGK_BADLEVEL: return -EKEYREJECTED;
+ case RXGK_BADKEYNO: return -EKEYREJECTED;
+ case RXGK_NOTRXGK: return -EKEYREJECTED;
+ case RXGK_UNSUPPORTED: return -EKEYREJECTED;
+ case RXGK_GSSERROR: return -EKEYREJECTED;
+#ifdef RXGK_BADETYPE
+ case RXGK_BADETYPE: return -ENOPKG;
+#endif
+#ifdef RXGK_BADTOKEN
+ case RXGK_BADTOKEN: return -EKEYREJECTED;
+#endif
+#ifdef RXGK_DATALEN
+ case RXGK_DATALEN: return -EPROTO;
+#endif
+#ifdef RXGK_BADQOP
+ case RXGK_BADQOP: return -EKEYREJECTED;
+#endif
+
+ case KRB5_PROG_KEYTYPE_NOSUPP: return -ENOPKG;
+
case RXGEN_OPCODE: return -ENOTSUPP;
+ case RX_INVALID_OPERATION: return -ENOTSUPP;
default: return -EREMOTEIO;
}
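
The RXGK additions above guard some case labels with #ifdef, presumably so the table still builds against rxgk/krb5 headers that do not yet define those abort codes. A toy version of that compile-out pattern (the TOY_* codes and errno choices are invented):

    #include <stdio.h>
    #include <errno.h>

    /* Map wire abort codes to local errnos, dropping case labels for codes
     * the installed headers do not define.
     */
    #define TOY_ABORT_EXPIRED   7
    /* TOY_ABORT_FUTURE is deliberately left undefined */

    static int abort_to_errno(unsigned int abort_code)
    {
        switch (abort_code) {
        case TOY_ABORT_EXPIRED:         return -EACCES;
    #ifdef TOY_ABORT_FUTURE
        case TOY_ABORT_FUTURE:          return -EPROTO; /* compiled out here */
    #endif
        default:                        return -EIO;
        }
    }

    int main(void)
    {
        printf("%d %d\n", abort_to_errno(TOY_ABORT_EXPIRED), abort_to_errno(42));
        return 0;
    }
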
@@ -115,6 +144,8 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
{
switch (error) {
case 0:
+ e->aborted = false;
+ e->error = 0;
return;
default:
if (e->error == -ETIMEDOUT ||
@@ -160,11 +191,18 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
if (e->responded)
return;
e->error = error;
+ e->aborted = false;
return;
case -ECONNABORTED:
- e->responded = true;
e->error = afs_abort_to_error(abort_code);
+ e->aborted = true;
+ e->responded = true;
+ return;
+ case -ENETRESET: /* Responded, but we seem to have changed address */
+ e->aborted = false;
+ e->responded = true;
+ e->error = error;
return;
}
}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index bbb2c210d139..57c204a3c04e 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -30,7 +30,7 @@ const struct file_operations afs_mntpt_file_operations = {
const struct inode_operations afs_mntpt_inode_operations = {
.lookup = afs_mntpt_lookup,
- .readlink = page_readlink,
+ .readlink = afs_readlink,
.getattr = afs_getattr,
};
@@ -87,7 +87,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->force = true;
}
if (ctx->cell) {
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_mntpt);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_mntpt);
ctx->cell = NULL;
}
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
@@ -107,7 +107,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size > AFS_MAXCELLNAME)
return -ENAMETOOLONG;
- cell = afs_lookup_cell(ctx->net, p, size, NULL, false);
+ cell = afs_lookup_cell(ctx->net, p, size, NULL,
+ AFS_LOOKUP_CELL_MOUNTPOINT,
+ afs_cell_trace_use_lookup_mntpt);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt);
return PTR_ERR(cell);
@@ -118,9 +120,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->volnamesz = sizeof(afs_root_volume) - 1;
} else {
/* read the contents of the AFS special symlink */
- struct page *page;
+ DEFINE_DELAYED_CALL(cleanup);
+ const char *content;
loff_t size = i_size_read(d_inode(mntpt));
- char *buf;
if (src_as->cell)
ctx->cell = afs_use_cell(src_as->cell, afs_cell_trace_use_mntpt);
@@ -128,24 +130,24 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size < 2 || size > PAGE_SIZE - 1)
return -EINVAL;
- page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- if (PageError(page)) {
- ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
- put_page(page);
- return ret;
+ content = afs_get_link(mntpt, d_inode(mntpt), &cleanup);
+ if (IS_ERR(content)) {
+ do_delayed_call(&cleanup);
+ return PTR_ERR(content);
}
- buf = kmap(page);
ret = -EINVAL;
- if (buf[size - 1] == '.')
- ret = vfs_parse_fs_string(fc, "source", buf, size - 1);
- kunmap(page);
- put_page(page);
+ if (content[size - 1] == '.')
+ ret = vfs_parse_fs_qstr(fc, "source",
+ &QSTR_LEN(content, size - 1));
+ do_delayed_call(&cleanup);
if (ret < 0)
return ret;
+
+ /* Don't cross a backup volume mountpoint from a backup volume */
+ if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
+ ctx->type == AFSVL_BACKVOL)
+ return -ENODEV;
}
return 0;
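
The mountpoint parser now borrows the symlink body through afs_get_link() and releases it with do_delayed_call() instead of mapping a page directly. The shape of that borrow/cleanup handshake, imitated in standalone C (this mirrors the idea only and is not the kernel's delayed_call implementation):

    #include <stdio.h>

    /* Whoever hands out the buffer registers how to release it; the
     * consumer runs that release exactly once when finished.
     */
    struct toy_delayed_call {
        void (*fn)(void *);
        void *arg;
    };

    static void toy_do_delayed_call(struct toy_delayed_call *dc)
    {
        if (dc->fn) {
            dc->fn(dc->arg);
            dc->fn = NULL;          /* make repeat calls harmless */
        }
    }

    static void release_body(void *arg)
    {
        printf("releasing %s\n", (const char *)arg);
    }

    int main(void)
    {
        struct toy_delayed_call cleanup = {
            .fn  = release_body,
            .arg = "symlink body",
        };

        printf("parsing symlink body\n");
        toy_do_delayed_call(&cleanup);  /* runs the registered cleanup */
        toy_do_delayed_call(&cleanup);  /* second call is a no-op */
        return 0;
    }
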
@@ -189,7 +191,6 @@ struct vfsmount *afs_d_automount(struct path *path)
if (IS_ERR(newmnt))
return newmnt;
- mntget(newmnt); /* prevent immediate expiration */
mnt_set_expiry(newmnt, &afs_vfsmounts);
queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
afs_mntpt_expiry_timeout * HZ);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 065a28bfa3f1..44520549b509 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -47,7 +47,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
/* display one cell per line on subsequent lines */
seq_printf(m, "%3u %3u %6lld %2u %2u %s\n",
- atomic_read(&cell->ref),
+ refcount_read(&cell->ref),
atomic_read(&cell->active),
cell->dns_expiry - ktime_get_real_seconds(),
vllist ? vllist->nr_servers : 0,
@@ -122,14 +122,16 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
if (strcmp(buf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_lookup_cell(net, name, strlen(name), args, true);
+ cell = afs_lookup_cell(net, name, strlen(name), args,
+ AFS_LOOKUP_CELL_PRELOAD,
+ afs_cell_trace_use_lookup_add);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
}
if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_no_pin);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_no_pin);
} else {
goto inval;
}
@@ -147,6 +149,56 @@ inval:
}
/*
+ * Display the list of addr_prefs known to the namespace.
+ */
+static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
+{
+ struct afs_addr_preference_list *preflist;
+ struct afs_addr_preference *pref;
+ struct afs_net *net = afs_seq2net_single(m);
+ union {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ } addr;
+ unsigned int i;
+ char buf[44]; /* Maximum ipv6 + max subnet is 43 */
+
+ rcu_read_lock();
+ preflist = rcu_dereference(net->address_prefs);
+
+ if (!preflist) {
+ seq_puts(m, "NO PREFS\n");
+ goto out;
+ }
+
+ seq_printf(m, "PROT SUBNET PRIOR (v=%u n=%u/%u/%u)\n",
+ preflist->version, preflist->ipv6_off, preflist->nr, preflist->max_prefs);
+
+ memset(&addr, 0, sizeof(addr));
+
+ for (i = 0; i < preflist->nr; i++) {
+ pref = &preflist->prefs[i];
+
+ addr.sin.sin_family = pref->family;
+ if (pref->family == AF_INET) {
+ memcpy(&addr.sin.sin_addr, &pref->ipv4_addr,
+ sizeof(addr.sin.sin_addr));
+ snprintf(buf, sizeof(buf), "%pISc/%u", &addr.sin, pref->subnet_mask);
+ seq_printf(m, "UDP %-43.43s %5u\n", buf, pref->prio);
+ } else {
+ memcpy(&addr.sin6.sin6_addr, &pref->ipv6_addr,
+ sizeof(addr.sin6.sin6_addr));
+ snprintf(buf, sizeof(buf), "%pISc/%u", &addr.sin6, pref->subnet_mask);
+ seq_printf(m, "UDP %-43.43s %5u\n", buf, pref->prio);
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ return 0;
+}
+
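+
Given the format strings above, the addr_prefs proc file (created later in this patch under the afs proc directory, typically /proc/net/afs/addr_prefs) renders one preference per line; a plausible dump might look like this, where the version, counters, subnets and priorities are made-up sample values and column spacing is approximate:

    PROT SUBNET                                      PRIOR (v=3 n=1/2/8)
    UDP  192.0.2.0/24                                 1200
    UDP  2001:db8::/48                                 800
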
+/*
* Display the name of the current workstation cell.
*/
static int afs_proc_rootcell_show(struct seq_file *m, void *v)
@@ -156,7 +208,7 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
net = afs_seq2net_single(m);
down_read(&net->cells_lock);
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
if (cell)
seq_printf(m, "%s\n", cell->name);
up_read(&net->cells_lock);
@@ -190,7 +242,13 @@ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size)
/* determine command to perform */
_debug("rootcell=%s", buf);
- ret = afs_cell_init(net, buf);
+ ret = -EEXIST;
+ inode_lock(file_inode(file));
+ if (!rcu_access_pointer(net->ws_cell))
+ ret = afs_cell_init(net, buf);
+ else
+ printk("busy\n");
+ inode_unlock(file_inode(file));
out:
_leave(" = %d", ret);
@@ -217,7 +275,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
}
seq_printf(m, "%3d %08llx %s %s\n",
- atomic_read(&vol->usage), vol->vid,
+ refcount_read(&vol->ref), vol->vid,
afs_vol_types[vol->type],
vol->name);
@@ -227,7 +285,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
__acquires(cell->proc_lock)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_cell *cell = pde_data(file_inode(m->file));
rcu_read_lock();
return seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos);
@@ -236,7 +294,7 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
loff_t *_pos)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_cell *cell = pde_data(file_inode(m->file));
return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos);
}
@@ -307,7 +365,7 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
for (i = 0; i < alist->nr_addrs; i++)
seq_printf(m, " %c %pISpc\n",
alist->preferred == i ? '>' : '-',
- &alist->addrs[i].transport);
+ rxrpc_kernel_remote_addr(alist->addrs[i].peer));
}
seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt);
seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n",
@@ -322,7 +380,7 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
{
struct afs_vl_seq_net_private *priv = m->private;
struct afs_vlserver_list *vllist;
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_cell *cell = pde_data(file_inode(m->file));
loff_t pos = *_pos;
rcu_read_lock();
@@ -375,32 +433,51 @@ static const struct seq_operations afs_proc_cell_vlservers_ops = {
*/
static int afs_proc_servers_show(struct seq_file *m, void *v)
{
- struct afs_server *server;
+ struct afs_endpoint_state *estate;
struct afs_addr_list *alist;
+ struct afs_server *server;
+ unsigned long failed;
int i;
if (v == SEQ_START_TOKEN) {
- seq_puts(m, "UUID REF ACT\n");
+ seq_puts(m, "UUID REF ACT CELL\n");
return 0;
}
server = list_entry(v, struct afs_server, proc_link);
- alist = rcu_dereference(server->addresses);
- seq_printf(m, "%pU %3d %3d\n",
+ seq_printf(m, "%pU %3d %3d %s\n",
&server->uuid,
- atomic_read(&server->ref),
- atomic_read(&server->active));
- seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n",
- server->flags, server->rtt, server->cb_s_break);
- seq_printf(m, " - probe: last=%d out=%d\n",
- (int)(jiffies - server->probed_at) / HZ,
- atomic_read(&server->probe_outstanding));
- seq_printf(m, " - ALIST v=%u rsp=%lx f=%lx\n",
- alist->version, alist->responded, alist->failed);
- for (i = 0; i < alist->nr_addrs; i++)
- seq_printf(m, " [%x] %pISpc%s\n",
- i, &alist->addrs[i].transport,
- alist->preferred == i ? "*" : "");
+ refcount_read(&server->ref),
+ atomic_read(&server->active),
+ server->cell->name);
+ seq_printf(m, " - info: fl=%lx rtt=%u\n",
+ server->flags, server->rtt);
+ seq_printf(m, " - probe: last=%d\n",
+ (int)(jiffies - server->probed_at) / HZ);
+
+ estate = rcu_dereference(server->endpoint_state);
+ if (!estate)
+ goto out;
+ failed = estate->failed_set;
+ seq_printf(m, " - ESTATE pq=%x np=%u rsp=%lx f=%lx\n",
+ estate->probe_seq, atomic_read(&estate->nr_probing),
+ estate->responsive_set, estate->failed_set);
+
+ alist = estate->addresses;
+ seq_printf(m, " - ALIST v=%u ap=%u\n",
+ alist->version, alist->addr_pref_version);
+ for (i = 0; i < alist->nr_addrs; i++) {
+ const struct afs_address *addr = &alist->addrs[i];
+
+ seq_printf(m, " [%x] %pISpc%s rtt=%d err=%d p=%u\n",
+ i, rxrpc_kernel_remote_addr(addr->peer),
+ alist->preferred == i ? "*" :
+ test_bit(i, &failed) ? "!" : "",
+ rxrpc_kernel_get_srtt(addr->peer),
+ addr->last_error, addr->prio);
+ }
+
+out:
return 0;
}
@@ -681,7 +758,11 @@ int afs_proc_init(struct afs_net *net)
&afs_proc_sysname_ops,
afs_proc_sysname_write,
sizeof(struct seq_net_private),
- NULL))
+ NULL) ||
+ !proc_create_net_single_write("addr_prefs", 0644, p,
+ afs_proc_addr_prefs_show,
+ afs_proc_addr_prefs_write,
+ NULL))
goto error_tree;
net->proc_afs = p;
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index e4cd89c44c46..b2f06c1917c2 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -50,6 +50,9 @@ enum YFS_FS_Operations {
YFSREMOVEACL = 64171,
YFSREMOVEFILE2 = 64173,
YFSSTOREOPAQUEACL2 = 64174,
+ YFSRENAME_REPLACE = 64176,
+ YFSRENAME_NOREPLACE = 64177,
+ YFSRENAME_EXCHANGE = 64187,
YFSINLINEBULKSTATUS = 64536, /* YFS Fetch multiple file statuses with errors */
YFSFETCHDATA64 = 64537, /* YFS Fetch file data */
YFSSTOREDATA64 = 64538, /* YFS Store file data */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 79e1a5f6701b..6a4e7da10fc4 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -13,6 +13,19 @@
#include <linux/sched/signal.h>
#include "internal.h"
#include "afs_fs.h"
+#include "protocol_uae.h"
+
+void afs_clear_server_states(struct afs_operation *op)
+{
+ unsigned int i;
+
+ if (op->server_states) {
+ for (i = 0; i < op->server_list->nr_servers; i++)
+ afs_put_endpoint_state(op->server_states[i].endpoint_state,
+ afs_estate_trace_put_server_state);
+ kfree(op->server_states);
+ }
+}
/*
* Begin iteration through a server list, starting with the vnode's last used
@@ -25,14 +38,41 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
void *cb_server;
int i;
+ trace_afs_rotate(op, afs_rotate_trace_start, 0);
+
read_lock(&op->volume->servers_lock);
op->server_list = afs_get_serverlist(
rcu_dereference_protected(op->volume->servers,
lockdep_is_held(&op->volume->servers_lock)));
read_unlock(&op->volume->servers_lock);
- op->untried = (1UL << op->server_list->nr_servers) - 1;
- op->index = READ_ONCE(op->server_list->preferred);
+ op->server_states = kcalloc(op->server_list->nr_servers, sizeof(op->server_states[0]),
+ GFP_KERNEL);
+ if (!op->server_states) {
+ afs_op_nomem(op);
+ trace_afs_rotate(op, afs_rotate_trace_nomem, 0);
+ return false;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_endpoint_state *estate;
+ struct afs_server_state *s = &op->server_states[i];
+
+ server = op->server_list->servers[i].server;
+ estate = rcu_dereference(server->endpoint_state);
+ s->endpoint_state = afs_get_endpoint_state(estate,
+ afs_estate_trace_get_server_state);
+ s->probe_seq = estate->probe_seq;
+ s->untried_addrs = (1UL << estate->addresses->nr_addrs) - 1;
+ init_waitqueue_entry(&s->probe_waiter, current);
+ afs_get_address_preferences(op->net, estate->addresses);
+ }
+ rcu_read_unlock();
+
+ op->untried_servers = (1UL << op->server_list->nr_servers) - 1;
+ op->server_index = -1;
cb_server = vnode->cb_server;
if (cb_server) {
@@ -40,7 +80,7 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
for (i = 0; i < op->server_list->nr_servers; i++) {
server = op->server_list->servers[i].server;
if (server == cb_server) {
- op->index = i;
+ op->server_index = i;
goto found_interest;
}
}
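
Both untried_addrs and untried_servers set up above are plain bitmaps with one bit per candidate, initialised to all-ones and whittled down as attempts are made. A compact standalone illustration of that bookkeeping (the real selection also weighs probe results and address priorities, which this skips):

    #include <stdio.h>

    /* One bit per candidate, all set initially, cleared as each is tried. */
    int main(void)
    {
        unsigned int nr_servers = 3;
        unsigned long untried = (1UL << nr_servers) - 1;    /* 0b111 */

        while (untried) {
            int i = __builtin_ctzl(untried);        /* lowest untried index */

            printf("trying server %d\n", i);
            untried &= ~(1UL << i);                 /* mark as tried */
        }
        return 0;
    }
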
@@ -50,7 +90,8 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
* and have to return an error.
*/
if (op->flags & AFS_OPERATION_CUR_ONLY) {
- op->error = -ESTALE;
+ afs_op_set_error(op, -ESTALE);
+ trace_afs_rotate(op, afs_rotate_trace_stale_lock, 0);
return false;
}
@@ -58,7 +99,7 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
write_seqlock(&vnode->cb_lock);
ASSERTCMP(cb_server, ==, vnode->cb_server);
vnode->cb_server = NULL;
- if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
+ if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_rotate_server))
vnode->cb_break++;
write_sequnlock(&vnode->cb_lock);
}
@@ -70,7 +111,7 @@ found_interest:
/*
* Post volume busy note.
*/
-static void afs_busy(struct afs_volume *volume, u32 abort_code)
+static void afs_busy(struct afs_operation *op, u32 abort_code)
{
const char *m;
@@ -81,7 +122,8 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
default: m = "busy"; break;
}
- pr_notice("kAFS: Volume %llu '%s' is %s\n", volume->vid, volume->name, m);
+ pr_notice("kAFS: Volume %llu '%s' on server %pU is %s\n",
+ op->volume->vid, op->volume->name, &op->server->uuid, m);
}
/*
@@ -89,10 +131,11 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
*/
static bool afs_sleep_and_retry(struct afs_operation *op)
{
+ trace_afs_rotate(op, afs_rotate_trace_busy_sleep, 0);
if (!(op->flags & AFS_OPERATION_UNINTR)) {
msleep_interruptible(1000);
if (signal_pending(current)) {
- op->error = -ERESTARTSYS;
+ afs_op_set_error(op, -ERESTARTSYS);
return false;
}
} else {
@@ -111,62 +154,105 @@ bool afs_select_fileserver(struct afs_operation *op)
struct afs_addr_list *alist;
struct afs_server *server;
struct afs_vnode *vnode = op->file[0].vnode;
- struct afs_error e;
- u32 rtt;
- int error = op->ac.error, i;
+ unsigned long set, failed;
+ s32 abort_code = op->call_abort_code;
+ int best_prio = 0;
+ int error = op->call_error, addr_index, i, j;
- _enter("%lx[%d],%lx[%d],%d,%d",
- op->untried, op->index,
- op->ac.tried, op->ac.index,
- error, op->ac.abort_code);
+ op->nr_iterations++;
+
+ _enter("OP=%x+%x,%llx,%u{%lx},%u{%lx},%d,%d",
+ op->debug_id, op->nr_iterations, op->volume->vid,
+ op->server_index, op->untried_servers,
+ op->addr_index, op->addr_tried,
+ error, abort_code);
if (op->flags & AFS_OPERATION_STOP) {
+ trace_afs_rotate(op, afs_rotate_trace_stopped, 0);
_leave(" = f [stopped]");
return false;
}
- op->nr_iterations++;
-
- /* Evaluate the result of the previous operation, if there was one. */
- switch (error) {
- case SHRT_MAX:
+ if (op->nr_iterations == 0)
goto start;
+ WRITE_ONCE(op->estate->addresses->addrs[op->addr_index].last_error, error);
+ trace_afs_rotate(op, afs_rotate_trace_iter, op->call_error);
+
+ /* Evaluate the result of the previous operation, if there was one. */
+ switch (op->call_error) {
case 0:
+ clear_bit(AFS_SE_VOLUME_OFFLINE,
+ &op->server_list->servers[op->server_index].flags);
+ clear_bit(AFS_SE_VOLUME_BUSY,
+ &op->server_list->servers[op->server_index].flags);
+ op->cumul_error.responded = true;
+
+ /* We succeeded, but we may need to redo the op from another
+ * server if we're looking at a set of RO volumes where some of
+ * the servers have not yet been brought up to date lest we
+ * regress the data. We only switch to the new version once
+ * >=50% of the servers are updated.
+ */
+ error = afs_update_volume_state(op);
+ if (error != 0) {
+ if (error == 1) {
+ afs_sleep_and_retry(op);
+ goto restart_from_beginning;
+ }
+ afs_op_set_error(op, error);
+ goto failed;
+ }
+ fallthrough;
default:
/* Success or local failure. Stop. */
- op->error = error;
+ afs_op_set_error(op, error);
op->flags |= AFS_OPERATION_STOP;
+ trace_afs_rotate(op, afs_rotate_trace_stop, error);
_leave(" = f [okay/local %d]", error);
return false;
case -ECONNABORTED:
/* The far side rejected the operation on some grounds. This
* might involve the server being busy or the volume having been moved.
+ *
+ * Note that various V* errors should not be sent to a cache manager
+ * by a fileserver as they should be translated to more modern UAE*
+ * errors instead. IBM AFS and OpenAFS fileservers, however, do leak
+ * these abort codes.
*/
- switch (op->ac.abort_code) {
+ trace_afs_rotate(op, afs_rotate_trace_aborted, abort_code);
+ op->cumul_error.responded = true;
+ switch (abort_code) {
case VNOVOL:
/* This fileserver doesn't know about the volume.
* - May indicate that the VL is wrong - retry once and compare
* the results.
* - May indicate that the fileserver couldn't attach to the vol.
+ * - The volume might have been temporarily removed so that it can
+ * be replaced by a volume restore. "vos" might have ended one
+ * transaction and has yet to create the next.
+ * - The volume might not be blessed or might not be in-service
+ * (administrative action).
*/
if (op->flags & AFS_OPERATION_VNOVOL) {
- op->error = -EREMOTEIO;
+ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
goto next_server;
}
write_lock(&op->volume->servers_lock);
- op->server_list->vnovol_mask |= 1 << op->index;
+ op->server_list->vnovol_mask |= 1 << op->server_index;
write_unlock(&op->volume->servers_lock);
set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
error = afs_check_volume_status(op->volume, op);
- if (error < 0)
- goto failed_set_error;
+ if (error < 0) {
+ afs_op_set_error(op, error);
+ goto failed;
+ }
if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) {
- op->error = -ENOMEDIUM;
+ afs_op_set_error(op, -ENOMEDIUM);
goto failed;
}
@@ -174,7 +260,7 @@ bool afs_select_fileserver(struct afs_operation *op)
* it's the fileserver having trouble.
*/
if (rcu_access_pointer(op->volume->servers) == op->server_list) {
- op->error = -EREMOTEIO;
+ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
goto next_server;
}
@@ -183,50 +269,99 @@ bool afs_select_fileserver(struct afs_operation *op)
_leave(" = t [vnovol]");
return true;
- case VSALVAGE: /* TODO: Should this return an error or iterate? */
case VVOLEXISTS:
- case VNOSERVICE:
case VONLINE:
- case VDISKFULL:
- case VOVERQUOTA:
- op->error = afs_abort_to_error(op->ac.abort_code);
+ /* These should not be returned from the fileserver. */
+ pr_warn("Fileserver returned unexpected abort %d\n",
+ abort_code);
+ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
goto next_server;
+ case VNOSERVICE:
+ /* Prior to AFS 3.2 VNOSERVICE was returned from the fileserver
+ * if the volume was neither in-service nor administratively
+ * blessed. All usage was replaced by VNOVOL because AFS 3.1 and
+ * earlier cache managers did not handle VNOSERVICE and assumed
+ * it was the client OSes errno 105.
+ *
+ * Starting with OpenAFS 1.4.8 VNOSERVICE was repurposed as the
+ * fileserver idle dead time error which was sent in place of
+ * RX_CALL_TIMEOUT (-3). The error was intended to be sent if the
+ * fileserver took too long to send a reply to the client.
+ * RX_CALL_TIMEOUT would have caused the cache manager to mark the
+ * server down whereas VNOSERVICE since AFS 3.2 would cause the
+ * cache manager to temporarily (up to 15 minutes) mark the volume
+ * instance as unusable.
+ *
+ * The idle dead logic resulted in cache inconsistency since a
+ * state changing call that the cache manager assumed was dead
+ * could still be processed to completion by the fileserver. This
+ * logic was removed in OpenAFS 1.8.0 and VNOSERVICE is no longer
+ * returned. However, many 1.4.8 through 1.6.24 fileservers are
+ * still in existence.
+ *
+ * AuriStorFS fileservers have never returned VNOSERVICE.
+ *
+ * VNOSERVICE should be treated as an alias for RX_CALL_TIMEOUT.
+ */
+ case RX_CALL_TIMEOUT:
+ afs_op_accumulate_error(op, -ETIMEDOUT, abort_code);
+ goto next_server;
+
+ case VSALVAGING: /* This error should not be leaked to cache managers
+ * but is from OpenAFS demand attach fileservers.
+ * It should be treated as an alias for VOFFLINE.
+ */
+ case VSALVAGE: /* VSALVAGE should be treated as a synonym of VOFFLINE */
case VOFFLINE:
- if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) {
- afs_busy(op->volume, op->ac.abort_code);
- clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
+ /* The volume is in use by the volserver or another volume utility
+ * for an operation that might alter the contents. The volume is
+ * expected to come back but it might take a long time (could be
+ * days).
+ */
+ if (!test_and_set_bit(AFS_SE_VOLUME_OFFLINE,
+ &op->server_list->servers[op->server_index].flags)) {
+ afs_busy(op, abort_code);
+ clear_bit(AFS_SE_VOLUME_BUSY,
+ &op->server_list->servers[op->server_index].flags);
}
if (op->flags & AFS_OPERATION_NO_VSLEEP) {
- op->error = -EADV;
- goto failed;
- }
- if (op->flags & AFS_OPERATION_CUR_ONLY) {
- op->error = -ESTALE;
+ afs_op_set_error(op, -EADV);
goto failed;
}
goto busy;
- case VSALVAGING:
- case VRESTARTING:
+ case VRESTARTING: /* The fileserver is either shutting down or starting up. */
case VBUSY:
- /* Retry after going round all the servers unless we
- * have a file lock we need to maintain.
+ /* The volume is in use by the volserver or another volume
+ * utility for an operation that is not expected to alter the
+ * contents of the volume. VBUSY does not need to be returned
+ * for a ROVOL or BACKVOL bound to an ITBusy volserver
+ * transaction. The fileserver is permitted to continue serving
+ * content from ROVOLs and BACKVOLs during an ITBusy transaction
+ * because the content will not change. However, many fileserver
+ * releases do return VBUSY for ROVOL and BACKVOL instances under
+ * many circumstances.
+ *
+ * Retry after going round all the servers unless we have a file
+ * lock we need to maintain.
*/
if (op->flags & AFS_OPERATION_NO_VSLEEP) {
- op->error = -EBUSY;
+ afs_op_set_error(op, -EBUSY);
goto failed;
}
- if (!test_and_set_bit(AFS_VOLUME_BUSY, &op->volume->flags)) {
- afs_busy(op->volume, op->ac.abort_code);
- clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
+ if (!test_and_set_bit(AFS_SE_VOLUME_BUSY,
+ &op->server_list->servers[op->server_index].flags)) {
+ afs_busy(op, abort_code);
+ clear_bit(AFS_SE_VOLUME_OFFLINE,
+ &op->server_list->servers[op->server_index].flags);
}
busy:
if (op->flags & AFS_OPERATION_CUR_ONLY) {
if (!afs_sleep_and_retry(op))
goto failed;
- /* Retry with same server & address */
+ /* Retry with same server & address */
_leave(" = t [vbusy]");
return true;
}
@@ -243,7 +378,7 @@ bool afs_select_fileserver(struct afs_operation *op)
* honour, just in case someone sets up a loop.
*/
if (op->flags & AFS_OPERATION_VMOVED) {
- op->error = -EREMOTEIO;
+ afs_op_set_error(op, -EREMOTEIO);
goto failed;
}
op->flags |= AFS_OPERATION_VMOVED;
@@ -251,8 +386,10 @@ bool afs_select_fileserver(struct afs_operation *op)
set_bit(AFS_VOLUME_WAIT, &op->volume->flags);
set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
error = afs_check_volume_status(op->volume, op);
- if (error < 0)
- goto failed_set_error;
+ if (error < 0) {
+ afs_op_set_error(op, error);
+ goto failed;
+ }
/* If the server list didn't change, then the VLDB is
* out of sync with the fileservers. This is hopefully
@@ -264,22 +401,60 @@ bool afs_select_fileserver(struct afs_operation *op)
* TODO: Retry a few times with sleeps.
*/
if (rcu_access_pointer(op->volume->servers) == op->server_list) {
- op->error = -ENOMEDIUM;
+ afs_op_accumulate_error(op, -ENOMEDIUM, abort_code);
goto failed;
}
goto restart_from_beginning;
+ case UAEIO:
+ case VIO:
+ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
+ if (op->volume->type != AFSVL_RWVOL)
+ goto next_server;
+ goto failed;
+
+ case VDISKFULL:
+ case UAENOSPC:
+ /* The partition is full. Only applies to RWVOLs.
+ * Translate locally and return ENOSPC.
+ * No replicas to failover to.
+ */
+ afs_op_set_error(op, -ENOSPC);
+ goto failed_but_online;
+
+ case VOVERQUOTA:
+ case UAEDQUOT:
+ /* Volume is full. Only applies to RWVOLs.
+ * Translate locally and return EDQUOT.
+ * No replicas to failover to.
+ */
+ afs_op_set_error(op, -EDQUOT);
+ goto failed_but_online;
+
+ case RX_INVALID_OPERATION:
+ case RXGEN_OPCODE:
+ /* Handle downgrading to an older operation. */
+ afs_op_set_error(op, -ENOTSUPP);
+ if (op->flags & AFS_OPERATION_DOWNGRADE) {
+ op->flags &= ~AFS_OPERATION_DOWNGRADE;
+ goto go_again;
+ }
+ goto failed_but_online;
+
default:
- clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
- clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
- op->error = afs_abort_to_error(op->ac.abort_code);
+ afs_op_accumulate_error(op, error, abort_code);
+ failed_but_online:
+ clear_bit(AFS_SE_VOLUME_OFFLINE,
+ &op->server_list->servers[op->server_index].flags);
+ clear_bit(AFS_SE_VOLUME_BUSY,
+ &op->server_list->servers[op->server_index].flags);
goto failed;
}
case -ETIMEDOUT:
case -ETIME:
- if (op->error != -EDESTADDRREQ)
+ if (afs_op_error(op) != -EDESTADDRREQ)
goto iterate_address;
fallthrough;
case -ERFKILL:
@@ -289,29 +464,40 @@ bool afs_select_fileserver(struct afs_operation *op)
case -EHOSTDOWN:
case -ECONNREFUSED:
_debug("no conn");
- op->error = error;
+ afs_op_accumulate_error(op, error, 0);
goto iterate_address;
+ case -ENETRESET:
+ pr_warn("kAFS: Peer reset %s (op=%x)\n",
+ op->type ? op->type->name : "???", op->debug_id);
+ fallthrough;
case -ECONNRESET:
_debug("call reset");
- op->error = error;
+ afs_op_set_error(op, error);
goto failed;
}
restart_from_beginning:
+ trace_afs_rotate(op, afs_rotate_trace_restart, 0);
_debug("restart");
- afs_end_cursor(&op->ac);
+ op->estate = NULL;
op->server = NULL;
+ afs_clear_server_states(op);
+ op->server_states = NULL;
afs_put_serverlist(op->net, op->server_list);
op->server_list = NULL;
start:
_debug("start");
+ ASSERTCMP(op->estate, ==, NULL);
/* See if we need to do an update of the volume record. Note that the
* volume may have moved or even have been deleted.
*/
error = afs_check_volume_status(op->volume, op);
- if (error < 0)
- goto failed_set_error;
+ trace_afs_rotate(op, afs_rotate_trace_check_vol_status, error);
+ if (error < 0) {
+ afs_op_set_error(op, error);
+ goto failed;
+ }
if (!afs_start_fs_iteration(op, vnode))
goto failed;
@@ -319,52 +505,85 @@ start:
_debug("__ VOL %llx __", op->volume->vid);
pick_server:
- _debug("pick [%lx]", op->untried);
+ _debug("pick [%lx]", op->untried_servers);
+ ASSERTCMP(op->estate, ==, NULL);
- error = afs_wait_for_fs_probes(op->server_list, op->untried);
- if (error < 0)
- goto failed_set_error;
+ error = afs_wait_for_fs_probes(op, op->server_states,
+ !(op->flags & AFS_OPERATION_UNINTR));
+ switch (error) {
+ case 0: /* No untried responsive servers and no outstanding probes */
+ trace_afs_rotate(op, afs_rotate_trace_probe_none, 0);
+ goto no_more_servers;
+ case 1: /* Got a response */
+ trace_afs_rotate(op, afs_rotate_trace_probe_response, 0);
+ break;
+ case 2: /* Probe data superseded */
+ trace_afs_rotate(op, afs_rotate_trace_probe_superseded, 0);
+ goto restart_from_beginning;
+ default:
+ trace_afs_rotate(op, afs_rotate_trace_probe_error, error);
+ afs_op_set_error(op, error);
+ goto failed;
+ }
- /* Pick the untried server with the lowest RTT. If we have outstanding
- * callbacks, we stick with the server we're already using if we can.
+ /* Pick the untried server with the highest priority untried endpoint.
+ * If we have outstanding callbacks, we stick with the server we're
+ * already using if we can.
*/
if (op->server) {
- _debug("server %u", op->index);
- if (test_bit(op->index, &op->untried))
+ _debug("server %u", op->server_index);
+ if (test_bit(op->server_index, &op->untried_servers))
goto selected_server;
op->server = NULL;
_debug("no server");
}
- op->index = -1;
- rtt = U32_MAX;
+ rcu_read_lock();
+ op->server_index = -1;
+ best_prio = -1;
for (i = 0; i < op->server_list->nr_servers; i++) {
- struct afs_server *s = op->server_list->servers[i].server;
+ struct afs_endpoint_state *es;
+ struct afs_server_entry *se = &op->server_list->servers[i];
+ struct afs_addr_list *sal;
+ struct afs_server *s = se->server;
- if (!test_bit(i, &op->untried) ||
+ if (!test_bit(i, &op->untried_servers) ||
+ test_bit(AFS_SE_EXCLUDED, &se->flags) ||
!test_bit(AFS_SERVER_FL_RESPONDING, &s->flags))
continue;
- if (s->probe.rtt < rtt) {
- op->index = i;
- rtt = s->probe.rtt;
+ es = op->server_states[i].endpoint_state;
+ sal = es->addresses;
+
+ afs_get_address_preferences_rcu(op->net, sal);
+ for (j = 0; j < sal->nr_addrs; j++) {
+ if (es->failed_set & (1 << j))
+ continue;
+ if (!sal->addrs[j].peer)
+ continue;
+ if (sal->addrs[j].prio > best_prio) {
+ op->server_index = i;
+ best_prio = sal->addrs[j].prio;
+ }
}
}
+ rcu_read_unlock();
- if (op->index == -1)
+ if (op->server_index == -1)
goto no_more_servers;
selected_server:
- _debug("use %d", op->index);
- __clear_bit(op->index, &op->untried);
+ trace_afs_rotate(op, afs_rotate_trace_selected_server, best_prio);
+ _debug("use %d prio %u", op->server_index, best_prio);
+ __clear_bit(op->server_index, &op->untried_servers);
/* We're starting on a different fileserver from the list. We need to
* check it, create a callback intercept, find its address list and
* probe its capabilities before we use it.
*/
- ASSERTCMP(op->ac.alist, ==, NULL);
- server = op->server_list->servers[op->index].server;
+ ASSERTCMP(op->estate, ==, NULL);
+ server = op->server_list->servers[op->server_index].server;
- if (!afs_check_server_record(op, server))
+ if (!afs_check_server_record(op, server, op->key))
goto failed;
_debug("USING SERVER: %pU", &server->uuid);
@@ -373,58 +592,81 @@ selected_server:
op->server = server;
if (vnode->cb_server != server) {
vnode->cb_server = server;
- vnode->cb_s_break = server->cb_s_break;
- vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
- vnode->cb_v_break = vnode->volume->cb_v_break;
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_server_change);
}
- read_lock(&server->fs_lock);
- alist = rcu_dereference_protected(server->addresses,
- lockdep_is_held(&server->fs_lock));
- afs_get_addrlist(alist);
- read_unlock(&server->fs_lock);
-
retry_server:
- memset(&op->ac, 0, sizeof(op->ac));
-
- if (!op->ac.alist)
- op->ac.alist = alist;
- else
- afs_put_addrlist(alist);
-
- op->ac.index = -1;
+ op->addr_tried = 0;
+ op->addr_index = -1;
iterate_address:
- ASSERT(op->ac.alist);
/* Iterate over the current server's address list to try and find an
* address on which it will respond to us.
*/
- if (!afs_iterate_addresses(&op->ac))
- goto out_of_addresses;
+ op->estate = op->server_states[op->server_index].endpoint_state;
+ set = READ_ONCE(op->estate->responsive_set);
+ failed = READ_ONCE(op->estate->failed_set);
+ _debug("iterate ES=%x rs=%lx fs=%lx", op->estate->probe_seq, set, failed);
+ set &= ~(failed | op->addr_tried);
+ trace_afs_rotate(op, afs_rotate_trace_iterate_addr, set);
+ if (!set)
+ goto wait_for_more_probe_results;
+
+ alist = op->estate->addresses;
+ best_prio = -1;
+ addr_index = 0;
+ for (i = 0; i < alist->nr_addrs; i++) {
+ if (!(set & (1 << i)))
+ continue;
+ if (alist->addrs[i].prio > best_prio) {
+ addr_index = i;
+ best_prio = alist->addrs[i].prio;
+ }
+ }
- _debug("address [%u] %u/%u %pISp",
- op->index, op->ac.index, op->ac.alist->nr_addrs,
- &op->ac.alist->addrs[op->ac.index].transport);
+ alist->preferred = addr_index;
+
+ op->addr_index = addr_index;
+ set_bit(addr_index, &op->addr_tried);
+ _debug("address [%u] %u/%u %pISp",
+ op->server_index, addr_index, alist->nr_addrs,
+ rxrpc_kernel_remote_addr(alist->addrs[op->addr_index].peer));
+go_again:
+ op->volsync.creation = TIME64_MIN;
+ op->volsync.update = TIME64_MIN;
+ op->call_responded = false;
_leave(" = t");
return true;
-out_of_addresses:
+wait_for_more_probe_results:
+ error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
+ !(op->flags & AFS_OPERATION_UNINTR));
+ if (error == 1)
+ goto iterate_address;
+ if (!error)
+ goto restart_from_beginning;
+
/* We've now had a failure to respond on all of a server's addresses -
* immediately probe them again and consider retrying the server.
*/
+ trace_afs_rotate(op, afs_rotate_trace_probe_fileserver, 0);
afs_probe_fileserver(op->net, op->server);
if (op->flags & AFS_OPERATION_RETRY_SERVER) {
- alist = op->ac.alist;
- error = afs_wait_for_one_fs_probe(
- op->server, !(op->flags & AFS_OPERATION_UNINTR));
+ error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
+ !(op->flags & AFS_OPERATION_UNINTR));
switch (error) {
- case 0:
+ case 1:
op->flags &= ~AFS_OPERATION_RETRY_SERVER;
+ trace_afs_rotate(op, afs_rotate_trace_retry_server, 1);
goto retry_server;
+ case 0:
+ trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+ goto restart_from_beginning;
case -ERESTARTSYS:
- goto failed_set_error;
+ afs_op_set_error(op, error);
+ goto failed;
case -ETIME:
case -EDESTADDRREQ:
goto next_server;
@@ -432,34 +674,38 @@ out_of_addresses:
}
next_server:
+ trace_afs_rotate(op, afs_rotate_trace_next_server, 0);
_debug("next");
- afs_end_cursor(&op->ac);
+ op->estate = NULL;
goto pick_server;
no_more_servers:
/* That's all the servers poked to no good effect. Try again if some
* of them were busy.
*/
- if (op->flags & AFS_OPERATION_VBUSY)
+ trace_afs_rotate(op, afs_rotate_trace_no_more_servers, 0);
+ if (op->flags & AFS_OPERATION_VBUSY) {
+ afs_sleep_and_retry(op);
+ op->flags &= ~AFS_OPERATION_VBUSY;
goto restart_from_beginning;
+ }
- e.error = -EDESTADDRREQ;
- e.responded = false;
+ rcu_read_lock();
for (i = 0; i < op->server_list->nr_servers; i++) {
- struct afs_server *s = op->server_list->servers[i].server;
+ struct afs_endpoint_state *estate;
- afs_prioritise_error(&e, READ_ONCE(s->probe.error),
- s->probe.abort_code);
+ estate = op->server_states[i].endpoint_state;
+ error = READ_ONCE(estate->error);
+ if (error < 0)
+ afs_op_accumulate_error(op, error, estate->abort_code);
}
+ rcu_read_unlock();
- error = e.error;
-
-failed_set_error:
- op->error = error;
failed:
+ trace_afs_rotate(op, afs_rotate_trace_failed, 0);
op->flags |= AFS_OPERATION_STOP;
- afs_end_cursor(&op->ac);
- _leave(" = f [failed %d]", op->error);
+ op->estate = NULL;
+ _leave(" = f [failed %d]", afs_op_error(op));
return false;
}
@@ -478,37 +724,40 @@ void afs_dump_edestaddrreq(const struct afs_operation *op)
rcu_read_lock();
pr_notice("EDESTADDR occurred\n");
- pr_notice("FC: cbb=%x cbb2=%x fl=%x err=%hd\n",
+ pr_notice("OP: cbb=%x cbb2=%x fl=%x err=%hd\n",
op->file[0].cb_break_before,
- op->file[1].cb_break_before, op->flags, op->error);
- pr_notice("FC: ut=%lx ix=%d ni=%u\n",
- op->untried, op->index, op->nr_iterations);
+ op->file[1].cb_break_before, op->flags, op->cumul_error.error);
+ pr_notice("OP: ut=%lx ix=%d ni=%u\n",
+ op->untried_servers, op->server_index, op->nr_iterations);
+ pr_notice("OP: call er=%d ac=%d r=%u\n",
+ op->call_error, op->call_abort_code, op->call_responded);
if (op->server_list) {
const struct afs_server_list *sl = op->server_list;
- pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n",
- sl->nr_servers, sl->preferred, sl->vnovol_mask);
+
+ pr_notice("FC: SL nr=%u vnov=%hx\n",
+ sl->nr_servers, sl->vnovol_mask);
for (i = 0; i < sl->nr_servers; i++) {
const struct afs_server *s = sl->servers[i].server;
+ const struct afs_endpoint_state *e =
+ rcu_dereference(s->endpoint_state);
+ const struct afs_addr_list *a = e->addresses;
+
pr_notice("FC: server fl=%lx av=%u %pU\n",
s->flags, s->addr_version, &s->uuid);
- if (s->addresses) {
- const struct afs_addr_list *a =
- rcu_dereference(s->addresses);
+ pr_notice("FC: - pq=%x R=%lx F=%lx\n",
+ e->probe_seq, e->responsive_set, e->failed_set);
+ if (a) {
pr_notice("FC: - av=%u nr=%u/%u/%u pr=%u\n",
a->version,
a->nr_ipv4, a->nr_addrs, a->max_addrs,
a->preferred);
- pr_notice("FC: - R=%lx F=%lx\n",
- a->responded, a->failed);
- if (a == op->ac.alist)
+ if (a == e->addresses)
pr_notice("FC: - current\n");
}
}
}
- pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
- op->ac.tried, op->ac.index, op->ac.abort_code, op->ac.error,
- op->ac.responded, op->ac.nr_iterations);
+ pr_notice("AC: t=%lx ax=%d\n", op->addr_tried, op->addr_index);
rcu_read_unlock();
}
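
The rotation rework above stops choosing fileservers by probe RTT and instead
scans the untried servers and their unfailed endpoints for the highest address
priority, waiting for more probe results when no candidate bit remains.  A
standalone sketch of that selection rule follows; the struct, the bitmask
layout and the main() harness are illustrative assumptions, not the kernel
interfaces.

/* Hedged sketch: pick the index of the highest-priority address whose bit
 * is set in 'candidates' (responsive & ~failed & ~tried).  -1 means there
 * is nothing left to try and the caller should wait for further probes.
 */
#include <stdio.h>

struct addr {
	unsigned short prio;
};

static int pick_best_addr(const struct addr *addrs, int nr_addrs,
			  unsigned long candidates)
{
	int best = -1, best_prio = -1;

	for (int i = 0; i < nr_addrs; i++) {
		if (!(candidates & (1UL << i)))
			continue;
		if (addrs[i].prio > best_prio) {
			best = i;
			best_prio = addrs[i].prio;
		}
	}
	return best;
}

int main(void)
{
	struct addr addrs[] = { { 10 }, { 30 }, { 20 } };
	unsigned long candidates = 0x5;	/* index 1 already tried/failed */

	printf("selected index %d\n", pick_best_addr(addrs, 3, candidates));
	return 0;
}
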
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 23a1a92d64bb..bf0e4ea0aafd 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -13,16 +13,28 @@
#include "internal.h"
#include "afs_cm.h"
#include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
struct workqueue_struct *afs_async_calls;
+static void afs_deferred_free_worker(struct work_struct *work);
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
+static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID);
+static void afs_rx_notify_oob(struct sock *sk, struct sk_buff *oob);
static int afs_deliver_cm_op_id(struct afs_call *);
+static const struct rxrpc_kernel_ops afs_rxrpc_callback_ops = {
+ .notify_new_call = afs_rx_new_call,
+ .discard_new_call = afs_rx_discard_new_call,
+ .user_attach_call = afs_rx_attach,
+ .notify_oob = afs_rx_notify_oob,
+};
+
/* asynchronous incoming call initial processing */
static const struct afs_call_type afs_RXCMxxxx = {
.name = "CB.xxxx",
@@ -46,6 +58,7 @@ int afs_open_socket(struct afs_net *net)
goto error_1;
socket->sk->sk_allocation = GFP_NOFS;
+ socket->sk->sk_user_data = net;
/* bind the callback manager's address to make this a server socket */
memset(&srx, 0, sizeof(srx));
@@ -61,16 +74,24 @@ int afs_open_socket(struct afs_net *net)
if (ret < 0)
goto error_2;
- ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ ret = rxrpc_sock_set_manage_response(socket->sk, true);
+ if (ret < 0)
+ goto error_2;
+
+ ret = afs_create_token_key(net, socket);
+ if (ret < 0)
+ pr_err("Couldn't create RxGK CM key: %d\n", ret);
+
+ ret = kernel_bind(socket, (struct sockaddr_unsized *) &srx, sizeof(srx));
if (ret == -EADDRINUSE) {
srx.transport.sin6.sin6_port = 0;
- ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ ret = kernel_bind(socket, (struct sockaddr_unsized *) &srx, sizeof(srx));
}
if (ret < 0)
goto error_2;
srx.srx_service = YFS_CM_SERVICE;
- ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ ret = kernel_bind(socket, (struct sockaddr_unsized *) &srx, sizeof(srx));
if (ret < 0)
goto error_2;
@@ -81,8 +102,7 @@ int afs_open_socket(struct afs_net *net)
* it sends back to us.
*/
- rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
- afs_rx_discard_new_call);
+ rxrpc_kernel_set_notifications(socket, &afs_rxrpc_callback_ops);
ret = kernel_listen(socket, INT_MAX);
if (ret < 0)
@@ -122,7 +142,9 @@ void afs_close_socket(struct afs_net *net)
kernel_sock_shutdown(net->socket, SHUT_RDWR);
flush_workqueue(afs_async_calls);
+ net->socket->sk->sk_user_data = NULL;
sock_release(net->socket);
+ key_put(net->fs_cm_token_key);
_debug("dework");
_leave("");
@@ -145,65 +167,92 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
call->type = type;
call->net = net;
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
- atomic_set(&call->usage, 1);
- INIT_WORK(&call->async_work, afs_process_async_call);
+ refcount_set(&call->ref, 1);
+ INIT_WORK(&call->async_work, type->async_rx ?: afs_process_async_call);
+ INIT_WORK(&call->work, call->type->work);
+ INIT_WORK(&call->free_work, afs_deferred_free_worker);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
call->iter = &call->def_iter;
o = atomic_inc_return(&net->nr_outstanding_calls);
- trace_afs_call(call, afs_call_trace_alloc, 1, o,
+ trace_afs_call(call->debug_id, afs_call_trace_alloc, 1, o,
__builtin_return_address(0));
return call;
}
+static void afs_free_call(struct afs_call *call)
+{
+ struct afs_net *net = call->net;
+ int o;
+
+ ASSERT(!work_pending(&call->async_work));
+
+ rxrpc_kernel_put_peer(call->peer);
+
+ if (call->rxcall) {
+ rxrpc_kernel_shutdown_call(net->socket, call->rxcall);
+ rxrpc_kernel_put_call(net->socket, call->rxcall);
+ call->rxcall = NULL;
+ }
+ if (call->type->destructor)
+ call->type->destructor(call);
+
+ afs_unuse_server_notime(call->net, call->server, afs_server_trace_unuse_call);
+ kfree(call->request);
+
+ o = atomic_read(&net->nr_outstanding_calls);
+ trace_afs_call(call->debug_id, afs_call_trace_free, 0, o,
+ __builtin_return_address(0));
+ kfree(call);
+
+ o = atomic_dec_return(&net->nr_outstanding_calls);
+ if (o == 0)
+ wake_up_var(&net->nr_outstanding_calls);
+}
+
/*
* Dispose of a reference on a call.
*/
void afs_put_call(struct afs_call *call)
{
struct afs_net *net = call->net;
- int n = atomic_dec_return(&call->usage);
- int o = atomic_read(&net->nr_outstanding_calls);
+ unsigned int debug_id = call->debug_id;
+ bool zero;
+ int r, o;
- trace_afs_call(call, afs_call_trace_put, n, o,
+ zero = __refcount_dec_and_test(&call->ref, &r);
+ o = atomic_read(&net->nr_outstanding_calls);
+ trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
__builtin_return_address(0));
+ if (zero)
+ afs_free_call(call);
+}
- ASSERTCMP(n, >=, 0);
- if (n == 0) {
- ASSERT(!work_pending(&call->async_work));
- ASSERT(call->type->name != NULL);
-
- if (call->rxcall) {
- rxrpc_kernel_end_call(net->socket, call->rxcall);
- call->rxcall = NULL;
- }
- if (call->type->destructor)
- call->type->destructor(call);
-
- afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
- afs_put_addrlist(call->alist);
- kfree(call->request);
-
- trace_afs_call(call, afs_call_trace_free, 0, o,
- __builtin_return_address(0));
- kfree(call);
+static void afs_deferred_free_worker(struct work_struct *work)
+{
+ struct afs_call *call = container_of(work, struct afs_call, free_work);
- o = atomic_dec_return(&net->nr_outstanding_calls);
- if (o == 0)
- wake_up_var(&net->nr_outstanding_calls);
- }
+ afs_free_call(call);
}
-static struct afs_call *afs_get_call(struct afs_call *call,
- enum afs_call_trace why)
+/*
+ * Dispose of a reference on a call, deferring the cleanup to a workqueue
+ * to avoid lock recursion.
+ */
+void afs_deferred_put_call(struct afs_call *call)
{
- int u = atomic_inc_return(&call->usage);
+ struct afs_net *net = call->net;
+ unsigned int debug_id = call->debug_id;
+ bool zero;
+ int r, o;
- trace_afs_call(call, why, u,
- atomic_read(&call->net->nr_outstanding_calls),
+ zero = __refcount_dec_and_test(&call->ref, &r);
+ o = atomic_read(&net->nr_outstanding_calls);
+ trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
__builtin_return_address(0));
- return call;
+ if (zero)
+ schedule_work(&call->free_work);
}
/*
@@ -212,8 +261,6 @@ static struct afs_call *afs_get_call(struct afs_call *call,
static void afs_queue_call_work(struct afs_call *call)
{
if (call->type->work) {
- INIT_WORK(&call->work, call->type->work);
-
afs_get_call(call, afs_call_trace_work);
if (!queue_work(afs_wq, &call->work))
afs_put_call(call);
@@ -287,9 +334,8 @@ static void afs_notify_end_request_tx(struct sock *sock,
* Initiate a call and synchronously queue up the parameters for dispatch. Any
* error is stored into the call struct, which the caller must check for.
*/
-void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+void afs_make_call(struct afs_call *call, gfp_t gfp)
{
- struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index];
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
@@ -297,7 +343,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
s64 tx_total_len;
int ret;
- _enter(",{%pISp},", &srx->transport);
+ _enter(",{%pISp+%u},", rxrpc_kernel_remote_addr(call->peer), call->service_id);
ASSERT(call->type != NULL);
ASSERT(call->type->name != NULL);
@@ -306,8 +352,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
call, call->type->name, key_serial(call->key),
atomic_read(&call->net->nr_outstanding_calls));
- call->addr_ix = ac->index;
- call->alist = afs_get_addrlist(ac->alist);
+ trace_afs_make_call(call);
/* Work out the length we're going to transmit. This is awkward for
* calls such as FS.StoreData where there's an extra injection of data
@@ -326,12 +371,15 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
}
/* create a call */
- rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
+ rxcall = rxrpc_kernel_begin_call(call->net->socket, call->peer, call->key,
(unsigned long)call,
- tx_total_len, gfp,
+ tx_total_len,
+ call->max_lifespan,
+ gfp,
(call->async ?
afs_wake_up_async_call :
afs_wake_up_call_waiter),
+ call->service_id,
call->upgrade,
(call->intr ? RXRPC_PREINTERRUPTIBLE :
RXRPC_UNINTERRUPTIBLE),
@@ -343,10 +391,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
}
call->rxcall = rxcall;
-
- if (call->max_lifespan)
- rxrpc_kernel_set_max_life(call->net->socket, rxcall,
- call->max_lifespan);
+ call->issue_time = ktime_get_real();
/* send the request */
iov[0].iov_base = call->request;
@@ -354,7 +399,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, call->request_size);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_WAITALL | (call->write_iter ? MSG_MORE : 0);
@@ -384,45 +429,50 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
/* Note that at this point, we may have received the reply or an abort
* - and an asynchronous call may already have completed.
*
- * afs_wait_for_call_to_complete(call, ac)
+ * afs_wait_for_call_to_complete(call)
* must be called to synchronously clean up.
*/
return;
error_do_abort:
- if (ret != -ECONNABORTED) {
+ if (ret != -ECONNABORTED)
rxrpc_kernel_abort_call(call->net->socket, rxcall,
- RX_USER_ABORT, ret, "KSD");
- } else {
+ RX_USER_ABORT, ret,
+ afs_abort_send_data_error);
+ if (call->async) {
+ afs_see_call(call, afs_call_trace_async_abort);
+ return;
+ }
+
+ if (ret == -ECONNABORTED) {
len = 0;
- iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
rxrpc_kernel_recv_data(call->net->socket, rxcall,
&msg.msg_iter, &len, false,
&call->abort_code, &call->service_id);
- ac->abort_code = call->abort_code;
- ac->responded = true;
+ call->responded = true;
}
call->error = ret;
trace_afs_call_done(call);
error_kill_call:
- if (call->type->done)
- call->type->done(call);
+ if (call->async)
+ afs_see_call(call, afs_call_trace_async_kill);
+ if (call->type->immediate_cancel)
+ call->type->immediate_cancel(call);
/* We need to dispose of the extra ref we grabbed for an async call.
* The call, however, might be queued on afs_async_calls and we need to
* make sure we don't get any more notifications that might requeue it.
*/
- if (call->rxcall) {
- rxrpc_kernel_end_call(call->net->socket, call->rxcall);
- call->rxcall = NULL;
- }
+ if (call->rxcall)
+ rxrpc_kernel_shutdown_call(call->net->socket, call->rxcall);
if (call->async) {
if (cancel_work_sync(&call->async_work))
afs_put_call(call);
- afs_put_call(call);
+ afs_set_call_complete(call, ret, 0);
}
- ac->error = ret;
+ call->error = ret;
call->state = AFS_CALL_COMPLETE;
_leave(" = %d", ret);
}
@@ -456,14 +506,14 @@ static void afs_log_error(struct afs_call *call, s32 remote_abort)
max = m + 1;
pr_notice("kAFS: Peer reported %s failure on %s [%pISp]\n",
msg, call->type->name,
- &call->alist->addrs[call->addr_ix].transport);
+ rxrpc_kernel_remote_addr(call->peer));
}
}
/*
* deliver messages to a call
*/
-static void afs_deliver_to_call(struct afs_call *call)
+void afs_deliver_to_call(struct afs_call *call)
{
enum afs_call_state state;
size_t len;
@@ -480,7 +530,7 @@ static void afs_deliver_to_call(struct afs_call *call)
) {
if (state == AFS_CALL_SV_AWAIT_ACK) {
len = 0;
- iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0);
+ iov_iter_kvec(&call->def_iter, ITER_DEST, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
call->rxcall, &call->def_iter,
&len, false, &remote_abort,
@@ -497,18 +547,13 @@ static void afs_deliver_to_call(struct afs_call *call)
return;
}
- if (!call->have_reply_time &&
- rxrpc_kernel_get_reply_time(call->net->socket,
- call->rxcall,
- &call->reply_time))
- call->have_reply_time = true;
-
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
if (ret == 0 && call->unmarshalling_error)
ret = -EBADMSG;
switch (ret) {
case 0:
+ call->responded = true;
afs_queue_call_work(call);
if (state == AFS_CALL_CL_PROC_REPLY) {
if (call->op)
@@ -523,12 +568,15 @@ static void afs_deliver_to_call(struct afs_call *call)
goto out;
case -ECONNABORTED:
ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
+ call->responded = true;
afs_log_error(call, call->abort_code);
goto done;
case -ENOTSUPP:
+ call->responded = true;
abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
- abort_code, ret, "KIV");
+ abort_code, ret,
+ afs_abort_op_not_supported);
goto local_abort;
case -EIO:
pr_err("kAFS: Call %u in bad state %u\n",
@@ -537,16 +585,20 @@ static void afs_deliver_to_call(struct afs_call *call)
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
+ case -ENOMEM:
+ case -EFAULT:
abort_code = RXGEN_CC_UNMARSHAL;
if (state != AFS_CALL_CL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
- abort_code, ret, "KUM");
+ abort_code, ret,
+ afs_abort_unmarshal_error);
goto local_abort;
default:
- abort_code = RX_USER_ABORT;
+ abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
- abort_code, ret, "KER");
+ abort_code, ret,
+ afs_abort_general_error);
goto local_abort;
}
}
@@ -562,55 +614,50 @@ local_abort:
abort_code = 0;
call_complete:
afs_set_call_complete(call, ret, remote_abort);
- state = AFS_CALL_COMPLETE;
goto done;
}
/*
- * Wait synchronously for a call to complete and clean up the call struct.
+ * Wait synchronously for a call to complete.
*/
-long afs_wait_for_call_to_complete(struct afs_call *call,
- struct afs_addr_cursor *ac)
+void afs_wait_for_call_to_complete(struct afs_call *call)
{
- long ret;
bool rxrpc_complete = false;
- DECLARE_WAITQUEUE(myself, current);
-
_enter("");
- ret = call->error;
- if (ret < 0)
- goto out;
+ if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
+ DECLARE_WAITQUEUE(myself, current);
+
+ add_wait_queue(&call->waitq, &myself);
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ /* deliver any messages that are in the queue */
+ if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
+ call->need_attention) {
+ call->need_attention = false;
+ __set_current_state(TASK_RUNNING);
+ afs_deliver_to_call(call);
+ continue;
+ }
- add_wait_queue(&call->waitq, &myself);
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- /* deliver any messages that are in the queue */
- if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
- call->need_attention) {
- call->need_attention = false;
- __set_current_state(TASK_RUNNING);
- afs_deliver_to_call(call);
- continue;
- }
+ if (afs_check_call_state(call, AFS_CALL_COMPLETE))
+ break;
- if (afs_check_call_state(call, AFS_CALL_COMPLETE))
- break;
+ if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
+ /* rxrpc terminated the call. */
+ rxrpc_complete = true;
+ break;
+ }
- if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
- /* rxrpc terminated the call. */
- rxrpc_complete = true;
- break;
+ schedule();
}
- schedule();
+ remove_wait_queue(&call->waitq, &myself);
+ __set_current_state(TASK_RUNNING);
}
- remove_wait_queue(&call->waitq, &myself);
- __set_current_state(TASK_RUNNING);
-
if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
if (rxrpc_complete) {
afs_set_call_complete(call, call->error, call->abort_code);
@@ -618,33 +665,11 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
/* Kill off the call if it's still live. */
_debug("call interrupted");
if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
- RX_USER_ABORT, -EINTR, "KWI"))
+ RX_USER_ABORT, -EINTR,
+ afs_abort_interrupted))
afs_set_call_complete(call, -EINTR, 0);
}
}
-
- spin_lock_bh(&call->state_lock);
- ac->abort_code = call->abort_code;
- ac->error = call->error;
- spin_unlock_bh(&call->state_lock);
-
- ret = ac->error;
- switch (ret) {
- case 0:
- ret = call->ret0;
- call->ret0 = 0;
-
- fallthrough;
- case -ECONNABORTED:
- ac->responded = true;
- break;
- }
-
-out:
- _debug("call complete");
- afs_put_call(call);
- _leave(" = %p", (void *)ret);
- return ret;
}
/*
@@ -660,25 +685,25 @@ static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
}
/*
- * wake up an asynchronous call
+ * Wake up an asynchronous call. The caller is holding the call notify
+ * spinlock around this, so we can't call afs_put_call().
*/
static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long call_user_ID)
{
struct afs_call *call = (struct afs_call *)call_user_ID;
- int u;
+ int r;
trace_afs_notify_call(rxcall, call);
call->need_attention = true;
- u = atomic_fetch_add_unless(&call->usage, 1, 0);
- if (u != 0) {
- trace_afs_call(call, afs_call_trace_wake, u + 1,
+ if (__refcount_inc_not_zero(&call->ref, &r)) {
+ trace_afs_call(call->debug_id, afs_call_trace_wake, r + 1,
atomic_read(&call->net->nr_outstanding_calls),
__builtin_return_address(0));
if (!queue_work(afs_async_calls, &call->async_work))
- afs_put_call(call);
+ afs_deferred_put_call(call);
}
}
@@ -732,7 +757,6 @@ void afs_charge_preallocation(struct work_struct *work)
if (rxrpc_kernel_charge_accept(net->socket,
afs_wake_up_async_call,
- afs_rx_attach,
(unsigned long)call,
GFP_KERNEL,
call->debug_id) < 0)
@@ -760,8 +784,14 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
struct afs_net *net = afs_sock2net(sk);
+ call->peer = rxrpc_kernel_get_call_peer(sk->sk_socket, call->rxcall);
+ call->server = afs_find_server(call->peer);
+ if (!call->server)
+ trace_afs_cm_no_server(call, rxrpc_kernel_remote_srx(call->peer));
+
queue_work(afs_wq, &net->charge_preallocation_work);
}
@@ -788,9 +818,14 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
if (!afs_cm_incoming_call(call))
return -ENOTSUPP;
+ call->security_ix = rxrpc_kernel_query_call_security(call->rxcall,
+ &call->service_id,
+ &call->enctype);
+
trace_afs_cb_call(call);
+ call->work.func = call->type->work;
- /* pass responsibility for the remainer of this message off to the
+ /* pass responsibility for the remainder of this message off to the
* cache manager op */
return call->type->deliver(call);
}
@@ -822,7 +857,7 @@ void afs_send_empty_reply(struct afs_call *call)
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, NULL, 0, 0);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -836,7 +871,8 @@ void afs_send_empty_reply(struct afs_call *call)
case -ENOMEM:
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
- RX_USER_ABORT, -ENOMEM, "KOO");
+ RXGEN_SS_MARSHAL, -ENOMEM,
+ afs_abort_oom);
fallthrough;
default:
_leave(" [error]");
@@ -862,7 +898,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
iov[0].iov_len = len;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -878,7 +914,8 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
if (n == -ENOMEM) {
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
- RX_USER_ABORT, -ENOMEM, "KOO");
+ RXGEN_SS_MARSHAL, -ENOMEM,
+ afs_abort_oom);
}
_leave(" [error]");
}
@@ -900,6 +937,7 @@ int afs_extract_data(struct afs_call *call, bool want_more)
ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
&call->iov_len, want_more, &remote_abort,
&call->service_id);
+ trace_afs_receive_data(call, call->iter, want_more, ret);
if (ret == 0 || ret == -EAGAIN)
return ret;
@@ -936,3 +974,13 @@ noinline int afs_protocol_error(struct afs_call *call,
call->unmarshalling_error = true;
return -EBADMSG;
}
+
+/*
+ * Wake up OOB notification processing.
+ */
+static void afs_rx_notify_oob(struct sock *sk, struct sk_buff *oob)
+{
+ struct afs_net *net = sk->sk_user_data;
+
+ schedule_work(&net->rx_oob_work);
+}
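
The rxrpc.c changes above split call cleanup into afs_put_call(), which may
free the call immediately, and afs_deferred_put_call(), which only queues the
final free so it can be used from the notify callback where freeing in place
would recurse on a lock.  A rough userspace analogue of that split is sketched
below; the single-threaded list standing in for the workqueue and the plain
C11 atomic counter standing in for refcount_t are assumptions for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct call {
	atomic_int ref;
	struct call *free_next;		/* stand-in for the free_work item */
};

static struct call *deferred_free_list;	/* stand-in for the workqueue */

static void call_put(struct call *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1)
		free(c);			/* last ref: free right here */
}

static void call_deferred_put(struct call *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		c->free_next = deferred_free_list;	/* last ref: defer */
		deferred_free_list = c;
	}
}

static void run_deferred_frees(void)
{
	while (deferred_free_list) {
		struct call *c = deferred_free_list;

		deferred_free_list = c->free_next;
		free(c);
	}
}

int main(void)
{
	struct call *c = calloc(1, sizeof(*c));

	atomic_store(&c->ref, 2);
	call_put(c);		/* drops 2 -> 1, nothing freed */
	call_deferred_put(c);	/* drops 1 -> 0, queued instead of freed */
	run_deferred_frees();	/* the actual free happens here */
	printf("done\n");
	return 0;
}
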
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 3c7a8fc4f93f..55ddce94af03 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -16,6 +16,31 @@
static DEFINE_HASHTABLE(afs_permits_cache, 10);
static DEFINE_SPINLOCK(afs_permits_lock);
+static DEFINE_MUTEX(afs_key_lock);
+
+/*
+ * Allocate a key to use as a placeholder for anonymous user security.
+ */
+static int afs_alloc_anon_key(struct afs_cell *cell)
+{
+ struct key *key;
+
+ mutex_lock(&afs_key_lock);
+ key = cell->anonymous_key;
+ if (!key) {
+ key = rxrpc_get_null_key(cell->key_desc);
+ if (!IS_ERR(key))
+ cell->anonymous_key = key;
+ }
+ mutex_unlock(&afs_key_lock);
+
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ _debug("anon key %p{%x}",
+ cell->anonymous_key, key_serial(cell->anonymous_key));
+ return 0;
+}
/*
* get a key
@@ -23,11 +48,12 @@ static DEFINE_SPINLOCK(afs_permits_lock);
struct key *afs_request_key(struct afs_cell *cell)
{
struct key *key;
+ int ret;
- _enter("{%x}", key_serial(cell->anonymous_key));
+ _enter("{%s}", cell->key_desc);
- _debug("key %s", cell->anonymous_key->description);
- key = request_key_net(&key_type_rxrpc, cell->anonymous_key->description,
+ _debug("key %s", cell->key_desc);
+ key = request_key_net(&key_type_rxrpc, cell->key_desc,
cell->net->net, NULL);
if (IS_ERR(key)) {
if (PTR_ERR(key) != -ENOKEY) {
@@ -35,6 +61,12 @@ struct key *afs_request_key(struct afs_cell *cell)
return key;
}
+ if (!cell->anonymous_key) {
+ ret = afs_alloc_anon_key(cell);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
/* act as anonymous user */
_leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
return key_get(cell->anonymous_key);
@@ -52,11 +84,10 @@ struct key *afs_request_key_rcu(struct afs_cell *cell)
{
struct key *key;
- _enter("{%x}", key_serial(cell->anonymous_key));
+ _enter("{%s}", cell->key_desc);
- _debug("key %s", cell->anonymous_key->description);
- key = request_key_net_rcu(&key_type_rxrpc,
- cell->anonymous_key->description,
+ _debug("key %s", cell->key_desc);
+ key = request_key_net_rcu(&key_type_rxrpc, cell->key_desc,
cell->net->net);
if (IS_ERR(key)) {
if (PTR_ERR(key) != -ENOKEY) {
@@ -65,6 +96,8 @@ struct key *afs_request_key_rcu(struct afs_cell *cell)
}
/* act as anonymous user */
+ if (!cell->anonymous_key)
+ return NULL; /* Need to allocate */
_leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
return key_get(cell->anonymous_key);
} else {
@@ -219,8 +252,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
* yet.
*/
size++;
- new = kzalloc(sizeof(struct afs_permits) +
- sizeof(struct afs_permit) * size, GFP_NOFS);
+ new = kzalloc(struct_size(new, permits, size), GFP_NOFS);
if (!new)
goto out_put;
@@ -396,7 +428,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
* - AFS ACLs are attached to directories only, and a file is controlled by its
* parent directory's ACL
*/
-int afs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+int afs_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
struct afs_vnode *vnode = AFS_FS_I(inode);
@@ -409,7 +441,7 @@ int afs_permission(struct user_namespace *mnt_userns, struct inode *inode,
if (mask & MAY_NOT_BLOCK) {
key = afs_request_key_rcu(vnode->volume->cell);
- if (IS_ERR(key))
+ if (IS_ERR_OR_NULL(key))
return -ECHILD;
ret = -ECHILD;
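
The security.c change above creates the cell's anonymous placeholder key
lazily on first use, rechecking under a mutex so that concurrent callers only
allocate it once.  A minimal sketch of that lazy-initialisation pattern is
below; make_key() and the error convention are hypothetical stand-ins for the
rxrpc null-key allocation.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static void *anon_key;			/* stand-in for cell->anonymous_key */

static void *make_key(void)
{
	return malloc(1);		/* placeholder for the real key setup */
}

static int get_anon_key(void **out)
{
	pthread_mutex_lock(&init_lock);
	if (!anon_key)			/* recheck under the lock */
		anon_key = make_key();
	pthread_mutex_unlock(&init_lock);

	if (!anon_key)
		return -1;		/* allocation failed */
	*out = anon_key;
	return 0;
}

int main(void)
{
	void *key;

	if (get_anon_key(&key) == 0)
		printf("anonymous key ready at %p\n", key);
	return 0;
}
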
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 6e5b9a19b234..c4428ebddb1d 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -14,210 +14,103 @@
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
static atomic_t afs_server_debug_id;
-static struct afs_server *afs_maybe_use_server(struct afs_server *,
- enum afs_server_trace);
static void __afs_put_server(struct afs_net *, struct afs_server *);
+static void afs_server_timer(struct timer_list *timer);
+static void afs_server_destroyer(struct work_struct *work);
/*
* Find a server by one of its addresses.
*/
-struct afs_server *afs_find_server(struct afs_net *net,
- const struct sockaddr_rxrpc *srx)
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer)
{
- const struct afs_addr_list *alist;
- struct afs_server *server = NULL;
- unsigned int i;
- int seq = 0, diff;
+ struct afs_server *server = (struct afs_server *)rxrpc_kernel_get_peer_data(peer);
- rcu_read_lock();
-
- do {
- if (server)
- afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
- server = NULL;
- read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
-
- if (srx->transport.family == AF_INET6) {
- const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
- hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
- alist = rcu_dereference(server->addresses);
- for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
- b = &alist->addrs[i].transport.sin6;
- diff = ((u16 __force)a->sin6_port -
- (u16 __force)b->sin6_port);
- if (diff == 0)
- diff = memcmp(&a->sin6_addr,
- &b->sin6_addr,
- sizeof(struct in6_addr));
- if (diff == 0)
- goto found;
- }
- }
- } else {
- const struct sockaddr_in *a = &srx->transport.sin, *b;
- hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
- alist = rcu_dereference(server->addresses);
- for (i = 0; i < alist->nr_ipv4; i++) {
- b = &alist->addrs[i].transport.sin;
- diff = ((u16 __force)a->sin_port -
- (u16 __force)b->sin_port);
- if (diff == 0)
- diff = ((u32 __force)a->sin_addr.s_addr -
- (u32 __force)b->sin_addr.s_addr);
- if (diff == 0)
- goto found;
- }
- }
- }
-
- server = NULL;
- continue;
- found:
- server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);
-
- } while (need_seqretry(&net->fs_addr_lock, seq));
-
- done_seqretry(&net->fs_addr_lock, seq);
-
- rcu_read_unlock();
- return server;
+ if (!server)
+ return NULL;
+ return afs_use_server(server, false, afs_server_trace_use_cm_call);
}
/*
- * Look up a server by its UUID and mark it active.
+ * Look up a server by its UUID and mark it active. The caller must hold
+ * cell->fs_lock.
*/
-struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
+static struct afs_server *afs_find_server_by_uuid(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_server *server = NULL;
+ struct afs_server *server;
struct rb_node *p;
- int diff, seq = 0;
+ int diff;
_enter("%pU", uuid);
- do {
- /* Unfortunately, rbtree walking doesn't give reliable results
- * under just the RCU read lock, so we have to check for
- * changes.
- */
- if (server)
- afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
- server = NULL;
-
- read_seqbegin_or_lock(&net->fs_lock, &seq);
-
- p = net->fs_servers.rb_node;
- while (p) {
- server = rb_entry(p, struct afs_server, uuid_rb);
-
- diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
- if (diff < 0) {
- p = p->rb_left;
- } else if (diff > 0) {
- p = p->rb_right;
- } else {
- afs_use_server(server, afs_server_trace_get_by_uuid);
- break;
- }
+ p = cell->fs_servers.rb_node;
+ while (p) {
+ server = rb_entry(p, struct afs_server, uuid_rb);
- server = NULL;
+ diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
+ if (diff < 0) {
+ p = p->rb_left;
+ } else if (diff > 0) {
+ p = p->rb_right;
+ } else {
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags))
+ return NULL; /* Need a write lock */
+ afs_use_server(server, true, afs_server_trace_use_by_uuid);
+ return server;
}
- } while (need_seqretry(&net->fs_lock, seq));
-
- done_seqretry(&net->fs_lock, seq);
+ }
- _leave(" = %p", server);
- return server;
+ return NULL;
}
/*
- * Install a server record in the namespace tree. If there's a clash, we stick
- * it into a list anchored on whichever afs_server struct is actually in the
- * tree.
+ * Install a server record in the cell tree. The caller must hold an exclusive
+ * lock on cell->fs_lock.
*/
static struct afs_server *afs_install_server(struct afs_cell *cell,
- struct afs_server *candidate)
+ struct afs_server **candidate)
{
- const struct afs_addr_list *alist;
- struct afs_server *server, *next;
+ struct afs_server *server;
struct afs_net *net = cell->net;
struct rb_node **pp, *p;
int diff;
_enter("%p", candidate);
- write_seqlock(&net->fs_lock);
-
/* Firstly install the server in the UUID lookup tree */
- pp = &net->fs_servers.rb_node;
+ pp = &cell->fs_servers.rb_node;
p = NULL;
while (*pp) {
p = *pp;
_debug("- consider %p", p);
server = rb_entry(p, struct afs_server, uuid_rb);
- diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
- if (diff < 0) {
+ diff = memcmp(&(*candidate)->uuid, &server->uuid, sizeof(uuid_t));
+ if (diff < 0)
pp = &(*pp)->rb_left;
- } else if (diff > 0) {
+ else if (diff > 0)
pp = &(*pp)->rb_right;
- } else {
- if (server->cell == cell)
- goto exists;
-
- /* We have the same UUID representing servers in
- * different cells. Append the new server to the list.
- */
- for (;;) {
- next = rcu_dereference_protected(
- server->uuid_next,
- lockdep_is_held(&net->fs_lock.lock));
- if (!next)
- break;
- server = next;
- }
- rcu_assign_pointer(server->uuid_next, candidate);
- candidate->uuid_prev = server;
- server = candidate;
- goto added_dup;
- }
+ else
+ goto exists;
}
- server = candidate;
+ server = *candidate;
+ *candidate = NULL;
rb_link_node(&server->uuid_rb, p, pp);
- rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ rb_insert_color(&server->uuid_rb, &cell->fs_servers);
+ write_seqlock(&net->fs_lock);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ write_sequnlock(&net->fs_lock);
-added_dup:
- write_seqlock(&net->fs_addr_lock);
- alist = rcu_dereference_protected(server->addresses,
- lockdep_is_held(&net->fs_addr_lock.lock));
-
- /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
- * it in the IPv4 and/or IPv6 reverse-map lists.
- *
- * TODO: For speed we want to use something other than a flat list
- * here; even sorting the list in terms of lowest address would help a
- * bit, but anything we might want to do gets messy and memory
- * intensive.
- */
- if (alist->nr_ipv4 > 0)
- hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
- if (alist->nr_addrs > alist->nr_ipv4)
- hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);
-
- write_sequnlock(&net->fs_addr_lock);
+ afs_get_cell(cell, afs_cell_trace_get_server);
exists:
- afs_get_server(server, afs_server_trace_get_install);
- write_sequnlock(&net->fs_lock);
+ afs_use_server(server, true, afs_server_trace_use_install);
return server;
}
/*
- * Allocate a new server record and mark it active.
+ * Allocate a new server record and mark it as active but uncreated.
*/
-static struct afs_server *afs_alloc_server(struct afs_cell *cell,
- const uuid_t *uuid,
- struct afs_addr_list *alist)
+static struct afs_server *afs_alloc_server(struct afs_cell *cell, const uuid_t *uuid)
{
struct afs_server *server;
struct afs_net *net = cell->net;
@@ -226,49 +119,50 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
if (!server)
- goto enomem;
+ return NULL;
- atomic_set(&server->ref, 1);
- atomic_set(&server->active, 1);
+ refcount_set(&server->ref, 1);
+ atomic_set(&server->active, 0);
+ __set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
- RCU_INIT_POINTER(server->addresses, alist);
- server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
- INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
+ INIT_WORK(&server->destroyer, &afs_server_destroyer);
+ timer_setup(&server->timer, afs_server_timer, 0);
+ INIT_LIST_HEAD(&server->volumes);
init_waitqueue_head(&server->probe_wq);
+ mutex_init(&server->cm_token_lock);
INIT_LIST_HEAD(&server->probe_link);
+ INIT_HLIST_NODE(&server->proc_link);
spin_lock_init(&server->probe_lock);
server->cell = cell;
server->rtt = UINT_MAX;
+ server->service_id = FS_SERVICE;
+ server->probe_counter = 1;
+ server->probed_at = jiffies - LONG_MAX / 2;
afs_inc_servers_outstanding(net);
- trace_afs_server(server, 1, 1, afs_server_trace_alloc);
_leave(" = %p", server);
return server;
-
-enomem:
- _leave(" = NULL [nomem]");
- return NULL;
}
/*
* Look up an address record for a server
*/
-static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
- struct key *key, const uuid_t *uuid)
+static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_server *server,
+ struct key *key)
{
struct afs_vl_cursor vc;
struct afs_addr_list *alist = NULL;
int ret;
ret = -ERESTARTSYS;
- if (afs_begin_vlserver_operation(&vc, cell, key)) {
+ if (afs_begin_vlserver_operation(&vc, server->cell, key)) {
while (afs_select_vlserver(&vc)) {
if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
- alist = afs_yfsvl_get_endpoints(&vc, uuid);
+ alist = afs_yfsvl_get_endpoints(&vc, &server->uuid);
else
- alist = afs_vl_get_addrs_u(&vc, uuid);
+ alist = afs_vl_get_addrs_u(&vc, &server->uuid);
}
ret = afs_end_vlserver_operation(&vc);
@@ -278,72 +172,122 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
}
/*
- * Get or create a fileserver record.
+ * Get or create a fileserver record and return it with an active-use count on
+ * it.
*/
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
const uuid_t *uuid, u32 addr_version)
{
- struct afs_addr_list *alist;
- struct afs_server *server, *candidate;
+ struct afs_addr_list *alist = NULL;
+ struct afs_server *server, *candidate = NULL;
+ bool creating = false;
+ int ret;
_enter("%p,%pU", cell->net, uuid);
- server = afs_find_server_by_uuid(cell->net, uuid);
+ down_read(&cell->fs_lock);
+ server = afs_find_server_by_uuid(cell, uuid);
+ /* Won't see servers marked uncreated. */
+ up_read(&cell->fs_lock);
+
if (server) {
+ timer_delete_sync(&server->timer);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags))
+ goto wait_for_creation;
if (server->addr_version != addr_version)
set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
return server;
}
- alist = afs_vl_lookup_addrs(cell, key, uuid);
- if (IS_ERR(alist))
- return ERR_CAST(alist);
-
- candidate = afs_alloc_server(cell, uuid, alist);
+ candidate = afs_alloc_server(cell, uuid);
if (!candidate) {
- afs_put_addrlist(alist);
+ afs_put_addrlist(alist, afs_alist_trace_put_server_oom);
return ERR_PTR(-ENOMEM);
}
- server = afs_install_server(cell, candidate);
- if (server != candidate) {
- afs_put_addrlist(alist);
+ down_write(&cell->fs_lock);
+ server = afs_install_server(cell, &candidate);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags)) {
+ /* We need to wait for creation to complete. */
+ up_write(&cell->fs_lock);
+ goto wait_for_creation;
+ }
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ set_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
+ }
+ up_write(&cell->fs_lock);
+ timer_delete_sync(&server->timer);
+
+ /* If we get to create the server, we look up the addresses and then
+ * immediately dispatch an asynchronous probe to each interface on the
+ * fileserver. This will make sure the repeat-probing service is
+ * started.
+ */
+ if (creating) {
+ alist = afs_vl_lookup_addrs(server, key);
+ if (IS_ERR(alist)) {
+ ret = PTR_ERR(alist);
+ goto create_failed;
+ }
+
+ ret = afs_fs_probe_fileserver(cell->net, server, alist, key);
+ if (ret)
+ goto create_failed;
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ }
+
+out:
+ afs_put_addrlist(alist, afs_alist_trace_put_server_create);
+ if (candidate) {
+ kfree(rcu_access_pointer(server->endpoint_state));
kfree(candidate);
- } else {
- /* Immediately dispatch an asynchronous probe to each interface
- * on the fileserver. This will make sure the repeat-probing
- * service is started.
- */
- afs_fs_probe_fileserver(cell->net, server, key, true);
+ afs_dec_servers_outstanding(cell->net);
+ }
+ return server ?: ERR_PTR(ret);
+
+wait_for_creation:
+ afs_see_server(server, afs_server_trace_wait_create);
+ wait_on_bit(&server->flags, AFS_SERVER_FL_CREATING, TASK_UNINTERRUPTIBLE);
+ if (test_bit_acquire(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ /* Barrier: read flag before error */
+ ret = READ_ONCE(server->create_error);
+ afs_put_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+ goto out;
}
- return server;
-}
+ ret = 0;
+ goto out;
-/*
- * Set the server timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_server_timer(struct afs_net *net, time64_t delay)
-{
- if (net->live) {
- afs_inc_servers_outstanding(net);
- if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
- afs_dec_servers_outstanding(net);
+create_failed:
+ down_write(&cell->fs_lock);
+
+ WRITE_ONCE(server->create_error, ret);
+ smp_wmb(); /* Barrier: set error before flag. */
+ set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
}
+ afs_unuse_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+
+ up_write(&cell->fs_lock);
+ goto out;
}
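
afs_lookup_server() above now serialises fileserver creation with a pair of
flag bits: the first caller sets CREATING and performs the address lookup and
probe, while later callers wait for CREATING to clear and then inspect
UNCREATED/create_error to see whether it worked.  A hedged condvar-based
sketch of that create-once-or-wait pattern follows; the struct, locking and
error plumbing are illustrative only, not the kernel's wait_on_bit()
machinery.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct record {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool creating;		/* analogue of AFS_SERVER_FL_CREATING */
	bool created;
	int create_error;
};

static int record_get_or_create(struct record *r)
{
	bool do_create = false;

	pthread_mutex_lock(&r->lock);
	if (!r->created && !r->creating) {
		r->creating = true;	/* we won: we do the setup */
		do_create = true;
	}
	while (!do_create && r->creating)
		pthread_cond_wait(&r->cond, &r->lock);	/* wait_on_bit() analogue */
	pthread_mutex_unlock(&r->lock);

	if (do_create) {
		int err = 0;		/* ...address lookup and probe here... */

		pthread_mutex_lock(&r->lock);
		r->create_error = err;
		r->created = (err == 0);
		r->creating = false;
		pthread_cond_broadcast(&r->cond); /* clear_and_wake_up_bit() analogue */
		pthread_mutex_unlock(&r->lock);
	}
	return r->created ? 0 : r->create_error;
}

int main(void)
{
	struct record r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	printf("first lookup:  %d\n", record_get_or_create(&r));
	printf("second lookup: %d\n", record_get_or_create(&r));
	return 0;
}
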
/*
- * Server management timer. We have an increment on fs_outstanding that we
- * need to pass along to the work item.
+ * Set/reduce a server's timer.
*/
-void afs_servers_timer(struct timer_list *timer)
+static void afs_set_server_timer(struct afs_server *server, unsigned int delay_secs)
{
- struct afs_net *net = container_of(timer, struct afs_net, fs_timer);
-
- _enter("");
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ mod_timer(&server->timer, jiffies + delay_secs * HZ);
}
/*
@@ -352,38 +296,32 @@ void afs_servers_timer(struct timer_list *timer)
struct afs_server *afs_get_server(struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int u = atomic_inc_return(&server->ref);
+ unsigned int a;
+ int r;
- trace_afs_server(server, u, atomic_read(&server->active), reason);
+ __refcount_inc(&server->ref, &r);
+ a = atomic_read(&server->active);
+ trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
}
/*
- * Try to get a reference on a server object.
+ * Get an active count on a server object and maybe remove from the inactive
+ * list.
*/
-static struct afs_server *afs_maybe_use_server(struct afs_server *server,
- enum afs_server_trace reason)
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason)
{
- unsigned int r = atomic_fetch_add_unless(&server->ref, 1, 0);
unsigned int a;
+ int r;
- if (r == 0)
- return NULL;
-
+ __refcount_inc(&server->ref, &r);
a = atomic_inc_return(&server->active);
- trace_afs_server(server, r, a, reason);
- return server;
-}
-
-/*
- * Get an active count on a server object.
- */
-struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
-{
- unsigned int r = atomic_inc_return(&server->ref);
- unsigned int a = atomic_inc_return(&server->active);
+ if (a == 1 && activate &&
+ !test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ timer_delete(&server->timer);
- trace_afs_server(server, r, a, reason);
+ trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
}
@@ -393,14 +331,18 @@ struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_tra
void afs_put_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int usage;
+ unsigned int a, debug_id;
+ bool zero;
+ int r;
if (!server)
return;
- usage = atomic_dec_return(&server->ref);
- trace_afs_server(server, usage, atomic_read(&server->active), reason);
- if (unlikely(usage == 0))
+ debug_id = server->debug_id;
+ a = atomic_read(&server->active);
+ zero = __refcount_dec_and_test(&server->ref, &r);
+ trace_afs_server(debug_id, r - 1, a, reason);
+ if (unlikely(zero))
__afs_put_server(net, server);
}
@@ -411,13 +353,16 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- unsigned int active = atomic_dec_return(&server->active);
+ if (!server)
+ return;
- if (active == 0)
- afs_set_server_timer(net, afs_server_gc_delay);
- afs_put_server(net, server, reason);
+ if (atomic_dec_and_test(&server->active)) {
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) ||
+ READ_ONCE(server->cell->state) >= AFS_CELL_REMOVING)
+ schedule_work(&server->destroyer);
}
+
+ afs_put_server(net, server, reason);
}
/*
@@ -426,19 +371,34 @@ void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
void afs_unuse_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- server->unuse_time = ktime_get_real_seconds();
- afs_unuse_server_notime(net, server, reason);
+ if (!server)
+ return;
+
+ if (atomic_dec_and_test(&server->active)) {
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) &&
+ READ_ONCE(server->cell->state) < AFS_CELL_REMOVING) {
+ time64_t unuse_time = ktime_get_real_seconds();
+
+ server->unuse_time = unuse_time;
+ afs_set_server_timer(server, afs_server_gc_delay);
+ } else {
+ schedule_work(&server->destroyer);
+ }
}
+
+ afs_put_server(net, server, reason);
}
static void afs_server_rcu(struct rcu_head *rcu)
{
struct afs_server *server = container_of(rcu, struct afs_server, rcu);
- trace_afs_server(server, atomic_read(&server->ref),
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
atomic_read(&server->active), afs_server_trace_free);
- afs_put_addrlist(rcu_access_pointer(server->addresses));
+ afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
+ afs_estate_trace_put_server);
+ afs_put_cell(server->cell, afs_cell_trace_put_server);
+ kfree(server->cm_rxgk_appdata.data);
kfree(server);
}
@@ -450,173 +410,126 @@ static void __afs_put_server(struct afs_net *net, struct afs_server *server)
static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server)
{
- struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
- struct afs_addr_cursor ac = {
- .alist = alist,
- .index = alist->preferred,
- .error = 0,
- };
-
- afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+ struct afs_endpoint_state *estate = rcu_access_pointer(server->endpoint_state);
+ struct afs_addr_list *alist = estate->addresses;
+
+ afs_fs_give_up_all_callbacks(net, server, &alist->addrs[alist->preferred], NULL);
}
/*
- * destroy a dead server
+ * Check to see if the server record has expired.
*/
-static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+static bool afs_has_server_expired(const struct afs_server *server)
{
- if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
- afs_give_up_callbacks(net, server);
+ time64_t expires_at;
- flush_work(&server->initcb_work);
- afs_put_server(net, server, afs_server_trace_destroy);
+ if (atomic_read(&server->active))
+ return false;
+
+ if (server->cell->net->live ||
+ server->cell->state >= AFS_CELL_REMOVING) {
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
+ 0, afs_server_trace_purging);
+ return true;
+ }
+
+ expires_at = server->unuse_time;
+ if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
+ !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
+ expires_at += afs_server_gc_delay;
+
+ return ktime_get_real_seconds() > expires_at;
}
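
afs_has_server_expired() above keeps an idle server around for a grace period
after its last use unless VL lookups for it were already failing, in which
case it can be reaped immediately.  Minus the purge-on-removal checks, the
timing rule reduces to the small sketch below; GC_DELAY and the helper names
are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define GC_DELAY 10	/* seconds, mirroring afs_server_gc_delay */

/* Hedged sketch: a record with no active users expires GC_DELAY seconds after
 * it was last unused, or immediately if lookups for it were already failing.
 */
static bool record_expired(time_t unuse_time, bool lookups_failing, time_t now)
{
	time_t expires_at = unuse_time;

	if (!lookups_failing)
		expires_at += GC_DELAY;
	return now > expires_at;
}

int main(void)
{
	time_t now = time(NULL);

	printf("fresh:   %d\n", record_expired(now, false, now));
	printf("stale:   %d\n", record_expired(now - 60, false, now));
	printf("failing: %d\n", record_expired(now - 1, true, now));
	return 0;
}
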
/*
- * Garbage collect any expired servers.
+ * Remove a server record from its parent cell's database.
*/
-static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
+static bool afs_remove_server_from_cell(struct afs_server *server)
{
- struct afs_server *server, *next, *prev;
- int active;
-
- while ((server = gc_list)) {
- gc_list = server->gc_next;
-
- write_seqlock(&net->fs_lock);
-
- active = atomic_read(&server->active);
- if (active == 0) {
- trace_afs_server(server, atomic_read(&server->ref),
- active, afs_server_trace_gc);
- next = rcu_dereference_protected(
- server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
- prev = server->uuid_prev;
- if (!prev) {
- /* The one at the front is in the tree */
- if (!next) {
- rb_erase(&server->uuid_rb, &net->fs_servers);
- } else {
- rb_replace_node_rcu(&server->uuid_rb,
- &next->uuid_rb,
- &net->fs_servers);
- next->uuid_prev = NULL;
- }
- } else {
- /* This server is not at the front */
- rcu_assign_pointer(prev->uuid_next, next);
- if (next)
- next->uuid_prev = prev;
- }
-
- list_del(&server->probe_link);
- hlist_del_rcu(&server->proc_link);
- if (!hlist_unhashed(&server->addr4_link))
- hlist_del_rcu(&server->addr4_link);
- if (!hlist_unhashed(&server->addr6_link))
- hlist_del_rcu(&server->addr6_link);
- }
- write_sequnlock(&net->fs_lock);
+ struct afs_cell *cell = server->cell;
- if (active == 0)
- afs_destroy_server(net, server);
+ down_write(&cell->fs_lock);
+
+ if (!afs_has_server_expired(server)) {
+ up_write(&cell->fs_lock);
+ return false;
}
+
+ set_bit(AFS_SERVER_FL_EXPIRED, &server->flags);
+ _debug("expire %pU %u", &server->uuid, atomic_read(&server->active));
+ afs_see_server(server, afs_server_trace_see_expired);
+ rb_erase(&server->uuid_rb, &cell->fs_servers);
+ up_write(&cell->fs_lock);
+ return true;
}
-/*
- * Manage the records of servers known to be within a network namespace. This
- * includes garbage collecting unused servers.
- *
- * Note also that we were given an increment on net->servers_outstanding by
- * whoever queued us that we need to deal with before returning.
- */
-void afs_manage_servers(struct work_struct *work)
+static void afs_server_destroyer(struct work_struct *work)
{
- struct afs_net *net = container_of(work, struct afs_net, fs_manager);
- struct afs_server *gc_list = NULL;
- struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
+ struct afs_endpoint_state *estate;
+ struct afs_server *server = container_of(work, struct afs_server, destroyer);
+ struct afs_net *net = server->cell->net;
- _enter("");
+ afs_see_server(server, afs_server_trace_see_destroyer);
- /* Trawl the server list looking for servers that have expired from
- * lack of use.
- */
- read_seqlock_excl(&net->fs_lock);
-
- for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
- struct afs_server *server =
- rb_entry(cursor, struct afs_server, uuid_rb);
- int active = atomic_read(&server->active);
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ return;
- _debug("manage %pU %u", &server->uuid, active);
+ if (!afs_remove_server_from_cell(server))
+ return;
- if (purging) {
- trace_afs_server(server, atomic_read(&server->ref),
- active, afs_server_trace_purging);
- if (active != 0)
- pr_notice("Can't purge s=%08x\n", server->debug_id);
- }
+ timer_shutdown_sync(&server->timer);
+ cancel_work(&server->destroyer);
- if (active == 0) {
- time64_t expire_at = server->unuse_time;
-
- if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
- !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
- expire_at += afs_server_gc_delay;
- if (purging || expire_at <= now) {
- server->gc_next = gc_list;
- gc_list = server;
- } else if (expire_at < next_manage) {
- next_manage = expire_at;
- }
- }
- }
+ if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
+ afs_give_up_callbacks(net, server);
- read_sequnlock_excl(&net->fs_lock);
+ /* Unbind the rxrpc_peer records from the server. */
+ estate = rcu_access_pointer(server->endpoint_state);
+ if (estate)
+ afs_set_peer_appdata(server, estate->addresses, NULL);
- /* Update the timer on the way out. We have to pass an increment on
- * servers_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
+ write_seqlock(&net->fs_lock);
+ list_del_init(&server->probe_link);
+ if (!hlist_unhashed(&server->proc_link))
+ hlist_del_rcu(&server->proc_link);
+ write_sequnlock(&net->fs_lock);
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->fs_manager))
- afs_inc_servers_outstanding(net);
- } else {
- afs_set_server_timer(net, next_manage - now);
- }
- }
+ afs_put_server(net, server, afs_server_trace_destroy);
+}
- afs_gc_servers(net, gc_list);
+static void afs_server_timer(struct timer_list *timer)
+{
+ struct afs_server *server = container_of(timer, struct afs_server, timer);
- afs_dec_servers_outstanding(net);
- _leave(" [%d]", atomic_read(&net->servers_outstanding));
+ afs_see_server(server, afs_server_trace_see_timer);
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ schedule_work(&server->destroyer);
}
-static void afs_queue_server_manager(struct afs_net *net)
+/*
+ * Wake up all the servers in a cell so that they can purge themselves.
+ */
+void afs_purge_servers(struct afs_cell *cell)
{
- afs_inc_servers_outstanding(net);
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ struct afs_server *server;
+ struct rb_node *rb;
+
+ down_read(&cell->fs_lock);
+ for (rb = rb_first(&cell->fs_servers); rb; rb = rb_next(rb)) {
+ server = rb_entry(rb, struct afs_server, uuid_rb);
+ afs_see_server(server, afs_server_trace_see_purge);
+ schedule_work(&server->destroyer);
+ }
+ up_read(&cell->fs_lock);
}
/*
- * Purge list of servers.
+ * Wait for outstanding servers.
*/
-void afs_purge_servers(struct afs_net *net)
+void afs_wait_for_servers(struct afs_net *net)
{
_enter("");
- if (del_timer_sync(&net->fs_timer))
- afs_dec_servers_outstanding(net);
-
- afs_queue_server_manager(net);
-
- _debug("wait");
atomic_dec(&net->servers_outstanding);
wait_var_event(&net->servers_outstanding,
!atomic_read(&net->servers_outstanding));
@@ -627,40 +540,42 @@ void afs_purge_servers(struct afs_net *net)
* Get an update for a server's address list.
*/
static noinline bool afs_update_server_record(struct afs_operation *op,
- struct afs_server *server)
+ struct afs_server *server,
+ struct key *key)
{
- struct afs_addr_list *alist, *discard;
+ struct afs_endpoint_state *estate;
+ struct afs_addr_list *alist;
+ bool has_addrs;
_enter("");
- trace_afs_server(server, atomic_read(&server->ref), atomic_read(&server->active),
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
+ atomic_read(&server->active),
afs_server_trace_update);
- alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
+ alist = afs_vl_lookup_addrs(server, op->key);
if (IS_ERR(alist)) {
+ rcu_read_lock();
+ estate = rcu_dereference(server->endpoint_state);
+ has_addrs = estate->addresses;
+ rcu_read_unlock();
+
if ((PTR_ERR(alist) == -ERESTARTSYS ||
PTR_ERR(alist) == -EINTR) &&
(op->flags & AFS_OPERATION_UNINTR) &&
- server->addresses) {
+ has_addrs) {
_leave(" = t [intr]");
return true;
}
- op->error = PTR_ERR(alist);
- _leave(" = f [%d]", op->error);
+ afs_op_set_error(op, PTR_ERR(alist));
+ _leave(" = f [%d]", afs_op_error(op));
return false;
}
- discard = alist;
- if (server->addr_version != alist->version) {
- write_lock(&server->fs_lock);
- discard = rcu_dereference_protected(server->addresses,
- lockdep_is_held(&server->fs_lock));
- rcu_assign_pointer(server->addresses, alist);
- server->addr_version = alist->version;
- write_unlock(&server->fs_lock);
- }
+ if (server->addr_version != alist->version)
+ afs_fs_probe_fileserver(op->net, server, alist, key);
- afs_put_addrlist(discard);
+ afs_put_addrlist(alist, afs_alist_trace_put_server_update);
_leave(" = t");
return true;
}
@@ -668,7 +583,8 @@ static noinline bool afs_update_server_record(struct afs_operation *op,
/*
* See if a server's address list needs updating.
*/
-bool afs_check_server_record(struct afs_operation *op, struct afs_server *server)
+bool afs_check_server_record(struct afs_operation *op, struct afs_server *server,
+ struct key *key)
{
bool success;
int ret, retries = 0;
@@ -688,7 +604,7 @@ retry:
update:
if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
clear_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
- success = afs_update_server_record(op, server);
+ success = afs_update_server_record(op, server, key);
clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
_leave(" = %d", success);
@@ -700,7 +616,7 @@ wait:
(op->flags & AFS_OPERATION_UNINTR) ?
TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
- op->error = ret;
+ afs_op_set_error(op, ret);
_leave(" = f [intr]");
return false;
}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index ed9056703505..20d5474837df 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -16,43 +16,70 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
if (slist && refcount_dec_and_test(&slist->usage)) {
for (i = 0; i < slist->nr_servers; i++)
afs_unuse_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
- kfree(slist);
+ afs_server_trace_unuse_slist);
+ kfree_rcu(slist, rcu);
}
}
/*
* Build a server list from a VLDB record.
*/
-struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
+struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
struct key *key,
- struct afs_vldb_entry *vldb,
- u8 type_mask)
+ struct afs_vldb_entry *vldb)
{
struct afs_server_list *slist;
struct afs_server *server;
- int ret = -ENOMEM, nr_servers = 0, i, j;
-
- for (i = 0; i < vldb->nr_servers; i++)
- if (vldb->fs_mask[i] & type_mask)
- nr_servers++;
+ unsigned int type_mask = 1 << volume->type;
+ bool use_newrepsites = false;
+ int ret = -ENOMEM, nr_servers = 0, newrep = 0, i, j, usable = 0;
+
+ /* Work out if we're going to restrict to NEWREPSITE-marked servers or
+ * not. If at least one site is marked as NEWREPSITE, then it's likely
+ * that "vos release" is busy updating RO sites. We cut over from one
+ * to the other when >=50% of the sites have been updated. Sites that
+ * are in the process of being updated are marked DONTUSE.
+ */
+ for (i = 0; i < vldb->nr_servers; i++) {
+ if (!(vldb->fs_mask[i] & type_mask))
+ continue;
+ nr_servers++;
+ if (vldb->vlsf_flags[i] & AFS_VLSF_DONTUSE)
+ continue;
+ usable++;
+ if (vldb->vlsf_flags[i] & AFS_VLSF_NEWREPSITE)
+ newrep++;
+ }
slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
if (!slist)
goto error;
+ if (newrep) {
+ if (newrep < usable / 2) {
+ slist->ro_replicating = AFS_RO_REPLICATING_USE_OLD;
+ } else {
+ slist->ro_replicating = AFS_RO_REPLICATING_USE_NEW;
+ use_newrepsites = true;
+ }
+ }
+
refcount_set(&slist->usage, 1);
rwlock_init(&slist->lock);
- for (i = 0; i < AFS_MAXTYPES; i++)
- slist->vids[i] = vldb->vid[i];
-
/* Make sure a record exists for each server in the list. */
for (i = 0; i < vldb->nr_servers; i++) {
+ unsigned long se_flags = 0;
+ bool newrepsite = vldb->vlsf_flags[i] & AFS_VLSF_NEWREPSITE;
+
if (!(vldb->fs_mask[i] & type_mask))
continue;
+ if (vldb->vlsf_flags[i] & AFS_VLSF_DONTUSE)
+ __set_bit(AFS_SE_EXCLUDED, &se_flags);
+ if (newrep && (newrepsite ^ use_newrepsites))
+ __set_bit(AFS_SE_EXCLUDED, &se_flags);
- server = afs_lookup_server(cell, key, &vldb->fs_server[i],
+ server = afs_lookup_server(volume->cell, key, &vldb->fs_server[i],
vldb->addr_version[i]);
if (IS_ERR(server)) {
ret = PTR_ERR(server);
@@ -70,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
break;
if (j < slist->nr_servers) {
if (slist->servers[j].server == server) {
- afs_put_server(cell->net, server,
- afs_server_trace_put_slist_isort);
+ afs_unuse_server_notime(volume->cell->net, server,
+ afs_server_trace_unuse_slist_isort);
continue;
}
@@ -81,6 +108,9 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
}
slist->servers[j].server = server;
+ slist->servers[j].volume = volume;
+ slist->servers[j].flags = se_flags;
+ slist->servers[j].cb_expires_at = AFS_NO_CB_PROMISE;
slist->nr_servers++;
}
@@ -92,7 +122,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
return slist;
error_2:
- afs_put_serverlist(cell->net, slist);
+ afs_put_serverlist(volume->cell->net, slist);
error:
return ERR_PTR(ret);
}
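/*
 * Illustrative sketch, not part of this patch: the NEWREPSITE cut-over
 * rule described in the comment near the top of afs_alloc_server_list()
 * above, pulled out as a hypothetical helper.  "newrep" and "usable" are
 * the counts computed by the loop over the VLDB entry.
 */
static inline bool example_use_newrepsites(int newrep, int usable)
{
	/* Switch to the NEWREPSITE-marked sites once at least half of the
	 * usable sites have been updated by "vos release"; before that,
	 * keep using the old replica set.
	 */
	return newrep && newrep >= usable / 2;
}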
@@ -103,27 +133,117 @@ error:
bool afs_annotate_server_list(struct afs_server_list *new,
struct afs_server_list *old)
{
- struct afs_server *cur;
- int i, j;
+ unsigned long mask = 1UL << AFS_SE_EXCLUDED;
+ int i;
- if (old->nr_servers != new->nr_servers)
+ if (old->nr_servers != new->nr_servers ||
+ old->ro_replicating != new->ro_replicating)
goto changed;
- for (i = 0; i < old->nr_servers; i++)
+ for (i = 0; i < old->nr_servers; i++) {
if (old->servers[i].server != new->servers[i].server)
goto changed;
-
+ if ((old->servers[i].flags & mask) != (new->servers[i].flags & mask))
+ goto changed;
+ }
return false;
-
changed:
- /* Maintain the same preferred server as before if possible. */
- cur = old->servers[old->preferred].server;
- for (j = 0; j < new->nr_servers; j++) {
- if (new->servers[j].server == cur) {
- new->preferred = j;
- break;
+ return true;
+}
+
+/*
+ * Attach a volume to the servers it is going to use.
+ */
+void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist)
+{
+ struct afs_server_entry *se, *pe;
+ struct afs_server *server;
+ struct list_head *p;
+ unsigned int i;
+
+ down_write(&volume->cell->vs_lock);
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ se = &slist->servers[i];
+ server = se->server;
+
+ list_for_each(p, &server->volumes) {
+ pe = list_entry(p, struct afs_server_entry, slink);
+ if (volume->vid <= pe->volume->vid)
+ break;
}
+ list_add_tail(&se->slink, p);
}
- return true;
+ slist->attached = true;
+ up_write(&volume->cell->vs_lock);
+}
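/*
 * Illustrative sketch, not part of this patch: the ordered insertion used
 * by afs_attach_volume_to_servers() above, shown on a plain integer key.
 * Entries on the per-server list are kept sorted ascending by volume ID
 * (the vid comparison above).  All names here are hypothetical.
 */
#include <linux/list.h>

struct example_entry {
	struct list_head	link;
	unsigned long long	vid;
};

static void example_insert_sorted(struct list_head *head, struct example_entry *e)
{
	struct example_entry *cur;
	struct list_head *p;

	list_for_each(p, head) {
		cur = list_entry(p, struct example_entry, link);
		if (e->vid <= cur->vid)
			break;
	}
	/* list_add_tail() inserts before @p, i.e. before the first entry
	 * with a greater-or-equal key, or at the end of the list.
	 */
	list_add_tail(&e->link, p);
}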
+
+/*
+ * Reattach a volume to the servers it is going to use when server list is
+ * replaced. We try to switch the attachment points to avoid rewalking the
+ * lists.
+ */
+void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *new,
+ struct afs_server_list *old)
+{
+ unsigned int n = 0, o = 0;
+
+ down_write(&volume->cell->vs_lock);
+
+ while (n < new->nr_servers || o < old->nr_servers) {
+ struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL;
+ struct afs_server_entry *po = o < old->nr_servers ? &old->servers[o] : NULL;
+ struct afs_server_entry *s;
+ struct list_head *p;
+ int diff;
+
+ if (pn && po && pn->server == po->server) {
+ pn->cb_expires_at = po->cb_expires_at;
+ list_replace(&po->slink, &pn->slink);
+ n++;
+ o++;
+ continue;
+ }
+
+ if (pn && po)
+ diff = memcmp(&pn->server->uuid, &po->server->uuid,
+ sizeof(pn->server->uuid));
+ else
+ diff = pn ? -1 : 1;
+
+ if (diff < 0) {
+ list_for_each(p, &pn->server->volumes) {
+ s = list_entry(p, struct afs_server_entry, slink);
+ if (volume->vid <= s->volume->vid)
+ break;
+ }
+ list_add_tail(&pn->slink, p);
+ n++;
+ } else {
+ list_del(&po->slink);
+ o++;
+ }
+ }
+
+ up_write(&volume->cell->vs_lock);
+}
+
+/*
+ * Detach a volume from the servers it has been using.
+ */
+void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist)
+{
+ unsigned int i;
+
+ if (!slist->attached)
+ return;
+
+ down_write(&volume->cell->vs_lock);
+
+ for (i = 0; i < slist->nr_servers; i++)
+ list_del(&slist->servers[i].slink);
+
+ slist->attached = false;
+ up_write(&volume->cell->vs_lock);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index d110def8aa8e..d672b7ab57ae 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -55,6 +55,7 @@ int afs_net_id;
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
+ .write_inode = netfs_unpin_writeback,
.drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
.free_inode = afs_free_inode,
@@ -193,8 +194,6 @@ static int afs_show_options(struct seq_file *m, struct dentry *root)
if (as->dyn_root)
seq_puts(m, ",dyn");
- if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags))
- seq_puts(m, ",autocell");
switch (as->flock_mode) {
case afs_flock_mode_unset: break;
case afs_flock_mode_local: p = "local"; break;
@@ -291,13 +290,14 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
/* lookup the cell record */
if (cellname) {
cell = afs_lookup_cell(ctx->net, cellname, cellnamesz,
- NULL, false);
+ NULL, AFS_LOOKUP_CELL_DIRECT_MOUNT,
+ afs_cell_trace_use_lookup_mount);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%*.*s'\n",
cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_parse);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_parse);
afs_see_cell(cell, afs_cell_trace_see_source);
ctx->cell = cell;
}
@@ -380,8 +380,7 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = key;
if (ctx->volume) {
- afs_put_volume(ctx->net, ctx->volume,
- afs_volume_trace_put_validate_fc);
+ afs_put_volume(ctx->volume, afs_volume_trace_put_validate_fc);
ctx->volume = NULL;
}
@@ -395,7 +394,7 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = NULL;
cell = afs_use_cell(ctx->cell->alias_of,
afs_cell_trace_use_fc_alias);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
ctx->cell = cell;
goto reget_key;
}
@@ -406,6 +405,10 @@ static int afs_validate_fc(struct fs_context *fc)
return PTR_ERR(volume);
ctx->volume = volume;
+ if (volume->type != AFSVL_RWVOL) {
+ ctx->flock_mode = afs_flock_mode_local;
+ fc->sb_flags |= SB_RDONLY;
+ }
}
return 0;
@@ -464,7 +467,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* allocate the root inode and dentry */
if (as->dyn_root) {
- inode = afs_iget_pseudo_dir(sb, true);
+ inode = afs_dynroot_iget_root(sb);
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
@@ -474,21 +477,15 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (ctx->autocell || as->dyn_root)
- set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
-
ret = -ENOMEM;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto error;
if (as->dyn_root) {
- sb->s_d_op = &afs_dynroot_dentry_operations;
- ret = afs_dynroot_populate(sb);
- if (ret < 0)
- goto error;
+ set_default_d_op(sb, &afs_dynroot_dentry_operations);
} else {
- sb->s_d_op = &afs_fs_dentry_operations;
+ set_default_d_op(sb, &afs_fs_dentry_operations);
rcu_assign_pointer(as->volume->sb, sb);
}
@@ -523,9 +520,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
static void afs_destroy_sbi(struct afs_super_info *as)
{
if (as) {
- struct afs_net *net = afs_net(as->net_ns);
- afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
- afs_unuse_cell(net, as->cell, afs_cell_trace_unuse_sbi);
+ afs_put_volume(as->volume, afs_volume_trace_put_destroy_sbi);
+ afs_unuse_cell(as->cell, afs_cell_trace_unuse_sbi);
put_net(as->net_ns);
kfree(as);
}
@@ -535,9 +531,6 @@ static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = AFS_FS_S(sb);
- if (as->dyn_root)
- afs_dynroot_depopulate(sb);
-
/* Clear the callback interests (which will do ilookup5) before
* deactivating the superblock.
*/
@@ -610,8 +603,8 @@ static void afs_free_fc(struct fs_context *fc)
struct afs_fs_context *ctx = fc->fs_private;
afs_destroy_sbi(fc->s_fs_info);
- afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_put_volume(ctx->volume, afs_volume_trace_put_free_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
key_put(ctx->key);
kfree(ctx);
}
@@ -658,8 +651,8 @@ static void afs_i_init_once(void *_vnode)
struct afs_vnode *vnode = _vnode;
memset(vnode, 0, sizeof(*vnode));
- inode_init_once(&vnode->vfs_inode);
- mutex_init(&vnode->io_lock);
+ inode_init_once(&vnode->netfs.inode);
+ INIT_LIST_HEAD(&vnode->io_lock_waiters);
init_rwsem(&vnode->validate_lock);
spin_lock_init(&vnode->wb_lock);
spin_lock_init(&vnode->lock);
@@ -667,6 +660,7 @@ static void afs_i_init_once(void *_vnode)
INIT_LIST_HEAD(&vnode->pending_locks);
INIT_LIST_HEAD(&vnode->granted_locks);
INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
+ INIT_LIST_HEAD(&vnode->cb_mmap_link);
seqlock_init(&vnode->cb_lock);
}
@@ -677,7 +671,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
{
struct afs_vnode *vnode;
- vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
+ vnode = alloc_inode_sb(sb, afs_inode_cachep, GFP_KERNEL);
if (!vnode)
return NULL;
@@ -686,13 +680,13 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
/* Reset anything that shouldn't leak from one inode to the next. */
memset(&vnode->fid, 0, sizeof(vnode->fid));
memset(&vnode->status, 0, sizeof(vnode->status));
+ afs_vnode_set_cache(vnode, NULL);
vnode->volume = NULL;
vnode->lock_key = NULL;
vnode->permit_cache = NULL;
-#ifdef CONFIG_AFS_FSCACHE
- vnode->cache = NULL;
-#endif
+ vnode->directory = NULL;
+ vnode->directory_size = 0;
vnode->flags = 1 << AFS_VNODE_UNSET;
vnode->lock_state = AFS_VNODE_LOCK_NONE;
@@ -700,8 +694,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
init_rwsem(&vnode->rmdir_lock);
INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
- _leave(" = %p", &vnode->vfs_inode);
- return &vnode->vfs_inode;
+ _leave(" = %p", &vnode->netfs.inode);
+ return &vnode->netfs.inode;
}
static void afs_free_inode(struct inode *inode)
diff --git a/fs/afs/validation.c b/fs/afs/validation.c
new file mode 100644
index 000000000000..0ba8336c9025
--- /dev/null
+++ b/fs/afs/validation.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* vnode and volume validity verification.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include "internal.h"
+
+/*
+ * Data validation is managed through a number of mechanisms from the server:
+ *
+ * (1) On first contact with a server (such as if it has just been rebooted),
+ * the server sends us a CB.InitCallBackState* request.
+ *
+ * (2) On a RW volume, in response to certain vnode (inode)-accessing RPC
+ * calls, the server maintains a time-limited per-vnode promise that it
+ * will send us a CB.CallBack request if a third party alters the vnodes
+ * accessed.
+ *
+ * Note that vnode-level callbacks may also be sent for other reasons,
+ * such as filelock release.
+ *
+ * (3) On a RO (or Backup) volume, in response to certain vnode-accessing RPC
+ * calls, each server maintains a time-limited per-volume promise that it
+ * will send us a CB.CallBack request if the RO volume is updated to a
+ * snapshot of the RW volume ("vos release"). This is an atomic event
+ * that cuts over all instances of the RO volume across multiple servers
+ * simultaneously.
+ *
+ * Note that volume-level callbacks may also be sent for other reasons,
+ * such as the volumeserver taking over control of the volume from the
+ * fileserver.
+ *
+ * Note also that each server maintains an independent time limit on an
+ * independent callback.
+ *
+ * (4) Certain RPC calls include a volume information record "VolSync" in
+ * their reply. This contains a creation date for the volume that should
+ * remain unchanged for a RW volume (but will be changed if the volume is
+ * restored from backup) or will be bumped to the time of snapshotting
+ * when a RO volume is released.
+ *
+ * In order to track these events, the following are provided:
+ *
+ * ->cb_v_break. A counter of events that might mean that the contents of
+ * a volume have been altered since we last checked a vnode.
+ *
+ * ->cb_v_check. A counter of the number of events that we've sent a
+ * query to the server for. Everything's up to date if this equals
+ * cb_v_break.
+ *
+ * ->cb_scrub. A counter of the number of regression events for which we
+ * have to completely wipe the cache.
+ *
+ * ->cb_ro_snapshot. A counter of the number of times that we've
+ * recognised that a RO volume has been updated.
+ *
+ * ->cb_break. A counter of events that might mean that the contents of a
+ * vnode have been altered.
+ *
+ * ->cb_expires_at. The time at which the callback promise expires or
+ * AFS_NO_CB_PROMISE if we have no promise.
+ *
+ * The way we manage things is:
+ *
+ * (1) When a volume-level CB.CallBack occurs, we increment ->cb_v_break on
+ * the volume and reset ->cb_expires_at (ie. set AFS_NO_CB_PROMISE) on the
+ * volume and volume's server record.
+ *
+ * (2) When a CB.InitCallBackState occurs, we treat this as a volume-level
+ * callback break on all the volumes that have been using that server
+ * (ie. increment ->cb_v_break and reset ->cb_expires_at).
+ *
+ * (3) When a vnode-level CB.CallBack occurs, we increment ->cb_break on the
+ * vnode and reset its ->cb_expires_at. If the vnode is mmapped, we also
+ * dispatch a work item to unmap all PTEs to the vnode's pagecache to
+ * force reentry to the filesystem for revalidation.
+ *
+ * (4) When entering the filesystem, we call afs_validate() to check the
+ * validity of a vnode. This first checks to see if ->cb_v_check and
+ * ->cb_v_break match, and if they don't, we lock volume->cb_check_lock
+ * exclusively and perform an FS.FetchStatus on the vnode.
+ *
+ * After checking the volume, we check the vnode. If there's a mismatch
+ * between the volume counters and the vnode's mirrors of those counters,
+ * we lock vnode->validate_lock and issue an FS.FetchStatus on the vnode.
+ *
+ * (5) When the reply from FS.FetchStatus arrives, the VolSync record is
+ * parsed:
+ *
+ * (A) If the Creation timestamp has changed on a RW volume or regressed
+ * on a RO volume, we try to increment ->cb_scrub; if it advances on a
+ * RO volume, we assume "vos release" happened and try to increment
+ * ->cb_ro_snapshot.
+ *
+ * (B) If the Update timestamp has regressed, we try to increment
+ * ->cb_scrub.
+ *
+ * Note that in both of these cases, we only do the increment if we can
+ * cmpxchg the value of the timestamp from the value we noted before the
+ * op. This tries to prevent parallel ops from fighting one another.
+ *
+ * volume->cb_v_check is then set to ->cb_v_break.
+ *
+ * (6) The AFSCallBack record included in the FS.FetchStatus reply is also
+ * parsed and used to set the promise in ->cb_expires_at for the vnode,
+ * the volume and the volume's server record.
+ *
+ * (7) If ->cb_scrub is seen to have advanced, we invalidate the pagecache for
+ * the vnode.
+ */
+
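/*
 * Illustrative sketch, not part of this patch: the break/check counter
 * pattern described in the comment above.  A volume-level query is only
 * needed when the number of break events seen (->cb_v_break) has run
 * ahead of the number of events we have already queried the server about
 * (->cb_v_check); both fields are the atomics added by this series.
 */
static inline bool example_volume_needs_check(const struct afs_volume *volume)
{
	return atomic_read(&volume->cb_v_check) !=
	       atomic_read(&volume->cb_v_break);
}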
+/*
+ * Check the validity of a vnode/inode and its parent volume.
+ */
+bool afs_check_validity(const struct afs_vnode *vnode)
+{
+ const struct afs_volume *volume = vnode->volume;
+ enum afs_vnode_invalid_trace trace = afs_vnode_valid_trace;
+ time64_t cb_expires_at = atomic64_read(&vnode->cb_expires_at);
+ time64_t deadline = ktime_get_real_seconds() + 10;
+
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ return true;
+
+ if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break))
+ trace = afs_vnode_invalid_trace_cb_v_break;
+ else if (cb_expires_at == AFS_NO_CB_PROMISE)
+ trace = afs_vnode_invalid_trace_no_cb_promise;
+ else if (cb_expires_at <= deadline)
+ trace = afs_vnode_invalid_trace_expired;
+ else if (volume->cb_expires_at <= deadline)
+ trace = afs_vnode_invalid_trace_vol_expired;
+ else if (vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot))
+ trace = afs_vnode_invalid_trace_cb_ro_snapshot;
+ else if (vnode->cb_scrub != atomic_read(&volume->cb_scrub))
+ trace = afs_vnode_invalid_trace_cb_scrub;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
+ trace = afs_vnode_invalid_trace_zap_data;
+ else
+ return true;
+ trace_afs_vnode_invalid(vnode, trace);
+ return false;
+}
+
+/*
+ * See if the server we've just talked to is currently excluded.
+ */
+static bool __afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume)
+{
+ const struct afs_server_entry *se;
+ const struct afs_server_list *slist;
+ bool is_excluded = true;
+ int i;
+
+ rcu_read_lock();
+
+ slist = rcu_dereference(volume->servers);
+ for (i = 0; i < slist->nr_servers; i++) {
+ se = &slist->servers[i];
+ if (op->server == se->server) {
+ is_excluded = test_bit(AFS_SE_EXCLUDED, &se->flags);
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return is_excluded;
+}
+
+/*
+ * Update the volume's server list when the creation time changes and see if
+ * the server we've just talked to is currently excluded.
+ */
+static int afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume)
+{
+ int ret;
+
+ if (__afs_is_server_excluded(op, volume))
+ return 1;
+
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
+ ret = afs_check_volume_status(op->volume, op);
+ if (ret < 0)
+ return ret;
+
+ return __afs_is_server_excluded(op, volume);
+}
+
+/*
+ * Handle a change to the volume creation time in the VolSync record.
+ */
+static int afs_update_volume_creation_time(struct afs_operation *op, struct afs_volume *volume)
+{
+ unsigned int snap;
+ time64_t cur = volume->creation_time;
+ time64_t old = op->pre_volsync.creation;
+ time64_t new = op->volsync.creation;
+ int ret;
+
+ _enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);
+
+ if (cur == TIME64_MIN) {
+ volume->creation_time = new;
+ return 0;
+ }
+
+ if (new == cur)
+ return 0;
+
+ /* Try to advance the creation timestamp from what we had before the
+ * operation to what we got back from the server. This should
+ * hopefully ensure that in a race between multiple operations only one
+ * of them will do this.
+ */
+ if (cur != old)
+ return 0;
+
+ /* If the creation time changes in an unexpected way, we need to scrub
+ * our caches. For a RW vol, this will only change if the volume is
+ * restored from a backup; for a RO/Backup vol, this will advance when
+ * the volume is updated to a new snapshot (eg. "vos release").
+ */
+ if (volume->type == AFSVL_RWVOL)
+ goto regressed;
+ if (volume->type == AFSVL_BACKVOL) {
+ if (new < old)
+ goto regressed;
+ goto advance;
+ }
+
+ /* We have an RO volume, so we need to query the VL server and look at the
+ * server flags to see if RW->RO replication is in progress.
+ */
+ ret = afs_is_server_excluded(op, volume);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ snap = atomic_read(&volume->cb_ro_snapshot);
+ trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_volume_excluded);
+ return ret;
+ }
+
+advance:
+ snap = atomic_inc_return(&volume->cb_ro_snapshot);
+ trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_for_vos_release);
+ volume->creation_time = new;
+ return 0;
+
+regressed:
+ atomic_inc(&volume->cb_scrub);
+ trace_afs_cb_v_break(volume->vid, 0, afs_cb_break_for_creation_regress);
+ volume->creation_time = new;
+ return 0;
+}
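/*
 * Illustrative sketch, not part of this patch: the "advance only if our
 * pre-op snapshot still matches" guard used by the two timestamp update
 * helpers here.  Under volume->volsync_lock, only the operation whose
 * recorded old value is still current installs the new time, so
 * concurrent replies do not fight over the stored timestamp.
 */
static bool example_try_advance_time(time64_t *stored, time64_t old, time64_t new)
{
	if (*stored != old)		/* another op already moved it on */
		return false;
	*stored = new;
	return true;
}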
+
+/*
+ * Handle a change to the volume update time in the VolSync record.
+ */
+static void afs_update_volume_update_time(struct afs_operation *op, struct afs_volume *volume)
+{
+ enum afs_cb_break_reason reason = afs_cb_break_no_break;
+ time64_t cur = volume->update_time;
+ time64_t old = op->pre_volsync.update;
+ time64_t new = op->volsync.update;
+
+ _enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);
+
+ if (cur == TIME64_MIN) {
+ volume->update_time = new;
+ return;
+ }
+
+ if (new == cur)
+ return;
+
+ /* If the volume update time changes in an unexpected way, we need to
+ * scrub our caches. For a RW vol, this will advance on every
+ * modification op; for a RO/Backup vol, this will advance when the
+ * volume is updated to a new snapshot (eg. "vos release").
+ */
+ if (new < old)
+ reason = afs_cb_break_for_update_regress;
+
+ /* Try to advance the update timestamp from what we had before the
+ * operation to what we got back from the server. This should
+ * hopefully ensure that in a race between multiple operations only one
+ * of them will do this.
+ */
+ if (cur == old) {
+ if (reason == afs_cb_break_for_update_regress) {
+ atomic_inc(&volume->cb_scrub);
+ trace_afs_cb_v_break(volume->vid, 0, reason);
+ }
+ volume->update_time = new;
+ }
+}
+
+static int afs_update_volume_times(struct afs_operation *op, struct afs_volume *volume)
+{
+ int ret = 0;
+
+ if (likely(op->volsync.creation == volume->creation_time &&
+ op->volsync.update == volume->update_time))
+ return 0;
+
+ mutex_lock(&volume->volsync_lock);
+ if (op->volsync.creation != volume->creation_time) {
+ ret = afs_update_volume_creation_time(op, volume);
+ if (ret < 0)
+ goto out;
+ }
+ if (op->volsync.update != volume->update_time)
+ afs_update_volume_update_time(op, volume);
+out:
+ mutex_unlock(&volume->volsync_lock);
+ return ret;
+}
+
+/*
+ * Update the state of a volume, including recording the expiration time of the
+ * callback promise. Returns 1 to redo the operation from the start.
+ */
+int afs_update_volume_state(struct afs_operation *op)
+{
+ struct afs_server_list *slist = op->server_list;
+ struct afs_server_entry *se = &slist->servers[op->server_index];
+ struct afs_callback *cb = &op->file[0].scb.callback;
+ struct afs_volume *volume = op->volume;
+ unsigned int cb_v_break = atomic_read(&volume->cb_v_break);
+ unsigned int cb_v_check = atomic_read(&volume->cb_v_check);
+ int ret;
+
+ _enter("%llx", op->volume->vid);
+
+ if (op->volsync.creation != TIME64_MIN || op->volsync.update != TIME64_MIN) {
+ ret = afs_update_volume_times(op, volume);
+ if (ret != 0) {
+ _leave(" = %d", ret);
+ return ret;
+ }
+ }
+
+ if (op->cb_v_break == cb_v_break &&
+ (op->file[0].scb.have_cb || op->file[1].scb.have_cb)) {
+ time64_t expires_at = cb->expires_at;
+
+ if (!op->file[0].scb.have_cb)
+ expires_at = op->file[1].scb.callback.expires_at;
+
+ se->cb_expires_at = expires_at;
+ volume->cb_expires_at = expires_at;
+ }
+ if (cb_v_check < op->cb_v_break)
+ atomic_cmpxchg(&volume->cb_v_check, cb_v_check, op->cb_v_break);
+ return 0;
+}
+
+/*
+ * mark the data attached to an inode as obsolete due to a write on the server
+ * - might also want to ditch all the outstanding writes and dirty pages
+ */
+static void afs_zap_data(struct afs_vnode *vnode)
+{
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
+
+ afs_invalidate_cache(vnode, 0);
+
+ /* nuke all the non-dirty pages that aren't locked, mapped or being
+ * written back in a regular file and completely discard the pages in a
+ * directory or symlink */
+ if (S_ISREG(vnode->netfs.inode.i_mode))
+ filemap_invalidate_inode(&vnode->netfs.inode, true, 0, LLONG_MAX);
+ else
+ filemap_invalidate_inode(&vnode->netfs.inode, false, 0, LLONG_MAX);
+}
+
+/*
+ * validate a vnode/inode
+ * - there are several things we need to check
+ * - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
+ * symlink)
+ * - parent dir metadata changed (security changes)
+ * - dentry data changed (write, truncate)
+ * - dentry metadata changed (security changes)
+ */
+int afs_validate(struct afs_vnode *vnode, struct key *key)
+{
+ struct afs_volume *volume = vnode->volume;
+ unsigned int cb_ro_snapshot, cb_scrub;
+ time64_t deadline = ktime_get_real_seconds() + 10;
+ bool zap = false, locked_vol = false;
+ int ret;
+
+ _enter("{v={%llx:%llu} fl=%lx},%x",
+ vnode->fid.vid, vnode->fid.vnode, vnode->flags,
+ key_serial(key));
+
+ if (afs_check_validity(vnode))
+ return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
+
+ ret = down_write_killable(&vnode->validate_lock);
+ if (ret < 0)
+ goto error;
+
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ ret = -ESTALE;
+ goto error_unlock;
+ }
+
+ /* Validate a volume after the v_break has changed or the volume
+ * callback expired. We only want to do this once per volume per
+ * v_break change. The actual work will be done when parsing the
+ * status fetch reply.
+ */
+ if (volume->cb_expires_at <= deadline ||
+ atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) {
+ ret = mutex_lock_interruptible(&volume->cb_check_lock);
+ if (ret < 0)
+ goto error_unlock;
+ locked_vol = true;
+ }
+
+ cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot);
+ cb_scrub = atomic_read(&volume->cb_scrub);
+ if (vnode->cb_ro_snapshot != cb_ro_snapshot ||
+ vnode->cb_scrub != cb_scrub)
+ unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
+
+ if (vnode->cb_ro_snapshot != cb_ro_snapshot ||
+ vnode->cb_scrub != cb_scrub ||
+ volume->cb_expires_at <= deadline ||
+ atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
+ atomic64_read(&vnode->cb_expires_at) <= deadline
+ ) {
+ ret = afs_fetch_status(vnode, key, false, NULL);
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ ret = -ESTALE;
+ }
+ goto error_unlock;
+ }
+
+ _debug("new promise [fl=%lx]", vnode->flags);
+ }
+
+ /* We can drop the volume lock now as the volume-level check is complete. */
+ if (locked_vol) {
+ mutex_unlock(&volume->cb_check_lock);
+ locked_vol = false;
+ }
+
+ cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot);
+ cb_scrub = atomic_read(&volume->cb_scrub);
+ _debug("vnode inval %x==%x %x==%x",
+ vnode->cb_ro_snapshot, cb_ro_snapshot,
+ vnode->cb_scrub, cb_scrub);
+ if (vnode->cb_scrub != cb_scrub)
+ zap = true;
+ vnode->cb_ro_snapshot = cb_ro_snapshot;
+ vnode->cb_scrub = cb_scrub;
+
+ /* if the vnode's data version number changed then its contents are
+ * different */
+ zap |= test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
+ if (zap)
+ afs_zap_data(vnode);
+ up_write(&vnode->validate_lock);
+ _leave(" = 0");
+ return 0;
+
+error_unlock:
+ if (locked_vol)
+ mutex_unlock(&volume->cb_check_lock);
+ up_write(&vnode->validate_lock);
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index f04a80e4f5c3..fc9676abd252 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -33,55 +33,6 @@ static struct afs_volume *afs_sample_volume(struct afs_cell *cell, struct key *k
}
/*
- * Compare two addresses.
- */
-static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a,
- const struct sockaddr_rxrpc *srx_b)
-{
- short port_a, port_b;
- int addr_a, addr_b, diff;
-
- diff = (short)srx_a->transport_type - (short)srx_b->transport_type;
- if (diff)
- goto out;
-
- switch (srx_a->transport_type) {
- case AF_INET: {
- const struct sockaddr_in *a = &srx_a->transport.sin;
- const struct sockaddr_in *b = &srx_b->transport.sin;
- addr_a = ntohl(a->sin_addr.s_addr);
- addr_b = ntohl(b->sin_addr.s_addr);
- diff = addr_a - addr_b;
- if (diff == 0) {
- port_a = ntohs(a->sin_port);
- port_b = ntohs(b->sin_port);
- diff = port_a - port_b;
- }
- break;
- }
-
- case AF_INET6: {
- const struct sockaddr_in6 *a = &srx_a->transport.sin6;
- const struct sockaddr_in6 *b = &srx_b->transport.sin6;
- diff = memcmp(&a->sin6_addr, &b->sin6_addr, 16);
- if (diff == 0) {
- port_a = ntohs(a->sin6_port);
- port_b = ntohs(b->sin6_port);
- diff = port_a - port_b;
- }
- break;
- }
-
- default:
- WARN_ON(1);
- diff = 1;
- }
-
-out:
- return diff;
-}
-
-/*
* Compare the address lists of a pair of fileservers.
*/
static int afs_compare_fs_alists(const struct afs_server *server_a,
@@ -90,13 +41,13 @@ static int afs_compare_fs_alists(const struct afs_server *server_a,
const struct afs_addr_list *la, *lb;
int a = 0, b = 0, addr_matches = 0;
- la = rcu_dereference(server_a->addresses);
- lb = rcu_dereference(server_b->addresses);
+ la = rcu_dereference(server_a->endpoint_state)->addresses;
+ lb = rcu_dereference(server_b->endpoint_state)->addresses;
while (a < la->nr_addrs && b < lb->nr_addrs) {
- const struct sockaddr_rxrpc *srx_a = &la->addrs[a];
- const struct sockaddr_rxrpc *srx_b = &lb->addrs[b];
- int diff = afs_compare_addrs(srx_a, srx_b);
+ unsigned long pa = (unsigned long)la->addrs[a].peer;
+ unsigned long pb = (unsigned long)lb->addrs[b].peer;
+ long diff = pa - pb;
if (diff < 0) {
a++;
@@ -126,7 +77,7 @@ static int afs_compare_volume_slists(const struct afs_volume *vol_a,
lb = rcu_dereference(vol_b->servers);
for (i = 0; i < AFS_MAXTYPES; i++)
- if (la->vids[i] != lb->vids[i])
+ if (vol_a->vids[i] != vol_b->vids[i])
return 0;
while (a < la->nr_servers && b < lb->nr_servers) {
@@ -205,7 +156,7 @@ static int afs_query_for_alias_one(struct afs_cell *cell, struct key *key,
/* And see if it's in the new cell. */
volume = afs_sample_volume(cell, key, pvol->name, pvol->name_len);
if (IS_ERR(volume)) {
- afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias);
+ afs_put_volume(pvol, afs_volume_trace_put_query_alias);
if (PTR_ERR(volume) != -ENOMEDIUM)
return PTR_ERR(volume);
/* That volume is not in the new cell, so not an alias */
@@ -223,8 +174,8 @@ static int afs_query_for_alias_one(struct afs_cell *cell, struct key *key,
rcu_read_unlock();
}
- afs_put_volume(cell->net, volume, afs_volume_trace_put_query_alias);
- afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias);
+ afs_put_volume(volume, afs_volume_trace_put_query_alias);
+ afs_put_volume(pvol, afs_volume_trace_put_query_alias);
return ret;
}
@@ -254,11 +205,11 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
goto is_alias;
if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
return -ERESTARTSYS;
}
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
}
mutex_unlock(&cell->net->proc_cells_lock);
@@ -285,7 +236,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
while (afs_select_vlserver(&vc)) {
if (!test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) {
- vc.ac.error = -EOPNOTSUPP;
+ vc.call_error = -EOPNOTSUPP;
skipped = true;
continue;
}
@@ -302,6 +253,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
{
struct afs_cell *master;
+ size_t name_len;
char *cell_name;
cell_name = afs_vl_get_cell_name(cell, key);
@@ -313,8 +265,13 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
return 0;
}
- master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
- NULL, false);
+ name_len = strlen(cell_name);
+ if (!name_len || name_len > AFS_MAXCELLNAME)
+ master = ERR_PTR(-EOPNOTSUPP);
+ else
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL,
+ AFS_LOOKUP_CELL_ALIAS_CHECK,
+ afs_cell_trace_use_lookup_canonical);
kfree(cell_name);
if (IS_ERR(master))
return PTR_ERR(master);
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
index 38b2ba1d9ec0..9b1c20daac53 100644
--- a/fs/afs/vl_list.c
+++ b/fs/afs/vl_list.c
@@ -13,16 +13,19 @@ struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len,
unsigned short port)
{
struct afs_vlserver *vlserver;
+ static atomic_t debug_ids;
vlserver = kzalloc(struct_size(vlserver, name, name_len + 1),
GFP_KERNEL);
if (vlserver) {
- atomic_set(&vlserver->usage, 1);
+ refcount_set(&vlserver->ref, 1);
rwlock_init(&vlserver->lock);
init_waitqueue_head(&vlserver->probe_wq);
spin_lock_init(&vlserver->probe_lock);
+ vlserver->debug_id = atomic_inc_return(&debug_ids);
vlserver->rtt = UINT_MAX;
vlserver->name_len = name_len;
+ vlserver->service_id = VL_SERVICE;
vlserver->port = port;
memcpy(vlserver->name, name, name_len);
}
@@ -33,19 +36,16 @@ static void afs_vlserver_rcu(struct rcu_head *rcu)
{
struct afs_vlserver *vlserver = container_of(rcu, struct afs_vlserver, rcu);
- afs_put_addrlist(rcu_access_pointer(vlserver->addresses));
+ afs_put_addrlist(rcu_access_pointer(vlserver->addresses),
+ afs_alist_trace_put_vlserver);
kfree_rcu(vlserver, rcu);
}
void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver)
{
- if (vlserver) {
- unsigned int u = atomic_dec_return(&vlserver->usage);
- //_debug("VL PUT %p{%u}", vlserver, u);
-
- if (u == 0)
- call_rcu(&vlserver->rcu, afs_vlserver_rcu);
- }
+ if (vlserver &&
+ refcount_dec_and_test(&vlserver->ref))
+ call_rcu(&vlserver->rcu, afs_vlserver_rcu);
}
struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
@@ -54,7 +54,7 @@ struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL);
if (vllist) {
- atomic_set(&vllist->usage, 1);
+ refcount_set(&vllist->ref, 1);
rwlock_init(&vllist->lock);
}
@@ -64,10 +64,7 @@ struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist)
{
if (vllist) {
- unsigned int u = atomic_dec_return(&vllist->usage);
-
- //_debug("VLLS PUT %p{%u}", vllist, u);
- if (u == 0) {
+ if (refcount_dec_and_test(&vllist->ref)) {
int i;
for (i = 0; i < vllist->nr_servers; i++) {
@@ -90,14 +87,15 @@ static u16 afs_extract_le16(const u8 **_b)
/*
* Build a VL server address list from a DNS queried server list.
*/
-static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
+static struct afs_addr_list *afs_extract_vl_addrs(struct afs_net *net,
+ const u8 **_b, const u8 *end,
u8 nr_addrs, u16 port)
{
struct afs_addr_list *alist;
const u8 *b = *_b;
int ret = -EINVAL;
- alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE, port);
+ alist = afs_alloc_addrlist(nr_addrs);
if (!alist)
return ERR_PTR(-ENOMEM);
if (nr_addrs == 0)
@@ -116,7 +114,9 @@ static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
goto error;
}
memcpy(x, b, 4);
- afs_merge_fs_addr4(alist, x[0], port);
+ ret = afs_merge_fs_addr4(net, alist, x[0], port);
+ if (ret < 0)
+ goto error;
b += 4;
break;
@@ -126,7 +126,9 @@ static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
goto error;
}
memcpy(x, b, 16);
- afs_merge_fs_addr6(alist, x, port);
+ ret = afs_merge_fs_addr6(net, alist, x, port);
+ if (ret < 0)
+ goto error;
b += 16;
break;
@@ -147,7 +149,7 @@ static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
error:
*_b = b;
- afs_put_addrlist(alist);
+ afs_put_addrlist(alist, afs_alist_trace_put_parse_error);
return ERR_PTR(ret);
}
@@ -254,7 +256,7 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
/* Extract the addresses - note that we can't skip this as we
* have to advance the payload pointer.
*/
- addrs = afs_extract_vl_addrs(&b, end, bs.nr_addrs, bs.port);
+ addrs = afs_extract_vl_addrs(cell->net, &b, end, bs.nr_addrs, bs.port);
if (IS_ERR(addrs)) {
ret = PTR_ERR(addrs);
goto error_2;
@@ -262,7 +264,7 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
if (vllist->nr_servers >= nr_servers) {
_debug("skip %u >= %u", vllist->nr_servers, nr_servers);
- afs_put_addrlist(addrs);
+ afs_put_addrlist(addrs, afs_alist_trace_put_parse_empty);
afs_put_vlserver(cell->net, server);
continue;
}
@@ -271,7 +273,7 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
addrs->status = bs.status;
if (addrs->nr_addrs == 0) {
- afs_put_addrlist(addrs);
+ afs_put_addrlist(addrs, afs_alist_trace_put_parse_empty);
if (!rcu_access_pointer(server->addresses)) {
afs_put_vlserver(cell->net, server);
continue;
@@ -283,7 +285,7 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
old = rcu_replace_pointer(server->addresses, old,
lockdep_is_held(&server->lock));
write_unlock(&server->lock);
- afs_put_addrlist(old);
+ afs_put_addrlist(old, afs_alist_trace_put_vlserver_old);
}
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index d1c7068b4346..3d2e0c925460 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -46,11 +46,12 @@ static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up)
*/
void afs_vlserver_probe_result(struct afs_call *call)
{
- struct afs_addr_list *alist = call->alist;
+ struct afs_addr_list *alist = call->vl_probe;
struct afs_vlserver *server = call->vlserver;
+ struct afs_address *addr = &alist->addrs[call->probe_index];
unsigned int server_index = call->server_index;
unsigned int rtt_us = 0;
- unsigned int index = call->addr_ix;
+ unsigned int index = call->probe_index;
bool have_result = false;
int ret = call->error;
@@ -89,7 +90,7 @@ void afs_vlserver_probe_result(struct afs_call *call)
case -ETIME:
default:
clear_bit(index, &alist->responded);
- set_bit(index, &alist->failed);
+ set_bit(index, &alist->probe_failed);
if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) &&
(server->probe.error == 0 ||
server->probe.error == -ETIMEDOUT ||
@@ -101,22 +102,22 @@ void afs_vlserver_probe_result(struct afs_call *call)
responded:
set_bit(index, &alist->responded);
- clear_bit(index, &alist->failed);
+ clear_bit(index, &alist->probe_failed);
if (call->service_id == YFS_VL_SERVICE) {
server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;
set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
- alist->addrs[index].srx_service = call->service_id;
+ server->service_id = call->service_id;
} else {
server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;
if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) {
clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
- alist->addrs[index].srx_service = call->service_id;
+ server->service_id = call->service_id;
}
}
- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
- rtt_us < server->probe.rtt) {
+ rtt_us = rxrpc_kernel_get_srtt(addr->peer);
+ if (rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
server->rtt = rtt_us;
alist->preferred = index;
@@ -130,8 +131,10 @@ responded:
out:
spin_unlock(&server->probe_lock);
- _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
- server_index, index, &alist->addrs[index].transport, rtt_us, ret);
+ trace_afs_vl_probe(server, false, alist, index, call->error, call->abort_code, rtt_us);
+ _debug("probe [%u][%u] %pISpc rtt=%d ret=%d",
+ server_index, index, rxrpc_kernel_remote_addr(addr->peer),
+ rtt_us, ret);
afs_done_one_vl_probe(server, have_result);
}
@@ -146,35 +149,52 @@ static bool afs_do_probe_vlserver(struct afs_net *net,
unsigned int server_index,
struct afs_error *_e)
{
- struct afs_addr_cursor ac = {
- .index = 0,
- };
+ struct afs_addr_list *alist;
struct afs_call *call;
+ unsigned long unprobed;
+ unsigned int index, i;
bool in_progress = false;
+ int best_prio;
_enter("%s", server->name);
read_lock(&server->lock);
- ac.alist = rcu_dereference_protected(server->addresses,
- lockdep_is_held(&server->lock));
+ alist = rcu_dereference_protected(server->addresses,
+ lockdep_is_held(&server->lock));
+ afs_get_addrlist(alist, afs_alist_trace_get_vlprobe);
read_unlock(&server->lock);
- atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+ atomic_set(&server->probe_outstanding, alist->nr_addrs);
memset(&server->probe, 0, sizeof(server->probe));
server->probe.rtt = UINT_MAX;
- for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
- call = afs_vl_get_capabilities(net, &ac, key, server,
+ unprobed = (1UL << alist->nr_addrs) - 1;
+ while (unprobed) {
+ best_prio = -1;
+ index = 0;
+ for (i = 0; i < alist->nr_addrs; i++) {
+ if (test_bit(i, &unprobed) &&
+ alist->addrs[i].prio > best_prio) {
+ index = i;
+ best_prio = alist->addrs[i].prio;
+ }
+ }
+ __clear_bit(index, &unprobed);
+
+ trace_afs_vl_probe(server, true, alist, index, 0, 0, 0);
+ call = afs_vl_get_capabilities(net, alist, index, key, server,
server_index);
if (!IS_ERR(call)) {
+ afs_prioritise_error(_e, call->error, call->abort_code);
afs_put_call(call);
in_progress = true;
} else {
- afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
+ afs_prioritise_error(_e, PTR_ERR(call), 0);
afs_done_one_vl_probe(server, false);
}
}
+ afs_put_addrlist(alist, afs_alist_trace_put_vlprobe);
return in_progress;
}
@@ -185,12 +205,10 @@ int afs_send_vl_probes(struct afs_net *net, struct key *key,
struct afs_vlserver_list *vllist)
{
struct afs_vlserver *server;
- struct afs_error e;
+ struct afs_error e = {};
bool in_progress = false;
int i;
- e.error = 0;
- e.responded = false;
for (i = 0; i < vllist->nr_servers; i++) {
server = vllist->servers[i].server;
if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index 488e58490b16..6ad9688d8f4b 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -17,18 +17,21 @@
bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cell,
struct key *key)
{
+ static atomic_t debug_ids;
+
memset(vc, 0, sizeof(*vc));
vc->cell = cell;
vc->key = key;
- vc->error = -EDESTADDRREQ;
- vc->ac.error = SHRT_MAX;
+ vc->cumul_error.error = -EDESTADDRREQ;
+ vc->nr_iterations = -1;
if (signal_pending(current)) {
- vc->error = -EINTR;
+ vc->cumul_error.error = -EINTR;
vc->flags |= AFS_VL_CURSOR_STOP;
return false;
}
+ vc->debug_id = atomic_inc_return(&debug_ids);
return true;
}
@@ -45,21 +48,27 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
cell->dns_expiry <= ktime_get_real_seconds()) {
dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
- afs_queue_cell(cell, afs_cell_trace_get_queue_dns);
+ afs_queue_cell(cell, afs_cell_trace_queue_dns);
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
if (wait_var_event_interruptible(
&cell->dns_lookup_count,
smp_load_acquire(&cell->dns_lookup_count)
!= dns_lookup_count) < 0) {
- vc->error = -ERESTARTSYS;
+ vc->cumul_error.error = -ERESTARTSYS;
return false;
}
}
/* Status load is ordered after lookup counter load */
+ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
+ pr_warn("No record of cell %s\n", cell->name);
+ vc->cumul_error.error = -ENOENT;
+ return false;
+ }
+
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
- vc->error = -EDESTADDRREQ;
+ vc->cumul_error.error = -EDESTADDRREQ;
return false;
}
}
@@ -72,8 +81,8 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
if (!vc->server_list->nr_servers)
return false;
- vc->untried = (1UL << vc->server_list->nr_servers) - 1;
- vc->index = -1;
+ vc->untried_servers = (1UL << vc->server_list->nr_servers) - 1;
+ vc->server_index = -1;
return true;
}
@@ -83,54 +92,57 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
*/
bool afs_select_vlserver(struct afs_vl_cursor *vc)
{
- struct afs_addr_list *alist;
+ struct afs_addr_list *alist = vc->alist;
struct afs_vlserver *vlserver;
- struct afs_error e;
- u32 rtt;
- int error = vc->ac.error, i;
+ unsigned long set, failed;
+ unsigned int rtt;
+ s32 abort_code = vc->call_abort_code;
+ int error = vc->call_error, i;
- _enter("%lx[%d],%lx[%d],%d,%d",
- vc->untried, vc->index,
- vc->ac.tried, vc->ac.index,
- error, vc->ac.abort_code);
+ vc->nr_iterations++;
+
+ _enter("VC=%x+%x,%d{%lx},%d{%lx},%d,%d",
+ vc->debug_id, vc->nr_iterations, vc->server_index, vc->untried_servers,
+ vc->addr_index, vc->addr_tried,
+ error, abort_code);
if (vc->flags & AFS_VL_CURSOR_STOP) {
_leave(" = f [stopped]");
return false;
}
- vc->nr_iterations++;
+ if (vc->nr_iterations == 0)
+ goto start;
+
+ WRITE_ONCE(alist->addrs[vc->addr_index].last_error, error);
/* Evaluate the result of the previous operation, if there was one. */
switch (error) {
- case SHRT_MAX:
- goto start;
-
default:
case 0:
/* Success or local failure. Stop. */
- vc->error = error;
+ vc->cumul_error.error = error;
vc->flags |= AFS_VL_CURSOR_STOP;
- _leave(" = f [okay/local %d]", vc->ac.error);
+ _leave(" = f [okay/local %d]", vc->cumul_error.error);
return false;
case -ECONNABORTED:
/* The far side rejected the operation on some grounds. This
* might involve the server being busy or the volume having been moved.
*/
- switch (vc->ac.abort_code) {
+ switch (abort_code) {
case AFSVL_IO:
case AFSVL_BADVOLOPER:
case AFSVL_NOMEM:
/* The server went weird. */
- vc->error = -EREMOTEIO;
+ afs_prioritise_error(&vc->cumul_error, -EREMOTEIO, abort_code);
//write_lock(&vc->cell->vl_servers_lock);
- //vc->server_list->weird_mask |= 1 << vc->index;
+ //vc->server_list->weird_mask |= 1 << vc->server_index;
//write_unlock(&vc->cell->vl_servers_lock);
goto next_server;
default:
- vc->error = afs_abort_to_error(vc->ac.abort_code);
+ afs_prioritise_error(&vc->cumul_error, error, abort_code);
goto failed;
}
@@ -143,12 +155,12 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
case -ETIMEDOUT:
case -ETIME:
_debug("no conn %d", error);
- vc->error = error;
+ afs_prioritise_error(&vc->cumul_error, error, 0);
goto iterate_address;
case -ECONNRESET:
_debug("call reset");
- vc->error = error;
+ afs_prioritise_error(&vc->cumul_error, error, 0);
vc->flags |= AFS_VL_CURSOR_RETRY;
goto next_server;
@@ -159,7 +171,13 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
restart_from_beginning:
_debug("restart");
- afs_end_cursor(&vc->ac);
+ if (vc->call_responded &&
+ vc->addr_index != vc->alist->preferred &&
+ test_bit(alist->preferred, &vc->addr_tried))
+ WRITE_ONCE(alist->preferred, vc->addr_index);
+ afs_put_addrlist(alist, afs_alist_trace_put_vlrotate_restart);
+ alist = vc->alist = NULL;
+
afs_put_vlserverlist(vc->cell->net, vc->server_list);
vc->server_list = NULL;
if (vc->flags & AFS_VL_CURSOR_RETRIED)
@@ -167,53 +185,58 @@ restart_from_beginning:
vc->flags |= AFS_VL_CURSOR_RETRIED;
start:
_debug("start");
+ ASSERTCMP(alist, ==, NULL);
if (!afs_start_vl_iteration(vc))
goto failed;
error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list);
- if (error < 0)
- goto failed_set_error;
+ if (error < 0) {
+ afs_prioritise_error(&vc->cumul_error, error, 0);
+ goto failed;
+ }
pick_server:
- _debug("pick [%lx]", vc->untried);
+ _debug("pick [%lx]", vc->untried_servers);
+ ASSERTCMP(alist, ==, NULL);
- error = afs_wait_for_vl_probes(vc->server_list, vc->untried);
- if (error < 0)
- goto failed_set_error;
+ error = afs_wait_for_vl_probes(vc->server_list, vc->untried_servers);
+ if (error < 0) {
+ afs_prioritise_error(&vc->cumul_error, error, 0);
+ goto failed;
+ }
/* Pick the untried server with the lowest RTT. */
- vc->index = vc->server_list->preferred;
- if (test_bit(vc->index, &vc->untried))
+ vc->server_index = vc->server_list->preferred;
+ if (test_bit(vc->server_index, &vc->untried_servers))
goto selected_server;
- vc->index = -1;
- rtt = U32_MAX;
+ vc->server_index = -1;
+ rtt = UINT_MAX;
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
- if (!test_bit(i, &vc->untried) ||
+ if (!test_bit(i, &vc->untried_servers) ||
!test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
continue;
- if (s->probe.rtt < rtt) {
- vc->index = i;
+ if (s->probe.rtt <= rtt) {
+ vc->server_index = i;
rtt = s->probe.rtt;
}
}
- if (vc->index == -1)
+ if (vc->server_index == -1)
goto no_more_servers;
selected_server:
- _debug("use %d", vc->index);
- __clear_bit(vc->index, &vc->untried);
+ _debug("use %d", vc->server_index);
+ __clear_bit(vc->server_index, &vc->untried_servers);
/* We're starting on a different vlserver from the list. We need to
* check it, find its address list and probe its capabilities before we
* use it.
*/
- ASSERTCMP(vc->ac.alist, ==, NULL);
- vlserver = vc->server_list->servers[vc->index].server;
+ vlserver = vc->server_list->servers[vc->server_index].server;
vc->server = vlserver;
_debug("USING VLSERVER: %s", vlserver->name);
@@ -221,34 +244,48 @@ selected_server:
read_lock(&vlserver->lock);
alist = rcu_dereference_protected(vlserver->addresses,
lockdep_is_held(&vlserver->lock));
- afs_get_addrlist(alist);
+ vc->alist = afs_get_addrlist(alist, afs_alist_trace_get_vlrotate_set);
read_unlock(&vlserver->lock);
- memset(&vc->ac, 0, sizeof(vc->ac));
-
- if (!vc->ac.alist)
- vc->ac.alist = alist;
- else
- afs_put_addrlist(alist);
-
- vc->ac.index = -1;
+ vc->addr_tried = 0;
+ vc->addr_index = -1;
iterate_address:
- ASSERT(vc->ac.alist);
/* Iterate over the current server's address list to try and find an
* address on which it will respond to us.
*/
- if (!afs_iterate_addresses(&vc->ac))
+ set = READ_ONCE(alist->responded);
+ failed = READ_ONCE(alist->probe_failed);
+ vc->addr_index = READ_ONCE(alist->preferred);
+
+ _debug("%lx-%lx-%lx,%d", set, failed, vc->addr_tried, vc->addr_index);
+
+ set &= ~(failed | vc->addr_tried);
+
+ if (!set)
goto next_server;
- _debug("VL address %d/%d", vc->ac.index, vc->ac.alist->nr_addrs);
+ if (!test_bit(vc->addr_index, &set))
+ vc->addr_index = __ffs(set);
+
+ set_bit(vc->addr_index, &vc->addr_tried);
+ vc->alist = alist;
- _leave(" = t %pISpc", &vc->ac.alist->addrs[vc->ac.index].transport);
+ _debug("VL address %d/%d", vc->addr_index, alist->nr_addrs);
+
+ vc->call_responded = false;
+ _leave(" = t %pISpc", rxrpc_kernel_remote_addr(alist->addrs[vc->addr_index].peer));
return true;
next_server:
_debug("next");
- afs_end_cursor(&vc->ac);
+ ASSERT(alist);
+ if (vc->call_responded &&
+ vc->addr_index != alist->preferred &&
+ test_bit(alist->preferred, &vc->addr_tried))
+ WRITE_ONCE(alist->preferred, vc->addr_index);
+ afs_put_addrlist(alist, afs_alist_trace_put_vlrotate_next);
+ alist = vc->alist = NULL;
goto pick_server;
no_more_servers:
@@ -258,25 +295,26 @@ no_more_servers:
if (vc->flags & AFS_VL_CURSOR_RETRY)
goto restart_from_beginning;
- e.error = -EDESTADDRREQ;
- e.responded = false;
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
- e.responded = true;
- afs_prioritise_error(&e, READ_ONCE(s->probe.error),
+ vc->cumul_error.responded = true;
+ afs_prioritise_error(&vc->cumul_error, READ_ONCE(s->probe.error),
s->probe.abort_code);
}
- error = e.error;
-
-failed_set_error:
- vc->error = error;
failed:
+ if (alist) {
+ if (vc->call_responded &&
+ vc->addr_index != alist->preferred &&
+ test_bit(alist->preferred, &vc->addr_tried))
+ WRITE_ONCE(alist->preferred, vc->addr_index);
+ afs_put_addrlist(alist, afs_alist_trace_put_vlrotate_fail);
+ alist = vc->alist = NULL;
+ }
vc->flags |= AFS_VL_CURSOR_STOP;
- afs_end_cursor(&vc->ac);
- _leave(" = f [failed %d]", vc->error);
+ _leave(" = f [failed %d]", vc->cumul_error.error);
return false;
}
@@ -285,6 +323,7 @@ failed:
*/
static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
{
+ struct afs_cell *cell = vc->cell;
static int count;
int i;
@@ -294,8 +333,14 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
rcu_read_lock();
pr_notice("EDESTADDR occurred\n");
+ pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
+ pr_notice("DNS: src=%u st=%u lc=%x\n",
+ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
- vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+ vc->untried_servers, vc->server_index, vc->nr_iterations,
+ vc->flags, vc->cumul_error.error);
+ pr_notice("VC: call er=%d ac=%d r=%u\n",
+ vc->call_error, vc->call_abort_code, vc->call_responded);
if (vc->server_list) {
const struct afs_vlserver_list *sl = vc->server_list;
@@ -312,16 +357,14 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
a->nr_ipv4, a->nr_addrs, a->max_addrs,
a->preferred);
pr_notice("VC: - R=%lx F=%lx\n",
- a->responded, a->failed);
- if (a == vc->ac.alist)
+ a->responded, a->probe_failed);
+ if (a == vc->alist)
pr_notice("VC: - current\n");
}
}
}
- pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
- vc->ac.tried, vc->ac.index, vc->ac.abort_code, vc->ac.error,
- vc->ac.responded, vc->ac.nr_iterations);
+ pr_notice("AC: t=%lx ax=%u\n", vc->addr_tried, vc->addr_index);
rcu_read_unlock();
}
@@ -332,17 +375,25 @@ int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
{
struct afs_net *net = vc->cell->net;
- if (vc->error == -EDESTADDRREQ ||
- vc->error == -EADDRNOTAVAIL ||
- vc->error == -ENETUNREACH ||
- vc->error == -EHOSTUNREACH)
+ _enter("VC=%x+%x", vc->debug_id, vc->nr_iterations);
+
+ switch (vc->cumul_error.error) {
+ case -EDESTADDRREQ:
+ case -EADDRNOTAVAIL:
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
afs_vl_dump_edestaddrreq(vc);
+ break;
+ }
- afs_end_cursor(&vc->ac);
+ if (vc->alist) {
+ if (vc->call_responded &&
+ vc->addr_index != vc->alist->preferred &&
+ test_bit(vc->alist->preferred, &vc->addr_tried))
+ WRITE_ONCE(vc->alist->preferred, vc->addr_index);
+ afs_put_addrlist(vc->alist, afs_alist_trace_put_vlrotate_end);
+ vc->alist = NULL;
+ }
afs_put_vlserverlist(net, vc->server_list);
-
- if (vc->error == -ECONNABORTED)
- vc->error = afs_abort_to_error(vc->ac.abort_code);
-
- return vc->error;
+ return vc->cumul_error.error;
}
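
A note on the iterate_address logic above: address selection now works purely from bitmasks on the address list — responded minus probe_failed minus the cursor's addr_tried — keeping the cached preferred index if it is still eligible and otherwise taking the lowest set bit. The stand-alone sketch below (plain userspace C, invented names, not the kernel helpers) shows that same selection rule.

#include <stdio.h>

/*
 * Pick the next address to try: mask failed/already-tried addresses out of
 * the responded set, keep the preferred index if it is still eligible,
 * otherwise fall back to the lowest set bit.  Marks the pick as tried.
 */
static int pick_address(unsigned long responded, unsigned long failed,
                        unsigned long *tried, int preferred)
{
        unsigned long set = responded & ~(failed | *tried);

        if (!set)
                return -1;                              /* caller moves on to the next server */
        if (!(set & (1UL << preferred)))
                preferred = __builtin_ctzl(set);        /* lowest eligible address */
        *tried |= 1UL << preferred;
        return preferred;
}

int main(void)
{
        unsigned long tried = 0;
        int idx;

        /* Addresses 0, 1 and 3 responded; address 1 failed probing; prefer 1. */
        while ((idx = pick_address(0x0b, 0x02, &tried, 1)) >= 0)
                printf("try address %d\n", idx);        /* prints 0, then 3 */
        return 0;
}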
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 00fca3c66ba6..3a23c0b08eb6 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -18,8 +18,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
{
struct afs_uvldbentry__xdr *uvldb;
struct afs_vldb_entry *entry;
- bool new_only = false;
- u32 tmp, nr_servers, vlflags;
+ u32 nr_servers, vlflags;
int i, ret;
_enter("");
@@ -41,27 +40,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
entry->name[i] = 0;
entry->name_len = strlen(entry->name);
- /* If there is a new replication site that we can use, ignore all the
- * sites that aren't marked as new.
- */
- for (i = 0; i < nr_servers; i++) {
- tmp = ntohl(uvldb->serverFlags[i]);
- if (!(tmp & AFS_VLSF_DONTUSE) &&
- (tmp & AFS_VLSF_NEWREPSITE))
- new_only = true;
- }
-
vlflags = ntohl(uvldb->flags);
for (i = 0; i < nr_servers; i++) {
struct afs_uuid__xdr *xdr;
struct afs_uuid *uuid;
+ u32 tmp = ntohl(uvldb->serverFlags[i]);
int j;
int n = entry->nr_servers;
- tmp = ntohl(uvldb->serverFlags[i]);
- if (tmp & AFS_VLSF_DONTUSE ||
- (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
- continue;
if (tmp & AFS_VLSF_RWVOL) {
entry->fs_mask[n] |= AFS_VOL_VTM_RW;
if (vlflags & AFS_VLF_BACKEXISTS)
@@ -82,6 +68,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
for (j = 0; j < 6; j++)
uuid->node[j] = (u8)ntohl(xdr->node[j]);
+ entry->vlsf_flags[n] = tmp;
entry->addr_version[n] = ntohl(uvldb->serverUnique[i]);
entry->nr_servers++;
}
@@ -106,12 +93,6 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
return 0;
}
-static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call)
-{
- kfree(call->ret_vldb);
- afs_flat_call_destructor(call);
-}
-
/*
* VL.GetEntryByNameU operation type.
*/
@@ -119,7 +100,7 @@ static const struct afs_call_type afs_RXVLGetEntryByNameU = {
.name = "VL.GetEntryByNameU",
.op = afs_VL_GetEntryByNameU,
.deliver = afs_deliver_vl_get_entry_by_name_u,
- .destructor = afs_destroy_vl_get_entry_by_name_u,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -155,6 +136,8 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc,
call->key = vc->key;
call->ret_vldb = entry;
call->max_lifespan = AFS_VL_MAX_LIFESPAN;
+ call->peer = rxrpc_kernel_get_peer(vc->alist->addrs[vc->addr_index].peer);
+ call->service_id = vc->server->service_id;
/* Marshall the parameters */
bp = call->request;
@@ -165,8 +148,17 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc,
memset((void *)bp + volnamesz, 0, padsz);
trace_afs_make_vl_call(call);
- afs_make_call(&vc->ac, call, GFP_KERNEL);
- return (struct afs_vldb_entry *)afs_wait_for_call_to_complete(call, &vc->ac);
+ afs_make_call(call, GFP_KERNEL);
+ afs_wait_for_call_to_complete(call);
+ vc->call_abort_code = call->abort_code;
+ vc->call_error = call->error;
+ vc->call_responded = call->responded;
+ afs_put_call(call);
+ if (vc->call_error) {
+ kfree(entry);
+ return ERR_PTR(vc->call_error);
+ }
+ return entry;
}
/*
@@ -208,7 +200,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
count = ntohl(*bp);
nentries = min(nentries, count);
- alist = afs_alloc_addrlist(nentries, FS_SERVICE, AFS_FS_PORT);
+ alist = afs_alloc_addrlist(nentries);
if (!alist)
return -ENOMEM;
alist->version = uniquifier;
@@ -230,9 +222,13 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
alist = call->ret_alist;
bp = call->buffer;
count = min(call->count, 4U);
- for (i = 0; i < count; i++)
- if (alist->nr_addrs < call->count2)
- afs_merge_fs_addr4(alist, *bp++, AFS_FS_PORT);
+ for (i = 0; i < count; i++) {
+ if (alist->nr_addrs < call->count2) {
+ ret = afs_merge_fs_addr4(call->net, alist, *bp++, AFS_FS_PORT);
+ if (ret < 0)
+ return ret;
+ }
+ }
call->count -= count;
if (call->count > 0)
@@ -245,12 +241,6 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
return 0;
}
-static void afs_vl_get_addrs_u_destructor(struct afs_call *call)
-{
- afs_put_addrlist(call->ret_alist);
- return afs_flat_call_destructor(call);
-}
-
/*
* VL.GetAddrsU operation type.
*/
@@ -258,7 +248,7 @@ static const struct afs_call_type afs_RXVLGetAddrsU = {
.name = "VL.GetAddrsU",
.op = afs_VL_GetAddrsU,
.deliver = afs_deliver_vl_get_addrs_u,
- .destructor = afs_vl_get_addrs_u_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -269,6 +259,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
const uuid_t *uuid)
{
struct afs_ListAddrByAttributes__xdr *r;
+ struct afs_addr_list *alist;
const struct afs_uuid *u = (const struct afs_uuid *)uuid;
struct afs_call *call;
struct afs_net *net = vc->cell->net;
@@ -286,6 +277,8 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
call->key = vc->key;
call->ret_alist = NULL;
call->max_lifespan = AFS_VL_MAX_LIFESPAN;
+ call->peer = rxrpc_kernel_get_peer(vc->alist->addrs[vc->addr_index].peer);
+ call->service_id = vc->server->service_id;
/* Marshall the parameters */
bp = call->request;
@@ -304,8 +297,18 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
r->uuid.node[i] = htonl(u->node[i]);
trace_afs_make_vl_call(call);
- afs_make_call(&vc->ac, call, GFP_KERNEL);
- return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
+ afs_make_call(call, GFP_KERNEL);
+ afs_wait_for_call_to_complete(call);
+ vc->call_abort_code = call->abort_code;
+ vc->call_error = call->error;
+ vc->call_responded = call->responded;
+ alist = call->ret_alist;
+ afs_put_call(call);
+ if (vc->call_error) {
+ afs_put_addrlist(alist, afs_alist_trace_put_getaddru);
+ return ERR_PTR(vc->call_error);
+ }
+ return alist;
}
/*
@@ -355,6 +358,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call)
static void afs_destroy_vl_get_capabilities(struct afs_call *call)
{
+ afs_put_addrlist(call->vl_probe, afs_alist_trace_put_vlgetcaps);
afs_put_vlserver(call->net, call->vlserver);
afs_flat_call_destructor(call);
}
@@ -366,6 +370,7 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
.name = "VL.GetCapabilities",
.op = afs_VL_GetCapabilities,
.deliver = afs_deliver_vl_get_capabilities,
+ .immediate_cancel = afs_vlserver_probe_result,
.done = afs_vlserver_probe_result,
.destructor = afs_destroy_vl_get_capabilities,
};
@@ -378,7 +383,8 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
* other end supports.
*/
struct afs_call *afs_vl_get_capabilities(struct afs_net *net,
- struct afs_addr_cursor *ac,
+ struct afs_addr_list *alist,
+ unsigned int addr_index,
struct key *key,
struct afs_vlserver *server,
unsigned int server_index)
@@ -395,6 +401,10 @@ struct afs_call *afs_vl_get_capabilities(struct afs_net *net,
call->key = key;
call->vlserver = afs_get_vlserver(server);
call->server_index = server_index;
+ call->peer = rxrpc_kernel_get_peer(alist->addrs[addr_index].peer);
+ call->vl_probe = afs_get_addrlist(alist, afs_alist_trace_get_vlgetcaps);
+ call->probe_index = addr_index;
+ call->service_id = server->service_id;
call->upgrade = true;
call->async = true;
call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
@@ -405,7 +415,7 @@ struct afs_call *afs_vl_get_capabilities(struct afs_net *net,
/* Can't take a ref on server */
trace_afs_make_vl_call(call);
- afs_make_call(ac, call, GFP_KERNEL);
+ afs_make_call(call, GFP_KERNEL);
return call;
}
@@ -450,7 +460,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (call->count > YFS_MAXENDPOINTS)
return afs_protocol_error(call, afs_eproto_yvl_fsendpt_num);
- alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
+ alist = afs_alloc_addrlist(call->count);
if (!alist)
return -ENOMEM;
alist->version = uniquifier;
@@ -488,14 +498,18 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (ntohl(bp[0]) != sizeof(__be32) * 2)
return afs_protocol_error(
call, afs_eproto_yvl_fsendpt4_len);
- afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
+ ret = afs_merge_fs_addr4(call->net, alist, bp[1], ntohl(bp[2]));
+ if (ret < 0)
+ return ret;
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
return afs_protocol_error(
call, afs_eproto_yvl_fsendpt6_len);
- afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
+ ret = afs_merge_fs_addr6(call->net, alist, bp + 1, ntohl(bp[5]));
+ if (ret < 0)
+ return ret;
bp += 6;
break;
default:
@@ -610,7 +624,7 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = {
.name = "YFSVL.GetEndpoints",
.op = afs_YFSVL_GetEndpoints,
.deliver = afs_deliver_yfsvl_get_endpoints,
- .destructor = afs_vl_get_addrs_u_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -620,6 +634,7 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = {
struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
const uuid_t *uuid)
{
+ struct afs_addr_list *alist;
struct afs_call *call;
struct afs_net *net = vc->cell->net;
__be32 *bp;
@@ -635,6 +650,8 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
call->key = vc->key;
call->ret_alist = NULL;
call->max_lifespan = AFS_VL_MAX_LIFESPAN;
+ call->peer = rxrpc_kernel_get_peer(vc->alist->addrs[vc->addr_index].peer);
+ call->service_id = vc->server->service_id;
/* Marshall the parameters */
bp = call->request;
@@ -643,8 +660,18 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
memcpy(bp, uuid, sizeof(*uuid)); /* Type opr_uuid */
trace_afs_make_vl_call(call);
- afs_make_call(&vc->ac, call, GFP_KERNEL);
- return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
+ afs_make_call(call, GFP_KERNEL);
+ afs_wait_for_call_to_complete(call);
+ vc->call_abort_code = call->abort_code;
+ vc->call_error = call->error;
+ vc->call_responded = call->responded;
+ alist = call->ret_alist;
+ afs_put_call(call);
+ if (vc->call_error) {
+ afs_put_addrlist(alist, afs_alist_trace_put_getaddru);
+ return ERR_PTR(vc->call_error);
+ }
+ return alist;
}
/*
@@ -671,7 +698,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
return ret;
namesz = ntohl(call->tmp);
- if (namesz > AFS_MAXCELLNAME)
+ if (namesz > YFS_VL_MAXCELLNAME)
return afs_protocol_error(call, afs_eproto_cellname_len);
paddedsz = (namesz + 3) & ~3;
call->count = namesz;
@@ -709,12 +736,6 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
return 0;
}
-static void afs_destroy_yfsvl_get_cell_name(struct afs_call *call)
-{
- kfree(call->ret_str);
- afs_flat_call_destructor(call);
-}
-
/*
* VL.GetCapabilities operation type
*/
@@ -722,7 +743,7 @@ static const struct afs_call_type afs_YFSVLGetCellName = {
.name = "YFSVL.GetCellName",
.op = afs_YFSVL_GetCellName,
.deliver = afs_deliver_yfsvl_get_cell_name,
- .destructor = afs_destroy_yfsvl_get_cell_name,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -737,6 +758,7 @@ char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
struct afs_call *call;
struct afs_net *net = vc->cell->net;
__be32 *bp;
+ char *cellname;
_enter("");
@@ -747,6 +769,8 @@ char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
call->key = vc->key;
call->ret_str = NULL;
call->max_lifespan = AFS_VL_MAX_LIFESPAN;
+ call->peer = rxrpc_kernel_get_peer(vc->alist->addrs[vc->addr_index].peer);
+ call->service_id = vc->server->service_id;
/* marshall the parameters */
bp = call->request;
@@ -754,6 +778,16 @@ char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
/* Can't take a ref on server */
trace_afs_make_vl_call(call);
- afs_make_call(&vc->ac, call, GFP_KERNEL);
- return (char *)afs_wait_for_call_to_complete(call, &vc->ac);
+ afs_make_call(call, GFP_KERNEL);
+ afs_wait_for_call_to_complete(call);
+ vc->call_abort_code = call->abort_code;
+ vc->call_error = call->error;
+ vc->call_responded = call->responded;
+ cellname = call->ret_str;
+ afs_put_call(call);
+ if (vc->call_error) {
+ kfree(cellname);
+ return ERR_PTR(vc->call_error);
+ }
+ return cellname;
}
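
Every RPC wrapper in the vlclient.c hunks above now ends the same way: issue the call without an address cursor, wait for it, copy the call's error, abort code and responded flag into the VL cursor, drop the call reference, and hand back ERR_PTR() on failure. A rough sketch of that shape follows; the types, the ERR_PTR()/IS_ERR() stand-ins and run_call() are illustrative only, not the real AFS API.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Minimal userspace stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define ERR_PTR(err)   ((void *)(intptr_t)(err))
#define IS_ERR(ptr)    ((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct fake_call   { int error; int abort_code; int responded; void *result; };
struct fake_cursor { int call_error; int call_abort_code; int call_responded; };

/*
 * Wait for the call, fold its outcome into the rotation cursor and return
 * either the decoded result or an error pointer.
 */
static void *run_call(struct fake_cursor *vc, struct fake_call *call)
{
        void *result;

        /* ... afs_make_call() + afs_wait_for_call_to_complete() go here ... */
        vc->call_abort_code = call->abort_code;
        vc->call_error      = call->error;
        vc->call_responded  = call->responded;
        result = call->result;
        /* ... afs_put_call() goes here ... */

        if (vc->call_error)
                return ERR_PTR(vc->call_error);
        return result;
}

int main(void)
{
        struct fake_cursor vc = {0};
        struct fake_call call = { .error = -ENOMEM };

        return IS_ERR(run_call(&vc, &call)) ? 0 : 1;
}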
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index f84194b791d3..0efff3d25133 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -9,8 +9,10 @@
#include <linux/slab.h>
#include "internal.h"
-unsigned __read_mostly afs_volume_gc_delay = 10;
-unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static atomic_t afs_volume_debug_id;
+
+static void afs_destroy_volume(struct work_struct *work);
/*
* Insert a volume into a cell. If there's an existing volume record, that is
@@ -33,8 +35,13 @@ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
} else if (p->vid > volume->vid) {
pp = &(*pp)->rb_right;
} else {
- volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
- goto found;
+ if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) {
+ volume = p;
+ goto found;
+ }
+
+ set_bit(AFS_VOLUME_RM_TREE, &p->flags);
+ rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes);
}
}
@@ -53,11 +60,12 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
struct afs_cell *cell = volume->cell;
if (!hlist_unhashed(&volume->proc_link)) {
- trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
- rb_erase(&volume->cell_node, &cell->volumes);
+ if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags))
+ rb_erase(&volume->cell_node, &cell->volumes);
write_sequnlock(&cell->volume_lock);
}
}
@@ -67,42 +75,49 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
*/
static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
struct afs_vldb_entry *vldb,
- unsigned long type_mask)
+ struct afs_server_list **_slist)
{
struct afs_server_list *slist;
struct afs_volume *volume;
- int ret = -ENOMEM, nr_servers = 0, i;
-
- for (i = 0; i < vldb->nr_servers; i++)
- if (vldb->fs_mask[i] & type_mask)
- nr_servers++;
+ int ret = -ENOMEM, i;
volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
if (!volume)
goto error_0;
+ volume->debug_id = atomic_inc_return(&afs_volume_debug_id);
volume->vid = vldb->vid[params->type];
volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol);
volume->type = params->type;
volume->type_force = params->force;
volume->name_len = vldb->name_len;
+ volume->creation_time = TIME64_MIN;
+ volume->update_time = TIME64_MIN;
- atomic_set(&volume->usage, 1);
+ refcount_set(&volume->ref, 1);
INIT_HLIST_NODE(&volume->proc_link);
+ INIT_WORK(&volume->destructor, afs_destroy_volume);
rwlock_init(&volume->servers_lock);
+ mutex_init(&volume->volsync_lock);
+ mutex_init(&volume->cb_check_lock);
rwlock_init(&volume->cb_v_break_lock);
+ INIT_LIST_HEAD(&volume->open_mmaps);
+ init_rwsem(&volume->open_mmaps_lock);
memcpy(volume->name, vldb->name, vldb->name_len + 1);
- slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask);
+ for (i = 0; i < AFS_MAXTYPES; i++)
+ volume->vids[i] = vldb->vid[i];
+
+ slist = afs_alloc_server_list(volume, params->key, vldb);
if (IS_ERR(slist)) {
ret = PTR_ERR(slist);
goto error_1;
}
- refcount_set(&slist->usage, 1);
+ *_slist = slist;
rcu_assign_pointer(volume->servers, slist);
- trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
+ trace_afs_volume(volume->debug_id, volume->vid, 1, afs_volume_trace_alloc);
return volume;
error_1:
@@ -116,18 +131,20 @@ error_0:
* Look up or allocate a volume record.
*/
static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params,
- struct afs_vldb_entry *vldb,
- unsigned long type_mask)
+ struct afs_vldb_entry *vldb)
{
+ struct afs_server_list *slist;
struct afs_volume *candidate, *volume;
- candidate = afs_alloc_volume(params, vldb, type_mask);
+ candidate = afs_alloc_volume(params, vldb, &slist);
if (IS_ERR(candidate))
return candidate;
volume = afs_insert_volume_into_cell(params->cell, candidate);
- if (volume != candidate)
- afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup);
+ if (volume == candidate)
+ afs_attach_volume_to_servers(volume, slist);
+ else
+ afs_put_volume(candidate, afs_volume_trace_put_cell_dup);
return volume;
}
@@ -207,8 +224,7 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params)
goto error;
}
- type_mask = 1UL << params->type;
- volume = afs_lookup_volume(params, vldb, type_mask);
+ volume = afs_lookup_volume(params, vldb);
error:
kfree(vldb);
@@ -218,18 +234,22 @@ error:
/*
* Destroy a volume record
*/
-static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
+static void afs_destroy_volume(struct work_struct *work)
{
+ struct afs_volume *volume = container_of(work, struct afs_volume, destructor);
+ struct afs_server_list *slist = rcu_access_pointer(volume->servers);
+
_enter("%p", volume);
#ifdef CONFIG_AFS_FSCACHE
ASSERTCMP(volume->cache, ==, NULL);
#endif
+ afs_detach_volume_from_servers(volume, slist);
afs_remove_volume_from_cell(volume);
- afs_put_serverlist(net, rcu_access_pointer(volume->servers));
+ afs_put_serverlist(volume->cell->net, slist);
afs_put_cell(volume->cell, afs_cell_trace_put_vol);
- trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_free);
kfree_rcu(volume, rcu);
@@ -237,14 +257,30 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
}
/*
+ * Try to get a reference on a volume record.
+ */
+bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
+{
+ int r;
+
+ if (__refcount_inc_not_zero(&volume->ref, &r)) {
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
+ return true;
+ }
+ return false;
+}
+
+/*
* Get a reference on a volume record.
*/
struct afs_volume *afs_get_volume(struct afs_volume *volume,
enum afs_volume_trace reason)
{
if (volume) {
- int u = atomic_inc_return(&volume->usage);
- trace_afs_volume(volume->vid, u, reason);
+ int r;
+
+ __refcount_inc(&volume->ref, &r);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
}
return volume;
}
@@ -253,30 +289,48 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
/*
* Drop a reference on a volume record.
*/
-void afs_put_volume(struct afs_net *net, struct afs_volume *volume,
- enum afs_volume_trace reason)
+void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason)
{
if (volume) {
+ unsigned int debug_id = volume->debug_id;
afs_volid_t vid = volume->vid;
- int u = atomic_dec_return(&volume->usage);
- trace_afs_volume(vid, u, reason);
- if (u == 0)
- afs_destroy_volume(net, volume);
+ bool zero;
+ int r;
+
+ zero = __refcount_dec_and_test(&volume->ref, &r);
+ trace_afs_volume(debug_id, vid, r - 1, reason);
+ if (zero)
+ schedule_work(&volume->destructor);
}
}
/*
* Activate a volume.
*/
-void afs_activate_volume(struct afs_volume *volume)
+int afs_activate_volume(struct afs_volume *volume)
{
#ifdef CONFIG_AFS_FSCACHE
- volume->cache = fscache_acquire_cookie(volume->cell->cache,
- &afs_volume_cache_index_def,
- &volume->vid, sizeof(volume->vid),
- NULL, 0,
- volume, 0, true);
+ struct fscache_volume *vcookie;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "afs,%s,%llx",
+ volume->cell->name, volume->vid);
+ if (!name)
+ return -ENOMEM;
+
+ vcookie = fscache_acquire_volume(name, NULL, NULL, 0);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ kfree(name);
+ return PTR_ERR(vcookie);
+ }
+ pr_err("AFS: Cache volume key already in use (%s)\n", name);
+ vcookie = NULL;
+ }
+ volume->cache = vcookie;
+ kfree(name);
#endif
+ return 0;
}
/*
@@ -287,7 +341,7 @@ void afs_deactivate_volume(struct afs_volume *volume)
_enter("%s", volume->name);
#ifdef CONFIG_AFS_FSCACHE
- fscache_relinquish_cookie(volume->cache, NULL,
+ fscache_relinquish_volume(volume->cache, NULL,
test_bit(AFS_VOLUME_DELETED, &volume->flags));
volume->cache = NULL;
#endif
@@ -302,7 +356,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
{
struct afs_server_list *new, *old, *discard;
struct afs_vldb_entry *vldb;
- char idbuf[16];
+ char idbuf[24];
int ret, idsz;
_enter("");
@@ -310,7 +364,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/* We look up an ID by passing it as a decimal string in the
* operation's name parameter.
*/
- idsz = sprintf(idbuf, "%llu", volume->vid);
+ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
if (IS_ERR(vldb)) {
@@ -327,8 +381,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
}
/* See if the volume's server list got updated. */
- new = afs_alloc_server_list(volume->cell, key,
- vldb, (1 << volume->type));
+ new = afs_alloc_server_list(volume, key, vldb);
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto error_vldb;
@@ -347,11 +400,17 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
discard = old;
}
- volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
+ /* Check more often if replication is ongoing. */
+ if (new->ro_replicating)
+ volume->update_at = ktime_get_real_seconds() + 10 * 60;
+ else
+ volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
write_unlock(&volume->servers_lock);
- ret = 0;
+ if (discard == old)
+ afs_reattach_volume_to_servers(volume, new, old);
afs_put_serverlist(volume->cell->net, discard);
+ ret = 0;
error_vldb:
kfree(vldb);
error:
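
The volume.c changes above replace the old atomic_t usage count with refcount_t, add a try-get that refuses to resurrect a volume already on its way out, and defer destruction to a work item once the last reference goes. The userspace sketch below mimics that get/try-get/put shape with C11 atomics; the names and the immediate free() stand in for the kernel's refcount_t helpers and deferred destructor work.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel object: a refcount plus an identifier for tracing. */
struct volume {
        atomic_int ref;
        unsigned int debug_id;
};

/* Mirrors afs_try_get_volume(): only take a ref if the count is still non-zero. */
static bool volume_tryget(struct volume *v)
{
        int old = atomic_load(&v->ref);

        do {
                if (old == 0)
                        return false;   /* already being torn down */
        } while (!atomic_compare_exchange_weak(&v->ref, &old, old + 1));
        return true;
}

/* Mirrors afs_put_volume(): the last put hands the object to a destructor. */
static void volume_put(struct volume *v)
{
        if (atomic_fetch_sub(&v->ref, 1) == 1) {
                printf("destroying volume %u\n", v->debug_id);
                free(v);        /* the kernel defers this to a work item */
        }
}

int main(void)
{
        struct volume *v = calloc(1, sizeof(*v));

        if (!v)
                return 1;
        atomic_init(&v->ref, 1);
        v->debug_id = 1;
        if (volume_tryget(v))
                volume_put(v);
        volume_put(v);          /* drops the last ref and destroys */
        return 0;
}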
diff --git a/fs/afs/write.c b/fs/afs/write.c
index ca4909baf5e6..93ad86ff3345 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -12,306 +12,56 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
-#include <linux/fscache.h>
+#include <trace/events/netfs.h>
#include "internal.h"
/*
- * mark a page as having been made dirty and thus needing writeback
- */
-int afs_set_page_dirty(struct page *page)
-{
- _enter("");
- return __set_page_dirty_nobuffers(page);
-}
-
-/*
- * prepare to perform part of a write to a page
- */
-int afs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **_page, void **fsdata)
-{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- struct folio *folio;
- unsigned long priv;
- unsigned f, from;
- unsigned t, to;
- pgoff_t index;
- int ret;
-
- _enter("{%llx:%llu},%llx,%x",
- vnode->fid.vid, vnode->fid.vnode, pos, len);
-
- /* Prefetch area to be written into the cache if we're caching this
- * file. We need to do this before we get a lock on the page in case
- * there's more than one writer competing for the same cache block.
- */
- ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
- &afs_req_ops, NULL);
- if (ret < 0)
- return ret;
-
- index = folio_index(folio);
- from = pos - index * PAGE_SIZE;
- to = from + len;
-
-try_again:
- /* See if this page is already partially written in a way that we can
- * merge the new write with.
- */
- if (folio_test_private(folio)) {
- priv = (unsigned long)folio_get_private(folio);
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- ASSERTCMP(f, <=, t);
-
- if (folio_test_writeback(folio)) {
- trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
- goto flush_conflicting_write;
- }
- /* If the file is being filled locally, allow inter-write
- * spaces to be merged into writes. If it's not, only write
- * back what the user gives us.
- */
- if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
- (to < f || from > t))
- goto flush_conflicting_write;
- }
-
- *_page = &folio->page;
- _leave(" = 0");
- return 0;
-
- /* The previous write and this write aren't adjacent or overlapping, so
- * flush the page out.
- */
-flush_conflicting_write:
- _debug("flush conflict");
- ret = folio_write_one(folio);
- if (ret < 0)
- goto error;
-
- ret = folio_lock_killable(folio);
- if (ret < 0)
- goto error;
- goto try_again;
-
-error:
- folio_put(folio);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * finalise part of a write to a page
- */
-int afs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *subpage, void *fsdata)
-{
- struct folio *folio = page_folio(subpage);
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- unsigned long priv;
- unsigned int f, from = offset_in_folio(folio, pos);
- unsigned int t, to = from + copied;
- loff_t i_size, maybe_i_size;
-
- _enter("{%llx:%llu},{%lx}",
- vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
-
- if (!folio_test_uptodate(folio)) {
- if (copied < len) {
- copied = 0;
- goto out;
- }
-
- folio_mark_uptodate(folio);
- }
-
- if (copied == 0)
- goto out;
-
- maybe_i_size = pos + copied;
-
- i_size = i_size_read(&vnode->vfs_inode);
- if (maybe_i_size > i_size) {
- write_seqlock(&vnode->cb_lock);
- i_size = i_size_read(&vnode->vfs_inode);
- if (maybe_i_size > i_size)
- afs_set_i_size(vnode, maybe_i_size);
- write_sequnlock(&vnode->cb_lock);
- }
-
- if (folio_test_private(folio)) {
- priv = (unsigned long)folio_get_private(folio);
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- if (from < f)
- f = from;
- if (to > t)
- t = to;
- priv = afs_folio_dirty(folio, f, t);
- folio_change_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
- } else {
- priv = afs_folio_dirty(folio, from, to);
- folio_attach_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
- }
-
- if (folio_mark_dirty(folio))
- _debug("dirtied %lx", folio_index(folio));
-
-out:
- folio_unlock(folio);
- folio_put(folio);
- return copied;
-}
-
-/*
- * kill all the pages in the given range
- */
-static void afs_kill_pages(struct address_space *mapping,
- loff_t start, loff_t len)
-{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct folio *folio;
- pgoff_t index = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
- _enter("{%llx:%llu},%llx @%llx",
- vnode->fid.vid, vnode->fid.vnode, len, start);
-
- do {
- _debug("kill %lx (to %lx)", index, last);
-
- folio = filemap_get_folio(mapping, index);
- if (!folio) {
- next = index + 1;
- continue;
- }
-
- next = folio_next_index(folio);
-
- folio_clear_uptodate(folio);
- folio_end_writeback(folio);
- folio_lock(folio);
- generic_error_remove_page(mapping, &folio->page);
- folio_unlock(folio);
- folio_put(folio);
-
- } while (index = next, index <= last);
-
- _leave("");
-}
-
-/*
- * Redirty all the pages in a given range.
- */
-static void afs_redirty_pages(struct writeback_control *wbc,
- struct address_space *mapping,
- loff_t start, loff_t len)
-{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct folio *folio;
- pgoff_t index = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
- _enter("{%llx:%llu},%llx @%llx",
- vnode->fid.vid, vnode->fid.vnode, len, start);
-
- do {
- _debug("redirty %llx @%llx", len, start);
-
- folio = filemap_get_folio(mapping, index);
- if (!folio) {
- next = index + 1;
- continue;
- }
-
- next = index + folio_nr_pages(folio);
- folio_redirty_for_writepage(wbc, folio);
- folio_end_writeback(folio);
- folio_put(folio);
- } while (index = next, index <= last);
-
- _leave("");
-}
-
-/*
* completion of write to server
*/
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
- struct address_space *mapping = vnode->vfs_inode.i_mapping;
- struct folio *folio;
- pgoff_t end;
-
- XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
-
_enter("{%llx:%llu},{%x @%llx}",
vnode->fid.vid, vnode->fid.vnode, len, start);
- rcu_read_lock();
-
- end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, folio, end) {
- if (!folio_test_writeback(folio)) {
- kdebug("bad %x @%llx page %lx %lx",
- len, start, folio_index(folio), end);
- ASSERT(folio_test_writeback(folio));
- }
-
- trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
- folio_detach_private(folio);
- folio_end_writeback(folio);
- }
-
- rcu_read_unlock();
-
afs_prune_wb_keys(vnode);
_leave("");
}
/*
* Find a key to use for the writeback. We cached the keys used to author the
- * writes on the vnode. *_wbk will contain the last writeback key used or NULL
- * and we need to start from there if it's set.
+ * writes on the vnode. wreq->netfs_priv2 will contain the last writeback key
+ * record used or NULL and we need to start from there if it's set.
+ * wreq->netfs_priv will be set to the key itself or NULL.
*/
-static int afs_get_writeback_key(struct afs_vnode *vnode,
- struct afs_wb_key **_wbk)
+static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
- struct afs_wb_key *wbk = NULL;
- struct list_head *p;
- int ret = -ENOKEY, ret2;
+ struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
+ struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+
+ key_put(wreq->netfs_priv);
+ wreq->netfs_priv = NULL;
+ wreq->netfs_priv2 = NULL;
spin_lock(&vnode->wb_lock);
- if (*_wbk)
- p = (*_wbk)->vnode_link.next;
+ if (old)
+ wbk = list_next_entry(old, vnode_link);
else
- p = vnode->wb_keys.next;
+ wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);
- while (p != &vnode->wb_keys) {
- wbk = list_entry(p, struct afs_wb_key, vnode_link);
+ list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
_debug("wbk %u", key_serial(wbk->key));
- ret2 = key_validate(wbk->key);
- if (ret2 == 0) {
+ if (key_validate(wbk->key) == 0) {
refcount_inc(&wbk->usage);
+ wreq->netfs_priv = key_get(wbk->key);
+ wreq->netfs_priv2 = wbk;
_debug("USE WB KEY %u", key_serial(wbk->key));
break;
}
-
- wbk = NULL;
- if (ret == -ENOKEY)
- ret = ret2;
- p = p->next;
}
spin_unlock(&vnode->wb_lock);
- if (*_wbk)
- afs_put_wb_key(*_wbk);
- *_wbk = wbk;
- return 0;
+
+ afs_put_wb_key(old);
}
static void afs_store_data_success(struct afs_operation *op)
@@ -320,9 +70,8 @@ static void afs_store_data_success(struct afs_operation *op)
op->ctime = op->file[0].scb.status.mtime_client;
afs_vnode_commit_status(op, &op->file[0]);
- if (op->error == 0) {
- if (!op->store.laundering)
- afs_pages_written_back(vnode, op->store.pos, op->store.size);
+ if (!afs_op_error(op)) {
+ afs_pages_written_back(vnode, op->store.pos, op->store.size);
afs_stat_v(vnode, n_stores);
atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
}
@@ -335,424 +84,153 @@ static const struct afs_operation_ops afs_store_data_operation = {
};
/*
- * write to a file
+ * Prepare a subrequest to write to the server. This sets the max_len
+ * parameter.
+ */
+void afs_prepare_write(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];
+
+ //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
+ // subreq->max_len = 512 * 1024;
+ //else
+ stream->sreq_max_len = 256 * 1024 * 1024;
+}
+
+/*
+ * Issue a subrequest to write to the server.
*/
-static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
- bool laundering)
+static void afs_issue_write_worker(struct work_struct *work)
{
+ struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
+ struct netfs_io_request *wreq = subreq->rreq;
struct afs_operation *op;
- struct afs_wb_key *wbk = NULL;
- loff_t size = iov_iter_count(iter), i_size;
+ struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+ unsigned long long pos = subreq->start + subreq->transferred;
+ size_t len = subreq->len - subreq->transferred;
int ret = -ENOKEY;
- _enter("%s{%llx:%llu.%u},%llx,%llx",
+ _enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
+ wreq->debug_id, subreq->debug_index,
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
- size, pos);
+ pos, len);
- ret = afs_get_writeback_key(vnode, &wbk);
- if (ret) {
- _leave(" = %d [no keys]", ret);
- return ret;
- }
+#if 0 // Error injection
+ if (subreq->debug_index == 3)
+ return netfs_write_subrequest_terminated(subreq, -ENOANO);
- op = afs_alloc_operation(wbk->key, vnode->volume);
- if (IS_ERR(op)) {
- afs_put_wb_key(wbk);
- return -ENOMEM;
+ if (!subreq->retry_count) {
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ return netfs_write_subrequest_terminated(subreq, -EAGAIN);
}
+#endif
- i_size = i_size_read(&vnode->vfs_inode);
+ op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
+ if (IS_ERR(op))
+ return netfs_write_subrequest_terminated(subreq, -EAGAIN);
afs_op_set_vnode(op, 0, vnode);
- op->file[0].dv_delta = 1;
+ op->file[0].dv_delta = 1;
op->file[0].modification = true;
- op->store.write_iter = iter;
- op->store.pos = pos;
- op->store.size = size;
- op->store.i_size = max(pos + size, i_size);
- op->store.laundering = laundering;
- op->mtime = vnode->vfs_inode.i_mtime;
- op->flags |= AFS_OPERATION_UNINTR;
- op->ops = &afs_store_data_operation;
-
-try_next_key:
+ op->store.pos = pos;
+ op->store.size = len;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_store_data_operation;
+
afs_begin_vnode_operation(op);
- afs_wait_for_operation(op);
- switch (op->error) {
+ op->store.write_iter = &subreq->io_iter;
+ op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
+ op->mtime = inode_get_mtime(&vnode->netfs.inode);
+
+ afs_wait_for_operation(op);
+ ret = afs_put_operation(op);
+ switch (ret) {
+ case 0:
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ break;
case -EACCES:
case -EPERM:
case -ENOKEY:
case -EKEYEXPIRED:
case -EKEYREJECTED:
case -EKEYREVOKED:
- _debug("next");
-
- ret = afs_get_writeback_key(vnode, &wbk);
- if (ret == 0) {
- key_put(op->key);
- op->key = key_get(wbk->key);
- goto try_next_key;
- }
+ /* If there are more keys we can try, use the retry algorithm
+ * to rotate the keys.
+ */
+ if (wreq->netfs_priv2)
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
break;
}
- afs_put_wb_key(wbk);
- _leave(" = %d", op->error);
- return afs_put_operation(op);
+ netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len);
+}
+
+void afs_issue_write(struct netfs_io_subrequest *subreq)
+{
+ subreq->work.func = afs_issue_write_worker;
+ if (!queue_work(system_dfl_wq, &subreq->work))
+ WARN_ON_ONCE(1);
}
/*
- * Extend the region to be written back to include subsequent contiguously
- * dirty pages if possible, but don't sleep while doing so.
- *
- * If this page holds new content, then we can include filler zeros in the
- * writeback.
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
*/
-static void afs_extend_writeback(struct address_space *mapping,
- struct afs_vnode *vnode,
- long *_count,
- loff_t start,
- loff_t max_len,
- bool new_content,
- unsigned int *_len)
+void afs_begin_writeback(struct netfs_io_request *wreq)
{
- struct pagevec pvec;
- struct folio *folio;
- unsigned long priv;
- unsigned int psize, filler = 0;
- unsigned int f, t;
- loff_t len = *_len;
- pgoff_t index = (start + len) / PAGE_SIZE;
- bool stop = true;
- unsigned int i;
-
- XA_STATE(xas, &mapping->i_pages, index);
- pagevec_init(&pvec);
-
- do {
- /* Firstly, we gather up a batch of contiguous dirty pages
- * under the RCU read lock - but we can't clear the dirty flags
- * there if any of those pages are mapped.
- */
- rcu_read_lock();
-
- xas_for_each(&xas, folio, ULONG_MAX) {
- stop = true;
- if (xas_retry(&xas, folio))
- continue;
- if (xa_is_value(folio))
- break;
- if (folio_index(folio) != index)
- break;
-
- if (!folio_try_get_rcu(folio)) {
- xas_reset(&xas);
- continue;
- }
-
- /* Has the page moved or been split? */
- if (unlikely(folio != xas_reload(&xas))) {
- folio_put(folio);
- break;
- }
-
- if (!folio_trylock(folio)) {
- folio_put(folio);
- break;
- }
- if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
- folio_unlock(folio);
- folio_put(folio);
- break;
- }
-
- psize = folio_size(folio);
- priv = (unsigned long)folio_get_private(folio);
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- if (f != 0 && !new_content) {
- folio_unlock(folio);
- folio_put(folio);
- break;
- }
-
- len += filler + t;
- filler = psize - t;
- if (len >= max_len || *_count <= 0)
- stop = true;
- else if (t == psize || new_content)
- stop = false;
-
- index += folio_nr_pages(folio);
- if (!pagevec_add(&pvec, &folio->page))
- break;
- if (stop)
- break;
- }
-
- if (!stop)
- xas_pause(&xas);
- rcu_read_unlock();
-
- /* Now, if we obtained any pages, we can shift them to being
- * writable and mark them for caching.
- */
- if (!pagevec_count(&pvec))
- break;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- folio = page_folio(pvec.pages[i]);
- trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
-
- if (!folio_clear_dirty_for_io(folio))
- BUG();
- if (folio_start_writeback(folio))
- BUG();
-
- *_count -= folio_nr_pages(folio);
- folio_unlock(folio);
- }
-
- pagevec_release(&pvec);
- cond_resched();
- } while (!stop);
-
- *_len = len;
+ if (S_ISREG(wreq->inode->i_mode))
+ afs_get_writeback_key(wreq);
}
/*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
+ * Prepare to retry the writes in the request. Use this to try rotating the
+ * available writeback keys.
*/
-static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
- struct writeback_control *wbc,
- struct folio *folio,
- loff_t start, loff_t end)
+void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct iov_iter iter;
- unsigned long priv;
- unsigned int offset, to, len, max_len;
- loff_t i_size = i_size_read(&vnode->vfs_inode);
- bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- long count = wbc->nr_to_write;
- int ret;
-
- _enter(",%lx,%llx-%llx", folio_index(folio), start, end);
-
- if (folio_start_writeback(folio))
- BUG();
-
- count -= folio_nr_pages(folio);
-
- /* Find all consecutive lockable dirty pages that have contiguous
- * written regions, stopping when we find a page that is not
- * immediately lockable, is not dirty or is missing, or we reach the
- * end of the range.
- */
- priv = (unsigned long)folio_get_private(folio);
- offset = afs_folio_dirty_from(folio, priv);
- to = afs_folio_dirty_to(folio, priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);
-
- len = to - offset;
- start += offset;
- if (start < i_size) {
- /* Trim the write to the EOF; the extra data is ignored. Also
- * put an upper limit on the size of a single storedata op.
- */
- max_len = 65536 * 4096;
- max_len = min_t(unsigned long long, max_len, end - start + 1);
- max_len = min_t(unsigned long long, max_len, i_size - start);
-
- if (len < max_len &&
- (to == folio_size(folio) || new_content))
- afs_extend_writeback(mapping, vnode, &count,
- start, max_len, new_content, &len);
- len = min_t(loff_t, len, max_len);
- }
-
- /* We now have a contiguous set of dirty pages, each with writeback
- * set; the first page is still locked at this point, but all the rest
- * have been unlocked.
- */
- folio_unlock(folio);
-
- if (start < i_size) {
- _debug("write back %x @%llx [%llx]", len, start, i_size);
-
- iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
- ret = afs_store_data(vnode, &iter, start, false);
- } else {
- _debug("write discard %x @%llx [%llx]", len, start, i_size);
-
- /* The dirty region was entirely beyond the EOF. */
- afs_pages_written_back(vnode, start, len);
- ret = 0;
- }
-
- switch (ret) {
- case 0:
- wbc->nr_to_write = count;
- ret = len;
+ struct netfs_io_subrequest *subreq =
+ list_first_entry(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+
+ switch (wreq->origin) {
+ case NETFS_READAHEAD:
+ case NETFS_READPAGE:
+ case NETFS_READ_GAPS:
+ case NETFS_READ_SINGLE:
+ case NETFS_READ_FOR_WRITE:
+ case NETFS_UNBUFFERED_READ:
+ case NETFS_DIO_READ:
+ return;
+ default:
break;
+ }
- default:
- pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
- fallthrough;
+ switch (subreq->error) {
case -EACCES:
case -EPERM:
case -ENOKEY:
case -EKEYEXPIRED:
case -EKEYREJECTED:
case -EKEYREVOKED:
- afs_redirty_pages(wbc, mapping, start, len);
- mapping_set_error(mapping, ret);
- break;
-
- case -EDQUOT:
- case -ENOSPC:
- afs_redirty_pages(wbc, mapping, start, len);
- mapping_set_error(mapping, -ENOSPC);
- break;
-
- case -EROFS:
- case -EIO:
- case -EREMOTEIO:
- case -EFBIG:
- case -ENOENT:
- case -ENOMEDIUM:
- case -ENXIO:
- trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
- afs_kill_pages(mapping, start, len);
- mapping_set_error(mapping, ret);
+ afs_get_writeback_key(wreq);
+ if (!wreq->netfs_priv)
+ stream->failed = true;
break;
}
-
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * write a page back to the server
- * - the caller locked the page for us
- */
-int afs_writepage(struct page *subpage, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(subpage);
- ssize_t ret;
- loff_t start;
-
- _enter("{%lx},", folio_index(folio));
-
- start = folio_index(folio) * PAGE_SIZE;
- ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
- folio, start, LLONG_MAX - start);
- if (ret < 0) {
- _leave(" = %zd", ret);
- return ret;
- }
-
- _leave(" = 0");
- return 0;
-}
-
-/*
- * write a region of pages back to the server
- */
-static int afs_writepages_region(struct address_space *mapping,
- struct writeback_control *wbc,
- loff_t start, loff_t end, loff_t *_next)
-{
- struct folio *folio;
- struct page *head_page;
- ssize_t ret;
- int n;
-
- _enter("%llx,%llx,", start, end);
-
- do {
- pgoff_t index = start / PAGE_SIZE;
-
- n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
- PAGECACHE_TAG_DIRTY, 1, &head_page);
- if (!n)
- break;
-
- folio = page_folio(head_page);
- start = folio_pos(folio); /* May regress with THPs */
-
- _debug("wback %lx", folio_index(folio));
-
- /* At this point we hold neither the i_pages lock nor the
- * page lock: the page may be truncated or invalidated
- * (changing page->mapping to NULL), or even swizzled
- * back from swapper_space to tmpfs file mapping
- */
- if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = folio_lock_killable(folio);
- if (ret < 0) {
- folio_put(folio);
- return ret;
- }
- } else {
- if (!folio_trylock(folio)) {
- folio_put(folio);
- return 0;
- }
- }
-
- if (folio_mapping(folio) != mapping ||
- !folio_test_dirty(folio)) {
- start += folio_size(folio);
- folio_unlock(folio);
- folio_put(folio);
- continue;
- }
-
- if (folio_test_writeback(folio)) {
- folio_unlock(folio);
- if (wbc->sync_mode != WB_SYNC_NONE)
- folio_wait_writeback(folio);
- folio_put(folio);
- continue;
- }
-
- if (!folio_clear_dirty_for_io(folio))
- BUG();
- ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
- folio_put(folio);
- if (ret < 0) {
- _leave(" = %zd", ret);
- return ret;
- }
-
- start += ret;
-
- cond_resched();
- } while (wbc->nr_to_write > 0);
-
- *_next = start;
- _leave(" = 0 [%llx]", *_next);
- return 0;
}
/*
* write some of the pending data back to the server
*/
-int afs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- loff_t start, next;
int ret;
- _enter("");
-
/* We have to be careful as we can end up racing with setattr()
* truncating the pagecache since the caller doesn't take a lock here
* to prevent it.
@@ -762,66 +240,12 @@ int afs_writepages(struct address_space *mapping,
else if (!down_read_trylock(&vnode->validate_lock))
return 0;
- if (wbc->range_cyclic) {
- start = mapping->writeback_index * PAGE_SIZE;
- ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
- if (ret == 0) {
- mapping->writeback_index = next / PAGE_SIZE;
- if (start > 0 && wbc->nr_to_write > 0) {
- ret = afs_writepages_region(mapping, wbc, 0,
- start, &next);
- if (ret == 0)
- mapping->writeback_index =
- next / PAGE_SIZE;
- }
- }
- } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
- if (wbc->nr_to_write > 0 && ret == 0)
- mapping->writeback_index = next / PAGE_SIZE;
- } else {
- ret = afs_writepages_region(mapping, wbc,
- wbc->range_start, wbc->range_end, &next);
- }
-
+ ret = netfs_writepages(mapping, wbc);
up_read(&vnode->validate_lock);
- _leave(" = %d", ret);
return ret;
}
/*
- * write to an AFS file
- */
-ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
-{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
- struct afs_file *af = iocb->ki_filp->private_data;
- ssize_t result;
- size_t count = iov_iter_count(from);
-
- _enter("{%llx:%llu},{%zu},",
- vnode->fid.vid, vnode->fid.vnode, count);
-
- if (IS_SWAPFILE(&vnode->vfs_inode)) {
- printk(KERN_INFO
- "AFS: Attempt to write to active swap file!\n");
- return -EBUSY;
- }
-
- if (!count)
- return 0;
-
- result = afs_validate(vnode, af->key);
- if (result < 0)
- return result;
-
- result = generic_file_write_iter(iocb, from);
-
- _leave(" = %zd", result);
- return result;
-}
-
-/*
* flush any dirty pages for this process, and check for write errors.
* - the return status from this call provides a reliable indication of
* whether any write errors occurred for this process.
@@ -849,59 +273,11 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
*/
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
- struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
- struct inode *inode = file_inode(file);
- struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_file *af = file->private_data;
- unsigned long priv;
- vm_fault_t ret = VM_FAULT_RETRY;
-
- _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
-
- afs_validate(vnode, af->key);
-
- sb_start_pagefault(inode->i_sb);
-
- /* Wait for the page to be written to the cache before we allow it to
- * be modified. We then assume the entire page will need writing back.
- */
-#ifdef CONFIG_AFS_FSCACHE
- if (folio_test_fscache(folio) &&
- folio_wait_fscache_killable(folio) < 0)
- goto out;
-#endif
-
- if (folio_wait_writeback_killable(folio))
- goto out;
-
- if (folio_lock_killable(folio) < 0)
- goto out;
-
- /* We mustn't change folio->private until writeback is complete as that
- * details the portion of the page we need to write back and we might
- * need to redirty the page if there's a problem.
- */
- if (folio_wait_writeback_killable(folio) < 0) {
- folio_unlock(folio);
- goto out;
- }
- priv = afs_folio_dirty(folio, 0, folio_size(folio));
- priv = afs_folio_dirty_mmapped(priv);
- if (folio_test_private(folio)) {
- folio_change_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
- } else {
- folio_attach_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
- }
- file_update_time(file);
-
- ret = VM_FAULT_LOCKED;
-out:
- sb_end_pagefault(inode->i_sb);
- return ret;
+ if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
+ return VM_FAULT_SIGBUS;
+ return netfs_page_mkwrite(vmf, NULL);
}
/*
@@ -915,8 +291,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
/* Discard unused keys */
spin_lock(&vnode->wb_lock);
- if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
- !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
+ if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
+ !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
if (refcount_read(&wbk->usage) == 1)
list_move(&wbk->vnode_link, &graveyard);
@@ -931,42 +307,3 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
afs_put_wb_key(wbk);
}
}
-
-/*
- * Clean up a page during invalidation.
- */
-int afs_launder_page(struct page *subpage)
-{
- struct folio *folio = page_folio(subpage);
- struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
- struct iov_iter iter;
- struct bio_vec bv[1];
- unsigned long priv;
- unsigned int f, t;
- int ret = 0;
-
- _enter("{%lx}", folio_index(folio));
-
- priv = (unsigned long)folio_get_private(folio);
- if (folio_clear_dirty_for_io(folio)) {
- f = 0;
- t = folio_size(folio);
- if (folio_test_private(folio)) {
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- }
-
- bv[0].bv_page = &folio->page;
- bv[0].bv_offset = f;
- bv[0].bv_len = t - f;
- iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
-
- trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
- ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
- }
-
- trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
- folio_detach_private(folio);
- folio_wait_fscache(folio);
- return ret;
-}
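
In the rewritten write.c above, the key used for a store travels in wreq->netfs_priv and the wb_key record it came from in wreq->netfs_priv2; on a permission-class error the subrequest is flagged for retry so afs_get_writeback_key() can step to the next key on the vnode that still validates. The sketch below shows that rotation in miniature, with an array standing in for the vnode's wb_keys list and invented names throughout.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct wb_key { int serial; bool valid; };

/*
 * Resume the scan just after the key used for the previous (failed) attempt
 * and return the next key that still validates, or NULL when the supply of
 * cached writeback keys is exhausted.
 */
static struct wb_key *next_writeback_key(struct wb_key *keys, size_t nr,
                                         struct wb_key *last)
{
        size_t i = last ? (size_t)(last - keys) + 1 : 0;

        for (; i < nr; i++)
                if (keys[i].valid)
                        return &keys[i];
        return NULL;
}

int main(void)
{
        struct wb_key keys[] = {
                { .serial = 10, .valid = true  },
                { .serial = 11, .valid = false },
                { .serial = 12, .valid = true  },
        };
        struct wb_key *k = NULL;

        while ((k = next_writeback_key(keys, 3, k)))
                printf("retry with key %d\n", k->serial);       /* 10, then 12 */
        return 0;
}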
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 7751b0b3f81d..e19f396aa370 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -75,7 +75,7 @@ static bool afs_make_acl(struct afs_operation *op,
{
struct afs_acl *acl;
- acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
+ acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL);
if (!acl) {
afs_op_nomem(op);
return false;
@@ -97,7 +97,7 @@ static const struct afs_operation_ops afs_store_acl_operation = {
* Set a file's AFS3 ACL.
*/
static int afs_xattr_set_acl(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *dentry,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
@@ -228,7 +228,7 @@ static const struct afs_operation_ops yfs_store_opaque_acl2_operation = {
* Set a file's YFS ACL.
*/
static int afs_xattr_set_yfs(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *dentry,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
@@ -353,7 +353,7 @@ static const struct xattr_handler afs_xattr_afs_volume_handler = {
.get = afs_xattr_get_volume,
};
-const struct xattr_handler *afs_xattr_handlers[] = {
+const struct xattr_handler * const afs_xattr_handlers[] = {
&afs_xattr_afs_acl_handler,
&afs_xattr_afs_cell_handler,
&afs_xattr_afs_fid_handler,
diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
index 8ca868164507..cc5f143d21a3 100644
--- a/fs/afs/xdr_fs.h
+++ b/fs/afs/xdr_fs.h
@@ -88,7 +88,7 @@ union afs_xdr_dir_block {
struct {
struct afs_xdr_dir_hdr hdr;
- u8 alloc_ctrs[AFS_DIR_MAX_BLOCKS];
+ u8 alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR];
__be16 hashtable[AFS_DIR_HASHTBL_SIZE];
} meta;
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index fdc7d675b4b0..febf13a49f0b 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -232,8 +232,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
struct afs_callback *cb = &scb->callback;
ktime_t cb_expiry;
- cb_expiry = call->reply_time;
- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
+ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
scb->have_cb = true;
*_bp += xdr_size(x);
@@ -246,12 +245,15 @@ static void xdr_decode_YFSVolSync(const __be32 **_bp,
struct afs_volsync *volsync)
{
struct yfs_xdr_YFSVolSync *x = (void *)*_bp;
- u64 creation;
+ u64 creation, update;
if (volsync) {
creation = xdr_to_u64(x->vol_creation_date);
do_div(creation, 10 * 1000 * 1000);
volsync->creation = creation;
+ update = xdr_to_u64(x->vol_update_date);
+ do_div(update, 10 * 1000 * 1000);
+ volsync->update = update;
}
*_bp += xdr_size(x);
@@ -350,18 +352,19 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call)
static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
{
struct afs_operation *op = call->op;
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
const __be32 *bp;
+ size_t count_before;
int ret;
_enter("{%u,%zu, %zu/%llu}",
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
- req->actual_len);
+ call->remaining);
switch (call->unmarshall) {
case 0:
- req->actual_len = 0;
+ call->remaining = 0;
afs_extract_to_tmp64(call);
call->unmarshall++;
fallthrough;
@@ -376,38 +379,39 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
if (ret < 0)
return ret;
- req->actual_len = be64_to_cpu(call->tmp64);
- _debug("DATA length: %llu", req->actual_len);
+ call->remaining = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", call->remaining);
- if (req->actual_len == 0)
+ if (call->remaining == 0)
goto no_more_data;
- call->iter = req->iter;
- call->iov_len = min(req->actual_len, req->len);
+ call->iter = &subreq->io_iter;
+ call->iov_len = min(call->remaining, subreq->len - subreq->transferred);
call->unmarshall++;
fallthrough;
/* extract the returned data */
case 2:
- _debug("extract data %zu/%llu",
- iov_iter_count(call->iter), req->actual_len);
+ count_before = call->iov_len;
+ _debug("extract data %zu/%llu", count_before, call->remaining);
ret = afs_extract_data(call, true);
+ subreq->transferred += count_before - call->iov_len;
if (ret < 0)
return ret;
call->iter = &call->def_iter;
- if (req->actual_len <= req->len)
+ if (call->remaining)
goto no_more_data;
/* Discard any excess data the server gave us */
- afs_extract_discard(call, req->actual_len - req->len);
+ afs_extract_discard(call, call->remaining);
call->unmarshall = 3;
fallthrough;
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), call->remaining);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -432,8 +436,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
xdr_decode_YFSCallBack(&bp, call, &vp->scb);
xdr_decode_YFSVolSync(&bp, &op->volsync);
- req->data_version = vp->scb.status.data_version;
- req->file_size = vp->scb.status.size;
+ if (subreq->start + subreq->transferred >= vp->scb.status.size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
call->unmarshall++;
fallthrough;
@@ -452,7 +456,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
static const struct afs_call_type yfs_RXYFSFetchData64 = {
.name = "YFS.FetchData64",
.op = yfs_FS_FetchData64,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = yfs_deliver_fs_fetch_data64,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
@@ -461,14 +467,15 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = {
*/
void yfs_fs_fetch_data(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
struct afs_call *call;
__be32 *bp;
- _enter(",%x,{%llx:%llu},%llx,%llx",
+ _enter(",%x,{%llx:%llu},%llx,%zx",
key_serial(op->key), vp->fid.vid, vp->fid.vnode,
- req->pos, req->len);
+ subreq->start + subreq->transferred,
+ subreq->len - subreq->transferred);
call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64,
sizeof(__be32) * 2 +
@@ -480,17 +487,19 @@ void yfs_fs_fetch_data(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
- req->call_debug_id = call->debug_id;
+ if (op->flags & AFS_OPERATION_ASYNC)
+ call->async = true;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
bp = xdr_encode_YFSFid(bp, &vp->fid);
- bp = xdr_encode_u64(bp, req->pos);
- bp = xdr_encode_u64(bp, req->len);
+ bp = xdr_encode_u64(bp, subreq->start + subreq->transferred);
+ bp = xdr_encode_u64(bp, subreq->len - subreq->transferred);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
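The request window is now derived entirely from the netfs subrequest: the file position is subreq->start + subreq->transferred and the length is subreq->len - subreq->transferred, so a partially filled or retried subrequest automatically resumes where it left off. For example, a 256 KiB subrequest starting at 1 MiB that has already transferred 64 KiB is re-issued as offset 0x110000 with length 0x30000 (192 KiB).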
@@ -573,6 +582,7 @@ void yfs_fs_create_file(struct afs_operation *op)
bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -621,6 +631,7 @@ void yfs_fs_make_dir(struct afs_operation *op)
bp = xdr_encode_YFSStoreStatus(bp, &op->create.mode, &op->mtime);
yfs_check_req(call, bp);
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -656,8 +667,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
static void yfs_done_fs_remove_file2(struct afs_call *call)
{
if (call->error == -ECONNABORTED &&
- call->abort_code == RX_INVALID_OPERATION) {
- set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
+ (call->abort_code == RX_INVALID_OPERATION ||
+ call->abort_code == RXGEN_OPCODE)) {
+ set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags);
call->op->flags |= AFS_OPERATION_DOWNGRADE;
}
}
@@ -705,6 +717,7 @@ void yfs_fs_remove_file2(struct afs_operation *op)
bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -774,6 +787,7 @@ void yfs_fs_remove_file(struct afs_operation *op)
bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -815,6 +829,7 @@ void yfs_fs_remove_dir(struct afs_operation *op)
bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -888,6 +903,7 @@ void yfs_fs_link(struct afs_operation *op)
bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call1(call, &vp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -969,6 +985,7 @@ void yfs_fs_symlink(struct afs_operation *op)
bp = xdr_encode_YFSStoreStatus(bp, &mode, &op->mtime);
yfs_check_req(call, bp);
+ call->fid = dvp->fid;
trace_afs_make_fs_call1(call, &dvp->fid, name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1025,6 +1042,9 @@ void yfs_fs_rename(struct afs_operation *op)
_enter("");
+ if (!test_bit(AFS_SERVER_FL_NO_RENAME2, &op->server->flags))
+ return yfs_fs_rename_replace(op);
+
call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
@@ -1048,6 +1068,253 @@ void yfs_fs_rename(struct afs_operation *op)
bp = xdr_encode_name(bp, new_name);
yfs_check_req(call, bp);
+ call->fid = orig_dvp->fid;
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
+/*
+ * Deliver reply data to a YFS.Rename_NoReplace operation. This does not
+ * return the status of a displaced target inode as there cannot be one.
+ */
+static int yfs_deliver_fs_rename_1(struct afs_call *call)
+{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode_param *old_vp = &op->more_files[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ /* If the two dirs are the same, we have two copies of the same status
+ * report, so we just decode it twice.
+ */
+ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_YFSFid(&bp, &old_vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &old_vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * Deliver reply data to a YFS.Rename_Replace or a YFS.Rename_Exchange
+ * operation. These return the status of the displaced target inode if there
+ * was one.
+ */
+static int yfs_deliver_fs_rename_2(struct afs_call *call)
+{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode_param *old_vp = &op->more_files[0];
+ struct afs_vnode_param *new_vp = &op->more_files[1];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ /* If the two dirs are the same, we have two copies of the same status
+ * report, so we just decode it twice.
+ */
+ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_YFSFid(&bp, &old_vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &old_vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_YFSFid(&bp, &new_vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+static void yfs_done_fs_rename_replace(struct afs_call *call)
+{
+ if (call->error == -ECONNABORTED &&
+ (call->abort_code == RX_INVALID_OPERATION ||
+ call->abort_code == RXGEN_OPCODE)) {
+ set_bit(AFS_SERVER_FL_NO_RENAME2, &call->op->server->flags);
+ call->op->flags |= AFS_OPERATION_DOWNGRADE;
+ }
+}
+
+/*
+ * YFS.Rename_Replace operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename_Replace = {
+ .name = "FS.Rename_Replace",
+ .op = yfs_FS_Rename_Replace,
+ .deliver = yfs_deliver_fs_rename_2,
+ .done = yfs_done_fs_rename_replace,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.Rename_NoReplace operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename_NoReplace = {
+ .name = "FS.Rename_NoReplace",
+ .op = yfs_FS_Rename_NoReplace,
+ .deliver = yfs_deliver_fs_rename_1,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.Rename_Exchange operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename_Exchange = {
+ .name = "FS.Rename_Exchange",
+ .op = yfs_FS_Rename_Exchange,
+ .deliver = yfs_deliver_fs_rename_2,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Rename a file or directory, replacing the target if it exists. The status
+ * of a displaced target is returned.
+ */
+void yfs_fs_rename_replace(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename_Replace,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(orig_name->len) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(new_name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* Marshall the parameters. */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME_REPLACE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
+ yfs_check_req(call, bp);
+
+ call->fid = orig_dvp->fid;
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
+/*
+ * Rename a file or directory, failing if the target dirent exists.
+ */
+void yfs_fs_rename_noreplace(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename_NoReplace,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(orig_name->len) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(new_name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* Marshall the parameters. */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME_NOREPLACE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
+ yfs_check_req(call, bp);
+
+ call->fid = orig_dvp->fid;
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
+/*
+ * Exchange a pair of files or directories.

+ */
+void yfs_fs_rename_exchange(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename_Exchange,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(orig_name->len) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(new_name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* Marshall the parameters. */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME_EXCHANGE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
+ yfs_check_req(call, bp);
+
+ call->fid = orig_dvp->fid;
trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1103,6 +1370,7 @@ void yfs_fs_store_data(struct afs_operation *op)
bp = xdr_encode_u64(bp, op->store.i_size);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1159,6 +1427,7 @@ static void yfs_fs_setattr_size(struct afs_operation *op)
bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1197,6 +1466,7 @@ void yfs_fs_setattr(struct afs_operation *op)
bp = xdr_encode_YFS_StoreStatus(bp, attr);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1367,6 +1637,7 @@ void yfs_fs_get_volume_status(struct afs_operation *op)
bp = xdr_encode_u64(bp, vp->fid.vid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1431,6 +1702,7 @@ void yfs_fs_set_lock(struct afs_operation *op)
bp = xdr_encode_u32(bp, op->lock.type);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_calli(call, &vp->fid, op->lock.type);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1461,6 +1733,7 @@ void yfs_fs_extend_lock(struct afs_operation *op)
bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1491,6 +1764,7 @@ void yfs_fs_release_lock(struct afs_operation *op)
bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1557,6 +1831,7 @@ void yfs_fs_fetch_status(struct afs_operation *op)
bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1737,6 +2012,7 @@ void yfs_fs_inline_bulk_status(struct afs_operation *op)
bp = xdr_encode_YFSFid(bp, &op->more_files[i].fid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_NOFS);
}
@@ -1899,6 +2175,7 @@ void yfs_fs_fetch_opaque_acl(struct afs_operation *op)
bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_KERNEL);
}
@@ -1949,6 +2226,7 @@ void yfs_fs_store_opaque_acl2(struct afs_operation *op)
bp += size / sizeof(__be32);
yfs_check_req(call, bp);
+ call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
afs_make_op_call(op, call, GFP_KERNEL);
}
diff --git a/fs/aio.c b/fs/aio.c
index 9c81cf611d65..0a23a8c0717f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -80,7 +80,7 @@ struct aio_ring {
struct kioctx_table {
struct rcu_head rcu;
unsigned nr;
- struct kioctx __rcu *table[];
+ struct kioctx __rcu *table[] __counted_by(nr);
};
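__counted_by(nr) tells the compiler that table[] holds nr elements, which lets CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE bounds-check accesses to the flexible array at run time. The counter must be assigned before the array is indexed; a sketch of the usual allocation pattern (the helper name is hypothetical):

	static struct kioctx_table *table_alloc(unsigned int nr)
	{
		struct kioctx_table *table;

		table = kzalloc(struct_size(table, table, nr), GFP_KERNEL);
		if (!table)
			return NULL;
		/* Set the counter before any table->table[i] access so the
		 * __counted_by() bounds check sees a valid length. */
		table->nr = nr;
		return table;
	}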
struct kioctx_cpu {
@@ -100,7 +100,7 @@ struct kioctx {
unsigned long user_id;
- struct __percpu kioctx_cpu *cpu;
+ struct kioctx_cpu __percpu *cpu;
/*
* For percpu reqs_available, number of slots we move to/from global
@@ -122,7 +122,7 @@ struct kioctx {
unsigned long mmap_base;
unsigned long mmap_size;
- struct page **ring_pages;
+ struct folio **ring_folios;
long nr_pages;
struct rcu_work free_rwork; /* see free_ioctx() */
@@ -160,7 +160,7 @@ struct kioctx {
spinlock_t completion_lock;
} ____cacheline_aligned_in_smp;
- struct page *internal_pages[AIO_RING_PAGES];
+ struct folio *internal_folios[AIO_RING_PAGES];
struct file *aio_ring_file;
unsigned id;
@@ -181,8 +181,9 @@ struct poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
- bool done;
bool cancelled;
+ bool work_scheduled;
+ bool work_need_resched;
struct wait_queue_entry wait;
struct work_struct work;
};
@@ -219,9 +220,34 @@ struct aio_kiocb {
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
-unsigned long aio_nr; /* current system wide number of aio requests */
-unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
+static unsigned long aio_nr; /* current system wide number of aio requests */
+static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
+#ifdef CONFIG_SYSCTL
+static const struct ctl_table aio_sysctls[] = {
+ {
+ .procname = "aio-nr",
+ .data = &aio_nr,
+ .maxlen = sizeof(aio_nr),
+ .mode = 0444,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "aio-max-nr",
+ .data = &aio_max_nr,
+ .maxlen = sizeof(aio_max_nr),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+};
+
+static void __init aio_sysctl_init(void)
+{
+ register_sysctl_init("fs", aio_sysctls);
+}
+#else
+#define aio_sysctl_init() do { } while (0)
+#endif
static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;
@@ -239,7 +265,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
return ERR_CAST(inode);
inode->i_mapping->a_ops = &aio_ctx_aops;
- inode->i_mapping->private_data = ctx;
+ inode->i_mapping->i_private_data = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
@@ -274,6 +300,7 @@ static int __init aio_setup(void)
kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ aio_sysctl_init();
return 0;
}
__initcall(aio_setup);
@@ -288,10 +315,10 @@ static void put_aio_ring_file(struct kioctx *ctx)
/* Prevent further access to the kioctx from migratepages */
i_mapping = aio_ring_file->f_mapping;
- spin_lock(&i_mapping->private_lock);
- i_mapping->private_data = NULL;
+ spin_lock(&i_mapping->i_private_lock);
+ i_mapping->i_private_data = NULL;
ctx->aio_ring_file = NULL;
- spin_unlock(&i_mapping->private_lock);
+ spin_unlock(&i_mapping->i_private_lock);
fput(aio_ring_file);
}
@@ -307,19 +334,20 @@ static void aio_free_ring(struct kioctx *ctx)
put_aio_ring_file(ctx);
for (i = 0; i < ctx->nr_pages; i++) {
- struct page *page;
- pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
- page_count(ctx->ring_pages[i]));
- page = ctx->ring_pages[i];
- if (!page)
+ struct folio *folio = ctx->ring_folios[i];
+
+ if (!folio)
continue;
- ctx->ring_pages[i] = NULL;
- put_page(page);
+
+ pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
+ folio_ref_count(folio));
+ ctx->ring_folios[i] = NULL;
+ folio_put(folio);
}
- if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
- kfree(ctx->ring_pages);
- ctx->ring_pages = NULL;
+ if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
+ kfree(ctx->ring_folios);
+ ctx->ring_folios = NULL;
}
}
@@ -333,6 +361,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
spin_lock(&mm->ioctx_lock);
rcu_read_lock();
table = rcu_dereference(mm->ioctx_table);
+ if (!table)
+ goto out_unlock;
+
for (i = 0; i < table->nr; i++) {
struct kioctx *ctx;
@@ -346,6 +377,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
}
}
+out_unlock:
rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
return res;
@@ -360,39 +392,29 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
#endif
};
-static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
{
- vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_ops = &aio_ring_vm_ops;
+ desc->vm_flags |= VM_DONTEXPAND;
+ desc->vm_ops = &aio_ring_vm_ops;
return 0;
}
static const struct file_operations aio_ring_fops = {
- .mmap = aio_ring_mmap,
+ .mmap_prepare = aio_ring_mmap_prepare,
};
#if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
- struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode)
{
struct kioctx *ctx;
unsigned long flags;
pgoff_t idx;
- int rc;
+ int rc = 0;
- /*
- * We cannot support the _NO_COPY case here, because copy needs to
- * happen under the ctx->completion_lock. That does not work with the
- * migration workflow of MIGRATE_SYNC_NO_COPY.
- */
- if (mode == MIGRATE_SYNC_NO_COPY)
- return -EINVAL;
-
- rc = 0;
-
- /* mapping->private_lock here protects against the kioctx teardown. */
- spin_lock(&mapping->private_lock);
- ctx = mapping->private_data;
+ /* mapping->i_private_lock here protects against the kioctx teardown. */
+ spin_lock(&mapping->i_private_lock);
+ ctx = mapping->i_private_data;
if (!ctx) {
rc = -EINVAL;
goto out;
@@ -407,10 +429,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
goto out;
}
- idx = old->index;
+ idx = src->index;
if (idx < (pgoff_t)ctx->nr_pages) {
- /* Make sure the old page hasn't already been changed */
- if (ctx->ring_pages[idx] != old)
+ /* Make sure the old folio hasn't already been changed */
+ if (ctx->ring_folios[idx] != src)
rc = -EAGAIN;
} else
rc = -EINVAL;
@@ -419,41 +441,42 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
goto out_unlock;
/* Writeback must be complete */
- BUG_ON(PageWriteback(old));
- get_page(new);
+ BUG_ON(folio_test_writeback(src));
+ folio_get(dst);
- rc = migrate_page_move_mapping(mapping, new, old, 1);
- if (rc != MIGRATEPAGE_SUCCESS) {
- put_page(new);
+ rc = folio_migrate_mapping(mapping, dst, src, 1);
+ if (rc) {
+ folio_put(dst);
goto out_unlock;
}
/* Take completion_lock to prevent other writes to the ring buffer
- * while the old page is copied to the new. This prevents new
+ * while the old folio is copied to the new. This prevents new
* events from being lost.
*/
spin_lock_irqsave(&ctx->completion_lock, flags);
- migrate_page_copy(new, old);
- BUG_ON(ctx->ring_pages[idx] != old);
- ctx->ring_pages[idx] = new;
+ folio_copy(dst, src);
+ folio_migrate_flags(dst, src);
+ BUG_ON(ctx->ring_folios[idx] != src);
+ ctx->ring_folios[idx] = dst;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
- /* The old page is no longer accessible. */
- put_page(old);
+ /* The old folio is no longer accessible. */
+ folio_put(src);
out_unlock:
mutex_unlock(&ctx->ring_lock);
out:
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return rc;
}
+#else
+#define aio_migrate_folio NULL
#endif
static const struct address_space_operations aio_ctx_aops = {
- .set_page_dirty = __set_page_dirty_no_writeback,
-#if IS_ENABLED(CONFIG_MIGRATION)
- .migratepage = aio_migratepage,
-#endif
+ .dirty_folio = noop_dirty_folio,
+ .migrate_folio = aio_migrate_folio,
};
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
@@ -485,28 +508,30 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
/ sizeof(struct io_event);
- ctx->ring_pages = ctx->internal_pages;
+ ctx->ring_folios = ctx->internal_folios;
if (nr_pages > AIO_RING_PAGES) {
- ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- if (!ctx->ring_pages) {
+ ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
+ GFP_KERNEL);
+ if (!ctx->ring_folios) {
put_aio_ring_file(ctx);
return -ENOMEM;
}
}
for (i = 0; i < nr_pages; i++) {
- struct page *page;
- page = find_or_create_page(file->f_mapping,
- i, GFP_HIGHUSER | __GFP_ZERO);
- if (!page)
+ struct folio *folio;
+
+ folio = __filemap_get_folio(file->f_mapping, i,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ GFP_USER | __GFP_ZERO);
+ if (IS_ERR(folio))
break;
- pr_debug("pid(%d) page[%d]->count=%d\n",
- current->pid, i, page_count(page));
- SetPageUptodate(page);
- unlock_page(page);
- ctx->ring_pages[i] = page;
+ pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
+ folio_ref_count(folio));
+ folio_end_read(folio, true);
+
+ ctx->ring_folios[i] = folio;
}
ctx->nr_pages = i;
@@ -526,7 +551,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_SHARED, 0, &unused, NULL);
+ MAP_SHARED, 0, 0, &unused, NULL);
mmap_write_unlock(mm);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
@@ -539,7 +564,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->user_id = ctx->mmap_base;
ctx->nr_events = nr_events; /* trusted copy */
- ring = kmap_atomic(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
ring->nr = nr_events; /* user copy */
ring->id = ~0U;
ring->head = ring->tail = 0;
@@ -547,8 +572,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring);
- kunmap_atomic(ring);
- flush_dcache_page(ctx->ring_pages[0]);
+ flush_dcache_folio(ctx->ring_folios[0]);
return 0;
}
@@ -559,13 +583,24 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
- struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
- struct kioctx *ctx = req->ki_ctx;
+ struct aio_kiocb *req;
+ struct kioctx *ctx;
unsigned long flags;
+ /*
+ * kiocb didn't come from aio or is neither a read nor a write, hence
+ * ignore it.
+ */
+ if (!(iocb->ki_flags & IOCB_AIO_RW))
+ return;
+
+ req = container_of(iocb, struct aio_kiocb, rw);
+
if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
return;
+ ctx = req->ki_ctx;
+
spin_lock_irqsave(&ctx->ctx_lock, flags);
list_add_tail(&req->ki_list, &ctx->active_reqs);
req->ki_cancel = cancel;
@@ -601,7 +636,7 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
/* Synchronize against RCU protected table->table[] dereferences */
INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
- queue_rcu_work(system_wq, &ctx->free_rwork);
+ queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
}
/*
@@ -648,11 +683,10 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
/* While kioctx setup is in progress,
* we are protected from page migration
- * changes ring_pages by ->ring_lock.
+ * changes ring_folios by ->ring_lock.
*/
- ring = kmap_atomic(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
ring->id = ctx->id;
- kunmap_atomic(ring);
return 0;
}
@@ -923,16 +957,13 @@ static bool __get_reqs_available(struct kioctx *ctx)
local_irq_save(flags);
kcpu = this_cpu_ptr(ctx->cpu);
if (!kcpu->reqs_available) {
- int old, avail = atomic_read(&ctx->reqs_available);
+ int avail = atomic_read(&ctx->reqs_available);
do {
if (avail < ctx->req_batch)
goto out;
-
- old = avail;
- avail = atomic_cmpxchg(&ctx->reqs_available,
- avail, avail - ctx->req_batch);
- } while (avail != old);
+ } while (!atomic_try_cmpxchg(&ctx->reqs_available,
+ &avail, avail - ctx->req_batch));
kcpu->reqs_available += ctx->req_batch;
}
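atomic_try_cmpxchg() is the idiomatic replacement for the open-coded atomic_cmpxchg() loop that the hunk above removes: on failure it writes the current value back into the expected-value variable, so the caller needs neither a separate 'old' copy nor a second comparison. The general shape of the pattern, on a hypothetical counter:

	/* Take 'batch' items from 'counter' if at least that many remain. */
	static bool take_batch(atomic_t *counter, int batch)
	{
		int avail = atomic_read(counter);

		do {
			if (avail < batch)
				return false;
			/* On failure, atomic_try_cmpxchg() reloads 'avail' from
			 * *counter, so the loop simply retries. */
		} while (!atomic_try_cmpxchg(counter, &avail, avail - batch));

		return true;
	}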
@@ -996,9 +1027,8 @@ static void user_refill_reqs_available(struct kioctx *ctx)
* against ctx->completed_events below will make sure we do the
* safe/right thing.
*/
- ring = kmap_atomic(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
head = ring->head;
- kunmap_atomic(ring);
refill_reqs_available(ctx, head, ctx->tail);
}
@@ -1080,6 +1110,11 @@ static inline void iocb_destroy(struct aio_kiocb *iocb)
kmem_cache_free(kiocb_cachep, iocb);
}
+struct aio_waiter {
+ struct wait_queue_entry w;
+ size_t min_nr;
+};
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
@@ -1088,7 +1123,7 @@ static void aio_complete(struct aio_kiocb *iocb)
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring *ring;
struct io_event *ev_page, *event;
- unsigned tail, pos, head;
+ unsigned tail, pos, head, avail;
unsigned long flags;
/*
@@ -1104,13 +1139,12 @@ static void aio_complete(struct aio_kiocb *iocb)
if (++tail >= ctx->nr_events)
tail = 0;
- ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+ ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
event = ev_page + pos % AIO_EVENTS_PER_PAGE;
*event = iocb->ki_res;
- kunmap_atomic(ev_page);
- flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+ flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
(void __user *)(unsigned long)iocb->ki_res.obj,
@@ -1123,15 +1157,18 @@ static void aio_complete(struct aio_kiocb *iocb)
ctx->tail = tail;
- ring = kmap_atomic(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
head = ring->head;
ring->tail = tail;
- kunmap_atomic(ring);
- flush_dcache_page(ctx->ring_pages[0]);
+ flush_dcache_folio(ctx->ring_folios[0]);
ctx->completed_events++;
if (ctx->completed_events > 1)
refill_reqs_available(ctx, head, tail);
+
+ avail = tail > head
+ ? tail - head
+ : tail + ctx->nr_events - head;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
pr_debug("added to ring %p at [%u]\n", iocb, tail);
@@ -1142,7 +1179,7 @@ static void aio_complete(struct aio_kiocb *iocb)
* from IRQ context.
*/
if (iocb->ki_eventfd)
- eventfd_signal(iocb->ki_eventfd, 1);
+ eventfd_signal(iocb->ki_eventfd);
/*
* We have to order our ring_info tail store above and test
@@ -1152,8 +1189,18 @@ static void aio_complete(struct aio_kiocb *iocb)
*/
smp_mb();
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
+ if (waitqueue_active(&ctx->wait)) {
+ struct aio_waiter *curr, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->wait.lock, flags);
+ list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
+ if (avail >= curr->min_nr) {
+ wake_up_process(curr->w.private);
+ list_del_init_careful(&curr->w.entry);
+ }
+ spin_unlock_irqrestore(&ctx->wait.lock, flags);
+ }
}
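avail here is the number of events currently queued in the ring, computed modulo ctx->nr_events from the head and tail indices; the wake-up loop only wakes waiters whose min_nr is already satisfied. In effect, a task sleeping in io_getevents() for a minimum of, say, 32 completions is no longer woken (and put straight back to sleep) by each individual completion, but only once at least 32 events are available.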
static inline void iocb_put(struct aio_kiocb *iocb)
@@ -1185,11 +1232,10 @@ static long aio_read_events_ring(struct kioctx *ctx,
sched_annotate_sleep();
mutex_lock(&ctx->ring_lock);
- /* Access to ->ring_pages here is protected by ctx->ring_lock. */
- ring = kmap_atomic(ctx->ring_pages[0]);
+ /* Access to ->ring_folios here is protected by ctx->ring_lock. */
+ ring = folio_address(ctx->ring_folios[0]);
head = ring->head;
tail = ring->tail;
- kunmap_atomic(ring);
/*
* Ensure that once we've read the current tail pointer, that
@@ -1208,23 +1254,22 @@ static long aio_read_events_ring(struct kioctx *ctx,
while (ret < nr) {
long avail;
struct io_event *ev;
- struct page *page;
+ struct folio *folio;
avail = (head <= tail ? tail : ctx->nr_events) - head;
if (head == tail)
break;
pos = head + AIO_EVENTS_OFFSET;
- page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
+ folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
pos %= AIO_EVENTS_PER_PAGE;
avail = min(avail, nr - ret);
avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
- ev = kmap(page);
+ ev = folio_address(folio);
copy_ret = copy_to_user(event + ret, ev + pos,
sizeof(*ev) * avail);
- kunmap(page);
if (unlikely(copy_ret)) {
ret = -EFAULT;
@@ -1236,10 +1281,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
head %= ctx->nr_events;
}
- ring = kmap_atomic(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
ring->head = head;
- kunmap_atomic(ring);
- flush_dcache_page(ctx->ring_pages[0]);
+ flush_dcache_folio(ctx->ring_folios[0]);
pr_debug("%li h%u t%u\n", ret, head, tail);
out:
@@ -1269,7 +1313,9 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
struct io_event __user *event,
ktime_t until)
{
- long ret = 0;
+ struct hrtimer_sleeper t;
+ struct aio_waiter w;
+ long ret = 0, ret2 = 0;
/*
* Note that aio_read_events() is being called as the conditional - i.e.
@@ -1285,12 +1331,38 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
* the ringbuffer empty. So in practice we should be ok, but it's
* something to be aware of when touching this code.
*/
- if (until == 0)
- aio_read_events(ctx, min_nr, nr, event, &ret);
- else
- wait_event_interruptible_hrtimeout(ctx->wait,
- aio_read_events(ctx, min_nr, nr, event, &ret),
- until);
+ aio_read_events(ctx, min_nr, nr, event, &ret);
+ if (until == 0 || ret < 0 || ret >= min_nr)
+ return ret;
+
+ hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ if (until != KTIME_MAX) {
+ hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
+ hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
+ }
+
+ init_wait(&w.w);
+
+ while (1) {
+ unsigned long nr_got = ret;
+
+ w.min_nr = min_nr - ret;
+
+ ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
+ if (!ret2 && !t.task)
+ ret2 = -ETIME;
+
+ if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
+ break;
+
+ if (nr_got == ret)
+ schedule();
+ }
+
+ finish_wait(&ctx->wait, &w.w);
+ hrtimer_cancel(&t.timer);
+ destroy_hrtimer_on_stack(&t.timer);
+
return ret;
}
@@ -1426,13 +1498,8 @@ static void aio_complete_rw(struct kiocb *kiocb, long res)
if (kiocb->ki_flags & IOCB_WRITE) {
struct inode *inode = file_inode(kiocb->ki_filp);
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
if (S_ISREG(inode->i_mode))
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- file_end_write(kiocb->ki_filp);
+ kiocb_end_write(kiocb);
}
iocb->ki_res.res = res;
@@ -1440,17 +1507,17 @@ static void aio_complete_rw(struct kiocb *kiocb, long res)
iocb_put(iocb);
}
-static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
+static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type)
{
int ret;
+ req->ki_write_stream = 0;
req->ki_complete = aio_complete_rw;
req->private = NULL;
req->ki_pos = iocb->aio_offset;
- req->ki_flags = iocb_flags(req->ki_filp);
+ req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
if (iocb->aio_flags & IOCB_FLAG_RESFD)
req->ki_flags |= IOCB_EVENTFD;
- req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
/*
* If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
@@ -1467,7 +1534,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
} else
req->ki_ioprio = get_current_ioprio();
- ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
+ ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type);
if (unlikely(ret))
return ret;
@@ -1483,7 +1550,7 @@ static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
size_t len = iocb->aio_nbytes;
if (!vectored) {
- ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
+ ssize_t ret = import_ubuf(rw, buf, len, iter);
*iovec = NULL;
return ret;
}
@@ -1519,22 +1586,21 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
struct file *file;
int ret;
- ret = aio_prep_rw(req, iocb);
+ ret = aio_prep_rw(req, iocb, READ);
if (ret)
return ret;
file = req->ki_filp;
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = -EINVAL;
if (unlikely(!file->f_op->read_iter))
return -EINVAL;
- ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
+ ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
if (ret < 0)
return ret;
ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
if (!ret)
- aio_rw_done(req, call_read_iter(file, req, &iter));
+ aio_rw_done(req, file->f_op->read_iter(req, &iter));
kfree(iovec);
return ret;
}
@@ -1547,7 +1613,7 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
struct file *file;
int ret;
- ret = aio_prep_rw(req, iocb);
+ ret = aio_prep_rw(req, iocb, WRITE);
if (ret)
return ret;
file = req->ki_filp;
@@ -1557,24 +1623,15 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
if (unlikely(!file->f_op->write_iter))
return -EINVAL;
- ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
+ ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
if (ret < 0)
return ret;
ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
if (!ret) {
- /*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * aio_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
- */
- if (S_ISREG(file_inode(file)->i_mode)) {
- sb_start_write(file_inode(file)->i_sb);
- __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
- }
+ if (S_ISREG(file_inode(file)->i_mode))
+ kiocb_start_write(req);
req->ki_flags |= IOCB_WRITE;
- aio_rw_done(req, call_write_iter(file, req, &iter));
+ aio_rw_done(req, file->f_op->write_iter(req, &iter));
}
kfree(iovec);
return ret;
@@ -1583,10 +1640,10 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
static void aio_fsync_work(struct work_struct *work)
{
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
- const struct cred *old_cred = override_creds(iocb->fsync.creds);
- iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
- revert_creds(old_cred);
+ scoped_with_creds(iocb->fsync.creds)
+ iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+
put_cred(iocb->fsync.creds);
iocb_put(iocb);
}
@@ -1619,6 +1676,51 @@ static void aio_poll_put_work(struct work_struct *work)
iocb_put(iocb);
}
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken. Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+ wait_queue_head_t *head;
+
+ /*
+ * While we hold the waitqueue lock and the waitqueue is nonempty,
+ * wake_up_pollfree() will wait for us. However, taking the waitqueue
+ * lock in the first place can race with the waitqueue being freed.
+ *
+ * We solve this as eventpoll does: by taking advantage of the fact that
+ * all users of wake_up_pollfree() will RCU-delay the actual free. If
+ * we enter rcu_read_lock() and see that the pointer to the queue is
+ * non-NULL, we can then lock it without the memory being freed out from
+ * under us, then check whether the request is still on the queue.
+ *
+ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+ * case the caller deletes the entry from the queue, leaving it empty.
+ * In that case, only RCU prevents the queue memory from being freed.
+ */
+ rcu_read_lock();
+ head = smp_load_acquire(&req->head);
+ if (head) {
+ spin_lock(&head->lock);
+ if (!list_empty(&req->wait.entry))
+ return true;
+ spin_unlock(&head->lock);
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+ spin_unlock(&req->head->lock);
+ rcu_read_unlock();
+}
+
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1740,27 @@ static void aio_poll_complete_work(struct work_struct *work)
* avoid further branches in the fast path.
*/
spin_lock_irq(&ctx->ctx_lock);
- if (!mask && !READ_ONCE(req->cancelled)) {
- add_wait_queue(req->head, &req->wait);
- spin_unlock_irq(&ctx->ctx_lock);
- return;
- }
+ if (poll_iocb_lock_wq(req)) {
+ if (!mask && !READ_ONCE(req->cancelled)) {
+ /*
+ * The request isn't actually ready to be completed yet.
+ * Reschedule completion if another wakeup came in.
+ */
+ if (req->work_need_resched) {
+ schedule_work(&req->work);
+ req->work_need_resched = false;
+ } else {
+ req->work_scheduled = false;
+ }
+ poll_iocb_unlock_wq(req);
+ spin_unlock_irq(&ctx->ctx_lock);
+ return;
+ }
+ list_del_init(&req->wait.entry);
+ poll_iocb_unlock_wq(req);
+ } /* else, POLLFREE has freed the waitqueue, so we must complete */
list_del_init(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
spin_unlock_irq(&ctx->ctx_lock);
iocb_put(iocb);
@@ -1657,13 +1772,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
struct poll_iocb *req = &aiocb->poll;
- spin_lock(&req->head->lock);
- WRITE_ONCE(req->cancelled, true);
- if (!list_empty(&req->wait.entry)) {
- list_del_init(&req->wait.entry);
- schedule_work(&aiocb->poll.work);
- }
- spin_unlock(&req->head->lock);
+ if (poll_iocb_lock_wq(req)) {
+ WRITE_ONCE(req->cancelled, true);
+ if (!req->work_scheduled) {
+ schedule_work(&aiocb->poll.work);
+ req->work_scheduled = true;
+ }
+ poll_iocb_unlock_wq(req);
+ } /* else, the request was force-cancelled by POLLFREE already */
return 0;
}
@@ -1680,21 +1796,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & req->events))
return 0;
- list_del_init(&req->wait.entry);
-
- if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ /*
+ * Complete the request inline if possible. This requires that three
+ * conditions be met:
+ * 1. An event mask must have been passed. If a plain wakeup was done
+ * instead, then mask == 0 and we have to call vfs_poll() to get
+ * the events, so inline completion isn't possible.
+ * 2. The completion work must not have already been scheduled.
+ * 3. ctx_lock must not be busy. We have to use trylock because we
+ * already hold the waitqueue lock, so this inverts the normal
+ * locking order. Use irqsave/irqrestore because not all
+ * filesystems (e.g. fuse) call this function with IRQs disabled,
+ * yet IRQs have to be disabled before ctx_lock is obtained.
+ */
+ if (mask && !req->work_scheduled &&
+ spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
struct kioctx *ctx = iocb->ki_ctx;
- /*
- * Try to complete the iocb inline if we can. Use
- * irqsave/irqrestore because not all filesystems (e.g. fuse)
- * call this function with IRQs disabled and because IRQs
- * have to be disabled before ctx_lock is obtained.
- */
+ list_del_init(&req->wait.entry);
list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
- if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+ if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
iocb = NULL;
INIT_WORK(&req->work, aio_poll_put_work);
schedule_work(&req->work);
@@ -1703,7 +1825,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (iocb)
iocb_put(iocb);
} else {
- schedule_work(&req->work);
+ /*
+ * Schedule the completion work if needed. If it was already
+ * scheduled, record that another wakeup came in.
+ *
+ * Don't remove the request from the waitqueue here, as it might
+ * not actually be complete yet (we won't know until vfs_poll()
+ * is called), and we must not miss any wakeups. POLLFREE is an
+ * exception to this; see below.
+ */
+ if (req->work_scheduled) {
+ req->work_need_resched = true;
+ } else {
+ schedule_work(&req->work);
+ req->work_scheduled = true;
+ }
+
+ /*
+ * If the waitqueue is being freed early but we can't complete
+ * the request inline, we have to tear down the request as best
+ * we can. That means immediately removing the request from its
+ * waitqueue and preventing all further accesses to the
+ * waitqueue via the request. We also need to schedule the
+ * completion work (done above). Also mark the request as
+ * cancelled, to potentially skip an unneeded call to ->poll().
+ */
+ if (mask & POLLFREE) {
+ WRITE_ONCE(req->cancelled, true);
+ list_del_init(&req->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon
+ * as req->head is NULL'ed out, the request can be
+ * completed and freed, since aio_poll_complete_work()
+ * will no longer need to take the waitqueue lock.
+ */
+ smp_store_release(&req->head, NULL);
+ }
}
return 1;
}
@@ -1711,6 +1869,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct aio_poll_table {
struct poll_table_struct pt;
struct aio_kiocb *iocb;
+ bool queued;
int error;
};
@@ -1721,11 +1880,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
/* multiple wait queues per file are not supported */
- if (unlikely(pt->iocb->poll.head)) {
+ if (unlikely(pt->queued)) {
pt->error = -EINVAL;
return;
}
+ pt->queued = true;
pt->error = 0;
pt->iocb->poll.head = head;
add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1910,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->head = NULL;
- req->done = false;
req->cancelled = false;
+ req->work_scheduled = false;
+ req->work_need_resched = false;
apt.pt._qproc = aio_poll_queue_proc;
apt.pt._key = req->events;
apt.iocb = aiocb;
+ apt.queued = false;
apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
/* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1926,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
mask = vfs_poll(req->file, &apt.pt) & req->events;
spin_lock_irq(&ctx->ctx_lock);
- if (likely(req->head)) {
- spin_lock(&req->head->lock);
- if (unlikely(list_empty(&req->wait.entry))) {
- if (apt.error)
+ if (likely(apt.queued)) {
+ bool on_queue = poll_iocb_lock_wq(req);
+
+ if (!on_queue || req->work_scheduled) {
+ /*
+ * aio_poll_wake() already either scheduled the async
+ * completion work, or completed the request inline.
+ */
+ if (apt.error) /* unsupported case: multiple queues */
cancel = true;
apt.error = 0;
mask = 0;
}
if (mask || apt.error) {
+ /* Steal to complete synchronously. */
list_del_init(&req->wait.entry);
} else if (cancel) {
+ /* Cancel if possible (may be too late though). */
WRITE_ONCE(req->cancelled, true);
- } else if (!req->done) { /* actually waiting for an event */
+ } else if (on_queue) {
+ /*
+ * Actually waiting for an event, so add the request to
+ * active_reqs so that it can be cancelled if needed.
+ */
list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
aiocb->ki_cancel = aio_poll_cancel;
}
- spin_unlock(&req->head->lock);
+ if (on_queue)
+ poll_iocb_unlock_wq(req);
}
if (mask) { /* no async, we'd stolen it */
aiocb->ki_res.res = mangle_poll(mask);
@@ -2018,7 +2192,6 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
return -EINVAL;
spin_lock_irq(&ctx->ctx_lock);
- /* TODO: use a hash or array, this sucks. */
list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
if (kiocb->ki_res.obj == obj) {
ret = kiocb->ki_cancel(&kiocb->rw);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index e0c3e33c4177..b8381c7fb636 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -24,15 +24,56 @@
#include <linux/uaccess.h>
-static struct vfsmount *anon_inode_mnt __read_mostly;
-static struct inode *anon_inode_inode;
+#include "internal.h"
+
+static struct vfsmount *anon_inode_mnt __ro_after_init;
+static struct inode *anon_inode_inode __ro_after_init;
+
+/*
+ * User space expects anonymous inodes to have no file type in st_mode.
+ *
+ * In particular, 'lsof' has this legacy logic:
+ *
+ * type = s->st_mode & S_IFMT;
+ * switch (type) {
+ * ...
+ * case 0:
+ * if (!strcmp(p, "anon_inode"))
+ * Lf->ntype = Ntype = N_ANON_INODE;
+ *
+ * to detect our old anon_inode logic.
+ *
+ * Rather than mess with our internal sane inode data, just fix it
+ * up here in getattr() by masking off the format bits.
+ */
+int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
+{
+ struct inode *inode = d_inode(path->dentry);
+
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ stat->mode &= ~S_IFMT;
+ return 0;
+}
+
+int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct inode_operations anon_inode_operations = {
+ .getattr = anon_inode_getattr,
+ .setattr = anon_inode_setattr,
+};
/*
* anon_inodefs_dname() is called from d_path().
*/
static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
- return dynamic_dname(dentry, buffer, buflen, "anon_inode:%s",
+ return dynamic_dname(buffer, buflen, "anon_inode:%s",
dentry->d_name.name);
}
@@ -45,6 +86,8 @@ static int anon_inodefs_init_fs_context(struct fs_context *fc)
struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC);
if (!ctx)
return -ENOMEM;
+ fc->s_iflags |= SB_I_NOEXEC;
+ fc->s_iflags |= SB_I_NODEV;
ctx->dops = &anon_inodefs_dentry_operations;
return 0;
}
@@ -55,31 +98,44 @@ static struct file_system_type anon_inode_fs_type = {
.kill_sb = kill_anon_super,
};
-static struct inode *anon_inode_make_secure_inode(
- const char *name,
- const struct inode *context_inode)
+/**
+ * anon_inode_make_secure_inode - allocate an anonymous inode with security context
+ * @sb: [in] Superblock to allocate from
+ * @name: [in] Name of the class of the new file (e.g., "secretmem")
+ * @context_inode:
+ * [in] Optional parent inode for security inheritance
+ *
+ * The function ensures proper security initialization through the LSM hook
+ * security_inode_init_security_anon().
+ *
+ * Return: Pointer to new inode on success, ERR_PTR on failure.
+ */
+struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
+ const struct inode *context_inode)
{
struct inode *inode;
- const struct qstr qname = QSTR_INIT(name, strlen(name));
int error;
- inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ inode = alloc_anon_inode(sb);
if (IS_ERR(inode))
return inode;
inode->i_flags &= ~S_PRIVATE;
- error = security_inode_init_security_anon(inode, &qname, context_inode);
+ inode->i_op = &anon_inode_operations;
+ error = security_inode_init_security_anon(inode, &QSTR(name),
+ context_inode);
if (error) {
iput(inode);
return ERR_PTR(error);
}
return inode;
}
+EXPORT_SYMBOL_FOR_MODULES(anon_inode_make_secure_inode, "kvm");
static struct file *__anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode,
- bool secure)
+ bool make_inode)
{
struct inode *inode;
struct file *file;
@@ -87,8 +143,9 @@ static struct file *__anon_inode_getfile(const char *name,
if (fops->owner && !try_module_get(fops->owner))
return ERR_PTR(-ENOENT);
- if (secure) {
- inode = anon_inode_make_secure_inode(name, context_inode);
+ if (make_inode) {
+ inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb,
+ name, context_inode);
if (IS_ERR(inode)) {
file = ERR_CAST(inode);
goto err;
@@ -149,13 +206,42 @@ struct file *anon_inode_getfile(const char *name,
EXPORT_SYMBOL_GPL(anon_inode_getfile);
/**
- * anon_inode_getfile_secure - Like anon_inode_getfile(), but creates a new
+ * anon_inode_getfile_fmode - creates a new file instance by hooking it up to an
+ * anonymous inode, and a dentry that describe the "class"
+ * of the file
+ *
+ * @name: [in] name of the "class" of the new file
+ * @fops: [in] file operations for the new file
+ * @priv: [in] private data for the new file (will be file's private_data)
+ * @flags: [in] flags
+ * @f_mode: [in] fmode
+ *
+ * Creates a new file by hooking it on a single inode. This is useful for files
+ * that do not need to have a full-fledged inode in order to operate correctly.
+ * All the files created with anon_inode_getfile() will share a single inode,
+ * hence saving memory and avoiding code duplication for the file/inode/dentry
+ * setup. Allows setting the fmode. Returns the newly created file* or an error
+ * pointer.
+ */
+struct file *anon_inode_getfile_fmode(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags, fmode_t f_mode)
+{
+ struct file *file;
+
+ file = __anon_inode_getfile(name, fops, priv, flags, NULL, false);
+ if (!IS_ERR(file))
+ file->f_mode |= f_mode;
+
+ return file;
+}
+EXPORT_SYMBOL_GPL(anon_inode_getfile_fmode);
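A usage sketch for the new helper, assuming hypothetical my_fops and a driver-private ctx pointer; the extra f_mode bits are simply ORed into the new file, which is the only difference from anon_inode_getfile():

	struct file *filp;

	filp = anon_inode_getfile_fmode("[my-driver]", &my_fops, ctx,
					O_RDWR, FMODE_NOWAIT);
	if (IS_ERR(filp))
		return PTR_ERR(filp);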
+
+/**
+ * anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new
* !S_PRIVATE anon inode rather than reuse the
* singleton anon inode and calls the
- * inode_init_security_anon() LSM hook. This
- * allows for both the inode to have its own
- * security context and for the LSM to enforce
- * policy on the inode's creation.
+ * inode_init_security_anon() LSM hook.
*
* @name: [in] name of the "class" of the new file
* @fops: [in] file operations for the new file
@@ -164,11 +250,21 @@ EXPORT_SYMBOL_GPL(anon_inode_getfile);
* @context_inode:
* [in] the logical relationship with the new inode (optional)
*
+ * Create a new anonymous inode and file pair. This can be done for two
+ * reasons:
+ *
+ * - for the inode to have its own security context, so that LSMs can enforce
+ * policy on the inode's creation;
+ *
+ * - if the caller needs a unique inode, for example in order to customize
+ * the size returned by fstat()
+ *
* The LSM may use @context_inode in inode_init_security_anon(), but a
- * reference to it is not held. Returns the newly created file* or an error
- * pointer. See the anon_inode_getfile() documentation for more information.
+ * reference to it is not held.
+ *
+ * Returns the newly created file* or an error pointer.
*/
-struct file *anon_inode_getfile_secure(const char *name,
+struct file *anon_inode_create_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode)
@@ -176,34 +272,16 @@ struct file *anon_inode_getfile_secure(const char *name,
return __anon_inode_getfile(name, fops, priv, flags,
context_inode, true);
}
+EXPORT_SYMBOL_GPL(anon_inode_create_getfile);
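
A hypothetical caller that needs a distinct inode, for example to report a per-instance size through fstat(), might use it as sketched below; the fops, the name and the size field are assumptions for illustration only:

static struct file *example_open_region(void *region, loff_t region_size,
					struct inode *context_inode)
{
	struct file *file;

	file = anon_inode_create_getfile("[example-region]",
					 &example_region_fops, region,
					 O_RDWR | O_CLOEXEC, context_inode);
	if (!IS_ERR(file))
		file_inode(file)->i_size = region_size;
	return file;
}
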
static int __anon_inode_getfd(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode,
- bool secure)
+ bool make_inode)
{
- int error, fd;
- struct file *file;
-
- error = get_unused_fd_flags(flags);
- if (error < 0)
- return error;
- fd = error;
-
- file = __anon_inode_getfile(name, fops, priv, flags, context_inode,
- secure);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_put_unused_fd;
- }
- fd_install(fd, file);
-
- return fd;
-
-err_put_unused_fd:
- put_unused_fd(fd);
- return error;
+ return FD_ADD(flags, __anon_inode_getfile(name, fops, priv, flags,
+ context_inode, make_inode));
}
/**
@@ -231,10 +309,9 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
EXPORT_SYMBOL_GPL(anon_inode_getfd);
/**
- * anon_inode_getfd_secure - Like anon_inode_getfd(), but creates a new
+ * anon_inode_create_getfd - Like anon_inode_getfd(), but creates a new
* !S_PRIVATE anon inode rather than reuse the singleton anon inode, and calls
- * the inode_init_security_anon() LSM hook. This allows the inode to have its
- * own security context and for a LSM to reject creation of the inode.
+ * the inode_init_security_anon() LSM hook.
*
* @name: [in] name of the "class" of the new file
* @fops: [in] file operations for the new file
@@ -243,16 +320,27 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
* @context_inode:
* [in] the logical relationship with the new inode (optional)
*
+ * Create a new anonymous inode and file pair. This can be done for two
+ * reasons:
+ *
+ * - for the inode to have its own security context, so that LSMs can enforce
+ * policy on the inode's creation;
+ *
+ * - if the caller needs a unique inode, for example in order to customize
+ * the size returned by fstat()
+ *
* The LSM may use @context_inode in inode_init_security_anon(), but a
* reference to it is not held.
+ *
+ * Returns a newly created file descriptor or an error code.
*/
-int anon_inode_getfd_secure(const char *name, const struct file_operations *fops,
+int anon_inode_create_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode)
{
return __anon_inode_getfd(name, fops, priv, flags, context_inode, true);
}
-EXPORT_SYMBOL_GPL(anon_inode_getfd_secure);
+
static int __init anon_inode_init(void)
{
@@ -263,6 +351,7 @@ static int __init anon_inode_init(void)
anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
if (IS_ERR(anon_inode_inode))
panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+ anon_inode_inode->i_op = &anon_inode_operations;
return 0;
}
diff --git a/fs/attr.c b/fs/attr.c
index 473d21b3a86d..b9ec6b47bab2 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -14,32 +14,93 @@
#include <linux/capability.h>
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
+#include <linux/filelock.h>
#include <linux/security.h>
-#include <linux/evm.h>
-#include <linux/ima.h>
+
+/**
+ * setattr_should_drop_sgid - determine whether the setgid bit needs to be
+ * removed
+ * @idmap: idmap of the mount @inode was found from
+ * @inode: inode to check
+ *
+ * This function determines whether the setgid bit needs to be removed.
+ * We retain backwards compatibility and require the setgid bit to be removed
+ * unconditionally if S_IXGRP is set. Otherwise we have the exact same
+ * requirements as setattr_prepare() and setattr_copy().
+ *
+ * Return: ATTR_KILL_SGID if setgid bit needs to be removed, 0 otherwise.
+ */
+int setattr_should_drop_sgid(struct mnt_idmap *idmap,
+ const struct inode *inode)
+{
+ umode_t mode = inode->i_mode;
+
+ if (!(mode & S_ISGID))
+ return 0;
+ if (mode & S_IXGRP)
+ return ATTR_KILL_SGID;
+ if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
+ return ATTR_KILL_SGID;
+ return 0;
+}
+EXPORT_SYMBOL(setattr_should_drop_sgid);
+
+/**
+ * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to
+ * be dropped
+ * @idmap: idmap of the mount @inode was found from
+ * @inode: inode to check
+ *
+ * This function determines whether the set{g,u}id bits need to be removed.
+ * If the setuid bit needs to be removed ATTR_KILL_SUID is returned. If the
+ * setgid bit needs to be removed ATTR_KILL_SGID is returned. If both
+ * set{g,u}id bits need to be removed the corresponding mask of both flags is
+ * returned.
+ *
+ * Return: A mask of ATTR_KILL_S{G,U}ID indicating which - if any - setid bits
+ * to remove, 0 otherwise.
+ */
+int setattr_should_drop_suidgid(struct mnt_idmap *idmap,
+ struct inode *inode)
+{
+ umode_t mode = inode->i_mode;
+ int kill = 0;
+
+ /* suid always must be killed */
+ if (unlikely(mode & S_ISUID))
+ kill = ATTR_KILL_SUID;
+
+ kill |= setattr_should_drop_sgid(idmap, inode);
+
+ if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
+ return kill;
+
+ return 0;
+}
+EXPORT_SYMBOL(setattr_should_drop_suidgid);
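
A sketch of how a filesystem write path could consume the returned mask, roughly mirroring what the generic privilege-dropping machinery does internally; the helper name is hypothetical and the caller is assumed to hold i_rwsem:

static int example_strip_setid(struct mnt_idmap *idmap, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int kill = setattr_should_drop_suidgid(idmap, inode);
	struct iattr newattrs = {};

	if (!kill)
		return 0;

	/* ATTR_FORCE: this is a privilege-stripping update, not a chmod. */
	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(idmap, dentry, &newattrs, NULL);
}
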
/**
* chown_ok - verify permissions to chown inode
- * @mnt_userns: user namespace of the mount @inode was found from
+ * @idmap: idmap of the mount @inode was found from
* @inode: inode to check permissions on
- * @uid: uid to chown @inode to
+ * @ia_vfsuid: uid to chown @inode to
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then
- * take care to map the inode according to @mnt_userns before checking
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then
+ * take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs init_user_ns.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*/
-static bool chown_ok(struct user_namespace *mnt_userns,
- const struct inode *inode,
- kuid_t uid)
+static bool chown_ok(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsuid_t ia_vfsuid)
{
- kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);
- if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, kuid))
+ vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
+ if (vfsuid_eq_kuid(vfsuid, current_fsuid()) &&
+ vfsuid_eq(ia_vfsuid, vfsuid))
return true;
- if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
+ if (capable_wrt_inode_uidgid(idmap, inode, CAP_CHOWN))
return true;
- if (uid_eq(kuid, INVALID_UID) &&
+ if (!vfsuid_valid(vfsuid) &&
ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
return true;
return false;
@@ -47,26 +108,30 @@ static bool chown_ok(struct user_namespace *mnt_userns,
/**
* chgrp_ok - verify permissions to chgrp inode
- * @mnt_userns: user namespace of the mount @inode was found from
+ * @idmap: idmap of the mount @inode was found from
* @inode: inode to check permissions on
- * @gid: gid to chown @inode to
+ * @ia_vfsgid: gid to chown @inode to
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then
- * take care to map the inode according to @mnt_userns before checking
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then
+ * take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs init_user_ns.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*/
-static bool chgrp_ok(struct user_namespace *mnt_userns,
- const struct inode *inode, kgid_t gid)
+static bool chgrp_ok(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t ia_vfsgid)
{
- kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
- if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) &&
- (in_group_p(gid) || gid_eq(gid, kgid)))
- return true;
- if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
+ vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
+ vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
+ if (vfsuid_eq_kuid(vfsuid, current_fsuid())) {
+ if (vfsgid_eq(ia_vfsgid, vfsgid))
+ return true;
+ if (vfsgid_in_group_p(ia_vfsgid))
+ return true;
+ }
+ if (capable_wrt_inode_uidgid(idmap, inode, CAP_CHOWN))
return true;
- if (gid_eq(kgid, INVALID_GID) &&
+ if (!vfsgid_valid(vfsgid) &&
ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
return true;
return false;
@@ -74,7 +139,7 @@ static bool chgrp_ok(struct user_namespace *mnt_userns,
/**
* setattr_prepare - check if attribute changes to a dentry are allowed
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dentry: dentry to check
* @attr: attributes to change
*
@@ -84,16 +149,16 @@ static bool chgrp_ok(struct user_namespace *mnt_userns,
* SGID bit from mode if user is not allowed to set it. Also file capabilities
* and IMA extended attributes are cleared if ATTR_KILL_PRIV is set.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then
- * take care to map the inode according to @mnt_userns before checking
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then
+ * take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs init_user_ns.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*
* Should be called as the first thing in ->setattr implementations,
* possibly after taking additional locks.
*/
-int setattr_prepare(struct user_namespace *mnt_userns, struct dentry *dentry,
+int setattr_prepare(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -114,27 +179,35 @@ int setattr_prepare(struct user_namespace *mnt_userns, struct dentry *dentry,
goto kill_priv;
/* Make sure a caller can chown. */
- if ((ia_valid & ATTR_UID) && !chown_ok(mnt_userns, inode, attr->ia_uid))
+ if ((ia_valid & ATTR_UID) &&
+ !chown_ok(idmap, inode, attr->ia_vfsuid))
return -EPERM;
/* Make sure caller can chgrp. */
- if ((ia_valid & ATTR_GID) && !chgrp_ok(mnt_userns, inode, attr->ia_gid))
+ if ((ia_valid & ATTR_GID) &&
+ !chgrp_ok(idmap, inode, attr->ia_vfsgid))
return -EPERM;
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
- if (!inode_owner_or_capable(mnt_userns, inode))
+ vfsgid_t vfsgid;
+
+ if (!inode_owner_or_capable(idmap, inode))
return -EPERM;
+
+ if (ia_valid & ATTR_GID)
+ vfsgid = attr->ia_vfsgid;
+ else
+ vfsgid = i_gid_into_vfsgid(idmap, inode);
+
/* Also check the setgid bit! */
- if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
- i_gid_into_mnt(mnt_userns, inode)) &&
- !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode, vfsgid))
attr->ia_mode &= ~S_ISGID;
}
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
- if (!inode_owner_or_capable(mnt_userns, inode))
+ if (!inode_owner_or_capable(idmap, inode))
return -EPERM;
}
@@ -143,7 +216,7 @@ kill_priv:
if (ia_valid & ATTR_KILL_PRIV) {
int error;
- error = security_inode_killpriv(mnt_userns, dentry);
+ error = security_inode_killpriv(idmap, dentry);
if (error)
return error;
}
@@ -157,7 +230,7 @@ EXPORT_SYMBOL(setattr_prepare);
* @inode: the inode to be truncated
* @offset: the new size to assign to the inode
*
- * inode_newsize_ok must be called with i_mutex held.
+ * inode_newsize_ok must be called with i_rwsem held exclusively.
*
* inode_newsize_ok will check filesystem limits and ulimits to check that the
* new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
@@ -170,6 +243,8 @@ EXPORT_SYMBOL(setattr_prepare);
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
+ if (offset < 0)
+ return -EINVAL;
if (inode->i_size < offset) {
unsigned long limit;
@@ -197,59 +272,94 @@ out_big:
EXPORT_SYMBOL(inode_newsize_ok);
/**
+ * setattr_copy_mgtime - update timestamps for mgtime inodes
+ * @inode: inode timestamps to be updated
+ * @attr: attrs for the update
+ *
+ * With multigrain timestamps, take more care to prevent races when
+ * updating the ctime. Always update the ctime to the very latest using
+ * the standard mechanism, and use that to populate the atime and mtime
+ * appropriately (unless those are being set to specific values).
+ */
+static void setattr_copy_mgtime(struct inode *inode, const struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ struct timespec64 now;
+
+ if (ia_valid & ATTR_CTIME_SET)
+ now = inode_set_ctime_deleg(inode, attr->ia_ctime);
+ else if (ia_valid & ATTR_CTIME)
+ now = inode_set_ctime_current(inode);
+ else
+ now = current_time(inode);
+
+ if (ia_valid & ATTR_ATIME_SET)
+ inode_set_atime_to_ts(inode, attr->ia_atime);
+ else if (ia_valid & ATTR_ATIME)
+ inode_set_atime_to_ts(inode, now);
+
+ if (ia_valid & ATTR_MTIME_SET)
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
+ else if (ia_valid & ATTR_MTIME)
+ inode_set_mtime_to_ts(inode, now);
+}
+
+/**
* setattr_copy - copy simple metadata updates into the generic inode
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: the inode to be updated
* @attr: the new attributes
*
- * setattr_copy must be called with i_mutex held.
+ * setattr_copy must be called with i_rwsem held exclusively.
*
* setattr_copy updates the inode's metadata with that specified
- * in attr on idmapped mounts. If file ownership is changed setattr_copy
- * doesn't map ia_uid and ia_gid. It will asssume the caller has already
- * provided the intended values. Necessary permission checks to determine
+ * in attr on idmapped mounts. Necessary permission checks to determine
* whether or not the S_ISGID property needs to be removed are performed with
* the correct idmapped mount permission helpers.
* Noticeably missing is inode size update, which is more complex
* as it requires pagecache updates.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then
- * take care to map the inode according to @mnt_userns before checking
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then
+ * take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs init_user_ns.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*
* The inode is not marked as dirty after this operation. The rationale is
* that for "simple" filesystems, the struct inode is the inode storage.
* The caller is free to mark the inode dirty afterwards if needed.
*/
-void setattr_copy(struct user_namespace *mnt_userns, struct inode *inode,
+void setattr_copy(struct mnt_idmap *idmap, struct inode *inode,
const struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
- if (ia_valid & ATTR_UID)
- inode->i_uid = attr->ia_uid;
- if (ia_valid & ATTR_GID)
- inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
- if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
- if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ i_uid_update(idmap, attr, inode);
+ i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
- kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
- if (!in_group_p(kgid) &&
- !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode,
+ i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
inode->i_mode = mode;
}
+
+ if (is_mgtime(inode))
+ return setattr_copy_mgtime(inode, attr);
+
+ if (ia_valid & ATTR_ATIME)
+ inode_set_atime_to_ts(inode, attr->ia_atime);
+ if (ia_valid & ATTR_MTIME)
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
+
+ if (ia_valid & ATTR_CTIME_SET)
+ inode_set_ctime_deleg(inode, attr->ia_ctime);
+ else if (ia_valid & ATTR_CTIME)
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
}
EXPORT_SYMBOL(setattr_copy);
-int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
+int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
unsigned int ia_valid)
{
int error;
@@ -267,8 +377,8 @@ int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
if (IS_IMMUTABLE(inode))
return -EPERM;
- if (!inode_owner_or_capable(mnt_userns, inode)) {
- error = inode_permission(mnt_userns, inode, MAY_WRITE);
+ if (!inode_owner_or_capable(idmap, inode)) {
+ error = inode_permission(idmap, inode, MAY_WRITE);
if (error)
return error;
}
@@ -278,22 +388,19 @@ int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
EXPORT_SYMBOL(may_setattr);
/**
- * notify_change - modify attributes of a filesytem object
- * @mnt_userns: user namespace of the mount the inode was found from
+ * notify_change - modify attributes of a filesystem object
+ * @idmap: idmap of the mount the inode was found from
* @dentry: object affected
* @attr: new attributes
* @delegated_inode: returns inode, if the inode is delegated
*
- * The caller must hold the i_mutex on the affected object.
+ * The caller must hold the i_rwsem exclusively on the affected object.
*
* If notify_change discovers a delegation in need of breaking,
* it will return -EWOULDBLOCK and return a reference to the inode in
* delegated_inode. The caller should then break the delegation and
* retry. Because breaking a delegation may take a long time, the
- * caller should drop the i_mutex before doing so.
- *
- * If file ownership is changed notify_change() doesn't map ia_uid and
- * ia_gid. It will asssume the caller has already provided the intended values.
+ * caller should drop the i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
@@ -301,14 +408,14 @@ EXPORT_SYMBOL(may_setattr);
* the file open for write, as there can be no conflicting delegation in
* that case.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then
- * take care to map the inode according to @mnt_userns before checking
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then
+ * take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
- * performed on the raw inode simply passs init_user_ns.
+ * performed on the raw inode simply pass @nop_mnt_idmap.
*/
-int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
- struct iattr *attr, struct inode **delegated_inode)
+int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct delegated_inode *delegated_inode)
{
struct inode *inode = dentry->d_inode;
umode_t mode = inode->i_mode;
@@ -318,28 +425,47 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
WARN_ON_ONCE(!inode_is_locked(inode));
- error = may_setattr(mnt_userns, inode, ia_valid);
+ error = may_setattr(idmap, inode, ia_valid);
if (error)
return error;
if ((ia_valid & ATTR_MODE)) {
- umode_t amode = attr->ia_mode;
- /* Flag setting protected by i_mutex */
- if (is_sxid(amode))
+ /*
+ * Don't allow changing the mode of symlinks:
+ *
+ * (1) The vfs doesn't take the mode of symlinks into account
+ * during permission checking.
+ * (2) This has never worked correctly. Most major filesystems
+ * did return EOPNOTSUPP due to interactions with POSIX ACLs
+ * but still updated the mode of the symlink.
+ * This inconsistency led system call wrapper providers such
+ * as libc to block changing the mode of symlinks with
+ * EOPNOTSUPP already.
+ * (3) To even do this in the first place one would have to use
+ * specific file descriptors and quite some effort.
+ */
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ /* Flag setting protected by i_rwsem */
+ if (is_sxid(attr->ia_mode))
inode->i_flags &= ~S_NOSEC;
}
now = current_time(inode);
- attr->ia_ctime = now;
- if (!(ia_valid & ATTR_ATIME_SET))
- attr->ia_atime = now;
- else
+ if (ia_valid & ATTR_ATIME_SET)
attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
- if (!(ia_valid & ATTR_MTIME_SET))
- attr->ia_mtime = now;
else
+ attr->ia_atime = now;
+ if (ia_valid & ATTR_CTIME_SET)
+ attr->ia_ctime = timestamp_truncate(attr->ia_ctime, inode);
+ else
+ attr->ia_ctime = now;
+ if (ia_valid & ATTR_MTIME_SET)
attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
+ else
+ attr->ia_mtime = now;
if (ia_valid & ATTR_KILL_PRIV) {
error = security_inode_need_killpriv(dentry);
@@ -367,7 +493,7 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
}
}
if (ia_valid & ATTR_KILL_SGID) {
- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+ if (mode & S_ISGID) {
if (!(ia_valid & ATTR_MODE)) {
ia_valid = attr->ia_valid |= ATTR_MODE;
attr->ia_mode = inode->i_mode;
@@ -383,38 +509,47 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
* namespace of the superblock.
*/
if (ia_valid & ATTR_UID &&
- !kuid_has_mapping(inode->i_sb->s_user_ns, attr->ia_uid))
+ !vfsuid_has_fsmapping(idmap, inode->i_sb->s_user_ns,
+ attr->ia_vfsuid))
return -EOVERFLOW;
if (ia_valid & ATTR_GID &&
- !kgid_has_mapping(inode->i_sb->s_user_ns, attr->ia_gid))
+ !vfsgid_has_fsmapping(idmap, inode->i_sb->s_user_ns,
+ attr->ia_vfsgid))
return -EOVERFLOW;
/* Don't allow modifications of files with invalid uids or
* gids unless those uids & gids are being made valid.
*/
if (!(ia_valid & ATTR_UID) &&
- !uid_valid(i_uid_into_mnt(mnt_userns, inode)))
+ !vfsuid_valid(i_uid_into_vfsuid(idmap, inode)))
return -EOVERFLOW;
if (!(ia_valid & ATTR_GID) &&
- !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode)))
return -EOVERFLOW;
- error = security_inode_setattr(dentry, attr);
- if (error)
- return error;
- error = try_break_deleg(inode, delegated_inode);
+ error = security_inode_setattr(idmap, dentry, attr);
if (error)
return error;
+ /*
+ * If ATTR_DELEG is set, then these attributes are being set on
+ * behalf of the holder of a write delegation. We want to avoid
+ * breaking the delegation in this case.
+ */
+ if (!(ia_valid & ATTR_DELEG)) {
+ error = try_break_deleg(inode, delegated_inode);
+ if (error)
+ return error;
+ }
+
if (inode->i_op->setattr)
- error = inode->i_op->setattr(mnt_userns, dentry, attr);
+ error = inode->i_op->setattr(idmap, dentry, attr);
else
- error = simple_setattr(mnt_userns, dentry, attr);
+ error = simple_setattr(idmap, dentry, attr);
if (!error) {
fsnotify_change(dentry, ia_valid);
- ima_inode_post_setattr(mnt_userns, dentry);
- evm_inode_post_setattr(dentry, ia_valid);
+ security_inode_post_setattr(idmap, dentry, ia_valid);
}
return error;
diff --git a/fs/autofs/Kconfig b/fs/autofs/Kconfig
index 3b3a6b1423c6..54c12d9484cb 100644
--- a/fs/autofs/Kconfig
+++ b/fs/autofs/Kconfig
@@ -1,18 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-config AUTOFS4_FS
- tristate "Old Kconfig name for Kernel automounter support"
- select AUTOFS_FS
- help
- This name exists for people to just automatically pick up the
- new name of the autofs Kconfig option. All it does is select
- the new option name.
-
- It will go away in a release or two as people have
- transitioned to just plain AUTOFS_FS.
-
config AUTOFS_FS
tristate "Kernel automounter support (supports v3, v4 and v5)"
- default n
help
The automounter is a tool to automatically mount remote file systems
on demand. This implementation is partially kernel-based to reduce
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 918826eaceea..4fd555528c5d 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -16,6 +16,7 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
+#include <uapi/linux/mount.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/uaccess.h>
@@ -25,6 +26,11 @@
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/magic.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include "../mount.h"
+#include <linux/ns_common.h>
+
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -51,8 +57,6 @@ extern struct file_system_type autofs_fs_type;
*/
struct autofs_info {
struct dentry *dentry;
- struct inode *inode;
-
int flags;
struct completion expire_complete;
@@ -62,6 +66,7 @@ struct autofs_info {
struct list_head expiring;
struct autofs_sb_info *sbi;
+ unsigned long exp_timeout;
unsigned long last_used;
int count;
@@ -81,6 +86,9 @@ struct autofs_info {
*/
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
+#define AUTOFS_INF_EXPIRE_SET (1<<3) /* per-dentry expire timeout set for
+ this mount point.
+ */
struct autofs_wait_queue {
wait_queue_head_t queue;
struct autofs_wait_queue *next;
@@ -110,6 +118,7 @@ struct autofs_sb_info {
int pipefd;
struct file *pipe;
struct pid *oz_pgrp;
+ u64 mnt_ns_id;
int version;
int sub_version;
int min_proto;
@@ -148,6 +157,11 @@ static inline int autofs_oz_mode(struct autofs_sb_info *sbi)
task_pgrp(current) == sbi->oz_pgrp);
}
+static inline bool autofs_empty(struct autofs_info *ino)
+{
+ return ino->count < 2;
+}
+
struct inode *autofs_get_inode(struct super_block *, umode_t);
void autofs_free_ino(struct autofs_info *);
@@ -202,20 +216,36 @@ static inline void managed_dentry_clear_managed(struct dentry *dentry)
/* Initializing function */
-int autofs_fill_super(struct super_block *, void *, int);
+extern const struct fs_parameter_spec autofs_param_specs[];
+int autofs_init_fs_context(struct fs_context *fc);
struct autofs_info *autofs_new_ino(struct autofs_sb_info *);
void autofs_clean_ino(struct autofs_info *);
-static inline int autofs_prepare_pipe(struct file *pipe)
+static inline int autofs_check_pipe(struct file *pipe)
{
+ if (pipe->f_mode & FMODE_PATH)
+ return -EINVAL;
if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
return -EINVAL;
+ return 0;
+}
+
+static inline void autofs_set_packet_pipe_flags(struct file *pipe)
+{
/* We want a packet pipe */
pipe->f_flags |= O_DIRECT;
/* We don't expect -EAGAIN */
pipe->f_flags &= ~O_NONBLOCK;
+}
+
+static inline int autofs_prepare_pipe(struct file *pipe)
+{
+ int ret = autofs_check_pipe(pipe);
+ if (ret < 0)
+ return ret;
+ autofs_set_packet_pipe_flags(pipe);
return 0;
}
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index 5bf781ea6d67..6743b3b64217 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -110,6 +110,7 @@ static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
*/
static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
{
+ unsigned int inr = _IOC_NR(cmd);
int err;
err = check_dev_ioctl_version(cmd, param);
@@ -128,15 +129,19 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
goto out;
}
+ /* Setting the per-dentry expire timeout requires a trailing
+ * path component, i.e. no '/', so invert the logic of the
+ * check_name() return for AUTOFS_DEV_IOCTL_TIMEOUT_CMD.
+ */
err = check_name(param->path);
+ if (inr == AUTOFS_DEV_IOCTL_TIMEOUT_CMD)
+ err = err ? 0 : -EINVAL;
if (err) {
pr_warn("invalid path supplied for cmd(0x%08x)\n",
cmd);
goto out;
}
} else {
- unsigned int inr = _IOC_NR(cmd);
-
if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
@@ -226,32 +231,14 @@ static int test_by_type(const struct path *path, void *p)
*/
static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid)
{
- int err, fd;
-
- fd = get_unused_fd_flags(O_CLOEXEC);
- if (likely(fd >= 0)) {
- struct file *filp;
- struct path path;
-
- err = find_autofs_mount(name, &path, test_by_dev, &devid);
- if (err)
- goto out;
-
- filp = dentry_open(&path, O_RDONLY, current_cred());
- path_put(&path);
- if (IS_ERR(filp)) {
- err = PTR_ERR(filp);
- goto out;
- }
-
- fd_install(fd, filp);
- }
+ struct path path __free(path_put) = {};
+ int err;
- return fd;
+ err = find_autofs_mount(name, &path, test_by_dev, &devid);
+ if (err)
+ return err;
-out:
- put_unused_fd(fd);
- return err;
+ return FD_ADD(O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred()));
}
/* Open a file descriptor on an autofs mount point */
@@ -376,6 +363,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
swap(sbi->oz_pgrp, new_pid);
sbi->pipefd = pipefd;
sbi->pipe = pipe;
+ sbi->mnt_ns_id = to_ns_common(current->nsproxy->mnt_ns)->ns_id;
sbi->flags &= ~AUTOFS_SBI_CATATONIC;
}
out:
@@ -396,16 +384,97 @@ static int autofs_dev_ioctl_catatonic(struct file *fp,
return 0;
}
-/* Set the autofs mount timeout */
+/*
+ * Set the autofs mount expire timeout.
+ *
+ * There are two places an expire timeout can be set, in the autofs
+ * super block info. (this is all that's needed for direct and offset
+ * mounts because there's a distinct mount corresponding to each of
+ * these) and per-dentry within the dentry info. If a per-dentry
+ * timeout is set it will override the expire timeout set in the parent
+ * autofs super block info.
+ *
+ * If setting the autofs super block expire timeout the autofs_dev_ioctl
+ * size field will be equal to the autofs_dev_ioctl structure size. If
+ * setting the per-dentry expire timeout the mount point name is passed
+ * in the autofs_dev_ioctl path field and the size field updated to
+ * reflect this.
+ *
+ * Setting the autofs mount expire timeout sets the timeout in the super
+ * block info struct. Setting the per-dentry timeout does a little more.
+ * If the timeout is equal to -1 the per-dentry timeout (and flag) is
+ * cleared, which reverts to using the super block timeout. If the
+ * timeout is 0 it is stored and the flag is left set, which disables
+ * expiration for that mount point. Otherwise both the flag and the
+ * timeout are set, enabling the dentry to use this timeout.
+ */
static int autofs_dev_ioctl_timeout(struct file *fp,
struct autofs_sb_info *sbi,
struct autofs_dev_ioctl *param)
{
- unsigned long timeout;
+ unsigned long timeout = param->timeout.timeout;
+
+ /* If setting the expire timeout for an individual indirect
+ * mount point dentry the mount trailing component path is
+ * placed in param->path and param->size adjusted to account
+ * for it, otherwise param->size is set to the structure
+ * size.
+ */
+ if (param->size == AUTOFS_DEV_IOCTL_SIZE) {
+ param->timeout.timeout = sbi->exp_timeout / HZ;
+ sbi->exp_timeout = timeout * HZ;
+ } else {
+ struct dentry *base = fp->f_path.dentry;
+ int path_len = param->size - AUTOFS_DEV_IOCTL_SIZE - 1;
+ struct dentry *dentry;
+ struct autofs_info *ino;
+
+ if (!autofs_type_indirect(sbi->type))
+ return -EINVAL;
+
+ dentry = try_lookup_noperm(&QSTR_LEN(param->path, path_len),
+ base);
+ if (IS_ERR_OR_NULL(dentry))
+ return dentry ? PTR_ERR(dentry) : -ENOENT;
+ ino = autofs_dentry_ino(dentry);
+ if (!ino) {
+ dput(dentry);
+ return -ENOENT;
+ }
+
+ if (ino->exp_timeout && ino->flags & AUTOFS_INF_EXPIRE_SET)
+ param->timeout.timeout = ino->exp_timeout / HZ;
+ else
+ param->timeout.timeout = sbi->exp_timeout / HZ;
+
+ if (timeout == -1) {
+ /* Revert to using the super block timeout */
+ ino->flags &= ~AUTOFS_INF_EXPIRE_SET;
+ ino->exp_timeout = 0;
+ } else {
+ /* Set the dentry expire flag and timeout.
+ *
+ * If timeout is 0 it will prevent the expire
+ * of this particular automount.
+ */
+ ino->flags |= AUTOFS_INF_EXPIRE_SET;
+ ino->exp_timeout = timeout * HZ;
+ }
+
+ /* An expire timeout greater than the superblock timeout
+ * could be a problem at shutdown but the super block
+ * timeout itself can change so all we can really do is
+ * warn the user.
+ */
+ if (ino->flags & AUTOFS_INF_EXPIRE_SET &&
+ ino->exp_timeout > sbi->exp_timeout)
+ pr_warn("per-mount expire timeout is greater than "
+ "the parent autofs mount timeout which could "
+ "prevent shutdown\n");
+
+ dput(dentry);
+ }
- timeout = param->timeout.timeout;
- param->timeout.timeout = sbi->exp_timeout / HZ;
- sbi->exp_timeout = timeout * HZ;
return 0;
}
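
For reference, a hedged userspace sketch of how an automount daemon could set a per-dentry expire timeout through /dev/autofs; field and macro names follow <linux/auto_dev-ioctl.h>, error handling is omitted and <stdlib.h>, <string.h>, <stdint.h> and <sys/ioctl.h> are assumed:

static int set_dentry_timeout(int devfd, int ioctlfd,
			      const char *name, uint64_t secs)
{
	size_t size = AUTOFS_DEV_IOCTL_SIZE + strlen(name) + 1;
	struct autofs_dev_ioctl *param = calloc(1, size);
	int ret;

	init_autofs_dev_ioctl(param);
	param->size = size;		/* > AUTOFS_DEV_IOCTL_SIZE => per-dentry */
	param->ioctlfd = ioctlfd;
	param->timeout.timeout = secs;	/* -1 reverts to the sb timeout */
	strcpy(param->path, name);	/* trailing path component only */

	ret = ioctl(devfd, AUTOFS_DEV_IOCTL_TIMEOUT, param);
	free(param);
	return ret;
}
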
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index b3fefd6237c3..5c2d459e1e48 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -73,12 +73,9 @@ done:
/* p->d_lock held */
static struct dentry *positive_after(struct dentry *p, struct dentry *child)
{
- if (child)
- child = list_next_entry(child, d_child);
- else
- child = list_first_entry(&p->d_subdirs, struct dentry, d_child);
+ child = child ? d_next_sibling(child) : d_first_child(p);
- list_for_each_entry_from(child, &p->d_subdirs, d_child) {
+ hlist_for_each_entry_from(child, d_sib) {
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(child)) {
dget_dlock(child);
@@ -371,7 +368,7 @@ static struct dentry *should_expire(struct dentry *dentry,
return NULL;
}
- if (simple_empty(dentry))
+ if (autofs_empty(ino))
return NULL;
/* Case 2: tree mount, expire iff entire tree is not busy */
@@ -432,8 +429,6 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
if (!root)
return NULL;
- timeout = sbi->exp_timeout;
-
dentry = NULL;
while ((dentry = get_next_positive_subdir(dentry, root))) {
spin_lock(&sbi->fs_lock);
@@ -444,6 +439,11 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
}
spin_unlock(&sbi->fs_lock);
+ if (ino->flags & AUTOFS_INF_EXPIRE_SET)
+ timeout = ino->exp_timeout;
+ else
+ timeout = sbi->exp_timeout;
+
expired = should_expire(dentry, mnt, timeout, how);
if (!expired)
continue;
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index d3f55e874338..1d644a35ffa0 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -7,16 +7,11 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct dentry *autofs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_nodev(fs_type, flags, data, autofs_fill_super);
-}
-
struct file_system_type autofs_fs_type = {
.owner = THIS_MODULE,
.name = "autofs",
- .mount = autofs_mount,
+ .init_fs_context = autofs_init_fs_context,
+ .parameters = autofs_param_specs,
.kill_sb = autofs_kill_sb,
};
MODULE_ALIAS_FS("autofs");
@@ -43,4 +38,5 @@ static void __exit exit_autofs_fs(void)
module_init(init_autofs_fs)
module_exit(exit_autofs_fs)
+MODULE_DESCRIPTION("Kernel automounter support");
MODULE_LICENSE("GPL");
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 9edf243713eb..b932b1719dfc 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -6,7 +6,6 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
-#include <linux/parser.h>
#include "autofs_i.h"
@@ -20,6 +19,8 @@ struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi)
INIT_LIST_HEAD(&ino->expiring);
ino->last_used = jiffies;
ino->sbi = sbi;
+ ino->exp_timeout = -1;
+ ino->count = 1;
}
return ino;
}
@@ -28,6 +29,7 @@ void autofs_clean_ino(struct autofs_info *ino)
{
ino->uid = GLOBAL_ROOT_UID;
ino->gid = GLOBAL_ROOT_GID;
+ ino->exp_timeout = -1;
ino->last_used = jiffies;
}
@@ -53,7 +55,7 @@ void autofs_kill_sb(struct super_block *sb)
}
pr_debug("shutting down\n");
- kill_litter_super(sb);
+ kill_anon_super(sb);
if (sbi)
kfree_rcu(sbi, rcu);
}
@@ -109,189 +111,171 @@ static const struct super_operations autofs_sops = {
.evict_inode = autofs_evict_inode,
};
-enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
- Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire,
- Opt_ignore};
-
-static const match_table_t tokens = {
- {Opt_fd, "fd=%u"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_pgrp, "pgrp=%u"},
- {Opt_minproto, "minproto=%u"},
- {Opt_maxproto, "maxproto=%u"},
- {Opt_indirect, "indirect"},
- {Opt_direct, "direct"},
- {Opt_offset, "offset"},
- {Opt_strictexpire, "strictexpire"},
- {Opt_ignore, "ignore"},
- {Opt_err, NULL}
+enum {
+ Opt_direct,
+ Opt_fd,
+ Opt_gid,
+ Opt_ignore,
+ Opt_indirect,
+ Opt_maxproto,
+ Opt_minproto,
+ Opt_offset,
+ Opt_pgrp,
+ Opt_strictexpire,
+ Opt_uid,
};
-static int parse_options(char *options,
- struct inode *root, int *pgrp, bool *pgrp_set,
- struct autofs_sb_info *sbi)
+const struct fs_parameter_spec autofs_param_specs[] = {
+ fsparam_flag ("direct", Opt_direct),
+ fsparam_fd ("fd", Opt_fd),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_flag ("ignore", Opt_ignore),
+ fsparam_flag ("indirect", Opt_indirect),
+ fsparam_u32 ("maxproto", Opt_maxproto),
+ fsparam_u32 ("minproto", Opt_minproto),
+ fsparam_flag ("offset", Opt_offset),
+ fsparam_u32 ("pgrp", Opt_pgrp),
+ fsparam_flag ("strictexpire", Opt_strictexpire),
+ fsparam_uid ("uid", Opt_uid),
+ {}
+};
+
+struct autofs_fs_context {
+ kuid_t uid;
+ kgid_t gid;
+ int pgrp;
+ bool pgrp_set;
+};
+
+/*
+ * Open the fd. We do it here rather than in get_tree so that it's done in the
+ * context of the system call that passed the data and not the one that
+ * triggered the superblock creation, lest the fd gets reassigned.
+ */
+static int autofs_parse_fd(struct fs_context *fc, struct autofs_sb_info *sbi,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- int pipefd = -1;
- kuid_t uid;
- kgid_t gid;
+ struct file *pipe;
+ int ret;
- root->i_uid = current_uid();
- root->i_gid = current_gid();
+ if (param->type == fs_value_is_file) {
+ /* came through the new api */
+ pipe = param->file;
+ param->file = NULL;
+ } else {
+ pipe = fget(result->uint_32);
+ }
+ if (!pipe) {
+ errorf(fc, "could not open pipe file descriptor");
+ return -EBADF;
+ }
- sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
- sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
+ ret = autofs_check_pipe(pipe);
+ if (ret < 0) {
+ errorf(fc, "Invalid/unusable pipe");
+ fput(pipe);
+ return -EBADF;
+ }
- sbi->pipefd = -1;
+ autofs_set_packet_pipe_flags(pipe);
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_fd:
- if (match_int(args, &pipefd))
- return 1;
- sbi->pipefd = pipefd;
- break;
- case Opt_uid:
- if (match_int(args, &option))
- return 1;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return 1;
- root->i_uid = uid;
- break;
- case Opt_gid:
- if (match_int(args, &option))
- return 1;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return 1;
- root->i_gid = gid;
- break;
- case Opt_pgrp:
- if (match_int(args, &option))
- return 1;
- *pgrp = option;
- *pgrp_set = true;
- break;
- case Opt_minproto:
- if (match_int(args, &option))
- return 1;
- sbi->min_proto = option;
- break;
- case Opt_maxproto:
- if (match_int(args, &option))
- return 1;
- sbi->max_proto = option;
- break;
- case Opt_indirect:
- set_autofs_type_indirect(&sbi->type);
- break;
- case Opt_direct:
- set_autofs_type_direct(&sbi->type);
- break;
- case Opt_offset:
- set_autofs_type_offset(&sbi->type);
- break;
- case Opt_strictexpire:
- sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
- break;
- case Opt_ignore:
- sbi->flags |= AUTOFS_SBI_IGNORE;
- break;
- default:
- return 1;
- }
+ if (sbi->pipe)
+ fput(sbi->pipe);
+
+ sbi->pipefd = result->uint_32;
+ sbi->pipe = pipe;
+
+ return 0;
+}
+
+static int autofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, autofs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_fd:
+ return autofs_parse_fd(fc, sbi, param, &result);
+ case Opt_uid:
+ ctx->uid = result.uid;
+ break;
+ case Opt_gid:
+ ctx->gid = result.gid;
+ break;
+ case Opt_pgrp:
+ ctx->pgrp = result.uint_32;
+ ctx->pgrp_set = true;
+ break;
+ case Opt_minproto:
+ sbi->min_proto = result.uint_32;
+ break;
+ case Opt_maxproto:
+ sbi->max_proto = result.uint_32;
+ break;
+ case Opt_indirect:
+ set_autofs_type_indirect(&sbi->type);
+ break;
+ case Opt_direct:
+ set_autofs_type_direct(&sbi->type);
+ break;
+ case Opt_offset:
+ set_autofs_type_offset(&sbi->type);
+ break;
+ case Opt_strictexpire:
+ sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
+ break;
+ case Opt_ignore:
+ sbi->flags |= AUTOFS_SBI_IGNORE;
}
- return (sbi->pipefd < 0);
+
+ return 0;
}
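
Since the fd option can now arrive as a file via FSCONFIG_SET_FD (the fs_value_is_file case above), a daemon using the new mount API could mount autofs roughly as follows; the option values, error handling and reliance on glibc syscall wrappers are illustrative only:

static int example_mount_autofs(int pipe_wr_fd, pid_t pgrp, const char *where)
{
	char val[16];
	int fsfd, mntfd;

	fsfd = fsopen("autofs", FSOPEN_CLOEXEC);
	fsconfig(fsfd, FSCONFIG_SET_FD, "fd", NULL, pipe_wr_fd);
	snprintf(val, sizeof(val), "%d", pgrp);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "pgrp", val, 0);
	fsconfig(fsfd, FSCONFIG_SET_FLAG, "indirect", NULL, 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "minproto", "5", 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "maxproto", "5", 0);
	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
	return move_mount(mntfd, "", AT_FDCWD, where, MOVE_MOUNT_F_EMPTY_PATH);
}
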
-int autofs_fill_super(struct super_block *s, void *data, int silent)
+static struct autofs_sb_info *autofs_alloc_sbi(void)
{
- struct inode *root_inode;
- struct dentry *root;
- struct file *pipe;
struct autofs_sb_info *sbi;
- struct autofs_info *ino;
- int pgrp = 0;
- bool pgrp_set = false;
- int ret = -EINVAL;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- return -ENOMEM;
- pr_debug("starting up, sbi = %p\n", sbi);
+ return NULL;
- s->s_fs_info = sbi;
sbi->magic = AUTOFS_SBI_MAGIC;
- sbi->pipefd = -1;
- sbi->pipe = NULL;
- sbi->exp_timeout = 0;
- sbi->oz_pgrp = NULL;
- sbi->sb = s;
- sbi->version = 0;
- sbi->sub_version = 0;
sbi->flags = AUTOFS_SBI_CATATONIC;
+ sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
+ sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
+ sbi->pipefd = -1;
+ sbi->mnt_ns_id = to_ns_common(current->nsproxy->mnt_ns)->ns_id;
+
set_autofs_type_indirect(&sbi->type);
- sbi->min_proto = 0;
- sbi->max_proto = 0;
mutex_init(&sbi->wq_mutex);
mutex_init(&sbi->pipe_mutex);
spin_lock_init(&sbi->fs_lock);
- sbi->queues = NULL;
spin_lock_init(&sbi->lookup_lock);
INIT_LIST_HEAD(&sbi->active_list);
INIT_LIST_HEAD(&sbi->expiring_list);
- s->s_blocksize = 1024;
- s->s_blocksize_bits = 10;
- s->s_magic = AUTOFS_SUPER_MAGIC;
- s->s_op = &autofs_sops;
- s->s_d_op = &autofs_dentry_operations;
- s->s_time_gran = 1;
- /*
- * Get the root inode and dentry, but defer checking for errors.
- */
- ino = autofs_new_ino(sbi);
- if (!ino) {
- ret = -ENOMEM;
- goto fail_free;
- }
- root_inode = autofs_get_inode(s, S_IFDIR | 0755);
- root = d_make_root(root_inode);
- if (!root) {
- ret = -ENOMEM;
- goto fail_ino;
- }
- pipe = NULL;
-
- root->d_fsdata = ino;
+ return sbi;
+}
- /* Can this call block? */
- if (parse_options(data, root_inode, &pgrp, &pgrp_set, sbi)) {
- pr_err("called with bogus options\n");
- goto fail_dput;
- }
+static int autofs_validate_protocol(struct fs_context *fc)
+{
+ struct autofs_sb_info *sbi = fc->s_fs_info;
/* Test versions first */
if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
- pr_err("kernel does not match daemon version "
+ errorf(fc, "kernel does not match daemon version "
"daemon (%d, %d) kernel (%d, %d)\n",
sbi->min_proto, sbi->max_proto,
AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
- goto fail_dput;
+ return -EINVAL;
}
/* Establish highest kernel protocol version */
@@ -299,62 +283,148 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
sbi->version = AUTOFS_MAX_PROTO_VERSION;
else
sbi->version = sbi->max_proto;
- sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
-
- if (pgrp_set) {
- sbi->oz_pgrp = find_get_pid(pgrp);
- if (!sbi->oz_pgrp) {
- pr_err("could not find process group %d\n",
- pgrp);
- goto fail_dput;
- }
- } else {
- sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID);
+
+ switch (sbi->version) {
+ case 4:
+ sbi->sub_version = 7;
+ break;
+ case 5:
+ sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+ break;
+ default:
+ sbi->sub_version = 0;
}
- if (autofs_type_trigger(sbi->type))
- __managed_dentry_set_managed(root);
+ return 0;
+}
+static int autofs_fill_super(struct super_block *s, struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = s->s_fs_info;
+ struct inode *root_inode;
+ struct autofs_info *ino;
+
+ pr_debug("starting up, sbi = %p\n", sbi);
+
+ sbi->sb = s;
+ s->s_blocksize = 1024;
+ s->s_blocksize_bits = 10;
+ s->s_magic = AUTOFS_SUPER_MAGIC;
+ s->s_op = &autofs_sops;
+ set_default_d_op(s, &autofs_dentry_operations);
+ s->s_time_gran = 1;
+
+ /*
+ * Get the root inode and dentry, but defer checking for errors.
+ */
+ ino = autofs_new_ino(sbi);
+ if (!ino)
+ return -ENOMEM;
+
+ root_inode = autofs_get_inode(s, S_IFDIR | 0755);
+ if (!root_inode)
+ return -ENOMEM;
+
+ root_inode->i_uid = ctx->uid;
+ root_inode->i_gid = ctx->gid;
root_inode->i_fop = &autofs_root_operations;
root_inode->i_op = &autofs_dir_inode_operations;
+ s->s_root = d_make_root(root_inode);
+ if (unlikely(!s->s_root)) {
+ autofs_free_ino(ino);
+ return -ENOMEM;
+ }
+ s->s_root->d_fsdata = ino;
+
+ if (ctx->pgrp_set) {
+ sbi->oz_pgrp = find_get_pid(ctx->pgrp);
+ if (!sbi->oz_pgrp)
+ return invalf(fc, "Could not find process group %d",
+ ctx->pgrp);
+ } else
+ sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID);
+
+ if (autofs_type_trigger(sbi->type))
+ /* s->s_root won't be contended so there's little to
+ * be gained by not taking the d_lock when setting
+ * d_flags, even when a lot of mounts are being done.
+ */
+ managed_dentry_set_managed(s->s_root);
+
pr_debug("pipe fd = %d, pgrp = %u\n",
sbi->pipefd, pid_nr(sbi->oz_pgrp));
- pipe = fget(sbi->pipefd);
- if (!pipe) {
- pr_err("could not open pipe file descriptor\n");
- goto fail_put_pid;
- }
- ret = autofs_prepare_pipe(pipe);
- if (ret < 0)
- goto fail_fput;
- sbi->pipe = pipe;
sbi->flags &= ~AUTOFS_SBI_CATATONIC;
+ return 0;
+}
- /*
- * Success! Install the root dentry now to indicate completion.
- */
- s->s_root = root;
+/*
+ * Validate the parameters and then request a superblock.
+ */
+static int autofs_get_tree(struct fs_context *fc)
+{
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+ int ret;
+
+ ret = autofs_validate_protocol(fc);
+ if (ret)
+ return ret;
+
+ if (sbi->pipefd < 0)
+ return invalf(fc, "No control pipe specified");
+
+ return get_tree_nodev(fc, autofs_fill_super);
+}
+
+static void autofs_free_fc(struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) {
+ if (sbi->pipe)
+ fput(sbi->pipe);
+ kfree(sbi);
+ }
+ kfree(ctx);
+}
+
+static const struct fs_context_operations autofs_context_ops = {
+ .free = autofs_free_fc,
+ .parse_param = autofs_parse_param,
+ .get_tree = autofs_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+int autofs_init_fs_context(struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx;
+ struct autofs_sb_info *sbi;
+
+ ctx = kzalloc(sizeof(struct autofs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ goto nomem;
+
+ ctx->uid = current_uid();
+ ctx->gid = current_gid();
+
+ sbi = autofs_alloc_sbi();
+ if (!sbi)
+ goto nomem_ctx;
+
+ fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
+ fc->ops = &autofs_context_ops;
return 0;
- /*
- * Failure ... clean up.
- */
-fail_fput:
- pr_err("pipe file descriptor does not contain proper ops\n");
- fput(pipe);
-fail_put_pid:
- put_pid(sbi->oz_pgrp);
-fail_dput:
- dput(root);
- goto fail_free;
-fail_ino:
- autofs_free_ino(ino);
-fail_free:
- kfree(sbi);
- s->s_fs_info = NULL;
- return ret;
+nomem_ctx:
+ kfree(ctx);
+nomem:
+ return -ENOMEM;
}
struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
@@ -369,7 +439,7 @@ struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
inode->i_uid = d_inode(sb->s_root)->i_uid;
inode->i_gid = d_inode(sb->s_root)->i_gid;
}
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_ino = get_next_ino();
if (S_ISDIR(mode)) {
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 91fe4548c256..2c31002b314a 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -10,12 +10,13 @@
#include "autofs_i.h"
-static int autofs_dir_symlink(struct user_namespace *, struct inode *,
+static int autofs_dir_permission(struct mnt_idmap *, struct inode *, int);
+static int autofs_dir_symlink(struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
static int autofs_dir_unlink(struct inode *, struct dentry *);
static int autofs_dir_rmdir(struct inode *, struct dentry *);
-static int autofs_dir_mkdir(struct user_namespace *, struct inode *,
- struct dentry *, umode_t);
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
static long autofs_root_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long autofs_root_compat_ioctl(struct file *,
@@ -50,6 +51,7 @@ const struct file_operations autofs_dir_operations = {
const struct inode_operations autofs_dir_inode_operations = {
.lookup = autofs_lookup,
+ .permission = autofs_dir_permission,
.unlink = autofs_dir_unlink,
.symlink = autofs_dir_symlink,
.mkdir = autofs_dir_mkdir,
@@ -77,6 +79,7 @@ static int autofs_dir_open(struct inode *inode, struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
pr_debug("file=%p dentry=%p %pd\n", file, dentry, dentry);
@@ -93,7 +96,7 @@ static int autofs_dir_open(struct inode *inode, struct file *file)
* it.
*/
spin_lock(&sbi->lookup_lock);
- if (!path_is_mountpoint(&file->f_path) && simple_empty(dentry)) {
+ if (!path_is_mountpoint(&file->f_path) && autofs_empty(ino)) {
spin_unlock(&sbi->lookup_lock);
return -ENOENT;
}
@@ -288,9 +291,26 @@ static struct dentry *autofs_mountpoint_changed(struct path *path)
struct dentry *dentry = path->dentry;
struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
- /*
- * If this is an indirect mount the dentry could have gone away
- * as a result of an expire and a new one created.
+ /* If this is an indirect mount the dentry could have gone away
+ * and a new one created.
+ *
+ * This is unusual and I can't remember the case for which it
+ * was originally added now. But an example of how this can
+ * happen is an autofs indirect mount that has the "browse"
+ * option set and also has the "symlink" option in the autofs
+ * map entry. In this case the daemon will remove the browse
+ * directory and create a symlink as the mount leaving the
+ * struct path stale.
+ *
+ * Another not so obvious case is when a mount in an autofs
+ * indirect mount that uses the "nobrowse" option is being
+ * expired at the same time as a path walk. If the mount has
+ * been umounted, but the mount point directory was seen before
+ * becoming unhashed (during a lockless path walk), then when a
+ * stat family system call is made the mount won't be re-mounted
+ * as it should be. In this case the mount point that's been
+ * removed (by the daemon) will be stale and a new mount point
+ * dentry created.
*/
if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
@@ -321,6 +341,14 @@ static struct vfsmount *autofs_d_automount(struct path *path)
if (autofs_oz_mode(sbi))
return NULL;
+ /* Refuse to trigger mount if current namespace is not the owner
+ * and the mount is propagation private.
+ */
+ if (sbi->mnt_ns_id != to_ns_common(current->nsproxy->mnt_ns)->ns_id) {
+ if (vfsmount_to_propagation_flags(path->mnt) & MS_PRIVATE)
+ return ERR_PTR(-EPERM);
+ }
+
/*
* If an expire request is pending everyone must wait.
* If the expire fails we're still mounted so continue
@@ -362,7 +390,7 @@ static struct vfsmount *autofs_d_automount(struct path *path)
* the mount never trigger mounts themselves (they have an
* autofs trigger mount mounted on them). But v4 pseudo direct
* mounts do need the leaves to trigger mounts. In this case
- * we have no choice but to use the list_empty() check and
+ * we have no choice but to use the autofs_empty() check and
* require user space behave.
*/
if (sbi->version > 4) {
@@ -371,7 +399,7 @@ static struct vfsmount *autofs_d_automount(struct path *path)
goto done;
}
} else {
- if (!simple_empty(dentry)) {
+ if (!autofs_empty(ino)) {
spin_unlock(&sbi->fs_lock);
goto done;
}
@@ -426,9 +454,8 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk)
if (rcu_walk) {
/* We don't need fs_lock in rcu_walk mode,
- * just testing 'AUTOFS_INFO_NO_RCU' is enough.
- * simple_empty() takes a spinlock, so leave it
- * to last.
+ * just testing 'AUTOFS_INF_WANT_EXPIRE' is enough.
+ *
* We only return -EISDIR when certain this isn't
* a mount-trap.
*/
@@ -441,9 +468,7 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk)
inode = d_inode_rcu(dentry);
if (inode && S_ISLNK(inode->i_mode))
return -EISDIR;
- if (list_empty(&dentry->d_subdirs))
- return 0;
- if (!simple_empty(dentry))
+ if (!autofs_empty(ino))
return -EISDIR;
return 0;
}
@@ -463,7 +488,7 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk)
* we can avoid needless calls ->d_automount() and avoid
* an incorrect ELOOP error return.
*/
- if ((!path_is_mountpoint(path) && !simple_empty(dentry)) ||
+ if ((!path_is_mountpoint(path) && !autofs_empty(ino)) ||
(d_really_is_positive(dentry) && d_is_symlink(dentry)))
status = -EISDIR;
}
@@ -526,11 +551,30 @@ static struct dentry *autofs_lookup(struct inode *dir,
return NULL;
}
-static int autofs_dir_symlink(struct user_namespace *mnt_userns,
+static int autofs_dir_permission(struct mnt_idmap *idmap,
+ struct inode *inode, int mask)
+{
+ if (mask & MAY_WRITE) {
+ struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
+
+ if (!autofs_oz_mode(sbi))
+ return -EACCES;
+
+ /* autofs_oz_mode() needs to allow path walks when the
+ * autofs mount is catatonic but the state of an autofs
+ * file system needs to be preserved over restarts.
+ */
+ if (sbi->flags & AUTOFS_SBI_CATATONIC)
+ return -EACCES;
+ }
+
+ return generic_permission(idmap, inode, mask);
+}
+
+static int autofs_dir_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
struct inode *inode;
@@ -539,16 +583,6 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns,
pr_debug("%s <- %pd\n", symname, dentry);
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
BUG_ON(!ino);
autofs_clean_ino(ino);
@@ -568,14 +602,12 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns,
}
inode->i_private = cp;
inode->i_size = size;
- d_add(dentry, inode);
- dget(dentry);
- ino->count++;
+ d_make_persistent(dentry, inode);
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
- dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return 0;
}
@@ -598,28 +630,16 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns,
static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
- struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
- ino->count--;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count--;
- dput(ino->dentry);
+ d_make_discardable(dentry);
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
- dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
spin_lock(&sbi->lookup_lock);
__autofs_add_expiring(dentry);
@@ -683,16 +703,6 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
pr_debug("dentry %p, removing %pd\n", dentry, dentry);
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
if (ino->count != 1)
return -ENOTEMPTY;
@@ -704,10 +714,9 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
if (sbi->version < 5)
autofs_clear_leaf_automount_flags(dentry);
- ino->count--;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count--;
- dput(ino->dentry);
+ d_make_discardable(dentry);
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
@@ -717,25 +726,15 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs_dir_mkdir(struct user_namespace *mnt_userns,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
struct inode *inode;
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
pr_debug("dentry %p, creating %pd\n", dentry, dentry);
BUG_ON(!ino);
@@ -746,20 +745,18 @@ static int autofs_dir_mkdir(struct user_namespace *mnt_userns,
inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
- d_add(dentry, inode);
+ return ERR_PTR(-ENOMEM);
if (sbi->version < 5)
autofs_set_leaf_automount_flags(dentry);
- dget(dentry);
- ino->count++;
+ d_make_persistent(dentry, inode);
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
inc_nlink(dir);
- dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- return 0;
+ return NULL;
}
/* Get/set timeout ioctl() operation */
diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
index 54c1f8b8b075..33dd4660d82f 100644
--- a/fs/autofs/waitq.c
+++ b/fs/autofs/waitq.c
@@ -32,8 +32,9 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi)
wq->status = -ENOENT; /* Magic is gone - report failure */
kfree(wq->name.name - wq->offset);
wq->name.name = NULL;
- wq->wait_ctr--;
- wake_up_interruptible(&wq->queue);
+ wake_up(&wq->queue);
+ if (!--wq->wait_ctr)
+ kfree(wq);
wq = nwq;
}
fput(sbi->pipe); /* Close the pipe */
diff --git a/fs/backing-file.c b/fs/backing-file.c
new file mode 100644
index 000000000000..45da8600d564
--- /dev/null
+++ b/fs/backing-file.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Forked from fs/overlayfs/file.c.
+ *
+ * Copyright (C) 2017 Red Hat, Inc.
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#include <linux/fs.h>
+#include <linux/backing-file.h>
+#include <linux/splice.h>
+#include <linux/mm.h>
+
+#include "internal.h"
+
+/**
+ * backing_file_open - open a backing file for kernel internal use
+ * @user_path: path that the user requested to open
+ * @flags: open flags
+ * @real_path: path of the backing file
+ * @cred: credentials for open
+ *
+ * Open a backing file for a stackable filesystem (e.g., overlayfs).
+ * @user_path may be on the stackable filesystem and @real_path on the
+ * underlying filesystem. In this case, we want to be able to return the
+ * @user_path of the stackable filesystem. This is done by embedding the
+ * returned file into a container structure that also stores the stacked
+ * file's path, which can be retrieved using backing_file_user_path().
+ */
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred)
+{
+ struct file *f;
+ int error;
+
+ f = alloc_empty_backing_file(flags, cred);
+ if (IS_ERR(f))
+ return f;
+
+ path_get(user_path);
+ backing_file_set_user_path(f, user_path);
+ error = vfs_open(real_path, f);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+
+ return f;
+}
+EXPORT_SYMBOL_GPL(backing_file_open);
+
+struct file *backing_tmpfile_open(const struct path *user_path, int flags,
+ const struct path *real_parentpath,
+ umode_t mode, const struct cred *cred)
+{
+ struct mnt_idmap *real_idmap = mnt_idmap(real_parentpath->mnt);
+ struct file *f;
+ int error;
+
+ f = alloc_empty_backing_file(flags, cred);
+ if (IS_ERR(f))
+ return f;
+
+ path_get(user_path);
+ backing_file_set_user_path(f, user_path);
+ error = vfs_tmpfile(real_idmap, real_parentpath, f, mode);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+ return f;
+}
+EXPORT_SYMBOL(backing_tmpfile_open);
+
+struct backing_aio {
+ struct kiocb iocb;
+ refcount_t ref;
+ struct kiocb *orig_iocb;
+ /* used for aio completion */
+ void (*end_write)(struct kiocb *iocb, ssize_t);
+ struct work_struct work;
+ long res;
+};
+
+static struct kmem_cache *backing_aio_cachep;
+
+#define BACKING_IOCB_MASK \
+ (IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
+
+static rwf_t iocb_to_rw_flags(int flags)
+{
+ return (__force rwf_t)(flags & BACKING_IOCB_MASK);
+}
+
+static void backing_aio_put(struct backing_aio *aio)
+{
+ if (refcount_dec_and_test(&aio->ref)) {
+ fput(aio->iocb.ki_filp);
+ kmem_cache_free(backing_aio_cachep, aio);
+ }
+}
+
+static void backing_aio_cleanup(struct backing_aio *aio, long res)
+{
+ struct kiocb *iocb = &aio->iocb;
+ struct kiocb *orig_iocb = aio->orig_iocb;
+
+ orig_iocb->ki_pos = iocb->ki_pos;
+ if (aio->end_write)
+ aio->end_write(orig_iocb, res);
+
+ backing_aio_put(aio);
+}
+
+static void backing_aio_rw_complete(struct kiocb *iocb, long res)
+{
+ struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
+ struct kiocb *orig_iocb = aio->orig_iocb;
+
+ if (iocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(iocb);
+
+ backing_aio_cleanup(aio, res);
+ orig_iocb->ki_complete(orig_iocb, res);
+}
+
+static void backing_aio_complete_work(struct work_struct *work)
+{
+ struct backing_aio *aio = container_of(work, struct backing_aio, work);
+
+ backing_aio_rw_complete(&aio->iocb, aio->res);
+}
+
+static void backing_aio_queue_completion(struct kiocb *iocb, long res)
+{
+ struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
+
+ /*
+ * Punt to a work queue to serialize updates of mtime/size.
+ */
+ aio->res = res;
+ INIT_WORK(&aio->work, backing_aio_complete_work);
+ queue_work(file_inode(aio->orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
+ &aio->work);
+}
+
+static int backing_aio_init_wq(struct kiocb *iocb)
+{
+ struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
+
+ if (sb->s_dio_done_wq)
+ return 0;
+
+ return sb_init_dio_done_wq(sb);
+}
+
+static int do_backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags)
+{
+ struct backing_aio *aio = NULL;
+ int ret;
+
+ if (is_sync_kiocb(iocb)) {
+ rwf_t rwf = iocb_to_rw_flags(flags);
+
+ return vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
+ }
+
+ aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
+ if (!aio)
+ return -ENOMEM;
+
+ aio->orig_iocb = iocb;
+ kiocb_clone(&aio->iocb, iocb, get_file(file));
+ aio->iocb.ki_complete = backing_aio_rw_complete;
+ refcount_set(&aio->ref, 2);
+ ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
+ backing_aio_put(aio);
+ if (ret != -EIOCBQUEUED)
+ backing_aio_cleanup(aio, ret);
+ return ret;
+}
+
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx)
+{
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ if (iocb->ki_flags & IOCB_DIRECT &&
+ !(file->f_mode & FMODE_CAN_ODIRECT))
+ return -EINVAL;
+
+ scoped_with_creds(ctx->cred)
+ ret = do_backing_file_read_iter(file, iter, iocb, flags);
+
+ if (ctx->accessed)
+ ctx->accessed(iocb->ki_filp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_read_iter);
+
+static int do_backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ void (*end_write)(struct kiocb *, ssize_t))
+{
+ struct backing_aio *aio;
+ int ret;
+
+ if (is_sync_kiocb(iocb)) {
+ rwf_t rwf = iocb_to_rw_flags(flags);
+
+ ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
+ if (end_write)
+ end_write(iocb, ret);
+ return ret;
+ }
+
+ ret = backing_aio_init_wq(iocb);
+ if (ret)
+ return ret;
+
+ aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
+ if (!aio)
+ return -ENOMEM;
+
+ aio->orig_iocb = iocb;
+ aio->end_write = end_write;
+ kiocb_clone(&aio->iocb, iocb, get_file(file));
+ aio->iocb.ki_flags = flags;
+ aio->iocb.ki_complete = backing_aio_queue_completion;
+ refcount_set(&aio->ref, 2);
+ ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
+ backing_aio_put(aio);
+ if (ret != -EIOCBQUEUED)
+ backing_aio_cleanup(aio, ret);
+ return ret;
+}
+
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx)
+{
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ ret = file_remove_privs(iocb->ki_filp);
+ if (ret)
+ return ret;
+
+ if (iocb->ki_flags & IOCB_DIRECT &&
+ !(file->f_mode & FMODE_CAN_ODIRECT))
+ return -EINVAL;
+
+ scoped_with_creds(ctx->cred)
+ return do_backing_file_write_iter(file, iter, iocb, flags, ctx->end_write);
+}
+EXPORT_SYMBOL_GPL(backing_file_write_iter);
+
+ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx)
+{
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ scoped_with_creds(ctx->cred)
+ ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags);
+
+ if (ctx->accessed)
+ ctx->accessed(iocb->ki_filp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_splice_read);
+
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, struct kiocb *iocb,
+ size_t len, unsigned int flags,
+ struct backing_file_ctx *ctx)
+{
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ if (!out->f_op->splice_write)
+ return -EINVAL;
+
+ ret = file_remove_privs(iocb->ki_filp);
+ if (ret)
+ return ret;
+
+ scoped_with_creds(ctx->cred) {
+ file_start_write(out);
+ ret = out->f_op->splice_write(pipe, out, &iocb->ki_pos, len, flags);
+ file_end_write(out);
+ }
+
+ if (ctx->end_write)
+ ctx->end_write(iocb, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_splice_write);
+
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx)
+{
+ struct file *user_file = vma->vm_file;
+ int ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
+ return -EIO;
+
+ if (!can_mmap_file(file))
+ return -ENODEV;
+
+ vma_set_file(vma, file);
+
+ scoped_with_creds(ctx->cred)
+ ret = vfs_mmap(vma->vm_file, vma);
+
+ if (ctx->accessed)
+ ctx->accessed(user_file);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(backing_file_mmap);
+
+static int __init backing_aio_init(void)
+{
+ backing_aio_cachep = KMEM_CACHE(backing_aio, SLAB_HWCACHE_ALIGN);
+ if (!backing_aio_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(backing_aio_init);
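For reference, a minimal sketch of how a stacking filesystem might drive the helpers added above from its ->read_iter() method. It is illustrative only and not part of the patch: stashing the backing file in ->private_data and the example_*() names are assumptions made for the sketch, and the backing_file_ctx fields used (cred, accessed) are the ones referenced by backing_file_read_iter() in this file.

#include <linux/fs.h>
#include <linux/backing-file.h>

static void example_file_accessed(struct file *user_file)
{
	/* Propagate the access to the stacked inode, e.g. for atime. */
	touch_atime(&user_file->f_path);
}

static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *user_file = iocb->ki_filp;
	/* Assumption: the backing file was saved here by the open path. */
	struct file *realfile = user_file->private_data;
	struct backing_file_ctx ctx = {
		.cred     = user_file->f_cred,	/* creds to assume for the backing I/O */
		.accessed = example_file_accessed,
	};

	/* The helper switches credentials and forwards the read to the backing file. */
	return backing_file_read_iter(realfile, iter, iocb, iocb->ki_flags, &ctx);
}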
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 12b8fdcc445b..0ef9bcb744dd 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -27,7 +27,7 @@ static const struct file_operations bad_file_ops =
.open = bad_file_open,
};
-static int bad_inode_create(struct user_namespace *mnt_userns,
+static int bad_inode_create(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
@@ -51,17 +51,17 @@ static int bad_inode_unlink(struct inode *dir, struct dentry *dentry)
return -EIO;
}
-static int bad_inode_symlink(struct user_namespace *mnt_userns,
+static int bad_inode_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname)
{
return -EIO;
}
-static int bad_inode_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return -EIO;
+ return ERR_PTR(-EIO);
}
static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
@@ -69,13 +69,13 @@ static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
return -EIO;
}
-static int bad_inode_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int bad_inode_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
return -EIO;
}
-static int bad_inode_rename2(struct user_namespace *mnt_userns,
+static int bad_inode_rename2(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -89,20 +89,20 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
return -EIO;
}
-static int bad_inode_permission(struct user_namespace *mnt_userns,
+static int bad_inode_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
return -EIO;
}
-static int bad_inode_getattr(struct user_namespace *mnt_userns,
+static int bad_inode_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
return -EIO;
}
-static int bad_inode_setattr(struct user_namespace *mnt_userns,
+static int bad_inode_setattr(struct mnt_idmap *idmap,
struct dentry *direntry, struct iattr *attrs)
{
return -EIO;
@@ -133,8 +133,7 @@ static int bad_inode_fiemap(struct inode *inode,
return -EIO;
}
-static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
- int flags)
+static int bad_inode_update_time(struct inode *inode, int flags)
{
return -EIO;
}
@@ -146,15 +145,15 @@ static int bad_inode_atomic_open(struct inode *inode, struct dentry *dentry,
return -EIO;
}
-static int bad_inode_tmpfile(struct user_namespace *mnt_userns,
- struct inode *inode, struct dentry *dentry,
+static int bad_inode_tmpfile(struct mnt_idmap *idmap,
+ struct inode *inode, struct file *file,
umode_t mode)
{
return -EIO;
}
-static int bad_inode_set_acl(struct user_namespace *mnt_userns,
- struct inode *inode, struct posix_acl *acl,
+static int bad_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct posix_acl *acl,
int type)
{
return -EIO;
@@ -177,7 +176,7 @@ static const struct inode_operations bad_inode_ops =
.setattr = bad_inode_setattr,
.listxattr = bad_inode_listxattr,
.get_link = bad_inode_get_link,
- .get_acl = bad_inode_get_acl,
+ .get_inode_acl = bad_inode_get_acl,
.fiemap = bad_inode_fiemap,
.update_time = bad_inode_update_time,
.atomic_open = bad_inode_atomic_open,
@@ -209,8 +208,7 @@ void make_bad_inode(struct inode *inode)
remove_inode_hash(inode);
inode->i_mode = S_IFREG;
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &bad_inode_ops;
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &bad_file_ops;
diff --git a/fs/befs/Kconfig b/fs/befs/Kconfig
index 9550b6462b81..5fcfc4024ffe 100644
--- a/fs/befs/Kconfig
+++ b/fs/befs/Kconfig
@@ -2,6 +2,7 @@
config BEFS_FS
tristate "BeOS file system (BeFS) support (read only)"
depends on BLOCK
+ select BUFFER_HEAD
select NLS
help
The BeOS File System (BeFS) is the native file system of Be, Inc's
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 1b7e0f7128d6..53b36aa29978 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -500,7 +500,7 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
goto error_alloc;
}
- strlcpy(keybuf, keystart, keylen + 1);
+ strscpy(keybuf, keystart, keylen + 1);
*value = fs64_to_cpu(sb, valarray[cur_key]);
*keysize = keylen;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index c1ba13d19024..9fcfdd6b8189 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -11,12 +11,13 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/nls.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
-#include <linux/parser.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/cred.h>
@@ -40,7 +41,7 @@ MODULE_LICENSE("GPL");
static int befs_readdir(struct file *, struct dir_context *);
static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-static int befs_readpage(struct file *file, struct page *page);
+static int befs_read_folio(struct file *file, struct folio *folio);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
static struct dentry *befs_lookup(struct inode *, struct dentry *,
unsigned int);
@@ -48,28 +49,26 @@ static struct inode *befs_iget(struct super_block *, unsigned long);
static struct inode *befs_alloc_inode(struct super_block *sb);
static void befs_free_inode(struct inode *inode);
static void befs_destroy_inodecache(void);
-static int befs_symlink_readpage(struct file *, struct page *);
+static int befs_symlink_read_folio(struct file *, struct folio *);
static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static void befs_put_super(struct super_block *);
-static int befs_remount(struct super_block *, int *, char *);
static int befs_statfs(struct dentry *, struct kstatfs *);
static int befs_show_options(struct seq_file *, struct dentry *);
-static int parse_options(char *, struct befs_mount_options *);
static struct dentry *befs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type);
static struct dentry *befs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type);
static struct dentry *befs_get_parent(struct dentry *child);
+static void befs_free_fc(struct fs_context *fc);
static const struct super_operations befs_sops = {
.alloc_inode = befs_alloc_inode, /* allocate a new inode */
.free_inode = befs_free_inode, /* deallocate an inode */
.put_super = befs_put_super, /* uninit super */
.statfs = befs_statfs, /* statfs */
- .remount_fs = befs_remount,
.show_options = befs_show_options,
};
@@ -87,31 +86,31 @@ static const struct inode_operations befs_dir_inode_operations = {
};
static const struct address_space_operations befs_aops = {
- .readpage = befs_readpage,
+ .read_folio = befs_read_folio,
.bmap = befs_bmap,
};
static const struct address_space_operations befs_symlink_aops = {
- .readpage = befs_symlink_readpage,
+ .read_folio = befs_symlink_read_folio,
};
static const struct export_operations befs_export_operations = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = befs_fh_to_dentry,
.fh_to_parent = befs_fh_to_parent,
.get_parent = befs_get_parent,
};
/*
- * Called by generic_file_read() to read a page of data
+ * Called by generic_file_read() to read a folio of data
*
* In turn, simply calls a generic block read function and
* passes it the address of befs_get_block, for mapping file
* positions to disk blocks.
*/
-static int
-befs_readpage(struct file *file, struct page *page)
+static int befs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, befs_get_block);
+ return block_read_full_folio(folio, befs_get_block);
}
static sector_t
@@ -277,7 +276,7 @@ befs_alloc_inode(struct super_block *sb)
{
struct befs_inode_info *bi;
- bi = kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL);
+ bi = alloc_inode_sb(sb, befs_inode_cachep, GFP_KERNEL);
if (!bi)
return NULL;
return &bi->vfs_inode;
@@ -308,7 +307,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
befs_ino = BEFS_I(inode);
@@ -361,11 +360,11 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
* for indexing purposes. (PFD, page 54)
*/
- inode->i_mtime.tv_sec =
- fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
- inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
- inode->i_ctime = inode->i_mtime;
- inode->i_atime = inode->i_mtime;
+ inode_set_mtime(inode,
+ fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16,
+ 0);/* lower 16 bits are not a time */
+ inode_set_ctime_to_ts(inode, inode_get_mtime(inode));
+ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
@@ -375,7 +374,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
if (S_ISLNK(inode->i_mode) && !(befs_ino->i_flags & BEFS_LONG_SYMLINK)){
inode->i_size = 0;
inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE;
- strlcpy(befs_ino->i_data.symlink, raw_inode->data.symlink,
+ strscpy(befs_ino->i_data.symlink, raw_inode->data.symlink,
BEFS_SYMLINK_LEN);
} else {
int num_blks;
@@ -435,8 +434,7 @@ befs_init_inodecache(void)
{
befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache",
sizeof(struct befs_inode_info), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
- SLAB_ACCOUNT),
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
offsetof(struct befs_inode_info,
i_data.symlink),
sizeof_field(struct befs_inode_info,
@@ -468,14 +466,15 @@ befs_destroy_inodecache(void)
* The data stream becomes the link name, unless the LONG_SYMLINK
* flag is set.
*/
-static int befs_symlink_readpage(struct file *unused, struct page *page)
+static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct befs_inode_info *befs_ino = BEFS_I(inode);
befs_data_stream *data = &befs_ino->i_data.ds;
befs_off_t len = data->size;
- char *link = page_address(page);
+ char *link = folio_address(folio);
+ int err = -EIO;
if (len == 0 || len > PAGE_SIZE) {
befs_error(sb, "Long symlink with illegal length");
@@ -488,13 +487,10 @@ static int befs_symlink_readpage(struct file *unused, struct page *page)
goto fail;
}
link[len - 1] = '\0';
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ err = 0;
fail:
- SetPageError(page);
- unlock_page(page);
- return -EIO;
+ folio_end_read(folio, err == 0);
+ return err;
}
/*
@@ -671,99 +667,57 @@ static struct dentry *befs_get_parent(struct dentry *child)
parent = befs_iget(child->d_sb,
(unsigned long)befs_ino->i_parent.start);
- if (IS_ERR(parent))
- return ERR_CAST(parent);
-
return d_obtain_alias(parent);
}
enum {
- Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
+ Opt_uid, Opt_gid, Opt_charset, Opt_debug,
};
-static const match_table_t befs_tokens = {
- {Opt_uid, "uid=%d"},
- {Opt_gid, "gid=%d"},
- {Opt_charset, "iocharset=%s"},
- {Opt_debug, "debug"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec befs_param_spec[] = {
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_string ("iocharset", Opt_charset),
+ fsparam_flag ("debug", Opt_debug),
+ {}
};
static int
-parse_options(char *options, struct befs_mount_options *opts)
+befs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- kuid_t uid;
- kgid_t gid;
-
- /* Initialize options */
- opts->uid = GLOBAL_ROOT_UID;
- opts->gid = GLOBAL_ROOT_GID;
- opts->use_uid = 0;
- opts->use_gid = 0;
- opts->iocharset = NULL;
- opts->debug = 0;
-
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, befs_tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return 0;
- uid = INVALID_UID;
- if (option >= 0)
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid)) {
- pr_err("Invalid uid %d, "
- "using default\n", option);
- break;
- }
- opts->uid = uid;
- opts->use_uid = 1;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return 0;
- gid = INVALID_GID;
- if (option >= 0)
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid)) {
- pr_err("Invalid gid %d, "
- "using default\n", option);
- break;
- }
- opts->gid = gid;
- opts->use_gid = 1;
- break;
- case Opt_charset:
- kfree(opts->iocharset);
- opts->iocharset = match_strdup(&args[0]);
- if (!opts->iocharset) {
- pr_err("allocation failure for "
- "iocharset string\n");
- return 0;
- }
- break;
- case Opt_debug:
- opts->debug = 1;
- break;
- default:
- pr_err("Unrecognized mount option \"%s\" "
- "or missing value\n", p);
- return 0;
- }
+ struct befs_mount_options *opts = fc->fs_private;
+ int token;
+ struct fs_parse_result result;
+
+ /* befs ignores all options on remount */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+ return 0;
+
+ token = fs_parse(fc, befs_param_spec, param, &result);
+ if (token < 0)
+ return token;
+
+ switch (token) {
+ case Opt_uid:
+ opts->uid = result.uid;
+ opts->use_uid = 1;
+ break;
+ case Opt_gid:
+ opts->gid = result.gid;
+ opts->use_gid = 1;
+ break;
+ case Opt_charset:
+ kfree(opts->iocharset);
+ opts->iocharset = param->string;
+ param->string = NULL;
+ break;
+ case Opt_debug:
+ opts->debug = 1;
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int befs_show_options(struct seq_file *m, struct dentry *root)
@@ -799,6 +753,21 @@ befs_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
+/*
+ * Copy the parsed options into the sbi mount_options member
+ */
+static void
+befs_set_options(struct befs_sb_info *sbi, struct befs_mount_options *opts)
+{
+ sbi->mount_opts.uid = opts->uid;
+ sbi->mount_opts.gid = opts->gid;
+ sbi->mount_opts.use_uid = opts->use_uid;
+ sbi->mount_opts.use_gid = opts->use_gid;
+ sbi->mount_opts.debug = opts->debug;
+ sbi->mount_opts.iocharset = opts->iocharset;
+ opts->iocharset = NULL;
+}
+
/* Allocate private field of the superblock, fill it.
*
* Finish filling the public superblock fields
@@ -806,7 +775,7 @@ befs_put_super(struct super_block *sb)
* Load a set of NLS translations if needed.
*/
static int
-befs_fill_super(struct super_block *sb, void *data, int silent)
+befs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct buffer_head *bh;
struct befs_sb_info *befs_sb;
@@ -816,6 +785,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
const unsigned long sb_block = 0;
const off_t x86_sb_off = 512;
int blocksize;
+ struct befs_mount_options *parsed_opts = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL);
if (sb->s_fs_info == NULL)
@@ -823,11 +794,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
befs_sb = BEFS_SB(sb);
- if (!parse_options((char *) data, &befs_sb->mount_opts)) {
- if (!silent)
- befs_error(sb, "cannot parse mount options");
- goto unacquire_priv_sbp;
- }
+ befs_set_options(befs_sb, parsed_opts);
befs_debug(sb, "---> %s", __func__);
@@ -940,10 +907,10 @@ unacquire_none:
}
static int
-befs_remount(struct super_block *sb, int *flags, char *data)
+befs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- if (!(*flags & SB_RDONLY))
+ sync_filesystem(fc->root->d_sb);
+ if (!(fc->sb_flags & SB_RDONLY))
return -EINVAL;
return 0;
}
@@ -971,19 +938,51 @@ befs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static struct dentry *
-befs_mount(struct file_system_type *fs_type, int flags, const char *dev_name,
- void *data)
+static int befs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, befs_fill_super);
+}
+
+static const struct fs_context_operations befs_context_ops = {
+ .parse_param = befs_parse_param,
+ .get_tree = befs_get_tree,
+ .reconfigure = befs_reconfigure,
+ .free = befs_free_fc,
+};
+
+static int befs_init_fs_context(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, befs_fill_super);
+ struct befs_mount_options *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ /* Initialize options */
+ opts->uid = GLOBAL_ROOT_UID;
+ opts->gid = GLOBAL_ROOT_GID;
+
+ fc->fs_private = opts;
+ fc->ops = &befs_context_ops;
+
+ return 0;
+}
+
+static void befs_free_fc(struct fs_context *fc)
+{
+ struct befs_mount_options *opts = fc->fs_private;
+
+ kfree(opts->iocharset);
+ kfree(fc->fs_private);
}
static struct file_system_type befs_fs_type = {
.owner = THIS_MODULE,
.name = "befs",
- .mount = befs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = befs_init_fs_context,
+ .parameters = befs_param_spec,
};
MODULE_ALIAS_FS("befs");
diff --git a/fs/bfs/Kconfig b/fs/bfs/Kconfig
index 3a757805b585..8e7ef866b62a 100644
--- a/fs/bfs/Kconfig
+++ b/fs/bfs/Kconfig
@@ -2,6 +2,7 @@
config BFS_FS
tristate "BFS file system support"
depends on BLOCK
+ select BUFFER_HEAD
help
Boot File System (BFS) is a file system used under SCO UnixWare to
allow the bootloader access to the kernel image and other important
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 34d4f68f786b..c375e22c4c0c 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -75,7 +75,7 @@ const struct file_operations bfs_dir_operations = {
.llseek = generic_file_llseek,
};
-static int bfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int bfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
int err;
@@ -96,8 +96,8 @@ static int bfs_create(struct user_namespace *mnt_userns, struct inode *dir,
}
set_bit(ino, info->si_imap);
info->si_freei--;
- inode_init_owner(&init_user_ns, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
@@ -158,7 +158,7 @@ static int bfs_link(struct dentry *old, struct inode *dir,
return err;
}
inc_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
ihold(inode);
d_instantiate(new, inode);
@@ -187,9 +187,9 @@ static int bfs_unlink(struct inode *dir, struct dentry *dentry)
}
de->ino = 0;
mark_buffer_dirty_inode(bh, dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
error = 0;
@@ -199,7 +199,7 @@ out_brelse:
return error;
}
-static int bfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int bfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -240,10 +240,10 @@ static int bfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
goto end_rename;
}
old_de->ino = 0;
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
mark_inode_dirty(old_dir);
if (new_inode) {
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
inode_dec_link_count(new_inode);
}
mark_buffer_dirty_inode(old_bh, old_dir);
@@ -275,11 +275,6 @@ static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino)
dprintf("name=%s, namelen=%d\n", name, namelen);
- if (!namelen)
- return -ENOENT;
- if (namelen > BFS_NAMELEN)
- return -ENAMETOOLONG;
-
sblock = BFS_I(dir)->i_sblock;
eblock = BFS_I(dir)->i_eblock;
for (block = sblock; block <= eblock; block++) {
@@ -292,9 +287,10 @@ static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino)
pos = (block - sblock) * BFS_BSIZE + off;
if (pos >= dir->i_size) {
dir->i_size += BFS_DIRENT_SIZE;
- dir->i_ctime = current_time(dir);
+ inode_set_ctime_current(dir);
}
- dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
mark_inode_dirty(dir);
de->ino = cpu_to_le16((u16)ino);
for (i = 0; i < BFS_NAMELEN; i++)
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 7f8544abf636..d33d6bde992b 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -11,6 +11,7 @@
*/
#include <linux/fs.h>
+#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include "bfs.h"
@@ -26,8 +27,8 @@ const struct file_operations bfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .splice_read = generic_file_splice_read,
+ .mmap_prepare = generic_file_mmap_prepare,
+ .splice_read = filemap_splice_read,
};
static int bfs_move_block(unsigned long from, unsigned long to,
@@ -150,14 +151,15 @@ out:
return err;
}
-static int bfs_writepage(struct page *page, struct writeback_control *wbc)
+static int bfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, bfs_get_block, wbc);
+ return mpage_writepages(mapping, wbc, bfs_get_block);
}
-static int bfs_readpage(struct file *file, struct page *page)
+static int bfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, bfs_get_block);
+ return block_read_full_folio(folio, bfs_get_block);
}
static void bfs_write_failed(struct address_space *mapping, loff_t to)
@@ -168,14 +170,14 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)
truncate_pagecache(inode, inode->i_size);
}
-static int bfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int bfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- bfs_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, bfs_get_block);
if (unlikely(ret))
bfs_write_failed(mapping, pos + len);
@@ -188,11 +190,13 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations bfs_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = bfs_readpage,
- .writepage = bfs_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = bfs_read_folio,
+ .writepages = bfs_writepages,
.write_begin = bfs_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = bfs_bmap,
};
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index fd691e4815c5..ce6f83234b67 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -17,6 +17,7 @@
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
+#include <linux/fs_context.h>
#include "bfs.h"
MODULE_AUTHOR("Tigran Aivazian <aivazian.tigran@gmail.com>");
@@ -41,7 +42,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
@@ -60,7 +61,19 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
di = (struct bfs_inode *)bh->b_data + off;
- inode->i_mode = 0x0000FFFF & le32_to_cpu(di->i_mode);
+ /*
+ * https://martin.hinner.info/fs/bfs/bfs-structure.html explains that
+ * BFS in the SCO UnixWare environment used only the lower 9 bits of the
+ * di->i_mode value. This means that, although bfs_write_inode() saves all
+ * of the inode->i_mode bits (which include the S_IFMT bits and the
+ * S_IS{UID,GID,VTX} bits), the middle 7 bits of the di->i_mode value can
+ * be garbage when those bits were not saved by bfs_write_inode().
+ * Since we can't tell whether the middle 7 bits are garbage, use only the
+ * lower 12 bits (i.e. tolerate the S_IS{UID,GID,VTX} bits possibly being
+ * garbage) and reconstruct the S_IFMT bits for the Linux environment from
+ * the di->i_vtype value.
+ */
+ inode->i_mode = 0x00000FFF & le32_to_cpu(di->i_mode);
if (le32_to_cpu(di->i_vtype) == BFS_VDIR) {
inode->i_mode |= S_IFDIR;
inode->i_op = &bfs_dir_inops;
@@ -70,6 +83,11 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
inode->i_mapping->a_ops = &bfs_aops;
+ } else {
+ brelse(bh);
+ printf("Unknown vtype=%u %s:%08lx\n",
+ le32_to_cpu(di->i_vtype), inode->i_sb->s_id, ino);
+ goto error;
}
BFS_I(inode)->i_sblock = le32_to_cpu(di->i_sblock);
@@ -80,12 +98,9 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
set_nlink(inode, le32_to_cpu(di->i_nlink));
inode->i_size = BFS_FILESIZE(di);
inode->i_blocks = BFS_FILEBLOCKS(di);
- inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
- inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
- inode->i_ctime.tv_sec = le32_to_cpu(di->i_ctime);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
+ inode_set_atime(inode, le32_to_cpu(di->i_atime), 0);
+ inode_set_mtime(inode, le32_to_cpu(di->i_mtime), 0);
+ inode_set_ctime(inode, le32_to_cpu(di->i_ctime), 0);
brelse(bh);
unlock_new_inode(inode);
@@ -141,9 +156,9 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
di->i_uid = cpu_to_le32(i_uid_read(inode));
di->i_gid = cpu_to_le32(i_gid_read(inode));
di->i_nlink = cpu_to_le32(inode->i_nlink);
- di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
- di->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+ di->i_atime = cpu_to_le32(inode_get_atime_sec(inode));
+ di->i_mtime = cpu_to_le32(inode_get_mtime_sec(inode));
+ di->i_ctime = cpu_to_le32(inode_get_ctime_sec(inode));
i_sblock = BFS_I(inode)->i_sblock;
di->i_sblock = cpu_to_le32(i_sblock);
di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock);
@@ -239,7 +254,7 @@ static struct kmem_cache *bfs_inode_cachep;
static struct inode *bfs_alloc_inode(struct super_block *sb)
{
struct bfs_inode_info *bi;
- bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
+ bi = alloc_inode_sb(sb, bfs_inode_cachep, GFP_KERNEL);
if (!bi)
return NULL;
return &bi->vfs_inode;
@@ -262,7 +277,7 @@ static int __init init_inodecache(void)
bfs_inode_cachep = kmem_cache_create("bfs_inode_cache",
sizeof(struct bfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (bfs_inode_cachep == NULL)
return -ENOMEM;
@@ -308,7 +323,7 @@ void bfs_dump_imap(const char *prefix, struct super_block *s)
#endif
}
-static int bfs_fill_super(struct super_block *s, void *data, int silent)
+static int bfs_fill_super(struct super_block *s, struct fs_context *fc)
{
struct buffer_head *bh, *sbh;
struct bfs_super_block *bfs_sb;
@@ -317,6 +332,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
struct bfs_sb_info *info;
int ret = -EINVAL;
unsigned long i_sblock, i_eblock, i_eoff, s_size;
+ int silent = fc->sb_flags & SB_SILENT;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -449,18 +465,28 @@ out:
return ret;
}
-static struct dentry *bfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, bfs_fill_super);
+}
+
+static const struct fs_context_operations bfs_context_ops = {
+ .get_tree = bfs_get_tree,
+};
+
+static int bfs_init_fs_context(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, bfs_fill_super);
+ fc->ops = &bfs_context_ops;
+
+ return 0;
}
static struct file_system_type bfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "bfs",
- .mount = bfs_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "bfs",
+ .init_fs_context = bfs_init_fs_context,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("bfs");
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
deleted file mode 100644
index 0dcfc691e7e2..000000000000
--- a/fs/binfmt_aout.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/fs/binfmt_aout.c
- *
- * Copyright (C) 1991, 1992, 1996 Linus Torvalds
- */
-
-#include <linux/module.h>
-
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/a.out.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/binfmts.h>
-#include <linux/personality.h>
-#include <linux/init.h>
-#include <linux/coredump.h>
-#include <linux/slab.h>
-#include <linux/sched/task_stack.h>
-
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-
-static int load_aout_binary(struct linux_binprm *);
-static int load_aout_library(struct file*);
-
-static struct linux_binfmt aout_format = {
- .module = THIS_MODULE,
- .load_binary = load_aout_binary,
- .load_shlib = load_aout_library,
-};
-
-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
-
-static int set_brk(unsigned long start, unsigned long end)
-{
- start = PAGE_ALIGN(start);
- end = PAGE_ALIGN(end);
- if (end > start)
- return vm_brk(start, end - start);
- return 0;
-}
-
-/*
- * create_aout_tables() parses the env- and arg-strings in new user
- * memory and creates the pointer tables from them, and puts their
- * addresses on the "stack", returning the new stack pointer value.
- */
-static unsigned long __user *create_aout_tables(char __user *p, struct linux_binprm * bprm)
-{
- char __user * __user *argv;
- char __user * __user *envp;
- unsigned long __user *sp;
- int argc = bprm->argc;
- int envc = bprm->envc;
-
- sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p);
-#ifdef __alpha__
-/* whee.. test-programs are so much fun. */
- put_user(0, --sp);
- put_user(0, --sp);
- if (bprm->loader) {
- put_user(0, --sp);
- put_user(1003, --sp);
- put_user(bprm->loader, --sp);
- put_user(1002, --sp);
- }
- put_user(bprm->exec, --sp);
- put_user(1001, --sp);
-#endif
- sp -= envc+1;
- envp = (char __user * __user *) sp;
- sp -= argc+1;
- argv = (char __user * __user *) sp;
-#ifndef __alpha__
- put_user((unsigned long) envp,--sp);
- put_user((unsigned long) argv,--sp);
-#endif
- put_user(argc,--sp);
- current->mm->arg_start = (unsigned long) p;
- while (argc-->0) {
- char c;
- put_user(p,argv++);
- do {
- get_user(c,p++);
- } while (c);
- }
- put_user(NULL,argv);
- current->mm->arg_end = current->mm->env_start = (unsigned long) p;
- while (envc-->0) {
- char c;
- put_user(p,envp++);
- do {
- get_user(c,p++);
- } while (c);
- }
- put_user(NULL,envp);
- current->mm->env_end = (unsigned long) p;
- return sp;
-}
-
-/*
- * These are the functions used to load a.out style executables and shared
- * libraries. There is no binary dependent code anywhere else.
- */
-
-static int load_aout_binary(struct linux_binprm * bprm)
-{
- struct pt_regs *regs = current_pt_regs();
- struct exec ex;
- unsigned long error;
- unsigned long fd_offset;
- unsigned long rlim;
- int retval;
-
- ex = *((struct exec *) bprm->buf); /* exec-header */
- if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
- N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
- N_TRSIZE(ex) || N_DRSIZE(ex) ||
- i_size_read(file_inode(bprm->file)) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
- return -ENOEXEC;
- }
-
- /*
- * Requires a mmap handler. This prevents people from using a.out
- * as part of an exploit attack against /proc-related vulnerabilities.
- */
- if (!bprm->file->f_op->mmap)
- return -ENOEXEC;
-
- fd_offset = N_TXTOFF(ex);
-
- /* Check initial limits. This avoids letting people circumvent
- * size limits imposed on them by creating programs with large
- * arrays in the data or bss.
- */
- rlim = rlimit(RLIMIT_DATA);
- if (rlim >= RLIM_INFINITY)
- rlim = ~0;
- if (ex.a_data + ex.a_bss > rlim)
- return -ENOMEM;
-
- /* Flush all traces of the currently running executable */
- retval = begin_new_exec(bprm);
- if (retval)
- return retval;
-
- /* OK, This is the point of no return */
-#ifdef __alpha__
- SET_AOUT_PERSONALITY(bprm, ex);
-#else
- set_personality(PER_LINUX);
-#endif
- setup_new_exec(bprm);
-
- current->mm->end_code = ex.a_text +
- (current->mm->start_code = N_TXTADDR(ex));
- current->mm->end_data = ex.a_data +
- (current->mm->start_data = N_DATADDR(ex));
- current->mm->brk = ex.a_bss +
- (current->mm->start_brk = N_BSSADDR(ex));
-
- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
- if (retval < 0)
- return retval;
-
-
- if (N_MAGIC(ex) == OMAGIC) {
- unsigned long text_addr, map_size;
- loff_t pos;
-
- text_addr = N_TXTADDR(ex);
-
-#ifdef __alpha__
- pos = fd_offset;
- map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1;
-#else
- pos = 32;
- map_size = ex.a_text+ex.a_data;
-#endif
- error = vm_brk(text_addr & PAGE_MASK, map_size);
- if (error)
- return error;
-
- error = read_code(bprm->file, text_addr, pos,
- ex.a_text+ex.a_data);
- if ((signed long)error < 0)
- return error;
- } else {
- if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
- (N_MAGIC(ex) != NMAGIC) && printk_ratelimit())
- {
- printk(KERN_NOTICE "executable not page aligned\n");
- }
-
- if ((fd_offset & ~PAGE_MASK) != 0 && printk_ratelimit())
- {
- printk(KERN_WARNING
- "fd_offset is not page aligned. Please convert program: %pD\n",
- bprm->file);
- }
-
- if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
- error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- if (error)
- return error;
-
- read_code(bprm->file, N_TXTADDR(ex), fd_offset,
- ex.a_text + ex.a_data);
- goto beyond_if;
- }
-
- error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
- PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE,
- fd_offset);
-
- if (error != N_TXTADDR(ex))
- return error;
-
- error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE,
- fd_offset + ex.a_text);
- if (error != N_DATADDR(ex))
- return error;
- }
-beyond_if:
- set_binfmt(&aout_format);
-
- retval = set_brk(current->mm->start_brk, current->mm->brk);
- if (retval < 0)
- return retval;
-
- current->mm->start_stack =
- (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
-#ifdef __alpha__
- regs->gp = ex.a_gpvalue;
-#endif
- finalize_exec(bprm);
- start_thread(regs, ex.a_entry, current->mm->start_stack);
- return 0;
-}
-
-static int load_aout_library(struct file *file)
-{
- struct inode * inode;
- unsigned long bss, start_addr, len;
- unsigned long error;
- int retval;
- struct exec ex;
- loff_t pos = 0;
-
- inode = file_inode(file);
-
- retval = -ENOEXEC;
- error = kernel_read(file, &ex, sizeof(ex), &pos);
- if (error != sizeof(ex))
- goto out;
-
- /* We come in here for the regular a.out style of shared libraries */
- if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
- N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
- i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
- goto out;
- }
-
- /*
- * Requires a mmap handler. This prevents people from using a.out
- * as part of an exploit attack against /proc-related vulnerabilities.
- */
- if (!file->f_op->mmap)
- goto out;
-
- if (N_FLAGS(ex))
- goto out;
-
- /* For QMAGIC, the starting address is 0x20 into the page. We mask
- this off to get the starting address for the page */
-
- start_addr = ex.a_entry & 0xfffff000;
-
- if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
- if (printk_ratelimit())
- {
- printk(KERN_WARNING
- "N_TXTOFF is not page aligned. Please convert library: %pD\n",
- file);
- }
- retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- if (retval)
- goto out;
-
- read_code(file, start_addr, N_TXTOFF(ex),
- ex.a_text + ex.a_data);
- retval = 0;
- goto out;
- }
- /* Now use mmap to map the library into memory. */
- error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE,
- N_TXTOFF(ex));
- retval = error;
- if (error != start_addr)
- goto out;
-
- len = PAGE_ALIGN(ex.a_text + ex.a_data);
- bss = ex.a_text + ex.a_data + ex.a_bss;
- if (bss > len) {
- retval = vm_brk(start_addr + len, bss - len);
- if (retval)
- goto out;
- }
- retval = 0;
-out:
- return retval;
-}
-
-static int __init init_aout_binfmt(void)
-{
- register_binfmt(&aout_format);
- return 0;
-}
-
-static void __exit exit_aout_binfmt(void)
-{
- unregister_binfmt(&aout_format);
-}
-
-core_initcall(init_aout_binfmt);
-module_exit(exit_aout_binfmt);
-MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f8c7f26f1fbb..3eb734c192e9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -46,6 +46,7 @@
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
+#include <uapi/linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>
@@ -67,12 +68,6 @@
static int load_elf_binary(struct linux_binprm *bprm);
-#ifdef CONFIG_USELIB
-static int load_elf_library(struct file *);
-#else
-#define load_elf_library NULL
-#endif
-
/*
* If we don't support core dumping, then supply a NULL so we
* don't even try.
@@ -93,52 +88,49 @@ static int elf_core_dump(struct coredump_params *cprm);
#define ELF_CORE_EFLAGS 0
#endif
-#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
+#define ELF_PAGESTART(_v) ((_v) & ~(int)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
static struct linux_binfmt elf_format = {
.module = THIS_MODULE,
.load_binary = load_elf_binary,
- .load_shlib = load_elf_library,
+#ifdef CONFIG_COREDUMP
.core_dump = elf_core_dump,
.min_coredump = ELF_EXEC_PAGESIZE,
+#endif
};
#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
-static int set_brk(unsigned long start, unsigned long end, int prot)
+static inline void elf_coredump_set_mm_eflags(struct mm_struct *mm, u32 flags)
{
- start = ELF_PAGEALIGN(start);
- end = ELF_PAGEALIGN(end);
- if (end > start) {
- /*
- * Map the last of the bss segment.
- * If the header is requesting these pages to be
- * executable, honour that (ppc32 needs this).
- */
- int error = vm_brk_flags(start, end - start,
- prot & PROT_EXEC ? VM_EXEC : 0);
- if (error)
- return error;
- }
- current->mm->start_brk = current->mm->brk = end;
- return 0;
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ mm->saved_e_flags = flags;
+#endif
}
-/* We need to explicitly zero any fractional pages
- after the data section (i.e. bss). This would
- contain the junk from the file that should not
- be in memory
+static inline u32 elf_coredump_get_mm_eflags(struct mm_struct *mm, u32 flags)
+{
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ flags = mm->saved_e_flags;
+#endif
+ return flags;
+}
+
+/*
+ * We need to explicitly zero any trailing portion of the page that follows
+ * p_filesz when it ends before the page ends (e.g. bss), otherwise this
+ * memory will contain junk from the file that should not be present.
*/
-static int padzero(unsigned long elf_bss)
+static int padzero(unsigned long address)
{
unsigned long nbyte;
- nbyte = ELF_PAGEOFFSET(elf_bss);
+ nbyte = ELF_PAGEOFFSET(address);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
- if (clear_user((void __user *) elf_bss, nbyte))
+ if (clear_user((void __user *)address, nbyte))
return -EFAULT;
}
return 0;
@@ -170,8 +162,8 @@ static int padzero(unsigned long elf_bss)
static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
- unsigned long load_addr, unsigned long interp_load_addr,
- unsigned long e_entry)
+ unsigned long interp_load_addr,
+ unsigned long e_entry, unsigned long phdr_addr)
{
struct mm_struct *mm = current->mm;
unsigned long p = bprm->p;
@@ -246,7 +238,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
} while (0)
#ifdef ARCH_DLINFO
- /*
+ /*
* ARCH_DLINFO must come first so PPC can do its special alignment of
* AUXV.
* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
@@ -257,7 +249,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
- NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
+ NEW_AUX_ENT(AT_PHDR, phdr_addr);
NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
@@ -274,6 +266,12 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
#ifdef ELF_HWCAP2
NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
+#ifdef ELF_HWCAP3
+ NEW_AUX_ENT(AT_HWCAP3, ELF_HWCAP3);
+#endif
+#ifdef ELF_HWCAP4
+ NEW_AUX_ENT(AT_HWCAP4, ELF_HWCAP4);
+#endif
NEW_AUX_ENT(AT_EXECFN, bprm->exec);
if (k_platform) {
NEW_AUX_ENT(AT_PLATFORM,
@@ -286,6 +284,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
if (bprm->have_execfd) {
NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
}
+#ifdef CONFIG_RSEQ
+ NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
+ NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
+#endif
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
memset(elf_info, 0, (char *)mm->saved_auxv +
@@ -313,10 +315,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
* Grow the stack manually; some architectures have a limit on how
* far ahead a user-space access may be in order to grow the stack.
*/
- if (mmap_read_lock_killable(mm))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
- vma = find_extend_vma(mm, bprm->p);
- mmap_read_unlock(mm);
+ vma = find_extend_vma_locked(mm, bprm->p);
+ mmap_write_unlock(mm);
if (!vma)
return -EFAULT;
@@ -360,6 +362,11 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
return 0;
}
+/*
+ * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
+ * into memory at "addr". (Note that p_filesz is rounded up to the
+ * next page, so any extra bytes from the file must be wiped.)
+ */
static unsigned long elf_map(struct file *filep, unsigned long addr,
const struct elf_phdr *eppnt, int prot, int type,
unsigned long total_size)
@@ -399,22 +406,75 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
return(map_addr);
}
-static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
+/*
+ * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
+ * into memory at "addr". Memory from "p_filesz" through "p_memsz"
+ * rounded up to the next page is zeroed.
+ */
+static unsigned long elf_load(struct file *filep, unsigned long addr,
+ const struct elf_phdr *eppnt, int prot, int type,
+ unsigned long total_size)
{
- int i, first_idx = -1, last_idx = -1;
+ unsigned long zero_start, zero_end;
+ unsigned long map_addr;
- for (i = 0; i < nr; i++) {
- if (cmds[i].p_type == PT_LOAD) {
- last_idx = i;
- if (first_idx == -1)
- first_idx = i;
+ if (eppnt->p_filesz) {
+ map_addr = elf_map(filep, addr, eppnt, prot, type, total_size);
+ if (BAD_ADDR(map_addr))
+ return map_addr;
+ if (eppnt->p_memsz > eppnt->p_filesz) {
+ zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
+ eppnt->p_filesz;
+ zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
+ eppnt->p_memsz;
+
+ /*
+ * Zero the end of the last mapped page but ignore
+ * any errors if the segment isn't writable.
+ */
+ if (padzero(zero_start) && (prot & PROT_WRITE))
+ return -EFAULT;
}
+ } else {
+ map_addr = zero_start = ELF_PAGESTART(addr);
+ zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) +
+ eppnt->p_memsz;
}
- if (first_idx == -1)
- return 0;
+ if (eppnt->p_memsz > eppnt->p_filesz) {
+ /*
+ * Map the last of the segment.
+ * If the header is requesting these pages to be
+ * executable, honour that (ppc32 needs this).
+ */
+ int error;
+
+ zero_start = ELF_PAGEALIGN(zero_start);
+ zero_end = ELF_PAGEALIGN(zero_end);
+
+ error = vm_brk_flags(zero_start, zero_end - zero_start,
+ prot & PROT_EXEC ? VM_EXEC : 0);
+ if (error)
+ map_addr = error;
+ }
+ return map_addr;
+}
+
+
+static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr)
+{
+ elf_addr_t min_addr = -1;
+ elf_addr_t max_addr = 0;
+ bool pt_load = false;
+ int i;
- return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
- ELF_PAGESTART(cmds[first_idx].p_vaddr);
+ for (i = 0; i < nr; i++) {
+ if (phdr[i].p_type == PT_LOAD) {
+ min_addr = min(min_addr, ELF_PAGESTART(phdr[i].p_vaddr));
+ max_addr = max(max_addr, phdr[i].p_vaddr + phdr[i].p_memsz);
+ pt_load = true;
+ }
+ }
+ return pt_load ? (max_addr - min_addr) : 0;
}
static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
@@ -455,13 +515,13 @@ static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
*
* Loads ELF program headers from the binary file elf_file, which has the ELF
* header pointed to by elf_ex, into a newly allocated array. The caller is
- * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
+ * responsible for freeing the allocated data. Returns NULL upon failure.
*/
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
struct file *elf_file)
{
struct elf_phdr *elf_phdata = NULL;
- int retval, err = -1;
+ int retval = -1;
unsigned int size;
/*
@@ -474,7 +534,7 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
/* Sanity check the number of program headers... */
/* ...and their total size. */
size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
- if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
+ if (size == 0 || size > 65536)
goto out;
elf_phdata = kmalloc(size, GFP_KERNEL);
@@ -483,15 +543,9 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
/* Read in the program headers */
retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
- if (retval < 0) {
- err = retval;
- goto out;
- }
- /* Success! */
- err = 0;
out:
- if (err) {
+ if (retval) {
kfree(elf_phdata);
elf_phdata = NULL;
}
@@ -596,8 +650,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
struct elf_phdr *eppnt;
unsigned long load_addr = 0;
int load_addr_set = 0;
- unsigned long last_bss = 0, elf_bss = 0;
- int bss_prot = 0;
unsigned long error = ~0UL;
unsigned long total_size;
int i;
@@ -609,7 +661,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
if (!elf_check_arch(interp_elf_ex) ||
elf_check_fdpic(interp_elf_ex))
goto out;
- if (!interpreter->f_op->mmap)
+ if (!can_mmap_file(interpreter))
goto out;
total_size = total_mapping_size(interp_elf_phdata,
@@ -634,7 +686,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
else if (no_base && interp_elf_ex->e_type == ET_DYN)
load_addr = -vaddr;
- map_addr = elf_map(interpreter, load_addr + vaddr,
+ map_addr = elf_load(interpreter, load_addr + vaddr,
eppnt, elf_prot, elf_type, total_size);
total_size = 0;
error = map_addr;
@@ -660,51 +712,9 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
error = -ENOMEM;
goto out;
}
-
- /*
- * Find the end of the file mapping for this phdr, and
- * keep track of the largest address we see for this.
- */
- k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
- if (k > elf_bss)
- elf_bss = k;
-
- /*
- * Do the same thing for the memory mapping - between
- * elf_bss and last_bss is the bss section.
- */
- k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
- if (k > last_bss) {
- last_bss = k;
- bss_prot = elf_prot;
- }
}
}
- /*
- * Now fill out the bss section: first pad the last page from
- * the file up to the page boundary, and zero it from elf_bss
- * up to the end of the page.
- */
- if (padzero(elf_bss)) {
- error = -EFAULT;
- goto out;
- }
- /*
- * Next, align both the file and mem bss up to the page size,
- * since this is where elf_bss was just zeroed up to, and where
- * last_bss will end after the vm_brk_flags() below.
- */
- elf_bss = ELF_PAGEALIGN(elf_bss);
- last_bss = ELF_PAGEALIGN(last_bss);
- /* Finally, if there is still more bss to allocate, do it. */
- if (last_bss > elf_bss) {
- error = vm_brk_flags(elf_bss, last_bss - elf_bss,
- bss_prot & PROT_EXEC ? VM_EXEC : 0);
- if (error)
- goto out;
- }
-
error = load_addr;
out:
return error;
@@ -760,8 +770,7 @@ static int parse_elf_property(const char *data, size_t *off, size_t datasz,
}
#define NOTE_DATA_SZ SZ_1K
-#define GNU_PROPERTY_TYPE_0_NAME "GNU"
-#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
+#define NOTE_NAME_SZ (sizeof(NN_GNU_PROPERTY_TYPE_0))
static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
struct arch_elf_state *arch)
@@ -798,7 +807,7 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
note.nhdr.n_namesz != NOTE_NAME_SZ ||
strncmp(note.data + sizeof(note.nhdr),
- GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
+ NN_GNU_PROPERTY_TYPE_0, n - sizeof(note.nhdr)))
return -ENOEXEC;
off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
@@ -823,13 +832,13 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
static int load_elf_binary(struct linux_binprm *bprm)
{
struct file *interpreter = NULL; /* to shut gcc up */
- unsigned long load_addr = 0, load_bias = 0;
- int load_addr_set = 0;
+ unsigned long load_bias = 0, phdr_addr = 0;
+ int first_pt_load = 1;
unsigned long error;
struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
struct elf_phdr *elf_property_phdata = NULL;
- unsigned long elf_bss, elf_brk;
- int bss_prot = 0;
+ unsigned long elf_brk;
+ bool brk_moved = false;
int retval, i;
unsigned long elf_entry;
unsigned long e_entry;
@@ -854,7 +863,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out;
if (elf_check_fdpic(elf_ex))
goto out;
- if (!bprm->file->f_op->mmap)
+ if (!can_mmap_file(bprm->file))
goto out;
elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
@@ -910,7 +919,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
if (!interp_elf_ex) {
retval = -ENOMEM;
- goto out_free_ph;
+ goto out_free_file;
}
/* Get the exec headers */
@@ -1008,7 +1017,8 @@ out_free_interp:
if (elf_read_implies_exec(*elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
@@ -1019,8 +1029,7 @@ out_free_interp:
executable_stack);
if (retval < 0)
goto out_free_dentry;
-
- elf_bss = 0;
+
elf_brk = 0;
start_code = ~0UL;
@@ -1040,33 +1049,6 @@ out_free_interp:
if (elf_ppnt->p_type != PT_LOAD)
continue;
- if (unlikely (elf_brk > elf_bss)) {
- unsigned long nbyte;
-
- /* There was a PT_LOAD segment with p_memsz > p_filesz
- before this one. Map anonymous pages, if needed,
- and clear the area. */
- retval = set_brk(elf_bss + load_bias,
- elf_brk + load_bias,
- bss_prot);
- if (retval)
- goto out_free_dentry;
- nbyte = ELF_PAGEOFFSET(elf_bss);
- if (nbyte) {
- nbyte = ELF_MIN_ALIGN - nbyte;
- if (nbyte > elf_brk - elf_bss)
- nbyte = elf_brk - elf_bss;
- if (clear_user((void __user *)elf_bss +
- load_bias, nbyte)) {
- /*
- * This bss-zeroing can fail if the ELF
- * file specifies odd protections. So
- * we don't check the return value
- */
- }
- }
- }
-
elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
!!interpreter, false);
@@ -1074,12 +1056,12 @@ out_free_interp:
vaddr = elf_ppnt->p_vaddr;
/*
- * The first time through the loop, load_addr_set is false:
+ * The first time through the loop, first_pt_load is true:
* layout will be calculated. Once set, use MAP_FIXED since
* we know we've already safely mapped the entire region with
* MAP_FIXED_NOREPLACE in the once-per-binary logic following.
*/
- if (load_addr_set) {
+ if (!first_pt_load) {
elf_flags |= MAP_FIXED;
} else if (elf_ex->e_type == ET_EXEC) {
/*
@@ -1094,15 +1076,49 @@ out_free_interp:
* Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD
* Program Headers.
+ */
+
+ /*
+ * Calculate the entire size of the ELF mapping
+ * (total_size), used for the initial mapping,
+ * due to load_addr_set which is set to true later
+ * once the initial mapping is performed.
+ *
+ * Note that this is only sensible when the LOAD
+ * segments are contiguous (or overlapping). If
+ * used for LOADs that are far apart, this would
+ * cause the holes between LOADs to be mapped,
+ * running the risk of having the mapping fail,
+ * as it would be larger than the ELF file itself.
+ *
+ * As a result, only ET_DYN does this, since
+ * some ET_EXEC (e.g. ia64) may have large virtual
+ * memory holes between LOADs.
+ *
+ */
+ total_size = total_mapping_size(elf_phdata,
+ elf_ex->e_phnum);
+ if (!total_size) {
+ retval = -EINVAL;
+ goto out_free_dentry;
+ }
+
+ /* Calculate any requested alignment. */
+ alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+
+ /**
+ * DOC: PIE handling
*
- * There are effectively two types of ET_DYN
- * binaries: programs (i.e. PIE: ET_DYN with INTERP)
- * and loaders (ET_DYN without INTERP, since they
- * _are_ the ELF interpreter). The loaders must
- * be loaded away from programs since the program
- * may otherwise collide with the loader (especially
- * for ET_EXEC which does not have a randomized
- * position). For example to handle invocations of
+ * There are effectively two types of ET_DYN ELF
+ * binaries: programs (i.e. PIE: ET_DYN with
+ * PT_INTERP) and loaders (i.e. static PIE: ET_DYN
+ * without PT_INTERP, usually the ELF interpreter
+ * itself). Loaders must be loaded away from programs
+ * since the program may otherwise collide with the
+ * loader (especially for ET_EXEC which does not have
+ * a randomized position).
+ *
+ * For example, to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
@@ -1115,17 +1131,49 @@ out_free_interp:
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
+ *
+ * See below for "brk" handling details, which is
+ * also affected by program vs loader and ASLR.
*/
if (interpreter) {
+ /* On ET_DYN with PT_INTERP, we do the ASLR. */
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
- alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+ /* Adjust alignment as requested. */
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
- } else
- load_bias = 0;
+ } else {
+ /*
+ * For ET_DYN without PT_INTERP, we rely on
+ * the architecture's (potentially ASLR) mmap
+ * base address (via a load_bias of 0).
+ *
+ * When a large alignment is requested, we
+ * must do the allocation at address "0" right
+ * now to discover where things will load so
+ * that we can adjust the resulting alignment.
+ * In this case (load_bias != 0), we can use
+ * MAP_FIXED_NOREPLACE to make sure the mapping
+ * doesn't collide with anything.
+ */
+ if (alignment > ELF_MIN_ALIGN) {
+ load_bias = elf_load(bprm->file, 0, elf_ppnt,
+ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(load_bias)) {
+ retval = IS_ERR_VALUE(load_bias) ?
+ PTR_ERR((void*)load_bias) : -EINVAL;
+ goto out_free_dentry;
+ }
+ vm_munmap(load_bias, total_size);
+ /* Adjust alignment as requested. */
+ if (alignment)
+ load_bias &= ~(alignment - 1);
+ elf_flags |= MAP_FIXED_NOREPLACE;
+ } else
+ load_bias = 0;
+ }
/*
* Since load_bias is used for all subsequent loading
@@ -1137,38 +1185,33 @@ out_free_interp:
load_bias = ELF_PAGESTART(load_bias - vaddr);
}
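The alignment handling above is ordinary power-of-two masking; a toy example with an assumed 2 MiB p_align and an invented ASLR base:

#include <stdio.h>

int main(void)
{
        unsigned long load_bias = 0x7f1234567000UL;  /* hypothetical random base */
        unsigned long alignment = 0x200000UL;        /* 2 MiB, from p_align */

        if (alignment)
                load_bias &= ~(alignment - 1);
        printf("aligned load_bias: %#lx\n", load_bias);  /* 0x7f1234400000 */
        return 0;
}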
- /*
- * Calculate the entire size of the ELF mapping (total_size).
- * (Note that load_addr_set is set to true later once the
- * initial mapping is performed.)
- */
- if (!load_addr_set) {
- total_size = total_mapping_size(elf_phdata,
- elf_ex->e_phnum);
- if (!total_size) {
- retval = -EINVAL;
- goto out_free_dentry;
- }
- }
-
- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+ error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
elf_prot, elf_flags, total_size);
if (BAD_ADDR(error)) {
- retval = IS_ERR((void *)error) ?
+ retval = IS_ERR_VALUE(error) ?
PTR_ERR((void*)error) : -EINVAL;
goto out_free_dentry;
}
- if (!load_addr_set) {
- load_addr_set = 1;
- load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
+ if (first_pt_load) {
+ first_pt_load = 0;
if (elf_ex->e_type == ET_DYN) {
load_bias += error -
ELF_PAGESTART(load_bias + vaddr);
- load_addr += load_bias;
reloc_func_desc = load_bias;
}
}
+
+ /*
+ * Figure out which segment in the file contains the Program
+ * Header table, and map to the associated memory address.
+ */
+ if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
+ elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
+ phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
+ elf_ppnt->p_vaddr;
+ }
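A worked example (made-up offsets) of the AT_PHDR arithmetic above: the program header table sits at e_phoff in the file, so its virtual address is recovered from the PT_LOAD segment whose file range covers that offset, then shifted by load_bias.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t e_phoff   = 0x40;              /* file offset of the phdr table */
        uint64_t p_offset  = 0x0;               /* containing segment, file offset */
        uint64_t p_filesz  = 0x1000;
        uint64_t p_vaddr   = 0x0;               /* ET_DYN: starts at zero */
        uint64_t load_bias = 0x555555554000ULL; /* assumed mmap base */

        if (p_offset <= e_phoff && e_phoff < p_offset + p_filesz) {
                uint64_t phdr_addr = e_phoff - p_offset + p_vaddr;
                printf("AT_PHDR: %#llx\n",
                       (unsigned long long)(phdr_addr + load_bias));
        }
        return 0;
}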
+
k = elf_ppnt->p_vaddr;
if ((elf_ppnt->p_flags & PF_X) && k < start_code)
start_code = k;
@@ -1190,46 +1233,29 @@ out_free_interp:
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
- if (k > elf_bss)
- elf_bss = k;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
end_code = k;
if (end_data < k)
end_data = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
- if (k > elf_brk) {
- bss_prot = elf_prot;
+ if (k > elf_brk)
elf_brk = k;
- }
}
e_entry = elf_ex->e_entry + load_bias;
- elf_bss += load_bias;
+ phdr_addr += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
start_data += load_bias;
end_data += load_bias;
- /* Calling set_brk effectively mmaps the pages that we need
- * for the bss and break sections. We must do this before
- * mapping in the interpreter, to make sure it doesn't wind
- * up getting placed where the bss needs to go.
- */
- retval = set_brk(elf_bss, elf_brk, bss_prot);
- if (retval)
- goto out_free_dentry;
- if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
- retval = -EFAULT; /* Nobody gets to see this, but.. */
- goto out_free_dentry;
- }
-
if (interpreter) {
elf_entry = load_elf_interp(interp_elf_ex,
interpreter,
load_bias, interp_elf_phdata,
&arch_state);
- if (!IS_ERR((void *)elf_entry)) {
+ if (!IS_ERR_VALUE(elf_entry)) {
/*
* load_elf_interp() returns relocation
* adjustment
@@ -1238,13 +1264,13 @@ out_free_interp:
elf_entry += interp_elf_ex->e_entry;
}
if (BAD_ADDR(elf_entry)) {
- retval = IS_ERR((void *)elf_entry) ?
+ retval = IS_ERR_VALUE(elf_entry) ?
(int)elf_entry : -EINVAL;
goto out_free_dentry;
}
reloc_func_desc = interp_load_addr;
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
kfree(interp_elf_ex);
@@ -1267,8 +1293,8 @@ out_free_interp:
goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
- retval = create_elf_tables(bprm, elf_ex,
- load_addr, interp_load_addr, e_entry);
+ retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
+ e_entry, phdr_addr);
if (retval < 0)
goto out;
@@ -1279,24 +1305,46 @@ out_free_interp:
mm->end_data = end_data;
mm->start_stack = bprm->p;
- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
+ elf_coredump_set_mm_eflags(mm, elf_ex->e_flags);
+
+ /**
+ * DOC: "brk" handling
+ *
+ * For architectures with ELF randomization, when executing a
+ * loader directly (i.e. static PIE: ET_DYN without PT_INTERP),
+ * move the brk area out of the mmap region and into the unused
+ * ELF_ET_DYN_BASE region. Since "brk" grows up it may collide
+ * early with the stack growing down or other regions being put
+ * into the mmap region by the kernel (e.g. vdso).
+ *
+ * In the CONFIG_COMPAT_BRK case, though, everything is turned
+ * off because we're not allowed to move the brk at all.
+ */
+ if (!IS_ENABLED(CONFIG_COMPAT_BRK) &&
+ IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+ elf_ex->e_type == ET_DYN && !interpreter) {
+ elf_brk = ELF_ET_DYN_BASE;
+ /* This counts as moving the brk, so let brk(2) know. */
+ brk_moved = true;
+ }
+ mm->start_brk = mm->brk = ELF_PAGEALIGN(elf_brk);
+
+ if ((current->flags & PF_RANDOMIZE) && snapshot_randomize_va_space > 1) {
/*
- * For architectures with ELF randomization, when executing
- * a loader directly (i.e. no interpreter listed in ELF
- * headers), move the brk area out of the mmap region
- * (since it grows up, and may collide early with the stack
- * growing down), and into the unused ELF_ET_DYN_BASE region.
+ * If we didn't move the brk to ELF_ET_DYN_BASE (above),
+ * leave a gap between .bss and brk.
*/
- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
- elf_ex->e_type == ET_DYN && !interpreter) {
- mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
- }
+ if (!brk_moved)
+ mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
mm->brk = mm->start_brk = arch_randomize_brk(mm);
+ brk_moved = true;
+ }
+
#ifdef compat_brk_randomized
+ if (brk_moved)
current->brk_randomized = 1;
#endif
- }
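The resulting brk placement is plain page arithmetic; a sketch with assumed values for the non-moved case, where a one-page gap is left before arch_randomize_brk() runs:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGEALIGN(v)  (((v) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long elf_brk = 0x404e28;       /* highest p_vaddr + p_memsz + bias */
        unsigned long brk = PAGEALIGN(elf_brk); /* mm->start_brk = mm->brk */

        /* ASLR on, brk not moved to ELF_ET_DYN_BASE: keep a gap after .bss */
        brk += PAGE_SIZE;
        printf("start_brk before randomization: %#lx\n", brk);
        return 0;
}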
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
@@ -1305,6 +1353,11 @@ out_free_interp:
emulate the SVr4 behavior. Sigh. */
error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
+
+ retval = do_mseal(0, PAGE_SIZE, 0);
+ if (retval)
+ pr_warn_ratelimited("pid=%d, couldn't seal address 0, ret=%d.\n",
+ task_pid_nr(current), retval);
}
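do_mseal() above is the in-kernel counterpart of the mseal(2) syscall; a userspace sketch of the same semantics, assuming a kernel and headers new enough to define __NR_mseal:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
#ifdef __NR_mseal
        if (syscall(__NR_mseal, p, len, 0) != 0)
                perror("mseal");
        /* once sealed, changing the mapping is expected to fail with EPERM */
        if (mprotect(p, len, PROT_READ | PROT_WRITE) != 0)
                perror("mprotect on sealed mapping");
#endif
        return 0;
}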
regs = current_pt_regs();
@@ -1332,7 +1385,8 @@ out:
out_free_dentry:
kfree(interp_elf_ex);
kfree(interp_elf_phdata);
- allow_write_access(interpreter);
+out_free_file:
+ exe_file_allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_ph:
@@ -1340,91 +1394,6 @@ out_free_ph:
goto out;
}
-#ifdef CONFIG_USELIB
-/* This is really simpleminded and specialized - we are loading an
- a.out library that is given an ELF header. */
-static int load_elf_library(struct file *file)
-{
- struct elf_phdr *elf_phdata;
- struct elf_phdr *eppnt;
- unsigned long elf_bss, bss, len;
- int retval, error, i, j;
- struct elfhdr elf_ex;
-
- error = -ENOEXEC;
- retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
- if (retval < 0)
- goto out;
-
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
- goto out;
-
- /* First of all, some simple consistency checks */
- if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !elf_check_arch(&elf_ex) || !file->f_op->mmap)
- goto out;
- if (elf_check_fdpic(&elf_ex))
- goto out;
-
- /* Now read in all of the header information */
-
- j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
- /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
-
- error = -ENOMEM;
- elf_phdata = kmalloc(j, GFP_KERNEL);
- if (!elf_phdata)
- goto out;
-
- eppnt = elf_phdata;
- error = -ENOEXEC;
- retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
- if (retval < 0)
- goto out_free_ph;
-
- for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
- if ((eppnt + i)->p_type == PT_LOAD)
- j++;
- if (j != 1)
- goto out_free_ph;
-
- while (eppnt->p_type != PT_LOAD)
- eppnt++;
-
- /* Now use mmap to map the library into memory. */
- error = vm_mmap(file,
- ELF_PAGESTART(eppnt->p_vaddr),
- (eppnt->p_filesz +
- ELF_PAGEOFFSET(eppnt->p_vaddr)),
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED_NOREPLACE | MAP_PRIVATE,
- (eppnt->p_offset -
- ELF_PAGEOFFSET(eppnt->p_vaddr)));
- if (error != ELF_PAGESTART(eppnt->p_vaddr))
- goto out_free_ph;
-
- elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
- if (padzero(elf_bss)) {
- error = -EFAULT;
- goto out_free_ph;
- }
-
- len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
- bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
- if (bss > len) {
- error = vm_brk(len, bss - len);
- if (error)
- goto out_free_ph;
- }
- error = 0;
-
-out_free_ph:
- kfree(elf_phdata);
-out:
- return error;
-}
-#endif /* #ifdef CONFIG_USELIB */
-
#ifdef CONFIG_ELF_CORE
/*
* ELF core dumper
@@ -1495,11 +1464,11 @@ static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
phdr->p_filesz = sz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
- phdr->p_align = 0;
+ phdr->p_align = 4;
}
-static void fill_note(struct memelfnote *note, const char *name, int type,
- unsigned int sz, void *data)
+static void __fill_note(struct memelfnote *note, const char *name, int type,
+ unsigned int sz, void *data)
{
note->name = name;
note->type = type;
@@ -1507,6 +1476,9 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
note->data = data;
}
+#define fill_note(note, type, sz, data) \
+ __fill_note(note, NN_ ## type, NT_ ## type, sz, data)
+
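The macro relies on token pasting so a single argument selects both the NN_ (note name) and NT_ (note type) constants; a self-contained demonstration with placeholder values:

#include <stdio.h>

#define NN_PRSTATUS "CORE"      /* placeholder, mirrors the kernel convention */
#define NT_PRSTATUS 1

#define fill_note(type) \
        printf("name=%s type=%d\n", NN_ ## type, NT_ ## type)

int main(void)
{
        fill_note(PRSTATUS);    /* expands to NN_PRSTATUS / NT_PRSTATUS */
        return 0;
}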
/*
* fill up all the fields in prstatus from the given task struct, except
* registers which need to be filled up separately.
@@ -1585,7 +1557,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
- strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+ get_task_comm(psinfo->pr_fname, p);
return 0;
}
@@ -1597,17 +1569,16 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
do
i += 2;
while (auxv[i - 2] != AT_NULL);
- fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ fill_note(note, AUXV, i * sizeof(elf_addr_t), auxv);
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
const kernel_siginfo_t *siginfo)
{
copy_siginfo_to_external(csigdata, siginfo);
- fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+ fill_note(note, SIGINFO, sizeof(*csigdata), csigdata);
}
-#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
* Format of NT_FILE note:
*
@@ -1619,25 +1590,28 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
* long file_ofs
* followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
*/
-static int fill_files_note(struct memelfnote *note)
+static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
unsigned count, size, names_ofs, remaining, n;
user_long_t *data;
user_long_t *start_end_ofs;
char *name_base, *name_curpos;
+ int i;
/* *Estimated* file count and total data size needed */
- count = mm->map_count;
+ count = cprm->vma_count;
if (count > UINT_MAX / 64)
return -EINVAL;
size = count * 64;
names_ofs = (2 + 3 * count) * sizeof(data[0]);
alloc:
- if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
+ /* paranoia check */
+ if (size >= core_file_note_size_limit) {
+ pr_warn_once("coredump Note size too large: %u (does kernel.core_file_note_size_limit sysctl need adjustment?\n",
+ size);
return -EINVAL;
+ }
size = round_up(size, PAGE_SIZE);
/*
* "size" can be 0 here legitimately.
@@ -1651,11 +1625,12 @@ static int fill_files_note(struct memelfnote *note)
name_base = name_curpos = ((char *)data) + names_ofs;
remaining = size - names_ofs;
count = 0;
- for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ for (i = 0; i < cprm->vma_count; i++) {
+ struct core_vma_metadata *m = &cprm->vma_meta[i];
struct file *file;
const char *filename;
- file = vma->vm_file;
+ file = m->file;
if (!file)
continue;
filename = file_path(file, name_curpos, remaining);
@@ -1675,9 +1650,9 @@ static int fill_files_note(struct memelfnote *note)
memmove(name_curpos, filename, n);
name_curpos += n;
- *start_end_ofs++ = vma->vm_start;
- *start_end_ofs++ = vma->vm_end;
- *start_end_ofs++ = vma->vm_pgoff;
+ *start_end_ofs++ = m->start;
+ *start_end_ofs++ = m->end;
+ *start_end_ofs++ = m->pgoff;
count++;
}
@@ -1688,7 +1663,7 @@ static int fill_files_note(struct memelfnote *note)
* Count usually is less than mm->map_count,
* we need to move filenames down.
*/
- n = mm->map_count - count;
+ n = cprm->vma_count - count;
if (n != 0) {
unsigned shift_bytes = n * 3 * sizeof(data[0]);
memmove(name_base - shift_bytes, name_base,
@@ -1697,11 +1672,10 @@ static int fill_files_note(struct memelfnote *note)
}
size = name_curpos - (char *)data;
- fill_note(note, "CORE", NT_FILE, size, data);
+ fill_note(note, FILE, size, data);
return 0;
}
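For reference, a rough userspace reader for the NT_FILE layout documented above (count, page_size, COUNT start/end/file_ofs triples, then NUL-separated filenames); the sample mapping and filename are invented and error handling is omitted:

#include <stdio.h>
#include <string.h>

static void dump_nt_file(const long *data)
{
        long count = data[0];
        long page_size = data[1];
        const long *triple = data + 2;
        const char *names = (const char *)(data + 2 + 3 * count);

        printf("count=%ld page_size=%ld\n", count, page_size);
        for (long i = 0; i < count; i++, triple += 3) {
                printf("%#lx-%#lx pgoff=%#lx %s\n",
                       (unsigned long)triple[0], (unsigned long)triple[1],
                       (unsigned long)triple[2], names);
                names += strlen(names) + 1;
        }
}

int main(void)
{
        union {
                long l[8];
                char c[8 * sizeof(long)];
        } note = { .l = { 1, 4096, 0x400000, 0x401000, 0 } };

        /* filenames follow the triples */
        strcpy(note.c + 5 * sizeof(long), "/bin/true");
        dump_nt_file(note.l);
        return 0;
}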
-#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>
struct elf_thread_core_info {
@@ -1722,6 +1696,7 @@ struct elf_note_info {
int thread_notes;
};
+#ifdef CORE_DUMP_USE_REGSET
/*
* When a regset has a writeback hook, we call it on each thread before
* dumping user memory. On register window machines, this makes sure the
@@ -1744,34 +1719,34 @@ static void do_thread_regset_writeback(struct task_struct *task,
static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset_view *view,
- long signr, size_t *total)
+ long signr, struct elf_note_info *info)
{
- unsigned int i;
+ unsigned int note_iter, view_iter;
/*
* NT_PRSTATUS is the one special case, because the regset data
* goes into the pr_reg field inside the note contents, rather
- * than being the whole note contents. We fill the reset in here.
+ * than being the whole note contents. We fill the regset in here.
* We assume that regset 0 is NT_PRSTATUS.
*/
fill_prstatus(&t->prstatus.common, t->task, signr);
regset_get(t->task, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
- PRSTATUS_SIZE, &t->prstatus);
- *total += notesize(&t->notes[0]);
+ fill_note(&t->notes[0], PRSTATUS, PRSTATUS_SIZE, &t->prstatus);
+ info->size += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
/*
* Each other regset might generate a note too. For each regset
- * that has no core_note_type or is inactive, we leave t->notes[i]
- * all zero and we'll know to skip writing it later.
+ * that has no core_note_type or is inactive, skip it.
*/
- for (i = 1; i < view->n; ++i) {
- const struct user_regset *regset = &view->regsets[i];
+ note_iter = 1;
+ for (view_iter = 1; view_iter < view->n; ++view_iter) {
+ const struct user_regset *regset = &view->regsets[view_iter];
int note_type = regset->core_note_type;
+ const char *note_name = regset->core_note_name;
bool is_fpreg = note_type == NT_PRFPREG;
void *data;
int ret;
@@ -1786,45 +1761,82 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
if (ret < 0)
continue;
+ if (WARN_ON_ONCE(note_iter >= info->thread_notes))
+ break;
+
if (is_fpreg)
SET_PR_FPVALID(&t->prstatus);
- fill_note(&t->notes[i], is_fpreg ? "CORE" : "LINUX",
- note_type, ret, data);
+ /* There should be a note name, but if not, guess: */
+ if (WARN_ON_ONCE(!note_name))
+ note_name = "LINUX";
+ else
+ /* Warn on non-legacy-compatible names, for now. */
+ WARN_ON_ONCE(strcmp(note_name,
+ is_fpreg ? "CORE" : "LINUX"));
+
+ __fill_note(&t->notes[note_iter], note_name, note_type,
+ ret, data);
- *total += notesize(&t->notes[i]);
+ info->size += notesize(&t->notes[note_iter]);
+ note_iter++;
}
return 1;
}
+#else
+static int fill_thread_core_info(struct elf_thread_core_info *t,
+ const struct user_regset_view *view,
+ long signr, struct elf_note_info *info)
+{
+ struct task_struct *p = t->task;
+ elf_fpregset_t *fpu;
+
+ fill_prstatus(&t->prstatus.common, p, signr);
+ elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
+
+ fill_note(&t->notes[0], PRSTATUS, sizeof(t->prstatus), &t->prstatus);
+ info->size += notesize(&t->notes[0]);
+
+ fpu = kzalloc(sizeof(elf_fpregset_t), GFP_KERNEL);
+ if (!fpu || !elf_core_copy_task_fpregs(p, fpu)) {
+ kfree(fpu);
+ return 1;
+ }
+
+ t->prstatus.pr_fpvalid = 1;
+ fill_note(&t->notes[1], PRFPREG, sizeof(*fpu), fpu);
+ info->size += notesize(&t->notes[1]);
+
+ return 1;
+}
+#endif
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- const kernel_siginfo_t *siginfo, struct pt_regs *regs)
+ struct coredump_params *cprm)
{
struct task_struct *dump_task = current;
- const struct user_regset_view *view = task_user_regset_view(dump_task);
+ const struct user_regset_view *view;
struct elf_thread_core_info *t;
struct elf_prpsinfo *psinfo;
struct core_thread *ct;
- unsigned int i;
-
- info->size = 0;
- info->thread = NULL;
+ u16 machine;
+ u32 flags;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
- if (psinfo == NULL) {
- info->psinfo.data = NULL; /* So we don't free this wrongly */
+ if (!psinfo)
return 0;
- }
+ fill_note(&info->psinfo, PRPSINFO, sizeof(*psinfo), psinfo);
- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+#ifdef CORE_DUMP_USE_REGSET
+ view = task_user_regset_view(dump_task);
/*
* Figure out how many notes we're going to need for each thread.
*/
info->thread_notes = 0;
- for (i = 0; i < view->n; ++i)
+ for (int i = 0; i < view->n; ++i)
if (view->regsets[i].core_note_type != 0)
++info->thread_notes;
@@ -1838,41 +1850,51 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
return 0;
}
+ machine = view->e_machine;
+ flags = view->e_flags;
+#else
+ view = NULL;
+ info->thread_notes = 2;
+ machine = ELF_ARCH;
+ flags = ELF_CORE_EFLAGS;
+#endif
+
+ /*
+ * Override ELF e_flags with value taken from process,
+ * if arch needs that.
+ */
+ flags = elf_coredump_get_mm_eflags(dump_task->mm, flags);
+
/*
* Initialize the ELF file header.
*/
- fill_elf_header(elf, phdrs,
- view->e_machine, view->e_flags);
+ fill_elf_header(elf, phdrs, machine, flags);
/*
* Allocate a structure for each thread.
*/
- for (ct = &dump_task->signal->core_state->dumper; ct; ct = ct->next) {
- t = kzalloc(offsetof(struct elf_thread_core_info,
- notes[info->thread_notes]),
+ info->thread = kzalloc(struct_size(info->thread, notes, info->thread_notes),
+ GFP_KERNEL);
+ if (unlikely(!info->thread))
+ return 0;
+
+ info->thread->task = dump_task;
+ for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) {
+ t = kzalloc(struct_size(t, notes, info->thread_notes),
GFP_KERNEL);
if (unlikely(!t))
return 0;
t->task = ct->task;
- if (ct->task == dump_task || !info->thread) {
- t->next = info->thread;
- info->thread = t;
- } else {
- /*
- * Make sure to keep the original task at
- * the head of the list.
- */
- t->next = info->thread->next;
- info->thread->next = t;
- }
+ t->next = info->thread->next;
+ info->thread->next = t;
}
/*
* Now fill in each thread's information.
*/
for (t = info->thread; t != NULL; t = t->next)
- if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
+ if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info))
return 0;
/*
@@ -1881,23 +1903,18 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
info->size += notesize(&info->psinfo);
- fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
+ fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
info->size += notesize(&info->signote);
fill_auxv_note(&info->auxv, current->mm);
info->size += notesize(&info->auxv);
- if (fill_files_note(&info->files) == 0)
+ if (fill_files_note(&info->files, cprm) == 0)
info->size += notesize(&info->files);
return 1;
}
-static size_t get_note_info_size(struct elf_note_info *info)
-{
- return info->size;
-}
-
/*
* Write all the notes for each thread. When writing the first thread, the
* process-wide notes are interleaved after the first thread-specific note.
@@ -1945,204 +1962,13 @@ static void free_note_info(struct elf_note_info *info)
threads = t->next;
WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
for (i = 1; i < info->thread_notes; ++i)
- kfree(t->notes[i].data);
+ kvfree(t->notes[i].data);
kfree(t);
}
kfree(info->psinfo.data);
kvfree(info->files.data);
}
-#else
-
-/* Here is the structure in which status of each thread is captured. */
-struct elf_thread_status
-{
- struct list_head list;
- struct elf_prstatus prstatus; /* NT_PRSTATUS */
- elf_fpregset_t fpu; /* NT_PRFPREG */
- struct task_struct *thread;
- struct memelfnote notes[3];
- int num_notes;
-};
-
-/*
- * In order to add the specific thread information for the elf file format,
- * we need to keep a linked list of every threads pr_status and then create
- * a single section for them in the final core file.
- */
-static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
-{
- int sz = 0;
- struct task_struct *p = t->thread;
- t->num_notes = 0;
-
- fill_prstatus(&t->prstatus.common, p, signr);
- elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
-
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
- &(t->prstatus));
- t->num_notes++;
- sz += notesize(&t->notes[0]);
-
- if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
- &t->fpu))) {
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
- &(t->fpu));
- t->num_notes++;
- sz += notesize(&t->notes[1]);
- }
- return sz;
-}
-
-struct elf_note_info {
- struct memelfnote *notes;
- struct memelfnote *notes_files;
- struct elf_prstatus *prstatus; /* NT_PRSTATUS */
- struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
- struct list_head thread_list;
- elf_fpregset_t *fpu;
- user_siginfo_t csigdata;
- int thread_status_size;
- int numnote;
-};
-
-static int elf_note_info_init(struct elf_note_info *info)
-{
- memset(info, 0, sizeof(*info));
- INIT_LIST_HEAD(&info->thread_list);
-
- /* Allocate space for ELF notes */
- info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
- if (!info->notes)
- return 0;
- info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
- if (!info->psinfo)
- return 0;
- info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
- if (!info->prstatus)
- return 0;
- info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
- if (!info->fpu)
- return 0;
- return 1;
-}
-
-static int fill_note_info(struct elfhdr *elf, int phdrs,
- struct elf_note_info *info,
- const kernel_siginfo_t *siginfo, struct pt_regs *regs)
-{
- struct core_thread *ct;
- struct elf_thread_status *ets;
-
- if (!elf_note_info_init(info))
- return 0;
-
- for (ct = current->signal->core_state->dumper.next;
- ct; ct = ct->next) {
- ets = kzalloc(sizeof(*ets), GFP_KERNEL);
- if (!ets)
- return 0;
-
- ets->thread = ct->task;
- list_add(&ets->list, &info->thread_list);
- }
-
- list_for_each_entry(ets, &info->thread_list, list) {
- int sz;
-
- sz = elf_dump_thread_status(siginfo->si_signo, ets);
- info->thread_status_size += sz;
- }
- /* now collect the dump for the current */
- memset(info->prstatus, 0, sizeof(*info->prstatus));
- fill_prstatus(&info->prstatus->common, current, siginfo->si_signo);
- elf_core_copy_regs(&info->prstatus->pr_reg, regs);
-
- /* Set up header */
- fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
-
- /*
- * Set up the notes in similar form to SVR4 core dumps made
- * with info from their /proc.
- */
-
- fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
- sizeof(*info->prstatus), info->prstatus);
- fill_psinfo(info->psinfo, current->group_leader, current->mm);
- fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
- sizeof(*info->psinfo), info->psinfo);
-
- fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
- fill_auxv_note(info->notes + 3, current->mm);
- info->numnote = 4;
-
- if (fill_files_note(info->notes + info->numnote) == 0) {
- info->notes_files = info->notes + info->numnote;
- info->numnote++;
- }
-
- /* Try to dump the FPU. */
- info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
- info->fpu);
- if (info->prstatus->pr_fpvalid)
- fill_note(info->notes + info->numnote++,
- "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
- return 1;
-}
-
-static size_t get_note_info_size(struct elf_note_info *info)
-{
- int sz = 0;
- int i;
-
- for (i = 0; i < info->numnote; i++)
- sz += notesize(info->notes + i);
-
- sz += info->thread_status_size;
-
- return sz;
-}
-
-static int write_note_info(struct elf_note_info *info,
- struct coredump_params *cprm)
-{
- struct elf_thread_status *ets;
- int i;
-
- for (i = 0; i < info->numnote; i++)
- if (!writenote(info->notes + i, cprm))
- return 0;
-
- /* write out the thread status notes section */
- list_for_each_entry(ets, &info->thread_list, list) {
- for (i = 0; i < ets->num_notes; i++)
- if (!writenote(&ets->notes[i], cprm))
- return 0;
- }
-
- return 1;
-}
-
-static void free_note_info(struct elf_note_info *info)
-{
- while (!list_empty(&info->thread_list)) {
- struct list_head *tmp = info->thread_list.next;
- list_del(tmp);
- kfree(list_entry(tmp, struct elf_thread_status, list));
- }
-
- /* Free data possibly allocated by fill_files_note(): */
- if (info->notes_files)
- kvfree(info->notes_files->data);
-
- kfree(info->prstatus);
- kfree(info->psinfo);
- kfree(info->notes);
- kfree(info->fpu);
-}
-
-#endif
-
static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
elf_addr_t e_shoff, int segs)
{
@@ -2169,8 +1995,7 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
static int elf_core_dump(struct coredump_params *cprm)
{
int has_dumped = 0;
- int vma_count, segs, i;
- size_t vma_data_size;
+ int segs, i;
struct elfhdr elf;
loff_t offset = 0, dataoff;
struct elf_note_info info = { };
@@ -2178,16 +2003,12 @@ static int elf_core_dump(struct coredump_params *cprm)
struct elf_shdr *shdr4extnum = NULL;
Elf_Half e_phnum;
elf_addr_t e_shoff;
- struct core_vma_metadata *vma_meta;
-
- if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
- return 0;
/*
* The number of segs is recorded into the ELF header as a 16bit value.
* Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
*/
- segs = vma_count + elf_core_extra_phdrs();
+ segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
/* for notes section */
segs++;
@@ -2201,19 +2022,19 @@ static int elf_core_dump(struct coredump_params *cprm)
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
- if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
+ if (!fill_note_info(&elf, e_phnum, &info, cprm))
goto end_coredump;
has_dumped = 1;
- offset += sizeof(elf); /* Elf header */
+ offset += sizeof(elf); /* ELF header */
offset += segs * sizeof(struct elf_phdr); /* Program headers */
/* Write notes phdr entry */
{
- size_t sz = get_note_info_size(&info);
+ size_t sz = info.size;
- /* For cell spufs */
+ /* For cell spufs and x86 xstate */
sz += elf_coredump_extra_notes_size();
phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
@@ -2226,8 +2047,8 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
- offset += vma_data_size;
- offset += elf_core_extra_data_size();
+ offset += cprm->vma_data_size;
+ offset += elf_core_extra_data_size(cprm);
e_shoff = offset;
if (e_phnum == PN_XNUM) {
@@ -2246,8 +2067,8 @@ static int elf_core_dump(struct coredump_params *cprm)
goto end_coredump;
/* Write program headers for segments dump */
- for (i = 0; i < vma_count; i++) {
- struct core_vma_metadata *meta = vma_meta + i;
+ for (i = 0; i < cprm->vma_count; i++) {
+ struct core_vma_metadata *meta = cprm->vma_meta + i;
struct elf_phdr phdr;
phdr.p_type = PT_LOAD;
@@ -2273,19 +2094,19 @@ static int elf_core_dump(struct coredump_params *cprm)
if (!elf_core_write_extra_phdrs(cprm, offset))
goto end_coredump;
- /* write out the notes section */
+ /* write out the notes section */
if (!write_note_info(&info, cprm))
goto end_coredump;
- /* For cell spufs */
+ /* For cell spufs and x86 xstate */
if (elf_coredump_extra_notes_write(cprm))
goto end_coredump;
/* Align to page */
dump_skip_to(cprm, dataoff);
- for (i = 0; i < vma_count; i++) {
- struct core_vma_metadata *meta = vma_meta + i;
+ for (i = 0; i < cprm->vma_count; i++) {
+ struct core_vma_metadata *meta = cprm->vma_meta + i;
if (!dump_user_range(cprm, meta->start, meta->dump_size))
goto end_coredump;
@@ -2302,7 +2123,6 @@ static int elf_core_dump(struct coredump_params *cprm)
end_coredump:
free_note_info(&info);
kfree(shdr4extnum);
- kvfree(vma_meta);
kfree(phdr4note);
return has_dumped;
}
@@ -2323,4 +2143,7 @@ static void __exit exit_elf_binfmt(void)
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
-MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
+#include "tests/binfmt_elf_kunit.c"
+#endif
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index c6f588dc4a9d..48fd2de3bca0 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -83,8 +83,8 @@ static struct linux_binfmt elf_fdpic_format = {
.load_binary = load_elf_fdpic_binary,
#ifdef CONFIG_ELF_CORE
.core_dump = elf_fdpic_core_dump,
-#endif
.min_coredump = ELF_EXEC_PAGESIZE,
+#endif
};
static int __init init_elf_fdpic_binfmt(void)
@@ -109,7 +109,7 @@ static int is_elf(struct elfhdr *hdr, struct file *file)
return 0;
if (!elf_check_arch(hdr))
return 0;
- if (!file->f_op->mmap)
+ if (!can_mmap_file(file))
return 0;
return 1;
}
@@ -138,7 +138,7 @@ static int is_constdisp(struct elfhdr *hdr)
static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params,
struct file *file)
{
- struct elf32_phdr *phdr;
+ struct elf_phdr *phdr;
unsigned long size;
int retval, loop;
loff_t pos = params->hdr.e_phoff;
@@ -320,7 +320,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
else
executable_stack = EXSTACK_DEFAULT;
- if (stack_size == 0) {
+ if (stack_size == 0 && interp_params.flags & ELF_FDPIC_FLAG_PRESENT) {
stack_size = interp_params.stack_size;
if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
executable_stack = EXSTACK_ENABLE_X;
@@ -345,10 +345,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
+ SET_PERSONALITY(exec_params.hdr);
if (elf_check_fdpic(&exec_params.hdr))
- set_personality(PER_LINUX_FDPIC);
- else
- set_personality(PER_LINUX);
+ current->personality |= PER_LINUX_FDPIC;
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
@@ -395,7 +394,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
goto error;
}
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
interpreter = NULL;
}
@@ -434,8 +433,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
current->mm->start_stack = current->mm->start_brk + stack_size;
#endif
- if (create_elf_fdpic_tables(bprm, current->mm,
- &exec_params, &interp_params) < 0)
+ retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params,
+ &interp_params);
+ if (retval < 0)
goto error;
kdebug("- start_code %lx", current->mm->start_code);
@@ -467,7 +467,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
error:
if (interpreter) {
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
}
kfree(interpreter_name);
@@ -505,8 +505,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
char *k_platform, *k_base_platform;
char __user *u_platform, *u_base_platform, *p;
int loop;
- int nr; /* reset for each csp adjustment */
unsigned long flags = 0;
+ int ei_index;
+ elf_addr_t *elf_info;
#ifdef CONFIG_MMU
/* In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
@@ -559,8 +560,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp &= ~7UL;
/* stack the load map(s) */
- len = sizeof(struct elf32_fdpic_loadmap);
- len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs;
+ len = sizeof(struct elf_fdpic_loadmap);
+ len += sizeof(struct elf_fdpic_loadseg) * exec_params->loadmap->nsegs;
sp = (sp - len) & ~7UL;
exec_params->map_addr = sp;
@@ -570,8 +571,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;
if (interp_params->loadmap) {
- len = sizeof(struct elf32_fdpic_loadmap);
- len += sizeof(struct elf32_fdpic_loadseg) *
+ len = sizeof(struct elf_fdpic_loadmap);
+ len += sizeof(struct elf_fdpic_loadseg) *
interp_params->loadmap->nsegs;
sp = (sp - len) & ~7UL;
interp_params->map_addr = sp;
@@ -591,6 +592,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
if (bprm->have_execfd)
nitems++;
+#ifdef ELF_HWCAP2
+ nitems++;
+#endif
csp = sp;
sp -= nitems * 2 * sizeof(unsigned long);
@@ -601,48 +605,34 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
csp -= sp & 15UL;
sp -= sp & 15UL;
- /* put the ELF interpreter info on the stack */
-#define NEW_AUX_ENT(id, val) \
- do { \
- struct { unsigned long _id, _val; } __user *ent, v; \
- \
- ent = (void __user *) csp; \
- v._id = (id); \
- v._val = (val); \
- if (copy_to_user(ent + nr, &v, sizeof(v))) \
- return -EFAULT; \
- nr++; \
+ /* Create the ELF interpreter info */
+ elf_info = (elf_addr_t *)mm->saved_auxv;
+ /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
+#define NEW_AUX_ENT(id, val) \
+ do { \
+ *elf_info++ = id; \
+ *elf_info++ = val; \
} while (0)
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_NULL, 0);
- if (k_platform) {
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_PLATFORM,
- (elf_addr_t) (unsigned long) u_platform);
- }
-
- if (k_base_platform) {
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_BASE_PLATFORM,
- (elf_addr_t) (unsigned long) u_base_platform);
- }
-
- if (bprm->have_execfd) {
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
- }
-
- nr = 0;
- csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long);
+#ifdef ARCH_DLINFO
+ /*
+ * ARCH_DLINFO must come first so PPC can do its special alignment of
+ * AUXV.
+ * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
+ * ARCH_DLINFO changes
+ */
+ ARCH_DLINFO;
+#endif
NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
#ifdef ELF_HWCAP2
NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
+#ifdef ELF_HWCAP3
+ NEW_AUX_ENT(AT_HWCAP3, ELF_HWCAP3);
+#endif
+#ifdef ELF_HWCAP4
+ NEW_AUX_ENT(AT_HWCAP4, ELF_HWCAP4);
+#endif
NEW_AUX_ENT(AT_PAGESZ, PAGE_SIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
NEW_AUX_ENT(AT_PHDR, exec_params->ph_addr);
@@ -659,17 +649,29 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
NEW_AUX_ENT(AT_EGID, (elf_addr_t) from_kgid_munged(cred->user_ns, cred->egid));
NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
NEW_AUX_ENT(AT_EXECFN, bprm->exec);
+ if (k_platform)
+ NEW_AUX_ENT(AT_PLATFORM,
+ (elf_addr_t)(unsigned long)u_platform);
+ if (k_base_platform)
+ NEW_AUX_ENT(AT_BASE_PLATFORM,
+ (elf_addr_t)(unsigned long)u_base_platform);
+ if (bprm->have_execfd)
+ NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
+#undef NEW_AUX_ENT
+ /* AT_NULL is zero; clear the rest too */
+ memset(elf_info, 0, (char *)mm->saved_auxv +
+ sizeof(mm->saved_auxv) - (char *)elf_info);
-#ifdef ARCH_DLINFO
- nr = 0;
- csp -= AT_VECTOR_SIZE_ARCH * 2 * sizeof(unsigned long);
+ /* And advance past the AT_NULL entry. */
+ elf_info += 2;
- /* ARCH_DLINFO must come last so platform specific code can enforce
- * special alignment requirements on the AUXV if necessary (eg. PPC).
- */
- ARCH_DLINFO;
-#endif
-#undef NEW_AUX_ENT
+ ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
+ csp -= ei_index * sizeof(elf_addr_t);
+
+ /* Put the elf_info on the stack in the right place. */
+ if (copy_to_user((void __user *)csp, mm->saved_auxv,
+ ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
/* allocate room for argv[] and envv[] */
csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
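The auxiliary vector built above is just an array of id/value pairs terminated by AT_NULL; a minimal standalone sketch using the AT_* constants from <elf.h>:

#include <elf.h>
#include <stdio.h>

int main(void)
{
        unsigned long auxv[8], *p = auxv;

#define NEW_AUX_ENT(id, val) do { *p++ = (id); *p++ = (val); } while (0)
        NEW_AUX_ENT(AT_PAGESZ, 4096);   /* sample entries only */
        NEW_AUX_ENT(AT_PHNUM, 11);
        NEW_AUX_ENT(AT_NULL, 0);
#undef NEW_AUX_ENT

        for (unsigned long *q = auxv; q[0] != AT_NULL; q += 2)
                printf("id=%lu val=%lu\n", q[0], q[1]);
        return 0;
}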
@@ -739,15 +741,15 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
struct mm_struct *mm,
const char *what)
{
- struct elf32_fdpic_loadmap *loadmap;
+ struct elf_fdpic_loadmap *loadmap;
#ifdef CONFIG_MMU
- struct elf32_fdpic_loadseg *mseg;
+ struct elf_fdpic_loadseg *mseg;
+ unsigned long load_addr;
#endif
- struct elf32_fdpic_loadseg *seg;
- struct elf32_phdr *phdr;
- unsigned long load_addr, stop;
+ struct elf_fdpic_loadseg *seg;
+ struct elf_phdr *phdr;
unsigned nloads, tmp;
- size_t size;
+ unsigned long stop;
int loop, ret;
/* allocate a load map table */
@@ -759,19 +761,15 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
if (nloads == 0)
return -ELIBBAD;
- size = sizeof(*loadmap) + nloads * sizeof(*seg);
- loadmap = kzalloc(size, GFP_KERNEL);
+ loadmap = kzalloc(struct_size(loadmap, segs, nloads), GFP_KERNEL);
if (!loadmap)
return -ENOMEM;
params->loadmap = loadmap;
- loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
+ loadmap->version = ELF_FDPIC_LOADMAP_VERSION;
loadmap->nsegs = nloads;
- load_addr = params->load_addr;
- seg = loadmap->segs;
-
/* map the requested LOADs into the memory space */
switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
case ELF_FDPIC_FLAG_CONSTDISP:
@@ -842,8 +840,8 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
if (phdr->p_vaddr >= seg->p_vaddr &&
phdr->p_vaddr + phdr->p_memsz <=
seg->p_vaddr + seg->p_memsz) {
- Elf32_Dyn __user *dyn;
- Elf32_Sword d_tag;
+ Elf_Dyn __user *dyn;
+ Elf_Sword d_tag;
params->dynamic_addr =
(phdr->p_vaddr - seg->p_vaddr) +
@@ -853,11 +851,11 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
* one item, and that the last item is a NULL
* entry */
if (phdr->p_memsz == 0 ||
- phdr->p_memsz % sizeof(Elf32_Dyn) != 0)
+ phdr->p_memsz % sizeof(Elf_Dyn) != 0)
goto dynamic_error;
- tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
- dyn = (Elf32_Dyn __user *)params->dynamic_addr;
+ tmp = phdr->p_memsz / sizeof(Elf_Dyn);
+ dyn = (Elf_Dyn __user *)params->dynamic_addr;
if (get_user(d_tag, &dyn[tmp - 1].d_tag) ||
d_tag != 0)
goto dynamic_error;
@@ -903,10 +901,12 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
kdebug("- DYNAMIC[]: %lx", params->dynamic_addr);
seg = loadmap->segs;
for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
- kdebug("- LOAD[%d] : %08x-%08x [va=%x ms=%x]",
+ kdebug("- LOAD[%d] : %08llx-%08llx [va=%llx ms=%llx]",
loop,
- seg->addr, seg->addr + seg->p_memsz - 1,
- seg->p_vaddr, seg->p_memsz);
+ (unsigned long long) seg->addr,
+ (unsigned long long) seg->addr + seg->p_memsz - 1,
+ (unsigned long long) seg->p_vaddr,
+ (unsigned long long) seg->p_memsz);
return 0;
@@ -926,8 +926,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
struct file *file,
struct mm_struct *mm)
{
- struct elf32_fdpic_loadseg *seg;
- struct elf32_phdr *phdr;
+ struct elf_fdpic_loadseg *seg;
+ struct elf_phdr *phdr;
unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0;
int loop, ret;
@@ -1010,8 +1010,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
struct file *file,
struct mm_struct *mm)
{
- struct elf32_fdpic_loadseg *seg;
- struct elf32_phdr *phdr;
+ struct elf_fdpic_loadseg *seg;
+ struct elf_phdr *phdr;
unsigned long load_addr, delta_vaddr;
int loop, dvset;
@@ -1024,7 +1024,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
/* deal with each load segment separately */
phdr = params->phdrs;
for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
- unsigned long maddr, disp, excess, excess1;
+ unsigned long maddr, disp, excess;
int prot = 0, flags;
if (phdr->p_type != PT_LOAD)
@@ -1085,9 +1085,10 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
phdr->p_offset - disp);
- kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
- loop, phdr->p_memsz + disp, prot, flags,
- phdr->p_offset - disp, maddr);
+ kdebug("mmap[%d] <file> sz=%llx pr=%x fl=%x of=%llx --> %08lx",
+ loop, (unsigned long long) phdr->p_memsz + disp,
+ prot, flags, (unsigned long long) phdr->p_offset - disp,
+ maddr);
if (IS_ERR_VALUE(maddr))
return (int) maddr;
@@ -1119,9 +1120,10 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
* extant in the file
*/
excess = phdr->p_memsz - phdr->p_filesz;
- excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
#ifdef CONFIG_MMU
+ unsigned long excess1
+ = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
if (excess > excess1) {
unsigned long xaddr = maddr + phdr->p_filesz + excess1;
unsigned long xmaddr;
@@ -1149,8 +1151,9 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
#else
if (excess > 0) {
- kdebug("clear[%d] ad=%lx sz=%lx",
- loop, maddr + phdr->p_filesz, excess);
+ kdebug("clear[%d] ad=%llx sz=%lx", loop,
+ (unsigned long long) maddr + phdr->p_filesz,
+ excess);
if (clear_user((void *) maddr + phdr->p_filesz, excess))
return -EFAULT;
}
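A toy calculation (assumed values) of how a segment's bss splits into the tail of the last file-backed page (excess1) and the fully anonymous remainder handled by the MMU branch above:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long maddr = 0x10000000UL;     /* where the segment was mapped */
        unsigned long p_filesz = 0x1730;
        unsigned long p_memsz  = 0x4f00;

        unsigned long excess  = p_memsz - p_filesz;
        unsigned long excess1 = PAGE_SIZE - ((maddr + p_filesz) & ~PAGE_MASK);

        printf("clear in last file page: %lu bytes\n",
               excess > excess1 ? excess1 : excess);
        if (excess > excess1)
                printf("anonymous mapping: %lu bytes at %#lx\n",
                       excess - excess1, maddr + p_filesz + excess1);
        return 0;
}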
@@ -1268,12 +1271,12 @@ static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offs
phdr->p_filesz = sz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
- phdr->p_align = 0;
+ phdr->p_align = 4;
return;
}
-static inline void fill_note(struct memelfnote *note, const char *name, int type,
- unsigned int sz, void *data)
+static inline void __fill_note(struct memelfnote *note, const char *name, int type,
+ unsigned int sz, void *data)
{
note->name = name;
note->type = type;
@@ -1282,6 +1285,9 @@ static inline void fill_note(struct memelfnote *note, const char *name, int type
return;
}
+#define fill_note(note, type, sz, data) \
+ __fill_note(note, NN_ ## type, NT_ ## type, sz, data)
+
/*
* fill up all the fields in prstatus from the given task struct, except
* registers which need to be filled up separately.
@@ -1359,7 +1365,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
- strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+ get_task_comm(psinfo->pr_fname, p);
return 0;
}
@@ -1395,8 +1401,7 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
regset_get(p, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
- &t->prstatus);
+ fill_note(&t->notes[0], PRSTATUS, sizeof(t->prstatus), &t->prstatus);
t->num_notes++;
*sz += notesize(&t->notes[0]);
@@ -1413,8 +1418,7 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
}
if (t->prstatus.pr_fpvalid) {
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
- &t->fpu);
+ fill_note(&t->notes[1], PRFPREG, sizeof(t->fpu), &t->fpu);
t->num_notes++;
*sz += notesize(&t->notes[1]);
}
@@ -1465,7 +1469,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm,
static int elf_fdpic_core_dump(struct coredump_params *cprm)
{
int has_dumped = 0;
- int vma_count, segs;
+ int segs;
int i;
struct elfhdr *elf = NULL;
loff_t offset = 0, dataoff;
@@ -1480,8 +1484,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
elf_addr_t e_shoff;
struct core_thread *ct;
struct elf_thread_status *tmp;
- struct core_vma_metadata *vma_meta = NULL;
- size_t vma_data_size;
/* alloc memory for large data structures: too large to be on stack */
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
@@ -1491,9 +1493,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
if (!psinfo)
goto end_coredump;
- if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
- goto end_coredump;
-
for (ct = current->signal->core_state->dumper.next;
ct; ct = ct->next) {
tmp = elf_dump_thread_status(cprm->siginfo->si_signo,
@@ -1513,7 +1512,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
tmp->next = thread_list;
thread_list = tmp;
- segs = vma_count + elf_core_extra_phdrs();
+ segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
/* for notes section */
segs++;
@@ -1533,7 +1532,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
*/
fill_psinfo(psinfo, current->group_leader, current->mm);
- fill_note(&psinfo_note, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(&psinfo_note, PRPSINFO, sizeof(*psinfo), psinfo);
thread_status_size += notesize(&psinfo_note);
auxv = (elf_addr_t *) current->mm->saved_auxv;
@@ -1541,10 +1540,10 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
do
i += 2;
while (auxv[i - 2] != AT_NULL);
- fill_note(&auxv_note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ fill_note(&auxv_note, AUXV, i * sizeof(elf_addr_t), auxv);
thread_status_size += notesize(&auxv_note);
- offset = sizeof(*elf); /* Elf header */
+ offset = sizeof(*elf); /* ELF header */
offset += segs * sizeof(struct elf_phdr); /* Program headers */
/* Write notes phdr entry */
@@ -1558,8 +1557,8 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
/* Page-align dumped data */
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
- offset += vma_data_size;
- offset += elf_core_extra_data_size();
+ offset += cprm->vma_data_size;
+ offset += elf_core_extra_data_size(cprm);
e_shoff = offset;
if (e_phnum == PN_XNUM) {
@@ -1578,8 +1577,8 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
goto end_coredump;
/* write program headers for segments dump */
- for (i = 0; i < vma_count; i++) {
- struct core_vma_metadata *meta = vma_meta + i;
+ for (i = 0; i < cprm->vma_count; i++) {
+ struct core_vma_metadata *meta = cprm->vma_meta + i;
struct elf_phdr phdr;
size_t sz;
@@ -1608,7 +1607,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
if (!elf_core_write_extra_phdrs(cprm, offset))
goto end_coredump;
- /* write out the notes section */
+ /* write out the notes section */
if (!writenote(thread_list->notes, cprm))
goto end_coredump;
if (!writenote(&psinfo_note, cprm))
@@ -1628,7 +1627,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
dump_skip_to(cprm, dataoff);
- if (!elf_fdpic_dump_segments(cprm, vma_meta, vma_count))
+ if (!elf_fdpic_dump_segments(cprm, cprm->vma_meta, cprm->vma_count))
goto end_coredump;
if (!elf_core_write_extra_data(cprm))
@@ -1652,7 +1651,6 @@ end_coredump:
thread_list = thread_list->next;
kfree(tmp);
}
- kvfree(vma_meta);
kfree(phdr4note);
kfree(elf);
kfree(psinfo);
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 5d776f80ee50..b5b5ca1a44f7 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -39,7 +39,7 @@
#include <linux/vmalloc.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/flat.h>
@@ -68,16 +68,14 @@
#define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */
#define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-#define MAX_SHARED_LIBS (4)
-#else
-#define MAX_SHARED_LIBS (1)
-#endif
+#define MAX_SHARED_LIBS (1)
#ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
#define DATA_START_OFFSET_WORDS (0)
+#define MAX_SHARED_LIBS_UPDATE (0)
#else
#define DATA_START_OFFSET_WORDS (MAX_SHARED_LIBS)
+#define MAX_SHARED_LIBS_UPDATE (MAX_SHARED_LIBS)
#endif
struct lib_info {
@@ -92,32 +90,13 @@ struct lib_info {
} lib_list[MAX_SHARED_LIBS];
};
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-static int load_flat_shared_library(int id, struct lib_info *p);
-#endif
-
static int load_flat_binary(struct linux_binprm *);
-static int flat_core_dump(struct coredump_params *cprm);
static struct linux_binfmt flat_format = {
.module = THIS_MODULE,
.load_binary = load_flat_binary,
- .core_dump = flat_core_dump,
- .min_coredump = PAGE_SIZE
};
-/****************************************************************************/
-/*
- * Routine writes a core dump image in the current directory.
- * Currently only a stub-function.
- */
-
-static int flat_core_dump(struct coredump_params *cprm)
-{
- pr_warn("Process %s:%d received signr %d and should have core dumped\n",
- current->comm, current->pid, cprm->siginfo->si_signo);
- return 1;
-}
/****************************************************************************/
/*
@@ -322,51 +301,18 @@ out_free:
/****************************************************************************/
static unsigned long
-calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
+calc_reloc(unsigned long r, struct lib_info *p)
{
unsigned long addr;
- int id;
unsigned long start_brk;
unsigned long start_data;
unsigned long text_len;
unsigned long start_code;
-#ifdef CONFIG_BINFMT_SHARED_FLAT
- if (r == 0)
- id = curid; /* Relocs of 0 are always self referring */
- else {
- id = (r >> 24) & 0xff; /* Find ID for this reloc */
- r &= 0x00ffffff; /* Trim ID off here */
- }
- if (id >= MAX_SHARED_LIBS) {
- pr_err("reference 0x%lx to shared library %d", r, id);
- goto failed;
- }
- if (curid != id) {
- if (internalp) {
- pr_err("reloc address 0x%lx not in same module "
- "(%d != %d)", r, curid, id);
- goto failed;
- } else if (!p->lib_list[id].loaded &&
- load_flat_shared_library(id, p) < 0) {
- pr_err("failed to load library %d", id);
- goto failed;
- }
- /* Check versioning information (i.e. time stamps) */
- if (p->lib_list[id].build_date && p->lib_list[curid].build_date &&
- p->lib_list[curid].build_date < p->lib_list[id].build_date) {
- pr_err("library %d is younger than %d", id, curid);
- goto failed;
- }
- }
-#else
- id = 0;
-#endif
-
- start_brk = p->lib_list[id].start_brk;
- start_data = p->lib_list[id].start_data;
- start_code = p->lib_list[id].start_code;
- text_len = p->lib_list[id].text_len;
+ start_brk = p->lib_list[0].start_brk;
+ start_data = p->lib_list[0].start_data;
+ start_code = p->lib_list[0].start_code;
+ text_len = p->lib_list[0].text_len;
if (r > start_brk - start_data + text_len) {
pr_err("reloc outside program 0x%lx (0 - 0x%lx/0x%lx)",
@@ -433,8 +379,32 @@ static void old_reloc(unsigned long rl)
/****************************************************************************/
+static inline u32 __user *skip_got_header(u32 __user *rp)
+{
+ if (IS_ENABLED(CONFIG_RISCV)) {
+ /*
+ * RISC-V has a 16 byte GOT PLT header for elf64-riscv
+ * and 8 byte GOT PLT header for elf32-riscv.
+ * Skip the whole GOT PLT header, since it is reserved
+ * for the dynamic linker (ld.so).
+ */
+ u32 rp_val0, rp_val1;
+
+ if (get_user(rp_val0, rp))
+ return rp;
+ if (get_user(rp_val1, rp + 1))
+ return rp;
+
+ if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff)
+ rp += 4;
+ else if (rp_val0 == 0xffffffff)
+ rp += 2;
+ }
+ return rp;
+}
+
static int load_flat_file(struct linux_binprm *bprm,
- struct lib_info *libinfo, int id, unsigned long *extra_stack)
+ struct lib_info *libinfo, unsigned long *extra_stack)
{
struct flat_hdr *hdr;
unsigned long textpos, datapos, realdatastart;
@@ -486,14 +456,6 @@ static int load_flat_file(struct linux_binprm *bprm,
goto err;
}
- /* Don't allow old format executables to use shared libraries */
- if (rev == OLD_FLAT_VERSION && id != 0) {
- pr_err("shared libraries are not available before rev 0x%lx\n",
- FLAT_VERSION);
- ret = -ENOEXEC;
- goto err;
- }
-
/*
* fix up the flags for the older format, there were all kinds
* of endian hacks, this only works for the simple cases
@@ -516,7 +478,7 @@ static int load_flat_file(struct linux_binprm *bprm,
* 28 bits (256 MB) is way more than reasonable in this case.
* If some top bits are set we have probable binary corruption.
*/
- if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
+ if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) {
pr_err("bad header\n");
ret = -ENOEXEC;
goto err;
@@ -544,15 +506,13 @@ static int load_flat_file(struct linux_binprm *bprm,
}
/* Flush all traces of the currently running executable */
- if (id == 0) {
- ret = begin_new_exec(bprm);
- if (ret)
- goto err;
+ ret = begin_new_exec(bprm);
+ if (ret)
+ goto err;
- /* OK, This is the point of no return */
- set_personality(PER_LINUX_32BIT);
- setup_new_exec(bprm);
- }
+ /* OK, This is the point of no return */
+ set_personality(PER_LINUX_32BIT);
+ setup_new_exec(bprm);
/*
* calculate the extra space we need to map in
@@ -732,42 +692,40 @@ static int load_flat_file(struct linux_binprm *bprm,
text_len -= sizeof(struct flat_hdr); /* the real code len */
/* The main program needs a little extra setup in the task structure */
- if (id == 0) {
- current->mm->start_code = start_code;
- current->mm->end_code = end_code;
- current->mm->start_data = datapos;
- current->mm->end_data = datapos + data_len;
- /*
- * set up the brk stuff, uses any slack left in data/bss/stack
- * allocation. We put the brk after the bss (between the bss
- * and stack) like other platforms.
- * Userspace code relies on the stack pointer starting out at
- * an address right at the end of a page.
- */
- current->mm->start_brk = datapos + data_len + bss_len;
- current->mm->brk = (current->mm->start_brk + 3) & ~3;
+ current->mm->start_code = start_code;
+ current->mm->end_code = end_code;
+ current->mm->start_data = datapos;
+ current->mm->end_data = datapos + data_len;
+ /*
+ * set up the brk stuff, uses any slack left in data/bss/stack
+ * allocation. We put the brk after the bss (between the bss
+ * and stack) like other platforms.
+ * Userspace code relies on the stack pointer starting out at
+ * an address right at the end of a page.
+ */
+ current->mm->start_brk = datapos + data_len + bss_len;
+ current->mm->brk = (current->mm->start_brk + 3) & ~3;
#ifndef CONFIG_MMU
- current->mm->context.end_brk = memp + memp_size - stack_len;
+ current->mm->context.end_brk = memp + memp_size - stack_len;
#endif
- }
if (flags & FLAT_FLAG_KTRACE) {
pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n",
textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n",
- id ? "Lib" : "Load", bprm->filename,
+ "Load", bprm->filename,
start_code, end_code, datapos, datapos + data_len,
datapos + data_len, (datapos + data_len + bss_len + 3) & ~3);
}
/* Store the current module values into the global library structure */
- libinfo->lib_list[id].start_code = start_code;
- libinfo->lib_list[id].start_data = datapos;
- libinfo->lib_list[id].start_brk = datapos + data_len + bss_len;
- libinfo->lib_list[id].text_len = text_len;
- libinfo->lib_list[id].loaded = 1;
- libinfo->lib_list[id].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
- libinfo->lib_list[id].build_date = ntohl(hdr->build_date);
+ libinfo->lib_list[0].start_code = start_code;
+ libinfo->lib_list[0].start_data = datapos;
+ libinfo->lib_list[0].start_brk = datapos + data_len + bss_len;
+ libinfo->lib_list[0].text_len = text_len;
+ libinfo->lib_list[0].loaded = 1;
+ libinfo->lib_list[0].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
+ libinfo->lib_list[0].build_date = ntohl(hdr->build_date);
/*
* We just load the allocations into some temporary memory to
@@ -782,14 +740,15 @@ static int load_flat_file(struct linux_binprm *bprm,
* image.
*/
if (flags & FLAT_FLAG_GOTPIC) {
- for (rp = (u32 __user *)datapos; ; rp++) {
+ rp = skip_got_header((u32 __user *) datapos);
+ for (; ; rp++) {
u32 addr, rp_val;
if (get_user(rp_val, rp))
return -EFAULT;
if (rp_val == 0xffffffff)
break;
if (rp_val) {
- addr = calc_reloc(rp_val, libinfo, id, 0);
+ addr = calc_reloc(rp_val, libinfo);
if (addr == RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
@@ -825,7 +784,7 @@ static int load_flat_file(struct linux_binprm *bprm,
return -EFAULT;
relval = ntohl(tmp);
addr = flat_get_relocate_addr(relval);
- rp = (u32 __user *)calc_reloc(addr, libinfo, id, 1);
+ rp = (u32 __user *)calc_reloc(addr, libinfo);
if (rp == (u32 __user *)RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
@@ -848,7 +807,7 @@ static int load_flat_file(struct linux_binprm *bprm,
*/
addr = ntohl((__force __be32)addr);
}
- addr = calc_reloc(addr, libinfo, id, 0);
+ addr = calc_reloc(addr, libinfo);
if (addr == RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
@@ -876,7 +835,7 @@ static int load_flat_file(struct linux_binprm *bprm,
/* zero the BSS, BRK and stack areas */
if (clear_user((void __user *)(datapos + data_len), bss_len +
(memp + memp_size - stack_len - /* end brk */
- libinfo->lib_list[id].start_brk) + /* start brk */
+ libinfo->lib_list[0].start_brk) + /* start brk */
stack_len))
return -EFAULT;
@@ -887,49 +846,6 @@ err:
/****************************************************************************/
-#ifdef CONFIG_BINFMT_SHARED_FLAT
-
-/*
- * Load a shared library into memory. The library gets its own data
- * segment (including bss) but not argv/argc/environ.
- */
-
-static int load_flat_shared_library(int id, struct lib_info *libs)
-{
- /*
- * This is a fake bprm struct; only the members "buf", "file" and
- * "filename" are actually used.
- */
- struct linux_binprm bprm;
- int res;
- char buf[16];
- loff_t pos = 0;
-
- memset(&bprm, 0, sizeof(bprm));
-
- /* Create the file name */
- sprintf(buf, "/lib/lib%d.so", id);
-
- /* Open the file up */
- bprm.filename = buf;
- bprm.file = open_exec(bprm.filename);
- res = PTR_ERR(bprm.file);
- if (IS_ERR(bprm.file))
- return res;
-
- res = kernel_read(bprm.file, bprm.buf, BINPRM_BUF_SIZE, &pos);
-
- if (res >= 0)
- res = load_flat_file(&bprm, libs, id, NULL);
-
- allow_write_access(bprm.file);
- fput(bprm.file);
-
- return res;
-}
-
-#endif /* CONFIG_BINFMT_SHARED_FLAT */
-/****************************************************************************/
/*
* These are the functions used to load flat style executables and shared
@@ -961,12 +877,12 @@ static int load_flat_binary(struct linux_binprm *bprm)
stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN);
- res = load_flat_file(bprm, &libinfo, 0, &stack_len);
+ res = load_flat_file(bprm, &libinfo, &stack_len);
if (res < 0)
return res;
/* Update data segment pointers for all libraries */
- for (i = 0; i < MAX_SHARED_LIBS; i++) {
+ for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) {
if (!libinfo.lib_list[i].loaded)
continue;
for (j = 0; j < MAX_SHARED_LIBS; j++) {
@@ -1006,20 +922,6 @@ static int load_flat_binary(struct linux_binprm *bprm)
*/
start_addr = libinfo.lib_list[0].entry;
-#ifdef CONFIG_BINFMT_SHARED_FLAT
- for (i = MAX_SHARED_LIBS-1; i > 0; i--) {
- if (libinfo.lib_list[i].loaded) {
- /* Push previos first to call address */
- unsigned long __user *sp;
- current->mm->start_stack -= sizeof(unsigned long);
- sp = (unsigned long __user *)current->mm->start_stack;
- if (put_user(start_addr, sp))
- return -EFAULT;
- start_addr = libinfo.lib_list[i].entry;
- }
- }
-#endif
-
#ifdef FLAT_PLAT_INIT
FLAT_PLAT_INIT(regs);
#endif
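
The new skip_got_header() above treats one or two leading 0xffffffff words as the reserved RISC-V GOT-PLT header (16 bytes for elf64-riscv, 8 bytes for elf32-riscv). A self-contained sketch of the same sentinel test, assuming the table already sits in an ordinary in-process buffer with at least two entries, rather than behind get_user():

#include <stdint.h>
#include <stdio.h>

/* Skip a reserved RISC-V GOT-PLT header: 16 bytes (elf64) or 8 bytes (elf32). */
static const uint32_t *skip_got_header(const uint32_t *rp)
{
	if (rp[0] == 0xffffffffu && rp[1] == 0xffffffffu)
		return rp + 4;	/* elf64-riscv: two reserved 64-bit slots */
	if (rp[0] == 0xffffffffu)
		return rp + 2;	/* elf32-riscv: two reserved 32-bit slots */
	return rp;		/* no reserved header, start relocating here */
}

int main(void)
{
	const uint32_t got64[] = { 0xffffffffu, 0xffffffffu, 0x1234u, 0x5678u };
	const uint32_t got32[] = { 0xffffffffu, 0x1234u };

	printf("%td %td\n", skip_got_header(got64) - got64,
	       skip_got_header(got32) - got32);	/* prints: 4 2 */
	return 0;
}
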
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index e1eae7ea823a..8cb1a94339b8 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -40,14 +40,11 @@ enum {
VERBOSE_STATUS = 1 /* make it zero to save 400 bytes kernel memory */
};
-static LIST_HEAD(entries);
-static int enabled = 1;
-
enum {Enabled, Magic};
-#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
-#define MISC_FMT_OPEN_BINARY (1 << 30)
-#define MISC_FMT_CREDENTIALS (1 << 29)
-#define MISC_FMT_OPEN_FILE (1 << 28)
+#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31)
+#define MISC_FMT_OPEN_BINARY (1UL << 30)
+#define MISC_FMT_CREDENTIALS (1UL << 29)
+#define MISC_FMT_OPEN_FILE (1UL << 28)
typedef struct {
struct list_head list;
@@ -60,12 +57,10 @@ typedef struct {
char *name;
struct dentry *dentry;
struct file *interp_file;
+ refcount_t users; /* sync removal with load_misc_binary() */
} Node;
-static DEFINE_RWLOCK(entries_lock);
static struct file_system_type bm_fs_type;
-static struct vfsmount *bm_mnt;
-static int entry_count;
/*
* Max length of the register string. Determined by:
@@ -82,19 +77,24 @@ static int entry_count;
*/
#define MAX_REGISTER_LENGTH 1920
-/*
- * Check if we support the binfmt
- * if we do, return the node, else NULL
- * locking is done in load_misc_binary
+/**
+ * search_binfmt_handler - search for a binary handler for @bprm
+ * @misc: handle to binfmt_misc instance
+ * @bprm: binary for which we are looking for a handler
+ *
+ * Search for a binary type handler for @bprm in the list of registered binary
+ * type handlers.
+ *
+ * Return: binary type list entry on success, NULL on failure
*/
-static Node *check_file(struct linux_binprm *bprm)
+static Node *search_binfmt_handler(struct binfmt_misc *misc,
+ struct linux_binprm *bprm)
{
char *p = strrchr(bprm->interp, '.');
- struct list_head *l;
+ Node *e;
/* Walk all the registered handlers. */
- list_for_each(l, &entries) {
- Node *e = list_entry(l, Node, list);
+ list_for_each_entry(e, &misc->entries, list) {
char *s;
int j;
@@ -123,9 +123,79 @@ static Node *check_file(struct linux_binprm *bprm)
if (j == e->size)
return e;
}
+
return NULL;
}
+/**
+ * get_binfmt_handler - try to find a binary type handler
+ * @misc: handle to binfmt_misc instance
+ * @bprm: binary for which we are looking for a handler
+ *
+ * Try to find a binfmt handler for the binary type. If one is found take a
+ * reference to protect against removal via bm_{entry,status}_write().
+ *
+ * Return: binary type list entry on success, NULL on failure
+ */
+static Node *get_binfmt_handler(struct binfmt_misc *misc,
+ struct linux_binprm *bprm)
+{
+ Node *e;
+
+ read_lock(&misc->entries_lock);
+ e = search_binfmt_handler(misc, bprm);
+ if (e)
+ refcount_inc(&e->users);
+ read_unlock(&misc->entries_lock);
+ return e;
+}
+
+/**
+ * put_binfmt_handler - put binary handler node
+ * @e: node to put
+ *
+ * Free node syncing with load_misc_binary() and defer final free to
+ * load_misc_binary() in case it is using the binary type handler we were
+ * requested to remove.
+ */
+static void put_binfmt_handler(Node *e)
+{
+ if (refcount_dec_and_test(&e->users)) {
+ if (e->flags & MISC_FMT_OPEN_FILE)
+ filp_close(e->interp_file, NULL);
+ kfree(e);
+ }
+}
+
+/**
+ * load_binfmt_misc - load the binfmt_misc of the caller's user namespace
+ *
+ * To be called in load_misc_binary() to load the relevant struct binfmt_misc.
+ * If a user namespace doesn't have its own binfmt_misc mount it can make use
+ * of its ancestor's binfmt_misc handlers. This mimics the behavior of
+ * pre-namespaced binfmt_misc where all registered binfmt_misc handlers were
+ * available to all users and user namespaces on the system.
+ *
+ * Return: the binfmt_misc instance of the caller's user namespace
+ */
+static struct binfmt_misc *load_binfmt_misc(void)
+{
+ const struct user_namespace *user_ns;
+ struct binfmt_misc *misc;
+
+ user_ns = current_user_ns();
+ while (user_ns) {
+ /* Pairs with smp_store_release() in bm_fill_super(). */
+ misc = smp_load_acquire(&user_ns->binfmt_misc);
+ if (misc)
+ return misc;
+
+ user_ns = user_ns->parent;
+ }
+
+ return &init_binfmt_misc;
+}
+
/*
* the loader itself
*/
@@ -133,18 +203,14 @@ static int load_misc_binary(struct linux_binprm *bprm)
{
Node *fmt;
struct file *interp_file = NULL;
- int retval;
+ int retval = -ENOEXEC;
+ struct binfmt_misc *misc;
- retval = -ENOEXEC;
- if (!enabled)
+ misc = load_binfmt_misc();
+ if (!misc->enabled)
return retval;
- /* to keep locking time low, we copy the interpreter string */
- read_lock(&entries_lock);
- fmt = check_file(bprm);
- if (fmt)
- dget(fmt->dentry);
- read_unlock(&entries_lock);
+ fmt = get_binfmt_handler(misc, bprm);
if (!fmt)
return retval;
@@ -198,7 +264,16 @@ static int load_misc_binary(struct linux_binprm *bprm)
retval = 0;
ret:
- dput(fmt->dentry);
+
+ /*
+ * If we actually put the node here all concurrent calls to
+ * load_misc_binary() will have finished. We also know
+ * that for the refcount to be zero someone must have concurently
+ * removed the binary type handler from the list and it's our job to
+ * free it.
+ */
+ put_binfmt_handler(fmt);
+
return retval;
}
@@ -287,7 +362,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
err = -ENOMEM;
memsize = sizeof(Node) + count + 8;
- e = kmalloc(memsize, GFP_KERNEL);
+ e = kmalloc(memsize, GFP_KERNEL_ACCOUNT);
if (!e)
goto out;
@@ -399,7 +474,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
if (e->mask) {
int i;
- char *masked = kmalloc(e->size, GFP_KERNEL);
+ char *masked = kmalloc(e->size, GFP_KERNEL_ACCOUNT);
print_hex_dump_bytes(
KBUILD_MODNAME ": register: mask[decoded]: ",
@@ -547,36 +622,76 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
+/**
+ * i_binfmt_misc - retrieve struct binfmt_misc from a binfmt_misc inode
+ * @inode: inode of the relevant binfmt_misc instance
+ *
+ * This helper retrieves struct binfmt_misc from a binfmt_misc inode. This can
+ * be done without any memory barriers because we are guaranteed that
+ * user_ns->binfmt_misc is fully initialized. It was fully initialized when the
+ * binfmt_misc mount was first created.
+ *
+ * Return: struct binfmt_misc of the relevant binfmt_misc instance
+ */
+static struct binfmt_misc *i_binfmt_misc(struct inode *inode)
+{
+ return inode->i_sb->s_user_ns->binfmt_misc;
+}
+
+/**
+ * bm_evict_inode - cleanup data associated with @inode
+ * @inode: inode to which the data is attached
+ *
+ * Cleanup the binary type handler data associated with @inode if a binary type
+ * entry is removed or the filesystem is unmounted and the super block is
+ * shutdown.
+ *
+ * If the ->evict call was not caused by a super block shutdown but by a write
+ * to remove the entry or all entries via bm_{entry,status}_write() the entry
+ * will have already been removed from the list. We keep the list_empty() check
+ * to make that explicit.
+ */
static void bm_evict_inode(struct inode *inode)
{
Node *e = inode->i_private;
- if (e && e->flags & MISC_FMT_OPEN_FILE)
- filp_close(e->interp_file, NULL);
-
clear_inode(inode);
- kfree(e);
+
+ if (e) {
+ struct binfmt_misc *misc;
+
+ misc = i_binfmt_misc(inode);
+ write_lock(&misc->entries_lock);
+ if (!list_empty(&e->list))
+ list_del_init(&e->list);
+ write_unlock(&misc->entries_lock);
+ put_binfmt_handler(e);
+ }
}
-static void kill_node(Node *e)
+/**
+ * remove_binfmt_handler - remove a binary type handler
+ * @misc: handle to binfmt_misc instance
+ * @e: binary type handler to remove
+ *
+ * Remove a binary type handler from the list of binary type handlers and
+ * remove its associated dentry. This is called from
+ * binfmt_{entry,status}_write(). In the future, we might want to think about
+ * adding a proper ->unlink() method to binfmt_misc instead of forcing caller's
+ * to use writes to files in order to delete binary type handlers. But it has
+ * worked for so long that it's not a pressing issue.
+ */
+static void remove_binfmt_handler(struct binfmt_misc *misc, Node *e)
{
- struct dentry *dentry;
-
- write_lock(&entries_lock);
+ write_lock(&misc->entries_lock);
list_del_init(&e->list);
- write_unlock(&entries_lock);
-
- dentry = e->dentry;
- drop_nlink(d_inode(dentry));
- d_drop(dentry);
- dput(dentry);
- simple_release_fs(&bm_mnt, &entry_count);
+ write_unlock(&misc->entries_lock);
+ locked_recursive_removal(e->dentry, NULL);
}
/* /<entry> */
@@ -603,8 +718,8 @@ bm_entry_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- struct dentry *root;
- Node *e = file_inode(file)->i_private;
+ struct inode *inode = file_inode(file);
+ Node *e = inode->i_private;
int res = parse_command(buffer, count);
switch (res) {
@@ -618,13 +733,22 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
break;
case 3:
/* Delete this handler. */
- root = file_inode(file)->i_sb->s_root;
- inode_lock(d_inode(root));
+ inode = d_inode(inode->i_sb->s_root);
+ inode_lock_nested(inode, I_MUTEX_PARENT);
+ /*
+ * In order to add new element or remove elements from the list
+ * via bm_{entry,register,status}_write() inode_lock() on the
+ * root inode must be held.
+ * The lock is exclusive ensuring that the list can't be
+ * modified. Only load_misc_binary() can access the list, and it does
+ * so read-only. So we only need to take the write lock when we
+ * actually remove the entry from the list.
+ */
if (!list_empty(&e->list))
- kill_node(e);
+ remove_binfmt_handler(i_binfmt_misc(inode), e);
- inode_unlock(d_inode(root));
+ inode_unlock(inode);
break;
default:
return res;
@@ -641,13 +765,41 @@ static const struct file_operations bm_entry_operations = {
/* /register */
+/* add to filesystem */
+static int add_entry(Node *e, struct super_block *sb)
+{
+ struct dentry *dentry = simple_start_creating(sb->s_root, e->name);
+ struct inode *inode;
+ struct binfmt_misc *misc;
+
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ inode = bm_get_inode(sb, S_IFREG | 0644);
+ if (unlikely(!inode)) {
+ simple_done_creating(dentry);
+ return -ENOMEM;
+ }
+
+ refcount_set(&e->users, 1);
+ e->dentry = dentry;
+ inode->i_private = e;
+ inode->i_fop = &bm_entry_operations;
+
+ d_make_persistent(dentry, inode);
+ misc = i_binfmt_misc(inode);
+ write_lock(&misc->entries_lock);
+ list_add(&e->list, &misc->entries);
+ write_unlock(&misc->entries_lock);
+ simple_done_creating(dentry);
+ return 0;
+}
+
static ssize_t bm_register_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
Node *e;
- struct inode *inode;
struct super_block *sb = file_inode(file)->i_sb;
- struct dentry *root = sb->s_root, *dentry;
int err = 0;
struct file *f = NULL;
@@ -657,7 +809,15 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
return PTR_ERR(e);
if (e->flags & MISC_FMT_OPEN_FILE) {
- f = open_exec(e->interpreter);
+ /*
+ * Now that we support unprivileged binfmt_misc mounts make
+ * sure we use the credentials that the register @file was
+ * opened with to also open the interpreter. Before that this
+ * didn't matter much as only a privileged process could open
+ * the register file.
+ */
+ scoped_with_creds(file->f_cred)
+ f = open_exec(e->interpreter);
if (IS_ERR(f)) {
pr_notice("register: failed to install interpreter file %s\n",
e->interpreter);
@@ -667,47 +827,12 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
e->interp_file = f;
}
- inode_lock(d_inode(root));
- dentry = lookup_one_len(e->name, root, strlen(e->name));
- err = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out;
-
- err = -EEXIST;
- if (d_really_is_positive(dentry))
- goto out2;
-
- inode = bm_get_inode(sb, S_IFREG | 0644);
-
- err = -ENOMEM;
- if (!inode)
- goto out2;
-
- err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count);
- if (err) {
- iput(inode);
- inode = NULL;
- goto out2;
- }
-
- e->dentry = dget(dentry);
- inode->i_private = e;
- inode->i_fop = &bm_entry_operations;
-
- d_instantiate(dentry, inode);
- write_lock(&entries_lock);
- list_add(&e->list, &entries);
- write_unlock(&entries_lock);
-
- err = 0;
-out2:
- dput(dentry);
-out:
- inode_unlock(d_inode(root));
-
+ err = add_entry(e, sb);
if (err) {
- if (f)
+ if (f) {
+ exe_file_allow_write_access(f);
filp_close(f, NULL);
+ }
kfree(e);
return err;
}
@@ -724,35 +849,50 @@ static const struct file_operations bm_register_operations = {
static ssize_t
bm_status_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- char *s = enabled ? "enabled\n" : "disabled\n";
+ struct binfmt_misc *misc;
+ char *s;
+ misc = i_binfmt_misc(file_inode(file));
+ s = misc->enabled ? "enabled\n" : "disabled\n";
return simple_read_from_buffer(buf, nbytes, ppos, s, strlen(s));
}
static ssize_t bm_status_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
+ struct binfmt_misc *misc;
int res = parse_command(buffer, count);
- struct dentry *root;
+ Node *e, *next;
+ struct inode *inode;
+ misc = i_binfmt_misc(file_inode(file));
switch (res) {
case 1:
/* Disable all handlers. */
- enabled = 0;
+ misc->enabled = false;
break;
case 2:
/* Enable all handlers. */
- enabled = 1;
+ misc->enabled = true;
break;
case 3:
/* Delete all handlers. */
- root = file_inode(file)->i_sb->s_root;
- inode_lock(d_inode(root));
+ inode = d_inode(file_inode(file)->i_sb->s_root);
+ inode_lock_nested(inode, I_MUTEX_PARENT);
- while (!list_empty(&entries))
- kill_node(list_first_entry(&entries, Node, list));
+ /*
+ * In order to add new element or remove elements from the list
+ * via bm_{entry,register,status}_write() inode_lock() on the
+ * root inode must be held.
+ * The lock is exclusive ensuring that the list can't be
+ * modified. Only load_misc_binary() can access the list, and it does
+ * so read-only. So we only need to take the write lock when we
+ * actually remove the entry from the list.
+ */
+ list_for_each_entry_safe(e, next, &misc->entries, list)
+ remove_binfmt_handler(misc, e);
- inode_unlock(d_inode(root));
+ inode_unlock(inode);
break;
default:
return res;
@@ -769,32 +909,100 @@ static const struct file_operations bm_status_operations = {
/* Superblock handling */
+static void bm_put_super(struct super_block *sb)
+{
+ struct user_namespace *user_ns = sb->s_fs_info;
+
+ sb->s_fs_info = NULL;
+ put_user_ns(user_ns);
+}
+
static const struct super_operations s_ops = {
.statfs = simple_statfs,
.evict_inode = bm_evict_inode,
+ .put_super = bm_put_super,
};
static int bm_fill_super(struct super_block *sb, struct fs_context *fc)
{
int err;
+ struct user_namespace *user_ns = sb->s_user_ns;
+ struct binfmt_misc *misc;
static const struct tree_descr bm_files[] = {
[2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
[3] = {"register", &bm_register_operations, S_IWUSR},
/* last one */ {""}
};
+ if (WARN_ON(user_ns != current_user_ns()))
+ return -EINVAL;
+
+ /*
+ * Lazily allocate a new binfmt_misc instance for this namespace, i.e.
+ * do it here during the first mount of binfmt_misc. We don't need to
+ * waste memory for every user namespace allocation. It's likely much
+ * more common to not mount a separate binfmt_misc instance than it is
+ * to mount one.
+ *
+ * While multiple superblocks can exist they are keyed by userns in
+ * s_fs_info for binfmt_misc. Hence, the vfs guarantees that
+ * bm_fill_super() is called exactly once whenever a binfmt_misc
+ * superblock for a userns is created. This in turn lets us conclude
+ * that when a binfmt_misc superblock is created for the first time for
+ * a userns there's no one racing us. Therefore we don't need any
+ * barriers when we dereference binfmt_misc.
+ */
+ misc = user_ns->binfmt_misc;
+ if (!misc) {
+ /*
+ * If it turns out that most user namespaces actually want to
+ * register their own binary type handler and therefore all
+ * create their own separate binfmt_misc mounts we should
+ * consider turning this into a kmem cache.
+ */
+ misc = kzalloc(sizeof(struct binfmt_misc), GFP_KERNEL);
+ if (!misc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&misc->entries);
+ rwlock_init(&misc->entries_lock);
+
+ /* Pairs with smp_load_acquire() in load_binfmt_misc(). */
+ smp_store_release(&user_ns->binfmt_misc, misc);
+ }
+
+ /*
+ * When the binfmt_misc superblock for this userns is shutdown
+ * ->enabled might have been set to false and we don't reinitialize
+ * ->enabled again in put_super() as someone might already be mounting
+ * binfmt_misc again. It also would be pointless since by the time
+ * ->put_super() is called we know that the binary type list for this
+ * binfmt_misc mount is empty, making load_misc_binary() return
+ * -ENOEXEC independent of whether ->enabled is true. Instead, if
+ * someone mounts binfmt_misc for the first time or again we simply
+ * reset ->enabled to true.
+ */
+ misc->enabled = true;
+
err = simple_fill_super(sb, BINFMTFS_MAGIC, bm_files);
if (!err)
sb->s_op = &s_ops;
return err;
}
+static void bm_free(struct fs_context *fc)
+{
+ if (fc->s_fs_info)
+ put_user_ns(fc->s_fs_info);
+}
+
static int bm_get_tree(struct fs_context *fc)
{
- return get_tree_single(fc, bm_fill_super);
+ return get_tree_keyed(fc, bm_fill_super, get_user_ns(fc->user_ns));
}
static const struct fs_context_operations bm_context_ops = {
+ .free = bm_free,
.get_tree = bm_get_tree,
};
@@ -813,7 +1021,8 @@ static struct file_system_type bm_fs_type = {
.owner = THIS_MODULE,
.name = "binfmt_misc",
.init_fs_context = bm_init_fs_context,
- .kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
+ .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("binfmt_misc");
@@ -833,4 +1042,5 @@ static void __exit exit_misc_binfmt(void)
core_initcall(init_misc_binfmt);
module_exit(exit_misc_binfmt);
+MODULE_DESCRIPTION("Kernel support for miscellaneous binaries");
MODULE_LICENSE("GPL");
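
The binfmt_misc changes above make the filesystem mountable per user namespace (FS_USERNS_MOUNT) while leaving the registration interface itself unchanged. A hedged user-space sketch of registering a handler through the conventional mount point; the handler name, magic and interpreter path are illustrative only, and the F flag corresponds to the MISC_FMT_OPEN_FILE behaviour shown in the hunks above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Format: :name:type:offset:magic:mask:interpreter:flags
	 * F pins the interpreter open at registration time. */
	const char *rule = ":DOSWin:M::MZ::/usr/bin/wine:F";
	int fd = open("/proc/sys/fs/binfmt_misc/register", O_WRONLY);

	if (fd < 0 || write(fd, rule, strlen(rule)) < 0)
		perror("binfmt_misc register");
	if (fd >= 0)
		close(fd);
	return 0;
}
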
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 1b6625e95958..637daf6e4d45 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -155,4 +155,5 @@ static void __exit exit_script_binfmt(void)
core_initcall(init_script_binfmt);
module_exit(exit_script_binfmt);
+MODULE_DESCRIPTION("Kernel support for scripts starting with #!");
MODULE_LICENSE("GPL");
diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c
new file mode 100644
index 000000000000..5ace2511fec5
--- /dev/null
+++ b/fs/bpf_fs_kfuncs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <linux/bpf.h>
+#include <linux/bpf_lsm.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/file.h>
+#include <linux/kernfs.h>
+#include <linux/mm.h>
+#include <linux/xattr.h>
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_get_task_exe_file - get a reference on the exe_file struct file member of
+ * the mm_struct that is nested within the supplied
+ * task_struct
+ * @task: task_struct of which the nested mm_struct exe_file member to get a
+ * reference on
+ *
+ * Get a reference on the exe_file struct file member field of the mm_struct
+ * nested within the supplied *task*. The referenced file pointer acquired by
+ * this BPF kfunc must be released using bpf_put_file(). Failing to call
+ * bpf_put_file() on the returned referenced struct file pointer that has been
+ * acquired by this BPF kfunc will result in the BPF program being rejected by
+ * the BPF verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ *
+ * Internally, this BPF kfunc leans on get_task_exe_file(), such that calling
+ * bpf_get_task_exe_file() would be analogous to calling get_task_exe_file()
+ * directly in kernel context.
+ *
+ * Return: A referenced struct file pointer to the exe_file member of the
+ * mm_struct that is nested within the supplied *task*. On error, NULL is
+ * returned.
+ */
+__bpf_kfunc struct file *bpf_get_task_exe_file(struct task_struct *task)
+{
+ return get_task_exe_file(task);
+}
+
+/**
+ * bpf_put_file - put a reference on the supplied file
+ * @file: file to put a reference on
+ *
+ * Put a reference on the supplied *file*. Only referenced file pointers may be
+ * passed to this BPF kfunc. Attempting to pass an unreferenced file pointer, or
+ * any other arbitrary pointer for that matter, will result in the BPF program
+ * being rejected by the BPF verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ */
+__bpf_kfunc void bpf_put_file(struct file *file)
+{
+ fput(file);
+}
+
+/**
+ * bpf_path_d_path - resolve the pathname for the supplied path
+ * @path: path to resolve the pathname for
+ * @buf: buffer to return the resolved pathname in
+ * @buf__sz: length of the supplied buffer
+ *
+ * Resolve the pathname for the supplied *path* and store it in *buf*. This BPF
+ * kfunc is the safer variant of the legacy bpf_d_path() helper and should be
+ * used in place of bpf_d_path() whenever possible. It enforces KF_TRUSTED_ARGS
+ * semantics, meaning that the supplied *path* must itself hold a valid
+ * reference, or else the BPF program will be outright rejected by the BPF
+ * verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ *
+ * Return: A positive integer corresponding to the length of the resolved
+ * pathname in *buf*, including the NUL termination character. On error, a
+ * negative integer is returned.
+ */
+__bpf_kfunc int bpf_path_d_path(const struct path *path, char *buf, size_t buf__sz)
+{
+ int len;
+ char *ret;
+
+ if (!buf__sz)
+ return -EINVAL;
+
+ ret = d_path(path, buf, buf__sz);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ len = buf + buf__sz - ret;
+ memmove(buf, ret, len);
+ return len;
+}
+
+static bool match_security_bpf_prefix(const char *name__str)
+{
+ return !strncmp(name__str, XATTR_NAME_BPF_LSM, XATTR_NAME_BPF_LSM_LEN);
+}
+
+static int bpf_xattr_read_permission(const char *name, struct inode *inode)
+{
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ /* Allow reading xattr with user. and security.bpf. prefix */
+ if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ !match_security_bpf_prefix(name))
+ return -EPERM;
+
+ return inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+}
+
+/**
+ * bpf_get_dentry_xattr - get xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *dentry* and store the output in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefixes "user." or
+ * "security.bpf." are allowed.
+ *
+ * Return: length of the xattr value on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ struct inode *inode = d_inode(dentry);
+ u32 value_len;
+ void *value;
+ int ret;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ ret = bpf_xattr_read_permission(name__str, inode);
+ if (ret)
+ return ret;
+ return __vfs_getxattr(dentry, inode, name__str, value, value_len);
+}
+
+/**
+ * bpf_get_file_xattr - get xattr of a file
+ * @file: file to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *file* and store the output in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefixes "user." or
+ * "security.bpf." are allowed.
+ *
+ * Return: length of the xattr value on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct dentry *dentry;
+
+ dentry = file_dentry(file);
+ return bpf_get_dentry_xattr(dentry, name__str, value_p);
+}
+
+__bpf_kfunc_end_defs();
+
+static int bpf_xattr_write_permission(const char *name, struct inode *inode)
+{
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ /* Only allow setting and removing security.bpf. xattrs */
+ if (!match_security_bpf_prefix(name))
+ return -EPERM;
+
+ return inode_permission(&nop_mnt_idmap, inode, MAY_WRITE);
+}
+
+/**
+ * bpf_set_dentry_xattr_locked - set a xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: xattr value
+ * @flags: flags to pass into filesystem operations
+ *
+ * Set xattr *name__str* of *dentry* to the value in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller already locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ struct inode *inode = d_inode(dentry);
+ const void *value;
+ u32 value_len;
+ int ret;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ ret = bpf_xattr_write_permission(name__str, inode);
+ if (ret)
+ return ret;
+
+ ret = __vfs_setxattr(&nop_mnt_idmap, dentry, inode, name__str,
+ value, value_len, flags);
+ if (!ret) {
+ fsnotify_xattr(dentry);
+
+ /* This xattr is set by BPF LSM, so we do not call
+ * security_inode_post_setxattr. Otherwise, we would
+ * risk deadlocks by calling back to the same kfunc.
+ *
+ * This is the same as security_inode_setsecurity().
+ */
+ }
+ return ret;
+}
+
+/**
+ * bpf_remove_dentry_xattr_locked - remove a xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ *
+ * Remove xattr *name__str* of *dentry*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller already locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ ret = bpf_xattr_write_permission(name__str, inode);
+ if (ret)
+ return ret;
+
+ ret = __vfs_removexattr(&nop_mnt_idmap, dentry, name__str);
+ if (!ret) {
+ fsnotify_xattr(dentry);
+
+ /* This xattr is removed by BPF LSM, so we do not call
+ * security_inode_post_removexattr. Otherwise, we would
+ * risk deadlocks by calling back to the same kfunc.
+ */
+ }
+ return ret;
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_set_dentry_xattr - set a xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: xattr value
+ * @flags: flags to pass into filesystem operations
+ *
+ * Set xattr *name__str* of *dentry* to the value in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller has not locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ inode_lock(inode);
+ ret = bpf_set_dentry_xattr_locked(dentry, name__str, value_p, flags);
+ inode_unlock(inode);
+ return ret;
+}
+
+/**
+ * bpf_remove_dentry_xattr - remove a xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ *
+ * Remove xattr *name__str* of *dentry*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller has not locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_remove_dentry_xattr(struct dentry *dentry, const char *name__str)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ inode_lock(inode);
+ ret = bpf_remove_dentry_xattr_locked(dentry, name__str);
+ inode_unlock(inode);
+ return ret;
+}
+
+#ifdef CONFIG_CGROUPS
+/**
+ * bpf_cgroup_read_xattr - read xattr of a cgroup's node in cgroupfs
+ * @cgroup: cgroup to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *cgroup* and store the output in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: length of the xattr value on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ u32 value_len;
+ void *value;
+
+ /* Only allow reading "user.*" xattrs */
+ if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EPERM;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ return kernfs_xattr_get(cgroup->kn, name__str, value, value_len);
+}
+#endif /* CONFIG_CGROUPS */
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_get_task_exe_file,
+ KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_set_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_remove_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
+
+static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
+ prog->type == BPF_PROG_TYPE_LSM)
+ return 0;
+ return -EACCES;
+}
+
+/* bpf_[set|remove]_dentry_xattr.* hooks have KF_TRUSTED_ARGS and
+ * KF_SLEEPABLE, so they are only available to sleepable hooks with
+ * dentry arguments.
+ *
+ * Setting and removing xattr requires exclusive lock on dentry->d_inode.
+ * Some hooks already locked d_inode, while some hooks have not locked
+ * d_inode. Therefore, we need different kfuncs for different hooks.
+ * Specifically, hooks in the following list (d_inode_locked_hooks)
+ * should call bpf_[set|remove]_dentry_xattr_locked; while other hooks
+ * should call bpf_[set|remove]_dentry_xattr.
+ */
+BTF_SET_START(d_inode_locked_hooks)
+BTF_ID(func, bpf_lsm_inode_post_removexattr)
+BTF_ID(func, bpf_lsm_inode_post_setattr)
+BTF_ID(func, bpf_lsm_inode_post_setxattr)
+BTF_ID(func, bpf_lsm_inode_removexattr)
+BTF_ID(func, bpf_lsm_inode_rmdir)
+BTF_ID(func, bpf_lsm_inode_setattr)
+BTF_ID(func, bpf_lsm_inode_setxattr)
+BTF_ID(func, bpf_lsm_inode_unlink)
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, bpf_lsm_path_unlink)
+BTF_ID(func, bpf_lsm_path_rmdir)
+#endif /* CONFIG_SECURITY_PATH */
+BTF_SET_END(d_inode_locked_hooks)
+
+bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
+{
+ return btf_id_set_contains(&d_inode_locked_hooks, prog->aux->attach_btf_id);
+}
+
+static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &bpf_fs_kfunc_set_ids,
+ .filter = bpf_fs_kfuncs_filter,
+};
+
+static int __init bpf_fs_kfuncs_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+}
+
+late_initcall(bpf_fs_kfuncs_init);
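
bpf_path_d_path() above relies on d_path() building the pathname right-aligned in the buffer and returning a pointer to its first byte; the kfunc then computes the length (NUL included) and moves the string to the front. A freestanding sketch of just that pointer arithmetic, with the buffer contents faked rather than produced by d_path():

#include <stdio.h>
#include <string.h>

/* Mirror the kfunc's arithmetic: 'ret' points into buf, the string runs to
 * the end of the buffer, and the returned length includes the NUL. */
static int move_to_front(char *buf, size_t buf_sz, char *ret)
{
	int len = buf + buf_sz - ret;

	memmove(buf, ret, len);
	return len;
}

int main(void)
{
	char buf[32];
	char *ret = buf + sizeof(buf) - sizeof("/tmp/x");

	memcpy(ret, "/tmp/x", sizeof("/tmp/x"));
	printf("%d %s\n", move_to_front(buf, sizeof(buf), ret), buf);	/* 7 /tmp/x */
	return 0;
}
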
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 520a0f6a7d9e..4438637c8900 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -2,9 +2,10 @@
config BTRFS_FS
tristate "Btrfs filesystem support"
+ select BLK_CGROUP_PUNT_BIO
+ select CRC32
select CRYPTO
select CRYPTO_CRC32C
- select LIBCRC32C
select CRYPTO_XXHASH
select CRYPTO_SHA256
select CRYPTO_BLAKE2B
@@ -17,9 +18,7 @@ config BTRFS_FS
select FS_IOMAP
select RAID6_PQ
select XOR_BLOCKS
- select SRCU
- depends on !PPC_256K_PAGES # powerpc
- depends on !PAGE_SIZE_256KB # hexagon
+ depends on PAGE_SIZE_LESS_THAN_256KB
help
Btrfs is a general purpose copy-on-write filesystem with extents,
@@ -32,7 +31,7 @@ config BTRFS_FS
continue to be mountable and usable by newer kernels.
For more information, please see the web pages at
- http://btrfs.wiki.kernel.org.
+ https://btrfs.readthedocs.io
To compile this file system support as a module, choose M here. The
module will be called btrfs.
@@ -49,43 +48,28 @@ config BTRFS_FS_POSIX_ACL
If you don't know what Access Control Lists are, say N
-config BTRFS_FS_CHECK_INTEGRITY
- bool "Btrfs with integrity check tool compiled in (DANGEROUS)"
- depends on BTRFS_FS
- help
- Adds code that examines all block write requests (including
- writes of the super block). The goal is to verify that the
- state of the filesystem on disk is always consistent, i.e.,
- after a power-loss or kernel panic event the filesystem is
- in a consistent state.
-
- If the integrity check tool is included and activated in
- the mount options, plenty of kernel memory is used, and
- plenty of additional CPU cycles are spent. Enabling this
- functionality is not intended for normal use.
-
- In most cases, unless you are a btrfs developer who needs
- to verify the integrity of (super)-block write requests
- during the run of a regression test, say N
-
config BTRFS_FS_RUN_SANITY_TESTS
bool "Btrfs will run sanity tests upon loading"
depends on BTRFS_FS
help
- This will run some basic sanity tests on the free space cache
- code to make sure it is acting as it should. These are mostly
- regression tests and are only really interesting to btrfs
- developers.
+ This will run sanity tests for core functionality like free space,
+ extent maps, extent io, extent buffers, inodes, qgroups and others,
+ at module load time. These are mostly regression tests and are only
+ interesting to developers.
If unsure, say N.
config BTRFS_DEBUG
bool "Btrfs debugging support"
depends on BTRFS_FS
+ select REF_TRACKER if STACKTRACE_SUPPORT
help
- Enable run-time debugging support for the btrfs filesystem. This may
- enable additional and expensive checks with negative impact on
- performance, or export extra information via sysfs.
+ Enable run-time debugging support for the btrfs filesystem.
+
+ Additional potentially expensive checks, debugging functionality or
+ sysfs exported information is enabled, like leak checks of internal
+ objects, optional forced space fragmentation and /sys/fs/btrfs/debug .
+ This has negative impact on performance.
If unsure, say N.
@@ -93,18 +77,44 @@ config BTRFS_ASSERT
bool "Btrfs assert support"
depends on BTRFS_FS
help
- Enable run-time assertion checking. This will result in panics if
- any of the assertions trip. This is meant for btrfs developers only.
+ Enable run-time assertion checking. Additional safety checks are
+ done, simple enough not to affect performance but verify invariants
+ and assumptions of code to run properly. This may result in panics,
+ and is meant for developers but can be enabled in general.
If unsure, say N.
-config BTRFS_FS_REF_VERIFY
- bool "Btrfs with the ref verify tool compiled in"
+config BTRFS_EXPERIMENTAL
+ bool "Btrfs experimental features"
depends on BTRFS_FS
default n
help
- Enable run-time extent reference verification instrumentation. This
- is meant to be used by btrfs developers for tracking down extent
- reference problems or verifying they didn't break something.
+ Enable experimental features. These features may not be stable enough
+ for end users. This is meant for btrfs developers or users who wish
+ to test the functionality and report problems.
+
+ Current list:
+
+ - COW fixup worker warning - last warning before removing the
+ functionality catching out-of-band page
+ dirtying, not necessary since 5.8
+
+ - RAID mirror read policy - additional read policies for balancing
+ reading from redundant block group
+ profiles (currently: pid, round-robin,
+ fixed devid)
+
+ - send stream protocol v3 - fs-verity support
+
+ - checksum offload mode - sysfs knob to affect when checksums are
+ calculated (at IO time, or in a thread)
+
+ - raid-stripe-tree - additional mapping of extents to devices to
+ support RAID1* profiles on zoned devices,
+ RAID56 not yet supported
+
+ - extent tree v2 - complex rework of extent tracking
+
+ - large folio support
If unsure, say N.
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 3dcf9bcc2326..743d7677b175 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -11,34 +11,37 @@ condflags := \
$(call cc-option, -Wunused-but-set-variable) \
$(call cc-option, -Wunused-const-variable) \
$(call cc-option, -Wpacked-not-aligned) \
- $(call cc-option, -Wstringop-truncation)
+ $(call cc-option, -Wstringop-truncation) \
+ $(call cc-option, -Wmaybe-uninitialized)
subdir-ccflags-y += $(condflags)
# The following turn off the warnings enabled by -Wextra
subdir-ccflags-y += -Wno-missing-field-initializers
subdir-ccflags-y += -Wno-sign-compare
subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-shift-negative-value
obj-$(CONFIG_BTRFS_FS) := btrfs.o
btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
file-item.o inode-item.o disk-io.o \
- transaction.o inode.o file.o tree-defrag.o \
- extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
+ transaction.o inode.o file.o defrag.o \
+ extent_map.o sysfs.o accessors.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
- reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
+ backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
- subpage.o tree-mod-log.o
+ subpage.o tree-mod-log.o extent-io-tree.o fs.o messages.o bio.o \
+ lru_cache.o raid-stripe-tree.o fiemap.o direct-io.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
-btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
-btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
+btrfs-$(CONFIG_BTRFS_DEBUG) += ref-verify.o
btrfs-$(CONFIG_BLK_DEV_ZONED) += zoned.o
btrfs-$(CONFIG_FS_VERITY) += verity.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
- tests/free-space-tree-tests.o tests/extent-map-tests.o
+ tests/free-space-tree-tests.o tests/extent-map-tests.o \
+ tests/raid-stripe-tree-tests.o tests/delayed-refs-tests.o
diff --git a/fs/btrfs/accessors.c b/fs/btrfs/accessors.c
new file mode 100644
index 000000000000..1248aa2535d3
--- /dev/null
+++ b/fs/btrfs/accessors.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2007 Oracle. All rights reserved.
+ */
+
+#include <linux/unaligned.h>
+#include "messages.h"
+#include "extent_io.h"
+#include "fs.h"
+#include "accessors.h"
+
+static void __cold report_setget_bounds(const struct extent_buffer *eb,
+ const void *ptr, unsigned off, int size)
+{
+ unsigned long member_offset = (unsigned long)ptr + off;
+
+ btrfs_warn(eb->fs_info,
+ "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
+ (member_offset > eb->len ? "start" : "end"),
+ (unsigned long)ptr, eb->start, member_offset, size);
+}
+
+/* Copy bytes from @src1 and @src2 to @dest. */
+static __always_inline void memcpy_split_src(char *dest, const char *src1,
+ const char *src2, const size_t len1,
+ const size_t total)
+{
+ memcpy(dest, src1, len1);
+ memcpy(dest + len1, src2, total - len1);
+}
+
+/*
+ * Macro templates that define helpers to read/write extent buffer data of a
+ * given size, that are also used via ctree.h for access to item members by
+ * specialized helpers.
+ *
+ * Generic helpers:
+ * - btrfs_set_8 (for 8/16/32/64)
+ * - btrfs_get_8 (for 8/16/32/64)
+ *
+ * The set/get functions handle data spanning two pages transparently, in case
+ * metadata block size is larger than page. Every pointer to metadata items is
+ * an offset into the extent buffer page array, cast to a specific type. This
+ * gives us all the type checking.
+ *
+ * The extent buffer pages stored in the array folios may not form a contiguous
+ * physical range, but the API functions assume the linear offset to the range
+ * from 0 to metadata node size.
+ */
+
+#define DEFINE_BTRFS_SETGET_BITS(bits) \
+u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off) \
+{ \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long idx = get_eb_folio_index(eb, member_offset);\
+ const unsigned long oif = get_eb_offset_in_folio(eb, \
+ member_offset);\
+ char *kaddr = folio_address(eb->folios[idx]) + oif; \
+ const int part = eb->folio_size - oif; \
+ u8 lebytes[sizeof(u##bits)]; \
+ \
+ if (unlikely(member_offset + sizeof(u##bits) > eb->len)) { \
+ report_setget_bounds(eb, ptr, off, sizeof(u##bits)); \
+ return 0; \
+ } \
+ if (INLINE_EXTENT_BUFFER_PAGES == 1 || sizeof(u##bits) == 1 || \
+ likely(sizeof(u##bits) <= part)) \
+ return get_unaligned_le##bits(kaddr); \
+ \
+ if (sizeof(u##bits) == 2) { \
+ lebytes[0] = *kaddr; \
+ kaddr = folio_address(eb->folios[idx + 1]); \
+ lebytes[1] = *kaddr; \
+ } else { \
+ memcpy_split_src(lebytes, kaddr, \
+ folio_address(eb->folios[idx + 1]), \
+ part, sizeof(u##bits)); \
+ } \
+ return get_unaligned_le##bits(lebytes); \
+} \
+void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val) \
+{ \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long idx = get_eb_folio_index(eb, member_offset);\
+ const unsigned long oif = get_eb_offset_in_folio(eb, \
+ member_offset);\
+ char *kaddr = folio_address(eb->folios[idx]) + oif; \
+ const int part = eb->folio_size - oif; \
+ u8 lebytes[sizeof(u##bits)]; \
+ \
+ if (unlikely(member_offset + sizeof(u##bits) > eb->len)) { \
+ report_setget_bounds(eb, ptr, off, sizeof(u##bits)); \
+ return; \
+ } \
+ if (INLINE_EXTENT_BUFFER_PAGES == 1 || sizeof(u##bits) == 1 || \
+ likely(sizeof(u##bits) <= part)) { \
+ put_unaligned_le##bits(val, kaddr); \
+ return; \
+ } \
+ put_unaligned_le##bits(val, lebytes); \
+ if (sizeof(u##bits) == 2) { \
+ *kaddr = lebytes[0]; \
+ kaddr = folio_address(eb->folios[idx + 1]); \
+ *kaddr = lebytes[1]; \
+ } else { \
+ memcpy(kaddr, lebytes, part); \
+ kaddr = folio_address(eb->folios[idx + 1]); \
+ memcpy(kaddr, lebytes + part, sizeof(u##bits) - part); \
+ } \
+}
+
+DEFINE_BTRFS_SETGET_BITS(8)
+DEFINE_BTRFS_SETGET_BITS(16)
+DEFINE_BTRFS_SETGET_BITS(32)
+DEFINE_BTRFS_SETGET_BITS(64)
+
+void btrfs_node_key(const struct extent_buffer *eb,
+ struct btrfs_disk_key *disk_key, int nr)
+{
+ unsigned long ptr = btrfs_node_key_ptr_offset(eb, nr);
+ read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
+ struct btrfs_key_ptr, key, disk_key);
+}
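
When a field straddles the boundary between two folios, the generated get/set helpers above assemble the little-endian value byte-wise through a small scratch array. A freestanding sketch of that split read, assuming the two fragments are just ordinary C buffers rather than folio addresses:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a little-endian u32 whose bytes are split across two buffers:
 * 'part' bytes at the tail of src1, the remainder at the head of src2. */
static uint32_t get_le32_split(const uint8_t *src1, const uint8_t *src2, size_t part)
{
	uint8_t lebytes[4];

	memcpy(lebytes, src1, part);
	memcpy(lebytes + part, src2, sizeof(lebytes) - part);
	return (uint32_t)lebytes[0] | ((uint32_t)lebytes[1] << 8) |
	       ((uint32_t)lebytes[2] << 16) | ((uint32_t)lebytes[3] << 24);
}

int main(void)
{
	const uint8_t end_of_folio0[] = { 0x78, 0x56 };		/* low bytes */
	const uint8_t start_of_folio1[] = { 0x34, 0x12 };	/* high bytes */

	printf("0x%08x\n", get_le32_split(end_of_folio0, start_of_folio1, 2));	/* 0x12345678 */
	return 0;
}
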
diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
new file mode 100644
index 000000000000..78721412951c
--- /dev/null
+++ b/fs/btrfs/accessors.h
@@ -0,0 +1,1020 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_ACCESSORS_H
+#define BTRFS_ACCESSORS_H
+
+#include <linux/unaligned.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/align.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "fs.h"
+#include "extent_io.h"
+
+struct extent_buffer;
+
+/*
+ * Some macros to generate set/get functions for the struct fields. This
+ * assumes there is a lefoo_to_cpu for every type, so let's make a simple one
+ * for u8:
+ */
+#define le8_to_cpu(v) (v)
+#define cpu_to_le8(v) (v)
+#define __le8 u8
+
+static inline u8 get_unaligned_le8(const void *p)
+{
+ return *(const u8 *)p;
+}
+
+static inline void put_unaligned_le8(u8 val, void *p)
+{
+ *(u8 *)p = val;
+}
+
+#define read_eb_member(eb, ptr, type, member, result) (\
+ read_extent_buffer(eb, (char *)(result), \
+ ((unsigned long)(ptr)) + \
+ offsetof(type, member), \
+ sizeof_field(type, member)))
+
+#define write_eb_member(eb, ptr, type, member, source) ( \
+ write_extent_buffer(eb, (const char *)(source), \
+ ((unsigned long)(ptr)) + \
+ offsetof(type, member), \
+ sizeof_field(type, member)))
+
+#define DECLARE_BTRFS_SETGET_BITS(bits) \
+u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off); \
+void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val);
+
+DECLARE_BTRFS_SETGET_BITS(8)
+DECLARE_BTRFS_SETGET_BITS(16)
+DECLARE_BTRFS_SETGET_BITS(32)
+DECLARE_BTRFS_SETGET_BITS(64)
+
+#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
+static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
+ const type *s) \
+{ \
+ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
+ return btrfs_get_##bits(eb, s, offsetof(type, member)); \
+} \
+static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \
+ u##bits val) \
+{ \
+ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
+ btrfs_set_##bits(eb, s, offsetof(type, member), val); \
+}
+
+#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
+static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
+{ \
+ const type *p = folio_address(eb->folios[0]) + \
+ offset_in_page(eb->start); \
+ return get_unaligned_le##bits(&p->member); \
+} \
+static inline void btrfs_set_##name(const struct extent_buffer *eb, \
+ u##bits val) \
+{ \
+ type *p = folio_address(eb->folios[0]) + offset_in_page(eb->start); \
+ put_unaligned_le##bits(val, &p->member); \
+}
+
+#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
+static inline u##bits btrfs_##name(const type *s) \
+{ \
+ return get_unaligned_le##bits(&s->member); \
+} \
+static inline void btrfs_set_##name(type *s, u##bits val) \
+{ \
+ put_unaligned_le##bits(val, &s->member); \
+}
+
+static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb,
+ struct btrfs_dev_item *s)
+{
+ static_assert(sizeof(u64) == sizeof_field(struct btrfs_dev_item, total_bytes));
+ return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes));
+}
+static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb,
+ struct btrfs_dev_item *s,
+ u64 val)
+{
+ static_assert(sizeof(u64) == sizeof_field(struct btrfs_dev_item, total_bytes));
+ WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
+ btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
+}
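As a hedged usage sketch (not from the patch), the difference between the extent-buffer accessors and the stack_ variants: the former dereference the leaf's folios, the latter a plain in-memory copy of the item. The helper name and out-parameters below are illustrative.

static void sketch_total_bytes_both_ways(struct extent_buffer *leaf,
					 struct btrfs_dev_item *di_in_leaf,
					 u64 *via_eb, u64 *via_stack)
{
	struct btrfs_dev_item di_copy;

	/* eb accessor: reads the LE field straight out of the leaf folios. */
	*via_eb = btrfs_device_total_bytes(leaf, di_in_leaf);

	/* stack accessor: works on an ordinary in-memory copy of the item. */
	read_extent_buffer(leaf, &di_copy, (unsigned long)di_in_leaf,
			   sizeof(di_copy));
	*via_stack = btrfs_stack_device_total_bytes(&di_copy);
}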
+
+BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
+BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
+BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
+BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
+BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, start_offset, 64);
+BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32);
+BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64);
+BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32);
+BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8);
+BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8);
+BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item,
+ total_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item,
+ bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item,
+ io_align, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item,
+ io_width, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item,
+ sector_size, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, dev_group, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item,
+ seek_speed, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item,
+ bandwidth, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item,
+ generation, 64);
+
+static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d)
+{
+ return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid);
+}
+
+static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d)
+{
+ return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid);
+}
+
+BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64);
+BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64);
+BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64);
+BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32);
+BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32);
+BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32);
+BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64);
+BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16);
+BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16);
+BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64);
+BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64);
+
+static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s)
+{
+ return (char *)s + offsetof(struct btrfs_stripe, dev_uuid);
+}
+
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk,
+ stripe_len, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, io_align, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, io_width, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk,
+ sector_size, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk,
+ num_stripes, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk,
+ sub_stripes, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64);
+
+static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, int nr)
+{
+ unsigned long offset = (unsigned long)c;
+
+ offset += offsetof(struct btrfs_chunk, stripe);
+ offset += nr * sizeof(struct btrfs_stripe);
+ return (struct btrfs_stripe *)offset;
+}
+
+static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)
+{
+ return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));
+}
+
+static inline u64 btrfs_stripe_offset_nr(const struct extent_buffer *eb,
+ struct btrfs_chunk *c, int nr)
+{
+ return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
+}
+
+static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
+ struct btrfs_chunk *c, int nr,
+ u64 val)
+{
+ btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
+}
+
+static inline u64 btrfs_stripe_devid_nr(const struct extent_buffer *eb,
+ struct btrfs_chunk *c, int nr)
+{
+ return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
+}
+
+static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
+ struct btrfs_chunk *c, int nr,
+ u64 val)
+{
+ btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
+}
+
+/* struct btrfs_block_group_item */
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item,
+ used, 64);
+BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid,
+ struct btrfs_block_group_item, chunk_objectid, 64);
+
+BTRFS_SETGET_FUNCS(block_group_chunk_objectid,
+ struct btrfs_block_group_item, chunk_objectid, 64);
+BTRFS_SETGET_FUNCS(block_group_flags, struct btrfs_block_group_item, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags,
+ struct btrfs_block_group_item, flags, 64);
+
+/* struct btrfs_free_space_info */
+BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
+ extent_count, 32);
+BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);
+
+/* struct btrfs_inode_ref */
+BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
+BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_ref_index, struct btrfs_inode_ref, index, 64);
+
+/* struct btrfs_inode_extref */
+BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref,
+ parent_objectid, 64);
+BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref,
+ name_len, 16);
+BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64);
+
+/* struct btrfs_inode_item */
+BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
+BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
+BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64);
+BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64);
+BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64);
+BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
+ generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
+ sequence, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
+ transid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, nbytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
+ block_group, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
+BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+
+BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
+BTRFS_SETGET_FUNCS(raid_stride_physical, struct btrfs_raid_stride, physical, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_devid, struct btrfs_raid_stride, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_physical, struct btrfs_raid_stride, physical, 64);
+
+/* struct btrfs_dev_extent */
+BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, chunk_tree, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
+ chunk_objectid, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent,
+ chunk_offset, 64);
+BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_extent_chunk_tree, struct btrfs_dev_extent,
+ chunk_tree, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_extent_chunk_objectid, struct btrfs_dev_extent,
+ chunk_objectid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_extent_chunk_offset, struct btrfs_dev_extent,
+ chunk_offset, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_extent_length, struct btrfs_dev_extent, length, 64);
+
+BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64);
+BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, generation, 64);
+BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64);
+
+BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8);
+
+static inline void btrfs_tree_block_key(const struct extent_buffer *eb,
+ struct btrfs_tree_block_info *item,
+ struct btrfs_disk_key *key)
+{
+ read_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
+}
+
+static inline void btrfs_set_tree_block_key(const struct extent_buffer *eb,
+ struct btrfs_tree_block_info *item,
+ const struct btrfs_disk_key *key)
+{
+ write_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
+}
+
+BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, root, 64);
+BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref,
+ objectid, 64);
+BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref,
+ offset, 64);
+BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, count, 32);
+
+BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, count, 32);
+
+BTRFS_SETGET_FUNCS(extent_owner_ref_root_id, struct btrfs_extent_owner_ref,
+ root_id, 64);
+
+BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref,
+ type, 8);
+BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref,
+ offset, 64);
+
+static inline u32 btrfs_extent_inline_ref_size(int type)
+{
+ if (type == BTRFS_TREE_BLOCK_REF_KEY ||
+ type == BTRFS_SHARED_BLOCK_REF_KEY)
+ return sizeof(struct btrfs_extent_inline_ref);
+ if (type == BTRFS_SHARED_DATA_REF_KEY)
+ return sizeof(struct btrfs_shared_data_ref) +
+ sizeof(struct btrfs_extent_inline_ref);
+ if (type == BTRFS_EXTENT_DATA_REF_KEY)
+ return sizeof(struct btrfs_extent_data_ref) +
+ offsetof(struct btrfs_extent_inline_ref, offset);
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY)
+ return sizeof(struct btrfs_extent_inline_ref);
+ return 0;
+}
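A hedged sketch of the pattern btrfs_extent_inline_ref_size() supports: walking the variable-sized inline references packed after a btrfs_extent_item. The ptr/end bounds are assumed to come from the item's offset and size in the leaf.

static void sketch_walk_inline_refs(const struct extent_buffer *leaf,
				    unsigned long ptr, unsigned long end)
{
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		/* ... handle the reference according to its type ... */
		ptr += btrfs_extent_inline_ref_size(type);
	}
}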
+
+/* struct btrfs_node */
+BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64);
+BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, blockptr, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr,
+ generation, 64);
+
+static inline u64 btrfs_node_blockptr(const struct extent_buffer *eb, int nr)
+{
+ unsigned long ptr;
+
+ ptr = offsetof(struct btrfs_node, ptrs) +
+ sizeof(struct btrfs_key_ptr) * nr;
+ return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr);
+}
+
+static inline void btrfs_set_node_blockptr(const struct extent_buffer *eb,
+ int nr, u64 val)
+{
+ unsigned long ptr;
+
+ ptr = offsetof(struct btrfs_node, ptrs) +
+ sizeof(struct btrfs_key_ptr) * nr;
+ btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val);
+}
+
+static inline u64 btrfs_node_ptr_generation(const struct extent_buffer *eb, int nr)
+{
+ unsigned long ptr;
+
+ ptr = offsetof(struct btrfs_node, ptrs) +
+ sizeof(struct btrfs_key_ptr) * nr;
+ return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr);
+}
+
+static inline void btrfs_set_node_ptr_generation(const struct extent_buffer *eb,
+ int nr, u64 val)
+{
+ unsigned long ptr;
+
+ ptr = offsetof(struct btrfs_node, ptrs) +
+ sizeof(struct btrfs_key_ptr) * nr;
+ btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val);
+}
+
+static inline unsigned long btrfs_node_key_ptr_offset(const struct extent_buffer *eb, int nr)
+{
+ return offsetof(struct btrfs_node, ptrs) +
+ sizeof(struct btrfs_key_ptr) * nr;
+}
+
+void btrfs_node_key(const struct extent_buffer *eb,
+ struct btrfs_disk_key *disk_key, int nr);
+
+static inline void btrfs_set_node_key(const struct extent_buffer *eb,
+ const struct btrfs_disk_key *disk_key, int nr)
+{
+ unsigned long ptr;
+
+ ptr = btrfs_node_key_ptr_offset(eb, nr);
+ write_eb_member(eb, (struct btrfs_key_ptr *)ptr,
+ struct btrfs_key_ptr, key, disk_key);
+}
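A hedged sketch showing how these node accessors typically combine when scanning the key pointers of an internal node; btrfs_header_nritems() and btrfs_node_key_to_cpu() are defined further down in this header.

static void sketch_walk_node_ptrs(const struct extent_buffer *node)
{
	const u32 nritems = btrfs_header_nritems(node);

	for (u32 i = 0; i < nritems; i++) {
		struct btrfs_key key;
		const u64 blockptr = btrfs_node_blockptr(node, i);
		const u64 gen = btrfs_node_ptr_generation(node, i);

		btrfs_node_key_to_cpu(node, &key, i);
		/* ... use key/blockptr/gen to read and verify the child ... */
	}
}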
+
+/* struct btrfs_item */
+BTRFS_SETGET_FUNCS(raw_item_offset, struct btrfs_item, offset, 32);
+BTRFS_SETGET_FUNCS(raw_item_size, struct btrfs_item, size, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32);
+
+static inline unsigned long btrfs_item_nr_offset(const struct extent_buffer *eb, int nr)
+{
+ return offsetof(struct btrfs_leaf, items) +
+ sizeof(struct btrfs_item) * nr;
+}
+
+static inline struct btrfs_item *btrfs_item_nr(const struct extent_buffer *eb, int nr)
+{
+ return (struct btrfs_item *)btrfs_item_nr_offset(eb, nr);
+}
+
+#define BTRFS_ITEM_SETGET_FUNCS(member) \
+static inline u32 btrfs_item_##member(const struct extent_buffer *eb, int slot) \
+{ \
+ return btrfs_raw_item_##member(eb, btrfs_item_nr(eb, slot)); \
+} \
+static inline void btrfs_set_item_##member(const struct extent_buffer *eb, \
+ int slot, u32 val) \
+{ \
+ btrfs_set_raw_item_##member(eb, btrfs_item_nr(eb, slot), val); \
+}
+
+BTRFS_ITEM_SETGET_FUNCS(offset)
+BTRFS_ITEM_SETGET_FUNCS(size)
+
+static inline u32 btrfs_item_data_end(const struct extent_buffer *eb, int nr)
+{
+ return btrfs_item_offset(eb, nr) + btrfs_item_size(eb, nr);
+}
+
+static inline void btrfs_item_key(const struct extent_buffer *eb,
+ struct btrfs_disk_key *disk_key, int nr)
+{
+ struct btrfs_item *item = btrfs_item_nr(eb, nr);
+
+ read_eb_member(eb, item, struct btrfs_item, key, disk_key);
+}
+
+static inline void btrfs_set_item_key(struct extent_buffer *eb,
+ const struct btrfs_disk_key *disk_key, int nr)
+{
+ struct btrfs_item *item = btrfs_item_nr(eb, nr);
+
+ write_eb_member(eb, item, struct btrfs_item, key, disk_key);
+}
+
+BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64);
+
+/* struct btrfs_root_ref */
+BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64);
+BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64);
+BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_root_ref_dirid, struct btrfs_root_ref, dirid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_root_ref_sequence, struct btrfs_root_ref, sequence, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_root_ref_name_len, struct btrfs_root_ref, name_len, 16);
+
+/* struct btrfs_dir_item */
+BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16);
+BTRFS_SETGET_FUNCS(dir_flags, struct btrfs_dir_item, type, 8);
+BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16);
+BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dir_flags, struct btrfs_dir_item, type, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, data_len, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, name_len, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, transid, 64);
+
+static inline u8 btrfs_dir_ftype(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item)
+{
+ return btrfs_dir_flags_to_ftype(btrfs_dir_flags(eb, item));
+}
+
+static inline u8 btrfs_stack_dir_ftype(const struct btrfs_dir_item *item)
+{
+ return btrfs_dir_flags_to_ftype(btrfs_stack_dir_flags(item));
+}
+
+static inline void btrfs_dir_item_key(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
+ struct btrfs_disk_key *key)
+{
+ read_eb_member(eb, item, struct btrfs_dir_item, location, key);
+}
+
+static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
+ struct btrfs_dir_item *item,
+ const struct btrfs_disk_key *key)
+{
+ write_eb_member(eb, item, struct btrfs_dir_item, location, key);
+}
+
+BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header,
+ num_entries, 64);
+BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header,
+ num_bitmaps, 64);
+BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header,
+ generation, 64);
+
+static inline void btrfs_free_space_key(const struct extent_buffer *eb,
+ const struct btrfs_free_space_header *h,
+ struct btrfs_disk_key *key)
+{
+ read_eb_member(eb, h, struct btrfs_free_space_header, location, key);
+}
+
+static inline void btrfs_set_free_space_key(struct extent_buffer *eb,
+ struct btrfs_free_space_header *h,
+ const struct btrfs_disk_key *key)
+{
+ write_eb_member(eb, h, struct btrfs_free_space_header, location, key);
+}
+
+/* struct btrfs_disk_key */
+BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, objectid, 64);
+BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64);
+BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8);
+
+#ifdef __LITTLE_ENDIAN
+
+/*
+ * Optimized helpers for little-endian architectures where CPU and on-disk
+ * structures have the same endianness and we can skip conversions.
+ */
+
+static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu_key,
+ const struct btrfs_disk_key *disk_key)
+{
+ memcpy(cpu_key, disk_key, sizeof(struct btrfs_key));
+}
+
+static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk_key,
+ const struct btrfs_key *cpu_key)
+{
+ memcpy(disk_key, cpu_key, sizeof(struct btrfs_key));
+}
+
+static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *cpu_key, int nr)
+{
+ struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key;
+
+ btrfs_node_key(eb, disk_key, nr);
+}
+
+static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *cpu_key, int nr)
+{
+ struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key;
+
+ btrfs_item_key(eb, disk_key, nr);
+}
+
+static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
+ struct btrfs_key *cpu_key)
+{
+ struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key;
+
+ btrfs_dir_item_key(eb, item, disk_key);
+}
+
+#else
+
+static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
+ const struct btrfs_disk_key *disk)
+{
+ cpu->offset = le64_to_cpu(disk->offset);
+ cpu->type = disk->type;
+ cpu->objectid = le64_to_cpu(disk->objectid);
+}
+
+static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
+ const struct btrfs_key *cpu)
+{
+ disk->offset = cpu_to_le64(cpu->offset);
+ disk->type = cpu->type;
+ disk->objectid = cpu_to_le64(cpu->objectid);
+}
+
+static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *key, int nr)
+{
+ struct btrfs_disk_key disk_key;
+
+ btrfs_node_key(eb, &disk_key, nr);
+ btrfs_disk_key_to_cpu(key, &disk_key);
+}
+
+static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *key, int nr)
+{
+ struct btrfs_disk_key disk_key;
+
+ btrfs_item_key(eb, &disk_key, nr);
+ btrfs_disk_key_to_cpu(key, &disk_key);
+}
+
+static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
+ struct btrfs_key *key)
+{
+ struct btrfs_disk_key disk_key;
+
+ btrfs_dir_item_key(eb, item, &disk_key);
+ btrfs_disk_key_to_cpu(key, &disk_key);
+}
+
+#endif
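The little-endian shortcuts above assume struct btrfs_key and struct btrfs_disk_key have identical size and field offsets, differing only in declared endianness. A hedged sketch of compile-time checks expressing that assumption (illustrative only; the kernel may or may not assert this elsewhere):

static_assert(sizeof(struct btrfs_key) == sizeof(struct btrfs_disk_key));
static_assert(offsetof(struct btrfs_key, objectid) ==
	      offsetof(struct btrfs_disk_key, objectid));
static_assert(offsetof(struct btrfs_key, type) ==
	      offsetof(struct btrfs_disk_key, type));
static_assert(offsetof(struct btrfs_key, offset) ==
	      offsetof(struct btrfs_disk_key, offset));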
+
+/* struct btrfs_header */
+BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, generation, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32);
+BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header,
+ generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, nritems, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64);
+
+static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag)
+{
+ return (btrfs_header_flags(eb) & flag) == flag;
+}
+
+static inline void btrfs_set_header_flag(struct extent_buffer *eb, u64 flag)
+{
+ u64 flags = btrfs_header_flags(eb);
+
+ btrfs_set_header_flags(eb, flags | flag);
+}
+
+static inline void btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
+{
+ u64 flags = btrfs_header_flags(eb);
+
+ btrfs_set_header_flags(eb, flags & ~flag);
+}
+
+static inline int btrfs_header_backref_rev(const struct extent_buffer *eb)
+{
+ u64 flags = btrfs_header_flags(eb);
+
+ return flags >> BTRFS_BACKREF_REV_SHIFT;
+}
+
+static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, int rev)
+{
+ u64 flags = btrfs_header_flags(eb);
+
+ flags &= ~BTRFS_BACKREF_REV_MASK;
+ flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT;
+ btrfs_set_header_flags(eb, flags);
+}
+
+static inline int btrfs_is_leaf(const struct extent_buffer *eb)
+{
+ return btrfs_header_level(eb) == 0;
+}
+
+/* struct btrfs_root_item */
+BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, generation, 64);
+BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32);
+BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64);
+BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, generation, 64);
+BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64);
+BTRFS_SETGET_STACK_FUNCS(root_drop_level, struct btrfs_root_item, drop_level, 8);
+BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
+BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32);
+BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
+BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
+ last_snapshot, 64);
+BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item,
+ generation_v2, 64);
+BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, ctransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, otransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, stransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, rtransid, 64);
+
+/* struct btrfs_root_backup */
+BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
+ tree_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup,
+ tree_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup,
+ tree_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup,
+ chunk_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup,
+ chunk_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup,
+ chunk_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup,
+ extent_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup,
+ extent_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup,
+ extent_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup,
+ fs_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup,
+ fs_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup,
+ fs_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup,
+ dev_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup,
+ dev_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup,
+ dev_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup,
+ csum_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup,
+ csum_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup,
+ csum_root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup,
+ total_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup,
+ bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
+ num_devices, 64);
+
+/* struct btrfs_balance_item */
+BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
+
+static inline void btrfs_balance_data(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
+ struct btrfs_disk_balance_args *ba)
+{
+ read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
+}
+
+static inline void btrfs_set_balance_data(struct extent_buffer *eb,
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
+{
+ write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
+}
+
+static inline void btrfs_balance_meta(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
+ struct btrfs_disk_balance_args *ba)
+{
+ read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
+}
+
+static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
+{
+ write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
+}
+
+static inline void btrfs_balance_sys(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
+ struct btrfs_disk_balance_args *ba)
+{
+ read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
+}
+
+static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
+{
+ write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
+}
+
+/* struct btrfs_super_block */
+BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
+BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block,
+ generation, 64);
+BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64);
+BTRFS_SETGET_STACK_FUNCS(super_sys_array_size,
+ struct btrfs_super_block, sys_chunk_array_size, 32);
+BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation,
+ struct btrfs_super_block, chunk_root_generation, 64);
+BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block,
+ root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block,
+ chunk_root, 64);
+BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block,
+ chunk_root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, log_root, 64);
+BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
+ log_root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
+ total_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block,
+ bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block,
+ sectorsize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block,
+ nodesize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block,
+ stripesize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block,
+ root_dir_objectid, 64);
+BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
+ num_devices, 64);
+BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
+ compat_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
+ compat_ro_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
+ incompat_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
+ csum_type, 16);
+BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block,
+ cache_generation, 64);
+BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
+BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
+ uuid_tree_generation, 64);
+BTRFS_SETGET_STACK_FUNCS(super_nr_global_roots, struct btrfs_super_block,
+ nr_global_roots, 64);
+
+/* struct btrfs_file_extent_item */
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_type, struct btrfs_file_extent_item,
+ type, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
+ struct btrfs_file_extent_item, disk_bytenr, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset,
+ struct btrfs_file_extent_item, offset, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation,
+ struct btrfs_file_extent_item, generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes,
+ struct btrfs_file_extent_item, num_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_ram_bytes,
+ struct btrfs_file_extent_item, ram_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes,
+ struct btrfs_file_extent_item, disk_num_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
+ struct btrfs_file_extent_item, compression, 8);
+
+BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
+BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item,
+ disk_bytenr, 64);
+BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item,
+ disk_num_bytes, 64);
+BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item,
+ offset, 64);
+BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item,
+ num_bytes, 64);
+BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item,
+ ram_bytes, 64);
+BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item,
+ compression, 8);
+BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item,
+ encryption, 8);
+BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
+ other_encoding, 16);
+
+/* btrfs_qgroup_status_item */
+BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item,
+ version, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
+ flags, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
+ rescan, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_enable_gen, struct btrfs_qgroup_status_item,
+ enable_gen, 64);
+
+/* btrfs_qgroup_info_item */
+BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item,
+ rfer_cmpr, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item,
+ excl_cmpr, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation,
+ struct btrfs_qgroup_info_item, generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item,
+ rfer, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr,
+ struct btrfs_qgroup_info_item, rfer_cmpr, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item,
+ excl, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr,
+ struct btrfs_qgroup_info_item, excl_cmpr, 64);
+
+/* btrfs_qgroup_limit_item */
+BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, flags, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item,
+ max_rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item,
+ max_excl, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item,
+ rsv_rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item,
+ rsv_excl, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_limit_flags,
+ struct btrfs_qgroup_limit_item, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_limit_max_rfer,
+ struct btrfs_qgroup_limit_item, max_rfer, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_limit_max_excl,
+ struct btrfs_qgroup_limit_item, max_excl, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_limit_rsv_rfer,
+ struct btrfs_qgroup_limit_item, rsv_rfer, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_limit_rsv_excl,
+ struct btrfs_qgroup_limit_item, rsv_excl, 64);
+
+/* btrfs_dev_replace_item */
+BTRFS_SETGET_FUNCS(dev_replace_src_devid,
+ struct btrfs_dev_replace_item, src_devid, 64);
+BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode,
+ struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode,
+ 64);
+BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item,
+ replace_state, 64);
+BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item,
+ time_started, 64);
+BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item,
+ time_stopped, 64);
+BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item,
+ num_write_errors, 64);
+BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors,
+ struct btrfs_dev_replace_item, num_uncorrectable_read_errors,
+ 64);
+BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item,
+ cursor_left, 64);
+BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item,
+ cursor_right, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid,
+ struct btrfs_dev_replace_item, src_devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode,
+ struct btrfs_dev_replace_item,
+ cont_reading_from_srcdev_mode, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state,
+ struct btrfs_dev_replace_item, replace_state, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started,
+ struct btrfs_dev_replace_item, time_started, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped,
+ struct btrfs_dev_replace_item, time_stopped, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors,
+ struct btrfs_dev_replace_item, num_write_errors, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors,
+ struct btrfs_dev_replace_item,
+ num_uncorrectable_read_errors, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left,
+ struct btrfs_dev_replace_item, cursor_left, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
+ struct btrfs_dev_replace_item, cursor_right, 64);
+
+/* btrfs_verity_descriptor_item */
+BTRFS_SETGET_FUNCS(verity_descriptor_encryption, struct btrfs_verity_descriptor_item,
+ encryption, 8);
+BTRFS_SETGET_FUNCS(verity_descriptor_size, struct btrfs_verity_descriptor_item,
+ size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_verity_descriptor_encryption,
+ struct btrfs_verity_descriptor_item, encryption, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_verity_descriptor_size,
+ struct btrfs_verity_descriptor_item, size, 64);
+
+/* Cast into the data area of the leaf. */
+#define btrfs_item_ptr(leaf, slot, type) \
+ ((type *)(btrfs_item_nr_offset(leaf, 0) + btrfs_item_offset(leaf, slot)))
+
+#define btrfs_item_ptr_offset(leaf, slot) \
+ ((unsigned long)(btrfs_item_nr_offset(leaf, 0) + btrfs_item_offset(leaf, slot)))
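A hedged sketch of the typical sequence these macros enable after a tree search lands on a leaf slot: cast the item body to its typed structure, then read fields through the generated accessors (path setup is elided, and the helper name is illustrative).

static u64 sketch_read_inode_size(struct extent_buffer *leaf, int slot)
{
	struct btrfs_inode_item *ii;

	ii = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
	return btrfs_inode_size(leaf, ii);
}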
+
+#endif
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 0a0d0eccee4e..c336e2ab7f8a 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -11,16 +11,16 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
-
#include "ctree.h"
-#include "btrfs_inode.h"
#include "xattr.h"
+#include "acl.h"
+#include "misc.h"
struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu)
{
int size;
const char *name;
- char *value = NULL;
+ char AUTO_KFREE(value);
struct posix_acl *acl;
if (rcu)
@@ -50,18 +50,16 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu)
acl = NULL;
else
acl = ERR_PTR(size);
- kfree(value);
return acl;
}
-static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
- struct user_namespace *mnt_userns,
- struct inode *inode, struct posix_acl *acl, int type)
+int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int ret, size = 0;
const char *name;
- char *value = NULL;
+ char AUTO_KFREE(value);
switch (type) {
case ACL_TYPE_ACCESS:
@@ -87,76 +85,40 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
nofs_flag = memalloc_nofs_save();
value = kmalloc(size, GFP_KERNEL);
memalloc_nofs_restore(nofs_flag);
- if (!value) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!value)
+ return -ENOMEM;
ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (ret < 0)
- goto out;
+ return ret;
}
if (trans)
ret = btrfs_setxattr(trans, inode, name, value, size, 0);
else
ret = btrfs_setxattr_trans(inode, name, value, size, 0);
+ if (ret < 0)
+ return ret;
-out:
- kfree(value);
-
- if (!ret)
- set_cached_acl(inode, type, acl);
-
- return ret;
+ set_cached_acl(inode, type, acl);
+ return 0;
}
-int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int btrfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
int ret;
+ struct inode *inode = d_inode(dentry);
umode_t old_mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
- ret = posix_acl_update_mode(mnt_userns, inode,
+ ret = posix_acl_update_mode(idmap, inode,
&inode->i_mode, &acl);
if (ret)
return ret;
}
- ret = __btrfs_set_acl(NULL, mnt_userns, inode, acl, type);
+ ret = __btrfs_set_acl(NULL, inode, acl, type);
if (ret)
inode->i_mode = old_mode;
return ret;
}
-
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir)
-{
- struct posix_acl *default_acl, *acl;
- int ret = 0;
-
- /* this happens with subvols */
- if (!dir)
- return 0;
-
- ret = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
- if (ret)
- return ret;
-
- if (default_acl) {
- ret = __btrfs_set_acl(trans, &init_user_ns, inode, default_acl,
- ACL_TYPE_DEFAULT);
- posix_acl_release(default_acl);
- }
-
- if (acl) {
- if (!ret)
- ret = __btrfs_set_acl(trans, &init_user_ns, inode, acl,
- ACL_TYPE_ACCESS);
- posix_acl_release(acl);
- }
-
- if (!default_acl && !acl)
- cache_no_acl(inode);
- return ret;
-}
diff --git a/fs/btrfs/acl.h b/fs/btrfs/acl.h
new file mode 100644
index 000000000000..0458cd51ed48
--- /dev/null
+++ b/fs/btrfs/acl.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_ACL_H
+#define BTRFS_ACL_H
+
+#include <linux/types.h>
+
+struct posix_acl;
+struct inode;
+struct btrfs_trans_handle;
+
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
+
+struct mnt_idmap;
+struct dentry;
+
+struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu);
+int btrfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct posix_acl *acl, int type);
+int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
+ struct posix_acl *acl, int type);
+
+#else
+
+#include <linux/errno.h>
+
+struct btrfs_trans_handle;
+
+#define btrfs_get_acl NULL
+#define btrfs_set_acl NULL
+static inline int __btrfs_set_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct posix_acl *acl,
+ int type)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 309516e6a968..6c6f3bb58f4e 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -9,19 +9,18 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <trace/events/btrfs.h>
#include "async-thread.h"
-#include "ctree.h"
enum {
WORK_DONE_BIT,
WORK_ORDER_DONE_BIT,
- WORK_HIGH_PRIO_BIT,
};
#define NO_THRESHOLD (-1)
-#define DFT_THRESHOLD (32)
+#define DEFAULT_THRESHOLD (32)
-struct __btrfs_workqueue {
+struct btrfs_workqueue {
struct workqueue_struct *normal_wq;
/* File system this workqueue services */
@@ -48,12 +47,7 @@ struct __btrfs_workqueue {
spinlock_t thres_lock;
};
-struct btrfs_workqueue {
- struct __btrfs_workqueue *normal;
- struct __btrfs_workqueue *high;
-};
-
-struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
return wq->fs_info;
}
@@ -66,33 +60,43 @@ struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
/*
- * We could compare wq->normal->pending with num_online_cpus()
+ * We could compare wq->pending with num_online_cpus()
* to support "thresh == NO_THRESHOLD" case, but it requires
* moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
* postpone it until someone needs the support of that case.
*/
- if (wq->normal->thresh == NO_THRESHOLD)
+ if (wq->thresh == NO_THRESHOLD)
return false;
- return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
+ return atomic_read(&wq->pending) > wq->thresh * 2;
}
-static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
- unsigned int flags, int limit_active, int thresh)
+static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
+ struct btrfs_fs_info *fs_info)
{
- struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ wq->fs_info = fs_info;
+ atomic_set(&wq->pending, 0);
+ INIT_LIST_HEAD(&wq->ordered_list);
+ spin_lock_init(&wq->list_lock);
+ spin_lock_init(&wq->thres_lock);
+}
+
+struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
+ const char *name, unsigned int flags,
+ int limit_active, int thresh)
+{
+ struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
- ret->fs_info = fs_info;
+ btrfs_init_workqueue(ret, fs_info);
+
ret->limit_active = limit_active;
- atomic_set(&ret->pending, 0);
if (thresh == 0)
- thresh = DFT_THRESHOLD;
+ thresh = DEFAULT_THRESHOLD;
/* For low threshold, disabling threshold is a better choice */
- if (thresh < DFT_THRESHOLD) {
+ if (thresh < DEFAULT_THRESHOLD) {
ret->current_active = limit_active;
ret->thresh = NO_THRESHOLD;
} else {
@@ -105,55 +109,41 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
ret->thresh = thresh;
}
- if (flags & WQ_HIGHPRI)
- ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
- ret->current_active, name);
- else
- ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
- ret->current_active, name);
+ ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
+ name);
if (!ret->normal_wq) {
kfree(ret);
return NULL;
}
- INIT_LIST_HEAD(&ret->ordered_list);
- spin_lock_init(&ret->list_lock);
- spin_lock_init(&ret->thres_lock);
- trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
+ trace_btrfs_workqueue_alloc(ret, name);
return ret;
}
-static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
-
-struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
- const char *name,
- unsigned int flags,
- int limit_active,
- int thresh)
+struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
+ struct btrfs_fs_info *fs_info, const char *name,
+ unsigned int flags)
{
- struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ struct btrfs_workqueue *ret;
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
- ret->normal = __btrfs_alloc_workqueue(fs_info, name,
- flags & ~WQ_HIGHPRI,
- limit_active, thresh);
- if (!ret->normal) {
+ btrfs_init_workqueue(ret, fs_info);
+
+ /* Ordered workqueues don't allow @max_active adjustments. */
+ ret->limit_active = 1;
+ ret->current_active = 1;
+ ret->thresh = NO_THRESHOLD;
+
+ ret->normal_wq = alloc_ordered_workqueue("btrfs-%s", flags, name);
+ if (!ret->normal_wq) {
kfree(ret);
return NULL;
}
- if (flags & WQ_HIGHPRI) {
- ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
- limit_active, thresh);
- if (!ret->high) {
- __btrfs_destroy_workqueue(ret->normal);
- kfree(ret);
- return NULL;
- }
- }
+ trace_btrfs_workqueue_alloc(ret, name);
return ret;
}
@@ -162,7 +152,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
* This hook WILL be called in IRQ handler context,
* so workqueue_set_max_active MUST NOT be called in this hook
*/
-static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
+static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{
if (wq->thresh == NO_THRESHOLD)
return;
@@ -174,11 +164,11 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
* This hook is called in kthread context.
* So workqueue_set_max_active is called here.
*/
-static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
+static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
int new_current_active;
long pending;
- int need_change = 0;
+ bool need_change = false;
if (wq->thresh == NO_THRESHOLD)
return;
@@ -206,18 +196,17 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
new_current_active--;
new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
if (new_current_active != wq->current_active) {
- need_change = 1;
+ need_change = true;
wq->current_active = new_current_active;
}
out:
spin_unlock(&wq->thres_lock);
- if (need_change) {
+ if (need_change)
workqueue_set_max_active(wq->normal_wq, wq->current_active);
- }
}
-static void run_ordered_work(struct __btrfs_workqueue *wq,
+static void run_ordered_work(struct btrfs_workqueue *wq,
struct btrfs_work *self)
{
struct list_head *list = &wq->ordered_list;
@@ -230,10 +219,16 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
- work = list_entry(list->next, struct btrfs_work,
- ordered_list);
+ work = list_first_entry(list, struct btrfs_work, ordered_list);
if (!test_bit(WORK_DONE_BIT, &work->flags))
break;
+ /*
+ * Order all subsequent loads after reading WORK_DONE_BIT.
+ * Paired with the smp_mb__before_atomic() in btrfs_work_helper(),
+ * this guarantees that the ordered function sees all updates
+ * made by the ordinary work function.
+ */
+ smp_rmb();
/*
* we are going to call the ordered done function, but
@@ -245,7 +240,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
break;
trace_btrfs_ordered_sched(work);
spin_unlock_irqrestore(lock, flags);
- work->ordered_func(work);
+ work->ordered_func(work, false);
/* now take the lock again and drop our item from the list */
spin_lock_irqsave(lock, flags);
@@ -280,7 +275,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
* We don't want to call the ordered free functions with
* the lock held.
*/
- work->ordered_free(work);
+ work->ordered_func(work, true);
/* NB: work must not be dereferenced past this point. */
trace_btrfs_all_work_done(wq->fs_info, work);
}
@@ -288,7 +283,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
spin_unlock_irqrestore(lock, flags);
if (free_self) {
- self->ordered_free(self);
+ self->ordered_func(self, true);
/* NB: self must not be dereferenced past this point. */
trace_btrfs_all_work_done(wq->fs_info, self);
}
@@ -298,25 +293,31 @@ static void btrfs_work_helper(struct work_struct *normal_work)
{
struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
normal_work);
- struct __btrfs_workqueue *wq;
- int need_order = 0;
+ struct btrfs_workqueue *wq = work->wq;
+ bool need_order = false;
/*
* We should not touch things inside work in the following cases:
- * 1) after work->func() if it has no ordered_free
+ * 1) after work->func() if it has no ordered_func(..., true) to free
* Since the struct is freed in work->func().
* 2) after setting WORK_DONE_BIT
* The work may be freed in other threads almost instantly.
* So we save the needed things here.
*/
if (work->ordered_func)
- need_order = 1;
- wq = work->wq;
+ need_order = true;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
work->func(work);
if (need_order) {
+ /*
+ * Ensure all memory accesses done in the work function are
+ * ordered before setting the WORK_DONE_BIT, so that the thread
+ * which is going to execute the ordered work sees them.
+ * Pairs with the smp_rmb() in run_ordered_work().
+ */
+ smp_mb__before_atomic();
set_bit(WORK_DONE_BIT, &work->flags);
run_ordered_work(wq, work);
} else {
@@ -326,18 +327,16 @@ static void btrfs_work_helper(struct work_struct *normal_work)
}
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
- btrfs_func_t ordered_func, btrfs_func_t ordered_free)
+ btrfs_ordered_func_t ordered_func)
{
work->func = func;
work->ordered_func = ordered_func;
- work->ordered_free = ordered_free;
INIT_WORK(&work->normal_work, btrfs_work_helper);
INIT_LIST_HEAD(&work->ordered_list);
work->flags = 0;
}
-static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
- struct btrfs_work *work)
+void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
unsigned long flags;
@@ -352,54 +351,22 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
queue_work(wq->normal_wq, &work->normal_work);
}
-void btrfs_queue_work(struct btrfs_workqueue *wq,
- struct btrfs_work *work)
-{
- struct __btrfs_workqueue *dest_wq;
-
- if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
- dest_wq = wq->high;
- else
- dest_wq = wq->normal;
- __btrfs_queue_work(dest_wq, work);
-}
-
-static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
-{
- destroy_workqueue(wq->normal_wq);
- trace_btrfs_workqueue_destroy(wq);
- kfree(wq);
-}
-
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
if (!wq)
return;
- if (wq->high)
- __btrfs_destroy_workqueue(wq->high);
- __btrfs_destroy_workqueue(wq->normal);
+ destroy_workqueue(wq->normal_wq);
+ trace_btrfs_workqueue_destroy(wq);
kfree(wq);
}
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
- if (!wq)
- return;
- wq->normal->limit_active = limit_active;
- if (wq->high)
- wq->high->limit_active = limit_active;
-}
-
-void btrfs_set_work_high_priority(struct btrfs_work *work)
-{
- set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
+ if (wq)
+ wq->limit_active = limit_active;
}
void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
- if (wq->high)
- flush_workqueue(wq->high->normal_wq);
-
- flush_workqueue(wq->normal->normal_wq);
+ flush_workqueue(wq->normal_wq);
}
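For clarity, a hedged, distilled sketch of the barrier pairing introduced above between the ordinary work pass and the ordered pass; the helper names are illustrative, not part of the patch.

static void sketch_ordinary_pass(struct btrfs_work *work)
{
	work->func(work);		/* all stores of the work function */
	smp_mb__before_atomic();	/* order those stores before the flag */
	set_bit(WORK_DONE_BIT, &work->flags);
}

static bool sketch_ordered_pass(struct btrfs_work *work)
{
	if (!test_bit(WORK_DONE_BIT, &work->flags))
		return false;		/* ordinary pass not finished yet */
	smp_rmb();			/* order later loads after the flag read */
	work->ordered_func(work, false);	/* sees all of work->func()'s writes */
	return true;
}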
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 3204daa51b95..04c2f3175828 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -7,25 +7,25 @@
#ifndef BTRFS_ASYNC_THREAD_H
#define BTRFS_ASYNC_THREAD_H
+#include <linux/compiler_types.h>
#include <linux/workqueue.h>
+#include <linux/list.h>
struct btrfs_fs_info;
struct btrfs_workqueue;
-/* Internal use only */
-struct __btrfs_workqueue;
struct btrfs_work;
+
typedef void (*btrfs_func_t)(struct btrfs_work *arg);
-typedef void (*btrfs_work_func_t)(struct work_struct *arg);
+typedef void (*btrfs_ordered_func_t)(struct btrfs_work *arg, bool);
struct btrfs_work {
btrfs_func_t func;
- btrfs_func_t ordered_func;
- btrfs_func_t ordered_free;
+ btrfs_ordered_func_t ordered_func;
/* Don't touch things below */
struct work_struct normal_work;
struct list_head ordered_list;
- struct __btrfs_workqueue *wq;
+ struct btrfs_workqueue *wq;
unsigned long flags;
};
@@ -34,15 +34,17 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
unsigned int flags,
int limit_active,
int thresh);
+struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
+ struct btrfs_fs_info *fs_info, const char *name,
+ unsigned int flags);
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
- btrfs_func_t ordered_func, btrfs_func_t ordered_free);
+ btrfs_ordered_func_t ordered_func);
void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
-void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
-struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
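A hedged usage sketch of the reworked API: one ordered_func callback now handles both the ordered completion (is_free == false) and the final free (is_free == true) that the removed ordered_free used to perform. The job structure and callback names below are illustrative.

struct sketch_job {
	struct btrfs_work work;
	/* ... job-specific state ... */
};

static void sketch_func(struct btrfs_work *work)
{
	/* Unordered part: may run concurrently with other jobs. */
}

static void sketch_ordered_func(struct btrfs_work *work, bool is_free)
{
	struct sketch_job *job = container_of(work, struct sketch_job, work);

	if (!is_free) {
		/* Ordered part: runs in queueing order. */
		return;
	}
	kfree(job);	/* Last callback for this work item: release it. */
}

static void sketch_submit(struct btrfs_workqueue *wq, struct sketch_job *job)
{
	btrfs_init_work(&job->work, sketch_func, sketch_ordered_func);
	btrfs_queue_work(wq, &job->work);
}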
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f735b8798ba1..78da47a3d00e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -15,49 +15,77 @@
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "relocation.h"
+#include "tree-checker.h"
-/* Just an arbitrary number so we can be sure this happened */
-#define BACKREF_FOUND_SHARED 6
+/* Just arbitrary numbers so we can be sure one of these happened. */
+#define BACKREF_FOUND_SHARED 6
+#define BACKREF_FOUND_NOT_SHARED 7
struct extent_inode_elem {
u64 inum;
u64 offset;
+ u64 num_bytes;
struct extent_inode_elem *next;
};
-static int check_extent_in_eb(const struct btrfs_key *key,
+static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
+ const struct btrfs_key *key,
const struct extent_buffer *eb,
const struct btrfs_file_extent_item *fi,
- u64 extent_item_pos,
- struct extent_inode_elem **eie,
- bool ignore_offset)
+ struct extent_inode_elem **eie)
{
- u64 offset = 0;
+ const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
+ u64 offset = key->offset;
struct extent_inode_elem *e;
+ const u64 *root_ids;
+ int root_count;
+ bool cached;
- if (!ignore_offset &&
+ if (!ctx->ignore_extent_item_pos &&
!btrfs_file_extent_compression(eb, fi) &&
!btrfs_file_extent_encryption(eb, fi) &&
!btrfs_file_extent_other_encoding(eb, fi)) {
u64 data_offset;
- u64 data_len;
data_offset = btrfs_file_extent_offset(eb, fi);
- data_len = btrfs_file_extent_num_bytes(eb, fi);
- if (extent_item_pos < data_offset ||
- extent_item_pos >= data_offset + data_len)
+ if (ctx->extent_item_pos < data_offset ||
+ ctx->extent_item_pos >= data_offset + data_len)
return 1;
- offset = extent_item_pos - data_offset;
+ offset += ctx->extent_item_pos - data_offset;
}
+ if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
+ goto add_inode_elem;
+
+ cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
+ &root_count);
+ if (!cached)
+ goto add_inode_elem;
+
+ for (int i = 0; i < root_count; i++) {
+ int ret;
+
+ ret = ctx->indirect_ref_iterator(key->objectid, offset,
+ data_len, root_ids[i],
+ ctx->user_ctx);
+ if (ret)
+ return ret;
+ }
+
+add_inode_elem:
e = kmalloc(sizeof(*e), GFP_NOFS);
if (!e)
return -ENOMEM;
e->next = *eie;
e->inum = key->objectid;
- e->offset = key->offset + offset;
+ e->offset = offset;
+ e->num_bytes = data_len;
*eie = e;
return 0;
@@ -73,10 +101,9 @@ static void free_inode_elem_list(struct extent_inode_elem *eie)
}
}
-static int find_extent_in_eb(const struct extent_buffer *eb,
- u64 wanted_disk_byte, u64 extent_item_pos,
- struct extent_inode_elem **eie,
- bool ignore_offset)
+static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
+ const struct extent_buffer *eb,
+ struct extent_inode_elem **eie)
{
u64 disk_byte;
struct btrfs_key key;
@@ -102,11 +129,11 @@ static int find_extent_in_eb(const struct extent_buffer *eb,
continue;
/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
- if (disk_byte != wanted_disk_byte)
+ if (disk_byte != ctx->bytenr)
continue;
- ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
- if (ret < 0)
+ ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
return ret;
}
@@ -135,9 +162,30 @@ struct preftrees {
* - decremented when a ref->count transitions to <1
*/
struct share_check {
- u64 root_objectid;
+ struct btrfs_backref_share_check_ctx *ctx;
+ struct btrfs_root *root;
u64 inum;
+ u64 data_bytenr;
+ u64 data_extent_gen;
+ /*
+ * Counts number of inodes that refer to an extent (different inodes in
+ * the same root or different roots) that we could find. The sharedness
+ * check typically stops once this counter gets greater than 1, so it
+ * may not reflect the total number of inodes.
+ */
int share_count;
+ /*
+ * The number of times we found our inode refers to the data extent we
+ * are determining the sharedness. In other words, how many file extent
+ * items we could find for our inode that point to our target data
+ * extent. The value we get here after finishing the extent sharedness
+ * check may be smaller than reality, but if it ends up being greater
+ * than 1, then we know for sure the inode has multiple file extent
+ * items that point to our inode, and we can safely assume it's useful
+ * to cache the sharedness check result.
+ */
+ int self_ref_count;
+ bool have_delayed_delete_refs;
};
static inline int extent_is_shared(struct share_check *sc)
@@ -150,10 +198,7 @@ static struct kmem_cache *btrfs_prelim_ref_cache;
int __init btrfs_prelim_ref_init(void)
{
btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
- sizeof(struct prelim_ref),
- 0,
- SLAB_MEM_SPREAD,
- NULL);
+ sizeof(struct prelim_ref), 0, 0, NULL);
if (!btrfs_prelim_ref_cache)
return -ENOMEM;
return 0;
@@ -174,8 +219,8 @@ static void free_pref(struct prelim_ref *ref)
* A -1 return indicates ref1 is a 'lower' block than ref2, while 1
* indicates a 'higher' block.
*/
-static int prelim_ref_compare(struct prelim_ref *ref1,
- struct prelim_ref *ref2)
+static int prelim_ref_compare(const struct prelim_ref *ref1,
+ const struct prelim_ref *ref2)
{
if (ref1->level < ref2->level)
return -1;
@@ -205,8 +250,23 @@ static int prelim_ref_compare(struct prelim_ref *ref1,
return 0;
}
+static int prelim_ref_rb_add_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct prelim_ref *ref_new =
+ rb_entry(new, struct prelim_ref, rbnode);
+ const struct prelim_ref *ref_exist =
+ rb_entry(exist, struct prelim_ref, rbnode);
+
+ /*
+ * prelim_ref_compare() expects the first parameter as the existing one,
+ * different from the rb_find_add_cached() order.
+ */
+ return prelim_ref_compare(ref_exist, ref_new);
+}
+
static void update_share_count(struct share_check *sc, int oldcount,
- int newcount)
+ int newcount, const struct prelim_ref *newref)
{
if ((!sc) || (oldcount == 0 && newcount < 1))
return;
@@ -215,6 +275,11 @@ static void update_share_count(struct share_check *sc, int oldcount,
sc->share_count--;
else if (oldcount < 1 && newcount > 0)
sc->share_count++;
+
+ if (newref->root_id == btrfs_root_id(sc->root) &&
+ newref->wanted_disk_byte == sc->data_bytenr &&
+ newref->key_for_search.objectid == sc->inum)
+ sc->self_ref_count += newref->count;
}
/*
@@ -228,55 +293,39 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
struct share_check *sc)
{
struct rb_root_cached *root;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct prelim_ref *ref;
- int result;
- bool leftmost = true;
+ struct rb_node *exist;
root = &preftree->root;
- p = &root->rb_root.rb_node;
-
- while (*p) {
- parent = *p;
- ref = rb_entry(parent, struct prelim_ref, rbnode);
- result = prelim_ref_compare(ref, newref);
- if (result < 0) {
- p = &(*p)->rb_left;
- } else if (result > 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- /* Identical refs, merge them and free @newref */
- struct extent_inode_elem *eie = ref->inode_list;
+ exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp);
+ if (exist) {
+ struct prelim_ref *ref = rb_entry(exist, struct prelim_ref, rbnode);
+ /* Identical refs, merge them and free @newref */
+ struct extent_inode_elem *eie = ref->inode_list;
- while (eie && eie->next)
- eie = eie->next;
+ while (eie && eie->next)
+ eie = eie->next;
- if (!eie)
- ref->inode_list = newref->inode_list;
- else
- eie->next = newref->inode_list;
- trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
- preftree->count);
- /*
- * A delayed ref can have newref->count < 0.
- * The ref->count is updated to follow any
- * BTRFS_[ADD|DROP]_DELAYED_REF actions.
- */
- update_share_count(sc, ref->count,
- ref->count + newref->count);
- ref->count += newref->count;
- free_pref(newref);
- return;
- }
+ if (!eie)
+ ref->inode_list = newref->inode_list;
+ else
+ eie->next = newref->inode_list;
+ trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
+ preftree->count);
+ /*
+ * A delayed ref can have newref->count < 0.
+ * The ref->count is updated to follow any
+ * BTRFS_[ADD|DROP]_DELAYED_REF actions.
+ */
+ update_share_count(sc, ref->count,
+ ref->count + newref->count, newref);
+ ref->count += newref->count;
+ free_pref(newref);
+ return;
}
- update_share_count(sc, 0, newref->count);
+ update_share_count(sc, 0, newref->count, newref);
preftree->count++;
trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
- rb_link_node(&newref->rbnode, parent, p);
- rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
/*
@@ -288,8 +337,10 @@ static void prelim_release(struct preftree *preftree)
struct prelim_ref *ref, *next_ref;
rbtree_postorder_for_each_entry_safe(ref, next_ref,
- &preftree->root.rb_root, rbnode)
+ &preftree->root.rb_root, rbnode) {
+ free_inode_elem_list(ref->inode_list);
free_pref(ref);
+ }
preftree->root = RB_ROOT_CACHED;
preftree->count = 0;
@@ -413,11 +464,11 @@ static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
return 0;
}
-static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
+ struct btrfs_root *root, struct btrfs_path *path,
struct ulist *parents,
struct preftrees *preftrees, struct prelim_ref *ref,
- int level, u64 time_seq, const u64 *extent_item_pos,
- bool ignore_offset)
+ int level)
{
int ret = 0;
int slot;
@@ -430,6 +481,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
u64 wanted_disk_byte = ref->wanted_disk_byte;
u64 count = 0;
u64 data_offset;
+ u8 type;
if (level != 0) {
eb = path->nodes[level];
@@ -453,10 +505,10 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (path->slots[0] >= btrfs_header_nritems(eb) ||
is_shared_data_backref(preftrees, eb->start) ||
ref->root_id != btrfs_header_owner(eb)) {
- if (time_seq == BTRFS_SEQ_LAST)
+ if (ctx->time_seq == BTRFS_SEQ_LAST)
ret = btrfs_next_leaf(root, path);
else
- ret = btrfs_next_old_leaf(root, path, time_seq);
+ ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
}
while (!ret && count < ref->count) {
@@ -477,13 +529,16 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (slot == 0 &&
(is_shared_data_backref(preftrees, eb->start) ||
ref->root_id != btrfs_header_owner(eb))) {
- if (time_seq == BTRFS_SEQ_LAST)
+ if (ctx->time_seq == BTRFS_SEQ_LAST)
ret = btrfs_next_leaf(root, path);
else
- ret = btrfs_next_old_leaf(root, path, time_seq);
+ ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
continue;
}
fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
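+ /* Inline extents store their data in the leaf itself and have no disk bytenr to match, so skip them. */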
+ type = btrfs_file_extent_type(eb, fi);
+ if (type == BTRFS_FILE_EXTENT_INLINE)
+ goto next;
disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
data_offset = btrfs_file_extent_offset(eb, fi);
@@ -494,11 +549,10 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
count++;
else
goto next;
- if (extent_item_pos) {
- ret = check_extent_in_eb(&key, eb, fi,
- *extent_item_pos,
- &eie, ignore_offset);
- if (ret < 0)
+ if (!ctx->skip_inode_ref_list) {
+ ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
+ ret < 0)
break;
}
if (ret > 0)
@@ -507,7 +561,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
eie, (void **)&old, GFP_NOFS);
if (ret < 0)
break;
- if (!ret && extent_item_pos) {
+ if (!ret && !ctx->skip_inode_ref_list) {
while (old->next)
old = old->next;
old->next = eie;
@@ -515,16 +569,17 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
eie = NULL;
}
next:
- if (time_seq == BTRFS_SEQ_LAST)
+ if (ctx->time_seq == BTRFS_SEQ_LAST)
ret = btrfs_next_item(root, path);
else
- ret = btrfs_next_old_item(root, path, time_seq);
+ ret = btrfs_next_old_item(root, path, ctx->time_seq);
}
- if (ret > 0)
- ret = 0;
- else if (ret < 0)
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
free_inode_elem_list(eie);
+ else if (ret > 0)
+ ret = 0;
+
return ret;
}
@@ -532,11 +587,10 @@ next:
* resolve an indirect backref in the form (root_id, key, level)
* to a logical address
*/
-static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, u64 time_seq,
+static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
+ struct btrfs_path *path,
struct preftrees *preftrees,
- struct prelim_ref *ref, struct ulist *parents,
- const u64 *extent_item_pos, bool ignore_offset)
+ struct prelim_ref *ref, struct ulist *parents)
{
struct btrfs_root *root;
struct extent_buffer *eb;
@@ -554,9 +608,9 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
* here.
*/
if (path->search_commit_root)
- root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
+ root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
else
- root = btrfs_get_fs_root(fs_info, ref->root_id, false);
+ root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out_free;
@@ -568,17 +622,17 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
goto out;
}
- if (btrfs_is_testing(fs_info)) {
+ if (btrfs_is_testing(ctx->fs_info)) {
ret = -ENOENT;
goto out;
}
if (path->search_commit_root)
root_level = btrfs_header_level(root->commit_root);
- else if (time_seq == BTRFS_SEQ_LAST)
+ else if (ctx->time_seq == BTRFS_SEQ_LAST)
root_level = btrfs_header_level(root->node);
else
- root_level = btrfs_old_root_level(root, time_seq);
+ root_level = btrfs_old_root_level(root, ctx->time_seq);
if (root_level + 1 == level)
goto out;
@@ -606,16 +660,15 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
search_key.offset >= LLONG_MAX)
search_key.offset = 0;
path->lowest_level = level;
- if (time_seq == BTRFS_SEQ_LAST)
+ if (ctx->time_seq == BTRFS_SEQ_LAST)
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
else
- ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
+ ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
- btrfs_debug(fs_info,
- "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
- ref->root_id, level, ref->count, ret,
- ref->key_for_search.objectid, ref->key_for_search.type,
- ref->key_for_search.offset);
+ btrfs_debug(ctx->fs_info,
+"search slot in root %llu (level %d, ref count %d) returned %d for key " BTRFS_KEY_FMT,
+ ref->root_id, level, ref->count, ret,
+ BTRFS_KEY_FMT_VALUE(&ref->key_for_search));
if (ret < 0)
goto out;
@@ -629,8 +682,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
eb = path->nodes[level];
}
- ret = add_all_parents(root, path, parents, preftrees, ref, level,
- time_seq, extent_item_pos, ignore_offset);
+ ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
btrfs_put_root(root);
out_free:
@@ -647,6 +699,18 @@ unode_aux_to_inode_list(struct ulist_node *node)
return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
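+/*
+ * Free the inode element lists attached as aux data to the ulist nodes, then
+ * free the ulist itself.
+ */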
+static void free_leaf_list(struct ulist *ulist)
+{
+ struct ulist_node *node;
+ struct ulist_iterator uiter;
+
+ ULIST_ITER_INIT(&uiter);
+ while ((node = ulist_next(ulist, &uiter)))
+ free_inode_elem_list(unode_aux_to_inode_list(node));
+
+ ulist_free(ulist);
+}
+
/*
* We maintain three separate rbtrees: one for direct refs, one for
* indirect refs which have a key, and one for indirect refs which do not
@@ -663,13 +727,11 @@ unode_aux_to_inode_list(struct ulist_node *node)
* rbtree as they are encountered. The new backrefs are subsequently
* resolved as above.
*/
-static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, u64 time_seq,
+static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
+ struct btrfs_path *path,
struct preftrees *preftrees,
- const u64 *extent_item_pos,
- struct share_check *sc, bool ignore_offset)
+ struct share_check *sc)
{
- int err;
int ret = 0;
struct ulist *parents;
struct ulist_node *node;
@@ -688,6 +750,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
*/
while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
struct prelim_ref *ref;
+ int ret2;
ref = rb_entry(rnode, struct prelim_ref, rbnode);
if (WARN(ref->parent,
@@ -704,26 +767,23 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
continue;
}
- if (sc && sc->root_objectid &&
- ref->root_id != sc->root_objectid) {
+ if (sc && ref->root_id != btrfs_root_id(sc->root)) {
free_pref(ref);
ret = BACKREF_FOUND_SHARED;
goto out;
}
- err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
- ref, parents, extent_item_pos,
- ignore_offset);
+ ret2 = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
/*
* we can only tolerate ENOENT; otherwise, we should catch the error
* and return directly.
*/
- if (err == -ENOENT) {
- prelim_ref_insert(fs_info, &preftrees->direct, ref,
+ if (ret2 == -ENOENT) {
+ prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
NULL);
continue;
- } else if (err) {
+ } else if (ret2) {
free_pref(ref);
- ret = err;
+ ret = ret2;
goto out;
}
@@ -747,7 +807,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
memcpy(new_ref, ref, sizeof(*ref));
new_ref->parent = node->val;
new_ref->inode_list = unode_aux_to_inode_list(node);
- prelim_ref_insert(fs_info, &preftrees->direct,
+ prelim_ref_insert(ctx->fs_info, &preftrees->direct,
new_ref, NULL);
}
@@ -755,13 +815,17 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
* Now it's a direct ref, put it in the direct tree. We must
* do this last because the ref could be merged/freed here.
*/
- prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
+ prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
ulist_reinit(parents);
cond_resched();
}
out:
- ulist_free(parents);
+ /*
+ * We may have inode lists attached to refs in the parents ulist, so we
+ * must free them before freeing the ulist and its refs.
+ */
+ free_leaf_list(parents);
return ret;
}
@@ -777,6 +841,8 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
struct rb_node *node;
while ((node = rb_first_cached(&tree->root))) {
+ struct btrfs_tree_parent_check check = { 0 };
+
ref = rb_entry(node, struct prelim_ref, rbnode);
rb_erase_cached(node, &tree->root);
@@ -784,16 +850,20 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
BUG_ON(ref->key_for_search.type);
BUG_ON(!ref->wanted_disk_byte);
- eb = read_tree_block(fs_info, ref->wanted_disk_byte,
- ref->root_id, 0, ref->level - 1, NULL);
+ check.level = ref->level - 1;
+ check.owner_root = ref->root_id;
+
+ eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
if (IS_ERR(eb)) {
free_pref(ref);
return PTR_ERR(eb);
- } else if (!extent_buffer_uptodate(eb)) {
+ }
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_pref(ref);
free_extent_buffer(eb);
return -EIO;
}
+
if (lock)
btrfs_tree_read_lock(eb);
if (btrfs_header_level(eb) == 0)
@@ -818,16 +888,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct preftrees *preftrees, struct share_check *sc)
{
struct btrfs_delayed_ref_node *node;
- struct btrfs_delayed_extent_op *extent_op = head->extent_op;
struct btrfs_key key;
- struct btrfs_key tmp_op_key;
struct rb_node *n;
int count;
int ret = 0;
- if (extent_op && extent_op->update_key)
- btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
-
spin_lock(&head->lock);
for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -852,56 +917,65 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
switch (node->type) {
case BTRFS_TREE_BLOCK_REF_KEY: {
/* NORMAL INDIRECT METADATA backref */
- struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_key *key_ptr = NULL;
+ /* The owner of a tree block ref is the level. */
+ int level = btrfs_delayed_ref_owner(node);
- ref = btrfs_delayed_node_to_tree_ref(node);
- ret = add_indirect_ref(fs_info, preftrees, ref->root,
- &tmp_op_key, ref->level + 1,
- node->bytenr, count, sc,
- GFP_ATOMIC);
+ if (head->extent_op && head->extent_op->update_key) {
+ btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+ key_ptr = &key;
+ }
+
+ ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
+ key_ptr, level + 1, node->bytenr,
+ count, sc, GFP_ATOMIC);
break;
}
case BTRFS_SHARED_BLOCK_REF_KEY: {
- /* SHARED DIRECT METADATA backref */
- struct btrfs_delayed_tree_ref *ref;
-
- ref = btrfs_delayed_node_to_tree_ref(node);
+ /*
+ * SHARED DIRECT METADATA backref
+ *
+ * The owner of a tree block ref is the level.
+ */
+ int level = btrfs_delayed_ref_owner(node);
- ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
- ref->parent, node->bytenr, count,
+ ret = add_direct_ref(fs_info, preftrees, level + 1,
+ node->parent, node->bytenr, count,
sc, GFP_ATOMIC);
break;
}
case BTRFS_EXTENT_DATA_REF_KEY: {
/* NORMAL INDIRECT DATA backref */
- struct btrfs_delayed_data_ref *ref;
- ref = btrfs_delayed_node_to_data_ref(node);
-
- key.objectid = ref->objectid;
+ key.objectid = btrfs_delayed_ref_owner(node);
key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = ref->offset;
+ key.offset = btrfs_delayed_ref_offset(node);
/*
- * Found a inum that doesn't match our known inum, we
- * know it's shared.
+ * If we have a share check context and a reference for
+ * another inode, we can't exit immediately. This is
+ * because even if this is a BTRFS_ADD_DELAYED_REF
+ * reference, we may next find a BTRFS_DROP_DELAYED_REF
+ * which cancels out this ADD reference.
+ *
+ * If this is a DROP reference and there was no previous
+ * ADD reference, then we need to signal that when we
+ * process references from the extent tree (through
+ * add_inline_refs() and add_keyed_refs()), we should
+ * not exit early if we find a reference for another
+ * inode, because one of the delayed DROP references
+ * may cancel that reference in the extent tree.
*/
- if (sc && sc->inum && ref->objectid != sc->inum) {
- ret = BACKREF_FOUND_SHARED;
- goto out;
- }
+ if (sc && count < 0)
+ sc->have_delayed_delete_refs = true;
- ret = add_indirect_ref(fs_info, preftrees, ref->root,
+ ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
&key, 0, node->bytenr, count, sc,
GFP_ATOMIC);
break;
}
case BTRFS_SHARED_DATA_REF_KEY: {
/* SHARED DIRECT FULL backref */
- struct btrfs_delayed_data_ref *ref;
-
- ref = btrfs_delayed_node_to_data_ref(node);
-
- ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
+ ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
node->bytenr, count, sc,
GFP_ATOMIC);
break;
@@ -918,7 +992,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
}
if (!ret)
ret = extent_is_shared(sc);
-out:
+
spin_unlock(&head->lock);
return ret;
}
@@ -928,8 +1002,8 @@ out:
*
* Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
*/
-static int add_inline_refs(const struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, u64 bytenr,
+static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
+ struct btrfs_path *path,
int *info_level, struct preftrees *preftrees,
struct share_check *sc)
{
@@ -950,10 +1024,15 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
leaf = path->nodes[0];
slot = path->slots[0];
- item_size = btrfs_item_size_nr(leaf, slot);
- BUG_ON(item_size < sizeof(*ei));
-
+ item_size = btrfs_item_size(leaf, slot);
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+
+ if (ctx->check_extent_item) {
+ ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
+ if (ret)
+ return ret;
+ }
+
flags = btrfs_extent_flags(leaf, ei);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
@@ -982,16 +1061,16 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_get_extent_inline_ref_type(leaf, iref,
BTRFS_REF_TYPE_ANY);
- if (type == BTRFS_REF_TYPE_INVALID)
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID))
return -EUCLEAN;
offset = btrfs_extent_inline_ref_offset(leaf, iref);
switch (type) {
case BTRFS_SHARED_BLOCK_REF_KEY:
- ret = add_direct_ref(fs_info, preftrees,
+ ret = add_direct_ref(ctx->fs_info, preftrees,
*info_level + 1, offset,
- bytenr, 1, NULL, GFP_NOFS);
+ ctx->bytenr, 1, NULL, GFP_NOFS);
break;
case BTRFS_SHARED_DATA_REF_KEY: {
struct btrfs_shared_data_ref *sdref;
@@ -1000,14 +1079,14 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
sdref = (struct btrfs_shared_data_ref *)(iref + 1);
count = btrfs_shared_data_ref_count(leaf, sdref);
- ret = add_direct_ref(fs_info, preftrees, 0, offset,
- bytenr, count, sc, GFP_NOFS);
+ ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
+ ctx->bytenr, count, sc, GFP_NOFS);
break;
}
case BTRFS_TREE_BLOCK_REF_KEY:
- ret = add_indirect_ref(fs_info, preftrees, offset,
+ ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
NULL, *info_level + 1,
- bytenr, 1, NULL, GFP_NOFS);
+ ctx->bytenr, 1, NULL, GFP_NOFS);
break;
case BTRFS_EXTENT_DATA_REF_KEY: {
struct btrfs_extent_data_ref *dref;
@@ -1021,18 +1100,25 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (sc && sc->inum && key.objectid != sc->inum) {
+ if (sc && key.objectid != sc->inum &&
+ !sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
root = btrfs_extent_data_ref_root(leaf, dref);
- ret = add_indirect_ref(fs_info, preftrees, root,
- &key, 0, bytenr, count,
- sc, GFP_NOFS);
+ if (!ctx->skip_data_ref ||
+ !ctx->skip_data_ref(root, key.objectid, key.offset,
+ ctx->user_ctx))
+ ret = add_indirect_ref(ctx->fs_info, preftrees,
+ root, &key, 0, ctx->bytenr,
+ count, sc, GFP_NOFS);
break;
}
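+ /*
+ * Owner refs only record the root that originally owned the extent
+ * for simple quotas; they carry no backref to resolve.
+ */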
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
+ break;
default:
WARN_ON(1);
}
@@ -1049,12 +1135,13 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
*
* Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
*/
-static int add_keyed_refs(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, u64 bytenr,
+static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
+ struct btrfs_root *extent_root,
+ struct btrfs_path *path,
int info_level, struct preftrees *preftrees,
struct share_check *sc)
{
- struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_fs_info *fs_info = extent_root->fs_info;
int ret;
int slot;
struct extent_buffer *leaf;
@@ -1073,7 +1160,7 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != bytenr)
+ if (key.objectid != ctx->bytenr)
break;
if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
continue;
@@ -1085,7 +1172,7 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
/* SHARED DIRECT METADATA backref */
ret = add_direct_ref(fs_info, preftrees,
info_level + 1, key.offset,
- bytenr, 1, NULL, GFP_NOFS);
+ ctx->bytenr, 1, NULL, GFP_NOFS);
break;
case BTRFS_SHARED_DATA_REF_KEY: {
/* SHARED DIRECT FULL backref */
@@ -1096,14 +1183,14 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_shared_data_ref);
count = btrfs_shared_data_ref_count(leaf, sdref);
ret = add_direct_ref(fs_info, preftrees, 0,
- key.offset, bytenr, count,
+ key.offset, ctx->bytenr, count,
sc, GFP_NOFS);
break;
}
case BTRFS_TREE_BLOCK_REF_KEY:
/* NORMAL INDIRECT METADATA backref */
ret = add_indirect_ref(fs_info, preftrees, key.offset,
- NULL, info_level + 1, bytenr,
+ NULL, info_level + 1, ctx->bytenr,
1, NULL, GFP_NOFS);
break;
case BTRFS_EXTENT_DATA_REF_KEY: {
@@ -1120,15 +1207,20 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (sc && sc->inum && key.objectid != sc->inum) {
+ if (sc && key.objectid != sc->inum &&
+ !sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
root = btrfs_extent_data_ref_root(leaf, dref);
- ret = add_indirect_ref(fs_info, preftrees, root,
- &key, 0, bytenr, count,
- sc, GFP_NOFS);
+
+ if (!ctx->skip_data_ref ||
+ !ctx->skip_data_ref(root, key.objectid, key.offset,
+ ctx->user_ctx))
+ ret = add_indirect_ref(fs_info, preftrees, root,
+ &key, 0, ctx->bytenr,
+ count, sc, GFP_NOFS);
break;
}
default:
@@ -1143,33 +1235,149 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
}
/*
+ * The caller has joined a transaction or is holding a read lock on the
+ * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
+ * snapshot field changing while updating or checking the cache.
+ */
+static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
+ struct btrfs_root *root,
+ u64 bytenr, int level, bool *is_shared)
+{
+ const struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_backref_shared_cache_entry *entry;
+
+ if (!current->journal_info)
+ lockdep_assert_held(&fs_info->commit_root_sem);
+
+ if (!ctx->use_path_cache)
+ return false;
+
+ if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
+ return false;
+
+ /*
+ * Level -1 is used for the data extent, which is not reliable to cache
+ * because its reference count can increase or decrease without us
+ * realizing. We cache results only for extent buffers that lead from
+ * the root node down to the leaf with the file extent item.
+ */
+ ASSERT(level >= 0);
+
+ entry = &ctx->path_cache_entries[level];
+
+ /* Unused cache entry or being used for some other extent buffer. */
+ if (entry->bytenr != bytenr)
+ return false;
+
+ /*
+ * We cached a false result, but the last snapshot generation of the
+ * root changed, so we now have a snapshot. Don't trust the result.
+ */
+ if (!entry->is_shared &&
+ entry->gen != btrfs_root_last_snapshot(&root->root_item))
+ return false;
+
+ /*
+ * If we cached a true result and the last generation used for dropping
+ * a root changed, we can not trust the result, because the dropped root
+ * could be a snapshot sharing this extent buffer.
+ */
+ if (entry->is_shared &&
+ entry->gen != btrfs_get_last_root_drop_gen(fs_info))
+ return false;
+
+ *is_shared = entry->is_shared;
+ /*
+ * If the node at this level is shared, then all nodes below it are also
+ * shared. Some of the nodes below may currently be marked as not shared
+ * because we have just switched from one leaf to another, which also
+ * switched other nodes above the leaf and below the current level, so mark
+ * them as shared.
+ */
+ if (*is_shared) {
+ for (int i = 0; i < level; i++) {
+ ctx->path_cache_entries[i].is_shared = true;
+ ctx->path_cache_entries[i].gen = entry->gen;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * The caller has joined a transaction or is holding a read lock on the
+ * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
+ * snapshot field changing while updating or checking the cache.
+ */
+static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
+ struct btrfs_root *root,
+ u64 bytenr, int level, bool is_shared)
+{
+ const struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_backref_shared_cache_entry *entry;
+ u64 gen;
+
+ if (!current->journal_info)
+ lockdep_assert_held(&fs_info->commit_root_sem);
+
+ if (!ctx->use_path_cache)
+ return;
+
+ if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
+ return;
+
+ /*
+ * Level -1 is used for the data extent, which is not reliable to cache
+ * because its reference count can increase or decrease without us
+ * realizing. We cache results only for extent buffers that lead from
+ * the root node down to the leaf with the file extent item.
+ */
+ ASSERT(level >= 0);
+
+ if (is_shared)
+ gen = btrfs_get_last_root_drop_gen(fs_info);
+ else
+ gen = btrfs_root_last_snapshot(&root->root_item);
+
+ entry = &ctx->path_cache_entries[level];
+ entry->bytenr = bytenr;
+ entry->is_shared = is_shared;
+ entry->gen = gen;
+
+ /*
+ * If we found an extent buffer is shared, set the cache result for all
+ * extent buffers below it to true. As nodes in the path are COWed,
+ * their sharedness is moved to their children, and if a leaf is COWed,
+ * then the sharedness of a data extent becomes direct: the refcount of
+ * the data extent is increased in the extent item in the extent tree.
+ */
+ if (is_shared) {
+ for (int i = 0; i < level; i++) {
+ entry = &ctx->path_cache_entries[i];
+ entry->is_shared = is_shared;
+ entry->gen = gen;
+ }
+ }
+}
+
+/*
* this adds all existing backrefs (inline backrefs, backrefs and delayed
* refs) for the given bytenr to the refs list, merges duplicates and resolves
* indirect refs to their parent bytenr.
* When roots are found, they're added to the roots list
*
- * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and
- * behave much like trans == NULL case, the difference only lies in it will not
- * commit root.
- * The special case is for qgroup to search roots in commit_transaction().
- *
- * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
- * shared extent is detected.
+ * @ctx: Backref walking context object, must be not NULL.
+ * @sc: If !NULL, then immediately return BACKREF_FOUND_SHARED when a
+ * shared extent is detected.
*
* Otherwise this returns 0 for success and <0 for an error.
*
- * If ignore_offset is set to false, only extent refs whose offsets match
- * extent_item_pos are returned. If true, every extent ref is returned
- * and extent_item_pos is ignored.
- *
* FIXME some caching might speed things up
*/
-static int find_parent_nodes(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist *refs,
- struct ulist *roots, const u64 *extent_item_pos,
- struct share_check *sc, bool ignore_offset)
+static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
+ struct share_check *sc)
{
+ struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_delayed_ref_root *delayed_refs = NULL;
@@ -1185,50 +1393,55 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
.indirect_missing_keys = PREFTREE_INIT
};
- key.objectid = bytenr;
- key.offset = (u64)-1;
- if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+ /* Roots ulist is not needed when using a sharedness check context. */
+ if (sc)
+ ASSERT(ctx->roots == NULL);
+
+ key.objectid = ctx->bytenr;
+ if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- if (!trans) {
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ if (!ctx->trans) {
+ path->search_commit_root = true;
+ path->skip_locking = true;
}
- if (time_seq == BTRFS_SEQ_LAST)
- path->skip_locking = 1;
+ if (ctx->time_seq == BTRFS_SEQ_LAST)
+ path->skip_locking = true;
- /*
- * grab both a lock on the path and a lock on the delayed ref head.
- * We need both to get a consistent picture of how the refs look
- * at a specified point in time
- */
again:
head = NULL;
- ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
- BUG_ON(ret == 0);
-
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (trans && likely(trans->type != __TRANS_DUMMY) &&
- time_seq != BTRFS_SEQ_LAST) {
-#else
- if (trans && time_seq != BTRFS_SEQ_LAST) {
-#endif
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found, there would have to exist an extent
+ * item with such offset, but this is out of the valid range.
+ */
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
+ ctx->time_seq != BTRFS_SEQ_LAST) {
/*
- * look if there are updates for this ref queued and lock the
- * head
+ * We have a specific time_seq we care about and trans which
+ * means we have the path lock, we need to grab the ref head and
+ * lock it so we have a consistent view of the refs at the given
+ * time.
*/
- delayed_refs = &trans->transaction->delayed_refs;
+ delayed_refs = &ctx->trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
+ ctx->bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
refcount_inc(&head->refs);
@@ -1246,7 +1459,7 @@ again:
goto again;
}
spin_unlock(&delayed_refs->lock);
- ret = add_delayed_refs(fs_info, head, time_seq,
+ ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
&preftrees, sc);
mutex_unlock(&head->mutex);
if (ret)
@@ -1264,30 +1477,96 @@ again:
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid == bytenr &&
+ if (key.objectid == ctx->bytenr &&
(key.type == BTRFS_EXTENT_ITEM_KEY ||
key.type == BTRFS_METADATA_ITEM_KEY)) {
- ret = add_inline_refs(fs_info, path, bytenr,
- &info_level, &preftrees, sc);
+ ret = add_inline_refs(ctx, path, &info_level,
+ &preftrees, sc);
if (ret)
goto out;
- ret = add_keyed_refs(fs_info, path, bytenr, info_level,
+ ret = add_keyed_refs(ctx, root, path, info_level,
&preftrees, sc);
if (ret)
goto out;
}
}
+ /*
+ * If we have a share context and we reached here, it means the extent
+ * is not directly shared (no multiple reference items for it),
+ * otherwise we would have exited earlier with a return value of
+ * BACKREF_FOUND_SHARED after processing delayed references or while
+ * processing inline or keyed references from the extent tree.
+ * The extent may however be indirectly shared through shared subtrees
+ * as a result from creating snapshots, so we determine below what is
+ * its parent node, in case we are dealing with a metadata extent, or
+ * what's the leaf (or leaves), from a fs tree, that has a file extent
+ * item pointing to it in case we are dealing with a data extent.
+ */
+ ASSERT(extent_is_shared(sc) == 0);
+
+ /*
+ * If we are here for a data extent and we have a share_check structure
+ * it means the data extent is not directly shared (does not have
+ * multiple reference items), so we have to check if a path in the fs
+ * tree (going from the root node down to the leaf that has the file
+ * extent item pointing to the data extent) is shared, that is, if any
+ * of the extent buffers in the path is referenced by other trees.
+ */
+ if (sc && ctx->bytenr == sc->data_bytenr) {
+ /*
+ * If our data extent is from a generation more recent than the
+ * last generation used to snapshot the root, then we know that
+ * it can not be shared through subtrees, so we can skip
+ * resolving indirect references, there's no point in
+ * determining the extent buffers for the path from the fs tree
+ * root node down to the leaf that has the file extent item that
+ * points to the data extent.
+ */
+ if (sc->data_extent_gen >
+ btrfs_root_last_snapshot(&sc->root->root_item)) {
+ ret = BACKREF_FOUND_NOT_SHARED;
+ goto out;
+ }
+
+ /*
+ * If we are only determining if a data extent is shared or not
+ * and the corresponding file extent item is located in the same
+ * leaf as the previous file extent item, we can skip resolving
+ * indirect references for a data extent, since the fs tree path
+ * is the same (same leaf, so same path). We skip as long as the
+ * cached result for the leaf is valid and only if there's only
+ * one file extent item pointing to the data extent, because in
+ * the case of multiple file extent items, they may be located
+ * in different leaves and therefore we have multiple paths.
+ */
+ if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
+ sc->self_ref_count == 1) {
+ bool cached;
+ bool is_shared;
+
+ cached = lookup_backref_shared_cache(sc->ctx, sc->root,
+ sc->ctx->curr_leaf_bytenr,
+ 0, &is_shared);
+ if (cached) {
+ if (is_shared)
+ ret = BACKREF_FOUND_SHARED;
+ else
+ ret = BACKREF_FOUND_NOT_SHARED;
+ goto out;
+ }
+ }
+ }
+
btrfs_release_path(path);
- ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
+ ret = add_missing_keys(ctx->fs_info, &preftrees, !path->skip_locking);
if (ret)
goto out;
WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
- ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
- extent_item_pos, sc, ignore_offset);
+ ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
if (ret)
goto out;
@@ -1314,29 +1593,27 @@ again:
* e.g. different offsets would not be merged,
* and would retain their original ref->count < 0.
*/
- if (roots && ref->count && ref->root_id && ref->parent == 0) {
- if (sc && sc->root_objectid &&
- ref->root_id != sc->root_objectid) {
- ret = BACKREF_FOUND_SHARED;
- goto out;
- }
-
+ if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
/* no parent == root of tree */
- ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
+ ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
if (ret < 0)
goto out;
}
if (ref->count && ref->parent) {
- if (extent_item_pos && !ref->inode_list &&
+ if (!ctx->skip_inode_ref_list && !ref->inode_list &&
ref->level == 0) {
+ struct btrfs_tree_parent_check check = { 0 };
struct extent_buffer *eb;
- eb = read_tree_block(fs_info, ref->parent, 0,
- 0, ref->level, NULL);
+ check.level = ref->level;
+
+ eb = read_tree_block(ctx->fs_info, ref->parent,
+ &check);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
- } else if (!extent_buffer_uptodate(eb)) {
+ }
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_extent_buffer(eb);
ret = -EIO;
goto out;
@@ -1344,31 +1621,53 @@ again:
if (!path->skip_locking)
btrfs_tree_read_lock(eb);
- ret = find_extent_in_eb(eb, bytenr,
- *extent_item_pos, &eie, ignore_offset);
+ ret = find_extent_in_eb(ctx, eb, &eie);
if (!path->skip_locking)
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
- if (ret < 0)
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
+ ret < 0)
goto out;
ref->inode_list = eie;
+ /*
+ * We transferred the list ownership to the ref,
+ * so set to NULL to avoid a double free in case
+ * an error happens after this.
+ */
+ eie = NULL;
}
- ret = ulist_add_merge_ptr(refs, ref->parent,
+ ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
ref->inode_list,
(void **)&eie, GFP_NOFS);
if (ret < 0)
goto out;
- if (!ret && extent_item_pos) {
+ if (!ret && !ctx->skip_inode_ref_list) {
/*
- * we've recorded that parent, so we must extend
- * its inode list here
+ * We've recorded that parent, so we must extend
+ * its inode list here.
+ *
+ * However if there was corruption we may not
+ * have found an eie, return an error in this
+ * case.
*/
- BUG_ON(!eie);
+ ASSERT(eie);
+ if (unlikely(!eie)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
while (eie->next)
eie = eie->next;
eie->next = ref->inode_list;
}
eie = NULL;
+ /*
+ * We have transferred the inode list ownership from
+ * this ref to the ref we added to the 'refs' ulist.
+ * So set this ref's inode list to NULL to avoid
+ * use-after-free when our caller uses it or double
+ * frees in case an error happens before we return.
+ */
+ ref->inode_list = NULL;
}
cond_resched();
}
@@ -1380,52 +1679,36 @@ out:
prelim_release(&preftrees.indirect);
prelim_release(&preftrees.indirect_missing_keys);
- if (ret < 0)
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
free_inode_elem_list(eie);
return ret;
}
-static void free_leaf_list(struct ulist *blocks)
-{
- struct ulist_node *node = NULL;
- struct extent_inode_elem *eie;
- struct ulist_iterator uiter;
-
- ULIST_ITER_INIT(&uiter);
- while ((node = ulist_next(blocks, &uiter))) {
- if (!node->aux)
- continue;
- eie = unode_aux_to_inode_list(node);
- free_inode_elem_list(eie);
- node->aux = 0;
- }
-
- ulist_free(blocks);
-}
-
/*
- * Finds all leafs with a reference to the specified combination of bytenr and
- * offset. key_list_head will point to a list of corresponding keys (caller must
- * free each list element). The leafs will be stored in the leafs ulist, which
- * must be freed with ulist_free.
+ * Finds all leaves with a reference to the specified combination of
+ * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
+ * added to the ulist at @ctx->refs, and that ulist is allocated by this
+ * function. The caller should free the ulist with free_leaf_list() if
+ * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
+ * enough.
*
- * returns 0 on success, <0 on error
+ * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
*/
-int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **leafs,
- const u64 *extent_item_pos, bool ignore_offset)
+int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
int ret;
- *leafs = ulist_alloc(GFP_NOFS);
- if (!*leafs)
+ ASSERT(ctx->refs == NULL);
+
+ ctx->refs = ulist_alloc(GFP_NOFS);
+ if (!ctx->refs)
return -ENOMEM;
- ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
- *leafs, NULL, extent_item_pos, NULL, ignore_offset);
- if (ret < 0 && ret != -ENOENT) {
- free_leaf_list(*leafs);
+ ret = find_parent_nodes(ctx, NULL);
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
+ (ret < 0 && ret != -ENOENT)) {
+ free_leaf_list(ctx->refs);
+ ctx->refs = NULL;
return ret;
}
@@ -1433,7 +1716,7 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
}
/*
- * walk all backrefs for a given extent to find all roots that reference this
+ * Walk all backrefs for a given extent to find all roots that reference this
* extent. Walking a backref means finding all extents that reference this
* extent and in turn walk the backrefs of those, too. Naturally this is a
* recursive process, but here it is implemented in an iterative fashion: We
@@ -1441,76 +1724,115 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
* list. In turn, we find all referencing extents for those, further appending
* to the list. The way we iterate the list allows adding more elements after
* the current while iterating. The process stops when we reach the end of the
- * list. Found roots are added to the roots list.
+ * list.
*
- * returns 0 on success, < 0 on error.
+ * Found roots are added to @ctx->roots, which is allocated by this function if
+ * it points to NULL, in which case the caller is responsible for freeing it
+ * once it is no longer needed.
+ * This function requires @ctx->refs to be NULL, as it uses it for allocating a
+ * ulist to do temporary work, and frees it before returning.
+ *
+ * Returns 0 on success, < 0 on error.
*/
-static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots,
- bool ignore_offset)
+static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
{
- struct ulist *tmp;
- struct ulist_node *node = NULL;
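+ /*
+ * The walk below reuses ctx->bytenr for each referencing extent and
+ * forces ctx->skip_inode_ref_list, so save the caller's values here and
+ * restore them before returning.
+ */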
+ const u64 orig_bytenr = ctx->bytenr;
+ const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
+ bool roots_ulist_allocated = false;
struct ulist_iterator uiter;
- int ret;
+ int ret = 0;
- tmp = ulist_alloc(GFP_NOFS);
- if (!tmp)
- return -ENOMEM;
- *roots = ulist_alloc(GFP_NOFS);
- if (!*roots) {
- ulist_free(tmp);
+ ASSERT(ctx->refs == NULL);
+
+ ctx->refs = ulist_alloc(GFP_NOFS);
+ if (!ctx->refs)
return -ENOMEM;
+
+ if (!ctx->roots) {
+ ctx->roots = ulist_alloc(GFP_NOFS);
+ if (!ctx->roots) {
+ ulist_free(ctx->refs);
+ ctx->refs = NULL;
+ return -ENOMEM;
+ }
+ roots_ulist_allocated = true;
}
+ ctx->skip_inode_ref_list = true;
+
ULIST_ITER_INIT(&uiter);
while (1) {
- ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
- tmp, *roots, NULL, NULL, ignore_offset);
+ struct ulist_node *node;
+
+ ret = find_parent_nodes(ctx, NULL);
if (ret < 0 && ret != -ENOENT) {
- ulist_free(tmp);
- ulist_free(*roots);
- *roots = NULL;
- return ret;
+ if (roots_ulist_allocated) {
+ ulist_free(ctx->roots);
+ ctx->roots = NULL;
+ }
+ break;
}
- node = ulist_next(tmp, &uiter);
+ ret = 0;
+ node = ulist_next(ctx->refs, &uiter);
if (!node)
break;
- bytenr = node->val;
+ ctx->bytenr = node->val;
cond_resched();
}
- ulist_free(tmp);
- return 0;
+ ulist_free(ctx->refs);
+ ctx->refs = NULL;
+ ctx->bytenr = orig_bytenr;
+ ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
+
+ return ret;
}
-int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots,
+int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
bool skip_commit_root_sem)
{
int ret;
- if (!trans && !skip_commit_root_sem)
- down_read(&fs_info->commit_root_sem);
- ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
- time_seq, roots, false);
- if (!trans && !skip_commit_root_sem)
- up_read(&fs_info->commit_root_sem);
+ if (!ctx->trans && !skip_commit_root_sem)
+ down_read(&ctx->fs_info->commit_root_sem);
+ ret = btrfs_find_all_roots_safe(ctx);
+ if (!ctx->trans && !skip_commit_root_sem)
+ up_read(&ctx->fs_info->commit_root_sem);
return ret;
}
-/**
- * Check if an extent is shared or not
+struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
+{
+ struct btrfs_backref_share_check_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ulist_init(&ctx->refs);
+
+ return ctx;
+}
+
+void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ ulist_release(&ctx->refs);
+ kfree(ctx);
+}
+
+/*
+ * Check if a data extent is shared or not.
*
- * @root: root inode belongs to
- * @inum: inode number of the inode whose extent we are checking
- * @bytenr: logical bytenr of the extent we are checking
- * @roots: list of roots this extent is shared among
- * @tmp: temporary list used for iteration
+ * @inode: The inode whose extent we are checking.
+ * @bytenr: Logical bytenr of the extent we are checking.
+ * @extent_gen: Generation of the extent (file extent item) or 0 if it is
+ * not known.
+ * @ctx: A backref sharedness check context.
*
- * btrfs_check_shared uses the backref walking code but will short
+ * btrfs_is_data_extent_shared uses the backref walking code but will short
* circuit as soon as it finds a root or inode that doesn't match the
* one passed in. This provides a significant performance benefit for
* callers (such as fiemap) which want to know whether the extent is
@@ -1521,9 +1843,12 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
*
* Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
*/
-int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
- struct ulist *roots, struct ulist *tmp)
+int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
+ u64 extent_gen,
+ struct btrfs_backref_share_check_ctx *ctx)
{
+ struct btrfs_backref_walk_ctx walk_ctx = { 0 };
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct ulist_iterator uiter;
@@ -1531,13 +1856,25 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
int ret = 0;
struct share_check shared = {
- .root_objectid = root->root_key.objectid,
- .inum = inum,
+ .ctx = ctx,
+ .root = root,
+ .inum = btrfs_ino(inode),
+ .data_bytenr = bytenr,
+ .data_extent_gen = extent_gen,
.share_count = 0,
+ .self_ref_count = 0,
+ .have_delayed_delete_refs = false,
};
+ int level;
+ bool leaf_cached;
+ bool leaf_is_shared;
+
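+ /*
+ * If we recently determined the sharedness of this exact data extent,
+ * return the cached result and skip the backref walk entirely.
+ */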
+ for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
+ if (ctx->prev_extents_cache[i].bytenr == bytenr)
+ return ctx->prev_extents_cache[i].is_shared;
+ }
- ulist_init(roots);
- ulist_init(tmp);
+ ulist_init(&ctx->refs);
trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
@@ -1549,28 +1886,146 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
down_read(&fs_info->commit_root_sem);
} else {
btrfs_get_tree_mod_seq(fs_info, &elem);
+ walk_ctx.time_seq = elem.seq;
}
+ ctx->use_path_cache = true;
+
+ /*
+ * We may have previously determined that the current leaf is shared.
+ * If it is, then we have a data extent that is shared due to a shared
+ * subtree (caused by snapshotting) and we don't need to check for data
+ * backrefs. If the leaf is not shared, then we must do backref walking
+ * to determine if the data extent is shared through reflinks.
+ */
+ leaf_cached = lookup_backref_shared_cache(ctx, root,
+ ctx->curr_leaf_bytenr, 0,
+ &leaf_is_shared);
+ if (leaf_cached && leaf_is_shared) {
+ ret = 1;
+ goto out_trans;
+ }
+
+ walk_ctx.skip_inode_ref_list = true;
+ walk_ctx.trans = trans;
+ walk_ctx.fs_info = fs_info;
+ walk_ctx.refs = &ctx->refs;
+
+ /* -1 means we are in the bytenr of the data extent. */
+ level = -1;
ULIST_ITER_INIT(&uiter);
while (1) {
- ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
- roots, NULL, &shared, false);
- if (ret == BACKREF_FOUND_SHARED) {
- /* this is the only condition under which we return 1 */
- ret = 1;
+ const unsigned long prev_ref_count = ctx->refs.nnodes;
+
+ walk_ctx.bytenr = bytenr;
+ ret = find_parent_nodes(&walk_ctx, &shared);
+ if (ret == BACKREF_FOUND_SHARED ||
+ ret == BACKREF_FOUND_NOT_SHARED) {
+ /* If shared, we must return 1; otherwise return 0. */
+ ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
+ if (level >= 0)
+ store_backref_shared_cache(ctx, root, bytenr,
+ level, ret == 1);
break;
}
if (ret < 0 && ret != -ENOENT)
break;
ret = 0;
- node = ulist_next(tmp, &uiter);
+
+ /*
+ * More than one extent buffer (bytenr) may have been added to
+ * the ctx->refs ulist, in which case we have to check multiple
+ * tree paths in case the first one is not shared, so we can not
+ * use the path cache which is made for a single path. Multiple
+ * extent buffers at the current level happen when:
+ *
+ * 1) level -1, the data extent: If our data extent was not
+ * directly shared (without multiple reference items), then
+ * it might have a single reference item with a count > 1 for
+ * the same offset, which means there are 2 (or more) file
+ * extent items that point to the data extent - this happens
+ * when a file extent item needs to be split and then one
+ * item gets moved to another leaf due to a b+tree leaf split
+ * when inserting some item. In this case the file extent
+ * items may be located in different leaves and therefore
+ * some of the leaves may be referenced through shared
+ * subtrees while others are not. Since our extent buffer
+ * cache only works for a single path (by far the most common
+ * case and simpler to deal with), we can not use it if we
+ * have multiple leaves (which implies multiple paths).
+ *
+ * 2) level >= 0, a tree node/leaf: We can have a mix of direct
+ * and indirect references on a b+tree node/leaf, so we have
+ * to check multiple paths, and the extent buffer (the
+ * current bytenr) may be shared or not. One example is
+ * during relocation as we may get a shared tree block ref
+ * (direct ref) and a non-shared tree block ref (indirect
+ * ref) for the same node/leaf.
+ */
+ if ((ctx->refs.nnodes - prev_ref_count) > 1)
+ ctx->use_path_cache = false;
+
+ if (level >= 0)
+ store_backref_shared_cache(ctx, root, bytenr,
+ level, false);
+ node = ulist_next(&ctx->refs, &uiter);
if (!node)
break;
bytenr = node->val;
+ if (ctx->use_path_cache) {
+ bool is_shared;
+ bool cached;
+
+ level++;
+ cached = lookup_backref_shared_cache(ctx, root, bytenr,
+ level, &is_shared);
+ if (cached) {
+ ret = (is_shared ? 1 : 0);
+ break;
+ }
+ }
shared.share_count = 0;
+ shared.have_delayed_delete_refs = false;
cond_resched();
}
+ /*
+ * If the path cache is disabled, then it means at some tree level we
+ * got multiple parents due to a mix of direct and indirect backrefs or
+ * multiple leaves with file extent items pointing to the same data
+ * extent. We have to invalidate the cache and cache only the sharedness
+ * result for the levels where we got only one node/reference.
+ */
+ if (!ctx->use_path_cache) {
+ int i = 0;
+
+ level--;
+ if (ret >= 0 && level >= 0) {
+ bytenr = ctx->path_cache_entries[level].bytenr;
+ ctx->use_path_cache = true;
+ store_backref_shared_cache(ctx, root, bytenr, level, ret);
+ i = level + 1;
+ }
+
+ for ( ; i < BTRFS_MAX_LEVEL; i++)
+ ctx->path_cache_entries[i].bytenr = 0;
+ }
+
+ /*
+ * Cache the sharedness result for the data extent if we know our inode
+ * has more than 1 file extent item that refers to the data extent.
+ */
+ if (ret >= 0 && shared.self_ref_count > 1) {
+ int slot = ctx->prev_extents_cache_slot;
+
+ ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
+ ctx->prev_extents_cache[slot].is_shared = (ret == 1);
+
+ slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
+ ctx->prev_extents_cache_slot = slot;
+ }
+
+out_trans:
if (trans) {
btrfs_put_tree_mod_seq(fs_info, &elem);
btrfs_end_transaction(trans);
@@ -1578,8 +2033,9 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
up_read(&fs_info->commit_root_sem);
}
out:
- ulist_release(roots);
- ulist_release(tmp);
+ ulist_release(&ctx->refs);
+ ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
+
return ret;
}
@@ -1740,26 +2196,33 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
struct btrfs_path *path, struct btrfs_key *found_key,
u64 *flags_ret)
{
+ struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
int ret;
u64 flags;
u64 size = 0;
- u32 item_size;
const struct extent_buffer *eb;
struct btrfs_extent_item *ei;
struct btrfs_key key;
+ key.objectid = logical;
if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.objectid = logical;
key.offset = (u64)-1;
- ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
return ret;
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found, there would have to exist an extent
+ * item with such offset, but this is out of the valid range.
+ */
+ return -EUCLEAN;
+ }
- ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
+ ret = btrfs_previous_extent_item(extent_root, path, 0);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -1779,8 +2242,6 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
}
eb = path->nodes[0];
- item_size = btrfs_item_size_nr(eb, path->slots[0]);
- BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
@@ -1788,7 +2249,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
btrfs_debug(fs_info,
"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
logical, logical - found_key->objectid, found_key->objectid,
- found_key->offset, flags, item_size);
+ found_key->offset, flags, btrfs_item_size(eb, path->slots[0]));
WARN_ON(!flags_ret);
if (flags_ret) {
@@ -1850,7 +2311,7 @@ static int get_extent_inline_ref(unsigned long *ptr,
*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
BTRFS_REF_TYPE_ANY);
- if (*out_type == BTRFS_REF_TYPE_INVALID)
+ if (unlikely(*out_type == BTRFS_REF_TYPE_INVALID))
return -EUCLEAN;
*ptr += btrfs_extent_inline_ref_size(*out_type);
@@ -1925,7 +2386,7 @@ static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
"ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
extent_item_objectid, eie->inum,
eie->offset, root);
- ret = iterate(eie->inum, eie->offset, root, ctx);
+ ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
if (ret) {
btrfs_debug(fs_info,
"stopping iteration for %llu due to ret=%d",
@@ -1942,113 +2403,181 @@ static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
* the given parameters.
* when the iterator function returns a non-zero value, iteration stops.
*/
-int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
- u64 extent_item_objectid, u64 extent_item_pos,
- int search_commit_root,
- iterate_extent_inodes_t *iterate, void *ctx,
- bool ignore_offset)
+int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
+ bool search_commit_root,
+ iterate_extent_inodes_t *iterate, void *user_ctx)
{
int ret;
- struct btrfs_trans_handle *trans = NULL;
- struct ulist *refs = NULL;
- struct ulist *roots = NULL;
- struct ulist_node *ref_node = NULL;
- struct ulist_node *root_node = NULL;
+ struct ulist *refs;
+ struct ulist_node *ref_node;
struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
struct ulist_iterator ref_uiter;
- struct ulist_iterator root_uiter;
- btrfs_debug(fs_info, "resolving all inodes for extent %llu",
- extent_item_objectid);
+ btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
+ ctx->bytenr);
+
+ ASSERT(ctx->trans == NULL);
+ ASSERT(ctx->roots == NULL);
if (!search_commit_root) {
- trans = btrfs_attach_transaction(fs_info->extent_root);
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT &&
PTR_ERR(trans) != -EROFS)
return PTR_ERR(trans);
trans = NULL;
}
+ ctx->trans = trans;
}
- if (trans)
- btrfs_get_tree_mod_seq(fs_info, &seq_elem);
- else
- down_read(&fs_info->commit_root_sem);
+ if (ctx->trans) {
+ btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
+ ctx->time_seq = seq_elem.seq;
+ } else {
+ down_read(&ctx->fs_info->commit_root_sem);
+ }
- ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
- seq_elem.seq, &refs,
- &extent_item_pos, ignore_offset);
+ ret = btrfs_find_all_leafs(ctx);
if (ret)
goto out;
+ refs = ctx->refs;
+ ctx->refs = NULL;
ULIST_ITER_INIT(&ref_uiter);
while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
- ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
- seq_elem.seq, &roots,
- ignore_offset);
+ const u64 leaf_bytenr = ref_node->val;
+ struct ulist_node *root_node;
+ struct ulist_iterator root_uiter;
+ struct extent_inode_elem *inode_list;
+
+ inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
+
+ if (ctx->cache_lookup) {
+ const u64 *root_ids;
+ int root_count;
+ bool cached;
+
+ cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
+ &root_ids, &root_count);
+ if (cached) {
+ for (int i = 0; i < root_count; i++) {
+ ret = iterate_leaf_refs(ctx->fs_info,
+ inode_list,
+ root_ids[i],
+ leaf_bytenr,
+ iterate,
+ user_ctx);
+ if (ret)
+ break;
+ }
+ continue;
+ }
+ }
+
+ if (!ctx->roots) {
+ ctx->roots = ulist_alloc(GFP_NOFS);
+ if (!ctx->roots) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ ctx->bytenr = leaf_bytenr;
+ ret = btrfs_find_all_roots_safe(ctx);
if (ret)
break;
+
+ if (ctx->cache_store)
+ ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
+
ULIST_ITER_INIT(&root_uiter);
- while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
- btrfs_debug(fs_info,
+ while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
+ btrfs_debug(ctx->fs_info,
"root %llu references leaf %llu, data list %#llx",
root_node->val, ref_node->val,
ref_node->aux);
- ret = iterate_leaf_refs(fs_info,
- (struct extent_inode_elem *)
- (uintptr_t)ref_node->aux,
- root_node->val,
- extent_item_objectid,
- iterate, ctx);
+ ret = iterate_leaf_refs(ctx->fs_info, inode_list,
+ root_node->val, ctx->bytenr,
+ iterate, user_ctx);
}
- ulist_free(roots);
+ ulist_reinit(ctx->roots);
}
free_leaf_list(refs);
out:
- if (trans) {
- btrfs_put_tree_mod_seq(fs_info, &seq_elem);
- btrfs_end_transaction(trans);
+ if (ctx->trans) {
+ btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
+ btrfs_end_transaction(ctx->trans);
+ ctx->trans = NULL;
} else {
- up_read(&fs_info->commit_root_sem);
+ up_read(&ctx->fs_info->commit_root_sem);
}
+ ulist_free(ctx->roots);
+ ctx->roots = NULL;
+
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
+ ret = 0;
+
return ret;
}
+static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
+{
+ struct btrfs_data_container *inodes = ctx;
+ const size_t c = 3 * sizeof(u64);
+
+ if (inodes->bytes_left >= c) {
+ inodes->bytes_left -= c;
+ inodes->val[inodes->elem_cnt] = inum;
+ inodes->val[inodes->elem_cnt + 1] = offset;
+ inodes->val[inodes->elem_cnt + 2] = root;
+ inodes->elem_cnt += 3;
+ } else {
+ inodes->bytes_missing += c - inodes->bytes_left;
+ inodes->bytes_left = 0;
+ inodes->elem_missed += 3;
+ }
+
+ return 0;
+}
+
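/*
 * Illustrative sketch, not part of the patch: how a consumer would walk the
 * (inum, offset, root) triples that build_ino_list() packs into the
 * btrfs_data_container, three u64 values per reference. The function name is
 * hypothetical.
 */
static void print_ino_list(const struct btrfs_data_container *inodes)
{
	for (u32 i = 0; i < inodes->elem_cnt; i += 3)
		pr_info("inum %llu offset %llu root %llu\n",
			inodes->val[i], inodes->val[i + 1],
			inodes->val[i + 2]);
}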
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
- iterate_extent_inodes_t *iterate, void *ctx,
- bool ignore_offset)
+ void *ctx, bool ignore_offset)
{
+ struct btrfs_backref_walk_ctx walk_ctx = { 0 };
int ret;
- u64 extent_item_pos;
u64 flags = 0;
struct btrfs_key found_key;
- int search_commit_root = path->search_commit_root;
+ struct btrfs_path *path;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
- btrfs_release_path(path);
+ btrfs_free_path(path);
if (ret < 0)
return ret;
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
return -EINVAL;
- extent_item_pos = logical - found_key.objectid;
- ret = iterate_extent_inodes(fs_info, found_key.objectid,
- extent_item_pos, search_commit_root,
- iterate, ctx, ignore_offset);
+ walk_ctx.bytenr = found_key.objectid;
+ if (ignore_offset)
+ walk_ctx.ignore_extent_item_pos = true;
+ else
+ walk_ctx.extent_item_pos = logical - found_key.objectid;
+ walk_ctx.fs_info = fs_info;
- return ret;
+ return iterate_extent_inodes(&walk_ctx, false, build_ino_list, ctx);
}
-typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
- struct extent_buffer *eb, void *ctx);
+static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb, struct inode_fs_paths *ipath);
-static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
- struct btrfs_path *path,
- iterate_irefs_t *iterate, void *ctx)
+static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
{
int ret = 0;
int slot;
@@ -2057,8 +2586,9 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
u32 name_len;
u64 parent = 0;
int found = 0;
+ struct btrfs_root *fs_root = ipath->fs_root;
+ struct btrfs_path *path = ipath->btrfs_path;
struct extent_buffer *eb;
- struct btrfs_item *item;
struct btrfs_inode_ref *iref;
struct btrfs_key found_key;
@@ -2084,18 +2614,17 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
}
btrfs_release_path(path);
- item = btrfs_item_nr(slot);
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
- for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
+ for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
name_len = btrfs_inode_ref_name_len(eb, iref);
/* path must be released before calling iterate()! */
btrfs_debug(fs_root->fs_info,
"following ref at offset %u for inode %llu in tree %llu",
cur, found_key.objectid,
- fs_root->root_key.objectid);
- ret = iterate(parent, name_len,
- (unsigned long)(iref + 1), eb, ctx);
+ btrfs_root_id(fs_root));
+ ret = inode_to_path(parent, name_len,
+ (unsigned long)(iref + 1), eb, ipath);
if (ret)
break;
len = sizeof(*iref) + name_len;
@@ -2109,15 +2638,15 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
return ret;
}
-static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
- struct btrfs_path *path,
- iterate_irefs_t *iterate, void *ctx)
+static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
{
int ret;
int slot;
u64 offset = 0;
u64 parent;
int found = 0;
+ struct btrfs_root *fs_root = ipath->fs_root;
+ struct btrfs_path *path = ipath->btrfs_path;
struct extent_buffer *eb;
struct btrfs_inode_extref *extref;
u32 item_size;
@@ -2143,7 +2672,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
}
btrfs_release_path(path);
- item_size = btrfs_item_size_nr(eb, slot);
+ item_size = btrfs_item_size(eb, slot);
ptr = btrfs_item_ptr_offset(eb, slot);
cur_offset = 0;
@@ -2153,8 +2682,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
parent = btrfs_inode_extref_parent(eb, extref);
name_len = btrfs_inode_extref_name_len(eb, extref);
- ret = iterate(parent, name_len,
- (unsigned long)&extref->name, eb, ctx);
+ ret = inode_to_path(parent, name_len,
+ (unsigned long)&extref->name, eb, ipath);
if (ret)
break;
@@ -2171,34 +2700,13 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
return ret;
}
-static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
- struct btrfs_path *path, iterate_irefs_t *iterate,
- void *ctx)
-{
- int ret;
- int found_refs = 0;
-
- ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
- if (!ret)
- ++found_refs;
- else if (ret != -ENOENT)
- return ret;
-
- ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
- if (ret == -ENOENT && found_refs)
- return 0;
-
- return ret;
-}
-
/*
* returns 0 if the path could be dumped (probably truncated)
* returns <0 in case of an error
*/
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
- struct extent_buffer *eb, void *ctx)
+ struct extent_buffer *eb, struct inode_fs_paths *ipath)
{
- struct inode_fs_paths *ipath = ctx;
char *fspath;
char *fspath_min;
int i = ipath->fspath->elem_cnt;
@@ -2239,8 +2747,20 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
*/
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
- return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
- inode_to_path, ipath);
+ int ret;
+ int found_refs = 0;
+
+ ret = iterate_inode_refs(inum, ipath);
+ if (!ret)
+ ++found_refs;
+ else if (ret != -ENOENT)
+ return ret;
+
+ ret = iterate_inode_extrefs(inum, ipath);
+ if (ret == -ENOENT && found_refs)
+ return 0;
+
+ return ret;
}
struct btrfs_data_container *init_data_container(u32 total_bytes)
@@ -2249,20 +2769,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
- data = kvmalloc(alloc_bytes, GFP_KERNEL);
+ data = kvzalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- if (total_bytes >= sizeof(*data)) {
+ if (total_bytes >= sizeof(*data))
data->bytes_left = total_bytes - sizeof(*data);
- data->bytes_missing = 0;
- } else {
+ else
data->bytes_missing = sizeof(*data) - total_bytes;
- data->bytes_left = 0;
- }
-
- data->elem_cnt = 0;
- data->elem_missed = 0;
return data;
}
@@ -2271,7 +2785,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
* allocates space to return multiple file system paths for an inode.
* total_bytes to allocate are passed, note that space usable for actual path
* information will be total_bytes - sizeof(struct inode_fs_paths).
- * the returned pointer must be freed with free_ipath() in the end.
+ * the returned pointer must be freed with __free_inode_fs_paths() in the end.
*/
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
struct btrfs_path *path)
@@ -2296,20 +2810,11 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
return ifp;
}
-void free_ipath(struct inode_fs_paths *ipath)
-{
- if (!ipath)
- return;
- kvfree(ipath->fspath);
- kfree(ipath);
-}
-
-struct btrfs_backref_iter *btrfs_backref_iter_alloc(
- struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
+struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
{
struct btrfs_backref_iter *ret;
- ret = kzalloc(sizeof(*ret), gfp_flag);
+ ret = kzalloc(sizeof(*ret), GFP_NOFS);
if (!ret)
return NULL;
@@ -2320,16 +2825,27 @@ struct btrfs_backref_iter *btrfs_backref_iter_alloc(
}
/* Current backref iterator only supports iteration in commit root */
- ret->path->search_commit_root = 1;
- ret->path->skip_locking = 1;
+ ret->path->search_commit_root = true;
+ ret->path->skip_locking = true;
ret->fs_info = fs_info;
return ret;
}
+static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
+{
+ iter->bytenr = 0;
+ iter->item_ptr = 0;
+ iter->cur_ptr = 0;
+ iter->end_ptr = 0;
+ btrfs_release_path(iter->path);
+ memset(&iter->cur_key, 0, sizeof(iter->cur_key));
+}
+
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
struct btrfs_fs_info *fs_info = iter->fs_info;
+ struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
struct btrfs_path *path = iter->path;
struct btrfs_extent_item *ei;
struct btrfs_key key;
@@ -2340,15 +2856,19 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
key.offset = (u64)-1;
iter->bytenr = bytenr;
- ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found, there would have to exist an extent
+ * item with such offset, but this is out of the valid range.
+ */
ret = -EUCLEAN;
goto release;
}
- if (path->slots[0] == 0) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ if (unlikely(path->slots[0] == 0)) {
+ DEBUG_WARN();
ret = -EUCLEAN;
goto release;
}
@@ -2364,7 +2884,7 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]);
iter->end_ptr = (u32)(iter->item_ptr +
- btrfs_item_size_nr(path->nodes[0], path->slots[0]));
+ btrfs_item_size(path->nodes[0], path->slots[0]));
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_extent_item);
@@ -2383,7 +2903,7 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
/* If there is no inline backref, go search for keyed backref */
if (iter->cur_ptr >= iter->end_ptr) {
- ret = btrfs_next_item(fs_info->extent_root, path);
+ ret = btrfs_next_item(extent_root, path);
/* No inline nor keyed ref */
if (ret > 0) {
@@ -2404,7 +2924,7 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]);
iter->item_ptr = iter->cur_ptr;
- iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
+ iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
path->nodes[0], path->slots[0]));
}
@@ -2414,6 +2934,14 @@ release:
return ret;
}
+static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
+{
+ if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
+ iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
+ return true;
+ return false;
+}
+
/*
* Go to the next backref item of current bytenr, can be either inlined or
* keyed.
@@ -2426,7 +2954,8 @@ release:
*/
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
- struct extent_buffer *eb = btrfs_backref_get_eb(iter);
+ struct extent_buffer *eb = iter->path->nodes[0];
+ struct btrfs_root *extent_root;
struct btrfs_path *path = iter->path;
struct btrfs_extent_inline_ref *iref;
int ret;
@@ -2457,7 +2986,8 @@ int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
}
/* We're at keyed items, there is no inline item, go to the next one */
- ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
+ extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
+ ret = btrfs_next_item(extent_root, iter->path);
if (ret)
return ret;
@@ -2469,22 +2999,19 @@ int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]);
iter->cur_ptr = iter->item_ptr;
- iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
+ iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
path->slots[0]);
return 0;
}
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_backref_cache *cache, int is_reloc)
+ struct btrfs_backref_cache *cache, bool is_reloc)
{
int i;
cache->rb_root = RB_ROOT;
for (i = 0; i < BTRFS_MAX_LEVEL; i++)
INIT_LIST_HEAD(&cache->pending[i]);
- INIT_LIST_HEAD(&cache->changed);
- INIT_LIST_HEAD(&cache->detached);
- INIT_LIST_HEAD(&cache->leaves);
INIT_LIST_HEAD(&cache->pending_edge);
INIT_LIST_HEAD(&cache->useless_node);
cache->fs_info = fs_info;
@@ -2512,6 +3039,19 @@ struct btrfs_backref_node *btrfs_backref_alloc_node(
return node;
}
+void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node)
+{
+ if (node) {
+ ASSERT(list_empty(&node->list));
+ ASSERT(list_empty(&node->lower));
+ ASSERT(node->eb == NULL);
+ cache->nr_nodes--;
+ btrfs_put_root(node->root);
+ kfree(node);
+ }
+}
+
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
struct btrfs_backref_cache *cache)
{
@@ -2523,6 +3063,52 @@ struct btrfs_backref_edge *btrfs_backref_alloc_edge(
return edge;
}
+void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_edge *edge)
+{
+ if (edge) {
+ cache->nr_edges--;
+ kfree(edge);
+ }
+}
+
+void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
+{
+ if (node->locked) {
+ btrfs_tree_unlock(node->eb);
+ node->locked = 0;
+ }
+}
+
+void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
+{
+ if (node->eb) {
+ btrfs_backref_unlock_node_buffer(node);
+ free_extent_buffer(node->eb);
+ node->eb = NULL;
+ }
+}
+
+/*
+ * Drop the backref node from cache without cleaning up its children
+ * edges.
+ *
+ * This can only be called on node without parent edges.
+ * The children edges are still kept as is.
+ */
+void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
+ struct btrfs_backref_node *node)
+{
+ ASSERT(list_empty(&node->upper));
+
+ btrfs_backref_drop_node_buffer(node);
+ list_del_init(&node->list);
+ list_del_init(&node->lower);
+ if (!RB_EMPTY_NODE(&node->rb_node))
+ rb_erase(&node->rb_node, &tree->rb_root);
+ btrfs_backref_free_node(tree, node);
+}
+
/*
* Drop the backref node from cache, also cleaning up all its
* upper edges and any uncached nodes in the path.
@@ -2533,29 +3119,17 @@ struct btrfs_backref_edge *btrfs_backref_alloc_edge(
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
struct btrfs_backref_node *node)
{
- struct btrfs_backref_node *upper;
struct btrfs_backref_edge *edge;
if (!node)
return;
- BUG_ON(!node->lowest && !node->detached);
while (!list_empty(&node->upper)) {
- edge = list_entry(node->upper.next, struct btrfs_backref_edge,
- list[LOWER]);
- upper = edge->node[UPPER];
+ edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
+ list[LOWER]);
list_del(&edge->list[LOWER]);
list_del(&edge->list[UPPER]);
btrfs_backref_free_edge(cache, edge);
-
- /*
- * Add the node to leaf node list if no other child block
- * cached.
- */
- if (list_empty(&upper->lower)) {
- list_add_tail(&upper->lower, &cache->leaves);
- upper->lowest = 1;
- }
}
btrfs_backref_drop_node(cache, node);
@@ -2567,33 +3141,26 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
struct btrfs_backref_node *node;
- int i;
- while (!list_empty(&cache->detached)) {
- node = list_entry(cache->detached.next,
- struct btrfs_backref_node, list);
+ while ((node = rb_entry_safe(rb_first(&cache->rb_root),
+ struct btrfs_backref_node, rb_node)))
btrfs_backref_cleanup_node(cache, node);
- }
-
- while (!list_empty(&cache->leaves)) {
- node = list_entry(cache->leaves.next,
- struct btrfs_backref_node, lower);
- btrfs_backref_cleanup_node(cache, node);
- }
-
- cache->last_trans = 0;
- for (i = 0; i < BTRFS_MAX_LEVEL; i++)
- ASSERT(list_empty(&cache->pending[i]));
ASSERT(list_empty(&cache->pending_edge));
ASSERT(list_empty(&cache->useless_node));
- ASSERT(list_empty(&cache->changed));
- ASSERT(list_empty(&cache->detached));
- ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
ASSERT(!cache->nr_nodes);
ASSERT(!cache->nr_edges);
}
+static void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
+ struct btrfs_backref_node *lower,
+ struct btrfs_backref_node *upper)
+{
+ ASSERT(upper && lower && upper->level == lower->level + 1);
+ edge->node[LOWER] = lower;
+ edge->node[UPPER] = upper;
+ list_add_tail(&edge->list[LOWER], &lower->upper);
+}
/*
* Handle direct tree backref
*
@@ -2662,7 +3229,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
ASSERT(upper->checked);
INIT_LIST_HEAD(&edge->list[UPPER]);
}
- btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
+ btrfs_backref_link_edge(edge, cur, upper);
return 0;
}
@@ -2673,12 +3240,14 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
* We still need to do a tree search to find out the parents. This is for
* TREE_BLOCK_REF backref (keyed or inlined).
*
+ * @trans: Transaction handle.
* @ref_key: The same as @ref_key in handle_direct_tree_backref()
* @tree_key: The first key of this tree block.
* @path: A clean (released) path, to avoid allocating path every time
* the function get called.
*/
-static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_key *ref_key,
struct btrfs_key *tree_key,
@@ -2698,8 +3267,12 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
if (IS_ERR(root))
return PTR_ERR(root);
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- cur->cowonly = 1;
+
+ /* We shouldn't be using backref cache for non-shareable roots. */
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_put_root(root);
+ return -EUCLEAN;
+ }
if (btrfs_root_level(&root->root_item) == cur->level) {
/* Tree root */
@@ -2726,8 +3299,8 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
level = cur->level + 1;
/* Search the tree to find parent blocks referring to the block */
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
path->lowest_level = 0;
@@ -2741,9 +3314,9 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
eb = path->nodes[level];
if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
btrfs_err(fs_info,
-"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
- cur->bytenr, level - 1, root->root_key.objectid,
- tree_key->objectid, tree_key->type, tree_key->offset);
+"couldn't find block (%llu) (level %d) in tree (%llu) with key " BTRFS_KEY_FMT,
+ cur->bytenr, level - 1, btrfs_root_id(root),
+ BTRFS_KEY_FMT_VALUE(tree_key));
btrfs_put_root(root);
ret = -ENOENT;
goto out;
@@ -2785,14 +3358,21 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
goto out;
}
upper->owner = btrfs_header_owner(eb);
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- upper->cowonly = 1;
+
+ /* We shouldn't be using backref cache for non shareable roots. */
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_put_root(root);
+ btrfs_backref_free_edge(cache, edge);
+ btrfs_backref_free_node(cache, upper);
+ ret = -EUCLEAN;
+ goto out;
+ }
/*
* If we know the block isn't shared we can avoid
* checking its backrefs.
*/
- if (btrfs_block_can_be_shared(root, eb))
+ if (btrfs_block_can_be_shared(trans, root, eb))
upper->checked = 0;
else
upper->checked = 1;
@@ -2819,7 +3399,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
if (!upper->owner)
upper->owner = btrfs_header_owner(eb);
}
- btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
+ btrfs_backref_link_edge(edge, lower, upper);
if (rb_node) {
btrfs_put_root(root);
@@ -2840,17 +3420,18 @@ out:
* links aren't yet bi-directional. Needs to finish such links.
* Use btrfs_backref_finish_upper_links() to finish such linkage.
*
+ * @trans: Transaction handle.
* @path: Released path for indirect tree backref lookup
* @iter: Released backref iter for extent tree search
* @node_key: The first key of the tree block
*/
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
struct btrfs_backref_node *cur)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_backref_edge *edge;
struct btrfs_backref_node *exist;
int ret;
@@ -2867,7 +3448,7 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
if (ret < 0)
goto out;
/* No extra backref? This means the tree block is corrupted */
- if (ret > 0) {
+ if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -2879,8 +3460,8 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
* type BTRFS_TREE_BLOCK_REF_KEY
*/
ASSERT(list_is_singular(&cur->upper));
- edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
- list[LOWER]);
+ edge = list_first_entry(&cur->upper, struct btrfs_backref_edge,
+ list[LOWER]);
ASSERT(list_empty(&edge->list[UPPER]));
exist = edge->node[UPPER];
/*
@@ -2899,7 +3480,7 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
int type;
cond_resched();
- eb = btrfs_backref_get_eb(iter);
+ eb = iter->path->nodes[0];
key.objectid = iter->bytenr;
if (btrfs_backref_iter_is_inline_ref(iter)) {
@@ -2910,7 +3491,7 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
((unsigned long)iter->cur_ptr);
type = btrfs_get_extent_inline_ref_type(eb, iref,
BTRFS_REF_TYPE_BLOCK);
- if (type == BTRFS_REF_TYPE_INVALID) {
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID)) {
ret = -EUCLEAN;
goto out;
}
@@ -2939,25 +3520,21 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
ret = handle_direct_tree_backref(cache, &key, cur);
if (ret < 0)
goto out;
- continue;
- } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
- ret = -EINVAL;
- btrfs_print_v0_err(fs_info);
- btrfs_handle_fs_error(fs_info, ret, NULL);
- goto out;
- } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
- continue;
+ } else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
+ /*
+ * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref
+ * offset means the root objectid. We need to search
+ * the tree to get its parent bytenr.
+ */
+ ret = handle_indirect_tree_backref(trans, cache, path,
+ &key, node_key, cur);
+ if (ret < 0)
+ goto out;
}
-
/*
- * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
- * means the root objectid. We need to search the tree to get
- * its parent bytenr.
+ * Unrecognized tree backref items (if it can pass tree-checker)
+ * would be ignored.
*/
- ret = handle_indirect_tree_backref(cache, path, &key, node_key,
- cur);
- if (ret < 0)
- goto out;
}
ret = 0;
cur->checked = 1;
@@ -2980,15 +3557,9 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
ASSERT(start->checked);
- /* Insert this node to cache if it's not COW-only */
- if (!start->cowonly) {
- rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
- &start->rb_node);
- if (rb_node)
- btrfs_backref_panic(cache->fs_info, start->bytenr,
- -EEXIST);
- list_add_tail(&start->lower, &cache->leaves);
- }
+ rb_node = rb_simple_insert(&cache->rb_root, &start->simple_node);
+ if (rb_node)
+ btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST);
/*
* Use breadth first search to iterate all related edges.
@@ -3027,38 +3598,22 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
* parents have already been linked.
*/
if (!RB_EMPTY_NODE(&upper->rb_node)) {
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
-
list_add_tail(&edge->list[UPPER], &upper->lower);
continue;
}
/* Sanity check, we shouldn't have any unchecked nodes */
- if (!upper->checked) {
- ASSERT(0);
+ if (unlikely(!upper->checked)) {
+ DEBUG_WARN("we should not have any unchecked nodes");
return -EUCLEAN;
}
- /* Sanity check, COW-only node has non-COW-only parent */
- if (start->cowonly != upper->cowonly) {
- ASSERT(0);
+ rb_node = rb_simple_insert(&cache->rb_root, &upper->simple_node);
+ if (unlikely(rb_node)) {
+ btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST);
return -EUCLEAN;
}
- /* Only cache non-COW-only (subvolume trees) tree blocks */
- if (!upper->cowonly) {
- rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
- &upper->rb_node);
- if (rb_node) {
- btrfs_backref_panic(cache->fs_info,
- upper->bytenr, -EEXIST);
- return -EUCLEAN;
- }
- }
-
list_add_tail(&edge->list[UPPER], &upper->lower);
/*
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index ba454032dbe2..1d009b0f4c69 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -6,10 +6,146 @@
#ifndef BTRFS_BACKREF_H
#define BTRFS_BACKREF_H
-#include <linux/btrfs.h>
-#include "ulist.h"
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <uapi/linux/btrfs.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "messages.h"
+#include "locking.h"
#include "disk-io.h"
#include "extent_io.h"
+#include "ctree.h"
+
+struct extent_inode_elem;
+struct ulist;
+struct btrfs_extent_item;
+struct btrfs_trans_handle;
+struct btrfs_fs_info;
+
+/*
+ * Used by implementations of iterate_extent_inodes_t (see definition below) to
+ * signal that backref iteration can stop immediately and no error happened.
+ * The value must be non-negative and must not be 0 or 1 (which are common
+ * return values from things like btrfs_search_slot() and are used internally
+ * in the backref walking code), and it must be different from
+ * BACKREF_FOUND_SHARED and BACKREF_FOUND_NOT_SHARED.
+ */
+#define BTRFS_ITERATE_EXTENT_INODES_STOP 5
+
+/*
+ * Should return 0 if no errors happened and iteration of backrefs should
+ * continue. Can return BTRFS_ITERATE_EXTENT_INODES_STOP or any other non-zero
+ * value to immediately stop iteration and possibly signal an error back to
+ * the caller.
+ */
+typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 num_bytes,
+ u64 root, void *ctx);
+
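/*
 * Illustrative sketch, not part of the patch: a minimal iterate_extent_inodes_t
 * implementation that stops the walk once a reference from a caller-chosen
 * root is found. The context structure and function name are hypothetical.
 */
struct stop_on_root_ctx {
	u64 wanted_root;
	bool found;
};

static int stop_on_root_ref(u64 inum, u64 offset, u64 num_bytes, u64 root,
			    void *ctx)
{
	struct stop_on_root_ctx *sctx = ctx;

	if (root != sctx->wanted_root)
		return 0;		/* Keep iterating. */

	sctx->found = true;
	/* Positive, non-error value understood by the iteration code. */
	return BTRFS_ITERATE_EXTENT_INODES_STOP;
}
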
+/*
+ * Context and arguments for backref walking functions. Some of the fields are
+ * to be filled by the caller of such functions while other are filled by the
+ * functions themselves, as described below.
+ */
+struct btrfs_backref_walk_ctx {
+ /*
+ * The address of the extent for which we are doing backref walking.
+ * Can be either a data extent or a metadata extent.
+ *
+ * Must always be set by the top level caller.
+ */
+ u64 bytenr;
+ /*
+ * Offset relative to the target extent. This is only used for data
+ * extents, and it's meaningful because we can have file extent items
+ * that point only to a section of a data extent ("bookend" extents),
+ * and we want to filter out any that don't point to a section of the
+ * data extent containing the given offset.
+ *
+ * Must always be set by the top level caller.
+ */
+ u64 extent_item_pos;
+ /*
+ * If true and bytenr corresponds to a data extent, then references from
+ * all file extent items that point to the data extent are considered and
+ * @extent_item_pos is ignored.
+ */
+ bool ignore_extent_item_pos;
+ /*
+ * If true and bytenr corresponds to a data extent, then the inode list
+ * (each member describing inode number, file offset and root) is not
+ * added to each reference added to the @refs ulist.
+ */
+ bool skip_inode_ref_list;
+ /* A valid transaction handle or NULL. */
+ struct btrfs_trans_handle *trans;
+ /*
+ * The file system's info object, can not be NULL.
+ *
+ * Must always be set by the top level caller.
+ */
+ struct btrfs_fs_info *fs_info;
+ /*
+ * Time sequence acquired from btrfs_get_tree_mod_seq(), in case the
+ * caller joined the tree mod log to get a consistent view of b+trees
+ * while we do backref walking, or BTRFS_SEQ_LAST.
+ * When using BTRFS_SEQ_LAST, delayed refs are not checked and it uses
+ * commit roots when searching b+trees - this is a special case for
+ * qgroups used during a transaction commit.
+ */
+ u64 time_seq;
+ /*
+ * Used to collect the bytenr of metadata extents that point to the
+ * target extent.
+ */
+ struct ulist *refs;
+ /*
+ * List used to collect the IDs of the roots from which the target
+ * extent is accessible. Can be NULL in case the caller does not care
+ * about collecting root IDs.
+ */
+ struct ulist *roots;
+ /*
+ * Used by iterate_extent_inodes() and the main backref walk code
+ * (find_parent_nodes()). Lookup and store functions for an optional
+ * cache which maps the logical address (bytenr) of leaves to an array
+ * of root IDs.
+ */
+ bool (*cache_lookup)(u64 leaf_bytenr, void *user_ctx,
+ const u64 **root_ids_ret, int *root_count_ret);
+ void (*cache_store)(u64 leaf_bytenr, const struct ulist *root_ids,
+ void *user_ctx);
+ /*
+ * If this is not NULL, then the backref walking code will call this
+ * for each indirect data extent reference as soon as it finds one,
+ * before collecting all the remaining backrefs and before resolving
+ * indirect backrefs. This allows for the caller to terminate backref
+ * walking as soon as it finds one backref that matches some specific
+ * criteria. The @cache_lookup and @cache_store callbacks should not
+ * be NULL in order to use this callback.
+ */
+ iterate_extent_inodes_t *indirect_ref_iterator;
+ /*
+ * If this is not NULL, then the backref walking code will call this for
+ * each extent item it's meant to process before it actually starts
+ * processing it. If this returns anything other than 0, then it stops
+ * the backref walking code immediately.
+ */
+ int (*check_extent_item)(u64 bytenr, const struct btrfs_extent_item *ei,
+ const struct extent_buffer *leaf, void *user_ctx);
+ /*
+ * If this is not NULL, then the backref walking code will call this for
+ * each extent data ref it finds (BTRFS_EXTENT_DATA_REF_KEY keys) before
+ * processing that data ref. If this callback return false, then it will
+ * processing that data ref. If this callback returns false, then it will
+ * saving time searching for leaves in a fs tree with file extent items
+ * matching the data ref.
+ */
+ bool (*skip_data_ref)(u64 root, u64 ino, u64 offset, void *user_ctx);
+ /* Context object to pass to the callbacks defined above. */
+ void *user_ctx;
+};
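
/*
 * Illustrative sketch, not part of the patch: the minimal fields a top level
 * caller fills in before walking the backrefs of a data extent, mirroring what
 * iterate_inodes_from_logical() does in backref.c. The function name and the
 * iterate/user_ctx parameters are placeholders supplied by the caller.
 */
static int walk_data_extent_refs(struct btrfs_fs_info *fs_info,
				 u64 extent_bytenr, u64 offset_in_extent,
				 iterate_extent_inodes_t *iterate,
				 void *user_ctx)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = extent_bytenr;
	walk_ctx.extent_item_pos = offset_in_extent;

	/* false: attach a transaction and use the tree mod log if possible. */
	return iterate_extent_inodes(&walk_ctx, false, iterate, user_ctx);
}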
struct inode_fs_paths {
struct btrfs_path *btrfs_path;
@@ -17,8 +153,65 @@ struct inode_fs_paths {
struct btrfs_data_container *fspath;
};
-typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
- void *ctx);
+struct btrfs_backref_shared_cache_entry {
+ u64 bytenr;
+ u64 gen;
+ bool is_shared;
+};
+
+#define BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE 8
+
+struct btrfs_backref_share_check_ctx {
+ /* Ulists used during backref walking. */
+ struct ulist refs;
+ /*
+ * The current leaf the caller of btrfs_is_data_extent_shared() is at.
+ * Typically the caller (at the moment only fiemap) tries to determine
+ * the sharedness of data extents pointed to by file extent items from entire
+ * leaves.
+ */
+ u64 curr_leaf_bytenr;
+ /*
+ * The previous leaf the caller was at in the previous call to
+ * btrfs_is_data_extent_shared(). This may be the same as the current
+ * leaf. On the first call it must be 0.
+ */
+ u64 prev_leaf_bytenr;
+ /*
+ * A path from a root to a leaf that has a file extent item pointing to
+ * a given data extent should never exceed the maximum b+tree height.
+ */
+ struct btrfs_backref_shared_cache_entry path_cache_entries[BTRFS_MAX_LEVEL];
+ bool use_path_cache;
+ /*
+ * Cache the sharedness result for the last few extents we have found,
+ * but only for extents for which we have multiple file extent items
+ * that point to them.
+ * It's very common to have several file extent items that point to the
+ * same extent (bytenr) but with different offsets and lengths. This
+ * typically happens for COW writes, partial writes into prealloc
+ * extents, NOCOW writes after snapshotting a root, hole punching or
+ * reflinking within the same file (less common perhaps).
+ * So keep a small cache with the lookup results for the extent pointed
+ * by the last few file extent items. This cache is checked, with a
+ * linear scan, whenever btrfs_is_data_extent_shared() is called, so
+ * it must be small so that it does not negatively affect performance in
+ * case we don't have multiple file extent items that point to the same
+ * data extent.
+ */
+ struct {
+ u64 bytenr;
+ bool is_shared;
+ } prev_extents_cache[BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE];
+ /*
+ * The slot in the prev_extents_cache array that will be used for
+ * storing the sharedness result of a new data extent.
+ */
+ int prev_extents_cache_slot;
+};
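
/*
 * Illustrative sketch, not part of the patch: the kind of linear scan the
 * sharedness check does over the small prev_extents_cache ring before falling
 * back to a full backref walk. The function name is hypothetical; the store
 * side lives in btrfs_is_data_extent_shared() in backref.c.
 */
static bool lookup_prev_extents_cache(const struct btrfs_backref_share_check_ctx *ctx,
				      u64 bytenr, bool *is_shared)
{
	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
		if (ctx->prev_extents_cache[i].bytenr == bytenr) {
			*is_shared = ctx->prev_extents_cache[i].is_shared;
			return true;
		}
	}

	return false;
}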
+
+struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void);
+void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx);
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
struct btrfs_path *path, struct btrfs_key *found_key,
@@ -28,26 +221,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
struct btrfs_key *key, struct btrfs_extent_item *ei,
u32 item_size, u64 *out_root, u8 *out_level);
-int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
- u64 extent_item_objectid,
- u64 extent_offset, int search_commit_root,
- iterate_extent_inodes_t *iterate, void *ctx,
- bool ignore_offset);
+int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
+ bool search_commit_root,
+ iterate_extent_inodes_t *iterate, void *user_ctx);
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
- iterate_extent_inodes_t *iterate, void *ctx,
- bool ignore_offset);
+ void *ctx, bool ignore_offset);
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
-int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **leafs,
- const u64 *extent_item_pos, bool ignore_offset);
-int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots,
+int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx);
+int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
bool skip_commit_root_sem);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
u32 name_len, unsigned long name_off,
@@ -57,14 +241,20 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
struct btrfs_path *path);
-void free_ipath(struct inode_fs_paths *ipath);
+
+DEFINE_FREE(inode_fs_paths, struct inode_fs_paths *,
+ if (_T) {
+ kvfree(_T->fspath);
+ kfree(_T);
+ })
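
/*
 * Illustrative sketch, not part of the patch: with the DEFINE_FREE() class
 * above, callers can rely on scope-based cleanup from <linux/cleanup.h>
 * instead of an explicit free. The function name and the 4096 byte size are
 * only for this example; note the explicit reset to NULL so the cleanup does
 * not run on an ERR_PTR.
 */
static int dump_paths_of_inode(struct btrfs_root *fs_root,
			       struct btrfs_path *path, u64 inum)
{
	struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		int ret = PTR_ERR(ipath);

		ipath = NULL;
		return ret;
	}

	return paths_from_inode(inum, ipath);
}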
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
u64 start_off, struct btrfs_path *path,
struct btrfs_inode_extref **ret_extref,
u64 *found_off);
-int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
- struct ulist *roots, struct ulist *tmp_ulist);
+int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
+ u64 extent_gen,
+ struct btrfs_backref_share_check_ctx *ctx);
int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);
@@ -73,7 +263,7 @@ struct prelim_ref {
struct rb_node rbnode;
u64 root_id;
struct btrfs_key key_for_search;
- int level;
+ u8 level;
int count;
struct extent_inode_elem *inode_list;
u64 parent;
@@ -95,24 +285,7 @@ struct btrfs_backref_iter {
u32 end_ptr;
};
-struct btrfs_backref_iter *btrfs_backref_iter_alloc(
- struct btrfs_fs_info *fs_info, gfp_t gfp_flag);
-
-static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
-{
- if (!iter)
- return;
- btrfs_free_path(iter->path);
- kfree(iter);
-}
-
-static inline struct extent_buffer *btrfs_backref_get_eb(
- struct btrfs_backref_iter *iter)
-{
- if (!iter)
- return NULL;
- return iter->path->nodes[0];
-}
+struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info);
/*
* For metadata with EXTENT_ITEM key (non-skinny) case, the first inline data
@@ -133,25 +306,6 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);
-static inline bool btrfs_backref_iter_is_inline_ref(
- struct btrfs_backref_iter *iter)
-{
- if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
- iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
- return true;
- return false;
-}
-
-static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
-{
- iter->bytenr = 0;
- iter->item_ptr = 0;
- iter->cur_ptr = 0;
- iter->end_ptr = 0;
- btrfs_release_path(iter->path);
- memset(&iter->cur_key, 0, sizeof(iter->cur_key));
-}
-
/*
* Backref cache related structures
*
@@ -163,11 +317,22 @@ static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
* Represent a tree block in the backref cache
*/
struct btrfs_backref_node {
- struct {
- struct rb_node rb_node;
- u64 bytenr;
- }; /* Use rb_simple_node for search/insert */
+ union {
+ /* Use rb_simple_node for search/insert */
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ };
+ struct rb_simple_node simple_node;
+ };
+
+ /*
+ * This is a sanity check: whenever we COW a block we will update
+ * new_bytenr with its current location, and we will check this in
+ * various places to validate that the cache makes sense. It shouldn't
+ * be used for anything else.
+ */
u64 new_bytenr;
/* Objectid of tree block owner, can be not uptodate */
u64 owner;
@@ -185,10 +350,6 @@ struct btrfs_backref_node {
struct extent_buffer *eb;
/* Level of the tree block */
unsigned int level:8;
- /* Is the block in a non-shareable tree */
- unsigned int cowonly:1;
- /* 1 if no child node is in the cache */
- unsigned int lowest:1;
/* Is the extent buffer locked */
unsigned int locked:1;
/* Has the block been processed */
@@ -241,12 +402,6 @@ struct btrfs_backref_cache {
* level blocks may not reflect the new location
*/
struct list_head pending[BTRFS_MAX_LEVEL];
- /* List of backref nodes with no child node */
- struct list_head leaves;
- /* List of blocks that have been COWed in current transaction */
- struct list_head changed;
- /* List of detached backref node. */
- struct list_head detached;
u64 last_trans;
@@ -264,110 +419,43 @@ struct btrfs_backref_cache {
/*
* Whether this cache is for relocation
*
- * Reloction backref cache require more info for reloc root compared
+ * Relocation backref cache requires more info for reloc root compared
* to generic backref cache.
*/
- unsigned int is_reloc;
+ bool is_reloc;
};
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_backref_cache *cache, int is_reloc);
+ struct btrfs_backref_cache *cache, bool is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
struct btrfs_backref_cache *cache);
-#define LINK_LOWER (1 << 0)
-#define LINK_UPPER (1 << 1)
-static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
- struct btrfs_backref_node *lower,
- struct btrfs_backref_node *upper,
- int link_which)
-{
- ASSERT(upper && lower && upper->level == lower->level + 1);
- edge->node[LOWER] = lower;
- edge->node[UPPER] = upper;
- if (link_which & LINK_LOWER)
- list_add_tail(&edge->list[LOWER], &lower->upper);
- if (link_which & LINK_UPPER)
- list_add_tail(&edge->list[UPPER], &upper->lower);
-}
-
-static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
- struct btrfs_backref_node *node)
-{
- if (node) {
- ASSERT(list_empty(&node->list));
- ASSERT(list_empty(&node->lower));
- ASSERT(node->eb == NULL);
- cache->nr_nodes--;
- btrfs_put_root(node->root);
- kfree(node);
- }
-}
-
-static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
- struct btrfs_backref_edge *edge)
-{
- if (edge) {
- cache->nr_edges--;
- kfree(edge);
- }
-}
-
-static inline void btrfs_backref_unlock_node_buffer(
- struct btrfs_backref_node *node)
-{
- if (node->locked) {
- btrfs_tree_unlock(node->eb);
- node->locked = 0;
- }
-}
-
-static inline void btrfs_backref_drop_node_buffer(
- struct btrfs_backref_node *node)
-{
- if (node->eb) {
- btrfs_backref_unlock_node_buffer(node);
- free_extent_buffer(node->eb);
- node->eb = NULL;
- }
-}
-
-/*
- * Drop the backref node from cache without cleaning up its children
- * edges.
- *
- * This can only be called on node without parent edges.
- * The children edges are still kept as is.
- */
-static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
- struct btrfs_backref_node *node)
-{
- ASSERT(list_empty(&node->upper));
-
- btrfs_backref_drop_node_buffer(node);
- list_del_init(&node->list);
- list_del_init(&node->lower);
- if (!RB_EMPTY_NODE(&node->rb_node))
- rb_erase(&node->rb_node, &tree->rb_root);
- btrfs_backref_free_node(tree, node);
-}
+void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node);
+void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_edge *edge);
+void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node);
+void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node);
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
struct btrfs_backref_node *node);
+void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
+ struct btrfs_backref_node *node);
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);
static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
- u64 bytenr, int errno)
+ u64 bytenr, int error)
{
- btrfs_panic(fs_info, errno,
+ btrfs_panic(fs_info, error,
"Inconsistency in backref cache found at offset %llu",
bytenr);
}
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
new file mode 100644
index 000000000000..fa1d321a2fb8
--- /dev/null
+++ b/fs/btrfs/bio.c
@@ -0,0 +1,1059 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2022 Christoph Hellwig.
+ */
+
+#include <linux/bio.h>
+#include "bio.h"
+#include "ctree.h"
+#include "volumes.h"
+#include "raid56.h"
+#include "async-thread.h"
+#include "dev-replace.h"
+#include "zoned.h"
+#include "file-item.h"
+#include "raid-stripe-tree.h"
+
+static struct bio_set btrfs_bioset;
+static struct bio_set btrfs_clone_bioset;
+static struct bio_set btrfs_repair_bioset;
+static mempool_t btrfs_failed_bio_pool;
+
+struct btrfs_failed_bio {
+ struct btrfs_bio *bbio;
+ int num_copies;
+ atomic_t repair_count;
+};
+
+/* Is this a data path I/O that needs storage layer checksum and repair? */
+static inline bool is_data_bbio(const struct btrfs_bio *bbio)
+{
+ return bbio->inode && is_data_inode(bbio->inode);
+}
+
+static bool bbio_has_ordered_extent(const struct btrfs_bio *bbio)
+{
+ return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;
+}
+
+/*
+ * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
+ * is already initialized by the block layer.
+ */
+void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, u64 file_offset,
+ btrfs_bio_end_io_t end_io, void *private)
+{
+ /* @inode parameter is mandatory. */
+ ASSERT(inode);
+
+ memset(bbio, 0, offsetof(struct btrfs_bio, bio));
+ bbio->inode = inode;
+ bbio->end_io = end_io;
+ bbio->private = private;
+ bbio->file_offset = file_offset;
+ atomic_set(&bbio->pending_ios, 1);
+ WRITE_ONCE(bbio->status, BLK_STS_OK);
+}
+
+/*
+ * Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for
+ * btrfs, and is used for all I/O submitted through btrfs_submit_bbio().
+ *
+ * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
+ * a mempool.
+ */
+struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ struct btrfs_inode *inode, u64 file_offset,
+ btrfs_bio_end_io_t end_io, void *private)
+{
+ struct btrfs_bio *bbio;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
+ bbio = btrfs_bio(bio);
+ btrfs_bio_init(bbio, inode, file_offset, end_io, private);
+ return bbio;
+}
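
/*
 * Illustrative sketch, not part of the patch: allocating a one-vector read
 * bbio and handing it to btrfs_submit_bbio(). The completion-based end_io,
 * the function names and how the data page is attached are all placeholders;
 * real submitters add whatever pages/folios they need before submitting.
 */
static void example_read_end_io(struct btrfs_bio *bbio)
{
	/* bi_status holds the final result after checksum checks and repair. */
	complete(bbio->private);
}

static void example_read_one_block(struct btrfs_inode *inode, u64 file_offset,
				   u64 logical, struct page *page, u32 len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct btrfs_bio *bbio;

	bbio = btrfs_bio_alloc(1, REQ_OP_READ, inode, file_offset,
			       example_read_end_io, &done);
	bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
	__bio_add_page(&bbio->bio, page, len, 0);

	btrfs_submit_bbio(bbio, 0);	/* mirror_num 0: let btrfs pick one. */
	wait_for_completion(&done);
}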
+
+static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
+ struct btrfs_bio *orig_bbio,
+ u64 map_length)
+{
+ struct btrfs_bio *bbio;
+ struct bio *bio;
+
+ bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
+ &btrfs_clone_bioset);
+ if (IS_ERR(bio))
+ return ERR_CAST(bio);
+
+ bbio = btrfs_bio(bio);
+ btrfs_bio_init(bbio, orig_bbio->inode, orig_bbio->file_offset, NULL, orig_bbio);
+ orig_bbio->file_offset += map_length;
+ if (bbio_has_ordered_extent(bbio)) {
+ refcount_inc(&orig_bbio->ordered->refs);
+ bbio->ordered = orig_bbio->ordered;
+ bbio->orig_logical = orig_bbio->orig_logical;
+ orig_bbio->orig_logical += map_length;
+ }
+ bbio->csum_search_commit_root = orig_bbio->csum_search_commit_root;
+ atomic_inc(&orig_bbio->pending_ios);
+ return bbio;
+}
+
+void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
+{
+ /* Make sure we're already in task context. */
+ ASSERT(in_task());
+
+ if (bbio->async_csum)
+ wait_for_completion(&bbio->csum_done);
+
+ bbio->bio.bi_status = status;
+ if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
+ struct btrfs_bio *orig_bbio = bbio->private;
+
+ /* Free bio that was never submitted to the underlying device. */
+ if (bbio_has_ordered_extent(bbio))
+ btrfs_put_ordered_extent(bbio->ordered);
+ bio_put(&bbio->bio);
+
+ bbio = orig_bbio;
+ }
+
+ /*
+ * At this point, bbio always points to the original btrfs_bio. Save
+ * the first error in it.
+ */
+ if (status != BLK_STS_OK)
+ cmpxchg(&bbio->status, BLK_STS_OK, status);
+
+ if (atomic_dec_and_test(&bbio->pending_ios)) {
+ /* Load split bio's error which might be set above. */
+ if (status == BLK_STS_OK)
+ bbio->bio.bi_status = READ_ONCE(bbio->status);
+
+ if (bbio_has_ordered_extent(bbio)) {
+ struct btrfs_ordered_extent *ordered = bbio->ordered;
+
+ bbio->end_io(bbio);
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ bbio->end_io(bbio);
+ }
+ }
+}
+
+static int next_repair_mirror(const struct btrfs_failed_bio *fbio, int cur_mirror)
+{
+ if (cur_mirror == fbio->num_copies)
+ return cur_mirror + 1 - fbio->num_copies;
+ return cur_mirror + 1;
+}
+
+static int prev_repair_mirror(const struct btrfs_failed_bio *fbio, int cur_mirror)
+{
+ if (cur_mirror == 1)
+ return fbio->num_copies;
+ return cur_mirror - 1;
+}
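
/*
 * Illustrative example, not part of the patch: with num_copies == 3 and a
 * read that failed on mirror 2, next_repair_mirror() retries mirror 3 and
 * then mirror 1, giving up once it would wrap back to 2. If the read succeeds
 * on mirror 1, the write-back loop in btrfs_end_repair_bio() uses
 * prev_repair_mirror() to walk 3 and then 2, rewriting each previously bad
 * copy with the recovered data until the originally failed mirror is reached.
 */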
+
+static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
+{
+ if (atomic_dec_and_test(&fbio->repair_count)) {
+ btrfs_bio_end_io(fbio->bbio, fbio->bbio->bio.bi_status);
+ mempool_free(fbio, &btrfs_failed_bio_pool);
+ }
+}
+
+static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
+ struct btrfs_device *dev)
+{
+ struct btrfs_failed_bio *fbio = repair_bbio->private;
+ struct btrfs_inode *inode = repair_bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ /*
+ * We cannot move the saved_iter forward, as it will be used again
+ * later by repair_bbio.
+ */
+ struct bvec_iter saved_iter = repair_bbio->saved_iter;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u64 logical = repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT;
+ const u32 nr_steps = repair_bbio->saved_iter.bi_size / step;
+ int mirror = repair_bbio->mirror_num;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ phys_addr_t paddr;
+ unsigned int slot = 0;
+
+ /* Repair bbio should be exactly one block sized. */
+ ASSERT(repair_bbio->saved_iter.bi_size == fs_info->sectorsize);
+
+ btrfs_bio_for_each_block(paddr, &repair_bbio->bio, &saved_iter, step) {
+ ASSERT(slot < nr_steps);
+ paddrs[slot] = paddr;
+ slot++;
+ }
+
+ if (repair_bbio->bio.bi_status ||
+ !btrfs_data_csum_ok(repair_bbio, dev, 0, paddrs)) {
+ bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
+ repair_bbio->bio.bi_iter = repair_bbio->saved_iter;
+
+ mirror = next_repair_mirror(fbio, mirror);
+ if (mirror == fbio->bbio->mirror_num) {
+ btrfs_debug(fs_info, "no mirror left");
+ fbio->bbio->bio.bi_status = BLK_STS_IOERR;
+ goto done;
+ }
+
+ btrfs_submit_bbio(repair_bbio, mirror);
+ return;
+ }
+
+ do {
+ mirror = prev_repair_mirror(fbio, mirror);
+ btrfs_repair_io_failure(fs_info, btrfs_ino(inode),
+ repair_bbio->file_offset, fs_info->sectorsize,
+ logical, paddrs, step, mirror);
+ } while (mirror != fbio->bbio->mirror_num);
+
+done:
+ btrfs_repair_done(fbio);
+ bio_put(&repair_bbio->bio);
+}
+
+/*
+ * Try to kick off a repair read to the next available mirror for a bad sector.
+ *
+ * This primarily tries to recover good data to serve the actual read request,
+ * but also tries to write the good data back to the bad mirror(s) when a
+ * read succeeded to restore the redundancy.
+ */
+static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
+ u32 bio_offset,
+ phys_addr_t paddrs[],
+ struct btrfs_failed_bio *fbio)
+{
+ struct btrfs_inode *inode = failed_bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 nr_steps = sectorsize / step;
+ /*
+ * For bs > ps cases, the saved_iter can be partially moved forward.
+ * In that case we should round it down to the block boundary.
+ */
+ const u64 logical = round_down(failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT,
+ sectorsize);
+ struct btrfs_bio *repair_bbio;
+ struct bio *repair_bio;
+ int num_copies;
+ int mirror;
+
+ btrfs_debug(fs_info, "repair read error: read error at %llu",
+ failed_bbio->file_offset + bio_offset);
+
+ num_copies = btrfs_num_copies(fs_info, logical, sectorsize);
+ if (num_copies == 1) {
+ btrfs_debug(fs_info, "no copy to repair from");
+ failed_bbio->bio.bi_status = BLK_STS_IOERR;
+ return fbio;
+ }
+
+ if (!fbio) {
+ fbio = mempool_alloc(&btrfs_failed_bio_pool, GFP_NOFS);
+ fbio->bbio = failed_bbio;
+ fbio->num_copies = num_copies;
+ atomic_set(&fbio->repair_count, 1);
+ }
+
+ atomic_inc(&fbio->repair_count);
+
+ repair_bio = bio_alloc_bioset(NULL, nr_steps, REQ_OP_READ, GFP_NOFS,
+ &btrfs_repair_bioset);
+ repair_bio->bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ for (int i = 0; i < nr_steps; i++) {
+ int ret;
+
+ ASSERT(offset_in_page(paddrs[i]) + step <= PAGE_SIZE);
+
+ ret = bio_add_page(repair_bio, phys_to_page(paddrs[i]), step,
+ offset_in_page(paddrs[i]));
+ ASSERT(ret == step);
+ }
+
+ repair_bbio = btrfs_bio(repair_bio);
+ btrfs_bio_init(repair_bbio, failed_bbio->inode, failed_bbio->file_offset + bio_offset,
+ NULL, fbio);
+
+ mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
+ btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
+ btrfs_submit_bbio(repair_bbio, mirror);
+ return fbio;
+}
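+
+/*
+ * Editorial sketch, not taken from the patch: next_repair_mirror() and
+ * prev_repair_mirror() are defined earlier in this file; the hypothetical
+ * helpers below only illustrate the assumed 1-based, wrap-around walk over
+ * the available copies that the repair code above relies on.
+ */
+static int example_next_mirror(int num_copies, int cur_mirror)
+{
+ /* Mirror numbers are 1-based; wrap from the last copy back to mirror 1. */
+ if (cur_mirror == num_copies)
+ return 1;
+ return cur_mirror + 1;
+}
+
+static int example_prev_mirror(int num_copies, int cur_mirror)
+{
+ /* Walk backwards, wrapping from mirror 1 to the last copy. */
+ if (cur_mirror == 1)
+ return num_copies;
+ return cur_mirror - 1;
+}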
+
+static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *dev)
+{
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 step = min(sectorsize, PAGE_SIZE);
+ const u32 nr_steps = sectorsize / step;
+ struct bvec_iter *iter = &bbio->saved_iter;
+ blk_status_t status = bbio->bio.bi_status;
+ struct btrfs_failed_bio *fbio = NULL;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ phys_addr_t paddr;
+ u32 offset = 0;
+
+ /* Read-repair requires the inode field to be set by the submitter. */
+ ASSERT(inode);
+
+ /*
+ * Hand off repair bios to the repair code as there is no upper level
+ * submitter for them.
+ */
+ if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
+ btrfs_end_repair_bio(bbio, dev);
+ return;
+ }
+
+ /* Clear the I/O error. A failed repair will reset it. */
+ bbio->bio.bi_status = BLK_STS_OK;
+
+ btrfs_bio_for_each_block(paddr, &bbio->bio, iter, step) {
+ paddrs[(offset / step) % nr_steps] = paddr;
+ offset += step;
+
+ if (IS_ALIGNED(offset, sectorsize)) {
+ if (status ||
+ !btrfs_data_csum_ok(bbio, dev, offset - sectorsize, paddrs))
+ fbio = repair_one_sector(bbio, offset - sectorsize,
+ paddrs, fbio);
+ }
+ }
+ if (bbio->csum != bbio->csum_inline)
+ kvfree(bbio->csum);
+
+ if (fbio)
+ btrfs_repair_done(fbio);
+ else
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+}
+
+static void btrfs_log_dev_io_error(const struct bio *bio, struct btrfs_device *dev)
+{
+ if (!dev || !dev->bdev)
+ return;
+ if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
+ return;
+
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE)
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
+ else if (!(bio->bi_opf & REQ_RAHEAD))
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
+ if (bio->bi_opf & REQ_PREFLUSH)
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
+}
+
+static struct workqueue_struct *btrfs_end_io_wq(const struct btrfs_fs_info *fs_info,
+ const struct bio *bio)
+{
+ if (bio->bi_opf & REQ_META)
+ return fs_info->endio_meta_workers;
+ return fs_info->endio_workers;
+}
+
+static void simple_end_io_work(struct work_struct *work)
+{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
+ struct bio *bio = &bbio->bio;
+
+ if (bio_op(bio) == REQ_OP_READ) {
+ /* Metadata reads are checked and repaired by the submitter. */
+ if (is_data_bbio(bbio))
+ return btrfs_check_read_bio(bbio, bbio->bio.bi_private);
+ return btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+ }
+ if (bio_is_zone_append(bio) && !bio->bi_status)
+ btrfs_record_physical_zoned(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+}
+
+static void btrfs_simple_end_io(struct bio *bio)
+{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+ struct btrfs_device *dev = bio->bi_private;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+
+ btrfs_bio_counter_dec(fs_info);
+
+ if (bio->bi_status)
+ btrfs_log_dev_io_error(bio, dev);
+
+ INIT_WORK(&bbio->end_io_work, simple_end_io_work);
+ queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
+}
+
+static void btrfs_raid56_end_io(struct bio *bio)
+{
+ struct btrfs_io_context *bioc = bio->bi_private;
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+
+ /* RAID56 endio is always handled in workqueue. */
+ ASSERT(in_task());
+
+ btrfs_bio_counter_dec(bioc->fs_info);
+ bbio->mirror_num = bioc->mirror_num;
+ if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio))
+ btrfs_check_read_bio(bbio, NULL);
+ else
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+
+ btrfs_put_bioc(bioc);
+}
+
+static void orig_write_end_io_work(struct work_struct *work)
+{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
+ struct bio *bio = &bbio->bio;
+ struct btrfs_io_stripe *stripe = bio->bi_private;
+ struct btrfs_io_context *bioc = stripe->bioc;
+
+ btrfs_bio_counter_dec(bioc->fs_info);
+
+ if (bio->bi_status) {
+ atomic_inc(&bioc->error);
+ btrfs_log_dev_io_error(bio, stripe->dev);
+ }
+
+ /*
+ * Only send an error to the higher layers if it is beyond the tolerance
+ * threshold.
+ */
+ if (atomic_read(&bioc->error) > bioc->max_errors)
+ bio->bi_status = BLK_STS_IOERR;
+ else
+ bio->bi_status = BLK_STS_OK;
+
+ if (bio_is_zone_append(bio) && !bio->bi_status)
+ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+ btrfs_put_bioc(bioc);
+}
+
+static void btrfs_orig_write_end_io(struct bio *bio)
+{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+
+ INIT_WORK(&bbio->end_io_work, orig_write_end_io_work);
+ queue_work(btrfs_end_io_wq(bbio->inode->root->fs_info, bio), &bbio->end_io_work);
+}
+
+static void clone_write_end_io_work(struct work_struct *work)
+{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
+ struct bio *bio = &bbio->bio;
+ struct btrfs_io_stripe *stripe = bio->bi_private;
+
+ if (bio->bi_status) {
+ atomic_inc(&stripe->bioc->error);
+ btrfs_log_dev_io_error(bio, stripe->dev);
+ } else if (bio_is_zone_append(bio)) {
+ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ }
+
+ /* Pass on control to the original bio this one was cloned from */
+ bio_endio(stripe->bioc->orig_bio);
+ bio_put(bio);
+}
+
+static void btrfs_clone_write_end_io(struct bio *bio)
+{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+
+ INIT_WORK(&bbio->end_io_work, clone_write_end_io_work);
+ queue_work(btrfs_end_io_wq(bbio->inode->root->fs_info, bio), &bbio->end_io_work);
+}
+
+static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
+{
+ if (!dev || !dev->bdev ||
+ test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
+ (btrfs_op(bio) == BTRFS_MAP_WRITE &&
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
+ bio_io_error(bio);
+ return;
+ }
+
+ bio_set_dev(bio, dev->bdev);
+
+ /*
+ * For zone append writes, bi_sector must point to the beginning of the
+ * zone.
+ */
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 zone_start = round_down(physical, dev->fs_info->zone_size);
+
+ ASSERT(btrfs_dev_is_sequential(dev, physical));
+ bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
+ }
+ btrfs_debug(dev->fs_info,
+ "%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
+ __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
+ (unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
+ dev->devid, bio->bi_iter.bi_size);
+
+ /*
+ * Track reads if tracking is enabled; ignore I/O operations before the
+ * filesystem is fully initialized.
+ */
+ if (dev->fs_devices->collect_fs_stats && bio_op(bio) == REQ_OP_READ && dev->fs_info)
+ percpu_counter_add(&dev->fs_info->stats_read_blocks,
+ bio->bi_iter.bi_size >> dev->fs_info->sectorsize_bits);
+
+ if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
+ blkcg_punt_bio_submit(bio);
+ else
+ submit_bio(bio);
+}
+
+static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
+{
+ struct bio *orig_bio = bioc->orig_bio, *bio;
+ struct btrfs_bio *orig_bbio = btrfs_bio(orig_bio);
+
+ ASSERT(bio_op(orig_bio) != REQ_OP_READ);
+
+ /* Reuse the bio embedded into the btrfs_bio for the last mirror */
+ if (dev_nr == bioc->num_stripes - 1) {
+ bio = orig_bio;
+ bio->bi_end_io = btrfs_orig_write_end_io;
+ } else {
+ /* We need to use endio_work to run end_io in task context. */
+ bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &btrfs_bioset);
+ bio_inc_remaining(orig_bio);
+ btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode,
+ orig_bbio->file_offset, NULL, NULL);
+ bio->bi_end_io = btrfs_clone_write_end_io;
+ }
+
+ bio->bi_private = &bioc->stripes[dev_nr];
+ bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
+ bioc->stripes[dev_nr].bioc = bioc;
+ bioc->size = bio->bi_iter.bi_size;
+ btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
+}
+
+static void btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
+ struct btrfs_io_stripe *smap, int mirror_num)
+{
+ if (!bioc) {
+ /* Single mirror read/write fast path. */
+ btrfs_bio(bio)->mirror_num = mirror_num;
+ bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
+ if (bio_op(bio) != REQ_OP_READ)
+ btrfs_bio(bio)->orig_physical = smap->physical;
+ bio->bi_private = smap->dev;
+ bio->bi_end_io = btrfs_simple_end_io;
+ btrfs_submit_dev_bio(smap->dev, bio);
+ } else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ /* Parity RAID write or read recovery. */
+ bio->bi_private = bioc;
+ bio->bi_end_io = btrfs_raid56_end_io;
+ if (bio_op(bio) == REQ_OP_READ)
+ raid56_parity_recover(bio, bioc, mirror_num);
+ else
+ raid56_parity_write(bio, bioc);
+ } else {
+ /* Write to multiple mirrors. */
+ int total_devs = bioc->num_stripes;
+
+ bioc->orig_bio = bio;
+ for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
+ btrfs_submit_mirrored_bio(bioc, dev_nr);
+ }
+}
+
+static int btrfs_bio_csum(struct btrfs_bio *bbio)
+{
+ if (bbio->bio.bi_opf & REQ_META)
+ return btree_csum_one_bio(bbio);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ return btrfs_csum_one_bio(bbio, true);
+#else
+ return btrfs_csum_one_bio(bbio, false);
+#endif
+}
+
+/*
+ * Async submit bios are used to offload expensive checksumming onto the worker
+ * threads.
+ */
+struct async_submit_bio {
+ struct btrfs_bio *bbio;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe smap;
+ int mirror_num;
+ struct btrfs_work work;
+};
+
+/*
+ * In order to insert checksums into the metadata in large chunks, we wait
+ * until bio submission time. All the pages in the bio are checksummed and
+ * sums are attached onto the ordered extent record.
+ *
+ * At IO completion time the csums attached on the ordered extent record are
+ * inserted into the btree.
+ */
+static void run_one_async_start(struct btrfs_work *work)
+{
+ struct async_submit_bio *async =
+ container_of(work, struct async_submit_bio, work);
+ int ret;
+
+ ret = btrfs_bio_csum(async->bbio);
+ if (ret)
+ async->bbio->bio.bi_status = errno_to_blk_status(ret);
+}
+
+/*
+ * In order to insert checksums into the metadata in large chunks, we wait
+ * until bio submission time. All the pages in the bio are checksummed and
+ * sums are attached onto the ordered extent record.
+ *
+ * At IO completion time the csums attached on the ordered extent record are
+ * inserted into the tree.
+ *
+ * If called with @do_free == true, then it will free the work struct.
+ */
+static void run_one_async_done(struct btrfs_work *work, bool do_free)
+{
+ struct async_submit_bio *async =
+ container_of(work, struct async_submit_bio, work);
+ struct bio *bio = &async->bbio->bio;
+
+ if (do_free) {
+ kfree(container_of(work, struct async_submit_bio, work));
+ return;
+ }
+
+ /* If an error occurred we just want to clean up the bio and move on. */
+ if (bio->bi_status) {
+ btrfs_bio_end_io(async->bbio, bio->bi_status);
+ return;
+ }
+
+ /*
+ * All of the bios that pass through here are from async helpers.
+ * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's
+ * context. This changes nothing when cgroups aren't in use.
+ */
+ bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
+ btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
+}
+
+static bool should_async_write(struct btrfs_bio *bbio)
+{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ bool auto_csum_mode = true;
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);
+
+ if (csum_mode == BTRFS_OFFLOAD_CSUM_FORCE_ON)
+ return true;
+ /*
+ * Write bios calculate the checksum and submit the bio in one step.
+ * Unless explicitly required, don't offload the serial csum calculation
+ * and bio submission to a workqueue.
+ */
+ return false;
+#endif
+
+ /* Submit synchronously if the checksum implementation is fast. */
+ if (auto_csum_mode && test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
+ return false;
+
+ /*
+ * Try to defer the submission to a workqueue to parallelize the
+ * checksum calculation unless the I/O is issued synchronously.
+ */
+ if (op_is_sync(bbio->bio.bi_opf))
+ return false;
+
+ /* Zoned devices require I/O to be submitted in order. */
+ if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(fs_info))
+ return false;
+
+ return true;
+}
+
+/*
+ * Submit bio to an async queue.
+ *
+ * Return true if the work has been successfully submitted, else false.
+ */
+static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
+ struct btrfs_io_context *bioc,
+ struct btrfs_io_stripe *smap, int mirror_num)
+{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct async_submit_bio *async;
+
+ async = kmalloc(sizeof(*async), GFP_NOFS);
+ if (!async)
+ return false;
+
+ async->bbio = bbio;
+ async->bioc = bioc;
+ async->smap = *smap;
+ async->mirror_num = mirror_num;
+
+ btrfs_init_work(&async->work, run_one_async_start, run_one_async_done);
+ btrfs_queue_work(fs_info->workers, &async->work);
+ return true;
+}
+
+static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
+{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ unsigned int nr_segs;
+ int sector_offset;
+
+ map_length = min(map_length, fs_info->max_zone_append_size);
+ sector_offset = bio_split_rw_at(&bbio->bio, &fs_info->limits,
+ &nr_segs, map_length);
+ if (sector_offset) {
+ /*
+ * bio_split_rw_at() could split at a size smaller than our
+ * sectorsize and thus cause unaligned I/Os. Fix that by
+ * always rounding down to the nearest boundary.
+ */
+ return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, fs_info->sectorsize);
+ }
+ return map_length;
+}
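+
+/*
+ * Editorial worked example (assumed values): with a 4K sectorsize, if
+ * bio_split_rw_at() above decides to split after 9 sectors, the byte offset is
+ * 9 << SECTOR_SHIFT == 4608, and ALIGN_DOWN(4608, 4096) == 4096, so the split
+ * point is pulled back to the previous block boundary and both halves of the
+ * bio stay block aligned.
+ */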
+
+static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+{
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct bio *bio = &bbio->bio;
+ u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 length = bio->bi_iter.bi_size;
+ u64 map_length = length;
+ bool use_append = btrfs_use_zone_append(bbio);
+ struct btrfs_io_context *bioc = NULL;
+ struct btrfs_io_stripe smap;
+ blk_status_t status;
+ int ret;
+
+ if (bbio->is_scrub || btrfs_is_data_reloc_root(inode->root))
+ smap.rst_search_commit_root = true;
+ else
+ smap.rst_search_commit_root = false;
+
+ btrfs_bio_counter_inc_blocked(fs_info);
+ ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
+ &bioc, &smap, &mirror_num);
+ if (ret) {
+ status = errno_to_blk_status(ret);
+ btrfs_bio_counter_dec(fs_info);
+ goto end_bbio;
+ }
+
+ /*
+ * For fscrypt writes we will get the encrypted bio after we've remapped
+ * our bio to the physical disk location, so we need to save the
+ * original bytenr so we know what we're checksumming.
+ */
+ if (bio_op(bio) == REQ_OP_WRITE && is_data_bbio(bbio))
+ bbio->orig_logical = logical;
+
+ map_length = min(map_length, length);
+ if (use_append)
+ map_length = btrfs_append_map_length(bbio, map_length);
+
+ if (map_length < length) {
+ struct btrfs_bio *split;
+
+ split = btrfs_split_bio(fs_info, bbio, map_length);
+ if (IS_ERR(split)) {
+ status = errno_to_blk_status(PTR_ERR(split));
+ btrfs_bio_counter_dec(fs_info);
+ goto end_bbio;
+ }
+ bbio = split;
+ bio = &bbio->bio;
+ }
+
+ /*
+ * Save the iter for the end_io handler and preload the checksums for
+ * data reads.
+ */
+ if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio)) {
+ bbio->saved_iter = bio->bi_iter;
+ ret = btrfs_lookup_bio_sums(bbio);
+ status = errno_to_blk_status(ret);
+ if (status)
+ goto fail;
+ }
+
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
+ if (use_append) {
+ bio->bi_opf &= ~REQ_OP_WRITE;
+ bio->bi_opf |= REQ_OP_ZONE_APPEND;
+ }
+
+ if (is_data_bbio(bbio) && bioc && bioc->use_rst) {
+ /*
+ * No locking for the list update, as we only add to
+ * the list in the I/O submission path, and list
+ * iteration only happens in the completion path, which
+ * can't happen until after the last submission.
+ */
+ btrfs_get_bioc(bioc);
+ list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
+ }
+
+ /*
+ * Csum items for reloc roots have already been cloned at this
+ * point, so they are handled as part of the no-checksum case.
+ */
+ if (!(inode->flags & BTRFS_INODE_NODATASUM) &&
+ !test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state) &&
+ !btrfs_is_data_reloc_root(inode->root)) {
+ if (should_async_write(bbio) &&
+ btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
+ goto done;
+
+ ret = btrfs_bio_csum(bbio);
+ status = errno_to_blk_status(ret);
+ if (status)
+ goto fail;
+ } else if (use_append ||
+ (btrfs_is_zoned(fs_info) && inode &&
+ inode->flags & BTRFS_INODE_NODATASUM)) {
+ ret = btrfs_alloc_dummy_sum(bbio);
+ status = errno_to_blk_status(ret);
+ if (status)
+ goto fail;
+ }
+ }
+
+ btrfs_submit_bio(bio, bioc, &smap, mirror_num);
+done:
+ return map_length == length;
+
+fail:
+ btrfs_bio_counter_dec(fs_info);
+ /*
+ * We have split the original bbio, so we have to end both the current
+ * @bbio and the remaining one, as the remaining one will never be submitted.
+ */
+ if (map_length < length) {
+ struct btrfs_bio *remaining = bbio->private;
+
+ ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
+ ASSERT(remaining);
+
+ btrfs_bio_end_io(remaining, status);
+ }
+end_bbio:
+ btrfs_bio_end_io(bbio, status);
+ /* Do not submit another chunk */
+ return true;
+}
+
+static void assert_bbio_alignment(struct btrfs_bio *bbio)
+{
+#ifdef CONFIG_BTRFS_ASSERT
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 alignment = min(blocksize, PAGE_SIZE);
+ const u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ const u32 length = bbio->bio.bi_iter.bi_size;
+
+ /* The logical and length should still be aligned to blocksize. */
+ ASSERT(IS_ALIGNED(logical, blocksize) && IS_ALIGNED(length, blocksize) &&
+ length != 0, "root=%llu inode=%llu logical=%llu length=%u",
+ btrfs_root_id(bbio->inode->root),
+ btrfs_ino(bbio->inode), logical, length);
+
+ bio_for_each_bvec(bvec, &bbio->bio, iter)
+ ASSERT(IS_ALIGNED(bvec.bv_offset, alignment) &&
+ IS_ALIGNED(bvec.bv_len, alignment),
+ "root=%llu inode=%llu logical=%llu length=%u index=%u bv_offset=%u bv_len=%u",
+ btrfs_root_id(bbio->inode->root),
+ btrfs_ino(bbio->inode), logical, length, iter.bi_idx,
+ bvec.bv_offset, bvec.bv_len);
+#endif
+}
+
+void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num)
+{
+ /* If bbio->inode is not populated, its file_offset must be 0. */
+ ASSERT(bbio->inode || bbio->file_offset == 0);
+
+ assert_bbio_alignment(bbio);
+
+ while (!btrfs_submit_chunk(bbio, mirror_num))
+ ;
+}
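+
+/*
+ * Editorial sketch of the expected caller pattern; example_submit_read() and
+ * its arguments are hypothetical and not part of the patch. A submitter
+ * allocates a btrfs_bio with btrfs_bio_alloc(), sets the logical address and
+ * data pages, and hands it to btrfs_submit_bbio(); splitting across stripes
+ * and zones is then handled internally by btrfs_submit_chunk().
+ */
+static void example_submit_read(struct btrfs_inode *inode, u64 file_offset,
+ u64 logical, struct page *page, unsigned int len,
+ btrfs_bio_end_io_t end_io, void *private)
+{
+ struct btrfs_bio *bbio;
+
+ bbio = btrfs_bio_alloc(1, REQ_OP_READ, inode, file_offset, end_io, private);
+ /* The bio carries the btrfs logical address; device mapping happens later. */
+ bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ __bio_add_page(&bbio->bio, page, len, 0);
+ /* mirror_num == 0 lets the chunk mapping pick a mirror. */
+ btrfs_submit_bbio(bbio, 0);
+}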
+
+/*
+ * Submit a repair write.
+ *
+ * This bypasses btrfs_submit_bbio() deliberately, as that writes all copies in a
+ * RAID setup. Here we only want to write the one bad copy, so we do the
+ * mapping ourselves and submit the bio directly.
+ *
+ * The I/O is issued synchronously to block the repair read completion from
+ * freeing the bio.
+ *
+ * @ino: Offending inode number
+ * @fileoff: File offset inside the inode
+ * @length: Length of the repair write
+ * @logical: Logical address of the range
+ * @paddrs: Array of physical addresses holding the good content
+ * @step: Length covered by each @paddrs entry
+ * @mirror_num: Mirror number to write to. Must not be zero
+ */
+int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 fileoff,
+ u32 length, u64 logical, const phys_addr_t paddrs[],
+ unsigned int step, int mirror_num)
+{
+ const u32 nr_steps = DIV_ROUND_UP_POW2(length, step);
+ struct btrfs_io_stripe smap = { 0 };
+ struct bio *bio = NULL;
+ int ret = 0;
+
+ ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
+ BUG_ON(!mirror_num);
+
+ /* Basic alignment checks. */
+ ASSERT(IS_ALIGNED(logical, fs_info->sectorsize));
+ ASSERT(IS_ALIGNED(length, fs_info->sectorsize));
+ ASSERT(IS_ALIGNED(fileoff, fs_info->sectorsize));
+ /* Either it's a single data or metadata block. */
+ ASSERT(length <= BTRFS_MAX_BLOCKSIZE);
+ ASSERT(step <= length);
+ ASSERT(is_power_of_2(step));
+
+ if (btrfs_repair_one_zone(fs_info, logical))
+ return 0;
+
+ /*
+ * Avoid races with device replace and make sure our bioc has devices
+ * associated to its stripes that don't go away while we are doing the
+ * read repair operation.
+ */
+ btrfs_bio_counter_inc_blocked(fs_info);
+ ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
+ if (ret < 0)
+ goto out_counter_dec;
+
+ if (unlikely(!smap.dev->bdev ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state))) {
+ ret = -EIO;
+ goto out_counter_dec;
+ }
+
+ bio = bio_alloc(smap.dev->bdev, nr_steps, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
+ bio->bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
+ for (int i = 0; i < nr_steps; i++) {
+ ret = bio_add_page(bio, phys_to_page(paddrs[i]), step, offset_in_page(paddrs[i]));
+ /* We should have allocated enough slots to contain all the different pages. */
+ ASSERT(ret == step);
+ }
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ if (ret) {
+ /* try to remap that extent elsewhere? */
+ btrfs_dev_stat_inc_and_print(smap.dev, BTRFS_DEV_STAT_WRITE_ERRS);
+ goto out_counter_dec;
+ }
+
+ btrfs_info_rl(fs_info,
+ "read error corrected: ino %llu off %llu (dev %s sector %llu)",
+ ino, fileoff, btrfs_dev_name(smap.dev),
+ smap.physical >> SECTOR_SHIFT);
+ ret = 0;
+
+out_counter_dec:
+ btrfs_bio_counter_dec(fs_info);
+ return ret;
+}
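+
+/*
+ * Editorial usage sketch; example_repair_block() is hypothetical and not part
+ * of the patch. It assumes sectorsize <= PAGE_SIZE, so @step equals the
+ * sectorsize and a single paddrs[] entry covers the whole block. The good copy
+ * held at @paddr is written back to @bad_mirror, the mirror that returned bad
+ * data.
+ */
+static int example_repair_block(struct btrfs_fs_info *fs_info, u64 ino,
+ u64 fileoff, u64 logical, phys_addr_t paddr, int bad_mirror)
+{
+ phys_addr_t paddrs[1] = { paddr };
+
+ return btrfs_repair_io_failure(fs_info, ino, fileoff, fs_info->sectorsize,
+ logical, paddrs, fs_info->sectorsize, bad_mirror);
+}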
+
+/*
+ * Submit a btrfs_bio based repair write.
+ *
+ * If @dev_replace is true, the write is submitted to the dev-replace target.
+ */
+void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
+{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 length = bbio->bio.bi_iter.bi_size;
+ struct btrfs_io_stripe smap = { 0 };
+ int ret;
+
+ ASSERT(mirror_num > 0);
+ ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
+ ASSERT(!is_data_inode(bbio->inode));
+ ASSERT(bbio->is_scrub);
+
+ btrfs_bio_counter_inc_blocked(fs_info);
+ ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
+ if (ret < 0)
+ goto fail;
+
+ if (dev_replace) {
+ ASSERT(smap.dev == fs_info->dev_replace.srcdev);
+ smap.dev = fs_info->dev_replace.tgtdev;
+ }
+ btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
+ return;
+
+fail:
+ btrfs_bio_counter_dec(fs_info);
+ btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
+}
+
+int __init btrfs_bioset_init(void)
+{
+ if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
+ offsetof(struct btrfs_bio, bio),
+ BIOSET_NEED_BVECS))
+ return -ENOMEM;
+ if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
+ offsetof(struct btrfs_bio, bio), 0))
+ goto out;
+ if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
+ offsetof(struct btrfs_bio, bio),
+ BIOSET_NEED_BVECS))
+ goto out;
+ if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
+ sizeof(struct btrfs_failed_bio)))
+ goto out;
+ return 0;
+
+out:
+ btrfs_bioset_exit();
+ return -ENOMEM;
+}
+
+void __cold btrfs_bioset_exit(void)
+{
+ mempool_exit(&btrfs_failed_bio_pool);
+ bioset_exit(&btrfs_repair_bioset);
+ bioset_exit(&btrfs_clone_bioset);
+ bioset_exit(&btrfs_bioset);
+}
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
new file mode 100644
index 000000000000..1be74209f0b8
--- /dev/null
+++ b/fs/btrfs/bio.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2022 Christoph Hellwig.
+ */
+
+#ifndef BTRFS_BIO_H
+#define BTRFS_BIO_H
+
+#include <linux/types.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include "tree-checker.h"
+
+struct btrfs_bio;
+struct btrfs_fs_info;
+struct btrfs_inode;
+
+#define BTRFS_BIO_INLINE_CSUM_SIZE 64
+
+typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
+
+/*
+ * High-level btrfs I/O structure. It is allocated by btrfs_bio_alloc() and
+ * passed to btrfs_submit_bbio() for mapping to the physical devices.
+ */
+struct btrfs_bio {
+ /*
+ * Inode and offset into it that this I/O operates on.
+ *
+ * If the inode is a data one, csum verification and read-repair
+ * will be done automatically.
+ * If the inode is a metadata one, everything is handled by the caller.
+ */
+ struct btrfs_inode *inode;
+ u64 file_offset;
+
+ union {
+ /*
+ * For data reads: checksumming and original I/O information.
+ * (for internal use in the btrfs_submit_bbio() machinery only)
+ */
+ struct {
+ u8 *csum;
+ u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
+ struct bvec_iter saved_iter;
+ };
+
+ /*
+ * For data writes:
+ * - ordered extent covering the bio
+ * - pointer to the checksums for this bio
+ * - original physical address from the allocator
+ * (for zone append only)
+ * - original logical address, used for checksumming fscrypt bios
+ */
+ struct {
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_ordered_sum *sums;
+ struct work_struct csum_work;
+ struct completion csum_done;
+ struct bvec_iter csum_saved_iter;
+ u64 orig_physical;
+ u64 orig_logical;
+ };
+
+ /* For metadata reads: parentness verification. */
+ struct btrfs_tree_parent_check parent_check;
+ };
+
+ /* End I/O information supplied to btrfs_bio_alloc */
+ btrfs_bio_end_io_t end_io;
+ void *private;
+
+ /* For internal use in read end I/O handling */
+ unsigned int mirror_num;
+ atomic_t pending_ios;
+ struct work_struct end_io_work;
+
+ /* Save the first error status of split bio. */
+ blk_status_t status;
+
+ /* Use the commit root to look up csums (data read bio only). */
+ bool csum_search_commit_root;
+
+ /*
+ * Since scrub reuses the btree inode, we need this flag to distinguish
+ * scrub bios.
+ */
+ bool is_scrub;
+
+ /* Whether the csum generation for data write is async. */
+ bool async_csum;
+
+ /*
+ * This member must come last: bio_alloc_bioset() allocates enough
+ * bytes for the entire btrfs_bio, but relies on bio being last.
+ */
+ struct bio bio;
+};
+
+static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
+{
+ return container_of(bio, struct btrfs_bio, bio);
+}
+
+int __init btrfs_bioset_init(void);
+void __cold btrfs_bioset_exit(void);
+
+void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, u64 file_offset,
+ btrfs_bio_end_io_t end_io, void *private);
+struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ struct btrfs_inode *inode, u64 file_offset,
+ btrfs_bio_end_io_t end_io, void *private);
+void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status);
+
+/* Submit using blkcg_punt_bio_submit. */
+#define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE
+
+void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num);
+void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace);
+int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 fileoff,
+ u32 length, u64 logical, const phys_addr_t paddrs[],
+ unsigned int step, int mirror_num);
+
+#endif
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 444e9c89ff3e..08b14449fabe 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
@@ -17,6 +18,34 @@
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+
+#ifdef CONFIG_BTRFS_DEBUG
+int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+
+ return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
+ block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
+ block_group->flags & BTRFS_BLOCK_GROUP_DATA);
+}
+#endif
+
+static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group)
+{
+ /* The meta_write_pointer is only available on zoned setups. */
+ if (!btrfs_is_zoned(block_group->fs_info))
+ return false;
+
+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
+ return false;
+
+ return block_group->start + block_group->alloc_offset >
+ block_group->meta_write_pointer;
+}
/*
* Return target flags in extended format or 0 if restripe for this chunk_type
@@ -24,9 +53,9 @@
*
* Should be called with balance_lock held
*/
-static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags)
{
- struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+ const struct btrfs_balance_control *bctl = fs_info->balance_ctl;
u64 target = 0;
if (!bctl)
@@ -79,14 +108,21 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
}
allowed &= flags;
- if (allowed & BTRFS_BLOCK_GROUP_RAID6)
+ /* Select the highest-redundancy RAID level. */
+ if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
+ allowed = BTRFS_BLOCK_GROUP_RAID1C4;
+ else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
allowed = BTRFS_BLOCK_GROUP_RAID6;
+ else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
+ allowed = BTRFS_BLOCK_GROUP_RAID1C3;
else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
allowed = BTRFS_BLOCK_GROUP_RAID5;
else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
allowed = BTRFS_BLOCK_GROUP_RAID10;
else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
allowed = BTRFS_BLOCK_GROUP_RAID1;
+ else if (allowed & BTRFS_BLOCK_GROUP_DUP)
+ allowed = BTRFS_BLOCK_GROUP_DUP;
else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
allowed = BTRFS_BLOCK_GROUP_RAID0;
@@ -124,7 +160,16 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
{
if (refcount_dec_and_test(&cache->refs)) {
WARN_ON(cache->pinned > 0);
- WARN_ON(cache->reserved > 0);
+ /*
+ * If there was a failure to clean up a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on reserved > 0 in that
+ * case.
+ */
+ if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
+ WARN_ON(cache->reserved > 0);
/*
* A block_group shouldn't be on the discard_list anymore.
@@ -135,59 +180,47 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
cache);
- /*
- * If not empty, someone is still holding mutex of
- * full_stripe_lock, which can only be released by caller.
- * And it will definitely cause use-after-free when caller
- * tries to release full stripe lock.
- *
- * No better way to resolve, but only to warn.
- */
- WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
kfree(cache->free_space_ctl);
- kfree(cache->physical_map);
+ btrfs_free_chunk_map(cache->physical_map);
kfree(cache);
}
}
+static int btrfs_bg_start_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_block_group *new_bg =
+ rb_entry(new, struct btrfs_block_group, cache_node);
+ const struct btrfs_block_group *exist_bg =
+ rb_entry(exist, struct btrfs_block_group, cache_node);
+
+ if (new_bg->start < exist_bg->start)
+ return -1;
+ if (new_bg->start > exist_bg->start)
+ return 1;
+ return 0;
+}
+
/*
* This adds the block group to the fs_info rb tree for the block group cache
*/
-static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
- struct btrfs_block_group *block_group)
+static int btrfs_add_block_group_cache(struct btrfs_block_group *block_group)
{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct btrfs_block_group *cache;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct rb_node *exist;
+ int ret = 0;
ASSERT(block_group->length != 0);
- spin_lock(&info->block_group_cache_lock);
- p = &info->block_group_cache_tree.rb_node;
-
- while (*p) {
- parent = *p;
- cache = rb_entry(parent, struct btrfs_block_group, cache_node);
- if (block_group->start < cache->start) {
- p = &(*p)->rb_left;
- } else if (block_group->start > cache->start) {
- p = &(*p)->rb_right;
- } else {
- spin_unlock(&info->block_group_cache_lock);
- return -EEXIST;
- }
- }
-
- rb_link_node(&block_group->cache_node, parent, p);
- rb_insert_color(&block_group->cache_node,
- &info->block_group_cache_tree);
-
- if (info->first_logical_byte > block_group->start)
- info->first_logical_byte = block_group->start;
+ write_lock(&fs_info->block_group_cache_lock);
- spin_unlock(&info->block_group_cache_lock);
+ exist = rb_find_add_cached(&block_group->cache_node,
+ &fs_info->block_group_cache_tree, btrfs_bg_start_cmp);
+ if (exist)
+ ret = -EEXIST;
+ write_unlock(&fs_info->block_group_cache_lock);
- return 0;
+ return ret;
}
/*
@@ -201,8 +234,8 @@ static struct btrfs_block_group *block_group_cache_tree_search(
struct rb_node *n;
u64 end, start;
- spin_lock(&info->block_group_cache_lock);
- n = info->block_group_cache_tree.rb_node;
+ read_lock(&info->block_group_cache_lock);
+ n = info->block_group_cache_tree.rb_root.rb_node;
while (n) {
cache = rb_entry(n, struct btrfs_block_group, cache_node);
@@ -224,12 +257,9 @@ static struct btrfs_block_group *block_group_cache_tree_search(
break;
}
}
- if (ret) {
+ if (ret)
btrfs_get_block_group(ret);
- if (bytenr == 0 && info->first_logical_byte > ret->start)
- info->first_logical_byte = ret->start;
- }
- spin_unlock(&info->block_group_cache_lock);
+ read_unlock(&info->block_group_cache_lock);
return ret;
}
@@ -258,15 +288,15 @@ struct btrfs_block_group *btrfs_next_block_group(
struct btrfs_fs_info *fs_info = cache->fs_info;
struct rb_node *node;
- spin_lock(&fs_info->block_group_cache_lock);
+ read_lock(&fs_info->block_group_cache_lock);
/* If our block group was removed, we need a full search. */
if (RB_EMPTY_NODE(&cache->cache_node)) {
const u64 next_bytenr = cache->start + cache->length;
- spin_unlock(&fs_info->block_group_cache_lock);
+ read_unlock(&fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
- cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
- return cache;
+ return btrfs_lookup_first_block_group(fs_info, next_bytenr);
}
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
@@ -275,46 +305,68 @@ struct btrfs_block_group *btrfs_next_block_group(
btrfs_get_block_group(cache);
} else
cache = NULL;
- spin_unlock(&fs_info->block_group_cache_lock);
+ read_unlock(&fs_info->block_group_cache_lock);
return cache;
}
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+/*
+ * Check if we can do a NOCOW write for a given extent.
+ *
+ * @fs_info: The filesystem information object.
+ * @bytenr: Logical start address of the extent.
+ *
+ * Check if we can do a NOCOW write for the given extent, and if so, increment
+ * the number of NOCOW writers in the block group that contains the extent, as
+ * long as the block group exists and it's currently not in read-only mode.
+ *
+ * Returns: A non-NULL block group pointer if we can do a NOCOW write; the
+ * caller is then responsible for calling btrfs_dec_nocow_writers() later.
+ *
+ * Or NULL if we cannot do a NOCOW write.
+ */
+struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
+ u64 bytenr)
{
struct btrfs_block_group *bg;
- bool ret = true;
+ bool can_nocow = true;
bg = btrfs_lookup_block_group(fs_info, bytenr);
if (!bg)
- return false;
+ return NULL;
spin_lock(&bg->lock);
if (bg->ro)
- ret = false;
+ can_nocow = false;
else
atomic_inc(&bg->nocow_writers);
spin_unlock(&bg->lock);
- /* No put on block group, done by btrfs_dec_nocow_writers */
- if (!ret)
+ if (!can_nocow) {
btrfs_put_block_group(bg);
+ return NULL;
+ }
- return ret;
+ /* No put on block group, done by btrfs_dec_nocow_writers(). */
+ return bg;
}
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+/*
+ * Decrement the number of NOCOW writers in a block group.
+ *
+ * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
+ * and on the block group returned by that call. Typically this is called after
+ * creating an ordered extent for a NOCOW write, to prevent races with scrub and
+ * relocation.
+ *
+ * After this call, the caller should not use the block group anymore. If it wants
+ * to use it, then it should get a reference on it before calling this function.
+ */
+void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
- struct btrfs_block_group *bg;
-
- bg = btrfs_lookup_block_group(fs_info, bytenr);
- ASSERT(bg);
if (atomic_dec_and_test(&bg->nocow_writers))
wake_up_var(&bg->nocow_writers);
- /*
- * Once for our lookup and once for the lookup done by a previous call
- * to btrfs_inc_nocow_writers()
- */
- btrfs_put_block_group(bg);
+
+ /* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
btrfs_put_block_group(bg);
}
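+
+/*
+ * Editorial usage sketch; example_try_nocow() is hypothetical and not part of
+ * the patch. The typical NOCOW write path pins the block group before creating
+ * the ordered extent and releases it right after, so scrub or relocation
+ * cannot turn the block group read-only in between.
+ */
+static bool example_try_nocow(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+ struct btrfs_block_group *bg;
+
+ bg = btrfs_inc_nocow_writers(fs_info, bytenr);
+ if (!bg)
+ return false; /* Fall back to a COW write. */
+
+ /* ... create the ordered extent for the NOCOW write here ... */
+
+ btrfs_dec_nocow_writers(bg);
+ return true;
+}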
@@ -377,7 +429,7 @@ struct btrfs_caching_control *btrfs_get_caching_control(
return ctl;
}
-void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
+static void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
if (refcount_dec_and_test(&ctl->count))
kfree(ctl);
@@ -400,50 +452,47 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
u64 num_bytes)
{
struct btrfs_caching_control *caching_ctl;
+ int progress;
caching_ctl = btrfs_get_caching_control(cache);
if (!caching_ctl)
return;
+ /*
+ * We've already failed to allocate from this block group, so even if
+ * there's enough space in the block group it isn't contiguous enough to
+ * allow for an allocation, so wait for at least the next wakeup tick,
+ * or for the caching work to finish.
+ */
+ progress = atomic_read(&caching_ctl->progress);
+
wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
- (cache->free_space_ctl->free_space >= num_bytes));
+ (progress != atomic_read(&caching_ctl->progress) &&
+ (cache->free_space_ctl->free_space >= num_bytes)));
btrfs_put_caching_control(caching_ctl);
}
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
+static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
+ struct btrfs_caching_control *caching_ctl)
+{
+ wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
+ return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
+}
+
+static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
struct btrfs_caching_control *caching_ctl;
- int ret = 0;
+ int ret;
caching_ctl = btrfs_get_caching_control(cache);
if (!caching_ctl)
return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
-
- wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
- if (cache->cached == BTRFS_CACHE_ERROR)
- ret = -EIO;
+ ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
btrfs_put_caching_control(caching_ctl);
return ret;
}
-static bool space_cache_v1_done(struct btrfs_block_group *cache)
-{
- bool ret;
-
- spin_lock(&cache->lock);
- ret = cache->cached != BTRFS_CACHE_FAST;
- spin_unlock(&cache->lock);
-
- return ret;
-}
-
-void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
- struct btrfs_caching_control *caching_ctl)
-{
- wait_event(caching_ctl->wait, space_cache_v1_done(cache));
-}
-
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
@@ -466,33 +515,44 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
#endif
/*
- * This is only called by btrfs_cache_block_group, since we could have freed
- * extents we need to check the pinned_extents for any extents that can't be
- * used yet since their free space will be released as soon as the transaction
- * commits.
+ * Add a free space range to the in-memory free space cache of a block group.
+ * This checks if the range contains super block locations; any such locations
+ * are not added to the free space cache.
+ *
+ * @block_group: The target block group.
+ * @start: Start offset of the range.
+ * @end: End offset of the range (exclusive).
+ * @total_added_ret: Optional pointer to return the total amount of space
+ * added to the block group's free space cache.
+ *
+ * Returns 0 on success or < 0 on error.
*/
-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
+ u64 end, u64 *total_added_ret)
{
struct btrfs_fs_info *info = block_group->fs_info;
- u64 extent_start, extent_end, size, total_added = 0;
+ u64 extent_start, extent_end, size;
int ret;
+ if (total_added_ret)
+ *total_added_ret = 0;
+
while (start < end) {
- ret = find_first_extent_bit(&info->excluded_extents, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY | EXTENT_UPTODATE,
- NULL);
- if (ret)
+ if (!btrfs_find_first_extent_bit(&info->excluded_extents, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY, NULL))
break;
if (extent_start <= start) {
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
- total_added += size;
ret = btrfs_add_free_space_async_trimmed(block_group,
start, size);
- BUG_ON(ret); /* -ENOMEM or logic error */
+ if (ret)
+ return ret;
+ if (total_added_ret)
+ *total_added_ret += size;
start = extent_end + 1;
} else {
break;
@@ -501,21 +561,159 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
if (start < end) {
size = end - start;
- total_added += size;
ret = btrfs_add_free_space_async_trimmed(block_group, start,
size);
- BUG_ON(ret); /* -ENOMEM or logic error */
+ if (ret)
+ return ret;
+ if (total_added_ret)
+ *total_added_ret += size;
+ }
+
+ return 0;
+}
+
+/*
+ * Sample an extent item from roughly index / max_index of the way through the
+ * block group.
+ *
+ * @block_group: the block group to sample from
+ * @index: the integral step through the block group to grab from
+ * @max_index: the granularity of the sampling
+ * @key: return value parameter for the item we find
+ *
+ * Pre-conditions on indices:
+ * 0 <= index <= max_index
+ * 0 < max_index
+ *
+ * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
+ * error code on error.
+ */
+static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
+ struct btrfs_block_group *block_group,
+ int index, int max_index,
+ struct btrfs_key *found_key)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_root *extent_root;
+ u64 search_offset;
+ u64 search_end = block_group->start + block_group->length;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct btrfs_key search_key;
+ int ret = 0;
+
+ ASSERT(index >= 0);
+ ASSERT(index <= max_index);
+ ASSERT(max_index > 0);
+ lockdep_assert_held(&caching_ctl->mutex);
+ lockdep_assert_held_read(&fs_info->commit_root_sem);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
+ BTRFS_SUPER_INFO_OFFSET));
+
+ path->skip_locking = true;
+ path->search_commit_root = true;
+ path->reada = READA_FORWARD;
+
+ search_offset = index * div_u64(block_group->length, max_index);
+ search_key.objectid = block_group->start + search_offset;
+ search_key.type = BTRFS_EXTENT_ITEM_KEY;
+ search_key.offset = 0;
+
+ btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
+ /* Success; sampled an extent item in the block group */
+ if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
+ found_key->objectid >= block_group->start &&
+ found_key->objectid + found_key->offset <= search_end)
+ break;
+
+ /* We can't possibly find a valid extent item anymore */
+ if (found_key->objectid >= search_end) {
+ ret = 1;
+ break;
+ }
}
- return total_added;
+ lockdep_assert_held(&caching_ctl->mutex);
+ lockdep_assert_held_read(&fs_info->commit_root_sem);
+ return ret;
+}
+
+/*
+ * Best effort attempt to compute a block group's size class while caching it.
+ *
+ * @block_group: the block group we are caching
+ *
+ * We cannot infer the size class while adding free space extents, because that
+ * logic doesn't care about contiguous file extents (it doesn't differentiate
+ * between a 100M extent and 100 contiguous 1M extents). So we need to read the
+ * file extent items. Reading all of them is quite wasteful, because usually
+ * only a handful are enough to give a good answer. Therefore, we just grab 5 of
+ * them at even steps through the block group and pick the smallest size class
+ * we see. Since size class is best effort, and not guaranteed in general,
+ * inaccuracy is acceptable.
+ *
+ * To be more explicit about why this algorithm makes sense:
+ *
+ * If we are caching in a block group from disk, then there are three major cases
+ * to consider:
+ * 1. the block group is well behaved and all extents in it are the same size
+ * class.
+ * 2. the block group is mostly one size class with rare exceptions for last
+ * ditch allocations
+ * 3. the block group was populated before size classes and can have a totally
+ * arbitrary mix of size classes.
+ *
+ * In case 1, looking at any extent in the block group will yield the correct
+ * result. For the mixed cases, taking the minimum size class seems like a good
+ * approximation, since gaps from frees will still be usable by that size
+ * class. For case 2, a small handful of file extents is likely to yield the
+ * right answer. For case 3, we can either read every file extent, or admit
+ * that this is best effort anyway and try to stay fast.
+ *
+ * Returns: 0 on success, negative error code on error.
+ */
+static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_key key;
+ int i;
+ u64 min_size = block_group->length;
+ enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
+ int ret;
+
+ if (!btrfs_block_group_should_use_size_class(block_group))
+ return 0;
+
+ lockdep_assert_held(&caching_ctl->mutex);
+ lockdep_assert_held_read(&fs_info->commit_root_sem);
+ for (i = 0; i < 5; ++i) {
+ ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ continue;
+ min_size = min_t(u64, min_size, key.offset);
+ size_class = btrfs_calc_block_group_size_class(min_size);
+ }
+ if (size_class != BTRFS_BG_SZ_NONE) {
+ spin_lock(&block_group->lock);
+ block_group->size_class = size_class;
+ spin_unlock(&block_group->lock);
+ }
+out:
+ return ret;
}
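+
+/*
+ * Editorial worked example (assumed values): for a 1 GiB block group and
+ * max_index == 5, sample_block_group_extent_item() starts its five searches at
+ * offsets of roughly 0, 205 MiB, 410 MiB, 614 MiB and 819 MiB into the block
+ * group, and the smallest extent size seen among the hits decides the size
+ * class recorded above.
+ */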
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
struct btrfs_block_group *block_group = caching_ctl->block_group;
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
- struct btrfs_path *path;
+ struct btrfs_root *extent_root;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
u64 total_found = 0;
@@ -529,6 +727,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
return -ENOMEM;
last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
+ extent_root = btrfs_extent_root(fs_info, last);
#ifdef CONFIG_BTRFS_DEBUG
/*
@@ -545,13 +744,13 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
* root to add free space. So we skip locking and search the commit
* root, since its read-only
*/
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
path->reada = READA_FORWARD;
key.objectid = last;
- key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = 0;
next:
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
@@ -576,8 +775,6 @@ next:
if (need_resched() ||
rwsem_is_contended(&fs_info->commit_root_sem)) {
- if (wakeup)
- caching_ctl->progress = last;
btrfs_release_path(path);
up_read(&fs_info->commit_root_sem);
mutex_unlock(&caching_ctl->mutex);
@@ -599,11 +796,8 @@ next:
if (key.objectid < last) {
key.objectid = last;
- key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
-
- if (wakeup)
- caching_ctl->progress = last;
+ key.offset = 0;
btrfs_release_path(path);
goto next;
}
@@ -618,8 +812,13 @@ next:
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
key.type == BTRFS_METADATA_ITEM_KEY) {
- total_found += add_new_free_space(block_group, last,
- key.objectid);
+ u64 space_added;
+
+ ret = btrfs_add_new_free_space(block_group, last,
+ key.objectid, &space_added);
+ if (ret)
+ goto out;
+ total_found += space_added;
if (key.type == BTRFS_METADATA_ITEM_KEY)
last = key.objectid +
fs_info->nodesize;
@@ -628,23 +827,28 @@ next:
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
- if (wakeup)
+ if (wakeup) {
+ atomic_inc(&caching_ctl->progress);
wake_up(&caching_ctl->wait);
+ }
}
}
path->slots[0]++;
}
- ret = 0;
-
- total_found += add_new_free_space(block_group, last,
- block_group->start + block_group->length);
- caching_ctl->progress = (u64)-1;
+ ret = btrfs_add_new_free_space(block_group, last,
+ block_group->start + block_group->length,
+ NULL);
out:
- btrfs_free_path(path);
return ret;
}
+static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
+{
+ btrfs_clear_extent_bit(&bg->fs_info->excluded_extents, bg->start,
+ bg->start + bg->length - 1, EXTENT_DIRTY, NULL);
+}
+
static noinline void caching_thread(struct btrfs_work *work)
{
struct btrfs_block_group *block_group;
@@ -659,6 +863,7 @@ static noinline void caching_thread(struct btrfs_work *work)
mutex_lock(&caching_ctl->mutex);
down_read(&fs_info->commit_root_sem);
+ load_block_group_size_class(caching_ctl, block_group);
if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
ret = load_free_space_cache(block_group);
if (ret == 1) {
@@ -685,7 +890,7 @@ static noinline void caching_thread(struct btrfs_work *work)
*/
if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
!(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
- ret = load_free_space_tree(caching_ctl);
+ ret = btrfs_load_free_space_tree(caching_ctl);
else
ret = load_extent_tree_free(caching_ctl);
done:
@@ -708,8 +913,6 @@ done:
}
#endif
- caching_ctl->progress = (u64)-1;
-
up_read(&fs_info->commit_root_sem);
btrfs_free_excluded_extents(block_group);
mutex_unlock(&caching_ctl->mutex);
@@ -720,9 +923,8 @@ done:
btrfs_put_block_group(block_group);
}
-int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
- DEFINE_WAIT(wait);
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_caching_control *caching_ctl = NULL;
int ret = 0;
@@ -739,9 +941,9 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
mutex_init(&caching_ctl->mutex);
init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache;
- caching_ctl->progress = cache->start;
refcount_set(&caching_ctl->count, 2);
- btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+ atomic_set(&caching_ctl->progress, 0);
+ btrfs_init_work(&caching_ctl->work, caching_thread, NULL);
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
@@ -755,24 +957,20 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
}
WARN_ON(cache->caching_ctl);
cache->caching_ctl = caching_ctl;
- if (btrfs_test_opt(fs_info, SPACE_CACHE))
- cache->cached = BTRFS_CACHE_FAST;
- else
- cache->cached = BTRFS_CACHE_STARTED;
- cache->has_caching_ctl = 1;
+ cache->cached = BTRFS_CACHE_STARTED;
spin_unlock(&cache->lock);
- spin_lock(&fs_info->block_group_cache_lock);
+ write_lock(&fs_info->block_group_cache_lock);
refcount_inc(&caching_ctl->count);
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
- spin_unlock(&fs_info->block_group_cache_lock);
+ write_unlock(&fs_info->block_group_cache_lock);
btrfs_get_block_group(cache);
btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
- if (load_cache_only && caching_ctl)
- btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
+ if (wait && caching_ctl)
+ ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
if (caching_ctl)
btrfs_put_caching_control(caching_ctl);
@@ -832,6 +1030,13 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
}
+static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
+{
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
+ return fs_info->block_group_root;
+ return btrfs_extent_root(fs_info, 0);
+}
+
static int remove_block_group_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_block_group *block_group)
@@ -841,7 +1046,7 @@ static int remove_block_group_item(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ret;
- root = fs_info->extent_root;
+ root = btrfs_block_group_root(fs_info);
key.objectid = block_group->start;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
key.offset = block_group->length;
@@ -857,10 +1062,10 @@ static int remove_block_group_item(struct btrfs_trans_handle *trans,
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- u64 group_start, struct extent_map *em)
+ struct btrfs_chunk_map *map)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_block_group *block_group;
struct btrfs_free_cluster *cluster;
struct inode *inode;
@@ -869,11 +1074,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
int index;
int factor;
struct btrfs_caching_control *caching_ctl = NULL;
- bool remove_em;
+ bool remove_map;
bool remove_rsv = false;
- block_group = btrfs_lookup_block_group(fs_info, group_start);
- BUG_ON(!block_group);
+ block_group = btrfs_lookup_block_group(fs_info, map->start);
+ if (!block_group)
+ return -ENOENT;
+
BUG_ON(!block_group->ro);
trace_btrfs_remove_block_group(block_group);
@@ -947,17 +1154,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- spin_lock(&fs_info->block_group_cache_lock);
- rb_erase(&block_group->cache_node,
- &fs_info->block_group_cache_tree);
+ write_lock(&fs_info->block_group_cache_lock);
+ rb_erase_cached(&block_group->cache_node,
+ &fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);
/* Once for the block groups rbtree */
btrfs_put_block_group(block_group);
- if (fs_info->first_logical_byte == block_group->start)
- fs_info->first_logical_byte = (u64)-1;
- spin_unlock(&fs_info->block_group_cache_lock);
+ write_unlock(&fs_info->block_group_cache_lock);
down_write(&block_group->space_info->groups_sem);
/*
@@ -977,32 +1182,31 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
kobject_put(kobj);
}
- if (block_group->has_caching_ctl)
- caching_ctl = btrfs_get_caching_control(block_group);
if (block_group->cached == BTRFS_CACHE_STARTED)
btrfs_wait_block_group_cache_done(block_group);
- if (block_group->has_caching_ctl) {
- spin_lock(&fs_info->block_group_cache_lock);
- if (!caching_ctl) {
- struct btrfs_caching_control *ctl;
-
- list_for_each_entry(ctl,
- &fs_info->caching_block_groups, list)
- if (ctl->block_group == block_group) {
- caching_ctl = ctl;
- refcount_inc(&caching_ctl->count);
- break;
- }
- }
- if (caching_ctl)
- list_del_init(&caching_ctl->list);
- spin_unlock(&fs_info->block_group_cache_lock);
- if (caching_ctl) {
- /* Once for the caching bgs list and once for us. */
- btrfs_put_caching_control(caching_ctl);
- btrfs_put_caching_control(caching_ctl);
+
+ write_lock(&fs_info->block_group_cache_lock);
+ caching_ctl = btrfs_get_caching_control(block_group);
+ if (!caching_ctl) {
+ struct btrfs_caching_control *ctl;
+
+ list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
+ if (ctl->block_group == block_group) {
+ caching_ctl = ctl;
+ refcount_inc(&caching_ctl->count);
+ break;
+ }
}
}
+ if (caching_ctl)
+ list_del_init(&caching_ctl->list);
+ write_unlock(&fs_info->block_group_cache_lock);
+
+ if (caching_ctl) {
+ /* Once for the caching bgs list and once for us. */
+ btrfs_put_caching_control(caching_ctl);
+ btrfs_put_caching_control(caching_ctl);
+ }
spin_lock(&trans->transaction->dirty_bgs_lock);
WARN_ON(!list_empty(&block_group->dirty_list));
@@ -1027,8 +1231,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
block_group->space_info->total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
- block_group->space_info->bytes_zone_unusable -=
- block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(block_group->space_info,
+ -block_group->zone_unusable);
block_group->space_info->disk_total -= block_group->length * factor;
spin_unlock(&block_group->space_info->lock);
@@ -1044,7 +1248,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* another task to attempt to create another block group with the same
* item key (and failing with -EEXIST and a transaction abort).
*/
- ret = remove_block_group_free_space(trans, block_group);
+ ret = btrfs_remove_block_group_free_space(trans, block_group);
if (ret)
goto out;
@@ -1053,7 +1257,17 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
goto out;
spin_lock(&block_group->lock);
- block_group->removed = 1;
+ /*
+ * Hitting this WARN means we removed a block group with an unwritten
+ * region. It will cause "unable to find chunk map for logical" errors.
+ */
+ if (WARN_ON(has_unwritten_metadata(block_group)))
+ btrfs_warn(fs_info,
+ "block group %llu is removed before metadata write out",
+ block_group->start);
+
+ set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
+
/*
* At this point trimming or scrub can't start on this block group,
* because we removed the block group from the rbtree
@@ -1064,7 +1278,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* entries because we already removed them all when we called
* btrfs_remove_free_space_cache().
*
- * And we must not remove the extent map from the fs_info->mapping_tree
+ * And we must not remove the chunk map from the fs_info->mapping_tree
* to prevent the same logical address range and physical device space
* ranges from being reused for a new block group. This is needed to
* avoid races with trimming and scrub.
@@ -1080,41 +1294,30 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* in place until the extents have been discarded completely when
* the transaction commit has completed.
*/
- remove_em = (atomic_read(&block_group->frozen) == 0);
+ remove_map = (atomic_read(&block_group->frozen) == 0);
spin_unlock(&block_group->lock);
- if (remove_em) {
- struct extent_map_tree *em_tree;
-
- em_tree = &fs_info->mapping_tree;
- write_lock(&em_tree->lock);
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- /* once for the tree */
- free_extent_map(em);
- }
+ if (remove_map)
+ btrfs_remove_chunk_map(fs_info, map);
out:
/* Once for the lookup reference */
btrfs_put_block_group(block_group);
if (remove_rsv)
- btrfs_delayed_refs_rsv_release(fs_info, 1);
- btrfs_free_path(path);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
return ret;
}
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_root *root = btrfs_block_group_root(fs_info);
+ struct btrfs_chunk_map *map;
unsigned int num_items;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, chunk_offset, 1);
- read_unlock(&em_tree->lock);
- ASSERT(em && em->start == chunk_offset);
+ map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
+ ASSERT(map != NULL);
+ ASSERT(map->start == chunk_offset);
/*
* We need to reserve 3 + N units from the metadata space info in order
@@ -1135,12 +1338,10 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
* more device items and remove one chunk item), but this is done at
* btrfs_remove_chunk() through a call to check_system_chunk().
*/
- map = em->map_lookup;
num_items = 3 + map->num_stripes;
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
- return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
- num_items);
+ return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}
/*
@@ -1156,7 +1357,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
* data in this block group. That check should be done by relocation routine,
* not this function.
*/
-static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group *cache, bool force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
@@ -1201,8 +1402,7 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
* BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
* leeway to allow us to mark this block group as read only.
*/
- if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
- BTRFS_RESERVE_NO_FLUSH))
+ if (btrfs_can_overcommit(sinfo, num_bytes, BTRFS_RESERVE_NO_FLUSH))
ret = 0;
}
@@ -1211,7 +1411,7 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes to readonly */
sinfo->bytes_readonly += cache->zone_unusable;
- sinfo->bytes_zone_unusable -= cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(sinfo, -cache->zone_unusable);
cache->zone_unusable = 0;
}
cache->ro++;
@@ -1223,24 +1423,23 @@ out:
if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
btrfs_info(cache->fs_info,
"unable to make block group %llu ro", cache->start);
- btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
+ btrfs_dump_space_info(cache->space_info, 0, false);
}
return ret;
}
static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *bg)
+ const struct btrfs_block_group *bg)
{
- struct btrfs_fs_info *fs_info = bg->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_transaction *prev_trans = NULL;
const u64 start = bg->start;
const u64 end = start + bg->length - 1;
int ret;
spin_lock(&fs_info->trans_lock);
- if (trans->transaction->list.prev != &fs_info->trans_list) {
- prev_trans = list_last_entry(&trans->transaction->list,
- struct btrfs_transaction, list);
+ if (!list_is_first(&trans->transaction->list, &fs_info->trans_list)) {
+ prev_trans = list_prev_entry(trans->transaction, list);
refcount_inc(&prev_trans->use_count);
}
spin_unlock(&fs_info->trans_lock);
@@ -1253,18 +1452,18 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
* group in pinned_extents before we were able to clear the whole block
* group range from pinned_extents. This means that task can lookup for
* the block group after we unpinned it from pinned_extents and removed
- * it, leading to a BUG_ON() at unpin_extent_range().
+ * it, leading to an error at unpin_extent_range().
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
if (prev_trans) {
- ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
- EXTENT_DIRTY);
+ ret = btrfs_clear_extent_bit(&prev_trans->pinned_extents, start, end,
+ EXTENT_DIRTY, NULL);
if (ret)
goto out;
}
- ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
- EXTENT_DIRTY);
+ ret = btrfs_clear_extent_bit(&trans->transaction->pinned_extents, start, end,
+ EXTENT_DIRTY, NULL);
out:
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
if (prev_trans)
@@ -1274,11 +1473,38 @@ out:
}
/*
+ * Link the block_group to a list via bg_list.
+ *
+ * @bg: The block_group to link to the list.
+ * @list: The list to link it to.
+ *
+ * Use this rather than list_add_tail() directly so that locking and
+ * refcounting are handled properly.
+ *
+ * Returns: true if the bg was linked with a refcount bump and false otherwise.
+ */
+static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ bool added = false;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ if (list_empty(&bg->bg_list)) {
+ btrfs_get_block_group(bg);
+ list_add_tail(&bg->bg_list, list);
+ added = true;
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+ return added;
+}
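/*
 * A minimal usage sketch: the callers below (the unused/reclaim retry paths
 * and btrfs_mark_bg_to_reclaim()) call btrfs_link_bg_list() instead of
 * open-coding list_add_tail(), relying on the list_empty() check under
 * unused_bgs_lock so that a block group is never linked to two lists via
 * bg_list at once, and on the reference taken here being dropped when the
 * entry is later removed from the list.
 */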
+
+/*
* Process the unused_bgs list and remove any that don't have any allocated
* space inside of them.
*/
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
+ LIST_HEAD(retry_list);
struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
@@ -1288,6 +1514,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
+ if (btrfs_fs_closing(fs_info))
+ return;
+
/*
* Long running balances can keep us blocked here for eternity, so
* simply skip deletion if we're unable to get the mutex.
@@ -1297,6 +1526,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->unused_bgs_lock);
while (!list_empty(&fs_info->unused_bgs)) {
+ u64 used;
int trimming;
block_group = list_first_entry(&fs_info->unused_bgs,
@@ -1332,22 +1562,69 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
goto next;
}
+ spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- if (block_group->reserved || block_group->pinned ||
- block_group->used || block_group->ro ||
+ if (btrfs_is_block_group_used(block_group) || block_group->ro ||
list_is_singular(&block_group->list)) {
/*
* We want to bail if we made new allocations or have
* outstanding allocations in this block group. We do
* the ro check in case balance is currently acting on
* this block group.
+ *
+ * Also bail out if this is the only block group for its
+ * type, because otherwise we would lose profile
+ * information from fs_info->avail_*_alloc_bits and the
+ * next block group of this type would be created with a
+ * "single" profile (even if we're in a raid fs) because
+ * fs_info->avail_*_alloc_bits would be 0.
+ */
+ trace_btrfs_skip_unused_block_group(block_group);
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+ up_write(&space_info->groups_sem);
+ goto next;
+ }
+
+ /*
+ * The block group may be unused but there may be space reserved
+ * accounting with the existence of that block group, that is,
+ * space_info->bytes_may_use was incremented by a task but no
+ * space was yet allocated from the block group by the task.
+ * That space may or may not be allocated, as we are generally
+ * pessimistic about space reservation for metadata as well as
+ * for data when using compression (as we reserve space based on
+ * the worst case, when data can't be compressed, and before
+ * actually attempting compression, before starting writeback).
+ *
+ * So check if the total space of the space_info minus the size
+ * of this block group is less than the used space of the
+ * space_info - if that's the case, then it means we have tasks
+ * that might be relying on the block group in order to allocate
+ * extents, and add back the block group to the unused list when
+ * we finish, so that we retry later in case no tasks ended up
+ * needing to allocate extents from the block group.
+ */
+ used = btrfs_space_info_used(space_info, true);
+ if ((space_info->total_bytes - block_group->length < used &&
+ block_group->zone_unusable < block_group->length) ||
+ has_unwritten_metadata(block_group)) {
+ /*
+ * Add a reference for the list, compensate for the ref
+ * drop under the "next" label for the
+ * fs_info->unused_bgs list.
*/
+ btrfs_link_bg_list(block_group, &retry_list);
+
trace_btrfs_skip_unused_block_group(block_group);
spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
}
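/*
 * Worked example for the retry condition above, with assumed values: suppose
 * the space_info has total_bytes = 10 GiB, btrfs_space_info_used() (including
 * bytes_may_use) reports 9.5 GiB, and this unused block group is 1 GiB long
 * and not fully zone_unusable. Then total_bytes - length = 9 GiB, which is
 * less than the 9.5 GiB of used/reserved space, so some task may still depend
 * on this block group to satisfy its reservation; the group is linked to the
 * local retry_list and spliced back onto fs_info->unused_bgs at the end of
 * the scan instead of being deleted now.
 */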
+
spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
/* We don't want to force the issue, only flip if it's ok. */
ret = inc_block_group_ro(block_group, 0);
@@ -1357,6 +1634,16 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
goto next;
}
+ ret = btrfs_zone_finish(block_group);
+ if (ret < 0) {
+ btrfs_dec_block_group_ro(block_group);
+ if (ret == -EAGAIN) {
+ btrfs_link_bg_list(block_group, &retry_list);
+ ret = 0;
+ }
+ goto next;
+ }
+
/*
* Want to do this before we do anything else so we can recover
* properly if we fail to join the transaction.
@@ -1399,8 +1686,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- btrfs_space_info_update_bytes_pinned(fs_info, space_info,
- -block_group->pinned);
+ btrfs_space_info_update_bytes_pinned(space_info, -block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
block_group->pinned = 0;
@@ -1463,12 +1749,16 @@ next:
btrfs_put_block_group(block_group);
spin_lock(&fs_info->unused_bgs_lock);
}
+ list_splice_tail(&retry_list, &fs_info->unused_bgs);
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
return;
flip_async:
btrfs_end_transaction(trans);
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_splice_tail(&retry_list, &fs_info->unused_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
btrfs_put_block_group(block_group);
btrfs_discard_punt_unused_bgs_list(fs_info);
@@ -1483,6 +1773,10 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
btrfs_get_block_group(bg);
trace_btrfs_add_unused_block_group(bg);
list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
+ } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
+ /* Pull out the block group from the reclaim_bgs list. */
+ trace_btrfs_add_unused_block_group(bg);
+ list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
}
spin_unlock(&fs_info->unused_bgs_lock);
}
@@ -1499,7 +1793,42 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
bg1 = list_entry(a, struct btrfs_block_group, bg_list);
bg2 = list_entry(b, struct btrfs_block_group, bg_list);
- return bg1->used > bg2->used;
+ /*
+ * Some other task may be updating the ->used field concurrently, but it
+ * is not serious if we get a stale value or load/store tearing issues,
+ * as sorting the list of block groups to reclaim is not critical and an
+ * occasional imperfect order is ok. So silence KCSAN and avoid the
+ * overhead of locking or any other synchronization.
+ */
+ return data_race(bg1->used > bg2->used);
+}
+
+static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
+{
+ if (btrfs_is_zoned(fs_info))
+ return btrfs_zoned_should_reclaim(fs_info);
+ return true;
+}
+
+static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed)
+{
+ const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
+ u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
+ const u64 new_val = bg->used;
+ const u64 old_val = new_val + bytes_freed;
+
+ if (thresh_bytes == 0)
+ return false;
+
+ /*
+ * If we were below the threshold before, don't reclaim: we are likely a
+ * brand new block group and we don't want to relocate new block groups.
+ */
+ if (old_val < thresh_bytes)
+ return false;
+ if (new_val >= thresh_bytes)
+ return false;
+ return true;
}
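/*
 * Worked example for should_reclaim_block_group(), with assumed values: for a
 * 1 GiB block group and a reclaim threshold of 75%, thresh_bytes is 768 MiB.
 * Freeing 300 MiB from a group that had 900 MiB used gives old_val = 900 MiB
 * and new_val = 600 MiB; old_val was at or above the threshold and new_val
 * dropped below it, so the group just crossed the threshold downwards and
 * true is returned. A group that was already below the threshold before the
 * free (old_val < thresh_bytes) is treated as recently created and is not
 * reclaimed.
 */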
void btrfs_reclaim_bgs_work(struct work_struct *work)
@@ -1508,11 +1837,19 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
struct btrfs_block_group *bg;
struct btrfs_space_info *space_info;
- LIST_HEAD(again_list);
+ LIST_HEAD(retry_list);
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
+ if (btrfs_fs_closing(fs_info))
+ return;
+
+ if (!btrfs_should_reclaim(fs_info))
+ return;
+
+ guard(super_write)(fs_info->sb);
+
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
return;
@@ -1533,7 +1870,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
*/
list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
while (!list_empty(&fs_info->reclaim_bgs)) {
- u64 zone_unusable;
+ u64 used;
+ u64 reserved;
int ret = 0;
bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1547,6 +1885,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
/* Don't race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
+ spin_lock(&space_info->lock);
spin_lock(&bg->lock);
if (bg->reserved || bg->pinned || bg->ro) {
/*
@@ -1556,57 +1895,144 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
* this block group.
*/
spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
}
- spin_unlock(&bg->lock);
+ if (bg->used == 0) {
+ /*
+ * It is possible that we trigger relocation on a block
+ * group as its extents are deleted and it first goes
+ * below the threshold, then shortly after goes empty.
+ *
+ * In this case, relocating it does delete it, but has
+ * some overhead in relocation specific metadata, looking
+ * for the non-existent extents and running some extra
+ * transactions, which we can avoid by using one of the
+ * other mechanisms for dealing with empty block groups.
+ */
+ if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
+ btrfs_mark_bg_unused(bg);
+ spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
+ up_write(&space_info->groups_sem);
+ goto next;
- /* Get out fast, in case we're unmounting the filesystem */
- if (btrfs_fs_closing(fs_info)) {
+ }
+ /*
+ * The block group might no longer meet the reclaim condition by
+ * the time we get around to reclaiming it, so to avoid
+ * reclaiming overly full block_groups, skip reclaiming them.
+ *
+ * Since the decision making process also depends on the amount
+ * being freed, pass in a fake giant value to skip that extra
+ * check, which is more meaningful when adding to the list in
+ * the first place.
+ */
+ if (!should_reclaim_block_group(bg, bg->length)) {
+ spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
}
+ spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
+
/*
- * Cache the zone_unusable value before turning the block group
- * to read only. As soon as the blog group is read only it's
- * zone_unusable value gets moved to the block group's read-only
- * bytes and isn't available for calculations anymore.
+ * Get out fast, in case we're read-only or unmounting the
+ * filesystem. It is OK to drop block groups from the list even
+ * for the read-only case. As we did take the super write lock,
+ * "mount -o remount,ro" won't happen and read-only filesystem
+ * means it is forced read-only due to a fatal error. So, it
+ * never gets back to read-write to let us reclaim again.
*/
- zone_unusable = bg->zone_unusable;
+ if (btrfs_need_cleaner_sleep(fs_info)) {
+ up_write(&space_info->groups_sem);
+ goto next;
+ }
+
ret = inc_block_group_ro(bg, 0);
up_write(&space_info->groups_sem);
if (ret < 0)
goto next;
- btrfs_info(fs_info,
- "reclaiming chunk %llu with %llu%% used %llu%% unusable",
- bg->start, div_u64(bg->used * 100, bg->length),
- div64_u64(zone_unusable * 100, bg->length));
+ /*
+ * The amount of bytes reclaimed corresponds to the sum of the
+ * "used" and "reserved" counters. We have set the block group
+ * to RO above, which prevents reservations from happening but
+ * we may have existing reservations for which allocation has
+ * not yet been done - btrfs_update_block_group() was not yet
+ * called, which is where we will transfer a reserved extent's
+ * size from the "reserved" counter to the "used" counter - this
+ * happens when running delayed references. When we relocate the
+ * chunk below, relocation first flushes delalloc, waits for
+ * ordered extent completion (which is where we create delayed
+ * references for data extents) and commits the current
+ * transaction (which runs delayed references), and only after
+ * it does the actual work to move extents out of the block
+ * group. So the reported amount of reclaimed bytes is
+ * effectively the sum of the 'used' and 'reserved' counters.
+ */
+ spin_lock(&bg->lock);
+ used = bg->used;
+ reserved = bg->reserved;
+ spin_unlock(&bg->lock);
+
trace_btrfs_reclaim_block_group(bg);
- ret = btrfs_relocate_chunk(fs_info, bg->start);
- if (ret && ret != -EAGAIN)
+ ret = btrfs_relocate_chunk(fs_info, bg->start, false);
+ if (ret) {
+ btrfs_dec_block_group_ro(bg);
btrfs_err(fs_info, "error relocating chunk %llu",
bg->start);
+ used = 0;
+ reserved = 0;
+ spin_lock(&space_info->lock);
+ space_info->reclaim_errors++;
+ if (READ_ONCE(space_info->periodic_reclaim))
+ space_info->periodic_reclaim_ready = false;
+ spin_unlock(&space_info->lock);
+ }
+ spin_lock(&space_info->lock);
+ space_info->reclaim_count++;
+ space_info->reclaim_bytes += used;
+ space_info->reclaim_bytes += reserved;
+ spin_unlock(&space_info->lock);
next:
+ if (ret && !READ_ONCE(space_info->periodic_reclaim))
+ btrfs_link_bg_list(bg, &retry_list);
+ btrfs_put_block_group(bg);
+
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ /*
+ * Reclaiming all the block groups in the list can take really
+ * long. Prioritize cleaning up unused block groups.
+ */
+ btrfs_delete_unused_bgs(fs_info);
+ /*
+ * If we are interrupted by a balance, we can just bail out. The
+ * cleaner thread will restart the work again if necessary.
+ */
+ if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
+ goto end;
spin_lock(&fs_info->unused_bgs_lock);
- if (ret == -EAGAIN && list_empty(&bg->bg_list))
- list_add_tail(&bg->bg_list, &again_list);
- else
- btrfs_put_block_group(bg);
}
- list_splice_tail(&again_list, &fs_info->reclaim_bgs);
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
+end:
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
btrfs_exclop_finish(fs_info);
}
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
{
+ btrfs_reclaim_sweep(fs_info);
spin_lock(&fs_info->unused_bgs_lock);
if (!list_empty(&fs_info->reclaim_bgs))
- queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
+ queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work);
spin_unlock(&fs_info->unused_bgs_lock);
}
@@ -1614,20 +2040,14 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
- spin_lock(&fs_info->unused_bgs_lock);
- if (list_empty(&bg->bg_list)) {
- btrfs_get_block_group(bg);
+ if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs))
trace_btrfs_add_reclaim_block_group(bg);
- list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
}
-static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
- struct btrfs_path *path)
+static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
+ const struct btrfs_path *path)
{
- struct extent_map_tree *em_tree;
- struct extent_map *em;
+ struct btrfs_chunk_map *map;
struct btrfs_block_group_item bg;
struct extent_buffer *leaf;
int slot;
@@ -1637,23 +2057,20 @@ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
slot = path->slots[0];
leaf = path->nodes[0];
- em_tree = &fs_info->mapping_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
- read_unlock(&em_tree->lock);
- if (!em) {
+ map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset);
+ if (!map) {
btrfs_err(fs_info,
"logical %llu len %llu found bg but no related chunk",
key->objectid, key->offset);
return -ENOENT;
}
- if (em->start != key->objectid || em->len != key->offset) {
+ if (unlikely(map->start != key->objectid || map->chunk_len != key->offset)) {
btrfs_err(fs_info,
"block group %llu len %llu mismatch with chunk %llu len %llu",
- key->objectid, key->offset, em->start, em->len);
+ key->objectid, key->offset, map->start, map->chunk_len);
ret = -EUCLEAN;
- goto out_free_em;
+ goto out_free_map;
}
read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
@@ -1661,55 +2078,33 @@ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
flags = btrfs_stack_block_group_flags(&bg) &
BTRFS_BLOCK_GROUP_TYPE_MASK;
- if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ if (unlikely(flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) {
btrfs_err(fs_info,
"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
key->objectid, key->offset, flags,
- (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
+ (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type));
ret = -EUCLEAN;
}
-out_free_em:
- free_extent_map(em);
+out_free_map:
+ btrfs_free_chunk_map(map);
return ret;
}
static int find_first_block_group(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
- struct btrfs_key *key)
+ const struct btrfs_key *key)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = btrfs_block_group_root(fs_info);
int ret;
struct btrfs_key found_key;
- struct extent_buffer *leaf;
- int slot;
-
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
- if (ret < 0)
- return ret;
-
- while (1) {
- slot = path->slots[0];
- leaf = path->nodes[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto out;
- break;
- }
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
+ btrfs_for_each_slot(root, key, &found_key, path, ret) {
if (found_key.objectid >= key->objectid &&
found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- ret = read_bg_from_eb(fs_info, &found_key, path);
- break;
+ return read_bg_from_eb(fs_info, &found_key, path);
}
-
- path->slots[0]++;
}
-out:
return ret;
}
@@ -1728,12 +2123,11 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
write_sequnlock(&fs_info->profiles_lock);
}
-/**
- * Map a physical disk address to a list of logical addresses
+/*
+ * Map a physical disk address to a list of logical addresses.
*
* @fs_info: the filesystem
* @chunk_start: logical address of block group
- * @bdev: physical device to resolve, can be NULL to indicate any device
* @physical: physical address to map to logical addresses
* @logical: return array of logical addresses which map to @physical
* @naddrs: length of @logical
@@ -1744,11 +2138,9 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
* block copies.
*/
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
- struct block_device *bdev, u64 physical, u64 **logical,
- int *naddrs, int *stripe_len)
+ u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
u64 *buf;
u64 bytenr;
u64 data_stripe_length;
@@ -1756,18 +2148,17 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
int i, nr = 0;
int ret = 0;
- em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
- if (IS_ERR(em))
+ map = btrfs_get_chunk_map(fs_info, chunk_start, 1);
+ if (IS_ERR(map))
return -EIO;
- map = em->map_lookup;
- data_stripe_length = em->orig_block_len;
- io_stripe_size = map->stripe_len;
- chunk_start = em->start;
+ data_stripe_length = map->stripe_size;
+ io_stripe_size = BTRFS_STRIPE_LEN;
+ chunk_start = map->start;
/* For RAID5/6 adjust to a full IO stripe length */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- io_stripe_size = map->stripe_len * nr_data_stripes(map);
+ io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
if (!buf) {
@@ -1777,32 +2168,28 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
for (i = 0; i < map->num_stripes; i++) {
bool already_inserted = false;
- u64 stripe_nr;
- u64 offset;
+ u32 stripe_nr;
+ u32 offset;
int j;
if (!in_range(physical, map->stripes[i].physical,
data_stripe_length))
continue;
- if (bdev && map->stripes[i].dev->bdev != bdev)
- continue;
+ stripe_nr = (physical - map->stripes[i].physical) >>
+ BTRFS_STRIPE_LEN_SHIFT;
+ offset = (physical - map->stripes[i].physical) &
+ BTRFS_STRIPE_LEN_MASK;
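/*
 * A small worked example for the shift/mask math above, assuming
 * BTRFS_STRIPE_LEN is 64 KiB (so BTRFS_STRIPE_LEN_SHIFT is 16): a physical
 * address 200 KiB past map->stripes[i].physical gives
 * stripe_nr = 200 KiB >> 16 = 3 and offset = 200 KiB & (64 KiB - 1) = 8 KiB,
 * matching what the old div64_u64_rem() based code computed with u64
 * arithmetic.
 */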
- stripe_nr = physical - map->stripes[i].physical;
- stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
-
- if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
- stripe_nr = stripe_nr * map->num_stripes + i;
- stripe_nr = div_u64(stripe_nr, map->sub_stripes);
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- stripe_nr = stripe_nr * map->num_stripes + i;
- }
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10))
+ stripe_nr = div_u64(stripe_nr * map->num_stripes + i,
+ map->sub_stripes);
/*
* The remaining case would be for RAID56, multiply by
* nr_data_stripes(). Alternatively, just use rmap_len below
* instead of map->stripe_len
*/
-
bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
/* Ensure we don't add duplicate addresses */
@@ -1821,7 +2208,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
*naddrs = nr;
*stripe_len = io_stripe_size;
out:
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
@@ -1837,21 +2224,23 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
cache->bytes_super += stripe_len;
- ret = btrfs_add_excluded_extent(fs_info, cache->start,
- stripe_len);
+ ret = btrfs_set_extent_bit(&fs_info->excluded_extents, cache->start,
+ cache->start + stripe_len - 1,
+ EXTENT_DIRTY, NULL);
if (ret)
return ret;
}
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(fs_info, cache->start, NULL,
+ ret = btrfs_rmap_block(fs_info, cache->start,
bytenr, &logical, &nr, &stripe_len);
if (ret)
return ret;
/* Shouldn't have super stripes in sequential zones */
- if (zoned && nr) {
+ if (unlikely(zoned && nr)) {
+ kfree(logical);
btrfs_err(fs_info,
"zoned: block group %llu must not contain super block",
cache->start);
@@ -1863,8 +2252,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
cache->start + cache->length - logical[nr]);
cache->bytes_super += len;
- ret = btrfs_add_excluded_extent(fs_info, logical[nr],
- len);
+ ret = btrfs_set_extent_bit(&fs_info->excluded_extents,
+ logical[nr], logical[nr] + len - 1,
+ EXTENT_DIRTY, NULL);
if (ret) {
kfree(logical);
return ret;
@@ -1876,16 +2266,6 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
return 0;
}
-static void link_block_group(struct btrfs_block_group *cache)
-{
- struct btrfs_space_info *space_info = cache->space_info;
- int index = btrfs_bg_flags_to_raid_index(cache->flags);
-
- down_write(&space_info->groups_sem);
- list_add_tail(&cache->list, &space_info->block_groups[index]);
- up_write(&space_info->groups_sem);
-}
-
static struct btrfs_block_group *btrfs_create_block_group_cache(
struct btrfs_fs_info *fs_info, u64 start)
{
@@ -1923,7 +2303,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
atomic_set(&cache->frozen, 0);
mutex_init(&cache->free_space_lock);
- btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
return cache;
}
@@ -1934,49 +2313,47 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
*/
static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
- struct extent_map_tree *map_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct btrfs_block_group *bg;
u64 start = 0;
int ret = 0;
while (1) {
- read_lock(&map_tree->lock);
+ struct btrfs_chunk_map *map;
+ struct btrfs_block_group *bg;
+
/*
- * lookup_extent_mapping will return the first extent map
- * intersecting the range, so setting @len to 1 is enough to
+ * btrfs_find_chunk_map() will return the first chunk map
+ * intersecting the range, so setting @length to 1 is enough to
* get the first chunk.
*/
- em = lookup_extent_mapping(map_tree, start, 1);
- read_unlock(&map_tree->lock);
- if (!em)
+ map = btrfs_find_chunk_map(fs_info, start, 1);
+ if (!map)
break;
- bg = btrfs_lookup_block_group(fs_info, em->start);
- if (!bg) {
+ bg = btrfs_lookup_block_group(fs_info, map->start);
+ if (unlikely(!bg)) {
btrfs_err(fs_info,
"chunk start=%llu len=%llu doesn't have corresponding block group",
- em->start, em->len);
+ map->start, map->chunk_len);
ret = -EUCLEAN;
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
break;
}
- if (bg->start != em->start || bg->length != em->len ||
- (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
- (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ if (unlikely(bg->start != map->start || bg->length != map->chunk_len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) {
btrfs_err(fs_info,
"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
- em->start, em->len,
- em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
+ map->start, map->chunk_len,
+ map->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
bg->start, bg->length,
bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
ret = -EUCLEAN;
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
btrfs_put_block_group(bg);
break;
}
- start = em->start + em->len;
- free_extent_map(em);
+ start = map->start + map->chunk_len;
+ btrfs_free_chunk_map(map);
btrfs_put_block_group(bg);
}
return ret;
@@ -1988,7 +2365,6 @@ static int read_one_block_group(struct btrfs_fs_info *info,
int need_clear)
{
struct btrfs_block_group *cache;
- struct btrfs_space_info *space_info;
const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
int ret;
@@ -2000,9 +2376,12 @@ static int read_one_block_group(struct btrfs_fs_info *info,
cache->length = key->offset;
cache->used = btrfs_stack_block_group_used(bgi);
+ cache->commit_used = cache->used;
cache->flags = btrfs_stack_block_group_flags(bgi);
+ cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
+ cache->space_info = btrfs_find_space_info(info, cache->flags);
- set_free_space_tree_thresholds(cache);
+ btrfs_set_free_space_tree_thresholds(cache);
if (need_clear) {
/*
@@ -2063,30 +2442,25 @@ static int read_one_block_group(struct btrfs_fs_info *info,
/* Should not have any excluded extents. Just in case, though. */
btrfs_free_excluded_extents(cache);
} else if (cache->length == cache->used) {
- cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
btrfs_free_excluded_extents(cache);
} else if (cache->used == 0) {
- cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, cache->start,
- cache->start + cache->length);
+ ret = btrfs_add_new_free_space(cache, cache->start,
+ cache->start + cache->length, NULL);
btrfs_free_excluded_extents(cache);
+ if (ret)
+ goto error;
}
- ret = btrfs_add_block_group_cache(info, cache);
+ ret = btrfs_add_block_group_cache(cache);
if (ret) {
btrfs_remove_free_space_cache(cache);
goto error;
}
- trace_btrfs_add_block_group(info, cache, 0);
- btrfs_update_space_info(info, cache->flags, cache->length,
- cache->used, cache->bytes_super,
- cache->zone_unusable, &space_info);
-
- cache->space_info = space_info;
- link_block_group(cache);
+ trace_btrfs_add_block_group(info, cache, 0);
+ btrfs_add_bg_to_space_info(info, cache);
set_avail_alloc_bits(info, cache->flags);
if (btrfs_chunk_writeable(info, cache->start)) {
@@ -2109,32 +2483,28 @@ error:
static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
{
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct btrfs_space_info *space_info;
struct rb_node *node;
int ret = 0;
- for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
- struct extent_map *em;
- struct map_lookup *map;
+ for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
+ struct btrfs_chunk_map *map;
struct btrfs_block_group *bg;
- em = rb_entry(node, struct extent_map, rb_node);
- map = em->map_lookup;
- bg = btrfs_create_block_group_cache(fs_info, em->start);
+ map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+ bg = btrfs_create_block_group_cache(fs_info, map->start);
if (!bg) {
ret = -ENOMEM;
break;
}
/* Fill dummy cache as FULL */
- bg->length = em->len;
+ bg->length = map->chunk_len;
bg->flags = map->type;
- bg->last_byte_to_unpin = (u64)-1;
bg->cached = BTRFS_CACHE_FINISHED;
- bg->used = em->len;
+ bg->used = map->chunk_len;
bg->flags = map->type;
- ret = btrfs_add_block_group_cache(fs_info, bg);
+ bg->space_info = btrfs_find_space_info(fs_info, bg->flags);
+ ret = btrfs_add_block_group_cache(bg);
/*
* We may have some valid block group cache added already, in
* that case we skip to the next one.
@@ -2151,10 +2521,7 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
break;
}
- btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
- 0, 0, &space_info);
- bg->space_info = space_info;
- link_block_group(bg);
+ btrfs_add_bg_to_space_info(fs_info, bg);
set_avail_alloc_bits(fs_info, bg->flags);
}
@@ -2165,6 +2532,7 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
+ struct btrfs_root *root = btrfs_block_group_root(info);
struct btrfs_path *path;
int ret;
struct btrfs_block_group *cache;
@@ -2173,12 +2541,21 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
int need_clear = 0;
u64 cache_gen;
- if (!info->extent_root)
+ /*
+ * Either no extent root (with ibadroots rescue option) or we have
+ * unsupported RO options. The fs can never be mounted read-write, so no
+ * need to waste time searching block group items.
+ *
+ * This also allows new extent tree related changes to be RO compat,
+ * no need for a full incompat flag.
+ */
+ if (!root || (btrfs_super_compat_ro_flags(info->super_copy) &
+ ~BTRFS_FEATURE_COMPAT_RO_SUPP))
return fill_dummy_bgs(info);
key.objectid = 0;
- key.offset = 0;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2276,30 +2653,40 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group_item bgi;
- struct btrfs_root *root;
+ struct btrfs_root *root = btrfs_block_group_root(fs_info);
struct btrfs_key key;
+ u64 old_commit_used;
+ int ret;
spin_lock(&block_group->lock);
btrfs_set_stack_block_group_used(&bgi, block_group->used);
btrfs_set_stack_block_group_chunk_objectid(&bgi,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ block_group->global_root_id);
btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
+ old_commit_used = block_group->commit_used;
+ block_group->commit_used = block_group->used;
key.objectid = block_group->start;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
key.offset = block_group->length;
spin_unlock(&block_group->lock);
- root = fs_info->extent_root;
- return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
+ ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
+ if (ret < 0) {
+ spin_lock(&block_group->lock);
+ block_group->commit_used = old_commit_used;
+ spin_unlock(&block_group->lock);
+ }
+
+ return ret;
}
static int insert_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device, u64 chunk_offset,
- u64 start, u64 num_bytes)
+ const struct btrfs_device *device, u64 chunk_offset,
+ u64 start, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dev_extent *extent;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -2316,7 +2703,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
key.offset = start;
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
@@ -2324,11 +2711,8 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_chunk_objectid(leaf, extent,
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
-
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
- btrfs_mark_buffer_dirty(leaf);
-out:
- btrfs_free_path(path);
+
return ret;
}
@@ -2343,19 +2727,14 @@ static int insert_dev_extents(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_device *device;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
u64 dev_offset;
- u64 stripe_size;
int i;
int ret = 0;
- em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
- if (IS_ERR(em))
- return PTR_ERR(em);
-
- map = em->map_lookup;
- stripe_size = em->orig_block_len;
+ map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
/*
* Take the device list mutex to prevent races with the final phase of
@@ -2372,13 +2751,13 @@ static int insert_dev_extents(struct btrfs_trans_handle *trans,
dev_offset = map->stripes[i].physical;
ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
- stripe_size);
+ map->stripe_size);
if (ret)
break;
}
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
@@ -2409,7 +2788,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
ret = insert_block_group_item(trans, block_group);
if (ret)
btrfs_abort_transaction(trans, ret);
- if (!block_group->chunk_item_inserted) {
+ if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+ &block_group->runtime_flags)) {
mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
mutex_unlock(&fs_info->chunk_mutex);
@@ -2420,7 +2800,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
block_group->length);
if (ret)
btrfs_abort_transaction(trans, ret);
- add_block_group_free_space(trans, block_group);
+ btrfs_add_block_group_free_space(trans, block_group);
/*
* If we restriped during balance, we may have added a new raid
@@ -2433,15 +2813,72 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
/* Already aborted the transaction if it failed. */
next:
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
+
+ spin_lock(&fs_info->unused_bgs_lock);
list_del_init(&block_group->bg_list);
+ clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+ btrfs_put_block_group(block_group);
+ spin_unlock(&fs_info->unused_bgs_lock);
+
+ /*
+ * If the block group is still unused, add it to the list of
+ * unused block groups. The block group may have been created in
+ * order to satisfy a space reservation, in which case the
+ * extent allocation only happens later. But often we don't
+ * actually need to allocate space that we previously reserved,
+ * so the block group may become unused for a long time. For
+ * example for metadata we generally reserve space for a worst
+ * possible scenario, but then don't end up allocating all that
+ * space or none at all (due to no need to COW, extent buffers
+ * were already COWed in the current transaction and still
+ * unwritten, tree heights lower than the maximum possible
+ * height, etc). For data we generally reserve the exact amount
+ * of space we are going to allocate later, the exception is
+ * when using compression, as we must reserve space based on the
+ * uncompressed data size, because the compression is only done
+ * when writeback triggered and we don't know how much space we
+ * are actually going to need, so we reserve the uncompressed
+ * size because the data may be incompressible in the worst case.
+ */
+ if (ret == 0) {
+ bool used;
+
+ spin_lock(&block_group->lock);
+ used = btrfs_is_block_group_used(block_group);
+ spin_unlock(&block_group->lock);
+
+ if (!used)
+ btrfs_mark_bg_unused(block_group);
+ }
}
btrfs_trans_release_chunk_metadata(trans);
}
+/*
+ * For extent tree v2 we use the block_group_item->chunk_offset to point at our
+ * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
+ */
+static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset)
+{
+ u64 div = SZ_1G;
+ u64 index;
+
+ if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
+ return BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+
+ /* If we have a smaller fs, index based on 128MiB. */
+ if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
+ div = SZ_128M;
+
+ offset = div64_u64(offset, div);
+ div64_u64_rem(offset, fs_info->nr_global_roots, &index);
+ return index;
+}
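/*
 * Worked example for calculate_global_root_id(), with assumed values: with
 * the EXTENT_TREE_V2 incompat flag set on a filesystem larger than 10 GiB,
 * div stays at SZ_1G. For a block group at offset 5 GiB and
 * fs_info->nr_global_roots = 4, offset / div = 5 and 5 mod 4 = 1, so the
 * block group maps to global root index 1. Without EXTENT_TREE_V2 the
 * function always returns BTRFS_FIRST_CHUNK_TREE_OBJECTID, matching the v1
 * on-disk value.
 */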
+
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
- u64 bytes_used, u64 type,
- u64 chunk_offset, u64 size)
+ struct btrfs_space_info *space_info,
+ u64 type, u64 chunk_offset, u64 size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *cache;
@@ -2453,14 +2890,21 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
if (!cache)
return ERR_PTR(-ENOMEM);
+ /*
+ * Mark it as new before adding it to the rbtree of block groups or any
+ * list, so that no other task finds it and calls btrfs_mark_bg_unused()
+ * before the new flag is set.
+ */
+ set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
+
cache->length = size;
- set_free_space_tree_thresholds(cache);
- cache->used = bytes_used;
+ btrfs_set_free_space_tree_thresholds(cache);
cache->flags = type;
- cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
+ cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
+
if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
- cache->needs_free_space = 1;
+ set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
ret = btrfs_load_block_group_zone_info(cache, true);
if (ret) {
@@ -2468,12 +2912,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
return ERR_PTR(ret);
}
- /*
- * New block group is likely to be used soon. Try to activate it now.
- * Failure is OK for now.
- */
- btrfs_zone_activate(cache);
-
ret = exclude_super_stripes(cache);
if (ret) {
/* We may have excluded something, so call this just in case */
@@ -2482,27 +2920,22 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
return ERR_PTR(ret);
}
- add_new_free_space(cache, chunk_offset, chunk_offset + size);
-
+ ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
btrfs_free_excluded_extents(cache);
-
-#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(cache)) {
- u64 new_bytes_used = size - bytes_used;
-
- bytes_used += new_bytes_used >> 1;
- fragment_free_space(cache);
+ if (ret) {
+ btrfs_put_block_group(cache);
+ return ERR_PTR(ret);
}
-#endif
+
/*
* Ensure the corresponding space_info object is created and
* assigned to our block group. We want our bg to be added to the rbtree
* with its ->space_info set.
*/
- cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
+ cache->space_info = space_info;
ASSERT(cache->space_info);
- ret = btrfs_add_block_group_cache(fs_info, cache);
+ ret = btrfs_add_block_group_cache(cache);
if (ret) {
btrfs_remove_free_space_cache(cache);
btrfs_put_block_group(cache);
@@ -2514,16 +2947,18 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
* the rbtree, update the space info's counters.
*/
trace_btrfs_add_block_group(fs_info, cache, 1);
- btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
- cache->bytes_super, cache->zone_unusable,
- &cache->space_info);
+ btrfs_add_bg_to_space_info(fs_info, cache);
btrfs_update_global_block_rsv(fs_info);
- link_block_group(cache);
+#ifdef CONFIG_BTRFS_DEBUG
+ if (btrfs_should_fragment_free_space(cache)) {
+ cache->space_info->bytes_used += size >> 1;
+ fragment_free_space(cache);
+ }
+#endif
- list_add_tail(&cache->bg_list, &trans->new_bgs);
- trans->delayed_ref_updates++;
- btrfs_update_delayed_refs_rsv(trans);
+ btrfs_link_bg_list(cache, &trans->new_bgs);
+ btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
set_avail_alloc_bits(fs_info, type);
return cache;
@@ -2542,13 +2977,28 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_space_info *space_info = cache->space_info;
struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = btrfs_block_group_root(fs_info);
u64 alloc_flags;
int ret;
bool dirty_bg_running;
+ /*
+ * This can only happen when we are doing read-only scrub on read-only
+ * mount.
+ * In that case we should not start a new transaction on read-only fs.
+ * Thus here we skip all chunk allocations.
+ */
+ if (sb_rdonly(fs_info->sb)) {
+ mutex_lock(&fs_info->ro_block_group_mutex);
+ ret = inc_block_group_ro(cache, 0);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+ return ret;
+ }
+
do {
- trans = btrfs_join_transaction(fs_info->extent_root);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
@@ -2580,7 +3030,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
*/
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
- ret = btrfs_chunk_alloc(trans, alloc_flags,
+ ret = btrfs_chunk_alloc(trans, space_info, alloc_flags,
CHUNK_ALLOC_FORCE);
/*
* ENOSPC is allowed here, we may have enough space
@@ -2594,14 +3044,32 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
}
ret = inc_block_group_ro(cache, 0);
- if (!do_chunk_alloc || ret == -ETXTBSY)
- goto unlock_out;
if (!ret)
goto out;
- alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
- ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ if (ret == -ETXTBSY)
+ goto unlock_out;
+
+ /*
+ * Skip chunk allocation if the bg is SYSTEM, to avoid a system chunk
+ * allocation storm exhausting the system chunk array. Otherwise
+ * we still want to try our best to mark the block group read-only.
+ */
+ if (!do_chunk_alloc && ret == -ENOSPC &&
+ (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
+ goto unlock_out;
+
+ alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
+ ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
+ if (ret < 0)
+ goto out;
+ /*
+ * We have allocated a new chunk. We also need to activate that chunk to
+ * grant metadata tickets for zoned filesystem.
+ */
+ ret = btrfs_zoned_activate_one_bg(space_info, true);
if (ret < 0)
goto out;
+
ret = inc_block_group_ro(cache, 0);
if (ret == -ETXTBSY)
goto unlock_out;
@@ -2632,9 +3100,10 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable =
- (cache->alloc_offset - cache->used) +
+ (cache->alloc_offset - cache->used - cache->pinned -
+ cache->reserved) +
(cache->length - cache->zone_capacity);
- sinfo->bytes_zone_unusable += cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(sinfo, cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
num_bytes = cache->length - cache->reserved -
@@ -2653,11 +3122,30 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = btrfs_block_group_root(fs_info);
unsigned long bi;
struct extent_buffer *leaf;
struct btrfs_block_group_item bgi;
struct btrfs_key key;
+ u64 old_commit_used;
+ u64 used;
+
+ /*
+ * Block group item updates can be triggered outside of the commit
+ * transaction critical section, thus we need a consistent view of the
+ * used bytes.
+ * We cannot use cache->used directly outside of the spin lock, as it
+ * may be changed.
+ */
+ spin_lock(&cache->lock);
+ old_commit_used = cache->commit_used;
+ used = cache->used;
+ /* No change in used bytes, can safely skip it. */
+ if (cache->commit_used == used) {
+ spin_unlock(&cache->lock);
+ return 0;
+ }
+ cache->commit_used = used;
+ spin_unlock(&cache->lock);
key.objectid = cache->start;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
@@ -2672,14 +3160,27 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
- btrfs_set_stack_block_group_used(&bgi, cache->used);
+ btrfs_set_stack_block_group_used(&bgi, used);
btrfs_set_stack_block_group_chunk_objectid(&bgi,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ cache->global_root_id);
btrfs_set_stack_block_group_flags(&bgi, cache->flags);
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
- btrfs_mark_buffer_dirty(leaf);
fail:
btrfs_release_path(path);
+ /*
+ * We didn't update the block group item, need to revert commit_used
+ * unless the block group item didn't exist yet - this is to prevent a
+ * race with a concurrent insertion of the block group item, with
+ * insert_block_group_item(), that happened just after we attempted to
+ * update. In that case we would reset commit_used to 0 just after the
+ * insertion set it to a value greater than 0 - if the block group's used
+ * bytes later drop to 0, we would incorrectly skip its update.
+ */
+ if (ret < 0 && ret != -ENOENT) {
+ spin_lock(&cache->lock);
+ cache->commit_used = old_commit_used;
+ spin_unlock(&cache->lock);
+ }
return ret;
}
@@ -2689,7 +3190,6 @@ static int cache_save_setup(struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_root *root = fs_info->tree_root;
struct inode *inode = NULL;
struct extent_changeset *data_reserved = NULL;
u64 alloc_hint = 0;
@@ -2741,8 +3241,8 @@ again:
* time.
*/
BTRFS_I(inode)->generation = 0;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (ret) {
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
/*
* So theoretically we could recover from this, simply set the
* super cache generation to 0 so we know to invalidate the
@@ -2814,7 +3314,7 @@ again:
cache_size *= fs_info->sectorsize;
ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
- cache_size);
+ cache_size, false);
if (ret)
goto out_put;
@@ -2854,7 +3354,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *cache, *tmp;
struct btrfs_transaction *cur_trans = trans->transaction;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
if (list_empty(&cur_trans->dirty_bgs) ||
!btrfs_test_opt(fs_info, SPACE_CACHE))
@@ -2871,7 +3371,6 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
cache_save_setup(cache, trans, path);
}
- btrfs_free_path(path);
return 0;
}
@@ -2894,10 +3393,9 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
int should_put;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
LIST_HEAD(dirty);
struct list_head *io = &cur_trans->io_bgs;
- int num_started = 0;
int loops = 0;
spin_lock(&cur_trans->dirty_bgs_lock);
@@ -2963,7 +3461,6 @@ again:
cache->io_ctl.inode = NULL;
ret = btrfs_write_out_cache(trans, cache, path);
if (ret == 0 && cache->io_ctl.inode) {
- num_started++;
should_put = 0;
/*
@@ -3010,7 +3507,7 @@ again:
if (should_put)
btrfs_put_block_group(cache);
if (drop_reserve)
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
/*
* Avoid blocking other tasks for too long. It might even save
* us from writing caches for block groups that are going to be
@@ -3051,7 +3548,6 @@ out:
btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
}
- btrfs_free_path(path);
return ret;
}
@@ -3062,9 +3558,8 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
int should_put;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct list_head *io = &cur_trans->io_bgs;
- int num_started = 0;
path = btrfs_alloc_path();
if (!path)
@@ -3115,14 +3610,12 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
cache_save_setup(cache, trans, path);
if (!ret)
- ret = btrfs_run_delayed_refs(trans,
- (unsigned long) -1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
cache->io_ctl.inode = NULL;
ret = btrfs_write_out_cache(trans, cache, path);
if (ret == 0 && cache->io_ctl.inode) {
- num_started++;
should_put = 0;
list_add_tail(&cache->io_list, io);
} else {
@@ -3152,15 +3645,17 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
ret = update_block_group_item(trans, path, cache);
- }
- if (ret)
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ } else if (ret) {
btrfs_abort_transaction(trans, ret);
+ }
}
/* If its not on the io list, we need to put the block group */
if (should_put)
btrfs_put_block_group(cache);
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@@ -3177,7 +3672,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
btrfs_put_block_group(cache);
}
- btrfs_free_path(path);
return ret;
}
@@ -3185,12 +3679,12 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, bool alloc)
{
struct btrfs_fs_info *info = trans->fs_info;
- struct btrfs_block_group *cache = NULL;
- u64 total = num_bytes;
+ struct btrfs_space_info *space_info;
+ struct btrfs_block_group *cache;
u64 old_val;
- u64 byte_in_group;
+ bool reclaim = false;
+ bool bg_already_dirty = true;
int factor;
- int ret = 0;
/* Block accounting for super block */
spin_lock(&info->delalloc_root_lock);
@@ -3202,92 +3696,96 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
btrfs_set_super_bytes_used(info->super_copy, old_val);
spin_unlock(&info->delalloc_root_lock);
- while (total) {
- cache = btrfs_lookup_block_group(info, bytenr);
- if (!cache) {
- ret = -ENOENT;
- break;
- }
- factor = btrfs_bg_type_to_factor(cache->flags);
+ cache = btrfs_lookup_block_group(info, bytenr);
+ if (!cache)
+ return -ENOENT;
- /*
- * If this block group has free space cache written out, we
- * need to make sure to load it if we are removing space. This
- * is because we need the unpinning stage to actually add the
- * space back to the block group, otherwise we will leak space.
- */
- if (!alloc && !btrfs_block_group_done(cache))
- btrfs_cache_block_group(cache, 1);
+ /* An extent can not span multiple block groups. */
+ ASSERT(bytenr + num_bytes <= cache->start + cache->length);
- byte_in_group = bytenr - cache->start;
- WARN_ON(byte_in_group > cache->length);
+ space_info = cache->space_info;
+ factor = btrfs_bg_type_to_factor(cache->flags);
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
+ /*
+ * If this block group has free space cache written out, we need to make
+ * sure to load it if we are removing space. This is because we need
+ * the unpinning stage to actually add the space back to the block group,
+ * otherwise we will leak space.
+ */
+ if (!alloc && !btrfs_block_group_done(cache))
+ btrfs_cache_block_group(cache, true);
- if (btrfs_test_opt(info, SPACE_CACHE) &&
- cache->disk_cache_state < BTRFS_DC_CLEAR)
- cache->disk_cache_state = BTRFS_DC_CLEAR;
+ spin_lock(&space_info->lock);
+ spin_lock(&cache->lock);
- old_val = cache->used;
- num_bytes = min(total, cache->length - byte_in_group);
- if (alloc) {
- old_val += num_bytes;
- cache->used = old_val;
- cache->reserved -= num_bytes;
- cache->space_info->bytes_reserved -= num_bytes;
- cache->space_info->bytes_used += num_bytes;
- cache->space_info->disk_used += num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
- } else {
- old_val -= num_bytes;
- cache->used = old_val;
- cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info,
- cache->space_info, num_bytes);
- cache->space_info->bytes_used -= num_bytes;
- cache->space_info->disk_used -= num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- set_extent_dirty(&trans->transaction->pinned_extents,
- bytenr, bytenr + num_bytes - 1,
- GFP_NOFS | __GFP_NOFAIL);
- }
+ if (btrfs_test_opt(info, SPACE_CACHE) &&
+ cache->disk_cache_state < BTRFS_DC_CLEAR)
+ cache->disk_cache_state = BTRFS_DC_CLEAR;
- spin_lock(&trans->transaction->dirty_bgs_lock);
- if (list_empty(&cache->dirty_list)) {
- list_add_tail(&cache->dirty_list,
- &trans->transaction->dirty_bgs);
- trans->delayed_ref_updates++;
- btrfs_get_block_group(cache);
- }
- spin_unlock(&trans->transaction->dirty_bgs_lock);
+ old_val = cache->used;
+ if (alloc) {
+ old_val += num_bytes;
+ cache->used = old_val;
+ cache->reserved -= num_bytes;
+ cache->reclaim_mark = 0;
+ space_info->bytes_reserved -= num_bytes;
+ space_info->bytes_used += num_bytes;
+ space_info->disk_used += num_bytes * factor;
+ if (READ_ONCE(space_info->periodic_reclaim))
+ btrfs_space_info_update_reclaimable(space_info, -num_bytes);
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
+ } else {
+ old_val -= num_bytes;
+ cache->used = old_val;
+ cache->pinned += num_bytes;
+ btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
+ space_info->bytes_used -= num_bytes;
+ space_info->disk_used -= num_bytes * factor;
+ if (READ_ONCE(space_info->periodic_reclaim))
+ btrfs_space_info_update_reclaimable(space_info, num_bytes);
+ else
+ reclaim = should_reclaim_block_group(cache, num_bytes);
- /*
- * No longer have used bytes in this block group, queue it for
- * deletion. We do this after adding the block group to the
- * dirty list to avoid races between cleaner kthread and space
- * cache writeout.
- */
- if (!alloc && old_val == 0) {
- if (!btrfs_test_opt(info, DISCARD_ASYNC))
- btrfs_mark_bg_unused(cache);
- }
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
- btrfs_put_block_group(cache);
- total -= num_bytes;
- bytenr += num_bytes;
+ btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+ bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
}
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ if (list_empty(&cache->dirty_list)) {
+ list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
+ bg_already_dirty = false;
+ btrfs_get_block_group(cache);
+ }
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+
+ /*
+ * No longer have used bytes in this block group, queue it for deletion.
+ * We do this after adding the block group to the dirty list to avoid
+ * races between cleaner kthread and space cache writeout.
+ */
+ if (!alloc && old_val == 0) {
+ if (!btrfs_test_opt(info, DISCARD_ASYNC))
+ btrfs_mark_bg_unused(cache);
+ } else if (!alloc && reclaim) {
+ btrfs_mark_bg_to_reclaim(cache);
+ }
+
+ btrfs_put_block_group(cache);
+
/* Modified block groups are accounted for in the delayed_refs_rsv. */
- btrfs_update_delayed_refs_rsv(trans);
- return ret;
+ if (!bg_already_dirty)
+ btrfs_inc_delayed_refs_rsv_bg_updates(info);
+
+ return 0;
}
-/**
- * btrfs_add_reserved_bytes - update the block_group and space info counters
+/*
+ * Update the block_group and space info counters.
+ *
* @cache: The cache we are manipulating
* @ram_bytes: The number of bytes of file content, and will be the same as
* @num_bytes except for the compression path.
@@ -3299,66 +3797,89 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
* reservation and return -EAGAIN, otherwise this function always succeeds.
*/
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
- u64 ram_bytes, u64 num_bytes, int delalloc)
+ u64 ram_bytes, u64 num_bytes, bool delalloc,
+ bool force_wrong_size_class)
{
struct btrfs_space_info *space_info = cache->space_info;
+ enum btrfs_block_group_size_class size_class;
int ret = 0;
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
if (cache->ro) {
ret = -EAGAIN;
- } else {
- cache->reserved += num_bytes;
- space_info->bytes_reserved += num_bytes;
- trace_btrfs_space_reservation(cache->fs_info, "space_info",
- space_info->flags, num_bytes, 1);
- btrfs_space_info_update_bytes_may_use(cache->fs_info,
- space_info, -ram_bytes);
- if (delalloc)
- cache->delalloc_bytes += num_bytes;
+ goto out_error;
+ }
- /*
- * Compression can use less space than we reserved, so wake
- * tickets if that happens
- */
- if (num_bytes < ram_bytes)
- btrfs_try_granting_tickets(cache->fs_info, space_info);
+ if (btrfs_block_group_should_use_size_class(cache)) {
+ size_class = btrfs_calc_block_group_size_class(num_bytes);
+ ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
+ if (ret)
+ goto out_error;
}
+
+ cache->reserved += num_bytes;
+ if (delalloc)
+ cache->delalloc_bytes += num_bytes;
+
+ trace_btrfs_space_reservation(cache->fs_info, "space_info",
+ space_info->flags, num_bytes, 1);
+ spin_unlock(&cache->lock);
+
+ space_info->bytes_reserved += num_bytes;
+ btrfs_space_info_update_bytes_may_use(space_info, -ram_bytes);
+
+ /*
+ * Compression can use less space than we reserved, so wake tickets if
+ * that happens.
+ */
+ if (num_bytes < ram_bytes)
+ btrfs_try_granting_tickets(space_info);
+ spin_unlock(&space_info->lock);
+
+ return 0;
+
+out_error:
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
return ret;
}
-/**
- * btrfs_free_reserved_bytes - update the block_group and space info counters
- * @cache: The cache we are manipulating
- * @num_bytes: The number of bytes in question
- * @delalloc: The blocks are allocated for the delalloc write
+/*
+ * Update the block_group and space info counters.
+ *
+ * @cache: The cache we are manipulating.
+ * @num_bytes: The number of bytes in question.
+ * @is_delalloc: Whether the blocks are allocated for a delalloc write.
*
* This is called by somebody who is freeing space that was never actually used
* on disk. For example if you reserve some space for a new leaf in transaction
* A and before transaction A commits you free that leaf, you call this with
* reserve set to 0 in order to clear the reservation.
*/
-void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
- u64 num_bytes, int delalloc)
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
+ bool is_delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
+ bool bg_ro;
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
- if (cache->ro)
- space_info->bytes_readonly += num_bytes;
+ bg_ro = cache->ro;
cache->reserved -= num_bytes;
- space_info->bytes_reserved -= num_bytes;
- space_info->max_extent_size = 0;
-
- if (delalloc)
+ if (is_delalloc)
cache->delalloc_bytes -= num_bytes;
spin_unlock(&cache->lock);
- btrfs_try_granting_tickets(cache->fs_info, space_info);
+ if (bg_ro)
+ space_info->bytes_readonly += num_bytes;
+ else if (btrfs_is_zoned(cache->fs_info))
+ space_info->bytes_zone_unusable += num_bytes;
+
+ space_info->bytes_reserved -= num_bytes;
+ space_info->max_extent_size = 0;
+
+ btrfs_try_granting_tickets(space_info);
spin_unlock(&space_info->lock);
}
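Illustrative sketch, not part of the patch: how a caller would pair the reworked reservation helpers above. The helper name example_reserve_then_release() and its error handling are hypothetical; the two btrfs_* calls use the bool-based signatures introduced by this series.

static int example_reserve_then_release(struct btrfs_block_group *bg, u64 bytes)
{
	int ret;

	/* Reserve @bytes of non-delalloc space, no size class override. */
	ret = btrfs_add_reserved_bytes(bg, bytes, bytes, false, false);
	if (ret)
		return ret;	/* -EAGAIN for a read-only bg or a size class race */

	/* ... the allocation is abandoned before anything reaches disk ... */

	/* Return the reservation; the accounting moves back to the space_info. */
	btrfs_free_reserved_bytes(bg, bytes, false);
	return 0;
}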
@@ -3373,14 +3894,14 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
}
}
-static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, int force)
+static bool should_alloc_chunk(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, int force)
{
u64 bytes_used = btrfs_space_info_used(sinfo, false);
u64 thresh;
if (force == CHUNK_ALLOC_FORCE)
- return 1;
+ return true;
/*
* in limited mode, we want to have some free space up to
@@ -3388,25 +3909,34 @@ static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
*/
if (force == CHUNK_ALLOC_LIMITED) {
thresh = btrfs_super_total_bytes(fs_info->super_copy);
- thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
+ thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));
if (sinfo->total_bytes - bytes_used < thresh)
- return 1;
+ return true;
}
- if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
- return 0;
- return 1;
+ if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
+ return false;
+ return true;
}
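The thresholds above are easier to see with concrete numbers. The snippet below is an illustrative restatement only, assuming mult_perc(x, p) computes x * p / 100 as in the btrfs misc helpers; example_should_alloc_no_force() is not in the patch.

static bool example_should_alloc_no_force(u64 total_bytes, u64 bytes_used)
{
	/* 80% of the space_info, as mult_perc(sinfo->total_bytes, 80) above. */
	u64 eighty_percent = total_bytes * 80 / 100;	/* kernel code uses div helpers */

	/* Only allocate a new chunk once usage approaches the 80% mark. */
	return bytes_used + SZ_2M >= eighty_percent;
}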
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
+ struct btrfs_space_info *space_info;
- return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ space_info = btrfs_find_space_info(trans->fs_info, type);
+ if (!space_info) {
+ DEBUG_WARN();
+ return -EINVAL;
+ }
+
+ return btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
}
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_space_info *space_info,
+ u64 flags)
{
struct btrfs_block_group *bg;
int ret;
@@ -3419,7 +3949,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
*/
check_system_chunk(trans, flags);
- bg = btrfs_create_chunk(trans, flags);
+ bg = btrfs_create_chunk(trans, space_info, flags);
if (IS_ERR(bg)) {
ret = PTR_ERR(bg);
goto out;
@@ -3467,8 +3997,16 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
if (ret == -ENOSPC) {
const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
struct btrfs_block_group *sys_bg;
+ struct btrfs_space_info *sys_space_info;
+
+ sys_space_info = btrfs_find_space_info(trans->fs_info, sys_flags);
+ if (unlikely(!sys_space_info)) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
- sys_bg = btrfs_create_chunk(trans, sys_flags);
+ sys_bg = btrfs_create_chunk(trans, sys_space_info, sys_flags);
if (IS_ERR(sys_bg)) {
ret = PTR_ERR(sys_bg);
btrfs_abort_transaction(trans, ret);
@@ -3476,24 +4014,28 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
}
ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- } else if (ret) {
+ } else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
out:
btrfs_trans_release_chunk_metadata(trans);
- return ret;
+ if (ret)
+ return ERR_PTR(ret);
+
+ btrfs_get_block_group(bg);
+ return bg;
}
/*
@@ -3595,6 +4137,8 @@ out:
*
* This function, btrfs_chunk_alloc(), belongs to phase 1.
*
+ * @space_info: specify which space_info the new chunk should belong to.
+ *
* If @force is CHUNK_ALLOC_FORCE:
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
@@ -3603,15 +4147,22 @@ out:
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
*/
-int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_space_info *space_info, u64 flags,
enum btrfs_chunk_alloc_enum force)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_space_info *space_info;
+ struct btrfs_block_group *ret_bg;
bool wait_for_alloc = false;
bool should_alloc = false;
+ bool from_extent_allocation = false;
int ret = 0;
+ if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+ from_extent_allocation = true;
+ force = CHUNK_ALLOC_FORCE;
+ }
+
/* Don't re-enter if we're already allocating a chunk */
if (trans->allocating_chunk)
return -ENOSPC;
@@ -3639,9 +4190,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
return -ENOSPC;
- space_info = btrfs_find_space_info(fs_info, flags);
- ASSERT(space_info);
-
do {
spin_lock(&space_info->lock);
if (force < space_info->force_alloc)
@@ -3649,11 +4197,11 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
should_alloc = should_alloc_chunk(fs_info, space_info, force);
if (space_info->full) {
/* No more free physical space */
+ spin_unlock(&space_info->lock);
if (should_alloc)
ret = -ENOSPC;
else
ret = 0;
- spin_unlock(&space_info->lock);
return ret;
} else if (!should_alloc) {
spin_unlock(&space_info->lock);
@@ -3665,15 +4213,16 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
* recheck if we should continue with our allocation
* attempt.
*/
- wait_for_alloc = true;
spin_unlock(&space_info->lock);
+ wait_for_alloc = true;
+ force = CHUNK_ALLOC_NO_FORCE;
mutex_lock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->chunk_mutex);
} else {
/* Proceed with allocation */
- space_info->chunk_alloc = 1;
- wait_for_alloc = false;
+ space_info->chunk_alloc = true;
spin_unlock(&space_info->lock);
+ wait_for_alloc = false;
}
cond_resched();
@@ -3701,13 +4250,26 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
force_metadata_allocation(fs_info);
}
- ret = do_chunk_alloc(trans, flags);
+ ret_bg = do_chunk_alloc(trans, space_info, flags);
trans->allocating_chunk = false;
+ if (IS_ERR(ret_bg)) {
+ ret = PTR_ERR(ret_bg);
+ } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) {
+ /*
+ * New block group is likely to be used soon. Try to activate
+ * it now. Failure is OK for now.
+ */
+ btrfs_zone_activate(ret_bg);
+ }
+
+ if (!ret)
+ btrfs_put_block_group(ret_bg);
+
spin_lock(&space_info->lock);
if (ret < 0) {
if (ret == -ENOSPC)
- space_info->full = 1;
+ space_info->full = true;
else
goto out;
} else {
@@ -3717,14 +4279,14 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
- space_info->chunk_alloc = 0;
+ space_info->chunk_alloc = false;
spin_unlock(&space_info->lock);
mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
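With the space_info parameter added above, callers now look the space_info up themselves, exactly as btrfs_force_chunk_alloc() does earlier in this hunk. A hypothetical data-chunk caller (the example_* name is not in the patch) would look like this:

static int example_alloc_data_chunk(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 flags = btrfs_data_alloc_profile(fs_info);
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, flags);
	if (!space_info)
		return -EINVAL;

	/* CHUNK_ALLOC_NO_FORCE: only allocate if should_alloc_chunk() agrees. */
	return btrfs_chunk_alloc(trans, space_info, flags, CHUNK_ALLOC_NO_FORCE);
}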
-static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
+static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type)
{
u64 num_dev;
@@ -3758,12 +4320,16 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
left, bytes, type);
- btrfs_dump_space_info(fs_info, info, 0, 0);
+ btrfs_dump_space_info(info, 0, false);
}
if (left < bytes) {
u64 flags = btrfs_system_alloc_profile(fs_info);
struct btrfs_block_group *bg;
+ struct btrfs_space_info *space_info;
+
+ space_info = btrfs_find_space_info(fs_info, flags);
+ ASSERT(space_info);
/*
* Ignore failure to create system chunk. We might end up not
@@ -3771,11 +4337,19 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
*/
- bg = btrfs_create_chunk(trans, flags);
+ bg = btrfs_create_chunk(trans, space_info, flags);
if (IS_ERR(bg)) {
ret = PTR_ERR(bg);
} else {
/*
+ * We have a new chunk. We also need to activate it for
+ * zoned filesystem.
+ */
+ ret = btrfs_zoned_activate_one_bg(info, true);
+ if (ret < 0)
+ return;
+
+ /*
* If we fail to add the chunk item here, we end up
* trying again at phase 2 of chunk allocation, at
* btrfs_create_pending_block_groups(). So ignore
@@ -3790,7 +4364,7 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
}
if (!ret) {
- ret = btrfs_block_rsv_add(fs_info->chunk_root,
+ ret = btrfs_block_rsv_add(fs_info,
&fs_info->chunk_block_rsv,
bytes, BTRFS_RESERVE_NO_FLUSH);
if (!ret)
@@ -3850,36 +4424,62 @@ void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
struct btrfs_block_group *block_group;
- u64 last = 0;
- while (1) {
- struct inode *inode;
+ block_group = btrfs_lookup_first_block_group(info, 0);
+ while (block_group) {
+ btrfs_wait_block_group_cache_done(block_group);
+ spin_lock(&block_group->lock);
+ if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
+ &block_group->runtime_flags)) {
+ struct btrfs_inode *inode = block_group->inode;
- block_group = btrfs_lookup_first_block_group(info, last);
- while (block_group) {
- btrfs_wait_block_group_cache_done(block_group);
- spin_lock(&block_group->lock);
- if (block_group->iref)
- break;
+ block_group->inode = NULL;
+ spin_unlock(&block_group->lock);
+
+ ASSERT(block_group->io_ctl.inode == NULL);
+ iput(&inode->vfs_inode);
+ } else {
spin_unlock(&block_group->lock);
- block_group = btrfs_next_block_group(block_group);
}
- if (!block_group) {
- if (last == 0)
- break;
- last = 0;
- continue;
+ block_group = btrfs_next_block_group(block_group);
+ }
+}
+
+static void check_removing_space_info(struct btrfs_space_info *space_info)
+{
+ struct btrfs_fs_info *info = space_info->fs_info;
+
+ if (space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY) {
+ /* This is a top space_info, proceed with its children first. */
+ for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+ if (space_info->sub_group[i]) {
+ check_removing_space_info(space_info->sub_group[i]);
+ kfree(space_info->sub_group[i]);
+ space_info->sub_group[i] = NULL;
+ }
}
+ }
- inode = block_group->inode;
- block_group->iref = 0;
- block_group->inode = NULL;
- spin_unlock(&block_group->lock);
- ASSERT(block_group->io_ctl.inode == NULL);
- iput(inode);
- last = block_group->start + block_group->length;
- btrfs_put_block_group(block_group);
+ /*
+ * Do not hide this behind enospc_debug, this is actually important and
+ * indicates a real bug if this happens.
+ */
+ if (WARN_ON(space_info->bytes_pinned > 0 || space_info->bytes_may_use > 0))
+ btrfs_dump_space_info(space_info, 0, false);
+
+ /*
+ * If there was a failure to cleanup a log tree, very likely due to an
+ * IO failure on a writeback attempt of one or more of its extent
+ * buffers, we could not do proper (and cheap) unaccounting of their
+ * reserved space, so don't warn on bytes_reserved > 0 in that case.
+ */
+ if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
+ if (WARN_ON(space_info->bytes_reserved > 0))
+ btrfs_dump_space_info(space_info, 0, false);
}
+
+ WARN_ON(space_info->reclaim_size > 0);
}
/*
@@ -3894,14 +4494,25 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
struct btrfs_caching_control *caching_ctl;
struct rb_node *n;
- spin_lock(&info->block_group_cache_lock);
+ if (btrfs_is_zoned(info)) {
+ if (info->active_meta_bg) {
+ btrfs_put_block_group(info->active_meta_bg);
+ info->active_meta_bg = NULL;
+ }
+ if (info->active_system_bg) {
+ btrfs_put_block_group(info->active_system_bg);
+ info->active_system_bg = NULL;
+ }
+ }
+
+ write_lock(&info->block_group_cache_lock);
while (!list_empty(&info->caching_block_groups)) {
- caching_ctl = list_entry(info->caching_block_groups.next,
- struct btrfs_caching_control, list);
+ caching_ctl = list_first_entry(&info->caching_block_groups,
+ struct btrfs_caching_control, list);
list_del(&caching_ctl->list);
btrfs_put_caching_control(caching_ctl);
}
- spin_unlock(&info->block_group_cache_lock);
+ write_unlock(&info->block_group_cache_lock);
spin_lock(&info->unused_bgs_lock);
while (!list_empty(&info->unused_bgs)) {
@@ -3911,9 +4522,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
list_del_init(&block_group->bg_list);
btrfs_put_block_group(block_group);
}
- spin_unlock(&info->unused_bgs_lock);
- spin_lock(&info->unused_bgs_lock);
while (!list_empty(&info->reclaim_bgs)) {
block_group = list_first_entry(&info->reclaim_bgs,
struct btrfs_block_group,
@@ -3933,14 +4542,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
}
spin_unlock(&info->zone_active_bgs_lock);
- spin_lock(&info->block_group_cache_lock);
- while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
+ write_lock(&info->block_group_cache_lock);
+ while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
block_group = rb_entry(n, struct btrfs_block_group,
cache_node);
- rb_erase(&block_group->cache_node,
- &info->block_group_cache_tree);
+ rb_erase_cached(&block_group->cache_node,
+ &info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);
- spin_unlock(&info->block_group_cache_lock);
+ write_unlock(&info->block_group_cache_lock);
down_write(&block_group->space_info->groups_sem);
list_del(&block_group->list);
@@ -3963,26 +4572,17 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
ASSERT(block_group->swap_extents == 0);
btrfs_put_block_group(block_group);
- spin_lock(&info->block_group_cache_lock);
+ write_lock(&info->block_group_cache_lock);
}
- spin_unlock(&info->block_group_cache_lock);
+ write_unlock(&info->block_group_cache_lock);
btrfs_release_global_block_rsv(info);
while (!list_empty(&info->space_info)) {
- space_info = list_entry(info->space_info.next,
- struct btrfs_space_info,
- list);
+ space_info = list_first_entry(&info->space_info,
+ struct btrfs_space_info, list);
- /*
- * Do not hide this behind enospc_debug, this is actually
- * important and indicates a real bug if this happens.
- */
- if (WARN_ON(space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
- space_info->bytes_may_use > 0))
- btrfs_dump_space_info(info, space_info, 0, 0);
- WARN_ON(space_info->reclaim_size > 0);
+ check_removing_space_info(space_info);
list_del(&space_info->list);
btrfs_sysfs_remove_space_info(space_info);
}
@@ -3997,34 +4597,31 @@ void btrfs_freeze_block_group(struct btrfs_block_group *cache)
void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
bool cleanup;
spin_lock(&block_group->lock);
cleanup = (atomic_dec_and_test(&block_group->frozen) &&
- block_group->removed);
+ test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
spin_unlock(&block_group->lock);
if (cleanup) {
- em_tree = &fs_info->mapping_tree;
- write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, block_group->start,
- 1);
- BUG_ON(!em); /* logic error, can't happen */
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
-
- /* once for us and once for the tree */
- free_extent_map(em);
- free_extent_map(em);
+ struct btrfs_chunk_map *map;
+
+ map = btrfs_find_chunk_map(fs_info, block_group->start, 1);
+ /* Logic error, can't happen. */
+ ASSERT(map);
+
+ btrfs_remove_chunk_map(fs_info, map);
+
+ /* Once for our lookup reference. */
+ btrfs_free_chunk_map(map);
/*
* We may have left one free space entry and other possible
* tasks trimming this block group have left 1 entry each one.
* Free them if any.
*/
- __btrfs_remove_free_space_cache(block_group->free_space_ctl);
+ btrfs_remove_free_space_cache(block_group);
}
}
@@ -4050,3 +4647,73 @@ void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount
bg->swap_extents -= amount;
spin_unlock(&bg->lock);
}
+
+enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size)
+{
+ if (size <= SZ_128K)
+ return BTRFS_BG_SZ_SMALL;
+ if (size <= SZ_8M)
+ return BTRFS_BG_SZ_MEDIUM;
+ return BTRFS_BG_SZ_LARGE;
+}
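As a quick sanity check of the boundaries above (purely illustrative, not part of the patch):

/* 0 < size <= 128K is small, 128K < size <= 8M is medium, larger is large. */
ASSERT(btrfs_calc_block_group_size_class(SZ_64K) == BTRFS_BG_SZ_SMALL);
ASSERT(btrfs_calc_block_group_size_class(SZ_128K) == BTRFS_BG_SZ_SMALL);
ASSERT(btrfs_calc_block_group_size_class(SZ_1M) == BTRFS_BG_SZ_MEDIUM);
ASSERT(btrfs_calc_block_group_size_class(SZ_16M) == BTRFS_BG_SZ_LARGE);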
+
+/*
+ * Handle a block group allocating an extent in a size class
+ *
+ * @bg: The block group we allocated in.
+ * @size_class: The size class of the allocation.
+ * @force_wrong_size_class: Whether we are desperate enough to allow
+ * mismatched size classes.
+ *
+ * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
+ * case of a race that leads to the wrong size class without
+ * force_wrong_size_class set.
+ *
+ * find_free_extent will skip block groups with a mismatched size class until
+ * it really needs to avoid ENOSPC. In that case it will set
+ * force_wrong_size_class. However, if a block group is newly allocated and
+ * doesn't yet have a size class, then it is possible for two allocations of
+ * different sizes to race and both try to use it. The loser is caught here and
+ * has to retry.
+ */
+int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
+ enum btrfs_block_group_size_class size_class,
+ bool force_wrong_size_class)
+{
+ ASSERT(size_class != BTRFS_BG_SZ_NONE);
+
+ /* The new allocation is in the right size class, do nothing */
+ if (bg->size_class == size_class)
+ return 0;
+ /*
+ * The new allocation is in a mismatched size class.
+ * This means one of two things:
+ *
+ * 1. Two tasks in find_free_extent for different size_classes raced
+ * and hit the same empty block_group. Make the loser try again.
+ * 2. A call to find_free_extent got desperate enough to set
+ * 'force_wrong_size_class'. Don't change the size_class, but allow the
+ * allocation.
+ */
+ if (bg->size_class != BTRFS_BG_SZ_NONE) {
+ if (force_wrong_size_class)
+ return 0;
+ return -EAGAIN;
+ }
+ /*
+ * The happy new block group case: the new allocation is the first
+ * one in the block_group so we set size_class.
+ */
+ bg->size_class = size_class;
+
+ return 0;
+}
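The race-handling contract described in the comment can be restated as a short, hypothetical sequence (not in the patch):

/* A fresh block group accepts the first size class it sees. */
bg->size_class = BTRFS_BG_SZ_NONE;
ASSERT(btrfs_use_block_group_size_class(bg, BTRFS_BG_SZ_SMALL, false) == 0);

/* A racing allocation of a different class loses and must retry... */
ASSERT(btrfs_use_block_group_size_class(bg, BTRFS_BG_SZ_LARGE, false) == -EAGAIN);

/* ...unless find_free_extent() is desperate and forces the mismatch. */
ASSERT(btrfs_use_block_group_size_class(bg, BTRFS_BG_SZ_LARGE, true) == 0);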
+
+bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg)
+{
+ if (btrfs_is_zoned(bg->fs_info))
+ return false;
+ if (!btrfs_is_block_group_data_only(bg))
+ return false;
+ return true;
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 5878b7ce3b78..5f933455118c 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -3,8 +3,23 @@
#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/refcount.h>
+#include <linux/wait.h>
+#include <linux/sizes.h>
+#include <linux/rwsem.h>
+#include <linux/rbtree.h>
+#include <uapi/linux/btrfs_tree.h>
#include "free-space-cache.h"
+struct btrfs_chunk_map;
+struct btrfs_fs_info;
+struct btrfs_inode;
+struct btrfs_trans_handle;
+
enum btrfs_disk_cache_state {
BTRFS_DC_WRITTEN,
BTRFS_DC_ERROR,
@@ -12,6 +27,17 @@ enum btrfs_disk_cache_state {
BTRFS_DC_SETUP,
};
+enum btrfs_block_group_size_class {
+ /* Unset */
+ BTRFS_BG_SZ_NONE,
+ /* 0 < size <= 128K */
+ BTRFS_BG_SZ_SMALL,
+ /* 128K < size <= 8M */
+ BTRFS_BG_SZ_MEDIUM,
+ /* 8M < size < BG_LENGTH */
+ BTRFS_BG_SZ_LARGE,
+};
+
/*
* This describes the state of the block_group for async discard. This is due
* to the two pass nature of it where extent discarding is prioritized over
@@ -35,11 +61,44 @@ enum btrfs_discard_state {
* the FS with empty chunks
*
* CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
+ * find_free_extent() that also activates the zone
*/
enum btrfs_chunk_alloc_enum {
CHUNK_ALLOC_NO_FORCE,
CHUNK_ALLOC_LIMITED,
CHUNK_ALLOC_FORCE,
+ CHUNK_ALLOC_FORCE_FOR_EXTENT,
+};
+
+/* Block group flags set at runtime */
+enum btrfs_block_group_flags {
+ BLOCK_GROUP_FLAG_IREF,
+ BLOCK_GROUP_FLAG_REMOVED,
+ BLOCK_GROUP_FLAG_TO_COPY,
+ BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
+ BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+ BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+ BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ /* Does the block group need to be added to the free space tree? */
+ BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
+ /* Set after we add a new block group to the free space tree. */
+ BLOCK_GROUP_FLAG_FREE_SPACE_ADDED,
+ /* Indicate that the block group is placed on a sequential zone */
+ BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
+ /*
+ * Indicate that block group is in the list of new block groups of a
+ * transaction.
+ */
+ BLOCK_GROUP_FLAG_NEW,
+};
+
+enum btrfs_caching_type {
+ BTRFS_CACHE_NO,
+ BTRFS_CACHE_STARTED,
+ BTRFS_CACHE_FINISHED,
+ BTRFS_CACHE_ERROR,
};
struct btrfs_caching_control {
@@ -48,7 +107,8 @@ struct btrfs_caching_control {
wait_queue_head_t wait;
struct btrfs_work work;
struct btrfs_block_group *block_group;
- u64 progress;
+ /* Track progress of caching during allocation. */
+ atomic_t progress;
refcount_t count;
};
@@ -57,7 +117,7 @@ struct btrfs_caching_control {
struct btrfs_block_group {
struct btrfs_fs_info *fs_info;
- struct inode *inode;
+ struct btrfs_inode *inode;
spinlock_t lock;
u64 start;
u64 length;
@@ -68,8 +128,15 @@ struct btrfs_block_group {
u64 bytes_super;
u64 flags;
u64 cache_generation;
+ u64 global_root_id;
/*
+ * The last committed used bytes of this block group, if the above @used
+ * is still the same as @commit_used, we don't need to update block
+ * group item of this block group.
+ */
+ u64 commit_used;
+ /*
* If the free space extent count exceeds this number, convert the block
* group to bitmaps.
*/
@@ -90,22 +157,15 @@ struct btrfs_block_group {
/* For raid56, this is a full stripe, without parity */
unsigned long full_stripe_len;
+ unsigned long runtime_flags;
unsigned int ro;
- unsigned int iref:1;
- unsigned int has_caching_ctl:1;
- unsigned int removed:1;
- unsigned int to_copy:1;
- unsigned int relocating_repair:1;
- unsigned int chunk_item_inserted:1;
- unsigned int zone_is_active:1;
int disk_cache_state;
/* Cache tracking stuff */
int cached;
struct btrfs_caching_control *caching_ctl;
- u64 last_byte_to_unpin;
struct btrfs_space_info *space_info;
@@ -126,7 +186,14 @@ struct btrfs_block_group {
*/
struct list_head cluster_list;
- /* For delayed block group creation or deletion of empty block groups */
+ /*
+ * Used for several lists:
+ *
+ * 1) struct btrfs_fs_info::unused_bgs
+ * 2) struct btrfs_fs_info::reclaim_bgs
+ * 3) struct btrfs_transaction::deleted_bgs
+ * 4) struct btrfs_trans_handle::new_bgs
+ */
struct list_head bg_list;
/* For read-only block groups */
@@ -179,14 +246,10 @@ struct btrfs_block_group {
/* Lock for free space tree operations. */
struct mutex free_space_lock;
- /*
- * Does the block group need to be added to the free space tree?
- * Protected by free_space_lock.
- */
- int needs_free_space;
-
- /* Flag indicating this block group is placed on a sequential zone */
- bool seq_zone;
+ /* Protected by @free_space_lock. */
+ bool using_free_space_bitmaps;
+ /* Protected by @free_space_lock. */
+ bool using_free_space_bitmaps_cached;
/*
* Number of extents in this block group used for swap files.
@@ -194,9 +257,6 @@ struct btrfs_block_group {
*/
int swap_extents;
- /* Record locked full stripes for RAID5/6 block group */
- struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
-
/*
* Allocation offset for the block group to implement sequential
* allocation. This is used only on a zoned filesystem.
@@ -205,17 +265,27 @@ struct btrfs_block_group {
u64 zone_unusable;
u64 zone_capacity;
u64 meta_write_pointer;
- struct map_lookup *physical_map;
+ struct btrfs_chunk_map *physical_map;
struct list_head active_bg_list;
+ struct work_struct zone_finish_work;
+ struct extent_buffer *last_eb;
+ enum btrfs_block_group_size_class size_class;
+ u64 reclaim_mark;
};
-static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
+static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group)
{
return (block_group->start + block_group->length);
}
-static inline bool btrfs_is_block_group_data_only(
- struct btrfs_block_group *block_group)
+static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
+{
+ lockdep_assert_held(&bg->lock);
+
+ return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
+}
+
+static inline bool btrfs_is_block_group_data_only(const struct btrfs_block_group *block_group)
{
/*
* In mixed mode the fragmentation is expected to be high, lowering the
@@ -226,16 +296,7 @@ static inline bool btrfs_is_block_group_data_only(
}
#ifdef CONFIG_BTRFS_DEBUG
-static inline int btrfs_should_fragment_free_space(
- struct btrfs_block_group *block_group)
-{
- struct btrfs_fs_info *fs_info = block_group->fs_info;
-
- return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
- block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
- (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
- block_group->flags & BTRFS_BLOCK_GROUP_DATA);
-}
+int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group);
#endif
struct btrfs_block_group *btrfs_lookup_first_block_group(
@@ -249,24 +310,22 @@ void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
+ u64 bytenr);
+void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
u64 num_bytes);
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
-int btrfs_cache_block_group(struct btrfs_block_group *cache,
- int load_cache_only);
-void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
+int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
struct btrfs_caching_control *btrfs_get_caching_control(
struct btrfs_block_group *cache);
-u64 add_new_free_space(struct btrfs_block_group *block_group,
- u64 start, u64 end);
+int btrfs_add_new_free_space(struct btrfs_block_group *block_group,
+ u64 start, u64 end, u64 *total_added_ret);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- u64 group_start, struct extent_map *em);
+ struct btrfs_chunk_map *map);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
@@ -274,8 +333,8 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
- u64 bytes_used, u64 type,
- u64 chunk_offset, u64 size);
+ struct btrfs_space_info *space_info,
+ u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc);
@@ -286,10 +345,12 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
- u64 ram_bytes, u64 num_bytes, int delalloc);
-void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
- u64 num_bytes, int delalloc);
-int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ u64 ram_bytes, u64 num_bytes, bool delalloc,
+ bool force_wrong_size_class);
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
+ bool is_delalloc);
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_space_info *space_info, u64 flags,
enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
@@ -298,11 +359,8 @@ void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
-void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
- struct btrfs_caching_control *caching_ctl);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
- struct block_device *bdev, u64 physical, u64 **logical,
- int *naddrs, int *stripe_len);
+ u64 physical, u64 **logical, int *naddrs, int *stripe_len);
static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
@@ -319,7 +377,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
-static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
+static inline int btrfs_block_group_done(const struct btrfs_block_group *cache)
{
smp_mb();
return cache->cached == BTRFS_CACHE_FINISHED ||
@@ -332,4 +390,10 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
+enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
+int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
+ enum btrfs_block_group_size_class size_class,
+ bool force_wrong_size_class);
+bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg);
+
#endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 04a6226e0388..96cf7a162987 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -6,6 +6,8 @@
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
+#include "fs.h"
+#include "accessors.h"
/*
* HOW DO BLOCK RESERVES WORK
@@ -117,11 +119,12 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
if (block_rsv->reserved >= block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
block_rsv->reserved = block_rsv->size;
- block_rsv->full = 1;
+ block_rsv->full = true;
} else {
num_bytes = 0;
}
- if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ if (qgroup_to_release_ret &&
+ block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
qgroup_to_release = block_rsv->qgroup_rsv_reserved -
block_rsv->qgroup_rsv_size;
block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
@@ -141,15 +144,13 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
bytes_to_add = min(num_bytes, bytes_to_add);
dest->reserved += bytes_to_add;
if (dest->reserved >= dest->size)
- dest->full = 1;
+ dest->full = true;
num_bytes -= bytes_to_add;
}
spin_unlock(&dest->lock);
}
if (num_bytes)
- btrfs_space_info_free_bytes_may_use(fs_info,
- space_info,
- num_bytes);
+ btrfs_space_info_free_bytes_may_use(space_info, num_bytes);
}
if (qgroup_to_release_ret)
*qgroup_to_release_ret = qgroup_to_release;
@@ -170,7 +171,7 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
return 0;
}
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
memset(rsv, 0, sizeof(*rsv));
spin_lock_init(&rsv->lock);
@@ -179,7 +180,7 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
- unsigned short type)
+ enum btrfs_rsv_type type)
{
btrfs_init_block_rsv(rsv, type);
rsv->space_info = btrfs_find_space_info(fs_info,
@@ -187,7 +188,7 @@ void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
}
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
- unsigned short type)
+ enum btrfs_rsv_type type)
{
struct btrfs_block_rsv *block_rsv;
@@ -208,7 +209,7 @@ void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
kfree(rsv);
}
-int btrfs_block_rsv_add(struct btrfs_root *root,
+int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush)
{
@@ -217,23 +218,20 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
if (num_bytes == 0)
return 0;
- ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, num_bytes, flush);
if (!ret)
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);
return ret;
}
-int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
- if (!block_rsv)
- return 0;
-
spin_lock(&block_rsv->lock);
- num_bytes = div_factor(block_rsv->size, min_factor);
+ num_bytes = mult_perc(block_rsv->size, min_percent);
if (block_rsv->reserved >= num_bytes)
ret = 0;
spin_unlock(&block_rsv->lock);
@@ -241,18 +239,16 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
return ret;
}
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, u64 min_reserved,
+int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush)
{
- u64 num_bytes = 0;
int ret = -ENOSPC;
if (!block_rsv)
return 0;
spin_lock(&block_rsv->lock);
- num_bytes = min_reserved;
if (block_rsv->reserved >= num_bytes)
ret = 0;
else
@@ -262,7 +258,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
if (!ret)
return 0;
- ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, num_bytes, flush);
if (!ret) {
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
return 0;
@@ -280,12 +276,12 @@ u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *target = NULL;
/*
- * If we are the delayed_rsv then push to the global rsv, otherwise dump
- * into the delayed rsv if it is not full.
+ * If we are a delayed block reserve then push to the global rsv,
+ * otherwise dump into the global delayed reserve if it is not full.
*/
- if (block_rsv == delayed_rsv)
+ if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
target = global_rsv;
- else if (block_rsv != global_rsv && !delayed_rsv->full)
+ else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
target = delayed_rsv;
if (target && block_rsv->space_info != target->space_info)
@@ -303,7 +299,7 @@ int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
if (block_rsv->reserved >= num_bytes) {
block_rsv->reserved -= num_bytes;
if (block_rsv->reserved < block_rsv->size)
- block_rsv->full = 0;
+ block_rsv->full = false;
ret = 0;
}
spin_unlock(&block_rsv->lock);
@@ -318,70 +314,63 @@ void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
if (update_size)
block_rsv->size += num_bytes;
else if (block_rsv->reserved >= block_rsv->size)
- block_rsv->full = 1;
+ block_rsv->full = true;
spin_unlock(&block_rsv->lock);
}
-int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *dest, u64 num_bytes,
- int min_factor)
-{
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- u64 min_bytes;
-
- if (global_rsv->space_info != dest->space_info)
- return -ENOSPC;
-
- spin_lock(&global_rsv->lock);
- min_bytes = div_factor(global_rsv->size, min_factor);
- if (global_rsv->reserved < min_bytes + num_bytes) {
- spin_unlock(&global_rsv->lock);
- return -ENOSPC;
- }
- global_rsv->reserved -= num_bytes;
- if (global_rsv->reserved < global_rsv->size)
- global_rsv->full = 0;
- spin_unlock(&global_rsv->lock);
-
- btrfs_block_rsv_add_bytes(dest, num_bytes, true);
- return 0;
-}
-
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
struct btrfs_space_info *sinfo = block_rsv->space_info;
- u64 num_bytes;
- unsigned min_items;
+ struct btrfs_root *root, *tmp;
+ u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
+ unsigned int min_items = 1;
/*
* The global block rsv is based on the size of the extent tree, the
* checksum tree and the root tree. If the fs is empty we want to set
* it to a minimal amount for safety.
+ *
+ * We also are going to need to modify the minimum of the tree root and
+ * any global roots we could touch.
*/
- num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
- btrfs_root_used(&fs_info->csum_root->root_item) +
- btrfs_root_used(&fs_info->tree_root->root_item);
+ read_lock(&fs_info->global_root_lock);
+ rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
+ rb_node) {
+ if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_FREE_SPACE_TREE_OBJECTID) {
+ num_bytes += btrfs_root_used(&root->root_item);
+ min_items++;
+ }
+ }
+ read_unlock(&fs_info->global_root_lock);
- /*
- * We at a minimum are going to modify the csum root, the tree root, and
- * the extent root.
- */
- min_items = 3;
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
+ num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
+ min_items++;
+ }
+
+ if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
+ num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
+ min_items++;
+ }
/*
* But we also want to reserve enough space so we can do the fallback
- * global reserve for an unlink, which is an additional 5 items (see the
- * comment in __unlink_start_trans for what we're modifying.)
+ * global reserve for an unlink, which is an additional
+ * BTRFS_UNLINK_METADATA_UNITS items.
*
* But we also need space for the delayed ref updates from the unlink,
- * so its 10, 5 for the actual operation, and 5 for the delayed ref
- * updates.
+ * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
+ * each unlink metadata item.
*/
- min_items += 10;
+ min_items += BTRFS_UNLINK_METADATA_UNITS;
num_bytes = max_t(u64, num_bytes,
- btrfs_calc_insert_metadata_size(fs_info, min_items));
+ btrfs_calc_insert_metadata_size(fs_info, min_items) +
+ btrfs_calc_delayed_ref_bytes(fs_info,
+ BTRFS_UNLINK_METADATA_UNITS));
spin_lock(&sinfo->lock);
spin_lock(&block_rsv->lock);
@@ -390,21 +379,16 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
if (block_rsv->reserved < block_rsv->size) {
num_bytes = block_rsv->size - block_rsv->reserved;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- num_bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, num_bytes);
block_rsv->reserved = block_rsv->size;
} else if (block_rsv->reserved > block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- -num_bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, -num_bytes);
block_rsv->reserved = block_rsv->size;
- btrfs_try_granting_tickets(fs_info, sinfo);
+ btrfs_try_granting_tickets(sinfo);
}
- if (block_rsv->reserved == block_rsv->size)
- block_rsv->full = 1;
- else
- block_rsv->full = 0;
+ block_rsv->full = (block_rsv->reserved == block_rsv->size);
if (block_rsv->size >= sinfo->total_bytes)
sinfo->force_alloc = CHUNK_ALLOC_FORCE;
@@ -412,6 +396,35 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
spin_unlock(&sinfo->lock);
}
+void btrfs_init_root_block_rsv(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ switch (btrfs_root_id(root)) {
+ case BTRFS_CSUM_TREE_OBJECTID:
+ case BTRFS_EXTENT_TREE_OBJECTID:
+ case BTRFS_FREE_SPACE_TREE_OBJECTID:
+ case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
+ case BTRFS_RAID_STRIPE_TREE_OBJECTID:
+ root->block_rsv = &fs_info->delayed_refs_rsv;
+ break;
+ case BTRFS_ROOT_TREE_OBJECTID:
+ case BTRFS_DEV_TREE_OBJECTID:
+ case BTRFS_QUOTA_TREE_OBJECTID:
+ root->block_rsv = &fs_info->global_block_rsv;
+ break;
+ case BTRFS_CHUNK_TREE_OBJECTID:
+ root->block_rsv = &fs_info->chunk_block_rsv;
+ break;
+ case BTRFS_TREE_LOG_OBJECTID:
+ root->block_rsv = &fs_info->treelog_rsv;
+ break;
+ default:
+ root->block_rsv = NULL;
+ break;
+ }
+}
+
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
struct btrfs_space_info *space_info;
@@ -426,21 +439,13 @@ void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
fs_info->delayed_block_rsv.space_info = space_info;
fs_info->delayed_refs_rsv.space_info = space_info;
- /*
- * Our various recovery options can leave us with NULL roots, so check
- * here and just bail before we go dereferencing NULLs everywhere.
- */
- if (!fs_info->extent_root || !fs_info->csum_root ||
- !fs_info->dev_root || !fs_info->chunk_root || !fs_info->tree_root)
- return;
-
- fs_info->extent_root->block_rsv = &fs_info->delayed_refs_rsv;
- fs_info->csum_root->block_rsv = &fs_info->delayed_refs_rsv;
- fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
- fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
- if (fs_info->quota_root)
- fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
- fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
+ /* The treelog_rsv uses a dedicated space_info on the zoned mode. */
+ if (!btrfs_is_zoned(fs_info)) {
+ fs_info->treelog_rsv.space_info = space_info;
+ } else {
+ ASSERT(space_info->sub_group[0]->subgroup_id == BTRFS_SUB_GROUP_TREELOG);
+ fs_info->treelog_rsv.space_info = space_info->sub_group[0];
+ }
btrfs_update_global_block_rsv(fs_info);
}
@@ -467,8 +472,8 @@ static struct btrfs_block_rsv *get_block_rsv(
struct btrfs_block_rsv *block_rsv = NULL;
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
- (root == fs_info->csum_root && trans->adding_csums) ||
- (root == fs_info->uuid_root))
+ (root == fs_info->uuid_root) ||
+ (trans->adding_csums && btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID))
block_rsv = trans->block_rsv;
if (!block_rsv)
@@ -492,7 +497,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
block_rsv = get_block_rsv(trans, root);
- if (unlikely(block_rsv->size == 0))
+ if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
goto try_reserve;
again:
ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
@@ -523,7 +528,7 @@ again:
block_rsv->type, ret);
}
try_reserve:
- ret = btrfs_reserve_metadata_bytes(root, block_rsv, blocksize,
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, blocksize,
BTRFS_RESERVE_NO_FLUSH);
if (!ret)
return block_rsv;
@@ -538,5 +543,36 @@ try_reserve:
if (!ret)
return global_rsv;
}
+
+ /*
+ * All hope is lost, but of course our reservations are overly
+ * pessimistic, so instead of possibly having an ENOSPC abort here, try
+ * one last time to force a reservation if there's enough actual space
+ * on disk to make the reservation.
+ */
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, blocksize,
+ BTRFS_RESERVE_FLUSH_EMERGENCY);
+ if (!ret)
+ return block_rsv;
+
return ERR_PTR(ret);
}
+
+int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *rsv)
+{
+ u64 needed_bytes;
+ int ret;
+
+ /* 1 for slack space, 1 for updating the inode */
+ needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
+ btrfs_calc_metadata_size(fs_info, 1);
+
+ spin_lock(&rsv->lock);
+ if (rsv->reserved < needed_bytes)
+ ret = -ENOSPC;
+ else
+ ret = 0;
+ spin_unlock(&rsv->lock);
+ return ret;
+}
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
index 0b6ae5302837..79ae9d05cd91 100644
--- a/fs/btrfs/block-rsv.h
+++ b/fs/btrfs/block-rsv.h
@@ -3,19 +3,28 @@
#ifndef BTRFS_BLOCK_RSV_H
#define BTRFS_BLOCK_RSV_H
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+
struct btrfs_trans_handle;
+struct btrfs_root;
+struct btrfs_space_info;
+struct btrfs_block_rsv;
+struct btrfs_fs_info;
enum btrfs_reserve_flush_enum;
/*
* Types of block reserves
*/
-enum {
+enum btrfs_rsv_type {
BTRFS_BLOCK_RSV_GLOBAL,
BTRFS_BLOCK_RSV_DELALLOC,
BTRFS_BLOCK_RSV_TRANS,
BTRFS_BLOCK_RSV_CHUNK,
BTRFS_BLOCK_RSV_DELOPS,
BTRFS_BLOCK_RSV_DELREFS,
+ BTRFS_BLOCK_RSV_TREELOG,
BTRFS_BLOCK_RSV_EMPTY,
BTRFS_BLOCK_RSV_TEMP,
};
@@ -25,9 +34,10 @@ struct btrfs_block_rsv {
u64 reserved;
struct btrfs_space_info *space_info;
spinlock_t lock;
- unsigned short full;
- unsigned short type;
- unsigned short failfast;
+ bool full;
+ bool failfast;
+ /* Block reserve type, one of BTRFS_BLOCK_RSV_* */
+ enum btrfs_rsv_type type:8;
/*
* Qgroup equivalent for @size @reserved
@@ -49,28 +59,26 @@ struct btrfs_block_rsv {
u64 qgroup_rsv_reserved;
};
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
+void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
- unsigned short type);
+ enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
- unsigned short type);
+ enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
-int btrfs_block_rsv_add(struct btrfs_root *root,
+int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, u64 min_reserved,
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
+int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
-int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *dest, u64 num_bytes,
- int min_factor);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
@@ -82,6 +90,8 @@ void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u32 blocksize);
+int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *rsv);
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u32 blocksize)
@@ -90,4 +100,45 @@ static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}
+/*
+ * Fast path to check if the reserve is full, may be carefully used outside of
+ * locks.
+ */
+static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
+{
+ return data_race(rsv->full);
+}
+
+/*
+ * Get the reserved amount of a block reserve in a context where getting a stale
+ * value is acceptable, instead of accessing it directly and triggering a data
+ * race warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->reserved;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
+/*
+ * Get the size of a block reserve in a context where getting a stale value is
+ * acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->size;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
#endif /* BTRFS_BLOCK_RSV_H */
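/*
 * Illustrative sketch only, not part of the patch: a hypothetical caller that
 * combines the lockless fast path with the spinlock-protected getter declared
 * above. A stale read from btrfs_block_rsv_full() is acceptable here because
 * a false negative only means we fall back to the precise, locked read.
 */
static inline bool example_rsv_needs_refill(struct btrfs_block_rsv *rsv,
					    u64 want)
{
	/* Lockless check, a stale value is fine here. */
	if (btrfs_block_rsv_full(rsv))
		return false;
	/* Precise read, the helper takes rsv->lock internally. */
	return btrfs_block_rsv_reserved(rsv) < want;
}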
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ab2a4a52e0bb..73602ee8de3f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -8,10 +8,37 @@
#include <linux/hash.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/compiler.h>
+#include <linux/fscrypt.h>
+#include <linux/lockdep.h>
+#include <uapi/linux/btrfs_tree.h>
+#include <trace/events/btrfs.h>
+#include "ctree.h"
+#include "block-rsv.h"
#include "extent_map.h"
-#include "extent_io.h"
-#include "ordered-data.h"
-#include "delayed-inode.h"
+#include "extent-io-tree.h"
+
+struct posix_acl;
+struct iov_iter;
+struct writeback_control;
+struct btrfs_root;
+struct btrfs_fs_info;
+struct btrfs_trans_handle;
+struct btrfs_bio;
+struct btrfs_file_extent;
+struct btrfs_delayed_node;
+
+/*
+ * Since we search a directory based on f_pos (struct dir_context::pos), and
+ * '.' and '..' already occupy f_pos 0 and 1 respectively, every other entry
+ * has to start at 2 (see btrfs_real_readdir() and dir_emit_dots()).
+ */
+#define BTRFS_DIR_START_INDEX 2
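/*
 * Hypothetical sketch, not part of the patch: how a readdir implementation
 * uses the constant above. dir_emit_dots() consumes positions 0 and 1 for
 * "." and "..", so the first real directory index key starts at
 * BTRFS_DIR_START_INDEX, which is what btrfs_real_readdir() relies on.
 */
static int example_readdir(struct file *file, struct dir_context *ctx)
{
	if (!dir_emit_dots(file, ctx))
		return 0;
	if (ctx->pos < BTRFS_DIR_START_INDEX)
		ctx->pos = BTRFS_DIR_START_INDEX;
	/* ... emit entries whose dir index key offset is >= ctx->pos ... */
	return 0;
}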
/*
* ordered_data_close is set by truncate when a file that used
@@ -32,7 +59,6 @@ enum {
*/
BTRFS_INODE_NEEDS_FULL_SYNC,
BTRFS_INODE_COPY_EVERYTHING,
- BTRFS_INODE_IN_DELALLOC_LIST,
BTRFS_INODE_HAS_PROPS,
BTRFS_INODE_SNAPSHOT_FLUSH,
/*
@@ -58,6 +84,43 @@ enum {
* on the same file.
*/
BTRFS_INODE_VERITY_IN_PROGRESS,
+ /* Set when this inode is a free space inode. */
+ BTRFS_INODE_FREE_SPACE_INODE,
+ /* Set when there are no capabilities in xattrs for the inode. */
+ BTRFS_INODE_NO_CAP_XATTR,
+ /*
+ * Set if an error happened when doing a COW write before submitting a
+ * bio or during writeback. Used for both buffered writes and direct IO
+ * writes. This is to signal a fast fsync that it has to wait for
+ * ordered extents to complete and therefore not log extent maps that
+ * point to unwritten extents (when an ordered extent completes and it
+ * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
+ * range).
+ */
+ BTRFS_INODE_COW_WRITE_ERROR,
+ /*
+ * Indicate this is a directory that points to a subvolume for which
+ * there is no root reference item. That's a case like the following:
+ *
+ * $ btrfs subvolume create /mnt/parent
+ * $ btrfs subvolume create /mnt/parent/child
+ * $ btrfs subvolume snapshot /mnt/parent /mnt/snap
+ *
+ * If subvolume "parent" is root 256, subvolume "child" is root 257 and
+ * snapshot "snap" is root 258, then there's no root reference item (key
+ * BTRFS_ROOT_REF_KEY in the root tree) for the subvolume "child"
+ * associated to root 258 (the snapshot) - there's only for the root
+ * of the "parent" subvolume (root 256). In the chunk root we have a
+ * (256 BTRFS_ROOT_REF_KEY 257) key but we don't have a
+ * (258 BTRFS_ROOT_REF_KEY 257) key - the same goes for backrefs, we
+ * have a (257 BTRFS_ROOT_BACKREF_KEY 256) but we don't have a
+ * (257 BTRFS_ROOT_BACKREF_KEY 258) key.
+ *
+ * So when opening the "child" dentry from the snapshot's directory,
+ * we don't find a root ref item and we create a stub inode. This is
+ * done at new_simple_dir(), called from btrfs_lookup_dentry().
+ */
+ BTRFS_INODE_ROOT_STUB,
};
/* in memory btrfs inode */
@@ -65,16 +128,32 @@ struct btrfs_inode {
/* which subvolume this inode belongs to */
struct btrfs_root *root;
- /* key used to find this inode on disk. This is used by the code
- * to read in roots of subvolumes
+#if BITS_PER_LONG == 32
+ /*
+ * The objectid of the corresponding BTRFS_INODE_ITEM_KEY.
+ * On 64 bits platforms we can get it from vfs_inode.i_ino, which is an
+ * unsigned long and therefore 64 bits on such platforms.
+ */
+ u64 objectid;
+#endif
+
+ /* Cached value of inode property 'compression'. */
+ u8 prop_compress;
+
+ /*
+ * Force compression on the file using the defrag ioctl, could be
+ * different from prop_compress and takes precedence if set.
*/
- struct btrfs_key location;
+ u8 defrag_compress;
+ s8 defrag_compress_level;
/*
* Lock for counters and all fields used to determine if the inode is in
* the log or not (last_trans, last_sub_trans, last_log_commit,
- * logged_trans), to access/update new_delalloc_bytes and to update the
- * VFS' inode number of bytes used.
+ * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
+ * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
+ * update the VFS' inode number of bytes used.
+ * Also protects setting struct file::private_data.
*/
spinlock_t lock;
@@ -84,22 +163,29 @@ struct btrfs_inode {
/* the io_tree does range state (DIRTY, LOCKED etc) */
struct extent_io_tree io_tree;
- /* special utility tree used to record which mirrors have already been
- * tried when checksums fail for a given block
- */
- struct extent_io_tree io_failure_tree;
-
/*
* Keep track of where the inode has extent items mapped in order to
- * make sure the i_size adjustments are accurate
+ * make sure the i_size adjustments are accurate. Not required when the
+ * filesystem is NO_HOLES; the status can't be set while mounted as
+ * it's a mkfs-time feature.
*/
- struct extent_io_tree file_extent_tree;
+ struct extent_io_tree *file_extent_tree;
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
+ /*
+ * Counters to keep track of the number of extent items we may use due
+ * to delalloc and such. outstanding_extents is the number of extent
+ * items we think we'll end up using, and reserved_extents is the number
+ * of extent items we've reserved metadata for. Protected by 'lock'.
+ */
+ unsigned outstanding_extents;
+
/* used to order data wrt metadata */
- struct btrfs_ordered_inode_tree ordered_tree;
+ spinlock_t ordered_tree_lock;
+ struct rb_root ordered_tree;
+ struct rb_node *ordered_tree_last;
/* list of all the delalloc inodes in the FS. There are times we need
* to write all the delalloc pages to disk, and this list is used
@@ -107,49 +193,49 @@ struct btrfs_inode {
*/
struct list_head delalloc_inodes;
- /* node for the red-black tree that links inodes in subvolume root */
- struct rb_node rb_node;
-
unsigned long runtime_flags;
- /* Keep track of who's O_SYNC/fsyncing currently */
- atomic_t sync_writers;
-
/* full 64 bit generation number, struct vfs_inode doesn't have a big
* enough field for this.
*/
u64 generation;
/*
- * transid of the trans_handle that last modified this inode
+ * ID of the transaction handle that last modified this inode.
+ * Protected by 'lock'.
*/
u64 last_trans;
/*
- * transid that last logged this inode
+ * ID of the transaction that last logged this inode.
+ * Protected by 'lock'.
*/
u64 logged_trans;
/*
- * log transid when this inode was last modified
+ * Log transaction ID when this inode was last modified.
+ * Protected by 'lock'.
*/
int last_sub_trans;
- /* a local copy of root's last_log_commit */
+ /* A local copy of root's last_log_commit. Protected by 'lock'. */
int last_log_commit;
union {
/*
* Total number of bytes pending delalloc, used by stat to
* calculate the real block usage of the file. This is used
- * only for files.
+ * only for files. Protected by 'lock'.
*/
u64 delalloc_bytes;
/*
- * The offset of the last dir item key that was logged.
+ * The lowest possible index of the next dir index key which
+ * points to an inode that needs to be logged.
* This is used only for directories.
+ * Use the helpers btrfs_get_first_dir_index_to_log() and
+ * btrfs_set_first_dir_index_to_log() to access this field.
*/
- u64 last_dir_item_offset;
+ u64 first_dir_index_to_log;
};
union {
@@ -157,34 +243,53 @@ struct btrfs_inode {
* Total number of bytes pending delalloc that fall within a file
* range that is either a hole or beyond EOF (and no prealloc extent
* exists in the range). This is always <= delalloc_bytes and this
- * is used only for files.
+ * is used only for files. Protected by 'lock'.
*/
u64 new_delalloc_bytes;
/*
* The offset of the last dir index key that was logged.
- * This is used only for directories.
+ * This is used only for directories. Protected by 'log_mutex'.
*/
u64 last_dir_index_offset;
};
- /*
- * total number of bytes pending defrag, used by stat to check whether
- * it needs COW.
- */
- u64 defrag_bytes;
+ union {
+ /*
+ * Total number of bytes pending defrag, used by stat to check whether
+ * it needs COW. Protected by 'lock'.
+ * Used by inodes other than the data relocation inode.
+ */
+ u64 defrag_bytes;
+
+ /*
+ * Logical address of the block group being relocated.
+ * Used only by the data relocation inode.
+ */
+ u64 reloc_block_group_start;
+ };
/*
- * the size of the file stored in the metadata on disk. data=ordered
+ * The size of the file stored in the metadata on disk. data=ordered
* means the in-memory i_size might be larger than the size on disk
- * because not all the blocks are written yet.
+ * because not all the blocks are written yet. Protected by 'lock'.
*/
u64 disk_i_size;
- /*
- * if this is a directory then index_cnt is the counter for the index
- * number for new files that are created
- */
- u64 index_cnt;
+ union {
+ /*
+ * If this is a directory then index_cnt is the counter for the
+ * index number for new files that are created. For an empty
+ * directory, this must be initialized to BTRFS_DIR_START_INDEX.
+ */
+ u64 index_cnt;
+
+ /*
+ * If this is not a directory, this is the number of bytes
+ * outstanding that are going to need csums. This is used in
+ * ENOSPC accounting. Protected by 'lock'.
+ */
+ u64 csum_bytes;
+ };
/* Cache the directory index number to speed the dir/file remove */
u64 dir_index;
@@ -196,70 +301,69 @@ struct btrfs_inode {
*/
u64 last_unlink_trans;
- /*
- * The id/generation of the last transaction where this inode was
- * either the source or the destination of a clone/dedupe operation.
- * Used when logging an inode to know if there are shared extents that
- * need special care when logging checksum items, to avoid duplicate
- * checksum items in a log (which can lead to a corruption where we end
- * up with missing checksum ranges after log replay).
- * Protected by the vfs inode lock.
- */
- u64 last_reflink_trans;
+ union {
+ /*
+ * The id/generation of the last transaction where this inode
+ * was either the source or the destination of a clone/dedupe
+ * operation. Used when logging an inode to know if there are
+ * shared extents that need special care when logging checksum
+ * items, to avoid duplicate checksum items in a log (which can
+ * lead to a corruption where we end up with missing checksum
+ * ranges after log replay). Protected by the VFS inode lock.
+ * Used for regular files only.
+ */
+ u64 last_reflink_trans;
- /*
- * Number of bytes outstanding that are going to need csums. This is
- * used in ENOSPC accounting.
- */
- u64 csum_bytes;
+ /*
+ * In case this is a root stub inode (BTRFS_INODE_ROOT_STUB flag set),
+ * the ID of that root.
+ */
+ u64 ref_root_id;
+ };
/* Backwards incompatible flags, lower half of inode_item::flags */
u32 flags;
/* Read-only compatibility flags, upper half of inode_item::flags */
u32 ro_flags;
- /*
- * Counters to keep track of the number of extent item's we may use due
- * to delalloc and such. outstanding_extents is the number of extent
- * items we think we'll end up using, and reserved_extents is the number
- * of extent items we've reserved metadata for.
- */
- unsigned outstanding_extents;
-
struct btrfs_block_rsv block_rsv;
- /*
- * Cached values of inode properties
- */
- unsigned prop_compress; /* per-file compression algorithm */
- /*
- * Force compression on the file using the defrag ioctl, could be
- * different from prop_compress and takes precedence if set
- */
- unsigned defrag_compress;
-
struct btrfs_delayed_node *delayed_node;
/* File creation time. */
- struct timespec64 i_otime;
+ u64 i_otime_sec;
+ u32 i_otime_nsec;
/* Hook into fs_info->delayed_iputs */
struct list_head delayed_iput;
struct rw_semaphore i_mmap_lock;
+
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info;
+#endif
+
struct inode vfs_inode;
};
-static inline u32 btrfs_inode_sectorsize(const struct btrfs_inode *inode)
+static inline u64 btrfs_get_first_dir_index_to_log(const struct btrfs_inode *inode)
{
- return inode->root->fs_info->sectorsize;
+ return READ_ONCE(inode->first_dir_index_to_log);
}
-static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
+static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode,
+ u64 index)
{
- return container_of(inode, struct btrfs_inode, vfs_inode);
+ WRITE_ONCE(inode->first_dir_index_to_log, index);
}
+/* Type checked and const-preserving VFS inode -> btrfs inode. */
+#define BTRFS_I(_inode) \
+ _Generic(_inode, \
+ struct inode *: container_of(_inode, struct btrfs_inode, vfs_inode), \
+ const struct inode *: (const struct btrfs_inode *)container_of( \
+ _inode, const struct btrfs_inode, vfs_inode))
+
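/*
 * Illustration only, not from the patch: because the _Generic selection above
 * preserves the const qualifier, a const VFS inode converts to a const btrfs
 * inode without any cast.
 */
static inline const struct btrfs_root *example_root_of(const struct inode *vfs_inode)
{
	const struct btrfs_inode *bi = BTRFS_I(vfs_inode);	/* const preserved */

	return bi->root;
}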
static inline unsigned long btrfs_inode_hash(u64 objectid,
const struct btrfs_root *root)
{
@@ -272,24 +376,44 @@ static inline unsigned long btrfs_inode_hash(u64 objectid,
return (unsigned long)h;
}
-static inline void btrfs_insert_inode_hash(struct inode *inode)
+#if BITS_PER_LONG == 32
+
+/*
+ * On 32 bit systems the i_ino of struct inode is 32 bits (unsigned long), so
+ * we use the inode's location objectid which is a u64 to avoid truncation.
+ */
+static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
- unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);
+ u64 ino = inode->objectid;
- __insert_inode_hash(inode, h);
+ if (test_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags))
+ ino = inode->vfs_inode.i_ino;
+ return ino;
}
+#else
+
static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
- u64 ino = inode->location.objectid;
+ return inode->vfs_inode.i_ino;
+}
- /*
- * !ino: btree_inode
- * type == BTRFS_ROOT_ITEM_KEY: subvol dir
- */
- if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
- ino = inode->vfs_inode.i_ino;
- return ino;
+#endif
+
+static inline void btrfs_get_inode_key(const struct btrfs_inode *inode,
+ struct btrfs_key *key)
+{
+ key->objectid = btrfs_ino(inode);
+ key->type = BTRFS_INODE_ITEM_KEY;
+ key->offset = 0;
+}
+
+static inline void btrfs_set_inode_number(struct btrfs_inode *inode, u64 ino)
+{
+#if BITS_PER_LONG == 32
+ inode->objectid = ino;
+#endif
+ inode->vfs_inode.i_ino = ino;
}
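/*
 * Hypothetical sketch, not part of the patch: setting and reading back the
 * inode number with the helpers above. On 32-bit kernels the full 64-bit
 * value survives in btrfs_inode::objectid even though i_ino may truncate;
 * the round trip below holds for regular (non root stub) inodes.
 */
static inline void example_assign_ino(struct btrfs_inode *inode, u64 ino)
{
	btrfs_set_inode_number(inode, ino);
	ASSERT(btrfs_ino(inode) == ino);
}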
static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
@@ -298,21 +422,14 @@ static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
inode->disk_i_size = size;
}
-static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
+static inline bool btrfs_is_free_space_inode(const struct btrfs_inode *inode)
{
- struct btrfs_root *root = inode->root;
-
- if (root == root->fs_info->tree_root &&
- btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
- return true;
- if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
- return true;
- return false;
+ return test_bit(BTRFS_INODE_FREE_SPACE_INODE, &inode->runtime_flags);
}
-static inline bool is_data_inode(struct inode *inode)
+static inline bool is_data_inode(const struct btrfs_inode *inode)
{
- return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
+ return btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID;
}
static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
@@ -323,7 +440,7 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
if (btrfs_is_free_space_inode(inode))
return;
trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
- mod);
+ mod, inode->outstanding_extents);
}
/*
@@ -341,6 +458,38 @@ static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
spin_unlock(&inode->lock);
}
+/*
+ * Should be called while holding the inode's VFS lock in exclusive mode, or
+ * while holding the inode's mmap lock (struct btrfs_inode::i_mmap_lock) in
+ * either shared or exclusive mode, or in a context where no one else can access
+ * the inode concurrently (during inode creation or when loading an inode from
+ * disk).
+ */
+static inline void btrfs_set_inode_full_sync(struct btrfs_inode *inode)
+{
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
+ /*
+ * The inode may have been part of a reflink operation in the last
+ * transaction that modified it, and then a fsync has reset the
+ * last_reflink_trans to avoid subsequent fsyncs in the same
+ * transaction to do unnecessary work. So update last_reflink_trans
+ * to the last_trans value (we have to be pessimistic and assume a
+ * reflink happened).
+ *
+ * The ->last_trans is protected by the inode's spinlock and we can
+ * have a concurrent ordered extent completion update it. Also set
+ * last_reflink_trans to ->last_trans only if the former is less than
+ * the latter, because we can be called in a context where
+ * last_reflink_trans was set to the current transaction generation
+ * while ->last_trans was not yet updated in the current transaction,
+ * and therefore has a lower value.
+ */
+ spin_lock(&inode->lock);
+ if (inode->last_reflink_trans < inode->last_trans)
+ inode->last_reflink_trans = inode->last_trans;
+ spin_unlock(&inode->lock);
+}
+
static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
bool ret = false;
@@ -348,80 +497,181 @@ static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
spin_lock(&inode->lock);
if (inode->logged_trans == generation &&
inode->last_sub_trans <= inode->last_log_commit &&
- inode->last_sub_trans <= inode->root->last_log_commit)
+ inode->last_sub_trans <= btrfs_get_root_last_log_commit(inode->root))
ret = true;
spin_unlock(&inode->lock);
return ret;
}
-struct btrfs_dio_private {
- struct inode *inode;
-
- /*
- * Since DIO can use anonymous page, we cannot use page_offset() to
- * grab the file offset, thus need a dedicated member for file offset.
- */
- u64 file_offset;
- u64 disk_bytenr;
- /* Used for bio::bi_size */
- u32 bytes;
-
- /*
- * References to this structure. There is one reference per in-flight
- * bio plus one while we're still setting up.
- */
- refcount_t refs;
-
- /* dio_bio came from fs/direct-io.c */
- struct bio *dio_bio;
-
- /* Array of checksums */
- u8 csums[];
-};
-
/*
- * btrfs_inode_item stores flags in a u64, btrfs_inode stores them in two
- * separate u32s. These two functions convert between the two representations.
+ * Check if the inode has flags compatible with compression
*/
-static inline u64 btrfs_inode_combine_flags(u32 flags, u32 ro_flags)
+static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
{
- return (flags | ((u64)ro_flags << 32));
+ if (inode->flags & BTRFS_INODE_NODATACOW ||
+ inode->flags & BTRFS_INODE_NODATASUM)
+ return false;
+ return true;
}
-static inline void btrfs_inode_split_flags(u64 inode_item_flags,
- u32 *flags, u32 *ro_flags)
+static inline void btrfs_assert_inode_locked(struct btrfs_inode *inode)
{
- *flags = (u32)inode_item_flags;
- *ro_flags = (u32)(inode_item_flags >> 32);
+ /* Immediately trigger a crash if the inode is not locked. */
+ ASSERT(inode_is_locked(&inode->vfs_inode));
+ /* Trigger a splat in dmesg if this task is not holding the lock. */
+ lockdep_assert_held(&inode->vfs_inode.i_rwsem);
}
-/* Array of bytes with variable length, hexadecimal format 0x1234 */
-#define CSUM_FMT "0x%*phN"
-#define CSUM_FMT_VALUE(size, bytes) size, bytes
-
-static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
- u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
+static inline void btrfs_update_inode_mapping_flags(struct btrfs_inode *inode)
{
- struct btrfs_root *root = inode->root;
- const u32 csum_size = root->fs_info->csum_size;
-
- /* Output minus objectid, which is more meaningful */
- if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
- btrfs_warn_rl(root->fs_info,
-"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
- root->root_key.objectid, btrfs_ino(inode),
- logical_start,
- CSUM_FMT_VALUE(csum_size, csum),
- CSUM_FMT_VALUE(csum_size, csum_expected),
- mirror_num);
+ if (inode->flags & BTRFS_INODE_NODATASUM)
+ mapping_clear_stable_writes(inode->vfs_inode.i_mapping);
else
- btrfs_warn_rl(root->fs_info,
-"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
- root->root_key.objectid, btrfs_ino(inode),
- logical_start,
- CSUM_FMT_VALUE(csum_size, csum),
- CSUM_FMT_VALUE(csum_size, csum_expected),
- mirror_num);
+ mapping_set_stable_writes(inode->vfs_inode.i_mapping);
+}
+
+static inline void btrfs_set_inode_mapping_order(struct btrfs_inode *inode)
+{
+ /* Metadata inode should not reach here. */
+ ASSERT(is_data_inode(inode));
+
+ /* We only allow BITS_PER_LONG blocks for each bitmap. */
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ mapping_set_folio_order_range(inode->vfs_inode.i_mapping,
+ inode->root->fs_info->block_min_order,
+ inode->root->fs_info->block_max_order);
+#endif
}
+void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddr, u8 *dest);
+void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddrs[], u8 *dest);
+int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
+ const u8 * const csum_expected);
+bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
+ u32 bio_offset, const phys_addr_t paddrs[]);
+noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
+ struct btrfs_file_extent *file_extent,
+ bool nowait);
+
+void btrfs_del_delalloc_inode(struct btrfs_inode *inode);
+struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
+int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
+int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *dir, struct btrfs_inode *inode,
+ const struct fscrypt_str *name);
+int btrfs_add_link(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
+ const struct fscrypt_str *name, bool add_backref, u64 index);
+int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry);
+int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end);
+
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
+ bool in_reclaim_context);
+int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
+ unsigned int extra_bits,
+ struct extent_state **cached_state);
+
+struct btrfs_new_inode_args {
+ /* Input */
+ struct inode *dir;
+ struct dentry *dentry;
+ struct inode *inode;
+ bool orphan;
+ bool subvol;
+
+ /* Output from btrfs_new_inode_prepare(), input to btrfs_create_new_inode(). */
+ struct posix_acl *default_acl;
+ struct posix_acl *acl;
+ struct fscrypt_name fname;
+};
+
+int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
+ unsigned int *trans_num_items);
+int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_new_inode_args *args);
+void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
+struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
+ struct inode *dir);
+void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
+ u32 bits);
+void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
+ struct extent_state *state, u32 bits);
+void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
+ struct extent_state *other);
+void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
+ struct extent_state *orig, u64 split);
+void btrfs_evict_inode(struct inode *inode);
+struct inode *btrfs_alloc_inode(struct super_block *sb);
+void btrfs_destroy_inode(struct inode *inode);
+void btrfs_free_inode(struct inode *inode);
+int btrfs_drop_inode(struct inode *inode);
+int __init btrfs_init_cachep(void);
+void __cold btrfs_destroy_cachep(void);
+struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
+ struct btrfs_path *path);
+struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root);
+struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
+ struct folio *folio, u64 start, u64 len);
+int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode);
+int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode);
+int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode);
+int btrfs_orphan_cleanup(struct btrfs_root *root);
+int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
+void btrfs_add_delayed_iput(struct btrfs_inode *inode);
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
+int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
+int btrfs_prealloc_file_range(struct inode *inode, int mode,
+ u64 start, u64 num_bytes, u64 min_size,
+ loff_t actual_len, u64 *alloc_hint);
+int btrfs_prealloc_file_range_trans(struct inode *inode,
+ struct btrfs_trans_handle *trans, int mode,
+ u64 start, u64 num_bytes, u64 min_size,
+ loff_t actual_len, u64 *alloc_hint);
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
+ u64 start, u64 end, struct writeback_control *wbc);
+int btrfs_writepage_cow_fixup(struct folio *folio);
+int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
+ int compress_type);
+int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+ u64 disk_bytenr, u64 disk_io_size,
+ struct page **pages, void *uring_ctx);
+ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ struct btrfs_ioctl_encoded_io_args *encoded,
+ struct extent_state **cached_state,
+ u64 *disk_bytenr, u64 *disk_io_size);
+ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
+ u64 start, u64 lockend,
+ struct extent_state **cached_state,
+ u64 disk_bytenr, u64 disk_io_size,
+ size_t count, bool compressed, bool *unlocked);
+ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ const struct btrfs_ioctl_encoded_io_args *encoded);
+
+struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino);
+
+extern const struct dentry_operations btrfs_dentry_operations;
+
+/* Inode locking type flags, by default the exclusive lock is taken. */
+enum btrfs_ilock_type {
+ ENUM_BIT(BTRFS_ILOCK_SHARED),
+ ENUM_BIT(BTRFS_ILOCK_TRY),
+ ENUM_BIT(BTRFS_ILOCK_MMAP),
+};
+
+int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags);
+void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags);
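/*
 * Usage sketch only, not part of the patch: taking the btrfs inode lock in
 * shared mode via the flag above and releasing it with the matching flag.
 * The TRY variant can fail with an error, so callers typically check the
 * return value of btrfs_inode_lock().
 */
static inline int example_shared_locked_op(struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
	if (ret)
		return ret;
	/* ... read-only work on the inode ... */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return 0;
}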
+void btrfs_update_inode_bytes(struct btrfs_inode *inode, const u64 add_bytes,
+ const u64 del_bytes);
+void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);
+u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
+ u64 num_bytes);
+struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
+ const struct btrfs_file_extent *file_extent,
+ int type);
+
#endif
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
deleted file mode 100644
index 7e9f90fa0388..000000000000
--- a/fs/btrfs/check-integrity.c
+++ /dev/null
@@ -1,2875 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) STRATO AG 2011. All rights reserved.
- */
-
-/*
- * This module can be used to catch cases when the btrfs kernel
- * code executes write requests to the disk that bring the file
- * system in an inconsistent state. In such a state, a power-loss
- * or kernel panic event would cause that the data on disk is
- * lost or at least damaged.
- *
- * Code is added that examines all block write requests during
- * runtime (including writes of the super block). Three rules
- * are verified and an error is printed on violation of the
- * rules:
- * 1. It is not allowed to write a disk block which is
- * currently referenced by the super block (either directly
- * or indirectly).
- * 2. When a super block is written, it is verified that all
- * referenced (directly or indirectly) blocks fulfill the
- * following requirements:
- * 2a. All referenced blocks have either been present when
- * the file system was mounted, (i.e., they have been
- * referenced by the super block) or they have been
- * written since then and the write completion callback
- * was called and no write error was indicated and a
- * FLUSH request to the device where these blocks are
- * located was received and completed.
- * 2b. All referenced blocks need to have a generation
- * number which is equal to the parent's number.
- *
- * One issue that was found using this module was that the log
- * tree on disk became temporarily corrupted because disk blocks
- * that had been in use for the log tree had been freed and
- * reused too early, while being referenced by the written super
- * block.
- *
- * The search term in the kernel log that can be used to filter
- * on the existence of detected integrity issues is
- * "btrfs: attempt".
- *
- * The integrity check is enabled via mount options. These
- * mount options are only supported if the integrity check
- * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
- *
- * Example #1, apply integrity checks to all metadata:
- * mount /dev/sdb1 /mnt -o check_int
- *
- * Example #2, apply integrity checks to all metadata and
- * to data extents:
- * mount /dev/sdb1 /mnt -o check_int_data
- *
- * Example #3, apply integrity checks to all metadata and dump
- * the tree that the super block references to kernel messages
- * each time after a super block was written:
- * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
- *
- * If the integrity check tool is included and activated in
- * the mount options, plenty of kernel memory is used, and
- * plenty of additional CPU cycles are spent. Enabling this
- * functionality is not intended for normal use. In most
- * cases, unless you are a btrfs developer who needs to verify
- * the integrity of (super)-block write requests, do not
- * enable the config option BTRFS_FS_CHECK_INTEGRITY to
- * include and compile the integrity check tool.
- *
- * Expect millions of lines of information in the kernel log with an
- * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the
- * kernel config to at least 26 (which is 64MB). Usually the value is
- * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be
- * changed like this before LOG_BUF_SHIFT can be set to a high value:
- * config LOG_BUF_SHIFT
- * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
- * range 12 30
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <crypto/hash.h>
-#include "ctree.h"
-#include "disk-io.h"
-#include "transaction.h"
-#include "extent_io.h"
-#include "volumes.h"
-#include "print-tree.h"
-#include "locking.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
-#include "compression.h"
-
-#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
-#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
-#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
-#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
-#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
-#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
-#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
-#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters,
- * excluding " [...]" */
-#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
-
-/*
- * The definition of the bitmask fields for the print_mask.
- * They are specified with the mount option check_integrity_print_mask.
- */
-#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001
-#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002
-#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004
-#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008
-#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010
-#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020
-#define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040
-#define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080
-#define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100
-#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200
-#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400
-#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800
-#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000
-#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000
-
-struct btrfsic_dev_state;
-struct btrfsic_state;
-
-struct btrfsic_block {
- u32 magic_num; /* only used for debug purposes */
- unsigned int is_metadata:1; /* if it is meta-data, not data-data */
- unsigned int is_superblock:1; /* if it is one of the superblocks */
- unsigned int is_iodone:1; /* if is done by lower subsystem */
- unsigned int iodone_w_error:1; /* error was indicated to endio */
- unsigned int never_written:1; /* block was added because it was
- * referenced, not because it was
- * written */
- unsigned int mirror_num; /* large enough to hold
- * BTRFS_SUPER_MIRROR_MAX */
- struct btrfsic_dev_state *dev_state;
- u64 dev_bytenr; /* key, physical byte num on disk */
- u64 logical_bytenr; /* logical byte num on disk */
- u64 generation;
- struct btrfs_disk_key disk_key; /* extra info to print in case of
- * issues, will not always be correct */
- struct list_head collision_resolving_node; /* list node */
- struct list_head all_blocks_node; /* list node */
-
- /* the following two lists contain block_link items */
- struct list_head ref_to_list; /* list */
- struct list_head ref_from_list; /* list */
- struct btrfsic_block *next_in_same_bio;
- void *orig_bio_private;
- bio_end_io_t *orig_bio_end_io;
- int submit_bio_bh_rw;
- u64 flush_gen; /* only valid if !never_written */
-};
-
-/*
- * Elements of this type are allocated dynamically and required because
- * each block object can refer to and can be ref from multiple blocks.
- * The key to lookup them in the hashtable is the dev_bytenr of
- * the block ref to plus the one from the block referred from.
- * The fact that they are searchable via a hashtable and that a
- * ref_cnt is maintained is not required for the btrfs integrity
- * check algorithm itself, it is only used to make the output more
- * beautiful in case that an error is detected (an error is defined
- * as a write operation to a block while that block is still referenced).
- */
-struct btrfsic_block_link {
- u32 magic_num; /* only used for debug purposes */
- u32 ref_cnt;
- struct list_head node_ref_to; /* list node */
- struct list_head node_ref_from; /* list node */
- struct list_head collision_resolving_node; /* list node */
- struct btrfsic_block *block_ref_to;
- struct btrfsic_block *block_ref_from;
- u64 parent_generation;
-};
-
-struct btrfsic_dev_state {
- u32 magic_num; /* only used for debug purposes */
- struct block_device *bdev;
- struct btrfsic_state *state;
- struct list_head collision_resolving_node; /* list node */
- struct btrfsic_block dummy_block_for_bio_bh_flush;
- u64 last_flush_gen;
-};
-
-struct btrfsic_block_hashtable {
- struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
-};
-
-struct btrfsic_block_link_hashtable {
- struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
-};
-
-struct btrfsic_dev_state_hashtable {
- struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
-};
-
-struct btrfsic_block_data_ctx {
- u64 start; /* virtual bytenr */
- u64 dev_bytenr; /* physical bytenr on device */
- u32 len;
- struct btrfsic_dev_state *dev;
- char **datav;
- struct page **pagev;
- void *mem_to_free;
-};
-
-/* This structure is used to implement recursion without occupying
- * any stack space, refer to btrfsic_process_metablock() */
-struct btrfsic_stack_frame {
- u32 magic;
- u32 nr;
- int error;
- int i;
- int limit_nesting;
- int num_copies;
- int mirror_num;
- struct btrfsic_block *block;
- struct btrfsic_block_data_ctx *block_ctx;
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx next_block_ctx;
- struct btrfs_header *hdr;
- struct btrfsic_stack_frame *prev;
-};
-
-/* Some state per mounted filesystem */
-struct btrfsic_state {
- u32 print_mask;
- int include_extent_data;
- struct list_head all_blocks_list;
- struct btrfsic_block_hashtable block_hashtable;
- struct btrfsic_block_link_hashtable block_link_hashtable;
- struct btrfs_fs_info *fs_info;
- u64 max_superblock_generation;
- struct btrfsic_block *latest_superblock;
- u32 metablock_size;
- u32 datablock_size;
-};
-
-static int btrfsic_process_metablock(struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- int limit_nesting, int force_iodone_flag);
-static void btrfsic_read_from_block_data(
- struct btrfsic_block_data_ctx *block_ctx,
- void *dst, u32 offset, size_t len);
-static int btrfsic_create_link_to_next_block(
- struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx
- *block_ctx, u64 next_bytenr,
- int limit_nesting,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block **next_blockp,
- int force_iodone_flag,
- int *num_copiesp, int *mirror_nump,
- struct btrfs_disk_key *disk_key,
- u64 parent_generation);
-static int btrfsic_handle_extent_data(struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- u32 item_offset, int force_iodone_flag);
-static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
- struct btrfsic_block_data_ctx *block_ctx_out,
- int mirror_num);
-static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
-static int btrfsic_read_block(struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx);
-static int btrfsic_process_written_superblock(
- struct btrfsic_state *state,
- struct btrfsic_block *const block,
- struct btrfs_super_block *const super_hdr);
-static void btrfsic_bio_end_io(struct bio *bp);
-static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int recursion_level);
-static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
- struct btrfsic_block *const block,
- int recursion_level);
-static void btrfsic_print_add_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l);
-static void btrfsic_print_rem_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l);
-static char btrfsic_get_block_type(const struct btrfsic_state *state,
- const struct btrfsic_block *block);
-static void btrfsic_dump_tree(const struct btrfsic_state *state);
-static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int indent_level);
-static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block *next_block,
- struct btrfsic_block *from_block,
- u64 parent_generation);
-static struct btrfsic_block *btrfsic_block_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx,
- const char *additional_string,
- int is_metadata,
- int is_iodone,
- int never_written,
- int mirror_num,
- int *was_created);
-static int btrfsic_process_superblock_dev_mirror(
- struct btrfsic_state *state,
- struct btrfsic_dev_state *dev_state,
- struct btrfs_device *device,
- int superblock_mirror_num,
- struct btrfsic_dev_state **selected_dev_state,
- struct btrfs_super_block *selected_super);
-static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev);
-static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
- u64 bytenr,
- struct btrfsic_dev_state *dev_state,
- u64 dev_bytenr);
-
-static struct mutex btrfsic_mutex;
-static int btrfsic_is_initialized;
-static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;
-
-
-static void btrfsic_block_init(struct btrfsic_block *b)
-{
- b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
- b->dev_state = NULL;
- b->dev_bytenr = 0;
- b->logical_bytenr = 0;
- b->generation = BTRFSIC_GENERATION_UNKNOWN;
- b->disk_key.objectid = 0;
- b->disk_key.type = 0;
- b->disk_key.offset = 0;
- b->is_metadata = 0;
- b->is_superblock = 0;
- b->is_iodone = 0;
- b->iodone_w_error = 0;
- b->never_written = 0;
- b->mirror_num = 0;
- b->next_in_same_bio = NULL;
- b->orig_bio_private = NULL;
- b->orig_bio_end_io = NULL;
- INIT_LIST_HEAD(&b->collision_resolving_node);
- INIT_LIST_HEAD(&b->all_blocks_node);
- INIT_LIST_HEAD(&b->ref_to_list);
- INIT_LIST_HEAD(&b->ref_from_list);
- b->submit_bio_bh_rw = 0;
- b->flush_gen = 0;
-}
-
-static struct btrfsic_block *btrfsic_block_alloc(void)
-{
- struct btrfsic_block *b;
-
- b = kzalloc(sizeof(*b), GFP_NOFS);
- if (NULL != b)
- btrfsic_block_init(b);
-
- return b;
-}
-
-static void btrfsic_block_free(struct btrfsic_block *b)
-{
- BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
- kfree(b);
-}
-
-static void btrfsic_block_link_init(struct btrfsic_block_link *l)
-{
- l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
- l->ref_cnt = 1;
- INIT_LIST_HEAD(&l->node_ref_to);
- INIT_LIST_HEAD(&l->node_ref_from);
- INIT_LIST_HEAD(&l->collision_resolving_node);
- l->block_ref_to = NULL;
- l->block_ref_from = NULL;
-}
-
-static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
-{
- struct btrfsic_block_link *l;
-
- l = kzalloc(sizeof(*l), GFP_NOFS);
- if (NULL != l)
- btrfsic_block_link_init(l);
-
- return l;
-}
-
-static void btrfsic_block_link_free(struct btrfsic_block_link *l)
-{
- BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
- kfree(l);
-}
-
-static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
-{
- ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
- ds->bdev = NULL;
- ds->state = NULL;
- INIT_LIST_HEAD(&ds->collision_resolving_node);
- ds->last_flush_gen = 0;
- btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
- ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
- ds->dummy_block_for_bio_bh_flush.dev_state = ds;
-}
-
-static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
-{
- struct btrfsic_dev_state *ds;
-
- ds = kzalloc(sizeof(*ds), GFP_NOFS);
- if (NULL != ds)
- btrfsic_dev_state_init(ds);
-
- return ds;
-}
-
-static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
-{
- BUG_ON(!(NULL == ds ||
- BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
- kfree(ds);
-}
-
-static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
-{
- int i;
-
- for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
- INIT_LIST_HEAD(h->table + i);
-}
-
-static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
- struct btrfsic_block_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(b->dev_bytenr >> 16)) ^
- ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
- (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
-
- list_add(&b->collision_resolving_node, h->table + hashval);
-}
-
-static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
-{
- list_del(&b->collision_resolving_node);
-}
-
-static struct btrfsic_block *btrfsic_block_hashtable_lookup(
- struct block_device *bdev,
- u64 dev_bytenr,
- struct btrfsic_block_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(dev_bytenr >> 16)) ^
- ((unsigned int)((uintptr_t)bdev))) &
- (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
- struct btrfsic_block *b;
-
- list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
- if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
- return b;
- }
-
- return NULL;
-}
-
-static void btrfsic_block_link_hashtable_init(
- struct btrfsic_block_link_hashtable *h)
-{
- int i;
-
- for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
- INIT_LIST_HEAD(h->table + i);
-}
-
-static void btrfsic_block_link_hashtable_add(
- struct btrfsic_block_link *l,
- struct btrfsic_block_link_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
- ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
- ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
- ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
- & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
-
- BUG_ON(NULL == l->block_ref_to);
- BUG_ON(NULL == l->block_ref_from);
- list_add(&l->collision_resolving_node, h->table + hashval);
-}
-
-static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
-{
- list_del(&l->collision_resolving_node);
-}
-
-static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
- struct block_device *bdev_ref_to,
- u64 dev_bytenr_ref_to,
- struct block_device *bdev_ref_from,
- u64 dev_bytenr_ref_from,
- struct btrfsic_block_link_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
- ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
- ((unsigned int)((uintptr_t)bdev_ref_to)) ^
- ((unsigned int)((uintptr_t)bdev_ref_from))) &
- (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
- struct btrfsic_block_link *l;
-
- list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
- BUG_ON(NULL == l->block_ref_to);
- BUG_ON(NULL == l->block_ref_from);
- if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
- l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
- l->block_ref_from->dev_state->bdev == bdev_ref_from &&
- l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
- return l;
- }
-
- return NULL;
-}
-
-static void btrfsic_dev_state_hashtable_init(
- struct btrfsic_dev_state_hashtable *h)
-{
- int i;
-
- for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
- INIT_LIST_HEAD(h->table + i);
-}
-
-static void btrfsic_dev_state_hashtable_add(
- struct btrfsic_dev_state *ds,
- struct btrfsic_dev_state_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) &
- (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
-
- list_add(&ds->collision_resolving_node, h->table + hashval);
-}
-
-static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
-{
- list_del(&ds->collision_resolving_node);
-}
-
-static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev,
- struct btrfsic_dev_state_hashtable *h)
-{
- const unsigned int hashval =
- dev & (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1);
- struct btrfsic_dev_state *ds;
-
- list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
- if (ds->bdev->bd_dev == dev)
- return ds;
- }
-
- return NULL;
-}
-
-static int btrfsic_process_superblock(struct btrfsic_state *state,
- struct btrfs_fs_devices *fs_devices)
-{
- struct btrfs_super_block *selected_super;
- struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_device *device;
- struct btrfsic_dev_state *selected_dev_state = NULL;
- int ret = 0;
- int pass;
-
- selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
- if (!selected_super)
- return -ENOMEM;
-
- list_for_each_entry(device, dev_head, dev_list) {
- int i;
- struct btrfsic_dev_state *dev_state;
-
- if (!device->bdev || !device->name)
- continue;
-
- dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev);
- BUG_ON(NULL == dev_state);
- for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- ret = btrfsic_process_superblock_dev_mirror(
- state, dev_state, device, i,
- &selected_dev_state, selected_super);
- if (0 != ret && 0 == i) {
- kfree(selected_super);
- return ret;
- }
- }
- }
-
- if (NULL == state->latest_superblock) {
- pr_info("btrfsic: no superblock found!\n");
- kfree(selected_super);
- return -1;
- }
-
- for (pass = 0; pass < 3; pass++) {
- int num_copies;
- int mirror_num;
- u64 next_bytenr;
-
- switch (pass) {
- case 0:
- next_bytenr = btrfs_super_root(selected_super);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("root@%llu\n", next_bytenr);
- break;
- case 1:
- next_bytenr = btrfs_super_chunk_root(selected_super);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("chunk@%llu\n", next_bytenr);
- break;
- case 2:
- next_bytenr = btrfs_super_log_root(selected_super);
- if (0 == next_bytenr)
- continue;
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("log@%llu\n", next_bytenr);
- break;
- }
-
- num_copies = btrfs_num_copies(state->fs_info, next_bytenr,
- state->metablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
-
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx tmp_next_block_ctx;
- struct btrfsic_block_link *l;
-
- ret = btrfsic_map_block(state, next_bytenr,
- state->metablock_size,
- &tmp_next_block_ctx,
- mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
- next_bytenr, mirror_num);
- kfree(selected_super);
- return -1;
- }
-
- next_block = btrfsic_block_hashtable_lookup(
- tmp_next_block_ctx.dev->bdev,
- tmp_next_block_ctx.dev_bytenr,
- &state->block_hashtable);
- BUG_ON(NULL == next_block);
-
- l = btrfsic_block_link_hashtable_lookup(
- tmp_next_block_ctx.dev->bdev,
- tmp_next_block_ctx.dev_bytenr,
- state->latest_superblock->dev_state->
- bdev,
- state->latest_superblock->dev_bytenr,
- &state->block_link_hashtable);
- BUG_ON(NULL == l);
-
- ret = btrfsic_read_block(state, &tmp_next_block_ctx);
- if (ret < (int)PAGE_SIZE) {
- pr_info("btrfsic: read @logical %llu failed!\n",
- tmp_next_block_ctx.start);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- kfree(selected_super);
- return -1;
- }
-
- ret = btrfsic_process_metablock(state,
- next_block,
- &tmp_next_block_ctx,
- BTRFS_MAX_LEVEL + 3, 1);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- }
- }
-
- kfree(selected_super);
- return ret;
-}
-
-static int btrfsic_process_superblock_dev_mirror(
- struct btrfsic_state *state,
- struct btrfsic_dev_state *dev_state,
- struct btrfs_device *device,
- int superblock_mirror_num,
- struct btrfsic_dev_state **selected_dev_state,
- struct btrfs_super_block *selected_super)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfs_super_block *super_tmp;
- u64 dev_bytenr;
- struct btrfsic_block *superblock_tmp;
- int pass;
- struct block_device *const superblock_bdev = device->bdev;
- struct page *page;
- struct address_space *mapping = superblock_bdev->bd_inode->i_mapping;
- int ret = 0;
-
- /* super block bytenr is always the unmapped device bytenr */
- dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
- if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes)
- return -1;
-
- page = read_cache_page_gfp(mapping, dev_bytenr >> PAGE_SHIFT, GFP_NOFS);
- if (IS_ERR(page))
- return -1;
-
- super_tmp = page_address(page);
-
- if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
- btrfs_super_magic(super_tmp) != BTRFS_MAGIC ||
- memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
- btrfs_super_nodesize(super_tmp) != state->metablock_size ||
- btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
- ret = 0;
- goto out;
- }
-
- superblock_tmp =
- btrfsic_block_hashtable_lookup(superblock_bdev,
- dev_bytenr,
- &state->block_hashtable);
- if (NULL == superblock_tmp) {
- superblock_tmp = btrfsic_block_alloc();
- if (NULL == superblock_tmp) {
- ret = -1;
- goto out;
- }
- /* for superblock, only the dev_bytenr makes sense */
- superblock_tmp->dev_bytenr = dev_bytenr;
- superblock_tmp->dev_state = dev_state;
- superblock_tmp->logical_bytenr = dev_bytenr;
- superblock_tmp->generation = btrfs_super_generation(super_tmp);
- superblock_tmp->is_metadata = 1;
- superblock_tmp->is_superblock = 1;
- superblock_tmp->is_iodone = 1;
- superblock_tmp->never_written = 0;
- superblock_tmp->mirror_num = 1 + superblock_mirror_num;
- if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- btrfs_info_in_rcu(fs_info,
- "new initial S-block (bdev %p, %s) @%llu (%pg/%llu/%d)",
- superblock_bdev,
- rcu_str_deref(device->name), dev_bytenr,
- dev_state->bdev, dev_bytenr,
- superblock_mirror_num);
- list_add(&superblock_tmp->all_blocks_node,
- &state->all_blocks_list);
- btrfsic_block_hashtable_add(superblock_tmp,
- &state->block_hashtable);
- }
-
- /* select the one with the highest generation field */
- if (btrfs_super_generation(super_tmp) >
- state->max_superblock_generation ||
- 0 == state->max_superblock_generation) {
- memcpy(selected_super, super_tmp, sizeof(*selected_super));
- *selected_dev_state = dev_state;
- state->max_superblock_generation =
- btrfs_super_generation(super_tmp);
- state->latest_superblock = superblock_tmp;
- }
-
- for (pass = 0; pass < 3; pass++) {
- u64 next_bytenr;
- int num_copies;
- int mirror_num;
- const char *additional_string = NULL;
- struct btrfs_disk_key tmp_disk_key;
-
- tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
- tmp_disk_key.offset = 0;
- switch (pass) {
- case 0:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_ROOT_TREE_OBJECTID);
- additional_string = "initial root ";
- next_bytenr = btrfs_super_root(super_tmp);
- break;
- case 1:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_CHUNK_TREE_OBJECTID);
- additional_string = "initial chunk ";
- next_bytenr = btrfs_super_chunk_root(super_tmp);
- break;
- case 2:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_TREE_LOG_OBJECTID);
- additional_string = "initial log ";
- next_bytenr = btrfs_super_log_root(super_tmp);
- if (0 == next_bytenr)
- continue;
- break;
- }
-
- num_copies = btrfs_num_copies(fs_info, next_bytenr,
- state->metablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx tmp_next_block_ctx;
- struct btrfsic_block_link *l;
-
- if (btrfsic_map_block(state, next_bytenr,
- state->metablock_size,
- &tmp_next_block_ctx,
- mirror_num)) {
- pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
- next_bytenr, mirror_num);
- ret = -1;
- goto out;
- }
-
- next_block = btrfsic_block_lookup_or_add(
- state, &tmp_next_block_ctx,
- additional_string, 1, 1, 0,
- mirror_num, NULL);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- ret = -1;
- goto out;
- }
-
- next_block->disk_key = tmp_disk_key;
- next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
- l = btrfsic_block_link_lookup_or_add(
- state, &tmp_next_block_ctx,
- next_block, superblock_tmp,
- BTRFSIC_GENERATION_UNKNOWN);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- if (NULL == l) {
- ret = -1;
- goto out;
- }
- }
- }
- if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
- btrfsic_dump_tree_sub(state, superblock_tmp, 0);
-
-out:
- put_page(page);
- return ret;
-}
-
-static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
-{
- struct btrfsic_stack_frame *sf;
-
- sf = kzalloc(sizeof(*sf), GFP_NOFS);
- if (sf)
- sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
- return sf;
-}
-
-static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
-{
- BUG_ON(!(NULL == sf ||
- BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
- kfree(sf);
-}
-
-static noinline_for_stack int btrfsic_process_metablock(
- struct btrfsic_state *state,
- struct btrfsic_block *const first_block,
- struct btrfsic_block_data_ctx *const first_block_ctx,
- int first_limit_nesting, int force_iodone_flag)
-{
- struct btrfsic_stack_frame initial_stack_frame = { 0 };
- struct btrfsic_stack_frame *sf;
- struct btrfsic_stack_frame *next_stack;
- struct btrfs_header *const first_hdr =
- (struct btrfs_header *)first_block_ctx->datav[0];
-
- BUG_ON(!first_hdr);
- sf = &initial_stack_frame;
- sf->error = 0;
- sf->i = -1;
- sf->limit_nesting = first_limit_nesting;
- sf->block = first_block;
- sf->block_ctx = first_block_ctx;
- sf->next_block = NULL;
- sf->hdr = first_hdr;
- sf->prev = NULL;
-
-continue_with_new_stack_frame:
- sf->block->generation = btrfs_stack_header_generation(sf->hdr);
- if (0 == sf->hdr->level) {
- struct btrfs_leaf *const leafhdr =
- (struct btrfs_leaf *)sf->hdr;
-
- if (-1 == sf->i) {
- sf->nr = btrfs_stack_header_nritems(&leafhdr->header);
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("leaf %llu items %d generation %llu owner %llu\n",
- sf->block_ctx->start, sf->nr,
- btrfs_stack_header_generation(
- &leafhdr->header),
- btrfs_stack_header_owner(
- &leafhdr->header));
- }
-
-continue_with_current_leaf_stack_frame:
- if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
- sf->i++;
- sf->num_copies = 0;
- }
-
- if (sf->i < sf->nr) {
- struct btrfs_item disk_item;
- u32 disk_item_offset =
- (uintptr_t)(leafhdr->items + sf->i) -
- (uintptr_t)leafhdr;
- struct btrfs_disk_key *disk_key;
- u8 type;
- u32 item_offset;
- u32 item_size;
-
- if (disk_item_offset + sizeof(struct btrfs_item) >
- sf->block_ctx->len) {
-leaf_item_out_of_bounds_error:
- pr_info(
- "btrfsic: leaf item out of bounds at logical %llu, dev %pg\n",
- sf->block_ctx->start,
- sf->block_ctx->dev->bdev);
- goto one_stack_frame_backwards;
- }
- btrfsic_read_from_block_data(sf->block_ctx,
- &disk_item,
- disk_item_offset,
- sizeof(struct btrfs_item));
- item_offset = btrfs_stack_item_offset(&disk_item);
- item_size = btrfs_stack_item_size(&disk_item);
- disk_key = &disk_item.key;
- type = btrfs_disk_key_type(disk_key);
-
- if (BTRFS_ROOT_ITEM_KEY == type) {
- struct btrfs_root_item root_item;
- u32 root_item_offset;
- u64 next_bytenr;
-
- root_item_offset = item_offset +
- offsetof(struct btrfs_leaf, items);
- if (root_item_offset + item_size >
- sf->block_ctx->len)
- goto leaf_item_out_of_bounds_error;
- btrfsic_read_from_block_data(
- sf->block_ctx, &root_item,
- root_item_offset,
- item_size);
- next_bytenr = btrfs_root_bytenr(&root_item);
-
- sf->error =
- btrfsic_create_link_to_next_block(
- state,
- sf->block,
- sf->block_ctx,
- next_bytenr,
- sf->limit_nesting,
- &sf->next_block_ctx,
- &sf->next_block,
- force_iodone_flag,
- &sf->num_copies,
- &sf->mirror_num,
- disk_key,
- btrfs_root_generation(
- &root_item));
- if (sf->error)
- goto one_stack_frame_backwards;
-
- if (NULL != sf->next_block) {
- struct btrfs_header *const next_hdr =
- (struct btrfs_header *)
- sf->next_block_ctx.datav[0];
-
- next_stack =
- btrfsic_stack_frame_alloc();
- if (NULL == next_stack) {
- sf->error = -1;
- btrfsic_release_block_ctx(
- &sf->
- next_block_ctx);
- goto one_stack_frame_backwards;
- }
-
- next_stack->i = -1;
- next_stack->block = sf->next_block;
- next_stack->block_ctx =
- &sf->next_block_ctx;
- next_stack->next_block = NULL;
- next_stack->hdr = next_hdr;
- next_stack->limit_nesting =
- sf->limit_nesting - 1;
- next_stack->prev = sf;
- sf = next_stack;
- goto continue_with_new_stack_frame;
- }
- } else if (BTRFS_EXTENT_DATA_KEY == type &&
- state->include_extent_data) {
- sf->error = btrfsic_handle_extent_data(
- state,
- sf->block,
- sf->block_ctx,
- item_offset,
- force_iodone_flag);
- if (sf->error)
- goto one_stack_frame_backwards;
- }
-
- goto continue_with_current_leaf_stack_frame;
- }
- } else {
- struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;
-
- if (-1 == sf->i) {
- sf->nr = btrfs_stack_header_nritems(&nodehdr->header);
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("node %llu level %d items %d generation %llu owner %llu\n",
- sf->block_ctx->start,
- nodehdr->header.level, sf->nr,
- btrfs_stack_header_generation(
- &nodehdr->header),
- btrfs_stack_header_owner(
- &nodehdr->header));
- }
-
-continue_with_current_node_stack_frame:
- if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
- sf->i++;
- sf->num_copies = 0;
- }
-
- if (sf->i < sf->nr) {
- struct btrfs_key_ptr key_ptr;
- u32 key_ptr_offset;
- u64 next_bytenr;
-
- key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
- (uintptr_t)nodehdr;
- if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
- sf->block_ctx->len) {
- pr_info(
- "btrfsic: node item out of bounce at logical %llu, dev %pg\n",
- sf->block_ctx->start,
- sf->block_ctx->dev->bdev);
- goto one_stack_frame_backwards;
- }
- btrfsic_read_from_block_data(
- sf->block_ctx, &key_ptr, key_ptr_offset,
- sizeof(struct btrfs_key_ptr));
- next_bytenr = btrfs_stack_key_blockptr(&key_ptr);
-
- sf->error = btrfsic_create_link_to_next_block(
- state,
- sf->block,
- sf->block_ctx,
- next_bytenr,
- sf->limit_nesting,
- &sf->next_block_ctx,
- &sf->next_block,
- force_iodone_flag,
- &sf->num_copies,
- &sf->mirror_num,
- &key_ptr.key,
- btrfs_stack_key_generation(&key_ptr));
- if (sf->error)
- goto one_stack_frame_backwards;
-
- if (NULL != sf->next_block) {
- struct btrfs_header *const next_hdr =
- (struct btrfs_header *)
- sf->next_block_ctx.datav[0];
-
- next_stack = btrfsic_stack_frame_alloc();
- if (NULL == next_stack) {
- sf->error = -1;
- goto one_stack_frame_backwards;
- }
-
- next_stack->i = -1;
- next_stack->block = sf->next_block;
- next_stack->block_ctx = &sf->next_block_ctx;
- next_stack->next_block = NULL;
- next_stack->hdr = next_hdr;
- next_stack->limit_nesting =
- sf->limit_nesting - 1;
- next_stack->prev = sf;
- sf = next_stack;
- goto continue_with_new_stack_frame;
- }
-
- goto continue_with_current_node_stack_frame;
- }
- }
-
-one_stack_frame_backwards:
- if (NULL != sf->prev) {
- struct btrfsic_stack_frame *const prev = sf->prev;
-
- /* the one for the initial block is freed in the caller */
- btrfsic_release_block_ctx(sf->block_ctx);
-
- if (sf->error) {
- prev->error = sf->error;
- btrfsic_stack_frame_free(sf);
- sf = prev;
- goto one_stack_frame_backwards;
- }
-
- btrfsic_stack_frame_free(sf);
- sf = prev;
- goto continue_with_new_stack_frame;
- } else {
- BUG_ON(&initial_stack_frame != sf);
- }
-
- return sf->error;
-}
-
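-/*
- * Copy @len bytes starting at @offset from the block's page vector
- * (block_ctx->datav) into a contiguous destination buffer, crossing
- * page boundaries as necessary.
- */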
-static void btrfsic_read_from_block_data(
- struct btrfsic_block_data_ctx *block_ctx,
- void *dstv, u32 offset, size_t len)
-{
- size_t cur;
- size_t pgoff;
- char *kaddr;
- char *dst = (char *)dstv;
- size_t start_offset = offset_in_page(block_ctx->start);
- unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
-
- WARN_ON(offset + len > block_ctx->len);
- pgoff = offset_in_page(start_offset + offset);
-
- while (len > 0) {
- cur = min(len, ((size_t)PAGE_SIZE - pgoff));
- BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
- kaddr = block_ctx->datav[i];
- memcpy(dst, kaddr + pgoff, cur);
-
- dst += cur;
- len -= cur;
- pgoff = 0;
- i++;
- }
-}
-
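-/*
- * Record a link from @block to the tree block at @next_bytenr for the
- * current mirror: map the block, look it up in (or add it to) the
- * block hashtable, and look up or allocate the block link.  Only when
- * a new link was created and the nesting limit is not yet reached is
- * the child block read and returned via *next_blockp so that the
- * caller descends into it.  *mirror_nump is advanced so the caller
- * repeats this for every copy.
- */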
-static int btrfsic_create_link_to_next_block(
- struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- u64 next_bytenr,
- int limit_nesting,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block **next_blockp,
- int force_iodone_flag,
- int *num_copiesp, int *mirror_nump,
- struct btrfs_disk_key *disk_key,
- u64 parent_generation)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfsic_block *next_block = NULL;
- int ret;
- struct btrfsic_block_link *l;
- int did_alloc_block_link;
- int block_was_created;
-
- *next_blockp = NULL;
- if (0 == *num_copiesp) {
- *num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
- state->metablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, *num_copiesp);
- *mirror_nump = 1;
- }
-
- if (*mirror_nump > *num_copiesp)
- return 0;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
- *mirror_nump);
- ret = btrfsic_map_block(state, next_bytenr,
- state->metablock_size,
- next_block_ctx, *mirror_nump);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
- next_bytenr, *mirror_nump);
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
-
- next_block = btrfsic_block_lookup_or_add(state,
- next_block_ctx, "referenced ",
- 1, force_iodone_flag,
- !force_iodone_flag,
- *mirror_nump,
- &block_was_created);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
- if (block_was_created) {
- l = NULL;
- next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
- } else {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
- if (next_block->logical_bytenr != next_bytenr &&
- !(!next_block->is_metadata &&
- 0 == next_block->logical_bytenr))
- pr_info(
-"referenced block @%llu (%pg/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu)\n",
- next_bytenr, next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr, *mirror_nump,
- btrfsic_get_block_type(state,
- next_block),
- next_block->logical_bytenr);
- else
- pr_info(
- "referenced block @%llu (%pg/%llu/%d) found in hash table, %c\n",
- next_bytenr, next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr, *mirror_nump,
- btrfsic_get_block_type(state,
- next_block));
- }
- next_block->logical_bytenr = next_bytenr;
-
- next_block->mirror_num = *mirror_nump;
- l = btrfsic_block_link_hashtable_lookup(
- next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr,
- block_ctx->dev->bdev,
- block_ctx->dev_bytenr,
- &state->block_link_hashtable);
- }
-
- next_block->disk_key = *disk_key;
- if (NULL == l) {
- l = btrfsic_block_link_alloc();
- if (NULL == l) {
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
-
- did_alloc_block_link = 1;
- l->block_ref_to = next_block;
- l->block_ref_from = block;
- l->ref_cnt = 1;
- l->parent_generation = parent_generation;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
-
- list_add(&l->node_ref_to, &block->ref_to_list);
- list_add(&l->node_ref_from, &next_block->ref_from_list);
-
- btrfsic_block_link_hashtable_add(l,
- &state->block_link_hashtable);
- } else {
- did_alloc_block_link = 0;
- if (0 == limit_nesting) {
- l->ref_cnt++;
- l->parent_generation = parent_generation;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
- }
- }
-
- if (limit_nesting > 0 && did_alloc_block_link) {
- ret = btrfsic_read_block(state, next_block_ctx);
- if (ret < (int)next_block_ctx->len) {
- pr_info("btrfsic: read block @logical %llu failed!\n",
- next_bytenr);
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
-
- *next_blockp = next_block;
- } else {
- *next_blockp = NULL;
- }
- (*mirror_nump)++;
-
- return 0;
-}
-
-static int btrfsic_handle_extent_data(
- struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- u32 item_offset, int force_iodone_flag)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfs_file_extent_item file_extent_item;
- u64 file_extent_item_offset;
- u64 next_bytenr;
- u64 num_bytes;
- u64 generation;
- struct btrfsic_block_link *l;
- int ret;
-
- file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
- item_offset;
- if (file_extent_item_offset +
- offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
- block_ctx->len) {
- pr_info("btrfsic: file item out of bounce at logical %llu, dev %pg\n",
- block_ctx->start, block_ctx->dev->bdev);
- return -1;
- }
-
- btrfsic_read_from_block_data(block_ctx, &file_extent_item,
- file_extent_item_offset,
- offsetof(struct btrfs_file_extent_item, disk_num_bytes));
- if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
- btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- pr_info("extent_data: type %u, disk_bytenr = %llu\n",
- file_extent_item.type,
- btrfs_stack_file_extent_disk_bytenr(
- &file_extent_item));
- return 0;
- }
-
- if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
- block_ctx->len) {
- pr_info("btrfsic: file item out of bounce at logical %llu, dev %pg\n",
- block_ctx->start, block_ctx->dev->bdev);
- return -1;
- }
- btrfsic_read_from_block_data(block_ctx, &file_extent_item,
- file_extent_item_offset,
- sizeof(struct btrfs_file_extent_item));
- next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item);
- if (btrfs_stack_file_extent_compression(&file_extent_item) ==
- BTRFS_COMPRESS_NONE) {
- next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item);
- num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
- } else {
- num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item);
- }
- generation = btrfs_stack_file_extent_generation(&file_extent_item);
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
- file_extent_item.type,
- btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
- btrfs_stack_file_extent_offset(&file_extent_item),
- num_bytes);
- while (num_bytes > 0) {
- u32 chunk_len;
- int num_copies;
- int mirror_num;
-
- if (num_bytes > state->datablock_size)
- chunk_len = state->datablock_size;
- else
- chunk_len = num_bytes;
-
- num_copies = btrfs_num_copies(fs_info, next_bytenr,
- state->datablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- struct btrfsic_block_data_ctx next_block_ctx;
- struct btrfsic_block *next_block;
- int block_was_created;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
- mirror_num);
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
- next_bytenr, chunk_len);
- ret = btrfsic_map_block(state, next_bytenr,
- chunk_len, &next_block_ctx,
- mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
- next_bytenr, mirror_num);
- return -1;
- }
-
- next_block = btrfsic_block_lookup_or_add(
- state,
- &next_block_ctx,
- "referenced ",
- 0,
- force_iodone_flag,
- !force_iodone_flag,
- mirror_num,
- &block_was_created);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(&next_block_ctx);
- return -1;
- }
- if (!block_was_created) {
- if ((state->print_mask &
- BTRFSIC_PRINT_MASK_VERBOSE) &&
- next_block->logical_bytenr != next_bytenr &&
- !(!next_block->is_metadata &&
- 0 == next_block->logical_bytenr)) {
- pr_info(
-"referenced block @%llu (%pg/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu)\n",
- next_bytenr,
- next_block_ctx.dev->bdev,
- next_block_ctx.dev_bytenr,
- mirror_num,
- next_block->logical_bytenr);
- }
- next_block->logical_bytenr = next_bytenr;
- next_block->mirror_num = mirror_num;
- }
-
- l = btrfsic_block_link_lookup_or_add(state,
- &next_block_ctx,
- next_block, block,
- generation);
- btrfsic_release_block_ctx(&next_block_ctx);
- if (NULL == l)
- return -1;
- }
-
- next_bytenr += chunk_len;
- num_bytes -= chunk_len;
- }
-
- return 0;
-}
-
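-/*
- * Translate a logical bytenr into a (device state, physical bytenr)
- * pair via btrfs_map_block() and fill in @block_ctx_out.  Only the
- * first stripe of the mapping is used; the page vector is left
- * unallocated until btrfsic_read_block() is called.
- */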
-static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
- struct btrfsic_block_data_ctx *block_ctx_out,
- int mirror_num)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- int ret;
- u64 length;
- struct btrfs_io_context *multi = NULL;
- struct btrfs_device *device;
-
- length = len;
- ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
- bytenr, &length, &multi, mirror_num);
-
- if (ret) {
- block_ctx_out->start = 0;
- block_ctx_out->dev_bytenr = 0;
- block_ctx_out->len = 0;
- block_ctx_out->dev = NULL;
- block_ctx_out->datav = NULL;
- block_ctx_out->pagev = NULL;
- block_ctx_out->mem_to_free = NULL;
-
- return ret;
- }
-
- device = multi->stripes[0].dev;
- if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
- !device->bdev || !device->name)
- block_ctx_out->dev = NULL;
- else
- block_ctx_out->dev = btrfsic_dev_state_lookup(
- device->bdev->bd_dev);
- block_ctx_out->dev_bytenr = multi->stripes[0].physical;
- block_ctx_out->start = bytenr;
- block_ctx_out->len = len;
- block_ctx_out->datav = NULL;
- block_ctx_out->pagev = NULL;
- block_ctx_out->mem_to_free = NULL;
-
- kfree(multi);
- if (NULL == block_ctx_out->dev) {
- ret = -ENXIO;
- pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
- }
-
- return ret;
-}
-
-static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
-{
- if (block_ctx->mem_to_free) {
- unsigned int num_pages;
-
- BUG_ON(!block_ctx->datav);
- BUG_ON(!block_ctx->pagev);
- num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- /* Pages must be unmapped in reverse order */
- while (num_pages > 0) {
- num_pages--;
- if (block_ctx->datav[num_pages])
- block_ctx->datav[num_pages] = NULL;
- if (block_ctx->pagev[num_pages]) {
- __free_page(block_ctx->pagev[num_pages]);
- block_ctx->pagev[num_pages] = NULL;
- }
- }
-
- kfree(block_ctx->mem_to_free);
- block_ctx->mem_to_free = NULL;
- block_ctx->pagev = NULL;
- block_ctx->datav = NULL;
- }
-}
-
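-/*
- * Allocate the page vector for @block_ctx and read the block's data
- * synchronously with submit_bio_wait().  The device byte offset must
- * be page aligned; on success the block length is returned.
- */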
-static int btrfsic_read_block(struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx)
-{
- unsigned int num_pages;
- unsigned int i;
- size_t size;
- u64 dev_bytenr;
- int ret;
-
- BUG_ON(block_ctx->datav);
- BUG_ON(block_ctx->pagev);
- BUG_ON(block_ctx->mem_to_free);
- if (!PAGE_ALIGNED(block_ctx->dev_bytenr)) {
- pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
- block_ctx->dev_bytenr);
- return -1;
- }
-
- num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev);
- block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS);
- if (!block_ctx->mem_to_free)
- return -ENOMEM;
- block_ctx->datav = block_ctx->mem_to_free;
- block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
- for (i = 0; i < num_pages; i++) {
- block_ctx->pagev[i] = alloc_page(GFP_NOFS);
- if (!block_ctx->pagev[i])
- return -1;
- }
-
- dev_bytenr = block_ctx->dev_bytenr;
- for (i = 0; i < num_pages;) {
- struct bio *bio;
- unsigned int j;
-
- bio = btrfs_bio_alloc(num_pages - i);
- bio_set_dev(bio, block_ctx->dev->bdev);
- bio->bi_iter.bi_sector = dev_bytenr >> 9;
- bio->bi_opf = REQ_OP_READ;
-
- for (j = i; j < num_pages; j++) {
- ret = bio_add_page(bio, block_ctx->pagev[j],
- PAGE_SIZE, 0);
- if (PAGE_SIZE != ret)
- break;
- }
- if (j == i) {
- pr_info("btrfsic: error, failed to add a single page!\n");
- return -1;
- }
- if (submit_bio_wait(bio)) {
- pr_info("btrfsic: read error at logical %llu dev %pg!\n",
- block_ctx->start, block_ctx->dev->bdev);
- bio_put(bio);
- return -1;
- }
- bio_put(bio);
- dev_bytenr += (j - i) * PAGE_SIZE;
- i = j;
- }
- for (i = 0; i < num_pages; i++)
- block_ctx->datav[i] = page_address(block_ctx->pagev[i]);
-
- return block_ctx->len;
-}
-
-static void btrfsic_dump_database(struct btrfsic_state *state)
-{
- const struct btrfsic_block *b_all;
-
- BUG_ON(NULL == state);
-
- pr_info("all_blocks_list:\n");
- list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
- const struct btrfsic_block_link *l;
-
- pr_info("%c-block @%llu (%pg/%llu/%d)\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num);
-
- list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
- pr_info(
- " %c @%llu (%pg/%llu/%d) refers %u* to %c @%llu (%pg/%llu/%d)\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- }
-
- list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
- pr_info(
- " %c @%llu (%pg/%llu/%d) is ref %u* from %c @%llu (%pg/%llu/%d)\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr,
- l->block_ref_from->mirror_num);
- }
-
- pr_info("\n");
- }
-}
-
-/*
- * Test whether the disk block contains a tree block (leaf or node)
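- * by comparing the fsid and recomputing the metadata checksum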
- * (note that this test fails for the super block)
- */
-static noinline_for_stack int btrfsic_test_for_metadata(
- struct btrfsic_state *state,
- char **datav, unsigned int num_pages)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- struct btrfs_header *h;
- u8 csum[BTRFS_CSUM_SIZE];
- unsigned int i;
-
- if (num_pages * PAGE_SIZE < state->metablock_size)
- return 1; /* not metadata */
- num_pages = state->metablock_size >> PAGE_SHIFT;
- h = (struct btrfs_header *)datav[0];
-
- if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE))
- return 1;
-
- shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
-
- for (i = 0; i < num_pages; i++) {
- u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
- size_t sublen = i ? PAGE_SIZE :
- (PAGE_SIZE - BTRFS_CSUM_SIZE);
-
- crypto_shash_update(shash, data, sublen);
- }
- crypto_shash_final(shash, csum);
- if (memcmp(csum, h->csum, fs_info->csum_size))
- return 1;
-
- return 0; /* is metadata */
-}
-
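-/*
- * Account for a write: the mapped bio data is consumed in chunks
- * (superblock, metadata or data sized) via the "again" label.  Each
- * chunk updates or creates the corresponding block in the database,
- * drops the block's stale outgoing links, patches the bio's end_io so
- * completion can be tracked, and re-parses metadata through
- * btrfsic_process_metablock().
- */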
-static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
- u64 dev_bytenr, char **mapped_datav,
- unsigned int num_pages,
- struct bio *bio, int *bio_is_patched,
- int submit_bio_bh_rw)
-{
- int is_metadata;
- struct btrfsic_block *block;
- struct btrfsic_block_data_ctx block_ctx;
- int ret;
- struct btrfsic_state *state = dev_state->state;
- struct block_device *bdev = dev_state->bdev;
- unsigned int processed_len;
-
- if (NULL != bio_is_patched)
- *bio_is_patched = 0;
-
-again:
- if (num_pages == 0)
- return;
-
- processed_len = 0;
- is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
- num_pages));
-
- block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
- &state->block_hashtable);
- if (NULL != block) {
- u64 bytenr = 0;
- struct btrfsic_block_link *l, *tmp;
-
- if (block->is_superblock) {
- bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
- mapped_datav[0]);
- if (num_pages * PAGE_SIZE <
- BTRFS_SUPER_INFO_SIZE) {
- pr_info("btrfsic: cannot work with too short bios!\n");
- return;
- }
- is_metadata = 1;
- BUG_ON(!PAGE_ALIGNED(BTRFS_SUPER_INFO_SIZE));
- processed_len = BTRFS_SUPER_INFO_SIZE;
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
- pr_info("[before new superblock is written]:\n");
- btrfsic_dump_tree_sub(state, block, 0);
- }
- }
- if (is_metadata) {
- if (!block->is_superblock) {
- if (num_pages * PAGE_SIZE <
- state->metablock_size) {
- pr_info("btrfsic: cannot work with too short bios!\n");
- return;
- }
- processed_len = state->metablock_size;
- bytenr = btrfs_stack_header_bytenr(
- (struct btrfs_header *)
- mapped_datav[0]);
- btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
- dev_state,
- dev_bytenr);
- }
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
- if (block->logical_bytenr != bytenr &&
- !(!block->is_metadata &&
- block->logical_bytenr == 0))
- pr_info(
-"written block @%llu (%pg/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu)\n",
- bytenr, dev_state->bdev,
- dev_bytenr,
- block->mirror_num,
- btrfsic_get_block_type(state,
- block),
- block->logical_bytenr);
- else
- pr_info(
- "written block @%llu (%pg/%llu/%d) found in hash table, %c\n",
- bytenr, dev_state->bdev,
- dev_bytenr, block->mirror_num,
- btrfsic_get_block_type(state,
- block));
- }
- block->logical_bytenr = bytenr;
- } else {
- if (num_pages * PAGE_SIZE <
- state->datablock_size) {
- pr_info("btrfsic: cannot work with too short bios!\n");
- return;
- }
- processed_len = state->datablock_size;
- bytenr = block->logical_bytenr;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "written block @%llu (%pg/%llu/%d) found in hash table, %c\n",
- bytenr, dev_state->bdev, dev_bytenr,
- block->mirror_num,
- btrfsic_get_block_type(state, block));
- }
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("ref_to_list: %cE, ref_from_list: %cE\n",
- list_empty(&block->ref_to_list) ? ' ' : '!',
- list_empty(&block->ref_from_list) ? ' ' : '!');
- if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
- pr_info(
-"btrfs: attempt to overwrite %c-block @%llu (%pg/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n",
- btrfsic_get_block_type(state, block), bytenr,
- dev_state->bdev, dev_bytenr, block->mirror_num,
- block->generation,
- btrfs_disk_key_objectid(&block->disk_key),
- block->disk_key.type,
- btrfs_disk_key_offset(&block->disk_key),
- btrfs_stack_header_generation(
- (struct btrfs_header *) mapped_datav[0]),
- state->max_superblock_generation);
- btrfsic_dump_tree(state);
- }
-
- if (!block->is_iodone && !block->never_written) {
- pr_info(
-"btrfs: attempt to overwrite %c-block @%llu (%pg/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n",
- btrfsic_get_block_type(state, block), bytenr,
- dev_state->bdev, dev_bytenr, block->mirror_num,
- block->generation,
- btrfs_stack_header_generation(
- (struct btrfs_header *)
- mapped_datav[0]));
- /* it would not be safe to go on */
- btrfsic_dump_tree(state);
- goto continue_loop;
- }
-
- /*
- * Clear all references of this block. Do not free
- * the block itself even if it is not referenced anymore
- * because it still carries valuable information
- * like whether it was ever written and IO completed.
- */
- list_for_each_entry_safe(l, tmp, &block->ref_to_list,
- node_ref_to) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_rem_link(state, l);
- l->ref_cnt--;
- if (0 == l->ref_cnt) {
- list_del(&l->node_ref_to);
- list_del(&l->node_ref_from);
- btrfsic_block_link_hashtable_remove(l);
- btrfsic_block_link_free(l);
- }
- }
-
- block_ctx.dev = dev_state;
- block_ctx.dev_bytenr = dev_bytenr;
- block_ctx.start = bytenr;
- block_ctx.len = processed_len;
- block_ctx.pagev = NULL;
- block_ctx.mem_to_free = NULL;
- block_ctx.datav = mapped_datav;
-
- if (is_metadata || state->include_extent_data) {
- block->never_written = 0;
- block->iodone_w_error = 0;
- if (NULL != bio) {
- block->is_iodone = 0;
- BUG_ON(NULL == bio_is_patched);
- if (!*bio_is_patched) {
- block->orig_bio_private =
- bio->bi_private;
- block->orig_bio_end_io =
- bio->bi_end_io;
- block->next_in_same_bio = NULL;
- bio->bi_private = block;
- bio->bi_end_io = btrfsic_bio_end_io;
- *bio_is_patched = 1;
- } else {
- struct btrfsic_block *chained_block =
- (struct btrfsic_block *)
- bio->bi_private;
-
- BUG_ON(NULL == chained_block);
- block->orig_bio_private =
- chained_block->orig_bio_private;
- block->orig_bio_end_io =
- chained_block->orig_bio_end_io;
- block->next_in_same_bio = chained_block;
- bio->bi_private = block;
- }
- } else {
- block->is_iodone = 1;
- block->orig_bio_private = NULL;
- block->orig_bio_end_io = NULL;
- block->next_in_same_bio = NULL;
- }
- }
-
- block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = submit_bio_bh_rw;
- if (is_metadata) {
- block->logical_bytenr = bytenr;
- block->is_metadata = 1;
- if (block->is_superblock) {
- BUG_ON(PAGE_SIZE !=
- BTRFS_SUPER_INFO_SIZE);
- ret = btrfsic_process_written_superblock(
- state,
- block,
- (struct btrfs_super_block *)
- mapped_datav[0]);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
- pr_info("[after new superblock is written]:\n");
- btrfsic_dump_tree_sub(state, block, 0);
- }
- } else {
- block->mirror_num = 0; /* unknown */
- ret = btrfsic_process_metablock(
- state,
- block,
- &block_ctx,
- 0, 0);
- }
- if (ret)
- pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
- dev_bytenr);
- } else {
- block->is_metadata = 0;
- block->mirror_num = 0; /* unknown */
- block->generation = BTRFSIC_GENERATION_UNKNOWN;
- if (!state->include_extent_data
- && list_empty(&block->ref_from_list)) {
- /*
- * disk block is overwritten with extent
- * data (not meta data) and we are configured
- * to not include extent data: take the
- * chance and free the block's memory
- */
- btrfsic_block_hashtable_remove(block);
- list_del(&block->all_blocks_node);
- btrfsic_block_free(block);
- }
- }
- btrfsic_release_block_ctx(&block_ctx);
- } else {
- /* block has not been found in hash table */
- u64 bytenr;
-
- if (!is_metadata) {
- processed_len = state->datablock_size;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "written block (%pg/%llu/?) !found in hash table, D\n",
- dev_state->bdev, dev_bytenr);
- if (!state->include_extent_data) {
- /* ignore that written D block */
- goto continue_loop;
- }
-
- /*
- * this is getting ugly for the
- * include_extent_data case...
- */
- bytenr = 0; /* unknown */
- } else {
- processed_len = state->metablock_size;
- bytenr = btrfs_stack_header_bytenr(
- (struct btrfs_header *)
- mapped_datav[0]);
- btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
- dev_bytenr);
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "written block @%llu (%pg/%llu/?) !found in hash table, M\n",
- bytenr, dev_state->bdev, dev_bytenr);
- }
-
- block_ctx.dev = dev_state;
- block_ctx.dev_bytenr = dev_bytenr;
- block_ctx.start = bytenr;
- block_ctx.len = processed_len;
- block_ctx.pagev = NULL;
- block_ctx.mem_to_free = NULL;
- block_ctx.datav = mapped_datav;
-
- block = btrfsic_block_alloc();
- if (NULL == block) {
- btrfsic_release_block_ctx(&block_ctx);
- goto continue_loop;
- }
- block->dev_state = dev_state;
- block->dev_bytenr = dev_bytenr;
- block->logical_bytenr = bytenr;
- block->is_metadata = is_metadata;
- block->never_written = 0;
- block->iodone_w_error = 0;
- block->mirror_num = 0; /* unknown */
- block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = submit_bio_bh_rw;
- if (NULL != bio) {
- block->is_iodone = 0;
- BUG_ON(NULL == bio_is_patched);
- if (!*bio_is_patched) {
- block->orig_bio_private = bio->bi_private;
- block->orig_bio_end_io = bio->bi_end_io;
- block->next_in_same_bio = NULL;
- bio->bi_private = block;
- bio->bi_end_io = btrfsic_bio_end_io;
- *bio_is_patched = 1;
- } else {
- struct btrfsic_block *chained_block =
- (struct btrfsic_block *)
- bio->bi_private;
-
- BUG_ON(NULL == chained_block);
- block->orig_bio_private =
- chained_block->orig_bio_private;
- block->orig_bio_end_io =
- chained_block->orig_bio_end_io;
- block->next_in_same_bio = chained_block;
- bio->bi_private = block;
- }
- } else {
- block->is_iodone = 1;
- block->orig_bio_private = NULL;
- block->orig_bio_end_io = NULL;
- block->next_in_same_bio = NULL;
- }
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("new written %c-block @%llu (%pg/%llu/%d)\n",
- is_metadata ? 'M' : 'D',
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num);
- list_add(&block->all_blocks_node, &state->all_blocks_list);
- btrfsic_block_hashtable_add(block, &state->block_hashtable);
-
- if (is_metadata) {
- ret = btrfsic_process_metablock(state, block,
- &block_ctx, 0, 0);
- if (ret)
- pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
- dev_bytenr);
- }
- btrfsic_release_block_ctx(&block_ctx);
- }
-
-continue_loop:
- BUG_ON(!processed_len);
- dev_bytenr += processed_len;
- mapped_datav += processed_len >> PAGE_SHIFT;
- num_pages -= processed_len >> PAGE_SHIFT;
- goto again;
-}
-
-static void btrfsic_bio_end_io(struct bio *bp)
-{
- struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
- int iodone_w_error;
-
- /* mutex is not held! This is not safe if IO is not yet completed
- * on umount */
- iodone_w_error = 0;
- if (bp->bi_status)
- iodone_w_error = 1;
-
- BUG_ON(NULL == block);
- bp->bi_private = block->orig_bio_private;
- bp->bi_end_io = block->orig_bio_end_io;
-
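- /* Walk every block that was chained to this bio via next_in_same_bio. */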
- do {
- struct btrfsic_block *next_block;
- struct btrfsic_dev_state *const dev_state = block->dev_state;
-
- if ((dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- pr_info("bio_end_io(err=%d) for %c @%llu (%pg/%llu/%d)\n",
- bp->bi_status,
- btrfsic_get_block_type(dev_state->state, block),
- block->logical_bytenr, dev_state->bdev,
- block->dev_bytenr, block->mirror_num);
- next_block = block->next_in_same_bio;
- block->iodone_w_error = iodone_w_error;
- if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
- dev_state->last_flush_gen++;
- if ((dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- pr_info("bio_end_io() new %pg flush_gen=%llu\n",
- dev_state->bdev,
- dev_state->last_flush_gen);
- }
- if (block->submit_bio_bh_rw & REQ_FUA)
- block->flush_gen = 0; /* FUA completed means block is
- * on disk */
- block->is_iodone = 1; /* for FLUSH, this releases the block */
- block = next_block;
- } while (NULL != block);
-
- bp->bi_end_io(bp);
-}
-
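-/*
- * A new superblock was written: remember it if its generation is the
- * highest seen so far, then link it to the root, chunk and log tree
- * roots it points to (for every mirror of each), and finally verify
- * with btrfsic_check_all_ref_blocks() that everything it references
- * has safely reached the disk.
- */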
-static int btrfsic_process_written_superblock(
- struct btrfsic_state *state,
- struct btrfsic_block *const superblock,
- struct btrfs_super_block *const super_hdr)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- int pass;
-
- superblock->generation = btrfs_super_generation(super_hdr);
- if (!(superblock->generation > state->max_superblock_generation ||
- 0 == state->max_superblock_generation)) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- pr_info(
- "btrfsic: superblock @%llu (%pg/%llu/%d) with old gen %llu <= %llu\n",
- superblock->logical_bytenr,
- superblock->dev_state->bdev,
- superblock->dev_bytenr, superblock->mirror_num,
- btrfs_super_generation(super_hdr),
- state->max_superblock_generation);
- } else {
- if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- pr_info(
- "btrfsic: got new superblock @%llu (%pg/%llu/%d) with new gen %llu > %llu\n",
- superblock->logical_bytenr,
- superblock->dev_state->bdev,
- superblock->dev_bytenr, superblock->mirror_num,
- btrfs_super_generation(super_hdr),
- state->max_superblock_generation);
-
- state->max_superblock_generation =
- btrfs_super_generation(super_hdr);
- state->latest_superblock = superblock;
- }
-
- for (pass = 0; pass < 3; pass++) {
- int ret;
- u64 next_bytenr;
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx tmp_next_block_ctx;
- struct btrfsic_block_link *l;
- int num_copies;
- int mirror_num;
- const char *additional_string = NULL;
- struct btrfs_disk_key tmp_disk_key = {0};
-
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_ROOT_ITEM_KEY);
- btrfs_set_disk_key_objectid(&tmp_disk_key, 0);
-
- switch (pass) {
- case 0:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_ROOT_TREE_OBJECTID);
- additional_string = "root ";
- next_bytenr = btrfs_super_root(super_hdr);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("root@%llu\n", next_bytenr);
- break;
- case 1:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_CHUNK_TREE_OBJECTID);
- additional_string = "chunk ";
- next_bytenr = btrfs_super_chunk_root(super_hdr);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("chunk@%llu\n", next_bytenr);
- break;
- case 2:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_TREE_LOG_OBJECTID);
- additional_string = "log ";
- next_bytenr = btrfs_super_log_root(super_hdr);
- if (0 == next_bytenr)
- continue;
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("log@%llu\n", next_bytenr);
- break;
- }
-
- num_copies = btrfs_num_copies(fs_info, next_bytenr,
- BTRFS_SUPER_INFO_SIZE);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- int was_created;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num);
- ret = btrfsic_map_block(state, next_bytenr,
- BTRFS_SUPER_INFO_SIZE,
- &tmp_next_block_ctx,
- mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
- next_bytenr, mirror_num);
- return -1;
- }
-
- next_block = btrfsic_block_lookup_or_add(
- state,
- &tmp_next_block_ctx,
- additional_string,
- 1, 0, 1,
- mirror_num,
- &was_created);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- return -1;
- }
-
- next_block->disk_key = tmp_disk_key;
- if (was_created)
- next_block->generation =
- BTRFSIC_GENERATION_UNKNOWN;
- l = btrfsic_block_link_lookup_or_add(
- state,
- &tmp_next_block_ctx,
- next_block,
- superblock,
- BTRFSIC_GENERATION_UNKNOWN);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- if (NULL == l)
- return -1;
- }
- }
-
- if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)))
- btrfsic_dump_tree(state);
-
- return 0;
-}
-
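-/*
- * Recursively verify that every block referenced from @block has been
- * written, completed without I/O error, carries the generation the
- * parent expects and has been flushed out of the device write cache.
- * Returns 0 if all checks pass, -1 otherwise.
- */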
-static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
- struct btrfsic_block *const block,
- int recursion_level)
-{
- const struct btrfsic_block_link *l;
- int ret = 0;
-
- if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
- /*
- * Note that this situation can happen and does not
- * indicate an error in regular cases. It happens
- * when disk blocks are freed and later reused.
- * The check-integrity module is not aware of any
- * block free operations, it just recognizes block
- * write operations. Therefore it keeps the linkage
- * information for a block until a block is
- * rewritten. This can temporarily cause incorrect
- * and even circular linkage information. This
- * causes no harm unless such blocks are referenced
- * by the most recent super block.
- */
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic: abort cyclic linkage (case 1).\n");
-
- return ret;
- }
-
- /*
- * This algorithm is recursive because the amount of used stack
- * space is very small and the max recursion depth is limited.
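- * The check against 3 + BTRFS_MAX_LEVEL above bounds the recursion depth.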
- */
- list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "rl=%d, %c @%llu (%pg/%llu/%d) %u* refers to %c @%llu (%pg/%llu/%d)\n",
- recursion_level,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- if (l->block_ref_to->never_written) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is never written!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- ret = -1;
- } else if (!l->block_ref_to->is_iodone) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is not yet iodone!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- ret = -1;
- } else if (l->block_ref_to->iodone_w_error) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which has write error!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- ret = -1;
- } else if (l->parent_generation !=
- l->block_ref_to->generation &&
- BTRFSIC_GENERATION_UNKNOWN !=
- l->parent_generation &&
- BTRFSIC_GENERATION_UNKNOWN !=
- l->block_ref_to->generation) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) with generation %llu != parent generation %llu!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num,
- l->block_ref_to->generation,
- l->parent_generation);
- ret = -1;
- } else if (l->block_ref_to->flush_gen >
- l->block_ref_to->dev_state->last_flush_gen) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num,
- l->block_ref_to->flush_gen,
- l->block_ref_to->dev_state->last_flush_gen);
- ret = -1;
- } else if (-1 == btrfsic_check_all_ref_blocks(state,
- l->block_ref_to,
- recursion_level +
- 1)) {
- ret = -1;
- }
- }
-
- return ret;
-}
-
-static int btrfsic_is_block_ref_by_superblock(
- const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int recursion_level)
-{
- const struct btrfsic_block_link *l;
-
- if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
- /* refer to comment at "abort cyclic linkage (case 1)" */
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic: abort cyclic linkage (case 2).\n");
-
- return 0;
- }
-
- /*
- * This algorithm is recursive because the amount of used stack space
- * is very small and the max recursion depth is limited.
- */
- list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "rl=%d, %c @%llu (%pg/%llu/%d) is ref %u* from %c @%llu (%pg/%llu/%d)\n",
- recursion_level,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr,
- l->block_ref_from->mirror_num);
- if (l->block_ref_from->is_superblock &&
- state->latest_superblock->dev_bytenr ==
- l->block_ref_from->dev_bytenr &&
- state->latest_superblock->dev_state->bdev ==
- l->block_ref_from->dev_state->bdev)
- return 1;
- else if (btrfsic_is_block_ref_by_superblock(state,
- l->block_ref_from,
- recursion_level +
- 1))
- return 1;
- }
-
- return 0;
-}
-
-static void btrfsic_print_add_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l)
-{
- pr_info("add %u* link from %c @%llu (%pg/%llu/%d) to %c @%llu (%pg/%llu/%d)\n",
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev, l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
-}
-
-static void btrfsic_print_rem_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l)
-{
- pr_info("rem %u* link from %c @%llu (%pg/%llu/%d) to %c @%llu (%pg/%llu/%d)\n",
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev, l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
-}
-
-static char btrfsic_get_block_type(const struct btrfsic_state *state,
- const struct btrfsic_block *block)
-{
- if (block->is_superblock &&
- state->latest_superblock->dev_bytenr == block->dev_bytenr &&
- state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
- return 'S';
- else if (block->is_superblock)
- return 's';
- else if (block->is_metadata)
- return 'M';
- else
- return 'D';
-}
-
-static void btrfsic_dump_tree(const struct btrfsic_state *state)
-{
- btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
-}
-
-static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int indent_level)
-{
- const struct btrfsic_block_link *l;
- int indent_add;
- static char buf[80];
- int cursor_position;
-
- /*
- * It would be better to fill an on-stack buffer with a complete line and
- * dump it at once when it is time to print a newline character.
- */
-
- /*
- * This algorithm is recursive because the amount of used stack space
- * is very small and the max recursion depth is limited.
- */
- indent_add = sprintf(buf, "%c-%llu(%pg/%llu/%u)",
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num);
- if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
- printk("[...]\n");
- return;
- }
- printk(buf);
- indent_level += indent_add;
- if (list_empty(&block->ref_to_list)) {
- printk("\n");
- return;
- }
- if (block->mirror_num > 1 &&
- !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
- printk(" [...]\n");
- return;
- }
-
- cursor_position = indent_level;
- list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
- while (cursor_position < indent_level) {
- printk(" ");
- cursor_position++;
- }
- if (l->ref_cnt > 1)
- indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
- else
- indent_add = sprintf(buf, " --> ");
- if (indent_level + indent_add >
- BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
- printk("[...]\n");
- cursor_position = 0;
- continue;
- }
-
- printk(buf);
-
- btrfsic_dump_tree_sub(state, l->block_ref_to,
- indent_level + indent_add);
- cursor_position = 0;
- }
-}
-
-static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block *next_block,
- struct btrfsic_block *from_block,
- u64 parent_generation)
-{
- struct btrfsic_block_link *l;
-
- l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr,
- from_block->dev_state->bdev,
- from_block->dev_bytenr,
- &state->block_link_hashtable);
- if (NULL == l) {
- l = btrfsic_block_link_alloc();
- if (!l)
- return NULL;
-
- l->block_ref_to = next_block;
- l->block_ref_from = from_block;
- l->ref_cnt = 1;
- l->parent_generation = parent_generation;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
-
- list_add(&l->node_ref_to, &from_block->ref_to_list);
- list_add(&l->node_ref_from, &next_block->ref_from_list);
-
- btrfsic_block_link_hashtable_add(l,
- &state->block_link_hashtable);
- } else {
- l->ref_cnt++;
- l->parent_generation = parent_generation;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
- }
-
- return l;
-}
-
-static struct btrfsic_block *btrfsic_block_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx,
- const char *additional_string,
- int is_metadata,
- int is_iodone,
- int never_written,
- int mirror_num,
- int *was_created)
-{
- struct btrfsic_block *block;
-
- block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
- block_ctx->dev_bytenr,
- &state->block_hashtable);
- if (NULL == block) {
- struct btrfsic_dev_state *dev_state;
-
- block = btrfsic_block_alloc();
- if (!block)
- return NULL;
-
- dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev);
- if (NULL == dev_state) {
- pr_info("btrfsic: error, lookup dev_state failed!\n");
- btrfsic_block_free(block);
- return NULL;
- }
- block->dev_state = dev_state;
- block->dev_bytenr = block_ctx->dev_bytenr;
- block->logical_bytenr = block_ctx->start;
- block->is_metadata = is_metadata;
- block->is_iodone = is_iodone;
- block->never_written = never_written;
- block->mirror_num = mirror_num;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("New %s%c-block @%llu (%pg/%llu/%d)\n",
- additional_string,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, dev_state->bdev,
- block->dev_bytenr, mirror_num);
- list_add(&block->all_blocks_node, &state->all_blocks_list);
- btrfsic_block_hashtable_add(block, &state->block_hashtable);
- if (NULL != was_created)
- *was_created = 1;
- } else {
- if (NULL != was_created)
- *was_created = 0;
- }
-
- return block;
-}
-
-static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
- u64 bytenr,
- struct btrfsic_dev_state *dev_state,
- u64 dev_bytenr)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfsic_block_data_ctx block_ctx;
- int num_copies;
- int mirror_num;
- int match = 0;
- int ret;
-
- num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);
-
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- ret = btrfsic_map_block(state, bytenr, state->metablock_size,
- &block_ctx, mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n",
- bytenr, mirror_num);
- continue;
- }
-
- if (dev_state->bdev == block_ctx.dev->bdev &&
- dev_bytenr == block_ctx.dev_bytenr) {
- match++;
- btrfsic_release_block_ctx(&block_ctx);
- break;
- }
- btrfsic_release_block_ctx(&block_ctx);
- }
-
- if (WARN_ON(!match)) {
- pr_info(
-"btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%pg, phys_bytenr=%llu)!\n",
- bytenr, dev_state->bdev, dev_bytenr);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- ret = btrfsic_map_block(state, bytenr,
- state->metablock_size,
- &block_ctx, mirror_num);
- if (ret)
- continue;
-
- pr_info("read logical bytenr @%llu maps to (%pg/%llu/%d)\n",
- bytenr, block_ctx.dev->bdev,
- block_ctx.dev_bytenr, mirror_num);
- }
- }
-}
-
-static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev)
-{
- return btrfsic_dev_state_hashtable_lookup(dev,
- &btrfsic_dev_state_hashtable);
-}
-
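-/*
- * Inspect a bio just before it is submitted: data-carrying writes are
- * fed page by page into btrfsic_process_written_block(), and flush
- * requests arm the per-device dummy block so the completion of the
- * flush can be tracked in btrfsic_bio_end_io().
- */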
-static void __btrfsic_submit_bio(struct bio *bio)
-{
- struct btrfsic_dev_state *dev_state;
-
- if (!btrfsic_is_initialized)
- return;
-
- mutex_lock(&btrfsic_mutex);
- /* since btrfsic_submit_bio() is also called before
- * btrfsic_mount(), this might return NULL */
- dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
- if (NULL != dev_state &&
- (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
- int i = 0;
- u64 dev_bytenr;
- u64 cur_bytenr;
- struct bio_vec bvec;
- struct bvec_iter iter;
- int bio_is_patched;
- char **mapped_datav;
- unsigned int segs = bio_segments(bio);
-
- dev_bytenr = 512 * bio->bi_iter.bi_sector;
- bio_is_patched = 0;
- if (dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
- bio_op(bio), bio->bi_opf, segs,
- bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
-
- mapped_datav = kmalloc_array(segs,
- sizeof(*mapped_datav), GFP_NOFS);
- if (!mapped_datav)
- goto leave;
- cur_bytenr = dev_bytenr;
-
- bio_for_each_segment(bvec, bio, iter) {
- BUG_ON(bvec.bv_len != PAGE_SIZE);
- mapped_datav[i] = page_address(bvec.bv_page);
- i++;
-
- if (dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
- pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
- i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
- cur_bytenr += bvec.bv_len;
- }
- btrfsic_process_written_block(dev_state, dev_bytenr,
- mapped_datav, segs,
- bio, &bio_is_patched,
- bio->bi_opf);
- kfree(mapped_datav);
- } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
- if (dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
- bio_op(bio), bio->bi_opf, bio->bi_bdev);
- if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
- if ((dev_state->state->print_mask &
- (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
- BTRFSIC_PRINT_MASK_VERBOSE)))
- pr_info(
-"btrfsic_submit_bio(%pg) with FLUSH but dummy block already in use (ignored)!\n",
- dev_state->bdev);
- } else {
- struct btrfsic_block *const block =
- &dev_state->dummy_block_for_bio_bh_flush;
-
- block->is_iodone = 0;
- block->never_written = 0;
- block->iodone_w_error = 0;
- block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = bio->bi_opf;
- block->orig_bio_private = bio->bi_private;
- block->orig_bio_end_io = bio->bi_end_io;
- block->next_in_same_bio = NULL;
- bio->bi_private = block;
- bio->bi_end_io = btrfsic_bio_end_io;
- }
- }
-leave:
- mutex_unlock(&btrfsic_mutex);
-}
-
-void btrfsic_submit_bio(struct bio *bio)
-{
- __btrfsic_submit_bio(bio);
- submit_bio(bio);
-}
-
-int btrfsic_submit_bio_wait(struct bio *bio)
-{
- __btrfsic_submit_bio(bio);
- return submit_bio_wait(bio);
-}
-
-int btrfsic_mount(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices,
- int including_extent_data, u32 print_mask)
-{
- int ret;
- struct btrfsic_state *state;
- struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_device *device;
-
- if (!PAGE_ALIGNED(fs_info->nodesize)) {
- pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
- fs_info->nodesize, PAGE_SIZE);
- return -1;
- }
- if (!PAGE_ALIGNED(fs_info->sectorsize)) {
- pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
- fs_info->sectorsize, PAGE_SIZE);
- return -1;
- }
- state = kvzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- if (!btrfsic_is_initialized) {
- mutex_init(&btrfsic_mutex);
- btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
- btrfsic_is_initialized = 1;
- }
- mutex_lock(&btrfsic_mutex);
- state->fs_info = fs_info;
- state->print_mask = print_mask;
- state->include_extent_data = including_extent_data;
- state->metablock_size = fs_info->nodesize;
- state->datablock_size = fs_info->sectorsize;
- INIT_LIST_HEAD(&state->all_blocks_list);
- btrfsic_block_hashtable_init(&state->block_hashtable);
- btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
- state->max_superblock_generation = 0;
- state->latest_superblock = NULL;
-
- list_for_each_entry(device, dev_head, dev_list) {
- struct btrfsic_dev_state *ds;
-
- if (!device->bdev || !device->name)
- continue;
-
- ds = btrfsic_dev_state_alloc();
- if (NULL == ds) {
- mutex_unlock(&btrfsic_mutex);
- return -ENOMEM;
- }
- ds->bdev = device->bdev;
- ds->state = state;
- btrfsic_dev_state_hashtable_add(ds,
- &btrfsic_dev_state_hashtable);
- }
-
- ret = btrfsic_process_superblock(state, fs_devices);
- if (0 != ret) {
- mutex_unlock(&btrfsic_mutex);
- btrfsic_unmount(fs_devices);
- return ret;
- }
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
- btrfsic_dump_database(state);
- if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
- btrfsic_dump_tree(state);
-
- mutex_unlock(&btrfsic_mutex);
- return 0;
-}
-
-void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
-{
- struct btrfsic_block *b_all, *tmp_all;
- struct btrfsic_state *state;
- struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_device *device;
-
- if (!btrfsic_is_initialized)
- return;
-
- mutex_lock(&btrfsic_mutex);
-
- state = NULL;
- list_for_each_entry(device, dev_head, dev_list) {
- struct btrfsic_dev_state *ds;
-
- if (!device->bdev || !device->name)
- continue;
-
- ds = btrfsic_dev_state_hashtable_lookup(
- device->bdev->bd_dev,
- &btrfsic_dev_state_hashtable);
- if (NULL != ds) {
- state = ds->state;
- btrfsic_dev_state_hashtable_remove(ds);
- btrfsic_dev_state_free(ds);
- }
- }
-
- if (NULL == state) {
- pr_info("btrfsic: error, cannot find state information on umount!\n");
- mutex_unlock(&btrfsic_mutex);
- return;
- }
-
- /*
- * Don't care about keeping the lists' state up to date,
- * just free all memory that was allocated dynamically.
- * Free the blocks and the block_links.
- */
- list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
- all_blocks_node) {
- struct btrfsic_block_link *l, *tmp;
-
- list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
- node_ref_to) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_rem_link(state, l);
-
- l->ref_cnt--;
- if (0 == l->ref_cnt)
- btrfsic_block_link_free(l);
- }
-
- if (b_all->is_iodone || b_all->never_written)
- btrfsic_block_free(b_all);
- else
- pr_info(
-"btrfs: attempt to free %c-block @%llu (%pg/%llu/%d) on umount which is not yet iodone!\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num);
- }
-
- mutex_unlock(&btrfsic_mutex);
-
- kvfree(state);
-}
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
deleted file mode 100644
index bcc730a06cb5..000000000000
--- a/fs/btrfs/check-integrity.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) STRATO AG 2011. All rights reserved.
- */
-
-#ifndef BTRFS_CHECK_INTEGRITY_H
-#define BTRFS_CHECK_INTEGRITY_H
-
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-void btrfsic_submit_bio(struct bio *bio);
-int btrfsic_submit_bio_wait(struct bio *bio);
-#else
-#define btrfsic_submit_bio submit_bio
-#define btrfsic_submit_bio_wait submit_bio_wait
-#endif
-
-int btrfsic_mount(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices,
- int including_extent_data, u32 print_mask);
-void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
-
-#endif
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 32da97c3c19d..6b3357287b42 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -8,6 +8,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
@@ -15,22 +16,26 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
+#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
+#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
-#include "disk-io.h"
-#include "transaction.h"
+#include "fs.h"
#include "btrfs_inode.h"
-#include "volumes.h"
+#include "bio.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
-#include "zoned.h"
+#include "messages.h"
+#include "super.h"
+
+static struct bio_set btrfs_compressed_bioset;
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
@@ -49,6 +54,23 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
return NULL;
}
+static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
+{
+ return container_of(bbio, struct compressed_bio, bbio);
+}
+
+static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
+ u64 start, blk_opf_t op,
+ btrfs_bio_end_io_t end_io)
+{
+ struct btrfs_bio *bbio;
+
+ bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
+ GFP_NOFS, &btrfs_compressed_bioset));
+ btrfs_bio_init(bbio, inode, start, end_io, NULL);
+ return to_compressed_bio(bbio);
+}
+
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
int i;
@@ -66,20 +88,20 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
}
static int compression_compress_pages(int type, struct list_head *ws,
- struct address_space *mapping, u64 start, struct page **pages,
- unsigned long *out_pages, unsigned long *total_in,
- unsigned long *total_out)
+ struct btrfs_inode *inode, u64 start,
+ struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB:
- return zlib_compress_pages(ws, mapping, start, pages,
- out_pages, total_in, total_out);
+ return zlib_compress_folios(ws, inode, start, folios,
+ out_folios, total_in, total_out);
case BTRFS_COMPRESS_LZO:
- return lzo_compress_pages(ws, mapping, start, pages,
- out_pages, total_in, total_out);
+ return lzo_compress_folios(ws, inode, start, folios,
+ out_folios, total_in, total_out);
case BTRFS_COMPRESS_ZSTD:
- return zstd_compress_pages(ws, mapping, start, pages,
- out_pages, total_in, total_out);
+ return zstd_compress_folios(ws, inode, start, folios,
+ out_folios, total_in, total_out);
case BTRFS_COMPRESS_NONE:
default:
/*
@@ -91,15 +113,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
* Not a big deal, just need to inform caller that we
* haven't allocated any pages yet.
*/
- *out_pages = 0;
+ *out_folios = 0;
return -E2BIG;
}
}
-static int compression_decompress_bio(int type, struct list_head *ws,
- struct compressed_bio *cb)
+static int compression_decompress_bio(struct list_head *ws,
+ struct compressed_bio *cb)
{
- switch (type) {
+ switch (cb->compress_type) {
case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
@@ -114,16 +136,16 @@ static int compression_decompress_bio(int type, struct list_head *ws,
}
static int compression_decompress(int type, struct list_head *ws,
- unsigned char *data_in, struct page *dest_page,
- unsigned long start_byte, size_t srclen, size_t destlen)
+ const u8 *data_in, struct folio *dest_folio,
+ unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
switch (type) {
- case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
- start_byte, srclen, destlen);
- case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
- start_byte, srclen, destlen);
- case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
- start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
+ dest_pgoff, srclen, destlen);
+ case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_folio,
+ dest_pgoff, srclen, destlen);
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
+ dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_NONE:
default:
/*
@@ -134,271 +156,165 @@ static int compression_decompress(int type, struct list_head *ws,
}
}
-static int btrfs_decompress_bio(struct compressed_bio *cb);
-
-static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
- unsigned long disk_size)
+static void btrfs_free_compressed_folios(struct compressed_bio *cb)
{
- return sizeof(struct compressed_bio) +
- (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
+ for (unsigned int i = 0; i < cb->nr_folios; i++)
+ btrfs_free_compr_folio(cb->compressed_folios[i]);
+ kfree(cb->compressed_folios);
}
-static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
- u64 disk_start)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- const u32 csum_size = fs_info->csum_size;
- const u32 sectorsize = fs_info->sectorsize;
- struct page *page;
- unsigned int i;
- char *kaddr;
- u8 csum[BTRFS_CSUM_SIZE];
- struct compressed_bio *cb = bio->bi_private;
- u8 *cb_sum = cb->sums;
-
- if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
- return 0;
-
- shash->tfm = fs_info->csum_shash;
-
- for (i = 0; i < cb->nr_pages; i++) {
- u32 pg_offset;
- u32 bytes_left = PAGE_SIZE;
- page = cb->compressed_pages[i];
-
- /* Determine the remaining bytes inside the page first */
- if (i == cb->nr_pages - 1)
- bytes_left = cb->compressed_len - i * PAGE_SIZE;
-
- /* Hash through the page sector by sector */
- for (pg_offset = 0; pg_offset < bytes_left;
- pg_offset += sectorsize) {
- kaddr = kmap_atomic(page);
- crypto_shash_digest(shash, kaddr + pg_offset,
- sectorsize, csum);
- kunmap_atomic(kaddr);
-
- if (memcmp(&csum, cb_sum, csum_size) != 0) {
- btrfs_print_data_csum_error(inode, disk_start,
- csum, cb_sum, cb->mirror_num);
- if (btrfs_bio(bio)->device)
- btrfs_dev_stat_inc_and_print(
- btrfs_bio(bio)->device,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- return -EIO;
- }
- cb_sum += csum_size;
- disk_start += sectorsize;
- }
- }
- return 0;
-}
+static int btrfs_decompress_bio(struct compressed_bio *cb);
/*
- * Reduce bio and io accounting for a compressed_bio with its corresponding bio.
- *
- * Return true if there is no pending bio nor io.
- * Return false otherwise.
+ * Global cache of last unused pages for compression/decompression.
*/
-static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
- unsigned int bi_size = 0;
- bool last_io = false;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- /*
- * At endio time, bi_iter.bi_size doesn't represent the real bio size.
- * Thus here we have to iterate through all segments to grab correct
- * bio size.
- */
- bio_for_each_segment_all(bvec, bio, iter_all)
- bi_size += bvec->bv_len;
+static struct btrfs_compr_pool {
+ struct shrinker *shrinker;
+ spinlock_t lock;
+ struct list_head list;
+ int count;
+ int thresh;
+} compr_pool;
- if (bio->bi_status)
- cb->errors = 1;
+static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
+{
+ int ret;
- ASSERT(bi_size && bi_size <= cb->compressed_len);
- last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
- &cb->pending_sectors);
/*
- * Here we must wake up the possible error handler after all other
- * operations on @cb finished, or we can race with
- * finish_compressed_bio_*() which may free @cb.
+ * We must not read the values more than once if 'ret' gets expanded in
+ * the return statement so we don't accidentally return a negative
+ * number, even if the first condition finds it positive.
*/
- wake_up_var(cb);
+ ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);
- return last_io;
+ return ret > 0 ? ret : 0;
}
-static void finish_compressed_bio_read(struct compressed_bio *cb, struct bio *bio)
+static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
- unsigned int index;
- struct page *page;
+ LIST_HEAD(remove);
+ struct list_head *tmp, *next;
+ int freed;
+
+ if (compr_pool.count == 0)
+ return SHRINK_STOP;
- /* Release the compressed pages */
- for (index = 0; index < cb->nr_pages; index++) {
- page = cb->compressed_pages[index];
- page->mapping = NULL;
+ /* For now, just simply drain the whole list. */
+ spin_lock(&compr_pool.lock);
+ list_splice_init(&compr_pool.list, &remove);
+ freed = compr_pool.count;
+ compr_pool.count = 0;
+ spin_unlock(&compr_pool.lock);
+
+ list_for_each_safe(tmp, next, &remove) {
+ struct page *page = list_entry(tmp, struct page, lru);
+
+ ASSERT(page_ref_count(page) == 1);
put_page(page);
}
- /* Do io completion on the original bio */
- if (cb->errors) {
- bio_io_error(cb->orig_bio);
- } else {
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ return freed;
+}
- ASSERT(bio);
- ASSERT(!bio->bi_status);
- /*
- * We have verified the checksum already, set page checked so
- * the end_io handlers know about it
- */
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
- u64 bvec_start = page_offset(bvec->bv_page) +
- bvec->bv_offset;
-
- btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
- bvec->bv_page, bvec_start,
- bvec->bv_len);
- }
+/*
+ * Common wrappers for folio allocation used by the compression code.
+ */
+struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info)
+{
+ struct folio *folio = NULL;
+
+ /* For bs > ps cases, no cached folio pool for now. */
+ if (fs_info->block_min_order)
+ goto alloc;
- bio_endio(cb->orig_bio);
+ spin_lock(&compr_pool.lock);
+ if (compr_pool.count > 0) {
+ folio = list_first_entry(&compr_pool.list, struct folio, lru);
+ list_del_init(&folio->lru);
+ compr_pool.count--;
}
+ spin_unlock(&compr_pool.lock);
- /* Finally free the cb struct */
- kfree(cb->compressed_pages);
- kfree(cb);
+ if (folio)
+ return folio;
+
+alloc:
+ return folio_alloc(GFP_NOFS, fs_info->block_min_order);
}
-/* when we finish reading compressed pages from the disk, we
- * decompress them and then run the bio end_io routines on the
- * decompressed pages (in the inode address space).
- *
- * This allows the checksumming and other IO error handling routines
- * to work normally
- *
- * The compressed pages are freed here, and it must be run
- * in process context
- */
-static void end_compressed_bio_read(struct bio *bio)
+void btrfs_free_compr_folio(struct folio *folio)
{
- struct compressed_bio *cb = bio->bi_private;
- struct inode *inode;
- unsigned int mirror = btrfs_bio(bio)->mirror_num;
- int ret = 0;
+ bool do_free = false;
- if (!dec_and_test_compressed_bio(cb, bio))
- goto out;
+ /* The folio is from bs > ps fs, no cached pool for now. */
+ if (folio_order(folio))
+ goto free;
- /*
- * Record the correct mirror_num in cb->orig_bio so that
- * read-repair can work properly.
- */
- btrfs_bio(cb->orig_bio)->mirror_num = mirror;
- cb->mirror_num = mirror;
+ spin_lock(&compr_pool.lock);
+ if (compr_pool.count > compr_pool.thresh) {
+ do_free = true;
+ } else {
+ list_add(&folio->lru, &compr_pool.list);
+ compr_pool.count++;
+ }
+ spin_unlock(&compr_pool.lock);
- /*
- * Some IO in this cb have failed, just skip checksum as there
- * is no way it could be correct.
- */
- if (cb->errors == 1)
- goto csum_failed;
+ if (!do_free)
+ return;
- inode = cb->inode;
- ret = check_compressed_csum(BTRFS_I(inode), bio,
- bio->bi_iter.bi_sector << 9);
- if (ret)
- goto csum_failed;
+free:
+ ASSERT(folio_ref_count(folio) == 1);
+ folio_put(folio);
+}
- /* ok, we're the last bio for this extent, lets start
- * the decompression.
- */
- ret = btrfs_decompress_bio(cb);
+static void end_bbio_compressed_read(struct btrfs_bio *bbio)
+{
+ struct compressed_bio *cb = to_compressed_bio(bbio);
+ blk_status_t status = bbio->bio.bi_status;
-csum_failed:
- if (ret)
- cb->errors = 1;
- finish_compressed_bio_read(cb, bio);
-out:
- bio_put(bio);
+ if (!status)
+ status = errno_to_blk_status(btrfs_decompress_bio(cb));
+
+ btrfs_free_compressed_folios(cb);
+ btrfs_bio_end_io(cb->orig_bbio, status);
+ bio_put(&bbio->bio);
}
/*
* Clear the writeback bits on all of the file
* pages for a compressed write
*/
-static noinline void end_compressed_writeback(struct inode *inode,
- const struct compressed_bio *cb)
+static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- unsigned long index = cb->start >> PAGE_SHIFT;
- unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
- struct page *pages[16];
- unsigned long nr_pages = end_index - index + 1;
+ struct inode *inode = &cb->bbio.inode->vfs_inode;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ pgoff_t index = cb->start >> PAGE_SHIFT;
+ const pgoff_t end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
+ struct folio_batch fbatch;
int i;
int ret;
- if (cb->errors)
- mapping_set_error(inode->i_mapping, -EIO);
-
- while (nr_pages > 0) {
- ret = find_get_pages_contig(inode->i_mapping, index,
- min_t(unsigned long,
- nr_pages, ARRAY_SIZE(pages)), pages);
- if (ret == 0) {
- nr_pages -= 1;
- index += 1;
- continue;
- }
- for (i = 0; i < ret; i++) {
- if (cb->errors)
- SetPageError(pages[i]);
- btrfs_page_clamp_clear_writeback(fs_info, pages[i],
- cb->start, cb->len);
- put_page(pages[i]);
- }
- nr_pages -= ret;
- index += ret;
- }
- /* the inode may be gone now */
-}
-
-static void finish_compressed_bio_write(struct compressed_bio *cb)
-{
- struct inode *inode = cb->inode;
- unsigned int index;
+ ret = blk_status_to_errno(cb->bbio.bio.bi_status);
+ if (ret)
+ mapping_set_error(inode->i_mapping, ret);
- /*
- * Ok, we're the last bio for this extent, step one is to call back
- * into the FS and do all the end_io operations.
- */
- btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
- cb->start, cb->start + cb->len - 1,
- !cb->errors);
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ ret = filemap_get_folios(inode->i_mapping, &index, end_index,
+ &fbatch);
- end_compressed_writeback(inode, cb);
- /* Note, our inode could be gone now */
+ if (ret == 0)
+ return;
- /*
- * Release the compressed pages, these came from alloc_page and
- * are not attached to the inode at all
- */
- for (index = 0; index < cb->nr_pages; index++) {
- struct page *page = cb->compressed_pages[index];
+ for (i = 0; i < ret; i++) {
+ struct folio *folio = fbatch.folios[i];
- page->mapping = NULL;
- put_page(page);
+ btrfs_folio_clamp_clear_writeback(fs_info, folio,
+ cb->start, cb->len);
+ }
+ folio_batch_release(&fbatch);
}
-
- /* Finally free the cb struct */
- kfree(cb->compressed_pages);
- kfree(cb);
+ /* the inode may be gone now */
}
/*
@@ -408,86 +324,37 @@ static void finish_compressed_bio_write(struct compressed_bio *cb)
* This also calls the writeback end hooks for the file pages so that metadata
* and checksums can be updated in the file.
*/
-static void end_compressed_bio_write(struct bio *bio)
+static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
- struct compressed_bio *cb = bio->bi_private;
-
- if (!dec_and_test_compressed_bio(cb, bio))
- goto out;
-
- btrfs_record_physical_zoned(cb->inode, cb->start, bio);
-
- finish_compressed_bio_write(cb);
-out:
- bio_put(bio);
-}
+ struct compressed_bio *cb = to_compressed_bio(bbio);
-static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
- struct compressed_bio *cb,
- struct bio *bio, int mirror_num)
-{
- blk_status_t ret;
+ btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
+ cb->bbio.bio.bi_status == BLK_STS_OK);
- ASSERT(bio->bi_iter.bi_size);
- ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
- if (ret)
- return ret;
- ret = btrfs_map_bio(fs_info, bio, mirror_num);
- return ret;
+ if (cb->writeback)
+ end_compressed_writeback(cb);
+ /* Note, our inode could be gone now. */
+ btrfs_free_compressed_folios(cb);
+ bio_put(&cb->bbio.bio);
}
-/*
- * Allocate a compressed_bio, which will be used to read/write on-disk
- * (aka, compressed) * data.
- *
- * @cb: The compressed_bio structure, which records all the needed
- * information to bind the compressed data to the uncompressed
- * page cache.
- * @disk_byten: The logical bytenr where the compressed data will be read
- * from or written to.
- * @endio_func: The endio function to call after the IO for compressed data
- * is finished.
- * @next_stripe_start: Return value of logical bytenr of where next stripe starts.
- * Let the caller know to only fill the bio up to the stripe
- * boundary.
- */
-
-
-static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
- unsigned int opf, bio_end_io_t endio_func,
- u64 *next_stripe_start)
+static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
- struct btrfs_io_geometry geom;
- struct extent_map *em;
- struct bio *bio;
- int ret;
-
- bio = btrfs_bio_alloc(BIO_MAX_VECS);
-
- bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- bio->bi_opf = opf;
- bio->bi_private = cb;
- bio->bi_end_io = endio_func;
-
- em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
- if (IS_ERR(em)) {
- bio_put(bio);
- return ERR_CAST(em);
- }
-
- if (bio_op(bio) == REQ_OP_ZONE_APPEND)
- bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);
-
- ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
- free_extent_map(em);
- if (ret < 0) {
- bio_put(bio);
- return ERR_PTR(ret);
+ struct bio *bio = &cb->bbio.bio;
+ u32 offset = 0;
+ unsigned int findex = 0;
+
+ while (offset < cb->compressed_len) {
+ struct folio *folio = cb->compressed_folios[findex];
+ u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
+ int ret;
+
+ /* Maximum compressed extent is smaller than bio size limit. */
+ ret = bio_add_folio(bio, folio, len, 0);
+ ASSERT(ret);
+ offset += len;
+ findex++;
}
- *next_stripe_start = disk_bytenr + geom.len;
-
- return bio;
}
/*
@@ -499,141 +366,33 @@ static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_byte
* This also checksums the file bytes and gets things ready for
* the end io hooks.
*/
-blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
- unsigned int len, u64 disk_start,
- unsigned int compressed_len,
- struct page **compressed_pages,
- unsigned int nr_pages,
- unsigned int write_flags,
- struct cgroup_subsys_state *blkcg_css)
+void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
+ struct folio **compressed_folios,
+ unsigned int nr_folios,
+ blk_opf_t write_flags,
+ bool writeback)
{
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct bio *bio = NULL;
struct compressed_bio *cb;
- u64 cur_disk_bytenr = disk_start;
- u64 next_stripe_start;
- blk_status_t ret;
- int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
- const bool use_append = btrfs_use_zone_append(inode, disk_start);
- const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
-
- ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
- IS_ALIGNED(len, fs_info->sectorsize));
- cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
- if (!cb)
- return BLK_STS_RESOURCE;
- refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
- cb->errors = 0;
- cb->inode = &inode->vfs_inode;
- cb->start = start;
- cb->len = len;
- cb->mirror_num = 0;
- cb->compressed_pages = compressed_pages;
- cb->compressed_len = compressed_len;
- cb->orig_bio = NULL;
- cb->nr_pages = nr_pages;
-
- while (cur_disk_bytenr < disk_start + compressed_len) {
- u64 offset = cur_disk_bytenr - disk_start;
- unsigned int index = offset >> PAGE_SHIFT;
- unsigned int real_size;
- unsigned int added;
- struct page *page = compressed_pages[index];
- bool submit = false;
-
- /* Allocate new bio if submitted or not yet allocated */
- if (!bio) {
- bio = alloc_compressed_bio(cb, cur_disk_bytenr,
- bio_op | write_flags, end_compressed_bio_write,
- &next_stripe_start);
- if (IS_ERR(bio)) {
- ret = errno_to_blk_status(PTR_ERR(bio));
- bio = NULL;
- goto finish_cb;
- }
- }
- /*
- * We should never reach next_stripe_start start as we will
- * submit comp_bio when reach the boundary immediately.
- */
- ASSERT(cur_disk_bytenr != next_stripe_start);
- /*
- * We have various limits on the real read size:
- * - stripe boundary
- * - page boundary
- * - compressed length boundary
- */
- real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
- real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
- real_size = min_t(u64, real_size, compressed_len - offset);
- ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
-
- if (use_append)
- added = bio_add_zone_append_page(bio, page, real_size,
- offset_in_page(offset));
- else
- added = bio_add_page(bio, page, real_size,
- offset_in_page(offset));
- /* Reached zoned boundary */
- if (added == 0)
- submit = true;
-
- cur_disk_bytenr += added;
- /* Reached stripe boundary */
- if (cur_disk_bytenr == next_stripe_start)
- submit = true;
-
- /* Finished the range */
- if (cur_disk_bytenr == disk_start + compressed_len)
- submit = true;
-
- if (submit) {
- if (!skip_sum) {
- ret = btrfs_csum_one_bio(inode, bio, start, 1);
- if (ret)
- goto finish_cb;
- }
-
- ret = submit_compressed_bio(fs_info, cb, bio, 0);
- if (ret)
- goto finish_cb;
- bio = NULL;
- }
- cond_resched();
- }
- if (blkcg_css)
- kthread_associate_blkcg(NULL);
-
- return 0;
-
-finish_cb:
- if (bio) {
- bio->bi_status = ret;
- bio_endio(bio);
- }
- /* Last byte of @cb is submitted, endio will free @cb */
- if (cur_disk_bytenr == disk_start + compressed_len)
- return ret;
-
- wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
- (disk_start + compressed_len - cur_disk_bytenr) >>
- fs_info->sectorsize_bits);
- /*
- * Even with previous bio ended, we should still have io not yet
- * submitted, thus need to finish manually.
- */
- ASSERT(refcount_read(&cb->pending_sectors));
- /* Now we are the only one referring @cb, can finish it safely. */
- finish_compressed_bio_write(cb);
- return ret;
-}
-
-static u64 bio_end_offset(struct bio *bio)
-{
- struct bio_vec *last = bio_last_bvec_all(bio);
-
- return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
+ ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
+ ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
+
+ cb = alloc_compressed_bio(inode, ordered->file_offset,
+ REQ_OP_WRITE | write_flags,
+ end_bbio_compressed_write);
+ cb->start = ordered->file_offset;
+ cb->len = ordered->num_bytes;
+ cb->compressed_folios = compressed_folios;
+ cb->compressed_len = ordered->disk_num_bytes;
+ cb->writeback = writeback;
+ cb->nr_folios = nr_folios;
+ cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
+ cb->bbio.ordered = ordered;
+ btrfs_add_compressed_bio_folios(cb);
+
+ btrfs_submit_bbio(&cb->bbio, 0);
}
/*
@@ -649,14 +408,16 @@ static u64 bio_end_offset(struct bio *bio)
*/
static noinline int add_ra_bio_pages(struct inode *inode,
u64 compressed_end,
- struct compressed_bio *cb)
+ struct compressed_bio *cb,
+ int *memstall, unsigned long *pflags)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- unsigned long end_index;
- u64 cur = bio_end_offset(cb->orig_bio);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ pgoff_t end_index;
+ struct bio *orig_bio = &cb->orig_bbio->bio;
+ u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
u64 isize = i_size_read(inode);
int ret;
- struct page *page;
+ struct folio *folio;
struct extent_map *em;
struct address_space *mapping = inode->i_mapping;
struct extent_map_tree *em_tree;
@@ -676,22 +437,30 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* This makes readahead less effective, so here disable readahead for
* subpage for now, until full compressed write is supported.
*/
- if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
+ if (fs_info->sectorsize < PAGE_SIZE)
+ return 0;
+
+ /* For bs > ps cases, we don't support readahead for compressed folios for now. */
+ if (fs_info->block_min_order)
return 0;
end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
while (cur < compressed_end) {
- u64 page_end;
- u64 pg_index = cur >> PAGE_SHIFT;
+ pgoff_t page_end;
+ pgoff_t pg_index = cur >> PAGE_SHIFT;
u32 add_size;
if (pg_index > end_index)
break;
- page = xa_load(&mapping->i_pages, pg_index);
- if (page && !xa_is_value(page)) {
- sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
+ folio = filemap_get_folio(mapping, pg_index);
+ if (!IS_ERR(folio)) {
+ u64 folio_sz = folio_size(folio);
+ u64 offset = offset_in_folio(folio, cur);
+
+ folio_put(folio);
+ sectors_missed += (folio_sz - offset) >>
fs_info->sectorsize_bits;
/* Beyond threshold, no need to continue */
@@ -702,33 +471,38 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* Jump to next page start as we already have page for
* current offset.
*/
- cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+ cur += (folio_sz - offset);
continue;
}
- page = __page_cache_alloc(mapping_gfp_constraint(mapping,
- ~__GFP_FS));
- if (!page)
+ folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS),
+ 0, NULL);
+ if (!folio)
break;
- if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
- put_page(page);
+ if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
/* There is already a page, skip to page end */
- cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+ cur += folio_size(folio);
+ folio_put(folio);
continue;
}
- ret = set_page_extent_mapped(page);
+ if (!*memstall && folio_test_workingset(folio)) {
+ psi_memstall_enter(pflags);
+ *memstall = 1;
+ }
+
+ ret = set_folio_extent_mapped(folio);
if (ret < 0) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
- page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
- lock_extent(tree, cur, page_end);
+ page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
+ btrfs_lock_extent(tree, cur, page_end, NULL);
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
+ em = btrfs_lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
read_unlock(&em_tree->lock);
/*
@@ -737,33 +511,33 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* to this compressed extent on disk.
*/
if (!em || cur < em->start ||
- (cur + fs_info->sectorsize > extent_map_end(em)) ||
- (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
- free_extent_map(em);
- unlock_extent(tree, cur, page_end);
- unlock_page(page);
- put_page(page);
+ (cur + fs_info->sectorsize > btrfs_extent_map_end(em)) ||
+ (btrfs_extent_map_block_start(em) >> SECTOR_SHIFT) !=
+ orig_bio->bi_iter.bi_sector) {
+ btrfs_free_extent_map(em);
+ btrfs_unlock_extent(tree, cur, page_end, NULL);
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
- free_extent_map(em);
+ add_size = min(em->start + em->len, page_end + 1) - cur;
+ btrfs_free_extent_map(em);
+ btrfs_unlock_extent(tree, cur, page_end, NULL);
- if (page->index == end_index) {
- size_t zero_offset = offset_in_page(isize);
+ if (folio_contains(folio, end_index)) {
+ size_t zero_offset = offset_in_folio(folio, isize);
if (zero_offset) {
int zeros;
- zeros = PAGE_SIZE - zero_offset;
- memzero_page(page, zero_offset, zeros);
- flush_dcache_page(page);
+ zeros = folio_size(folio) - zero_offset;
+ folio_zero_range(folio, zero_offset, zeros);
}
}
- add_size = min(em->start + em->len, page_end + 1) - cur;
- ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
- if (ret != add_size) {
- unlock_extent(tree, cur, page_end);
- unlock_page(page);
- put_page(page);
+ if (!bio_add_folio(orig_bio, folio, add_size,
+ offset_in_folio(folio, cur))) {
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
/*
@@ -772,8 +546,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* subpage::readers and to unlock the page.
*/
if (fs_info->sectorsize < PAGE_SIZE)
- btrfs_subpage_start_reader(fs_info, page, cur, add_size);
- put_page(page);
+ btrfs_folio_set_lock(fs_info, folio, cur, add_size);
+ folio_put(folio);
cur += add_size;
}
return 0;
@@ -790,187 +564,83 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* After the compressed pages are read, we copy the bytes into the
* bio we were passed and then call the bio end_io calls
*/
-blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags)
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_map_tree *em_tree;
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct compressed_bio *cb;
unsigned int compressed_len;
- unsigned int nr_pages;
- unsigned int pg_index;
- struct bio *comp_bio = NULL;
- const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
- u64 cur_disk_byte = disk_bytenr;
- u64 next_stripe_start;
- u64 file_offset;
+ u64 file_offset = bbio->file_offset;
u64 em_len;
u64 em_start;
struct extent_map *em;
- blk_status_t ret = BLK_STS_RESOURCE;
- int faili = 0;
- u8 *sums;
-
- em_tree = &BTRFS_I(inode)->extent_tree;
-
- file_offset = bio_first_bvec_all(bio)->bv_offset +
- page_offset(bio_first_page_all(bio));
+ unsigned long pflags;
+ int memstall = 0;
+ blk_status_t status;
+ int ret;
/* we need the actual starting offset of this extent in the file */
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
+ em = btrfs_lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
read_unlock(&em_tree->lock);
- if (!em)
- return BLK_STS_IOERR;
-
- ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
- compressed_len = em->block_len;
- cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
- if (!cb)
+ if (!em) {
+ status = BLK_STS_IOERR;
goto out;
+ }
- refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
- cb->errors = 0;
- cb->inode = inode;
- cb->mirror_num = mirror_num;
- sums = cb->sums;
+ ASSERT(btrfs_extent_map_is_compressed(em));
+ compressed_len = em->disk_num_bytes;
- cb->start = em->orig_start;
+ cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
+ end_bbio_compressed_read);
+
+ cb->start = em->start - em->offset;
em_len = em->len;
em_start = em->start;
- free_extent_map(em);
- em = NULL;
-
- cb->len = bio->bi_iter.bi_size;
+ cb->len = bbio->bio.bi_iter.bi_size;
cb->compressed_len = compressed_len;
- cb->compress_type = extent_compress_type(bio_flags);
- cb->orig_bio = bio;
-
- nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
- cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
- GFP_NOFS);
- if (!cb->compressed_pages)
- goto fail1;
-
- for (pg_index = 0; pg_index < nr_pages; pg_index++) {
- cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
- if (!cb->compressed_pages[pg_index]) {
- faili = pg_index - 1;
- ret = BLK_STS_RESOURCE;
- goto fail2;
- }
- }
- faili = nr_pages - 1;
- cb->nr_pages = nr_pages;
+ cb->compress_type = btrfs_extent_map_compression(em);
+ cb->orig_bbio = bbio;
+ cb->bbio.csum_search_commit_root = bbio->csum_search_commit_root;
- add_ra_bio_pages(inode, em_start + em_len, cb);
+ btrfs_free_extent_map(em);
- /* include any pages we added in add_ra-bio_pages */
- cb->len = bio->bi_iter.bi_size;
-
- while (cur_disk_byte < disk_bytenr + compressed_len) {
- u64 offset = cur_disk_byte - disk_bytenr;
- unsigned int index = offset >> PAGE_SHIFT;
- unsigned int real_size;
- unsigned int added;
- struct page *page = cb->compressed_pages[index];
- bool submit = false;
-
- /* Allocate new bio if submitted or not yet allocated */
- if (!comp_bio) {
- comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
- REQ_OP_READ, end_compressed_bio_read,
- &next_stripe_start);
- if (IS_ERR(comp_bio)) {
- ret = errno_to_blk_status(PTR_ERR(comp_bio));
- comp_bio = NULL;
- goto finish_cb;
- }
- }
- /*
- * We should never reach next_stripe_start start as we will
- * submit comp_bio when reach the boundary immediately.
- */
- ASSERT(cur_disk_byte != next_stripe_start);
- /*
- * We have various limit on the real read size:
- * - stripe boundary
- * - page boundary
- * - compressed length boundary
- */
- real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
- real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
- real_size = min_t(u64, real_size, compressed_len - offset);
- ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
-
- added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
- /*
- * Maximum compressed extent is smaller than bio size limit,
- * thus bio_add_page() should always success.
- */
- ASSERT(added == real_size);
- cur_disk_byte += added;
-
- /* Reached stripe boundary, need to submit */
- if (cur_disk_byte == next_stripe_start)
- submit = true;
-
- /* Has finished the range, need to submit */
- if (cur_disk_byte == disk_bytenr + compressed_len)
- submit = true;
+ cb->nr_folios = DIV_ROUND_UP(compressed_len, btrfs_min_folio_size(fs_info));
+ cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS);
+ if (!cb->compressed_folios) {
+ status = BLK_STS_RESOURCE;
+ goto out_free_bio;
+ }
- if (submit) {
- unsigned int nr_sectors;
+ ret = btrfs_alloc_folio_array(cb->nr_folios, fs_info->block_min_order,
+ cb->compressed_folios);
+ if (ret) {
+ status = BLK_STS_RESOURCE;
+ goto out_free_compressed_pages;
+ }
- ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
- if (ret)
- goto finish_cb;
+ add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
+ &pflags);
- nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
- fs_info->sectorsize);
- sums += fs_info->csum_size * nr_sectors;
+ /* include any pages we added in add_ra_bio_pages() */
+ cb->len = bbio->bio.bi_iter.bi_size;
+ cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
+ btrfs_add_compressed_bio_folios(cb);
- ret = submit_compressed_bio(fs_info, cb, comp_bio, mirror_num);
- if (ret)
- goto finish_cb;
- comp_bio = NULL;
- }
- }
- return 0;
+ if (memstall)
+ psi_memstall_leave(&pflags);
-fail2:
- while (faili >= 0) {
- __free_page(cb->compressed_pages[faili]);
- faili--;
- }
+ btrfs_submit_bbio(&cb->bbio, 0);
+ return;
- kfree(cb->compressed_pages);
-fail1:
- kfree(cb);
+out_free_compressed_pages:
+ kfree(cb->compressed_folios);
+out_free_bio:
+ bio_put(&cb->bbio.bio);
out:
- free_extent_map(em);
- return ret;
-finish_cb:
- if (comp_bio) {
- comp_bio->bi_status = ret;
- bio_endio(comp_bio);
- }
- /* All bytes of @cb is submitted, endio will free @cb */
- if (cur_disk_byte == disk_bytenr + compressed_len)
- return ret;
-
- wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
- (disk_bytenr + compressed_len - cur_disk_byte) >>
- fs_info->sectorsize_bits);
- /*
- * Even with previous bio ended, we should still have io not yet
- * submitted, thus need to finish @cb manually.
- */
- ASSERT(refcount_read(&cb->pending_sectors));
- /* Now we are the only one referring @cb, can finish it safely. */
- finish_compressed_bio_read(cb, NULL);
- return ret;
+ btrfs_bio_end_io(bbio, status);
}
/*
@@ -1020,8 +690,6 @@ struct heuristic_ws {
struct list_head list;
};
-static struct workspace_manager heuristic_wsm;
-
static void free_heuristic_ws(struct list_head *ws)
{
struct heuristic_ws *workspace;
@@ -1034,7 +702,7 @@ static void free_heuristic_ws(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *alloc_heuristic_ws(unsigned int level)
+static struct list_head *alloc_heuristic_ws(struct btrfs_fs_info *fs_info)
{
struct heuristic_ws *ws;
@@ -1061,11 +729,9 @@ fail:
return ERR_PTR(-ENOMEM);
}
-const struct btrfs_compress_op btrfs_heuristic_compress = {
- .workspace_manager = &heuristic_wsm,
-};
+const struct btrfs_compress_levels btrfs_heuristic_compress = { 0 };
-static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+static const struct btrfs_compress_levels * const btrfs_compress_levels[] = {
/* The heuristic is represented as compression type 0 */
&btrfs_heuristic_compress,
&btrfs_zlib_compress,
@@ -1073,13 +739,13 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zstd_compress,
};
-static struct list_head *alloc_workspace(int type, unsigned int level)
+static struct list_head *alloc_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
- case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
- case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
- case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
+ case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(fs_info);
+ case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(fs_info, level);
+ case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(fs_info);
+ case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(fs_info, level);
default:
/*
* This can't happen, the type is validated several times
@@ -1105,44 +771,58 @@ static void free_workspace(int type, struct list_head *ws)
}
}
-static void btrfs_init_workspace_manager(int type)
+static int alloc_workspace_manager(struct btrfs_fs_info *fs_info,
+ enum btrfs_compression_type type)
{
- struct workspace_manager *wsm;
+ struct workspace_manager *gwsm;
struct list_head *workspace;
- wsm = btrfs_compress_op[type]->workspace_manager;
- INIT_LIST_HEAD(&wsm->idle_ws);
- spin_lock_init(&wsm->ws_lock);
- atomic_set(&wsm->total_ws, 0);
- init_waitqueue_head(&wsm->ws_wait);
+ ASSERT(fs_info->compr_wsm[type] == NULL);
+ gwsm = kzalloc(sizeof(*gwsm), GFP_KERNEL);
+ if (!gwsm)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&gwsm->idle_ws);
+ spin_lock_init(&gwsm->ws_lock);
+ atomic_set(&gwsm->total_ws, 0);
+ init_waitqueue_head(&gwsm->ws_wait);
+ fs_info->compr_wsm[type] = gwsm;
/*
* Preallocate one workspace for each compression type so we can
* guarantee forward progress in the worst case
*/
- workspace = alloc_workspace(type, 0);
+ workspace = alloc_workspace(fs_info, type, 0);
if (IS_ERR(workspace)) {
- pr_warn(
- "BTRFS: cannot preallocate compression workspace, will try later\n");
+ btrfs_warn(fs_info,
+ "cannot preallocate compression workspace for %s, will try later",
+ btrfs_compress_type2str(type));
} else {
- atomic_set(&wsm->total_ws, 1);
- wsm->free_ws = 1;
- list_add(workspace, &wsm->idle_ws);
+ atomic_set(&gwsm->total_ws, 1);
+ gwsm->free_ws = 1;
+ list_add(workspace, &gwsm->idle_ws);
}
+ return 0;
}
-static void btrfs_cleanup_workspace_manager(int type)
+static void free_workspace_manager(struct btrfs_fs_info *fs_info,
+ enum btrfs_compression_type type)
{
- struct workspace_manager *wsman;
struct list_head *ws;
-
- wsman = btrfs_compress_op[type]->workspace_manager;
- while (!list_empty(&wsman->idle_ws)) {
- ws = wsman->idle_ws.next;
+ struct workspace_manager *gwsm = fs_info->compr_wsm[type];
+
+ /* ZSTD uses its own workspace manager and should never get here. */
+ ASSERT(type != BTRFS_COMPRESS_ZSTD && type < BTRFS_NR_COMPRESS_TYPES);
+ if (!gwsm)
+ return;
+ fs_info->compr_wsm[type] = NULL;
+ while (!list_empty(&gwsm->idle_ws)) {
+ ws = gwsm->idle_ws.next;
list_del(ws);
free_workspace(type, ws);
- atomic_dec(&wsman->total_ws);
+ atomic_dec(&gwsm->total_ws);
}
+ kfree(gwsm);
}
/*
@@ -1151,9 +831,9 @@ static void btrfs_cleanup_workspace_manager(int type)
* Preallocation makes a forward progress guarantees and we do not return
* errors.
*/
-struct list_head *btrfs_get_workspace(int type, unsigned int level)
+struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
- struct workspace_manager *wsm;
+ struct workspace_manager *wsm = fs_info->compr_wsm[type];
struct list_head *workspace;
int cpus = num_online_cpus();
unsigned nofs_flag;
@@ -1163,7 +843,7 @@ struct list_head *btrfs_get_workspace(int type, unsigned int level)
wait_queue_head_t *ws_wait;
int *free_ws;
- wsm = btrfs_compress_op[type]->workspace_manager;
+ ASSERT(wsm);
idle_ws = &wsm->idle_ws;
ws_lock = &wsm->ws_lock;
total_ws = &wsm->total_ws;
@@ -1199,7 +879,7 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = alloc_workspace(type, level);
+ workspace = alloc_workspace(fs_info, type, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -1221,22 +901,22 @@ again:
/* once per minute */ 60 * HZ,
/* no burst */ 1);
- if (__ratelimit(&_rs)) {
- pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
- }
+ if (__ratelimit(&_rs))
+ btrfs_warn(fs_info,
+ "no compression workspaces, low memory, retrying");
}
goto again;
}
return workspace;
}
-static struct list_head *get_workspace(int type, int level)
+static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
- case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
- case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
- case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
+ case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(fs_info, type, level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(fs_info, level);
+ case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(fs_info, type, level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(fs_info, level);
default:
/*
* This can't happen, the type is validated several times
@@ -1250,21 +930,21 @@ static struct list_head *get_workspace(int type, int level)
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-void btrfs_put_workspace(int type, struct list_head *ws)
+void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
{
- struct workspace_manager *wsm;
+ struct workspace_manager *gwsm = fs_info->compr_wsm[type];
struct list_head *idle_ws;
spinlock_t *ws_lock;
atomic_t *total_ws;
wait_queue_head_t *ws_wait;
int *free_ws;
- wsm = btrfs_compress_op[type]->workspace_manager;
- idle_ws = &wsm->idle_ws;
- ws_lock = &wsm->ws_lock;
- total_ws = &wsm->total_ws;
- ws_wait = &wsm->ws_wait;
- free_ws = &wsm->free_ws;
+ ASSERT(gwsm);
+ idle_ws = &gwsm->idle_ws;
+ ws_lock = &gwsm->ws_lock;
+ total_ws = &gwsm->total_ws;
+ ws_wait = &gwsm->ws_wait;
+ free_ws = &gwsm->free_ws;
spin_lock(ws_lock);
if (*free_ws <= num_online_cpus()) {
@@ -1281,13 +961,13 @@ wake:
cond_wake_up(ws_wait);
}
-static void put_workspace(int type, struct list_head *ws)
+static void put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
- case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
- case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
- case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
+ case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(fs_info, type, ws);
+ case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(fs_info, type, ws);
+ case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(fs_info, type, ws);
+ case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(fs_info, ws);
default:
/*
* This can't happen, the type is validated several times
@@ -1301,19 +981,52 @@ static void put_workspace(int type, struct list_head *ws)
* Adjust @level according to the limits of the compression algorithm or
* fallback to default
*/
-static unsigned int btrfs_compress_set_level(int type, unsigned level)
+static int btrfs_compress_set_level(unsigned int type, int level)
{
- const struct btrfs_compress_op *ops = btrfs_compress_op[type];
+ const struct btrfs_compress_levels *levels = btrfs_compress_levels[type];
if (level == 0)
- level = ops->default_level;
+ level = levels->default_level;
else
- level = min(level, ops->max_level);
+ level = clamp(level, levels->min_level, levels->max_level);
return level;
}
/*
+ * Check whether the @level is within the valid range for the given type.
+ */
+bool btrfs_compress_level_valid(unsigned int type, int level)
+{
+ const struct btrfs_compress_levels *levels = btrfs_compress_levels[type];
+
+ return levels->min_level <= level && level <= levels->max_level;
+}
+
+/* Wrapper around filemap_get_folio(), with extra error message. */
+int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
+ struct folio **in_folio_ret)
+{
+ struct folio *in_folio;
+
+ /*
+ * The compressed write path should have the folio locked already, thus
+ * we only need to grab one reference.
+ */
+ in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
+ if (IS_ERR(in_folio)) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_crit(inode->root->fs_info,
+ "failed to get page cache, root %lld ino %llu file offset %llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode), start);
+ return -ENOENT;
+ }
+ *in_folio_ret = in_folio;
+ return 0;
+}
+
+/*
* Given an address space and start and length, compress the bytes into @pages
* that are allocated on demand.
*
@@ -1322,82 +1035,159 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
* - compression algo are 0-3
* - the level are bits 4-7
*
- * @out_pages is an in/out parameter, holds maximum number of pages to allocate
- * and returns number of actually allocated pages
+ * @out_folios is an in/out parameter, holds maximum number of folios to allocate
+ * and returns number of actually allocated folios
*
* @total_in is used to return the number of bytes actually read. It
* may be smaller than the input length if we had to exit early because we
- * ran out of room in the pages array or because we cross the
+ * ran out of room in the folios array or because we cross the
* max_out threshold.
*
* @total_out is an in/out parameter, must be set to the input length and will
* be also used to return the total number of compressed bytes
*/
-int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
- u64 start, struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
- int type = btrfs_compress_type(type_level);
- int level = btrfs_compress_level(type_level);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const unsigned long orig_len = *total_out;
struct list_head *workspace;
int ret;
level = btrfs_compress_set_level(type, level);
- workspace = get_workspace(type, level);
- ret = compression_compress_pages(type, workspace, mapping, start, pages,
- out_pages, total_in, total_out);
- put_workspace(type, workspace);
+ workspace = get_workspace(fs_info, type, level);
+ ret = compression_compress_pages(type, workspace, inode, start, folios,
+ out_folios, total_in, total_out);
+ /* The total read-in bytes should be no larger than the input. */
+ ASSERT(*total_in <= orig_len);
+ put_workspace(fs_info, type, workspace);
return ret;
}
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
struct list_head *workspace;
int ret;
int type = cb->compress_type;
- workspace = get_workspace(type, 0);
- ret = compression_decompress_bio(type, workspace, cb);
- put_workspace(type, workspace);
+ workspace = get_workspace(fs_info, type, 0);
+ ret = compression_decompress_bio(workspace, cb);
+ put_workspace(fs_info, type, workspace);
+ if (!ret)
+ zero_fill_bio(&cb->orig_bbio->bio);
return ret;
}
/*
* a less complex decompression routine. Our compressed data fits in a
* single page, and we want to read a single page out of it.
- * start_byte tells us the offset into the compressed data we're interested in
+ * dest_pgoff tells us the offset into the destination folio where we write the
+ * decompressed data.
*/
-int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
- unsigned long start_byte, size_t srclen, size_t destlen)
+int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
+ unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
struct list_head *workspace;
+ const u32 sectorsize = fs_info->sectorsize;
int ret;
- workspace = get_workspace(type, 0);
- ret = compression_decompress(type, workspace, data_in, dest_page,
- start_byte, srclen, destlen);
- put_workspace(type, workspace);
+ /*
+ * The full destination folio range should not exceed the folio size.
+ * And the @destlen should not exceed sectorsize, as this is only called for
+ * inline file extents, which should not exceed sectorsize.
+ */
+ ASSERT(dest_pgoff + destlen <= folio_size(dest_folio) && destlen <= sectorsize);
+
+ workspace = get_workspace(fs_info, type, 0);
+ ret = compression_decompress(type, workspace, data_in, dest_folio,
+ dest_pgoff, srclen, destlen);
+ put_workspace(fs_info, type, workspace);
return ret;
}
-void __init btrfs_init_compress(void)
+int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info)
{
- btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
- btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
- btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
- zstd_init_workspace_manager();
+ int ret;
+
+ ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_NONE);
+ if (ret < 0)
+ goto error;
+ ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_ZLIB);
+ if (ret < 0)
+ goto error;
+ ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_LZO);
+ if (ret < 0)
+ goto error;
+ ret = zstd_alloc_workspace_manager(fs_info);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ btrfs_free_compress_wsm(fs_info);
+ return ret;
+}
+
+void btrfs_free_compress_wsm(struct btrfs_fs_info *fs_info)
+{
+ free_workspace_manager(fs_info, BTRFS_COMPRESS_NONE);
+ free_workspace_manager(fs_info, BTRFS_COMPRESS_ZLIB);
+ free_workspace_manager(fs_info, BTRFS_COMPRESS_LZO);
+ zstd_free_workspace_manager(fs_info);
+}
+
+int __init btrfs_init_compress(void)
+{
+ if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
+ offsetof(struct compressed_bio, bbio.bio),
+ BIOSET_NEED_BVECS))
+ return -ENOMEM;
+
+ compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
+ if (!compr_pool.shrinker)
+ return -ENOMEM;
+
+ spin_lock_init(&compr_pool.lock);
+ INIT_LIST_HEAD(&compr_pool.list);
+ compr_pool.count = 0;
+ /* 128K / 4K = 32 pages per max compressed extent, which for 8 threads is 256 pages. */
+ compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
+ compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
+ compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
+ compr_pool.shrinker->batch = 32;
+ compr_pool.shrinker->seeks = DEFAULT_SEEKS;
+ shrinker_register(compr_pool.shrinker);
+
+ return 0;
}
void __cold btrfs_exit_compress(void)
{
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
- zstd_cleanup_workspace_manager();
+ /* For now scan drains all pages and does not touch the parameters. */
+ btrfs_compr_pool_scan(NULL, NULL);
+ shrinker_free(compr_pool.shrinker);
+
+ bioset_exit(&btrfs_compressed_bioset);
+}
+
+/*
+ * The bvec is a single page bvec from a bio that contains folios from a filemap.
+ *
+ * The folio may be a large one, and if bv_page is not the head page of a
+ * large folio, then page->index is unreliable.
+ *
+ * Thus we need this helper to grab the proper file offset.
+ */
+static u64 file_offset_from_bvec(const struct bio_vec *bvec)
+{
+ const struct page *page = bvec->bv_page;
+ const struct folio *folio = page_folio(page);
+
+ return (page_pgoff(folio, page) << PAGE_SHIFT) + bvec->bv_offset;
}
/*
@@ -1433,7 +1223,7 @@ void __cold btrfs_exit_compress(void)
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed)
{
- struct bio *orig_bio = cb->orig_bio;
+ struct bio *orig_bio = &cb->orig_bbio->bio;
/* Offset inside the full decompressed extent */
u32 cur_offset;
@@ -1445,13 +1235,14 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
u32 copy_start;
/* Offset inside the full decompressed extent */
u32 bvec_offset;
+ void *kaddr;
bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
/*
* cb->start may underflow, but subtracting that value can still
* give us correct offset inside the full decompressed extent.
*/
- bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
+ bvec_offset = file_offset_from_bvec(&bvec) - cb->start;
/* Haven't reached the bvec range, exit */
if (decompressed + buf_len <= bvec_offset)
@@ -1467,11 +1258,12 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
* @buf + @buf_len.
*/
ASSERT(copy_start - decompressed < buf_len);
- memcpy_to_page(bvec.bv_page, bvec.bv_offset,
- buf + copy_start - decompressed, copy_len);
- flush_dcache_page(bvec.bv_page);
- cur_offset += copy_len;
+ kaddr = bvec_kmap_local(&bvec);
+ memcpy(kaddr, buf + copy_start - decompressed, copy_len);
+ kunmap_local(kaddr);
+
+ cur_offset += copy_len;
bio_advance(orig_bio, copy_len);
/* Finished the bio */
if (!orig_bio->bi_iter.bi_size)
@@ -1501,7 +1293,7 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
#define ENTROPY_LVL_HIGH (80)
/*
- * For increasead precision in shannon_entropy calculation,
+ * For increased precision in shannon_entropy calculation,
* let's do pow(n, M) to save more digits after comma:
*
* - maximum int bit length is 64
@@ -1727,7 +1519,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
struct heuristic_ws *ws)
{
struct page *page;
- u64 index, index_end;
+ pgoff_t index, index_end;
u32 i, curr_sample_pos;
u8 *in_data;
@@ -1747,7 +1539,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
index_end = end >> PAGE_SHIFT;
/* Don't miss unaligned end */
- if (!IS_ALIGNED(end, PAGE_SIZE))
+ if (!PAGE_ALIGNED(end))
index_end++;
curr_sample_pos = 0;
@@ -1778,11 +1570,6 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
/*
* Compression heuristic.
*
- * For now is's a naive and optimistic 'return true', we'll extend the logic to
- * quickly (compared to direct compression) detect data characteristics
- * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
- * data.
- *
* The following types of analysis can be performed:
* - detect mostly zero data
* - detect data with low "byte set" size (text, etc)
@@ -1790,9 +1577,10 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
*
* Return non-zero if the compression should be done, 0 otherwise.
*/
-int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
+int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
{
- struct list_head *ws_list = get_workspace(0, 0);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct list_head *ws_list = get_workspace(fs_info, 0, 0);
struct heuristic_ws *ws;
u32 i;
u8 byte;
@@ -1800,7 +1588,7 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
ws = list_entry(ws_list, struct heuristic_ws, list);
- heuristic_collect_sample(inode, start, end, ws);
+ heuristic_collect_sample(&inode->vfs_inode, start, end, ws);
if (sample_repeated_patterns(ws)) {
ret = 1;
@@ -1861,29 +1649,34 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
}
out:
- put_workspace(0, ws_list);
+ put_workspace(fs_info, 0, ws_list);
return ret;
}
/*
- * Convert the compression suffix (eg. after "zlib" starting with ":") to
- * level, unrecognized string will set the default level
+ * Convert the compression suffix (eg. after "zlib" starting with ":") to level.
+ *
+ * If the resulting level exceeds the algo's supported levels, it will be clamped.
+ *
+ * Return <0 if no valid string can be found.
+ * Return 0 if everything is fine.
*/
-unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
+int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret)
{
- unsigned int level = 0;
+ int level = 0;
int ret;
- if (!type)
+ if (!type) {
+ *level_ret = btrfs_compress_set_level(type, level);
return 0;
+ }
if (str[0] == ':') {
- ret = kstrtouint(str + 1, 10, &level);
+ ret = kstrtoint(str + 1, 10, &level);
if (ret)
- level = 0;
+ return ret;
}
- level = btrfs_compress_set_level(type, level);
-
- return level;
+ *level_ret = btrfs_compress_set_level(type, level);
+ return 0;
}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 56eef0821e3e..e0228017e861 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -7,8 +7,19 @@
#define BTRFS_COMPRESSION_H
#include <linux/sizes.h>
-
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/pagemap.h>
+#include "bio.h"
+#include "fs.h"
+#include "btrfs_inode.h"
+
+struct address_space;
+struct inode;
struct btrfs_inode;
+struct btrfs_ordered_extent;
/*
* We want to make sure that amount of RAM required to uncompress an extent is
@@ -22,23 +33,20 @@ struct btrfs_inode;
/* Maximum length of compressed data stored on disk */
#define BTRFS_MAX_COMPRESSED (SZ_128K)
+#define BTRFS_MAX_COMPRESSED_PAGES (BTRFS_MAX_COMPRESSED / PAGE_SIZE)
+static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
+
/* Maximum size of data before compression */
#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
#define BTRFS_ZLIB_DEFAULT_LEVEL 3
struct compressed_bio {
- /* Number of sectors with unfinished IO (unsubmitted or unfinished) */
- refcount_t pending_sectors;
-
- /* Number of compressed pages in the array */
- unsigned int nr_pages;
+ /* Number of compressed folios in the array. */
+ unsigned int nr_folios;
- /* the pages with the compressed data on them */
- struct page **compressed_pages;
-
- /* inode that owns this data */
- struct inode *inode;
+ /* The folios with the compressed data on them. */
+ struct folio **compressed_folios;
/* starting offset in the inode for our pages */
u64 start;
@@ -52,62 +60,55 @@ struct compressed_bio {
/* The compression algorithm for this bio */
u8 compress_type;
- /* IO errors */
- u8 errors;
- int mirror_num;
+ /* Whether this is a write for writeback. */
+ bool writeback;
- /* for reads, this is the bio we are copying the data into */
- struct bio *orig_bio;
+ /* For reads, this is the bio we are copying the data into. */
+ struct btrfs_bio *orig_bbio;
- /*
- * the start of a variable length array of checksums only
- * used by reads
- */
- u8 sums[];
+ /* Must be last. */
+ struct btrfs_bio bbio;
};
-static inline unsigned int btrfs_compress_type(unsigned int type_level)
+static inline struct btrfs_fs_info *cb_to_fs_info(const struct compressed_bio *cb)
{
- return (type_level & 0xF);
+ return cb->bbio.inode->root->fs_info;
}
-static inline unsigned int btrfs_compress_level(unsigned int type_level)
+/* @range_end must be exclusive. */
+static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u64 cur)
{
- return ((type_level & 0xF0) >> 4);
+ /* @cur must be inside the folio. */
+ ASSERT(folio_pos(folio) <= cur);
+ ASSERT(cur < folio_next_pos(folio));
+ return umin(range_end, folio_next_pos(folio)) - cur;
}
-void __init btrfs_init_compress(void);
+int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info);
+void btrfs_free_compress_wsm(struct btrfs_fs_info *fs_info);
+
+int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
-int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
- u64 start, struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out);
-int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
- unsigned long start_byte, size_t srclen, size_t destlen);
+bool btrfs_compress_level_valid(unsigned int type, int level);
+int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out);
+int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
+ unsigned long dest_pgoff, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
-blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
- unsigned int len, u64 disk_start,
- unsigned int compressed_len,
- struct page **compressed_pages,
- unsigned int nr_pages,
- unsigned int write_flags,
- struct cgroup_subsys_state *blkcg_css);
-blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags);
-
-unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
-
-enum btrfs_compression_type {
- BTRFS_COMPRESS_NONE = 0,
- BTRFS_COMPRESS_ZLIB = 1,
- BTRFS_COMPRESS_LZO = 2,
- BTRFS_COMPRESS_ZSTD = 3,
- BTRFS_NR_COMPRESS_TYPES = 4,
-};
+void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
+ struct folio **compressed_folios,
+ unsigned int nr_folios, blk_opf_t write_flags,
+ bool writeback);
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
+
+int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret);
+
+struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info);
+void btrfs_free_compr_folio(struct folio *folio);
struct workspace_manager {
struct list_head idle_ws;
@@ -120,62 +121,65 @@ struct workspace_manager {
wait_queue_head_t ws_wait;
};
-struct list_head *btrfs_get_workspace(int type, unsigned int level);
-void btrfs_put_workspace(int type, struct list_head *ws);
+struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, int level);
+void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws);
-struct btrfs_compress_op {
- struct workspace_manager *workspace_manager;
+struct btrfs_compress_levels {
/* Maximum level supported by the compression algorithm */
- unsigned int max_level;
- unsigned int default_level;
+ int min_level;
+ int max_level;
+ int default_level;
};
/* The heuristic workspaces are managed via the 0th workspace manager */
#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
-extern const struct btrfs_compress_op btrfs_heuristic_compress;
-extern const struct btrfs_compress_op btrfs_zlib_compress;
-extern const struct btrfs_compress_op btrfs_lzo_compress;
-extern const struct btrfs_compress_op btrfs_zstd_compress;
+extern const struct btrfs_compress_levels btrfs_heuristic_compress;
+extern const struct btrfs_compress_levels btrfs_zlib_compress;
+extern const struct btrfs_compress_levels btrfs_lzo_compress;
+extern const struct btrfs_compress_levels btrfs_zstd_compress;
const char* btrfs_compress_type2str(enum btrfs_compression_type type);
bool btrfs_compress_is_valid_type(const char *str, size_t len);
-int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
+int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end);
+
+int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
+ struct folio **in_folio_ret);
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
+int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
-int zlib_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+int zlib_decompress(struct list_head *ws, const u8 *data_in,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-struct list_head *zlib_alloc_workspace(unsigned int level);
+struct list_head *zlib_alloc_workspace(struct btrfs_fs_info *fs_info, unsigned int level);
void zlib_free_workspace(struct list_head *ws);
-struct list_head *zlib_get_workspace(unsigned int level);
+struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int level);
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
+int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
-int lzo_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+int lzo_decompress(struct list_head *ws, const u8 *data_in,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-struct list_head *lzo_alloc_workspace(unsigned int level);
+struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info);
void lzo_free_workspace(struct list_head *ws);
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
+int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
-int zstd_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+int zstd_decompress(struct list_head *ws, const u8 *data_in,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-void zstd_init_workspace_manager(void);
-void zstd_cleanup_workspace_manager(void);
-struct list_head *zstd_alloc_workspace(unsigned int level);
+int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info);
+void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info);
+struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level);
void zstd_free_workspace(struct list_head *ws);
-struct list_head *zstd_get_workspace(unsigned int level);
-void zstd_put_workspace(struct list_head *ws);
+struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level);
+void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws);
#endif
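Aside (illustrative, not part of the header): the new compressed_bio embeds its btrfs_bio as the last member, so completion code can recover the containing structure from the bio it is handed. A minimal container_of() sketch of that relationship, written here only to show the layout the "Must be last" comment implies, not to quote the actual bio code:

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}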
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c3983bdaf4b8..a48b4befbee7 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -8,6 +8,7 @@
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -16,67 +17,128 @@
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
+#include "tree-checker.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "relocation.h"
+#include "file-item.h"
+
+static struct kmem_cache *btrfs_path_cachep;
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *ins_key, struct btrfs_path *path,
- int data_size, int extend);
+ int data_size, bool extend);
static int push_node_left(struct btrfs_trans_handle *trans,
struct extent_buffer *dst,
- struct extent_buffer *src, int empty);
+ struct extent_buffer *src, bool empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
-static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
- int level, int slot);
-
-static const struct btrfs_csums {
- u16 size;
- const char name[10];
- const char driver[12];
-} btrfs_csums[] = {
- [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
- [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
- [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
- [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
- .driver = "blake2b-256" },
-};
-
-int btrfs_super_csum_size(const struct btrfs_super_block *s)
+/*
+ * The leaf data grows from end-to-front in the node. This returns the address
+ * of the start of the last item, which is the stop of the leaf data stack.
+ */
+static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
- u16 t = btrfs_super_csum_type(s);
- /*
- * csum type is validated at mount time
- */
- return btrfs_csums[t].size;
+ u32 nr = btrfs_header_nritems(leaf);
+
+ if (nr == 0)
+ return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
+ return btrfs_item_offset(leaf, nr - 1);
}
-const char *btrfs_super_csum_name(u16 csum_type)
+/*
+ * Move data in a @leaf (using memmove, safe for overlapping ranges).
+ *
+ * @leaf: leaf that we're doing a memmove on
+ * @dst_offset: item data offset we're moving to
+ * @src_offset: item data offset we're moving from
+ * @len: length of the data we're moving
+ *
+ * Wrapper around memmove_extent_buffer() that takes into account the header on
+ * the leaf. The btrfs_item offsets start directly after the header, so we
+ * have to adjust any offsets to account for the header in the leaf. This
+ * handles that math to simplify the callers.
+ */
+static inline void memmove_leaf_data(const struct extent_buffer *leaf,
+ unsigned long dst_offset,
+ unsigned long src_offset,
+ unsigned long len)
{
- /* csum type is validated at mount time */
- return btrfs_csums[csum_type].name;
+ memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
+ btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}
/*
- * Return driver name if defined, otherwise the name that's also a valid driver
- * name
+ * Copy item data from @src into @dst at the given @offset.
+ *
+ * @dst: destination leaf that we're copying into
+ * @src: source leaf that we're copying from
+ * @dst_offset: item data offset we're copying to
+ * @src_offset: item data offset we're copying from
+ * @len: length of the data we're copying
+ *
+ * Wrapper around copy_extent_buffer() that takes into account the header on
+ * the leaf. The btrfs_item offsets start directly after the header, so we
+ * have to adjust any offsets to account for the header in the leaf. This
+ * handles that math to simplify the callers.
*/
-const char *btrfs_super_csum_driver(u16 csum_type)
+static inline void copy_leaf_data(const struct extent_buffer *dst,
+ const struct extent_buffer *src,
+ unsigned long dst_offset,
+ unsigned long src_offset, unsigned long len)
{
- /* csum type is validated at mount time */
- return btrfs_csums[csum_type].driver[0] ?
- btrfs_csums[csum_type].driver :
- btrfs_csums[csum_type].name;
+ copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
+ btrfs_item_nr_offset(src, 0) + src_offset, len);
}
-size_t __attribute_const__ btrfs_get_num_csums(void)
+/*
+ * Move items in a @leaf (using memmove).
+ *
+ * @dst: destination leaf for the items
+ * @dst_item: the item nr we're copying into
+ * @src_item: the item nr we're copying from
+ * @nr_items: the number of items to copy
+ *
+ * Wrapper around memmove_extent_buffer() that does the math to get the
+ * appropriate offsets into the leaf from the item numbers.
+ */
+static inline void memmove_leaf_items(const struct extent_buffer *leaf,
+ int dst_item, int src_item, int nr_items)
+{
+ memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
+ btrfs_item_nr_offset(leaf, src_item),
+ nr_items * sizeof(struct btrfs_item));
+}
+
+/*
+ * Copy items from @src into @dst at the given @offset.
+ *
+ * @dst: destination leaf for the items
+ * @src: source leaf for the items
+ * @dst_item: the item nr we're copying into
+ * @src_item: the item nr we're copying from
+ * @nr_items: the number of items to copy
+ *
+ * Wrapper around copy_extent_buffer() that does the math to get the
+ * appropriate offsets into the leaf from the item numbers.
+ */
+static inline void copy_leaf_items(const struct extent_buffer *dst,
+ const struct extent_buffer *src,
+ int dst_item, int src_item, int nr_items)
{
- return ARRAY_SIZE(btrfs_csums);
+ copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
+ btrfs_item_nr_offset(src, src_item),
+ nr_items * sizeof(struct btrfs_item));
}
struct btrfs_path *btrfs_alloc_path(void)
{
+ might_sleep();
+
return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
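Aside (illustrative, not from the patch): the leaf_data_end()/memmove_leaf_data() helpers introduced above work in the reference frame of btrfs_item_offset(), i.e. offsets measured from the first item rather than from the start of the extent buffer. A hedged sketch of the classic use case, closing the hole left by a deleted item's data; the function name and parameters are assumptions for illustration.

static void example_close_data_gap(struct extent_buffer *leaf,
				   u32 dstart, u32 dsize)
{
	/* Where the live item data currently begins. */
	const unsigned int data_end = leaf_data_end(leaf);

	/*
	 * Slide everything that sits in front of the hole towards the end of
	 * the leaf by the size of the deleted data. @dstart is the deleted
	 * item's data offset as returned by btrfs_item_offset().
	 */
	memmove_leaf_data(leaf, data_end + dsize, data_end, dstart - data_end);
}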
@@ -136,7 +198,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
* the inc_not_zero dance and if it doesn't work then
* synchronize_rcu and try again.
*/
- if (atomic_inc_not_zero(&eb->refs)) {
+ if (refcount_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
break;
}
@@ -162,7 +224,7 @@ static void add_root_to_dirty_list(struct btrfs_root *root)
spin_lock(&fs_info->trans_lock);
if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
/* Want the extent tree to be the last on the list */
- if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
list_move_tail(&root->dirty_list,
&fs_info->dirty_cowonly_roots);
else
@@ -187,11 +249,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
int ret = 0;
int level;
struct btrfs_disk_key disk_key;
+ u64 reloc_src_root = 0;
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- trans->transid != root->last_trans);
+ trans->transid != btrfs_get_root_last_trans(root));
level = btrfs_header_level(buf);
if (level == 0)
@@ -199,9 +262,11 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
+ if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ reloc_src_root = btrfs_header_owner(buf);
cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
&disk_key, level, buf->start, 0,
- BTRFS_NESTING_NEW_ROOT);
+ reloc_src_root, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -218,19 +283,30 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
- WARN_ON(btrfs_header_generation(buf) > trans->transid);
- if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (unlikely(btrfs_header_generation(buf) > trans->transid)) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_inc_ref(trans, root, cow, 1);
- else
+ if (unlikely(ret))
+ btrfs_abort_transaction(trans, ret);
+ } else {
ret = btrfs_inc_ref(trans, root, cow, 0);
+ if (unlikely(ret))
+ btrfs_abort_transaction(trans, ret);
+ }
if (ret) {
btrfs_tree_unlock(cow);
free_extent_buffer(cow);
- btrfs_abort_transaction(trans, ret);
return ret;
}
- btrfs_mark_buffer_dirty(cow);
+ btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
}
@@ -238,22 +314,41 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
/*
* check if the tree block can be shared by multiple trees
*/
-int btrfs_block_can_be_shared(struct btrfs_root *root,
- struct extent_buffer *buf)
+bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root,
+ const struct extent_buffer *buf)
{
+ const u64 buf_gen = btrfs_header_generation(buf);
+
/*
* Tree blocks not in shareable trees and tree roots are never shared.
* If a block was allocated after the last snapshot and the block was
* not allocated by tree relocation, we know the block is not shared.
*/
- if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- buf != root->node && buf != root->commit_root &&
- (btrfs_header_generation(buf) <=
- btrfs_root_last_snapshot(&root->root_item) ||
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
- return 1;
- return 0;
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ return false;
+
+ if (buf == root->node)
+ return false;
+
+ if (buf_gen > btrfs_root_last_snapshot(&root->root_item) &&
+ !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
+ return false;
+
+ if (buf != root->commit_root)
+ return true;
+
+ /*
+ * An extent buffer that used to be the commit root may still be shared
+ * because the tree height may have increased and it became a child of a
+ * higher level root. This can happen when snapshotting a subvolume
+ * created in the current transaction.
+ */
+ if (buf_gen == trans->transid)
+ return true;
+
+ return false;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
@@ -266,7 +361,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
u64 refs;
u64 owner;
u64 flags;
- u64 new_flags = 0;
int ret;
/*
@@ -286,20 +380,24 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* are only allowed for blocks use full backrefs.
*/
- if (btrfs_block_can_be_shared(root, buf)) {
+ if (btrfs_block_can_be_shared(trans, root, buf)) {
ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
- &refs, &flags);
+ &refs, &flags, NULL);
if (ret)
return ret;
- if (refs == 0) {
- ret = -EROFS;
- btrfs_handle_fs_error(fs_info, ret, NULL);
+ if (unlikely(refs == 0)) {
+ btrfs_crit(fs_info,
+ "found 0 references for tree block at bytenr %llu level %d root %llu",
+ buf->start, btrfs_header_level(buf),
+ btrfs_root_id(root));
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
return ret;
}
} else {
refs = 1;
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
else
@@ -307,19 +405,26 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
}
owner = btrfs_header_owner(buf);
- BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
- !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
+ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
+ btrfs_crit(fs_info,
+"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
+ buf->start, btrfs_header_level(buf),
+ btrfs_root_id(root), refs, flags);
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
if (refs > 1) {
- if ((owner == root->root_key.objectid ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
+ if ((owner == btrfs_root_id(root) ||
+ btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
ret = btrfs_inc_ref(trans, root, buf, 1);
if (ret)
return ret;
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_dec_ref(trans, root, buf, 0);
if (ret)
return ret;
@@ -327,29 +432,22 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (ret)
return ret;
}
- new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ ret = btrfs_set_disk_extent_flags(trans, buf,
+ BTRFS_BLOCK_FLAG_FULL_BACKREF);
+ if (ret)
+ return ret;
} else {
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
ret = btrfs_inc_ref(trans, root, cow, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0);
if (ret)
return ret;
}
- if (new_flags != 0) {
- int level = btrfs_header_level(buf);
-
- ret = btrfs_set_disk_extent_flags(trans, buf,
- new_flags, level, 0);
- if (ret)
- return ret;
- }
} else {
if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
ret = btrfs_inc_ref(trans, root, cow, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0);
@@ -359,7 +457,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (ret)
return ret;
}
- btrfs_clean_tree_block(buf);
+ btrfs_clear_buffer_dirty(trans, buf);
*last_ref = 1;
}
return 0;
@@ -377,13 +475,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* bytes the allocator should try to find free next to the block it returns.
* This is just a hint and may be ignored by the allocator.
*/
-static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf,
- struct extent_buffer *parent, int parent_slot,
- struct extent_buffer **cow_ret,
- u64 search_start, u64 empty_size,
- enum btrfs_lock_nesting nest)
+int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf,
+ struct extent_buffer *parent, int parent_slot,
+ struct extent_buffer **cow_ret,
+ u64 search_start, u64 empty_size,
+ enum btrfs_lock_nesting nest)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_disk_key disk_key;
@@ -392,6 +490,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
int last_ref = 0;
int unlock_orig = 0;
u64 parent_start = 0;
+ u64 reloc_src_root = 0;
if (*cow_ret == buf)
unlock_orig = 1;
@@ -401,7 +500,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- trans->transid != root->last_trans);
+ trans->transid != btrfs_get_root_last_trans(root));
level = btrfs_header_level(buf);
@@ -410,12 +509,14 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
- if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
- parent_start = parent->start;
-
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
+ if (parent)
+ parent_start = parent->start;
+ reloc_src_root = btrfs_header_owner(buf);
+ }
cow = btrfs_alloc_tree_block(trans, root, parent_start,
- root->root_key.objectid, &disk_key, level,
- search_start, empty_size, nest);
+ btrfs_root_id(root), &disk_key, level,
+ search_start, empty_size, reloc_src_root, nest);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -427,84 +528,97 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
BTRFS_HEADER_FLAG_RELOC);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
else
- btrfs_set_header_owner(cow, root->root_key.objectid);
+ btrfs_set_header_owner(cow, btrfs_root_id(root));
write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
- if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
- if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
}
if (buf == root->node) {
WARN_ON(parent && parent != buf);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
parent_start = buf->start;
- atomic_inc(&cow->refs);
ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
- BUG_ON(ret < 0);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_unlock_cow;
+ }
+ refcount_inc(&cow->refs);
rcu_assign_pointer(root->node, cow);
- btrfs_free_tree_block(trans, root, buf, parent_start,
- last_ref);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_unlock_cow;
+ }
} else {
WARN_ON(trans->transid != btrfs_header_generation(parent));
- btrfs_tree_mod_log_insert_key(parent, parent_slot,
- BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
+ ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_unlock_cow;
+ }
btrfs_set_node_blockptr(parent, parent_slot,
cow->start);
btrfs_set_node_ptr_generation(parent, parent_slot,
trans->transid);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
if (last_ref) {
ret = btrfs_tree_mod_log_free_eb(buf);
- if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
}
- btrfs_free_tree_block(trans, root, buf, parent_start,
- last_ref);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_unlock_cow;
+ }
}
+
+ trace_btrfs_cow_block(root, buf, cow);
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
- btrfs_mark_buffer_dirty(cow);
+ btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
+
+error_unlock_cow:
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
+ return ret;
}
-static inline int should_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf)
+static inline bool should_cow_block(const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root,
+ const struct extent_buffer *buf)
{
if (btrfs_is_testing(root->fs_info))
- return 0;
-
- /* Ensure we can see the FORCE_COW bit */
- smp_mb__before_atomic();
+ return false;
/*
* We do not need to cow a block if
@@ -517,21 +631,33 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
* after we've finished copying src root, we must COW the shared
* block to ensure the metadata consistency.
*/
- if (btrfs_header_generation(buf) == trans->transid &&
- !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
- !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
- !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
- return 0;
- return 1;
+
+ if (btrfs_header_generation(buf) != trans->transid)
+ return true;
+
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN))
+ return true;
+
+ /* Ensure we can see the FORCE_COW bit. */
+ smp_mb__before_atomic();
+ if (test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
+ return true;
+
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return false;
+
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
+ return true;
+
+ return false;
}
/*
- * cows a single block, see __btrfs_cow_block for the real work.
+ * COWs a single block, see btrfs_force_cow_block() for the real work.
* This version of it has extra checks so that a block isn't COWed more than
* once per transaction, as long as it hasn't been written yet
*/
-noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
@@ -539,27 +665,38 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 search_start;
- int ret;
-
- if (test_bit(BTRFS_ROOT_DELETING, &root->state))
- btrfs_err(fs_info,
- "COW'ing blocks on a fs root that's being dropped");
- if (trans->transaction != fs_info->running_transaction)
- WARN(1, KERN_CRIT "trans %llu running %llu\n",
- trans->transid,
- fs_info->running_transaction->transid);
+ if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+ "attempt to COW block %llu on root %llu that is being deleted",
+ buf->start, btrfs_root_id(root));
+ return -EUCLEAN;
+ }
- if (trans->transid != fs_info->generation)
- WARN(1, KERN_CRIT "trans %llu running %llu\n",
- trans->transid, fs_info->generation);
+ /*
+ * COWing must happen through a running transaction, which always
+ * matches the current fs generation (it's a transaction with a state
+ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
+ * into error state to prevent the commit of any transaction.
+ */
+ if (unlikely(trans->transaction != fs_info->running_transaction ||
+ trans->transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
+ buf->start, btrfs_root_id(root), trans->transid,
+ fs_info->running_transaction->transid,
+ fs_info->generation);
+ return -EUCLEAN;
+ }
if (!should_cow_block(trans, root, buf)) {
*cow_ret = buf;
return 0;
}
- search_start = buf->start & ~((u64)SZ_1G - 1);
+ search_start = round_down(buf->start, SZ_1G);
/*
* Before CoWing this block for later modification, check if it's
@@ -568,59 +705,12 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 * Also we don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
- ret = __btrfs_cow_block(trans, root, buf, parent,
- parent_slot, cow_ret, search_start, 0, nest);
-
- trace_btrfs_cow_block(root, buf, *cow_ret);
-
- return ret;
+ return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+ cow_ret, search_start, 0, nest);
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
/*
- * helper function for defrag to decide if two blocks pointed to by a
- * node are actually close by
- */
-static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
-{
- if (blocknr < other && other - (blocknr + blocksize) < 32768)
- return 1;
- if (blocknr > other && blocknr - (other + blocksize) < 32768)
- return 1;
- return 0;
-}
-
-#ifdef __LITTLE_ENDIAN
-
-/*
- * Compare two keys, on little-endian the disk order is same as CPU order and
- * we can avoid the conversion.
- */
-static int comp_keys(const struct btrfs_disk_key *disk_key,
- const struct btrfs_key *k2)
-{
- const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
-
- return btrfs_comp_cpu_keys(k1, k2);
-}
-
-#else
-
-/*
- * compare two keys in a memcmp fashion
- */
-static int comp_keys(const struct btrfs_disk_key *disk,
- const struct btrfs_key *k2)
-{
- struct btrfs_key k1;
-
- btrfs_disk_key_to_cpu(&k1, disk);
-
- return btrfs_comp_cpu_keys(&k1, k2);
-}
-#endif
-
-/*
* same as comp_keys only with two btrfs_key's
*/
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
@@ -641,120 +731,52 @@ int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_ke
}
/*
- * this is used by the defrag code to go through all the
- * leaves pointed to by a node and reallocate them so that
- * disk order is close to key order
- */
-int btrfs_realloc_node(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, u64 *last_ret,
- struct btrfs_key *progress)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_buffer *cur;
- u64 blocknr;
- u64 search_start = *last_ret;
- u64 last_block = 0;
- u64 other;
- u32 parent_nritems;
- int end_slot;
- int i;
- int err = 0;
- u32 blocksize;
- int progress_passed = 0;
- struct btrfs_disk_key disk_key;
-
- WARN_ON(trans->transaction != fs_info->running_transaction);
- WARN_ON(trans->transid != fs_info->generation);
-
- parent_nritems = btrfs_header_nritems(parent);
- blocksize = fs_info->nodesize;
- end_slot = parent_nritems - 1;
-
- if (parent_nritems <= 1)
- return 0;
-
- for (i = start_slot; i <= end_slot; i++) {
- int close = 1;
-
- btrfs_node_key(parent, &disk_key, i);
- if (!progress_passed && comp_keys(&disk_key, progress) < 0)
- continue;
-
- progress_passed = 1;
- blocknr = btrfs_node_blockptr(parent, i);
- if (last_block == 0)
- last_block = blocknr;
-
- if (i > 0) {
- other = btrfs_node_blockptr(parent, i - 1);
- close = close_blocks(blocknr, other, blocksize);
- }
- if (!close && i < end_slot) {
- other = btrfs_node_blockptr(parent, i + 1);
- close = close_blocks(blocknr, other, blocksize);
- }
- if (close) {
- last_block = blocknr;
- continue;
- }
-
- cur = btrfs_read_node_slot(parent, i);
- if (IS_ERR(cur))
- return PTR_ERR(cur);
- if (search_start == 0)
- search_start = last_block;
-
- btrfs_tree_lock(cur);
- err = __btrfs_cow_block(trans, root, cur, parent, i,
- &cur, search_start,
- min(16 * blocksize,
- (end_slot - i) * blocksize),
- BTRFS_NESTING_COW);
- if (err) {
- btrfs_tree_unlock(cur);
- free_extent_buffer(cur);
- break;
- }
- search_start = cur->start;
- last_block = cur->start;
- *last_ret = search_start;
- btrfs_tree_unlock(cur);
- free_extent_buffer(cur);
- }
- return err;
-}
-
-/*
- * search for key in the extent_buffer. The items start at offset p,
- * and they are item_size apart.
+ * Search for a key in the given extent_buffer.
*
- * the slot in the array is returned via slot, and it points to
- * the place where you would insert key if it is not found in
- * the array.
+ * The lower boundary for the search is specified by the slot number @first_slot.
+ * Use a value of 0 to search over the whole extent buffer. Works for both
+ * leaves and nodes.
*
- * Slot may point to total number of items if the key is bigger than
- * all of the keys
+ * The slot in the extent buffer is returned via @slot. If the key exists in the
+ * extent buffer, then @slot will point to the slot where the key is, otherwise
+ * it points to the slot where you would insert the key.
+ *
+ * Slot may point to the total number of items (i.e. one position beyond the last
+ * key) if the key is bigger than the last key in the extent buffer.
*/
-static noinline int generic_bin_search(struct extent_buffer *eb,
- unsigned long p, int item_size,
- const struct btrfs_key *key, int *slot)
+int btrfs_bin_search(const struct extent_buffer *eb, int first_slot,
+ const struct btrfs_key *key, int *slot)
{
- int low = 0;
- int high = btrfs_header_nritems(eb);
+ unsigned long p;
+ int item_size;
+ /*
+ * Use unsigned types for the low and high slots, so that we get a more
+ * efficient division in the search loop below.
+ */
+ u32 low = first_slot;
+ u32 high = btrfs_header_nritems(eb);
int ret;
const int key_size = sizeof(struct btrfs_disk_key);
- if (low > high) {
+ if (unlikely(low > high)) {
btrfs_err(eb->fs_info,
- "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
+ "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
__func__, low, high, eb->start,
btrfs_header_owner(eb), btrfs_header_level(eb));
return -EINVAL;
}
+ if (btrfs_header_level(eb) == 0) {
+ p = offsetof(struct btrfs_leaf, items);
+ item_size = sizeof(struct btrfs_item);
+ } else {
+ p = offsetof(struct btrfs_node, ptrs);
+ item_size = sizeof(struct btrfs_key_ptr);
+ }
+
while (low < high) {
- unsigned long oip;
+ const int unit_size = eb->folio_size;
+ unsigned long oil;
unsigned long offset;
struct btrfs_disk_key *tmp;
struct btrfs_disk_key unaligned;
@@ -762,20 +784,20 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
mid = (low + high) / 2;
offset = p + mid * item_size;
- oip = offset_in_page(offset);
+ oil = get_eb_offset_in_folio(eb, offset);
- if (oip + key_size <= PAGE_SIZE) {
- const unsigned long idx = get_eb_page_index(offset);
- char *kaddr = page_address(eb->pages[idx]);
+ if (oil + key_size <= unit_size) {
+ const unsigned long idx = get_eb_folio_index(eb, offset);
+ char *kaddr = folio_address(eb->folios[idx]);
- oip = get_eb_offset_in_page(eb, offset);
- tmp = (struct btrfs_disk_key *)(kaddr + oip);
+ oil = get_eb_offset_in_folio(eb, offset);
+ tmp = (struct btrfs_disk_key *)(kaddr + oil);
} else {
read_extent_buffer(eb, &unaligned, offset, key_size);
tmp = &unaligned;
}
- ret = comp_keys(tmp, key);
+ ret = btrfs_comp_keys(tmp, key);
if (ret < 0)
low = mid + 1;
@@ -790,36 +812,19 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
return 1;
}
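Aside (illustrative, not kernel code): the return contract of btrfs_bin_search() above, reduced to a standalone binary search over a plain array so the "0 on exact match, 1 with the insertion slot otherwise" convention is easy to see. Names and the int key type are assumptions.

static int bin_search_sketch(const int *keys, unsigned int nr, int target,
			     unsigned int *slot)
{
	unsigned int low = 0, high = nr;

	while (low < high) {
		unsigned int mid = (low + high) / 2;

		if (keys[mid] < target) {
			low = mid + 1;
		} else if (keys[mid] > target) {
			high = mid;
		} else {
			*slot = mid;
			return 0;	/* exact match */
		}
	}
	*slot = low;	/* insertion point, may equal @nr */
	return 1;
}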
-/*
- * simple bin_search frontend that does the right thing for
- * leaves vs nodes
- */
-int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int *slot)
-{
- if (btrfs_header_level(eb) == 0)
- return generic_bin_search(eb,
- offsetof(struct btrfs_leaf, items),
- sizeof(struct btrfs_item), key, slot);
- else
- return generic_bin_search(eb,
- offsetof(struct btrfs_node, ptrs),
- sizeof(struct btrfs_key_ptr), key, slot);
-}
-
-static void root_add_used(struct btrfs_root *root, u32 size)
+static void root_add_used_bytes(struct btrfs_root *root)
{
spin_lock(&root->accounting_lock);
btrfs_set_root_used(&root->root_item,
- btrfs_root_used(&root->root_item) + size);
+ btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
spin_unlock(&root->accounting_lock);
}
-static void root_sub_used(struct btrfs_root *root, u32 size)
+static void root_sub_used_bytes(struct btrfs_root *root)
{
spin_lock(&root->accounting_lock);
btrfs_set_root_used(&root->root_item,
- btrfs_root_used(&root->root_item) - size);
+ btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
spin_unlock(&root->accounting_lock);
}
@@ -830,28 +835,102 @@ struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
int slot)
{
int level = btrfs_header_level(parent);
+ struct btrfs_tree_parent_check check = { 0 };
struct extent_buffer *eb;
- struct btrfs_key first_key;
if (slot < 0 || slot >= btrfs_header_nritems(parent))
return ERR_PTR(-ENOENT);
- BUG_ON(level == 0);
+ ASSERT(level);
+
+ check.level = level - 1;
+ check.transid = btrfs_node_ptr_generation(parent, slot);
+ check.owner_root = btrfs_header_owner(parent);
+ check.has_first_key = true;
+ btrfs_node_key_to_cpu(parent, &check.first_key, slot);
- btrfs_node_key_to_cpu(parent, &first_key, slot);
eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
- btrfs_header_owner(parent),
- btrfs_node_ptr_generation(parent, slot),
- level - 1, &first_key);
- if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
+ &check);
+ if (IS_ERR(eb))
+ return eb;
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_extent_buffer(eb);
- eb = ERR_PTR(-EIO);
+ return ERR_PTR(-EIO);
}
return eb;
}
/*
+ * Promote a child node to become the new tree root.
+ *
+ * @trans: Transaction handle
+ * @root: Tree root structure to update
+ * @path: Path holding nodes and locks
+ * @level: Level of the parent (old root)
+ * @parent: The parent (old root) with exactly one item
+ *
+ * This helper is called during rebalancing when the root node contains only
+ * a single item (nritems == 1). We can reduce the tree height by promoting
+ * that child to become the new root and freeing the old root node. The path
+ * locks and references are updated accordingly.
+ *
+ * Return: 0 on success, negative errno on failure. The transaction is aborted
+ * on critical errors.
+ */
+static int promote_child_to_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ int level, struct extent_buffer *parent)
+{
+ struct extent_buffer *child;
+ int ret;
+
+ ASSERT(btrfs_header_nritems(parent) == 1);
+
+ child = btrfs_read_node_slot(parent, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ btrfs_tree_lock(child);
+ ret = btrfs_cow_block(trans, root, child, parent, 0, &child, BTRFS_NESTING_COW);
+ if (ret) {
+ btrfs_tree_unlock(child);
+ free_extent_buffer(child);
+ return ret;
+ }
+
+ ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
+ if (unlikely(ret < 0)) {
+ btrfs_tree_unlock(child);
+ free_extent_buffer(child);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ rcu_assign_pointer(root->node, child);
+
+ add_root_to_dirty_list(root);
+ btrfs_tree_unlock(child);
+
+ path->locks[level] = 0;
+ path->nodes[level] = NULL;
+ btrfs_clear_buffer_dirty(trans, parent);
+ btrfs_tree_unlock(parent);
+ /* Once for the path. */
+ free_extent_buffer(parent);
+
+ root_sub_used_bytes(root);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), parent, 0, 1);
+ /* Once for the root ptr. */
+ free_extent_buffer_stale(parent);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
* node level balancing, used to make sure nodes are in proper order for
* item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
@@ -890,79 +969,48 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
* by promoting the node below to a root
*/
if (!parent) {
- struct extent_buffer *child;
-
if (btrfs_header_nritems(mid) != 1)
return 0;
- /* promote the child to a root */
- child = btrfs_read_node_slot(mid, 0);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- btrfs_handle_fs_error(fs_info, ret, NULL);
- goto enospc;
- }
-
- btrfs_tree_lock(child);
- ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
- BTRFS_NESTING_COW);
- if (ret) {
- btrfs_tree_unlock(child);
- free_extent_buffer(child);
- goto enospc;
- }
-
- ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
- BUG_ON(ret < 0);
- rcu_assign_pointer(root->node, child);
-
- add_root_to_dirty_list(root);
- btrfs_tree_unlock(child);
-
- path->locks[level] = 0;
- path->nodes[level] = NULL;
- btrfs_clean_tree_block(mid);
- btrfs_tree_unlock(mid);
- /* once for the path */
- free_extent_buffer(mid);
-
- root_sub_used(root, mid->len);
- btrfs_free_tree_block(trans, root, mid, 0, 1);
- /* once for the root ptr */
- free_extent_buffer_stale(mid);
- return 0;
+ return promote_child_to_root(trans, root, path, level, mid);
}
if (btrfs_header_nritems(mid) >
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
return 0;
- left = btrfs_read_node_slot(parent, pslot - 1);
- if (IS_ERR(left))
- left = NULL;
+ if (pslot) {
+ left = btrfs_read_node_slot(parent, pslot - 1);
+ if (IS_ERR(left)) {
+ ret = PTR_ERR(left);
+ left = NULL;
+ goto out;
+ }
- if (left) {
- __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
+ btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
wret = btrfs_cow_block(trans, root, left,
parent, pslot - 1, &left,
BTRFS_NESTING_LEFT_COW);
if (wret) {
ret = wret;
- goto enospc;
+ goto out;
}
}
- right = btrfs_read_node_slot(parent, pslot + 1);
- if (IS_ERR(right))
- right = NULL;
+ if (pslot + 1 < btrfs_header_nritems(parent)) {
+ right = btrfs_read_node_slot(parent, pslot + 1);
+ if (IS_ERR(right)) {
+ ret = PTR_ERR(right);
+ right = NULL;
+ goto out;
+ }
- if (right) {
- __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
+ btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
wret = btrfs_cow_block(trans, root, right,
parent, pslot + 1, &right,
BTRFS_NESTING_RIGHT_COW);
if (wret) {
ret = wret;
- goto enospc;
+ goto out;
}
}
@@ -982,21 +1030,34 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (btrfs_header_nritems(right) == 0) {
- btrfs_clean_tree_block(right);
+ btrfs_clear_buffer_dirty(trans, right);
btrfs_tree_unlock(right);
- del_ptr(root, path, level + 1, pslot + 1);
- root_sub_used(root, right->len);
- btrfs_free_tree_block(trans, root, right, 0, 1);
+ ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
+ if (ret < 0) {
+ free_extent_buffer_stale(right);
+ right = NULL;
+ goto out;
+ }
+ root_sub_used_bytes(root);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root),
+ right, 0, 1);
free_extent_buffer_stale(right);
right = NULL;
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
} else {
struct btrfs_disk_key right_key;
btrfs_node_key(right, &right_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
- BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
- BUG_ON(ret < 0);
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
btrfs_set_node_key(parent, &right_key, pslot + 1);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
}
}
if (btrfs_header_nritems(mid) == 1) {
@@ -1009,15 +1070,19 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
* otherwise we would have pulled some pointers from the
* right
*/
- if (!left) {
- ret = -EROFS;
- btrfs_handle_fs_error(fs_info, ret, NULL);
- goto enospc;
+ if (unlikely(!left)) {
+ btrfs_crit(fs_info,
+"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
+ parent->start, btrfs_header_level(parent),
+ mid->start, btrfs_root_id(root));
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
}
wret = balance_node_right(trans, mid, left);
if (wret < 0) {
ret = wret;
- goto enospc;
+ goto out;
}
if (wret == 1) {
wret = push_node_left(trans, left, mid, 1);
@@ -1027,32 +1092,45 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
BUG_ON(wret == 1);
}
if (btrfs_header_nritems(mid) == 0) {
- btrfs_clean_tree_block(mid);
+ btrfs_clear_buffer_dirty(trans, mid);
btrfs_tree_unlock(mid);
- del_ptr(root, path, level + 1, pslot);
- root_sub_used(root, mid->len);
- btrfs_free_tree_block(trans, root, mid, 0, 1);
+ ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
+ if (ret < 0) {
+ free_extent_buffer_stale(mid);
+ mid = NULL;
+ goto out;
+ }
+ root_sub_used_bytes(root);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
} else {
/* update the parent key to reflect our changes */
struct btrfs_disk_key mid_key;
btrfs_node_key(mid, &mid_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot,
- BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
- BUG_ON(ret < 0);
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
btrfs_set_node_key(parent, &mid_key, pslot);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
}
/* update the path */
if (left) {
if (btrfs_header_nritems(left) > orig_slot) {
- atomic_inc(&left->refs);
/* left was locked after cow */
path->nodes[level] = left;
path->slots[level + 1] -= 1;
path->slots[level] = orig_slot;
+ /* Left is now owned by path. */
+ left = NULL;
if (mid) {
btrfs_tree_unlock(mid);
free_extent_buffer(mid);
@@ -1066,14 +1144,13 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (orig_ptr !=
btrfs_node_blockptr(path->nodes[level], path->slots[level]))
BUG();
-enospc:
+out:
if (right) {
btrfs_tree_unlock(right);
free_extent_buffer(right);
}
if (left) {
- if (path->nodes[level] != left)
- btrfs_tree_unlock(left);
+ btrfs_tree_unlock(left);
free_extent_buffer(left);
}
return ret;
@@ -1111,15 +1188,15 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (!parent)
return 1;
- left = btrfs_read_node_slot(parent, pslot - 1);
- if (IS_ERR(left))
- left = NULL;
-
/* first, try to make some room in the middle buffer */
- if (left) {
+ if (pslot) {
u32 left_nr;
- __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
+ left = btrfs_read_node_slot(parent, pslot - 1);
+ if (IS_ERR(left))
+ return PTR_ERR(left);
+
+ btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
left_nr = btrfs_header_nritems(left);
if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -1141,10 +1218,15 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
orig_slot += left_nr;
btrfs_node_key(mid, &disk_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot,
- BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
- BUG_ON(ret < 0);
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ if (unlikely(ret < 0)) {
+ btrfs_tree_unlock(left);
+ free_extent_buffer(left);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
btrfs_set_node_key(parent, &disk_key, pslot);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
if (btrfs_header_nritems(left) > orig_slot) {
path->nodes[level] = left;
path->slots[level + 1] -= 1;
@@ -1163,17 +1245,18 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(left);
free_extent_buffer(left);
}
- right = btrfs_read_node_slot(parent, pslot + 1);
- if (IS_ERR(right))
- right = NULL;
/*
* then try to empty the right most buffer into the middle
*/
- if (right) {
+ if (pslot + 1 < btrfs_header_nritems(parent)) {
u32 right_nr;
- __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
+ right = btrfs_read_node_slot(parent, pslot + 1);
+ if (IS_ERR(right))
+ return PTR_ERR(right);
+
+ btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
right_nr = btrfs_header_nritems(right);
if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -1195,10 +1278,15 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_node_key(right, &disk_key, 0);
ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
- BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
- BUG_ON(ret < 0);
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ if (unlikely(ret < 0)) {
+ btrfs_tree_unlock(right);
+ free_extent_buffer(right);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
btrfs_set_node_key(parent, &disk_key, pslot + 1);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
if (btrfs_header_nritems(mid) <= orig_slot) {
path->nodes[level] = right;
@@ -1224,7 +1312,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
* to the block in 'slot', and triggering ra on them.
*/
static void reada_for_search(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+ const struct btrfs_path *path,
int level, int slot, u64 objectid)
{
struct extent_buffer *node;
@@ -1306,7 +1394,7 @@ static void reada_for_search(struct btrfs_fs_info *fs_info,
}
}
-static noinline void reada_for_balance(struct btrfs_path *path, int level)
+static noinline void reada_for_balance(const struct btrfs_path *path, int level)
{
struct extent_buffer *parent;
int slot;
@@ -1345,33 +1433,34 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
{
int i;
int skip_level = level;
- int no_skips = 0;
- struct extent_buffer *t;
+ bool check_skip = true;
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
if (!path->nodes[i])
break;
if (!path->locks[i])
break;
- if (!no_skips && path->slots[i] == 0) {
- skip_level = i + 1;
- continue;
- }
- if (!no_skips && path->keep_locks) {
- u32 nritems;
- t = path->nodes[i];
- nritems = btrfs_header_nritems(t);
- if (nritems < 1 || path->slots[i] >= nritems - 1) {
+
+ if (check_skip) {
+ if (path->slots[i] == 0) {
skip_level = i + 1;
continue;
}
+
+ if (path->keep_locks) {
+ u32 nritems;
+
+ nritems = btrfs_header_nritems(path->nodes[i]);
+ if (nritems < 1 || path->slots[i] >= nritems - 1) {
+ skip_level = i + 1;
+ continue;
+ }
+ }
}
- if (skip_level < i && i >= lowest_unlock)
- no_skips = 1;
- t = path->nodes[i];
if (i >= lowest_unlock && i > skip_level) {
- btrfs_tree_unlock_rw(t, path->locks[i]);
+ btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
+ check_skip = false;
path->locks[i] = 0;
if (write_lock_level &&
i > min_write_lock_level &&
@@ -1383,93 +1472,160 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
}
/*
- * helper function for btrfs_search_slot. The goal is to find a block
- * in cache without setting the path to blocking. If we find the block
- * we return zero and the path is unchanged.
+ * Helper function for btrfs_search_slot() and other functions that do a search
+ * on a btree. The goal is to find a tree block in the cache (the radix tree at
+ * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
+ * its pages from disk.
*
- * If we can't find the block, we set the path blocking and do some
- * reada. -EAGAIN is returned and the search must be repeated.
+ * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
+ * whole btree search, starting again from the current root node.
*/
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
- struct extent_buffer **eb_ret, int level, int slot,
+ struct extent_buffer **eb_ret, int slot,
const struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_tree_parent_check check = { 0 };
u64 blocknr;
- u64 gen;
- struct extent_buffer *tmp;
- struct btrfs_key first_key;
- int ret;
+ struct extent_buffer *tmp = NULL;
+ int ret = 0;
+ int ret2;
int parent_level;
+ bool read_tmp = false;
+ bool tmp_locked = false;
+ bool path_released = false;
blocknr = btrfs_node_blockptr(*eb_ret, slot);
- gen = btrfs_node_ptr_generation(*eb_ret, slot);
parent_level = btrfs_header_level(*eb_ret);
- btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
+ btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
+ check.has_first_key = true;
+ check.level = parent_level - 1;
+ check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
+ check.owner_root = btrfs_root_id(root);
+ /*
+ * If we need to read an extent buffer from disk and we are holding locks
+ * on upper level nodes, we unlock all the upper nodes before reading the
+ * extent buffer, and then return -EAGAIN to the caller as it needs to
+ * restart the search. We don't release the lock on the current level
+ * because we need to walk this node to figure out which blocks to read.
+ */
tmp = find_extent_buffer(fs_info, blocknr);
if (tmp) {
if (p->reada == READA_FORWARD_ALWAYS)
- reada_for_search(fs_info, p, level, slot, key->objectid);
+ reada_for_search(fs_info, p, parent_level, slot, key->objectid);
/* first we do an atomic uptodate check */
- if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
+ if (btrfs_buffer_uptodate(tmp, check.transid, true) > 0) {
/*
* Do extra check for first_key, eb can be stale due to
* being cached, read from scrub, or having multiple
* parents (shared tree blocks).
*/
- if (btrfs_verify_level_key(tmp,
- parent_level - 1, &first_key, gen)) {
- free_extent_buffer(tmp);
- return -EUCLEAN;
+ if (unlikely(btrfs_verify_level_key(tmp, &check))) {
+ ret = -EUCLEAN;
+ goto out;
}
*eb_ret = tmp;
- return 0;
+ tmp = NULL;
+ ret = 0;
+ goto out;
+ }
+
+ if (p->nowait) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (!p->skip_locking) {
+ btrfs_unlock_up_safe(p, parent_level + 1);
+ btrfs_maybe_reset_lockdep_class(root, tmp);
+ tmp_locked = true;
+ btrfs_tree_read_lock(tmp);
+ btrfs_release_path(p);
+ ret = -EAGAIN;
+ path_released = true;
}
- /* now we're allowed to do a blocking uptodate check */
- ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
- if (!ret) {
+ /* Now we're allowed to do a blocking uptodate check. */
+ ret2 = btrfs_read_extent_buffer(tmp, &check);
+ if (ret2) {
+ ret = ret2;
+ goto out;
+ }
+
+ if (ret == 0) {
+ ASSERT(!tmp_locked);
*eb_ret = tmp;
- return 0;
+ tmp = NULL;
}
- free_extent_buffer(tmp);
- btrfs_release_path(p);
- return -EIO;
+ goto out;
+ } else if (p->nowait) {
+ ret = -EAGAIN;
+ goto out;
}
- /*
- * reduce lock contention at high levels
- * of the btree by dropping locks before
- * we read. Don't release the lock on the current
- * level because we need to walk this node to figure
- * out which blocks to read.
- */
- btrfs_unlock_up_safe(p, level + 1);
+ if (!p->skip_locking) {
+ btrfs_unlock_up_safe(p, parent_level + 1);
+ ret = -EAGAIN;
+ }
if (p->reada != READA_NONE)
- reada_for_search(fs_info, p, level, slot, key->objectid);
+ reada_for_search(fs_info, p, parent_level, slot, key->objectid);
- ret = -EAGAIN;
- tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
- gen, parent_level - 1, &first_key);
- if (!IS_ERR(tmp)) {
- /*
- * If the read above didn't mark this buffer up to date,
- * it will never end up being up to date. Set ret to EIO now
- * and give up so that our caller doesn't loop forever
- * on our EAGAINs.
- */
- if (!extent_buffer_uptodate(tmp))
- ret = -EIO;
- free_extent_buffer(tmp);
- } else {
+ tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
+ if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
+ tmp = NULL;
+ goto out;
}
+ read_tmp = true;
+
+ if (!p->skip_locking) {
+ ASSERT(ret == -EAGAIN);
+ btrfs_maybe_reset_lockdep_class(root, tmp);
+ tmp_locked = true;
+ btrfs_tree_read_lock(tmp);
+ btrfs_release_path(p);
+ path_released = true;
+ }
+
+ /* Now we're allowed to do a blocking uptodate check. */
+ ret2 = btrfs_read_extent_buffer(tmp, &check);
+ if (ret2) {
+ ret = ret2;
+ goto out;
+ }
+
+ /*
+ * If the read above didn't mark this buffer up to date,
+ * it will never end up being up to date. Set ret to EIO now
+ * and give up so that our caller doesn't loop forever
+ * on our EAGAINs.
+ */
+ if (unlikely(!extent_buffer_uptodate(tmp))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ret == 0) {
+ ASSERT(!tmp_locked);
+ *eb_ret = tmp;
+ tmp = NULL;
+ }
+out:
+ if (tmp) {
+ if (tmp_locked)
+ btrfs_tree_read_unlock(tmp);
+ if (read_tmp && ret && ret != -EAGAIN)
+ free_extent_buffer_stale(tmp);
+ else
+ free_extent_buffer(tmp);
+ }
+ if (ret && !path_released)
+ btrfs_release_path(p);
- btrfs_release_path(p);
return ret;
}
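/*
 * A minimal, self-contained sketch (user-space C with hypothetical names, not
 * the btrfs implementation) of the -EAGAIN contract described above: when the
 * child read had to drop its locks and release the path, the caller restarts
 * the whole descent from the root instead of continuing with stale state.
 */
#include <errno.h>

/* Pretend child read: needs blocking I/O (and drops locks) on the first try. */
static int demo_read_child(int level, int attempt)
{
	return (level == 0 && attempt == 0) ? -EAGAIN : 0;
}

static int demo_search(int nr_levels)
{
	int attempt = 0;
	int ret;

again:
	for (int level = nr_levels - 1; level >= 0; level--) {
		ret = demo_read_child(level, attempt);
		if (ret == -EAGAIN) {
			attempt++;	/* locks and path were dropped: restart */
			goto again;
		}
		if (ret)
			return ret;
	}
	return 0;
}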
@@ -1567,41 +1723,19 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
struct btrfs_path *p,
int write_lock_level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *b;
- int root_lock;
+ int root_lock = 0;
int level = 0;
- /* We try very hard to do read locks on the root */
- root_lock = BTRFS_READ_LOCK;
-
if (p->search_commit_root) {
- /*
- * The commit roots are read only so we always do read locks,
- * and we always must hold the commit_root_sem when doing
- * searches on them, the only exception is send where we don't
- * want to block transaction commits for a long time, so
- * we need to clone the commit root in order to avoid races
- * with transaction commits that create a snapshot of one of
- * the roots used by a send operation.
- */
- if (p->need_commit_sem) {
- down_read(&fs_info->commit_root_sem);
- b = btrfs_clone_extent_buffer(root->commit_root);
- up_read(&fs_info->commit_root_sem);
- if (!b)
- return ERR_PTR(-ENOMEM);
-
- } else {
- b = root->commit_root;
- atomic_inc(&b->refs);
- }
+ b = root->commit_root;
+ refcount_inc(&b->refs);
level = btrfs_header_level(b);
/*
* Ensure that all callers have set skip_locking when
- * p->search_commit_root = 1.
+ * p->search_commit_root is true.
*/
- ASSERT(p->skip_locking == 1);
+ ASSERT(p->skip_locking);
goto out;
}
@@ -1612,6 +1746,9 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
goto out;
}
+ /* We try very hard to do read locks on the root */
+ root_lock = BTRFS_READ_LOCK;
+
/*
* If the level is set to maximum, we can skip trying to get the read
* lock.
@@ -1621,7 +1758,13 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
* We don't know the level of the root node until we actually
* have it read locked
*/
- b = btrfs_read_lock_root_node(root);
+ if (p->nowait) {
+ b = btrfs_try_read_lock_root_node(root);
+ if (IS_ERR(b))
+ return b;
+ } else {
+ b = btrfs_read_lock_root_node(root);
+ }
level = btrfs_header_level(b);
if (level > write_lock_level)
goto out;
@@ -1638,6 +1781,17 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
level = btrfs_header_level(b);
out:
+ /*
+ * The root may have failed to write out at some point, and thus is no
+ * longer valid. Return an error in this case.
+ */
+ if (unlikely(!extent_buffer_uptodate(b))) {
+ if (root_lock)
+ btrfs_tree_unlock_rw(b, root_lock);
+ free_extent_buffer(b);
+ return ERR_PTR(-EIO);
+ }
+
p->nodes[level] = b;
if (!p->skip_locking)
p->locks[level] = root_lock;
@@ -1647,10 +1801,194 @@ out:
return b;
}
+/*
+ * Replace the extent buffer at the lowest level of the path with a cloned
+ * version. The purpose is to be able to use it safely, after releasing the
+ * commit root semaphore, even if relocation is happening in parallel, the
+ * transaction used for relocation is committed and the extent buffer is
+ * reallocated in the next transaction.
+ *
+ * This is used in a context where the caller does not prevent transaction
+ * commits from happening, either by holding a transaction handle or holding
+ * some lock, while it's doing searches through a commit root.
+ * At the moment it's only used for send operations.
+ */
+static int finish_need_commit_sem_search(struct btrfs_path *path)
+{
+ const int i = path->lowest_level;
+ const int slot = path->slots[i];
+ struct extent_buffer *lowest = path->nodes[i];
+ struct extent_buffer *clone;
+
+ ASSERT(path->need_commit_sem);
+
+ if (!lowest)
+ return 0;
+
+ lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
+
+ clone = btrfs_clone_extent_buffer(lowest);
+ if (!clone)
+ return -ENOMEM;
+
+ btrfs_release_path(path);
+ path->nodes[i] = clone;
+ path->slots[i] = slot;
+
+ return 0;
+}
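/*
 * Rough user-space analogue (pthreads instead of the commit_root_sem,
 * hypothetical names) of the idea above: take a private copy of the shared
 * data while the read lock is still held, so the copy stays valid after the
 * lock is dropped even if a writer then replaces or reuses the original.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct demo_buf { size_t len; unsigned char data[64]; };

static struct demo_buf *demo_clone_then_unlock(pthread_rwlock_t *sem,
						const struct demo_buf *shared)
{
	struct demo_buf *clone;

	/* The caller holds the read lock, so 'shared' cannot change here. */
	clone = malloc(sizeof(*clone));
	if (clone)
		memcpy(clone, shared, sizeof(*clone));
	pthread_rwlock_unlock(sem);
	/* From this point on only the private clone is used. */
	return clone;
}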
+
+static inline int search_for_key_slot(const struct extent_buffer *eb,
+ int search_low_slot,
+ const struct btrfs_key *key,
+ int prev_cmp,
+ int *slot)
+{
+ /*
+ * If a previous call to btrfs_bin_search() on a parent node returned an
+ * exact match (prev_cmp == 0), we can safely assume the target key will
+ * always be at slot 0 on lower levels, since each key pointer
+ * (struct btrfs_key_ptr) refers to the lowest key accessible from the
+ * subtree it points to. Thus we can skip searching lower levels.
+ */
+ if (prev_cmp == 0) {
+ *slot = 0;
+ return 0;
+ }
+
+ return btrfs_bin_search(eb, search_low_slot, key, slot);
+}
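/*
 * Simplified user-space sketch (hypothetical names) of the prev_cmp == 0 short
 * cut above: once an exact match was found on an upper level, every lower node
 * along the path starts with that same key, so the binary search is skipped
 * and slot 0 is used directly.
 */
static int demo_key_slot(const unsigned long *keys, int nr, unsigned long key,
			 int prev_cmp, int *slot)
{
	int lo = 0, hi = nr;

	if (prev_cmp == 0) {	/* exact match seen on an upper level */
		*slot = 0;
		return 0;
	}

	/* Plain lower-bound binary search otherwise. */
	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	*slot = lo;
	return (lo < nr && keys[lo] == key) ? 0 : 1;
}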
+
+static int search_leaf(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ const struct btrfs_key *key,
+ struct btrfs_path *path,
+ int ins_len,
+ int prev_cmp)
+{
+ struct extent_buffer *leaf = path->nodes[0];
+ int leaf_free_space = -1;
+ int search_low_slot = 0;
+ int ret;
+ bool do_bin_search = true;
+
+ /*
+ * If we are doing an insertion, the leaf has enough free space and the
+ * destination slot for the key is not slot 0, then we can unlock our
+ * write lock on the parent, and any other upper nodes, before doing the
+ * binary search on the leaf (with search_for_key_slot()), allowing other
+ * tasks to lock the parent and any other upper nodes.
+ */
+ if (ins_len > 0) {
+ /*
+ * Cache the leaf free space, since we will need it later and it
+ * will not change until then.
+ */
+ leaf_free_space = btrfs_leaf_free_space(leaf);
+
+ /*
+ * !path->locks[1] means we have a single node tree, the leaf is
+ * the root of the tree.
+ */
+ if (path->locks[1] && leaf_free_space >= ins_len) {
+ struct btrfs_disk_key first_key;
+
+ ASSERT(btrfs_header_nritems(leaf) > 0);
+ btrfs_item_key(leaf, &first_key, 0);
+
+ /*
+ * Doing the extra comparison with the first key is cheap,
+ * taking into account that the first key is very likely
+ * already in a cache line because it immediately follows
+ * the extent buffer's header and we have recently accessed
+ * the header's level field.
+ */
+ ret = btrfs_comp_keys(&first_key, key);
+ if (ret < 0) {
+ /*
+ * The first key is smaller than the key we want
+ * to insert, so we are safe to unlock all upper
+ * nodes and we have to do the binary search.
+ *
+ * We do use btrfs_unlock_up_safe() and not
+ * unlock_up() because the latter does not unlock
+ * nodes with a slot of 0 - we can safely unlock
+ * any node even if its slot is 0 since in this
+ * case the key does not end up at slot 0 of the
+ * leaf and there's no need to split the leaf.
+ */
+ btrfs_unlock_up_safe(path, 1);
+ search_low_slot = 1;
+ } else {
+ /*
+ * The first key is >= the key we want to
+ * insert, so we can skip the binary search as
+ * the target key will be at slot 0.
+ *
+ * We cannot unlock upper nodes when the key is
+ * less than the first key, because we will need
+ * to update the key at slot 0 of the parent node
+ * and possibly of other upper nodes too.
+ * If the key matches the first key, then we can
+ * unlock all the upper nodes, using
+ * btrfs_unlock_up_safe() instead of unlock_up()
+ * as stated above.
+ */
+ if (ret == 0)
+ btrfs_unlock_up_safe(path, 1);
+ /*
+ * ret is already 0 or 1, matching the result of
+ * a btrfs_bin_search() call, so there is no need
+ * to adjust it.
+ */
+ do_bin_search = false;
+ path->slots[0] = 0;
+ }
+ }
+ }
+
+ if (do_bin_search) {
+ ret = search_for_key_slot(leaf, search_low_slot, key,
+ prev_cmp, &path->slots[0]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ins_len > 0) {
+ /*
+ * Item key already exists. In this case, if we are allowed to
+ * insert the item (for example, in dir_item case, item key
+ * collision is allowed), it will be merged with the original
+ * item. Only the item size grows, no new btrfs item will be
+ * added. If search_for_extension is not set, ins_len already
+ * accounts for the size of a btrfs_item, so deduct it here to keep
+ * the leaf space check correct.
+ */
+ if (ret == 0 && !path->search_for_extension) {
+ ASSERT(ins_len >= sizeof(struct btrfs_item));
+ ins_len -= sizeof(struct btrfs_item);
+ }
+
+ ASSERT(leaf_free_space >= 0);
+
+ if (leaf_free_space < ins_len) {
+ int ret2;
+
+ ret2 = split_leaf(trans, root, key, path, ins_len, (ret == 0));
+ ASSERT(ret2 <= 0);
+ if (WARN_ON(ret2 > 0))
+ ret2 = -EUCLEAN;
+ if (ret2)
+ ret = ret2;
+ }
+ }
+
+ return ret;
+}
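/*
 * Back-of-the-envelope sketch (hypothetical names and a stand-in item header)
 * of the free space accounting above: for a brand new item, ins_len covers the
 * per-item header plus its data, but when the key already exists only the data
 * grows, so the header size is deducted before comparing against the leaf's
 * free space and deciding whether split_leaf() is needed.
 */
struct demo_item_hdr { unsigned int offset, size; };	/* stand-in header */

static int demo_needs_split(int leaf_free_space, int ins_len, int key_exists)
{
	if (key_exists)
		ins_len -= (int)sizeof(struct demo_item_hdr);

	return leaf_free_space < ins_len;	/* non-zero: the leaf must be split */
}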
/*
- * btrfs_search_slot - look for a key in a tree and perform necessary
- * modifications to preserve tree invariants.
+ * Look for a key in a tree and perform necessary modifications to preserve
+ * tree invariants.
*
* @trans: Handle of transaction, used when modifying the tree
* @p: Holds all btree nodes along the search path
@@ -1683,10 +2021,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key, struct btrfs_path *p,
int ins_len, int cow)
{
+ struct btrfs_fs_info *fs_info;
struct extent_buffer *b;
int slot;
int ret;
- int err;
int level;
int lowest_unlock = 1;
/* everything at write_lock_level or lower must be write locked */
@@ -1695,11 +2033,24 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int min_write_lock_level;
int prev_cmp;
+ if (!root)
+ return -EINVAL;
+
+ fs_info = root->fs_info;
+ might_sleep();
+
lowest_level = p->lowest_level;
WARN_ON(lowest_level && ins_len > 0);
WARN_ON(p->nodes[0] != NULL);
BUG_ON(!cow && ins_len);
+ /*
+ * For now only allow nowait for read only operations. There's no
+ * strict reason why we can't, we just only need it for reads so it's
+ * only implemented for reads.
+ */
+ ASSERT(!p->nowait || !cow);
+
if (ins_len < 0) {
lowest_unlock = 2;
@@ -1724,6 +2075,16 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
min_write_lock_level = write_lock_level;
+ if (p->need_commit_sem) {
+ ASSERT(p->search_commit_root);
+ if (p->nowait) {
+ if (!down_read_trylock(&fs_info->commit_root_sem))
+ return -EAGAIN;
+ } else {
+ down_read(&fs_info->commit_root_sem);
+ }
+ }
+
again:
prev_cmp = -1;
b = btrfs_search_slot_get_root(root, p, write_lock_level);
@@ -1734,6 +2095,7 @@ again:
while (b) {
int dec = 0;
+ int ret2;
level = btrfs_header_level(b);
@@ -1762,25 +2124,20 @@ again:
}
if (last_level)
- err = btrfs_cow_block(trans, root, b, NULL, 0,
- &b,
- BTRFS_NESTING_COW);
+ ret2 = btrfs_cow_block(trans, root, b, NULL, 0,
+ &b, BTRFS_NESTING_COW);
else
- err = btrfs_cow_block(trans, root, b,
- p->nodes[level + 1],
- p->slots[level + 1], &b,
- BTRFS_NESTING_COW);
- if (err) {
- ret = err;
+ ret2 = btrfs_cow_block(trans, root, b,
+ p->nodes[level + 1],
+ p->slots[level + 1], &b,
+ BTRFS_NESTING_COW);
+ if (ret2) {
+ ret = ret2;
goto done;
}
}
cow_done:
p->nodes[level] = b;
- /*
- * Leave path with blocking locks to avoid massive
- * lock context switch, this is made on purpose.
- */
/*
* we have a lock on b and as long as we aren't changing
@@ -1802,73 +2159,33 @@ cow_done:
}
}
- /*
- * If btrfs_bin_search returns an exact match (prev_cmp == 0)
- * we can safely assume the target key will always be in slot 0
- * on lower levels due to the invariants BTRFS' btree provides,
- * namely that a btrfs_key_ptr entry always points to the
- * lowest key in the child node, thus we can skip searching
- * lower levels
- */
- if (prev_cmp == 0) {
- slot = 0;
- ret = 0;
- } else {
- ret = btrfs_bin_search(b, key, &slot);
- prev_cmp = ret;
- if (ret < 0)
- goto done;
- }
-
if (level == 0) {
- p->slots[level] = slot;
- /*
- * Item key already exists. In this case, if we are
- * allowed to insert the item (for example, in dir_item
- * case, item key collision is allowed), it will be
- * merged with the original item. Only the item size
- * grows, no new btrfs item will be added. If
- * search_for_extension is not set, ins_len already
- * accounts the size btrfs_item, deduct it here so leaf
- * space check will be correct.
- */
- if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
- ASSERT(ins_len >= sizeof(struct btrfs_item));
- ins_len -= sizeof(struct btrfs_item);
- }
- if (ins_len > 0 &&
- btrfs_leaf_free_space(b) < ins_len) {
- if (write_lock_level < 1) {
- write_lock_level = 1;
- btrfs_release_path(p);
- goto again;
- }
-
- err = split_leaf(trans, root, key,
- p, ins_len, ret == 0);
+ if (ins_len > 0)
+ ASSERT(write_lock_level >= 1);
- BUG_ON(err > 0);
- if (err) {
- ret = err;
- goto done;
- }
- }
+ ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
if (!p->search_for_split)
unlock_up(p, level, lowest_unlock,
min_write_lock_level, NULL);
goto done;
}
+
+ ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
+ if (ret < 0)
+ goto done;
+ prev_cmp = ret;
+
if (ret && slot > 0) {
dec = 1;
slot--;
}
p->slots[level] = slot;
- err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
- &write_lock_level);
- if (err == -EAGAIN)
+ ret2 = setup_nodes_for_search(trans, root, p, b, level, ins_len,
+ &write_lock_level);
+ if (ret2 == -EAGAIN)
goto again;
- if (err) {
- ret = err;
+ if (ret2) {
+ ret = ret2;
goto done;
}
b = p->nodes[level];
@@ -1894,21 +2211,32 @@ cow_done:
goto done;
}
- err = read_block_for_search(root, p, &b, level, slot, key);
- if (err == -EAGAIN)
+ ret2 = read_block_for_search(root, p, &b, slot, key);
+ if (ret2 == -EAGAIN && !p->nowait)
goto again;
- if (err) {
- ret = err;
+ if (ret2) {
+ ret = ret2;
goto done;
}
if (!p->skip_locking) {
level = btrfs_header_level(b);
+
+ btrfs_maybe_reset_lockdep_class(root, b);
+
if (level <= write_lock_level) {
btrfs_tree_lock(b);
p->locks[level] = BTRFS_WRITE_LOCK;
} else {
- btrfs_tree_read_lock(b);
+ if (p->nowait) {
+ if (!btrfs_try_tree_read_lock(b)) {
+ free_extent_buffer(b);
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ btrfs_tree_read_lock(b);
+ }
p->locks[level] = BTRFS_READ_LOCK;
}
p->nodes[level] = b;
@@ -1918,6 +2246,16 @@ cow_done:
done:
if (ret < 0 && !p->skip_release_on_error)
btrfs_release_path(p);
+
+ if (p->need_commit_sem) {
+ int ret2;
+
+ ret2 = finish_need_commit_sem_search(p);
+ up_read(&fs_info->commit_root_sem);
+ if (ret2)
+ ret = ret2;
+ }
+
return ret;
}
ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
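/*
 * Hedged usage sketch (assumes the btrfs internal headers, a read-only lookup
 * with no transaction, and trimmed error handling): the common calling pattern
 * is to allocate a path, search with ins_len == 0 and cow == 0, and interpret
 * 0 as "exact key found at path->slots[0]", 1 as "not found, the slot points
 * at the insert position", and a negative value as an error.
 */
static int demo_lookup(struct btrfs_root *root, const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* On ret == 0 the item lives at path->nodes[0], path->slots[0]. */

	btrfs_free_path(path);
	return ret;
}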
@@ -1940,13 +2278,13 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
struct extent_buffer *b;
int slot;
int ret;
- int err;
int level;
int lowest_unlock = 1;
u8 lowest_level = 0;
lowest_level = p->lowest_level;
WARN_ON(p->nodes[0] != NULL);
+ ASSERT(!p->nowait);
if (p->search_commit_root) {
BUG_ON(time_seq);
@@ -1955,7 +2293,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
again:
b = btrfs_get_old_root(root, time_seq);
- if (!b) {
+ if (unlikely(!b)) {
ret = -EIO;
goto done;
}
@@ -1964,6 +2302,7 @@ again:
while (b) {
int dec = 0;
+ int ret2;
level = btrfs_header_level(b);
p->nodes[level] = b;
@@ -1976,7 +2315,7 @@ again:
*/
btrfs_unlock_up_safe(p, level + 1);
- ret = btrfs_bin_search(b, key, &slot);
+ ret = btrfs_bin_search(b, 0, key, &slot);
if (ret < 0)
goto done;
@@ -1999,17 +2338,17 @@ again:
goto done;
}
- err = read_block_for_search(root, p, &b, level, slot, key);
- if (err == -EAGAIN)
+ ret2 = read_block_for_search(root, p, &b, slot, key);
+ if (ret2 == -EAGAIN && !p->nowait)
goto again;
- if (err) {
- ret = err;
+ if (ret2) {
+ ret = ret2;
goto done;
}
level = btrfs_header_level(b);
btrfs_tree_read_lock(b);
- b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
+ b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq);
if (!b) {
ret = -ENOMEM;
goto done;
@@ -2026,6 +2365,87 @@ done:
}
/*
+ * Search the tree again to find a leaf with smaller keys.
+ * Returns 0 if it found something.
+ * Returns 1 if there are no smaller keys.
+ * Returns < 0 on error.
+ *
+ * This may release the path, and so you may lose any locks held at the
+ * time you call it.
+ */
+static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+{
+ struct btrfs_key key;
+ struct btrfs_key orig_key;
+ struct btrfs_disk_key found_key;
+ int ret;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
+ orig_key = key;
+
+ if (key.offset > 0) {
+ key.offset--;
+ } else if (key.type > 0) {
+ key.type--;
+ key.offset = (u64)-1;
+ } else if (key.objectid > 0) {
+ key.objectid--;
+ key.type = (u8)-1;
+ key.offset = (u64)-1;
+ } else {
+ return 1;
+ }
+
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * Previous key not found. Even if we were at slot 0 of the leaf we had
+ * before releasing the path and calling btrfs_search_slot(), we now may
+ * be in a slot pointing to the same original key - this can happen if
+ * after we released the path, one or more items were moved from a
+ * sibling leaf into the front of the leaf we had due to an insertion
+ * (see push_leaf_right()).
+ * If we hit this case and our slot is > 0, just decrement the slot
+ * so that the caller does not process the same key again (doing so
+ * may or may not break the caller, depending on its logic).
+ */
+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
+ btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
+ ret = btrfs_comp_keys(&found_key, &orig_key);
+ if (ret == 0) {
+ if (path->slots[0] > 0) {
+ path->slots[0]--;
+ return 0;
+ }
+ /*
+ * At slot 0 with the same key as before, which means orig_key is
+ * the lowest, leftmost key in the tree. We're done.
+ */
+ return 1;
+ }
+ }
+
+ btrfs_item_key(path->nodes[0], &found_key, 0);
+ ret = btrfs_comp_keys(&found_key, &key);
+ /*
+ * We might have had an item with the previous key in the tree right
+ * before we released our path. And after we released our path, that
+ * item might have been pushed to the first slot (0) of the leaf we
+ * were holding due to a tree balance. Alternatively, an item with the
+ * previous key can exist as the only element of a leaf (big fat item).
+ * Therefore account for these 2 cases, so that our callers (like
+ * btrfs_previous_item) don't miss an existing item with a key matching
+ * the previous key we computed above.
+ */
+ if (ret <= 0)
+ return 0;
+ return 1;
+}
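/*
 * Standalone sketch (user-space C, hypothetical types) of the key decrement
 * done above: the predecessor of (objectid, type, offset) borrows from the
 * next field up when the lower fields are already zero, and the all-zero key
 * has no predecessor at all.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_key { uint64_t objectid; uint8_t type; uint64_t offset; };

static bool demo_prev_key(struct demo_key *key)
{
	if (key->offset > 0) {
		key->offset--;
	} else if (key->type > 0) {
		key->type--;
		key->offset = UINT64_MAX;
	} else if (key->objectid > 0) {
		key->objectid--;
		key->type = UINT8_MAX;
		key->offset = UINT64_MAX;
	} else {
		return false;	/* already the smallest possible key */
	}

	return true;
}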
+
+/*
* helper to use instead of btrfs_search_slot() if no exact match is needed
* but instead the next or previous item should be returned.
* When find_higher is true, the next higher item is returned, the next lower
@@ -2124,6 +2544,32 @@ int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
}
/*
+ * Search for a valid slot for the given path.
+ *
+ * @root: The root node of the tree.
+ * @key: Will contain a valid item if found.
+ * @path: The starting point to validate the slot.
+ *
+ * Return: 0 if the item is valid
+ * 1 if not found
+ * <0 if error.
+ */
+int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
+ struct btrfs_path *path)
+{
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ int ret;
+
+ ret = btrfs_next_leaf(root, path);
+ if (ret)
+ return ret;
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
+ return 0;
+}
+
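/*
 * Hedged usage sketch (assumes the btrfs internal headers, a starting position
 * set up by a prior search, and trimmed error handling): iterate forward by
 * bumping the slot and letting btrfs_get_next_valid_item() step to the next
 * leaf once the slot runs off the end of the current one.
 */
static int demo_walk_items(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	int ret;

	for (;;) {
		ret = btrfs_get_next_valid_item(root, &key, path);
		if (ret)		/* 1: no more items, < 0: error */
			return ret < 0 ? ret : 0;

		/* ... use the item at path->nodes[0], path->slots[0] ... */

		path->slots[0]++;
	}
}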
+/*
* adjust the pointers going up the tree, starting at level
* making sure the right key of each node points to 'key'.
* This is used after shifting pointers to the left, so it stops
@@ -2131,8 +2577,9 @@ int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
* higher levels
*
*/
-static void fixup_low_keys(struct btrfs_path *path,
- struct btrfs_disk_key *key, int level)
+static void fixup_low_keys(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path,
+ const struct btrfs_disk_key *key, int level)
{
int i;
struct extent_buffer *t;
@@ -2145,10 +2592,10 @@ static void fixup_low_keys(struct btrfs_path *path,
break;
t = path->nodes[i];
ret = btrfs_tree_mod_log_insert_key(t, tslot,
- BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
+ BTRFS_MOD_LOG_KEY_REPLACE);
BUG_ON(ret < 0);
btrfs_set_node_key(t, key, tslot);
- btrfs_mark_buffer_dirty(path->nodes[i]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[i]);
if (tslot != 0)
break;
}
@@ -2160,10 +2607,11 @@ static void fixup_low_keys(struct btrfs_path *path,
* This function isn't completely safe. It's the caller's responsibility
* that the new key won't break the order
*/
-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path,
const struct btrfs_key *new_key)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_disk_key disk_key;
struct extent_buffer *eb;
int slot;
@@ -2172,38 +2620,36 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
slot = path->slots[0];
if (slot > 0) {
btrfs_item_key(eb, &disk_key, slot - 1);
- if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
+ if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) {
+ btrfs_print_leaf(eb);
btrfs_crit(fs_info,
- "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
+ "slot %u key " BTRFS_KEY_FMT " new key " BTRFS_KEY_FMT,
slot, btrfs_disk_key_objectid(&disk_key),
btrfs_disk_key_type(&disk_key),
btrfs_disk_key_offset(&disk_key),
- new_key->objectid, new_key->type,
- new_key->offset);
- btrfs_print_leaf(eb);
+ BTRFS_KEY_FMT_VALUE(new_key));
BUG();
}
}
if (slot < btrfs_header_nritems(eb) - 1) {
btrfs_item_key(eb, &disk_key, slot + 1);
- if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
+ if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) {
+ btrfs_print_leaf(eb);
btrfs_crit(fs_info,
- "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
+ "slot %u key " BTRFS_KEY_FMT " new key " BTRFS_KEY_FMT,
slot, btrfs_disk_key_objectid(&disk_key),
btrfs_disk_key_type(&disk_key),
btrfs_disk_key_offset(&disk_key),
- new_key->objectid, new_key->type,
- new_key->offset);
- btrfs_print_leaf(eb);
+ BTRFS_KEY_FMT_VALUE(new_key));
BUG();
}
}
btrfs_cpu_key_to_disk(&disk_key, new_key);
btrfs_set_item_key(eb, &disk_key, slot);
- btrfs_mark_buffer_dirty(eb);
+ btrfs_mark_buffer_dirty(trans, eb);
if (slot == 0)
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
/*
@@ -2226,8 +2672,8 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
* is correct, we only need to bother the last key of @left and the first
* key of @right.
*/
-static bool check_sibling_keys(struct extent_buffer *left,
- struct extent_buffer *right)
+static bool check_sibling_keys(const struct extent_buffer *left,
+ const struct extent_buffer *right)
{
struct btrfs_key left_last;
struct btrfs_key right_first;
@@ -2247,12 +2693,15 @@ static bool check_sibling_keys(struct extent_buffer *left,
btrfs_item_key_to_cpu(right, &right_first, 0);
}
- if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
+ if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) {
+ btrfs_crit(left->fs_info, "left extent buffer:");
+ btrfs_print_tree(left, false);
+ btrfs_crit(left->fs_info, "right extent buffer:");
+ btrfs_print_tree(right, false);
btrfs_crit(left->fs_info,
-"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
- left_last.objectid, left_last.type,
- left_last.offset, right_first.objectid,
- right_first.type, right_first.offset);
+"bad key order, sibling blocks, left last " BTRFS_KEY_FMT " right first " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&left_last),
+ BTRFS_KEY_FMT_VALUE(&right_first));
return true;
}
return false;
@@ -2267,7 +2716,7 @@ static bool check_sibling_keys(struct extent_buffer *left,
*/
static int push_node_left(struct btrfs_trans_handle *trans,
struct extent_buffer *dst,
- struct extent_buffer *src, int empty)
+ struct extent_buffer *src, bool empty)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int push_items = 0;
@@ -2303,35 +2752,35 @@ static int push_node_left(struct btrfs_trans_handle *trans,
push_items = min(src_nritems - 8, push_items);
/* dst is the left eb, src is the middle eb */
- if (check_sibling_keys(dst, src)) {
+ if (unlikely(check_sibling_keys(dst, src))) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
return ret;
}
ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(dst, src,
- btrfs_node_key_ptr_offset(dst_nritems),
- btrfs_node_key_ptr_offset(0),
+ btrfs_node_key_ptr_offset(dst, dst_nritems),
+ btrfs_node_key_ptr_offset(src, 0),
push_items * sizeof(struct btrfs_key_ptr));
if (push_items < src_nritems) {
/*
- * Don't call btrfs_tree_mod_log_insert_move() here, key removal
- * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
+ * btrfs_tree_mod_log_eb_copy handles logging the move, so we
+ * don't need to do an explicit tree mod log operation for it.
*/
- memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
- btrfs_node_key_ptr_offset(push_items),
+ memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0),
+ btrfs_node_key_ptr_offset(src, push_items),
(src_nritems - push_items) *
sizeof(struct btrfs_key_ptr));
}
btrfs_set_header_nritems(src, src_nritems - push_items);
btrfs_set_header_nritems(dst, dst_nritems + push_items);
- btrfs_mark_buffer_dirty(src);
- btrfs_mark_buffer_dirty(dst);
+ btrfs_mark_buffer_dirty(trans, src);
+ btrfs_mark_buffer_dirty(trans, dst);
return ret;
}
@@ -2377,34 +2826,37 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
push_items = max_push;
/* dst is the right eb, src is the middle eb */
- if (check_sibling_keys(src, dst)) {
+ if (unlikely(check_sibling_keys(src, dst))) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
return ret;
}
- ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
- BUG_ON(ret < 0);
- memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
- btrfs_node_key_ptr_offset(0),
+
+ /*
+ * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't
+ * need to do an explicit tree mod log operation for it.
+ */
+ memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items),
+ btrfs_node_key_ptr_offset(dst, 0),
(dst_nritems) *
sizeof(struct btrfs_key_ptr));
ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
push_items);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(dst, src,
- btrfs_node_key_ptr_offset(0),
- btrfs_node_key_ptr_offset(src_nritems - push_items),
+ btrfs_node_key_ptr_offset(dst, 0),
+ btrfs_node_key_ptr_offset(src, src_nritems - push_items),
push_items * sizeof(struct btrfs_key_ptr));
btrfs_set_header_nritems(src, src_nritems - push_items);
btrfs_set_header_nritems(dst, dst_nritems + push_items);
- btrfs_mark_buffer_dirty(src);
- btrfs_mark_buffer_dirty(dst);
+ btrfs_mark_buffer_dirty(trans, src);
+ btrfs_mark_buffer_dirty(trans, dst);
return ret;
}
@@ -2420,7 +2872,6 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 lower_gen;
struct extent_buffer *lower;
struct extent_buffer *c;
@@ -2437,13 +2888,13 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
- c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
&lower_key, level, root->node->start, 0,
- BTRFS_NESTING_NEW_ROOT);
+ 0, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(c))
return PTR_ERR(c);
- root_add_used(root, fs_info->nodesize);
+ root_add_used_bytes(root);
btrfs_set_header_nritems(c, 1);
btrfs_set_node_key(c, &lower_key, 0);
@@ -2453,18 +2904,28 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_set_node_ptr_generation(c, 0, lower_gen);
- btrfs_mark_buffer_dirty(c);
+ btrfs_mark_buffer_dirty(trans, c);
old = root->node;
ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
- BUG_ON(ret < 0);
+ if (ret < 0) {
+ int ret2;
+
+ btrfs_clear_buffer_dirty(trans, c);
+ ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
+ if (unlikely(ret2 < 0))
+ btrfs_abort_transaction(trans, ret2);
+ btrfs_tree_unlock(c);
+ free_extent_buffer(c);
+ return ret;
+ }
rcu_assign_pointer(root->node, c);
/* the super has an extra ref to root->node */
free_extent_buffer(old);
add_root_to_dirty_list(root);
- atomic_inc(&c->refs);
+ refcount_inc(&c->refs);
path->nodes[level] = c;
path->locks[level] = BTRFS_WRITE_LOCK;
path->slots[level] = 0;
@@ -2478,10 +2939,10 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
* slot and level indicate where you want the key to go, and
* blocknr is the block the key points to.
*/
-static void insert_ptr(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_disk_key *key, u64 bytenr,
- int slot, int level)
+static int insert_ptr(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path,
+ const struct btrfs_disk_key *key, u64 bytenr,
+ int slot, int level)
{
struct extent_buffer *lower;
int nritems;
@@ -2497,24 +2958,32 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
if (level) {
ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
slot, nritems - slot);
- BUG_ON(ret < 0);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
}
memmove_extent_buffer(lower,
- btrfs_node_key_ptr_offset(slot + 1),
- btrfs_node_key_ptr_offset(slot),
+ btrfs_node_key_ptr_offset(lower, slot + 1),
+ btrfs_node_key_ptr_offset(lower, slot),
(nritems - slot) * sizeof(struct btrfs_key_ptr));
}
if (level) {
ret = btrfs_tree_mod_log_insert_key(lower, slot,
- BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
- BUG_ON(ret < 0);
+ BTRFS_MOD_LOG_KEY_ADD);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
}
btrfs_set_node_key(lower, key, slot);
btrfs_set_node_blockptr(lower, slot, bytenr);
WARN_ON(trans->transid == 0);
btrfs_set_node_ptr_generation(lower, slot, trans->transid);
btrfs_set_header_nritems(lower, nritems + 1);
- btrfs_mark_buffer_dirty(lower);
+ btrfs_mark_buffer_dirty(trans, lower);
+
+ return 0;
}
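/*
 * Minimal user-space sketch (hypothetical array of key pointers) of the slot
 * insertion above: entries from 'slot' onward are shifted one position to the
 * right with memmove() before the new entry is written and the item count is
 * bumped; the caller guarantees there is room for one more entry.
 */
#include <string.h>

struct demo_ptr { unsigned long long key, blocknr; };

static void demo_insert_ptr(struct demo_ptr *ptrs, unsigned int *nritems,
			    unsigned int slot, struct demo_ptr new_ptr)
{
	if (slot < *nritems)
		memmove(&ptrs[slot + 1], &ptrs[slot],
			(*nritems - slot) * sizeof(*ptrs));

	ptrs[slot] = new_ptr;
	(*nritems)++;
}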
/*
@@ -2568,32 +3037,39 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
- split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
&disk_key, level, c->start, 0,
- BTRFS_NESTING_SPLIT);
+ 0, BTRFS_NESTING_SPLIT);
if (IS_ERR(split))
return PTR_ERR(split);
- root_add_used(root, fs_info->nodesize);
+ root_add_used_bytes(root);
ASSERT(btrfs_header_level(c) == level);
ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
- if (ret) {
+ if (unlikely(ret)) {
+ btrfs_tree_unlock(split);
+ free_extent_buffer(split);
btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(split, c,
- btrfs_node_key_ptr_offset(0),
- btrfs_node_key_ptr_offset(mid),
+ btrfs_node_key_ptr_offset(split, 0),
+ btrfs_node_key_ptr_offset(c, mid),
(c_nritems - mid) * sizeof(struct btrfs_key_ptr));
btrfs_set_header_nritems(split, c_nritems - mid);
btrfs_set_header_nritems(c, mid);
- btrfs_mark_buffer_dirty(c);
- btrfs_mark_buffer_dirty(split);
+ btrfs_mark_buffer_dirty(trans, c);
+ btrfs_mark_buffer_dirty(trans, split);
- insert_ptr(trans, path, &disk_key, split->start,
- path->slots[level + 1] + 1, level + 1);
+ ret = insert_ptr(trans, path, &disk_key, split->start,
+ path->slots[level + 1] + 1, level + 1);
+ if (ret < 0) {
+ btrfs_tree_unlock(split);
+ free_extent_buffer(split);
+ return ret;
+ }
if (path->slots[level] >= mid) {
path->slots[level] -= mid;
@@ -2613,21 +3089,16 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
* and nr indicate which items in the leaf to check. This totals up the
* space used both by the item structs and the item data
*/
-static int leaf_space_used(struct extent_buffer *l, int start, int nr)
+static int leaf_space_used(const struct extent_buffer *l, int start, int nr)
{
- struct btrfs_item *start_item;
- struct btrfs_item *end_item;
int data_len;
int nritems = btrfs_header_nritems(l);
int end = min(nritems, start + nr) - 1;
if (!nr)
return 0;
- start_item = btrfs_item_nr(start);
- end_item = btrfs_item_nr(end);
- data_len = btrfs_item_offset(l, start_item) +
- btrfs_item_size(l, start_item);
- data_len = data_len - btrfs_item_offset(l, end_item);
+ data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
+ data_len = data_len - btrfs_item_offset(l, end);
data_len += sizeof(struct btrfs_item) * nr;
WARN_ON(data_len < 0);
return data_len;
@@ -2638,14 +3109,14 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
* the start of the leaf data. IOW, how much room
* the leaf has left for both items and data
*/
-noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
+int btrfs_leaf_free_space(const struct extent_buffer *leaf)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
int nritems = btrfs_header_nritems(leaf);
int ret;
ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_crit(fs_info,
"leaf free space ret %d, leaf data size %lu, used %d nritems %d",
ret,
@@ -2659,8 +3130,9 @@ noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
* min slot controls the lowest index we're willing to push to the
* right. We'll push up to and including min_slot, but no lower
*/
-static noinline int __push_leaf_right(struct btrfs_path *path,
- int data_size, int empty,
+static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ int data_size, bool empty,
struct extent_buffer *right,
int free_space, u32 left_nritems,
u32 min_slot)
@@ -2668,13 +3140,11 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
struct btrfs_fs_info *fs_info = right->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *upper = path->nodes[1];
- struct btrfs_map_token token;
struct btrfs_disk_key disk_key;
int slot;
u32 i;
int push_space = 0;
int push_items = 0;
- struct btrfs_item *item;
u32 nr;
u32 right_nritems;
u32 data_end;
@@ -2691,8 +3161,6 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
slot = path->slots[1];
i = left_nritems - 1;
while (i >= nr) {
- item = btrfs_item_nr(i);
-
if (!empty && push_items > 0) {
if (path->slots[0] > i)
break;
@@ -2707,12 +3175,13 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
if (path->slots[0] == i)
push_space += data_size;
- this_item_size = btrfs_item_size(left, item);
- if (this_item_size + sizeof(*item) + push_space > free_space)
+ this_item_size = btrfs_item_size(left, i);
+ if (this_item_size + sizeof(struct btrfs_item) +
+ push_space > free_space)
break;
push_items++;
- push_space += this_item_size + sizeof(*item);
+ push_space += this_item_size + sizeof(struct btrfs_item);
if (i == 0)
break;
i--;
@@ -2726,63 +3195,51 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
/* push left to right */
right_nritems = btrfs_header_nritems(right);
- push_space = btrfs_item_end_nr(left, left_nritems - push_items);
+ push_space = btrfs_item_data_end(left, left_nritems - push_items);
push_space -= leaf_data_end(left);
/* make room in the right data area */
data_end = leaf_data_end(right);
- memmove_extent_buffer(right,
- BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
- BTRFS_LEAF_DATA_OFFSET + data_end,
- BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
+ memmove_leaf_data(right, data_end - push_space, data_end,
+ BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
/* copy from the left data area */
- copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
- BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
- BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
- push_space);
+ copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
+ leaf_data_end(left), push_space);
- memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
- btrfs_item_nr_offset(0),
- right_nritems * sizeof(struct btrfs_item));
+ memmove_leaf_items(right, push_items, 0, right_nritems);
/* copy the items from left to right */
- copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
- btrfs_item_nr_offset(left_nritems - push_items),
- push_items * sizeof(struct btrfs_item));
+ copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
/* update the item pointers */
- btrfs_init_map_token(&token, right);
right_nritems += push_items;
btrfs_set_header_nritems(right, right_nritems);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
- item = btrfs_item_nr(i);
- push_space -= btrfs_token_item_size(&token, item);
- btrfs_set_token_item_offset(&token, item, push_space);
+ push_space -= btrfs_item_size(right, i);
+ btrfs_set_item_offset(right, i, push_space);
}
left_nritems -= push_items;
btrfs_set_header_nritems(left, left_nritems);
if (left_nritems)
- btrfs_mark_buffer_dirty(left);
+ btrfs_mark_buffer_dirty(trans, left);
else
- btrfs_clean_tree_block(left);
+ btrfs_clear_buffer_dirty(trans, left);
- btrfs_mark_buffer_dirty(right);
+ btrfs_mark_buffer_dirty(trans, right);
btrfs_item_key(right, &disk_key, 0);
btrfs_set_node_key(upper, &disk_key, slot + 1);
- btrfs_mark_buffer_dirty(upper);
+ btrfs_mark_buffer_dirty(trans, upper);
/* then fixup the leaf pointer in the path */
if (path->slots[0] >= left_nritems) {
path->slots[0] -= left_nritems;
- if (btrfs_header_nritems(path->nodes[0]) == 0)
- btrfs_clean_tree_block(path->nodes[0]);
- btrfs_tree_unlock(path->nodes[0]);
- free_extent_buffer(path->nodes[0]);
+ btrfs_tree_unlock(left);
+ free_extent_buffer(left);
path->nodes[0] = right;
path->slots[1] += 1;
} else {
@@ -2810,7 +3267,7 @@ out_unlock:
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path,
int min_data_size, int data_size,
- int empty, u32 min_slot)
+ bool empty, u32 min_slot)
{
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *right;
@@ -2831,35 +3288,27 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_write_locked(path->nodes[1]);
right = btrfs_read_node_slot(upper, slot + 1);
- /*
- * slot + 1 is not valid or we fail to read the right node,
- * no big deal, just return.
- */
if (IS_ERR(right))
- return 1;
+ return PTR_ERR(right);
- __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
+ btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
free_space = btrfs_leaf_free_space(right);
if (free_space < data_size)
goto out_unlock;
- /* cow and double check */
ret = btrfs_cow_block(trans, root, right, upper,
slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
if (ret)
goto out_unlock;
- free_space = btrfs_leaf_free_space(right);
- if (free_space < data_size)
- goto out_unlock;
-
left_nritems = btrfs_header_nritems(left);
if (left_nritems == 0)
goto out_unlock;
- if (check_sibling_keys(left, right)) {
+ if (unlikely(check_sibling_keys(left, right))) {
ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
btrfs_tree_unlock(right);
free_extent_buffer(right);
return ret;
@@ -2877,8 +3326,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
return 0;
}
- return __push_leaf_right(path, min_data_size, empty,
- right, free_space, left_nritems, min_slot);
+ return __push_leaf_right(trans, path, min_data_size, empty, right,
+ free_space, left_nritems, min_slot);
out_unlock:
btrfs_tree_unlock(right);
free_extent_buffer(right);
@@ -2893,8 +3342,9 @@ out_unlock:
* item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
* items
*/
-static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
- int empty, struct extent_buffer *left,
+static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, int data_size,
+ bool empty, struct extent_buffer *left,
int free_space, u32 right_nritems,
u32 max_slot)
{
@@ -2904,13 +3354,11 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
int i;
int push_space = 0;
int push_items = 0;
- struct btrfs_item *item;
u32 old_left_nritems;
u32 nr;
int ret = 0;
u32 this_item_size;
u32 old_left_item_size;
- struct btrfs_map_token token;
if (empty)
nr = min(right_nritems, max_slot);
@@ -2918,8 +3366,6 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
nr = min(right_nritems - 1, max_slot);
for (i = 0; i < nr; i++) {
- item = btrfs_item_nr(i);
-
if (!empty && push_items > 0) {
if (path->slots[0] < i)
break;
@@ -2934,12 +3380,13 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
if (path->slots[0] == i)
push_space += data_size;
- this_item_size = btrfs_item_size(right, item);
- if (this_item_size + sizeof(*item) + push_space > free_space)
+ this_item_size = btrfs_item_size(right, i);
+ if (this_item_size + sizeof(struct btrfs_item) + push_space >
+ free_space)
break;
push_items++;
- push_space += this_item_size + sizeof(*item);
+ push_space += this_item_size + sizeof(struct btrfs_item);
}
if (push_items == 0) {
@@ -2949,79 +3396,68 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
WARN_ON(!empty && push_items == btrfs_header_nritems(right));
/* push data from right to left */
- copy_extent_buffer(left, right,
- btrfs_item_nr_offset(btrfs_header_nritems(left)),
- btrfs_item_nr_offset(0),
- push_items * sizeof(struct btrfs_item));
+ copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
- btrfs_item_offset_nr(right, push_items - 1);
+ btrfs_item_offset(right, push_items - 1);
- copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
- leaf_data_end(left) - push_space,
- BTRFS_LEAF_DATA_OFFSET +
- btrfs_item_offset_nr(right, push_items - 1),
- push_space);
+ copy_leaf_data(left, right, leaf_data_end(left) - push_space,
+ btrfs_item_offset(right, push_items - 1), push_space);
old_left_nritems = btrfs_header_nritems(left);
BUG_ON(old_left_nritems <= 0);
- btrfs_init_map_token(&token, left);
- old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
+ old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
u32 ioff;
- item = btrfs_item_nr(i);
-
- ioff = btrfs_token_item_offset(&token, item);
- btrfs_set_token_item_offset(&token, item,
+ ioff = btrfs_item_offset(left, i);
+ btrfs_set_item_offset(left, i,
ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
}
btrfs_set_header_nritems(left, old_left_nritems + push_items);
/* fixup right node */
- if (push_items > right_nritems)
- WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
- right_nritems);
+ if (unlikely(push_items > right_nritems)) {
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ btrfs_crit(fs_info, "push items (%d) > right leaf items (%u)",
+ push_items, right_nritems);
+ goto out;
+ }
if (push_items < right_nritems) {
- push_space = btrfs_item_offset_nr(right, push_items - 1) -
+ push_space = btrfs_item_offset(right, push_items - 1) -
leaf_data_end(right);
- memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
- BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
- BTRFS_LEAF_DATA_OFFSET +
- leaf_data_end(right), push_space);
+ memmove_leaf_data(right,
+ BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
+ leaf_data_end(right), push_space);
- memmove_extent_buffer(right, btrfs_item_nr_offset(0),
- btrfs_item_nr_offset(push_items),
- (btrfs_header_nritems(right) - push_items) *
- sizeof(struct btrfs_item));
+ memmove_leaf_items(right, 0, push_items,
+ btrfs_header_nritems(right) - push_items);
}
- btrfs_init_map_token(&token, right);
right_nritems -= push_items;
btrfs_set_header_nritems(right, right_nritems);
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
- item = btrfs_item_nr(i);
-
- push_space = push_space - btrfs_token_item_size(&token, item);
- btrfs_set_token_item_offset(&token, item, push_space);
+ push_space = push_space - btrfs_item_size(right, i);
+ btrfs_set_item_offset(right, i, push_space);
}
- btrfs_mark_buffer_dirty(left);
+ btrfs_mark_buffer_dirty(trans, left);
if (right_nritems)
- btrfs_mark_buffer_dirty(right);
+ btrfs_mark_buffer_dirty(trans, right);
else
- btrfs_clean_tree_block(right);
+ btrfs_clear_buffer_dirty(trans, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
path->slots[0] += old_left_nritems;
- btrfs_tree_unlock(path->nodes[0]);
- free_extent_buffer(path->nodes[0]);
+ btrfs_tree_unlock(right);
+ free_extent_buffer(right);
path->nodes[0] = left;
path->slots[1] -= 1;
} else {
@@ -3069,14 +3505,10 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_write_locked(path->nodes[1]);
left = btrfs_read_node_slot(path->nodes[1], slot - 1);
- /*
- * slot - 1 is not valid or we fail to read the left node,
- * no big deal, just return.
- */
if (IS_ERR(left))
- return 1;
+ return PTR_ERR(left);
- __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
+ btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
free_space = btrfs_leaf_free_space(left);
if (free_space < data_size) {
@@ -3084,7 +3516,6 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
goto out;
}
- /* cow and double check */
ret = btrfs_cow_block(trans, root, left,
path->nodes[1], slot - 1, &left,
BTRFS_NESTING_LEFT_COW);
@@ -3095,19 +3526,13 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
goto out;
}
- free_space = btrfs_leaf_free_space(left);
- if (free_space < data_size) {
- ret = 1;
- goto out;
- }
-
- if (check_sibling_keys(left, right)) {
+ if (unlikely(check_sibling_keys(left, right))) {
ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
- return __push_leaf_left(path, min_data_size,
- empty, left, free_space, right_nritems,
- max_slot);
+ return __push_leaf_left(trans, path, min_data_size, empty, left,
+ free_space, right_nritems, max_slot);
out:
btrfs_tree_unlock(left);
free_extent_buffer(left);
@@ -3118,49 +3543,45 @@ out:
* split the path's leaf in two, making sure there is at least data_size
* available for the resulting leaf level of the path.
*/
-static noinline void copy_for_split(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct extent_buffer *l,
- struct extent_buffer *right,
- int slot, int mid, int nritems)
+static noinline int copy_for_split(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct extent_buffer *l,
+ struct extent_buffer *right,
+ int slot, int mid, int nritems)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int data_copy_size;
int rt_data_off;
int i;
+ int ret;
struct btrfs_disk_key disk_key;
- struct btrfs_map_token token;
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
- data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
+ data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
- copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
- btrfs_item_nr_offset(mid),
- nritems * sizeof(struct btrfs_item));
+ copy_leaf_items(right, l, 0, mid, nritems);
- copy_extent_buffer(right, l,
- BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
- data_copy_size, BTRFS_LEAF_DATA_OFFSET +
- leaf_data_end(l), data_copy_size);
+ copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size,
+ leaf_data_end(l), data_copy_size);
- rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
+ rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
- btrfs_init_map_token(&token, right);
for (i = 0; i < nritems; i++) {
- struct btrfs_item *item = btrfs_item_nr(i);
u32 ioff;
- ioff = btrfs_token_item_offset(&token, item);
- btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
+ ioff = btrfs_item_offset(right, i);
+ btrfs_set_item_offset(right, i, ioff + rt_data_off);
}
btrfs_set_header_nritems(l, mid);
btrfs_item_key(right, &disk_key, 0);
- insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
+ ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
+ if (ret < 0)
+ return ret;
- btrfs_mark_buffer_dirty(right);
- btrfs_mark_buffer_dirty(l);
+ btrfs_mark_buffer_dirty(trans, right);
+ btrfs_mark_buffer_dirty(trans, l);
BUG_ON(path->slots[0] != slot);
if (mid <= slot) {
@@ -3175,6 +3596,8 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
}
BUG_ON(path->slots[0] < 0);
+
+ return 0;
}
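/*
 * Arithmetic sketch (hypothetical names) of the offset rebasing above: item
 * data is packed from the end of the leaf growing downwards, so when the tail
 * half of the items moves into an empty right leaf, every copied item's data
 * offset is shifted up by the number of data bytes that stayed behind in the
 * left leaf (rt_data_off in the function above).
 */
static void demo_rebase_offsets(unsigned int *offsets, unsigned int nr,
				unsigned int bytes_left_in_left_leaf)
{
	for (unsigned int i = 0; i < nr; i++)
		offsets[i] += bytes_left_in_left_leaf;
}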
/*
@@ -3251,7 +3674,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const struct btrfs_key *ins_key,
struct btrfs_path *path, int data_size,
- int extend)
+ bool extend)
{
struct btrfs_disk_key disk_key;
struct extent_buffer *l;
@@ -3268,7 +3691,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
l = path->nodes[0];
slot = path->slots[0];
- if (extend && data_size + btrfs_item_size_nr(l, slot) +
+ if (extend && data_size + btrfs_item_size(l, slot) +
sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
return -EOVERFLOW;
@@ -3361,20 +3784,25 @@ again:
* BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
* use BTRFS_NESTING_NEW_ROOT.
*/
- right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
- &disk_key, 0, l->start, 0,
+ right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
+ &disk_key, 0, l->start, 0, 0,
num_doubles ? BTRFS_NESTING_NEW_ROOT :
BTRFS_NESTING_SPLIT);
if (IS_ERR(right))
return PTR_ERR(right);
- root_add_used(root, fs_info->nodesize);
+ root_add_used_bytes(root);
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
- insert_ptr(trans, path, &disk_key,
- right->start, path->slots[1] + 1, 1);
+ ret = insert_ptr(trans, path, &disk_key,
+ right->start, path->slots[1] + 1, 1);
+ if (ret < 0) {
+ btrfs_tree_unlock(right);
+ free_extent_buffer(right);
+ return ret;
+ }
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -3382,14 +3810,19 @@ again:
path->slots[1] += 1;
} else {
btrfs_set_header_nritems(right, 0);
- insert_ptr(trans, path, &disk_key,
- right->start, path->slots[1], 1);
+ ret = insert_ptr(trans, path, &disk_key,
+ right->start, path->slots[1], 1);
+ if (ret < 0) {
+ btrfs_tree_unlock(right);
+ free_extent_buffer(right);
+ return ret;
+ }
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
/*
* We create a new leaf 'right' for the required ins_len and
@@ -3399,7 +3832,12 @@ again:
return ret;
}
- copy_for_split(trans, path, l, right, slot, mid, nritems);
+ ret = copy_for_split(trans, path, l, right, slot, mid, nritems);
+ if (ret < 0) {
+ btrfs_tree_unlock(right);
+ free_extent_buffer(right);
+ return ret;
+ }
if (split == 2) {
BUG_ON(num_doubles != 0);
@@ -3432,12 +3870,13 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
+ key.type != BTRFS_RAID_STRIPE_KEY &&
key.type != BTRFS_EXTENT_CSUM_KEY);
if (btrfs_leaf_free_space(leaf) >= ins_len)
return 0;
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
if (key.type == BTRFS_EXTENT_DATA_KEY) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -3445,10 +3884,10 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- path->keep_locks = 1;
- path->search_for_split = 1;
+ path->keep_locks = true;
+ path->search_for_split = true;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- path->search_for_split = 0;
+ path->search_for_split = false;
if (ret > 0)
ret = -EAGAIN;
if (ret < 0)
@@ -3457,7 +3896,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
ret = -EAGAIN;
leaf = path->nodes[0];
/* if our item isn't there, return now */
- if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
+ if (item_size != btrfs_item_size(leaf, path->slots[0]))
goto err;
/* the leaf has changed, it now has room. return now */
@@ -3475,22 +3914,21 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
if (ret)
goto err;
- path->keep_locks = 0;
+ path->keep_locks = false;
btrfs_unlock_up_safe(path, 1);
return 0;
err:
- path->keep_locks = 0;
+ path->keep_locks = false;
return ret;
}
-static noinline int split_item(struct btrfs_path *path,
+static noinline int split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
const struct btrfs_key *new_key,
unsigned long split_offset)
{
struct extent_buffer *leaf;
- struct btrfs_item *item;
- struct btrfs_item *new_item;
- int slot;
+ int orig_slot, slot;
char *buf;
u32 nritems;
u32 item_size;
@@ -3498,11 +3936,16 @@ static noinline int split_item(struct btrfs_path *path,
struct btrfs_disk_key disk_key;
leaf = path->nodes[0];
- BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
+ /*
+ * Shouldn't happen because the caller must have previously called
+ * setup_leaf_for_split() to make room for the new item in the leaf.
+ */
+ if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
+ return -ENOSPC;
- item = btrfs_item_nr(path->slots[0]);
- orig_offset = btrfs_item_offset(leaf, item);
- item_size = btrfs_item_size(leaf, item);
+ orig_slot = path->slots[0];
+ orig_offset = btrfs_item_offset(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
buf = kmalloc(item_size, GFP_NOFS);
if (!buf)
@@ -3515,22 +3958,18 @@ static noinline int split_item(struct btrfs_path *path,
nritems = btrfs_header_nritems(leaf);
if (slot != nritems) {
/* shift the items */
- memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
- btrfs_item_nr_offset(slot),
- (nritems - slot) * sizeof(struct btrfs_item));
+ memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
}
btrfs_cpu_key_to_disk(&disk_key, new_key);
btrfs_set_item_key(leaf, &disk_key, slot);
- new_item = btrfs_item_nr(slot);
-
- btrfs_set_item_offset(leaf, new_item, orig_offset);
- btrfs_set_item_size(leaf, new_item, item_size - split_offset);
+ btrfs_set_item_offset(leaf, slot, orig_offset);
+ btrfs_set_item_size(leaf, slot, item_size - split_offset);
- btrfs_set_item_offset(leaf, item,
- orig_offset + item_size - split_offset);
- btrfs_set_item_size(leaf, item, split_offset);
+ btrfs_set_item_offset(leaf, orig_slot,
+ orig_offset + item_size - split_offset);
+ btrfs_set_item_size(leaf, orig_slot, split_offset);
btrfs_set_header_nritems(leaf, nritems + 1);
@@ -3543,7 +3982,7 @@ static noinline int split_item(struct btrfs_path *path,
write_extent_buffer(leaf, buf + split_offset,
btrfs_item_ptr_offset(leaf, slot),
item_size - split_offset);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
BUG_ON(btrfs_leaf_free_space(leaf) < 0);
kfree(buf);
@@ -3577,7 +4016,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = split_item(path, new_key, split_offset);
+ ret = split_item(trans, path, new_key, split_offset);
return ret;
}
@@ -3587,30 +4026,29 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path, u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
- struct btrfs_item *item;
u32 nritems;
unsigned int data_end;
unsigned int old_data_start;
unsigned int old_size;
unsigned int size_diff;
int i;
- struct btrfs_map_token token;
leaf = path->nodes[0];
slot = path->slots[0];
- old_size = btrfs_item_size_nr(leaf, slot);
+ old_size = btrfs_item_size(leaf, slot);
if (old_size == new_size)
return;
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(leaf);
- old_data_start = btrfs_item_offset_nr(leaf, slot);
+ old_data_start = btrfs_item_offset(leaf, slot);
size_diff = old_size - new_size;
@@ -3621,20 +4059,17 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
- btrfs_init_map_token(&token, leaf);
for (i = slot; i < nritems; i++) {
u32 ioff;
- item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(&token, item);
- btrfs_set_token_item_offset(&token, item, ioff + size_diff);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i, ioff + size_diff);
}
/* shift the data */
if (from_end) {
- memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
- data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
- data_end, old_data_start + new_size - data_end);
+ memmove_leaf_data(leaf, data_end + size_diff, data_end,
+ old_data_start + new_size - data_end);
} else {
struct btrfs_disk_key disk_key;
u64 offset;
@@ -3659,22 +4094,20 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
}
}
- memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
- data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
- data_end, old_data_start - data_end);
+ memmove_leaf_data(leaf, data_end + size_diff, data_end,
+ old_data_start - data_end);
offset = btrfs_disk_key_offset(&disk_key);
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
- item = btrfs_item_nr(slot);
- btrfs_set_item_size(leaf, item, new_size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_set_item_size(leaf, slot, new_size);
+ btrfs_mark_buffer_dirty(trans, leaf);
- if (btrfs_leaf_free_space(leaf) < 0) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < 0)) {
btrfs_print_leaf(leaf);
BUG();
}
@@ -3683,32 +4116,31 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
/*
* make the item pointed to by the path bigger, data_size is the added size.
*/
-void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path, u32 data_size)
{
int slot;
struct extent_buffer *leaf;
- struct btrfs_item *item;
u32 nritems;
unsigned int data_end;
unsigned int old_data;
unsigned int old_size;
int i;
- struct btrfs_map_token token;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(leaf);
- if (btrfs_leaf_free_space(leaf) < data_size) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < data_size)) {
btrfs_print_leaf(leaf);
BUG();
}
slot = path->slots[0];
- old_data = btrfs_item_end_nr(leaf, slot);
+ old_data = btrfs_item_data_end(leaf, slot);
BUG_ON(slot < 0);
- if (slot >= nritems) {
+ if (unlikely(slot >= nritems)) {
btrfs_print_leaf(leaf);
btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
slot, nritems);
@@ -3719,53 +4151,49 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
- btrfs_init_map_token(&token, leaf);
for (i = slot; i < nritems; i++) {
u32 ioff;
- item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(&token, item);
- btrfs_set_token_item_offset(&token, item, ioff - data_size);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i, ioff - data_size);
}
/* shift the data */
- memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
- data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
- data_end, old_data - data_end);
+ memmove_leaf_data(leaf, data_end - data_size, data_end,
+ old_data - data_end);
- data_end = old_data;
- old_size = btrfs_item_size_nr(leaf, slot);
- item = btrfs_item_nr(slot);
- btrfs_set_item_size(leaf, item, old_size + data_size);
- btrfs_mark_buffer_dirty(leaf);
+ old_size = btrfs_item_size(leaf, slot);
+ btrfs_set_item_size(leaf, slot, old_size + data_size);
+ btrfs_mark_buffer_dirty(trans, leaf);
- if (btrfs_leaf_free_space(leaf) < 0) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < 0)) {
btrfs_print_leaf(leaf);
BUG();
}
}
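
For illustration, a hedged sketch (hypothetical helper, not part of this patch) of the usual extend-then-write pattern around btrfs_extend_item(); it assumes the leaf already has enough free space, e.g. from a prior btrfs_search_slot() with ins_len and path->search_for_extension set:

static void example_append_to_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   const void *data, u32 extra)
{
        struct extent_buffer *leaf = path->nodes[0];
        const u32 old_size = btrfs_item_size(leaf, path->slots[0]);
        unsigned long ptr;

        /* Grow the item; this also marks the leaf dirty. */
        btrfs_extend_item(trans, path, extra);

        /* Re-read the data offset after the extend and fill the new tail. */
        ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, data, ptr + old_size, extra);
}
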
-/**
- * setup_items_for_insert - Helper called before inserting one or more items
- * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
- * in a function that doesn't call btrfs_search_slot
+/*
+ * Make space in the node before inserting one or more items.
*
+ * @trans: transaction handle
* @root: root we are inserting items to
* @path: points to the leaf/slot where we are going to insert new items
* @batch: information about the batch of items to insert
+ *
+ * Main purpose is to save stack depth by doing the bulk of the work in a
+ * function that doesn't call btrfs_search_slot
*/
-static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
+static void setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
const struct btrfs_item_batch *batch)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_item *item;
int i;
u32 nritems;
unsigned int data_end;
struct btrfs_disk_key disk_key;
struct extent_buffer *leaf;
int slot;
- struct btrfs_map_token token;
u32 total_size;
/*
@@ -3775,7 +4203,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
*/
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@@ -3786,18 +4214,17 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
data_end = leaf_data_end(leaf);
total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
- if (btrfs_leaf_free_space(leaf) < total_size) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < total_size)) {
btrfs_print_leaf(leaf);
btrfs_crit(fs_info, "not enough freespace need %u have %d",
total_size, btrfs_leaf_free_space(leaf));
BUG();
}
- btrfs_init_map_token(&token, leaf);
if (slot != nritems) {
- unsigned int old_data = btrfs_item_end_nr(leaf, slot);
+ unsigned int old_data = btrfs_item_data_end(leaf, slot);
- if (old_data < data_end) {
+ if (unlikely(old_data < data_end)) {
btrfs_print_leaf(leaf);
btrfs_crit(fs_info,
"item at slot %d with data offset %u beyond data end of leaf %u",
@@ -3811,21 +4238,16 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
for (i = slot; i < nritems; i++) {
u32 ioff;
- item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(&token, item);
- btrfs_set_token_item_offset(&token, item,
- ioff - batch->total_data_size);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i,
+ ioff - batch->total_data_size);
}
/* shift the items */
- memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + batch->nr),
- btrfs_item_nr_offset(slot),
- (nritems - slot) * sizeof(struct btrfs_item));
+ memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);
/* shift the data */
- memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
- data_end - batch->total_data_size,
- BTRFS_LEAF_DATA_OFFSET + data_end,
- old_data - data_end);
+ memmove_leaf_data(leaf, data_end - batch->total_data_size,
+ data_end, old_data - data_end);
data_end = old_data;
}
@@ -3833,16 +4255,15 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
for (i = 0; i < batch->nr; i++) {
btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
btrfs_set_item_key(leaf, &disk_key, slot + i);
- item = btrfs_item_nr(slot + i);
data_end -= batch->data_sizes[i];
- btrfs_set_token_item_offset(&token, item, data_end);
- btrfs_set_token_item_size(&token, item, batch->data_sizes[i]);
+ btrfs_set_item_offset(leaf, slot + i, data_end);
+ btrfs_set_item_size(leaf, slot + i, batch->data_sizes[i]);
}
btrfs_set_header_nritems(leaf, nritems + batch->nr);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
- if (btrfs_leaf_free_space(leaf) < 0) {
+ if (unlikely(btrfs_leaf_free_space(leaf) < 0)) {
btrfs_print_leaf(leaf);
BUG();
}
@@ -3851,12 +4272,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
/*
* Insert a new item into a leaf.
*
+ * @trans: Transaction handle.
* @root: The root of the btree.
* @path: A path pointing to the target leaf and slot.
* @key: The key of the new item.
* @data_size: The size of the data associated with the new key.
*/
-void btrfs_setup_item_for_insert(struct btrfs_root *root,
+void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *key,
u32 data_size)
@@ -3868,12 +4291,16 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
batch.total_data_size = data_size;
batch.nr = 1;
- setup_items_for_insert(root, path, &batch);
+ setup_items_for_insert(trans, root, path, &batch);
}
/*
* Given a key and some data, insert items into the tree.
* This does all the path init required, making room in the tree if needed.
+ *
+ * Returns: 0 on success
+ * -EEXIST if the first key already exists
+ * < 0 on other errors
*/
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -3894,7 +4321,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
slot = path->slots[0];
BUG_ON(slot < 0);
- setup_items_for_insert(root, path, batch);
+ setup_items_for_insert(trans, root, path, batch);
return 0;
}
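
As an aside, a minimal caller sketch (hypothetical helper, arbitrary sizes) showing how a struct btrfs_item_batch could be filled before calling btrfs_insert_empty_items(); the field names (keys, data_sizes, total_data_size, nr) follow the usage visible in setup_items_for_insert() above:

static int example_insert_two_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    const struct btrfs_key keys[2])
{
        const u32 sizes[2] = { 16, 32 };        /* arbitrary data sizes */
        struct btrfs_item_batch batch;

        batch.keys = keys;                      /* keys in sorted order */
        batch.data_sizes = sizes;               /* one data size per key */
        batch.total_data_size = sizes[0] + sizes[1];
        batch.nr = 2;

        /* On success the path points at the first reserved slot. */
        return btrfs_insert_empty_items(trans, root, path, &batch);
}
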
@@ -3907,7 +4334,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u32 data_size)
{
int ret = 0;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
unsigned long ptr;
@@ -3919,9 +4346,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
leaf = path->nodes[0];
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, data, ptr, data_size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
- btrfs_free_path(path);
return ret;
}
@@ -3943,14 +4369,14 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
u32 item_size;
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
ret = setup_leaf_for_split(trans, root, path,
item_size + sizeof(struct btrfs_item));
if (ret)
return ret;
path->slots[0]++;
- btrfs_setup_item_for_insert(root, path, new_key, item_size);
+ btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
leaf = path->nodes[0];
memcpy_extent_buffer(leaf,
btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -3964,9 +4390,11 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
*
* the tree should have been previously balanced so the deletion does not
* empty a node.
+ *
+ * This is exported for use inside btrfs-progs, don't un-export it.
*/
-static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
- int level, int slot)
+int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, int level, int slot)
{
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
@@ -3977,17 +4405,23 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
if (level) {
ret = btrfs_tree_mod_log_insert_move(parent, slot,
slot + 1, nritems - slot - 1);
- BUG_ON(ret < 0);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
}
memmove_extent_buffer(parent,
- btrfs_node_key_ptr_offset(slot),
- btrfs_node_key_ptr_offset(slot + 1),
+ btrfs_node_key_ptr_offset(parent, slot),
+ btrfs_node_key_ptr_offset(parent, slot + 1),
sizeof(struct btrfs_key_ptr) *
(nritems - slot - 1));
} else if (level) {
ret = btrfs_tree_mod_log_insert_key(parent, slot,
- BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
- BUG_ON(ret < 0);
+ BTRFS_MOD_LOG_KEY_REMOVE);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
}
nritems--;
@@ -4000,9 +4434,10 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(path, &disk_key, level + 1);
+ fixup_low_keys(trans, path, &disk_key, level + 1);
}
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
+ return 0;
}
/*
@@ -4015,13 +4450,17 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
* The path must have already been setup for deleting the leaf, including
* all the proper balancing. path->nodes[1] must be locked.
*/
-static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *leaf)
+static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct extent_buffer *leaf)
{
+ int ret;
+
WARN_ON(btrfs_header_generation(leaf) != trans->transid);
- del_ptr(root, path, 1, path->slots[1]);
+ ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]);
+ if (ret < 0)
+ return ret;
/*
* btrfs_free_extent is expensive, we want to make sure we
@@ -4029,11 +4468,15 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
*/
btrfs_unlock_up_safe(path, 0);
- root_sub_used(root, leaf->len);
+ root_sub_used_bytes(root);
- atomic_inc(&leaf->refs);
- btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ refcount_inc(&leaf->refs);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
free_extent_buffer_stale(leaf);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
+
+ return ret;
}
/*
* delete the item at the leaf level in path. If that empties
@@ -4044,55 +4487,44 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
- struct btrfs_item *item;
- u32 last_off;
- u32 dsize = 0;
int ret = 0;
int wret;
- int i;
u32 nritems;
leaf = path->nodes[0];
- last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
-
- for (i = 0; i < nr; i++)
- dsize += btrfs_item_size_nr(leaf, slot + i);
-
nritems = btrfs_header_nritems(leaf);
if (slot + nr != nritems) {
- int data_end = leaf_data_end(leaf);
- struct btrfs_map_token token;
+ const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
+ const int data_end = leaf_data_end(leaf);
+ u32 dsize = 0;
+ int i;
- memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
- data_end + dsize,
- BTRFS_LEAF_DATA_OFFSET + data_end,
- last_off - data_end);
+ for (i = 0; i < nr; i++)
+ dsize += btrfs_item_size(leaf, slot + i);
+
+ memmove_leaf_data(leaf, data_end + dsize, data_end,
+ last_off - data_end);
- btrfs_init_map_token(&token, leaf);
for (i = slot + nr; i < nritems; i++) {
u32 ioff;
- item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(&token, item);
- btrfs_set_token_item_offset(&token, item, ioff + dsize);
+ ioff = btrfs_item_offset(leaf, i);
+ btrfs_set_item_offset(leaf, i, ioff + dsize);
}
- memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
- btrfs_item_nr_offset(slot + nr),
- sizeof(struct btrfs_item) *
- (nritems - slot - nr));
+ memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
}
btrfs_set_header_nritems(leaf, nritems - nr);
nritems -= nr;
/* delete the leaf if we've emptied it */
if (nritems == 0) {
- if (leaf == root->node) {
- btrfs_set_header_level(leaf, 0);
- } else {
- btrfs_clean_tree_block(leaf);
- btrfs_del_leaf(trans, root, path, leaf);
+ if (leaf != root->node) {
+ btrfs_clear_buffer_dirty(trans, leaf);
+ ret = btrfs_del_leaf(trans, root, path, leaf);
+ if (ret < 0)
+ return ret;
}
} else {
int used = leaf_space_used(leaf, 0, nritems);
@@ -4100,36 +4532,63 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
- /* delete the leaf if it is mostly empty */
+ /*
+ * Try to delete the leaf if it is mostly empty. We do this by
+ * trying to move all its items into its left and right neighbours.
+ * If we can't move all the items, then we don't delete it - it's
+ * not ideal, but future insertions might fill the leaf with more
+ * items, or items from other leaves might be moved later into our
+ * leaf due to deletions on those leaves.
+ */
if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
+ u32 min_push_space;
+
/* push_leaf_left fixes the path.
* make sure the path still points to our leaf
- * for possible call to del_ptr below
+ * for possible call to btrfs_del_ptr below
*/
slot = path->slots[1];
- atomic_inc(&leaf->refs);
-
- wret = push_leaf_left(trans, root, path, 1, 1,
- 1, (u32)-1);
+ refcount_inc(&leaf->refs);
+ /*
+ * We want to be able to at least push one item to the
+ * left neighbour leaf, and that's the first item.
+ */
+ min_push_space = sizeof(struct btrfs_item) +
+ btrfs_item_size(leaf, 0);
+ wret = push_leaf_left(trans, root, path, 0,
+ min_push_space, 1, (u32)-1);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (path->nodes[0] == leaf &&
btrfs_header_nritems(leaf)) {
- wret = push_leaf_right(trans, root, path, 1,
- 1, 1, 0);
+ /*
+ * If we were not able to push all items from our
+ * leaf to its left neighbour, then attempt to
+ * either push all the remaining items to the
+ * right neighbour or none. There's no advantage
+ * in pushing only some items, instead of all, as
+ * it's pointless to end up with a leaf having
+ * too few items while the neighbours can be full
+ * or nearly full.
+ */
+ nritems = btrfs_header_nritems(leaf);
+ min_push_space = leaf_space_used(leaf, 0, nritems);
+ wret = push_leaf_right(trans, root, path, 0,
+ min_push_space, 1, 0);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
}
if (btrfs_header_nritems(leaf) == 0) {
path->slots[1] = slot;
- btrfs_del_leaf(trans, root, path, leaf);
+ ret = btrfs_del_leaf(trans, root, path, leaf);
free_extent_buffer(leaf);
- ret = 0;
+ if (ret < 0)
+ return ret;
} else {
/* if we're still in the path, make sure
* we're dirty. Otherwise, one of the
@@ -4137,78 +4596,25 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* dirtied this buffer
*/
if (path->nodes[0] == leaf)
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
free_extent_buffer(leaf);
}
} else {
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
}
return ret;
}
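
As a usage note, a small sketch of the common single-item deletion pattern built on btrfs_del_items(); this is essentially what the btrfs_del_item() inline wrapper is assumed to do (the helper name below is illustrative):

static int example_delete_one(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              const struct btrfs_key *key)
{
        BTRFS_PATH_AUTO_FREE(path);
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* ins_len == -1 and cow == 1: search for deletion, COWing the path. */
        ret = btrfs_search_slot(trans, root, key, path, -1, 1);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return -ENOENT;         /* exact key not found */

        return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
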
/*
- * search the tree again to find a leaf with lesser keys
- * returns 0 if it found something or 1 if there are no lesser leaves.
- * returns < 0 on io errors.
- *
- * This may release the path, and so you may lose any locks held at the
- * time you call it.
- */
-int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
-{
- struct btrfs_key key;
- struct btrfs_disk_key found_key;
- int ret;
-
- btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
-
- if (key.offset > 0) {
- key.offset--;
- } else if (key.type > 0) {
- key.type--;
- key.offset = (u64)-1;
- } else if (key.objectid > 0) {
- key.objectid--;
- key.type = (u8)-1;
- key.offset = (u64)-1;
- } else {
- return 1;
- }
-
- btrfs_release_path(path);
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- return ret;
- btrfs_item_key(path->nodes[0], &found_key, 0);
- ret = comp_keys(&found_key, &key);
- /*
- * We might have had an item with the previous key in the tree right
- * before we released our path. And after we released our path, that
- * item might have been pushed to the first slot (0) of the leaf we
- * were holding due to a tree balance. Alternatively, an item with the
- * previous key can exist as the only element of a leaf (big fat item).
- * Therefore account for these 2 cases, so that our callers (like
- * btrfs_previous_item) don't miss an existing item with a key matching
- * the previous key we computed above.
- */
- if (ret <= 0)
- return 0;
- return 1;
-}
-
-/*
* A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are have a minimum transaction id.
+ * for leaves that have a minimum transaction id.
* This is used by the btree defrag code, and tree logging
*
* This does not cow, but it does stuff the starting key it finds back
* into min_key, so you can call btrfs_search_slot with cow=1 on the
* key and get a writable path.
*
- * This honors path->lowest_level to prevent descent past a given level
- * of the tree.
- *
* min_trans indicates the oldest transaction that you are interested
* in walking through. Any nodes or leaves older than min_trans are
* skipped over (without reading them).
@@ -4221,15 +4627,16 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
u64 min_trans)
{
struct extent_buffer *cur;
- struct btrfs_key found_key;
int slot;
int sret;
u32 nritems;
int level;
int ret = 1;
- int keep_locks = path->keep_locks;
+ const bool keep_locks = path->keep_locks;
- path->keep_locks = 1;
+ ASSERT(!path->nowait);
+ ASSERT(path->lowest_level == 0);
+ path->keep_locks = true;
again:
cur = btrfs_read_lock_root_node(root);
level = btrfs_header_level(cur);
@@ -4244,19 +4651,20 @@ again:
while (1) {
nritems = btrfs_header_nritems(cur);
level = btrfs_header_level(cur);
- sret = btrfs_bin_search(cur, min_key, &slot);
+ sret = btrfs_bin_search(cur, 0, min_key, &slot);
if (sret < 0) {
ret = sret;
goto out;
}
- /* at the lowest level, we're done, setup the path and exit */
- if (level == path->lowest_level) {
+ /* At level 0 we're done, setup the path and exit. */
+ if (level == 0) {
if (slot >= nritems)
goto find_next_key;
ret = 0;
path->slots[level] = slot;
- btrfs_item_key_to_cpu(cur, &found_key, slot);
+ /* Save our key for returning back. */
+ btrfs_item_key_to_cpu(cur, min_key, slot);
goto out;
}
if (sret && slot > 0)
@@ -4280,8 +4688,8 @@ find_next_key:
* we didn't find a candidate key in this node, walk forward
* and find another one
*/
+ path->slots[level] = slot;
if (slot >= nritems) {
- path->slots[level] = slot;
sret = btrfs_find_next_key(root, path, min_key, level,
min_trans);
if (sret == 0) {
@@ -4291,13 +4699,6 @@ find_next_key:
goto out;
}
}
- /* save our key for returning back */
- btrfs_node_key_to_cpu(cur, &found_key, slot);
- path->slots[level] = slot;
- if (level == path->lowest_level) {
- ret = 0;
- goto out;
- }
cur = btrfs_read_node_slot(cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
@@ -4312,10 +4713,8 @@ find_next_key:
}
out:
path->keep_locks = keep_locks;
- if (ret == 0) {
- btrfs_unlock_up_safe(path, path->lowest_level + 1);
- memcpy(min_key, &found_key, sizeof(found_key));
- }
+ if (ret == 0)
+ btrfs_unlock_up_safe(path, 1);
return ret;
}
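
For reference, a hypothetical walker (the helper name and key-advance policy are illustrative, not from this patch) showing how btrfs_search_forward() is typically driven in a loop, the way defrag/tree-log style scans use it:

static int example_scan_newer_leaves(struct btrfs_root *root, u64 min_trans)
{
        BTRFS_PATH_AUTO_FREE(path);
        struct btrfs_key key = { 0 };   /* start from the smallest possible key */
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = btrfs_search_forward(root, &key, path, min_trans);
                if (ret) {
                        /* 1 means nothing newer is left, < 0 is an error. */
                        if (ret > 0)
                                ret = 0;
                        break;
                }
                /*
                 * path->nodes[0] / path->slots[0] point at an item in a leaf
                 * written in or after min_trans, and 'key' was updated to that
                 * item's key. Process it here, then advance past it.
                 */
                btrfs_release_path(path);
                if (key.offset < (u64)-1) {
                        key.offset++;
                } else if (key.type < (u8)-1) {
                        key.type++;
                        key.offset = 0;
                } else if (key.objectid < (u64)-1) {
                        key.objectid++;
                        key.type = 0;
                        key.offset = 0;
                } else {
                        break;
                }
        }
        return ret;
}
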
@@ -4327,7 +4726,7 @@ out:
* 0 is returned if another key is found, < 0 if there are any errors
* and 1 is returned if there are no higher keys in the tree
*
- * path->keep_locks should be set to 1 on the search made before
+ * path->keep_locks should be set to true on the search made before
* calling this function.
*/
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -4402,11 +4801,20 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
int level;
struct extent_buffer *c;
struct extent_buffer *next;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
+ bool need_commit_sem = false;
u32 nritems;
int ret;
int i;
+ /*
+ * The nowait semantics are used only for write paths, where we don't
+ * use the tree mod log and sequence numbers.
+ */
+ if (time_seq)
+ ASSERT(!path->nowait);
+
nritems = btrfs_header_nritems(path->nodes[0]);
if (nritems == 0)
return 1;
@@ -4417,47 +4825,49 @@ again:
next = NULL;
btrfs_release_path(path);
- path->keep_locks = 1;
+ path->keep_locks = true;
- if (time_seq)
+ if (time_seq) {
ret = btrfs_search_old_slot(root, &key, path, time_seq);
- else
+ } else {
+ if (path->need_commit_sem) {
+ path->need_commit_sem = false;
+ need_commit_sem = true;
+ if (path->nowait) {
+ if (!down_read_trylock(&fs_info->commit_root_sem)) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ down_read(&fs_info->commit_root_sem);
+ }
+ }
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- path->keep_locks = 0;
+ }
+ path->keep_locks = false;
if (ret < 0)
- return ret;
+ goto done;
nritems = btrfs_header_nritems(path->nodes[0]);
/*
- * by releasing the path above we dropped all our locks. A balance
- * could have added more items next to the key that used to be
- * at the very end of the block. So, check again here and
- * advance the path if there are now more items available.
- */
- if (nritems > 0 && path->slots[0] < nritems - 1) {
- if (ret == 0)
- path->slots[0]++;
- ret = 0;
- goto done;
- }
- /*
- * So the above check misses one case:
- * - after releasing the path above, someone has removed the item that
- * used to be at the very end of the block, and balance between leafs
- * gets another one with bigger key.offset to replace it.
+ * By releasing the path above we dropped all our locks. A balance
+ * could have happened and
*
- * This one should be returned as well, or we can get leaf corruption
- * later(esp. in __btrfs_drop_extents()).
+ * 1. added more items after the previous last item
+ * 2. deleted the previous last item
*
- * And a bit more explanation about this check,
- * with ret > 0, the key isn't found, the path points to the slot
- * where it should be inserted, so the path->slots[0] item must be the
- * bigger one.
+ * So, check again here and advance the path if there are now more
+ * items available.
*/
- if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
- ret = 0;
- goto done;
+ if (nritems > 0 && path->slots[0] <= nritems - 1) {
+ if (ret == 0 && path->slots[0] != nritems - 1) {
+ path->slots[0]++;
+ goto done;
+ } else if (ret > 0) {
+ ret = 0;
+ goto done;
+ }
}
while (level < BTRFS_MAX_LEVEL) {
@@ -4493,9 +4903,8 @@ again:
}
next = c;
- ret = read_block_for_search(root, path, &next, level,
- slot, &key);
- if (ret == -EAGAIN)
+ ret = read_block_for_search(root, path, &next, slot, &key);
+ if (ret == -EAGAIN && !path->nowait)
goto again;
if (ret < 0) {
@@ -4505,6 +4914,10 @@ again:
if (!path->skip_locking) {
ret = btrfs_try_tree_read_lock(next);
+ if (!ret && path->nowait) {
+ ret = -EAGAIN;
+ goto done;
+ }
if (!ret && time_seq) {
/*
* If we don't get the lock, we may be racing
@@ -4533,9 +4946,8 @@ again:
if (!level)
break;
- ret = read_block_for_search(root, path, &next, level,
- 0, &key);
- if (ret == -EAGAIN)
+ ret = read_block_for_search(root, path, &next, 0, &key);
+ if (ret == -EAGAIN && !path->nowait)
goto again;
if (ret < 0) {
@@ -4543,16 +4955,41 @@ again:
goto done;
}
- if (!path->skip_locking)
- btrfs_tree_read_lock(next);
+ if (!path->skip_locking) {
+ if (path->nowait) {
+ if (!btrfs_try_tree_read_lock(next)) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ btrfs_tree_read_lock(next);
+ }
+ }
}
ret = 0;
done:
unlock_up(path, 0, 1, 0, NULL);
+ if (need_commit_sem) {
+ int ret2;
+
+ path->need_commit_sem = true;
+ ret2 = finish_need_commit_sem_search(path);
+ up_read(&fs_info->commit_root_sem);
+ if (ret2)
+ ret = ret2;
+ }
return ret;
}
+int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
+{
+ path->slots[0]++;
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
+ return btrfs_next_old_leaf(root, path, time_seq);
+ return 0;
+}
+
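
For context, a hedged sketch of the forward-iteration pattern these helpers support; it assumes btrfs_next_leaf() is the time_seq == 0 wrapper around btrfs_next_old_leaf(), and the helper name is illustrative:

static int example_walk_objectid(struct btrfs_root *root, u64 objectid)
{
        BTRFS_PATH_AUTO_FREE(path);
        struct btrfs_key key = { .objectid = objectid, .type = 0, .offset = 0 };
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ret;

        while (1) {
                struct extent_buffer *leaf = path->nodes[0];

                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret)        /* 1 == no more leaves, < 0 == error */
                                break;
                        continue;
                }
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != objectid)
                        break;
                /* ... inspect the item at path->slots[0] here ... */
                path->slots[0]++;
        }
        return ret < 0 ? ret : 0;
}
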
/*
* this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
* searching until it gets past min_objectid or finds an item of 'type'
@@ -4636,3 +5073,16 @@ int btrfs_previous_extent_item(struct btrfs_root *root,
}
return 1;
}
+
+int __init btrfs_ctree_init(void)
+{
+ btrfs_path_cachep = KMEM_CACHE(btrfs_path, 0);
+ if (!btrfs_path_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+void __cold btrfs_ctree_exit(void)
+{
+ kmem_cache_destroy(btrfs_path_cachep);
+}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7553e9dc5f93..692370fc07b2 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -6,350 +6,23 @@
#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H
-#include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/highmem.h>
-#include <linux/fs.h>
-#include <linux/rwsem.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/backing-dev.h>
+#include <linux/cleanup.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include <linux/mutex.h>
#include <linux/wait.h>
-#include <linux/slab.h>
-#include <trace/events/btrfs.h>
-#include <asm/unaligned.h>
-#include <linux/pagemap.h>
-#include <linux/btrfs.h>
-#include <linux/btrfs_tree.h>
-#include <linux/workqueue.h>
-#include <linux/security.h>
-#include <linux/sizes.h>
-#include <linux/dynamic_debug.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/xarray.h>
#include <linux/refcount.h>
-#include <linux/crc32c.h>
-#include <linux/iomap.h>
-#include "extent-io-tree.h"
-#include "extent_io.h"
-#include "extent_map.h"
-#include "async-thread.h"
-#include "block-rsv.h"
+#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
+#include "accessors.h"
+struct extent_buffer;
+struct btrfs_block_rsv;
struct btrfs_trans_handle;
-struct btrfs_transaction;
-struct btrfs_pending_snapshot;
-struct btrfs_delayed_ref_root;
-struct btrfs_space_info;
struct btrfs_block_group;
-extern struct kmem_cache *btrfs_trans_handle_cachep;
-extern struct kmem_cache *btrfs_bit_radix_cachep;
-extern struct kmem_cache *btrfs_path_cachep;
-extern struct kmem_cache *btrfs_free_space_cachep;
-extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
-struct btrfs_ordered_sum;
-struct btrfs_ref;
-struct btrfs_bio;
-
-#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
-
-/*
- * Maximum number of mirrors that can be available for all profiles counting
- * the target device of dev-replace as one. During an active device replace
- * procedure, the target device of the copy operation is a mirror for the
- * filesystem data as well that can be used to read data in order to repair
- * read errors on other disks.
- *
- * Current value is derived from RAID1C4 with 4 copies.
- */
-#define BTRFS_MAX_MIRRORS (4 + 1)
-
-#define BTRFS_MAX_LEVEL 8
-
-#define BTRFS_OLDEST_GENERATION 0ULL
-
-/*
- * we can actually store much bigger names, but lets not confuse the rest
- * of linux
- */
-#define BTRFS_NAME_LEN 255
-
-/*
- * Theoretical limit is larger, but we keep this down to a sane
- * value. That should limit greatly the possibility of collisions on
- * inode ref items.
- */
-#define BTRFS_LINK_MAX 65535U
-
-#define BTRFS_EMPTY_DIR_SIZE 0
-
-/* ioprio of readahead is set to idle */
-#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
-
-#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
-
-/*
- * Use large batch size to reduce overhead of metadata updates. On the reader
- * side, we only read it when we are close to ENOSPC and the read overhead is
- * mostly related to the number of CPUs, so it is OK to use arbitrary large
- * value here.
- */
-#define BTRFS_TOTAL_BYTES_PINNED_BATCH SZ_128M
-
-#define BTRFS_MAX_EXTENT_SIZE SZ_128M
-
-/*
- * Deltas are an effective way to populate global statistics. Give macro names
- * to make it clear what we're doing. An example is discard_extents in
- * btrfs_free_space_ctl.
- */
-#define BTRFS_STAT_NR_ENTRIES 2
-#define BTRFS_STAT_CURR 0
-#define BTRFS_STAT_PREV 1
-
-/*
- * Count how many BTRFS_MAX_EXTENT_SIZE cover the @size
- */
-static inline u32 count_max_extents(u64 size)
-{
- return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
-}
-
-static inline unsigned long btrfs_chunk_item_size(int num_stripes)
-{
- BUG_ON(num_stripes == 0);
- return sizeof(struct btrfs_chunk) +
- sizeof(struct btrfs_stripe) * (num_stripes - 1);
-}
-
-/*
- * Runtime (in-memory) states of filesystem
- */
-enum {
- /* Global indicator of serious filesystem errors */
- BTRFS_FS_STATE_ERROR,
- /*
- * Filesystem is being remounted, allow to skip some operations, like
- * defrag
- */
- BTRFS_FS_STATE_REMOUNTING,
- /* Filesystem in RO mode */
- BTRFS_FS_STATE_RO,
- /* Track if a transaction abort has been reported on this filesystem */
- BTRFS_FS_STATE_TRANS_ABORTED,
- /*
- * Bio operations should be blocked on this filesystem because a source
- * or target device is being destroyed as part of a device replace
- */
- BTRFS_FS_STATE_DEV_REPLACING,
- /* The btrfs_fs_info created for self-tests */
- BTRFS_FS_STATE_DUMMY_FS_INFO,
-};
-
-#define BTRFS_BACKREF_REV_MAX 256
-#define BTRFS_BACKREF_REV_SHIFT 56
-#define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \
- BTRFS_BACKREF_REV_SHIFT)
-
-#define BTRFS_OLD_BACKREF_REV 0
-#define BTRFS_MIXED_BACKREF_REV 1
-
-/*
- * every tree block (leaf or node) starts with this header.
- */
-struct btrfs_header {
- /* these first four must match the super block */
- u8 csum[BTRFS_CSUM_SIZE];
- u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
- __le64 bytenr; /* which block this node is supposed to live in */
- __le64 flags;
-
- /* allowed to be different from the super from here on down */
- u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
- __le64 generation;
- __le64 owner;
- __le32 nritems;
- u8 level;
-} __attribute__ ((__packed__));
-
-/*
- * this is a very generous portion of the super block, giving us
- * room to translate 14 chunks with 3 stripes each.
- */
-#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048
-
-/*
- * just in case we somehow lose the roots and are not able to mount,
- * we store an array of the roots from previous transactions
- * in the super.
- */
-#define BTRFS_NUM_BACKUP_ROOTS 4
-struct btrfs_root_backup {
- __le64 tree_root;
- __le64 tree_root_gen;
-
- __le64 chunk_root;
- __le64 chunk_root_gen;
-
- __le64 extent_root;
- __le64 extent_root_gen;
-
- __le64 fs_root;
- __le64 fs_root_gen;
-
- __le64 dev_root;
- __le64 dev_root_gen;
-
- __le64 csum_root;
- __le64 csum_root_gen;
-
- __le64 total_bytes;
- __le64 bytes_used;
- __le64 num_devices;
- /* future */
- __le64 unused_64[4];
-
- u8 tree_root_level;
- u8 chunk_root_level;
- u8 extent_root_level;
- u8 fs_root_level;
- u8 dev_root_level;
- u8 csum_root_level;
- /* future and to align */
- u8 unused_8[10];
-} __attribute__ ((__packed__));
-
-#define BTRFS_SUPER_INFO_OFFSET SZ_64K
-#define BTRFS_SUPER_INFO_SIZE 4096
-
-/*
- * the super block basically lists the main trees of the FS
- * it currently lacks any block count etc etc
- */
-struct btrfs_super_block {
- /* the first 4 fields must match struct btrfs_header */
- u8 csum[BTRFS_CSUM_SIZE];
- /* FS specific UUID, visible to user */
- u8 fsid[BTRFS_FSID_SIZE];
- __le64 bytenr; /* this block number */
- __le64 flags;
-
- /* allowed to be different from the btrfs_header from here own down */
- __le64 magic;
- __le64 generation;
- __le64 root;
- __le64 chunk_root;
- __le64 log_root;
-
- /* this will help find the new super based on the log root */
- __le64 log_root_transid;
- __le64 total_bytes;
- __le64 bytes_used;
- __le64 root_dir_objectid;
- __le64 num_devices;
- __le32 sectorsize;
- __le32 nodesize;
- __le32 __unused_leafsize;
- __le32 stripesize;
- __le32 sys_chunk_array_size;
- __le64 chunk_root_generation;
- __le64 compat_flags;
- __le64 compat_ro_flags;
- __le64 incompat_flags;
- __le16 csum_type;
- u8 root_level;
- u8 chunk_root_level;
- u8 log_root_level;
- struct btrfs_dev_item dev_item;
-
- char label[BTRFS_LABEL_SIZE];
-
- __le64 cache_generation;
- __le64 uuid_tree_generation;
-
- /* the UUID written into btree blocks */
- u8 metadata_uuid[BTRFS_FSID_SIZE];
-
- /* future expansion */
- __le64 reserved[28];
- u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
- struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
-
- /* Padded to 4096 bytes */
- u8 padding[565];
-} __attribute__ ((__packed__));
-static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
-
-/*
- * Compat flags that we support. If any incompat flags are set other than the
- * ones specified below then we will fail to mount
- */
-#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
-#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
-#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL
-
-#define BTRFS_FEATURE_COMPAT_RO_SUPP \
- (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
- BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
- BTRFS_FEATURE_COMPAT_RO_VERITY)
-
-#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
-#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL
-
-#define BTRFS_FEATURE_INCOMPAT_SUPP \
- (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
- BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
- BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
- BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
- BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
- BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
- BTRFS_FEATURE_INCOMPAT_RAID56 | \
- BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
- BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
- BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
- BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
- BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
- BTRFS_FEATURE_INCOMPAT_ZONED)
-
-#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
- (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
-#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL
-
-/*
- * A leaf is full of items. offset and size tell us where to find
- * the item in the leaf (relative to the start of the data area)
- */
-struct btrfs_item {
- struct btrfs_disk_key key;
- __le32 offset;
- __le32 size;
-} __attribute__ ((__packed__));
-
-/*
- * leaves have an item area and a data area:
- * [item0, item1....itemN] [free space] [dataN...data1, data0]
- *
- * The data is separate from the items to get the keys closer together
- * during searches.
- */
-struct btrfs_leaf {
- struct btrfs_header header;
- struct btrfs_item items[];
-} __attribute__ ((__packed__));
-
-/*
- * all non-leaf blocks are nodes, they hold only keys and pointers to
- * other blocks
- */
-struct btrfs_key_ptr {
- struct btrfs_disk_key key;
- __le64 blockptr;
- __le64 generation;
-} __attribute__ ((__packed__));
-
-struct btrfs_node {
- struct btrfs_header header;
- struct btrfs_key_ptr ptrs[];
-} __attribute__ ((__packed__));
/* Read ahead values for struct btrfs_path.reada */
enum {
@@ -386,675 +59,31 @@ struct btrfs_path {
/* if there is real range locking, this locks field will change */
u8 locks[BTRFS_MAX_LEVEL];
u8 reada;
- /* keep some upper locks as we walk down */
u8 lowest_level;
/*
* set by btrfs_split_item, tells search_slot to keep all locks
* and to force calls to keep space in the nodes
*/
- unsigned int search_for_split:1;
- unsigned int keep_locks:1;
- unsigned int skip_locking:1;
- unsigned int search_commit_root:1;
- unsigned int need_commit_sem:1;
- unsigned int skip_release_on_error:1;
+ bool search_for_split:1;
+ /* Keep some upper locks as we walk down. */
+ bool keep_locks:1;
+ bool skip_locking:1;
+ bool search_commit_root:1;
+ bool need_commit_sem:1;
+ bool skip_release_on_error:1;
/*
* Indicate that new item (btrfs_search_slot) is extending already
* existing item and ins_len contains only the data size and not item
* header (ie. sizeof(struct btrfs_item) is not included).
*/
- unsigned int search_for_extension:1;
-};
-#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
- sizeof(struct btrfs_item))
-struct btrfs_dev_replace {
- u64 replace_state; /* see #define above */
- time64_t time_started; /* seconds since 1-Jan-1970 */
- time64_t time_stopped; /* seconds since 1-Jan-1970 */
- atomic64_t num_write_errors;
- atomic64_t num_uncorrectable_read_errors;
-
- u64 cursor_left;
- u64 committed_cursor_left;
- u64 cursor_left_last_write_of_item;
- u64 cursor_right;
-
- u64 cont_reading_from_srcdev_mode; /* see #define above */
-
- int is_valid;
- int item_needs_writeback;
- struct btrfs_device *srcdev;
- struct btrfs_device *tgtdev;
-
- struct mutex lock_finishing_cancel_unmount;
- struct rw_semaphore rwsem;
-
- struct btrfs_scrub_progress scrub_progress;
-
- struct percpu_counter bio_counter;
- wait_queue_head_t replace_wait;
-};
-
-/*
- * free clusters are used to claim free space in relatively large chunks,
- * allowing us to do less seeky writes. They are used for all metadata
- * allocations. In ssd_spread mode they are also used for data allocations.
- */
-struct btrfs_free_cluster {
- spinlock_t lock;
- spinlock_t refill_lock;
- struct rb_root root;
-
- /* largest extent in this cluster */
- u64 max_size;
-
- /* first extent starting offset */
- u64 window_start;
-
- /* We did a full search and couldn't create a cluster */
- bool fragmented;
-
- struct btrfs_block_group *block_group;
- /*
- * when a cluster is allocated from a block group, we put the
- * cluster onto a list in the block group so that it can
- * be freed before the block group is freed.
- */
- struct list_head block_group_list;
-};
-
-enum btrfs_caching_type {
- BTRFS_CACHE_NO,
- BTRFS_CACHE_STARTED,
- BTRFS_CACHE_FAST,
- BTRFS_CACHE_FINISHED,
- BTRFS_CACHE_ERROR,
-};
-
-/*
- * Tree to record all locked full stripes of a RAID5/6 block group
- */
-struct btrfs_full_stripe_locks_tree {
- struct rb_root root;
- struct mutex lock;
-};
-
-/* Discard control. */
-/*
- * Async discard uses multiple lists to differentiate the discard filter
- * parameters. Index 0 is for completely free block groups where we need to
- * ensure the entire block group is trimmed without being lossy. Indices
- * afterwards represent monotonically decreasing discard filter sizes to
- * prioritize what should be discarded next.
- */
-#define BTRFS_NR_DISCARD_LISTS 3
-#define BTRFS_DISCARD_INDEX_UNUSED 0
-#define BTRFS_DISCARD_INDEX_START 1
-
-struct btrfs_discard_ctl {
- struct workqueue_struct *discard_workers;
- struct delayed_work work;
- spinlock_t lock;
- struct btrfs_block_group *block_group;
- struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
- u64 prev_discard;
- u64 prev_discard_time;
- atomic_t discardable_extents;
- atomic64_t discardable_bytes;
- u64 max_discard_size;
- u64 delay_ms;
- u32 iops_limit;
- u32 kbps_limit;
- u64 discard_extent_bytes;
- u64 discard_bitmap_bytes;
- atomic64_t discard_bytes_saved;
-};
-
-enum btrfs_orphan_cleanup_state {
- ORPHAN_CLEANUP_STARTED = 1,
- ORPHAN_CLEANUP_DONE = 2,
-};
-
-void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
-
-/* fs_info */
-struct reloc_control;
-struct btrfs_device;
-struct btrfs_fs_devices;
-struct btrfs_balance_control;
-struct btrfs_delayed_root;
-
-/*
- * Block group or device which contains an active swapfile. Used for preventing
- * unsafe operations while a swapfile is active.
- *
- * These are sorted on (ptr, inode) (note that a block group or device can
- * contain more than one swapfile). We compare the pointer values because we
- * don't actually care what the object is, we just need a quick check whether
- * the object exists in the rbtree.
- */
-struct btrfs_swapfile_pin {
- struct rb_node node;
- void *ptr;
- struct inode *inode;
- /*
- * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
- * points to a struct btrfs_device.
- */
- bool is_block_group;
- /*
- * Only used when 'is_block_group' is true and it is the number of
- * extents used by a swapfile for this block group ('ptr' field).
- */
- int bg_extent_count;
-};
-
-bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
-
-enum {
- BTRFS_FS_BARRIER,
- BTRFS_FS_CLOSING_START,
- BTRFS_FS_CLOSING_DONE,
- BTRFS_FS_LOG_RECOVERING,
- BTRFS_FS_OPEN,
- BTRFS_FS_QUOTA_ENABLED,
- BTRFS_FS_UPDATE_UUID_TREE_GEN,
- BTRFS_FS_CREATING_FREE_SPACE_TREE,
- BTRFS_FS_BTREE_ERR,
- BTRFS_FS_LOG1_ERR,
- BTRFS_FS_LOG2_ERR,
- BTRFS_FS_QUOTA_OVERRIDE,
- /* Used to record internally whether fs has been frozen */
- BTRFS_FS_FROZEN,
- /*
- * Indicate that balance has been set up from the ioctl and is in the
- * main phase. The fs_info::balance_ctl is initialized.
- */
- BTRFS_FS_BALANCE_RUNNING,
-
- /*
- * Indicate that relocation of a chunk has started, it's set per chunk
- * and is toggled between chunks.
- * Set, tested and cleared while holding fs_info::send_reloc_lock.
- */
- BTRFS_FS_RELOC_RUNNING,
-
- /* Indicate that the cleaner thread is awake and doing something. */
- BTRFS_FS_CLEANER_RUNNING,
-
- /*
- * The checksumming has an optimized version and is considered fast,
- * so we don't need to offload checksums to workqueues.
- */
- BTRFS_FS_CSUM_IMPL_FAST,
-
- /* Indicate that the discard workqueue can service discards. */
- BTRFS_FS_DISCARD_RUNNING,
-
- /* Indicate that we need to cleanup space cache v1 */
- BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
-
- /* Indicate that we can't trust the free space tree for caching yet */
- BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
-
- /* Indicate whether there are any tree modification log users */
- BTRFS_FS_TREE_MOD_LOG_USERS,
-
-#if BITS_PER_LONG == 32
- /* Indicate if we have error/warn message printed on 32bit systems */
- BTRFS_FS_32BIT_ERROR,
- BTRFS_FS_32BIT_WARN,
-#endif
-};
-
-/*
- * Exclusive operations (device replace, resize, device add/remove, balance)
- */
-enum btrfs_exclusive_operation {
- BTRFS_EXCLOP_NONE,
- BTRFS_EXCLOP_BALANCE,
- BTRFS_EXCLOP_DEV_ADD,
- BTRFS_EXCLOP_DEV_REMOVE,
- BTRFS_EXCLOP_DEV_REPLACE,
- BTRFS_EXCLOP_RESIZE,
- BTRFS_EXCLOP_SWAP_ACTIVATE,
-};
-
-struct btrfs_fs_info {
- u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
- unsigned long flags;
- struct btrfs_root *extent_root;
- struct btrfs_root *tree_root;
- struct btrfs_root *chunk_root;
- struct btrfs_root *dev_root;
- struct btrfs_root *fs_root;
- struct btrfs_root *csum_root;
- struct btrfs_root *quota_root;
- struct btrfs_root *uuid_root;
- struct btrfs_root *free_space_root;
- struct btrfs_root *data_reloc_root;
-
- /* the log root tree is a directory of all the other log roots */
- struct btrfs_root *log_root_tree;
-
- spinlock_t fs_roots_radix_lock;
- struct radix_tree_root fs_roots_radix;
-
- /* block group cache stuff */
- spinlock_t block_group_cache_lock;
- u64 first_logical_byte;
- struct rb_root block_group_cache_tree;
-
- /* keep track of unallocated space */
- atomic64_t free_chunk_space;
-
- /* Track ranges which are used by log trees blocks/logged data extents */
- struct extent_io_tree excluded_extents;
-
- /* logical->physical extent mapping */
- struct extent_map_tree mapping_tree;
-
- /*
- * block reservation for extent, checksum, root tree and
- * delayed dir index item
- */
- struct btrfs_block_rsv global_block_rsv;
- /* block reservation for metadata operations */
- struct btrfs_block_rsv trans_block_rsv;
- /* block reservation for chunk tree */
- struct btrfs_block_rsv chunk_block_rsv;
- /* block reservation for delayed operations */
- struct btrfs_block_rsv delayed_block_rsv;
- /* block reservation for delayed refs */
- struct btrfs_block_rsv delayed_refs_rsv;
-
- struct btrfs_block_rsv empty_block_rsv;
-
- u64 generation;
- u64 last_trans_committed;
- u64 avg_delayed_ref_runtime;
-
- /*
- * this is updated to the current trans every time a full commit
- * is required instead of the faster short fsync log commits
- */
- u64 last_trans_log_full_commit;
- unsigned long mount_opt;
- /*
- * Track requests for actions that need to be done during transaction
- * commit (like for some mount options).
- */
- unsigned long pending_changes;
- unsigned long compress_type:4;
- unsigned int compress_level;
- u32 commit_interval;
- /*
- * It is a suggestive number, the read side is safe even it gets a
- * wrong number because we will write out the data into a regular
- * extent. The write side(mount/remount) is under ->s_umount lock,
- * so it is also safe.
- */
- u64 max_inline;
-
- struct btrfs_transaction *running_transaction;
- wait_queue_head_t transaction_throttle;
- wait_queue_head_t transaction_wait;
- wait_queue_head_t transaction_blocked_wait;
- wait_queue_head_t async_submit_wait;
-
- /*
- * Used to protect the incompat_flags, compat_flags, compat_ro_flags
- * when they are updated.
- *
- * Because we do not clear the flags for ever, so we needn't use
- * the lock on the read side.
- *
- * We also needn't use the lock when we mount the fs, because
- * there is no other task which will update the flag.
- */
- spinlock_t super_lock;
- struct btrfs_super_block *super_copy;
- struct btrfs_super_block *super_for_commit;
- struct super_block *sb;
- struct inode *btree_inode;
- struct mutex tree_log_mutex;
- struct mutex transaction_kthread_mutex;
- struct mutex cleaner_mutex;
- struct mutex chunk_mutex;
-
- /*
- * this is taken to make sure we don't set block groups ro after
- * the free space cache has been allocated on them
- */
- struct mutex ro_block_group_mutex;
-
- /* this is used during read/modify/write to make sure
- * no two ios are trying to mod the same stripe at the same
- * time
- */
- struct btrfs_stripe_hash_table *stripe_hash_table;
-
- /*
- * this protects the ordered operations list only while we are
- * processing all of the entries on it. This way we make
- * sure the commit code doesn't find the list temporarily empty
- * because another function happens to be doing non-waiting preflush
- * before jumping into the main commit.
- */
- struct mutex ordered_operations_mutex;
-
- struct rw_semaphore commit_root_sem;
-
- struct rw_semaphore cleanup_work_sem;
-
- struct rw_semaphore subvol_sem;
-
- spinlock_t trans_lock;
- /*
- * the reloc mutex goes with the trans lock, it is taken
- * during commit to protect us from the relocation code
- */
- struct mutex reloc_mutex;
-
- struct list_head trans_list;
- struct list_head dead_roots;
- struct list_head caching_block_groups;
-
- spinlock_t delayed_iput_lock;
- struct list_head delayed_iputs;
- atomic_t nr_delayed_iputs;
- wait_queue_head_t delayed_iputs_wait;
-
- atomic64_t tree_mod_seq;
-
- /* this protects tree_mod_log and tree_mod_seq_list */
- rwlock_t tree_mod_log_lock;
- struct rb_root tree_mod_log;
- struct list_head tree_mod_seq_list;
-
- atomic_t async_delalloc_pages;
-
- /*
- * this is used to protect the following list -- ordered_roots.
- */
- spinlock_t ordered_root_lock;
-
- /*
- * all fs/file tree roots in which there are data=ordered extents
- * pending writeback are added into this list.
- *
- * these can span multiple transactions and basically include
- * every dirty data page that isn't from nodatacow
- */
- struct list_head ordered_roots;
-
- struct mutex delalloc_root_mutex;
- spinlock_t delalloc_root_lock;
- /* all fs/file tree roots that have delalloc inodes. */
- struct list_head delalloc_roots;
-
- /*
- * there is a pool of worker threads for checksumming during writes
- * and a pool for checksumming after reads. This is because readers
- * can run with FS locks held, and the writers may be waiting for
- * those locks. We don't want ordering in the pending list to cause
- * deadlocks, and so the two are serviced separately.
- *
- * A third pool does submit_bio to avoid deadlocking with the other
- * two
- */
- struct btrfs_workqueue *workers;
- struct btrfs_workqueue *delalloc_workers;
- struct btrfs_workqueue *flush_workers;
- struct btrfs_workqueue *endio_workers;
- struct btrfs_workqueue *endio_meta_workers;
- struct btrfs_workqueue *endio_raid56_workers;
- struct btrfs_workqueue *rmw_workers;
- struct btrfs_workqueue *endio_meta_write_workers;
- struct btrfs_workqueue *endio_write_workers;
- struct btrfs_workqueue *endio_freespace_worker;
- struct btrfs_workqueue *caching_workers;
- struct btrfs_workqueue *readahead_workers;
-
- /*
- * fixup workers take dirty pages that didn't properly go through
- * the cow mechanism and make them safe to write. It happens
- * for the sys_munmap function call path
- */
- struct btrfs_workqueue *fixup_workers;
- struct btrfs_workqueue *delayed_workers;
-
- struct task_struct *transaction_kthread;
- struct task_struct *cleaner_kthread;
- u32 thread_pool_size;
-
- struct kobject *space_info_kobj;
- struct kobject *qgroups_kobj;
-
- /* used to keep from writing metadata until there is a nice batch */
- struct percpu_counter dirty_metadata_bytes;
- struct percpu_counter delalloc_bytes;
- struct percpu_counter ordered_bytes;
- s32 dirty_metadata_batch;
- s32 delalloc_batch;
-
- struct list_head dirty_cowonly_roots;
-
- struct btrfs_fs_devices *fs_devices;
-
- /*
- * The space_info list is effectively read only after initial
- * setup. It is populated at mount time and cleaned up after
- * all block groups are removed. RCU is used to protect it.
- */
- struct list_head space_info;
-
- struct btrfs_space_info *data_sinfo;
-
- struct reloc_control *reloc_ctl;
-
- /* data_alloc_cluster is only used in ssd_spread mode */
- struct btrfs_free_cluster data_alloc_cluster;
-
- /* all metadata allocations go through this cluster */
- struct btrfs_free_cluster meta_alloc_cluster;
-
- /* auto defrag inodes go here */
- spinlock_t defrag_inodes_lock;
- struct rb_root defrag_inodes;
- atomic_t defrag_running;
-
- /* Used to protect avail_{data, metadata, system}_alloc_bits */
- seqlock_t profiles_lock;
- /*
- * these three are in extended format (availability of single
- * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
- * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
- */
- u64 avail_data_alloc_bits;
- u64 avail_metadata_alloc_bits;
- u64 avail_system_alloc_bits;
-
- /* restriper state */
- spinlock_t balance_lock;
- struct mutex balance_mutex;
- atomic_t balance_pause_req;
- atomic_t balance_cancel_req;
- struct btrfs_balance_control *balance_ctl;
- wait_queue_head_t balance_wait_q;
-
- /* Cancellation requests for chunk relocation */
- atomic_t reloc_cancel_req;
-
- u32 data_chunk_allocations;
- u32 metadata_ratio;
-
- void *bdev_holder;
-
- /* private scrub information */
- struct mutex scrub_lock;
- atomic_t scrubs_running;
- atomic_t scrub_pause_req;
- atomic_t scrubs_paused;
- atomic_t scrub_cancel_req;
- wait_queue_head_t scrub_pause_wait;
- /*
- * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
- * running.
- */
- refcount_t scrub_workers_refcnt;
- struct btrfs_workqueue *scrub_workers;
- struct btrfs_workqueue *scrub_wr_completion_workers;
- struct btrfs_workqueue *scrub_parity_workers;
- struct btrfs_subpage_info *subpage_info;
-
- struct btrfs_discard_ctl discard_ctl;
-
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- u32 check_integrity_print_mask;
-#endif
- /* is qgroup tracking in a consistent state? */
- u64 qgroup_flags;
-
- /* holds configuration and tracking. Protected by qgroup_lock */
- struct rb_root qgroup_tree;
- spinlock_t qgroup_lock;
-
- /*
- * used to avoid frequently calling ulist_alloc()/ulist_free()
- * when doing qgroup accounting, it must be protected by qgroup_lock.
- */
- struct ulist *qgroup_ulist;
-
- /*
- * Protect user change for quota operations. If a transaction is needed,
- * it must be started before locking this lock.
- */
- struct mutex qgroup_ioctl_lock;
-
- /* list of dirty qgroups to be written at next commit */
- struct list_head dirty_qgroups;
-
- /* used by qgroup for an efficient tree traversal */
- u64 qgroup_seq;
-
- /* qgroup rescan items */
- struct mutex qgroup_rescan_lock; /* protects the progress item */
- struct btrfs_key qgroup_rescan_progress;
- struct btrfs_workqueue *qgroup_rescan_workers;
- struct completion qgroup_rescan_completion;
- struct btrfs_work qgroup_rescan_work;
- bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */
-
- /* filesystem state */
- unsigned long fs_state;
-
- struct btrfs_delayed_root *delayed_root;
-
- /* readahead tree */
- spinlock_t reada_lock;
- struct radix_tree_root reada_tree;
-
- /* readahead works cnt */
- atomic_t reada_works_cnt;
-
- /* Extent buffer radix tree */
- spinlock_t buffer_lock;
- /* Entries are eb->start / sectorsize */
- struct radix_tree_root buffer_radix;
-
- /* next backup root to be overwritten */
- int backup_root_index;
-
- /* device replace state */
- struct btrfs_dev_replace dev_replace;
-
- struct semaphore uuid_tree_rescan_sem;
-
- /* Used to reclaim the metadata space in the background. */
- struct work_struct async_reclaim_work;
- struct work_struct async_data_reclaim_work;
- struct work_struct preempt_reclaim_work;
-
- /* Reclaim partially filled block groups in the background */
- struct work_struct reclaim_bgs_work;
- struct list_head reclaim_bgs;
- int bg_reclaim_threshold;
-
- spinlock_t unused_bgs_lock;
- struct list_head unused_bgs;
- struct mutex unused_bg_unpin_mutex;
- /* Protect block groups that are going to be deleted */
- struct mutex reclaim_bgs_lock;
-
- /* Cached block sizes */
- u32 nodesize;
- u32 sectorsize;
- /* ilog2 of sectorsize, use to avoid 64bit division */
- u32 sectorsize_bits;
- u32 csum_size;
- u32 csums_per_leaf;
- u32 stripesize;
-
- /* Block groups and devices containing active swapfiles. */
- spinlock_t swapfile_pins_lock;
- struct rb_root swapfile_pins;
-
- struct crypto_shash *csum_shash;
-
- spinlock_t send_reloc_lock;
- /*
- * Number of send operations in progress.
- * Updated while holding fs_info::send_reloc_lock.
- */
- int send_in_progress;
-
- /* Type of exclusive operation running, protected by super_lock */
- enum btrfs_exclusive_operation exclusive_operation;
-
- /*
- * Zone size > 0 when in ZONED mode, otherwise it's used for a check
- * if the mode is enabled
- */
- union {
- u64 zone_size;
- u64 zoned;
- };
-
- struct mutex zoned_meta_io_lock;
- spinlock_t treelog_bg_lock;
- u64 treelog_bg;
-
- /*
- * Start of the dedicated data relocation block group, protected by
- * relocation_bg_lock.
- */
- spinlock_t relocation_bg_lock;
- u64 data_reloc_bg;
-
- spinlock_t zone_active_bgs_lock;
- struct list_head zone_active_bgs;
-
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- spinlock_t ref_verify_lock;
- struct rb_root block_tree;
-#endif
-
-#ifdef CONFIG_BTRFS_DEBUG
- struct kobject *debug_kobj;
- struct kobject *discard_debug_kobj;
- struct list_head allocated_roots;
-
- spinlock_t eb_leak_lock;
- struct list_head allocated_ebs;
-#endif
+ bool search_for_extension:1;
+ /* Stop search if any locks need to be taken (for read) */
+ bool nowait:1;
};
-static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
+#define BTRFS_PATH_AUTO_FREE(path_name) \
+ struct btrfs_path *path_name __free(btrfs_free_path) = NULL
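The macro above builds on the kernel's scope-based cleanup support (__free() from <linux/cleanup.h>), so a path declared this way is released automatically when it goes out of scope. A minimal usage sketch, assuming a hypothetical lookup helper (demo_lookup_item() is illustrative only, not part of this patch):

static int demo_lookup_item(struct btrfs_root *root, const struct btrfs_key *key)
{
	BTRFS_PATH_AUTO_FREE(path);	/* struct btrfs_path *path, auto-freed on return */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Read-only search: no transaction handle, no COW. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* No explicit btrfs_free_path(): __free(btrfs_free_path) runs at scope exit. */
	return ret < 0 ? ret : 0;
}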
/*
* The state of btrfs root
@@ -1110,6 +139,12 @@ enum {
BTRFS_ROOT_HAS_LOG_TREE,
/* Qgroup flushing is in progress */
BTRFS_ROOT_QGROUP_FLUSHING,
+ /* We started the orphan cleanup for this root. */
+ BTRFS_ROOT_ORPHAN_CLEANUP,
+ /* This root has a drop operation that was started previously. */
+ BTRFS_ROOT_UNFINISHED_DROP,
+ /* This reloc root needs to have its buffers lockdep class reset. */
+ BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
/*
@@ -1128,6 +163,8 @@ struct btrfs_qgroup_swapped_blocks {
* and for the extent tree extent_root root.
*/
struct btrfs_root {
+ struct rb_node rb_node;
+
struct extent_buffer *node;
struct extent_buffer *commit_root;
@@ -1154,17 +191,27 @@ struct btrfs_root {
atomic_t log_commit[2];
/* Used only for log trees of subvolumes, not for the log root tree */
atomic_t log_batch;
+ /*
+ * Protected by the 'log_mutex' lock but can be read without holding
+ * that lock to avoid unnecessary lock contention, in which case it
+ * should be read using btrfs_get_root_log_transid() except if it's a
+ * log tree in which case it can be directly accessed. Updates to this
+ * field should always use btrfs_set_root_log_transid(), except for log
+ * trees where the field can be updated directly.
+ */
int log_transid;
 /* Updated no matter whether the commit succeeds or not. */
int log_transid_committed;
- /* Just be updated when the commit succeeds. */
+ /*
+ * Only updated when the commit succeeds. Use
+ * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
+ * to access this field.
+ */
int last_log_commit;
pid_t log_start_pid;
u64 last_trans;
- u32 type;
-
u64 free_objectid;
struct btrfs_key defrag_progress;
@@ -1175,20 +222,11 @@ struct btrfs_root {
struct list_head root_list;
- spinlock_t log_extents_lock[2];
- struct list_head logged_list[2];
-
- int orphan_cleanup_state;
-
- spinlock_t inode_lock;
- /* red-black tree that keeps track of in-memory inodes */
- struct rb_root inode_tree;
+ /* Xarray that keeps track of in-memory inodes. */
+ struct xarray inodes;
- /*
- * radix tree that keeps track of delayed nodes of every inode,
- * protected by inode_lock
- */
- struct radix_tree_root delayed_nodes_tree;
+ /* Xarray that keeps track of delayed nodes of every inode. */
+ struct xarray delayed_nodes;
/*
* right now this just gets used so that a root has its own devid
* for stat. It may be used for more later
@@ -1264,6 +302,9 @@ struct btrfs_root {
/* Used only by log trees, when logging csum items */
struct extent_io_tree log_csum_range;
+ /* Used in simple quotas, track root during relocation. */
+ u64 relocation_src_root;
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
u64 alloc_bytenr;
#endif
@@ -1273,6 +314,72 @@ struct btrfs_root {
#endif
};
+static inline bool btrfs_root_readonly(const struct btrfs_root *root)
+{
+ /* Byte-swap the constant at compile time, root_item::flags is LE */
+ return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
+}
+
+static inline bool btrfs_root_dead(const struct btrfs_root *root)
+{
+ /* Byte-swap the constant at compile time, root_item::flags is LE */
+ return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
+}
+
+static inline u64 btrfs_root_id(const struct btrfs_root *root)
+{
+ return root->root_key.objectid;
+}
+
+static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
+{
+ return READ_ONCE(root->log_transid);
+}
+
+static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
+{
+ WRITE_ONCE(root->log_transid, log_transid);
+}
+
+static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
+{
+ return READ_ONCE(root->last_log_commit);
+}
+
+static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
+{
+ WRITE_ONCE(root->last_log_commit, commit_id);
+}
+
+static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root)
+{
+ return READ_ONCE(root->last_trans);
+}
+
+static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid)
+{
+ WRITE_ONCE(root->last_trans, transid);
+}
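
As noted in the comment on log_transid in struct btrfs_root above, writers update the field under log_mutex while readers may skip the lock and rely on the READ_ONCE()/WRITE_ONCE() pairing inside the helpers. A hedged sketch of that discipline, assuming the usual log_mutex member of struct btrfs_root (both demo functions are illustrative only):

/* Illustrative only: bump log_transid under log_mutex using the setter. */
static void demo_bump_log_transid(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	btrfs_set_root_log_transid(root, btrfs_get_root_log_transid(root) + 1);
	mutex_unlock(&root->log_mutex);
}

/* Illustrative only: a lockless reader goes through the getter. */
static int demo_peek_log_transid(struct btrfs_root *root)
{
	/* Relies on the READ_ONCE() inside btrfs_get_root_log_transid(). */
	return btrfs_get_root_log_transid(root);
}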
+
+/*
+ * Return the generation this root started with.
+ *
+ * Every normal root is created with root->root_key.offset set to its
+ * originating generation. If it is a snapshot, it is the generation when the
+ * snapshot was created.
+ *
+ * However, for TREE_RELOC roots, root_key.offset is the objectid of the owning
+ * tree root. Thankfully we copy the root item of the owning tree root, which
+ * has its last_snapshot set to what we would have root_key.offset set to, so
+ * return that if this is a TREE_RELOC root.
+ */
+static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
+{
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return btrfs_root_last_snapshot(&root->root_item);
+ return root->root_key.offset;
+}
+
/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
@@ -1291,6 +398,8 @@ struct btrfs_replace_extent_info {
* existing extent into a file range.
*/
bool is_new_extent;
+ /* Indicate if we should update the inode's mtime and ctime. */
+ bool update_times;
/* Meaningful only if is_new_extent is true. */
int qgroup_reserved;
/*
@@ -1362,17 +471,17 @@ struct btrfs_drop_extents_args {
struct btrfs_file_private {
void *filldir_buf;
+ u64 last_index;
+ struct extent_state *llseek_cached_state;
+ /* Task that allocated this structure. */
+ struct task_struct *owner_task;
};
-
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
-
return info->nodesize - sizeof(struct btrfs_header);
}
-#define BTRFS_LEAF_DATA_OFFSET offsetof(struct btrfs_leaf, items)
-
static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
@@ -1383,1466 +492,55 @@ static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}
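
To make the sizing helpers above concrete, a rough worked example assuming the default 16 KiB nodesize and the usual packed on-disk sizes (struct btrfs_header 101 bytes, struct btrfs_item 25 bytes, struct btrfs_key_ptr 33 bytes; these figures are assumptions stated here for illustration, not derived from this patch):

/*
 * nodesize                 = 16384
 * BTRFS_LEAF_DATA_SIZE     = 16384 - 101 = 16283 bytes of item space per leaf
 * BTRFS_MAX_ITEM_SIZE      = 16283 -  25 = 16258 bytes for a single item
 * BTRFS_NODEPTRS_PER_BLOCK = 16283 /  33 = 493 key pointers per node
 */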
-#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
- (offsetof(struct btrfs_file_extent_item, disk_bytenr))
-static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
-{
- return BTRFS_MAX_ITEM_SIZE(info) -
- BTRFS_FILE_EXTENT_INLINE_DATA_START;
-}
-
static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
-/*
- * Flags for mount options.
- *
- * Note: don't forget to add new options to btrfs_show_options()
- */
-enum {
- BTRFS_MOUNT_NODATASUM = (1UL << 0),
- BTRFS_MOUNT_NODATACOW = (1UL << 1),
- BTRFS_MOUNT_NOBARRIER = (1UL << 2),
- BTRFS_MOUNT_SSD = (1UL << 3),
- BTRFS_MOUNT_DEGRADED = (1UL << 4),
- BTRFS_MOUNT_COMPRESS = (1UL << 5),
- BTRFS_MOUNT_NOTREELOG = (1UL << 6),
- BTRFS_MOUNT_FLUSHONCOMMIT = (1UL << 7),
- BTRFS_MOUNT_SSD_SPREAD = (1UL << 8),
- BTRFS_MOUNT_NOSSD = (1UL << 9),
- BTRFS_MOUNT_DISCARD_SYNC = (1UL << 10),
- BTRFS_MOUNT_FORCE_COMPRESS = (1UL << 11),
- BTRFS_MOUNT_SPACE_CACHE = (1UL << 12),
- BTRFS_MOUNT_CLEAR_CACHE = (1UL << 13),
- BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1UL << 14),
- BTRFS_MOUNT_ENOSPC_DEBUG = (1UL << 15),
- BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
- BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
- BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
- BTRFS_MOUNT_CHECK_INTEGRITY = (1UL << 19),
- BTRFS_MOUNT_CHECK_INTEGRITY_DATA = (1UL << 20),
- BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 21),
- BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 22),
- BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 23),
- BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 24),
- BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 25),
- BTRFS_MOUNT_NOLOGREPLAY = (1UL << 26),
- BTRFS_MOUNT_REF_VERIFY = (1UL << 27),
- BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 28),
- BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 29),
- BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 30),
-};
-
-#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
-#define BTRFS_DEFAULT_MAX_INLINE (2048)
-
-#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
-#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
-#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
-#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
- BTRFS_MOUNT_##opt)
-
-#define btrfs_set_and_info(fs_info, opt, fmt, args...) \
-do { \
- if (!btrfs_test_opt(fs_info, opt)) \
- btrfs_info(fs_info, fmt, ##args); \
- btrfs_set_opt(fs_info->mount_opt, opt); \
-} while (0)
-
-#define btrfs_clear_and_info(fs_info, opt, fmt, args...) \
-do { \
- if (btrfs_test_opt(fs_info, opt)) \
- btrfs_info(fs_info, fmt, ##args); \
- btrfs_clear_opt(fs_info->mount_opt, opt); \
-} while (0)
-
-/*
- * Requests for changes that need to be done during transaction commit.
- *
- * Internal mount options that are used for special handling of the real
- * mount options (eg. cannot be set during remount and have to be set during
- * transaction commit)
- */
-
-#define BTRFS_PENDING_COMMIT (0)
-
-#define btrfs_test_pending(info, opt) \
- test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
-#define btrfs_set_pending(info, opt) \
- set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
-#define btrfs_clear_pending(info, opt) \
- clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
-
-/*
- * Helpers for setting pending mount option changes.
- *
- * Expects corresponding macros
- * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name
- */
-#define btrfs_set_pending_and_info(info, opt, fmt, args...) \
-do { \
- if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \
- btrfs_info((info), fmt, ##args); \
- btrfs_set_pending((info), SET_##opt); \
- btrfs_clear_pending((info), CLEAR_##opt); \
- } \
-} while(0)
-
-#define btrfs_clear_pending_and_info(info, opt, fmt, args...) \
-do { \
- if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \
- btrfs_info((info), fmt, ##args); \
- btrfs_set_pending((info), CLEAR_##opt); \
- btrfs_clear_pending((info), SET_##opt); \
- } \
-} while(0)
-
-/*
- * Inode flags
- */
-#define BTRFS_INODE_NODATASUM (1U << 0)
-#define BTRFS_INODE_NODATACOW (1U << 1)
-#define BTRFS_INODE_READONLY (1U << 2)
-#define BTRFS_INODE_NOCOMPRESS (1U << 3)
-#define BTRFS_INODE_PREALLOC (1U << 4)
-#define BTRFS_INODE_SYNC (1U << 5)
-#define BTRFS_INODE_IMMUTABLE (1U << 6)
-#define BTRFS_INODE_APPEND (1U << 7)
-#define BTRFS_INODE_NODUMP (1U << 8)
-#define BTRFS_INODE_NOATIME (1U << 9)
-#define BTRFS_INODE_DIRSYNC (1U << 10)
-#define BTRFS_INODE_COMPRESS (1U << 11)
-
-#define BTRFS_INODE_ROOT_ITEM_INIT (1U << 31)
-
-#define BTRFS_INODE_FLAG_MASK \
- (BTRFS_INODE_NODATASUM | \
- BTRFS_INODE_NODATACOW | \
- BTRFS_INODE_READONLY | \
- BTRFS_INODE_NOCOMPRESS | \
- BTRFS_INODE_PREALLOC | \
- BTRFS_INODE_SYNC | \
- BTRFS_INODE_IMMUTABLE | \
- BTRFS_INODE_APPEND | \
- BTRFS_INODE_NODUMP | \
- BTRFS_INODE_NOATIME | \
- BTRFS_INODE_DIRSYNC | \
- BTRFS_INODE_COMPRESS | \
- BTRFS_INODE_ROOT_ITEM_INIT)
-
-#define BTRFS_INODE_RO_VERITY (1U << 0)
-
-#define BTRFS_INODE_RO_FLAG_MASK (BTRFS_INODE_RO_VERITY)
-
-struct btrfs_map_token {
- struct extent_buffer *eb;
- char *kaddr;
- unsigned long offset;
-};
-
-#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
- ((bytes) >> (fs_info)->sectorsize_bits)
-
-static inline void btrfs_init_map_token(struct btrfs_map_token *token,
- struct extent_buffer *eb)
-{
- token->eb = eb;
- token->kaddr = page_address(eb->pages[0]);
- token->offset = 0;
-}
-
-/* some macros to generate set/get functions for the struct fields. This
- * assumes there is a lefoo_to_cpu for every type, so lets make a simple
- * one for u8:
- */
-#define le8_to_cpu(v) (v)
-#define cpu_to_le8(v) (v)
-#define __le8 u8
-
-static inline u8 get_unaligned_le8(const void *p)
-{
- return *(u8 *)p;
-}
-
-static inline void put_unaligned_le8(u8 val, void *p)
-{
- *(u8 *)p = val;
-}
-
-#define read_eb_member(eb, ptr, type, member, result) (\
- read_extent_buffer(eb, (char *)(result), \
- ((unsigned long)(ptr)) + \
- offsetof(type, member), \
- sizeof(((type *)0)->member)))
-
-#define write_eb_member(eb, ptr, type, member, result) (\
- write_extent_buffer(eb, (char *)(result), \
- ((unsigned long)(ptr)) + \
- offsetof(type, member), \
- sizeof(((type *)0)->member)))
-
-#define DECLARE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off); \
-void btrfs_set_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off, \
- u##bits val); \
-u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
- const void *ptr, unsigned long off); \
-void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
- unsigned long off, u##bits val);
-
-DECLARE_BTRFS_SETGET_BITS(8)
-DECLARE_BTRFS_SETGET_BITS(16)
-DECLARE_BTRFS_SETGET_BITS(32)
-DECLARE_BTRFS_SETGET_BITS(64)
-
-#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
- const type *s) \
-{ \
- BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
- return btrfs_get_##bits(eb, s, offsetof(type, member)); \
-} \
-static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \
- u##bits val) \
-{ \
- BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
- btrfs_set_##bits(eb, s, offsetof(type, member), val); \
-} \
-static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \
- const type *s) \
-{ \
- BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
- return btrfs_get_token_##bits(token, s, offsetof(type, member));\
-} \
-static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
- type *s, u##bits val) \
-{ \
- BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
- btrfs_set_token_##bits(token, s, offsetof(type, member), val); \
-}
-
-#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
-{ \
- const type *p = page_address(eb->pages[0]) + \
- offset_in_page(eb->start); \
- return get_unaligned_le##bits(&p->member); \
-} \
-static inline void btrfs_set_##name(const struct extent_buffer *eb, \
- u##bits val) \
-{ \
- type *p = page_address(eb->pages[0]) + offset_in_page(eb->start); \
- put_unaligned_le##bits(val, &p->member); \
-}
-
-#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(const type *s) \
-{ \
- return get_unaligned_le##bits(&s->member); \
-} \
-static inline void btrfs_set_##name(type *s, u##bits val) \
-{ \
- put_unaligned_le##bits(val, &s->member); \
-}
-
-static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb,
- struct btrfs_dev_item *s)
-{
- BUILD_BUG_ON(sizeof(u64) !=
- sizeof(((struct btrfs_dev_item *)0))->total_bytes);
- return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
- total_bytes));
-}
-static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb,
- struct btrfs_dev_item *s,
- u64 val)
-{
- BUILD_BUG_ON(sizeof(u64) !=
- sizeof(((struct btrfs_dev_item *)0))->total_bytes);
- WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
- btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
-}
-
-
-BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
-BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
-BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
-BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
-BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item,
- start_offset, 64);
-BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32);
-BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64);
-BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32);
-BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8);
-BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8);
-BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64);
-
-BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item,
- total_bytes, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item,
- bytes_used, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item,
- io_align, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item,
- io_width, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item,
- sector_size, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item,
- dev_group, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item,
- seek_speed, 8);
-BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item,
- bandwidth, 8);
-BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item,
- generation, 64);
-
-static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d)
-{
- return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid);
-}
-
-static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d)
-{
- return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid);
-}
-
-BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64);
-BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64);
-BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64);
-BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32);
-BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32);
-BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32);
-BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64);
-BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16);
-BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16);
-BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64);
-BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64);
-
-static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s)
-{
- return (char *)s + offsetof(struct btrfs_stripe, dev_uuid);
-}
-
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk,
- stripe_len, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk,
- io_align, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk,
- io_width, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk,
- sector_size, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk,
- num_stripes, 16);
-BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk,
- sub_stripes, 16);
-BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64);
-
-static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c,
- int nr)
-{
- unsigned long offset = (unsigned long)c;
- offset += offsetof(struct btrfs_chunk, stripe);
- offset += nr * sizeof(struct btrfs_stripe);
- return (struct btrfs_stripe *)offset;
-}
-
-static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)
-{
- return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));
-}
-
-static inline u64 btrfs_stripe_offset_nr(const struct extent_buffer *eb,
- struct btrfs_chunk *c, int nr)
-{
- return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
-}
-
-static inline u64 btrfs_stripe_devid_nr(const struct extent_buffer *eb,
- struct btrfs_chunk *c, int nr)
-{
- return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
-}
-
-/* struct btrfs_block_group_item */
-BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item,
- used, 64);
-BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item,
- used, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid,
- struct btrfs_block_group_item, chunk_objectid, 64);
-
-BTRFS_SETGET_FUNCS(block_group_chunk_objectid,
- struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(block_group_flags,
- struct btrfs_block_group_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags,
- struct btrfs_block_group_item, flags, 64);
-
-/* struct btrfs_free_space_info */
-BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
- extent_count, 32);
-BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);
-
-/* struct btrfs_inode_ref */
-BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
-BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
-
-/* struct btrfs_inode_extref */
-BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref,
- parent_objectid, 64);
-BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref,
- name_len, 16);
-BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64);
-
-/* struct btrfs_inode_item */
-BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
-BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
-BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64);
-BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64);
-BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64);
-BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64);
-BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
-BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
-BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
-BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
-BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
-BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
- generation, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
- sequence, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
- transid, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
- nbytes, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
- block_group, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
-BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
-BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
-
-/* struct btrfs_dev_extent */
-BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent,
- chunk_tree, 64);
-BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
- chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent,
- chunk_offset, 64);
-BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64);
-BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64);
-BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item,
- generation, 64);
-BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64);
-
-BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8);
-
-static inline void btrfs_tree_block_key(const struct extent_buffer *eb,
- struct btrfs_tree_block_info *item,
- struct btrfs_disk_key *key)
-{
- read_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
-}
-
-static inline void btrfs_set_tree_block_key(const struct extent_buffer *eb,
- struct btrfs_tree_block_info *item,
- struct btrfs_disk_key *key)
-{
- write_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
-}
-
-BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref,
- root, 64);
-BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref,
- objectid, 64);
-BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref,
- offset, 64);
-BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref,
- count, 32);
-
-BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref,
- count, 32);
-
-BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref,
- type, 8);
-BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref,
- offset, 64);
-
-static inline u32 btrfs_extent_inline_ref_size(int type)
-{
- if (type == BTRFS_TREE_BLOCK_REF_KEY ||
- type == BTRFS_SHARED_BLOCK_REF_KEY)
- return sizeof(struct btrfs_extent_inline_ref);
- if (type == BTRFS_SHARED_DATA_REF_KEY)
- return sizeof(struct btrfs_shared_data_ref) +
- sizeof(struct btrfs_extent_inline_ref);
- if (type == BTRFS_EXTENT_DATA_REF_KEY)
- return sizeof(struct btrfs_extent_data_ref) +
- offsetof(struct btrfs_extent_inline_ref, offset);
- return 0;
-}
-
-/* struct btrfs_node */
-BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64);
-BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr,
- blockptr, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr,
- generation, 64);
-
-static inline u64 btrfs_node_blockptr(const struct extent_buffer *eb, int nr)
-{
- unsigned long ptr;
- ptr = offsetof(struct btrfs_node, ptrs) +
- sizeof(struct btrfs_key_ptr) * nr;
- return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr);
-}
-
-static inline void btrfs_set_node_blockptr(const struct extent_buffer *eb,
- int nr, u64 val)
-{
- unsigned long ptr;
- ptr = offsetof(struct btrfs_node, ptrs) +
- sizeof(struct btrfs_key_ptr) * nr;
- btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val);
-}
-
-static inline u64 btrfs_node_ptr_generation(const struct extent_buffer *eb, int nr)
-{
- unsigned long ptr;
- ptr = offsetof(struct btrfs_node, ptrs) +
- sizeof(struct btrfs_key_ptr) * nr;
- return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr);
-}
-
-static inline void btrfs_set_node_ptr_generation(const struct extent_buffer *eb,
- int nr, u64 val)
-{
- unsigned long ptr;
- ptr = offsetof(struct btrfs_node, ptrs) +
- sizeof(struct btrfs_key_ptr) * nr;
- btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val);
-}
-
-static inline unsigned long btrfs_node_key_ptr_offset(int nr)
-{
- return offsetof(struct btrfs_node, ptrs) +
- sizeof(struct btrfs_key_ptr) * nr;
-}
-
-void btrfs_node_key(const struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr);
+int __init btrfs_ctree_init(void);
+void __cold btrfs_ctree_exit(void);
-static inline void btrfs_set_node_key(const struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr)
-{
- unsigned long ptr;
- ptr = btrfs_node_key_ptr_offset(nr);
- write_eb_member(eb, (struct btrfs_key_ptr *)ptr,
- struct btrfs_key_ptr, key, disk_key);
-}
-
-/* struct btrfs_item */
-BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32);
-BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32);
-
-static inline unsigned long btrfs_item_nr_offset(int nr)
-{
- return offsetof(struct btrfs_leaf, items) +
- sizeof(struct btrfs_item) * nr;
-}
-
-static inline struct btrfs_item *btrfs_item_nr(int nr)
-{
- return (struct btrfs_item *)btrfs_item_nr_offset(nr);
-}
-
-static inline u32 btrfs_item_end(const struct extent_buffer *eb,
- struct btrfs_item *item)
-{
- return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
-}
-
-static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr)
-{
- return btrfs_item_end(eb, btrfs_item_nr(nr));
-}
-
-static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr)
-{
- return btrfs_item_offset(eb, btrfs_item_nr(nr));
-}
-
-static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr)
-{
- return btrfs_item_size(eb, btrfs_item_nr(nr));
-}
-
-static inline void btrfs_item_key(const struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr)
-{
- struct btrfs_item *item = btrfs_item_nr(nr);
- read_eb_member(eb, item, struct btrfs_item, key, disk_key);
-}
+int btrfs_bin_search(const struct extent_buffer *eb, int first_slot,
+ const struct btrfs_key *key, int *slot);
-static inline void btrfs_set_item_key(struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr)
-{
- struct btrfs_item *item = btrfs_item_nr(nr);
- write_eb_member(eb, item, struct btrfs_item, key, disk_key);
-}
-
-BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64);
-
-/*
- * struct btrfs_root_ref
- */
-BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64);
-BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64);
-BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16);
-
-/* struct btrfs_dir_item */
-BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16);
-BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8);
-BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16);
-BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8);
-BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item,
- data_len, 16);
-BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item,
- name_len, 16);
-BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item,
- transid, 64);
-
-static inline void btrfs_dir_item_key(const struct extent_buffer *eb,
- const struct btrfs_dir_item *item,
- struct btrfs_disk_key *key)
-{
- read_eb_member(eb, item, struct btrfs_dir_item, location, key);
-}
-
-static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
- struct btrfs_dir_item *item,
- const struct btrfs_disk_key *key)
-{
- write_eb_member(eb, item, struct btrfs_dir_item, location, key);
-}
-
-BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header,
- num_entries, 64);
-BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header,
- num_bitmaps, 64);
-BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header,
- generation, 64);
-
-static inline void btrfs_free_space_key(const struct extent_buffer *eb,
- const struct btrfs_free_space_header *h,
- struct btrfs_disk_key *key)
-{
- read_eb_member(eb, h, struct btrfs_free_space_header, location, key);
-}
-
-static inline void btrfs_set_free_space_key(struct extent_buffer *eb,
- struct btrfs_free_space_header *h,
- const struct btrfs_disk_key *key)
-{
- write_eb_member(eb, h, struct btrfs_free_space_header, location, key);
-}
-
-/* struct btrfs_disk_key */
-BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key,
- objectid, 64);
-BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64);
-BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8);
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
#ifdef __LITTLE_ENDIAN
/*
- * Optimized helpers for little-endian architectures where CPU and on-disk
- * structures have the same endianness and we can skip conversions.
+ * Compare two keys; on little-endian the disk order is the same as the CPU
+ * order, so we can avoid the conversion.
*/
-
-static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu_key,
- const struct btrfs_disk_key *disk_key)
-{
- memcpy(cpu_key, disk_key, sizeof(struct btrfs_key));
-}
-
-static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk_key,
- const struct btrfs_key *cpu_key)
+static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
+ const struct btrfs_key *k2)
{
- memcpy(disk_key, cpu_key, sizeof(struct btrfs_key));
-}
-
-static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
- struct btrfs_key *cpu_key, int nr)
-{
- struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key;
-
- btrfs_node_key(eb, disk_key, nr);
-}
-
-static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
- struct btrfs_key *cpu_key, int nr)
-{
- struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key;
-
- btrfs_item_key(eb, disk_key, nr);
-}
+ const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
-static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
- const struct btrfs_dir_item *item,
- struct btrfs_key *cpu_key)
-{
- struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)cpu_key;
-
- btrfs_dir_item_key(eb, item, disk_key);
+ return btrfs_comp_cpu_keys(k1, k2);
}
#else
-static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
- const struct btrfs_disk_key *disk)
-{
- cpu->offset = le64_to_cpu(disk->offset);
- cpu->type = disk->type;
- cpu->objectid = le64_to_cpu(disk->objectid);
-}
-
-static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
- const struct btrfs_key *cpu)
-{
- disk->offset = cpu_to_le64(cpu->offset);
- disk->type = cpu->type;
- disk->objectid = cpu_to_le64(cpu->objectid);
-}
-
-static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
- struct btrfs_key *key, int nr)
+/* Compare two keys in a memcmp fashion. */
+static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
+ const struct btrfs_key *k2)
{
- struct btrfs_disk_key disk_key;
- btrfs_node_key(eb, &disk_key, nr);
- btrfs_disk_key_to_cpu(key, &disk_key);
-}
+ struct btrfs_key k1;
-static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
- struct btrfs_key *key, int nr)
-{
- struct btrfs_disk_key disk_key;
- btrfs_item_key(eb, &disk_key, nr);
- btrfs_disk_key_to_cpu(key, &disk_key);
-}
+ btrfs_disk_key_to_cpu(&k1, disk);
-static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
- const struct btrfs_dir_item *item,
- struct btrfs_key *key)
-{
- struct btrfs_disk_key disk_key;
- btrfs_dir_item_key(eb, item, &disk_key);
- btrfs_disk_key_to_cpu(key, &disk_key);
+ return btrfs_comp_cpu_keys(&k1, k2);
}
#endif
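
Both variants ultimately defer to btrfs_comp_cpu_keys(), which follows the memcmp() convention (negative/zero/positive) and, by the usual btrfs key ordering, compares objectid first, then type, then offset. A small illustrative sketch (demo_keys_ordered() and the chosen key types are only examples, not part of this patch):

static bool demo_keys_ordered(void)
{
	const struct btrfs_key inode_key = {
		.objectid = 256, .type = BTRFS_INODE_ITEM_KEY, .offset = 0,
	};
	const struct btrfs_key data_key = {
		.objectid = 256, .type = BTRFS_EXTENT_DATA_KEY, .offset = 0,
	};

	/* Same objectid, so the smaller type value sorts first. */
	return btrfs_comp_cpu_keys(&inode_key, &data_key) < 0;
}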
-/* struct btrfs_header */
-BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64);
-BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header,
- generation, 64);
-BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64);
-BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32);
-BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64);
-BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8);
-BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header,
- generation, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header,
- nritems, 32);
-BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64);
-
-static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag)
-{
- return (btrfs_header_flags(eb) & flag) == flag;
-}
-
-static inline void btrfs_set_header_flag(struct extent_buffer *eb, u64 flag)
-{
- u64 flags = btrfs_header_flags(eb);
- btrfs_set_header_flags(eb, flags | flag);
-}
-
-static inline void btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
-{
- u64 flags = btrfs_header_flags(eb);
- btrfs_set_header_flags(eb, flags & ~flag);
-}
-
-static inline int btrfs_header_backref_rev(const struct extent_buffer *eb)
-{
- u64 flags = btrfs_header_flags(eb);
- return flags >> BTRFS_BACKREF_REV_SHIFT;
-}
-
-static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb,
- int rev)
-{
- u64 flags = btrfs_header_flags(eb);
- flags &= ~BTRFS_BACKREF_REV_MASK;
- flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT;
- btrfs_set_header_flags(eb, flags);
-}
-
-static inline int btrfs_is_leaf(const struct extent_buffer *eb)
-{
- return btrfs_header_level(eb) == 0;
-}
-
-/* struct btrfs_root_item */
-BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item,
- generation, 64);
-BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32);
-BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64);
-BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8);
-
-BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item,
- generation, 64);
-BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64);
-BTRFS_SETGET_STACK_FUNCS(root_drop_level, struct btrfs_root_item, drop_level, 8);
-BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
-BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64);
-BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32);
-BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
-BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
-BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
- last_snapshot, 64);
-BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item,
- generation_v2, 64);
-BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item,
- ctransid, 64);
-BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item,
- otransid, 64);
-BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item,
- stransid, 64);
-BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
- rtransid, 64);
-
-static inline bool btrfs_root_readonly(const struct btrfs_root *root)
-{
- /* Byte-swap the constant at compile time, root_item::flags is LE */
- return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
-}
-
-static inline bool btrfs_root_dead(const struct btrfs_root *root)
-{
- /* Byte-swap the constant at compile time, root_item::flags is LE */
- return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
-}
-
-/* struct btrfs_root_backup */
-BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
- tree_root, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup,
- tree_root_gen, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup,
- tree_root_level, 8);
-
-BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup,
- chunk_root, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup,
- chunk_root_gen, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup,
- chunk_root_level, 8);
-
-BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup,
- extent_root, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup,
- extent_root_gen, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup,
- extent_root_level, 8);
-
-BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup,
- fs_root, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup,
- fs_root_gen, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup,
- fs_root_level, 8);
-
-BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup,
- dev_root, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup,
- dev_root_gen, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup,
- dev_root_level, 8);
-
-BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup,
- csum_root, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup,
- csum_root_gen, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup,
- csum_root_level, 8);
-BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup,
- total_bytes, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup,
- bytes_used, 64);
-BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
- num_devices, 64);
-
-/* struct btrfs_balance_item */
-BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
-
-static inline void btrfs_balance_data(const struct extent_buffer *eb,
- const struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
-{
- read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
-}
-
-static inline void btrfs_set_balance_data(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- const struct btrfs_disk_balance_args *ba)
-{
- write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
-}
-
-static inline void btrfs_balance_meta(const struct extent_buffer *eb,
- const struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
-{
- read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
-}
-
-static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- const struct btrfs_disk_balance_args *ba)
-{
- write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
-}
-
-static inline void btrfs_balance_sys(const struct extent_buffer *eb,
- const struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
-{
- read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
-}
-
-static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- const struct btrfs_disk_balance_args *ba)
-{
- write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
-}
-
-static inline void
-btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
- const struct btrfs_disk_balance_args *disk)
-{
- memset(cpu, 0, sizeof(*cpu));
-
- cpu->profiles = le64_to_cpu(disk->profiles);
- cpu->usage = le64_to_cpu(disk->usage);
- cpu->devid = le64_to_cpu(disk->devid);
- cpu->pstart = le64_to_cpu(disk->pstart);
- cpu->pend = le64_to_cpu(disk->pend);
- cpu->vstart = le64_to_cpu(disk->vstart);
- cpu->vend = le64_to_cpu(disk->vend);
- cpu->target = le64_to_cpu(disk->target);
- cpu->flags = le64_to_cpu(disk->flags);
- cpu->limit = le64_to_cpu(disk->limit);
- cpu->stripes_min = le32_to_cpu(disk->stripes_min);
- cpu->stripes_max = le32_to_cpu(disk->stripes_max);
-}
-
-static inline void
-btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
- const struct btrfs_balance_args *cpu)
-{
- memset(disk, 0, sizeof(*disk));
-
- disk->profiles = cpu_to_le64(cpu->profiles);
- disk->usage = cpu_to_le64(cpu->usage);
- disk->devid = cpu_to_le64(cpu->devid);
- disk->pstart = cpu_to_le64(cpu->pstart);
- disk->pend = cpu_to_le64(cpu->pend);
- disk->vstart = cpu_to_le64(cpu->vstart);
- disk->vend = cpu_to_le64(cpu->vend);
- disk->target = cpu_to_le64(cpu->target);
- disk->flags = cpu_to_le64(cpu->flags);
- disk->limit = cpu_to_le64(cpu->limit);
- disk->stripes_min = cpu_to_le32(cpu->stripes_min);
- disk->stripes_max = cpu_to_le32(cpu->stripes_max);
-}
-
-/* struct btrfs_super_block */
-BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
-BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block,
- generation, 64);
-BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64);
-BTRFS_SETGET_STACK_FUNCS(super_sys_array_size,
- struct btrfs_super_block, sys_chunk_array_size, 32);
-BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation,
- struct btrfs_super_block, chunk_root_generation, 64);
-BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block,
- root_level, 8);
-BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block,
- chunk_root, 64);
-BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block,
- chunk_root_level, 8);
-BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block,
- log_root, 64);
-BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block,
- log_root_transid, 64);
-BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
- log_root_level, 8);
-BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
- total_bytes, 64);
-BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block,
- bytes_used, 64);
-BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block,
- sectorsize, 32);
-BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block,
- nodesize, 32);
-BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block,
- stripesize, 32);
-BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block,
- root_dir_objectid, 64);
-BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
- num_devices, 64);
-BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
- compat_flags, 64);
-BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
- compat_ro_flags, 64);
-BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
- incompat_flags, 64);
-BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
- csum_type, 16);
-BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block,
- cache_generation, 64);
-BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
-BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
- uuid_tree_generation, 64);
-
-int btrfs_super_csum_size(const struct btrfs_super_block *s);
-const char *btrfs_super_csum_name(u16 csum_type);
-const char *btrfs_super_csum_driver(u16 csum_type);
-size_t __attribute_const__ btrfs_get_num_csums(void);
-
-
-/*
- * The leaf data grows from end-to-front in the node.
- * this returns the address of the start of the last item,
- * which is the stop of the leaf data stack
- */
-static inline unsigned int leaf_data_end(const struct extent_buffer *leaf)
-{
- u32 nr = btrfs_header_nritems(leaf);
-
- if (nr == 0)
- return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
- return btrfs_item_offset_nr(leaf, nr - 1);
-}
-
-/* struct btrfs_file_extent_item */
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_type, struct btrfs_file_extent_item,
- type, 8);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
- struct btrfs_file_extent_item, disk_bytenr, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset,
- struct btrfs_file_extent_item, offset, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation,
- struct btrfs_file_extent_item, generation, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes,
- struct btrfs_file_extent_item, num_bytes, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_ram_bytes,
- struct btrfs_file_extent_item, ram_bytes, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes,
- struct btrfs_file_extent_item, disk_num_bytes, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
- struct btrfs_file_extent_item, compression, 8);
-
-static inline unsigned long
-btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e)
-{
- return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
-}
-
-static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
-{
- return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize;
-}
-
-BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
-BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item,
- disk_bytenr, 64);
-BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item,
- generation, 64);
-BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item,
- disk_num_bytes, 64);
-BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item,
- offset, 64);
-BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item,
- num_bytes, 64);
-BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item,
- ram_bytes, 64);
-BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item,
- compression, 8);
-BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item,
- encryption, 8);
-BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
- other_encoding, 16);
-
-/*
- * this returns the number of bytes used by the item on disk, minus the
- * size of any extent headers. If a file is compressed on disk, this is
- * the compressed size
- */
-static inline u32 btrfs_file_extent_inline_item_len(
- const struct extent_buffer *eb,
- struct btrfs_item *e)
-{
- return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
-}
-
-/* btrfs_qgroup_status_item */
-BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item,
- generation, 64);
-BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item,
- version, 64);
-BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
- flags, 64);
-BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
- rescan, 64);
-
-/* btrfs_qgroup_info_item */
-BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
- generation, 64);
-BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64);
-BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item,
- rfer_cmpr, 64);
-BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64);
-BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item,
- excl_cmpr, 64);
-
-BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation,
- struct btrfs_qgroup_info_item, generation, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item,
- rfer, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr,
- struct btrfs_qgroup_info_item, rfer_cmpr, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item,
- excl, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr,
- struct btrfs_qgroup_info_item, excl_cmpr, 64);
-
-/* btrfs_qgroup_limit_item */
-BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item,
- flags, 64);
-BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item,
- max_rfer, 64);
-BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item,
- max_excl, 64);
-BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item,
- rsv_rfer, 64);
-BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item,
- rsv_excl, 64);
-
-/* btrfs_dev_replace_item */
-BTRFS_SETGET_FUNCS(dev_replace_src_devid,
- struct btrfs_dev_replace_item, src_devid, 64);
-BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode,
- struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode,
- 64);
-BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item,
- replace_state, 64);
-BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item,
- time_started, 64);
-BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item,
- time_stopped, 64);
-BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item,
- num_write_errors, 64);
-BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors,
- struct btrfs_dev_replace_item, num_uncorrectable_read_errors,
- 64);
-BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item,
- cursor_left, 64);
-BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item,
- cursor_right, 64);
-
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid,
- struct btrfs_dev_replace_item, src_devid, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode,
- struct btrfs_dev_replace_item,
- cont_reading_from_srcdev_mode, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state,
- struct btrfs_dev_replace_item, replace_state, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started,
- struct btrfs_dev_replace_item, time_started, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped,
- struct btrfs_dev_replace_item, time_stopped, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors,
- struct btrfs_dev_replace_item, num_write_errors, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors,
- struct btrfs_dev_replace_item,
- num_uncorrectable_read_errors, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left,
- struct btrfs_dev_replace_item, cursor_left, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
- struct btrfs_dev_replace_item, cursor_right, 64);
-
-/* helper function to cast into the data area of the leaf. */
-#define btrfs_item_ptr(leaf, slot, type) \
- ((type *)(BTRFS_LEAF_DATA_OFFSET + \
- btrfs_item_offset_nr(leaf, slot)))
-
-#define btrfs_item_ptr_offset(leaf, slot) \
- ((unsigned long)(BTRFS_LEAF_DATA_OFFSET + \
- btrfs_item_offset_nr(leaf, slot)))
-
-static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
-{
- return crc32c(crc, address, length);
-}
-
-static inline void btrfs_crc32c_final(u32 crc, u8 *result)
-{
- put_unaligned_le32(~crc, result);
-}
-
-static inline u64 btrfs_name_hash(const char *name, int len)
-{
- return crc32c((u32)~1, name, len);
-}
-
-/*
- * Figure the key offset of an extended inode ref
- */
-static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
- int len)
-{
- return (u64) crc32c(parent_objectid, name, len);
-}
-
-static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
-{
- return mapping_gfp_constraint(mapping, ~__GFP_FS);
-}
-
-/* extent-tree.c */
-
-enum btrfs_inline_ref_type {
- BTRFS_REF_TYPE_INVALID,
- BTRFS_REF_TYPE_BLOCK,
- BTRFS_REF_TYPE_DATA,
- BTRFS_REF_TYPE_ANY,
-};
-
-int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
- struct btrfs_extent_inline_ref *iref,
- enum btrfs_inline_ref_type is_data);
-u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
-
-/*
- * Take the number of bytes to be checksummed and figure out how many leaves
- * it would require to store the csums for that many bytes.
- */
-static inline u64 btrfs_csum_bytes_to_leaves(
- const struct btrfs_fs_info *fs_info, u64 csum_bytes)
-{
- const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;
-
- return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
-}
-
-/*
- * Use this if we would be adding new items, as we could split nodes as we cow
- * down the tree.
- */
-static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
- unsigned num_items)
-{
- return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
-}
-
-/*
- * Doing a truncate or a modification won't result in new nodes or leaves, just
- * what we need for COW.
- */
-static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
- unsigned num_items)
-{
- return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
-}
-
-int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 num_bytes);
-void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
-int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long count);
-void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head);
-int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
-int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 offset, int metadata, u64 *refs, u64 *flags);
-int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
- int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes);
-int btrfs_exclude_logged_extents(struct extent_buffer *eb);
-int btrfs_cross_ref_exist(struct btrfs_root *root,
- u64 objectid, u64 offset, u64 bytenr, bool strict);
-struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 parent, u64 root_objectid,
- const struct btrfs_disk_key *key,
- int level, u64 hint,
- u64 empty_size,
- enum btrfs_lock_nesting nest);
-void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf,
- u64 parent, int last_ref);
-int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 owner,
- u64 offset, u64 ram_bytes,
- struct btrfs_key *ins);
-int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- u64 root_objectid, u64 owner, u64 offset,
- struct btrfs_key *ins);
-int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
- u64 min_alloc_size, u64 empty_size, u64 hint_byte,
- struct btrfs_key *ins, int is_data, int delalloc);
-int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref);
-int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref);
-int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct extent_buffer *eb, u64 flags,
- int level, int is_data);
-int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
-
-int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 len, int delalloc);
-int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
- u64 len);
-int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
-int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_ref *generic_ref);
-
-void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
-
-/*
- * Different levels of flushing to use when doing space reservations.
- *
- * The higher the level, the more methods we try to reclaim space.
- */
-enum btrfs_reserve_flush_enum {
- /* If we are in the transaction, we can't flush anything. */
- BTRFS_RESERVE_NO_FLUSH,
-
- /*
- * Flush space by:
- * - Running delayed inode items
- * - Allocating a new chunk
- */
- BTRFS_RESERVE_FLUSH_LIMIT,
-
- /*
- * Flush space by:
- * - Running delayed inode items
- * - Running delayed refs
- * - Running delalloc and waiting for ordered extents
- * - Allocating a new chunk
- */
- BTRFS_RESERVE_FLUSH_EVICT,
-
- /*
- * Flush space by above mentioned methods and by:
- * - Running delayed iputs
- * - Committing transaction
- *
- * Can be interrupted by a fatal signal.
- */
- BTRFS_RESERVE_FLUSH_DATA,
- BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
- BTRFS_RESERVE_FLUSH_ALL,
-
- /*
- * Pretty much the same as FLUSH_ALL, but can also steal space from
- * global rsv.
- *
- * Can be interrupted by a fatal signal.
- */
- BTRFS_RESERVE_FLUSH_ALL_STEAL,
-};
-
-enum btrfs_flush_state {
- FLUSH_DELAYED_ITEMS_NR = 1,
- FLUSH_DELAYED_ITEMS = 2,
- FLUSH_DELAYED_REFS_NR = 3,
- FLUSH_DELAYED_REFS = 4,
- FLUSH_DELALLOC = 5,
- FLUSH_DELALLOC_WAIT = 6,
- FLUSH_DELALLOC_FULL = 7,
- ALLOC_CHUNK = 8,
- ALLOC_CHUNK_FORCE = 9,
- RUN_DELAYED_IPUTS = 10,
- COMMIT_TRANS = 11,
-};
-
-int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv,
- int nitems, bool use_global_rsv);
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
-
-int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
-u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
-int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
- u64 start, u64 end);
-int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 num_bytes, u64 *actual_bytes);
-int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
-
-int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
-int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
-void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
-void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
-
-/* ctree.c */
-int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int *slot);
-int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid);
-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path,
const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -2859,14 +557,26 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
enum btrfs_lock_nesting nest);
+int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf,
+ struct extent_buffer *parent, int parent_slot,
+ struct extent_buffer **cow_ret,
+ u64 search_start, u64 empty_size,
+ enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer **cow_ret, u64 new_root_objectid);
-int btrfs_block_can_be_shared(struct btrfs_root *root,
- struct extent_buffer *buf);
-void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
+bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans,
+ const struct btrfs_root *root,
+ const struct extent_buffer *buf);
+int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, int level, int slot);
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path, u32 data_size);
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+ const struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -2887,13 +597,10 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_path *p, int find_higher,
int return_any);
-int btrfs_realloc_node(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, u64 *last_ret,
- struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
+DEFINE_FREE(btrfs_free_path, struct btrfs_path *, btrfs_free_path(_T))
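+/*
+ * A minimal usage sketch (hypothetical caller): with DEFINE_FREE() from
+ * <linux/cleanup.h>, a path can be declared so that it is released
+ * automatically when it goes out of scope:
+ *
+ *	struct btrfs_path *path __free(btrfs_free_path) = btrfs_alloc_path();
+ *
+ *	if (!path)
+ *		return -ENOMEM;
+ *	// no explicit btrfs_free_path() is needed on any exit path
+ */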
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr);
@@ -2930,7 +637,8 @@ struct btrfs_item_batch {
int nr;
};
-void btrfs_setup_item_for_insert(struct btrfs_root *root,
+void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *key,
u32 data_size);
@@ -2957,21 +665,42 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
return btrfs_insert_empty_items(trans, root, path, &batch);
}
-int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
u64 time_seq);
int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
struct btrfs_path *path);
-static inline int btrfs_next_old_item(struct btrfs_root *root,
- struct btrfs_path *p, u64 time_seq)
-{
- ++p->slots[0];
- if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
- return btrfs_next_old_leaf(root, p, time_seq);
- return 0;
-}
+int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
+ struct btrfs_path *path);
+
+/*
+ * Search in @root for a given @key, and store the key of the found slot in @found_key.
+ *
+ * @root: The root node of the tree.
+ * @key: The key we are looking for.
+ * @found_key: Will hold the found item.
+ * @path: Holds the current slot/leaf.
+ * @iter_ret: Contains the value returned from btrfs_search_slot or
+ * btrfs_get_next_valid_item, whichever was executed last.
+ *
+ * The @iter_ret is an output variable that will contain the return value of
+ * btrfs_search_slot, if it encountered an error, or the value returned from
+ * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
+ * slot was found, 1 if there were no more leaves, and <0 if there was an error.
+ *
+ * It's recommended to use a separate variable for iter_ret and then use it to
+ * set the function return value so there's no confusion of the 0/1/errno
+ * values stemming from btrfs_search_slot.
+ */
+#define btrfs_for_each_slot(root, key, found_key, path, iter_ret) \
+ for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0); \
+ (iter_ret) >= 0 && \
+ (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
+ (path)->slots[0]++ \
+ )
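+
+/*
+ * A minimal usage sketch (hypothetical caller, names illustrative), assuming
+ * @root and an allocated @path, iterating all items starting at @key:
+ *
+ *	struct btrfs_key key = { 0 };
+ *	struct btrfs_key found_key;
+ *	int iter_ret = 0;
+ *	int ret = 0;
+ *
+ *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
+ *		// process the item at path->nodes[0], path->slots[0]
+ *	}
+ *	// iter_ret is 1 once there are no more leaves, < 0 on error
+ *	if (iter_ret < 0)
+ *		ret = iter_ret;
+ */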
+
+int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);
/*
* Search the tree again to find a leaf with greater keys.
@@ -2988,911 +717,20 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
return btrfs_next_old_item(root, p, 0);
}
-int btrfs_leaf_free_space(struct extent_buffer *leaf);
-int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
- int for_reloc);
-int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *node,
- struct extent_buffer *parent);
-static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
-{
- /*
- * Do it this way so we only ever do one test_bit in the normal case.
- */
- if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
- if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
- return 2;
- return 1;
- }
- return 0;
-}
-
-/*
- * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
- * anything except sleep. This function is used to check the status of
- * the fs.
- * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
- * since setting and checking for SB_RDONLY in the superblock's flags is not
- * atomic.
- */
-static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
-{
- return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
- btrfs_fs_closing(fs_info);
-}
-
-static inline void btrfs_set_sb_rdonly(struct super_block *sb)
-{
- sb->s_flags |= SB_RDONLY;
- set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
-}
-
-static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
-{
- sb->s_flags &= ~SB_RDONLY;
- clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
-}
-
-/* root-item.c */
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
- u64 ref_id, u64 dirid, u64 sequence, const char *name,
- int name_len);
-int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
- u64 ref_id, u64 dirid, u64 *sequence, const char *name,
- int name_len);
-int btrfs_del_root(struct btrfs_trans_handle *trans,
- const struct btrfs_key *key);
-int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- const struct btrfs_key *key,
- struct btrfs_root_item *item);
-int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_key *key,
- struct btrfs_root_item *item);
-int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
- struct btrfs_path *path, struct btrfs_root_item *root_item,
- struct btrfs_key *root_key);
-int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
-void btrfs_set_root_node(struct btrfs_root_item *item,
- struct extent_buffer *node);
-void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
-void btrfs_update_root_times(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-
-/* uuid-tree.c */
-int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
- u64 subid);
-int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
- u64 subid);
-int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
-
-/* dir-item.c */
-int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
- const char *name, int name_len);
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
- int name_len, struct btrfs_inode *dir,
- struct btrfs_key *location, u8 type, u64 index);
-struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 dir,
- const char *name, int name_len,
- int mod);
-struct btrfs_dir_item *
-btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 dir,
- u64 index, const char *name, int name_len,
- int mod);
-struct btrfs_dir_item *
-btrfs_search_dir_index_item(struct btrfs_root *root,
- struct btrfs_path *path, u64 dirid,
- const char *name, int name_len);
-int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_dir_item *di);
-int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 objectid,
- const char *name, u16 name_len,
- const void *data, u16 data_len);
-struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 dir,
- const char *name, u16 name_len,
- int mod);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
- const char *name,
- int name_len);
-
-/* orphan.c */
-int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 offset);
-int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 offset);
-int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
-
-/* inode-item.c */
-int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, u64 index);
-int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, u64 *index);
-int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 objectid);
-int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_path *path,
- struct btrfs_key *location, int mod);
-
-struct btrfs_inode_extref *
-btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, int ins_len,
- int cow);
-
-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
- int slot, const char *name,
- int name_len);
-struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
- struct extent_buffer *leaf, int slot, u64 ref_objectid,
- const char *name, int name_len);
-/* file-item.c */
-struct btrfs_dio_private;
-int btrfs_del_csums(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr, u64 len);
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst);
-int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 objectid, u64 pos,
- u64 disk_offset, u64 disk_num_bytes,
- u64 num_bytes, u64 offset, u64 ram_bytes,
- u8 compression, u8 encryption, u16 other_encoding);
-int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 objectid,
- u64 bytenr, int mod);
-int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_ordered_sum *sums);
-blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
- u64 file_start, int contig);
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
- struct list_head *list, int search_commit);
-void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
- const struct btrfs_path *path,
- struct btrfs_file_extent_item *fi,
- const bool new_inline,
- struct extent_map *em);
-int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
- u64 len);
-int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
- u64 len);
-void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
-u64 btrfs_file_extent_end(const struct btrfs_path *path);
-
-/* inode.c */
-blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags);
-unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
- u32 bio_offset, struct page *page,
- u64 start, u64 end);
-struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
- u64 start, u64 len);
-noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
- u64 *orig_start, u64 *orig_block_len,
- u64 *ram_bytes, bool strict);
-
-void __btrfs_del_delalloc_inode(struct btrfs_root *root,
- struct btrfs_inode *inode);
-struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
-int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
-int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
- struct btrfs_inode *dir, struct btrfs_inode *inode,
- const char *name, int name_len);
-int btrfs_add_link(struct btrfs_trans_handle *trans,
- struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
- const char *name, int name_len, int add_backref, u64 index);
-int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
-int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
- int front);
-int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *inode, u64 new_size,
- u32 min_type, u64 *extents_found);
-
-int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
- bool in_reclaim_context);
-int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- unsigned int extra_bits,
- struct extent_state **cached_state);
-int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *new_root,
- struct btrfs_root *parent_root,
- struct user_namespace *mnt_userns);
-void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
- unsigned *bits);
-void btrfs_clear_delalloc_extent(struct inode *inode,
- struct extent_state *state, unsigned *bits);
-void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
- struct extent_state *other);
-void btrfs_split_delalloc_extent(struct inode *inode,
- struct extent_state *orig, u64 split);
-void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
-vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
-int btrfs_readpage(struct file *file, struct page *page);
-void btrfs_evict_inode(struct inode *inode);
-int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-struct inode *btrfs_alloc_inode(struct super_block *sb);
-void btrfs_destroy_inode(struct inode *inode);
-void btrfs_free_inode(struct inode *inode);
-int btrfs_drop_inode(struct inode *inode);
-int __init btrfs_init_cachep(void);
-void __cold btrfs_destroy_cachep(void);
-struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
- struct btrfs_root *root, struct btrfs_path *path);
-struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
-struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page, size_t pg_offset,
- u64 start, u64 end);
-int btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode);
-int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode);
-int btrfs_orphan_add(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode);
-int btrfs_orphan_cleanup(struct btrfs_root *root);
-int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
-void btrfs_add_delayed_iput(struct inode *inode);
-void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
-int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
-int btrfs_prealloc_file_range(struct inode *inode, int mode,
- u64 start, u64 num_bytes, u64 min_size,
- loff_t actual_len, u64 *alloc_hint);
-int btrfs_prealloc_file_range_trans(struct inode *inode,
- struct btrfs_trans_handle *trans, int mode,
- u64 start, u64 num_bytes, u64 min_size,
- loff_t actual_len, u64 *alloc_hint);
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
- u64 start, u64 end, int *page_started, unsigned long *nr_written,
- struct writeback_control *wbc);
-int btrfs_writepage_cow_fixup(struct page *page);
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
- struct page *page, u64 start,
- u64 end, bool uptodate);
-extern const struct dentry_operations btrfs_dentry_operations;
-extern const struct iomap_ops btrfs_dio_iomap_ops;
-extern const struct iomap_dio_ops btrfs_dio_ops;
-
-/* Inode locking type flags, by default the exclusive lock is taken */
-#define BTRFS_ILOCK_SHARED (1U << 0)
-#define BTRFS_ILOCK_TRY (1U << 1)
-#define BTRFS_ILOCK_MMAP (1U << 2)
-
-int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags);
-void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);
-void btrfs_update_inode_bytes(struct btrfs_inode *inode,
- const u64 add_bytes,
- const u64 del_bytes);
-
-/* ioctl.c */
-long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-int btrfs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
-int btrfs_ioctl_get_supported_features(void __user *arg);
-void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int __pure btrfs_is_empty_uuid(u8 *uuid);
-int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
- struct btrfs_ioctl_defrag_range_args *range,
- u64 newer_than, unsigned long max_to_defrag);
-void btrfs_get_block_group_info(struct list_head *groups_list,
- struct btrfs_ioctl_space_info *space);
-void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
- struct btrfs_ioctl_balance_args *bargs);
-bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type);
-bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type);
-void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
-void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
-
-/* file.c */
-int __init btrfs_auto_defrag_init(void);
-void __cold btrfs_auto_defrag_exit(void);
-int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode);
-int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
-void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
-int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
-void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
- int skip_pinned);
-extern const struct file_operations btrfs_file_operations;
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode,
- struct btrfs_drop_extents_args *args);
-int btrfs_replace_file_extents(struct btrfs_inode *inode,
- struct btrfs_path *path, const u64 start,
- const u64 end,
- struct btrfs_replace_extent_info *extent_info,
- struct btrfs_trans_handle **trans_out);
-int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode, u64 start, u64 end);
-int btrfs_release_file(struct inode *inode, struct file *file);
-int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos, size_t write_bytes,
- struct extent_state **cached, bool noreserve);
-int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
-int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
- size_t *write_bytes);
-void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
-
-/* tree-defrag.c */
-int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-
-/* super.c */
-int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
- unsigned long new_flags);
-int btrfs_sync_fs(struct super_block *sb, int wait);
-char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
- u64 subvol_objectid);
-
-static inline __printf(2, 3) __cold
-void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
-{
-}
-
-#ifdef CONFIG_PRINTK
-__printf(2, 3)
-__cold
-void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
-#else
-#define btrfs_printk(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, fmt, ##args)
-#endif
-
-#define btrfs_emerg(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_INFO fmt, ##args)
-
-/*
- * Wrappers that use printk_in_rcu
- */
-#define btrfs_emerg_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args)
-
-/*
- * Wrappers that use a ratelimited printk_in_rcu
- */
-#define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args)
-
-/*
- * Wrappers that use a ratelimited printk
- */
-#define btrfs_emerg_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args)
-#define btrfs_alert_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args)
-#define btrfs_crit_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args)
-#define btrfs_err_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args)
-#define btrfs_warn_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args)
-#define btrfs_notice_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
-#define btrfs_info_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define btrfs_debug(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk, \
- fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk_in_rcu, \
- fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \
- fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl(fs_info, fmt, args...) \
- _dynamic_func_call_no_desc(fmt, btrfs_printk_ratelimited, \
- fs_info, KERN_DEBUG fmt, ##args)
-#elif defined(DEBUG)
-#define btrfs_debug(fs_info, fmt, args...) \
- btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl(fs_info, fmt, args...) \
- btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
-#else
-#define btrfs_debug(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
- btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
-#define btrfs_debug_rl(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
-#endif
-
-#define btrfs_printk_in_rcu(fs_info, fmt, args...) \
-do { \
- rcu_read_lock(); \
- btrfs_printk(fs_info, fmt, ##args); \
- rcu_read_unlock(); \
-} while (0)
-
-#define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \
-do { \
- rcu_read_lock(); \
- btrfs_no_printk(fs_info, fmt, ##args); \
- rcu_read_unlock(); \
-} while (0)
-
-#define btrfs_printk_ratelimited(fs_info, fmt, args...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- btrfs_printk(fs_info, fmt, ##args); \
-} while (0)
-
-#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \
-do { \
- rcu_read_lock(); \
- btrfs_printk_ratelimited(fs_info, fmt, ##args); \
- rcu_read_unlock(); \
-} while (0)
-
-#ifdef CONFIG_BTRFS_ASSERT
-__cold __noreturn
-static inline void assertfail(const char *expr, const char *file, int line)
-{
- pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
- BUG();
-}
-
-#define ASSERT(expr) \
- (likely(expr) ? (void)0 : assertfail(#expr, __FILE__, __LINE__))
-
-#else
-static inline void assertfail(const char *expr, const char* file, int line) { }
-#define ASSERT(expr) (void)(expr)
-#endif
-
-#if BITS_PER_LONG == 32
-#define BTRFS_32BIT_MAX_FILE_SIZE (((u64)ULONG_MAX + 1) << PAGE_SHIFT)
-/*
- * The warning threshold is 5/8 of the MAX_LFS_FILESIZE limit on the logical
- * addresses of extents.
- *
- * For a 4K page size that limit is 2^44 bytes (16T), so the threshold is about
- * 10T; for a 64K page size it is 2^48 bytes (256T), giving roughly 160T.
- */
-#define BTRFS_32BIT_EARLY_WARN_THRESHOLD (BTRFS_32BIT_MAX_FILE_SIZE * 5 / 8)
-void btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info);
-void btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info);
-#endif
-
-/*
- * Get the correct offset inside the page of extent buffer.
- *
- * @eb: target extent buffer
- * @start: offset inside the extent buffer
- *
- * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
- */
-static inline size_t get_eb_offset_in_page(const struct extent_buffer *eb,
- unsigned long offset)
-{
- /*
- * For sectorsize == PAGE_SIZE case, eb->start will always be aligned
- * to PAGE_SIZE, thus adding it won't cause any difference.
- *
- * For sectorsize < PAGE_SIZE, we must only read the data that belongs
- * to the eb, thus we have to take the eb->start into consideration.
- */
- return offset_in_page(offset + eb->start);
-}
+int btrfs_leaf_free_space(const struct extent_buffer *leaf);
-static inline unsigned long get_eb_page_index(unsigned long offset)
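+/*
+ * Return true if @rootid refers to an fs tree: the top level fs tree itself,
+ * or a subvolume/snapshot tree (objectid >= BTRFS_FIRST_FREE_OBJECTID with a
+ * qgroup level of 0).
+ */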
+static inline bool btrfs_is_fstree(u64 rootid)
{
- /*
- * For sectorsize == PAGE_SIZE case, plain >> PAGE_SHIFT is enough.
- *
- * For sectorsize < PAGE_SIZE case, we only support 64K PAGE_SIZE,
- * and have ensured that all tree blocks are contained in one page,
- * thus we always get index == 0.
- */
- return offset >> PAGE_SHIFT;
-}
-
-/*
- * Use this for functions that are conditionally exported for sanity tests but
- * are otherwise static.
- */
-#ifndef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-#define EXPORT_FOR_TESTS static
-#else
-#define EXPORT_FOR_TESTS
-#endif
-
-__cold
-static inline void btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
-{
- btrfs_err(fs_info,
-"Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel");
-}
-
-__printf(5, 6)
-__cold
-void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...);
+ if (rootid == BTRFS_FS_TREE_OBJECTID)
+ return true;
-const char * __attribute_const__ btrfs_decode_error(int errno);
+ if ((s64)rootid < (s64)BTRFS_FIRST_FREE_OBJECTID)
+ return false;
-__cold
-void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
- const char *function,
- unsigned int line, int errno);
+ if (btrfs_qgroup_level(rootid) != 0)
+ return false;
-/*
- * Call btrfs_abort_transaction as early as possible when an error condition is
- * detected, that way the exact line number is reported.
- */
-#define btrfs_abort_transaction(trans, errno) \
-do { \
- /* Report first abort since mount */ \
- if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
- &((trans)->fs_info->fs_state))) { \
- if ((errno) != -EIO && (errno) != -EROFS) { \
- WARN(1, KERN_DEBUG \
- "BTRFS: Transaction aborted (error %d)\n", \
- (errno)); \
- } else { \
- btrfs_debug((trans)->fs_info, \
- "Transaction aborted (error %d)", \
- (errno)); \
- } \
- } \
- __btrfs_abort_transaction((trans), __func__, \
- __LINE__, (errno)); \
-} while (0)
-
-#define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \
-do { \
- __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \
- (errno), fmt, ##args); \
-} while (0)
-
-#define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
- &(fs_info)->fs_state)))
-
-__printf(5, 6)
-__cold
-void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...);
-/*
- * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
- * will panic(). Otherwise we BUG() here.
- */
-#define btrfs_panic(fs_info, errno, fmt, args...) \
-do { \
- __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
- BUG(); \
-} while (0)
-
-
-/* compatibility and incompatibility defines */
-
-#define btrfs_set_fs_incompat(__fs_info, opt) \
- __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, \
- #opt)
-
-static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
- u64 flag, const char* name)
-{
- struct btrfs_super_block *disk_super;
- u64 features;
-
- disk_super = fs_info->super_copy;
- features = btrfs_super_incompat_flags(disk_super);
- if (!(features & flag)) {
- spin_lock(&fs_info->super_lock);
- features = btrfs_super_incompat_flags(disk_super);
- if (!(features & flag)) {
- features |= flag;
- btrfs_set_super_incompat_flags(disk_super, features);
- btrfs_info(fs_info,
- "setting incompat feature flag for %s (0x%llx)",
- name, flag);
- }
- spin_unlock(&fs_info->super_lock);
- }
-}
-
-#define btrfs_clear_fs_incompat(__fs_info, opt) \
- __btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, \
- #opt)
-
-static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
- u64 flag, const char* name)
-{
- struct btrfs_super_block *disk_super;
- u64 features;
-
- disk_super = fs_info->super_copy;
- features = btrfs_super_incompat_flags(disk_super);
- if (features & flag) {
- spin_lock(&fs_info->super_lock);
- features = btrfs_super_incompat_flags(disk_super);
- if (features & flag) {
- features &= ~flag;
- btrfs_set_super_incompat_flags(disk_super, features);
- btrfs_info(fs_info,
- "clearing incompat feature flag for %s (0x%llx)",
- name, flag);
- }
- spin_unlock(&fs_info->super_lock);
- }
-}
-
-#define btrfs_fs_incompat(fs_info, opt) \
- __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
-
-static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
-{
- struct btrfs_super_block *disk_super;
- disk_super = fs_info->super_copy;
- return !!(btrfs_super_incompat_flags(disk_super) & flag);
-}
-
-#define btrfs_set_fs_compat_ro(__fs_info, opt) \
- __btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, \
- #opt)
-
-static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
- u64 flag, const char *name)
-{
- struct btrfs_super_block *disk_super;
- u64 features;
-
- disk_super = fs_info->super_copy;
- features = btrfs_super_compat_ro_flags(disk_super);
- if (!(features & flag)) {
- spin_lock(&fs_info->super_lock);
- features = btrfs_super_compat_ro_flags(disk_super);
- if (!(features & flag)) {
- features |= flag;
- btrfs_set_super_compat_ro_flags(disk_super, features);
- btrfs_info(fs_info,
- "setting compat-ro feature flag for %s (0x%llx)",
- name, flag);
- }
- spin_unlock(&fs_info->super_lock);
- }
-}
-
-#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
- __btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, \
- #opt)
-
-static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
- u64 flag, const char *name)
-{
- struct btrfs_super_block *disk_super;
- u64 features;
-
- disk_super = fs_info->super_copy;
- features = btrfs_super_compat_ro_flags(disk_super);
- if (features & flag) {
- spin_lock(&fs_info->super_lock);
- features = btrfs_super_compat_ro_flags(disk_super);
- if (features & flag) {
- features &= ~flag;
- btrfs_set_super_compat_ro_flags(disk_super, features);
- btrfs_info(fs_info,
- "clearing compat-ro feature flag for %s (0x%llx)",
- name, flag);
- }
- spin_unlock(&fs_info->super_lock);
- }
-}
-
-#define btrfs_fs_compat_ro(fs_info, opt) \
- __btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
-
-static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
-{
- struct btrfs_super_block *disk_super;
- disk_super = fs_info->super_copy;
- return !!(btrfs_super_compat_ro_flags(disk_super) & flag);
-}
-
-/* acl.c */
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
-struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu);
-int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
- struct posix_acl *acl, int type);
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir);
-#else
-#define btrfs_get_acl NULL
-#define btrfs_set_acl NULL
-static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir)
-{
- return 0;
-}
-#endif
-
-/* relocation.c */
-int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
-int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-int btrfs_recover_relocation(struct btrfs_root *root);
-int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len);
-int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf,
- struct extent_buffer *cow);
-void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
- u64 *bytes_to_reserve);
-int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
- struct btrfs_pending_snapshot *pending);
-int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
-struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info,
- u64 bytenr);
-int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
-
-/* scrub.c */
-int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
- u64 end, struct btrfs_scrub_progress *progress,
- int readonly, int is_dev_replace);
-void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
-void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
-int btrfs_scrub_cancel(struct btrfs_fs_info *info);
-int btrfs_scrub_cancel_dev(struct btrfs_device *dev);
-int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
- struct btrfs_scrub_progress *progress);
-static inline void btrfs_init_full_stripe_locks_tree(
- struct btrfs_full_stripe_locks_tree *locks_root)
-{
- locks_root->root = RB_ROOT;
- mutex_init(&locks_root->lock);
-}
-
-/* dev-replace.c */
-void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
-void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
-void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);
-
-static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
-{
- btrfs_bio_counter_sub(fs_info, 1);
-}
-
-/* reada.c */
-struct reada_control {
- struct btrfs_fs_info *fs_info; /* tree to prefetch */
- struct btrfs_key key_start;
- struct btrfs_key key_end; /* exclusive */
- atomic_t elems;
- struct kref refcnt;
- wait_queue_head_t wait;
-};
-struct reada_control *btrfs_reada_add(struct btrfs_root *root,
- struct btrfs_key *start, struct btrfs_key *end);
-int btrfs_reada_wait(void *handle);
-void btrfs_reada_detach(void *handle);
-int btree_readahead_hook(struct extent_buffer *eb, int err);
-void btrfs_reada_remove_dev(struct btrfs_device *dev);
-void btrfs_reada_undo_remove_dev(struct btrfs_device *dev);
-
-static inline int is_fstree(u64 rootid)
-{
- if (rootid == BTRFS_FS_TREE_OBJECTID ||
- ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
- !btrfs_qgroup_level(rootid)))
- return 1;
- return 0;
-}
-
-static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
-{
- return signal_pending(current);
-}
-
-/* verity.c */
-#ifdef CONFIG_FS_VERITY
-
-extern const struct fsverity_operations btrfs_verityops;
-int btrfs_drop_verity_items(struct btrfs_inode *inode);
-
-BTRFS_SETGET_FUNCS(verity_descriptor_encryption, struct btrfs_verity_descriptor_item,
- encryption, 8);
-BTRFS_SETGET_FUNCS(verity_descriptor_size, struct btrfs_verity_descriptor_item,
- size, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_verity_descriptor_encryption,
- struct btrfs_verity_descriptor_item, encryption, 8);
-BTRFS_SETGET_STACK_FUNCS(stack_verity_descriptor_size,
- struct btrfs_verity_descriptor_item, size, 64);
-
-#else
-
-static inline int btrfs_drop_verity_items(struct btrfs_inode *inode)
-{
- return 0;
-}
-
-#endif
-
-/* Sanity test specific functions */
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-void btrfs_test_destroy_inode(struct inode *inode);
-static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
-{
- return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
-}
-#else
-static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
-{
- return 0;
-}
-#endif
-
-static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
-{
- return fs_info->zoned != 0;
+ return true;
}
static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
@@ -3900,14 +738,4 @@ static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}
-/*
- * We use page status Private2 to indicate there is an ordered extent with
- * unfinished IO.
- *
- * Rename the Private2 accessors to Ordered, to improve readability.
- */
-#define PageOrdered(page) PagePrivate2(page)
-#define SetPageOrdered(page) SetPagePrivate2(page)
-#define ClearPageOrdered(page) ClearPagePrivate2(page)
-
#endif
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
new file mode 100644
index 000000000000..b81e224d4a27
--- /dev/null
+++ b/fs/btrfs/defrag.c
@@ -0,0 +1,1519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2007 Oracle. All rights reserved.
+ */
+
+#include <linux/sched.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "locking.h"
+#include "accessors.h"
+#include "messages.h"
+#include "delalloc-space.h"
+#include "subpage.h"
+#include "defrag.h"
+#include "file-item.h"
+#include "super.h"
+#include "compression.h"
+
+static struct kmem_cache *btrfs_inode_defrag_cachep;
+
+/*
+ * When auto defrag is enabled we queue up these defrag structs to remember
+ * which inodes need defragging passes.
+ */
+struct inode_defrag {
+ struct rb_node rb_node;
+ /* Inode number */
+ u64 ino;
+ /*
+ * Transid where the defrag was added, we search for extents newer than
+ * this.
+ */
+ u64 transid;
+
+ /* Root objectid */
+ u64 root;
+
+ /*
+ * The extent size threshold for autodefrag.
+ *
+ * This value is different for compressed/non-compressed extents, so it
+ * needs to be passed in by the higher layer (i.e. inode_should_defrag()).
+ */
+ u32 extent_thresh;
+};
+
+static int compare_inode_defrag(const struct inode_defrag *defrag1,
+ const struct inode_defrag *defrag2)
+{
+ if (defrag1->root > defrag2->root)
+ return 1;
+ else if (defrag1->root < defrag2->root)
+ return -1;
+ else if (defrag1->ino > defrag2->ino)
+ return 1;
+ else if (defrag1->ino < defrag2->ino)
+ return -1;
+ else
+ return 0;
+}
+
+static int inode_defrag_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct inode_defrag *new_defrag = rb_entry(new, struct inode_defrag, rb_node);
+ const struct inode_defrag *existing_defrag = rb_entry(existing, struct inode_defrag, rb_node);
+
+ return compare_inode_defrag(new_defrag, existing_defrag);
+}
+
+/*
+ * Insert a record for an inode into the defrag tree. The caller must already
+ * hold fs_info->defrag_inodes_lock.
+ *
+ * If you're inserting a record for an older transid than an existing record,
+ * the transid already in the tree is lowered.
+ */
+static int btrfs_insert_inode_defrag(struct btrfs_inode *inode,
+ struct inode_defrag *defrag)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct rb_node *node;
+
+ node = rb_find_add(&defrag->rb_node, &fs_info->defrag_inodes, inode_defrag_cmp);
+ if (node) {
+ struct inode_defrag *entry;
+
+ entry = rb_entry(node, struct inode_defrag, rb_node);
+ /*
+ * If we're reinserting an entry for an old defrag run, make
+ * sure to lower the transid of our existing record.
+ */
+ if (defrag->transid < entry->transid)
+ entry->transid = defrag->transid;
+ entry->extent_thresh = min(defrag->extent_thresh, entry->extent_thresh);
+ return -EEXIST;
+ }
+ set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
+ return 0;
+}
+
+static inline bool need_auto_defrag(struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
+ return false;
+
+ if (btrfs_fs_closing(fs_info))
+ return false;
+
+ return true;
+}
+
+/*
+ * Insert a defrag record for this inode if auto defrag is enabled. No errors
+ * are returned, as failures here are not considered fatal.
+ */
+void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct inode_defrag *defrag;
+ int ret;
+
+ if (!need_auto_defrag(fs_info))
+ return;
+
+ if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
+ return;
+
+ defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
+ if (!defrag)
+ return;
+
+ defrag->ino = btrfs_ino(inode);
+ defrag->transid = btrfs_get_root_last_trans(root);
+ defrag->root = btrfs_root_id(root);
+ defrag->extent_thresh = extent_thresh;
+
+ spin_lock(&fs_info->defrag_inodes_lock);
+ if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
+ /*
+ * If we set the IN_DEFRAG flag and the inode is then evicted from
+ * memory and re-read, the new in-memory inode doesn't have the
+ * IN_DEFRAG flag set. In that case we may find an existing defrag
+ * record for it in the tree.
+ */
+ ret = btrfs_insert_inode_defrag(inode, defrag);
+ if (ret)
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ } else {
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ }
+ spin_unlock(&fs_info->defrag_inodes_lock);
+}
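+
+/*
+ * Rough call-site sketch (hypothetical, names illustrative): a write path that
+ * noticed a small extent can queue the inode for a later autodefrag pass:
+ *
+ *	// "inode" is a struct btrfs_inode *, "extent_thresh" a size threshold
+ *	btrfs_add_inode_defrag(inode, extent_thresh);
+ */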
+
+/*
+ * Pick the defraggable inode that we want; if it doesn't exist, we will get
+ * the next one.
+ */
+static struct inode_defrag *btrfs_pick_defrag_inode(
+ struct btrfs_fs_info *fs_info, u64 root, u64 ino)
+{
+ struct inode_defrag *entry = NULL;
+ struct inode_defrag tmp;
+ struct rb_node *p;
+ struct rb_node *parent = NULL;
+ int ret;
+
+ tmp.ino = ino;
+ tmp.root = root;
+
+ spin_lock(&fs_info->defrag_inodes_lock);
+ p = fs_info->defrag_inodes.rb_node;
+ while (p) {
+ parent = p;
+ entry = rb_entry(parent, struct inode_defrag, rb_node);
+
+ ret = compare_inode_defrag(&tmp, entry);
+ if (ret < 0)
+ p = parent->rb_left;
+ else if (ret > 0)
+ p = parent->rb_right;
+ else
+ goto out;
+ }
+
+ if (parent && compare_inode_defrag(&tmp, entry) > 0) {
+ parent = rb_next(parent);
+ entry = rb_entry_safe(parent, struct inode_defrag, rb_node);
+ }
+out:
+ if (entry)
+ rb_erase(parent, &fs_info->defrag_inodes);
+ spin_unlock(&fs_info->defrag_inodes_lock);
+ return entry;
+}
+
+void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
+{
+ struct inode_defrag *defrag, *next;
+
+ spin_lock(&fs_info->defrag_inodes_lock);
+
+ rbtree_postorder_for_each_entry_safe(defrag, next,
+ &fs_info->defrag_inodes, rb_node)
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+
+ fs_info->defrag_inodes = RB_ROOT;
+
+ spin_unlock(&fs_info->defrag_inodes_lock);
+}
+
+#define BTRFS_DEFRAG_BATCH 1024
+
+static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
+ struct inode_defrag *defrag,
+ struct file_ra_state *ra)
+{
+ struct btrfs_root *inode_root;
+ struct btrfs_inode *inode;
+ struct btrfs_ioctl_defrag_range_args range;
+ int ret = 0;
+ u64 cur = 0;
+
+again:
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+ goto cleanup;
+ if (!need_auto_defrag(fs_info))
+ goto cleanup;
+
+ /* Get the inode */
+ inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
+ if (IS_ERR(inode_root)) {
+ ret = PTR_ERR(inode_root);
+ goto cleanup;
+ }
+
+ inode = btrfs_iget(defrag->ino, inode_root);
+ btrfs_put_root(inode_root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto cleanup;
+ }
+
+ if (cur >= i_size_read(&inode->vfs_inode)) {
+ iput(&inode->vfs_inode);
+ goto cleanup;
+ }
+
+ /* Do a chunk of defrag */
+ clear_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
+ memset(&range, 0, sizeof(range));
+ range.len = (u64)-1;
+ range.start = cur;
+ range.extent_thresh = defrag->extent_thresh;
+ file_ra_state_init(ra, inode->vfs_inode.i_mapping);
+
+ scoped_guard(super_write, fs_info->sb)
+ ret = btrfs_defrag_file(inode, ra, &range,
+ defrag->transid, BTRFS_DEFRAG_BATCH);
+ iput(&inode->vfs_inode);
+
+ if (ret < 0)
+ goto cleanup;
+
+ cur = max(cur + fs_info->sectorsize, range.start);
+ goto again;
+
+cleanup:
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ return ret;
+}
+
+/*
+ * Run through the list of inodes in the FS that need defragging.
+ */
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+{
+ struct inode_defrag *defrag;
+ u64 first_ino = 0;
+ u64 root_objectid = 0;
+
+ atomic_inc(&fs_info->defrag_running);
+ while (1) {
+ struct file_ra_state ra = { 0 };
+
+ /* Pause the auto defragger. */
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+ break;
+
+ if (!need_auto_defrag(fs_info))
+ break;
+
+ /* find an inode to defrag */
+ defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
+ if (!defrag) {
+ if (root_objectid || first_ino) {
+ root_objectid = 0;
+ first_ino = 0;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ first_ino = defrag->ino + 1;
+ root_objectid = defrag->root;
+
+ btrfs_run_defrag_inode(fs_info, defrag, &ra);
+ }
+ atomic_dec(&fs_info->defrag_running);
+
+ /*
+ * During unmount, we use the transaction_wait queue to wait for the
+ * defragger to stop.
+ */
+ wake_up(&fs_info->transaction_wait);
+ return 0;
+}
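+
+/*
+ * A sketch of the waiting side (e.g. unmount), assuming a context that may
+ * sleep and a valid fs_info:
+ *
+ *	wait_event(fs_info->transaction_wait,
+ *		   atomic_read(&fs_info->defrag_running) == 0);
+ */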
+
+/*
+ * Check if two block addresses are close together; used by defrag.
+ */
+static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
+{
+ if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
+ return true;
+ if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
+ return true;
+ return false;
+}
+
+/*
+ * Go through all the leaves pointed to by a node and reallocate them so that
+ * disk order is close to key order.
+ */
+static int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *parent,
+ int start_slot, u64 *last_ret,
+ struct btrfs_key *progress)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ const u32 blocksize = fs_info->nodesize;
+ const int end_slot = btrfs_header_nritems(parent) - 1;
+ u64 search_start = *last_ret;
+ u64 last_block = 0;
+ int ret = 0;
+ bool progress_passed = false;
+
+ /*
+ * COWing must happen through a running transaction, which always
+ * matches the current fs generation (it's a transaction with a state
+ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
+ * into error state to prevent the commit of any transaction.
+ */
+ if (unlikely(trans->transaction != fs_info->running_transaction ||
+ trans->transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
+ parent->start, btrfs_root_id(root), trans->transid,
+ fs_info->running_transaction->transid,
+ fs_info->generation);
+ return -EUCLEAN;
+ }
+
+ if (btrfs_header_nritems(parent) <= 1)
+ return 0;
+
+ for (int i = start_slot; i <= end_slot; i++) {
+ struct extent_buffer *cur;
+ struct btrfs_disk_key disk_key;
+ u64 blocknr;
+ u64 other;
+ bool close = true;
+
+ btrfs_node_key(parent, &disk_key, i);
+ if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)
+ continue;
+
+ progress_passed = true;
+ blocknr = btrfs_node_blockptr(parent, i);
+ if (last_block == 0)
+ last_block = blocknr;
+
+ if (i > 0) {
+ other = btrfs_node_blockptr(parent, i - 1);
+ close = close_blocks(blocknr, other, blocksize);
+ }
+ if (!close && i < end_slot) {
+ other = btrfs_node_blockptr(parent, i + 1);
+ close = close_blocks(blocknr, other, blocksize);
+ }
+ if (close) {
+ last_block = blocknr;
+ continue;
+ }
+
+ cur = btrfs_read_node_slot(parent, i);
+ if (IS_ERR(cur))
+ return PTR_ERR(cur);
+ if (search_start == 0)
+ search_start = last_block;
+
+ btrfs_tree_lock(cur);
+ ret = btrfs_force_cow_block(trans, root, cur, parent, i,
+ &cur, search_start,
+ min(16 * blocksize,
+ (end_slot - i) * blocksize),
+ BTRFS_NESTING_COW);
+ if (ret) {
+ btrfs_tree_unlock(cur);
+ free_extent_buffer(cur);
+ break;
+ }
+ search_start = cur->start;
+ last_block = cur->start;
+ *last_ret = search_start;
+ btrfs_tree_unlock(cur);
+ free_extent_buffer(cur);
+ }
+ return ret;
+}
+
+/*
+ * Defrag all the leaves in a given btree: read all the leaves and try to
+ * reallocate them so that disk order better reflects key order.
+ */
+static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_path *path = NULL;
+ struct btrfs_key key;
+ int ret = 0;
+ int wret;
+ int level;
+ int next_key_ret = 0;
+ u64 last_ret = 0;
+
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ goto out;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ level = btrfs_header_level(root->node);
+
+ if (level == 0)
+ goto out;
+
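+	/*
+	 * On the first pass record the last key of the root node in
+	 * root->defrag_max, so we can later tell when defrag_progress has
+	 * walked past the end of the tree.
+	 */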
+ if (root->defrag_progress.objectid == 0) {
+ struct extent_buffer *root_node;
+ u32 nritems;
+
+ root_node = btrfs_lock_root_node(root);
+ nritems = btrfs_header_nritems(root_node);
+ root->defrag_max.objectid = 0;
+		/* From above we know this is not a leaf. */
+ btrfs_node_key_to_cpu(root_node, &root->defrag_max,
+ nritems - 1);
+ btrfs_tree_unlock(root_node);
+ free_extent_buffer(root_node);
+ memset(&key, 0, sizeof(key));
+ } else {
+ memcpy(&key, &root->defrag_progress, sizeof(key));
+ }
+
+ path->keep_locks = true;
+
+ ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ ret = 0;
+ goto out;
+ }
+ btrfs_release_path(path);
+ /*
+	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
+	 * leaves from path->nodes[1], so set lowest_level to 1 to avoid a
+	 * deadlock later (attempting to write lock an already write locked leaf).
+ */
+ path->lowest_level = 1;
+ wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+
+ if (wret < 0) {
+ ret = wret;
+ goto out;
+ }
+ if (!path->nodes[1]) {
+ ret = 0;
+ goto out;
+ }
+ /*
+ * The node at level 1 must always be locked when our path has
+ * keep_locks set and lowest_level is 1, regardless of the value of
+ * path->slots[1].
+ */
+ ASSERT(path->locks[1] != 0);
+ ret = btrfs_realloc_node(trans, root,
+ path->nodes[1], 0,
+ &last_ret,
+ &root->defrag_progress);
+ if (ret) {
+ WARN_ON(ret == -EAGAIN);
+ goto out;
+ }
+ /*
+	 * Now that we reallocated the node we can find the next key. Note that
+	 * btrfs_find_next_key() can release our path and do another search
+	 * without COWing. This is because even with path->keep_locks == true,
+	 * btrfs_search_slot() / ctree.c:unlock_up() does not keep a lock on a
+	 * node when path->slots[node_level - 1] does not point to the last
+	 * item or a slot beyond the last item. Therefore we search for the
+	 * next key after reallocating our node.
+ */
+ path->slots[1] = btrfs_header_nritems(path->nodes[1]);
+ next_key_ret = btrfs_find_next_key(root, path, &key, 1,
+ BTRFS_OLDEST_GENERATION);
+ if (next_key_ret == 0) {
+ memcpy(&root->defrag_progress, &key, sizeof(key));
+ ret = -EAGAIN;
+ }
+out:
+ btrfs_free_path(path);
+ if (ret == -EAGAIN) {
+ if (root->defrag_max.objectid > root->defrag_progress.objectid)
+ goto done;
+ if (root->defrag_max.type > root->defrag_progress.type)
+ goto done;
+ if (root->defrag_max.offset > root->defrag_progress.offset)
+ goto done;
+ ret = 0;
+ }
+done:
+ if (ret != -EAGAIN)
+ memset(&root->defrag_progress, 0,
+ sizeof(root->defrag_progress));
+
+ return ret;
+}
+
+/*
+ * Defrag a given btree. Every leaf in the btree is read and defragmented.
+ */
+int btrfs_defrag_root(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
+
+ if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
+ return 0;
+
+ while (1) {
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+
+ ret = btrfs_defrag_leaves(trans, root);
+
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
+ cond_resched();
+
+ if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)
+ break;
+
+ if (btrfs_defrag_cancelled(fs_info)) {
+ btrfs_debug(fs_info, "defrag_root cancelled");
+ ret = -EAGAIN;
+ break;
+ }
+ }
+ clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
+ return ret;
+}
+
+/*
+ * Defrag specific helper to get an extent map.
+ *
+ * Differences between this and btrfs_get_extent() are:
+ *
+ * - No extent_map will be added to inode->extent_tree
+ * To reduce memory usage in the long run.
+ *
+ * - Extra optimization to skip file extents older than @newer_than
+ * By using btrfs_search_forward() we can skip entire file ranges that
+ * have extents created in past transactions, because btrfs_search_forward()
+ * will not visit leaves and nodes with a generation smaller than given
+ * minimal generation threshold (@newer_than).
+ *
+ * Return valid em if we find a file extent matching the requirement.
+ * Return NULL if we cannot find a file extent matching the requirement.
+ *
+ * Return ERR_PTR() for error.
+ */
+static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
+ u64 start, u64 newer_than)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_path path = { 0 };
+ struct extent_map *em;
+ struct btrfs_key key;
+ u64 ino = btrfs_ino(inode);
+ int ret;
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ if (newer_than) {
+ ret = btrfs_search_forward(root, &key, &path, newer_than);
+ if (ret < 0)
+ goto err;
+ /* Can't find anything newer */
+ if (ret > 0)
+ goto not_found;
+ } else {
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
+ if (ret < 0)
+ goto err;
+ }
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ /*
+		 * If btrfs_search_slot() makes the path point beyond nritems,
+ * we should not have an empty leaf, as this inode must at
+ * least have its INODE_ITEM.
+ */
+ ASSERT(btrfs_header_nritems(path.nodes[0]));
+ path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
+ }
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ /* Perfect match, no need to go one slot back */
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
+ key.offset == start)
+ goto iterate;
+
+	/* We didn't find a perfect match, need to go one slot back */
+ if (path.slots[0] > 0) {
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+ path.slots[0]--;
+ }
+
+iterate:
+ /* Iterate through the path to find a file extent covering @start */
+ while (true) {
+ u64 extent_end;
+
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
+ goto next;
+
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+
+ /*
+ * We may go one slot back to INODE_REF/XATTR item, then
+ * need to go forward until we reach an EXTENT_DATA.
+		 * But we should still have the correct ino as key.objectid.
+ */
+ if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
+ goto next;
+
+		/* It's beyond our target range, definitely no extent found */
+ if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
+ goto not_found;
+
+ /*
+ * | |<- File extent ->|
+ * \- start
+ *
+ * This means there is a hole between start and key.offset.
+ */
+ if (key.offset > start) {
+ em->start = start;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
+ em->disk_num_bytes = 0;
+ em->ram_bytes = 0;
+ em->offset = 0;
+ em->len = key.offset - start;
+ break;
+ }
+
+ fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
+ struct btrfs_file_extent_item);
+ extent_end = btrfs_file_extent_end(&path);
+
+ /*
+ * |<- file extent ->| |
+ * \- start
+ *
+ * We haven't reached start, search next slot.
+ */
+ if (extent_end <= start)
+ goto next;
+
+ /* Now this extent covers @start, convert it to em */
+ btrfs_extent_item_to_extent_map(inode, &path, fi, em);
+ break;
+next:
+ ret = btrfs_next_item(root, &path);
+ if (ret < 0)
+ goto err;
+ if (ret > 0)
+ goto not_found;
+ }
+ btrfs_release_path(&path);
+ return em;
+
+not_found:
+ btrfs_release_path(&path);
+ btrfs_free_extent_map(em);
+ return NULL;
+
+err:
+ btrfs_release_path(&path);
+ btrfs_free_extent_map(em);
+ return ERR_PTR(ret);
+}
+
+static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
+ u64 newer_than, bool locked)
+{
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_map *em;
+ const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
+
+ /*
+ * Hopefully we have this extent in the tree already, try without the
+ * full extent lock.
+ */
+ read_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, start, sectorsize);
+ read_unlock(&em_tree->lock);
+
+ /*
+	 * We can get a merged extent, in that case, we need to re-search the
+	 * tree to get the original em for defrag.
+ *
+ * This is because even if we have adjacent extents that are contiguous
+ * and compatible (same type and flags), we still want to defrag them
+ * so that we use less metadata (extent items in the extent tree and
+ * file extent items in the inode's subvolume tree).
+ */
+ if (em && (em->flags & EXTENT_FLAG_MERGED)) {
+ btrfs_free_extent_map(em);
+ em = NULL;
+ }
+
+ if (!em) {
+ struct extent_state *cached = NULL;
+ u64 end = start + sectorsize - 1;
+
+ /* Get the big lock and read metadata off disk. */
+ if (!locked)
+ btrfs_lock_extent(io_tree, start, end, &cached);
+ em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
+ if (!locked)
+ btrfs_unlock_extent(io_tree, start, end, &cached);
+
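+		/* Treat a lookup error as "no extent", callers handle a NULL return. */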
+ if (IS_ERR(em))
+ return NULL;
+ }
+
+ return em;
+}
+
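+/*
+ * Maximum size a single extent can reach: BTRFS_MAX_COMPRESSED (128K) for
+ * compressed extents, otherwise the fs-wide max_extent_size.
+ */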
+static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
+ const struct extent_map *em)
+{
+ if (btrfs_extent_map_is_compressed(em))
+ return BTRFS_MAX_COMPRESSED;
+ return fs_info->max_extent_size;
+}
+
+static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
+ u32 extent_thresh, u64 newer_than, bool locked)
+{
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct extent_map *next;
+ bool ret = false;
+
+ /* This is the last extent */
+ if (em->start + em->len >= i_size_read(inode))
+ return false;
+
+ /*
+	 * Here we need to pass @newer_than when checking the next extent, or
+	 * we will hit a case where we mark the current extent for defrag, but
+	 * the next one will not be a target.
+ * This will just cause extra IO without really reducing the fragments.
+ */
+ next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
+	/* No more em, or it's a hole */
+ if (!next || next->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
+ goto out;
+ if (next->flags & EXTENT_FLAG_PREALLOC)
+ goto out;
+ /*
+ * If the next extent is at its max capacity, defragging current extent
+ * makes no sense, as the total number of extents won't change.
+ */
+ if (next->len >= get_extent_max_capacity(fs_info, em))
+ goto out;
+ /* Skip older extent */
+ if (next->generation < newer_than)
+ goto out;
+ /* Also check extent size */
+ if (next->len >= extent_thresh)
+ goto out;
+
+ ret = true;
+out:
+ btrfs_free_extent_map(next);
+ return ret;
+}
+
+/*
+ * Prepare one folio to be defragged.
+ *
+ * This will ensure:
+ *
+ * - The returned folio is locked and has been set up properly.
+ * - No ordered extent exists in the folio.
+ * - The folio is uptodate.
+ *
+ * NOTE: Caller should also wait for folio writeback after the cluster is
+ * prepared, here we don't do writeback wait for each folio.
+ */
+static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t index)
+{
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ gfp_t mask = btrfs_alloc_write_mask(mapping);
+ u64 lock_start;
+ u64 lock_end;
+ struct extent_state *cached_state = NULL;
+ struct folio *folio;
+ int ret;
+
+again:
+	/* TODO: Add FGP order flags when large folios are fully enabled. */
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+ if (IS_ERR(folio))
+ return folio;
+
+ /*
+ * Since we can defragment files opened read-only, we can encounter
+ * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS).
+ *
+ * The IO for such large folios is not fully tested, thus return
+ * an error to reject such folios unless it's an experimental build.
+ *
+ * Filesystem transparent huge pages are typically only used for
+ * executables that explicitly enable them, so this isn't very
+ * restrictive.
+ */
+ if (!IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) && folio_test_large(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ERR_PTR(-ETXTBSY);
+ }
+
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ERR_PTR(ret);
+ }
+
+ lock_start = folio_pos(folio);
+ lock_end = folio_next_pos(folio) - 1;
+ /* Wait for any existing ordered extent in the range */
+ while (1) {
+ struct btrfs_ordered_extent *ordered;
+
+ btrfs_lock_extent(&inode->io_tree, lock_start, lock_end, &cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, lock_start, folio_size(folio));
+ btrfs_unlock_extent(&inode->io_tree, lock_start, lock_end, &cached_state);
+ if (!ordered)
+ break;
+
+ folio_unlock(folio);
+ btrfs_start_ordered_extent(ordered);
+ btrfs_put_ordered_extent(ordered);
+ folio_lock(folio);
+ /*
+		 * We unlocked the folio above, so we need to check if it was
+ * released or not.
+ */
+ if (folio->mapping != mapping || !folio->private) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
+ }
+
+ /*
+	 * Now the folio range has no ordered extent anymore. Read the folio to
+ * make it uptodate.
+ */
+ if (!folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (folio->mapping != mapping || !folio->private) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
+ if (unlikely(!folio_test_uptodate(folio))) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ERR_PTR(-EIO);
+ }
+ }
+ return folio;
+}
+
+struct defrag_target_range {
+ struct list_head list;
+ u64 start;
+ u64 len;
+};
+
+/*
+ * Collect all valid target extents.
+ *
+ * @start: file offset to lookup
+ * @len: length to lookup
+ * @extent_thresh: file extent size threshold, any extent size >= this value
+ * will be ignored
+ * @newer_than: only defrag extents newer than this value
+ * @do_compress: whether the defrag is doing compression or no-compression
+ * if true, @extent_thresh will be ignored and all regular
+ * file extents meeting @newer_than will be targets.
+ * @locked: if the range has already held extent lock
+ * @target_list:   list of target file extents
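+ * @last_scanned_ret: if not NULL, set to the file offset where the scan
+ *		      stopped, so the caller can resume from there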
+ */
+static int defrag_collect_targets(struct btrfs_inode *inode,
+ u64 start, u64 len, u32 extent_thresh,
+ u64 newer_than, bool do_compress,
+ bool locked, struct list_head *target_list,
+ u64 *last_scanned_ret)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ bool last_is_target = false;
+ u64 cur = start;
+ int ret = 0;
+
+ while (cur < start + len) {
+ struct extent_map *em;
+ struct defrag_target_range *new;
+ bool next_mergeable = true;
+ u64 range_len;
+
+ last_is_target = false;
+ em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked);
+ if (!em)
+ break;
+
+ /*
+		 * If the file extent is an inline one, we may still want to
+		 * defrag it (fall through) if doing so results in a regular extent.
+		 * This is for users who want to convert inline extents to
+		 * regular ones through the max_inline= mount option.
+ */
+ if (em->disk_bytenr == EXTENT_MAP_INLINE &&
+ em->len <= inode->root->fs_info->max_inline)
+ goto next;
+
+ /* Skip holes and preallocated extents. */
+ if (em->disk_bytenr == EXTENT_MAP_HOLE ||
+ (em->flags & EXTENT_FLAG_PREALLOC))
+ goto next;
+
+ /* Skip older extent */
+ if (em->generation < newer_than)
+ goto next;
+
+ /* This em is under writeback, no need to defrag */
+ if (em->generation == (u64)-1)
+ goto next;
+
+ /*
+ * Our start offset might be in the middle of an existing extent
+ * map, so take that into account.
+ */
+ range_len = em->len - (cur - em->start);
+ /*
+ * If this range of the extent map is already flagged for delalloc,
+ * skip it, because:
+ *
+ * 1) We could deadlock later, when trying to reserve space for
+ * delalloc, because in case we can't immediately reserve space
+ * the flusher can start delalloc and wait for the respective
+ * ordered extents to complete. The deadlock would happen
+ * because we do the space reservation while holding the range
+ * locked, and starting writeback, or finishing an ordered
+ * extent, requires locking the range;
+ *
+ * 2) If there's delalloc there, it means there's dirty pages for
+ * which writeback has not started yet (we clean the delalloc
+ * flag when starting writeback and after creating an ordered
+ * extent). If we mark pages in an adjacent range for defrag,
+ * then we will have a larger contiguous range for delalloc,
+ * very likely resulting in a larger extent after writeback is
+ * triggered (except in a case of free space fragmentation).
+ */
+ if (btrfs_test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
+ EXTENT_DELALLOC))
+ goto next;
+
+ /*
+ * For do_compress case, we want to compress all valid file
+ * extents, thus no @extent_thresh or mergeable check.
+ */
+ if (do_compress)
+ goto add;
+
+ /* Skip too large extent */
+ if (em->len >= extent_thresh)
+ goto next;
+
+ /*
+		 * Skip extents already at their max capacity, this is mostly for
+		 * compressed extents, whose max capacity is only 128K.
+ */
+ if (em->len >= get_extent_max_capacity(fs_info, em))
+ goto next;
+
+ /*
+		 * Normally there are no more extents after an inline one, thus
+		 * @next_mergeable will normally be false and the extent would
+		 * not be defragged. So if an inline extent passed all the above
+		 * checks, just add it for defrag so it can be converted to a
+		 * regular extent.
+ */
+ if (em->disk_bytenr == EXTENT_MAP_INLINE)
+ goto add;
+
+ next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
+ extent_thresh, newer_than, locked);
+ if (!next_mergeable) {
+ struct defrag_target_range *last;
+
+ /* Empty target list, no way to merge with last entry */
+ if (list_empty(target_list))
+ goto next;
+ last = list_last_entry(target_list,
+ struct defrag_target_range, list);
+ /* Not mergeable with last entry */
+ if (last->start + last->len != cur)
+ goto next;
+
+ /* Mergeable, fall through to add it to @target_list. */
+ }
+
+add:
+ last_is_target = true;
+ range_len = min(btrfs_extent_map_end(em), start + len) - cur;
+ /*
+ * This one is a good target, check if it can be merged into
+ * last range of the target list.
+ */
+ if (!list_empty(target_list)) {
+ struct defrag_target_range *last;
+
+ last = list_last_entry(target_list,
+ struct defrag_target_range, list);
+ ASSERT(last->start + last->len <= cur);
+ if (last->start + last->len == cur) {
+ /* Mergeable, enlarge the last entry */
+ last->len += range_len;
+ goto next;
+ }
+ /* Fall through to allocate a new entry */
+ }
+
+ /* Allocate new defrag_target_range */
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (!new) {
+ btrfs_free_extent_map(em);
+ ret = -ENOMEM;
+ break;
+ }
+ new->start = cur;
+ new->len = range_len;
+ list_add_tail(&new->list, target_list);
+
+next:
+ cur = btrfs_extent_map_end(em);
+ btrfs_free_extent_map(em);
+ }
+ if (ret < 0) {
+ struct defrag_target_range *entry;
+ struct defrag_target_range *tmp;
+
+ list_for_each_entry_safe(entry, tmp, target_list, list) {
+ list_del_init(&entry->list);
+ kfree(entry);
+ }
+ }
+ if (!ret && last_scanned_ret) {
+ /*
+ * If the last extent is not a target, the caller can skip to
+ * the end of that extent.
+		 * Otherwise, we can only go to the end of the specified range.
+ */
+ if (!last_is_target)
+ *last_scanned_ret = max(cur, *last_scanned_ret);
+ else
+ *last_scanned_ret = max(start + len, *last_scanned_ret);
+ }
+ return ret;
+}
+
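+/* Defrag is done in clusters of at most 256K at a time. */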
+#define CLUSTER_SIZE (SZ_256K)
+static_assert(PAGE_ALIGNED(CLUSTER_SIZE));
+
+/*
+ * Defrag one contiguous target range.
+ *
+ * @inode: target inode
+ * @target: target range to defrag
+ * @folios:	locked folios covering the defrag range
+ * @nr_pages:	number of locked folios
+ *
+ * Caller should ensure:
+ *
+ * - Folios are prepared
+ *   Folios should be locked, no ordered extent in the folios' range,
+ *   no writeback.
+ *
+ * - Extent bits are locked
+ */
+static int defrag_one_locked_target(struct btrfs_inode *inode,
+ struct defrag_target_range *target,
+ struct folio **folios, int nr_pages,
+ struct extent_state **cached_state)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_changeset *data_reserved = NULL;
+ const u64 start = target->start;
+ const u64 len = target->len;
+ int ret = 0;
+
+ ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
+ if (ret < 0)
+ return ret;
+ btrfs_clear_extent_bit(&inode->io_tree, start, start + len - 1,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, cached_state);
+ btrfs_set_extent_bit(&inode->io_tree, start, start + len - 1,
+ EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);
+
+ /*
+	 * Update the folio status.
+ * Due to possible large folios, we have to check all folios one by one.
+ */
+	for (int i = 0; i < nr_pages && folios[i]; i++) {
+		struct folio *folio = folios[i];
+
+ if (start >= folio_next_pos(folio) ||
+ start + len <= folio_pos(folio))
+ continue;
+ btrfs_folio_clamp_clear_checked(fs_info, folio, start, len);
+ btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
+ }
+ btrfs_delalloc_release_extents(inode, len);
+ extent_changeset_free(data_reserved);
+
+ return ret;
+}
+
+static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
+ u32 extent_thresh, u64 newer_than, bool do_compress,
+ u64 *last_scanned_ret)
+{
+ struct extent_state *cached_state = NULL;
+ struct defrag_target_range *entry;
+ struct defrag_target_range *tmp;
+ LIST_HEAD(target_list);
+ struct folio **folios;
+ const u32 sectorsize = inode->root->fs_info->sectorsize;
+ u64 cur = start;
+ const unsigned int nr_pages = ((start + len - 1) >> PAGE_SHIFT) -
+ (start >> PAGE_SHIFT) + 1;
+ int ret = 0;
+
+ ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
+ ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));
+
+ folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
+ if (!folios)
+ return -ENOMEM;
+
+ /* Prepare all pages */
+ for (int i = 0; cur < start + len && i < nr_pages; i++) {
+ folios[i] = defrag_prepare_one_folio(inode, cur >> PAGE_SHIFT);
+ if (IS_ERR(folios[i])) {
+ ret = PTR_ERR(folios[i]);
+ folios[i] = NULL;
+ goto free_folios;
+ }
+ cur = folio_next_pos(folios[i]);
+ }
+ for (int i = 0; i < nr_pages; i++) {
+ if (!folios[i])
+ break;
+ folio_wait_writeback(folios[i]);
+ }
+
+ /* We should get at least one folio. */
+ ASSERT(folios[0]);
+ /* Lock the pages range */
+ btrfs_lock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state);
+ /*
+ * Now we have a consistent view about the extent map, re-check
+ * which range really needs to be defragged.
+ *
+ * And this time we have extent locked already, pass @locked = true
+ * so that we won't relock the extent range and cause deadlock.
+ */
+ ret = defrag_collect_targets(inode, start, len, extent_thresh,
+ newer_than, do_compress, true,
+ &target_list, last_scanned_ret);
+ if (ret < 0)
+ goto unlock_extent;
+
+ list_for_each_entry(entry, &target_list, list) {
+ ret = defrag_one_locked_target(inode, entry, folios, nr_pages,
+ &cached_state);
+ if (ret < 0)
+ break;
+ }
+
+ list_for_each_entry_safe(entry, tmp, &target_list, list) {
+ list_del_init(&entry->list);
+ kfree(entry);
+ }
+unlock_extent:
+ btrfs_unlock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state);
+free_folios:
+ for (int i = 0; i < nr_pages; i++) {
+ if (!folios[i])
+ break;
+ folio_unlock(folios[i]);
+ folio_put(folios[i]);
+ }
+ kfree(folios);
+ return ret;
+}
+
+static int defrag_one_cluster(struct btrfs_inode *inode,
+ struct file_ra_state *ra,
+ u64 start, u32 len, u32 extent_thresh,
+ u64 newer_than, bool do_compress,
+ unsigned long *sectors_defragged,
+ unsigned long max_sectors,
+ u64 *last_scanned_ret)
+{
+ const u32 sectorsize = inode->root->fs_info->sectorsize;
+ struct defrag_target_range *entry;
+ struct defrag_target_range *tmp;
+ LIST_HEAD(target_list);
+ int ret;
+
+ ret = defrag_collect_targets(inode, start, len, extent_thresh,
+ newer_than, do_compress, false,
+ &target_list, NULL);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry(entry, &target_list, list) {
+ u32 range_len = entry->len;
+
+ /* Reached or beyond the limit */
+ if (max_sectors && *sectors_defragged >= max_sectors) {
+ ret = 1;
+ break;
+ }
+
+ if (max_sectors)
+ range_len = min_t(u32, range_len,
+ (max_sectors - *sectors_defragged) * sectorsize);
+
+ /*
+ * If defrag_one_range() has updated last_scanned_ret,
+ * our range may already be invalid (e.g. hole punched).
+ * Skip if our range is before last_scanned_ret, as there is
+ * no need to defrag the range anymore.
+ */
+ if (entry->start + range_len <= *last_scanned_ret)
+ continue;
+
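+		/* Kick off readahead for the whole target range before locking folios. */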
+ page_cache_sync_readahead(inode->vfs_inode.i_mapping,
+ ra, NULL, entry->start >> PAGE_SHIFT,
+ ((entry->start + range_len - 1) >> PAGE_SHIFT) -
+ (entry->start >> PAGE_SHIFT) + 1);
+ /*
+ * Here we may not defrag any range if holes are punched before
+ * we locked the pages.
+ * But that's fine, it only affects the @sectors_defragged
+ * accounting.
+ */
+ ret = defrag_one_range(inode, entry->start, range_len,
+ extent_thresh, newer_than, do_compress,
+ last_scanned_ret);
+ if (ret < 0)
+ break;
+ *sectors_defragged += range_len >>
+ inode->root->fs_info->sectorsize_bits;
+ }
+out:
+ list_for_each_entry_safe(entry, tmp, &target_list, list) {
+ list_del_init(&entry->list);
+ kfree(entry);
+ }
+ if (ret >= 0)
+ *last_scanned_ret = max(*last_scanned_ret, start + len);
+ return ret;
+}
+
+/*
+ * Entry point to file defragmentation.
+ *
+ * @inode: inode to be defragged
+ * @ra: readahead state
+ * @range: defrag options including range and flags
+ * @newer_than: minimum transid to defrag
+ * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
+ * will be defragged.
+ *
+ * Return <0 for error.
+ * Return >=0 for the number of sectors defragged, and range->start will be updated
+ * to indicate the file offset where the next defrag should start.
+ * (Mostly for autodefrag, which sets @max_to_defrag, thus we may exit early
+ * without defragging the whole range).
+ */
+int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra,
+ struct btrfs_ioctl_defrag_range_args *range,
+ u64 newer_than, unsigned long max_to_defrag)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ unsigned long sectors_defragged = 0;
+ u64 isize = i_size_read(&inode->vfs_inode);
+ u64 cur;
+ u64 last_byte;
+ bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
+ bool no_compress = (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS);
+ int compress_type = BTRFS_COMPRESS_ZLIB;
+ int compress_level = 0;
+ int ret = 0;
+ u32 extent_thresh = range->extent_thresh;
+ pgoff_t start_index;
+
+ ASSERT(ra);
+
+ if (isize == 0)
+ return 0;
+
+ if (range->start >= isize)
+ return -EINVAL;
+
+ if (do_compress) {
+ if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL) {
+ if (range->compress.type >= BTRFS_NR_COMPRESS_TYPES)
+ return -EINVAL;
+ if (range->compress.type) {
+ compress_type = range->compress.type;
+ compress_level = range->compress.level;
+ if (!btrfs_compress_level_valid(compress_type, compress_level))
+ return -EINVAL;
+ }
+ } else {
+ if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
+ return -EINVAL;
+ if (range->compress_type)
+ compress_type = range->compress_type;
+ }
+ } else if (range->flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS) {
+ compress_type = BTRFS_DEFRAG_DONT_COMPRESS;
+ compress_level = 1;
+ }
+
+ if (extent_thresh == 0)
+ extent_thresh = SZ_256K;
+
+ if (range->start + range->len > range->start) {
+ /* Got a specific range */
+ last_byte = min(isize, range->start + range->len);
+ } else {
+ /* Defrag until file end */
+ last_byte = isize;
+ }
+
+ /* Align the range */
+ cur = round_down(range->start, fs_info->sectorsize);
+ last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
+
+ /*
+ * Make writeback start from the beginning of the range, so that the
+ * defrag range can be written sequentially.
+ */
+ start_index = cur >> PAGE_SHIFT;
+ if (start_index < inode->vfs_inode.i_mapping->writeback_index)
+ inode->vfs_inode.i_mapping->writeback_index = start_index;
+
+ while (cur < last_byte) {
+ const unsigned long prev_sectors_defragged = sectors_defragged;
+ u64 last_scanned = cur;
+ u64 cluster_end;
+
+ if (btrfs_defrag_cancelled(fs_info)) {
+ ret = -EAGAIN;
+ break;
+ }
+
+		/* We want the cluster end at a page boundary when possible */
+ cluster_end = (((cur >> PAGE_SHIFT) +
+ (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
+ cluster_end = min(cluster_end, last_byte);
+
+ btrfs_inode_lock(inode, 0);
+ if (IS_SWAPFILE(&inode->vfs_inode)) {
+ ret = -ETXTBSY;
+ btrfs_inode_unlock(inode, 0);
+ break;
+ }
+ if (!(inode->vfs_inode.i_sb->s_flags & SB_ACTIVE)) {
+ btrfs_inode_unlock(inode, 0);
+ break;
+ }
+ if (do_compress || no_compress) {
+ inode->defrag_compress = compress_type;
+ inode->defrag_compress_level = compress_level;
+ }
+ ret = defrag_one_cluster(inode, ra, cur,
+ cluster_end + 1 - cur, extent_thresh,
+ newer_than, do_compress || no_compress,
+ &sectors_defragged,
+ max_to_defrag, &last_scanned);
+
+ if (sectors_defragged > prev_sectors_defragged)
+ balance_dirty_pages_ratelimited(inode->vfs_inode.i_mapping);
+
+ btrfs_inode_unlock(inode, 0);
+ if (ret < 0)
+ break;
+ cur = max(cluster_end + 1, last_scanned);
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ cond_resched();
+ }
+
+ /*
+ * Update range.start for autodefrag, this will indicate where to start
+	 * in the next run.
+ */
+ range->start = cur;
+ if (sectors_defragged) {
+ /*
+		 * We have defragged some sectors, for the compression case they
+ * need to be written back immediately.
+ */
+ if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
+ filemap_flush(inode->vfs_inode.i_mapping);
+ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &inode->runtime_flags))
+ filemap_flush(inode->vfs_inode.i_mapping);
+ }
+ if (range->compress_type == BTRFS_COMPRESS_LZO)
+ btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+ else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
+ btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
+ ret = sectors_defragged;
+ }
+ if (do_compress || no_compress) {
+ btrfs_inode_lock(inode, 0);
+ inode->defrag_compress = BTRFS_COMPRESS_NONE;
+ btrfs_inode_unlock(inode, 0);
+ }
+ return ret;
+}
+
+void __cold btrfs_auto_defrag_exit(void)
+{
+ kmem_cache_destroy(btrfs_inode_defrag_cachep);
+}
+
+int __init btrfs_auto_defrag_init(void)
+{
+ btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
+ sizeof(struct inode_defrag), 0, 0, NULL);
+ if (!btrfs_inode_defrag_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/fs/btrfs/defrag.h b/fs/btrfs/defrag.h
new file mode 100644
index 000000000000..a7f917a38dbf
--- /dev/null
+++ b/fs/btrfs/defrag.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_DEFRAG_H
+#define BTRFS_DEFRAG_H
+
+#include <linux/types.h>
+#include <linux/compiler_types.h>
+
+struct file_ra_state;
+struct btrfs_inode;
+struct btrfs_fs_info;
+struct btrfs_root;
+struct btrfs_trans_handle;
+struct btrfs_ioctl_defrag_range_args;
+
+int btrfs_defrag_file(struct btrfs_inode *inode, struct file_ra_state *ra,
+ struct btrfs_ioctl_defrag_range_args *range,
+ u64 newer_than, unsigned long max_to_defrag);
+int __init btrfs_auto_defrag_init(void);
+void __cold btrfs_auto_defrag_exit(void);
+void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh);
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
+void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
+int btrfs_defrag_root(struct btrfs_root *root);
+
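+/* Defrag is cancelled by a signal pending on the calling task. */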
+static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
+{
+ return signal_pending(current);
+}
+
+#endif
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 2059d1504149..0970799d0aa4 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#include "messages.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "block-rsv.h"
#include "btrfs_inode.h"
#include "space-info.h"
-#include "transaction.h"
#include "qgroup.h"
-#include "block-group.h"
+#include "fs.h"
/*
* HOW DOES THIS WORK
@@ -111,7 +111,19 @@
* making error handling and cleanup easier.
*/
-int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
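+/*
+ * On zoned filesystems the data relocation root gets its own space_info
+ * sub-group, so pick the data space_info matching the inode's root.
+ */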
+static inline struct btrfs_space_info *data_sinfo_for_inode(const struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ if (btrfs_is_zoned(fs_info) && btrfs_is_data_reloc_root(inode->root)) {
+ ASSERT(fs_info->data_sinfo->sub_group[0]->subgroup_id ==
+ BTRFS_SUB_GROUP_DATA_RELOC);
+ return fs_info->data_sinfo->sub_group[0];
+ }
+ return fs_info->data_sinfo;
+}
+
+int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -123,13 +135,15 @@ int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
if (btrfs_is_free_space_inode(inode))
flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;
- return btrfs_reserve_data_bytes(fs_info, bytes, flush);
+ return btrfs_reserve_data_bytes(data_sinfo_for_inode(inode), bytes, flush);
}
int btrfs_check_data_free_space(struct btrfs_inode *inode,
- struct extent_changeset **reserved, u64 start, u64 len)
+ struct extent_changeset **reserved, u64 start,
+ u64 len, bool noflush)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;
int ret;
/* align the range */
@@ -137,16 +151,24 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
round_down(start, fs_info->sectorsize);
start = round_down(start, fs_info->sectorsize);
- ret = btrfs_alloc_data_chunk_ondemand(inode, len);
+ if (noflush)
+ flush = BTRFS_RESERVE_NO_FLUSH;
+ else if (btrfs_is_free_space_inode(inode))
+ flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;
+
+ ret = btrfs_reserve_data_bytes(data_sinfo_for_inode(inode), len, flush);
if (ret < 0)
return ret;
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
- if (ret < 0)
- btrfs_free_reserved_data_space_noquota(fs_info, len);
- else
+ if (ret < 0) {
+ btrfs_free_reserved_data_space_noquota(inode, len);
+ extent_changeset_free(*reserved);
+ *reserved = NULL;
+ } else {
ret = 0;
+ }
return ret;
}
@@ -158,15 +180,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
* which we can't sleep and is sure it won't affect qgroup reserved space.
* Like clear_bit_hook().
*/
-void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
- u64 len)
+void btrfs_free_reserved_data_space_noquota(struct btrfs_inode *inode, u64 len)
{
- struct btrfs_space_info *data_sinfo;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
ASSERT(IS_ALIGNED(len, fs_info->sectorsize));
- data_sinfo = fs_info->data_sinfo;
- btrfs_space_info_free_bytes_may_use(fs_info, data_sinfo, len);
+ btrfs_space_info_free_bytes_may_use(data_sinfo_for_inode(inode), len);
}
/*
@@ -186,12 +206,12 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
round_down(start, fs_info->sectorsize);
start = round_down(start, fs_info->sectorsize);
- btrfs_free_reserved_data_space_noquota(fs_info, len);
- btrfs_qgroup_free_data(inode, reserved, start, len);
+ btrfs_free_reserved_data_space_noquota(inode, len);
+ btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
}
-/**
- * Release any excessive reservation
+/*
+ * Release any excessive reservations for an inode.
*
* @inode: the inode we need to release from
* @qgroup_free: free or convert qgroup meta. Unlike normal operation, qgroup
@@ -233,7 +253,6 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
u64 reserve_size = 0;
u64 qgroup_rsv_size = 0;
- u64 csum_leaves;
unsigned outstanding_extents;
lockdep_assert_held(&inode->lock);
@@ -248,10 +267,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
outstanding_extents);
reserve_size += btrfs_calc_metadata_size(fs_info, 1);
}
- csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
- inode->csum_bytes);
- reserve_size += btrfs_calc_insert_metadata_size(fs_info,
- csum_leaves);
+ if (!(inode->flags & BTRFS_INODE_NODATASUM)) {
+ u64 csum_leaves;
+
+ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
+ reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves);
+ }
/*
* For qgroup rsv, the calculation is very simple:
* account one nodesize for each outstanding extent
@@ -266,14 +287,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
spin_unlock(&block_rsv->lock);
}
-static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
- u64 num_bytes, u64 *meta_reserve,
- u64 *qgroup_reserve)
+static void calc_inode_reservations(struct btrfs_inode *inode,
+ u64 num_bytes, u64 disk_num_bytes,
+ u64 *meta_reserve, u64 *qgroup_reserve)
{
- u64 nr_extents = count_max_extents(num_bytes);
- u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ u64 nr_extents = count_max_extents(fs_info, num_bytes);
+ u64 csum_leaves;
u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
+ if (inode->flags & BTRFS_INODE_NODATASUM)
+ csum_leaves = 0;
+ else
+ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
+
*meta_reserve = btrfs_calc_insert_metadata_size(fs_info,
nr_extents + csum_leaves);
@@ -285,7 +312,8 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
*qgroup_reserve = nr_extents * fs_info->nodesize;
}
-int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
+int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ u64 disk_num_bytes, bool noflush)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -304,17 +332,15 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
* If we have a transaction open (can happen if we call truncate_block
* from truncate), then we need FLUSH_LIMIT so we don't deadlock.
*/
- if (btrfs_is_free_space_inode(inode)) {
+ if (noflush || btrfs_is_free_space_inode(inode)) {
flush = BTRFS_RESERVE_NO_FLUSH;
} else {
if (current->journal_info)
flush = BTRFS_RESERVE_FLUSH_LIMIT;
-
- if (btrfs_transaction_in_commit(fs_info))
- schedule_timeout(1);
}
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+ disk_num_bytes = ALIGN(disk_num_bytes, fs_info->sectorsize);
/*
* We always want to do it this way, every other way is wrong and ends
@@ -326,12 +352,14 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
* everything out and try again, which is bad. This way we just
* over-reserve slightly, and clean up the mess when we are done.
*/
- calc_inode_reservations(fs_info, num_bytes, &meta_reserve,
- &qgroup_reserve);
- ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
+ calc_inode_reservations(inode, num_bytes, disk_num_bytes,
+ &meta_reserve, &qgroup_reserve);
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true,
+ noflush);
if (ret)
return ret;
- ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
+ ret = btrfs_reserve_metadata_bytes(block_rsv->space_info, meta_reserve,
+ flush);
if (ret) {
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
return ret;
@@ -343,10 +371,11 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
* racing with an ordered completion or some such that would think it
* needs to free the reservation we just made.
*/
+ nr_extents = count_max_extents(fs_info, num_bytes);
spin_lock(&inode->lock);
- nr_extents = count_max_extents(num_bytes);
btrfs_mod_outstanding_extents(inode, nr_extents);
- inode->csum_bytes += num_bytes;
+ if (!(inode->flags & BTRFS_INODE_NODATASUM))
+ inode->csum_bytes += disk_num_bytes;
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
@@ -362,12 +391,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
return 0;
}
-/**
- * Release a metadata reservation for an inode
+/*
+ * Release a metadata reservation for an inode.
*
- * @inode: the inode to release the reservation for.
- * @num_bytes: the number of bytes we are releasing.
- * @qgroup_free: free qgroup reservation or convert it to per-trans reservation
+ * @inode: the inode to release the reservation for.
+ * @num_bytes: the number of bytes we are releasing.
+ * @qgroup_free: free qgroup reservation or convert it to per-trans reservation
*
* This will release the metadata reservation for an inode. This can be called
* once we complete IO for a given set of bytes to release their metadata
@@ -380,7 +409,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&inode->lock);
- inode->csum_bytes -= num_bytes;
+ if (!(inode->flags & BTRFS_INODE_NODATASUM))
+ inode->csum_bytes -= num_bytes;
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
@@ -390,10 +420,11 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
btrfs_inode_rsv_release(inode, qgroup_free);
}
-/**
- * btrfs_delalloc_release_extents - release our outstanding_extents
- * @inode: the inode to balance the reservation for.
- * @num_bytes: the number of bytes we originally reserved with
+/*
+ * Release our outstanding_extents for an inode.
+ *
+ * @inode: the inode to balance the reservation for.
+ * @num_bytes: the number of bytes we originally reserved with
*
* When we reserve space we increase outstanding_extents for the extents we may
* add. Once we've set the range as delalloc or created our ordered extents we
@@ -407,7 +438,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
unsigned num_extents;
spin_lock(&inode->lock);
- num_extents = count_max_extents(num_bytes);
+ num_extents = count_max_extents(fs_info, num_bytes);
btrfs_mod_outstanding_extents(inode, -num_extents);
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
@@ -418,46 +449,72 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
btrfs_inode_rsv_release(inode, true);
}
-/**
- * btrfs_delalloc_reserve_space - reserve data and metadata space for
- * delalloc
- * @inode: inode we're writing to
- * @start: start range we are writing to
- * @len: how long the range we are writing to
- * @reserved: mandatory parameter, record actually reserved qgroup ranges of
- * current reservation.
+/* Shrink a previously reserved extent to a new length. */
+void btrfs_delalloc_shrink_extents(struct btrfs_inode *inode, u64 reserved_len, u64 new_len)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 reserved_num_extents = count_max_extents(fs_info, reserved_len);
+ const u32 new_num_extents = count_max_extents(fs_info, new_len);
+ const int diff_num_extents = new_num_extents - reserved_num_extents;
+
+ ASSERT(new_len <= reserved_len);
+ if (new_num_extents == reserved_num_extents)
+ return;
+
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, diff_num_extents);
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ spin_unlock(&inode->lock);
+
+ if (btrfs_is_testing(fs_info))
+ return;
+
+ btrfs_inode_rsv_release(inode, true);
+}
+
+/*
+ * Reserve data and metadata space for delalloc
+ *
+ * @inode: inode we're writing to
+ * @start: start range we are writing to
+ * @len: how long the range we are writing to
+ * @reserved: mandatory parameter, record actually reserved qgroup ranges of
+ * current reservation.
*
* This will do the following things
*
- * - reserve space in data space info for num bytes
- * and reserve precious corresponding qgroup space
+ * - reserve space in data space info for num bytes and reserve precious
+ * corresponding qgroup space
* (Done in check_data_free_space)
*
* - reserve space for metadata space, based on the number of outstanding
- * extents and how much csums will be needed
- * also reserve metadata space in a per root over-reserve method.
+ * extents and how much csums will be needed also reserve metadata space in a
+ * per root over-reserve method.
* - add to the inodes->delalloc_bytes
* - add it to the fs_info's delalloc inodes list.
* (Above 3 all done in delalloc_reserve_metadata)
*
* Return 0 for success
- * Return <0 for error(-ENOSPC or -EQUOT)
+ * Return <0 for error (-ENOSPC or -EDQUOT)
*/
int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len)
{
int ret;
- ret = btrfs_check_data_free_space(inode, reserved, start, len);
+ ret = btrfs_check_data_free_space(inode, reserved, start, len, false);
if (ret < 0)
return ret;
- ret = btrfs_delalloc_reserve_metadata(inode, len);
- if (ret < 0)
+ ret = btrfs_delalloc_reserve_metadata(inode, len, len, false);
+ if (ret < 0) {
btrfs_free_reserved_data_space(inode, *reserved, start, len);
+ extent_changeset_free(*reserved);
+ *reserved = NULL;
+ }
return ret;
}
-/**
+/*
* Release data and metadata space for delalloc
*
* @inode: inode we're releasing space for
@@ -466,10 +523,10 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
* @len: length of the space already reserved
* @qgroup_free: should qgroup reserved-space also be freed
*
- * This function will release the metadata space that was not used and will
- * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
- * list if there are no delalloc bytes left.
- * Also it will handle the qgroup reserved space.
+ * Release the metadata space that was not used and will decrement
+ * ->delalloc_bytes and remove it from the fs_info->delalloc_inodes list if
+ * there are no delalloc bytes left. Also it will handle the qgroup reserved
+ * space.
*/
void btrfs_delalloc_release_space(struct btrfs_inode *inode,
struct extent_changeset *reserved,
diff --git a/fs/btrfs/delalloc-space.h b/fs/btrfs/delalloc-space.h
index 28bf5c3ef430..6119c0d3f883 100644
--- a/fs/btrfs/delalloc-space.h
+++ b/fs/btrfs/delalloc-space.h
@@ -3,21 +3,29 @@
#ifndef BTRFS_DELALLOC_SPACE_H
#define BTRFS_DELALLOC_SPACE_H
+#include <linux/types.h>
+
struct extent_changeset;
+struct btrfs_inode;
+struct btrfs_fs_info;
-int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
+int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes);
int btrfs_check_data_free_space(struct btrfs_inode *inode,
- struct extent_changeset **reserved, u64 start, u64 len);
+ struct extent_changeset **reserved, u64 start, u64 len,
+ bool noflush);
void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len);
void btrfs_delalloc_release_space(struct btrfs_inode *inode,
struct extent_changeset *reserved,
u64 start, u64 len, bool qgroup_free);
-void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
- u64 len);
+void btrfs_free_reserved_data_space_noquota(struct btrfs_inode *inode, u64 len);
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free);
int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len);
+int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ u64 disk_num_bytes, bool noflush);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
+void btrfs_delalloc_shrink_extents(struct btrfs_inode *inode, u64 reserved_len, u64 new_len);
#endif /* BTRFS_DELALLOC_SPACE_H */
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index e164766dcc38..ce6e9f8812e0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -6,13 +6,19 @@
#include <linux/slab.h>
#include <linux/iversion.h>
+#include "ctree.h"
+#include "fs.h"
+#include "messages.h"
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
-#include "ctree.h"
#include "qgroup.h"
#include "locking.h"
+#include "inode-item.h"
+#include "space-info.h"
+#include "accessors.h"
+#include "file-item.h"
#define BTRFS_DELAYED_WRITEBACK 512
#define BTRFS_DELAYED_BACKGROUND 128
@@ -22,11 +28,7 @@ static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
- delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
- sizeof(struct btrfs_delayed_node),
- 0,
- SLAB_MEM_SPREAD,
- NULL);
+ delayed_node_cache = KMEM_CACHE(btrfs_delayed_node, 0);
if (!delayed_node_cache)
return -ENOMEM;
return 0;
@@ -37,6 +39,17 @@ void __cold btrfs_delayed_inode_exit(void)
kmem_cache_destroy(delayed_node_cache);
}
+void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root)
+{
+ atomic_set(&delayed_root->items, 0);
+ atomic_set(&delayed_root->items_seq, 0);
+ delayed_root->nodes = 0;
+ spin_lock_init(&delayed_root->lock);
+ init_waitqueue_head(&delayed_root->wait);
+ INIT_LIST_HEAD(&delayed_root->node_list);
+ INIT_LIST_HEAD(&delayed_root->prepare_list);
+}
+
static inline void btrfs_init_delayed_node(
struct btrfs_delayed_node *delayed_node,
struct btrfs_root *root, u64 inode_id)
@@ -44,6 +57,7 @@ static inline void btrfs_init_delayed_node(
delayed_node->root = root;
delayed_node->inode_id = inode_id;
refcount_set(&delayed_node->refs, 0);
+ btrfs_delayed_node_ref_tracker_dir_init(delayed_node);
delayed_node->ins_root = RB_ROOT_CACHED;
delayed_node->del_root = RB_ROOT_CACHED;
mutex_init(&delayed_node->mutex);
@@ -51,20 +65,9 @@ static inline void btrfs_init_delayed_node(
INIT_LIST_HEAD(&delayed_node->p_list);
}
-static inline int btrfs_is_continuous_delayed_item(
- struct btrfs_delayed_item *item1,
- struct btrfs_delayed_item *item2)
-{
- if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
- item1->key.objectid == item2->key.objectid &&
- item1->key.type == item2->key.type &&
- item1->key.offset + 1 == item2->key.offset)
- return 1;
- return 0;
-}
-
static struct btrfs_delayed_node *btrfs_get_delayed_node(
- struct btrfs_inode *btrfs_inode)
+ struct btrfs_inode *btrfs_inode,
+ struct btrfs_ref_tracker *tracker)
{
struct btrfs_root *root = btrfs_inode->root;
u64 ino = btrfs_ino(btrfs_inode);
@@ -73,25 +76,27 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = READ_ONCE(btrfs_inode->delayed_node);
if (node) {
refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
return node;
}
- spin_lock(&root->inode_lock);
- node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+ xa_lock(&root->delayed_nodes);
+ node = xa_load(&root->delayed_nodes, ino);
if (node) {
if (btrfs_inode->delayed_node) {
refcount_inc(&node->refs); /* can be accessed */
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
BUG_ON(btrfs_inode->delayed_node != node);
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return node;
}
/*
* It's possible that we're racing into the middle of removing
- * this node from the radix tree. In this case, the refcount
+ * this node from the xarray. In this case, the refcount
* was zero and it should never go back to one. Just return
- * NULL like it was never in the radix at all; our release
+ * NULL like it was never in the xarray at all; our release
* function is in the process of removing it.
*
* Some implementations of refcount_inc refuse to bump the
@@ -99,36 +104,46 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
* here, refcount_inc() may decide to just WARN_ONCE() instead
* of actually bumping the refcount.
*
- * If this node is properly in the radix, we want to bump the
+ * If this node is properly in the xarray, we want to bump the
* refcount twice, once for the inode and once for this get
* operation.
*/
if (refcount_inc_not_zero(&node->refs)) {
refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker,
+ GFP_ATOMIC);
btrfs_inode->delayed_node = node;
} else {
node = NULL;
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return node;
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return NULL;
}
-/* Will return either the node or PTR_ERR(-ENOMEM) */
+/*
+ * Look up an existing delayed node associated with @btrfs_inode or create a new
+ * one and insert it into the delayed nodes of the root.
+ *
+ * Return the delayed node, or error pointer on failure.
+ */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
- struct btrfs_inode *btrfs_inode)
+ struct btrfs_inode *btrfs_inode,
+ struct btrfs_ref_tracker *tracker)
{
struct btrfs_delayed_node *node;
struct btrfs_root *root = btrfs_inode->root;
u64 ino = btrfs_ino(btrfs_inode);
int ret;
+ void *ptr;
again:
- node = btrfs_get_delayed_node(btrfs_inode);
+ node = btrfs_get_delayed_node(btrfs_inode, tracker);
if (node)
return node;
@@ -137,26 +152,35 @@ again:
return ERR_PTR(-ENOMEM);
btrfs_init_delayed_node(node, root, ino);
- /* cached in the btrfs inode and can be accessed */
- refcount_set(&node->refs, 2);
-
- ret = radix_tree_preload(GFP_NOFS);
- if (ret) {
+	/* Allocate and reserve the slot; from now on xa_load() can return NULL for it. */
+ ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
+ if (ret == -ENOMEM) {
+ btrfs_delayed_node_ref_tracker_dir_exit(node);
kmem_cache_free(delayed_node_cache, node);
- return ERR_PTR(ret);
+ return ERR_PTR(-ENOMEM);
}
-
- spin_lock(&root->inode_lock);
- ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
- if (ret == -EEXIST) {
- spin_unlock(&root->inode_lock);
+ xa_lock(&root->delayed_nodes);
+ ptr = xa_load(&root->delayed_nodes, ino);
+ if (ptr) {
+ /* Somebody inserted it, go back and read it. */
+ xa_unlock(&root->delayed_nodes);
+ btrfs_delayed_node_ref_tracker_dir_exit(node);
kmem_cache_free(delayed_node_cache, node);
- radix_tree_preload_end();
+ node = NULL;
goto again;
}
+ ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
+ ASSERT(xa_err(ptr) != -EINVAL);
+ ASSERT(xa_err(ptr) != -ENOMEM);
+ ASSERT(ptr == NULL);
+
+ /* Cached in the inode and can be accessed. */
+ refcount_set(&node->refs, 2);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->inode_cache_tracker, GFP_ATOMIC);
+
btrfs_inode->delayed_node = node;
- spin_unlock(&root->inode_lock);
- radix_tree_preload_end();
+ xa_unlock(&root->delayed_nodes);
return node;
}
@@ -180,6 +204,8 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
list_add_tail(&node->n_list, &root->node_list);
list_add_tail(&node->p_list, &root->prepare_list);
refcount_inc(&node->refs); /* inserted into list */
+ btrfs_delayed_node_ref_tracker_alloc(node, &node->node_list_tracker,
+ GFP_ATOMIC);
root->nodes++;
set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
}
@@ -193,6 +219,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
spin_lock(&root->lock);
if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
root->nodes--;
+ btrfs_delayed_node_ref_tracker_free(node, &node->node_list_tracker);
refcount_dec(&node->refs); /* not in the list */
list_del_init(&node->n_list);
if (!list_empty(&node->p_list))
@@ -203,26 +230,26 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
- struct btrfs_delayed_root *delayed_root)
+ struct btrfs_delayed_root *delayed_root,
+ struct btrfs_ref_tracker *tracker)
{
- struct list_head *p;
- struct btrfs_delayed_node *node = NULL;
+ struct btrfs_delayed_node *node;
spin_lock(&delayed_root->lock);
- if (list_empty(&delayed_root->node_list))
- goto out;
-
- p = delayed_root->node_list.next;
- node = list_entry(p, struct btrfs_delayed_node, n_list);
- refcount_inc(&node->refs);
-out:
+ node = list_first_entry_or_null(&delayed_root->node_list,
+ struct btrfs_delayed_node, n_list);
+ if (node) {
+ refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ }
spin_unlock(&delayed_root->lock);
return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
- struct btrfs_delayed_node *node)
+ struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
{
struct btrfs_delayed_root *delayed_root;
struct list_head *p;
@@ -242,6 +269,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
next = list_entry(p, struct btrfs_delayed_node, n_list);
refcount_inc(&next->refs);
+ btrfs_delayed_node_ref_tracker_alloc(next, tracker, GFP_ATOMIC);
out:
spin_unlock(&delayed_root->lock);
@@ -250,7 +278,7 @@ out:
static void __btrfs_release_delayed_node(
struct btrfs_delayed_node *delayed_node,
- int mod)
+ int mod, struct btrfs_ref_tracker *tracker)
{
struct btrfs_delayed_root *delayed_root;
@@ -266,201 +294,139 @@ static void __btrfs_release_delayed_node(
btrfs_dequeue_delayed_node(delayed_root, delayed_node);
mutex_unlock(&delayed_node->mutex);
+ btrfs_delayed_node_ref_tracker_free(delayed_node, tracker);
if (refcount_dec_and_test(&delayed_node->refs)) {
struct btrfs_root *root = delayed_node->root;
- spin_lock(&root->inode_lock);
+ xa_erase(&root->delayed_nodes, delayed_node->inode_id);
/*
* Once our refcount goes to zero, nobody is allowed to bump it
* back up. We can delete it now.
*/
ASSERT(refcount_read(&delayed_node->refs) == 0);
- radix_tree_delete(&root->delayed_nodes_tree,
- delayed_node->inode_id);
- spin_unlock(&root->inode_lock);
+ btrfs_delayed_node_ref_tracker_dir_exit(delayed_node);
kmem_cache_free(delayed_node_cache, delayed_node);
}
}
-static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
+static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
{
- __btrfs_release_delayed_node(node, 0);
+ __btrfs_release_delayed_node(node, 0, tracker);
}
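
The btrfs_delayed_node_ref_tracker_*() helpers used throughout this change appear to be thin wrappers around the generic reference tracking infrastructure in <linux/ref_tracker.h>, pairing every refcount_inc()/refcount_dec() with an allocated tracker so leaked references can be reported with a stack trace. The sketch below shows that pairing in generic form; the object and helper names are hypothetical, and the exact ref_tracker API signatures are quoted from memory, so treat them as an assumption.

#include <linux/ref_tracker.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object, only to illustrate the refcount + ref_tracker pairing. */
struct tracked_obj {
	refcount_t refs;
	struct ref_tracker_dir dir;	/* ref_tracker_dir_init() at allocation time */
};

static void tracked_obj_get(struct tracked_obj *obj, struct ref_tracker **tracker)
{
	refcount_inc(&obj->refs);
	/* Record a stack trace for this reference; GFP_ATOMIC as locks may be held. */
	ref_tracker_alloc(&obj->dir, tracker, GFP_ATOMIC);
}

static void tracked_obj_put(struct tracked_obj *obj, struct ref_tracker **tracker)
{
	ref_tracker_free(&obj->dir, tracker);
	if (refcount_dec_and_test(&obj->refs)) {
		/* Reports any reference that was taken but never paired with a free. */
		ref_tracker_dir_exit(&obj->dir);
		kfree(obj);
	}
}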
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
- struct btrfs_delayed_root *delayed_root)
+ struct btrfs_delayed_root *delayed_root,
+ struct btrfs_ref_tracker *tracker)
{
- struct list_head *p;
- struct btrfs_delayed_node *node = NULL;
+ struct btrfs_delayed_node *node;
spin_lock(&delayed_root->lock);
- if (list_empty(&delayed_root->prepare_list))
- goto out;
-
- p = delayed_root->prepare_list.next;
- list_del_init(p);
- node = list_entry(p, struct btrfs_delayed_node, p_list);
- refcount_inc(&node->refs);
-out:
+ node = list_first_entry_or_null(&delayed_root->prepare_list,
+ struct btrfs_delayed_node, p_list);
+ if (node) {
+ list_del_init(&node->p_list);
+ refcount_inc(&node->refs);
+ btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_ATOMIC);
+ }
spin_unlock(&delayed_root->lock);
return node;
}
static inline void btrfs_release_prepared_delayed_node(
- struct btrfs_delayed_node *node)
+ struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
{
- __btrfs_release_delayed_node(node, 1);
+ __btrfs_release_delayed_node(node, 1, tracker);
}
-static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
+ struct btrfs_delayed_node *node,
+ enum btrfs_delayed_item_type type)
{
struct btrfs_delayed_item *item;
- item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+
+ item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
if (item) {
item->data_len = data_len;
- item->ins_or_del = 0;
+ item->type = type;
item->bytes_reserved = 0;
- item->delayed_node = NULL;
+ item->delayed_node = node;
+ RB_CLEAR_NODE(&item->rb_node);
+ INIT_LIST_HEAD(&item->log_list);
+ item->logged = false;
refcount_set(&item->refs, 1);
}
return item;
}
+static int delayed_item_index_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *index = key;
+ const struct btrfs_delayed_item *delayed_item = rb_entry(node,
+ struct btrfs_delayed_item, rb_node);
+
+ if (delayed_item->index < *index)
+ return 1;
+ else if (delayed_item->index > *index)
+ return -1;
+
+ return 0;
+}
+
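
The comparator above (and btrfs_delayed_item_cmp() further down) plugs into the generic rbtree helpers: rb_find() takes a key-versus-node comparator for lookups, while rb_find_add_cached() takes a node-versus-node comparator and either links the new node or returns the existing one. A reduced sketch of that pairing with a hypothetical item type keyed by a u64:

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node rb_node;
	u64 index;
};

static int item_index_cmp(const void *key, const struct rb_node *node)
{
	const u64 *index = key;
	const struct item *it = rb_entry(node, struct item, rb_node);

	if (it->index < *index)
		return 1;
	if (it->index > *index)
		return -1;
	return 0;
}

static int item_cmp(const struct rb_node *new, const struct rb_node *exist)
{
	const struct item *new_item = rb_entry(new, struct item, rb_node);

	return item_index_cmp(&new_item->index, exist);
}

/* Insert @new; returns the already-present item on index collision, else NULL. */
static struct item *item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node *exist = rb_find_add_cached(&new->rb_node, root, item_cmp);

	return rb_entry_safe(exist, struct item, rb_node);
}

/* Look up by index; returns NULL when the index is not in the tree. */
static struct item *item_lookup(struct rb_root_cached *root, u64 index)
{
	struct rb_node *node = rb_find(&index, &root->rb_root, item_index_cmp);

	return rb_entry_safe(node, struct item, rb_node);
}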
/*
- * __btrfs_lookup_delayed_item - look up the delayed item by key
+ * Look up the delayed item by key.
+ *
* @delayed_node: pointer to the delayed node
- * @key: the key to look up
- * @prev: used to store the prev item if the right item isn't found
- * @next: used to store the next item if the right item isn't found
+ * @index: the dir index value to lookup (offset of a dir index key)
 *
- * Note: if we don't find the right item, we will return the prev item and
- * the next item.
+ * Return the delayed item, or NULL if it is not present.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
struct rb_root *root,
- struct btrfs_key *key,
- struct btrfs_delayed_item **prev,
- struct btrfs_delayed_item **next)
+ u64 index)
{
- struct rb_node *node, *prev_node = NULL;
- struct btrfs_delayed_item *delayed_item = NULL;
- int ret = 0;
-
- node = root->rb_node;
+ struct rb_node *node;
- while (node) {
- delayed_item = rb_entry(node, struct btrfs_delayed_item,
- rb_node);
- prev_node = node;
- ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
- if (ret < 0)
- node = node->rb_right;
- else if (ret > 0)
- node = node->rb_left;
- else
- return delayed_item;
- }
-
- if (prev) {
- if (!prev_node)
- *prev = NULL;
- else if (ret < 0)
- *prev = delayed_item;
- else if ((node = rb_prev(prev_node)) != NULL) {
- *prev = rb_entry(node, struct btrfs_delayed_item,
- rb_node);
- } else
- *prev = NULL;
- }
-
- if (next) {
- if (!prev_node)
- *next = NULL;
- else if (ret > 0)
- *next = delayed_item;
- else if ((node = rb_next(prev_node)) != NULL) {
- *next = rb_entry(node, struct btrfs_delayed_item,
- rb_node);
- } else
- *next = NULL;
- }
- return NULL;
+ node = rb_find(&index, root, delayed_item_index_cmp);
+ return rb_entry_safe(node, struct btrfs_delayed_item, rb_node);
}
-static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
- struct btrfs_delayed_node *delayed_node,
- struct btrfs_key *key)
+static int btrfs_delayed_item_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
{
- return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
- NULL, NULL);
+ const struct btrfs_delayed_item *new_item =
+ rb_entry(new, struct btrfs_delayed_item, rb_node);
+
+ return delayed_item_index_cmp(&new_item->index, exist);
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
- struct btrfs_delayed_item *ins,
- int action)
+ struct btrfs_delayed_item *ins)
{
- struct rb_node **p, *node;
- struct rb_node *parent_node = NULL;
struct rb_root_cached *root;
- struct btrfs_delayed_item *item;
- int cmp;
- bool leftmost = true;
+ struct rb_node *exist;
- if (action == BTRFS_DELAYED_INSERTION_ITEM)
+ if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
- else if (action == BTRFS_DELAYED_DELETION_ITEM)
- root = &delayed_node->del_root;
else
- BUG();
- p = &root->rb_root.rb_node;
- node = &ins->rb_node;
-
- while (*p) {
- parent_node = *p;
- item = rb_entry(parent_node, struct btrfs_delayed_item,
- rb_node);
-
- cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
- if (cmp < 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else if (cmp > 0) {
- p = &(*p)->rb_left;
- } else {
- return -EEXIST;
- }
- }
+ root = &delayed_node->del_root;
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
- ins->delayed_node = delayed_node;
- ins->ins_or_del = action;
+ exist = rb_find_add_cached(&ins->rb_node, root, btrfs_delayed_item_cmp);
+ if (exist)
+ return -EEXIST;
- if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
- action == BTRFS_DELAYED_INSERTION_ITEM &&
- ins->key.offset >= delayed_node->index_cnt)
- delayed_node->index_cnt = ins->key.offset + 1;
+ if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
+ ins->index >= delayed_node->index_cnt)
+ delayed_node->index_cnt = ins->index + 1;
delayed_node->count++;
atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
return 0;
}
-static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
- struct btrfs_delayed_item *item)
-{
- return __btrfs_add_delayed_item(node, item,
- BTRFS_DELAYED_INSERTION_ITEM);
-}
-
-static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
- struct btrfs_delayed_item *item)
-{
- return __btrfs_add_delayed_item(node, item,
- BTRFS_DELAYED_DELETION_ITEM);
-}
-
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
int seq = atomic_inc_return(&delayed_root->items_seq);
@@ -473,25 +439,27 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
+ struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
- /* Not associated with any delayed_node */
- if (!delayed_item->delayed_node)
+ /* Not inserted, ignore it. */
+ if (RB_EMPTY_NODE(&delayed_item->rb_node))
return;
- delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
- BUG_ON(!delayed_root);
- BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
- delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
+ /* If it's in a rbtree, then we need to have delayed node locked. */
+ lockdep_assert_held(&delayed_node->mutex);
- if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
- root = &delayed_item->delayed_node->ins_root;
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+
+ if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
+ root = &delayed_node->ins_root;
else
- root = &delayed_item->delayed_node->del_root;
+ root = &delayed_node->del_root;
rb_erase_cached(&delayed_item->rb_node, root);
- delayed_item->delayed_node->count--;
+ RB_CLEAR_NODE(&delayed_item->rb_node);
+ delayed_node->count--;
finish_one_item(delayed_root);
}
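
The RB_CLEAR_NODE() added after rb_erase_cached() is what makes the early RB_EMPTY_NODE() check at the top of this function safe: a node that was never linked, or that has already been erased, is recognizable without touching the tree. The generic idiom, independent of btrfs:

#include <linux/rbtree.h>

struct entry {
	struct rb_node rb_node;
};

static void entry_init(struct entry *e)
{
	/* Mark the node as "not inserted" so removal becomes idempotent. */
	RB_CLEAR_NODE(&e->rb_node);
}

static void entry_remove(struct rb_root_cached *root, struct entry *e)
{
	/* Never inserted, or already removed: nothing to do. */
	if (RB_EMPTY_NODE(&e->rb_node))
		return;

	rb_erase_cached(&e->rb_node, root);
	RB_CLEAR_NODE(&e->rb_node);
}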
@@ -508,49 +476,33 @@ static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node)
{
- struct rb_node *p;
- struct btrfs_delayed_item *item = NULL;
-
- p = rb_first_cached(&delayed_node->ins_root);
- if (p)
- item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+ struct rb_node *p = rb_first_cached(&delayed_node->ins_root);
- return item;
+ return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
}
static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
struct btrfs_delayed_node *delayed_node)
{
- struct rb_node *p;
- struct btrfs_delayed_item *item = NULL;
+ struct rb_node *p = rb_first_cached(&delayed_node->del_root);
- p = rb_first_cached(&delayed_node->del_root);
- if (p)
- item = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
- return item;
+ return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
}
static struct btrfs_delayed_item *__btrfs_next_delayed_item(
struct btrfs_delayed_item *item)
{
- struct rb_node *p;
- struct btrfs_delayed_item *next = NULL;
-
- p = rb_next(&item->rb_node);
- if (p)
- next = rb_entry(p, struct btrfs_delayed_item, rb_node);
+ struct rb_node *p = rb_next(&item->rb_node);
- return next;
+ return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_delayed_item *item)
{
struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv;
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
u64 num_bytes;
int ret;
@@ -570,9 +522,15 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
if (!ret) {
trace_btrfs_space_reservation(fs_info, "delayed_item",
- item->key.objectid,
+ item->delayed_node->inode_id,
num_bytes, 1);
- item->bytes_reserved = num_bytes;
+ /*
+ * For insertions we track reserved metadata space by accounting
+ * for the number of leaves that will be used, based on the delayed
+ * node's curr_index_batch_size and index_item_leaves fields.
+ */
+ if (item->type == BTRFS_DELAYED_DELETION_ITEM)
+ item->bytes_reserved = num_bytes;
}
return ret;
@@ -593,11 +551,26 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
* to release/reserve qgroup space.
*/
trace_btrfs_space_reservation(fs_info, "delayed_item",
- item->key.objectid, item->bytes_reserved,
- 0);
+ item->delayed_node->inode_id,
+ item->bytes_reserved, 0);
btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}
+static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
+ unsigned int num_leaves)
+{
+ struct btrfs_fs_info *fs_info = node->root->fs_info;
+ const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
+
+ /* There are no space reservations during log replay, bail out. */
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
+ return;
+
+ trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
+ bytes, 0);
+ btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
+}
+
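
For a sense of scale of the release above: btrfs_calc_insert_metadata_size() sizes a reservation for COWing a full tree path per item. The exact formula is assumed from memory here rather than quoted from the tree:

/*
 * Assumed definition, for illustration only:
 *
 *   btrfs_calc_insert_metadata_size(fs_info, n) == nodesize * BTRFS_MAX_LEVEL * 2 * n
 *
 * With the default 16 KiB nodesize and BTRFS_MAX_LEVEL == 8, releasing one
 * leaf here returns 16384 * 8 * 2 = 262144 bytes (256 KiB) to the delayed
 * block reserve.
 */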
static int btrfs_delayed_inode_reserve_metadata(
struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -629,7 +602,7 @@ static int btrfs_delayed_inode_reserve_metadata(
BTRFS_QGROUP_RSV_META_PREALLOC, true);
if (ret < 0)
return ret;
- ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
+ ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
BTRFS_RESERVE_NO_FLUSH);
/* NO_FLUSH could only fail with -ENOSPC */
ASSERT(ret == 0 || ret == -ENOSPC);
@@ -671,36 +644,78 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
}
/*
- * Insert a single delayed item or a batch of delayed items that have consecutive
- * keys if they exist.
+ * Insert a single delayed item or a batch of delayed items, as many as possible
+ * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
+ * in the rbtree, and if there's a gap between two consecutive dir index items,
+ * then it means at some point we had delayed dir indexes to add but they got
+ * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
+ * into the subvolume tree. Dir index keys also have their offsets coming from a
+ * monotonically increasing counter, so we can't get new keys with an offset that
+ * fits within a gap between delayed dir index items.
*/
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_delayed_item *first_item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_delayed_node *node = first_item->delayed_node;
LIST_HEAD(item_list);
struct btrfs_delayed_item *curr;
struct btrfs_delayed_item *next;
- const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
+ const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
struct btrfs_item_batch batch;
+ struct btrfs_key first_key;
+ const u32 first_data_size = first_item->data_len;
int total_size;
- char *ins_data = NULL;
+ char AUTO_KFREE(ins_data);
int ret;
+ bool continuous_keys_only = false;
+
+ lockdep_assert_held(&node->mutex);
+
+ /*
+ * During normal operation the delayed index offset is continuously
+ * increasing, so we can batch insert all items as there will not be any
+ * overlapping keys in the tree.
+ *
+ * The exception to this is log replay, where we may have interleaved
+ * offsets in the tree, so our batch needs to be continuous keys only in
+ * order to ensure we do not end up with out of order items in our leaf.
+ */
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
+ continuous_keys_only = true;
+
+ /*
+ * For delayed items to insert, we track reserved metadata bytes based
+ * on the number of leaves that we will use.
+ * See btrfs_insert_delayed_dir_index() and
+ * btrfs_delayed_item_reserve_metadata().
+ */
+ ASSERT(first_item->bytes_reserved == 0);
list_add_tail(&first_item->tree_list, &item_list);
- batch.total_data_size = first_item->data_len;
+ batch.total_data_size = first_data_size;
batch.nr = 1;
- total_size = first_item->data_len + sizeof(struct btrfs_item);
+ total_size = first_data_size + sizeof(struct btrfs_item);
curr = first_item;
while (true) {
int next_size;
next = __btrfs_next_delayed_item(curr);
- if (!next || !btrfs_is_continuous_delayed_item(curr, next))
+ if (!next)
break;
+ /*
+ * We cannot allow gaps in the key space if we're doing log
+ * replay.
+ */
+ if (continuous_keys_only && (next->index != curr->index + 1))
+ break;
+
+ ASSERT(next->bytes_reserved == 0);
+
next_size = next->data_len + sizeof(struct btrfs_item);
if (total_size + next_size > max_size)
break;
@@ -713,25 +728,28 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
}
if (batch.nr == 1) {
- batch.keys = &first_item->key;
- batch.data_sizes = &first_item->data_len;
+ first_key.objectid = node->inode_id;
+ first_key.type = BTRFS_DIR_INDEX_KEY;
+ first_key.offset = first_item->index;
+ batch.keys = &first_key;
+ batch.data_sizes = &first_data_size;
} else {
struct btrfs_key *ins_keys;
u32 *ins_sizes;
int i = 0;
- ins_data = kmalloc(batch.nr * sizeof(u32) +
- batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
- if (!ins_data) {
- ret = -ENOMEM;
- goto out;
- }
+ ins_data = kmalloc_array(batch.nr,
+ sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
+ if (!ins_data)
+ return -ENOMEM;
ins_sizes = (u32 *)ins_data;
ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
batch.keys = ins_keys;
batch.data_sizes = ins_sizes;
list_for_each_entry(curr, &item_list, tree_list) {
- ins_keys[i] = curr->key;
+ ins_keys[i].objectid = node->inode_id;
+ ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
+ ins_keys[i].offset = curr->index;
ins_sizes[i] = curr->data_len;
i++;
}
@@ -739,7 +757,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_items(trans, root, path, &batch);
if (ret)
- goto out;
+ return ret;
list_for_each_entry(curr, &item_list, tree_list) {
char *data_ptr;
@@ -757,14 +775,45 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
*/
btrfs_release_path(path);
+ ASSERT(node->index_item_leaves > 0);
+
+ /*
+ * For normal operations we will batch an entire leaf's worth of delayed
+ * items, so if there are more items to process we can decrement
+ * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
+ *
+ * However for log replay we may not have inserted an entire leaf's
+ * worth of items, we may have not had continuous items, so decrementing
+ * here would mess up the index_item_leaves accounting. For this case
+ * only clean up the accounting when there are no items left.
+ */
+ if (next && !continuous_keys_only) {
+ /*
+ * We inserted one batch of items into a leaf and there are more
+ * items to flush in a future batch, so now release one unit of
+ * metadata space from the delayed block reserve, corresponding
+ * to the leaf we just flushed.
+ */
+ btrfs_delayed_item_release_leaves(node, 1);
+ node->index_item_leaves--;
+ } else if (!next) {
+ /*
+ * There are no more items to insert. We can have a number of
+ * reserved leaves > 1 here - this happens when many dir index
+ * items are added and then removed before they are flushed (file
+ * names with a very short life, never span a transaction). So
+ * release all remaining leaves.
+ */
+ btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
+ node->index_item_leaves = 0;
+ }
+
list_for_each_entry_safe(curr, next, &item_list, tree_list) {
list_del(&curr->tree_list);
- btrfs_delayed_item_release_metadata(root, curr);
btrfs_release_delayed_item(curr);
}
-out:
- kfree(ins_data);
- return ret;
+
+ return 0;
}
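
The batching rule implemented above can be stated on its own: accumulate items in index order until the next one would overflow the leaf, and during log replay additionally stop at the first gap in the index sequence. A standalone sketch of just that decision, with illustrative types and an item_overhead parameter standing in for sizeof(struct btrfs_item):

#include <linux/types.h>

/* Illustrative only: a delayed insertion reduced to its index and payload size. */
struct pending_item {
	u64 index;
	u32 data_len;
};

/*
 * Return how many leading items of @items fit into one batch of at most
 * @max_size bytes, where each item also costs @item_overhead bytes of leaf
 * space. When @continuous_only is true (the log replay case), the batch also
 * ends at the first gap in the index sequence.
 */
static unsigned int batch_count(const struct pending_item *items, unsigned int nr,
				u32 max_size, u32 item_overhead, bool continuous_only)
{
	u32 total = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		u32 size = items[i].data_len + item_overhead;

		if (total + size > max_size)
			break;
		if (continuous_only && i > 0 &&
		    items[i].index != items[i - 1].index + 1)
			break;
		total += size;
	}
	return i;
}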
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
@@ -795,62 +844,77 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
+ const u64 ino = item->delayed_node->inode_id;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
- struct extent_buffer *leaf;
- struct btrfs_key key;
- struct list_head head;
- int nitems, i, last_item;
- int ret = 0;
+ struct extent_buffer *leaf = path->nodes[0];
+ LIST_HEAD(batch_list);
+ int nitems, slot, last_slot;
+ int ret;
+ u64 total_reserved_size = item->bytes_reserved;
- BUG_ON(!path->nodes[0]);
+ ASSERT(leaf != NULL);
- leaf = path->nodes[0];
+ slot = path->slots[0];
+ last_slot = btrfs_header_nritems(leaf) - 1;
+ /*
+ * Our caller always gives us a path pointing to an existing item, so
+ * this cannot happen.
+ */
+ ASSERT(slot <= last_slot);
+ if (WARN_ON(slot > last_slot))
+ return -ENOENT;
- i = path->slots[0];
- last_item = btrfs_header_nritems(leaf) - 1;
- if (i > last_item)
- return -ENOENT; /* FIXME: Is errno suitable? */
+ nitems = 1;
+ curr = item;
+ list_add_tail(&curr->tree_list, &batch_list);
- next = item;
- INIT_LIST_HEAD(&head);
- btrfs_item_key_to_cpu(leaf, &key, i);
- nitems = 0;
/*
- * count the number of the dir index items that we can delete in batch
+ * Keep checking if the next delayed item matches the next item in the
+ * leaf - if so, we can add it to the batch of items to delete from the
+ * leaf.
*/
- while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
- list_add_tail(&next->tree_list, &head);
- nitems++;
+ while (slot < last_slot) {
+ struct btrfs_key key;
- curr = next;
next = __btrfs_next_delayed_item(curr);
if (!next)
break;
- if (!btrfs_is_continuous_delayed_item(curr, next))
- break;
-
- i++;
- if (i > last_item)
+ slot++;
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != ino ||
+ key.type != BTRFS_DIR_INDEX_KEY ||
+ key.offset != next->index)
break;
- btrfs_item_key_to_cpu(leaf, &key, i);
+ nitems++;
+ curr = next;
+ list_add_tail(&curr->tree_list, &batch_list);
+ total_reserved_size += curr->bytes_reserved;
}
- if (!nitems)
- return 0;
-
ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
if (ret)
- goto out;
+ return ret;
+
+ /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
+ if (total_reserved_size > 0) {
+ /*
+ * Check btrfs_delayed_item_reserve_metadata() to see why we
+ * don't need to release/reserve qgroup space.
+ */
+ trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
+ total_reserved_size, 0);
+ btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
+ total_reserved_size, NULL);
+ }
- list_for_each_entry_safe(curr, next, &head, tree_list) {
- btrfs_delayed_item_release_metadata(root, curr);
+ list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
-out:
- return ret;
+ return 0;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
@@ -858,43 +922,57 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_delayed_node *node)
{
- struct btrfs_delayed_item *curr, *prev;
+ struct btrfs_key key;
int ret = 0;
-do_again:
- mutex_lock(&node->mutex);
- curr = __btrfs_first_delayed_deletion_item(node);
- if (!curr)
- goto delete_fail;
+ key.objectid = node->inode_id;
+ key.type = BTRFS_DIR_INDEX_KEY;
+
+ while (ret == 0) {
+ struct btrfs_delayed_item *item;
+
+ mutex_lock(&node->mutex);
+ item = __btrfs_first_delayed_deletion_item(node);
+ if (!item) {
+ mutex_unlock(&node->mutex);
+ break;
+ }
+
+ key.offset = item->index;
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0) {
+ /*
+ * There's no matching item in the leaf. This means we
+ * have already deleted this item in a past run of the
+ * delayed items. We ignore errors when running delayed
+ * items from an async context, through a work queue job
+ * running btrfs_async_run_delayed_root(), and don't
+ * release delayed items that failed to complete. This
+ * is because we will retry later, and at transaction
+ * commit time we always run delayed items and will
+ * then deal with errors if they fail to run again.
+ *
+ * So just release delayed items for which we can't find
+ * an item in the tree, and move to the next item.
+ */
+ btrfs_release_path(path);
+ btrfs_release_delayed_item(item);
+ ret = 0;
+ } else if (ret == 0) {
+ ret = btrfs_batch_delete_items(trans, root, path, item);
+ btrfs_release_path(path);
+ }
- ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
- if (ret < 0)
- goto delete_fail;
- else if (ret > 0) {
/*
- * can't find the item which the node points to, so this node
- * is invalid, just drop it.
+ * We unlock and relock on each iteration, this is to prevent
+ * blocking other tasks for too long while we are being run from
+ * the async context (work queue job). Those tasks are typically
+ * running system calls like creat/mkdir/rename/unlink/etc which
+ * need to add delayed items to this delayed node.
*/
- prev = curr;
- curr = __btrfs_next_delayed_item(prev);
- btrfs_release_delayed_item(prev);
- ret = 0;
- btrfs_release_path(path);
- if (curr) {
- mutex_unlock(&node->mutex);
- goto do_again;
- } else
- goto delete_fail;
+ mutex_unlock(&node->mutex);
}
- btrfs_batch_delete_items(trans, root, path, curr);
- btrfs_release_path(path);
- mutex_unlock(&node->mutex);
- goto do_again;
-
-delete_fail:
- btrfs_release_path(path);
- mutex_unlock(&node->mutex);
return ret;
}
@@ -904,7 +982,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
if (delayed_node &&
test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
- BUG_ON(!delayed_node->root);
+ ASSERT(delayed_node->root);
clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count--;
@@ -951,27 +1029,55 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_inode(trans, root, path, &key, mod);
if (ret > 0)
ret = -ENOENT;
- if (ret < 0)
+ if (ret < 0) {
+ /*
+ * If we fail to update the delayed inode we need to abort the
+ * transaction, because we could leave the inode behind with
+ * improper counts.
+ */
+ if (unlikely(ret != -ENOENT))
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
sizeof(struct btrfs_inode_item));
- btrfs_mark_buffer_dirty(leaf);
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
goto out;
- path->slots[0]++;
- if (path->slots[0] >= btrfs_header_nritems(leaf))
- goto search;
-again:
+ /*
+ * Now we're going to delete the INODE_REF/EXTREF, which should be the
+ * only ref left. Check if the next item is an INODE_REF/EXTREF.
+ *
+ * But if we're the last item already, release and search for the last
+ * INODE_REF/EXTREF.
+ */
+ if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
+ key.objectid = node->inode_id;
+ key.type = BTRFS_INODE_EXTREF_KEY;
+ key.offset = (u64)-1;
+
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto err_out;
+ }
+ ASSERT(ret > 0);
+ ASSERT(path->slots[0] > 0);
+ ret = 0;
+ path->slots[0]--;
+ leaf = path->nodes[0];
+ } else {
+ path->slots[0]++;
+ }
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != node->inode_id)
goto out;
-
if (key.type != BTRFS_INODE_REF_KEY &&
key.type != BTRFS_INODE_EXTREF_KEY)
goto out;
@@ -981,39 +1087,16 @@ again:
* so there is only one iref. The case that several irefs are
* in the same item doesn't exist.
*/
- btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, path);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
out:
btrfs_release_delayed_iref(node);
btrfs_release_path(path);
err_out:
btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
btrfs_release_delayed_inode(node);
-
- /*
- * If we fail to update the delayed inode we need to abort the
- * transaction, because we could leave the inode with the improper
- * counts behind.
- */
- if (ret && ret != -ENOENT)
- btrfs_abort_transaction(trans, ret);
-
return ret;
-
-search:
- btrfs_release_path(path);
-
- key.type = BTRFS_INODE_EXTREF_KEY;
- key.offset = -1;
-
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto err_out;
- ASSERT(ret);
-
- ret = 0;
- leaf = path->nodes[0];
- path->slots[0]--;
- goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
@@ -1049,6 +1132,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
if (ret)
return ret;
+ ret = btrfs_record_root_in_trans(trans, node->root);
+ if (ret)
+ return ret;
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
return ret;
}
@@ -1064,6 +1150,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
+ struct btrfs_ref_tracker curr_delayed_node_tracker, prev_delayed_node_tracker;
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret = 0;
@@ -1081,25 +1168,39 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
delayed_root = fs_info->delayed_root;
- curr_node = btrfs_first_delayed_node(delayed_root);
+ curr_node = btrfs_first_delayed_node(delayed_root, &curr_delayed_node_tracker);
while (curr_node && (!count || nr--)) {
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
- if (ret) {
- btrfs_release_delayed_node(curr_node);
- curr_node = NULL;
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
prev_node = curr_node;
- curr_node = btrfs_next_delayed_node(curr_node);
- btrfs_release_delayed_node(prev_node);
+ prev_delayed_node_tracker = curr_delayed_node_tracker;
+ curr_node = btrfs_next_delayed_node(curr_node, &curr_delayed_node_tracker);
+ /*
+ * See the comment below about releasing path before releasing
+ * node. If the commit of delayed items was successful the path
+ * should always be released, but in case of an error, it may
+ * point to locked extent buffers (a leaf at the very least).
+ */
+ ASSERT(path->nodes[0] == NULL);
+ btrfs_release_delayed_node(prev_node, &prev_delayed_node_tracker);
}
- if (curr_node)
- btrfs_release_delayed_node(curr_node);
+ /*
+ * Release the path to avoid a potential deadlock and lockdep splat when
+ * releasing the delayed node, as that requires taking the delayed node's
+ * mutex. If another task starts running delayed items before we take
+ * the mutex, it will first lock the mutex and then it may try to lock
+ * the same btree path (leaf).
+ */
btrfs_free_path(path);
+
+ if (curr_node)
+ btrfs_release_delayed_node(curr_node, &curr_delayed_node_tracker);
trans->block_rsv = block_rsv;
return ret;
@@ -1118,8 +1219,10 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
- struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
- struct btrfs_path *path;
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *delayed_node =
+ btrfs_get_delayed_node(inode, &delayed_node_tracker);
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_block_rsv *block_rsv;
int ret;
@@ -1129,14 +1232,14 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
if (!delayed_node->count) {
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
mutex_unlock(&delayed_node->mutex);
path = btrfs_alloc_path();
if (!path) {
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return -ENOMEM;
}
@@ -1145,8 +1248,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
- btrfs_release_delayed_node(delayed_node);
- btrfs_free_path(path);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
trans->block_rsv = block_rsv;
return ret;
@@ -1156,18 +1258,20 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_trans_handle *trans;
- struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *delayed_node;
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret;
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return 0;
mutex_lock(&delayed_node->mutex);
if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
mutex_unlock(&delayed_node->mutex);
@@ -1201,7 +1305,7 @@ trans_out:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out:
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return ret;
}
@@ -1215,7 +1319,8 @@ void btrfs_remove_delayed_node(struct btrfs_inode *inode)
return;
inode->delayed_node = NULL;
- btrfs_release_delayed_node(delayed_node);
+
+ btrfs_release_delayed_node(delayed_node, &delayed_node->inode_cache_tracker);
}
struct btrfs_async_delayed_work {
@@ -1231,6 +1336,7 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
struct btrfs_delayed_node *delayed_node = NULL;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_root *root;
struct btrfs_block_rsv *block_rsv;
int total_done = 0;
@@ -1247,7 +1353,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
BTRFS_DELAYED_BACKGROUND / 2)
break;
- delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
+ delayed_node = btrfs_first_prepared_delayed_node(delayed_root,
+ &delayed_node_tracker);
if (!delayed_node)
break;
@@ -1256,7 +1363,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_release_path(path);
- btrfs_release_prepared_delayed_node(delayed_node);
+ btrfs_release_prepared_delayed_node(delayed_node,
+ &delayed_node_tracker);
total_done++;
continue;
}
@@ -1271,7 +1379,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
btrfs_btree_balance_dirty_nodelay(root->fs_info);
btrfs_release_path(path);
- btrfs_release_prepared_delayed_node(delayed_node);
+ btrfs_release_prepared_delayed_node(delayed_node,
+ &delayed_node_tracker);
total_done++;
} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
@@ -1294,8 +1403,7 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return -ENOMEM;
async_work->delayed_root = delayed_root;
- btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
- NULL);
+ btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
async_work->nr = nr;
btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@@ -1304,20 +1412,28 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
- WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *node;
+
+ node = btrfs_first_delayed_node(fs_info->delayed_root, &delayed_node_tracker);
+ if (WARN_ON(node)) {
+ btrfs_delayed_node_ref_tracker_free(node,
+ &delayed_node_tracker);
+ refcount_dec(&node->refs);
+ }
}
-static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
+static bool could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
int val = atomic_read(&delayed_root->items_seq);
if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
- return 1;
+ return true;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
- return 1;
+ return true;
- return 0;
+ return false;
}
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
@@ -1346,143 +1462,231 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
-/* Will return 0 or -ENOMEM */
+static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
+ return;
+
+ /*
+ * Adding the new dir index item does not require touching another
+ * leaf, so we can release 1 unit of metadata that was previously
+ * reserved when starting the transaction. This applies only to
+ * the case where we had a transaction start and excludes the
+ * transaction join case (when replaying log trees).
+ */
+ trace_btrfs_space_reservation(fs_info, "transaction",
+ trans->transid, bytes, 0);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+ ASSERT(trans->bytes_reserved >= bytes);
+ trans->bytes_reserved -= bytes;
+}
+
+/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
- struct btrfs_disk_key *disk_key, u8 type,
+ const struct btrfs_disk_key *disk_key, u8 flags,
u64 index)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_delayed_item *delayed_item;
struct btrfs_dir_item *dir_item;
+ bool reserve_leaf_space;
+ u32 data_len;
int ret;
- delayed_node = btrfs_get_or_create_delayed_node(dir);
+ delayed_node = btrfs_get_or_create_delayed_node(dir, &delayed_node_tracker);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
- delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
+ delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
+ delayed_node,
+ BTRFS_DELAYED_INSERTION_ITEM);
if (!delayed_item) {
ret = -ENOMEM;
goto release_node;
}
- delayed_item->key.objectid = btrfs_ino(dir);
- delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
- delayed_item->key.offset = index;
+ delayed_item->index = index;
dir_item = (struct btrfs_dir_item *)delayed_item->data;
dir_item->location = *disk_key;
btrfs_set_stack_dir_transid(dir_item, trans->transid);
btrfs_set_stack_dir_data_len(dir_item, 0);
btrfs_set_stack_dir_name_len(dir_item, name_len);
- btrfs_set_stack_dir_type(dir_item, type);
+ btrfs_set_stack_dir_flags(dir_item, flags);
memcpy((char *)(dir_item + 1), name, name_len);
- ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
- /*
- * we have reserved enough space when we start a new transaction,
- * so reserving metadata failure is impossible
- */
- BUG_ON(ret);
+ data_len = delayed_item->data_len + sizeof(struct btrfs_item);
mutex_lock(&delayed_node->mutex);
- ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
+
+ /*
+ * First attempt to insert the delayed item. This is to make the error
+ * handling path simpler in case we fail (-EEXIST). There's no risk of
+ * any other task coming in and running the delayed item before we do
+ * the metadata space reservation below, because we are holding the
+ * delayed node's mutex and that mutex must also be locked before the
+ * node's delayed items can be run.
+ */
+ ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
- "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->root_key.objectid,
- delayed_node->inode_id, ret);
- BUG();
+"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
+ name_len, name, index, btrfs_root_id(delayed_node->root),
+ delayed_node->inode_id, dir->index_cnt,
+ delayed_node->index_cnt, ret);
+ btrfs_release_delayed_item(delayed_item);
+ btrfs_release_dir_index_item_space(trans);
+ mutex_unlock(&delayed_node->mutex);
+ goto release_node;
+ }
+
+ if (delayed_node->index_item_leaves == 0 ||
+ delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
+ delayed_node->curr_index_batch_size = data_len;
+ reserve_leaf_space = true;
+ } else {
+ delayed_node->curr_index_batch_size += data_len;
+ reserve_leaf_space = false;
+ }
+
+ if (reserve_leaf_space) {
+ ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
+ /*
+ * Space was reserved for a dir index item insertion when we
+ * started the transaction, so getting a failure here should be
+ * impossible.
+ */
+ if (WARN_ON(ret)) {
+ btrfs_release_delayed_item(delayed_item);
+ mutex_unlock(&delayed_node->mutex);
+ goto release_node;
+ }
+
+ delayed_node->index_item_leaves++;
+ } else {
+ btrfs_release_dir_index_item_space(trans);
}
mutex_unlock(&delayed_node->mutex);
release_node:
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return ret;
}
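
The leaf accounting in the function above reduces to one decision: if the batch of pending dir index items (including the new one) no longer fits in the current leaf, start a new batch and reserve one more leaf of metadata, otherwise just grow the batch and give back the per-item reservation made at transaction start. A reduced sketch with hypothetical field names mirroring curr_index_batch_size and index_item_leaves:

#include <linux/types.h>

/* Illustrative state only. */
struct dir_index_accounting {
	u32 curr_batch_size;	/* bytes queued for the leaf currently being filled */
	u32 leaves_reserved;	/* leaves of metadata reserved for pending items */
};

/* Returns true when one more leaf worth of metadata must be reserved. */
static bool account_new_dir_index_item(struct dir_index_accounting *acc,
				       u32 item_size, u32 leaf_data_size)
{
	if (acc->leaves_reserved == 0 ||
	    acc->curr_batch_size + item_size > leaf_data_size) {
		/* The item starts a new batch that targets a new leaf. */
		acc->curr_batch_size = item_size;
		acc->leaves_reserved++;
		return true;
	}
	acc->curr_batch_size += item_size;
	return false;
}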
-static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_node *node,
- struct btrfs_key *key)
+static bool btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node,
+ u64 index)
{
struct btrfs_delayed_item *item;
mutex_lock(&node->mutex);
- item = __btrfs_lookup_delayed_insertion_item(node, key);
+ item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
if (!item) {
mutex_unlock(&node->mutex);
- return 1;
+ return false;
+ }
+
+ /*
+ * For delayed items to insert, we track reserved metadata bytes based
+ * on the number of leaves that we will use.
+ * See btrfs_insert_delayed_dir_index() and
+ * btrfs_delayed_item_reserve_metadata().
+ */
+ ASSERT(item->bytes_reserved == 0);
+ ASSERT(node->index_item_leaves > 0);
+
+ /*
+ * If there's only one leaf reserved, we can decrement this item from the
+ * current batch, otherwise we cannot because we don't know which leaf
+ * it belongs to. With the current limit on delayed items, we rarely
+ * accumulate enough dir index items to fill more than one leaf (even
+ * when using a leaf size of 4K).
+ */
+ if (node->index_item_leaves == 1) {
+ const u32 data_len = item->data_len + sizeof(struct btrfs_item);
+
+ ASSERT(node->curr_index_batch_size >= data_len);
+ node->curr_index_batch_size -= data_len;
}
- btrfs_delayed_item_release_metadata(node->root, item);
btrfs_release_delayed_item(item);
+
+ /* If we now have no more dir index items, we can release all leaves. */
+ if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
+ btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
+ node->index_item_leaves = 0;
+ }
+
mutex_unlock(&node->mutex);
- return 0;
+ return true;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_delayed_item *item;
- struct btrfs_key item_key;
int ret;
- node = btrfs_get_or_create_delayed_node(dir);
+ node = btrfs_get_or_create_delayed_node(dir, &delayed_node_tracker);
if (IS_ERR(node))
return PTR_ERR(node);
- item_key.objectid = btrfs_ino(dir);
- item_key.type = BTRFS_DIR_INDEX_KEY;
- item_key.offset = index;
-
- ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
- &item_key);
- if (!ret)
+ if (btrfs_delete_delayed_insertion_item(node, index)) {
+ ret = 0;
goto end;
+ }
- item = btrfs_alloc_delayed_item(0);
+ item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
if (!item) {
ret = -ENOMEM;
goto end;
}
- item->key = item_key;
+ item->index = index;
- ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
+ ret = btrfs_delayed_item_reserve_metadata(trans, item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
*/
if (ret < 0) {
btrfs_err(trans->fs_info,
-"metadata reservation failed for delayed dir item deltiona, should have been reserved");
+"metadata reservation failed for delayed dir item deletion, index: %llu, root: %llu, inode: %llu, error: %d",
+ index, btrfs_root_id(node->root), node->inode_id, ret);
btrfs_release_delayed_item(item);
goto end;
}
mutex_lock(&node->mutex);
- ret = __btrfs_add_delayed_deletion_item(node, item);
+ ret = __btrfs_add_delayed_item(node, item);
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
- "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- index, node->root->root_key.objectid,
- node->inode_id, ret);
+"failed to add delayed dir index item, root: %llu, inode: %llu, index: %llu, error: %d",
+ btrfs_root_id(node->root), node->inode_id, index, ret);
btrfs_delayed_item_release_metadata(dir->root, item);
btrfs_release_delayed_item(item);
}
mutex_unlock(&node->mutex);
end:
- btrfs_release_delayed_node(node);
+ btrfs_release_delayed_node(node, &delayed_node_tracker);
return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
- struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ struct btrfs_ref_tracker delayed_node_tracker;
+ struct btrfs_delayed_node *delayed_node;
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return -ENOENT;
@@ -1492,23 +1696,25 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
* is updated now. So we needn't lock the delayed node.
*/
if (!delayed_node->index_cnt) {
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return -EINVAL;
}
inode->index_cnt = delayed_node->index_cnt;
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
-bool btrfs_readdir_get_delayed_items(struct inode *inode,
+bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
+ u64 last_index,
struct list_head *ins_list,
struct list_head *del_list)
{
struct btrfs_delayed_node *delayed_node;
struct btrfs_delayed_item *item;
+ struct btrfs_ref_tracker delayed_node_tracker;
- delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return false;
@@ -1521,14 +1727,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
mutex_lock(&delayed_node->mutex);
item = __btrfs_first_delayed_insertion_item(delayed_node);
- while (item) {
+ while (item && item->index <= last_index) {
refcount_inc(&item->refs);
list_add_tail(&item->readdir_list, ins_list);
item = __btrfs_next_delayed_item(item);
}
item = __btrfs_first_delayed_deletion_item(delayed_node);
- while (item) {
+ while (item && item->index <= last_index) {
refcount_inc(&item->refs);
list_add_tail(&item->readdir_list, del_list);
item = __btrfs_next_delayed_item(item);
@@ -1543,12 +1749,13 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
* insert/delete delayed items in this period. So we also needn't
* requeue or dequeue this delayed node.
*/
+ btrfs_delayed_node_ref_tracker_free(delayed_node, &delayed_node_tracker);
refcount_dec(&delayed_node->refs);
return true;
}
-void btrfs_readdir_put_delayed_items(struct inode *inode,
+void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
struct list_head *ins_list,
struct list_head *del_list)
{
@@ -1570,20 +1777,19 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
* The VFS is going to do up_read(), so we need to downgrade back to a
* read lock.
*/
- downgrade_write(&inode->i_rwsem);
+ downgrade_write(&inode->vfs_inode.i_rwsem);
}
-int btrfs_should_delete_dir_index(struct list_head *del_list,
- u64 index)
+bool btrfs_should_delete_dir_index(const struct list_head *del_list, u64 index)
{
struct btrfs_delayed_item *curr;
- int ret = 0;
+ bool ret = false;
list_for_each_entry(curr, del_list, readdir_list) {
- if (curr->key.offset > index)
+ if (curr->index > index)
break;
- if (curr->key.offset == index) {
- ret = 1;
+ if (curr->index == index) {
+ ret = true;
break;
}
}
@@ -1591,177 +1797,167 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
}
/*
- * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
- *
+ * Read dir info stored in the delayed tree.
*/
-int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list)
+bool btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+ const struct list_head *ins_list)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
struct btrfs_key location;
char *name;
int name_len;
- int over = 0;
unsigned char d_type;
- if (list_empty(ins_list))
- return 0;
-
/*
* Changing the data of the delayed item is impossible. So
* we needn't lock them. And we have held i_mutex of the
* directory, nobody can delete any directory indexes now.
*/
list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+ bool over;
+
list_del(&curr->readdir_list);
- if (curr->key.offset < ctx->pos) {
+ if (curr->index < ctx->pos) {
if (refcount_dec_and_test(&curr->refs))
kfree(curr);
continue;
}
- ctx->pos = curr->key.offset;
+ ctx->pos = curr->index;
di = (struct btrfs_dir_item *)curr->data;
name = (char *)(di + 1);
name_len = btrfs_stack_dir_name_len(di);
- d_type = fs_ftype_to_dtype(di->type);
+ d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
btrfs_disk_key_to_cpu(&location, &di->location);
- over = !dir_emit(ctx, name, name_len,
- location.objectid, d_type);
+ over = !dir_emit(ctx, name, name_len, location.objectid, d_type);
if (refcount_dec_and_test(&curr->refs))
kfree(curr);
if (over)
- return 1;
+ return true;
ctx->pos++;
}
- return 0;
+ return false;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *inode_item,
- struct inode *inode)
+ struct btrfs_inode *inode)
{
+ struct inode *vfs_inode = &inode->vfs_inode;
u64 flags;
- btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
- btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
- btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
- btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
- btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
- btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
- btrfs_set_stack_inode_generation(inode_item,
- BTRFS_I(inode)->generation);
+ btrfs_set_stack_inode_uid(inode_item, i_uid_read(vfs_inode));
+ btrfs_set_stack_inode_gid(inode_item, i_gid_read(vfs_inode));
+ btrfs_set_stack_inode_size(inode_item, inode->disk_i_size);
+ btrfs_set_stack_inode_mode(inode_item, vfs_inode->i_mode);
+ btrfs_set_stack_inode_nlink(inode_item, vfs_inode->i_nlink);
+ btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(vfs_inode));
+ btrfs_set_stack_inode_generation(inode_item, inode->generation);
btrfs_set_stack_inode_sequence(inode_item,
- inode_peek_iversion(inode));
+ inode_peek_iversion(vfs_inode));
btrfs_set_stack_inode_transid(inode_item, trans->transid);
- btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
- flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
- BTRFS_I(inode)->ro_flags);
+ btrfs_set_stack_inode_rdev(inode_item, vfs_inode->i_rdev);
+ flags = btrfs_inode_combine_flags(inode->flags, inode->ro_flags);
btrfs_set_stack_inode_flags(inode_item, flags);
btrfs_set_stack_inode_block_group(inode_item, 0);
btrfs_set_stack_timespec_sec(&inode_item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(vfs_inode));
btrfs_set_stack_timespec_nsec(&inode_item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(vfs_inode));
btrfs_set_stack_timespec_sec(&inode_item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(vfs_inode));
btrfs_set_stack_timespec_nsec(&inode_item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(vfs_inode));
btrfs_set_stack_timespec_sec(&inode_item->ctime,
- inode->i_ctime.tv_sec);
+ inode_get_ctime_sec(vfs_inode));
btrfs_set_stack_timespec_nsec(&inode_item->ctime,
- inode->i_ctime.tv_nsec);
+ inode_get_ctime_nsec(vfs_inode));
- btrfs_set_stack_timespec_sec(&inode_item->otime,
- BTRFS_I(inode)->i_otime.tv_sec);
- btrfs_set_stack_timespec_nsec(&inode_item->otime,
- BTRFS_I(inode)->i_otime.tv_nsec);
+ btrfs_set_stack_timespec_sec(&inode_item->otime, inode->i_otime_sec);
+ btrfs_set_stack_timespec_nsec(&inode_item->otime, inode->i_otime_nsec);
}
-int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev)
{
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_inode_item *inode_item;
+ struct inode *vfs_inode = &inode->vfs_inode;
- delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return -ENOENT;
mutex_lock(&delayed_node->mutex);
if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return -ENOENT;
}
inode_item = &delayed_node->inode_item;
- i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
- i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
- btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
- btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
- round_up(i_size_read(inode), fs_info->sectorsize));
- inode->i_mode = btrfs_stack_inode_mode(inode_item);
- set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
- inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
- BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
- BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
-
- inode_set_iversion_queried(inode,
- btrfs_stack_inode_sequence(inode_item));
- inode->i_rdev = 0;
+ i_uid_write(vfs_inode, btrfs_stack_inode_uid(inode_item));
+ i_gid_write(vfs_inode, btrfs_stack_inode_gid(inode_item));
+ btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+ vfs_inode->i_mode = btrfs_stack_inode_mode(inode_item);
+ set_nlink(vfs_inode, btrfs_stack_inode_nlink(inode_item));
+ inode_set_bytes(vfs_inode, btrfs_stack_inode_nbytes(inode_item));
+ inode->generation = btrfs_stack_inode_generation(inode_item);
+ inode->last_trans = btrfs_stack_inode_transid(inode_item);
+
+ inode_set_iversion_queried(vfs_inode, btrfs_stack_inode_sequence(inode_item));
+ vfs_inode->i_rdev = 0;
*rdev = btrfs_stack_inode_rdev(inode_item);
btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
- &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
+ &inode->flags, &inode->ro_flags);
- inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
- inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
+ inode_set_atime(vfs_inode, btrfs_stack_timespec_sec(&inode_item->atime),
+ btrfs_stack_timespec_nsec(&inode_item->atime));
- inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
- inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
+ inode_set_mtime(vfs_inode, btrfs_stack_timespec_sec(&inode_item->mtime),
+ btrfs_stack_timespec_nsec(&inode_item->mtime));
- inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
- inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
+ inode_set_ctime(vfs_inode, btrfs_stack_timespec_sec(&inode_item->ctime),
+ btrfs_stack_timespec_nsec(&inode_item->ctime));
- BTRFS_I(inode)->i_otime.tv_sec =
- btrfs_stack_timespec_sec(&inode_item->otime);
- BTRFS_I(inode)->i_otime.tv_nsec =
- btrfs_stack_timespec_nsec(&inode_item->otime);
+ inode->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
+ inode->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
- inode->i_generation = BTRFS_I(inode)->generation;
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ vfs_inode->i_generation = inode->generation;
+ if (S_ISDIR(vfs_inode->i_mode))
+ inode->index_cnt = (u64)-1;
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_inode *inode)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
int ret = 0;
- delayed_node = btrfs_get_or_create_delayed_node(inode);
+ delayed_node = btrfs_get_or_create_delayed_node(inode, &delayed_node_tracker);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
- fill_stack_inode_item(trans, &delayed_node->inode_item,
- &inode->vfs_inode);
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
goto release_node;
}
@@ -1769,13 +1965,13 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
if (ret)
goto release_node;
- fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count++;
atomic_inc(&root->fs_info->delayed_root->items);
release_node:
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return ret;
}
@@ -1783,6 +1979,7 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
/*
* we don't do delayed inode updates during log recovery because it
@@ -1792,7 +1989,7 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return -EAGAIN;
- delayed_node = btrfs_get_or_create_delayed_node(inode);
+ delayed_node = btrfs_get_or_create_delayed_node(inode, &delayed_node_tracker);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
@@ -1811,15 +2008,12 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
* It is very rare.
*/
mutex_lock(&delayed_node->mutex);
- if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
- goto release_node;
-
- set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
- delayed_node->count++;
- atomic_inc(&fs_info->delayed_root->items);
-release_node:
+ if (!test_and_set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
+ delayed_node->count++;
+ atomic_inc(&fs_info->delayed_root->items);
+ }
mutex_unlock(&delayed_node->mutex);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
return 0;
}
@@ -1832,12 +2026,17 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
mutex_lock(&delayed_node->mutex);
curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
while (curr_item) {
- btrfs_delayed_item_release_metadata(root, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
}
+ if (delayed_node->index_item_leaves > 0) {
+ btrfs_delayed_item_release_leaves(delayed_node,
+ delayed_node->index_item_leaves);
+ delayed_node->index_item_leaves = 0;
+ }
+
curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);
@@ -1858,47 +2057,56 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
struct btrfs_delayed_node *delayed_node;
+ struct btrfs_ref_tracker delayed_node_tracker;
- delayed_node = btrfs_get_delayed_node(inode);
+ delayed_node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
if (!delayed_node)
return;
__btrfs_kill_delayed_node(delayed_node);
- btrfs_release_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
- u64 inode_id = 0;
+ unsigned long index = 0;
struct btrfs_delayed_node *delayed_nodes[8];
- int i, n;
+ struct btrfs_ref_tracker delayed_node_trackers[8];
while (1) {
- spin_lock(&root->inode_lock);
- n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
- (void **)delayed_nodes, inode_id,
- ARRAY_SIZE(delayed_nodes));
- if (!n) {
- spin_unlock(&root->inode_lock);
- break;
+ struct btrfs_delayed_node *node;
+ int count;
+
+ xa_lock(&root->delayed_nodes);
+ if (xa_empty(&root->delayed_nodes)) {
+ xa_unlock(&root->delayed_nodes);
+ return;
}
- inode_id = delayed_nodes[n - 1]->inode_id + 1;
- for (i = 0; i < n; i++) {
+ count = 0;
+ xa_for_each_start(&root->delayed_nodes, index, node, index) {
/*
* Don't increase refs in case the node is dead and
* about to be removed from the tree in the loop below
*/
- if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
- delayed_nodes[i] = NULL;
+ if (refcount_inc_not_zero(&node->refs)) {
+ btrfs_delayed_node_ref_tracker_alloc(node,
+ &delayed_node_trackers[count],
+ GFP_ATOMIC);
+ delayed_nodes[count] = node;
+ count++;
+ }
+ if (count >= ARRAY_SIZE(delayed_nodes))
+ break;
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
+ index++;
- for (i = 0; i < n; i++) {
- if (!delayed_nodes[i])
- continue;
+ for (int i = 0; i < count; i++) {
__btrfs_kill_delayed_node(delayed_nodes[i]);
- btrfs_release_delayed_node(delayed_nodes[i]);
+ btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]);
+ btrfs_release_delayed_node(delayed_nodes[i],
+ &delayed_node_trackers[i]);
}
}
}
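The batched scan above grabs up to eight delayed nodes per pass while holding the xarray lock, then kills and releases them after dropping it, resuming from one past the last index seen. Below is a minimal sketch of that drain pattern, assuming nothing beyond <linux/xarray.h>; the function name and EXAMPLE_BATCH constant are illustrative only, and unlike the real loop it omits the refcount and ref-tracker handling done before unlocking:

#include <linux/xarray.h>

#define EXAMPLE_BATCH 8

static void example_drain_xarray(struct xarray *xa)
{
	unsigned long index = 0;

	while (1) {
		void *batch[EXAMPLE_BATCH];
		void *entry;
		int count = 0;

		xa_lock(xa);
		xa_for_each_start(xa, index, entry, index) {
			batch[count++] = entry;
			if (count >= EXAMPLE_BATCH)
				break;
		}
		xa_unlock(xa);

		if (count == 0)
			break;
		/* Resume the next pass after the last entry we collected. */
		index++;

		for (int i = 0; i < count; i++) {
			/* ... process batch[i] outside the xarray lock ... */
		}
	}
}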
@@ -1906,14 +2114,131 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_node *curr_node, *prev_node;
+ struct btrfs_ref_tracker curr_delayed_node_tracker, prev_delayed_node_tracker;
- curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
+ curr_node = btrfs_first_delayed_node(fs_info->delayed_root,
+ &curr_delayed_node_tracker);
while (curr_node) {
__btrfs_kill_delayed_node(curr_node);
prev_node = curr_node;
- curr_node = btrfs_next_delayed_node(curr_node);
- btrfs_release_delayed_node(prev_node);
+ prev_delayed_node_tracker = curr_delayed_node_tracker;
+ curr_node = btrfs_next_delayed_node(curr_node, &curr_delayed_node_tracker);
+ btrfs_release_delayed_node(prev_node, &prev_delayed_node_tracker);
}
}
+void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_delayed_item *item;
+ struct btrfs_ref_tracker delayed_node_tracker;
+
+ node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
+ if (!node)
+ return;
+
+ mutex_lock(&node->mutex);
+ item = __btrfs_first_delayed_insertion_item(node);
+ while (item) {
+ /*
+ * It's possible that the item is already in a log list. This
+ * can happen in case two tasks are trying to log the same
+ * directory. For example if we have tasks A and task B:
+ *
+ * Task A collected the delayed items into a log list while
+ * under the inode's log_mutex (at btrfs_log_inode()), but it
+ * only releases the items after logging the inodes they point
+ * to (if they are new inodes), which happens after unlocking
+ * the log mutex;
+ *
+ * Task B enters btrfs_log_inode() and acquires the log_mutex
+ * of the same directory inode, before task B releases the
+ * delayed items. This can happen for example when logging some
+ * inode we need to trigger logging of its parent directory, so
+ * logging two files that have the same parent directory can
+ * lead to this.
+ *
+ * If this happens, just ignore delayed items already in a log
+ * list. All the tasks logging the directory are under a log
+ * transaction and whichever finishes first can not sync the log
+ * before the other completes and leaves the log transaction.
+ */
+ if (!item->logged && list_empty(&item->log_list)) {
+ refcount_inc(&item->refs);
+ list_add_tail(&item->log_list, ins_list);
+ }
+ item = __btrfs_next_delayed_item(item);
+ }
+
+ item = __btrfs_first_delayed_deletion_item(node);
+ while (item) {
+ /* It may be non-empty, for the same reason mentioned above. */
+ if (!item->logged && list_empty(&item->log_list)) {
+ refcount_inc(&item->refs);
+ list_add_tail(&item->log_list, del_list);
+ }
+ item = __btrfs_next_delayed_item(item);
+ }
+ mutex_unlock(&node->mutex);
+
+ /*
+ * We are called during inode logging, which means the inode is in use
+ * and cannot be evicted before we finish logging the inode. So we never
+ * have the last reference on the delayed inode.
+ * Also, we don't use btrfs_release_delayed_node() because that would
+ * requeue the delayed inode (change its order in the list of prepared
+ * nodes) and we don't want to make such a change because we don't create or
+ * delete delayed items.
+ */
+ ASSERT(refcount_read(&node->refs) > 1);
+ btrfs_delayed_node_ref_tracker_free(node, &delayed_node_tracker);
+ refcount_dec(&node->refs);
+}
+
+void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_delayed_item *item;
+ struct btrfs_delayed_item *next;
+ struct btrfs_ref_tracker delayed_node_tracker;
+
+ node = btrfs_get_delayed_node(inode, &delayed_node_tracker);
+ if (!node)
+ return;
+
+ mutex_lock(&node->mutex);
+
+ list_for_each_entry_safe(item, next, ins_list, log_list) {
+ item->logged = true;
+ list_del_init(&item->log_list);
+ if (refcount_dec_and_test(&item->refs))
+ kfree(item);
+ }
+
+ list_for_each_entry_safe(item, next, del_list, log_list) {
+ item->logged = true;
+ list_del_init(&item->log_list);
+ if (refcount_dec_and_test(&item->refs))
+ kfree(item);
+ }
+
+ mutex_unlock(&node->mutex);
+
+ /*
+ * We are called during inode logging, which means the inode is in use
+ * and cannot be evicted before we finish logging the inode. So we never
+ * have the last reference on the delayed inode.
+ * Also, we don't use btrfs_release_delayed_node() because that would
+ * requeue the delayed inode (change its order in the list of prepared
+ * nodes) and we don't want to make such a change because we don't create or
+ * delete delayed items.
+ */
+ ASSERT(refcount_read(&node->refs) > 1);
+ btrfs_delayed_node_ref_tracker_free(node, &delayed_node_tracker);
+ refcount_dec(&node->refs);
+}
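A minimal sketch of how the two helpers above are intended to be paired by the directory logging code; the function name and the elided logging step are hypothetical, the real caller lives in tree-log.c:

static void example_log_dir_delayed_items(struct btrfs_inode *dir)
{
	LIST_HEAD(ins_list);	/* delayed dir index insertions to log */
	LIST_HEAD(del_list);	/* delayed dir index deletions to log */

	/* Take references to the delayed items, under the node's mutex. */
	btrfs_log_get_delayed_items(dir, &ins_list, &del_list);

	/* ... copy the collected index insertions/deletions into the log tree ... */

	/* Mark the items as logged and drop the references taken above. */
	btrfs_log_put_delayed_items(dir, &ins_list, &del_list);
}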
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index b2412160c5bc..b09d4ec8c77d 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -7,18 +7,28 @@
#ifndef BTRFS_DELAYED_INODE_H
#define BTRFS_DELAYED_INODE_H
+#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
+#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
#include "ctree.h"
-/* types of the delayed item */
-#define BTRFS_DELAYED_INSERTION_ITEM 1
-#define BTRFS_DELAYED_DELETION_ITEM 2
+struct btrfs_disk_key;
+struct btrfs_fs_info;
+struct btrfs_inode;
+struct btrfs_root;
+struct btrfs_trans_handle;
+
+enum btrfs_delayed_item_type {
+ BTRFS_DELAYED_INSERTION_ITEM,
+ BTRFS_DELAYED_DELETION_ITEM
+};
struct btrfs_delayed_root {
spinlock_t lock;
@@ -35,6 +45,22 @@ struct btrfs_delayed_root {
wait_queue_head_t wait;
};
+struct btrfs_ref_tracker_dir {
+#ifdef CONFIG_BTRFS_DEBUG
+ struct ref_tracker_dir dir;
+#else
+ struct {} tracker;
+#endif
+};
+
+struct btrfs_ref_tracker {
+#ifdef CONFIG_BTRFS_DEBUG
+ struct ref_tracker *tracker;
+#else
+ struct {} tracker;
+#endif
+};
+
#define BTRFS_DELAYED_NODE_IN_LIST 0
#define BTRFS_DELAYED_NODE_INODE_DIRTY 1
#define BTRFS_DELAYED_NODE_DEL_IREF 2
@@ -55,40 +81,59 @@ struct btrfs_delayed_node {
struct mutex mutex;
struct btrfs_inode_item inode_item;
refcount_t refs;
+ int count;
u64 index_cnt;
unsigned long flags;
- int count;
+ /*
+ * The size of the next batch of dir index items to insert (if this
+ * node is from a directory inode). Protected by @mutex.
+ */
+ u32 curr_index_batch_size;
+ /*
+ * Number of leaves reserved for inserting dir index items (if this
+ * node belongs to a directory inode). This may be larger than the
+ * actual number of leaves we end up using. Protected by @mutex.
+ */
+ u32 index_item_leaves;
+ /* Track all references to this delayed node. */
+ struct btrfs_ref_tracker_dir ref_dir;
+ /* Track delayed node reference stored in node list. */
+ struct btrfs_ref_tracker node_list_tracker;
+ /* Track delayed node reference stored in inode cache. */
+ struct btrfs_ref_tracker inode_cache_tracker;
};
struct btrfs_delayed_item {
struct rb_node rb_node;
- struct btrfs_key key;
+ /* Offset value of the corresponding dir index key. */
+ u64 index;
struct list_head tree_list; /* used for batch insert/delete items */
struct list_head readdir_list; /* used for readdir items */
+ /*
+ * Used when logging a directory.
+ * Insertions into and deletions from this list are protected by the parent
+ * delayed node's mutex.
+ */
+ struct list_head log_list;
u64 bytes_reserved;
struct btrfs_delayed_node *delayed_node;
refcount_t refs;
- int ins_or_del;
- u32 data_len;
- char data[];
+ enum btrfs_delayed_item_type type:8;
+ /*
+ * Track if this delayed item was already logged.
+ * Protected by the mutex of the parent delayed inode.
+ */
+ bool logged;
+ /* The maximum leaf size is 64K, so u16 is more than enough. */
+ u16 data_len;
+ char data[] __counted_by(data_len);
};
-static inline void btrfs_init_delayed_root(
- struct btrfs_delayed_root *delayed_root)
-{
- atomic_set(&delayed_root->items, 0);
- atomic_set(&delayed_root->items_seq, 0);
- delayed_root->nodes = 0;
- spin_lock_init(&delayed_root->lock);
- init_waitqueue_head(&delayed_root->wait);
- INIT_LIST_HEAD(&delayed_root->node_list);
- INIT_LIST_HEAD(&delayed_root->prepare_list);
-}
-
+void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root);
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
- struct btrfs_disk_key *disk_key, u8 type,
+ const struct btrfs_disk_key *disk_key, u8 flags,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
@@ -110,9 +155,8 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_inode *inode);
-int btrfs_fill_inode(struct inode *inode, u32 *rdev);
+int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev);
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode);
/* Used for drop dead root */
@@ -122,16 +166,24 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
/* Used for readdir() */
-bool btrfs_readdir_get_delayed_items(struct inode *inode,
+bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
+ u64 last_index,
struct list_head *ins_list,
struct list_head *del_list);
-void btrfs_readdir_put_delayed_items(struct inode *inode,
+void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
struct list_head *ins_list,
struct list_head *del_list);
-int btrfs_should_delete_dir_index(struct list_head *del_list,
- u64 index);
-int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list);
+bool btrfs_should_delete_dir_index(const struct list_head *del_list, u64 index);
+bool btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+ const struct list_head *ins_list);
+
+/* Used during directory logging. */
+void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list);
+void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list);
/* for init */
int __init btrfs_delayed_inode_init(void);
@@ -140,4 +192,81 @@ void __cold btrfs_delayed_inode_exit(void);
/* for debugging */
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info);
+#define BTRFS_DELAYED_NODE_REF_TRACKER_QUARANTINE_COUNT 16
+#define BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT 16
+
+#ifdef CONFIG_BTRFS_DEBUG
+static inline void btrfs_delayed_node_ref_tracker_dir_init(struct btrfs_delayed_node *node)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return;
+
+ ref_tracker_dir_init(&node->ref_dir.dir,
+ BTRFS_DELAYED_NODE_REF_TRACKER_QUARANTINE_COUNT,
+ "delayed_node");
+}
+
+static inline void btrfs_delayed_node_ref_tracker_dir_exit(struct btrfs_delayed_node *node)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return;
+
+ ref_tracker_dir_exit(&node->ref_dir.dir);
+}
+
+static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed_node *node)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return;
+
+ /*
+ * Only print if there are leaked references. The caller is
+ * holding one reference, so if refs == 1 there is no leak.
+ */
+ if (refcount_read(&node->refs) == 1)
+ return;
+
+ ref_tracker_dir_print(&node->ref_dir.dir,
+ BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT);
+}
+
+static inline int btrfs_delayed_node_ref_tracker_alloc(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker,
+ gfp_t gfp)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return 0;
+
+ return ref_tracker_alloc(&node->ref_dir.dir, &tracker->tracker, gfp);
+}
+
+static inline int btrfs_delayed_node_ref_tracker_free(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
+{
+ if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
+ return 0;
+
+ return ref_tracker_free(&node->ref_dir.dir, &tracker->tracker);
+}
+#else
+static inline void btrfs_delayed_node_ref_tracker_dir_init(struct btrfs_delayed_node *node) { }
+
+static inline void btrfs_delayed_node_ref_tracker_dir_exit(struct btrfs_delayed_node *node) { }
+
+static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed_node *node) { }
+
+static inline int btrfs_delayed_node_ref_tracker_alloc(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int btrfs_delayed_node_ref_tracker_free(struct btrfs_delayed_node *node,
+ struct btrfs_ref_tracker *tracker)
+{
+ return 0;
+}
+#endif
+
#endif
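A minimal sketch of how the tracker helpers declared above pair with the delayed node's refcount; the function names are hypothetical, the real pattern is the one used by btrfs_get_delayed_node() and btrfs_release_delayed_node() in delayed-inode.c:

static struct btrfs_delayed_node *
example_grab_delayed_node(struct btrfs_delayed_node *node,
			  struct btrfs_ref_tracker *tracker)
{
	if (!refcount_inc_not_zero(&node->refs))
		return NULL;

	/* Record this reference so leaks can be reported on REF_TRACKER mounts. */
	btrfs_delayed_node_ref_tracker_alloc(node, tracker, GFP_NOFS);
	return node;
}

static void example_drop_delayed_node(struct btrfs_delayed_node *node,
				      struct btrfs_ref_tracker *tracker)
{
	/* Free the tracker entry before dropping the reference it tracks. */
	btrfs_delayed_node_ref_tracker_free(node, tracker);
	refcount_dec(&node->refs);
}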
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index cca7e85e32dd..e8bc37453336 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -6,16 +6,18 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
+#include "extent-tree.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
+#include "fs.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
-struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_ref_node_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
* delayed back reference update tracking. For subvolume trees
@@ -51,38 +53,24 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
return ret;
}
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
-{
- u64 num_entries =
- atomic_read(&trans->transaction->delayed_refs.num_entries);
- u64 avg_runtime;
- u64 val;
-
- smp_mb();
- avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
- val = num_entries * avg_runtime;
- if (val >= NSEC_PER_SEC)
- return 1;
- if (val >= NSEC_PER_SEC / 2)
- return 2;
-
- return btrfs_check_space_for_delayed_refs(trans->fs_info);
-}
-
-/**
- * Release a ref head's reservation
+/*
+ * Release a ref head's reservation.
*
* @fs_info: the filesystem
- * @nr: number of items to drop
+ * @nr_refs: number of delayed refs to drop
+ * @nr_csums: number of csum items to drop
*
- * This drops the delayed ref head's count from the delayed refs rsv and frees
- * any excess reservation we had.
+ * Drops the delayed ref head's count from the delayed refs rsv and frees any
+ * excess reservation we had.
*/
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
- u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
- u64 released = 0;
+ u64 num_bytes;
+ u64 released;
+
+ num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
+ num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
if (released)
@@ -91,81 +79,127 @@ void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
}
/*
- * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
- * @trans - the trans that may have generated delayed refs
+ * Adjust the size of the delayed refs rsv.
*
- * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
- * it'll calculate the additional size and add it to the delayed_refs_rsv.
+ * This is to be called anytime we may have adjusted trans->delayed_ref_updates
+ * or trans->delayed_ref_csum_deletions; it calculates the additional size and
+ * adds it to the delayed_refs_rsv.
*/
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
u64 num_bytes;
+ u64 reserved_bytes;
+
+ if (btrfs_is_testing(fs_info))
+ return;
+
+ num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
+ num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
+ trans->delayed_ref_csum_deletions);
- if (!trans->delayed_ref_updates)
+ if (num_bytes == 0)
return;
- num_bytes = btrfs_calc_insert_metadata_size(fs_info,
- trans->delayed_ref_updates);
+ /*
+ * Try to take num_bytes from the transaction's local delayed reserve.
+ * If not possible, take as much as is available. If the local
+ * reserve doesn't have enough reserved space, the delayed refs reserve
+ * will be refilled the next time btrfs_delayed_refs_rsv_refill() is
+ * called by someone, or, if a transaction commit is triggered before
+ * that, the global block reserve will be used. We want to minimize use
+ * of the global block reserve for cases we can account for in advance,
+ * to avoid exhausting it and reaching -ENOSPC during a transaction commit.
+ */
+ spin_lock(&local_rsv->lock);
+ reserved_bytes = min(num_bytes, local_rsv->reserved);
+ local_rsv->reserved -= reserved_bytes;
+ local_rsv->full = (local_rsv->reserved >= local_rsv->size);
+ spin_unlock(&local_rsv->lock);
+
spin_lock(&delayed_rsv->lock);
delayed_rsv->size += num_bytes;
- delayed_rsv->full = 0;
+ delayed_rsv->reserved += reserved_bytes;
+ delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
spin_unlock(&delayed_rsv->lock);
trans->delayed_ref_updates = 0;
+ trans->delayed_ref_csum_deletions = 0;
}
-/**
- * Transfer bytes to our delayed refs rsv
- *
- * @fs_info: the filesystem
- * @src: source block rsv to transfer from
- * @num_bytes: number of bytes to transfer
- *
- * This transfers up to the num_bytes amount from the src rsv to the
- * delayed_refs_rsv. Any extra bytes are returned to the space info.
+/*
+ * Adjust the size of the delayed refs block reserve for 1 block group item
+ * insertion, used after allocating a block group.
*/
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *src,
- u64 num_bytes)
+void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
{
- struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
- u64 to_free = 0;
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
- spin_lock(&src->lock);
- src->reserved -= num_bytes;
- src->size -= num_bytes;
- spin_unlock(&src->lock);
+ spin_lock(&delayed_rsv->lock);
+ /*
+ * Inserting a block group item does not require changing the free space
+ * tree, only the extent tree or the block group tree, so this is all we
+ * need.
+ */
+ delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
+ delayed_rsv->full = false;
+ spin_unlock(&delayed_rsv->lock);
+}
- spin_lock(&delayed_refs_rsv->lock);
- if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
- u64 delta = delayed_refs_rsv->size -
- delayed_refs_rsv->reserved;
- if (num_bytes > delta) {
- to_free = num_bytes - delta;
- num_bytes = delta;
- }
- } else {
- to_free = num_bytes;
- num_bytes = 0;
- }
+/*
+ * Adjust the size of the delayed refs block reserve to release space for 1
+ * block group item insertion.
+ */
+void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+ u64 released;
- if (num_bytes)
- delayed_refs_rsv->reserved += num_bytes;
- if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
- delayed_refs_rsv->full = 1;
- spin_unlock(&delayed_refs_rsv->lock);
+ released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
+ if (released > 0)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+ 0, released, 0);
+}
- if (num_bytes)
+/*
+ * Adjust the size of the delayed refs block reserve for 1 block group item
+ * update.
+ */
+void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+
+ spin_lock(&delayed_rsv->lock);
+ /*
+ * Updating a block group item does not result in new nodes/leaves and
+ * does not require changing the free space tree, only the extent tree
+ * or the block group tree, so this is all we need.
+ */
+ delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
+ delayed_rsv->full = false;
+ spin_unlock(&delayed_rsv->lock);
+}
+
+/*
+ * Adjust the size of the delayed refs block reserve to release space for 1
+ * block group item update.
+ */
+void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
+ u64 released;
+
+ released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
+ if (released > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
- 0, num_bytes, 1);
- if (to_free)
- btrfs_space_info_free_bytes_may_use(fs_info,
- delayed_refs_rsv->space_info, to_free);
+ 0, released, 0);
}
-/**
- * Refill based on our delayed refs usage
+/*
+ * Refill based on our delayed refs usage.
*
* @fs_info: the filesystem
* @flush: control how we can flush for this reservation.
@@ -177,8 +211,11 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
- u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
+ struct btrfs_space_info *space_info = block_rsv->space_info;
+ u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0;
+ u64 refilled_bytes;
+ u64 to_free;
int ret = -ENOSPC;
spin_lock(&block_rsv->lock);
@@ -191,66 +228,62 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
if (!num_bytes)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
- num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(space_info, num_bytes, flush);
if (ret)
return ret;
- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
- trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
- 0, num_bytes, 1);
- return 0;
-}
-/*
- * compare two delayed tree backrefs with same bytenr and type
- */
-static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
- struct btrfs_delayed_tree_ref *ref2)
-{
- if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
+ /*
+ * We may have raced with someone else, so check again whether the block
+ * reserve is still not full and release any excess space.
+ */
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved < block_rsv->size) {
+ u64 needed = block_rsv->size - block_rsv->reserved;
+
+ if (num_bytes >= needed) {
+ block_rsv->reserved += needed;
+ block_rsv->full = true;
+ to_free = num_bytes - needed;
+ refilled_bytes = needed;
+ } else {
+ block_rsv->reserved += num_bytes;
+ to_free = 0;
+ refilled_bytes = num_bytes;
+ }
} else {
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
+ to_free = num_bytes;
+ refilled_bytes = 0;
}
+ spin_unlock(&block_rsv->lock);
+
+ if (to_free > 0)
+ btrfs_space_info_free_bytes_may_use(space_info, to_free);
+
+ if (refilled_bytes > 0)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
+ refilled_bytes, 1);
return 0;
}
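The re-check above splits the freshly reserved bytes between the delayed refs reserve and the space info. Below is a standalone sketch of that split with hypothetical names; for example, with size = 96K, reserved = 64K and num_bytes = 48K, needed is 32K, so 32K is kept (refilled) and 16K is returned (to_free):

static void example_split_refill(u64 size, u64 reserved, u64 num_bytes,
				 u64 *refilled, u64 *to_free)
{
	if (reserved < size) {
		u64 needed = size - reserved;

		if (num_bytes >= needed) {
			/* Enough to fill the reserve, return the remainder. */
			*refilled = needed;
			*to_free = num_bytes - needed;
		} else {
			/* Keep everything, the reserve is still not full. */
			*refilled = num_bytes;
			*to_free = 0;
		}
	} else {
		/* Someone else already filled the reserve, return it all. */
		*refilled = 0;
		*to_free = num_bytes;
	}
}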
/*
* compare two delayed data backrefs with same bytenr and type
*/
-static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
- struct btrfs_delayed_data_ref *ref2)
+static int comp_data_refs(const struct btrfs_delayed_ref_node *ref1,
+ const struct btrfs_delayed_ref_node *ref2)
{
- if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- if (ref1->objectid < ref2->objectid)
- return -1;
- if (ref1->objectid > ref2->objectid)
- return 1;
- if (ref1->offset < ref2->offset)
- return -1;
- if (ref1->offset > ref2->offset)
- return 1;
- } else {
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
- }
+ if (ref1->data_ref.objectid < ref2->data_ref.objectid)
+ return -1;
+ if (ref1->data_ref.objectid > ref2->data_ref.objectid)
+ return 1;
+ if (ref1->data_ref.offset < ref2->data_ref.offset)
+ return -1;
+ if (ref1->data_ref.offset > ref2->data_ref.offset)
+ return 1;
return 0;
}
-static int comp_refs(struct btrfs_delayed_ref_node *ref1,
- struct btrfs_delayed_ref_node *ref2,
+static int comp_refs(const struct btrfs_delayed_ref_node *ref1,
+ const struct btrfs_delayed_ref_node *ref2,
bool check_seq)
{
int ret = 0;
@@ -259,13 +292,20 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
return -1;
if (ref1->type > ref2->type)
return 1;
- if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
- ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
- btrfs_delayed_node_to_tree_ref(ref2));
- else
- ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
- btrfs_delayed_node_to_data_ref(ref2));
+ if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
+ ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
+ } else {
+ if (ref1->ref_root < ref2->ref_root)
+ return -1;
+ if (ref1->ref_root > ref2->ref_root)
+ return 1;
+ if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
+ ret = comp_data_refs(ref1, ref2);
+ }
if (ret)
return ret;
if (check_seq) {
@@ -277,145 +317,57 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
return 0;
}
-/* insert a new ref to head ref rbtree */
-static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
- struct rb_node *node)
+static int cmp_refs_node(const struct rb_node *new, const struct rb_node *exist)
{
- struct rb_node **p = &root->rb_root.rb_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_delayed_ref_head *entry;
- struct btrfs_delayed_ref_head *ins;
- u64 bytenr;
- bool leftmost = true;
-
- ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
- bytenr = ins->bytenr;
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
- href_node);
-
- if (bytenr < entry->bytenr) {
- p = &(*p)->rb_left;
- } else if (bytenr > entry->bytenr) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- return entry;
- }
- }
+ const struct btrfs_delayed_ref_node *new_node =
+ rb_entry(new, struct btrfs_delayed_ref_node, ref_node);
+ const struct btrfs_delayed_ref_node *exist_node =
+ rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
- return NULL;
+ return comp_refs(new_node, exist_node, true);
}
static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
struct btrfs_delayed_ref_node *ins)
{
- struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *node = &ins->ref_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_delayed_ref_node *entry;
- bool leftmost = true;
-
- while (*p) {
- int comp;
-
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
- ref_node);
- comp = comp_refs(ins, entry, true);
- if (comp < 0) {
- p = &(*p)->rb_left;
- } else if (comp > 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- return entry;
- }
- }
+ struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
- return NULL;
+ return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
}
static struct btrfs_delayed_ref_head *find_first_ref_head(
struct btrfs_delayed_ref_root *dr)
{
- struct rb_node *n;
- struct btrfs_delayed_ref_head *entry;
-
- n = rb_first_cached(&dr->href_root);
- if (!n)
- return NULL;
-
- entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
+ unsigned long from = 0;
- return entry;
-}
+ lockdep_assert_held(&dr->lock);
-/*
- * Find a head entry based on bytenr. This returns the delayed ref head if it
- * was able to find one, or NULL if nothing was in that spot. If return_bigger
- * is given, the next bigger entry is returned if no exact match is found.
- */
-static struct btrfs_delayed_ref_head *find_ref_head(
- struct btrfs_delayed_ref_root *dr, u64 bytenr,
- bool return_bigger)
-{
- struct rb_root *root = &dr->href_root.rb_root;
- struct rb_node *n;
- struct btrfs_delayed_ref_head *entry;
-
- n = root->rb_node;
- entry = NULL;
- while (n) {
- entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
-
- if (bytenr < entry->bytenr)
- n = n->rb_left;
- else if (bytenr > entry->bytenr)
- n = n->rb_right;
- else
- return entry;
- }
- if (entry && return_bigger) {
- if (bytenr > entry->bytenr) {
- n = rb_next(&entry->href_node);
- if (!n)
- return NULL;
- entry = rb_entry(n, struct btrfs_delayed_ref_head,
- href_node);
- }
- return entry;
- }
- return NULL;
+ return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
}
-int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head)
+static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
{
lockdep_assert_held(&delayed_refs->lock);
if (mutex_trylock(&head->mutex))
- return 0;
+ return true;
refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
spin_lock(&delayed_refs->lock);
- if (RB_EMPTY_NODE(&head->href_node)) {
+ if (!head->tracked) {
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
- return -EAGAIN;
+ return false;
}
btrfs_put_delayed_ref_head(head);
- return 0;
+ return true;
}
-static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
+static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref)
@@ -425,12 +377,11 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
- ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
- atomic_dec(&delayed_refs->num_entries);
+ btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
-static bool merge_ref(struct btrfs_trans_handle *trans,
+static bool merge_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref,
@@ -460,10 +411,10 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
mod = -next->ref_mod;
}
- drop_delayed_ref(trans, delayed_refs, head, next);
+ drop_delayed_ref(fs_info, delayed_refs, head, next);
ref->ref_mod += mod;
if (ref->ref_mod == 0) {
- drop_delayed_ref(trans, delayed_refs, head, ref);
+ drop_delayed_ref(fs_info, delayed_refs, head, ref);
done = true;
} else {
/*
@@ -477,11 +428,10 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
return done;
}
-void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_node *ref;
struct rb_node *node;
u64 seq = 0;
@@ -502,7 +452,7 @@ again:
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
continue;
- if (merge_ref(trans, delayed_refs, head, ref, seq))
+ if (merge_ref(fs_info, delayed_refs, head, ref, seq))
goto again;
}
}
@@ -523,78 +473,131 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
}
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ const struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs)
{
struct btrfs_delayed_ref_head *head;
+ unsigned long start_index;
+ unsigned long found_index;
+ bool found_head = false;
+ bool locked;
+ spin_lock(&delayed_refs->lock);
again:
- head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
- true);
- if (!head && delayed_refs->run_delayed_start != 0) {
- delayed_refs->run_delayed_start = 0;
- head = find_first_ref_head(delayed_refs);
+ start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
+ xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
+ if (!head->processing) {
+ found_head = true;
+ break;
+ }
}
- if (!head)
- return NULL;
-
- while (head->processing) {
- struct rb_node *node;
-
- node = rb_next(&head->href_node);
- if (!node) {
- if (delayed_refs->run_delayed_start == 0)
- return NULL;
- delayed_refs->run_delayed_start = 0;
- goto again;
+ if (!found_head) {
+ if (delayed_refs->run_delayed_start == 0) {
+ spin_unlock(&delayed_refs->lock);
+ return NULL;
}
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
+ delayed_refs->run_delayed_start = 0;
+ goto again;
}
- head->processing = 1;
+ head->processing = true;
WARN_ON(delayed_refs->num_heads_ready == 0);
delayed_refs->num_heads_ready--;
delayed_refs->run_delayed_start = head->bytenr +
head->num_bytes;
+
+ locked = btrfs_delayed_ref_lock(delayed_refs, head);
+ spin_unlock(&delayed_refs->lock);
+
+ /*
+ * We may have dropped the spin lock to get the head mutex lock, and
+ * that might have given someone else time to free the head. If that's
+ * true, it has been removed from our list and we can move on.
+ */
+ if (!locked)
+ return ERR_PTR(-EAGAIN);
+
return head;
}
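A minimal sketch of the caller pattern implied by the new return convention (NULL when there is nothing left to run, ERR_PTR(-EAGAIN) when the selected head was freed while its mutex was being acquired); the function name is hypothetical, the real caller is the delayed ref running code in extent-tree.c:

static int example_run_one_head(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

	head = btrfs_select_ref_head(trans->fs_info, delayed_refs);
	if (!head)
		return 0;		/* nothing left to run */
	if (IS_ERR(head))
		return PTR_ERR(head);	/* -EAGAIN: head went away, retry */

	/* ... run the head's delayed refs, then delete and unlock the head ... */
	return 1;
}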
-void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ spin_lock(&delayed_refs->lock);
+ head->processing = false;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(head);
+}
+
+void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
+ const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
+
lockdep_assert_held(&delayed_refs->lock);
lockdep_assert_held(&head->lock);
- rb_erase_cached(&head->href_node, &delayed_refs->href_root);
- RB_CLEAR_NODE(&head->href_node);
- atomic_dec(&delayed_refs->num_entries);
+ xa_erase(&delayed_refs->head_refs, index);
+ head->tracked = false;
delayed_refs->num_heads--;
- if (head->processing == 0)
+ if (!head->processing)
delayed_refs->num_heads_ready--;
}
+struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_ref_node *ref;
+
+ lockdep_assert_held(&head->mutex);
+ lockdep_assert_held(&head->lock);
+
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
+ return NULL;
+
+ /*
+ * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
+ * This is to prevent a ref count from going down to zero, which deletes
+ * the extent item from the extent tree, while there are still references
+ * to add, which would then fail because they would not find the extent item.
+ */
+ if (!list_empty(&head->ref_add_list))
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
+
+ ref = rb_entry(rb_first_cached(&head->ref_tree),
+ struct btrfs_delayed_ref_node, ref_node);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
+}
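A minimal sketch of using btrfs_select_delayed_ref() with the locking it asserts; the function name is hypothetical:

static void example_pick_next_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	/* Both the head's mutex and spinlock must be held. */
	lockdep_assert_held(&head->mutex);
	spin_lock(&head->lock);

	ref = btrfs_select_delayed_ref(head);
	if (ref) {
		/*
		 * Adds are handed out before drops, so the extent item is not
		 * deleted while references still have to be inserted.
		 */
		/* ... detach @ref from the head and apply it to the extent tree ... */
	}
	spin_unlock(&head->lock);
}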
+
/*
* Helper to insert the ref_node to the tail or merge with tail.
*
- * Return 0 for insert.
- * Return >0 for merge.
+ * Return false if the ref was inserted.
+ * Return true if the ref was merged into an existing one (and therefore can be
+ * freed by the caller).
*/
-static int insert_delayed_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_root *root,
- struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *ref)
+static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *href,
+ struct btrfs_delayed_ref_node *ref)
{
+ struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
struct btrfs_delayed_ref_node *exist;
int mod;
- int ret = 0;
spin_lock(&href->lock);
exist = tree_insert(&href->ref_tree, ref);
- if (!exist)
- goto inserted;
+ if (!exist) {
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ list_add_tail(&ref->add_list, &href->ref_add_list);
+ spin_unlock(&href->lock);
+ trans->delayed_ref_updates++;
+ return false;
+ }
/* Now we are sure we can merge */
- ret = 1;
if (exist->action == ref->action) {
mod = ref->ref_mod;
} else {
@@ -608,7 +611,7 @@ static int insert_delayed_ref(struct btrfs_trans_handle *trans,
&href->ref_add_list);
else if (ref->action == BTRFS_DROP_DELAYED_REF) {
ASSERT(!list_empty(&exist->add_list));
- list_del(&exist->add_list);
+ list_del_init(&exist->add_list);
} else {
ASSERT(0);
}
@@ -619,15 +622,9 @@ static int insert_delayed_ref(struct btrfs_trans_handle *trans,
/* remove existing tail if its ref_mod is zero */
if (exist->ref_mod == 0)
- drop_delayed_ref(trans, root, href, exist);
- spin_unlock(&href->lock);
- return ret;
-inserted:
- if (ref->action == BTRFS_ADD_DELAYED_REF)
- list_add_tail(&ref->add_list, &href->ref_add_list);
- atomic_inc(&root->num_entries);
+ drop_delayed_ref(trans->fs_info, root, href, exist);
spin_unlock(&href->lock);
- return ret;
+ return true;
}
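With the boolean return, the caller frees the node when it was merged into an existing ref, exactly as add_delayed_ref() does further below; a minimal sketch with hypothetical names:

static void example_insert_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_head *head,
			       struct btrfs_delayed_ref_node *node)
{
	bool merged = insert_delayed_ref(trans, head, node);

	/* On a merge the ref_mod was folded into an existing node. */
	if (merged)
		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
}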
/*
@@ -646,6 +643,15 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
BUG_ON(existing->is_data != update->is_data);
spin_lock(&existing->lock);
+
+ /*
+ * When freeing an extent, we may not know the owning root when we
+ * first create the head_ref. However, some deref before the last deref
+ * will know it, so we just need to update the head_ref accordingly.
+ */
+ if (!existing->owning_root)
+ existing->owning_root = update->owning_root;
+
if (update->must_insert_reserved) {
/* if the extent was freed and then
* reallocated before the delayed ref
@@ -655,6 +661,7 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
* Set it again here
*/
existing->must_insert_reserved = update->must_insert_reserved;
+ existing->owning_root = update->owning_root;
/*
* update the num_bytes so we make sure the accounting
@@ -694,6 +701,8 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
/*
* If we are going to from a positive ref mod to a negative or vice
* versa we need to make sure to adjust pending_csums accordingly.
+ * We reserve bytes for csum deletion when adding or updating a ref head,
+ * see add_delayed_ref_head() for more details.
*/
if (existing->is_data) {
u64 csum_leaves =
@@ -702,11 +711,11 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
delayed_refs->pending_csums -= existing->num_bytes;
- btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
+ btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
}
if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
delayed_refs->pending_csums += existing->num_bytes;
- trans->delayed_ref_updates += csum_leaves;
+ trans->delayed_ref_csum_deletions += csum_leaves;
}
}
@@ -714,97 +723,150 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
}
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_ref *generic_ref,
struct btrfs_qgroup_extent_record *qrecord,
- u64 bytenr, u64 num_bytes, u64 ref_root,
- u64 reserved, int action, bool is_data,
- bool is_system)
+ u64 reserved)
{
int count_mod = 1;
- int must_insert_reserved = 0;
+ bool must_insert_reserved = false;
/* If reserved is provided, it must be a data extent. */
- BUG_ON(!is_data && reserved);
+ BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
- /*
- * The head node stores the sum of all the mods, so dropping a ref
- * should drop the sum in the head node by one.
- */
- if (action == BTRFS_UPDATE_DELAYED_HEAD)
+ switch (generic_ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ /* count_mod is already set to 1. */
+ break;
+ case BTRFS_UPDATE_DELAYED_HEAD:
count_mod = 0;
- else if (action == BTRFS_DROP_DELAYED_REF)
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ /*
+ * The head node stores the sum of all the mods, so dropping a ref
+ * should drop the sum in the head node by one.
+ */
count_mod = -1;
-
- /*
- * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
- * accounting when the extent is finally added, or if a later
- * modification deletes the delayed ref without ever inserting the
- * extent into the extent allocation tree. ref->must_insert_reserved
- * is the flag used to record that accounting mods are required.
- *
- * Once we record must_insert_reserved, switch the action to
- * BTRFS_ADD_DELAYED_REF because other special casing is not required.
- */
- if (action == BTRFS_ADD_DELAYED_EXTENT)
- must_insert_reserved = 1;
- else
- must_insert_reserved = 0;
+ break;
+ case BTRFS_ADD_DELAYED_EXTENT:
+ /*
+ * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
+ * reserved accounting when the extent is finally added, or if a
+ * later modification deletes the delayed ref without ever
+ * inserting the extent into the extent allocation tree.
+ * ref->must_insert_reserved is the flag used to record that
+ * accounting mods are required.
+ *
+ * Once we record must_insert_reserved, switch the action to
+ * BTRFS_ADD_DELAYED_REF because other special casing is not
+ * required.
+ */
+ must_insert_reserved = true;
+ break;
+ }
refcount_set(&head_ref->refs, 1);
- head_ref->bytenr = bytenr;
- head_ref->num_bytes = num_bytes;
+ head_ref->bytenr = generic_ref->bytenr;
+ head_ref->num_bytes = generic_ref->num_bytes;
head_ref->ref_mod = count_mod;
+ head_ref->reserved_bytes = reserved;
head_ref->must_insert_reserved = must_insert_reserved;
- head_ref->is_data = is_data;
- head_ref->is_system = is_system;
+ head_ref->owning_root = generic_ref->owning_root;
+ head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
+ head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
head_ref->ref_tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&head_ref->ref_add_list);
- RB_CLEAR_NODE(&head_ref->href_node);
- head_ref->processing = 0;
+ head_ref->tracked = false;
+ head_ref->processing = false;
head_ref->total_ref_mod = count_mod;
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
+ /* If not metadata, set an impossible level to help debugging. */
+ if (generic_ref->type == BTRFS_REF_METADATA)
+ head_ref->level = generic_ref->tree_ref.level;
+ else
+ head_ref->level = U8_MAX;
+
if (qrecord) {
- if (ref_root && reserved) {
+ if (generic_ref->ref_root && reserved) {
qrecord->data_rsv = reserved;
- qrecord->data_rsv_refroot = ref_root;
+ qrecord->data_rsv_refroot = generic_ref->ref_root;
}
- qrecord->bytenr = bytenr;
- qrecord->num_bytes = num_bytes;
+ qrecord->num_bytes = generic_ref->num_bytes;
qrecord->old_roots = NULL;
}
}
/*
- * helper function to actually insert a head node into the rbtree.
- * this does all the dirty work in terms of maintaining the correct
- * overall modification count.
+ * Helper function to actually insert a head node into the xarray. This does all
+ * the dirty work in terms of maintaining the correct overall modification
+ * count.
+ *
+ * The caller is responsible for calling kfree() on @qrecord. More specifically,
+ * if this function reports, via @qrecord_inserted_ret, that it did not insert
+ * the record, then it's safe to call kfree() on it.
+ *
+ * Returns an error pointer in case of an error.
*/
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
- int action, int *qrecord_inserted_ret)
+ int action, bool *qrecord_inserted_ret)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
- int qrecord_inserted = 0;
+ const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
+
+ /*
+ * If 'qrecord_inserted_ret' is provided, the first thing we need to do
+ * is initialize it to false, in case we exit before trying to insert
+ * the record.
+ */
+ if (qrecord_inserted_ret)
+ *qrecord_inserted_ret = false;
delayed_refs = &trans->transaction->delayed_refs;
+ lockdep_assert_held(&delayed_refs->lock);
+
+#if BITS_PER_LONG == 32
+ if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
+ if (qrecord)
+ xa_release(&delayed_refs->dirty_extents, index);
+ btrfs_err_rl(fs_info,
+"delayed ref head %llu is beyond 32bit page cache and xarray index limit",
+ head_ref->bytenr);
+ btrfs_err_32bit_limit(fs_info);
+ return ERR_PTR(-EOVERFLOW);
+ }
+#endif
/* Record qgroup extent info if provided */
if (qrecord) {
- if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
- delayed_refs, qrecord))
- kfree(qrecord);
- else
- qrecord_inserted = 1;
+ /*
+ * Setting 'qrecord' but not 'qrecord_inserted_ret' will likely
+ * result in a memory leak.
+ */
+ ASSERT(qrecord_inserted_ret != NULL);
+
+ int ret;
+
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
+ head_ref->bytenr);
+ if (ret) {
+ /* Clean up if insertion fails or item exists. */
+ xa_release(&delayed_refs->dirty_extents, index);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ } else if (qrecord_inserted_ret) {
+ *qrecord_inserted_ret = true;
+ }
}
- trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
+ trace_add_delayed_ref_head(fs_info, head_ref, action);
- existing = htree_insert(&delayed_refs->href_root,
- &head_ref->href_node);
+ existing = xa_load(&delayed_refs->head_refs, index);
if (existing) {
update_existing_head_ref(trans, existing, head_ref);
/*
@@ -814,26 +876,39 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
+ existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
+ if (xa_is_err(existing)) {
+ /* Memory was preallocated by the caller. */
+ ASSERT(xa_err(existing) != -ENOMEM);
+ return ERR_PTR(xa_err(existing));
+ } else if (WARN_ON(existing)) {
+ /*
+ * Shouldn't happen, we just did a lookup before under
+ * delayed_refs->lock.
+ */
+ return ERR_PTR(-EEXIST);
+ }
+ head_ref->tracked = true;
+ /*
+ * We reserve the amount of bytes needed to delete csums when
+ * adding the ref head and not when adding individual drop refs
+ * since the csum items are deleted only after running the last
+ * delayed drop ref (the data extent's ref count drops to 0).
+ */
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
- trans->delayed_ref_updates +=
- btrfs_csum_bytes_to_leaves(trans->fs_info,
- head_ref->num_bytes);
+ trans->delayed_ref_csum_deletions +=
+ btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
}
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
- atomic_inc(&delayed_refs->num_entries);
- trans->delayed_ref_updates++;
}
- if (qrecord_inserted_ret)
- *qrecord_inserted_ret = qrecord_inserted;
return head_ref;
}
/*
- * init_delayed_ref_common - Initialize the structure which represents a
- * modification to a an extent.
+ * Initialize the structure which represents a modification to an extent.
*
* @fs_info: Internal to the mounted filesystem mount structure.
*
@@ -858,106 +933,148 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
*/
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *ref,
- u64 bytenr, u64 num_bytes, u64 ref_root,
- int action, u8 ref_type)
+ struct btrfs_ref *generic_ref)
{
+ int action = generic_ref->action;
u64 seq = 0;
if (action == BTRFS_ADD_DELAYED_EXTENT)
action = BTRFS_ADD_DELAYED_REF;
- if (is_fstree(ref_root))
+ if (btrfs_is_fstree(generic_ref->ref_root))
seq = atomic64_read(&fs_info->tree_mod_seq);
refcount_set(&ref->refs, 1);
- ref->bytenr = bytenr;
- ref->num_bytes = num_bytes;
+ ref->bytenr = generic_ref->bytenr;
+ ref->num_bytes = generic_ref->num_bytes;
ref->ref_mod = 1;
ref->action = action;
- ref->is_head = 0;
- ref->in_tree = 1;
ref->seq = seq;
- ref->type = ref_type;
+ ref->type = btrfs_ref_type(generic_ref);
+ ref->ref_root = generic_ref->ref_root;
+ ref->parent = generic_ref->parent;
RB_CLEAR_NODE(&ref->ref_node);
INIT_LIST_HEAD(&ref->add_list);
+
+ if (generic_ref->type == BTRFS_REF_DATA)
+ ref->data_ref = generic_ref->data_ref;
+ else
+ ref->tree_ref = generic_ref->tree_ref;
}
-/*
- * add a delayed tree ref. This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_ref *generic_ref,
- struct btrfs_delayed_extent_op *extent_op)
+void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
+ bool skip_qgroup)
+{
+#ifdef CONFIG_BTRFS_DEBUG
+ /* If @mod_root is not set, use @ref_root as fallback for @real_root. */
+ generic_ref->real_root = mod_root ?: generic_ref->ref_root;
+#endif
+ generic_ref->tree_ref.level = level;
+ generic_ref->type = BTRFS_REF_METADATA;
+ if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
+ (!mod_root || btrfs_is_fstree(mod_root))))
+ generic_ref->skip_qgroup = true;
+ else
+ generic_ref->skip_qgroup = false;
+}
+
+void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
+ u64 mod_root, bool skip_qgroup)
+{
+#ifdef CONFIG_BTRFS_DEBUG
+ /* If @mod_root is not set, use @ref_root as fallback for @real_root. */
+ generic_ref->real_root = mod_root ?: generic_ref->ref_root;
+#endif
+ generic_ref->data_ref.objectid = ino;
+ generic_ref->data_ref.offset = offset;
+ generic_ref->type = BTRFS_REF_DATA;
+ if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
+ (!mod_root || btrfs_is_fstree(mod_root))))
+ generic_ref->skip_qgroup = true;
+ else
+ generic_ref->skip_qgroup = false;
+}
+
+static int add_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+ struct btrfs_delayed_extent_op *extent_op,
+ u64 reserved)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_delayed_ref_node *node;
struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_head *new_head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
- bool is_system;
+ const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
+ bool qrecord_reserved = false;
+ bool qrecord_inserted;
int action = generic_ref->action;
- int level = generic_ref->tree_ref.level;
+ bool merged;
int ret;
- u64 bytenr = generic_ref->bytenr;
- u64 num_bytes = generic_ref->len;
- u64 parent = generic_ref->parent;
- u8 ref_type;
- is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);
-
- ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
- BUG_ON(extent_op && extent_op->is_data);
- ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
- if (!ref)
+ node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
+ if (!node)
return -ENOMEM;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_node;
}
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
- !generic_ref->skip_qgroup) {
+ delayed_refs = &trans->transaction->delayed_refs;
+
+ if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
- kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_head_ref;
}
+ if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
+ ret = -ENOMEM;
+ goto free_record;
+ }
+ qrecord_reserved = true;
}
- if (parent)
- ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
- else
- ref_type = BTRFS_TREE_BLOCK_REF_KEY;
-
- init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
- generic_ref->tree_ref.owning_root, action,
- ref_type);
- ref->root = generic_ref->tree_ref.owning_root;
- ref->parent = parent;
- ref->level = level;
-
- init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
- generic_ref->tree_ref.owning_root, 0, action,
- false, is_system);
+ ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
+ if (ret) {
+ if (qrecord_reserved)
+ xa_release(&delayed_refs->dirty_extents, index);
+ goto free_record;
+ }
+
+ init_delayed_ref_common(fs_info, node, generic_ref);
+ init_delayed_ref_head(head_ref, generic_ref, record, reserved);
head_ref->extent_op = extent_op;
- delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
/*
* insert both the head node and the new ref without dropping
* the spin lock
*/
- head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted);
+ new_head_ref = add_delayed_ref_head(trans, head_ref, record,
+ action, &qrecord_inserted);
+ if (IS_ERR(new_head_ref)) {
+ xa_release(&delayed_refs->head_refs, index);
+ spin_unlock(&delayed_refs->lock);
+ ret = PTR_ERR(new_head_ref);
- ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+		/*
+		 * It is only safe to free 'record' here if
+		 * add_delayed_ref_head() has _not_ inserted it for qgroup
+		 * accounting. If it was inserted, ownership was handed over
+		 * and it must not be freed here.
+		 */
+ if (!qrecord_reserved || qrecord_inserted)
+ goto free_head_ref;
+ goto free_record;
+ }
+ head_ref = new_head_ref;
+
+ merged = insert_delayed_ref(trans, head_ref, node);
spin_unlock(&delayed_refs->lock);
/*
@@ -966,16 +1083,38 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
*/
btrfs_update_delayed_refs_rsv(trans);
- trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
- action == BTRFS_ADD_DELAYED_EXTENT ?
- BTRFS_ADD_DELAYED_REF : action);
- if (ret > 0)
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ if (generic_ref->type == BTRFS_REF_DATA)
+ trace_add_delayed_data_ref(trans->fs_info, node);
+ else
+ trace_add_delayed_tree_ref(trans->fs_info, node);
+ if (merged)
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
if (qrecord_inserted)
- btrfs_qgroup_trace_extent_post(trans, record);
+ return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
+ kfree(record);
return 0;
+
+free_record:
+ kfree(record);
+free_head_ref:
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+free_node:
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
+ return ret;
+}
+
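/*
 * Aside (illustration only, not part of the patch): add_delayed_ref() above
 * calls xa_reserve() before taking delayed_refs->lock because the actual
 * store happens under that spinlock, where a GFP_NOFS allocation could not
 * sleep. A generic sketch of the reserve-then-store idiom, assuming an
 * xarray 'xa', a spinlock 'lock' and a preallocated 'entry':
 */
#if 0	/* not compiled */
	ret = xa_reserve(&xa, index, GFP_NOFS);	/* may sleep, preallocates a slot */
	if (ret)
		return ret;

	spin_lock(&lock);
	if (need_to_insert)
		xa_store(&xa, index, entry, GFP_ATOMIC);	/* fills the reserved slot */
	spin_unlock(&lock);

	if (!need_to_insert)
		xa_release(&xa, index);	/* give back an unused reservation */
#endif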
+/*
+ * Add a delayed tree ref. This does all of the accounting required to make sure
+ * the delayed ref is eventually processed before this transaction commits.
+ */
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+ struct btrfs_delayed_extent_op *extent_op)
+{
+ ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
+ return add_delayed_ref(trans, generic_ref, extent_op, 0);
}
/*
@@ -985,113 +1124,51 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
u64 reserved)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_data_ref *ref;
- struct btrfs_delayed_ref_head *head_ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
- int action = generic_ref->action;
- int ret;
- u64 bytenr = generic_ref->bytenr;
- u64 num_bytes = generic_ref->len;
- u64 parent = generic_ref->parent;
- u64 ref_root = generic_ref->data_ref.owning_root;
- u64 owner = generic_ref->data_ref.ino;
- u64 offset = generic_ref->data_ref.offset;
- u8 ref_type;
-
- ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
- ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
- if (!ref)
- return -ENOMEM;
-
- if (parent)
- ref_type = BTRFS_SHARED_DATA_REF_KEY;
- else
- ref_type = BTRFS_EXTENT_DATA_REF_KEY;
- init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
- ref_root, action, ref_type);
- ref->root = ref_root;
- ref->parent = parent;
- ref->objectid = owner;
- ref->offset = offset;
-
-
- head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
- if (!head_ref) {
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- return -ENOMEM;
- }
-
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
- !generic_ref->skip_qgroup) {
- record = kzalloc(sizeof(*record), GFP_NOFS);
- if (!record) {
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- kmem_cache_free(btrfs_delayed_ref_head_cachep,
- head_ref);
- return -ENOMEM;
- }
- }
-
- init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
- reserved, action, true, false);
- head_ref->extent_op = NULL;
-
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
-
- /*
- * insert both the head node and the new ref without dropping
- * the spin lock
- */
- head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted);
-
- ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
- spin_unlock(&delayed_refs->lock);
-
- /*
- * Need to update the delayed_refs_rsv with any changes we may have
- * made.
- */
- btrfs_update_delayed_refs_rsv(trans);
-
- trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
- action == BTRFS_ADD_DELAYED_EXTENT ?
- BTRFS_ADD_DELAYED_REF : action);
- if (ret > 0)
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
-
-
- if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(trans, record);
- return 0;
+ ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
+ return add_delayed_ref(trans, generic_ref, NULL, reserved);
}
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes,
+ u64 bytenr, u64 num_bytes, u8 level,
struct btrfs_delayed_extent_op *extent_op)
{
+ const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_head *head_ref_ret;
struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_ref generic_ref = {
+ .type = BTRFS_REF_METADATA,
+ .action = BTRFS_UPDATE_DELAYED_HEAD,
+ .bytenr = bytenr,
+ .num_bytes = num_bytes,
+ .tree_ref.level = level,
+ };
+ int ret;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
return -ENOMEM;
- init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
- BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
- false);
+ init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
- NULL);
+ ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
+ if (ret) {
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ return ret;
+ }
+ spin_lock(&delayed_refs->lock);
+ head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
+ BTRFS_UPDATE_DELAYED_HEAD, NULL);
+ if (IS_ERR(head_ref_ret)) {
+ xa_release(&delayed_refs->head_refs, index);
+ spin_unlock(&delayed_refs->lock);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ return PTR_ERR(head_ref_ret);
+ }
spin_unlock(&delayed_refs->lock);
/*
@@ -1102,53 +1179,192 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
return 0;
}
+void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
+{
+ if (refcount_dec_and_test(&ref->refs)) {
+ WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
+ }
+}
+
/*
* This does a simple search for the head node for a given extent. Returns the
* head node if found, or NULL if not.
*/
struct btrfs_delayed_ref_head *
-btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
+btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 bytenr)
{
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
+
lockdep_assert_held(&delayed_refs->lock);
- return find_ref_head(delayed_refs, bytenr, false);
+ return xa_load(&delayed_refs->head_refs, index);
+}
+
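/*
 * Aside (not part of the patch): a standalone demonstration of the
 * bytenr >> sectorsize_bits shift used just above as the xarray index.
 * The 4 KiB sector size is an assumption for the example; compile with any
 * C compiler and run.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int sectorsize_bits = 12;	/* 4 KiB sectors */
	const uint64_t bytenr = 1ULL << 40;		/* extent at 1 TiB */
	uint64_t index = bytenr >> sectorsize_bits;

	/* 1 TiB >> 12 = 268435456, which still fits in a 32-bit index. */
	printf("index for bytenr %llu is %llu\n",
	       (unsigned long long)bytenr, (unsigned long long)index);

	/* A 32-bit unsigned long index then covers 2^32 * 4 KiB = 16 TiB. */
	printf("max addressable bytenr on 32-bit: %llu\n",
	       (unsigned long long)(((1ULL << 32) - 1) << sectorsize_bits));
	return 0;
}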
+static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
+{
+ int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
+
+ if (type < entry->type)
+ return -1;
+ if (type > entry->type)
+ return 1;
+
+ if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (root < entry->ref_root)
+ return -1;
+ if (root > entry->ref_root)
+ return 1;
+ } else {
+ if (parent < entry->parent)
+ return -1;
+ if (parent > entry->parent)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Check to see if a given root/parent reference is attached to the head. This
+ * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
+ * indicates the reference exists for the given root or parent. This is for
+ * tree blocks only.
+ *
+ * @head: the head of the bytenr we're searching.
+ * @root: the root objectid of the reference if it is a normal reference.
+ * @parent: the parent if this is a shared backref.
+ */
+bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
+ u64 root, u64 parent)
+{
+ struct rb_node *node;
+ bool found = false;
+
+ lockdep_assert_held(&head->mutex);
+
+ spin_lock(&head->lock);
+ node = head->ref_tree.rb_root.rb_node;
+ while (node) {
+ struct btrfs_delayed_ref_node *entry;
+ int ret;
+
+ entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ ret = find_comp(entry, root, parent);
+ if (ret < 0) {
+ node = node->rb_left;
+ } else if (ret > 0) {
+ node = node->rb_right;
+ } else {
+ /*
+ * We only want to count ADD actions, as drops mean the
+ * ref doesn't exist.
+ */
+ if (entry->action == BTRFS_ADD_DELAYED_REF)
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&head->lock);
+ return found;
+}
+
+void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+
+ spin_lock(&delayed_refs->lock);
+ while (true) {
+ struct btrfs_delayed_ref_head *head;
+ struct rb_node *n;
+ bool pin_bytes = false;
+
+ head = find_first_ref_head(delayed_refs);
+ if (!head)
+ break;
+
+ if (!btrfs_delayed_ref_lock(delayed_refs, head))
+ continue;
+
+ spin_lock(&head->lock);
+ while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
+ struct btrfs_delayed_ref_node *ref;
+
+ ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
+ drop_delayed_ref(fs_info, delayed_refs, head, ref);
+ }
+ if (head->must_insert_reserved)
+ pin_bytes = true;
+ btrfs_free_delayed_extent_op(head->extent_op);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ mutex_unlock(&head->mutex);
+
+ if (!btrfs_is_testing(fs_info) && pin_bytes) {
+ struct btrfs_block_group *bg;
+
+ bg = btrfs_lookup_block_group(fs_info, head->bytenr);
+ if (WARN_ON_ONCE(bg == NULL)) {
+ /*
+ * Unexpected and there's nothing we can do here
+ * because we are in a transaction abort path,
+ * so any errors can only be ignored or reported
+				 * while attempting to clean up all resources.
+ */
+ btrfs_err(fs_info,
+"block group for delayed ref at %llu was not found while destroying ref head",
+ head->bytenr);
+ } else {
+ spin_lock(&bg->space_info->lock);
+ spin_lock(&bg->lock);
+ bg->pinned += head->num_bytes;
+ btrfs_space_info_update_bytes_pinned(bg->space_info,
+ head->num_bytes);
+ bg->reserved -= head->num_bytes;
+ bg->space_info->bytes_reserved -= head->num_bytes;
+ spin_unlock(&bg->lock);
+ spin_unlock(&bg->space_info->lock);
+
+ btrfs_put_block_group(bg);
+ }
+
+ btrfs_error_unpin_extent_range(fs_info, head->bytenr,
+ head->bytenr + head->num_bytes - 1);
+ }
+ if (!btrfs_is_testing(fs_info))
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
+ btrfs_put_delayed_ref_head(head);
+ cond_resched();
+ spin_lock(&delayed_refs->lock);
+ }
+
+ if (!btrfs_is_testing(fs_info))
+ btrfs_qgroup_destroy_extent_records(trans);
+
+ spin_unlock(&delayed_refs->lock);
}
void __cold btrfs_delayed_ref_exit(void)
{
kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
- kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
- kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+ kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int __init btrfs_delayed_ref_init(void)
{
- btrfs_delayed_ref_head_cachep = kmem_cache_create(
- "btrfs_delayed_ref_head",
- sizeof(struct btrfs_delayed_ref_head), 0,
- SLAB_MEM_SPREAD, NULL);
+ btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
if (!btrfs_delayed_ref_head_cachep)
- goto fail;
-
- btrfs_delayed_tree_ref_cachep = kmem_cache_create(
- "btrfs_delayed_tree_ref",
- sizeof(struct btrfs_delayed_tree_ref), 0,
- SLAB_MEM_SPREAD, NULL);
- if (!btrfs_delayed_tree_ref_cachep)
- goto fail;
+ return -ENOMEM;
- btrfs_delayed_data_ref_cachep = kmem_cache_create(
- "btrfs_delayed_data_ref",
- sizeof(struct btrfs_delayed_data_ref), 0,
- SLAB_MEM_SPREAD, NULL);
- if (!btrfs_delayed_data_ref_cachep)
+ btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
+ if (!btrfs_delayed_ref_node_cachep)
goto fail;
- btrfs_delayed_extent_op_cachep = kmem_cache_create(
- "btrfs_delayed_extent_op",
- sizeof(struct btrfs_delayed_extent_op), 0,
- SLAB_MEM_SPREAD, NULL);
+ btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
if (!btrfs_delayed_extent_op_cachep)
goto fail;
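/*
 * Aside (not part of the patch): KMEM_CACHE() is the <linux/slab.h> helper;
 * to the best of my knowledge it expands to roughly the open-coded call it
 * replaces, using the struct name and its natural alignment. The old
 * SLAB_MEM_SPREAD flag is simply dropped, as it is believed to have become
 * a no-op.
 */
#if 0	/* not compiled */
	btrfs_delayed_ref_head_cachep =
		kmem_cache_create("btrfs_delayed_ref_head",
				  sizeof(struct btrfs_delayed_ref_head),
				  __alignof__(struct btrfs_delayed_ref_head),
				  0, NULL);
#endif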
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 91a3aabad150..5ce940532144 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -6,20 +6,65 @@
#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H
+#include <linux/types.h>
#include <linux/refcount.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "fs.h"
+#include "messages.h"
+
+struct btrfs_trans_handle;
+struct btrfs_fs_info;
/* these are the possible values of struct btrfs_delayed_ref_node->action */
-#define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
-#define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
-#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
-#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
+enum btrfs_delayed_ref_action {
+ /* Add one backref to the tree */
+ BTRFS_ADD_DELAYED_REF = 1,
+ /* Delete one backref from the tree */
+ BTRFS_DROP_DELAYED_REF,
+ /* Record a full extent allocation */
+ BTRFS_ADD_DELAYED_EXTENT,
+ /* Not changing ref count on head ref */
+ BTRFS_UPDATE_DELAYED_HEAD,
+} __packed;
+
+struct btrfs_data_ref {
+ /* For EXTENT_DATA_REF */
+
+ /* Inode which refers to this data extent */
+ u64 objectid;
+
+ /*
+ * file_offset - extent_offset
+ *
+ * file_offset is the key.offset of the EXTENT_DATA key.
+ * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
+ */
+ u64 offset;
+};
+
+struct btrfs_tree_ref {
+ /*
+ * Level of this tree block.
+ *
+ * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
+ */
+ int level;
+
+ /* For non-skinny metadata, no special member needed */
+};
struct btrfs_delayed_ref_node {
struct rb_node ref_node;
/*
* If action is BTRFS_ADD_DELAYED_REF, also link this node to
* ref_head->ref_add_list, then we do not need to iterate the
- * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
+ * refs rbtree in the corresponding delayed ref head
+ * (struct btrfs_delayed_ref_head::ref_tree).
*/
struct list_head add_list;
@@ -32,6 +77,15 @@ struct btrfs_delayed_ref_node {
/* seq number to keep track of insertion order */
u64 seq;
+ /* The ref_root for this ref */
+ u64 ref_root;
+
+ /*
+ * The parent for this ref, if this isn't set the ref_root is the
+ * reference owner.
+ */
+ u64 parent;
+
/* ref count on this data structure */
refcount_t refs;
@@ -48,17 +102,17 @@ struct btrfs_delayed_ref_node {
unsigned int action:8;
unsigned int type:8;
- /* is this node still in the rbtree? */
- unsigned int is_head:1;
- unsigned int in_tree:1;
+
+ union {
+ struct btrfs_tree_ref tree_ref;
+ struct btrfs_data_ref data_ref;
+ };
};
struct btrfs_delayed_extent_op {
struct btrfs_disk_key key;
- u8 level;
bool update_key;
bool update_flags;
- bool is_data;
u64 flags_to_set;
};
@@ -71,20 +125,20 @@ struct btrfs_delayed_extent_op {
struct btrfs_delayed_ref_head {
u64 bytenr;
u64 num_bytes;
- refcount_t refs;
/*
* the mutex is held while running the refs, and it is also
* held when checking the sum of reference modifications.
*/
struct mutex mutex;
+ refcount_t refs;
+
+ /* Protects 'ref_tree' and 'ref_add_list'. */
spinlock_t lock;
struct rb_root_cached ref_tree;
/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
struct list_head ref_add_list;
- struct rb_node href_node;
-
struct btrfs_delayed_extent_op *extent_op;
/*
@@ -103,6 +157,21 @@ struct btrfs_delayed_ref_head {
int ref_mod;
/*
+ * The root that triggered the allocation when must_insert_reserved is
+ * set to true.
+ */
+ u64 owning_root;
+
+ /*
+ * Track reserved bytes when setting must_insert_reserved. On success
+ * or cleanup, we will need to free the reservation.
+ */
+ u64 reserved_bytes;
+
+ /* Tree block level, for metadata only. */
+ u8 level;
+
+ /*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
* until the delayed ref is processed. must_insert_reserved is
@@ -114,25 +183,16 @@ struct btrfs_delayed_ref_head {
* we need to update the in ram accounting to properly reflect
* the free has happened.
*/
- unsigned int must_insert_reserved:1;
- unsigned int is_data:1;
- unsigned int is_system:1;
- unsigned int processing:1;
-};
+ bool must_insert_reserved;
-struct btrfs_delayed_tree_ref {
- struct btrfs_delayed_ref_node node;
- u64 root;
- u64 parent;
- int level;
-};
-
-struct btrfs_delayed_data_ref {
- struct btrfs_delayed_ref_node node;
- u64 root;
- u64 parent;
- u64 objectid;
- u64 offset;
+ bool is_data;
+ bool is_system;
+ bool processing;
+ /*
+ * Indicate if it's currently in the data structure that tracks head
+ * refs (struct btrfs_delayed_ref_root::head_refs).
+ */
+ bool tracked;
};
enum btrfs_delayed_ref_flags {
@@ -141,30 +201,52 @@ enum btrfs_delayed_ref_flags {
};
struct btrfs_delayed_ref_root {
- /* head ref rbtree */
- struct rb_root_cached href_root;
-
- /* dirty extent records */
- struct rb_root dirty_extent_root;
+	/*
+	 * Track head references.
+	 * The keys correspond to the logical address of the extent ("bytenr")
+	 * right shifted by fs_info->sectorsize_bits. This is both to get a more
+	 * dense index space (it optimizes the xarray structure) and because
+	 * indexes in xarrays are of "unsigned long" type, meaning they are only
+	 * 32 bits wide on 32-bit platforms, which would limit the extent range
+	 * to 4G and make the index unusable (truncated values) on such platforms.
+	 * Protected by the spinlock 'lock' defined below.
+	 */
+ struct xarray head_refs;
- /* this spin lock protects the rbtree and the entries inside */
- spinlock_t lock;
+ /*
+ * Track dirty extent records.
+ * The keys correspond to the logical address of the extent ("bytenr")
+	 * right shifted by fs_info->sectorsize_bits, for the same reasons as above.
+ */
+ struct xarray dirty_extents;
- /* how many delayed ref updates we've queued, used by the
- * throttling code
+ /*
+ * Protects the xarray head_refs, its entries and the following fields:
+ * num_heads, num_heads_ready, pending_csums and run_delayed_start.
*/
- atomic_t num_entries;
+ spinlock_t lock;
- /* total number of head nodes in tree */
+ /* Total number of head refs, protected by the spinlock 'lock'. */
unsigned long num_heads;
- /* total number of head nodes ready for processing */
+ /*
+ * Total number of head refs ready for processing, protected by the
+ * spinlock 'lock'.
+ */
unsigned long num_heads_ready;
+ /*
+ * Track space reserved for deleting csums of data extents.
+ * Protected by the spinlock 'lock'.
+ */
u64 pending_csums;
unsigned long flags;
+ /*
+ * Track from which bytenr to start searching ref heads.
+ * Protected by the spinlock 'lock'.
+ */
u64 run_delayed_start;
/*
@@ -180,48 +262,11 @@ enum btrfs_ref_type {
BTRFS_REF_NOT_SET,
BTRFS_REF_DATA,
BTRFS_REF_METADATA,
- BTRFS_REF_LAST,
-};
-
-struct btrfs_data_ref {
- /* For EXTENT_DATA_REF */
-
- /* Original root this data extent belongs to */
- u64 owning_root;
-
- /* Inode which refers to this data extent */
- u64 ino;
-
- /*
- * file_offset - extent_offset
- *
- * file_offset is the key.offset of the EXTENT_DATA key.
- * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
- */
- u64 offset;
-};
-
-struct btrfs_tree_ref {
- /*
- * Level of this tree block
- *
- * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
- */
- int level;
-
- /*
- * Root which owns this tree block.
- *
- * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
- */
- u64 owning_root;
-
- /* For non-skinny metadata, no special member needed */
-};
+} __packed;
struct btrfs_ref {
enum btrfs_ref_type type;
- int action;
+ enum btrfs_delayed_ref_action action;
/*
* Whether this extent should go through qgroup record.
@@ -231,12 +276,15 @@ struct btrfs_ref {
*/
bool skip_qgroup;
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- /* Through which root is this modification. */
- u64 real_root;
-#endif
u64 bytenr;
- u64 len;
+ u64 num_bytes;
+ u64 owning_root;
+
+ /*
+	 * The root that owns this reference. Either this or ->parent will be
+	 * set, depending on what type of reference this is.
+ */
+ u64 ref_root;
/* Bytenr of the parent tree block */
u64 parent;
@@ -244,62 +292,57 @@ struct btrfs_ref {
struct btrfs_data_ref data_ref;
struct btrfs_tree_ref tree_ref;
};
+
+#ifdef CONFIG_BTRFS_DEBUG
+ /* Through which root is this modification. */
+ u64 real_root;
+#endif
};
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
-extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_ref_node_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);
-static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
- int action, u64 bytenr, u64 len, u64 parent)
+static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
+ int num_delayed_refs)
{
- generic_ref->action = action;
- generic_ref->bytenr = bytenr;
- generic_ref->len = len;
- generic_ref->parent = parent;
-}
+ u64 num_bytes;
-static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
- int level, u64 root, u64 mod_root, bool skip_qgroup)
-{
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- /* If @real_root not set, use @root as fallback */
- generic_ref->real_root = mod_root ?: root;
-#endif
- generic_ref->tree_ref.level = level;
- generic_ref->tree_ref.owning_root = root;
- generic_ref->type = BTRFS_REF_METADATA;
- if (skip_qgroup || !(is_fstree(root) &&
- (!mod_root || is_fstree(mod_root))))
- generic_ref->skip_qgroup = true;
- else
- generic_ref->skip_qgroup = false;
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);
+ /*
+ * We have to check the mount option here because we could be enabling
+ * the free space tree for the first time and don't have the compat_ro
+ * option set yet.
+ *
+ * We need extra reservations if we have the free space tree because
+ * we'll have to modify that tree as well.
+ */
+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
+ num_bytes *= 2;
+
+ return num_bytes;
}
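/*
 * Aside (not part of the patch): a standalone back-of-the-envelope check of
 * btrfs_calc_delayed_ref_bytes(), assuming the usual helper definitions
 * (btrfs_calc_insert_metadata_size() == nodesize * BTRFS_MAX_LEVEL * 2 *
 * num_items, BTRFS_MAX_LEVEL == 8). Treat the numbers as illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define BTRFS_MAX_LEVEL 8

static uint64_t calc_insert_metadata_size(uint64_t nodesize, unsigned int num_items)
{
	/* Worst case: COW a full path, twice, per inserted item. */
	return nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

int main(void)
{
	const uint64_t nodesize = 16 * 1024;	/* default 16 KiB nodes assumed */
	uint64_t bytes = calc_insert_metadata_size(nodesize, 1);

	printf("one delayed ref reserves %llu bytes\n", (unsigned long long)bytes);
	/* With the free space tree mounted the reservation is doubled. */
	printf("with free space tree: %llu bytes\n", (unsigned long long)(bytes * 2));
	return 0;
}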
-static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
- u64 ref_root, u64 ino, u64 offset, u64 mod_root,
- bool skip_qgroup)
+static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,
+ int num_csum_items)
{
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- /* If @real_root not set, use @root as fallback */
- generic_ref->real_root = mod_root ?: ref_root;
-#endif
- generic_ref->data_ref.owning_root = ref_root;
- generic_ref->data_ref.ino = ino;
- generic_ref->data_ref.offset = offset;
- generic_ref->type = BTRFS_REF_DATA;
- if (skip_qgroup || !(is_fstree(ref_root) &&
- (!mod_root || is_fstree(mod_root))))
- generic_ref->skip_qgroup = true;
- else
- generic_ref->skip_qgroup = false;
+ /*
+ * Deleting csum items does not result in new nodes/leaves and does not
+ * require changing the free space tree, only the csum tree, so this is
+ * all we need.
+ */
+ return btrfs_calc_metadata_size(fs_info, num_csum_items);
}
+void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
+ bool skip_qgroup);
+void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
+ u64 mod_root, bool skip_qgroup);
+
static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
@@ -313,25 +356,7 @@ btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
-static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
-{
- WARN_ON(refcount_read(&ref->refs) == 0);
- if (refcount_dec_and_test(&ref->refs)) {
- WARN_ON(ref->in_tree);
- switch (ref->type) {
- case BTRFS_TREE_BLOCK_REF_KEY:
- case BTRFS_SHARED_BLOCK_REF_KEY:
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
- break;
- case BTRFS_EXTENT_DATA_REF_KEY:
- case BTRFS_SHARED_DATA_REF_KEY:
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- break;
- default:
- BUG();
- }
- }
-}
+void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref);
static inline u64 btrfs_ref_head_to_space_flags(
struct btrfs_delayed_ref_head *head_ref)
@@ -356,52 +381,79 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes,
+ u64 bytenr, u64 num_bytes, u8 level,
struct btrfs_delayed_extent_op *extent_op);
-void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *
-btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
u64 bytenr);
-int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
mutex_unlock(&head->mutex);
}
-void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ const struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs);
+void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
+struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
+void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
+void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
+void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *src,
- u64 num_bytes);
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
+bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
+ u64 root, u64 parent);
+void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans);
-/*
- * helper functions to cast a node into its container
- */
-static inline struct btrfs_delayed_tree_ref *
-btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
+static inline u64 btrfs_delayed_ref_owner(const struct btrfs_delayed_ref_node *node)
+{
+ if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
+ node->type == BTRFS_SHARED_DATA_REF_KEY)
+ return node->data_ref.objectid;
+ return node->tree_ref.level;
+}
+
+static inline u64 btrfs_delayed_ref_offset(const struct btrfs_delayed_ref_node *node)
{
- return container_of(node, struct btrfs_delayed_tree_ref, node);
+ if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
+ node->type == BTRFS_SHARED_DATA_REF_KEY)
+ return node->data_ref.offset;
+ return 0;
}
-static inline struct btrfs_delayed_data_ref *
-btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
+static inline u8 btrfs_ref_type(const struct btrfs_ref *ref)
{
- return container_of(node, struct btrfs_delayed_data_ref, node);
+ ASSERT(ref->type == BTRFS_REF_DATA || ref->type == BTRFS_REF_METADATA);
+
+ if (ref->type == BTRFS_REF_DATA) {
+ if (ref->parent)
+ return BTRFS_SHARED_DATA_REF_KEY;
+ else
+ return BTRFS_EXTENT_DATA_REF_KEY;
+ } else {
+ if (ref->parent)
+ return BTRFS_SHARED_BLOCK_REF_KEY;
+ else
+ return BTRFS_TREE_BLOCK_REF_KEY;
+ }
+
+ return 0;
}
#endif
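/*
 * Aside (not part of the patch): the four-way mapping implemented by
 * btrfs_ref_type() above, shown as a standalone table-driven demo. The
 * strings only mirror the decision logic; the real key constants come from
 * uapi/linux/btrfs_tree.h.
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	static const struct {
		const char *kind;
		bool shared;		/* parent set -> shared backref */
		const char *key_type;
	} map[] = {
		{ "data",     false, "BTRFS_EXTENT_DATA_REF_KEY" },
		{ "data",     true,  "BTRFS_SHARED_DATA_REF_KEY" },
		{ "metadata", false, "BTRFS_TREE_BLOCK_REF_KEY" },
		{ "metadata", true,  "BTRFS_SHARED_BLOCK_REF_KEY" },
	};

	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("%-8s parent=%d -> %s\n", map[i].kind, map[i].shared,
		       map[i].key_type);
	return 0;
}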
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index c85a7d44da79..b6c7da8e1bc8 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -11,18 +11,17 @@
#include <linux/math64.h>
#include "misc.h"
#include "ctree.h"
-#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
-#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
#include "block-group.h"
+#include "fs.h"
+#include "accessors.h"
+#include "scrub.h"
/*
* Device replace overview
@@ -39,14 +38,14 @@
* All new writes will be written to both target and source devices, so even
* if replace gets canceled, sources device still contains up-to-date data.
*
- * Location: handle_ops_on_dev_replace() from __btrfs_map_block()
+ * Location: handle_ops_on_dev_replace() from btrfs_map_block()
* Start: btrfs_dev_replace_start()
* End: btrfs_dev_replace_finishing()
* Content: Latest data/metadata
*
* - Copy existing extents
*
- * This happens by re-using scrub facility, as scrub also iterates through
+ * This happens by reusing the scrub facility, as scrub also iterates through
* existing extents from commit root.
*
* Location: scrub_write_block_to_dev_replace() from
@@ -77,7 +76,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
struct extent_buffer *eb;
int slot;
int ret = 0;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
int item_size;
struct btrfs_dev_replace_item *ptr;
u64 src_devid;
@@ -86,10 +85,8 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
return 0;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
key.objectid = 0;
key.type = BTRFS_DEV_REPLACE_KEY;
@@ -101,13 +98,11 @@ no_valid_dev_replace_entry_found:
* We don't have a replace item or it's corrupted. If there is
* a replace target, fail the mount.
*/
- if (btrfs_find_device(fs_info->fs_devices, &args)) {
+ if (unlikely(btrfs_find_device(fs_info->fs_devices, &args))) {
btrfs_err(fs_info,
"found replace target device without a valid replace item");
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
- ret = 0;
dev_replace->replace_state =
BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
dev_replace->cont_reading_from_srcdev_mode =
@@ -124,11 +119,11 @@ no_valid_dev_replace_entry_found:
dev_replace->tgtdev = NULL;
dev_replace->is_valid = 0;
dev_replace->item_needs_writeback = 0;
- goto out;
+ return 0;
}
slot = path->slots[0];
eb = path->nodes[0];
- item_size = btrfs_item_size_nr(eb, slot);
+ item_size = btrfs_item_size(eb, slot);
ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);
if (item_size != sizeof(struct btrfs_dev_replace_item)) {
@@ -163,9 +158,9 @@ no_valid_dev_replace_entry_found:
* We don't have an active replace item but if there is a
* replace target, fail the mount.
*/
- if (btrfs_find_device(fs_info->fs_devices, &args)) {
+ if (unlikely(btrfs_find_device(fs_info->fs_devices, &args))) {
btrfs_err(fs_info,
- "replace devid present without an active replace item");
+"replace without active item, run 'device scan --forget' on the target device");
ret = -EUCLEAN;
} else {
dev_replace->srcdev = NULL;
@@ -182,8 +177,7 @@ no_valid_dev_replace_entry_found:
* allow 'btrfs dev replace_cancel' if src/tgt device is
* missing
*/
- if (!dev_replace->srcdev &&
- !btrfs_test_opt(fs_info, DEGRADED)) {
+ if (unlikely(!dev_replace->srcdev && !btrfs_test_opt(fs_info, DEGRADED))) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -191,8 +185,7 @@ no_valid_dev_replace_entry_found:
"srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
src_devid);
}
- if (!dev_replace->tgtdev &&
- !btrfs_test_opt(fs_info, DEGRADED)) {
+ if (unlikely(!dev_replace->tgtdev && !btrfs_test_opt(fs_info, DEGRADED))) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -227,8 +220,6 @@ no_valid_dev_replace_entry_found:
break;
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -243,9 +234,10 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
+ struct file *bdev_file;
struct block_device *bdev;
- struct rcu_string *name;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
int ret = 0;
@@ -255,12 +247,13 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return -EINVAL;
}
- bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
- fs_info->bdev_holder);
- if (IS_ERR(bdev)) {
+ bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
+ fs_info->sb, &fs_holder_ops);
+ if (IS_ERR(bdev_file)) {
btrfs_err(fs_info, "target device %s is invalid!", device_path);
- return PTR_ERR(bdev);
+ return PTR_ERR(bdev_file);
}
+ bdev = file_bdev(bdev_file);
if (!btrfs_check_device_zone_type(fs_info, bdev)) {
btrfs_err(fs_info,
@@ -271,7 +264,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
sync_blockdev(bdev);
- list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev == bdev) {
btrfs_err(fs_info,
"target device is in the filesystem!");
@@ -289,19 +282,15 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
}
- device = btrfs_alloc_device(NULL, &devid, NULL);
+ device = btrfs_alloc_device(NULL, &devid, NULL, device_path);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto error;
}
- name = rcu_string_strdup(device_path, GFP_KERNEL);
- if (!name) {
- btrfs_free_device(device);
- ret = -ENOMEM;
+ ret = lookup_bdev(device_path, &device->devt);
+ if (ret)
goto error;
- }
- rcu_assign_pointer(device->name, name);
set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
device->generation = 0;
@@ -315,28 +304,28 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
device->commit_bytes_used = device->bytes_used;
device->fs_info = fs_info;
device->bdev = bdev;
+ device->bdev_file = bdev_file;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
- device->mode = FMODE_EXCL;
device->dev_stats_valid = 1;
- set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
- device->fs_devices = fs_info->fs_devices;
+ set_blocksize(bdev_file, BTRFS_BDEV_BLOCKSIZE);
+ device->fs_devices = fs_devices;
- ret = btrfs_get_dev_zone_info(device);
+ ret = btrfs_get_dev_zone_info(device, false);
if (ret)
goto error;
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
- list_add(&device->dev_list, &fs_info->fs_devices->devices);
- fs_info->fs_devices->num_devices++;
- fs_info->fs_devices->open_devices++;
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_add(&device->dev_list, &fs_devices->devices);
+ fs_devices->num_devices++;
+ fs_devices->open_devices++;
+ mutex_unlock(&fs_devices->device_list_mutex);
*device_out = device;
return 0;
error:
- blkdev_put(bdev, FMODE_EXCL);
+ bdev_fput(bdev_file);
return ret;
}
@@ -349,7 +338,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_root *dev_root = fs_info->dev_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
struct btrfs_dev_replace_item *ptr;
@@ -368,20 +357,19 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
key.offset = 0;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(fs_info,
"error %d while searching for dev_replace item!",
ret);
- goto out;
+ return ret;
}
if (ret == 0 &&
- btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
+ btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
/*
* need to delete old one and insert a new one.
* Since no attempt is made to recover any old state, if the
@@ -398,7 +386,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
btrfs_warn(fs_info,
"delete too small dev_replace item failed %d!",
ret);
- goto out;
+ return ret;
}
ret = 1;
}
@@ -411,7 +399,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
if (ret < 0) {
btrfs_warn(fs_info,
"insert dev_replace item failed %d!", ret);
- goto out;
+ return ret;
}
}
@@ -444,22 +432,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
dev_replace->item_needs_writeback = 0;
up_write(&dev_replace->rwsem);
- btrfs_mark_buffer_dirty(eb);
-
-out:
- btrfs_free_path(path);
-
return ret;
}
-static char* btrfs_dev_name(struct btrfs_device *device)
-{
- if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
- return "<missing disk>";
- else
- return rcu_str_deref(device->name);
-}
-
static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
struct btrfs_device *src_dev)
{
@@ -470,6 +445,7 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_block_group *cache;
struct btrfs_trans_handle *trans;
+ int iter_ret = 0;
int ret = 0;
u64 chunk_offset;
@@ -513,36 +489,15 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
}
path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
key.objectid = src_dev->devid;
key.type = BTRFS_DEV_EXTENT_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto free_path;
- if (ret > 0) {
- if (path->slots[0] >=
- btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto free_path;
- if (ret > 0) {
- ret = 0;
- goto free_path;
- }
- } else {
- ret = 0;
- }
- }
-
- while (1) {
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
-
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid != src_dev->devid)
break;
@@ -553,30 +508,20 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
if (found_key.offset < key.offset)
break;
- dev_extent = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+ dev_extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
if (!cache)
- goto skip;
-
- spin_lock(&cache->lock);
- cache->to_copy = 1;
- spin_unlock(&cache->lock);
+ continue;
+ set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
btrfs_put_block_group(cache);
-
-skip:
- ret = btrfs_next_item(root, path);
- if (ret != 0) {
- if (ret > 0)
- ret = 0;
- break;
- }
}
+ if (iter_ret < 0)
+ ret = iter_ret;
-free_path:
btrfs_free_path(path);
unlock:
mutex_unlock(&fs_info->chunk_mutex);
@@ -589,8 +534,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
u64 physical)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
u64 chunk_offset = cache->start;
int num_extents, cur_extent;
int i;
@@ -600,17 +544,17 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
return true;
spin_lock(&cache->lock);
- if (cache->removed) {
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
spin_unlock(&cache->lock);
return true;
}
spin_unlock(&cache->lock);
- em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
- ASSERT(!IS_ERR(em));
- map = em->map_lookup;
+ map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
+ ASSERT(!IS_ERR(map));
- num_extents = cur_extent = 0;
+ num_extents = 0;
+ cur_extent = 0;
for (i = 0; i < map->num_stripes; i++) {
/* We have more device extent to copy */
if (srcdev != map->stripes[i].dev)
@@ -621,7 +565,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
cur_extent = i;
}
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
if (num_extents > 1 && cur_extent < num_extents - 1) {
/*
@@ -632,9 +576,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
}
/* Last stripe on this device */
- spin_lock(&cache->lock);
- cache->to_copy = 0;
- spin_unlock(&cache->lock);
+ clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
return true;
}
@@ -656,7 +598,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return PTR_ERR(src_device);
if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"cannot replace device %s (devid %llu) due to active swapfile",
btrfs_dev_name(src_device), src_device->devid);
return -ETXTBSY;
@@ -685,6 +627,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return ret;
down_write(&dev_replace->rwsem);
+ dev_replace->replace_task = current;
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
@@ -692,7 +635,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
- ASSERT(0);
+ DEBUG_WARN("unexpected STARTED or SUSPENDED dev-replace state");
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
up_write(&dev_replace->rwsem);
goto leave;
@@ -702,11 +645,11 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
dev_replace->srcdev = src_device;
dev_replace->tgtdev = tgt_device;
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"dev_replace from %s (devid %llu) to %s started",
btrfs_dev_name(src_device),
src_device->devid,
- rcu_str_deref(tgt_device->name));
+ btrfs_dev_name(tgt_device));
/*
* from now on, the writes to the srcdev are all duplicated to
@@ -728,9 +671,14 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
if (ret)
btrfs_err(fs_info, "kobj add dev failed %d", ret);
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
- /* Commit dev_replace state and reserve 1 item for it. */
+ /*
+ * Commit dev_replace state and reserve 1 item for it.
+ * This is crucial to ensure we won't miss copying extents for new block
+ * groups that are allocated after we started the device replace, and
+ * must be done after setting up the device replace state.
+ */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -762,6 +710,23 @@ leave:
return ret;
}
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+ if (args->start.srcdevid == 0) {
+ if (memchr(args->start.srcdev_name, 0,
+ sizeof(args->start.srcdev_name)) == NULL)
+ return -ENAMETOOLONG;
+ } else {
+ args->start.srcdev_name[0] = 0;
+ }
+
+ if (memchr(args->start.tgtdev_name, 0,
+ sizeof(args->start.tgtdev_name)) == NULL)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
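/*
 * Aside (not part of the patch): btrfs_check_replace_dev_names() above rejects
 * device names that are not NUL terminated within the fixed-size ioctl
 * buffers. A standalone illustration of that memchr() idiom:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char good[8] = "sda";		/* remaining bytes are zero-initialized */
	char bad[8];

	memset(bad, 'x', sizeof(bad));	/* no terminator anywhere in the buffer */

	printf("good terminated: %s\n", memchr(good, 0, sizeof(good)) ? "yes" : "no");
	printf("bad terminated:  %s\n", memchr(bad, 0, sizeof(bad)) ? "yes" : "no");
	return 0;
}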
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
@@ -774,10 +739,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
default:
return -EINVAL;
}
-
- if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
- args->start.tgtdev_name[0] == '\0')
- return -EINVAL;
+ ret = btrfs_check_replace_dev_names(args);
+ if (ret < 0)
+ return ret;
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
@@ -828,17 +792,17 @@ static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
lockdep_assert_held(&srcdev->fs_info->chunk_mutex);
- while (!find_first_extent_bit(&srcdev->alloc_state, start,
- &found_start, &found_end,
- CHUNK_ALLOCATED, &cached_state)) {
- ret = set_extent_bits(&tgtdev->alloc_state, found_start,
- found_end, CHUNK_ALLOCATED);
+ while (btrfs_find_first_extent_bit(&srcdev->alloc_state, start,
+ &found_start, &found_end,
+ CHUNK_ALLOCATED, &cached_state)) {
+ ret = btrfs_set_extent_bit(&tgtdev->alloc_state, found_start,
+ found_end, CHUNK_ALLOCATED, NULL);
if (ret)
break;
start = found_end + 1;
}
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return ret;
}
@@ -847,31 +811,53 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
struct btrfs_device *srcdev,
struct btrfs_device *tgtdev)
{
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct map_lookup *map;
- u64 start = 0;
- int i;
+ struct rb_node *node;
- write_lock(&em_tree->lock);
- do {
- em = lookup_extent_mapping(em_tree, start, (u64)-1);
- if (!em)
- break;
- map = em->map_lookup;
- for (i = 0; i < map->num_stripes; i++)
+ /*
+ * The chunk mutex must be held so that no new chunks can be created
+ * while we are updating existing chunks. This guarantees we don't miss
+ * any new chunk that gets created for a range that falls before the
+ * range of the last chunk we processed.
+ */
+ lockdep_assert_held(&fs_info->chunk_mutex);
+
+ write_lock(&fs_info->mapping_tree_lock);
+ node = rb_first_cached(&fs_info->mapping_tree);
+ while (node) {
+ struct rb_node *next = rb_next(node);
+ struct btrfs_chunk_map *map;
+ u64 next_start;
+
+ map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+ next_start = map->start + map->chunk_len;
+
+ for (int i = 0; i < map->num_stripes; i++)
if (srcdev == map->stripes[i].dev)
map->stripes[i].dev = tgtdev;
- start = em->start + em->len;
- free_extent_map(em);
- } while (start);
- write_unlock(&em_tree->lock);
+
+ if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
+ map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
+ if (!map)
+ break;
+ node = &map->rb_node;
+ /*
+ * Drop the lookup reference since we are holding the
+ * lock in write mode and no one can remove the chunk
+ * map from the tree and drop its tree reference.
+ */
+ btrfs_free_chunk_map(map);
+ } else {
+ node = next;
+ }
+ }
+ write_unlock(&fs_info->mapping_tree_lock);
}
static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
int scrub_ret)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *tgt_device;
struct btrfs_device *src_device;
struct btrfs_root *root = fs_info->tree_root;
@@ -904,10 +890,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
}
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
-
- if (!scrub_ret)
- btrfs_reada_remove_dev(src_device);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
/*
* We have to use this loop approach because at this point src_device
@@ -917,7 +900,6 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
while (1) {
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
- btrfs_reada_undo_remove_dev(src_device);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return PTR_ERR(trans);
}
@@ -925,12 +907,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
WARN_ON(ret);
/* Prevent write_all_supers() during the finishing procedure */
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_devices->device_list_mutex);
/* Prevent new chunks being allocated on the source device */
mutex_lock(&fs_info->chunk_mutex);
if (!list_empty(&src_device->post_commit_list)) {
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
mutex_unlock(&fs_info->chunk_mutex);
} else {
break;
@@ -959,16 +941,15 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
tgt_device);
} else {
if (scrub_ret != -ECANCELED)
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
btrfs_dev_name(src_device),
src_device->devid,
- rcu_str_deref(tgt_device->name), scrub_ret);
+ btrfs_dev_name(tgt_device), scrub_ret);
error:
up_write(&dev_replace->rwsem);
mutex_unlock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- btrfs_reada_undo_remove_dev(src_device);
+ mutex_unlock(&fs_devices->device_list_mutex);
btrfs_rm_dev_replace_blocked(fs_info);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(tgt_device);
@@ -978,11 +959,11 @@ error:
return scrub_ret;
}
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"dev_replace from %s (devid %llu) to %s finished",
btrfs_dev_name(src_device),
src_device->devid,
- rcu_str_deref(tgt_device->name));
+ btrfs_dev_name(tgt_device));
clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID;
@@ -997,9 +978,10 @@ error:
btrfs_assign_next_active_device(src_device, tgt_device);
- list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
- fs_info->fs_devices->rw_devices++;
+ list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
+ fs_devices->rw_devices++;
+ dev_replace->replace_task = NULL;
up_write(&dev_replace->rwsem);
btrfs_rm_dev_replace_blocked(fs_info);
@@ -1021,14 +1003,13 @@ error:
* belong to this filesystem.
*/
mutex_unlock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
/* replace the sysfs entry */
btrfs_sysfs_remove_device(src_device);
btrfs_sysfs_update_devid(tgt_device);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
- btrfs_scratch_superblocks(fs_info, src_device->bdev,
- src_device->name->str);
+ btrfs_scratch_superblocks(fs_info, src_device);
/* write back the superblocks */
trans = btrfs_start_transaction(root, 0);
@@ -1126,7 +1107,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
* btrfs_dev_replace_finishing() will handle the
* cleanup part
*/
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"dev_replace from %s (devid %llu) to %s canceled",
btrfs_dev_name(src_device), src_device->devid,
btrfs_dev_name(tgt_device));
@@ -1150,8 +1131,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
up_write(&dev_replace->rwsem);
/* Scrub for replace must not be running in suspended state */
- ret = btrfs_scrub_cancel(fs_info);
- ASSERT(ret != -ENOTCONN);
+ btrfs_scrub_cancel(fs_info);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
@@ -1161,7 +1141,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"suspended dev_replace from %s (devid %llu) to %s canceled",
btrfs_dev_name(src_device), src_device->devid,
btrfs_dev_name(tgt_device));
@@ -1265,7 +1245,7 @@ static int btrfs_dev_replace_kthread(void *data)
progress = btrfs_dev_replace_progress(fs_info);
progress = div_u64(progress, 10);
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"continuing dev_replace from %s (devid %llu) to target %s @%u%%",
btrfs_dev_name(dev_replace->srcdev),
dev_replace->srcdev->devid,
@@ -1283,16 +1263,16 @@ static int btrfs_dev_replace_kthread(void *data)
return 0;
}
-int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+bool __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
if (!dev_replace->is_valid)
- return 0;
+ return false;
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
- return 0;
+ return false;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
/*
@@ -1307,12 +1287,7 @@ int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
*/
break;
}
- return 1;
-}
-
-void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
-{
- percpu_counter_inc(&fs_info->dev_replace.bio_counter);
+ return true;
}
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index 3911049a5f23..b35cecf388f2 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -6,7 +6,15 @@
#ifndef BTRFS_DEV_REPLACE_H
#define BTRFS_DEV_REPLACE_H
+#include <linux/types.h>
+#include <linux/compiler_types.h>
+
struct btrfs_ioctl_dev_replace_args;
+struct btrfs_fs_info;
+struct btrfs_trans_handle;
+struct btrfs_dev_replace;
+struct btrfs_block_group;
+struct btrfs_device;
int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans);
@@ -17,9 +25,17 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
-int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+bool __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
struct btrfs_block_group *cache,
u64 physical);
+void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
+void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);
+
+static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
+{
+ btrfs_bio_counter_sub(fs_info, 1);
+}
+
#endif
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 7721ce0c0604..085a83ae9e62 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -3,9 +3,13 @@
* Copyright (C) 2007 Oracle. All rights reserved.
*/
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
+#include "accessors.h"
+#include "dir-item.h"
+#include "delayed-inode.h"
/*
* insert a name into a directory, doing overflow properly if there is a hash
@@ -19,32 +23,29 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
*trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct btrfs_key *cpu_key,
+ const struct btrfs_key *cpu_key,
u32 data_size,
const char *name,
int name_len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *ptr;
- struct btrfs_item *item;
struct extent_buffer *leaf;
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (ret == -EEXIST) {
struct btrfs_dir_item *di;
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ di = btrfs_match_dir_item_name(path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
- btrfs_extend_item(path, data_size);
+ btrfs_extend_item(trans, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
leaf = path->nodes[0];
- item = btrfs_item_nr(path->slots[0]);
ptr = btrfs_item_ptr(leaf, path->slots[0], char);
- BUG_ON(data_size > btrfs_item_size(leaf, item));
- ptr += btrfs_item_size(leaf, item) - data_size;
+ ASSERT(data_size <= btrfs_item_size(leaf, path->slots[0]));
+ ptr += btrfs_item_size(leaf, path->slots[0]) - data_size;
return (struct btrfs_dir_item *)ptr;
}
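
insert_with_overflow() above handles a hash collision that is not an exact name match by extending the existing leaf item and returning a pointer to the freshly added space at its tail (ptr += item_size - data_size). A tiny userspace sketch of that "grow the item, return the tail" idea, with a realloc'ed byte buffer standing in for the leaf item (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item {
	size_t size;
	unsigned char *data;
};

/* Grow the item by data_size and return a pointer to the new tail space. */
static void *item_extend(struct item *it, size_t data_size)
{
	unsigned char *p = realloc(it->data, it->size + data_size);

	if (!p)
		return NULL;
	it->data = p;
	it->size += data_size;
	/* Same arithmetic as insert_with_overflow(): end minus data_size. */
	return it->data + it->size - data_size;
}

int main(void)
{
	struct item it = { 0, NULL };
	char *slot;

	slot = item_extend(&it, 4);
	if (slot)
		memcpy(slot, "dir1", 4);
	slot = item_extend(&it, 4);
	if (slot)
		memcpy(slot, "dir2", 4);
	printf("item size: %zu\n", it.size);
	free(it.data);
	return 0;
}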
@@ -83,7 +84,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
btrfs_cpu_key_to_disk(&disk_key, &location);
btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
- btrfs_set_dir_type(leaf, dir_item, BTRFS_FT_XATTR);
+ btrfs_set_dir_flags(leaf, dir_item, BTRFS_FT_XATTR);
btrfs_set_dir_name_len(leaf, dir_item, name_len);
btrfs_set_dir_transid(leaf, dir_item, trans->transid);
btrfs_set_dir_data_len(leaf, dir_item, data_len);
@@ -92,7 +93,6 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, name, name_ptr, name_len);
write_extent_buffer(leaf, data, data_ptr, data_len);
- btrfs_mark_buffer_dirty(path->nodes[0]);
return ret;
}
@@ -105,14 +105,14 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* to use for the second index (if one is created).
* Will return 0 or -ENOMEM
*/
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
- int name_len, struct btrfs_inode *dir,
- struct btrfs_key *location, u8 type, u64 index)
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+ const struct fscrypt_str *name, struct btrfs_inode *dir,
+ const struct btrfs_key *location, u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
struct btrfs_root *root = dir->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *dir_item;
struct extent_buffer *leaf;
unsigned long name_ptr;
@@ -122,7 +122,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
key.objectid = btrfs_ino(dir);
key.type = BTRFS_DIR_ITEM_KEY;
- key.offset = btrfs_name_hash(name, name_len);
+ key.offset = btrfs_name_hash(name->name, name->len);
path = btrfs_alloc_path();
if (!path)
@@ -130,9 +130,9 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
btrfs_cpu_key_to_disk(&disk_key, location);
- data_size = sizeof(*dir_item) + name_len;
+ data_size = sizeof(*dir_item) + name->len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
- name, name_len);
+ name->name, name->len);
if (IS_ERR(dir_item)) {
ret = PTR_ERR(dir_item);
if (ret == -EEXIST)
@@ -140,16 +140,18 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
goto out_free;
}
+ if (IS_ENCRYPTED(&dir->vfs_inode))
+ type |= BTRFS_FT_ENCRYPTED;
+
leaf = path->nodes[0];
btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
- btrfs_set_dir_type(leaf, dir_item, type);
+ btrfs_set_dir_flags(leaf, dir_item, type);
btrfs_set_dir_data_len(leaf, dir_item, 0);
- btrfs_set_dir_name_len(leaf, dir_item, name_len);
+ btrfs_set_dir_name_len(leaf, dir_item, name->len);
btrfs_set_dir_transid(leaf, dir_item, trans->transid);
name_ptr = (unsigned long)(dir_item + 1);
- write_extent_buffer(leaf, name, name_ptr, name_len);
- btrfs_mark_buffer_dirty(leaf);
+ write_extent_buffer(leaf, name->name, name_ptr, name->len);
second_insert:
/* FIXME, use some real flag for selecting the extra index */
@@ -159,10 +161,9 @@ second_insert:
}
btrfs_release_path(path);
- ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
+ ret2 = btrfs_insert_delayed_dir_index(trans, name->name, name->len, dir,
&disk_key, type, index);
out_free:
- btrfs_free_path(path);
if (ret)
return ret;
if (ret2)
@@ -186,7 +187,7 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
if (ret > 0)
return ERR_PTR(-ENOENT);
- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+ return btrfs_match_dir_item_name(path, name, name_len);
}
/*
@@ -208,7 +209,7 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- const char *name, int name_len,
+ const struct fscrypt_str *name,
int mod)
{
struct btrfs_key key;
@@ -216,17 +217,18 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
key.objectid = dir;
key.type = BTRFS_DIR_ITEM_KEY;
- key.offset = btrfs_name_hash(name, name_len);
+ key.offset = btrfs_name_hash(name->name, name->len);
- di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name->name,
+ name->len, mod);
if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
return NULL;
return di;
}
-int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
- const char *name, int name_len)
+int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino,
+ const struct fscrypt_str *name)
{
int ret;
struct btrfs_key key;
@@ -234,53 +236,45 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
int data_size;
struct extent_buffer *leaf;
int slot;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- key.objectid = dir;
+ key.objectid = dir_ino;
key.type = BTRFS_DIR_ITEM_KEY;
- key.offset = btrfs_name_hash(name, name_len);
+ key.offset = btrfs_name_hash(name->name, name->len);
- di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
+ di = btrfs_lookup_match_dir(NULL, root, path, &key, name->name,
+ name->len, 0);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
/* Nothing found, we're safe */
- if (ret == -ENOENT) {
- ret = 0;
- goto out;
- }
+ if (ret == -ENOENT)
+ return 0;
if (ret < 0)
- goto out;
+ return ret;
}
/* we found an item, look for our name in the item */
if (di) {
/* our exact name was found */
- ret = -EEXIST;
- goto out;
+ return -EEXIST;
}
- /*
- * see if there is room in the item to insert this
- * name
- */
- data_size = sizeof(*di) + name_len;
+ /* See if there is room in the item to insert this name. */
+ data_size = sizeof(*di) + name->len;
leaf = path->nodes[0];
slot = path->slots[0];
- if (data_size + btrfs_item_size_nr(leaf, slot) +
+ if (data_size + btrfs_item_size(leaf, slot) +
sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
- ret = -EOVERFLOW;
- } else {
- /* plenty of insertion room */
- ret = 0;
+ return -EOVERFLOW;
}
-out:
- btrfs_free_path(path);
- return ret;
+
+ /* Plenty of insertion room. */
+ return 0;
}
/*
@@ -305,8 +299,7 @@ struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 index, const char *name, int name_len,
- int mod)
+ u64 index, const struct fscrypt_str *name, int mod)
{
struct btrfs_dir_item *di;
struct btrfs_key key;
@@ -315,7 +308,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = index;
- di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name->name,
+ name->len, mod);
if (di == ERR_PTR(-ENOENT))
return NULL;
@@ -323,51 +317,30 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
}
struct btrfs_dir_item *
-btrfs_search_dir_index_item(struct btrfs_root *root,
- struct btrfs_path *path, u64 dirid,
- const char *name, int name_len)
+btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path,
+ u64 dirid, const struct fscrypt_str *name)
{
- struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
- u32 nritems;
int ret;
key.objectid = dirid;
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- return ERR_PTR(ret);
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
-
- while (1) {
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- return ERR_PTR(ret);
- if (ret > 0)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- continue;
- }
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_for_each_slot(root, &key, &key, path, ret) {
if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
break;
- di = btrfs_match_dir_item_name(root->fs_info, path,
- name, name_len);
+ di = btrfs_match_dir_item_name(path, name->name, name->len);
if (di)
return di;
-
- path->slots[0]++;
}
- return NULL;
+ /* Adjust return code if the key was not found in the next leaf. */
+ if (ret >= 0)
+ ret = -ENOENT;
+
+ return ERR_PTR(ret);
}
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
@@ -395,8 +368,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+struct btrfs_dir_item *btrfs_match_dir_item_name(const struct btrfs_path *path,
const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
@@ -409,7 +381,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
- total_len = btrfs_item_size_nr(leaf, path->slots[0]);
+ total_len = btrfs_item_size(leaf, path->slots[0]);
while (cur < total_len) {
this_len = sizeof(*dir_item) +
btrfs_dir_name_len(leaf, dir_item) +
@@ -434,7 +406,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct btrfs_dir_item *di)
+ const struct btrfs_dir_item *di)
{
struct extent_buffer *leaf;
@@ -445,7 +417,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
sub_item_len = sizeof(*di) + btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di);
- item_len = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_len = btrfs_item_size(leaf, path->slots[0]);
if (sub_item_len == item_len) {
ret = btrfs_del_item(trans, root, path);
} else {
@@ -456,7 +428,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
- btrfs_truncate_item(path, item_len - sub_item_len, 1);
+ btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
}
return ret;
}
diff --git a/fs/btrfs/dir-item.h b/fs/btrfs/dir-item.h
new file mode 100644
index 000000000000..e52174a8baf9
--- /dev/null
+++ b/fs/btrfs/dir-item.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_DIR_ITEM_H
+#define BTRFS_DIR_ITEM_H
+
+#include <linux/types.h>
+#include <linux/crc32c.h>
+
+struct fscrypt_str;
+struct btrfs_fs_info;
+struct btrfs_key;
+struct btrfs_path;
+struct btrfs_inode;
+struct btrfs_root;
+struct btrfs_trans_handle;
+
+int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino,
+ const struct fscrypt_str *name);
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+ const struct fscrypt_str *name, struct btrfs_inode *dir,
+ const struct btrfs_key *location, u8 type, u64 index);
+struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 dir,
+ const struct fscrypt_str *name, int mod);
+struct btrfs_dir_item *btrfs_lookup_dir_index_item(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 dir,
+ u64 index, const struct fscrypt_str *name, int mod);
+struct btrfs_dir_item *btrfs_search_dir_index_item(struct btrfs_root *root,
+ struct btrfs_path *path, u64 dirid,
+ const struct fscrypt_str *name);
+int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_dir_item *di);
+int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid,
+ const char *name, u16 name_len,
+ const void *data, u16 data_len);
+struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 dir,
+ const char *name, u16 name_len,
+ int mod);
+struct btrfs_dir_item *btrfs_match_dir_item_name(const struct btrfs_path *path,
+ const char *name,
+ int name_len);
+
+static inline u64 btrfs_name_hash(const char *name, int len)
+{
+ return crc32c((u32)~1, name, len);
+}
+
+#endif
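
For reference, btrfs_name_hash() above seeds the kernel's crc32c() with (u32)~1 to turn a directory entry name into the key offset used for BTRFS_DIR_ITEM_KEY lookups. A minimal userspace sketch of that hashing, where the bitwise CRC32C routine stands in for the kernel's crc32c() and is illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
static uint32_t crc32c_le(uint32_t crc, const void *data, size_t len)
{
	const unsigned char *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
	}
	return crc;
}

/* Mirrors btrfs_name_hash(): crc32c seeded with (u32)~1 over the name. */
static uint64_t name_hash(const char *name, int len)
{
	return crc32c_le((uint32_t)~1, name, len);
}

int main(void)
{
	printf("hash(\"foo\") = %llu\n",
	       (unsigned long long)name_hash("foo", 3));
	return 0;
}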
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
new file mode 100644
index 000000000000..07e19e88ba4b
--- /dev/null
+++ b/fs/btrfs/direct-io.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/fsverity.h>
+#include <linux/iomap.h>
+#include "ctree.h"
+#include "delalloc-space.h"
+#include "direct-io.h"
+#include "extent-tree.h"
+#include "file.h"
+#include "fs.h"
+#include "transaction.h"
+#include "volumes.h"
+#include "bio.h"
+#include "ordered-data.h"
+
+struct btrfs_dio_data {
+ ssize_t submitted;
+ struct extent_changeset *data_reserved;
+ struct btrfs_ordered_extent *ordered;
+ bool data_space_reserved;
+ bool nocow_done;
+};
+
+struct btrfs_dio_private {
+ /* Range of I/O */
+ u64 file_offset;
+ u32 bytes;
+
+ /* This must be last */
+ struct btrfs_bio bbio;
+};
+
+static struct bio_set btrfs_dio_bioset;
+
+static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ struct extent_state **cached_state,
+ unsigned int iomap_flags)
+{
+ const bool writing = (iomap_flags & IOMAP_WRITE);
+ const bool nowait = (iomap_flags & IOMAP_NOWAIT);
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct btrfs_ordered_extent *ordered;
+ int ret = 0;
+
+ /* Direct lock must be taken before the extent lock. */
+ if (nowait) {
+ if (!btrfs_try_lock_dio_extent(io_tree, lockstart, lockend, cached_state))
+ return -EAGAIN;
+ } else {
+ btrfs_lock_dio_extent(io_tree, lockstart, lockend, cached_state);
+ }
+
+ while (1) {
+ if (nowait) {
+ if (!btrfs_try_lock_extent(io_tree, lockstart, lockend,
+ cached_state)) {
+ ret = -EAGAIN;
+ break;
+ }
+ } else {
+ btrfs_lock_extent(io_tree, lockstart, lockend, cached_state);
+ }
+ /*
+ * We're concerned with the entire range that we're going to be
+ * doing DIO to, so we need to make sure there are no ordered
+ * extents in this range.
+ */
+ ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
+ lockend - lockstart + 1);
+
+ /*
+ * We need to make sure there are no buffered pages in this
+ * range either, we could have raced between the invalidate in
+ * generic_file_direct_write and locking the extent. The
+ * invalidate needs to happen so that reads after a write do not
+ * get stale data.
+ */
+ if (!ordered &&
+ (!writing || !filemap_range_has_page(inode->i_mapping,
+ lockstart, lockend)))
+ break;
+
+ btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state);
+
+ if (ordered) {
+ if (nowait) {
+ btrfs_put_ordered_extent(ordered);
+ ret = -EAGAIN;
+ break;
+ }
+ /*
+ * If we are doing a DIO read and the ordered extent we
+ * found is for a buffered write, we can not wait for it
+ * to complete and retry, because if we do so we can
+ * deadlock with concurrent buffered writes on page
+ * locks. This happens only if our DIO read covers more
+ * than one extent map, if at this point it has already
+ * created an ordered extent for a previous extent map
+ * and locked its range in the inode's io tree, and a
+ * concurrent write against that previous extent map's
+ * range and this range has started (we unlock the ranges
+ * in the io tree only when the bios complete and
+ * buffered writes always lock pages before attempting
+ * to lock range in the io tree).
+ */
+ if (writing ||
+ test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
+ btrfs_start_ordered_extent(ordered);
+ else
+ ret = nowait ? -EAGAIN : -ENOTBLK;
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ /*
+ * We could trigger writeback for this range (and wait
+ * for it to complete) and then invalidate the pages for
+ * this range (through invalidate_inode_pages2_range()),
+ * but that can lead us to a deadlock with a concurrent
+ * call to readahead (a buffered read or a defrag call
+ * triggered a readahead) on a page lock due to an
+ * ordered dio extent we created before but did not yet
+ * have a corresponding bio submitted (so it cannot
+ * complete), which makes readahead wait for that
+ * ordered extent to complete while holding a lock on
+ * that page.
+ */
+ ret = nowait ? -EAGAIN : -ENOTBLK;
+ }
+
+ if (ret)
+ break;
+
+ cond_resched();
+ }
+
+ if (ret)
+ btrfs_unlock_dio_extent(io_tree, lockstart, lockend, cached_state);
+ return ret;
+}
+
+static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
+ struct btrfs_dio_data *dio_data,
+ const u64 start,
+ const struct btrfs_file_extent *file_extent,
+ const int type)
+{
+ struct extent_map *em = NULL;
+ struct btrfs_ordered_extent *ordered;
+
+ if (type != BTRFS_ORDERED_NOCOW) {
+ em = btrfs_create_io_em(inode, start, file_extent, type);
+ if (IS_ERR(em))
+ goto out;
+ }
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, file_extent,
+ (1U << type) |
+ (1U << BTRFS_ORDERED_DIRECT));
+ if (IS_ERR(ordered)) {
+ if (em) {
+ btrfs_free_extent_map(em);
+ btrfs_drop_extent_map_range(inode, start,
+ start + file_extent->num_bytes - 1, false);
+ }
+ em = ERR_CAST(ordered);
+ } else {
+ ASSERT(!dio_data->ordered);
+ dio_data->ordered = ordered;
+ }
+ out:
+
+ return em;
+}
+
+static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ struct btrfs_dio_data *dio_data,
+ u64 start, u64 len)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_file_extent file_extent;
+ struct extent_map *em;
+ struct btrfs_key ins;
+ u64 alloc_hint;
+ int ret;
+
+ alloc_hint = btrfs_get_extent_allocation_hint(inode, start, len);
+again:
+ ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 0, alloc_hint, &ins, true, true);
+ if (ret == -EAGAIN) {
+ ASSERT(btrfs_is_zoned(fs_info));
+ wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
+ TASK_UNINTERRUPTIBLE);
+ goto again;
+ }
+ if (ret)
+ return ERR_PTR(ret);
+
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = ins.offset;
+ file_extent.ram_bytes = ins.offset;
+ file_extent.offset = 0;
+ file_extent.compression = BTRFS_COMPRESS_NONE;
+ em = btrfs_create_dio_extent(inode, dio_data, start, &file_extent,
+ BTRFS_ORDERED_REGULAR);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ if (IS_ERR(em))
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
+
+ return em;
+}
+
+static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ struct inode *inode,
+ struct btrfs_dio_data *dio_data,
+ u64 start, u64 *lenp,
+ unsigned int iomap_flags)
+{
+ const bool nowait = (iomap_flags & IOMAP_NOWAIT);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_file_extent file_extent;
+ struct extent_map *em = *map;
+ int type;
+ u64 block_start;
+ struct btrfs_block_group *bg;
+ bool can_nocow = false;
+ bool space_reserved = false;
+ u64 len = *lenp;
+ u64 prev_len;
+ int ret = 0;
+
+ /*
+ * We don't allocate a new extent in the following cases
+ *
+ * 1) The inode is marked as NODATACOW. In this case we'll just use the
+ * existing extent.
+ * 2) The extent is marked as PREALLOC. We're good to go here and can
+ * just use the extent.
+ *
+ */
+ if ((em->flags & EXTENT_FLAG_PREALLOC) ||
+ ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ em->disk_bytenr != EXTENT_MAP_HOLE)) {
+ if (em->flags & EXTENT_FLAG_PREALLOC)
+ type = BTRFS_ORDERED_PREALLOC;
+ else
+ type = BTRFS_ORDERED_NOCOW;
+ len = min(len, em->len - (start - em->start));
+ block_start = btrfs_extent_map_block_start(em) + (start - em->start);
+
+ if (can_nocow_extent(BTRFS_I(inode), start, &len, &file_extent,
+ false) == 1) {
+ bg = btrfs_inc_nocow_writers(fs_info, block_start);
+ if (bg)
+ can_nocow = true;
+ }
+ }
+
+ prev_len = len;
+ if (can_nocow) {
+ struct extent_map *em2;
+
+ /* We can NOCOW, so only need to reserve metadata space. */
+ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
+ nowait);
+ if (ret < 0) {
+ /* Our caller expects us to free the input extent map. */
+ btrfs_free_extent_map(em);
+ *map = NULL;
+ btrfs_dec_nocow_writers(bg);
+ if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
+ ret = -EAGAIN;
+ goto out;
+ }
+ space_reserved = true;
+
+ em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start,
+ &file_extent, type);
+ btrfs_dec_nocow_writers(bg);
+ if (type == BTRFS_ORDERED_PREALLOC) {
+ btrfs_free_extent_map(em);
+ *map = em2;
+ em = em2;
+ }
+
+ if (IS_ERR(em2)) {
+ ret = PTR_ERR(em2);
+ goto out;
+ }
+
+ dio_data->nocow_done = true;
+ } else {
+ /* Our caller expects us to free the input extent map. */
+ btrfs_free_extent_map(em);
+ *map = NULL;
+
+ if (nowait) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /*
+ * If we could not allocate data space before locking the file
+ * range and we can't do a NOCOW write, then we have to fail.
+ */
+ if (!dio_data->data_space_reserved) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /*
+ * We have to COW and we have already reserved data space before,
+ * so now we reserve only metadata.
+ */
+ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
+ false);
+ if (ret < 0)
+ goto out;
+ space_reserved = true;
+
+ em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+ *map = em;
+ len = min(len, em->len - (start - em->start));
+ if (len < prev_len)
+ btrfs_delalloc_release_metadata(BTRFS_I(inode),
+ prev_len - len, true);
+ }
+
+ /*
+ * We have created our ordered extent, so we can now release our reservation
+ * for an outstanding extent.
+ */
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
+
+ /*
+ * Need to update the i_size under the extent lock so buffered
+ * readers will get the updated i_size when we unlock.
+ */
+ if (start + len > i_size_read(inode))
+ i_size_write(inode, start + len);
+out:
+ if (ret && space_reserved) {
+ btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+ btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
+ }
+ *lenp = len;
+ return ret;
+}
+
+static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
+ loff_t length, unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct extent_map *em;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_dio_data *dio_data = iter->private;
+ u64 lockstart, lockend;
+ const bool write = !!(flags & IOMAP_WRITE);
+ int ret = 0;
+ u64 len = length;
+ const u64 data_alloc_len = length;
+ u32 unlock_bits = EXTENT_LOCKED;
+
+ /*
+ * We could potentially fault if we have a buffer > PAGE_SIZE, and if
+ * we're NOWAIT we may submit a bio for a partial range and return
+ * EIOCBQUEUED, which would result in an errant short read.
+ *
+ * The best way to handle this would be to allow for partial completions
+ * of iocb's, so we could submit the partial bio, return and fault in
+ * the rest of the pages, and then submit the io for the rest of the
+ * range. However we don't have that currently, so simply return
+ * -EAGAIN at this point so that the normal path is used.
+ */
+ if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
+ return -EAGAIN;
+
+ /*
+ * Cap the size of reads to that usually seen in buffered I/O as we need
+ * to allocate a contiguous array for the checksums.
+ */
+ if (!write)
+ len = min_t(u64, len, fs_info->sectorsize * BIO_MAX_VECS);
+
+ lockstart = start;
+ lockend = start + len - 1;
+
+ /*
+ * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
+ * enough if we've written compressed pages to this area, so we need to
+ * flush the dirty pages again to make absolutely sure that any
+ * outstanding dirty pages are on disk - the first flush only starts
+ * compression on the data, while keeping the pages locked, so by the
+ * time the second flush returns we know bios for the compressed pages
+ * were submitted and finished, and the pages are no longer under writeback.
+ *
+ * If we have a NOWAIT request and we have any pages in the range that
+ * are locked, likely due to compression still in progress, we don't want
+ * to block on page locks. We also don't want to block on pages marked as
+ * dirty or under writeback (same as for the non-compression case).
+ * iomap_dio_rw() did the same check, but after that and before we got
+ * here, mmap'ed writes may have happened or buffered reads started
+ * (readpage() and readahead(), which lock pages), as we haven't locked
+ * the file range yet.
+ */
+ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags)) {
+ if (flags & IOMAP_NOWAIT) {
+ if (filemap_range_needs_writeback(inode->i_mapping,
+ lockstart, lockend))
+ return -EAGAIN;
+ } else {
+ ret = filemap_fdatawrite_range(inode->i_mapping, start,
+ start + length - 1);
+ if (ret)
+ return ret;
+ }
+ }
+
+ memset(dio_data, 0, sizeof(*dio_data));
+
+ /*
+ * We always try to allocate data space and must do it before locking
+ * the file range, to avoid deadlocks with concurrent writes to the same
+ * range if the range has several extents and the writes don't expand the
+ * current i_size (the inode lock is taken in shared mode). If we fail to
+ * allocate data space here we continue and later, after locking the
+ * file range, we fail with ENOSPC only if we figure out we can not do a
+ * NOCOW write.
+ */
+ if (write && !(flags & IOMAP_NOWAIT)) {
+ ret = btrfs_check_data_free_space(BTRFS_I(inode),
+ &dio_data->data_reserved,
+ start, data_alloc_len, false);
+ if (!ret)
+ dio_data->data_space_reserved = true;
+ else if (!(BTRFS_I(inode)->flags &
+ (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
+ goto err;
+ }
+
+ /*
+ * If this errors out it's because we couldn't invalidate pagecache for
+ * this range and we need to fallback to buffered IO, or we are doing a
+ * NOWAIT read/write and we need to block.
+ */
+ ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
+ if (ret < 0)
+ goto err;
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto unlock_err;
+ }
+
+ /*
+ * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
+ * io. INLINE is special, and we could probably kludge it in here, but
+ * it's still buffered, so for safety let's just fall back to the generic
+ * buffered path.
+ *
+ * For COMPRESSED we _have_ to read the entire extent in so we can
+ * decompress it, so there will be buffering required no matter what we
+ * do, so go ahead and fallback to buffered.
+ *
+ * We return -ENOTBLK because that's what makes DIO go ahead and go back
+ * to buffered IO. Don't blame me, this is the price we pay for using
+ * the generic code.
+ */
+ if (btrfs_extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) {
+ btrfs_free_extent_map(em);
+ /*
+ * If we are in a NOWAIT context, return -EAGAIN in order to
+ * fallback to buffered IO. This is not only because we can
+ * block with buffered IO (no support for NOWAIT semantics at
+ * the moment) but also to avoid returning short reads to user
+ * space - this happens if we were able to read some data from
+ * previous non-compressed extents and then when we fallback to
+ * buffered IO, at btrfs_file_read_iter() by calling
+ * filemap_read(), we fail to fault in pages for the read buffer,
+ * in which case filemap_read() returns a short read (the number
+ * of bytes previously read is > 0, so it does not return -EFAULT).
+ */
+ ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
+ goto unlock_err;
+ }
+
+ len = min(len, em->len - (start - em->start));
+
+ /*
+ * If we have a NOWAIT request and the range contains multiple extents
+ * (or a mix of extents and holes), then we return -EAGAIN to make the
+ * caller fallback to a context where it can do a blocking (without
+ * NOWAIT) request. This way we avoid doing partial IO and returning
+ * success to the caller, which is not optimal for writes and for reads
+ * it can result in unexpected behaviour for an application.
+ *
+ * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
+ * iomap_dio_rw(), we can end up returning less data than what the caller
+ * asked for, resulting in an unexpected, and incorrect, short read.
+ * That is, the caller asked to read N bytes and we return less than that,
+ * which is wrong unless we are crossing EOF. This happens if we get a
+ * page fault error when trying to fault in pages for the buffer that is
+ * associated to the struct iov_iter passed to iomap_dio_rw(), and we
+ * have previously submitted bios for other extents in the range, in
+ * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
+ * those bios have completed by the time we get the page fault error,
+ * which we return back to our caller - we should only return EIOCBQUEUED
+ * after we have submitted bios for all the extents in the range.
+ */
+ if ((flags & IOMAP_NOWAIT) && len < length) {
+ btrfs_free_extent_map(em);
+ ret = -EAGAIN;
+ goto unlock_err;
+ }
+
+ if (write) {
+ ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
+ start, &len, flags);
+ if (ret < 0)
+ goto unlock_err;
+ /* Recalc len in case the new em is smaller than requested */
+ len = min(len, em->len - (start - em->start));
+ if (dio_data->data_space_reserved) {
+ u64 release_offset;
+ u64 release_len = 0;
+
+ if (dio_data->nocow_done) {
+ release_offset = start;
+ release_len = data_alloc_len;
+ } else if (len < data_alloc_len) {
+ release_offset = start + len;
+ release_len = data_alloc_len - len;
+ }
+
+ if (release_len > 0)
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
+ dio_data->data_reserved,
+ release_offset,
+ release_len);
+ }
+ }
+
+ /*
+ * Translate extent map information to iomap.
+ * We trim the extents (and move the addr) even though iomap code does
+ * that, since we have locked only the parts we are performing I/O in.
+ */
+ if ((em->disk_bytenr == EXTENT_MAP_HOLE) ||
+ ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ } else {
+ iomap->addr = btrfs_extent_map_block_start(em) + (start - em->start);
+ iomap->type = IOMAP_MAPPED;
+ }
+ iomap->offset = start;
+ iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
+ iomap->length = len;
+ btrfs_free_extent_map(em);
+
+ /*
+ * Reads will hold the EXTENT_DIO_LOCKED bit until the io is completed,
+ * writes only hold it for this part. We hold the extent lock until
+ * we're completely done with the extent map to make sure it remains
+ * valid.
+ */
+ if (write)
+ unlock_bits |= EXTENT_DIO_LOCKED;
+
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ unlock_bits, &cached_state);
+
+ /* We didn't use everything, unlock the dio extent for the remainder. */
+ if (!write && (start + len) < lockend)
+ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len,
+ lockend, NULL);
+
+ return 0;
+
+unlock_err:
+ /*
+ * Don't use EXTENT_LOCK_BITS here in case we extend it later and forget
+ * to update this, be explicit that we expect EXTENT_LOCKED and
+ * EXTENT_DIO_LOCKED to be set here, and so that's what we're clearing.
+ */
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state);
+err:
+ if (dio_data->data_space_reserved) {
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
+ dio_data->data_reserved,
+ start, data_alloc_len);
+ extent_changeset_free(dio_data->data_reserved);
+ }
+
+ return ret;
+}
+
+static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned int flags, struct iomap *iomap)
+{
+ struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
+ struct btrfs_dio_data *dio_data = iter->private;
+ size_t submitted = dio_data->submitted;
+ const bool write = !!(flags & IOMAP_WRITE);
+ int ret = 0;
+
+ if (!write && (iomap->type == IOMAP_HOLE)) {
+ /* If reading from a hole, unlock and return */
+ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
+ return 0;
+ }
+
+ if (submitted < length) {
+ pos += submitted;
+ length -= submitted;
+ if (write)
+ btrfs_finish_ordered_extent(dio_data->ordered, NULL,
+ pos, length, false);
+ else
+ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
+ ret = -ENOTBLK;
+ }
+ if (write) {
+ btrfs_put_ordered_extent(dio_data->ordered);
+ dio_data->ordered = NULL;
+ }
+
+ if (write)
+ extent_changeset_free(dio_data->data_reserved);
+ return ret;
+}
+
+static void btrfs_dio_end_io(struct btrfs_bio *bbio)
+{
+ struct btrfs_dio_private *dip =
+ container_of(bbio, struct btrfs_dio_private, bbio);
+ struct btrfs_inode *inode = bbio->inode;
+ struct bio *bio = &bbio->bio;
+
+ if (bio->bi_status) {
+ btrfs_warn(inode->root->fs_info,
+ "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
+ btrfs_ino(inode), bio->bi_opf,
+ dip->file_offset, dip->bytes, bio->bi_status);
+ }
+
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
+ btrfs_finish_ordered_extent(bbio->ordered, NULL,
+ dip->file_offset, dip->bytes,
+ !bio->bi_status);
+ } else {
+ btrfs_unlock_dio_extent(&inode->io_tree, dip->file_offset,
+ dip->file_offset + dip->bytes - 1, NULL);
+ }
+
+ bbio->bio.bi_private = bbio->private;
+ iomap_dio_bio_end_io(bio);
+}
+
+static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
+ struct btrfs_ordered_extent *ordered)
+{
+ u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 len = bbio->bio.bi_iter.bi_size;
+ struct btrfs_ordered_extent *new;
+ int ret;
+
+ /* Must always be called for the beginning of an ordered extent. */
+ if (WARN_ON_ONCE(start != ordered->disk_bytenr))
+ return -EINVAL;
+
+ /* No need to split if the ordered extent covers the entire bio. */
+ if (ordered->disk_num_bytes == len) {
+ refcount_inc(&ordered->refs);
+ bbio->ordered = ordered;
+ return 0;
+ }
+
+ /*
+ * Don't split the extent_map for NOCOW extents, as we're writing into
+ * a pre-existing one.
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
+ ret = btrfs_split_extent_map(bbio->inode, bbio->file_offset,
+ ordered->num_bytes, len,
+ ordered->disk_bytenr);
+ if (ret)
+ return ret;
+ }
+
+ new = btrfs_split_ordered_extent(ordered, len);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+ bbio->ordered = new;
+ return 0;
+}
+
+static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset)
+{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+ struct btrfs_dio_private *dip =
+ container_of(bbio, struct btrfs_dio_private, bbio);
+ struct btrfs_dio_data *dio_data = iter->private;
+
+ btrfs_bio_init(bbio, BTRFS_I(iter->inode), file_offset,
+ btrfs_dio_end_io, bio->bi_private);
+
+ dip->file_offset = file_offset;
+ dip->bytes = bio->bi_iter.bi_size;
+
+ dio_data->submitted += bio->bi_iter.bi_size;
+
+ /*
+ * Check if we are doing a partial write. If we are, we need to split
+ * the ordered extent to match the submitted bio. Hang on to the
+ * remaining unfinishable ordered_extent in dio_data so that it can be
+ * cancelled in iomap_end to avoid a deadlock wherein faulting the
+ * remaining pages is blocked on the outstanding ordered extent.
+ */
+ if (iter->flags & IOMAP_WRITE) {
+ int ret;
+
+ ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
+ if (ret) {
+ btrfs_finish_ordered_extent(dio_data->ordered, NULL,
+ file_offset, dip->bytes,
+ !ret);
+ bio->bi_status = errno_to_blk_status(ret);
+ iomap_dio_bio_end_io(bio);
+ return;
+ }
+ }
+
+ btrfs_submit_bbio(bbio, 0);
+}
+
+static const struct iomap_ops btrfs_dio_iomap_ops = {
+ .iomap_begin = btrfs_dio_iomap_begin,
+ .iomap_end = btrfs_dio_iomap_end,
+};
+
+static const struct iomap_dio_ops btrfs_dio_ops = {
+ .submit_io = btrfs_dio_submit_io,
+ .bio_set = &btrfs_dio_bioset,
+};
+
+static ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
+ size_t done_before)
+{
+ struct btrfs_dio_data data = { 0 };
+
+ return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+ IOMAP_DIO_PARTIAL, &data, done_before);
+}
+
+static struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+ size_t done_before)
+{
+ struct btrfs_dio_data data = { 0 };
+
+ return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+ IOMAP_DIO_PARTIAL, &data, done_before);
+}
+
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ const u32 blocksize_mask = fs_info->sectorsize - 1;
+
+ if (offset & blocksize_mask)
+ return -EINVAL;
+
+ if (iov_iter_alignment(iter) & blocksize_mask)
+ return -EINVAL;
+
+ /*
+ * For bs > ps support, we heavily rely on large folios to make sure no
+ * block will cross large folio boundaries.
+ *
+ * But memory provided by direct IO is only virtually contiguous, not
+ * physically contiguous, and will break btrfs' large folio requirement.
+ *
+ * So for bs > ps support, all direct IOs should fallback to buffered ones.
+ */
+ if (fs_info->sectorsize > PAGE_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ loff_t pos;
+ ssize_t written = 0;
+ ssize_t written_buffered;
+ size_t prev_left = 0;
+ loff_t endbyte;
+ ssize_t ret;
+ unsigned int ilock_flags = 0;
+ struct iomap_dio *dio;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ ilock_flags |= BTRFS_ILOCK_TRY;
+
+ /*
+ * If the write DIO is within EOF, use a shared lock and also only if
+ * security bits will likely not be dropped by file_remove_privs() called
+ * from btrfs_write_check(). Either will need to be rechecked after the
+ * lock was acquired.
+ */
+ if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
+ ilock_flags |= BTRFS_ILOCK_SHARED;
+
+relock:
+ ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
+ if (ret < 0)
+ return ret;
+
+ /* Shared lock cannot be used with security bits set. */
+ if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ ilock_flags &= ~BTRFS_ILOCK_SHARED;
+ goto relock;
+ }
+
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ return ret;
+ }
+
+ ret = btrfs_write_check(iocb, ret);
+ if (ret < 0) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ goto out;
+ }
+
+ pos = iocb->ki_pos;
+ /*
+ * Re-check since file size may have changed just before taking the
+ * lock or pos may have changed because of O_APPEND in generic_write_checks()
+ */
+ if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
+ pos + iov_iter_count(from) > i_size_read(inode)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ ilock_flags &= ~BTRFS_ILOCK_SHARED;
+ goto relock;
+ }
+
+ if (check_direct_IO(fs_info, from, pos)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ goto buffered;
+ }
+ /*
+ * We can't control the folios being passed in, applications can write
+ * to them while a direct IO write is in progress. This means the
+ * content might change after we calculated the data checksum.
+ * Therefore we can end up storing a checksum that doesn't match the
+ * persisted data.
+ *
+ * To be extra safe and avoid false data checksum mismatch, if the
+ * inode requires data checksum, just fallback to buffered IO.
+ * For buffered IO we have full control of page cache and can ensure
+ * no one is modifying the content during writeback.
+ */
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ goto buffered;
+ }
+
+ /*
+ * The iov_iter can be mapped to the same file range we are writing to.
+ * If that's the case, then we will deadlock in the iomap code, because
+ * it first calls our callback btrfs_dio_iomap_begin(), which will create
+ * an ordered extent, and after that it will fault in the pages that the
+ * iov_iter refers to. During the fault in we end up in the readahead
+ * pages code (starting at btrfs_readahead()), which will lock the range,
+ * find that ordered extent and then wait for it to complete (at
+ * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
+ * obviously the ordered extent can never complete as we didn't submit
+ * yet the respective bio(s). This always happens when the buffer is
+ * memory mapped to the same file range, since the iomap DIO code always
+ * invalidates pages in the target file range (after starting and waiting
+ * for any writeback).
+ *
+ * So here we disable page faults in the iov_iter and then retry if we
+ * got -EFAULT, faulting in the pages before the retry.
+ */
+again:
+ from->nofault = true;
+ dio = btrfs_dio_write(iocb, from, written);
+ from->nofault = false;
+
+ if (IS_ERR_OR_NULL(dio)) {
+ ret = PTR_ERR_OR_ZERO(dio);
+ } else {
+ /*
+ * If we have a synchronous write, we must make sure the fsync
+ * triggered by the iomap_dio_complete() call below doesn't
+ * deadlock on the inode lock - we are already holding it and we
+ * can't call it after unlocking because we may need to complete
+ * partial writes due to the input buffer (or parts of it) not
+ * being already faulted in.
+ */
+ ASSERT(current->journal_info == NULL);
+ current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB;
+ ret = iomap_dio_complete(dio);
+ current->journal_info = NULL;
+ }
+
+ /* No increment (+=) because iomap returns a cumulative value. */
+ if (ret > 0)
+ written = ret;
+
+ if (iov_iter_count(from) > 0 && (ret == -EFAULT || ret > 0)) {
+ const size_t left = iov_iter_count(from);
+ /*
+ * We have more data left to write. Try to fault in as many as
+ * possible of the remainder pages and retry. We do this without
+ * releasing and locking again the inode, to prevent races with
+ * truncate.
+ *
+ * Also, in case the iov refers to pages in the file range of the
+ * file we want to write to (due to a mmap), we could enter an
+ * infinite loop if we retry after faulting the pages in, since
+ * iomap will invalidate any pages in the range early on, before
+ * it tries to fault in the pages of the iov. So we keep track of
+ * how much was left of iov in the previous EFAULT and fallback
+ * to buffered IO in case we haven't made any progress.
+ */
+ if (left == prev_left) {
+ ret = -ENOTBLK;
+ } else {
+ fault_in_iov_iter_readable(from, left);
+ prev_left = left;
+ goto again;
+ }
+ }
+
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+
+ /*
+ * If 'ret' is -ENOTBLK or we have not written all data, then it means
+ * we must fallback to buffered IO.
+ */
+ if ((ret < 0 && ret != -ENOTBLK) || !iov_iter_count(from))
+ goto out;
+
+buffered:
+ /*
+ * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
+ * it must retry the operation in a context where blocking is acceptable,
+ * because even if we end up not blocking during the buffered IO attempt
+ * below, we will block when flushing and waiting for the IO.
+ */
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ pos = iocb->ki_pos;
+ written_buffered = btrfs_buffered_write(iocb, from);
+ if (written_buffered < 0) {
+ ret = written_buffered;
+ goto out;
+ }
+ /*
+ * Ensure all data is persisted. We want the next direct IO read to be
+ * able to read what was just written.
+ */
+ endbyte = pos + written_buffered - 1;
+ ret = btrfs_fdatawrite_range(BTRFS_I(inode), pos, endbyte);
+ if (ret)
+ goto out;
+ ret = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
+ if (ret)
+ goto out;
+ written += written_buffered;
+ iocb->ki_pos = pos + written_buffered;
+ invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+out:
+ return ret < 0 ? ret : written;
+}
+
+static int check_direct_read(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ int ret;
+ int i, seg;
+
+ ret = check_direct_IO(fs_info, iter, offset);
+ if (ret < 0)
+ return ret;
+
+ if (!iter_is_iovec(iter))
+ return 0;
+
+ for (seg = 0; seg < iter->nr_segs; seg++) {
+ for (i = seg + 1; i < iter->nr_segs; i++) {
+ const struct iovec *iov1 = iter_iov(iter) + seg;
+ const struct iovec *iov2 = iter_iov(iter) + i;
+
+ if (iov1->iov_base == iov2->iov_base)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ size_t prev_left = 0;
+ ssize_t read = 0;
+ ssize_t ret;
+
+ if (fsverity_active(inode))
+ return 0;
+
+ if (check_direct_read(inode_to_fs_info(inode), to, iocb->ki_pos))
+ return 0;
+
+ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
+again:
+ /*
+ * This is similar to what we do for direct IO writes, see the comment
+ * at btrfs_direct_write(), but we also disable page faults in addition
+ * to disabling them only at the iov_iter level. This is because when
+ * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
+ * which can still trigger page fault-ins despite having set ->nofault
+ * to true on our 'to' iov_iter.
+ *
+ * The difference to direct IO writes is that we deadlock when trying
+ * to lock the extent range in the inode's tree during the page reads
+ * triggered by the fault in (while for writes it is due to waiting for
+ * our own ordered extent). This is because for direct IO reads,
+ * btrfs_dio_iomap_begin() returns with the extent range locked, which
+ * is only unlocked in the endio callback (end_bio_extent_readpage()).
+ */
+ pagefault_disable();
+ to->nofault = true;
+ ret = btrfs_dio_read(iocb, to, read);
+ to->nofault = false;
+ pagefault_enable();
+
+ /* No increment (+=) because iomap returns a cumulative value. */
+ if (ret > 0)
+ read = ret;
+
+ if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
+ const size_t left = iov_iter_count(to);
+
+ if (left == prev_left) {
+ /*
+ * We didn't make any progress since the last attempt,
+ * fallback to a buffered read for the remainder of the
+ * range. This is just to avoid any possibility of looping
+ * for too long.
+ */
+ ret = read;
+ } else {
+ /*
+ * We made some progress since the last retry or this is
+ * the first time we are retrying. Fault in as many pages
+ * as possible and retry.
+ */
+ fault_in_iov_iter_writeable(to, left);
+ prev_left = left;
+ goto again;
+ }
+ }
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
+ return ret < 0 ? ret : read;
+}
+
+int __init btrfs_init_dio(void)
+{
+ if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
+ offsetof(struct btrfs_dio_private, bbio.bio),
+ BIOSET_NEED_BVECS))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __cold btrfs_destroy_dio(void)
+{
+ bioset_exit(&btrfs_dio_bioset);
+}
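
Both btrfs_direct_write() and btrfs_direct_read() above retry after -EFAULT only while the iov_iter keeps shrinking, and give up (falling back to buffered IO, or returning what was already read) once an attempt makes no progress, so an mmap of the same file range cannot make them loop forever. A standalone userspace sketch of that retry-while-progressing pattern (names and the simulated attempt are illustrative, not btrfs APIs):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for one DIO attempt: partial progress, then a fault. */
static int do_one_attempt(size_t *left)
{
	static int calls;

	if (++calls == 1) {
		*left -= 4096;		/* some bytes were written */
		return -EFAULT;		/* then the buffer faulted */
	}
	*left = 0;			/* the retry finishes the rest */
	return 0;
}

int main(void)
{
	size_t left = 16384;
	size_t prev_left = 0;
	int ret;

	for (;;) {
		ret = do_one_attempt(&left);
		if (ret != -EFAULT || left == 0)
			break;
		if (left == prev_left) {
			/* No progress since the last fault: give up, fall back. */
			ret = -ENOTBLK;
			break;
		}
		/* Progress was made: "fault in" the remainder and retry. */
		prev_left = left;
	}
	printf("ret=%d left=%zu\n", ret, left);
	return 0;
}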
diff --git a/fs/btrfs/direct-io.h b/fs/btrfs/direct-io.h
new file mode 100644
index 000000000000..df5d45ee6de7
--- /dev/null
+++ b/fs/btrfs/direct-io.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_DIRECT_IO_H
+#define BTRFS_DIRECT_IO_H
+
+#include <linux/types.h>
+
+struct kiocb;
+
+int __init btrfs_init_dio(void);
+void __cold btrfs_destroy_dio(void);
+
+ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from);
+ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to);
+
+#endif /* BTRFS_DIRECT_IO_H */
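
check_direct_IO() in direct-io.c above rejects requests whose file offset or buffer alignment is not a multiple of the sector size, since the DIO path issues bios directly on those boundaries. A minimal userspace sketch of that mask-based check, with a 4096-byte sector size assumed purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Offset and buffer alignment must both be sectorsize-aligned for DIO. */
static bool dio_aligned(uint64_t offset, uint64_t buf_align, uint32_t sectorsize)
{
	const uint32_t mask = sectorsize - 1;	/* sectorsize is a power of two */

	return !(offset & mask) && !(buf_align & mask);
}

int main(void)
{
	printf("%d\n", dio_aligned(8192, 4096, 4096));	/* 1: both aligned */
	printf("%d\n", dio_aligned(8192, 512, 4096));	/* 0: buffer misaligned */
	return 0;
}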
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index e1b7bd927d69..89fe85778115 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -11,6 +11,7 @@
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"
+#include "fs.h"
/*
* This contains the logic to handle async discard.
@@ -55,13 +56,11 @@
#define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC)
-/* Target completion latency of discarding all discardable extents */
-#define BTRFS_DISCARD_TARGET_MSEC (6 * 60 * 60UL * MSEC_PER_SEC)
#define BTRFS_DISCARD_MIN_DELAY_MSEC (1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC (1000UL)
-#define BTRFS_DISCARD_MAX_IOPS (10U)
+#define BTRFS_DISCARD_MAX_IOPS (1000U)
-/* Montonically decreasing minimum length filters after index 0 */
+/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
0,
BTRFS_ASYNC_DISCARD_MAX_FILTER,
@@ -69,16 +68,32 @@ static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
};
static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
- struct btrfs_block_group *block_group)
+ const struct btrfs_block_group *block_group)
{
return &discard_ctl->discard_list[block_group->discard_index];
}
+/*
+ * Determine if async discard should be running.
+ *
+ * @discard_ctl: discard control
+ *
+ * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
+ */
+static bool btrfs_run_discard_work(const struct btrfs_discard_ctl *discard_ctl)
+{
+ struct btrfs_fs_info *fs_info = container_of(discard_ctl,
+ struct btrfs_fs_info,
+ discard_ctl);
+
+ return (!(fs_info->sb->s_flags & SB_RDONLY) &&
+ test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
+}
+
static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
{
- if (!btrfs_run_discard_work(discard_ctl))
- return;
+ lockdep_assert_held(&discard_ctl->lock);
if (list_empty(&block_group->discard_list) ||
block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
@@ -88,6 +103,8 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
BTRFS_DISCARD_DELAY);
block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
}
+ if (list_empty(&block_group->discard_list))
+ btrfs_get_block_group(block_group);
list_move_tail(&block_group->discard_list,
get_discard_list(discard_ctl, block_group));
@@ -99,6 +116,9 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
if (!btrfs_is_block_group_data_only(block_group))
return;
+ if (!btrfs_run_discard_work(discard_ctl))
+ return;
+
spin_lock(&discard_ctl->lock);
__add_to_discard_list(discard_ctl, block_group);
spin_unlock(&discard_ctl->lock);
@@ -107,8 +127,12 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
{
+ bool queued;
+
spin_lock(&discard_ctl->lock);
+ queued = !list_empty(&block_group->discard_list);
+
if (!btrfs_run_discard_work(discard_ctl)) {
spin_unlock(&discard_ctl->lock);
return;
@@ -120,6 +144,8 @@ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
block_group->discard_eligible_time = (ktime_get_ns() +
BTRFS_DISCARD_UNUSED_DELAY);
block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
+ if (!queued)
+ btrfs_get_block_group(block_group);
list_add_tail(&block_group->discard_list,
&discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);
@@ -130,6 +156,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
{
bool running = false;
+ bool queued = false;
spin_lock(&discard_ctl->lock);
@@ -139,17 +166,21 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
}
block_group->discard_eligible_time = 0;
+ queued = !list_empty(&block_group->discard_list);
list_del_init(&block_group->discard_list);
+ if (queued)
+ btrfs_put_block_group(block_group);
spin_unlock(&discard_ctl->lock);
return running;
}
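
The discard changes above take a block group reference whenever a block group is first put on a discard list and drop it when it is removed (or when the worker is done with it), so the structure cannot go away while still linked. A small userspace sketch of that "hold a reference while queued" rule, with illustrative types rather than the btrfs block group API:

#include <stdbool.h>
#include <stdio.h>

struct item {
	int refs;
	bool on_list;
};

static void item_get(struct item *it)
{
	it->refs++;
}

static void item_put(struct item *it)
{
	if (--it->refs == 0)
		printf("freed\n");	/* stand-in for the real free */
}

/* Take a reference only on the empty->queued transition. */
static void list_add_item(struct item *it)
{
	if (!it->on_list) {
		item_get(it);
		it->on_list = true;
	}
}

/* Drop the list's reference only if the item was actually queued. */
static void list_del_item(struct item *it)
{
	if (it->on_list) {
		it->on_list = false;
		item_put(it);
	}
}

int main(void)
{
	struct item it = { .refs = 1, .on_list = false };

	list_add_item(&it);	/* refs: 2 */
	list_del_item(&it);	/* refs: 1 */
	item_put(&it);		/* refs: 0 -> freed */
	return 0;
}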
-/**
- * find_next_block_group - find block_group that's up next for discarding
- * @discard_ctl: discard control
- * @now: current time
+/*
+ * Find block_group that's up next for discarding.
+ *
+ * @discard_ctl: discard control
+ * @now: current time
*
* Iterate over the discard lists to find the next block_group up for
* discarding checking the discard_eligible_time of block_group.
@@ -184,17 +215,17 @@ static struct btrfs_block_group *find_next_block_group(
return ret_block_group;
}
-/**
- * Wrap find_next_block_group()
+/*
+ * Look up next block group and set it for use.
*
* @discard_ctl: discard control
* @discard_state: the discard_state of the block_group after state management
* @discard_index: the discard_index of the block_group after state management
* @now: time when discard was invoked, in ns
*
- * This wraps find_next_block_group() and sets the block_group to be in use.
- * discard_state's control flow is managed here. Variables related to
- * discard_state are reset here as needed (eg discard_cursor). @discard_state
+ * Wrap find_next_block_group() and set the block_group to be in use.
+ * @discard_state's control flow is managed here. Variables related to
+ * @discard_state are reset here as needed (eg. @discard_cursor). @discard_state
* and @discard_index are remembered as it may change while we're discarding,
* but we want the discard to execute in the context determined here.
*/
@@ -212,19 +243,36 @@ again:
if (block_group && now >= block_group->discard_eligible_time) {
if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
block_group->used != 0) {
- if (btrfs_is_block_group_data_only(block_group))
+ if (btrfs_is_block_group_data_only(block_group)) {
__add_to_discard_list(discard_ctl, block_group);
- else
+ /*
+ * The block group must have been moved to other
+ * discard list even if discard was disabled in
+ * the meantime or a transaction abort happened,
+ * otherwise we can end up in an infinite loop,
+ * always jumping into the 'again' label and
+ * keep getting this block group over and over
+ * in case there are no other block groups in
+ * the discard lists.
+ */
+ ASSERT(block_group->discard_index !=
+ BTRFS_DISCARD_INDEX_UNUSED,
+ "discard_index=%d",
+ block_group->discard_index);
+ } else {
list_del_init(&block_group->discard_list);
+ btrfs_put_block_group(block_group);
+ }
goto again;
}
if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
block_group->discard_cursor = block_group->start;
block_group->discard_state = BTRFS_DISCARD_EXTENTS;
}
- discard_ctl->block_group = block_group;
}
if (block_group) {
+ btrfs_get_block_group(block_group);
+ discard_ctl->block_group = block_group;
*discard_state = block_group->discard_state;
*discard_index = block_group->discard_index;
}
@@ -233,10 +281,11 @@ again:
return block_group;
}
-/**
- * btrfs_discard_check_filter - updates a block groups filters
- * @block_group: block group of interest
- * @bytes: recently freed region size after coalescing
+/*
+ * Update a block group's filters.
+ *
+ * @block_group: block group of interest
+ * @bytes: recently freed region size after coalescing
*
* Async discard maintains multiple lists with progressively smaller filters
* to prioritize discarding based on size. Should a free space that matches
@@ -271,8 +320,9 @@ void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
}
}
-/**
- * btrfs_update_discard_index - moves a block group along the discard lists
+/*
+ * Move a block group along the discard lists.
+ *
* @discard_ctl: discard control
* @block_group: block_group of interest
*
@@ -291,13 +341,14 @@ static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
add_to_discard_list(discard_ctl, block_group);
}
-/**
- * btrfs_discard_cancel_work - remove a block_group from the discard lists
+/*
+ * Remove a block_group from the discard lists.
+ *
* @discard_ctl: discard control
* @block_group: block_group of interest
*
- * This removes @block_group from the discard lists. If necessary, it waits on
- * the current work and then reschedules the delayed work.
+ * Remove @block_group from the discard lists. If necessary, wait on the
+ * current work and then reschedule the delayed work.
*/
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
@@ -308,12 +359,13 @@ void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
}
}
-/**
- * btrfs_discard_queue_work - handles queuing the block_groups
+/*
+ * Handle queuing the block_groups.
+ *
* @discard_ctl: discard control
* @block_group: block_group of interest
*
- * This maintains the LRU order of the discard lists.
+ * Maintain the LRU order of the discard lists.
*/
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
@@ -383,7 +435,8 @@ static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
}
/*
- * btrfs_discard_schedule_work - responsible for scheduling the discard work
+ * Responsible for scheduling the discard work.
+ *
* @discard_ctl: discard control
* @override: override the current timer
*
@@ -401,15 +454,16 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
spin_unlock(&discard_ctl->lock);
}
-/**
- * btrfs_finish_discard_pass - determine next step of a block_group
+/*
+ * Determine next step of a block_group.
+ *
* @discard_ctl: discard control
* @block_group: block_group of interest
*
- * This determines the next step for a block group after it's finished going
- * through a pass on a discard list. If it is unused and fully trimmed, we can
- * mark it unused and send it to the unused_bgs path. Otherwise, pass it onto
- * the appropriate filter list or let it fall off.
+ * Determine the next step for a block group after it's finished going through
+ * a pass on a discard list. If it is unused and fully trimmed, we can mark it
+ * unused and send it to the unused_bgs path. Otherwise, pass it onto the
+ * appropriate filter list or let it fall off.
*/
static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
@@ -426,12 +480,13 @@ static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
}
}
-/**
- * btrfs_discard_workfn - discard work function
+/*
+ * Discard work queue callback
+ *
* @work: work
*
- * This finds the next block_group to start discarding and then discards a
- * single region. It does this in a two-pass fashion: first extents and second
+ * Find the next block_group to start discarding and then discard a single
+ * region. It does this in a two-pass fashion: first extents and second
* bitmaps. Completely discarded block groups are sent to the unused_bgs path.
*/
static void btrfs_discard_workfn(struct work_struct *work)
@@ -448,9 +503,20 @@ static void btrfs_discard_workfn(struct work_struct *work)
block_group = peek_discard_list(discard_ctl, &discard_state,
&discard_index, now);
- if (!block_group || !btrfs_run_discard_work(discard_ctl))
+ if (!block_group)
+ return;
+ if (!btrfs_run_discard_work(discard_ctl)) {
+ spin_lock(&discard_ctl->lock);
+ btrfs_put_block_group(block_group);
+ discard_ctl->block_group = NULL;
+ spin_unlock(&discard_ctl->lock);
return;
+ }
if (now < block_group->discard_eligible_time) {
+ spin_lock(&discard_ctl->lock);
+ btrfs_put_block_group(block_group);
+ discard_ctl->block_group = NULL;
+ spin_unlock(&discard_ctl->lock);
btrfs_discard_schedule_work(discard_ctl, false);
return;
}
@@ -502,29 +568,15 @@ static void btrfs_discard_workfn(struct work_struct *work)
spin_lock(&discard_ctl->lock);
discard_ctl->prev_discard = trimmed;
discard_ctl->prev_discard_time = now;
+ btrfs_put_block_group(block_group);
discard_ctl->block_group = NULL;
__btrfs_discard_schedule_work(discard_ctl, now, false);
spin_unlock(&discard_ctl->lock);
}
-/**
- * btrfs_run_discard_work - determines if async discard should be running
- * @discard_ctl: discard control
+/*
+ * Recalculate the base delay.
*
- * Checks if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
- */
-bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
-{
- struct btrfs_fs_info *fs_info = container_of(discard_ctl,
- struct btrfs_fs_info,
- discard_ctl);
-
- return (!(fs_info->sb->s_flags & SB_RDONLY) &&
- test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
-}
-
-/**
- * btrfs_discard_calc_delay - recalculate the base delay
* @discard_ctl: discard control
*
* Recalculate the base delay which is based off the total number of
@@ -536,6 +588,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
s32 discardable_extents;
s64 discardable_bytes;
u32 iops_limit;
+ unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC;
unsigned long delay;
discardable_extents = atomic_read(&discard_ctl->discardable_extents);
@@ -545,7 +598,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
spin_lock(&discard_ctl->lock);
/*
- * The following is to fix a potential -1 discrepenancy that we're not
+ * The following is to fix a potential -1 discrepancy that we're not
* sure how to reproduce. But given that this is the only place that
* utilizes these numbers and this is only called from
* btrfs_finish_extent_commit() which is synchronized, we can correct
@@ -566,25 +619,32 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
}
iops_limit = READ_ONCE(discard_ctl->iops_limit);
- if (iops_limit)
+
+ if (iops_limit) {
delay = MSEC_PER_SEC / iops_limit;
- else
- delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
+ } else {
+ /*
+ * Unset iops_limit means go as fast as possible, so allow a
+ * delay of 0.
+ */
+ delay = 0;
+ min_delay = 0;
+ }
- delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
- BTRFS_DISCARD_MAX_DELAY_MSEC);
+ delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC);
discard_ctl->delay_ms = delay;
spin_unlock(&discard_ctl->lock);
}
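The hunk above changes what an unset iops_limit means: instead of spreading BTRFS_DISCARD_TARGET_MSEC over the discardable extents, it now disables throttling entirely, which is why min_delay is dropped to 0 in that branch as well. A small stand-alone model of the resulting calculation follows; the bounds are illustrative, not the btrfs values.

/* Illustrative bounds; the real ones are BTRFS_DISCARD_{MIN,MAX}_DELAY_MSEC. */
#define MIN_DELAY_MSEC	1UL
#define MAX_DELAY_MSEC	1000UL
#define MSEC_PER_SEC	1000UL

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Delay between discard requests for a given iops limit (0 = unlimited). */
static unsigned long discard_delay_msec(unsigned int iops_limit)
{
	unsigned long min_delay = MIN_DELAY_MSEC;
	unsigned long delay;

	if (iops_limit) {
		delay = MSEC_PER_SEC / iops_limit;
	} else {
		/* Unset limit means go as fast as possible: allow a 0 delay. */
		delay = 0;
		min_delay = 0;
	}
	return clamp_ul(delay, min_delay, MAX_DELAY_MSEC);
}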
-/**
- * btrfs_discard_update_discardable - propagate discard counters
+/*
+ * Propagate discard counters.
+ *
* @block_group: block_group of interest
*
- * This propagates deltas of counters up to the discard_ctl. It maintains a
- * current counter and a previous counter passing the delta up to the global
- * stat. Then the current counter value becomes the previous counter value.
+ * Propagate deltas of counters up to the discard_ctl. It maintains a current
+ * counter and a previous counter passing the delta up to the global stat.
+ * Then the current counter value becomes the previous counter value.
*/
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
@@ -619,8 +679,9 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
}
}
-/**
- * btrfs_discard_punt_unused_bgs_list - punt unused_bgs list to discard lists
+/*
+ * Punt unused_bgs list to discard lists.
+ *
* @fs_info: fs_info of interest
*
* The unused_bgs list needs to be punted to the discard lists because the
@@ -638,14 +699,19 @@ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
bg_list) {
list_del_init(&block_group->bg_list);
- btrfs_put_block_group(block_group);
btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
+ /*
+ * This put is for the get done by btrfs_mark_bg_unused.
+ * Queueing discard incremented it for discard's reference.
+ */
+ btrfs_put_block_group(block_group);
}
spin_unlock(&fs_info->unused_bgs_lock);
}
-/**
- * btrfs_discard_purge_list - purge discard lists
+/*
+ * Purge discard lists.
+ *
* @discard_ctl: discard control
*
* If we are disabling async discard, we may have intercepted block groups that
@@ -669,6 +735,7 @@ static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
if (block_group->used == 0)
btrfs_mark_bg_unused(block_group);
spin_lock(&discard_ctl->lock);
+ btrfs_put_block_group(block_group);
}
}
spin_unlock(&discard_ctl->lock);
diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index 57b9202f427f..2c5e85394092 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -3,6 +3,7 @@
#ifndef BTRFS_DISCARD_H
#define BTRFS_DISCARD_H
+#include <linux/types.h>
#include <linux/sizes.h>
struct btrfs_fs_info;
@@ -24,7 +25,6 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group);
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
bool override);
-bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
/* Update operations */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 59c3be8c1f4c..89149fac804c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -17,20 +17,18 @@
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "volumes.h"
+#include "bio.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
@@ -43,6 +41,16 @@
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "defrag.h"
+#include "uuid-tree.h"
+#include "relocation.h"
+#include "scrub.h"
+#include "super.h"
+#include "delayed-inode.h"
#define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
BTRFS_HEADER_FLAG_RELOC |\
@@ -51,53 +59,9 @@
BTRFS_SUPER_FLAG_METADUMP |\
BTRFS_SUPER_FLAG_METADUMP_V2)
-static void end_workqueue_fn(struct btrfs_work *work);
-static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_fs_info *fs_info);
-static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
-static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *dirty_pages,
- int mark);
-static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
-/*
- * btrfs_end_io_wq structs are used to do processing in task context when an IO
- * is complete. This is used during reads to verify checksums, and it is used
- * by writes to insert metadata for new file extents after IO is complete.
- */
-struct btrfs_end_io_wq {
- struct bio *bio;
- bio_end_io_t *end_io;
- void *private;
- struct btrfs_fs_info *info;
- blk_status_t status;
- enum btrfs_wq_endio_type metadata;
- struct btrfs_work work;
-};
-
-static struct kmem_cache *btrfs_end_io_wq_cache;
-
-int __init btrfs_end_io_wq_init(void)
-{
- btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
- sizeof(struct btrfs_end_io_wq),
- 0,
- SLAB_MEM_SPREAD,
- NULL);
- if (!btrfs_end_io_wq_cache)
- return -ENOMEM;
- return 0;
-}
-
-void __cold btrfs_end_io_wq_exit(void)
-{
- kmem_cache_destroy(btrfs_end_io_wq_cache);
-}
-
static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
if (fs_info->csum_shash)
@@ -105,124 +69,42 @@ static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
}
/*
- * async submit bios are used to offload expensive checksumming
- * onto the worker threads. They checksum file and metadata bios
- * just before they are sent down the IO stack.
- */
-struct async_submit_bio {
- struct inode *inode;
- struct bio *bio;
- extent_submit_bio_start_t *submit_bio_start;
- int mirror_num;
-
- /* Optional parameter for submit_bio_start used by direct io */
- u64 dio_file_offset;
- struct btrfs_work work;
- blk_status_t status;
-};
-
-/*
- * Lockdep class keys for extent_buffer->lock's in this root. For a given
- * eb, the lockdep key is determined by the btrfs_root it belongs to and
- * the level the eb occupies in the tree.
- *
- * Different roots are used for different purposes and may nest inside each
- * other and they require separate keysets. As lockdep keys should be
- * static, assign keysets according to the purpose of the root as indicated
- * by btrfs_root->root_key.objectid. This ensures that all special purpose
- * roots have separate keysets.
- *
- * Lock-nesting across peer nodes is always done with the immediate parent
- * node locked thus preventing deadlock. As lockdep doesn't know this, use
- * subclass to avoid triggering lockdep warning in such cases.
- *
- * The key is set by the readpage_end_io_hook after the buffer has passed
- * csum validation but before the pages are unlocked. It is also set by
- * btrfs_init_new_buffer on freshly allocated blocks.
- *
- * We also add a check to make sure the highest level of the tree is the
- * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
- * needs update as well.
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# if BTRFS_MAX_LEVEL != 8
-# error
-# endif
-
-#define DEFINE_LEVEL(stem, level) \
- .names[level] = "btrfs-" stem "-0" #level,
-
-#define DEFINE_NAME(stem) \
- DEFINE_LEVEL(stem, 0) \
- DEFINE_LEVEL(stem, 1) \
- DEFINE_LEVEL(stem, 2) \
- DEFINE_LEVEL(stem, 3) \
- DEFINE_LEVEL(stem, 4) \
- DEFINE_LEVEL(stem, 5) \
- DEFINE_LEVEL(stem, 6) \
- DEFINE_LEVEL(stem, 7)
-
-static struct btrfs_lockdep_keyset {
- u64 id; /* root objectid */
- /* Longest entry: btrfs-free-space-00 */
- char names[BTRFS_MAX_LEVEL][20];
- struct lock_class_key keys[BTRFS_MAX_LEVEL];
-} btrfs_lockdep_keysets[] = {
- { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") },
- { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") },
- { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") },
- { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") },
- { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") },
- { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") },
- { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") },
- { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") },
- { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") },
- { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
- { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
- { .id = 0, DEFINE_NAME("tree") },
-};
-
-#undef DEFINE_LEVEL
-#undef DEFINE_NAME
-
-void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
- int level)
-{
- struct btrfs_lockdep_keyset *ks;
-
- BUG_ON(level >= ARRAY_SIZE(ks->keys));
-
- /* find the matching keyset, id 0 is the default entry */
- for (ks = btrfs_lockdep_keysets; ks->id; ks++)
- if (ks->id == objectid)
- break;
-
- lockdep_set_class_and_name(&eb->lock,
- &ks->keys[level], ks->names[level]);
-}
-
-#endif
-
-/*
* Compute the csum of a btree block and store the result to provided buffer.
*/
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
- const int num_pages = num_extent_pages(buf);
- const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
+ int num_pages;
+ u32 first_page_part;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
char *kaddr;
int i;
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
- kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
+
+ if (buf->addr) {
+ /* Pages are contiguous, handle them as a big one. */
+ kaddr = buf->addr;
+ first_page_part = fs_info->nodesize;
+ num_pages = 1;
+ } else {
+ kaddr = folio_address(buf->folios[0]);
+ first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
+ num_pages = num_extent_pages(buf);
+ }
+
crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
first_page_part - BTRFS_CSUM_SIZE);
- for (i = 1; i < num_pages; i++) {
- kaddr = page_address(buf->pages[i]);
+ /*
+ * Only the multiple single-page folios case can reach here.
+ *
+ * The nodesize <= PAGE_SIZE and large folio cases were already
+ * handled by the crypto_shash_update() above.
+ */
+ for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
+ kaddr = folio_address(buf->folios[i]);
crypto_shash_update(shash, kaddr, PAGE_SIZE);
}
memset(result, 0, BTRFS_CSUM_SIZE);
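csum_tree_block() feeds the block to the hash in pieces: the first chunk starts right after the on-disk checksum field, and any remaining single-page folios are added one page at a time. The shape of that loop, modeled stand-alone with a toy hash standing in for crypto_shash; all names here are made up.

#include <stddef.h>
#include <stdint.h>

#define CSUM_SIZE 32			/* size of the on-disk checksum field */

/* Toy incremental hash; the real code drives crypto_shash_{init,update,final}. */
static void toy_update(uint64_t *state, const uint8_t *data, size_t len)
{
	for (size_t i = 0; i < len; i++)
		*state = *state * 131 + data[i];
}

/*
 * Checksum a node split across @nr_pages pages of @page_size bytes each,
 * skipping the leading checksum field in the first chunk.
 */
static uint64_t csum_node(const uint8_t *const pages[], size_t nr_pages,
			  size_t page_size)
{
	uint64_t state = 0;

	toy_update(&state, pages[0] + CSUM_SIZE, page_size - CSUM_SIZE);
	for (size_t i = 1; i < nr_pages; i++)
		toy_update(&state, pages[i], page_size);
	return state;
}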
@@ -235,36 +117,27 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
* detect blocks that either didn't get written at all or got written
* in the wrong place.
*/
-static int verify_parent_transid(struct extent_io_tree *io_tree,
- struct extent_buffer *eb, u64 parent_transid,
- int atomic)
+int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, bool atomic)
{
- struct extent_state *cached_state = NULL;
- int ret;
+ if (!extent_buffer_uptodate(eb))
+ return 0;
if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
- return 0;
+ return 1;
if (atomic)
return -EAGAIN;
- lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
- &cached_state);
- if (extent_buffer_uptodate(eb) &&
- btrfs_header_generation(eb) == parent_transid) {
- ret = 0;
- goto out;
- }
- btrfs_err_rl(eb->fs_info,
- "parent transid verify failed on %llu wanted %llu found %llu",
- eb->start,
+ if (!extent_buffer_uptodate(eb) ||
+ btrfs_header_generation(eb) != parent_transid) {
+ btrfs_err_rl(eb->fs_info,
+"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
+ eb->start, eb->read_mirror,
parent_transid, btrfs_header_generation(eb));
- ret = 1;
- clear_extent_buffer_uptodate(eb);
-out:
- unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
- &cached_state);
- return ret;
+ clear_extent_buffer_uptodate(eb);
+ return 0;
+ }
+ return 1;
}
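The rewritten helper is a pure tri-state predicate: 1 means the buffer is up to date and, if a transid was given, from the expected transaction; 0 means it is not usable; -EAGAIN means a mismatch was seen during an atomic check and the caller should repeat it where the slower failure handling is allowed. A stripped-down model of that convention, with no locking, logging or extent buffer internals:

#include <errno.h>
#include <stdbool.h>

struct buf {			/* simplified stand-in for an extent buffer */
	bool uptodate;
	unsigned long long generation;
};

/* 1 = usable, 0 = not usable, -EAGAIN = redo the check non-atomically. */
static int buf_uptodate(struct buf *b, unsigned long long parent_transid,
			bool atomic)
{
	if (!b->uptodate)
		return 0;
	if (!parent_transid || b->generation == parent_transid)
		return 1;
	if (atomic)
		return -EAGAIN;
	b->uptodate = false;	/* force a fresh read on the next attempt */
	return 0;
}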
static bool btrfs_supported_super_csum(u16 csum_type)
@@ -284,11 +157,9 @@ static bool btrfs_supported_super_csum(u16 csum_type)
* Return 0 if the superblock checksum type matches the checksum value of that
* algorithm. Pass the raw disk superblock data.
*/
-static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
- char *raw_disk_sb)
+int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *disk_sb)
{
- struct btrfs_super_block *disk_sb =
- (struct btrfs_super_block *)raw_disk_sb;
char result[BTRFS_CSUM_SIZE];
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
@@ -299,7 +170,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
* BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
* filled with zeros and is included in the checksum.
*/
- crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
+ crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
if (memcmp(disk_sb->csum, result, fs_info->csum_size))
@@ -308,61 +179,37 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
return 0;
}
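Superblock verification digests everything in the fixed-size super block image except the leading checksum field, then compares the result against that field. A stand-alone sketch of the pattern; the sizes and the toy digest are illustrative, while the real code uses crypto_shash with the filesystem's configured checksum algorithm.

#include <stdint.h>
#include <string.h>

#define SB_CSUM_SIZE	8		/* illustrative; btrfs reserves 32 bytes */
#define SB_IMAGE_SIZE	4096		/* illustrative super block image size */

/* Toy 64-bit digest standing in for crypto_shash_digest(). */
static uint64_t toy_digest(const uint8_t *data, size_t len)
{
	uint64_t h = 0xcbf29ce484222325ULL;	/* arbitrary seed for the sketch */

	for (size_t i = 0; i < len; i++) {
		h ^= data[i];
		h *= 0x100000001b3ULL;
	}
	return h;
}

/* 0 if the stored checksum matches a digest of everything after it. */
static int check_super_csum(const uint8_t sb[SB_IMAGE_SIZE])
{
	uint64_t result = toy_digest(sb + SB_CSUM_SIZE,
				     SB_IMAGE_SIZE - SB_CSUM_SIZE);

	return memcmp(sb, &result, sizeof(result)) ? -1 : 0;
}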
-int btrfs_verify_level_key(struct extent_buffer *eb, int level,
- struct btrfs_key *first_key, u64 parent_transid)
+static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
+ int mirror_num)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- int found_level;
- struct btrfs_key found_key;
- int ret;
+ const u32 step = min(fs_info->nodesize, PAGE_SIZE);
+ const u32 nr_steps = eb->len / step;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ int ret = 0;
- found_level = btrfs_header_level(eb);
- if (found_level != level) {
- WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
- KERN_ERR "BTRFS: tree level check failed\n");
- btrfs_err(fs_info,
-"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
- eb->start, level, found_level);
- return -EIO;
- }
+ if (sb_rdonly(fs_info->sb))
+ return -EROFS;
- if (!first_key)
- return 0;
+ for (int i = 0; i < num_extent_pages(eb); i++) {
+ struct folio *folio = eb->folios[i];
- /*
- * For live tree block (new tree blocks in current transaction),
- * we need proper lock context to avoid race, which is impossible here.
- * So we only checks tree blocks which is read from disk, whose
- * generation <= fs_info->last_trans_committed.
- */
- if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
- return 0;
+ /* No large folio support yet. */
+ ASSERT(folio_order(folio) == 0);
+ ASSERT(i < nr_steps);
- /* We have @first_key, so this @eb must have at least one item */
- if (btrfs_header_nritems(eb) == 0) {
- btrfs_err(fs_info,
- "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
- eb->start);
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- return -EUCLEAN;
+ /*
+ * For nodesize < page size, there is just one paddr, with some
+ * offset inside the page.
+ *
+ * For nodesize >= page size, it's one or more paddrs, and eb->start
+ * must be aligned to page boundary.
+ */
+ paddrs[i] = page_to_phys(&folio->page) + offset_in_page(eb->start);
}
- if (found_level)
- btrfs_node_key_to_cpu(eb, &found_key, 0);
- else
- btrfs_item_key_to_cpu(eb, &found_key, 0);
- ret = btrfs_comp_cpu_keys(first_key, &found_key);
-
- if (ret) {
- WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
- KERN_ERR "BTRFS: tree first key check failed\n");
- btrfs_err(fs_info,
-"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
- eb->start, parent_transid, first_key->objectid,
- first_key->type, first_key->offset,
- found_key.objectid, found_key.type,
- found_key.offset);
- }
+ ret = btrfs_repair_io_failure(fs_info, 0, eb->start, eb->len, eb->start,
+ paddrs, step, mirror_num);
return ret;
}
@@ -370,36 +217,25 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
* helper to read a given tree block, doing retries as required when
* the checksums don't match and we have alternate mirrors to try.
*
- * @parent_transid: expected transid, skip check if 0
- * @level: expected level, mandatory check
- * @first_key: expected key of first slot, skip check if NULL
+ * @check: expected tree parentness check, see the comments of the
+ * structure for details.
*/
-static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
- u64 parent_transid, int level,
- struct btrfs_key *first_key)
+int btrfs_read_extent_buffer(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- struct extent_io_tree *io_tree;
int failed = 0;
int ret;
int num_copies = 0;
int mirror_num = 0;
int failed_mirror = 0;
- io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
+ ASSERT(check);
+
while (1) {
- clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
- if (!ret) {
- if (verify_parent_transid(io_tree, eb,
- parent_transid, 0))
- ret = -EIO;
- else if (btrfs_verify_level_key(eb, level,
- first_key, parent_transid))
- ret = -EUCLEAN;
- else
- break;
- }
+ ret = read_extent_buffer_pages(eb, mirror_num, check);
+ if (!ret)
+ break;
num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len);
@@ -425,12 +261,39 @@ static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
return ret;
}
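In btrfs, mirror 0 means "any copy" and explicit retries then walk mirrors 1..num_copies. The loop above also remembers the first failing mirror so a later success can repair that copy; the sketch below keeps only the essential retry shape, and the read callback is a parameter purely for illustration.

/*
 * Try each mirror in turn (1..@num_copies) until one read succeeds.
 * Returns 0 on the first success, or the last error once every copy
 * has been tried.  The kernel loop additionally remembers the first
 * failing mirror so it can repair that copy afterwards.
 */
static int read_with_mirror_retries(int (*read_mirror)(int mirror, void *arg),
				    void *arg, int num_copies)
{
	int ret = -1;

	for (int mirror = 1; mirror <= num_copies; mirror++) {
		ret = read_mirror(mirror, arg);
		if (!ret)
			break;
	}
	return ret;
}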
-static int csum_one_extent_buffer(struct extent_buffer *eb)
+/*
+ * Checksum a dirty tree block before IO.
+ */
+int btree_csum_one_bio(struct btrfs_bio *bbio)
{
+ struct extent_buffer *eb = bbio->private;
struct btrfs_fs_info *fs_info = eb->fs_info;
+ u64 found_start = btrfs_header_bytenr(eb);
+ u64 last_trans;
u8 result[BTRFS_CSUM_SIZE];
int ret;
+ /* Btree blocks are always contiguous on disk. */
+ if (WARN_ON_ONCE(bbio->file_offset != eb->start))
+ return -EIO;
+ if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
+ return -EIO;
+
+ /*
+ * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
+ * checksum it but zero-out its content. This is done to preserve
+ * ordering of I/O without unnecessarily writing out data.
+ */
+ if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
+ memzero_extent_buffer(eb, 0, eb->len);
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(found_start != eb->start))
+ return -EIO;
+ if (WARN_ON(!btrfs_meta_folio_test_uptodate(eb->folios[0], eb)))
+ return -EIO;
+
ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
offsetof(struct btrfs_header, fsid),
BTRFS_FSID_SIZE) == 0);
@@ -439,129 +302,69 @@ static int csum_one_extent_buffer(struct extent_buffer *eb)
if (btrfs_header_level(eb))
ret = btrfs_check_node(eb);
else
- ret = btrfs_check_leaf_full(eb);
+ ret = btrfs_check_leaf(eb);
- if (ret < 0) {
- btrfs_print_tree(eb, 0);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * Also check the generation: the eb reaching here must be newer than
+ * the last committed one, otherwise something is seriously wrong.
+ */
+ last_trans = btrfs_get_last_trans_committed(fs_info);
+ if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
+ ret = -EUCLEAN;
btrfs_err(fs_info,
- "block=%llu write time tree block corruption detected",
- eb->start);
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- return ret;
+ "block=%llu bad generation, have %llu expect > %llu",
+ eb->start, btrfs_header_generation(eb), last_trans);
+ goto error;
}
write_extent_buffer(eb, result, 0, fs_info->csum_size);
-
return 0;
-}
-
-/* Checksum all dirty extent buffers in one bio_vec */
-static int csum_dirty_subpage_buffers(struct btrfs_fs_info *fs_info,
- struct bio_vec *bvec)
-{
- struct page *page = bvec->bv_page;
- u64 bvec_start = page_offset(page) + bvec->bv_offset;
- u64 cur;
- int ret = 0;
-
- for (cur = bvec_start; cur < bvec_start + bvec->bv_len;
- cur += fs_info->nodesize) {
- struct extent_buffer *eb;
- bool uptodate;
-
- eb = find_extent_buffer(fs_info, cur);
- uptodate = btrfs_subpage_test_uptodate(fs_info, page, cur,
- fs_info->nodesize);
-
- /* A dirty eb shouldn't disappear from buffer_radix */
- if (WARN_ON(!eb))
- return -EUCLEAN;
-
- if (WARN_ON(cur != btrfs_header_bytenr(eb))) {
- free_extent_buffer(eb);
- return -EUCLEAN;
- }
- if (WARN_ON(!uptodate)) {
- free_extent_buffer(eb);
- return -EUCLEAN;
- }
-
- ret = csum_one_extent_buffer(eb);
- free_extent_buffer(eb);
- if (ret < 0)
- return ret;
- }
- return ret;
-}
-
-/*
- * Checksum a dirty tree block before IO. This has extra checks to make sure
- * we only fill in the checksum field in the first page of a multi-page block.
- * For subpage extent buffers we need bvec to also read the offset in the page.
- */
-static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct bio_vec *bvec)
-{
- struct page *page = bvec->bv_page;
- u64 start = page_offset(page);
- u64 found_start;
- struct extent_buffer *eb;
-
- if (fs_info->sectorsize < PAGE_SIZE)
- return csum_dirty_subpage_buffers(fs_info, bvec);
-
- eb = (struct extent_buffer *)page->private;
- if (page != eb->pages[0])
- return 0;
-
- found_start = btrfs_header_bytenr(eb);
-
- if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)) {
- WARN_ON(found_start != 0);
- return 0;
- }
+error:
+ btrfs_print_tree(eb, 0);
+ btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
+ eb->start);
/*
- * Please do not consolidate these warnings into a single if.
- * It is useful to know what went wrong.
+ * Be noisy if this is an extent buffer from a log tree. We don't abort
+ * a transaction in case there's a bad log tree extent buffer, we just
+ * fall back to a transaction commit. Still we want to know when there is
+ * a bad log tree extent buffer, as that may signal a bug somewhere.
*/
- if (WARN_ON(found_start != start))
- return -EUCLEAN;
- if (WARN_ON(!PageUptodate(page)))
- return -EUCLEAN;
-
- return csum_one_extent_buffer(eb);
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
+ btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
+ return ret;
}
-static int check_tree_block_fsid(struct extent_buffer *eb)
+static bool check_tree_block_fsid(struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
u8 fsid[BTRFS_FSID_SIZE];
- u8 *metadata_uuid;
read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
BTRFS_FSID_SIZE);
+
/*
- * Checking the incompat flag is only valid for the current fs. For
- * seed devices it's forbidden to have their uuid changed so reading
- * ->fsid in this case is fine
+ * alloc_fs_devices() copies the fsid into fs_devices::metadata_uuid.
+ * This is then overwritten by metadata_uuid if it is present in the
+ * device_list_add(). The same is true for a seed device as well. So use of
+ * fs_devices::metadata_uuid is appropriate here.
*/
- if (btrfs_fs_incompat(fs_info, METADATA_UUID))
- metadata_uuid = fs_devices->metadata_uuid;
- else
- metadata_uuid = fs_devices->fsid;
-
- if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE))
- return 0;
+ if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
+ return false;
list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
- return 0;
+ return false;
- return 1;
+ return true;
}
/* Do basic extent buffer checks at read time */
-static int validate_extent_buffer(struct extent_buffer *eb)
+int btrfs_validate_extent_buffer(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start;
@@ -570,424 +373,143 @@ static int validate_extent_buffer(struct extent_buffer *eb)
u8 result[BTRFS_CSUM_SIZE];
const u8 *header_csum;
int ret = 0;
+ const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);
+
+ ASSERT(check);
found_start = btrfs_header_bytenr(eb);
- if (found_start != eb->start) {
- btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
- eb->start, found_start);
+ if (unlikely(found_start != eb->start)) {
+ btrfs_err_rl(fs_info,
+ "bad tree block start, mirror %u want %llu have %llu",
+ eb->read_mirror, eb->start, found_start);
ret = -EIO;
goto out;
}
- if (check_tree_block_fsid(eb)) {
- btrfs_err_rl(fs_info, "bad fsid on block %llu",
- eb->start);
+ if (unlikely(check_tree_block_fsid(eb))) {
+ btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
+ eb->start, eb->read_mirror);
ret = -EIO;
goto out;
}
found_level = btrfs_header_level(eb);
- if (found_level >= BTRFS_MAX_LEVEL) {
- btrfs_err(fs_info, "bad tree block level %d on %llu",
- (int)btrfs_header_level(eb), eb->start);
+ if (unlikely(found_level >= BTRFS_MAX_LEVEL)) {
+ btrfs_err(fs_info,
+ "bad tree block level, mirror %u level %d on logical %llu",
+ eb->read_mirror, btrfs_header_level(eb), eb->start);
ret = -EIO;
goto out;
}
csum_tree_block(eb, result);
- header_csum = page_address(eb->pages[0]) +
- get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));
+ header_csum = folio_address(eb->folios[0]) +
+ get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));
if (memcmp(result, header_csum, csum_size) != 0) {
btrfs_warn_rl(fs_info,
- "checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
- eb->start,
- CSUM_FMT_VALUE(csum_size, header_csum),
- CSUM_FMT_VALUE(csum_size, result),
- btrfs_header_level(eb));
- ret = -EUCLEAN;
- goto out;
- }
-
- /*
- * If this is a leaf block and it is corrupt, set the corrupt bit so
- * that we don't try and read the other copies of this block, just
- * return -EIO.
- */
- if (found_level == 0 && btrfs_check_leaf_full(eb)) {
- set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- ret = -EIO;
+"checksum verify failed on logical %llu mirror %u wanted " BTRFS_CSUM_FMT " found " BTRFS_CSUM_FMT " level %d%s",
+ eb->start, eb->read_mirror,
+ BTRFS_CSUM_FMT_VALUE(csum_size, header_csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, result),
+ btrfs_header_level(eb),
+ ignore_csum ? ", ignored" : "");
+ if (unlikely(!ignore_csum)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
}
- if (found_level > 0 && btrfs_check_node(eb))
- ret = -EIO;
-
- if (!ret)
- set_extent_buffer_uptodate(eb);
- else
+ if (unlikely(found_level != check->level)) {
btrfs_err(fs_info,
- "block=%llu read time tree block corruption detected",
- eb->start);
-out:
- return ret;
-}
-
-static int validate_subpage_buffer(struct page *page, u64 start, u64 end,
- int mirror)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
- struct extent_buffer *eb;
- bool reads_done;
- int ret = 0;
-
- /*
- * We don't allow bio merge for subpage metadata read, so we should
- * only get one eb for each endio hook.
- */
- ASSERT(end == start + fs_info->nodesize - 1);
- ASSERT(PagePrivate(page));
-
- eb = find_extent_buffer(fs_info, start);
- /*
- * When we are reading one tree block, eb must have been inserted into
- * the radix tree. If not, something is wrong.
- */
- ASSERT(eb);
-
- reads_done = atomic_dec_and_test(&eb->io_pages);
- /* Subpage read must finish in page read */
- ASSERT(reads_done);
-
- eb->read_mirror = mirror;
- if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
+ "level verify failed on logical %llu mirror %u wanted %u found %u",
+ eb->start, eb->read_mirror, check->level, found_level);
ret = -EIO;
- goto err;
+ goto out;
}
- ret = validate_extent_buffer(eb);
- if (ret < 0)
- goto err;
-
- if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(eb, ret);
-
- set_extent_buffer_uptodate(eb);
-
- free_extent_buffer(eb);
- return ret;
-err:
- /*
- * end_bio_extent_readpage decrements io_pages in case of error,
- * make sure it has something to decrement.
- */
- atomic_inc(&eb->io_pages);
- clear_extent_buffer_uptodate(eb);
- free_extent_buffer(eb);
- return ret;
-}
-
-int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio,
- struct page *page, u64 start, u64 end,
- int mirror)
-{
- struct extent_buffer *eb;
- int ret = 0;
- int reads_done;
-
- ASSERT(page->private);
-
- if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
- return validate_subpage_buffer(page, start, end, mirror);
-
- eb = (struct extent_buffer *)page->private;
-
- /*
- * The pending IO might have been the only thing that kept this buffer
- * in memory. Make sure we have a ref for all this other checks
- */
- atomic_inc(&eb->refs);
-
- reads_done = atomic_dec_and_test(&eb->io_pages);
- if (!reads_done)
- goto err;
-
- eb->read_mirror = mirror;
- if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
+ if (unlikely(check->transid &&
+ btrfs_header_generation(eb) != check->transid)) {
+ btrfs_err_rl(eb->fs_info,
+"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
+ eb->start, eb->read_mirror, check->transid,
+ btrfs_header_generation(eb));
ret = -EIO;
- goto err;
- }
- ret = validate_extent_buffer(eb);
-err:
- if (reads_done &&
- test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(eb, ret);
-
- if (ret) {
- /*
- * our io error hook is going to dec the io pages
- * again, we have to make sure it has something
- * to decrement
- */
- atomic_inc(&eb->io_pages);
- clear_extent_buffer_uptodate(eb);
+ goto out;
}
- free_extent_buffer(eb);
-
- return ret;
-}
+ if (check->has_first_key) {
+ const struct btrfs_key *expect_key = &check->first_key;
+ struct btrfs_key found_key;
-static void end_workqueue_bio(struct bio *bio)
-{
- struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
- struct btrfs_fs_info *fs_info;
- struct btrfs_workqueue *wq;
-
- fs_info = end_io_wq->info;
- end_io_wq->status = bio->bi_status;
-
- if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
- wq = fs_info->endio_meta_write_workers;
- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
- wq = fs_info->endio_freespace_worker;
- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
- wq = fs_info->endio_raid56_workers;
- else
- wq = fs_info->endio_write_workers;
- } else {
- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
- wq = fs_info->endio_raid56_workers;
- else if (end_io_wq->metadata)
- wq = fs_info->endio_meta_workers;
+ if (found_level)
+ btrfs_node_key_to_cpu(eb, &found_key, 0);
else
- wq = fs_info->endio_workers;
- }
-
- btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
- btrfs_queue_work(wq, &end_io_wq->work);
-}
-
-blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
- enum btrfs_wq_endio_type metadata)
-{
- struct btrfs_end_io_wq *end_io_wq;
-
- end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
- if (!end_io_wq)
- return BLK_STS_RESOURCE;
-
- end_io_wq->private = bio->bi_private;
- end_io_wq->end_io = bio->bi_end_io;
- end_io_wq->info = info;
- end_io_wq->status = 0;
- end_io_wq->bio = bio;
- end_io_wq->metadata = metadata;
-
- bio->bi_private = end_io_wq;
- bio->bi_end_io = end_workqueue_bio;
- return 0;
-}
-
-static void run_one_async_start(struct btrfs_work *work)
-{
- struct async_submit_bio *async;
- blk_status_t ret;
-
- async = container_of(work, struct async_submit_bio, work);
- ret = async->submit_bio_start(async->inode, async->bio,
- async->dio_file_offset);
- if (ret)
- async->status = ret;
-}
-
-/*
- * In order to insert checksums into the metadata in large chunks, we wait
- * until bio submission time. All the pages in the bio are checksummed and
- * sums are attached onto the ordered extent record.
- *
- * At IO completion time the csums attached on the ordered extent record are
- * inserted into the tree.
- */
-static void run_one_async_done(struct btrfs_work *work)
-{
- struct async_submit_bio *async;
- struct inode *inode;
- blk_status_t ret;
-
- async = container_of(work, struct async_submit_bio, work);
- inode = async->inode;
-
- /* If an error occurred we just want to clean up the bio and move on */
- if (async->status) {
- async->bio->bi_status = async->status;
- bio_endio(async->bio);
- return;
- }
-
- /*
- * All of the bios that pass through here are from async helpers.
- * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
- * This changes nothing when cgroups aren't in use.
- */
- async->bio->bi_opf |= REQ_CGROUP_PUNT;
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
- if (ret) {
- async->bio->bi_status = ret;
- bio_endio(async->bio);
+ btrfs_item_key_to_cpu(eb, &found_key, 0);
+ if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
+ btrfs_err(fs_info,
+"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
+ eb->start, check->transid,
+ expect_key->objectid,
+ expect_key->type, expect_key->offset,
+ found_key.objectid, found_key.type,
+ found_key.offset);
+ ret = -EUCLEAN;
+ goto out;
+ }
}
-}
-
-static void run_one_async_free(struct btrfs_work *work)
-{
- struct async_submit_bio *async;
-
- async = container_of(work, struct async_submit_bio, work);
- kfree(async);
-}
-
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 dio_file_offset,
- extent_submit_bio_start_t *submit_bio_start)
-{
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- struct async_submit_bio *async;
-
- async = kmalloc(sizeof(*async), GFP_NOFS);
- if (!async)
- return BLK_STS_RESOURCE;
-
- async->inode = inode;
- async->bio = bio;
- async->mirror_num = mirror_num;
- async->submit_bio_start = submit_bio_start;
-
- btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
- run_one_async_free);
-
- async->dio_file_offset = dio_file_offset;
-
- async->status = 0;
-
- if (op_is_sync(bio->bi_opf))
- btrfs_set_work_high_priority(&async->work);
-
- btrfs_queue_work(fs_info->workers, &async->work);
- return 0;
-}
-
-static blk_status_t btree_csum_one_bio(struct bio *bio)
-{
- struct bio_vec *bvec;
- struct btrfs_root *root;
- int ret = 0;
- struct bvec_iter_all iter_all;
-
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- ret = csum_dirty_buffer(root->fs_info, bvec);
- if (ret)
- break;
+ if (check->owner_root) {
+ ret = btrfs_check_eb_owner(eb, check->owner_root);
+ if (ret < 0)
+ goto out;
}
- return errno_to_blk_status(ret);
-}
-
-static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
- u64 dio_file_offset)
-{
- /*
- * when we're called for a write, we're already in the async
- * submission context. Just jump into btrfs_map_bio
- */
- return btree_csum_one_bio(bio);
-}
-
-static bool should_async_write(struct btrfs_fs_info *fs_info,
- struct btrfs_inode *bi)
-{
- if (btrfs_is_zoned(fs_info))
- return false;
- if (atomic_read(&bi->sync_writers))
- return false;
- if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
- return false;
- return true;
-}
-
-blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- blk_status_t ret;
+ /* If this is a leaf block and it is corrupt, just return -EIO. */
+ if (found_level == 0 && btrfs_check_leaf(eb))
+ ret = -EIO;
- if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
- /*
- * called for a read, do the setup so that checksum validation
- * can happen in the async kernel threads
- */
- ret = btrfs_bio_wq_end_io(fs_info, bio,
- BTRFS_WQ_ENDIO_METADATA);
- if (ret)
- goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num);
- } else if (!should_async_write(fs_info, BTRFS_I(inode))) {
- ret = btree_csum_one_bio(bio);
- if (ret)
- goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num);
- } else {
- /*
- * kthread helpers are used to submit writes so that
- * checksumming can happen in parallel across all CPUs
- */
- ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
- 0, btree_submit_bio_start);
- }
+ if (found_level > 0 && btrfs_check_node(eb))
+ ret = -EIO;
if (ret)
- goto out_w_error;
- return 0;
-
-out_w_error:
- bio->bi_status = ret;
- bio_endio(bio);
+ btrfs_err(fs_info,
+ "read time tree block corruption detected on logical %llu mirror %u",
+ eb->start, eb->read_mirror);
+out:
return ret;
}
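The validation above runs roughly from cheapest to most expensive: header bytenr, fsid, level bound, checksum, expected level, transid, first key, owner, and only then the full tree-checker pass. A compact sketch of the cheap identity checks over a hypothetical, simplified header, not struct btrfs_header:

#include <stdbool.h>

struct hdr {				/* hypothetical, simplified header */
	unsigned long long bytenr;	/* where the block claims to live */
	unsigned long long generation;	/* transaction that wrote it */
	unsigned int level;		/* 0 = leaf */
};

/*
 * Cheap identity checks done before the expensive structural ones:
 * returns true when the block is plausibly the one the caller asked for.
 */
static bool hdr_matches(const struct hdr *h,
			unsigned long long want_bytenr,
			unsigned long long want_transid,	/* 0 = skip */
			unsigned int want_level,
			unsigned int max_level)
{
	if (h->bytenr != want_bytenr)		/* written to the wrong place */
		return false;
	if (h->level >= max_level || h->level != want_level)
		return false;
	if (want_transid && h->generation != want_transid)
		return false;			/* stale copy from an old transaction */
	return true;
}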
#ifdef CONFIG_MIGRATION
-static int btree_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode)
+static int btree_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src, enum migrate_mode mode)
{
/*
* we can't safely write a btree page from here,
* we haven't done the locking hook
*/
- if (PageDirty(page))
+ if (folio_test_dirty(src))
return -EAGAIN;
/*
* Buffers may be managed in a filesystem specific way.
* We must have no buffers or drop them.
*/
- if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL))
+ if (folio_get_private(src) &&
+ !filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN;
- return migrate_page(mapping, newpage, page, mode);
+ return migrate_folio(mapping, dst, src, mode);
}
+#else
+#define btree_migrate_folio NULL
#endif
-
static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct btrfs_fs_info *fs_info;
int ret;
if (wbc->sync_mode == WB_SYNC_NONE) {
+ struct btrfs_fs_info *fs_info;
if (wbc->for_kupdate)
return 0;
- fs_info = BTRFS_I(mapping->host)->root->fs_info;
+ fs_info = inode_to_fs_info(mapping->host);
/* this is a bit racy, but that's ok */
ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
BTRFS_DIRTY_METADATA_THRESH,
@@ -998,60 +520,62 @@ static int btree_writepages(struct address_space *mapping,
return btree_write_cache_pages(mapping, wbc);
}
-static int btree_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- if (PageWriteback(page) || PageDirty(page))
- return 0;
+ if (folio_test_writeback(folio) || folio_test_dirty(folio))
+ return false;
- return try_release_extent_buffer(page);
+ return try_release_extent_buffer(folio);
}
-static void btree_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void btree_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
{
struct extent_io_tree *tree;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- extent_invalidatepage(tree, page, offset);
- btree_releasepage(page, GFP_NOFS);
- if (PagePrivate(page)) {
- btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
- "page private not zero on page %llu",
- (unsigned long long)page_offset(page));
- detach_page_private(page);
+
+ tree = &folio_to_inode(folio)->io_tree;
+ extent_invalidate_folio(tree, folio, offset);
+ btree_release_folio(folio, GFP_NOFS);
+ if (folio_get_private(folio)) {
+ btrfs_warn(folio_to_fs_info(folio),
+ "folio private not zero on folio %llu",
+ (unsigned long long)folio_pos(folio));
+ folio_detach_private(folio);
}
}
-static int btree_set_page_dirty(struct page *page)
-{
#ifdef DEBUG
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+static bool btree_dirty_folio(struct address_space *mapping,
+ struct folio *folio)
+{
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
+ struct btrfs_subpage_info *spi = fs_info->subpage_info;
struct btrfs_subpage *subpage;
struct extent_buffer *eb;
int cur_bit = 0;
- u64 page_start = page_offset(page);
+ u64 page_start = folio_pos(folio);
if (fs_info->sectorsize == PAGE_SIZE) {
- BUG_ON(!PagePrivate(page));
- eb = (struct extent_buffer *)page->private;
+ eb = folio_get_private(folio);
BUG_ON(!eb);
BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
BUG_ON(!atomic_read(&eb->refs));
btrfs_assert_tree_write_locked(eb);
- return __set_page_dirty_nobuffers(page);
+ return filemap_dirty_folio(mapping, folio);
}
- ASSERT(PagePrivate(page) && page->private);
- subpage = (struct btrfs_subpage *)page->private;
- ASSERT(subpage->dirty_bitmap);
- while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
+ ASSERT(spi);
+ subpage = folio_get_private(folio);
+
+ for (cur_bit = spi->dirty_offset;
+ cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
+ cur_bit++) {
unsigned long flags;
u64 cur;
- u16 tmp = (1 << cur_bit);
spin_lock_irqsave(&subpage->lock, flags);
- if (!(tmp & subpage->dirty_bitmap)) {
+ if (!test_bit(cur_bit, subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- cur_bit++;
continue;
}
spin_unlock_irqrestore(&subpage->lock, flags);
@@ -1064,20 +588,20 @@ static int btree_set_page_dirty(struct page *page)
btrfs_assert_tree_write_locked(eb);
free_extent_buffer(eb);
- cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
+ cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
}
-#endif
- return __set_page_dirty_nobuffers(page);
+ return filemap_dirty_folio(mapping, folio);
}
+#else
+#define btree_dirty_folio filemap_dirty_folio
+#endif
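For subpage setups (sectorsize < PAGE_SIZE) metadata dirtiness is tracked per sector in a bitmap attached to the folio, and each extent buffer covers nodesize / sectorsize consecutive bits; the debug walk above visits a set bit and then strides over the rest of that buffer's bits. A stand-alone sketch of the stride:

#include <stdbool.h>
#include <stddef.h>

/* Visit each dirty buffer in a per-folio dirty bitmap, one bit per sector. */
static void for_each_dirty_buffer(const bool *dirty_bits, size_t nr_bits,
				  size_t sectors_per_buffer,
				  void (*visit)(size_t first_bit))
{
	for (size_t bit = 0; bit < nr_bits; bit++) {
		if (!dirty_bits[bit])
			continue;
		visit(bit);			/* bit * sectorsize gives the offset */
		bit += sectors_per_buffer - 1;	/* skip the rest of this buffer */
	}
}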
static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
- .releasepage = btree_releasepage,
- .invalidatepage = btree_invalidatepage,
-#ifdef CONFIG_MIGRATION
- .migratepage = btree_migratepage,
-#endif
- .set_page_dirty = btree_set_page_dirty,
+ .release_folio = btree_release_folio,
+ .invalidate_folio = btree_invalidate_folio,
+ .migrate_folio = btree_migrate_folio,
+ .dirty_folio = btree_dirty_folio,
};
struct extent_buffer *btrfs_find_create_tree_block(
@@ -1094,24 +618,23 @@ struct extent_buffer *btrfs_find_create_tree_block(
* Read tree block at logical address @bytenr and do various basic but critical
* verification.
*
- * @owner_root: the objectid of the root owner for this block.
- * @parent_transid: expected transid of this tree block, skip check if 0
- * @level: expected level, mandatory check
- * @first_key: expected key in slot 0, skip check if NULL
+ * @check: expected tree parentness check, see comments of the
+ * structure for details.
*/
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 owner_root, u64 parent_transid,
- int level, struct btrfs_key *first_key)
+ struct btrfs_tree_parent_check *check)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
+ ASSERT(check);
+
+ buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
+ check->level);
if (IS_ERR(buf))
return buf;
- ret = btree_read_extent_buffer_pages(buf, parent_transid,
- level, first_key);
+ ret = btrfs_read_extent_buffer(buf, check);
if (ret) {
free_extent_buffer_stale(buf);
return ERR_PTR(ret);
@@ -1120,39 +643,23 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
}
-void btrfs_clean_tree_block(struct extent_buffer *buf)
+static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
+ u64 objectid, gfp_t flags)
{
- struct btrfs_fs_info *fs_info = buf->fs_info;
- if (btrfs_header_generation(buf) ==
- fs_info->running_transaction->transid) {
- btrfs_assert_tree_write_locked(buf);
-
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
- percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
- -buf->len,
- fs_info->dirty_metadata_batch);
- clear_extent_buffer_dirty(buf);
- }
- }
-}
+ struct btrfs_root *root;
+
+ root = kzalloc(sizeof(*root), flags);
+ if (!root)
+ return NULL;
-static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
- u64 objectid)
-{
- bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
root->fs_info = fs_info;
- root->node = NULL;
- root->commit_root = NULL;
- root->state = 0;
- root->orphan_cleanup_state = 0;
-
- root->last_trans = 0;
- root->free_objectid = 0;
- root->nr_delalloc_inodes = 0;
- root->nr_ordered_extents = 0;
- root->inode_tree = RB_ROOT;
- INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
- root->block_rsv = NULL;
+ root->root_key.objectid = objectid;
+ RB_CLEAR_NODE(&root->rb_node);
+
+ xa_init(&root->inodes);
+ xa_init(&root->delayed_nodes);
+
+ btrfs_init_root_block_rsv(root);
INIT_LIST_HEAD(&root->dirty_list);
INIT_LIST_HEAD(&root->root_list);
@@ -1161,14 +668,9 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
INIT_LIST_HEAD(&root->ordered_extents);
INIT_LIST_HEAD(&root->ordered_root);
INIT_LIST_HEAD(&root->reloc_dirty_list);
- INIT_LIST_HEAD(&root->logged_list[0]);
- INIT_LIST_HEAD(&root->logged_list[1]);
- spin_lock_init(&root->inode_lock);
spin_lock_init(&root->delalloc_lock);
spin_lock_init(&root->ordered_extent_lock);
spin_lock_init(&root->accounting_lock);
- spin_lock_init(&root->log_extents_lock[0]);
- spin_lock_init(&root->log_extents_lock[1]);
spin_lock_init(&root->qgroup_meta_rsv_lock);
mutex_init(&root->objectid_mutex);
mutex_init(&root->log_mutex);
@@ -1187,22 +689,14 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
refcount_set(&root->refs, 1);
atomic_set(&root->snapshot_force_cow, 0);
atomic_set(&root->nr_swapfiles, 0);
- root->log_transid = 0;
root->log_transid_committed = -1;
- root->last_log_commit = 0;
- if (!dummy) {
- extent_io_tree_init(fs_info, &root->dirty_log_pages,
- IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
- extent_io_tree_init(fs_info, &root->log_csum_range,
- IO_TREE_LOG_CSUM_RANGE, NULL);
+ if (!btrfs_is_testing(fs_info)) {
+ btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages,
+ IO_TREE_ROOT_DIRTY_LOG_PAGES);
+ btrfs_extent_io_tree_init(fs_info, &root->log_csum_range,
+ IO_TREE_LOG_CSUM_RANGE);
}
- memset(&root->root_key, 0, sizeof(root->root_key));
- memset(&root->root_item, 0, sizeof(root->root_item));
- memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
- root->root_key.objectid = objectid;
- root->anon_dev = 0;
-
spin_lock_init(&root->root_item_lock);
btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
@@ -1211,14 +705,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
list_add_tail(&root->leak_list, &fs_info->allocated_roots);
spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
-}
-static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
- u64 objectid, gfp_t flags)
-{
- struct btrfs_root *root = kzalloc(sizeof(*root), flags);
- if (root)
- __setup_root(root, fs_info, objectid);
return root;
}
@@ -1242,6 +729,107 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
}
#endif
+static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
+{
+ const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
+ const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
+
+ return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
+}
+
+static int global_root_key_cmp(const void *k, const struct rb_node *node)
+{
+ const struct btrfs_key *key = k;
+ const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
+
+ return btrfs_comp_cpu_keys(key, &root->root_key);
+}
+
+int btrfs_global_root_insert(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct rb_node *tmp;
+ int ret = 0;
+
+ write_lock(&fs_info->global_root_lock);
+ tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
+ write_unlock(&fs_info->global_root_lock);
+
+ if (tmp) {
+ ret = -EEXIST;
+ btrfs_warn(fs_info, "global root %llu %llu already exists",
+ btrfs_root_id(root), root->root_key.offset);
+ }
+ return ret;
+}
+
+void btrfs_global_root_delete(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ write_lock(&fs_info->global_root_lock);
+ rb_erase(&root->rb_node, &fs_info->global_root_tree);
+ write_unlock(&fs_info->global_root_lock);
+}
+
+struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
+ struct btrfs_key *key)
+{
+ struct rb_node *node;
+ struct btrfs_root *root = NULL;
+
+ read_lock(&fs_info->global_root_lock);
+ node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
+ if (node)
+ root = container_of(node, struct btrfs_root, rb_node);
+ read_unlock(&fs_info->global_root_lock);
+
+ return root;
+}
+
+static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+ struct btrfs_block_group *block_group;
+ u64 ret;
+
+ if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
+ return 0;
+
+ if (bytenr)
+ block_group = btrfs_lookup_block_group(fs_info, bytenr);
+ else
+ block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
+ ASSERT(block_group);
+ if (!block_group)
+ return 0;
+ ret = block_group->global_root_id;
+ btrfs_put_block_group(block_group);
+
+ return ret;
+}
+
+struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+ struct btrfs_key key = {
+ .objectid = BTRFS_CSUM_TREE_OBJECTID,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = btrfs_global_root_id(fs_info, bytenr),
+ };
+
+ return btrfs_global_root(fs_info, &key);
+}
+
+struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+ struct btrfs_key key = {
+ .objectid = BTRFS_EXTENT_TREE_OBJECTID,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = btrfs_global_root_id(fs_info, bytenr),
+ };
+
+ return btrfs_global_root(fs_info, &key);
+}
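With extent-tree-v2 there can be several csum and extent roots, kept in an rbtree and selected by a key whose offset is the block group's global root id. Lookup and insertion both rely on ordinary btrfs key ordering; a simplified stand-alone version of that comparison, mirroring what btrfs_comp_cpu_keys() does with a reduced key struct:

struct key {			/* simplified stand-in for struct btrfs_key */
	unsigned long long objectid;
	unsigned char      type;
	unsigned long long offset;
};

/* < 0, 0, > 0 like memcmp: objectid first, then type, then offset. */
static int comp_keys(const struct key *a, const struct key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}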
+
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid)
{
@@ -1268,15 +856,15 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
root->root_key.offset = 0;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
- BTRFS_NESTING_NORMAL);
+ 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
leaf = NULL;
- goto fail_unlock;
+ goto fail;
}
root->node = leaf;
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
root->commit_root = btrfs_root_node(root);
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
@@ -1290,7 +878,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
btrfs_set_root_used(&root->root_item, leaf->len);
btrfs_set_root_last_snapshot(&root->root_item, 0);
btrfs_set_root_dirid(&root->root_item, 0);
- if (is_fstree(objectid))
+ if (btrfs_is_fstree(objectid))
generate_random_guid(root->root_item.uuid);
else
export_guid(root->root_item.uuid, &guid_null);
@@ -1307,17 +895,13 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
return root;
-fail_unlock:
- if (leaf)
- btrfs_tree_unlock(leaf);
fail:
btrfs_put_root(root);
return ERR_PTR(ret);
}
-static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+static struct btrfs_root *alloc_log_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
@@ -1348,13 +932,13 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
*/
leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
- NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
+ NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf))
return PTR_ERR(leaf);
root->node = leaf;
- btrfs_mark_buffer_dirty(root->node);
+ btrfs_mark_buffer_dirty(trans, root->node);
btrfs_tree_unlock(root->node);
return 0;
@@ -1365,7 +949,7 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
{
struct btrfs_root *log_root;
- log_root = alloc_log_tree(trans, fs_info);
+ log_root = alloc_log_tree(fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
@@ -1391,7 +975,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *inode_item;
int ret;
- log_root = alloc_log_tree(trans, fs_info);
+ log_root = alloc_log_tree(fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
@@ -1401,8 +985,8 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
return ret;
}
- log_root->last_trans = trans->transid;
- log_root->root_key.offset = root->root_key.objectid;
+ btrfs_set_root_last_trans(log_root, trans->transid);
+ log_root->root_key.offset = btrfs_root_id(root);
inode_item = &log_root->root_item.inode;
btrfs_set_stack_inode_generation(inode_item, 1);
@@ -1416,17 +1000,18 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(root->log_root);
root->log_root = log_root;
- root->log_transid = 0;
+ btrfs_set_root_log_transid(root, 0);
root->log_transid_committed = -1;
- root->last_log_commit = 0;
+ btrfs_set_root_last_log_commit(root, 0);
return 0;
}
static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
struct btrfs_path *path,
- struct btrfs_key *key)
+ const struct btrfs_key *key)
{
struct btrfs_root *root;
+ struct btrfs_tree_parent_check check = { 0 };
struct btrfs_fs_info *fs_info = tree_root->fs_info;
u64 generation;
int ret;
@@ -1446,17 +1031,37 @@ static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
generation = btrfs_root_generation(&root->root_item);
level = btrfs_root_level(&root->root_item);
- root->node = read_tree_block(fs_info,
- btrfs_root_bytenr(&root->root_item),
- key->objectid, generation, level, NULL);
+ check.level = level;
+ check.transid = generation;
+ check.owner_root = key->objectid;
+ root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
+ &check);
if (IS_ERR(root->node)) {
ret = PTR_ERR(root->node);
root->node = NULL;
goto fail;
- } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
+ }
+ if (unlikely(!btrfs_buffer_uptodate(root->node, generation, false))) {
ret = -EIO;
goto fail;
}
+
+ /*
+ * For a real (non-test) fs, and for trees other than the log/reloc trees,
+ * the root owner must match its root node's owner.
+ */
+ if (unlikely(!btrfs_is_testing(fs_info) &&
+ btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
+ btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
+ btrfs_root_id(root) != btrfs_header_owner(root->node))) {
+ btrfs_crit(fs_info,
+"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
+ btrfs_root_id(root), root->node->start,
+ btrfs_header_owner(root->node),
+ btrfs_root_id(root));
+ ret = -EUCLEAN;
+ goto fail;
+ }
root->commit_root = btrfs_root_node(root);
return root;
fail:
@@ -1465,42 +1070,35 @@ fail:
}
struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
- struct btrfs_key *key)
+ const struct btrfs_key *key)
{
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
root = read_tree_root_path(tree_root, path, key);
- btrfs_free_path(path);
return root;
}
/*
- * Initialize subvolume root in-memory structure
+ * Initialize subvolume root in-memory structure.
*
* @anon_dev: anonymous device to attach to the root, if zero, allocate new
+ *
+ * In case of failure the caller is responsible for calling btrfs_free_fs_root().
*/
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
int ret;
- unsigned int nofs_flag;
- /*
- * We might be called under a transaction (e.g. indirect backref
- * resolution) which could deadlock if it triggers memory reclaim
- */
- nofs_flag = memalloc_nofs_save();
- ret = btrfs_drew_lock_init(&root->snapshot_lock);
- memalloc_nofs_restore(nofs_flag);
- if (ret)
- goto fail;
+ btrfs_drew_lock_init(&root->snapshot_lock);
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
- !btrfs_is_data_reloc_root(root)) {
+ if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
+ !btrfs_is_data_reloc_root(root) &&
+ btrfs_is_fstree(btrfs_root_id(root))) {
set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
btrfs_check_and_init_root_item(&root->root_item);
}
@@ -1509,12 +1107,12 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
* Don't assign anonymous block device to roots that are not exposed to
* userspace, the id pool is limited to 1M
*/
- if (is_fstree(root->root_key.objectid) &&
+ if (btrfs_is_fstree(btrfs_root_id(root)) &&
btrfs_root_refs(&root->root_item) > 0) {
if (!anon_dev) {
ret = get_anon_bdev(&root->anon_dev);
if (ret)
- goto fail;
+ return ret;
} else {
root->anon_dev = anon_dev;
}
@@ -1524,7 +1122,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
ret = btrfs_init_root_free_objectid(root);
if (ret) {
mutex_unlock(&root->objectid_mutex);
- goto fail;
+ return ret;
}
ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
@@ -1532,9 +1130,6 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
mutex_unlock(&root->objectid_mutex);
return 0;
-fail:
- /* The caller is responsible to call btrfs_free_fs_root */
- return ret;
}
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
@@ -1545,8 +1140,7 @@ static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->fs_roots_radix_lock);
root = radix_tree_lookup(&fs_info->fs_roots_radix,
(unsigned long)root_id);
- if (root)
- root = btrfs_grab_root(root);
+ root = btrfs_grab_root(root);
spin_unlock(&fs_info->fs_roots_radix_lock);
return root;
}
@@ -1554,26 +1148,36 @@ static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
u64 objectid)
{
- if (objectid == BTRFS_ROOT_TREE_OBJECTID)
+ struct btrfs_key key = {
+ .objectid = objectid,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = 0,
+ };
+
+ switch (objectid) {
+ case BTRFS_ROOT_TREE_OBJECTID:
return btrfs_grab_root(fs_info->tree_root);
- if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
- return btrfs_grab_root(fs_info->extent_root);
- if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
+ case BTRFS_EXTENT_TREE_OBJECTID:
+ return btrfs_grab_root(btrfs_global_root(fs_info, &key));
+ case BTRFS_CHUNK_TREE_OBJECTID:
return btrfs_grab_root(fs_info->chunk_root);
- if (objectid == BTRFS_DEV_TREE_OBJECTID)
+ case BTRFS_DEV_TREE_OBJECTID:
return btrfs_grab_root(fs_info->dev_root);
- if (objectid == BTRFS_CSUM_TREE_OBJECTID)
- return btrfs_grab_root(fs_info->csum_root);
- if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
- return btrfs_grab_root(fs_info->quota_root) ?
- fs_info->quota_root : ERR_PTR(-ENOENT);
- if (objectid == BTRFS_UUID_TREE_OBJECTID)
- return btrfs_grab_root(fs_info->uuid_root) ?
- fs_info->uuid_root : ERR_PTR(-ENOENT);
- if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
- return btrfs_grab_root(fs_info->free_space_root) ?
- fs_info->free_space_root : ERR_PTR(-ENOENT);
- return NULL;
+ case BTRFS_CSUM_TREE_OBJECTID:
+ return btrfs_grab_root(btrfs_global_root(fs_info, &key));
+ case BTRFS_QUOTA_TREE_OBJECTID:
+ return btrfs_grab_root(fs_info->quota_root);
+ case BTRFS_UUID_TREE_OBJECTID:
+ return btrfs_grab_root(fs_info->uuid_root);
+ case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
+ return btrfs_grab_root(fs_info->block_group_root);
+ case BTRFS_FREE_SPACE_TREE_OBJECTID:
+ return btrfs_grab_root(btrfs_global_root(fs_info, &key));
+ case BTRFS_RAID_STRIPE_TREE_OBJECTID:
+ return btrfs_grab_root(fs_info->stripe_root);
+ default:
+ return NULL;
+ }
}
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
@@ -1587,7 +1191,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->fs_roots_radix_lock);
ret = radix_tree_insert(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
root);
if (ret == 0) {
btrfs_grab_root(root);
@@ -1599,7 +1203,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
return ret;
}
-void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
+void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
struct btrfs_root *root;
@@ -1612,6 +1216,7 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
btrfs_err(fs_info, "leaked root %s refcount %d",
btrfs_root_name(&root->root_key, buf),
refcount_read(&root->refs));
+ WARN_ON_ONCE(1);
while (refcount_read(&root->refs) > 1)
btrfs_put_root(root);
btrfs_put_root(root);
@@ -1619,32 +1224,52 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
#endif
}
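+/*
+ * Drop the reference on every root tracked in the global root rb-tree
+ * (the per-id extent, csum and free space roots).
+ */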
+static void free_global_roots(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *root;
+ struct rb_node *node;
+
+ while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
+ root = rb_entry(node, struct btrfs_root, rb_node);
+ rb_erase(&root->rb_node, &fs_info->global_root_tree);
+ btrfs_put_root(root);
+ }
+}
+
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
+ struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
+
+ if (fs_info->fs_devices)
+ btrfs_close_devices(fs_info->fs_devices);
+ btrfs_free_compress_wsm(fs_info);
+ percpu_counter_destroy(&fs_info->stats_read_blocks);
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
percpu_counter_destroy(&fs_info->ordered_bytes);
+ if (percpu_counter_initialized(em_counter))
+ ASSERT(percpu_counter_sum_positive(em_counter) == 0);
+ percpu_counter_destroy(em_counter);
percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
btrfs_free_csum_hash(fs_info);
btrfs_free_stripe_hash_table(fs_info);
btrfs_free_ref_cache(fs_info);
kfree(fs_info->balance_ctl);
kfree(fs_info->delayed_root);
- btrfs_put_root(fs_info->extent_root);
+ free_global_roots(fs_info);
btrfs_put_root(fs_info->tree_root);
btrfs_put_root(fs_info->chunk_root);
btrfs_put_root(fs_info->dev_root);
- btrfs_put_root(fs_info->csum_root);
btrfs_put_root(fs_info->quota_root);
btrfs_put_root(fs_info->uuid_root);
- btrfs_put_root(fs_info->free_space_root);
btrfs_put_root(fs_info->fs_root);
btrfs_put_root(fs_info->data_reloc_root);
+ btrfs_put_root(fs_info->block_group_root);
+ btrfs_put_root(fs_info->stripe_root);
btrfs_check_leaked_roots(fs_info);
btrfs_extent_buffer_leak_debug_check(fs_info);
kfree(fs_info->super_copy);
kfree(fs_info->super_for_commit);
- kfree(fs_info->subpage_info);
kvfree(fs_info);
}
@@ -1663,12 +1288,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
*
* @objectid: root id
* @anon_dev: preallocated anonymous block device number for new roots,
- * pass 0 for new allocation.
+ * pass NULL for a new allocation.
* @check_ref: whether to check root item references; if true, return -ENOENT
* for orphan roots
*/
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
- u64 objectid, dev_t anon_dev,
+ u64 objectid, dev_t *anon_dev,
bool check_ref)
{
struct btrfs_root *root;
@@ -1679,11 +1304,30 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
root = btrfs_get_global_root(fs_info, objectid);
if (root)
return root;
+
+ /*
+ * If we're called for non-subvolume trees, and the above function didn't
+ * find one, do not try to read it from disk.
+ *
+ * This is namely for free-space-tree and quota tree, which can change
+ * at runtime and should only be grabbed from fs_info.
+ */
+ if (!btrfs_is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
+ return ERR_PTR(-ENOENT);
again:
root = btrfs_lookup_fs_root(fs_info, objectid);
if (root) {
- /* Shouldn't get preallocated anon_dev for cached roots */
- ASSERT(!anon_dev);
+ /*
+ * Some other caller may have read out the newly inserted
+ * subvolume already (for things like backref walk etc). Not
+ * that common but still possible. In that case, we just need
+ * to free the anon_dev.
+ */
+ if (unlikely(anon_dev && *anon_dev)) {
+ free_anon_bdev(*anon_dev);
+ *anon_dev = 0;
+ }
+
if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
btrfs_put_root(root);
return ERR_PTR(-ENOENT);
@@ -1703,7 +1347,7 @@ again:
goto fail;
}
- ret = btrfs_init_fs_root(root, anon_dev);
+ ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
if (ret)
goto fail;
@@ -1725,13 +1369,22 @@ again:
ret = btrfs_insert_fs_root(fs_info, root);
if (ret) {
- btrfs_put_root(root);
- if (ret == -EEXIST)
+ if (ret == -EEXIST) {
+ btrfs_put_root(root);
goto again;
+ }
goto fail;
}
return root;
fail:
+ /*
+ * If our caller provided us an anonymous device, then it's the caller's
+ * responsibility to free it in case we fail. So we have to set our
+ * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+ * and once again by our caller.
+ */
+ if (anon_dev && *anon_dev)
+ root->anon_dev = 0;
btrfs_put_root(root);
return ERR_PTR(ret);
}
@@ -1746,7 +1399,7 @@ fail:
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, bool check_ref)
{
- return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
+ return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
}
/*
@@ -1754,17 +1407,18 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
* the anonymous block device id
*
* @objectid: tree objectid
- * @anon_dev: if zero, allocate a new anonymous block device or use the
- * parameter value
+ * @anon_dev: if NULL, allocate a new anonymous block device, otherwise
+ * use the value it points to
*/
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
- u64 objectid, dev_t anon_dev)
+ u64 objectid, dev_t *anon_dev)
{
return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}
/*
- * btrfs_get_fs_root_commit_root - return a root for the given objectid
+ * Return a root for the given objectid.
+ *
* @fs_info: the fs_info
* @objectid: the objectid we need to lookup
*
@@ -1809,29 +1463,9 @@ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
return root;
}
-/*
- * called by the kthread helper functions to finally call the bio end_io
- * functions. This is where read checksum verification actually happens
- */
-static void end_workqueue_fn(struct btrfs_work *work)
-{
- struct bio *bio;
- struct btrfs_end_io_wq *end_io_wq;
-
- end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
- bio = end_io_wq->bio;
-
- bio->bi_status = end_io_wq->status;
- bio->bi_private = end_io_wq->private;
- bio->bi_end_io = end_io_wq->end_io;
- bio_endio(bio);
- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
-}
-
static int cleaner_kthread(void *arg)
{
- struct btrfs_root *root = arg;
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = arg;
int again;
while (1) {
@@ -1862,9 +1496,12 @@ static int cleaner_kthread(void *arg)
goto sleep;
}
+ if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
+ btrfs_sysfs_feature_update(fs_info);
+
btrfs_run_delayed_iputs(fs_info);
- again = btrfs_clean_one_deleted_snapshot(root);
+ again = btrfs_clean_one_deleted_snapshot(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
/*
@@ -1916,7 +1553,7 @@ static int transaction_kthread(void *arg)
do {
cannot_commit = false;
- delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
+ delay = secs_to_jiffies(fs_info->commit_interval);
mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&fs_info->trans_lock);
@@ -1927,12 +1564,13 @@ static int transaction_kthread(void *arg)
}
delta = ktime_get_seconds() - cur->start_time;
- if (cur->state < TRANS_STATE_COMMIT_START &&
+ if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
+ cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
- delay -= msecs_to_jiffies((delta - 1) * 1000);
+ delay -= secs_to_jiffies(delta - 1);
delay = min(delay,
- msecs_to_jiffies(fs_info->commit_interval * 1000));
+ secs_to_jiffies(fs_info->commit_interval));
goto sleep;
}
transid = cur->transid;
@@ -2023,11 +1661,23 @@ static void backup_super_roots(struct btrfs_fs_info *info)
btrfs_set_backup_chunk_root_level(root_backup,
btrfs_header_level(info->chunk_root->node));
- btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
- btrfs_set_backup_extent_root_gen(root_backup,
- btrfs_header_generation(info->extent_root->node));
- btrfs_set_backup_extent_root_level(root_backup,
- btrfs_header_level(info->extent_root->node));
+ if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
+ struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
+ struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
+
+ btrfs_set_backup_extent_root(root_backup,
+ extent_root->node->start);
+ btrfs_set_backup_extent_root_gen(root_backup,
+ btrfs_header_generation(extent_root->node));
+ btrfs_set_backup_extent_root_level(root_backup,
+ btrfs_header_level(extent_root->node));
+
+ btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
+ btrfs_set_backup_csum_root_gen(root_backup,
+ btrfs_header_generation(csum_root->node));
+ btrfs_set_backup_csum_root_level(root_backup,
+ btrfs_header_level(csum_root->node));
+ }
/*
* we might commit during log recovery, which happens before we set
@@ -2048,12 +1698,6 @@ static void backup_super_roots(struct btrfs_fs_info *info)
btrfs_set_backup_dev_root_level(root_backup,
btrfs_header_level(info->dev_root->node));
- btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
- btrfs_set_backup_csum_root_gen(root_backup,
- btrfs_header_generation(info->csum_root->node));
- btrfs_set_backup_csum_root_level(root_backup,
- btrfs_header_level(info->csum_root->node));
-
btrfs_set_backup_total_bytes(root_backup,
btrfs_super_total_bytes(info->super_copy));
btrfs_set_backup_bytes_used(root_backup,
@@ -2071,11 +1715,11 @@ static void backup_super_roots(struct btrfs_fs_info *info)
}
/*
- * read_backup_root - Reads a backup root based on the passed priority. Prio 0
- * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
+ * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
+ * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
*
- * fs_info - filesystem whose backup roots need to be read
- * priority - priority of backup root required
+ * @fs_info: filesystem whose backup roots need to be read
+ * @priority: priority of backup root required
*
* Returns backup root index on success and -EINVAL otherwise.
*/
@@ -2120,14 +1764,14 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
btrfs_destroy_workqueue(fs_info->fixup_workers);
btrfs_destroy_workqueue(fs_info->delalloc_workers);
btrfs_destroy_workqueue(fs_info->workers);
- btrfs_destroy_workqueue(fs_info->endio_workers);
- btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
- btrfs_destroy_workqueue(fs_info->rmw_workers);
+ if (fs_info->endio_workers)
+ destroy_workqueue(fs_info->endio_workers);
+ if (fs_info->rmw_workers)
+ destroy_workqueue(fs_info->rmw_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
btrfs_destroy_workqueue(fs_info->delayed_workers);
btrfs_destroy_workqueue(fs_info->caching_workers);
- btrfs_destroy_workqueue(fs_info->readahead_workers);
btrfs_destroy_workqueue(fs_info->flush_workers);
btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
if (fs_info->discard_ctl.discard_workers)
@@ -2137,8 +1781,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
* the queues used for metadata I/O, since tasks from those other work
* queues can do metadata I/O operations.
*/
- btrfs_destroy_workqueue(fs_info->endio_meta_workers);
- btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
+ if (fs_info->endio_meta_workers)
+ destroy_workqueue(fs_info->endio_meta_workers);
}
static void free_root_extent_buffers(struct btrfs_root *root)
@@ -2151,21 +1795,31 @@ static void free_root_extent_buffers(struct btrfs_root *root)
}
}
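+/* Release the cached extent buffers of every root in the global root tree. */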
+static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *root, *tmp;
+
+ rbtree_postorder_for_each_entry_safe(root, tmp,
+ &fs_info->global_root_tree,
+ rb_node)
+ free_root_extent_buffers(root);
+}
+
/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
free_root_extent_buffers(info->tree_root);
+ free_global_root_pointers(info);
free_root_extent_buffers(info->dev_root);
- free_root_extent_buffers(info->extent_root);
- free_root_extent_buffers(info->csum_root);
free_root_extent_buffers(info->quota_root);
free_root_extent_buffers(info->uuid_root);
free_root_extent_buffers(info->fs_root);
free_root_extent_buffers(info->data_reloc_root);
+ free_root_extent_buffers(info->block_group_root);
+ free_root_extent_buffers(info->stripe_root);
if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
- free_root_extent_buffers(info->free_space_root);
}
void btrfs_put_root(struct btrfs_root *root)
@@ -2174,11 +1828,13 @@ void btrfs_put_root(struct btrfs_root *root)
return;
if (refcount_dec_and_test(&root->refs)) {
- WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
+ if (WARN_ON(!xa_empty(&root->inodes)))
+ xa_destroy(&root->inodes);
+ if (WARN_ON(!xa_empty(&root->delayed_nodes)))
+ xa_destroy(&root->delayed_nodes);
WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
- btrfs_drew_lock_destroy(&root->snapshot_lock);
free_root_extent_buffers(root);
#ifdef CONFIG_BTRFS_DEBUG
spin_lock(&root->fs_info->fs_roots_radix_lock);
@@ -2196,8 +1852,8 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
int i;
while (!list_empty(&fs_info->dead_roots)) {
- gang[0] = list_entry(fs_info->dead_roots.next,
- struct btrfs_root, root_list);
+ gang[0] = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
list_del(&gang[0]->root_list);
if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
@@ -2238,11 +1894,18 @@ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
atomic_set(&fs_info->reloc_cancel_req, 0);
}
-static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
+static int btrfs_init_btree_inode(struct super_block *sb)
{
- struct inode *inode = fs_info->btree_inode;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
+ fs_info->tree_root);
+ struct inode *inode;
- inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
set_nlink(inode, 1);
/*
* we set the i_size on the btree inode to the max possible int.
@@ -2251,17 +1914,19 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
*/
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &btree_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
- extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
- IO_TREE_BTREE_INODE_IO, inode);
- BTRFS_I(inode)->io_tree.track_uptodate = false;
- extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
+ btrfs_extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
+ IO_TREE_BTREE_INODE_IO);
+ btrfs_extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
- memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
- btrfs_insert_inode_hash(inode);
+ __insert_inode_hash(inode, hash);
+ set_bit(AS_KERNEL_FILE, &inode->i_mapping->flags);
+ fs_info->btree_inode = inode;
+
+ return 0;
}
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
@@ -2278,20 +1943,19 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->qgroup_tree = RB_ROOT;
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
fs_info->qgroup_seq = 1;
- fs_info->qgroup_ulist = NULL;
fs_info->qgroup_rescan_running = false;
+ fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
mutex_init(&fs_info->qgroup_rescan_lock);
}
-static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices)
+static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
{
u32 max_active = fs_info->thread_pool_size;
unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
+ unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU;
fs_info->workers =
- btrfs_alloc_workqueue(fs_info, "worker",
- flags | WQ_HIGHPRI, max_active, 16);
+ btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
fs_info->delalloc_workers =
btrfs_alloc_workqueue(fs_info, "delalloc",
@@ -2305,25 +1969,13 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
fs_info->fixup_workers =
- btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
+ btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
- /*
- * endios are largely parallel and should have a very
- * low idle thresh
- */
fs_info->endio_workers =
- btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
+ alloc_workqueue("btrfs-endio", flags, max_active);
fs_info->endio_meta_workers =
- btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
- max_active, 4);
- fs_info->endio_meta_write_workers =
- btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
- max_active, 2);
- fs_info->endio_raid56_workers =
- btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
- max_active, 4);
- fs_info->rmw_workers =
- btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
+ alloc_workqueue("btrfs-endio-meta", flags, max_active);
+ fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
fs_info->endio_write_workers =
btrfs_alloc_workqueue(fs_info, "endio-write", flags,
max_active, 2);
@@ -2333,23 +1985,19 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
fs_info->delayed_workers =
btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
max_active, 0);
- fs_info->readahead_workers =
- btrfs_alloc_workqueue(fs_info, "readahead", flags,
- max_active, 2);
fs_info->qgroup_rescan_workers =
- btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
+ btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
+ ordered_flags);
fs_info->discard_ctl.discard_workers =
- alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1);
+ alloc_ordered_workqueue("btrfs-discard", WQ_FREEZABLE);
- if (!(fs_info->workers && fs_info->delalloc_workers &&
- fs_info->flush_workers &&
+ if (!(fs_info->workers &&
+ fs_info->delalloc_workers && fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
- fs_info->endio_meta_write_workers &&
- fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
+ fs_info->endio_write_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
- fs_info->caching_workers && fs_info->readahead_workers &&
- fs_info->fixup_workers && fs_info->delayed_workers &&
- fs_info->qgroup_rescan_workers &&
+ fs_info->caching_workers && fs_info->fixup_workers &&
+ fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
fs_info->discard_ctl.discard_workers)) {
return -ENOMEM;
}
@@ -2372,6 +2020,22 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
+ /* Check if the checksum implementation is a fast accelerated one. */
+ switch (csum_type) {
+ case BTRFS_CSUM_TYPE_CRC32:
+ if (crc32_optimizations() & CRC32C_OPTIMIZATION)
+ set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+ break;
+ case BTRFS_CSUM_TYPE_XXHASH:
+ set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+ break;
+ default:
+ break;
+ }
+
+ btrfs_info(fs_info, "using %s (%s) checksum algorithm",
+ btrfs_super_csum_name(csum_type),
+ crypto_shash_driver_name(csum_shash));
return 0;
}
@@ -2379,12 +2043,13 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices)
{
int ret;
+ struct btrfs_tree_parent_check check = { 0 };
struct btrfs_root *log_tree_root;
struct btrfs_super_block *disk_super = fs_info->super_copy;
u64 bytenr = btrfs_super_log_root(disk_super);
int level = btrfs_super_log_root_level(disk_super);
- if (fs_devices->rw_devices == 0) {
+ if (unlikely(fs_devices->rw_devices == 0)) {
btrfs_warn(fs_info, "log replay required on RO media");
return -EIO;
}
@@ -2394,27 +2059,29 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
if (!log_tree_root)
return -ENOMEM;
- log_tree_root->node = read_tree_block(fs_info, bytenr,
- BTRFS_TREE_LOG_OBJECTID,
- fs_info->generation + 1, level,
- NULL);
+ check.level = level;
+ check.transid = fs_info->generation + 1;
+ check.owner_root = BTRFS_TREE_LOG_OBJECTID;
+ log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
log_tree_root->node = NULL;
btrfs_put_root(log_tree_root);
return ret;
- } else if (!extent_buffer_uptodate(log_tree_root->node)) {
+ }
+ if (unlikely(!extent_buffer_uptodate(log_tree_root->node))) {
btrfs_err(fs_info, "failed to read log tree");
btrfs_put_root(log_tree_root);
return -EIO;
}
+
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
+ btrfs_put_root(log_tree_root);
if (ret) {
btrfs_handle_fs_error(fs_info, ret,
"Failed to recover log tree");
- btrfs_put_root(log_tree_root);
return ret;
}
@@ -2427,6 +2094,113 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
return 0;
}
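+/*
+ * Read every root item with the given objectid from the tree root and insert
+ * each root into the global root rb-tree, keyed by its global root id.
+ */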
+static int load_global_roots_objectid(struct btrfs_root *tree_root,
+ struct btrfs_path *path, u64 objectid,
+ const char *name)
+{
+ struct btrfs_fs_info *fs_info = tree_root->fs_info;
+ struct btrfs_root *root;
+ u64 max_global_id = 0;
+ int ret;
+ struct btrfs_key key = {
+ .objectid = objectid,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = 0,
+ };
+ bool found = false;
+
+ /* If we have IGNOREDATACSUMS, skip loading these roots. */
+ if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
+ btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
+ set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
+ return 0;
+ }
+
+ while (1) {
+ ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+ if (ret < 0)
+ break;
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(tree_root, path);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ break;
+ }
+ }
+ ret = 0;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid != objectid)
+ break;
+ btrfs_release_path(path);
+
+ /*
+ * Only track this for the extent tree, the count is the same for every
+ * global root.
+ */
+ if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ max_global_id = max(max_global_id, key.offset);
+
+ found = true;
+ root = read_tree_root_path(tree_root, path, &key);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ break;
+ }
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ ret = btrfs_global_root_insert(root);
+ if (ret) {
+ btrfs_put_root(root);
+ break;
+ }
+ key.offset++;
+ }
+ btrfs_release_path(path);
+
+ if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ fs_info->nr_global_roots = max_global_id + 1;
+
+ if (!found || ret) {
+ if (objectid == BTRFS_CSUM_TREE_OBJECTID)
+ set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
+
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
+ ret = ret ? ret : -ENOENT;
+ else
+ ret = 0;
+ btrfs_err(fs_info, "failed to load root %s", name);
+ }
+ return ret;
+}
+
+static int load_global_roots(struct btrfs_root *tree_root)
+{
+ BTRFS_PATH_AUTO_FREE(path);
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = load_global_roots_objectid(tree_root, path,
+ BTRFS_EXTENT_TREE_OBJECTID, "extent");
+ if (ret)
+ return ret;
+ ret = load_global_roots_objectid(tree_root, path,
+ BTRFS_CSUM_TREE_OBJECTID, "csum");
+ if (ret)
+ return ret;
+ if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
+ return ret;
+ ret = load_global_roots_objectid(tree_root, path,
+ BTRFS_FREE_SPACE_TREE_OBJECTID,
+ "free space");
+
+ return ret;
+}
+
static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *tree_root = fs_info->tree_root;
@@ -2434,21 +2208,27 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
struct btrfs_key location;
int ret;
- BUG_ON(!fs_info->tree_root);
+ ASSERT(fs_info->tree_root);
+
+ ret = load_global_roots(tree_root);
+ if (ret)
+ return ret;
- location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
location.type = BTRFS_ROOT_ITEM_KEY;
location.offset = 0;
- root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(root)) {
- if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
- ret = PTR_ERR(root);
- goto out;
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
+ location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root)) {
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->block_group_root = root;
}
- } else {
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->extent_root = root;
}
location.objectid = BTRFS_DEV_TREE_OBJECTID;
@@ -2463,22 +2243,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
fs_info->dev_root = root;
}
/* Initialize fs_info for all devices in any case */
- btrfs_init_devices_late(fs_info);
-
- /* If IGNOREDATACSUMS is set don't bother reading the csum root. */
- if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
- location.objectid = BTRFS_CSUM_TREE_OBJECTID;
- root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(root)) {
- if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
- ret = PTR_ERR(root);
- goto out;
- }
- } else {
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->csum_root = root;
- }
- }
+ ret = btrfs_init_devices_late(fs_info);
+ if (ret)
+ goto out;
/*
* This tree can share blocks with some other fs tree during relocation
@@ -2500,7 +2267,6 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
root = btrfs_read_tree_root(tree_root, &location);
if (!IS_ERR(root)) {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
fs_info->quota_root = root;
}
@@ -2517,17 +2283,17 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
fs_info->uuid_root = root;
}
- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
- location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
+ if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
+ location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
ret = PTR_ERR(root);
goto out;
}
- } else {
+ } else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->free_space_root = root;
+ fs_info->stripe_root = root;
}
}
@@ -2538,6 +2304,71 @@ out:
return ret;
}
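+/*
+ * Basic validation of the super block sys_chunk_array: every entry must be a
+ * CHUNK_ITEM key followed by a SYSTEM chunk item that fits inside the array.
+ */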
+static int validate_sys_chunk_array(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb)
+{
+ unsigned int cur = 0; /* Offset inside the sys chunk array */
+ /*
+ * At sb read time, fs_info is not fully initialized. Thus we have
+ * to use super block sectorsize, which should have been validated.
+ */
+ const u32 sectorsize = btrfs_super_sectorsize(sb);
+ u32 sys_array_size = btrfs_super_sys_array_size(sb);
+
+ if (unlikely(sys_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)) {
+ btrfs_err(fs_info, "system chunk array too big %u > %u",
+ sys_array_size, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+ return -EUCLEAN;
+ }
+
+ while (cur < sys_array_size) {
+ struct btrfs_disk_key *disk_key;
+ struct btrfs_chunk *chunk;
+ struct btrfs_key key;
+ u64 type;
+ u16 num_stripes;
+ u32 len;
+ int ret;
+
+ disk_key = (struct btrfs_disk_key *)(sb->sys_chunk_array + cur);
+ len = sizeof(*disk_key);
+
+ if (unlikely(cur + len > sys_array_size))
+ goto short_read;
+ cur += len;
+
+ btrfs_disk_key_to_cpu(&key, disk_key);
+ if (unlikely(key.type != BTRFS_CHUNK_ITEM_KEY)) {
+ btrfs_err(fs_info,
+ "unexpected item type %u in sys_array at offset %u",
+ key.type, cur);
+ return -EUCLEAN;
+ }
+ chunk = (struct btrfs_chunk *)(sb->sys_chunk_array + cur);
+ num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+ if (unlikely(cur + btrfs_chunk_item_size(num_stripes) > sys_array_size))
+ goto short_read;
+ type = btrfs_stack_chunk_type(chunk);
+ if (unlikely(!(type & BTRFS_BLOCK_GROUP_SYSTEM))) {
+ btrfs_err(fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur);
+ return -EUCLEAN;
+ }
+ ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, key.offset,
+ sectorsize);
+ if (ret < 0)
+ return ret;
+ cur += btrfs_chunk_item_size(num_stripes);
+ }
+ return 0;
+short_read:
+ btrfs_err(fs_info,
+ "super block sys chunk array short read, cur=%u sys_array_size=%u",
+ cur, sys_array_size);
+ return -EUCLEAN;
+}
+
/*
* Real super block validation
* NOTE: super csum type and incompat features will not be checked here.
@@ -2548,21 +2379,29 @@ out:
* 1, 2 2nd and 3rd backup copy
* -1 skip bytenr check
*/
-static int validate_super(struct btrfs_fs_info *fs_info,
- struct btrfs_super_block *sb, int mirror_num)
+int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb, int mirror_num)
{
u64 nodesize = btrfs_super_nodesize(sb);
u64 sectorsize = btrfs_super_sectorsize(sb);
int ret = 0;
+ const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
btrfs_err(fs_info, "no valid FS found");
ret = -EINVAL;
}
- if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
- btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
- btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
- ret = -EINVAL;
+ if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
+ if (!ignore_flags) {
+ btrfs_err(fs_info,
+ "unrecognized or unsupported super flag 0x%llx",
+ btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+ ret = -EINVAL;
+ } else {
+ btrfs_info(fs_info,
+ "unrecognized or unsupported super flags: 0x%llx, ignored",
+ btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+ }
}
if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "tree_root level too big: %d >= %d",
@@ -2584,19 +2423,13 @@ static int validate_super(struct btrfs_fs_info *fs_info,
* Check sectorsize and nodesize first, other check will need it.
* Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here.
*/
- if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
+ if (!is_power_of_2(sectorsize) || sectorsize < BTRFS_MIN_BLOCKSIZE ||
sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
ret = -EINVAL;
}
- /*
- * For 4K page size, we only support 4K sector size.
- * For 64K page size, we support 64K and 4K sector sizes.
- */
- if ((PAGE_SIZE == SZ_4K && sectorsize != PAGE_SIZE) ||
- (PAGE_SIZE == SZ_64K && (sectorsize != SZ_4K &&
- sectorsize != SZ_64K))) {
+ if (!btrfs_supported_blocksize(sectorsize)) {
btrfs_err(fs_info,
"sectorsize %llu not yet supported for page size %lu",
sectorsize, PAGE_SIZE);
@@ -2631,21 +2464,19 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
- if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE)) {
+ if (!fs_info->fs_devices->temp_fsid &&
+ memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
- fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ sb->fsid, fs_info->fs_devices->fsid);
ret = -EINVAL;
}
- if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
- memcmp(fs_info->fs_devices->metadata_uuid,
- fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
+ BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
- fs_info->super_copy->metadata_uuid,
- fs_info->fs_devices->metadata_uuid);
+ btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
ret = -EINVAL;
}
@@ -2658,6 +2489,18 @@ static int validate_super(struct btrfs_fs_info *fs_info,
}
/*
+ * Artificial requirement for block-group-tree to force newer features
+ * (free-space-tree, no-holes) so the test matrix is smaller.
+ */
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
+ (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
+ !btrfs_fs_incompat(fs_info, NO_HOLES))) {
+ btrfs_err(fs_info,
+ "block-group-tree feature requires free-space-tree and no-holes");
+ ret = -EINVAL;
+ }
+
+ /*
* Hint to catch really bogus numbers, bitflips or so, more exact checks are
* done later
*/
@@ -2686,6 +2529,11 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
+ ret = validate_sys_chunk_array(fs_info, sb);
+
/*
* Obvious sys_chunk_array corruptions, it must hold at least one key
* and one chunk
@@ -2731,7 +2579,7 @@ static int validate_super(struct btrfs_fs_info *fs_info,
*/
static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
{
- return validate_super(fs_info, fs_info->super_copy, 0);
+ return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
}
/*
@@ -2745,16 +2593,16 @@ static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
{
int ret;
- ret = validate_super(fs_info, sb, -1);
+ ret = btrfs_validate_super(fs_info, sb, -1);
if (ret < 0)
goto out;
- if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
+ if (unlikely(!btrfs_supported_super_csum(btrfs_super_csum_type(sb)))) {
ret = -EUCLEAN;
btrfs_err(fs_info, "invalid csum type, has %u want %u",
btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
goto out;
}
- if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ if (unlikely(btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP)) {
ret = -EUCLEAN;
btrfs_err(fs_info,
"invalid incompat flags, has 0x%llx valid mask 0x%llx",
@@ -2769,6 +2617,50 @@ out:
return ret;
}
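+/* Read and verify a tree root referenced directly from the super block. */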
+static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
+{
+ struct btrfs_tree_parent_check check = {
+ .level = level,
+ .transid = gen,
+ .owner_root = btrfs_root_id(root)
+ };
+ int ret = 0;
+
+ root->node = read_tree_block(root->fs_info, bytenr, &check);
+ if (IS_ERR(root->node)) {
+ ret = PTR_ERR(root->node);
+ root->node = NULL;
+ return ret;
+ }
+ if (unlikely(!extent_buffer_uptodate(root->node))) {
+ free_extent_buffer(root->node);
+ root->node = NULL;
+ return -EIO;
+ }
+
+ btrfs_set_root_node(&root->root_item, root->node);
+ root->commit_root = btrfs_root_node(root);
+ btrfs_set_root_refs(&root->root_item, 1);
+ return ret;
+}
+
+static int load_important_roots(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_super_block *sb = fs_info->super_copy;
+ u64 gen, bytenr;
+ int level, ret;
+
+ bytenr = btrfs_super_root(sb);
+ gen = btrfs_super_generation(sb);
+ level = btrfs_super_root_level(sb);
+ ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
+ if (ret) {
+ btrfs_warn(fs_info, "couldn't read tree root");
+ return ret;
+ }
+ return 0;
+}
+
static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
{
int backup_index = find_newest_super_backup(fs_info);
@@ -2779,9 +2671,6 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
int i;
for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
- u64 generation;
- int level;
-
if (handle_error) {
if (!IS_ERR(tree_root->node))
free_extent_buffer(tree_root->node);
@@ -2798,37 +2687,19 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
*/
btrfs_set_super_log_root(sb, 0);
- /* We can't trust the free space cache either */
- btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
-
+ btrfs_warn(fs_info, "try to load backup roots slot %d", i);
ret = read_backup_root(fs_info, i);
backup_index = ret;
if (ret < 0)
return ret;
}
- generation = btrfs_super_generation(sb);
- level = btrfs_super_root_level(sb);
- tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
- BTRFS_ROOT_TREE_OBJECTID,
- generation, level, NULL);
- if (IS_ERR(tree_root->node)) {
- handle_error = true;
- ret = PTR_ERR(tree_root->node);
- tree_root->node = NULL;
- btrfs_warn(fs_info, "couldn't read tree root");
- continue;
- } else if (!extent_buffer_uptodate(tree_root->node)) {
+ ret = load_important_roots(fs_info);
+ if (ret) {
handle_error = true;
- ret = -EIO;
- btrfs_warn(fs_info, "error while reading tree root");
continue;
}
- btrfs_set_root_node(&tree_root->root_item, tree_root->node);
- tree_root->commit_root = btrfs_root_node(tree_root);
- btrfs_set_root_refs(&tree_root->root_item, 1);
-
/*
* No need to hold btrfs_root::objectid_mutex since the fs
* hasn't been fully initialised and we are the only user
@@ -2848,8 +2719,9 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
}
/* All successful */
- fs_info->generation = generation;
- fs_info->last_trans_committed = generation;
+ fs_info->generation = btrfs_header_generation(tree_root->node);
+ btrfs_set_last_trans_committed(fs_info, fs_info->generation);
+ fs_info->last_reloc_trans = 0;
/* Always begin writing backup roots after the one being used */
if (backup_index < 0) {
@@ -2864,10 +2736,21 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
return ret;
}
+/*
+ * Lockdep gets confused between our buffer_tree which requires IRQ locking because
+ * we modify marks in the IRQ context, and our delayed inode xarray which doesn't
+ * have these requirements. Use a class key so lockdep doesn't get them mixed up.
+ */
+static struct lock_class_key buffer_xa_class;
+
void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
{
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
- INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
+
+ /* Use the same flags as mapping->i_pages. */
+ xa_init_flags(&fs_info->buffer_tree, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
+ lockdep_set_class(&fs_info->buffer_tree.xa_lock, &buffer_xa_class);
+
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -2879,19 +2762,33 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->super_lock);
- spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->unused_bgs_lock);
spin_lock_init(&fs_info->treelog_bg_lock);
spin_lock_init(&fs_info->zone_active_bgs_lock);
spin_lock_init(&fs_info->relocation_bg_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
+ rwlock_init(&fs_info->global_root_lock);
mutex_init(&fs_info->unused_bg_unpin_mutex);
mutex_init(&fs_info->reclaim_bgs_lock);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
mutex_init(&fs_info->zoned_meta_io_lock);
+ mutex_init(&fs_info->zoned_data_reloc_io_lock);
seqlock_init(&fs_info->profiles_lock);
+ btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
+ btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
+ btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
+ btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
+ BTRFS_LOCKDEP_TRANS_UNBLOCKED);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
+ BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
+ BTRFS_LOCKDEP_TRANS_COMPLETED);
+
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
INIT_LIST_HEAD(&fs_info->space_info);
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
@@ -2903,11 +2800,13 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&fs_info->allocated_ebs);
spin_lock_init(&fs_info->eb_leak_lock);
#endif
- extent_map_tree_init(&fs_info->mapping_tree);
+ fs_info->mapping_tree = RB_ROOT_CACHED;
+ rwlock_init(&fs_info->mapping_tree_lock);
btrfs_init_block_rsv(&fs_info->global_block_rsv,
BTRFS_BLOCK_RSV_GLOBAL);
btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
+ btrfs_init_block_rsv(&fs_info->treelog_rsv, BTRFS_BLOCK_RSV_TREELOG);
btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
BTRFS_BLOCK_RSV_DELOPS);
@@ -2916,19 +2815,15 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
atomic_set(&fs_info->async_delalloc_pages, 0);
atomic_set(&fs_info->defrag_running, 0);
- atomic_set(&fs_info->reada_works_cnt, 0);
atomic_set(&fs_info->nr_delayed_iputs, 0);
atomic64_set(&fs_info->tree_mod_seq, 0);
+ fs_info->global_root_tree = RB_ROOT;
fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
fs_info->metadata_ratio = 0;
fs_info->defrag_inodes = RB_ROOT;
atomic64_set(&fs_info->free_chunk_space, 0);
fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
- fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
- /* readahead state */
- INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
- spin_lock_init(&fs_info->reada_lock);
btrfs_init_ref_verify(fs_info);
fs_info->thread_pool_size = min_t(unsigned long,
@@ -2938,19 +2833,15 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->ordered_root_lock);
btrfs_init_scrub(fs_info);
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- fs_info->check_integrity_print_mask = 0;
-#endif
btrfs_init_balance(fs_info);
btrfs_init_async_reclaim_work(fs_info);
+ btrfs_init_extent_map_shrinker_work(fs_info);
- spin_lock_init(&fs_info->block_group_cache_lock);
- fs_info->block_group_cache_tree = RB_ROOT;
- fs_info->first_logical_byte = (u64)-1;
+ rwlock_init(&fs_info->block_group_cache_lock);
+ fs_info->block_group_cache_tree = RB_ROOT_CACHED;
- extent_io_tree_init(fs_info, &fs_info->excluded_extents,
- IO_TREE_FS_EXCLUDED_EXTENTS, NULL);
- set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
+ btrfs_extent_io_tree_init(fs_info, &fs_info->excluded_extents,
+ IO_TREE_FS_EXCLUDED_EXTENTS);
mutex_init(&fs_info->ordered_operations_mutex);
mutex_init(&fs_info->tree_log_mutex);
@@ -2982,12 +2873,14 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
fs_info->sectorsize_bits = ilog2(4096);
fs_info->stripesize = 4096;
+ /* Default compress algorithm when user does -o compress */
+ fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
+
+ fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
+
spin_lock_init(&fs_info->swapfile_pins_lock);
fs_info->swapfile_pins = RB_ROOT;
- spin_lock_init(&fs_info->send_reloc_lock);
- fs_info->send_in_progress = 0;
-
fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
}
@@ -2997,6 +2890,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
int ret;
fs_info->sb = sb;
+ /* Temporary fixed values for block size until we read the superblock. */
sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
@@ -3004,10 +2898,18 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (ret)
return ret;
+ ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret)
return ret;
+ ret = percpu_counter_init(&fs_info->stats_read_blocks, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
fs_info->dirty_metadata_batch = PAGE_SIZE *
(1 + ilog2(nr_cpu_ids));
@@ -3028,13 +2930,15 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (sb_rdonly(sb))
set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
+ if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
+ set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
return btrfs_alloc_stripe_hash_table(fs_info);
}
static int btrfs_uuid_rescan_kthread(void *data)
{
- struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
+ struct btrfs_fs_info *fs_info = data;
int ret;
/*
@@ -3069,15 +2973,55 @@ static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
return 0;
}
-/*
- * Some options only have meaning at mount time and shouldn't persist across
- * remounts, or be displayed. Clear these at the end of mount and remount
- * code paths.
- */
-void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
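+/*
+ * Run orphan cleanup on every fs root tracked in the fs_roots radix tree,
+ * preserving the first error encountered for the return value.
+ */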
+static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
- btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
- btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
+ u64 root_objectid = 0;
+ struct btrfs_root *gang[8];
+ int ret = 0;
+
+ while (1) {
+ unsigned int found;
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ (void **)gang, root_objectid,
+ ARRAY_SIZE(gang));
+ if (!found) {
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ break;
+ }
+ root_objectid = btrfs_root_id(gang[found - 1]) + 1;
+
+ for (int i = 0; i < found; i++) {
+ /* Avoid grabbing roots in dead_roots. */
+ if (btrfs_root_refs(&gang[i]->root_item) == 0) {
+ gang[i] = NULL;
+ continue;
+ }
+ /* Grab all the search results for later use. */
+ gang[i] = btrfs_grab_root(gang[i]);
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
+ for (int i = 0; i < found; i++) {
+ if (!gang[i])
+ continue;
+ root_objectid = btrfs_root_id(gang[i]);
+ /*
+ * Continue to release the remaining roots after the first
+ * error without cleanup and preserve the first error
+ * for the return.
+ */
+ if (!ret)
+ ret = btrfs_orphan_cleanup(gang[i]);
+ btrfs_put_root(gang[i]);
+ }
+ if (ret)
+ break;
+
+ root_objectid++;
+ }
+ return ret;
}
/*
@@ -3088,23 +3032,38 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
{
int ret;
const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
- bool clear_free_space_tree = false;
+ bool rebuild_free_space_tree = false;
if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
- clear_free_space_tree = true;
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
+ btrfs_warn(fs_info,
+ "'clear_cache' option is ignored with extent tree v2");
+ else
+ rebuild_free_space_tree = true;
} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
btrfs_warn(fs_info, "free space tree is invalid");
- clear_free_space_tree = true;
+ rebuild_free_space_tree = true;
}
- if (clear_free_space_tree) {
- btrfs_info(fs_info, "clearing free space tree");
- ret = btrfs_clear_free_space_tree(fs_info);
+ if (rebuild_free_space_tree) {
+ btrfs_info(fs_info, "rebuilding free space tree");
+ ret = btrfs_rebuild_free_space_tree(fs_info);
if (ret) {
btrfs_warn(fs_info,
- "failed to clear free space tree: %d", ret);
+ "failed to rebuild free space tree: %d", ret);
+ goto out;
+ }
+ }
+
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
+ btrfs_info(fs_info, "disabling free space tree");
+ ret = btrfs_delete_free_space_tree(fs_info);
+ if (ret) {
+ btrfs_warn(fs_info,
+ "failed to disable free space tree: %d", ret);
goto out;
}
}
@@ -3137,7 +3096,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
up_read(&fs_info->cleanup_work_sem);
mutex_lock(&fs_info->cleaner_mutex);
- ret = btrfs_recover_relocation(fs_info->tree_root);
+ ret = btrfs_recover_relocation(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
if (ret < 0) {
btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
@@ -3187,28 +3146,131 @@ out:
return ret;
}
-int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
- char *options)
+/*
+ * Do various sanity and dependency checks of different features.
+ *
+ * @is_rw_mount: If the mount is read-write.
+ *
+ * This is the place for less strict checks (like for subpage or artificial
+ * feature dependencies).
+ *
+ * For strict checks or possible corruption detection, see
+ * btrfs_validate_super().
+ *
+ * This should be called after btrfs_parse_options(), as some mount options
+ * (space cache related) can modify on-disk format like free space tree and
+ * screw up certain feature dependencies.
+ */
+int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+{
+ struct btrfs_super_block *disk_super = fs_info->super_copy;
+ u64 incompat = btrfs_super_incompat_flags(disk_super);
+ const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
+ const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
+
+ if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
+ btrfs_err(fs_info,
+ "cannot mount because of unknown incompat features (0x%llx)",
+ incompat);
+ return -EINVAL;
+ }
+
+ /* Runtime limitation for mixed block groups. */
+ if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
+ (fs_info->sectorsize != fs_info->nodesize)) {
+ btrfs_err(fs_info,
+"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
+ fs_info->nodesize, fs_info->sectorsize);
+ return -EINVAL;
+ }
+
+ /* Mixed backref is an always-enabled feature. */
+ incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
+
+ /* Set compression related flags just in case. */
+ if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
+ incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+ else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
+ incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
+
+ /*
+ * An ancient flag, which should really be marked deprecated.
+ * Such a runtime limitation doesn't really need an incompat flag.
+ */
+ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
+ incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+
+ if (compat_ro_unsupp && is_rw_mount) {
+ btrfs_err(fs_info,
+ "cannot mount read-write because of unknown compat_ro features (0x%llx)",
+ compat_ro);
+ return -EINVAL;
+ }
+
+ /*
+ * We have unsupported RO compat features; even though we are mounted
+ * read-only, we should not cause any metadata writes, including log
+ * replay, or we could screw up whatever the new feature requires.
+ */
+ if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_err(fs_info,
+"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+ compat_ro);
+ return -EINVAL;
+ }
+
+ /*
+ * Artificial limitations for block group tree, to force
+ * block-group-tree to rely on no-holes and free-space-tree.
+ */
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
+ (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
+ !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
+ btrfs_err(fs_info,
+"block-group-tree feature requires no-holes and free-space-tree features");
+ return -EINVAL;
+ }
+
+ /*
+ * Runtime limitation of the v1 space cache for subpage (or block size
+ * larger than page size) setups.
+ *
+ * V1 space cache still has some hard-coded PAGE_SIZE usage. Since we
+ * already default to the v2 cache, there is no need to support v1 as it
+ * is going to be deprecated anyway.
+ */
+ if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ btrfs_warn(fs_info,
+ "v1 space cache is not supported for page size %lu with sectorsize %u",
+ PAGE_SIZE, fs_info->sectorsize);
+ return -EINVAL;
+ }
+
+ /* This can be called by remount, we need to protect the super block. */
+ spin_lock(&fs_info->super_lock);
+ btrfs_set_super_incompat_flags(disk_super, incompat);
+ spin_unlock(&fs_info->super_lock);
+
+ return 0;
+}
+
+int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
{
u32 sectorsize;
u32 nodesize;
u32 stripesize;
u64 generation;
- u64 features;
u16 csum_type;
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *tree_root;
struct btrfs_root *chunk_root;
int ret;
- int err = -EINVAL;
int level;
ret = init_mount_fs_info(fs_info, sb);
- if (ret) {
- err = ret;
+ if (ret)
goto fail;
- }
/* These need to be init'ed before we start creating inodes and such. */
tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
@@ -3218,29 +3280,26 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
GFP_KERNEL);
fs_info->chunk_root = chunk_root;
if (!tree_root || !chunk_root) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto fail;
}
- fs_info->btree_inode = new_inode(sb);
- if (!fs_info->btree_inode) {
- err = -ENOMEM;
+ ret = btrfs_init_btree_inode(sb);
+ if (ret)
goto fail;
- }
- mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
- btrfs_init_btree_inode(fs_info);
invalidate_bdev(fs_devices->latest_dev->bdev);
/*
* Read super block and check the signature bytes only
*/
- disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
+ disk_super = btrfs_read_disk_super(fs_devices->latest_dev->bdev, 0, false);
if (IS_ERR(disk_super)) {
- err = PTR_ERR(disk_super);
+ ret = PTR_ERR(disk_super);
goto fail_alloc;
}
+ btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
/*
* Verify the type first, if that or the checksum value are
* corrupted, we'll find out
@@ -3249,7 +3308,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (!btrfs_supported_super_csum(csum_type)) {
btrfs_err(fs_info, "unsupported checksum algorithm: %u",
csum_type);
- err = -EINVAL;
+ ret = -EINVAL;
btrfs_release_disk_super(disk_super);
goto fail_alloc;
}
@@ -3258,7 +3317,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
ret = btrfs_init_csum_hash(fs_info, csum_type);
if (ret) {
- err = ret;
btrfs_release_disk_super(disk_super);
goto fail_alloc;
}
@@ -3267,9 +3325,9 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
* We want to check superblock checksum, the type is stored inside.
* Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
*/
- if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) {
+ if (btrfs_check_super_csum(fs_info, disk_super)) {
btrfs_err(fs_info, "superblock checksum mismatch");
- err = -EINVAL;
+ ret = -EINVAL;
btrfs_release_disk_super(disk_super);
goto fail_alloc;
}
@@ -3284,48 +3342,25 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
disk_super = fs_info->super_copy;
-
- features = btrfs_super_flags(disk_super);
- if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
- features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
- btrfs_set_super_flags(disk_super, features);
- btrfs_info(fs_info,
- "found metadata UUID change in progress flag, clearing");
- }
-
memcpy(fs_info->super_for_commit, fs_info->super_copy,
sizeof(*fs_info->super_for_commit));
ret = btrfs_validate_mount_super(fs_info);
if (ret) {
btrfs_err(fs_info, "superblock contains fatal errors");
- err = -EINVAL;
+ ret = -EINVAL;
goto fail_alloc;
}
- if (!btrfs_super_root(disk_super))
+ if (!btrfs_super_root(disk_super)) {
+ btrfs_err(fs_info, "invalid superblock tree root bytenr");
+ ret = -EINVAL;
goto fail_alloc;
+ }
/* check FS state, whether FS is broken. */
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
- set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
-
- /*
- * In the long term, we'll store the compression type in the super
- * block, and it'll be used for per file compression control.
- */
- fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
-
- /*
- * Flag our filesystem as having big metadata blocks if they are bigger
- * than the page size.
- */
- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
- btrfs_info(fs_info,
- "flagging fs with big metadata feature");
- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
- }
+ WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
/* Set up fs_info before parsing mount options */
nodesize = btrfs_super_nodesize(disk_super);
@@ -3335,95 +3370,51 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
fs_info->nodesize = nodesize;
+ fs_info->nodesize_bits = ilog2(nodesize);
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
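+ /*
+ * Folio order bounds derived from the block size: the smallest order
+ * whose folio covers one block, and the largest order that keeps at
+ * most BITS_PER_LONG blocks per folio.
+ */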
+ fs_info->block_min_order = ilog2(round_up(sectorsize, PAGE_SIZE) >> PAGE_SHIFT);
+ fs_info->block_max_order = ilog2((BITS_PER_LONG << fs_info->sectorsize_bits) >> PAGE_SHIFT);
fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
+ fs_info->fs_devices->fs_info = fs_info;
- ret = btrfs_parse_options(fs_info, options, sb->s_flags);
- if (ret) {
- err = ret;
- goto fail_alloc;
- }
+ if (fs_info->sectorsize > PAGE_SIZE)
+ btrfs_warn(fs_info,
+ "support for block size %u with page size %lu is experimental, some features may be missing",
+ fs_info->sectorsize, PAGE_SIZE);
+ /*
+ * Handle the space caching options appropriately now that we have the
+ * super block loaded and validated.
+ */
+ btrfs_set_free_space_cache_settings(fs_info);
- features = btrfs_super_incompat_flags(disk_super) &
- ~BTRFS_FEATURE_INCOMPAT_SUPP;
- if (features) {
- btrfs_err(fs_info,
- "cannot mount because of unsupported optional features (%llx)",
- features);
- err = -EINVAL;
+ if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
+ ret = -EINVAL;
goto fail_alloc;
}
- features = btrfs_super_incompat_flags(disk_super);
- features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
- if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
- features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
- else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
- features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
-
- if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
- btrfs_info(fs_info, "has skinny extents");
-
- /*
- * mixed block groups end up with duplicate but slightly offset
- * extent buffers for the same range. It leads to corruptions
- */
- if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
- (sectorsize != nodesize)) {
- btrfs_err(fs_info,
-"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
- nodesize, sectorsize);
+ ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
+ if (ret < 0)
goto fail_alloc;
- }
/*
- * Needn't use the lock because there is no other task which will
- * update the flag.
+ * At this point our mount options are validated. If we set ->max_inline
+ * to something non-standard, make sure we truncate it to sectorsize.
*/
- btrfs_set_super_incompat_flags(disk_super, features);
-
- features = btrfs_super_compat_ro_flags(disk_super) &
- ~BTRFS_FEATURE_COMPAT_RO_SUPP;
- if (!sb_rdonly(sb) && features) {
- btrfs_err(fs_info,
- "cannot mount read-write because of unsupported optional features (%llx)",
- features);
- err = -EINVAL;
- goto fail_alloc;
- }
-
- if (sectorsize < PAGE_SIZE) {
- struct btrfs_subpage_info *subpage_info;
-
- btrfs_warn(fs_info,
- "read-write for sector size %u with page size %lu is experimental",
- sectorsize, PAGE_SIZE);
- if (btrfs_super_incompat_flags(fs_info->super_copy) &
- BTRFS_FEATURE_INCOMPAT_RAID56) {
- btrfs_err(fs_info,
- "RAID56 is not yet supported for sector size %u with page size %lu",
- sectorsize, PAGE_SIZE);
- err = -EINVAL;
- goto fail_alloc;
- }
- subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
- if (!subpage_info)
- goto fail_alloc;
- btrfs_init_subpage_info(subpage_info, sectorsize);
- fs_info->subpage_info = subpage_info;
- }
+ fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
- ret = btrfs_init_workqueues(fs_info, fs_devices);
- if (ret) {
- err = ret;
+ ret = btrfs_alloc_compress_wsm(fs_info);
+ if (ret)
+ goto fail_sb_buffer;
+ ret = btrfs_init_workqueues(fs_info);
+ if (ret)
goto fail_sb_buffer;
- }
sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
+ /* Update the values for the current filesystem. */
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
@@ -3438,21 +3429,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
generation = btrfs_super_chunk_root_generation(disk_super);
level = btrfs_super_chunk_root_level(disk_super);
-
- chunk_root->node = read_tree_block(fs_info,
- btrfs_super_chunk_root(disk_super),
- BTRFS_CHUNK_TREE_OBJECTID,
- generation, level, NULL);
- if (IS_ERR(chunk_root->node) ||
- !extent_buffer_uptodate(chunk_root->node)) {
+ ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
+ generation, level);
+ if (ret) {
btrfs_err(fs_info, "failed to read chunk root");
- if (!IS_ERR(chunk_root->node))
- free_extent_buffer(chunk_root->node);
- chunk_root->node = NULL;
goto fail_tree_roots;
}
- btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
- chunk_root->commit_root = btrfs_root_node(chunk_root);
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
offsetof(struct btrfs_header, chunk_tree_uuid),
@@ -3472,8 +3454,9 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
* below in btrfs_init_dev_replace().
*/
btrfs_free_extra_devids(fs_devices);
- if (!fs_devices->latest_dev->bdev) {
+ if (unlikely(!fs_devices->latest_dev->bdev)) {
btrfs_err(fs_info, "failed to read devices");
+ ret = -EIO;
goto fail_tree_roots;
}
@@ -3489,8 +3472,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
ret = btrfs_get_dev_zone_info_all_devices(fs_info);
if (ret) {
btrfs_err(fs_info,
- "zoned: failed to read device zone info: %d",
- ret);
+ "zoned: failed to read device zone info: %d", ret);
goto fail_block_groups;
}
@@ -3563,47 +3545,34 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_sysfs;
}
+ btrfs_zoned_reserve_data_reloc_bg(fs_info);
+ btrfs_free_zone_cache(fs_info);
+
+ btrfs_check_active_zone_reservation(fs_info);
+
if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
!btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
"writable mount is not allowed due to too many missing devices");
+ ret = -EINVAL;
goto fail_sysfs;
}
- fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
+ fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
"btrfs-cleaner");
- if (IS_ERR(fs_info->cleaner_kthread))
+ if (IS_ERR(fs_info->cleaner_kthread)) {
+ ret = PTR_ERR(fs_info->cleaner_kthread);
goto fail_sysfs;
+ }
fs_info->transaction_kthread = kthread_run(transaction_kthread,
tree_root,
"btrfs-transaction");
- if (IS_ERR(fs_info->transaction_kthread))
+ if (IS_ERR(fs_info->transaction_kthread)) {
+ ret = PTR_ERR(fs_info->transaction_kthread);
goto fail_cleaner;
-
- if (!btrfs_test_opt(fs_info, NOSSD) &&
- !fs_info->fs_devices->rotating) {
- btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
}
- /*
- * Mount does not set all options immediately, we can do it now and do
- * not have to wait for transaction commit
- */
- btrfs_apply_pending_changes(fs_info);
-
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
- ret = btrfsic_mount(fs_info, fs_devices,
- btrfs_test_opt(fs_info,
- CHECK_INTEGRITY_DATA) ? 1 : 0,
- fs_info->check_integrity_print_mask);
- if (ret)
- btrfs_warn(fs_info,
- "failed to initialize integrity check module: %d",
- ret);
- }
-#endif
ret = btrfs_read_qgroup_config(fs_info);
if (ret)
goto fail_trans_kthread;
@@ -3616,22 +3585,20 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
!btrfs_test_opt(fs_info, NOLOGREPLAY)) {
btrfs_info(fs_info, "start tree-log replay");
ret = btrfs_replay_log(fs_info, fs_devices);
- if (ret) {
- err = ret;
+ if (ret)
goto fail_qgroup;
- }
}
fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) {
- err = PTR_ERR(fs_info->fs_root);
- btrfs_warn(fs_info, "failed to read fs tree: %d", err);
+ ret = PTR_ERR(fs_info->fs_root);
+ btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
fs_info->fs_root = NULL;
goto fail_qgroup;
}
if (sb_rdonly(sb))
- goto clear_oneshot;
+ return 0;
ret = btrfs_start_pre_rw_mount(fs_info);
if (ret) {
@@ -3655,8 +3622,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
-clear_oneshot:
- btrfs_clear_oneshot_options(fs_info);
+ /* Kick the cleaner thread so it'll start deleting snapshots. */
+ if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
+ wake_up_process(fs_info->cleaner_kthread);
+
return 0;
fail_qgroup:
@@ -3693,141 +3662,74 @@ fail_sb_buffer:
btrfs_stop_all_workers(fs_info);
btrfs_free_block_groups(fs_info);
fail_alloc:
- btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ btrfs_mapping_tree_free(fs_info);
iput(fs_info->btree_inode);
fail:
- btrfs_close_devices(fs_info->fs_devices);
- return err;
+ ASSERT(ret < 0);
+ return ret;
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
static void btrfs_end_super_write(struct bio *bio)
{
struct btrfs_device *device = bio->bi_private;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
- struct page *page;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
+ struct folio_iter fi;
+ bio_for_each_folio_all(fi, bio) {
if (bio->bi_status) {
- btrfs_warn_rl_in_rcu(device->fs_info,
- "lost page write due to IO error on %s (%d)",
- rcu_str_deref(device->name),
+ btrfs_warn_rl(device->fs_info,
+ "lost super block write due to IO error on %s (%d)",
+ btrfs_dev_name(device),
blk_status_to_errno(bio->bi_status));
- ClearPageUptodate(page);
- SetPageError(page);
btrfs_dev_stat_inc_and_print(device,
BTRFS_DEV_STAT_WRITE_ERRS);
- } else {
- SetPageUptodate(page);
+ /* Ensure failure if the primary sb fails. */
+ if (bio->bi_opf & REQ_FUA)
+ atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
+ &device->sb_write_errors);
+ else
+ atomic_inc(&device->sb_write_errors);
}
-
- put_page(page);
- unlock_page(page);
+ folio_unlock(fi.folio);
+ folio_put(fi.folio);
}
bio_put(bio);
}
-struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
- int copy_num)
-{
- struct btrfs_super_block *super;
- struct page *page;
- u64 bytenr, bytenr_orig;
- struct address_space *mapping = bdev->bd_inode->i_mapping;
- int ret;
-
- bytenr_orig = btrfs_sb_offset(copy_num);
- ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
- if (ret == -ENOENT)
- return ERR_PTR(-EINVAL);
- else if (ret)
- return ERR_PTR(ret);
-
- if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
- return ERR_PTR(-EINVAL);
-
- page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
- if (IS_ERR(page))
- return ERR_CAST(page);
-
- super = page_address(page);
- if (btrfs_super_magic(super) != BTRFS_MAGIC) {
- btrfs_release_disk_super(super);
- return ERR_PTR(-ENODATA);
- }
-
- if (btrfs_super_bytenr(super) != bytenr_orig) {
- btrfs_release_disk_super(super);
- return ERR_PTR(-EINVAL);
- }
-
- return super;
-}
-
-
-struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
-{
- struct btrfs_super_block *super, *latest = NULL;
- int i;
- u64 transid = 0;
-
- /* we would like to check all the supers, but that would make
- * a btrfs mount succeed after a mkfs from a different FS.
- * So, we need to add a special mount option to scan for
- * later supers, using BTRFS_SUPER_MIRROR_MAX instead
- */
- for (i = 0; i < 1; i++) {
- super = btrfs_read_dev_one_super(bdev, i);
- if (IS_ERR(super))
- continue;
-
- if (!latest || btrfs_super_generation(super) > transid) {
- if (latest)
- btrfs_release_disk_super(super);
-
- latest = super;
- transid = btrfs_super_generation(super);
- }
- }
-
- return super;
-}
-
/*
* Write superblock @sb to the @device. Do not wait for completion, all the
- * pages we use for writing are locked.
+ * folios we use for writing are locked.
*
* Write @max_mirrors copies of the superblock, where 0 means default that fit
* the expected device size at commit time. Note that max_mirrors must be
* same for write and wait phases.
*
- * Return number of errors when page is not found or submission fails.
+ * Return number of errors when folio is not found or submission fails.
*/
static int write_dev_supers(struct btrfs_device *device,
struct btrfs_super_block *sb, int max_mirrors)
{
struct btrfs_fs_info *fs_info = device->fs_info;
- struct address_space *mapping = device->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = device->bdev->bd_mapping;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
int i;
- int errors = 0;
int ret;
u64 bytenr, bytenr_orig;
+ atomic_set(&device->sb_write_errors, 0);
+
if (max_mirrors == 0)
max_mirrors = BTRFS_SUPER_MIRROR_MAX;
shash->tfm = fs_info->csum_shash;
for (i = 0; i < max_mirrors; i++) {
- struct page *page;
+ struct folio *folio;
struct bio *bio;
struct btrfs_super_block *disk_super;
+ size_t offset;
bytenr_orig = btrfs_sb_offset(i);
ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
@@ -3835,9 +3737,9 @@ static int write_dev_supers(struct btrfs_device *device,
continue;
} else if (ret < 0) {
btrfs_err(device->fs_info,
- "couldn't get super block location for mirror %d",
- i);
- errors++;
+ "couldn't get super block location for mirror %d error %d",
+ i, ret);
+ atomic_inc(&device->sb_write_errors);
continue;
}
if (bytenr + BTRFS_SUPER_INFO_SIZE >=
@@ -3850,20 +3752,19 @@ static int write_dev_supers(struct btrfs_device *device,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
sb->csum);
- page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
- GFP_NOFS);
- if (!page) {
+ folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ GFP_NOFS);
+ if (IS_ERR(folio)) {
btrfs_err(device->fs_info,
- "couldn't get super block page for bytenr %llu",
- bytenr);
- errors++;
+ "couldn't get super block page for bytenr %llu error %ld",
+ bytenr, PTR_ERR(folio));
+ atomic_inc(&device->sb_write_errors);
continue;
}
- /* Bump the refcount for wait_dev_supers() */
- get_page(page);
-
- disk_super = page_address(page);
+ offset = offset_in_folio(folio, bytenr);
+ disk_super = folio_address(folio) + offset;
memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
/*
@@ -3871,37 +3772,35 @@ static int write_dev_supers(struct btrfs_device *device,
* to do I/O, so we don't lose the ability to do integrity
* checking.
*/
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, device->bdev);
+ bio = bio_alloc(device->bdev, 1,
+ REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
+ GFP_NOFS);
bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
bio->bi_private = device;
bio->bi_end_io = btrfs_end_super_write;
- __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
- offset_in_page(bytenr));
+ bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
/*
* We FUA only the first super block. The others we allow to
* go down lazy and there's a short window where the on-disk
* copies might still contain the older version.
*/
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
bio->bi_opf |= REQ_FUA;
-
- btrfsic_submit_bio(bio);
+ submit_bio(bio);
if (btrfs_advance_sb_log(device, i))
- errors++;
+ atomic_inc(&device->sb_write_errors);
}
- return errors < i ? 0 : -1;
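+ /* Succeed unless the error count reached the number of super block copies. */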
+ return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
}
/*
* Wait for write completion of superblocks done by write_dev_supers,
* @max_mirrors same for write and wait phases.
*
- * Return number of errors when page is not found or not marked up to
- * date.
+ * Return -1 if the primary super block write failed or if no super block
+ * copies were written, otherwise 0.
*/
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
@@ -3915,7 +3814,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
max_mirrors = BTRFS_SUPER_MIRROR_MAX;
for (i = 0; i < max_mirrors; i++) {
- struct page *page;
+ struct folio *folio;
ret = btrfs_sb_log_location(device, i, READ, &bytenr);
if (ret == -ENOENT) {
@@ -3930,30 +3829,20 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
device->commit_total_bytes)
break;
- page = find_get_page(device->bdev->bd_inode->i_mapping,
- bytenr >> PAGE_SHIFT);
- if (!page) {
- errors++;
- if (i == 0)
- primary_failed = true;
+ folio = filemap_get_folio(device->bdev->bd_mapping,
+ bytenr >> PAGE_SHIFT);
+ /* If the folio has been removed, then we know it completed. */
+ if (IS_ERR(folio))
continue;
- }
- /* Page is submitted locked and unlocked once the IO completes */
- wait_on_page_locked(page);
- if (PageError(page)) {
- errors++;
- if (i == 0)
- primary_failed = true;
- }
-
- /* Drop our reference */
- put_page(page);
- /* Drop the reference from the writing run */
- put_page(page);
+ /* Folio will be unlocked once the write completes. */
+ folio_wait_locked(folio);
+ folio_put(folio);
}
- /* log error, force error return */
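+ /*
+ * Account the write errors recorded by the end_io handler; a primary
+ * super block failure was weighted with BTRFS_SUPER_PRIMARY_WRITE_ERROR,
+ * so reaching that threshold is treated as a primary failure.
+ */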
+ errors += atomic_read(&device->sb_write_errors);
+ if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
+ primary_failed = true;
if (primary_failed) {
btrfs_err(device->fs_info, "error writing primary super block to device %llu",
device->devid);
@@ -3969,6 +3858,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
*/
static void btrfs_end_empty_barrier(struct bio *bio)
{
+ bio_uninit(bio);
complete(bio->bi_private);
}
@@ -3978,44 +3868,39 @@ static void btrfs_end_empty_barrier(struct bio *bio)
*/
static void write_dev_flush(struct btrfs_device *device)
{
- struct request_queue *q = bdev_get_queue(device->bdev);
- struct bio *bio = device->flush_bio;
+ struct bio *bio = &device->flush_bio;
- if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- return;
+ device->last_flush_error = BLK_STS_OK;
- bio_reset(bio);
+ bio_init(bio, device->bdev, NULL, 0,
+ REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
bio->bi_end_io = btrfs_end_empty_barrier;
- bio_set_dev(bio, device->bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
-
- btrfsic_submit_bio(bio);
+ submit_bio(bio);
set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
/*
* If the flush bio has been submitted by write_dev_flush, wait for it.
+ * Return true for any error, and false otherwise.
*/
-static blk_status_t wait_dev_flush(struct btrfs_device *device)
+static bool wait_dev_flush(struct btrfs_device *device)
{
- struct bio *bio = device->flush_bio;
+ struct bio *bio = &device->flush_bio;
- if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
- return BLK_STS_OK;
+ if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
+ return false;
- clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
wait_for_completion_io(&device->flush_wait);
- return bio->bi_status;
-}
+ if (bio->bi_status) {
+ device->last_flush_error = bio->bi_status;
+ btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
+ return true;
+ }
-static int check_barrier_error(struct btrfs_fs_info *fs_info)
-{
- if (!btrfs_check_rw_degradable(fs_info, NULL))
- return -EIO;
- return 0;
+ return false;
}
/*
@@ -4027,7 +3912,6 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
struct list_head *head;
struct btrfs_device *dev;
int errors_wait = 0;
- blk_status_t ret;
lockdep_assert_held(&info->fs_devices->device_list_mutex);
/* send down all the barriers */
@@ -4042,7 +3926,6 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
continue;
write_dev_flush(dev);
- dev->last_flush_error = BLK_STS_OK;
}
/* wait for all the barriers */
@@ -4057,23 +3940,17 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
- ret = wait_dev_flush(dev);
- if (ret) {
- dev->last_flush_error = ret;
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_FLUSH_ERRS);
+ if (wait_dev_flush(dev))
errors_wait++;
- }
}
- if (errors_wait) {
- /*
- * At some point we need the status of all disks
- * to arrive at the volume status. So error checking
- * is being pushed to a separate loop.
- */
- return check_barrier_error(info);
- }
+ /*
+ * Check the last_flush_error of the disks in order to determine the
+ * device state.
+ */
+ if (unlikely(errors_wait && !btrfs_check_rw_degradable(info, NULL)))
+ return -EIO;
+
return 0;
}
@@ -4099,7 +3976,7 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
}
if (min_tolerated == INT_MAX) {
- pr_warn("BTRFS: unknown raid flag: %llu", flags);
+ btrfs_warn(NULL, "unknown raid flag: %llu", flags);
min_tolerated = 0;
}
@@ -4173,7 +4050,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
ret = btrfs_validate_write_super(fs_info, sb);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_handle_fs_error(fs_info, -EUCLEAN,
"unexpected superblock corruption detected");
@@ -4184,7 +4061,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
if (ret)
total_errors++;
}
- if (total_errors > max_errors) {
+ if (unlikely(total_errors > max_errors)) {
btrfs_err(fs_info, "%d errors while writing supers",
total_errors);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4209,7 +4086,7 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
total_errors++;
}
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- if (total_errors > max_errors) {
+ if (unlikely(total_errors > max_errors)) {
btrfs_handle_fs_error(fs_info, -EIO,
"%d errors while writing supers",
total_errors);
@@ -4226,7 +4103,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_delete(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid);
+ (unsigned long)btrfs_root_id(root));
if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
drop_ref = true;
spin_unlock(&fs_info->fs_roots_radix_lock);
@@ -4243,61 +4120,8 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
btrfs_put_root(root);
}
-int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
-{
- u64 root_objectid = 0;
- struct btrfs_root *gang[8];
- int i = 0;
- int err = 0;
- unsigned int ret = 0;
-
- while (1) {
- spin_lock(&fs_info->fs_roots_radix_lock);
- ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
- (void **)gang, root_objectid,
- ARRAY_SIZE(gang));
- if (!ret) {
- spin_unlock(&fs_info->fs_roots_radix_lock);
- break;
- }
- root_objectid = gang[ret - 1]->root_key.objectid + 1;
-
- for (i = 0; i < ret; i++) {
- /* Avoid to grab roots in dead_roots */
- if (btrfs_root_refs(&gang[i]->root_item) == 0) {
- gang[i] = NULL;
- continue;
- }
- /* grab all the search result for later use */
- gang[i] = btrfs_grab_root(gang[i]);
- }
- spin_unlock(&fs_info->fs_roots_radix_lock);
-
- for (i = 0; i < ret; i++) {
- if (!gang[i])
- continue;
- root_objectid = gang[i]->root_key.objectid;
- err = btrfs_orphan_cleanup(gang[i]);
- if (err)
- break;
- btrfs_put_root(gang[i]);
- }
- root_objectid++;
- }
-
- /* release the uncleaned roots due to error */
- for (; i < ret; i++) {
- if (gang[i])
- btrfs_put_root(gang[i]);
- }
- return err;
-}
-
int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *root = fs_info->tree_root;
- struct btrfs_trans_handle *trans;
-
mutex_lock(&fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
@@ -4307,10 +4131,47 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- return btrfs_commit_transaction(trans);
+ return btrfs_commit_current_transaction(fs_info->tree_root);
+}
+
+static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_transaction *trans;
+ struct btrfs_transaction *tmp;
+ bool found = false;
+
+ /*
+ * This function is only called at the very end of close_ctree(),
+ * thus there is no other running transaction and no need to take trans_lock.
+ */
+ ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
+ list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
+ struct extent_state *cached = NULL;
+ u64 dirty_bytes = 0;
+ u64 cur = 0;
+ u64 found_start;
+ u64 found_end;
+
+ found = true;
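+ /* Sum up the byte ranges still marked dirty in this transaction. */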
+ while (btrfs_find_first_extent_bit(&trans->dirty_pages, cur,
+ &found_start, &found_end,
+ EXTENT_DIRTY, &cached)) {
+ dirty_bytes += found_end + 1 - found_start;
+ cur = found_end + 1;
+ }
+ btrfs_warn(fs_info,
+ "transaction %llu (with %llu dirty metadata bytes) is not committed",
+ trans->transid, dirty_bytes);
+ btrfs_cleanup_one_transaction(trans);
+
+ if (trans == fs_info->running_transaction)
+ fs_info->running_transaction = NULL;
+ list_del_init(&trans->list);
+
+ btrfs_put_transaction(trans);
+ trace_btrfs_transaction_commit(fs_info);
+ }
+ ASSERT(!found);
}
void __cold close_ctree(struct btrfs_fs_info *fs_info)
@@ -4318,6 +4179,28 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
int ret;
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+
+ /*
+ * If we had UNFINISHED_DROPS we could still be processing them, so
+ * clear that bit and wake up relocation so it can stop.
+ * We must do this before stopping the block group reclaim task, because
+ * at btrfs_relocate_block_group() we wait for this bit, and after the
+ * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
+ * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
+ * return 1.
+ */
+ btrfs_wake_unfinished_drop(fs_info);
+
+ /*
+ * We may have the reclaim task running and relocating a data block group,
+ * in which case it may create delayed iputs. So stop it before we park
+ * the cleaner kthread otherwise we can get new delayed iputs after
+ * parking the cleaner, and that can make the async reclaim task to hang
+ * if it's waiting for delayed iputs to complete, since the cleaner is
+ * parked and can not run delayed iputs - this will make us hang when
+ * trying to stop the async reclaim task.
+ */
+ cancel_work_sync(&fs_info->reclaim_bgs_work);
/*
* We don't want the cleaner to start new transactions, add more delayed
* iputs, etc. while we're closing. We can't use kthread_stop() yet
@@ -4348,11 +4231,104 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
/* clear out the rbtree of defraggable inodes */
btrfs_cleanup_defrag_inodes(fs_info);
+ /*
+ * Handle the error fs first, as it will flush and wait for all ordered
+ * extents. This will generate delayed iputs, thus we want to handle
+ * it first.
+ */
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
+ btrfs_error_commit_super(fs_info);
+
+ /*
+ * Wait for any fixup workers to complete.
+ * If we don't wait for them here and they are still running by the time
+ * we call kthread_stop() against the cleaner kthread further below, we
+ * get a use-after-free on the cleaner because the fixup worker adds an
+ * inode to the list of delayed iputs and then attempts to wake up the
+ * cleaner kthread, which was already stopped and destroyed. We already
+ * parked the cleaner, but below we run all pending delayed iputs.
+ */
+ btrfs_flush_workqueue(fs_info->fixup_workers);
+ /*
+ * Similar case here: we have to wait for the delalloc workers before we
+ * proceed below and stop the cleaner kthread, otherwise we trigger a
+ * use-after-free on the cleaner kthread task_struct when a delalloc
+ * worker running submit_compressed_extents() adds a delayed iput, which
+ * does a wake up on the cleaner kthread that was already freed below
+ * when we called kthread_stop().
+ */
+ btrfs_flush_workqueue(fs_info->delalloc_workers);
+
+ /*
+ * We can have ordered extents getting their last reference dropped from
+ * the fs_info->workers queue because, for async data bio writes, we
+ * queue a work item on that queue at btrfs_wq_submit_bio(), which runs
+ * run_one_async_done() which calls btrfs_bio_end_io() in case the bio
+ * has an error, and that later function can do the final
+ * btrfs_put_ordered_extent() on the ordered extent attached to the bio,
+ * which adds a delayed iput for the inode. So we must flush the queue
+ * so that we don't have delayed iputs after committing the current
+ * transaction below and stopping the cleaner and transaction kthreads.
+ */
+ btrfs_flush_workqueue(fs_info->workers);
+
+ /*
+ * When finishing a compressed write bio we schedule a work queue item
+ * to finish an ordered extent - end_bbio_compressed_write()
+ * calls btrfs_finish_ordered_extent(), which in turn calls
+ * btrfs_queue_ordered_fn(), and that queues the ordered extent
+ * completion either in the endio_write_workers work queue or in the
+ * fs_info->endio_freespace_worker work queue. We flush those queues
+ * below, so before we flush them we must flush this queue for the
+ * workers of compressed writes.
+ */
+ flush_workqueue(fs_info->endio_workers);
+
+ /*
+ * After we parked the cleaner kthread, ordered extents may have
+ * completed and created new delayed iputs. If one of the async reclaim
+ * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
+ * can hang forever trying to stop it, because if a delayed iput is
+ * added after it ran btrfs_run_delayed_iputs() and before it called
+ * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
+ * no one else to run iputs.
+ *
+ * So wait for all ongoing ordered extents to complete and then run
+ * delayed iputs. This works because once we reach this point no one
+ * can create new ordered extents, but delayed iputs can still be added
+ * by a reclaim worker (see comments further below).
+ *
+ * Also note that btrfs_wait_ordered_roots() is not safe here, because
+ * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
+ * but the delayed iput for the respective inode is made only when doing
+ * the final btrfs_put_ordered_extent() (which must happen at
+ * btrfs_finish_ordered_io() when we are unmounting).
+ */
+ btrfs_flush_workqueue(fs_info->endio_write_workers);
+ /* Ordered extents for free space inodes. */
+ btrfs_flush_workqueue(fs_info->endio_freespace_worker);
+ /*
+ * Run delayed iputs in case an async reclaim worker is waiting for them
+ * to be run as mentioned above.
+ */
+ btrfs_run_delayed_iputs(fs_info);
+
cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
cancel_work_sync(&fs_info->preempt_reclaim_work);
+ cancel_work_sync(&fs_info->em_shrinker_work);
- cancel_work_sync(&fs_info->reclaim_bgs_work);
+ /*
+ * Run delayed iputs again because an async reclaim worker may have
+ * added new ones if it was flushing delalloc:
+ *
+ * shrink_delalloc() -> btrfs_start_delalloc_roots() ->
+ * start_delalloc_inodes() -> btrfs_add_delayed_iput()
+ */
+ btrfs_run_delayed_iputs(fs_info);
+
+ /* There should be no more workload to generate new delayed iputs. */
+ set_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state);
/* Cancel or finish ongoing discard work */
btrfs_discard_cleanup(fs_info);
@@ -4382,9 +4358,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
btrfs_err(fs_info, "commit super ret %d", ret);
}
- if (BTRFS_FS_ERROR(fs_info))
- btrfs_error_commit_super(fs_info);
-
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
@@ -4392,7 +4365,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
if (btrfs_check_quota_leak(fs_info)) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ DEBUG_WARN("qgroup reserved space leaked");
btrfs_err(fs_info, "qgroup reserved space leaked");
}
@@ -4421,7 +4394,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
btrfs_stop_all_workers(fs_info);
/* We shouldn't have any transaction open at this point */
- ASSERT(list_empty(&fs_info->trans_list));
+ warn_about_uncommitted_trans(fs_info);
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
free_root_pointers(fs_info, true);
@@ -4438,37 +4411,14 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
iput(fs_info->btree_inode);
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
- btrfsic_unmount(fs_info->fs_devices);
-#endif
-
- btrfs_mapping_tree_free(&fs_info->mapping_tree);
- btrfs_close_devices(fs_info->fs_devices);
-}
-
-int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
- int atomic)
-{
- int ret;
- struct inode *btree_inode = buf->pages[0]->mapping->host;
-
- ret = extent_buffer_uptodate(buf);
- if (!ret)
- return ret;
-
- ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
- parent_transid, atomic);
- if (ret == -EAGAIN)
- return ret;
- return !ret;
+ btrfs_mapping_tree_free(fs_info);
}
-void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
u64 transid = btrfs_header_generation(buf);
- int was_dirty;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
@@ -4479,27 +4429,16 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
return;
#endif
+ /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
+ ASSERT(trans->transid == fs_info->generation);
btrfs_assert_tree_write_locked(buf);
- if (transid != fs_info->generation)
- WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
- buf->start, transid, fs_info->generation);
- was_dirty = set_extent_buffer_dirty(buf);
- if (!was_dirty)
- percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
- buf->len,
- fs_info->dirty_metadata_batch);
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- /*
- * Since btrfs_mark_buffer_dirty() can be called with item pointer set
- * but item data not updated.
- * So here we should only check item pointers, not item data.
- */
- if (btrfs_header_level(buf) == 0 &&
- btrfs_check_leaf_relaxed(buf)) {
- btrfs_print_leaf(buf);
- ASSERT(0);
+ if (unlikely(transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
+ buf->start, transid, fs_info->generation);
}
-#endif
+ set_extent_buffer_dirty(buf);
}
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
@@ -4535,22 +4474,11 @@ void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
__btrfs_btree_balance_dirty(fs_info, 0);
}
-int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
- struct btrfs_key *first_key)
-{
- return btree_read_extent_buffer_pages(buf, parent_transid,
- level, first_key);
-}
-
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
/* cleanup FS via transaction */
btrfs_cleanup_transaction(fs_info);
- mutex_lock(&fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(fs_info);
- mutex_unlock(&fs_info->cleaner_mutex);
-
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
}
@@ -4574,7 +4502,7 @@ static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
for (i = 0; i < ret; i++) {
if (!gang[i])
continue;
- root_objectid = gang[i]->root_key.objectid;
+ root_objectid = btrfs_root_id(gang[i]);
btrfs_free_log(NULL, gang[i]);
btrfs_put_root(gang[i]);
}
@@ -4603,9 +4531,7 @@ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
+ LIST_HEAD(splice);
spin_lock(&fs_info->ordered_root_lock);
list_splice_init(&fs_info->ordered_roots, &splice);
@@ -4629,95 +4555,13 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
* extents that haven't had their dirty pages IO start writeout yet
* actually get run and error out properly.
*/
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
-}
-
-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_fs_info *fs_info)
-{
- struct rb_node *node;
- struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_node *ref;
- int ret = 0;
-
- delayed_refs = &trans->delayed_refs;
-
- spin_lock(&delayed_refs->lock);
- if (atomic_read(&delayed_refs->num_entries) == 0) {
- spin_unlock(&delayed_refs->lock);
- btrfs_debug(fs_info, "delayed_refs has NO entry");
- return ret;
- }
-
- while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
- struct btrfs_delayed_ref_head *head;
- struct rb_node *n;
- bool pin_bytes = false;
-
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
- if (btrfs_delayed_ref_lock(delayed_refs, head))
- continue;
-
- spin_lock(&head->lock);
- while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
- ref = rb_entry(n, struct btrfs_delayed_ref_node,
- ref_node);
- ref->in_tree = 0;
- rb_erase_cached(&ref->ref_node, &head->ref_tree);
- RB_CLEAR_NODE(&ref->ref_node);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
- atomic_dec(&delayed_refs->num_entries);
- btrfs_put_delayed_ref(ref);
- }
- if (head->must_insert_reserved)
- pin_bytes = true;
- btrfs_free_delayed_extent_op(head->extent_op);
- btrfs_delete_ref_head(delayed_refs, head);
- spin_unlock(&head->lock);
- spin_unlock(&delayed_refs->lock);
- mutex_unlock(&head->mutex);
-
- if (pin_bytes) {
- struct btrfs_block_group *cache;
-
- cache = btrfs_lookup_block_group(fs_info, head->bytenr);
- BUG_ON(!cache);
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->pinned += head->num_bytes;
- btrfs_space_info_update_bytes_pinned(fs_info,
- cache->space_info, head->num_bytes);
- cache->reserved -= head->num_bytes;
- cache->space_info->bytes_reserved -= head->num_bytes;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- btrfs_put_block_group(cache);
-
- btrfs_error_unpin_extent_range(fs_info, head->bytenr,
- head->bytenr + head->num_bytes - 1);
- }
- btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
- btrfs_put_delayed_ref_head(head);
- cond_resched();
- spin_lock(&delayed_refs->lock);
- }
- btrfs_qgroup_destroy_extent_records(trans);
-
- spin_unlock(&delayed_refs->lock);
-
- return ret;
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
}
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
+ LIST_HEAD(splice);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
@@ -4726,7 +4570,7 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
struct inode *inode = NULL;
btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
delalloc_inodes);
- __btrfs_del_delalloc_inode(root, btrfs_inode);
+ btrfs_del_delalloc_inode(btrfs_inode);
spin_unlock(&root->delalloc_lock);
/*
@@ -4735,7 +4579,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
*/
inode = igrab(&btrfs_inode->vfs_inode);
if (inode) {
+ unsigned int nofs_flag;
+
+ nofs_flag = memalloc_nofs_save();
invalidate_inode_pages2(inode->i_mapping);
+ memalloc_nofs_restore(nofs_flag);
iput(inode);
}
spin_lock(&root->delalloc_lock);
@@ -4746,9 +4594,7 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
+ LIST_HEAD(splice);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
@@ -4767,45 +4613,38 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->delalloc_root_lock);
}
-static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *dirty_pages,
- int mark)
+static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *dirty_pages,
+ int mark)
{
- int ret;
struct extent_buffer *eb;
u64 start = 0;
u64 end;
- while (1) {
- ret = find_first_extent_bit(dirty_pages, start, &start, &end,
- mark, NULL);
- if (ret)
- break;
-
- clear_extent_bits(dirty_pages, start, end, mark);
+ while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark, NULL)) {
+ btrfs_clear_extent_bit(dirty_pages, start, end, mark, NULL);
while (start <= end) {
eb = find_extent_buffer(fs_info, start);
start += fs_info->nodesize;
if (!eb)
continue;
+
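+ /* Wait out in-flight writeback and clear the dirty bit under the tree lock. */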
+ btrfs_tree_lock(eb);
wait_on_extent_buffer_writeback(eb);
+ btrfs_clear_buffer_dirty(NULL, eb);
+ btrfs_tree_unlock(eb);
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
- &eb->bflags))
- clear_extent_buffer_dirty(eb);
free_extent_buffer_stale(eb);
}
}
-
- return ret;
}
-static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *unpin)
+static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *unpin)
{
u64 start;
u64 end;
- int ret;
while (1) {
struct extent_state *cached_state = NULL;
@@ -4817,21 +4656,18 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
* the same extent range.
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
- ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, &cached_state);
- if (ret) {
+ if (!btrfs_find_first_extent_bit(unpin, 0, &start, &end,
+ EXTENT_DIRTY, &cached_state)) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
- clear_extent_dirty(unpin, start, end, &cached_state);
- free_extent_state(cached_state);
+ btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
+ btrfs_free_extent_state(cached_state);
btrfs_error_unpin_extent_range(fs_info, start, end);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
-
- return 0;
}
static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
@@ -4840,7 +4676,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
inode = cache->io_ctl.inode;
if (inode) {
+ unsigned int nofs_flag;
+
+ nofs_flag = memalloc_nofs_save();
invalidate_inode_pages2(inode->i_mapping);
+ memalloc_nofs_restore(nofs_flag);
+
BTRFS_I(inode)->generation = 0;
cache->io_ctl.inode = NULL;
iput(inode);
@@ -4874,7 +4715,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_put_block_group(cache);
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@@ -4896,9 +4737,35 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
}
}
-void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
- struct btrfs_fs_info *fs_info)
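+/*
+ * Release the per-transaction qgroup metadata reservations of every root
+ * still tagged as part of the current transaction, clearing the tag as we go.
+ */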
+static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *gang[8];
+ int i;
+ int ret;
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ while (1) {
+ ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
+ (void **)gang, 0,
+ ARRAY_SIZE(gang),
+ BTRFS_ROOT_TRANS_TAG);
+ if (ret == 0)
+ break;
+ for (i = 0; i < ret; i++) {
+ struct btrfs_root *root = gang[i];
+
+ btrfs_qgroup_free_meta_all_pertrans(root);
+ radix_tree_tag_clear(&fs_info->fs_roots_radix,
+ (unsigned long)btrfs_root_id(root),
+ BTRFS_ROOT_TRANS_TAG);
+ }
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+}
+
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans)
{
+ struct btrfs_fs_info *fs_info = cur_trans->fs_info;
struct btrfs_device *dev, *tmp;
btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
@@ -4910,7 +4777,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
list_del_init(&dev->post_commit_list);
}
- btrfs_destroy_delayed_refs(cur_trans, fs_info);
+ btrfs_destroy_delayed_refs(cur_trans);
cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&fs_info->transaction_blocked_wait);
@@ -4918,14 +4785,10 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
cur_trans->state = TRANS_STATE_UNBLOCKED;
wake_up(&fs_info->transaction_wait);
- btrfs_destroy_delayed_inodes(fs_info);
-
btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
- btrfs_free_redirty_list(cur_trans);
-
cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
}
@@ -4940,7 +4803,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
while (!list_empty(&fs_info->trans_list)) {
t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
- if (t->state >= TRANS_STATE_COMMIT_START) {
+ if (t->state >= TRANS_STATE_COMMIT_PREP) {
refcount_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(fs_info, t->transid);
@@ -4960,7 +4823,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
} else {
spin_unlock(&fs_info->trans_lock);
}
- btrfs_cleanup_one_transaction(t, fs_info);
+ btrfs_cleanup_one_transaction(t);
spin_lock(&fs_info->trans_lock);
if (t == fs_info->running_transaction)
@@ -4969,7 +4832,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(t);
- trace_btrfs_transaction_commit(fs_info->tree_root);
+ trace_btrfs_transaction_commit(fs_info);
spin_lock(&fs_info->trans_lock);
}
spin_unlock(&fs_info->trans_lock);
@@ -4978,6 +4841,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
btrfs_assert_delayed_root_empty(fs_info);
btrfs_destroy_all_delalloc_inodes(fs_info);
btrfs_drop_all_logs(fs_info);
+ btrfs_free_all_qgroup_pertrans(fs_info);
mutex_unlock(&fs_info->transaction_kthread_mutex);
return 0;
@@ -4985,7 +4849,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
struct extent_buffer *l;
struct btrfs_key search_key;
@@ -5001,8 +4865,14 @@ int btrfs_init_root_free_objectid(struct btrfs_root *root)
search_key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
- goto error;
- BUG_ON(ret == 0); /* Corruption */
+ return ret;
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found, there would have to exist a root
+ * with such id, but this is out of valid range.
+ */
+ return -EUCLEAN;
+ }
if (path->slots[0] > 0) {
slot = path->slots[0] - 1;
l = path->nodes[0];
@@ -5012,10 +4882,8 @@ int btrfs_init_root_free_objectid(struct btrfs_root *root)
} else {
root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
}
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
+
+ return 0;
}
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
@@ -5026,7 +4894,7 @@ int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
btrfs_warn(root->fs_info,
"the objectid of root %llu reaches its highest value",
- root->root_key.objectid);
+ btrfs_root_id(root));
ret = -ENOSPC;
goto out;
}
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index a2b5db4ba262..5320da83d0cf 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -6,6 +6,23 @@
#ifndef BTRFS_DISK_IO_H
#define BTRFS_DISK_IO_H
+#include <linux/sizes.h>
+#include <linux/compiler_types.h>
+#include "ctree.h"
+#include "bio.h"
+#include "ordered-data.h"
+
+struct block_device;
+struct super_block;
+struct extent_buffer;
+struct btrfs_device;
+struct btrfs_fs_devices;
+struct btrfs_fs_info;
+struct btrfs_super_block;
+struct btrfs_trans_handle;
+struct btrfs_tree_parent_check;
+struct btrfs_transaction;
+
#define BTRFS_SUPER_MIRROR_MAX 3
#define BTRFS_SUPER_MIRROR_SHIFT 12
@@ -17,13 +34,6 @@
*/
#define BTRFS_BDEV_BLOCKSIZE (4096)
-enum btrfs_wq_endio_type {
- BTRFS_WQ_ENDIO_DATA,
- BTRFS_WQ_ENDIO_METADATA,
- BTRFS_WQ_ENDIO_FREE_SPACE,
- BTRFS_WQ_ENDIO_RAID56,
-};
-
static inline u64 btrfs_sb_offset(int mirror)
{
u64 start = SZ_16K;
@@ -32,34 +42,26 @@ static inline u64 btrfs_sb_offset(int mirror)
return BTRFS_SUPER_INFO_OFFSET;
}
-struct btrfs_device;
-struct btrfs_fs_devices;
-
-void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info);
+void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info);
void btrfs_init_fs_info(struct btrfs_fs_info *fs_info);
-int btrfs_verify_level_key(struct extent_buffer *eb, int level,
- struct btrfs_key *first_key, u64 parent_transid);
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 owner_root, u64 parent_transid,
- int level, struct btrfs_key *first_key);
+ struct btrfs_tree_parent_check *check);
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root,
int level);
-void btrfs_clean_tree_block(struct extent_buffer *buf);
-void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info);
int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info);
-int __cold open_ctree(struct super_block *sb,
- struct btrfs_fs_devices *fs_devices,
- char *options);
+int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *disk_sb);
+int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices);
void __cold close_ctree(struct btrfs_fs_info *fs_info);
+int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb, int mirror_num);
+int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount);
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
-struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev);
-struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
- int copy_num);
int btrfs_commit_super(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
- struct btrfs_key *key);
+ const struct btrfs_key *key);
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
@@ -67,22 +69,24 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, bool check_ref);
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
- u64 objectid, dev_t anon_dev);
+ u64 objectid, dev_t *anon_dev);
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 objectid);
+int btrfs_global_root_insert(struct btrfs_root *root);
+void btrfs_global_root_delete(struct btrfs_root *root);
+struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
+ struct btrfs_key *key);
+struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr);
+struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info);
-int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
-int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio,
- struct page *page, u64 start, u64 end,
- int mirror);
-blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags);
+int btrfs_validate_extent_buffer(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
#endif
@@ -90,9 +94,6 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
/*
 * This function is used to grab the root, and avoid it being freed while we
 * access it. But it doesn't ensure that the tree is not dropped.
- *
- * If you want to ensure the whole tree is safe, you should use
- * fs_info->subvol_srcu
*/
static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
{
@@ -104,19 +105,13 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
}
void btrfs_put_root(struct btrfs_root *root);
-void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
-int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
- int atomic);
-int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
- struct btrfs_key *first_key);
-blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
- enum btrfs_wq_endio_type metadata);
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 dio_file_offset,
- extent_submit_bio_start_t *submit_bio_start);
-blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
- int mirror_num);
+void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf);
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, bool atomic);
+int btrfs_read_extent_buffer(struct extent_buffer *buf,
+ const struct btrfs_tree_parent_check *check);
+
+int btree_csum_one_bio(struct btrfs_bio *bbio);
int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
@@ -125,26 +120,11 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
- struct btrfs_fs_info *fs_info);
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid);
-int btree_lock_page_hook(struct page *page, void *data,
- void (*flush_fn)(void *));
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid);
int btrfs_init_root_free_objectid(struct btrfs_root *root);
-int __init btrfs_end_io_wq_init(void);
-void __cold btrfs_end_io_wq_exit(void);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(u64 objectid,
- struct extent_buffer *eb, int level);
-#else
-static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
- struct extent_buffer *eb, int level)
-{
-}
-#endif
#endif
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1d4c2397d0d6..230d9326b685 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -5,8 +5,9 @@
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
-#include "print-tree.h"
#include "export.h"
+#include "accessors.h"
+#include "super.h"
#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
parent_objectid) / 4)
@@ -22,7 +23,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
int type;
if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
- *max_len = BTRFS_FID_SIZE_CONNECTABLE;
+ if (btrfs_root_id(BTRFS_I(inode)->root) !=
+ btrfs_root_id(BTRFS_I(parent)->root))
+ *max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
+ else
+ *max_len = BTRFS_FID_SIZE_CONNECTABLE;
return FILEID_INVALID;
} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
@@ -33,17 +38,19 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = btrfs_ino(BTRFS_I(inode));
- fid->root_objectid = BTRFS_I(inode)->root->root_key.objectid;
+ fid->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
fid->gen = inode->i_generation;
if (parent) {
u64 parent_root_id;
- fid->parent_objectid = BTRFS_I(parent)->location.objectid;
+ fid->parent_objectid = btrfs_ino(BTRFS_I(parent));
fid->parent_gen = parent->i_generation;
- parent_root_id = BTRFS_I(parent)->root->root_key.objectid;
+ parent_root_id = btrfs_root_id(BTRFS_I(parent)->root);
if (parent_root_id != fid->root_objectid) {
+ if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+ return FILEID_INVALID;
fid->parent_root_objectid = parent_root_id;
len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
type = FILEID_BTRFS_WITH_PARENT_ROOT;
@@ -57,13 +64,24 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
return type;
}
+/*
+ * Read dentry of inode with @objectid from filesystem root @root_objectid.
+ *
+ * @sb: the filesystem super block
+ * @objectid: inode objectid
+ * @root_objectid: object id of the subvolume root in which to look up the inode
+ * @generation: optional, if not zero, verify that the found inode
+ * generation matches
+ *
+ * Return a dentry alias for the inode, otherwise an error pointer. If the
+ * generation does not match, return -ESTALE.
+ */
struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
- u64 root_objectid, u32 generation,
- int check_generation)
+ u64 root_objectid, u64 generation)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root;
- struct inode *inode;
+ struct btrfs_inode *inode;
if (objectid < BTRFS_FIRST_FREE_OBJECTID)
return ERR_PTR(-ESTALE);
@@ -72,17 +90,17 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
if (IS_ERR(root))
return ERR_CAST(root);
- inode = btrfs_iget(sb, objectid, root);
+ inode = btrfs_iget(objectid, root);
btrfs_put_root(root);
if (IS_ERR(inode))
return ERR_CAST(inode);
- if (check_generation && generation != inode->i_generation) {
- iput(inode);
+ if (generation != 0 && generation != inode->vfs_inode.i_generation) {
+ iput(&inode->vfs_inode);
return ERR_PTR(-ESTALE);
}
- return d_obtain_alias(inode);
+ return d_obtain_alias(&inode->vfs_inode);
}
static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
@@ -106,7 +124,7 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
objectid = fid->parent_objectid;
generation = fid->parent_gen;
- return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation);
}
static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
@@ -128,14 +146,15 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
root_objectid = fid->root_objectid;
generation = fid->gen;
- return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation);
}
struct dentry *btrfs_get_parent(struct dentry *child)
{
- struct inode *dir = d_inode(child);
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_inode *dir = BTRFS_I(d_inode(child));
+ struct btrfs_inode *inode;
+ struct btrfs_root *root = dir->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_root_ref *ref;
@@ -147,13 +166,13 @@ struct dentry *btrfs_get_parent(struct dentry *child)
if (!path)
return ERR_PTR(-ENOMEM);
- if (btrfs_ino(BTRFS_I(dir)) == BTRFS_FIRST_FREE_OBJECTID) {
- key.objectid = root->root_key.objectid;
+ if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
+ key.objectid = btrfs_root_id(root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = fs_info->tree_root;
} else {
- key.objectid = btrfs_ino(BTRFS_I(dir));
+ key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
}
@@ -161,8 +180,15 @@ struct dentry *btrfs_get_parent(struct dentry *child)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto fail;
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset of -1 found, there would have to exist an
+ * inode with such number or a root with such id.
+ */
+ ret = -EUCLEAN;
+ goto fail;
+ }
- BUG_ON(ret == 0); /* Key with offset of -1 found */
if (path->slots[0] == 0) {
ret = -ENOENT;
goto fail;
@@ -188,10 +214,14 @@ struct dentry *btrfs_get_parent(struct dentry *child)
if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
return btrfs_get_dentry(fs_info->sb, key.objectid,
- found_key.offset, 0, 0);
+ found_key.offset, 0);
}
- return d_obtain_alias(btrfs_iget(fs_info->sb, key.objectid, root));
+ inode = btrfs_iget(key.objectid, root);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return d_obtain_alias(&inode->vfs_inode);
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
@@ -200,11 +230,11 @@ fail:
static int btrfs_get_name(struct dentry *parent, char *name,
struct dentry *child)
{
- struct inode *inode = d_inode(child);
- struct inode *dir = d_inode(parent);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path;
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(child));
+ struct btrfs_inode *dir = BTRFS_I(d_inode(parent));
+ struct btrfs_root *root = dir->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_inode_ref *iref;
struct btrfs_root_ref *rref;
struct extent_buffer *leaf;
@@ -214,37 +244,34 @@ static int btrfs_get_name(struct dentry *parent, char *name,
int ret;
u64 ino;
- if (!S_ISDIR(dir->i_mode))
+ if (!S_ISDIR(dir->vfs_inode.i_mode))
return -EINVAL;
- ino = btrfs_ino(BTRFS_I(inode));
+ ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
- key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+ key.objectid = btrfs_root_id(inode->root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = fs_info->tree_root;
} else {
key.objectid = ino;
- key.offset = btrfs_ino(BTRFS_I(dir));
key.type = BTRFS_INODE_REF_KEY;
+ key.offset = btrfs_ino(dir);
}
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- btrfs_free_path(path);
return ret;
} else if (ret > 0) {
- if (ino == BTRFS_FIRST_FREE_OBJECTID) {
+ if (ino == BTRFS_FIRST_FREE_OBJECTID)
path->slots[0]--;
- } else {
- btrfs_free_path(path);
+ else
return -ENOENT;
- }
}
leaf = path->nodes[0];
@@ -261,7 +288,6 @@ static int btrfs_get_name(struct dentry *parent, char *name,
}
read_extent_buffer(leaf, name, name_ptr, name_len);
- btrfs_free_path(path);
/*
* have to add the null termination to make sure that reconnect_path
diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
index f32f4113c976..464582273af9 100644
--- a/fs/btrfs/export.h
+++ b/fs/btrfs/export.h
@@ -4,6 +4,10 @@
#define BTRFS_EXPORT_H
#include <linux/exportfs.h>
+#include <linux/types.h>
+
+struct dentry;
+struct super_block;
extern const struct export_operations btrfs_export_ops;
@@ -19,8 +23,7 @@ struct btrfs_fid {
} __attribute__ ((packed));
struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
- u64 root_objectid, u32 generation,
- int check_generation);
+ u64 root_objectid, u64 generation);
struct dentry *btrfs_get_parent(struct dentry *child);
#endif
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
new file mode 100644
index 000000000000..bb2ca1c9c7b0
--- /dev/null
+++ b/fs/btrfs/extent-io-tree.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+#include <trace/events/btrfs.h>
+#include "messages.h"
+#include "ctree.h"
+#include "extent_io.h"
+#include "extent-io-tree.h"
+#include "btrfs_inode.h"
+
+static struct kmem_cache *extent_state_cache;
+
+static inline bool extent_state_in_tree(const struct extent_state *state)
+{
+ return !RB_EMPTY_NODE(&state->rb_node);
+}
+
+#ifdef CONFIG_BTRFS_DEBUG
+static LIST_HEAD(states);
+static DEFINE_SPINLOCK(leak_lock);
+
+static inline void btrfs_leak_debug_add_state(struct extent_state *state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&leak_lock, flags);
+ list_add(&state->leak_list, &states);
+ spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline void btrfs_leak_debug_del_state(struct extent_state *state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&leak_lock, flags);
+ list_del(&state->leak_list);
+ spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline void btrfs_extent_state_leak_debug_check(void)
+{
+ struct extent_state *state;
+
+ while (!list_empty(&states)) {
+ state = list_first_entry(&states, struct extent_state, leak_list);
+ btrfs_err(NULL,
+ "state leak: start %llu end %llu state %u in tree %d refs %d",
+ state->start, state->end, state->state,
+ extent_state_in_tree(state),
+ refcount_read(&state->refs));
+ list_del(&state->leak_list);
+ WARN_ON_ONCE(1);
+ kmem_cache_free(extent_state_cache, state);
+ }
+}
+
+#define btrfs_debug_check_extent_io_range(tree, start, end) \
+ __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
+static inline void __btrfs_debug_check_extent_io_range(const char *caller,
+ struct extent_io_tree *tree,
+ u64 start, u64 end)
+{
+ const struct btrfs_inode *inode = tree->inode;
+ u64 isize;
+
+ if (tree->owner != IO_TREE_INODE_IO)
+ return;
+
+ isize = i_size_read(&inode->vfs_inode);
+ if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
+ btrfs_debug_rl(inode->root->fs_info,
+ "%s: ino %llu isize %llu odd range [%llu,%llu]",
+ caller, btrfs_ino(inode), isize, start, end);
+ }
+}
+#else
+#define btrfs_leak_debug_add_state(state) do {} while (0)
+#define btrfs_leak_debug_del_state(state) do {} while (0)
+#define btrfs_extent_state_leak_debug_check() do {} while (0)
+#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
+#endif
+
+/* Read-only access to the inode. */
+const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree)
+{
+ if (tree->owner == IO_TREE_INODE_IO)
+ return tree->inode;
+ return NULL;
+}
+
+/* For read-only access to fs_info. */
+const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree)
+{
+ if (tree->owner == IO_TREE_INODE_IO)
+ return tree->inode->root->fs_info;
+ return tree->fs_info;
+}
+
+void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *tree, unsigned int owner)
+{
+ tree->state = RB_ROOT;
+ spin_lock_init(&tree->lock);
+ tree->fs_info = fs_info;
+ tree->owner = owner;
+}
+
+/*
+ * Empty an io tree, removing and freeing every extent state record from the
+ * tree. This should be called once we are sure no other task can access the
+ * tree anymore, so no tree updates happen after we empty the tree and there
+ * aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never
+ * set on any extent state when calling this function).
+ */
+void btrfs_extent_io_tree_release(struct extent_io_tree *tree)
+{
+ struct rb_root root;
+ struct extent_state *state;
+ struct extent_state *tmp;
+
+ spin_lock(&tree->lock);
+ root = tree->state;
+ tree->state = RB_ROOT;
+ rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) {
+ /* Clear node to keep free_extent_state() happy. */
+ RB_CLEAR_NODE(&state->rb_node);
+ ASSERT(!(state->state & EXTENT_LOCK_BITS));
+ /*
+ * No need for a memory barrier here, as we are holding the tree
+ * lock and we only change the waitqueue while holding that lock
+ * (see wait_extent_bit()).
+ */
+ ASSERT(!waitqueue_active(&state->wq));
+ btrfs_free_extent_state(state);
+ cond_resched_lock(&tree->lock);
+ }
+ /*
+ * Should still be empty even after a reschedule, no other task should
+ * be accessing the tree anymore.
+ */
+ ASSERT(RB_EMPTY_ROOT(&tree->state));
+ spin_unlock(&tree->lock);
+}
+
+static struct extent_state *alloc_extent_state(gfp_t mask)
+{
+ struct extent_state *state;
+
+ /*
+ * The given mask might not be appropriate for the slab allocator,
+ * drop the unsupported bits.
+ */
+ mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
+ state = kmem_cache_alloc(extent_state_cache, mask);
+ if (!state)
+ return state;
+ state->state = 0;
+ RB_CLEAR_NODE(&state->rb_node);
+ btrfs_leak_debug_add_state(state);
+ refcount_set(&state->refs, 1);
+ init_waitqueue_head(&state->wq);
+ trace_btrfs_alloc_extent_state(state, mask, _RET_IP_);
+ return state;
+}
+
+static struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc)
+{
+ if (!prealloc)
+ prealloc = alloc_extent_state(GFP_ATOMIC);
+
+ return prealloc;
+}
+
+void btrfs_free_extent_state(struct extent_state *state)
+{
+ if (!state)
+ return;
+ if (refcount_dec_and_test(&state->refs)) {
+ WARN_ON(extent_state_in_tree(state));
+ btrfs_leak_debug_del_state(state);
+ trace_btrfs_free_extent_state(state, _RET_IP_);
+ kmem_cache_free(extent_state_cache, state);
+ }
+}
+
+static int add_extent_changeset(struct extent_state *state, u32 bits,
+ struct extent_changeset *changeset,
+ int set)
+{
+ int ret;
+
+ if (!changeset)
+ return 0;
+ if (set && (state->state & bits) == bits)
+ return 0;
+ if (!set && (state->state & bits) == 0)
+ return 0;
+ changeset->bytes_changed += state->end - state->start + 1;
+ ret = ulist_add(&changeset->range_changed, state->start, state->end,
+ GFP_ATOMIC);
+ return ret;
+}
+
+static inline struct extent_state *next_state(struct extent_state *state)
+{
+ struct rb_node *next = rb_next(&state->rb_node);
+
+ return rb_entry_safe(next, struct extent_state, rb_node);
+}
+
+static inline struct extent_state *prev_state(struct extent_state *state)
+{
+ struct rb_node *next = rb_prev(&state->rb_node);
+
+ return rb_entry_safe(next, struct extent_state, rb_node);
+}
+
+/*
+ * Search @tree for an entry that contains @offset or if none exists for the
+ * first entry that starts and ends after that offset.
+ *
+ * @tree: the tree to search
+ * @offset: search offset
+ * @node_ret: pointer where new node should be anchored (used when inserting an
+ * entry in the tree)
+ * @parent_ret: points to entry which would have been the parent of the entry,
+ * containing @offset
+ *
+ * Return a pointer to the entry that contains @offset byte address.
+ *
+ * If no such entry exists, return the first entry that starts and ends after
+ * @offset if one exists, otherwise NULL.
+ *
+ * If the returned entry starts at @offset, then @node_ret and @parent_ret
+ * aren't changed.
+ */
+static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree,
+ u64 offset,
+ struct rb_node ***node_ret,
+ struct rb_node **parent_ret)
+{
+ struct rb_root *root = &tree->state;
+ struct rb_node **node = &root->rb_node;
+ struct rb_node *prev = NULL;
+ struct extent_state *entry = NULL;
+
+ while (*node) {
+ prev = *node;
+ entry = rb_entry(prev, struct extent_state, rb_node);
+
+ if (offset < entry->start)
+ node = &(*node)->rb_left;
+ else if (offset > entry->end)
+ node = &(*node)->rb_right;
+ else
+ return entry;
+ }
+
+ if (node_ret)
+ *node_ret = node;
+ if (parent_ret)
+ *parent_ret = prev;
+
+ /*
+ * Return either the current entry if it contains offset (it ends after
+ * or at offset) or the first entry that starts and ends after offset if
+ * one exists, or NULL.
+ */
+ while (entry && offset > entry->end)
+ entry = next_state(entry);
+
+ return entry;
+}
+
+/*
+ * Search offset in the tree or fill neighbor rbtree node pointers.
+ *
+ * @tree: the tree to search
+ * @offset: offset that should fall within an entry in @tree
+ * @next_ret: pointer to the first entry whose range ends after @offset
+ * @prev_ret: pointer to the first entry whose range begins before @offset
+ *
+ * Return a pointer to the entry that contains @offset byte address. If no
+ * such entry exists, then return NULL and fill @prev_ret and @next_ret.
+ * Otherwise return the found entry and other pointers are left untouched.
+ */
+static struct extent_state *tree_search_prev_next(struct extent_io_tree *tree,
+ u64 offset,
+ struct extent_state **prev_ret,
+ struct extent_state **next_ret)
+{
+ struct rb_root *root = &tree->state;
+ struct rb_node **node = &root->rb_node;
+ struct extent_state *orig_prev;
+ struct extent_state *entry = NULL;
+
+ ASSERT(prev_ret);
+ ASSERT(next_ret);
+
+ while (*node) {
+ entry = rb_entry(*node, struct extent_state, rb_node);
+
+ if (offset < entry->start)
+ node = &(*node)->rb_left;
+ else if (offset > entry->end)
+ node = &(*node)->rb_right;
+ else
+ return entry;
+ }
+
+ orig_prev = entry;
+ while (entry && offset > entry->end)
+ entry = next_state(entry);
+ *next_ret = entry;
+ entry = orig_prev;
+
+ while (entry && offset < entry->start)
+ entry = prev_state(entry);
+ *prev_ret = entry;
+
+ return NULL;
+}
+
+/*
+ * Inexact rb-tree search, return the next entry if @offset is not found
+ */
+static inline struct extent_state *tree_search(struct extent_io_tree *tree, u64 offset)
+{
+ return tree_search_for_insert(tree, offset, NULL, NULL);
+}
+
+static void __cold extent_io_tree_panic(const struct extent_io_tree *tree,
+ const struct extent_state *state,
+ const char *opname,
+ int err)
+{
+ btrfs_panic(btrfs_extent_io_tree_to_fs_info(tree), err,
+ "extent io tree error on %s state start %llu end %llu",
+ opname, state->start, state->end);
+}
+
+static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state)
+{
+ struct extent_state *prev;
+
+ prev = prev_state(state);
+ if (prev && prev->end == state->start - 1 && prev->state == state->state) {
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode, state, prev);
+ state->start = prev->start;
+ rb_erase(&prev->rb_node, &tree->state);
+ RB_CLEAR_NODE(&prev->rb_node);
+ btrfs_free_extent_state(prev);
+ }
+}
+
+static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state)
+{
+ struct extent_state *next;
+
+ next = next_state(state);
+ if (next && next->start == state->end + 1 && next->state == state->state) {
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode, state, next);
+ state->end = next->end;
+ rb_erase(&next->rb_node, &tree->state);
+ RB_CLEAR_NODE(&next->rb_node);
+ btrfs_free_extent_state(next);
+ }
+}
+
+/*
+ * Utility function to look for merge candidates inside a given range. Any
+ * extents with matching state are merged together into a single extent in the
+ * tree. Extents with EXTENT_LOCK_BITS or EXTENT_BOUNDARY in their state field
+ * are not merged because the end_io handlers need to be able to do operations
+ * on them without sleeping (or doing allocations/splits).
+ *
+ * This should be called with the tree lock held.
+ */
+static void merge_state(struct extent_io_tree *tree, struct extent_state *state)
+{
+ if (state->state & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY))
+ return;
+
+ merge_prev_state(tree, state);
+ merge_next_state(tree, state);
+}
+
+static void set_state_bits(struct extent_io_tree *tree,
+ struct extent_state *state,
+ u32 bits, struct extent_changeset *changeset)
+{
+ u32 bits_to_set = bits & ~EXTENT_CTLBITS;
+ int ret;
+
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_set_delalloc_extent(tree->inode, state, bits);
+
+ ret = add_extent_changeset(state, bits_to_set, changeset, 1);
+ BUG_ON(ret < 0);
+ state->state |= bits_to_set;
+}
+
+/*
+ * Insert an extent_state struct into the tree. 'bits' are set on the
+ * struct before it is inserted.
+ *
+ * Returns a pointer to the struct extent_state record containing the range
+ * requested for insertion, which may be the same as the given struct or it
+ * may be an existing record in the tree that was expanded to accommodate the
+ * requested range. If the returned extent_state is different from the one that
+ * was given, the latter can be freed or reused by the caller.
+ *
+ * On error it returns an error pointer.
+ *
+ * The tree lock is not taken internally. This is a utility function and
+ * probably isn't what you want to call (see set/clear_extent_bit).
+ */
+static struct extent_state *insert_state(struct extent_io_tree *tree,
+ struct extent_state *state,
+ u32 bits,
+ struct extent_changeset *changeset)
+{
+ struct rb_node **node;
+ struct rb_node *parent = NULL;
+ const u64 start = state->start - 1;
+ const u64 end = state->end + 1;
+ const bool try_merge = !(bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY));
+
+ set_state_bits(tree, state, bits, changeset);
+
+ node = &tree->state.rb_node;
+ while (*node) {
+ struct extent_state *entry;
+
+ parent = *node;
+ entry = rb_entry(parent, struct extent_state, rb_node);
+
+ if (state->end < entry->start) {
+ if (try_merge && end == entry->start &&
+ state->state == entry->state) {
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode,
+ state, entry);
+ entry->start = state->start;
+ merge_prev_state(tree, entry);
+ state->state = 0;
+ return entry;
+ }
+ node = &(*node)->rb_left;
+ } else if (state->end > entry->end) {
+ if (try_merge && entry->end == start &&
+ state->state == entry->state) {
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_merge_delalloc_extent(tree->inode,
+ state, entry);
+ entry->end = state->end;
+ merge_next_state(tree, entry);
+ state->state = 0;
+ return entry;
+ }
+ node = &(*node)->rb_right;
+ } else {
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
+ rb_link_node(&state->rb_node, parent, node);
+ rb_insert_color(&state->rb_node, &tree->state);
+
+ return state;
+}
+
+/*
+ * Insert state to @tree to the location given by @node and @parent.
+ */
+static void insert_state_fast(struct extent_io_tree *tree,
+ struct extent_state *state, struct rb_node **node,
+ struct rb_node *parent, unsigned bits,
+ struct extent_changeset *changeset)
+{
+ set_state_bits(tree, state, bits, changeset);
+ rb_link_node(&state->rb_node, parent, node);
+ rb_insert_color(&state->rb_node, &tree->state);
+ merge_state(tree, state);
+}
+
+/*
+ * Split a given extent state struct in two, inserting the preallocated
+ * struct 'prealloc' as the newly created first half. 'split' indicates an
+ * offset inside 'orig' where it should be split.
+ *
+ * Before calling, the tree has 'orig' at [orig->start, orig->end]. After
+ * calling, there are two extent state structs in the tree:
+ *   prealloc: [orig->start, split - 1]
+ *   orig:     [split, orig->end]
+ *
+ * The tree locks are not taken by this function. They need to be held
+ * by the caller.
+ */
+static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
+ struct extent_state *prealloc, u64 split)
+{
+ struct rb_node *parent = NULL;
+ struct rb_node **node;
+
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_split_delalloc_extent(tree->inode, orig, split);
+
+ prealloc->start = orig->start;
+ prealloc->end = split - 1;
+ prealloc->state = orig->state;
+ orig->start = split;
+
+ parent = &orig->rb_node;
+ node = &parent;
+ while (*node) {
+ struct extent_state *entry;
+
+ parent = *node;
+ entry = rb_entry(parent, struct extent_state, rb_node);
+
+ if (prealloc->end < entry->start) {
+ node = &(*node)->rb_left;
+ } else if (prealloc->end > entry->end) {
+ node = &(*node)->rb_right;
+ } else {
+ btrfs_free_extent_state(prealloc);
+ return -EEXIST;
+ }
+ }
+
+ rb_link_node(&prealloc->rb_node, parent, node);
+ rb_insert_color(&prealloc->rb_node, &tree->state);
+
+ return 0;
+}
+
+/*
+ * Use this during tree iteration to avoid doing next node searches when it's
+ * not needed (the current record ends at or after the target range's end).
+ */
+static inline struct extent_state *next_search_state(struct extent_state *state, u64 end)
+{
+ if (state->end < end)
+ return next_state(state);
+
+ return NULL;
+}
+
+/*
+ * Utility function to clear some bits in an extent state struct. It will
+ * optionally wake up anyone waiting on this state (wake == 1).
+ *
+ * If no bits are set on the state struct after clearing things, the
+ * struct is removed from the tree and freed.
+ */
+static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
+ struct extent_state *state,
+ u32 bits, int wake, u64 end,
+ struct extent_changeset *changeset)
+{
+ struct extent_state *next;
+ u32 bits_to_clear = bits & ~EXTENT_CTLBITS;
+ int ret;
+
+ if (tree->owner == IO_TREE_INODE_IO)
+ btrfs_clear_delalloc_extent(tree->inode, state, bits);
+
+ ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
+ BUG_ON(ret < 0);
+ state->state &= ~bits_to_clear;
+ if (wake)
+ wake_up(&state->wq);
+ if (state->state == 0) {
+ next = next_search_state(state, end);
+ if (extent_state_in_tree(state)) {
+ rb_erase(&state->rb_node, &tree->state);
+ RB_CLEAR_NODE(&state->rb_node);
+ btrfs_free_extent_state(state);
+ } else {
+ WARN_ON(1);
+ }
+ } else {
+ merge_state(tree, state);
+ next = next_search_state(state, end);
+ }
+ return next;
+}
+
+/*
+ * Detect if extent bits request NOWAIT semantics, set the gfp mask accordingly
+ * and unset the EXTENT_NOWAIT bit.
+ */
+static void set_gfp_mask_from_bits(u32 *bits, gfp_t *mask)
+{
+ *mask = (*bits & EXTENT_NOWAIT ? GFP_NOWAIT : GFP_NOFS);
+ *bits &= EXTENT_NOWAIT - 1;
+}
+
+/*
+ * Clear some bits on a range in the tree. This may require splitting or
+ * inserting elements in the tree, so the gfp mask is used to indicate which
+ * allocations or sleeping are allowed.
+ *
+ * The range [start, end] is inclusive.
+ *
+ * This takes the tree lock, and returns 0 on success and < 0 on error.
+ */
+int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state,
+ struct extent_changeset *changeset)
+{
+ struct extent_state *state;
+ struct extent_state *cached;
+ struct extent_state *prealloc = NULL;
+ u64 last_end;
+ int ret = 0;
+ bool clear;
+ bool wake;
+ const bool delete = (bits & EXTENT_CLEAR_ALL_BITS);
+ gfp_t mask;
+
+ set_gfp_mask_from_bits(&bits, &mask);
+ btrfs_debug_check_extent_io_range(tree, start, end);
+ trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
+
+ if (delete)
+ bits |= ~EXTENT_CTLBITS;
+
+ if (bits & EXTENT_DELALLOC)
+ bits |= EXTENT_NORESERVE;
+
+ wake = (bits & EXTENT_LOCK_BITS);
+ clear = (bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY));
+again:
+ if (!prealloc) {
+ /*
+ * Don't care for allocation failure here because we might end
+ * up not needing the pre-allocated extent state at all, which
+ * is the case if we only have in the tree extent states that
+ * cover our input range and don't cover any other range.
+ * If we end up needing a new extent state we allocate it later.
+ */
+ prealloc = alloc_extent_state(mask);
+ }
+
+ spin_lock(&tree->lock);
+ if (cached_state) {
+ cached = *cached_state;
+
+ if (clear) {
+ *cached_state = NULL;
+ cached_state = NULL;
+ }
+
+ if (cached && extent_state_in_tree(cached) &&
+ cached->start <= start && cached->end > start) {
+ if (clear)
+ refcount_dec(&cached->refs);
+ state = cached;
+ goto hit_next;
+ }
+ if (clear)
+ btrfs_free_extent_state(cached);
+ }
+
+ /* This search will find the extents that end after our range starts. */
+ state = tree_search(tree, start);
+ if (!state)
+ goto out;
+hit_next:
+ if (state->start > end)
+ goto out;
+ WARN_ON(state->end < start);
+ last_end = state->end;
+
+ /* The state doesn't have the wanted bits, go ahead. */
+ if (!(state->state & bits)) {
+ state = next_search_state(state, end);
+ goto next;
+ }
+
+ /*
+ * | ---- desired range ---- |
+ * | state | or
+ * | ------------- state -------------- |
+ *
+ * We need to split the extent we found, and may flip bits on second
+ * half.
+ *
+ * If the extent we found extends past our range, we just split and
+ * search again. It'll get split again the next time though.
+ *
+ * If the extent we found is inside our range, we clear the desired bit
+ * on it.
+ */
+
+ if (state->start < start) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc)
+ goto search_again;
+ ret = split_state(tree, state, prealloc, start);
+ prealloc = NULL;
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
+ goto out;
+ }
+ if (state->end <= end) {
+ state = clear_state_bit(tree, state, bits, wake, end,
+ changeset);
+ goto next;
+ }
+ if (need_resched())
+ goto search_again;
+ /*
+ * Fallthrough and try atomic extent state allocation if needed.
+ * If it fails we'll jump to 'search_again', retry the allocation
+ * in non-atomic mode and start the search again.
+ */
+ }
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ * We need to split the extent, and clear the bit on the first half.
+ */
+ if (state->start <= end && state->end > end) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc)
+ goto search_again;
+ ret = split_state(tree, state, prealloc, end + 1);
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
+ prealloc = NULL;
+ goto out;
+ }
+
+ if (wake)
+ wake_up(&state->wq);
+
+ clear_state_bit(tree, prealloc, bits, wake, end, changeset);
+
+ prealloc = NULL;
+ goto out;
+ }
+
+ state = clear_state_bit(tree, state, bits, wake, end, changeset);
+next:
+ if (last_end >= end)
+ goto out;
+ start = last_end + 1;
+ if (state && !need_resched())
+ goto hit_next;
+
+search_again:
+ spin_unlock(&tree->lock);
+ if (gfpflags_allow_blocking(mask))
+ cond_resched();
+ goto again;
+
+out:
+ spin_unlock(&tree->lock);
+ btrfs_free_extent_state(prealloc);
+
+ return ret;
+}
+
+/*
+ * Wait for one or more bits to clear on a range in the state tree.
+ * The range [start, end] is inclusive.
+ * The tree lock is taken by this function.
+ */
+static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state)
+{
+ struct extent_state *state;
+
+ btrfs_debug_check_extent_io_range(tree, start, end);
+
+ spin_lock(&tree->lock);
+again:
+ /*
+ * Maintain cached_state, as we may not remove it from the tree if there
+ * are more bits set on this state than the bits we're waiting on.
+ */
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (extent_state_in_tree(state) &&
+ state->start <= start && start < state->end)
+ goto process_node;
+ }
+ while (1) {
+ /*
+ * This search will find all the extents that end after our
+ * range starts.
+ */
+ state = tree_search(tree, start);
+process_node:
+ if (!state)
+ break;
+ if (state->start > end)
+ goto out;
+
+ if (state->state & bits) {
+ DEFINE_WAIT(wait);
+
+ start = state->start;
+ refcount_inc(&state->refs);
+ prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&tree->lock);
+ schedule();
+ spin_lock(&tree->lock);
+ finish_wait(&state->wq, &wait);
+ btrfs_free_extent_state(state);
+ goto again;
+ }
+ start = state->end + 1;
+
+ if (start > end)
+ break;
+
+ if (!cond_resched_lock(&tree->lock)) {
+ state = next_state(state);
+ goto process_node;
+ }
+ }
+out:
+ /* This state is no longer useful, clear it and free it up. */
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ *cached_state = NULL;
+ btrfs_free_extent_state(state);
+ }
+ spin_unlock(&tree->lock);
+}
+
+static void cache_state_if_flags(struct extent_state *state,
+ struct extent_state **cached_ptr,
+ unsigned flags)
+{
+ if (cached_ptr && !(*cached_ptr)) {
+ if (!flags || (state->state & flags)) {
+ *cached_ptr = state;
+ refcount_inc(&state->refs);
+ }
+ }
+}
+
+static void cache_state(struct extent_state *state,
+ struct extent_state **cached_ptr)
+{
+ return cache_state_if_flags(state, cached_ptr, EXTENT_LOCK_BITS | EXTENT_BOUNDARY);
+}
+
+/*
+ * Find the first state struct with 'bits' set after 'start', and return it.
+ * tree->lock must be held. NULL will be returned if nothing was found after
+ * 'start'.
+ */
+static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+ u64 start, u32 bits)
+{
+ struct extent_state *state;
+
+ /*
+ * This search will find all the extents that end after our range
+ * starts.
+ */
+ state = tree_search(tree, start);
+ while (state) {
+ if (state->state & bits)
+ return state;
+ state = next_state(state);
+ }
+ return NULL;
+}
+
+/*
+ * Find the first offset in the io tree with one or more @bits set.
+ *
+ * Note: If there are multiple bits set in @bits, any of them will match.
+ *
+ * Return true if we find something, and update @start_ret and @end_ret.
+ * Return false if we found nothing.
+ */
+bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits,
+ struct extent_state **cached_state)
+{
+ struct extent_state *state;
+ bool ret = false;
+
+ spin_lock(&tree->lock);
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (state->end == start - 1 && extent_state_in_tree(state)) {
+ while ((state = next_state(state)) != NULL) {
+ if (state->state & bits)
+ break;
+ }
+ /*
+ * If we found the next extent state, clear cached_state
+ * so that we can cache the next extent state below and
+ * avoid future calls going over the same extent state
+ * again. If we haven't found any, clear as well since
+ * it's now useless.
+ */
+ btrfs_free_extent_state(*cached_state);
+ *cached_state = NULL;
+ if (state)
+ goto got_it;
+ goto out;
+ }
+ btrfs_free_extent_state(*cached_state);
+ *cached_state = NULL;
+ }
+
+ state = find_first_extent_bit_state(tree, start, bits);
+got_it:
+ if (state) {
+ cache_state_if_flags(state, cached_state, 0);
+ *start_ret = state->start;
+ *end_ret = state->end;
+ ret = true;
+ }
+out:
+ spin_unlock(&tree->lock);
+ return ret;
+}
+
+/*
+ * Find a contiguous area of bits
+ *
+ * @tree: io tree to check
+ * @start: offset to start the search from
+ * @start_ret: the first offset we found with the bits set
+ * @end_ret: the final contiguous range of the bits that were set
+ * @bits: bits to look for
+ *
+ * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
+ * to set bits appropriately, and then merge them again. During this time it
+ * will drop the tree->lock, so use this helper if you want to find the actual
+ * contiguous area for given bits. We will search to the first bit we find, and
+ * then walk down the tree until we find a non-contiguous area. The area
+ * returned will be the full contiguous area with the bits set.
+ *
+ * Returns true if we found a range with the given bits set, in which case
+ * @start_ret and @end_ret are updated, or false if no range was found.
+ */
+bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits)
+{
+ struct extent_state *state;
+ bool ret = false;
+
+ ASSERT(!btrfs_fs_incompat(btrfs_extent_io_tree_to_fs_info(tree), NO_HOLES));
+
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, bits);
+ if (state) {
+ *start_ret = state->start;
+ *end_ret = state->end;
+ while ((state = next_state(state)) != NULL) {
+ if (state->start > (*end_ret + 1))
+ break;
+ *end_ret = state->end;
+ }
+ ret = true;
+ }
+ spin_unlock(&tree->lock);
+ return ret;
+}
+
+/*
+ * Find a contiguous range of bytes in the file marked as delalloc, not more
+ * than 'max_bytes'. 'start' and 'end' are used to return the range.
+ *
+ * True is returned if we find something, false if nothing was in the tree.
+ */
+bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
+ u64 *end, u64 max_bytes,
+ struct extent_state **cached_state)
+{
+ struct extent_state *state;
+ u64 cur_start = *start;
+ bool found = false;
+ u64 total_bytes = 0;
+
+ spin_lock(&tree->lock);
+
+ /*
+ * This search will find all the extents that end after our range
+ * starts.
+ */
+ state = tree_search(tree, cur_start);
+ if (!state) {
+ *end = (u64)-1;
+ goto out;
+ }
+
+ while (state) {
+ if (found && (state->start != cur_start ||
+ (state->state & EXTENT_BOUNDARY))) {
+ goto out;
+ }
+ if (!(state->state & EXTENT_DELALLOC)) {
+ if (!found)
+ *end = state->end;
+ goto out;
+ }
+ if (!found) {
+ *start = state->start;
+ *cached_state = state;
+ refcount_inc(&state->refs);
+ }
+ found = true;
+ *end = state->end;
+ cur_start = state->end + 1;
+ total_bytes += state->end - state->start + 1;
+ if (total_bytes >= max_bytes)
+ break;
+ state = next_state(state);
+ }
+out:
+ spin_unlock(&tree->lock);
+ return found;
+}
+
+/*
+ * Set some bits on a range in the tree. This may require allocations or
+ * sleeping. By default all allocations use GFP_NOFS, use EXTENT_NOWAIT for
+ * GFP_NOWAIT.
+ *
+ * If any of the exclusive bits are set, this will fail with -EEXIST if some
+ * part of the range already has the desired bits set. The extent_state of the
+ * existing range is returned in failed_state in this case, and the start of the
+ * existing range is returned in failed_start. failed_state is used as an
+ * optimization for wait_extent_bit; failed_start must be used as the source of
+ * truth as failed_state may have changed since we returned.
+ *
+ * [start, end] is inclusive. This takes the tree lock.
+ */
+static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, u64 *failed_start,
+ struct extent_state **failed_state,
+ struct extent_state **cached_state,
+ struct extent_changeset *changeset)
+{
+ struct extent_state *state;
+ struct extent_state *prealloc = NULL;
+ struct rb_node **p = NULL;
+ struct rb_node *parent = NULL;
+ int ret = 0;
+ u64 last_start;
+ u64 last_end;
+ u32 exclusive_bits = (bits & EXTENT_LOCK_BITS);
+ gfp_t mask;
+
+ set_gfp_mask_from_bits(&bits, &mask);
+ btrfs_debug_check_extent_io_range(tree, start, end);
+ trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
+
+ if (exclusive_bits)
+ ASSERT(failed_start);
+ else
+ ASSERT(failed_start == NULL && failed_state == NULL);
+again:
+ if (!prealloc) {
+ /*
+ * Don't care for allocation failure here because we might end
+ * up not needing the pre-allocated extent state at all, which
+ * is the case if we only have in the tree extent states that
+ * cover our input range and don't cover any other range.
+ * If we end up needing a new extent state we allocate it later.
+ */
+ prealloc = alloc_extent_state(mask);
+ }
+ /* Optimistically preallocate the extent changeset ulist node. */
+ if (changeset)
+ extent_changeset_prealloc(changeset, mask);
+
+ spin_lock(&tree->lock);
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (state->start <= start && state->end > start &&
+ extent_state_in_tree(state))
+ goto hit_next;
+ }
+ /*
+ * This search will find all the extents that end after our range
+ * starts.
+ */
+ state = tree_search_for_insert(tree, start, &p, &parent);
+ if (!state) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc)
+ goto search_again;
+ prealloc->start = start;
+ prealloc->end = end;
+ insert_state_fast(tree, prealloc, p, parent, bits, changeset);
+ cache_state(prealloc, cached_state);
+ prealloc = NULL;
+ goto out;
+ }
+hit_next:
+ last_start = state->start;
+ last_end = state->end;
+
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ *
+ * Just lock what we found and keep going
+ */
+ if (state->start == start && state->end <= end) {
+ if (state->state & exclusive_bits) {
+ *failed_start = state->start;
+ cache_state(state, failed_state);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ set_state_bits(tree, state, bits, changeset);
+ cache_state(state, cached_state);
+ merge_state(tree, state);
+ if (last_end >= end)
+ goto out;
+ start = last_end + 1;
+ state = next_state(state);
+ if (state && state->start == start && !need_resched())
+ goto hit_next;
+ goto search_again;
+ }
+
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ * or
+ * | ------------- state -------------- |
+ *
+ * We need to split the extent we found, and may flip bits on second
+ * half.
+ *
+ * If the extent we found extends past our range, we just split and
+ * search again. It'll get split again the next time though.
+ *
+ * If the extent we found is inside our range, we set the desired bit
+ * on it.
+ */
+ if (state->start < start) {
+ if (state->state & exclusive_bits) {
+ *failed_start = start;
+ cache_state(state, failed_state);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /*
+ * If this extent already has all the bits we want set, then
+ * skip it, not necessary to split it or do anything with it.
+ */
+ if ((state->state & bits) == bits) {
+ start = state->end + 1;
+ cache_state(state, cached_state);
+ goto search_again;
+ }
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc)
+ goto search_again;
+ ret = split_state(tree, state, prealloc, start);
+ if (ret)
+ extent_io_tree_panic(tree, state, "split", ret);
+
+ prealloc = NULL;
+ if (ret)
+ goto out;
+ if (state->end <= end) {
+ set_state_bits(tree, state, bits, changeset);
+ cache_state(state, cached_state);
+ merge_state(tree, state);
+ if (last_end >= end)
+ goto out;
+ start = last_end + 1;
+ state = next_state(state);
+ if (state && state->start == start && !need_resched())
+ goto hit_next;
+ }
+ goto search_again;
+ }
+ /*
+ * | ---- desired range ---- |
+ * | state | or | state |
+ *
+ * There's a hole, we need to insert something in it and ignore the
+ * extent we found.
+ */
+ if (state->start > start) {
+ struct extent_state *inserted_state;
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc)
+ goto search_again;
+
+ /*
+ * Avoid freeing 'prealloc' if it can be merged with the next
+ * extent.
+ */
+ prealloc->start = start;
+ if (end < last_start)
+ prealloc->end = end;
+ else
+ prealloc->end = last_start - 1;
+
+ inserted_state = insert_state(tree, prealloc, bits, changeset);
+ if (IS_ERR(inserted_state)) {
+ ret = PTR_ERR(inserted_state);
+ extent_io_tree_panic(tree, prealloc, "insert", ret);
+ goto out;
+ }
+
+ cache_state(inserted_state, cached_state);
+ if (inserted_state == prealloc)
+ prealloc = NULL;
+ start = inserted_state->end + 1;
+
+ /* Beyond target range, stop. */
+ if (start > end)
+ goto out;
+
+ if (need_resched())
+ goto search_again;
+
+ state = next_search_state(inserted_state, end);
+ /*
+ * If there's a next state, whether contiguous or not, we don't
+ * need to unlock and start search again. If it's not contiguous
+ * we will end up here and try to allocate a prealloc state and insert.
+ */
+ if (state)
+ goto hit_next;
+ goto search_again;
+ }
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ *
+ * We need to split the extent, and set the bit on the first half
+ */
+ if (state->start <= end && state->end > end) {
+ if (state->state & exclusive_bits) {
+ *failed_start = start;
+ cache_state(state, failed_state);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc)
+ goto search_again;
+ ret = split_state(tree, state, prealloc, end + 1);
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
+ prealloc = NULL;
+ goto out;
+ }
+
+ set_state_bits(tree, prealloc, bits, changeset);
+ cache_state(prealloc, cached_state);
+ merge_state(tree, prealloc);
+ prealloc = NULL;
+ goto out;
+ }
+
+search_again:
+ if (start > end)
+ goto out;
+ spin_unlock(&tree->lock);
+ if (gfpflags_allow_blocking(mask))
+ cond_resched();
+ goto again;
+
+out:
+ spin_unlock(&tree->lock);
+ btrfs_free_extent_state(prealloc);
+
+ return ret;
+}
+
+int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state)
+{
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, cached_state, NULL);
+}
+
+/*
+ * Convert all bits in a given range from one bit to another
+ *
+ * @tree: the io tree to search
+ * @start: the start offset in bytes
+ * @end: the end offset in bytes (inclusive)
+ * @bits: the bits to set in this range
+ * @clear_bits: the bits to clear in this range
+ * @cached_state: state that we're going to cache
+ *
+ * This will go through and set bits for the given range. If any states exist
+ * already in this range they are set with the given bit and cleared of the
+ * clear_bits. This is only meant to be used by things that are mergeable, i.e.
+ * converting from say DELALLOC to DIRTY. This is not meant to be used with
+ * boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
+ */
+int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, u32 clear_bits,
+ struct extent_state **cached_state)
+{
+ struct extent_state *state;
+ struct extent_state *prealloc = NULL;
+ struct rb_node **p = NULL;
+ struct rb_node *parent = NULL;
+ int ret = 0;
+ u64 last_start;
+ u64 last_end;
+ bool first_iteration = true;
+
+ btrfs_debug_check_extent_io_range(tree, start, end);
+ trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
+ clear_bits);
+
+again:
+ if (!prealloc) {
+ /*
+ * Best effort, don't worry if extent state allocation fails
+ * here for the first iteration. We might have a cached state
+ * that matches exactly the target range, in which case no
+ * extent state allocations are needed. We'll only know this
+ * after locking the tree.
+ */
+ prealloc = alloc_extent_state(GFP_NOFS);
+ if (!prealloc && !first_iteration)
+ return -ENOMEM;
+ }
+
+ spin_lock(&tree->lock);
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (state->start <= start && state->end > start &&
+ extent_state_in_tree(state))
+ goto hit_next;
+ }
+
+ /*
+ * This search will find all the extents that end after our range
+ * starts.
+ */
+ state = tree_search_for_insert(tree, start, &p, &parent);
+ if (!state) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ prealloc->start = start;
+ prealloc->end = end;
+ insert_state_fast(tree, prealloc, p, parent, bits, NULL);
+ cache_state(prealloc, cached_state);
+ prealloc = NULL;
+ goto out;
+ }
+hit_next:
+ last_start = state->start;
+ last_end = state->end;
+
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ *
+ * Just lock what we found and keep going.
+ */
+ if (state->start == start && state->end <= end) {
+ set_state_bits(tree, state, bits, NULL);
+ cache_state(state, cached_state);
+ state = clear_state_bit(tree, state, clear_bits, 0, end, NULL);
+ if (last_end >= end)
+ goto out;
+ start = last_end + 1;
+ if (state && state->start == start && !need_resched())
+ goto hit_next;
+ goto search_again;
+ }
+
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ * or
+ * | ------------- state -------------- |
+ *
+ * We need to split the extent we found, and may flip bits on second
+ * half.
+ *
+ * If the extent we found extends past our range, we just split and
+ * search again. It'll get split again the next time though.
+ *
+ * If the extent we found is inside our range, we set the desired bit
+ * on it.
+ */
+ if (state->start < start) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = split_state(tree, state, prealloc, start);
+ prealloc = NULL;
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
+ goto out;
+ }
+ if (state->end <= end) {
+ set_state_bits(tree, state, bits, NULL);
+ cache_state(state, cached_state);
+ state = clear_state_bit(tree, state, clear_bits, 0, end, NULL);
+ if (last_end >= end)
+ goto out;
+ start = last_end + 1;
+ if (state && state->start == start && !need_resched())
+ goto hit_next;
+ }
+ goto search_again;
+ }
+ /*
+ * | ---- desired range ---- |
+ * | state | or | state |
+ *
+ * There's a hole, we need to insert something in it and ignore the
+ * extent we found.
+ */
+ if (state->start > start) {
+ struct extent_state *inserted_state;
+
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Avoid freeing 'prealloc' if it can be merged with the next
+ * extent.
+ */
+ prealloc->start = start;
+ if (end < last_start)
+ prealloc->end = end;
+ else
+ prealloc->end = last_start - 1;
+
+ inserted_state = insert_state(tree, prealloc, bits, NULL);
+ if (IS_ERR(inserted_state)) {
+ ret = PTR_ERR(inserted_state);
+ extent_io_tree_panic(tree, prealloc, "insert", ret);
+ goto out;
+ }
+ cache_state(inserted_state, cached_state);
+ if (inserted_state == prealloc)
+ prealloc = NULL;
+ start = inserted_state->end + 1;
+
+ /* Beyond target range, stop. */
+ if (start > end)
+ goto out;
+
+ if (need_resched())
+ goto search_again;
+
+ state = next_search_state(inserted_state, end);
+ /*
+ * If there's a next state, whether contiguous or not, we don't
+ * need to unlock and start search again. If it's not contiguous
+ * we will end up here and try to allocate a prealloc state and insert.
+ */
+ if (state)
+ goto hit_next;
+ goto search_again;
+ }
+ /*
+ * | ---- desired range ---- |
+ * | state |
+ *
+ * We need to split the extent, and set the bit on the first half.
+ */
+ if (state->start <= end && state->end > end) {
+ prealloc = alloc_extent_state_atomic(prealloc);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = split_state(tree, state, prealloc, end + 1);
+ if (ret) {
+ extent_io_tree_panic(tree, state, "split", ret);
+ prealloc = NULL;
+ goto out;
+ }
+
+ set_state_bits(tree, prealloc, bits, NULL);
+ cache_state(prealloc, cached_state);
+ clear_state_bit(tree, prealloc, clear_bits, 0, end, NULL);
+ prealloc = NULL;
+ goto out;
+ }
+
+search_again:
+ if (start > end)
+ goto out;
+ spin_unlock(&tree->lock);
+ cond_resched();
+ first_iteration = false;
+ goto again;
+
+out:
+ spin_unlock(&tree->lock);
+ btrfs_free_extent_state(prealloc);
+
+ return ret;
+}
+
+/*
+ * Find the first range that has @bits not set. This range could start before
+ * @start.
+ *
+ * @tree: the tree to search
+ * @start: offset at/after which the found extent should start
+ * @start_ret: records the beginning of the range
+ * @end_ret: records the end of the range (inclusive)
+ * @bits: the set of bits which must be unset
+ *
+ * Since an unallocated range is also considered one which doesn't have the bits
+ * set, it's possible that @end_ret contains -1; this happens when the range
+ * spans (last_range_end, end of device]. In this case it's up to the caller to
+ * trim @end_ret to the appropriate size.
+ */
+void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits)
+{
+ struct extent_state *state;
+ struct extent_state *prev = NULL, *next = NULL;
+
+ spin_lock(&tree->lock);
+
+ /* Find first extent with bits cleared */
+ while (1) {
+ state = tree_search_prev_next(tree, start, &prev, &next);
+ if (!state && !next && !prev) {
+ /*
+ * Tree is completely empty, send full range and let
+ * caller deal with it
+ */
+ *start_ret = 0;
+ *end_ret = -1;
+ goto out;
+ } else if (!state && !next) {
+ /*
+ * We are past the last allocated chunk, set start at
+ * the end of the last extent.
+ */
+ *start_ret = prev->end + 1;
+ *end_ret = -1;
+ goto out;
+ } else if (!state) {
+ state = next;
+ }
+
+ /*
+ * At this point 'state' either contains 'start' or start is
+ * before 'state'
+ */
+ if (in_range(start, state->start, state->end - state->start + 1)) {
+ if (state->state & bits) {
+ /*
+ * |--range with bits sets--|
+ * |
+ * start
+ */
+ start = state->end + 1;
+ } else {
+ /*
+ * 'start' falls within a range that doesn't
+ * have the bits set, so take its start as the
+ * beginning of the desired range
+ *
+ * |--range with bits cleared----|
+ * |
+ * start
+ */
+ *start_ret = state->start;
+ break;
+ }
+ } else {
+ /*
+ * |---prev range---|---hole/unset---|---node range---|
+ * |
+ * start
+ *
+ * or
+ *
+ * |---hole/unset--||--first node--|
+ * 0 |
+ * start
+ */
+ if (prev)
+ *start_ret = prev->end + 1;
+ else
+ *start_ret = 0;
+ break;
+ }
+ }
+
+ /*
+ * Find the longest stretch from start until an entry which has the
+ * bits set
+ */
+ while (state) {
+ if (state->end >= start && !(state->state & bits)) {
+ *end_ret = state->end;
+ } else {
+ *end_ret = state->start - 1;
+ break;
+ }
+ state = next_state(state);
+ }
+out:
+ spin_unlock(&tree->lock);
+}
+
+/*
+ * Count the number of bytes in the tree that have a given bit(s) set for a
+ * given range.
+ *
+ * @tree: The io tree to search.
+ * @start: The start offset of the range. This value is updated to the
+ * offset of the first byte found with the given bit(s), so it
+ * can end up being bigger than the initial value.
+ * @search_end: The end offset (inclusive value) of the search range.
+ * @max_bytes: The maximum byte count we are interested in. The search stops
+ * once it reaches this count.
+ * @bits: The bits the range must have in order to be accounted for.
+ * If multiple bits are set, then only subranges that have all
+ * the bits set are accounted for.
+ * @contig: Indicate if the counted range must be contiguous. If
+ * this is true, then stop once we find a hole.
+ * @cached_state: A cached state to be used across multiple calls to this
+ * function in order to speed up searches. Use NULL if this is
+ * called only once or if each call does not start where the
+ * previous one ended.
+ *
+ * Returns the total number of bytes found within the given range that have
+ * all given bits set. If the returned number of bytes is greater than zero
+ * then @start is updated with the offset of the first byte with the bits set.
+ */
+u64 btrfs_count_range_bits(struct extent_io_tree *tree,
+ u64 *start, u64 search_end, u64 max_bytes,
+ u32 bits, bool contig,
+ struct extent_state **cached_state)
+{
+ struct extent_state *state = NULL;
+ struct extent_state *cached;
+ u64 cur_start = *start;
+ u64 total_bytes = 0;
+ u64 last = 0;
+ int found = 0;
+
+ if (WARN_ON(search_end < cur_start))
+ return 0;
+
+ spin_lock(&tree->lock);
+
+ if (!cached_state || !*cached_state)
+ goto search;
+
+ cached = *cached_state;
+
+ if (!extent_state_in_tree(cached))
+ goto search;
+
+ if (cached->start <= cur_start && cur_start <= cached->end) {
+ state = cached;
+ } else if (cached->start > cur_start) {
+ struct extent_state *prev;
+
+ /*
+ * The cached state starts after our search range's start. Check
+ * if the previous state record starts at or before the range we
+ * are looking for, and if so, use it - this is a common case
+ * when there are holes between records in the tree. If there is
+ * no previous state record, we can start from our cached state.
+ */
+ prev = prev_state(cached);
+ if (!prev)
+ state = cached;
+ else if (prev->start <= cur_start && cur_start <= prev->end)
+ state = prev;
+ }
+
+ /*
+ * This search will find all the extents that end after our range
+ * starts.
+ */
+search:
+ if (!state)
+ state = tree_search(tree, cur_start);
+
+ while (state) {
+ if (state->start > search_end)
+ break;
+ if (contig && found && state->start > last + 1)
+ break;
+ if (state->end >= cur_start && (state->state & bits) == bits) {
+ total_bytes += min(search_end, state->end) + 1 -
+ max(cur_start, state->start);
+ if (total_bytes >= max_bytes)
+ break;
+ if (!found) {
+ *start = max(cur_start, state->start);
+ found = 1;
+ }
+ last = state->end;
+ } else if (contig && found) {
+ break;
+ }
+ state = next_state(state);
+ }
+
+ if (cached_state) {
+ btrfs_free_extent_state(*cached_state);
+ *cached_state = state;
+ if (state)
+ refcount_inc(&state->refs);
+ }
+
+ spin_unlock(&tree->lock);
+
+ return total_bytes;
+}
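
A minimal, hedged usage sketch (not part of the patch; the helper name and the SZ_1M/EXTENT_DELALLOC choices are only illustrative) of how the cached state and the updated @start are meant to be used:

/* Editorial sketch: count up to 1MiB of delalloc bytes starting at @offset. */
static u64 example_count_delalloc(struct extent_io_tree *tree, u64 offset,
				  u64 isize)
{
	struct extent_state *cached = NULL;
	u64 found_start = offset;
	u64 bytes;

	bytes = btrfs_count_range_bits(tree, &found_start, isize - 1, SZ_1M,
				       EXTENT_DELALLOC, true, &cached);
	/* If bytes > 0, found_start now points at the first delalloc byte. */
	btrfs_free_extent_state(cached);
	return bytes;
}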
+
+/*
+ * Check if the single @bit exists in the given range.
+ */
+bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
+{
+ struct extent_state *state;
+ bool bitset = false;
+
+ ASSERT(is_power_of_2(bit));
+
+ spin_lock(&tree->lock);
+ state = tree_search(tree, start);
+ while (state) {
+ if (state->start > end)
+ break;
+
+ if (state->state & bit) {
+ bitset = true;
+ break;
+ }
+
+ if (state->end >= end)
+ break;
+ state = next_state(state);
+ }
+ spin_unlock(&tree->lock);
+ return bitset;
+}
+
+void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
+ struct extent_state **cached_state)
+{
+ struct extent_state *state;
+
+ /*
+ * The cached state is currently mandatory and not used to start the
+ * search, only to cache the first state record found in the range.
+ */
+ ASSERT(cached_state != NULL);
+ ASSERT(*cached_state == NULL);
+
+ *bits = 0;
+
+ spin_lock(&tree->lock);
+ state = tree_search(tree, start);
+ if (state && state->start < end) {
+ *cached_state = state;
+ refcount_inc(&state->refs);
+ }
+ while (state) {
+ if (state->start > end)
+ break;
+
+ *bits |= state->state;
+
+ if (state->end >= end)
+ break;
+
+ state = next_state(state);
+ }
+ spin_unlock(&tree->lock);
+}
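
For illustration only (the wrapper name below is hypothetical and not part of the patch), this is the calling convention the asserts above imply: pass a NULL-initialized cached state and release it afterwards.

/* Editorial sketch: collect the union of bits set anywhere in a range. */
static u32 example_range_bits(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	u32 bits;

	btrfs_get_range_bits(tree, start, end, &bits, &cached);
	btrfs_free_extent_state(cached);
	return bits;
}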
+
+/*
+ * Check if the whole range [@start, @end] contains the single @bit set.
+ */
+bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+ struct extent_state *cached)
+{
+ struct extent_state *state;
+ bool bitset = true;
+
+ ASSERT(is_power_of_2(bit));
+ ASSERT(start < end);
+
+ spin_lock(&tree->lock);
+ if (cached && extent_state_in_tree(cached) && cached->start <= start &&
+ cached->end > start)
+ state = cached;
+ else
+ state = tree_search(tree, start);
+ while (state) {
+ if (state->start > start) {
+ bitset = false;
+ break;
+ }
+
+ if ((state->state & bit) == 0) {
+ bitset = false;
+ break;
+ }
+
+ if (state->end >= end)
+ break;
+
+ /* Next state must start where this one ends. */
+ start = state->end + 1;
+ state = next_state(state);
+ }
+
+ /* We ran out of states and were still inside of our range. */
+ if (!state)
+ bitset = false;
+ spin_unlock(&tree->lock);
+ return bitset;
+}
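
To contrast the two helpers above, here is a hedged, editorial sketch (not part of the patch; the function name is hypothetical): btrfs_test_range_bit_exists() answers "is the bit set anywhere in the range", while btrfs_test_range_bit() answers "is the bit set over the whole range".

/* Editorial sketch: the relation between the two range-bit checks. */
static void example_test_bits(struct extent_io_tree *tree, u64 start, u64 end)
{
	bool any_set = btrfs_test_range_bit_exists(tree, start, end, EXTENT_DIRTY);
	bool all_set = btrfs_test_range_bit(tree, start, end, EXTENT_DIRTY, NULL);

	/* A fully set range implies a partially set one, never the reverse. */
	ASSERT(!all_set || any_set);
}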
+
+/* Wrappers around set/clear extent bit */
+int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset)
+{
+ /*
+ * We don't support EXTENT_LOCK_BITS yet, as the current changeset records
+ * any bits changed, so for the EXTENT_LOCK_BITS case it would either fail
+ * with -EEXIST or the changeset would record the whole range.
+ */
+ ASSERT(!(bits & EXTENT_LOCK_BITS));
+
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset);
+}
+
+int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset)
+{
+ /*
+ * The EXTENT_LOCK_BITS case is not supported, for the same reason as in
+ * btrfs_set_record_extent_bits().
+ */
+ ASSERT(!(bits & EXTENT_LOCK_BITS));
+
+ return btrfs_clear_extent_bit_changeset(tree, start, end, bits, NULL, changeset);
+}
+
+bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached)
+{
+ int ret;
+ u64 failed_start;
+
+ ret = set_extent_bit(tree, start, end, bits, &failed_start, NULL, cached, NULL);
+ if (ret == -EEXIST) {
+ if (failed_start > start)
+ btrfs_clear_extent_bit(tree, start, failed_start - 1,
+ bits, cached);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Either insert or lock the extent state struct between start and end,
+ * waiting for any conflicting bits to be cleared before retrying.
+ */
+int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached_state)
+{
+ struct extent_state *failed_state = NULL;
+ int ret;
+ u64 failed_start;
+
+ ret = set_extent_bit(tree, start, end, bits, &failed_start,
+ &failed_state, cached_state, NULL);
+ while (ret == -EEXIST) {
+ if (failed_start != start)
+ btrfs_clear_extent_bit(tree, start, failed_start - 1,
+ bits, cached_state);
+
+ wait_extent_bit(tree, failed_start, end, bits, &failed_state);
+ ret = set_extent_bit(tree, start, end, bits, &failed_start,
+ &failed_state, cached_state, NULL);
+ }
+ return ret;
+}
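
As a hedged illustration (not part of the patch), the typical pattern built on these primitives uses the btrfs_lock_extent()/btrfs_unlock_extent() wrappers declared later in extent-io-tree.h; the function name below is hypothetical.

/* Editorial sketch: lock a range, operate on it, then unlock it. */
static void example_locked_section(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	btrfs_lock_extent(tree, start, end, &cached);
	/* ... work on [start, end] while it is locked ... */
	btrfs_unlock_extent(tree, start, end, &cached);
}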
+
+/*
+ * Get the extent state that follows the given extent state.
+ * This is meant to be used in a context where we know no other tasks can
+ * concurrently modify the tree.
+ */
+struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree,
+ struct extent_state *state)
+{
+ struct extent_state *next;
+
+ spin_lock(&tree->lock);
+ ASSERT(extent_state_in_tree(state));
+ next = next_state(state);
+ if (next)
+ refcount_inc(&next->refs);
+ spin_unlock(&tree->lock);
+
+ return next;
+}
+
+void __cold btrfs_extent_state_free_cachep(void)
+{
+ btrfs_extent_state_leak_debug_check();
+ kmem_cache_destroy(extent_state_cache);
+}
+
+int __init btrfs_extent_state_init_cachep(void)
+{
+ extent_state_cache = kmem_cache_create("btrfs_extent_state",
+ sizeof(struct extent_state), 0, 0,
+ NULL);
+ if (!extent_state_cache)
+ return -ENOMEM;
+
+ return 0;
+}
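
An editorial sketch of the expected pairing (not part of the patch; the init/exit function names are hypothetical): the cache is created once at module init and torn down, together with the leak check, at module exit.

/* Editorial sketch: pairing of the extent state cache helpers. */
static int __init example_module_init(void)
{
	int ret;

	ret = btrfs_extent_state_init_cachep();
	if (ret)
		return ret;
	/* ... register the rest of the filesystem ... */
	return 0;
}

static void example_module_exit(void)
{
	btrfs_extent_state_free_cachep();
}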
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index 04083ee5ae6e..6f07b965e8da 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -3,42 +3,77 @@
#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/refcount.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include "misc.h"
+
struct extent_changeset;
-struct io_failure_record;
+struct btrfs_fs_info;
+struct btrfs_inode;
/* Bits for the extent state */
-#define EXTENT_DIRTY (1U << 0)
-#define EXTENT_UPTODATE (1U << 1)
-#define EXTENT_LOCKED (1U << 2)
-#define EXTENT_NEW (1U << 3)
-#define EXTENT_DELALLOC (1U << 4)
-#define EXTENT_DEFRAG (1U << 5)
-#define EXTENT_BOUNDARY (1U << 6)
-#define EXTENT_NODATASUM (1U << 7)
-#define EXTENT_CLEAR_META_RESV (1U << 8)
-#define EXTENT_NEED_WAIT (1U << 9)
-#define EXTENT_DAMAGED (1U << 10)
-#define EXTENT_NORESERVE (1U << 11)
-#define EXTENT_QGROUP_RESERVED (1U << 12)
-#define EXTENT_CLEAR_DATA_RESV (1U << 13)
-/*
- * Must be cleared only during ordered extent completion or on error paths if we
- * did not manage to submit bios and create the ordered extents for the range.
- * Should not be cleared during page release and page invalidation (if there is
- * an ordered extent in flight), that is left for the ordered extent completion.
- */
-#define EXTENT_DELALLOC_NEW (1U << 14)
-/*
- * When an ordered extent successfully completes for a region marked as a new
- * delalloc range, use this flag when clearing a new delalloc range to indicate
- * that the VFS' inode number of bytes should be incremented and the inode's new
- * delalloc bytes decremented, in an atomic way to prevent races with stat(2).
- */
-#define EXTENT_ADD_INODE_BYTES (1U << 15)
+enum {
+ ENUM_BIT(EXTENT_DIRTY),
+ ENUM_BIT(EXTENT_LOCKED),
+ ENUM_BIT(EXTENT_DIO_LOCKED),
+ ENUM_BIT(EXTENT_DIRTY_LOG1),
+ ENUM_BIT(EXTENT_DIRTY_LOG2),
+ ENUM_BIT(EXTENT_DELALLOC),
+ ENUM_BIT(EXTENT_DEFRAG),
+ ENUM_BIT(EXTENT_BOUNDARY),
+ ENUM_BIT(EXTENT_NODATASUM),
+ ENUM_BIT(EXTENT_CLEAR_META_RESV),
+ ENUM_BIT(EXTENT_NEED_WAIT),
+ ENUM_BIT(EXTENT_NORESERVE),
+ ENUM_BIT(EXTENT_QGROUP_RESERVED),
+ ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
+ /*
+ * Must be cleared only during ordered extent completion or on error
+ * paths if we did not manage to submit bios and create the ordered
+ * extents for the range. Should not be cleared during page release
+ * and page invalidation (if there is an ordered extent in flight),
+ * that is left for the ordered extent completion.
+ */
+ ENUM_BIT(EXTENT_DELALLOC_NEW),
+ /*
+ * Mark that a range is being locked for finishing an ordered extent.
+ * Used together with EXTENT_LOCKED.
+ */
+ ENUM_BIT(EXTENT_FINISHING_ORDERED),
+ /*
+ * When an ordered extent successfully completes for a region marked as
+ * a new delalloc range, use this flag when clearing a new delalloc
+ * range to indicate that the VFS' inode number of bytes should be
+ * incremented and the inode's new delalloc bytes decremented, in an
+ * atomic way to prevent races with stat(2).
+ */
+ ENUM_BIT(EXTENT_ADD_INODE_BYTES),
+ /*
+ * Set during truncate when we're clearing an entire range and we just
+ * want the extent states to go away.
+ */
+ ENUM_BIT(EXTENT_CLEAR_ALL_BITS),
+
+ /*
+ * This must be last.
+ *
+ * Bit not representing a state but a request for NOWAIT semantics,
+ * e.g. when allocating memory, and must be masked out from the other
+ * bits.
+ */
+ ENUM_BIT(EXTENT_NOWAIT)
+};
+
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | \
- EXTENT_ADD_INODE_BYTES)
+ EXTENT_ADD_INODE_BYTES | \
+ EXTENT_CLEAR_ALL_BITS)
+
+#define EXTENT_LOCK_BITS (EXTENT_LOCKED | EXTENT_DIO_LOCKED)
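
As an editorial aside (not part of the patch), the comment on EXTENT_NOWAIT implies that a set-bits implementation translates it into an allocation mask and strips it before storing the remaining bits; a hedged sketch, assuming <linux/gfp.h>:

/* Editorial sketch: honour the EXTENT_NOWAIT request bit, then mask it out. */
static inline gfp_t example_extract_gfp_mask(u32 *bits)
{
	gfp_t gfp_mask = (*bits & EXTENT_NOWAIT) ? GFP_NOWAIT : GFP_NOFS;

	*bits &= ~EXTENT_NOWAIT;
	return gfp_mask;
}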
/*
* Redefined bits above which are used only in the device allocation tree,
@@ -56,7 +91,6 @@ enum {
IO_TREE_FS_EXCLUDED_EXTENTS,
IO_TREE_BTREE_INODE_IO,
IO_TREE_INODE_IO,
- IO_TREE_INODE_IO_FAILURE,
IO_TREE_RELOC_BLOCKS,
IO_TREE_TRANS_DIRTY_PAGES,
IO_TREE_ROOT_DIRTY_LOG_PAGES,
@@ -68,10 +102,17 @@ enum {
struct extent_io_tree {
struct rb_root state;
- struct btrfs_fs_info *fs_info;
- void *private_data;
- u64 dirty_bytes;
- bool track_uptodate;
+ /*
+ * The fs_info is needed for trace points; a tree attached to an inode
+ * needs the inode.
+ *
+ * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
+ * accessed as inode->root->fs_info
+ */
+ union {
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_inode *inode;
+ };
/* Who owns this io tree, should be one of IO_TREE_* */
u8 owner;
@@ -89,179 +130,115 @@ struct extent_state {
refcount_t refs;
u32 state;
- struct io_failure_record *failrec;
-
#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
#endif
};
-int __init extent_state_cache_init(void);
-void __cold extent_state_cache_exit(void);
-
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *tree, unsigned int owner,
- void *private_data);
-void extent_io_tree_release(struct extent_io_tree *tree);
-
-int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached);
+const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree);
+const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree);
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
- return lock_extent_bits(tree, start, end, NULL);
-}
+void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *tree, unsigned int owner);
+void btrfs_extent_io_tree_release(struct extent_io_tree *tree);
+int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached);
+bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached);
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
-
-int __init extent_io_init(void);
-void __cold extent_io_exit(void);
-
-u64 count_range_bits(struct extent_io_tree *tree,
- u64 *start, u64 search_end,
- u64 max_bytes, u32 bits, int contig);
-
-void free_extent_state(struct extent_state *state);
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int filled, struct extent_state *cached_state);
-int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset);
-int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int wake, int delete,
- struct extent_state **cached);
-int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask,
- struct extent_changeset *changeset);
-
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
+ return btrfs_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached);
}
-static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- GFP_NOFS, NULL);
+ return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached);
}
-static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
- u64 start, u64 end, struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- GFP_ATOMIC, NULL);
-}
-
-static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, u32 bits)
-{
- int wake = 0;
+int __init btrfs_extent_state_init_cachep(void);
+void __cold btrfs_extent_state_free_cachep(void);
- if (bits & EXTENT_LOCKED)
- wake = 1;
+u64 btrfs_count_range_bits(struct extent_io_tree *tree,
+ u64 *start, u64 search_end,
+ u64 max_bytes, u32 bits, bool contig,
+ struct extent_state **cached_state);
- return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
-}
-
-int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset);
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, unsigned exclusive_bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask,
- struct extent_changeset *changeset);
-int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits);
-
-static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, u32 bits)
-{
- return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
- NULL);
-}
-
-static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
- cached_state, GFP_NOFS, NULL);
-}
-
-static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask)
+void btrfs_free_extent_state(struct extent_state *state);
+bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+ struct extent_state *cached_state);
+bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
+void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
+ struct extent_state **cached_state);
+int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset);
+int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached,
+ struct extent_changeset *changeset);
+
+static inline int btrfs_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 end, u32 bits,
+ struct extent_state **cached)
{
- return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, NULL,
- mask, NULL);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, bits, cached, NULL);
}
-static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
{
- return clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, cached);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_LOCKED,
+ cached, NULL);
}
-int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, u32 clear_bits,
- struct extent_state **cached_state);
+int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_changeset *changeset);
+int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state);
-static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, u32 extra_bits,
- struct extent_state **cached_state)
+static inline int btrfs_clear_extent_dirty(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
- 0, NULL, cached_state, GFP_NOFS, NULL);
+ return btrfs_clear_extent_bit(tree, start, end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, cached);
}
-static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
+int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, u32 clear_bits,
+ struct extent_state **cached_state);
+
+bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits,
+ struct extent_state **cached_state);
+void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits);
+bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits);
+bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
+ u64 *end, u64 max_bytes,
+ struct extent_state **cached_state);
+static inline int btrfs_lock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
- 0, NULL, cached_state, GFP_NOFS, NULL);
+ return btrfs_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached);
}
-static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
- u64 end)
+static inline bool btrfs_try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, NULL,
- GFP_NOFS, NULL);
+ return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached);
}
-static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
+static inline int btrfs_unlock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- cached_state, mask, NULL);
+ return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_DIO_LOCKED,
+ cached, NULL);
}
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits,
- struct extent_state **cached_state);
-void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits);
-int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits);
-int extent_invalidatepage(struct extent_io_tree *tree,
- struct page *page, unsigned long offset);
-bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
- u64 *end, u64 max_bytes,
- struct extent_state **cached_state);
-
-/* This should be reworked in the future and put elsewhere. */
-struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start);
-int set_state_failrec(struct extent_io_tree *tree, u64 start,
- struct io_failure_record *failrec);
-void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
- u64 end);
-int free_io_failure(struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree,
- struct io_failure_record *rec);
-int clean_io_failure(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree, u64 start,
- struct page *page, u64 ino, unsigned int pg_offset);
+struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree,
+ struct extent_state *state);
#endif /* BTRFS_EXTENT_IO_TREE_H */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3fd736a02c1e..e4cae34620d1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -16,8 +16,9 @@
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
-#include "misc.h"
-#include "tree-log.h"
+#include "ctree.h"
+#include "extent-tree.h"
+#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
@@ -25,25 +26,28 @@
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
-#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
-#include "delalloc-space.h"
-#include "block-group.h"
#include "discard.h"
-#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
+#include "fs.h"
+#include "accessors.h"
+#include "root-tree.h"
+#include "file-item.h"
+#include "orphan.h"
+#include "tree-checker.h"
+#include "raid-stripe-tree.h"
+#include "delayed-inode.h"
#undef SCRAMBLE_DELAYED_REFS
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
+ struct btrfs_delayed_ref_head *href,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
@@ -51,56 +55,33 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
- struct btrfs_key *ins, int ref_mod);
+ struct btrfs_key *ins, int ref_mod, u64 oref_root);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op);
-static int find_next_key(struct btrfs_path *path, int level,
+static int find_next_key(const struct btrfs_path *path, int level,
struct btrfs_key *key);
-static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
+static int block_group_bits(const struct btrfs_block_group *cache, u64 bits)
{
return (cache->flags & bits) == bits;
}
-int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 num_bytes)
-{
- u64 end = start + num_bytes - 1;
- set_extent_bits(&fs_info->excluded_extents, start, end,
- EXTENT_UPTODATE);
- return 0;
-}
-
-void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
-{
- struct btrfs_fs_info *fs_info = cache->fs_info;
- u64 start, end;
-
- start = cache->start;
- end = start + cache->length - 1;
-
- clear_extent_bits(&fs_info->excluded_extents, start, end,
- EXTENT_UPTODATE);
-}
-
/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
- int ret;
+ struct btrfs_root *root = btrfs_extent_root(fs_info, start);
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = start;
- key.offset = len;
key.type = BTRFS_EXTENT_ITEM_KEY;
- ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
- btrfs_free_path(path);
- return ret;
+ key.offset = len;
+ return btrfs_search_slot(NULL, root, &key, path, 0, 0);
}
/*
@@ -114,17 +95,17 @@ int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
*/
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 offset, int metadata, u64 *refs, u64 *flags)
+ u64 offset, int metadata, u64 *refs, u64 *flags,
+ u64 *owning_root)
{
+ struct btrfs_root *extent_root;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_path *path;
- struct btrfs_extent_item *ei;
- struct extent_buffer *leaf;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
- u32 item_size;
u64 num_refs;
u64 extent_flags;
+ u64 owner = 0;
int ret;
/*
@@ -140,24 +121,20 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- if (!trans) {
- path->skip_locking = 1;
- path->search_commit_root = 1;
- }
-
search_again:
key.objectid = bytenr;
- key.offset = offset;
if (metadata)
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = offset;
- ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ extent_root = btrfs_extent_root(fs_info, bytenr);
+ ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out_free;
+ return ret;
- if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
+ if (ret > 0 && key.type == BTRFS_METADATA_ITEM_KEY) {
if (path->slots[0]) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -170,37 +147,40 @@ search_again:
}
if (ret == 0) {
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
- if (item_size >= sizeof(*ei)) {
- ei = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_item);
- num_refs = btrfs_extent_refs(leaf, ei);
- extent_flags = btrfs_extent_flags(leaf, ei);
- } else {
- ret = -EINVAL;
- btrfs_print_v0_err(fs_info);
- if (trans)
- btrfs_abort_transaction(trans, ret);
- else
- btrfs_handle_fs_error(fs_info, ret, NULL);
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_extent_item *ei;
+ const u32 item_size = btrfs_item_size(leaf, path->slots[0]);
- goto out_free;
+ if (unlikely(item_size < sizeof(*ei))) {
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
- BUG_ON(num_refs == 0);
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ num_refs = btrfs_extent_refs(leaf, ei);
+ if (unlikely(num_refs == 0)) {
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected zero reference count for extent item " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&key));
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ extent_flags = btrfs_extent_flags(leaf, ei);
+ owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);
} else {
num_refs = 0;
extent_flags = 0;
ret = 0;
}
- if (!trans)
- goto out;
-
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
refcount_inc(&head->refs);
@@ -220,22 +200,21 @@ search_again:
spin_lock(&head->lock);
if (head->extent_op && head->extent_op->update_flags)
extent_flags |= head->extent_op->flags_to_set;
- else
- BUG_ON(num_refs == 0);
num_refs += head->ref_mod;
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
}
spin_unlock(&delayed_refs->lock);
-out:
+
WARN_ON(num_refs == 0);
if (refs)
*refs = num_refs;
if (flags)
*flags = extent_flags;
-out_free:
- btrfs_free_path(path);
+ if (owning_root)
+ *owning_root = owner;
+
return ret;
}
@@ -347,16 +326,22 @@ out_free:
/*
* is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
- * is_data == BTRFS_REF_TYPE_DATA, data type is requiried,
+ * is_data == BTRFS_REF_TYPE_DATA, data type is required,
* is_data == BTRFS_REF_TYPE_ANY, either type is OK.
*/
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
- struct btrfs_extent_inline_ref *iref,
+ const struct btrfs_extent_inline_ref *iref,
enum btrfs_inline_ref_type is_data)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
int type = btrfs_extent_inline_ref_type(eb, iref);
u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ return type;
+ }
+
if (type == BTRFS_TREE_BLOCK_REF_KEY ||
type == BTRFS_SHARED_BLOCK_REF_KEY ||
type == BTRFS_SHARED_DATA_REF_KEY ||
@@ -365,26 +350,25 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_TREE_BLOCK_REF_KEY)
return type;
if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
- ASSERT(eb->fs_info);
+ ASSERT(fs_info);
/*
* Every shared one has parent tree block,
* which must be aligned to sector size.
*/
- if (offset &&
- IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ if (offset && IS_ALIGNED(offset, fs_info->sectorsize))
return type;
}
} else if (is_data == BTRFS_REF_TYPE_DATA) {
if (type == BTRFS_EXTENT_DATA_REF_KEY)
return type;
if (type == BTRFS_SHARED_DATA_REF_KEY) {
- ASSERT(eb->fs_info);
+ ASSERT(fs_info);
/*
* Every shared one has parent tree block,
* which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ IS_ALIGNED(offset, fs_info->sectorsize))
return type;
}
} else {
@@ -393,11 +377,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
}
}
- btrfs_print_leaf((struct extent_buffer *)eb);
- btrfs_err(eb->fs_info,
+ WARN_ON(1);
+ btrfs_print_leaf(eb);
+ btrfs_err(fs_info,
"eb %llu iref 0x%lx invalid extent inline ref type %d",
eb->start, (unsigned long)iref, type);
- WARN_ON(1);
return BTRFS_REF_TYPE_INVALID;
}
@@ -409,32 +393,32 @@ u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
__le64 lenum;
lenum = cpu_to_le64(root_objectid);
- high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
+ high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(owner);
- low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+ low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(offset);
- low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+ low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
return ((u64)high_crc << 31) ^ (u64)low_crc;
}
-static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
- struct btrfs_extent_data_ref *ref)
+static u64 hash_extent_data_ref_item(const struct extent_buffer *leaf,
+ const struct btrfs_extent_data_ref *ref)
{
return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
btrfs_extent_data_ref_objectid(leaf, ref),
btrfs_extent_data_ref_offset(leaf, ref));
}
-static int match_extent_data_ref(struct extent_buffer *leaf,
- struct btrfs_extent_data_ref *ref,
- u64 root_objectid, u64 owner, u64 offset)
+static bool match_extent_data_ref(const struct extent_buffer *leaf,
+ const struct btrfs_extent_data_ref *ref,
+ u64 root_objectid, u64 owner, u64 offset)
{
if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
- return 0;
- return 1;
+ return false;
+ return true;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
@@ -443,14 +427,13 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
u64 root_objectid,
u64 owner, u64 offset)
{
- struct btrfs_root *root = trans->fs_info->extent_root;
+ struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
struct btrfs_key key;
struct btrfs_extent_data_ref *ref;
struct extent_buffer *leaf;
u32 nritems;
- int ret;
int recow;
- int err = -ENOENT;
+ int ret;
key.objectid = bytenr;
if (parent) {
@@ -464,26 +447,26 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
again:
recow = 0;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0) {
- err = ret;
- goto fail;
- }
+ if (ret < 0)
+ return ret;
if (parent) {
- if (!ret)
- return 0;
- goto fail;
+ if (ret)
+ return -ENOENT;
+ return 0;
}
+ ret = -ENOENT;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
while (1) {
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- err = ret;
- if (ret)
- goto fail;
+ if (ret) {
+ if (ret > 0)
+ return -ENOENT;
+ return ret;
+ }
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
@@ -504,37 +487,37 @@ again:
btrfs_release_path(path);
goto again;
}
- err = 0;
+ ret = 0;
break;
}
path->slots[0]++;
}
fail:
- return err;
+ return ret;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- u64 bytenr, u64 parent,
- u64 root_objectid, u64 owner,
- u64 offset, int refs_to_add)
+ const struct btrfs_delayed_ref_node *node,
+ u64 bytenr)
{
- struct btrfs_root *root = trans->fs_info->extent_root;
+ struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
struct btrfs_key key;
struct extent_buffer *leaf;
+ u64 owner = btrfs_delayed_ref_owner(node);
+ u64 offset = btrfs_delayed_ref_offset(node);
u32 size;
u32 num_refs;
int ret;
key.objectid = bytenr;
- if (parent) {
+ if (node->parent) {
key.type = BTRFS_SHARED_DATA_REF_KEY;
- key.offset = parent;
+ key.offset = node->parent;
size = sizeof(struct btrfs_shared_data_ref);
} else {
key.type = BTRFS_EXTENT_DATA_REF_KEY;
- key.offset = hash_extent_data_ref(root_objectid,
- owner, offset);
+ key.offset = hash_extent_data_ref(node->ref_root, owner, offset);
size = sizeof(struct btrfs_extent_data_ref);
}
@@ -543,15 +526,15 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
goto fail;
leaf = path->nodes[0];
- if (parent) {
+ if (node->parent) {
struct btrfs_shared_data_ref *ref;
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
if (ret == 0) {
- btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
+ btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod);
} else {
num_refs = btrfs_shared_data_ref_count(leaf, ref);
- num_refs += refs_to_add;
+ num_refs += node->ref_mod;
btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
}
} else {
@@ -559,7 +542,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
while (ret == -EEXIST) {
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
- if (match_extent_data_ref(leaf, ref, root_objectid,
+ if (match_extent_data_ref(leaf, ref, node->ref_root,
owner, offset))
break;
btrfs_release_path(path);
@@ -574,18 +557,16 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
if (ret == 0) {
- btrfs_set_extent_data_ref_root(leaf, ref,
- root_objectid);
+ btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root);
btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
btrfs_set_extent_data_ref_offset(leaf, ref, offset);
- btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
+ btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod);
} else {
num_refs = btrfs_extent_data_ref_count(leaf, ref);
- num_refs += refs_to_add;
+ num_refs += node->ref_mod;
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
}
}
- btrfs_mark_buffer_dirty(leaf);
ret = 0;
fail:
btrfs_release_path(path);
@@ -593,8 +574,9 @@ fail:
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_path *path,
- int refs_to_drop, int *last_ref)
+ int refs_to_drop)
{
struct btrfs_key key;
struct btrfs_extent_data_ref *ref1 = NULL;
@@ -614,44 +596,41 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
- } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
- btrfs_print_v0_err(trans->fs_info);
- btrfs_abort_transaction(trans, -EINVAL);
- return -EINVAL;
} else {
- BUG();
+ btrfs_err(trans->fs_info,
+ "unrecognized backref key " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&key));
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ return -EUCLEAN;
}
BUG_ON(num_refs < refs_to_drop);
num_refs -= refs_to_drop;
if (num_refs == 0) {
- ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
- *last_ref = 1;
+ ret = btrfs_del_item(trans, root, path);
} else {
if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
- btrfs_mark_buffer_dirty(leaf);
}
return ret;
}
-static noinline u32 extent_data_ref_count(struct btrfs_path *path,
- struct btrfs_extent_inline_ref *iref)
+static noinline u32 extent_data_ref_count(const struct btrfs_path *path,
+ const struct btrfs_extent_inline_ref *iref)
{
struct btrfs_key key;
struct extent_buffer *leaf;
- struct btrfs_extent_data_ref *ref1;
- struct btrfs_shared_data_ref *ref2;
+ const struct btrfs_extent_data_ref *ref1;
+ const struct btrfs_shared_data_ref *ref2;
u32 num_refs = 0;
int type;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
if (iref) {
/*
* If type is invalid, we should have bailed out earlier than
@@ -660,10 +639,10 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
ASSERT(type != BTRFS_REF_TYPE_INVALID);
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
- ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
+ ref1 = (const struct btrfs_extent_data_ref *)(&iref->offset);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
} else {
- ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
+ ref2 = (const struct btrfs_shared_data_ref *)(iref + 1);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
}
} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
@@ -685,7 +664,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 parent,
u64 root_objectid)
{
- struct btrfs_root *root = trans->fs_info->extent_root;
+ struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
struct btrfs_key key;
int ret;
@@ -706,23 +685,23 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- u64 bytenr, u64 parent,
- u64 root_objectid)
+ const struct btrfs_delayed_ref_node *node,
+ u64 bytenr)
{
+ struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
struct btrfs_key key;
int ret;
key.objectid = bytenr;
- if (parent) {
+ if (node->parent) {
key.type = BTRFS_SHARED_BLOCK_REF_KEY;
- key.offset = parent;
+ key.offset = node->parent;
} else {
key.type = BTRFS_TREE_BLOCK_REF_KEY;
- key.offset = root_objectid;
+ key.offset = node->ref_root;
}
- ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
- path, &key, 0);
+ ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
btrfs_release_path(path);
return ret;
}
@@ -744,7 +723,7 @@ static inline int extent_ref_type(u64 parent, u64 owner)
return type;
}
-static int find_next_key(struct btrfs_path *path, int level,
+static int find_next_key(const struct btrfs_path *path, int level,
struct btrfs_key *key)
{
@@ -787,7 +766,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
u64 owner, u64 offset, int insert)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
@@ -800,7 +779,6 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
int type;
int want;
int ret;
- int err = 0;
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
int needed;
@@ -811,8 +789,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
want = extent_ref_type(parent, owner);
if (insert) {
extra_size = btrfs_extent_inline_ref_size(want);
- path->search_for_extension = 1;
- path->keep_locks = 1;
+ path->search_for_extension = true;
} else
extra_size = -1;
@@ -827,10 +804,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
again:
ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
/*
* We may be a newly converted file system which still has the old fat
@@ -857,19 +832,26 @@ again:
}
if (ret && !insert) {
- err = -ENOENT;
+ ret = -ENOENT;
goto out;
} else if (WARN_ON(ret)) {
- err = -EIO;
+ btrfs_print_leaf(path->nodes[0]);
+ btrfs_err(fs_info,
+"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
+ bytenr, num_bytes, parent, root_objectid, owner,
+ offset);
+ ret = -EUCLEAN;
goto out;
}
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
if (unlikely(item_size < sizeof(*ei))) {
- err = -EINVAL;
- btrfs_print_v0_err(fs_info);
- btrfs_abort_transaction(trans, err);
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected extent item size, has %llu expect >= %zu",
+ item_size, sizeof(*ei));
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -889,16 +871,17 @@ again:
else
needed = BTRFS_REF_TYPE_BLOCK;
- err = -ENOENT;
- while (1) {
- if (ptr >= end) {
- WARN_ON(ptr > end);
- break;
- }
+ ret = -ENOENT;
+ while (ptr < end) {
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
- if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EUCLEAN;
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ ptr += btrfs_extent_inline_ref_size(type);
+ continue;
+ }
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID)) {
+ ret = -EUCLEAN;
goto out;
}
@@ -914,7 +897,7 @@ again:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (match_extent_data_ref(leaf, dref, root_objectid,
owner, offset)) {
- err = 0;
+ ret = 0;
break;
}
if (hash_extent_data_ref_item(leaf, dref) <
@@ -925,14 +908,14 @@ again:
ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
if (parent > 0) {
if (parent == ref_offset) {
- err = 0;
+ ret = 0;
break;
}
if (ref_offset < parent)
break;
} else {
if (root_objectid == ref_offset) {
- err = 0;
+ ret = 0;
break;
}
if (ref_offset < root_objectid)
@@ -941,12 +924,41 @@ again:
}
ptr += btrfs_extent_inline_ref_size(type);
}
- if (err == -ENOENT && insert) {
+
+ if (unlikely(ptr > end)) {
+ ret = -EUCLEAN;
+ btrfs_print_leaf(path->nodes[0]);
+ btrfs_crit(fs_info,
+"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
+ path->slots[0], root_objectid, owner, offset, parent);
+ goto out;
+ }
+
+ if (ret == -ENOENT && insert) {
if (item_size + extra_size >=
BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out;
}
+
+ if (path->slots[0] + 1 < btrfs_header_nritems(path->nodes[0])) {
+ struct btrfs_key tmp_key;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &tmp_key, path->slots[0] + 1);
+ if (tmp_key.objectid == bytenr &&
+ tmp_key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ goto out_no_entry;
+ }
+
+ if (!path->keep_locks) {
+ btrfs_release_path(path);
+ path->keep_locks = true;
+ goto again;
+ }
+
/*
* To add new inline back ref, we have to make sure
* there is no corresponding back ref item.
@@ -956,25 +968,27 @@ again:
if (find_next_key(path, 0, &key) == 0 &&
key.objectid == bytenr &&
key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out;
}
}
+out_no_entry:
*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
- if (insert) {
- path->keep_locks = 0;
- path->search_for_extension = 0;
+ if (path->keep_locks) {
+ path->keep_locks = false;
btrfs_unlock_up_safe(path, 1);
}
- return err;
+ if (insert)
+ path->search_for_extension = false;
+ return ret;
}
/*
* helper to add new inline back ref
*/
static noinline_for_stack
-void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
u64 parent, u64 root_objectid,
@@ -997,7 +1011,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- btrfs_extend_item(path, size);
+ btrfs_extend_item(trans, path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
@@ -1007,7 +1021,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
__run_delayed_extent_op(extent_op, leaf, ei);
ptr = (unsigned long)ei + item_offset;
- end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
+ end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
if (ptr < end - size)
memmove_extent_buffer(leaf, ptr + size, ptr,
end - size - ptr);
@@ -1031,7 +1045,6 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
} else {
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
- btrfs_mark_buffer_dirty(leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1064,14 +1077,15 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
/*
* helper to update/remove inline back ref
*/
-static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_path *path,
+static noinline_for_stack int update_inline_extent_backref(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
- struct btrfs_delayed_extent_op *extent_op,
- int *last_ref)
+ struct btrfs_delayed_extent_op *extent_op)
{
struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
struct btrfs_extent_data_ref *dref = NULL;
struct btrfs_shared_data_ref *sref = NULL;
@@ -1084,18 +1098,33 @@ void update_inline_extent_backref(struct btrfs_path *path,
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
- WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
+ if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
+ struct btrfs_key key;
+ u32 extent_size;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ extent_size = fs_info->nodesize;
+ else
+ extent_size = key.offset;
+ btrfs_print_leaf(leaf);
+ btrfs_err(fs_info,
+ "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
+ key.objectid, extent_size, refs_to_mod, refs);
+ return -EUCLEAN;
+ }
refs += refs_to_mod;
btrfs_set_extent_refs(leaf, ei, refs);
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, ei);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
/*
- * If type is invalid, we should have bailed out after
- * lookup_inline_extent_backref().
+ * Function btrfs_get_extent_inline_ref_type() has already printed
+ * error messages.
*/
- type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
- ASSERT(type != BTRFS_REF_TYPE_INVALID);
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID))
+ return -EUCLEAN;
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -1105,10 +1134,43 @@ void update_inline_extent_backref(struct btrfs_path *path,
refs = btrfs_shared_data_ref_count(leaf, sref);
} else {
refs = 1;
- BUG_ON(refs_to_mod != -1);
+ /*
+ * For tree blocks we can only drop one ref for it, and tree
+ * blocks should not have refs > 1.
+ *
+ * Furthermore if we're inserting a new inline backref, we
+ * won't reach this path either. That would be
+ * setup_inline_extent_backref().
+ */
+ if (unlikely(refs_to_mod != -1)) {
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ btrfs_print_leaf(leaf);
+ btrfs_err(fs_info,
+ "invalid refs_to_mod for tree block %llu, has %d expect -1",
+ key.objectid, refs_to_mod);
+ return -EUCLEAN;
+ }
}
- BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
+ if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
+ struct btrfs_key key;
+ u32 extent_size;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ extent_size = fs_info->nodesize;
+ else
+ extent_size = key.offset;
+ btrfs_print_leaf(leaf);
+ btrfs_err(fs_info,
+"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
+ (unsigned long)iref, key.objectid, extent_size,
+ refs_to_mod, refs);
+ return -EUCLEAN;
+ }
refs += refs_to_mod;
if (refs > 0) {
@@ -1117,18 +1179,17 @@ void update_inline_extent_backref(struct btrfs_path *path,
else
btrfs_set_shared_data_ref_count(leaf, sref, refs);
} else {
- *last_ref = 1;
size = btrfs_extent_inline_ref_size(type);
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
ptr = (unsigned long)iref;
end = (unsigned long)ei + item_size;
if (ptr + size < end)
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- btrfs_truncate_item(path, item_size, 1);
+ btrfs_truncate_item(trans, path, item_size, 1);
}
- btrfs_mark_buffer_dirty(leaf);
+ return 0;
}
static noinline_for_stack
@@ -1150,22 +1211,17 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
* We're adding refs to a tree block we already own, this
* should not happen at all.
*/
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+ if (unlikely(owner < BTRFS_FIRST_FREE_OBJECTID)) {
+ btrfs_print_leaf(path->nodes[0]);
btrfs_crit(trans->fs_info,
-"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu",
- bytenr, num_bytes, root_objectid);
- if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
- WARN_ON(1);
- btrfs_crit(trans->fs_info,
- "path->slots[0]=%d path->nodes[0]:", path->slots[0]);
- btrfs_print_leaf(path->nodes[0]);
- }
+"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
+ bytenr, num_bytes, root_objectid, path->slots[0]);
return -EUCLEAN;
}
- update_inline_extent_backref(path, iref, refs_to_add,
- extent_op, NULL);
+ ret = update_inline_extent_backref(trans, path, iref,
+ refs_to_add, extent_op);
} else if (ret == -ENOENT) {
- setup_inline_extent_backref(trans->fs_info, path, iref, parent,
+ setup_inline_extent_backref(trans, path, iref, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
ret = 0;
@@ -1174,23 +1230,21 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
- int refs_to_drop, int is_data, int *last_ref)
+ int refs_to_drop, int is_data)
{
int ret = 0;
BUG_ON(!is_data && refs_to_drop != 1);
- if (iref) {
- update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
- last_ref);
- } else if (is_data) {
- ret = remove_extent_data_ref(trans, path, refs_to_drop,
- last_ref);
- } else {
- *last_ref = 1;
- ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
- }
+ if (iref)
+ ret = update_inline_extent_backref(trans, path, iref,
+ -refs_to_drop, NULL);
+ else if (is_data)
+ ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
+ else
+ ret = btrfs_del_item(trans, root, path);
return ret;
}
@@ -1199,11 +1253,12 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
{
int j, ret = 0;
u64 bytes_left, end;
- u64 aligned_start = ALIGN(start, 1 << 9);
+ u64 aligned_start = ALIGN(start, SECTOR_SIZE);
- if (WARN_ON(start != aligned_start)) {
+ /* Adjust the range to be aligned to 512B sectors if necessary. */
+ if (start != aligned_start) {
len -= aligned_start - start;
- len = round_down(len, 1 << 9);
+ len = round_down(len, SECTOR_SIZE);
start = aligned_start;
}
@@ -1241,8 +1296,9 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
}
if (size) {
- ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
- GFP_NOFS, 0);
+ ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
+ size >> SECTOR_SHIFT,
+ GFP_NOFS);
if (!ret)
*discarded_bytes += size;
else if (ret != -EOPNOTSUPP)
@@ -1257,16 +1313,33 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
bytes_left = end - start;
}
- if (bytes_left) {
- ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
- GFP_NOFS, 0);
- if (!ret)
- *discarded_bytes += bytes_left;
+ while (bytes_left) {
+ u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);
+
+ ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
+ bytes_to_discard >> SECTOR_SHIFT,
+ GFP_NOFS);
+
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ break;
+ continue;
+ }
+
+ start += bytes_to_discard;
+ bytes_left -= bytes_to_discard;
+ *discarded_bytes += bytes_to_discard;
+
+ if (btrfs_trim_interrupted()) {
+ ret = -ERESTARTSYS;
+ break;
+ }
}
+
return ret;
}
-static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
+static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
struct btrfs_device *dev = stripe->dev;
struct btrfs_fs_info *fs_info = dev->fs_info;
@@ -1294,7 +1367,7 @@ static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
&discarded);
discarded += src_disc;
- } else if (blk_queue_discard(bdev_get_queue(stripe->dev->bdev))) {
+ } else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
} else {
ret = 0;
@@ -1313,76 +1386,60 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 discarded_bytes = 0;
u64 end = bytenr + num_bytes;
u64 cur = bytenr;
- struct btrfs_io_context *bioc = NULL;
/*
- * Avoid races with device replace and make sure our bioc has devices
- * associated to its stripes that don't go away while we are discarding.
+ * Avoid races with device replace and make sure the devices in the
+ * stripes don't go away while we are discarding.
*/
btrfs_bio_counter_inc_blocked(fs_info);
while (cur < end) {
- struct btrfs_io_stripe *stripe;
+ struct btrfs_discard_stripe *stripes;
+ unsigned int num_stripes;
int i;
num_bytes = end - cur;
- /* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
- &num_bytes, &bioc, 0);
- /*
- * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
- * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
- * thus we can't continue anyway.
- */
- if (ret < 0)
- goto out;
+ stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
+ if (IS_ERR(stripes)) {
+ ret = PTR_ERR(stripes);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ break;
+ }
- stripe = bioc->stripes;
- for (i = 0; i < bioc->num_stripes; i++, stripe++) {
+ for (i = 0; i < num_stripes; i++) {
+ struct btrfs_discard_stripe *stripe = stripes + i;
u64 bytes;
- struct btrfs_device *device = stripe->dev;
- if (!device->bdev) {
+ if (!stripe->dev->bdev) {
ASSERT(btrfs_test_opt(fs_info, DEGRADED));
continue;
}
- if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
+ &stripe->dev->dev_state))
continue;
ret = do_discard_extent(stripe, &bytes);
- if (!ret) {
- discarded_bytes += bytes;
- } else if (ret != -EOPNOTSUPP) {
+ if (ret) {
/*
- * Logic errors or -ENOMEM, or -EIO, but
- * unlikely to happen.
- *
- * And since there are two loops, explicitly
- * go to out to avoid confusion.
+ * Keep going if discard is not supported by the
+ * device.
*/
- btrfs_put_bioc(bioc);
- goto out;
+ if (ret != -EOPNOTSUPP)
+ break;
+ ret = 0;
+ } else {
+ discarded_bytes += bytes;
}
-
- /*
- * Just in case we get back EOPNOTSUPP for some reason,
- * just ignore the return value so we don't screw up
- * people calling discard_extent.
- */
- ret = 0;
}
- btrfs_put_bioc(bioc);
+ kfree(stripes);
+ if (ret)
+ break;
cur += num_bytes;
}
-out:
btrfs_bio_counter_dec(fs_info);
-
if (actual_bytes)
*actual_bytes = discarded_bytes;
-
-
- if (ret == -EOPNOTSUPP)
- ret = 0;
return ret;
}
@@ -1396,7 +1453,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
generic_ref->action);
BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
- generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);
+ generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID);
if (generic_ref->type == BTRFS_REF_METADATA)
ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
@@ -1409,7 +1466,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
}
/*
- * __btrfs_inc_extent_ref - insert backreference for a given extent
+ * Insert backreference for a given extent.
*
* The counterpart is in __btrfs_free_extent(), with examples and more details
* how it works.
@@ -1419,45 +1476,24 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
* @node: The delayed ref node used to get the bytenr/length for
* extent whose references are incremented.
*
- * @parent: If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
- * BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
- * bytenr of the parent block. Since new extents are always
- * created with indirect references, this will only be the case
- * when relocating a shared extent. In that case, root_objectid
- * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
- * be 0
- *
- * @root_objectid: The id of the root where this modification has originated,
- * this can be either one of the well-known metadata trees or
- * the subvolume id which references this extent.
- *
- * @owner: For data extents it is the inode number of the owning file.
- * For metadata extents this parameter holds the level in the
- * tree of the extent.
- *
- * @offset: For metadata extents the offset is ignored and is currently
- * always passed as 0. For data extents it is the fileoffset
- * this extent belongs to.
- *
- * @refs_to_add Number of references to add
- *
* @extent_op Pointer to a structure, holding information necessary when
* updating a tree block's flags
*
*/
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
- u64 parent, u64 root_objectid,
- u64 owner, u64 offset, int refs_to_add,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_extent_item *item;
struct btrfs_key key;
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
+ u64 owner = btrfs_delayed_ref_owner(node);
+ u64 offset = btrfs_delayed_ref_offset(node);
u64 refs;
+ int refs_to_add = node->ref_mod;
int ret;
path = btrfs_alloc_path();
@@ -1466,10 +1502,10 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
/* this will setup the path even if it fails to insert the back ref */
ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
- parent, root_objectid, owner,
+ node->parent, node->ref_root, owner,
offset, refs_to_add, extent_op);
if ((ret < 0 && ret != -EAGAIN) || !ret)
- goto out;
+ return ret;
/*
* Ok we had -EAGAIN which means we didn't have space to insert and
@@ -1484,65 +1520,84 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, item);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
/* now insert the actual backref */
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- BUG_ON(refs_to_add != 1);
- ret = insert_tree_block_ref(trans, path, bytenr, parent,
- root_objectid);
+ ret = insert_tree_block_ref(trans, path, node, bytenr);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
} else {
- ret = insert_extent_data_ref(trans, path, bytenr, parent,
- root_objectid, owner, offset,
- refs_to_add);
+ ret = insert_extent_data_ref(trans, path, node, bytenr);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
}
- if (ret)
- btrfs_abort_transaction(trans, ret);
-out:
- btrfs_free_path(path);
+
return ret;
}
+static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
+ const struct btrfs_delayed_ref_head *href)
+{
+ u64 root = href->owning_root;
+
+ /*
+ * Don't check must_insert_reserved, as this is called from contexts
+ * where it has already been unset.
+ */
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE ||
+ !href->is_data || !btrfs_is_fstree(root))
+ return;
+
+ btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
+ BTRFS_QGROUP_RSV_DATA);
+}
+
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_ref_head *href,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
- int insert_reserved)
+ bool insert_reserved)
{
int ret = 0;
- struct btrfs_delayed_data_ref *ref;
- struct btrfs_key ins;
u64 parent = 0;
- u64 ref_root = 0;
u64 flags = 0;
- ins.objectid = node->bytenr;
- ins.offset = node->num_bytes;
- ins.type = BTRFS_EXTENT_ITEM_KEY;
-
- ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
+ trace_run_delayed_data_ref(trans->fs_info, node);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
- parent = ref->parent;
- ref_root = ref->root;
+ parent = node->parent;
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
+ struct btrfs_key key;
+ struct btrfs_squota_delta delta = {
+ .root = href->owning_root,
+ .num_bytes = node->num_bytes,
+ .is_data = true,
+ .is_inc = true,
+ .generation = trans->transid,
+ };
+ u64 owner = btrfs_delayed_ref_owner(node);
+ u64 offset = btrfs_delayed_ref_offset(node);
+
if (extent_op)
flags |= extent_op->flags_to_set;
- ret = alloc_reserved_file_extent(trans, parent, ref_root,
- flags, ref->objectid,
- ref->offset, &ins,
- node->ref_mod);
+
+ key.objectid = node->bytenr;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = node->num_bytes;
+
+ ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
+ flags, owner, offset, &key,
+ node->ref_mod,
+ href->owning_root);
+ free_head_ref_squota_rsv(trans->fs_info, href);
+ if (!ret)
+ ret = btrfs_record_squota_delta(trans->fs_info, &delta);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
- ref->objectid, ref->offset,
- node->ref_mod, extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, node, parent,
- ref_root, ref->objectid,
- ref->offset, node->ref_mod,
- extent_op);
+ ret = __btrfs_free_extent(trans, href, node, extent_op);
} else {
BUG();
}
@@ -1568,23 +1623,23 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head,
+ const struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_extent_item *ei;
struct extent_buffer *leaf;
u32 item_size;
int ret;
- int err = 0;
- int metadata = !extent_op->is_data;
+ int metadata = 1;
if (TRANS_ABORTED(trans))
return 0;
- if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+ if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
metadata = 0;
path = btrfs_alloc_path();
@@ -1595,19 +1650,18 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
if (metadata) {
key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = extent_op->level;
+ key.offset = head->level;
} else {
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = head->num_bytes;
}
+ root = btrfs_extent_root(fs_info, key.objectid);
again:
- ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
- err = ret;
- goto out;
- }
- if (ret > 0) {
+ return ret;
+ } else if (ret > 0) {
if (metadata) {
if (path->slots[0] > 0) {
path->slots[0]--;
@@ -1623,68 +1677,77 @@ again:
metadata = 0;
key.objectid = head->bytenr;
- key.offset = head->num_bytes;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = head->num_bytes;
goto again;
}
} else {
- err = -EIO;
- goto out;
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "missing extent item for extent %llu num_bytes %llu level %d",
+ head->bytenr, head->num_bytes, head->level);
+ return ret;
}
}
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
if (unlikely(item_size < sizeof(*ei))) {
- err = -EINVAL;
- btrfs_print_v0_err(fs_info);
- btrfs_abort_transaction(trans, err);
- goto out;
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
- btrfs_mark_buffer_dirty(leaf);
-out:
- btrfs_free_path(path);
- return err;
+ return ret;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_ref_head *href,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
- int insert_reserved)
+ bool insert_reserved)
{
int ret = 0;
- struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
u64 parent = 0;
u64 ref_root = 0;
- ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
+ trace_run_delayed_tree_ref(trans->fs_info, node);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- parent = ref->parent;
- ref_root = ref->root;
+ parent = node->parent;
+ ref_root = node->ref_root;
- if (node->ref_mod != 1) {
+ if (unlikely(node->ref_mod != 1)) {
btrfs_err(trans->fs_info,
- "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
- return -EIO;
+ return -EUCLEAN;
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
- BUG_ON(!extent_op || !extent_op->update_flags);
+ struct btrfs_squota_delta delta = {
+ .root = href->owning_root,
+ .num_bytes = fs_info->nodesize,
+ .is_data = false,
+ .is_inc = true,
+ .generation = trans->transid,
+ };
+
ret = alloc_reserved_tree_block(trans, node, extent_op);
+ if (!ret)
+ btrfs_record_squota_delta(fs_info, &delta);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
- ref->level, 0, 1, extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, node, parent, ref_root,
- ref->level, 0, 1, extent_op);
+ ret = __btrfs_free_extent(trans, href, node, extent_op);
} else {
BUG();
}
@@ -1693,67 +1756,43 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_ref_head *href,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
- int insert_reserved)
+ bool insert_reserved)
{
int ret = 0;
if (TRANS_ABORTED(trans)) {
- if (insert_reserved)
- btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+ if (insert_reserved) {
+ btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
+ free_head_ref_squota_rsv(trans->fs_info, href);
+ }
return 0;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = run_delayed_tree_ref(trans, node, extent_op,
+ ret = run_delayed_tree_ref(trans, href, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
- ret = run_delayed_data_ref(trans, node, extent_op,
+ ret = run_delayed_data_ref(trans, href, node, extent_op,
insert_reserved);
+ else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY)
+ ret = 0;
else
BUG();
if (ret && insert_reserved)
- btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+ btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
+ if (ret < 0)
+ btrfs_err(trans->fs_info,
+"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
+ node->bytenr, node->num_bytes, node->type,
+ node->action, node->ref_mod, ret);
return ret;
}
-static inline struct btrfs_delayed_ref_node *
-select_delayed_ref(struct btrfs_delayed_ref_head *head)
-{
- struct btrfs_delayed_ref_node *ref;
-
- if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
- return NULL;
-
- /*
- * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
- * This is to prevent a ref count from going down to zero, which deletes
- * the extent item from the extent tree, when there still are references
- * to add, which would fail because they would not find the extent item.
- */
- if (!list_empty(&head->ref_add_list))
- return list_first_entry(&head->ref_add_list,
- struct btrfs_delayed_ref_node, add_list);
-
- ref = rb_entry(rb_first_cached(&head->ref_tree),
- struct btrfs_delayed_ref_node, ref_node);
- ASSERT(list_empty(&ref->add_list));
- return ref;
-}
-
-static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head)
-{
- spin_lock(&delayed_refs->lock);
- head->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_delayed_ref_unlock(head);
-}
-
static struct btrfs_delayed_extent_op *cleanup_extent_op(
struct btrfs_delayed_ref_head *head)
{
@@ -1786,28 +1825,38 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
return ret ? ret : 1;
}
-void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
- int nr_items = 1; /* Dropping this ref head update. */
+ u64 ret = 0;
/*
* We had csum deletions accounted for in our delayed refs rsv, we need
* to drop the csum leaves for this update from our delayed_refs_rsv.
*/
if (head->total_ref_mod < 0 && head->is_data) {
+ int nr_csums;
+
spin_lock(&delayed_refs->lock);
delayed_refs->pending_csums -= head->num_bytes;
spin_unlock(&delayed_refs->lock);
- nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
+ nr_csums = btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
+
+ btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);
+
+ ret = btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
}
+ /* must_insert_reserved can be set only if we didn't run the head ref. */
+ if (head->must_insert_reserved)
+ free_head_ref_squota_rsv(fs_info, head);
- btrfs_delayed_refs_rsv_release(fs_info, nr_items);
+ return ret;
}
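A rough, userspace-only model of the csum accounting above, included for illustration: the constants (4 KiB sectors, an assumed number of checksum entries per leaf, 16 KiB nodes) are placeholders, and this is not the kernel's exact reservation formula (see btrfs_csum_bytes_to_leaves() and btrfs_calc_delayed_ref_csum_bytes()).

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE	4096ULL		/* assumed sector size */
#define CSUMS_PER_LEAF	2000ULL		/* assumed; depends on leaf size and csum type */
#define NODESIZE	16384ULL	/* assumed node size */

int main(void)
{
	uint64_t extent_bytes = 128ULL * 1024 * 1024;	/* data extent being freed */
	uint64_t num_csums = extent_bytes / SECTORSIZE;
	uint64_t nr_csum_leaves = (num_csums + CSUMS_PER_LEAF - 1) / CSUMS_PER_LEAF;
	/* Very rough stand-in for the per-leaf metadata reservation released. */
	uint64_t rsv_bytes = nr_csum_leaves * NODESIZE;

	printf("%llu csums -> %llu csum leaves -> ~%llu reserved bytes released\n",
	       (unsigned long long)num_csums,
	       (unsigned long long)nr_csum_leaves,
	       (unsigned long long)rsv_bytes);
	return 0;
}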
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head)
+ struct btrfs_delayed_ref_head *head,
+ u64 *bytes_released)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1818,7 +1867,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
ret = run_and_cleanup_extent_op(trans, head);
if (ret < 0) {
- unselect_delayed_ref_head(delayed_refs, head);
+ btrfs_unselect_ref_head(delayed_refs, head);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
return ret;
} else if (ret) {
@@ -1837,19 +1886,22 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
return 1;
}
- btrfs_delete_ref_head(delayed_refs, head);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
if (head->must_insert_reserved) {
- btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
+ btrfs_pin_extent(trans, head->bytenr, head->num_bytes);
if (head->is_data) {
- ret = btrfs_del_csums(trans, fs_info->csum_root,
- head->bytenr, head->num_bytes);
+ struct btrfs_root *csum_root;
+
+ csum_root = btrfs_csum_root(fs_info, head->bytenr);
+ ret = btrfs_del_csums(trans, csum_root, head->bytenr,
+ head->num_bytes);
}
}
- btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
+ *bytes_released += btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
@@ -1857,48 +1909,15 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
return ret;
}
-static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
- struct btrfs_trans_handle *trans)
-{
- struct btrfs_delayed_ref_root *delayed_refs =
- &trans->transaction->delayed_refs;
- struct btrfs_delayed_ref_head *head = NULL;
- int ret;
-
- spin_lock(&delayed_refs->lock);
- head = btrfs_select_ref_head(delayed_refs);
- if (!head) {
- spin_unlock(&delayed_refs->lock);
- return head;
- }
-
- /*
- * Grab the lock that says we are going to process all the refs for
- * this head
- */
- ret = btrfs_delayed_ref_lock(delayed_refs, head);
- spin_unlock(&delayed_refs->lock);
-
- /*
- * We may have dropped the spin lock to get the head mutex lock, and
- * that might have given someone else time to free the head. If that's
- * true, it has been removed from our list and we can move on.
- */
- if (ret == -EAGAIN)
- head = ERR_PTR(-EAGAIN);
-
- return head;
-}
-
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *locked_ref,
- unsigned long *run_refs)
+ struct btrfs_delayed_ref_head *locked_ref,
+ u64 *bytes_released)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_extent_op *extent_op;
struct btrfs_delayed_ref_node *ref;
- int must_insert_reserved = 0;
+ bool must_insert_reserved;
int ret;
delayed_refs = &trans->transaction->delayed_refs;
@@ -1906,16 +1925,14 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
lockdep_assert_held(&locked_ref->mutex);
lockdep_assert_held(&locked_ref->lock);
- while ((ref = select_delayed_ref(locked_ref))) {
+ while ((ref = btrfs_select_delayed_ref(locked_ref))) {
if (ref->seq &&
btrfs_check_delayed_seq(fs_info, ref->seq)) {
spin_unlock(&locked_ref->lock);
- unselect_delayed_ref_head(delayed_refs, locked_ref);
+ btrfs_unselect_ref_head(delayed_refs, locked_ref);
return -EAGAIN;
}
- (*run_refs)++;
- ref->in_tree = 0;
rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
@@ -1935,28 +1952,33 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
default:
WARN_ON(1);
}
- atomic_dec(&delayed_refs->num_entries);
/*
* Record the must_insert_reserved flag before we drop the
* spin lock.
*/
must_insert_reserved = locked_ref->must_insert_reserved;
- locked_ref->must_insert_reserved = 0;
+ /*
+ * Unsetting this on the head ref relinquishes ownership of
+ * the rsv_bytes, so it is critical that every possible code
+ * path from here forward frees all reserves including qgroup
+ * reserve.
+ */
+ locked_ref->must_insert_reserved = false;
extent_op = locked_ref->extent_op;
locked_ref->extent_op = NULL;
spin_unlock(&locked_ref->lock);
- ret = run_one_delayed_ref(trans, ref, extent_op,
+ ret = run_one_delayed_ref(trans, locked_ref, ref, extent_op,
must_insert_reserved);
+ btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+ *bytes_released += btrfs_calc_delayed_ref_bytes(fs_info, 1);
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- unselect_delayed_ref_head(delayed_refs, locked_ref);
+ btrfs_unselect_ref_head(delayed_refs, locked_ref);
btrfs_put_delayed_ref(ref);
- btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
- ret);
return ret;
}
@@ -1964,7 +1986,7 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
cond_resched();
spin_lock(&locked_ref->lock);
- btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
+ btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
}
return 0;
@@ -1975,20 +1997,30 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
*/
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long nr)
+ u64 min_bytes)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *locked_ref = NULL;
- ktime_t start = ktime_get();
int ret;
unsigned long count = 0;
- unsigned long actual_count = 0;
+ unsigned long max_count = 0;
+ u64 bytes_processed = 0;
delayed_refs = &trans->transaction->delayed_refs;
+ if (min_bytes == 0) {
+ /*
+ * We may be subject to a harmless race if some task is
+ * concurrently adding or removing a delayed ref, so silence
+ * KCSAN and similar tools.
+ */
+ max_count = data_race(delayed_refs->num_heads_ready);
+ min_bytes = U64_MAX;
+ }
+
do {
if (!locked_ref) {
- locked_ref = btrfs_obtain_ref_head(trans);
+ locked_ref = btrfs_select_ref_head(fs_info, delayed_refs);
if (IS_ERR_OR_NULL(locked_ref)) {
if (PTR_ERR(locked_ref) == -EAGAIN) {
continue;
@@ -2011,10 +2043,9 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* insert_inline_extent_backref()).
*/
spin_lock(&locked_ref->lock);
- btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
+ btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
- ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
- &actual_count);
+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &bytes_processed);
if (ret < 0 && ret != -EAGAIN) {
/*
* Error, btrfs_run_delayed_refs_for_head already
@@ -2026,7 +2057,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* Success, perform the usual cleanup of a processed
* head
*/
- ret = cleanup_ref_head(trans, locked_ref);
+ ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
if (ret > 0 ) {
/* We dropped our lock, we need to loop. */
ret = 0;
@@ -2043,26 +2074,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
locked_ref = NULL;
cond_resched();
- } while ((nr != -1 && count < nr) || locked_ref);
+ } while ((min_bytes != U64_MAX && bytes_processed < min_bytes) ||
+ (max_count > 0 && count < max_count) ||
+ locked_ref);
- /*
- * We don't want to include ref heads since we can have empty ref heads
- * and those will drastically skew our runtime down since we just do
- * accounting, no actual extent tree updates.
- */
- if (actual_count > 0) {
- u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
- u64 avg;
-
- /*
- * We weigh the current average higher than our current runtime
- * to avoid large swings in the average.
- */
- spin_lock(&delayed_refs->lock);
- avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
- fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
- spin_unlock(&delayed_refs->lock);
- }
return 0;
}
@@ -2110,24 +2125,25 @@ static u64 find_middle(struct rb_root *root)
#endif
/*
- * this starts processing the delayed reference count updates and
- * extent insertions we have queued up so far. count can be
- * 0, which means to process everything in the tree at the start
- * of the run (but not newly added entries), or it can be some target
- * number you'd like to process.
+ * Start processing the delayed reference count updates and extent insertions
+ * we have queued up so far.
+ *
+ * @trans: Transaction handle.
+ * @min_bytes: How many bytes of delayed references to process. After this
+ * many bytes we stop processing delayed references if there are
+ * any more. If 0 it means to run all existing delayed references,
+ * but not new ones added after running all existing ones.
+ * Use (u64)-1 (U64_MAX) to run all existing delayed references
+ * plus any new ones that are added.
*
* Returns 0 on success or if called with an aborted transaction
* Returns <0 on error and aborts the transaction
*/
-int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long count)
+int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_head *head;
int ret;
- int run_all = count == (unsigned long)-1;
/* We'll clean this up in btrfs_cleanup_transaction */
if (TRANS_ABORTED(trans))
@@ -2137,48 +2153,35 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
return 0;
delayed_refs = &trans->transaction->delayed_refs;
- if (count == 0)
- count = delayed_refs->num_heads_ready;
-
again:
#ifdef SCRAMBLE_DELAYED_REFS
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
- ret = __btrfs_run_delayed_refs(trans, count);
- if (ret < 0) {
+ ret = __btrfs_run_delayed_refs(trans, min_bytes);
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
- if (run_all) {
+ if (min_bytes == U64_MAX) {
btrfs_create_pending_block_groups(trans);
spin_lock(&delayed_refs->lock);
- node = rb_first_cached(&delayed_refs->href_root);
- if (!node) {
+ if (xa_empty(&delayed_refs->head_refs)) {
spin_unlock(&delayed_refs->lock);
- goto out;
+ return 0;
}
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
- refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
- /* Mutex was contended, block until it's released and retry. */
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
-
- btrfs_put_delayed_ref_head(head);
cond_resched();
goto again;
}
-out:
+
return 0;
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct extent_buffer *eb, u64 flags,
- int level, int is_data)
+ struct extent_buffer *eb, u64 flags)
{
struct btrfs_delayed_extent_op *extent_op;
int ret;
@@ -2190,22 +2193,21 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
extent_op->flags_to_set = flags;
extent_op->update_flags = true;
extent_op->update_key = false;
- extent_op->is_data = is_data ? true : false;
- extent_op->level = level;
- ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
+ ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len,
+ btrfs_header_level(eb), extent_op);
if (ret)
btrfs_free_delayed_extent_op(extent_op);
return ret;
}
-static noinline int check_delayed_ref(struct btrfs_root *root,
+static noinline int check_delayed_ref(struct btrfs_inode *inode,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr)
+ u64 offset, u64 bytenr)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_data_ref *data_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_transaction *cur_trans;
struct rb_node *node;
@@ -2221,7 +2223,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
delayed_refs = &cur_trans->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
btrfs_put_transaction(cur_trans);
@@ -2229,6 +2231,12 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
}
if (!mutex_trylock(&head->mutex)) {
+ if (path->nowait) {
+ spin_unlock(&delayed_refs->lock);
+ btrfs_put_transaction(cur_trans);
+ return -EAGAIN;
+ }
+
refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
@@ -2253,6 +2261,9 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
*/
for (node = rb_first_cached(&head->ref_tree); node;
node = rb_next(node)) {
+ u64 ref_owner;
+ u64 ref_offset;
+
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
@@ -2260,15 +2271,15 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
break;
}
- data_ref = btrfs_delayed_node_to_data_ref(ref);
+ ref_owner = btrfs_delayed_ref_owner(ref);
+ ref_offset = btrfs_delayed_ref_offset(ref);
/*
* If our ref doesn't match the one we're currently looking at
* then we have a cross reference.
*/
- if (data_ref->root != root->root_key.objectid ||
- data_ref->objectid != objectid ||
- data_ref->offset != offset) {
+ if (ref->ref_root != btrfs_root_id(root) ||
+ ref_owner != btrfs_ino(inode) || ref_offset != offset) {
ret = 1;
break;
}
@@ -2279,103 +2290,167 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
return ret;
}
-static noinline int check_committed_ref(struct btrfs_root *root,
+/*
+ * Check if there are references for a data extent other than the one belonging
+ * to the given inode and offset.
+ *
+ * @inode: The only inode we expect to find associated with the data extent.
+ * @path: A path to use for searching the extent tree.
+ * @offset: The only offset we expect to find associated with the data extent.
+ * @bytenr: The logical address of the data extent.
+ *
+ * When the extent does not have any references other than the one we
+ * expect to find, we always return a value of 0 with the path having a locked
+ * leaf that contains the extent's extent item - this is necessary to ensure
+ * we don't race with a task running delayed references, and our caller must
+ * have such a path when calling check_delayed_ref() - it must lock a delayed
+ * ref head while holding the leaf locked. In case the extent item is not found
+ * in the extent tree, we return -ENOENT with the path having the leaf (locked)
+ * where the extent item should be, in order to prevent races with another task
+ * running delayed references, so that we don't miss any reference when calling
+ * check_delayed_ref().
+ *
+ * Note: this may return false positives, and this is because we want to be
+ * quick here as we're called in write paths (when flushing delalloc and
+ * in the direct IO write path). For example we can have an extent with
+ * a single reference but that reference is not inlined, or we may have
+ * many references in the extent tree but we also have delayed references
+ * that cancel all the reference except the one for our inode and offset,
+ * but it would be expensive to do such checks and complex due to all
+ * locking to avoid races between the checks and flushing delayed refs,
+ * plus non-inline references may be located on leaves other than the one
+ * that contains the extent item in the extent tree. The important thing
+ * here is to not return false negatives and that the false positives are
+ * not very common.
+ *
+ * Returns: 0 if there are no cross references and with the path having a locked
+ * leaf from the extent tree that contains the extent's extent item.
+ *
+ * 1 if there are cross references (false positives can happen).
+ *
+ * < 0 in case of an error. In case of -ENOENT the leaf in the extent
+ * tree where the extent item should be located is read locked and
+ * accessible in the given path.
+ */
+static noinline int check_committed_ref(struct btrfs_inode *inode,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr,
- bool strict)
+ u64 offset, u64 bytenr)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
struct extent_buffer *leaf;
struct btrfs_extent_data_ref *ref;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_item *ei;
struct btrfs_key key;
u32 item_size;
+ u32 expected_size;
int type;
int ret;
key.objectid = bytenr;
- key.offset = (u64)-1;
key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- BUG_ON(ret == 0); /* Corruption */
+ return ret;
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found; there would have to exist an extent
+ * item with such offset, but this is out of the valid range.
+ */
+ return -EUCLEAN;
+ }
- ret = -ENOENT;
if (path->slots[0] == 0)
- goto out;
+ return -ENOENT;
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
- goto out;
+ return -ENOENT;
- ret = 1;
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY);
- /* If extent item has more than 1 inline ref then it's shared */
- if (item_size != sizeof(*ei) +
- btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
- goto out;
-
- /*
- * If extent created before last snapshot => it's shared unless the
- * snapshot has been deleted. Use the heuristic if strict is false.
- */
- if (!strict &&
- (btrfs_extent_generation(leaf, ei) <=
- btrfs_root_last_snapshot(&root->root_item)))
- goto out;
+ /* No inline refs; we need to bail before checking for owner ref. */
+ if (item_size == sizeof(*ei))
+ return 1;
+ /* Check for an owner ref; skip over it to the real inline refs. */
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
+ if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
+ iref = (struct btrfs_extent_inline_ref *)(iref + 1);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
+ }
+
+ /* If extent item has more than 1 inline ref then it's shared */
+ if (item_size != expected_size)
+ return 1;
/* If this extent has SHARED_DATA_REF then it's shared */
- type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
if (type != BTRFS_EXTENT_DATA_REF_KEY)
- goto out;
+ return 1;
ref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (btrfs_extent_refs(leaf, ei) !=
btrfs_extent_data_ref_count(leaf, ref) ||
- btrfs_extent_data_ref_root(leaf, ref) !=
- root->root_key.objectid ||
- btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
+ btrfs_extent_data_ref_root(leaf, ref) != btrfs_root_id(root) ||
+ btrfs_extent_data_ref_objectid(leaf, ref) != btrfs_ino(inode) ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
- goto out;
+ return 1;
- ret = 0;
-out:
- return ret;
+ return 0;
}
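A small userspace sketch of the item-size quick check performed by check_committed_ref(): if the extent item carries exactly one inline data ref, optionally preceded by a simple-quotas owner ref, it cannot be shared through the extent tree. The byte sizes below are made-up demo constants, not the real on-disk structure sizes.

#include <stdbool.h>
#include <stdio.h>

/* Made-up sizes for the demo; the real ones come from the on-disk structs. */
#define EXTENT_ITEM_SIZE	24u
#define OWNER_REF_SIZE		9u
#define DATA_REF_SIZE		29u

/* Return true when the item can only describe a single (non-shared) data ref. */
static bool single_inline_data_ref(unsigned int item_size, bool has_owner_ref)
{
	unsigned int expected = EXTENT_ITEM_SIZE + DATA_REF_SIZE;

	if (has_owner_ref)
		expected += OWNER_REF_SIZE;

	return item_size == expected;
}

int main(void)
{
	printf("one data ref:             %d\n", single_inline_data_ref(53, false));
	printf("owner ref + one data ref: %d\n", single_inline_data_ref(62, true));
	printf("two data refs (shared):   %d\n", single_inline_data_ref(82, false));
	return 0;
}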
-int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
- u64 bytenr, bool strict)
+int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset,
+ u64 bytenr, struct btrfs_path *path)
{
- struct btrfs_path *path;
int ret;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
do {
- ret = check_committed_ref(root, path, objectid,
- offset, bytenr, strict);
+ ret = check_committed_ref(inode, path, offset, bytenr);
if (ret && ret != -ENOENT)
goto out;
- ret = check_delayed_ref(root, path, objectid, offset, bytenr);
- } while (ret == -EAGAIN);
+ /*
+ * The path must have a locked leaf from the extent tree where
+ * the extent item for our extent is located, in case it exists,
+ * or where it should be located in case it doesn't exist yet
+ * because it's new and its delayed ref was not yet flushed.
+ * We need to lock the delayed ref head at check_delayed_ref(),
+ * if one exists, while holding the leaf locked so that we don't
+ * race with delayed ref flushing, miss references, and
+ * incorrectly report that the extent is not shared.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
+ struct extent_buffer *leaf = path->nodes[0];
+
+ ASSERT(leaf != NULL);
+ btrfs_assert_tree_read_locked(leaf);
+
+ if (ret != -ENOENT) {
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ ASSERT(key.objectid == bytenr);
+ ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY);
+ }
+ }
+
+ ret = check_delayed_ref(inode, path, offset, bytenr);
+ } while (ret == -EAGAIN && !path->nowait);
out:
- btrfs_free_path(path);
- if (btrfs_is_data_reloc_root(root))
+ btrfs_release_path(path);
+ if (btrfs_is_data_reloc_root(inode->root))
WARN_ON(ret > 0);
return ret;
}
@@ -2383,17 +2458,14 @@ out:
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
- int full_backref, int inc)
+ bool full_backref, bool inc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- u64 bytenr;
- u64 num_bytes;
u64 parent;
u64 ref_root;
u32 nritems;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct btrfs_ref generic_ref = { 0 };
bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
int i;
int action;
@@ -2420,6 +2492,12 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
action = BTRFS_DROP_DELAYED_REF;
for (i = 0; i < nritems; i++) {
+ struct btrfs_ref ref = {
+ .action = action,
+ .parent = parent,
+ .ref_root = ref_root,
+ };
+
if (level == 0) {
btrfs_item_key_to_cpu(buf, &key, i);
if (key.type != BTRFS_EXTENT_DATA_KEY)
@@ -2429,34 +2507,33 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
if (btrfs_file_extent_type(buf, fi) ==
BTRFS_FILE_EXTENT_INLINE)
continue;
- bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
- if (bytenr == 0)
+ ref.bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
+ if (ref.bytenr == 0)
continue;
- num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
+ ref.num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
+ ref.owning_root = ref_root;
+
key.offset -= btrfs_file_extent_offset(buf, fi);
- btrfs_init_generic_ref(&generic_ref, action, bytenr,
- num_bytes, parent);
- btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
- key.offset, root->root_key.objectid,
- for_reloc);
+ btrfs_init_data_ref(&ref, key.objectid, key.offset,
+ btrfs_root_id(root), for_reloc);
if (inc)
- ret = btrfs_inc_extent_ref(trans, &generic_ref);
+ ret = btrfs_inc_extent_ref(trans, &ref);
else
- ret = btrfs_free_extent(trans, &generic_ref);
+ ret = btrfs_free_extent(trans, &ref);
if (ret)
goto fail;
} else {
- bytenr = btrfs_node_blockptr(buf, i);
- num_bytes = fs_info->nodesize;
- btrfs_init_generic_ref(&generic_ref, action, bytenr,
- num_bytes, parent);
- btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
- root->root_key.objectid, for_reloc);
+ /* We don't know the owning_root, leave as 0. */
+ ref.bytenr = btrfs_node_blockptr(buf, i);
+ ref.num_bytes = fs_info->nodesize;
+
+ btrfs_init_tree_ref(&ref, level - 1,
+ btrfs_root_id(root), for_reloc);
if (inc)
- ret = btrfs_inc_extent_ref(trans, &generic_ref);
+ ret = btrfs_inc_extent_ref(trans, &ref);
else
- ret = btrfs_free_extent(trans, &generic_ref);
+ ret = btrfs_free_extent(trans, &ref);
if (ret)
goto fail;
}
@@ -2467,15 +2544,15 @@ fail:
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref)
+ struct extent_buffer *buf, bool full_backref)
{
- return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
+ return __btrfs_mod_ref(trans, root, buf, full_backref, true);
}
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref)
+ struct extent_buffer *buf, bool full_backref)
{
- return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
+ return __btrfs_mod_ref(trans, root, buf, full_backref, false);
}
static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
@@ -2495,97 +2572,81 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
return ret;
}
-static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
+static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
{
- struct btrfs_block_group *cache;
- u64 bytenr;
+ struct rb_node *leftmost;
+ u64 bytenr = 0;
- spin_lock(&fs_info->block_group_cache_lock);
- bytenr = fs_info->first_logical_byte;
- spin_unlock(&fs_info->block_group_cache_lock);
+ read_lock(&fs_info->block_group_cache_lock);
+ /* Get the block group with the lowest logical start address. */
+ leftmost = rb_first_cached(&fs_info->block_group_cache_tree);
+ if (leftmost) {
+ struct btrfs_block_group *bg;
- if (bytenr < (u64)-1)
- return bytenr;
-
- cache = btrfs_lookup_first_block_group(fs_info, search_start);
- if (!cache)
- return 0;
-
- bytenr = cache->start;
- btrfs_put_block_group(cache);
+ bg = rb_entry(leftmost, struct btrfs_block_group, cache_node);
+ bytenr = bg->start;
+ }
+ read_unlock(&fs_info->block_group_cache_lock);
return bytenr;
}
static int pin_down_extent(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *cache,
- u64 bytenr, u64 num_bytes, int reserved)
+ struct btrfs_block_group *bg,
+ u64 bytenr, u64 num_bytes, bool reserved)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
- num_bytes);
- if (reserved) {
- cache->reserved -= num_bytes;
- cache->space_info->bytes_reserved -= num_bytes;
- }
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
- bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
+ struct btrfs_space_info *space_info = bg->space_info;
+ const u64 reserved_bytes = (reserved ? num_bytes : 0);
+
+ spin_lock(&space_info->lock);
+ spin_lock(&bg->lock);
+ bg->pinned += num_bytes;
+ bg->reserved -= reserved_bytes;
+ spin_unlock(&bg->lock);
+ space_info->bytes_reserved -= reserved_bytes;
+ btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
+ spin_unlock(&space_info->lock);
+
+ btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+ bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
return 0;
}
-int btrfs_pin_extent(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, int reserved)
+int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes)
{
struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
BUG_ON(!cache); /* Logic error */
- pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
+ pin_down_extent(trans, cache, bytenr, num_bytes, true);
btrfs_put_block_group(cache);
return 0;
}
-/*
- * this function must be called within transaction
- */
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes)
+ const struct extent_buffer *eb)
{
struct btrfs_block_group *cache;
int ret;
- cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
+ cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
if (!cache)
return -EINVAL;
/*
- * pull in the free space cache (if any) so that our pin
- * removes the free space from the cache. We have load_only set
- * to one because the slow code to read in the free extents does check
- * the pinned extents.
- */
- btrfs_cache_block_group(cache, 1);
- /*
- * Make sure we wait until the cache is completely built in case it is
- * missing or is invalid and therefore needs to be rebuilt.
+ * Fully cache the free space first so that our pin removes the free space
+ * from the cache.
*/
- ret = btrfs_wait_block_group_cache_done(cache);
+ ret = btrfs_cache_block_group(cache, true);
if (ret)
goto out;
- pin_down_extent(trans, cache, bytenr, num_bytes, 0);
+ pin_down_extent(trans, cache, eb->start, eb->len, false);
/* remove us from the free space cache (if we're there at all) */
- ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
+ ret = btrfs_remove_free_space(cache, eb->start, eb->len);
out:
btrfs_put_block_group(cache);
return ret;
@@ -2601,12 +2662,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
if (!block_group)
return -EINVAL;
- btrfs_cache_block_group(block_group, 1);
- /*
- * Make sure we wait until the cache is completely built in case it is
- * missing or is invalid and therefore needs to be rebuilt.
- */
- ret = btrfs_wait_block_group_cache_done(block_group);
+ ret = btrfs_cache_block_group(block_group, true);
if (ret)
goto out;
@@ -2689,22 +2745,24 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
{
struct btrfs_block_group *cache = NULL;
struct btrfs_space_info *space_info;
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_free_cluster *cluster = NULL;
- u64 len;
u64 total_unpinned = 0;
u64 empty_cluster = 0;
- bool readonly;
while (start <= end) {
- readonly = false;
+ u64 len;
+ bool readonly;
+
if (!cache ||
start >= cache->start + cache->length) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
cache = btrfs_lookup_block_group(fs_info, start);
- BUG_ON(!cache); /* Logic error */
+ if (unlikely(cache == NULL)) {
+ /* Logic error, something removed the block group. */
+ return -EUCLEAN;
+ }
cluster = fetch_cluster_info(fs_info,
cache->space_info,
@@ -2715,13 +2773,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
len = cache->start + cache->length - start;
len = min(len, end + 1 - start);
- down_read(&fs_info->commit_root_sem);
- if (start < cache->last_byte_to_unpin && return_free_space) {
- u64 add_len = min(len, cache->last_byte_to_unpin - start);
-
- btrfs_add_free_space(cache, start, add_len);
- }
- up_read(&fs_info->commit_root_sem);
+ if (return_free_space)
+ btrfs_add_free_space(cache, start, len);
start += len;
total_unpinned += len;
@@ -2742,43 +2795,27 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
+ readonly = cache->ro;
cache->pinned -= len;
- btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
+ spin_unlock(&cache->lock);
+
+ btrfs_space_info_update_bytes_pinned(space_info, -len);
space_info->max_extent_size = 0;
- if (cache->ro) {
+
+ if (readonly) {
space_info->bytes_readonly += len;
- readonly = true;
} else if (btrfs_is_zoned(fs_info)) {
/* Need reset before reusing in a zoned block group */
- space_info->bytes_zone_unusable += len;
- readonly = true;
+ btrfs_space_info_update_bytes_zone_unusable(space_info, len);
+ } else if (return_free_space) {
+ btrfs_return_free_space(space_info, len);
}
- spin_unlock(&cache->lock);
- if (!readonly && return_free_space &&
- global_rsv->space_info == space_info) {
- u64 to_add = len;
-
- spin_lock(&global_rsv->lock);
- if (!global_rsv->full) {
- to_add = min(len, global_rsv->size -
- global_rsv->reserved);
- global_rsv->reserved += to_add;
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info, to_add);
- if (global_rsv->reserved >= global_rsv->size)
- global_rsv->full = 1;
- len -= to_add;
- }
- spin_unlock(&global_rsv->lock);
- }
- /* Add to any tickets we may have */
- if (!readonly && return_free_space && len)
- btrfs_try_granting_tickets(fs_info, space_info);
spin_unlock(&space_info->lock);
}
if (cache)
btrfs_put_block_group(cache);
+
return 0;
}
@@ -2787,34 +2824,63 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *block_group, *tmp;
struct list_head *deleted_bgs;
- struct extent_io_tree *unpin;
+ struct extent_io_tree *unpin = &trans->transaction->pinned_extents;
+ struct extent_state *cached_state = NULL;
u64 start;
u64 end;
+ int unpin_error = 0;
int ret;
- unpin = &trans->transaction->pinned_extents;
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, &cached_state);
- while (!TRANS_ABORTED(trans)) {
- struct extent_state *cached_state = NULL;
-
- mutex_lock(&fs_info->unused_bg_unpin_mutex);
- ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, &cached_state);
- if (ret) {
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- break;
- }
+ while (!TRANS_ABORTED(trans) && cached_state) {
+ struct extent_state *next_state;
if (btrfs_test_opt(fs_info, DISCARD_SYNC))
ret = btrfs_discard_extent(fs_info, start,
end + 1 - start, NULL);
- clear_extent_dirty(unpin, start, end, &cached_state);
- unpin_extent_range(fs_info, start, end, true);
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- free_extent_state(cached_state);
- cond_resched();
+ next_state = btrfs_next_extent_state(unpin, cached_state);
+ btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
+ ret = unpin_extent_range(fs_info, start, end, true);
+ /*
+ * If we get an error unpinning an extent range, store the first
+ * error to return later after trying to unpin all ranges and do
+ * the sync discards. Our caller will abort the transaction
+ * (which already wrote new superblocks) and on the next mount
+ * the space will be available as it was pinned by in-memory
+ * only structures in this phase.
+ */
+ if (ret) {
+ btrfs_err_rl(fs_info,
+"failed to unpin extent range [%llu, %llu] when committing transaction %llu: %s (%d)",
+ start, end, trans->transid,
+ btrfs_decode_error(ret), ret);
+ if (!unpin_error)
+ unpin_error = ret;
+ }
+
+ btrfs_free_extent_state(cached_state);
+
+ if (need_resched()) {
+ btrfs_free_extent_state(next_state);
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ cond_resched();
+ cached_state = NULL;
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_find_first_extent_bit(unpin, 0, &start, &end,
+ EXTENT_DIRTY, &cached_state);
+ } else {
+ cached_state = next_state;
+ if (cached_state) {
+ start = cached_state->start;
+ end = cached_state->end;
+ }
+ }
}
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ btrfs_free_extent_state(cached_state);
if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
btrfs_discard_calc_delay(&fs_info->discard_ctl);
@@ -2828,16 +2894,20 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
*/
deleted_bgs = &trans->transaction->deleted_bgs;
list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
- u64 trimmed = 0;
-
ret = -EROFS;
if (!TRANS_ABORTED(trans))
- ret = btrfs_discard_extent(fs_info,
- block_group->start,
- block_group->length,
- &trimmed);
+ ret = btrfs_discard_extent(fs_info, block_group->start,
+ block_group->length, NULL);
+ /*
+ * Not strictly necessary to lock, as the block_group should be
+ * read-only from btrfs_delete_unused_bgs().
+ */
+ ASSERT(block_group->ro);
+ spin_lock(&fs_info->unused_bgs_lock);
list_del_init(&block_group->bg_list);
+ spin_unlock(&fs_info->unused_bgs_lock);
+
btrfs_unfreeze_block_group(block_group);
btrfs_put_block_group(block_group);
@@ -2849,9 +2919,106 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
}
}
+ return unpin_error;
+}
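A tiny userspace illustration of the error handling strategy described in the comments above: remember the first failure, keep processing the remaining ranges, and report the saved error at the end. The ranges and the fake error value are demo data only.

#include <stdio.h>

#define DEMO_EIO 5	/* stand-in errno value for the demo */

/* Pretend to unpin one range; ranges 2 and 5 fail. */
static int unpin_range(int i)
{
	return (i == 2 || i == 5) ? -DEMO_EIO : 0;
}

int main(void)
{
	int first_err = 0;

	for (int i = 0; i < 8; i++) {
		int ret = unpin_range(i);

		if (ret) {
			fprintf(stderr, "failed to unpin range %d: %d\n", i, ret);
			if (!first_err)
				first_err = ret;	/* keep only the first error */
		}
	}
	printf("returning %d after processing all ranges\n", first_err);
	return first_err ? 1 : 0;
}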
+
+/*
+ * Parse an extent item's inline extents looking for a simple quotas owner ref.
+ *
+ * @fs_info: the btrfs_fs_info for this mount
+ * @leaf: a leaf in the extent tree containing the extent item
+ * @slot: the slot in the leaf where the extent item is found
+ *
+ * Returns the objectid of the root that originally allocated the extent item
+ * if the inline owner ref is expected and present, otherwise 0.
+ *
+ * If an extent item has an owner ref item, it will be the first inline ref
+ * item. Therefore the logic is to check whether there are any inline ref
+ * items, then check the type of the first one.
+ */
+u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf, int slot)
+{
+ struct btrfs_extent_item *ei;
+ struct btrfs_extent_inline_ref *iref;
+ struct btrfs_extent_owner_ref *oref;
+ unsigned long ptr;
+ unsigned long end;
+ int type;
+
+ if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA))
+ return 0;
+
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ ptr = (unsigned long)(ei + 1);
+ end = (unsigned long)ei + btrfs_item_size(leaf, slot);
+
+ /* No inline ref items of any kind, can't check type. */
+ if (ptr == end)
+ return 0;
+
+ iref = (struct btrfs_extent_inline_ref *)ptr;
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
+
+ /* We found an owner ref, get the root out of it. */
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
+ return btrfs_extent_owner_ref_root_id(leaf, oref);
+ }
+
+ /* We have inline refs, but not an owner ref. */
return 0;
}
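A compact userspace sketch of the "owner ref comes first" rule that btrfs_get_extent_owner_root() relies on: if any inline entries exist, only the first one needs to be type-checked. The entry layout here is a mock array, not the real inline ref encoding.

#include <stdint.h>
#include <stdio.h>

/* Mock inline entry: just a type tag plus a payload value. */
struct mock_inline_ref {
	int type;
	uint64_t payload;
};

enum { MOCK_OWNER_REF = 1, MOCK_DATA_REF = 2 };

/* Return the owning root if the first inline entry is an owner ref, else 0. */
static uint64_t owner_root(const struct mock_inline_ref *refs, int nr)
{
	if (nr == 0)
		return 0;			/* no inline entries at all */
	if (refs[0].type == MOCK_OWNER_REF)
		return refs[0].payload;		/* root id stored in the owner ref */
	return 0;				/* inline refs exist, but no owner ref */
}

int main(void)
{
	const struct mock_inline_ref with_owner[] = {
		{ MOCK_OWNER_REF, 258 }, { MOCK_DATA_REF, 0 },
	};
	const struct mock_inline_ref without_owner[] = {
		{ MOCK_DATA_REF, 0 },
	};

	printf("with owner ref:    root %llu\n",
	       (unsigned long long)owner_root(with_owner, 2));
	printf("without owner ref: root %llu\n",
	       (unsigned long long)owner_root(without_owner, 1));
	printf("no inline refs:    root %llu\n",
	       (unsigned long long)owner_root(NULL, 0));
	return 0;
}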
+static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
+ u64 bytenr, struct btrfs_squota_delta *delta)
+{
+ int ret;
+ u64 num_bytes = delta->num_bytes;
+
+ if (delta->is_data) {
+ struct btrfs_root *csum_root;
+
+ csum_root = btrfs_csum_root(trans->fs_info, bytenr);
+ ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, bytenr, num_bytes);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ }
+
+ ret = btrfs_record_squota_delta(trans->fs_info, delta);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ ret = btrfs_add_to_free_space_tree(trans, bytenr, num_bytes);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+
+ return ret;
+}
+
+#define abort_and_dump(trans, path, fmt, args...) \
+({ \
+ btrfs_abort_transaction(trans, -EUCLEAN); \
+ btrfs_print_leaf(path->nodes[0]); \
+ btrfs_crit(trans->fs_info, fmt, ##args); \
+})
+
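The abort_and_dump() helper above uses two GNU C extensions, a statement expression and named variadic macro arguments with ##args. A minimal standalone demo of the same construct follows; the names here are invented for the example.

#include <stdio.h>

/* Same shape as abort_and_dump(): statement expression + named variadic args. */
#define warn_and_dump(ctx, fmt, args...)				\
({									\
	fprintf(stderr, "context: %s\n", (ctx));			\
	fprintf(stderr, fmt "\n", ##args);				\
})

int main(void)
{
	warn_and_dump("leaf 12345", "invalid item at slot %d", 7);
	warn_and_dump("leaf 12345", "no arguments case");
	return 0;
}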
/*
* Drop one or more refs of @node.
*
@@ -2912,15 +3079,14 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
* And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
*/
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
+ struct btrfs_delayed_ref_head *href,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_key key;
- struct btrfs_path *path;
- struct btrfs_root *extent_root = info->extent_root;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct btrfs_root *extent_root;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
@@ -2929,12 +3095,18 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
int extent_slot = 0;
int found_extent = 0;
int num_to_del = 1;
+ int refs_to_drop = node->ref_mod;
u32 item_size;
u64 refs;
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
- int last_ref = 0;
+ u64 owner_objectid = btrfs_delayed_ref_owner(node);
+ u64 owner_offset = btrfs_delayed_ref_offset(node);
bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
+ u64 delayed_ref_root = href->owning_root;
+
+ extent_root = btrfs_extent_root(info, bytenr);
+ ASSERT(extent_root);
path = btrfs_alloc_path();
if (!path)
@@ -2942,20 +3114,20 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
- if (!is_data && refs_to_drop != 1) {
+ if (unlikely(!is_data && refs_to_drop != 1)) {
btrfs_crit(info,
"invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
node->bytenr, refs_to_drop);
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
if (is_data)
skinny_metadata = false;
ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
- parent, root_objectid, owner_objectid,
+ node->parent, node->ref_root, owner_objectid,
owner_offset);
if (ret == 0) {
/*
@@ -2982,26 +3154,25 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
break;
}
- /* Quick path didn't find the EXTEMT/METADATA_ITEM */
+ /* Quick path didn't find the EXTENT/METADATA_ITEM */
if (path->slots[0] - extent_slot > 5)
break;
extent_slot--;
}
if (!found_extent) {
- if (iref) {
- btrfs_crit(info,
-"invalid iref, no EXTENT/METADATA_ITEM found but has inline extent ref");
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+ if (unlikely(iref)) {
+ abort_and_dump(trans, path,
+"invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref",
+ path->slots[0]);
+ return -EUCLEAN;
}
/* Must be SHARED_* item, remove the backref first */
- ret = remove_extent_backref(trans, path, NULL,
- refs_to_drop,
- is_data, &last_ref);
- if (ret) {
+ ret = remove_extent_backref(trans, extent_root, path,
+ NULL, refs_to_drop, is_data);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
btrfs_release_path(path);
@@ -3042,64 +3213,63 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
if (ret) {
- btrfs_err(info,
- "umm, got %d back from search, was looking for %llu",
- ret, bytenr);
if (ret > 0)
btrfs_print_leaf(path->nodes[0]);
+ btrfs_err(info,
+ "umm, got %d back from search, was looking for %llu, slot %d",
+ ret, bytenr, path->slots[0]);
}
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
extent_slot = path->slots[0];
}
} else if (WARN_ON(ret == -ENOENT)) {
- btrfs_print_leaf(path->nodes[0]);
- btrfs_err(info,
- "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
- bytenr, parent, root_objectid, owner_objectid,
- owner_offset);
- btrfs_abort_transaction(trans, ret);
- goto out;
+ abort_and_dump(trans, path,
+"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d",
+ bytenr, node->parent, node->ref_root, owner_objectid,
+ owner_offset, path->slots[0]);
+ return ret;
} else {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, extent_slot);
+ item_size = btrfs_item_size(leaf, extent_slot);
if (unlikely(item_size < sizeof(*ei))) {
- ret = -EINVAL;
- btrfs_print_v0_err(info);
+ ret = -EUCLEAN;
+ btrfs_err(trans->fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
ei = btrfs_item_ptr(leaf, extent_slot,
struct btrfs_extent_item);
if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
key.type == BTRFS_EXTENT_ITEM_KEY) {
struct btrfs_tree_block_info *bi;
- if (item_size < sizeof(*ei) + sizeof(*bi)) {
- btrfs_crit(info,
-"invalid extent item size for key (%llu, %u, %llu) owner %llu, has %u expect >= %zu",
- key.objectid, key.type, key.offset,
- owner_objectid, item_size,
- sizeof(*ei) + sizeof(*bi));
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+
+ if (unlikely(item_size < sizeof(*ei) + sizeof(*bi))) {
+ abort_and_dump(trans, path,
+"invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu",
+ key.objectid, key.type, key.offset,
+ path->slots[0], owner_objectid, item_size,
+ sizeof(*ei) + sizeof(*bi));
+ return -EUCLEAN;
}
bi = (struct btrfs_tree_block_info *)(ei + 1);
WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
}
refs = btrfs_extent_refs(leaf, ei);
- if (refs < refs_to_drop) {
- btrfs_crit(info,
- "trying to drop %d refs but we only have %llu for bytenr %llu",
- refs_to_drop, refs, bytenr);
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+ if (unlikely(refs < refs_to_drop)) {
+ abort_and_dump(trans, path,
+ "trying to drop %d refs but we only have %llu for bytenr %llu slot %u",
+ refs_to_drop, refs, bytenr, path->slots[0]);
+ return -EUCLEAN;
}
refs -= refs_to_drop;
@@ -3111,45 +3281,49 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
* be updated by remove_extent_backref
*/
if (iref) {
- if (!found_extent) {
- btrfs_crit(info,
-"invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found");
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+ if (unlikely(!found_extent)) {
+ abort_and_dump(trans, path,
+"invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u",
+ path->slots[0]);
+ return -EUCLEAN;
}
} else {
btrfs_set_extent_refs(leaf, ei, refs);
- btrfs_mark_buffer_dirty(leaf);
}
if (found_extent) {
- ret = remove_extent_backref(trans, path, iref,
- refs_to_drop, is_data,
- &last_ref);
- if (ret) {
+ ret = remove_extent_backref(trans, extent_root, path,
+ iref, refs_to_drop, is_data);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
}
} else {
+ struct btrfs_squota_delta delta = {
+ .root = delayed_ref_root,
+ .num_bytes = num_bytes,
+ .is_data = is_data,
+ .is_inc = false,
+ .generation = btrfs_extent_generation(leaf, ei),
+ };
+
/* In this branch refs == 1 */
if (found_extent) {
- if (is_data && refs_to_drop !=
- extent_data_ref_count(path, iref)) {
- btrfs_crit(info,
- "invalid refs_to_drop, current refs %u refs_to_drop %u",
- extent_data_ref_count(path, iref),
- refs_to_drop);
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+ if (unlikely(is_data && refs_to_drop !=
+ extent_data_ref_count(path, iref))) {
+ abort_and_dump(trans, path,
+ "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u",
+ extent_data_ref_count(path, iref),
+ refs_to_drop, path->slots[0]);
+ return -EUCLEAN;
}
if (iref) {
- if (path->slots[0] != extent_slot) {
- btrfs_crit(info,
-"invalid iref, extent item key (%llu %u %llu) doesn't have wanted iref",
- key.objectid, key.type,
- key.offset);
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+ if (unlikely(path->slots[0] != extent_slot)) {
+ abort_and_dump(trans, path,
+"invalid iref, extent item key " BTRFS_KEY_FMT " slot %u doesn't have wanted iref",
+ BTRFS_KEY_FMT_VALUE(&key),
+ path->slots[0]);
+ return -EUCLEAN;
}
} else {
/*
@@ -3158,65 +3332,40 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
* | extent_slot ||extent_slot + 1|
* [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
*/
- if (path->slots[0] != extent_slot + 1) {
- btrfs_crit(info,
- "invalid SHARED_* item, previous item is not EXTENT/METADATA_ITEM");
- btrfs_abort_transaction(trans, -EUCLEAN);
- goto err_dump;
+ if (unlikely(path->slots[0] != extent_slot + 1)) {
+ abort_and_dump(trans, path,
+ "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM",
+ path->slots[0]);
+ return -EUCLEAN;
}
path->slots[0] = extent_slot;
num_to_del = 2;
}
}
+ /*
+ * We can't infer the data owner from the delayed ref, so we need
+ * to try to get it from the owning ref item.
+ *
+ * If it is not present, then that extent was not written under
+ * simple quotas mode, so we don't need to account for its deletion.
+ */
+ if (is_data)
+ delta.root = btrfs_get_extent_owner_root(trans->fs_info,
+ leaf, extent_slot);
- last_ref = 1;
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
btrfs_release_path(path);
- if (is_data) {
- ret = btrfs_del_csums(trans, info->csum_root, bytenr,
- num_bytes);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- }
-
- ret = add_to_free_space_tree(trans, bytenr, num_bytes);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
- ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
+ ret = do_free_extent_accounting(trans, bytenr, &delta);
}
btrfs_release_path(path);
-out:
- btrfs_free_path(path);
return ret;
-err_dump:
- /*
- * Leaf dump can take up a lot of log buffer, so we only do full leaf
- * dump for debug build.
- */
- if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
- btrfs_crit(info, "path->slots[0]=%d extent_slot=%d",
- path->slots[0], extent_slot);
- btrfs_print_leaf(path->nodes[0]);
- }
-
- btrfs_free_path(path);
- return -EUCLEAN;
}
/*
@@ -3228,13 +3377,14 @@ err_dump:
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
u64 bytenr)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
int ret = 0;
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
if (!head)
goto out_delayed_unlock;
@@ -3252,8 +3402,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (!mutex_trylock(&head->mutex))
goto out;
- btrfs_delete_ref_head(delayed_refs, head);
- head->processing = 0;
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ head->processing = false;
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
@@ -3262,7 +3412,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->must_insert_reserved)
ret = 1;
- btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
@@ -3274,86 +3424,93 @@ out_delayed_unlock:
return 0;
}
-void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf,
- u64 parent, int last_ref)
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ u64 root_id,
+ struct extent_buffer *buf,
+ u64 parent, int last_ref)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_ref generic_ref = { 0 };
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group *bg;
int ret;
- btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
- buf->start, buf->len, parent);
- btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
- root->root_key.objectid, 0, false);
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
+ struct btrfs_ref generic_ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = buf->start,
+ .num_bytes = buf->len,
+ .parent = parent,
+ .owning_root = btrfs_header_owner(buf),
+ .ref_root = root_id,
+ };
+
+ /*
+ * Assert that the extent buffer is not cleared due to
+ * EXTENT_BUFFER_ZONED_ZEROOUT. Refer to
+ * btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
+ * details.
+ */
+ ASSERT(btrfs_header_bytenr(buf) != 0);
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+ btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 0, false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret < 0)
+ return ret;
}
- if (last_ref && btrfs_header_generation(buf) == trans->transid) {
- struct btrfs_block_group *cache;
- bool must_pin = false;
-
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- ret = check_ref_cleanup(trans, buf->start);
- if (!ret) {
- btrfs_redirty_list_add(trans->transaction, buf);
- goto out;
- }
- }
+ if (!last_ref)
+ return 0;
- cache = btrfs_lookup_block_group(fs_info, buf->start);
+ if (btrfs_header_generation(buf) != trans->transid)
+ goto out;
- if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
- pin_down_extent(trans, cache, buf->start, buf->len, 1);
- btrfs_put_block_group(cache);
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
+ ret = check_ref_cleanup(trans, buf->start);
+ if (!ret)
goto out;
- }
+ }
- /*
- * If this is a leaf and there are tree mod log users, we may
- * have recorded mod log operations that point to this leaf.
- * So we must make sure no one reuses this leaf's extent before
- * mod log operations are applied to a node, otherwise after
- * rewinding a node using the mod log operations we get an
- * inconsistent btree, as the leaf's extent may now be used as
- * a node or leaf for another different btree.
- * We are safe from races here because at this point no other
- * node or root points to this extent buffer, so if after this
- * check a new tree mod log user joins, it will not be able to
- * find a node pointing to this leaf and record operations that
- * point to this leaf.
- */
- if (btrfs_header_level(buf) == 0 &&
- test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
- must_pin = true;
+ bg = btrfs_lookup_block_group(fs_info, buf->start);
- if (must_pin || btrfs_is_zoned(fs_info)) {
- btrfs_redirty_list_add(trans->transaction, buf);
- pin_down_extent(trans, cache, buf->start, buf->len, 1);
- btrfs_put_block_group(cache);
- goto out;
- }
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, true);
+ btrfs_put_block_group(bg);
+ goto out;
+ }
- WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
+ /*
+ * If there are tree mod log users we may have recorded mod log
+ * operations for this node. If we re-allocate this node we
+ * could replay operations on this node that happened when it
+ * existed in a completely different root. For example if it
+ * was part of root A, then was reallocated to root B, and we
+ * are doing a btrfs_old_search_slot(root b), we could replay
+ * operations that happened when the block was part of root A,
+ * giving us an inconsistent view of the btree.
+ *
+ * We are safe from races here because at this point no other
+ * node or root points to this extent buffer, so if after this
+ * check a new tree mod log user joins we will not have an
+ * existing log of operations on this node that we have to
+ * contend with.
+ */
- btrfs_add_free_space(cache, buf->start, buf->len);
- btrfs_free_reserved_bytes(cache, buf->len, 0);
- btrfs_put_block_group(cache);
- trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+ if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)
+ || btrfs_is_zoned(fs_info)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, true);
+ btrfs_put_block_group(bg);
+ goto out;
}
+
+ WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
+
+ btrfs_add_free_space(bg, buf->start, buf->len);
+ btrfs_free_reserved_bytes(bg, buf->len, false);
+ btrfs_put_block_group(bg);
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+
out:
- if (last_ref) {
- /*
- * Deleting the buffer, clear the corrupt flag since it doesn't
- * matter anymore.
- */
- clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
- }
+ return 0;
}
/* Can return -ENOMEM */
@@ -3369,12 +3526,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
* tree log blocks never actually go into the extent allocation
* tree, just update pinning info and exit early.
*/
- if ((ref->type == BTRFS_REF_METADATA &&
- ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
- (ref->type == BTRFS_REF_DATA &&
- ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) {
- /* unlocks the pinned mutex */
- btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
+ if (ref->ref_root == BTRFS_TREE_LOG_OBJECTID) {
+ btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes);
ret = 0;
} else if (ref->type == BTRFS_REF_METADATA) {
ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
@@ -3382,32 +3535,57 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
ret = btrfs_add_delayed_data_ref(trans, ref, 0);
}
- if (!((ref->type == BTRFS_REF_METADATA &&
- ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
- (ref->type == BTRFS_REF_DATA &&
- ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)))
+ if (ref->ref_root != BTRFS_TREE_LOG_OBJECTID)
btrfs_ref_tree_mod(fs_info, ref);
return ret;
}
enum btrfs_loop_type {
+ /*
+ * Start caching block groups but do not wait for progress or for them
+ * to be done.
+ */
LOOP_CACHING_NOWAIT,
+
+ /*
+ * If the block group isn't fully cached yet, wait until its cached
+ * free_space covers the amount of space we are asking for.
+ */
LOOP_CACHING_WAIT,
+
+ /*
+ * Allow allocations to happen from block groups that do not yet have a
+ * size classification.
+ */
+ LOOP_UNSET_SIZE_CLASS,
+
+ /*
+ * Allocate a chunk and then retry the allocation.
+ */
LOOP_ALLOC_CHUNK,
+
+ /*
+ * Ignore the size class restrictions for this allocation.
+ */
+ LOOP_WRONG_SIZE_CLASS,
+
+ /*
+ * Ignore the empty size, only try to allocate the number of bytes
+ * needed for this allocation.
+ */
LOOP_NO_EMPTY_SIZE,
};
static inline void
-btrfs_lock_block_group(struct btrfs_block_group *cache,
- int delalloc)
+btrfs_lock_block_group(struct btrfs_block_group *cache, bool delalloc)
{
if (delalloc)
down_read(&cache->data_rwsem);
}
static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
- int delalloc)
+ bool delalloc)
{
btrfs_get_block_group(cache);
if (delalloc)
@@ -3417,7 +3595,7 @@ static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
static struct btrfs_block_group *btrfs_lock_cluster(
struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
- int delalloc)
+ bool delalloc)
__acquires(&cluster->refill_lock)
{
struct btrfs_block_group *used_bg = NULL;
@@ -3454,94 +3632,32 @@ static struct btrfs_block_group *btrfs_lock_cluster(
}
static inline void
-btrfs_release_block_group(struct btrfs_block_group *cache,
- int delalloc)
+btrfs_release_block_group(struct btrfs_block_group *cache, bool delalloc)
{
if (delalloc)
up_read(&cache->data_rwsem);
btrfs_put_block_group(cache);
}
-enum btrfs_extent_allocation_policy {
- BTRFS_EXTENT_ALLOC_CLUSTERED,
- BTRFS_EXTENT_ALLOC_ZONED,
-};
-
-/*
- * Structure used internally for find_free_extent() function. Wraps needed
- * parameters.
- */
-struct find_free_extent_ctl {
- /* Basic allocation info */
- u64 ram_bytes;
- u64 num_bytes;
- u64 min_alloc_size;
- u64 empty_size;
- u64 flags;
- int delalloc;
-
- /* Where to start the search inside the bg */
- u64 search_start;
-
- /* For clustered allocation */
- u64 empty_cluster;
- struct btrfs_free_cluster *last_ptr;
- bool use_cluster;
-
- bool have_caching_bg;
- bool orig_have_caching_bg;
-
- /* Allocation is called for tree-log */
- bool for_treelog;
-
- /* Allocation is called for data relocation */
- bool for_data_reloc;
-
- /* RAID index, converted from flags */
- int index;
-
- /*
- * Current loop number, check find_free_extent_update_loop() for details
- */
- int loop;
-
- /*
- * Whether we're refilling a cluster, if true we need to re-search
- * current block group but don't try to refill the cluster again.
- */
- bool retry_clustered;
-
- /*
- * Whether we're updating free space cache, if true we need to re-search
- * current block group but don't try updating free space cache again.
- */
- bool retry_unclustered;
-
- /* If current block group is cached */
- int cached;
-
- /* Max contiguous hole found */
- u64 max_extent_size;
-
- /* Total free space from free space cache, not always contiguous */
- u64 total_free_space;
-
- /* Found result */
- u64 found_offset;
-
- /* Hint where to start looking for an empty space */
- u64 hint_byte;
-
- /* Allocation policy */
- enum btrfs_extent_allocation_policy policy;
-};
-
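+/*
+ * Check whether a block group's size class is acceptable for this allocation:
+ * zoned allocations and block groups that should not use size classes always
+ * match; from LOOP_UNSET_SIZE_CLASS on, block groups without a size class are
+ * also accepted, and from LOOP_WRONG_SIZE_CLASS on any size class is accepted.
+ * Otherwise the block group must match ffe_ctl->size_class.
+ */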
+static bool find_free_extent_check_size_class(const struct find_free_extent_ctl *ffe_ctl,
+ const struct btrfs_block_group *bg)
+{
+ if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
+ return true;
+ if (!btrfs_block_group_should_use_size_class(bg))
+ return true;
+ if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
+ return true;
+ if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
+ bg->size_class == BTRFS_BG_SZ_NONE)
+ return true;
+ return ffe_ctl->size_class == bg->size_class;
+}
/*
* Helper function for find_free_extent().
*
* Return -ENOENT to inform caller that we need fallback to unclustered mode.
- * Return -EAGAIN to inform caller that we need to re-search this block group
* Return >0 to inform caller that we found nothing
* Return 0 means we have found a location and set ffe_ctl->found_offset.
*/
@@ -3559,7 +3675,8 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
if (!cluster_bg)
goto refill_cluster;
if (cluster_bg != bg && (cluster_bg->ro ||
- !block_group_bits(cluster_bg, ffe_ctl->flags)))
+ !block_group_bits(cluster_bg, ffe_ctl->flags) ||
+ !find_free_extent_check_size_class(ffe_ctl, cluster_bg)))
goto release_cluster;
offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
@@ -3568,8 +3685,7 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
if (offset) {
/* We have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
- trace_btrfs_reserve_extent_cluster(cluster_bg,
- ffe_ctl->search_start, ffe_ctl->num_bytes);
+ trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl);
*cluster_bg_ret = cluster_bg;
ffe_ctl->found_offset = offset;
return 0;
@@ -3619,20 +3735,10 @@ refill_cluster:
if (offset) {
/* We found one, proceed */
spin_unlock(&last_ptr->refill_lock);
- trace_btrfs_reserve_extent_cluster(bg,
- ffe_ctl->search_start,
- ffe_ctl->num_bytes);
ffe_ctl->found_offset = offset;
+ trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
return 0;
}
- } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
- !ffe_ctl->retry_clustered) {
- spin_unlock(&last_ptr->refill_lock);
-
- ffe_ctl->retry_clustered = true;
- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
- ffe_ctl->empty_cluster + ffe_ctl->empty_size);
- return -EAGAIN;
}
/*
* At this point we either didn't find a cluster or we weren't able to
@@ -3647,7 +3753,6 @@ refill_cluster:
/*
* Return >0 to inform caller that we found nothing
* Return 0 when we found a free extent and set ffe_ctl->found_offset
- * Return -EAGAIN to inform caller that we need to re-search this block group
*/
static int find_free_extent_unclustered(struct btrfs_block_group *bg,
struct find_free_extent_ctl *ffe_ctl)
@@ -3685,25 +3790,8 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
ffe_ctl->num_bytes, ffe_ctl->empty_size,
&ffe_ctl->max_extent_size);
-
- /*
- * If we didn't find a chunk, and we haven't failed on this block group
- * before, and this block group is in the middle of caching and we are
- * ok with waiting, then go ahead and wait for progress to be made, and
- * set @retry_unclustered to true.
- *
- * If @retry_unclustered is true then we've already waited on this
- * block group once and should move on to the next block group.
- */
- if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
- ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
- ffe_ctl->empty_size);
- ffe_ctl->retry_unclustered = true;
- return -EAGAIN;
- } else if (!offset) {
+ if (!offset)
return 1;
- }
ffe_ctl->found_offset = offset;
return 0;
}
@@ -3717,7 +3805,7 @@ static int do_allocation_clustered(struct btrfs_block_group *block_group,
/* We want to try and use the cluster allocator, so let's look there */
if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
- if (ret >= 0 || ret == -EAGAIN)
+ if (ret >= 0)
return ret;
/* ret == -ENOENT case falls through */
}
@@ -3790,23 +3878,36 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
spin_unlock(&fs_info->relocation_bg_lock);
if (skip)
return 1;
+
/* Check RO and no space case before trying to activate it */
spin_lock(&block_group->lock);
- if (block_group->ro ||
- block_group->alloc_offset == block_group->zone_capacity) {
- spin_unlock(&block_group->lock);
- return 1;
+ if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) {
+ ret = 1;
+ /*
+ * May need to clear fs_info->{treelog,data_reloc}_bg.
+ * Return the error after taking the locks.
+ */
}
spin_unlock(&block_group->lock);
- if (!btrfs_zone_activate(block_group))
- return 1;
+ /* Metadata block group is activated at write time. */
+ if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
+ !btrfs_zone_activate(block_group)) {
+ ret = 1;
+ /*
+ * May need to clear fs_info->{treelog,data_reloc}_bg.
+ * Return the error after taking the locks.
+ */
+ }
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
spin_lock(&fs_info->treelog_bg_lock);
spin_lock(&fs_info->relocation_bg_lock);
+ if (ret)
+ goto out;
+
ASSERT(!ffe_ctl->for_treelog ||
block_group->start == fs_info->treelog_bg ||
fs_info->treelog_bg == 0);
@@ -3814,7 +3915,9 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
block_group->start == fs_info->data_reloc_bg ||
fs_info->data_reloc_bg == 0);
- if (block_group->ro) {
+ if (block_group->ro ||
+ (!ffe_ctl->for_data_reloc &&
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
ret = 1;
goto out;
}
@@ -3857,8 +3960,26 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
fs_info->treelog_bg = block_group->start;
- if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
- fs_info->data_reloc_bg = block_group->start;
+ if (ffe_ctl->for_data_reloc) {
+ if (!fs_info->data_reloc_bg)
+ fs_info->data_reloc_bg = block_group->start;
+ /*
+ * Do not allow allocations from this block group, unless it is
+ * for data relocation. Compared to increasing the ->ro, setting
+ * the ->zoned_data_reloc_ongoing flag still allows nocow
+ * writers to come in. See btrfs_inc_nocow_writers().
+ *
+ * We need to disable allocations here to avoid allocating a
+ * regular (non-relocation data) extent. With a mix of relocation
+ * extents and regular extents, we can dispatch WRITE commands
+ * (for relocation extents) and ZONE APPEND commands (for
+ * regular extents) at the same time to the same zone, which
+ * easily breaks the write pointer.
+ *
+ * Also, this flag prevents this block group from being zone finished.
+ */
+ set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
+ }
ffe_ctl->found_offset = start + block_group->alloc_offset;
block_group->alloc_offset += num_bytes;
@@ -3901,12 +4022,11 @@ static int do_allocation(struct btrfs_block_group *block_group,
static void release_block_group(struct btrfs_block_group *block_group,
struct find_free_extent_ctl *ffe_ctl,
- int delalloc)
+ bool delalloc)
{
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
- ffe_ctl->retry_clustered = false;
- ffe_ctl->retry_unclustered = false;
+ ffe_ctl->retry_uncached = false;
break;
case BTRFS_EXTENT_ALLOC_ZONED:
/* Nothing to do */
@@ -3947,19 +4067,67 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
}
}
-static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl)
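+/*
+ * Zoned policy check before forcing a chunk allocation: return 0 when
+ * allocating a new chunk makes sense, -ENOSPC to make the caller retry with a
+ * smaller allocation size, or -EAGAIN to retry the data allocation after some
+ * of the region has been written out.
+ */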
+static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl)
+{
+ /* Block group's activeness is not a requirement for METADATA block groups. */
+ if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
+ return 0;
+
+ /* If we can activate a new zone, just allocate a chunk and use it. */
+ if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
+ return 0;
+
+ /*
+ * We already reached the max active zones. Try to finish one block
+ * group to make room for a new block group. This is only possible
+ * for a data block group because btrfs_zone_finish() may need to wait
+ * for a running transaction which can cause a deadlock for metadata
+ * allocation.
+ */
+ if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+ int ret = btrfs_zone_finish_one_bg(fs_info);
+
+ if (ret == 1)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * If we have enough free space left in an already active block group
+ * and we can't activate any other zone now, do not allow allocating a
+ * new chunk and let find_free_extent() retry with a smaller size.
+ */
+ if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
+ return -ENOSPC;
+
+ /*
+ * Not even min_alloc_size is left in any block group. Since we cannot
+ * activate a new block group, allocating a new one may not help. Let's
+ * tell the caller to try again and hope it makes progress by writing
+ * out some parts of the region. That is only possible for data block
+ * groups, where a part of the region can be written.
+ */
+ if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
+ return -EAGAIN;
+
+ /*
+ * We cannot activate a new block group and there is not enough space
+ * left in any block group. So allocating a new block group may not
+ * help. But there is nothing else to do anyway, so let's go with it.
+ */
+ return 0;
+}
+
+static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl)
{
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
- /*
- * If we can't allocate a new chunk we've already looped through
- * at least once, move on to the NO_EMPTY_SIZE case.
- */
- ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
return 0;
case BTRFS_EXTENT_ALLOC_ZONED:
- /* Give up here */
- return -ENOSPC;
+ return can_allocate_chunk_zoned(fs_info, ffe_ctl);
default:
BUG();
}
@@ -3973,9 +4141,10 @@ static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl)
static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
struct btrfs_key *ins,
struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info,
bool full_search)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = fs_info->chunk_root;
int ret;
if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
@@ -3987,18 +4156,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
return 0;
}
- if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size &&
- !btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->index)) {
- /*
- * If we have enough free space left in an already active block
- * group and we can't activate any other zone now, retry the
- * active ones with a smaller allocation size. Returning early
- * from here will tell btrfs_reserve_extent() to haven the
- * size.
- */
- return -ENOSPC;
- }
-
if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
return 1;
@@ -4006,34 +4163,28 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
if (ffe_ctl->index < BTRFS_NR_RAID_TYPES)
return 1;
- /*
- * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
- * caching kthreads as we move along
- * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
- * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
- * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
- * again
- */
+ /* See the comments for btrfs_loop_type for an explanation of the phases. */
if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
ffe_ctl->index = 0;
- if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
- /*
- * We want to skip the LOOP_CACHING_WAIT step if we
- * don't have any uncached bgs and we've already done a
- * full search through.
- */
- if (ffe_ctl->orig_have_caching_bg || !full_search)
- ffe_ctl->loop = LOOP_CACHING_WAIT;
- else
- ffe_ctl->loop = LOOP_ALLOC_CHUNK;
- } else {
+ /*
+ * We want to skip the LOOP_CACHING_WAIT step if we don't have
+ * any uncached bgs and we've already done a full search
+ * through.
+ */
+ if (ffe_ctl->loop == LOOP_CACHING_NOWAIT &&
+ (!ffe_ctl->orig_have_caching_bg && full_search))
ffe_ctl->loop++;
- }
+ ffe_ctl->loop++;
if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
struct btrfs_trans_handle *trans;
int exist = 0;
+ /* Check if the allocation policy allows creating a new chunk. */
+ ret = can_allocate_chunk(fs_info, ffe_ctl);
+ if (ret)
+ return ret;
+
trans = current->journal_info;
if (trans)
exist = 1;
@@ -4045,12 +4196,14 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
return ret;
}
- ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
- CHUNK_ALLOC_FORCE);
+ ret = btrfs_chunk_alloc(trans, space_info, ffe_ctl->flags,
+ CHUNK_ALLOC_FORCE_FOR_EXTENT);
/* Do not bail out on ENOSPC since we can do more. */
- if (ret == -ENOSPC)
- ret = chunk_allocation_failed(ffe_ctl);
+ if (ret == -ENOSPC) {
+ ret = 0;
+ ffe_ctl->loop++;
+ }
else if (ret < 0)
btrfs_abort_transaction(trans, ret);
else
@@ -4131,6 +4284,44 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
return 0;
}
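+
+/*
+ * Zoned allocation policy: seed the search hint with the dedicated tree-log
+ * or data relocation block group when one is set, otherwise with an already
+ * active data block group that still has enough unallocated zone capacity.
+ */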
+static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info)
+{
+ if (ffe_ctl->for_treelog) {
+ spin_lock(&fs_info->treelog_bg_lock);
+ if (fs_info->treelog_bg)
+ ffe_ctl->hint_byte = fs_info->treelog_bg;
+ spin_unlock(&fs_info->treelog_bg_lock);
+ } else if (ffe_ctl->for_data_reloc) {
+ spin_lock(&fs_info->relocation_bg_lock);
+ if (fs_info->data_reloc_bg)
+ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+ spin_unlock(&fs_info->relocation_bg_lock);
+ } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+ struct btrfs_block_group *block_group;
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+ /*
+ * No lock is OK here because avail is monotonically
+ * decreasing, and this is just a hint.
+ */
+ u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+ if (block_group_bits(block_group, ffe_ctl->flags) &&
+ block_group->space_info == space_info &&
+ avail >= ffe_ctl->num_bytes) {
+ ffe_ctl->hint_byte = block_group->start;
+ break;
+ }
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+
+ return 0;
+}
+
static int prepare_allocation(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_space_info *space_info,
@@ -4141,19 +4332,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
return prepare_allocation_clustered(fs_info, ffe_ctl,
space_info, ins);
case BTRFS_EXTENT_ALLOC_ZONED:
- if (ffe_ctl->for_treelog) {
- spin_lock(&fs_info->treelog_bg_lock);
- if (fs_info->treelog_bg)
- ffe_ctl->hint_byte = fs_info->treelog_bg;
- spin_unlock(&fs_info->treelog_bg_lock);
- }
- if (ffe_ctl->for_data_reloc) {
- spin_lock(&fs_info->relocation_bg_lock);
- if (fs_info->data_reloc_bg)
- ffe_ctl->hint_byte = fs_info->data_reloc_bg;
- spin_unlock(&fs_info->relocation_bg_lock);
- }
- return 0;
+ return prepare_allocation_zoned(fs_info, ffe_ctl, space_info);
default:
BUG();
}
@@ -4206,14 +4385,13 @@ static noinline int find_free_extent(struct btrfs_root *root,
ffe_ctl->orig_have_caching_bg = false;
ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
ffe_ctl->loop = 0;
- /* For clustered allocation */
- ffe_ctl->retry_clustered = false;
- ffe_ctl->retry_unclustered = false;
+ ffe_ctl->retry_uncached = false;
ffe_ctl->cached = 0;
ffe_ctl->max_extent_size = 0;
ffe_ctl->total_free_space = 0;
ffe_ctl->found_offset = 0;
ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
+ ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes);
if (btrfs_is_zoned(fs_info))
ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED;
@@ -4222,12 +4400,22 @@ static noinline int find_free_extent(struct btrfs_root *root,
ins->objectid = 0;
ins->offset = 0;
- trace_find_free_extent(root, ffe_ctl->num_bytes, ffe_ctl->empty_size,
- ffe_ctl->flags);
+ trace_btrfs_find_free_extent(root, ffe_ctl);
space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
+ if (btrfs_is_zoned(fs_info) && space_info) {
+ /* Use dedicated sub-space_info for dedicated block group users. */
+ if (ffe_ctl->for_data_reloc) {
+ space_info = space_info->sub_group[0];
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC);
+ } else if (ffe_ctl->for_treelog) {
+ space_info = space_info->sub_group[0];
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_TREELOG);
+ }
+ }
if (!space_info) {
- btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
+ btrfs_err(fs_info, "no space info for %llu, tree-log %d, relocation %d",
+ ffe_ctl->flags, ffe_ctl->for_treelog, ffe_ctl->for_data_reloc);
return -ENOSPC;
}
@@ -4236,7 +4424,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
return ret;
ffe_ctl->search_start = max(ffe_ctl->search_start,
- first_logical_byte(fs_info, 0));
+ first_logical_byte(fs_info));
ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte);
if (ffe_ctl->search_start == ffe_ctl->hint_byte) {
block_group = btrfs_lookup_block_group(fs_info,
@@ -4249,6 +4437,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
* picked out then we don't care that the block group is cached.
*/
if (block_group && block_group_bits(block_group, ffe_ctl->flags) &&
+ block_group->space_info == space_info &&
block_group->cached != BTRFS_CACHE_NO) {
down_read(&space_info->groups_sem);
if (list_empty(&block_group->list) ||
@@ -4266,6 +4455,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
block_group->flags);
btrfs_lock_block_group(block_group,
ffe_ctl->delalloc);
+ ffe_ctl->hinted = true;
goto have_block_group;
}
} else if (block_group) {
@@ -4273,6 +4463,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
}
}
search:
+ trace_btrfs_find_free_extent_search_loop(root, ffe_ctl);
ffe_ctl->have_caching_bg = false;
if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
ffe_ctl->index == 0)
@@ -4282,6 +4473,7 @@ search:
&space_info->block_groups[ffe_ctl->index], list) {
struct btrfs_block_group *bg_ret;
+ ffe_ctl->hinted = false;
/* If the block group is read-only, we can skip it entirely. */
if (unlikely(block_group->ro)) {
if (ffe_ctl->for_treelog)
@@ -4323,10 +4515,11 @@ search:
}
have_block_group:
+ trace_btrfs_find_free_extent_have_block_group(root, ffe_ctl, block_group);
ffe_ctl->cached = btrfs_block_group_done(block_group);
if (unlikely(!ffe_ctl->cached)) {
ffe_ctl->have_caching_bg = true;
- ret = btrfs_cache_block_group(block_group, 0);
+ ret = btrfs_cache_block_group(block_group, false);
/*
* If we get ENOMEM here or something else we want to
@@ -4344,21 +4537,23 @@ have_block_group:
ret = 0;
}
- if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
+ if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
+ if (!cache_block_group_error)
+ cache_block_group_error = -EIO;
+ goto loop;
+ }
+
+ if (!find_free_extent_check_size_class(ffe_ctl, block_group))
goto loop;
bg_ret = NULL;
ret = do_allocation(block_group, ffe_ctl, &bg_ret);
- if (ret == 0) {
- if (bg_ret && bg_ret != block_group) {
- btrfs_release_block_group(block_group,
- ffe_ctl->delalloc);
- block_group = bg_ret;
- }
- } else if (ret == -EAGAIN) {
- goto have_block_group;
- } else if (ret > 0) {
+ if (ret > 0)
goto loop;
+
+ if (bg_ret && bg_ret != block_group) {
+ btrfs_release_block_group(block_group, ffe_ctl->delalloc);
+ block_group = bg_ret;
}
/* Checks */
@@ -4381,7 +4576,8 @@ have_block_group:
ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes,
ffe_ctl->num_bytes,
- ffe_ctl->delalloc);
+ ffe_ctl->delalloc,
+ ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS);
if (ret == -EAGAIN) {
btrfs_add_free_space_unused(block_group,
ffe_ctl->found_offset,
@@ -4394,17 +4590,26 @@ have_block_group:
ins->objectid = ffe_ctl->search_start;
ins->offset = ffe_ctl->num_bytes;
- trace_btrfs_reserve_extent(block_group, ffe_ctl->search_start,
- ffe_ctl->num_bytes);
+ trace_btrfs_reserve_extent(block_group, ffe_ctl);
btrfs_release_block_group(block_group, ffe_ctl->delalloc);
break;
loop:
+ if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
+ !ffe_ctl->retry_uncached) {
+ ffe_ctl->retry_uncached = true;
+ btrfs_wait_block_group_cache_progress(block_group,
+ ffe_ctl->num_bytes +
+ ffe_ctl->empty_cluster +
+ ffe_ctl->empty_size);
+ goto have_block_group;
+ }
release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
cond_resched();
}
up_read(&space_info->groups_sem);
- ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search);
+ ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, space_info,
+ full_search);
if (ret > 0)
goto search;
@@ -4426,8 +4631,8 @@ loop:
}
/*
- * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
- * hole that is at least as big as @num_bytes.
+ * Entry point to the extent allocator. Tries to find a hole that is at least
+ * as big as @num_bytes.
*
* @root - The root that will contain this extent
*
@@ -4473,14 +4678,14 @@ loop:
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
- struct btrfs_key *ins, int is_data, int delalloc)
+ struct btrfs_key *ins, bool is_data, bool delalloc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct find_free_extent_ctl ffe_ctl = {};
bool final_tried = num_bytes == min_alloc_size;
u64 flags;
int ret;
- bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ bool for_treelog = (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);
bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
flags = get_alloc_profile_by_root(root, is_data);
@@ -4518,16 +4723,15 @@ again:
"allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
flags, num_bytes, for_treelog, for_data_reloc);
if (sinfo)
- btrfs_dump_space_info(fs_info, sinfo,
- num_bytes, 1);
+ btrfs_dump_space_info(sinfo, num_bytes, 1);
}
}
return ret;
}
-int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 len, int delalloc)
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len,
+ bool is_delalloc)
{
struct btrfs_block_group *cache;
@@ -4539,58 +4743,86 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
}
btrfs_add_free_space(cache, start, len);
- btrfs_free_reserved_bytes(cache, len, delalloc);
+ btrfs_free_reserved_bytes(cache, len, is_delalloc);
trace_btrfs_reserved_extent_free(fs_info, start, len);
btrfs_put_block_group(cache);
return 0;
}
-int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
- u64 len)
+int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
+ const struct extent_buffer *eb)
{
struct btrfs_block_group *cache;
int ret = 0;
- cache = btrfs_lookup_block_group(trans->fs_info, start);
+ cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
if (!cache) {
btrfs_err(trans->fs_info, "unable to find block group for %llu",
- start);
+ eb->start);
return -ENOSPC;
}
- ret = pin_down_extent(trans, cache, start, len, 1);
+ ret = pin_down_extent(trans, cache, eb->start, eb->len, true);
btrfs_put_block_group(cache);
return ret;
}
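+
+/*
+ * Common tail when inserting a new extent item: remove the range from the
+ * free space tree and account it as used in its block group. Shared by
+ * alloc_reserved_file_extent() and alloc_reserved_tree_block() below.
+ */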
+static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ int ret;
+
+ ret = btrfs_remove_from_free_space_tree(trans, bytenr, num_bytes);
+ if (ret)
+ return ret;
+
+ ret = btrfs_update_block_group(trans, bytenr, num_bytes, true);
+ if (ret) {
+ ASSERT(!ret);
+ btrfs_err(fs_info, "update block group failed for %llu %llu",
+ bytenr, num_bytes);
+ return ret;
+ }
+
+ trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes);
+ return 0;
+}
+
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
- struct btrfs_key *ins, int ref_mod)
+ struct btrfs_key *ins, int ref_mod, u64 oref_root)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *extent_root;
int ret;
struct btrfs_extent_item *extent_item;
+ struct btrfs_extent_owner_ref *oref;
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
struct extent_buffer *leaf;
int type;
u32 size;
+ const bool simple_quota = (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE);
if (parent > 0)
type = BTRFS_SHARED_DATA_REF_KEY;
else
type = BTRFS_EXTENT_DATA_REF_KEY;
- size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
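+ /*
+ * With simple quotas enabled the extent item carries an extra inline
+ * owner ref in front of the data ref, so reserve room for both.
+ */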
+ size = sizeof(*extent_item);
+ if (simple_quota)
+ size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
+ size += btrfs_extent_inline_ref_size(type);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
- ins, size);
+ extent_root = btrfs_extent_root(fs_info, ins->objectid);
+ ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size);
if (ret) {
btrfs_free_path(path);
return ret;
@@ -4605,7 +4837,14 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
flags | BTRFS_EXTENT_FLAG_DATA);
iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
+ if (simple_quota) {
+ btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_EXTENT_OWNER_REF_KEY);
+ oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
+ btrfs_set_extent_owner_ref_root_id(leaf, oref, oref_root);
+ iref = (struct btrfs_extent_inline_ref *)(oref + 1);
+ }
btrfs_set_extent_inline_ref_type(leaf, iref, type);
+
if (parent > 0) {
struct btrfs_shared_data_ref *ref;
ref = (struct btrfs_shared_data_ref *)(iref + 1);
@@ -4620,28 +4859,17 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
}
- btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
- if (ret)
- return ret;
-
- ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, true);
- if (ret) { /* -ENOENT, logic error */
- btrfs_err(fs_info, "update block group failed for %llu %llu",
- ins->objectid, ins->offset);
- BUG();
- }
- trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
- return ret;
+ return alloc_reserved_extent(trans, ins->objectid, ins->offset);
}
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *node,
+ const struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *extent_root;
int ret;
struct btrfs_extent_item *extent_item;
struct btrfs_key extent_key;
@@ -4649,32 +4877,30 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
struct extent_buffer *leaf;
- struct btrfs_delayed_tree_ref *ref;
u32 size = sizeof(*extent_item) + sizeof(*iref);
- u64 num_bytes;
- u64 flags = extent_op->flags_to_set;
+ const u64 flags = (extent_op ? extent_op->flags_to_set : 0);
+ /* The owner of a tree block is the level. */
+ int level = btrfs_delayed_ref_owner(node);
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
- ref = btrfs_delayed_node_to_tree_ref(node);
-
extent_key.objectid = node->bytenr;
if (skinny_metadata) {
- extent_key.offset = ref->level;
+ /* The owner of a tree block is the level. */
+ extent_key.offset = level;
extent_key.type = BTRFS_METADATA_ITEM_KEY;
- num_bytes = fs_info->nodesize;
} else {
extent_key.offset = node->num_bytes;
extent_key.type = BTRFS_EXTENT_ITEM_KEY;
size += sizeof(*block_info);
- num_bytes = node->num_bytes;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
- &extent_key, size);
+ extent_root = btrfs_extent_root(fs_info, extent_key.objectid);
+ ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key,
+ size);
if (ret) {
btrfs_free_path(path);
return ret;
@@ -4693,39 +4919,23 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
} else {
block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
- btrfs_set_tree_block_level(leaf, block_info, ref->level);
+ btrfs_set_tree_block_level(leaf, block_info, level);
iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
}
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
btrfs_set_extent_inline_ref_type(leaf, iref,
BTRFS_SHARED_BLOCK_REF_KEY);
- btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
+ btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
} else {
btrfs_set_extent_inline_ref_type(leaf, iref,
BTRFS_TREE_BLOCK_REF_KEY);
- btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+ btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
}
- btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
- ret = remove_from_free_space_tree(trans, extent_key.objectid,
- num_bytes);
- if (ret)
- return ret;
-
- ret = btrfs_update_block_group(trans, extent_key.objectid,
- fs_info->nodesize, true);
- if (ret) { /* -ENOENT, logic error */
- btrfs_err(fs_info, "update block group failed for %llu %llu",
- extent_key.objectid, extent_key.offset);
- BUG();
- }
-
- trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
- fs_info->nodesize);
- return ret;
+ return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
@@ -4733,14 +4943,20 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
- struct btrfs_ref generic_ref = { 0 };
+ struct btrfs_ref generic_ref = {
+ .action = BTRFS_ADD_DELAYED_EXTENT,
+ .bytenr = ins->objectid,
+ .num_bytes = ins->offset,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
- BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(generic_ref.ref_root != BTRFS_TREE_LOG_OBJECTID);
- btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
- ins->objectid, ins->offset, 0);
- btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner,
- offset, 0, false);
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_fstree(root->relocation_src_root))
+ generic_ref.owning_root = root->relocation_src_root;
+
+ btrfs_init_data_ref(&generic_ref, owner, offset, 0, false);
btrfs_ref_tree_mod(root->fs_info, &generic_ref);
return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
@@ -4759,6 +4975,13 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
+ const struct btrfs_squota_delta delta = {
+ .root = root_objectid,
+ .num_bytes = ins->offset,
+ .generation = trans->transid,
+ .is_data = true,
+ .is_inc = true,
+ };
/*
* Mixed block groups will exclude before processing the log so we only
@@ -4784,13 +5007,36 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
spin_unlock(&space_info->lock);
ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
- offset, ins, 1);
+ offset, ins, 1, root_objectid);
if (ret)
- btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
+ btrfs_pin_extent(trans, ins->objectid, ins->offset);
+ ret = btrfs_record_squota_delta(fs_info, &delta);
btrfs_put_block_group(block_group);
return ret;
}
+#ifdef CONFIG_BTRFS_DEBUG
+/*
+ * Extra safety check in case the extent tree is corrupted and extent allocator
+ * chooses to use a tree block which is already used and locked.
+ */
+static bool check_eb_lock_owner(const struct extent_buffer *eb)
+{
+ if (eb->lock_owner == current->pid) {
+ btrfs_err_rl(eb->fs_info,
+"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
+ eb->start, btrfs_header_owner(eb), current->pid);
+ return true;
+ }
+ return false;
+}
+#else
+static bool check_eb_lock_owner(struct extent_buffer *eb)
+{
+ return false;
+}
+#endif
+
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 bytenr, int level, u64 owner,
@@ -4798,34 +5044,45 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
+ u64 lockdep_owner = owner;
buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
if (IS_ERR(buf))
return buf;
- /*
- * Extra safety check in case the extent tree is corrupted and extent
- * allocator chooses to use a tree block which is already used and
- * locked.
- */
- if (buf->lock_owner == current->pid) {
- btrfs_err_rl(fs_info,
-"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
- buf->start, btrfs_header_owner(buf), current->pid);
+ if (unlikely(check_eb_lock_owner(buf))) {
free_extent_buffer(buf);
return ERR_PTR(-EUCLEAN);
}
/*
+ * The reloc trees are just snapshots, so we need them to appear to be
+ * just like any other fs tree WRT lockdep.
+ *
+ * The exception however is in replace_path() in relocation, where we
+ * hold the lock on the original fs root and then search for the reloc
+ * root. At that point we need to make sure any reloc root buffers are
+ * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
+ * lockdep happy.
+ */
+ if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
+ !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
+ lockdep_owner = BTRFS_FS_TREE_OBJECTID;
+
+ /* btrfs_clear_buffer_dirty() accesses the generation field. */
+ btrfs_set_header_generation(buf, trans->transid);
+
+ /*
* This needs to stay, because we could allocate a freed block from an
* old tree into a new tree, so we need to make sure this new block is
* set to the appropriate level and owner.
*/
- btrfs_set_buffer_lockdep_class(owner, buf, level);
- __btrfs_tree_lock(buf, nest);
- btrfs_clean_tree_block(buf);
+ btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
+
+ btrfs_tree_lock_nested(buf, nest);
+ btrfs_clear_buffer_dirty(trans, buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
- clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags);
+ clear_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &buf->bflags);
set_extent_buffer_uptodate(buf);
@@ -4837,22 +5094,24 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_owner(buf, owner);
write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
- if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
buf->log_index = root->log_transid % 2;
/*
* we allow two log transactions at a time, use different
* EXTENT bit to differentiate dirty pages.
*/
if (buf->log_index == 0)
- set_extent_dirty(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ btrfs_set_extent_bit(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1,
+ EXTENT_DIRTY_LOG1, NULL);
else
- set_extent_new(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1);
+ btrfs_set_extent_bit(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1,
+ EXTENT_DIRTY_LOG2, NULL);
} else {
buf->log_index = -1;
- set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ btrfs_set_extent_bit(&trans->transaction->dirty_pages, buf->start,
+ buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
}
/* this returns a buffer locked for blocking */
return buf;
@@ -4868,18 +5127,18 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
const struct btrfs_disk_key *key,
int level, u64 hint,
u64 empty_size,
+ u64 reloc_src_root,
enum btrfs_lock_nesting nest)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key ins;
struct btrfs_block_rsv *block_rsv;
struct extent_buffer *buf;
- struct btrfs_delayed_extent_op *extent_op;
- struct btrfs_ref generic_ref = { 0 };
u64 flags = 0;
int ret;
u32 blocksize = fs_info->nodesize;
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
+ u64 owning_root;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (btrfs_is_testing(fs_info)) {
@@ -4896,7 +5155,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
return ERR_CAST(block_rsv);
ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
- empty_size, hint, &ins, 0, 0);
+ empty_size, hint, &ins, false, false);
if (ret)
goto out_unuse;
@@ -4906,48 +5165,59 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
ret = PTR_ERR(buf);
goto out_free_reserved;
}
+ owning_root = btrfs_header_owner(buf);
if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent == 0)
parent = ins.objectid;
flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ owning_root = reloc_src_root;
} else
BUG_ON(parent > 0);
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
- extent_op = btrfs_alloc_delayed_extent_op();
- if (!extent_op) {
- ret = -ENOMEM;
- goto out_free_buf;
+ struct btrfs_delayed_extent_op *extent_op;
+ struct btrfs_ref generic_ref = {
+ .action = BTRFS_ADD_DELAYED_EXTENT,
+ .bytenr = ins.objectid,
+ .num_bytes = ins.offset,
+ .parent = parent,
+ .owning_root = owning_root,
+ .ref_root = root_objectid,
+ };
+
+ if (!skinny_metadata || flags != 0) {
+ extent_op = btrfs_alloc_delayed_extent_op();
+ if (!extent_op) {
+ ret = -ENOMEM;
+ goto out_free_buf;
+ }
+ if (key)
+ memcpy(&extent_op->key, key, sizeof(extent_op->key));
+ else
+ memset(&extent_op->key, 0, sizeof(extent_op->key));
+ extent_op->flags_to_set = flags;
+ extent_op->update_key = (skinny_metadata ? false : true);
+ extent_op->update_flags = (flags != 0);
+ } else {
+ extent_op = NULL;
}
- if (key)
- memcpy(&extent_op->key, key, sizeof(extent_op->key));
- else
- memset(&extent_op->key, 0, sizeof(extent_op->key));
- extent_op->flags_to_set = flags;
- extent_op->update_key = skinny_metadata ? false : true;
- extent_op->update_flags = true;
- extent_op->is_data = false;
- extent_op->level = level;
-
- btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
- ins.objectid, ins.offset, parent);
- btrfs_init_tree_ref(&generic_ref, level, root_objectid,
- root->root_key.objectid, false);
+
+ btrfs_init_tree_ref(&generic_ref, level, btrfs_root_id(root), false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
- if (ret)
- goto out_free_delayed;
+ if (ret) {
+ btrfs_free_delayed_extent_op(extent_op);
+ goto out_free_buf;
+ }
}
return buf;
-out_free_delayed:
- btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
btrfs_tree_unlock(buf);
free_extent_buffer(buf);
out_free_reserved:
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, false);
out_unuse:
btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
return ERR_PTR(ret);
@@ -4967,11 +5237,99 @@ struct walk_control {
int reada_slot;
int reada_count;
int restarted;
+ /* Indicate that extent info needs to be looked up when walking the tree. */
+ int lookup_info;
};
+/*
+ * This is our normal stage. We are traversing blocks the current snapshot owns
+ * and we are dropping any of our references to any children we are able to, and
+ * then freeing the block once we've processed all of the children.
+ */
#define DROP_REFERENCE 1
+
+/*
+ * We enter this stage when we have to walk into a child block (meaning we can't
+ * simply drop our reference to it from our current parent node) and there is
+ * more than one reference on it. If we are the owner of any of the children
+ * blocks from the current parent node then we have to do the FULL_BACKREF dance
+ * on them in order to drop our normal ref and add the shared ref.
+ */
#define UPDATE_BACKREF 2
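/*
 * Editor's note: the sketch below is illustrative only and not part of this
 * change; it condenses how the two stages above interact during the walk,
 * using hypothetical local names in place of the real walk_control fields.
 *
 *	stage = DROP_REFERENCE;
 *	while (walking down) {
 *		if (stage == DROP_REFERENCE && child_refs > 1) {
 *			// Shared child: convert its subtree to FULL_BACKREF
 *			// before we may drop our own reference to it.
 *			stage = UPDATE_BACKREF;
 *			shared_level = child_level;
 *		}
 *	}
 *	// Once the walk returns to shared_level, walk_up_proc() re-reads the
 *	// refs/flags and switches back to DROP_REFERENCE.
 */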
+/*
+ * Decide if we need to walk down into this node to adjust the references.
+ *
+ * @root: the root we are currently deleting
+ * @wc: the walk control for this deletion
+ * @eb: the parent eb that we're currently visiting
+ * @flags: the flags for wc->level - 1
+ * @slot: the slot in the eb that we're currently checking
+ *
+ * This is meant to be called when we're evaluating if a node we point to at
+ * wc->level should be read and walked into, or if we can simply delete our
+ * reference to it. We return true if we should walk into the node, false if we
+ * can skip it.
+ *
+ * We have assertions in here to make sure this is called correctly. We assume
+ * that sanity checking on the blocks read to this point has been done, so any
+ * corrupted file systems must have been caught before calling this function.
+ */
+static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *wc,
+ struct extent_buffer *eb, u64 flags, int slot)
+{
+ struct btrfs_key key;
+ u64 generation;
+ int level = wc->level;
+
+ ASSERT(level > 0);
+ ASSERT(wc->refs[level - 1] > 0);
+
+ /*
+ * In the UPDATE_BACKREF stage we only want to skip this node if it
+ * already has FULL_BACKREF set; otherwise we need to read it.
+ */
+ if (wc->stage == UPDATE_BACKREF) {
+ if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ return false;
+ return true;
+ }
+
+ /*
+ * We're the last ref on this block, we must walk into it and process
+ * any refs it's pointing at.
+ */
+ if (wc->refs[level - 1] == 1)
+ return true;
+
+ /*
+ * If we're already FULL_BACKREF then we know we can just drop our
+ * current reference.
+ */
+ if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ return false;
+
+ /*
+ * This block is older than our creation generation, so we can drop our
+ * reference to it.
+ */
+ generation = btrfs_node_ptr_generation(eb, slot);
+ if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
+ return false;
+
+ /*
+ * This block was processed from a previous snapshot deletion run, we
+ * can skip it.
+ */
+ btrfs_node_key_to_cpu(eb, &key, slot);
+ if (btrfs_comp_cpu_keys(&key, &wc->update_progress) < 0)
+ return false;
+
+ /* In all other cases we need to walk down into the node. */
+ return true;
+}
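/*
 * Editor's note: illustrative only, not part of this change. Callers treat
 * the helper above as a single filter for both the readahead path and the
 * real walk-down path, roughly:
 *
 *	if (!visit_node_for_delete(root, wc, eb, flags, slot))
 *		continue;	// or "goto skip": drop/skip without reading
 *
 * so the "should we descend into this child?" decision lives in one place.
 */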
+
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct walk_control *wc,
@@ -4983,7 +5341,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
u64 refs;
u64 flags;
u32 nritems;
- struct btrfs_key key;
struct extent_buffer *eb;
int ret;
int slot;
@@ -5013,38 +5370,29 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
goto reada;
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset)
+ generation <= btrfs_root_origin_generation(root))
continue;
/* We don't lock the tree block, it's OK to be racy here */
ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
wc->level - 1, 1, &refs,
- &flags);
+ &flags, NULL);
/* We don't care about errors in readahead. */
if (ret < 0)
continue;
- BUG_ON(refs == 0);
- if (wc->stage == DROP_REFERENCE) {
- if (refs == 1)
- goto reada;
+ /*
+ * This could be racy; it's conceivable that we raced and ended
+ * up with a bogus refs count. If that's the case just skip it, and
+ * if we are actually corrupt we will notice when we look up
+ * everything again with our locks.
+ */
+ if (refs == 0)
+ continue;
- if (wc->level == 1 &&
- (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- continue;
- if (!wc->update_ref ||
- generation <= root->root_key.offset)
- continue;
- btrfs_node_key_to_cpu(eb, &key, slot);
- ret = btrfs_comp_cpu_keys(&key,
- &wc->update_progress);
- if (ret < 0)
- continue;
- } else {
- if (wc->level == 1 &&
- (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- continue;
- }
+ /* If we don't need to visit this node don't reada. */
+ if (!visit_node_for_delete(root, wc, eb, flags, slot))
+ continue;
reada:
btrfs_readahead_node_child(eb, slot);
nread++;
@@ -5063,7 +5411,7 @@ reada:
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct walk_control *wc, int lookup_info)
+ struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int level = wc->level;
@@ -5071,26 +5419,29 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
int ret;
- if (wc->stage == UPDATE_BACKREF &&
- btrfs_header_owner(eb) != root->root_key.objectid)
+ if (wc->stage == UPDATE_BACKREF && btrfs_header_owner(eb) != btrfs_root_id(root))
return 1;
/*
* when reference count of tree block is 1, it won't increase
* again. once full backref flag is set, we never clear it.
*/
- if (lookup_info &&
+ if (wc->lookup_info &&
((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
- BUG_ON(!path->locks[level]);
+ ASSERT(path->locks[level]);
ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
- &wc->flags[level]);
- BUG_ON(ret == -ENOMEM);
+ &wc->flags[level],
+ NULL);
if (ret)
return ret;
- BUG_ON(wc->refs[level] == 0);
+ if (unlikely(wc->refs[level] == 0)) {
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ eb->start);
+ return -EUCLEAN;
+ }
}
if (wc->stage == DROP_REFERENCE) {
@@ -5106,14 +5457,22 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
/* wc->stage == UPDATE_BACKREF */
if (!(wc->flags[level] & flag)) {
- BUG_ON(!path->locks[level]);
+ ASSERT(path->locks[level]);
ret = btrfs_inc_ref(trans, root, eb, 1);
- BUG_ON(ret); /* -ENOMEM */
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
ret = btrfs_dec_ref(trans, root, eb, 0);
- BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_set_disk_extent_flags(trans, eb, flag,
- btrfs_header_level(eb), 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ ret = btrfs_set_disk_extent_flags(trans, eb, flag);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
wc->flags[level] |= flag;
}
@@ -5136,23 +5495,186 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 parent,
int level)
{
- struct btrfs_path *path;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_head *head;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_extent_inline_ref *iref;
int ret;
+ bool exists = false;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
-
+again:
ret = lookup_extent_backref(trans, path, &iref, bytenr,
root->fs_info->nodesize, parent,
- root->root_key.objectid, level, 0);
- btrfs_free_path(path);
- if (ret == -ENOENT)
+ btrfs_root_id(root), level, 0);
+ if (ret != -ENOENT) {
+ /*
+ * If we got 0 then we found our reference, so return 1; otherwise
+ * return the error, since it's not -ENOENT.
+ */
+ return (ret < 0) ? ret : 1;
+ }
+
+ /*
+ * We could have a delayed ref with this reference, so look it up while
+ * we're holding the path open to make sure we don't race with the
+ * delayed ref running.
+ */
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+ head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
+ if (!head)
+ goto out;
+ if (!mutex_trylock(&head->mutex)) {
+ /*
+ * We're contended, which means the delayed ref is running. Get a
+ * reference and wait for the ref head to be complete, then try
+ * again.
+ */
+ refcount_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_release_path(path);
+
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref_head(head);
+ goto again;
+ }
+
+ exists = btrfs_find_delayed_tree_ref(head, btrfs_root_id(root), parent);
+ mutex_unlock(&head->mutex);
+out:
+ spin_unlock(&delayed_refs->lock);
+ return exists ? 1 : 0;
+}
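/*
 * Editor's note: illustrative only, not part of this change. The contended
 * case above is the usual delayed-ref-head wait pattern: pin the head with a
 * refcount, drop the spinlock and release the path (the running delayed ref
 * may need the tree leaves we hold), block on head->mutex until the ref has
 * finished running, then drop our reference and retry the lookup from scratch:
 *
 *	refcount_inc(&head->refs);
 *	spin_unlock(&delayed_refs->lock);
 *	btrfs_release_path(path);
 *	mutex_lock(&head->mutex);	// waits for the delayed ref to finish
 *	mutex_unlock(&head->mutex);
 *	btrfs_put_delayed_ref_head(head);
 *	goto again;
 */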
+
+/*
+ * We may not have an uptodate block, so if we are going to walk down into this
+ * block we need to drop the lock, read it from disk, re-lock it and
+ * return to continue dropping the snapshot.
+ */
+static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc,
+ struct extent_buffer *next)
+{
+ struct btrfs_tree_parent_check check = { 0 };
+ u64 generation;
+ int level = wc->level;
+ int ret;
+
+ btrfs_assert_tree_write_locked(next);
+
+ generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]);
+
+ if (btrfs_buffer_uptodate(next, generation, false))
return 0;
- if (ret < 0)
+
+ check.level = level - 1;
+ check.transid = generation;
+ check.owner_root = btrfs_root_id(root);
+ check.has_first_key = true;
+ btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, path->slots[level]);
+
+ btrfs_tree_unlock(next);
+ if (level == 1)
+ reada_walk_down(trans, root, wc, path);
+ ret = btrfs_read_extent_buffer(next, &check);
+ if (ret) {
+ free_extent_buffer(next);
return ret;
- return 1;
+ }
+ btrfs_tree_lock(next);
+ wc->lookup_info = 1;
+ return 0;
+}
+
+/*
+ * If we determine that we don't have to visit wc->level - 1 then we need to
+ * decide whether we can drop our reference.
+ *
+ * If we are UPDATE_BACKREF then we will not drop anything; we only need to
+ * update our backrefs.
+ *
+ * If we are DROP_REFERENCE this will figure out if we need to drop our current
+ * reference, skipping it if we dropped it from a previous uncompleted drop, or
+ * dropping it if we still have a reference to it.
+ */
+static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, struct walk_control *wc,
+ struct extent_buffer *next, u64 owner_root)
+{
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = next->start,
+ .num_bytes = root->fs_info->nodesize,
+ .owning_root = owner_root,
+ .ref_root = btrfs_root_id(root),
+ };
+ int level = wc->level;
+ int ret;
+
+ /* We are UPDATE_BACKREF, we're not dropping anything. */
+ if (wc->stage == UPDATE_BACKREF)
+ return 0;
+
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
+ ref.parent = path->nodes[level]->start;
+ } else {
+ ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level]));
+ if (unlikely(btrfs_root_id(root) != btrfs_header_owner(path->nodes[level]))) {
+ btrfs_err(root->fs_info, "mismatched block owner");
+ return -EIO;
+ }
+ }
+
+ /*
+ * If we had a drop_progress we need to verify the refs are set as
+ * expected. If we find our ref then we know that from here on out
+ * everything should be correct, and we can clear the
+ * ->restarted flag.
+ */
+ if (wc->restarted) {
+ ret = check_ref_exists(trans, root, next->start, ref.parent,
+ level - 1);
+ if (ret <= 0)
+ return ret;
+ ret = 0;
+ wc->restarted = 0;
+ }
+
+ /*
+ * Reloc tree doesn't contribute to qgroup numbers, and we have already
+ * accounted them at merge time (replace_path), thus we could skip
+ * expensive subtree trace here.
+ */
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
+ wc->refs[level - 1] > 1) {
+ u64 generation = btrfs_node_ptr_generation(path->nodes[level],
+ path->slots[level]);
+
+ ret = btrfs_qgroup_trace_subtree(trans, next, generation, level - 1);
+ if (ret) {
+ btrfs_err_rl(root->fs_info,
+"error %d accounting shared subtree, quota is out of sync, rescan required",
+ ret);
+ }
+ }
+
+ /*
+ * We need to update the next key in our walk control so we can update
+ * the drop_progress key accordingly. We don't care if find_next_key
+ * doesn't find a key because that means we're at the end and are going
+ * to clean up now.
+ */
+ wc->drop_level = level;
+ find_next_key(path, level, &wc->drop_progress);
+
+ btrfs_init_tree_ref(&ref, level - 1, 0, false);
+ return btrfs_free_extent(trans, &ref);
}
/*
@@ -5171,20 +5693,15 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct walk_control *wc, int *lookup_info)
+ struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 generation;
- u64 parent;
- struct btrfs_key key;
- struct btrfs_key first_key;
- struct btrfs_ref ref = { 0 };
+ u64 owner_root = 0;
struct extent_buffer *next;
int level = wc->level;
- int reada = 0;
int ret = 0;
- bool need_account = false;
generation = btrfs_node_ptr_generation(path->nodes[level],
path->slots[level]);
@@ -5194,88 +5711,57 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
* for the subtree
*/
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset) {
- *lookup_info = 1;
+ generation <= btrfs_root_origin_generation(root)) {
+ wc->lookup_info = 1;
return 1;
}
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
- btrfs_node_key_to_cpu(path->nodes[level], &first_key,
- path->slots[level]);
- next = find_extent_buffer(fs_info, bytenr);
- if (!next) {
- next = btrfs_find_create_tree_block(fs_info, bytenr,
- root->root_key.objectid, level - 1);
- if (IS_ERR(next))
- return PTR_ERR(next);
- reada = 1;
- }
+ next = btrfs_find_create_tree_block(fs_info, bytenr, btrfs_root_id(root),
+ level - 1);
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
btrfs_tree_lock(next);
ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
&wc->refs[level - 1],
- &wc->flags[level - 1]);
+ &wc->flags[level - 1],
+ &owner_root);
if (ret < 0)
goto out_unlock;
if (unlikely(wc->refs[level - 1] == 0)) {
- btrfs_err(fs_info, "Missing references.");
- ret = -EIO;
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ bytenr);
+ ret = -EUCLEAN;
goto out_unlock;
}
- *lookup_info = 0;
+ wc->lookup_info = 0;
- if (wc->stage == DROP_REFERENCE) {
- if (wc->refs[level - 1] > 1) {
- need_account = true;
- if (level == 1 &&
- (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- goto skip;
-
- if (!wc->update_ref ||
- generation <= root->root_key.offset)
- goto skip;
-
- btrfs_node_key_to_cpu(path->nodes[level], &key,
- path->slots[level]);
- ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
- if (ret < 0)
- goto skip;
+ /* If we don't have to walk into this node skip it. */
+ if (!visit_node_for_delete(root, wc, path->nodes[level],
+ wc->flags[level - 1], path->slots[level]))
+ goto skip;
- wc->stage = UPDATE_BACKREF;
- wc->shared_level = level - 1;
- }
- } else {
- if (level == 1 &&
- (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- goto skip;
+ /*
+ * We have to walk down into this node, and if we're currently at the
+ * DROP_REFERENCE stage and this block is shared then we need to switch
+ * to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
+ */
+ if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {
+ wc->stage = UPDATE_BACKREF;
+ wc->shared_level = level - 1;
}
- if (!btrfs_buffer_uptodate(next, generation, 0)) {
- btrfs_tree_unlock(next);
- free_extent_buffer(next);
- next = NULL;
- *lookup_info = 1;
- }
-
- if (!next) {
- if (reada && level == 1)
- reada_walk_down(trans, root, wc, path);
- next = read_tree_block(fs_info, bytenr, root->root_key.objectid,
- generation, level - 1, &first_key);
- if (IS_ERR(next)) {
- return PTR_ERR(next);
- } else if (!extent_buffer_uptodate(next)) {
- free_extent_buffer(next);
- return -EIO;
- }
- btrfs_tree_lock(next);
- }
+ ret = check_next_block_uptodate(trans, root, path, wc, next);
+ if (ret)
+ return ret;
level--;
ASSERT(level == btrfs_header_level(next));
- if (level != btrfs_header_level(next)) {
+ if (unlikely(level != btrfs_header_level(next))) {
btrfs_err(root->fs_info, "mismatched level");
ret = -EIO;
goto out_unlock;
@@ -5288,76 +5774,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
wc->reada_slot = 0;
return 0;
skip:
+ ret = maybe_drop_reference(trans, root, path, wc, next, owner_root);
+ if (ret)
+ goto out_unlock;
wc->refs[level - 1] = 0;
wc->flags[level - 1] = 0;
- if (wc->stage == DROP_REFERENCE) {
- if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
- parent = path->nodes[level]->start;
- } else {
- ASSERT(root->root_key.objectid ==
- btrfs_header_owner(path->nodes[level]));
- if (root->root_key.objectid !=
- btrfs_header_owner(path->nodes[level])) {
- btrfs_err(root->fs_info,
- "mismatched block owner");
- ret = -EIO;
- goto out_unlock;
- }
- parent = 0;
- }
-
- /*
- * If we had a drop_progress we need to verify the refs are set
- * as expected. If we find our ref then we know that from here
- * on out everything should be correct, and we can clear the
- * ->restarted flag.
- */
- if (wc->restarted) {
- ret = check_ref_exists(trans, root, bytenr, parent,
- level - 1);
- if (ret < 0)
- goto out_unlock;
- if (ret == 0)
- goto no_delete;
- ret = 0;
- wc->restarted = 0;
- }
-
- /*
- * Reloc tree doesn't contribute to qgroup numbers, and we have
- * already accounted them at merge time (replace_path),
- * thus we could skip expensive subtree trace here.
- */
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
- need_account) {
- ret = btrfs_qgroup_trace_subtree(trans, next,
- generation, level - 1);
- if (ret) {
- btrfs_err_rl(fs_info,
- "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
- ret);
- }
- }
-
- /*
- * We need to update the next key in our walk control so we can
- * update the drop_progress key accordingly. We don't care if
- * find_next_key doesn't find a key because that means we're at
- * the end and are going to clean up now.
- */
- wc->drop_level = level;
- find_next_key(path, level, &wc->drop_progress);
-
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- fs_info->nodesize, parent);
- btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid,
- 0, false);
- ret = btrfs_free_extent(trans, &ref);
- if (ret)
- goto out_unlock;
- }
-no_delete:
- *lookup_info = 1;
+ wc->lookup_info = 1;
ret = 1;
out_unlock:
@@ -5385,13 +5807,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- int ret;
+ int ret = 0;
int level = wc->level;
struct extent_buffer *eb = path->nodes[level];
u64 parent = 0;
if (wc->stage == UPDATE_BACKREF) {
- BUG_ON(wc->shared_level < level);
+ ASSERT(wc->shared_level >= level);
if (level < wc->shared_level)
goto out;
@@ -5409,20 +5831,26 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
* count is one.
*/
if (!path->locks[level]) {
- BUG_ON(level == 0);
+ ASSERT(level > 0);
btrfs_tree_lock(eb);
path->locks[level] = BTRFS_WRITE_LOCK;
ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
- &wc->flags[level]);
+ &wc->flags[level],
+ NULL);
if (ret < 0) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
return ret;
}
- BUG_ON(wc->refs[level] == 0);
+ if (unlikely(wc->refs[level] == 0)) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ eb->start);
+ return -EUCLEAN;
+ }
if (wc->refs[level] == 1) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
@@ -5432,16 +5860,24 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
}
/* wc->stage == DROP_REFERENCE */
- BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
+ ASSERT(path->locks[level] || wc->refs[level] == 1);
if (wc->refs[level] == 1) {
if (level == 0) {
- if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
ret = btrfs_dec_ref(trans, root, eb, 1);
- else
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ } else {
ret = btrfs_dec_ref(trans, root, eb, 0);
- BUG_ON(ret); /* -ENOMEM */
- if (is_fstree(root->root_key.objectid)) {
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ }
+ if (btrfs_is_fstree(btrfs_root_id(root))) {
ret = btrfs_qgroup_trace_leaf_items(trans, eb);
if (ret) {
btrfs_err_rl(fs_info,
@@ -5450,52 +5886,75 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
}
}
}
- /* make block locked assertion in btrfs_clean_tree_block happy */
- if (!path->locks[level] &&
- btrfs_header_generation(eb) == trans->transid) {
+ /* Make block locked assertion in btrfs_clear_buffer_dirty happy. */
+ if (!path->locks[level]) {
btrfs_tree_lock(eb);
path->locks[level] = BTRFS_WRITE_LOCK;
}
- btrfs_clean_tree_block(eb);
+ btrfs_clear_buffer_dirty(trans, eb);
}
if (eb == root->node) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = eb->start;
- else if (root->root_key.objectid != btrfs_header_owner(eb))
+ else if (unlikely(btrfs_root_id(root) != btrfs_header_owner(eb)))
goto owner_mismatch;
} else {
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
- else if (root->root_key.objectid !=
- btrfs_header_owner(path->nodes[level + 1]))
+ else if (unlikely(btrfs_root_id(root) !=
+ btrfs_header_owner(path->nodes[level + 1])))
goto owner_mismatch;
}
- btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
+ wc->refs[level] == 1);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
out:
wc->refs[level] = 0;
wc->flags[level] = 0;
- return 0;
+ return ret;
owner_mismatch:
btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
- btrfs_header_owner(eb), root->root_key.objectid);
+ btrfs_header_owner(eb), btrfs_root_id(root));
return -EUCLEAN;
}
+/*
+ * walk_down_tree consists of two steps.
+ *
+ * walk_down_proc(). Look up the reference count and flags of our current
+ * wc->level. At this point path->nodes[wc->level] should be populated and
+ * uptodate, and in most cases should already be locked. If we are in
+ * DROP_REFERENCE and our refcount is > 1 then we've entered a shared node and
+ * we can walk back up the tree. If we are UPDATE_BACKREF we have to set
+ * FULL_BACKREF on this node if it's not already set, and then do the
+ * FULL_BACKREF conversion dance, which is to drop the root reference and add
+ * the shared reference to all of this node's children.
+ *
+ * do_walk_down(). This is where we actually start iterating on the children of
+ * our current path->nodes[wc->level]. For DROP_REFERENCE that means dropping
+ * our reference to the children that return false from visit_node_for_delete(),
+ * which has various conditions where we know we can just drop our reference
+ * without visiting the node. For UPDATE_BACKREF we will skip any children that
+ * visit_node_for_delete() returns false for, only walking down when necessary.
+ * The bulk of the work for UPDATE_BACKREF occurs in the walk_up_tree() part of
+ * snapshot deletion.
+ */
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc)
{
int level = wc->level;
- int lookup_info = 1;
- int ret;
+ int ret = 0;
+ wc->lookup_info = 1;
while (level >= 0) {
- ret = walk_down_proc(trans, root, path, wc, lookup_info);
- if (ret > 0)
+ ret = walk_down_proc(trans, root, path, wc);
+ if (ret)
break;
if (level == 0)
@@ -5505,17 +5964,34 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
btrfs_header_nritems(path->nodes[level]))
break;
- ret = do_walk_down(trans, root, path, wc, &lookup_info);
+ ret = do_walk_down(trans, root, path, wc);
if (ret > 0) {
path->slots[level]++;
continue;
} else if (ret < 0)
- return ret;
+ break;
level = wc->level;
}
- return 0;
+ return (ret == 1) ? 0 : ret;
}
+/*
+ * walk_up_tree() is responsible for making sure we visit every slot on our
+ * current node, and if we're at the end of that node then we call
+ * walk_up_proc() on our current node which will do one of a few things based on
+ * our stage.
+ *
+ * UPDATE_BACKREF. If our wc->level is currently less than our wc->shared_level
+ * then we need to walk back up the tree, and then go back down into the other
+ * slots via walk_down_tree() to update any other children from our original
+ * wc->shared_level. Once we're at or above our wc->shared_level we can switch
+ * back to DROP_REFERENCE, look up the current node's refs and flags, and carry on.
+ *
+ * DROP_REFERENCE. If our refs == 1 then we're going to free this tree block.
+ * If we're level 0 then we need to btrfs_dec_ref() on all of the data extents
+ * in our current leaf. After that we call btrfs_free_tree_block() on the
+ * current node and walk up to the next node to walk down the next slot.
+ */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -5562,35 +6038,36 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
* also make sure backrefs for the shared block and all lower level
* blocks are properly updated.
*
- * If called with for_reloc == 0, may exit early with -EAGAIN
+ * If called with for_reloc not set, this may exit early with -EAGAIN
*/
-int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
+int btrfs_drop_snapshot(struct btrfs_root *root, bool update_ref, bool for_reloc)
{
+ const bool is_reloc_root = (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
- struct walk_control *wc;
+ struct walk_control AUTO_KFREE(wc);
struct btrfs_key key;
- int err = 0;
- int ret;
+ const u64 rootid = btrfs_root_id(root);
+ int ret = 0;
int level;
bool root_dropped = false;
+ bool unfinished_drop = false;
- btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
+ btrfs_debug(fs_info, "Drop subvolume %llu", btrfs_root_id(root));
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
wc = kzalloc(sizeof(*wc), GFP_NOFS);
if (!wc) {
- btrfs_free_path(path);
- err = -ENOMEM;
- goto out;
+ ret = -ENOMEM;
+ goto out_free;
}
/*
@@ -5602,12 +6079,12 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
else
trans = btrfs_start_transaction(tree_root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_free;
}
- err = btrfs_run_delayed_items(trans);
- if (err)
+ ret = btrfs_run_delayed_items(trans);
+ if (ret)
goto out_end_trans;
/*
@@ -5619,6 +6096,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
* already dropped.
*/
set_bit(BTRFS_ROOT_DELETING, &root->state);
+ unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
+
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
@@ -5636,11 +6115,11 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
path->lowest_level = 0;
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_end_trans;
- }
+
WARN_ON(ret > 0);
+ ret = 0;
/*
* unlock our path, this is safe because only this
@@ -5653,14 +6132,17 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
btrfs_tree_lock(path->nodes[level]);
path->locks[level] = BTRFS_WRITE_LOCK;
+ /*
+ * btrfs_lookup_extent_info() returns 0 for success,
+ * or < 0 for error.
+ */
ret = btrfs_lookup_extent_info(trans, fs_info,
path->nodes[level]->start,
level, 1, &wc->refs[level],
- &wc->flags[level]);
- if (ret < 0) {
- err = ret;
+ &wc->flags[level], NULL);
+ if (ret < 0)
goto out_end_trans;
- }
+
BUG_ON(wc->refs[level] == 0);
if (level == btrfs_root_drop_level(root_item))
@@ -5684,19 +6166,20 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
while (1) {
ret = walk_down_tree(trans, root, path, wc);
- if (ret < 0) {
- err = ret;
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
break;
}
ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
- if (ret < 0) {
- err = ret;
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
break;
}
if (ret > 0) {
BUG_ON(wc->stage != DROP_REFERENCE);
+ ret = 0;
break;
}
@@ -5716,17 +6199,19 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
root_item);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
+ if (!is_reloc_root)
+ btrfs_set_last_root_drop_gen(fs_info, trans->transid);
+
btrfs_end_transaction_throttle(trans);
if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
btrfs_debug(fs_info,
"drop snapshot early exit");
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out_free;
}
@@ -5740,37 +6225,36 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
else
trans = btrfs_start_transaction(tree_root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_free;
}
}
}
btrfs_release_path(path);
- if (err)
+ if (ret)
goto out_end_trans;
ret = btrfs_del_root(trans, &root->root_key);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ if (!is_reloc_root) {
ret = btrfs_find_root(tree_root, &root->root_key, path,
NULL, NULL);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
} else if (ret > 0) {
- /* if we fail to delete the orphan item this time
+ ret = 0;
+ /*
+ * If we fail to delete the orphan item this time
* around, it'll get picked up the next time.
*
* The most common failure here is just -ENOENT.
*/
- btrfs_del_orphan_item(trans, tree_root,
- root->root_key.objectid);
+ btrfs_del_orphan_item(trans, tree_root, btrfs_root_id(root));
}
}
@@ -5788,11 +6272,28 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
btrfs_put_root(root);
root_dropped = true;
out_end_trans:
+ if (!is_reloc_root)
+ btrfs_set_last_root_drop_gen(fs_info, trans->transid);
+
btrfs_end_transaction_throttle(trans);
out_free:
- kfree(wc);
btrfs_free_path(path);
out:
+ if (!ret && root_dropped) {
+ ret = btrfs_qgroup_cleanup_dropped_subvolume(fs_info, rootid);
+ if (ret < 0)
+ btrfs_warn_rl(fs_info,
+ "failed to cleanup qgroup 0/%llu: %d",
+ rootid, ret);
+ ret = 0;
+ }
+ /*
+ * If we were an unfinished drop root, check to see if there are any others
+ * pending, and if not clear the flag and wake up any waiters.
+ */
+ if (!ret && unfinished_drop)
+ btrfs_maybe_wake_unfinished_drop(fs_info);
+
/*
* So if we need to stop dropping the snapshot for whatever reason we
* need to make sure to add it back to the dead root list so that we
@@ -5802,7 +6303,7 @@ out:
*/
if (!for_reloc && !root_dropped)
btrfs_add_dead_root(root);
- return err;
+ return ret;
}
/*
@@ -5817,28 +6318,25 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *parent)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
- struct walk_control *wc;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct walk_control AUTO_KFREE(wc);
int level;
int parent_level;
int ret = 0;
- int wret;
- BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ BUG_ON(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
- if (!wc) {
- btrfs_free_path(path);
+ if (!wc)
return -ENOMEM;
- }
btrfs_assert_tree_write_locked(parent);
parent_level = btrfs_header_level(parent);
- atomic_inc(&parent->refs);
+ refcount_inc(&parent->refs);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);
@@ -5858,62 +6356,28 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
- wret = walk_down_tree(trans, root, path, wc);
- if (wret < 0) {
- ret = wret;
- break;
- }
+ ret = walk_down_tree(trans, root, path, wc);
+ if (ret < 0)
+ return ret;
- wret = walk_up_tree(trans, root, path, wc, parent_level);
- if (wret < 0)
- ret = wret;
- if (wret != 0)
+ ret = walk_up_tree(trans, root, path, wc, parent_level);
+ if (ret) {
+ if (ret < 0)
+ return ret;
break;
+ }
}
- kfree(wc);
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
- * helper to account the unused space of all the readonly block group in the
- * space_info. takes mirrors into account.
+ * Unpin the extent range in an error context and don't add the space back.
+ * Errors are not propagated further.
*/
-u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
-{
- struct btrfs_block_group *block_group;
- u64 free_bytes = 0;
- int factor;
-
- /* It's df, we don't care if it's racy */
- if (list_empty(&sinfo->ro_bgs))
- return 0;
-
- spin_lock(&sinfo->lock);
- list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
- spin_lock(&block_group->lock);
-
- if (!block_group->ro) {
- spin_unlock(&block_group->lock);
- continue;
- }
-
- factor = btrfs_bg_type_to_factor(block_group->flags);
- free_bytes += (block_group->length -
- block_group->used) * factor;
-
- spin_unlock(&block_group->lock);
- }
- spin_unlock(&sinfo->lock);
-
- return free_bytes;
-}
-
-int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
- u64 start, u64 end)
+void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end)
{
- return unpin_extent_range(fs_info, start, end, false);
+ unpin_extent_range(fs_info, start, end, false);
}
/*
@@ -5938,13 +6402,13 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
*/
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
- u64 start = SZ_1M, len = 0, end = 0;
+ u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
int ret;
*trimmed = 0;
/* Discard not supported = nothing to do. */
- if (!blk_queue_discard(bdev_get_queue(device->bdev)))
+ if (!bdev_max_discard_sectors(device->bdev))
return 0;
/* Not writable = nothing to do. */
@@ -5965,25 +6429,25 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
if (ret)
break;
- find_first_clear_extent_bit(&device->alloc_state, start,
- &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&device->alloc_state, start,
+ &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
/* Check if there are any CHUNK_* bits left */
if (start > device->total_bytes) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- btrfs_warn_in_rcu(fs_info,
+ DEBUG_WARN();
+ btrfs_warn(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
start, end - start + 1,
- rcu_str_deref(device->name),
+ btrfs_dev_name(device),
device->total_bytes);
mutex_unlock(&fs_info->chunk_mutex);
ret = 0;
break;
}
- /* Ensure we skip the reserved area in the first 1M */
- start = max_t(u64, start, SZ_1M);
+ /* Ensure we skip the reserved space on each device. */
+ start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
/*
* If find_first_clear_extent_bit() finds a range that spans the
@@ -6004,9 +6468,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
ret = btrfs_issue_discard(device->bdev, start, len,
&bytes);
if (!ret)
- set_extent_bits(&device->alloc_state, start,
- start + bytes - 1,
- CHUNK_TRIMMED);
+ btrfs_set_extent_bit(&device->alloc_state, start,
+ start + bytes - 1, CHUNK_TRIMMED, NULL);
mutex_unlock(&fs_info->chunk_mutex);
if (ret)
@@ -6015,7 +6478,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
start += len;
*trimmed += bytes;
- if (fatal_signal_pending(current)) {
+ if (btrfs_trim_interrupted()) {
ret = -ERESTARTSYS;
break;
}
@@ -6051,6 +6514,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
int dev_ret = 0;
int ret = 0;
+ if (range->start == U64_MAX)
+ return -EINVAL;
+
/*
* Check range overflow if range->len is set.
* The default range->len is U64_MAX.
@@ -6071,13 +6537,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
if (end - start >= range->minlen) {
if (!btrfs_block_group_done(cache)) {
- ret = btrfs_cache_block_group(cache, 0);
- if (ret) {
- bg_failed++;
- bg_ret = ret;
- continue;
- }
- ret = btrfs_wait_block_group_cache_done(cache);
+ ret = btrfs_cache_block_group(cache, true);
if (ret) {
bg_failed++;
bg_ret = ret;
@@ -6110,13 +6570,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
continue;
ret = btrfs_trim_free_extents(device, &group_trimmed);
+
+ trimmed += group_trimmed;
if (ret) {
dev_failed++;
dev_ret = ret;
break;
}
-
- trimmed += group_trimmed;
}
mutex_unlock(&fs_devices->device_list_mutex);
diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
new file mode 100644
index 000000000000..71bb8109c969
--- /dev/null
+++ b/fs/btrfs/extent-tree.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_EXTENT_TREE_H
+#define BTRFS_EXTENT_TREE_H
+
+#include <linux/types.h>
+#include "block-group.h"
+#include "locking.h"
+
+struct extent_buffer;
+struct btrfs_free_cluster;
+struct btrfs_fs_info;
+struct btrfs_root;
+struct btrfs_path;
+struct btrfs_ref;
+struct btrfs_disk_key;
+struct btrfs_delayed_ref_head;
+struct btrfs_delayed_ref_root;
+struct btrfs_extent_inline_ref;
+
+enum btrfs_extent_allocation_policy {
+ BTRFS_EXTENT_ALLOC_CLUSTERED,
+ BTRFS_EXTENT_ALLOC_ZONED,
+};
+
+struct find_free_extent_ctl {
+ /* Basic allocation info */
+ u64 ram_bytes;
+ u64 num_bytes;
+ u64 min_alloc_size;
+ u64 empty_size;
+ u64 flags;
+
+ /* Where to start the search inside the bg */
+ u64 search_start;
+
+ /* For clustered allocation */
+ u64 empty_cluster;
+ struct btrfs_free_cluster *last_ptr;
+ bool use_cluster;
+
+ bool delalloc;
+ bool have_caching_bg;
+ bool orig_have_caching_bg;
+
+ /* Allocation is called for tree-log */
+ bool for_treelog;
+
+ /* Allocation is called for data relocation */
+ bool for_data_reloc;
+
+ /*
+ * Set to true if we're retrying the allocation on this block group
+ * after waiting for caching progress, this is so that we retry only
+ * once before moving on to another block group.
+ */
+ bool retry_uncached;
+
+ /* Whether or not the allocator is currently following a hint. */
+ bool hinted;
+
+ /* RAID index, converted from flags */
+ int index;
+
+ /*
+ * Current loop number, check find_free_extent_update_loop() for details
+ */
+ int loop;
+
+ /* If current block group is cached */
+ int cached;
+
+ /* Max contiguous hole found */
+ u64 max_extent_size;
+
+ /* Total free space from free space cache, not always contiguous */
+ u64 total_free_space;
+
+ /* Found result */
+ u64 found_offset;
+
+ /* Hint where to start looking for an empty space */
+ u64 hint_byte;
+
+ /* Allocation policy */
+ enum btrfs_extent_allocation_policy policy;
+
+ /* Size class of block groups to prefer in early loops */
+ enum btrfs_block_group_size_class size_class;
+};
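+
+/*
+ * Editor's note: illustrative only, not part of this header. A sketch of how
+ * the basic fields might be filled for a simple clustered allocation request;
+ * the authoritative initialization is done by the allocator code in
+ * extent-tree.c.
+ *
+ *	struct find_free_extent_ctl ffe_ctl = {
+ *		.ram_bytes	= num_bytes,
+ *		.num_bytes	= num_bytes,
+ *		.min_alloc_size	= min_alloc_size,
+ *		.empty_size	= empty_size,
+ *		.flags		= flags,
+ *		.delalloc	= delalloc,
+ *		.hint_byte	= hint_byte,
+ *		.policy		= BTRFS_EXTENT_ALLOC_CLUSTERED,
+ *	};
+ */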
+
+enum btrfs_inline_ref_type {
+ BTRFS_REF_TYPE_INVALID,
+ BTRFS_REF_TYPE_BLOCK,
+ BTRFS_REF_TYPE_DATA,
+ BTRFS_REF_TYPE_ANY,
+};
+
+int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
+ const struct btrfs_extent_inline_ref *iref,
+ enum btrfs_inline_ref_type is_data);
+u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
+
+int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes);
+u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
+int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 bytenr,
+ u64 offset, int metadata, u64 *refs, u64 *flags,
+ u64 *owner_root);
+int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num);
+int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
+ const struct extent_buffer *eb);
+int btrfs_exclude_logged_extents(struct extent_buffer *eb);
+int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset, u64 bytenr,
+ struct btrfs_path *path);
+struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent, u64 root_objectid,
+ const struct btrfs_disk_key *key,
+ int level, u64 hint,
+ u64 empty_size,
+ u64 reloc_src_root,
+ enum btrfs_lock_nesting nest);
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ u64 root_id,
+ struct extent_buffer *buf,
+ u64 parent, int last_ref);
+int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 owner,
+ u64 offset, u64 ram_bytes,
+ struct btrfs_key *ins);
+int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
+ u64 root_objectid, u64 owner, u64 offset,
+ struct btrfs_key *ins);
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
+ u64 min_alloc_size, u64 empty_size, u64 hint_byte,
+ struct btrfs_key *ins, bool is_data, bool delalloc);
+int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct extent_buffer *buf, bool full_backref);
+int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct extent_buffer *buf, bool full_backref);
+int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
+ struct extent_buffer *eb, u64 flags);
+int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
+
+u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf, int slot);
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len,
+ bool is_delalloc);
+int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
+ const struct extent_buffer *eb);
+int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
+int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref);
+int btrfs_drop_snapshot(struct btrfs_root *root, bool update_ref, bool for_reloc);
+int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *node,
+ struct extent_buffer *parent);
+void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
+ u64 num_bytes, u64 *actual_bytes);
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
+
+#endif
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4e03a6d3aa32..629fd5af4286 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -6,62 +6,56 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
+#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
-#include <linux/cleancache.h>
#include <linux/fsverity.h>
-#include "misc.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
-#include "volumes.h"
-#include "check-integrity.h"
+#include "bio.h"
#include "locking.h"
-#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"
+#include "compression.h"
+#include "fs.h"
+#include "accessors.h"
+#include "file-item.h"
+#include "file.h"
+#include "dev-replace.h"
+#include "super.h"
+#include "transaction.h"
-static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
-static struct bio_set btrfs_bioset;
-
-static inline bool extent_state_in_tree(const struct extent_state *state)
-{
- return !RB_EMPTY_NODE(&state->rb_node);
-}
#ifdef CONFIG_BTRFS_DEBUG
-static LIST_HEAD(states);
-static DEFINE_SPINLOCK(leak_lock);
-
-static inline void btrfs_leak_debug_add(spinlock_t *lock,
- struct list_head *new,
- struct list_head *head)
+static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
unsigned long flags;
- spin_lock_irqsave(lock, flags);
- list_add(new, head);
- spin_unlock_irqrestore(lock, flags);
+ spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
+ list_add(&eb->leak_list, &fs_info->allocated_ebs);
+ spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
-static inline void btrfs_leak_debug_del(spinlock_t *lock,
- struct list_head *entry)
+static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
unsigned long flags;
- spin_lock_irqsave(lock, flags);
- list_del(entry);
- spin_unlock_irqrestore(lock, flags);
+ spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
+ list_del(&eb->leak_list);
+ spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
@@ -76,1753 +70,179 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
if (!fs_info->allocated_ebs.next)
return;
+ WARN_ON(!list_empty(&fs_info->allocated_ebs));
spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
while (!list_empty(&fs_info->allocated_ebs)) {
eb = list_first_entry(&fs_info->allocated_ebs,
struct extent_buffer, leak_list);
- pr_err(
- "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
- eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
+ btrfs_err(fs_info,
+ "buffer leak start %llu len %u refs %d bflags %lu owner %llu",
+ eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
btrfs_header_owner(eb));
list_del(&eb->leak_list);
+ WARN_ON_ONCE(1);
kmem_cache_free(extent_buffer_cache, eb);
}
spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
-
-static inline void btrfs_extent_state_leak_debug_check(void)
-{
- struct extent_state *state;
-
- while (!list_empty(&states)) {
- state = list_entry(states.next, struct extent_state, leak_list);
- pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
- state->start, state->end, state->state,
- extent_state_in_tree(state),
- refcount_read(&state->refs));
- list_del(&state->leak_list);
- kmem_cache_free(extent_state_cache, state);
- }
-}
-
-#define btrfs_debug_check_extent_io_range(tree, start, end) \
- __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
-static inline void __btrfs_debug_check_extent_io_range(const char *caller,
- struct extent_io_tree *tree, u64 start, u64 end)
-{
- struct inode *inode = tree->private_data;
- u64 isize;
-
- if (!inode || !is_data_inode(inode))
- return;
-
- isize = i_size_read(inode);
- if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
- btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
- "%s: ino %llu isize %llu odd range [%llu,%llu]",
- caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
- }
-}
#else
-#define btrfs_leak_debug_add(lock, new, head) do {} while (0)
-#define btrfs_leak_debug_del(lock, entry) do {} while (0)
-#define btrfs_extent_state_leak_debug_check() do {} while (0)
-#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
+#define btrfs_leak_debug_add_eb(eb) do {} while (0)
+#define btrfs_leak_debug_del_eb(eb) do {} while (0)
#endif
-struct tree_entry {
- u64 start;
- u64 end;
- struct rb_node rb_node;
-};
-
-struct extent_page_data {
- struct btrfs_bio_ctrl bio_ctrl;
- /* tells writepage not to lock the state bits for this range
- * it still does the unlocking
- */
- unsigned int extent_locked:1;
-
- /* tells the submit_bio code to use REQ_SYNC */
- unsigned int sync_io:1;
-};
-
-static int add_extent_changeset(struct extent_state *state, u32 bits,
- struct extent_changeset *changeset,
- int set)
-{
- int ret;
-
- if (!changeset)
- return 0;
- if (set && (state->state & bits) == bits)
- return 0;
- if (!set && (state->state & bits) == 0)
- return 0;
- changeset->bytes_changed += state->end - state->start + 1;
- ret = ulist_add(&changeset->range_changed, state->start, state->end,
- GFP_ATOMIC);
- return ret;
-}
-
-int __must_check submit_one_bio(struct bio *bio, int mirror_num,
- unsigned long bio_flags)
-{
- blk_status_t ret = 0;
- struct extent_io_tree *tree = bio->bi_private;
-
- bio->bi_private = NULL;
-
- /* Caller should ensure the bio has at least some range added */
- ASSERT(bio->bi_iter.bi_size);
- if (is_data_inode(tree->private_data))
- ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
- bio_flags);
- else
- ret = btrfs_submit_metadata_bio(tree->private_data, bio,
- mirror_num, bio_flags);
-
- return blk_status_to_errno(ret);
-}
-
-/* Cleanup unsubmitted bios */
-static void end_write_bio(struct extent_page_data *epd, int ret)
-{
- struct bio *bio = epd->bio_ctrl.bio;
-
- if (bio) {
- bio->bi_status = errno_to_blk_status(ret);
- bio_endio(bio);
- epd->bio_ctrl.bio = NULL;
- }
-}
-
-/*
- * Submit bio from extent page data via submit_one_bio
- *
- * Return 0 if everything is OK.
- * Return <0 for error.
- */
-static int __must_check flush_write_bio(struct extent_page_data *epd)
-{
- int ret = 0;
- struct bio *bio = epd->bio_ctrl.bio;
-
- if (bio) {
- ret = submit_one_bio(bio, 0, 0);
- /*
- * Clean up of epd->bio is handled by its endio function.
- * And endio is either triggered by successful bio execution
- * or the error handler of submit bio hook.
- * So at this point, no matter what happened, we don't need
- * to clean up epd->bio.
- */
- epd->bio_ctrl.bio = NULL;
- }
- return ret;
-}
-
-int __init extent_state_cache_init(void)
-{
- extent_state_cache = kmem_cache_create("btrfs_extent_state",
- sizeof(struct extent_state), 0,
- SLAB_MEM_SPREAD, NULL);
- if (!extent_state_cache)
- return -ENOMEM;
- return 0;
-}
-
-int __init extent_io_init(void)
-{
- extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
- sizeof(struct extent_buffer), 0,
- SLAB_MEM_SPREAD, NULL);
- if (!extent_buffer_cache)
- return -ENOMEM;
-
- if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
- offsetof(struct btrfs_bio, bio),
- BIOSET_NEED_BVECS))
- goto free_buffer_cache;
-
- if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
- goto free_bioset;
-
- return 0;
-
-free_bioset:
- bioset_exit(&btrfs_bioset);
-
-free_buffer_cache:
- kmem_cache_destroy(extent_buffer_cache);
- extent_buffer_cache = NULL;
- return -ENOMEM;
-}
-
-void __cold extent_state_cache_exit(void)
-{
- btrfs_extent_state_leak_debug_check();
- kmem_cache_destroy(extent_state_cache);
-}
-
-void __cold extent_io_exit(void)
-{
- /*
- * Make sure all delayed rcu free are flushed before we
- * destroy caches.
- */
- rcu_barrier();
- kmem_cache_destroy(extent_buffer_cache);
- bioset_exit(&btrfs_bioset);
-}
-
-/*
- * For the file_extent_tree, we want to hold the inode lock when we lookup and
- * update the disk_i_size, but lockdep will complain because our io_tree we hold
- * the tree lock and get the inode lock when setting delalloc. These two things
- * are unrelated, so make a class for the file_extent_tree so we don't get the
- * two locking patterns mixed up.
- */
-static struct lock_class_key file_extent_tree_class;
-
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *tree, unsigned int owner,
- void *private_data)
-{
- tree->fs_info = fs_info;
- tree->state = RB_ROOT;
- tree->dirty_bytes = 0;
- spin_lock_init(&tree->lock);
- tree->private_data = private_data;
- tree->owner = owner;
- if (owner == IO_TREE_INODE_FILE_EXTENT)
- lockdep_set_class(&tree->lock, &file_extent_tree_class);
-}
-
-void extent_io_tree_release(struct extent_io_tree *tree)
-{
- spin_lock(&tree->lock);
- /*
- * Do a single barrier for the waitqueue_active check here, the state
- * of the waitqueue should not change once extent_io_tree_release is
- * called.
- */
- smp_mb();
- while (!RB_EMPTY_ROOT(&tree->state)) {
- struct rb_node *node;
- struct extent_state *state;
-
- node = rb_first(&tree->state);
- state = rb_entry(node, struct extent_state, rb_node);
- rb_erase(&state->rb_node, &tree->state);
- RB_CLEAR_NODE(&state->rb_node);
- /*
- * btree io trees aren't supposed to have tasks waiting for
- * changes in the flags of extent states ever.
- */
- ASSERT(!waitqueue_active(&state->wq));
- free_extent_state(state);
-
- cond_resched_lock(&tree->lock);
- }
- spin_unlock(&tree->lock);
-}
-
-static struct extent_state *alloc_extent_state(gfp_t mask)
-{
- struct extent_state *state;
-
- /*
- * The given mask might be not appropriate for the slab allocator,
- * drop the unsupported bits
- */
- mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
- state = kmem_cache_alloc(extent_state_cache, mask);
- if (!state)
- return state;
- state->state = 0;
- state->failrec = NULL;
- RB_CLEAR_NODE(&state->rb_node);
- btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
- refcount_set(&state->refs, 1);
- init_waitqueue_head(&state->wq);
- trace_alloc_extent_state(state, mask, _RET_IP_);
- return state;
-}
-
-void free_extent_state(struct extent_state *state)
-{
- if (!state)
- return;
- if (refcount_dec_and_test(&state->refs)) {
- WARN_ON(extent_state_in_tree(state));
- btrfs_leak_debug_del(&leak_lock, &state->leak_list);
- trace_free_extent_state(state, _RET_IP_);
- kmem_cache_free(extent_state_cache, state);
- }
-}
-
-static struct rb_node *tree_insert(struct rb_root *root,
- struct rb_node *search_start,
- u64 offset,
- struct rb_node *node,
- struct rb_node ***p_in,
- struct rb_node **parent_in)
-{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct tree_entry *entry;
-
- if (p_in && parent_in) {
- p = *p_in;
- parent = *parent_in;
- goto do_insert;
- }
-
- p = search_start ? &search_start : &root->rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct tree_entry, rb_node);
-
- if (offset < entry->start)
- p = &(*p)->rb_left;
- else if (offset > entry->end)
- p = &(*p)->rb_right;
- else
- return parent;
- }
-
-do_insert:
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
-}
-
-/**
- * Search @tree for an entry that contains @offset. Such entry would have
- * entry->start <= offset && entry->end >= offset.
- *
- * @tree: the tree to search
- * @offset: offset that should fall within an entry in @tree
- * @next_ret: pointer to the first entry whose range ends after @offset
- * @prev_ret: pointer to the first entry whose range begins before @offset
- * @p_ret: pointer where new node should be anchored (used when inserting an
- * entry in the tree)
- * @parent_ret: points to entry which would have been the parent of the entry,
- * containing @offset
- *
- * This function returns a pointer to the entry that contains @offset byte
- * address. If no such entry exists, then NULL is returned and the other
- * pointer arguments to the function are filled, otherwise the found entry is
- * returned and other pointers are left untouched.
- */
-static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
- struct rb_node **next_ret,
- struct rb_node **prev_ret,
- struct rb_node ***p_ret,
- struct rb_node **parent_ret)
-{
- struct rb_root *root = &tree->state;
- struct rb_node **n = &root->rb_node;
- struct rb_node *prev = NULL;
- struct rb_node *orig_prev = NULL;
- struct tree_entry *entry;
- struct tree_entry *prev_entry = NULL;
-
- while (*n) {
- prev = *n;
- entry = rb_entry(prev, struct tree_entry, rb_node);
- prev_entry = entry;
-
- if (offset < entry->start)
- n = &(*n)->rb_left;
- else if (offset > entry->end)
- n = &(*n)->rb_right;
- else
- return *n;
- }
-
- if (p_ret)
- *p_ret = n;
- if (parent_ret)
- *parent_ret = prev;
-
- if (next_ret) {
- orig_prev = prev;
- while (prev && offset > prev_entry->end) {
- prev = rb_next(prev);
- prev_entry = rb_entry(prev, struct tree_entry, rb_node);
- }
- *next_ret = prev;
- prev = orig_prev;
- }
-
- if (prev_ret) {
- prev_entry = rb_entry(prev, struct tree_entry, rb_node);
- while (prev && offset < prev_entry->start) {
- prev = rb_prev(prev);
- prev_entry = rb_entry(prev, struct tree_entry, rb_node);
- }
- *prev_ret = prev;
- }
- return NULL;
-}
-
-static inline struct rb_node *
-tree_search_for_insert(struct extent_io_tree *tree,
- u64 offset,
- struct rb_node ***p_ret,
- struct rb_node **parent_ret)
-{
- struct rb_node *next= NULL;
- struct rb_node *ret;
-
- ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
- if (!ret)
- return next;
- return ret;
-}
-
-static inline struct rb_node *tree_search(struct extent_io_tree *tree,
- u64 offset)
-{
- return tree_search_for_insert(tree, offset, NULL, NULL);
-}
-
-/*
- * utility function to look for merge candidates inside a given range.
- * Any extents with matching state are merged together into a single
- * extent in the tree. Extents with EXTENT_IO in their state field
- * are not merged because the end_io handlers need to be able to do
- * operations on them without sleeping (or doing allocations/splits).
- *
- * This should be called with the tree lock held.
- */
-static void merge_state(struct extent_io_tree *tree,
- struct extent_state *state)
-{
- struct extent_state *other;
- struct rb_node *other_node;
-
- if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
- return;
-
- other_node = rb_prev(&state->rb_node);
- if (other_node) {
- other = rb_entry(other_node, struct extent_state, rb_node);
- if (other->end == state->start - 1 &&
- other->state == state->state) {
- if (tree->private_data &&
- is_data_inode(tree->private_data))
- btrfs_merge_delalloc_extent(tree->private_data,
- state, other);
- state->start = other->start;
- rb_erase(&other->rb_node, &tree->state);
- RB_CLEAR_NODE(&other->rb_node);
- free_extent_state(other);
- }
- }
- other_node = rb_next(&state->rb_node);
- if (other_node) {
- other = rb_entry(other_node, struct extent_state, rb_node);
- if (other->start == state->end + 1 &&
- other->state == state->state) {
- if (tree->private_data &&
- is_data_inode(tree->private_data))
- btrfs_merge_delalloc_extent(tree->private_data,
- state, other);
- state->end = other->end;
- rb_erase(&other->rb_node, &tree->state);
- RB_CLEAR_NODE(&other->rb_node);
- free_extent_state(other);
- }
- }
-}
-
-static void set_state_bits(struct extent_io_tree *tree,
- struct extent_state *state, u32 *bits,
- struct extent_changeset *changeset);
-
/*
- * insert an extent_state struct into the tree. 'bits' are set on the
- * struct before it is inserted.
- *
- * This may return -EEXIST if the extent is already there, in which case the
- * state struct is freed.
- *
- * The tree lock is not taken internally. This is a utility function and
- * probably isn't what you want to call (see set/clear_extent_bit).
+ * Structure to record info about the bio being assembled, and other info like
+ * how many bytes remain before the stripe/ordered extent boundary.
*/
-static int insert_state(struct extent_io_tree *tree,
- struct extent_state *state, u64 start, u64 end,
- struct rb_node ***p,
- struct rb_node **parent,
- u32 *bits, struct extent_changeset *changeset)
-{
- struct rb_node *node;
-
- if (end < start) {
- btrfs_err(tree->fs_info,
- "insert state: end < start %llu %llu", end, start);
- WARN_ON(1);
- }
- state->start = start;
- state->end = end;
-
- set_state_bits(tree, state, bits, changeset);
-
- node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
- if (node) {
- struct extent_state *found;
- found = rb_entry(node, struct extent_state, rb_node);
- btrfs_err(tree->fs_info,
- "found node %llu %llu on insert of %llu %llu",
- found->start, found->end, start, end);
- return -EEXIST;
- }
- merge_state(tree, state);
- return 0;
-}
-
-/*
- * split a given extent state struct in two, inserting the preallocated
- * struct 'prealloc' as the newly created second half. 'split' indicates an
- * offset inside 'orig' where it should be split.
- *
- * Before calling,
- * the tree has 'orig' at [orig->start, orig->end]. After calling, there
- * are two extent state structs in the tree:
- * prealloc: [orig->start, split - 1]
- * orig: [ split, orig->end ]
- *
- * The tree locks are not taken by this function. They need to be held
- * by the caller.
- */
-static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
- struct extent_state *prealloc, u64 split)
-{
- struct rb_node *node;
-
- if (tree->private_data && is_data_inode(tree->private_data))
- btrfs_split_delalloc_extent(tree->private_data, orig, split);
-
- prealloc->start = orig->start;
- prealloc->end = split - 1;
- prealloc->state = orig->state;
- orig->start = split;
-
- node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
- &prealloc->rb_node, NULL, NULL);
- if (node) {
- free_extent_state(prealloc);
- return -EEXIST;
- }
- return 0;
-}
-
-static struct extent_state *next_state(struct extent_state *state)
-{
- struct rb_node *next = rb_next(&state->rb_node);
- if (next)
- return rb_entry(next, struct extent_state, rb_node);
- else
- return NULL;
-}
-
-/*
- * utility function to clear some bits in an extent state struct.
- * it will optionally wake up anyone waiting on this state (wake == 1).
- *
- * If no bits are set on the state struct after clearing things, the
- * struct is freed and removed from the tree
- */
-static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
- struct extent_state *state,
- u32 *bits, int wake,
- struct extent_changeset *changeset)
-{
- struct extent_state *next;
- u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
- int ret;
-
- if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
- u64 range = state->end - state->start + 1;
- WARN_ON(range > tree->dirty_bytes);
- tree->dirty_bytes -= range;
- }
-
- if (tree->private_data && is_data_inode(tree->private_data))
- btrfs_clear_delalloc_extent(tree->private_data, state, bits);
-
- ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
- BUG_ON(ret < 0);
- state->state &= ~bits_to_clear;
- if (wake)
- wake_up(&state->wq);
- if (state->state == 0) {
- next = next_state(state);
- if (extent_state_in_tree(state)) {
- rb_erase(&state->rb_node, &tree->state);
- RB_CLEAR_NODE(&state->rb_node);
- free_extent_state(state);
- } else {
- WARN_ON(1);
- }
- } else {
- merge_state(tree, state);
- next = next_state(state);
- }
- return next;
-}
-
-static struct extent_state *
-alloc_extent_state_atomic(struct extent_state *prealloc)
-{
- if (!prealloc)
- prealloc = alloc_extent_state(GFP_ATOMIC);
-
- return prealloc;
-}
-
-static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
-{
- btrfs_panic(tree->fs_info, err,
- "locking error: extent tree was modified by another thread while locked");
-}
-
-/*
- * clear some bits on a range in the tree. This may require splitting
- * or inserting elements in the tree, so the gfp mask is used to
- * indicate which allocations or sleeping are allowed.
- *
- * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
- * the given range from the tree regardless of state (ie for truncate).
- *
- * the range [start, end] is inclusive.
- *
- * This takes the tree lock, and returns 0 on success and < 0 on error.
- */
-int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int wake, int delete,
- struct extent_state **cached_state,
- gfp_t mask, struct extent_changeset *changeset)
-{
- struct extent_state *state;
- struct extent_state *cached;
- struct extent_state *prealloc = NULL;
- struct rb_node *node;
- u64 last_end;
- int err;
- int clear = 0;
-
- btrfs_debug_check_extent_io_range(tree, start, end);
- trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
-
- if (bits & EXTENT_DELALLOC)
- bits |= EXTENT_NORESERVE;
-
- if (delete)
- bits |= ~EXTENT_CTLBITS;
-
- if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
- clear = 1;
-again:
- if (!prealloc && gfpflags_allow_blocking(mask)) {
- /*
- * Don't care for allocation failure here because we might end
- * up not needing the pre-allocated extent state at all, which
- * is the case if we only have in the tree extent states that
- * cover our input range and don't cover too any other range.
- * If we end up needing a new extent state we allocate it later.
- */
- prealloc = alloc_extent_state(mask);
- }
-
- spin_lock(&tree->lock);
- if (cached_state) {
- cached = *cached_state;
-
- if (clear) {
- *cached_state = NULL;
- cached_state = NULL;
- }
-
- if (cached && extent_state_in_tree(cached) &&
- cached->start <= start && cached->end > start) {
- if (clear)
- refcount_dec(&cached->refs);
- state = cached;
- goto hit_next;
- }
- if (clear)
- free_extent_state(cached);
- }
- /*
- * this search will find the extents that end after
- * our range starts
- */
- node = tree_search(tree, start);
- if (!node)
- goto out;
- state = rb_entry(node, struct extent_state, rb_node);
-hit_next:
- if (state->start > end)
- goto out;
- WARN_ON(state->end < start);
- last_end = state->end;
-
- /* the state doesn't have the wanted bits, go ahead */
- if (!(state->state & bits)) {
- state = next_state(state);
- goto next;
- }
-
+struct btrfs_bio_ctrl {
+ struct btrfs_bio *bbio;
+ /* Last byte contained in bbio + 1. */
+ loff_t next_file_offset;
+ enum btrfs_compression_type compress_type;
+ u32 len_to_oe_boundary;
+ blk_opf_t opf;
/*
- * | ---- desired range ---- |
- * | state | or
- * | ------------- state -------------- |
+ * For data read bios, we attempt to optimize csum lookups if the extent
+ * generation is older than the current one. To make this possible, we
+ * need to track the maximum generation of an extent in a bio_ctrl to
+ * make the decision when submitting the bio.
*
- * We need to split the extent we found, and may flip
- * bits on second half.
+ * The pattern between do_readpage(), submit_one_bio() and
+ * submit_extent_folio() is quite subtle, so tracking this is tricky.
*
- * If the extent we found extends past our range, we
- * just split and search again. It'll get split again
- * the next time though.
+ * As we process extent E, we might submit a bio with existing built-up
+ * extents before adding E to a new bio, or we might just add E to the
+ * bio. As a result, E's generation could apply to the current bio or
+ * to the next one, so we need to be careful to update the bio_ctrl's
+ * generation with E's only when we are sure E is added to bio_ctrl->bbio
+ * in submit_extent_folio().
*
- * If the extent we found is inside our range, we clear
- * the desired bit on it.
+ * See the comment in btrfs_lookup_bio_sums() for more detail on the
+ * need for this optimization.
*/
+ u64 generation;
+ btrfs_bio_end_io_t end_io_func;
+ struct writeback_control *wbc;
- if (state->start < start) {
- prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
- err = split_state(tree, state, prealloc, start);
- if (err)
- extent_io_tree_panic(tree, err);
-
- prealloc = NULL;
- if (err)
- goto out;
- if (state->end <= end) {
- state = clear_state_bit(tree, state, &bits, wake,
- changeset);
- goto next;
- }
- goto search_again;
- }
/*
- * | ---- desired range ---- |
- * | state |
- * We need to split the extent, and clear the bit
- * on the first half
+ * The sectors of the page which are going to be submitted by
+ * extent_writepage_io().
+ * This is to avoid touching ranges covered by compression/inline.
*/
- if (state->start <= end && state->end > end) {
- prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
- err = split_state(tree, state, prealloc, end + 1);
- if (err)
- extent_io_tree_panic(tree, err);
-
- if (wake)
- wake_up(&state->wq);
-
- clear_state_bit(tree, prealloc, &bits, wake, changeset);
-
- prealloc = NULL;
- goto out;
- }
-
- state = clear_state_bit(tree, state, &bits, wake, changeset);
-next:
- if (last_end == (u64)-1)
- goto out;
- start = last_end + 1;
- if (start <= end && state && !need_resched())
- goto hit_next;
-
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
- goto again;
-
-out:
- spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
-
- return 0;
-
-}
-
-static void wait_on_state(struct extent_io_tree *tree,
- struct extent_state *state)
- __releases(tree->lock)
- __acquires(tree->lock)
-{
- DEFINE_WAIT(wait);
- prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&tree->lock);
- schedule();
- spin_lock(&tree->lock);
- finish_wait(&state->wq, &wait);
-}
-
-/*
- * waits for one or more bits to clear on a range in the state tree.
- * The range [start, end] is inclusive.
- * The tree lock is taken by this function
- */
-static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits)
-{
- struct extent_state *state;
- struct rb_node *node;
-
- btrfs_debug_check_extent_io_range(tree, start, end);
-
- spin_lock(&tree->lock);
-again:
- while (1) {
- /*
- * this search will find all the extents that end after
- * our range starts
- */
- node = tree_search(tree, start);
-process_node:
- if (!node)
- break;
-
- state = rb_entry(node, struct extent_state, rb_node);
-
- if (state->start > end)
- goto out;
-
- if (state->state & bits) {
- start = state->start;
- refcount_inc(&state->refs);
- wait_on_state(tree, state);
- free_extent_state(state);
- goto again;
- }
- start = state->end + 1;
-
- if (start > end)
- break;
-
- if (!cond_resched_lock(&tree->lock)) {
- node = rb_next(node);
- goto process_node;
- }
- }
-out:
- spin_unlock(&tree->lock);
-}
-
-static void set_state_bits(struct extent_io_tree *tree,
- struct extent_state *state,
- u32 *bits, struct extent_changeset *changeset)
-{
- u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
- int ret;
-
- if (tree->private_data && is_data_inode(tree->private_data))
- btrfs_set_delalloc_extent(tree->private_data, state, bits);
-
- if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
- u64 range = state->end - state->start + 1;
- tree->dirty_bytes += range;
- }
- ret = add_extent_changeset(state, bits_to_set, changeset, 1);
- BUG_ON(ret < 0);
- state->state |= bits_to_set;
-}
-
-static void cache_state_if_flags(struct extent_state *state,
- struct extent_state **cached_ptr,
- unsigned flags)
-{
- if (cached_ptr && !(*cached_ptr)) {
- if (!flags || (state->state & flags)) {
- *cached_ptr = state;
- refcount_inc(&state->refs);
- }
- }
-}
-
-static void cache_state(struct extent_state *state,
- struct extent_state **cached_ptr)
-{
- return cache_state_if_flags(state, cached_ptr,
- EXTENT_LOCKED | EXTENT_BOUNDARY);
-}
-
-/*
- * set some bits on a range in the tree. This may require allocations or
- * sleeping, so the gfp mask is used to indicate what is allowed.
- *
- * If any of the exclusive bits are set, this will fail with -EEXIST if some
- * part of the range already has the desired bits set. The start of the
- * existing range is returned in failed_start in this case.
- *
- * [start, end] is inclusive This takes the tree lock.
- */
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- u32 exclusive_bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask,
- struct extent_changeset *changeset)
-{
- struct extent_state *state;
- struct extent_state *prealloc = NULL;
- struct rb_node *node;
- struct rb_node **p;
- struct rb_node *parent;
- int err = 0;
- u64 last_start;
- u64 last_end;
-
- btrfs_debug_check_extent_io_range(tree, start, end);
- trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
-
- if (exclusive_bits)
- ASSERT(failed_start);
- else
- ASSERT(failed_start == NULL);
-again:
- if (!prealloc && gfpflags_allow_blocking(mask)) {
- /*
- * Don't care for allocation failure here because we might end
- * up not needing the pre-allocated extent state at all, which
- * is the case if we only have in the tree extent states that
- * cover our input range and don't cover too any other range.
- * If we end up needing a new extent state we allocate it later.
- */
- prealloc = alloc_extent_state(mask);
- }
-
- spin_lock(&tree->lock);
- if (cached_state && *cached_state) {
- state = *cached_state;
- if (state->start <= start && state->end > start &&
- extent_state_in_tree(state)) {
- node = &state->rb_node;
- goto hit_next;
- }
- }
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search_for_insert(tree, start, &p, &parent);
- if (!node) {
- prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
- err = insert_state(tree, prealloc, start, end,
- &p, &parent, &bits, changeset);
- if (err)
- extent_io_tree_panic(tree, err);
-
- cache_state(prealloc, cached_state);
- prealloc = NULL;
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
-hit_next:
- last_start = state->start;
- last_end = state->end;
+ unsigned long submit_bitmap;
+ struct readahead_control *ractl;
/*
- * | ---- desired range ---- |
- * | state |
+ * The start offset of the last used extent map by a read operation.
*
- * Just lock what we found and keep going
- */
- if (state->start == start && state->end <= end) {
- if (state->state & exclusive_bits) {
- *failed_start = state->start;
- err = -EEXIST;
- goto out;
- }
-
- set_state_bits(tree, state, &bits, changeset);
- cache_state(state, cached_state);
- merge_state(tree, state);
- if (last_end == (u64)-1)
- goto out;
- start = last_end + 1;
- state = next_state(state);
- if (start < end && state && state->start == start &&
- !need_resched())
- goto hit_next;
- goto search_again;
- }
-
- /*
- * | ---- desired range ---- |
- * | state |
- * or
- * | ------------- state -------------- |
+ * This is for proper compressed read merge.
+ * U64_MAX means we are starting the read and have made no progress yet.
*
- * We need to split the extent we found, and may flip bits on
- * second half.
+ * The current btrfs_bio_is_contig() only uses disk_bytenr as
+ * the condition to check if the read can be merged with the previous
+ * bio, which is not correct. E.g. two file extents pointing to the
+ * same extent but with different offsets.
*
- * If the extent we found extends past our
- * range, we just split and search again. It'll get split
- * again the next time though.
- *
- * If the extent we found is inside our range, we set the
- * desired bit on it.
+ * So here we need to do extra checks to only merge reads that are
+ * covered by the same extent map.
+ * Just extent_map::start will be enough, as they are unique
+ * inside the same inode.
*/
- if (state->start < start) {
- if (state->state & exclusive_bits) {
- *failed_start = start;
- err = -EEXIST;
- goto out;
- }
-
- /*
- * If this extent already has all the bits we want set, then
- * skip it, not necessary to split it or do anything with it.
- */
- if ((state->state & bits) == bits) {
- start = state->end + 1;
- cache_state(state, cached_state);
- goto search_again;
- }
-
- prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
- err = split_state(tree, state, prealloc, start);
- if (err)
- extent_io_tree_panic(tree, err);
-
- prealloc = NULL;
- if (err)
- goto out;
- if (state->end <= end) {
- set_state_bits(tree, state, &bits, changeset);
- cache_state(state, cached_state);
- merge_state(tree, state);
- if (last_end == (u64)-1)
- goto out;
- start = last_end + 1;
- state = next_state(state);
- if (start < end && state && state->start == start &&
- !need_resched())
- goto hit_next;
- }
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state | or | state |
- *
- * There's a hole, we need to insert something in it and
- * ignore the extent we found.
- */
- if (state->start > start) {
- u64 this_end;
- if (end < last_start)
- this_end = end;
- else
- this_end = last_start - 1;
-
- prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
-
- /*
- * Avoid to free 'prealloc' if it can be merged with
- * the later extent.
- */
- err = insert_state(tree, prealloc, start, this_end,
- NULL, NULL, &bits, changeset);
- if (err)
- extent_io_tree_panic(tree, err);
-
- cache_state(prealloc, cached_state);
- prealloc = NULL;
- start = this_end + 1;
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state |
- * We need to split the extent, and set the bit
- * on the first half
- */
- if (state->start <= end && state->end > end) {
- if (state->state & exclusive_bits) {
- *failed_start = start;
- err = -EEXIST;
- goto out;
- }
-
- prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
- err = split_state(tree, state, prealloc, end + 1);
- if (err)
- extent_io_tree_panic(tree, err);
-
- set_state_bits(tree, prealloc, &bits, changeset);
- cache_state(prealloc, cached_state);
- merge_state(tree, prealloc);
- prealloc = NULL;
- goto out;
- }
-
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
- goto again;
-
-out:
- spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
-
- return err;
-
-}
+ u64 last_em_start;
+};
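/*
 * A minimal standalone sketch of the read merge rule described above for
 * last_em_start: a read is only merged into the current bio when it is
 * contiguous on disk *and* comes from the same extent map, identified by
 * extent_map::start. The helper name and plain integer types below are
 * illustrative assumptions, not btrfs API.
 */
#include <stdbool.h>
#include <stdint.h>

#define EM_START_NONE UINT64_MAX	/* no extent map used yet */

static bool read_can_merge(bool disk_contiguous, uint64_t em_start,
			   uint64_t last_em_start)
{
	/* First read of the pass: nothing to merge with yet. */
	if (last_em_start == EM_START_NONE)
		return false;
	/* Contiguous on disk and covered by the same extent map. */
	return disk_contiguous && em_start == last_em_start;
}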
-/**
- * convert_extent_bit - convert all bits in a given range from one bit to
- * another
- * @tree: the io tree to search
- * @start: the start offset in bytes
- * @end: the end offset in bytes (inclusive)
- * @bits: the bits to set in this range
- * @clear_bits: the bits to clear in this range
- * @cached_state: state that we're going to cache
- *
- * This will go through and set bits for the given range. If any states exist
- * already in this range they are set with the given bit and cleared of the
- * clear_bits. This is only meant to be used by things that are mergeable, ie
- * converting from say DELALLOC to DIRTY. This is not meant to be used with
- * boundary bits like LOCK.
+/*
+ * Helper to set the csum search commit root option for a bio_ctrl's bbio
+ * before submitting the bio.
*
- * All allocations are done with GFP_NOFS.
+ * Only for use by submit_one_bio().
*/
-int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, u32 clear_bits,
- struct extent_state **cached_state)
+static void bio_set_csum_search_commit_root(struct btrfs_bio_ctrl *bio_ctrl)
{
- struct extent_state *state;
- struct extent_state *prealloc = NULL;
- struct rb_node *node;
- struct rb_node **p;
- struct rb_node *parent;
- int err = 0;
- u64 last_start;
- u64 last_end;
- bool first_iteration = true;
-
- btrfs_debug_check_extent_io_range(tree, start, end);
- trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
- clear_bits);
-
-again:
- if (!prealloc) {
- /*
- * Best effort, don't worry if extent state allocation fails
- * here for the first iteration. We might have a cached state
- * that matches exactly the target range, in which case no
- * extent state allocations are needed. We'll only know this
- * after locking the tree.
- */
- prealloc = alloc_extent_state(GFP_NOFS);
- if (!prealloc && !first_iteration)
- return -ENOMEM;
- }
-
- spin_lock(&tree->lock);
- if (cached_state && *cached_state) {
- state = *cached_state;
- if (state->start <= start && state->end > start &&
- extent_state_in_tree(state)) {
- node = &state->rb_node;
- goto hit_next;
- }
- }
+ struct btrfs_bio *bbio = bio_ctrl->bbio;
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search_for_insert(tree, start, &p, &parent);
- if (!node) {
- prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc) {
- err = -ENOMEM;
- goto out;
- }
- err = insert_state(tree, prealloc, start, end,
- &p, &parent, &bits, NULL);
- if (err)
- extent_io_tree_panic(tree, err);
- cache_state(prealloc, cached_state);
- prealloc = NULL;
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
-hit_next:
- last_start = state->start;
- last_end = state->end;
-
- /*
- * | ---- desired range ---- |
- * | state |
- *
- * Just lock what we found and keep going
- */
- if (state->start == start && state->end <= end) {
- set_state_bits(tree, state, &bits, NULL);
- cache_state(state, cached_state);
- state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
- if (last_end == (u64)-1)
- goto out;
- start = last_end + 1;
- if (start < end && state && state->start == start &&
- !need_resched())
- goto hit_next;
- goto search_again;
- }
+ ASSERT(bbio);
- /*
- * | ---- desired range ---- |
- * | state |
- * or
- * | ------------- state -------------- |
- *
- * We need to split the extent we found, and may flip bits on
- * second half.
- *
- * If the extent we found extends past our
- * range, we just split and search again. It'll get split
- * again the next time though.
- *
- * If the extent we found is inside our range, we set the
- * desired bit on it.
- */
- if (state->start < start) {
- prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc) {
- err = -ENOMEM;
- goto out;
- }
- err = split_state(tree, state, prealloc, start);
- if (err)
- extent_io_tree_panic(tree, err);
- prealloc = NULL;
- if (err)
- goto out;
- if (state->end <= end) {
- set_state_bits(tree, state, &bits, NULL);
- cache_state(state, cached_state);
- state = clear_state_bit(tree, state, &clear_bits, 0,
- NULL);
- if (last_end == (u64)-1)
- goto out;
- start = last_end + 1;
- if (start < end && state && state->start == start &&
- !need_resched())
- goto hit_next;
- }
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state | or | state |
- *
- * There's a hole, we need to insert something in it and
- * ignore the extent we found.
- */
- if (state->start > start) {
- u64 this_end;
- if (end < last_start)
- this_end = end;
- else
- this_end = last_start - 1;
-
- prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc) {
- err = -ENOMEM;
- goto out;
- }
-
- /*
- * Avoid to free 'prealloc' if it can be merged with
- * the later extent.
- */
- err = insert_state(tree, prealloc, start, this_end,
- NULL, NULL, &bits, NULL);
- if (err)
- extent_io_tree_panic(tree, err);
- cache_state(prealloc, cached_state);
- prealloc = NULL;
- start = this_end + 1;
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state |
- * We need to split the extent, and set the bit
- * on the first half
- */
- if (state->start <= end && state->end > end) {
- prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc) {
- err = -ENOMEM;
- goto out;
- }
-
- err = split_state(tree, state, prealloc, end + 1);
- if (err)
- extent_io_tree_panic(tree, err);
-
- set_state_bits(tree, prealloc, &bits, NULL);
- cache_state(prealloc, cached_state);
- clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
- prealloc = NULL;
- goto out;
- }
-
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- cond_resched();
- first_iteration = false;
- goto again;
-
-out:
- spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
-
- return err;
-}
-
-/* wrappers around set/clear extent bit */
-int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset)
-{
- /*
- * We don't support EXTENT_LOCKED yet, as current changeset will
- * record any bits changed, so for EXTENT_LOCKED case, it will
- * either fail with -EEXIST or changeset will record the whole
- * range.
- */
- BUG_ON(bits & EXTENT_LOCKED);
-
- return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
- changeset);
-}
-
-int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits)
-{
- return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
- GFP_NOWAIT, NULL);
-}
-
-int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int wake, int delete,
- struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, bits, wake, delete,
- cached, GFP_NOFS, NULL);
-}
-
-int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, struct extent_changeset *changeset)
-{
- /*
- * Don't support EXTENT_LOCKED case, same reason as
- * set_record_extent_bits().
- */
- BUG_ON(bits & EXTENT_LOCKED);
-
- return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
- changeset);
-}
-
-/*
- * either insert or lock state struct between start and end use mask to tell
- * us if waiting is desired.
- */
-int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached_state)
-{
- int err;
- u64 failed_start;
-
- while (1) {
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
- EXTENT_LOCKED, &failed_start,
- cached_state, GFP_NOFS, NULL);
- if (err == -EEXIST) {
- wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
- start = failed_start;
- } else
- break;
- WARN_ON(start > end);
- }
- return err;
-}
+ if (!(btrfs_op(&bbio->bio) == BTRFS_MAP_READ && is_data_inode(bbio->inode)))
+ return;
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
- int err;
- u64 failed_start;
-
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
- &failed_start, NULL, GFP_NOFS, NULL);
- if (err == -EEXIST) {
- if (failed_start > start)
- clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, 1, 0, NULL);
- return 0;
- }
- return 1;
+ bio_ctrl->bbio->csum_search_commit_root =
+ (bio_ctrl->generation &&
+ bio_ctrl->generation < btrfs_get_fs_generation(bbio->inode->root->fs_info));
}
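/*
 * A minimal sketch of the decision above, stated as a pure predicate: csums
 * are looked up in the commit root only when the bio has a recorded extent
 * generation (non-zero) and that generation is strictly older than the
 * current filesystem generation. The helper name and integer types are
 * illustrative assumptions.
 */
#include <stdbool.h>
#include <stdint.h>

static bool want_commit_root_csum_lookup(uint64_t bio_generation,
					 uint64_t fs_generation)
{
	/* 0 means no generation was recorded (or it was reset after submit). */
	return bio_generation != 0 && bio_generation < fs_generation;
}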
-void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct page *page;
+ struct btrfs_bio *bbio = bio_ctrl->bbio;
- while (index <= end_index) {
- page = find_get_page(inode->i_mapping, index);
- BUG_ON(!page); /* Pages should be in the extent_io_tree */
- clear_page_dirty_for_io(page);
- put_page(page);
- index++;
- }
-}
+ if (!bbio)
+ return;
-void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
-{
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct page *page;
+ /* Caller should ensure the bio has at least some range added */
+ ASSERT(bbio->bio.bi_iter.bi_size);
- while (index <= end_index) {
- page = find_get_page(inode->i_mapping, index);
- BUG_ON(!page); /* Pages should be in the extent_io_tree */
- __set_page_dirty_nobuffers(page);
- account_page_redirty(page);
- put_page(page);
- index++;
- }
-}
+ bio_set_csum_search_commit_root(bio_ctrl);
-/* find the first state struct with 'bits' set after 'start', and
- * return it. tree->lock must be held. NULL will returned if
- * nothing was found after 'start'
- */
-static struct extent_state *
-find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
-{
- struct rb_node *node;
- struct extent_state *state;
+ if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
+ bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
+ btrfs_submit_compressed_read(bbio);
+ else
+ btrfs_submit_bbio(bbio, 0);
+ /* The bbio is owned by the end_io handler now */
+ bio_ctrl->bbio = NULL;
/*
- * this search will find all the extents that end after
- * our range starts.
+ * We used the generation to decide whether to look up csums in the
+ * commit_root or not when we called bio_set_csum_search_commit_root()
+ * above. Now, reset the generation for the next bio.
*/
- node = tree_search(tree, start);
- if (!node)
- goto out;
-
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->end >= start && (state->state & bits))
- return state;
-
- node = rb_next(node);
- if (!node)
- break;
- }
-out:
- return NULL;
+ bio_ctrl->generation = 0;
}
/*
- * Find the first offset in the io tree with one or more @bits set.
- *
- * Note: If there are multiple bits set in @bits, any of them will match.
- *
- * Return 0 if we find something, and update @start_ret and @end_ret.
- * Return 1 if we found nothing.
+ * Submit or fail the current bio in the bio_ctrl structure.
*/
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits,
- struct extent_state **cached_state)
+static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
- struct extent_state *state;
- int ret = 1;
-
- spin_lock(&tree->lock);
- if (cached_state && *cached_state) {
- state = *cached_state;
- if (state->end == start - 1 && extent_state_in_tree(state)) {
- while ((state = next_state(state)) != NULL) {
- if (state->state & bits)
- goto got_it;
- }
- free_extent_state(*cached_state);
- *cached_state = NULL;
- goto out;
- }
- free_extent_state(*cached_state);
- *cached_state = NULL;
- }
+ struct btrfs_bio *bbio = bio_ctrl->bbio;
- state = find_first_extent_bit_state(tree, start, bits);
-got_it:
- if (state) {
- cache_state_if_flags(state, cached_state, 0);
- *start_ret = state->start;
- *end_ret = state->end;
- ret = 0;
- }
-out:
- spin_unlock(&tree->lock);
- return ret;
-}
+ if (!bbio)
+ return;
-/**
- * Find a contiguous area of bits
- *
- * @tree: io tree to check
- * @start: offset to start the search from
- * @start_ret: the first offset we found with the bits set
- * @end_ret: the final contiguous range of the bits that were set
- * @bits: bits to look for
- *
- * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
- * to set bits appropriately, and then merge them again. During this time it
- * will drop the tree->lock, so use this helper if you want to find the actual
- * contiguous area for given bits. We will search to the first bit we find, and
- * then walk down the tree until we find a non-contiguous area. The area
- * returned will be the full contiguous area with the bits set.
- */
-int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits)
-{
- struct extent_state *state;
- int ret = 1;
-
- spin_lock(&tree->lock);
- state = find_first_extent_bit_state(tree, start, bits);
- if (state) {
- *start_ret = state->start;
- *end_ret = state->end;
- while ((state = next_state(state)) != NULL) {
- if (state->start > (*end_ret + 1))
- break;
- *end_ret = state->end;
- }
- ret = 0;
+ if (ret) {
+ ASSERT(ret < 0);
+ btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
+ /* The bio is owned by the end_io handler now */
+ bio_ctrl->bbio = NULL;
+ } else {
+ submit_one_bio(bio_ctrl);
}
- spin_unlock(&tree->lock);
- return ret;
}
-/**
- * Find the first range that has @bits not set. This range could start before
- * @start.
- *
- * @tree: the tree to search
- * @start: offset at/after which the found extent should start
- * @start_ret: records the beginning of the range
- * @end_ret: records the end of the range (inclusive)
- * @bits: the set of bits which must be unset
- *
- * Since unallocated range is also considered one which doesn't have the bits
- * set it's possible that @end_ret contains -1, this happens in case the range
- * spans (last_range_end, end of device]. In this case it's up to the caller to
- * trim @end_ret to the appropriate size.
- */
-void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits)
+int __init extent_buffer_init_cachep(void)
{
- struct extent_state *state;
- struct rb_node *node, *prev = NULL, *next;
-
- spin_lock(&tree->lock);
-
- /* Find first extent with bits cleared */
- while (1) {
- node = __etree_search(tree, start, &next, &prev, NULL, NULL);
- if (!node && !next && !prev) {
- /*
- * Tree is completely empty, send full range and let
- * caller deal with it
- */
- *start_ret = 0;
- *end_ret = -1;
- goto out;
- } else if (!node && !next) {
- /*
- * We are past the last allocated chunk, set start at
- * the end of the last extent.
- */
- state = rb_entry(prev, struct extent_state, rb_node);
- *start_ret = state->end + 1;
- *end_ret = -1;
- goto out;
- } else if (!node) {
- node = next;
- }
- /*
- * At this point 'node' either contains 'start' or start is
- * before 'node'
- */
- state = rb_entry(node, struct extent_state, rb_node);
-
- if (in_range(start, state->start, state->end - state->start + 1)) {
- if (state->state & bits) {
- /*
- * |--range with bits sets--|
- * |
- * start
- */
- start = state->end + 1;
- } else {
- /*
- * 'start' falls within a range that doesn't
- * have the bits set, so take its start as
- * the beginning of the desired range
- *
- * |--range with bits cleared----|
- * |
- * start
- */
- *start_ret = state->start;
- break;
- }
- } else {
- /*
- * |---prev range---|---hole/unset---|---node range---|
- * |
- * start
- *
- * or
- *
- * |---hole/unset--||--first node--|
- * 0 |
- * start
- */
- if (prev) {
- state = rb_entry(prev, struct extent_state,
- rb_node);
- *start_ret = state->end + 1;
- } else {
- *start_ret = 0;
- }
- break;
- }
- }
-
- /*
- * Find the longest stretch from start until an entry which has the
- * bits set
- */
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->end >= start && !(state->state & bits)) {
- *end_ret = state->end;
- } else {
- *end_ret = state->start - 1;
- break;
- }
+ extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
+ sizeof(struct extent_buffer), 0, 0,
+ NULL);
+ if (!extent_buffer_cache)
+ return -ENOMEM;
- node = rb_next(node);
- if (!node)
- break;
- }
-out:
- spin_unlock(&tree->lock);
+ return 0;
}
-/*
- * find a contiguous range of bytes in the file marked as delalloc, not
- * more than 'max_bytes'. start and end are used to return the range,
- *
- * true is returned if we find something, false if nothing was in the tree
- */
-bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
- u64 *end, u64 max_bytes,
- struct extent_state **cached_state)
+void __cold extent_buffer_free_cachep(void)
{
- struct rb_node *node;
- struct extent_state *state;
- u64 cur_start = *start;
- bool found = false;
- u64 total_bytes = 0;
-
- spin_lock(&tree->lock);
-
/*
- * this search will find all the extents that end after
- * our range starts.
+ * Make sure all delayed RCU frees are flushed before we
+ * destroy caches.
*/
- node = tree_search(tree, cur_start);
- if (!node) {
- *end = (u64)-1;
- goto out;
- }
-
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (found && (state->start != cur_start ||
- (state->state & EXTENT_BOUNDARY))) {
- goto out;
- }
- if (!(state->state & EXTENT_DELALLOC)) {
- if (!found)
- *end = state->end;
- goto out;
- }
- if (!found) {
- *start = state->start;
- *cached_state = state;
- refcount_inc(&state->refs);
- }
- found = true;
- *end = state->end;
- cur_start = state->end + 1;
- node = rb_next(node);
- total_bytes += state->end - state->start + 1;
- if (total_bytes >= max_bytes)
- break;
- if (!node)
- break;
- }
-out:
- spin_unlock(&tree->lock);
- return found;
+ rcu_barrier();
+ kmem_cache_destroy(extent_buffer_cache);
}
-/*
- * Process one page for __process_pages_contig().
- *
- * Return >0 if we hit @page == @locked_page.
- * Return 0 if we updated the page status.
- * Return -EGAIN if the we need to try again.
- * (For PAGE_LOCK case but got dirty page or page not belong to mapping)
- */
-static int process_one_page(struct btrfs_fs_info *fs_info,
- struct address_space *mapping,
- struct page *page, struct page *locked_page,
- unsigned long page_ops, u64 start, u64 end)
+static void process_one_folio(struct btrfs_fs_info *fs_info,
+ struct folio *folio, const struct folio *locked_folio,
+ unsigned long page_ops, u64 start, u64 end)
{
u32 len;
@@ -1830,147 +250,104 @@ static int process_one_page(struct btrfs_fs_info *fs_info,
len = end + 1 - start;
if (page_ops & PAGE_SET_ORDERED)
- btrfs_page_clamp_set_ordered(fs_info, page, start, len);
- if (page_ops & PAGE_SET_ERROR)
- btrfs_page_clamp_set_error(fs_info, page, start, len);
+ btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
if (page_ops & PAGE_START_WRITEBACK) {
- btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
- btrfs_page_clamp_set_writeback(fs_info, page, start, len);
+ btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
+ btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
}
if (page_ops & PAGE_END_WRITEBACK)
- btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
-
- if (page == locked_page)
- return 1;
-
- if (page_ops & PAGE_LOCK) {
- int ret;
+ btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
- ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
- if (ret)
- return ret;
- if (!PageDirty(page) || page->mapping != mapping) {
- btrfs_page_end_writer_lock(fs_info, page, start, len);
- return -EAGAIN;
- }
- }
- if (page_ops & PAGE_UNLOCK)
- btrfs_page_end_writer_lock(fs_info, page, start, len);
- return 0;
+ if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
+ btrfs_folio_end_lock(fs_info, folio, start, len);
}
-static int __process_pages_contig(struct address_space *mapping,
- struct page *locked_page,
- u64 start, u64 end, unsigned long page_ops,
- u64 *processed_end)
+static void __process_folios_contig(struct address_space *mapping,
+ const struct folio *locked_folio, u64 start,
+ u64 end, unsigned long page_ops)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
- pgoff_t start_index = start >> PAGE_SHIFT;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
+ pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
- pgoff_t index = start_index;
- unsigned long nr_pages = end_index - start_index + 1;
- unsigned long pages_processed = 0;
- struct page *pages[16];
- int err = 0;
+ struct folio_batch fbatch;
int i;
- if (page_ops & PAGE_LOCK) {
- ASSERT(page_ops == PAGE_LOCK);
- ASSERT(processed_end && *processed_end == start);
- }
-
- if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
- mapping_set_error(mapping, -EIO);
-
- while (nr_pages > 0) {
- int found_pages;
-
- found_pages = find_get_pages_contig(mapping, index,
- min_t(unsigned long,
- nr_pages, ARRAY_SIZE(pages)), pages);
- if (found_pages == 0) {
- /*
- * Only if we're going to lock these pages, we can find
- * nothing at @index.
- */
- ASSERT(page_ops & PAGE_LOCK);
- err = -EAGAIN;
- goto out;
- }
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ int found_folios;
- for (i = 0; i < found_pages; i++) {
- int process_ret;
+ found_folios = filemap_get_folios_contig(mapping, &index,
+ end_index, &fbatch);
+ for (i = 0; i < found_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
- process_ret = process_one_page(fs_info, mapping,
- pages[i], locked_page, page_ops,
- start, end);
- if (process_ret < 0) {
- for (; i < found_pages; i++)
- put_page(pages[i]);
- err = -EAGAIN;
- goto out;
- }
- put_page(pages[i]);
- pages_processed++;
+ process_one_folio(fs_info, folio, locked_folio,
+ page_ops, start, end);
}
- nr_pages -= found_pages;
- index += found_pages;
+ folio_batch_release(&fbatch);
cond_resched();
}
-out:
- if (err && processed_end) {
- /*
- * Update @processed_end. I know this is awful since it has
- * two different return value patterns (inclusive vs exclusive).
- *
- * But the exclusive pattern is necessary if @start is 0, or we
- * underflow and check against processed_end won't work as
- * expected.
- */
- if (pages_processed)
- *processed_end = min(end,
- ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
- else
- *processed_end = start;
- }
- return err;
}
-static noinline void __unlock_for_delalloc(struct inode *inode,
- struct page *locked_page,
+static noinline void unlock_delalloc_folio(const struct inode *inode,
+ struct folio *locked_folio,
u64 start, u64 end)
{
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
+ ASSERT(locked_folio);
- ASSERT(locked_page);
- if (index == locked_page->index && end_index == index)
- return;
-
- __process_pages_contig(inode->i_mapping, locked_page, start, end,
- PAGE_UNLOCK, NULL);
+ __process_folios_contig(inode->i_mapping, locked_folio, start, end,
+ PAGE_UNLOCK);
}
-static noinline int lock_delalloc_pages(struct inode *inode,
- struct page *locked_page,
- u64 delalloc_start,
- u64 delalloc_end)
+static noinline int lock_delalloc_folios(struct inode *inode,
+ struct folio *locked_folio,
+ u64 start, u64 end)
{
- unsigned long index = delalloc_start >> PAGE_SHIFT;
- unsigned long end_index = delalloc_end >> PAGE_SHIFT;
- u64 processed_end = delalloc_start;
- int ret;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
+ u64 processed_end = start;
+ struct folio_batch fbatch;
- ASSERT(locked_page);
- if (index == locked_page->index && index == end_index)
- return 0;
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ unsigned int found_folios, i;
- ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
- delalloc_end, PAGE_LOCK, &processed_end);
- if (ret == -EAGAIN && processed_end > delalloc_start)
- __unlock_for_delalloc(inode, locked_page, delalloc_start,
- processed_end);
- return ret;
+ found_folios = filemap_get_folios_contig(mapping, &index,
+ end_index, &fbatch);
+ if (found_folios == 0)
+ goto out;
+
+ for (i = 0; i < found_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 range_start;
+ u32 range_len;
+
+ if (folio == locked_folio)
+ continue;
+
+ folio_lock(folio);
+ if (!folio_test_dirty(folio) || folio->mapping != mapping) {
+ folio_unlock(folio);
+ goto out;
+ }
+ range_start = max_t(u64, folio_pos(folio), start);
+ range_len = min_t(u64, folio_next_pos(folio), end + 1) - range_start;
+ btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
+
+ processed_end = range_start + range_len - 1;
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ return 0;
+out:
+ folio_batch_release(&fbatch);
+ if (processed_end > start)
+ unlock_delalloc_folio(inode, locked_folio, start, processed_end);
+ return -EAGAIN;
}
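/*
 * A small standalone sketch of the clamping done above when locking each
 * folio: the delalloc range [start, end] is intersected with the byte range
 * covered by the folio. Folio position and size are passed as plain
 * integers and the helper name is an illustrative assumption.
 *
 * Example: a 4K folio at offset 8192 with start = 10000 and end = 20000
 * gives range_start = 10000 and range_len = 12288 - 10000 = 2288.
 */
#include <stdint.h>

struct byte_range {
	uint64_t start;
	uint32_t len;
};

static struct byte_range clamp_to_folio(uint64_t folio_pos, uint64_t folio_size,
					uint64_t start, uint64_t end)
{
	uint64_t folio_end = folio_pos + folio_size;	/* exclusive */
	uint64_t range_start = start > folio_pos ? start : folio_pos;
	uint64_t range_end = (end + 1) < folio_end ? (end + 1) : folio_end;

	return (struct byte_range){ .start = range_start,
				    .len = (uint32_t)(range_end - range_start) };
}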
/*
@@ -1990,13 +367,14 @@ static noinline int lock_delalloc_pages(struct inode *inode,
*/
EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
- struct page *locked_page, u64 *start,
- u64 *end)
+ struct folio *locked_folio,
+ u64 *start, u64 *end)
{
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
const u64 orig_start = *start;
const u64 orig_end = *end;
- u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
+ u64 max_bytes = fs_info->max_extent_size;
u64 delalloc_start;
u64 delalloc_end;
bool found;
@@ -2007,13 +385,20 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
/* Caller should pass a valid @end to indicate the search range end */
ASSERT(orig_end > orig_start);
- /* The range should at least cover part of the page */
- ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
- orig_end <= page_offset(locked_page)));
+ /* The range should at least cover part of the folio */
+ ASSERT(!(orig_start >= folio_next_pos(locked_folio) ||
+ orig_end <= folio_pos(locked_folio)));
again:
/* step one, find a bunch of delalloc bytes starting at start */
delalloc_start = *start;
delalloc_end = 0;
+
+ /*
+ * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can
+ * return early without handling any dirty ranges.
+ */
+ ASSERT(max_bytes >= fs_info->sectorsize);
+
found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
max_bytes, &cached_state);
if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
@@ -2021,36 +406,37 @@ again:
/* @delalloc_end can be -1, never go beyond @orig_end */
*end = min(delalloc_end, orig_end);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return false;
}
/*
- * start comes from the offset of locked_page. We have to lock
- * pages in order, so we can't process delalloc bytes before
- * locked_page
+ * start comes from the offset of locked_folio. We have to lock
+ * folios in order, so we can't process delalloc bytes before
+ * locked_folio
*/
if (delalloc_start < *start)
delalloc_start = *start;
/*
- * make sure to limit the number of pages we try to lock down
+ * make sure to limit the number of folios we try to lock down
*/
if (delalloc_end + 1 - delalloc_start > max_bytes)
delalloc_end = delalloc_start + max_bytes - 1;
- /* step two, lock all the pages after the page that has start */
- ret = lock_delalloc_pages(inode, locked_page,
- delalloc_start, delalloc_end);
+ /* step two, lock all the folios after the folio that has start */
+ ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
+ delalloc_end);
ASSERT(!ret || ret == -EAGAIN);
if (ret == -EAGAIN) {
- /* some of the pages are gone, lets avoid looping by
- * shortening the size of the delalloc range we're searching
+ /*
+ * Some of the folios are gone, let's avoid looping by
+ * shortening the size of the delalloc range we're searching.
*/
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
cached_state = NULL;
if (!loops) {
- max_bytes = PAGE_SIZE;
+ max_bytes = fs_info->sectorsize;
loops = 1;
goto again;
} else {
@@ -2060,20 +446,19 @@ again:
}
/* step three, lock the state bits for the whole range */
- lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
+ btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
/* then test to make sure it is all still delalloc */
- ret = test_range_bit(tree, delalloc_start, delalloc_end,
- EXTENT_DELALLOC, 1, cached_state);
+ ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end,
+ EXTENT_DELALLOC, cached_state);
+
+ btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
- unlock_extent_cached(tree, delalloc_start, delalloc_end,
- &cached_state);
- __unlock_for_delalloc(inode, locked_page,
- delalloc_start, delalloc_end);
+ unlock_delalloc_folio(inode, locked_folio, delalloc_start,
+ delalloc_end);
cond_resched();
goto again;
}
- free_extent_state(cached_state);
*start = delalloc_start;
*end = delalloc_end;
out_failed:
@@ -2081,1375 +466,405 @@ out_failed:
}
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- struct page *locked_page,
+ const struct folio *locked_folio,
+ struct extent_state **cached,
u32 clear_bits, unsigned long page_ops)
{
- clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
+ btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
- __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
- start, end, page_ops, NULL);
+ __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
+ end, page_ops);
}
-/*
- * count the number of bytes in the tree that have a given bit(s)
- * set. This can be fairly slow, except for EXTENT_DIRTY which is
- * cached. The total number found is returned.
- */
-u64 count_range_bits(struct extent_io_tree *tree,
- u64 *start, u64 search_end, u64 max_bytes,
- u32 bits, int contig)
+static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
{
- struct rb_node *node;
- struct extent_state *state;
- u64 cur_start = *start;
- u64 total_bytes = 0;
- u64 last = 0;
- int found = 0;
-
- if (WARN_ON(search_end <= cur_start))
- return 0;
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- spin_lock(&tree->lock);
- if (cur_start == 0 && bits == EXTENT_DIRTY) {
- total_bytes = tree->dirty_bytes;
- goto out;
- }
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(tree, cur_start);
- if (!node)
- goto out;
-
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start > search_end)
- break;
- if (contig && found && state->start > last + 1)
- break;
- if (state->end >= cur_start && (state->state & bits) == bits) {
- total_bytes += min(search_end, state->end) + 1 -
- max(cur_start, state->start);
- if (total_bytes >= max_bytes)
- break;
- if (!found) {
- *start = max(cur_start, state->start);
- found = 1;
- }
- last = state->end;
- } else if (contig && found) {
- break;
- }
- node = rb_next(node);
- if (!node)
- break;
- }
-out:
- spin_unlock(&tree->lock);
- return total_bytes;
+ if (!fsverity_active(folio->mapping->host) ||
+ btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
+ start >= i_size_read(folio->mapping->host))
+ return true;
+ return fsverity_verify_folio(folio);
}
-/*
- * set the private field for a given byte offset in the tree. If there isn't
- * an extent_state there already, this does nothing.
- */
-int set_state_failrec(struct extent_io_tree *tree, u64 start,
- struct io_failure_record *failrec)
+static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
{
- struct rb_node *node;
- struct extent_state *state;
- int ret = 0;
-
- spin_lock(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(tree, start);
- if (!node) {
- ret = -ENOENT;
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start != start) {
- ret = -ENOENT;
- goto out;
- }
- state->failrec = failrec;
-out:
- spin_unlock(&tree->lock);
- return ret;
-}
-
-struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
-{
- struct rb_node *node;
- struct extent_state *state;
- struct io_failure_record *failrec;
-
- spin_lock(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(tree, start);
- if (!node) {
- failrec = ERR_PTR(-ENOENT);
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start != start) {
- failrec = ERR_PTR(-ENOENT);
- goto out;
- }
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- failrec = state->failrec;
-out:
- spin_unlock(&tree->lock);
- return failrec;
-}
+ ASSERT(folio_pos(folio) <= start &&
+ start + len <= folio_next_pos(folio));
-/*
- * searches a range in the state tree for a given mask.
- * If 'filled' == 1, this returns 1 only if every extent in the tree
- * has the bits set. Otherwise, 1 is returned if any bit in the
- * range is found set.
- */
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int filled, struct extent_state *cached)
-{
- struct extent_state *state = NULL;
- struct rb_node *node;
- int bitset = 0;
-
- spin_lock(&tree->lock);
- if (cached && extent_state_in_tree(cached) && cached->start <= start &&
- cached->end > start)
- node = &cached->rb_node;
+ if (uptodate && btrfs_verify_folio(folio, start, len))
+ btrfs_folio_set_uptodate(fs_info, folio, start, len);
else
- node = tree_search(tree, start);
- while (node && start <= end) {
- state = rb_entry(node, struct extent_state, rb_node);
-
- if (filled && state->start > start) {
- bitset = 0;
- break;
- }
-
- if (state->start > end)
- break;
-
- if (state->state & bits) {
- bitset = 1;
- if (!filled)
- break;
- } else if (filled) {
- bitset = 0;
- break;
- }
-
- if (state->end == (u64)-1)
- break;
-
- start = state->end + 1;
- if (start > end)
- break;
- node = rb_next(node);
- if (!node) {
- if (filled)
- bitset = 0;
- break;
- }
- }
- spin_unlock(&tree->lock);
- return bitset;
-}
-
-int free_io_failure(struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree,
- struct io_failure_record *rec)
-{
- int ret;
- int err = 0;
-
- set_state_failrec(failure_tree, rec->start, NULL);
- ret = clear_extent_bits(failure_tree, rec->start,
- rec->start + rec->len - 1,
- EXTENT_LOCKED | EXTENT_DIRTY);
- if (ret)
- err = ret;
-
- ret = clear_extent_bits(io_tree, rec->start,
- rec->start + rec->len - 1,
- EXTENT_DAMAGED);
- if (ret && !err)
- err = ret;
-
- kfree(rec);
- return err;
-}
-
-/*
- * this bypasses the standard btrfs submit functions deliberately, as
- * the standard behavior is to write all copies in a raid setup. here we only
- * want to write the one bad copy. so we do the mapping for ourselves and issue
- * submit_bio directly.
- * to avoid any synchronization issues, wait for the data after writing, which
- * actually prevents the read that triggered the error from finishing.
- * currently, there can be no more than two copies of every data bit. thus,
- * exactly one rewrite is required.
- */
-static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
- u64 length, u64 logical, struct page *page,
- unsigned int pg_offset, int mirror_num)
-{
- struct bio *bio;
- struct btrfs_device *dev;
- u64 map_length = 0;
- u64 sector;
- struct btrfs_io_context *bioc = NULL;
- int ret;
-
- ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
- BUG_ON(!mirror_num);
-
- if (btrfs_is_zoned(fs_info))
- return btrfs_repair_one_zone(fs_info, logical);
-
- bio = btrfs_bio_alloc(1);
- bio->bi_iter.bi_size = 0;
- map_length = length;
-
- /*
- * Avoid races with device replace and make sure our bioc has devices
- * associated to its stripes that don't go away while we are doing the
- * read repair operation.
- */
- btrfs_bio_counter_inc_blocked(fs_info);
- if (btrfs_is_parity_mirror(fs_info, logical, length)) {
- /*
- * Note that we don't use BTRFS_MAP_WRITE because it's supposed
- * to update all raid stripes, but here we just want to correct
- * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
- * stripe's dev and sector.
- */
- ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
- &map_length, &bioc, 0);
- if (ret) {
- btrfs_bio_counter_dec(fs_info);
- bio_put(bio);
- return -EIO;
- }
- ASSERT(bioc->mirror_num == 1);
- } else {
- ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
- &map_length, &bioc, mirror_num);
- if (ret) {
- btrfs_bio_counter_dec(fs_info);
- bio_put(bio);
- return -EIO;
- }
- BUG_ON(mirror_num != bioc->mirror_num);
- }
-
- sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
- bio->bi_iter.bi_sector = sector;
- dev = bioc->stripes[bioc->mirror_num - 1].dev;
- btrfs_put_bioc(bioc);
- if (!dev || !dev->bdev ||
- !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
- btrfs_bio_counter_dec(fs_info);
- bio_put(bio);
- return -EIO;
- }
- bio_set_dev(bio, dev->bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
- bio_add_page(bio, page, length, pg_offset);
-
- if (btrfsic_submit_bio_wait(bio)) {
- /* try to remap that extent elsewhere? */
- btrfs_bio_counter_dec(fs_info);
- bio_put(bio);
- btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
- return -EIO;
- }
-
- btrfs_info_rl_in_rcu(fs_info,
- "read error corrected: ino %llu off %llu (dev %s sector %llu)",
- ino, start,
- rcu_str_deref(dev->name), sector);
- btrfs_bio_counter_dec(fs_info);
- bio_put(bio);
- return 0;
-}
-
-int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
-{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- u64 start = eb->start;
- int i, num_pages = num_extent_pages(eb);
- int ret = 0;
-
- if (sb_rdonly(fs_info->sb))
- return -EROFS;
+ btrfs_folio_clear_uptodate(fs_info, folio, start, len);
- for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
-
- ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
- start - page_offset(p), mirror_num);
- if (ret)
- break;
- start += PAGE_SIZE;
- }
-
- return ret;
-}
-
-/*
- * each time an IO finishes, we do a fast check in the IO failure tree
- * to see if we need to process or clean up an io_failure_record
- */
-int clean_io_failure(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree, u64 start,
- struct page *page, u64 ino, unsigned int pg_offset)
-{
- u64 private;
- struct io_failure_record *failrec;
- struct extent_state *state;
- int num_copies;
- int ret;
-
- private = 0;
- ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
- EXTENT_DIRTY, 0);
- if (!ret)
- return 0;
-
- failrec = get_state_failrec(failure_tree, start);
- if (IS_ERR(failrec))
- return 0;
-
- BUG_ON(!failrec->this_mirror);
-
- if (sb_rdonly(fs_info->sb))
- goto out;
-
- spin_lock(&io_tree->lock);
- state = find_first_extent_bit_state(io_tree,
- failrec->start,
- EXTENT_LOCKED);
- spin_unlock(&io_tree->lock);
-
- if (state && state->start <= failrec->start &&
- state->end >= failrec->start + failrec->len - 1) {
- num_copies = btrfs_num_copies(fs_info, failrec->logical,
- failrec->len);
- if (num_copies > 1) {
- repair_io_failure(fs_info, ino, start, failrec->len,
- failrec->logical, page, pg_offset,
- failrec->failed_mirror);
- }
- }
-
-out:
- free_io_failure(failure_tree, io_tree, failrec);
-
- return 0;
-}
-
-/*
- * Can be called when
- * - hold extent lock
- * - under ordered extent
- * - the inode is freeing
- */
-void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
-{
- struct extent_io_tree *failure_tree = &inode->io_failure_tree;
- struct io_failure_record *failrec;
- struct extent_state *state, *next;
-
- if (RB_EMPTY_ROOT(&failure_tree->state))
- return;
-
- spin_lock(&failure_tree->lock);
- state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
- while (state) {
- if (state->start > end)
- break;
-
- ASSERT(state->end <= end);
-
- next = next_state(state);
-
- failrec = state->failrec;
- free_extent_state(state);
- kfree(failrec);
-
- state = next;
- }
- spin_unlock(&failure_tree->lock);
-}
-
-static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
- u64 start)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct io_failure_record *failrec;
- struct extent_map *em;
- struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- const u32 sectorsize = fs_info->sectorsize;
- int ret;
- u64 logical;
-
- failrec = get_state_failrec(failure_tree, start);
- if (!IS_ERR(failrec)) {
- btrfs_debug(fs_info,
- "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
- failrec->logical, failrec->start, failrec->len);
- /*
- * when data can be on disk more than twice, add to failrec here
- * (e.g. with a list for failed_mirror) to make
- * clean_io_failure() clean all those errors at once.
- */
-
- return failrec;
- }
-
- failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
- if (!failrec)
- return ERR_PTR(-ENOMEM);
-
- failrec->start = start;
- failrec->len = sectorsize;
- failrec->this_mirror = 0;
- failrec->bio_flags = 0;
-
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, failrec->len);
- if (!em) {
- read_unlock(&em_tree->lock);
- kfree(failrec);
- return ERR_PTR(-EIO);
- }
-
- if (em->start > start || em->start + em->len <= start) {
- free_extent_map(em);
- em = NULL;
- }
- read_unlock(&em_tree->lock);
- if (!em) {
- kfree(failrec);
- return ERR_PTR(-EIO);
- }
-
- logical = start - em->start;
- logical = em->block_start + logical;
- if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
- logical = em->block_start;
- failrec->bio_flags = EXTENT_BIO_COMPRESSED;
- extent_set_compress_type(&failrec->bio_flags, em->compress_type);
- }
-
- btrfs_debug(fs_info,
- "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
- logical, start, failrec->len);
-
- failrec->logical = logical;
- free_extent_map(em);
-
- /* Set the bits in the private failure tree */
- ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
- EXTENT_LOCKED | EXTENT_DIRTY);
- if (ret >= 0) {
- ret = set_state_failrec(failure_tree, start, failrec);
- /* Set the bits in the inode's tree */
- ret = set_extent_bits(tree, start, start + sectorsize - 1,
- EXTENT_DAMAGED);
- } else if (ret < 0) {
- kfree(failrec);
- return ERR_PTR(ret);
- }
-
- return failrec;
-}
-
-static bool btrfs_check_repairable(struct inode *inode,
- struct io_failure_record *failrec,
- int failed_mirror)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- int num_copies;
-
- num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
- if (num_copies == 1) {
- /*
- * we only have a single copy of the data, so don't bother with
- * all the retry and error correction code that follows. no
- * matter what the error is, it is very likely to persist.
- */
- btrfs_debug(fs_info,
- "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
- num_copies, failrec->this_mirror, failed_mirror);
- return false;
- }
-
- /* The failure record should only contain one sector */
- ASSERT(failrec->len == fs_info->sectorsize);
-
- /*
- * There are two premises:
- * a) deliver good data to the caller
- * b) correct the bad sectors on disk
- *
- * Since we're only doing repair for one sector, we only need to get
- * a good copy of the failed sector and if we succeed, we have setup
- * everything for repair_io_failure to do the rest for us.
- */
- failrec->failed_mirror = failed_mirror;
- failrec->this_mirror++;
- if (failrec->this_mirror == failed_mirror)
- failrec->this_mirror++;
-
- if (failrec->this_mirror > num_copies) {
- btrfs_debug(fs_info,
- "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
- num_copies, failrec->this_mirror, failed_mirror);
- return false;
- }
-
- return true;
-}
-
-int btrfs_repair_one_sector(struct inode *inode,
- struct bio *failed_bio, u32 bio_offset,
- struct page *page, unsigned int pgoff,
- u64 start, int failed_mirror,
- submit_bio_hook_t *submit_bio_hook)
-{
- struct io_failure_record *failrec;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
- struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
- struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
- const int icsum = bio_offset >> fs_info->sectorsize_bits;
- struct bio *repair_bio;
- struct btrfs_bio *repair_bbio;
- blk_status_t status;
-
- btrfs_debug(fs_info,
- "repair read error: read error at %llu", start);
-
- BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
-
- failrec = btrfs_get_io_failure_record(inode, start);
- if (IS_ERR(failrec))
- return PTR_ERR(failrec);
-
-
- if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
- free_io_failure(failure_tree, tree, failrec);
- return -EIO;
- }
-
- repair_bio = btrfs_bio_alloc(1);
- repair_bbio = btrfs_bio(repair_bio);
- repair_bio->bi_opf = REQ_OP_READ;
- repair_bio->bi_end_io = failed_bio->bi_end_io;
- repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
- repair_bio->bi_private = failed_bio->bi_private;
-
- if (failed_bbio->csum) {
- const u32 csum_size = fs_info->csum_size;
-
- repair_bbio->csum = repair_bbio->csum_inline;
- memcpy(repair_bbio->csum,
- failed_bbio->csum + csum_size * icsum, csum_size);
- }
-
- bio_add_page(repair_bio, page, failrec->len, pgoff);
- repair_bbio->iter = repair_bio->bi_iter;
-
- btrfs_debug(btrfs_sb(inode->i_sb),
- "repair read error: submitting new read to mirror %d",
- failrec->this_mirror);
-
- status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
- failrec->bio_flags);
- if (status) {
- free_io_failure(failure_tree, tree, failrec);
- bio_put(repair_bio);
- }
- return blk_status_to_errno(status);
-}
-
-static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
-
- ASSERT(page_offset(page) <= start &&
- start + len <= page_offset(page) + PAGE_SIZE);
-
- if (uptodate) {
- if (fsverity_active(page->mapping->host) &&
- !PageError(page) &&
- !PageUptodate(page) &&
- start < i_size_read(page->mapping->host) &&
- !fsverity_verify_page(page)) {
- btrfs_page_set_error(fs_info, page, start, len);
- } else {
- btrfs_page_set_uptodate(fs_info, page, start, len);
- }
- } else {
- btrfs_page_clear_uptodate(fs_info, page, start, len);
- btrfs_page_set_error(fs_info, page, start, len);
- }
-
- if (fs_info->sectorsize == PAGE_SIZE)
- unlock_page(page);
+ if (!btrfs_is_subpage(fs_info, folio))
+ folio_unlock(folio);
else
- btrfs_subpage_end_reader(fs_info, page, start, len);
-}
-
-static blk_status_t submit_read_repair(struct inode *inode,
- struct bio *failed_bio, u32 bio_offset,
- struct page *page, unsigned int pgoff,
- u64 start, u64 end, int failed_mirror,
- unsigned int error_bitmap,
- submit_bio_hook_t *submit_bio_hook)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const u32 sectorsize = fs_info->sectorsize;
- const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
- int error = 0;
- int i;
-
- BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
-
- /* We're here because we had some read errors or csum mismatch */
- ASSERT(error_bitmap);
-
- /*
- * We only get called on buffered IO, thus page must be mapped and bio
- * must not be cloned.
- */
- ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
-
- /* Iterate through all the sectors in the range */
- for (i = 0; i < nr_bits; i++) {
- const unsigned int offset = i * sectorsize;
- struct extent_state *cached = NULL;
- bool uptodate = false;
- int ret;
-
- if (!(error_bitmap & (1U << i))) {
- /*
- * This sector has no error, just end the page read
- * and unlock the range.
- */
- uptodate = true;
- goto next;
- }
-
- ret = btrfs_repair_one_sector(inode, failed_bio,
- bio_offset + offset,
- page, pgoff + offset, start + offset,
- failed_mirror, submit_bio_hook);
- if (!ret) {
- /*
- * We have submitted the read repair, the page release
- * will be handled by the endio function of the
- * submitted repair bio.
- * Thus we don't need to do any thing here.
- */
- continue;
- }
- /*
- * Repair failed, just record the error but still continue.
- * Or the remaining sectors will not be properly unlocked.
- */
- if (!error)
- error = ret;
-next:
- end_page_read(page, uptodate, start + offset, sectorsize);
- if (uptodate)
- set_extent_uptodate(&BTRFS_I(inode)->io_tree,
- start + offset,
- start + offset + sectorsize - 1,
- &cached, GFP_ATOMIC);
- unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
- start + offset,
- start + offset + sectorsize - 1,
- &cached);
- }
- return errno_to_blk_status(error);
-}
-
-/* lots and lots of room for performance fixes in the end_bio funcs */
-
-void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
-{
- struct btrfs_inode *inode;
- const bool uptodate = (err == 0);
- int ret = 0;
-
- ASSERT(page && page->mapping);
- inode = BTRFS_I(page->mapping->host);
- btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
-
- if (!uptodate) {
- const struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u32 len;
-
- ASSERT(end + 1 - start <= U32_MAX);
- len = end + 1 - start;
-
- btrfs_page_clear_uptodate(fs_info, page, start, len);
- btrfs_page_set_error(fs_info, page, start, len);
- ret = err < 0 ? err : -EIO;
- mapping_set_error(page->mapping, ret);
- }
+ btrfs_folio_end_lock(fs_info, folio, start, len);
}
/*
- * after a writepage IO is done, we need to:
- * clear the uptodate bits on error
- * clear the writeback bits in the extent tree for this IO
- * end_page_writeback if the page has no more pending IO
+ * After a write IO is done, we need to:
+ *
+ * - clear the uptodate bits on error
+ * - clear the writeback bits in the extent tree for the range
+ * - folio_end_writeback() if there is no more pending IO for the folio
*
* Scheduling is not allowed, so the extent state tree is expected
* to have one and only one object corresponding to this IO.
*/
-static void end_bio_extent_writepage(struct bio *bio)
+static void end_bbio_data_write(struct btrfs_bio *bbio)
{
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct bio *bio = &bbio->bio;
int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
- u64 start;
- u64 end;
- struct bvec_iter_all iter_all;
- bool first_bvec = true;
+ struct folio_iter fi;
+ const u32 sectorsize = fs_info->sectorsize;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const u32 sectorsize = fs_info->sectorsize;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ u64 start = folio_pos(folio) + fi.offset;
+ u32 len = fi.length;
/* Our read/write should always be sector aligned. */
- if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
+ if (!IS_ALIGNED(fi.offset, sectorsize))
btrfs_err(fs_info,
- "partial page write in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
+ "partial page write in btrfs with offset %zu and length %zu",
+ fi.offset, fi.length);
+ else if (!IS_ALIGNED(fi.length, sectorsize))
btrfs_info(fs_info,
- "incomplete page write with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
-
- start = page_offset(page) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
-
- if (first_bvec) {
- btrfs_record_physical_zoned(inode, start, bio);
- first_bvec = false;
- }
-
- end_extent_writepage(page, error, start, end);
+ "incomplete page write with offset %zu and length %zu",
+ fi.offset, fi.length);
- btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
+ btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
+ !error);
+ if (error)
+ mapping_set_error(folio->mapping, error);
+ btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
bio_put(bio);
}
-/*
- * Record previously processed extent range
- *
- * For endio_readpage_release_extent() to handle a full extent range, reducing
- * the extent io operations.
- */
-struct processed_extent {
- struct btrfs_inode *inode;
- /* Start of the range in @inode */
- u64 start;
- /* End of the range in @inode */
- u64 end;
- bool uptodate;
-};
-
-/*
- * Try to release processed extent range
- *
- * May not release the extent range right now if the current range is
- * contiguous to processed extent.
- *
- * Will release processed extent when any of @inode, @uptodate, the range is
- * no longer contiguous to the processed range.
- *
- * Passing @inode == NULL will force processed extent to be released.
- */
-static void endio_readpage_release_extent(struct processed_extent *processed,
- struct btrfs_inode *inode, u64 start, u64 end,
- bool uptodate)
+static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
{
- struct extent_state *cached = NULL;
- struct extent_io_tree *tree;
-
- /* The first extent, initialize @processed */
- if (!processed->inode)
- goto update;
-
- /*
- * Contiguous to processed extent, just uptodate the end.
- *
- * Several things to notice:
- *
- * - bio can be merged as long as on-disk bytenr is contiguous
- * This means we can have page belonging to other inodes, thus need to
- * check if the inode still matches.
- * - bvec can contain range beyond current page for multi-page bvec
- * Thus we need to do processed->end + 1 >= start check
- */
- if (processed->inode == inode && processed->uptodate == uptodate &&
- processed->end + 1 >= start && end >= processed->end) {
- processed->end = end;
+ ASSERT(folio_test_locked(folio));
+ if (!btrfs_is_subpage(fs_info, folio))
return;
- }
- tree = &processed->inode->io_tree;
- /*
- * Now we don't have range contiguous to the processed range, release
- * the processed range now.
- */
- if (processed->uptodate && tree->track_uptodate)
- set_extent_uptodate(tree, processed->start, processed->end,
- &cached, GFP_ATOMIC);
- unlock_extent_cached_atomic(tree, processed->start, processed->end,
- &cached);
-
-update:
- /* Update processed to current range */
- processed->inode = inode;
- processed->start = start;
- processed->end = end;
- processed->uptodate = uptodate;
-}
-
-static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
-{
- ASSERT(PageLocked(page));
- if (fs_info->sectorsize == PAGE_SIZE)
- return;
-
- ASSERT(PagePrivate(page));
- btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
+ ASSERT(folio_test_private(folio));
+ btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio));
}
/*
- * Find extent buffer for a givne bytenr.
+ * After a data read IO is done, we need to:
*
- * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
- * in endio context.
- */
-static struct extent_buffer *find_extent_buffer_readpage(
- struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
-{
- struct extent_buffer *eb;
-
- /*
- * For regular sectorsize, we can use page->private to grab extent
- * buffer
- */
- if (fs_info->sectorsize == PAGE_SIZE) {
- ASSERT(PagePrivate(page) && page->private);
- return (struct extent_buffer *)page->private;
- }
-
- /* For subpage case, we need to lookup buffer radix tree */
- rcu_read_lock();
- eb = radix_tree_lookup(&fs_info->buffer_radix,
- bytenr >> fs_info->sectorsize_bits);
- rcu_read_unlock();
- ASSERT(eb);
- return eb;
-}
-
-/*
- * after a readpage IO is done, we need to:
- * clear the uptodate bits on error
- * set the uptodate bits if things worked
- * set the page up to date if all extents in the tree are uptodate
- * clear the lock bit in the extent tree
- * unlock the page if there are no other extents locked for it
+ * - clear the uptodate bits on error
+ * - set the uptodate bits if things worked
+ * - set the folio up to date if all extents in the tree are uptodate
+ * - clear the lock bit in the extent tree
+ * - unlock the folio if there are no other extents locked for it
*
* Scheduling is not allowed, so the extent state tree is expected
* to have one and only one object corresponding to this IO.
*/
-static void end_bio_extent_readpage(struct bio *bio)
+static void end_bbio_data_read(struct btrfs_bio *bbio)
{
- struct bio_vec *bvec;
- struct btrfs_bio *bbio = btrfs_bio(bio);
- struct extent_io_tree *tree, *failure_tree;
- struct processed_extent processed = { 0 };
- /*
- * The offset to the beginning of a bio, since one bio can never be
- * larger than UINT_MAX, u32 here is enough.
- */
- u32 bio_offset = 0;
- int mirror;
- int ret;
- struct bvec_iter_all iter_all;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct bio *bio = &bbio->bio;
+ struct folio_iter fi;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
+ bio_for_each_folio_all(fi, &bbio->bio) {
bool uptodate = !bio->bi_status;
- struct page *page = bvec->bv_page;
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const u32 sectorsize = fs_info->sectorsize;
- unsigned int error_bitmap = (unsigned int)-1;
- u64 start;
- u64 end;
- u32 len;
+ struct folio *folio = fi.folio;
+ struct inode *inode = folio->mapping->host;
+ u64 start = folio_pos(folio) + fi.offset;
btrfs_debug(fs_info,
- "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
- bio->bi_iter.bi_sector, bio->bi_status,
+ "%s: bi_sector=%llu, err=%d, mirror=%u",
+ __func__, bio->bi_iter.bi_sector, bio->bi_status,
bbio->mirror_num);
- tree = &BTRFS_I(inode)->io_tree;
- failure_tree = &BTRFS_I(inode)->io_failure_tree;
- /*
- * We always issue full-sector reads, but if some block in a
- * page fails to read, blk_update_request() will advance
- * bv_offset and adjust bv_len to compensate. Print a warning
- * for unaligned offsets, and an error if they don't add up to
- * a full sector.
- */
- if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
- btrfs_err(fs_info,
- "partial page read in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
- sectorsize))
- btrfs_info(fs_info,
- "incomplete page read with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
-
- start = page_offset(page) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
- len = bvec->bv_len;
- mirror = bbio->mirror_num;
- if (likely(uptodate)) {
- if (is_data_inode(inode)) {
- error_bitmap = btrfs_verify_data_csum(bbio,
- bio_offset, page, start, end);
- ret = error_bitmap;
- } else {
- ret = btrfs_validate_metadata_buffer(bbio,
- page, start, end, mirror);
- }
- if (ret)
- uptodate = false;
- else
- clean_io_failure(BTRFS_I(inode)->root->fs_info,
- failure_tree, tree, start,
- page,
- btrfs_ino(BTRFS_I(inode)), 0);
- }
-
- if (likely(uptodate))
- goto readpage_ok;
-
- if (is_data_inode(inode)) {
- /*
- * btrfs_submit_read_repair() will handle all the good
- * and bad sectors, we just continue to the next bvec.
- */
- submit_read_repair(inode, bio, bio_offset, page,
- start - page_offset(page), start,
- end, mirror, error_bitmap,
- btrfs_submit_data_bio);
-
- ASSERT(bio_offset + len > bio_offset);
- bio_offset += len;
- continue;
- } else {
- struct extent_buffer *eb;
-
- eb = find_extent_buffer_readpage(fs_info, page, start);
- set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- eb->read_mirror = mirror;
- atomic_dec(&eb->io_pages);
- if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
- &eb->bflags))
- btree_readahead_hook(eb, -EIO);
- }
-readpage_ok:
if (likely(uptodate)) {
+ u64 end = start + fi.length - 1;
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
/*
* Zero out the remaining part if this range straddles
* i_size.
*
- * Here we should only zero the range inside the bvec,
+ * Here we should only zero the range inside the folio,
* not touch anything else.
*
- * NOTE: i_size is exclusive while end is inclusive.
+ * NOTE: i_size is exclusive while end is inclusive, and
+ * folio_contains() takes a PAGE_SIZE-based page index.
*/
- if (page->index == end_index && i_size <= end) {
- u32 zero_start = max(offset_in_page(i_size),
- offset_in_page(start));
-
- zero_user_segment(page, zero_start,
- offset_in_page(end) + 1);
+ if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
+ i_size <= end) {
+ u32 zero_start = max(offset_in_folio(folio, i_size),
+ offset_in_folio(folio, start));
+ u32 zero_len = offset_in_folio(folio, end) + 1 -
+ zero_start;
+
+ folio_zero_range(folio, zero_start, zero_len);
}
}
- ASSERT(bio_offset + len > bio_offset);
- bio_offset += len;
- /* Update page status and unlock */
- end_page_read(page, uptodate, start, len);
- endio_readpage_release_extent(&processed, BTRFS_I(inode),
- start, end, PageUptodate(page));
+ /* Update page status and unlock. */
+ end_folio_read(folio, uptodate, start, fi.length);
}
- /* Release the last extent */
- endio_readpage_release_extent(&processed, NULL, 0, 0, false);
- btrfs_bio_free_csum(bbio);
bio_put(bio);
}
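/*
 * A worked example of the i_size zeroing above (hedged; the numbers are
 * hypothetical): take a 4K folio covering file range [8K, 12K) with
 * i_size == 10K. folio_contains() matches the page index of i_size and
 * i_size <= end, so zero_start = offset_in_folio(folio, 10K) = 2048 and
 * zero_len = offset_in_folio(folio, 12K - 1) + 1 - zero_start = 2048.
 * Only folio bytes [2048, 4096), i.e. file range [10K, 12K), are zeroed;
 * data below i_size is left untouched.
 */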
/*
- * Initialize the members up to but not including 'bio'. Use after allocating a
- * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
- * 'bio' because use of __GFP_ZERO is not supported.
+ * Populate every free slot in a provided array with folios using GFP_NOFS.
+ *
+ * @nr_folios: number of folios to allocate
+ * @order: the order of the folios to be allocated
+ * @folio_array: the array to fill with folios; any existing non-NULL entries in
+ * the array will be skipped
+ *
+ * Return: 0 if all folios were able to be allocated;
+ * -ENOMEM otherwise; any partially allocated folios are freed and
+ * their array slots reset to NULL
*/
-static inline void btrfs_bio_init(struct btrfs_bio *bbio)
+int btrfs_alloc_folio_array(unsigned int nr_folios, unsigned int order,
+ struct folio **folio_array)
{
- memset(bbio, 0, offsetof(struct btrfs_bio, bio));
+ for (int i = 0; i < nr_folios; i++) {
+ if (folio_array[i])
+ continue;
+ folio_array[i] = folio_alloc(GFP_NOFS, order);
+ if (!folio_array[i])
+ goto error;
+ }
+ return 0;
+error:
+ for (int i = 0; i < nr_folios; i++) {
+ if (folio_array[i])
+ folio_put(folio_array[i]);
+ folio_array[i] = NULL;
+ }
+ return -ENOMEM;
}
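/*
 * A minimal usage sketch for the helper above. The caller below is a
 * hypothetical example (demo_fill_folio_array() and the count of four
 * folios are illustrative assumptions, not btrfs code):
 */
static int demo_fill_folio_array(void)
{
	/* Hypothetical demo caller, not part of btrfs. */
	struct folio *folios[4] = { NULL };
	int ret;

	/* Only NULL slots are filled; on failure every slot is reset to NULL. */
	ret = btrfs_alloc_folio_array(ARRAY_SIZE(folios), 0, folios);
	if (ret)
		return ret;

	/* ... use the order-0 folios ... */

	for (int i = 0; i < ARRAY_SIZE(folios); i++)
		folio_put(folios[i]);
	return 0;
}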
/*
- * Allocate a btrfs_io_bio, with @nr_iovecs as maximum number of iovecs.
+ * Populate every free slot in a provided array with pages, using GFP_NOFS.
+ *
+ * @nr_pages: number of pages to allocate
+ * @page_array: the array to fill with pages; any existing non-null entries in
+ * the array will be skipped
+ * @nofail: whether to use the __GFP_NOFAIL flag
*
- * The bio allocation is backed by bioset and does not fail.
+ * Return: 0 if all pages were able to be allocated;
+ * -ENOMEM otherwise; any partially allocated pages are freed and
+ * their array slots reset to NULL
*/
-struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
-{
- struct bio *bio;
-
- ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
- bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
- btrfs_bio_init(btrfs_bio(bio));
- return bio;
-}
-
-struct bio *btrfs_bio_clone(struct bio *bio)
+int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
+ bool nofail)
{
- struct btrfs_bio *bbio;
- struct bio *new;
-
- /* Bio allocation backed by a bioset does not fail */
- new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
- bbio = btrfs_bio(new);
- btrfs_bio_init(bbio);
- bbio->iter = bio->bi_iter;
- return new;
-}
-
-struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
-{
- struct bio *bio;
- struct btrfs_bio *bbio;
+ const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
+ unsigned int allocated;
- ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
+ for (allocated = 0; allocated < nr_pages;) {
+ unsigned int last = allocated;
- /* this will never fail when it's backed by a bioset */
- bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
- ASSERT(bio);
-
- bbio = btrfs_bio(bio);
- btrfs_bio_init(bbio);
-
- bio_trim(bio, offset >> 9, size >> 9);
- bbio->iter = bio->bi_iter;
- return bio;
+ allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
+ if (unlikely(allocated == last)) {
+ /* No progress, fail and do cleanup. */
+ for (int i = 0; i < allocated; i++) {
+ __free_page(page_array[i]);
+ page_array[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+ return 0;
}
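/*
 * A hedged worked example of the retry loop above (hypothetical numbers):
 * with nr_pages == 8, a first alloc_pages_bulk() call might fill only five
 * slots and return 5; since that differs from the previous count (0), the
 * loop retries, and a second call returning 8 makes the function succeed.
 * If a retry returns the same count as before (no progress), every page
 * allocated so far is freed, its slot is reset to NULL, and -ENOMEM is
 * returned.
 */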
-/**
- * Attempt to add a page to bio
- *
- * @bio: destination bio
- * @page: page to add to the bio
- * @disk_bytenr: offset of the new bio or to check whether we are adding
- * a contiguous page to the previous one
- * @pg_offset: starting offset in the page
- * @size: portion of page that we want to write
- * @prev_bio_flags: flags of previous bio to see if we can merge the current one
- * @bio_flags: flags of the current bio to see if we can merge them
- *
- * Attempt to add a page to bio considering stripe alignment etc.
+/*
+ * Populate needed folios for the extent buffer.
*
- * Return >= 0 for the number of bytes added to the bio.
- * Can return 0 if the current bio is already at stripe/zone boundary.
- * Return <0 for error.
+ * For now, the folios populated are always in order 0 (aka, single page).
*/
-static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
- struct page *page,
- u64 disk_bytenr, unsigned int size,
- unsigned int pg_offset,
- unsigned long bio_flags)
+static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
{
- struct bio *bio = bio_ctrl->bio;
- u32 bio_size = bio->bi_iter.bi_size;
- u32 real_size;
- const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
- bool contig;
+ struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
+ int num_pages = num_extent_pages(eb);
int ret;
- ASSERT(bio);
- /* The limit should be calculated when bio_ctrl->bio is allocated */
- ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
- if (bio_ctrl->bio_flags != bio_flags)
- return 0;
-
- if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
- contig = bio->bi_iter.bi_sector == sector;
- else
- contig = bio_end_sector(bio) == sector;
- if (!contig)
- return 0;
-
- real_size = min(bio_ctrl->len_to_oe_boundary,
- bio_ctrl->len_to_stripe_boundary) - bio_size;
- real_size = min(real_size, size);
-
- /*
- * If real_size is 0, never call bio_add_*_page(), as even size is 0,
- * bio will still execute its endio function on the page!
- */
- if (real_size == 0)
- return 0;
-
- if (bio_op(bio) == REQ_OP_ZONE_APPEND)
- ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
- else
- ret = bio_add_page(bio, page, real_size, pg_offset);
+ ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
+ if (ret < 0)
+ return ret;
- return ret;
+ for (int i = 0; i < num_pages; i++)
+ eb->folios[i] = page_folio(page_array[i]);
+ eb->folio_size = PAGE_SIZE;
+ eb->folio_shift = PAGE_SHIFT;
+ return 0;
}
-static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
- struct btrfs_inode *inode, u64 file_offset)
+static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
+ u64 disk_bytenr, loff_t file_offset)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_io_geometry geom;
- struct btrfs_ordered_extent *ordered;
- struct extent_map *em;
- u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
- int ret;
-
- /*
- * Pages for compressed extent are never submitted to disk directly,
- * thus it has no real boundary, just set them to U32_MAX.
- *
- * The split happens for real compressed bio, which happens in
- * btrfs_submit_compressed_read/write().
- */
- if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
- bio_ctrl->len_to_oe_boundary = U32_MAX;
- bio_ctrl->len_to_stripe_boundary = U32_MAX;
- return 0;
- }
- em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
- if (IS_ERR(em))
- return PTR_ERR(em);
- ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
- logical, &geom);
- free_extent_map(em);
- if (ret < 0) {
- return ret;
- }
- if (geom.len > U32_MAX)
- bio_ctrl->len_to_stripe_boundary = U32_MAX;
- else
- bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
-
- if (!btrfs_is_zoned(fs_info) ||
- bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
- bio_ctrl->len_to_oe_boundary = U32_MAX;
- return 0;
- }
+ struct bio *bio = &bio_ctrl->bbio->bio;
+ const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
- /* Ordered extent not yet created, so we're good */
- ordered = btrfs_lookup_ordered_extent(inode, file_offset);
- if (!ordered) {
- bio_ctrl->len_to_oe_boundary = U32_MAX;
- return 0;
+ if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
+ /*
+ * For compression, all IO should have its logical bytenr set
+ * to the starting bytenr of the compressed extent.
+ */
+ return bio->bi_iter.bi_sector == sector;
}
- bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
- ordered->disk_bytenr + ordered->disk_num_bytes - logical);
- btrfs_put_ordered_extent(ordered);
- return 0;
+ /*
+ * To merge into a bio, both the disk sector and the logical offset in
+ * the file need to be contiguous.
+ */
+ return bio_ctrl->next_file_offset == file_offset &&
+ bio_end_sector(bio) == sector;
}
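/*
 * A worked example of the contiguity check above (hedged; the numbers are
 * hypothetical): a bio whose data currently ends at disk sector 1008 and
 * whose next_file_offset is 8192 can absorb a 4K block with
 * disk_bytenr == 1008 * 512 == 516096 (sector 1008) and file_offset == 8192.
 * If either value jumps (say file_offset == 16384 because of a hole), the
 * block starts a new bio even though the on-disk location is contiguous.
 */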
-static int alloc_new_bio(struct btrfs_inode *inode,
- struct btrfs_bio_ctrl *bio_ctrl,
- struct writeback_control *wbc,
- unsigned int opf,
- bio_end_io_t end_io_func,
- u64 disk_bytenr, u32 offset, u64 file_offset,
- unsigned long bio_flags)
+static void alloc_new_bio(struct btrfs_inode *inode,
+ struct btrfs_bio_ctrl *bio_ctrl,
+ u64 disk_bytenr, u64 file_offset)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct bio *bio;
- int ret;
-
- bio = btrfs_bio_alloc(BIO_MAX_VECS);
- /*
- * For compressed page range, its disk_bytenr is always @disk_bytenr
- * passed in, no matter if we have added any range into previous bio.
- */
- if (bio_flags & EXTENT_BIO_COMPRESSED)
- bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- else
- bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
- bio_ctrl->bio = bio;
- bio_ctrl->bio_flags = bio_flags;
- bio->bi_end_io = end_io_func;
- bio->bi_private = &inode->io_tree;
- bio->bi_write_hint = inode->vfs_inode.i_write_hint;
- bio->bi_opf = opf;
- ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
- if (ret < 0)
- goto error;
- if (wbc) {
- struct block_device *bdev;
-
- bdev = fs_info->fs_devices->latest_dev->bdev;
- bio_set_dev(bio, bdev);
- wbc_init_bio(wbc, bio);
- }
- if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
- struct btrfs_device *device;
+ struct btrfs_bio *bbio;
- device = btrfs_zoned_get_device(fs_info, disk_bytenr,
- fs_info->sectorsize);
- if (IS_ERR(device)) {
- ret = PTR_ERR(device);
- goto error;
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode,
+ file_offset, bio_ctrl->end_io_func, NULL);
+ bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
+ bio_ctrl->bbio = bbio;
+ bio_ctrl->len_to_oe_boundary = U32_MAX;
+ bio_ctrl->next_file_offset = file_offset;
+
+ /* Limit data write bios to the ordered boundary. */
+ if (bio_ctrl->wbc) {
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = btrfs_lookup_ordered_extent(inode, file_offset);
+ if (ordered) {
+ bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
+ ordered->file_offset +
+ ordered->disk_num_bytes - file_offset);
+ bbio->ordered = ordered;
}
- btrfs_bio(bio)->device = device;
+ /*
+ * Pick the last added device to support cgroup writeback. For
+ * multi-device file systems this means blk-cgroup policies have
+ * to always be set on the last added/replaced device.
+ * This is a bit odd but has been like that for a long time.
+ */
+ bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
+ wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
}
- return 0;
-error:
- bio_ctrl->bio = NULL;
- bio->bi_status = errno_to_blk_status(ret);
- bio_endio(bio);
- return ret;
}
/*
- * @opf: bio REQ_OP_* and REQ_* flags as one value
- * @wbc: optional writeback control for io accounting
- * @page: page to add to the bio
* @disk_bytenr: logical bytenr where the write will be
+ * @page: page to add to the bio
* @size: portion of page that we want to write to
* @pg_offset: offset of the new bio or to check whether we are adding
* a contiguous page to the previous one
- * @bio_ret: must be valid pointer, newly allocated bio will be stored there
- * @end_io_func: end_io callback for new bio
- * @mirror_num: desired mirror to read/write
- * @prev_bio_flags: flags of previous bio to see if we can merge the current one
- * @bio_flags: flags of the current bio to see if we can merge them
+ * @read_em_generation: generation of the extent_map we are submitting
+ * (only used for read)
+ *
+ * This will either add the folio to the existing @bio_ctrl->bbio, or allocate a
+ * new one in @bio_ctrl->bbio.
+ * The mirror number for this IO should already be initialized in
+ * @bio_ctrl->mirror_num.
*/
-static int submit_extent_page(unsigned int opf,
- struct writeback_control *wbc,
- struct btrfs_bio_ctrl *bio_ctrl,
- struct page *page, u64 disk_bytenr,
- size_t size, unsigned long pg_offset,
- bio_end_io_t end_io_func,
- int mirror_num,
- unsigned long bio_flags,
- bool force_bio_submit)
+static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
+ u64 disk_bytenr, struct folio *folio,
+ size_t size, unsigned long pg_offset,
+ u64 read_em_generation)
{
- int ret = 0;
- struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
- unsigned int cur = pg_offset;
+ struct btrfs_inode *inode = folio_to_inode(folio);
+ loff_t file_offset = folio_pos(folio) + pg_offset;
- ASSERT(bio_ctrl);
+ ASSERT(pg_offset + size <= folio_size(folio));
+ ASSERT(bio_ctrl->end_io_func);
- ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
- pg_offset + size <= PAGE_SIZE);
- if (force_bio_submit && bio_ctrl->bio) {
- ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
- bio_ctrl->bio = NULL;
- if (ret < 0)
- return ret;
- }
+ if (bio_ctrl->bbio &&
+ !btrfs_bio_is_contig(bio_ctrl, disk_bytenr, file_offset))
+ submit_one_bio(bio_ctrl);
- while (cur < pg_offset + size) {
- u32 offset = cur - pg_offset;
- int added;
+ do {
+ u32 len = size;
/* Allocate new bio if needed */
- if (!bio_ctrl->bio) {
- ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
- end_io_func, disk_bytenr, offset,
- page_offset(page) + cur,
- bio_flags);
- if (ret < 0)
- return ret;
+ if (!bio_ctrl->bbio)
+ alloc_new_bio(inode, bio_ctrl, disk_bytenr, file_offset);
+
+ /* Cap to the current ordered extent boundary if there is one. */
+ if (len > bio_ctrl->len_to_oe_boundary) {
+ ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
+ ASSERT(is_data_inode(inode));
+ len = bio_ctrl->len_to_oe_boundary;
+ }
+
+ if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
+ /* bio full: move on to a new one */
+ submit_one_bio(bio_ctrl);
+ continue;
}
/*
- * We must go through btrfs_bio_add_page() to ensure each
- * page range won't cross various boundaries.
+ * Now that the folio is definitely added to the bio, include its
+ * generation in the max generation calculation.
*/
- if (bio_flags & EXTENT_BIO_COMPRESSED)
- added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
- size - offset, pg_offset + offset,
- bio_flags);
- else
- added = btrfs_bio_add_page(bio_ctrl, page,
- disk_bytenr + offset, size - offset,
- pg_offset + offset, bio_flags);
-
- /* Metadata page range should never be split */
- if (!is_data_inode(&inode->vfs_inode))
- ASSERT(added == 0 || added == size - offset);
-
- /* At least we added some page, update the account */
- if (wbc && added)
- wbc_account_cgroup_owner(wbc, page, added);
-
- /* We have reached boundary, submit right now */
- if (added < size - offset) {
- /* The bio should contain some page(s) */
- ASSERT(bio_ctrl->bio->bi_iter.bi_size);
- ret = submit_one_bio(bio_ctrl->bio, mirror_num,
- bio_ctrl->bio_flags);
- bio_ctrl->bio = NULL;
- if (ret < 0)
- return ret;
- }
- cur += added;
- }
- return 0;
+ bio_ctrl->generation = max(bio_ctrl->generation, read_em_generation);
+ bio_ctrl->next_file_offset += len;
+
+ if (bio_ctrl->wbc)
+ wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len);
+
+ size -= len;
+ pg_offset += len;
+ disk_bytenr += len;
+ file_offset += len;
+
+ /*
+ * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
+ * sector aligned. alloc_new_bio() then sets it to the end of
+ * our ordered extent for writes into zoned devices.
+ *
+ * When len_to_oe_boundary is tracking an ordered extent, we
+ * trust the ordered extent code to align things properly, and
+ * the check above to cap our write to the ordered extent
+ * boundary is correct.
+ *
+ * When len_to_oe_boundary is U32_MAX, the cap above would
+ * result in a 4095 byte IO for the last folio right before
+ * we hit the bio limit of UINT_MAX. bio_add_folio() has all
+ * the checks required to make sure we don't overflow the bio,
+ * and we should just ignore len_to_oe_boundary completely
+ * unless we're using it to track an ordered extent.
+ *
+ * It's pretty hard to make a bio sized U32_MAX, but it can
+ * happen when the page cache is able to feed us contiguous
+ * folios for large extents.
+ */
+ if (bio_ctrl->len_to_oe_boundary != U32_MAX)
+ bio_ctrl->len_to_oe_boundary -= len;
+
+ /* Ordered extent boundary: move on to a new bio. */
+ if (bio_ctrl->len_to_oe_boundary == 0)
+ submit_one_bio(bio_ctrl);
+ } while (size);
}
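/*
 * A hedged worked example of the ordered-extent capping in the loop above
 * (hypothetical numbers): writing a 16K folio into an ordered extent that
 * ends 8K past file_offset gives len_to_oe_boundary == 8192, so the first
 * iteration adds 8K, len_to_oe_boundary drops to zero and the bio is
 * submitted; the remaining 8K goes into a freshly allocated bio whose
 * boundary is looked up from the ordered extent at the new file offset.
 */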
-static int attach_extent_buffer_page(struct extent_buffer *eb,
- struct page *page,
- struct btrfs_subpage *prealloc)
+static int attach_extent_buffer_folio(struct extent_buffer *eb,
+ struct folio *folio,
+ struct btrfs_folio_state *prealloc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
int ret = 0;
@@ -3460,93 +875,114 @@ static int attach_extent_buffer_page(struct extent_buffer *eb,
* For cloned or dummy extent buffers, their pages are not mapped and
* will not race with any other ebs.
*/
- if (page->mapping)
- lockdep_assert_held(&page->mapping->private_lock);
+ if (folio->mapping)
+ lockdep_assert_held(&folio->mapping->i_private_lock);
- if (fs_info->sectorsize == PAGE_SIZE) {
- if (!PagePrivate(page))
- attach_page_private(page, eb);
+ if (!btrfs_meta_is_subpage(fs_info)) {
+ if (!folio_test_private(folio))
+ folio_attach_private(folio, eb);
else
- WARN_ON(page->private != (unsigned long)eb);
+ WARN_ON(folio_get_private(folio) != eb);
return 0;
}
/* Already mapped, just free prealloc */
- if (PagePrivate(page)) {
- btrfs_free_subpage(prealloc);
+ if (folio_test_private(folio)) {
+ btrfs_free_folio_state(prealloc);
return 0;
}
if (prealloc)
/* Has preallocated memory for subpage */
- attach_page_private(page, prealloc);
+ folio_attach_private(folio, prealloc);
else
/* Do new allocation to attach subpage */
- ret = btrfs_attach_subpage(fs_info, page,
- BTRFS_SUBPAGE_METADATA);
+ ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
return ret;
}
-int set_page_extent_mapped(struct page *page)
+int set_folio_extent_mapped(struct folio *folio)
{
struct btrfs_fs_info *fs_info;
- ASSERT(page->mapping);
+ ASSERT(folio->mapping);
- if (PagePrivate(page))
+ if (folio_test_private(folio))
return 0;
- fs_info = btrfs_sb(page->mapping->host->i_sb);
+ fs_info = folio_to_fs_info(folio);
- if (fs_info->sectorsize < PAGE_SIZE)
- return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
+ if (btrfs_is_subpage(fs_info, folio))
+ return btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
- attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
+ folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
return 0;
}
-void clear_page_extent_mapped(struct page *page)
+void clear_folio_extent_mapped(struct folio *folio)
{
struct btrfs_fs_info *fs_info;
- ASSERT(page->mapping);
+ ASSERT(folio->mapping);
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
return;
- fs_info = btrfs_sb(page->mapping->host->i_sb);
- if (fs_info->sectorsize < PAGE_SIZE)
- return btrfs_detach_subpage(fs_info, page);
+ fs_info = folio_to_fs_info(folio);
+ if (btrfs_is_subpage(fs_info, folio))
+ return btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
- detach_page_private(page);
+ folio_detach_private(folio);
}
-static struct extent_map *
-__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
- u64 start, u64 len, struct extent_map **em_cached)
+static struct extent_map *get_extent_map(struct btrfs_inode *inode,
+ struct folio *folio, u64 start,
+ u64 len, struct extent_map **em_cached)
{
struct extent_map *em;
- if (em_cached && *em_cached) {
+ ASSERT(em_cached);
+
+ if (*em_cached) {
em = *em_cached;
- if (extent_map_in_tree(em) && start >= em->start &&
- start < extent_map_end(em)) {
+ if (btrfs_extent_map_in_tree(em) && start >= em->start &&
+ start < btrfs_extent_map_end(em)) {
refcount_inc(&em->refs);
return em;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
*em_cached = NULL;
}
- em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
- if (em_cached && !IS_ERR_OR_NULL(em)) {
+ em = btrfs_get_extent(inode, folio, start, len);
+ if (!IS_ERR(em)) {
BUG_ON(*em_cached);
refcount_inc(&em->refs);
*em_cached = em;
}
+
return em;
}
+
+static void btrfs_readahead_expand(struct readahead_control *ractl,
+ const struct extent_map *em)
+{
+ const u64 ra_pos = readahead_pos(ractl);
+ const u64 ra_end = ra_pos + readahead_length(ractl);
+ const u64 em_end = em->start + em->len;
+
+ /* No expansion for holes and inline extents. */
+ if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
+ return;
+
+ ASSERT(em_end >= ra_pos,
+ "extent_map %llu %llu ends before current readahead position %llu",
+ em->start, em->len, ra_pos);
+ if (em_end > ra_end)
+ readahead_expand(ractl, ra_pos, em_end - ra_pos);
+}
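/*
 * A hedged worked example of the expansion above (hypothetical numbers):
 * with readahead positioned at offset 0 for 64K and a compressed extent
 * map covering [0, 1M), em_end (1M) exceeds ra_end (64K), so the window is
 * expanded by em_end - ra_pos == 1M and the whole compressed extent can be
 * read and decompressed in one pass.
 */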
+
/*
* basic readpage implementation. Locked extent state structs are inserted
* into the tree that are removed when the IO is done (by the end_io
@@ -3554,100 +990,82 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
* XXX JDM: This needs looking at to ensure proper page locking
* return 0 on success, otherwise return error
*/
-int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl,
- unsigned int read_flags, u64 *prev_em_start)
+static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ struct btrfs_bio_ctrl *bio_ctrl)
{
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- u64 start = page_offset(page);
- const u64 end = start + PAGE_SIZE - 1;
- u64 cur = start;
+ struct inode *inode = folio->mapping->host;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ u64 start = folio_pos(folio);
+ const u64 end = start + folio_size(folio) - 1;
u64 extent_offset;
u64 last_byte = i_size_read(inode);
- u64 block_start;
- u64 cur_end;
struct extent_map *em;
int ret = 0;
- int nr = 0;
- size_t pg_offset = 0;
- size_t iosize;
- size_t blocksize = inode->i_sb->s_blocksize;
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+ const size_t blocksize = fs_info->sectorsize;
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0) {
- unlock_extent(tree, start, end);
- btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
- unlock_page(page);
- goto out;
- }
-
- if (!PageUptodate(page)) {
- if (cleancache_get_page(page) == 0) {
- BUG_ON(blocksize != PAGE_SIZE);
- unlock_extent(tree, start, end);
- unlock_page(page);
- goto out;
- }
+ folio_unlock(folio);
+ return ret;
}
- if (page->index == last_byte >> PAGE_SHIFT) {
- size_t zero_offset = offset_in_page(last_byte);
+ if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
+ size_t zero_offset = offset_in_folio(folio, last_byte);
- if (zero_offset) {
- iosize = PAGE_SIZE - zero_offset;
- memzero_page(page, zero_offset, iosize);
- flush_dcache_page(page);
- }
+ if (zero_offset)
+ folio_zero_range(folio, zero_offset,
+ folio_size(folio) - zero_offset);
}
- begin_page_read(fs_info, page);
- while (cur <= end) {
- unsigned long this_bio_flag = 0;
+ bio_ctrl->end_io_func = end_bbio_data_read;
+ begin_folio_read(fs_info, folio);
+ for (u64 cur = start; cur <= end; cur += blocksize) {
+ enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
+ unsigned long pg_offset = offset_in_folio(folio, cur);
bool force_bio_submit = false;
u64 disk_bytenr;
+ u64 block_start;
+ u64 em_gen;
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
if (cur >= last_byte) {
- struct extent_state *cached = NULL;
-
- iosize = PAGE_SIZE - pg_offset;
- memzero_page(page, pg_offset, iosize);
- flush_dcache_page(page);
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
- unlock_extent_cached(tree, cur,
- cur + iosize - 1, &cached);
- end_page_read(page, true, cur, iosize);
+ folio_zero_range(folio, pg_offset, end - cur + 1);
+ end_folio_read(folio, true, cur, end - cur + 1);
break;
}
- em = __get_extent_map(inode, page, pg_offset, cur,
- end - cur + 1, em_cached);
- if (IS_ERR_OR_NULL(em)) {
- unlock_extent(tree, cur, end);
- end_page_read(page, false, cur, end + 1 - cur);
- break;
+ if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
+ end_folio_read(folio, true, cur, blocksize);
+ continue;
+ }
+ em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
+ if (IS_ERR(em)) {
+ end_folio_read(folio, false, cur, end + 1 - cur);
+ return PTR_ERR(em);
}
extent_offset = cur - em->start;
- BUG_ON(extent_map_end(em) <= cur);
+ BUG_ON(btrfs_extent_map_end(em) <= cur);
BUG_ON(end < cur);
- if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
- this_bio_flag |= EXTENT_BIO_COMPRESSED;
- extent_set_compress_type(&this_bio_flag,
- em->compress_type);
- }
+ compress_type = btrfs_extent_map_compression(em);
- iosize = min(extent_map_end(em) - cur, end - cur + 1);
- cur_end = min(extent_map_end(em) - 1, end);
- iosize = ALIGN(iosize, blocksize);
- if (this_bio_flag & EXTENT_BIO_COMPRESSED)
- disk_bytenr = em->block_start;
+ /*
+ * Only expand readahead for extents whose pages are already being
+ * created anyway by add_ra_bio_pages(), i.e. compressed extents in
+ * the non-subpage case.
+ */
+ if (bio_ctrl->ractl &&
+ !btrfs_is_subpage(fs_info, folio) &&
+ compress_type != BTRFS_COMPRESS_NONE)
+ btrfs_readahead_expand(bio_ctrl->ractl, em);
+
+ if (compress_type != BTRFS_COMPRESS_NONE)
+ disk_bytenr = em->disk_bytenr;
else
- disk_bytenr = em->block_start + extent_offset;
- block_start = em->block_start;
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
+
+ if (em->flags & EXTENT_FLAG_PREALLOC)
block_start = EXTENT_MAP_HOLE;
+ else
+ block_start = btrfs_extent_map_block_start(em);
/*
* If we have a file range that points to a compressed extent
@@ -3655,8 +1073,8 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
* to the same compressed extent (possibly with a different
* offset and/or length, so it either points to the whole extent
* or only part of it), we must make sure we do not submit a
- * single bio to populate the pages for the 2 ranges because
- * this makes the compressed extent read zero out the pages
+ * single bio to populate the folios for the 2 ranges because
+ * this makes the compressed extent read zero out the folios
* belonging to the 2nd range. Imagine the following scenario:
*
* File layout
@@ -3669,13 +1087,13 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
* [extent X, compressed length = 4K uncompressed length = 16K]
*
* If the bio to read the compressed extent covers both ranges,
- * it will decompress extent X into the pages belonging to the
+ * it will decompress extent X into the folios belonging to the
* first range and then it will stop, zeroing out the remaining
- * pages that belong to the other range that points to extent X.
+ * folios that belong to the other range that points to extent X.
* So here we make sure we submit 2 bios, one for the first
* range and another one for the third range. Both will target
* the same physical extent from disk, but we can't currently
- * make the compressed bio endio callback populate the pages
+ * make the compressed bio endio callback populate the folios
* for both ranges because each compressed bio is tightly
* coupled with a single extent map, and each range can have
* an extent map with a different offset value relative to the
@@ -3683,143 +1101,489 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
* is a corner case so we prioritize correctness over
* non-optimal behavior (submitting 2 bios for the same extent).
*/
- if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
- prev_em_start && *prev_em_start != (u64)-1 &&
- *prev_em_start != em->start)
+ if (compress_type != BTRFS_COMPRESS_NONE &&
+ bio_ctrl->last_em_start != U64_MAX &&
+ bio_ctrl->last_em_start != em->start)
force_bio_submit = true;
- if (prev_em_start)
- *prev_em_start = em->start;
+ bio_ctrl->last_em_start = em->start;
- free_extent_map(em);
+ em_gen = em->generation;
+ btrfs_free_extent_map(em);
em = NULL;
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
- struct extent_state *cached = NULL;
-
- memzero_page(page, pg_offset, iosize);
- flush_dcache_page(page);
-
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
- unlock_extent_cached(tree, cur,
- cur + iosize - 1, &cached);
- end_page_read(page, true, cur, iosize);
- cur = cur + iosize;
- pg_offset += iosize;
- continue;
- }
- /* the get_extent function already copied into the page */
- if (test_range_bit(tree, cur, cur_end,
- EXTENT_UPTODATE, 1, NULL)) {
- unlock_extent(tree, cur, cur + iosize - 1);
- end_page_read(page, true, cur, iosize);
- cur = cur + iosize;
- pg_offset += iosize;
+ folio_zero_range(folio, pg_offset, blocksize);
+ end_folio_read(folio, true, cur, blocksize);
continue;
}
- /* we have an inline extent but it didn't get marked up
- * to date. Error out
- */
+ /* the get_extent function already copied into the folio */
if (block_start == EXTENT_MAP_INLINE) {
- unlock_extent(tree, cur, cur + iosize - 1);
- end_page_read(page, false, cur, iosize);
- cur = cur + iosize;
- pg_offset += iosize;
+ end_folio_read(folio, true, cur, blocksize);
continue;
}
- ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
- bio_ctrl, page, disk_bytenr, iosize,
- pg_offset,
- end_bio_extent_readpage, 0,
- this_bio_flag,
- force_bio_submit);
- if (!ret) {
- nr++;
- } else {
- unlock_extent(tree, cur, cur + iosize - 1);
- end_page_read(page, false, cur, iosize);
- goto out;
+ if (bio_ctrl->compress_type != compress_type) {
+ submit_one_bio(bio_ctrl);
+ bio_ctrl->compress_type = compress_type;
}
- cur = cur + iosize;
- pg_offset += iosize;
+
+ if (force_bio_submit)
+ submit_one_bio(bio_ctrl);
+ submit_extent_folio(bio_ctrl, disk_bytenr, folio, blocksize,
+ pg_offset, em_gen);
+ }
+ return 0;
+}
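/*
 * A hedged sketch of how a read path might drive btrfs_do_readpage() above.
 * The wrapper name is hypothetical, extent range locking is omitted, and it
 * assumes submit_one_bio() tolerates an empty bio_ctrl (as the force-submit
 * path above suggests):
 */
static int demo_read_one_folio(struct folio *folio)
{
	/* Hypothetical demo caller, not part of btrfs. */
	struct btrfs_bio_ctrl bio_ctrl = {
		.opf = REQ_OP_READ,
		.last_em_start = U64_MAX,	/* sentinel: no extent map seen yet */
	};
	struct extent_map *em_cached = NULL;
	int ret;

	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
	if (em_cached)
		btrfs_free_extent_map(em_cached);
	/* Flush whatever the per-block loop left queued in bio_ctrl. */
	submit_one_bio(&bio_ctrl);
	return ret;
}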
+
+/*
+ * Check if we can skip waiting for the @ordered extent covering the block at @fileoff.
+ *
+ * @fileoff: Both input and output.
+ * On input, the file offset where the check should start.
+ * On output, where the next check should start,
+ * if the function returns true.
+ *
+ * Return true if we can skip up to the new @fileoff. The caller needs to check
+ * the new @fileoff value to make sure it covers the full range before skipping
+ * the full OE.
+ *
+ * Return false if we must wait for the ordered extent.
+ */
+static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent *ordered,
+ u64 *fileoff)
+{
+ const struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct folio *folio;
+ const u32 blocksize = fs_info->sectorsize;
+ u64 cur = *fileoff;
+ bool ret;
+
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, cur >> PAGE_SHIFT);
+
+ /*
+ * We should have locked the folio(s) for range [start, end], thus
+ * there must be a folio and it must be locked.
+ */
+ ASSERT(!IS_ERR(folio));
+ ASSERT(folio_test_locked(folio));
+
+ /*
+ * There are several cases for the folio and OE combination:
+ *
+ * 1) Folio has no private flag
+ * The OE has all its IO done but not yet finished, and folio got
+ * invalidated.
+ *
+ * Here we have to wait for the OE to finish, as it may contain the
+ * to-be-inserted data checksums.
+ * Without the data checksums inserted into the csum tree, reads will
+ * just fail with a missing csum error.
+ */
+ if (!folio_test_private(folio)) {
+ ret = false;
+ goto out;
+ }
+
+ /*
+ * 2) The first block is DIRTY.
+ *
+ * This means the OE was created by some other folio whose file position is
+ * before this one. And since we are holding the folio lock, writeback of
+ * this folio cannot start.
+ *
+ * We must skip the whole OE, because its writeback will never start until
+ * we have finished our folio read and unlocked the folio.
+ */
+ if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
+ u64 range_len = umin(folio_next_pos(folio),
+ ordered->file_offset + ordered->num_bytes) - cur;
+
+ ret = true;
+ /*
+ * At least inside the folio, all the remaining blocks should
+ * also be dirty.
+ */
+ ASSERT(btrfs_folio_test_dirty(fs_info, folio, cur, range_len));
+ *fileoff = ordered->file_offset + ordered->num_bytes;
+ goto out;
+ }
+
+ /*
+ * 3) The first block is uptodate.
+ *
+ * At least the first block can be skipped, but we are still not fully
+ * sure, e.g. the OE may cover other folios in the range that cannot
+ * be skipped.
+ * So we return true and advance @fileoff to the OE/folio boundary.
+ */
+ if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
+ u64 range_len = umin(folio_next_pos(folio),
+ ordered->file_offset + ordered->num_bytes) - cur;
+
+ /*
+ * The whole range to the OE end or folio boundary should also
+ * be uptodate.
+ */
+ ASSERT(btrfs_folio_test_uptodate(fs_info, folio, cur, range_len));
+ ret = true;
+ *fileoff = cur + range_len;
+ goto out;
}
+
+ /*
+ * 4) The first block is not uptodate.
+ *
+ * This means the folio was invalidated after its writeback finished, but
+ * some other operation (e.g. a block aligned buffered write) re-inserted
+ * the folio into the filemap.
+ * This is very much the same as case 1).
+ */
+ ret = false;
out:
+ folio_put(folio);
return ret;
}
-static inline void contiguous_readpages(struct page *pages[], int nr_pages,
- u64 start, u64 end,
- struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl,
- u64 *prev_em_start)
+static bool can_skip_ordered_extent(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent *ordered,
+ u64 start, u64 end)
{
- struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
- int index;
+ const u64 range_end = min(end, ordered->file_offset + ordered->num_bytes - 1);
+ u64 cur = max(start, ordered->file_offset);
- btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
+ while (cur < range_end) {
+ bool can_skip;
- for (index = 0; index < nr_pages; index++) {
- btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
- REQ_RAHEAD, prev_em_start);
- put_page(pages[index]);
+ can_skip = can_skip_one_ordered_range(inode, ordered, &cur);
+ if (!can_skip)
+ return false;
}
+ return true;
}
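+/*
+ * A worked example of the skip logic above (hypothetical layout, 4K
+ * blocksize): an OE covers [1M, 1M + 128K) and the read locked the range
+ * [1M + 16K, 1M + 64K - 1].  cur starts at 1M + 16K; if the folio there
+ * still has its blocks dirty, can_skip_one_ordered_range() jumps cur to
+ * the OE end and the loop exits returning true.  If any checked block is
+ * neither dirty nor uptodate (cases 1 and 4), we return false and the
+ * caller has to wait for the OE to finish.
+ */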
-static void update_nr_written(struct writeback_control *wbc,
- unsigned long nr_written)
+/*
+ * Locking helper to make sure we get a stable view of extent maps for the
+ * involved range.
+ *
+ * This is for folio read paths (read and readahead), thus the involved range
+ * should have all the folios locked.
+ */
+static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state)
{
- wbc->nr_to_write -= nr_written;
+ u64 cur_pos;
+
+ /* Caller must provide a valid @cached_state. */
+ ASSERT(cached_state);
+
+ /* The range must at least be page aligned, as all read paths are folio based. */
+ ASSERT(IS_ALIGNED(start, PAGE_SIZE));
+ ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
+
+again:
+ btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
+ cur_pos = start;
+ while (cur_pos < end) {
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = btrfs_lookup_ordered_range(inode, cur_pos,
+ end - cur_pos + 1);
+ /*
+ * No ordered extents in the range, and since we hold the extent lock
+ * no one can modify the extent maps in the range, so we're safe to return.
+ */
+ if (!ordered)
+ break;
+
+ /* Check if we can skip waiting for the whole OE. */
+ if (can_skip_ordered_extent(inode, ordered, start, end)) {
+ cur_pos = min(ordered->file_offset + ordered->num_bytes,
+ end + 1);
+ btrfs_put_ordered_extent(ordered);
+ continue;
+ }
+
+ /* Now wait for the OE to finish. */
+ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
+ btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
+ btrfs_put_ordered_extent(ordered);
+ /* We have unlocked the whole range, restart from the beginning. */
+ goto again;
+ }
+}
+
+int btrfs_read_folio(struct file *file, struct folio *folio)
+{
+ struct btrfs_inode *inode = folio_to_inode(folio);
+ const u64 start = folio_pos(folio);
+ const u64 end = start + folio_size(folio) - 1;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .opf = REQ_OP_READ,
+ .last_em_start = U64_MAX,
+ };
+ struct extent_map *em_cached = NULL;
+ int ret;
+
+ lock_extents_for_read(inode, start, end, &cached_state);
+ ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
+
+ btrfs_free_extent_map(em_cached);
+
+ /*
+ * If btrfs_do_readpage() failed we will want to submit the assembled
+ * bio to do the cleanup.
+ */
+ submit_one_bio(&bio_ctrl);
+ return ret;
+}
+
+static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
+ u64 start, u32 len)
+{
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ const u64 folio_start = folio_pos(folio);
+ unsigned int start_bit;
+ unsigned int nbits;
+
+ ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio));
+ start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
+ nbits = len >> fs_info->sectorsize_bits;
+ ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
+ bitmap_set(delalloc_bitmap, start_bit, nbits);
+}
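+/*
+ * Bitmap granularity example (hypothetical numbers): with a 4K sectorsize
+ * (sectorsize_bits == 12) and a 64K folio, blocks_per_folio is 16.  A
+ * delalloc range of [folio_start + 8K, folio_start + 20K) translates to
+ * start_bit == 2 and nbits == 3, i.e. bits 2..4 of @delalloc_bitmap.
+ */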
+
+static bool find_next_delalloc_bitmap(struct folio *folio,
+ unsigned long *delalloc_bitmap, u64 start,
+ u64 *found_start, u32 *found_len)
+{
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ const u64 folio_start = folio_pos(folio);
+ const unsigned int bitmap_size = btrfs_blocks_per_folio(fs_info, folio);
+ unsigned int start_bit;
+ unsigned int first_zero;
+ unsigned int first_set;
+
+ ASSERT(start >= folio_start && start < folio_start + folio_size(folio));
+
+ start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
+ first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
+ if (first_set >= bitmap_size)
+ return false;
+
+ *found_start = folio_start + (first_set << fs_info->sectorsize_bits);
+ first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
+ *found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
+ return true;
}
/*
- * helper for __extent_writepage, doing all of the delayed allocation setup.
+ * Do all of the delayed allocation setup.
*
- * This returns 1 if btrfs_run_delalloc_range function did all the work required
- * to write the page (copy into inline extent). In this case the IO has
- * been started and the page is already unlocked.
+ * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
+ * The @folio should no longer be touched (treat it as already unlocked).
*
- * This returns 0 if all went well (page still locked)
- * This returns < 0 if there were errors (page still locked)
+ * Return 0 if there are still dirty blocks that need to be submitted through
+ * extent_writepage_io().
+ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
+ * submitted, and @folio is still kept locked.
+ *
+ * Return <0 if any error is hit.
+ * Any allocated ordered extent range covering this folio will be marked
+ * finished (IOERR), and @folio is still kept locked.
*/
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
- struct page *page, struct writeback_control *wbc,
- unsigned long *nr_written)
-{
- const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
- u64 delalloc_start = page_offset(page);
+ struct folio *folio,
+ struct btrfs_bio_ctrl *bio_ctrl)
+{
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
+ struct writeback_control *wbc = bio_ctrl->wbc;
+ const bool is_subpage = btrfs_is_subpage(fs_info, folio);
+ const u64 page_start = folio_pos(folio);
+ const u64 page_end = page_start + folio_size(folio) - 1;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
+ unsigned long delalloc_bitmap = 0;
+ /*
+ * Save the last found delalloc end. As the delalloc end can go beyond the
+ * page boundary, we cannot rely on the subpage bitmap to locate the last
+ * delalloc end.
+ */
+ u64 last_delalloc_end = 0;
+ /*
+ * The range end (exclusive) of the last successfully finished delalloc
+ * range.
+ * Any range covered by an ordered extent must either be manually marked
+ * finished (error handling), or have its IO submitted (which finishes the
+ * ordered extent normally).
+ *
+ * This records the end of ordered extent cleanup if we hit an error.
+ */
+ u64 last_finished_delalloc_end = page_start;
+ u64 delalloc_start = page_start;
+ u64 delalloc_end = page_end;
u64 delalloc_to_write = 0;
- int ret;
- int page_started = 0;
+ int ret = 0;
+ int bit;
- while (delalloc_start < page_end) {
- u64 delalloc_end = page_end;
- bool found;
+ /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
+ if (btrfs_is_subpage(fs_info, folio)) {
+ ASSERT(blocks_per_folio > 1);
+ btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
+ } else {
+ bio_ctrl->submit_bitmap = 1;
+ }
+
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
+ u64 start = page_start + (bit << fs_info->sectorsize_bits);
+
+ btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
+ }
- found = find_lock_delalloc_range(&inode->vfs_inode, page,
- &delalloc_start,
- &delalloc_end);
- if (!found) {
+ /* Lock all (subpage) delalloc ranges inside the folio first. */
+ while (delalloc_start < page_end) {
+ delalloc_end = page_end;
+ if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
+ &delalloc_start, &delalloc_end)) {
delalloc_start = delalloc_end + 1;
continue;
}
- ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
- delalloc_end, &page_started, nr_written, wbc);
- if (ret) {
- btrfs_page_set_error(inode->root->fs_info, page,
- page_offset(page), PAGE_SIZE);
- return ret;
+ set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
+ min(delalloc_end, page_end) + 1 - delalloc_start);
+ last_delalloc_end = delalloc_end;
+ delalloc_start = delalloc_end + 1;
+ }
+ delalloc_start = page_start;
+
+ if (!last_delalloc_end)
+ goto out;
+
+ /* Run the delalloc ranges for the above locked ranges. */
+ while (delalloc_start < page_end) {
+ u64 found_start;
+ u32 found_len;
+ bool found;
+
+ if (!is_subpage) {
+ /*
+ * For non-subpage case, the found delalloc range must
+ * cover this folio and there must be only one locked
+ * delalloc range.
+ */
+ found_start = page_start;
+ found_len = last_delalloc_end + 1 - found_start;
+ found = true;
+ } else {
+ found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
+ delalloc_start, &found_start, &found_len);
}
+ if (!found)
+ break;
/*
- * delalloc_end is already one less than the total length, so
- * we don't subtract one from PAGE_SIZE
+ * If the subpage range covers the last sector, the delalloc range may
+ * end beyond the folio boundary; use the saved last_delalloc_end
+ * instead.
*/
- delalloc_to_write += (delalloc_end - delalloc_start +
- PAGE_SIZE) >> PAGE_SHIFT;
- delalloc_start = delalloc_end + 1;
+ if (found_start + found_len >= page_end)
+ found_len = last_delalloc_end + 1 - found_start;
+
+ if (ret >= 0) {
+ /*
+ * Some delalloc ranges may have been created by previous folios.
+ * Thus we still need to clean up this range during error
+ * handling.
+ */
+ last_finished_delalloc_end = found_start;
+ /* No errors hit so far, run the current delalloc range. */
+ ret = btrfs_run_delalloc_range(inode, folio,
+ found_start,
+ found_start + found_len - 1,
+ wbc);
+ if (ret >= 0)
+ last_finished_delalloc_end = found_start + found_len;
+ if (unlikely(ret < 0))
+ btrfs_err_rl(fs_info,
+"failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode),
+ folio_pos(folio),
+ blocks_per_folio,
+ &bio_ctrl->submit_bitmap,
+ found_start, found_len, ret);
+ } else {
+ /*
+ * We've hit an error during a previous delalloc range, so we
+ * have to clean up the remaining locked ranges.
+ */
+ btrfs_unlock_extent(&inode->io_tree, found_start,
+ found_start + found_len - 1, NULL);
+ unlock_delalloc_folio(&inode->vfs_inode, folio,
+ found_start,
+ found_start + found_len - 1);
+ }
+
+ /*
+ * We have some ranges that are going to be submitted asynchronously
+ * (compression or inline). These ranges have their own control
+ * over when to unlock the folios. We should not touch them
+ * anymore, so clear the range from the submission bitmap.
+ */
+ if (ret > 0) {
+ unsigned int start_bit = (found_start - page_start) >>
+ fs_info->sectorsize_bits;
+ unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
+ page_start) >> fs_info->sectorsize_bits;
+ bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
+ }
+ /*
+ * The above btrfs_run_delalloc_range() may have unlocked the folio,
+ * thus for the last range we cannot touch the folio anymore.
+ */
+ if (found_start + found_len >= last_delalloc_end + 1)
+ break;
+
+ delalloc_start = found_start + found_len;
+ }
+ /*
+ * It's possible we had some ordered extents created before we hit
+ * an error, so clean up the non-async, successfully created delalloc ranges.
+ */
+ if (unlikely(ret < 0)) {
+ unsigned int bitmap_size = min(
+ (last_finished_delalloc_end - page_start) >>
+ fs_info->sectorsize_bits,
+ blocks_per_folio);
+
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
+ btrfs_mark_ordered_io_finished(inode, folio,
+ page_start + (bit << fs_info->sectorsize_bits),
+ fs_info->sectorsize, false);
+ return ret;
+ }
+out:
+ if (last_delalloc_end)
+ delalloc_end = last_delalloc_end;
+ else
+ delalloc_end = page_end;
+ /*
+ * delalloc_end is an inclusive offset (one less than the end of the
+ * range), hence the +1 when converting the length into a page count.
+ */
+ delalloc_to_write +=
+ DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
+
+ /*
+ * If all ranges are submitted asynchronously, we just need to account
+ * for them here.
+ */
+ if (bitmap_empty(&bio_ctrl->submit_bitmap, blocks_per_folio)) {
+ wbc->nr_to_write -= delalloc_to_write;
+ return 1;
}
+
if (wbc->nr_to_write < delalloc_to_write) {
int thresh = 8192;
@@ -3829,230 +1593,205 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
thresh);
}
- /* did the fill delalloc function already unlock and start
- * the IO?
- */
- if (page_started) {
- /*
- * we've unlocked the page, so we can't update
- * the mapping's writeback index, just update
- * nr_to_write.
- */
- wbc->nr_to_write -= *nr_written;
- return 1;
- }
-
return 0;
}
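+/*
+ * To illustrate the submit_bitmap contract (hypothetical 16 blocks per
+ * folio): if blocks 0-7 were handed to compression (async) and blocks 8-15
+ * still need a plain submission, writepage_delalloc() clears bits 0-7 and
+ * returns 0 with submit_bitmap == 0xff00, so extent_writepage_io() only
+ * touches the upper half of the folio.  If every block went the async
+ * route, the bitmap ends up empty and we return 1 instead.
+ */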
/*
- * Find the first byte we need to write.
+ * Return 0 if we have submitted or queued the sector for submission.
+ * Return <0 for critical errors, and the sector will have its dirty flag cleared.
*
- * For subpage, one page can contain several sectors, and
- * __extent_writepage_io() will just grab all extent maps in the page
- * range and try to submit all non-inline/non-compressed extents.
- *
- * This is a big problem for subpage, we shouldn't re-submit already written
- * data at all.
- * This function will lookup subpage dirty bit to find which range we really
- * need to submit.
- *
- * Return the next dirty range in [@start, @end).
- * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
+ * The caller should make sure filepos < i_size and handle the filepos >= i_size case itself.
*/
-static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
- struct page *page, u64 *start, u64 *end)
+static int submit_one_sector(struct btrfs_inode *inode,
+ struct folio *folio,
+ u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
+ loff_t i_size)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- struct btrfs_subpage_info *spi = fs_info->subpage_info;
- u64 orig_start = *start;
- /* Declare as unsigned long so we can use bitmap ops */
- unsigned long flags;
- int range_start_bit;
- int range_end_bit;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_map *em;
+ u64 block_start;
+ u64 disk_bytenr;
+ u64 extent_offset;
+ u64 em_end;
+ const u32 sectorsize = fs_info->sectorsize;
- /*
- * For regular sector size == page size case, since one page only
- * contains one sector, we return the page offset directly.
- */
- if (fs_info->sectorsize == PAGE_SIZE) {
- *start = page_offset(page);
- *end = page_offset(page) + PAGE_SIZE;
- return;
+ ASSERT(IS_ALIGNED(filepos, sectorsize));
+
+ /* @filepos >= i_size case should be handled by the caller. */
+ ASSERT(filepos < i_size);
+
+ em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
+ if (IS_ERR(em)) {
+ /*
+ * When submission fails, we should still clear the folio dirty bit,
+ * otherwise the folio will be written back again but without any
+ * ordered extent.
+ */
+ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+ btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
+ btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize);
+ return PTR_ERR(em);
}
- range_start_bit = spi->dirty_offset +
- (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
+ extent_offset = filepos - em->start;
+ em_end = btrfs_extent_map_end(em);
+ ASSERT(filepos <= em_end);
+ ASSERT(IS_ALIGNED(em->start, sectorsize));
+ ASSERT(IS_ALIGNED(em->len, sectorsize));
- /* We should have the page locked, but just in case */
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
- spi->dirty_offset + spi->bitmap_nr_bits);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ block_start = btrfs_extent_map_block_start(em);
+ disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
- range_start_bit -= spi->dirty_offset;
- range_end_bit -= spi->dirty_offset;
+ ASSERT(!btrfs_extent_map_is_compressed(em));
+ ASSERT(block_start != EXTENT_MAP_HOLE);
+ ASSERT(block_start != EXTENT_MAP_INLINE);
+
+ btrfs_free_extent_map(em);
+ em = NULL;
+
+ /*
+ * Although the PageDirty bit is cleared before entering this
+ * function, subpage dirty bit is not cleared.
+ * So clear subpage dirty bit here so next time we won't submit
+ * a folio for a range already written to disk.
+ */
+ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+ btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
+ /*
+ * The above call should set the writeback flag on the whole folio, even
+ * when only a single subpage sector is involved.
+ * As long as the folio is properly locked and the range is correct,
+ * the folio should always end up with the writeback flag set.
+ */
+ ASSERT(folio_test_writeback(folio));
- *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
- *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
+ submit_extent_folio(bio_ctrl, disk_bytenr, folio,
+ sectorsize, filepos - folio_pos(folio), 0);
+ return 0;
}
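+/*
+ * The disk_bytenr arithmetic above in a concrete (made-up) case: with
+ * em->start == 1M, btrfs_extent_map_block_start(em) == 10M and
+ * filepos == 1M + 12K, extent_offset is 12K and the sector is written
+ * to disk_bytenr == 10M + 12K.
+ */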
/*
- * helper for __extent_writepage. This calls the writepage start hooks,
+ * Helper for extent_writepage(). This calls the writepage start hooks,
* and does the loop to map the page into extents and bios.
*
* We return 1 if the IO is started and the page is unlocked,
* 0 if all went well (page still locked)
* < 0 if there were errors (page still locked)
*/
-static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
- struct page *page,
- struct writeback_control *wbc,
- struct extent_page_data *epd,
- loff_t i_size,
- unsigned long nr_written,
- int *nr_ret)
+static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ struct folio *folio,
+ u64 start, u32 len,
+ struct btrfs_bio_ctrl *bio_ctrl,
+ loff_t i_size)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u64 cur = page_offset(page);
- u64 end = cur + PAGE_SIZE - 1;
- u64 extent_offset;
- u64 block_start;
- struct extent_map *em;
+ unsigned long range_bitmap = 0;
+ bool submitted_io = false;
+ int found_error = 0;
+ const u64 end = start + len;
+ const u64 folio_start = folio_pos(folio);
+ const u64 folio_end = folio_start + folio_size(folio);
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
+ u64 cur;
+ int bit;
int ret = 0;
- int nr = 0;
- u32 opf = REQ_OP_WRITE;
- const unsigned int write_flags = wbc_to_write_flags(wbc);
- bool compressed;
- ret = btrfs_writepage_cow_fixup(page);
- if (ret) {
+ ASSERT(start >= folio_start, "start=%llu folio_start=%llu", start, folio_start);
+ ASSERT(end <= folio_end, "start=%llu len=%u folio_start=%llu folio_size=%zu",
+ start, len, folio_start, folio_size(folio));
+
+ ret = btrfs_writepage_cow_fixup(folio);
+ if (ret == -EAGAIN) {
/* Fixup worker will requeue */
- redirty_page_for_writepage(wbc, page);
- update_nr_written(wbc, nr_written);
- unlock_page(page);
+ folio_redirty_for_writepage(bio_ctrl->wbc, folio);
+ folio_unlock(folio);
return 1;
}
+ if (ret < 0) {
+ btrfs_folio_clear_dirty(fs_info, folio, start, len);
+ btrfs_folio_set_writeback(fs_info, folio, start, len);
+ btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ return ret;
+ }
- /*
- * we don't want to touch the inode after unlocking the page,
- * so we update the mapping writeback index now
- */
- update_nr_written(wbc, nr_written + 1);
+ for (cur = start; cur < end; cur += fs_info->sectorsize)
+ set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
+ bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
+ blocks_per_folio);
- while (cur <= end) {
- u64 disk_bytenr;
- u64 em_end;
- u64 dirty_range_start = cur;
- u64 dirty_range_end;
- u32 iosize;
+ bio_ctrl->end_io_func = end_bbio_data_write;
+
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
+ cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
if (cur >= i_size) {
- btrfs_writepage_endio_finish_ordered(inode, page, cur,
- end, true);
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = btrfs_lookup_first_ordered_range(inode, cur,
+ folio_end - cur);
+ /*
+ * We have just run delalloc before getting here, so
+ * there must be an ordered extent.
+ */
+ ASSERT(ordered != NULL);
+ spin_lock(&inode->ordered_tree_lock);
+ set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
+ ordered->truncated_len = min(ordered->truncated_len,
+ cur - ordered->file_offset);
+ spin_unlock(&inode->ordered_tree_lock);
+ btrfs_put_ordered_extent(ordered);
+
+ btrfs_mark_ordered_io_finished(inode, folio, cur,
+ end - cur, true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
* But we still need to clear the dirty subpage bit, or
- * the next time the page gets dirtied, we will try to
+ * the next time the folio gets dirtied, we will try to
* writeback the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
+ btrfs_folio_clear_dirty(fs_info, folio, cur, end - cur);
break;
}
-
- find_next_dirty_byte(fs_info, page, &dirty_range_start,
- &dirty_range_end);
- if (cur < dirty_range_start) {
- cur = dirty_range_start;
- continue;
- }
-
- em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
- if (IS_ERR_OR_NULL(em)) {
- btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
- ret = PTR_ERR_OR_ZERO(em);
- break;
- }
-
- extent_offset = cur - em->start;
- em_end = extent_map_end(em);
- ASSERT(cur <= em_end);
- ASSERT(cur < end);
- ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
- ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
- block_start = em->block_start;
- compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
- disk_bytenr = em->block_start + extent_offset;
-
- /*
- * Note that em_end from extent_map_end() and dirty_range_end from
- * find_next_dirty_byte() are all exclusive
- */
- iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
-
- if (btrfs_use_zone_append(inode, em->block_start))
- opf = REQ_OP_ZONE_APPEND;
-
- free_extent_map(em);
- em = NULL;
-
- /*
- * compressed and inline extents are written through other
- * paths in the FS
- */
- if (compressed || block_start == EXTENT_MAP_HOLE ||
- block_start == EXTENT_MAP_INLINE) {
- if (compressed)
- nr++;
- else
- btrfs_writepage_endio_finish_ordered(inode,
- page, cur, cur + iosize - 1, true);
- btrfs_page_clear_dirty(fs_info, page, cur, iosize);
- cur += iosize;
+ ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
+ if (unlikely(ret < 0)) {
+ /*
+ * bio_ctrl may contain a bio crossing several folios.
+ * Submit it immediately so that the bio has a chance
+ * to finish normally, rather than being marked as an error.
+ */
+ submit_one_bio(bio_ctrl);
+ /*
+ * Failed to grab the extent map which should be very rare.
+ * Since there is no bio submitted to finish the ordered
+ * extent, we have to manually finish this sector.
+ */
+ btrfs_mark_ordered_io_finished(inode, folio, cur,
+ fs_info->sectorsize, false);
+ if (!found_error)
+ found_error = ret;
continue;
}
-
- btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
- if (!PageWriteback(page)) {
- btrfs_err(inode->root->fs_info,
- "page %lu not writeback, cur %llu end %llu",
- page->index, cur, end);
- }
-
- /*
- * Although the PageDirty bit is cleared before entering this
- * function, subpage dirty bit is not cleared.
- * So clear subpage dirty bit here so next time we won't submit
- * page for range already written to disk.
- */
- btrfs_page_clear_dirty(fs_info, page, cur, iosize);
-
- ret = submit_extent_page(opf | write_flags, wbc,
- &epd->bio_ctrl, page,
- disk_bytenr, iosize,
- cur - page_offset(page),
- end_bio_extent_writepage,
- 0, 0, false);
- if (ret) {
- btrfs_page_set_error(fs_info, page, cur, iosize);
- if (PageWriteback(page))
- btrfs_page_clear_writeback(fs_info, page, cur,
- iosize);
- }
-
- cur += iosize;
- nr++;
+ submitted_io = true;
}
+
/*
- * If we finish without problem, we should not only clear page dirty,
- * but also empty subpage dirty bits
+ * If we didn't submit any sector (e.g. all of them were >= i_size), the
+ * folio dirty flag gets cleared but PAGECACHE_TAG_DIRTY is not cleared
+ * (it is only cleared by folio_start_writeback() if the folio is not dirty).
+ *
+ * Here we set writeback and then clear it for the range. If the full folio
+ * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
+ *
+ * If we hit any error, the corresponding sector will have had its dirty
+ * flag cleared and its writeback finished, thus there is no need to handle
+ * the error case here.
*/
- if (!ret)
- btrfs_page_assert_not_dirty(fs_info, page);
- *nr_ret = nr;
- return ret;
+ if (!submitted_io && !found_error) {
+ btrfs_folio_set_writeback(fs_info, folio, start, len);
+ btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ }
+ return found_error;
}
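+/*
+ * Note on the range_bitmap intersection above (hypothetical 16 blocks per
+ * folio): a call with @start at block 4 and @len covering 8 blocks builds
+ * range_bitmap == 0x0ff0, so submit_bitmap is clamped to blocks 4..11 and
+ * blocks outside [start, start + len) are left for another invocation.
+ */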
/*
@@ -4064,174 +1803,115 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
* Return 0 if everything goes well.
* Return <0 for error.
*/
-static int __extent_writepage(struct page *page, struct writeback_control *wbc,
- struct extent_page_data *epd)
+static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
{
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const u64 page_start = page_offset(page);
- const u64 page_end = page_start + PAGE_SIZE - 1;
+ struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
- int nr = 0;
size_t pg_offset;
- loff_t i_size = i_size_read(inode);
- unsigned long end_index = i_size >> PAGE_SHIFT;
- unsigned long nr_written = 0;
-
- trace___extent_writepage(page, inode, wbc);
+ loff_t i_size = i_size_read(&inode->vfs_inode);
+ const pgoff_t end_index = i_size >> PAGE_SHIFT;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
- WARN_ON(!PageLocked(page));
+ trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
- btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
- page_offset(page), PAGE_SIZE);
+ WARN_ON(!folio_test_locked(folio));
- pg_offset = offset_in_page(i_size);
- if (page->index > end_index ||
- (page->index == end_index && !pg_offset)) {
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
- unlock_page(page);
+ pg_offset = offset_in_folio(folio, i_size);
+ if (folio->index > end_index ||
+ (folio->index == end_index && !pg_offset)) {
+ folio_invalidate(folio, 0, folio_size(folio));
+ folio_unlock(folio);
return 0;
}
- if (page->index == end_index) {
- memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
- flush_dcache_page(page);
- }
+ if (folio_contains(folio, end_index))
+ folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
- ret = set_page_extent_mapped(page);
- if (ret < 0) {
- SetPageError(page);
+ /*
+ * Default to unlocking the whole folio.
+ * The proper bitmap can only be initialized in writepage_delalloc().
+ */
+ bio_ctrl->submit_bitmap = (unsigned long)-1;
+
+ /*
+ * If the page is dirty but without private set, it's marked dirty
+ * without informing the fs.
+ * Nowadays that is a bug, since the introduction of
+ * pin_user_pages*().
+ *
+ * So here we check if the folio has private set to rule out such
+ * a case.
+ * But we also have a long history of relying on the COW fixup,
+ * so here we only enable this check for experimental builds until
+ * we're sure it's safe.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) &&
+ unlikely(!folio_test_private(folio))) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ btrfs_err_rl(fs_info,
+ "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode), folio_pos(folio));
+ ret = -EUCLEAN;
goto done;
}
- if (!epd->extent_locked) {
- ret = writepage_delalloc(BTRFS_I(inode), page, wbc, &nr_written);
- if (ret == 1)
- return 0;
- if (ret)
- goto done;
- }
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0)
+ goto done;
- ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
- nr_written, &nr);
+ ret = writepage_delalloc(inode, folio, bio_ctrl);
if (ret == 1)
return 0;
+ if (ret)
+ goto done;
+
+ ret = extent_writepage_io(inode, folio, folio_pos(folio),
+ folio_size(folio), bio_ctrl, i_size);
+ if (ret == 1)
+ return 0;
+ if (unlikely(ret < 0))
+ btrfs_err_rl(fs_info,
+"failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(folio), blocks_per_folio,
+ &bio_ctrl->submit_bitmap, ret);
+
+ bio_ctrl->wbc->nr_to_write--;
done:
- if (nr == 0) {
- /* make sure the mapping tag for page dirty gets cleared */
- set_page_writeback(page);
- end_page_writeback(page);
- }
+ if (ret < 0)
+ mapping_set_error(folio->mapping, ret);
/*
- * Here we used to have a check for PageError() and then set @ret and
- * call end_extent_writepage().
- *
- * But in fact setting @ret here will cause different error paths
- * between subpage and regular sectorsize.
- *
- * For regular page size, we never submit current page, but only add
- * current page to current bio.
- * The bio submission can only happen in next page.
- * Thus if we hit the PageError() branch, @ret is already set to
- * non-zero value and will not get updated for regular sectorsize.
- *
- * But for subpage case, it's possible we submit part of current page,
- * thus can get PageError() set by submitted bio of the same page,
- * while our @ret is still 0.
- *
- * So here we unify the behavior and don't set @ret.
- * Error can still be properly passed to higher layer as page will
- * be set error, here we just don't handle the IO failure.
- *
- * NOTE: This is just a hotfix for subpage.
- * The root fix will be properly ending ordered extent when we hit
- * an error during writeback.
- *
- * But that needs a bigger refactoring, as we not only need to grab the
- * submitted OE, but also need to know exactly at which bytenr we hit
- * the error.
- * Currently the full page based __extent_writepage_io() is not
- * capable of that.
+ * Only unlock ranges that are submitted, as there can be some
+ * async-submitted ranges inside the folio.
*/
- if (PageError(page))
- end_extent_writepage(page, ret, page_start, page_end);
- if (epd->extent_locked) {
- /*
- * If epd->extent_locked, it's from extent_write_locked_range(),
- * the page can either be locked by lock_page() or
- * process_one_page().
- * Let btrfs_page_unlock_writer() handle both cases.
- */
- ASSERT(wbc);
- btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
- wbc->range_end + 1 - wbc->range_start);
- } else {
- unlock_page(page);
- }
+ btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
ASSERT(ret <= 0);
return ret;
}
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
-{
- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
- TASK_UNINTERRUPTIBLE);
-}
-
-static void end_extent_buffer_writeback(struct extent_buffer *eb)
-{
- if (test_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags))
- btrfs_zone_finish_endio(eb->fs_info, eb->start, eb->len);
-
- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
-}
-
/*
* Lock extent buffer status and pages for writeback.
*
- * May try to flush write bio if we can't get the lock.
- *
- * Return 0 if the extent buffer doesn't need to be submitted.
- * (E.g. the extent buffer is not dirty)
- * Return >0 is the extent buffer is submitted to bio.
- * Return <0 if something went wrong, no page is locked.
+ * Return %false if the extent buffer doesn't need to be submitted (e.g. the
+ * extent buffer is not dirty).
+ * Return %true if the extent buffer is submitted to bio.
*/
-static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
- struct extent_page_data *epd)
+static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
+ struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- int i, num_pages, failed_page_nr;
- int flush = 0;
- int ret = 0;
-
- if (!btrfs_try_tree_write_lock(eb)) {
- ret = flush_write_bio(epd);
- if (ret < 0)
- return ret;
- flush = 1;
- btrfs_tree_lock(eb);
- }
+ bool ret = false;
- if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
+ btrfs_tree_lock(eb);
+ while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
btrfs_tree_unlock(eb);
- if (!epd->sync_io)
- return 0;
- if (!flush) {
- ret = flush_write_bio(epd);
- if (ret < 0)
- return ret;
- flush = 1;
- }
- while (1) {
- wait_on_extent_buffer_writeback(eb);
- btrfs_tree_lock(eb);
- if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
- break;
- btrfs_tree_unlock(eb);
- }
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ return false;
+ wait_on_extent_buffer_writeback(eb);
+ btrfs_tree_lock(eb);
}
/*
@@ -4241,84 +1921,50 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
*/
spin_lock(&eb->refs_lock);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
+ unsigned long flags;
+
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
spin_unlock(&eb->refs_lock);
+
+ xas_lock_irqsave(&xas, flags);
+ xas_load(&xas);
+ xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+ xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
+ xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
+ xas_unlock_irqrestore(&xas, flags);
+
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
-eb->len,
fs_info->dirty_metadata_batch);
- ret = 1;
+ ret = true;
} else {
spin_unlock(&eb->refs_lock);
}
-
- btrfs_tree_unlock(eb);
-
- /*
- * Either we don't need to submit any tree block, or we're submitting
- * subpage eb.
- * Subpage metadata doesn't use page locking at all, so we can skip
- * the page locking.
- */
- if (!ret || fs_info->sectorsize < PAGE_SIZE)
- return ret;
-
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
-
- if (!trylock_page(p)) {
- if (!flush) {
- int err;
-
- err = flush_write_bio(epd);
- if (err < 0) {
- ret = err;
- failed_page_nr = i;
- goto err_unlock;
- }
- flush = 1;
- }
- lock_page(p);
- }
- }
-
- return ret;
-err_unlock:
- /* Unlock already locked pages */
- for (i = 0; i < failed_page_nr; i++)
- unlock_page(eb->pages[i]);
- /*
- * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
- * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
- * be made and undo everything done before.
- */
- btrfs_tree_lock(eb);
- spin_lock(&eb->refs_lock);
- set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- end_extent_buffer_writeback(eb);
- spin_unlock(&eb->refs_lock);
- percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
- fs_info->dirty_metadata_batch);
- btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
btrfs_tree_unlock(eb);
return ret;
}
-static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
+static void set_btree_ioerr(struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- btrfs_page_set_error(fs_info, page, eb->start, eb->len);
- if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
- return;
+ set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
+
+ /*
+ * A read may stumble upon this buffer later; make sure that it gets an
+ * error and knows there was an error.
+ */
+ clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
/*
- * If we error out, we should add back the dirty_metadata_bytes
- * to make it consistent.
+ * We also need to set the IO error on the mapping, because a write error
+ * will flip the file system read-only, and without bumping the error
+ * sequence on the superblock, syncfs() would then return 0 since we are
+ * read-only.
*/
- percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
- eb->len, fs_info->dirty_metadata_batch);
+ mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
/*
* If writeback for a btree extent that doesn't belong to a log tree
@@ -4332,7 +1978,7 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
* can be no longer dirty nor marked anymore for writeback (if a
* subsequent modification to the extent buffer didn't happen before the
* transaction commit), which makes filemap_fdata[write|wait]_range not
- * able to find the pages tagged with SetPageError at transaction
+ * able to find the pages which contain errors at transaction
* commit time. So if this happens we must abort the transaction,
* otherwise we commit a super block with btree roots that point to
* btree nodes/leafs whose content on disk is invalid - either garbage
@@ -4373,119 +2019,168 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
}
}
-/*
- * The endio specific version which won't touch any unsafe spinlock in endio
- * context.
- */
-static struct extent_buffer *find_extent_buffer_nolock(
- struct btrfs_fs_info *fs_info, u64 start)
+static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
{
- struct extent_buffer *eb;
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
+ unsigned long flags;
- rcu_read_lock();
- eb = radix_tree_lookup(&fs_info->buffer_radix,
- start >> fs_info->sectorsize_bits);
- if (eb && atomic_inc_not_zero(&eb->refs)) {
- rcu_read_unlock();
- return eb;
+ xas_lock_irqsave(&xas, flags);
+ xas_load(&xas);
+ xas_set_mark(&xas, mark);
+ xas_unlock_irqrestore(&xas, flags);
+}
+
+static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
+ unsigned long flags;
+
+ xas_lock_irqsave(&xas, flags);
+ xas_load(&xas);
+ xas_clear_mark(&xas, mark);
+ xas_unlock_irqrestore(&xas, flags);
+}
+
+static void buffer_tree_tag_for_writeback(struct btrfs_fs_info *fs_info,
+ unsigned long start, unsigned long end)
+{
+ XA_STATE(xas, &fs_info->buffer_tree, start);
+ unsigned int tagged = 0;
+ void *eb;
+
+ xas_lock_irq(&xas);
+ xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) {
+ xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
+ if (++tagged % XA_CHECK_SCHED)
+ continue;
+ xas_pause(&xas);
+ xas_unlock_irq(&xas);
+ cond_resched();
+ xas_lock_irq(&xas);
}
- rcu_read_unlock();
- return NULL;
+ xas_unlock_irq(&xas);
}
-/*
- * The endio function for subpage extent buffer write.
- *
- * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
- * after all extent buffers in the page has finished their writeback.
- */
-static void end_bio_subpage_eb_writepage(struct bio *bio)
+struct eb_batch {
+ unsigned int nr;
+ unsigned int cur;
+ struct extent_buffer *ebs[PAGEVEC_SIZE];
+};
+
+static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ batch->ebs[batch->nr++] = eb;
+ return (batch->nr < PAGEVEC_SIZE);
+}
- fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
- ASSERT(fs_info->sectorsize < PAGE_SIZE);
+static inline void eb_batch_init(struct eb_batch *batch)
+{
+ batch->nr = 0;
+ batch->cur = 0;
+}
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- u64 bvec_start = page_offset(page) + bvec->bv_offset;
- u64 bvec_end = bvec_start + bvec->bv_len - 1;
- u64 cur_bytenr = bvec_start;
+static inline struct extent_buffer *eb_batch_next(struct eb_batch *batch)
+{
+ if (batch->cur >= batch->nr)
+ return NULL;
+ return batch->ebs[batch->cur++];
+}
- ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
+static inline void eb_batch_release(struct eb_batch *batch)
+{
+ for (unsigned int i = 0; i < batch->nr; i++)
+ free_extent_buffer(batch->ebs[i]);
+ eb_batch_init(batch);
+}
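+/*
+ * eb_batch mirrors the folio_batch pattern: eb_batch_init(), fill it via
+ * buffer_tree_get_ebs_tag(), drain it with eb_batch_next(), and drop the
+ * references with eb_batch_release() before grabbing the next batch (see
+ * btrfs_btree_wait_writeback_range() below for the typical loop).
+ */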
- /* Iterate through all extent buffers in the range */
- while (cur_bytenr <= bvec_end) {
- struct extent_buffer *eb;
- int done;
+static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max,
+ xa_mark_t mark)
+{
+ struct extent_buffer *eb;
- /*
- * Here we can't use find_extent_buffer(), as it may
- * try to lock eb->refs_lock, which is not safe in endio
- * context.
- */
- eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
- ASSERT(eb);
+retry:
+ eb = xas_find_marked(xas, max, mark);
- cur_bytenr = eb->start + eb->len;
+ if (xas_retry(xas, eb))
+ goto retry;
- ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
- done = atomic_dec_and_test(&eb->io_pages);
- ASSERT(done);
+ if (!eb)
+ return NULL;
- if (bio->bi_status ||
- test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
- ClearPageUptodate(page);
- set_btree_ioerr(page, eb);
- }
+ if (!refcount_inc_not_zero(&eb->refs)) {
+ xas_reset(xas);
+ goto retry;
+ }
- btrfs_subpage_clear_writeback(fs_info, page, eb->start,
- eb->len);
- end_extent_buffer_writeback(eb);
- /*
- * free_extent_buffer() will grab spinlock which is not
- * safe in endio context. Thus here we manually dec
- * the ref.
- */
- atomic_dec(&eb->refs);
- }
+ if (unlikely(eb != xas_reload(xas))) {
+ free_extent_buffer(eb);
+ xas_reset(xas);
+ goto retry;
}
- bio_put(bio);
+
+ return eb;
}
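+/*
+ * The lookup above follows the usual RCU + refcount pattern: xas_retry()
+ * skips internal retry entries, refcount_inc_not_zero() refuses an eb that
+ * is already dying, and xas_reload() re-checks that the slot still points
+ * to the same eb, since it could have been removed and freed between the
+ * lookup and the reference grab.  On any mismatch we reset and retry.
+ */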
-static void end_bio_extent_buffer_writepage(struct bio *bio)
+static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info,
+ unsigned long *start,
+ unsigned long end, xa_mark_t tag,
+ struct eb_batch *batch)
{
- struct bio_vec *bvec;
+ XA_STATE(xas, &fs_info->buffer_tree, *start);
struct extent_buffer *eb;
- int done;
- struct bvec_iter_all iter_all;
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+ rcu_read_lock();
+ while ((eb = find_get_eb(&xas, end, tag)) != NULL) {
+ if (!eb_batch_add(batch, eb)) {
+ *start = ((eb->start + eb->len) >> fs_info->nodesize_bits);
+ goto out;
+ }
+ }
+ if (end == ULONG_MAX)
+ *start = ULONG_MAX;
+ else
+ *start = end + 1;
+out:
+ rcu_read_unlock();
- eb = (struct extent_buffer *)page->private;
- BUG_ON(!eb);
- done = atomic_dec_and_test(&eb->io_pages);
+ return batch->nr;
+}
- if (bio->bi_status ||
- test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
- ClearPageUptodate(page);
- set_btree_ioerr(page, eb);
- }
+/*
+ * The endio specific version which won't touch any unsafe spinlock in endio
+ * context.
+ */
+static struct extent_buffer *find_extent_buffer_nolock(
+ struct btrfs_fs_info *fs_info, u64 start)
+{
+ struct extent_buffer *eb;
+ unsigned long index = (start >> fs_info->nodesize_bits);
+
+ rcu_read_lock();
+ eb = xa_load(&fs_info->buffer_tree, index);
+ if (eb && !refcount_inc_not_zero(&eb->refs))
+ eb = NULL;
+ rcu_read_unlock();
+ return eb;
+}
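+/*
+ * The buffer_tree is indexed by eb->start >> nodesize_bits.  As a made-up
+ * example, with a 16K nodesize (nodesize_bits == 14) an extent buffer
+ * starting at bytenr 1M lives at xarray index 64.
+ */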
- end_page_writeback(page);
+static void end_bbio_meta_write(struct btrfs_bio *bbio)
+{
+ struct extent_buffer *eb = bbio->private;
+ struct folio_iter fi;
- if (!done)
- continue;
+ if (bbio->bio.bi_status != BLK_STS_OK)
+ set_btree_ioerr(eb);
- end_extent_buffer_writeback(eb);
+ bio_for_each_folio_all(fi, &bbio->bio) {
+ btrfs_meta_folio_clear_writeback(fi.folio, eb);
}
- bio_put(bio);
+ buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK);
+ clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ bio_put(&bbio->bio);
}
static void prepare_eb_write(struct extent_buffer *eb)
@@ -4495,342 +2190,131 @@ static void prepare_eb_write(struct extent_buffer *eb)
unsigned long end;
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
- atomic_set(&eb->io_pages, num_extent_pages(eb));
/* Set btree blocks beyond nritems with 0 to avoid stale content */
nritems = btrfs_header_nritems(eb);
if (btrfs_header_level(eb) > 0) {
- end = btrfs_node_key_ptr_offset(nritems);
+ end = btrfs_node_key_ptr_offset(eb, nritems);
memzero_extent_buffer(eb, end, eb->len - end);
} else {
/*
* Leaf:
* header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
*/
- start = btrfs_item_nr_offset(nritems);
- end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
+ start = btrfs_item_nr_offset(eb, nritems);
+ end = btrfs_item_nr_offset(eb, 0);
+ if (nritems == 0)
+ end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
+ else
+ end += btrfs_item_offset(eb, nritems - 1);
memzero_extent_buffer(eb, start, end - start);
}
}
-/*
- * Unlike the work in write_one_eb(), we rely completely on extent locking.
- * Page locking is only utilized at minimum to keep the VMM code happy.
- */
-static int write_one_subpage_eb(struct extent_buffer *eb,
- struct writeback_control *wbc,
- struct extent_page_data *epd)
+static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
+ struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- struct page *page = eb->pages[0];
- unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
- bool no_dirty_ebs = false;
- int ret;
+ struct btrfs_bio *bbio;
prepare_eb_write(eb);
- /* clear_page_dirty_for_io() in subpage helper needs page locked */
- lock_page(page);
- btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
-
- /* Check if this is the last dirty bit to update nr_written */
- no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
- eb->start, eb->len);
- if (no_dirty_ebs)
- clear_page_dirty_for_io(page);
-
- ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- &epd->bio_ctrl, page, eb->start, eb->len,
- eb->start - page_offset(page),
- end_bio_subpage_eb_writepage, 0, 0, false);
- if (ret) {
- btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
- set_btree_ioerr(page, eb);
- unlock_page(page);
-
- if (atomic_dec_and_test(&eb->io_pages))
- end_extent_buffer_writeback(eb);
- return -EIO;
+ bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
+ REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
+ BTRFS_I(fs_info->btree_inode), eb->start,
+ end_bbio_meta_write, eb);
+ bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
+ bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
+ wbc_init_bio(wbc, &bbio->bio);
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio = eb->folios[i];
+ u64 range_start = max_t(u64, eb->start, folio_pos(folio));
+ u32 range_len = min_t(u64, folio_next_pos(folio),
+ eb->start + eb->len) - range_start;
+
+ folio_lock(folio);
+ btrfs_meta_folio_clear_dirty(folio, eb);
+ btrfs_meta_folio_set_writeback(folio, eb);
+ if (!folio_test_dirty(folio))
+ wbc->nr_to_write -= folio_nr_pages(folio);
+ bio_add_folio_nofail(&bbio->bio, folio, range_len,
+ offset_in_folio(folio, range_start));
+ wbc_account_cgroup_owner(wbc, folio, range_len);
+ folio_unlock(folio);
}
- unlock_page(page);
/*
- * Submission finished without problem, if no range of the page is
- * dirty anymore, we have submitted a page. Update nr_written in wbc.
+ * If the fs is already in error status, do not submit any writeback
+ * but immediately finish it.
*/
- if (no_dirty_ebs)
- update_nr_written(wbc, 1);
- return ret;
-}
-
-static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
- struct writeback_control *wbc,
- struct extent_page_data *epd)
-{
- u64 disk_bytenr = eb->start;
- int i, num_pages;
- unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
- int ret = 0;
-
- prepare_eb_write(eb);
-
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
-
- clear_page_dirty_for_io(p);
- set_page_writeback(p);
- ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- &epd->bio_ctrl, p, disk_bytenr,
- PAGE_SIZE, 0,
- end_bio_extent_buffer_writepage,
- 0, 0, false);
- if (ret) {
- set_btree_ioerr(p, eb);
- if (PageWriteback(p))
- end_page_writeback(p);
- if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
- end_extent_buffer_writeback(eb);
- ret = -EIO;
- break;
- }
- disk_bytenr += PAGE_SIZE;
- update_nr_written(wbc, 1);
- unlock_page(p);
- }
-
- if (unlikely(ret)) {
- for (; i < num_pages; i++) {
- struct page *p = eb->pages[i];
- clear_page_dirty_for_io(p);
- unlock_page(p);
- }
+ if (unlikely(BTRFS_FS_ERROR(fs_info))) {
+ btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info)));
+ return;
}
-
- return ret;
+ btrfs_submit_bbio(bbio, 0);
}
/*
- * Submit one subpage btree page.
+ * Wait for all eb writeback in the given range to finish.
*
- * The main difference to submit_eb_page() is:
- * - Page locking
- * For subpage, we don't rely on page locking at all.
- *
- * - Flush write bio
- * We only flush bio if we may be unable to fit current extent buffers into
- * current bio.
- *
- * Return >=0 for the number of submitted extent buffers.
- * Return <0 for fatal error.
+ * @fs_info: The fs_info for this file system.
+ * @start: The offset of the range to start waiting on writeback.
+ * @end: The end of the range, inclusive. This is meant to be used in
+ * conjunction with wait_marked_extents, so this will usually be
+ * the_next_eb->start - 1.
*/
-static int submit_eb_subpage(struct page *page,
- struct writeback_control *wbc,
- struct extent_page_data *epd)
+void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,
+ u64 end)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
- int submitted = 0;
- u64 page_start = page_offset(page);
- int bit_start = 0;
- int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
- int ret;
+ struct eb_batch batch;
+ unsigned long start_index = (start >> fs_info->nodesize_bits);
+ unsigned long end_index = (end >> fs_info->nodesize_bits);
- /* Lock and write each dirty extent buffers in the range */
- while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ eb_batch_init(&batch);
+ while (start_index <= end_index) {
struct extent_buffer *eb;
- unsigned long flags;
- u64 start;
+ unsigned int nr_ebs;
- /*
- * Take private lock to ensure the subpage won't be detached
- * in the meantime.
- */
- spin_lock(&page->mapping->private_lock);
- if (!PagePrivate(page)) {
- spin_unlock(&page->mapping->private_lock);
+ nr_ebs = buffer_tree_get_ebs_tag(fs_info, &start_index, end_index,
+ PAGECACHE_TAG_WRITEBACK, &batch);
+ if (!nr_ebs)
break;
- }
- spin_lock_irqsave(&subpage->lock, flags);
- if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
- subpage->bitmaps)) {
- spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
- bit_start++;
- continue;
- }
-
- start = page_start + bit_start * fs_info->sectorsize;
- bit_start += sectors_per_node;
-
- /*
- * Here we just want to grab the eb without touching extra
- * spin locks, so call find_extent_buffer_nolock().
- */
- eb = find_extent_buffer_nolock(fs_info, start);
- spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
- /*
- * The eb has already reached 0 refs thus find_extent_buffer()
- * doesn't return it. We don't need to write back such eb
- * anyway.
- */
- if (!eb)
- continue;
-
- ret = lock_extent_buffer_for_io(eb, epd);
- if (ret == 0) {
- free_extent_buffer(eb);
- continue;
- }
- if (ret < 0) {
- free_extent_buffer(eb);
- goto cleanup;
- }
- ret = write_one_subpage_eb(eb, wbc, epd);
- free_extent_buffer(eb);
- if (ret < 0)
- goto cleanup;
- submitted++;
- }
- return submitted;
-
-cleanup:
- /* We hit error, end bio for the submitted extent buffers */
- end_write_bio(epd, ret);
- return ret;
-}
-
-/*
- * Submit all page(s) of one extent buffer.
- *
- * @page: the page of one extent buffer
- * @eb_context: to determine if we need to submit this page, if current page
- * belongs to this eb, we don't need to submit
- *
- * The caller should pass each page in their bytenr order, and here we use
- * @eb_context to determine if we have submitted pages of one extent buffer.
- *
- * If we have, we just skip until we hit a new page that doesn't belong to
- * current @eb_context.
- *
- * If not, we submit all the page(s) of the extent buffer.
- *
- * Return >0 if we have submitted the extent buffer successfully.
- * Return 0 if we don't need to submit the page, as it's already submitted by
- * previous call.
- * Return <0 for fatal error.
- */
-static int submit_eb_page(struct page *page, struct writeback_control *wbc,
- struct extent_page_data *epd,
- struct extent_buffer **eb_context)
-{
- struct address_space *mapping = page->mapping;
- struct btrfs_block_group *cache = NULL;
- struct extent_buffer *eb;
- int ret;
-
- if (!PagePrivate(page))
- return 0;
-
- if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
- return submit_eb_subpage(page, wbc, epd);
-
- spin_lock(&mapping->private_lock);
- if (!PagePrivate(page)) {
- spin_unlock(&mapping->private_lock);
- return 0;
- }
-
- eb = (struct extent_buffer *)page->private;
-
- /*
- * Shouldn't happen and normally this would be a BUG_ON but no point
- * crashing the machine for something we can survive anyway.
- */
- if (WARN_ON(!eb)) {
- spin_unlock(&mapping->private_lock);
- return 0;
- }
-
- if (eb == *eb_context) {
- spin_unlock(&mapping->private_lock);
- return 0;
- }
- ret = atomic_inc_not_zero(&eb->refs);
- spin_unlock(&mapping->private_lock);
- if (!ret)
- return 0;
-
- if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
- /*
- * If for_sync, this hole will be filled with
- * trasnsaction commit.
- */
- if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
- ret = -EAGAIN;
- else
- ret = 0;
- free_extent_buffer(eb);
- return ret;
- }
-
- *eb_context = eb;
-
- ret = lock_extent_buffer_for_io(eb, epd);
- if (ret <= 0) {
- btrfs_revert_meta_write_pointer(cache, eb);
- if (cache)
- btrfs_put_block_group(cache);
- free_extent_buffer(eb);
- return ret;
- }
- if (cache) {
- /* Impiles write in zoned mode */
- btrfs_put_block_group(cache);
- /* Mark the last eb in a block group */
- if (cache->seq_zone && eb->start + eb->len == cache->zone_capacity)
- set_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags);
+ while ((eb = eb_batch_next(&batch)) != NULL)
+ wait_on_extent_buffer_writeback(eb);
+ eb_batch_release(&batch);
+ cond_resched();
}
- ret = write_one_eb(eb, wbc, epd);
- free_extent_buffer(eb);
- if (ret < 0)
- return ret;
- return 1;
}
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct extent_buffer *eb_context = NULL;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
- .extent_locked = 0,
- .sync_io = wbc->sync_mode == WB_SYNC_ALL,
- };
- struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
+ struct btrfs_eb_write_context ctx = { .wbc = wbc };
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
- struct pagevec pvec;
- int nr_pages;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
+ struct eb_batch batch;
+ unsigned int nr_ebs;
+ unsigned long index;
+ unsigned long end;
int scanned = 0;
xa_mark_t tag;
- pagevec_init(&pvec);
+ eb_batch_init(&batch);
if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
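+ /* Convert the page-based writeback_index into a nodesize-granular index. */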
+ index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits);
end = -1;
+
/*
* Start from the beginning does not need to cycle over the
* range, mark it as scanned.
*/
scanned = (index == 0);
} else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
+ index = (wbc->range_start >> fs_info->nodesize_bits);
+ end = (wbc->range_end >> fs_info->nodesize_bits);
+
scanned = 1;
}
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4840,31 +2324,39 @@ int btree_write_cache_pages(struct address_space *mapping,
btrfs_zoned_meta_io_lock(fs_info);
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
- tag_pages_for_writeback(mapping, index, end);
+ buffer_tree_tag_for_writeback(fs_info, index, end);
while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
- tag))) {
- unsigned i;
+ (nr_ebs = buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch))) {
+ struct extent_buffer *eb;
+
+ while ((eb = eb_batch_next(&batch)) != NULL) {
+ ctx.eb = eb;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx);
+ if (ret) {
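+ /*
+ * -EBUSY means the hole will be filled by a transaction
+ * commit, so skip this eb instead of treating it as an error.
+ */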
+ if (ret == -EBUSY)
+ ret = 0;
- ret = submit_eb_page(page, wbc, &epd, &eb_context);
- if (ret == 0)
+ if (ret) {
+ done = 1;
+ break;
+ }
continue;
- if (ret < 0) {
- done = 1;
- break;
}
- /*
- * the filesystem may choose to bump up nr_to_write.
- * We have to make sure to honor the new nr_to_write
- * at any time
- */
- nr_to_write_done = wbc->nr_to_write <= 0;
+ if (!lock_extent_buffer_for_io(eb, wbc))
+ continue;
+
+ /* Implies write in zoned mode. */
+ if (ctx.zoned_bg) {
+ /* Mark the last eb in the block group. */
+ btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb);
+ ctx.zoned_bg->meta_write_pointer += eb->len;
+ }
+ write_one_eb(eb, wbc);
}
- pagevec_release(&pvec);
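+ /* The filesystem may bump up nr_to_write; honor the new value at any time. */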
+ nr_to_write_done = (wbc->nr_to_write <= 0);
+ eb_batch_release(&batch);
cond_resched();
}
if (!scanned && !done) {
@@ -4876,10 +2368,6 @@ retry:
index = 0;
goto retry;
}
- if (ret < 0) {
- end_write_bio(&epd, ret);
- goto out;
- }
/*
* If something went wrong, don't allow any metadata write bio to be
* submitted.
@@ -4906,24 +2394,27 @@ retry:
* Now such dirty tree block will not be cleaned by any dirty
* extent io tree. Thus we don't want to submit such wild eb
* if the fs already has error.
+ *
+ * We can get ret > 0 from submit_extent_folio() indicating how many ebs
+ * were submitted. Reset it to 0 to avoid false alerts for the caller.
*/
- if (!BTRFS_FS_ERROR(fs_info)) {
- ret = flush_write_bio(&epd);
- } else {
+ if (ret > 0)
+ ret = 0;
+ if (!ret && BTRFS_FS_ERROR(fs_info))
ret = -EROFS;
- end_write_bio(&epd, ret);
- }
-out:
+
+ if (ctx.zoned_bg)
+ btrfs_put_block_group(ctx.zoned_bg);
btrfs_zoned_meta_io_unlock(fs_info);
return ret;
}
-/**
+/*
* Walk the list of dirty pages of the given address space and write all of them.
*
- * @mapping: address space structure to write
- * @wbc: subtract the number of written pages from *@wbc->nr_to_write
- * @epd: holds context for the write, namely the bio
+ * @mapping: address space structure to write
+ * @wbc: subtract the number of written pages from *@wbc->nr_to_write
+ * @bio_ctrl: holds context for the write, namely the bio
*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
@@ -4934,15 +2425,15 @@ out:
* existing IO to complete.
*/
static int extent_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
+ struct writeback_control *wbc = bio_ctrl->wbc;
struct inode *inode = mapping->host;
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
- struct pagevec pvec;
- int nr_pages;
+ struct folio_batch fbatch;
+ unsigned int nr_folios;
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
@@ -4962,7 +2453,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
if (!igrab(inode))
return 0;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -4991,69 +2482,88 @@ static int extent_write_cache_pages(struct address_space *mapping,
&BTRFS_I(inode)->runtime_flags))
wbc->tagged_writepages = 1;
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
- &index, end, tag))) {
+ (nr_folios = filemap_get_folios_tag(mapping, &index,
+ end, tag, &fbatch))) {
unsigned i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
- done_index = page->index + 1;
+ done_index = folio_next_index(folio);
/*
* At this point we hold neither the i_pages lock nor
- * the page lock: the page may be truncated or
- * invalidated (changing page->mapping to NULL),
- * or even swizzled back from swapper_space to
- * tmpfs file mapping
+ * the folio lock: the folio may be truncated or
+ * invalidated (changing folio->mapping to NULL).
*/
- if (!trylock_page(page)) {
- ret = flush_write_bio(epd);
- BUG_ON(ret < 0);
- lock_page(page);
+ if (!folio_trylock(folio)) {
+ submit_write_bio(bio_ctrl, 0);
+ folio_lock(folio);
}
- if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
+ if (unlikely(folio->mapping != mapping)) {
+ folio_unlock(folio);
continue;
}
- if (wbc->sync_mode != WB_SYNC_NONE) {
- if (PageWriteback(page)) {
- ret = flush_write_bio(epd);
- BUG_ON(ret < 0);
- }
- wait_on_page_writeback(page);
+ if (!folio_test_dirty(folio)) {
+ /* Someone wrote it for us. */
+ folio_unlock(folio);
+ continue;
}
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- unlock_page(page);
+ /*
+ * For subpage case, compression can lead to mixed
+ * writeback and dirty flags, e.g:
+ * 0 32K 64K 96K 128K
+ * | |//////||/////| |//|
+ *
+ * In above case, [32K, 96K) is asynchronously submitted
+ * for compression, and [124K, 128K) needs to be written back.
+ *
+ * If we didn't wait writeback for page 64K, [124K, 128K)
+ * won't be submitted as the page still has writeback flag
+ * and will be skipped in the next check.
+ *
+ * This mixed writeback and dirty case is only possible for
+ * subpage case.
+ *
+ * TODO: Remove this check after migrating compression to
+ * regular submission.
+ */
+ if (wbc->sync_mode != WB_SYNC_NONE ||
+ btrfs_is_subpage(inode_to_fs_info(inode), folio)) {
+ if (folio_test_writeback(folio))
+ submit_write_bio(bio_ctrl, 0);
+ folio_wait_writeback(folio);
+ }
+
+ if (folio_test_writeback(folio) ||
+ !folio_clear_dirty_for_io(folio)) {
+ folio_unlock(folio);
continue;
}
- ret = __extent_writepage(page, wbc, epd);
+ ret = extent_writepage(folio, bio_ctrl);
if (ret < 0) {
done = 1;
break;
}
/*
- * the filesystem may choose to bump up nr_to_write.
+ * The filesystem may choose to bump up nr_to_write.
* We have to make sure to honor the new nr_to_write
- * at any time
+ * at any time.
*/
- nr_to_write_done = wbc->nr_to_write <= 0;
+ nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
+ wbc->nr_to_write <= 0);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
if (!scanned && !done) {
@@ -5070,36 +2580,14 @@ retry:
* page in our current bio, and thus deadlock, so flush the
* write bio here.
*/
- ret = flush_write_bio(epd);
- if (!ret)
- goto retry;
+ submit_write_bio(bio_ctrl, 0);
+ goto retry;
}
if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
mapping->writeback_index = done_index;
- btrfs_add_delayed_iput(inode);
- return ret;
-}
-
-int extent_write_full_page(struct page *page, struct writeback_control *wbc)
-{
- int ret;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
- .extent_locked = 0,
- .sync_io = wbc->sync_mode == WB_SYNC_ALL,
- };
-
- ret = __extent_writepage(page, wbc, &epd);
- ASSERT(ret <= 0);
- if (ret < 0) {
- end_write_bio(&epd, ret);
- return ret;
- }
-
- ret = flush_write_bio(&epd);
- ASSERT(ret <= 0);
+ btrfs_add_delayed_iput(BTRFS_I(inode));
return ret;
}
@@ -5108,137 +2596,136 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 * already been run (aka, ordered extent inserted) and all pages are still
* locked.
*/
-int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
+void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
+ u64 start, u64 end, struct writeback_control *wbc,
+ bool pages_dirty)
{
bool found_error = false;
- int first_error = 0;
int ret = 0;
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ const u32 sectorsize = fs_info->sectorsize;
+ loff_t i_size = i_size_read(inode);
u64 cur = start;
- unsigned long nr_pages;
- const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
- .extent_locked = 1,
- .sync_io = 1,
- };
- struct writeback_control wbc_writepages = {
- .sync_mode = WB_SYNC_ALL,
- .range_start = start,
- .range_end = end + 1,
- /* We're called from an async helper function */
- .punt_to_cgroup = 1,
- .no_cgroup_owner = 1,
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .wbc = wbc,
+ .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
};
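+ /* Honor the caller's request not to own cgroup writeback for these bios. */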
+ if (wbc->no_cgroup_owner)
+ bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
+
ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
- nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
- PAGE_SHIFT;
- wbc_writepages.nr_to_write = nr_pages * 2;
- wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
while (cur <= end) {
- u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+ u64 cur_end;
+ u32 cur_len;
+ struct folio *folio;
+
+ folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
- page = find_get_page(mapping, cur >> PAGE_SHIFT);
/*
- * All pages in the range are locked since
- * btrfs_run_delalloc_range(), thus there is no way to clear
- * the page dirty flag.
+ * This shouldn't happen: the folios are pinned and locked, so this
+ * code is just a safety net and shouldn't actually run.
*/
- ASSERT(PageLocked(page));
- ASSERT(PageDirty(page));
- clear_page_dirty_for_io(page);
- ret = __extent_writepage(page, &wbc_writepages, &epd);
- ASSERT(ret <= 0);
- if (ret < 0) {
- found_error = true;
- first_error = ret;
+ if (IS_ERR(folio)) {
+ cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+ cur_len = cur_end + 1 - cur;
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
+ cur, cur_len, false);
+ mapping_set_error(mapping, PTR_ERR(folio));
+ cur = cur_end;
+ continue;
}
- put_page(page);
+
+ cur_end = min_t(u64, folio_next_pos(folio) - 1, end);
+ cur_len = cur_end + 1 - cur;
+
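+ /* All folios in the range are still locked by btrfs_run_delalloc_range(). */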
+ ASSERT(folio_test_locked(folio));
+ if (pages_dirty && folio != locked_folio)
+ ASSERT(folio_test_dirty(folio));
+
+ /*
+ * Set the submission bitmap to submit all sectors.
+ * extent_writepage_io() will do the truncation correctly.
+ */
+ bio_ctrl.submit_bitmap = (unsigned long)-1;
+ ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
+ &bio_ctrl, i_size);
+ if (ret == 1)
+ goto next_page;
+
+ if (ret)
+ mapping_set_error(mapping, ret);
+ btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
+ if (ret < 0)
+ found_error = true;
+next_page:
+ folio_put(folio);
cur = cur_end + 1;
}
- if (!found_error)
- ret = flush_write_bio(&epd);
- else
- end_write_bio(&epd, ret);
-
- wbc_detach_inode(&wbc_writepages);
- if (found_error)
- return first_error;
- return ret;
+ submit_write_bio(&bio_ctrl, found_error ? ret : 0);
}
-int extent_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- const bool data_reloc = btrfs_is_data_reloc_root(BTRFS_I(inode)->root);
- const bool zoned = btrfs_is_zoned(BTRFS_I(inode)->root->fs_info);
int ret = 0;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
- .extent_locked = 0,
- .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .wbc = wbc,
+ .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
};
/*
* Allow only a single thread to do the reloc work in zoned mode to
* protect the write pointer updates.
*/
- if (data_reloc && zoned)
- btrfs_inode_lock(inode, 0);
- ret = extent_write_cache_pages(mapping, wbc, &epd);
- if (data_reloc && zoned)
- btrfs_inode_unlock(inode, 0);
- ASSERT(ret <= 0);
- if (ret < 0) {
- end_write_bio(&epd, ret);
- return ret;
- }
- ret = flush_write_bio(&epd);
+ btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
+ ret = extent_write_cache_pages(mapping, &bio_ctrl);
+ submit_write_bio(&bio_ctrl, ret);
+ btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
return ret;
}
-void extent_readahead(struct readahead_control *rac)
+void btrfs_readahead(struct readahead_control *rac)
{
- struct btrfs_bio_ctrl bio_ctrl = { 0 };
- struct page *pagepool[16];
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .opf = REQ_OP_READ | REQ_RAHEAD,
+ .ractl = rac,
+ .last_em_start = U64_MAX,
+ };
+ struct folio *folio;
+ struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+ const u64 start = readahead_pos(rac);
+ const u64 end = start + readahead_length(rac) - 1;
+ struct extent_state *cached_state = NULL;
struct extent_map *em_cached = NULL;
- u64 prev_em_start = (u64)-1;
- int nr;
- while ((nr = readahead_page_batch(rac, pagepool))) {
- u64 contig_start = readahead_pos(rac);
- u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
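+ /* Lock the whole readahead range once; each folio read below reuses the cached state. */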
+ lock_extents_for_read(inode, start, end, &cached_state);
- contiguous_readpages(pagepool, nr, contig_start, contig_end,
- &em_cached, &bio_ctrl, &prev_em_start);
- }
+ while ((folio = readahead_folio(rac)) != NULL)
+ btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
- if (em_cached)
- free_extent_map(em_cached);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
- if (bio_ctrl.bio) {
- if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
- return;
- }
+ if (em_cached)
+ btrfs_free_extent_map(em_cached);
+ submit_one_bio(&bio_ctrl);
}
/*
- * basic invalidatepage code, this waits on any locked or writeback
- * ranges corresponding to the page, and then deletes any extent state
+ * basic invalidate_folio code, this waits on any locked or writeback
+ * ranges corresponding to the folio, and then deletes any extent state
* records from the tree
*/
-int extent_invalidatepage(struct extent_io_tree *tree,
- struct page *page, unsigned long offset)
+int extent_invalidate_folio(struct extent_io_tree *tree,
+ struct folio *folio, size_t offset)
{
struct extent_state *cached_state = NULL;
- u64 start = page_offset(page);
- u64 end = start + PAGE_SIZE - 1;
- size_t blocksize = page->mapping->host->i_sb->s_blocksize;
+ u64 start = folio_pos(folio);
+ u64 end = start + folio_size(folio) - 1;
+ size_t blocksize = folio_to_fs_info(folio)->sectorsize;
/* This function is only called for the btree inode */
ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
@@ -5247,617 +2734,239 @@ int extent_invalidatepage(struct extent_io_tree *tree,
if (start > end)
return 0;
- lock_extent_bits(tree, start, end, &cached_state);
- wait_on_page_writeback(page);
+ btrfs_lock_extent(tree, start, end, &cached_state);
+ folio_wait_writeback(folio);
/*
* Currently for btree io tree, only EXTENT_LOCKED is utilized,
* so here we only need to unlock the extent range to free any
* existing extent state.
*/
- unlock_extent_cached(tree, start, end, &cached_state);
+ btrfs_unlock_extent(tree, start, end, &cached_state);
return 0;
}
/*
- * a helper for releasepage, this tests for areas of the page that
- * are locked or under IO and drops the related state bits if it is safe
- * to drop the page.
- */
-static int try_release_extent_state(struct extent_io_tree *tree,
- struct page *page, gfp_t mask)
-{
- u64 start = page_offset(page);
- u64 end = start + PAGE_SIZE - 1;
- int ret = 1;
-
- if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
- ret = 0;
- } else {
- /*
- * At this point we can safely clear everything except the
- * locked bit, the nodatasum bit and the delalloc new bit.
- * The delalloc new bit will be cleared by ordered extent
- * completion.
- */
- ret = __clear_extent_bit(tree, start, end,
- ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
- 0, 0, NULL, mask, NULL);
-
- /* if clear_extent_bit failed for enomem reasons,
- * we can't allow the release to continue.
- */
- if (ret < 0)
- ret = 0;
- else
- ret = 1;
- }
- return ret;
-}
-
-/*
- * a helper for releasepage. As long as there are no locked extents
- * in the range corresponding to the page, both state records and extent
- * map records are removed
- */
-int try_release_extent_mapping(struct page *page, gfp_t mask)
-{
- struct extent_map *em;
- u64 start = page_offset(page);
- u64 end = start + PAGE_SIZE - 1;
- struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
- struct extent_io_tree *tree = &btrfs_inode->io_tree;
- struct extent_map_tree *map = &btrfs_inode->extent_tree;
-
- if (gfpflags_allow_blocking(mask) &&
- page->mapping->host->i_size > SZ_16M) {
- u64 len;
- while (start <= end) {
- struct btrfs_fs_info *fs_info;
- u64 cur_gen;
-
- len = end - start + 1;
- write_lock(&map->lock);
- em = lookup_extent_mapping(map, start, len);
- if (!em) {
- write_unlock(&map->lock);
- break;
- }
- if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
- em->start != start) {
- write_unlock(&map->lock);
- free_extent_map(em);
- break;
- }
- if (test_range_bit(tree, em->start,
- extent_map_end(em) - 1,
- EXTENT_LOCKED, 0, NULL))
- goto next;
- /*
- * If it's not in the list of modified extents, used
- * by a fast fsync, we can remove it. If it's being
- * logged we can safely remove it since fsync took an
- * extra reference on the em.
- */
- if (list_empty(&em->list) ||
- test_bit(EXTENT_FLAG_LOGGING, &em->flags))
- goto remove_em;
- /*
- * If it's in the list of modified extents, remove it
- * only if its generation is older then the current one,
- * in which case we don't need it for a fast fsync.
- * Otherwise don't remove it, we could be racing with an
- * ongoing fast fsync that could miss the new extent.
- */
- fs_info = btrfs_inode->root->fs_info;
- spin_lock(&fs_info->trans_lock);
- cur_gen = fs_info->generation;
- spin_unlock(&fs_info->trans_lock);
- if (em->generation >= cur_gen)
- goto next;
-remove_em:
- /*
- * We only remove extent maps that are not in the list of
- * modified extents or that are in the list but with a
- * generation lower then the current generation, so there
- * is no need to set the full fsync flag on the inode (it
- * hurts the fsync performance for workloads with a data
- * size that exceeds or is close to the system's memory).
- */
- remove_extent_mapping(map, em);
- /* once for the rb tree */
- free_extent_map(em);
-next:
- start = extent_map_end(em);
- write_unlock(&map->lock);
-
- /* once for us */
- free_extent_map(em);
-
- cond_resched(); /* Allow large-extent preemption. */
- }
- }
- return try_release_extent_state(tree, page, mask);
-}
-
-/*
- * helper function for fiemap, which doesn't want to see any holes.
- * This maps until we find something past 'last'
+ * A helper for struct address_space_operations::release_folio, this tests for
+ * areas of the folio that are locked or under IO and drops the related state
+ * bits if it is safe to drop the folio.
*/
-static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
- u64 offset, u64 last)
+static bool try_release_extent_state(struct extent_io_tree *tree,
+ struct folio *folio)
{
- u64 sectorsize = btrfs_inode_sectorsize(inode);
- struct extent_map *em;
- u64 len;
-
- if (offset >= last)
- return NULL;
-
- while (1) {
- len = last - offset;
- if (len == 0)
- break;
- len = ALIGN(len, sectorsize);
- em = btrfs_get_extent_fiemap(inode, offset, len);
- if (IS_ERR_OR_NULL(em))
- return em;
-
- /* if this isn't a hole return it */
- if (em->block_start != EXTENT_MAP_HOLE)
- return em;
-
- /* this is a hole, advance to the next extent */
- offset = extent_map_end(em);
- free_extent_map(em);
- if (offset >= last)
- break;
- }
- return NULL;
-}
-
-/*
- * To cache previous fiemap extent
- *
- * Will be used for merging fiemap extent
- */
-struct fiemap_cache {
- u64 offset;
- u64 phys;
- u64 len;
- u32 flags;
- bool cached;
-};
-
-/*
- * Helper to submit fiemap extent.
- *
- * Will try to merge current fiemap extent specified by @offset, @phys,
- * @len and @flags with cached one.
- * And only when we fail to merge, the cached one will be submitted as
- * fiemap extent.
- *
- * Return value is the same as fiemap_fill_next_extent().
- */
-static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
- struct fiemap_cache *cache,
- u64 offset, u64 phys, u64 len, u32 flags)
-{
- int ret = 0;
+ struct extent_state *cached_state = NULL;
+ u64 start = folio_pos(folio);
+ u64 end = start + folio_size(folio) - 1;
+ u32 range_bits;
+ u32 clear_bits;
+ bool ret = false;
+ int ret2;
- if (!cache->cached)
- goto assign;
+ btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state);
/*
- * Sanity check, extent_fiemap() should have ensured that new
- * fiemap extent won't overlap with cached one.
- * Not recoverable.
- *
- * NOTE: Physical address can overlap, due to compression
+ * We can release the folio if it's locked only for ordered extent
+ * completion, since that doesn't require using the folio.
*/
- if (cache->offset + cache->len > offset) {
- WARN_ON(1);
- return -EINVAL;
- }
+ if ((range_bits & EXTENT_LOCKED) &&
+ !(range_bits & EXTENT_FINISHING_ORDERED))
+ goto out;
+ clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW |
+ EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED |
+ EXTENT_FINISHING_ORDERED);
/*
- * Only merges fiemap extents if
- * 1) Their logical addresses are continuous
- *
- * 2) Their physical addresses are continuous
- * So truly compressed (physical size smaller than logical size)
- * extents won't get merged with each other
- *
- * 3) Share same flags except FIEMAP_EXTENT_LAST
- * So regular extent won't get merged with prealloc extent
+ * At this point we can safely clear everything except the locked,
+ * nodatasum, delalloc new and finishing ordered bits. The delalloc new
+ * bit will be cleared by ordered extent completion.
*/
- if (cache->offset + cache->len == offset &&
- cache->phys + cache->len == phys &&
- (cache->flags & ~FIEMAP_EXTENT_LAST) ==
- (flags & ~FIEMAP_EXTENT_LAST)) {
- cache->len += len;
- cache->flags |= flags;
- goto try_submit_last;
- }
+ ret2 = btrfs_clear_extent_bit(tree, start, end, clear_bits, &cached_state);
+ /*
+ * If btrfs_clear_extent_bit() failed due to ENOMEM, we can't allow the
+ * release to continue.
+ */
+ if (ret2 == 0)
+ ret = true;
+out:
+ btrfs_free_extent_state(cached_state);
- /* Not mergeable, need to submit cached one */
- ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
- cache->len, cache->flags);
- cache->cached = false;
- if (ret)
- return ret;
-assign:
- cache->cached = true;
- cache->offset = offset;
- cache->phys = phys;
- cache->len = len;
- cache->flags = flags;
-try_submit_last:
- if (cache->flags & FIEMAP_EXTENT_LAST) {
- ret = fiemap_fill_next_extent(fieinfo, cache->offset,
- cache->phys, cache->len, cache->flags);
- cache->cached = false;
- }
return ret;
}
/*
- * Emit last fiemap cache
- *
- * The last fiemap cache may still be cached in the following case:
- * 0 4k 8k
- * |<- Fiemap range ->|
- * |<------------ First extent ----------->|
- *
- * In this case, the first extent range will be cached but not emitted.
- * So we must emit it before ending extent_fiemap().
+ * a helper for release_folio. As long as there are no locked extents
+ * in the range corresponding to the page, both state records and extent
+ * map records are removed
*/
-static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
- struct fiemap_cache *cache)
+bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
{
- int ret;
-
- if (!cache->cached)
- return 0;
-
- ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
- cache->len, cache->flags);
- cache->cached = false;
- if (ret > 0)
- ret = 0;
- return ret;
-}
-
-int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len)
-{
- int ret = 0;
- u64 off;
- u64 max = start + len;
- u32 flags = 0;
- u32 found_type;
- u64 last;
- u64 last_for_get_extent = 0;
- u64 disko = 0;
- u64 isize = i_size_read(&inode->vfs_inode);
- struct btrfs_key found_key;
- struct extent_map *em = NULL;
- struct extent_state *cached_state = NULL;
- struct btrfs_path *path;
- struct btrfs_root *root = inode->root;
- struct fiemap_cache cache = { 0 };
- struct ulist *roots;
- struct ulist *tmp_ulist;
- int end = 0;
- u64 em_start = 0;
- u64 em_len = 0;
- u64 em_end = 0;
-
- if (len == 0)
- return -EINVAL;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- roots = ulist_alloc(GFP_KERNEL);
- tmp_ulist = ulist_alloc(GFP_KERNEL);
- if (!roots || !tmp_ulist) {
- ret = -ENOMEM;
- goto out_free_ulist;
- }
-
- /*
- * We can't initialize that to 'start' as this could miss extents due
- * to extent item merging
- */
- off = 0;
- start = round_down(start, btrfs_inode_sectorsize(inode));
- len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
-
- /*
- * lookup the last file extent. We're not using i_size here
- * because there might be preallocation past i_size
- */
- ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
- 0);
- if (ret < 0) {
- goto out_free_ulist;
- } else {
- WARN_ON(!ret);
- if (ret == 1)
- ret = 0;
- }
-
- path->slots[0]--;
- btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
- found_type = found_key.type;
-
- /* No extents, but there might be delalloc bits */
- if (found_key.objectid != btrfs_ino(inode) ||
- found_type != BTRFS_EXTENT_DATA_KEY) {
- /* have to trust i_size as the end */
- last = (u64)-1;
- last_for_get_extent = isize;
- } else {
- /*
- * remember the start of the last extent. There are a
- * bunch of different factors that go into the length of the
- * extent, so its much less complex to remember where it started
- */
- last = found_key.offset;
- last_for_get_extent = last + 1;
- }
- btrfs_release_path(path);
-
- /*
- * we might have some extents allocated but more delalloc past those
- * extents. so, we trust isize unless the start of the last extent is
- * beyond isize
- */
- if (last < isize) {
- last = (u64)-1;
- last_for_get_extent = isize;
- }
+ u64 start = folio_pos(folio);
+ u64 end = start + folio_size(folio) - 1;
+ struct btrfs_inode *inode = folio_to_inode(folio);
+ struct extent_io_tree *io_tree = &inode->io_tree;
- lock_extent_bits(&inode->io_tree, start, start + len - 1,
- &cached_state);
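+ /*
+ * Walk the extent maps in the folio range and drop those that are
+ * safe to remove (not pinned, not locked, not needed by a fast fsync).
+ */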
+ while (start <= end) {
+ const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
+ const u64 len = end - start + 1;
+ struct extent_map_tree *extent_tree = &inode->extent_tree;
+ struct extent_map *em;
- em = get_extent_skip_holes(inode, start, last_for_get_extent);
- if (!em)
- goto out;
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out;
- }
-
- while (!end) {
- u64 offset_in_extent = 0;
-
- /* break if the extent we found is outside the range */
- if (em->start >= max || extent_map_end(em) < off)
+ write_lock(&extent_tree->lock);
+ em = btrfs_lookup_extent_mapping(extent_tree, start, len);
+ if (!em) {
+ write_unlock(&extent_tree->lock);
break;
-
+ }
+ if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
+ write_unlock(&extent_tree->lock);
+ btrfs_free_extent_map(em);
+ break;
+ }
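+ /* Keep the extent map if any part of its range is still locked. */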
+ if (btrfs_test_range_bit_exists(io_tree, em->start,
+ btrfs_extent_map_end(em) - 1,
+ EXTENT_LOCKED))
+ goto next;
/*
- * get_extent may return an extent that starts before our
- * requested range. We have to make sure the ranges
- * we return to fiemap always move forward and don't
- * overlap, so adjust the offsets here
+ * If it's not in the list of modified extents, used by a fast
+ * fsync, we can remove it. If it's being logged we can safely
+ * remove it since fsync took an extra reference on the em.
*/
- em_start = max(em->start, off);
-
+ if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
+ goto remove_em;
/*
- * record the offset from the start of the extent
- * for adjusting the disk offset below. Only do this if the
- * extent isn't compressed since our in ram offset may be past
- * what we have actually allocated on disk.
+ * If it's in the list of modified extents, remove it only if
+ * its generation is older than the current one, in which case
+ * we don't need it for a fast fsync. Otherwise don't remove it,
+ * we could be racing with an ongoing fast fsync that could miss
+ * the new extent.
*/
- if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
- offset_in_extent = em_start - em->start;
- em_end = extent_map_end(em);
- em_len = em_end - em_start;
- flags = 0;
- if (em->block_start < EXTENT_MAP_LAST_BYTE)
- disko = em->block_start + offset_in_extent;
- else
- disko = 0;
-
+ if (em->generation >= cur_gen)
+ goto next;
+remove_em:
/*
- * bump off for our next call to get_extent
+ * We only remove extent maps that are not in the list of
+ * modified extents or that are in the list but with a
+ * generation lower than the current generation, so there is no
+ * need to set the full fsync flag on the inode (it hurts the
+ * fsync performance for workloads with a data size that exceeds
+ * or is close to the system's memory).
*/
- off = extent_map_end(em);
- if (off >= max)
- end = 1;
-
- if (em->block_start == EXTENT_MAP_LAST_BYTE) {
- end = 1;
- flags |= FIEMAP_EXTENT_LAST;
- } else if (em->block_start == EXTENT_MAP_INLINE) {
- flags |= (FIEMAP_EXTENT_DATA_INLINE |
- FIEMAP_EXTENT_NOT_ALIGNED);
- } else if (em->block_start == EXTENT_MAP_DELALLOC) {
- flags |= (FIEMAP_EXTENT_DELALLOC |
- FIEMAP_EXTENT_UNKNOWN);
- } else if (fieinfo->fi_extents_max) {
- u64 bytenr = em->block_start -
- (em->start - em->orig_start);
+ btrfs_remove_extent_mapping(inode, em);
+ /* Once for the inode's extent map tree. */
+ btrfs_free_extent_map(em);
+next:
+ start = btrfs_extent_map_end(em);
+ write_unlock(&extent_tree->lock);
+ /* Once for us, for the lookup_extent_mapping() reference. */
+ btrfs_free_extent_map(em);
+
+ if (need_resched()) {
/*
- * As btrfs supports shared space, this information
- * can be exported to userspace tools via
- * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
- * then we're just getting a count and we can skip the
- * lookup stuff.
+ * If we need to resched but we can't block just exit
+ * and leave any remaining extent maps.
*/
- ret = btrfs_check_shared(root, btrfs_ino(inode),
- bytenr, roots, tmp_ulist);
- if (ret < 0)
- goto out_free;
- if (ret)
- flags |= FIEMAP_EXTENT_SHARED;
- ret = 0;
- }
- if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
- flags |= FIEMAP_EXTENT_ENCODED;
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- flags |= FIEMAP_EXTENT_UNWRITTEN;
-
- free_extent_map(em);
- em = NULL;
- if ((em_start >= last) || em_len == (u64)-1 ||
- (last == (u64)-1 && isize <= em_end)) {
- flags |= FIEMAP_EXTENT_LAST;
- end = 1;
- }
+ if (!gfpflags_allow_blocking(mask))
+ break;
- /* now scan forward to see if this is really the last extent. */
- em = get_extent_skip_holes(inode, off, last_for_get_extent);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out;
- }
- if (!em) {
- flags |= FIEMAP_EXTENT_LAST;
- end = 1;
- }
- ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
- em_len, flags);
- if (ret) {
- if (ret == 1)
- ret = 0;
- goto out_free;
+ cond_resched();
}
}
-out_free:
- if (!ret)
- ret = emit_last_fiemap_cache(fieinfo, &cache);
- free_extent_map(em);
-out:
- unlock_extent_cached(&inode->io_tree, start, start + len - 1,
- &cached_state);
-
-out_free_ulist:
- btrfs_free_path(path);
- ulist_free(roots);
- ulist_free(tmp_ulist);
- return ret;
+ return try_release_extent_state(io_tree, folio);
}
-static void __free_extent_buffer(struct extent_buffer *eb)
+static int extent_buffer_under_io(const struct extent_buffer *eb)
{
- kmem_cache_free(extent_buffer_cache, eb);
-}
-
-int extent_buffer_under_io(const struct extent_buffer *eb)
-{
- return (atomic_read(&eb->io_pages) ||
- test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+ return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
-static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
+static bool folio_range_has_eb(struct folio *folio)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- lockdep_assert_held(&page->mapping->private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
- if (PagePrivate(page)) {
- subpage = (struct btrfs_subpage *)page->private;
- if (atomic_read(&subpage->eb_refs))
- return true;
- /*
- * Even there is no eb refs here, we may still have
- * end_page_read() call relying on page::private.
- */
- if (atomic_read(&subpage->readers))
+ if (folio_test_private(folio)) {
+ bfs = folio_get_private(folio);
+ if (atomic_read(&bfs->eb_refs))
return true;
}
return false;
}
-static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
+static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct address_space *mapping = folio->mapping;
const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
/*
- * For mapped eb, we're going to change the page private, which should
- * be done under the private_lock.
+ * For mapped eb, we're going to change the folio private, which should
+ * be done under the i_private_lock.
*/
if (mapped)
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
- if (!PagePrivate(page)) {
+ if (!folio_test_private(folio)) {
if (mapped)
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return;
}
- if (fs_info->sectorsize == PAGE_SIZE) {
+ if (!btrfs_meta_is_subpage(fs_info)) {
/*
- * We do this since we'll remove the pages after we've
- * removed the eb from the radix tree, so we could race
- * and have this page now attached to the new eb. So
- * only clear page_private if it's still connected to
- * this eb.
+ * We do this since we'll remove the pages after we've removed
+ * the eb from the xarray, so we could race and have this page
+ * now attached to the new eb. So only clear the folio private if it's
+ * still connected to this eb.
*/
- if (PagePrivate(page) &&
- page->private == (unsigned long)eb) {
+ if (folio_test_private(folio) && folio_get_private(folio) == eb) {
BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
- BUG_ON(PageDirty(page));
- BUG_ON(PageWriteback(page));
- /*
- * We need to make sure we haven't be attached
- * to a new eb.
- */
- detach_page_private(page);
+ BUG_ON(folio_test_dirty(folio));
+ BUG_ON(folio_test_writeback(folio));
+ /* We need to make sure we haven't been attached to a new eb. */
+ folio_detach_private(folio);
}
if (mapped)
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return;
}
/*
- * For subpage, we can have dummy eb with page private. In this case,
- * we can directly detach the private as such page is only attached to
- * one dummy eb, no sharing.
+ * For subpage, we can have dummy eb with folio private attached. In
+ * this case, we can directly detach the private as such folio is only
+ * attached to one dummy eb, no sharing.
*/
if (!mapped) {
- btrfs_detach_subpage(fs_info, page);
+ btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
return;
}
- btrfs_page_dec_eb_refs(fs_info, page);
+ btrfs_folio_dec_eb_refs(fs_info, folio);
/*
- * We can only detach the page private if there are no other ebs in the
+ * We can only detach the folio private if there are no other ebs in the
* page range and no unfinished IO.
*/
- if (!page_range_has_eb(fs_info, page))
- btrfs_detach_subpage(fs_info, page);
+ if (!folio_range_has_eb(folio))
+ btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
}
-/* Release all pages attached to the extent buffer */
-static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
+/* Release all folios attached to the extent buffer */
+static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
{
- int i;
- int num_pages;
-
ASSERT(!extent_buffer_under_io(eb));
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- struct page *page = eb->pages[i];
+ for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
+ struct folio *folio = eb->folios[i];
- if (!page)
+ if (!folio)
continue;
- detach_extent_buffer_page(eb, page);
-
- /* One for when we allocated the page */
- put_page(page);
+ detach_extent_buffer_folio(eb, folio);
}
}
@@ -5866,45 +2975,57 @@ static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
*/
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
- btrfs_release_extent_buffer_pages(eb);
- btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
- __free_extent_buffer(eb);
+ btrfs_release_extent_buffer_folios(eb);
+ btrfs_leak_debug_del_eb(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
}
-static struct extent_buffer *
-__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
- unsigned long len)
+static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start)
{
struct extent_buffer *eb = NULL;
eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
eb->start = start;
- eb->len = len;
+ eb->len = fs_info->nodesize;
eb->fs_info = fs_info;
- eb->bflags = 0;
init_rwsem(&eb->lock);
- btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
- &fs_info->allocated_ebs);
- INIT_LIST_HEAD(&eb->release_list);
+ btrfs_leak_debug_add_eb(eb);
spin_lock_init(&eb->refs_lock);
- atomic_set(&eb->refs, 1);
- atomic_set(&eb->io_pages, 0);
+ refcount_set(&eb->refs, 1);
- ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
+ ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
return eb;
}
+/*
+ * For use in eb allocation error cleanup paths, as btrfs_release_extent_buffer()
+ * does not call folio_put(), and we need to set the folios to NULL so that
+ * btrfs_release_extent_buffer() will not detach them a second time.
+ */
+static void cleanup_extent_buffer_folios(struct extent_buffer *eb)
+{
+ const int num_folios = num_extent_folios(eb);
+
+ /* We cannot use num_extent_folios() as loop bound as eb->folios changes. */
+ for (int i = 0; i < num_folios; i++) {
+ ASSERT(eb->folios[i]);
+ detach_extent_buffer_folio(eb, eb->folios[i]);
+ folio_put(eb->folios[i]);
+ eb->folios[i] = NULL;
+ }
+}
+
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
{
- int i;
- struct page *p;
struct extent_buffer *new;
- int num_pages = num_extent_pages(src);
+ int num_folios;
+ int ret;
- new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
+ new = __alloc_extent_buffer(src->fs_info, src->start);
if (new == NULL)
return NULL;
@@ -5915,121 +3036,113 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
*/
set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
- for (i = 0; i < num_pages; i++) {
- int ret;
+ ret = alloc_eb_folio_array(new, false);
+ if (ret)
+ goto release_eb;
- p = alloc_page(GFP_NOFS);
- if (!p) {
- btrfs_release_extent_buffer(new);
- return NULL;
- }
- ret = attach_extent_buffer_page(new, p, NULL);
- if (ret < 0) {
- put_page(p);
- btrfs_release_extent_buffer(new);
- return NULL;
- }
- WARN_ON(PageDirty(p));
- new->pages[i] = p;
- copy_page(page_address(p), page_address(src->pages[i]));
+ ASSERT(num_extent_folios(src) == num_extent_folios(new),
+ "%d != %d", num_extent_folios(src), num_extent_folios(new));
+ /* Explicitly use the cached num_folios value from now on. */
+ num_folios = num_extent_folios(src);
+ for (int i = 0; i < num_folios; i++) {
+ struct folio *folio = new->folios[i];
+
+ ret = attach_extent_buffer_folio(new, folio, NULL);
+ if (ret < 0)
+ goto cleanup_folios;
+ WARN_ON(folio_test_dirty(folio));
}
+ for (int i = 0; i < num_folios; i++)
+ folio_put(new->folios[i]);
+
+ copy_extent_buffer_full(new, src);
set_extent_buffer_uptodate(new);
return new;
+
+cleanup_folios:
+ cleanup_extent_buffer_folios(new);
+release_eb:
+ btrfs_release_extent_buffer(new);
+ return NULL;
}
-struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, unsigned long len)
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start)
{
struct extent_buffer *eb;
- int num_pages;
- int i;
+ int ret;
- eb = __alloc_extent_buffer(fs_info, start, len);
+ eb = __alloc_extent_buffer(fs_info, start);
if (!eb)
return NULL;
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- int ret;
+ ret = alloc_eb_folio_array(eb, false);
+ if (ret)
+ goto release_eb;
- eb->pages[i] = alloc_page(GFP_NOFS);
- if (!eb->pages[i])
- goto err;
- ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
if (ret < 0)
- goto err;
+ goto cleanup_folios;
}
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ folio_put(eb->folios[i]);
+
set_extent_buffer_uptodate(eb);
btrfs_set_header_nritems(eb, 0);
set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
return eb;
-err:
- for (; i > 0; i--) {
- detach_extent_buffer_page(eb, eb->pages[i - 1]);
- __free_page(eb->pages[i - 1]);
- }
- __free_extent_buffer(eb);
- return NULL;
-}
-struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
-{
- return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
+cleanup_folios:
+ cleanup_extent_buffer_folios(eb);
+release_eb:
+ btrfs_release_extent_buffer(eb);
+ return NULL;
}
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;
/*
- * The TREE_REF bit is first set when the extent_buffer is added
- * to the radix tree. It is also reset, if unset, when a new reference
- * is created by find_extent_buffer.
+ * The TREE_REF bit is first set when the extent_buffer is added to the
+ * xarray. It is also reset, if unset, when a new reference is created
+ * by find_extent_buffer.
*
* It is only cleared in two cases: freeing the last non-tree
* reference to the extent_buffer when its STALE bit is set or
- * calling releasepage when the tree reference is the only reference.
+ * calling release_folio when the tree reference is the only reference.
*
* In both cases, care is taken to ensure that the extent_buffer's
- * pages are not under io. However, releasepage can be concurrently
+ * pages are not under io. However, release_folio can be concurrently
* called with creating new references, which is prone to race
* conditions between the calls to check_buffer_tree_ref in those
* codepaths and clearing TREE_REF in try_release_extent_buffer.
*
- * The actual lifetime of the extent_buffer in the radix tree is
- * adequately protected by the refcount, but the TREE_REF bit and
- * its corresponding reference are not. To protect against this
- * class of races, we call check_buffer_tree_ref from the codepaths
- * which trigger io after they set eb->io_pages. Note that once io is
- * initiated, TREE_REF can no longer be cleared, so that is the
- * moment at which any such race is best fixed.
+ * The actual lifetime of the extent_buffer in the xarray is adequately
+ * protected by the refcount, but the TREE_REF bit and its corresponding
+ * reference are not. To protect against this class of races, we call
+ * check_buffer_tree_ref() from the code paths which trigger io. Note that
+ * once io is initiated, TREE_REF can no longer be cleared, so that is
+ * the moment at which any such race is best fixed.
*/
- refs = atomic_read(&eb->refs);
+ refs = refcount_read(&eb->refs);
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
return;
spin_lock(&eb->refs_lock);
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_inc(&eb->refs);
+ refcount_inc(&eb->refs);
spin_unlock(&eb->refs_lock);
}
-static void mark_extent_buffer_accessed(struct extent_buffer *eb,
- struct page *accessed)
+static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
- int num_pages, i;
-
check_buffer_tree_ref(eb);
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- struct page *p = eb->pages[i];
-
- if (p != accessed)
- mark_page_accessed(p);
- }
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ folio_mark_accessed(eb->folios[i]);
}
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
@@ -6057,14 +3170,14 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
spin_lock(&eb->refs_lock);
spin_unlock(&eb->refs_lock);
}
- mark_extent_buffer_accessed(eb, NULL);
+ mark_extent_buffer_accessed(eb);
return eb;
}
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *eb, *exists = NULL;
int ret;
@@ -6076,83 +3189,202 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
return ERR_PTR(-ENOMEM);
eb->fs_info = fs_info;
again:
- ret = radix_tree_preload(GFP_NOFS);
- if (ret) {
- exists = ERR_PTR(ret);
- goto free_eb;
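+ /* Atomically insert the eb only if the slot for this bytenr is still empty. */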
+ xa_lock_irq(&fs_info->buffer_tree);
+ exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits,
+ NULL, eb, GFP_NOFS);
+ if (xa_is_err(exists)) {
+ ret = xa_err(exists);
+ xa_unlock_irq(&fs_info->buffer_tree);
+ btrfs_release_extent_buffer(eb);
+ return ERR_PTR(ret);
}
- spin_lock(&fs_info->buffer_lock);
- ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> fs_info->sectorsize_bits, eb);
- spin_unlock(&fs_info->buffer_lock);
- radix_tree_preload_end();
- if (ret == -EEXIST) {
- exists = find_extent_buffer(fs_info, start);
- if (exists)
- goto free_eb;
- else
+ if (exists) {
+ if (!refcount_inc_not_zero(&exists->refs)) {
+ /* The extent buffer is being freed, retry. */
+ xa_unlock_irq(&fs_info->buffer_tree);
goto again;
+ }
+ xa_unlock_irq(&fs_info->buffer_tree);
+ btrfs_release_extent_buffer(eb);
+ return exists;
}
+ xa_unlock_irq(&fs_info->buffer_tree);
check_buffer_tree_ref(eb);
- set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
return eb;
-free_eb:
- btrfs_release_extent_buffer(eb);
- return exists;
-}
+#else
+ /* Stub to avoid linker error when compiled with optimizations turned off. */
+ return NULL;
#endif
+}
-static struct extent_buffer *grab_extent_buffer(
- struct btrfs_fs_info *fs_info, struct page *page)
+static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
+ struct folio *folio)
{
struct extent_buffer *exists;
+ lockdep_assert_held(&folio->mapping->i_private_lock);
+
/*
- * For subpage case, we completely rely on radix tree to ensure we
- * don't try to insert two ebs for the same bytenr. So here we always
- * return NULL and just continue.
+ * For subpage case, we completely rely on xarray to ensure we don't try
+ * to insert two ebs for the same bytenr. So here we always return NULL
+ * and just continue.
*/
- if (fs_info->sectorsize < PAGE_SIZE)
+ if (btrfs_meta_is_subpage(fs_info))
return NULL;
/* Page not yet attached to an extent buffer */
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
return NULL;
/*
- * We could have already allocated an eb for this page and attached one
+ * We could have already allocated an eb for this folio and attached one
* so lets see if we can get a ref on the existing eb, and if we can we
* know it's good and we can just return that one, else we know we can
- * just overwrite page->private.
+ * just overwrite folio private.
*/
- exists = (struct extent_buffer *)page->private;
- if (atomic_inc_not_zero(&exists->refs))
+ exists = folio_get_private(folio);
+ if (refcount_inc_not_zero(&exists->refs))
return exists;
- WARN_ON(PageDirty(page));
- detach_page_private(page);
+ WARN_ON(folio_test_dirty(folio));
+ folio_detach_private(folio);
return NULL;
}
+/*
+ * Validate alignment constraints of eb at logical address @start.
+ */
+static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+{
+ const u32 nodesize = fs_info->nodesize;
+
+ if (unlikely(!IS_ALIGNED(start, fs_info->sectorsize))) {
+ btrfs_err(fs_info, "bad tree block start %llu", start);
+ return true;
+ }
+
+ if (unlikely(nodesize < PAGE_SIZE && !IS_ALIGNED(start, nodesize))) {
+ btrfs_err(fs_info,
+ "tree block is not nodesize aligned, start %llu nodesize %u",
+ start, nodesize);
+ return true;
+ }
+ if (unlikely(nodesize >= PAGE_SIZE && !PAGE_ALIGNED(start))) {
+ btrfs_err(fs_info,
+ "tree block is not page aligned, start %llu nodesize %u",
+ start, nodesize);
+ return true;
+ }
+ if (unlikely(!IS_ALIGNED(start, nodesize) &&
+ !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags))) {
+ btrfs_warn(fs_info,
+"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
+ start, nodesize);
+ }
+ return false;
+}
+
+/*
+ * Return 0 if eb->folios[i] is attached to btree inode successfully.
+ * Return >0 if there is already another extent buffer for the range,
+ * and @found_eb_ret would be updated.
+ * Return -EAGAIN if the filemap has an existing folio but with different size
+ * than @eb.
+ * The caller needs to free the existing folios and retry using the same order.
+ */
+static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ struct btrfs_folio_state *prealloc,
+ struct extent_buffer **found_eb_ret)
+{
+
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct address_space *mapping = fs_info->btree_inode->i_mapping;
+ const pgoff_t index = eb->start >> PAGE_SHIFT;
+ struct folio *existing_folio;
+ int ret;
+
+ ASSERT(found_eb_ret);
+
+ /* Caller should ensure the folio exists. */
+ ASSERT(eb->folios[i]);
+
+retry:
+ existing_folio = NULL;
+ ret = filemap_add_folio(mapping, eb->folios[i], index + i,
+ GFP_NOFS | __GFP_NOFAIL);
+ if (!ret)
+ goto finish;
+
+ existing_folio = filemap_lock_folio(mapping, index + i);
+ /* The page cache only exists for a very short time, just retry. */
+ if (IS_ERR(existing_folio))
+ goto retry;
+
+ /* For now, we should only have single-page folios for btree inode. */
+ ASSERT(folio_nr_pages(existing_folio) == 1);
+
+ if (folio_size(existing_folio) != eb->folio_size) {
+ folio_unlock(existing_folio);
+ folio_put(existing_folio);
+ return -EAGAIN;
+ }
+
+finish:
+ spin_lock(&mapping->i_private_lock);
+ if (existing_folio && btrfs_meta_is_subpage(fs_info)) {
+ /* We're going to reuse the existing page, can drop our folio now. */
+ __free_page(folio_page(eb->folios[i], 0));
+ eb->folios[i] = existing_folio;
+ } else if (existing_folio) {
+ struct extent_buffer *existing_eb;
+
+ existing_eb = grab_extent_buffer(fs_info, existing_folio);
+ if (existing_eb) {
+ /* The extent buffer still exists, we can use it directly. */
+ *found_eb_ret = existing_eb;
+ spin_unlock(&mapping->i_private_lock);
+ folio_unlock(existing_folio);
+ folio_put(existing_folio);
+ return 1;
+ }
+ /* The extent buffer no longer exists, we can reuse the folio. */
+ __free_page(folio_page(eb->folios[i], 0));
+ eb->folios[i] = existing_folio;
+ }
+ eb->folio_size = folio_size(eb->folios[i]);
+ eb->folio_shift = folio_shift(eb->folios[i]);
+ /* Should not fail, as we have preallocated the memory. */
+ ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+ ASSERT(!ret);
+ /*
+ * Record that we have an extra eb under allocation, so that
+ * detach_extent_buffer_folio() won't release the folio private while the
+ * eb hasn't been inserted into the xarray yet.
+ *
+ * The ref will be decreased when the eb releases the folio, in
+ * detach_extent_buffer_folio(). Thus it needs no special handling in the
+ * error path.
+ */
+ btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+ spin_unlock(&mapping->i_private_lock);
+ return 0;
+}
+
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level)
{
- unsigned long len = fs_info->nodesize;
- int num_pages;
- int i;
- unsigned long index = start >> PAGE_SHIFT;
+ int attached = 0;
struct extent_buffer *eb;
- struct extent_buffer *exists = NULL;
- struct page *p;
- struct address_space *mapping = fs_info->btree_inode->i_mapping;
+ struct extent_buffer *existing_eb = NULL;
+ struct btrfs_folio_state *prealloc = NULL;
+ u64 lockdep_owner = owner_root;
+ bool page_contig = true;
int uptodate = 1;
int ret;
- if (!IS_ALIGNED(start, fs_info->sectorsize)) {
- btrfs_err(fs_info, "bad tree block start %llu", start);
+ if (check_eb_alignment(fs_info, start))
return ERR_PTR(-EINVAL);
- }
#if BITS_PER_LONG == 32
if (start >= MAX_LFS_FILESIZE) {
@@ -6165,135 +3397,184 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
btrfs_warn_32bit_limit(fs_info);
#endif
- if (fs_info->sectorsize < PAGE_SIZE &&
- offset_in_page(start) + len > PAGE_SIZE) {
- btrfs_err(fs_info,
- "tree block crosses page boundary, start %llu nodesize %lu",
- start, len);
- return ERR_PTR(-EINVAL);
- }
-
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
- eb = __alloc_extent_buffer(fs_info, start, len);
+ eb = __alloc_extent_buffer(fs_info, start);
if (!eb)
return ERR_PTR(-ENOMEM);
- btrfs_set_buffer_lockdep_class(owner_root, eb, level);
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++, index++) {
- struct btrfs_subpage *prealloc = NULL;
+ /*
+ * The reloc trees are just snapshots, so we need them to appear to be
+ * just like any other fs tree WRT lockdep.
+ */
+ if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
+ lockdep_owner = BTRFS_FS_TREE_OBJECTID;
+
+ btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
- p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
- if (!p) {
- exists = ERR_PTR(-ENOMEM);
- goto free_eb;
+ /*
+ * Preallocate folio private for subpage case, so that we won't
+ * allocate memory with i_private_lock nor page lock hold.
+ *
+ * The memory will be freed by attach_extent_buffer_page() or freed
+ * manually if we exit earlier.
+ */
+ if (btrfs_meta_is_subpage(fs_info)) {
+ prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA);
+ if (IS_ERR(prealloc)) {
+ ret = PTR_ERR(prealloc);
+ goto out;
+ }
+ }
+
+reallocate:
+ /* Allocate all pages first. */
+ ret = alloc_eb_folio_array(eb, true);
+ if (ret < 0) {
+ btrfs_free_folio_state(prealloc);
+ goto out;
+ }
+
+ /* Attach all pages to the filemap. */
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio;
+
+ ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
+ if (ret > 0) {
+ ASSERT(existing_eb);
+ goto out;
}
/*
- * Preallocate page->private for subpage case, so that we won't
- * allocate memory with private_lock hold. The memory will be
- * freed by attach_extent_buffer_page() or freed manually if
- * we exit earlier.
+ * TODO: Special handling for a corner case where the order of
+ * folios mismatch between the new eb and filemap.
+ *
+ * This happens when:
+ *
+ * - the new eb is using higher order folio
*
- * Although we have ensured one subpage eb can only have one
- * page, but it may change in the future for 16K page size
- * support, so we still preallocate the memory in the loop.
+ * - the filemap is still using 0-order folios for the range
+ * This can happen at the previous eb allocation, and we don't
+ * have higher order folio for the call.
+ *
+ * - the existing eb has already been freed
+ *
+ * In this case, we have to free the existing folios first, and
+ * re-allocate using the same order.
+ * Thankfully this is not going to happen yet, as we're still
+ * using 0-order folios.
*/
- if (fs_info->sectorsize < PAGE_SIZE) {
- prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
- if (IS_ERR(prealloc)) {
- ret = PTR_ERR(prealloc);
- unlock_page(p);
- put_page(p);
- exists = ERR_PTR(ret);
- goto free_eb;
- }
+ if (unlikely(ret == -EAGAIN)) {
+ DEBUG_WARN("folio order mismatch between new eb and filemap");
+ goto reallocate;
}
+ attached++;
- spin_lock(&mapping->private_lock);
- exists = grab_extent_buffer(fs_info, p);
- if (exists) {
- spin_unlock(&mapping->private_lock);
- unlock_page(p);
- put_page(p);
- mark_extent_buffer_accessed(exists, p);
- btrfs_free_subpage(prealloc);
- goto free_eb;
- }
- /* Should not fail, as we have preallocated the memory */
- ret = attach_extent_buffer_page(eb, p, prealloc);
- ASSERT(!ret);
/*
- * To inform we have extra eb under allocation, so that
- * detach_extent_buffer_page() won't release the page private
- * when the eb hasn't yet been inserted into radix tree.
- *
- * The ref will be decreased when the eb released the page, in
- * detach_extent_buffer_page().
- * Thus needs no special handling in error path.
+ * eb->folios[] is only reliable after attach_eb_folio_to_filemap(),
+ * as we may choose to reuse the existing page cache folio and free
+ * the one we just allocated.
+ */
+ folio = eb->folios[i];
+ WARN_ON(btrfs_meta_folio_test_dirty(folio, eb));
+
+ /*
+ * Check if the current page is physically contiguous with the
+ * previous eb page.
+ * At this stage, either we allocated a large folio (thus @i can
+ * only be 0), or we fell back to per-page allocation.
*/
- btrfs_page_inc_eb_refs(fs_info, p);
- spin_unlock(&mapping->private_lock);
+ if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
+ page_contig = false;
- WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
- eb->pages[i] = p;
- if (!PageUptodate(p))
+ if (!btrfs_meta_folio_test_uptodate(folio, eb))
uptodate = 0;
/*
* We can't unlock the pages just yet since the extent buffer
- * hasn't been properly inserted in the radix tree, this
- * opens a race with btree_releasepage which can free a page
- * while we are still filling in all pages for the buffer and
- * we could crash.
+ * hasn't been properly inserted into the xarray, this opens a
+ * race with btree_release_folio() which can free a page while we
+ * are still filling in all pages for the buffer and we could crash.
*/
}
if (uptodate)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+ /* All pages are physically contiguous, can skip cross page handling. */
+ if (page_contig)
+ eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
again:
- ret = radix_tree_preload(GFP_NOFS);
- if (ret) {
- exists = ERR_PTR(ret);
- goto free_eb;
+ xa_lock_irq(&fs_info->buffer_tree);
+ existing_eb = __xa_cmpxchg(&fs_info->buffer_tree,
+ start >> fs_info->nodesize_bits, NULL, eb,
+ GFP_NOFS);
+ if (xa_is_err(existing_eb)) {
+ ret = xa_err(existing_eb);
+ xa_unlock_irq(&fs_info->buffer_tree);
+ goto out;
}
-
- spin_lock(&fs_info->buffer_lock);
- ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> fs_info->sectorsize_bits, eb);
- spin_unlock(&fs_info->buffer_lock);
- radix_tree_preload_end();
- if (ret == -EEXIST) {
- exists = find_extent_buffer(fs_info, start);
- if (exists)
- goto free_eb;
- else
+ if (existing_eb) {
+ if (!refcount_inc_not_zero(&existing_eb->refs)) {
+ xa_unlock_irq(&fs_info->buffer_tree);
goto again;
+ }
+ xa_unlock_irq(&fs_info->buffer_tree);
+ goto out;
}
+ xa_unlock_irq(&fs_info->buffer_tree);
+
/* add one reference for the tree */
check_buffer_tree_ref(eb);
- set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
/*
* Now it's safe to unlock the pages because any calls to
- * btree_releasepage will correctly detect that a page belongs to a
+ * btree_release_folio will correctly detect that a page belongs to a
* live buffer and won't free them prematurely.
*/
- for (i = 0; i < num_pages; i++)
- unlock_page(eb->pages[i]);
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ folio_unlock(eb->folios[i]);
+ /*
+ * A folio that has been added to an address_space mapping
+ * should not continue holding the refcount from its original
+ * allocation indefinitely.
+ */
+ folio_put(eb->folios[i]);
+ }
return eb;
-free_eb:
- WARN_ON(!atomic_dec_and_test(&eb->refs));
- for (i = 0; i < num_pages; i++) {
- if (eb->pages[i])
- unlock_page(eb->pages[i]);
- }
+out:
+ WARN_ON(!refcount_dec_and_test(&eb->refs));
+
+ /*
+ * Any attached folios need to be detached before we unlock them. This
+ * is because when we insert our new folios into the mapping we then
+ * attach our eb to those folios. If we fail to insert a folio we'll
+ * look up the folio for that index and grab its eb. We do not want
+ * that lookup to grab this eb, as we're getting ready to free it. So
+ * we have to detach it first and then unlock it.
+ *
+ * Note: the bound is num_extent_pages() as we need to go through all slots.
+ */
+ for (int i = 0; i < num_extent_pages(eb); i++) {
+ struct folio *folio = eb->folios[i];
+
+ if (i < attached) {
+ ASSERT(folio);
+ detach_extent_buffer_folio(eb, folio);
+ folio_unlock(folio);
+ } else if (!folio) {
+ continue;
+ }
+ folio_put(folio);
+ eb->folios[i] = NULL;
+ }
btrfs_release_extent_buffer(eb);
- return exists;
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ASSERT(existing_eb);
+ return existing_eb;
}
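The insertion path above follows the common xarray insert-or-reuse pattern. A reduced sketch, assuming a hypothetical helper name and omitting the eb-specific cleanup (illustrative only, not part of this patch):

static struct extent_buffer *insert_or_reuse(struct xarray *tree,
					     unsigned long index,
					     struct extent_buffer *new)
{
	struct extent_buffer *existing;

again:
	xa_lock_irq(tree);
	/* Atomically install @new only if the slot is still empty. */
	existing = __xa_cmpxchg(tree, index, NULL, new, GFP_NOFS);
	if (xa_is_err(existing)) {
		xa_unlock_irq(tree);
		return ERR_PTR(xa_err(existing));
	}
	if (existing && !refcount_inc_not_zero(&existing->refs)) {
		/* The occupant is being freed; retry until the slot settles. */
		xa_unlock_irq(tree);
		goto again;
	}
	xa_unlock_irq(tree);
	return existing ?: new;
}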
static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
@@ -6301,7 +3582,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
struct extent_buffer *eb =
container_of(head, struct extent_buffer, rcu_head);
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
}
static int release_extent_buffer(struct extent_buffer *eb)
@@ -6309,27 +3590,35 @@ static int release_extent_buffer(struct extent_buffer *eb)
{
lockdep_assert_held(&eb->refs_lock);
- WARN_ON(atomic_read(&eb->refs) == 0);
- if (atomic_dec_and_test(&eb->refs)) {
- if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
- struct btrfs_fs_info *fs_info = eb->fs_info;
+ if (refcount_dec_and_test(&eb->refs)) {
+ struct btrfs_fs_info *fs_info = eb->fs_info;
- spin_unlock(&eb->refs_lock);
+ spin_unlock(&eb->refs_lock);
- spin_lock(&fs_info->buffer_lock);
- radix_tree_delete(&fs_info->buffer_radix,
- eb->start >> fs_info->sectorsize_bits);
- spin_unlock(&fs_info->buffer_lock);
- } else {
- spin_unlock(&eb->refs_lock);
- }
+ /*
+ * We're erasing, so theoretically there will be no allocations;
+ * just use GFP_ATOMIC.
+ *
+ * We use cmpxchg instead of erase because we do not know if
+ * this eb is actually in the tree or not, we could be cleaning
+ * up an eb that we allocated but never inserted into the tree.
+ * Thus use cmpxchg to remove it from the tree if it is there,
+ * or leave the other entry if this isn't in the tree.
+ *
+ * The documentation says that putting a NULL value is the same
+ * as erase as long as XA_FLAGS_ALLOC is not set, which it isn't
+ * in this case.
+ */
+ xa_cmpxchg_irq(&fs_info->buffer_tree,
+ eb->start >> fs_info->nodesize_bits, eb, NULL,
+ GFP_ATOMIC);
- btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
- /* Should be safe to release our pages at this point */
- btrfs_release_extent_buffer_pages(eb);
+ btrfs_leak_debug_del_eb(eb);
+ /* Should be safe to release folios at this point. */
+ btrfs_release_extent_buffer_folios(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
return 1;
}
#endif
@@ -6344,27 +3633,29 @@ static int release_extent_buffer(struct extent_buffer *eb)
void free_extent_buffer(struct extent_buffer *eb)
{
int refs;
- int old;
if (!eb)
return;
+ refs = refcount_read(&eb->refs);
while (1) {
- refs = atomic_read(&eb->refs);
- if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
- || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
- refs == 1))
+ if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
+ if (refs == 1)
+ break;
+ } else if (refs <= 3) {
break;
- old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
- if (old == refs)
+ }
+
+ /* Optimization to avoid locking eb->refs_lock. */
+ if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
return;
}
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) == 2 &&
+ if (refcount_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
!extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_dec(&eb->refs);
+ refcount_dec(&eb->refs);
/*
* I know this is terrible, but it's temporary until we stop tracking
@@ -6381,335 +3672,244 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
spin_lock(&eb->refs_lock);
set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
- if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+ if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_dec(&eb->refs);
+ refcount_dec(&eb->refs);
release_extent_buffer(eb);
}
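The retry loop in free_extent_buffer() above relies on refcount_t exposing its underlying atomic_t. A minimal sketch of that lockless fast path, using a hypothetical helper and covering only regular (mapped) ebs (illustrative only, not part of this patch):

static bool eb_put_ref_fast(struct extent_buffer *eb)
{
	int refs = refcount_read(&eb->refs);

	while (refs > 3) {
		/* On failure, refs is reloaded with the current value. */
		if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
			return true;
	}
	return false;	/* caller takes eb->refs_lock and decides about freeing */
}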
-static void btree_clear_page_dirty(struct page *page)
+static void btree_clear_folio_dirty_tag(struct folio *folio)
{
- ASSERT(PageDirty(page));
- ASSERT(PageLocked(page));
- clear_page_dirty_for_io(page);
- xa_lock_irq(&page->mapping->i_pages);
- if (!PageDirty(page))
- __xa_clear_mark(&page->mapping->i_pages,
- page_index(page), PAGECACHE_TAG_DIRTY);
- xa_unlock_irq(&page->mapping->i_pages);
+ ASSERT(!folio_test_dirty(folio));
+ ASSERT(folio_test_locked(folio));
+ xa_lock_irq(&folio->mapping->i_pages);
+ if (!folio_test_dirty(folio))
+ __xa_clear_mark(&folio->mapping->i_pages, folio->index,
+ PAGECACHE_TAG_DIRTY);
+ xa_unlock_irq(&folio->mapping->i_pages);
}
-static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
+void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
+ struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- struct page *page = eb->pages[0];
- bool last;
-
- /* btree_clear_page_dirty() needs page locked */
- lock_page(page);
- last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
- eb->len);
- if (last)
- btree_clear_page_dirty(page);
- unlock_page(page);
- WARN_ON(atomic_read(&eb->refs) == 0);
-}
-void clear_extent_buffer_dirty(const struct extent_buffer *eb)
-{
- int i;
- int num_pages;
- struct page *page;
+ btrfs_assert_tree_write_locked(eb);
+
+ if (trans && btrfs_header_generation(eb) != trans->transid)
+ return;
+
+ /*
+ * Instead of clearing the dirty flag off of the buffer, mark it as
+ * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
+ * write-ordering in zoned mode, without the need to later re-dirty
+ * the extent_buffer.
+ *
+ * The actual zeroout of the buffer will happen later in
+ * btree_csum_one_bio.
+ */
+ if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
+ return;
+ }
- if (eb->fs_info->sectorsize < PAGE_SIZE)
- return clear_subpage_extent_buffer_dirty(eb);
+ if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
+ return;
+
+ buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
+ fs_info->dirty_metadata_batch);
- num_pages = num_extent_pages(eb);
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio = eb->folios[i];
+ bool last;
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
- if (!PageDirty(page))
+ if (!folio_test_dirty(folio))
continue;
- lock_page(page);
- btree_clear_page_dirty(page);
- ClearPageError(page);
- unlock_page(page);
+ folio_lock(folio);
+ last = btrfs_meta_folio_clear_and_test_dirty(folio, eb);
+ if (last)
+ btree_clear_folio_dirty_tag(folio);
+ folio_unlock(folio);
}
- WARN_ON(atomic_read(&eb->refs) == 0);
+ WARN_ON(refcount_read(&eb->refs) == 0);
}
-bool set_extent_buffer_dirty(struct extent_buffer *eb)
+void set_extent_buffer_dirty(struct extent_buffer *eb)
{
- int i;
- int num_pages;
bool was_dirty;
check_buffer_tree_ref(eb);
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- num_pages = num_extent_pages(eb);
- WARN_ON(atomic_read(&eb->refs) == 0);
+ WARN_ON(refcount_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+ WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
if (!was_dirty) {
- bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
+ bool subpage = btrfs_meta_is_subpage(eb->fs_info);
/*
* For subpage case, we can have other extent buffers in the
- * same page, and in clear_subpage_extent_buffer_dirty() we
+ * same page, and in btrfs_clear_buffer_dirty() we
* have to clear page dirty without subpage lock held.
* This can cause race where our page gets dirty cleared after
* we just set it.
*
- * Thankfully, clear_subpage_extent_buffer_dirty() has locked
+ * Thankfully, btrfs_clear_buffer_dirty() has locked
* its page for other reasons, we can use page lock to prevent
* the above race.
*/
if (subpage)
- lock_page(eb->pages[0]);
- for (i = 0; i < num_pages; i++)
- btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
- eb->start, eb->len);
+ folio_lock(eb->folios[0]);
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ btrfs_meta_folio_set_dirty(eb->folios[i], eb);
+ buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY);
if (subpage)
- unlock_page(eb->pages[0]);
+ folio_unlock(eb->folios[0]);
+ percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
+ eb->len,
+ eb->fs_info->dirty_metadata_batch);
}
#ifdef CONFIG_BTRFS_DEBUG
- for (i = 0; i < num_pages; i++)
- ASSERT(PageDirty(eb->pages[i]));
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ ASSERT(folio_test_dirty(eb->folios[i]));
#endif
-
- return was_dirty;
}
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- struct page *page;
- int num_pages;
- int i;
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
- if (page)
- btrfs_page_clear_uptodate(fs_info, page,
- eb->start, eb->len);
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio = eb->folios[i];
+
+ if (!folio)
+ continue;
+
+ btrfs_meta_folio_clear_uptodate(folio, eb);
}
}
void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- struct page *page;
- int num_pages;
- int i;
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
- btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
- }
+ for (int i = 0; i < num_extent_folios(eb); i++)
+ btrfs_meta_folio_set_uptodate(eb->folios[i], eb);
}
-static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
- int mirror_num)
+static void clear_extent_buffer_reading(struct extent_buffer *eb)
{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- struct extent_io_tree *io_tree;
- struct page *page = eb->pages[0];
- struct btrfs_bio_ctrl bio_ctrl = { 0 };
- int ret = 0;
+ clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags);
+}
- ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
- ASSERT(PagePrivate(page));
- io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
+static void end_bbio_meta_read(struct btrfs_bio *bbio)
+{
+ struct extent_buffer *eb = bbio->private;
+ bool uptodate = !bbio->bio.bi_status;
- if (wait == WAIT_NONE) {
- if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
- return -EAGAIN;
- } else {
- ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
- if (ret < 0)
- return ret;
- }
+ /*
+ * If the extent buffer is marked UPTODATE before the read operation
+ * completes, other calls to read_extent_buffer_pages() will return
+ * early without waiting for the read to finish, causing data races.
+ */
+ WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
- ret = 0;
- if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
- PageUptodate(page) ||
- btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
- return ret;
- }
+ eb->read_mirror = bbio->mirror_num;
- clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- eb->read_mirror = 0;
- atomic_set(&eb->io_pages, 1);
- check_buffer_tree_ref(eb);
- btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
-
- btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
- ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
- page, eb->start, eb->len,
- eb->start - page_offset(page),
- end_bio_extent_readpage, mirror_num, 0,
- true);
- if (ret) {
- /*
- * In the endio function, if we hit something wrong we will
- * increase the io_pages, so here we need to decrease it for
- * error path.
- */
- atomic_dec(&eb->io_pages);
- }
- if (bio_ctrl.bio) {
- int tmp;
+ if (uptodate &&
+ btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
+ uptodate = false;
- tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
- bio_ctrl.bio = NULL;
- if (tmp < 0)
- return tmp;
- }
- if (ret || wait != WAIT_COMPLETE)
- return ret;
+ if (uptodate)
+ set_extent_buffer_uptodate(eb);
+ else
+ clear_extent_buffer_uptodate(eb);
- wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
- if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
- ret = -EIO;
- return ret;
+ clear_extent_buffer_reading(eb);
+ free_extent_buffer(eb);
+
+ bio_put(&bbio->bio);
}
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *check)
{
- int i;
- struct page *page;
- int err;
- int ret = 0;
- int locked_pages = 0;
- int all_uptodate = 1;
- int num_pages;
- unsigned long num_reads = 0;
- struct btrfs_bio_ctrl bio_ctrl = { 0 };
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct btrfs_bio *bbio;
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
- if (eb->fs_info->sectorsize < PAGE_SIZE)
- return read_extent_buffer_subpage(eb, wait, mirror_num);
-
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
- if (wait == WAIT_NONE) {
- /*
- * WAIT_NONE is only utilized by readahead. If we can't
- * acquire the lock atomically it means either the eb
- * is being read out or under modification.
- * Either way the eb will be or has been cached,
- * readahead can exit safely.
- */
- if (!trylock_page(page))
- goto unlock_exit;
- } else {
- lock_page(page);
- }
- locked_pages++;
- }
/*
- * We need to firstly lock all pages to make sure that
- * the uptodate bit of our pages won't be affected by
- * clear_extent_buffer_uptodate().
+ * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
+ * operation, which could potentially still be in flight. In this case
+ * we simply want to return an error.
*/
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
- if (!PageUptodate(page)) {
- num_reads++;
- all_uptodate = 0;
- }
- }
+ if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
+ return -EIO;
- if (all_uptodate) {
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- goto unlock_exit;
- }
+ /* Someone else is already reading the buffer, just wait for it. */
+ if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
+ return 0;
- clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- eb->read_mirror = 0;
- atomic_set(&eb->io_pages, num_reads);
/*
- * It is possible for releasepage to clear the TREE_REF bit before we
- * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
+ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
+ * started and finished reading the same eb. In this case, UPTODATE
+ * will now be set, and we shouldn't read it in again.
*/
- check_buffer_tree_ref(eb);
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
-
- if (!PageUptodate(page)) {
- if (ret) {
- atomic_dec(&eb->io_pages);
- unlock_page(page);
- continue;
- }
-
- ClearPageError(page);
- err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
- &bio_ctrl, page, page_offset(page),
- PAGE_SIZE, 0, end_bio_extent_readpage,
- mirror_num, 0, false);
- if (err) {
- /*
- * We failed to submit the bio so it's the
- * caller's responsibility to perform cleanup
- * i.e unlock page/set error bit.
- */
- ret = err;
- SetPageError(page);
- unlock_page(page);
- atomic_dec(&eb->io_pages);
- }
- } else {
- unlock_page(page);
- }
- }
-
- if (bio_ctrl.bio) {
- err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
- bio_ctrl.bio = NULL;
- if (err)
- return err;
+ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
+ clear_extent_buffer_reading(eb);
+ return 0;
}
- if (ret || wait != WAIT_COMPLETE)
- return ret;
+ eb->read_mirror = 0;
+ check_buffer_tree_ref(eb);
+ refcount_inc(&eb->refs);
+
+ bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
+ REQ_OP_READ | REQ_META, BTRFS_I(fs_info->btree_inode),
+ eb->start, end_bbio_meta_read, eb);
+ bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
+ memcpy(&bbio->parent_check, check, sizeof(*check));
+ for (int i = 0; i < num_extent_folios(eb); i++) {
+ struct folio *folio = eb->folios[i];
+ u64 range_start = max_t(u64, eb->start, folio_pos(folio));
+ u32 range_len = min_t(u64, folio_next_pos(folio),
+ eb->start + eb->len) - range_start;
+
+ bio_add_folio_nofail(&bbio->bio, folio, range_len,
+ offset_in_folio(folio, range_start));
+ }
+ btrfs_submit_bbio(bbio, mirror_num);
+ return 0;
+}
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
- wait_on_page_locked(page);
- if (!PageUptodate(page))
- ret = -EIO;
- }
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *check)
+{
+ int ret;
- return ret;
+ ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
+ if (ret < 0)
+ return ret;
-unlock_exit:
- while (locked_pages > 0) {
- locked_pages--;
- page = eb->pages[locked_pages];
- unlock_page(page);
- }
- return ret;
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
+ if (unlikely(!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)))
+ return -EIO;
+ return 0;
}
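Taken together, the two functions above implement a "single submitter, many waiters" scheme keyed on EXTENT_BUFFER_READING. A condensed sketch of the synchronous path, folded into one function; submit_read() is a made-up stand-in for the bbio setup and submission (illustrative only, not part of this patch):

static int eb_read_once(struct extent_buffer *eb)
{
	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;
	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
		goto wait_for_read;	/* another thread owns the read */
	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) {
		/* A racing reader finished between the two checks above. */
		clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags);
		return 0;
	}
	submit_read(eb);	/* hypothetical; endio clears READING and wakes waiters */
wait_for_read:
	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ? 0 : -EIO;
}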
static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
unsigned long len)
{
btrfs_warn(eb->fs_info,
- "access to eb bytenr %llu len %lu out of range start %lu len %lu",
+ "access to eb bytenr %llu len %u out of range start %lu len %lu",
eb->start, eb->len, start, len);
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ DEBUG_WARN();
return true;
}
@@ -6736,23 +3936,33 @@ static inline int check_eb_range(const struct extent_buffer *eb,
void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
unsigned long start, unsigned long len)
{
+ const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
- struct page *page;
- char *kaddr;
char *dst = (char *)dstv;
- unsigned long i = get_eb_page_index(start);
+ unsigned long i = get_eb_folio_index(eb, start);
- if (check_eb_range(eb, start, len))
+ if (check_eb_range(eb, start, len)) {
+ /*
+ * Invalid range hit, reset the memory, so callers won't get
+ * some random garbage for their uninitialized memory.
+ */
+ memset(dstv, 0, len);
return;
+ }
- offset = get_eb_offset_in_page(eb, start);
+ if (eb->addr) {
+ memcpy(dstv, eb->addr + start, len);
+ return;
+ }
+
+ offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
- page = eb->pages[i];
+ char *kaddr;
- cur = min(len, (PAGE_SIZE - offset));
- kaddr = page_address(page);
+ cur = min(len, unit_size - offset);
+ kaddr = folio_address(eb->folios[i]);
memcpy(dst, kaddr + offset, cur);
dst += cur;
@@ -6766,24 +3976,29 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
void __user *dstv,
unsigned long start, unsigned long len)
{
+ const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
- struct page *page;
- char *kaddr;
char __user *dst = (char __user *)dstv;
- unsigned long i = get_eb_page_index(start);
+ unsigned long i = get_eb_folio_index(eb, start);
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = get_eb_offset_in_page(eb, start);
+ if (eb->addr) {
+ if (copy_to_user_nofault(dstv, eb->addr + start, len))
+ ret = -EFAULT;
+ return ret;
+ }
+
+ offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
- page = eb->pages[i];
+ char *kaddr;
- cur = min(len, (PAGE_SIZE - offset));
- kaddr = page_address(page);
+ cur = min(len, unit_size - offset);
+ kaddr = folio_address(eb->folios[i]);
if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
ret = -EFAULT;
break;
@@ -6801,25 +4016,25 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
unsigned long start, unsigned long len)
{
+ const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
- struct page *page;
char *kaddr;
char *ptr = (char *)ptrv;
- unsigned long i = get_eb_page_index(start);
+ unsigned long i = get_eb_folio_index(eb, start);
int ret = 0;
if (check_eb_range(eb, start, len))
return -EINVAL;
- offset = get_eb_offset_in_page(eb, start);
+ if (eb->addr)
+ return memcmp(ptrv, eb->addr + start, len);
- while (len > 0) {
- page = eb->pages[i];
+ offset = get_eb_offset_in_folio(eb, start);
- cur = min(len, (PAGE_SIZE - offset));
-
- kaddr = page_address(page);
+ while (len > 0) {
+ cur = min(len, unit_size - offset);
+ kaddr = folio_address(eb->folios[i]);
ret = memcmp(ptr, kaddr + offset, cur);
if (ret)
break;
@@ -6838,68 +4053,71 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
* For regular sector size == PAGE_SIZE case, check if @page is uptodate.
* For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
*/
-static void assert_eb_page_uptodate(const struct extent_buffer *eb,
- struct page *page)
+static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct folio *folio = eb->folios[i];
- if (fs_info->sectorsize < PAGE_SIZE) {
- bool uptodate;
+ ASSERT(folio);
- uptodate = btrfs_subpage_test_uptodate(fs_info, page,
- eb->start, eb->len);
- WARN_ON(!uptodate);
+ /*
+ * If we are using the commit root we could potentially clear a page
+ * Uptodate while we're using the extent buffer that we've previously
+ * looked up. We don't want to complain in this case, as the page was
+ * valid before, we just didn't write it out. Instead we want to catch
+ * the case where we didn't actually read the block properly, which
+ * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
+ */
+ if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
+ return;
+
+ if (btrfs_meta_is_subpage(fs_info)) {
+ folio = eb->folios[0];
+ ASSERT(i == 0);
+ if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
+ eb->start, eb->len)))
+ btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
} else {
- WARN_ON(!PageUptodate(page));
+ WARN_ON(!folio_test_uptodate(folio));
}
}
-void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
- const void *srcv)
-{
- char *kaddr;
-
- assert_eb_page_uptodate(eb, eb->pages[0]);
- kaddr = page_address(eb->pages[0]) +
- get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
- chunk_tree_uuid));
- memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
-}
-
-void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
-{
- char *kaddr;
-
- assert_eb_page_uptodate(eb, eb->pages[0]);
- kaddr = page_address(eb->pages[0]) +
- get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
- memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
-}
-
-void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
- unsigned long start, unsigned long len)
+static void __write_extent_buffer(const struct extent_buffer *eb,
+ const void *srcv, unsigned long start,
+ unsigned long len, bool use_memmove)
{
+ const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
- struct page *page;
char *kaddr;
- char *src = (char *)srcv;
- unsigned long i = get_eb_page_index(start);
-
- WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
+ const char *src = (const char *)srcv;
+ unsigned long i = get_eb_folio_index(eb, start);
+ /* For unmapped (dummy) ebs, no need to check their uptodate status. */
+ const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
if (check_eb_range(eb, start, len))
return;
- offset = get_eb_offset_in_page(eb, start);
+ if (eb->addr) {
+ if (use_memmove)
+ memmove(eb->addr + start, srcv, len);
+ else
+ memcpy(eb->addr + start, srcv, len);
+ return;
+ }
+
+ offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
+ if (check_uptodate)
+ assert_eb_folio_uptodate(eb, i);
- cur = min(len, PAGE_SIZE - offset);
- kaddr = page_address(page);
- memcpy(kaddr + offset, src, cur);
+ cur = min(len, unit_size - offset);
+ kaddr = folio_address(eb->folios[i]);
+ if (use_memmove)
+ memmove(kaddr + offset, src, cur);
+ else
+ memcpy(kaddr + offset, src, cur);
src += cur;
len -= cur;
@@ -6908,55 +4126,60 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
}
}
-void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
- unsigned long len)
+void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
+ unsigned long start, unsigned long len)
{
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- unsigned long i = get_eb_page_index(start);
+ return __write_extent_buffer(eb, srcv, start, len, false);
+}
- if (check_eb_range(eb, start, len))
- return;
+static void memset_extent_buffer(const struct extent_buffer *eb, int c,
+ unsigned long start, unsigned long len)
+{
+ const int unit_size = eb->folio_size;
+ unsigned long cur = start;
- offset = get_eb_offset_in_page(eb, start);
+ if (eb->addr) {
+ memset(eb->addr + start, c, len);
+ return;
+ }
- while (len > 0) {
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
+ while (cur < start + len) {
+ unsigned long index = get_eb_folio_index(eb, cur);
+ unsigned int offset = get_eb_offset_in_folio(eb, cur);
+ unsigned int cur_len = min(start + len - cur, unit_size - offset);
- cur = min(len, PAGE_SIZE - offset);
- kaddr = page_address(page);
- memset(kaddr + offset, 0, cur);
+ assert_eb_folio_uptodate(eb, index);
+ memset(folio_address(eb->folios[index]) + offset, c, cur_len);
- len -= cur;
- offset = 0;
- i++;
+ cur += cur_len;
}
}
+void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
+ unsigned long len)
+{
+ if (check_eb_range(eb, start, len))
+ return;
+ return memset_extent_buffer(eb, 0, start, len);
+}
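memset_extent_buffer() advances in per-folio chunks. A standalone sketch of the chunking arithmetic in plain userspace C, with made-up values and a page-aligned eb->start assumed (illustrative only, not part of this patch):

#include <stdio.h>

int main(void)
{
	unsigned long unit = 4096, start = 4090, len = 20, cur = start;

	while (cur < start + len) {
		unsigned long index = cur / unit;
		unsigned long offset = cur % unit;
		unsigned long cur_len = start + len - cur;

		if (cur_len > unit - offset)
			cur_len = unit - offset;
		/* Prints: folio 0 offset 4090 len 6, then folio 1 offset 0 len 14. */
		printf("folio %lu offset %lu len %lu\n", index, offset, cur_len);
		cur += cur_len;
	}
	return 0;
}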
+
void copy_extent_buffer_full(const struct extent_buffer *dst,
const struct extent_buffer *src)
{
- int i;
- int num_pages;
+ const int unit_size = src->folio_size;
+ unsigned long cur = 0;
ASSERT(dst->len == src->len);
- if (dst->fs_info->sectorsize == PAGE_SIZE) {
- num_pages = num_extent_pages(dst);
- for (i = 0; i < num_pages; i++)
- copy_page(page_address(dst->pages[i]),
- page_address(src->pages[i]));
- } else {
- size_t src_offset = get_eb_offset_in_page(src, 0);
- size_t dst_offset = get_eb_offset_in_page(dst, 0);
+ while (cur < src->len) {
+ unsigned long index = get_eb_folio_index(src, cur);
+ unsigned long offset = get_eb_offset_in_folio(src, cur);
+ unsigned long cur_len = min(src->len, unit_size - offset);
+ void *addr = folio_address(src->folios[index]) + offset;
+
+ write_extent_buffer(dst, addr, cur, cur_len);
- ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
- memcpy(page_address(dst->pages[0]) + dst_offset,
- page_address(src->pages[0]) + src_offset,
- src->len);
+ cur += cur_len;
}
}
@@ -6965,12 +4188,12 @@ void copy_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
+ const int unit_size = dst->folio_size;
u64 dst_len = dst->len;
size_t cur;
size_t offset;
- struct page *page;
char *kaddr;
- unsigned long i = get_eb_page_index(dst_offset);
+ unsigned long i = get_eb_folio_index(dst, dst_offset);
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(src, src_offset, len))
@@ -6978,15 +4201,14 @@ void copy_extent_buffer(const struct extent_buffer *dst,
WARN_ON(src->len != dst_len);
- offset = get_eb_offset_in_page(dst, dst_offset);
+ offset = get_eb_offset_in_folio(dst, dst_offset);
while (len > 0) {
- page = dst->pages[i];
- assert_eb_page_uptodate(dst, page);
+ assert_eb_folio_uptodate(dst, i);
- cur = min(len, (unsigned long)(PAGE_SIZE - offset));
+ cur = min(len, (unsigned long)(unit_size - offset));
- kaddr = page_address(page);
+ kaddr = folio_address(dst->folios[i]);
read_extent_buffer(src, kaddr + offset, src_offset, cur);
src_offset += cur;
@@ -6997,22 +4219,22 @@ void copy_extent_buffer(const struct extent_buffer *dst,
}
/*
- * eb_bitmap_offset() - calculate the page and offset of the byte containing the
- * given bit number
- * @eb: the extent buffer
- * @start: offset of the bitmap item in the extent buffer
- * @nr: bit number
- * @page_index: return index of the page in the extent buffer that contains the
- * given bit number
- * @page_offset: return offset into the page given by page_index
+ * Calculate the folio and offset of the byte containing the given bit number.
+ *
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number
+ * @folio_index: return index of the folio in the extent buffer that contains
+ * the given bit number
+ * @folio_offset: return offset into the folio given by folio_index
*
* This helper hides the ugliness of finding the byte in an extent buffer which
* contains a given bit.
*/
static inline void eb_bitmap_offset(const struct extent_buffer *eb,
unsigned long start, unsigned long nr,
- unsigned long *page_index,
- size_t *page_offset)
+ unsigned long *folio_index,
+ size_t *folio_offset)
{
size_t byte_offset = BIT_BYTE(nr);
size_t offset;
@@ -7022,115 +4244,111 @@ static inline void eb_bitmap_offset(const struct extent_buffer *eb,
* the bitmap item in the extent buffer + the offset of the byte in the
* bitmap item.
*/
- offset = start + offset_in_page(eb->start) + byte_offset;
+ offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
- *page_index = offset >> PAGE_SHIFT;
- *page_offset = offset_in_page(offset);
+ *folio_index = offset >> eb->folio_shift;
+ *folio_offset = offset_in_eb_folio(eb, offset);
}
-/**
- * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
- * @eb: the extent buffer
- * @start: offset of the bitmap item in the extent buffer
- * @nr: bit number to test
+/*
+ * Determine whether a bit in a bitmap item is set.
+ *
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number to test
*/
-int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
- unsigned long nr)
+bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
+ unsigned long nr)
{
- u8 *kaddr;
- struct page *page;
unsigned long i;
size_t offset;
+ u8 *kaddr;
eb_bitmap_offset(eb, start, nr, &i, &offset);
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
+ assert_eb_folio_uptodate(eb, i);
+ kaddr = folio_address(eb->folios[i]);
return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}
-/**
- * extent_buffer_bitmap_set - set an area of a bitmap
- * @eb: the extent buffer
- * @start: offset of the bitmap item in the extent buffer
- * @pos: bit number of the first bit
- * @len: number of bits to set
+static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
+{
+ unsigned long index = get_eb_folio_index(eb, bytenr);
+
+ if (check_eb_range(eb, bytenr, 1))
+ return NULL;
+ return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
+}
+
+/*
+ * Set an area of a bitmap to 1.
+ *
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @pos: bit number of the first bit
+ * @len: number of bits to set
*/
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len)
{
+ unsigned int first_byte = start + BIT_BYTE(pos);
+ unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
+ const bool same_byte = (first_byte == last_byte);
+ u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
u8 *kaddr;
- struct page *page;
- unsigned long i;
- size_t offset;
- const unsigned int size = pos + len;
- int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
- u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
-
- eb_bitmap_offset(eb, start, pos, &i, &offset);
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
-
- while (len >= bits_to_set) {
- kaddr[offset] |= mask_to_set;
- len -= bits_to_set;
- bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~0;
- if (++offset >= PAGE_SIZE && len > 0) {
- offset = 0;
- page = eb->pages[++i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
- }
- }
- if (len) {
- mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
- kaddr[offset] |= mask_to_set;
- }
+
+ if (same_byte)
+ mask &= BITMAP_LAST_BYTE_MASK(pos + len);
+
+ /* Handle the first byte. */
+ kaddr = extent_buffer_get_byte(eb, first_byte);
+ *kaddr |= mask;
+ if (same_byte)
+ return;
+
+ /* Handle the byte aligned part. */
+ ASSERT(first_byte + 1 <= last_byte);
+ memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
+
+ /* Handle the last byte. */
+ kaddr = extent_buffer_get_byte(eb, last_byte);
+ *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
}
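The first/last byte masks replace the old bit-by-byte loop. A standalone demonstration of what they evaluate to, in plain userspace C with example values only (illustrative, not part of this patch):

#include <stdio.h>

#define BYTE_MASK ((1U << 8) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & 7)) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & 7))

int main(void)
{
	/* Setting 7 bits starting at bit 3 touches two bytes. */
	unsigned long pos = 3, len = 7;

	/* 0xf8: bits 3-7 of the first byte. */
	printf("first byte mask: 0x%02x\n", BITMAP_FIRST_BYTE_MASK(pos));
	/* 0x03: bits 0-1 of the last byte. */
	printf("last byte mask:  0x%02x\n", BITMAP_LAST_BYTE_MASK(pos + len));
	return 0;
}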
-/**
- * extent_buffer_bitmap_clear - clear an area of a bitmap
- * @eb: the extent buffer
- * @start: offset of the bitmap item in the extent buffer
- * @pos: bit number of the first bit
- * @len: number of bits to clear
+/*
+ * Clear an area of a bitmap.
+ *
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @pos: bit number of the first bit
+ * @len: number of bits to clear
*/
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
unsigned long start, unsigned long pos,
unsigned long len)
{
+ unsigned int first_byte = start + BIT_BYTE(pos);
+ unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
+ const bool same_byte = (first_byte == last_byte);
+ u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
u8 *kaddr;
- struct page *page;
- unsigned long i;
- size_t offset;
- const unsigned int size = pos + len;
- int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
- u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
-
- eb_bitmap_offset(eb, start, pos, &i, &offset);
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
-
- while (len >= bits_to_clear) {
- kaddr[offset] &= ~mask_to_clear;
- len -= bits_to_clear;
- bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~0;
- if (++offset >= PAGE_SIZE && len > 0) {
- offset = 0;
- page = eb->pages[++i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
- }
- }
- if (len) {
- mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
- kaddr[offset] &= ~mask_to_clear;
- }
+
+ if (same_byte)
+ mask &= BITMAP_LAST_BYTE_MASK(pos + len);
+
+ /* Handle the first byte. */
+ kaddr = extent_buffer_get_byte(eb, first_byte);
+ *kaddr &= ~mask;
+ if (same_byte)
+ return;
+
+ /* Handle the byte aligned part. */
+ ASSERT(first_byte + 1 <= last_byte);
+ memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
+
+ /* Handle the last byte. */
+ kaddr = extent_buffer_get_byte(eb, last_byte);
+ *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
}
static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
@@ -7139,60 +4357,40 @@ static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned
return distance < len;
}
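areas_overlap() is what decides between memcpy and memmove below. Only the tail of the helper is visible in this hunk; a standalone sketch assuming its body computes the absolute distance between the two offsets (illustrative only, not part of this patch):

#include <stdio.h>
#include <stdbool.h>

static bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

int main(void)
{
	printf("%d\n", areas_overlap(100, 150, 60));	/* 1: ranges overlap, memmove needed */
	printf("%d\n", areas_overlap(100, 150, 50));	/* 0: disjoint, memcpy is safe */
	return 0;
}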
-static void copy_pages(struct page *dst_page, struct page *src_page,
- unsigned long dst_off, unsigned long src_off,
- unsigned long len)
-{
- char *dst_kaddr = page_address(dst_page);
- char *src_kaddr;
- int must_memmove = 0;
-
- if (dst_page != src_page) {
- src_kaddr = page_address(src_page);
- } else {
- src_kaddr = dst_kaddr;
- if (areas_overlap(src_off, dst_off, len))
- must_memmove = 1;
- }
-
- if (must_memmove)
- memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
- else
- memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-}
-
void memcpy_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
- unsigned long dst_i;
- unsigned long src_i;
+ const int unit_size = dst->folio_size;
+ unsigned long cur_off = 0;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
- while (len > 0) {
- dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
- src_off_in_page = get_eb_offset_in_page(dst, src_offset);
-
- dst_i = get_eb_page_index(dst_offset);
- src_i = get_eb_page_index(src_offset);
+ if (dst->addr) {
+ const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
- cur = min(len, (unsigned long)(PAGE_SIZE -
- src_off_in_page));
- cur = min_t(unsigned long, cur,
- (unsigned long)(PAGE_SIZE - dst_off_in_page));
+ if (use_memmove)
+ memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
+ else
+ memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
+ return;
+ }
- copy_pages(dst->pages[dst_i], dst->pages[src_i],
- dst_off_in_page, src_off_in_page, cur);
+ while (cur_off < len) {
+ unsigned long cur_src = cur_off + src_offset;
+ unsigned long folio_index = get_eb_folio_index(dst, cur_src);
+ unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
+ unsigned long cur_len = min(src_offset + len - cur_src,
+ unit_size - folio_off);
+ void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
+ const bool use_memmove = areas_overlap(src_offset + cur_off,
+ dst_offset + cur_off, cur_len);
- src_offset += cur;
- dst_offset += cur;
- len -= cur;
+ __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
+ use_memmove);
+ cur_off += cur_len;
}
}
@@ -7200,33 +4398,46 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
- unsigned long dst_i;
- unsigned long src_i;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
+
if (dst_offset < src_offset) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
+
+ if (dst->addr) {
+ memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
+ return;
+ }
+
while (len > 0) {
- dst_i = get_eb_page_index(dst_end);
- src_i = get_eb_page_index(src_end);
+ unsigned long src_i;
+ size_t cur;
+ size_t dst_off_in_folio;
+ size_t src_off_in_folio;
+ void *src_addr;
+ bool use_memmove;
+
+ src_i = get_eb_folio_index(dst, src_end);
+
+ dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
+ src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
+
+ cur = min_t(unsigned long, len, src_off_in_folio + 1);
+ cur = min(cur, dst_off_in_folio + 1);
- dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
- src_off_in_page = get_eb_offset_in_page(dst, src_end);
+ src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
+ cur + 1;
+ use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
+ cur);
- cur = min_t(unsigned long, len, src_off_in_page + 1);
- cur = min(cur, dst_off_in_page + 1);
- copy_pages(dst->pages[dst_i], dst->pages[src_i],
- dst_off_in_page - cur + 1,
- src_off_in_page - cur + 1, cur);
+ __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
+ use_memmove);
dst_end -= cur;
src_end -= cur;
@@ -7234,82 +4445,29 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
}
}
-#define GANG_LOOKUP_SIZE 16
-static struct extent_buffer *get_next_extent_buffer(
- struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
-{
- struct extent_buffer *gang[GANG_LOOKUP_SIZE];
- struct extent_buffer *found = NULL;
- u64 page_start = page_offset(page);
- u64 cur = page_start;
-
- ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
- lockdep_assert_held(&fs_info->buffer_lock);
-
- while (cur < page_start + PAGE_SIZE) {
- int ret;
- int i;
-
- ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
- (void **)gang, cur >> fs_info->sectorsize_bits,
- min_t(unsigned int, GANG_LOOKUP_SIZE,
- PAGE_SIZE / fs_info->nodesize));
- if (ret == 0)
- goto out;
- for (i = 0; i < ret; i++) {
- /* Already beyond page end */
- if (gang[i]->start >= page_start + PAGE_SIZE)
- goto out;
- /* Found one */
- if (gang[i]->start >= bytenr) {
- found = gang[i];
- goto out;
- }
- }
- cur = gang[ret - 1]->start + gang[ret - 1]->len;
- }
-out:
- return found;
-}
-
-static int try_release_subpage_extent_buffer(struct page *page)
+static int try_release_subpage_extent_buffer(struct folio *folio)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
- u64 cur = page_offset(page);
- const u64 end = page_offset(page) + PAGE_SIZE;
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ struct extent_buffer *eb;
+ unsigned long start = (folio_pos(folio) >> fs_info->nodesize_bits);
+ unsigned long index = start;
+ unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1;
int ret;
- while (cur < end) {
- struct extent_buffer *eb = NULL;
-
- /*
- * Unlike try_release_extent_buffer() which uses page->private
- * to grab buffer, for subpage case we rely on radix tree, thus
- * we need to ensure radix tree consistency.
- *
- * We also want an atomic snapshot of the radix tree, thus go
- * with spinlock rather than RCU.
- */
- spin_lock(&fs_info->buffer_lock);
- eb = get_next_extent_buffer(fs_info, page, cur);
- if (!eb) {
- /* No more eb in the page range after or at cur */
- spin_unlock(&fs_info->buffer_lock);
- break;
- }
- cur = eb->start + eb->len;
-
+ rcu_read_lock();
+ xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) {
/*
* The same as try_release_extent_buffer(), to ensure the eb
* won't disappear out from under us.
*/
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+ rcu_read_unlock();
+
+ if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&fs_info->buffer_lock);
- break;
+ rcu_read_lock();
+ continue;
}
- spin_unlock(&fs_info->buffer_lock);
/*
* If tree ref isn't set then we know the ref on this eb is a
@@ -7323,43 +4481,45 @@ static int try_release_subpage_extent_buffer(struct page *page)
/*
* Here we don't care about the return value, we will always
- * check the page private at the end. And
+ * check the folio private at the end. And
* release_extent_buffer() will release the refs_lock.
*/
release_extent_buffer(eb);
+ rcu_read_lock();
}
+ rcu_read_unlock();
+
/*
- * Finally to check if we have cleared page private, as if we have
- * released all ebs in the page, the page private should be cleared now.
+ * Finally check if we have cleared folio private: if we have released
+ * all ebs in the page, the folio private should be cleared by now.
*/
- spin_lock(&page->mapping->private_lock);
- if (!PagePrivate(page))
+ spin_lock(&folio->mapping->i_private_lock);
+ if (!folio_test_private(folio))
ret = 1;
else
ret = 0;
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return ret;
-
}
-int try_release_extent_buffer(struct page *page)
+int try_release_extent_buffer(struct folio *folio)
{
struct extent_buffer *eb;
- if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
- return try_release_subpage_extent_buffer(page);
+ if (btrfs_meta_is_subpage(folio_to_fs_info(folio)))
+ return try_release_subpage_extent_buffer(folio);
/*
- * We need to make sure nobody is changing page->private, as we rely on
- * page->private as the pointer to extent buffer.
+ * We need to make sure nobody is changing folio private, as we rely on
+ * folio private as the pointer to extent buffer.
*/
- spin_lock(&page->mapping->private_lock);
- if (!PagePrivate(page)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
+ if (!folio_test_private(folio)) {
+ spin_unlock(&folio->mapping->i_private_lock);
return 1;
}
- eb = (struct extent_buffer *)page->private;
+ eb = folio_get_private(folio);
BUG_ON(!eb);
/*
@@ -7368,12 +4528,12 @@ int try_release_extent_buffer(struct page *page)
* this page.
*/
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+ if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return 0;
}
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
/*
* If tree ref isn't set then we know the ref on this eb is a real ref,
@@ -7388,7 +4548,8 @@ int try_release_extent_buffer(struct page *page)
}
/*
- * btrfs_readahead_tree_block - attempt to readahead a child block
+ * Attempt to readahead a child block.
+ *
* @fs_info: the fs_info
* @bytenr: bytenr to read
* @owner_root: objectid of the root that owns this eb
@@ -7402,6 +4563,10 @@ int try_release_extent_buffer(struct page *page)
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level)
{
+ struct btrfs_tree_parent_check check = {
+ .level = level,
+ .transid = gen
+ };
struct extent_buffer *eb;
int ret;
@@ -7409,12 +4574,12 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
if (IS_ERR(eb))
return;
- if (btrfs_buffer_uptodate(eb, gen, 1)) {
+ if (btrfs_buffer_uptodate(eb, gen, true)) {
free_extent_buffer(eb);
return;
}
- ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
+ ret = read_extent_buffer_pages_nowait(eb, 0, &check);
if (ret < 0)
free_extent_buffer_stale(eb);
else
@@ -7422,7 +4587,8 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
}
/*
- * btrfs_readahead_node_child - readahead a node's child block
+ * Readahead a node's child block.
+ *
* @node: parent node we're reading from
* @slot: slot in the parent node for the child we want to read
*
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0399cf8e3c32..02ebb2f238af 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -7,48 +7,62 @@
#include <linux/refcount.h>
#include <linux/fiemap.h>
#include <linux/btrfs_tree.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "messages.h"
#include "ulist.h"
-
-/*
- * flags for bio submission. The high bits indicate the compression
- * type for this bio
- */
-#define EXTENT_BIO_COMPRESSED 1
-#define EXTENT_BIO_FLAG_SHIFT 16
+#include "misc.h"
+
+struct page;
+struct file;
+struct folio;
+struct inode;
+struct fiemap_extent_info;
+struct readahead_control;
+struct address_space;
+struct writeback_control;
+struct extent_io_tree;
+struct extent_map_tree;
+struct extent_state;
+struct btrfs_block_group;
+struct btrfs_fs_info;
+struct btrfs_inode;
+struct btrfs_root;
+struct btrfs_trans_handle;
+struct btrfs_tree_parent_check;
enum {
EXTENT_BUFFER_UPTODATE,
EXTENT_BUFFER_DIRTY,
- EXTENT_BUFFER_CORRUPT,
- /* this got triggered by readahead */
- EXTENT_BUFFER_READAHEAD,
EXTENT_BUFFER_TREE_REF,
EXTENT_BUFFER_STALE,
EXTENT_BUFFER_WRITEBACK,
- /* read IO error */
- EXTENT_BUFFER_READ_ERR,
EXTENT_BUFFER_UNMAPPED,
- EXTENT_BUFFER_IN_TREE,
/* write IO error */
EXTENT_BUFFER_WRITE_ERR,
- EXTENT_BUFFER_NO_CHECK,
- EXTENT_BUFFER_ZONE_FINISH,
+ /* Indicate the extent buffer is written zeroed out (for zoned) */
+ EXTENT_BUFFER_ZONED_ZEROOUT,
+ /* Indicate that extent buffer pages are being read */
+ EXTENT_BUFFER_READING,
};
/* these are flags for __process_pages_contig */
-#define PAGE_UNLOCK (1 << 0)
-/* Page starts writeback, clear dirty bit and set writeback bit */
-#define PAGE_START_WRITEBACK (1 << 1)
-#define PAGE_END_WRITEBACK (1 << 2)
-#define PAGE_SET_ORDERED (1 << 3)
-#define PAGE_SET_ERROR (1 << 4)
-#define PAGE_LOCK (1 << 5)
+enum {
+ ENUM_BIT(PAGE_UNLOCK),
+ /* Page starts writeback, clear dirty bit and set writeback bit */
+ ENUM_BIT(PAGE_START_WRITEBACK),
+ ENUM_BIT(PAGE_END_WRITEBACK),
+ ENUM_BIT(PAGE_SET_ORDERED),
+};
/*
- * page->private values. Every page that is controlled by the extent
- * map has page->private set to one.
+ * Folio private values. Every page that is controlled by the extent map has
+ * folio private set to this value.
*/
-#define EXTENT_PAGE_PRIVATE 1
+#define EXTENT_FOLIO_PRIVATE 1
/*
* The extent buffer bitmap operations are done with byte granularity instead of
@@ -58,67 +72,119 @@ enum {
* single word in a bitmap may straddle two pages in the extent buffer.
*/
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
+#define BYTE_MASK ((1U << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
-struct btrfs_root;
-struct btrfs_inode;
-struct btrfs_io_bio;
-struct btrfs_fs_info;
-struct io_failure_record;
-struct extent_io_tree;
-
-typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
- int mirror_num,
- unsigned long bio_flags);
-typedef blk_status_t (extent_submit_bio_start_t)(struct inode *inode,
- struct bio *bio, u64 dio_file_offset);
+int __init extent_buffer_init_cachep(void);
+void __cold extent_buffer_free_cachep(void);
#define INLINE_EXTENT_BUFFER_PAGES (BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE)
struct extent_buffer {
u64 start;
- unsigned long len;
+ u32 len;
+ u32 folio_size;
unsigned long bflags;
struct btrfs_fs_info *fs_info;
+
+ /*
+ * The address where the eb can be accessed without any cross-page handling.
+ * This can be NULL if not possible.
+ */
+ void *addr;
+
spinlock_t refs_lock;
- atomic_t refs;
- atomic_t io_pages;
+ refcount_t refs;
int read_mirror;
- struct rcu_head rcu_head;
- pid_t lock_owner;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
s8 log_index;
+ u8 folio_shift;
+ struct rcu_head rcu_head;
struct rw_semaphore lock;
- struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
- struct list_head release_list;
+ /*
+ * Pointers to all the folios of the extent buffer.
+ *
+ * For now the folio is always order 0 (aka, a single page).
+ */
+ struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
+ pid_t lock_owner;
#endif
};
+struct btrfs_eb_write_context {
+ struct writeback_control *wbc;
+ struct extent_buffer *eb;
+ /* Block group @eb resides in. Only used for zoned mode. */
+ struct btrfs_block_group *zoned_bg;
+};
+
+static inline unsigned long offset_in_eb_folio(const struct extent_buffer *eb,
+ u64 start)
+{
+ ASSERT(eb->folio_size);
+ return start & (eb->folio_size - 1);
+}
+
/*
- * Structure to record info about the bio being assembled, and other info like
- * how many bytes are there before stripe/ordered extent boundary.
+ * Get the correct offset inside the folio of an extent buffer.
+ *
+ * @eb: target extent buffer
+ * @offset: offset inside the extent buffer
+ *
+ * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
*/
-struct btrfs_bio_ctrl {
- struct bio *bio;
- unsigned long bio_flags;
- u32 len_to_stripe_boundary;
- u32 len_to_oe_boundary;
-};
+static inline size_t get_eb_offset_in_folio(const struct extent_buffer *eb,
+ unsigned long offset)
+{
+ /*
+ * 1) sectorsize == PAGE_SIZE and nodesize >= PAGE_SIZE case
+ * 1.1) One large folio covering the whole eb
+ * The eb->start is aligned to folio size, thus adding it
+ * won't cause any difference.
+ * 1.2) Several page sized folios
+ * The eb->start is aligned to folio (page) size, thus
+ * adding it won't cause any difference.
+ *
+ * 2) sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE case
+ * In this case there would only be one page sized folio, and there
+ * may be several different extent buffers in the page/folio.
+ * We need to add eb->start to properly access the offset inside
+ * that eb.
+ */
+ return offset_in_folio(eb->folios[0], offset + eb->start);
+}
+
+static inline unsigned long get_eb_folio_index(const struct extent_buffer *eb,
+ unsigned long offset)
+{
+ /*
+ * 1) sectorsize == PAGE_SIZE and nodesize >= PAGE_SIZE case
+ * 1.1) One large folio covering the whole eb.
+ * the folio_shift would be large enough to always make us
+ * return 0 as index.
+ * 1.2) Several page sized folios
+ * The folio_shift would be PAGE_SHIFT, giving us the correct
+ * index.
+ *
+ * 2) sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE case
+ * The folio would only be page sized, and always give us 0 as index.
+ */
+ return offset >> eb->folio_shift;
+}
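
The two helpers above reduce an eb offset to a folio index plus an in-folio offset. The stand-alone sketch below mirrors that shift/mask arithmetic with plain integers, assuming order-0 (4 KiB) folios and a 16 KiB node size; it deliberately ignores the eb->start adjustment used for the subpage case and is an illustration only, not kernel code.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        const unsigned long folio_size = 4096;  /* assumed order-0 folio */
        const unsigned int folio_shift = 12;    /* log2(folio_size) */
        const unsigned long eb_len = 16384;     /* assumed nodesize */

        for (unsigned long offset = 0; offset < eb_len; offset += 5000) {
                unsigned long index = offset >> folio_shift;
                unsigned long in_folio = offset & (folio_size - 1);

                printf("offset %5lu -> folio index %lu, offset in folio %lu\n",
                       offset, index, in_folio);
                /* The split is lossless: index and in-folio offset recombine. */
                assert(index * folio_size + in_folio == offset);
        }
        return 0;
}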
/*
* Structure to record how many bytes and which ranges are set/cleared
*/
struct extent_changeset {
/* How many bytes are set/cleared in this operation */
- unsigned int bytes_changed;
+ u64 bytes_changed;
/* Changed ranges */
struct ulist range_changed;
@@ -142,6 +208,11 @@ static inline struct extent_changeset *extent_changeset_alloc(void)
return ret;
}
+static inline void extent_changeset_prealloc(struct extent_changeset *changeset, gfp_t gfp_mask)
+{
+ ulist_prealloc(&changeset->range_changed, gfp_mask);
+}
+
static inline void extent_changeset_release(struct extent_changeset *changeset)
{
if (!changeset)
@@ -158,47 +229,23 @@ static inline void extent_changeset_free(struct extent_changeset *changeset)
kfree(changeset);
}
-static inline void extent_set_compress_type(unsigned long *bio_flags,
- int compress_type)
-{
- *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
-}
+bool try_release_extent_mapping(struct folio *folio, gfp_t mask);
+int try_release_extent_buffer(struct folio *folio);
-static inline int extent_compress_type(unsigned long bio_flags)
-{
- return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
-}
-
-struct extent_map_tree;
-
-typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
- struct page *page, size_t pg_offset,
- u64 start, u64 len);
-
-int try_release_extent_mapping(struct page *page, gfp_t mask);
-int try_release_extent_buffer(struct page *page);
-
-int __must_check submit_one_bio(struct bio *bio, int mirror_num,
- unsigned long bio_flags);
-int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl,
- unsigned int read_flags, u64 *prev_em_start);
-int extent_write_full_page(struct page *page, struct writeback_control *wbc);
-int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
-int extent_writepages(struct address_space *mapping,
- struct writeback_control *wbc);
+int btrfs_read_folio(struct file *file, struct folio *folio);
+void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
+ u64 start, u64 end, struct writeback_control *wbc,
+ bool pages_dirty);
+int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
-void extent_readahead(struct readahead_control *rac);
-int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len);
-int set_page_extent_mapped(struct page *page);
-void clear_page_extent_mapped(struct page *page);
+void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
+void btrfs_readahead(struct readahead_control *rac);
+int set_folio_extent_mapped(struct folio *folio);
+void clear_folio_extent_mapped(struct folio *folio);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level);
-struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
@@ -206,17 +253,23 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
-#define WAIT_NONE 0
-#define WAIT_COMPLETE 1
-#define WAIT_PAGE_LOCK 2
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
- int mirror_num);
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
+
+static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+ TASK_UNINTERRUPTIBLE);
+}
+
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level);
void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
-static inline int num_extent_pages(const struct extent_buffer *eb)
+/* Note: this can be used in for loops without caching the value in a variable. */
+static inline int __pure num_extent_pages(const struct extent_buffer *eb)
{
/*
* For sectorsize == PAGE_SIZE case, since nodesize is always aligned to
@@ -228,6 +281,24 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
return (eb->len >> PAGE_SHIFT) ?: 1;
}
+/*
+ * The number of folios can only be determined at runtime by checking
+ * eb::folios[0], as we can have either one large folio covering the whole
+ * eb (nodesize <= PAGE_SIZE, or a high order folio) or multiple
+ * single-paged folios.
+ *
+ * Note: this can be used in for loops without caching the value in a variable.
+ */
+static inline int __pure num_extent_folios(const struct extent_buffer *eb)
+{
+ if (!eb->folios[0])
+ return 0;
+ if (folio_order(eb->folios[0]))
+ return 1;
+ return num_extent_pages(eb);
+}
+
static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
{
return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -241,11 +312,24 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dst,
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
void __user *dst, unsigned long start,
unsigned long len);
-void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
-void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
- const void *src);
void write_extent_buffer(const struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
+
+static inline void write_extent_buffer_chunk_tree_uuid(
+ const struct extent_buffer *eb, const void *chunk_tree_uuid)
+{
+ write_extent_buffer(eb, chunk_tree_uuid,
+ offsetof(struct btrfs_header, chunk_tree_uuid),
+ BTRFS_FSID_SIZE);
+}
+
+static inline void write_extent_buffer_fsid(const struct extent_buffer *eb,
+ const void *fsid)
+{
+ write_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
+ BTRFS_FSID_SIZE);
+}
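
The two inline helpers above write a field into the serialized header at its structure offset via offsetof(). As a hedged illustration of that pattern, the user-space sketch below does the same thing against a made-up toy_header layout (not the real struct btrfs_header).

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy on-disk header layout, NOT the real struct btrfs_header. */
struct toy_header {
        unsigned char csum[32];
        unsigned char fsid[16];
        unsigned long long bytenr;
};

int main(void)
{
        unsigned char buf[sizeof(struct toy_header)] = { 0 };
        unsigned char fsid[16] = { 0xaa, 0xbb };

        /* Write the field at its structure offset, like the helpers above. */
        memcpy(buf + offsetof(struct toy_header, fsid), fsid, sizeof(fsid));
        printf("fsid lives at offset %zu\n", offsetof(struct toy_header, fsid));
        return 0;
}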
+
void copy_extent_buffer_full(const struct extent_buffer *dst,
const struct extent_buffer *src);
void copy_extent_buffer(const struct extent_buffer *dst,
@@ -260,57 +344,33 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
unsigned long len);
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
unsigned long len);
-int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
- unsigned long pos);
+bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
+ unsigned long pos);
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
unsigned long start, unsigned long pos,
unsigned long len);
-void clear_extent_buffer_dirty(const struct extent_buffer *eb);
-bool set_extent_buffer_dirty(struct extent_buffer *eb);
+void set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
-int extent_buffer_under_io(const struct extent_buffer *eb);
-void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
-void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- struct page *locked_page,
+ const struct folio *locked_folio,
+ struct extent_state **cached,
u32 bits_to_clear, unsigned long page_ops);
-struct bio *btrfs_bio_alloc(unsigned int nr_iovecs);
-struct bio *btrfs_bio_clone(struct bio *bio);
-struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
-
-void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
-int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);
-
-/*
- * When IO fails, either with EIO or csum verification fails, we
- * try other mirrors that might have a good copy of the data. This
- * io_failure_record is used to record state as we go through all the
- * mirrors. If another mirror has good data, the sector is set up to date
- * and things continue. If a good mirror can't be found, the original
- * bio end_io callback is called to indicate things have failed.
- */
-struct io_failure_record {
- struct page *page;
- u64 start;
- u64 len;
- u64 logical;
- unsigned long bio_flags;
- int this_mirror;
- int failed_mirror;
-};
+int extent_invalidate_folio(struct extent_io_tree *tree,
+ struct folio *folio, size_t offset);
+void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf);
-int btrfs_repair_one_sector(struct inode *inode,
- struct bio *failed_bio, u32 bio_offset,
- struct page *page, unsigned int pgoff,
- u64 start, int failed_mirror,
- submit_bio_hook_t *submit_bio_hook);
+int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
+ bool nofail);
+int btrfs_alloc_folio_array(unsigned int nr_folios, unsigned int order,
+ struct folio **folio_array);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
- struct page *locked_page, u64 *start,
+ struct folio *locked_folio, u64 *start,
u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 5a36add21305..7e38c23a0c1c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -3,87 +3,73 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include "messages.h"
#include "ctree.h"
-#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
+#include "btrfs_inode.h"
+#include "disk-io.h"
static struct kmem_cache *extent_map_cache;
-int __init extent_map_init(void)
+int __init btrfs_extent_map_init(void)
{
extent_map_cache = kmem_cache_create("btrfs_extent_map",
- sizeof(struct extent_map), 0,
- SLAB_MEM_SPREAD, NULL);
+ sizeof(struct extent_map), 0, 0, NULL);
if (!extent_map_cache)
return -ENOMEM;
return 0;
}
-void __cold extent_map_exit(void)
+void __cold btrfs_extent_map_exit(void)
{
kmem_cache_destroy(extent_map_cache);
}
-/**
- * extent_map_tree_init - initialize extent map tree
- * @tree: tree to initialize
- *
- * Initialize the extent tree @tree. Should be called for each new inode
- * or other user of the extent_map interface.
+/*
+ * Initialize the extent map tree @tree. Should be called for each new inode or
+ * other user of the extent_map interface.
*/
-void extent_map_tree_init(struct extent_map_tree *tree)
+void btrfs_extent_map_tree_init(struct extent_map_tree *tree)
{
- tree->map = RB_ROOT_CACHED;
+ tree->root = RB_ROOT;
INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
-/**
- * alloc_extent_map - allocate new extent map structure
- *
- * Allocate a new extent_map structure. The new structure is
- * returned with a reference count of one and needs to be
- * freed using free_extent_map()
+/*
+ * Allocate a new extent_map structure. The new structure is returned with a
+ * reference count of one and needs to be freed using btrfs_free_extent_map().
*/
-struct extent_map *alloc_extent_map(void)
+struct extent_map *btrfs_alloc_extent_map(void)
{
struct extent_map *em;
em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
if (!em)
return NULL;
RB_CLEAR_NODE(&em->rb_node);
- em->flags = 0;
- em->compress_type = BTRFS_COMPRESS_NONE;
- em->generation = 0;
refcount_set(&em->refs, 1);
INIT_LIST_HEAD(&em->list);
return em;
}
-/**
- * free_extent_map - drop reference count of an extent_map
- * @em: extent map being released
- *
- * Drops the reference out on @em by one and free the structure
- * if the reference count hits zero.
+/*
+ * Drop the reference on @em by one and free the structure if the reference
+ * count hits zero.
*/
-void free_extent_map(struct extent_map *em)
+void btrfs_free_extent_map(struct extent_map *em)
{
if (!em)
return;
- WARN_ON(refcount_read(&em->refs) == 0);
if (refcount_dec_and_test(&em->refs)) {
- WARN_ON(extent_map_in_tree(em));
+ WARN_ON(btrfs_extent_map_in_tree(em));
WARN_ON(!list_empty(&em->list));
- if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
- kfree(em->map_lookup);
kmem_cache_free(extent_map_cache, em);
}
}
-/* simple helper to do math around the end of an extent, handling wrap */
+/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
if (start + len < start)
@@ -91,36 +77,44 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
-static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
+static void remove_em(struct btrfs_inode *inode, struct extent_map *em)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ rb_erase(&em->rb_node, &inode->extent_tree.root);
+ RB_CLEAR_NODE(&em->rb_node);
+
+ if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(inode->root)))
+ percpu_counter_dec(&fs_info->evictable_extent_maps);
+}
+
+static int tree_insert(struct rb_root *root, struct extent_map *em)
{
- struct rb_node **p = &root->rb_root.rb_node;
+ struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct extent_map *entry = NULL;
struct rb_node *orig_parent = NULL;
u64 end = range_end(em->start, em->len);
- bool leftmost = true;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct extent_map, rb_node);
- if (em->start < entry->start) {
+ if (em->start < entry->start)
p = &(*p)->rb_left;
- } else if (em->start >= extent_map_end(entry)) {
+ else if (em->start >= btrfs_extent_map_end(entry))
p = &(*p)->rb_right;
- leftmost = false;
- } else {
+ else
return -EEXIST;
- }
}
orig_parent = parent;
- while (parent && em->start >= extent_map_end(entry)) {
+ while (parent && em->start >= btrfs_extent_map_end(entry)) {
parent = rb_next(parent);
entry = rb_entry(parent, struct extent_map, rb_node);
}
if (parent)
- if (end > entry->start && em->start < extent_map_end(entry))
+ if (end > entry->start && em->start < btrfs_extent_map_end(entry))
return -EEXIST;
parent = orig_parent;
@@ -130,21 +124,20 @@ static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
entry = rb_entry(parent, struct extent_map, rb_node);
}
if (parent)
- if (end > entry->start && em->start < extent_map_end(entry))
+ if (end > entry->start && em->start < btrfs_extent_map_end(entry))
return -EEXIST;
rb_link_node(&em->rb_node, orig_parent, p);
- rb_insert_color_cached(&em->rb_node, root, leftmost);
+ rb_insert_color(&em->rb_node, root);
return 0;
}
/*
- * search through the tree for an extent_map with a given offset. If
- * it can't be found, try to find some neighboring extents
+ * Search through the tree for an extent_map with a given offset. If it can't
+ * be found, try to find some neighboring extents.
*/
-static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
- struct rb_node **prev_ret,
- struct rb_node **next_ret)
+static struct rb_node *tree_search(struct rb_root *root, u64 offset,
+ struct rb_node **prev_or_next_ret)
{
struct rb_node *n = root->rb_node;
struct rb_node *prev = NULL;
@@ -152,6 +145,8 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
struct extent_map *entry;
struct extent_map *prev_entry = NULL;
+ ASSERT(prev_or_next_ret);
+
while (n) {
entry = rb_entry(n, struct extent_map, rb_node);
prev = n;
@@ -159,81 +154,194 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
if (offset < entry->start)
n = n->rb_left;
- else if (offset >= extent_map_end(entry))
+ else if (offset >= btrfs_extent_map_end(entry))
n = n->rb_right;
else
return n;
}
- if (prev_ret) {
- orig_prev = prev;
- while (prev && offset >= extent_map_end(prev_entry)) {
- prev = rb_next(prev);
- prev_entry = rb_entry(prev, struct extent_map, rb_node);
- }
- *prev_ret = prev;
- prev = orig_prev;
+ orig_prev = prev;
+ while (prev && offset >= btrfs_extent_map_end(prev_entry)) {
+ prev = rb_next(prev);
+ prev_entry = rb_entry(prev, struct extent_map, rb_node);
}
- if (next_ret) {
+ /*
+ * Previous extent map found, return as in this case the caller does not
+ * care about the next one.
+ */
+ if (prev) {
+ *prev_or_next_ret = prev;
+ return NULL;
+ }
+
+ prev = orig_prev;
+ prev_entry = rb_entry(prev, struct extent_map, rb_node);
+ while (prev && offset < prev_entry->start) {
+ prev = rb_prev(prev);
prev_entry = rb_entry(prev, struct extent_map, rb_node);
- while (prev && offset < prev_entry->start) {
- prev = rb_prev(prev);
- prev_entry = rb_entry(prev, struct extent_map, rb_node);
- }
- *next_ret = prev;
}
+ *prev_or_next_ret = prev;
+
return NULL;
}
-/* check to see if two extent_map structs are adjacent and safe to merge */
-static int mergable_maps(struct extent_map *prev, struct extent_map *next)
+static inline u64 extent_map_block_len(const struct extent_map *em)
{
- if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
- return 0;
+ if (btrfs_extent_map_is_compressed(em))
+ return em->disk_num_bytes;
+ return em->len;
+}
- /*
- * don't merge compressed extents, we need to know their
- * actual size
- */
- if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
- return 0;
+static inline u64 extent_map_block_end(const struct extent_map *em)
+{
+ const u64 block_start = btrfs_extent_map_block_start(em);
+ const u64 block_end = block_start + extent_map_block_len(em);
- if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
- test_bit(EXTENT_FLAG_LOGGING, &next->flags))
- return 0;
+ if (block_end < block_start)
+ return (u64)-1;
+
+ return block_end;
+}
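
range_end() and extent_map_block_end() both clamp to (u64)-1 when the addition would wrap. A minimal user-space sketch of that overflow guard, with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint64_t clamped_end(uint64_t start, uint64_t len)
{
        if (start + len < start)        /* unsigned overflow check */
                return (uint64_t)-1;
        return start + len;
}

int main(void)
{
        /* Normal case: no wrap, end is start + len. */
        printf("%llu\n", (unsigned long long)clamped_end(4096, 8192));
        /* Wrapping case: the end is clamped to the maximum u64 value. */
        printf("%llu\n", (unsigned long long)clamped_end(UINT64_MAX - 10, 4096));
        return 0;
}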
+
+static bool can_merge_extent_map(const struct extent_map *em)
+{
+ if (em->flags & EXTENT_FLAG_PINNED)
+ return false;
+
+ /* Don't merge compressed extents, we need to know their actual size. */
+ if (btrfs_extent_map_is_compressed(em))
+ return false;
+
+ if (em->flags & EXTENT_FLAG_LOGGING)
+ return false;
/*
* We don't want to merge stuff that hasn't been written to the log yet
* since it may not reflect exactly what is on disk, and that would be
* bad.
*/
- if (!list_empty(&prev->list) || !list_empty(&next->list))
- return 0;
+ if (!list_empty(&em->list))
+ return false;
+
+ return true;
+}
+
+/* Check to see if two extent_map structs are adjacent and safe to merge. */
+static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
+{
+ if (btrfs_extent_map_end(prev) != next->start)
+ return false;
+
+ /*
+ * The merged flag is not an on-disk flag, it just indicates we had the
+ * extent maps of 2 (or more) adjacent extents merged, so factor it out.
+ */
+ if ((prev->flags & ~EXTENT_FLAG_MERGED) !=
+ (next->flags & ~EXTENT_FLAG_MERGED))
+ return false;
+
+ if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
+ return btrfs_extent_map_block_start(next) == extent_map_block_end(prev);
+
+ /* HOLES and INLINE extents. */
+ return next->disk_bytenr == prev->disk_bytenr;
+}
+
+/*
+ * Handle the on-disk data extents merge for @prev and @next.
+ *
+ * @prev: left extent to merge
+ * @next: right extent to merge
+ * @merged: the extent we will not discard after the merge; updated with new values
+ *
+ * After this, one of the two extents is the new merged extent and the other is
+ * removed from the tree and likely freed. Note that @merged is one of @prev/@next
+ * so there is const/non-const aliasing occurring here.
+ *
+ * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
+ * For now only uncompressed regular extents can be merged.
+ */
+static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next,
+ struct extent_map *merged)
+{
+ u64 new_disk_bytenr;
+ u64 new_disk_num_bytes;
+ u64 new_offset;
- ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
- prev->block_start != EXTENT_MAP_DELALLOC);
-
- if (prev->map_lookup || next->map_lookup)
- ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
- test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
-
- if (extent_map_end(prev) == next->start &&
- prev->flags == next->flags &&
- prev->map_lookup == next->map_lookup &&
- ((next->block_start == EXTENT_MAP_HOLE &&
- prev->block_start == EXTENT_MAP_HOLE) ||
- (next->block_start == EXTENT_MAP_INLINE &&
- prev->block_start == EXTENT_MAP_INLINE) ||
- (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
- next->block_start == extent_map_block_end(prev)))) {
- return 1;
+ /* @prev and @next should not be compressed. */
+ ASSERT(!btrfs_extent_map_is_compressed(prev));
+ ASSERT(!btrfs_extent_map_is_compressed(next));
+
+ /*
+ * There are two different cases where @prev and @next can be merged.
+ *
+ * 1) They are referring to the same data extent:
+ *
+ * |<----- data extent A ----->|
+ * |<- prev ->|<- next ->|
+ *
+ * 2) They are referring to different data extents but still adjacent:
+ *
+ * |<-- data extent A -->|<-- data extent B -->|
+ * |<- prev ->|<- next ->|
+ *
+ * The calculation here always merges the data extents first, then updates
+ * @offset using the new data extents.
+ *
+ * For case 1), the merged data extent would be the same.
+ * For case 2), we just merge the two data extents into one.
+ */
+ new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
+ new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
+ next->disk_bytenr + next->disk_num_bytes) -
+ new_disk_bytenr;
+ new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;
+
+ merged->disk_bytenr = new_disk_bytenr;
+ merged->disk_num_bytes = new_disk_num_bytes;
+ merged->ram_bytes = new_disk_num_bytes;
+ merged->offset = new_offset;
+}
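
To make the two merge cases above concrete, here is a user-space sketch that applies the same min/max/offset arithmetic to plain structs; the byte numbers are invented for illustration (case 2: two adjacent 64 KiB data extents starting at 1 MiB).

#include <stdint.h>
#include <stdio.h>

struct em_sketch {
        uint64_t disk_bytenr;
        uint64_t disk_num_bytes;
        uint64_t offset;
};

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* Same math as merge_ondisk_extents(), applied to plain structs. */
static struct em_sketch merge(struct em_sketch prev, struct em_sketch next)
{
        struct em_sketch m;

        m.disk_bytenr = min_u64(prev.disk_bytenr, next.disk_bytenr);
        m.disk_num_bytes = max_u64(prev.disk_bytenr + prev.disk_num_bytes,
                                   next.disk_bytenr + next.disk_num_bytes) -
                           m.disk_bytenr;
        m.offset = prev.disk_bytenr + prev.offset - m.disk_bytenr;
        return m;
}

int main(void)
{
        /* Two adjacent 64 KiB data extents, the first at 1 MiB. */
        struct em_sketch prev = { 1048576, 65536, 0 };
        struct em_sketch next = { 1048576 + 65536, 65536, 0 };
        struct em_sketch m = merge(prev, next);

        /* Expect: disk_bytenr=1048576 disk_num_bytes=131072 offset=0 */
        printf("disk_bytenr=%llu disk_num_bytes=%llu offset=%llu\n",
               (unsigned long long)m.disk_bytenr,
               (unsigned long long)m.disk_num_bytes,
               (unsigned long long)m.offset);
        return 0;
}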
+
+static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,
+ struct extent_map *em)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
+ return;
+ btrfs_crit(fs_info,
+"%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x",
+ prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes,
+ em->ram_bytes, em->offset, em->flags);
+ ASSERT(0);
+}
+
+/* Internal sanity checks for btrfs debug builds. */
+static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
+ return;
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ if (em->disk_num_bytes == 0)
+ dump_extent_map(fs_info, "zero disk_num_bytes", em);
+ if (em->offset + em->len > em->ram_bytes)
+ dump_extent_map(fs_info, "ram_bytes too small", em);
+ if (em->offset + em->len > em->disk_num_bytes &&
+ !btrfs_extent_map_is_compressed(em))
+ dump_extent_map(fs_info, "disk_num_bytes too small", em);
+ if (!btrfs_extent_map_is_compressed(em) &&
+ em->ram_bytes != em->disk_num_bytes)
+ dump_extent_map(fs_info,
+ "ram_bytes mismatch with disk_num_bytes for non-compressed em",
+ em);
+ } else if (em->offset) {
+ dump_extent_map(fs_info, "non-zero offset for hole/inline", em);
}
- return 0;
}
-static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
+static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map *merge = NULL;
struct rb_node *rb;
@@ -248,43 +356,47 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
if (refcount_read(&em->refs) > 2)
return;
+ if (!can_merge_extent_map(em))
+ return;
+
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
- if (rb && mergable_maps(merge, em)) {
+ merge = rb_entry_safe(rb, struct extent_map, rb_node);
+
+ if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
em->start = merge->start;
- em->orig_start = merge->orig_start;
em->len += merge->len;
- em->block_len += merge->block_len;
- em->block_start = merge->block_start;
- em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
- em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
- rb_erase_cached(&merge->rb_node, &tree->map);
- RB_CLEAR_NODE(&merge->rb_node);
- free_extent_map(merge);
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ merge_ondisk_extents(merge, em, em);
+ em->flags |= EXTENT_FLAG_MERGED;
+
+ validate_extent_map(fs_info, em);
+ remove_em(inode, merge);
+ btrfs_free_extent_map(merge);
}
}
rb = rb_next(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
- if (rb && mergable_maps(em, merge)) {
+ merge = rb_entry_safe(rb, struct extent_map, rb_node);
+
+ if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
em->len += merge->len;
- em->block_len += merge->block_len;
- rb_erase_cached(&merge->rb_node, &tree->map);
- RB_CLEAR_NODE(&merge->rb_node);
- em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ merge_ondisk_extents(em, merge, em);
+ validate_extent_map(fs_info, em);
em->generation = max(em->generation, merge->generation);
- free_extent_map(merge);
+ em->flags |= EXTENT_FLAG_MERGED;
+ remove_em(inode, merge);
+ btrfs_free_extent_map(merge);
}
}
-/**
- * unpin_extent_cache - unpin an extent from the cache
- * @tree: tree to unpin the extent in
+/*
+ * Unpin an extent from the cache.
+ *
+ * @inode: the inode from which we are unpinning an extent range
* @start: logical offset in the file
* @len: length of the extent
* @gen: generation that this extent has been modified in
@@ -292,162 +404,138 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
* Called after an extent has been written to disk properly. Set the generation
* to the generation that actually added the file item to the inode so we know
* we need to sync this extent when we call fsync().
+ *
+ * Returns: 0 on success
+ * -ENOENT when the extent is not found in the tree
+ * -EUCLEAN if the found extent does not match the expected start
*/
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
- u64 gen)
+int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_map_tree *tree = &inode->extent_tree;
int ret = 0;
struct extent_map *em;
- bool prealloc = false;
write_lock(&tree->lock);
- em = lookup_extent_mapping(tree, start, len);
-
- WARN_ON(!em || em->start != start);
-
- if (!em)
+ em = btrfs_lookup_extent_mapping(tree, start, len);
+
+ if (WARN_ON(!em)) {
+ btrfs_warn(fs_info,
+"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
+ btrfs_ino(inode), btrfs_root_id(inode->root),
+ start, start + len, gen);
+ ret = -ENOENT;
goto out;
+ }
- em->generation = gen;
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
- em->mod_start = em->start;
- em->mod_len = em->len;
-
- if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
- prealloc = true;
- clear_bit(EXTENT_FLAG_FILLING, &em->flags);
+ if (WARN_ON(em->start != start)) {
+ btrfs_warn(fs_info,
+"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
+ btrfs_ino(inode), btrfs_root_id(inode->root),
+ em->start, start, start + len, gen);
+ ret = -EUCLEAN;
+ goto out;
}
- try_merge_map(tree, em);
+ em->generation = gen;
+ em->flags &= ~EXTENT_FLAG_PINNED;
- if (prealloc) {
- em->mod_start = em->start;
- em->mod_len = em->len;
- }
+ try_merge_map(inode, em);
- free_extent_map(em);
out:
write_unlock(&tree->lock);
+ btrfs_free_extent_map(em);
return ret;
}
-void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
+void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
{
- clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
- if (extent_map_in_tree(em))
- try_merge_map(tree, em);
+ lockdep_assert_held_write(&inode->extent_tree.lock);
+
+ em->flags &= ~EXTENT_FLAG_LOGGING;
+ if (btrfs_extent_map_in_tree(em))
+ try_merge_map(inode, em);
}
-static inline void setup_extent_mapping(struct extent_map_tree *tree,
+static inline void setup_extent_mapping(struct btrfs_inode *inode,
struct extent_map *em,
- int modified)
+ bool modified)
{
refcount_inc(&em->refs);
- em->mod_start = em->start;
- em->mod_len = em->len;
+
+ ASSERT(list_empty(&em->list));
if (modified)
- list_move(&em->list, &tree->modified_extents);
+ list_add(&em->list, &inode->extent_tree.modified_extents);
else
- try_merge_map(tree, em);
+ try_merge_map(inode, em);
}
-static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
-{
- struct map_lookup *map = em->map_lookup;
- u64 stripe_size = em->orig_block_len;
- int i;
-
- for (i = 0; i < map->num_stripes; i++) {
- struct btrfs_io_stripe *stripe = &map->stripes[i];
- struct btrfs_device *device = stripe->dev;
-
- set_extent_bits_nowait(&device->alloc_state, stripe->physical,
- stripe->physical + stripe_size - 1, bits);
- }
-}
-
-static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
-{
- struct map_lookup *map = em->map_lookup;
- u64 stripe_size = em->orig_block_len;
- int i;
-
- for (i = 0; i < map->num_stripes; i++) {
- struct btrfs_io_stripe *stripe = &map->stripes[i];
- struct btrfs_device *device = stripe->dev;
-
- __clear_extent_bit(&device->alloc_state, stripe->physical,
- stripe->physical + stripe_size - 1, bits,
- 0, 0, NULL, GFP_NOWAIT, NULL);
- }
-}
-
-/**
- * Add new extent map to the extent tree
+/*
+ * Add a new extent map to an inode's extent map tree.
*
- * @tree: tree to insert new map in
+ * @inode: the target inode
* @em: map to insert
* @modified: indicate whether the given @em should be added to the
* modified list, which indicates the extent needs to be logged
*
- * Insert @em into @tree or perform a simple forward/backward merge with
- * existing mappings. The extent_map struct passed in will be inserted
- * into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was successful.
+ * Insert @em into the @inode's extent map tree or perform a simple
+ * forward/backward merge with existing mappings. The extent_map struct passed
+ * in will be inserted into the tree directly, with an additional reference
+ * taken, or a reference dropped if the merge attempt was successful.
*/
-int add_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *em, int modified)
+static int add_extent_mapping(struct btrfs_inode *inode,
+ struct extent_map *em, bool modified)
{
- int ret = 0;
+ struct extent_map_tree *tree = &inode->extent_tree;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
lockdep_assert_held_write(&tree->lock);
- ret = tree_insert(&tree->map, em);
+ validate_extent_map(fs_info, em);
+ ret = tree_insert(&tree->root, em);
if (ret)
- goto out;
+ return ret;
- setup_extent_mapping(tree, em, modified);
- if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
- extent_map_device_set_bits(em, CHUNK_ALLOCATED);
- extent_map_device_clear_bits(em, CHUNK_TRIMMED);
- }
-out:
- return ret;
+ setup_extent_mapping(inode, em, modified);
+
+ if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(root)))
+ percpu_counter_inc(&fs_info->evictable_extent_maps);
+
+ return 0;
}
-static struct extent_map *
-__lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len, int strict)
+static struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len, bool strict)
{
struct extent_map *em;
struct rb_node *rb_node;
- struct rb_node *prev = NULL;
- struct rb_node *next = NULL;
+ struct rb_node *prev_or_next = NULL;
u64 end = range_end(start, len);
- rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
+ rb_node = tree_search(&tree->root, start, &prev_or_next);
if (!rb_node) {
- if (prev)
- rb_node = prev;
- else if (next)
- rb_node = next;
+ if (prev_or_next)
+ rb_node = prev_or_next;
else
return NULL;
}
em = rb_entry(rb_node, struct extent_map, rb_node);
- if (strict && !(end > em->start && start < extent_map_end(em)))
+ if (strict && !(end > em->start && start < btrfs_extent_map_end(em)))
return NULL;
refcount_inc(&em->refs);
return em;
}
-/**
- * lookup_extent_mapping - lookup extent_map
+/*
+ * Lookup extent_map that intersects @start + @len range.
+ *
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
@@ -457,14 +545,15 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
* intersect, so check the object returned carefully to make sure that no
* additional lookups are needed.
*/
-struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len)
+struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
{
- return __lookup_extent_mapping(tree, start, len, 1);
+ return lookup_extent_mapping(tree, start, len, true);
}
-/**
- * search_extent_mapping - find a nearby extent map
+/*
+ * Find a nearby extent map intersecting @start + @len (not an exact search).
+ *
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
@@ -474,47 +563,57 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
*
* If one can't be found, any nearby extent may be returned
*/
-struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len)
+struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
{
- return __lookup_extent_mapping(tree, start, len, 0);
+ return lookup_extent_mapping(tree, start, len, false);
}
-/**
- * remove_extent_mapping - removes an extent_map from the extent tree
- * @tree: extent tree to remove from
+/*
+ * Remove an extent_map from its inode's extent tree.
+ *
+ * @inode: the inode the extent map belongs to
* @em: extent map being removed
*
- * Removes @em from @tree. No reference counts are dropped, and no checks
- * are done to see if the range is in use
+ * Remove @em from the extent tree of @inode. No reference counts are dropped,
+ * and no checks are done to see if the range is in use.
*/
-void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
+void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
{
- WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
- rb_erase_cached(&em->rb_node, &tree->map);
- if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+ struct extent_map_tree *tree = &inode->extent_tree;
+
+ lockdep_assert_held_write(&tree->lock);
+
+ WARN_ON(em->flags & EXTENT_FLAG_PINNED);
+ if (!(em->flags & EXTENT_FLAG_LOGGING))
list_del_init(&em->list);
- if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
- extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
- RB_CLEAR_NODE(&em->rb_node);
+
+ remove_em(inode, em);
}
-void replace_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *cur,
- struct extent_map *new,
- int modified)
+static void replace_extent_mapping(struct btrfs_inode *inode,
+ struct extent_map *cur,
+ struct extent_map *new,
+ bool modified)
{
- WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
- ASSERT(extent_map_in_tree(cur));
- if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_map_tree *tree = &inode->extent_tree;
+
+ lockdep_assert_held_write(&tree->lock);
+
+ validate_extent_map(fs_info, new);
+
+ WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
+ ASSERT(btrfs_extent_map_in_tree(cur));
+ if (!(cur->flags & EXTENT_FLAG_LOGGING))
list_del_init(&cur->list);
- rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
+ rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
RB_CLEAR_NODE(&cur->rb_node);
- setup_extent_mapping(tree, new, modified);
+ setup_extent_mapping(inode, new, modified);
}
-static struct extent_map *next_extent_map(struct extent_map *em)
+static struct extent_map *next_extent_map(const struct extent_map *em)
{
struct rb_node *next;
@@ -540,7 +639,7 @@ static struct extent_map *prev_extent_map(struct extent_map *em)
* and an extent that you want to insert, deal with overlap and insert
* the best fitted new extent into the tree.
*/
-static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
+static noinline int merge_extent_mapping(struct btrfs_inode *inode,
struct extent_map *existing,
struct extent_map *em,
u64 map_start)
@@ -551,7 +650,8 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
u64 end;
u64 start_diff;
- BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
+ if (map_start < em->start || map_start >= btrfs_extent_map_end(em))
+ return -EINVAL;
if (existing->start > map_start) {
next = existing;
@@ -561,26 +661,22 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
next = next_extent_map(prev);
}
- start = prev ? extent_map_end(prev) : em->start;
+ start = prev ? btrfs_extent_map_end(prev) : em->start;
start = max_t(u64, start, em->start);
- end = next ? next->start : extent_map_end(em);
- end = min_t(u64, end, extent_map_end(em));
+ end = next ? next->start : btrfs_extent_map_end(em);
+ end = min_t(u64, end, btrfs_extent_map_end(em));
start_diff = start - em->start;
em->start = start;
em->len = end - start;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
- em->block_start += start_diff;
- em->block_len = em->len;
- }
- return add_extent_mapping(em_tree, em, 0);
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ em->offset += start_diff;
+ return add_extent_mapping(inode, em, false);
}
-/**
- * Add extent mapping into em_tree
+/*
+ * Add extent mapping into an inode's extent map tree.
*
- * @fs_info: the filesystem
- * @em_tree: extent tree into which we want to insert the extent mapping
+ * @inode: target inode
* @em_in: extent we are inserting
* @start: start of the logical range btrfs_get_extent() is requesting
* @len: length of the logical range btrfs_get_extent() is requesting
@@ -588,8 +684,8 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
* Note that @em_in's range may be different from [start, start+len),
* but they must be overlapped.
*
- * Insert @em_in into @em_tree. In case there is an overlapping range, handle
- * the -EEXIST by either:
+ * Insert @em_in into the inode's extent map tree. In case there is an
+ * overlapping range, handle the -EEXIST by either:
* a) Returning the existing extent in @em_in if @start is within the
* existing em.
* b) Merge the existing extent with @em_in passed in.
@@ -597,14 +693,21 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
* Return 0 on success, otherwise -EEXIST.
*
*/
-int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree,
+int btrfs_add_extent_mapping(struct btrfs_inode *inode,
struct extent_map **em_in, u64 start, u64 len)
{
int ret;
struct extent_map *em = *em_in;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
- ret = add_extent_mapping(em_tree, em, 0);
+ /*
+ * Tree-checker should have rejected any inline extent with non-zero
+ * file offset. Here just do a sanity check.
+ */
+ if (em->disk_bytenr == EXTENT_MAP_INLINE)
+ ASSERT(em->start == 0);
+
+ ret = add_extent_mapping(inode, em, false);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
@@ -612,9 +715,7 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
if (ret == -EEXIST) {
struct extent_map *existing;
- ret = 0;
-
- existing = search_extent_mapping(em_tree, start, len);
+ existing = btrfs_search_extent_mapping(&inode->extent_tree, start, len);
trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
@@ -623,8 +724,8 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
* extent causing the -EEXIST.
*/
if (start >= existing->start &&
- start < extent_map_end(existing)) {
- free_extent_map(em);
+ start < btrfs_extent_map_end(existing)) {
+ btrfs_free_extent_map(em);
*em_in = existing;
ret = 0;
} else {
@@ -635,20 +736,647 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
* The existing extent map is the one nearest to
* the [start, start + len) range which overlaps
*/
- ret = merge_extent_mapping(em_tree, existing,
- em, start);
- if (ret) {
- free_extent_map(em);
+ ret = merge_extent_mapping(inode, existing, em, start);
+ if (WARN_ON(ret)) {
+ btrfs_free_extent_map(em);
*em_in = NULL;
- WARN_ONCE(ret,
-"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
- ret, existing->start, existing->len,
- orig_start, orig_len);
+ btrfs_warn(fs_info,
+"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
+ existing->start, btrfs_extent_map_end(existing),
+ orig_start, orig_start + orig_len, start);
}
- free_extent_map(existing);
+ btrfs_free_extent_map(existing);
}
}
ASSERT(ret == 0 || ret == -EEXIST);
return ret;
}
+
+/*
+ * Drop all extent maps from a tree in the fastest possible way, rescheduling
+ * if needed. This avoids searching the tree, from the root down to the first
+ * extent map, before each deletion.
+ */
+static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
+{
+ struct extent_map_tree *tree = &inode->extent_tree;
+ struct rb_node *node;
+
+ write_lock(&tree->lock);
+ node = rb_first(&tree->root);
+ while (node) {
+ struct extent_map *em;
+ struct rb_node *next = rb_next(node);
+
+ em = rb_entry(node, struct extent_map, rb_node);
+ em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
+ btrfs_remove_extent_mapping(inode, em);
+ btrfs_free_extent_map(em);
+
+ if (cond_resched_rwlock_write(&tree->lock))
+ node = rb_first(&tree->root);
+ else
+ node = next;
+ }
+ write_unlock(&tree->lock);
+}
+
+/*
+ * Drop all extent maps in a given range.
+ *
+ * @inode: The target inode.
+ * @start: Start offset of the range.
+ * @end: End offset of the range (inclusive value).
+ * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
+ *
+ * This drops all the extent maps that intersect the given range [@start, @end].
+ * Extent maps that partially overlap the range and extend before or beyond it
+ * are split.
+ * The caller should have locked an appropriate file range in the inode's io
+ * tree before calling this function.
+ */
+void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ bool skip_pinned)
+{
+ struct extent_map *split;
+ struct extent_map *split2;
+ struct extent_map *em;
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ u64 len = end - start + 1;
+
+ WARN_ON(end < start);
+ if (end == (u64)-1) {
+ if (start == 0 && !skip_pinned) {
+ drop_all_extent_maps_fast(inode);
+ return;
+ }
+ len = (u64)-1;
+ } else {
+ /* Make end offset exclusive for use in the loop below. */
+ end++;
+ }
+
+ /*
+ * It's ok if we fail to allocate the extent maps, see the comment near
+ * the bottom of the loop below. We only need two spare extent maps in
+ * the worst case, where the first extent map that intersects our range
+ * starts before the range and the last extent map that intersects our
+ * range ends after our range (and they might be the same extent map),
+ * because we need to split those two extent maps at the boundaries.
+ */
+ split = btrfs_alloc_extent_map();
+ split2 = btrfs_alloc_extent_map();
+
+ write_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, start, len);
+
+ while (em) {
+ /* btrfs_extent_map_end() returns an exclusive value (last byte + 1). */
+ const u64 em_end = btrfs_extent_map_end(em);
+ struct extent_map *next_em = NULL;
+ u64 gen;
+ unsigned long flags;
+ bool modified;
+
+ if (em_end < end) {
+ next_em = next_extent_map(em);
+ if (next_em) {
+ if (next_em->start < end)
+ refcount_inc(&next_em->refs);
+ else
+ next_em = NULL;
+ }
+ }
+
+ if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
+ start = em_end;
+ goto next;
+ }
+
+ flags = em->flags;
+ /*
+ * In case we split the extent map, we want to preserve the
+ * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
+ * it on the new extent maps.
+ */
+ em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
+ modified = !list_empty(&em->list);
+
+ /*
+ * The extent map does not cross our target range, so no need to
+ * split it, we can remove it directly.
+ */
+ if (em->start >= start && em_end <= end)
+ goto remove_em;
+
+ gen = em->generation;
+
+ if (em->start < start) {
+ if (!split) {
+ split = split2;
+ split2 = NULL;
+ if (!split)
+ goto remove_em;
+ }
+ split->start = em->start;
+ split->len = start - em->start;
+
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ split->disk_bytenr = em->disk_bytenr;
+ split->disk_num_bytes = em->disk_num_bytes;
+ split->offset = em->offset;
+ split->ram_bytes = em->ram_bytes;
+ } else {
+ split->disk_bytenr = em->disk_bytenr;
+ split->disk_num_bytes = 0;
+ split->offset = 0;
+ split->ram_bytes = split->len;
+ }
+
+ split->generation = gen;
+ split->flags = flags;
+ replace_extent_mapping(inode, em, split, modified);
+ btrfs_free_extent_map(split);
+ split = split2;
+ split2 = NULL;
+ }
+ if (em_end > end) {
+ if (!split) {
+ split = split2;
+ split2 = NULL;
+ if (!split)
+ goto remove_em;
+ }
+ split->start = end;
+ split->len = em_end - end;
+ split->disk_bytenr = em->disk_bytenr;
+ split->flags = flags;
+ split->generation = gen;
+
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ split->disk_num_bytes = em->disk_num_bytes;
+ split->offset = em->offset + end - em->start;
+ split->ram_bytes = em->ram_bytes;
+ } else {
+ split->disk_num_bytes = 0;
+ split->offset = 0;
+ split->ram_bytes = split->len;
+ }
+
+ if (btrfs_extent_map_in_tree(em)) {
+ replace_extent_mapping(inode, em, split, modified);
+ } else {
+ int ret;
+
+ ret = add_extent_mapping(inode, split, modified);
+ /* Logic error, shouldn't happen. */
+ ASSERT(ret == 0);
+ if (WARN_ON(ret != 0) && modified)
+ btrfs_set_inode_full_sync(inode);
+ }
+ btrfs_free_extent_map(split);
+ split = NULL;
+ }
+remove_em:
+ if (btrfs_extent_map_in_tree(em)) {
+ /*
+ * If the extent map is still in the tree it means that
+ * either of the following is true:
+ *
+ * 1) It fits entirely in our range (doesn't end beyond
+ * it or starts before it);
+ *
+ * 2) It starts before our range and/or ends after our
+ * range, and we were not able to allocate the extent
+ * maps for split operations, @split and @split2.
+ *
+ * If we are at case 2) then we just remove the entire
+ * extent map - this is fine since anyone who needs to
+ * access the subranges outside our range will just
+ * load it again from the subvolume tree's file extent
+ * item. However if the extent map was in the list of
+ * modified extents, then we must mark the inode for a
+ * full fsync, otherwise a fast fsync will miss this
+ * extent if it's new and needs to be logged.
+ */
+ if ((em->start < start || em_end > end) && modified) {
+ ASSERT(!split);
+ btrfs_set_inode_full_sync(inode);
+ }
+ btrfs_remove_extent_mapping(inode, em);
+ }
+
+ /*
+ * Once for the tree reference (we replaced or removed the
+ * extent map from the tree).
+ */
+ btrfs_free_extent_map(em);
+next:
+ /* Once for us (for our lookup reference). */
+ btrfs_free_extent_map(em);
+
+ em = next_em;
+ }
+
+ write_unlock(&em_tree->lock);
+
+ btrfs_free_extent_map(split);
+ btrfs_free_extent_map(split2);
+}
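
The split logic above trims an extent map around the dropped range, bumping the right piece's offset by (end - em->start). A small user-space sketch of that arithmetic with invented numbers (dropping [256K, 768K) out of a 1 MiB uncompressed extent map):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* An uncompressed extent map covering file range [0, 1M). */
        uint64_t em_start = 0, em_len = 1048576, em_offset = 0;
        /* Range being dropped: [256K, 768K). */
        uint64_t start = 262144, end = 786432;

        /* Left piece keeps the original offset into the data extent. */
        uint64_t left_start = em_start;
        uint64_t left_len = start - em_start;
        uint64_t left_offset = em_offset;

        /* Right piece skips everything up to @end inside the data extent. */
        uint64_t right_start = end;
        uint64_t right_len = em_start + em_len - end;
        uint64_t right_offset = em_offset + end - em_start;

        printf("left:  start=%llu len=%llu offset=%llu\n",
               (unsigned long long)left_start, (unsigned long long)left_len,
               (unsigned long long)left_offset);
        printf("right: start=%llu len=%llu offset=%llu\n",
               (unsigned long long)right_start, (unsigned long long)right_len,
               (unsigned long long)right_offset);
        return 0;
}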
+
+/*
+ * Replace a range in the inode's extent map tree with a new extent map.
+ *
+ * @inode: The target inode.
+ * @new_em: The new extent map to add to the inode's extent map tree.
+ * @modified: Indicate if the new extent map should be added to the list of
+ * modified extents (for fast fsync tracking).
+ *
+ * Drops all the extent maps in the inode's extent map tree that intersect the
+ * range of the new extent map and adds the new extent map to the tree.
+ * The caller should have locked an appropriate file range in the inode's io
+ * tree before calling this function.
+ */
+int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
+ struct extent_map *new_em,
+ bool modified)
+{
+ const u64 end = new_em->start + new_em->len - 1;
+ struct extent_map_tree *tree = &inode->extent_tree;
+ int ret;
+
+ ASSERT(!btrfs_extent_map_in_tree(new_em));
+
+ /*
+ * The caller has locked an appropriate file range in the inode's io
+ * tree, but getting -EEXIST when adding the new extent map can still
+ * happen in case there are extents that partially cover the range, and
+ * this is due to two tasks operating on different parts of the extent.
+ * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
+ * btrfs_get_extent") for an example and details.
+ */
+ do {
+ btrfs_drop_extent_map_range(inode, new_em->start, end, false);
+ write_lock(&tree->lock);
+ ret = add_extent_mapping(inode, new_em, modified);
+ write_unlock(&tree->lock);
+ } while (ret == -EEXIST);
+
+ return ret;
+}
+
+/*
+ * Split off the first @pre bytes from the extent_map at [start, start + len),
+ * and set the disk_bytenr for it to @new_logical.
+ *
+ * This function is used when an ordered_extent needs to be split.
+ */
+int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
+ u64 new_logical)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ struct extent_map *split_pre = NULL;
+ struct extent_map *split_mid = NULL;
+ int ret = 0;
+ unsigned long flags;
+
+ ASSERT(pre != 0);
+ ASSERT(pre < len);
+
+ split_pre = btrfs_alloc_extent_map();
+ if (!split_pre)
+ return -ENOMEM;
+ split_mid = btrfs_alloc_extent_map();
+ if (!split_mid) {
+ ret = -ENOMEM;
+ goto out_free_pre;
+ }
+
+ btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
+ write_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, start, len);
+ if (unlikely(!em)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ ASSERT(em->len == len);
+ ASSERT(!btrfs_extent_map_is_compressed(em));
+ ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
+ ASSERT(em->flags & EXTENT_FLAG_PINNED);
+ ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
+ ASSERT(!list_empty(&em->list));
+
+ flags = em->flags;
+ em->flags &= ~EXTENT_FLAG_PINNED;
+
+ /* First, replace the em with a new extent_map starting from em->start. */
+ split_pre->start = em->start;
+ split_pre->len = pre;
+ split_pre->disk_bytenr = new_logical;
+ split_pre->disk_num_bytes = split_pre->len;
+ split_pre->offset = 0;
+ split_pre->ram_bytes = split_pre->len;
+ split_pre->flags = flags;
+ split_pre->generation = em->generation;
+
+ replace_extent_mapping(inode, em, split_pre, true);
+
+ /*
+ * Now we only have an extent_map at:
+ * [em->start, em->start + pre]
+ */
+
+ /* Insert the middle extent_map. */
+ split_mid->start = em->start + pre;
+ split_mid->len = em->len - pre;
+ split_mid->disk_bytenr = btrfs_extent_map_block_start(em) + pre;
+ split_mid->disk_num_bytes = split_mid->len;
+ split_mid->offset = 0;
+ split_mid->ram_bytes = split_mid->len;
+ split_mid->flags = flags;
+ split_mid->generation = em->generation;
+ add_extent_mapping(inode, split_mid, true);
+
+ /* Once for us */
+ btrfs_free_extent_map(em);
+ /* Once for the tree */
+ btrfs_free_extent_map(em);
+
+out_unlock:
+ write_unlock(&em_tree->lock);
+ btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
+ btrfs_free_extent_map(split_mid);
+out_free_pre:
+ btrfs_free_extent_map(split_pre);
+ return ret;
+}
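
As a worked illustration of the pre/mid split above, the sketch below recomputes the two ranges with made-up numbers: the pre part points at the assumed new_logical location, while the mid part keeps the original data extent shifted by pre. This is plain user-space arithmetic, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Original extent map: file [1M, 1M+256K), data extent at 8M. */
        uint64_t em_start = 1048576, em_len = 262144, em_disk = 8388608;
        uint64_t pre = 65536;                   /* bytes split off the front */
        uint64_t new_logical = 16777216;        /* assumed new location for the pre part */

        uint64_t pre_start = em_start, pre_len = pre, pre_disk = new_logical;
        uint64_t mid_start = em_start + pre, mid_len = em_len - pre;
        uint64_t mid_disk = em_disk + pre;

        printf("pre: [%llu, %llu) -> disk %llu\n",
               (unsigned long long)pre_start,
               (unsigned long long)(pre_start + pre_len),
               (unsigned long long)pre_disk);
        printf("mid: [%llu, %llu) -> disk %llu\n",
               (unsigned long long)mid_start,
               (unsigned long long)(mid_start + mid_len),
               (unsigned long long)mid_disk);
        return 0;
}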
+
+struct btrfs_em_shrink_ctx {
+ long nr_to_scan;
+ long scanned;
+};
+
+static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info);
+ struct extent_map_tree *tree = &inode->extent_tree;
+ long nr_dropped = 0;
+ struct rb_node *node;
+
+ lockdep_assert_held_write(&tree->lock);
+
+ /*
+ * Take the mmap lock so that we serialize with the inode logging phase
+ * of fsync because we may need to set the full sync flag on the inode,
+ * in case we have to remove extent maps in the tree's list of modified
+ * extents. If we set the full sync flag in the inode while an fsync is
+ * in progress, we may risk missing new extents because before the flag
+ * is set, fsync decides to only wait for writeback to complete and then
+ * during inode logging it sees the flag set and uses the subvolume tree
+ * to find new extents, which may not be there yet because ordered
+ * extents haven't completed yet.
+ *
+ * We also do a try lock because we don't want to block for too long and
+ * we are holding the extent map tree's lock in write mode.
+ */
+ if (!down_read_trylock(&inode->i_mmap_lock))
+ return 0;
+
+ node = rb_first(&tree->root);
+ while (node) {
+ struct rb_node *next = rb_next(node);
+ struct extent_map *em;
+
+ em = rb_entry(node, struct extent_map, rb_node);
+ ctx->scanned++;
+
+ if (em->flags & EXTENT_FLAG_PINNED)
+ goto next;
+
+ /*
+ * If the extent map is in the list of modified extents (new) and its
+ * generation is the same (or is greater than) the current fs
+ * generation, it means it was not yet persisted so we have to
+ * set the full sync flag so that the next fsync will not miss
+ * it.
+ */
+ if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
+ btrfs_set_inode_full_sync(inode);
+
+ btrfs_remove_extent_mapping(inode, em);
+ trace_btrfs_extent_map_shrinker_remove_em(inode, em);
+ /* Drop the reference for the tree. */
+ btrfs_free_extent_map(em);
+ nr_dropped++;
+next:
+ if (ctx->scanned >= ctx->nr_to_scan)
+ break;
+
+ /*
+ * Stop if we need to reschedule or there's contention on the
+ * lock. This is to avoid slowing other tasks trying to take the
+ * lock.
+ */
+ if (need_resched() || rwlock_needbreak(&tree->lock) ||
+ btrfs_fs_closing(fs_info))
+ break;
+ node = next;
+ }
+ up_read(&inode->i_mmap_lock);
+
+ return nr_dropped;
+}
+
+static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
+ u64 min_ino)
+{
+ struct btrfs_inode *inode;
+ unsigned long from = min_ino;
+
+ xa_lock(&root->inodes);
+ while (true) {
+ struct extent_map_tree *tree;
+
+ inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
+ if (!inode)
+ break;
+
+ tree = &inode->extent_tree;
+
+ /*
+ * We want to be fast so if the lock is busy we don't want to
+ * spend time waiting for it (some task is about to do IO for
+ * the inode).
+ */
+ if (!write_trylock(&tree->lock))
+ goto next;
+
+ /*
+ * Skip inode if it doesn't have loaded extent maps, so we avoid
+ * getting a reference and doing an iput later. This includes
+ * cases like files that were opened for things like stat(2), or
+ * files with all extent maps previously released through the
+ * release folio callback (btrfs_release_folio()) or released in
+ * a previous run, or directories which never have extent maps.
+ */
+ if (RB_EMPTY_ROOT(&tree->root)) {
+ write_unlock(&tree->lock);
+ goto next;
+ }
+
+ if (igrab(&inode->vfs_inode))
+ break;
+
+ write_unlock(&tree->lock);
+next:
+ from = btrfs_ino(inode) + 1;
+ cond_resched_lock(&root->inodes.xa_lock);
+ }
+ xa_unlock(&root->inodes);
+
+ return inode;
+}
+
+static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_inode *inode;
+ long nr_dropped = 0;
+ u64 min_ino = fs_info->em_shrinker_last_ino + 1;
+
+ inode = find_first_inode_to_shrink(root, min_ino);
+ while (inode) {
+ nr_dropped += btrfs_scan_inode(inode, ctx);
+ write_unlock(&inode->extent_tree.lock);
+
+ min_ino = btrfs_ino(inode) + 1;
+ fs_info->em_shrinker_last_ino = btrfs_ino(inode);
+ iput(&inode->vfs_inode);
+
+ if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
+ break;
+
+ cond_resched();
+
+ inode = find_first_inode_to_shrink(root, min_ino);
+ }
+
+ if (inode) {
+ /*
+ * There are still inodes in this root or we happened to process
+ * the last one and reached the scan limit. In either case set
+ * the current root to this one, so we'll resume from the next
+ * inode if there is one or we will find out this was the last
+ * one and move to the next root.
+ */
+ fs_info->em_shrinker_last_root = btrfs_root_id(root);
+ } else {
+ /*
+ * No more inodes in this root, set em_shrinker_last_ino to 0 so
+ * that when processing the next root we start from its first inode.
+ */
+ fs_info->em_shrinker_last_ino = 0;
+ fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1;
+ }
+
+ return nr_dropped;
+}
+
+static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_em_shrink_ctx ctx;
+ u64 start_root_id;
+ u64 next_root_id;
+ bool cycled = false;
+ long nr_dropped = 0;
+
+ fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work);
+
+ ctx.scanned = 0;
+ ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan);
+
+ start_root_id = fs_info->em_shrinker_last_root;
+ next_root_id = fs_info->em_shrinker_last_root;
+
+ if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
+ s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+ trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
+ }
+
+ while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
+ struct btrfs_root *root;
+ unsigned long count;
+
+ cond_resched();
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ (void **)&root,
+ (unsigned long)next_root_id, 1);
+ if (count == 0) {
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ if (start_root_id > 0 && !cycled) {
+ next_root_id = 0;
+ fs_info->em_shrinker_last_root = 0;
+ fs_info->em_shrinker_last_ino = 0;
+ cycled = true;
+ continue;
+ }
+ break;
+ }
+ next_root_id = btrfs_root_id(root) + 1;
+ root = btrfs_grab_root(root);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
+ if (!root)
+ continue;
+
+ if (btrfs_is_fstree(btrfs_root_id(root)))
+ nr_dropped += btrfs_scan_root(root, &ctx);
+
+ btrfs_put_root(root);
+ }
+
+ if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
+ s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+ trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
+ }
+
+ atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
+}
+
+void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+{
+ /*
+ * Do nothing if the shrinker is already running. In case of high memory
+ * pressure we can have a lot of tasks calling us and all passing the
+ * same nr_to_scan value, but in reality we may need to free only
+ * nr_to_scan extent maps (or fewer). In case we need to free more than
+ * that, we will be called again by the fs shrinker, so no worries about
+ * not doing enough work to reclaim memory from extent maps.
+ * We can also be repeatedly called with the same nr_to_scan value
+ * simply because the shrinker runs asynchronously and multiple calls
+ * to this function are made before the shrinker makes enough progress.
+ *
+ * That's why we set the atomic counter to nr_to_scan only if its
+ * current value is zero, instead of incrementing the counter by
+ * nr_to_scan.
+ */
+ if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
+ return;
+
+ queue_work(system_dfl_wq, &fs_info->em_shrinker_work);
+}
+
+void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
+{
+ atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
+ INIT_WORK(&fs_info->em_shrinker_work, btrfs_extent_map_shrinker_worker);
+}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 8e217337dff9..6f685f3c9327 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -3,98 +3,193 @@
#ifndef BTRFS_EXTENT_MAP_H
#define BTRFS_EXTENT_MAP_H
+#include <linux/compiler_types.h>
+#include <linux/spinlock_types.h>
#include <linux/rbtree.h>
+#include <linux/list.h>
#include <linux/refcount.h>
+#include "fs.h"
+
+struct btrfs_inode;
+struct btrfs_fs_info;
#define EXTENT_MAP_LAST_BYTE ((u64)-4)
#define EXTENT_MAP_HOLE ((u64)-3)
#define EXTENT_MAP_INLINE ((u64)-2)
-/* used only during fiemap calls */
-#define EXTENT_MAP_DELALLOC ((u64)-1)
/* bits for the extent_map::flags field */
enum {
/* this entry not yet on disk, don't free it */
- EXTENT_FLAG_PINNED,
- EXTENT_FLAG_COMPRESSED,
+ ENUM_BIT(EXTENT_FLAG_PINNED),
+ ENUM_BIT(EXTENT_FLAG_COMPRESS_ZLIB),
+ ENUM_BIT(EXTENT_FLAG_COMPRESS_LZO),
+ ENUM_BIT(EXTENT_FLAG_COMPRESS_ZSTD),
/* pre-allocated extent */
- EXTENT_FLAG_PREALLOC,
+ ENUM_BIT(EXTENT_FLAG_PREALLOC),
/* Logging this extent */
- EXTENT_FLAG_LOGGING,
- /* Filling in a preallocated extent */
- EXTENT_FLAG_FILLING,
- /* filesystem extent mapping type */
- EXTENT_FLAG_FS_MAPPING,
+ ENUM_BIT(EXTENT_FLAG_LOGGING),
+ /* This em is merged from two or more physically adjacent ems */
+ ENUM_BIT(EXTENT_FLAG_MERGED),
};
+/*
+ * This structure represents file extents and holes.
+ *
+ * Unlike on-disk file extent items, extent maps can be merged to save memory.
+ * This means members only match file extent items before any merging.
+ *
+ * Keep this structure as compact as possible, as we can have really large
+ * amounts of allocated extent maps at any time.
+ */
struct extent_map {
struct rb_node rb_node;
- /* all of these are in bytes */
+ /* All of these are in bytes. */
+
+ /* File offset matching the offset of a BTRFS_EXTENT_ITEM_KEY key. */
u64 start;
+
+ /*
+ * Length of the file extent.
+ *
+ * For non-inlined file extents it's btrfs_file_extent_item::num_bytes.
+ * For inline extents it's sectorsize, since inline data starts at
+ * offsetof(struct btrfs_file_extent_item, disk_bytenr), so
+ * btrfs_file_extent_item::num_bytes is not valid.
+ */
u64 len;
- u64 mod_start;
- u64 mod_len;
- u64 orig_start;
- u64 orig_block_len;
+
+ /*
+ * The bytenr of the full on-disk extent.
+ *
+ * For regular extents it's btrfs_file_extent_item::disk_bytenr.
+ * For holes it's EXTENT_MAP_HOLE and for inline extents it's
+ * EXTENT_MAP_INLINE.
+ */
+ u64 disk_bytenr;
+
+ /*
+ * The full on-disk extent length, matching
+ * btrfs_file_extent_item::disk_num_bytes.
+ */
+ u64 disk_num_bytes;
+
+ /*
+ * Offset inside the decompressed extent.
+ *
+ * For regular extents it's btrfs_file_extent_item::offset.
+ * For holes and inline extents it's 0.
+ */
+ u64 offset;
+
+ /*
+ * The decompressed size of the whole on-disk extent, matching
+ * btrfs_file_extent_item::ram_bytes.
+ */
u64 ram_bytes;
- u64 block_start;
- u64 block_len;
+
+ /*
+ * Generation of the extent map; for a merged em it's the highest
+ * generation of all merged ems.
+ * For non-merged extents, it's from btrfs_file_extent_item::generation.
+ */
u64 generation;
- unsigned long flags;
- /* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
- struct map_lookup *map_lookup;
+ u32 flags;
refcount_t refs;
- unsigned int compress_type;
struct list_head list;
};
struct extent_map_tree {
- struct rb_root_cached map;
+ struct rb_root root;
struct list_head modified_extents;
rwlock_t lock;
};
-static inline int extent_map_in_tree(const struct extent_map *em)
+struct btrfs_inode;
+
+static inline void btrfs_extent_map_set_compression(struct extent_map *em,
+ enum btrfs_compression_type type)
+{
+ if (type == BTRFS_COMPRESS_ZLIB)
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+ else if (type == BTRFS_COMPRESS_LZO)
+ em->flags |= EXTENT_FLAG_COMPRESS_LZO;
+ else if (type == BTRFS_COMPRESS_ZSTD)
+ em->flags |= EXTENT_FLAG_COMPRESS_ZSTD;
+}
+
+static inline enum btrfs_compression_type btrfs_extent_map_compression(
+ const struct extent_map *em)
+{
+ if (em->flags & EXTENT_FLAG_COMPRESS_ZLIB)
+ return BTRFS_COMPRESS_ZLIB;
+
+ if (em->flags & EXTENT_FLAG_COMPRESS_LZO)
+ return BTRFS_COMPRESS_LZO;
+
+ if (em->flags & EXTENT_FLAG_COMPRESS_ZSTD)
+ return BTRFS_COMPRESS_ZSTD;
+
+ return BTRFS_COMPRESS_NONE;
+}
+
+/*
+ * More efficient way to determine if an extent is compressed, instead of using
+ * 'btrfs_extent_map_compression() != BTRFS_COMPRESS_NONE'.
+ */
+static inline bool btrfs_extent_map_is_compressed(const struct extent_map *em)
+{
+ return (em->flags & (EXTENT_FLAG_COMPRESS_ZLIB |
+ EXTENT_FLAG_COMPRESS_LZO |
+ EXTENT_FLAG_COMPRESS_ZSTD)) != 0;
+}
+
+static inline int btrfs_extent_map_in_tree(const struct extent_map *em)
{
return !RB_EMPTY_NODE(&em->rb_node);
}
-static inline u64 extent_map_end(struct extent_map *em)
+static inline u64 btrfs_extent_map_block_start(const struct extent_map *em)
{
- if (em->start + em->len < em->start)
- return (u64)-1;
- return em->start + em->len;
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ if (btrfs_extent_map_is_compressed(em))
+ return em->disk_bytenr;
+ return em->disk_bytenr + em->offset;
+ }
+ return em->disk_bytenr;
}
-static inline u64 extent_map_block_end(struct extent_map *em)
+static inline u64 btrfs_extent_map_end(const struct extent_map *em)
{
- if (em->block_start + em->block_len < em->block_start)
+ if (em->start + em->len < em->start)
return (u64)-1;
- return em->block_start + em->block_len;
+ return em->start + em->len;
}
-void extent_map_tree_init(struct extent_map_tree *tree);
-struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len);
-int add_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *em, int modified);
-void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
-void replace_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *cur,
- struct extent_map *new,
- int modified);
-
-struct extent_map *alloc_extent_map(void);
-void free_extent_map(struct extent_map *em);
-int __init extent_map_init(void);
-void __cold extent_map_exit(void);
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
-void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
-struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len);
-int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree,
+void btrfs_extent_map_tree_init(struct extent_map_tree *tree);
+struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len);
+void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em);
+int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
+ u64 new_logical);
+
+struct extent_map *btrfs_alloc_extent_map(void);
+void btrfs_free_extent_map(struct extent_map *em);
+int __init btrfs_extent_map_init(void);
+void __cold btrfs_extent_map_exit(void);
+int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen);
+void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em);
+struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len);
+int btrfs_add_extent_mapping(struct btrfs_inode *inode,
struct extent_map **em_in, u64 start, u64 len);
+void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
+ u64 start, u64 end,
+ bool skip_pinned);
+int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
+ struct extent_map *new_em,
+ bool modified);
+void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
+void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info);
#endif
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
new file mode 100644
index 000000000000..f2eaaef8422b
--- /dev/null
+++ b/fs/btrfs/fiemap.c
@@ -0,0 +1,929 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "backref.h"
+#include "btrfs_inode.h"
+#include "fiemap.h"
+#include "file.h"
+#include "file-item.h"
+
+struct btrfs_fiemap_entry {
+ u64 offset;
+ u64 phys;
+ u64 len;
+ u32 flags;
+};
+
+/*
+ * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the
+ * file range from the inode's io tree, unlock the subvolume tree search path,
+ * flush the fiemap cache, relock the file range and re-search the subvolume
+ * tree. The value here is something negative that can't be confused with a
+ * valid errno value and is different from 1, because 1 is also a return value
+ * from fiemap_fill_next_extent() and it's often used to mean a btree search
+ * did not find a key, so make it some distinct negative value.
+ */
+#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
+
+/*
+ * Used to:
+ *
+ * - Cache the next entry to be emitted to the fiemap buffer, so that we can
+ * merge extents that are contiguous and can be grouped as a single one;
+ *
+ * - Store extents ready to be written to the fiemap buffer in an intermediary
+ * buffer. This intermediary buffer is to ensure that in case the fiemap
+ * buffer is memory mapped to the fiemap target file, we don't deadlock
+ * during btrfs_page_mkwrite(). This is because during fiemap we are locking
+ * an extent range in order to prevent races with delalloc flushing and
+ * ordered extent completion, which is needed in order to reliably detect
+ * delalloc in holes and prealloc extents. And this can lead to a deadlock
+ * if the fiemap buffer is memory mapped to the file we are running fiemap
+ * against (a silly, useless in practice scenario, but possible) because
+ * btrfs_page_mkwrite() will try to lock the same extent range.
+ */
+struct fiemap_cache {
+ /* An array of ready fiemap entries. */
+ struct btrfs_fiemap_entry *entries;
+ /* Number of entries in the entries array. */
+ int entries_size;
+ /* Index of the next entry in the entries array to write to. */
+ int entries_pos;
+ /*
+ * Once the entries array is full, this indicates the offset of the next
+ * file extent item we must search for in the inode's subvolume tree
+ * after unlocking the extent range in the inode's io tree and releasing
+ * the search path.
+ */
+ u64 next_search_offset;
+ /*
+ * This matches struct fiemap_extent_info::fi_mapped_extents; we count
+ * emitted extents ourselves and stop instead of relying on
+ * fiemap_fill_next_extent(), because we buffer ready fiemap entries in
+ * the @entries array, and we want to stop as soon as we hit the maximum
+ * number of extents to map, not just to save time but also to make the
+ * logic at extent_fiemap() simpler.
+ */
+ unsigned int extents_mapped;
+ /* Fields for the cached extent (an unsubmitted, not yet ready extent). */
+ u64 offset;
+ u64 phys;
+ u64 len;
+ u32 flags;
+ bool cached;
+};
+
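+/*
+ * Copy the buffered fiemap entries from the cache's entries array into the
+ * fiemap buffer through fiemap_fill_next_extent() and reset the array.
+ */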
+static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache)
+{
+ for (int i = 0; i < cache->entries_pos; i++) {
+ struct btrfs_fiemap_entry *entry = &cache->entries[i];
+ int ret;
+
+ ret = fiemap_fill_next_extent(fieinfo, entry->offset,
+ entry->phys, entry->len,
+ entry->flags);
+ /*
+ * Ignore 1 (reached max entries) because we keep track of that
+ * ourselves in emit_fiemap_extent().
+ */
+ if (ret < 0)
+ return ret;
+ }
+ cache->entries_pos = 0;
+
+ return 0;
+}
+
+/*
+ * Helper to submit fiemap extent.
+ *
+ * Will try to merge the current fiemap extent specified by @offset, @phys,
+ * @len and @flags with the cached one.
+ * Only when we fail to merge is the cached one submitted as a fiemap
+ * extent.
+ *
+ * Return value is the same as fiemap_fill_next_extent().
+ */
+static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache,
+ u64 offset, u64 phys, u64 len, u32 flags)
+{
+ struct btrfs_fiemap_entry *entry;
+ u64 cache_end;
+
+ /* Set at the end of extent_fiemap(). */
+ ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
+
+ if (!cache->cached)
+ goto assign;
+
+ /*
+ * When iterating the extents of the inode, at extent_fiemap(), we may
+ * find an extent that starts at an offset behind the end offset of the
+ * previous extent we processed. This happens if fiemap is called
+ * without FIEMAP_FLAG_SYNC and there are ordered extents completing
+ * after we had to unlock the file range, release the search path, emit
+ * the fiemap extents stored in the buffer (cache->entries array) and
+ * then lock the remainder of the range and re-search the btree.
+ *
+ * For example we are in leaf X processing its last item, which is the
+ * file extent item for file range [512K, 1M[, and after
+ * btrfs_next_leaf() releases the path, there's an ordered extent that
+ * completes for the file range [768K, 2M[, and that results in trimming
+ * the file extent item so that it now corresponds to the file range
+ * [512K, 768K[ and a new file extent item is inserted for the file
+ * range [768K, 2M[, which may end up as the last item of leaf X or as
+ * the first item of the next leaf - in either case btrfs_next_leaf()
+ * will leave us with a path pointing to the new extent item, for the
+ * file range [768K, 2M[, since that's the first key that follows the
+ * last one we processed. So in order not to report overlapping extents
+ * to user space, we trim the length of the previously cached extent and
+ * emit it.
+ *
+ * Upon calling btrfs_next_leaf() we may also find an extent with an
+ * offset smaller than or equal to cache->offset, and this happens
+ * when we had a hole or prealloc extent with several delalloc ranges in
+ * it, but after btrfs_next_leaf() released the path, delalloc was
+ * flushed and the resulting ordered extents were completed, so we can
+ * now have found a file extent item for an offset that is smaller than
+ * or equal to what we have in cache->offset. We deal with this as
+ * described below.
+ */
+ cache_end = cache->offset + cache->len;
+ if (cache_end > offset) {
+ if (offset == cache->offset) {
+ /*
+ * We cached a delalloc range (found in the io tree) for
+ * a hole or prealloc extent and we have now found a
+ * file extent item for the same offset. What we have
+ * now is more recent and up to date, so discard what
+ * we had in the cache and use what we have just found.
+ */
+ goto assign;
+ } else if (offset > cache->offset) {
+ /*
+ * The extent range we previously found ends after the
+ * offset of the file extent item we found and that
+ * offset falls somewhere in the middle of that previous
+ * extent range. So adjust the range we previously found
+ * to end at the offset of the file extent item we have
+ * just found, since this extent is more up to date.
+ * Emit that adjusted range and cache the file extent
+ * item we have just found. This corresponds to the case
+ * where a previously found file extent item was split
+ * due to an ordered extent completing.
+ */
+ cache->len = offset - cache->offset;
+ goto emit;
+ } else {
+ const u64 range_end = offset + len;
+
+ /*
+ * The offset of the file extent item we have just found
+ * is behind the cached offset. This means we were
+ * processing a hole or prealloc extent for which we
+ * have found delalloc ranges (in the io tree), so what
+ * we have in the cache is the last delalloc range we
+ * found while the file extent item we found can be
+ * either for a whole delalloc range we previously
+ * emitted or only a part of that range.
+ *
+ * We have two cases here:
+ *
+ * 1) The file extent item's range ends at or behind the
+ * cached extent's end. In this case just ignore the
+ * current file extent item because we don't want to
+ * overlap with previous ranges that may have been
+ * emitted already;
+ *
+ * 2) The file extent item starts behind the currently
+ * cached extent but its end offset goes beyond the
+ * end offset of the cached extent. We don't want to
+ * overlap with a previous range that may have been
+ * emitted already, so we emit the currently cached
+ * extent and then partially store the current file
+ * extent item's range in the cache, for the subrange
+ * going from the cached extent's end to the end of the
+ * file extent item.
+ */
+ if (range_end <= cache_end)
+ return 0;
+
+ if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
+ phys += cache_end - offset;
+
+ offset = cache_end;
+ len = range_end - cache_end;
+ goto emit;
+ }
+ }
+
+ /*
+ * Only merge fiemap extents if:
+ * 1) Their logical addresses are contiguous
+ *
+ * 2) Their physical addresses are contiguous
+ * So truly compressed (physical size smaller than logical size)
+ * extents won't get merged with each other
+ *
+ * 3) They share the same flags
+ */
+ if (cache->offset + cache->len == offset &&
+ cache->phys + cache->len == phys &&
+ cache->flags == flags) {
+ cache->len += len;
+ return 0;
+ }
+
+emit:
+ /* Not mergeable, need to submit cached one */
+
+ if (cache->entries_pos == cache->entries_size) {
+ /*
+ * We will need to re-search from the end offset of the last
+ * stored extent and not from the current offset, because after
+ * unlocking the range and releasing the path, if there's a hole
+ * between that end offset and this current offset, a new extent
+ * may have been inserted due to a new write, so we don't want
+ * to miss it.
+ */
+ entry = &cache->entries[cache->entries_size - 1];
+ cache->next_search_offset = entry->offset + entry->len;
+ cache->cached = false;
+
+ return BTRFS_FIEMAP_FLUSH_CACHE;
+ }
+
+ entry = &cache->entries[cache->entries_pos];
+ entry->offset = cache->offset;
+ entry->phys = cache->phys;
+ entry->len = cache->len;
+ entry->flags = cache->flags;
+ cache->entries_pos++;
+ cache->extents_mapped++;
+
+ if (cache->extents_mapped == fieinfo->fi_extents_max) {
+ cache->cached = false;
+ return 1;
+ }
+assign:
+ cache->cached = true;
+ cache->offset = offset;
+ cache->phys = phys;
+ cache->len = len;
+ cache->flags = flags;
+
+ return 0;
+}
+
+/*
+ * Emit last fiemap cache
+ *
+ * The last fiemap cache may still be cached in the following case:
+ * 0 4k 8k
+ * |<- Fiemap range ->|
+ * |<------------ First extent ----------->|
+ *
+ * In this case, the first extent range will be cached but not emitted.
+ * So we must emit it before ending extent_fiemap().
+ */
+static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache)
+{
+ int ret;
+
+ if (!cache->cached)
+ return 0;
+
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+ cache->len, cache->flags);
+ cache->cached = false;
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
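+/*
+ * Move to the next file extent item of the inode. If the current (cloned)
+ * leaf is exhausted, go to the next leaf and clone it again, reusing the
+ * already allocated extent buffer. Returns 0 on success, 1 if there are no
+ * more file extent items for the inode, and < 0 on error.
+ */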
+static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
+{
+ struct extent_buffer *clone = path->nodes[0];
+ struct btrfs_key key;
+ int slot;
+ int ret;
+
+ path->slots[0]++;
+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
+ return 0;
+
+ /*
+ * Add a temporary extra ref to an already cloned extent buffer to
+ * prevent btrfs_next_leaf() from freeing it. We want to reuse it to
+ * avoid the cost of allocating a new one.
+ */
+ ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
+ refcount_inc(&clone->refs);
+
+ ret = btrfs_next_leaf(inode->root, path);
+ if (ret != 0)
+ goto out;
+
+ /*
+ * Don't bother with cloning if there are no more file extent items for
+ * our inode.
+ */
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
+ ret = 1;
+ goto out;
+ }
+
+ /*
+ * Important to preserve the start field, for the optimizations when
+ * checking if extents are shared (see extent_fiemap()).
+ *
+ * We must set ->start before calling copy_extent_buffer_full(). If we
+ * are on sub-pagesize blocksize, we use ->start to determine the offset
+ * into the folio where our eb exists, and if we update ->start after
+ * the fact then any subsequent reads of the eb may read from a
+ * different offset in the folio than where we originally copied into.
+ */
+ clone->start = path->nodes[0]->start;
+ /* See the comment at fiemap_search_slot() about why we clone. */
+ copy_extent_buffer_full(clone, path->nodes[0]);
+
+ slot = path->slots[0];
+ btrfs_release_path(path);
+ path->nodes[0] = clone;
+ path->slots[0] = slot;
+out:
+ if (ret)
+ free_extent_buffer(clone);
+
+ return ret;
+}
+
+/*
+ * Search for the first file extent item that starts at a given file offset or
+ * the one that starts immediately before that offset.
+ * Returns: 0 on success, < 0 on error, 1 if not found.
+ */
+static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
+ u64 file_offset)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *clone;
+ struct btrfs_key key;
+ int slot;
+ int ret;
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = file_offset;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0 && path->slots[0] > 0) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+ path->slots[0]--;
+ }
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret != 0)
+ return ret;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
+ return 1;
+ }
+
+ /*
+ * We clone the leaf and use it during fiemap. This is because while
+ * using the leaf we do expensive things like checking if an extent is
+ * shared, which can take a long time. In order to prevent blocking
+ * other tasks for too long, we use a clone of the leaf. We have locked
+ * the file range in the inode's io tree, so we know none of our file
+ * extent items can change. This way we avoid blocking other tasks that
+ * want to insert items for other inodes in the same leaf or b+tree
+ * rebalance operations (triggered for example when someone is trying
+ * to push items into this leaf when trying to insert an item in a
+ * neighbour leaf).
+ * We also need the private clone because holding a read lock on an
+ * extent buffer of the subvolume's b+tree will make lockdep unhappy
+ * when we check if extents are shared, as backref walking may need to
+ * lock the same leaf we are processing.
+ */
+ clone = btrfs_clone_extent_buffer(path->nodes[0]);
+ if (!clone)
+ return -ENOMEM;
+
+ slot = path->slots[0];
+ btrfs_release_path(path);
+ path->nodes[0] = clone;
+ path->slots[0] = slot;
+
+ return 0;
+}
+
+/*
+ * Process a range which is a hole or a prealloc extent in the inode's subvolume
+ * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
+ * extent. The end offset (@end) is inclusive.
+ */
+static int fiemap_process_hole(struct btrfs_inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache,
+ struct extent_state **delalloc_cached_state,
+ struct btrfs_backref_share_check_ctx *backref_ctx,
+ u64 disk_bytenr, u64 extent_offset,
+ u64 extent_gen,
+ u64 start, u64 end)
+{
+ const u64 i_size = i_size_read(&inode->vfs_inode);
+ u64 cur_offset = start;
+ u64 last_delalloc_end = 0;
+ u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
+ bool checked_extent_shared = false;
+ int ret;
+
+ /*
+ * There can be no delalloc past i_size, so don't waste time looking for
+ * it beyond i_size.
+ */
+ while (cur_offset < end && cur_offset < i_size) {
+ u64 delalloc_start;
+ u64 delalloc_end;
+ u64 prealloc_start;
+ u64 prealloc_len = 0;
+ bool delalloc;
+
+ delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
+ delalloc_cached_state,
+ &delalloc_start,
+ &delalloc_end);
+ if (!delalloc)
+ break;
+
+ /*
+ * If this is a prealloc extent we have to report every section
+ * of it that has no delalloc.
+ */
+ if (disk_bytenr != 0) {
+ if (last_delalloc_end == 0) {
+ prealloc_start = start;
+ prealloc_len = delalloc_start - start;
+ } else {
+ prealloc_start = last_delalloc_end + 1;
+ prealloc_len = delalloc_start - prealloc_start;
+ }
+ }
+
+ if (prealloc_len > 0) {
+ if (!checked_extent_shared && fieinfo->fi_extents_max) {
+ ret = btrfs_is_data_extent_shared(inode,
+ disk_bytenr,
+ extent_gen,
+ backref_ctx);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ prealloc_flags |= FIEMAP_EXTENT_SHARED;
+
+ checked_extent_shared = true;
+ }
+ ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
+ disk_bytenr + extent_offset,
+ prealloc_len, prealloc_flags);
+ if (ret)
+ return ret;
+ extent_offset += prealloc_len;
+ }
+
+ ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
+ delalloc_end + 1 - delalloc_start,
+ FIEMAP_EXTENT_DELALLOC |
+ FIEMAP_EXTENT_UNKNOWN);
+ if (ret)
+ return ret;
+
+ last_delalloc_end = delalloc_end;
+ cur_offset = delalloc_end + 1;
+ extent_offset += cur_offset - delalloc_start;
+ cond_resched();
+ }
+
+ /*
+ * Either we found no delalloc for the whole prealloc extent or we have
+ * a prealloc extent that spans i_size or starts at or after i_size.
+ */
+ if (disk_bytenr != 0 && last_delalloc_end < end) {
+ u64 prealloc_start;
+ u64 prealloc_len;
+
+ if (last_delalloc_end == 0) {
+ prealloc_start = start;
+ prealloc_len = end + 1 - start;
+ } else {
+ prealloc_start = last_delalloc_end + 1;
+ prealloc_len = end + 1 - prealloc_start;
+ }
+
+ if (!checked_extent_shared && fieinfo->fi_extents_max) {
+ ret = btrfs_is_data_extent_shared(inode,
+ disk_bytenr,
+ extent_gen,
+ backref_ctx);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ prealloc_flags |= FIEMAP_EXTENT_SHARED;
+ }
+ ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
+ disk_bytenr + extent_offset,
+ prealloc_len, prealloc_flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
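+/*
+ * Find the end offset, in the file, of the last file extent item that is not
+ * a hole, and return it in @last_extent_end_ret (0 if the inode has no such
+ * file extent items). Returns 0 on success and < 0 on error.
+ */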
+static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ u64 *last_extent_end_ret)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
+ struct btrfs_key key;
+ u64 disk_bytenr;
+ int ret;
+
+ /*
+ * Lookup the last file extent. We're not using i_size here because
+ * there might be preallocation past i_size.
+ */
+ ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
+ /* There can't be a file extent item at offset (u64)-1 */
+ ASSERT(ret != 0);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * For a non-existing key, btrfs_search_slot() always leaves us at a
+ * slot > 0, except if the btree is empty, which is impossible because
+ * at least it has the inode item for this inode and all the items for
+ * the root inode 256.
+ */
+ ASSERT(path->slots[0] > 0);
+ path->slots[0]--;
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
+ /* No file extent items in the subvolume tree. */
+ *last_extent_end_ret = 0;
+ return 0;
+ }
+
+ /*
+ * For an inline extent, the disk_bytenr is where the inline data starts,
+ * so first check if we have an inline extent item before checking if we
+ * have an implicit hole (disk_bytenr == 0).
+ */
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
+ *last_extent_end_ret = btrfs_file_extent_end(path);
+ return 0;
+ }
+
+ /*
+ * Find the last file extent item that is not a hole (when NO_HOLES is
+ * not enabled). This should take at most 2 iterations in the worst
+ * case: we have one hole file extent item at slot 0 of a leaf and
+ * another hole file extent item as the last item in the previous leaf.
+ * This is because we merge file extent items that represent holes.
+ */
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ while (disk_bytenr == 0) {
+ ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ /* No file extent items that are not holes. */
+ *last_extent_end_ret = 0;
+ return 0;
+ }
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ }
+
+ *last_extent_end_ret = btrfs_file_extent_end(path);
+ return 0;
+}
+
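+/*
+ * Main fiemap implementation: lock the file range, iterate the inode's file
+ * extent items from a cloned leaf and emit fiemap extents for them, looking
+ * for delalloc in holes and prealloc extents. If the internal entries buffer
+ * fills up, unlock, flush the buffered entries to user space and restart the
+ * search from where it stopped.
+ */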
+static int extent_fiemap(struct btrfs_inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct extent_state *cached_state = NULL;
+ struct extent_state *delalloc_cached_state = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct fiemap_cache cache = { 0 };
+ struct btrfs_backref_share_check_ctx *backref_ctx;
+ u64 last_extent_end = 0;
+ u64 prev_extent_end;
+ u64 range_start;
+ u64 range_end;
+ const u64 sectorsize = inode->root->fs_info->sectorsize;
+ bool stopped = false;
+ int ret;
+
+ cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
+ cache.entries = kmalloc_array(cache.entries_size,
+ sizeof(struct btrfs_fiemap_entry),
+ GFP_KERNEL);
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ path = btrfs_alloc_path();
+ if (!cache.entries || !backref_ctx || !path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+restart:
+ range_start = round_down(start, sectorsize);
+ range_end = round_up(start + len, sectorsize);
+ prev_extent_end = range_start;
+
+ btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+
+ ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
+ if (ret < 0)
+ goto out_unlock;
+ btrfs_release_path(path);
+
+ path->reada = READA_FORWARD;
+ ret = fiemap_search_slot(inode, path, range_start);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /*
+ * No file extent item found, but we may have delalloc between
+ * the current offset and i_size. So check for that.
+ */
+ ret = 0;
+ goto check_eof_delalloc;
+ }
+
+ while (prev_extent_end < range_end) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *ei;
+ struct btrfs_key key;
+ u64 extent_end;
+ u64 extent_len;
+ u64 extent_offset = 0;
+ u64 extent_gen;
+ u64 disk_bytenr = 0;
+ u64 flags = 0;
+ int extent_type;
+ u8 compression;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+
+ extent_end = btrfs_file_extent_end(path);
+
+ /*
+ * The first iteration can leave us at an extent item that ends
+ * before our range's start. Move to the next item.
+ */
+ if (extent_end <= range_start)
+ goto next_item;
+
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /* We have an implicit hole (NO_HOLES feature enabled). */
+ if (prev_extent_end < key.offset) {
+ const u64 hole_end = min(key.offset, range_end) - 1;
+
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx, 0, 0, 0,
+ prev_extent_end, hole_end);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /* fiemap_fill_next_extent() told us to stop. */
+ stopped = true;
+ break;
+ }
+
+ /* We've reached the end of the fiemap range, stop. */
+ if (key.offset >= range_end) {
+ stopped = true;
+ break;
+ }
+ }
+
+ extent_len = extent_end - key.offset;
+ ei = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ compression = btrfs_file_extent_compression(leaf, ei);
+ extent_type = btrfs_file_extent_type(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+
+ if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (compression == BTRFS_COMPRESS_NONE)
+ extent_offset = btrfs_file_extent_offset(leaf, ei);
+ }
+
+ if (compression != BTRFS_COMPRESS_NONE)
+ flags |= FIEMAP_EXTENT_ENCODED;
+
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
+ flags |= FIEMAP_EXTENT_NOT_ALIGNED;
+ ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
+ extent_len, flags);
+ } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx,
+ disk_bytenr, extent_offset,
+ extent_gen, key.offset,
+ extent_end - 1);
+ } else if (disk_bytenr == 0) {
+ /* We have an explicit hole. */
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx, 0, 0, 0,
+ key.offset, extent_end - 1);
+ } else {
+ /* We have a regular extent. */
+ if (fieinfo->fi_extents_max) {
+ ret = btrfs_is_data_extent_shared(inode,
+ disk_bytenr,
+ extent_gen,
+ backref_ctx);
+ if (ret < 0)
+ goto out_unlock;
+ else if (ret > 0)
+ flags |= FIEMAP_EXTENT_SHARED;
+ }
+
+ ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
+ disk_bytenr + extent_offset,
+ extent_len, flags);
+ }
+
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /* emit_fiemap_extent() told us to stop. */
+ stopped = true;
+ break;
+ }
+
+ prev_extent_end = extent_end;
+next_item:
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out_unlock;
+ }
+
+ ret = fiemap_next_leaf_item(inode, path);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /* No more file extent items for this inode. */
+ break;
+ }
+ cond_resched();
+ }
+
+check_eof_delalloc:
+ if (!stopped && prev_extent_end < range_end) {
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state, backref_ctx,
+ 0, 0, 0, prev_extent_end, range_end - 1);
+ if (ret < 0)
+ goto out_unlock;
+ prev_extent_end = range_end;
+ }
+
+ if (cache.cached && cache.offset + cache.len >= last_extent_end) {
+ const u64 i_size = i_size_read(&inode->vfs_inode);
+
+ if (prev_extent_end < i_size) {
+ u64 delalloc_start;
+ u64 delalloc_end;
+ bool delalloc;
+
+ delalloc = btrfs_find_delalloc_in_range(inode,
+ prev_extent_end,
+ i_size - 1,
+ &delalloc_cached_state,
+ &delalloc_start,
+ &delalloc_end);
+ if (!delalloc)
+ cache.flags |= FIEMAP_EXTENT_LAST;
+ } else {
+ cache.flags |= FIEMAP_EXTENT_LAST;
+ }
+ }
+
+out_unlock:
+ btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+
+ if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
+ btrfs_release_path(path);
+ ret = flush_fiemap_cache(fieinfo, &cache);
+ if (ret)
+ goto out;
+ len -= cache.next_search_offset - start;
+ start = cache.next_search_offset;
+ goto restart;
+ } else if (ret < 0) {
+ goto out;
+ }
+
+ /*
+ * Must free the path before emitting to the fiemap buffer because we
+ * may have a non-cloned leaf and if the fiemap buffer is memory mapped
+ * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
+ * waiting for an ordered extent that, in order to complete, needs to
+ * modify that leaf, therefore leading to a deadlock.
+ */
+ btrfs_free_path(path);
+ path = NULL;
+
+ ret = flush_fiemap_cache(fieinfo, &cache);
+ if (ret)
+ goto out;
+
+ ret = emit_last_fiemap_cache(fieinfo, &cache);
+out:
+ btrfs_free_extent_state(delalloc_cached_state);
+ kfree(cache.entries);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ return ret;
+}
+
+int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+{
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ int ret;
+
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * fiemap_prep() called filemap_write_and_wait() for the whole possible
+ * file range (0 to LLONG_MAX), but that is not enough if we have
+ * compression enabled. The first filemap_fdatawrite_range() only kicks
+ * in the compression of data (in an async thread) and will return
+ * before the compression is done and writeback is started. A second
+ * filemap_fdatawrite_range() is needed to wait for the compression to
+ * complete and writeback to start. We also need to wait for ordered
+ * extents to complete, because our fiemap implementation uses mainly
+ * file extent items to list the extents, searching for extent maps
+ * only for file ranges with holes or prealloc extents to figure out
+ * if we have delalloc in those ranges.
+ */
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = btrfs_wait_ordered_range(btrfs_inode, 0, LLONG_MAX);
+ if (ret)
+ return ret;
+ }
+
+ btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+ /*
+ * We did an initial flush to avoid holding the inode's lock while
+ * triggering writeback and waiting for the completion of IO and ordered
+ * extents. Now after we locked the inode we do it again, because it's
+ * possible a new write may have happened in between those two steps.
+ */
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = btrfs_wait_ordered_range(btrfs_inode, 0, LLONG_MAX);
+ if (ret) {
+ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+ return ret;
+ }
+ }
+
+ ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
+ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+ return ret;
+}
diff --git a/fs/btrfs/fiemap.h b/fs/btrfs/fiemap.h
new file mode 100644
index 000000000000..cfd74b35988f
--- /dev/null
+++ b/fs/btrfs/fiemap.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_FIEMAP_H
+#define BTRFS_FIEMAP_H
+
+#include <linux/fiemap.h>
+
+int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
+
+#endif /* BTRFS_FIEMAP_H */
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index d1cbb64a78f3..14e5257f0f04 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -9,13 +9,16 @@
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
-#include "misc.h"
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
-#include "volumes.h"
-#include "print-tree.h"
+#include "bio.h"
#include "compression.h"
+#include "fs.h"
+#include "accessors.h"
+#include "file-item.h"
+#include "volumes.h"
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) * 2) / \
@@ -24,8 +27,8 @@
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
PAGE_SIZE))
-/**
- * Set inode's size according to filesystem options
+/*
+ * Set inode's size according to filesystem options.
*
* @inode: inode we want to update the disk_i_size for
* @new_i_size: i_size we want to set to, 0 if we use i_size
@@ -43,29 +46,29 @@
*/
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start, end, i_size;
- int ret;
+ bool found;
+ spin_lock(&inode->lock);
i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
- if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ if (!inode->file_extent_tree) {
inode->disk_i_size = i_size;
- return;
+ goto out_unlock;
}
- spin_lock(&inode->lock);
- ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
- &end, EXTENT_DIRTY);
- if (!ret && start == 0)
+ found = btrfs_find_contiguous_extent_bit(inode->file_extent_tree, 0, &start,
+ &end, EXTENT_DIRTY);
+ if (found && start == 0)
i_size = min(i_size, end + 1);
else
i_size = 0;
inode->disk_i_size = i_size;
+out_unlock:
spin_unlock(&inode->lock);
}
-/**
- * Mark range within a file as having a new extent inserted
+/*
+ * Mark range within a file as having a new extent inserted.
*
* @inode: inode being modified
* @start: start file offset of the file extent we've inserted
@@ -81,19 +84,20 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len)
{
+ if (!inode->file_extent_tree)
+ return 0;
+
if (len == 0)
return 0;
ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));
- if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
- return 0;
- return set_extent_bits(&inode->file_extent_tree, start, start + len - 1,
- EXTENT_DIRTY);
+ return btrfs_set_extent_bit(inode->file_extent_tree, start, start + len - 1,
+ EXTENT_DIRTY, NULL);
}
-/**
- * Marks an inode range as not having a backing extent
+/*
+ * Mark an inode range as not having a backing extent.
*
* @inode: inode being modified
* @start: start file offset of the file extent we've inserted
@@ -109,68 +113,86 @@ int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len)
{
+ if (!inode->file_extent_tree)
+ return 0;
+
if (len == 0)
return 0;
ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
len == (u64)-1);
- if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
- return 0;
- return clear_extent_bit(&inode->file_extent_tree, start,
- start + len - 1, EXTENT_DIRTY, 0, 0, NULL);
+ return btrfs_clear_extent_bit(inode->file_extent_tree, start,
+ start + len - 1, EXTENT_DIRTY, NULL);
}
-static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
- u16 csum_size)
+static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes)
{
- u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;
+ ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize));
- return ncsums * fs_info->sectorsize;
+ return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size;
}
-int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
+static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size)
+{
+ ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size));
+
+ return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits;
+}
+
+static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info)
+{
+ u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum),
+ fs_info->csum_size);
+
+ return csum_size_to_bytes(fs_info, max_csum_size);
+}
+
+/*
+ * Calculate the total size needed to allocate for an ordered sum structure
+ * spanning @bytes in the file.
+ */
+static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes)
+{
+ return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes);
+}
+
+int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- u64 objectid, u64 pos,
- u64 disk_offset, u64 disk_num_bytes,
- u64 num_bytes, u64 offset, u64 ram_bytes,
- u8 compression, u8 encryption, u16 other_encoding)
+ u64 objectid, u64 pos, u64 num_bytes)
{
int ret = 0;
struct btrfs_file_extent_item *item;
struct btrfs_key file_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+
file_key.objectid = objectid;
- file_key.offset = pos;
file_key.type = BTRFS_EXTENT_DATA_KEY;
+ file_key.offset = pos;
ret = btrfs_insert_empty_item(trans, root, path, &file_key,
sizeof(*item));
if (ret < 0)
- goto out;
- BUG_ON(ret); /* Can't happen */
+ return ret;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
- btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
- btrfs_set_file_extent_offset(leaf, item, offset);
+ btrfs_set_file_extent_disk_bytenr(leaf, item, 0);
+ btrfs_set_file_extent_disk_num_bytes(leaf, item, 0);
+ btrfs_set_file_extent_offset(leaf, item, 0);
btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
- btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
+ btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes);
btrfs_set_file_extent_generation(leaf, item, trans->transid);
btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_compression(leaf, item, compression);
- btrfs_set_file_extent_encryption(leaf, item, encryption);
- btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
+ btrfs_set_file_extent_compression(leaf, item, 0);
+ btrfs_set_file_extent_encryption(leaf, item, 0);
+ btrfs_set_file_extent_other_encoding(leaf, item, 0);
- btrfs_mark_buffer_dirty(leaf);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -191,8 +213,8 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
int csums_in_item;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- file_key.offset = bytenr;
file_key.type = BTRFS_EXTENT_CSUM_KEY;
+ file_key.offset = bytenr;
ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
if (ret < 0)
goto fail;
@@ -208,7 +230,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
csum_offset = (bytenr - found_key.offset) >>
fs_info->sectorsize_bits;
- csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
+ csums_in_item = btrfs_item_size(leaf, path->slots[0]);
csums_in_item /= csum_size;
if (csum_offset == csums_in_item) {
@@ -238,15 +260,15 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int cow = mod != 0;
file_key.objectid = objectid;
- file_key.offset = offset;
file_key.type = BTRFS_EXTENT_DATA_KEY;
+ file_key.offset = offset;
return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
}
/*
* Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and
- * estore the result to @dst.
+ * store the result to @dst.
*
* Return >0 for the number of sectors we found.
* Return 0 for the range [disk_bytenr, disk_bytenr + sectorsize) has no csum
@@ -257,6 +279,7 @@ static int search_csum_tree(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 disk_bytenr,
u64 len, u8 *dst)
{
+ struct btrfs_root *csum_root;
struct btrfs_csum_item *item = NULL;
struct btrfs_key key;
const u32 sectorsize = fs_info->sectorsize;
@@ -274,7 +297,7 @@ static int search_csum_tree(struct btrfs_fs_info *fs_info,
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_csum_item);
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- itemsize = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
+ itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);
csum_start = key.offset;
csum_len = (itemsize / csum_size) * sectorsize;
@@ -285,13 +308,14 @@ static int search_csum_tree(struct btrfs_fs_info *fs_info,
/* Current item doesn't contain the desired range, search again */
btrfs_release_path(path);
- item = btrfs_lookup_csum(NULL, fs_info->csum_root, path, disk_bytenr, 0);
+ csum_root = btrfs_csum_root(fs_info, disk_bytenr);
+ item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0);
if (IS_ERR(item)) {
ret = PTR_ERR(item);
goto out;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- itemsize = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
+ itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);
csum_start = key.offset;
csum_len = (itemsize / csum_size) * sectorsize;
@@ -303,81 +327,33 @@ found:
read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
ret * csum_size);
out:
- if (ret == -ENOENT)
+ if (ret == -ENOENT || ret == -EFBIG)
ret = 0;
return ret;
}
/*
- * Locate the file_offset of @cur_disk_bytenr of a @bio.
- *
- * Bio of btrfs represents read range of
- * [bi_sector << 9, bi_sector << 9 + bi_size).
- * Knowing this, we can iterate through each bvec to locate the page belong to
- * @cur_disk_bytenr and get the file offset.
- *
- * @inode is used to determine if the bvec page really belongs to @inode.
- *
- * Return 0 if we can't find the file offset
- * Return >0 if we find the file offset and restore it to @file_offset_ret
- */
-static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
- u64 disk_bytenr, u64 *file_offset_ret)
-{
- struct bvec_iter iter;
- struct bio_vec bvec;
- u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT;
- int ret = 0;
-
- bio_for_each_segment(bvec, bio, iter) {
- struct page *page = bvec.bv_page;
-
- if (cur > disk_bytenr)
- break;
- if (cur + bvec.bv_len <= disk_bytenr) {
- cur += bvec.bv_len;
- continue;
- }
- ASSERT(in_range(disk_bytenr, cur, bvec.bv_len));
- if (page->mapping && page->mapping->host &&
- page->mapping->host == inode) {
- ret = 1;
- *file_offset_ret = page_offset(page) + bvec.bv_offset +
- disk_bytenr - cur;
- break;
- }
- }
- return ret;
-}
-
-/**
* Lookup the checksum for the read bio in csum tree.
*
- * @inode: inode that the bio is for.
- * @bio: bio to look up.
- * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return
- * checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
- * NULL, the checksum buffer is allocated and returned in
- * btrfs_bio(bio)->csum instead.
- *
* Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
*/
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst)
+int btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_path *path;
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct bio *bio = &bbio->bio;
+ BTRFS_PATH_AUTO_FREE(path);
const u32 sectorsize = fs_info->sectorsize;
const u32 csum_size = fs_info->csum_size;
u32 orig_len = bio->bi_iter.bi_size;
u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
- u64 cur_disk_bytenr;
- u8 *csum;
const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
- int count = 0;
+ int ret = 0;
+ u32 bio_offset = 0;
- if (!fs_info->csum_root || (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
- return BLK_STS_OK;
+ if ((inode->flags & BTRFS_INODE_NODATASUM) ||
+ test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state))
+ return 0;
/*
* This function is only called for read bio.
@@ -394,23 +370,14 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
ASSERT(bio_op(bio) == REQ_OP_READ);
path = btrfs_alloc_path();
if (!path)
- return BLK_STS_RESOURCE;
-
- if (!dst) {
- struct btrfs_bio *bbio = btrfs_bio(bio);
+ return -ENOMEM;
- if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
- bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
- if (!bbio->csum) {
- btrfs_free_path(path);
- return BLK_STS_RESOURCE;
- }
- } else {
- bbio->csum = bbio->csum_inline;
- }
- csum = bbio->csum;
+ if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
+ bbio->csum = kvcalloc(nblocks, csum_size, GFP_NOFS);
+ if (!bbio->csum)
+ return -ENOMEM;
} else {
- csum = dst;
+ bbio->csum = bbio->csum_inline;
}
/*
@@ -426,73 +393,107 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
* read from the commit root and sidestep a nasty deadlock
* between reading the free space cache and updating the csum tree.
*/
- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ if (btrfs_is_free_space_inode(inode)) {
+ path->search_commit_root = true;
+ path->skip_locking = true;
+ }
+
+ /*
+ * If we are searching for a csum of an extent from a past
+ * transaction, we can search in the commit root and reduce
+ * lock contention on the csum tree extent buffers.
+ *
+ * This is important because that lock is an rwsem which gets
+ * pretty heavy write load under memory pressure and sustained
+ * csum overwrites, unlike the commit_root_sem. (Memory pressure
+ * makes us writeback the nodes multiple times per transaction,
+ * which makes us cow them each time, taking the write lock.)
+ *
+ * Due to how rwsem is implemented, there is a possible
+ * priority inversion where the readers holding the lock don't
+ * get scheduled (say they're in a cgroup stuck in heavy reclaim)
+ * which then blocks writers, including transaction commit. By
+ * using a semaphore with fewer writers (only a commit switching
+ * the roots), we make this issue less likely.
+ *
+ * Note that we don't rely on btrfs_search_slot to lock the
+ * commit root csum. We call search_slot multiple times, which would
+ * create a potential race where a commit comes in between searches
+ * while we are not holding the commit_root_sem, and we get csums
+ * from across transactions.
+ */
+ if (bbio->csum_search_commit_root) {
+ path->search_commit_root = true;
+ path->skip_locking = true;
+ down_read(&fs_info->commit_root_sem);
}
- for (cur_disk_bytenr = orig_disk_bytenr;
- cur_disk_bytenr < orig_disk_bytenr + orig_len;
- cur_disk_bytenr += (count * sectorsize)) {
- u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
- unsigned int sector_offset;
- u8 *csum_dst;
+ while (bio_offset < orig_len) {
+ int count;
+ u64 cur_disk_bytenr = orig_disk_bytenr + bio_offset;
+ u8 *csum_dst = bbio->csum +
+ (bio_offset >> fs_info->sectorsize_bits) * csum_size;
+
+ count = search_csum_tree(fs_info, path, cur_disk_bytenr,
+ orig_len - bio_offset, csum_dst);
+ if (count < 0) {
+ ret = count;
+ if (bbio->csum != bbio->csum_inline)
+ kvfree(bbio->csum);
+ bbio->csum = NULL;
+ break;
+ }
/*
- * Although both cur_disk_bytenr and orig_disk_bytenr is u64,
- * we're calculating the offset to the bio start.
+ * We didn't find a csum for this range. We need to make sure
+ * we complain loudly about this, because we are not NODATASUM.
*
- * Bio size is limited to UINT_MAX, thus unsigned int is large
- * enough to contain the raw result, not to mention the right
- * shifted result.
+ * However for the DATA_RELOC inode we could potentially be
+ * relocating data extents for a NODATASUM inode, so the inode
+ * itself won't be marked with NODATASUM, but the extent we're
+ * copying is in fact NODATASUM. If we don't find a csum we
+ * assume this is the case.
*/
- ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
- sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
- fs_info->sectorsize_bits;
- csum_dst = csum + sector_offset * csum_size;
-
- count = search_csum_tree(fs_info, path, cur_disk_bytenr,
- search_len, csum_dst);
- if (count <= 0) {
- /*
- * Either we hit a critical error or we didn't find
- * the csum.
- * Either way, we put zero into the csums dst, and skip
- * to the next sector.
- */
+ if (count == 0) {
memset(csum_dst, 0, csum_size);
count = 1;
- /*
- * For data reloc inode, we need to mark the range
- * NODATASUM so that balance won't report false csum
- * error.
- */
- if (BTRFS_I(inode)->root->root_key.objectid ==
- BTRFS_DATA_RELOC_TREE_OBJECTID) {
- u64 file_offset;
- int ret;
-
- ret = search_file_offset_in_bio(bio, inode,
- cur_disk_bytenr, &file_offset);
- if (ret)
- set_extent_bits(io_tree, file_offset,
- file_offset + sectorsize - 1,
- EXTENT_NODATASUM);
+ if (btrfs_is_data_reloc_root(inode->root)) {
+ u64 file_offset = bbio->file_offset + bio_offset;
+
+ btrfs_set_extent_bit(&inode->io_tree, file_offset,
+ file_offset + sectorsize - 1,
+ EXTENT_NODATASUM, NULL);
} else {
btrfs_warn_rl(fs_info,
"csum hole found for disk bytenr range [%llu, %llu)",
cur_disk_bytenr, cur_disk_bytenr + sectorsize);
}
}
+ bio_offset += count * sectorsize;
}
- btrfs_free_path(path);
- return BLK_STS_OK;
+ if (bbio->csum_search_commit_root)
+ up_read(&fs_info->commit_root_sem);
+ return ret;
}
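/*
 * Illustrative sketch (not part of this patch): a standalone model of the
 * checksum-slot arithmetic used in the loop above, where each sector of the
 * bio gets csum_size bytes at index (bio_offset >> sectorsize_bits). The
 * constants assume 4K sectors and crc32c checksums and are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE_BITS	12	/* 4096-byte sectors */
#define CSUM_SIZE	4	/* crc32c */

static uint8_t *csum_slot(uint8_t *csum_buf, uint64_t bio_offset)
{
	/* One checksum per sector, packed back to back in csum_buf. */
	return csum_buf + (bio_offset >> SECTORSIZE_BITS) * CSUM_SIZE;
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	/* The third sector (bio offset 8K) uses bytes 8..11 of the buffer. */
	printf("slot offset = %td\n", csum_slot(buf, 8192) - buf);
	return 0;
}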
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
- struct list_head *list, int search_commit)
+/*
+ * Search for checksums for a given logical range.
+ *
+ * @root: The root where to look for checksums.
+ * @start: Logical address of target checksum range.
+ * @end: End offset (inclusive) of the target checksum range.
+ * @list: List for adding each checksum that was found.
+ * Can be NULL in case the caller only wants to check if
+ * there are any checksums for the range.
+ * @nowait: Indicate whether the search must be non-blocking.
+ *
+ * Return < 0 on error, 0 if no checksums were found, or 1 if checksums were
+ * found.
+ */
+int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
+ struct list_head *list, bool nowait)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
@@ -500,12 +501,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct extent_buffer *leaf;
struct btrfs_ordered_sum *sums;
struct btrfs_csum_item *item;
- LIST_HEAD(tmplist);
- unsigned long offset;
int ret;
- size_t size;
- u64 csum_end;
- const u32 csum_size = fs_info->csum_size;
+ bool found_csums = false;
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(end + 1, fs_info->sectorsize));
@@ -514,37 +511,50 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
if (!path)
return -ENOMEM;
- if (search_commit) {
- path->skip_locking = 1;
- path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- }
+ path->nowait = nowait;
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key.offset = start;
key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = start;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto fail;
+ goto out;
if (ret > 0 && path->slots[0] > 0) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+
+ /*
+ * There are two cases we can hit here for the previous csum
+ * item:
+ *
+ * |<- search range ->|
+ * |<- csum item ->|
+ *
+ * Or
+ * |<- search range ->|
+ * |<- csum item ->|
+ *
+ * Check if the previous csum item covers the leading part of
+ * the search range. If so, we have to start from the previous
+ * csum item.
+ */
if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
key.type == BTRFS_EXTENT_CSUM_KEY) {
- offset = (start - key.offset) >> fs_info->sectorsize_bits;
- if (offset * csum_size <
- btrfs_item_size_nr(leaf, path->slots[0] - 1))
+ if (bytes_to_csum_size(fs_info, start - key.offset) <
+ btrfs_item_size(leaf, path->slots[0] - 1))
path->slots[0]--;
}
}
while (start <= end) {
+ u64 csum_end;
+
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto fail;
+ goto out;
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -559,82 +569,251 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
if (key.offset > start)
start = key.offset;
- size = btrfs_item_size_nr(leaf, path->slots[0]);
- csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
+ csum_end = key.offset + csum_size_to_bytes(fs_info,
+ btrfs_item_size(leaf, path->slots[0]));
if (csum_end <= start) {
path->slots[0]++;
continue;
}
+ found_csums = true;
+ if (!list)
+ goto out;
+
csum_end = min(csum_end, end + 1);
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_csum_item);
while (start < csum_end) {
+ unsigned long offset;
+ size_t size;
+
size = min_t(size_t, csum_end - start,
- max_ordered_sum_bytes(fs_info, csum_size));
+ max_ordered_sum_bytes(fs_info));
sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
GFP_NOFS);
if (!sums) {
ret = -ENOMEM;
- goto fail;
+ goto out;
}
- sums->bytenr = start;
- sums->len = (int)size;
+ sums->logical = start;
+ sums->len = size;
- offset = (start - key.offset) >> fs_info->sectorsize_bits;
- offset *= csum_size;
- size >>= fs_info->sectorsize_bits;
+ offset = bytes_to_csum_size(fs_info, start - key.offset);
read_extent_buffer(path->nodes[0],
sums->sums,
((unsigned long)item) + offset,
- csum_size * size);
+ bytes_to_csum_size(fs_info, size));
+
+ start += size;
+ list_add_tail(&sums->list, list);
+ }
+ path->slots[0]++;
+ }
+out:
+ btrfs_free_path(path);
+ if (ret < 0) {
+ if (list) {
+ struct btrfs_ordered_sum *tmp_sums;
+
+ list_for_each_entry_safe(sums, tmp_sums, list, list)
+ kfree(sums);
+ }
+
+ return ret;
+ }
+
+ return found_csums ? 1 : 0;
+}
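/*
 * Illustrative sketch (not part of this patch): a standalone model of the
 * "does the previous csum item still cover the search start" test used by
 * btrfs_lookup_csums_list() above. bytes_to_csum_size() maps a byte length
 * to the number of checksum bytes covering it; 4K sectors and crc32c are
 * assumed and the helper names are made up.
 */
#include <stdbool.h>
#include <stdint.h>

#define SECTORSIZE_BITS	12
#define CSUM_SIZE	4

static uint32_t bytes_to_csum_size(uint64_t bytes)
{
	return (uint32_t)((bytes >> SECTORSIZE_BITS) * CSUM_SIZE);
}

/*
 * @item_offset: logical bytenr where the previous csum item starts (key.offset)
 * @item_size:   payload size of that item in bytes
 * @start:       first byte of the search range
 *
 * The item covers item_size / CSUM_SIZE sectors starting at item_offset, so
 * it still covers @start iff the csum bytes "consumed" before @start are
 * fewer than the item payload.
 */
static bool prev_item_covers_start(uint64_t item_offset, uint32_t item_size,
				   uint64_t start)
{
	return bytes_to_csum_size(start - item_offset) < item_size;
}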
+
+/*
+ * Do the same work as btrfs_lookup_csums_list(); the difference is in how
+ * we return the result.
+ *
+ * This version will set the corresponding bits in @csum_bitmap to indicate
+ * that a csum was found.
+ * Each bit represents a sector. Thus the caller should ensure that the
+ * @csum_buf passed in is large enough to contain all csums.
+ */
+int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
+ u64 start, u64 end, u8 *csum_buf,
+ unsigned long *csum_bitmap)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_csum_item *item;
+ const u64 orig_start = start;
+ bool free_path = false;
+ int ret;
+
+ ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+ IS_ALIGNED(end + 1, fs_info->sectorsize));
+
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ free_path = true;
+ }
+
+ /* Check if we can reuse the previous path. */
+ if (path->nodes[0]) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+
+ if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
+ key.type == BTRFS_EXTENT_CSUM_KEY &&
+ key.offset <= start)
+ goto search_forward;
+ btrfs_release_path(path);
+ }
+
+ key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+ key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = start;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto fail;
+ if (ret > 0 && path->slots[0] > 0) {
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+
+ /*
+ * There are two cases we can hit here for the previous csum
+ * item:
+ *
+ * |<- search range ->|
+ * |<- csum item ->|
+ *
+ * Or
+ * |<- search range ->|
+ * |<- csum item ->|
+ *
+ * Check if the previous csum item covers the leading part of
+ * the search range. If so, we have to start from the previous
+ * csum item.
+ */
+ if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
+ key.type == BTRFS_EXTENT_CSUM_KEY) {
+ if (bytes_to_csum_size(fs_info, start - key.offset) <
+ btrfs_item_size(leaf, path->slots[0] - 1))
+ path->slots[0]--;
+ }
+ }
+
+search_forward:
+ while (start <= end) {
+ u64 csum_end;
+
+ leaf = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto fail;
+ if (ret > 0)
+ break;
+ leaf = path->nodes[0];
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+ key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset > end)
+ break;
+
+ if (key.offset > start)
+ start = key.offset;
+
+ csum_end = key.offset + csum_size_to_bytes(fs_info,
+ btrfs_item_size(leaf, path->slots[0]));
+ if (csum_end <= start) {
+ path->slots[0]++;
+ continue;
+ }
+
+ csum_end = min(csum_end, end + 1);
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_csum_item);
+ while (start < csum_end) {
+ unsigned long offset;
+ size_t size;
+ u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info,
+ start - orig_start);
+
+ size = min_t(size_t, csum_end - start, end + 1 - start);
+
+ offset = bytes_to_csum_size(fs_info, start - key.offset);
+
+ read_extent_buffer(path->nodes[0], csum_dest,
+ ((unsigned long)item) + offset,
+ bytes_to_csum_size(fs_info, size));
- start += fs_info->sectorsize * size;
- list_add_tail(&sums->list, &tmplist);
+ bitmap_set(csum_bitmap,
+ (start - orig_start) >> fs_info->sectorsize_bits,
+ size >> fs_info->sectorsize_bits);
+
+ start += size;
}
path->slots[0]++;
}
ret = 0;
fail:
- while (ret < 0 && !list_empty(&tmplist)) {
- sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
- list_del(&sums->list);
- kfree(sums);
+ if (free_path)
+ btrfs_free_path(path);
+ return ret;
+}
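/*
 * Illustrative sketch (not part of this patch): a standalone model of the
 * bit-per-sector bookkeeping done by btrfs_lookup_csums_bitmap() above.
 * Each set bit means "a checksum was found for this sector of the requested
 * range". 4K sectors are assumed and the helper name is made up.
 */
#include <stdint.h>
#include <stddef.h>

#define SECTORSIZE_BITS	12
#define BITS_PER_LONG_SZ (8 * sizeof(unsigned long))

static void mark_sectors_found(unsigned long *bitmap, uint64_t orig_start,
			       uint64_t start, uint64_t len)
{
	uint64_t first = (start - orig_start) >> SECTORSIZE_BITS;
	uint64_t nbits = len >> SECTORSIZE_BITS;

	for (uint64_t i = 0; i < nbits; i++) {
		uint64_t bit = first + i;

		bitmap[bit / BITS_PER_LONG_SZ] |= 1UL << (bit % BITS_PER_LONG_SZ);
	}
}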
+
+static void csum_one_bio(struct btrfs_bio *bbio, struct bvec_iter *src)
+{
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+ struct bio *bio = &bbio->bio;
+ struct btrfs_ordered_sum *sums = bbio->sums;
+ struct bvec_iter iter = *src;
+ phys_addr_t paddr;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ u32 offset = 0;
+ int index = 0;
+
+ shash->tfm = fs_info->csum_shash;
+
+ btrfs_bio_for_each_block(paddr, bio, &iter, step) {
+ paddrs[(offset / step) % nr_steps] = paddr;
+ offset += step;
+
+ if (IS_ALIGNED(offset, blocksize)) {
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, sums->sums + index);
+ index += fs_info->csum_size;
+ }
}
- list_splice_tail(&tmplist, list);
+}
- btrfs_free_path(path);
- return ret;
+static void csum_one_bio_work(struct work_struct *work)
+{
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, csum_work);
+
+ ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
+ ASSERT(bbio->async_csum == true);
+ csum_one_bio(bbio, &bbio->csum_saved_iter);
+ complete(&bbio->csum_done);
}
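/*
 * Illustrative sketch (not part of this patch): a standalone model of how
 * csum_one_bio() above buffers one block worth of per-page addresses before
 * hashing, for the case where the filesystem block is larger than a page.
 * The block/page sizes, the (offset / step) % nr_steps rotation, and the
 * hash stub are all illustrative stand-ins.
 */
#include <stdint.h>
#include <stddef.h>

#define TOY_PAGE_SIZE	4096u
#define TOY_BLOCKSIZE	16384u			/* e.g. 16K blocks */
#define TOY_NR_STEPS	(TOY_BLOCKSIZE / TOY_PAGE_SIZE)

/* Stand-in for the real crypto hash: XOR the page addresses together. */
static uint8_t toy_hash_block(const uintptr_t pages[TOY_NR_STEPS])
{
	uintptr_t acc = 0;

	for (unsigned int i = 0; i < TOY_NR_STEPS; i++)
		acc ^= pages[i];
	return (uint8_t)acc;
}

static void toy_csum_blocks(const uintptr_t *page_addrs, size_t nr_pages,
			    uint8_t *csums)
{
	uintptr_t pending[TOY_NR_STEPS];
	uint32_t offset = 0;
	size_t out = 0;

	for (size_t i = 0; i < nr_pages; i++) {
		/* Collect pages into the slot for the current block. */
		pending[(offset / TOY_PAGE_SIZE) % TOY_NR_STEPS] = page_addrs[i];
		offset += TOY_PAGE_SIZE;

		/* A full block is buffered: hash it and emit one checksum. */
		if (offset % TOY_BLOCKSIZE == 0)
			csums[out++] = toy_hash_block(pending);
	}
}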
/*
- * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
- * @inode: Owner of the data inside the bio
- * @bio: Contains the data to be checksummed
- * @file_start: offset in file this bio begins to describe
- * @contig: Boolean. If true/1 means all bio vecs in this bio are
- * contiguous and they begin at @file_start in the file. False/0
- * means this bio can contain potentially discontiguous bio vecs
- * so the logical offset of each should be calculated separately.
+ * Calculate checksums of the data contained inside a bio.
*/
-blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
- u64 file_start, int contig)
+int btrfs_csum_one_bio(struct btrfs_bio *bbio, bool async)
{
+ struct btrfs_ordered_extent *ordered = bbio->ordered;
+ struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+ struct bio *bio = &bbio->bio;
struct btrfs_ordered_sum *sums;
- struct btrfs_ordered_extent *ordered = NULL;
- char *data;
- struct bvec_iter iter;
- struct bio_vec bvec;
- int index;
- int nr_sectors;
- unsigned long total_bytes = 0;
- unsigned long this_sum_bytes = 0;
- int i;
- u64 offset;
unsigned nofs_flag;
nofs_flag = memalloc_nofs_save();
@@ -643,106 +822,60 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
memalloc_nofs_restore(nofs_flag);
if (!sums)
- return BLK_STS_RESOURCE;
+ return -ENOMEM;
+ sums->logical = bbio->orig_logical;
sums->len = bio->bi_iter.bi_size;
INIT_LIST_HEAD(&sums->list);
+ bbio->sums = sums;
+ btrfs_add_ordered_sum(ordered, sums);
- if (contig)
- offset = file_start;
- else
- offset = 0; /* shut up gcc */
-
- sums->bytenr = bio->bi_iter.bi_sector << 9;
- index = 0;
-
- shash->tfm = fs_info->csum_shash;
-
- bio_for_each_segment(bvec, bio, iter) {
- if (!contig)
- offset = page_offset(bvec.bv_page) + bvec.bv_offset;
-
- if (!ordered) {
- ordered = btrfs_lookup_ordered_extent(inode, offset);
- /*
- * The bio range is not covered by any ordered extent,
- * must be a code logic error.
- */
- if (unlikely(!ordered)) {
- WARN(1, KERN_WARNING
- "no ordered extent for root %llu ino %llu offset %llu\n",
- inode->root->root_key.objectid,
- btrfs_ino(inode), offset);
- kvfree(sums);
- return BLK_STS_IOERR;
- }
- }
-
- nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
- bvec.bv_len + fs_info->sectorsize
- - 1);
-
- for (i = 0; i < nr_sectors; i++) {
- if (offset >= ordered->file_offset + ordered->num_bytes ||
- offset < ordered->file_offset) {
- unsigned long bytes_left;
-
- sums->len = this_sum_bytes;
- this_sum_bytes = 0;
- btrfs_add_ordered_sum(ordered, sums);
- btrfs_put_ordered_extent(ordered);
-
- bytes_left = bio->bi_iter.bi_size - total_bytes;
-
- nofs_flag = memalloc_nofs_save();
- sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
- bytes_left), GFP_KERNEL);
- memalloc_nofs_restore(nofs_flag);
- BUG_ON(!sums); /* -ENOMEM */
- sums->len = bytes_left;
- ordered = btrfs_lookup_ordered_extent(inode,
- offset);
- ASSERT(ordered); /* Logic error */
- sums->bytenr = (bio->bi_iter.bi_sector << 9)
- + total_bytes;
- index = 0;
- }
-
- data = bvec_kmap_local(&bvec);
- crypto_shash_digest(shash,
- data + (i * fs_info->sectorsize),
- fs_info->sectorsize,
- sums->sums + index);
- kunmap_local(data);
- index += fs_info->csum_size;
- offset += fs_info->sectorsize;
- this_sum_bytes += fs_info->sectorsize;
- total_bytes += fs_info->sectorsize;
- }
-
+ if (!async) {
+ csum_one_bio(bbio, &bbio->bio.bi_iter);
+ return 0;
}
- this_sum_bytes = 0;
- btrfs_add_ordered_sum(ordered, sums);
- btrfs_put_ordered_extent(ordered);
+ init_completion(&bbio->csum_done);
+ bbio->async_csum = true;
+ bbio->csum_saved_iter = bbio->bio.bi_iter;
+ INIT_WORK(&bbio->csum_work, csum_one_bio_work);
+ schedule_work(&bbio->csum_work);
return 0;
}
/*
- * helper function for csum removal, this expects the
- * key to describe the csum pointed to by the path, and it expects
- * the csum to overlap the range [bytenr, len]
+ * Nodatasum I/O on zoned file systems still requires a btrfs_ordered_sum to
+ * record the updated logical address on Zone Append completion.
+ * Allocate just the structure with an empty sums array here for that case.
+ */
+int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
+{
+ bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS);
+ if (!bbio->sums)
+ return -ENOMEM;
+ bbio->sums->len = bbio->bio.bi_iter.bi_size;
+ bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ btrfs_add_ordered_sum(bbio->ordered, bbio->sums);
+ return 0;
+}
+
+/*
+ * Remove one checksum overlapping a range.
+ *
+ * This expects the key to describe the csum pointed to by the path, and it
+ * expects the csum to overlap the range [bytenr, bytenr + len).
*
- * The csum should not be entirely contained in the range and the
- * range should not be entirely contained in the csum.
+ * The csum should not be entirely contained in the range and the range should
+ * not be entirely contained in the csum.
*
- * This calls btrfs_truncate_item with the correct args based on the
- * overlap, and fixes up the key as required.
+ * This calls btrfs_truncate_item with the correct args based on the overlap,
+ * and fixes up the key as required.
*/
-static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct extent_buffer *leaf;
const u32 csum_size = fs_info->csum_size;
u64 csum_end;
@@ -750,7 +883,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
u32 blocksize_bits = fs_info->sectorsize_bits;
leaf = path->nodes[0];
- csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
+ csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
csum_end <<= blocksize_bits;
csum_end += key->offset;
@@ -763,7 +896,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(path, new_size, 1);
+ btrfs_truncate_item(trans, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@@ -775,24 +908,23 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(path, new_size, 0);
+ btrfs_truncate_item(trans, path, new_size, 0);
key->offset = end_byte;
- btrfs_set_item_key_safe(fs_info, path, key);
+ btrfs_set_item_key_safe(trans, path, key);
} else {
BUG();
}
}
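/*
 * Illustrative sketch (not part of this patch): a standalone model of the
 * two truncation cases handled by truncate_one_csum() above. Given a csum
 * item starting at item_offset and a deletion range [bytenr, bytenr + len),
 * these return the new payload size of the item in bytes. 4K sectors and
 * crc32c are assumed; the function names are made up.
 */
#include <stdint.h>

#define SECTORSIZE_BITS	12
#define CSUM_SIZE	4

/* Deletion range overlaps the tail of the item: keep the leading sectors. */
static uint32_t truncate_keep_front(uint64_t item_offset, uint64_t bytenr)
{
	return (uint32_t)(((bytenr - item_offset) >> SECTORSIZE_BITS) * CSUM_SIZE);
}

/*
 * Deletion range overlaps the head of the item: keep the trailing sectors
 * (the caller then moves the item key forward to end_byte).
 */
static uint32_t truncate_keep_tail(uint64_t csum_end, uint64_t end_byte)
{
	return (uint32_t)(((csum_end - end_byte) >> SECTORSIZE_BITS) * CSUM_SIZE);
}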
/*
- * deletes the csum items from the csum tree for a given
- * range of bytes.
+ * Delete the csum items from the csum tree for a given range of bytes.
*/
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 end_byte = bytenr + len;
u64 csum_end;
@@ -801,8 +933,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
const u32 csum_size = fs_info->csum_size;
u32 blocksize_bits = fs_info->sectorsize_bits;
- ASSERT(root == fs_info->csum_root ||
- root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);
path = btrfs_alloc_path();
if (!path)
@@ -810,8 +942,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
while (1) {
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key.offset = end_byte - 1;
key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = end_byte - 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
@@ -834,7 +966,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
if (key.offset >= end_byte)
break;
- csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
+ csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
csum_end <<= blocksize_bits;
csum_end += key.offset;
@@ -914,7 +1046,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
* item changed size or key
*/
ret = btrfs_split_item(trans, root, path, &key, offset);
- if (ret && ret != -EAGAIN) {
+ if (unlikely(ret && ret != -EAGAIN)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -922,13 +1054,12 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
} else {
- truncate_one_csum(fs_info, path, &key, bytenr, len);
+ truncate_one_csum(trans, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
btrfs_release_path(path);
}
- btrfs_free_path(path);
return ret;
}
@@ -970,7 +1101,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key file_key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_csum_item *item;
struct btrfs_csum_item *item_end;
struct extent_buffer *leaf = NULL;
@@ -990,10 +1121,10 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
again:
next_offset = (u64)-1;
found_next = 0;
- bytenr = sums->bytenr + total_bytes;
+ bytenr = sums->logical + total_bytes;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- file_key.offset = bytenr;
file_key.type = BTRFS_EXTENT_CSUM_KEY;
+ file_key.offset = bytenr;
item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
if (!IS_ERR(item)) {
@@ -1002,7 +1133,7 @@ again:
item_end = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_csum_item);
item_end = (struct btrfs_csum_item *)((char *)item_end +
- btrfs_item_size_nr(leaf, path->slots[0]));
+ btrfs_item_size(leaf, path->slots[0]));
goto found;
}
ret = PTR_ERR(item);
@@ -1013,7 +1144,7 @@ again:
u32 item_size;
/* we found one, but it isn't big enough yet */
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
if ((item_size / csum_size) >=
MAX_CSUM_ITEMS(fs_info, csum_size)) {
/* already at max size, make a new one */
@@ -1046,10 +1177,10 @@ again:
}
btrfs_release_path(path);
- path->search_for_extension = 1;
+ path->search_for_extension = true;
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
- path->search_for_extension = 0;
+ path->search_for_extension = false;
if (ret < 0)
goto out;
@@ -1070,7 +1201,7 @@ again:
}
extend_csum:
- if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
+ if (csum_offset == btrfs_item_size(leaf, path->slots[0]) /
csum_size) {
int extend_nr;
u64 tmp;
@@ -1102,7 +1233,7 @@ extend_csum:
* search, etc, because log trees are temporary anyway and it
* would only save a few bytes of leaf space.
*/
- if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
if (path->slots[0] + 1 >=
btrfs_header_nritems(path->nodes[0])) {
ret = find_next_csum_offset(root, path, &next_offset);
@@ -1125,12 +1256,12 @@ extend_csum:
diff = min(diff,
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
- diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
+ diff = diff - btrfs_item_size(leaf, path->slots[0]);
diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
diff /= csum_size;
diff *= csum_size;
- btrfs_extend_item(path, diff);
+ btrfs_extend_item(trans, path, diff);
ret = 0;
goto csum;
}
@@ -1156,13 +1287,11 @@ insert:
ins_size);
if (ret < 0)
goto out;
- if (WARN_ON(ret != 0))
- goto out;
leaf = path->nodes[0];
csum:
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
item_end = (struct btrfs_csum_item *)((unsigned char *)item +
- btrfs_item_size_nr(leaf, path->slots[0]));
+ btrfs_item_size(leaf, path->slots[0]));
item = (struct btrfs_csum_item *)((unsigned char *)item +
csum_offset * csum_size);
found:
@@ -1177,21 +1306,18 @@ found:
ins_size /= csum_size;
total_bytes += ins_size * fs_info->sectorsize;
- btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
cond_resched();
goto again;
}
out:
- btrfs_free_path(path);
return ret;
}
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const struct btrfs_path *path,
- struct btrfs_file_extent_item *fi,
- const bool new_inline,
+ const struct btrfs_file_extent_item *fi,
struct extent_map *em)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1199,58 +1325,56 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0];
struct btrfs_key key;
- u64 extent_start, extent_end;
- u64 bytenr;
+ u64 extent_start;
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset;
- extent_end = btrfs_file_extent_end(path);
em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+ em->generation = btrfs_file_extent_generation(leaf, fi);
if (type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) {
+ const u64 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+
em->start = extent_start;
- em->len = extent_end - extent_start;
- em->orig_start = extent_start -
- btrfs_file_extent_offset(leaf, fi);
- em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
- bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- if (bytenr == 0) {
- em->block_start = EXTENT_MAP_HOLE;
+ em->len = btrfs_file_extent_end(path) - extent_start;
+ if (disk_bytenr == 0) {
+ em->disk_bytenr = EXTENT_MAP_HOLE;
+ em->disk_num_bytes = 0;
+ em->offset = 0;
return;
}
+ em->disk_bytenr = disk_bytenr;
+ em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ em->offset = btrfs_file_extent_offset(leaf, fi);
if (compress_type != BTRFS_COMPRESS_NONE) {
- set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
- em->compress_type = compress_type;
- em->block_start = bytenr;
- em->block_len = em->orig_block_len;
+ btrfs_extent_map_set_compression(em, compress_type);
} else {
- bytenr += btrfs_file_extent_offset(leaf, fi);
- em->block_start = bytenr;
- em->block_len = em->len;
+ /*
+ * Older kernels can create regular non-hole data
+ * extents with ram_bytes smaller than disk_num_bytes.
+ * Not a big deal, just always use disk_num_bytes
+ * for ram_bytes.
+ */
+ em->ram_bytes = em->disk_num_bytes;
if (type == BTRFS_FILE_EXTENT_PREALLOC)
- set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ em->flags |= EXTENT_FLAG_PREALLOC;
}
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
- em->block_start = EXTENT_MAP_INLINE;
- em->start = extent_start;
- em->len = extent_end - extent_start;
- /*
- * Initialize orig_start and block_len with the same values
- * as in inode.c:btrfs_get_extent().
- */
- em->orig_start = EXTENT_MAP_HOLE;
- em->block_len = (u64)-1;
- if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
- set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
- em->compress_type = compress_type;
- }
+ /* Tree-checker has ensured this. */
+ ASSERT(extent_start == 0);
+
+ em->disk_bytenr = EXTENT_MAP_INLINE;
+ em->start = 0;
+ em->len = fs_info->sectorsize;
+ em->offset = 0;
+ btrfs_extent_map_set_compression(em, compress_type);
} else {
btrfs_err(fs_info,
"unknown file extent item type %d, inode %llu, offset %llu, "
"root %llu", type, btrfs_ino(inode), extent_start,
- root->root_key.objectid);
+ btrfs_root_id(root));
}
}
@@ -1271,12 +1395,10 @@ u64 btrfs_file_extent_end(const struct btrfs_path *path)
ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
- end = btrfs_file_extent_ram_bytes(leaf, fi);
- end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
- } else {
+ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE)
+ end = leaf->fs_info->sectorsize;
+ else
end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- }
return end;
}
diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h
new file mode 100644
index 000000000000..5645c5e3abdb
--- /dev/null
+++ b/fs/btrfs/file-item.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_FILE_ITEM_H
+#define BTRFS_FILE_ITEM_H
+
+#include <linux/blk_types.h>
+#include <linux/list.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "ctree.h"
+#include "ordered-data.h"
+
+struct extent_map;
+struct btrfs_file_extent_item;
+struct btrfs_fs_info;
+struct btrfs_path;
+struct btrfs_bio;
+struct btrfs_trans_handle;
+struct btrfs_root;
+struct btrfs_ordered_sum;
+struct btrfs_path;
+struct btrfs_inode;
+
+#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
+ (offsetof(struct btrfs_file_extent_item, disk_bytenr))
+
+static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
+{
+ return BTRFS_MAX_ITEM_SIZE(info) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
+}
+
+/*
+ * Return the number of bytes used by the item on disk, minus the size of any
+ * extent headers. If a file is compressed on disk, this is the compressed
+ * size.
+ */
+static inline u32 btrfs_file_extent_inline_item_len(
+ const struct extent_buffer *eb,
+ int nr)
+{
+ return btrfs_item_size(eb, nr) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
+}
+
+static inline unsigned long btrfs_file_extent_inline_start(
+ const struct btrfs_file_extent_item *e)
+{
+ return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
+}
+
+static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
+{
+ return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize;
+}
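/*
 * Illustrative sketch (not part of this patch): a standalone model of the
 * inline-extent size arithmetic above. The inline payload overlays the file
 * extent item starting at offsetof(..., disk_bytenr), so the usable data
 * length is the item size minus that header. The struct below is a
 * simplified stand-in, not the real on-disk definition.
 */
#include <stddef.h>
#include <stdint.h>

struct toy_file_extent_item {
	uint64_t generation;
	uint64_t ram_bytes;
	uint8_t  compression;
	uint8_t  encryption;
	uint16_t other_encoding;
	uint8_t  type;
	uint64_t disk_bytenr;	/* inline data starts here */
	uint64_t disk_num_bytes;
	uint64_t offset;
	uint64_t num_bytes;
} __attribute__((packed));

#define TOY_INLINE_DATA_START \
	offsetof(struct toy_file_extent_item, disk_bytenr)

static inline uint32_t toy_inline_item_len(uint32_t item_size)
{
	return item_size - TOY_INLINE_DATA_START;
}

static inline uint32_t toy_calc_inline_size(uint32_t datasize)
{
	return (uint32_t)(TOY_INLINE_DATA_START + datasize);
}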
+
+int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 bytenr, u64 len);
+int btrfs_lookup_bio_sums(struct btrfs_bio *bbio);
+int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 objectid, u64 pos,
+ u64 num_bytes);
+int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid,
+ u64 bytenr, int mod);
+int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_ordered_sum *sums);
+int btrfs_csum_one_bio(struct btrfs_bio *bbio, bool async);
+int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio);
+int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ struct list_head *list, int search_commit,
+ bool nowait);
+int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
+ struct list_head *list, bool nowait);
+int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
+ u64 start, u64 end, u8 *csum_buf,
+ unsigned long *csum_bitmap);
+void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
+ const struct btrfs_path *path,
+ const struct btrfs_file_extent_item *fi,
+ struct extent_map *em);
+int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
+ u64 len);
+int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, u64 len);
+void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
+u64 btrfs_file_extent_end(const struct btrfs_path *path);
+
+#endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 11204dbbe053..7a501e73d880 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -18,466 +18,64 @@
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include "ctree.h"
+#include "direct-io.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
-#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"
-
-static struct kmem_cache *btrfs_inode_defrag_cachep;
-/*
- * when auto defrag is enabled we
- * queue up these defrag structs to remember which
- * inodes need defragging passes
- */
-struct inode_defrag {
- struct rb_node rb_node;
- /* objectid */
- u64 ino;
- /*
- * transid where the defrag was added, we search for
- * extents newer than this
- */
- u64 transid;
-
- /* root objectid */
- u64 root;
-
- /* last offset we were able to defrag */
- u64 last_offset;
-
- /* if we've wrapped around back to zero once already */
- int cycled;
-};
-
-static int __compare_inode_defrag(struct inode_defrag *defrag1,
- struct inode_defrag *defrag2)
-{
- if (defrag1->root > defrag2->root)
- return 1;
- else if (defrag1->root < defrag2->root)
- return -1;
- else if (defrag1->ino > defrag2->ino)
- return 1;
- else if (defrag1->ino < defrag2->ino)
- return -1;
- else
- return 0;
-}
-
-/* pop a record for an inode into the defrag tree. The lock
- * must be held already
- *
- * If you're inserting a record for an older transid than an
- * existing record, the transid already in the tree is lowered
- *
- * If an existing record is found the defrag item you
- * pass in is freed
- */
-static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
- struct inode_defrag *defrag)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct inode_defrag *entry;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- int ret;
-
- p = &fs_info->defrag_inodes.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct inode_defrag, rb_node);
-
- ret = __compare_inode_defrag(defrag, entry);
- if (ret < 0)
- p = &parent->rb_left;
- else if (ret > 0)
- p = &parent->rb_right;
- else {
- /* if we're reinserting an entry for
- * an old defrag run, make sure to
- * lower the transid of our existing record
- */
- if (defrag->transid < entry->transid)
- entry->transid = defrag->transid;
- if (defrag->last_offset > entry->last_offset)
- entry->last_offset = defrag->last_offset;
- return -EEXIST;
- }
- }
- set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
- rb_link_node(&defrag->rb_node, parent, p);
- rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
- return 0;
-}
-
-static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
-{
- if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
- return 0;
-
- if (btrfs_fs_closing(fs_info))
- return 0;
-
- return 1;
-}
-
-/*
- * insert a defrag record for this inode if auto defrag is
- * enabled
- */
-int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode)
-{
- struct btrfs_root *root = inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode_defrag *defrag;
- u64 transid;
- int ret;
-
- if (!__need_auto_defrag(fs_info))
- return 0;
-
- if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
- return 0;
-
- if (trans)
- transid = trans->transid;
- else
- transid = inode->root->last_trans;
-
- defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
- if (!defrag)
- return -ENOMEM;
-
- defrag->ino = btrfs_ino(inode);
- defrag->transid = transid;
- defrag->root = root->root_key.objectid;
-
- spin_lock(&fs_info->defrag_inodes_lock);
- if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
- /*
- * If we set IN_DEFRAG flag and evict the inode from memory,
- * and then re-read this inode, this new inode doesn't have
- * IN_DEFRAG flag. At the case, we may find the existed defrag.
- */
- ret = __btrfs_add_inode_defrag(inode, defrag);
- if (ret)
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- } else {
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
- spin_unlock(&fs_info->defrag_inodes_lock);
- return 0;
-}
-
-/*
- * Requeue the defrag object. If there is a defrag object that points to
- * the same inode in the tree, we will merge them together (by
- * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
- */
-static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
- struct inode_defrag *defrag)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- int ret;
-
- if (!__need_auto_defrag(fs_info))
- goto out;
-
- /*
- * Here we don't check the IN_DEFRAG flag, because we need merge
- * them together.
- */
- spin_lock(&fs_info->defrag_inodes_lock);
- ret = __btrfs_add_inode_defrag(inode, defrag);
- spin_unlock(&fs_info->defrag_inodes_lock);
- if (ret)
- goto out;
- return;
-out:
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-}
-
-/*
- * pick the defragable inode that we want, if it doesn't exist, we will get
- * the next one.
- */
-static struct inode_defrag *
-btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
-{
- struct inode_defrag *entry = NULL;
- struct inode_defrag tmp;
- struct rb_node *p;
- struct rb_node *parent = NULL;
- int ret;
-
- tmp.ino = ino;
- tmp.root = root;
-
- spin_lock(&fs_info->defrag_inodes_lock);
- p = fs_info->defrag_inodes.rb_node;
- while (p) {
- parent = p;
- entry = rb_entry(parent, struct inode_defrag, rb_node);
-
- ret = __compare_inode_defrag(&tmp, entry);
- if (ret < 0)
- p = parent->rb_left;
- else if (ret > 0)
- p = parent->rb_right;
- else
- goto out;
- }
-
- if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
- parent = rb_next(parent);
- if (parent)
- entry = rb_entry(parent, struct inode_defrag, rb_node);
- else
- entry = NULL;
- }
-out:
- if (entry)
- rb_erase(parent, &fs_info->defrag_inodes);
- spin_unlock(&fs_info->defrag_inodes_lock);
- return entry;
-}
-
-void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
-{
- struct inode_defrag *defrag;
- struct rb_node *node;
-
- spin_lock(&fs_info->defrag_inodes_lock);
- node = rb_first(&fs_info->defrag_inodes);
- while (node) {
- rb_erase(node, &fs_info->defrag_inodes);
- defrag = rb_entry(node, struct inode_defrag, rb_node);
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-
- cond_resched_lock(&fs_info->defrag_inodes_lock);
-
- node = rb_first(&fs_info->defrag_inodes);
- }
- spin_unlock(&fs_info->defrag_inodes_lock);
-}
-
-#define BTRFS_DEFRAG_BATCH 1024
-
-static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
- struct inode_defrag *defrag)
-{
- struct btrfs_root *inode_root;
- struct inode *inode;
- struct btrfs_ioctl_defrag_range_args range;
- int num_defrag;
- int ret;
-
- /* get the inode */
- inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
- if (IS_ERR(inode_root)) {
- ret = PTR_ERR(inode_root);
- goto cleanup;
- }
-
- inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
- btrfs_put_root(inode_root);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- goto cleanup;
- }
-
- /* do a chunk of defrag */
- clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
- memset(&range, 0, sizeof(range));
- range.len = (u64)-1;
- range.start = defrag->last_offset;
-
- sb_start_write(fs_info->sb);
- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
- BTRFS_DEFRAG_BATCH);
- sb_end_write(fs_info->sb);
- /*
- * if we filled the whole defrag batch, there
- * must be more work to do. Queue this defrag
- * again
- */
- if (num_defrag == BTRFS_DEFRAG_BATCH) {
- defrag->last_offset = range.start;
- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
- } else if (defrag->last_offset && !defrag->cycled) {
- /*
- * we didn't fill our defrag batch, but
- * we didn't start at zero. Make sure we loop
- * around to the start of the file.
- */
- defrag->last_offset = 0;
- defrag->cycled = 1;
- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
- } else {
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
-
- iput(inode);
- return 0;
-cleanup:
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- return ret;
-}
-
-/*
- * run through the list of inodes in the FS that need
- * defragging
- */
-int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
-{
- struct inode_defrag *defrag;
- u64 first_ino = 0;
- u64 root_objectid = 0;
-
- atomic_inc(&fs_info->defrag_running);
- while (1) {
- /* Pause the auto defragger. */
- if (test_bit(BTRFS_FS_STATE_REMOUNTING,
- &fs_info->fs_state))
- break;
-
- if (!__need_auto_defrag(fs_info))
- break;
-
- /* find an inode to defrag */
- defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
- first_ino);
- if (!defrag) {
- if (root_objectid || first_ino) {
- root_objectid = 0;
- first_ino = 0;
- continue;
- } else {
- break;
- }
- }
-
- first_ino = defrag->ino + 1;
- root_objectid = defrag->root;
-
- __btrfs_run_defrag_inode(fs_info, defrag);
- }
- atomic_dec(&fs_info->defrag_running);
-
- /*
- * during unmount, we use the transaction_wait queue to
- * wait for the defragger to stop
- */
- wake_up(&fs_info->transaction_wait);
- return 0;
-}
-
-/* simple helper to fault in pages and copy. This should go away
- * and be replaced with calls into generic code.
- */
-static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
- struct page **prepared_pages,
- struct iov_iter *i)
-{
- size_t copied = 0;
- size_t total_copied = 0;
- int pg = 0;
- int offset = offset_in_page(pos);
-
- while (write_bytes > 0) {
- size_t count = min_t(size_t,
- PAGE_SIZE - offset, write_bytes);
- struct page *page = prepared_pages[pg];
- /*
- * Copy data from userspace to the current page
- */
- copied = copy_page_from_iter_atomic(page, offset, count, i);
-
- /* Flush processor's dcache for this page */
- flush_dcache_page(page);
-
- /*
- * if we get a partial write, we can end up with
- * partially up to date pages. These add
- * a lot of complexity, so make sure they don't
- * happen by forcing this copy to be retried.
- *
- * The rest of the btrfs_file_write code will fall
- * back to page at a time copies after we return 0.
- */
- if (unlikely(copied < count)) {
- if (!PageUptodate(page)) {
- iov_iter_revert(i, copied);
- copied = 0;
- }
- if (!copied)
- break;
- }
-
- write_bytes -= copied;
- total_copied += copied;
- offset += copied;
- if (offset == PAGE_SIZE) {
- pg++;
- offset = 0;
- }
- }
- return total_copied;
-}
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "file-item.h"
+#include "ioctl.h"
+#include "file.h"
+#include "super.h"
+#include "print-tree.h"
/*
- * unlocks pages after btrfs_file_write is done with them
+ * Unlock folio after btrfs_file_write() is done with it.
*/
-static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
- struct page **pages, size_t num_pages,
+static void btrfs_drop_folio(struct btrfs_fs_info *fs_info, struct folio *folio,
u64 pos, u64 copied)
{
- size_t i;
u64 block_start = round_down(pos, fs_info->sectorsize);
u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
ASSERT(block_len <= U32_MAX);
- for (i = 0; i < num_pages; i++) {
- /* page checked is some magic around finding pages that
- * have been modified without going through btrfs_set_page_dirty
- * clear it here. There should be no need to mark the pages
- * accessed as prepare_pages should have marked them accessed
- * in prepare_pages via find_or_create_page()
- */
- btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
- block_len);
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
+ /*
+ * Folio checked is some magic around finding folios that have been
+ * modified without going through btrfs_dirty_folio(). Clear it here.
+ * There should be no need to mark the folio accessed, as
+ * prepare_one_folio() should have already marked it accessed via
+ * find_or_create_page().
+ */
+ btrfs_folio_clamp_clear_checked(fs_info, folio, block_start, block_len);
+ folio_unlock(folio);
+ folio_put(folio);
}
/*
- * After btrfs_copy_from_user(), update the following things for delalloc:
- * - Mark newly dirtied pages as DELALLOC in the io tree.
+ * After copy_folio_from_iter_atomic(), update the following things for delalloc:
+ * - Mark newly dirtied folio as DELALLOC in the io tree.
* Used to advise which range is to be written back.
- * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
+ * - Mark modified folio as Uptodate/Dirty and not needing COW fixup
* - Update inode size for past EOF write
*/
-int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos, size_t write_bytes,
- struct extent_state **cached, bool noreserve)
+int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos,
+ size_t write_bytes, struct extent_state **cached, bool noreserve)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- int err = 0;
- int i;
+ int ret = 0;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
- u64 end_pos = pos + write_bytes;
+ const u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(&inode->vfs_inode);
unsigned int extra_bits = 0;
@@ -488,9 +86,9 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
extra_bits |= EXTENT_NORESERVE;
start_pos = round_down(pos, fs_info->sectorsize);
- num_bytes = round_up(write_bytes + pos - start_pos,
- fs_info->sectorsize);
+ num_bytes = round_up(end_pos - start_pos, fs_info->sectorsize);
ASSERT(num_bytes <= U32_MAX);
+ ASSERT(folio_pos(folio) <= pos && folio_next_pos(folio) >= end_pos);
end_of_last_block = start_pos + num_bytes - 1;
@@ -498,22 +96,18 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
* The pages may have already been dirty, clear out old accounting so
* we can set things up properly
*/
- clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached);
+ btrfs_clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ cached);
- err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+ ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
extra_bits, cached);
- if (err)
- return err;
-
- for (i = 0; i < num_pages; i++) {
- struct page *p = pages[i];
+ if (ret)
+ return ret;
- btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
- btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
- btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
- }
+ btrfs_folio_clamp_set_uptodate(fs_info, folio, start_pos, num_bytes);
+ btrfs_folio_clamp_clear_checked(fs_info, folio, start_pos, num_bytes);
+ btrfs_folio_clamp_set_dirty(fs_info, folio, start_pos, num_bytes);
/*
* we've only changed i_size in ram, and we haven't updated
@@ -526,159 +120,6 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
}
/*
- * this drops all the extents in the cache that intersect the range
- * [start, end]. Existing extents are split as required.
- */
-void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
- int skip_pinned)
-{
- struct extent_map *em;
- struct extent_map *split = NULL;
- struct extent_map *split2 = NULL;
- struct extent_map_tree *em_tree = &inode->extent_tree;
- u64 len = end - start + 1;
- u64 gen;
- int ret;
- int testend = 1;
- unsigned long flags;
- int compressed = 0;
- bool modified;
-
- WARN_ON(end < start);
- if (end == (u64)-1) {
- len = (u64)-1;
- testend = 0;
- }
- while (1) {
- int no_splits = 0;
-
- modified = false;
- if (!split)
- split = alloc_extent_map();
- if (!split2)
- split2 = alloc_extent_map();
- if (!split || !split2)
- no_splits = 1;
-
- write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
- if (!em) {
- write_unlock(&em_tree->lock);
- break;
- }
- flags = em->flags;
- gen = em->generation;
- if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
- if (testend && em->start + em->len >= start + len) {
- free_extent_map(em);
- write_unlock(&em_tree->lock);
- break;
- }
- start = em->start + em->len;
- if (testend)
- len = start + len - (em->start + em->len);
- free_extent_map(em);
- write_unlock(&em_tree->lock);
- continue;
- }
- compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
- clear_bit(EXTENT_FLAG_LOGGING, &flags);
- modified = !list_empty(&em->list);
- if (no_splits)
- goto next;
-
- if (em->start < start) {
- split->start = em->start;
- split->len = start - em->start;
-
- if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- split->orig_start = em->orig_start;
- split->block_start = em->block_start;
-
- if (compressed)
- split->block_len = em->block_len;
- else
- split->block_len = split->len;
- split->orig_block_len = max(split->block_len,
- em->orig_block_len);
- split->ram_bytes = em->ram_bytes;
- } else {
- split->orig_start = split->start;
- split->block_len = 0;
- split->block_start = em->block_start;
- split->orig_block_len = 0;
- split->ram_bytes = split->len;
- }
-
- split->generation = gen;
- split->flags = flags;
- split->compress_type = em->compress_type;
- replace_extent_mapping(em_tree, em, split, modified);
- free_extent_map(split);
- split = split2;
- split2 = NULL;
- }
- if (testend && em->start + em->len > start + len) {
- u64 diff = start + len - em->start;
-
- split->start = start + len;
- split->len = em->start + em->len - (start + len);
- split->flags = flags;
- split->compress_type = em->compress_type;
- split->generation = gen;
-
- if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- split->orig_block_len = max(em->block_len,
- em->orig_block_len);
-
- split->ram_bytes = em->ram_bytes;
- if (compressed) {
- split->block_len = em->block_len;
- split->block_start = em->block_start;
- split->orig_start = em->orig_start;
- } else {
- split->block_len = split->len;
- split->block_start = em->block_start
- + diff;
- split->orig_start = em->orig_start;
- }
- } else {
- split->ram_bytes = split->len;
- split->orig_start = split->start;
- split->block_len = 0;
- split->block_start = em->block_start;
- split->orig_block_len = 0;
- }
-
- if (extent_map_in_tree(em)) {
- replace_extent_mapping(em_tree, em, split,
- modified);
- } else {
- ret = add_extent_mapping(em_tree, split,
- modified);
- ASSERT(ret == 0); /* Logic error */
- }
- free_extent_map(split);
- split = NULL;
- }
-next:
- if (extent_map_in_tree(em))
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
-
- /* once for us */
- free_extent_map(em);
- /* once for the tree*/
- free_extent_map(em);
- }
- if (split)
- free_extent_map(split);
- if (split2)
- free_extent_map(split2);
-}
-
-/*
* this is very complex, but the basic idea is to drop all extents
* in the range start - end. hint_block is filled in with a block number
* that would be a good hint to the block allocator for this file.
@@ -700,7 +141,6 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
- struct btrfs_ref ref = { 0 };
struct btrfs_key key;
struct btrfs_key new_key;
u64 ino = btrfs_ino(inode);
@@ -718,7 +158,6 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
int modify_tree = -1;
int update_refs;
int found = 0;
- int leafs_visited = 0;
struct btrfs_path *path = args->path;
args->bytes_found = 0;
@@ -736,12 +175,12 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
}
if (args->drop_cache)
- btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);
+ btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
- if (args->start >= inode->disk_i_size && !args->replace_extent)
+ if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
modify_tree = 0;
- update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
+ update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -756,11 +195,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
path->slots[0]--;
}
ret = 0;
- leafs_visited++;
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
ret = btrfs_next_leaf(root, path);
if (ret < 0)
break;
@@ -768,7 +210,6 @@ next_slot:
ret = 0;
break;
}
- leafs_visited++;
leaf = path->nodes[0];
recow = 1;
}
@@ -837,7 +278,11 @@ next_slot:
* | -------- extent -------- |
*/
if (args->start > key.offset && args->end < extent_end) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
@@ -867,19 +312,24 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->start);
- btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
- btrfs_init_generic_ref(&ref,
- BTRFS_ADD_DELAYED_REF,
- disk_bytenr, num_bytes, 0);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- new_key.objectid,
- args->start - extent_offset,
- 0, false);
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = disk_bytenr,
+ .num_bytes = num_bytes,
+ .parent = 0,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+ btrfs_init_data_ref(&ref, new_key.objectid,
+ args->start - extent_offset,
+ 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
- BUG_ON(ret); /* -ENOMEM */
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
}
key.offset = args->start;
}
@@ -901,13 +351,12 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = args->end;
- btrfs_set_item_key_safe(fs_info, path, &new_key);
+ btrfs_set_item_key_safe(trans, path, &new_key);
extent_offset += args->end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->end);
- btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += args->end - key.offset;
break;
@@ -919,7 +368,11 @@ next_slot:
* | -------- extent -------- |
*/
if (args->start > key.offset && args->end >= extent_end) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
@@ -927,7 +380,6 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
args->start - key.offset);
- btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += extent_end - args->start;
if (args->end == extent_end)
@@ -947,7 +399,11 @@ delete_extent_item:
del_slot = path->slots[0];
del_nr = 1;
} else {
- BUG_ON(del_slot + del_nr != path->slots[0]);
+ if (WARN_ON(del_slot + del_nr != path->slots[0])) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
del_nr++;
}
@@ -957,16 +413,22 @@ delete_extent_item:
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
- btrfs_init_generic_ref(&ref,
- BTRFS_DROP_DELAYED_REF,
- disk_bytenr, num_bytes, 0);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- key.objectid,
- key.offset - extent_offset, 0,
- false);
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = disk_bytenr,
+ .num_bytes = num_bytes,
+ .parent = 0,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+ btrfs_init_data_ref(&ref, key.objectid,
+ key.offset - extent_offset,
+ 0, false);
ret = btrfs_free_extent(trans, &ref);
- BUG_ON(ret); /* -ENOMEM */
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
args->bytes_found += extent_end - key.offset;
}
@@ -980,7 +442,7 @@ delete_extent_item:
ret = btrfs_del_items(trans, root, path, del_slot,
del_nr);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1014,7 +476,7 @@ delete_extent_item:
* which case it unlocked our path, so check path->locks[0] matches a
* write lock.
*/
- if (!ret && args->replace_extent && leafs_visited == 1 &&
+ if (!ret && args->replace_extent &&
path->locks[0] == BTRFS_WRITE_LOCK &&
btrfs_leaf_free_space(leaf) >=
sizeof(struct btrfs_item) + args->extent_item_size) {
@@ -1029,7 +491,8 @@ delete_extent_item:
if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
path->slots[0]++;
}
- btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
+ btrfs_setup_item_for_insert(trans, root, path, &key,
+ args->extent_item_size);
args->extent_inserted = true;
}
@@ -1043,20 +506,19 @@ out:
return ret;
}
-static int extent_mergeable(struct extent_buffer *leaf, int slot,
- u64 objectid, u64 bytenr, u64 orig_offset,
- u64 *start, u64 *end)
+static bool extent_mergeable(struct extent_buffer *leaf, int slot, u64 objectid,
+ u64 bytenr, u64 orig_offset, u64 *start, u64 *end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
u64 extent_end;
if (slot < 0 || slot >= btrfs_header_nritems(leaf))
- return 0;
+ return false;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
- return 0;
+ return false;
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
@@ -1065,15 +527,15 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
- return 0;
+ return false;
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
if ((*start && *start != key.offset) || (*end && *end != extent_end))
- return 0;
+ return false;
*start = key.offset;
*end = extent_end;
- return 1;
+ return true;
}
/*
@@ -1086,10 +548,9 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 start, u64 end)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_file_extent_item *fi;
struct btrfs_ref ref = { 0 };
struct btrfs_key key;
@@ -1125,21 +586,20 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != ino ||
- key.type != BTRFS_EXTENT_DATA_KEY) {
+ if (unlikely(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
+ if (unlikely(btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
}
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (key.offset > start || extent_end < end) {
+ if (unlikely(key.offset > start || extent_end < end)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -1157,7 +617,7 @@ again:
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
- btrfs_set_item_key_safe(fs_info, path, &new_key);
+ btrfs_set_item_key_safe(trans, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
@@ -1172,7 +632,6 @@ again:
trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
end - other_start);
- btrfs_mark_buffer_dirty(leaf);
goto out;
}
}
@@ -1191,7 +650,7 @@ again:
trans->transid);
path->slots[0]++;
new_key.offset = start;
- btrfs_set_item_key_safe(fs_info, path, &new_key);
+ btrfs_set_item_key_safe(trans, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -1201,7 +660,6 @@ again:
other_end - start);
btrfs_set_file_extent_offset(leaf, fi,
start - orig_offset);
- btrfs_mark_buffer_dirty(leaf);
goto out;
}
}
@@ -1216,7 +674,7 @@ again:
btrfs_release_path(path);
goto again;
}
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1235,14 +693,16 @@ again:
btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - split);
- btrfs_mark_buffer_dirty(leaf);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
- num_bytes, 0);
- btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
- orig_offset, 0, false);
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = 0;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_root_id(root);
+ btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1250,7 +710,7 @@ again:
if (split == start) {
key.offset = start;
} else {
- if (start != key.offset) {
+ if (unlikely(start != key.offset)) {
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -1263,10 +723,14 @@ again:
other_start = end;
other_end = 0;
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, 0);
- btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
- 0, false);
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = 0;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_root_id(root);
+ btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
if (extent_mergeable(leaf, path->slots[0] + 1,
ino, bytenr, orig_offset,
&other_start, &other_end)) {
@@ -1278,7 +742,7 @@ again:
del_slot = path->slots[0] + 1;
del_nr++;
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1296,7 +760,7 @@ again:
del_slot = path->slots[0];
del_nr++;
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1307,7 +771,6 @@ again:
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
} else {
fi = btrfs_item_ptr(leaf, del_slot - 1,
struct btrfs_file_extent_item);
@@ -1316,137 +779,131 @@ again:
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - key.offset);
- btrfs_mark_buffer_dirty(leaf);
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
out:
- btrfs_free_path(path);
return ret;
}
/*
- * on error we return an unlocked page and the error value
- * on success we return a locked page and 0
+ * On error, return an unlocked folio and the error value.
+ * On success, return a locked folio and 0.
*/
-static int prepare_uptodate_page(struct inode *inode,
- struct page *page, u64 pos,
- bool force_uptodate)
+static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 pos,
+ u64 len)
{
+ u64 clamp_start = max_t(u64, pos, folio_pos(folio));
+ u64 clamp_end = min_t(u64, pos + len, folio_next_pos(folio));
+ const u32 blocksize = inode_to_fs_info(inode)->sectorsize;
int ret = 0;
- if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
- !PageUptodate(page)) {
- ret = btrfs_readpage(NULL, page);
- if (ret)
- return ret;
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- return -EIO;
- }
+ if (folio_test_uptodate(folio))
+ return 0;
- /*
- * Since btrfs_readpage() will unlock the page before it
- * returns, there is a window where btrfs_releasepage() can be
- * called to release the page. Here we check both inode
- * mapping and PagePrivate() to make sure the page was not
- * released.
- *
- * The private flag check is essential for subpage as we need
- * to store extra bitmap using page->private.
- */
- if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
- unlock_page(page);
- return -EAGAIN;
- }
+ if (IS_ALIGNED(clamp_start, blocksize) &&
+ IS_ALIGNED(clamp_end, blocksize))
+ return 0;
+
+ ret = btrfs_read_folio(NULL, folio);
+ if (ret)
+ return ret;
+ folio_lock(folio);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ folio_unlock(folio);
+ return -EIO;
+ }
+
+ /*
+ * Since btrfs_read_folio() will unlock the folio before it returns,
+ * there is a window where btrfs_release_folio() can be called to
+ * release the folio. Here we check both inode mapping and folio
+ * private to make sure the folio was not released.
+ *
+ * The private flag check is essential for subpage as we need to store
+ * extra bitmap using folio private.
+ */
+ if (folio->mapping != inode->i_mapping || !folio_test_private(folio)) {
+ folio_unlock(folio);
+ return -EAGAIN;
}
return 0;
}
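/*
 * A minimal sketch of the fast-path decision above, with the btrfs types
 * stripped out (hypothetical helper, not taken from the tree): after the
 * write range is clamped to the folio, a read is only needed when the
 * range does not cover whole blocks of a not-yet-uptodate folio.
 */
static inline bool write_needs_read(u64 clamp_start, u64 clamp_end,
				    u32 blocksize, bool uptodate)
{
	if (uptodate)
		return false;
	/* Fully block-aligned writes overwrite their blocks entirely. */
	return !(IS_ALIGNED(clamp_start, blocksize) &&
		 IS_ALIGNED(clamp_end, blocksize));
}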
+static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
+{
+ gfp_t gfp;
+
+ gfp = btrfs_alloc_write_mask(inode->i_mapping);
+ if (nowait) {
+ gfp &= ~__GFP_DIRECT_RECLAIM;
+ gfp |= GFP_NOWAIT;
+ }
+
+ return gfp;
+}
+
/*
- * this just gets pages into the page cache and locks them down.
+ * Get a folio into the page cache and lock it.
*/
-static noinline int prepare_pages(struct inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
- size_t write_bytes, bool force_uptodate)
+static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret,
+ loff_t pos, size_t write_bytes,
+ bool nowait)
{
- int i;
- unsigned long index = pos >> PAGE_SHIFT;
- gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
- int err = 0;
- int faili;
+ const pgoff_t index = pos >> PAGE_SHIFT;
+ gfp_t mask = get_prepare_gfp_flags(inode, nowait);
+ fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN) |
+ fgf_set_order(write_bytes);
+ struct folio *folio;
+ int ret = 0;
- for (i = 0; i < num_pages; i++) {
again:
- pages[i] = find_or_create_page(inode->i_mapping, index + i,
- mask | __GFP_WRITE);
- if (!pages[i]) {
- faili = i - 1;
- err = -ENOMEM;
- goto fail;
- }
-
- err = set_page_extent_mapped(pages[i]);
- if (err < 0) {
- faili = i;
- goto fail;
- }
-
- if (i == 0)
- err = prepare_uptodate_page(inode, pages[i], pos,
- force_uptodate);
- if (!err && i == num_pages - 1)
- err = prepare_uptodate_page(inode, pages[i],
- pos + write_bytes, false);
- if (err) {
- put_page(pages[i]);
- if (err == -EAGAIN) {
- err = 0;
- goto again;
- }
- faili = i - 1;
- goto fail;
+ folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
+ }
+ ret = prepare_uptodate_folio(inode, folio, pos, write_bytes);
+ if (ret) {
+ /* The folio is already unlocked. */
+ folio_put(folio);
+ if (!nowait && ret == -EAGAIN) {
+ ret = 0;
+ goto again;
}
- wait_on_page_writeback(pages[i]);
+ return ret;
}
-
+ *folio_ret = folio;
return 0;
-fail:
- while (faili >= 0) {
- unlock_page(pages[faili]);
- put_page(pages[faili]);
- faili--;
- }
- return err;
-
}
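/*
 * Hedged usage sketch for prepare_one_folio() (hypothetical caller, the
 * name example_touch_range is invented): on failure the folio has already
 * been unlocked and released, on success the caller owns a locked,
 * extent-mapped, referenced folio.
 */
static int example_touch_range(struct inode *inode, loff_t pos, size_t len)
{
	struct folio *folio = NULL;
	int ret;

	ret = prepare_one_folio(inode, &folio, pos, len, false);
	if (ret)
		return ret;	/* nothing to unlock or put here */

	/* ... modify the locked folio ... */

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}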
/*
- * This function locks the extent and properly waits for data=ordered extents
- * to finish before allowing the pages to be modified if need.
+ * Locks the extent and properly waits for data=ordered extents to finish
+ * before allowing the folio to be modified if needed.
*
- * The return value:
+ * Return:
* 1 - the extent is locked
* 0 - the extent is not locked, and everything is OK
- * -EAGAIN - need re-prepare the pages
- * the other < 0 number - Something wrong happens
+ * -EAGAIN - need to prepare the folios again
*/
static noinline int
-lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
- size_t write_bytes,
- u64 *lockstart, u64 *lockend,
+lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
+ loff_t pos, size_t write_bytes,
+ u64 *lockstart, u64 *lockend, bool nowait,
struct extent_state **cached_state)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start_pos;
u64 last_pos;
- int i;
int ret = 0;
start_pos = round_down(pos, fs_info->sectorsize);
@@ -1455,20 +912,28 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
if (start_pos < inode->vfs_inode.i_size) {
struct btrfs_ordered_extent *ordered;
- lock_extent_bits(&inode->io_tree, start_pos, last_pos,
- cached_state);
+ if (nowait) {
+ if (!btrfs_try_lock_extent(&inode->io_tree, start_pos,
+ last_pos, cached_state)) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return -EAGAIN;
+ }
+ } else {
+ btrfs_lock_extent(&inode->io_tree, start_pos, last_pos,
+ cached_state);
+ }
+
ordered = btrfs_lookup_ordered_range(inode, start_pos,
last_pos - start_pos + 1);
if (ordered &&
ordered->file_offset + ordered->num_bytes > start_pos &&
ordered->file_offset <= last_pos) {
- unlock_extent_cached(&inode->io_tree, start_pos,
- last_pos, cached_state);
- for (i = 0; i < num_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
- btrfs_start_ordered_extent(ordered, 1);
+ btrfs_unlock_extent(&inode->io_tree, start_pos, last_pos,
+ cached_state);
+ folio_unlock(folio);
+ folio_put(folio);
+ btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
return -EAGAIN;
}
@@ -1481,98 +946,96 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
/*
- * We should be called after prepare_pages() which should have locked
+ * We should be called after prepare_one_folio() which should have locked
* the folio in the range.
*/
- for (i = 0; i < num_pages; i++)
- WARN_ON(!PageLocked(pages[i]));
+ WARN_ON(!folio_test_locked(folio));
return ret;
}
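/*
 * Sketch of how the tri-state return value above is meant to be consumed
 * (hypothetical caller; the in-tree consumer is copy_one_range() further
 * down).
 */
static int example_lock_folio_range(struct btrfs_inode *inode, struct folio *folio,
				    loff_t pos, size_t len, bool nowait)
{
	struct extent_state *cached = NULL;
	u64 lockstart, lockend;
	int locked;

	locked = lock_and_cleanup_extent_if_need(inode, folio, pos, len,
						 &lockstart, &lockend,
						 nowait, &cached);
	if (locked < 0)
		return locked;	/* -EAGAIN: folio was dropped, prepare it again */

	/* ... dirty the folio ... */

	if (locked)
		btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached);
	else
		btrfs_free_extent_state(cached);
	return 0;
}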
-static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
+/*
+ * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
+ *
+ * @pos: File offset.
+ * @write_bytes: The length to write, will be updated to the nocow writeable
+ * range.
+ * @nowait: Indicate if we can block or not (non-blocking IO context).
+ *
+ * This function will flush ordered extents in the range to ensure proper
+ * nocow checks.
+ *
+ * Return:
+ * > 0 If we can nocow, and updates @write_bytes.
+ * 0 If we can't do a nocow write.
+ * -EAGAIN If we can't do a nocow write because snapshotting of the inode's
+ * root is in progress or because we are in a non-blocking IO
+ * context and need to block (@nowait is true).
+ * < 0 If an error happened.
+ *
+ * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
+ */
+int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes, bool nowait)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
+ struct extent_state *cached_state = NULL;
u64 lockstart, lockend;
- u64 num_bytes;
- int ret;
+ u64 cur_offset;
+ int ret = 0;
if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
return 0;
- if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
+ if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
return -EAGAIN;
lockstart = round_down(pos, fs_info->sectorsize);
lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1;
- num_bytes = lockend - lockstart + 1;
if (nowait) {
- struct btrfs_ordered_extent *ordered;
-
- if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
+ if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
+ &cached_state)) {
+ btrfs_drew_write_unlock(&root->snapshot_lock);
return -EAGAIN;
-
- ordered = btrfs_lookup_ordered_range(inode, lockstart,
- num_bytes);
- if (ordered) {
- btrfs_put_ordered_extent(ordered);
- ret = -EAGAIN;
- goto out_unlock;
}
} else {
- btrfs_lock_and_flush_ordered_range(inode, lockstart,
- lockend, NULL);
+ btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
+ &cached_state);
}
- ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
- NULL, NULL, NULL, false);
- if (ret <= 0) {
- ret = 0;
- if (!nowait)
- btrfs_drew_write_unlock(&root->snapshot_lock);
- } else {
- *write_bytes = min_t(size_t, *write_bytes ,
- num_bytes - pos + lockstart);
+ cur_offset = lockstart;
+ while (cur_offset < lockend) {
+ u64 num_bytes = lockend - cur_offset + 1;
+
+ ret = can_nocow_extent(inode, cur_offset, &num_bytes, NULL, nowait);
+ if (ret <= 0) {
+ /*
+ * If cur_offset == lockstart it means we haven't found
+ * any extent against which we can NOCOW, so unlock the
+ * snapshot lock.
+ */
+ if (cur_offset == lockstart)
+ btrfs_drew_write_unlock(&root->snapshot_lock);
+ break;
+ }
+ cur_offset += num_bytes;
}
-out_unlock:
- unlock_extent(&inode->io_tree, lockstart, lockend);
- return ret;
-}
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
-static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
- size_t *write_bytes)
-{
- return check_can_nocow(inode, pos, write_bytes, true);
-}
+ /*
+ * cur_offset > lockstart means there's at least a partial range we can
+ * NOCOW, and that range can cover one or more extents.
+ */
+ if (cur_offset > lockstart) {
+ *write_bytes = min_t(size_t, *write_bytes, cur_offset - pos);
+ return 1;
+ }
-/*
- * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
- *
- * @pos: File offset
- * @write_bytes: The length to write, will be updated to the nocow writeable
- * range
- *
- * This function will flush ordered extents in the range to ensure proper
- * nocow checks.
- *
- * Return:
- * >0 and update @write_bytes if we can do nocow write
- * 0 if we can't do nocow write
- * -EAGAIN if we can't get the needed lock or there are ordered extents
- * for * (nowait == true) case
- * <0 if other error happened
- *
- * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
- */
-int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
- size_t *write_bytes)
-{
- return check_can_nocow(inode, pos, write_bytes, false);
+ return ret;
}
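/*
 * Usage sketch for the locking contract documented above (hypothetical
 * caller, the name example_try_nocow is invented): only a return value
 * greater than zero leaves the snapshot drew lock held, so only that case
 * pairs with btrfs_check_nocow_unlock().
 */
static ssize_t example_try_nocow(struct btrfs_inode *inode, loff_t pos, size_t len)
{
	size_t nocow_bytes = len;
	int ret;

	ret = btrfs_check_nocow_lock(inode, pos, &nocow_bytes, false);
	if (ret <= 0)
		return ret;	/* 0: must COW; < 0: error (or -EAGAIN) */

	/* ... write up to nocow_bytes without reserving data space ... */

	btrfs_check_nocow_unlock(inode);
	return nocow_bytes;
}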
void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
@@ -1580,51 +1043,25 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
-static void update_time_for_write(struct inode *inode)
-{
- struct timespec64 now;
-
- if (IS_NOCMTIME(inode))
- return;
-
- now = current_time(inode);
- if (!timespec64_equal(&inode->i_mtime, &now))
- inode->i_mtime = now;
-
- if (!timespec64_equal(&inode->i_ctime, &now))
- inode->i_ctime = now;
-
- if (IS_I_VERSION(inode))
- inode_inc_iversion(inode);
-}
-
-static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
- size_t count)
+int btrfs_write_check(struct kiocb *iocb, size_t count)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
loff_t pos = iocb->ki_pos;
int ret;
loff_t oldsize;
- loff_t start_pos;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- size_t nocow_bytes = count;
- /* We will allocate space in case nodatacow is not set, so bail */
- if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
- return -EAGAIN;
- /*
- * There are holes in the range or parts of the range that must
- * be COWed (shared extents, RO block groups, etc), so just bail
- * out.
- */
- if (nocow_bytes < count)
- return -EAGAIN;
- }
+ /*
+ * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
+ * prealloc flags, as without those flags we always have to COW. We will
+ * later check if we can really COW into the target range (using
+ * can_nocow_extent() at btrfs_get_blocks_direct_write()).
+ */
+ if ((iocb->ki_flags & IOCB_NOWAIT) &&
+ !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
+ return -EAGAIN;
- current->backing_dev_info = inode_to_bdi(inode);
ret = file_remove_privs(file);
if (ret)
return ret;
@@ -1635,445 +1072,376 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
* need to start yet another transaction to update the inode as we will
* update the inode when we finish writing whatever data we write.
*/
- update_time_for_write(inode);
+ if (!IS_NOCMTIME(inode)) {
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ inode_inc_iversion(inode);
+ }
- start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
- if (start_pos > oldsize) {
+ if (pos > oldsize) {
/* Expand hole size to cover write data, preventing empty gap */
loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
- if (ret) {
- current->backing_dev_info = NULL;
+ if (ret)
return ret;
- }
}
return 0;
}
-static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
- struct iov_iter *i)
+static void release_space(struct btrfs_inode *inode, struct extent_changeset *data_reserved,
+ u64 start, u64 len, bool only_release_metadata)
{
- struct file *file = iocb->ki_filp;
- loff_t pos;
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct page **pages = NULL;
- struct extent_changeset *data_reserved = NULL;
- u64 release_bytes = 0;
- u64 lockstart;
- u64 lockend;
- size_t num_written = 0;
- int nrptrs;
- ssize_t ret;
- bool only_release_metadata = false;
- bool force_page_uptodate = false;
- loff_t old_isize = i_size_read(inode);
- unsigned int ilock_flags = 0;
-
- if (iocb->ki_flags & IOCB_NOWAIT)
- ilock_flags |= BTRFS_ILOCK_TRY;
+ if (len == 0)
+ return;
- ret = btrfs_inode_lock(inode, ilock_flags);
- if (ret < 0)
- return ret;
+ if (only_release_metadata) {
+ btrfs_check_nocow_unlock(inode);
+ btrfs_delalloc_release_metadata(inode, len, true);
+ } else {
+ const struct btrfs_fs_info *fs_info = inode->root->fs_info;
- ret = generic_write_checks(iocb, i);
- if (ret <= 0)
- goto out;
+ btrfs_delalloc_release_space(inode, data_reserved,
+ round_down(start, fs_info->sectorsize),
+ len, true);
+ }
+}
- ret = btrfs_write_check(iocb, i, ret);
- if (ret < 0)
- goto out;
+/*
+ * Reserve data and metadata space for this buffered write range.
+ *
+ * Return >0 for the number of bytes reserved, which is always block aligned.
+ * Return <0 for error.
+ */
+static ssize_t reserve_space(struct btrfs_inode *inode,
+ struct extent_changeset **data_reserved,
+ u64 start, size_t *len, bool nowait,
+ bool *only_release_metadata)
+{
+ const struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const unsigned int block_offset = (start & (fs_info->sectorsize - 1));
+ size_t reserve_bytes;
+ int ret;
- pos = iocb->ki_pos;
- nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
- PAGE_SIZE / (sizeof(struct page *)));
- nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
- nrptrs = max(nrptrs, 8);
- pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = btrfs_check_data_free_space(inode, data_reserved, start, *len, nowait);
+ if (ret < 0) {
+ int can_nocow;
- while (iov_iter_count(i) > 0) {
- struct extent_state *cached_state = NULL;
- size_t offset = offset_in_page(pos);
- size_t sector_offset;
- size_t write_bytes = min(iov_iter_count(i),
- nrptrs * (size_t)PAGE_SIZE -
- offset);
- size_t num_pages;
- size_t reserve_bytes;
- size_t dirty_pages;
- size_t copied;
- size_t dirty_sectors;
- size_t num_sectors;
- int extents_locked;
+ if (nowait && (ret == -ENOSPC || ret == -EAGAIN))
+ return -EAGAIN;
/*
- * Fault pages before locking them in prepare_pages
- * to avoid recursive lock
+ * If we don't have to COW at the offset, reserve metadata only.
+ * write_bytes may get smaller than requested here.
*/
- if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
- ret = -EFAULT;
- break;
- }
+ can_nocow = btrfs_check_nocow_lock(inode, start, len, nowait);
+ if (can_nocow < 0)
+ ret = can_nocow;
+ if (can_nocow > 0)
+ ret = 0;
+ if (ret)
+ return ret;
+ *only_release_metadata = true;
+ }
- only_release_metadata = false;
- sector_offset = pos & (fs_info->sectorsize - 1);
+ reserve_bytes = round_up(*len + block_offset, fs_info->sectorsize);
+ WARN_ON(reserve_bytes == 0);
+ ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes,
+ reserve_bytes, nowait);
+ if (ret) {
+ if (!*only_release_metadata)
+ btrfs_free_reserved_data_space(inode, *data_reserved,
+ start, *len);
+ else
+ btrfs_check_nocow_unlock(inode);
- extent_changeset_release(data_reserved);
- ret = btrfs_check_data_free_space(BTRFS_I(inode),
- &data_reserved, pos,
- write_bytes);
- if (ret < 0) {
- /*
- * If we don't have to COW at the offset, reserve
- * metadata only. write_bytes may get smaller than
- * requested here.
- */
- if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
- &write_bytes) > 0)
- only_release_metadata = true;
- else
- break;
- }
+ if (nowait && ret == -ENOSPC)
+ ret = -EAGAIN;
+ return ret;
+ }
+ return reserve_bytes;
+}
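/*
 * Worked example for the reservation rounding above (hypothetical numbers,
 * assuming a 4K sector size): for start == 6144 and *len == 10000,
 * block_offset is 2048, so reserve_bytes = round_up(12048, 4096) = 12288,
 * i.e. the three blocks spanning [4096, 16384) that the write touches.
 */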
- num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
- WARN_ON(num_pages > nrptrs);
- reserve_bytes = round_up(write_bytes + sector_offset,
- fs_info->sectorsize);
- WARN_ON(reserve_bytes == 0);
- ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
- reserve_bytes);
- if (ret) {
- if (!only_release_metadata)
- btrfs_free_reserved_data_space(BTRFS_I(inode),
- data_reserved, pos,
- write_bytes);
- else
- btrfs_check_nocow_unlock(BTRFS_I(inode));
- break;
- }
+/* Shrink the reserved data and metadata space from @reserved_len to @new_len. */
+static void shrink_reserved_space(struct btrfs_inode *inode,
+ struct extent_changeset *data_reserved,
+ u64 reserved_start, u64 reserved_len,
+ u64 new_len, bool only_release_metadata)
+{
+ const u64 diff = reserved_len - new_len;
- release_bytes = reserve_bytes;
-again:
- /*
- * This is going to setup the pages array with the number of
- * pages we want, so we don't really need to worry about the
- * contents of pages from loop to loop
- */
- ret = prepare_pages(inode, pages, num_pages,
- pos, write_bytes,
- force_page_uptodate);
- if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes);
- break;
- }
+ ASSERT(new_len <= reserved_len);
+ btrfs_delalloc_shrink_extents(inode, reserved_len, new_len);
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, diff, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved,
+ reserved_start + new_len, diff, true);
+}
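/*
 * Worked example for the shrink above (hypothetical numbers, 4K blocks):
 * with reserved_len == 12K and new_len == 8K, diff is 4K, so the unused
 * tail at reserved_start + 8K is returned, either as metadata only or as
 * data plus metadata depending on only_release_metadata.
 */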
- extents_locked = lock_and_cleanup_extent_if_need(
- BTRFS_I(inode), pages,
- num_pages, pos, write_bytes, &lockstart,
- &lockend, &cached_state);
- if (extents_locked < 0) {
- if (extents_locked == -EAGAIN)
- goto again;
- btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes);
- ret = extents_locked;
- break;
- }
+/* Calculate the maximum amount of bytes we can write into one folio. */
+static size_t calc_write_bytes(const struct btrfs_inode *inode,
+ const struct iov_iter *iter, u64 start)
+{
+ const size_t max_folio_size = mapping_max_folio_size(inode->vfs_inode.i_mapping);
+
+ return min(max_folio_size - (start & (max_folio_size - 1)),
+ iov_iter_count(iter));
+}
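/*
 * Worked example for the clamp above (hypothetical numbers, assuming a
 * maximum folio size of 64K): for start == 60K and a 100K iov, the first
 * pass is capped at 64K - (60K mod 64K) = 4K, so a single copy never
 * crosses a folio boundary; once start is folio aligned, later passes can
 * take up to the full 64K.
 */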
+
+/*
+ * Do the heavy-lifting work to copy one range into one folio of the page cache.
+ *
+ * Return > 0 in case we copied all bytes or just some of them.
+ * Return 0 if no bytes were copied, in which case the caller should retry.
+ * Return <0 on error.
+ */
+static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter,
+ struct extent_changeset **data_reserved, u64 start,
+ bool nowait)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_state *cached_state = NULL;
+ size_t write_bytes = calc_write_bytes(inode, iter, start);
+ size_t copied;
+ const u64 reserved_start = round_down(start, fs_info->sectorsize);
+ u64 reserved_len;
+ struct folio *folio = NULL;
+ int extents_locked;
+ u64 lockstart;
+ u64 lockend;
+ bool only_release_metadata = false;
+ const unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
+ int ret;
- copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
+ /*
+ * Fault in the pages before locking them in prepare_one_folio() to avoid
+ * a recursive lock.
+ */
+ if (unlikely(fault_in_iov_iter_readable(iter, write_bytes)))
+ return -EFAULT;
+ extent_changeset_release(*data_reserved);
+ ret = reserve_space(inode, data_reserved, start, &write_bytes, nowait,
+ &only_release_metadata);
+ if (ret < 0)
+ return ret;
+ reserved_len = ret;
+ /* Write range must be inside the reserved range. */
+ ASSERT(reserved_start <= start);
+ ASSERT(start + write_bytes <= reserved_start + reserved_len);
- num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
- dirty_sectors = round_up(copied + sector_offset,
- fs_info->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
+again:
+ ret = balance_dirty_pages_ratelimited_flags(inode->vfs_inode.i_mapping,
+ bdp_flags);
+ if (ret) {
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ return ret;
+ }
- /*
- * if we have trouble faulting in the pages, fall
- * back to one page at a time
- */
- if (copied < write_bytes)
- nrptrs = 1;
+ ret = prepare_one_folio(&inode->vfs_inode, &folio, start, write_bytes, false);
+ if (ret) {
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ return ret;
+ }
- if (copied == 0) {
- force_page_uptodate = true;
- dirty_sectors = 0;
- dirty_pages = 0;
- } else {
- force_page_uptodate = false;
- dirty_pages = DIV_ROUND_UP(copied + offset,
- PAGE_SIZE);
- }
+ /*
+ * The reserved range goes beyond the current folio; shrink the reserved
+ * space to the folio boundary.
+ */
+ if (reserved_start + reserved_len > folio_next_pos(folio)) {
+ const u64 last_block = folio_next_pos(folio);
+
+ shrink_reserved_space(inode, *data_reserved, reserved_start,
+ reserved_len, last_block - reserved_start,
+ only_release_metadata);
+ write_bytes = last_block - start;
+ reserved_len = last_block - reserved_start;
+ }
+
+ extents_locked = lock_and_cleanup_extent_if_need(inode, folio, start,
+ write_bytes, &lockstart,
+ &lockend, nowait,
+ &cached_state);
+ if (extents_locked < 0) {
+ if (!nowait && extents_locked == -EAGAIN)
+ goto again;
- if (num_sectors > dirty_sectors) {
- /* release everything except the sectors we dirtied */
- release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
- if (only_release_metadata) {
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- release_bytes, true);
- } else {
- u64 __pos;
-
- __pos = round_down(pos,
- fs_info->sectorsize) +
- (dirty_pages << PAGE_SHIFT);
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved, __pos,
- release_bytes, true);
- }
- }
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ ret = extents_locked;
+ return ret;
+ }
- release_bytes = round_up(copied + sector_offset,
- fs_info->sectorsize);
+ copied = copy_folio_from_iter_atomic(folio, offset_in_folio(folio, start),
+ write_bytes, iter);
+ flush_dcache_folio(folio);
- ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
- dirty_pages, pos, copied,
- &cached_state, only_release_metadata);
+ if (unlikely(copied < write_bytes)) {
+ u64 last_block;
/*
- * If we have not locked the extent range, because the range's
- * start offset is >= i_size, we might still have a non-NULL
- * cached extent state, acquired while marking the extent range
- * as delalloc through btrfs_dirty_pages(). Therefore free any
- * possible cached extent state to avoid a memory leak.
+ * The original write range doesn't need an uptodate folio as
+ * the range is block aligned. But now a short copy happened.
+ * We cannot handle it without an uptodate folio.
+ *
+ * So just revert the range and we will retry.
*/
- if (extents_locked)
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, &cached_state);
- else
- free_extent_state(cached_state);
-
- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
- if (ret) {
- btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
- break;
+ if (!folio_test_uptodate(folio)) {
+ iov_iter_revert(iter, copied);
+ copied = 0;
}
- release_bytes = 0;
- if (only_release_metadata)
- btrfs_check_nocow_unlock(BTRFS_I(inode));
-
- btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
-
- cond_resched();
+ /* No copied bytes, unlock, release reserved space and exit. */
+ if (copied == 0) {
+ if (extents_locked)
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend,
+ &cached_state);
+ else
+ btrfs_free_extent_state(cached_state);
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ btrfs_drop_folio(fs_info, folio, start, copied);
+ return 0;
+ }
- balance_dirty_pages_ratelimited(inode->i_mapping);
+ /* Release the reserved space beyond the last block. */
+ last_block = round_up(start + copied, fs_info->sectorsize);
- pos += copied;
- num_written += copied;
+ shrink_reserved_space(inode, *data_reserved, reserved_start,
+ reserved_len, last_block - reserved_start,
+ only_release_metadata);
+ reserved_len = last_block - reserved_start;
}
- kfree(pages);
-
- if (release_bytes) {
- if (only_release_metadata) {
- btrfs_check_nocow_unlock(BTRFS_I(inode));
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- release_bytes, true);
- } else {
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved,
- round_down(pos, fs_info->sectorsize),
- release_bytes, true);
- }
- }
+ ret = btrfs_dirty_folio(inode, folio, start, copied, &cached_state,
+ only_release_metadata);
+ /*
+ * If we have not locked the extent range, because the range's start
+ * offset is >= i_size, we might still have a non-NULL cached extent
+ * state, acquired while marking the extent range as delalloc through
+ * btrfs_dirty_folio(). Therefore free any possible cached extent state
+ * to avoid a memory leak.
+ */
+ if (extents_locked)
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ else
+ btrfs_free_extent_state(cached_state);
- extent_changeset_free(data_reserved);
- if (num_written > 0) {
- pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
- iocb->ki_pos += num_written;
+ btrfs_delalloc_release_extents(inode, reserved_len);
+ if (ret) {
+ btrfs_drop_folio(fs_info, folio, start, copied);
+ release_space(inode, *data_reserved, reserved_start, reserved_len,
+ only_release_metadata);
+ return ret;
}
-out:
- btrfs_inode_unlock(inode, ilock_flags);
- return num_written ? num_written : ret;
-}
-
-static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
- const struct iov_iter *iter, loff_t offset)
-{
- const u32 blocksize_mask = fs_info->sectorsize - 1;
-
- if (offset & blocksize_mask)
- return -EINVAL;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- return -EINVAL;
+ if (only_release_metadata)
+ btrfs_check_nocow_unlock(inode);
- return 0;
+ btrfs_drop_folio(fs_info, folio, start, copied);
+ return copied;
}
-static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
- const bool is_sync_write = (iocb->ki_flags & IOCB_DSYNC);
struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
loff_t pos;
- ssize_t written = 0;
- ssize_t written_buffered;
- size_t prev_left = 0;
- loff_t endbyte;
- ssize_t err;
+ struct inode *inode = file_inode(file);
+ struct extent_changeset *data_reserved = NULL;
+ size_t num_written = 0;
+ ssize_t ret;
+ loff_t old_isize;
unsigned int ilock_flags = 0;
+ const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (nowait)
ilock_flags |= BTRFS_ILOCK_TRY;
- /* If the write DIO is within EOF, use a shared lock */
- if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
- ilock_flags |= BTRFS_ILOCK_SHARED;
+ ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
+ if (ret < 0)
+ return ret;
-relock:
- err = btrfs_inode_lock(inode, ilock_flags);
- if (err < 0)
- return err;
+ /*
+ * We can only trust the isize with inode lock held, or it can race with
+ * other buffered writes and cause an incorrect call to
+ * pagecache_isize_extended() that overwrites existing data.
+ */
+ old_isize = i_size_read(inode);
- err = generic_write_checks(iocb, from);
- if (err <= 0) {
- btrfs_inode_unlock(inode, ilock_flags);
- return err;
- }
+ ret = generic_write_checks(iocb, iter);
+ if (ret <= 0)
+ goto out;
- err = btrfs_write_check(iocb, from, err);
- if (err < 0) {
- btrfs_inode_unlock(inode, ilock_flags);
+ ret = btrfs_write_check(iocb, ret);
+ if (ret < 0)
goto out;
- }
pos = iocb->ki_pos;
- /*
- * Re-check since file size may have changed just before taking the
- * lock or pos may have changed because of O_APPEND in generic_write_check()
- */
- if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
- pos + iov_iter_count(from) > i_size_read(inode)) {
- btrfs_inode_unlock(inode, ilock_flags);
- ilock_flags &= ~BTRFS_ILOCK_SHARED;
- goto relock;
+ while (iov_iter_count(iter) > 0) {
+ ret = copy_one_range(BTRFS_I(inode), iter, &data_reserved, pos, nowait);
+ if (ret < 0)
+ break;
+ pos += ret;
+ num_written += ret;
+ cond_resched();
}
- if (check_direct_IO(fs_info, from, pos)) {
- btrfs_inode_unlock(inode, ilock_flags);
- goto buffered;
+ extent_changeset_free(data_reserved);
+ if (num_written > 0) {
+ pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
+ iocb->ki_pos += num_written;
}
+out:
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ return num_written ? num_written : ret;
+}
- /*
- * We remove IOCB_DSYNC so that we don't deadlock when iomap_dio_rw()
- * calls generic_write_sync() (through iomap_dio_complete()), because
- * that results in calling fsync (btrfs_sync_file()) which will try to
- * lock the inode in exclusive/write mode.
- */
- if (is_sync_write)
- iocb->ki_flags &= ~IOCB_DSYNC;
-
- /*
- * The iov_iter can be mapped to the same file range we are writing to.
- * If that's the case, then we will deadlock in the iomap code, because
- * it first calls our callback btrfs_dio_iomap_begin(), which will create
- * an ordered extent, and after that it will fault in the pages that the
- * iov_iter refers to. During the fault in we end up in the readahead
- * pages code (starting at btrfs_readahead()), which will lock the range,
- * find that ordered extent and then wait for it to complete (at
- * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
- * obviously the ordered extent can never complete as we didn't submit
- * yet the respective bio(s). This always happens when the buffer is
- * memory mapped to the same file range, since the iomap DIO code always
- * invalidates pages in the target file range (after starting and waiting
- * for any writeback).
- *
- * So here we disable page faults in the iov_iter and then retry if we
- * got -EFAULT, faulting in the pages before the retry.
- */
-again:
- from->nofault = true;
- err = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
- IOMAP_DIO_PARTIAL, written);
- from->nofault = false;
-
- /* No increment (+=) because iomap returns a cumulative value. */
- if (err > 0)
- written = err;
+static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ const struct btrfs_ioctl_encoded_io_args *encoded)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ loff_t count;
+ ssize_t ret;
- if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
- const size_t left = iov_iter_count(from);
+ btrfs_inode_lock(BTRFS_I(inode), 0);
+ count = encoded->len;
+ ret = generic_write_checks_count(iocb, &count);
+ if (ret == 0 && count != encoded->len) {
/*
- * We have more data left to write. Try to fault in as many as
- * possible of the remainder pages and retry. We do this without
- * releasing and locking again the inode, to prevent races with
- * truncate.
- *
- * Also, in case the iov refers to pages in the file range of the
- * file we want to write to (due to a mmap), we could enter an
- * infinite loop if we retry after faulting the pages in, since
- * iomap will invalidate any pages in the range early on, before
- * it tries to fault in the pages of the iov. So we keep track of
- * how much was left of iov in the previous EFAULT and fallback
- * to buffered IO in case we haven't made any progress.
+ * The write got truncated by generic_write_checks_count(). We
+ * can't do a partial encoded write.
*/
- if (left == prev_left) {
- err = -ENOTBLK;
- } else {
- fault_in_iov_iter_readable(from, left);
- prev_left = left;
- goto again;
- }
+ ret = -EFBIG;
}
-
- btrfs_inode_unlock(inode, ilock_flags);
-
- /*
- * Add back IOCB_DSYNC. Our caller, btrfs_file_write_iter(), will do
- * the fsync (call generic_write_sync()).
- */
- if (is_sync_write)
- iocb->ki_flags |= IOCB_DSYNC;
-
- /* If 'err' is -ENOTBLK then it means we must fallback to buffered IO. */
- if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
+ if (ret || encoded->len == 0)
goto out;
-buffered:
- pos = iocb->ki_pos;
- written_buffered = btrfs_buffered_write(iocb, from);
- if (written_buffered < 0) {
- err = written_buffered;
- goto out;
- }
- /*
- * Ensure all data is persisted. We want the next direct IO read to be
- * able to read what was just written.
- */
- endbyte = pos + written_buffered - 1;
- err = btrfs_fdatawrite_range(inode, pos, endbyte);
- if (err)
- goto out;
- err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
- if (err)
+ ret = btrfs_write_check(iocb, encoded->len);
+ if (ret < 0)
goto out;
- written += written_buffered;
- iocb->ki_pos = pos + written_buffered;
- invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
- endbyte >> PAGE_SHIFT);
+
+ ret = btrfs_do_encoded_write(iocb, from, encoded);
out:
- return err < 0 ? err : written;
+ btrfs_inode_unlock(BTRFS_I(inode), 0);
+ return ret;
}
-static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
- struct iov_iter *from)
+ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
+ const struct btrfs_ioctl_encoded_io_args *encoded)
{
struct file *file = iocb->ki_filp;
struct btrfs_inode *inode = BTRFS_I(file_inode(file));
- ssize_t num_written = 0;
- const bool sync = iocb->ki_flags & IOCB_DSYNC;
+ ssize_t num_written, num_sync;
+ if (unlikely(btrfs_is_shutdown(inode->root->fs_info)))
+ return -EIO;
/*
* If the fs flips readonly due to some impossible error, although we
* have opened a file as writable, we have to stop this write operation
@@ -2082,38 +1450,46 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (BTRFS_FS_ERROR(inode->root->fs_info))
return -EROFS;
- if (!(iocb->ki_flags & IOCB_DIRECT) &&
- (iocb->ki_flags & IOCB_NOWAIT))
+ if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
- if (sync)
- atomic_inc(&inode->sync_writers);
-
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (encoded) {
+ num_written = btrfs_encoded_write(iocb, from, encoded);
+ num_sync = encoded->len;
+ } else if (iocb->ki_flags & IOCB_DIRECT) {
num_written = btrfs_direct_write(iocb, from);
- else
+ num_sync = num_written;
+ } else {
num_written = btrfs_buffered_write(iocb, from);
+ num_sync = num_written;
+ }
btrfs_set_inode_last_sub_trans(inode);
- if (num_written > 0)
- num_written = generic_write_sync(iocb, num_written);
-
- if (sync)
- atomic_dec(&inode->sync_writers);
+ if (num_sync > 0) {
+ num_sync = generic_write_sync(iocb, num_sync);
+ if (num_sync < 0)
+ num_written = num_sync;
+ }
- current->backing_dev_info = NULL;
return num_written;
}
+static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ return btrfs_do_write_iter(iocb, from, NULL);
+}
+
int btrfs_release_file(struct inode *inode, struct file *filp)
{
struct btrfs_file_private *private = filp->private_data;
- if (private && private->filldir_buf)
+ if (private) {
kfree(private->filldir_buf);
- kfree(private);
- filp->private_data = NULL;
+ btrfs_free_extent_state(private->llseek_cached_state);
+ kfree(private);
+ filp->private_data = NULL;
+ }
/*
* Set by setattr when we are about to truncate a file from a non-zero
@@ -2127,7 +1503,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
return 0;
}
-static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+static int start_ordered_ops(struct btrfs_inode *inode, loff_t start, loff_t end)
{
int ret;
struct blk_plug plug;
@@ -2139,9 +1515,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
* several segments of stripe length (currently 64K).
*/
blk_start_plug(&plug);
- atomic_inc(&BTRFS_I(inode)->sync_writers);
ret = btrfs_fdatawrite_range(inode, start, end);
- atomic_dec(&BTRFS_I(inode)->sync_writers);
blk_finish_plug(&plug);
return ret;
@@ -2149,10 +1523,10 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
{
- struct btrfs_inode *inode = BTRFS_I(ctx->inode);
+ struct btrfs_inode *inode = ctx->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- if (btrfs_inode_in_log(inode, fs_info->generation) &&
+ if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
list_empty(&ctx->ordered_extents))
return true;
@@ -2163,7 +1537,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
* and for a fast fsync we don't wait for that, we only wait for the
* writeback to complete.
*/
- if (inode->last_trans <= fs_info->last_trans_committed &&
+ if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
(test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
list_empty(&ctx->ordered_extents)))
return true;
@@ -2185,14 +1559,21 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
- struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
u64 len;
bool full_sync;
+ bool skip_ilock = false;
+
+ if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
+ skip_ilock = true;
+ current->journal_info = NULL;
+ btrfs_assert_inode_locked(inode);
+ }
trace_btrfs_sync_file(file, datasync);
@@ -2220,19 +1601,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret)
goto out;
- btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ down_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
atomic_inc(&root->log_batch);
/*
- * Always check for the full sync flag while holding the inode's lock,
- * to avoid races with other tasks. The flag must be either set all the
- * time during logging or always off all the time while logging.
- */
- full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
-
- /*
* Before we acquired the inode's lock and the mmap lock, someone may
* have dirtied more pages in the target range. We need to make sure
* that writeback for any such pages does not start while we are logging
@@ -2252,11 +1628,24 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret) {
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
/*
+ * Always check for the full sync flag while holding the inode's lock,
+ * to avoid races with other tasks. The flag must be either set all the
+ * time during logging or always off all the time while logging.
+ * We check the flag here after starting delalloc above, because when
+ * running delalloc the full sync flag may be set if we need to drop
+ * extra extent map ranges due to temporary memory allocation failures.
+ */
+ full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
+
+ /*
* We have to do this here to avoid the priority inversion of waiting on
* IO of a lower priority task while holding a transaction open.
*
@@ -2273,15 +1662,29 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
if (full_sync || btrfs_is_zoned(fs_info)) {
ret = btrfs_wait_ordered_range(inode, start, len);
+ clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
} else {
/*
* Get our ordered extents as soon as possible to avoid doing
* checksum lookups in the csum tree, and use instead the
* checksums attached to the ordered extents.
*/
- btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
- &ctx.ordered_extents);
- ret = filemap_fdatawait_range(inode->i_mapping, start, end);
+ btrfs_get_ordered_extents_for_logging(inode, &ctx.ordered_extents);
+ ret = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, end);
+ if (ret)
+ goto out_release_extents;
+
+ /*
+ * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
+ * starting and waiting for writeback, because for buffered IO
+ * it may have been set during the end IO callback
+ * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
+ * case an error happened and we need to wait for ordered
+ * extents to complete so that any extent maps that point to
+ * unwritten locations are dropped and we don't log them.
+ */
+ if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags))
+ ret = btrfs_wait_ordered_range(inode, start, len);
}
if (ret)
@@ -2289,15 +1692,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch);
- smp_mb();
if (skip_inode_logging(&ctx)) {
/*
* We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
* reason, it's no longer relevant.
*/
- clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
/*
* An ordered extent might have started before and completed
* already with io errors, in which case the inode was not
@@ -2305,10 +1706,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* for any errors that might have happened since we last
* checked called fsync.
*/
- ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
+ ret = filemap_check_wb_err(inode->vfs_inode.i_mapping, file->f_wb_err);
goto out_release_extents;
}
+ btrfs_init_log_ctx_scratch_eb(&ctx);
+
/*
* We use start here because we will need to wait on the IO to complete
* in btrfs_sync_log, which could require joining a transaction (for
@@ -2328,10 +1731,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trans->in_fsync = true;
ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
+ /*
+ * Scratch eb no longer needed, release it before syncing the log or
+ * committing the transaction, to avoid holding unnecessary memory during
+ * such long operations.
+ */
+ if (ctx.scratch_eb) {
+ free_extent_buffer(ctx.scratch_eb);
+ ctx.scratch_eb = NULL;
+ }
btrfs_release_log_ctx_extents(&ctx);
if (ret < 0) {
/* Fallthrough and commit/free transaction. */
- ret = 1;
+ ret = BTRFS_LOG_FORCE_COMMIT;
}
/* we've logged all the items and now have a consistent
@@ -2344,29 +1756,71 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+
+ if (ret == BTRFS_NO_LOG_SYNC) {
+ ret = btrfs_end_transaction(trans);
+ goto out;
+ }
- if (ret != BTRFS_NO_LOG_SYNC) {
+ /* We successfully logged the inode, attempt to sync the log. */
+ if (!ret) {
+ ret = btrfs_sync_log(trans, root, &ctx);
if (!ret) {
- ret = btrfs_sync_log(trans, root, &ctx);
- if (!ret) {
- ret = btrfs_end_transaction(trans);
- goto out;
- }
- }
- if (!full_sync) {
- ret = btrfs_wait_ordered_range(inode, start, len);
- if (ret) {
- btrfs_end_transaction(trans);
- goto out;
- }
+ ret = btrfs_end_transaction(trans);
+ goto out;
}
- ret = btrfs_commit_transaction(trans);
- } else {
+ }
+
+ /*
+ * At this point we need to commit the transaction because we had
+ * btrfs_need_log_full_commit() or some other error.
+ *
+ * If we didn't do a full sync we have to stop the trans handle, wait on
+ * the ordered extents, start it again and commit the transaction. If
+ * we attempt to wait on the ordered extents here we could deadlock with
+ * something like fallocate() that is holding the extent lock trying to
+ * start a transaction while some other thread is trying to commit the
+ * transaction while we (fsync) are currently holding the transaction
+ * open.
+ */
+ if (!full_sync) {
ret = btrfs_end_transaction(trans);
+ if (ret)
+ goto out;
+ ret = btrfs_wait_ordered_range(inode, start, len);
+ if (ret)
+ goto out;
+
+ /*
+ * This is safe to use here because we're only interested in
+ * making sure the transaction that had the ordered extents is
+ * committed. We aren't waiting on anything past this point,
+ * we're purely getting the transaction and committing it.
+ */
+ trans = btrfs_attach_transaction_barrier(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+
+ /*
+ * We committed the transaction and there's no currently
+ * running transaction, this means everything we care
+ * about made it to disk and we are done.
+ */
+ if (ret == -ENOENT)
+ ret = 0;
+ goto out;
+ }
}
+
+ ret = btrfs_commit_transaction(trans);
out:
+ free_extent_buffer(ctx.scratch_eb);
ASSERT(list_empty(&ctx.list));
+ ASSERT(list_empty(&ctx.conflict_inodes));
err = file_check_and_advance_wb_err(file);
if (!ret)
ret = err;
@@ -2374,56 +1828,260 @@ out:
out_release_extents:
btrfs_release_log_ctx_extents(&ctx);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
+/*
+ * btrfs_page_mkwrite() is not allowed to change the file size as it gets
+ * called from a page fault handler when a page is first dirtied. Hence we must
+ * be careful to check for EOF conditions here. We set the page up correctly
+ * for a written page which means we get ENOSPC checking when writing into
+ * holes and correct delalloc and unwritten extent mapping on filesystems that
+ * support these features.
+ *
+ * We are not allowed to take the i_mutex here so we have to play games to
+ * protect against truncate races as the page could now be beyond EOF. Because
+ * truncate_setsize() writes the inode size before removing pages, once we have
+ * the page lock we can determine safely if the page is beyond EOF. If it is not
+ * beyond EOF, then the page is guaranteed safe against truncation until we
+ * unlock the page.
+ */
+static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct folio *folio = page_folio(page);
+ struct btrfs_inode *inode = BTRFS_I(file_inode(vmf->vma->vm_file));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct btrfs_ordered_extent *ordered;
+ struct extent_state *cached_state = NULL;
+ struct extent_changeset *data_reserved = NULL;
+ unsigned long zero_start;
+ loff_t size;
+ size_t fsize = folio_size(folio);
+ int ret;
+ bool only_release_metadata = false;
+ u64 reserved_space;
+ u64 page_start;
+ u64 page_end;
+ u64 end;
+
+ reserved_space = fsize;
+
+ sb_start_pagefault(inode->vfs_inode.i_sb);
+ page_start = folio_pos(folio);
+ page_end = page_start + folio_size(folio) - 1;
+ end = page_end;
+
+ /*
+ * Reserving delalloc space after obtaining the page lock can lead to
+ * deadlock. For example, if a dirty page is locked by this function
+ * and the call to btrfs_delalloc_reserve_space() ends up triggering
+ * dirty page write out, then the btrfs_writepages() function could
+ * end up waiting indefinitely to get a lock on the page currently
+ * being processed by btrfs_page_mkwrite() function.
+ */
+ ret = btrfs_check_data_free_space(inode, &data_reserved, page_start,
+ reserved_space, false);
+ if (ret < 0) {
+ size_t write_bytes = reserved_space;
+
+ if (btrfs_check_nocow_lock(inode, page_start, &write_bytes, false) <= 0)
+ goto out_noreserve;
+
+ only_release_metadata = true;
+
+ /*
+ * Can't write the whole range, there may be shared extents or
+ * holes in the range, bail out with @only_release_metadata set
+ * to true so that we unlock the nocow lock before returning the
+ * error.
+ */
+ if (write_bytes < reserved_space)
+ goto out_noreserve;
+ }
+ ret = btrfs_delalloc_reserve_metadata(inode, reserved_space,
+ reserved_space, false);
+ if (ret < 0) {
+ if (!only_release_metadata)
+ btrfs_free_reserved_data_space(inode, data_reserved,
+ page_start, reserved_space);
+ goto out_noreserve;
+ }
+
+ ret = file_update_time(vmf->vma->vm_file);
+ if (ret < 0)
+ goto out;
+again:
+ down_read(&inode->i_mmap_lock);
+ folio_lock(folio);
+ size = i_size_read(&inode->vfs_inode);
+
+ if ((folio->mapping != inode->vfs_inode.i_mapping) ||
+ (page_start >= size)) {
+ /* Page got truncated out from underneath us. */
+ goto out_unlock;
+ }
+ folio_wait_writeback(folio);
+
+ btrfs_lock_extent(io_tree, page_start, page_end, &cached_state);
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0) {
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
+ goto out_unlock;
+ }
+
+ /*
+ * We can't set the delalloc bits if there are pending ordered
+ * extents. Drop our locks and wait for them to finish.
+ */
+ ordered = btrfs_lookup_ordered_range(inode, page_start, fsize);
+ if (ordered) {
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
+ folio_unlock(folio);
+ up_read(&inode->i_mmap_lock);
+ btrfs_start_ordered_extent(ordered);
+ btrfs_put_ordered_extent(ordered);
+ goto again;
+ }
+
+ if (folio_contains(folio, (size - 1) >> PAGE_SHIFT)) {
+ reserved_space = round_up(size - page_start, fs_info->sectorsize);
+ if (reserved_space < fsize) {
+ const u64 to_free = fsize - reserved_space;
+
+ end = page_start + reserved_space - 1;
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, to_free, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved,
+ end + 1, to_free, true);
+ }
+ }
+
+ /*
+ * page_mkwrite gets called when the page is first dirtied after it's
+ * faulted in, but write(2) could also dirty a page and set delalloc
+ * bits. Thus, in this case, for space accounting reasons, we still need
+ * to clear any delalloc bits within this page range, since we have to
+ * reserve data and metadata space before folio_lock() (see above comments).
+ */
+ btrfs_clear_extent_bit(io_tree, page_start, end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, &cached_state);
+
+ ret = btrfs_set_extent_delalloc(inode, page_start, end, 0, &cached_state);
+ if (ret < 0) {
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
+ goto out_unlock;
+ }
+
+ /* Page is wholly or partially inside EOF. */
+ if (page_start + folio_size(folio) > size)
+ zero_start = offset_in_folio(folio, size);
+ else
+ zero_start = fsize;
+
+ if (zero_start != fsize)
+ folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
+
+ btrfs_folio_clear_checked(fs_info, folio, page_start, fsize);
+ btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
+ btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
+
+ btrfs_set_inode_last_sub_trans(inode);
+
+ if (only_release_metadata)
+ btrfs_set_extent_bit(io_tree, page_start, end, EXTENT_NORESERVE,
+ &cached_state);
+
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
+ up_read(&inode->i_mmap_lock);
+
+ btrfs_delalloc_release_extents(inode, fsize);
+ if (only_release_metadata)
+ btrfs_check_nocow_unlock(inode);
+ sb_end_pagefault(inode->vfs_inode.i_sb);
+ extent_changeset_free(data_reserved);
+ return VM_FAULT_LOCKED;
+
+out_unlock:
+ folio_unlock(folio);
+ up_read(&inode->i_mmap_lock);
+out:
+ btrfs_delalloc_release_extents(inode, fsize);
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, reserved_space, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved, page_start,
+ reserved_space, true);
+ extent_changeset_free(data_reserved);
+out_noreserve:
+ if (only_release_metadata)
+ btrfs_check_nocow_unlock(inode);
+
+ sb_end_pagefault(inode->vfs_inode.i_sb);
+
+ if (ret < 0)
+ return vmf_error(ret);
+
+ /* Make the VM retry the fault. */
+ return VM_FAULT_NOPAGE;
+}
+
static const struct vm_operations_struct btrfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = btrfs_page_mkwrite,
};
-static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
+static int btrfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *filp = desc->file;
struct address_space *mapping = filp->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(filp)))))
+ return -EIO;
+ if (!mapping->a_ops->read_folio)
return -ENOEXEC;
file_accessed(filp);
- vma->vm_ops = &btrfs_file_vm_ops;
+ desc->vm_ops = &btrfs_file_vm_ops;
return 0;
}
-static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
- int slot, u64 start, u64 end)
+static bool hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
+ int slot, u64 start, u64 end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
if (slot < 0 || slot >= btrfs_header_nritems(leaf))
- return 0;
+ return false;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY)
- return 0;
+ return false;
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
- return 0;
+ return false;
if (btrfs_file_extent_disk_bytenr(leaf, fi))
- return 0;
+ return false;
if (key.offset == end)
- return 1;
+ return true;
if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
- return 1;
- return 0;
+ return true;
+ return false;
}
static int fill_holes(struct btrfs_trans_handle *trans,
@@ -2435,7 +2093,6 @@ static int fill_holes(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct extent_map *hole_em;
- struct extent_map_tree *em_tree = &inode->extent_tree;
struct btrfs_key key;
int ret;
@@ -2469,7 +2126,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
goto out;
}
@@ -2477,7 +2134,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
u64 num_bytes;
key.offset = offset;
- btrfs_set_item_key_safe(fs_info, path, &key);
+ btrfs_set_item_key_safe(trans, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@ -2485,45 +2142,36 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
goto out;
}
btrfs_release_path(path);
- ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
- offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
+ ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
+ end - offset);
if (ret)
return ret;
out:
btrfs_release_path(path);
- hole_em = alloc_extent_map();
+ hole_em = btrfs_alloc_extent_map();
if (!hole_em) {
- btrfs_drop_extent_cache(inode, offset, end - 1, 0);
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
+ btrfs_drop_extent_map_range(inode, offset, end - 1, false);
+ btrfs_set_inode_full_sync(inode);
} else {
hole_em->start = offset;
hole_em->len = end - offset;
hole_em->ram_bytes = hole_em->len;
- hole_em->orig_start = offset;
- hole_em->block_start = EXTENT_MAP_HOLE;
- hole_em->block_len = 0;
- hole_em->orig_block_len = 0;
- hole_em->compress_type = BTRFS_COMPRESS_NONE;
+ hole_em->disk_bytenr = EXTENT_MAP_HOLE;
+ hole_em->disk_num_bytes = 0;
hole_em->generation = trans->transid;
- do {
- btrfs_drop_extent_cache(inode, offset, end - 1, 0);
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, hole_em, 1);
- write_unlock(&em_tree->lock);
- } while (ret == -EEXIST);
- free_extent_map(hole_em);
+ ret = btrfs_replace_extent_map_range(inode, hole_em, true);
+ btrfs_free_extent_map(hole_em);
if (ret)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &inode->runtime_flags);
+ btrfs_set_inode_full_sync(inode);
}
return 0;
@@ -2541,28 +2189,46 @@ static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
struct extent_map *em;
int ret = 0;
- em = btrfs_get_extent(inode, NULL, 0,
+ em = btrfs_get_extent(inode, NULL,
round_down(*start, fs_info->sectorsize),
round_up(*len, fs_info->sectorsize));
if (IS_ERR(em))
return PTR_ERR(em);
/* Hole or vacuum extent(only exists in no-hole mode) */
- if (em->block_start == EXTENT_MAP_HOLE) {
+ if (em->disk_bytenr == EXTENT_MAP_HOLE) {
ret = 1;
*len = em->start + em->len > *start + *len ?
0 : *start + *len - em->start - em->len;
*start = em->start + em->len;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ret;
}
-static int btrfs_punch_hole_lock_range(struct inode *inode,
- const u64 lockstart,
- const u64 lockend,
- struct extent_state **cached_state)
+/*
+ * Check if there is no folio in the range.
+ *
+ * We cannot utilize filemap_range_has_page() in a filemap with large folios
+ * as we can hit the following false positive:
+ *
+ *          start                          end
+ *            |                             |
+ *  |//|//|//|//|  |  |  |  |  |  |  |  |//|//|
+ *   \         /                        \      /
+ *      Folio A                          Folio B
+ *
+ * Large folios A and B cover the start and end indexes.
+ * In that case filemap_range_has_page() will always return true, but the above
+ * case is fine for btrfs_punch_hole_lock_range() usage.
+ *
+ * So here we only ensure that no other folios are in the range, excluding
+ * the head/tail large folios.
+ */
+static bool check_range_has_page(struct inode *inode, u64 start, u64 end)
{
+ struct folio_batch fbatch;
+ bool ret = false;
/*
* For subpage case, if the range is not at page boundary, we could
* have pages at the leading/tailing part of the range.
@@ -2570,45 +2236,66 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
* will always return true.
* So here we need to do extra page alignment for
* filemap_range_has_page().
+ *
+ * And do not decrease page_lockend right now, as it can be 0.
*/
- const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
- const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+ const u64 page_lockstart = round_up(start, PAGE_SIZE);
+ const u64 page_lockend = round_down(end + 1, PAGE_SIZE);
+ const pgoff_t start_index = page_lockstart >> PAGE_SHIFT;
+ const pgoff_t end_index = (page_lockend - 1) >> PAGE_SHIFT;
+ pgoff_t tmp = start_index;
+ int found_folios;
+
+ /* The same page or adjacent pages. */
+ if (page_lockend <= page_lockstart)
+ return false;
+
+ folio_batch_init(&fbatch);
+ found_folios = filemap_get_folios(inode->i_mapping, &tmp, end_index, &fbatch);
+ for (int i = 0; i < found_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ /* A large folio begins before the start. Not a target. */
+ if (folio->index < start_index)
+ continue;
+ /* A large folio extends beyond the end. Not a target. */
+ if (folio_next_index(folio) > end_index)
+ continue;
+ /* A folio doesn't cover the head/tail index. Found a target. */
+ ret = true;
+ break;
+ }
+ folio_batch_release(&fbatch);
+ return ret;
+}
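+
+/*
+ * Worked example for check_range_has_page() above (illustrative, not part of
+ * this change): with 4K pages, punching a hole covering [4K, 8K) that lies
+ * entirely inside one 2M large folio starting at offset 0 gives
+ * start_index == end_index == 1. The only folio found starts at index 0,
+ * i.e. before start_index, so it is skipped and false is returned, matching
+ * the "head/tail large folio is fine" case described above.
+ */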
+static void btrfs_punch_hole_lock_range(struct inode *inode,
+ const u64 lockstart, const u64 lockend,
+ struct extent_state **cached_state)
+{
while (1) {
- struct btrfs_ordered_extent *ordered;
- int ret;
-
truncate_pagecache_range(inode, lockstart, lockend);
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
- ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
- lockend);
-
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
/*
- * We need to make sure we have no ordered extents in this range
- * and nobody raced in and read a page in this range, if we did
- * we need to try again.
+ * We can't have ordered extents in the range, nor dirty/writeback
+ * pages, because we hold the inode's VFS lock in exclusive
+ * mode, we hold the inode's i_mmap_lock in exclusive mode,
+ * we have flushed all delalloc in the range and we have waited
+ * for any ordered extents in the range to complete.
+ * We can race with anyone reading pages from this range, so after
+ * locking the range check if we have pages in the range, and if
+ * we do, unlock the range and retry.
*/
- if ((!ordered ||
- (ordered->file_offset + ordered->num_bytes <= lockstart ||
- ordered->file_offset > lockend)) &&
- !filemap_range_has_page(inode->i_mapping,
- page_lockstart, page_lockend)) {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
+ if (!check_range_has_page(inode, lockstart, lockend))
break;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, cached_state);
- ret = btrfs_wait_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
- if (ret)
- return ret;
+
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
}
- return 0;
+
+ btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
}
static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
@@ -2624,7 +2311,6 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
int slot;
- struct btrfs_ref ref = { 0 };
int ret;
if (replace_len == 0)
@@ -2654,7 +2340,6 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
if (extent_info->is_new_extent)
btrfs_set_file_extent_generation(leaf, extent, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
@@ -2680,14 +2365,17 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
extent_info->qgroup_reserved,
&key);
} else {
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = extent_info->disk_offset,
+ .num_bytes = extent_info->disk_len,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
u64 ref_offset;
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
- extent_info->disk_offset,
- extent_info->disk_len, 0);
ref_offset = extent_info->file_offset - extent_info->data_offset;
- btrfs_init_data_ref(&ref, root->root_key.objectid,
- btrfs_ino(inode), ref_offset, 0, false);
+ btrfs_init_data_ref(&ref, btrfs_ino(inode), ref_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
}
@@ -2717,7 +2405,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
struct btrfs_trans_handle *trans = NULL;
- struct btrfs_block_rsv *rsv;
+ struct btrfs_block_rsv rsv;
unsigned int rsv_count;
u64 cur_offset;
u64 len = end - start;
@@ -2726,13 +2414,9 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
if (end <= start)
return -EINVAL;
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv) {
- ret = -ENOMEM;
- goto out;
- }
- rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
- rsv->failfast = 1;
+ btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
+ rsv.size = btrfs_calc_insert_metadata_size(fs_info, 1);
+ rsv.failfast = true;
/*
* 1 - update the inode
@@ -2749,13 +2433,14 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
- goto out_free;
+ goto out_release;
}
- ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
min_size, false);
- BUG_ON(ret);
- trans->block_rsv = rsv;
+ if (WARN_ON(ret))
+ goto out_trans;
+ trans->block_rsv = &rsv;
cur_offset = start;
drop_args.path = path;
@@ -2777,9 +2462,9 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
* got EOPNOTSUPP via prealloc then we messed up and
* need to abort.
*/
- if (ret &&
- (ret != -EOPNOTSUPP ||
- (extent_info && extent_info->is_new_extent)))
+ if (unlikely(ret &&
+ (ret != -EOPNOTSUPP ||
+ (extent_info && extent_info->is_new_extent))))
btrfs_abort_transaction(trans, ret);
break;
}
@@ -2790,7 +2475,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
cur_offset < ino_size) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_args.drop_end);
- if (ret) {
+ if (unlikely(ret)) {
/*
* If we failed then we didn't insert our hole
* entries for the area we dropped, so now the
@@ -2810,7 +2495,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
ret = btrfs_inode_clear_file_extent_range(inode,
cur_offset,
drop_args.drop_end - cur_offset);
- if (ret) {
+ if (unlikely(ret)) {
/*
* We couldn't clear our area, so we could
* presumably adjust up and corrupt the fs, so
@@ -2829,7 +2514,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
ret = btrfs_insert_replace_extent(trans, inode, path,
extent_info, replace_len,
drop_args.bytes_found);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -2838,7 +2523,25 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
extent_info->file_offset += replace_len;
}
- ret = btrfs_update_inode(trans, root, inode);
+ /*
+ * We are releasing our handle on the transaction, balance the
+ * dirty pages of the btree inode and flush delayed items, and
+ * then get a new transaction handle, which may now point to a
+ * new transaction in case someone else committed the
+ * transaction we used to replace/drop file extent items. So
+ * bump the inode's iversion and update mtime and ctime, except
+ * when we are called from a dedupe context. This is because a
+ * power failure/crash may happen after the transaction is
+ * committed and before we finish replacing/dropping all the
+ * file extent items we need.
+ */
+ inode_inc_iversion(&inode->vfs_inode);
+
+ if (!extent_info || extent_info->update_times)
+ inode_set_mtime_to_ts(&inode->vfs_inode,
+ inode_set_ctime_current(&inode->vfs_inode));
+
+ ret = btrfs_update_inode(trans, inode);
if (ret)
break;
@@ -2853,9 +2556,10 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, false);
- BUG_ON(ret); /* shouldn't happen */
- trans->block_rsv = rsv;
+ &rsv, min_size, false);
+ if (WARN_ON(ret))
+ break;
+ trans->block_rsv = &rsv;
cur_offset = drop_args.drop_end;
len = end - cur_offset;
@@ -2877,7 +2581,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
* maps for the replacement extents (or holes).
*/
if (extent_info && !extent_info->is_new_extent)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
+ btrfs_set_inode_full_sync(inode);
if (ret)
goto out_trans;
@@ -2905,7 +2609,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
cur_offset < drop_args.drop_end) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_args.drop_end);
- if (ret) {
+ if (unlikely(ret)) {
/* Same comment as above. */
btrfs_abort_transaction(trans, ret);
goto out_trans;
@@ -2914,7 +2618,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
/* See the comment in the loop above for the reasoning here. */
ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
drop_args.drop_end - cur_offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_trans;
}
@@ -2924,7 +2628,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
ret = btrfs_insert_replace_extent(trans, inode, path,
extent_info, extent_info->data_len,
drop_args.bytes_found);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_trans;
}
@@ -2932,22 +2636,22 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
out_trans:
if (!trans)
- goto out_free;
+ goto out_release;
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret)
btrfs_end_transaction(trans);
else
*trans_out = trans;
-out_free:
- btrfs_free_block_rsv(fs_info, rsv);
-out:
+out_release:
+ btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
return ret;
}
-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
@@ -2956,18 +2660,20 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
u64 lockend;
u64 tail_start;
u64 tail_len;
- u64 orig_start = offset;
+ const u64 orig_start = offset;
+ const u64 orig_end = offset + len - 1;
int ret = 0;
bool same_block;
u64 ino_size;
bool truncated_block = false;
bool updated_inode = false;
- ret = btrfs_wait_ordered_range(inode, offset, len);
+ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), offset, len);
if (ret)
- return ret;
+ goto out_only_mutex;
- btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
ino_size = round_up(inode->i_size, fs_info->sectorsize);
ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
if (ret < 0)
@@ -2978,24 +2684,23 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
- lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
- lockend = round_down(offset + len,
- btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
+ ret = file_modified(file);
+ if (ret)
+ goto out_only_mutex;
+
+ lockstart = round_up(offset, fs_info->sectorsize);
+ lockend = round_down(offset + len, fs_info->sectorsize) - 1;
same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
/*
- * We needn't truncate any block which is beyond the end of the file
- * because we are sure there is no data there.
- */
- /*
* Only do this if we are in the same block and we aren't doing the
* entire block.
*/
if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
- 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1,
+ orig_start, orig_end);
} else {
ret = 0;
}
@@ -3005,9 +2710,9 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* zero back part of the first block */
if (offset < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset, orig_start, orig_end);
if (ret) {
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
return ret;
}
}
@@ -3042,8 +2747,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (tail_start + tail_len < ino_size) {
truncated_block = true;
ret = btrfs_truncate_block(BTRFS_I(inode),
- tail_start + tail_len,
- 0, 1);
+ tail_start + tail_len - 1,
+ orig_start, orig_end);
if (ret)
goto out_only_mutex;
}
@@ -3055,10 +2760,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
- ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
- &cached_state);
- if (ret)
- goto out_only_mutex;
+ btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
path = btrfs_alloc_path();
if (!path) {
@@ -3074,14 +2776,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
out_only_mutex:
if (!updated_inode && truncated_block && !ret) {
/*
@@ -3091,24 +2793,23 @@ out_only_mutex:
* for detecting, at fsync time, if the inode isn't yet in the
* log tree or it's there but not up to date.
*/
- struct timespec64 now = current_time(inode);
+ struct timespec64 now = inode_set_ctime_current(inode);
inode_inc_iversion(inode);
- inode->i_mtime = now;
- inode->i_ctime = now;
+ inode_set_mtime_to_ts(inode, now);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
} else {
int ret2;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
if (!ret)
ret = ret2;
}
}
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
return ret;
}
@@ -3156,20 +2857,30 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ u64 range_start;
+ u64 range_end;
int ret;
int ret2;
if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
return 0;
+ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
+ range_end = round_up(end, root->fs_info->sectorsize);
+
+ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
+ range_end - range_start);
+ if (ret)
+ return ret;
+
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
i_size_write(inode, end);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
return ret ? ret : ret2;
@@ -3184,23 +2895,23 @@ enum {
static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
u64 offset)
{
- const u64 sectorsize = btrfs_inode_sectorsize(inode);
+ const u64 sectorsize = inode->root->fs_info->sectorsize;
struct extent_map *em;
int ret;
offset = round_down(offset, sectorsize);
- em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(inode, NULL, offset, sectorsize);
if (IS_ERR(em))
return PTR_ERR(em);
- if (em->block_start == EXTENT_MAP_HOLE)
+ if (em->disk_bytenr == EXTENT_MAP_HOLE)
ret = RANGE_BOUNDARY_HOLE;
- else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ else if (em->flags & EXTENT_FLAG_PREALLOC)
ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
else
ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ret;
}
@@ -3214,15 +2925,15 @@ static int btrfs_zero_range(struct inode *inode,
struct extent_changeset *data_reserved = NULL;
int ret;
u64 alloc_hint = 0;
- const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
+ const u64 sectorsize = fs_info->sectorsize;
+ const u64 orig_start = offset;
+ const u64 orig_end = offset + len - 1;
u64 alloc_start = round_down(offset, sectorsize);
u64 alloc_end = round_up(offset + len, sectorsize);
u64 bytes_to_reserve = 0;
bool space_reserved = false;
- inode_dio_wait(inode);
-
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start,
alloc_end - alloc_start);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
@@ -3237,8 +2948,7 @@ static int btrfs_zero_range(struct inode *inode,
* extents and holes, we drop all the existing extents and allocate a
* new prealloc extent, so that we get a larger contiguous disk extent.
*/
- if (em->start <= alloc_start &&
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) {
const u64 em_end = em->start + em->len;
if (em_end >= offset + len) {
@@ -3247,7 +2957,7 @@ static int btrfs_zero_range(struct inode *inode,
* do nothing except updating the inode's i_size if
* needed.
*/
- free_extent_map(em);
+ btrfs_free_extent_map(em);
ret = btrfs_fallocate_update_isize(inode, offset + len,
mode);
goto out;
@@ -3260,36 +2970,35 @@ static int btrfs_zero_range(struct inode *inode,
ASSERT(IS_ALIGNED(alloc_start, sectorsize));
len = offset + len - alloc_start;
offset = alloc_start;
- alloc_hint = em->block_start + em->len;
+ alloc_hint = btrfs_extent_map_block_start(em) + em->len;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
- sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start, sectorsize);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out;
}
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
- free_extent_map(em);
+ if (em->flags & EXTENT_FLAG_PREALLOC) {
+ btrfs_free_extent_map(em);
ret = btrfs_fallocate_update_isize(inode, offset + len,
mode);
goto out;
}
- if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
- free_extent_map(em);
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
- 0);
+ if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) {
+ btrfs_free_extent_map(em);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1,
+ orig_start, orig_end);
if (!ret)
ret = btrfs_fallocate_update_isize(inode,
offset + len,
mode);
return ret;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
alloc_start = round_down(offset, sectorsize);
alloc_end = alloc_start + sectorsize;
goto reserve_space;
@@ -3313,7 +3022,8 @@ static int btrfs_zero_range(struct inode *inode,
alloc_start = round_down(offset, sectorsize);
ret = 0;
} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
- ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset,
+ orig_start, orig_end);
if (ret)
goto out;
} else {
@@ -3330,8 +3040,8 @@ static int btrfs_zero_range(struct inode *inode,
alloc_end = round_up(offset + len, sectorsize);
ret = 0;
} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
- ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
- 0, 1);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1,
+ orig_start, orig_end);
if (ret)
goto out;
} else {
@@ -3351,23 +3061,21 @@ reserve_space:
if (ret < 0)
goto out;
space_reserved = true;
- ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
- &cached_state);
- if (ret)
- goto out;
+ btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
alloc_start, bytes_to_reserve);
if (ret) {
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
goto out;
}
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
- i_blocksize(inode),
+ fs_info->sectorsize,
offset + len, &alloc_hint);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
/* btrfs_prealloc_file_range releases reserved space on error */
if (ret) {
space_reserved = false;
@@ -3392,7 +3100,7 @@ static long btrfs_fallocate(struct file *file, int mode,
struct extent_changeset *data_reserved = NULL;
struct falloc_range *range;
struct falloc_range *tmp;
- struct list_head reserve_list;
+ LIST_HEAD(reserve_list);
u64 cur_offset;
u64 last_byte;
u64 alloc_start;
@@ -3400,12 +3108,18 @@ static long btrfs_fallocate(struct file *file, int mode,
u64 alloc_hint = 0;
u64 locked_end;
u64 actual_end = 0;
+ u64 data_space_needed = 0;
+ u64 data_space_reserved = 0;
+ u64 qgroup_reserved = 0;
struct extent_map *em;
- int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
+ int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
int ret;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(inode))))
+ return -EIO;
+
/* Do not allow fallocate in ZONED mode */
- if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
+ if (btrfs_is_zoned(inode_to_fs_info(inode)))
return -EOPNOTSUPP;
alloc_start = round_down(offset, blocksize);
@@ -3418,21 +3132,9 @@ static long btrfs_fallocate(struct file *file, int mode,
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
- return btrfs_punch_hole(inode, offset, len);
-
- /*
- * Only trigger disk allocation, don't trigger qgroup reserve
- *
- * For qgroup space, it will be checked later.
- */
- if (!(mode & FALLOC_FL_ZERO_RANGE)) {
- ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
- alloc_end - alloc_start);
- if (ret < 0)
- return ret;
- }
+ return btrfs_punch_hole(file, offset, len);
- btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
+ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
ret = inode_newsize_ok(inode, offset + len);
@@ -3440,6 +3142,10 @@ static long btrfs_fallocate(struct file *file, int mode,
goto out;
}
+ ret = file_modified(file);
+ if (ret)
+ goto out;
+
/*
* TODO: Move these two operations after we have checked
* accurate reserved space, or fallocate can still fail but
@@ -3458,117 +3164,109 @@ static long btrfs_fallocate(struct file *file, int mode,
* need to zero out the end of the block if i_size lands in the
* middle of a block.
*/
- ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size,
+ inode->i_size, (u64)-1);
if (ret)
goto out;
}
/*
- * wait for ordered IO before we have any locks. We'll loop again
- * below with the locks held.
+ * We have locked the inode at the VFS level (in exclusive mode) and we
+ * have locked the i_mmap_lock (in exclusive mode). Now, before
+ * locking the file range, flush all delalloc in the range and wait for
+ * all ordered extents in the range to complete. After this we can lock
+ * the file range and, due to the previous locking we did, we know there
+ * can't be more delalloc or ordered extents in the range.
*/
- ret = btrfs_wait_ordered_range(inode, alloc_start,
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), alloc_start,
alloc_end - alloc_start);
if (ret)
goto out;
if (mode & FALLOC_FL_ZERO_RANGE) {
ret = btrfs_zero_range(inode, offset, len, mode);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
return ret;
}
locked_end = alloc_end - 1;
- while (1) {
- struct btrfs_ordered_extent *ordered;
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ &cached_state);
- /* the extent lock is ordered inside the running
- * transaction
- */
- lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
- locked_end, &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
- locked_end);
-
- if (ordered &&
- ordered->file_offset + ordered->num_bytes > alloc_start &&
- ordered->file_offset < alloc_end) {
- btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- alloc_start, locked_end,
- &cached_state);
- /*
- * we can't wait on the range with the transaction
- * running or with the extent lock held
- */
- ret = btrfs_wait_ordered_range(inode, alloc_start,
- alloc_end - alloc_start);
- if (ret)
- goto out;
- } else {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- break;
- }
- }
+ btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
/* First, check if we exceed the qgroup limit */
- INIT_LIST_HEAD(&reserve_list);
while (cur_offset < alloc_end) {
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, cur_offset,
alloc_end - cur_offset);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
break;
}
- last_byte = min(extent_map_end(em), alloc_end);
- actual_end = min_t(u64, extent_map_end(em), offset + len);
+ last_byte = min(btrfs_extent_map_end(em), alloc_end);
+ actual_end = min_t(u64, btrfs_extent_map_end(em), offset + len);
last_byte = ALIGN(last_byte, blocksize);
- if (em->block_start == EXTENT_MAP_HOLE ||
+ if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(cur_offset >= inode->i_size &&
- !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
- ret = add_falloc_range(&reserve_list, cur_offset,
- last_byte - cur_offset);
+ !(em->flags & EXTENT_FLAG_PREALLOC))) {
+ const u64 range_len = last_byte - cur_offset;
+
+ ret = add_falloc_range(&reserve_list, cur_offset, range_len);
if (ret < 0) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
break;
}
ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
- &data_reserved, cur_offset,
- last_byte - cur_offset);
+ &data_reserved, cur_offset, range_len);
if (ret < 0) {
- cur_offset = last_byte;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
break;
}
- } else {
- /*
- * Do not need to reserve unwritten extent for this
- * range, free reserved data space first, otherwise
- * it'll result in false ENOSPC error.
- */
- btrfs_free_reserved_data_space(BTRFS_I(inode),
- data_reserved, cur_offset,
- last_byte - cur_offset);
+ qgroup_reserved += range_len;
+ data_space_needed += range_len;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
cur_offset = last_byte;
}
+ if (!ret && data_space_needed > 0) {
+ /*
+ * We are safe to reserve space here as we can't have delalloc
+ * in the range, see above.
+ */
+ ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+ data_space_needed);
+ if (!ret)
+ data_space_reserved = data_space_needed;
+ }
+
/*
* If ret is still 0, means we're OK to fallocate.
* Or just cleanup the list and exit.
*/
list_for_each_entry_safe(range, tmp, &reserve_list, list) {
- if (!ret)
+ if (!ret) {
ret = btrfs_prealloc_file_range(inode, mode,
range->start,
- range->len, i_blocksize(inode),
+ range->len, blocksize,
offset + len, &alloc_hint);
- else
+ /*
+ * btrfs_prealloc_file_range() releases space even
+ * if it returns an error.
+ */
+ data_space_reserved -= range->len;
+ qgroup_reserved -= range->len;
+ } else if (data_space_reserved > 0) {
btrfs_free_reserved_data_space(BTRFS_I(inode),
- data_reserved, range->start,
- range->len);
+ data_reserved, range->start,
+ range->len);
+ data_space_reserved -= range->len;
+ qgroup_reserved -= range->len;
+ } else if (qgroup_reserved > 0) {
+ btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
+ range->start, range->len, NULL);
+ qgroup_reserved -= range->len;
+ }
list_del(&range->list);
kfree(range);
}
@@ -3581,35 +3279,331 @@ static long btrfs_fallocate(struct file *file, int mode,
*/
ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ &cached_state);
out:
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
- /* Let go of our reservation. */
- if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
- btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
- cur_offset, alloc_end - cur_offset);
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
extent_changeset_free(data_reserved);
return ret;
}
-static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
- int whence)
+/*
+ * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
+ * that has unflushed and/or flushing delalloc. There might be other adjacent
+ * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
+ * looping while it gets adjacent subranges, merging them together.
+ */
+static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state,
+ bool *search_io_tree,
+ u64 *delalloc_start_ret, u64 *delalloc_end_ret)
{
+ u64 len = end + 1 - start;
+ u64 delalloc_len = 0;
+ struct btrfs_ordered_extent *oe;
+ u64 oe_start;
+ u64 oe_end;
+
+ /*
+ * Search the io tree first for EXTENT_DELALLOC. If we find any, it
+ * means we have delalloc (dirty pages) for which writeback has not
+ * started yet.
+ */
+ if (*search_io_tree) {
+ spin_lock(&inode->lock);
+ if (inode->delalloc_bytes > 0) {
+ spin_unlock(&inode->lock);
+ *delalloc_start_ret = start;
+ delalloc_len = btrfs_count_range_bits(&inode->io_tree,
+ delalloc_start_ret, end,
+ len, EXTENT_DELALLOC, 1,
+ cached_state);
+ } else {
+ spin_unlock(&inode->lock);
+ }
+ }
+
+ if (delalloc_len > 0) {
+ /*
+ * If delalloc was found then *delalloc_start_ret has a sector size
+ * aligned value (rounded down).
+ */
+ *delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
+
+ if (*delalloc_start_ret == start) {
+ /* Delalloc for the whole range, nothing more to do. */
+ if (*delalloc_end_ret == end)
+ return true;
+ /* Else trim our search range for ordered extents. */
+ start = *delalloc_end_ret + 1;
+ len = end + 1 - start;
+ }
+ } else {
+ /* No delalloc, future calls don't need to search again. */
+ *search_io_tree = false;
+ }
+
+ /*
+ * Now also check if there's any ordered extent in the range.
+ * We do this because:
+ *
+ * 1) When delalloc is flushed, the file range is locked, we clear the
+ * EXTENT_DELALLOC bit from the io tree and create an extent map and
+ * an ordered extent for the write. So we might just have been called
+ * after delalloc is flushed and before the ordered extent completes
+ * and inserts the new file extent item in the subvolume's btree;
+ *
+ * 2) We may have an ordered extent created by flushing delalloc for a
+ * subrange that starts before the subrange we found marked with
+ * EXTENT_DELALLOC in the io tree.
+ *
+ * We could also use the extent map tree to find such delalloc that is
+ * being flushed, but using the ordered extents tree is more efficient
+ * because it's usually much smaller as ordered extents are removed from
+ * the tree once they complete. With the extent maps, we may have them
+ * in the extent map tree for a very long time, and they were either
+ * created by previous writes or loaded by read operations.
+ */
+ oe = btrfs_lookup_first_ordered_range(inode, start, len);
+ if (!oe)
+ return (delalloc_len > 0);
+
+ /* The ordered extent may span beyond our search range. */
+ oe_start = max(oe->file_offset, start);
+ oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
+
+ btrfs_put_ordered_extent(oe);
+
+ /* Don't have unflushed delalloc, return the ordered extent range. */
+ if (delalloc_len == 0) {
+ *delalloc_start_ret = oe_start;
+ *delalloc_end_ret = oe_end;
+ return true;
+ }
+
+ /*
+ * We have both unflushed delalloc (io_tree) and an ordered extent.
+ * If the ranges are adjacent, return a combined range; otherwise
+ * return the leftmost range.
+ */
+ if (oe_start < *delalloc_start_ret) {
+ if (oe_end < *delalloc_start_ret)
+ *delalloc_end_ret = oe_end;
+ *delalloc_start_ret = oe_start;
+ } else if (*delalloc_end_ret + 1 == oe_start) {
+ *delalloc_end_ret = oe_end;
+ }
+
+ return true;
+}
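+
+/*
+ * Illustrative example of the merge above (not part of this change): if
+ * delalloc is found for [start, 64K) and an ordered extent covers
+ * [64K, 128K), the two are adjacent and the combined range [start, 128K)
+ * is returned. If instead the delalloc is at [64K, 96K) and the ordered
+ * extent at [0, 32K), only the leftmost range [0, 32K) is returned from
+ * this helper.
+ */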
+
+/*
+ * Check if there's delalloc in a given range.
+ *
+ * @inode: The inode.
+ * @start: The start offset of the range. It does not need to be
+ * sector size aligned.
+ * @end: The end offset (inclusive value) of the search range.
+ * It does not need to be sector size aligned.
+ * @cached_state: Extent state record used for speeding up delalloc
+ * searches in the inode's io_tree. Can be NULL.
+ * @delalloc_start_ret: Output argument, set to the start offset of the
+ * subrange found with delalloc (may not be sector size
+ * aligned).
+ * @delalloc_end_ret: Output argument, set to the end offset (inclusive value)
+ * of the subrange found with delalloc.
+ *
+ * Returns true if a subrange with delalloc is found within the given range, and
+ * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
+ * end offsets of the subrange.
+ */
+bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state,
+ u64 *delalloc_start_ret, u64 *delalloc_end_ret)
+{
+ u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
+ u64 prev_delalloc_end = 0;
+ bool search_io_tree = true;
+ bool ret = false;
+
+ while (cur_offset <= end) {
+ u64 delalloc_start;
+ u64 delalloc_end;
+ bool delalloc;
+
+ delalloc = find_delalloc_subrange(inode, cur_offset, end,
+ cached_state, &search_io_tree,
+ &delalloc_start,
+ &delalloc_end);
+ if (!delalloc)
+ break;
+
+ if (prev_delalloc_end == 0) {
+ /* First subrange found. */
+ *delalloc_start_ret = max(delalloc_start, start);
+ *delalloc_end_ret = delalloc_end;
+ ret = true;
+ } else if (delalloc_start == prev_delalloc_end + 1) {
+ /* Subrange adjacent to the previous one, merge them. */
+ *delalloc_end_ret = delalloc_end;
+ } else {
+ /* Subrange not adjacent to the previous one, exit. */
+ break;
+ }
+
+ prev_delalloc_end = delalloc_end;
+ cur_offset = delalloc_end + 1;
+ cond_resched();
+ }
+
+ return ret;
+}
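+
+/*
+ * Minimal usage sketch (illustrative, not part of this change), roughly what
+ * the lseek helper below does, optionally passing a NULL cached state:
+ *
+ *	u64 dstart, dend;
+ *
+ *	if (btrfs_find_delalloc_in_range(inode, 0, SZ_1M - 1, NULL,
+ *					 &dstart, &dend))
+ *		pr_info("leftmost delalloc: [%llu %llu]\n", dstart, dend);
+ */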
+
+/*
+ * Check if there's a hole or delalloc range in a range representing a hole (or
+ * prealloc extent) found in the inode's subvolume btree.
+ *
+ * @inode: The inode.
+ * @whence: Seek mode (SEEK_DATA or SEEK_HOLE).
+ * @start: Start offset of the hole region. It does not need to be sector
+ * size aligned.
+ * @end: End offset (inclusive value) of the hole region. It does not
+ * need to be sector size aligned.
+ * @start_ret: Return parameter, used to set the start of the subrange in the
+ * hole that matches the search criteria (seek mode), if such
+ * subrange is found (return value of the function is true).
+ * The value returned here may not be sector size aligned.
+ *
+ * Returns true if a subrange matching the given seek mode is found, and if one
+ * is found, it updates @start_ret with the start of the subrange.
+ */
+static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
+ struct extent_state **cached_state,
+ u64 start, u64 end, u64 *start_ret)
+{
+ u64 delalloc_start;
+ u64 delalloc_end;
+ bool delalloc;
+
+ delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
+ &delalloc_start, &delalloc_end);
+ if (delalloc && whence == SEEK_DATA) {
+ *start_ret = delalloc_start;
+ return true;
+ }
+
+ if (delalloc && whence == SEEK_HOLE) {
+ /*
+ * We found delalloc but it starts after our start offset. So we
+ * have a hole between our start offset and the delalloc start.
+ */
+ if (start < delalloc_start) {
+ *start_ret = start;
+ return true;
+ }
+ /*
+ * Delalloc range starts at our start offset.
+ * If the delalloc range's length is smaller than our range,
+ * then it means we have a hole that starts where the delalloc
+ * subrange ends.
+ */
+ if (delalloc_end < end) {
+ *start_ret = delalloc_end + 1;
+ return true;
+ }
+
+ /* There's delalloc for the whole range. */
+ return false;
+ }
+
+ if (!delalloc && whence == SEEK_HOLE) {
+ *start_ret = start;
+ return true;
+ }
+
+ /*
+ * No delalloc in the range and we are seeking for data. The caller has
+ * to iterate to the next extent item in the subvolume btree.
+ */
+ return false;
+}
+
+static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+{
+ struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
+ struct btrfs_file_private *private;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
- loff_t i_size = inode->vfs_inode.i_size;
+ struct extent_state **delalloc_cached_state;
+ const loff_t i_size = i_size_read(&inode->vfs_inode);
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_root *root = inode->root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ u64 last_extent_end;
u64 lockstart;
u64 lockend;
u64 start;
- u64 len;
- int ret = 0;
+ int ret;
+ bool found = false;
if (i_size == 0 || offset >= i_size)
return -ENXIO;
/*
+ * Quick path. If the inode has no prealloc extents and its number of
+ * bytes used matches its i_size, then it cannot have holes.
+ */
+ if (whence == SEEK_HOLE &&
+ !(inode->flags & BTRFS_INODE_PREALLOC) &&
+ inode_get_bytes(&inode->vfs_inode) == i_size)
+ return i_size;
+
+ spin_lock(&inode->lock);
+ private = file->private_data;
+ spin_unlock(&inode->lock);
+
+ if (private && private->owner_task != current) {
+ /*
+ * Not allocated by us, don't use it as its cached state is used
+ * by the task that allocated it, and we want neither to mess
+ * with it nor to get incorrect results, because it reflects a
+ * state that is invalid for the current task.
+ */
+ private = NULL;
+ } else if (!private) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ /*
+ * No worries if memory allocation failed.
+ * The private structure is used only for speeding up multiple
+ * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
+ * so everything will still be correct.
+ */
+ if (private) {
+ bool free = false;
+
+ private->owner_task = current;
+
+ spin_lock(&inode->lock);
+ if (file->private_data)
+ free = true;
+ else
+ file->private_data = private;
+ spin_unlock(&inode->lock);
+
+ if (free) {
+ kfree(private);
+ private = NULL;
+ }
+ }
+ }
+
+ if (private)
+ delalloc_cached_state = &private->llseek_cached_state;
+ else
+ delalloc_cached_state = NULL;
+
+ /*
* offset can be negative, in this case we start finding DATA/HOLE from
* the very start of the file.
*/
@@ -3620,45 +3614,174 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
if (lockend <= lockstart)
lockend = lockstart + fs_info->sectorsize;
lockend--;
- len = lockend - lockstart + 1;
- lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->reada = READA_FORWARD;
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ last_extent_end = lockstart;
+
+ btrfs_lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ goto out;
+ } else if (ret > 0 && path->slots[0] > 0) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+ path->slots[0]--;
+ }
while (start < i_size) {
- em = btrfs_get_extent_fiemap(inode, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- em = NULL;
- break;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *extent;
+ u64 extent_end;
+ u8 type;
+
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0)
+ break;
+
+ leaf = path->nodes[0];
}
- if (whence == SEEK_HOLE &&
- (em->block_start == EXTENT_MAP_HOLE ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
- break;
- else if (whence == SEEK_DATA &&
- (em->block_start != EXTENT_MAP_HOLE &&
- !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
break;
- start = em->start + em->len;
- free_extent_map(em);
- em = NULL;
+ extent_end = btrfs_file_extent_end(path);
+
+ /*
+ * In the first iteration we may have a slot that points to an
+ * extent that ends before our start offset, so skip it.
+ */
+ if (extent_end <= start) {
+ path->slots[0]++;
+ continue;
+ }
+
+ /* We have an implicit hole, NO_HOLES feature is likely set. */
+ if (last_extent_end < key.offset) {
+ u64 search_start = last_extent_end;
+ u64 found_start;
+
+ /*
+ * First iteration, @start matches @offset and it's
+ * within the hole.
+ */
+ if (start == offset)
+ search_start = offset;
+
+ found = find_desired_extent_in_hole(inode, whence,
+ delalloc_cached_state,
+ search_start,
+ key.offset - 1,
+ &found_start);
+ if (found) {
+ start = found_start;
+ break;
+ }
+ /*
+ * Didn't find data or a hole (due to delalloc) in the
+ * implicit hole range, so we need to analyze the extent.
+ */
+ }
+
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(leaf, extent);
+
+ /*
+ * Can't access the extent's disk_bytenr field if this is an
+ * inline extent, since at that offset, it's where the extent
+ * data starts.
+ */
+ if (type == BTRFS_FILE_EXTENT_PREALLOC ||
+ (type == BTRFS_FILE_EXTENT_REG &&
+ btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
+ /*
+ * Explicit hole or prealloc extent, search for delalloc.
+ * A prealloc extent is treated like a hole.
+ */
+ u64 search_start = key.offset;
+ u64 found_start;
+
+ /*
+ * First iteration, @start matches @offset and it's
+ * within the hole.
+ */
+ if (start == offset)
+ search_start = offset;
+
+ found = find_desired_extent_in_hole(inode, whence,
+ delalloc_cached_state,
+ search_start,
+ extent_end - 1,
+ &found_start);
+ if (found) {
+ start = found_start;
+ break;
+ }
+ /*
+ * Didn't find data or a hole (due to delalloc) in the
+ * hole range, so we need to analyze the next
+ * extent item.
+ */
+ } else {
+ /*
+ * Found a regular or inline extent.
+ * If we are seeking for data, adjust the start offset
+ * and stop, we're done.
+ */
+ if (whence == SEEK_DATA) {
+ start = max_t(u64, key.offset, offset);
+ found = true;
+ break;
+ }
+ /*
+ * Else, we are seeking for a hole, check the next file
+ * extent item.
+ */
+ }
+
+ start = extent_end;
+ last_extent_end = extent_end;
+ path->slots[0]++;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
cond_resched();
}
- free_extent_map(em);
- unlock_extent_cached(&inode->io_tree, lockstart, lockend,
- &cached_state);
- if (ret) {
- offset = ret;
- } else {
- if (whence == SEEK_DATA && start >= i_size)
- offset = -ENXIO;
- else
- offset = min_t(loff_t, start, i_size);
+
+ /* We have an implicit hole from the last extent found up to i_size. */
+ if (!found && start < i_size) {
+ found = find_desired_extent_in_hole(inode, whence,
+ delalloc_cached_state, start,
+ i_size - 1, &start);
+ if (!found)
+ start = i_size;
}
- return offset;
+out:
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_free_path(path);
+
+ if (ret < 0)
+ return ret;
+
+ if (whence == SEEK_DATA && start >= i_size)
+ return -ENXIO;
+
+ return min_t(loff_t, start, i_size);
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
@@ -3670,9 +3793,9 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
return generic_file_llseek(file, offset, whence);
case SEEK_DATA:
case SEEK_HOLE:
- btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
- offset = find_desired_extent(BTRFS_I(inode), offset, whence);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
+ offset = find_desired_extent(file, offset, whence);
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
break;
}
@@ -3686,7 +3809,10 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
{
int ret;
- filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(inode))))
+ return -EIO;
+
+ filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
ret = fsverity_file_open(inode, filp);
if (ret)
@@ -3694,97 +3820,13 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
return generic_file_open(inode, filp);
}
-static int check_direct_read(struct btrfs_fs_info *fs_info,
- const struct iov_iter *iter, loff_t offset)
-{
- int ret;
- int i, seg;
-
- ret = check_direct_IO(fs_info, iter, offset);
- if (ret < 0)
- return ret;
-
- if (!iter_is_iovec(iter))
- return 0;
-
- for (seg = 0; seg < iter->nr_segs; seg++)
- for (i = seg + 1; i < iter->nr_segs; i++)
- if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
- return -EINVAL;
- return 0;
-}
-
-static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- size_t prev_left = 0;
- ssize_t read = 0;
- ssize_t ret;
-
- if (fsverity_active(inode))
- return 0;
-
- if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
- return 0;
-
- btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
-again:
- /*
- * This is similar to what we do for direct IO writes, see the comment
- * at btrfs_direct_write(), but we also disable page faults in addition
- * to disabling them only at the iov_iter level. This is because when
- * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
- * which can still trigger page fault ins despite having set ->nofault
- * to true of our 'to' iov_iter.
- *
- * The difference to direct IO writes is that we deadlock when trying
- * to lock the extent range in the inode's tree during he page reads
- * triggered by the fault in (while for writes it is due to waiting for
- * our own ordered extent). This is because for direct IO reads,
- * btrfs_dio_iomap_begin() returns with the extent range locked, which
- * is only unlocked in the endio callback (end_bio_extent_readpage()).
- */
- pagefault_disable();
- to->nofault = true;
- ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
- IOMAP_DIO_PARTIAL, read);
- to->nofault = false;
- pagefault_enable();
-
- /* No increment (+=) because iomap returns a cumulative value. */
- if (ret > 0)
- read = ret;
-
- if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
- const size_t left = iov_iter_count(to);
-
- if (left == prev_left) {
- /*
- * We didn't make any progress since the last attempt,
- * fallback to a buffered read for the remainder of the
- * range. This is just to avoid any possibility of looping
- * for too long.
- */
- ret = read;
- } else {
- /*
- * We made some progress since the last retry or this is
- * the first time we are retrying. Fault in as many pages
- * as possible and retry.
- */
- fault_in_iov_iter_writeable(to, left);
- prev_left = left;
- goto again;
- }
- }
- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
- return ret < 0 ? ret : read;
-}
-
static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
ssize_t ret = 0;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(iocb->ki_filp)))))
+ return -EIO;
+
if (iocb->ki_flags & IOCB_DIRECT) {
ret = btrfs_direct_read(iocb, to);
if (ret < 0 || !iov_iter_count(to) ||
@@ -3795,15 +3837,26 @@ static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return filemap_read(iocb, to, ret);
}
+static ssize_t btrfs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(in)))))
+ return -EIO;
+
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
const struct file_operations btrfs_file_operations = {
.llseek = btrfs_file_llseek,
.read_iter = btrfs_file_read_iter,
- .splice_read = generic_file_splice_read,
+ .splice_read = btrfs_file_splice_read,
.write_iter = btrfs_file_write_iter,
.splice_write = iter_file_splice_write,
- .mmap = btrfs_file_mmap,
+ .mmap_prepare = btrfs_file_mmap_prepare,
.open = btrfs_file_open,
.release = btrfs_release_file,
+ .get_unmapped_area = thp_get_unmapped_area,
.fsync = btrfs_sync_file,
.fallocate = btrfs_fallocate,
.unlocked_ioctl = btrfs_ioctl,
@@ -3811,27 +3864,13 @@ const struct file_operations btrfs_file_operations = {
.compat_ioctl = btrfs_compat_ioctl,
#endif
.remap_file_range = btrfs_remap_file_range,
+ .uring_cmd = btrfs_uring_cmd,
+ .fop_flags = FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
};
-void __cold btrfs_auto_defrag_exit(void)
-{
- kmem_cache_destroy(btrfs_inode_defrag_cachep);
-}
-
-int __init btrfs_auto_defrag_init(void)
-{
- btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
- sizeof(struct inode_defrag), 0,
- SLAB_MEM_SPREAD,
- NULL);
- if (!btrfs_inode_defrag_cachep)
- return -ENOMEM;
-
- return 0;
-}
-
-int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
{
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
int ret;
/*
@@ -3848,10 +3887,9 @@ int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
* know better and pull this out at some point in the future, it is
* right and you are wrong.
*/
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ ret = filemap_fdatawrite_range(mapping, start, end);
+ if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags))
+ ret = filemap_fdatawrite_range(mapping, start, end);
return ret;
}
diff --git a/fs/btrfs/file.h b/fs/btrfs/file.h
new file mode 100644
index 000000000000..d7df81388cbe
--- /dev/null
+++ b/fs/btrfs/file.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_FILE_H
+#define BTRFS_FILE_H
+
+#include <linux/types.h>
+
+struct file;
+struct extent_state;
+struct kiocb;
+struct iov_iter;
+struct inode;
+struct folio;
+struct page;
+struct btrfs_ioctl_encoded_io_args;
+struct btrfs_drop_extents_args;
+struct btrfs_inode;
+struct btrfs_root;
+struct btrfs_path;
+struct btrfs_replace_extent_info;
+struct btrfs_trans_handle;
+
+extern const struct file_operations btrfs_file_operations;
+
+int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
+int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_inode *inode,
+ struct btrfs_drop_extents_args *args);
+int btrfs_replace_file_extents(struct btrfs_inode *inode,
+ struct btrfs_path *path, const u64 start,
+ const u64 end,
+ struct btrfs_replace_extent_info *extent_info,
+ struct btrfs_trans_handle **trans_out);
+int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode, u64 start, u64 end);
+ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
+ const struct btrfs_ioctl_encoded_io_args *encoded);
+int btrfs_release_file(struct inode *inode, struct file *file);
+int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos,
+ size_t write_bytes, struct extent_state **cached, bool noreserve);
+int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end);
+int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
+ size_t *write_bytes, bool nowait);
+void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
+bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state,
+ u64 *delalloc_start_ret, u64 *delalloc_end_ret);
+int btrfs_write_check(struct kiocb *iocb, size_t count);
+ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i);
+
+#endif
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index f3fee88c8ee0..f0f72850fab2 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -11,23 +11,32 @@
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
+#include <linux/string_choices.h>
+#include "extent-tree.h"
+#include "fs.h"
+#include "messages.h"
#include "misc.h"
-#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
-#include "volumes.h"
#include "space-info.h"
-#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "subpage.h"
+#include "inode-item.h"
+#include "accessors.h"
+#include "file-item.h"
+#include "file.h"
+#include "super.h"
#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG SZ_64K
#define FORCE_EXTENT_THRESHOLD SZ_1M
+static struct kmem_cache *btrfs_free_space_cachep;
+static struct kmem_cache *btrfs_free_space_bitmap_cachep;
+
struct btrfs_trim_range {
u64 start;
u64 bytes;
@@ -37,7 +46,7 @@ struct btrfs_trim_range {
static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info);
+ struct btrfs_free_space *info, bool update_stat);
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info, u64 *offset,
u64 *bytes, bool for_alloc);
@@ -45,25 +54,47 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info);
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
- u64 bytes);
+ u64 bytes, bool update_stats);
+
+static void btrfs_crc32c_final(u32 crc, u8 *result)
+{
+ put_unaligned_le32(~crc, result);
+}
+
+static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+{
+ struct btrfs_free_space *info;
+ struct rb_node *node;
+
+ while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
+ info = rb_entry(node, struct btrfs_free_space, offset_index);
+ if (!info->bitmap) {
+ unlink_free_space(ctl, info, true);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ } else {
+ free_bitmap(ctl, info);
+ }
+
+ cond_resched_lock(&ctl->tree_lock);
+ }
+}
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path,
u64 offset)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
- struct inode *inode = NULL;
+ struct btrfs_inode *inode;
unsigned nofs_flag;
int ret;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -85,17 +116,17 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
+ inode = btrfs_iget_path(location.objectid, root, path);
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
- return inode;
+ return ERR_CAST(inode);
- mapping_set_gfp_mask(inode->i_mapping,
- mapping_gfp_constraint(inode->i_mapping,
+ mapping_set_gfp_mask(inode->vfs_inode.i_mapping,
+ mapping_gfp_constraint(inode->vfs_inode.i_mapping,
~(__GFP_FS | __GFP_HIGHMEM)));
- return inode;
+ return &inode->vfs_inode;
}
struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
@@ -107,7 +138,7 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
spin_lock(&block_group->lock);
if (block_group->inode)
- inode = igrab(block_group->inode);
+ inode = igrab(&block_group->inode->vfs_inode);
spin_unlock(&block_group->lock);
if (inode)
return inode;
@@ -125,10 +156,8 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
block_group->disk_cache_state = BTRFS_DC_CLEAR;
}
- if (!block_group->iref) {
- block_group->inode = igrab(inode);
- block_group->iref = 1;
- }
+ if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
+ block_group->inode = BTRFS_I(igrab(inode));
spin_unlock(&block_group->lock);
return inode;
@@ -169,12 +198,11 @@ static int __create_free_space_inode(struct btrfs_root *root,
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_free_space_header));
if (ret < 0) {
@@ -187,7 +215,6 @@ static int __create_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
return 0;
@@ -217,7 +244,7 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_block_group *block_group)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret = 0;
@@ -230,18 +257,17 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
if (IS_ERR(inode)) {
if (PTR_ERR(inode) != -ENOENT)
ret = PTR_ERR(inode);
- goto out;
+ return ret;
}
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret) {
- btrfs_add_delayed_iput(inode);
- goto out;
+ btrfs_add_delayed_iput(BTRFS_I(inode));
+ return ret;
}
clear_nlink(inode);
/* One for the block groups ref */
spin_lock(&block_group->lock);
- if (block_group->iref) {
- block_group->iref = 0;
+ if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
block_group->inode = NULL;
spin_unlock(&block_group->lock);
iput(inode);
@@ -249,7 +275,7 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
spin_unlock(&block_group->lock);
}
/* One for the lookup ref */
- btrfs_add_delayed_iput(inode);
+ btrfs_add_delayed_iput(BTRFS_I(inode));
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
key.type = 0;
@@ -259,44 +285,32 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
if (ret) {
if (ret > 0)
ret = 0;
- goto out;
+ return ret;
}
- ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *rsv)
-{
- u64 needed_bytes;
- int ret;
-
- /* 1 for slack space, 1 for updating the inode */
- needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
- btrfs_calc_metadata_size(fs_info, 1);
-
- spin_lock(&rsv->lock);
- if (rsv->reserved < needed_bytes)
- ret = -ENOSPC;
- else
- ret = 0;
- spin_unlock(&rsv->lock);
- return ret;
+ return btrfs_del_item(trans, trans->fs_info->tree_root, path);
}
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
- struct inode *inode)
+ struct inode *vfs_inode)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_truncate_control control = {
+ .inode = BTRFS_I(vfs_inode),
+ .new_size = 0,
+ .ino = btrfs_ino(BTRFS_I(vfs_inode)),
+ .min_type = BTRFS_EXTENT_DATA_KEY,
+ .clear_extent_range = true,
+ };
+ struct btrfs_inode *inode = BTRFS_I(vfs_inode);
+ struct btrfs_root *root = inode->root;
+ struct extent_state *cached_state = NULL;
int ret = 0;
bool locked = false;
if (block_group) {
- struct btrfs_path *path = btrfs_alloc_path();
+ BTRFS_PATH_AUTO_FREE(path);
+ path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto fail;
@@ -317,22 +331,28 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_CLEAR;
spin_unlock(&block_group->lock);
- btrfs_free_path(path);
}
- btrfs_i_size_write(BTRFS_I(inode), 0);
- truncate_pagecache(inode, 0);
+ btrfs_i_size_write(inode, 0);
+ truncate_pagecache(vfs_inode, 0);
+
+ btrfs_lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+ btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
/*
* We skip the throttling logic for free space cache inodes, so we don't
* need to check for -EAGAIN.
*/
- ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
- 0, BTRFS_EXTENT_DATA_KEY, NULL);
+ ret = btrfs_truncate_inode_items(trans, root, &control);
+
+ inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
+ btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
+
+ btrfs_unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
if (ret)
goto fail;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, inode);
fail:
if (locked)
@@ -346,7 +366,7 @@ fail:
static void readahead_cache(struct inode *inode)
{
struct file_ra_state ra;
- unsigned long last_index;
+ pgoff_t last_index;
file_ra_state_init(&ra, inode->i_mapping);
last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
@@ -372,7 +392,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
return -ENOMEM;
io_ctl->num_pages = num_pages;
- io_ctl->fs_info = btrfs_sb(inode->i_sb);
+ io_ctl->fs_info = inode_to_fs_info(inode);
io_ctl->inode = inode;
return 0;
@@ -412,8 +432,8 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
for (i = 0; i < io_ctl->num_pages; i++) {
if (io_ctl->pages[i]) {
- btrfs_page_clear_checked(io_ctl->fs_info,
- io_ctl->pages[i],
+ btrfs_folio_clear_checked(io_ctl->fs_info,
+ page_folio(io_ctl->pages[i]),
page_offset(io_ctl->pages[i]),
PAGE_SIZE);
unlock_page(io_ctl->pages[i]);
@@ -424,7 +444,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
{
- struct page *page;
+ struct folio *folio;
struct inode *inode = io_ctl->inode;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
int i;
@@ -432,31 +452,33 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
for (i = 0; i < io_ctl->num_pages; i++) {
int ret;
- page = find_or_create_page(inode->i_mapping, i, mask);
- if (!page) {
+ folio = __filemap_get_folio(inode->i_mapping, i,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mask);
+ if (IS_ERR(folio)) {
io_ctl_drop_pages(io_ctl);
- return -ENOMEM;
+ return PTR_ERR(folio);
}
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
io_ctl_drop_pages(io_ctl);
return ret;
}
- io_ctl->pages[i] = page;
- if (uptodate && !PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (page->mapping != inode->i_mapping) {
+ io_ctl->pages[i] = &folio->page;
+ if (uptodate && !folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (folio->mapping != inode->i_mapping) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"free space cache page truncated");
io_ctl_drop_pages(io_ctl);
return -EIO;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"error reading free space cache");
io_ctl_drop_pages(io_ctl);
@@ -518,7 +540,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
- crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
+ crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
btrfs_crc32c_final(crc, (u8 *)&crc);
io_ctl_unmap_page(io_ctl);
tmp = page_address(io_ctl->pages[0]);
@@ -540,7 +562,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
val = *tmp;
io_ctl_map_page(io_ctl, 0);
- crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
+ crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
btrfs_crc32c_final(crc, (u8 *)&crc);
if (val != crc) {
btrfs_err_rl(io_ctl->fs_info,
@@ -666,7 +688,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
- struct btrfs_block_group *block_group = ctl->private;
+ struct btrfs_block_group *block_group = ctl->block_group;
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
@@ -676,6 +698,12 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
max_bitmaps = max_t(u64, max_bitmaps, 1);
+ if (ctl->total_bitmaps > max_bitmaps)
+ btrfs_err(block_group->fs_info,
+"invalid free space control: bg start=%llu len=%llu total_bitmaps=%u unit=%u max_bitmaps=%llu bytes_per_bg=%llu",
+ block_group->start, block_group->length,
+ ctl->total_bitmaps, ctl->unit, max_bitmaps,
+ bytes_per_bg);
ASSERT(ctl->total_bitmaps <= max_bitmaps);
/*
@@ -724,8 +752,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
return 0;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -823,15 +851,17 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
}
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e);
- ctl->total_bitmaps++;
- recalculate_thresholds(ctl);
- spin_unlock(&ctl->tree_lock);
if (ret) {
+ spin_unlock(&ctl->tree_lock);
btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
+ kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap);
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
+ ctl->total_bitmaps++;
+ recalculate_thresholds(ctl);
+ spin_unlock(&ctl->tree_lock);
list_add_tail(&e->list, &bitmaps);
}
@@ -858,7 +888,10 @@ out:
return ret;
free_cache:
io_ctl_drop_pages(&io_ctl);
+
+ spin_lock(&ctl->tree_lock);
__btrfs_remove_free_space_cache(ctl);
+ spin_unlock(&ctl->tree_lock);
goto out;
}
@@ -872,31 +905,37 @@ static int copy_free_space_cache(struct btrfs_block_group *block_group,
while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (!info->bitmap) {
- unlink_free_space(ctl, info);
- ret = btrfs_add_free_space(block_group, info->offset,
- info->bytes);
+ const u64 offset = info->offset;
+ const u64 bytes = info->bytes;
+
+ unlink_free_space(ctl, info, true);
+ spin_unlock(&ctl->tree_lock);
kmem_cache_free(btrfs_free_space_cachep, info);
+ ret = btrfs_add_free_space(block_group, offset, bytes);
+ spin_lock(&ctl->tree_lock);
} else {
u64 offset = info->offset;
u64 bytes = ctl->unit;
- while (search_bitmap(ctl, info, &offset, &bytes,
- false) == 0) {
+ ret = search_bitmap(ctl, info, &offset, &bytes, false);
+ if (ret == 0) {
+ bitmap_clear_bits(ctl, info, offset, bytes, true);
+ spin_unlock(&ctl->tree_lock);
ret = btrfs_add_free_space(block_group, offset,
bytes);
- if (ret)
- break;
- bitmap_clear_bits(ctl, info, offset, bytes);
- offset = info->offset;
- bytes = ctl->unit;
+ spin_lock(&ctl->tree_lock);
+ } else {
+ free_bitmap(ctl, info);
+ ret = 0;
}
- free_bitmap(ctl, info);
}
- cond_resched();
+ cond_resched_lock(&ctl->tree_lock);
}
return ret;
}
+static struct lock_class_key btrfs_free_space_inode_key;
+
int load_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -929,8 +968,8 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
path = btrfs_alloc_path();
if (!path)
return 0;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
/*
* We must pass a path with search_commit_root set to btrfs_iget in
@@ -966,6 +1005,14 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
}
spin_unlock(&block_group->lock);
+ /*
+ * Reinitialize the class of struct inode's mapping->invalidate_lock for
+ * free space inodes to prevent false positives related to locks for normal
+ * inodes.
+ */
+ lockdep_set_class(&(&inode->i_data)->invalidate_lock,
+ &btrfs_free_space_inode_key);
+
ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
path, block_group->start);
btrfs_free_path(path);
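The lockdep_set_class() call added in the hunk above uses a common kernel pattern: a file-local struct lock_class_key re-keys one particular lock instance into its own lockdep class, so its dependency chains are tracked separately from every other lock initialized at the same call site. A minimal, illustrative kernel-C sketch of that pattern (not part of this patch; the names and the spinlock are made up, whereas the patch applies it to the inode's mapping->invalidate_lock):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical example: give one special-purpose lock its own lockdep class. */
static struct lock_class_key example_special_lock_key;

struct example_ctx {
	spinlock_t lock;
};

static void example_ctx_init(struct example_ctx *ctx, bool special)
{
	spin_lock_init(&ctx->lock);
	/*
	 * All locks initialized by the same spin_lock_init() call site share a
	 * lockdep class.  Re-keying the special instance separates its
	 * dependency chains from the common case and avoids false positive
	 * lock-order reports, which is what the patch does for the free space
	 * inode's invalidate_lock.
	 */
	if (special)
		lockdep_set_class(&ctx->lock, &example_special_lock_key);
}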
@@ -976,7 +1023,9 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
block_group->bytes_super));
if (matched) {
+ spin_lock(&tmp_ctl.tree_lock);
ret = copy_free_space_cache(block_group, &tmp_ctl);
+ spin_unlock(&tmp_ctl.tree_lock);
/*
* ret == 1 means we successfully loaded the free space cache,
* so we need to re-set it here.
@@ -984,7 +1033,13 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
if (ret == 0)
ret = 1;
} else {
+ /*
+ * We need to call the _locked variant so we don't try to update
+ * the discard counters.
+ */
+ spin_lock(&tmp_ctl.tree_lock);
__btrfs_remove_free_space_cache(&tmp_ctl);
+ spin_unlock(&tmp_ctl.tree_lock);
btrfs_warn(fs_info,
"block group %llu has wrong amount of free space",
block_group->start);
@@ -1025,9 +1080,8 @@ int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
/* Get the cluster for this block_group if it exists */
if (block_group && !list_empty(&block_group->cluster_list)) {
- cluster = list_entry(block_group->cluster_list.next,
- struct btrfs_free_cluster,
- block_group_list);
+ cluster = list_first_entry(&block_group->cluster_list,
+ struct btrfs_free_cluster, block_group_list);
}
if (!node && cluster) {
@@ -1100,13 +1154,13 @@ update_cache_item(struct btrfs_trans_handle *trans,
int ret;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
key.type = 0;
+ key.offset = offset;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DELALLOC, 0, 0, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+ EXTENT_DELALLOC, NULL);
goto fail;
}
leaf = path->nodes[0];
@@ -1117,9 +1171,9 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
found_key.offset != offset) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
- inode->i_size - 1, EXTENT_DELALLOC, 0,
- 0, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
+ inode->i_size - 1, EXTENT_DELALLOC,
+ NULL);
btrfs_release_path(path);
goto fail;
}
@@ -1131,7 +1185,6 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_set_free_space_entries(leaf, header, entries);
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
return 0;
@@ -1165,10 +1218,9 @@ static noinline_for_stack int write_pinned_extent_entries(
start = block_group->start;
while (start < block_group->start + block_group->length) {
- ret = find_first_extent_bit(unpin, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY, NULL);
- if (ret)
+ if (!btrfs_find_first_extent_bit(unpin, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY, NULL))
return 0;
/* This pinned extent is out of our range */
@@ -1212,10 +1264,10 @@ static int flush_dirty_cache(struct inode *inode)
{
int ret;
- ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DELALLOC, 0, 0, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+ EXTENT_DELALLOC, NULL);
return ret;
}
@@ -1235,8 +1287,8 @@ cleanup_write_cache_enospc(struct inode *inode,
struct extent_state **cached_state)
{
io_ctl_drop_pages(io_ctl);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ cached_state);
}
static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1268,7 +1320,7 @@ out:
"failed to write free space cache for block group %llu error %d",
block_group->start, ret);
}
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ btrfs_update_inode(trans, BTRFS_I(inode));
if (block_group) {
/* the dirty list is protected by the dirty_bgs_lock */
@@ -1306,10 +1358,9 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
path, block_group->start);
}
-/**
- * Write out cached info to an inode
+/*
+ * Write out cached info to an inode.
*
- * @root: root the inode belongs to
* @inode: freespace inode we are writing out
* @ctl: free space cache we are going to write out
* @block_group: block_group for this cache if it belongs to a block_group
@@ -1320,7 +1371,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
* on mount. This will return 0 if it was successful in writing the cache out,
* or an errno if it was not.
*/
-static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+static int __btrfs_write_out_cache(struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
@@ -1332,6 +1383,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
int bitmaps = 0;
int ret;
int must_iput = 0;
+ int i_size;
if (!i_size_read(inode))
return -EIO;
@@ -1361,8 +1413,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
if (ret)
goto out_unlock;
- lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
io_ctl_set_generation(io_ctl, trans->transid);
@@ -1402,11 +1454,16 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
- ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
- io_ctl->num_pages, 0, i_size_read(inode),
- &cached_state, false);
- if (ret)
- goto out_nospc;
+ i_size = i_size_read(inode);
+ for (int i = 0; i < round_up(i_size, PAGE_SIZE) / PAGE_SIZE; i++) {
+ u64 dirty_start = i * PAGE_SIZE;
+ u64 dirty_len = min_t(u64, dirty_start + PAGE_SIZE, i_size) - dirty_start;
+
+ ret = btrfs_dirty_folio(BTRFS_I(inode), page_folio(io_ctl->pages[i]),
+ dirty_start, dirty_len, &cached_state, false);
+ if (ret < 0)
+ goto out_nospc;
+ }
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem);
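The per-folio dirtying loop added above clamps each page-sized range to the inode size, so the final partial page is dirtied only up to i_size. A short stand-alone userspace C sketch of the same arithmetic, assuming a 4 KiB page size and a made-up inode size:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ULL

int main(void)
{
	/* Hypothetical inode size: two full pages plus a 1808-byte tail. */
	uint64_t i_size = 2 * EXAMPLE_PAGE_SIZE + 1808;
	uint64_t nr_pages = (i_size + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;

	for (uint64_t i = 0; i < nr_pages; i++) {
		uint64_t dirty_start = i * EXAMPLE_PAGE_SIZE;
		uint64_t dirty_end = dirty_start + EXAMPLE_PAGE_SIZE;
		uint64_t dirty_len;

		/* Clamp the last page to i_size, as the loop in the patch does. */
		if (dirty_end > i_size)
			dirty_end = i_size;
		dirty_len = dirty_end - dirty_start;

		printf("page %llu: start=%llu len=%llu\n",
		       (unsigned long long)i,
		       (unsigned long long)dirty_start,
		       (unsigned long long)dirty_len);
	}
	return 0;
}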
@@ -1417,8 +1474,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_drop_pages(io_ctl);
io_ctl_free(io_ctl);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
/*
* at this point the pages are under IO and we're happy,
@@ -1428,7 +1485,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl->entries = entries;
io_ctl->bitmaps = bitmaps;
- ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
+ ret = btrfs_fdatawrite_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
goto out;
@@ -1453,7 +1510,7 @@ out:
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
}
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ btrfs_update_inode(trans, BTRFS_I(inode));
if (must_iput)
iput(inode);
return ret;
@@ -1479,8 +1536,8 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return 0;
- ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
- block_group, &block_group->io_ctl, trans);
+ ret = __btrfs_write_out_cache(inode, ctl, block_group,
+ &block_group->io_ctl, trans);
if (ret) {
btrfs_debug(fs_info,
"failed to write free space cache for block group %llu error %d",
@@ -1529,20 +1586,34 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
return bitmap_start;
}
-static int tree_insert_offset(struct rb_root *root, u64 offset,
- struct rb_node *node, int bitmap)
+static int tree_insert_offset(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_cluster *cluster,
+ struct btrfs_free_space *new_entry)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_root *root;
+ struct rb_node **p;
struct rb_node *parent = NULL;
- struct btrfs_free_space *info;
+
+ lockdep_assert_held(&ctl->tree_lock);
+
+ if (cluster) {
+ lockdep_assert_held(&cluster->lock);
+ root = &cluster->root;
+ } else {
+ root = &ctl->free_space_offset;
+ }
+
+ p = &root->rb_node;
while (*p) {
+ struct btrfs_free_space *info;
+
parent = *p;
info = rb_entry(parent, struct btrfs_free_space, offset_index);
- if (offset < info->offset) {
+ if (new_entry->offset < info->offset) {
p = &(*p)->rb_left;
- } else if (offset > info->offset) {
+ } else if (new_entry->offset > info->offset) {
p = &(*p)->rb_right;
} else {
/*
@@ -1558,7 +1629,7 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
* found a bitmap, we want to go left, or before
* logically.
*/
- if (bitmap) {
+ if (new_entry->bitmap) {
if (info->bitmap) {
WARN_ON_ONCE(1);
return -EEXIST;
@@ -1574,13 +1645,57 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
}
}
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
+ rb_link_node(&new_entry->offset_index, parent, p);
+ rb_insert_color(&new_entry->offset_index, root);
return 0;
}
/*
+ * This is a little subtle. We *only* have ->max_extent_size set if we actually
+ * searched through the bitmap and figured out the largest ->max_extent_size,
+ * otherwise it's 0. In the case that it's 0 we don't want to tell the
+ * allocator the wrong thing, we want to use the actual real max_extent_size
+ * we've found already if it's larger, or we want to use ->bytes.
+ *
+ * This matters because find_free_space() will skip entries whose ->bytes is
+ * less than the required bytes. So if we didn't search down this bitmap, we
+ * may pick some previous entry that has a smaller ->max_extent_size than we
+ * have. For example, assume we have two entries, one that has
+ * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
+ * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
+ * call into find_free_space(), and return with max_extent_size == 4K, because
+ * that first bitmap entry had ->max_extent_size set, but the second one did
+ * not. If instead we returned 8K we'd come in searching for 8K, and find the
+ * 8K contiguous range.
+ *
+ * Consider the other case, we have 2 8K chunks in that second entry and still
+ * don't have ->max_extent_size set. We'll return 16K, and the next time the
+ * allocator comes in it'll fully search our second bitmap, and this time it'll
+ * get an uptodate value of 8K as the maximum chunk size. Then we'll get the
+ * right allocation the next loop through.
+ */
+static inline u64 get_max_extent_size(const struct btrfs_free_space *entry)
+{
+ if (entry->bitmap && entry->max_extent_size)
+ return entry->max_extent_size;
+ return entry->bytes;
+}
+
+/*
+ * We want the largest entry to be leftmost, so this is inverted from what you'd
+ * normally expect.
+ */
+static bool entry_less(struct rb_node *node, const struct rb_node *parent)
+{
+ const struct btrfs_free_space *entry, *exist;
+
+ entry = rb_entry(node, struct btrfs_free_space, bytes_index);
+ exist = rb_entry(parent, struct btrfs_free_space, bytes_index);
+ return get_max_extent_size(exist) < get_max_extent_size(entry);
+}
+
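To make the worked example in the comment above concrete, here is a small stand-alone C sketch (simplified stub types, not the kernel structures) of the rule get_max_extent_size() implements: trust a bitmap's cached max_extent_size only when it has actually been computed, otherwise fall back to the raw byte count.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct btrfs_free_space. */
struct example_entry {
	bool bitmap;              /* entry is a bitmap */
	uint64_t bytes;           /* total free bytes in the entry */
	uint64_t max_extent_size; /* largest contiguous run, 0 if unknown */
};

/* Same rule as the helper above: use max_extent_size only if it was set. */
static uint64_t example_max_extent_size(const struct example_entry *e)
{
	if (e->bitmap && e->max_extent_size)
		return e->max_extent_size;
	return e->bytes;
}

int main(void)
{
	/* Bitmap with 1M free overall but a known largest run of only 4K. */
	struct example_entry searched = { true, 1 << 20, 4096 };
	/* Bitmap not yet searched: report its raw byte count (8K), not 0. */
	struct example_entry unsearched = { true, 8192, 0 };

	printf("searched bitmap reports %llu\n",
	       (unsigned long long)example_max_extent_size(&searched));   /* 4096 */
	printf("unsearched bitmap reports %llu\n",
	       (unsigned long long)example_max_extent_size(&unsearched)); /* 8192 */
	return 0;
}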
+/*
* searches the tree for the given offset.
*
* fuzzy - If this is set, then we are trying to make an allocation, and we just
@@ -1592,15 +1707,12 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
u64 offset, int bitmap_only, int fuzzy)
{
struct rb_node *n = ctl->free_space_offset.rb_node;
- struct btrfs_free_space *entry, *prev = NULL;
+ struct btrfs_free_space *entry = NULL, *prev = NULL;
- /* find entry that is closest to the 'offset' */
- while (1) {
- if (!n) {
- entry = NULL;
- break;
- }
+ lockdep_assert_held(&ctl->tree_lock);
+ /* find entry that is closest to the 'offset' */
+ while (n) {
entry = rb_entry(n, struct btrfs_free_space, offset_index);
prev = entry;
@@ -1610,6 +1722,8 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
n = n->rb_right;
else
break;
+
+ entry = NULL;
}
if (bitmap_only) {
@@ -1686,6 +1800,10 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
return NULL;
while (1) {
+ n = rb_next(&entry->offset_index);
+ if (!n)
+ return NULL;
+ entry = rb_entry(n, struct btrfs_free_space, offset_index);
if (entry->bitmap) {
if (entry->offset + BITS_PER_BITMAP *
ctl->unit > offset)
@@ -1694,33 +1812,27 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
if (entry->offset + entry->bytes > offset)
break;
}
-
- n = rb_next(&entry->offset_index);
- if (!n)
- return NULL;
- entry = rb_entry(n, struct btrfs_free_space, offset_index);
}
return entry;
}
-static inline void
-__unlink_free_space(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info)
+static inline void unlink_free_space(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info,
+ bool update_stat)
{
+ lockdep_assert_held(&ctl->tree_lock);
+
rb_erase(&info->offset_index, &ctl->free_space_offset);
+ rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
ctl->free_extents--;
if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
ctl->discardable_extents[BTRFS_STAT_CURR]--;
ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
}
-}
-static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info)
-{
- __unlink_free_space(ctl, info);
- ctl->free_space -= info->bytes;
+ if (update_stat)
+ ctl->free_space -= info->bytes;
}
static int link_free_space(struct btrfs_free_space_ctl *ctl,
@@ -1728,12 +1840,15 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
{
int ret = 0;
+ lockdep_assert_held(&ctl->tree_lock);
+
ASSERT(info->bytes || info->bitmap);
- ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
- &info->offset_index, (info->bitmap != NULL));
+ ret = tree_insert_offset(ctl, NULL, info);
if (ret)
return ret;
+ rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
+
if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
ctl->discardable_extents[BTRFS_STAT_CURR]++;
ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
@@ -1744,9 +1859,27 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
return ret;
}
-static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info,
- u64 offset, u64 bytes)
+static void relink_bitmap_entry(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ ASSERT(info->bitmap);
+
+ /*
+ * If our entry is empty it's because we're on a cluster and we don't
+ * want to re-link it into our ctl bytes index.
+ */
+ if (RB_EMPTY_NODE(&info->bytes_index))
+ return;
+
+ lockdep_assert_held(&ctl->tree_lock);
+
+ rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
+ rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
+}
+
+static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info,
+ u64 offset, u64 bytes, bool update_stat)
{
unsigned long start, count, end;
int extent_delta = -1;
@@ -1762,6 +1895,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
if (info->max_extent_size > ctl->unit)
info->max_extent_size = 0;
+ relink_bitmap_entry(ctl, info);
+
if (start && test_bit(start - 1, info->bitmap))
extent_delta++;
@@ -1773,19 +1908,14 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
}
-}
-static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info, u64 offset,
- u64 bytes)
-{
- __bitmap_clear_bits(ctl, info, offset, bytes);
- ctl->free_space -= bytes;
+ if (update_stat)
+ ctl->free_space -= bytes;
}
-static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info, u64 offset,
- u64 bytes)
+static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes)
{
unsigned long start, count, end;
int extent_delta = 1;
@@ -1797,9 +1927,16 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
bitmap_set(info->bitmap, start, count);
+ /*
+ * We set some bytes, we have no idea what the max extent size is
+ * anymore.
+ */
+ info->max_extent_size = 0;
info->bytes += bytes;
ctl->free_space += bytes;
+ relink_bitmap_entry(ctl, info);
+
if (start && test_bit(start - 1, info->bitmap))
extent_delta--;
@@ -1867,20 +2004,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
*bytes = (u64)(max_bits) * ctl->unit;
bitmap_info->max_extent_size = *bytes;
+ relink_bitmap_entry(ctl, bitmap_info);
return -1;
}
-static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
-{
- if (entry->bitmap)
- return entry->max_extent_size;
- return entry->bytes;
-}
-
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
- unsigned long align, u64 *max_extent_size)
+ unsigned long align, u64 *max_extent_size, bool use_bytes_index)
{
struct btrfs_free_space *entry;
struct rb_node *node;
@@ -1890,16 +2021,38 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
if (!ctl->free_space_offset.rb_node)
goto out;
+again:
+ if (use_bytes_index) {
+ node = rb_first_cached(&ctl->free_space_bytes);
+ } else {
+ entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset),
+ 0, 1);
+ if (!entry)
+ goto out;
+ node = &entry->offset_index;
+ }
- entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
- if (!entry)
- goto out;
+ for (; node; node = rb_next(node)) {
+ if (use_bytes_index)
+ entry = rb_entry(node, struct btrfs_free_space,
+ bytes_index);
+ else
+ entry = rb_entry(node, struct btrfs_free_space,
+ offset_index);
- for (node = &entry->offset_index; node; node = rb_next(node)) {
- entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ /*
+ * If we are using the bytes index then all subsequent entries
+ * in this tree are going to be < bytes, so simply set the max
+ * extent size and exit the loop.
+ *
+ * If we're using the offset index then we need to keep going
+ * through the rest of the tree.
+ */
if (entry->bytes < *bytes) {
*max_extent_size = max(get_max_extent_size(entry),
*max_extent_size);
+ if (use_bytes_index)
+ break;
continue;
}
@@ -1916,6 +2069,13 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
tmp = entry->offset;
}
+ /*
+ * We don't break here if we're using the bytes index because we
+ * may have another entry that has the correct alignment and is
+ * the right size, so we don't want to miss that possibility.
+ * At worst this adds another loop through the logic, but if we
+ * broke here we could prematurely ENOSPC.
+ */
if (entry->bytes < *bytes + align_off) {
*max_extent_size = max(get_max_extent_size(entry),
*max_extent_size);
@@ -1923,6 +2083,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
}
if (entry->bitmap) {
+ struct rb_node *old_next = rb_next(node);
u64 size = *bytes;
ret = search_bitmap(ctl, entry, &tmp, &size, true);
@@ -1935,6 +2096,15 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
max(get_max_extent_size(entry),
*max_extent_size);
}
+
+ /*
+ * The bitmap may have gotten re-arranged in the space
+ * index here because the max_extent_size may have been
+ * updated. Start from the beginning again if this
+ * happened.
+ */
+ if (use_bytes_index && old_next != rb_next(node))
+ goto again;
continue;
}
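The two comments in find_free_space() above come down to an ordering property: walking entries in descending size order lets the search stop at the first entry that is too small, while an offset-ordered walk must continue to the end. A stand-alone C sketch of that early-exit logic over plain arrays (illustrative sizes only, not the rb-tree code):

#include <stdint.h>
#include <stdio.h>

/* Free space sizes ordered by descending size (like the bytes index). */
static const uint64_t by_size[]   = { 65536, 16384, 8192, 4096, 4096 };
/* The same sizes ordered by offset instead (like the offset index). */
static const uint64_t by_offset[] = { 4096, 65536, 4096, 8192, 16384 };

static int find_first_big_enough(const uint64_t *sizes, int n, uint64_t need,
				 int stop_early)
{
	for (int i = 0; i < n; i++) {
		if (sizes[i] >= need)
			return i;
		/* Descending order: everything after this is smaller too. */
		if (stop_early)
			return -1;
	}
	return -1;
}

int main(void)
{
	uint64_t need = 16384;

	/* Size-ordered walk may stop at the first too-small entry. */
	printf("bytes index hit: %d\n",
	       find_first_big_enough(by_size, 5, need, 1));
	/* Offset-ordered walk must keep scanning or it would miss entry 1. */
	printf("offset index hit: %d\n",
	       find_first_big_enough(by_offset, 5, need, 0));
	return 0;
}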
@@ -1973,7 +2143,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
}
- unlink_free_space(ctl, bitmap_info);
+ unlink_free_space(ctl, bitmap_info, true);
kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
ctl->total_bitmaps--;
@@ -2011,7 +2181,7 @@ again:
/* Cannot clear past the end of the bitmap */
search_bytes = min(search_bytes, end - search_start + 1);
- bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+ bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes, true);
*offset += search_bytes;
*bytes -= search_bytes;
@@ -2081,13 +2251,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
bytes_to_set = min(end - offset, bytes);
- bitmap_set_bits(ctl, info, offset, bytes_to_set);
-
- /*
- * We set some bytes, we have no idea what the max extent size is
- * anymore.
- */
- info->max_extent_size = 0;
+ btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set);
return bytes_to_set;
@@ -2096,7 +2260,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- struct btrfs_block_group *block_group = ctl->private;
+ struct btrfs_block_group *block_group = ctl->block_group;
struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false;
@@ -2118,7 +2282,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* If this block group has some small extents we don't want to
* use up all of our free slots in the cache with them, we want
* to reserve them to larger extents, however if we have plenty
- * of cache left then go ahead an dadd them, no sense in adding
+ * of cache left then go ahead and add them, no sense in adding
* the overhead of a bitmap if we don't have to.
*/
if (info->bytes <= fs_info->sectorsize * 8) {
@@ -2165,7 +2329,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
return 0;
if (ctl->op == &free_space_op)
- block_group = ctl->private;
+ block_group = ctl->block_group;
again:
/*
* Since we link bitmaps right into the cluster we need to see if we
@@ -2177,9 +2341,8 @@ again:
struct rb_node *node;
struct btrfs_free_space *entry;
- cluster = list_entry(block_group->cluster_list.next,
- struct btrfs_free_cluster,
- block_group_list);
+ cluster = list_first_entry(&block_group->cluster_list,
+ struct btrfs_free_cluster, block_group_list);
spin_lock(&cluster->lock);
node = rb_first(&cluster->root);
if (!node) {
@@ -2294,6 +2457,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
u64 offset = info->offset;
u64 bytes = info->bytes;
const bool is_trimmed = btrfs_free_space_trimmed(info);
+ struct rb_node *right_prev = NULL;
/*
* first we want to see if there is free space adjacent to the range we
@@ -2301,19 +2465,18 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
* cover the entire range
*/
right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
- if (right_info && rb_prev(&right_info->offset_index))
- left_info = rb_entry(rb_prev(&right_info->offset_index),
- struct btrfs_free_space, offset_index);
+ if (right_info)
+ right_prev = rb_prev(&right_info->offset_index);
+
+ if (right_prev)
+ left_info = rb_entry(right_prev, struct btrfs_free_space, offset_index);
else if (!right_info)
left_info = tree_search_offset(ctl, offset - 1, 0, 0);
/* See try_merge_free_space() comment. */
if (right_info && !right_info->bitmap &&
(!is_trimmed || btrfs_free_space_trimmed(right_info))) {
- if (update_stat)
- unlink_free_space(ctl, right_info);
- else
- __unlink_free_space(ctl, right_info);
+ unlink_free_space(ctl, right_info, update_stat);
info->bytes += right_info->bytes;
kmem_cache_free(btrfs_free_space_cachep, right_info);
merged = true;
@@ -2323,10 +2486,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
if (left_info && !left_info->bitmap &&
left_info->offset + left_info->bytes == offset &&
(!is_trimmed || btrfs_free_space_trimmed(left_info))) {
- if (update_stat)
- unlink_free_space(ctl, left_info);
- else
- __unlink_free_space(ctl, left_info);
+ unlink_free_space(ctl, left_info, update_stat);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -2362,10 +2522,7 @@ static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
if (!btrfs_free_space_trimmed(bitmap))
info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
- if (update_stat)
- bitmap_clear_bits(ctl, bitmap, end, bytes);
- else
- __bitmap_clear_bits(ctl, bitmap, end, bytes);
+ bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat);
if (!bitmap->bytes)
free_bitmap(ctl, bitmap);
@@ -2419,10 +2576,7 @@ static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
if (!btrfs_free_space_trimmed(bitmap))
info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
- if (update_stat)
- bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
- else
- __bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
+ bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat);
if (!bitmap->bytes)
free_bitmap(ctl, bitmap);
@@ -2466,12 +2620,12 @@ static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
}
}
-int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
- struct btrfs_free_space_ctl *ctl,
+static int __btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 offset, u64 bytes,
enum btrfs_trim_state trim_state)
{
- struct btrfs_block_group *block_group = ctl->private;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
int ret = 0;
u64 filter_bytes = bytes;
@@ -2486,6 +2640,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
info->bytes = bytes;
info->trim_state = trim_state;
RB_CLEAR_NODE(&info->offset_index);
+ RB_CLEAR_NODE(&info->bytes_index);
spin_lock(&ctl->tree_lock);
@@ -2538,17 +2693,21 @@ out:
static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
u64 bytenr, u64 size, bool used)
{
- struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_space_info *sinfo = block_group->space_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
u64 offset = bytenr - block_group->start;
u64 to_free, to_unusable;
- const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
- bool initial = (size == block_group->length);
+ int bg_reclaim_threshold = 0;
+ bool initial;
u64 reclaimable_unusable;
+ spin_lock(&block_group->lock);
+
+ initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
WARN_ON(!initial && offset + size > block_group->zone_capacity);
+ if (!initial)
+ bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
- spin_lock(&ctl->tree_lock);
if (!used)
to_free = size;
else if (initial)
@@ -2561,18 +2720,19 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
to_free = offset + size - block_group->alloc_offset;
to_unusable = size - to_free;
+ spin_lock(&ctl->tree_lock);
ctl->free_space += to_free;
+ spin_unlock(&ctl->tree_lock);
/*
* If the block group is read-only, we should account freed space into
* bytes_readonly.
*/
- if (!block_group->ro)
+ if (!block_group->ro) {
block_group->zone_unusable += to_unusable;
- spin_unlock(&ctl->tree_lock);
+ WARN_ON(block_group->zone_unusable > block_group->length);
+ }
if (!used) {
- spin_lock(&block_group->lock);
block_group->alloc_offset -= size;
- spin_unlock(&block_group->lock);
}
reclaimable_unusable = block_group->zone_unusable -
@@ -2582,11 +2742,12 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
btrfs_mark_bg_unused(block_group);
} else if (bg_reclaim_threshold &&
reclaimable_unusable >=
- div_factor_fine(block_group->zone_capacity,
- bg_reclaim_threshold)) {
+ mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) {
btrfs_mark_bg_to_reclaim(block_group);
}
+ spin_unlock(&block_group->lock);
+
return 0;
}
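The reclaim check above compares a zoned block group's reclaimable-but-unusable bytes against a percentage of its zone capacity. A stand-alone C sketch of that comparison with made-up numbers; example_mult_perc() is assumed here to mean "percent of", mirroring the helper the patch switches to:

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of the kernel helper: num * percent / 100. */
static uint64_t example_mult_perc(uint64_t num, uint32_t percent)
{
	return num * percent / 100;
}

int main(void)
{
	/* Hypothetical 256 MiB zone capacity, 90 MiB reclaimable unusable space. */
	uint64_t zone_capacity = 256ULL << 20;
	uint64_t reclaimable_unusable = 90ULL << 20;
	uint32_t bg_reclaim_threshold = 30; /* percent, 0 disables the check */

	uint64_t threshold = example_mult_perc(zone_capacity, bg_reclaim_threshold);

	if (bg_reclaim_threshold && reclaimable_unusable >= threshold)
		printf("would mark the block group for reclaim (%llu >= %llu)\n",
		       (unsigned long long)reclaimable_unusable,
		       (unsigned long long)threshold);
	else
		printf("below threshold, leave the block group alone\n");
	return 0;
}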
@@ -2602,9 +2763,7 @@ int btrfs_add_free_space(struct btrfs_block_group *block_group,
if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
trim_state = BTRFS_TRIM_STATE_TRIMMED;
- return __btrfs_add_free_space(block_group->fs_info,
- block_group->free_space_ctl,
- bytenr, size, trim_state);
+ return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
}
int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
@@ -2635,9 +2794,7 @@ int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
trim_state = BTRFS_TRIM_STATE_TRIMMED;
- return __btrfs_add_free_space(block_group->fs_info,
- block_group->free_space_ctl,
- bytenr, size, trim_state);
+ return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
}
int btrfs_remove_free_space(struct btrfs_block_group *block_group,
@@ -2696,7 +2853,7 @@ again:
re_search = false;
if (!info->bitmap) {
- unlink_free_space(ctl, info);
+ unlink_free_space(ctl, info, true);
if (offset == info->offset) {
u64 to_free = min(bytes, info->bytes);
@@ -2732,7 +2889,7 @@ again:
}
spin_unlock(&ctl->tree_lock);
- ret = __btrfs_add_free_space(block_group->fs_info, ctl,
+ ret = __btrfs_add_free_space(block_group,
offset + bytes,
old_end - (offset + bytes),
info->trim_state);
@@ -2769,7 +2926,8 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
if (btrfs_is_zoned(fs_info)) {
btrfs_info(fs_info, "free space %llu active %d",
block_group->zone_capacity - block_group->alloc_offset,
- block_group->zone_is_active);
+ test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+ &block_group->runtime_flags));
return;
}
@@ -2779,14 +2937,14 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
if (info->bytes >= bytes && !block_group->ro)
count++;
btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
- info->offset, info->bytes,
- (info->bitmap) ? "yes" : "no");
+ info->offset, info->bytes, str_yes_no(info->bitmap));
}
spin_unlock(&ctl->tree_lock);
btrfs_info(fs_info, "block group has cluster?: %s",
- list_empty(&block_group->cluster_list) ? "no" : "yes");
+ str_no_yes(list_empty(&block_group->cluster_list)));
btrfs_info(fs_info,
- "%d blocks of free space at or bigger than bytes is", count);
+ "%d free space entries at or bigger than %llu bytes",
+ count, bytes);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
@@ -2797,8 +2955,9 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
spin_lock_init(&ctl->tree_lock);
ctl->unit = fs_info->sectorsize;
ctl->start = block_group->start;
- ctl->private = block_group;
+ ctl->block_group = block_group;
ctl->op = &free_space_op;
+ ctl->free_space_bytes = RB_ROOT_CACHED;
INIT_LIST_HEAD(&ctl->trimming_ranges);
mutex_init(&ctl->cache_writeout_mutex);
@@ -2821,9 +2980,10 @@ static void __btrfs_return_cluster_to_free_space(
struct btrfs_free_cluster *cluster)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- struct btrfs_free_space *entry;
struct rb_node *node;
+ lockdep_assert_held(&ctl->tree_lock);
+
spin_lock(&cluster->lock);
if (cluster->block_group != block_group) {
spin_unlock(&cluster->lock);
@@ -2836,15 +2996,14 @@ static void __btrfs_return_cluster_to_free_space(
node = rb_first(&cluster->root);
while (node) {
- bool bitmap;
+ struct btrfs_free_space *entry;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
rb_erase(&entry->offset_index, &cluster->root);
RB_CLEAR_NODE(&entry->offset_index);
- bitmap = (entry->bitmap != NULL);
- if (!bitmap) {
+ if (!entry->bitmap) {
/* Merging treats extents as if they were new */
if (!btrfs_free_space_trimmed(entry)) {
ctl->discardable_extents[BTRFS_STAT_CURR]--;
@@ -2862,42 +3021,15 @@ static void __btrfs_return_cluster_to_free_space(
entry->bytes;
}
}
- tree_insert_offset(&ctl->free_space_offset,
- entry->offset, &entry->offset_index, bitmap);
+ tree_insert_offset(ctl, NULL, entry);
+ rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes,
+ entry_less);
}
cluster->root = RB_ROOT;
spin_unlock(&cluster->lock);
btrfs_put_block_group(block_group);
}
-static void __btrfs_remove_free_space_cache_locked(
- struct btrfs_free_space_ctl *ctl)
-{
- struct btrfs_free_space *info;
- struct rb_node *node;
-
- while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
- info = rb_entry(node, struct btrfs_free_space, offset_index);
- if (!info->bitmap) {
- unlink_free_space(ctl, info);
- kmem_cache_free(btrfs_free_space_cachep, info);
- } else {
- free_bitmap(ctl, info);
- }
-
- cond_resched_lock(&ctl->tree_lock);
- }
-}
-
-void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
-{
- spin_lock(&ctl->tree_lock);
- __btrfs_remove_free_space_cache_locked(ctl);
- if (ctl->private)
- btrfs_discard_update_discardable(ctl->private);
- spin_unlock(&ctl->tree_lock);
-}
-
void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2915,16 +3047,13 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
cond_resched_lock(&ctl->tree_lock);
}
- __btrfs_remove_free_space_cache_locked(ctl);
+ __btrfs_remove_free_space_cache(ctl);
btrfs_discard_update_discardable(block_group);
spin_unlock(&ctl->tree_lock);
}
-/**
- * btrfs_is_free_space_trimmed - see if everything is trimmed
- * @block_group: block_group of interest
- *
+/*
* Walk @block_group's free space rb_tree to determine if everything is trimmed.
*/
bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
@@ -2965,18 +3094,20 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 align_gap = 0;
u64 align_gap_len = 0;
enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
+ bool use_bytes_index = (offset == block_group->start);
ASSERT(!btrfs_is_zoned(block_group->fs_info));
spin_lock(&ctl->tree_lock);
entry = find_free_space(ctl, &offset, &bytes_search,
- block_group->full_stripe_len, max_extent_size);
+ block_group->full_stripe_len, max_extent_size,
+ use_bytes_index);
if (!entry)
goto out;
ret = offset;
if (entry->bitmap) {
- bitmap_clear_bits(ctl, entry, offset, bytes);
+ bitmap_clear_bits(ctl, entry, offset, bytes, true);
if (!btrfs_free_space_trimmed(entry))
atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
@@ -2984,7 +3115,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
if (!entry->bytes)
free_bitmap(ctl, entry);
} else {
- unlink_free_space(ctl, entry);
+ unlink_free_space(ctl, entry, true);
align_gap_len = offset - entry->offset;
align_gap = entry->offset;
align_gap_trim_state = entry->trim_state;
@@ -3006,8 +3137,7 @@ out:
spin_unlock(&ctl->tree_lock);
if (align_gap_len)
- __btrfs_add_free_space(block_group->fs_info, ctl,
- align_gap, align_gap_len,
+ __btrfs_add_free_space(block_group, align_gap, align_gap_len,
align_gap_trim_state);
return ret;
}
@@ -3062,7 +3192,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- int err;
+ int ret2;
u64 search_start = cluster->window_start;
u64 search_bytes = bytes;
u64 ret = 0;
@@ -3070,15 +3200,15 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
search_start = min_start;
search_bytes = bytes;
- err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
- if (err) {
+ ret2 = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
+ if (ret2) {
*max_extent_size = max(get_max_extent_size(entry),
*max_extent_size);
return 0;
}
ret = search_start;
- __bitmap_clear_bits(ctl, entry, ret, bytes);
+ bitmap_clear_bits(ctl, entry, ret, bytes, false);
return ret;
}
@@ -3204,6 +3334,8 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
unsigned long total_found = 0;
int ret;
+ lockdep_assert_held(&ctl->tree_lock);
+
i = offset_to_bit(entry->offset, ctl->unit,
max_t(u64, offset, entry->offset));
want_bits = bytes_to_bits(bytes, ctl->unit);
@@ -3254,8 +3386,18 @@ again:
cluster->window_start = start * ctl->unit + entry->offset;
rb_erase(&entry->offset_index, &ctl->free_space_offset);
- ret = tree_insert_offset(&cluster->root, entry->offset,
- &entry->offset_index, 1);
+ rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
+
+ /*
+ * We need to know if we're currently on the normal space index when we
+ * manipulate the bitmap so that we know we need to remove and re-insert
+ * it into the space_index tree. Clear the bytes_index node here so the
+ * bitmap manipulation helpers know not to mess with the space_index
+ * until this bitmap entry is added back into the normal cache.
+ */
+ RB_CLEAR_NODE(&entry->bytes_index);
+
+ ret = tree_insert_offset(ctl, cluster, entry);
ASSERT(!ret); /* -EEXIST; Logic error */
trace_btrfs_setup_cluster(block_group, cluster,
@@ -3283,6 +3425,8 @@ setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
u64 max_extent;
u64 total_size = 0;
+ lockdep_assert_held(&ctl->tree_lock);
+
entry = tree_search_offset(ctl, offset, 0, 1);
if (!entry)
return -ENOSPC;
@@ -3344,8 +3488,8 @@ setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
continue;
rb_erase(&entry->offset_index, &ctl->free_space_offset);
- ret = tree_insert_offset(&cluster->root, entry->offset,
- &entry->offset_index, 0);
+ rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
+ ret = tree_insert_offset(ctl, cluster, entry);
total_size += entry->bytes;
ASSERT(!ret); /* -EEXIST; Logic error */
} while (node && entry != last);
@@ -3429,7 +3573,8 @@ int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
* data, keep it dense.
*/
if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
- cont1_bytes = min_bytes = bytes + empty_size;
+ cont1_bytes = bytes + empty_size;
+ min_bytes = cont1_bytes;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
min_bytes = fs_info->sectorsize;
@@ -3511,7 +3656,7 @@ static int do_trimming(struct btrfs_block_group *block_group,
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int ret;
- int update = 0;
+ bool bg_ro;
const u64 end = start + bytes;
const u64 reserved_end = reserved_start + reserved_bytes;
enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
@@ -3519,12 +3664,14 @@ static int do_trimming(struct btrfs_block_group *block_group,
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- if (!block_group->ro) {
+ bg_ro = block_group->ro;
+ if (!bg_ro) {
block_group->reserved += reserved_bytes;
+ spin_unlock(&block_group->lock);
space_info->bytes_reserved += reserved_bytes;
- update = 1;
+ } else {
+ spin_unlock(&block_group->lock);
}
- spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
@@ -3535,24 +3682,26 @@ static int do_trimming(struct btrfs_block_group *block_group,
mutex_lock(&ctl->cache_writeout_mutex);
if (reserved_start < start)
- __btrfs_add_free_space(fs_info, ctl, reserved_start,
+ __btrfs_add_free_space(block_group, reserved_start,
start - reserved_start,
reserved_trim_state);
- if (start + bytes < reserved_start + reserved_bytes)
- __btrfs_add_free_space(fs_info, ctl, end, reserved_end - end,
+ if (end < reserved_end)
+ __btrfs_add_free_space(block_group, end, reserved_end - end,
reserved_trim_state);
- __btrfs_add_free_space(fs_info, ctl, start, bytes, trim_state);
+ __btrfs_add_free_space(block_group, start, bytes, trim_state);
list_del(&trim_entry->list);
mutex_unlock(&ctl->cache_writeout_mutex);
- if (update) {
+ if (!bg_ro) {
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- if (block_group->ro)
- space_info->bytes_readonly += reserved_bytes;
+ bg_ro = block_group->ro;
block_group->reserved -= reserved_bytes;
- space_info->bytes_reserved -= reserved_bytes;
spin_unlock(&block_group->lock);
+
+ space_info->bytes_reserved -= reserved_bytes;
+ if (bg_ro)
+ space_info->bytes_readonly += reserved_bytes;
spin_unlock(&space_info->lock);
}
@@ -3615,7 +3764,7 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
mutex_unlock(&ctl->cache_writeout_mutex);
goto next;
}
- unlink_free_space(ctl, entry);
+ unlink_free_space(ctl, entry, true);
/*
* Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
* If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
@@ -3641,7 +3790,7 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
goto next;
}
- unlink_free_space(ctl, entry);
+ unlink_free_space(ctl, entry, true);
kmem_cache_free(btrfs_free_space_cachep, entry);
}
@@ -3664,7 +3813,7 @@ next:
if (async && *total_trimmed)
break;
- if (fatal_signal_pending(current)) {
+ if (btrfs_trim_interrupted()) {
ret = -ERESTARTSYS;
break;
}
@@ -3684,7 +3833,7 @@ out_unlock:
/*
* If we break out of trimming a bitmap prematurely, we should reset the
- * trimming bit. In a rather contrieved case, it's possible to race here so
+ * trimming bit. In a rather contrived case, it's possible to race here so
* reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
*
* start = start of bitmap
@@ -3828,7 +3977,7 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
bytes > (max_discard_size + minlen))
bytes = max_discard_size;
- bitmap_clear_bits(ctl, entry, start, bytes);
+ bitmap_clear_bits(ctl, entry, start, bytes, true);
if (entry->bytes == 0)
free_bitmap(ctl, entry);
@@ -3855,7 +4004,7 @@ next:
}
block_group->discard_cursor = start;
- if (fatal_signal_pending(current)) {
+ if (btrfs_trim_interrupted()) {
if (start != offset)
reset_trimming_bitmap(ctl, offset);
ret = -ERESTARTSYS;
@@ -3884,7 +4033,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
*trimmed = 0;
spin_lock(&block_group->lock);
- if (block_group->removed) {
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
return 0;
}
@@ -3914,7 +4063,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
*trimmed = 0;
spin_lock(&block_group->lock);
- if (block_group->removed) {
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
return 0;
}
@@ -3936,7 +4085,7 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
*trimmed = 0;
spin_lock(&block_group->lock);
- if (block_group->removed) {
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
return 0;
}
@@ -3965,7 +4114,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
btrfs_info(fs_info, "cleaning free space cache v1");
- node = rb_first(&fs_info->block_group_cache_tree);
+ node = rb_first_cached(&fs_info->block_group_cache_tree);
while (node) {
block_group = rb_entry(node, struct btrfs_block_group, cache_node);
ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
@@ -3997,7 +4146,7 @@ int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool act
if (!active) {
set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
ret = cleanup_free_space_cache_v1(fs_info, trans);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
@@ -4011,6 +4160,29 @@ out:
return ret;
}
+int __init btrfs_free_space_init(void)
+{
+ btrfs_free_space_cachep = KMEM_CACHE(btrfs_free_space, 0);
+ if (!btrfs_free_space_cachep)
+ return -ENOMEM;
+
+ btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
+ PAGE_SIZE, PAGE_SIZE,
+ 0, NULL);
+ if (!btrfs_free_space_bitmap_cachep) {
+ kmem_cache_destroy(btrfs_free_space_cachep);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void __cold btrfs_free_space_exit(void)
+{
+ kmem_cache_destroy(btrfs_free_space_cachep);
+ kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
+}
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
* Use this if you need to make a bitmap or extent entry specifically, it
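The free-space-cache.c changes above thread every entry through a second, size-ordered index (the free_space_bytes rb_root_cached and the bytes_index node) next to the existing offset-ordered tree, so the allocator can find the largest entries cheaply while trim and cluster code keeps walking by offset. Below is a minimal, hedged sketch of that indexing idea using the generic cached-rbtree helpers; the demo_* names are invented for illustration and are not code from this patch.

/* Hedged sketch, not part of the patch: index entries by size so that
 * rb_first_cached() always yields the largest one. */
#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_entry {
	struct rb_node bytes_index;
	u64 bytes;
};

/* Sort descending by size; the cached leftmost node is then the largest. */
static bool demo_entry_less(struct rb_node *node, const struct rb_node *parent)
{
	const struct demo_entry *e = rb_entry(node, struct demo_entry, bytes_index);
	const struct demo_entry *p = rb_entry(parent, struct demo_entry, bytes_index);

	return e->bytes > p->bytes;
}

static void demo_index_by_size(struct rb_root_cached *tree, struct demo_entry *e)
{
	rb_add_cached(&e->bytes_index, tree, demo_entry_less);
}

static struct demo_entry *demo_largest(struct rb_root_cached *tree)
{
	struct rb_node *node = rb_first_cached(tree);

	return node ? rb_entry(node, struct demo_entry, bytes_index) : NULL;
}

Whenever an entry's size changes (for example when bits are set or cleared in a bitmap entry), it has to be removed with rb_erase_cached() and re-added, which is why the hunks above clear and re-link bytes_index around bitmap manipulation.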
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 1f23088d43f9..9f1dbfdee8ca 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -6,6 +6,20 @@
#ifndef BTRFS_FREE_SPACE_CACHE_H
#define BTRFS_FREE_SPACE_CACHE_H
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/freezer.h>
+#include "fs.h"
+
+struct inode;
+struct page;
+struct btrfs_fs_info;
+struct btrfs_path;
+struct btrfs_trans_handle;
+struct btrfs_trim_block_group;
+
/*
* This is the trim state of an extent or bitmap.
*
@@ -22,6 +36,7 @@ enum btrfs_trim_state {
struct btrfs_free_space {
struct rb_node offset_index;
+ struct rb_node bytes_index;
u64 offset;
u64 bytes;
u64 max_extent_size;
@@ -42,9 +57,26 @@ static inline bool btrfs_free_space_trimming_bitmap(
return (info->trim_state == BTRFS_TRIM_STATE_TRIMMING);
}
+static inline bool btrfs_trim_interrupted(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
+/*
+ * Deltas are an effective way to populate global statistics. Give macro names
+ * to make it clear what we're doing. An example is discard_extents in
+ * btrfs_free_space_ctl.
+ */
+enum {
+ BTRFS_STAT_CURR,
+ BTRFS_STAT_PREV,
+ BTRFS_STAT_NR_ENTRIES,
+};
+
struct btrfs_free_space_ctl {
spinlock_t tree_lock;
struct rb_root free_space_offset;
+ struct rb_root_cached free_space_bytes;
u64 free_space;
int extents_thresh;
int free_extents;
@@ -54,7 +86,7 @@ struct btrfs_free_space_ctl {
s32 discardable_extents[BTRFS_STAT_NR_ENTRIES];
s64 discardable_bytes[BTRFS_STAT_NR_ENTRIES];
const struct btrfs_free_space_op *op;
- void *private;
+ struct btrfs_block_group *block_group;
struct mutex cache_writeout_mutex;
struct list_head trimming_ranges;
};
@@ -77,6 +109,8 @@ struct btrfs_io_ctl {
int bitmaps;
};
+int __init btrfs_free_space_init(void);
+void __cold btrfs_free_space_exit(void);
struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
struct btrfs_path *path);
int create_free_space_inode(struct btrfs_trans_handle *trans,
@@ -86,8 +120,6 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_block_group *block_group);
-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct inode *inode);
@@ -101,10 +133,6 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
struct btrfs_free_space_ctl *ctl);
-int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
- struct btrfs_free_space_ctl *ctl,
- u64 bytenr, u64 size,
- enum btrfs_trim_state trim_state);
int btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
@@ -113,7 +141,6 @@ int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
int btrfs_remove_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
-void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group);
bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group);
u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
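The BTRFS_STAT_CURR/BTRFS_STAT_PREV pair declared above supports the delta pattern described in its comment: each control structure remembers the last value it published and only pushes the difference into the global counter. Here is a hedged sketch of that pattern; the demo_* names and the atomic_t global are invented for illustration (in this header the consumers are the discardable_extents/discardable_bytes arrays).

#include <linux/atomic.h>
#include <linux/types.h>

struct demo_stats {
	s32 extents[BTRFS_STAT_NR_ENTRIES];
};

/* Publish only what changed since the last publication, then remember it. */
static void demo_publish_delta(struct demo_stats *st, atomic_t *global)
{
	s32 delta = st->extents[BTRFS_STAT_CURR] - st->extents[BTRFS_STAT_PREV];

	if (delta) {
		atomic_add(delta, global);
		st->extents[BTRFS_STAT_PREV] = st->extents[BTRFS_STAT_CURR];
	}
}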
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index a33bca94d133..1ad2ad384b9e 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -5,18 +5,37 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h>
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"
#include "block-group.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path);
-void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
+static struct btrfs_root *btrfs_free_space_root(
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_key key = {
+ .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = 0,
+ };
+
+ if (btrfs_fs_incompat(block_group->fs_info, EXTENT_TREE_V2))
+ key.offset = block_group->global_root_id;
+ return btrfs_global_root(block_group->fs_info, &key);
+}
+
+void btrfs_set_free_space_tree_thresholds(struct btrfs_block_group *cache)
{
u32 bitmap_range;
size_t bitmap_size;
@@ -51,7 +70,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
- struct btrfs_root *root = trans->fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_free_space_info *info;
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -63,29 +82,25 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
info = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_info);
btrfs_set_free_space_extent_count(leaf, info, 0);
btrfs_set_free_space_flags(leaf, info, 0);
- btrfs_mark_buffer_dirty(leaf);
-
- ret = 0;
-out:
btrfs_release_path(path);
- return ret;
+ return 0;
}
EXPORT_FOR_TESTS
-struct btrfs_free_space_info *search_free_space_info(
+struct btrfs_free_space_info *btrfs_search_free_space_info(
struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_root *root = fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_key key;
int ret;
@@ -99,7 +114,7 @@ struct btrfs_free_space_info *search_free_space_info(
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu",
block_group->start);
- ASSERT(0);
+ DEBUG_WARN();
return ERR_PTR(-ENOENT);
}
@@ -122,13 +137,13 @@ static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
if (ret < 0)
return ret;
- if (ret == 0) {
- ASSERT(0);
+ if (unlikely(ret == 0)) {
+ DEBUG_WARN();
return -EIO;
}
- if (p->slots[0] == 0) {
- ASSERT(0);
+ if (unlikely(p->slots[0] == 0)) {
+ DEBUG_WARN("no previous slot found");
return -EIO;
}
p->slots[0]--;
@@ -150,11 +165,9 @@ static unsigned long *alloc_bitmap(u32 bitmap_size)
/*
* GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
- * into the filesystem as the free space bitmap can be modified in the
- * critical section of a transaction commit.
- *
- * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
- * know that recursion is unsafe.
+ * into the filesystem here. All callers hold a transaction handle
+ * open, so if a GFP_KERNEL allocation recurses into the filesystem
+ * and triggers a transaction commit, we would deadlock.
*/
nofs_flag = memalloc_nofs_save();
ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
@@ -183,12 +196,12 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
}
EXPORT_FOR_TESTS
-int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path)
+int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *root = fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_free_space_info *info;
struct btrfs_key key, found_key;
struct extent_buffer *leaf;
@@ -203,10 +216,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
bitmap = alloc_bitmap(bitmap_size);
- if (!bitmap) {
- ret = -ENOMEM;
- goto out;
- }
+ if (unlikely(!bitmap)) {
+ btrfs_abort_transaction(trans, -ENOMEM);
+ return -ENOMEM;
+ }
start = block_group->start;
end = block_group->start + block_group->length;
@@ -217,8 +228,10 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
while (!done) {
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
nr = 0;
@@ -253,31 +266,35 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
}
ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
btrfs_release_path(path);
}
- info = search_free_space_info(trans, block_group, path, 1);
+ info = btrfs_search_free_space_info(trans, block_group, path, 1);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
leaf = path->nodes[0];
flags = btrfs_free_space_flags(leaf, info);
flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
+ block_group->using_free_space_bitmaps = true;
+ block_group->using_free_space_bitmaps_cached = true;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
ret = -EIO;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -298,14 +315,15 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, &key,
data_size);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
i += extent_size;
@@ -315,18 +333,16 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ret = 0;
out:
kvfree(bitmap);
- if (ret)
- btrfs_abort_transaction(trans, ret);
return ret;
}
EXPORT_FOR_TESTS
-int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path)
+int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *root = fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_free_space_info *info;
struct btrfs_key key, found_key;
struct extent_buffer *leaf;
@@ -340,10 +356,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
bitmap = alloc_bitmap(bitmap_size);
- if (!bitmap) {
- ret = -ENOMEM;
- goto out;
- }
+ if (unlikely(!bitmap)) {
+ btrfs_abort_transaction(trans, -ENOMEM);
+ return -ENOMEM;
+ }
start = block_group->start;
end = block_group->start + block_group->length;
@@ -354,8 +368,10 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
while (!done) {
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
leaf = path->nodes[0];
nr = 0;
@@ -384,50 +400,56 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
data_size = free_space_bitmap_size(fs_info,
found_key.offset);
- ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
+ path->slots[0]--;
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
read_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
nr++;
- path->slots[0]--;
} else {
ASSERT(0);
}
}
ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
btrfs_release_path(path);
}
- info = search_free_space_info(trans, block_group, path, 1);
+ info = btrfs_search_free_space_info(trans, block_group, path, 1);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
leaf = path->nodes[0];
flags = btrfs_free_space_flags(leaf, info);
flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
+ block_group->using_free_space_bitmaps = false;
+ block_group->using_free_space_bitmaps_cached = true;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
+ nrbits = block_group->length >> fs_info->sectorsize_bits;
start_bit = find_next_bit_le(bitmap, nrbits, 0);
while (start_bit < nrbits) {
end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
ASSERT(start_bit < end_bit);
- key.objectid = start + start_bit * block_group->fs_info->sectorsize;
+ key.objectid = start + start_bit * fs_info->sectorsize;
key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
- key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;
+ key.offset = (end_bit - start_bit) * fs_info->sectorsize;
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
btrfs_release_path(path);
extent_count++;
@@ -435,21 +457,19 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
}
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
ret = -EIO;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ret = 0;
out:
kvfree(bitmap);
- if (ret)
- btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -466,34 +486,31 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
if (new_extents == 0)
return 0;
- info = search_free_space_info(trans, block_group, path, 1);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
+ info = btrfs_search_free_space_info(trans, block_group, path, 1);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
flags = btrfs_free_space_flags(path->nodes[0], info);
extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
extent_count += new_extents;
btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
- btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_release_path(path);
if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
extent_count > block_group->bitmap_high_thresh) {
- ret = convert_free_space_to_bitmaps(trans, block_group, path);
+ ret = btrfs_convert_free_space_to_bitmaps(trans, block_group, path);
} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
extent_count < block_group->bitmap_low_thresh) {
- ret = convert_free_space_to_extents(trans, block_group, path);
+ ret = btrfs_convert_free_space_to_extents(trans, block_group, path);
}
-out:
return ret;
}
EXPORT_FOR_TESTS
-int free_space_test_bit(struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 offset)
+bool btrfs_free_space_test_bit(struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 offset)
{
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -511,12 +528,13 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
i = div_u64(offset - found_start,
block_group->fs_info->sectorsize);
- return !!extent_buffer_test_bit(leaf, ptr, i);
+ return extent_buffer_test_bit(leaf, ptr, i);
}
-static void free_space_set_bits(struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 *start, u64 *size,
- int bit)
+static void free_space_modify_bits(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 *start, u64 *size,
+ bool set_bits)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_buffer *leaf;
@@ -540,11 +558,11 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
first = (*start - found_start) >> fs_info->sectorsize_bits;
last = (end - found_start) >> fs_info->sectorsize_bits;
- if (bit)
+ if (set_bits)
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
*size -= end - *start;
*start = end;
@@ -584,13 +602,14 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path,
- u64 start, u64 size, int remove)
+ u64 start, u64 size, bool remove)
{
- struct btrfs_root *root = block_group->fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_key key;
u64 end = start + size;
u64 cur_start, cur_size;
- int prev_bit, next_bit;
+ bool prev_bit_set = false;
+ bool next_bit_set = false;
int new_extents;
int ret;
@@ -607,16 +626,16 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
if (ret)
- goto out;
+ return ret;
- prev_bit = free_space_test_bit(block_group, path, prev_block);
+ prev_bit_set = btrfs_free_space_test_bit(block_group, path, prev_block);
/* The previous block may have been in the previous bitmap. */
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (start >= key.objectid + key.offset) {
ret = free_space_next_bitmap(trans, root, path);
if (ret)
- goto out;
+ return ret;
}
} else {
key.objectid = start;
@@ -625,9 +644,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
if (ret)
- goto out;
-
- prev_bit = -1;
+ return ret;
}
/*
@@ -637,13 +654,13 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
cur_start = start;
cur_size = size;
while (1) {
- free_space_set_bits(block_group, path, &cur_start, &cur_size,
- !remove);
+ free_space_modify_bits(trans, block_group, path, &cur_start,
+ &cur_size, !remove);
if (cur_size == 0)
break;
ret = free_space_next_bitmap(trans, root, path);
if (ret)
- goto out;
+ return ret;
}
/*
@@ -656,42 +673,36 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
if (end >= key.objectid + key.offset) {
ret = free_space_next_bitmap(trans, root, path);
if (ret)
- goto out;
+ return ret;
}
- next_bit = free_space_test_bit(block_group, path, end);
- } else {
- next_bit = -1;
+ next_bit_set = btrfs_free_space_test_bit(block_group, path, end);
}
if (remove) {
new_extents = -1;
- if (prev_bit == 1) {
+ if (prev_bit_set) {
/* Leftover on the left. */
new_extents++;
}
- if (next_bit == 1) {
+ if (next_bit_set) {
/* Leftover on the right. */
new_extents++;
}
} else {
new_extents = 1;
- if (prev_bit == 1) {
+ if (prev_bit_set) {
/* Merging with neighbor on the left. */
new_extents--;
}
- if (next_bit == 1) {
+ if (next_bit_set) {
/* Merging with neighbor on the right. */
new_extents--;
}
}
btrfs_release_path(path);
- ret = update_free_space_extent_count(trans, block_group, path,
- new_extents);
-
-out:
- return ret;
+ return update_free_space_extent_count(trans, block_group, path, new_extents);
}
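To make the new_extents arithmetic in modify_free_space_bitmap() above concrete, here is a worked example of the bookkeeping (no new code, just the signed deltas the function computes):

- Removing a range with both neighbouring bits set: -1 for the removed run, +1 for the leftover on the left, +1 on the right, so new_extents = +1 (one free extent was split into two).
- Removing a range with neither neighbour set: new_extents = -1 (the whole free extent disappears).
- Adding a range between two already-free neighbours: +1 for the new run, -1 per merge, so new_extents = -1 (three extents become one).
- Adding an isolated range: new_extents = +1.

update_free_space_extent_count() then applies that signed delta to the free space info item and, based on the thresholds, decides whether to convert between extent and bitmap format.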
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
@@ -699,7 +710,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 start, u64 size)
{
- struct btrfs_root *root = trans->fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_key key;
u64 found_start, found_end;
u64 end = start + size;
@@ -712,7 +723,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
if (ret)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -744,7 +755,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
/* Delete the existing key (cases 1-4). */
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ return ret;
/* Add a key for leftovers at the beginning (cases 3 and 4). */
if (start > found_start) {
@@ -755,7 +766,7 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
if (ret)
- goto out;
+ return ret;
new_extents++;
}
@@ -768,81 +779,89 @@ static int remove_free_space_extent(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
if (ret)
- goto out;
+ return ret;
new_extents++;
}
btrfs_release_path(path);
- ret = update_free_space_extent_count(trans, block_group, path,
- new_extents);
-
-out:
- return ret;
+ return update_free_space_extent_count(trans, block_group, path, new_extents);
}
-EXPORT_FOR_TESTS
-int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size)
+static int using_bitmaps(struct btrfs_block_group *bg, struct btrfs_path *path)
{
struct btrfs_free_space_info *info;
u32 flags;
- int ret;
- if (block_group->needs_free_space) {
- ret = __add_block_group_free_space(trans, block_group, path);
- if (ret)
- return ret;
- }
+ if (bg->using_free_space_bitmaps_cached)
+ return bg->using_free_space_bitmaps;
- info = search_free_space_info(NULL, block_group, path, 0);
+ info = btrfs_search_free_space_info(NULL, bg, path, 0);
if (IS_ERR(info))
return PTR_ERR(info);
flags = btrfs_free_space_flags(path->nodes[0], info);
btrfs_release_path(path);
- if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+ bg->using_free_space_bitmaps = (flags & BTRFS_FREE_SPACE_USING_BITMAPS);
+ bg->using_free_space_bitmaps_cached = true;
+
+ return bg->using_free_space_bitmaps;
+}
+
+EXPORT_FOR_TESTS
+int __btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size)
+{
+ int ret;
+
+ ret = __add_block_group_free_space(trans, block_group, path);
+ if (ret)
+ return ret;
+
+ ret = using_bitmaps(block_group, path);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
return modify_free_space_bitmap(trans, block_group, path,
- start, size, 1);
- } else {
- return remove_free_space_extent(trans, block_group, path,
- start, size);
- }
+ start, size, true);
+
+ return remove_free_space_extent(trans, block_group, path, start, size);
}
-int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size)
+int btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size)
{
struct btrfs_block_group *block_group;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
return 0;
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
block_group = btrfs_lookup_block_group(trans->fs_info, start);
- if (!block_group) {
- ASSERT(0);
+ if (unlikely(!block_group)) {
+ DEBUG_WARN("no block group found for start=%llu", start);
ret = -ENOENT;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
mutex_lock(&block_group->free_space_lock);
- ret = __remove_from_free_space_tree(trans, block_group, path, start,
- size);
+ ret = __btrfs_remove_from_free_space_tree(trans, block_group, path, start, size);
mutex_unlock(&block_group->free_space_lock);
-
- btrfs_put_block_group(block_group);
-out:
- btrfs_free_path(path);
if (ret)
btrfs_abort_transaction(trans, ret);
+
+ btrfs_put_block_group(block_group);
+
return ret;
}
@@ -851,7 +870,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 start, u64 size)
{
- struct btrfs_root *root = trans->fs_info->free_space_root;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
struct btrfs_key key, new_key;
u64 found_start, found_end;
u64 end = start + size;
@@ -889,7 +908,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
if (ret)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -912,7 +931,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
if (found_end == start) {
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ return ret;
new_key.objectid = found_start;
new_key.offset += key.offset;
new_extents--;
@@ -929,7 +948,7 @@ right:
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
if (ret)
- goto out;
+ return ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -953,7 +972,7 @@ right:
if (found_start == end) {
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ return ret;
new_key.offset += key.offset;
new_extents--;
}
@@ -963,78 +982,67 @@ insert:
/* Insert the new key (cases 1-4). */
ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
- ret = update_free_space_extent_count(trans, block_group, path,
- new_extents);
-
-out:
- return ret;
+ return update_free_space_extent_count(trans, block_group, path, new_extents);
}
EXPORT_FOR_TESTS
-int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size)
+int __btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size)
{
- struct btrfs_free_space_info *info;
- u32 flags;
int ret;
- if (block_group->needs_free_space) {
- ret = __add_block_group_free_space(trans, block_group, path);
- if (ret)
- return ret;
- }
+ ret = __add_block_group_free_space(trans, block_group, path);
+ if (ret)
+ return ret;
- info = search_free_space_info(NULL, block_group, path, 0);
- if (IS_ERR(info))
- return PTR_ERR(info);
- flags = btrfs_free_space_flags(path->nodes[0], info);
- btrfs_release_path(path);
+ ret = using_bitmaps(block_group, path);
+ if (ret < 0)
+ return ret;
- if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+ if (ret)
return modify_free_space_bitmap(trans, block_group, path,
- start, size, 0);
- } else {
- return add_free_space_extent(trans, block_group, path, start,
- size);
- }
+ start, size, false);
+
+ return add_free_space_extent(trans, block_group, path, start, size);
}
-int add_to_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size)
+int btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size)
{
struct btrfs_block_group *block_group;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
return 0;
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
block_group = btrfs_lookup_block_group(trans->fs_info, start);
- if (!block_group) {
- ASSERT(0);
+ if (unlikely(!block_group)) {
+ DEBUG_WARN("no block group found for start=%llu", start);
ret = -ENOENT;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
mutex_lock(&block_group->free_space_lock);
- ret = __add_to_free_space_tree(trans, block_group, path, start, size);
+ ret = __btrfs_add_to_free_space_tree(trans, block_group, path, start, size);
mutex_unlock(&block_group->free_space_lock);
-
- btrfs_put_block_group(block_group);
-out:
- btrfs_free_path(path);
if (ret)
btrfs_abort_transaction(trans, ret);
+
+ btrfs_put_block_group(block_group);
+
return ret;
}
@@ -1046,8 +1054,9 @@ out:
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group)
{
- struct btrfs_root *extent_root = trans->fs_info->extent_root;
- struct btrfs_path *path, *path2;
+ struct btrfs_root *extent_root;
+ BTRFS_PATH_AUTO_FREE(path);
+ BTRFS_PATH_AUTO_FREE(path2);
struct btrfs_key key;
u64 start, end;
int ret;
@@ -1055,17 +1064,16 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->reada = READA_FORWARD;
path2 = btrfs_alloc_path();
- if (!path2) {
- btrfs_free_path(path);
+ if (!path2)
return -ENOMEM;
- }
+
+ path->reada = READA_FORWARD;
ret = add_new_free_space_info(trans, block_group, path2);
if (ret)
- goto out;
+ return ret;
mutex_lock(&block_group->free_space_lock);
@@ -1080,14 +1088,26 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
+ extent_root = btrfs_extent_root(trans->fs_info, key.objectid);
ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
if (ret < 0)
goto out_locked;
- ASSERT(ret == 0);
-
+ /*
+ * If ret is 1 (no key found), it means this is an empty block group,
+ * without any extents allocated from it and there's no block group
+ * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
+ * because we are using the block group tree feature (so block group
+ * items are stored in the block group tree) or this is a new block
+ * group created in the current transaction and its block group item
+ * was not yet inserted in the extent tree (that happens in
+ * btrfs_create_pending_block_groups() -> insert_block_group_item()).
+ * It also means there are no extents allocated for block groups with a
+ * start offset beyond this block group's end offset (this is the last,
+ * highest, block group).
+ */
start = block_group->start;
end = block_group->start + block_group->length;
- while (1) {
+ while (ret == 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
@@ -1096,11 +1116,11 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
break;
if (start < key.objectid) {
- ret = __add_to_free_space_tree(trans,
- block_group,
- path2, start,
- key.objectid -
- start);
+ ret = __btrfs_add_to_free_space_tree(trans,
+ block_group,
+ path2, start,
+ key.objectid -
+ start);
if (ret)
goto out_locked;
}
@@ -1117,12 +1137,10 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = btrfs_next_item(extent_root, path);
if (ret < 0)
goto out_locked;
- if (ret)
- break;
}
if (start < end) {
- ret = __add_to_free_space_tree(trans, block_group, path2,
- start, end - start);
+ ret = __btrfs_add_to_free_space_tree(trans, block_group, path2,
+ start, end - start);
if (ret)
goto out_locked;
}
@@ -1130,9 +1148,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = 0;
out_locked:
mutex_unlock(&block_group->free_space_lock);
-out:
- btrfs_free_path(path2);
- btrfs_free_path(path);
+
return ret;
}
@@ -1155,17 +1171,28 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
BTRFS_FREE_SPACE_TREE_OBJECTID);
if (IS_ERR(free_space_root)) {
ret = PTR_ERR(free_space_root);
- goto abort;
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ goto out_clear;
+ }
+ ret = btrfs_global_root_insert(free_space_root);
+ if (unlikely(ret)) {
+ btrfs_put_root(free_space_root);
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ goto out_clear;
}
- fs_info->free_space_root = free_space_root;
- node = rb_first(&fs_info->block_group_cache_tree);
+ node = rb_first_cached(&fs_info->block_group_cache_tree);
while (node) {
block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
ret = populate_free_space_tree(trans, block_group);
- if (ret)
- goto abort;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ goto out_clear;
+ }
node = rb_next(node);
}
@@ -1181,19 +1208,18 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
return ret;
-abort:
+out_clear:
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
return ret;
}
static int clear_free_space_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
+ struct rb_node *node;
int nr;
int ret;
@@ -1208,7 +1234,7 @@ static int clear_free_space_tree(struct btrfs_trans_handle *trans,
while (1) {
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
nr = btrfs_header_nritems(path->nodes[0]);
if (!nr)
@@ -1217,22 +1243,34 @@ static int clear_free_space_tree(struct btrfs_trans_handle *trans,
path->slots[0] = 0;
ret = btrfs_del_items(trans, root, path, 0, nr);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ node = rb_first_cached(&trans->fs_info->block_group_cache_tree);
+ while (node) {
+ struct btrfs_block_group *bg;
+
+ bg = rb_entry(node, struct btrfs_block_group, cache_node);
+ clear_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED, &bg->runtime_flags);
+ node = rb_next(node);
+ cond_resched();
+ }
+
+ return 0;
}
-int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
+int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = fs_info->tree_root;
- struct btrfs_root *free_space_root = fs_info->free_space_root;
+ struct btrfs_key key = {
+ .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = 0,
+ };
+ struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key);
int ret;
trans = btrfs_start_transaction(tree_root, 0);
@@ -1241,86 +1279,184 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
- fs_info->free_space_root = NULL;
ret = clear_free_space_tree(trans, free_space_root);
- if (ret)
- goto abort;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
ret = btrfs_del_root(trans, &free_space_root->root_key);
- if (ret)
- goto abort;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
+
+ btrfs_global_root_delete(free_space_root);
+ spin_lock(&fs_info->trans_lock);
list_del(&free_space_root->dirty_list);
+ spin_unlock(&fs_info->trans_lock);
btrfs_tree_lock(free_space_root->node);
- btrfs_clean_tree_block(free_space_root->node);
+ btrfs_clear_buffer_dirty(trans, free_space_root->node);
btrfs_tree_unlock(free_space_root->node);
- btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
- 0, 1);
-
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
+ free_space_root->node, 0, 1);
btrfs_put_root(free_space_root);
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
return btrfs_commit_transaction(trans);
-
-abort:
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
- return ret;
}
-static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path)
+int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_key key = {
+ .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
+ .type = BTRFS_ROOT_ITEM_KEY,
+ .offset = 0,
+ };
+ struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key);
+ struct rb_node *node;
int ret;
- block_group->needs_free_space = 0;
+ trans = btrfs_start_transaction(free_space_root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
- ret = add_new_free_space_info(trans, block_group, path);
- if (ret)
+ set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+ set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+
+ ret = clear_free_space_tree(trans, free_space_root);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
return ret;
+ }
+
+ node = rb_first_cached(&fs_info->block_group_cache_tree);
+ while (node) {
+ struct btrfs_block_group *block_group;
+
+ block_group = rb_entry(node, struct btrfs_block_group,
+ cache_node);
+
+ if (test_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED,
+ &block_group->runtime_flags))
+ goto next;
+
+ ret = populate_free_space_tree(trans, block_group);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
+next:
+ if (btrfs_should_end_transaction(trans)) {
+ btrfs_end_transaction(trans);
+ trans = btrfs_start_transaction(free_space_root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ }
+ node = rb_next(node);
+ }
- return __add_to_free_space_tree(trans, block_group, path,
- block_group->start,
- block_group->length);
+ btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
+ btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
+ clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+
+ ret = btrfs_commit_transaction(trans);
+ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+ return ret;
}
-int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group)
+static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_path *path = NULL;
- int ret = 0;
+ bool own_path = false;
+ int ret;
- if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ if (!test_and_clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
+ &block_group->runtime_flags))
return 0;
- mutex_lock(&block_group->free_space_lock);
- if (!block_group->needs_free_space)
- goto out;
+ /*
+ * While rebuilding the free space tree we may allocate new metadata
+ * block groups while modifying the free space tree.
+ *
+ * Because during the rebuild (at btrfs_rebuild_free_space_tree()) we
+ * can use multiple transactions, every time btrfs_end_transaction() is
+ * called at btrfs_rebuild_free_space_tree() we finish the creation of
+ * new block groups by calling btrfs_create_pending_block_groups(), and
+ * that in turn calls us, through add_block_group_free_space(), to add
+ * a free space info item and a free space extent item for the block
+ * group.
+ *
+ * Then later btrfs_rebuild_free_space_tree() may find such new block
+ * groups and processes them with populate_free_space_tree(), which can
+ * fail with EEXIST since there are already items for the block group in
+ * the free space tree. Notice that we say "may find" because a new
+ * block group may be added to the block groups rbtree in a node before
+ * or after the block group currently being processed by the rebuild
+ * process. So signal the rebuild process to skip such new block groups
+ * if it finds them.
+ */
+ set_bit(BLOCK_GROUP_FLAG_FREE_SPACE_ADDED, &block_group->runtime_flags);
- path = btrfs_alloc_path();
if (!path) {
- ret = -ENOMEM;
+ path = btrfs_alloc_path();
+ if (unlikely(!path)) {
+ btrfs_abort_transaction(trans, -ENOMEM);
+ return -ENOMEM;
+ }
+ own_path = true;
+ }
+
+ ret = add_new_free_space_info(trans, block_group, path);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
}
- ret = __add_block_group_free_space(trans, block_group, path);
+ ret = __btrfs_add_to_free_space_tree(trans, block_group, path,
+ block_group->start, block_group->length);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
out:
- btrfs_free_path(path);
+ if (own_path)
+ btrfs_free_path(path);
+
+ return ret;
+}
+
+int btrfs_add_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group)
+{
+ int ret;
+
+ if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
+ return 0;
+
+ mutex_lock(&block_group->free_space_lock);
+ ret = __add_block_group_free_space(trans, block_group, NULL);
mutex_unlock(&block_group->free_space_lock);
- if (ret)
- btrfs_abort_transaction(trans, ret);
return ret;
}
-int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group)
+int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group)
{
- struct btrfs_root *root = trans->fs_info->free_space_root;
- struct btrfs_path *path;
+ struct btrfs_root *root = btrfs_free_space_root(block_group);
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key, found_key;
struct extent_buffer *leaf;
u64 start, end;
@@ -1330,15 +1466,16 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
return 0;
- if (block_group->needs_free_space) {
+ if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
/* We never added this block group to the free space tree. */
return 0;
}
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
- goto out;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
start = block_group->start;
@@ -1350,8 +1487,10 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
while (!done) {
ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
- if (ret)
- goto out;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
leaf = path->nodes[0];
nr = 0;
@@ -1379,16 +1518,15 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
}
ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
- if (ret)
- goto out;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
btrfs_release_path(path);
}
ret = 0;
-out:
- btrfs_free_path(path);
- if (ret)
- btrfs_abort_transaction(trans, ret);
+
return ret;
}
@@ -1400,7 +1538,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
- int prev_bit = 0, bit;
+ bool prev_bit_set = false;
/* Initialize to silence GCC. */
u64 extent_start = 0;
u64 end, offset;
@@ -1410,14 +1548,14 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
block_group = caching_ctl->block_group;
fs_info = block_group->fs_info;
- root = fs_info->free_space_root;
+ root = btrfs_free_space_root(block_group);
end = block_group->start + block_group->length;
while (1) {
ret = btrfs_next_item(root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret)
break;
@@ -1429,48 +1567,50 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
ASSERT(key.objectid < end && key.objectid + key.offset <= end);
- caching_ctl->progress = key.objectid;
-
offset = key.objectid;
while (offset < key.objectid + key.offset) {
- bit = free_space_test_bit(block_group, path, offset);
- if (prev_bit == 0 && bit == 1) {
+ bool bit_set;
+
+ bit_set = btrfs_free_space_test_bit(block_group, path, offset);
+ if (!prev_bit_set && bit_set) {
extent_start = offset;
- } else if (prev_bit == 1 && bit == 0) {
- total_found += add_new_free_space(block_group,
- extent_start,
- offset);
+ } else if (prev_bit_set && !bit_set) {
+ u64 space_added;
+
+ ret = btrfs_add_new_free_space(block_group,
+ extent_start,
+ offset,
+ &space_added);
+ if (ret)
+ return ret;
+ total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);
}
extent_count++;
}
- prev_bit = bit;
+ prev_bit_set = bit_set;
offset += fs_info->sectorsize;
}
}
- if (prev_bit == 1) {
- total_found += add_new_free_space(block_group, extent_start,
- end);
+ if (prev_bit_set) {
+ ret = btrfs_add_new_free_space(block_group, extent_start, end, NULL);
+ if (ret)
+ return ret;
extent_count++;
}
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
- ret = -EIO;
- goto out;
+ DEBUG_WARN();
+ return -EIO;
}
- caching_ctl->progress = (u64)-1;
-
- ret = 0;
-out:
- return ret;
+ return 0;
}
static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
@@ -1488,14 +1628,16 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
block_group = caching_ctl->block_group;
fs_info = block_group->fs_info;
- root = fs_info->free_space_root;
+ root = btrfs_free_space_root(block_group);
end = block_group->start + block_group->length;
while (1) {
+ u64 space_added;
+
ret = btrfs_next_item(root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret)
break;
@@ -1507,10 +1649,12 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
ASSERT(key.objectid < end && key.objectid + key.offset <= end);
- caching_ctl->progress = key.objectid;
-
- total_found += add_new_free_space(block_group, key.objectid,
- key.objectid + key.offset);
+ ret = btrfs_add_new_free_space(block_group, key.objectid,
+ key.objectid + key.offset,
+ &space_added);
+ if (ret)
+ return ret;
+ total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);
@@ -1518,30 +1662,24 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
extent_count++;
}
- if (extent_count != expected_extent_count) {
+ if (unlikely(extent_count != expected_extent_count)) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->start, extent_count,
expected_extent_count);
- ASSERT(0);
- ret = -EIO;
- goto out;
+ DEBUG_WARN();
+ return -EIO;
}
- caching_ctl->progress = (u64)-1;
-
- ret = 0;
-out:
- return ret;
+ return 0;
}
-int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
+int btrfs_load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
struct btrfs_block_group *block_group;
struct btrfs_free_space_info *info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
u32 extent_count, flags;
- int ret;
block_group = caching_ctl->block_group;
@@ -1553,15 +1691,14 @@ int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
* Just like caching_thread() doesn't want to deadlock on the extent
* tree, we don't want to deadlock on the free space tree.
*/
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
path->reada = READA_FORWARD;
- info = search_free_space_info(NULL, block_group, path, 0);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
+ info = btrfs_search_free_space_info(NULL, block_group, path, 0);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
flags = btrfs_free_space_flags(path->nodes[0], info);
@@ -1571,11 +1708,7 @@ int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
* there.
*/
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
- ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
+ return load_free_space_bitmaps(caching_ctl, path, extent_count);
else
- ret = load_free_space_extents(caching_ctl, path, extent_count);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return load_free_space_extents(caching_ctl, path, extent_count);
}
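Many of the rewritten functions above drop their explicit btrfs_free_path() calls in favour of BTRFS_PATH_AUTO_FREE(), i.e. scope-based cleanup. Below is a minimal, hedged userspace illustration of that mechanism; it assumes the kernel macro wraps __attribute__((cleanup)) in roughly this way, and AUTO_BUF/free_bufp are invented names for the demo, not btrfs identifiers.

#include <stdio.h>
#include <stdlib.h>

static void free_bufp(char **p)
{
	free(*p);	/* free(NULL) is a no-op, so early returns stay safe */
}

/* Invented demo macro mirroring the assumed shape of BTRFS_PATH_AUTO_FREE(). */
#define AUTO_BUF(name) char *name __attribute__((cleanup(free_bufp))) = NULL

int main(void)
{
	AUTO_BUF(buf);

	buf = malloc(32);
	if (!buf)
		return 1;	/* cleanup still runs, freeing NULL harmlessly */
	snprintf(buf, 32, "auto-freed");
	puts(buf);
	return 0;		/* no explicit free(): cleanup runs at scope exit */
}

This is why the error paths above can simply return after btrfs_abort_transaction() without leaking the path.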
diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
index dc2463e4cfe3..3d9a5d4477fc 100644
--- a/fs/btrfs/free-space-tree.h
+++ b/fs/btrfs/free-space-tree.h
@@ -6,7 +6,13 @@
#ifndef BTRFS_FREE_SPACE_TREE_H
#define BTRFS_FREE_SPACE_TREE_H
+#include <linux/bits.h>
+
struct btrfs_caching_control;
+struct btrfs_fs_info;
+struct btrfs_path;
+struct btrfs_block_group;
+struct btrfs_trans_handle;
/*
* The default size for new free space bitmap items. The last bitmap in a block
@@ -16,38 +22,39 @@ struct btrfs_caching_control;
#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
-void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
+void btrfs_set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
-int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
-int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
-int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group);
-int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group);
-int add_to_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size);
-int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- u64 start, u64 size);
+int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info);
+int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info);
+int btrfs_load_free_space_tree(struct btrfs_caching_control *caching_ctl);
+int btrfs_add_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group);
+int btrfs_remove_block_group_free_space(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group);
+int btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size);
+int btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ u64 start, u64 size);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_free_space_info *
-search_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, int cow);
-int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
+btrfs_search_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size);
-int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 start, u64 size);
-int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path);
-int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *block_group,
- struct btrfs_path *path);
-int free_space_test_bit(struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 offset);
+ struct btrfs_path *path, int cow);
+int __btrfs_add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size);
+int __btrfs_remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 start, u64 size);
+int btrfs_convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path);
+int btrfs_convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
+ struct btrfs_path *path);
+bool btrfs_free_space_test_bit(struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 offset);
#endif
#endif
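As a worked example of the bitmap constants above: BTRFS_FREE_SPACE_BITMAP_SIZE is 256 bytes, i.e. 2048 bits, and with one bit per sector a single bitmap item covers 2048 * 4 KiB = 8 MiB of a block group on a 4 KiB-sectorsize filesystem (the 4 KiB figure is only an assumption for the example; the real range is sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS, as used by btrfs_set_free_space_tree_thresholds()). The demo macros below merely spell out that arithmetic and are not part of the header.

#define DEMO_SECTORSIZE		4096ULL					/* assumed 4 KiB sectors */
#define DEMO_BITMAP_BYTES	256ULL					/* BTRFS_FREE_SPACE_BITMAP_SIZE */
#define DEMO_BITMAP_BITS	(DEMO_BITMAP_BYTES * 8)			/* 2048 bits, one per sector */
#define DEMO_BITMAP_COVERAGE	(DEMO_BITMAP_BITS * DEMO_SECTORSIZE)	/* 8 MiB per bitmap item */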
diff --git a/fs/btrfs/fs.c b/fs/btrfs/fs.c
new file mode 100644
index 000000000000..feb0a2faa837
--- /dev/null
+++ b/fs/btrfs/fs.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "messages.h"
+#include "fs.h"
+#include "accessors.h"
+#include "volumes.h"
+
+static const struct btrfs_csums {
+ u16 size;
+ const char name[10];
+ const char driver[12];
+} btrfs_csums[] = {
+ [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
+ [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
+ [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
+ [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
+ .driver = "blake2b-256" },
+};
+
+/* This exists for btrfs-progs usages. */
+u16 btrfs_csum_type_size(u16 type)
+{
+ return btrfs_csums[type].size;
+}
+
+int btrfs_super_csum_size(const struct btrfs_super_block *s)
+{
+ u16 t = btrfs_super_csum_type(s);
+
+ /* csum type is validated at mount time. */
+ return btrfs_csum_type_size(t);
+}
+
+const char *btrfs_super_csum_name(u16 csum_type)
+{
+ /* csum type is validated at mount time. */
+ return btrfs_csums[csum_type].name;
+}
+
+/*
+ * Return driver name if defined, otherwise the name that's also a valid driver
+ * name.
+ */
+const char *btrfs_super_csum_driver(u16 csum_type)
+{
+ /* csum type is validated at mount time */
+ return btrfs_csums[csum_type].driver[0] ?
+ btrfs_csums[csum_type].driver :
+ btrfs_csums[csum_type].name;
+}
+
+size_t __attribute_const__ btrfs_get_num_csums(void)
+{
+ return ARRAY_SIZE(btrfs_csums);
+}
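For illustration only (not part of this patch), the helpers above are the pieces a mount path could combine to load a checksum implementation; the sketch assumes the standard crypto_alloc_shash() API and a super block copy that has already passed validation:

/* Illustrative sketch: resolving and loading the checksum driver. */
static int example_init_csum_hash(struct btrfs_fs_info *fs_info)
{
	u16 csum_type = btrfs_super_csum_type(fs_info->super_copy);
	const char *driver = btrfs_super_csum_driver(csum_type);

	fs_info->csum_shash = crypto_alloc_shash(driver, 0, 0);
	if (IS_ERR(fs_info->csum_shash))
		return PTR_ERR(fs_info->csum_shash);

	fs_info->csum_size = btrfs_csum_type_size(csum_type);
	return 0;
}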
+
+/*
+ * We support the following block sizes for all systems:
+ *
+ * - 4K
+ * This is the most common block size. For PAGE_SIZE > 4K cases the subpage
+ * mode is used.
+ *
+ * - PAGE_SIZE
+ * The straightforward block size to support.
+ *
+ * And there is extra support for the following block sizes, depending on the
+ * kernel config:
+ *
+ * - MIN_BLOCKSIZE
+ *   This is either 4K (regular builds) or 2K (debug builds), which allows
+ *   testing the subpage routines on x86_64.
+ */
+bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize)
+{
+ /* @blocksize should be validated first. */
+ ASSERT(is_power_of_2(blocksize) && blocksize >= BTRFS_MIN_BLOCKSIZE &&
+ blocksize <= BTRFS_MAX_BLOCKSIZE);
+
+ if (blocksize == PAGE_SIZE || blocksize == SZ_4K || blocksize == BTRFS_MIN_BLOCKSIZE)
+ return true;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /*
+ * Support for bs > ps is done by specifying a minimal folio order for the
+ * filemap, thus implying large data folios.
+ * On HIGHMEM systems we cannot always access the content of a (large)
+ * folio in one go, but have to go through it page by page.
+ *
+ * A lot of features don't implement a proper PAGE sized loop for large
+ * folios, including:
+ *
+ * - compression
+ * - verity
+ * - encoded write
+ *
+ * Considering HIGHMEM is such a pain to deal with and it's going
+ * to be deprecated eventually, just reject the HIGHMEM && bs > ps case.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM) && blocksize > PAGE_SIZE)
+ return false;
+ return true;
+#endif
+ return false;
+}
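A hypothetical caller would satisfy the ASSERT() above by range-checking before asking the helper; a minimal sketch (not part of this patch), assuming the check runs on the superblock sectorsize:

/* Sketch of a hypothetical caller: range-check first, then ask the helper. */
static int example_validate_blocksize(u32 sectorsize)
{
	if (!is_power_of_2(sectorsize) ||
	    sectorsize < BTRFS_MIN_BLOCKSIZE || sectorsize > BTRFS_MAX_BLOCKSIZE)
		return -EINVAL;
	if (!btrfs_supported_blocksize(sectorsize))
		return -EINVAL;
	return 0;
}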
+
+/*
+ * Start exclusive operation @type, return true on success.
+ */
+bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ bool ret = false;
+
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
+ fs_info->exclusive_operation = type;
+ ret = true;
+ }
+ spin_unlock(&fs_info->super_lock);
+
+ return ret;
+}
+
+/*
+ * Conditionally allow entering the exclusive operation if it is compatible
+ * with the one already running. This must be paired with
+ * btrfs_exclop_start_unlock() and btrfs_exclop_finish().
+ *
+ * Compatibility:
+ * - the same type is already running
+ * - when trying to add a device and balance has been paused
+ * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
+ *   must first check the condition that would allow the none -> @type transition
+ */
+bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == type ||
+ (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
+ type == BTRFS_EXCLOP_DEV_ADD))
+ return true;
+
+ spin_unlock(&fs_info->super_lock);
+ return false;
+}
+
+void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
+{
+ spin_unlock(&fs_info->super_lock);
+}
+
+void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
+{
+ spin_lock(&fs_info->super_lock);
+ WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
+ spin_unlock(&fs_info->super_lock);
+ sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
+}
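The lock pairing documented above can be illustrated with a simplified, hypothetical caller (not part of this patch; error handling and the paused-balance bookkeeping are omitted):

/* Simplified sketch, loosely modeled on a device-add style caller. */
static int example_exclusive_dev_add(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
		/* DEV_ADD is also allowed while a balance is paused. */
		if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
			return -EBUSY;
		/* super_lock is still held here; the caller may inspect state. */
		btrfs_exclop_start_unlock(fs_info);
	}

	/* ... do the exclusive work ... */

	btrfs_exclop_finish(fs_info);
	return 0;
}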
+
+void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation op)
+{
+ switch (op) {
+ case BTRFS_EXCLOP_BALANCE_PAUSED:
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
+ spin_unlock(&fs_info->super_lock);
+ break;
+ case BTRFS_EXCLOP_BALANCE:
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
+ spin_unlock(&fs_info->super_lock);
+ break;
+ default:
+ btrfs_warn(fs_info,
+ "invalid exclop balance operation %d requested", op);
+ }
+}
+
+void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name)
+{
+ struct btrfs_super_block *disk_super;
+ u64 features;
+
+ disk_super = fs_info->super_copy;
+ features = btrfs_super_incompat_flags(disk_super);
+ if (!(features & flag)) {
+ spin_lock(&fs_info->super_lock);
+ features = btrfs_super_incompat_flags(disk_super);
+ if (!(features & flag)) {
+ features |= flag;
+ btrfs_set_super_incompat_flags(disk_super, features);
+ btrfs_info(fs_info,
+ "setting incompat feature flag for %s (0x%llx)",
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
+ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+}
+
+void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name)
+{
+ struct btrfs_super_block *disk_super;
+ u64 features;
+
+ disk_super = fs_info->super_copy;
+ features = btrfs_super_incompat_flags(disk_super);
+ if (features & flag) {
+ spin_lock(&fs_info->super_lock);
+ features = btrfs_super_incompat_flags(disk_super);
+ if (features & flag) {
+ features &= ~flag;
+ btrfs_set_super_incompat_flags(disk_super, features);
+ btrfs_info(fs_info,
+ "clearing incompat feature flag for %s (0x%llx)",
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
+ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+}
+
+void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name)
+{
+ struct btrfs_super_block *disk_super;
+ u64 features;
+
+ disk_super = fs_info->super_copy;
+ features = btrfs_super_compat_ro_flags(disk_super);
+ if (!(features & flag)) {
+ spin_lock(&fs_info->super_lock);
+ features = btrfs_super_compat_ro_flags(disk_super);
+ if (!(features & flag)) {
+ features |= flag;
+ btrfs_set_super_compat_ro_flags(disk_super, features);
+ btrfs_info(fs_info,
+ "setting compat-ro feature flag for %s (0x%llx)",
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
+ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+}
+
+void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name)
+{
+ struct btrfs_super_block *disk_super;
+ u64 features;
+
+ disk_super = fs_info->super_copy;
+ features = btrfs_super_compat_ro_flags(disk_super);
+ if (features & flag) {
+ spin_lock(&fs_info->super_lock);
+ features = btrfs_super_compat_ro_flags(disk_super);
+ if (features & flag) {
+ features &= ~flag;
+ btrfs_set_super_compat_ro_flags(disk_super, features);
+ btrfs_info(fs_info,
+ "clearing compat-ro feature flag for %s (0x%llx)",
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
+ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+}
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
new file mode 100644
index 000000000000..0f7e1ef27891
--- /dev/null
+++ b/fs/btrfs/fs.h
@@ -0,0 +1,1186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_FS_H
+#define BTRFS_FS_H
+
+#include <linux/blkdev.h>
+#include <linux/sizes.h>
+#include <linux/time64.h>
+#include <linux/compiler.h>
+#include <linux/math.h>
+#include <linux/atomic.h>
+#include <linux/percpu_counter.h>
+#include <linux/completion.h>
+#include <linux/lockdep.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/pagemap.h>
+#include <linux/radix-tree.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/wait_bit.h>
+#include <linux/sched.h>
+#include <linux/rbtree.h>
+#include <uapi/linux/btrfs.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "extent-io-tree.h"
+#include "async-thread.h"
+#include "block-rsv.h"
+#include "messages.h"
+
+struct inode;
+struct super_block;
+struct kobject;
+struct reloc_control;
+struct crypto_shash;
+struct ulist;
+struct btrfs_device;
+struct btrfs_block_group;
+struct btrfs_root;
+struct btrfs_fs_devices;
+struct btrfs_transaction;
+struct btrfs_delayed_root;
+struct btrfs_balance_control;
+struct btrfs_subpage_info;
+struct btrfs_stripe_hash_table;
+struct btrfs_space_info;
+
+/*
+ * Minimum data and metadata block size.
+ *
+ * Normally it's 4K, but for testing subpage block size on 4K page systems, we
+ * allow DEBUG builds to accept 2K page size.
+ */
+#ifdef CONFIG_BTRFS_DEBUG
+#define BTRFS_MIN_BLOCKSIZE (SZ_2K)
+#else
+#define BTRFS_MIN_BLOCKSIZE (SZ_4K)
+#endif
+
+#define BTRFS_MAX_BLOCKSIZE (SZ_64K)
+
+#define BTRFS_MAX_EXTENT_SIZE SZ_128M
+
+#define BTRFS_OLDEST_GENERATION 0ULL
+
+#define BTRFS_EMPTY_DIR_SIZE 0
+
+#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
+
+#define BTRFS_SUPER_INFO_OFFSET SZ_64K
+#define BTRFS_SUPER_INFO_SIZE 4096
+static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
+
+/* Array of bytes with variable length, hexadecimal format 0x1234 */
+#define BTRFS_CSUM_FMT "0x%*phN"
+#define BTRFS_CSUM_FMT_VALUE(size, bytes) size, bytes
+
+#define BTRFS_KEY_FMT "(%llu %u %llu)"
+#define BTRFS_KEY_FMT_VALUE(key) (key)->objectid, (key)->type, (key)->offset
+
+/*
+ * Number of metadata items necessary for an unlink operation:
+ *
+ * 1 for the possible orphan item
+ * 1 for the dir item
+ * 1 for the dir index
+ * 1 for the inode ref
+ * 1 for the inode
+ * 1 for the parent inode
+ */
+#define BTRFS_UNLINK_METADATA_UNITS 6
+
+/*
+ * The reserved space at the beginning of each device. It covers the primary
+ * super block and leaves space for potential use by other tools like
+ * bootloaders, or to lower the potential damage of an accidental overwrite.
+ */
+#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
+/*
+ * Runtime (in-memory) states of filesystem
+ */
+enum {
+ /*
+ * Filesystem is being remounted, allow skipping some operations, like
+ * defrag
+ */
+ BTRFS_FS_STATE_REMOUNTING,
+ /* Filesystem in RO mode */
+ BTRFS_FS_STATE_RO,
+ /* Track if a transaction abort has been reported on this filesystem */
+ BTRFS_FS_STATE_TRANS_ABORTED,
+ /* Track if log replay has failed. */
+ BTRFS_FS_STATE_LOG_REPLAY_ABORTED,
+ /*
+ * Bio operations should be blocked on this filesystem because a source
+ * or target device is being destroyed as part of a device replace
+ */
+ BTRFS_FS_STATE_DEV_REPLACING,
+ /* The btrfs_fs_info created for self-tests */
+ BTRFS_FS_STATE_DUMMY_FS_INFO,
+
+ /* Checksum errors are ignored. */
+ BTRFS_FS_STATE_NO_DATA_CSUMS,
+ BTRFS_FS_STATE_SKIP_META_CSUMS,
+
+ /* Indicates there was an error cleaning up a log tree. */
+ BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
+
+ /* No more delayed iput can be queued. */
+ BTRFS_FS_STATE_NO_DELAYED_IPUT,
+
+ /*
+ * Emergency shutdown: a step further than a transaction abort, as it
+ * rejects all operations.
+ */
+ BTRFS_FS_STATE_EMERGENCY_SHUTDOWN,
+
+ BTRFS_FS_STATE_COUNT
+};
+
+enum {
+ BTRFS_FS_CLOSING_START,
+ BTRFS_FS_CLOSING_DONE,
+ BTRFS_FS_LOG_RECOVERING,
+ BTRFS_FS_OPEN,
+ BTRFS_FS_QUOTA_ENABLED,
+ BTRFS_FS_UPDATE_UUID_TREE_GEN,
+ BTRFS_FS_CREATING_FREE_SPACE_TREE,
+ BTRFS_FS_BTREE_ERR,
+ BTRFS_FS_LOG1_ERR,
+ BTRFS_FS_LOG2_ERR,
+ BTRFS_FS_QUOTA_OVERRIDE,
+ /* Used to record internally whether fs has been frozen */
+ BTRFS_FS_FROZEN,
+ /*
+ * Indicate that balance has been set up from the ioctl and is in the
+ * main phase. The fs_info::balance_ctl is initialized.
+ */
+ BTRFS_FS_BALANCE_RUNNING,
+
+ /*
+ * Indicate that relocation of a chunk has started, it's set per chunk
+ * and is toggled between chunks.
+ */
+ BTRFS_FS_RELOC_RUNNING,
+
+ /* Indicate that the cleaner thread is awake and doing something. */
+ BTRFS_FS_CLEANER_RUNNING,
+
+ /*
+ * The checksumming has an optimized version and is considered fast,
+ * so we don't need to offload checksums to workqueues.
+ */
+ BTRFS_FS_CSUM_IMPL_FAST,
+
+ /* Indicate that the discard workqueue can service discards. */
+ BTRFS_FS_DISCARD_RUNNING,
+
+ /* Indicate that we need to cleanup space cache v1 */
+ BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
+
+ /* Indicate that we can't trust the free space tree for caching yet */
+ BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
+
+ /* Indicate whether there are any tree modification log users */
+ BTRFS_FS_TREE_MOD_LOG_USERS,
+
+ /* Indicate that we want the transaction kthread to commit right now. */
+ BTRFS_FS_COMMIT_TRANS,
+
+ /* Indicate we have half completed snapshot deletions pending. */
+ BTRFS_FS_UNFINISHED_DROPS,
+
+ /* Indicate we have to finish a zone to do next allocation. */
+ BTRFS_FS_NEED_ZONE_FINISH,
+
+ /* Indicate that we want to commit the transaction. */
+ BTRFS_FS_NEED_TRANS_COMMIT,
+
+ /* This is set when active zone tracking is needed. */
+ BTRFS_FS_ACTIVE_ZONE_TRACKING,
+
+ /*
+ * Indicate that some feature flags have changed; this is mostly for the
+ * cleaner thread to update the sysfs interface.
+ */
+ BTRFS_FS_FEATURE_CHANGED,
+
+ /*
+ * Indicate that we have found a tree block which is only aligned to
+ * sectorsize, but not to nodesize. This should be rare nowadays.
+ */
+ BTRFS_FS_UNALIGNED_TREE_BLOCK,
+
+#if BITS_PER_LONG == 32
+ /* Indicate if we have error/warn message printed on 32bit systems */
+ BTRFS_FS_32BIT_ERROR,
+ BTRFS_FS_32BIT_WARN,
+#endif
+};
+
+/*
+ * Flags for mount options.
+ *
+ * Note: don't forget to add new options to btrfs_show_options()
+ */
+enum {
+ BTRFS_MOUNT_NODATASUM = (1ULL << 0),
+ BTRFS_MOUNT_NODATACOW = (1ULL << 1),
+ BTRFS_MOUNT_NOBARRIER = (1ULL << 2),
+ BTRFS_MOUNT_SSD = (1ULL << 3),
+ BTRFS_MOUNT_DEGRADED = (1ULL << 4),
+ BTRFS_MOUNT_COMPRESS = (1ULL << 5),
+ BTRFS_MOUNT_NOTREELOG = (1ULL << 6),
+ BTRFS_MOUNT_FLUSHONCOMMIT = (1ULL << 7),
+ BTRFS_MOUNT_SSD_SPREAD = (1ULL << 8),
+ BTRFS_MOUNT_NOSSD = (1ULL << 9),
+ BTRFS_MOUNT_DISCARD_SYNC = (1ULL << 10),
+ BTRFS_MOUNT_FORCE_COMPRESS = (1ULL << 11),
+ BTRFS_MOUNT_SPACE_CACHE = (1ULL << 12),
+ BTRFS_MOUNT_CLEAR_CACHE = (1ULL << 13),
+ BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1ULL << 14),
+ BTRFS_MOUNT_ENOSPC_DEBUG = (1ULL << 15),
+ BTRFS_MOUNT_AUTO_DEFRAG = (1ULL << 16),
+ BTRFS_MOUNT_USEBACKUPROOT = (1ULL << 17),
+ BTRFS_MOUNT_SKIP_BALANCE = (1ULL << 18),
+ BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1ULL << 19),
+ BTRFS_MOUNT_RESCAN_UUID_TREE = (1ULL << 20),
+ BTRFS_MOUNT_FRAGMENT_DATA = (1ULL << 21),
+ BTRFS_MOUNT_FRAGMENT_METADATA = (1ULL << 22),
+ BTRFS_MOUNT_FREE_SPACE_TREE = (1ULL << 23),
+ BTRFS_MOUNT_NOLOGREPLAY = (1ULL << 24),
+ BTRFS_MOUNT_REF_VERIFY = (1ULL << 25),
+ BTRFS_MOUNT_DISCARD_ASYNC = (1ULL << 26),
+ BTRFS_MOUNT_IGNOREBADROOTS = (1ULL << 27),
+ BTRFS_MOUNT_IGNOREDATACSUMS = (1ULL << 28),
+ BTRFS_MOUNT_NODISCARD = (1ULL << 29),
+ BTRFS_MOUNT_NOSPACECACHE = (1ULL << 30),
+ BTRFS_MOUNT_IGNOREMETACSUMS = (1ULL << 31),
+ BTRFS_MOUNT_IGNORESUPERFLAGS = (1ULL << 32),
+ BTRFS_MOUNT_REF_TRACKER = (1ULL << 33),
+};
+
+/*
+ * Compat flags that we support. If any incompat flags are set other than the
+ * ones specified below then we will fail to mount
+ */
+#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
+#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
+#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL
+
+#define BTRFS_FEATURE_COMPAT_RO_SUPP \
+ (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
+ BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
+ BTRFS_FEATURE_COMPAT_RO_VERITY | \
+ BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)
+
+#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
+#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL
+
+#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
+ (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
+ BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
+ BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
+ BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
+ BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
+ BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
+ BTRFS_FEATURE_INCOMPAT_RAID56 | \
+ BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
+ BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
+ BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
+ BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
+ BTRFS_FEATURE_INCOMPAT_ZONED | \
+ BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /*
+ * Features under development, like extent tree v2 support, are enabled
+ * only under CONFIG_BTRFS_EXPERIMENTAL.
+ */
+#define BTRFS_FEATURE_INCOMPAT_SUPP \
+ (BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
+ BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
+ BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
+
+#else
+
+#define BTRFS_FEATURE_INCOMPAT_SUPP \
+ (BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)
+
+#endif
+
+#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
+ (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
+#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL
+
+#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
+#define BTRFS_WARNING_COMMIT_INTERVAL (300)
+#define BTRFS_DEFAULT_MAX_INLINE (2048)
+
+enum btrfs_compression_type {
+ BTRFS_COMPRESS_NONE = 0,
+ BTRFS_COMPRESS_ZLIB = 1,
+ BTRFS_COMPRESS_LZO = 2,
+ BTRFS_COMPRESS_ZSTD = 3,
+ BTRFS_NR_COMPRESS_TYPES = 4,
+
+ BTRFS_DEFRAG_DONT_COMPRESS,
+};
+
+struct btrfs_dev_replace {
+ /* See #define above */
+ u64 replace_state;
+ /* Seconds since 1-Jan-1970 */
+ time64_t time_started;
+ /* Seconds since 1-Jan-1970 */
+ time64_t time_stopped;
+ atomic64_t num_write_errors;
+ atomic64_t num_uncorrectable_read_errors;
+
+ u64 cursor_left;
+ u64 committed_cursor_left;
+ u64 cursor_left_last_write_of_item;
+ u64 cursor_right;
+
+ /* See #define above */
+ u64 cont_reading_from_srcdev_mode;
+
+ int is_valid;
+ int item_needs_writeback;
+ struct btrfs_device *srcdev;
+ struct btrfs_device *tgtdev;
+
+ struct mutex lock_finishing_cancel_unmount;
+ struct rw_semaphore rwsem;
+
+ struct btrfs_scrub_progress scrub_progress;
+
+ struct percpu_counter bio_counter;
+ wait_queue_head_t replace_wait;
+
+ struct task_struct *replace_task;
+};
+
+/*
+ * Free clusters are used to claim free space in relatively large chunks,
+ * allowing us to do less seeky writes. They are used for all metadata
+ * allocations. In ssd_spread mode they are also used for data allocations.
+ */
+struct btrfs_free_cluster {
+ spinlock_t lock;
+ spinlock_t refill_lock;
+ struct rb_root root;
+
+ /* Largest extent in this cluster */
+ u64 max_size;
+
+ /* First extent starting offset */
+ u64 window_start;
+
+ /* We did a full search and couldn't create a cluster */
+ bool fragmented;
+
+ struct btrfs_block_group *block_group;
+ /*
+ * When a cluster is allocated from a block group, we put the cluster
+ * onto a list in the block group so that it can be freed before the
+ * block group is freed.
+ */
+ struct list_head block_group_list;
+};
+
+/* Discard control. */
+/*
+ * Async discard uses multiple lists to differentiate the discard filter
+ * parameters. Index 0 is for completely free block groups where we need to
+ * ensure the entire block group is trimmed without being lossy. Indices
+ * afterwards represent monotonically decreasing discard filter sizes to
+ * prioritize what should be discarded next.
+ */
+#define BTRFS_NR_DISCARD_LISTS 3
+#define BTRFS_DISCARD_INDEX_UNUSED 0
+#define BTRFS_DISCARD_INDEX_START 1
+
+struct btrfs_discard_ctl {
+ struct workqueue_struct *discard_workers;
+ struct delayed_work work;
+ spinlock_t lock;
+ struct btrfs_block_group *block_group;
+ struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
+ u64 prev_discard;
+ u64 prev_discard_time;
+ atomic_t discardable_extents;
+ atomic64_t discardable_bytes;
+ u64 max_discard_size;
+ u64 delay_ms;
+ u32 iops_limit;
+ u32 kbps_limit;
+ u64 discard_extent_bytes;
+ u64 discard_bitmap_bytes;
+ atomic64_t discard_bytes_saved;
+};
+
+/*
+ * Exclusive operations (device replace, resize, device add/remove, balance)
+ */
+enum btrfs_exclusive_operation {
+ BTRFS_EXCLOP_NONE,
+ BTRFS_EXCLOP_BALANCE_PAUSED,
+ BTRFS_EXCLOP_BALANCE,
+ BTRFS_EXCLOP_DEV_ADD,
+ BTRFS_EXCLOP_DEV_REMOVE,
+ BTRFS_EXCLOP_DEV_REPLACE,
+ BTRFS_EXCLOP_RESIZE,
+ BTRFS_EXCLOP_SWAP_ACTIVATE,
+};
+
+/* Store data about transaction commits, exported via sysfs. */
+struct btrfs_commit_stats {
+ /* Total number of commits */
+ u64 commit_count;
+ /* The maximum commit duration so far in ns */
+ u64 max_commit_dur;
+ /* The last commit duration in ns */
+ u64 last_commit_dur;
+ /* The total commit duration in ns */
+ u64 total_commit_dur;
+ /* Start of the last critical section in ns. */
+ u64 critical_section_start_time;
+};
+
+struct btrfs_fs_info {
+ u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+ unsigned long flags;
+ struct btrfs_root *tree_root;
+ struct btrfs_root *chunk_root;
+ struct btrfs_root *dev_root;
+ struct btrfs_root *fs_root;
+ struct btrfs_root *quota_root;
+ struct btrfs_root *uuid_root;
+ struct btrfs_root *data_reloc_root;
+ struct btrfs_root *block_group_root;
+ struct btrfs_root *stripe_root;
+
+ /* The log root tree is a directory of all the other log roots */
+ struct btrfs_root *log_root_tree;
+
+ /* The tree that holds the global roots (csum, extent, etc) */
+ rwlock_t global_root_lock;
+ struct rb_root global_root_tree;
+
+ spinlock_t fs_roots_radix_lock;
+ struct radix_tree_root fs_roots_radix;
+
+ /* Block group cache stuff */
+ rwlock_t block_group_cache_lock;
+ struct rb_root_cached block_group_cache_tree;
+
+ /* Keep track of unallocated space */
+ atomic64_t free_chunk_space;
+
+ /* Track ranges which are used by log trees blocks/logged data extents */
+ struct extent_io_tree excluded_extents;
+
+ /* logical->physical extent mapping */
+ struct rb_root_cached mapping_tree;
+ rwlock_t mapping_tree_lock;
+
+ /*
+ * Block reservation for extent, checksum, root tree and delayed dir
+ * index item.
+ */
+ struct btrfs_block_rsv global_block_rsv;
+ /* Block reservation for metadata operations */
+ struct btrfs_block_rsv trans_block_rsv;
+ /* Block reservation for chunk tree */
+ struct btrfs_block_rsv chunk_block_rsv;
+ /* Block reservation for delayed operations */
+ struct btrfs_block_rsv delayed_block_rsv;
+ /* Block reservation for delayed refs */
+ struct btrfs_block_rsv delayed_refs_rsv;
+ /* Block reservation for treelog tree */
+ struct btrfs_block_rsv treelog_rsv;
+
+ struct btrfs_block_rsv empty_block_rsv;
+
+ /*
+ * Updated while holding the lock 'trans_lock'. Due to the life cycle of
+ * a transaction, it can be read directly while holding a transaction
+ * handle; everywhere else it must be read with btrfs_get_fs_generation().
+ * Should always be updated using btrfs_set_fs_generation().
+ */
+ u64 generation;
+ /*
+ * Always use btrfs_get_last_trans_committed() and
+ * btrfs_set_last_trans_committed() to read and update this field.
+ */
+ u64 last_trans_committed;
+ /*
+ * Generation of the last transaction used for block group relocation
+ * since the filesystem was last mounted (or 0 if none happened yet).
+ * Must be written and read while holding btrfs_fs_info::commit_root_sem.
+ */
+ u64 last_reloc_trans;
+
+ /*
+ * This is updated to the current trans every time a full commit is
+ * required instead of the faster short fsync log commits
+ */
+ u64 last_trans_log_full_commit;
+ unsigned long long mount_opt;
+
+ /* Compress related structures. */
+ void *compr_wsm[BTRFS_NR_COMPRESS_TYPES];
+
+ int compress_type;
+ int compress_level;
+ u32 commit_interval;
+ /*
+ * It is an advisory number; the read side is safe even if it sees a
+ * stale value, because we will simply write the data out into a regular
+ * extent. The write side (mount/remount) is under the ->s_umount lock,
+ * so it is also safe.
+ */
+ u64 max_inline;
+
+ struct btrfs_transaction *running_transaction;
+ wait_queue_head_t transaction_throttle;
+ wait_queue_head_t transaction_wait;
+ wait_queue_head_t transaction_blocked_wait;
+ wait_queue_head_t async_submit_wait;
+
+ /*
+ * Used to protect the incompat_flags, compat_flags and compat_ro_flags
+ * while they are updated.
+ *
+ * Because the flags are never cleared once set, we don't need to take
+ * the lock on the read side.
+ *
+ * We also don't need the lock while mounting the fs, because
+ * there is no other task that could update the flags yet.
+ */
+ spinlock_t super_lock;
+ struct btrfs_super_block *super_copy;
+ struct btrfs_super_block *super_for_commit;
+ struct super_block *sb;
+ struct inode *btree_inode;
+ struct mutex tree_log_mutex;
+ struct mutex transaction_kthread_mutex;
+ struct mutex cleaner_mutex;
+ struct mutex chunk_mutex;
+
+ /*
+ * This is taken to make sure we don't set block groups ro after the
+ * free space cache has been allocated on them.
+ */
+ struct mutex ro_block_group_mutex;
+
+ /*
+ * This is used during read/modify/write to make sure no two ios are
+ * trying to mod the same stripe at the same time.
+ */
+ struct btrfs_stripe_hash_table *stripe_hash_table;
+
+ /*
+ * This protects the ordered operations list only while we are
+ * processing all of the entries on it. This way we make sure the
+ * commit code doesn't find the list temporarily empty because another
+ * function happens to be doing non-waiting preflush before jumping
+ * into the main commit.
+ */
+ struct mutex ordered_operations_mutex;
+
+ struct rw_semaphore commit_root_sem;
+
+ struct rw_semaphore cleanup_work_sem;
+
+ struct rw_semaphore subvol_sem;
+
+ spinlock_t trans_lock;
+ /*
+ * The reloc mutex goes with the trans lock, it is taken during commit
+ * to protect us from the relocation code.
+ */
+ struct mutex reloc_mutex;
+
+ struct list_head trans_list;
+ struct list_head dead_roots;
+ struct list_head caching_block_groups;
+
+ spinlock_t delayed_iput_lock;
+ struct list_head delayed_iputs;
+ atomic_t nr_delayed_iputs;
+ wait_queue_head_t delayed_iputs_wait;
+
+ atomic64_t tree_mod_seq;
+
+ /* This protects tree_mod_log and tree_mod_seq_list */
+ rwlock_t tree_mod_log_lock;
+ struct rb_root tree_mod_log;
+ struct list_head tree_mod_seq_list;
+
+ atomic_t async_delalloc_pages;
+
+ /* This is used to protect the following list -- ordered_roots. */
+ spinlock_t ordered_root_lock;
+
+ /*
+ * All fs/file tree roots in which there are data=ordered extents
+ * pending writeback are added into this list.
+ *
+ * These can span multiple transactions and basically include every
+ * dirty data page that isn't from nodatacow.
+ */
+ struct list_head ordered_roots;
+
+ struct mutex delalloc_root_mutex;
+ spinlock_t delalloc_root_lock;
+ /* All fs/file tree roots that have delalloc inodes. */
+ struct list_head delalloc_roots;
+
+ /*
+ * There is a pool of worker threads for checksumming during writes and
+ * a pool for checksumming after reads. This is because readers can
+ * run with FS locks held, and the writers may be waiting for those
+ * locks. We don't want ordering in the pending list to cause
+ * deadlocks, and so the two are serviced separately.
+ *
+ * A third pool does submit_bio to avoid deadlocking with the other two.
+ */
+ struct btrfs_workqueue *workers;
+ struct btrfs_workqueue *delalloc_workers;
+ struct btrfs_workqueue *flush_workers;
+ struct workqueue_struct *endio_workers;
+ struct workqueue_struct *endio_meta_workers;
+ struct workqueue_struct *rmw_workers;
+ struct btrfs_workqueue *endio_write_workers;
+ struct btrfs_workqueue *endio_freespace_worker;
+ struct btrfs_workqueue *caching_workers;
+
+ /*
+ * Fixup workers take dirty pages that didn't properly go through the
+ * cow mechanism and make them safe to write. It happens for the
+ * sys_munmap function call path.
+ */
+ struct btrfs_workqueue *fixup_workers;
+ struct btrfs_workqueue *delayed_workers;
+
+ struct task_struct *transaction_kthread;
+ struct task_struct *cleaner_kthread;
+ u32 thread_pool_size;
+
+ struct kobject *space_info_kobj;
+ struct kobject *qgroups_kobj;
+ struct kobject *discard_kobj;
+
+ /* Track the number of blocks (sectors) read by the filesystem. */
+ struct percpu_counter stats_read_blocks;
+
+ /* Used to keep from writing metadata until there is a nice batch */
+ struct percpu_counter dirty_metadata_bytes;
+ struct percpu_counter delalloc_bytes;
+ struct percpu_counter ordered_bytes;
+ s32 dirty_metadata_batch;
+ s32 delalloc_batch;
+
+ struct percpu_counter evictable_extent_maps;
+ u64 em_shrinker_last_root;
+ u64 em_shrinker_last_ino;
+ atomic64_t em_shrinker_nr_to_scan;
+ struct work_struct em_shrinker_work;
+
+ /* Protected by 'trans_lock'. */
+ struct list_head dirty_cowonly_roots;
+
+ struct btrfs_fs_devices *fs_devices;
+
+ /*
+ * The space_info list is effectively read only after initial setup.
+ * It is populated at mount time and cleaned up after all block groups
+ * are removed. RCU is used to protect it.
+ */
+ struct list_head space_info;
+
+ struct btrfs_space_info *data_sinfo;
+
+ struct reloc_control *reloc_ctl;
+
+ /* data_alloc_cluster is only used in ssd_spread mode */
+ struct btrfs_free_cluster data_alloc_cluster;
+
+ /* All metadata allocations go through this cluster. */
+ struct btrfs_free_cluster meta_alloc_cluster;
+
+ /* Auto defrag inodes go here. */
+ spinlock_t defrag_inodes_lock;
+ struct rb_root defrag_inodes;
+ atomic_t defrag_running;
+
+ /* Used to protect avail_{data, metadata, system}_alloc_bits */
+ seqlock_t profiles_lock;
+ /*
+ * These three are in extended format (availability of single chunks is
+ * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
+ * by corresponding BTRFS_BLOCK_GROUP_* bits)
+ */
+ u64 avail_data_alloc_bits;
+ u64 avail_metadata_alloc_bits;
+ u64 avail_system_alloc_bits;
+
+ /* Balance state */
+ spinlock_t balance_lock;
+ struct mutex balance_mutex;
+ atomic_t balance_pause_req;
+ atomic_t balance_cancel_req;
+ struct btrfs_balance_control *balance_ctl;
+ wait_queue_head_t balance_wait_q;
+
+ /* Cancellation requests for chunk relocation */
+ atomic_t reloc_cancel_req;
+
+ u32 data_chunk_allocations;
+ u32 metadata_ratio;
+
+ /* Private scrub information */
+ struct mutex scrub_lock;
+ atomic_t scrubs_running;
+ atomic_t scrub_pause_req;
+ atomic_t scrubs_paused;
+ atomic_t scrub_cancel_req;
+ wait_queue_head_t scrub_pause_wait;
+ /*
+ * The worker pointers are NULL iff the refcount is 0, i.e. scrub is not
+ * running.
+ */
+ refcount_t scrub_workers_refcnt;
+ struct workqueue_struct *scrub_workers;
+
+ struct btrfs_discard_ctl discard_ctl;
+
+ /* Is qgroup tracking in a consistent state? */
+ u64 qgroup_flags;
+
+ /* Holds configuration and tracking. Protected by qgroup_lock. */
+ struct rb_root qgroup_tree;
+ spinlock_t qgroup_lock;
+
+ /*
+ * Protect user change for quota operations. If a transaction is needed,
+ * it must be started before locking this lock.
+ */
+ struct mutex qgroup_ioctl_lock;
+
+ /* List of dirty qgroups to be written at next commit. */
+ struct list_head dirty_qgroups;
+
+ /* Used by qgroup for an efficient tree traversal. */
+ u64 qgroup_seq;
+
+ /* Qgroup rescan items. */
+ /* Protects the progress item */
+ struct mutex qgroup_rescan_lock;
+ struct btrfs_key qgroup_rescan_progress;
+ struct btrfs_workqueue *qgroup_rescan_workers;
+ struct completion qgroup_rescan_completion;
+ struct btrfs_work qgroup_rescan_work;
+ /* Protected by qgroup_rescan_lock */
+ bool qgroup_rescan_running;
+ u8 qgroup_drop_subtree_thres;
+ u64 qgroup_enable_gen;
+
+ /*
+ * If this is not 0, then it indicates a serious filesystem error has
+ * happened and it contains that error (negative errno value).
+ */
+ int fs_error;
+
+ /* Filesystem state */
+ unsigned long fs_state;
+
+ struct btrfs_delayed_root *delayed_root;
+
+ /* Entries are eb->start >> nodesize_bits */
+ struct xarray buffer_tree;
+
+ /* Next backup root to be overwritten */
+ int backup_root_index;
+
+ /* Device replace state */
+ struct btrfs_dev_replace dev_replace;
+
+ struct semaphore uuid_tree_rescan_sem;
+
+ /* Used to reclaim the metadata space in the background. */
+ struct work_struct async_reclaim_work;
+ struct work_struct async_data_reclaim_work;
+ struct work_struct preempt_reclaim_work;
+
+ /* Reclaim partially filled block groups in the background */
+ struct work_struct reclaim_bgs_work;
+ /* Protected by unused_bgs_lock. */
+ struct list_head reclaim_bgs;
+ int bg_reclaim_threshold;
+
+ /* Protects the lists unused_bgs and reclaim_bgs. */
+ spinlock_t unused_bgs_lock;
+ /* Protected by unused_bgs_lock. */
+ struct list_head unused_bgs;
+ struct mutex unused_bg_unpin_mutex;
+ /* Protect block groups that are going to be deleted */
+ struct mutex reclaim_bgs_lock;
+
+ /* Cached block sizes */
+ u32 nodesize;
+ u32 nodesize_bits;
+ u32 sectorsize;
+ /* ilog2 of sectorsize, used to avoid 64bit division */
+ u32 sectorsize_bits;
+ u32 block_min_order;
+ u32 block_max_order;
+ u32 csum_size;
+ u32 csums_per_leaf;
+ u32 stripesize;
+
+ /*
+ * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
+ * filesystem, on zoned it depends on the device constraints.
+ */
+ u64 max_extent_size;
+
+ /* Block groups and devices containing active swapfiles. */
+ spinlock_t swapfile_pins_lock;
+ struct rb_root swapfile_pins;
+
+ struct crypto_shash *csum_shash;
+
+ /* Type of exclusive operation running, protected by super_lock */
+ enum btrfs_exclusive_operation exclusive_operation;
+
+ /*
+ * Zone size is > 0 when in ZONED mode; a value of 0 doubles as the check
+ * for whether the mode is enabled.
+ */
+ u64 zone_size;
+
+ /* Constraints for ZONE_APPEND commands: */
+ struct queue_limits limits;
+ u64 max_zone_append_size;
+
+ struct mutex zoned_meta_io_lock;
+ spinlock_t treelog_bg_lock;
+ u64 treelog_bg;
+
+ /*
+ * Start of the dedicated data relocation block group, protected by
+ * relocation_bg_lock.
+ */
+ spinlock_t relocation_bg_lock;
+ u64 data_reloc_bg;
+ struct mutex zoned_data_reloc_io_lock;
+
+ struct btrfs_block_group *active_meta_bg;
+ struct btrfs_block_group *active_system_bg;
+
+ u64 nr_global_roots;
+
+ spinlock_t zone_active_bgs_lock;
+ struct list_head zone_active_bgs;
+
+ /* Updates are not protected by any lock */
+ struct btrfs_commit_stats commit_stats;
+
+ /*
+ * Last generation where we dropped a non-relocation root.
+ * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
+ * to change it and to read it, respectively.
+ */
+ u64 last_root_drop_gen;
+
+ /*
+ * Annotations for transaction events (structures are empty when
+ * compiled without lockdep).
+ */
+ struct lockdep_map btrfs_trans_num_writers_map;
+ struct lockdep_map btrfs_trans_num_extwriters_map;
+ struct lockdep_map btrfs_state_change_map[4];
+ struct lockdep_map btrfs_trans_pending_ordered_map;
+ struct lockdep_map btrfs_ordered_extent_map;
+
+#ifdef CONFIG_BTRFS_DEBUG
+ spinlock_t ref_verify_lock;
+ struct rb_root block_tree;
+
+ struct kobject *debug_kobj;
+ struct list_head allocated_roots;
+
+ spinlock_t eb_leak_lock;
+ struct list_head allocated_ebs;
+#endif
+};
+
+#define folio_to_inode(_folio) (BTRFS_I(_Generic((_folio), \
+ struct folio *: (_folio))->mapping->host))
+
+#define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info)
+
+#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode), \
+ struct inode *: (_inode)))->root->fs_info)
+
+static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
+{
+ return mapping_gfp_constraint(mapping, ~__GFP_FS);
+}
+
+/* Return the minimal folio size of the fs. */
+static inline unsigned int btrfs_min_folio_size(struct btrfs_fs_info *fs_info)
+{
+ return 1U << (PAGE_SHIFT + fs_info->block_min_order);
+}
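A worked example of the helper above, under the assumption (not stated in the patch) that block_min_order is set to ilog2(blocksize / PAGE_SIZE) for block size > page size setups:

/*
 * Example (assumption: block_min_order = ilog2(blocksize / PAGE_SIZE) when
 * the block size exceeds the page size): with 4K pages (PAGE_SHIFT == 12)
 * and a 16K block size, block_min_order is 2 and btrfs_min_folio_size()
 * returns 1U << (12 + 2) = 16K. With block size <= page size,
 * block_min_order is 0 and the result is simply PAGE_SIZE.
 */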
+
+static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
+{
+ return READ_ONCE(fs_info->generation);
+}
+
+static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
+{
+ WRITE_ONCE(fs_info->generation, gen);
+}
+
+static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
+{
+ return READ_ONCE(fs_info->last_trans_committed);
+}
+
+static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
+{
+ WRITE_ONCE(fs_info->last_trans_committed, gen);
+}
+
+static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
+ u64 gen)
+{
+ WRITE_ONCE(fs_info->last_root_drop_gen, gen);
+}
+
+static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
+{
+ return READ_ONCE(fs_info->last_root_drop_gen);
+}
+
+/*
+ * Take the number of bytes to be checksummed and figure out how many leaves
+ * it would require to store the csums for that many bytes.
+ */
+static inline u64 btrfs_csum_bytes_to_leaves(
+ const struct btrfs_fs_info *fs_info, u64 csum_bytes)
+{
+ const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;
+
+ return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
+}
+
+/*
+ * Use this if we would be adding new items, as we could split nodes as we cow
+ * down the tree.
+ */
+static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
+ unsigned num_items)
+{
+ return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+}
+
+/*
+ * Doing a truncate or a modification won't result in new nodes or leaves, just
+ * what we need for COW.
+ */
+static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
+ unsigned num_items)
+{
+ return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
+}
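A quick worked example of the two reservation formulas above (BTRFS_MAX_LEVEL is 8):

/*
 * With a 16K nodesize: reserving for one inserted item costs
 * 16K * 8 * 2 = 256K (btrfs_calc_insert_metadata_size), while a COW-only
 * modification of one item costs 16K * 8 = 128K (btrfs_calc_metadata_size).
 */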
+
+#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
+ sizeof(struct btrfs_item))
+
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) ((bytes) >> (fs_info)->sectorsize_bits)
+
+static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
+{
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
+}
+
+/*
+ * Count how many fs_info->max_extent_size sized extents are needed to cover @size.
+ */
+static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
+{
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ if (!fs_info)
+ return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
+#endif
+
+ return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
+}
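A worked example of the accounting above, for a regular (non-zoned) filesystem where max_extent_size equals BTRFS_MAX_EXTENT_SIZE:

/*
 * e.g. a 300M delalloc range is accounted as
 * count_max_extents(fs_info, 300 * SZ_1M) == 3, since
 * (300M + 128M - 1) / 128M rounds up to 3.
 */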
+
+static inline unsigned int btrfs_blocks_per_folio(const struct btrfs_fs_info *fs_info,
+ const struct folio *folio)
+{
+ return folio_size(folio) >> fs_info->sectorsize_bits;
+}
+
+bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize);
+bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type);
+bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type);
+void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
+void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
+void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation op);
+
+int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);
+
+u16 btrfs_csum_type_size(u16 type);
+int btrfs_super_csum_size(const struct btrfs_super_block *s);
+const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __attribute_const__ btrfs_get_num_csums(void);
+
+static inline bool btrfs_is_empty_uuid(const u8 *uuid)
+{
+ return uuid_is_null((const uuid_t *)uuid);
+}
+
+/* Compatibility and incompatibility defines */
+void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name);
+void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name);
+void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name);
+void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
+ const char *name);
+
+#define __btrfs_fs_incompat(fs_info, flags) \
+ (!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))
+
+#define __btrfs_fs_compat_ro(fs_info, flags) \
+ (!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))
+
+#define btrfs_set_fs_incompat(__fs_info, opt) \
+ __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)
+
+#define btrfs_clear_fs_incompat(__fs_info, opt) \
+ __btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)
+
+#define btrfs_fs_incompat(fs_info, opt) \
+ __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+#define btrfs_set_fs_compat_ro(__fs_info, opt) \
+ __btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)
+
+#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
+ __btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)
+
+#define btrfs_fs_compat_ro(fs_info, opt) \
+ __btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
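For reference, the stringifying wrappers above expand as in this example:

/*
 * e.g. btrfs_set_fs_incompat(fs_info, RAID1C34) expands to
 * __btrfs_set_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_RAID1C34, "RAID1C34"),
 * and btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) tests the corresponding
 * compat_ro bit in the cached super_copy.
 */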
+
+#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
+#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
+#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
+#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
+ BTRFS_MOUNT_##opt)
+
+static inline int btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
+{
+ /* Do it this way so we only ever do one test_bit in the normal case. */
+ if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
+ if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
+ return 2;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
+ * anything except sleeping. This function is used to check the status of
+ * the fs.
+ * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
+ * since setting and checking for SB_RDONLY in the superblock's flags is not
+ * atomic.
+ */
+static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
+{
+ return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
+ btrfs_fs_closing(fs_info);
+}
+
+static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
+{
+ clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
+}
+
+#define BTRFS_FS_ERROR(fs_info) (READ_ONCE((fs_info)->fs_error))
+
+#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
+ (unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
+ &(fs_info)->fs_state)))
+
+static inline bool btrfs_is_shutdown(struct btrfs_fs_info *fs_info)
+{
+ return test_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state);
+}
+
+static inline void btrfs_force_shutdown(struct btrfs_fs_info *fs_info)
+{
+ /*
+ * Here we do not want to use handle_fs_error(), which will mark the fs
+ * read-only.
+ * Some call sites like shutdown ioctl will mark the fs shutdown when
+ * the fs is frozen. But thaw path will handle RO and RW fs
+ * differently.
+ *
+ * So here we only mark the fs error without flipping it RO.
+ */
+ WRITE_ONCE(fs_info->fs_error, -EIO);
+ if (!test_and_set_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state))
+ btrfs_crit(fs_info, "emergency shutdown");
+}
+
+/*
+ * We use folio flag owner_2 to indicate there is an ordered extent with
+ * unfinished IO.
+ */
+#define folio_test_ordered(folio) folio_test_owner_2(folio)
+#define folio_set_ordered(folio) folio_set_owner_2(folio)
+#define folio_clear_ordered(folio) folio_clear_owner_2(folio)
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+
+#define EXPORT_FOR_TESTS
+
+static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
+{
+ return unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state));
+}
+
+void btrfs_test_destroy_inode(struct inode *inode);
+
+#else
+
+#define EXPORT_FOR_TESTS static
+
+static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 37f36ffdaf6b..b73e1dd97208 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -4,13 +4,19 @@
*/
#include "ctree.h"
+#include "fs.h"
+#include "messages.h"
+#include "inode-item.h"
#include "disk-io.h"
#include "transaction.h"
-#include "print-tree.h"
-
-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
- int slot, const char *name,
- int name_len)
+#include "space-info.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "file-item.h"
+
+struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
+ int slot,
+ const struct fscrypt_str *name)
{
struct btrfs_inode_ref *ref;
unsigned long ptr;
@@ -19,24 +25,25 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
u32 cur_offset = 0;
int len;
- item_size = btrfs_item_size_nr(leaf, slot);
+ item_size = btrfs_item_size(leaf, slot);
ptr = btrfs_item_ptr_offset(leaf, slot);
while (cur_offset < item_size) {
ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
len = btrfs_inode_ref_name_len(leaf, ref);
name_ptr = (unsigned long)(ref + 1);
cur_offset += len + sizeof(*ref);
- if (len != name_len)
+ if (len != name->len)
continue;
- if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
+ if (memcmp_extent_buffer(leaf, name->name, name_ptr,
+ name->len) == 0)
return ref;
}
return NULL;
}
struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
- struct extent_buffer *leaf, int slot, u64 ref_objectid,
- const char *name, int name_len)
+ const struct extent_buffer *leaf, int slot, u64 ref_objectid,
+ const struct fscrypt_str *name)
{
struct btrfs_inode_extref *extref;
unsigned long ptr;
@@ -45,7 +52,7 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
u32 cur_offset = 0;
int ref_name_len;
- item_size = btrfs_item_size_nr(leaf, slot);
+ item_size = btrfs_item_size(leaf, slot);
ptr = btrfs_item_ptr_offset(leaf, slot);
/*
@@ -59,9 +66,10 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
name_ptr = (unsigned long)(&extref->name);
ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
- if (ref_name_len == name_len &&
+ if (ref_name_len == name->len &&
btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
- (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0))
+ (memcmp_extent_buffer(leaf, name->name, name_ptr,
+ name->len) == 0))
return extref;
cur_offset += ref_name_len + sizeof(*extref);
@@ -70,50 +78,47 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
}
/* Returns NULL if no extref found */
-struct btrfs_inode_extref *
-btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, int ins_len,
- int cow)
+struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid)
{
int ret;
struct btrfs_key key;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_EXTREF_KEY;
- key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+ key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0)
return NULL;
return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
- ref_objectid, name, name_len);
+ ref_objectid, name);
}
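For illustration only (not part of this patch): with the new fscrypt_str based signature, a hypothetical caller could build the name from a dentry using FSTR_INIT() from <linux/fscrypt.h>; the dentry, ino and parent_ino parameters below are assumptions.

/* Hypothetical caller sketch for the fscrypt_str based lookup. */
static struct btrfs_inode_extref *example_lookup_extref(struct btrfs_root *root,
							struct btrfs_path *path,
							struct dentry *dentry,
							u64 ino, u64 parent_ino)
{
	struct fscrypt_str name = FSTR_INIT((char *)dentry->d_name.name,
					    dentry->d_name.len);

	/* May return NULL (not found) or an ERR_PTR() on search failure. */
	return btrfs_lookup_inode_extref(root, path, &name, ino, parent_ino);
}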
static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- const char *name, int name_len,
+ const struct fscrypt_str *name,
u64 inode_objectid, u64 ref_objectid,
u64 *index)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_inode_extref *extref;
struct extent_buffer *leaf;
int ret;
- int del_len = name_len + sizeof(*extref);
+ int del_len = name->len + sizeof(*extref);
unsigned long ptr;
unsigned long item_start;
u32 item_size;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_EXTREF_KEY;
- key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+ key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
path = btrfs_alloc_path();
if (!path)
@@ -121,9 +126,9 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
- ret = -ENOENT;
+ return -ENOENT;
if (ret < 0)
- goto out;
+ return ret;
/*
* Sanity check - did we find the right item for this name?
@@ -131,25 +136,20 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
* readonly.
*/
extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
- ref_objectid, name, name_len);
- if (!extref) {
- btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
- ret = -EROFS;
- goto out;
+ ref_objectid, name);
+ if (unlikely(!extref)) {
+ btrfs_abort_transaction(trans, -ENOENT);
+ return -ENOENT;
}
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
if (index)
*index = btrfs_inode_extref_index(leaf, extref);
if (del_len == item_size) {
- /*
- * Common case only one ref in the item, remove the
- * whole item.
- */
- ret = btrfs_del_item(trans, root, path);
- goto out;
+ /* Common case only one ref in the item, remove the whole item. */
+ return btrfs_del_item(trans, root, path);
}
ptr = (unsigned long)extref;
@@ -158,17 +158,13 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
- btrfs_truncate_item(path, item_size - del_len, 1);
-
-out:
- btrfs_free_path(path);
+ btrfs_truncate_item(trans, path, item_size - del_len, 1);
return ret;
}
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
+ struct btrfs_root *root, const struct fscrypt_str *name,
u64 inode_objectid, u64 ref_objectid, u64 *index)
{
struct btrfs_path *path;
@@ -181,11 +177,11 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
u32 sub_item_len;
int ret;
int search_ext_refs = 0;
- int del_len = name_len + sizeof(*ref);
+ int del_len = name->len + sizeof(*ref);
key.objectid = inode_objectid;
- key.offset = ref_objectid;
key.type = BTRFS_INODE_REF_KEY;
+ key.offset = ref_objectid;
path = btrfs_alloc_path();
if (!path)
@@ -200,15 +196,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
goto out;
}
- ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name,
- name_len);
+ ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name);
if (!ref) {
ret = -ENOENT;
search_ext_refs = 1;
goto out;
}
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
if (index)
*index = btrfs_inode_ref_index(leaf, ref);
@@ -218,11 +213,11 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
goto out;
}
ptr = (unsigned long)ref;
- sub_item_len = name_len + sizeof(*ref);
+ sub_item_len = name->len + sizeof(*ref);
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- btrfs_truncate_item(path, item_size - sub_item_len, 1);
+ btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
@@ -232,7 +227,7 @@ out:
* name in our ref array. Find and remove the extended
* inode ref then.
*/
- return btrfs_del_inode_extref(trans, root, name, name_len,
+ return btrfs_del_inode_extref(trans, root, name,
inode_objectid, ref_objectid, index);
}
@@ -240,27 +235,27 @@ out:
}
/*
- * btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
+ * Insert an extended inode ref into a tree.
*
* The caller must have checked against BTRFS_LINK_MAX already.
*/
static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, u64 index)
+ const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid,
+ u64 index)
{
struct btrfs_inode_extref *extref;
int ret;
- int ins_len = name_len + sizeof(*extref);
+ int ins_len = name->len + sizeof(*extref);
unsigned long ptr;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *leaf;
- struct btrfs_item *item;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_EXTREF_KEY;
- key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+ key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
path = btrfs_alloc_path();
if (!path)
@@ -272,38 +267,33 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
if (btrfs_find_name_in_ext_backref(path->nodes[0],
path->slots[0],
ref_objectid,
- name, name_len))
- goto out;
+ name))
+ return ret;
- btrfs_extend_item(path, ins_len);
+ btrfs_extend_item(trans, path, ins_len);
ret = 0;
}
if (ret < 0)
- goto out;
+ return ret;
leaf = path->nodes[0];
- item = btrfs_item_nr(path->slots[0]);
ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
- ptr += btrfs_item_size(leaf, item) - ins_len;
+ ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len;
extref = (struct btrfs_inode_extref *)ptr;
- btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len);
+ btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len);
btrfs_set_inode_extref_index(path->nodes[0], extref, index);
btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);
ptr = (unsigned long)&extref->name;
- write_extent_buffer(path->nodes[0], name, ptr, name_len);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
+ struct btrfs_root *root, const struct fscrypt_str *name,
u64 inode_objectid, u64 ref_objectid, u64 index)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -312,32 +302,32 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_inode_ref *ref;
unsigned long ptr;
int ret;
- int ins_len = name_len + sizeof(*ref);
+ int ins_len = name->len + sizeof(*ref);
key.objectid = inode_objectid;
- key.offset = ref_objectid;
key.type = BTRFS_INODE_REF_KEY;
+ key.offset = ref_objectid;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->skip_release_on_error = 1;
+ path->skip_release_on_error = true;
ret = btrfs_insert_empty_item(trans, root, path, &key,
ins_len);
if (ret == -EEXIST) {
u32 old_size;
ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
- name, name_len);
+ name);
if (ref)
goto out;
- old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- btrfs_extend_item(path, ins_len);
+ old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+ btrfs_extend_item(trans, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
- btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+ btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
btrfs_set_inode_ref_index(path->nodes[0], ref, index);
ptr = (unsigned long)(ref + 1);
ret = 0;
@@ -345,7 +335,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
if (ret == -EOVERFLOW) {
if (btrfs_find_name_in_backref(path->nodes[0],
path->slots[0],
- name, name_len))
+ name))
ret = -EEXIST;
else
ret = -EMLINK;
@@ -354,13 +344,11 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
} else {
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
- btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+ btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
btrfs_set_inode_ref_index(path->nodes[0], ref, index);
ptr = (unsigned long)(ref + 1);
}
- write_extent_buffer(path->nodes[0], name, ptr, name_len);
- btrfs_mark_buffer_dirty(path->nodes[0]);
-
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
out:
btrfs_free_path(path);
@@ -371,7 +359,6 @@ out:
if (btrfs_super_incompat_flags(disk_super)
& BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
ret = btrfs_insert_inode_extref(trans, root, name,
- name_len,
inode_objectid,
ref_objectid, index);
}
@@ -419,3 +406,329 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
}
return ret;
}
+
+static inline void btrfs_trace_truncate(const struct btrfs_inode *inode,
+ const struct extent_buffer *leaf,
+ const struct btrfs_file_extent_item *fi,
+ u64 offset, int extent_type, int slot)
+{
+ if (!inode)
+ return;
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ trace_btrfs_truncate_show_fi_inline(inode, leaf, fi, slot,
+ offset);
+ else
+ trace_btrfs_truncate_show_fi_regular(inode, leaf, fi, offset);
+}
+
+/*
+ * Remove inode items from a given root.
+ *
+ * @trans: A transaction handle.
+ * @root: The root from which to remove items.
+ * @inode: The inode whose items we want to remove.
+ * @control: The btrfs_truncate_control to control how and what we
+ * are truncating.
+ *
+ * Remove all keys associated with the inode from the given root that have a key
+ * with a type greater than or equal to @min_type. When @min_type has a value of
+ * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value
+ * greater than or equal to @new_size. If a file extent item that starts before
+ * @new_size and ends after it is found, its length is adjusted.
+ *
+ * Returns: 0 on success, < 0 on error, and BTRFS_NEED_TRUNCATE_BLOCK when @min_type
+ * is BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
+ */
+int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_truncate_control *control)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ u64 new_size = control->new_size;
+ u64 extent_num_bytes = 0;
+ u64 extent_offset = 0;
+ u64 item_end = 0;
+ u32 found_type = (u8)-1;
+ int del_item;
+ int pending_del_nr = 0;
+ int pending_del_slot = 0;
+ int extent_type = -1;
+ int ret;
+ u64 bytes_deleted = 0;
+ bool be_nice = false;
+
+ ASSERT(control->inode || !control->clear_extent_range);
+ ASSERT(new_size == 0 || control->min_type == BTRFS_EXTENT_DATA_KEY);
+
+ control->last_size = new_size;
+ control->sub_bytes = 0;
+
+ /*
+ * For shareable roots we want to back off from time to time; these turn
+ * out to be subvolume roots, reloc roots, and data reloc roots.
+ */
+ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ be_nice = true;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->reada = READA_BACK;
+
+ key.objectid = control->ino;
+ key.type = (u8)-1;
+ key.offset = (u64)-1;
+
+search_again:
+ /*
+ * With a 16K leaf size and 128MiB extents, you can actually queue up a
+ * huge file in a single leaf. Most of the time that bytes_deleted is
+ * > 0, it will be huge by the time we get here
+ */
+ if (be_nice && bytes_deleted > SZ_32M &&
+ btrfs_should_end_transaction(trans)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = 0;
+ /* There are no items in the tree for us to truncate, we're done */
+ if (path->slots[0] == 0)
+ goto out;
+ path->slots[0]--;
+ }
+
+ while (1) {
+ u64 clear_start = 0, clear_len = 0, extent_start = 0;
+ bool refill_delayed_refs_rsv = false;
+
+ fi = NULL;
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ found_type = found_key.type;
+
+ if (found_key.objectid != control->ino)
+ break;
+
+ if (found_type < control->min_type)
+ break;
+
+ item_end = found_key.offset;
+ if (found_type == BTRFS_EXTENT_DATA_KEY) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(leaf, fi);
+ if (extent_type != BTRFS_FILE_EXTENT_INLINE)
+ item_end +=
+ btrfs_file_extent_num_bytes(leaf, fi);
+ else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ item_end += btrfs_file_extent_ram_bytes(leaf, fi);
+
+ btrfs_trace_truncate(control->inode, leaf, fi,
+ found_key.offset, extent_type,
+ path->slots[0]);
+ item_end--;
+ }
+ if (found_type > control->min_type) {
+ del_item = 1;
+ } else {
+ if (item_end < new_size)
+ break;
+ if (found_key.offset >= new_size)
+ del_item = 1;
+ else
+ del_item = 0;
+ }
+
+ /* FIXME, shrink the extent if the ref count is only 1 */
+ if (found_type != BTRFS_EXTENT_DATA_KEY)
+ goto delete;
+
+ control->extents_found++;
+
+ if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
+ u64 num_dec;
+
+ clear_start = found_key.offset;
+ extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
+ if (!del_item) {
+ u64 orig_num_bytes =
+ btrfs_file_extent_num_bytes(leaf, fi);
+ extent_num_bytes = ALIGN(new_size -
+ found_key.offset,
+ fs_info->sectorsize);
+ clear_start = ALIGN(new_size, fs_info->sectorsize);
+
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_num_bytes);
+ num_dec = (orig_num_bytes - extent_num_bytes);
+ if (extent_start != 0)
+ control->sub_bytes += num_dec;
+ } else {
+ extent_num_bytes =
+ btrfs_file_extent_disk_num_bytes(leaf, fi);
+ extent_offset = found_key.offset -
+ btrfs_file_extent_offset(leaf, fi);
+
+ /* FIXME blocksize != 4096 */
+ num_dec = btrfs_file_extent_num_bytes(leaf, fi);
+ if (extent_start != 0)
+ control->sub_bytes += num_dec;
+ }
+ clear_len = num_dec;
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ /*
+ * We can't truncate inline items that have had
+ * special encodings
+ */
+ if (!del_item &&
+ btrfs_file_extent_encryption(leaf, fi) == 0 &&
+ btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
+ btrfs_file_extent_compression(leaf, fi) == 0) {
+ u32 size = (u32)(new_size - found_key.offset);
+
+ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ size = btrfs_file_extent_calc_inline_size(size);
+ btrfs_truncate_item(trans, path, size, 1);
+ } else if (!del_item) {
+ /*
+ * We have to bail so the last_size is set to
+ * just before this extent.
+ */
+ ret = BTRFS_NEED_TRUNCATE_BLOCK;
+ break;
+ } else {
+ /*
+ * Inline extents are special, we just treat
+ * them as a full sector worth in the file
+ * extent tree for simplicity's sake.
+ */
+ clear_len = fs_info->sectorsize;
+ }
+
+ control->sub_bytes += item_end + 1 - new_size;
+ }
+delete:
+ /*
+ * We only want to clear the file extent range if we're
+ * modifying the actual inode's mapping, which is just the
+ * normal truncate path.
+ */
+ if (control->clear_extent_range) {
+ ret = btrfs_inode_clear_file_extent_range(control->inode,
+ clear_start, clear_len);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
+ }
+
+ if (del_item) {
+ ASSERT(!pending_del_nr ||
+ ((path->slots[0] + 1) == pending_del_slot));
+
+ control->last_size = found_key.offset;
+ if (!pending_del_nr) {
+ /* No pending yet, add ourselves */
+ pending_del_slot = path->slots[0];
+ pending_del_nr = 1;
+ } else if (path->slots[0] + 1 == pending_del_slot) {
+ /* Hop on the pending chunk */
+ pending_del_nr++;
+ pending_del_slot = path->slots[0];
+ }
+ } else {
+ control->last_size = new_size;
+ break;
+ }
+
+ if (del_item && extent_start != 0 && !control->skip_ref_updates) {
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = extent_start,
+ .num_bytes = extent_num_bytes,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_header_owner(leaf),
+ };
+
+ bytes_deleted += extent_num_bytes;
+
+ btrfs_init_data_ref(&ref, control->ino, extent_offset,
+ btrfs_root_id(root), false);
+ ret = btrfs_free_extent(trans, &ref);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
+ if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
+ refill_delayed_refs_rsv = true;
+ }
+
+ if (found_type == BTRFS_INODE_ITEM_KEY)
+ break;
+
+ if (path->slots[0] == 0 ||
+ path->slots[0] != pending_del_slot ||
+ refill_delayed_refs_rsv) {
+ if (pending_del_nr) {
+ ret = btrfs_del_items(trans, root, path,
+ pending_del_slot,
+ pending_del_nr);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+ }
+ pending_del_nr = 0;
+ }
+ btrfs_release_path(path);
+
+ /*
+ * We can generate a lot of delayed refs, so we need to
+ * throttle every once in a while and make sure we're
+ * adding enough space to keep up with the work we are
+ * generating. Since we hold a transaction here we
+ * can't flush, and we don't want to FLUSH_LIMIT because
+ * we could have generated too many delayed refs to
+ * actually allocate, so just bail if we're short and
+ * let the normal reservation dance happen higher up.
+ */
+ if (refill_delayed_refs_rsv) {
+ ret = btrfs_delayed_refs_rsv_refill(fs_info,
+ BTRFS_RESERVE_NO_FLUSH);
+ if (ret) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+ goto search_again;
+ } else {
+ path->slots[0]--;
+ }
+ }
+out:
+ if (ret >= 0 && pending_del_nr) {
+ int ret2;
+
+ ret2 = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr);
+ if (unlikely(ret2)) {
+ btrfs_abort_transaction(trans, ret2);
+ ret = ret2;
+ }
+ }
+
+ ASSERT(control->last_size >= new_size);
+ if (!ret && control->last_size > new_size)
+ control->last_size = new_size;
+
+ return ret;
+}
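A minimal caller sketch, not taken from this patch: it assumes an already started
transaction handle, the inode's root, a struct btrfs_inode *inode, and a target
new_size, and uses the field names of struct btrfs_truncate_control declared in
inode-item.h below.

	struct btrfs_truncate_control control = {
		.inode = inode,
		.ino = btrfs_ino(inode),
		.new_size = new_size,
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	int ret;

	ret = btrfs_truncate_inode_items(trans, root, &control);
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		/* The caller still has to zero out the tail of the last block. */
	} else if (ret == -EAGAIN) {
		/* Backed off: end the transaction and retry the truncate. */
	}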
diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h
new file mode 100644
index 000000000000..6d9f5ad20646
--- /dev/null
+++ b/fs/btrfs/inode-item.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_INODE_ITEM_H
+#define BTRFS_INODE_ITEM_H
+
+#include <linux/types.h>
+#include <linux/crc32c.h>
+
+struct fscrypt_str;
+struct extent_buffer;
+struct btrfs_trans_handle;
+struct btrfs_root;
+struct btrfs_path;
+struct btrfs_key;
+struct btrfs_inode_extref;
+struct btrfs_inode;
+struct btrfs_truncate_control;
+
+/*
+ * Return this if we need to call truncate_block for the last bit of the
+ * truncate.
+ */
+#define BTRFS_NEED_TRUNCATE_BLOCK 1
+
+struct btrfs_truncate_control {
+ /*
+ * IN: the inode we're operating on, this can be NULL if
+ * ->clear_extent_range is false.
+ */
+ struct btrfs_inode *inode;
+
+ /* IN: the size we're truncating to. */
+ u64 new_size;
+
+ /* OUT: the number of extents truncated. */
+ u64 extents_found;
+
+ /* OUT: the last size we truncated this inode to. */
+ u64 last_size;
+
+ /* OUT: the number of bytes to sub from this inode. */
+ u64 sub_bytes;
+
+ /* IN: the ino we are truncating. */
+ u64 ino;
+
+ /*
+ * IN: minimum key type to remove. Keys with this type are
+ * removed only if their offset >= new_size.
+ */
+ u32 min_type;
+
+ /*
+ * IN: true if we don't want to do extent reference updates for any file
+ * extents we drop.
+ */
+ bool skip_ref_updates;
+
+ /*
+ * IN: true if we need to clear the file extent range for the inode as
+ * we drop the file extent items.
+ */
+ bool clear_extent_range;
+};
+
+/*
+ * btrfs_inode_item stores flags in a u64, btrfs_inode stores them in two
+ * separate u32s. These two functions convert between the two representations.
+ */
+static inline u64 btrfs_inode_combine_flags(u32 flags, u32 ro_flags)
+{
+ return (flags | ((u64)ro_flags << 32));
+}
+
+static inline void btrfs_inode_split_flags(u64 inode_item_flags,
+ u32 *flags, u32 *ro_flags)
+{
+ *flags = (u32)inode_item_flags;
+ *ro_flags = (u32)(inode_item_flags >> 32);
+}
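A small round-trip sketch (illustrative only, the flag value is just an example)
showing that combining and splitting the flags is lossless:

	u32 flags, ro_flags;
	u64 packed = btrfs_inode_combine_flags(BTRFS_INODE_NODATACOW, 0);

	btrfs_inode_split_flags(packed, &flags, &ro_flags);
	/* flags == BTRFS_INODE_NODATACOW and ro_flags == 0 again. */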
+
+/* Figure the key offset of an extended inode ref. */
+static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name, int len)
+{
+ return (u64)crc32c(parent_objectid, name, len);
+}
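For illustration (hypothetical lookup code; BTRFS_INODE_EXTREF_KEY is the extended
ref item type and is not defined in this header), the hash is what ends up as the
extref item's key offset:

	struct btrfs_key key;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);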
+
+int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_truncate_control *control);
+int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid, u64 index);
+int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid, u64 *index);
+int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid);
+int btrfs_lookup_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *location, int mod);
+
+struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct fscrypt_str *name,
+ u64 inode_objectid, u64 ref_objectid);
+
+struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
+ int slot,
+ const struct fscrypt_str *name);
+struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ const struct extent_buffer *leaf, int slot, u64 ref_objectid,
+ const struct fscrypt_str *name);
+
+#endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b8c911a4a320..c4bee47829ed 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9,6 +9,7 @@
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
@@ -32,21 +33,19 @@
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
-#include "volumes.h"
+#include "bio.h"
#include "compression.h"
#include "locking.h"
-#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
@@ -54,19 +53,61 @@
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
+#include "inode-item.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "defrag.h"
+#include "dir-item.h"
+#include "file-item.h"
+#include "uuid-tree.h"
+#include "ioctl.h"
+#include "file.h"
+#include "acl.h"
+#include "relocation.h"
+#include "verity.h"
+#include "super.h"
+#include "orphan.h"
+#include "backref.h"
+#include "raid-stripe-tree.h"
+#include "fiemap.h"
+#include "delayed-inode.h"
+
+#define COW_FILE_RANGE_KEEP_LOCKED (1UL << 0)
+#define COW_FILE_RANGE_NO_INLINE (1UL << 1)
struct btrfs_iget_args {
u64 ino;
struct btrfs_root *root;
};
-struct btrfs_dio_data {
- u64 reserve;
- loff_t length;
- ssize_t submitted;
- struct extent_changeset *data_reserved;
+struct btrfs_rename_ctx {
+ /* Output field. Stores the index number of the old directory entry. */
+ u64 index;
};
+/*
+ * Used by data_reloc_print_warning_inode() to pass needed info for filename
+ * resolution and output of error message.
+ */
+struct data_reloc_warn {
+ struct btrfs_path path;
+ struct btrfs_fs_info *fs_info;
+ u64 extent_item_size;
+ u64 logical;
+ int mirror_num;
+};
+
+/*
+ * For the file_extent_tree, we want to hold the inode lock when we lookup and
+ * update the disk_i_size, but lockdep will complain because our io_tree we hold
+ * the tree lock and get the inode lock when setting delalloc. These two things
+ * are unrelated, so make a class for the file_extent_tree so we don't get the
+ * two locking patterns mixed up.
+ */
+static struct lock_class_key file_extent_tree_class;
+
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
@@ -75,30 +116,229 @@ static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct kmem_cache *btrfs_inode_cachep;
-struct kmem_cache *btrfs_trans_handle_cachep;
-struct kmem_cache *btrfs_path_cachep;
-struct kmem_cache *btrfs_free_space_cachep;
-struct kmem_cache *btrfs_free_space_bitmap_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
-static int btrfs_truncate(struct inode *inode, bool skip_writeback);
-static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
-static noinline int cow_file_range(struct btrfs_inode *inode,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, int unlock);
-static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
- u64 len, u64 orig_start, u64 block_start,
- u64 block_len, u64 orig_block_len,
- u64 ram_bytes, int compress_type,
- int type);
-
-static void __endio_write_update_ordered(struct btrfs_inode *inode,
- const u64 offset, const u64 bytes,
- const bool uptodate);
+static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
+
+static noinline int run_delalloc_cow(struct btrfs_inode *inode,
+ struct folio *locked_folio, u64 start,
+ u64 end, struct writeback_control *wbc,
+ bool pages_dirty);
+
+static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
+ u64 root, void *warn_ctx)
+{
+ struct data_reloc_warn *warn = warn_ctx;
+ struct btrfs_fs_info *fs_info = warn->fs_info;
+ struct extent_buffer *eb;
+ struct btrfs_inode_item *inode_item;
+ struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
+ struct btrfs_root *local_root;
+ struct btrfs_key key;
+ unsigned int nofs_flag;
+ u32 nlink;
+ int ret;
+
+ local_root = btrfs_get_fs_root(fs_info, root, true);
+ if (IS_ERR(local_root)) {
+ ret = PTR_ERR(local_root);
+ goto err;
+ }
+
+ /* This makes the path point to (inum INODE_ITEM ioff). */
+ key.objectid = inum;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
+ if (ret) {
+ btrfs_put_root(local_root);
+ btrfs_release_path(&warn->path);
+ goto err;
+ }
+
+ eb = warn->path.nodes[0];
+ inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
+ nlink = btrfs_inode_nlink(eb, inode_item);
+ btrfs_release_path(&warn->path);
+
+ nofs_flag = memalloc_nofs_save();
+ ipath = init_ipath(4096, local_root, &warn->path);
+ memalloc_nofs_restore(nofs_flag);
+ if (IS_ERR(ipath)) {
+ btrfs_put_root(local_root);
+ ret = PTR_ERR(ipath);
+ ipath = NULL;
+ /*
+ * -ENOMEM is not a critical error, just output a generic error
+ * without the filename.
+ */
+ btrfs_warn(fs_info,
+"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
+ warn->logical, warn->mirror_num, root, inum, offset);
+ return ret;
+ }
+ ret = paths_from_inode(inum, ipath);
+ if (ret < 0) {
+ btrfs_put_root(local_root);
+ goto err;
+ }
+
+ /*
+ * We deliberately ignore the fact that ipath might have been too small
+ * to hold all of the paths here.
+ */
+ for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
+ btrfs_warn(fs_info,
+"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
+ warn->logical, warn->mirror_num, root, inum, offset,
+ fs_info->sectorsize, nlink,
+ (char *)(unsigned long)ipath->fspath->val[i]);
+ }
+
+ btrfs_put_root(local_root);
+ return 0;
+
+err:
+ btrfs_warn(fs_info,
+"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
+ warn->logical, warn->mirror_num, root, inum, offset, ret);
+
+ return ret;
+}
/*
- * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
+ * Do extra user-friendly error output (e.g. lookup all the affected files).
+ *
+ * If the backref lookup fails, fall back to the old, less detailed error
+ * message.
+ */
+static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
+ const u8 *csum, const u8 *csum_expected,
+ int mirror_num)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_path path = { 0 };
+ struct btrfs_key found_key = { 0 };
+ struct extent_buffer *eb;
+ struct btrfs_extent_item *ei;
+ const u32 csum_size = fs_info->csum_size;
+ u64 logical;
+ u64 flags;
+ u32 item_size;
+ int ret;
+
+ mutex_lock(&fs_info->reloc_mutex);
+ logical = btrfs_get_reloc_bg_bytenr(fs_info);
+ mutex_unlock(&fs_info->reloc_mutex);
+
+ if (logical == U64_MAX) {
+ btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
+ btrfs_warn_rl(fs_info,
+"csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
+ btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
+ mirror_num);
+ return;
+ }
+
+ logical += file_off;
+ btrfs_warn_rl(fs_info,
+"csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode), file_off, logical,
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
+ mirror_num);
+
+ ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
+ if (ret < 0) {
+ btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
+ logical, ret);
+ return;
+ }
+ eb = path.nodes[0];
+ ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
+ item_size = btrfs_item_size(eb, path.slots[0]);
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ unsigned long ptr = 0;
+ u64 ref_root;
+ u8 ref_level;
+
+ while (true) {
+ ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
+ item_size, &ref_root,
+ &ref_level);
+ if (ret < 0) {
+ btrfs_warn_rl(fs_info,
+ "failed to resolve tree backref for logical %llu: %d",
+ logical, ret);
+ break;
+ }
+ if (ret > 0)
+ break;
+
+ btrfs_warn_rl(fs_info,
+"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
+ logical, mirror_num,
+ (ref_level ? "node" : "leaf"),
+ ref_level, ref_root);
+ }
+ btrfs_release_path(&path);
+ } else {
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+ struct data_reloc_warn reloc_warn = { 0 };
+
+ btrfs_release_path(&path);
+
+ ctx.bytenr = found_key.objectid;
+ ctx.extent_item_pos = logical - found_key.objectid;
+ ctx.fs_info = fs_info;
+
+ reloc_warn.logical = logical;
+ reloc_warn.extent_item_size = found_key.offset;
+ reloc_warn.mirror_num = mirror_num;
+ reloc_warn.fs_info = fs_info;
+
+ iterate_extent_inodes(&ctx, true,
+ data_reloc_print_warning_inode, &reloc_warn);
+ }
+}
+
+static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
+ u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
+{
+ struct btrfs_root *root = inode->root;
+ const u32 csum_size = root->fs_info->csum_size;
+
+ /* For data reloc tree, it's better to do a backref lookup instead. */
+ if (btrfs_is_data_reloc_root(root))
+ return print_data_reloc_error(inode, logical_start, csum,
+ csum_expected, mirror_num);
+
+ /* Print the root id as a signed value for internal roots, which is more meaningful. */
+ if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
+ btrfs_warn_rl(root->fs_info,
+"csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
+ btrfs_root_id(root), btrfs_ino(inode),
+ logical_start,
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
+ mirror_num);
+ } else {
+ btrfs_warn_rl(root->fs_info,
+"csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
+ btrfs_root_id(root), btrfs_ino(inode),
+ logical_start,
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum),
+ BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
+ mirror_num);
+ }
+}
+
+/*
+ * Lock inode i_rwsem based on arguments passed.
*
* ilock_flags can have the following bit set:
*
@@ -107,44 +347,44 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
* return -EAGAIN
* BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
*/
-int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
+int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
if (ilock_flags & BTRFS_ILOCK_SHARED) {
if (ilock_flags & BTRFS_ILOCK_TRY) {
- if (!inode_trylock_shared(inode))
+ if (!inode_trylock_shared(&inode->vfs_inode))
return -EAGAIN;
else
return 0;
}
- inode_lock_shared(inode);
+ inode_lock_shared(&inode->vfs_inode);
} else {
if (ilock_flags & BTRFS_ILOCK_TRY) {
- if (!inode_trylock(inode))
+ if (!inode_trylock(&inode->vfs_inode))
return -EAGAIN;
else
return 0;
}
- inode_lock(inode);
+ inode_lock(&inode->vfs_inode);
}
if (ilock_flags & BTRFS_ILOCK_MMAP)
- down_write(&BTRFS_I(inode)->i_mmap_lock);
+ down_write(&inode->i_mmap_lock);
return 0;
}
/*
- * btrfs_inode_unlock - unock inode i_rwsem
+ * Unlock inode i_rwsem.
*
* ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
* to decide whether the lock acquired is shared or exclusive.
*/
-void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
+void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
if (ilock_flags & BTRFS_ILOCK_MMAP)
- up_write(&BTRFS_I(inode)->i_mmap_lock);
+ up_write(&inode->i_mmap_lock);
if (ilock_flags & BTRFS_ILOCK_SHARED)
- inode_unlock_shared(inode);
+ inode_unlock_shared(&inode->vfs_inode);
else
- inode_unlock(inode);
+ inode_unlock(&inode->vfs_inode);
}
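An illustrative try-lock caller (hypothetical, assuming a struct btrfs_inode *inode)
following the flag contract described above:

	int ret;

	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
	if (ret)
		return ret;	/* -EAGAIN: the lock was contended. */
	/* ... read-mostly work under the shared i_rwsem ... */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);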
/*
@@ -158,75 +398,55 @@ void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
* extent (btrfs_finish_ordered_io()).
*/
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
- struct page *locked_page,
u64 offset, u64 bytes)
{
- unsigned long index = offset >> PAGE_SHIFT;
- unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
- u64 page_start = page_offset(locked_page);
- u64 page_end = page_start + PAGE_SIZE - 1;
-
- struct page *page;
+ pgoff_t index = offset >> PAGE_SHIFT;
+ const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
+ struct folio *folio;
while (index <= end_index) {
- /*
- * For locked page, we will call end_extent_writepage() on it
- * in run_delalloc_range() for the error handling. That
- * end_extent_writepage() function will call
- * btrfs_mark_ordered_io_finished() to clear page Ordered and
- * run the ordered extent accounting.
- *
- * Here we can't just clear the Ordered bit, or
- * btrfs_mark_ordered_io_finished() would skip the accounting
- * for the page range, and the ordered extent will never finish.
- */
- if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
+ if (IS_ERR(folio)) {
index++;
continue;
}
- page = find_get_page(inode->vfs_inode.i_mapping, index);
- index++;
- if (!page)
- continue;
+ index = folio_next_index(folio);
/*
* Here we just clear all Ordered bits for every page in the
- * range, then __endio_write_update_ordered() will handle
+ * range, then btrfs_mark_ordered_io_finished() will handle
* the ordered extent accounting for the range.
*/
- btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
- offset, bytes);
- put_page(page);
+ btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
+ offset, bytes);
+ folio_put(folio);
}
- /* The locked page covers the full range, nothing needs to be done */
- if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
- return;
- /*
- * In case this page belongs to the delalloc range being instantiated
- * then skip it, since the first page of a range is going to be
- * properly cleaned up by the caller of run_delalloc_range
- */
- if (page_start >= offset && page_end <= (offset + bytes - 1)) {
- bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
- offset = page_offset(locked_page) + PAGE_SIZE;
- }
-
- return __endio_write_update_ordered(inode, offset, bytes, false);
+ return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
-static int btrfs_dirty_inode(struct inode *inode);
+static int btrfs_dirty_inode(struct btrfs_inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+ struct btrfs_new_inode_args *args)
{
- int err;
+ int ret;
- err = btrfs_init_acl(trans, inode, dir);
- if (!err)
- err = btrfs_xattr_security_init(trans, inode, dir, qstr);
- return err;
+ if (args->default_acl) {
+ ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
+ ACL_TYPE_DEFAULT);
+ if (ret)
+ return ret;
+ }
+ if (args->acl) {
+ ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
+ if (ret)
+ return ret;
+ }
+ if (!args->default_acl && !args->acl)
+ cache_no_acl(args->inode);
+ return btrfs_xattr_security_init(trans, args->inode, args->dir,
+ &args->dentry->d_name);
}
/*
@@ -235,34 +455,49 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
* no overlapping inline items exist in the btree
*/
static int insert_inline_extent(struct btrfs_trans_handle *trans,
- struct btrfs_path *path, bool extent_inserted,
- struct btrfs_root *root, struct inode *inode,
- u64 start, size_t size, size_t compressed_size,
+ struct btrfs_path *path,
+ struct btrfs_inode *inode, bool extent_inserted,
+ size_t size, size_t compressed_size,
int compress_type,
- struct page **compressed_pages)
+ struct folio *compressed_folio,
+ bool update_i_size)
{
+ struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
- struct page *page = NULL;
+ const u32 sectorsize = trans->fs_info->sectorsize;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
int ret;
size_t cur_size = size;
- unsigned long offset;
+ u64 i_size;
- ASSERT((compressed_size > 0 && compressed_pages) ||
- (compressed_size == 0 && !compressed_pages));
+ /*
+ * The decompressed size must still be no larger than a sector. Under a
+ * heavy race, we can have size == 0 passed in, but that shouldn't be a
+ * big deal and we can continue the insertion.
+ */
+ ASSERT(size <= sectorsize);
- if (compressed_size && compressed_pages)
+ /*
+ * The compressed size also needs to be no larger than a sector.
+ * That's also why we only need one folio as the parameter.
+ */
+ if (compressed_folio)
+ ASSERT(compressed_size <= sectorsize);
+ else
+ ASSERT(compressed_size == 0);
+
+ if (compressed_size && compressed_folio)
cur_size = compressed_size;
if (!extent_inserted) {
struct btrfs_key key;
size_t datasize;
- key.objectid = btrfs_ino(BTRFS_I(inode));
- key.offset = start;
+ key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
datasize = btrfs_file_extent_calc_inline_size(cur_size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -281,95 +516,112 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
ptr = btrfs_file_extent_inline_start(ei);
if (compress_type != BTRFS_COMPRESS_NONE) {
- struct page *cpage;
- int i = 0;
- while (compressed_size > 0) {
- cpage = compressed_pages[i];
- cur_size = min_t(unsigned long, compressed_size,
- PAGE_SIZE);
+ kaddr = kmap_local_folio(compressed_folio, 0);
+ write_extent_buffer(leaf, kaddr, ptr, compressed_size);
+ kunmap_local(kaddr);
- kaddr = kmap_atomic(cpage);
- write_extent_buffer(leaf, kaddr, ptr, cur_size);
- kunmap_atomic(kaddr);
-
- i++;
- ptr += cur_size;
- compressed_size -= cur_size;
- }
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
- page = find_get_page(inode->i_mapping,
- start >> PAGE_SHIFT);
+ struct folio *folio;
+
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
+ ASSERT(!IS_ERR(folio));
btrfs_set_file_extent_compression(leaf, ei, 0);
- kaddr = kmap_atomic(page);
- offset = offset_in_page(start);
- write_extent_buffer(leaf, kaddr + offset, ptr, size);
- kunmap_atomic(kaddr);
- put_page(page);
+ kaddr = kmap_local_folio(folio, 0);
+ write_extent_buffer(leaf, kaddr, ptr, size);
+ kunmap_local(kaddr);
+ folio_put(folio);
}
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
/*
* We align size to sectorsize for inline extents just for simplicity
* sake.
*/
- size = ALIGN(size, root->fs_info->sectorsize);
- ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, size);
+ ret = btrfs_inode_set_file_extent_range(inode, 0,
+ ALIGN(size, root->fs_info->sectorsize));
if (ret)
goto fail;
/*
- * we're an inline extent, so nobody can
- * extend the file past i_size without locking
- * a page we already have locked.
+ * We're an inline extent, so nobody can extend the file past i_size
+ * without locking a page we already have locked.
*
- * We must do any isize and inode updates
- * before we unlock the pages. Otherwise we
- * could end up racing with unlink.
+ * We must do any i_size and inode updates before we unlock the pages.
+ * Otherwise we could end up racing with unlink.
*/
- BTRFS_I(inode)->disk_i_size = inode->i_size;
+ i_size = i_size_read(&inode->vfs_inode);
+ if (update_i_size && size > i_size) {
+ i_size_write(&inode->vfs_inode, size);
+ i_size = size;
+ }
+ inode->disk_i_size = i_size;
+
fail:
return ret;
}
+static bool can_cow_file_range_inline(struct btrfs_inode *inode,
+ u64 offset, u64 size,
+ size_t compressed_size)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ u64 data_len = (compressed_size ?: size);
+
+ /* Inline extents must start at offset 0. */
+ if (offset != 0)
+ return false;
+
+ /* Inline extents are limited to sectorsize. */
+ if (size > fs_info->sectorsize)
+ return false;
+
+ /* We do not allow a non-compressed extent to be as large as block size. */
+ if (data_len >= fs_info->sectorsize)
+ return false;
+
+ /* We cannot exceed the maximum inline data size. */
+ if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
+ return false;
+
+ /* We cannot exceed the user specified max_inline size. */
+ if (data_len > fs_info->max_inline)
+ return false;
+
+ /* Inline extents must be the entirety of the file. */
+ if (size < i_size_read(&inode->vfs_inode))
+ return false;
+
+ /* Encrypted file cannot be inlined. */
+ if (IS_ENCRYPTED(&inode->vfs_inode))
+ return false;
+
+ return true;
+}
/*
* conditionally insert an inline extent into the file. This
* does the checks required to make sure the data is small enough
* to fit as an inline extent.
+ *
+ * If being used directly, you must have already checked we're allowed to cow
+ * the range by getting true from can_cow_file_range_inline().
*/
-static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
- u64 end, size_t compressed_size,
- int compress_type,
- struct page **compressed_pages)
+static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
+ u64 size, size_t compressed_size,
+ int compress_type,
+ struct folio *compressed_folio,
+ bool update_i_size)
{
struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
- u64 isize = i_size_read(&inode->vfs_inode);
- u64 actual_end = min(end + 1, isize);
- u64 inline_len = actual_end - start;
- u64 aligned_end = ALIGN(end, fs_info->sectorsize);
- u64 data_len = inline_len;
+ u64 data_len = (compressed_size ?: size);
int ret;
struct btrfs_path *path;
- if (compressed_size)
- data_len = compressed_size;
-
- if (start > 0 ||
- actual_end > fs_info->sectorsize ||
- data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
- (!compressed_size &&
- (actual_end & (fs_info->sectorsize - 1)) == 0) ||
- end + 1 < isize ||
- data_len > fs_info->max_inline) {
- return 1;
- }
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -382,31 +634,21 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
trans->block_rsv = &inode->block_rsv;
drop_args.path = path;
- drop_args.start = start;
- drop_args.end = aligned_end;
+ drop_args.start = 0;
+ drop_args.end = fs_info->sectorsize;
drop_args.drop_cache = true;
drop_args.replace_extent = true;
-
- if (compressed_size && compressed_pages)
- drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
- compressed_size);
- else
- drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
- inline_len);
-
+ drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- if (isize > actual_end)
- inline_len = min_t(u64, isize, actual_end);
- ret = insert_inline_extent(trans, path, drop_args.extent_inserted,
- root, &inode->vfs_inode, start,
- inline_len, compressed_size,
- compress_type, compressed_pages);
- if (ret && ret != -ENOSPC) {
+ ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
+ size, compressed_size, compress_type,
+ compressed_folio, update_i_size);
+ if (unlikely(ret && ret != -ENOSPC)) {
btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
@@ -414,9 +656,9 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
goto out;
}
- btrfs_update_inode_bytes(inode, inline_len, drop_args.bytes_found);
- ret = btrfs_update_inode(trans, root, inode);
- if (ret && ret != -ENOSPC) {
+ btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
+ ret = btrfs_update_inode(trans, inode);
+ if (unlikely(ret && ret != -ENOSPC)) {
btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
@@ -424,7 +666,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
goto out;
}
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
+ btrfs_set_inode_full_sync(inode);
out:
/*
* Don't forget to free the reserved space, as for inlined extent
@@ -432,28 +674,77 @@ out:
* And at reserve time, it's always aligned to page size, so
* just free one page here.
*/
- btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
+ btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
btrfs_free_path(path);
btrfs_end_transaction(trans);
return ret;
}
+static noinline int cow_file_range_inline(struct btrfs_inode *inode,
+ struct folio *locked_folio,
+ u64 offset, u64 end,
+ size_t compressed_size,
+ int compress_type,
+ struct folio *compressed_folio,
+ bool update_i_size)
+{
+ struct extent_state *cached = NULL;
+ unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
+ u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
+ int ret;
+
+ if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
+ return 1;
+
+ btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
+ ret = __cow_file_range_inline(inode, size, compressed_size,
+ compress_type, compressed_folio,
+ update_i_size);
+ if (ret > 0) {
+ btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
+ return ret;
+ }
+
+ /*
+ * In the successful case (ret == 0 here), cow_file_range will return 1.
+ *
+ * Quite a bit further up the callstack in extent_writepage(), ret == 1
+ * is treated as a short circuited success and does not unlock the folio,
+ * so we must do it here.
+ *
+ * In the failure case, the locked_folio does get unlocked by
+ * btrfs_folio_end_all_writers, which asserts that it is still locked
+ * at that point, so we must *not* unlock it here.
+ *
+ * The other two callsites in compress_file_range do not have a
+ * locked_folio, so they are not relevant to this logic.
+ */
+ if (ret == 0)
+ locked_folio = NULL;
+
+ extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
+ clear_flags, PAGE_UNLOCK |
+ PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
+ return ret;
+}
+
struct async_extent {
u64 start;
u64 ram_size;
u64 compressed_size;
- struct page **pages;
- unsigned long nr_pages;
+ struct folio **folios;
+ unsigned long nr_folios;
int compress_type;
struct list_head list;
};
struct async_chunk {
- struct inode *inode;
- struct page *locked_page;
+ struct btrfs_inode *inode;
+ struct folio *locked_folio;
u64 start;
u64 end;
- unsigned int write_flags;
+ blk_opf_t write_flags;
struct list_head extents;
struct cgroup_subsys_state *blkcg_css;
struct btrfs_work work;
@@ -468,36 +759,26 @@ struct async_cow {
static noinline int add_async_extent(struct async_chunk *cow,
u64 start, u64 ram_size,
u64 compressed_size,
- struct page **pages,
- unsigned long nr_pages,
+ struct folio **folios,
+ unsigned long nr_folios,
int compress_type)
{
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
- BUG_ON(!async_extent); /* -ENOMEM */
+ if (!async_extent)
+ return -ENOMEM;
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
- async_extent->pages = pages;
- async_extent->nr_pages = nr_pages;
+ async_extent->folios = folios;
+ async_extent->nr_folios = nr_folios;
async_extent->compress_type = compress_type;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
}
/*
- * Check if the inode has flags compatible with compression
- */
-static inline bool inode_can_compress(struct btrfs_inode *inode)
-{
- if (inode->flags & BTRFS_INODE_NODATACOW ||
- inode->flags & BTRFS_INODE_NODATASUM)
- return false;
- return true;
-}
-
-/*
* Check if the inode needs to be submitted to compression, based on mount
* options, defragmentation, properties or heuristics.
*/
@@ -506,108 +787,116 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- if (!inode_can_compress(inode)) {
- WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
- KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
- btrfs_ino(inode));
+ if (!btrfs_inode_can_compress(inode)) {
+ DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
return 0;
}
- /*
- * Special check for subpage.
- *
- * We lock the full page then run each delalloc range in the page, thus
- * for the following case, we will hit some subpage specific corner case:
- *
- * 0 32K 64K
- * | |///////| |///////|
- * \- A \- B
- *
- * In above case, both range A and range B will try to unlock the full
- * page [0, 64K), causing the one finished later will have page
- * unlocked already, triggering various page lock requirement BUG_ON()s.
- *
- * So here we add an artificial limit that subpage compression can only
- * if the range is fully page aligned.
- *
- * In theory we only need to ensure the first page is fully covered, but
- * the tailing partial page will be locked until the full compression
- * finishes, delaying the write of other range.
- *
- * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
- * first to prevent any submitted async extent to unlock the full page.
- * By this, we can ensure for subpage case that only the last async_cow
- * will unlock the full page.
- */
- if (fs_info->sectorsize < PAGE_SIZE) {
- if (!IS_ALIGNED(start, PAGE_SIZE) ||
- !IS_ALIGNED(end + 1, PAGE_SIZE))
- return 0;
- }
+ /* Defrag ioctl takes precedence over mount options and properties. */
+ if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
+ return 0;
+ if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
+ inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
+ return 1;
/* force compress */
if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
- /* defrag ioctl */
- if (inode->defrag_compress)
- return 1;
/* bad compression ratios */
if (inode->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
if (btrfs_test_opt(fs_info, COMPRESS) ||
inode->flags & BTRFS_INODE_COMPRESS ||
inode->prop_compress)
- return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
+ return btrfs_compress_heuristic(inode, start, end);
return 0;
}
static inline void inode_should_defrag(struct btrfs_inode *inode,
- u64 start, u64 end, u64 num_bytes, u64 small_write)
+ u64 start, u64 end, u64 num_bytes, u32 small_write)
{
/* If this is a small write inside eof, kick off a defrag */
if (num_bytes < small_write &&
(start > 0 || end + 1 < inode->disk_i_size))
- btrfs_add_inode_defrag(NULL, inode);
+ btrfs_add_inode_defrag(inode, small_write);
+}
+
+static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
+{
+ const pgoff_t end_index = end >> PAGE_SHIFT;
+ struct folio *folio;
+ int ret = 0;
+
+ for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
+ if (IS_ERR(folio)) {
+ if (!ret)
+ ret = PTR_ERR(folio);
+ continue;
+ }
+ btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
+ end + 1 - start);
+ folio_put(folio);
+ }
+ return ret;
}
/*
- * we create compressed extents in two phases. The first
- * phase compresses a range of pages that have already been
- * locked (both pages and state bits are locked).
+ * Work queue callback to start compression on a file and pages.
*
- * This is done inside an ordered work queue, and the compression
- * is spread across many cpus. The actual IO submission is step
- * two, and the ordered work queue takes care of making sure that
- * happens in the same order things were put onto the queue by
- * writepages and friends.
+ * This is done inside an ordered work queue, and the compression is spread
+ * across many cpus. The actual IO submission is step two, and the ordered work
+ * queue takes care of making sure that happens in the same order things were
+ * put onto the queue by writepages and friends.
*
- * If this code finds it can't get good compression, it puts an
- * entry onto the work queue to write the uncompressed bytes. This
- * makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that the flusher thread sent them
- * down.
+ * If this code finds it can't get good compression, it puts an entry onto the
+ * work queue to write the uncompressed bytes. This makes sure that both
+ * compressed inodes and uncompressed inodes are written in the same order that
+ * the flusher thread sent them down.
*/
-static noinline int compress_file_range(struct async_chunk *async_chunk)
+static void compress_file_range(struct btrfs_work *work)
{
- struct inode *inode = async_chunk->inode;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct async_chunk *async_chunk =
+ container_of(work, struct async_chunk, work);
+ struct btrfs_inode *inode = async_chunk->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 blocksize = fs_info->sectorsize;
u64 start = async_chunk->start;
u64 end = async_chunk->end;
u64 actual_end;
u64 i_size;
int ret = 0;
- struct page **pages = NULL;
- unsigned long nr_pages;
+ struct folio **folios = NULL;
+ unsigned long nr_folios;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
+ unsigned int loff;
int i;
- int will_compress;
int compress_type = fs_info->compress_type;
- int compressed_extents = 0;
- int redirty = 0;
+ int compress_level = fs_info->compress_level;
- inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
- SZ_16K);
+ if (unlikely(btrfs_is_shutdown(fs_info)))
+ goto cleanup_and_bail_uncompressed;
+
+ inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
+
+ /*
+ * We need to call clear_page_dirty_for_io on each page in the range.
+ * Otherwise applications with the file mmap'd can wander in and change
+ * the page contents while we are compressing them.
+ */
+ ret = extent_range_clear_dirty_for_io(inode, start, end);
+
+ /*
+ * All the folios should have been locked, thus no failure.
+ *
+ * And even if some folios are missing, btrfs_compress_folios()
+ * would handle them correctly, so here just do an ASSERT() check for
+ * early logic errors.
+ */
+ ASSERT(ret == 0);
/*
* We need to save i_size before now because it could change in between
@@ -619,15 +908,13 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
* does that for us.
*/
barrier();
- i_size = i_size_read(inode);
+ i_size = i_size_read(&inode->vfs_inode);
barrier();
actual_end = min_t(u64, i_size, end + 1);
again:
- will_compress = 0;
- nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
- BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
- nr_pages = min_t(unsigned long, nr_pages,
- BTRFS_MAX_COMPRESSED / PAGE_SIZE);
+ folios = NULL;
+ nr_folios = (end >> min_folio_shift) - (start >> min_folio_shift) + 1;
+ nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED >> min_folio_shift);
/*
* we don't want to send crud past the end of i_size through
@@ -649,447 +936,313 @@ again:
* isn't an inline extent, since it doesn't save disk space at all.
*/
if (total_compressed <= blocksize &&
- (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
+ (start > 0 || end + 1 < inode->disk_i_size))
goto cleanup_and_bail_uncompressed;
- /*
- * For subpage case, we require full page alignment for the sector
- * aligned range.
- * Thus we must also check against @actual_end, not just @end.
- */
- if (blocksize < PAGE_SIZE) {
- if (!IS_ALIGNED(start, PAGE_SIZE) ||
- !IS_ALIGNED(round_up(actual_end, blocksize), PAGE_SIZE))
- goto cleanup_and_bail_uncompressed;
- }
-
total_compressed = min_t(unsigned long, total_compressed,
BTRFS_MAX_UNCOMPRESSED);
total_in = 0;
ret = 0;
/*
- * we do compression for mount -o compress and when the
- * inode has not been flagged as nocompress. This flag can
- * change at any time if we discover bad compression ratios.
+ * We do compression for mount -o compress and when the inode has not
+ * been flagged as NOCOMPRESS. This flag can change at any time if we
+ * discover bad compression ratios.
*/
- if (inode_need_compress(BTRFS_I(inode), start, end)) {
- WARN_ON(pages);
- pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!pages) {
- /* just bail out to the uncompressed code */
- nr_pages = 0;
- goto cont;
- }
-
- if (BTRFS_I(inode)->defrag_compress)
- compress_type = BTRFS_I(inode)->defrag_compress;
- else if (BTRFS_I(inode)->prop_compress)
- compress_type = BTRFS_I(inode)->prop_compress;
+ if (!inode_need_compress(inode, start, end))
+ goto cleanup_and_bail_uncompressed;
+ folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
+ if (!folios) {
/*
- * we need to call clear_page_dirty_for_io on each
- * page in the range. Otherwise applications with the file
- * mmap'd can wander in and change the page contents while
- * we are compressing them.
- *
- * If the compression fails for any reason, we set the pages
- * dirty again later on.
- *
- * Note that the remaining part is redirtied, the start pointer
- * has moved, the end is the original one.
+ * Memory allocation failure is not a fatal error, we can fall
+ * back to uncompressed code.
*/
- if (!redirty) {
- extent_range_clear_dirty_for_io(inode, start, end);
- redirty = 1;
- }
+ goto cleanup_and_bail_uncompressed;
+ }
- /* Compression level is applied here and only here */
- ret = btrfs_compress_pages(
- compress_type | (fs_info->compress_level << 4),
- inode->i_mapping, start,
- pages,
- &nr_pages,
- &total_in,
- &total_compressed);
+ if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
+ compress_type = inode->defrag_compress;
+ compress_level = inode->defrag_compress_level;
+ } else if (inode->prop_compress) {
+ compress_type = inode->prop_compress;
+ }
- if (!ret) {
- unsigned long offset = offset_in_page(total_compressed);
- struct page *page = pages[nr_pages - 1];
+ /* Compression level is applied here. */
+ ret = btrfs_compress_folios(compress_type, compress_level,
+ inode, start, folios, &nr_folios, &total_in,
+ &total_compressed);
+ if (ret)
+ goto mark_incompressible;
- /* zero the tail end of the last page, we might be
- * sending it down to disk
- */
- if (offset)
- memzero_page(page, offset, PAGE_SIZE - offset);
- will_compress = 1;
- }
- }
-cont:
/*
- * Check cow_file_range() for why we don't even try to create inline
- * extent for subpage case.
+ * Zero the tail end of the last folio, as we might be sending it down
+ * to disk.
*/
- if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
- /* lets try to make an inline extent */
- if (ret || total_in < actual_end) {
- /* we didn't compress the entire range, try
- * to make an uncompressed inline extent.
- */
- ret = cow_file_range_inline(BTRFS_I(inode), start, end,
- 0, BTRFS_COMPRESS_NONE,
- NULL);
- } else {
- /* try making a compressed inline extent */
- ret = cow_file_range_inline(BTRFS_I(inode), start, end,
- total_compressed,
- compress_type, pages);
- }
- if (ret <= 0) {
- unsigned long clear_flags = EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
- EXTENT_DO_ACCOUNTING;
- unsigned long page_error_op;
-
- page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
+ loff = (total_compressed & (min_folio_size - 1));
+ if (loff)
+ folio_zero_range(folios[nr_folios - 1], loff, min_folio_size - loff);
- /*
- * inline extent creation worked or returned error,
- * we don't need to create any more async work items.
- * Unlock and free up our temp pages.
- *
- * We use DO_ACCOUNTING here because we need the
- * delalloc_release_metadata to be done _after_ we drop
- * our outstanding extent for clearing delalloc for this
- * range.
- */
- extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
- NULL,
- clear_flags,
- PAGE_UNLOCK |
- PAGE_START_WRITEBACK |
- page_error_op |
- PAGE_END_WRITEBACK);
-
- /*
- * Ensure we only free the compressed pages if we have
- * them allocated, as we can still reach here with
- * inode_need_compress() == false.
- */
- if (pages) {
- for (i = 0; i < nr_pages; i++) {
- WARN_ON(pages[i]->mapping);
- put_page(pages[i]);
- }
- kfree(pages);
- }
- return 0;
- }
+ /*
+ * Try to create an inline extent.
+ *
+ * If we didn't compress the entire range, try to create an uncompressed
+ * inline extent, else a compressed one.
+ *
+ * Check cow_file_range() for why we don't even try to create inline
+ * extent for the subpage case.
+ */
+ if (total_in < actual_end)
+ ret = cow_file_range_inline(inode, NULL, start, end, 0,
+ BTRFS_COMPRESS_NONE, NULL, false);
+ else
+ ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
+ compress_type, folios[0], false);
+ if (ret <= 0) {
+ if (ret < 0)
+ mapping_set_error(mapping, -EIO);
+ goto free_pages;
}
- if (will_compress) {
- /*
- * we aren't doing an inline extent round the compressed size
- * up to a block size boundary so the allocator does sane
- * things
- */
- total_compressed = ALIGN(total_compressed, blocksize);
-
- /*
- * one last check to make sure the compression is really a
- * win, compare the page count read with the blocks on disk,
- * compression must free at least one sector size
- */
- total_in = round_up(total_in, fs_info->sectorsize);
- if (total_compressed + blocksize <= total_in) {
- compressed_extents++;
+ /*
+ * We aren't doing an inline extent. Round the compressed size up to a
+ * block size boundary so the allocator does sane things.
+ */
+ total_compressed = ALIGN(total_compressed, blocksize);
- /*
- * The async work queues will take care of doing actual
- * allocation on disk for these compressed pages, and
- * will submit them to the elevator.
- */
- add_async_extent(async_chunk, start, total_in,
- total_compressed, pages, nr_pages,
- compress_type);
-
- if (start + total_in < end) {
- start += total_in;
- pages = NULL;
- cond_resched();
- goto again;
- }
- return compressed_extents;
- }
- }
- if (pages) {
- /*
- * the compression code ran but failed to make things smaller,
- * free any pages it allocated and our page pointer array
- */
- for (i = 0; i < nr_pages; i++) {
- WARN_ON(pages[i]->mapping);
- put_page(pages[i]);
- }
- kfree(pages);
- pages = NULL;
- total_compressed = 0;
- nr_pages = 0;
+ /*
+ * One last check to make sure the compression is really a win: compare
+ * the page count read with the blocks on disk; compression must free at
+ * least one sector.
+ */
+ total_in = round_up(total_in, fs_info->sectorsize);
+ if (total_compressed + blocksize > total_in)
+ goto mark_incompressible;
- /* flag the file so we don't compress in the future */
- if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
- !(BTRFS_I(inode)->prop_compress)) {
- BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
- }
- }
-cleanup_and_bail_uncompressed:
/*
- * No compression, but we still need to write the pages in the file
- * we've been given so far. redirty the locked page if it corresponds
- * to our extent and set things up for the async work queue to run
- * cow_file_range to do the normal delalloc dance.
+ * The async work queues will take care of doing actual allocation on
+ * disk for these compressed pages, and will submit the bios.
*/
- if (async_chunk->locked_page &&
- (page_offset(async_chunk->locked_page) >= start &&
- page_offset(async_chunk->locked_page)) <= end) {
- __set_page_dirty_nobuffers(async_chunk->locked_page);
- /* unlocked later on in the async handlers */
+ ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
+ nr_folios, compress_type);
+ BUG_ON(ret);
+ if (start + total_in < end) {
+ start += total_in;
+ cond_resched();
+ goto again;
}
+ return;
- if (redirty)
- extent_range_redirty_for_io(inode, start, end);
- add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
- BTRFS_COMPRESS_NONE);
- compressed_extents++;
-
- return compressed_extents;
+mark_incompressible:
+ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
+ inode->flags |= BTRFS_INODE_NOCOMPRESS;
+cleanup_and_bail_uncompressed:
+ ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
+ BTRFS_COMPRESS_NONE);
+ BUG_ON(ret);
+free_pages:
+ if (folios) {
+ for (i = 0; i < nr_folios; i++) {
+ WARN_ON(folios[i]->mapping);
+ btrfs_free_compr_folio(folios[i]);
+ }
+ kfree(folios);
+ }
}
static void free_async_extent_pages(struct async_extent *async_extent)
{
int i;
- if (!async_extent->pages)
+ if (!async_extent->folios)
return;
- for (i = 0; i < async_extent->nr_pages; i++) {
- WARN_ON(async_extent->pages[i]->mapping);
- put_page(async_extent->pages[i]);
+ for (i = 0; i < async_extent->nr_folios; i++) {
+ WARN_ON(async_extent->folios[i]->mapping);
+ btrfs_free_compr_folio(async_extent->folios[i]);
}
- kfree(async_extent->pages);
- async_extent->nr_pages = 0;
- async_extent->pages = NULL;
+ kfree(async_extent->folios);
+ async_extent->nr_folios = 0;
+ async_extent->folios = NULL;
}
-static int submit_uncompressed_range(struct btrfs_inode *inode,
- struct async_extent *async_extent,
- struct page *locked_page)
+static void submit_uncompressed_range(struct btrfs_inode *inode,
+ struct async_extent *async_extent,
+ struct folio *locked_folio)
{
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
- unsigned long nr_written = 0;
- int page_started = 0;
int ret;
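+ /* Write back the whole uncompressed range synchronously. */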
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .range_start = start,
+ .range_end = end,
+ .no_cgroup_owner = 1,
+ };
- /*
- * Call cow_file_range() to run the delalloc range directly, since we
- * won't go to NOCOW or async path again.
- *
- * Also we call cow_file_range() with @unlock_page == 0, so that we
- * can directly submit them without interruption.
- */
- ret = cow_file_range(inode, locked_page, start, end, &page_started,
- &nr_written, 0);
- /* Inline extent inserted, page gets unlocked and everything is done */
- if (page_started) {
- ret = 0;
- goto out;
- }
+ wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
+ ret = run_delalloc_cow(inode, locked_folio, start, end,
+ &wbc, false);
+ wbc_detach_inode(&wbc);
if (ret < 0) {
- if (locked_page)
- unlock_page(locked_page);
- goto out;
+ if (locked_folio)
+ btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
+ start, async_extent->ram_size);
+ btrfs_err_rl(inode->root->fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start, async_extent->ram_size, ret);
}
-
- ret = extent_write_locked_range(&inode->vfs_inode, start, end);
- /* All pages will be unlocked, including @locked_page */
-out:
- kfree(async_extent);
- return ret;
}
-static int submit_one_async_extent(struct btrfs_inode *inode,
- struct async_chunk *async_chunk,
- struct async_extent *async_extent,
- u64 *alloc_hint)
+static void submit_one_async_extent(struct async_chunk *async_chunk,
+ struct async_extent *async_extent,
+ u64 *alloc_hint)
{
+ struct btrfs_inode *inode = async_chunk->inode;
struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
struct btrfs_key ins;
- struct page *locked_page = NULL;
+ struct folio *locked_folio = NULL;
+ struct extent_state *cached = NULL;
struct extent_map *em;
int ret = 0;
+ bool free_pages = false;
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
+ if (async_chunk->blkcg_css)
+ kthread_associate_blkcg(async_chunk->blkcg_css);
+
/*
- * If async_chunk->locked_page is in the async_extent range, we need to
+ * If async_chunk->locked_folio is in the async_extent range, we need to
* handle it.
*/
- if (async_chunk->locked_page) {
- u64 locked_page_start = page_offset(async_chunk->locked_page);
- u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;
+ if (async_chunk->locked_folio) {
+ u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
+ u64 locked_folio_end = locked_folio_start +
+ folio_size(async_chunk->locked_folio) - 1;
- if (!(start >= locked_page_end || end <= locked_page_start))
- locked_page = async_chunk->locked_page;
+ if (!(start >= locked_folio_end || end <= locked_folio_start))
+ locked_folio = async_chunk->locked_folio;
}
- lock_extent(io_tree, start, end);
- /* We have fall back to uncompressed write */
- if (!async_extent->pages)
- return submit_uncompressed_range(inode, async_extent, locked_page);
+ if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
+ ASSERT(!async_extent->folios);
+ ASSERT(async_extent->nr_folios == 0);
+ submit_uncompressed_range(inode, async_extent, locked_folio);
+ free_pages = true;
+ goto done;
+ }
ret = btrfs_reserve_extent(root, async_extent->ram_size,
async_extent->compressed_size,
async_extent->compressed_size,
- 0, *alloc_hint, &ins, 1, 1);
+ 0, *alloc_hint, &ins, true, true);
if (ret) {
- free_async_extent_pages(async_extent);
/*
- * Here we used to try again by going back to non-compressed
- * path for ENOSPC. But we can't reserve space even for
- * compressed size, how could it work for uncompressed size
- * which requires larger size? So here we directly go error
- * path.
+ * We can't reserve contiguous space for the compressed size.
+ * Unlikely, but it's possible that we could have enough
+ * non-contiguous space for the uncompressed size instead. So
+ * fall back to uncompressed.
*/
- goto out_free;
+ submit_uncompressed_range(inode, async_extent, locked_folio);
+ free_pages = true;
+ goto done;
}
+ btrfs_lock_extent(io_tree, start, end, &cached);
+
/* Here we're doing allocation and writeback of the compressed pages */
- em = create_io_em(inode, start,
- async_extent->ram_size, /* len */
- start, /* orig_start */
- ins.objectid, /* block_start */
- ins.offset, /* block_len */
- ins.offset, /* orig_block_len */
- async_extent->ram_size, /* ram_bytes */
- async_extent->compress_type,
- BTRFS_ORDERED_COMPRESSED);
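+ /*
+ * ram_bytes/num_bytes cover the uncompressed file range, while
+ * disk_bytenr/disk_num_bytes describe the smaller compressed extent
+ * we just reserved on disk.
+ */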
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.ram_bytes = async_extent->ram_size;
+ file_extent.num_bytes = async_extent->ram_size;
+ file_extent.offset = 0;
+ file_extent.compression = async_extent->compress_type;
+
+ em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_free_reserve;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- ret = btrfs_add_ordered_extent_compress(inode, start, /* file_offset */
- ins.objectid, /* disk_bytenr */
- async_extent->ram_size, /* num_bytes */
- ins.offset, /* disk_num_bytes */
- async_extent->compress_type);
- if (ret) {
- btrfs_drop_extent_cache(inode, start, end, 0);
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+ 1U << BTRFS_ORDERED_COMPRESSED);
+ if (IS_ERR(ordered)) {
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = PTR_ERR(ordered);
goto out_free_reserve;
}
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
/* Clear dirty, set writeback and unlock the pages. */
extent_clear_unlock_delalloc(inode, start, end,
- NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
+ NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_START_WRITEBACK);
- if (btrfs_submit_compressed_write(inode, start, /* file_offset */
- async_extent->ram_size, /* num_bytes */
- ins.objectid, /* disk_bytenr */
- ins.offset, /* compressed_len */
- async_extent->pages, /* compressed_pages */
- async_extent->nr_pages,
- async_chunk->write_flags,
- async_chunk->blkcg_css)) {
- const u64 start = async_extent->start;
- const u64 end = start + async_extent->ram_size - 1;
-
- btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);
-
- extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
- PAGE_END_WRITEBACK | PAGE_SET_ERROR);
- free_async_extent_pages(async_extent);
- }
+ btrfs_submit_compressed_write(ordered,
+ async_extent->folios, /* compressed_folios */
+ async_extent->nr_folios,
+ async_chunk->write_flags, true);
*alloc_hint = ins.objectid + ins.offset;
+done:
+ if (async_chunk->blkcg_css)
+ kthread_associate_blkcg(NULL);
+ if (free_pages)
+ free_async_extent_pages(async_extent);
kfree(async_extent);
- return ret;
+ return;
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
-out_free:
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
+ mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
extent_clear_unlock_delalloc(inode, start, end,
- NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
+ NULL, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
- PAGE_END_WRITEBACK | PAGE_SET_ERROR);
+ PAGE_END_WRITEBACK);
free_async_extent_pages(async_extent);
- kfree(async_extent);
- return ret;
-}
-
-/*
- * Phase two of compressed writeback. This is the ordered portion of the code,
- * which only gets called in the order the work was queued. We walk all the
- * async extents created by compress_file_range and send them down to the disk.
- */
-static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
-{
- struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct async_extent *async_extent;
- u64 alloc_hint = 0;
- int ret = 0;
-
- while (!list_empty(&async_chunk->extents)) {
- u64 extent_start;
- u64 ram_size;
-
- async_extent = list_entry(async_chunk->extents.next,
- struct async_extent, list);
- list_del(&async_extent->list);
- extent_start = async_extent->start;
- ram_size = async_extent->ram_size;
-
- ret = submit_one_async_extent(inode, async_chunk, async_extent,
- &alloc_hint);
- btrfs_debug(fs_info,
+ if (async_chunk->blkcg_css)
+ kthread_associate_blkcg(NULL);
+ btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
- inode->root->root_key.objectid,
- btrfs_ino(inode), extent_start, ram_size, ret);
- }
+ btrfs_root_id(root), btrfs_ino(inode), start,
+ async_extent->ram_size, ret);
+ kfree(async_extent);
}
-static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
- u64 num_bytes)
+u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
+ u64 num_bytes)
{
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 alloc_hint = 0;
read_lock(&em_tree->lock);
- em = search_extent_mapping(em_tree, start, num_bytes);
+ em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
if (em) {
/*
* if block start isn't an actual block number then find the
* first block in this inode and use that as a hint. If that
* block is also bogus then just don't worry about it.
*/
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- free_extent_map(em);
- em = search_extent_mapping(em_tree, 0, 0);
- if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
- alloc_hint = em->block_start;
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ btrfs_free_extent_map(em);
+ em = btrfs_search_extent_mapping(em_tree, 0, 0);
+ if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ alloc_hint = btrfs_extent_map_block_start(em);
if (em)
- free_extent_map(em);
+ btrfs_free_extent_map(em);
} else {
- alloc_hint = em->block_start;
- free_extent_map(em);
+ alloc_hint = btrfs_extent_map_block_start(em);
+ btrfs_free_extent_map(em);
}
}
read_unlock(&em_tree->lock);
@@ -1103,23 +1256,36 @@ static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* allocate extents on disk for the range, and create ordered data structs
* in ram to track those extents.
*
- * locked_page is the page that writepage had locked already. We use
+ * locked_folio is the folio that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
- * *page_started is set to one if we unlock locked_page and do everything
- * required to start IO on it. It may be clean and already done with
- * IO when we return.
+ * When this function fails, it unlocks all folios except @locked_folio.
+ *
+ * When this function successfully creates an inline extent, it returns 1 and
+ * unlocks all folios including locked_folio and starts I/O on them.
+ * (In reality inline extents are limited to a single block, so locked_folio is
+ * the only folio handled anyway).
+ *
+ * When this function succeeds and creates a normal extent, the folio locking
+ * status depends on the passed-in flags:
+ *
+ * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
+ * - Else all folios except for @locked_folio are unlocked.
+ *
+ * When a failure happens in the second or later iteration of the
+ * while-loop, the ordered extents created in previous iterations are cleaned up.
*/
static noinline int cow_file_range(struct btrfs_inode *inode,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, int unlock)
+ struct folio *locked_folio, u64 start,
+ u64 end, u64 *done_offset,
+ unsigned long flags)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_state *cached = NULL;
u64 alloc_hint = 0;
+ u64 orig_start = start;
u64 num_bytes;
- unsigned long ram_size;
u64 cur_alloc_size = 0;
u64 min_alloc_size;
u64 blocksize = fs_info->sectorsize;
@@ -1127,11 +1293,14 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
struct extent_map *em;
unsigned clear_bits;
unsigned long page_ops;
- bool extent_reserved = false;
int ret = 0;
+ if (unlikely(btrfs_is_shutdown(fs_info))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
if (btrfs_is_free_space_inode(inode)) {
- WARN_ON_ONCE(1);
ret = -EINVAL;
goto out_unlock;
}
@@ -1142,57 +1311,36 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
- /*
- * Due to the page size limit, for subpage we can only trigger the
- * writeback for the dirty sectors of page, that means data writeback
- * is doing more writeback than what we want.
- *
- * This is especially unexpected for some call sites like fallocate,
- * where we only increase i_size after everything is done.
- * This means we can trigger inline extent even if we didn't want to.
- * So here we skip inline extent creation completely.
- */
- if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
+ if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
/* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, start, end, 0,
- BTRFS_COMPRESS_NONE, NULL);
- if (ret == 0) {
- /*
- * We use DO_ACCOUNTING here because we need the
- * delalloc_release_metadata to be run _after_ we drop
- * our outstanding extent for clearing delalloc for this
- * range.
- */
- extent_clear_unlock_delalloc(inode, start, end,
- locked_page,
- EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
- EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
- PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
- *nr_written = *nr_written +
- (end - start + PAGE_SIZE) / PAGE_SIZE;
- *page_started = 1;
+ ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
+ BTRFS_COMPRESS_NONE, NULL, false);
+ if (ret <= 0) {
/*
- * locked_page is locked by the caller of
- * writepage_delalloc(), not locked by
- * __process_pages_contig().
- *
- * We can't let __process_pages_contig() to unlock it,
- * as it doesn't have any subpage::writers recorded.
+ * We succeeded, return 1 so the caller knows we're done
+ * with this page and already handled the IO.
*
- * Here we manually unlock the page, since the caller
- * can't use page_started to determine if it's an
- * inline extent or a compressed extent.
+ * If there was an error then cow_file_range_inline() has
+ * already done the cleanup.
*/
- unlock_page(locked_page);
- goto out;
- } else if (ret < 0) {
- goto out_unlock;
+ if (ret == 0)
+ ret = 1;
+ goto done;
}
}
- alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
- btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
+ alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
+
+ /*
+ * We're not doing compressed IO; don't unlock the first page (which
+ * the caller expects to stay locked), don't clear any dirty bits and
+ * don't set any writeback bits.
+ *
+ * Do set the Ordered (Private2) bit so we know this page was properly
+ * set up for writepage.
+ */
+ page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
+ page_ops |= PAGE_SET_ORDERED;
/*
* Relocation relies on the relocated extents to have exactly the same
@@ -1211,39 +1359,82 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
min_alloc_size = fs_info->sectorsize;
while (num_bytes > 0) {
- cur_alloc_size = num_bytes;
- ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
+
+ ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
min_alloc_size, 0, alloc_hint,
- &ins, 1, 1);
+ &ins, true, true);
+ if (ret == -EAGAIN) {
+ /*
+ * btrfs_reserve_extent only returns -EAGAIN for zoned
+ * file systems, which is an indication that there are
+ * no active zones to allocate from at the moment.
+ *
+ * If this is the first loop iteration, wait for at
+ * least one zone to finish before retrying the
+ * allocation. Otherwise ask the caller to write out
+ * the already allocated blocks before coming back to
+ * us, or return -ENOSPC if it can't handle retries.
+ */
+ ASSERT(btrfs_is_zoned(fs_info));
+ if (start == orig_start) {
+ wait_on_bit_io(&inode->root->fs_info->flags,
+ BTRFS_FS_NEED_ZONE_FINISH,
+ TASK_UNINTERRUPTIBLE);
+ continue;
+ }
+ if (done_offset) {
+ /*
+ * Move @end to the end of the processed range,
+ * and exit the loop to unlock the processed extents.
+ */
+ end = start - 1;
+ ret = 0;
+ break;
+ }
+ ret = -ENOSPC;
+ }
if (ret < 0)
goto out_unlock;
cur_alloc_size = ins.offset;
- extent_reserved = true;
-
- ram_size = ins.offset;
- em = create_io_em(inode, start, ins.offset, /* len */
- start, /* orig_start */
- ins.objectid, /* block_start */
- ins.offset, /* block_len */
- ins.offset, /* orig_block_len */
- ram_size, /* ram_bytes */
- BTRFS_COMPRESS_NONE, /* compress_type */
- BTRFS_ORDERED_REGULAR /* type */);
+
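+ /*
+ * A plain COW extent: all sizes equal the reserved size and there is
+ * no compression.
+ */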
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = ins.offset;
+ file_extent.ram_bytes = ins.offset;
+ file_extent.offset = 0;
+ file_extent.compression = BTRFS_COMPRESS_NONE;
+
+ /*
+ * The locked range will be released either during error cleanup or
+ * after the whole range is finished.
+ */
+ btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
+ &cached);
+
+ em = btrfs_create_io_em(inode, start, &file_extent,
+ BTRFS_ORDERED_REGULAR);
if (IS_ERR(em)) {
+ btrfs_unlock_extent(&inode->io_tree, start,
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(em);
goto out_reserve;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
- ram_size, cur_alloc_size,
- BTRFS_ORDERED_REGULAR);
- if (ret)
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+ 1U << BTRFS_ORDERED_REGULAR);
+ if (IS_ERR(ordered)) {
+ btrfs_unlock_extent(&inode->io_tree, start,
+ start + cur_alloc_size - 1, &cached);
+ ret = PTR_ERR(ordered);
goto out_drop_extent_cache;
+ }
if (btrfs_is_data_reloc_root(root)) {
- ret = btrfs_reloc_clone_csums(inode, start,
- cur_alloc_size);
+ ret = btrfs_reloc_clone_csums(ordered);
+
/*
* Only drop cache here, and process as normal.
*
@@ -1256,34 +1447,21 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* skip current ordered extent.
*/
if (ret)
- btrfs_drop_extent_cache(inode, start,
- start + ram_size - 1, 0);
+ btrfs_drop_extent_map_range(inode, start,
+ start + cur_alloc_size - 1,
+ false);
}
+ btrfs_put_ordered_extent(ordered);
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- /*
- * We're not doing compressed IO, don't unlock the first page
- * (which the caller expects to stay locked), don't clear any
- * dirty bits and don't set any writeback bits
- *
- * Do set the Ordered (Private2) bit so we know this page was
- * properly setup for writepage.
- */
- page_ops = unlock ? PAGE_UNLOCK : 0;
- page_ops |= PAGE_SET_ORDERED;
-
- extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
- locked_page,
- EXTENT_LOCKED | EXTENT_DELALLOC,
- page_ops);
if (num_bytes < cur_alloc_size)
num_bytes = 0;
else
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
- extent_reserved = false;
+ cur_alloc_size = 0;
/*
* btrfs_reloc_clone_csums() error, since start is increased
@@ -1293,82 +1471,132 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (ret)
goto out_unlock;
}
-out:
+ extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
+done:
+ if (done_offset)
+ *done_offset = end;
return ret;
out_drop_extent_cache:
- btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
+ btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false);
out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_unlock:
+ /*
+ * Now, we have three regions to clean up:
+ *
+ * |-------(1)----|---(2)---|-------------(3)----------|
+ * `- orig_start `- start `- start + cur_alloc_size `- end
+ *
+ * We process each region below.
+ */
+
+ /*
+ * For the range (1). We have already instantiated the ordered extents
+ * for this region, thus we need to clean up those ordered extents.
+ * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
+ * are also handled by the ordered extent cleanup.
+ *
+ * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags, and
+ * finish the writeback of the involved folios, which will never be submitted.
+ */
+ if (orig_start < start) {
+ clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
+ page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+
+ if (!locked_folio)
+ mapping_set_error(inode->vfs_inode.i_mapping, ret);
+
+ btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
+ extent_clear_unlock_delalloc(inode, orig_start, start - 1,
+ locked_folio, NULL, clear_bits, page_ops);
+ }
+
clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
+ EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+
/*
- * If we reserved an extent for our delalloc range (or a subrange) and
- * failed to create the respective ordered extent, then it means that
- * when we reserved the extent we decremented the extent's size from
- * the data space_info's bytes_may_use counter and incremented the
- * space_info's bytes_reserved counter by the same amount. We must make
- * sure extent_clear_unlock_delalloc() does not try to decrement again
- * the data space_info's bytes_may_use counter, therefore we do not pass
- * it the flag EXTENT_CLEAR_DATA_RESV.
+ * For the range (2). If we reserved an extent for our delalloc range
+ * (or a subrange) and failed to create the respective ordered extent,
+ * then it means that when we reserved the extent we decremented the
+ * extent's size from the data space_info's bytes_may_use counter and
+ * incremented the space_info's bytes_reserved counter by the same
+ * amount. We must make sure extent_clear_unlock_delalloc() does not try
+ * to decrement again the data space_info's bytes_may_use counter,
+ * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
*/
- if (extent_reserved) {
+ if (cur_alloc_size) {
extent_clear_unlock_delalloc(inode, start,
start + cur_alloc_size - 1,
- locked_page,
- clear_bits,
+ locked_folio, &cached, clear_bits,
page_ops);
- start += cur_alloc_size;
- if (start >= end)
- goto out;
+ btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
}
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
- clear_bits | EXTENT_CLEAR_DATA_RESV,
- page_ops);
- goto out;
-}
-
-/*
- * work queue call back to started compression on a file and pages
- */
-static noinline void async_cow_start(struct btrfs_work *work)
-{
- struct async_chunk *async_chunk;
- int compressed_extents;
- async_chunk = container_of(work, struct async_chunk, work);
-
- compressed_extents = compress_file_range(async_chunk);
- if (compressed_extents == 0) {
- btrfs_add_delayed_iput(async_chunk->inode);
- async_chunk->inode = NULL;
- }
+ /*
+ * For the range (3). We never touched the region. In addition to the
+ * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
+ * space_info's bytes_may_use counter, reserved in
+ * btrfs_check_data_free_space().
+ */
+ if (start + cur_alloc_size < end) {
+ clear_bits |= EXTENT_CLEAR_DATA_RESV;
+ extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
+ end, locked_folio,
+ &cached, clear_bits, page_ops);
+ btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
+ end - start - cur_alloc_size + 1, NULL);
+ }
+ btrfs_err(fs_info,
+"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), orig_start, end + 1 - orig_start,
+ start, cur_alloc_size, ret);
+ return ret;
}
/*
- * work queue call back to submit previously compressed pages
+ * Phase two of compressed writeback. This is the ordered portion of the code,
+ * which only gets called in the order the work was queued. We walk all the
+ * async extents created by compress_file_range and send them down to the disk.
+ *
+ * If called with @do_free == true then it'll try to finish the work and free
+ * the work struct eventually.
*/
-static noinline void async_cow_submit(struct btrfs_work *work)
+static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
struct async_chunk *async_chunk = container_of(work, struct async_chunk,
work);
struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
+ struct async_extent *async_extent;
unsigned long nr_pages;
+ u64 alloc_hint = 0;
+
+ if (do_free) {
+ struct async_cow *async_cow;
+
+ btrfs_add_delayed_iput(async_chunk->inode);
+ if (async_chunk->blkcg_css)
+ css_put(async_chunk->blkcg_css);
+
+ async_cow = async_chunk->async_cow;
+ if (atomic_dec_and_test(&async_cow->num_chunks))
+ kvfree(async_cow);
+ return;
+ }
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
PAGE_SHIFT;
- /*
- * ->inode could be NULL if async_chunk_start has failed to compress,
- * in which case we don't have anything to submit, yet we need to
- * always adjust ->async_delalloc_pages as its paired with the init
- * happening in cow_file_range_async
- */
- if (async_chunk->inode)
- submit_compressed_extents(async_chunk);
+ while (!list_empty(&async_chunk->extents)) {
+ async_extent = list_first_entry(&async_chunk->extents,
+ struct async_extent, list);
+ list_del(&async_extent->list);
+ submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
+ }
/* atomic_sub_return implies a barrier */
if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
@@ -1376,74 +1604,33 @@ static noinline void async_cow_submit(struct btrfs_work *work)
cond_wake_up_nomb(&fs_info->async_submit_wait);
}
-static noinline void async_cow_free(struct btrfs_work *work)
-{
- struct async_chunk *async_chunk;
- struct async_cow *async_cow;
-
- async_chunk = container_of(work, struct async_chunk, work);
- if (async_chunk->inode)
- btrfs_add_delayed_iput(async_chunk->inode);
- if (async_chunk->blkcg_css)
- css_put(async_chunk->blkcg_css);
-
- async_cow = async_chunk->async_cow;
- if (atomic_dec_and_test(&async_cow->num_chunks))
- kvfree(async_cow);
-}
-
-static int cow_file_range_async(struct btrfs_inode *inode,
- struct writeback_control *wbc,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written)
+static bool run_delalloc_compressed(struct btrfs_inode *inode,
+ struct folio *locked_folio, u64 start,
+ u64 end, struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
struct async_cow *ctx;
struct async_chunk *async_chunk;
unsigned long nr_pages;
- u64 cur_end;
u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
int i;
- bool should_compress;
unsigned nofs_flag;
- const unsigned int write_flags = wbc_to_write_flags(wbc);
-
- unlock_extent(&inode->io_tree, start, end);
-
- if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
- !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
- num_chunks = 1;
- should_compress = false;
- } else {
- should_compress = true;
- }
+ const blk_opf_t write_flags = wbc_to_write_flags(wbc);
nofs_flag = memalloc_nofs_save();
ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
memalloc_nofs_restore(nofs_flag);
+ if (!ctx)
+ return false;
- if (!ctx) {
- unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
- EXTENT_DO_ACCOUNTING;
- unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
- PAGE_END_WRITEBACK | PAGE_SET_ERROR;
-
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
- clear_bits, page_ops);
- return -ENOMEM;
- }
+ set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
async_chunk = ctx->chunks;
atomic_set(&ctx->num_chunks, num_chunks);
for (i = 0; i < num_chunks; i++) {
- if (should_compress)
- cur_end = min(end, start + SZ_512K - 1);
- else
- cur_end = end;
+ u64 cur_end = min(end, start + SZ_512K - 1);
/*
* igrab is called higher up in the call chain, take only the
@@ -1451,22 +1638,23 @@ static int cow_file_range_async(struct btrfs_inode *inode,
*/
ihold(&inode->vfs_inode);
async_chunk[i].async_cow = ctx;
- async_chunk[i].inode = &inode->vfs_inode;
+ async_chunk[i].inode = inode;
async_chunk[i].start = start;
async_chunk[i].end = cur_end;
async_chunk[i].write_flags = write_flags;
INIT_LIST_HEAD(&async_chunk[i].extents);
/*
- * The locked_page comes all the way from writepage and its
- * the original page we were actually given. As we spread
+ * The locked_folio comes all the way from writepage and it's
+ * the original folio we were actually given. As we spread
* this large delalloc region across multiple async_chunk
- * structs, only the first struct needs a pointer to locked_page
+ * structs, only the first struct needs a pointer to
+ * locked_folio.
*
* This way we don't need racey decisions about who is supposed
* to unlock it.
*/
- if (locked_page) {
+ if (locked_folio) {
/*
* Depending on the compressibility, the pages might or
* might not go through async. We want all of them to
@@ -1476,91 +1664,72 @@ static int cow_file_range_async(struct btrfs_inode *inode,
* need full accuracy. Just account the whole thing
* against the first page.
*/
- wbc_account_cgroup_owner(wbc, locked_page,
+ wbc_account_cgroup_owner(wbc, locked_folio,
cur_end - start);
- async_chunk[i].locked_page = locked_page;
- locked_page = NULL;
+ async_chunk[i].locked_folio = locked_folio;
+ locked_folio = NULL;
} else {
- async_chunk[i].locked_page = NULL;
+ async_chunk[i].locked_folio = NULL;
}
if (blkcg_css != blkcg_root_css) {
css_get(blkcg_css);
async_chunk[i].blkcg_css = blkcg_css;
+ async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
} else {
async_chunk[i].blkcg_css = NULL;
}
- btrfs_init_work(&async_chunk[i].work, async_cow_start,
- async_cow_submit, async_cow_free);
+ btrfs_init_work(&async_chunk[i].work, compress_file_range,
+ submit_compressed_extents);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
- *nr_written += nr_pages;
start = cur_end + 1;
}
- *page_started = 1;
- return 0;
-}
-
-static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
- struct page *locked_page, u64 start,
- u64 end, int *page_started,
- unsigned long *nr_written)
-{
- int ret;
-
- ret = cow_file_range(inode, locked_page, start, end, page_started,
- nr_written, 0);
- if (ret)
- return ret;
-
- if (*page_started)
- return 0;
-
- __set_page_dirty_nobuffers(locked_page);
- account_page_redirty(locked_page);
- extent_write_locked_range(&inode->vfs_inode, start, end);
- *page_started = 1;
-
- return 0;
+ return true;
}
-static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
- u64 bytenr, u64 num_bytes)
+/*
+ * Run the delalloc range from start to end, and write back any dirty pages
+ * covered by the range.
+ */
+static noinline int run_delalloc_cow(struct btrfs_inode *inode,
+ struct folio *locked_folio, u64 start,
+ u64 end, struct writeback_control *wbc,
+ bool pages_dirty)
{
+ u64 done_offset = end;
int ret;
- struct btrfs_ordered_sum *sums;
- LIST_HEAD(list);
-
- ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
- bytenr + num_bytes - 1, &list, 0);
- if (ret == 0 && list_empty(&list))
- return 0;
- while (!list_empty(&list)) {
- sums = list_entry(list.next, struct btrfs_ordered_sum, list);
- list_del(&sums->list);
- kfree(sums);
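+ /*
+ * On zoned filesystems cow_file_range() may stop early and report how
+ * far it got in @done_offset; write that part back and continue with
+ * the rest of the range.
+ */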
+ while (start <= end) {
+ ret = cow_file_range(inode, locked_folio, start, end,
+ &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
+ if (ret)
+ return ret;
+ extent_write_locked_range(&inode->vfs_inode, locked_folio,
+ start, done_offset, wbc, pages_dirty);
+ start = done_offset + 1;
}
- if (ret < 0)
- return ret;
+
return 1;
}
-static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
- const u64 start, const u64 end,
- int *page_started, unsigned long *nr_written)
+static int fallback_to_cow(struct btrfs_inode *inode,
+ struct folio *locked_folio, const u64 start,
+ const u64 end)
{
const bool is_space_ino = btrfs_is_free_space_inode(inode);
const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
const u64 range_bytes = end + 1 - start;
struct extent_io_tree *io_tree = &inode->io_tree;
+ struct extent_state *cached_state = NULL;
u64 range_start = start;
u64 count;
+ int ret;
/*
* If EXTENT_NORESERVE is set it means that when the buffered write was
@@ -1594,8 +1763,9 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
* group that contains that extent to RO mode and therefore force COW
* when starting writeback.
*/
- count = count_range_bits(io_tree, &range_start, end, range_bytes,
- EXTENT_NORESERVE, 0);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
+ count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
+ EXTENT_NORESERVE, 0, NULL);
if (count > 0 || is_space_ino || is_reloc_ino) {
u64 bytes = count;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1605,68 +1775,301 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
bytes = range_bytes;
spin_lock(&sinfo->lock);
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, bytes);
spin_unlock(&sinfo->lock);
if (count > 0)
- clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
- 0, 0, NULL);
+ btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
+ &cached_state);
}
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
- return cow_file_range(inode, locked_page, start, end, page_started,
- nr_written, 1);
+ /*
+ * Don't try to create inline extents, as mixing an inline extent (which
+ * is written out and unlocked directly) with a normal NOCOW extent
+ * doesn't work.
+ *
+ * And here we do not unlock the folio after a successful run.
+ * The folios will be unlocked after everything is finished, or by error handling.
+ *
+ * This is to ensure error handling won't need to clear dirty/ordered flags without
+ * a locked folio, which can race with writeback.
+ */
+ ret = cow_file_range(inode, locked_folio, start, end, NULL,
+ COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
+ ASSERT(ret != 1);
+ return ret;
}
+struct can_nocow_file_extent_args {
+ /* Input fields. */
+
+ /* Start file offset of the range we want to NOCOW. */
+ u64 start;
+ /* End file offset (inclusive) of the range we want to NOCOW. */
+ u64 end;
+ bool writeback_path;
+ /*
+ * Free the path passed to can_nocow_file_extent() once it's not needed
+ * anymore.
+ */
+ bool free_path;
+
+ /*
+ * Output fields. Only set when can_nocow_file_extent() returns 1.
+ * The expected file extent for the NOCOW write.
+ */
+ struct btrfs_file_extent file_extent;
+};
+
/*
- * when nowcow writeback call back. This checks for snapshots or COW copies
+ * Check if we can NOCOW the file extent that the path points to.
+ * This function may return with the path released, so the caller should check
+ * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
+ *
+ * Returns: < 0 on error
+ * 0 if we can not NOCOW
+ * 1 if we can NOCOW
+ */
+static int can_nocow_file_extent(struct btrfs_path *path,
+ struct btrfs_key *key,
+ struct btrfs_inode *inode,
+ struct can_nocow_file_extent_args *args)
+{
+ const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_root *root = inode->root;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_root *csum_root;
+ u64 io_start;
+ u64 extent_end;
+ u8 extent_type;
+ int can_nocow = 0;
+ int ret = 0;
+ bool nowait = path->nowait;
+
+ fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(leaf, fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ goto out;
+
+ if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
+ extent_type == BTRFS_FILE_EXTENT_REG)
+ goto out;
+
+ /*
+ * If the extent was created before the generation where the last snapshot
+ * for its subvolume was created, then this implies the extent is shared,
+ * hence we must COW.
+ */
+ if (btrfs_file_extent_generation(leaf, fi) <=
+ btrfs_root_last_snapshot(&root->root_item))
+ goto out;
+
+ /* An explicit hole, must COW. */
+ if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
+ goto out;
+
+ /* Compressed/encrypted/encoded extents must be COWed. */
+ if (btrfs_file_extent_compression(leaf, fi) ||
+ btrfs_file_extent_encryption(leaf, fi) ||
+ btrfs_file_extent_other_encoding(leaf, fi))
+ goto out;
+
+ extent_end = btrfs_file_extent_end(path);
+
+ args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+ args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+ args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
+ args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
+
+ /*
+ * The following checks can be expensive, as they need to take other
+ * locks and do btree or rbtree searches, so release the path to avoid
+ * blocking other tasks for too long.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
+ args->file_extent.disk_bytenr, path);
+ WARN_ON_ONCE(ret > 0 && is_freespace_inode);
+ if (ret != 0)
+ goto out;
+
+ if (args->free_path) {
+ /*
+ * We don't need the path anymore, and the btrfs_lookup_csums_list()
+ * call below will end up allocating another path anyway. So free the
+ * path to avoid unnecessary extra memory usage.
+ */
+ btrfs_free_path(path);
+ path = NULL;
+ }
+
+ /* If there are pending snapshots for this root, we must COW. */
+ if (args->writeback_path && !is_freespace_inode &&
+ atomic_read(&root->snapshot_force_cow))
+ goto out;
+
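+ /*
+ * Trim the extent to the part covered by [start, end] and work out
+ * where on disk the NOCOW write would land.
+ */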
+ args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
+ args->file_extent.offset += args->start - key->offset;
+ io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
+
+ /*
+ * Force COW if csums exist in the range. This ensures that csums for a
+ * given extent are either valid or do not exist.
+ */
+
+ csum_root = btrfs_csum_root(root->fs_info, io_start);
+ ret = btrfs_lookup_csums_list(csum_root, io_start,
+ io_start + args->file_extent.num_bytes - 1,
+ NULL, nowait);
+ WARN_ON_ONCE(ret > 0 && is_freespace_inode);
+ if (ret != 0)
+ goto out;
+
+ can_nocow = 1;
+ out:
+ if (args->free_path && path)
+ btrfs_free_path(path);
+
+ return ret < 0 ? ret : can_nocow;
+}
+
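+/*
+ * Run a single NOCOW range: create the io extent map for prealloc
+ * extents, allocate the ordered extent and mark the folios ordered.
+ */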
+static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
+ struct extent_state **cached,
+ struct can_nocow_file_extent_args *nocow_args,
+ u64 file_pos, bool is_prealloc)
+{
+ struct btrfs_ordered_extent *ordered;
+ const u64 len = nocow_args->file_extent.num_bytes;
+ const u64 end = file_pos + len - 1;
+ int ret = 0;
+
+ btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
+
+ if (is_prealloc) {
+ struct extent_map *em;
+
+ em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
+ BTRFS_ORDERED_PREALLOC);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto error;
+ }
+ btrfs_free_extent_map(em);
+ }
+
+ ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
+ is_prealloc
+ ? (1U << BTRFS_ORDERED_PREALLOC)
+ : (1U << BTRFS_ORDERED_NOCOW));
+ if (IS_ERR(ordered)) {
+ if (is_prealloc)
+ btrfs_drop_extent_map_range(inode, file_pos, end, false);
+ ret = PTR_ERR(ordered);
+ goto error;
+ }
+
+ if (btrfs_is_data_reloc_root(inode->root))
+ /*
+ * Errors are handled later, as we must prevent
+ * extent_clear_unlock_delalloc() in error handler from freeing
+ * metadata of the created ordered extent.
+ */
+ ret = btrfs_reloc_clone_csums(ordered);
+ btrfs_put_ordered_extent(ordered);
+
+ if (ret < 0)
+ goto error;
+ extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_CLEAR_DATA_RESV,
+ PAGE_SET_ORDERED);
+ return ret;
+
+error:
+ btrfs_cleanup_ordered_extents(inode, file_pos, len);
+ extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_CLEAR_DATA_RESV,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK);
+ btrfs_err(inode->root->fs_info,
+ "%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
+ file_pos, len, ret);
+ return ret;
+}
+
+/*
+ * Called for nocow writeback. This checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
*
* If no cow copies or snapshots exist, we write directly to the existing
* blocks on disk
*/
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
- struct page *locked_page,
- const u64 start, const u64 end,
- int *page_started,
- unsigned long *nr_written)
+ struct folio *locked_folio,
+ const u64 start, const u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
- struct btrfs_path *path;
+ struct btrfs_path *path = NULL;
u64 cow_start = (u64)-1;
+ /*
+ * If not 0, cow_end is the inclusive end of the last fallback_to_cow()
+ * range and is only used for error handling.
+ *
+ * The same goes for nocow_end: it avoids cleaning up again a range that
+ * nocow_one_range() has already cleaned up.
+ */
+ u64 cow_end = 0;
+ u64 nocow_end = 0;
u64 cur_offset = start;
int ret;
bool check_prev = true;
- const bool freespace_inode = btrfs_is_free_space_inode(inode);
u64 ino = btrfs_ino(inode);
- bool nocow = false;
- u64 disk_bytenr = 0;
- const bool force = inode->flags & BTRFS_INODE_NODATACOW;
+ struct can_nocow_file_extent_args nocow_args = { 0 };
+ /* The range that has ordered extent(s). */
+ u64 oe_cleanup_start;
+ u64 oe_cleanup_len = 0;
+ /* The range that is untouched. */
+ u64 untouched_start;
+ u64 untouched_len = 0;
+ /*
+ * Normally on a zoned device we're only doing COW writes, but relocation
+ * on a zoned filesystem serializes I/O so that we're only writing
+ * sequentially and can end up here as well.
+ */
+ ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
+
+ if (unlikely(btrfs_is_shutdown(fs_info))) {
+ ret = -EIO;
+ goto error;
+ }
path = btrfs_alloc_path();
if (!path) {
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
- EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, PAGE_UNLOCK |
- PAGE_START_WRITEBACK |
- PAGE_END_WRITEBACK);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
- while (1) {
+ nocow_args.end = end;
+ nocow_args.writeback_path = true;
+
+ while (cur_offset <= end) {
+ struct btrfs_block_group *nocow_bg = NULL;
struct btrfs_key found_key;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
+ struct extent_state *cached_state = NULL;
u64 extent_end;
- u64 extent_offset;
- u64 num_bytes = 0;
- u64 disk_num_bytes;
- u64 ram_bytes;
int extent_type;
- nocow = false;
-
ret = btrfs_lookup_file_extent(NULL, root, path, ino,
cur_offset, 0);
if (ret < 0)
@@ -1691,11 +2094,8 @@ next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
+ if (ret < 0)
goto error;
- }
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -1723,12 +2123,13 @@ next_slot:
/*
* If the found extent starts after requested offset, then
- * adjust extent_end to be right before this extent begins
+ * record the gap as needing COW and skip to where this extent begins.
*/
if (found_key.offset > cur_offset) {
- extent_end = found_key.offset;
- extent_type = 0;
- goto out_check;
+ if (cow_start == (u64)-1)
+ cow_start = cur_offset;
+ cur_offset = found_key.offset;
+ goto next_slot;
}
/*
@@ -1738,122 +2139,43 @@ next_slot:
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
+ /* If this is triggered then we have a memory corruption. */
+ ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
+ if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
+ ret = -EUCLEAN;
+ goto error;
+ }
+ extent_end = btrfs_file_extent_end(path);
- ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
- if (extent_type == BTRFS_FILE_EXTENT_REG ||
- extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- extent_offset = btrfs_file_extent_offset(leaf, fi);
- extent_end = found_key.offset +
- btrfs_file_extent_num_bytes(leaf, fi);
- disk_num_bytes =
- btrfs_file_extent_disk_num_bytes(leaf, fi);
- /*
- * If the extent we got ends before our current offset,
- * skip to the next extent.
- */
- if (extent_end <= cur_offset) {
- path->slots[0]++;
- goto next_slot;
- }
- /* Skip holes */
- if (disk_bytenr == 0)
- goto out_check;
- /* Skip compressed/encrypted/encoded extents */
- if (btrfs_file_extent_compression(leaf, fi) ||
- btrfs_file_extent_encryption(leaf, fi) ||
- btrfs_file_extent_other_encoding(leaf, fi))
- goto out_check;
- /*
- * If extent is created before the last volume's snapshot
- * this implies the extent is shared, hence we can't do
- * nocow. This is the same check as in
- * btrfs_cross_ref_exist but without calling
- * btrfs_search_slot.
- */
- if (!freespace_inode &&
- btrfs_file_extent_generation(leaf, fi) <=
- btrfs_root_last_snapshot(&root->root_item))
- goto out_check;
- if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
- goto out_check;
+ /*
+ * If the extent we got ends before our current offset, skip to
+ * the next extent.
+ */
+ if (extent_end <= cur_offset) {
+ path->slots[0]++;
+ goto next_slot;
+ }
- /*
- * The following checks can be expensive, as they need to
- * take other locks and do btree or rbtree searches, so
- * release the path to avoid blocking other tasks for too
- * long.
- */
- btrfs_release_path(path);
+ nocow_args.start = cur_offset;
+ ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
+ if (ret < 0)
+ goto error;
+ if (ret == 0)
+ goto must_cow;
- ret = btrfs_cross_ref_exist(root, ino,
- found_key.offset -
- extent_offset, disk_bytenr, false);
- if (ret) {
- /*
- * ret could be -EIO if the above fails to read
- * metadata.
- */
- if (ret < 0) {
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
- goto error;
- }
-
- WARN_ON_ONCE(freespace_inode);
- goto out_check;
- }
- disk_bytenr += extent_offset;
- disk_bytenr += cur_offset - found_key.offset;
- num_bytes = min(end + 1, extent_end) - cur_offset;
- /*
- * If there are pending snapshots for this root, we
- * fall into common COW way
- */
- if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
- goto out_check;
+ ret = 0;
+ nocow_bg = btrfs_inc_nocow_writers(fs_info,
+ nocow_args.file_extent.disk_bytenr +
+ nocow_args.file_extent.offset);
+ if (!nocow_bg) {
+must_cow:
/*
- * force cow if csum exists in the range.
- * this ensure that csum for a given extent are
- * either valid or do not exist.
+ * If we can't perform NOCOW writeback for the range,
+ * then record the beginning of the range that needs to
+ * be COWed. It will be written out before the next
+ * NOCOW range if we find one, or when exiting this
+ * loop.
*/
- ret = csum_exist_in_range(fs_info, disk_bytenr,
- num_bytes);
- if (ret) {
- /*
- * ret could be -EIO if the above fails to read
- * metadata.
- */
- if (ret < 0) {
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
- goto error;
- }
- WARN_ON_ONCE(freespace_inode);
- goto out_check;
- }
- /* If the extent's block group is RO, we must COW */
- if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
- goto out_check;
- nocow = true;
- } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- extent_end = found_key.offset + ram_bytes;
- extent_end = ALIGN(extent_end, fs_info->sectorsize);
- /* Skip extents outside of our requested range */
- if (extent_end <= start) {
- path->slots[0]++;
- goto next_slot;
- }
- } else {
- /* If this triggers then we have a memory corruption */
- BUG();
- }
-out_check:
- /*
- * If nocow is false then record the beginning of the range
- * that needs to be COWed
- */
- if (!nocow) {
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = extent_end;
@@ -1871,80 +2193,25 @@ out_check:
* NOCOW, following one which needs to be COW'ed
*/
if (cow_start != (u64)-1) {
- ret = fallback_to_cow(inode, locked_page,
- cow_start, found_key.offset - 1,
- page_started, nr_written);
- if (ret)
- goto error;
- cow_start = (u64)-1;
- }
-
- if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
- u64 orig_start = found_key.offset - extent_offset;
- struct extent_map *em;
-
- em = create_io_em(inode, cur_offset, num_bytes,
- orig_start,
- disk_bytenr, /* block_start */
- num_bytes, /* block_len */
- disk_num_bytes, /* orig_block_len */
- ram_bytes, BTRFS_COMPRESS_NONE,
- BTRFS_ORDERED_PREALLOC);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto error;
- }
- free_extent_map(em);
- ret = btrfs_add_ordered_extent(inode, cur_offset,
- disk_bytenr, num_bytes,
- num_bytes,
- BTRFS_ORDERED_PREALLOC);
+ ret = fallback_to_cow(inode, locked_folio, cow_start,
+ found_key.offset - 1);
if (ret) {
- btrfs_drop_extent_cache(inode, cur_offset,
- cur_offset + num_bytes - 1,
- 0);
+ cow_end = found_key.offset - 1;
+ btrfs_dec_nocow_writers(nocow_bg);
goto error;
}
- } else {
- ret = btrfs_add_ordered_extent(inode, cur_offset,
- disk_bytenr, num_bytes,
- num_bytes,
- BTRFS_ORDERED_NOCOW);
- if (ret)
- goto error;
+ cow_start = (u64)-1;
}
- if (nocow)
- btrfs_dec_nocow_writers(fs_info, disk_bytenr);
- nocow = false;
-
- if (btrfs_is_data_reloc_root(root))
- /*
- * Error handled later, as we must prevent
- * extent_clear_unlock_delalloc() in error handler
- * from freeing metadata of created ordered extent.
- */
- ret = btrfs_reloc_clone_csums(inode, cur_offset,
- num_bytes);
-
- extent_clear_unlock_delalloc(inode, cur_offset,
- cur_offset + num_bytes - 1,
- locked_page, EXTENT_LOCKED |
- EXTENT_DELALLOC |
- EXTENT_CLEAR_DATA_RESV,
- PAGE_UNLOCK | PAGE_SET_ORDERED);
-
- cur_offset = extent_end;
-
- /*
- * btrfs_reloc_clone_csums() error, now we're OK to call error
- * handler, as metadata for created ordered extent will only
- * be freed by btrfs_finish_ordered_io().
- */
- if (ret)
+ ret = nocow_one_range(inode, locked_folio, &cached_state,
+ &nocow_args, cur_offset,
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC);
+ btrfs_dec_nocow_writers(nocow_bg);
+ if (ret < 0) {
+ nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
goto error;
- if (cur_offset > end)
- break;
+ }
+ cur_offset = extent_end;
}
btrfs_release_path(path);
@@ -1952,25 +2219,113 @@ out_check:
cow_start = cur_offset;
if (cow_start != (u64)-1) {
- cur_offset = end;
- ret = fallback_to_cow(inode, locked_page, cow_start, end,
- page_started, nr_written);
- if (ret)
+ ret = fallback_to_cow(inode, locked_folio, cow_start, end);
+ if (ret) {
+ cow_end = end;
goto error;
+ }
+ cow_start = (u64)-1;
}
+ /*
+ * Everything finished without an error, so we can unlock the folios now.
+ *
+ * No need to touch the io tree range nor set the folio ordered flag, as
+ * fallback_to_cow() and nocow_one_range() have already handled them.
+ */
+ extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);
+
+ btrfs_free_path(path);
+ return 0;
+
error:
- if (nocow)
- btrfs_dec_nocow_writers(fs_info, disk_bytenr);
+ if (cow_start == (u64)-1) {
+ /*
+ * case a)
+ * start cur_offset end
+ * | OE cleanup | Untouched |
+ *
+ * We finished a fallback_to_cow() or nocow_one_range() call,
+ * but failed to check the next range.
+ *
+ * or
+ * start cur_offset nocow_end end
+ * | OE cleanup | Skip | Untouched |
+ *
+ * nocow_one_range() failed, the range [cur_offset, nocow_end] is
+ * already cleaned up.
+ */
+ oe_cleanup_start = start;
+ oe_cleanup_len = cur_offset - start;
+ if (nocow_end)
+ untouched_start = nocow_end + 1;
+ else
+ untouched_start = cur_offset;
+ untouched_len = end + 1 - untouched_start;
+ } else if (cow_start != (u64)-1 && cow_end == 0) {
+ /*
+ * case b)
+ * start cow_start cur_offset end
+ * | OE cleanup | Untouched |
+ *
+ * We recorded a range that needs COW, but failed before we hit the next
+ * NOCOW range, thus [cow_start, cur_offset) doesn't yet have any OE.
+ */
+ oe_cleanup_start = start;
+ oe_cleanup_len = cow_start - start;
+ untouched_start = cow_start;
+ untouched_len = end + 1 - untouched_start;
+ } else {
+ /*
+ * case c)
+ * start cow_start cow_end end
+ * | OE cleanup | Skip | Untouched |
+ *
+ * fallback_to_cow() failed, and since fallback_to_cow() does the
+ * cleanup for its own range, we shouldn't touch the range
+ * [cow_start, cow_end].
+ */
+ ASSERT(cow_start != (u64)-1 && cow_end != 0);
+ oe_cleanup_start = start;
+ oe_cleanup_len = cow_start - start;
+ untouched_start = cow_end + 1;
+ untouched_len = end + 1 - untouched_start;
+ }
+
+ if (oe_cleanup_len) {
+ const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;
+ btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
+ extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
+ locked_folio, NULL,
+ EXTENT_LOCKED | EXTENT_DELALLOC,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK);
+ }
+
+ if (untouched_len) {
+ struct extent_state *cached = NULL;
+ const u64 untouched_end = untouched_start + untouched_len - 1;
- if (ret && cur_offset < end)
- extent_clear_unlock_delalloc(inode, cur_offset, end,
- locked_page, EXTENT_LOCKED |
- EXTENT_DELALLOC | EXTENT_DEFRAG |
+ /*
+ * We need to lock the extent here because we're clearing DELALLOC and
+ * we're not locked at this point.
+ */
+ btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
+ extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
+ locked_folio, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
+ btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
+ }
btrfs_free_path(path);
+ btrfs_err(fs_info,
+"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
+ __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
+ start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
+ untouched_start, untouched_len, ret);
return ret;
}
@@ -1978,8 +2333,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
if (inode->defrag_bytes &&
- test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
- 0, NULL))
+ btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
return false;
return true;
}
@@ -1990,63 +2344,51 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
* Function to process delayed allocation (create CoW) for ranges which are
* being touched for the first time.
*/
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
- u64 start, u64 end, int *page_started, unsigned long *nr_written,
- struct writeback_control *wbc)
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
+ u64 start, u64 end, struct writeback_control *wbc)
{
- int ret;
const bool zoned = btrfs_is_zoned(inode->root->fs_info);
+ int ret;
/*
- * The range must cover part of the @locked_page, or the returned
- * @page_started can confuse the caller.
+ * The range must cover part of the @locked_folio, or a return of 1
+ * can confuse the caller.
*/
- ASSERT(!(end <= page_offset(locked_page) ||
- start >= page_offset(locked_page) + PAGE_SIZE));
+ ASSERT(!(end <= folio_pos(locked_folio) ||
+ start >= folio_next_pos(locked_folio)));
if (should_nocow(inode, start, end)) {
- /*
- * Normally on a zoned device we're only doing COW writes, but
- * in case of relocation on a zoned filesystem we have taken
- * precaution, that we're only writing sequentially. It's safe
- * to use run_delalloc_nocow() here, like for regular
- * preallocated inodes.
- */
- ASSERT(!zoned ||
- (zoned && btrfs_is_data_reloc_root(inode->root)));
- ret = run_delalloc_nocow(inode, locked_page, start, end,
- page_started, nr_written);
- } else if (!inode_can_compress(inode) ||
- !inode_need_compress(inode, start, end)) {
- if (zoned)
- ret = run_delalloc_zoned(inode, locked_page, start, end,
- page_started, nr_written);
- else
- ret = cow_file_range(inode, locked_page, start, end,
- page_started, nr_written, 1);
- } else {
- set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
- ret = cow_file_range_async(inode, wbc, locked_page, start, end,
- page_started, nr_written);
+ ret = run_delalloc_nocow(inode, locked_folio, start, end);
+ return ret;
}
- ASSERT(ret <= 0);
- if (ret)
- btrfs_cleanup_ordered_extents(inode, locked_page, start,
- end - start + 1);
+
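+ /*
+ * If run_delalloc_compressed() queued async compression it now owns
+ * the whole range, including @locked_folio, so report 1 to the caller.
+ */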
+ if (btrfs_inode_can_compress(inode) &&
+ inode_need_compress(inode, start, end) &&
+ run_delalloc_compressed(inode, locked_folio, start, end, wbc))
+ return 1;
+
+ if (zoned)
+ ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
+ true);
+ else
+ ret = cow_file_range(inode, locked_folio, start, end, NULL, 0);
return ret;
}
-void btrfs_split_delalloc_extent(struct inode *inode,
+void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *orig, u64 split)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 size;
+ lockdep_assert_held(&inode->io_tree.lock);
+
/* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC))
return;
size = orig->end - orig->start + 1;
- if (size > BTRFS_MAX_EXTENT_SIZE) {
+ if (size > fs_info->max_extent_size) {
u32 num_extents;
u64 new_size;
@@ -2055,16 +2397,16 @@ void btrfs_split_delalloc_extent(struct inode *inode,
* applies here, just in reverse.
*/
new_size = orig->end - split + 1;
- num_extents = count_max_extents(new_size);
+ num_extents = count_max_extents(fs_info, new_size);
new_size = split - orig->start;
- num_extents += count_max_extents(new_size);
- if (count_max_extents(size) >= num_extents)
+ num_extents += count_max_extents(fs_info, new_size);
+ if (count_max_extents(fs_info, size) >= num_extents)
return;
}
- spin_lock(&BTRFS_I(inode)->lock);
- btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
- spin_unlock(&BTRFS_I(inode)->lock);
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, 1);
+ spin_unlock(&inode->lock);
}
/*
@@ -2072,12 +2414,15 @@ void btrfs_split_delalloc_extent(struct inode *inode,
* that are just merged onto old extents, such as when we are doing sequential
* writes, so we can properly account for the metadata space we'll need.
*/
-void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
+void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
struct extent_state *other)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 new_size, old_size;
u32 num_extents;
+ lockdep_assert_held(&inode->io_tree.lock);
+
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
return;
@@ -2088,10 +2433,10 @@ void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
new_size = other->end - new->start + 1;
/* we're not bigger than the max, unreserve the space and go */
- if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
- spin_lock(&BTRFS_I(inode)->lock);
- btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
- spin_unlock(&BTRFS_I(inode)->lock);
+ if (new_size <= fs_info->max_extent_size) {
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, -1);
+ spin_unlock(&inode->lock);
return;
}
@@ -2114,94 +2459,87 @@ void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
* this case.
*/
old_size = other->end - other->start + 1;
- num_extents = count_max_extents(old_size);
+ num_extents = count_max_extents(fs_info, old_size);
old_size = new->end - new->start + 1;
- num_extents += count_max_extents(old_size);
- if (count_max_extents(new_size) >= num_extents)
+ num_extents += count_max_extents(fs_info, old_size);
+ if (count_max_extents(fs_info, new_size) >= num_extents)
return;
- spin_lock(&BTRFS_I(inode)->lock);
- btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
- spin_unlock(&BTRFS_I(inode)->lock);
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, -1);
+ spin_unlock(&inode->lock);
}
-static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
- struct inode *inode)
+static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&root->delalloc_lock);
- if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
- list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
- &root->delalloc_inodes);
- set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
- &BTRFS_I(inode)->runtime_flags);
- root->nr_delalloc_inodes++;
- if (root->nr_delalloc_inodes == 1) {
- spin_lock(&fs_info->delalloc_root_lock);
- BUG_ON(!list_empty(&root->delalloc_root));
- list_add_tail(&root->delalloc_root,
- &fs_info->delalloc_roots);
- spin_unlock(&fs_info->delalloc_root_lock);
- }
+ ASSERT(list_empty(&inode->delalloc_inodes));
+ list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
+ root->nr_delalloc_inodes++;
+ if (root->nr_delalloc_inodes == 1) {
+ spin_lock(&fs_info->delalloc_root_lock);
+ ASSERT(list_empty(&root->delalloc_root));
+ list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
+ spin_unlock(&fs_info->delalloc_root_lock);
}
spin_unlock(&root->delalloc_lock);
}
-
-void __btrfs_del_delalloc_inode(struct btrfs_root *root,
- struct btrfs_inode *inode)
+void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
+ lockdep_assert_held(&root->delalloc_lock);
+
+ /*
+ * We may be called after the inode was already deleted from the list,
+ * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
+ * and then later through btrfs_clear_delalloc_extent() while the inode
+ * still has ->delalloc_bytes > 0.
+ */
if (!list_empty(&inode->delalloc_inodes)) {
list_del_init(&inode->delalloc_inodes);
- clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
- &inode->runtime_flags);
root->nr_delalloc_inodes--;
if (!root->nr_delalloc_inodes) {
ASSERT(list_empty(&root->delalloc_inodes));
spin_lock(&fs_info->delalloc_root_lock);
- BUG_ON(list_empty(&root->delalloc_root));
+ ASSERT(!list_empty(&root->delalloc_root));
list_del_init(&root->delalloc_root);
spin_unlock(&fs_info->delalloc_root_lock);
}
}
}
-static void btrfs_del_delalloc_inode(struct btrfs_root *root,
- struct btrfs_inode *inode)
-{
- spin_lock(&root->delalloc_lock);
- __btrfs_del_delalloc_inode(root, inode);
- spin_unlock(&root->delalloc_lock);
-}
-
/*
* Properly track delayed allocation bytes in the inode and to maintain the
* list of inodes that have pending delalloc work to be done.
*/
-void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
- unsigned *bits)
+void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
+ u32 bits)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
- if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
+ lockdep_assert_held(&inode->io_tree.lock);
+
+ if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
WARN_ON(1);
/*
* set_bit and clear bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
- if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
u64 len = state->end + 1 - state->start;
- u32 num_extents = count_max_extents(len);
- bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
+ u64 prev_delalloc_bytes;
+ u32 num_extents = count_max_extents(fs_info, len);
- spin_lock(&BTRFS_I(inode)->lock);
- btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
- spin_unlock(&BTRFS_I(inode)->lock);
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, num_extents);
+ spin_unlock(&inode->lock);
/* For sanity tests */
if (btrfs_is_testing(fs_info))
@@ -2209,22 +2547,28 @@ void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
fs_info->delalloc_batch);
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->delalloc_bytes += len;
- if (*bits & EXTENT_DEFRAG)
- BTRFS_I(inode)->defrag_bytes += len;
- if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
- &BTRFS_I(inode)->runtime_flags))
- btrfs_add_delalloc_inodes(root, inode);
- spin_unlock(&BTRFS_I(inode)->lock);
+ spin_lock(&inode->lock);
+ prev_delalloc_bytes = inode->delalloc_bytes;
+ inode->delalloc_bytes += len;
+ if (bits & EXTENT_DEFRAG)
+ inode->defrag_bytes += len;
+ spin_unlock(&inode->lock);
+
+ /*
+ * We don't need to be under the protection of the inode's lock,
+ * because we are called while holding the inode's io_tree lock
+ * and are therefore protected against concurrent calls of this
+ * function and btrfs_clear_delalloc_extent().
+ */
+ if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
+ btrfs_add_delalloc_inode(inode);
}
if (!(state->state & EXTENT_DELALLOC_NEW) &&
- (*bits & EXTENT_DELALLOC_NEW)) {
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
- state->start;
- spin_unlock(&BTRFS_I(inode)->lock);
+ (bits & EXTENT_DELALLOC_NEW)) {
+ spin_lock(&inode->lock);
+ inode->new_delalloc_bytes += state->end + 1 - state->start;
+ spin_unlock(&inode->lock);
}
}
@@ -2232,15 +2576,16 @@ void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
* Once a range is no longer delalloc this function ensures that proper
* accounting happens.
*/
-void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
- struct extent_state *state, unsigned *bits)
+void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
+ struct extent_state *state, u32 bits)
{
- struct btrfs_inode *inode = BTRFS_I(vfs_inode);
- struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 len = state->end + 1 - state->start;
- u32 num_extents = count_max_extents(len);
+ u32 num_extents = count_max_extents(fs_info, len);
+
+ lockdep_assert_held(&inode->io_tree.lock);
- if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
+ if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
spin_lock(&inode->lock);
inode->defrag_bytes -= len;
spin_unlock(&inode->lock);
@@ -2251,9 +2596,9 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
* but in this case, we are only testing for the DELALLOC
* bit, which is only set or cleared with irqs on
*/
- if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
+ if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = inode->root;
- bool do_list = !btrfs_is_free_space_inode(inode);
+ u64 new_delalloc_bytes;
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, -num_extents);
@@ -2264,321 +2609,52 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
* don't need to call delalloc_release_metadata if there is an
* error.
*/
- if (*bits & EXTENT_CLEAR_META_RESV &&
+ if (bits & EXTENT_CLEAR_META_RESV &&
root != fs_info->tree_root)
- btrfs_delalloc_release_metadata(inode, len, false);
+ btrfs_delalloc_release_metadata(inode, len, true);
/* For sanity tests. */
if (btrfs_is_testing(fs_info))
return;
if (!btrfs_is_data_reloc_root(root) &&
- do_list && !(state->state & EXTENT_NORESERVE) &&
- (*bits & EXTENT_CLEAR_DATA_RESV))
- btrfs_free_reserved_data_space_noquota(fs_info, len);
+ !btrfs_is_free_space_inode(inode) &&
+ !(state->state & EXTENT_NORESERVE) &&
+ (bits & EXTENT_CLEAR_DATA_RESV))
+ btrfs_free_reserved_data_space_noquota(inode, len);
percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
fs_info->delalloc_batch);
spin_lock(&inode->lock);
inode->delalloc_bytes -= len;
- if (do_list && inode->delalloc_bytes == 0 &&
- test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
- &inode->runtime_flags))
- btrfs_del_delalloc_inode(root, inode);
+ new_delalloc_bytes = inode->delalloc_bytes;
spin_unlock(&inode->lock);
+
+ /*
+ * We don't need to be under the protection of the inode's lock,
+ * because we are called while holding the inode's io_tree lock
+ * and are therefore protected against concurrent calls of this
+ * function and btrfs_set_delalloc_extent().
+ */
+ if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
+ spin_lock(&root->delalloc_lock);
+ btrfs_del_delalloc_inode(inode);
+ spin_unlock(&root->delalloc_lock);
+ }
}
if ((state->state & EXTENT_DELALLOC_NEW) &&
- (*bits & EXTENT_DELALLOC_NEW)) {
+ (bits & EXTENT_DELALLOC_NEW)) {
spin_lock(&inode->lock);
ASSERT(inode->new_delalloc_bytes >= len);
inode->new_delalloc_bytes -= len;
- if (*bits & EXTENT_ADD_INODE_BYTES)
+ if (bits & EXTENT_ADD_INODE_BYTES)
inode_add_bytes(&inode->vfs_inode, len);
spin_unlock(&inode->lock);
}
}
/*
- * in order to insert checksums into the metadata in large chunks,
- * we wait until bio submission time. All the pages in the bio are
- * checksummed and sums are attached onto the ordered extent record.
- *
- * At IO completion time the cums attached on the ordered extent record
- * are inserted into the btree
- */
-static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
- u64 dio_file_offset)
-{
- return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
-}
-
-/*
- * Split an extent_map at [start, start + len]
- *
- * This function is intended to be used only for extract_ordered_extent().
- */
-static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
- u64 pre, u64 post)
-{
- struct extent_map_tree *em_tree = &inode->extent_tree;
- struct extent_map *em;
- struct extent_map *split_pre = NULL;
- struct extent_map *split_mid = NULL;
- struct extent_map *split_post = NULL;
- int ret = 0;
- unsigned long flags;
-
- /* Sanity check */
- if (pre == 0 && post == 0)
- return 0;
-
- split_pre = alloc_extent_map();
- if (pre)
- split_mid = alloc_extent_map();
- if (post)
- split_post = alloc_extent_map();
- if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
- ret = -ENOMEM;
- goto out;
- }
-
- ASSERT(pre + post < len);
-
- lock_extent(&inode->io_tree, start, start + len - 1);
- write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
- if (!em) {
- ret = -EIO;
- goto out_unlock;
- }
-
- ASSERT(em->len == len);
- ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
- ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
- ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
- ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
- ASSERT(!list_empty(&em->list));
-
- flags = em->flags;
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
-
- /* First, replace the em with a new extent_map starting from * em->start */
- split_pre->start = em->start;
- split_pre->len = (pre ? pre : em->len - post);
- split_pre->orig_start = split_pre->start;
- split_pre->block_start = em->block_start;
- split_pre->block_len = split_pre->len;
- split_pre->orig_block_len = split_pre->block_len;
- split_pre->ram_bytes = split_pre->len;
- split_pre->flags = flags;
- split_pre->compress_type = em->compress_type;
- split_pre->generation = em->generation;
-
- replace_extent_mapping(em_tree, em, split_pre, 1);
-
- /*
- * Now we only have an extent_map at:
- * [em->start, em->start + pre] if pre != 0
- * [em->start, em->start + em->len - post] if pre == 0
- */
-
- if (pre) {
- /* Insert the middle extent_map */
- split_mid->start = em->start + pre;
- split_mid->len = em->len - pre - post;
- split_mid->orig_start = split_mid->start;
- split_mid->block_start = em->block_start + pre;
- split_mid->block_len = split_mid->len;
- split_mid->orig_block_len = split_mid->block_len;
- split_mid->ram_bytes = split_mid->len;
- split_mid->flags = flags;
- split_mid->compress_type = em->compress_type;
- split_mid->generation = em->generation;
- add_extent_mapping(em_tree, split_mid, 1);
- }
-
- if (post) {
- split_post->start = em->start + em->len - post;
- split_post->len = post;
- split_post->orig_start = split_post->start;
- split_post->block_start = em->block_start + em->len - post;
- split_post->block_len = split_post->len;
- split_post->orig_block_len = split_post->block_len;
- split_post->ram_bytes = split_post->len;
- split_post->flags = flags;
- split_post->compress_type = em->compress_type;
- split_post->generation = em->generation;
- add_extent_mapping(em_tree, split_post, 1);
- }
-
- /* Once for us */
- free_extent_map(em);
- /* Once for the tree */
- free_extent_map(em);
-
-out_unlock:
- write_unlock(&em_tree->lock);
- unlock_extent(&inode->io_tree, start, start + len - 1);
-out:
- free_extent_map(split_pre);
- free_extent_map(split_mid);
- free_extent_map(split_post);
-
- return ret;
-}
-
-static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
- struct bio *bio, loff_t file_offset)
-{
- struct btrfs_ordered_extent *ordered;
- u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
- u64 file_len;
- u64 len = bio->bi_iter.bi_size;
- u64 end = start + len;
- u64 ordered_end;
- u64 pre, post;
- int ret = 0;
-
- ordered = btrfs_lookup_ordered_extent(inode, file_offset);
- if (WARN_ON_ONCE(!ordered))
- return BLK_STS_IOERR;
-
- /* No need to split */
- if (ordered->disk_num_bytes == len)
- goto out;
-
- /* We cannot split once end_bio'd ordered extent */
- if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
- ret = -EINVAL;
- goto out;
- }
-
- /* We cannot split a compressed ordered extent */
- if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
- ret = -EINVAL;
- goto out;
- }
-
- ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
- /* bio must be in one ordered extent */
- if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Checksum list should be empty */
- if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
- ret = -EINVAL;
- goto out;
- }
-
- file_len = ordered->num_bytes;
- pre = start - ordered->disk_bytenr;
- post = ordered_end - end;
-
- ret = btrfs_split_ordered_extent(ordered, pre, post);
- if (ret)
- goto out;
- ret = split_zoned_em(inode, file_offset, file_len, pre, post);
-
-out:
- btrfs_put_ordered_extent(ordered);
-
- return errno_to_blk_status(ret);
-}
-
-/*
- * extent_io.c submission hook. This does the right thing for csum calculation
- * on write, or reading the csums from the tree before a read.
- *
- * Rules about async/sync submit,
- * a) read: sync submit
- *
- * b) write without checksum: sync submit
- *
- * c) write with checksum:
- * c-1) if bio is issued by fsync: sync submit
- * (sync_writers != 0)
- *
- * c-2) if root is reloc root: sync submit
- * (only in case of buffered IO)
- *
- * c-3) otherwise: async submit
- */
-blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags)
-
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
- enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
- blk_status_t ret = 0;
- int skip_sum;
- int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
-
- skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
- !fs_info->csum_root;
-
- if (btrfs_is_free_space_inode(BTRFS_I(inode)))
- metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
-
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- struct page *page = bio_first_bvec_all(bio)->bv_page;
- loff_t file_offset = page_offset(page);
-
- ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
- if (ret)
- goto out;
- }
-
- if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
- ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
- if (ret)
- goto out;
-
- if (bio_flags & EXTENT_BIO_COMPRESSED) {
- ret = btrfs_submit_compressed_read(inode, bio,
- mirror_num,
- bio_flags);
- goto out;
- } else {
- /*
- * Lookup bio sums does extra checks around whether we
- * need to csum or not, which is why we ignore skip_sum
- * here.
- */
- ret = btrfs_lookup_bio_sums(inode, bio, NULL);
- if (ret)
- goto out;
- }
- goto mapit;
- } else if (async && !skip_sum) {
- /* csum items have already been cloned */
- if (btrfs_is_data_reloc_root(root))
- goto mapit;
- /* we're doing a write, do the async checksumming */
- ret = btrfs_wq_submit_bio(inode, bio, mirror_num, bio_flags,
- 0, btrfs_submit_bio_start);
- goto out;
- } else if (!skip_sum) {
- ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
- if (ret)
- goto out;
- }
-
-mapit:
- ret = btrfs_map_bio(fs_info, bio, mirror_num);
-
-out:
- if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
- }
- return ret;
-}
-
-/*
* given a list of ordered sums record them in the inode. This happens
* at IO completion time based on sums calculated at bio submission time.
*/
@@ -2586,11 +2662,15 @@ static int add_pending_csums(struct btrfs_trans_handle *trans,
struct list_head *list)
{
struct btrfs_ordered_sum *sum;
+ struct btrfs_root *csum_root = NULL;
int ret;
list_for_each_entry(sum, list, list) {
trans->adding_csums = true;
- ret = btrfs_csum_file_blocks(trans, trans->fs_info->csum_root, sum);
+ if (!csum_root)
+ csum_root = btrfs_csum_root(trans->fs_info,
+ sum->logical);
+ ret = btrfs_csum_file_blocks(trans, csum_root, sum);
trans->adding_csums = false;
if (ret)
return ret;
@@ -2612,11 +2692,11 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
u64 em_len;
int ret = 0;
- em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
+ em = btrfs_get_extent(inode, NULL, search_start, search_len);
if (IS_ERR(em))
return PTR_ERR(em);
- if (em->block_start != EXTENT_MAP_HOLE)
+ if (em->disk_bytenr != EXTENT_MAP_HOLE)
goto next;
em_len = em->len;
@@ -2625,13 +2705,12 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
if (em_len > search_len)
em_len = search_len;
- ret = set_extent_bit(&inode->io_tree, search_start,
- search_start + em_len - 1,
- EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
- GFP_NOFS, NULL);
+ ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
+ search_start + em_len - 1,
+ EXTENT_DELALLOC_NEW, cached_state);
next:
- search_start = extent_map_end(em);
- free_extent_map(em);
+ search_start = btrfs_extent_map_end(em);
+ btrfs_free_extent_map(em);
if (ret)
return ret;
}
@@ -2661,72 +2740,69 @@ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
return ret;
}
- return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
- cached_state);
+ return btrfs_set_extent_bit(&inode->io_tree, start, end,
+ EXTENT_DELALLOC | extra_bits, cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
- struct page *page;
- struct inode *inode;
+ struct folio *folio;
+ struct btrfs_inode *inode;
struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
- struct btrfs_writepage_fixup *fixup;
+ struct btrfs_writepage_fixup *fixup =
+ container_of(work, struct btrfs_writepage_fixup, work);
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
- struct page *page;
- struct btrfs_inode *inode;
- u64 page_start;
- u64 page_end;
+ struct folio *folio = fixup->folio;
+ struct btrfs_inode *inode = fixup->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ u64 page_start = folio_pos(folio);
+ u64 page_end = folio_next_pos(folio) - 1;
int ret = 0;
bool free_delalloc_space = true;
- fixup = container_of(work, struct btrfs_writepage_fixup, work);
- page = fixup->page;
- inode = BTRFS_I(fixup->inode);
- page_start = page_offset(page);
- page_end = page_offset(page) + PAGE_SIZE - 1;
-
/*
* This is similar to page_mkwrite, we need to reserve the space before
- * we take the page lock.
+ * we take the folio lock.
*/
ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
- PAGE_SIZE);
+ folio_size(folio));
again:
- lock_page(page);
+ folio_lock(folio);
/*
- * Before we queued this fixup, we took a reference on the page.
- * page->mapping may go NULL, but it shouldn't be moved to a different
+ * Before we queued this fixup, we took a reference on the folio.
+ * folio->mapping may go NULL, but it shouldn't be moved to a different
* address space.
*/
- if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
+ if (!folio->mapping || !folio_test_dirty(folio) ||
+ !folio_test_checked(folio)) {
/*
* Unfortunately this is a little tricky, either
*
- * 1) We got here and our page had already been dealt with and
+ * 1) We got here and our folio had already been dealt with and
* we reserved our space, thus ret == 0, so we need to just
* drop our space reservation and bail. This can happen the
* first time we come into the fixup worker, or could happen
* while waiting for the ordered extent.
- * 2) Our page was already dealt with, but we happened to get an
+ * 2) Our folio was already dealt with, but we happened to get an
* ENOSPC above from the btrfs_delalloc_reserve_space. In
* this case we obviously don't have anything to release, but
- * because the page was already dealt with we don't want to
- * mark the page with an error, so make sure we're resetting
+ * because the folio was already dealt with we don't want to
+ * mark the folio with an error, so make sure we're resetting
* ret to 0. This is why we have this check _before_ the ret
* check, because we do not want to have a surprise ENOSPC
- * when the page was already properly dealt with.
+ * when the folio was already properly dealt with.
*/
if (!ret) {
- btrfs_delalloc_release_extents(inode, PAGE_SIZE);
+ btrfs_delalloc_release_extents(inode, folio_size(folio));
btrfs_delalloc_release_space(inode, data_reserved,
- page_start, PAGE_SIZE,
+ page_start, folio_size(folio),
true);
}
ret = 0;
@@ -2734,24 +2810,24 @@ again:
}
/*
- * We can't mess with the page state unless it is locked, so now that
+ * We can't mess with the folio state unless it is locked, so now that
* it is locked bail if we failed to make our space reservation.
*/
if (ret)
goto out_page;
- lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
/* already ordered? We're done */
- if (PageOrdered(page))
+ if (folio_test_ordered(folio))
goto out_reserved;
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
if (ordered) {
- unlock_extent_cached(&inode->io_tree, page_start, page_end,
- &cached_state);
- unlock_page(page);
- btrfs_start_ordered_extent(ordered, 1);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+ &cached_state);
+ folio_unlock(folio);
+ btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
}
@@ -2768,29 +2844,28 @@ again:
*
* The page was dirty when we started, nothing should have cleaned it.
*/
- BUG_ON(!PageDirty(page));
+ BUG_ON(!folio_test_dirty(folio));
free_delalloc_space = false;
out_reserved:
btrfs_delalloc_release_extents(inode, PAGE_SIZE);
if (free_delalloc_space)
btrfs_delalloc_release_space(inode, data_reserved, page_start,
PAGE_SIZE, true);
- unlock_extent_cached(&inode->io_tree, page_start, page_end,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
if (ret) {
/*
* We hit ENOSPC or other errors. Update the mapping and page
* to reflect the errors and clean the page.
*/
- mapping_set_error(page->mapping, ret);
- end_extent_writepage(page, ret, page_start, page_end);
- clear_page_dirty_for_io(page);
- SetPageError(page);
- }
- btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
- unlock_page(page);
- put_page(page);
+ mapping_set_error(folio->mapping, ret);
+ btrfs_mark_ordered_io_finished(inode, folio, page_start,
+ folio_size(folio), !ret);
+ folio_clear_dirty_for_io(folio);
+ }
+ btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
+ folio_unlock(folio);
+ folio_put(folio);
kfree(fixup);
extent_changeset_free(data_reserved);
/*
@@ -2798,38 +2873,54 @@ out_page:
* that could need flushing space. Recursing back to fixup worker would
* deadlock.
*/
- btrfs_add_delayed_iput(&inode->vfs_inode);
+ btrfs_add_delayed_iput(inode);
}
/*
* There are a few paths in the higher layers of the kernel that directly
- * set the page dirty bit without asking the filesystem if it is a
+ * set the folio dirty bit without asking the filesystem if it is a
* good idea. This causes problems because we want to make sure COW
* properly happens and the data=ordered rules are followed.
*
* In our case any range that doesn't have the ORDERED bit set
* hasn't been properly setup for IO. We kick off an async process
* to fix it up. The async helper will wait for ordered extents, set
- * the delalloc bit and make it safe to write the page.
+ * the delalloc bit and make it safe to write the folio.
*/
-int btrfs_writepage_cow_fixup(struct page *page)
+int btrfs_writepage_cow_fixup(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct inode *inode = folio->mapping->host;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_writepage_fixup *fixup;
- /* This page has ordered extent covering it already */
- if (PageOrdered(page))
+ /* This folio has ordered extent covering it already */
+ if (folio_test_ordered(folio))
return 0;
/*
- * PageChecked is set below when we create a fixup worker for this page,
- * don't try to create another one if we're already PageChecked()
+ * For experimental builds, we error out instead of returning EAGAIN.
+ *
+ * We should not hit such out-of-band dirty folios anymore.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
+ DEBUG_WARN();
+ btrfs_err_rl(fs_info,
+ "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
+ btrfs_root_id(BTRFS_I(inode)->root),
+ btrfs_ino(BTRFS_I(inode)),
+ folio_pos(folio));
+ return -EUCLEAN;
+ }
+
+ /*
+ * folio_checked is set below when we create a fixup worker for this
+ * folio, don't try to create another one if we're already
+ * folio_test_checked.
*
- * The extent_io writepage code will redirty the page if we send back
+ * The extent_io writepage code will redirty the folio if we send back
* EAGAIN.
*/
- if (PageChecked(page))
+ if (folio_test_checked(folio))
return -EAGAIN;
fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
@@ -2839,15 +2930,15 @@ int btrfs_writepage_cow_fixup(struct page *page)
/*
* We are already holding a reference to this inode from
* write_cache_pages. We need to hold it because the space reservation
- * takes place outside of the page lock, and we can't trust
- * page->mapping outside of the page lock.
+ * takes place outside of the folio lock, and we can't trust
+ * folio->mapping outside of the folio lock.
*/
ihold(inode);
- btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
- get_page(page);
- btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
- fixup->page = page;
- fixup->inode = inode;
+ btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
+ folio_get(folio);
+ btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
+ fixup->folio = folio;
+ fixup->inode = BTRFS_I(inode);
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
return -EAGAIN;
@@ -2861,11 +2952,12 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
const u64 sectorsize = root->fs_info->sectorsize;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key ins;
u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
+ u64 offset = btrfs_stack_file_extent_offset(stack_fi);
u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
struct btrfs_drop_extents_args drop_args = { 0 };
@@ -2895,8 +2987,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
if (!drop_args.extent_inserted) {
ins.objectid = btrfs_ino(inode);
- ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
+ ins.offset = file_pos;
ret = btrfs_insert_empty_item(trans, root, path, &ins,
sizeof(*stack_fi));
@@ -2909,14 +3001,13 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(struct btrfs_file_extent_item));
- btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
/*
* If we dropped an inline extent here, we know the range where it is
* was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
* number of bytes only for that range containing the inline extent.
- * The remaining of the range will be processed when clearning the
+ * The remainder of the range will be processed when clearing the
* EXTENT_DELALLOC_BIT bit through the ordered extent completion.
*/
if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
@@ -2932,18 +3023,17 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
ins.objectid = disk_bytenr;
- ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
+ ins.offset = disk_num_bytes;
ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
if (ret)
goto out;
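+ /*
+ * The data backref offset is the file offset of the start of the whole
+ * extent, so subtract the offset into the extent from @file_pos.
+ */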
ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
- file_pos, qgroup_reserved, &ins);
+ file_pos - offset,
+ qgroup_reserved, &ins);
out:
- btrfs_free_path(path);
-
return ret;
}
@@ -2966,20 +3056,20 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *oe)
{
struct btrfs_file_extent_item stack_fi;
- u64 logical_len;
bool update_inode_bytes;
+ u64 num_bytes = oe->num_bytes;
+ u64 ram_bytes = oe->ram_bytes;
memset(&stack_fi, 0, sizeof(stack_fi));
btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
oe->disk_num_bytes);
+ btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
- logical_len = oe->truncated_len;
- else
- logical_len = oe->num_bytes;
- btrfs_set_stack_file_extent_num_bytes(&stack_fi, logical_len);
- btrfs_set_stack_file_extent_ram_bytes(&stack_fi, logical_len);
+ num_bytes = oe->truncated_len;
+ btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
+ btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
/* Encryption and other encoding is reserved and all 0 */
@@ -2990,9 +3080,10 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
* except if the ordered extent was truncated.
*/
update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
+ test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
- return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
+ return insert_reserved_file_extent(trans, oe->inode,
oe->file_offset, &stack_fi,
update_inode_bytes, oe->qgroup_rsv);
}
@@ -3002,9 +3093,9 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
* an ordered extent if the range of bytes in the file it covers are
* fully written.
*/
-static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
- struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
+ struct btrfs_inode *inode = ordered_extent->inode;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans = NULL;
@@ -3024,24 +3115,23 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
- !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
+ !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
+ !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
clear_bits |= EXTENT_DELALLOC_NEW;
freespace_inode = btrfs_is_free_space_inode(inode);
+ if (!freespace_inode)
+ btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
- if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+ if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
ret = -EIO;
goto out;
}
- /* A valid bdev implies a write on a sequential zone */
- if (ordered_extent->bdev) {
- btrfs_rewrite_logical_zoned(ordered_extent);
- btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
- ordered_extent->disk_num_bytes);
- }
-
- btrfs_free_io_failure_record(inode, start, end);
+ ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
+ ordered_extent->disk_num_bytes);
+ if (ret)
+ goto out;
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
truncated = true;
@@ -3051,29 +3141,21 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
- if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
- BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
-
- btrfs_inode_safe_disk_i_size_write(inode, 0);
- if (freespace_inode)
- trans = btrfs_join_transaction_spacecache(root);
- else
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- trans = NULL;
- goto out;
- }
- trans->block_rsv = &inode->block_rsv;
- ret = btrfs_update_inode_fallback(trans, root, inode);
- if (ret) /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, ret);
- goto out;
+ /*
+ * If it's a COW write we need to lock the extent range as we will be
+ * inserting/replacing file extent items and unpinning an extent map.
+ * This must be taken before joining a transaction, as it's a higher
+ * level lock (like the inode's VFS lock), otherwise we can run into an
+ * ABBA deadlock with other tasks (transactions work like a lock,
+ * depending on their current state).
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
+ btrfs_lock_extent_bits(io_tree, start, end,
+ EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
+ &cached_state);
}
- clear_bits |= EXTENT_LOCKED;
- lock_extent_bits(io_tree, start, end, &cached_state);
-
if (freespace_inode)
trans = btrfs_join_transaction_spacecache(root);
else
@@ -3086,6 +3168,30 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &inode->block_rsv;
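+ /*
+ * On filesystems with a RAID stripe tree, record the stripe mapping
+ * for this extent; this is a no-op otherwise.
+ */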
+ ret = btrfs_insert_raid_extent(trans, ordered_extent);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ /* Logic error */
+ ASSERT(list_empty(&ordered_extent->list));
+ if (unlikely(!list_empty(&ordered_extent->list))) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ btrfs_inode_safe_disk_i_size_write(inode, 0);
+ ret = btrfs_update_inode_fallback(trans, inode);
+ if (unlikely(ret)) {
+ /* -ENOMEM or corruption */
+ btrfs_abort_transaction(trans, ret);
+ }
+ goto out;
+ }
+
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
@@ -3094,6 +3200,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->file_offset,
ordered_extent->file_offset +
logical_len);
+ btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
+ ordered_extent->disk_num_bytes);
} else {
BUG_ON(root == fs_info->tree_root);
ret = insert_ordered_extent_file_extent(trans, ordered_extent);
@@ -3104,15 +3212,20 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->disk_num_bytes);
}
}
- unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
- ordered_extent->num_bytes, trans->transid);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
+ ordered_extent->num_bytes, trans->transid);
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = add_pending_csums(trans, &ordered_extent->list);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3124,28 +3237,24 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
*/
if ((clear_bits & EXTENT_DELALLOC_NEW) &&
!test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
- clear_extent_bit(&inode->io_tree, start, end,
- EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
- 0, 0, &cached_state);
+ btrfs_clear_extent_bit(&inode->io_tree, start, end,
+ EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
+ &cached_state);
btrfs_inode_safe_disk_i_size_write(inode, 0);
- ret = btrfs_update_inode_fallback(trans, root, inode);
- if (ret) { /* -ENOMEM or corruption */
+ ret = btrfs_update_inode_fallback(trans, inode);
+ if (unlikely(ret)) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
}
- ret = 0;
out:
- clear_extent_bit(&inode->io_tree, start, end, clear_bits,
- (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
- &cached_state);
+ btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
+ &cached_state);
if (trans)
btrfs_end_transaction(trans);
if (ret || truncated) {
- u64 unwritten_start = start;
-
/*
* If we failed to finish this ordered extent for any reason we
* need to make sure BTRFS_ORDERED_IOERR is set on the ordered
@@ -3154,16 +3263,32 @@ out:
* set the mapping error, so we need to set it if we're the ones
* marking this ordered extent as failed.
*/
- if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
- &ordered_extent->flags))
- mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+ if (ret)
+ btrfs_mark_ordered_extent_error(ordered_extent);
+
+ /*
+ * Drop extent maps for the part of the extent we didn't write.
+ *
+ * We have an exception here for the free_space_inode, this is
+ * because when we do btrfs_get_extent() on the free space inode
+ * we will search the commit root. If this is a new block group
+ * we won't find anything, and we will trip over the assert in
+ * writepage where we do ASSERT(em->block_start !=
+ * EXTENT_MAP_HOLE).
+ *
+ * Theoretically we could also skip this for any NOCOW extent as
+ * we don't mess with the extent map tree in the NOCOW case, but
+ * for now simply skip this if we are the free space inode.
+ */
+ if (!btrfs_is_free_space_inode(inode)) {
+ u64 unwritten_start = start;
- if (truncated)
- unwritten_start += logical_len;
- clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
+ if (truncated)
+ unwritten_start += logical_len;
- /* Drop the cache for the part of the extent we didn't write. */
- btrfs_drop_extent_cache(inode, unwritten_start, end, 0);
+ btrfs_drop_extent_map_range(inode, unwritten_start,
+ end, false);
+ }
/*
* If the ordered extent had an IOERR or something else went
@@ -3190,7 +3315,14 @@ out:
NULL);
btrfs_free_reserved_extent(fs_info,
ordered_extent->disk_bytenr,
- ordered_extent->disk_num_bytes, 1);
+ ordered_extent->disk_num_bytes, true);
+ /*
+ * Actually free the qgroup rsv which was released when
+ * the ordered extent was created.
+ */
+ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
+ ordered_extent->qgroup_rsv,
+ BTRFS_QGROUP_RSV_DATA);
}
}
@@ -3208,149 +3340,149 @@ out:
return ret;
}
-static void finish_ordered_fn(struct btrfs_work *work)
+int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
- struct btrfs_ordered_extent *ordered_extent;
- ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
- btrfs_finish_ordered_io(ordered_extent);
+ if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
+ !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
+ list_empty(&ordered->bioc_list))
+ btrfs_finish_ordered_zoned(ordered);
+ return btrfs_finish_one_ordered(ordered);
}
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
- struct page *page, u64 start,
- u64 end, bool uptodate)
+/*
+ * Calculate the checksum of an fs block at physical memory address @paddr,
+ * and save the result to @dest.
+ *
+ * The folio containing @paddr must be large enough to contain a full fs block.
+ */
+void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddr, u8 *dest)
{
- trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
+ struct folio *folio = page_folio(phys_to_page(paddr));
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
- btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
- finish_ordered_fn, uptodate);
+ /* The full block must be inside the folio. */
+ ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
+
+ for (int i = 0; i < nr_steps; i++) {
+ u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
+
+ /*
+ * For bs <= ps cases, we will only run the loop once, so the offset
+ * inside the page is only added to paddrs[0].
+ *
+ * For bs > ps cases, the block must be page aligned, thus offset
+ * inside the page will always be 0.
+ */
+ paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
+ }
+ return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
}
/*
- * check_data_csum - verify checksum of one sector of uncompressed data
- * @inode: inode
- * @io_bio: btrfs_io_bio which contains the csum
- * @bio_offset: offset to the beginning of the bio (in bytes)
- * @page: page where is the data to be verified
- * @pgoff: offset inside the page
- * @start: logical offset in the file
+ * Calculate the checksum of a fs block backed by multiple noncontiguous pages
+ * at @paddrs[] and save the result to @dest.
*
- * The length of such check is always one sector size.
+ * Each @paddrs[] entry must have min(block size, PAGE_SIZE) bytes
+ * available within its page.
*/
-static int check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
- u32 bio_offset, struct page *page, u32 pgoff,
- u64 start)
+void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
+ const phys_addr_t paddrs[], u8 *dest)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- char *kaddr;
- u32 len = fs_info->sectorsize;
- const u32 csum_size = fs_info->csum_size;
- unsigned int offset_sectors;
- u8 *csum_expected;
- u8 csum[BTRFS_CSUM_SIZE];
-
- ASSERT(pgoff + len <= PAGE_SIZE);
-
- offset_sectors = bio_offset >> fs_info->sectorsize_bits;
- csum_expected = ((u8 *)bbio->csum) + offset_sectors * csum_size;
- kaddr = kmap_atomic(page);
shash->tfm = fs_info->csum_shash;
+ crypto_shash_init(shash);
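+ /*
+ * Hash the block in steps of min(block size, page size), so the
+ * backing pages do not need to be physically contiguous.
+ */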
+ for (int i = 0; i < nr_steps; i++) {
+ const phys_addr_t paddr = paddrs[i];
+ void *kaddr;
- crypto_shash_digest(shash, kaddr + pgoff, len, csum);
-
- if (memcmp(csum, csum_expected, csum_size))
- goto zeroit;
+ ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
+ kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
+ crypto_shash_update(shash, kaddr, step);
+ kunmap_local(kaddr);
+ }
+ crypto_shash_final(shash, dest);
+}
- kunmap_atomic(kaddr);
+/*
+ * Verify the checksum of a single fs block without any extra action that
+ * depends on the type of I/O.
+ *
+ * @paddr must point to a full fs block contained within a single folio.
+ */
+int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
+ const u8 * const csum_expected)
+{
+ btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
+ if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
+ return -EIO;
return 0;
-zeroit:
- btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
- bbio->mirror_num);
- if (bbio->device)
- btrfs_dev_stat_inc_and_print(bbio->device,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- memset(kaddr + pgoff, 1, len);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
- return -EIO;
}
/*
- * When reads are done, we need to check csums to verify the data is correct.
- * if there's a match, we allow the bio to finish. If not, the code in
- * extent_io.c will try to find good copies for us.
+ * Verify the checksum of a single data sector, which can be scattered
+ * across noncontiguous pages.
*
+ * @bbio: btrfs_io_bio which contains the csum
+ * @dev: device the sector is on
* @bio_offset: offset to the beginning of the bio (in bytes)
- * @start: file offset of the range start
- * @end: file offset of the range end (inclusive)
+ * @paddrs: physical addresses which back the fs block
*
- * Return a bitmap where bit set means a csum mismatch, and bit not set means
- * csum match.
+ * Check if the checksum on a data block is valid. When a checksum mismatch is
+ * detected, report the error and fill the corrupted range with zero.
+ *
+ * Return %true if the sector is ok or had no checksum to start with, else %false.
*/
-unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
- u32 bio_offset, struct page *page,
- u64 start, u64 end)
+bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
+ u32 bio_offset, const phys_addr_t paddrs[])
{
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- const u32 sectorsize = root->fs_info->sectorsize;
- u32 pg_off;
- unsigned int result = 0;
-
- if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) {
- btrfs_page_clear_checked(fs_info, page, start, end + 1 - start);
- return 0;
- }
-
- /*
- * This only happens for NODATASUM or compressed read.
- * Normally this should be covered by above check for compressed read
- * or the next check for NODATASUM. Just do a quicker exit here.
- */
- if (bbio->csum == NULL)
- return 0;
-
- if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
- return 0;
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 step = min(blocksize, PAGE_SIZE);
+ const u32 nr_steps = blocksize / step;
+ u64 file_offset = bbio->file_offset + bio_offset;
+ u64 end = file_offset + blocksize - 1;
+ u8 *csum_expected;
+ u8 csum[BTRFS_CSUM_SIZE];
- if (!root->fs_info->csum_root)
- return 0;
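+ /* No csum array attached (e.g. NODATASUM), nothing to verify. */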
+ if (!bbio->csum)
+ return true;
- ASSERT(page_offset(page) <= start &&
- end <= page_offset(page) + PAGE_SIZE - 1);
- for (pg_off = offset_in_page(start);
- pg_off < offset_in_page(end);
- pg_off += sectorsize, bio_offset += sectorsize) {
- u64 file_offset = pg_off + page_offset(page);
- int ret;
+ if (btrfs_is_data_reloc_root(inode->root) &&
+ btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
+ NULL)) {
+ /* Skip the range without csum for data reloc inode */
+ btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
+ EXTENT_NODATASUM, NULL);
+ return true;
+ }
- if (btrfs_is_data_reloc_root(root) &&
- test_range_bit(io_tree, file_offset,
- file_offset + sectorsize - 1,
- EXTENT_NODATASUM, 1, NULL)) {
- /* Skip the range without csum for data reloc inode */
- clear_extent_bits(io_tree, file_offset,
- file_offset + sectorsize - 1,
- EXTENT_NODATASUM);
- continue;
- }
- ret = check_data_csum(inode, bbio, bio_offset, page, pg_off,
- page_offset(page) + pg_off);
- if (ret < 0) {
- const int nr_bit = (pg_off - offset_in_page(start)) >>
- root->fs_info->sectorsize_bits;
+ csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
+ fs_info->csum_size;
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
+ if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
+ goto zeroit;
+ return true;
- result |= (1U << nr_bit);
- }
- }
- return result;
+zeroit:
+ btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
+ bbio->mirror_num);
+ if (dev)
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
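+ /* Zero the block so corrupted data is never exposed to the caller. */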
+ for (int i = 0; i < nr_steps; i++)
+ memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
+ return false;
}
/*
- * btrfs_add_delayed_iput - perform a delayed iput on @inode
+ * Perform a delayed iput on @inode.
*
* @inode: The inode we want to perform iput on
*
@@ -3359,19 +3491,25 @@ unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
* the inode to the delayed iput machinery. Delayed iputs are processed at
* transaction commit time/superblock commit/cleaner kthread.
*/
-void btrfs_add_delayed_iput(struct inode *inode)
+void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_inode *binode = BTRFS_I(inode);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ unsigned long flags;
- if (atomic_add_unless(&inode->i_count, -1, 1))
+ if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
return;
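+ /* Adding a delayed iput after the fs no longer accepts them is a bug. */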
+ WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
atomic_inc(&fs_info->nr_delayed_iputs);
- spin_lock(&fs_info->delayed_iput_lock);
- ASSERT(list_empty(&binode->delayed_iput));
- list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
- spin_unlock(&fs_info->delayed_iput_lock);
+ /*
+ * Need to be irq safe here because we can be called from either an irq
+ * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
+ * context.
+ */
+ spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
+ ASSERT(list_empty(&inode->delayed_iput));
+ list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
+ spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
wake_up_process(fs_info->cleaner_kthread);
}
@@ -3380,40 +3518,49 @@ static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
struct btrfs_inode *inode)
{
list_del_init(&inode->delayed_iput);
- spin_unlock(&fs_info->delayed_iput_lock);
+ spin_unlock_irq(&fs_info->delayed_iput_lock);
iput(&inode->vfs_inode);
if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
wake_up(&fs_info->delayed_iputs_wait);
- spin_lock(&fs_info->delayed_iput_lock);
+ spin_lock_irq(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
struct btrfs_inode *inode)
{
if (!list_empty(&inode->delayed_iput)) {
- spin_lock(&fs_info->delayed_iput_lock);
+ spin_lock_irq(&fs_info->delayed_iput_lock);
if (!list_empty(&inode->delayed_iput))
run_delayed_iput_locked(fs_info, inode);
- spin_unlock(&fs_info->delayed_iput_lock);
+ spin_unlock_irq(&fs_info->delayed_iput_lock);
}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
-
- spin_lock(&fs_info->delayed_iput_lock);
+ /*
+ * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
+ * calls btrfs_add_delayed_iput() and that needs to lock
+ * fs_info->delayed_iput_lock. So we need to disable irqs here to
+ * prevent a deadlock.
+ */
+ spin_lock_irq(&fs_info->delayed_iput_lock);
while (!list_empty(&fs_info->delayed_iputs)) {
struct btrfs_inode *inode;
inode = list_first_entry(&fs_info->delayed_iputs,
struct btrfs_inode, delayed_iput);
run_delayed_iput_locked(fs_info, inode);
- cond_resched_lock(&fs_info->delayed_iput_lock);
+ if (need_resched()) {
+ spin_unlock_irq(&fs_info->delayed_iput_lock);
+ cond_resched();
+ spin_lock_irq(&fs_info->delayed_iput_lock);
+ }
}
- spin_unlock(&fs_info->delayed_iput_lock);
+ spin_unlock_irq(&fs_info->delayed_iput_lock);
}
-/**
+/*
* Wait for flushing all delayed iputs
*
* @fs_info: the filesystem
@@ -3444,7 +3591,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
int ret;
ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
- if (ret && ret != -EEXIST) {
+ if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3469,15 +3616,14 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
struct btrfs_trans_handle *trans;
- struct inode *inode;
u64 last_objectid = 0;
int ret = 0, nr_unlink = 0;
- if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
+ if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
return 0;
path = btrfs_alloc_path();
@@ -3492,6 +3638,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
key.offset = (u64)-1;
while (1) {
+ struct btrfs_inode *inode;
+
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
@@ -3528,9 +3676,16 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
*/
if (found_key.offset == last_objectid) {
+ /*
+ * We found the same inode as before. This means we were
+ * not able to remove its items via eviction triggered
+ * by an iput(). A transaction abort may have happened,
+ * due to -ENOSPC for example, so try to grab the error
+ * that led to a transaction abort, if any.
+ */
btrfs_err(fs_info,
"Error removing orphan entry, stopping orphan cleanup");
- ret = -EINVAL;
+ ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
goto out;
}
@@ -3539,12 +3694,15 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, last_objectid, root);
- ret = PTR_ERR_OR_ZERO(inode);
- if (ret && ret != -ENOENT)
- goto out;
+ inode = btrfs_iget(last_objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ inode = NULL;
+ if (ret != -ENOENT)
+ goto out;
+ }
- if (ret == -ENOENT && root == fs_info->tree_root) {
+ if (!inode && root == fs_info->tree_root) {
struct btrfs_root *dead_root;
int is_dead_root = 0;
@@ -3605,10 +3763,11 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
* deleted but wasn't. The inode number may have been reused,
* but either way, we can delete the orphan item.
*/
- if (ret == -ENOENT || inode->i_nlink) {
- if (!ret) {
- ret = btrfs_drop_verity_items(BTRFS_I(inode));
- iput(inode);
+ if (!inode || inode->vfs_inode.i_nlink) {
+ if (inode) {
+ ret = btrfs_drop_verity_items(inode);
+ iput(&inode->vfs_inode);
+ inode = NULL;
if (ret)
goto out;
}
@@ -3630,13 +3789,11 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
nr_unlink++;
/* this will do delete_inode and everything for us */
- iput(inode);
+ iput(&inode->vfs_inode);
}
/* release the path since we're done with it */
btrfs_release_path(path);
- root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
-
if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
@@ -3649,19 +3806,22 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
out:
if (ret)
btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
- btrfs_free_path(path);
return ret;
}
/*
- * very simple check to peek ahead in the leaf looking for xattrs. If we
- * don't find any xattrs, we know there can't be any acls.
+ * Look ahead in the leaf for xattrs. If we don't find any then we know there
+ * can't be any ACLs.
+ *
+ * @leaf: the extent buffer (eb) leaf to search
+ * @slot: the slot the inode is in
+ * @objectid: the objectid of the inode
*
- * slot is the slot the inode is in, objectid is the objectid of the inode
+ * Return true if there is an xattr/ACL, false otherwise.
*/
-static noinline int acls_after_inode_item(struct extent_buffer *leaf,
- int slot, u64 objectid,
- int *first_xattr_slot)
+static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
+ int slot, u64 objectid,
+ int *first_xattr_slot)
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
@@ -3681,58 +3841,120 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);
- /* we found a different objectid, there must not be acls */
+ /* We found a different objectid, there must be no ACLs. */
if (found_key.objectid != objectid)
- return 0;
+ return false;
- /* we found an xattr, assume we've got an acl */
+ /* We found an xattr, assume we've got an ACL. */
if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
if (found_key.offset == xattr_access ||
found_key.offset == xattr_default)
- return 1;
+ return true;
}
/*
- * we found a key greater than an xattr key, there can't
- * be any acls later on
+ * We found a key greater than an xattr key, there can't be any
+ * ACLs later on.
*/
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
- return 0;
+ return false;
slot++;
scanned++;
/*
- * it goes inode, inode backrefs, xattrs, extents,
- * so if there are a ton of hard links to an inode there can
- * be a lot of backrefs. Don't waste time searching too hard,
- * this is just an optimization
+ * The item order goes like:
+ * - inode
+ * - inode backrefs
+ * - xattrs
+ * - extents,
+ *
+ * so if there are lots of hard links to an inode there can be
+ * a lot of backrefs. Don't waste time searching too hard,
+ * this is just an optimization.
*/
if (scanned >= 8)
break;
}
- /* we hit the end of the leaf before we found an xattr or
- * something larger than an xattr. We have to assume the inode
- * has acls
+ /*
+ * We hit the end of the leaf before we found an xattr or something
+ * larger than an xattr. We have to assume the inode has ACLs.
*/
if (*first_xattr_slot == -1)
*first_xattr_slot = slot;
- return 1;
+ return true;
+}
+
+static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ if (WARN_ON_ONCE(inode->file_extent_tree))
+ return 0;
+ if (btrfs_fs_incompat(fs_info, NO_HOLES))
+ return 0;
+ if (!S_ISREG(inode->vfs_inode.i_mode))
+ return 0;
+ if (btrfs_is_free_space_inode(inode))
+ return 0;
+
+ inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
+ if (!inode->file_extent_tree)
+ return -ENOMEM;
+
+ btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
+ IO_TREE_INODE_FILE_EXTENT);
+ /* Lockdep class is set only for the file extent tree. */
+ lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
+
+ return 0;
+}
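
btrfs_init_file_extent_tree() above allocates the file extent tree lazily and only for inodes that can actually use it. The same idea in a generic, self-contained sketch (all names hypothetical):

#include <linux/slab.h>

struct example_aux {
	int placeholder;		/* stand-in for the real payload */
};

struct example_obj {
	struct example_aux *aux;	/* allocated on demand */
	bool needs_aux;
};

static int example_init_aux(struct example_obj *obj)
{
	if (obj->aux)			/* already initialized, nothing to do */
		return 0;
	if (!obj->needs_aux)		/* this kind of object never needs it */
		return 0;

	obj->aux = kzalloc(sizeof(*obj->aux), GFP_KERNEL);
	if (!obj->aux)
		return -ENOMEM;
	return 0;
}
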
+
+static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_inode *existing;
+ const u64 ino = btrfs_ino(inode);
+ int ret;
+
+ if (inode_unhashed(&inode->vfs_inode))
+ return 0;
+
+ if (prealloc) {
+ ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
+ if (ret)
+ return ret;
+ }
+
+ existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
+
+ if (xa_is_err(existing)) {
+ ret = xa_err(existing);
+ ASSERT(ret != -EINVAL);
+ ASSERT(ret != -ENOMEM);
+ return ret;
+ } else if (existing) {
+ WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
+ }
+
+ return 0;
}
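
btrfs_add_inode_to_root() above uses the xarray reserve-then-store pattern: the slot is preallocated with GFP_NOFS while sleeping is still allowed, so the later store can use GFP_ATOMIC without risking an allocation failure. A self-contained sketch of that pattern (the cache and ids are hypothetical):

#include <linux/xarray.h>

static DEFINE_XARRAY(example_cache);	/* hypothetical id -> object cache */

static int example_insert(unsigned long id, void *obj, bool prealloc)
{
	void *old;

	if (prealloc) {
		/* Allocate the slot now, while we are allowed to sleep. */
		int ret = xa_reserve(&example_cache, id, GFP_NOFS);

		if (ret)
			return ret;
	}

	/* The store itself may then run in atomic context. */
	old = xa_store(&example_cache, id, obj, GFP_ATOMIC);
	if (xa_is_err(old))
		return xa_err(old);

	return 0;
}
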
/*
- * read an inode from the btree into the in-memory inode
+ * Read a locked inode from the btree into the in-memory inode and add it to
+ * its root list/tree.
+ *
+ * On failure clean up the inode.
*/
-static int btrfs_read_locked_inode(struct inode *inode,
- struct btrfs_path *in_path)
+static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path = in_path;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct inode *vfs_inode = &inode->vfs_inode;
struct btrfs_key location;
unsigned long ptr;
int maybe_acls;
@@ -3745,19 +3967,19 @@ static int btrfs_read_locked_inode(struct inode *inode,
if (!ret)
filled = true;
- if (!path) {
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- }
+ ASSERT(path);
- memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
+ btrfs_get_inode_key(inode, &location);
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- if (path != in_path)
- btrfs_free_path(path);
- return ret;
+ /*
+ * ret > 0 can come from btrfs_search_slot called by
+ * btrfs_lookup_inode(); it means the inode was not found.
+ */
+ if (ret > 0)
+ ret = -ENOENT;
+ goto out;
}
leaf = path->nodes[0];
@@ -3767,43 +3989,47 @@ static int btrfs_read_locked_inode(struct inode *inode,
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
- inode->i_mode = btrfs_inode_mode(leaf, inode_item);
- set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
- i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
- i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
- btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
- btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
- round_up(i_size_read(inode), fs_info->sectorsize));
-
- inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
- inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
-
- inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
- inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
-
- inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
- inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
-
- BTRFS_I(inode)->i_otime.tv_sec =
- btrfs_timespec_sec(leaf, &inode_item->otime);
- BTRFS_I(inode)->i_otime.tv_nsec =
- btrfs_timespec_nsec(leaf, &inode_item->otime);
-
- inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
- BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
- BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
-
- inode_set_iversion_queried(inode,
- btrfs_inode_sequence(leaf, inode_item));
- inode->i_generation = BTRFS_I(inode)->generation;
- inode->i_rdev = 0;
+ vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
+ set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
+ i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
+ i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
+ btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
+
+ inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
+ btrfs_timespec_nsec(leaf, &inode_item->atime));
+
+ inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
+ btrfs_timespec_nsec(leaf, &inode_item->mtime));
+
+ inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
+ btrfs_timespec_nsec(leaf, &inode_item->ctime));
+
+ inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
+ inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
+
+ inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
+ inode->generation = btrfs_inode_generation(leaf, inode_item);
+ inode->last_trans = btrfs_inode_transid(leaf, inode_item);
+
+ inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
+ vfs_inode->i_generation = inode->generation;
+ vfs_inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(vfs_inode->i_mode))
+ inode->index_cnt = (u64)-1;
+
btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
- &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
+ &inode->flags, &inode->ro_flags);
+ btrfs_update_inode_mapping_flags(inode);
+ btrfs_set_inode_mapping_order(inode);
cache_index:
+ ret = btrfs_init_file_extent_tree(inode);
+ if (ret)
+ goto out;
+ btrfs_inode_set_file_extent_range(inode, 0,
+ round_up(i_size_read(vfs_inode), fs_info->sectorsize));
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
@@ -3811,11 +4037,10 @@ cache_index:
* cache.
*
* This is required for both inode re-read from disk and delayed inode
- * in delayed_nodes_tree.
+ * in the delayed_nodes xarray.
*/
- if (BTRFS_I(inode)->last_trans == fs_info->generation)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ if (inode->last_trans == btrfs_get_fs_generation(fs_info))
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
/*
* We don't persist the id of the transaction where an unlink operation
@@ -3844,7 +4069,7 @@ cache_index:
* transaction commits on fsync if our inode is a directory, or if our
* inode is not a directory, logging its parent unnecessarily.
*/
- BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+ inode->last_unlink_trans = inode->last_trans;
/*
* Same logic as for last_unlink_trans. We don't persist the generation
@@ -3852,15 +4077,15 @@ cache_index:
* operation, so after eviction and reloading the inode we must be
* pessimistic and assume the last transaction that modified the inode.
*/
- BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
+ inode->last_reflink_trans = inode->last_trans;
path->slots[0]++;
- if (inode->i_nlink != 1 ||
+ if (vfs_inode->i_nlink != 1 ||
path->slots[0] >= btrfs_header_nritems(leaf))
goto cache_acl;
btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
- if (location.objectid != btrfs_ino(BTRFS_I(inode)))
+ if (location.objectid != btrfs_ino(inode))
goto cache_acl;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3868,13 +4093,12 @@ cache_index:
struct btrfs_inode_ref *ref;
ref = (struct btrfs_inode_ref *)ptr;
- BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
+ inode->dir_index = btrfs_inode_ref_index(leaf, ref);
} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)ptr;
- BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
- extref);
+ inode->dir_index = btrfs_inode_extref_index(leaf, extref);
}
cache_acl:
/*
@@ -3882,45 +4106,50 @@ cache_acl:
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0],
- btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
+ btrfs_ino(inode), &first_xattr_slot);
if (first_xattr_slot != -1) {
path->slots[0] = first_xattr_slot;
ret = btrfs_load_inode_props(inode, path);
if (ret)
btrfs_err(fs_info,
"error loading props for ino %llu (root %llu): %d",
- btrfs_ino(BTRFS_I(inode)),
- root->root_key.objectid, ret);
+ btrfs_ino(inode), btrfs_root_id(root), ret);
}
- if (path != in_path)
- btrfs_free_path(path);
if (!maybe_acls)
- cache_no_acl(inode);
+ cache_no_acl(vfs_inode);
- switch (inode->i_mode & S_IFMT) {
+ switch (vfs_inode->i_mode & S_IFMT) {
case S_IFREG:
- inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
+ vfs_inode->i_mapping->a_ops = &btrfs_aops;
+ vfs_inode->i_fop = &btrfs_file_operations;
+ vfs_inode->i_op = &btrfs_file_inode_operations;
break;
case S_IFDIR:
- inode->i_fop = &btrfs_dir_file_operations;
- inode->i_op = &btrfs_dir_inode_operations;
+ vfs_inode->i_fop = &btrfs_dir_file_operations;
+ vfs_inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
- inode->i_op = &btrfs_symlink_inode_operations;
- inode_nohighmem(inode);
- inode->i_mapping->a_ops = &btrfs_aops;
+ vfs_inode->i_op = &btrfs_symlink_inode_operations;
+ inode_nohighmem(vfs_inode);
+ vfs_inode->i_mapping->a_ops = &btrfs_aops;
break;
default:
- inode->i_op = &btrfs_special_inode_operations;
- init_special_inode(inode, inode->i_mode, rdev);
+ vfs_inode->i_op = &btrfs_special_inode_operations;
+ init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
break;
}
btrfs_sync_inode_flags_to_i_flags(inode);
+
+ ret = btrfs_add_inode_to_root(inode, true);
+ if (ret)
+ goto out;
+
return 0;
+out:
+ iget_failed(vfs_inode);
+ return ret;
}
/*
@@ -3931,70 +4160,59 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode)
{
- struct btrfs_map_token token;
u64 flags;
- btrfs_init_map_token(&token, leaf);
-
- btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
- btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
- btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
- btrfs_set_token_inode_mode(&token, item, inode->i_mode);
- btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
-
- btrfs_set_token_timespec_sec(&token, &item->atime,
- inode->i_atime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode->i_atime.tv_nsec);
-
- btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode->i_mtime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode->i_mtime.tv_nsec);
-
- btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode->i_ctime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode->i_ctime.tv_nsec);
-
- btrfs_set_token_timespec_sec(&token, &item->otime,
- BTRFS_I(inode)->i_otime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->otime,
- BTRFS_I(inode)->i_otime.tv_nsec);
-
- btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
- btrfs_set_token_inode_generation(&token, item,
- BTRFS_I(inode)->generation);
- btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
- btrfs_set_token_inode_transid(&token, item, trans->transid);
- btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
+ btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+ btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
+ btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
+ btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+
+ btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
+
+ btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
+ btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
+
+ btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
+ btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
+ btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
+ btrfs_set_inode_transid(leaf, item, trans->transid);
+ btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
BTRFS_I(inode)->ro_flags);
- btrfs_set_token_inode_flags(&token, item, flags);
- btrfs_set_token_inode_block_group(&token, item, 0);
+ btrfs_set_inode_flags(leaf, item, flags);
+ btrfs_set_inode_block_group(leaf, item, 0);
}
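
fill_inode_item() and btrfs_read_locked_inode() above go through the VFS timestamp accessors (inode_get_*_sec()/inode_get_*_nsec() and inode_set_*()) rather than touching i_atime/i_mtime/i_ctime directly. A minimal sketch of that accessor usage (the copy helper itself is hypothetical):

#include <linux/fs.h>

/* Copy the three timestamps from @src to @dst via the VFS accessors. */
static void example_copy_times(struct inode *dst, const struct inode *src)
{
	inode_set_atime(dst, inode_get_atime_sec(src), inode_get_atime_nsec(src));
	inode_set_mtime(dst, inode_get_mtime_sec(src), inode_get_mtime_nsec(src));
	inode_set_ctime(dst, inode_get_ctime_sec(src), inode_get_ctime_nsec(src));
}
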
/*
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *inode)
+ struct btrfs_inode *inode)
{
struct btrfs_inode_item *inode_item;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
+ struct btrfs_key key;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
+ btrfs_get_inode_key(inode, &key);
+ ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
- goto failed;
+ return ret;
}
leaf = path->nodes[0];
@@ -4002,21 +4220,17 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
- btrfs_mark_buffer_dirty(leaf);
btrfs_set_inode_last_trans(trans, inode);
- ret = 0;
-failed:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
* copy everything in the in-memory inode into the btree.
*/
-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *inode)
+int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -4032,26 +4246,43 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
&& !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
btrfs_update_root_times(trans, root);
- ret = btrfs_delayed_update_inode(trans, root, inode);
+ ret = btrfs_delayed_update_inode(trans, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
- return btrfs_update_inode_item(trans, root, inode);
+ return btrfs_update_inode_item(trans, inode);
}
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode)
+ struct btrfs_inode *inode)
{
int ret;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret == -ENOSPC)
- return btrfs_update_inode_item(trans, root, inode);
+ return btrfs_update_inode_item(trans, inode);
return ret;
}
+static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
+{
+ struct timespec64 now;
+
+ /*
+ * If we are replaying a log tree, we do not want to update the mtime
+ * and ctime of the parent directory with the current time, since the
+ * log replay procedure is responsible for setting them to their correct
+ * values (the ones it had when the fsync was done).
+ */
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
+ return;
+
+ now = inode_set_ctime_current(&dir->vfs_inode);
+ inode_set_mtime_to_ts(&dir->vfs_inode, now);
+}
+
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It remove a link in a directory with a given name, and
@@ -4060,7 +4291,8 @@ int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir,
struct btrfs_inode *inode,
- const char *name, int name_len)
+ const struct fscrypt_str *name,
+ struct btrfs_rename_ctx *rename_ctx)
{
struct btrfs_root *root = dir->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4072,21 +4304,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
- di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
- name, name_len, -1);
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
if (IS_ERR_OR_NULL(di)) {
- ret = di ? PTR_ERR(di) : -ENOENT;
- goto err;
+ btrfs_free_path(path);
+ return di ? PTR_ERR(di) : -ENOENT;
}
ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ /*
+ * Down the call chains below we'll also need to allocate a path, so no
+ * need to hold on to this one for longer than necessary.
+ */
+ btrfs_free_path(path);
if (ret)
- goto err;
- btrfs_release_path(path);
+ return ret;
/*
* If we don't have dir index, we have to get it by looking up
@@ -4106,25 +4339,34 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
}
}
- ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
- dir_ino, &index);
- if (ret) {
- btrfs_info(fs_info,
- "failed to delete reference to %.*s, inode %llu parent %llu",
- name_len, name, ino, dir_ino);
+ ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
+ if (unlikely(ret)) {
+ btrfs_crit(fs_info,
+ "failed to delete reference to %.*s, root %llu inode %llu parent %llu",
+ name->len, name->name, btrfs_root_id(root), ino, dir_ino);
btrfs_abort_transaction(trans, ret);
- goto err;
+ return ret;
}
skip_backref:
+ if (rename_ctx)
+ rename_ctx->index = index;
+
ret = btrfs_delete_delayed_dir_index(trans, dir, index);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto err;
+ return ret;
}
- btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
- dir_ino);
- btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index);
+ /*
+ * If we are in a rename context, we don't need to update anything in the
+ * log. That will be done later during the rename by btrfs_log_new_name().
+ * Besides that, doing it here would only cause extra unnecessary btree
+ * operations on the log tree, increasing latency for applications.
+ */
+ if (!rename_ctx) {
+ btrfs_del_inode_ref_in_log(trans, name, inode, dir);
+ btrfs_del_dir_entries_in_log(trans, name, dir, index);
+ }
/*
* If we have a pending delayed iput we could end up with the final iput
@@ -4136,30 +4378,26 @@ skip_backref:
* holding.
*/
btrfs_run_delayed_iput(fs_info, inode);
-err:
- btrfs_free_path(path);
- if (ret)
- goto out;
- btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
+ btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
+ inode_set_ctime_current(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
- inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
- dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, dir);
-out:
- return ret;
+ update_time_after_link_or_unlink(dir);
+
+ return btrfs_update_inode(trans, dir);
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
- const char *name, int name_len)
+ const struct fscrypt_str *name)
{
int ret;
- ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len);
+
+ ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
if (!ret) {
drop_nlink(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, inode->root, inode);
+ ret = btrfs_update_inode(trans, inode);
}
return ret;
}
@@ -4172,18 +4410,12 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
* plenty of slack room in the global reserve to migrate, otherwise we cannot
* allow the unlink to occur.
*/
-static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
+static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_root *root = dir->root;
- /*
- * 1 for the possible orphan item
- * 1 for the dir item
- * 1 for the dir index
- * 1 for the inode ref
- * 1 for the inode
- */
- return btrfs_start_transaction_fallback_global_rsv(root, 5);
+ return btrfs_start_transaction_fallback_global_rsv(root,
+ BTRFS_UNLINK_METADATA_UNITS);
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -4191,63 +4423,81 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
struct btrfs_trans_handle *trans;
struct inode *inode = d_inode(dentry);
int ret;
+ struct fscrypt_name fname;
- trans = __unlink_start_trans(dir);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
+ if (ret)
+ return ret;
+
+ /* This needs to handle no-key deletions later on */
+
+ trans = __unlink_start_trans(BTRFS_I(dir));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto fscrypt_free;
+ }
btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
- 0);
+ false);
- ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
- BTRFS_I(d_inode(dentry)), dentry->d_name.name,
- dentry->d_name.len);
+ ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
+ &fname.disk_name);
if (ret)
- goto out;
+ goto end_trans;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret)
- goto out;
+ goto end_trans;
}
-out:
+end_trans:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
+fscrypt_free:
+ fscrypt_free_filename(&fname);
return ret;
}
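
btrfs_unlink() above brackets the whole operation with fscrypt_setup_filename()/fscrypt_free_filename() so the on-disk (possibly no-key) name is available for the item lookups. A minimal sketch of that bracket (the directory operation in the middle is hypothetical):

#include <linux/fs.h>
#include <linux/fscrypt.h>

static int example_dir_op(struct inode *dir, struct dentry *dentry)
{
	struct fscrypt_name fname;
	int ret;

	/* 1 == allow a no-key name when the encryption key is absent. */
	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* ... use fname.disk_name for the on-disk lookups/deletions ... */

	fscrypt_free_filename(&fname);
	return 0;
}
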
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
- struct inode *dir, struct dentry *dentry)
+ struct btrfs_inode *dir, struct dentry *dentry)
{
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_root *root = dir->root;
struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
- const char *name = dentry->d_name.name;
- int name_len = dentry->d_name.len;
u64 index;
int ret;
u64 objectid;
- u64 dir_ino = btrfs_ino(BTRFS_I(dir));
+ u64 dir_ino = btrfs_ino(dir);
+ struct fscrypt_name fname;
+
+ ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
+ if (ret)
+ return ret;
+
+ /* This needs to handle no-key deletions later on */
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
- objectid = inode->root->root_key.objectid;
+ objectid = btrfs_root_id(inode->root);
} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
- objectid = inode->location.objectid;
+ objectid = inode->ref_root_id;
} else {
WARN_ON(1);
+ fscrypt_free_filename(&fname);
return -EINVAL;
}
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
- name, name_len, -1);
+ &fname.disk_name, -1);
if (IS_ERR_OR_NULL(di)) {
ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
@@ -4257,7 +4507,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4273,13 +4523,9 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
* call btrfs_del_root_ref, and it _shouldn't_ fail.
*/
if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
- di = btrfs_search_dir_index_item(root, path, dir_ino,
- name, name_len);
- if (IS_ERR_OR_NULL(di)) {
- if (!di)
- ret = -ENOENT;
- else
- ret = PTR_ERR(di);
+ di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4290,28 +4536,28 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
} else {
ret = btrfs_del_root_ref(trans, objectid,
- root->root_key.objectid, dir_ino,
- &index, name, name_len);
- if (ret) {
+ btrfs_root_id(root), dir_ino,
+ &index, &fname.disk_name);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
- ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
- if (ret) {
+ ret = btrfs_delete_delayed_dir_index(trans, dir, index);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
- inode_inc_iversion(dir);
- dir->i_mtime = dir->i_ctime = current_time(dir);
- ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
+ btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
+ inode_inc_iversion(&dir->vfs_inode);
+ inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
+ ret = btrfs_update_inode_fallback(trans, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
out:
- btrfs_free_path(path);
+ fscrypt_free_filename(&fname);
return ret;
}
@@ -4322,9 +4568,10 @@ out:
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
struct btrfs_key key;
+ struct fscrypt_str name = FSTR_INIT("default", 7);
u64 dir_id;
int ret;
@@ -4335,38 +4582,42 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
/* Make sure this root isn't set as the default subvol */
dir_id = btrfs_super_root_dir(fs_info->super_copy);
di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
- dir_id, "default", 7, 0);
+ dir_id, &name, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
- if (key.objectid == root->root_key.objectid) {
+ if (key.objectid == btrfs_root_id(root)) {
ret = -EPERM;
btrfs_err(fs_info,
"deleting default subvolume %llu is not allowed",
key.objectid);
- goto out;
+ return ret;
}
btrfs_release_path(path);
}
- key.objectid = root->root_key.objectid;
+ key.objectid = btrfs_root_id(root);
key.type = BTRFS_ROOT_REF_KEY;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- BUG_ON(ret == 0);
+ return ret;
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found, there would have to exist a root
+ * with such an id, but that is out of the valid range.
+ */
+ return -EUCLEAN;
+ }
ret = 0;
if (path->slots[0] > 0) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid == root->root_key.objectid &&
- key.type == BTRFS_ROOT_REF_KEY)
+ if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
ret = -ENOTEMPTY;
}
-out:
- btrfs_free_path(path);
+
return ret;
}
@@ -4374,77 +4625,42 @@ out:
static void btrfs_prune_dentries(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct rb_node *node;
- struct rb_node *prev;
- struct btrfs_inode *entry;
- struct inode *inode;
- u64 objectid = 0;
+ struct btrfs_inode *inode;
+ u64 min_ino = 0;
if (!BTRFS_FS_ERROR(fs_info))
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
- spin_lock(&root->inode_lock);
-again:
- node = root->inode_tree.rb_node;
- prev = NULL;
- while (node) {
- prev = node;
- entry = rb_entry(node, struct btrfs_inode, rb_node);
-
- if (objectid < btrfs_ino(entry))
- node = node->rb_left;
- else if (objectid > btrfs_ino(entry))
- node = node->rb_right;
- else
- break;
- }
- if (!node) {
- while (prev) {
- entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= btrfs_ino(entry)) {
- node = prev;
- break;
- }
- prev = rb_next(prev);
- }
- }
- while (node) {
- entry = rb_entry(node, struct btrfs_inode, rb_node);
- objectid = btrfs_ino(entry) + 1;
- inode = igrab(&entry->vfs_inode);
- if (inode) {
- spin_unlock(&root->inode_lock);
- if (atomic_read(&inode->i_count) > 1)
- d_prune_aliases(inode);
- /*
- * btrfs_drop_inode will have it removed from the inode
- * cache when its usage count hits zero.
- */
- iput(inode);
- cond_resched();
- spin_lock(&root->inode_lock);
- goto again;
- }
-
- if (cond_resched_lock(&root->inode_lock))
- goto again;
+ inode = btrfs_find_first_inode(root, min_ino);
+ while (inode) {
+ if (icount_read(&inode->vfs_inode) > 1)
+ d_prune_aliases(&inode->vfs_inode);
- node = rb_next(node);
+ min_ino = btrfs_ino(inode) + 1;
+ /*
+ * btrfs_drop_inode() will have it removed from the inode
+ * cache when its usage count hits zero.
+ */
+ iput(&inode->vfs_inode);
+ cond_resched();
+ inode = btrfs_find_first_inode(root, min_ino);
}
- spin_unlock(&root->inode_lock);
}
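
btrfs_prune_dentries() above is rewritten as a resumable walk: find the first inode with an inode number >= min_ino, process it with no locks held, then resume from that number plus one. The same idea in a generic, self-contained sketch over a plain xarray (the cache and its entries are hypothetical):

#include <linux/sched.h>
#include <linux/xarray.h>

static void example_walk(struct xarray *cache)
{
	unsigned long min = 0;
	void *obj;

	/* xa_find() updates @min to the index of the entry it returned. */
	while ((obj = xa_find(cache, &min, ULONG_MAX, XA_PRESENT))) {
		/* ... process @obj without holding any lock ... */
		min++;			/* resume after the entry we just handled */
		cond_resched();
	}
}
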
-int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
+int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_root *root = dir->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = d_inode(dentry);
struct btrfs_root *dest = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_block_rsv block_rsv;
u64 root_flags;
+ u64 qgroup_reserved = 0;
int ret;
+ down_write(&fs_info->subvol_sem);
+
/*
* Don't allow to delete a subvolume with send in progress. This is
* inside the inode lock so the error handling that has to drop the bit
@@ -4455,19 +4671,26 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
- dest->root_key.objectid);
- return -EPERM;
+ btrfs_root_id(dest));
+ ret = -EPERM;
+ goto out_up_write;
+ }
+ if (atomic_read(&dest->nr_swapfiles)) {
+ spin_unlock(&dest->root_item_lock);
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu with active swapfile",
+ btrfs_root_id(root));
+ ret = -EPERM;
+ goto out_up_write;
}
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags | BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
- down_write(&fs_info->subvol_sem);
-
ret = may_destroy_subvol(dest);
if (ret)
- goto out_up_write;
+ goto out_undead;
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
@@ -4477,26 +4700,29 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
*/
ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
if (ret)
- goto out_up_write;
+ goto out_undead;
+ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_release;
}
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
- btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
+ btrfs_record_snapshot_destroy(trans, dir);
ret = btrfs_unlink_subvol(trans, dir, dentry);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
ret = btrfs_record_root_in_trans(trans, dest);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4509,17 +4735,16 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
ret = btrfs_insert_orphan_item(trans,
fs_info->tree_root,
- dest->root_key.objectid);
- if (ret) {
+ btrfs_root_id(dest));
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
}
ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
- BTRFS_UUID_KEY_SUBVOL,
- dest->root_key.objectid);
- if (ret && ret != -ENOENT) {
+ BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
+ if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4527,8 +4752,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
ret = btrfs_uuid_tree_remove(trans,
dest->root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- dest->root_key.objectid);
- if (ret && ret != -ENOENT) {
+ btrfs_root_id(dest));
+ if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4542,16 +4767,20 @@ out_end_trans:
ret = btrfs_end_transaction(trans);
inode->i_flags |= S_DEAD;
out_release:
- btrfs_subvolume_release_metadata(root, &block_rsv);
-out_up_write:
- up_write(&fs_info->subvol_sem);
+ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+out_undead:
if (ret) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
- } else {
+ }
+out_up_write:
+ up_write(&fs_info->subvol_sem);
+ if (!ret) {
d_invalidate(dentry);
btrfs_prune_dentries(dest);
ASSERT(dest->send_in_progress == 0);
@@ -4560,456 +4789,151 @@ out_up_write:
return ret;
}
-static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
{
- struct inode *inode = d_inode(dentry);
- int err = 0;
+ struct btrfs_inode *dir = BTRFS_I(vfs_dir);
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ int ret = 0;
struct btrfs_trans_handle *trans;
- u64 last_unlink_trans;
+ struct fscrypt_name fname;
- if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+ if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
- if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
+ if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
+ btrfs_err(fs_info,
+ "extent tree v2 doesn't support snapshot deletion yet");
+ return -EOPNOTSUPP;
+ }
return btrfs_delete_subvolume(dir, dentry);
-
- trans = __unlink_start_trans(dir);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- err = btrfs_unlink_subvol(trans, dir, dentry);
- goto out;
}
- err = btrfs_orphan_add(trans, BTRFS_I(inode));
- if (err)
- goto out;
-
- last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
-
- /* now the directory is empty */
- err = btrfs_unlink_inode(trans, BTRFS_I(dir),
- BTRFS_I(d_inode(dentry)), dentry->d_name.name,
- dentry->d_name.len);
- if (!err) {
- btrfs_i_size_write(BTRFS_I(inode), 0);
- /*
- * Propagate the last_unlink_trans value of the deleted dir to
- * its parent directory. This is to prevent an unrecoverable
- * log tree in the case we do something like this:
- * 1) create dir foo
- * 2) create snapshot under dir foo
- * 3) delete the snapshot
- * 4) rmdir foo
- * 5) mkdir foo
- * 6) fsync foo or some file inside foo
- */
- if (last_unlink_trans >= trans->transid)
- BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
- }
-out:
- btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
-
- return err;
-}
-
-/*
- * Return this if we need to call truncate_block for the last bit of the
- * truncate.
- */
-#define NEED_TRUNCATE_BLOCK 1
-
-/*
- * Remove inode items from a given root.
- *
- * @trans: A transaction handle.
- * @root: The root from which to remove items.
- * @inode: The inode whose items we want to remove.
- * @new_size: The new i_size for the inode. This is only applicable when
- * @min_type is BTRFS_EXTENT_DATA_KEY, must be 0 otherwise.
- * @min_type: The minimum key type to remove. All keys with a type
- * greater than this value are removed and all keys with
- * this type are removed only if their offset is >= @new_size.
- * @extents_found: Output parameter that will contain the number of file
- * extent items that were removed or adjusted to the new
- * inode i_size. The caller is responsible for initializing
- * the counter. Also, it can be NULL if the caller does not
- * need this counter.
- *
- * Remove all keys associated with the inode from the given root that have a key
- * with a type greater than or equals to @min_type. When @min_type has a value of
- * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value
- * greater than or equals to @new_size. If a file extent item that starts before
- * @new_size and ends after it is found, its length is adjusted.
- *
- * Returns: 0 on success, < 0 on error and NEED_TRUNCATE_BLOCK when @min_type is
- * BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
- */
-int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *inode,
- u64 new_size, u32 min_type,
- u64 *extents_found)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
- struct extent_buffer *leaf;
- struct btrfs_file_extent_item *fi;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u64 extent_start = 0;
- u64 extent_num_bytes = 0;
- u64 extent_offset = 0;
- u64 item_end = 0;
- u64 last_size = new_size;
- u32 found_type = (u8)-1;
- int found_extent;
- int del_item;
- int pending_del_nr = 0;
- int pending_del_slot = 0;
- int extent_type = -1;
- int ret;
- u64 ino = btrfs_ino(inode);
- u64 bytes_deleted = 0;
- bool be_nice = false;
- bool should_throttle = false;
- const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
- struct extent_state *cached_state = NULL;
-
- BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
-
- /*
- * For non-free space inodes and non-shareable roots, we want to back
- * off from time to time. This means all inodes in subvolume roots,
- * reloc roots, and data reloc roots.
- */
- if (!btrfs_is_free_space_inode(inode) &&
- test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- be_nice = true;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- path->reada = READA_BACK;
+ ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
+ if (ret)
+ return ret;
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- lock_extent_bits(&inode->io_tree, lock_start, (u64)-1,
- &cached_state);
+ /* This needs to handle no-key deletions later on */
- /*
- * We want to drop from the next block forward in case this
- * new size is not block aligned since we will be keeping the
- * last block of the extent just the way it is.
- */
- btrfs_drop_extent_cache(inode, ALIGN(new_size,
- fs_info->sectorsize),
- (u64)-1, 0);
+ trans = __unlink_start_trans(dir);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_notrans;
}
/*
- * This function is also used to drop the items in the log tree before
- * we relog the inode, so if root != BTRFS_I(inode)->root, it means
- * it is used to drop the logged items. So we shouldn't kill the delayed
- * items.
+ * Propagate the last_unlink_trans value of the deleted dir to its
+ * parent directory. This is to prevent an unrecoverable log tree in the
+ * case we do something like this:
+ * 1) create dir foo
+ * 2) create snapshot under dir foo
+ * 3) delete the snapshot
+ * 4) rmdir foo
+ * 5) mkdir foo
+ * 6) fsync foo or some file inside foo
+ *
+ * This is because we can't unlink other roots when replaying the dir
+ * deletes for directory foo.
*/
- if (min_type == 0 && root == inode->root)
- btrfs_kill_delayed_inode_items(inode);
-
- key.objectid = ino;
- key.offset = (u64)-1;
- key.type = (u8)-1;
+ if (inode->last_unlink_trans >= trans->transid)
+ btrfs_record_snapshot_destroy(trans, dir);
-search_again:
- /*
- * with a 16K leaf size and 128MB extents, you can actually queue
- * up a huge file in a single leaf. Most of the time that
- * bytes_deleted is > 0, it will be huge by the time we get here
- */
- if (be_nice && bytes_deleted > SZ_32M &&
- btrfs_should_end_transaction(trans)) {
- ret = -EAGAIN;
+ if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ ret = btrfs_unlink_subvol(trans, dir, dentry);
goto out;
}
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
+ ret = btrfs_orphan_add(trans, inode);
+ if (ret)
goto out;
- if (ret > 0) {
- ret = 0;
- /* there are no items in the tree for us to truncate, we're
- * done
- */
- if (path->slots[0] == 0)
- goto out;
- path->slots[0]--;
- }
-
- while (1) {
- u64 clear_start = 0, clear_len = 0;
-
- fi = NULL;
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- found_type = found_key.type;
-
- if (found_key.objectid != ino)
- break;
-
- if (found_type < min_type)
- break;
+ /* now the directory is empty */
+ ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
+ if (!ret)
+ btrfs_i_size_write(inode, 0);
+out:
+ btrfs_end_transaction(trans);
+out_notrans:
+ btrfs_btree_balance_dirty(fs_info);
+ fscrypt_free_filename(&fname);
- item_end = found_key.offset;
- if (found_type == BTRFS_EXTENT_DATA_KEY) {
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(leaf, fi);
- if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
- item_end +=
- btrfs_file_extent_num_bytes(leaf, fi);
-
- trace_btrfs_truncate_show_fi_regular(
- inode, leaf, fi, found_key.offset);
- } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- item_end += btrfs_file_extent_ram_bytes(leaf,
- fi);
-
- trace_btrfs_truncate_show_fi_inline(
- inode, leaf, fi, path->slots[0],
- found_key.offset);
- }
- item_end--;
- }
- if (found_type > min_type) {
- del_item = 1;
- } else {
- if (item_end < new_size)
- break;
- if (found_key.offset >= new_size)
- del_item = 1;
- else
- del_item = 0;
- }
- found_extent = 0;
- /* FIXME, shrink the extent if the ref count is only 1 */
- if (found_type != BTRFS_EXTENT_DATA_KEY)
- goto delete;
-
- if (extents_found != NULL)
- (*extents_found)++;
-
- if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
- u64 num_dec;
-
- clear_start = found_key.offset;
- extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
- if (!del_item) {
- u64 orig_num_bytes =
- btrfs_file_extent_num_bytes(leaf, fi);
- extent_num_bytes = ALIGN(new_size -
- found_key.offset,
- fs_info->sectorsize);
- clear_start = ALIGN(new_size, fs_info->sectorsize);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_num_bytes);
- num_dec = (orig_num_bytes -
- extent_num_bytes);
- if (test_bit(BTRFS_ROOT_SHAREABLE,
- &root->state) &&
- extent_start != 0)
- inode_sub_bytes(&inode->vfs_inode,
- num_dec);
- btrfs_mark_buffer_dirty(leaf);
- } else {
- extent_num_bytes =
- btrfs_file_extent_disk_num_bytes(leaf,
- fi);
- extent_offset = found_key.offset -
- btrfs_file_extent_offset(leaf, fi);
-
- /* FIXME blocksize != 4096 */
- num_dec = btrfs_file_extent_num_bytes(leaf, fi);
- if (extent_start != 0) {
- found_extent = 1;
- if (test_bit(BTRFS_ROOT_SHAREABLE,
- &root->state))
- inode_sub_bytes(&inode->vfs_inode,
- num_dec);
- }
- }
- clear_len = num_dec;
- } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- /*
- * we can't truncate inline items that have had
- * special encodings
- */
- if (!del_item &&
- btrfs_file_extent_encryption(leaf, fi) == 0 &&
- btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
- btrfs_file_extent_compression(leaf, fi) == 0) {
- u32 size = (u32)(new_size - found_key.offset);
-
- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
- size = btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(path, size, 1);
- } else if (!del_item) {
- /*
- * We have to bail so the last_size is set to
- * just before this extent.
- */
- ret = NEED_TRUNCATE_BLOCK;
- break;
- } else {
- /*
- * Inline extents are special, we just treat
- * them as a full sector worth in the file
- * extent tree just for simplicity sake.
- */
- clear_len = fs_info->sectorsize;
- }
+ return ret;
+}
- if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- inode_sub_bytes(&inode->vfs_inode,
- item_end + 1 - new_size);
- }
-delete:
- /*
- * We use btrfs_truncate_inode_items() to clean up log trees for
- * multiple fsyncs, and in this case we don't want to clear the
- * file extent range because it's just the log.
- */
- if (root == inode->root) {
- ret = btrfs_inode_clear_file_extent_range(inode,
- clear_start, clear_len);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- break;
- }
- }
+static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
+{
+ ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
+ blockstart, blocksize);
- if (del_item)
- last_size = found_key.offset;
- else
- last_size = new_size;
- if (del_item) {
- if (!pending_del_nr) {
- /* no pending yet, add ourselves */
- pending_del_slot = path->slots[0];
- pending_del_nr = 1;
- } else if (pending_del_nr &&
- path->slots[0] + 1 == pending_del_slot) {
- /* hop on the pending chunk */
- pending_del_nr++;
- pending_del_slot = path->slots[0];
- } else {
- BUG();
- }
- } else {
- break;
- }
- should_throttle = false;
+ if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
+ return true;
+ return false;
+}
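
A worked example of how is_inside_block() classifies @offset against the head and tail blocks further down in btrfs_truncate_block() (numbers are illustrative only):

/*
 * blocksize = 4096, start = 1000, end = 9000:
 *
 *   head block: round_down(1000, 4096) = 0    -> [0, 4095]
 *   tail block: round_down(9000, 4096) = 8192 -> [8192, 12287]
 *
 * is_inside_block(1000, 0, 4096)    == true   (offset in the head block)
 * is_inside_block(9000, 8192, 4096) == true   (offset in the tail block)
 * is_inside_block(5000, 0, 4096)    == false and
 * is_inside_block(5000, 8192, 4096) == false  (block fully inside the range,
 *                                              nothing to zero for it)
 */
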
- if (found_extent &&
- root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- struct btrfs_ref ref = { 0 };
+static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
+{
+ const pgoff_t index = (start >> PAGE_SHIFT);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct folio *folio;
+ u64 zero_start;
+ u64 zero_end;
+ int ret = 0;
- bytes_deleted += extent_num_bytes;
+again:
+ folio = filemap_lock_folio(mapping, index);
+ /* No folio present. */
+ if (IS_ERR(folio))
+ return 0;
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
- extent_start, extent_num_bytes, 0);
- btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
- ino, extent_offset,
- root->root_key.objectid, false);
- ret = btrfs_free_extent(trans, &ref);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- break;
- }
- if (be_nice) {
- if (btrfs_should_throttle_delayed_refs(trans))
- should_throttle = true;
- }
+ if (!folio_test_uptodate(folio)) {
+ ret = btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
}
-
- if (found_type == BTRFS_INODE_ITEM_KEY)
- break;
-
- if (path->slots[0] == 0 ||
- path->slots[0] != pending_del_slot ||
- should_throttle) {
- if (pending_del_nr) {
- ret = btrfs_del_items(trans, root, path,
- pending_del_slot,
- pending_del_nr);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- break;
- }
- pending_del_nr = 0;
- }
- btrfs_release_path(path);
-
- /*
- * We can generate a lot of delayed refs, so we need to
- * throttle every once and a while and make sure we're
- * adding enough space to keep up with the work we are
- * generating. Since we hold a transaction here we
- * can't flush, and we don't want to FLUSH_LIMIT because
- * we could have generated too many delayed refs to
- * actually allocate, so just bail if we're short and
- * let the normal reservation dance happen higher up.
- */
- if (should_throttle) {
- ret = btrfs_delayed_refs_rsv_refill(fs_info,
- BTRFS_RESERVE_NO_FLUSH);
- if (ret) {
- ret = -EAGAIN;
- break;
- }
- }
- goto search_again;
- } else {
- path->slots[0]--;
+ if (unlikely(!folio_test_uptodate(folio))) {
+ ret = -EIO;
+ goto out_unlock;
}
}
-out:
- if (ret >= 0 && pending_del_nr) {
- int err;
+ folio_wait_writeback(folio);
- err = btrfs_del_items(trans, root, path, pending_del_slot,
- pending_del_nr);
- if (err) {
- btrfs_abort_transaction(trans, err);
- ret = err;
- }
- }
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- ASSERT(last_size >= new_size);
- if (!ret && last_size > new_size)
- last_size = new_size;
- btrfs_inode_safe_disk_i_size_write(inode, last_size);
- unlock_extent_cached(&inode->io_tree, lock_start, (u64)-1,
- &cached_state);
- }
+ /*
+ * We do not need to lock extents nor wait for OE, as it's already
+ * beyond EOF.
+ */
- btrfs_free_path(path);
+ zero_start = max_t(u64, folio_pos(folio), start);
+ zero_end = folio_next_pos(folio);
+ folio_zero_range(folio, zero_start - folio_pos(folio),
+ zero_end - zero_start);
+
+out_unlock:
+ folio_unlock(folio);
+ folio_put(folio);
return ret;
}
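
truncate_block_zero_beyond_eof() above uses the usual pagecache retry dance: after a read the folio lock was dropped, so once it is re-locked the mapping must be re-checked and the lookup retried if the folio was truncated in the meantime. A self-contained sketch of that pattern using generic pagecache helpers (error handling trimmed, names hypothetical):

#include <linux/pagemap.h>

static struct folio *example_get_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

again:
	/* Returns an uptodate (but unlocked) folio or an ERR_PTR. */
	folio = read_mapping_folio(mapping, index, NULL);
	if (IS_ERR(folio))
		return folio;

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* Truncated while we did not hold the lock, try again. */
		folio_unlock(folio);
		folio_put(folio);
		goto again;
	}
	if (!folio_test_uptodate(folio)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EIO);
	}
	return folio;
}
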
/*
- * btrfs_truncate_block - read, zero a chunk and write a block
- * @inode - inode that we're zeroing
- * @from - the offset to start zeroing
- * @len - the length to zero, 0 to zero the entire range respective to the
- * offset
- * @front - zero up to the offset instead of from the offset on
+ * Handle the truncation of a fs block.
+ *
+ * @inode - inode that we're zeroing
+ * @offset - the file offset of the block to truncate
+ * The value must be inside [@start, @end], and the function will do
+ * extra checks if the block that covers @offset needs to be zeroed.
+ * @start - the start file offset of the range we want to zero
+ * @end - the end (inclusive) file offset of the range we want to zero.
*
- * This will find the block for the "from" offset and cow the block and zero the
- * part we want to zero. This is used with truncate and hole punching.
+ * If the range is not block aligned, read out the folio that covers @offset,
+ * and if needed zero blocks that are inside the folio and covered by [@start, @end).
+ * If @start or @end + 1 lands inside a block, that block will be marked dirty
+ * for writeback.
+ *
+ * This is utilized by hole punch, zero range, file expansion.
*/
-int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
- int front)
+int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
@@ -5019,33 +4943,72 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
struct extent_changeset *data_reserved = NULL;
bool only_release_metadata = false;
u32 blocksize = fs_info->sectorsize;
- pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (blocksize - 1);
- struct page *page;
+ pgoff_t index = (offset >> PAGE_SHIFT);
+ struct folio *folio;
gfp_t mask = btrfs_alloc_write_mask(mapping);
- size_t write_bytes = blocksize;
int ret = 0;
+ const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
+ blocksize);
+ const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
+ blocksize);
+ bool need_truncate_head = false;
+ bool need_truncate_tail = false;
+ u64 zero_start;
+ u64 zero_end;
u64 block_start;
u64 block_end;
- if (IS_ALIGNED(offset, blocksize) &&
- (!len || IS_ALIGNED(len, blocksize)))
+ /* @offset should be inside the range. */
+ ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
+ offset, start, end);
+
+ /* The range is aligned at both ends. */
+ if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
+ /*
+ * For block size < page size case, we may have polluted blocks
+ * beyond EOF. So we also need to zero them out.
+ */
+ if (end == (u64)-1 && blocksize < PAGE_SIZE)
+ ret = truncate_block_zero_beyond_eof(inode, start);
+ goto out;
+ }
+
+ /*
+ * @offset may not be inside the head nor tail block. In that case we
+ * don't need to do anything.
+ */
+ if (!in_head_block && !in_tail_block)
+ goto out;
+
+ /*
+ * Skip the truncation if the range in the target block is already aligned.
+ * The seemingly complex check will also handle the same block case.
+ */
+ if (in_head_block && !IS_ALIGNED(start, blocksize))
+ need_truncate_head = true;
+ if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
+ need_truncate_tail = true;
+ if (!need_truncate_head && !need_truncate_tail)
goto out;
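/*
 * Worked example: with a 4K block size, zeroing the range [2048, 10239]
 * gives a head block of [0, 4095] and a tail block of [8192, 12287].
 *   @offset = 2048  -> head block, @start unaligned, zero [2048, 4095]
 *   @offset = 10239 -> tail block, @end + 1 unaligned, zero [8192, 10239]
 *   @offset = 5000  -> inside neither block, nothing to do (caught above)
 */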
- block_start = round_down(from, blocksize);
+ block_start = round_down(offset, blocksize);
block_end = block_start + blocksize - 1;
ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
- blocksize);
+ blocksize, false);
if (ret < 0) {
- if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
- /* For nocow case, no need to reserve data space */
+ size_t write_bytes = blocksize;
+
+ if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
+ /* For nocow case, no need to reserve data space. */
+ ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
+ write_bytes, blocksize);
only_release_metadata = true;
} else {
goto out;
}
}
- ret = btrfs_delalloc_reserve_metadata(inode, blocksize);
+ ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
if (ret < 0) {
if (!only_release_metadata)
btrfs_free_reserved_data_space(inode, data_reserved,
@@ -5053,77 +5016,97 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
goto out;
}
again:
- page = find_or_create_page(mapping, index, mask);
- if (!page) {
- btrfs_delalloc_release_space(inode, data_reserved, block_start,
- blocksize, true);
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+ if (IS_ERR(folio)) {
+ if (only_release_metadata)
+ btrfs_delalloc_release_metadata(inode, blocksize, true);
+ else
+ btrfs_delalloc_release_space(inode, data_reserved,
+ block_start, blocksize, true);
btrfs_delalloc_release_extents(inode, blocksize);
- ret = -ENOMEM;
+ ret = PTR_ERR(folio);
goto out;
}
- ret = set_page_extent_mapped(page);
- if (ret < 0)
- goto out_unlock;
- if (!PageUptodate(page)) {
- ret = btrfs_readpage(NULL, page);
- lock_page(page);
- if (page->mapping != mapping) {
- unlock_page(page);
- put_page(page);
+ if (!folio_test_uptodate(folio)) {
+ ret = btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
goto again;
}
- if (!PageUptodate(page)) {
+ if (unlikely(!folio_test_uptodate(folio))) {
ret = -EIO;
goto out_unlock;
}
}
- wait_on_page_writeback(page);
- lock_extent_bits(io_tree, block_start, block_end, &cached_state);
+ /*
+ * We unlock the folio after the io is completed and then re-lock it
+ * above. release_folio() could have come in between that and cleared
+ * folio private, but left the folio in the mapping. Set the folio mapped
+ * here to make sure it's properly set for the subpage stuff.
+ */
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0)
+ goto out_unlock;
+
+ folio_wait_writeback(folio);
+
+ btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
- unlock_extent_cached(io_tree, block_start, block_end,
- &cached_state);
- unlock_page(page);
- put_page(page);
- btrfs_start_ordered_extent(ordered, 1);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
+ folio_unlock(folio);
+ folio_put(folio);
+ btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
}
- clear_extent_bit(&inode->io_tree, block_start, block_end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state);
+ btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ &cached_state);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state);
if (ret) {
- unlock_extent_cached(io_tree, block_start, block_end,
- &cached_state);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
goto out_unlock;
}
- if (offset != blocksize) {
- if (!len)
- len = blocksize - offset;
- if (front)
- memzero_page(page, (block_start - page_offset(page)),
- offset);
- else
- memzero_page(page, (block_start - page_offset(page)) + offset,
- len);
- flush_dcache_page(page);
+ if (end == (u64)-1) {
+ /*
+ * We're truncating beyond EOF. The remaining blocks are normally
+ * already holes, so there is no need to zero them again, but when the
+ * fs block size is smaller than the page size, memory mapped writes
+ * can pollute blocks beyond EOF.
+ *
+ * Although such polluted blocks never reach disk, they still pollute
+ * the page cache and have to be zeroed out here.
+ */
+ zero_start = max_t(u64, folio_pos(folio), start);
+ zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
+ } else {
+ zero_start = max_t(u64, block_start, start);
+ zero_end = min_t(u64, block_end, end);
}
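/*
 * Example for the @end == (u64)-1 case: with a 4K block size on a 64K
 * page system, an expanding truncate from i_size 10000 lands in the
 * folio covering [0, 65535], so zero_start = 10000 and zero_end = 65535,
 * wiping any mmap-polluted blocks beyond the old EOF within this folio.
 */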
- btrfs_page_clear_checked(fs_info, page, block_start,
- block_end + 1 - block_start);
- btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
- unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
+ folio_zero_range(folio, zero_start - folio_pos(folio),
+ zero_end - zero_start + 1);
+
+ btrfs_folio_clear_checked(fs_info, folio, block_start,
+ block_end + 1 - block_start);
+ btrfs_folio_set_dirty(fs_info, folio, block_start,
+ block_end + 1 - block_start);
if (only_release_metadata)
- set_extent_bit(&inode->io_tree, block_start, block_end,
- EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);
+ btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
+ EXTENT_NORESERVE, &cached_state);
+
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
out_unlock:
if (ret) {
@@ -5134,8 +5117,8 @@ out_unlock:
block_start, blocksize, true);
}
btrfs_delalloc_release_extents(inode, blocksize);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
if (only_release_metadata)
btrfs_check_nocow_unlock(inode);
@@ -5143,9 +5126,9 @@ out:
return ret;
}
-static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
- u64 offset, u64 len)
+static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_drop_extents_args drop_args = { 0 };
@@ -5174,19 +5157,18 @@ static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
drop_args.drop_cache = true;
ret = btrfs_drop_extents(trans, root, inode, &drop_args);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
}
- ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
- offset, 0, 0, len, 0, len, 0, 0, 0);
+ ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
if (ret) {
btrfs_abort_transaction(trans, ret);
} else {
btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
return ret;
@@ -5205,22 +5187,21 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
- struct extent_map_tree *em_tree = &inode->extent_tree;
u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
u64 block_end = ALIGN(size, fs_info->sectorsize);
u64 last_byte;
u64 cur_offset;
u64 hole_size;
- int err = 0;
+ int ret = 0;
/*
* If our size started in the middle of a block we need to zero out the
* rest of the block before we expand the i_size, otherwise we could
* expose stale data.
*/
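/*
 * E.g. with a 4K sector size and oldsize = 6000, the call below zeroes
 * [6000, 8191] (and, for larger page sizes, the rest of the folio beyond
 * EOF as well), so the expanded range never exposes stale data.
 */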
- err = btrfs_truncate_block(inode, oldsize, 0, 0);
- if (err)
- return err;
+ ret = btrfs_truncate_block(inode, oldsize, oldsize, -1);
+ if (ret)
+ return ret;
if (size <= hole_start)
return 0;
@@ -5229,76 +5210,62 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
&cached_state);
cur_offset = hole_start;
while (1) {
- em = btrfs_get_extent(inode, NULL, 0, cur_offset,
- block_end - cur_offset);
+ em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
if (IS_ERR(em)) {
- err = PTR_ERR(em);
+ ret = PTR_ERR(em);
em = NULL;
break;
}
- last_byte = min(extent_map_end(em), block_end);
+ last_byte = min(btrfs_extent_map_end(em), block_end);
last_byte = ALIGN(last_byte, fs_info->sectorsize);
hole_size = last_byte - cur_offset;
- if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
struct extent_map *hole_em;
- err = maybe_insert_hole(root, inode, cur_offset,
- hole_size);
- if (err)
+ ret = maybe_insert_hole(inode, cur_offset, hole_size);
+ if (ret)
break;
- err = btrfs_inode_set_file_extent_range(inode,
+ ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
- if (err)
+ if (ret)
break;
- btrfs_drop_extent_cache(inode, cur_offset,
- cur_offset + hole_size - 1, 0);
- hole_em = alloc_extent_map();
+ hole_em = btrfs_alloc_extent_map();
if (!hole_em) {
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &inode->runtime_flags);
+ btrfs_drop_extent_map_range(inode, cur_offset,
+ cur_offset + hole_size - 1,
+ false);
+ btrfs_set_inode_full_sync(inode);
goto next;
}
hole_em->start = cur_offset;
hole_em->len = hole_size;
- hole_em->orig_start = cur_offset;
- hole_em->block_start = EXTENT_MAP_HOLE;
- hole_em->block_len = 0;
- hole_em->orig_block_len = 0;
+ hole_em->disk_bytenr = EXTENT_MAP_HOLE;
+ hole_em->disk_num_bytes = 0;
hole_em->ram_bytes = hole_size;
- hole_em->compress_type = BTRFS_COMPRESS_NONE;
- hole_em->generation = fs_info->generation;
-
- while (1) {
- write_lock(&em_tree->lock);
- err = add_extent_mapping(em_tree, hole_em, 1);
- write_unlock(&em_tree->lock);
- if (err != -EEXIST)
- break;
- btrfs_drop_extent_cache(inode, cur_offset,
- cur_offset +
- hole_size - 1, 0);
- }
- free_extent_map(hole_em);
+ hole_em->generation = btrfs_get_fs_generation(fs_info);
+
+ ret = btrfs_replace_extent_map_range(inode, hole_em, true);
+ btrfs_free_extent_map(hole_em);
} else {
- err = btrfs_inode_set_file_extent_range(inode,
+ ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
- if (err)
+ if (ret)
break;
}
next:
- free_extent_map(em);
+ btrfs_free_extent_map(em);
em = NULL;
cur_offset = last_byte;
if (cur_offset >= block_end)
break;
}
- free_extent_map(em);
- unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
- return err;
+ btrfs_free_extent_map(em);
+ btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
+ return ret;
}
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
@@ -5318,9 +5285,10 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
*/
if (newsize != oldsize) {
inode_inc_iversion(inode);
- if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
- inode->i_ctime = inode->i_mtime =
- current_time(inode);
+ if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
+ }
}
if (newsize > oldsize) {
@@ -5347,14 +5315,14 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
i_size_write(inode, newsize);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
pagecache_isize_extended(inode, oldsize, newsize);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_end_transaction(trans);
} else {
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
if (btrfs_is_zoned(fs_info)) {
- ret = btrfs_wait_ordered_range(inode,
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode),
ALIGN(newsize, fs_info->sectorsize),
(u64)-1);
if (ret)
@@ -5374,9 +5342,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
inode_dio_wait(inode);
- ret = btrfs_truncate(inode, newsize == oldsize);
+ ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
if (ret && inode->i_nlink) {
- int err;
+ int ret2;
/*
* Truncate failed, so fix up the in-memory size. We
@@ -5384,9 +5352,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
* wait for disk_i_size to be stable and then update the
* in-memory size to match.
*/
- err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
- if (err)
- return err;
+ ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
+ if (ret2)
+ return ret2;
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
}
}
@@ -5394,76 +5362,60 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
return ret;
}
-static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
- int err;
+ int ret;
if (btrfs_root_readonly(root))
return -EROFS;
- err = setattr_prepare(mnt_userns, dentry, attr);
- if (err)
- return err;
+ ret = setattr_prepare(idmap, dentry, attr);
+ if (ret)
+ return ret;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
- err = btrfs_setsize(inode, attr);
- if (err)
- return err;
+ ret = btrfs_setsize(inode, attr);
+ if (ret)
+ return ret;
}
if (attr->ia_valid) {
- setattr_copy(mnt_userns, inode, attr);
+ setattr_copy(idmap, inode, attr);
inode_inc_iversion(inode);
- err = btrfs_dirty_inode(inode);
+ ret = btrfs_dirty_inode(BTRFS_I(inode));
- if (!err && attr->ia_valid & ATTR_MODE)
- err = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
+ if (!ret && attr->ia_valid & ATTR_MODE)
+ ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
}
- return err;
+ return ret;
}
/*
- * While truncating the inode pages during eviction, we get the VFS calling
- * btrfs_invalidatepage() against each page of the inode. This is slow because
- * the calls to btrfs_invalidatepage() result in a huge amount of calls to
- * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
- * extent_state structures over and over, wasting lots of time.
+ * While truncating the inode pages during eviction, we get the VFS
+ * calling btrfs_invalidate_folio() against each folio of the inode. This
+ * is slow because the calls to btrfs_invalidate_folio() result in a
+ * huge amount of calls to btrfs_lock_extent() and btrfs_clear_extent_bit(),
+ * which keep merging and splitting extent_state structures over and over,
+ * wasting lots of time.
*
- * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
- * those expensive operations on a per page basis and do only the ordered io
- * finishing, while we release here the extent_map and extent_state structures,
- * without the excessive merging and splitting.
+ * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
+ * skip all those expensive operations on a per folio basis and do only
+ * the ordered io finishing, while we release here the extent_map and
+ * extent_state structures, without the excessive merging and splitting.
*/
static void evict_inode_truncate_pages(struct inode *inode)
{
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
struct rb_node *node;
- ASSERT(inode->i_state & I_FREEING);
+ ASSERT(inode_state_read_once(inode) & I_FREEING);
truncate_inode_pages_final(&inode->i_data);
- write_lock(&map_tree->lock);
- while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
- struct extent_map *em;
-
- node = rb_first_cached(&map_tree->map);
- em = rb_entry(node, struct extent_map, rb_node);
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
- clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
- remove_extent_mapping(map_tree, em);
- free_extent_map(em);
- if (need_resched()) {
- write_unlock(&map_tree->lock);
- cond_resched();
- write_lock(&map_tree->lock);
- }
- }
- write_unlock(&map_tree->lock);
+ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
/*
* Keep looping until we have no more ranges in the io tree.
@@ -5496,24 +5448,23 @@ static void evict_inode_truncate_pages(struct inode *inode)
state_flags = state->state;
spin_unlock(&io_tree->lock);
- lock_extent_bits(io_tree, start, end, &cached_state);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
/*
* If still has DELALLOC flag, the extent didn't reach disk,
* and its reserved space won't be freed by delayed_ref.
* So we need to free its reserved space here.
- * (Refer to comment in btrfs_invalidatepage, case 2)
+ * (Refer to comment in btrfs_invalidate_folio, case 2)
*
* Note, end is the bytenr of last byte, so we need + 1 here.
*/
if (state_flags & EXTENT_DELALLOC)
btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
- end - start + 1);
+ end - start + 1, NULL);
- clear_extent_bit(io_tree, start, end,
- EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
- &cached_state);
+ btrfs_clear_extent_bit(io_tree, start, end,
+ EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
+ &cached_state);
cond_resched();
spin_lock(&io_tree->lock);
@@ -5525,9 +5476,8 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
struct btrfs_block_rsv *rsv)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_trans_handle *trans;
- u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
+ u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
int ret;
/*
@@ -5540,18 +5490,16 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
* above. We reserve our extra bit here because we generate a ton of
* delayed refs activity by truncating.
*
- * If we cannot make our reservation we'll attempt to steal from the
- * global reserve, because we really want to be able to free up space.
+ * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
+ * if we fail to make this reservation we can re-try without the
+ * delayed_refs_extra so we can make some forward progress.
*/
- ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra,
+ ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
BTRFS_RESERVE_FLUSH_EVICT);
if (ret) {
- /*
- * Try to steal from the global reserve if there is space for
- * it.
- */
- if (btrfs_check_space_for_delayed_refs(fs_info) ||
- btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) {
+ ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
+ BTRFS_RESERVE_FLUSH_EVICT);
+ if (ret) {
btrfs_warn(fs_info,
"could not allocate space for delete; will truncate on mount");
return ERR_PTR(-ENOSPC);
@@ -5567,17 +5515,17 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
trans->block_rsv = &fs_info->trans_block_rsv;
trans->bytes_reserved = delayed_refs_extra;
btrfs_block_rsv_migrate(rsv, trans->block_rsv,
- delayed_refs_extra, 1);
+ delayed_refs_extra, true);
}
return trans;
}
void btrfs_evict_inode(struct inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_rsv *rsv;
+ struct btrfs_block_rsv rsv;
int ret;
trace_btrfs_inode_evict(inode);
@@ -5588,54 +5536,74 @@ void btrfs_evict_inode(struct inode *inode)
return;
}
+ fs_info = inode_to_fs_info(inode);
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
((btrfs_root_refs(&root->root_item) != 0 &&
- root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
+ btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
btrfs_is_free_space_inode(BTRFS_I(inode))))
- goto no_delete;
+ goto out;
if (is_bad_inode(inode))
- goto no_delete;
-
- btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
+ goto out;
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
- goto no_delete;
+ goto out;
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
- root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
- goto no_delete;
+ btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
+ goto out;
}
+ /*
+ * This makes sure the inode item in tree is uptodate and the space for
+ * the inode update is released.
+ */
ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
if (ret)
- goto no_delete;
+ goto out;
+
+ /*
+ * This drops any pending insert or delete operations we have for this
+ * inode. We could have a delayed dir index deletion queued up, but
+ * we're removing the inode completely so that'll be taken care of in
+ * the truncate.
+ */
+ btrfs_kill_delayed_inode_items(BTRFS_I(inode));
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv)
- goto no_delete;
- rsv->size = btrfs_calc_metadata_size(fs_info, 1);
- rsv->failfast = 1;
+ btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
+ rsv.size = btrfs_calc_metadata_size(fs_info, 1);
+ rsv.failfast = true;
btrfs_i_size_write(BTRFS_I(inode), 0);
while (1) {
- trans = evict_refill_and_join(root, rsv);
+ struct btrfs_truncate_control control = {
+ .inode = BTRFS_I(inode),
+ .ino = btrfs_ino(BTRFS_I(inode)),
+ .new_size = 0,
+ .min_type = 0,
+ };
+
+ trans = evict_refill_and_join(root, &rsv);
if (IS_ERR(trans))
- goto free_rsv;
+ goto out_release;
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
- ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
- 0, 0, NULL);
+ ret = btrfs_truncate_inode_items(trans, root, &control);
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty(fs_info);
+ /*
+ * We have not added new delayed items for our inode after we
+ * have flushed its delayed items, so no need to throttle on
+ * delayed items. However we have modified extent buffers.
+ */
+ btrfs_btree_balance_dirty_nodelay(fs_info);
if (ret && ret != -ENOSPC && ret != -EAGAIN)
- goto free_rsv;
+ goto out_release;
else if (!ret)
break;
}
@@ -5649,17 +5617,17 @@ void btrfs_evict_inode(struct inode *inode)
* If it turns out that we are dropping too many of these, we might want
* to add a mechanism for retrying these after a commit.
*/
- trans = evict_refill_and_join(root, rsv);
+ trans = evict_refill_and_join(root, &rsv);
if (!IS_ERR(trans)) {
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
btrfs_orphan_del(trans, BTRFS_I(inode));
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
}
-free_rsv:
- btrfs_free_block_rsv(fs_info, rsv);
-no_delete:
+out_release:
+ btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
+out:
/*
* If we didn't successfully delete, the orphan item will still be in
* the tree and we'll retry on the next mount. Again, we might also want
@@ -5677,40 +5645,50 @@ no_delete:
* If no dir entries were found, returns -ENOENT.
* If found a corrupted location in dir entry, returns -EUCLEAN.
*/
-static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
+static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
struct btrfs_key *location, u8 *type)
{
- const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
struct btrfs_dir_item *di;
- struct btrfs_path *path;
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct btrfs_root *root = dir->root;
int ret = 0;
+ struct fscrypt_name fname;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
- name, namelen, 0);
+ ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
+ if (ret < 0)
+ return ret;
+ /*
+ * fscrypt_setup_filename() should never return a positive value, but
+ * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
+ */
+ ASSERT(ret == 0);
+
+ /* This needs to handle no-key deletions later on */
+
+ di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
+ &fname.disk_name, 0);
if (IS_ERR_OR_NULL(di)) {
ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
- if (location->type != BTRFS_INODE_ITEM_KEY &&
- location->type != BTRFS_ROOT_ITEM_KEY) {
+ if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
+ location->type != BTRFS_ROOT_ITEM_KEY)) {
ret = -EUCLEAN;
btrfs_warn(root->fs_info,
-"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
- __func__, name, btrfs_ino(BTRFS_I(dir)),
- location->objectid, location->type, location->offset);
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
+ __func__, fname.disk_name.name, btrfs_ino(dir),
+ BTRFS_KEY_FMT_VALUE(location));
}
if (!ret)
- *type = btrfs_dir_type(path->nodes[0], di);
+ *type = btrfs_dir_ftype(path->nodes[0], di);
out:
- btrfs_free_path(path);
+ fscrypt_free_filename(&fname);
return ret;
}
@@ -5720,18 +5698,23 @@ out:
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
- struct inode *dir,
+ struct btrfs_inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
int err = 0;
+ struct fscrypt_name fname;
+
+ ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
+ if (ret)
+ return ret;
path = btrfs_alloc_path();
if (!path) {
@@ -5740,7 +5723,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
}
err = -ENOENT;
- key.objectid = BTRFS_I(dir)->root->root_key.objectid;
+ key.objectid = btrfs_root_id(dir->root);
key.type = BTRFS_ROOT_REF_KEY;
key.offset = location->objectid;
@@ -5753,13 +5736,12 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
- if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
- btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
+ if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
+ btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
goto out;
- ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
- (unsigned long)(ref + 1),
- dentry->d_name.len);
+ ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
+ (unsigned long)(ref + 1), fname.disk_name.len);
if (ret)
goto out;
@@ -5777,63 +5759,38 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
location->offset = 0;
err = 0;
out:
- btrfs_free_path(path);
+ fscrypt_free_filename(&fname);
return err;
}
-static void inode_tree_add(struct inode *inode)
-{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_inode *entry;
- struct rb_node **p;
- struct rb_node *parent;
- struct rb_node *new = &BTRFS_I(inode)->rb_node;
- u64 ino = btrfs_ino(BTRFS_I(inode));
- if (inode_unhashed(inode))
- return;
- parent = NULL;
- spin_lock(&root->inode_lock);
- p = &root->inode_tree.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct btrfs_inode, rb_node);
-
- if (ino < btrfs_ino(entry))
- p = &parent->rb_left;
- else if (ino > btrfs_ino(entry))
- p = &parent->rb_right;
- else {
- WARN_ON(!(entry->vfs_inode.i_state &
- (I_WILL_FREE | I_FREEING)));
- rb_replace_node(parent, new, &root->inode_tree);
- RB_CLEAR_NODE(parent);
- spin_unlock(&root->inode_lock);
- return;
- }
- }
- rb_link_node(new, parent, p);
- rb_insert_color(new, &root->inode_tree);
- spin_unlock(&root->inode_lock);
-}
-static void inode_tree_del(struct btrfs_inode *inode)
+static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
- int empty = 0;
+ struct btrfs_inode *entry;
+ bool empty = false;
- spin_lock(&root->inode_lock);
- if (!RB_EMPTY_NODE(&inode->rb_node)) {
- rb_erase(&inode->rb_node, &root->inode_tree);
- RB_CLEAR_NODE(&inode->rb_node);
- empty = RB_EMPTY_ROOT(&root->inode_tree);
- }
- spin_unlock(&root->inode_lock);
+ xa_lock(&root->inodes);
+ /*
+ * This btrfs_inode is being freed and has already been unhashed at this
+ * point. It's possible that another btrfs_inode has already been
+ * allocated for the same inode and inserted itself into the root, so
+ * don't delete it in that case.
+ *
+ * Note that this shouldn't need to allocate memory, so the gfp flags
+ * don't really matter.
+ */
+ entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
+ GFP_ATOMIC);
+ if (entry == inode)
+ empty = xa_empty(&root->inodes);
+ xa_unlock(&root->inodes);
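/*
 * Concretely: task A is evicting inode N and reaches this point while
 * task B has already re-created a fresh btrfs_inode for N via iget and
 * stored it in root->inodes. The cmpxchg above only removes the entry
 * if it still points at the btrfs_inode being freed, so B's entry is
 * left alone and only A's stale entry can make the xarray empty.
 */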
if (empty && btrfs_root_refs(&root->root_item) == 0) {
- spin_lock(&root->inode_lock);
- empty = RB_EMPTY_ROOT(&root->inode_tree);
- spin_unlock(&root->inode_lock);
+ xa_lock(&root->inodes);
+ empty = xa_empty(&root->inodes);
+ xa_unlock(&root->inodes);
if (empty)
btrfs_add_dead_root(root);
}
@@ -5844,12 +5801,13 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
- inode->i_ino = args->ino;
- BTRFS_I(inode)->location.objectid = args->ino;
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.offset = 0;
+ btrfs_set_inode_number(BTRFS_I(inode), args->ino);
BTRFS_I(inode)->root = btrfs_grab_root(args->root);
- BUG_ON(args->root && !BTRFS_I(inode)->root);
+
+ if (args->root && args->root == args->root->fs_info->tree_root &&
+ args->ino != BTRFS_BTREE_INODE_OBJECTID)
+ set_bit(BTRFS_INODE_FREE_SPACE_INODE,
+ &BTRFS_I(inode)->runtime_flags);
return 0;
}
@@ -5857,12 +5815,11 @@ static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
- return args->ino == BTRFS_I(inode)->location.objectid &&
+ return args->ino == btrfs_ino(BTRFS_I(inode)) &&
args->root == BTRFS_I(inode)->root;
}
-static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
- struct btrfs_root *root)
+static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
@@ -5871,162 +5828,188 @@ static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
args.ino = ino;
args.root = root;
- inode = iget5_locked(s, hashval, btrfs_find_actor,
+ inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
- return inode;
+ if (!inode)
+ return NULL;
+ return BTRFS_I(inode);
}
/*
- * Get an inode object given its inode number and corresponding root.
- * Path can be preallocated to prevent recursing back to iget through
- * allocator. NULL is also valid but may require an additional allocation
- * later.
+ * Get an inode object given its inode number and corresponding root. Path is
+ * preallocated to prevent recursing back to iget through allocator.
*/
-struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
- struct btrfs_root *root, struct btrfs_path *path)
+struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
+ struct btrfs_path *path)
{
- struct inode *inode;
+ struct btrfs_inode *inode;
+ int ret;
- inode = btrfs_iget_locked(s, ino, root);
+ inode = btrfs_iget_locked(ino, root);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
- int ret;
+ if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
+ return inode;
- ret = btrfs_read_locked_inode(inode, path);
- if (!ret) {
- inode_tree_add(inode);
- unlock_new_inode(inode);
- } else {
- iget_failed(inode);
- /*
- * ret > 0 can come from btrfs_search_slot called by
- * btrfs_read_locked_inode, this means the inode item
- * was not found.
- */
- if (ret > 0)
- ret = -ENOENT;
- inode = ERR_PTR(ret);
- }
- }
+ ret = btrfs_read_locked_inode(inode, path);
+ if (ret)
+ return ERR_PTR(ret);
+ unlock_new_inode(&inode->vfs_inode);
return inode;
}
-struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
+/*
+ * Get an inode object given its inode number and corresponding root.
+ */
+struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
- return btrfs_iget_path(s, ino, root, NULL);
+ struct btrfs_inode *inode;
+ struct btrfs_path *path;
+ int ret;
+
+ inode = btrfs_iget_locked(ino, root);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
+ return inode;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ iget_failed(&inode->vfs_inode);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = btrfs_read_locked_inode(inode, path);
+ btrfs_free_path(path);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (S_ISDIR(inode->vfs_inode.i_mode))
+ inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
+ unlock_new_inode(&inode->vfs_inode);
+ return inode;
}
-static struct inode *new_simple_dir(struct super_block *s,
- struct btrfs_key *key,
- struct btrfs_root *root)
+static struct btrfs_inode *new_simple_dir(struct inode *dir,
+ struct btrfs_key *key,
+ struct btrfs_root *root)
{
- struct inode *inode = new_inode(s);
+ struct timespec64 ts;
+ struct inode *vfs_inode;
+ struct btrfs_inode *inode;
- if (!inode)
+ vfs_inode = new_inode(dir->i_sb);
+ if (!vfs_inode)
return ERR_PTR(-ENOMEM);
- BTRFS_I(inode)->root = btrfs_grab_root(root);
- memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
- set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+ inode = BTRFS_I(vfs_inode);
+ inode->root = btrfs_grab_root(root);
+ inode->ref_root_id = key->objectid;
+ set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
+ set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);
- inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
+ btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
/*
* We only need lookup, the rest is read-only and there's no inode
* associated with the dentry
*/
- inode->i_op = &simple_dir_inode_operations;
- inode->i_opflags &= ~IOP_XATTR;
- inode->i_fop = &simple_dir_operations;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
- inode->i_mtime = current_time(inode);
- inode->i_atime = inode->i_mtime;
- inode->i_ctime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = inode->i_mtime;
+ vfs_inode->i_op = &simple_dir_inode_operations;
+ vfs_inode->i_opflags &= ~IOP_XATTR;
+ vfs_inode->i_fop = &simple_dir_operations;
+ vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
+
+ ts = inode_set_ctime_current(vfs_inode);
+ inode_set_mtime_to_ts(vfs_inode, ts);
+ inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
+ inode->i_otime_sec = ts.tv_sec;
+ inode->i_otime_nsec = ts.tv_nsec;
+
+ vfs_inode->i_uid = dir->i_uid;
+ vfs_inode->i_gid = dir->i_gid;
return inode;
}
-static inline u8 btrfs_inode_type(struct inode *inode)
-{
- /*
- * Compile-time asserts that generic FT_* types still match
- * BTRFS_FT_* types
- */
- BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN);
- BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE);
- BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR);
- BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV);
- BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV);
- BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO);
- BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK);
- BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK);
+static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
+static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
+static_assert(BTRFS_FT_DIR == FT_DIR);
+static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
+static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
+static_assert(BTRFS_FT_FIFO == FT_FIFO);
+static_assert(BTRFS_FT_SOCK == FT_SOCK);
+static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
- return fs_umode_to_ftype(inode->i_mode);
+static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
+{
+ return fs_umode_to_ftype(inode->vfs_inode.i_mode);
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
- struct inode *inode;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
+ struct btrfs_inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
- struct btrfs_key location;
+ struct btrfs_key location = { 0 };
u8 di_type = 0;
int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
+ ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
if (ret < 0)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
- inode = btrfs_iget(dir->i_sb, location.objectid, root);
+ inode = btrfs_iget(location.objectid, root);
if (IS_ERR(inode))
- return inode;
+ return ERR_CAST(inode);
/* Do extra check against inode mode with di_type */
- if (btrfs_inode_type(inode) != di_type) {
+ if (unlikely(btrfs_inode_type(inode) != di_type)) {
btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
- inode->i_mode, btrfs_inode_type(inode),
+ inode->vfs_inode.i_mode, btrfs_inode_type(inode),
di_type);
- iput(inode);
+ iput(&inode->vfs_inode);
return ERR_PTR(-EUCLEAN);
}
- return inode;
+ return &inode->vfs_inode;
}
- ret = fixup_tree_root_location(fs_info, dir, dentry,
+ ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
inode = ERR_PTR(ret);
else
- inode = new_simple_dir(dir->i_sb, &location, sub_root);
+ inode = new_simple_dir(dir, &location, root);
} else {
- inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
- }
- if (root != sub_root)
+ inode = btrfs_iget(location.objectid, sub_root);
btrfs_put_root(sub_root);
- if (!IS_ERR(inode) && root != sub_root) {
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
down_read(&fs_info->cleanup_work_sem);
- if (!sb_rdonly(inode->i_sb))
+ if (!sb_rdonly(inode->vfs_inode.i_sb))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&fs_info->cleanup_work_sem);
if (ret) {
- iput(inode);
+ iput(&inode->vfs_inode);
inode = ERR_PTR(ret);
}
}
- return inode;
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return &inode->vfs_inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
@@ -6059,6 +6042,76 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
}
/*
+ * Find the highest existing sequence number in a directory and then set the
+ * in-memory index_cnt variable to the first free sequence number.
+ */
+static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_key key, found_key;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct extent_buffer *leaf;
+ int ret;
+
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = (u64)-1;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ /* FIXME: we should be able to handle this */
+ if (ret == 0)
+ return ret;
+
+ if (path->slots[0] == 0) {
+ inode->index_cnt = BTRFS_DIR_START_INDEX;
+ return 0;
+ }
+
+ path->slots[0]--;
+
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+ if (found_key.objectid != btrfs_ino(inode) ||
+ found_key.type != BTRFS_DIR_INDEX_KEY) {
+ inode->index_cnt = BTRFS_DIR_START_INDEX;
+ return 0;
+ }
+
+ inode->index_cnt = found_key.offset + 1;
+
+ return 0;
+}
+
+static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
+{
+ int ret = 0;
+
+ btrfs_inode_lock(dir, 0);
+ if (dir->index_cnt == (u64)-1) {
+ ret = btrfs_inode_delayed_dir_index_count(dir);
+ if (ret) {
+ ret = btrfs_set_inode_index_count(dir);
+ if (ret)
+ goto out;
+ }
+ }
+
+ /* index_cnt is the index number of next new entry, so decrement it. */
+ *index = dir->index_cnt - 1;
+out:
+ btrfs_inode_unlock(dir, 0);
+
+ return ret;
+}
+
+/*
* All this infrastructure exists because dir_emit can fault, and we are holding
* the tree lock when doing readdir. For now just allocate a buffer and copy
* our information into that, and then dir_emit from the buffer. This is
@@ -6070,10 +6123,17 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
static int btrfs_opendir(struct inode *inode, struct file *file)
{
struct btrfs_file_private *private;
+ u64 last_index;
+ int ret;
+
+ ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
+ if (ret)
+ return ret;
private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
if (!private)
return -ENOMEM;
+ private->last_index = last_index;
private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!private->filldir_buf) {
kfree(private);
@@ -6083,6 +6143,19 @@ static int btrfs_opendir(struct inode *inode, struct file *file)
return 0;
}
+static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct btrfs_file_private *private = file->private_data;
+ int ret;
+
+ ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
+ &private->last_index);
+ if (ret)
+ return ret;
+
+ return generic_file_llseek(file, offset, whence);
+}
+
struct dir_entry {
u64 ino;
u64 offset;
@@ -6116,13 +6189,11 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
struct btrfs_dir_item *di;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
void *addr;
- struct list_head ins_list;
- struct list_head del_list;
+ LIST_HEAD(ins_list);
+ LIST_HEAD(del_list);
int ret;
- struct extent_buffer *leaf;
- int slot;
char *name_ptr;
int name_len;
int entries = 0;
@@ -6140,44 +6211,30 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
addr = private->filldir_buf;
path->reada = READA_FORWARD;
- INIT_LIST_HEAD(&ins_list);
- INIT_LIST_HEAD(&del_list);
- put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
+ put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
+ &ins_list, &del_list);
again:
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = ctx->pos;
key.objectid = btrfs_ino(BTRFS_I(inode));
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto err;
-
- while (1) {
+ btrfs_for_each_slot(root, &key, &found_key, path, ret) {
struct dir_entry *entry;
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto err;
- else if (ret > 0)
- break;
- continue;
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
+ struct extent_buffer *leaf = path->nodes[0];
+ u8 ftype;
if (found_key.objectid != key.objectid)
break;
if (found_key.type != BTRFS_DIR_INDEX_KEY)
break;
if (found_key.offset < ctx->pos)
- goto next;
+ continue;
+ if (found_key.offset > private->last_index)
+ break;
if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
- goto next;
- di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ continue;
+ di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
name_len = btrfs_dir_name_len(leaf, di);
if ((total_len + sizeof(struct dir_entry) + name_len) >=
PAGE_SIZE) {
@@ -6191,30 +6248,31 @@ again:
goto again;
}
+ ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
entry = addr;
- put_unaligned(name_len, &entry->name_len);
name_ptr = (char *)(entry + 1);
- read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
- name_len);
- put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
- &entry->type);
+ read_extent_buffer(leaf, name_ptr,
+ (unsigned long)(di + 1), name_len);
+ put_unaligned(name_len, &entry->name_len);
+ put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
btrfs_dir_item_key_to_cpu(leaf, di, &location);
put_unaligned(location.objectid, &entry->ino);
put_unaligned(found_key.offset, &entry->offset);
entries++;
addr += sizeof(struct dir_entry) + name_len;
total_len += sizeof(struct dir_entry) + name_len;
-next:
- path->slots[0]++;
}
+ /* Catch error encountered during iteration */
+ if (ret < 0)
+ goto err;
+
btrfs_release_path(path);
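/*
 * At this point filldir_buf holds a packed sequence of entries:
 *
 *   [struct dir_entry][name bytes][struct dir_entry][name bytes]...
 *
 * 'entries' counts them and 'total_len' is the used size. btrfs_filldir()
 * below walks this buffer and calls dir_emit() for each entry, now that
 * btrfs_release_path() has dropped the leaf locks and faulting is safe.
 */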
ret = btrfs_filldir(private->filldir_buf, entries, ctx);
if (ret)
goto nopos;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
- if (ret)
+ if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
goto nopos;
/*
@@ -6225,7 +6283,7 @@ next:
* offset. This means that new entries created during readdir
* are *guaranteed* to be seen in the future by that readdir.
* This has broken buggy programs which operate on names as
- * they're returned by readdir. Until we re-use freed offsets
+ * they're returned by readdir. Until we reuse freed offsets
* we have this hack to stop new entries from being returned
* under the assumption that they'll never reach this huge
* offset.
@@ -6242,8 +6300,7 @@ nopos:
ret = 0;
err:
if (put)
- btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
- btrfs_free_path(path);
+ btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
return ret;
}
@@ -6253,116 +6310,51 @@ err:
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-static int btrfs_dirty_inode(struct inode *inode)
+static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret;
- if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
+ if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
return 0;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
+ ret = btrfs_update_inode(trans, inode);
+ if (ret == -ENOSPC || ret == -EDQUOT) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
- if (BTRFS_I(inode)->delayed_node)
+ if (inode->delayed_node)
btrfs_balance_delayed_items(fs_info);
return ret;
}
/*
- * This is a copy of file_update_time. We need this so we can return error on
- * ENOSPC for updating the inode in the case of file write and mmap writes.
+ * We need our own ->update_time so that we can return error on ENOSPC for
+ * updating the inode in the case of file write and mmap writes.
*/
-static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
- int flags)
+static int btrfs_update_time(struct inode *inode, int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- bool dirty = flags & ~S_VERSION;
+ bool dirty;
if (btrfs_root_readonly(root))
return -EROFS;
- if (flags & S_VERSION)
- dirty |= inode_maybe_inc_iversion(inode, dirty);
- if (flags & S_CTIME)
- inode->i_ctime = *now;
- if (flags & S_MTIME)
- inode->i_mtime = *now;
- if (flags & S_ATIME)
- inode->i_atime = *now;
- return dirty ? btrfs_dirty_inode(inode) : 0;
-}
-
-/*
- * find the highest existing sequence number in a directory
- * and then set the in-memory index_cnt variable to reflect
- * free sequence numbers
- */
-static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
-{
- struct btrfs_root *root = inode->root;
- struct btrfs_key key, found_key;
- struct btrfs_path *path;
- struct extent_buffer *leaf;
- int ret;
-
- key.objectid = btrfs_ino(inode);
- key.type = BTRFS_DIR_INDEX_KEY;
- key.offset = (u64)-1;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- /* FIXME: we should be able to handle this */
- if (ret == 0)
- goto out;
- ret = 0;
-
- /*
- * MAGIC NUMBER EXPLANATION:
- * since we search a directory based on f_pos we have to start at 2
- * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
- * else has to start at 2
- */
- if (path->slots[0] == 0) {
- inode->index_cnt = 2;
- goto out;
- }
-
- path->slots[0]--;
-
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- if (found_key.objectid != btrfs_ino(inode) ||
- found_key.type != BTRFS_DIR_INDEX_KEY) {
- inode->index_cnt = 2;
- goto out;
- }
-
- inode->index_cnt = found_key.offset + 1;
-out:
- btrfs_free_path(path);
- return ret;
+ dirty = inode_update_timestamps(inode, flags);
+ return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}
/*
@@ -6392,7 +6384,7 @@ static int btrfs_insert_inode_locked(struct inode *inode)
{
struct btrfs_iget_args args;
- args.ino = BTRFS_I(inode)->location.objectid;
+ args.ino = btrfs_ino(BTRFS_I(inode));
args.root = BTRFS_I(inode)->root;
return insert_inode_locked4(inode,
@@ -6400,105 +6392,188 @@ static int btrfs_insert_inode_locked(struct inode *inode)
btrfs_find_actor, &args);
}
+int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
+ unsigned int *trans_num_items)
+{
+ struct inode *dir = args->dir;
+ struct inode *inode = args->inode;
+ int ret;
+
+ if (!args->orphan) {
+ ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
+ &args->fname);
+ if (ret)
+ return ret;
+ }
+
+ ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
+ if (ret) {
+ fscrypt_free_filename(&args->fname);
+ return ret;
+ }
+
+ /* 1 to add inode item */
+ *trans_num_items = 1;
+ /* 1 to add compression property */
+ if (BTRFS_I(dir)->prop_compress)
+ (*trans_num_items)++;
+ /* 1 to add default ACL xattr */
+ if (args->default_acl)
+ (*trans_num_items)++;
+ /* 1 to add access ACL xattr */
+ if (args->acl)
+ (*trans_num_items)++;
+#ifdef CONFIG_SECURITY
+ /* 1 to add LSM xattr */
+ if (dir->i_security)
+ (*trans_num_items)++;
+#endif
+ if (args->orphan) {
+ /* 1 to add orphan item */
+ (*trans_num_items)++;
+ } else {
+ /*
+ * 1 to add dir item
+ * 1 to add dir index
+ * 1 to update parent inode item
+ *
+ * No need for 1 unit for the inode ref item because it is
+ * inserted in a batch together with the inode item at
+ * btrfs_create_new_inode().
+ */
+ *trans_num_items += 3;
+ }
+ return 0;
+}
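To make the accounting above concrete, here is a sketch of the totals for two common cases (assuming a security-enabled kernel and a parent directory without a compression property):

/*
 * Regular create with default + access ACLs and an LSM xattr:
 *   1 (inode item) + 1 (default ACL) + 1 (access ACL) + 1 (LSM xattr)
 *   + 3 (dir item + dir index + parent inode update) = 7 items
 *
 * O_TMPFILE (orphan) create with no ACLs and no LSM xattr:
 *   1 (inode item) + 1 (orphan item) = 2 items
 */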
+
+void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
+{
+ posix_acl_release(args->acl);
+ posix_acl_release(args->default_acl);
+ fscrypt_free_filename(&args->fname);
+}
+
/*
* Inherit flags from the parent inode.
*
* Currently only the compression flags and the cow flags are inherited.
*/
-static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
+static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
unsigned int flags;
- if (!dir)
- return;
-
- flags = BTRFS_I(dir)->flags;
+ flags = dir->flags;
if (flags & BTRFS_INODE_NOCOMPRESS) {
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+ inode->flags &= ~BTRFS_INODE_COMPRESS;
+ inode->flags |= BTRFS_INODE_NOCOMPRESS;
} else if (flags & BTRFS_INODE_COMPRESS) {
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
+ inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode->flags |= BTRFS_INODE_COMPRESS;
}
if (flags & BTRFS_INODE_NODATACOW) {
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
- if (S_ISREG(inode->i_mode))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+ inode->flags |= BTRFS_INODE_NODATACOW;
+ if (S_ISREG(inode->vfs_inode.i_mode))
+ inode->flags |= BTRFS_INODE_NODATASUM;
}
btrfs_sync_inode_flags_to_i_flags(inode);
}
-static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct user_namespace *mnt_userns,
- struct inode *dir,
- const char *name, int name_len,
- u64 ref_objectid, u64 objectid,
- umode_t mode, u64 *index)
+int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_new_inode_args *args)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode *inode;
+ struct timespec64 ts;
+ struct inode *dir = args->dir;
+ struct inode *inode = args->inode;
+ const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
+ struct btrfs_root *root;
struct btrfs_inode_item *inode_item;
- struct btrfs_key *location;
struct btrfs_path *path;
+ u64 objectid;
struct btrfs_inode_ref *ref;
struct btrfs_key key[2];
u32 sizes[2];
struct btrfs_item_batch batch;
unsigned long ptr;
- unsigned int nofs_flag;
int ret;
+ bool xa_reserved = false;
path = btrfs_alloc_path();
if (!path)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- nofs_flag = memalloc_nofs_save();
- inode = new_inode(fs_info->sb);
- memalloc_nofs_restore(nofs_flag);
- if (!inode) {
- btrfs_free_path(path);
- return ERR_PTR(-ENOMEM);
+ if (!args->subvol)
+ BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
+ root = BTRFS_I(inode)->root;
+
+ ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
+ if (ret)
+ goto out;
+
+ ret = btrfs_get_free_objectid(root, &objectid);
+ if (ret)
+ goto out;
+ btrfs_set_inode_number(BTRFS_I(inode), objectid);
+
+ ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
+ if (ret)
+ goto out;
+ xa_reserved = true;
+
+ if (args->orphan) {
+ /*
+ * O_TMPFILE, set link count to 0, so that after this point, we
+ * fill in an inode item with the correct link count.
+ */
+ set_nlink(inode, 0);
+ } else {
+ trace_btrfs_inode_request(dir);
+
+ ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
+ if (ret)
+ goto out;
}
+ if (S_ISDIR(inode->i_mode))
+ BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
+
+ BTRFS_I(inode)->generation = trans->transid;
+ inode->i_generation = BTRFS_I(inode)->generation;
+
/*
- * O_TMPFILE, set link count to 0, so that after this point,
- * we fill in an inode item with the correct link count.
+ * We don't have any capability xattrs set here yet, shortcut any
+ * queries for the xattrs here. If we add them later via the inode
+ * security init path or any other path this flag will be cleared.
*/
- if (!name)
- set_nlink(inode, 0);
+ set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
/*
- * we have to initialize this early, so we can reclaim the inode
- * number if we fail afterwards in this function.
+ * Subvolumes don't inherit flags from their parent directory.
+ * Originally this was probably by accident, but we probably can't
+ * change it now without compatibility issues.
*/
- inode->i_ino = objectid;
+ if (!args->subvol)
+ btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
- if (dir && name) {
- trace_btrfs_inode_request(dir);
+ btrfs_set_inode_mapping_order(BTRFS_I(inode));
+ if (S_ISREG(inode->i_mode)) {
+ if (btrfs_test_opt(fs_info, NODATASUM))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+ if (btrfs_test_opt(fs_info, NODATACOW))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM;
+ btrfs_update_inode_mapping_flags(BTRFS_I(inode));
+ }
- ret = btrfs_set_inode_index(BTRFS_I(dir), index);
- if (ret) {
- btrfs_free_path(path);
- iput(inode);
- return ERR_PTR(ret);
- }
- } else if (dir) {
- *index = 0;
+ ret = btrfs_insert_inode_locked(inode);
+ if (ret < 0) {
+ if (!args->orphan)
+ BTRFS_I(dir)->index_cnt--;
+ goto out;
}
- /*
- * index_cnt is ignored for everything but a dir,
- * btrfs_set_inode_index_count has an explanation for the magic
- * number
- */
- BTRFS_I(inode)->index_cnt = 2;
- BTRFS_I(inode)->dir_index = *index;
- BTRFS_I(inode)->root = btrfs_grab_root(root);
- BTRFS_I(inode)->generation = trans->transid;
- inode->i_generation = BTRFS_I(inode)->generation;
/*
* We could have gotten an inode number from somebody who was fsynced
@@ -6506,7 +6581,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
* sync since it will be a full sync anyway and this will blow away the
* old info in the log.
*/
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
+ btrfs_set_inode_full_sync(BTRFS_I(inode));
key[0].objectid = objectid;
key[0].type = BTRFS_INODE_ITEM_KEY;
@@ -6514,7 +6589,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
sizes[0] = sizeof(struct btrfs_inode_item);
- if (name) {
+ if (!args->orphan) {
/*
* Start new inodes with an inode_ref. This is slightly more
* efficient for small numbers of hard links since they will
@@ -6523,37 +6598,33 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
*/
key[1].objectid = objectid;
key[1].type = BTRFS_INODE_REF_KEY;
- key[1].offset = ref_objectid;
-
- sizes[1] = name_len + sizeof(*ref);
- }
-
- location = &BTRFS_I(inode)->location;
- location->objectid = objectid;
- location->offset = 0;
- location->type = BTRFS_INODE_ITEM_KEY;
-
- ret = btrfs_insert_inode_locked(inode);
- if (ret < 0) {
- iput(inode);
- goto fail;
+ if (args->subvol) {
+ key[1].offset = objectid;
+ sizes[1] = 2 + sizeof(*ref);
+ } else {
+ key[1].offset = btrfs_ino(BTRFS_I(dir));
+ sizes[1] = name->len + sizeof(*ref);
+ }
}
batch.keys = &key[0];
batch.data_sizes = &sizes[0];
- batch.total_data_size = sizes[0] + (name ? sizes[1] : 0);
- batch.nr = name ? 2 : 1;
+ batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
+ batch.nr = args->orphan ? 1 : 2;
ret = btrfs_insert_empty_items(trans, root, path, &batch);
- if (ret != 0)
- goto fail_unlock;
+ if (unlikely(ret != 0)) {
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
- inode_init_owner(mnt_userns, inode, dir, mode);
- inode_set_bytes(inode, 0);
+ ts = simple_inode_init_ts(inode);
+ BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
+ BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
- inode->i_mtime = current_time(inode);
- inode->i_atime = inode->i_mtime;
- inode->i_ctime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = inode->i_mtime;
+ /*
+ * We're going to fill the inode item now, so at this point the inode
+ * must be fully initialized.
+ */
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
@@ -6561,50 +6632,111 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
sizeof(*inode_item));
fill_inode_item(trans, path->nodes[0], inode_item, inode);
- if (name) {
+ if (!args->orphan) {
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
struct btrfs_inode_ref);
- btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
- btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
ptr = (unsigned long)(ref + 1);
- write_extent_buffer(path->nodes[0], name, ptr, name_len);
+ if (args->subvol) {
+ btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
+ btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
+ write_extent_buffer(path->nodes[0], "..", ptr, 2);
+ } else {
+ btrfs_set_inode_ref_name_len(path->nodes[0], ref,
+ name->len);
+ btrfs_set_inode_ref_index(path->nodes[0], ref,
+ BTRFS_I(inode)->dir_index);
+ write_extent_buffer(path->nodes[0], name->name, ptr,
+ name->len);
+ }
}
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ /*
+ * We don't need the path anymore. Inheriting properties, adding ACLs,
+ * security xattrs, an orphan item or adding the link will each result in
+ * allocating yet another path, so just free ours now.
+ */
btrfs_free_path(path);
+ path = NULL;
- btrfs_inherit_iflags(inode, dir);
+ if (args->subvol) {
+ struct btrfs_inode *parent;
- if (S_ISREG(mode)) {
- if (btrfs_test_opt(fs_info, NODATASUM))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(fs_info, NODATACOW))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
- BTRFS_INODE_NODATASUM;
+ /*
+ * Subvolumes inherit properties from their parent subvolume,
+ * not the directory they were created in.
+ */
+ parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ } else {
+ ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
+ parent);
+ iput(&parent->vfs_inode);
+ }
+ } else {
+ ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
+ BTRFS_I(dir));
+ }
+ if (ret) {
+ btrfs_err(fs_info,
+ "error inheriting props for ino %llu (root %llu): %d",
+ btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
}
- inode_tree_add(inode);
+ /*
+ * Subvolumes don't inherit ACLs or get passed to the LSM. This is
+ * probably a bug.
+ */
+ if (!args->subvol) {
+ ret = btrfs_init_inode_security(trans, args);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
+ }
+
+ ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
+ if (WARN_ON(ret)) {
+ /* Shouldn't happen, we used xa_reserve() before. */
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
btrfs_update_root_times(trans, root);
- ret = btrfs_inode_inherit_props(trans, inode, dir);
- if (ret)
- btrfs_err(fs_info,
- "error inheriting props for ino %llu (root %llu): %d",
- btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
+ if (args->orphan) {
+ ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
+ } else {
+ ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
+ 0, BTRFS_I(inode)->dir_index);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
+ }
- return inode;
+ return 0;
-fail_unlock:
+discard:
+ /*
+ * discard_new_inode() calls iput(), but the caller owns the reference
+ * to the inode.
+ */
+ ihold(inode);
discard_new_inode(inode);
-fail:
- if (dir && name)
- BTRFS_I(dir)->index_cnt--;
+out:
+ if (xa_reserved)
+ xa_release(&root->inodes, objectid);
+
btrfs_free_path(path);
- return ERR_PTR(ret);
+ return ret;
}
/*
@@ -6615,7 +6747,7 @@ fail:
*/
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
- const char *name, int name_len, int add_backref, u64 index)
+ const struct fscrypt_str *name, bool add_backref, u64 index)
{
int ret = 0;
struct btrfs_key key;
@@ -6633,42 +6765,32 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, key.objectid,
- root->root_key.objectid, parent_ino,
- index, name, name_len);
+ btrfs_root_id(root), parent_ino,
+ index, name);
} else if (add_backref) {
- ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
- parent_ino, index);
+ ret = btrfs_insert_inode_ref(trans, root, name,
+ ino, parent_ino, index);
}
/* Nothing to clean up yet */
if (ret)
return ret;
- ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
- btrfs_inode_type(&inode->vfs_inode), index);
+ ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
+ btrfs_inode_type(inode), index);
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
- else if (ret) {
+ else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
- name_len * 2);
+ name->len * 2);
inode_inc_iversion(&parent_inode->vfs_inode);
- /*
- * If we are replaying a log tree, we do not want to update the mtime
- * and ctime of the parent directory with the current time, since the
- * log replay procedure is responsible for setting them to their correct
- * values (the ones it had when the fsync was done).
- */
- if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
- struct timespec64 now = current_time(&parent_inode->vfs_inode);
+ update_time_after_link_or_unlink(parent_inode);
- parent_inode->vfs_inode.i_mtime = now;
- parent_inode->vfs_inode.i_ctime = now;
- }
- ret = btrfs_update_inode(trans, root, parent_inode);
+ ret = btrfs_update_inode(trans, parent_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
return ret;
@@ -6676,167 +6798,92 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
- int err;
- err = btrfs_del_root_ref(trans, key.objectid,
- root->root_key.objectid, parent_ino,
- &local_index, name, name_len);
- if (err)
- btrfs_abort_transaction(trans, err);
+ int ret2;
+
+ ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
+ parent_ino, &local_index, name);
+ if (ret2)
+ btrfs_abort_transaction(trans, ret2);
} else if (add_backref) {
- u64 local_index;
- int err;
+ int ret2;
- err = btrfs_del_inode_ref(trans, root, name, name_len,
- ino, parent_ino, &local_index);
- if (err)
- btrfs_abort_transaction(trans, err);
+ ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
+ if (ret2)
+ btrfs_abort_transaction(trans, ret2);
}
/* Return the original error code */
return ret;
}
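
/*
 * A minimal usage sketch (illustrative only, not part of this patch): with
 * the switch to struct fscrypt_str, callers build the name with
 * fscrypt_setup_filename() and pass its disk_name, roughly:
 *
 *	struct fscrypt_name fname;
 *	int ret;
 *
 *	ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
 *	if (!ret)
 *		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
 *				     &fname.disk_name, true, index);
 *	fscrypt_free_filename(&fname);
 *
 * The btrfs_link() hunk further below follows this same pattern.
 */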
-static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
- struct btrfs_inode *dir, struct dentry *dentry,
- struct btrfs_inode *inode, int backref, u64 index)
+static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
{
- int err = btrfs_add_link(trans, dir, inode,
- dentry->d_name.name, dentry->d_name.len,
- backref, index);
- if (err > 0)
- err = -EEXIST;
- return err;
-}
-
-static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t rdev)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
- struct btrfs_trans_handle *trans;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_root *root = BTRFS_I(dir)->root;
- struct inode *inode = NULL;
- int err;
- u64 objectid;
- u64 index = 0;
-
- /*
- * 2 for inode item and ref
- * 2 for dir items
- * 1 for xattr if selinux is on
- */
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ struct btrfs_new_inode_args new_inode_args = {
+ .dir = dir,
+ .dentry = dentry,
+ .inode = inode,
+ };
+ unsigned int trans_num_items;
+ struct btrfs_trans_handle *trans;
+ int ret;
- err = btrfs_get_free_objectid(root, &objectid);
- if (err)
- goto out_unlock;
+ ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
+ if (ret)
+ goto out_inode;
- inode = btrfs_new_inode(trans, root, mnt_userns, dir,
- dentry->d_name.name, dentry->d_name.len,
- btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- inode = NULL;
- goto out_unlock;
+ trans = btrfs_start_transaction(root, trans_num_items);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_new_inode_args;
}
- /*
- * If the active LSM wants to access the inode during
- * d_instantiate it needs these. Smack checks to see
- * if the filesystem supports xattrs by looking at the
- * ops vector.
- */
- inode->i_op = &btrfs_special_inode_operations;
- init_special_inode(inode, inode->i_mode, rdev);
-
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err)
- goto out_unlock;
-
- err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
- 0, index);
- if (err)
- goto out_unlock;
-
- btrfs_update_inode(trans, root, BTRFS_I(inode));
- d_instantiate_new(dentry, inode);
+ ret = btrfs_create_new_inode(trans, &new_inode_args);
+ if (!ret)
+ d_instantiate_new(dentry, inode);
-out_unlock:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- if (err && inode) {
- inode_dec_link_count(inode);
- discard_new_inode(inode);
- }
- return err;
+out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+out_inode:
+ if (ret)
+ iput(inode);
+ return ret;
}
-static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool excl)
+static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(dir)->root;
- struct inode *inode = NULL;
- int err;
- u64 objectid;
- u64 index = 0;
+ struct inode *inode;
- /*
- * 2 for inode item and ref
- * 2 for dir items
- * 1 for xattr if selinux is on
- */
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return -ENOMEM;
+ inode_init_owner(idmap, inode, dir, mode);
+ inode->i_op = &btrfs_special_inode_operations;
+ init_special_inode(inode, inode->i_mode, rdev);
+ return btrfs_create_common(dir, dentry, inode);
+}
- err = btrfs_get_free_objectid(root, &objectid);
- if (err)
- goto out_unlock;
+static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
+{
+ struct inode *inode;
- inode = btrfs_new_inode(trans, root, mnt_userns, dir,
- dentry->d_name.name, dentry->d_name.len,
- btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- inode = NULL;
- goto out_unlock;
- }
- /*
- * If the active LSM wants to access the inode during
- * d_instantiate it needs these. Smack checks to see
- * if the filesystem supports xattrs by looking at the
- * ops vector.
- */
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return -ENOMEM;
+ inode_init_owner(idmap, inode, dir, mode);
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
inode->i_mapping->a_ops = &btrfs_aops;
-
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err)
- goto out_unlock;
-
- err = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (err)
- goto out_unlock;
-
- err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
- 0, index);
- if (err)
- goto out_unlock;
-
- d_instantiate_new(dentry, inode);
-
-out_unlock:
- btrfs_end_transaction(trans);
- if (err && inode) {
- inode_dec_link_count(inode);
- discard_new_inode(inode);
- }
- btrfs_btree_balance_dirty(fs_info);
- return err;
+ return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6845,20 +6892,24 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct fscrypt_name fname;
u64 index;
- int err;
- int drop_inode = 0;
+ int ret;
/* do not allow sys_link's with other subvols of the same device */
- if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
+ if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
return -EXDEV;
if (inode->i_nlink >= BTRFS_LINK_MAX)
return -EMLINK;
- err = btrfs_set_inode_index(BTRFS_I(dir), &index);
- if (err)
+ ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+ if (ret)
+ goto fail;
+
+ ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
+ if (ret)
goto fail;
/*
@@ -6869,137 +6920,84 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
*/
trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
trans = NULL;
goto fail;
}
/* There are several dir indexes for this inode, clear the cache. */
BTRFS_I(inode)->dir_index = 0ULL;
- inc_nlink(inode);
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
- ihold(inode);
- set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
+ inode_set_ctime_current(inode);
- err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
- 1, index);
+ ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
+ &fname.disk_name, 1, index);
+ if (ret)
+ goto fail;
- if (err) {
- drop_inode = 1;
- } else {
- struct dentry *parent = dentry->d_parent;
+ /* Link added, now update the inode item with the new link count. */
+ inc_nlink(inode);
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
- err = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (err)
+ if (inode->i_nlink == 1) {
+ /*
+ * If the new hard link count is 1, it's a file created with the
+ * open(2) O_TMPFILE flag.
+ */
+ ret = btrfs_orphan_del(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto fail;
- if (inode->i_nlink == 1) {
- /*
- * If new hard link count is 1, it's a file created
- * with open(2) O_TMPFILE flag.
- */
- err = btrfs_orphan_del(trans, BTRFS_I(inode));
- if (err)
- goto fail;
}
- d_instantiate(dentry, inode);
- btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
}
+ /* Grab reference for the new dentry passed to d_instantiate(). */
+ ihold(inode);
+ d_instantiate(dentry, inode);
+ btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
+
fail:
+ fscrypt_free_filename(&fname);
if (trans)
btrfs_end_transaction(trans);
- if (drop_inode) {
- inode_dec_link_count(inode);
- iput(inode);
- }
btrfs_btree_balance_dirty(fs_info);
- return err;
+ return ret;
}
-static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
- struct inode *inode = NULL;
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(dir)->root;
- int err = 0;
- u64 objectid = 0;
- u64 index = 0;
-
- /*
- * 2 items for inode and ref
- * 2 items for dir items
- * 1 for xattr if selinux is on
- */
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- err = btrfs_get_free_objectid(root, &objectid);
- if (err)
- goto out_fail;
-
- inode = btrfs_new_inode(trans, root, mnt_userns, dir,
- dentry->d_name.name, dentry->d_name.len,
- btrfs_ino(BTRFS_I(dir)), objectid,
- S_IFDIR | mode, &index);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- inode = NULL;
- goto out_fail;
- }
+ struct inode *inode;
- /* these must be set before we unlock the inode */
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
-
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err)
- goto out_fail;
-
- btrfs_i_size_write(BTRFS_I(inode), 0);
- err = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (err)
- goto out_fail;
-
- err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
- dentry->d_name.name,
- dentry->d_name.len, 0, index);
- if (err)
- goto out_fail;
-
- d_instantiate_new(dentry, inode);
-
-out_fail:
- btrfs_end_transaction(trans);
- if (err && inode) {
- inode_dec_link_count(inode);
- discard_new_inode(inode);
- }
- btrfs_btree_balance_dirty(fs_info);
- return err;
+ return ERR_PTR(btrfs_create_common(dir, dentry, inode));
}
static noinline int uncompress_inline(struct btrfs_path *path,
- struct page *page,
- size_t pg_offset, u64 extent_offset,
+ struct folio *folio,
struct btrfs_file_extent_item *item)
{
int ret;
struct extent_buffer *leaf = path->nodes[0];
+ const u32 blocksize = leaf->fs_info->sectorsize;
char *tmp;
size_t max_size;
unsigned long inline_size;
unsigned long ptr;
int compress_type;
- WARN_ON(pg_offset != 0);
compress_type = btrfs_file_extent_compression(leaf, item);
max_size = btrfs_file_extent_ram_bytes(leaf, item);
- inline_size = btrfs_file_extent_inline_item_len(leaf,
- btrfs_item_nr(path->slots[0]));
+ inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
@@ -7007,9 +7005,9 @@ static noinline int uncompress_inline(struct btrfs_path *path,
read_extent_buffer(leaf, tmp, ptr, inline_size);
- max_size = min_t(unsigned long, PAGE_SIZE, max_size);
- ret = btrfs_decompress(compress_type, tmp, page,
- extent_offset, inline_size, max_size);
+ max_size = min_t(unsigned long, blocksize, max_size);
+ ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
+ max_size);
/*
* decompression code contains a memset to fill in any space between the end
@@ -7019,25 +7017,51 @@ static noinline int uncompress_inline(struct btrfs_path *path,
* cover that region here.
*/
- if (max_size + pg_offset < PAGE_SIZE)
- memzero_page(page, pg_offset + max_size,
- PAGE_SIZE - max_size - pg_offset);
+ if (max_size < blocksize)
+ folio_zero_range(folio, max_size, blocksize - max_size);
kfree(tmp);
return ret;
}
-/**
- * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
+static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
+{
+ const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
+ struct btrfs_file_extent_item *fi;
+ void *kaddr;
+ size_t copy_size;
+
+ if (!folio || folio_test_uptodate(folio))
+ return 0;
+
+ ASSERT(folio_pos(folio) == 0);
+
+ fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
+ return uncompress_inline(path, folio, fi);
+
+ copy_size = min_t(u64, blocksize,
+ btrfs_file_extent_ram_bytes(path->nodes[0], fi));
+ kaddr = kmap_local_folio(folio, 0);
+ read_extent_buffer(path->nodes[0], kaddr,
+ btrfs_file_extent_inline_start(fi), copy_size);
+ kunmap_local(kaddr);
+ if (copy_size < blocksize)
+ folio_zero_range(folio, copy_size, blocksize - copy_size);
+ return 0;
+}
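
/*
 * Worked example (an assumption for illustration, not from this patch): with
 * a 4K block size and an uncompressed inline extent whose ram_bytes is 100,
 * read_inline_extent() copies the first 100 bytes from the leaf into the
 * folio and zeroes bytes 100..4095, so the whole block reads back as a
 * consistent, fully initialized block of data.
 */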
+
+/*
+ * Lookup the first extent overlapping a range in a file.
+ *
* @inode: file to search in
- * @page: page to read extent data into if the extent is inline
+ * @folio: folio to read extent data into if the extent is inline
- * @pg_offset: offset into @page to copy to
* @start: file offset
* @len: length of range starting at @start
*
- * This returns the first &struct extent_map which overlaps with the given
- * range, reading it from the B-tree and caching it if necessary. Note that
- * there may be more extents which overlap the given range after the returned
- * extent_map.
+ * Return the first &struct extent_map which overlaps the given range, reading
+ * it from the B-tree and caching it if necessary. Note that there may be more
+ * extents which overlap the given range after the returned extent_map.
*
- * If @page is not NULL and the extent is inline, this also reads the extent
- * data directly into the page and marks the extent up to date in the io_tree.
+ * If @folio is not NULL and the extent is inline, this also reads the extent
+ * data directly into the folio and marks the extent up to date in the io_tree.
@@ -7045,8 +7069,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
* Return: ERR_PTR on error, non-NULL extent_map on success.
*/
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page, size_t pg_offset,
- u64 start, u64 len)
+ struct folio *folio, u64 start, u64 len)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret = 0;
@@ -7061,29 +7084,27 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &inode->extent_tree;
- struct extent_io_tree *io_tree = &inode->io_tree;
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
+ em = btrfs_lookup_extent_mapping(em_tree, start, len);
read_unlock(&em_tree->lock);
if (em) {
if (em->start > start || em->start + em->len <= start)
- free_extent_map(em);
- else if (em->block_start == EXTENT_MAP_INLINE && page)
- free_extent_map(em);
+ btrfs_free_extent_map(em);
+ else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
+ btrfs_free_extent_map(em);
else
goto out;
}
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto out;
}
em->start = EXTENT_MAP_HOLE;
- em->orig_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
em->len = (u64)-1;
- em->block_len = (u64)-1;
path = btrfs_alloc_path();
if (!path) {
@@ -7100,8 +7121,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
* point the commit_root has everything we need.
*/
if (btrfs_is_free_space_inode(inode)) {
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
}
ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
@@ -7136,7 +7157,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
/* Only regular file could have regular/prealloc extent */
- if (!S_ISREG(inode->vfs_inode.i_mode)) {
+ if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
ret = -EUCLEAN;
btrfs_crit(fs_info,
"regular/prealloc extent found for non-regular inode %llu",
@@ -7173,70 +7194,47 @@ next:
/* New extent overlaps with existing one */
em->start = start;
- em->orig_start = start;
em->len = found_key.offset - start;
- em->block_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
goto insert;
}
- btrfs_extent_item_to_extent_map(inode, path, item, !page, em);
+ btrfs_extent_item_to_extent_map(inode, path, item, em);
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
goto insert;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- unsigned long ptr;
- char *map;
- size_t size;
- size_t extent_offset;
- size_t copy_size;
+ /*
+ * An inline extent can only exist at file offset 0. This is
+ * ensured by the tree-checker and the inline extent creation path.
+ * Thus all members representing file offsets should be zero.
+ */
+ ASSERT(extent_start == 0);
+ ASSERT(em->start == 0);
- if (!page)
- goto out;
+ /*
+ * btrfs_extent_item_to_extent_map() should have properly
+ * initialized em members already.
+ *
+ * Other members are not utilized for inline extents.
+ */
+ ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
+ ASSERT(em->len == fs_info->sectorsize);
- size = btrfs_file_extent_ram_bytes(leaf, item);
- extent_offset = page_offset(page) + pg_offset - extent_start;
- copy_size = min_t(u64, PAGE_SIZE - pg_offset,
- size - extent_offset);
- em->start = extent_start + extent_offset;
- em->len = ALIGN(copy_size, fs_info->sectorsize);
- em->orig_block_len = em->len;
- em->orig_start = em->start;
- ptr = btrfs_file_extent_inline_start(item) + extent_offset;
-
- if (!PageUptodate(page)) {
- if (btrfs_file_extent_compression(leaf, item) !=
- BTRFS_COMPRESS_NONE) {
- ret = uncompress_inline(path, page, pg_offset,
- extent_offset, item);
- if (ret)
- goto out;
- } else {
- map = kmap_local_page(page);
- read_extent_buffer(leaf, map + pg_offset, ptr,
- copy_size);
- if (pg_offset + copy_size < PAGE_SIZE) {
- memset(map + pg_offset + copy_size, 0,
- PAGE_SIZE - pg_offset -
- copy_size);
- }
- kunmap_local(map);
- }
- flush_dcache_page(page);
- }
- set_extent_uptodate(io_tree, em->start,
- extent_map_end(em) - 1, NULL, GFP_NOFS);
+ ret = read_inline_extent(path, folio);
+ if (ret < 0)
+ goto out;
goto insert;
}
not_found:
em->start = start;
- em->orig_start = start;
em->len = len;
- em->block_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
insert:
ret = 0;
btrfs_release_path(path);
- if (em->start > start || extent_map_end(em) <= start) {
+ if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
btrfs_err(fs_info,
"bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
@@ -7245,7 +7243,7 @@ insert:
}
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
out:
btrfs_free_path(path);
@@ -7253,201 +7251,12 @@ out:
trace_btrfs_get_extent(root, inode, em);
if (ret) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
return em;
}
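
/*
 * A minimal caller sketch (illustrative only, not part of this patch): look
 * up the extent covering one block, inspect it, then drop the reference that
 * the lookup returned. handle_hole() is a hypothetical helper.
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, NULL, start, fs_info->sectorsize);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	if (em->disk_bytenr == EXTENT_MAP_HOLE)
 *		handle_hole();
 *	btrfs_free_extent_map(em);
 */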
-struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
- u64 start, u64 len)
-{
- struct extent_map *em;
- struct extent_map *hole_em = NULL;
- u64 delalloc_start = start;
- u64 end;
- u64 delalloc_len;
- u64 delalloc_end;
- int err = 0;
-
- em = btrfs_get_extent(inode, NULL, 0, start, len);
- if (IS_ERR(em))
- return em;
- /*
- * If our em maps to:
- * - a hole or
- * - a pre-alloc extent,
- * there might actually be delalloc bytes behind it.
- */
- if (em->block_start != EXTENT_MAP_HOLE &&
- !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- return em;
- else
- hole_em = em;
-
- /* check to see if we've wrapped (len == -1 or similar) */
- end = start + len;
- if (end < start)
- end = (u64)-1;
- else
- end -= 1;
-
- em = NULL;
-
- /* ok, we didn't find anything, lets look for delalloc */
- delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
- end, len, EXTENT_DELALLOC, 1);
- delalloc_end = delalloc_start + delalloc_len;
- if (delalloc_end < delalloc_start)
- delalloc_end = (u64)-1;
-
- /*
- * We didn't find anything useful, return the original results from
- * get_extent()
- */
- if (delalloc_start > end || delalloc_end <= start) {
- em = hole_em;
- hole_em = NULL;
- goto out;
- }
-
- /*
- * Adjust the delalloc_start to make sure it doesn't go backwards from
- * the start they passed in
- */
- delalloc_start = max(start, delalloc_start);
- delalloc_len = delalloc_end - delalloc_start;
-
- if (delalloc_len > 0) {
- u64 hole_start;
- u64 hole_len;
- const u64 hole_end = extent_map_end(hole_em);
-
- em = alloc_extent_map();
- if (!em) {
- err = -ENOMEM;
- goto out;
- }
-
- ASSERT(hole_em);
- /*
- * When btrfs_get_extent can't find anything it returns one
- * huge hole
- *
- * Make sure what it found really fits our range, and adjust to
- * make sure it is based on the start from the caller
- */
- if (hole_end <= start || hole_em->start > end) {
- free_extent_map(hole_em);
- hole_em = NULL;
- } else {
- hole_start = max(hole_em->start, start);
- hole_len = hole_end - hole_start;
- }
-
- if (hole_em && delalloc_start > hole_start) {
- /*
- * Our hole starts before our delalloc, so we have to
- * return just the parts of the hole that go until the
- * delalloc starts
- */
- em->len = min(hole_len, delalloc_start - hole_start);
- em->start = hole_start;
- em->orig_start = hole_start;
- /*
- * Don't adjust block start at all, it is fixed at
- * EXTENT_MAP_HOLE
- */
- em->block_start = hole_em->block_start;
- em->block_len = hole_len;
- if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
- set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
- } else {
- /*
- * Hole is out of passed range or it starts after
- * delalloc range
- */
- em->start = delalloc_start;
- em->len = delalloc_len;
- em->orig_start = delalloc_start;
- em->block_start = EXTENT_MAP_DELALLOC;
- em->block_len = delalloc_len;
- }
- } else {
- return hole_em;
- }
-out:
-
- free_extent_map(hole_em);
- if (err) {
- free_extent_map(em);
- return ERR_PTR(err);
- }
- return em;
-}
-
-static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
- const u64 start,
- const u64 len,
- const u64 orig_start,
- const u64 block_start,
- const u64 block_len,
- const u64 orig_block_len,
- const u64 ram_bytes,
- const int type)
-{
- struct extent_map *em = NULL;
- int ret;
-
- if (type != BTRFS_ORDERED_NOCOW) {
- em = create_io_em(inode, start, len, orig_start, block_start,
- block_len, orig_block_len, ram_bytes,
- BTRFS_COMPRESS_NONE, /* compress_type */
- type);
- if (IS_ERR(em))
- goto out;
- }
- ret = btrfs_add_ordered_extent_dio(inode, start, block_start, len,
- block_len, type);
- if (ret) {
- if (em) {
- free_extent_map(em);
- btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
- }
- em = ERR_PTR(ret);
- }
- out:
-
- return em;
-}
-
-static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
- u64 start, u64 len)
-{
- struct btrfs_root *root = inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_map *em;
- struct btrfs_key ins;
- u64 alloc_hint;
- int ret;
-
- alloc_hint = get_extent_allocation_hint(inode, start, len);
- ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
- 0, alloc_hint, &ins, 1, 1);
- if (ret)
- return ERR_PTR(ret);
-
- em = btrfs_create_dio_extent(inode, start, ins.offset, start,
- ins.objectid, ins.offset, ins.offset,
- ins.offset, BTRFS_ORDERED_REGULAR);
- btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- if (IS_ERR(em))
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
- 1);
-
- return em;
-}
-
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group *block_group;
@@ -7470,8 +7279,6 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
- * @orig_start: (optional) Return the original file offset of the file extent
- * @orig_len: (optional) Return the original on-disk length of the file extent
- * @ram_bytes: (optional) Return the ram_bytes of the file extent
+ * @file_extent: (optional) Return the details of the found file extent
+ * @nowait: whether the search must not block (sets path->nowait)
- * @strict: if true, omit optimizations that might force us into unnecessary
- * cow. e.g., don't trust generation number.
*
* Return:
* >0 and update @len if we can do nocow write
@@ -7481,1010 +7288,183 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
* NOTE: This only checks the file extents, caller is responsible to wait for
* any ordered extents.
*/
-noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
- u64 *orig_start, u64 *orig_block_len,
- u64 *ram_bytes, bool strict)
+noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
+ struct btrfs_file_extent *file_extent,
+ bool nowait)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct can_nocow_file_extent_args nocow_args = { 0 };
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
struct extent_buffer *leaf;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
- u64 disk_bytenr;
- u64 backref_offset;
- u64 extent_end;
- u64 num_bytes;
- int slot;
int found_type;
- bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->nowait = nowait;
- ret = btrfs_lookup_file_extent(NULL, root, path,
- btrfs_ino(BTRFS_I(inode)), offset, 0);
+ ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
+ offset, 0);
if (ret < 0)
- goto out;
+ return ret;
- slot = path->slots[0];
if (ret == 1) {
- if (slot == 0) {
- /* can't find the item, must cow */
- ret = 0;
- goto out;
+ if (path->slots[0] == 0) {
+ /* Can't find the item, must COW. */
+ return 0;
}
- slot--;
+ path->slots[0]--;
}
ret = 0;
leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
- /* not our file or wrong item type, must cow */
- goto out;
+ /* Not our file or wrong item type, must COW. */
+ return 0;
}
if (key.offset > offset) {
- /* Wrong offset, must cow */
- goto out;
- }
-
- fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- found_type = btrfs_file_extent_type(leaf, fi);
- if (found_type != BTRFS_FILE_EXTENT_REG &&
- found_type != BTRFS_FILE_EXTENT_PREALLOC) {
- /* not a regular extent, must cow */
- goto out;
+ /* Wrong offset, must COW. */
+ return 0;
}
- if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
- goto out;
-
- extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (extent_end <= offset)
- goto out;
-
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- if (disk_bytenr == 0)
- goto out;
+ if (btrfs_file_extent_end(path) <= offset)
+ return 0;
- if (btrfs_file_extent_compression(leaf, fi) ||
- btrfs_file_extent_encryption(leaf, fi) ||
- btrfs_file_extent_other_encoding(leaf, fi))
- goto out;
+ fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ found_type = btrfs_file_extent_type(leaf, fi);
- /*
- * Do the same check as in btrfs_cross_ref_exist but without the
- * unnecessary search.
- */
- if (!strict &&
- (btrfs_file_extent_generation(leaf, fi) <=
- btrfs_root_last_snapshot(&root->root_item)))
- goto out;
+ nocow_args.start = offset;
+ nocow_args.end = offset + *len - 1;
+ nocow_args.free_path = true;
- backref_offset = btrfs_file_extent_offset(leaf, fi);
+ ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
+ /* can_nocow_file_extent() has freed the path. */
+ path = NULL;
- if (orig_start) {
- *orig_start = key.offset - backref_offset;
- *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
- *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+ if (ret != 1) {
+ /* Treat errors as not being able to NOCOW. */
+ return 0;
}
- if (btrfs_extent_readonly(fs_info, disk_bytenr))
- goto out;
+ if (btrfs_extent_readonly(fs_info,
+ nocow_args.file_extent.disk_bytenr +
+ nocow_args.file_extent.offset))
+ return 0;
- num_bytes = min(offset + *len, extent_end) - offset;
- if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
+ found_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 range_end;
- range_end = round_up(offset + num_bytes,
+ range_end = round_up(offset + nocow_args.file_extent.num_bytes,
root->fs_info->sectorsize) - 1;
- ret = test_range_bit(io_tree, offset, range_end,
- EXTENT_DELALLOC, 0, NULL);
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
- }
-
- btrfs_release_path(path);
-
- /*
- * look for other files referencing this extent, if we
- * find any we must cow
- */
-
- ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
- key.offset - backref_offset, disk_bytenr,
- strict);
- if (ret) {
- ret = 0;
- goto out;
+ ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
+ EXTENT_DELALLOC);
+ if (ret)
+ return -EAGAIN;
}
- /*
- * adjust disk_bytenr and num_bytes to cover just the bytes
- * in this extent we are about to write. If there
- * are any csums in that range we have to cow in order
- * to keep the csums correct
- */
- disk_bytenr += backref_offset;
- disk_bytenr += offset - key.offset;
- if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
- goto out;
- /*
- * all of the above have passed, it is safe to overwrite this extent
- * without cow
- */
- *len = num_bytes;
- ret = 1;
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
- struct extent_state **cached_state, bool writing)
-{
- struct btrfs_ordered_extent *ordered;
- int ret = 0;
-
- while (1) {
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
- /*
- * We're concerned with the entire range that we're going to be
- * doing DIO to, so we need to make sure there's no ordered
- * extents in this range.
- */
- ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
- lockend - lockstart + 1);
-
- /*
- * We need to make sure there are no buffered pages in this
- * range either, we could have raced between the invalidate in
- * generic_file_direct_write and locking the extent. The
- * invalidate needs to happen so that reads after a write do not
- * get stale data.
- */
- if (!ordered &&
- (!writing || !filemap_range_has_page(inode->i_mapping,
- lockstart, lockend)))
- break;
-
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
-
- if (ordered) {
- /*
- * If we are doing a DIO read and the ordered extent we
- * found is for a buffered write, we can not wait for it
- * to complete and retry, because if we do so we can
- * deadlock with concurrent buffered writes on page
- * locks. This happens only if our DIO read covers more
- * than one extent map, if at this point has already
- * created an ordered extent for a previous extent map
- * and locked its range in the inode's io tree, and a
- * concurrent write against that previous extent map's
- * range and this range started (we unlock the ranges
- * in the io tree only when the bios complete and
- * buffered writes always lock pages before attempting
- * to lock range in the io tree).
- */
- if (writing ||
- test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
- btrfs_start_ordered_extent(ordered, 1);
- else
- ret = -ENOTBLK;
- btrfs_put_ordered_extent(ordered);
- } else {
- /*
- * We could trigger writeback for this range (and wait
- * for it to complete) and then invalidate the pages for
- * this range (through invalidate_inode_pages2_range()),
- * but that can lead us to a deadlock with a concurrent
- * call to readahead (a buffered read or a defrag call
- * triggered a readahead) on a page lock due to an
- * ordered dio extent we created before but did not have
- * yet a corresponding bio submitted (whence it can not
- * complete), which makes readahead wait for that
- * ordered extent to complete while holding a lock on
- * that page.
- */
- ret = -ENOTBLK;
- }
-
- if (ret)
- break;
+ if (file_extent)
+ memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
- cond_resched();
- }
+ *len = nocow_args.file_extent.num_bytes;
- return ret;
+ return 1;
}
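
/*
 * A hedged usage sketch (illustrative, not part of this patch): a write path
 * asks whether [pos, pos + write_bytes) can skip COW. On a return of 1, *len
 * has been clamped to the NOCOW-able range and file_extent describes the
 * extent to reuse; pos, write_bytes and nowait are illustrative locals.
 *
 *	u64 len = write_bytes;
 *	struct btrfs_file_extent fe;
 *
 *	if (can_nocow_extent(BTRFS_I(inode), pos, &len, &fe, nowait) > 0) {
 *		// write up to 'len' bytes in place, reusing 'fe'
 *	} else {
 *		// fall back to the COW path
 *	}
 */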
/* The callers of this must take lock_extent() */
-static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
- u64 len, u64 orig_start, u64 block_start,
- u64 block_len, u64 orig_block_len,
- u64 ram_bytes, int compress_type,
- int type)
+struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
+ const struct btrfs_file_extent *file_extent,
+ int type)
{
- struct extent_map_tree *em_tree;
struct extent_map *em;
int ret;
+ /*
+ * Note the missing NOCOW type.
+ *
+ * For pure NOCOW writes, we should not create an io extent map, but
+ * just reuse the existing one.
+ * Only PREALLOC writes (NOCOW write into preallocated range) can
+ * create an io extent map.
+ */
ASSERT(type == BTRFS_ORDERED_PREALLOC ||
type == BTRFS_ORDERED_COMPRESSED ||
- type == BTRFS_ORDERED_NOCOW ||
type == BTRFS_ORDERED_REGULAR);
- em_tree = &inode->extent_tree;
- em = alloc_extent_map();
+ switch (type) {
+ case BTRFS_ORDERED_PREALLOC:
+ /* We're only referring to part of a larger preallocated extent. */
+ ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
+ break;
+ case BTRFS_ORDERED_REGULAR:
+ /* COW results in a new extent matching our file extent size. */
+ ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
+ ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
+
+ /* Since it's a new extent, we should not have any offset. */
+ ASSERT(file_extent->offset == 0);
+ break;
+ case BTRFS_ORDERED_COMPRESSED:
+ /* Must be compressed. */
+ ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
+
+ /*
+ * Encoded writes can make us refer to part of the
+ * uncompressed extent.
+ */
+ ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
+ break;
+ }
+
+ em = btrfs_alloc_extent_map();
if (!em)
return ERR_PTR(-ENOMEM);
em->start = start;
- em->orig_start = orig_start;
- em->len = len;
- em->block_len = block_len;
- em->block_start = block_start;
- em->orig_block_len = orig_block_len;
- em->ram_bytes = ram_bytes;
+ em->len = file_extent->num_bytes;
+ em->disk_bytenr = file_extent->disk_bytenr;
+ em->disk_num_bytes = file_extent->disk_num_bytes;
+ em->ram_bytes = file_extent->ram_bytes;
em->generation = -1;
- set_bit(EXTENT_FLAG_PINNED, &em->flags);
- if (type == BTRFS_ORDERED_PREALLOC) {
- set_bit(EXTENT_FLAG_FILLING, &em->flags);
- } else if (type == BTRFS_ORDERED_COMPRESSED) {
- set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
- em->compress_type = compress_type;
- }
-
- do {
- btrfs_drop_extent_cache(inode, em->start,
- em->start + em->len - 1, 0);
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 1);
- write_unlock(&em_tree->lock);
- /*
- * The caller has taken lock_extent(), who could race with us
- * to add em?
- */
- } while (ret == -EEXIST);
+ em->offset = file_extent->offset;
+ em->flags |= EXTENT_FLAG_PINNED;
+ if (type == BTRFS_ORDERED_COMPRESSED)
+ btrfs_extent_map_set_compression(em, file_extent->compression);
+ ret = btrfs_replace_extent_map_range(inode, em, true);
if (ret) {
- free_extent_map(em);
+ btrfs_free_extent_map(em);
return ERR_PTR(ret);
}
- /* em got 2 refs now, callers needs to do free_extent_map once. */
+ /* em got 2 refs now, the caller needs to call btrfs_free_extent_map() once. */
return em;
}
-
-static int btrfs_get_blocks_direct_write(struct extent_map **map,
- struct inode *inode,
- struct btrfs_dio_data *dio_data,
- u64 start, u64 len)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_map *em = *map;
- int ret = 0;
-
- /*
- * We don't allocate a new extent in the following cases
- *
- * 1) The inode is marked as NODATACOW. In this case we'll just use the
- * existing extent.
- * 2) The extent is marked as PREALLOC. We're good to go here and can
- * just use the extent.
- *
- */
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
- ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
- em->block_start != EXTENT_MAP_HOLE)) {
- int type;
- u64 block_start, orig_start, orig_block_len, ram_bytes;
-
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- type = BTRFS_ORDERED_PREALLOC;
- else
- type = BTRFS_ORDERED_NOCOW;
- len = min(len, em->len - (start - em->start));
- block_start = em->block_start + (start - em->start);
-
- if (can_nocow_extent(inode, start, &len, &orig_start,
- &orig_block_len, &ram_bytes, false) == 1 &&
- btrfs_inc_nocow_writers(fs_info, block_start)) {
- struct extent_map *em2;
-
- em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
- orig_start, block_start,
- len, orig_block_len,
- ram_bytes, type);
- btrfs_dec_nocow_writers(fs_info, block_start);
- if (type == BTRFS_ORDERED_PREALLOC) {
- free_extent_map(em);
- *map = em = em2;
- }
-
- if (em2 && IS_ERR(em2)) {
- ret = PTR_ERR(em2);
- goto out;
- }
- /*
- * For inode marked NODATACOW or extent marked PREALLOC,
- * use the existing or preallocated extent, so does not
- * need to adjust btrfs_space_info's bytes_may_use.
- */
- btrfs_free_reserved_data_space_noquota(fs_info, len);
- goto skip_cow;
- }
- }
-
- /* this will cow the extent */
- free_extent_map(em);
- *map = em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out;
- }
-
- len = min(len, em->len - (start - em->start));
-
-skip_cow:
- /*
- * Need to update the i_size under the extent lock so buffered
- * readers will get the updated i_size when we unlock.
- */
- if (start + len > i_size_read(inode))
- i_size_write(inode, start + len);
-
- dio_data->reserve -= len;
-out:
- return ret;
-}
-
-static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
- loff_t length, unsigned int flags, struct iomap *iomap,
- struct iomap *srcmap)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_map *em;
- struct extent_state *cached_state = NULL;
- struct btrfs_dio_data *dio_data = NULL;
- u64 lockstart, lockend;
- const bool write = !!(flags & IOMAP_WRITE);
- int ret = 0;
- u64 len = length;
- bool unlock_extents = false;
-
- if (!write)
- len = min_t(u64, len, fs_info->sectorsize);
-
- lockstart = start;
- lockend = start + len - 1;
-
- /*
- * The generic stuff only does filemap_write_and_wait_range, which
- * isn't enough if we've written compressed pages to this area, so we
- * need to flush the dirty pages again to make absolutely sure that any
- * outstanding dirty pages are on disk.
- */
- if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags)) {
- ret = filemap_fdatawrite_range(inode->i_mapping, start,
- start + length - 1);
- if (ret)
- return ret;
- }
-
- dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS);
- if (!dio_data)
- return -ENOMEM;
-
- dio_data->length = length;
- if (write) {
- dio_data->reserve = round_up(length, fs_info->sectorsize);
- ret = btrfs_delalloc_reserve_space(BTRFS_I(inode),
- &dio_data->data_reserved,
- start, dio_data->reserve);
- if (ret) {
- extent_changeset_free(dio_data->data_reserved);
- kfree(dio_data);
- return ret;
- }
- }
- iomap->private = dio_data;
-
-
- /*
- * If this errors out it's because we couldn't invalidate pagecache for
- * this range and we need to fallback to buffered.
- */
- if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) {
- ret = -ENOTBLK;
- goto err;
- }
-
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto unlock_err;
- }
-
- /*
- * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
- * io. INLINE is special, and we could probably kludge it in here, but
- * it's still buffered so for safety lets just fall back to the generic
- * buffered path.
- *
- * For COMPRESSED we _have_ to read the entire extent in so we can
- * decompress it, so there will be buffering required no matter what we
- * do, so go ahead and fallback to buffered.
- *
- * We return -ENOTBLK because that's what makes DIO go ahead and go back
- * to buffered IO. Don't blame me, this is the price we pay for using
- * the generic code.
- */
- if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
- em->block_start == EXTENT_MAP_INLINE) {
- free_extent_map(em);
- ret = -ENOTBLK;
- goto unlock_err;
- }
-
- len = min(len, em->len - (start - em->start));
- if (write) {
- ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
- start, len);
- if (ret < 0)
- goto unlock_err;
- unlock_extents = true;
- /* Recalc len in case the new em is smaller than requested */
- len = min(len, em->len - (start - em->start));
- } else {
- /*
- * We need to unlock only the end area that we aren't using.
- * The rest is going to be unlocked by the endio routine.
- */
- lockstart = start + len;
- if (lockstart < lockend)
- unlock_extents = true;
- }
-
- if (unlock_extents)
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, &cached_state);
- else
- free_extent_state(cached_state);
-
- /*
- * Translate extent map information to iomap.
- * We trim the extents (and move the addr) even though iomap code does
- * that, since we have locked only the parts we are performing I/O in.
- */
- if ((em->block_start == EXTENT_MAP_HOLE) ||
- (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
- iomap->addr = IOMAP_NULL_ADDR;
- iomap->type = IOMAP_HOLE;
- } else {
- iomap->addr = em->block_start + (start - em->start);
- iomap->type = IOMAP_MAPPED;
- }
- iomap->offset = start;
- iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
- iomap->length = len;
-
- if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
- iomap->flags |= IOMAP_F_ZONE_APPEND;
-
- free_extent_map(em);
-
- return 0;
-
-unlock_err:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
-err:
- if (dio_data) {
- btrfs_delalloc_release_space(BTRFS_I(inode),
- dio_data->data_reserved, start,
- dio_data->reserve, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve);
- extent_changeset_free(dio_data->data_reserved);
- kfree(dio_data);
- }
- return ret;
-}
-
-static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
- ssize_t written, unsigned int flags, struct iomap *iomap)
-{
- int ret = 0;
- struct btrfs_dio_data *dio_data = iomap->private;
- size_t submitted = dio_data->submitted;
- const bool write = !!(flags & IOMAP_WRITE);
-
- if (!write && (iomap->type == IOMAP_HOLE)) {
- /* If reading from a hole, unlock and return */
- unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
- goto out;
- }
-
- if (submitted < length) {
- pos += submitted;
- length -= submitted;
- if (write)
- __endio_write_update_ordered(BTRFS_I(inode), pos,
- length, false);
- else
- unlock_extent(&BTRFS_I(inode)->io_tree, pos,
- pos + length - 1);
- ret = -ENOTBLK;
- }
-
- if (write) {
- if (dio_data->reserve)
- btrfs_delalloc_release_space(BTRFS_I(inode),
- dio_data->data_reserved, pos,
- dio_data->reserve, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length);
- extent_changeset_free(dio_data->data_reserved);
- }
-out:
- kfree(dio_data);
- iomap->private = NULL;
-
- return ret;
-}
-
-static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
-{
- /*
- * This implies a barrier so that stores to dio_bio->bi_status before
- * this and loads of dio_bio->bi_status after this are fully ordered.
- */
- if (!refcount_dec_and_test(&dip->refs))
- return;
-
- if (btrfs_op(dip->dio_bio) == BTRFS_MAP_WRITE) {
- __endio_write_update_ordered(BTRFS_I(dip->inode),
- dip->file_offset,
- dip->bytes,
- !dip->dio_bio->bi_status);
- } else {
- unlock_extent(&BTRFS_I(dip->inode)->io_tree,
- dip->file_offset,
- dip->file_offset + dip->bytes - 1);
- }
-
- bio_endio(dip->dio_bio);
- kfree(dip);
-}
-
-static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio,
- int mirror_num,
- unsigned long bio_flags)
-{
- struct btrfs_dio_private *dip = bio->bi_private;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- blk_status_t ret;
-
- BUG_ON(bio_op(bio) == REQ_OP_WRITE);
-
- ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
- if (ret)
- return ret;
-
- refcount_inc(&dip->refs);
- ret = btrfs_map_bio(fs_info, bio, mirror_num);
- if (ret)
- refcount_dec(&dip->refs);
- return ret;
-}
-
-static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
- struct btrfs_bio *bbio,
- const bool uptodate)
-{
- struct inode *inode = dip->inode;
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- const u32 sectorsize = fs_info->sectorsize;
- struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
- struct bio_vec bvec;
- struct bvec_iter iter;
- const u64 orig_file_offset = dip->file_offset;
- u64 start = orig_file_offset;
- u32 bio_offset = 0;
- blk_status_t err = BLK_STS_OK;
-
- __bio_for_each_segment(bvec, &bbio->bio, iter, bbio->iter) {
- unsigned int i, nr_sectors, pgoff;
-
- nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
- pgoff = bvec.bv_offset;
- for (i = 0; i < nr_sectors; i++) {
- ASSERT(pgoff < PAGE_SIZE);
- if (uptodate &&
- (!csum || !check_data_csum(inode, bbio,
- bio_offset, bvec.bv_page,
- pgoff, start))) {
- clean_io_failure(fs_info, failure_tree, io_tree,
- start, bvec.bv_page,
- btrfs_ino(BTRFS_I(inode)),
- pgoff);
- } else {
- int ret;
-
- ASSERT((start - orig_file_offset) < UINT_MAX);
- ret = btrfs_repair_one_sector(inode,
- &bbio->bio,
- start - orig_file_offset,
- bvec.bv_page, pgoff,
- start, bbio->mirror_num,
- submit_dio_repair_bio);
- if (ret)
- err = errno_to_blk_status(ret);
- }
- start += sectorsize;
- ASSERT(bio_offset + sectorsize > bio_offset);
- bio_offset += sectorsize;
- pgoff += sectorsize;
- }
- }
- return err;
-}
-
-static void __endio_write_update_ordered(struct btrfs_inode *inode,
- const u64 offset, const u64 bytes,
- const bool uptodate)
-{
- btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes,
- finish_ordered_fn, uptodate);
-}
-
-static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
- struct bio *bio,
- u64 dio_file_offset)
-{
- return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, 1);
-}
-
-static void btrfs_end_dio_bio(struct bio *bio)
-{
- struct btrfs_dio_private *dip = bio->bi_private;
- blk_status_t err = bio->bi_status;
-
- if (err)
- btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
- "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
- btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
- bio->bi_opf, bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size, err);
-
- if (bio_op(bio) == REQ_OP_READ)
- err = btrfs_check_read_dio_bio(dip, btrfs_bio(bio), !err);
-
- if (err)
- dip->dio_bio->bi_status = err;
-
- btrfs_record_physical_zoned(dip->inode, dip->file_offset, bio);
-
- bio_put(bio);
- btrfs_dio_private_put(dip);
-}
-
-static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
- struct inode *inode, u64 file_offset, int async_submit)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_dio_private *dip = bio->bi_private;
- bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
- blk_status_t ret;
-
- /* Check btrfs_submit_bio_hook() for rules about async submit. */
- if (async_submit)
- async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
-
- if (!write) {
- ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
- if (ret)
- goto err;
- }
-
- if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
- goto map;
-
- if (write && async_submit) {
- ret = btrfs_wq_submit_bio(inode, bio, 0, 0, file_offset,
- btrfs_submit_bio_start_direct_io);
- goto err;
- } else if (write) {
- /*
- * If we aren't doing async submit, calculate the csum of the
- * bio now.
- */
- ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, 1);
- if (ret)
- goto err;
- } else {
- u64 csum_offset;
-
- csum_offset = file_offset - dip->file_offset;
- csum_offset >>= fs_info->sectorsize_bits;
- csum_offset *= fs_info->csum_size;
- btrfs_bio(bio)->csum = dip->csums + csum_offset;
- }
-map:
- ret = btrfs_map_bio(fs_info, bio, 0);
-err:
- return ret;
-}
-
/*
- * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked
- * or ordered extents whether or not we submit any bios.
- */
-static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
- struct inode *inode,
- loff_t file_offset)
-{
- const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
- const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
- size_t dip_size;
- struct btrfs_dio_private *dip;
-
- dip_size = sizeof(*dip);
- if (!write && csum) {
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- size_t nblocks;
-
- nblocks = dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits;
- dip_size += fs_info->csum_size * nblocks;
- }
-
- dip = kzalloc(dip_size, GFP_NOFS);
- if (!dip)
- return NULL;
-
- dip->inode = inode;
- dip->file_offset = file_offset;
- dip->bytes = dio_bio->bi_iter.bi_size;
- dip->disk_bytenr = dio_bio->bi_iter.bi_sector << 9;
- dip->dio_bio = dio_bio;
- refcount_set(&dip->refs, 1);
- return dip;
-}
-
-static void btrfs_submit_direct(const struct iomap_iter *iter,
- struct bio *dio_bio, loff_t file_offset)
-{
- struct inode *inode = iter->inode;
- const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
- BTRFS_BLOCK_GROUP_RAID56_MASK);
- struct btrfs_dio_private *dip;
- struct bio *bio;
- u64 start_sector;
- int async_submit = 0;
- u64 submit_len;
- u64 clone_offset = 0;
- u64 clone_len;
- u64 logical;
- int ret;
- blk_status_t status;
- struct btrfs_io_geometry geom;
- struct btrfs_dio_data *dio_data = iter->iomap.private;
- struct extent_map *em = NULL;
-
- dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
- if (!dip) {
- if (!write) {
- unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
- file_offset + dio_bio->bi_iter.bi_size - 1);
- }
- dio_bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(dio_bio);
- return;
- }
-
- if (!write) {
- /*
- * Load the csums up front to reduce csum tree searches and
- * contention when submitting bios.
- *
- * If we have csums disabled this will do nothing.
- */
- status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
- if (status != BLK_STS_OK)
- goto out_err;
- }
-
- start_sector = dio_bio->bi_iter.bi_sector;
- submit_len = dio_bio->bi_iter.bi_size;
-
- do {
- logical = start_sector << 9;
- em = btrfs_get_chunk_map(fs_info, logical, submit_len);
- if (IS_ERR(em)) {
- status = errno_to_blk_status(PTR_ERR(em));
- em = NULL;
- goto out_err_em;
- }
- ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
- logical, &geom);
- if (ret) {
- status = errno_to_blk_status(ret);
- goto out_err_em;
- }
-
- clone_len = min(submit_len, geom.len);
- ASSERT(clone_len <= UINT_MAX);
-
- /*
- * This will never fail as it's passing GFP_NOFS and
- * the allocation is backed by btrfs_bioset.
- */
- bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
- bio->bi_private = dip;
- bio->bi_end_io = btrfs_end_dio_bio;
-
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- status = extract_ordered_extent(BTRFS_I(inode), bio,
- file_offset);
- if (status) {
- bio_put(bio);
- goto out_err;
- }
- }
-
- ASSERT(submit_len >= clone_len);
- submit_len -= clone_len;
-
- /*
- * Increase the count before we submit the bio so we know
- * the end IO handler won't happen before we increase the
- * count. Otherwise, the dip might get freed before we're
- * done setting it up.
- *
- * We transfer the initial reference to the last bio, so we
- * don't need to increment the reference count for the last one.
- */
- if (submit_len > 0) {
- refcount_inc(&dip->refs);
- /*
- * If we are submitting more than one bio, submit them
- * all asynchronously. The exception is RAID 5 or 6, as
- * asynchronous checksums make it difficult to collect
- * full stripe writes.
- */
- if (!raid56)
- async_submit = 1;
- }
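/*
 * Editor's note - a sketch of the refcount flow for a bio split into three
 * clones: refs starts at 1, is bumped to 2 and then 3 before submitting the
 * first two clones, while the last clone reuses the initial reference. Each
 * btrfs_end_dio_bio() completion drops one reference, so the dip is freed
 * only after the last clone finishes.
 */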
-
- status = btrfs_submit_dio_bio(bio, inode, file_offset,
- async_submit);
- if (status) {
- bio_put(bio);
- if (submit_len > 0)
- refcount_dec(&dip->refs);
- goto out_err_em;
- }
-
- dio_data->submitted += clone_len;
- clone_offset += clone_len;
- start_sector += clone_len >> 9;
- file_offset += clone_len;
-
- free_extent_map(em);
- } while (submit_len > 0);
- return;
-
-out_err_em:
- free_extent_map(em);
-out_err:
- dip->dio_bio->bi_status = status;
- btrfs_dio_private_put(dip);
-}
-
-const struct iomap_ops btrfs_dio_iomap_ops = {
- .iomap_begin = btrfs_dio_iomap_begin,
- .iomap_end = btrfs_dio_iomap_end,
-};
-
-const struct iomap_dio_ops btrfs_dio_ops = {
- .submit_io = btrfs_submit_direct,
-};
-
-static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len)
-{
- int ret;
-
- ret = fiemap_prep(inode, fieinfo, start, &len, 0);
- if (ret)
- return ret;
-
- return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
-}
-
-int btrfs_readpage(struct file *file, struct page *page)
-{
- struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
- u64 start = page_offset(page);
- u64 end = start + PAGE_SIZE - 1;
- struct btrfs_bio_ctrl bio_ctrl = { 0 };
- int ret;
-
- btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
-
- ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
- if (bio_ctrl.bio)
- ret = submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags);
- return ret;
-}
-
-static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- int ret;
-
- if (current->flags & PF_MEMALLOC) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
- }
-
- /*
- * If we are under memory pressure we will call this directly from the
- * VM, so we need to make sure we have the inode referenced for the
- * ordered extent. If not, just return as if we didn't do anything.
- */
- if (!igrab(inode)) {
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
- }
- ret = extent_write_full_page(page, wbc);
- btrfs_add_delayed_iput(inode);
- return ret;
-}
-
-static int btrfs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- return extent_writepages(mapping, wbc);
-}
-
-static void btrfs_readahead(struct readahead_control *rac)
-{
- extent_readahead(rac);
-}
-
-/*
- * For releasepage() and invalidatepage() we have a race window where
- * end_page_writeback() is called but the subpage spinlock is not yet released.
+ * For release_folio() and invalidate_folio() we have a race window where
+ * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause a
 * use-after-free on the subpage spinlock. So this function spins and waits
 * for the subpage spinlock to be released.
*/
-static void wait_subpage_spinlock(struct page *page)
+static void wait_subpage_spinlock(struct folio *folio)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
- struct btrfs_subpage *subpage;
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ struct btrfs_folio_state *bfs;
- if (fs_info->sectorsize == PAGE_SIZE)
+ if (!btrfs_is_subpage(fs_info, folio))
return;
- ASSERT(PagePrivate(page) && page->private);
- subpage = (struct btrfs_subpage *)page->private;
+ ASSERT(folio_test_private(folio) && folio_get_private(folio));
+ bfs = folio_get_private(folio);
/*
* This may look insane as we just acquire the spinlock and release it,
@@ -8497,109 +7477,108 @@ static void wait_subpage_spinlock(struct page *page)
* Here we just acquire the spinlock so that all existing callers
* should exit and we're safe to release/invalidate the page.
*/
- spin_lock_irq(&subpage->lock);
- spin_unlock_irq(&subpage->lock);
+ spin_lock_irq(&bfs->lock);
+ spin_unlock_irq(&bfs->lock);
}
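/*
 * Editor's note - a minimal sketch of the race being closed, assuming the
 * subpage case: an endio path can call folio_end_writeback() while still
 * holding bfs->lock; release_folio()/invalidate_folio() may then see
 * writeback as finished and tear down the folio private state while the
 * lock is still held. The acquire/release pair above guarantees any such
 * holder has exited first.
 */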
-static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static int btrfs_launder_folio(struct folio *folio)
{
- int ret = try_release_extent_mapping(page, gfp_flags);
+ return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
+ folio_size(folio), NULL);
+}
- if (ret == 1) {
- wait_subpage_spinlock(page);
- clear_page_extent_mapped(page);
+static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
+{
+ if (try_release_extent_mapping(folio, gfp_flags)) {
+ wait_subpage_spinlock(folio);
+ clear_folio_extent_mapped(folio);
+ return true;
}
- return ret;
+ return false;
}
-static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- if (PageWriteback(page) || PageDirty(page))
- return 0;
- return __btrfs_releasepage(page, gfp_flags);
+ if (folio_test_writeback(folio) || folio_test_dirty(folio))
+ return false;
+ return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
-static int btrfs_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page,
+static int btrfs_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
enum migrate_mode mode)
{
- int ret;
+ int ret = filemap_migrate_folio(mapping, dst, src, mode);
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
+ if (ret)
return ret;
- if (page_has_private(page))
- attach_page_private(newpage, detach_page_private(page));
-
- if (PageOrdered(page)) {
- ClearPageOrdered(page);
- SetPageOrdered(newpage);
+ if (folio_test_ordered(src)) {
+ folio_clear_ordered(src);
+ folio_set_ordered(dst);
}
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
+#else
+#define btrfs_migrate_folio NULL
#endif
-static void btrfs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
{
- struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
+ struct btrfs_inode *inode = folio_to_inode(folio);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *tree = &inode->io_tree;
struct extent_state *cached_state = NULL;
- u64 page_start = page_offset(page);
- u64 page_end = page_start + PAGE_SIZE - 1;
+ u64 page_start = folio_pos(folio);
+ u64 page_end = page_start + folio_size(folio) - 1;
u64 cur;
- int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
+ int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
/*
- * We have page locked so no new ordered extent can be created on this
- * page, nor bio can be submitted for this page.
+ * We have the folio locked so no new ordered extent can be created on
+ * this folio, nor can a bio be submitted for this folio.
*
- * But already submitted bio can still be finished on this page.
- * Furthermore, endio function won't skip page which has Ordered
- * (Private2) already cleared, so it's possible for endio and
- * invalidatepage to do the same ordered extent accounting twice
- * on one page.
+ * But an already submitted bio can still be finished on this folio.
+ * Furthermore, the endio function won't skip a folio which has Ordered
+ * already cleared, so it's possible for endio and
+ * invalidate_folio to do the same ordered extent accounting twice
+ * on one folio.
*
* So here we wait for any submitted bios to finish, so that we won't
- * do double ordered extent accounting on the same page.
+ * do double ordered extent accounting on the same folio.
*/
- wait_on_page_writeback(page);
- wait_subpage_spinlock(page);
+ folio_wait_writeback(folio);
+ wait_subpage_spinlock(folio);
/*
* For subpage case, we have call sites like
* btrfs_punch_hole_lock_range() which passes range not aligned to
* sectorsize.
- * If the range doesn't cover the full page, we don't need to and
- * shouldn't clear page extent mapped, as page->private can still
+ * If the range doesn't cover the full folio, we don't need to and
+ * shouldn't clear the folio extent mapped state, as folio->private can still
 * record subpage dirty bits for other parts of the range.
*
- * For cases that can invalidate the full even the range doesn't
- * cover the full page, like invalidating the last page, we're
+ * For cases that invalidate the full folio even if the range doesn't
+ * cover the full folio, like invalidating the last folio, we're
* still safe to wait for ordered extent to finish.
*/
- if (!(offset == 0 && length == PAGE_SIZE)) {
- btrfs_releasepage(page, GFP_NOFS);
+ if (!(offset == 0 && length == folio_size(folio))) {
+ btrfs_release_folio(folio, GFP_NOFS);
return;
}
if (!inode_evicting)
- lock_extent_bits(tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(tree, page_start, page_end, &cached_state);
cur = page_start;
while (cur < page_end) {
struct btrfs_ordered_extent *ordered;
- bool delete_states;
u64 range_end;
u32 range_len;
+ u32 extra_flags = 0;
ordered = btrfs_lookup_first_ordered_range(inode, cur,
page_end + 1 - cur);
@@ -8609,7 +7588,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
* No ordered extent covering this range, we are safe
* to delete all extent states in the range.
*/
- delete_states = true;
+ extra_flags = EXTENT_CLEAR_ALL_BITS;
goto next;
}
if (ordered->file_offset > cur) {
@@ -8620,7 +7599,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
* the ordered extent in the next iteration.
*/
range_end = ordered->file_offset - 1;
- delete_states = true;
+ extra_flags = EXTENT_CLEAR_ALL_BITS;
goto next;
}
@@ -8628,17 +7607,16 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
page_end);
ASSERT(range_end + 1 - cur < U32_MAX);
range_len = range_end + 1 - cur;
- if (!btrfs_page_test_ordered(fs_info, page, cur, range_len)) {
+ if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
/*
- * If Ordered (Private2) is cleared, it means endio has
+ * If Ordered is cleared, it means endio has
* already been executed for the range.
* We can't delete the extent states as
* btrfs_finish_ordered_io() may still use some of them.
*/
- delete_states = false;
goto next;
}
- btrfs_page_clear_ordered(fs_info, page, cur, range_len);
+ btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
/*
* IO on this page will never be started, so we need to account
@@ -8649,17 +7627,23 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
* btrfs_finish_ordered_io().
*/
if (!inode_evicting)
- clear_extent_bit(tree, cur, range_end,
- EXTENT_DELALLOC |
- EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 0, &cached_state);
+ btrfs_clear_extent_bit(tree, cur, range_end,
+ EXTENT_DELALLOC |
+ EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, &cached_state);
- spin_lock_irq(&inode->ordered_tree.lock);
+ spin_lock(&inode->ordered_tree_lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
ordered->truncated_len = min(ordered->truncated_len,
cur - ordered->file_offset);
- spin_unlock_irq(&inode->ordered_tree.lock);
+ spin_unlock(&inode->ordered_tree_lock);
+ /*
+ * If the ordered extent has finished, we're safe to delete all
+ * the extent states of the range, otherwise
+ * btrfs_finish_ordered_io() will get executed by endio for
+ * other pages, so we can't delete extent states.
+ */
if (btrfs_dec_test_ordered_pending(inode, &ordered,
cur, range_end + 1 - cur)) {
btrfs_finish_ordered_io(ordered);
@@ -8667,14 +7651,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
* The ordered extent has finished, now we're again
* safe to delete all extent states of the range.
*/
- delete_states = true;
- } else {
- /*
- * btrfs_finish_ordered_io() will get executed by endio
- * of other pages, thus we can't delete extent states
- * anymore
- */
- delete_states = false;
+ extra_flags = EXTENT_CLEAR_ALL_BITS;
}
next:
if (ordered)
@@ -8694,208 +7671,49 @@ next:
* reserved data space.
* Since the IO will never happen for this page.
*/
- btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
- if (!inode_evicting) {
- clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
- EXTENT_DELALLOC | EXTENT_UPTODATE |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
- delete_states, &cached_state);
- }
+ btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
+ if (!inode_evicting)
+ btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG | extra_flags,
+ &cached_state);
cur = range_end + 1;
}
/*
 * We have iterated through all ordered extents of the page, so the page
- * should not have Ordered (Private2) anymore, or the above iteration
+ * should not have Ordered anymore, or the above iteration
* did something wrong.
*/
- ASSERT(!PageOrdered(page));
- btrfs_page_clear_checked(fs_info, page, page_offset(page), PAGE_SIZE);
+ ASSERT(!folio_test_ordered(folio));
+ btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
if (!inode_evicting)
- __btrfs_releasepage(page, GFP_NOFS);
- clear_page_extent_mapped(page);
+ __btrfs_release_folio(folio, GFP_NOFS);
+ clear_folio_extent_mapped(folio);
}
-/*
- * btrfs_page_mkwrite() is not allowed to change the file size as it gets
- * called from a page fault handler when a page is first dirtied. Hence we must
- * be careful to check for EOF conditions here. We set the page up correctly
- * for a written page which means we get ENOSPC checking when writing into
- * holes and correct delalloc and unwritten extent mapping on filesystems that
- * support these features.
- *
- * We are not allowed to take the i_mutex here so we have to play games to
- * protect against truncate races as the page could now be beyond EOF. Because
- * truncate_setsize() writes the inode size before removing pages, once we have
- * the page lock we can determine safely if the page is beyond EOF. If it is not
- * beyond EOF, then the page is guaranteed safe against truncation until we
- * unlock the page.
- */
-vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
- struct page *page = vmf->page;
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_ordered_extent *ordered;
- struct extent_state *cached_state = NULL;
- struct extent_changeset *data_reserved = NULL;
- unsigned long zero_start;
- loff_t size;
- vm_fault_t ret;
- int ret2;
- int reserved = 0;
- u64 reserved_space;
- u64 page_start;
- u64 page_end;
- u64 end;
-
- reserved_space = PAGE_SIZE;
-
- sb_start_pagefault(inode->i_sb);
- page_start = page_offset(page);
- page_end = page_start + PAGE_SIZE - 1;
- end = page_end;
-
- /*
- * Reserving delalloc space after obtaining the page lock can lead to
- * deadlock. For example, if a dirty page is locked by this function
- * and the call to btrfs_delalloc_reserve_space() ends up triggering
- * dirty page write out, then the btrfs_writepage() function could
- * end up waiting indefinitely to get a lock on the page currently
- * being processed by btrfs_page_mkwrite() function.
- */
- ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
- page_start, reserved_space);
- if (!ret2) {
- ret2 = file_update_time(vmf->vma->vm_file);
- reserved = 1;
- }
- if (ret2) {
- ret = vmf_error(ret2);
- if (reserved)
- goto out;
- goto out_noreserve;
- }
-
- ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
-again:
- down_read(&BTRFS_I(inode)->i_mmap_lock);
- lock_page(page);
- size = i_size_read(inode);
-
- if ((page->mapping != inode->i_mapping) ||
- (page_start >= size)) {
- /* page got truncated out from underneath us */
- goto out_unlock;
- }
- wait_on_page_writeback(page);
-
- lock_extent_bits(io_tree, page_start, page_end, &cached_state);
- ret2 = set_page_extent_mapped(page);
- if (ret2 < 0) {
- ret = vmf_error(ret2);
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
- goto out_unlock;
- }
-
- /*
- * we can't set the delalloc bits if there are pending ordered
- * extents. Drop our locks and wait for them to finish
- */
- ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
- PAGE_SIZE);
- if (ordered) {
- unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state);
- unlock_page(page);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
- btrfs_start_ordered_extent(ordered, 1);
- btrfs_put_ordered_extent(ordered);
- goto again;
- }
-
- if (page->index == ((size - 1) >> PAGE_SHIFT)) {
- reserved_space = round_up(size - page_start,
- fs_info->sectorsize);
- if (reserved_space < PAGE_SIZE) {
- end = page_start + reserved_space - 1;
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved, page_start,
- PAGE_SIZE - reserved_space, true);
- }
- }
-
- /*
- * page_mkwrite gets called when the page is first dirtied after it's
- * faulted in, but write(2) could also dirty a page and set delalloc
- * bits. In that case, for space accounting reasons, we still need to
- * clear any delalloc bits within this page range since we have to
- * reserve data and metadata space before lock_page() (see above comments).
- */
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 0, 0, &cached_state);
-
- ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
- &cached_state);
- if (ret2) {
- unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state);
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
-
- /* page is wholly or partially inside EOF */
- if (page_start + PAGE_SIZE > size)
- zero_start = offset_in_page(size);
- else
- zero_start = PAGE_SIZE;
-
- if (zero_start != PAGE_SIZE) {
- memzero_page(page, zero_start, PAGE_SIZE - zero_start);
- flush_dcache_page(page);
- }
- btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
- btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
- btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
-
- btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
-
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
-
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
- sb_end_pagefault(inode->i_sb);
- extent_changeset_free(data_reserved);
- return VM_FAULT_LOCKED;
-
-out_unlock:
- unlock_page(page);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
-out:
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
- btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
- reserved_space, (ret != 0));
-out_noreserve:
- sb_end_pagefault(inode->i_sb);
- extent_changeset_free(data_reserved);
- return ret;
-}
-
-static int btrfs_truncate(struct inode *inode, bool skip_writeback)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_rsv *rsv;
+ struct btrfs_truncate_control control = {
+ .inode = inode,
+ .ino = btrfs_ino(inode),
+ .min_type = BTRFS_EXTENT_DATA_KEY,
+ .clear_extent_range = true,
+ .new_size = inode->vfs_inode.i_size,
+ };
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_rsv rsv;
int ret;
struct btrfs_trans_handle *trans;
- u64 mask = fs_info->sectorsize - 1;
- u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
- u64 extents_found = 0;
+ const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
+ const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
+ const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
+
+ /* Our inode is locked and the i_size can't be changed concurrently. */
+ btrfs_assert_inode_locked(inode);
if (!skip_writeback) {
- ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
- (u64)-1);
+ ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
if (ret)
return ret;
}
@@ -8928,11 +7746,9 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
* updating the inode.
*/
- rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv)
- return -ENOMEM;
- rsv->size = min_size;
- rsv->failfast = 1;
+ btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
+ rsv.size = min_size;
+ rsv.failfast = true;
/*
* 1 for the truncate slack space
@@ -8945,22 +7761,43 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
}
/* Migrate the slack space for the truncate to our reserve */
- ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
min_size, false);
- BUG_ON(ret);
+ /*
+ * We have reserved 2 metadata units when we started the transaction and
+ * min_size matches 1 unit, so this should never fail, but if it does,
+ * it's not critical; we just fail the truncation.
+ */
+ if (WARN_ON(ret)) {
+ btrfs_end_transaction(trans);
+ goto out;
+ }
- trans->block_rsv = rsv;
+ trans->block_rsv = &rsv;
while (1) {
- ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
- inode->i_size,
- BTRFS_EXTENT_DATA_KEY,
- &extents_found);
+ struct extent_state *cached_state = NULL;
+
+ btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+ /*
+ * We want to drop from the next block forward in case this new
+ * size is not block aligned since we will be keeping the last
+ * block of the extent just the way it is.
+ */
+ btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
+
+ ret = btrfs_truncate_inode_items(trans, root, &control);
+
+ inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
+ btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
+
+ btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN)
break;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, inode);
if (ret)
break;
@@ -8974,24 +7811,32 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
break;
}
- btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
+ btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, false);
- BUG_ON(ret); /* shouldn't happen */
- trans->block_rsv = rsv;
+ &rsv, min_size, false);
+ /*
+ * We have reserved 2 metadata units when we started the
+ * transaction and min_size matches 1 unit, so this should never
+ * fail, but if it does, it's not critical; we just fail the truncation.
+ */
+ if (WARN_ON(ret))
+ break;
+
+ trans->block_rsv = &rsv;
}
/*
* We can't call btrfs_truncate_block inside a trans handle as we could
- * deadlock with freeze, if we got NEED_TRUNCATE_BLOCK then we know
- * we've truncated everything except the last little bit, and can do
- * btrfs_truncate_block and then update the disk_i_size.
+ * deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then we
+ * know we've truncated everything except the last little bit, and can
+ * do btrfs_truncate_block and then update the disk_i_size.
*/
- if (ret == NEED_TRUNCATE_BLOCK) {
+ if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
+ ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
+ inode->vfs_inode.i_size, (u64)-1);
if (ret)
goto out;
trans = btrfs_start_transaction(root, 1);
@@ -8999,14 +7844,14 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
ret = PTR_ERR(trans);
goto out;
}
- btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
+ btrfs_inode_safe_disk_i_size_write(inode, 0);
}
if (trans) {
int ret2;
trans->block_rsv = &fs_info->trans_block_rsv;
- ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret2 = btrfs_update_inode(trans, inode);
if (ret2 && !ret)
ret = ret2;
@@ -9016,7 +7861,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
btrfs_btree_balance_dirty(fs_info);
}
out:
- btrfs_free_block_rsv(fs_info, rsv);
+ btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
/*
* So if we truncate and then write and fsync we normally would just
* write the extents that changed, which is a problem if we need to
@@ -9031,52 +7876,29 @@ out:
* between the old i_size and the new i_size, and there were no prealloc
* extents beyond i_size to drop.
*/
- if (extents_found > 0)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
+ if (control.extents_found > 0)
+ btrfs_set_inode_full_sync(inode);
return ret;
}
-/*
- * create a new subvolume directory/inode (helper for the ioctl).
- */
-int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *new_root,
- struct btrfs_root *parent_root,
- struct user_namespace *mnt_userns)
+struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
+ struct inode *dir)
{
struct inode *inode;
- int err;
- u64 index = 0;
- u64 ino;
-
- err = btrfs_get_free_objectid(new_root, &ino);
- if (err < 0)
- return err;
-
- inode = btrfs_new_inode(trans, new_root, mnt_userns, NULL, "..", 2,
- ino, ino,
- S_IFDIR | (~current_umask() & S_IRWXUGO),
- &index);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
- inode->i_op = &btrfs_dir_inode_operations;
- inode->i_fop = &btrfs_dir_file_operations;
-
- set_nlink(inode, 1);
- btrfs_i_size_write(BTRFS_I(inode), 0);
- unlock_new_inode(inode);
- err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
- if (err)
- btrfs_err(new_root->fs_info,
- "error inheriting subvolume %llu properties: %d",
- new_root->root_key.objectid, err);
-
- err = btrfs_update_inode(trans, new_root, BTRFS_I(inode));
-
- iput(inode);
- return err;
+ inode = new_inode(dir->i_sb);
+ if (inode) {
+ /*
+ * Subvolumes don't inherit the sgid bit or the parent's gid if
+ * the parent's sgid bit is set. This is probably a bug.
+ */
+ inode_init_owner(idmap, inode, NULL,
+ S_IFDIR | (~current_umask() & S_IRWXUGO));
+ inode->i_op = &btrfs_dir_inode_operations;
+ inode->i_fop = &btrfs_dir_file_operations;
+ }
+ return inode;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -9085,7 +7907,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
struct btrfs_inode *ei;
struct inode *inode;
- ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
@@ -9095,13 +7917,18 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
+ /* new_delalloc_bytes and last_dir_index_offset are in a union. */
ei->new_delalloc_bytes = 0;
ei->defrag_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
ei->ro_flags = 0;
+ /*
+ * ->index_cnt will be properly initialized later when creating a new
+ * inode (btrfs_create_new_inode()) or when reading an existing inode
+ * from disk (btrfs_read_locked_inode()).
+ */
ei->csum_bytes = 0;
- ei->index_cnt = (u64)-1;
ei->dir_index = 0;
ei->last_unlink_trans = 0;
ei->last_reflink_trans = 0;
@@ -9118,24 +7945,24 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->delayed_node = NULL;
- ei->i_otime.tv_sec = 0;
- ei->i_otime.tv_nsec = 0;
+ ei->i_otime_sec = 0;
+ ei->i_otime_nsec = 0;
inode = &ei->vfs_inode;
- extent_map_tree_init(&ei->extent_tree);
- extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
- extent_io_tree_init(fs_info, &ei->io_failure_tree,
- IO_TREE_INODE_IO_FAILURE, inode);
- extent_io_tree_init(fs_info, &ei->file_extent_tree,
- IO_TREE_INODE_FILE_EXTENT, inode);
- ei->io_tree.track_uptodate = true;
- ei->io_failure_tree.track_uptodate = true;
- atomic_set(&ei->sync_writers, 0);
+ btrfs_extent_map_tree_init(&ei->extent_tree);
+
+ /* This io tree sets the valid inode. */
+ btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
+ ei->io_tree.inode = ei;
+
+ ei->file_extent_tree = NULL;
+
mutex_init(&ei->log_mutex);
- btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+ spin_lock_init(&ei->ordered_tree_lock);
+ ei->ordered_tree = RB_ROOT;
+ ei->ordered_tree_last = NULL;
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
- RB_CLEAR_NODE(&ei->rb_node);
init_rwsem(&ei->i_mmap_lock);
return inode;
@@ -9144,13 +7971,15 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
- btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
+ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
+ kfree(BTRFS_I(inode)->file_extent_tree);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif
void btrfs_free_inode(struct inode *inode)
{
+ kfree(BTRFS_I(inode)->file_extent_tree);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
@@ -9159,6 +7988,7 @@ void btrfs_destroy_inode(struct inode *vfs_inode)
struct btrfs_ordered_extent *ordered;
struct btrfs_inode *inode = BTRFS_I(vfs_inode);
struct btrfs_root *root = inode->root;
+ bool freespace_inode;
WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
WARN_ON(vfs_inode->i_data.nrpages);
@@ -9168,9 +7998,10 @@ void btrfs_destroy_inode(struct inode *vfs_inode)
if (!S_ISDIR(vfs_inode->i_mode)) {
WARN_ON(inode->delalloc_bytes);
WARN_ON(inode->new_delalloc_bytes);
+ WARN_ON(inode->csum_bytes);
}
- WARN_ON(inode->csum_bytes);
- WARN_ON(inode->defrag_bytes);
+ if (!root || !btrfs_is_data_reloc_root(root))
+ WARN_ON(inode->defrag_bytes);
/*
* This can happen where we create an inode, but somebody else also
@@ -9180,6 +8011,12 @@ void btrfs_destroy_inode(struct inode *vfs_inode)
if (!root)
return;
+ /*
+ * If this is a free space inode do not take the ordered extents lockdep
+ * map.
+ */
+ freespace_inode = btrfs_is_free_space_inode(inode);
+
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
if (!ordered)
@@ -9188,14 +8025,18 @@ void btrfs_destroy_inode(struct inode *vfs_inode)
btrfs_err(root->fs_info,
"found ordered extent %llu %llu on inode cleanup",
ordered->file_offset, ordered->num_bytes);
+
+ if (!freespace_inode)
+ btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
+
btrfs_remove_ordered_extent(inode, ordered);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
}
btrfs_qgroup_check_reserved_leak(inode);
- inode_tree_del(inode);
- btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+ btrfs_del_inode_from_root(inode);
+ btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
btrfs_put_root(inode->root);
}
@@ -9211,14 +8052,17 @@ int btrfs_drop_inode(struct inode *inode)
if (btrfs_root_refs(&root->root_item) == 0)
return 1;
else
- return generic_drop_inode(inode);
+ return inode_generic_drop(inode);
}
static void init_once(void *foo)
{
- struct btrfs_inode *ei = (struct btrfs_inode *) foo;
+ struct btrfs_inode *ei = foo;
inode_init_once(&ei->vfs_inode);
+#ifdef CONFIG_FS_VERITY
+ ei->i_verity_info = NULL;
+#endif
}
void __cold btrfs_destroy_cachep(void)
@@ -9229,65 +8073,34 @@ void __cold btrfs_destroy_cachep(void)
*/
rcu_barrier();
kmem_cache_destroy(btrfs_inode_cachep);
- kmem_cache_destroy(btrfs_trans_handle_cachep);
- kmem_cache_destroy(btrfs_path_cachep);
- kmem_cache_destroy(btrfs_free_space_cachep);
- kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
}
int __init btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
sizeof(struct btrfs_inode), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
init_once);
if (!btrfs_inode_cachep)
- goto fail;
-
- btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
- sizeof(struct btrfs_trans_handle), 0,
- SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
- if (!btrfs_trans_handle_cachep)
- goto fail;
-
- btrfs_path_cachep = kmem_cache_create("btrfs_path",
- sizeof(struct btrfs_path), 0,
- SLAB_MEM_SPREAD, NULL);
- if (!btrfs_path_cachep)
- goto fail;
-
- btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
- sizeof(struct btrfs_free_space), 0,
- SLAB_MEM_SPREAD, NULL);
- if (!btrfs_free_space_cachep)
- goto fail;
-
- btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
- PAGE_SIZE, PAGE_SIZE,
- SLAB_MEM_SPREAD, NULL);
- if (!btrfs_free_space_bitmap_cachep)
- goto fail;
+ return -ENOMEM;
return 0;
-fail:
- btrfs_destroy_cachep();
- return -ENOMEM;
}
-static int btrfs_getattr(struct user_namespace *mnt_userns,
+static int btrfs_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
u64 delalloc_bytes;
u64 inode_bytes;
struct inode *inode = d_inode(path->dentry);
- u32 blocksize = inode->i_sb->s_blocksize;
+ u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
u32 bi_flags = BTRFS_I(inode)->flags;
u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
stat->result_mask |= STATX_BTIME;
- stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
- stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
+ stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
+ stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
if (bi_flags & BTRFS_INODE_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (bi_flags & BTRFS_INODE_COMPRESS)
@@ -9304,15 +8117,18 @@ static int btrfs_getattr(struct user_namespace *mnt_userns,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(mnt_userns, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
+ stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
+ stat->result_mask |= STATX_SUBVOL;
+
spin_lock(&BTRFS_I(inode)->lock);
delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
inode_bytes = inode_get_bytes(inode);
spin_unlock(&BTRFS_I(inode)->lock);
stat->blocks = (ALIGN(inode_bytes, blocksize) +
- ALIGN(delalloc_bytes, blocksize)) >> 9;
+ ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
return 0;
}
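/*
 * Editor's note - a worked example of the blocks calculation above, with
 * hypothetical numbers: for a 4K block size, inode_bytes = 10000 and
 * delalloc_bytes = 3000 give ALIGN(10000, 4096) + ALIGN(3000, 4096) =
 * 12288 + 4096 = 16384, so stat->blocks = 16384 >> SECTOR_SHIFT = 32
 * 512-byte sectors.
 */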
@@ -9321,22 +8137,25 @@ static int btrfs_rename_exchange(struct inode *old_dir,
struct inode *new_dir,
struct dentry *new_dentry)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
struct btrfs_trans_handle *trans;
+ unsigned int trans_num_items;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
- struct timespec64 ctime = current_time(old_inode);
+ struct btrfs_rename_ctx old_rename_ctx;
+ struct btrfs_rename_ctx new_rename_ctx;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
u64 old_idx = 0;
u64 new_idx = 0;
int ret;
int ret2;
- bool root_log_pinned = false;
- bool dest_log_pinned = false;
bool need_abort = false;
+ bool logs_pinned = false;
+ struct fscrypt_name old_fname, new_fname;
+ struct fscrypt_str *old_name, *new_name;
/*
* For non-subvolumes allow exchange only within one subvolume, in the
@@ -9348,20 +8167,56 @@ static int btrfs_rename_exchange(struct inode *old_dir,
new_ino != BTRFS_FIRST_FREE_OBJECTID))
return -EXDEV;
+ ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
+ if (ret)
+ return ret;
+
+ ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
+ if (ret) {
+ fscrypt_free_filename(&old_fname);
+ return ret;
+ }
+
+ old_name = &old_fname.disk_name;
+ new_name = &new_fname.disk_name;
+
/* close the race window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
new_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&fs_info->subvol_sem);
/*
- * We want to reserve the absolute worst case amount of items. So if
- * both inodes are subvols and we need to unlink them then that would
- * require 4 item modifications, but if they are both normal inodes it
- * would require 5 item modifications, so we'll assume their normal
- * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items
- * should cover the worst case number of items we'll modify.
+ * For each inode:
+ * 1 to remove old dir item
+ * 1 to remove old dir index
+ * 1 to add new dir item
+ * 1 to add new dir index
+ * 1 to update parent inode
+ *
+ * If the parents are the same, we only need to account for one
+ * parent inode update.
*/
- trans = btrfs_start_transaction(root, 12);
+ trans_num_items = (old_dir == new_dir ? 9 : 10);
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * 1 to remove old root ref
+ * 1 to remove old root backref
+ * 1 to add new root ref
+ * 1 to add new root backref
+ */
+ trans_num_items += 4;
+ } else {
+ /*
+ * 1 to update inode item
+ * 1 to remove old inode ref
+ * 1 to add new inode ref
+ */
+ trans_num_items += 3;
+ }
+ if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
+ trans_num_items += 4;
+ else
+ trans_num_items += 3;
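/*
 * Editor's note - worked totals for the accounting above: exchanging two
 * regular inodes across different directories reserves 10 + 3 + 3 = 16
 * units, the same exchange within one directory reserves 9 + 3 + 3 = 15,
 * and each subvolume endpoint adds one more unit (4 instead of 3).
 */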
+ trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_notrans;
@@ -9392,10 +8247,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(trans);
} else {
- ret = btrfs_insert_inode_ref(trans, dest,
- new_dentry->d_name.name,
- new_dentry->d_name.len,
- old_ino,
+ ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
btrfs_ino(BTRFS_I(new_dir)),
old_idx);
if (ret)
@@ -9408,14 +8260,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(trans);
} else {
- ret = btrfs_insert_inode_ref(trans, root,
- old_dentry->d_name.name,
- old_dentry->d_name.len,
- new_ino,
+ ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
btrfs_ino(BTRFS_I(old_dir)),
new_idx);
if (ret) {
- if (need_abort)
+ if (unlikely(need_abort))
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9426,85 +8275,94 @@ static int btrfs_rename_exchange(struct inode *old_dir,
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
inode_inc_iversion(new_inode);
- old_dir->i_ctime = old_dir->i_mtime = ctime;
- new_dir->i_ctime = new_dir->i_mtime = ctime;
- old_inode->i_ctime = ctime;
- new_inode->i_ctime = ctime;
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
+ new_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * If we are renaming in the same directory (and it's not for
+ * root entries) pin the log early to prevent any concurrent
+ * task from logging the directory after we removed the old
+ * entries and before we add the new entries, otherwise that
+ * task can sync a log without any entry for the inodes we are
+ * renaming and therefore replaying that log, if a power failure
+ * happens after syncing the log, would result in deleting the
+ * inodes.
+ *
+ * If the rename affects two different directories, we want to
+ * make sure the that there's no log commit that contains
+ * updates for only one of the directories but not for the
+ * other.
+ *
+ * If we are renaming an entry for a root, we don't care about
+ * log updates since we called btrfs_set_log_full_commit().
+ */
+ btrfs_pin_log_trans(root);
+ btrfs_pin_log_trans(dest);
+ logs_pinned = true;
+ }
if (old_dentry->d_parent != new_dentry->d_parent) {
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
- BTRFS_I(old_inode), 1);
+ BTRFS_I(old_inode), true);
btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
- BTRFS_I(new_inode), 1);
- }
-
- /*
- * Now pin the logs of the roots. We do it to ensure that no other task
- * can sync the logs while we are in progress with the rename, because
- * that could result in an inconsistency in case any of the inodes that
- * are part of this rename operation were logged before.
- *
- * We pin the logs even if at this precise moment none of the inodes was
- * logged before. This is because right after we checked for that, some
- * other task fsyncing some other inode not involved with this rename
- * operation could log that one of our inodes exists.
- *
- * We don't need to pin the logs before the above calls to
- * btrfs_insert_inode_ref(), since those don't ever need to change a log.
- */
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_pin_log_trans(root);
- root_log_pinned = true;
- }
- if (new_ino != BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_pin_log_trans(dest);
- dest_log_pinned = true;
+ BTRFS_I(new_inode), true);
}
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
+ ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else { /* src is an inode */
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(old_dentry->d_inode),
- old_dentry->d_name.name,
- old_dentry->d_name.len);
- if (!ret)
- ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ old_name, &old_rename_ctx);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
/* dest is a subvolume */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
+ ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else { /* dest is an inode */
ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(new_dentry->d_inode),
- new_dentry->d_name.name,
- new_dentry->d_name.len);
- if (!ret)
- ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ new_name, &new_rename_ctx);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
- new_dentry->d_name.name,
- new_dentry->d_name.len, 0, old_idx);
- if (ret) {
+ new_name, 0, old_idx);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
- old_dentry->d_name.name,
- old_dentry->d_name.len, 0, new_idx);
- if (ret) {
+ old_name, 0, new_idx);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9514,45 +8372,23 @@ static int btrfs_rename_exchange(struct inode *old_dir,
if (new_inode->i_nlink == 1)
BTRFS_I(new_inode)->dir_index = new_idx;
- if (root_log_pinned) {
- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
- new_dentry->d_parent);
- btrfs_end_log_trans(root);
- root_log_pinned = false;
- }
- if (dest_log_pinned) {
- btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
- old_dentry->d_parent);
- btrfs_end_log_trans(dest);
- dest_log_pinned = false;
- }
-out_fail:
/*
- * If we have pinned a log and an error happened, we unpin tasks
- * trying to sync the log and force them to fallback to a transaction
- * commit if the log currently contains any of the inodes involved in
- * this rename operation (to ensure we do not persist a log with an
- * inconsistent state for any of these inodes or leading to any
- * inconsistencies when replayed). If the transaction was aborted, the
- * abortion reason is propagated to userspace when attempting to commit
- * the transaction. If the log does not contain any of these inodes, we
- * allow the tasks to sync it.
+ * Do the log updates for all inodes.
+ *
+ * If either entry is for a root we don't need to update the logs since
+ * we've called btrfs_set_log_full_commit() before.
*/
- if (ret && (root_log_pinned || dest_log_pinned)) {
- if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
- btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
- btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
- btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))
- btrfs_set_log_full_commit(trans);
+ if (logs_pinned) {
+ btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
+ old_rename_ctx.index, new_dentry->d_parent);
+ btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
+ new_rename_ctx.index, old_dentry->d_parent);
+ }
- if (root_log_pinned) {
- btrfs_end_log_trans(root);
- root_log_pinned = false;
- }
- if (dest_log_pinned) {
- btrfs_end_log_trans(dest);
- dest_log_pinned = false;
- }
+out_fail:
+ if (logs_pinned) {
+ btrfs_end_log_trans(root);
+ btrfs_end_log_trans(dest);
}
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
@@ -9561,78 +8397,49 @@ out_notrans:
old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
+ fscrypt_free_filename(&new_fname);
+ fscrypt_free_filename(&old_fname);
return ret;
}
-static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct user_namespace *mnt_userns,
- struct inode *dir,
- struct dentry *dentry)
+static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
+ struct inode *dir)
{
- int ret;
struct inode *inode;
- u64 objectid;
- u64 index;
- ret = btrfs_get_free_objectid(root, &objectid);
- if (ret)
- return ret;
-
- inode = btrfs_new_inode(trans, root, mnt_userns, dir,
- dentry->d_name.name,
- dentry->d_name.len,
- btrfs_ino(BTRFS_I(dir)),
- objectid,
- S_IFCHR | WHITEOUT_MODE,
- &index);
-
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- return ret;
+ inode = new_inode(dir->i_sb);
+ if (inode) {
+ inode_init_owner(idmap, inode, dir,
+ S_IFCHR | WHITEOUT_MODE);
+ inode->i_op = &btrfs_special_inode_operations;
+ init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
}
-
- inode->i_op = &btrfs_special_inode_operations;
- init_special_inode(inode, inode->i_mode,
- WHITEOUT_DEV);
-
- ret = btrfs_init_inode_security(trans, inode, dir,
- &dentry->d_name);
- if (ret)
- goto out;
-
- ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
- BTRFS_I(inode), 0, index);
- if (ret)
- goto out;
-
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
-out:
- unlock_new_inode(inode);
- if (ret)
- inode_dec_link_count(inode);
- iput(inode);
-
- return ret;
+ return inode;
}
-static int btrfs_rename(struct user_namespace *mnt_userns,
+static int btrfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
+ struct btrfs_new_inode_args whiteout_args = {
+ .dir = old_dir,
+ .dentry = old_dentry,
+ };
struct btrfs_trans_handle *trans;
unsigned int trans_num_items;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = d_inode(new_dentry);
struct inode *old_inode = d_inode(old_dentry);
+ struct btrfs_rename_ctx rename_ctx;
u64 index = 0;
int ret;
int ret2;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
- bool log_pinned = false;
+ struct fscrypt_name old_fname, new_fname;
+ bool logs_pinned = false;
if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
@@ -9649,22 +8456,28 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
+ ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
+ if (ret)
+ return ret;
- /* check for collisions, even if the name isn't there */
- ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
- new_dentry->d_name.name,
- new_dentry->d_name.len);
+ ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
+ if (ret) {
+ fscrypt_free_filename(&old_fname);
+ return ret;
+ }
+ /* check for collisions, even if the name isn't there */
+ ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
if (ret) {
if (ret == -EEXIST) {
/* we shouldn't get
* eexist without a new_inode */
if (WARN_ON(!new_inode)) {
- return ret;
+ goto out_fscrypt_names;
}
} else {
/* maybe -EOVERFLOW */
- return ret;
+ goto out_fscrypt_names;
}
}
ret = 0;
@@ -9676,23 +8489,58 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
filemap_flush(old_inode->i_mapping);
- /* close the racy window with snapshot create/destroy ioctl */
- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (flags & RENAME_WHITEOUT) {
+ whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
+ if (!whiteout_args.inode) {
+ ret = -ENOMEM;
+ goto out_fscrypt_names;
+ }
+ ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
+ if (ret)
+ goto out_whiteout_inode;
+ } else {
+ /* 1 to update the old parent inode. */
+ trans_num_items = 1;
+ }
+
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ /* Close the race window with snapshot create/destroy ioctl */
down_read(&fs_info->subvol_sem);
+ /*
+ * 1 to remove old root ref
+ * 1 to remove old root backref
+ * 1 to add new root ref
+ * 1 to add new root backref
+ */
+ trans_num_items += 4;
+ } else {
+ /*
+ * 1 to update inode
+ * 1 to remove old inode ref
+ * 1 to add new inode ref
+ */
+ trans_num_items += 3;
+ }
/*
- * We want to reserve the absolute worst case amount of items. So if
- * both inodes are subvols and we need to unlink them then that would
- * require 4 item modifications, but if they are both normal inodes it
- * would require 5 item modifications, so we'll assume they are normal
- * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
- * should cover the worst case number of items we'll modify.
- * If our rename has the whiteout flag, we need more 5 units for the
- * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
- * when selinux is enabled).
- */
- trans_num_items = 11;
- if (flags & RENAME_WHITEOUT)
+ * 1 to remove old dir item
+ * 1 to remove old dir index
+ * 1 to add new dir item
+ * 1 to add new dir index
+ */
+ trans_num_items += 4;
+ /* 1 to update new parent inode if it's not the same as the old parent */
+ if (new_dir != old_dir)
+ trans_num_items++;
+ if (new_inode) {
+ /*
+ * 1 to update inode
+ * 1 to remove inode ref
+ * 1 to remove dir item
+ * 1 to remove dir index
+ * 1 to possibly add orphan item
+ */
trans_num_items += 5;
+ }
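/*
 * Editor's note - a worked total for the accounting above, for one
 * hypothetical case: renaming a regular file over an existing regular file
 * across two different directories, without RENAME_WHITEOUT, reserves
 * 1 + 3 + 4 + 1 + 5 = 14 units.
 */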
trans = btrfs_start_transaction(root, trans_num_items);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -9714,11 +8562,9 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(trans);
} else {
- ret = btrfs_insert_inode_ref(trans, dest,
- new_dentry->d_name.name,
- new_dentry->d_name.len,
- old_ino,
- btrfs_ino(BTRFS_I(new_dir)), index);
+ ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
+ old_ino, btrfs_ino(BTRFS_I(new_dir)),
+ index);
if (ret)
goto out_fail;
}
@@ -9726,74 +8572,88 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- old_inode->i_ctime = current_time(old_dir);
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
- if (old_dentry->d_parent != new_dentry->d_parent)
- btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
- BTRFS_I(old_inode), 1);
-
- if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
- } else {
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
/*
- * Now pin the log. We do it to ensure that no other task can
- * sync the log while we are in progress with the rename, as
- * that could result in an inconsistency in case any of the
- * inodes that are part of this rename operation were logged
- * before.
+ * If we are renaming in the same directory (and it's not a
+ * root entry) pin the log to prevent any concurrent task from
+ * logging the directory after we removed the old entry and
+ * before we add the new entry, otherwise that task can sync
+ * a log without any entry for the inode we are renaming and
+ * therefore replaying that log, if a power failure happens
+ * after syncing the log, would result in deleting the inode.
*
- * We pin the log even if at this precise moment none of the
- * inodes was logged before. This is because right after we
- * checked for that, some other task fsyncing some other inode
- * not involved with this rename operation could log that one of
- * our inodes exists.
+ * If the rename affects two different directories, we want to
+ * make sure the that there's no log commit that contains
+ * updates for only one of the directories but not for the
+ * other.
*
- * We don't need to pin the logs before the above call to
- * btrfs_insert_inode_ref(), since that does not need to change
- * a log.
+ * If we are renaming an entry for a root, we don't care about
+ * log updates since we called btrfs_set_log_full_commit().
*/
btrfs_pin_log_trans(root);
- log_pinned = true;
- ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
- BTRFS_I(d_inode(old_dentry)),
- old_dentry->d_name.name,
- old_dentry->d_name.len);
- if (!ret)
- ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
+ btrfs_pin_log_trans(dest);
+ logs_pinned = true;
}
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+
+ if (old_dentry->d_parent != new_dentry->d_parent)
+ btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
+ BTRFS_I(old_inode), true);
+
+ if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ } else {
+ ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
+ BTRFS_I(d_inode(old_dentry)),
+ &old_fname.disk_name, &rename_ctx);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
if (new_inode) {
inode_inc_iversion(new_inode);
- new_inode->i_ctime = current_time(new_inode);
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
+ ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(d_inode(new_dentry)),
- new_dentry->d_name.name,
- new_dentry->d_name.len);
+ &new_fname.disk_name);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
- if (!ret && new_inode->i_nlink == 0)
+ if (new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans,
BTRFS_I(d_inode(new_dentry)));
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
- new_dentry->d_name.name,
- new_dentry->d_name.len, 0, index);
- if (ret) {
+ &new_fname.disk_name, 0, index);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9801,67 +8661,61 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
if (old_inode->i_nlink == 1)
BTRFS_I(old_inode)->dir_index = index;
- if (log_pinned) {
- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
- new_dentry->d_parent);
- btrfs_end_log_trans(root);
- log_pinned = false;
- }
+ if (logs_pinned)
+ btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
+ rename_ctx.index, new_dentry->d_parent);
if (flags & RENAME_WHITEOUT) {
- ret = btrfs_whiteout_for_rename(trans, root, mnt_userns,
- old_dir, old_dentry);
-
- if (ret) {
+ ret = btrfs_create_new_inode(trans, &whiteout_args);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_fail;
+ } else {
+ unlock_new_inode(whiteout_args.inode);
+ iput(whiteout_args.inode);
+ whiteout_args.inode = NULL;
}
}
out_fail:
- /*
- * If we have pinned the log and an error happened, we unpin tasks
- * trying to sync the log and force them to fallback to a transaction
- * commit if the log currently contains any of the inodes involved in
- * this rename operation (to ensure we do not persist a log with an
- * inconsistent state for any of these inodes or leading to any
- * inconsistencies when replayed). If the transaction was aborted, the
- * abortion reason is propagated to userspace when attempting to commit
- * the transaction. If the log does not contain any of these inodes, we
- * allow the tasks to sync it.
- */
- if (ret && log_pinned) {
- if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
- btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
- btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
- (new_inode &&
- btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
- btrfs_set_log_full_commit(trans);
-
+ if (logs_pinned) {
btrfs_end_log_trans(root);
- log_pinned = false;
+ btrfs_end_log_trans(dest);
}
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
-
+ if (flags & RENAME_WHITEOUT)
+ btrfs_new_inode_args_destroy(&whiteout_args);
+out_whiteout_inode:
+ if (flags & RENAME_WHITEOUT)
+ iput(whiteout_args.inode);
+out_fscrypt_names:
+ fscrypt_free_filename(&old_fname);
+ fscrypt_free_filename(&new_fname);
return ret;
}
-static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
+ int ret;
+
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
if (flags & RENAME_EXCHANGE)
- return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
- new_dentry);
+ ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
+ new_dentry);
+ else
+ ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+
+ btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
- return btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir,
- new_dentry, flags);
+ return ret;
}
struct btrfs_delalloc_work {
@@ -9899,7 +8753,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
- btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
+ btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
return work;
}
@@ -9908,49 +8762,42 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-static int start_delalloc_inodes(struct btrfs_root *root,
- struct writeback_control *wbc, bool snapshot,
- bool in_reclaim_context)
+static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
+ bool snapshot, bool in_reclaim_context)
{
- struct btrfs_inode *binode;
- struct inode *inode;
struct btrfs_delalloc_work *work, *next;
- struct list_head works;
- struct list_head splice;
+ LIST_HEAD(works);
+ LIST_HEAD(splice);
int ret = 0;
- bool full_flush = wbc->nr_to_write == LONG_MAX;
-
- INIT_LIST_HEAD(&works);
- INIT_LIST_HEAD(&splice);
mutex_lock(&root->delalloc_mutex);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
- binode = list_entry(splice.next, struct btrfs_inode,
- delalloc_inodes);
+ struct btrfs_inode *inode;
+ struct inode *tmp_inode;
- list_move_tail(&binode->delalloc_inodes,
- &root->delalloc_inodes);
+ inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);
+
+ list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
if (in_reclaim_context &&
- test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
+ test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
continue;
- inode = igrab(&binode->vfs_inode);
- if (!inode) {
+ tmp_inode = igrab(&inode->vfs_inode);
+ if (!tmp_inode) {
cond_resched_lock(&root->delalloc_lock);
continue;
}
spin_unlock(&root->delalloc_lock);
if (snapshot)
- set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
- &binode->runtime_flags);
- if (full_flush) {
- work = btrfs_alloc_delalloc_work(inode);
+ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
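+ /*
+ * A NULL nr_to_write means a full flush: queue async work per inode;
+ * otherwise write back directly and stop once the caller's budget is
+ * exhausted.
+ */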
+ if (nr_to_write == NULL) {
+ work = btrfs_alloc_delalloc_work(tmp_inode);
if (!work) {
- iput(inode);
+ iput(tmp_inode);
ret = -ENOMEM;
goto out;
}
@@ -9958,9 +8805,11 @@ static int start_delalloc_inodes(struct btrfs_root *root,
btrfs_queue_work(root->fs_info->flush_workers,
&work->work);
} else {
- ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
+ ret = filemap_flush_nr(tmp_inode->i_mapping,
+ nr_to_write);
btrfs_add_delayed_iput(inode);
- if (ret || wbc->nr_to_write <= 0)
+
+ if (ret || *nr_to_write <= 0)
goto out;
}
cond_resched();
@@ -9986,49 +8835,28 @@ out:
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
- struct writeback_control wbc = {
- .nr_to_write = LONG_MAX,
- .sync_mode = WB_SYNC_NONE,
- .range_start = 0,
- .range_end = LLONG_MAX,
- };
struct btrfs_fs_info *fs_info = root->fs_info;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
-
- return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
+ return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
bool in_reclaim_context)
{
- struct writeback_control wbc = {
- .nr_to_write = nr,
- .sync_mode = WB_SYNC_NONE,
- .range_start = 0,
- .range_end = LLONG_MAX,
- };
+ long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
struct btrfs_root *root;
- struct list_head splice;
+ LIST_HEAD(splice);
int ret;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
- INIT_LIST_HEAD(&splice);
-
mutex_lock(&fs_info->delalloc_root_mutex);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice)) {
- /*
- * Reset nr_to_write here so we know that we're doing a full
- * flush.
- */
- if (nr == LONG_MAX)
- wbc.nr_to_write = LONG_MAX;
-
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_root(root);
@@ -10037,9 +8865,10 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
- ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
+ ret = start_delalloc_inodes(root, nr_to_write, false,
+ in_reclaim_context);
btrfs_put_root(root);
- if (ret < 0 || wbc.nr_to_write <= 0)
+ if (ret < 0 || nr <= 0)
goto out;
spin_lock(&fs_info->delalloc_root_lock);
}
@@ -10056,18 +8885,21 @@ out:
return ret;
}
-static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_key key;
- struct inode *inode = NULL;
- int err;
- u64 objectid;
- u64 index = 0;
+ struct inode *inode;
+ struct btrfs_new_inode_args new_inode_args = {
+ .dir = dir,
+ .dentry = dentry,
+ };
+ unsigned int trans_num_items;
+ int ret;
int name_len;
int datasize;
unsigned long ptr;
@@ -10075,62 +8907,60 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
struct extent_buffer *leaf;
name_len = strlen(symname);
- if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
- return -ENAMETOOLONG;
-
/*
- * 2 items for inode item and ref
- * 2 items for dir items
- * 1 item for updating parent inode item
- * 1 item for the inline extent item
- * 1 item for xattr if selinux is on
+ * Symlinks utilize uncompressed inline extent data, which should not
+ * reach block size.
*/
- trans = btrfs_start_transaction(root, 7);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
+ name_len >= fs_info->sectorsize)
+ return -ENAMETOOLONG;
- err = btrfs_get_free_objectid(root, &objectid);
- if (err)
- goto out_unlock;
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return -ENOMEM;
+ inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
+ inode->i_op = &btrfs_symlink_inode_operations;
+ inode_nohighmem(inode);
+ inode->i_mapping->a_ops = &btrfs_aops;
+ btrfs_i_size_write(BTRFS_I(inode), name_len);
+ inode_set_bytes(inode, name_len);
- inode = btrfs_new_inode(trans, root, mnt_userns, dir,
- dentry->d_name.name, dentry->d_name.len,
- btrfs_ino(BTRFS_I(dir)), objectid,
- S_IFLNK | S_IRWXUGO, &index);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- inode = NULL;
- goto out_unlock;
- }
+ new_inode_args.inode = inode;
+ ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
+ if (ret)
+ goto out_inode;
+ /* 1 additional item for the inline extent */
+ trans_num_items++;
- /*
- * If the active LSM wants to access the inode during
- * d_instantiate it needs these. Smack checks to see
- * if the filesystem supports xattrs by looking at the
- * ops vector.
- */
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
- inode->i_mapping->a_ops = &btrfs_aops;
+ trans = btrfs_start_transaction(root, trans_num_items);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_new_inode_args;
+ }
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err)
- goto out_unlock;
+ ret = btrfs_create_new_inode(trans, &new_inode_args);
+ if (ret)
+ goto out;
path = btrfs_alloc_path();
- if (!path) {
- err = -ENOMEM;
- goto out_unlock;
+ if (unlikely(!path)) {
+ ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
+ discard_new_inode(inode);
+ inode = NULL;
+ goto out;
}
key.objectid = btrfs_ino(BTRFS_I(inode));
- key.offset = 0;
key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
datasize = btrfs_file_extent_calc_inline_size(name_len);
- err = btrfs_insert_empty_item(trans, root, path, &key,
- datasize);
- if (err) {
+ ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
btrfs_free_path(path);
- goto out_unlock;
+ discard_new_inode(inode);
+ inode = NULL;
+ goto out;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -10145,35 +8975,19 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
- btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
- inode->i_op = &btrfs_symlink_inode_operations;
- inode_nohighmem(inode);
- inode_set_bytes(inode, name_len);
- btrfs_i_size_write(BTRFS_I(inode), name_len);
- err = btrfs_update_inode(trans, root, BTRFS_I(inode));
- /*
- * Last step, add directory indexes for our symlink inode. This is the
- * last step to avoid extra cleanup of these indexes if an error happens
- * elsewhere above.
- */
- if (!err)
- err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
- BTRFS_I(inode), 0, index);
- if (err)
- goto out_unlock;
-
d_instantiate_new(dentry, inode);
-
-out_unlock:
+ ret = 0;
+out:
btrfs_end_transaction(trans);
- if (err && inode) {
- inode_dec_link_count(inode);
- discard_new_inode(inode);
- }
btrfs_btree_balance_dirty(fs_info);
- return err;
+out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+out_inode:
+ if (ret)
+ iput(inode);
+ return ret;
}
static struct btrfs_trans_handle *insert_prealloc_file_extent(
@@ -10188,7 +9002,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
struct btrfs_path *path;
u64 start = ins->objectid;
u64 len = ins->offset;
- int qgroup_released;
+ u64 qgroup_released = 0;
int ret;
memset(&stack_fi, 0, sizeof(stack_fi));
@@ -10201,9 +9015,9 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
/* Encryption and other encoding is reserved and all 0 */
- qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
- if (qgroup_released < 0)
- return ERR_PTR(qgroup_released);
+ ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
+ if (ret < 0)
+ return ERR_PTR(ret);
if (trans) {
ret = insert_reserved_file_extent(trans, inode,
@@ -10221,6 +9035,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
extent_info.file_offset = file_offset;
extent_info.extent_buf = (char *)&stack_fi;
extent_info.is_new_extent = true;
+ extent_info.update_times = true;
extent_info.qgroup_reserved = qgroup_released;
extent_info.insertions = 0;
@@ -10247,7 +9062,7 @@ free_qgroup:
* or we leak qgroup data reservation.
*/
btrfs_qgroup_free_refroot(inode->root->fs_info,
- inode->root->root_key.objectid, qgroup_released,
+ btrfs_root_id(inode->root), qgroup_released,
BTRFS_QGROUP_RSV_DATA);
return ERR_PTR(ret);
}
@@ -10257,8 +9072,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
@@ -10284,7 +9098,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
*/
cur_bytes = min(cur_bytes, last_alloc);
ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
- min_size, 0, *alloc_hint, &ins, 1, 0);
+ min_size, 0, *alloc_hint, &ins, true, false);
if (ret)
break;
@@ -10310,48 +9124,36 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
btrfs_free_reserved_extent(fs_info, ins.objectid,
- ins.offset, 0);
+ ins.offset, false);
break;
}
- btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
- cur_offset + ins.offset -1, 0);
-
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
+ cur_offset + ins.offset - 1, false);
+ btrfs_set_inode_full_sync(BTRFS_I(inode));
goto next;
}
em->start = cur_offset;
- em->orig_start = cur_offset;
em->len = ins.offset;
- em->block_start = ins.objectid;
- em->block_len = ins.offset;
- em->orig_block_len = ins.offset;
+ em->disk_bytenr = ins.objectid;
+ em->offset = 0;
+ em->disk_num_bytes = ins.offset;
em->ram_bytes = ins.offset;
- set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ em->flags |= EXTENT_FLAG_PREALLOC;
em->generation = trans->transid;
- while (1) {
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 1);
- write_unlock(&em_tree->lock);
- if (ret != -EEXIST)
- break;
- btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
- cur_offset + ins.offset - 1,
- 0);
- }
- free_extent_map(em);
+ ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
+ btrfs_free_extent_map(em);
next:
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
@@ -10364,9 +9166,9 @@ next:
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans);
@@ -10402,12 +9204,12 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
min_size, actual_len, alloc_hint, trans);
}
-static int btrfs_set_page_dirty(struct page *page)
-{
- return __set_page_dirty_nobuffers(page);
-}
-
-static int btrfs_permission(struct user_namespace *mnt_userns,
+/*
+ * NOTE: in case you are adding a MAY_EXEC check for directories:
+ * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to
+ * elide calls here.
+ */
+static int btrfs_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -10420,92 +9222,762 @@ static int btrfs_permission(struct user_namespace *mnt_userns,
if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
return -EACCES;
}
- return generic_permission(mnt_userns, inode, mask);
+ return generic_permission(idmap, inode, mask);
}
-static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
- struct inode *inode = NULL;
- u64 objectid;
- u64 index;
- int ret = 0;
+ struct inode *inode;
+ struct btrfs_new_inode_args new_inode_args = {
+ .dir = dir,
+ .dentry = file->f_path.dentry,
+ .orphan = true,
+ };
+ unsigned int trans_num_items;
+ int ret;
+
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return -ENOMEM;
+ inode_init_owner(idmap, inode, dir, mode);
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+ inode->i_mapping->a_ops = &btrfs_aops;
+
+ new_inode_args.inode = inode;
+ ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
+ if (ret)
+ goto out_inode;
+
+ trans = btrfs_start_transaction(root, trans_num_items);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_new_inode_args;
+ }
+
+ ret = btrfs_create_new_inode(trans, &new_inode_args);
/*
- * 5 units required for adding orphan entry
+ * We set number of links to 0 in btrfs_create_new_inode(), and here we
+ * set it to 1 because d_tmpfile() will issue a warning if the count is
+ * 0, through:
+ *
+ * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
*/
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ set_nlink(inode, 1);
- ret = btrfs_get_free_objectid(root, &objectid);
+ if (!ret) {
+ d_tmpfile(file, inode);
+ unlock_new_inode(inode);
+ mark_inode_dirty(inode);
+ }
+
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
+out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+out_inode:
if (ret)
- goto out;
+ iput(inode);
+ return finish_open_simple(file, ret);
+}
- inode = btrfs_new_inode(trans, root, mnt_userns, dir, NULL, 0,
- btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- inode = NULL;
- goto out;
+int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
+ int compress_type)
+{
+ switch (compress_type) {
+ case BTRFS_COMPRESS_NONE:
+ return BTRFS_ENCODED_IO_COMPRESSION_NONE;
+ case BTRFS_COMPRESS_ZLIB:
+ return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
+ case BTRFS_COMPRESS_LZO:
+ /*
+ * The LZO format depends on the sector size. 64K is the maximum
+ * sector size that we support.
+ */
+ if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
+ return -EINVAL;
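+ /* sectorsize_bits is 12..16 here (4K..64K), mapping onto the LZO_4K..LZO_64K values. */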
+ return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
+ (fs_info->sectorsize_bits - 12);
+ case BTRFS_COMPRESS_ZSTD:
+ return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
+ default:
+ return -EUCLEAN;
}
+}
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
+static ssize_t btrfs_encoded_read_inline(
+ struct kiocb *iocb,
+ struct iov_iter *iter, u64 start,
+ u64 lockend,
+ struct extent_state **cached_state,
+ u64 extent_start, size_t count,
+ struct btrfs_ioctl_encoded_io_args *encoded,
+ bool *unlocked)
+{
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *item;
+ u64 ram_bytes;
+ unsigned long ptr;
+ void *tmp;
+ ssize_t ret;
+ const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
- inode->i_mapping->a_ops = &btrfs_aops;
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
- ret = btrfs_init_inode_security(trans, inode, dir, NULL);
- if (ret)
+ path->nowait = nowait;
+
+ ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
+ extent_start, 0);
+ if (ret) {
+ if (unlikely(ret > 0)) {
+ /* The extent item disappeared? */
+ return -EIO;
+ }
+ return ret;
+ }
+ leaf = path->nodes[0];
+ item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+
+ ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
+ ptr = btrfs_file_extent_inline_start(item);
+
+ encoded->len = min_t(u64, extent_start + ram_bytes,
+ inode->vfs_inode.i_size) - iocb->ki_pos;
+ ret = btrfs_encoded_io_compression_from_extent(fs_info,
+ btrfs_file_extent_compression(leaf, item));
+ if (ret < 0)
+ return ret;
+ encoded->compression = ret;
+ if (encoded->compression) {
+ size_t inline_size;
+
+ inline_size = btrfs_file_extent_inline_item_len(leaf,
+ path->slots[0]);
+ if (inline_size > count)
+ return -ENOBUFS;
+
+ count = inline_size;
+ encoded->unencoded_len = ram_bytes;
+ encoded->unencoded_offset = iocb->ki_pos - extent_start;
+ } else {
+ count = min_t(u64, count, encoded->len);
+ encoded->len = count;
+ encoded->unencoded_len = count;
+ ptr += iocb->ki_pos - extent_start;
+ }
+
+ tmp = kmalloc(count, GFP_NOFS);
+ if (!tmp)
+ return -ENOMEM;
+
+ read_extent_buffer(leaf, tmp, ptr, count);
+ btrfs_release_path(path);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ *unlocked = true;
+
+ ret = copy_to_iter(tmp, count, iter);
+ if (ret != count)
+ ret = -EFAULT;
+ kfree(tmp);
+
+ return ret;
+}
+
+struct btrfs_encoded_read_private {
+ struct completion *sync_reads;
+ void *uring_ctx;
+ refcount_t pending_refs;
+ blk_status_t status;
+};
+
+static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
+{
+ struct btrfs_encoded_read_private *priv = bbio->private;
+
+ if (bbio->bio.bi_status) {
+ /*
+ * The memory barrier implied by the refcount_dec_and_test() here
+ * pairs with the memory barrier implied by the refcount_dec_and_test()
+ * in btrfs_encoded_read_regular_fill_pages() to ensure that
+ * this write is observed before the load of status in
+ * btrfs_encoded_read_regular_fill_pages().
+ */
+ WRITE_ONCE(priv->status, bbio->bio.bi_status);
+ }
+ if (refcount_dec_and_test(&priv->pending_refs)) {
+ int err = blk_status_to_errno(READ_ONCE(priv->status));
+
+ if (priv->uring_ctx) {
+ btrfs_uring_read_extent_endio(priv->uring_ctx, err);
+ kfree(priv);
+ } else {
+ complete(priv->sync_reads);
+ }
+ }
+ bio_put(&bbio->bio);
+}
+
+int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+ u64 disk_bytenr, u64 disk_io_size,
+ struct page **pages, void *uring_ctx)
+{
+ struct btrfs_encoded_read_private *priv, sync_priv;
+ struct completion sync_reads;
+ unsigned long i = 0;
+ struct btrfs_bio *bbio;
+ int ret;
+
+ /*
+ * Fast path for synchronous reads which completes in this call, io_uring
+ * needs longer time span.
+ */
+ if (uring_ctx) {
+ priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
+ } else {
+ priv = &sync_priv;
+ init_completion(&sync_reads);
+ priv->sync_reads = &sync_reads;
+ }
+
+ refcount_set(&priv->pending_refs, 1);
+ priv->status = 0;
+ priv->uring_ctx = uring_ctx;
+
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
+ btrfs_encoded_read_endio, priv);
+ bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+
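+ /*
+ * Add the pages one at a time; whenever the current bio is full, submit
+ * it and retry the same page with a fresh bio for the rest of the range.
+ */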
+ do {
+ size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
+
+ if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
+ refcount_inc(&priv->pending_refs);
+ btrfs_submit_bbio(bbio, 0);
+
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
+ btrfs_encoded_read_endio, priv);
+ bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ continue;
+ }
+
+ i++;
+ disk_bytenr += bytes;
+ disk_io_size -= bytes;
+ } while (disk_io_size);
+
+ refcount_inc(&priv->pending_refs);
+ btrfs_submit_bbio(bbio, 0);
+
+ if (uring_ctx) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
+ ret = blk_status_to_errno(READ_ONCE(priv->status));
+ btrfs_uring_read_extent_endio(uring_ctx, ret);
+ kfree(priv);
+ return ret;
+ }
+
+ return -EIOCBQUEUED;
+ } else {
+ if (!refcount_dec_and_test(&priv->pending_refs))
+ wait_for_completion_io(&sync_reads);
+ /* See btrfs_encoded_read_endio() for ordering. */
+ return blk_status_to_errno(READ_ONCE(priv->status));
+ }
+}
+
+ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
+ u64 start, u64 lockend,
+ struct extent_state **cached_state,
+ u64 disk_bytenr, u64 disk_io_size,
+ size_t count, bool compressed, bool *unlocked)
+{
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct page **pages;
+ unsigned long nr_pages, i;
+ u64 cur;
+ size_t page_offset;
+ ssize_t ret;
+
+ nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (!pages)
+ return -ENOMEM;
+ ret = btrfs_alloc_page_array(nr_pages, pages, false);
+ if (ret) {
+ ret = -ENOMEM;
goto out;
+ }
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
+ disk_io_size, pages, NULL);
if (ret)
goto out;
- ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ *unlocked = true;
+
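+ /*
+ * Compressed reads copy the whole on-disk extent starting at offset 0;
+ * otherwise start copying at ki_pos' offset within the range read from
+ * disk.
+ */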
+ if (compressed) {
+ i = 0;
+ page_offset = 0;
+ } else {
+ i = (iocb->ki_pos - start) >> PAGE_SHIFT;
+ page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
+ }
+ cur = 0;
+ while (cur < count) {
+ size_t bytes = min_t(size_t, count - cur,
+ PAGE_SIZE - page_offset);
+
+ if (copy_page_to_iter(pages[i], page_offset, bytes,
+ iter) != bytes) {
+ ret = -EFAULT;
+ goto out;
+ }
+ i++;
+ cur += bytes;
+ page_offset = 0;
+ }
+ ret = count;
+out:
+ for (i = 0; i < nr_pages; i++) {
+ if (pages[i])
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+ return ret;
+}
+
+ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ struct btrfs_ioctl_encoded_io_args *encoded,
+ struct extent_state **cached_state,
+ u64 *disk_bytenr, u64 *disk_io_size)
+{
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ ssize_t ret;
+ size_t count = iov_iter_count(iter);
+ u64 start, lockend;
+ struct extent_map *em;
+ const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
+ bool unlocked = false;
+
+ file_accessed(iocb->ki_filp);
+
+ ret = btrfs_inode_lock(inode,
+ BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
if (ret)
- goto out;
+ return ret;
+ if (iocb->ki_pos >= inode->vfs_inode.i_size) {
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ return 0;
+ }
+ start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
/*
- * We set number of links to 0 in btrfs_new_inode(), and here we set
- * it to 1 because d_tmpfile() will issue a warning if the count is 0,
- * through:
- *
- * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
+ * We don't know how long the extent containing iocb->ki_pos is, but if
+ * it's compressed we know that it won't be longer than this.
*/
- set_nlink(inode, 1);
- d_tmpfile(dentry, inode);
- unlock_new_inode(inode);
- mark_inode_dirty(inode);
-out:
- btrfs_end_transaction(trans);
- if (ret && inode)
- discard_new_inode(inode);
- btrfs_btree_balance_dirty(fs_info);
+ lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
+
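+ /*
+ * With IOCB_NOWAIT, bail out with -EAGAIN if anything (writeback, the
+ * extent lock or an ordered extent) would block. Otherwise wait for
+ * ordered extents and retry until the locked range is quiescent.
+ */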
+ if (nowait) {
+ struct btrfs_ordered_extent *ordered;
+
+ if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
+ start, lockend)) {
+ ret = -EAGAIN;
+ goto out_unlock_inode;
+ }
+
+ if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
+ ret = -EAGAIN;
+ goto out_unlock_inode;
+ }
+
+ ordered = btrfs_lookup_ordered_range(inode, start,
+ lockend - start + 1);
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
+ ret = -EAGAIN;
+ goto out_unlock_inode;
+ }
+ } else {
+ for (;;) {
+ struct btrfs_ordered_extent *ordered;
+
+ ret = btrfs_wait_ordered_range(inode, start,
+ lockend - start + 1);
+ if (ret)
+ goto out_unlock_inode;
+
+ btrfs_lock_extent(io_tree, start, lockend, cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, start,
+ lockend - start + 1);
+ if (!ordered)
+ break;
+ btrfs_put_ordered_extent(ordered);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
+ cond_resched();
+ }
+ }
+
+ em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out_unlock_extent;
+ }
+
+ if (em->disk_bytenr == EXTENT_MAP_INLINE) {
+ u64 extent_start = em->start;
+
+ /*
+ * For inline extents we get everything we need out of the
+ * extent item.
+ */
+ btrfs_free_extent_map(em);
+ em = NULL;
+ ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
+ cached_state, extent_start,
+ count, encoded, &unlocked);
+ goto out_unlock_extent;
+ }
+
+ /*
+ * We only want to return up to EOF even if the extent extends beyond
+ * that.
+ */
+ encoded->len = min_t(u64, btrfs_extent_map_end(em),
+ inode->vfs_inode.i_size) - iocb->ki_pos;
+ if (em->disk_bytenr == EXTENT_MAP_HOLE ||
+ (em->flags & EXTENT_FLAG_PREALLOC)) {
+ *disk_bytenr = EXTENT_MAP_HOLE;
+ count = min_t(u64, count, encoded->len);
+ encoded->len = count;
+ encoded->unencoded_len = count;
+ } else if (btrfs_extent_map_is_compressed(em)) {
+ *disk_bytenr = em->disk_bytenr;
+ /*
+ * Bail if the buffer isn't large enough to return the whole
+ * compressed extent.
+ */
+ if (em->disk_num_bytes > count) {
+ ret = -ENOBUFS;
+ goto out_em;
+ }
+ *disk_io_size = em->disk_num_bytes;
+ count = em->disk_num_bytes;
+ encoded->unencoded_len = em->ram_bytes;
+ encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
+ ret = btrfs_encoded_io_compression_from_extent(fs_info,
+ btrfs_extent_map_compression(em));
+ if (ret < 0)
+ goto out_em;
+ encoded->compression = ret;
+ } else {
+ *disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
+ if (encoded->len > count)
+ encoded->len = count;
+ /*
+ * Don't read beyond what we locked. This also limits the page
+ * allocations that we'll do.
+ */
+ *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
+ count = start + *disk_io_size - iocb->ki_pos;
+ encoded->len = count;
+ encoded->unencoded_len = count;
+ *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
+ }
+ btrfs_free_extent_map(em);
+ em = NULL;
+
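+ /*
+ * Holes and prealloc extents are served by zeroing the iter here. For
+ * regular extents return -EIOCBQUEUED so the caller performs the actual
+ * read using *disk_bytenr and *disk_io_size, with the locks still held.
+ */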
+ if (*disk_bytenr == EXTENT_MAP_HOLE) {
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ unlocked = true;
+ ret = iov_iter_zero(count, iter);
+ if (ret != count)
+ ret = -EFAULT;
+ } else {
+ ret = -EIOCBQUEUED;
+ goto out_unlock_extent;
+ }
+
+out_em:
+ btrfs_free_extent_map(em);
+out_unlock_extent:
+ /* Leave inode and extent locked if we need to do a read. */
+ if (!unlocked && ret != -EIOCBQUEUED)
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
+out_unlock_inode:
+ if (!unlocked && ret != -EIOCBQUEUED)
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
return ret;
}
-void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
+ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ const struct btrfs_ioctl_encoded_io_args *encoded)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct page *page;
- u32 len;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct extent_changeset *data_reserved = NULL;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
+ int compression;
+ size_t orig_count;
+ u64 start, end;
+ u64 num_bytes, ram_bytes, disk_num_bytes;
+ unsigned long nr_folios, i;
+ struct folio **folios;
+ struct btrfs_key ins;
+ bool extent_reserved = false;
+ struct extent_map *em;
+ ssize_t ret;
- ASSERT(end + 1 - start <= U32_MAX);
- len = end + 1 - start;
- while (index <= end_index) {
- page = find_get_page(inode->vfs_inode.i_mapping, index);
- ASSERT(page); /* Pages should be in the extent_io_tree */
+ switch (encoded->compression) {
+ case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
+ compression = BTRFS_COMPRESS_ZLIB;
+ break;
+ case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
+ compression = BTRFS_COMPRESS_ZSTD;
+ break;
+ case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
+ case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
+ case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
+ case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
+ case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
+ /* The sector size must match for LZO. */
+ if (encoded->compression -
+ BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
+ fs_info->sectorsize_bits)
+ return -EINVAL;
+ compression = BTRFS_COMPRESS_LZO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ return -EINVAL;
+
+ /*
+ * Compressed extents should always have checksums, so error out if we
+ * have a NOCOW file or the inode was created while mounted with NODATASUM.
+ */
+ if (inode->flags & BTRFS_INODE_NODATASUM)
+ return -EINVAL;
+
+ orig_count = iov_iter_count(from);
+
+ /* The extent size must be sane. */
+ if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
+ orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
+ return -EINVAL;
+
+ /*
+ * The compressed data must be smaller than the decompressed data.
+ *
+ * It's of course possible for data to compress to larger or the same
+ * size, but the buffered I/O path falls back to no compression for such
+ * data, and we don't want to break any assumptions by creating these
+ * extents.
+ *
+ * Note that this is less strict than the current check we have that the
+ * compressed data must be at least one sector smaller than the
+ * decompressed data. We only want to enforce the weaker requirement
+ * from old kernels that it is at least one byte smaller.
+ */
+ if (orig_count >= encoded->unencoded_len)
+ return -EINVAL;
+
+ /* The extent must start on a sector boundary. */
+ start = iocb->ki_pos;
+ if (!IS_ALIGNED(start, fs_info->sectorsize))
+ return -EINVAL;
- btrfs_page_set_writeback(fs_info, page, start, len);
- put_page(page);
- index++;
+ /*
+ * The extent must end on a sector boundary. However, we allow a write
+ * which ends at or extends i_size to have an unaligned length; we round
+ * up the extent size and set i_size to the unaligned end.
+ */
+ if (start + encoded->len < inode->vfs_inode.i_size &&
+ !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
+ return -EINVAL;
+
+ /* Finally, the offset in the unencoded data must be sector-aligned. */
+ if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
+ return -EINVAL;
+
+ num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
+ ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
+ end = start + num_bytes - 1;
+
+ /*
+ * If the extent cannot be inline, the compressed data on disk must be
+ * sector-aligned. For convenience, we extend it with zeroes if it
+ * isn't.
+ */
+ disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
+ nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
+ folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
+ if (!folios)
+ return -ENOMEM;
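+ /* Copy the encoded data into the folios, zero-filling the unused tail of the last one. */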
+ for (i = 0; i < nr_folios; i++) {
+ size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
+ char *kaddr;
+
+ folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
+ if (!folios[i]) {
+ ret = -ENOMEM;
+ goto out_folios;
+ }
+ kaddr = kmap_local_folio(folios[i], 0);
+ if (copy_from_iter(kaddr, bytes, from) != bytes) {
+ kunmap_local(kaddr);
+ ret = -EFAULT;
+ goto out_folios;
+ }
+ if (bytes < PAGE_SIZE)
+ memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
+ kunmap_local(kaddr);
+ }
+
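+ /*
+ * Flush and drop any pages in the range, then lock it; retry until the
+ * range has neither ordered extents nor pages left.
+ */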
+ for (;;) {
+ ret = btrfs_wait_ordered_range(inode, start, num_bytes);
+ if (ret)
+ goto out_folios;
+ ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
+ start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+ if (ret)
+ goto out_folios;
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
+ if (!ordered &&
+ !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
+ break;
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
+ cond_resched();
+ }
+
+ /*
+ * We don't use the higher-level delalloc space functions because our
+ * num_bytes and disk_num_bytes are different.
+ */
+ ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
+ if (ret)
+ goto out_unlock;
+ ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
+ if (ret)
+ goto out_free_data_space;
+ ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
+ false);
+ if (ret)
+ goto out_qgroup_free_data;
+
+ /* Try an inline extent first. */
+ if (encoded->unencoded_len == encoded->len &&
+ encoded->unencoded_offset == 0 &&
+ can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
+ ret = __cow_file_range_inline(inode, encoded->len,
+ orig_count, compression, folios[0],
+ true);
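+ /*
+ * A negative value is an error, 0 means the data was stored inline
+ * (report the full count), and a positive value means it did not fit,
+ * so fall back to a regular extent below.
+ */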
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = orig_count;
+ goto out_delalloc_release;
+ }
+ }
+
+ ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
+ disk_num_bytes, 0, 0, &ins, true, true);
+ if (ret)
+ goto out_delalloc_release;
+ extent_reserved = true;
+
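+ /*
+ * Describe the reserved extent and create its extent map and the
+ * compressed ordered extent before submitting the write bios.
+ */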
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = num_bytes;
+ file_extent.ram_bytes = ram_bytes;
+ file_extent.offset = encoded->unencoded_offset;
+ file_extent.compression = compression;
+ em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out_free_reserved;
+ }
+ btrfs_free_extent_map(em);
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+ (1U << BTRFS_ORDERED_ENCODED) |
+ (1U << BTRFS_ORDERED_COMPRESSED));
+ if (IS_ERR(ordered)) {
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = PTR_ERR(ordered);
+ goto out_free_reserved;
+ }
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+
+ if (start + encoded->len > inode->vfs_inode.i_size)
+ i_size_write(&inode->vfs_inode, start + encoded->len);
+
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
+
+ btrfs_delalloc_release_extents(inode, num_bytes);
+
+ btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
+ ret = orig_count;
+ goto out;
+
+out_free_reserved:
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
+out_delalloc_release:
+ btrfs_delalloc_release_extents(inode, num_bytes);
+ btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
+out_qgroup_free_data:
+ if (ret < 0)
+ btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
+out_free_data_space:
+ /*
+ * If btrfs_reserve_extent() succeeded, then we already decremented
+ * bytes_may_use.
+ */
+ if (!extent_reserved)
+ btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
+out_unlock:
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
+out_folios:
+ for (i = 0; i < nr_folios; i++) {
+ if (folios[i])
+ folio_put(folios[i]);
}
+ kvfree(folios);
+out:
+ if (ret >= 0)
+ iocb->ki_pos += encoded->len;
+ return ret;
}
#ifdef CONFIG_SWAP
@@ -10595,16 +10067,26 @@ static int btrfs_add_swap_extent(struct swap_info_struct *sis,
struct btrfs_swap_info *bsi)
{
unsigned long nr_pages;
+ unsigned long max_pages;
u64 first_ppage, first_ppage_reported, next_ppage;
int ret;
- first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
- next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
- PAGE_SIZE) >> PAGE_SHIFT;
+ /*
+ * Our swapfile may have had its size extended after the swap header was
+ * written. In that case activating the swapfile should not go beyond
+ * the max size set in the swap header.
+ */
+ if (bsi->nr_pages >= sis->max)
+ return 0;
+
+ max_pages = sis->max - bsi->nr_pages;
+ first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
+ next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
if (first_ppage >= next_ppage)
return 0;
nr_pages = next_ppage - first_ppage;
+ nr_pages = min(nr_pages, max_pages);
first_ppage_reported = first_ppage;
if (bsi->start == 0)
@@ -10638,38 +10120,59 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
- struct extent_map *em = NULL;
+ struct btrfs_chunk_map *map = NULL;
struct btrfs_device *device = NULL;
struct btrfs_swap_info bsi = {
.lowest_ppage = (sector_t)-1ULL,
};
+ struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
+ struct btrfs_path *path = NULL;
int ret = 0;
u64 isize;
- u64 start;
+ u64 prev_extent_end = 0;
+
+ /*
+ * Acquire the inode's mmap lock to prevent races with memory mapped
+ * writes, as they could happen after we flush delalloc below and before
+ * we lock the extent range further below. The inode was already locked
+ * higher up in the call chain.
+ */
+ btrfs_assert_inode_locked(BTRFS_I(inode));
+ down_write(&BTRFS_I(inode)->i_mmap_lock);
/*
* If the swap file was just created, make sure delalloc is done. If the
* file changes again after this, the user is doing something stupid and
* we don't really care.
*/
- ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- return ret;
+ goto out_unlock_mmap;
/*
* The inode is locked, so these flags won't change after we check them.
*/
if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
btrfs_warn(fs_info, "swapfile must not be compressed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
btrfs_warn(fs_info, "swapfile must not be copy-on-write");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_warn(fs_info, "swapfile must not be checksummed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
+ }
+
+ path = btrfs_alloc_path();
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ if (!path || !backref_ctx) {
+ ret = -ENOMEM;
+ goto out_unlock_mmap;
}
/*
@@ -10684,7 +10187,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
btrfs_warn(fs_info,
"cannot activate swapfile while exclusive operation is running");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_mmap;
}
/*
@@ -10698,37 +10202,70 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because snapshot creation is in progress");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
/*
* Snapshots can create extents which require COW even if NODATACOW is
* set. We use this counter to prevent snapshots. We must increment it
* before walking the extents because we don't want a concurrent
* snapshot to run after we've already checked the extents.
+ *
+ * It is possible that the subvolume is marked for deletion but not yet
+ * removed. To prevent this race, we check the root status before
+ * activating the swapfile.
*/
+ spin_lock(&root->root_item_lock);
+ if (btrfs_root_dead(root)) {
+ spin_unlock(&root->root_item_lock);
+
+ btrfs_drew_write_unlock(&root->snapshot_lock);
+ btrfs_exclop_finish(fs_info);
+ btrfs_warn(fs_info,
+ "cannot activate swapfile because subvolume %llu is being deleted",
+ btrfs_root_id(root));
+ ret = -EPERM;
+ goto out_unlock_mmap;
+ }
atomic_inc(&root->nr_swapfiles);
+ spin_unlock(&root->root_item_lock);
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
- lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
- start = 0;
- while (start < isize) {
- u64 logical_block_start, physical_block_start;
+ btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
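+ /*
+ * Walk the file's extent items one by one, checking that each extent is
+ * usable for a swapfile (no holes, not inline, not compressed, not
+ * shared) and collecting its physical location.
+ */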
+ while (prev_extent_end < isize) {
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
struct btrfs_block_group *bg;
- u64 len = isize - start;
+ u64 logical_block_start;
+ u64 physical_block_start;
+ u64 extent_gen;
+ u64 disk_bytenr;
+ u64 len;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = prev_extent_end;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
goto out;
- }
- if (em->block_start == EXTENT_MAP_HOLE) {
+ /*
+ * If the key is not found it means we have an implicit hole (NO_HOLES
+ * is enabled).
+ */
+ if (ret > 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
- if (em->block_start == EXTENT_MAP_INLINE) {
+
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
/*
* It's unlikely we'll ever actually find ourselves
* here, as a file small enough to fit inline won't be
@@ -10740,36 +10277,58 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
ret = -EINVAL;
goto out;
}
- if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+
+ if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out;
}
- logical_block_start = em->block_start + (start - em->start);
- len = min(len, em->len - (start - em->start));
- free_extent_map(em);
- em = NULL;
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (disk_bytenr == 0) {
+ btrfs_warn(fs_info, "swapfile must not have holes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+ prev_extent_end = btrfs_file_extent_end(path);
+
+ if (prev_extent_end > isize)
+ len = isize - key.offset;
+ else
+ len = btrfs_file_extent_num_bytes(leaf, ei);
- ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /*
+ * We don't need the path anymore, so release it to avoid deadlocks when
+ * calling btrfs_is_data_extent_shared(): joining a transaction there can
+ * block waiting for the current transaction's commit, which in turn may
+ * be trying to lock the same leaf, for example to flush delayed items.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
+ extent_gen, backref_ctx);
if (ret < 0) {
goto out;
- } else if (ret) {
- ret = 0;
- } else {
+ } else if (ret > 0) {
btrfs_warn(fs_info,
"swapfile must not be copy-on-write");
ret = -EINVAL;
goto out;
}
- em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
goto out;
}
- if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
btrfs_warn(fs_info,
"swapfile must have single data profile");
ret = -EINVAL;
@@ -10777,23 +10336,22 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
}
if (device == NULL) {
- device = em->map_lookup->stripes[0].dev;
+ device = map->stripes[0].dev;
ret = btrfs_add_swapfile_pin(inode, device, false);
if (ret == 1)
ret = 0;
else if (ret)
goto out;
- } else if (device != em->map_lookup->stripes[0].dev) {
+ } else if (device != map->stripes[0].dev) {
btrfs_warn(fs_info, "swapfile must be on one device");
ret = -EINVAL;
goto out;
}
- physical_block_start = (em->map_lookup->stripes[0].physical +
- (logical_block_start - em->start));
- len = min(len, em->len - (logical_block_start - em->start));
- free_extent_map(em);
- em = NULL;
+ physical_block_start = (map->stripes[0].physical +
+ (logical_block_start - map->start));
+ btrfs_free_chunk_map(map);
+ map = NULL;
bg = btrfs_lookup_block_group(fs_info, logical_block_start);
if (!bg) {
@@ -10832,22 +10390,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
goto out;
}
- bsi.start = start;
+ bsi.start = key.offset;
bsi.block_start = physical_block_start;
bsi.block_len = len;
}
- start += len;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ cond_resched();
}
if (bsi.block_len)
ret = btrfs_add_swap_extent(sis, &bsi);
out:
- if (!IS_ERR_OR_NULL(em))
- free_extent_map(em);
+ if (!IS_ERR_OR_NULL(map))
+ btrfs_free_chunk_map(map);
- unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);
+ btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
if (ret)
btrfs_swap_deactivate(file);
@@ -10856,6 +10419,10 @@ out:
btrfs_exclop_finish(fs_info);
+out_unlock_mmap:
+ up_write(&BTRFS_I(inode)->i_mmap_lock);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
if (ret)
return ret;
@@ -10864,7 +10431,6 @@ out:
*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
sis->max = bsi.nr_pages;
sis->pages = bsi.nr_pages - 1;
- sis->highest_bit = bsi.nr_pages - 1;
return bsi.nr_extents;
}
#else
@@ -10900,6 +10466,71 @@ void btrfs_update_inode_bytes(struct btrfs_inode *inode,
spin_unlock(&inode->lock);
}
+/*
+ * Verify that there are no ordered extents for a given file range.
+ *
+ * @inode: The target inode.
+ * @start: Start offset of the file range, should be sector size aligned.
+ * @end: End offset (inclusive) of the file range, its value +1 should be
+ * sector size aligned.
+ *
+ * This should typically be used for cases where we locked an inode's VFS lock in
+ * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
+ * we have flushed all delalloc in the range, we have waited for all ordered
+ * extents in the range to complete and finally we have locked the file range in
+ * the inode's io_tree.
+ */
+void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_ordered_extent *ordered;
+
+ if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
+ return;
+
+ ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
+ if (ordered) {
+ btrfs_err(root->fs_info,
+"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
+ start, end, btrfs_ino(inode), btrfs_root_id(root),
+ ordered->file_offset,
+ ordered->file_offset + ordered->num_bytes - 1);
+ btrfs_put_ordered_extent(ordered);
+ }
+
+ ASSERT(ordered == NULL);
+}
+
+/*
+ * Find the first inode with a minimum number.
+ *
+ * @root: The root to search in.
+ * @min_ino: The minimum inode number.
+ *
+ * Find the first inode in the @root with a number >= @min_ino and return it.
+ * Returns NULL if no such inode is found.
+ */
+struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
+{
+ struct btrfs_inode *inode;
+ unsigned long from = min_ino;
+
+ xa_lock(&root->inodes);
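+ /*
+ * Walk the xarray from @min_ino; if igrab() fails (the inode is being
+ * evicted), skip past it and keep searching.
+ */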
+ while (true) {
+ inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
+ if (!inode)
+ break;
+ if (igrab(&inode->vfs_inode))
+ break;
+
+ from = btrfs_ino(inode) + 1;
+ cond_resched_lock(&root->inodes.xa_lock);
+ }
+ xa_unlock(&root->inodes);
+
+ return inode;
+}
+
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
@@ -10914,7 +10545,7 @@ static const struct inode_operations btrfs_dir_inode_operations = {
.mknod = btrfs_mknod,
.listxattr = btrfs_listxattr,
.permission = btrfs_permission,
- .get_acl = btrfs_get_acl,
+ .get_inode_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
.tmpfile = btrfs_tmpfile,
@@ -10923,7 +10554,7 @@ static const struct inode_operations btrfs_dir_inode_operations = {
};
static const struct file_operations btrfs_dir_file_operations = {
- .llseek = generic_file_llseek,
+ .llseek = btrfs_dir_llseek,
.read = generic_read_dir,
.iterate_shared = btrfs_real_readdir,
.open = btrfs_opendir,
@@ -10948,18 +10579,15 @@ static const struct file_operations btrfs_dir_file_operations = {
* For now we're avoiding this by dropping bmap.
*/
static const struct address_space_operations btrfs_aops = {
- .readpage = btrfs_readpage,
- .writepage = btrfs_writepage,
+ .read_folio = btrfs_read_folio,
.writepages = btrfs_writepages,
.readahead = btrfs_readahead,
- .direct_IO = noop_direct_IO,
- .invalidatepage = btrfs_invalidatepage,
- .releasepage = btrfs_releasepage,
-#ifdef CONFIG_MIGRATION
- .migratepage = btrfs_migratepage,
-#endif
- .set_page_dirty = btrfs_set_page_dirty,
- .error_remove_page = generic_error_remove_page,
+ .invalidate_folio = btrfs_invalidate_folio,
+ .launder_folio = btrfs_launder_folio,
+ .release_folio = btrfs_release_folio,
+ .migrate_folio = btrfs_migrate_folio,
+ .dirty_folio = filemap_dirty_folio,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = btrfs_swap_activate,
.swap_deactivate = btrfs_swap_deactivate,
};
@@ -10970,7 +10598,7 @@ static const struct inode_operations btrfs_file_inode_operations = {
.listxattr = btrfs_listxattr,
.permission = btrfs_permission,
.fiemap = btrfs_fiemap,
- .get_acl = btrfs_get_acl,
+ .get_inode_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
.fileattr_get = btrfs_fileattr_get,
@@ -10981,7 +10609,7 @@ static const struct inode_operations btrfs_special_inode_operations = {
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.listxattr = btrfs_listxattr,
- .get_acl = btrfs_get_acl,
+ .get_inode_acl = btrfs_get_acl,
.set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fb8cc9642ac4..acb484546b1d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -28,16 +28,16 @@
#include <linux/iversion.h>
#include <linux/fileattr.h>
#include <linux/fsverity.h>
+#include <linux/sched/xacct.h>
+#include <linux/io_uring/cmd.h>
#include "ctree.h"
#include "disk-io.h"
#include "export.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "backref.h"
-#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
@@ -46,9 +46,18 @@
#include "tree-log.h"
#include "compression.h"
#include "space-info.h"
-#include "delalloc-space.h"
#include "block-group.h"
-#include "subpage.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "defrag.h"
+#include "dir-item.h"
+#include "uuid-tree.h"
+#include "ioctl.h"
+#include "file.h"
+#include "scrub.h"
+#include "super.h"
#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
@@ -88,11 +97,29 @@ struct btrfs_ioctl_send_args_32 {
#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
struct btrfs_ioctl_send_args_32)
+
+struct btrfs_ioctl_encoded_io_args_32 {
+ compat_uptr_t iov;
+ compat_ulong_t iovcnt;
+ __s64 offset;
+ __u64 flags;
+ __u64 len;
+ __u64 unencoded_len;
+ __u64 unencoded_offset;
+ __u32 compression;
+ __u32 encryption;
+ __u8 reserved[64];
+};
+
+#define BTRFS_IOC_ENCODED_READ_32 _IOR(BTRFS_IOCTL_MAGIC, 64, \
+ struct btrfs_ioctl_encoded_io_args_32)
+#define BTRFS_IOC_ENCODED_WRITE_32 _IOW(BTRFS_IOCTL_MAGIC, 64, \
+ struct btrfs_ioctl_encoded_io_args_32)
#endif
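/*
 * Editorial sketch, not part of the patch above: how a 64-bit kernel would
 * typically consume the 32-bit layout defined under CONFIG_64BIT, widening
 * the compat_uptr_t iovec pointer with compat_ptr(). The helper name and
 * calling convention here are illustrative only.
 */
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
static int example_read_encoded_args32(void __user *argp,
				       struct iovec __user **iov,
				       unsigned long *iovcnt, loff_t *pos)
{
	struct btrfs_ioctl_encoded_io_args_32 args32;

	if (copy_from_user(&args32, argp, sizeof(args32)))
		return -EFAULT;
	*iov = compat_ptr(args32.iov);	/* widen the 32-bit user pointer */
	*iovcnt = args32.iovcnt;
	*pos = args32.offset;
	return 0;
}
#endif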
/* Mask out flags that are inappropriate for the given type of inode. */
-static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
- unsigned int flags)
+static unsigned int btrfs_mask_fsflags_for_type(const struct inode *inode,
+ unsigned int flags)
{
if (S_ISDIR(inode->i_mode))
return flags;
@@ -106,11 +133,11 @@ static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
* Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
* ioctl.
*/
-static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
+static unsigned int btrfs_inode_flags_to_fsflags(const struct btrfs_inode *inode)
{
unsigned int iflags = 0;
- u32 flags = binode->flags;
- u32 ro_flags = binode->ro_flags;
+ u32 flags = inode->flags;
+ u32 ro_flags = inode->ro_flags;
if (flags & BTRFS_INODE_SYNC)
iflags |= FS_SYNC_FL;
@@ -140,25 +167,24 @@ static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
/*
* Update inode->i_flags based on the btrfs internal flags.
*/
-void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
+void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode)
{
- struct btrfs_inode *binode = BTRFS_I(inode);
unsigned int new_fl = 0;
- if (binode->flags & BTRFS_INODE_SYNC)
+ if (inode->flags & BTRFS_INODE_SYNC)
new_fl |= S_SYNC;
- if (binode->flags & BTRFS_INODE_IMMUTABLE)
+ if (inode->flags & BTRFS_INODE_IMMUTABLE)
new_fl |= S_IMMUTABLE;
- if (binode->flags & BTRFS_INODE_APPEND)
+ if (inode->flags & BTRFS_INODE_APPEND)
new_fl |= S_APPEND;
- if (binode->flags & BTRFS_INODE_NOATIME)
+ if (inode->flags & BTRFS_INODE_NOATIME)
new_fl |= S_NOATIME;
- if (binode->flags & BTRFS_INODE_DIRSYNC)
+ if (inode->flags & BTRFS_INODE_DIRSYNC)
new_fl |= S_DIRSYNC;
- if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
+ if (inode->ro_flags & BTRFS_INODE_RO_VERITY)
new_fl |= S_VERITY;
- set_mask_bits(&inode->i_flags,
+ set_mask_bits(&inode->vfs_inode.i_flags,
S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
S_VERITY, new_fl);
}
@@ -192,7 +218,7 @@ static int check_fsflags(unsigned int old_flags, unsigned int flags)
return 0;
}
-static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
+static int check_fsflags_compatible(const struct btrfs_fs_info *fs_info,
unsigned int flags)
{
if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
@@ -201,30 +227,43 @@ static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
return 0;
}
+int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args)
+{
+ if (memchr(vol_args->name, 0, sizeof(vol_args->name)) == NULL)
+ return -ENAMETOOLONG;
+ return 0;
+}
+
+static int btrfs_check_ioctl_vol_args2_subvol_name(const struct btrfs_ioctl_vol_args_v2 *vol_args2)
+{
+ if (memchr(vol_args2->name, 0, sizeof(vol_args2->name)) == NULL)
+ return -ENAMETOOLONG;
+ return 0;
+}
+
/*
* Set flags/xflags from the internal inode flags. The remaining items of
* fsxattr are zeroed.
*/
-int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
- struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));
+ const struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
- fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
+ fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(inode));
return 0;
}
-int btrfs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int btrfs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
- struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_inode *binode = BTRFS_I(inode);
- struct btrfs_root *root = binode->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
unsigned int fsflags, old_fsflags;
int ret;
const char *comp = NULL;
- u32 binode_flags;
+ u32 inode_flags;
if (btrfs_root_readonly(root))
return -EROFS;
@@ -232,8 +271,8 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
if (fileattr_has_fsx(fa))
return -EOPNOTSUPP;
- fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
- old_fsflags = btrfs_inode_flags_to_fsflags(binode);
+ fsflags = btrfs_mask_fsflags_for_type(&inode->vfs_inode, fa->flags);
+ old_fsflags = btrfs_inode_flags_to_fsflags(inode);
ret = check_fsflags(old_fsflags, fsflags);
if (ret)
return ret;
@@ -242,27 +281,27 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
if (ret)
return ret;
- binode_flags = binode->flags;
+ inode_flags = inode->flags;
if (fsflags & FS_SYNC_FL)
- binode_flags |= BTRFS_INODE_SYNC;
+ inode_flags |= BTRFS_INODE_SYNC;
else
- binode_flags &= ~BTRFS_INODE_SYNC;
+ inode_flags &= ~BTRFS_INODE_SYNC;
if (fsflags & FS_IMMUTABLE_FL)
- binode_flags |= BTRFS_INODE_IMMUTABLE;
+ inode_flags |= BTRFS_INODE_IMMUTABLE;
else
- binode_flags &= ~BTRFS_INODE_IMMUTABLE;
+ inode_flags &= ~BTRFS_INODE_IMMUTABLE;
if (fsflags & FS_APPEND_FL)
- binode_flags |= BTRFS_INODE_APPEND;
+ inode_flags |= BTRFS_INODE_APPEND;
else
- binode_flags &= ~BTRFS_INODE_APPEND;
+ inode_flags &= ~BTRFS_INODE_APPEND;
if (fsflags & FS_NODUMP_FL)
- binode_flags |= BTRFS_INODE_NODUMP;
+ inode_flags |= BTRFS_INODE_NODUMP;
else
- binode_flags &= ~BTRFS_INODE_NODUMP;
+ inode_flags &= ~BTRFS_INODE_NODUMP;
if (fsflags & FS_NOATIME_FL)
- binode_flags |= BTRFS_INODE_NOATIME;
+ inode_flags |= BTRFS_INODE_NOATIME;
else
- binode_flags &= ~BTRFS_INODE_NOATIME;
+ inode_flags &= ~BTRFS_INODE_NOATIME;
/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
if (!fa->flags_valid) {
@@ -274,32 +313,32 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
}
if (fsflags & FS_DIRSYNC_FL)
- binode_flags |= BTRFS_INODE_DIRSYNC;
+ inode_flags |= BTRFS_INODE_DIRSYNC;
else
- binode_flags &= ~BTRFS_INODE_DIRSYNC;
+ inode_flags &= ~BTRFS_INODE_DIRSYNC;
if (fsflags & FS_NOCOW_FL) {
- if (S_ISREG(inode->i_mode)) {
+ if (S_ISREG(inode->vfs_inode.i_mode)) {
/*
* It's safe to turn csums off here, no extents exist.
* Otherwise we want the flag to reflect the real COW
* status of the file and will not set it.
*/
- if (inode->i_size == 0)
- binode_flags |= BTRFS_INODE_NODATACOW |
- BTRFS_INODE_NODATASUM;
+ if (inode->vfs_inode.i_size == 0)
+ inode_flags |= BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM;
} else {
- binode_flags |= BTRFS_INODE_NODATACOW;
+ inode_flags |= BTRFS_INODE_NODATACOW;
}
} else {
/*
* Revert back under same assumptions as above
*/
- if (S_ISREG(inode->i_mode)) {
- if (inode->i_size == 0)
- binode_flags &= ~(BTRFS_INODE_NODATACOW |
- BTRFS_INODE_NODATASUM);
+ if (S_ISREG(inode->vfs_inode.i_mode)) {
+ if (inode->vfs_inode.i_size == 0)
+ inode_flags &= ~(BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM);
} else {
- binode_flags &= ~BTRFS_INODE_NODATACOW;
+ inode_flags &= ~BTRFS_INODE_NODATACOW;
}
}
@@ -309,21 +348,21 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
* things smaller.
*/
if (fsflags & FS_NOCOMP_FL) {
- binode_flags &= ~BTRFS_INODE_COMPRESS;
- binode_flags |= BTRFS_INODE_NOCOMPRESS;
+ inode_flags &= ~BTRFS_INODE_COMPRESS;
+ inode_flags |= BTRFS_INODE_NOCOMPRESS;
} else if (fsflags & FS_COMPR_FL) {
- if (IS_SWAPFILE(inode))
+ if (IS_SWAPFILE(&inode->vfs_inode))
return -ETXTBSY;
- binode_flags |= BTRFS_INODE_COMPRESS;
- binode_flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode_flags |= BTRFS_INODE_COMPRESS;
+ inode_flags &= ~BTRFS_INODE_NOCOMPRESS;
comp = btrfs_compress_type2str(fs_info->compress_type);
if (!comp || comp[0] == 0)
comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
} else {
- binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
+ inode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
}
/*
@@ -335,89 +374,35 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
return PTR_ERR(trans);
if (comp) {
- ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
- strlen(comp), 0);
- if (ret) {
+ ret = btrfs_set_prop(trans, inode, "btrfs.compression",
+ comp, strlen(comp), 0);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
} else {
- ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
- 0, 0);
- if (ret && ret != -ENODATA) {
+ ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL, 0, 0);
+ if (unlikely(ret && ret != -ENODATA)) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
}
update_flags:
- binode->flags = binode_flags;
+ inode->flags = inode_flags;
+ btrfs_update_inode_mapping_flags(inode);
btrfs_sync_inode_flags_to_i_flags(inode);
- inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ inode_inc_iversion(&inode->vfs_inode);
+ inode_set_ctime_current(&inode->vfs_inode);
+ ret = btrfs_update_inode(trans, inode);
out_end_trans:
btrfs_end_transaction(trans);
return ret;
}
-/*
- * Start exclusive operation @type, return true on success
- */
-bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type)
-{
- bool ret = false;
-
- spin_lock(&fs_info->super_lock);
- if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
- fs_info->exclusive_operation = type;
- ret = true;
- }
- spin_unlock(&fs_info->super_lock);
-
- return ret;
-}
-
-/*
- * Conditionally allow to enter the exclusive operation in case it's compatible
- * with the running one. This must be paired with btrfs_exclop_start_unlock and
- * btrfs_exclop_finish.
- *
- * Compatibility:
- * - the same type is already running
- * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
- * must check the condition first that would allow none -> @type
- */
-bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type)
-{
- spin_lock(&fs_info->super_lock);
- if (fs_info->exclusive_operation == type)
- return true;
-
- spin_unlock(&fs_info->super_lock);
- return false;
-}
-
-void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
+static int btrfs_ioctl_getversion(const struct inode *inode, int __user *arg)
{
- spin_unlock(&fs_info->super_lock);
-}
-
-void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
-{
- spin_lock(&fs_info->super_lock);
- WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
- spin_unlock(&fs_info->super_lock);
- sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
-}
-
-static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
-{
- struct inode *inode = file_inode(file);
-
return put_user(inode->i_generation, arg);
}
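/*
 * Editorial sketch, not part of the patch above: the handler just rewritten
 * backs the generic FS_IOC_GETVERSION ioctl. From user space it can be
 * exercised roughly as below (illustrative, minimal error handling; the
 * btrfs handler stores an int-sized generation).
 */
#if 0	/* user-space illustration only */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int example_get_generation(int fd, int *gen)
{
	return ioctl(fd, FS_IOC_GETVERSION, gen);
}
#endif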
@@ -425,7 +410,6 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_device *device;
- struct request_queue *q;
struct fstrim_range range;
u64 minlen = ULLONG_MAX;
u64 num_devices = 0;
@@ -455,14 +439,11 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
dev_list) {
- if (!device->bdev)
+ if (!device->bdev || !bdev_max_discard_sectors(device->bdev))
continue;
- q = bdev_get_queue(device->bdev);
- if (blk_queue_discard(q)) {
- num_devices++;
- minlen = min_t(u64, q->limits.discard_granularity,
- minlen);
- }
+ num_devices++;
+ minlen = min_t(u64, bdev_discard_granularity(device->bdev),
+ minlen);
}
rcu_read_unlock();
@@ -476,52 +457,69 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
* block group is in the logical address space, which can be any
* sectorsize aligned bytenr in the range [0, U64_MAX].
*/
- if (range.len < fs_info->sb->s_blocksize)
+ if (range.len < fs_info->sectorsize)
return -EINVAL;
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info, &range);
- if (ret < 0)
- return ret;
if (copy_to_user(arg, &range, sizeof(range)))
return -EFAULT;
- return 0;
+ return ret;
}
-int __pure btrfs_is_empty_uuid(u8 *uuid)
+/*
+ * Calculate the number of transaction items to reserve for creating a subvolume
+ * or snapshot, not including the inode, directory entries, or parent directory.
+ */
+static unsigned int create_subvol_num_items(const struct btrfs_qgroup_inherit *inherit)
{
- int i;
+ /*
+ * 1 to add root block
+ * 1 to add root item
+ * 1 to add root ref
+ * 1 to add root backref
+ * 1 to add UUID item
+ * 1 to add qgroup info
+ * 1 to add qgroup limit
+ *
+ * Ideally the last two would only be accounted if qgroups are enabled,
+ * but that can change between now and the time we would insert them.
+ */
+ unsigned int num_items = 7;
- for (i = 0; i < BTRFS_UUID_SIZE; i++) {
- if (uuid[i])
- return 0;
+ if (inherit) {
+ /* 2 to add qgroup relations for each inherited qgroup */
+ num_items += 2 * inherit->num_qgroups;
}
- return 1;
+ return num_items;
}
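/*
 * Editorial sketch, not part of the patch above: a worked instance of the
 * accounting done by create_subvol_num_items(). For a hypothetical inherit
 * argument naming three qgroups this reserves 7 + 2 * 3 = 13 items, before
 * the caller adds the inode and directory items via btrfs_new_inode_prepare().
 */
static inline unsigned int example_subvol_items_three_qgroups(void)
{
	const unsigned int base_items = 7;	/* root block/item/ref/backref, UUID, qgroup info + limit */
	const unsigned int inherited_qgroups = 3;

	return base_items + 2 * inherited_qgroups;	/* 13 */
}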
-static noinline int create_subvol(struct user_namespace *mnt_userns,
+static noinline int create_subvol(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
- const char *name, int namelen,
struct btrfs_qgroup_inherit *inherit)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_trans_handle *trans;
struct btrfs_key key;
- struct btrfs_root_item *root_item;
+ struct btrfs_root_item AUTO_KFREE(root_item);
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *new_root;
struct btrfs_block_rsv block_rsv;
struct timespec64 cur_time = current_time(dir);
- struct inode *inode;
+ struct btrfs_new_inode_args new_inode_args = {
+ .dir = dir,
+ .dentry = dentry,
+ .subvol = true,
+ };
+ unsigned int trans_num_items;
int ret;
- int err;
- dev_t anon_dev = 0;
+ dev_t anon_dev;
u64 objectid;
- u64 index = 0;
+ u64 qgroup_reserved = 0;
root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
if (!root_item)
@@ -529,51 +527,58 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
if (ret)
- goto fail_free;
-
- ret = get_anon_bdev(&anon_dev);
- if (ret < 0)
- goto fail_free;
+ return ret;
/*
* Don't create subvolume whose level is not zero. Or qgroup will be
* screwed up since it assumes subvolume qgroup's level to be 0.
*/
- if (btrfs_qgroup_level(objectid)) {
- ret = -ENOSPC;
- goto fail_free;
+ if (btrfs_qgroup_level(objectid))
+ return -ENOSPC;
+
+ ret = get_anon_bdev(&anon_dev);
+ if (ret < 0)
+ return ret;
+
+ new_inode_args.inode = btrfs_new_subvol_inode(idmap, dir);
+ if (!new_inode_args.inode) {
+ ret = -ENOMEM;
+ goto out_anon_dev;
}
+ ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
+ if (ret)
+ goto out_inode;
+ trans_num_items += create_subvol_num_items(inherit);
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
- /*
- * The same as the snapshot creation, please see the comment
- * of create_snapshot().
- */
- ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+ trans_num_items, false);
if (ret)
- goto fail_free;
+ goto out_new_inode_args;
+ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- btrfs_subvolume_release_metadata(root, &block_rsv);
- goto fail_free;
+ goto out_release_rsv;
}
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
- ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, 0, objectid, btrfs_root_id(root), inherit);
if (ret)
- goto fail;
+ goto out;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
- BTRFS_NESTING_NORMAL);
+ 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
- goto fail;
+ goto out;
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
inode_item = &root_item->inode;
btrfs_set_stack_inode_generation(inode_item, 1);
@@ -608,110 +613,84 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
key.objectid = objectid;
- key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = 0;
ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
root_item);
if (ret) {
+ int ret2;
+
/*
* Since we don't abort the transaction in this case, free the
* tree block so that we don't leak space and leave the
* filesystem in an inconsistent state (an extent item in the
- * extent tree without backreferences). Also no need to have
- * the tree block locked since it is not in any tree at this
- * point, so no other task can find it and use it.
+ * extent tree with a backreference for a root that does not
+ * exist).
*/
- btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ btrfs_tree_lock(leaf);
+ btrfs_clear_buffer_dirty(trans, leaf);
+ btrfs_tree_unlock(leaf);
+ ret2 = btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
+ if (unlikely(ret2 < 0))
+ btrfs_abort_transaction(trans, ret2);
free_extent_buffer(leaf);
- goto fail;
+ goto out;
}
free_extent_buffer(leaf);
leaf = NULL;
- key.offset = (u64)-1;
- new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
+ new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev);
if (IS_ERR(new_root)) {
- free_anon_bdev(anon_dev);
ret = PTR_ERR(new_root);
btrfs_abort_transaction(trans, ret);
- goto fail;
+ goto out;
}
- /* Freeing will be done in btrfs_put_root() of new_root */
+ /* anon_dev is owned by new_root now. */
anon_dev = 0;
+ BTRFS_I(new_inode_args.inode)->root = new_root;
+ /* ... and new_root is owned by new_inode_args.inode now. */
ret = btrfs_record_root_in_trans(trans, new_root);
- if (ret) {
- btrfs_put_root(new_root);
- btrfs_abort_transaction(trans, ret);
- goto fail;
- }
-
- ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns);
- btrfs_put_root(new_root);
- if (ret) {
- /* We potentially lose an unused inode item here */
- btrfs_abort_transaction(trans, ret);
- goto fail;
- }
-
- /*
- * insert the directory item
- */
- ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto fail;
+ goto out;
}
- ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
- BTRFS_FT_DIR, index);
- if (ret) {
+ ret = btrfs_uuid_tree_add(trans, root_item->uuid,
+ BTRFS_UUID_KEY_SUBVOL, objectid);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto fail;
+ goto out;
}
- btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
- ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto fail;
- }
+ btrfs_record_new_subvolume(trans, BTRFS_I(dir));
- ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
- btrfs_ino(BTRFS_I(dir)), index, name, namelen);
- if (ret) {
+ ret = btrfs_create_new_inode(trans, &new_inode_args);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto fail;
+ goto out;
}
- ret = btrfs_uuid_tree_add(trans, root_item->uuid,
- BTRFS_UUID_KEY_SUBVOL, objectid);
- if (ret)
- btrfs_abort_transaction(trans, ret);
+ d_instantiate_new(dentry, new_inode_args.inode);
+ new_inode_args.inode = NULL;
-fail:
- kfree(root_item);
+out:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
- btrfs_subvolume_release_metadata(root, &block_rsv);
-
- err = btrfs_commit_transaction(trans);
- if (err && !ret)
- ret = err;
-
- if (!ret) {
- inode = btrfs_lookup_dentry(dir, dentry);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
- d_instantiate(dentry, inode);
- }
- return ret;
-
-fail_free:
+ btrfs_end_transaction(trans);
+out_release_rsv:
+ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+out_inode:
+ iput(new_inode_args.inode);
+out_anon_dev:
if (anon_dev)
free_anon_bdev(anon_dev);
- kfree(root_item);
+
return ret;
}
@@ -719,12 +698,25 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
struct dentry *dentry, bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
+ unsigned int trans_num_items;
struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv *block_rsv;
+ u64 qgroup_reserved = 0;
int ret;
+ /* We do not support snapshotting right now. */
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_warn(fs_info,
+ "extent tree v2 doesn't support snapshotting yet");
+ return -EOPNOTSUPP;
+ }
+
+ if (btrfs_root_refs(&root->root_item) == 0)
+ return -ENOENT;
+
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return -EINVAL;
@@ -749,26 +741,24 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto free_pending;
}
- btrfs_init_block_rsv(&pending_snapshot->block_rsv,
- BTRFS_BLOCK_RSV_TEMP);
+ block_rsv = &pending_snapshot->block_rsv;
+ btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
- * 1 - parent dir inode
- * 2 - dir entries
- * 1 - root item
- * 2 - root ref/backref
- * 1 - root of snapshot
- * 1 - UUID item
+ * 1 to add dir item
+ * 1 to add dir index
+ * 1 to update parent inode item
*/
- ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
- &pending_snapshot->block_rsv, 8,
- false);
+ trans_num_items = create_subvol_num_items(inherit) + 3;
+ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv,
+ trans_num_items, false);
if (ret)
goto free_pending;
+ qgroup_reserved = block_rsv->qgroup_rsv_reserved;
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
- pending_snapshot->dir = dir;
+ pending_snapshot->dir = BTRFS_I(dir);
pending_snapshot->inherit = inherit;
trans = btrfs_start_transaction(root, 0);
@@ -776,11 +766,15 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
ret = PTR_ERR(trans);
goto fail;
}
+ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
+ if (ret) {
+ btrfs_end_transaction(trans);
+ goto fail;
+ }
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
- spin_lock(&fs_info->trans_lock);
- list_add(&pending_snapshot->list,
- &trans->transaction->pending_snapshots);
- spin_unlock(&fs_info->trans_lock);
+ trans->pending_snapshot = pending_snapshot;
ret = btrfs_commit_transaction(trans);
if (ret)
@@ -808,7 +802,9 @@ fail:
if (ret && pending_snapshot->snap)
pending_snapshot->snap->anon_dev = 0;
btrfs_put_root(pending_snapshot->snap);
- btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
+ btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
free_pending:
if (pending_snapshot->anon_dev)
free_anon_bdev(pending_snapshot->anon_dev);
@@ -839,23 +835,25 @@ free_pending:
* nfs_async_unlink().
*/
-static int btrfs_may_delete(struct user_namespace *mnt_userns,
+static int btrfs_may_delete(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *victim, int isdir)
{
- int error;
+ int ret;
if (d_really_is_negative(victim))
return -ENOENT;
- BUG_ON(d_inode(victim->d_parent) != dir);
+ /* The @victim is not inside @dir. */
+ if (d_inode(victim->d_parent) != dir)
+ return -EINVAL;
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
- error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
- if (error)
- return error;
+ ret = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
+ if (ret)
+ return ret;
if (IS_APPEND(dir))
return -EPERM;
- if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
+ if (check_sticky(idmap, dir, d_inode(victim)) ||
IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
IS_SWAPFILE(d_inode(victim)))
return -EPERM;
@@ -874,16 +872,16 @@ static int btrfs_may_delete(struct user_namespace *mnt_userns,
}
/* copy of may_create in fs/namei.c() */
-static inline int btrfs_may_create(struct user_namespace *mnt_userns,
- struct inode *dir, struct dentry *child)
+static inline int btrfs_may_create(struct mnt_idmap *idmap,
+ struct inode *dir, const struct dentry *child)
{
if (d_really_is_positive(child))
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
- if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
+ if (!fsuidgid_has_mapping(dir->i_sb, idmap))
return -EOVERFLOW;
- return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
+ return inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
}
/*
@@ -891,39 +889,32 @@ static inline int btrfs_may_create(struct user_namespace *mnt_userns,
* sys_mkdirat and vfs_mkdir, but we only do a single component lookup
* inside this filesystem so it's quite a bit simpler.
*/
-static noinline int btrfs_mksubvol(const struct path *parent,
- struct user_namespace *mnt_userns,
- const char *name, int namelen,
- struct btrfs_root *snap_src,
+static noinline int btrfs_mksubvol(struct dentry *parent,
+ struct mnt_idmap *idmap,
+ struct qstr *qname, struct btrfs_root *snap_src,
bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- struct inode *dir = d_inode(parent->dentry);
- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ struct inode *dir = d_inode(parent);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct dentry *dentry;
- int error;
-
- error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
- if (error == -EINTR)
- return error;
+ struct fscrypt_str name_str = FSTR_INIT((char *)qname->name, qname->len);
+ int ret;
- dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
- error = PTR_ERR(dentry);
+ dentry = start_creating_killable(idmap, parent, qname);
if (IS_ERR(dentry))
- goto out_unlock;
+ return PTR_ERR(dentry);
- error = btrfs_may_create(mnt_userns, dir, dentry);
- if (error)
+ ret = btrfs_may_create(idmap, dir, dentry);
+ if (ret)
goto out_dput;
/*
* even if this name doesn't exist, we may get hash collisions.
* check for them now when we can safely fail
*/
- error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
- dir->i_ino, name,
- namelen);
- if (error)
+ ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, dir->i_ino, &name_str);
+ if (ret)
goto out_dput;
down_read(&fs_info->subvol_sem);
@@ -932,34 +923,31 @@ static noinline int btrfs_mksubvol(const struct path *parent,
goto out_up_read;
if (snap_src)
- error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
+ ret = create_snapshot(snap_src, dir, dentry, readonly, inherit);
else
- error = create_subvol(mnt_userns, dir, dentry, name, namelen, inherit);
+ ret = create_subvol(idmap, dir, dentry, inherit);
- if (!error)
+ if (!ret)
fsnotify_mkdir(dir, dentry);
out_up_read:
up_read(&fs_info->subvol_sem);
out_dput:
- dput(dentry);
-out_unlock:
- btrfs_inode_unlock(dir, 0);
- return error;
+ end_creating(dentry);
+ return ret;
}
-static noinline int btrfs_mksnapshot(const struct path *parent,
- struct user_namespace *mnt_userns,
- const char *name, int namelen,
+static noinline int btrfs_mksnapshot(struct dentry *parent,
+ struct mnt_idmap *idmap,
+ struct qstr *qname,
struct btrfs_root *root,
bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
int ret;
- bool snapshot_force_cow = false;
/*
* Force new buffered writes to reserve space even when NOCOW is
- * possible. This is to avoid later writeback (running dealloc) to
+ * possible. This is to avoid later writeback (running delalloc) to
* fall back to COW mode and unexpectedly fail with ENOSPC.
*/
btrfs_drew_read_lock(&root->snapshot_lock);
@@ -974,601 +962,14 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
* creation.
*/
atomic_inc(&root->snapshot_force_cow);
- snapshot_force_cow = true;
-
- btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
-
- ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
- root, readonly, inherit);
-out:
- if (snapshot_force_cow)
- atomic_dec(&root->snapshot_force_cow);
- btrfs_drew_read_unlock(&root->snapshot_lock);
- return ret;
-}
-
-static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
- bool locked)
-{
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct extent_map *em;
- const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
-
- /*
- * hopefully we have this extent in the tree already, try without
- * the full extent lock
- */
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, sectorsize);
- read_unlock(&em_tree->lock);
-
- if (!em) {
- struct extent_state *cached = NULL;
- u64 end = start + sectorsize - 1;
-
- /* get the big lock and read metadata off disk */
- if (!locked)
- lock_extent_bits(io_tree, start, end, &cached);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
- if (!locked)
- unlock_extent_cached(io_tree, start, end, &cached);
-
- if (IS_ERR(em))
- return NULL;
- }
-
- return em;
-}
-
-static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
- bool locked)
-{
- struct extent_map *next;
- bool ret = true;
-
- /* this is the last extent */
- if (em->start + em->len >= i_size_read(inode))
- return false;
-
- next = defrag_lookup_extent(inode, em->start + em->len, locked);
- if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
- ret = false;
- else if ((em->block_start + em->block_len == next->block_start) &&
- (em->block_len > SZ_128K && next->block_len > SZ_128K))
- ret = false;
-
- free_extent_map(next);
- return ret;
-}
-
-/*
- * Prepare one page to be defragged.
- *
- * This will ensure:
- *
- * - Returned page is locked and has been set up properly.
- * - No ordered extent exists in the page.
- * - The page is uptodate.
- *
- * NOTE: Caller should also wait for page writeback after the cluster is
- * prepared, here we don't do writeback wait for each page.
- */
-static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
- pgoff_t index)
-{
- struct address_space *mapping = inode->vfs_inode.i_mapping;
- gfp_t mask = btrfs_alloc_write_mask(mapping);
- u64 page_start = (u64)index << PAGE_SHIFT;
- u64 page_end = page_start + PAGE_SIZE - 1;
- struct extent_state *cached_state = NULL;
- struct page *page;
- int ret;
-
-again:
- page = find_or_create_page(mapping, index, mask);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- /*
- * Since we can defragment files opened read-only, we can encounter
- * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
- * can't do I/O using huge pages yet, so return an error for now.
- * Filesystem transparent huge pages are typically only used for
- * executables that explicitly enable them, so this isn't very
- * restrictive.
- */
- if (PageCompound(page)) {
- unlock_page(page);
- put_page(page);
- return ERR_PTR(-ETXTBSY);
- }
-
- ret = set_page_extent_mapped(page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- return ERR_PTR(ret);
- }
-
- /* Wait for any existing ordered extent in the range */
- while (1) {
- struct btrfs_ordered_extent *ordered;
-
- lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
- ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
- unlock_extent_cached(&inode->io_tree, page_start, page_end,
- &cached_state);
- if (!ordered)
- break;
-
- unlock_page(page);
- btrfs_start_ordered_extent(ordered, 1);
- btrfs_put_ordered_extent(ordered);
- lock_page(page);
- /*
- * We unlocked the page above, so we need check if it was
- * released or not.
- */
- if (page->mapping != mapping || !PagePrivate(page)) {
- unlock_page(page);
- put_page(page);
- goto again;
- }
- }
-
- /*
- * Now the page range has no ordered extent any more. Read the page to
- * make it uptodate.
- */
- if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (page->mapping != mapping || !PagePrivate(page)) {
- unlock_page(page);
- put_page(page);
- goto again;
- }
- if (!PageUptodate(page)) {
- unlock_page(page);
- put_page(page);
- return ERR_PTR(-EIO);
- }
- }
- return page;
-}
-
-struct defrag_target_range {
- struct list_head list;
- u64 start;
- u64 len;
-};
-
-/*
- * Collect all valid target extents.
- *
- * @start: file offset to lookup
- * @len: length to lookup
- * @extent_thresh: file extent size threshold, any extent size >= this value
- * will be ignored
- * @newer_than: only defrag extents newer than this value
- * @do_compress: whether the defrag is doing compression
- * if true, @extent_thresh will be ignored and all regular
- * file extents meeting @newer_than will be targets.
- * @locked: if the range has already held extent lock
- * @target_list: list of targets file extents
- */
-static int defrag_collect_targets(struct btrfs_inode *inode,
- u64 start, u64 len, u32 extent_thresh,
- u64 newer_than, bool do_compress,
- bool locked, struct list_head *target_list)
-{
- u64 cur = start;
- int ret = 0;
-
- while (cur < start + len) {
- struct extent_map *em;
- struct defrag_target_range *new;
- bool next_mergeable = true;
- u64 range_len;
-
- em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
- if (!em)
- break;
-
- /* Skip hole/inline/preallocated extents */
- if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- goto next;
-
- /* Skip older extent */
- if (em->generation < newer_than)
- goto next;
-
- /*
- * For do_compress case, we want to compress all valid file
- * extents, thus no @extent_thresh or mergeable check.
- */
- if (do_compress)
- goto add;
-
- /* Skip too large extent */
- if (em->len >= extent_thresh)
- goto next;
-
- next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
- locked);
- if (!next_mergeable) {
- struct defrag_target_range *last;
-
- /* Empty target list, no way to merge with last entry */
- if (list_empty(target_list))
- goto next;
- last = list_entry(target_list->prev,
- struct defrag_target_range, list);
- /* Not mergeable with last entry */
- if (last->start + last->len != cur)
- goto next;
-
- /* Mergeable, fall through to add it to @target_list. */
- }
-
-add:
- range_len = min(extent_map_end(em), start + len) - cur;
- /*
- * This one is a good target, check if it can be merged into
- * last range of the target list.
- */
- if (!list_empty(target_list)) {
- struct defrag_target_range *last;
-
- last = list_entry(target_list->prev,
- struct defrag_target_range, list);
- ASSERT(last->start + last->len <= cur);
- if (last->start + last->len == cur) {
- /* Mergeable, enlarge the last entry */
- last->len += range_len;
- goto next;
- }
- /* Fall through to allocate a new entry */
- }
-
- /* Allocate new defrag_target_range */
- new = kmalloc(sizeof(*new), GFP_NOFS);
- if (!new) {
- free_extent_map(em);
- ret = -ENOMEM;
- break;
- }
- new->start = cur;
- new->len = range_len;
- list_add_tail(&new->list, target_list);
-
-next:
- cur = extent_map_end(em);
- free_extent_map(em);
- }
- if (ret < 0) {
- struct defrag_target_range *entry;
- struct defrag_target_range *tmp;
-
- list_for_each_entry_safe(entry, tmp, target_list, list) {
- list_del_init(&entry->list);
- kfree(entry);
- }
- }
- return ret;
-}
-
-#define CLUSTER_SIZE (SZ_256K)
-
-/*
- * Defrag one contiguous target range.
- *
- * @inode: target inode
- * @target: target range to defrag
- * @pages: locked pages covering the defrag range
- * @nr_pages: number of locked pages
- *
- * Caller should ensure:
- *
- * - Pages are prepared
- * Pages should be locked, no ordered extent in the pages range,
- * no writeback.
- *
- * - Extent bits are locked
- */
-static int defrag_one_locked_target(struct btrfs_inode *inode,
- struct defrag_target_range *target,
- struct page **pages, int nr_pages,
- struct extent_state **cached_state)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct extent_changeset *data_reserved = NULL;
- const u64 start = target->start;
- const u64 len = target->len;
- unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
- unsigned long start_index = start >> PAGE_SHIFT;
- unsigned long first_index = page_index(pages[0]);
- int ret = 0;
- int i;
-
- ASSERT(last_index - first_index + 1 <= nr_pages);
-
- ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
- if (ret < 0)
- return ret;
- clear_extent_bit(&inode->io_tree, start, start + len - 1,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 0, 0, cached_state);
- set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);
-
- /* Update the page status */
- for (i = start_index - first_index; i <= last_index - first_index; i++) {
- ClearPageChecked(pages[i]);
- btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
- }
- btrfs_delalloc_release_extents(inode, len);
- extent_changeset_free(data_reserved);
-
- return ret;
-}
-
-static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
- u32 extent_thresh, u64 newer_than, bool do_compress)
-{
- struct extent_state *cached_state = NULL;
- struct defrag_target_range *entry;
- struct defrag_target_range *tmp;
- LIST_HEAD(target_list);
- struct page **pages;
- const u32 sectorsize = inode->root->fs_info->sectorsize;
- u64 last_index = (start + len - 1) >> PAGE_SHIFT;
- u64 start_index = start >> PAGE_SHIFT;
- unsigned int nr_pages = last_index - start_index + 1;
- int ret = 0;
- int i;
-
- ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
- ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));
-
- pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!pages)
- return -ENOMEM;
-
- /* Prepare all pages */
- for (i = 0; i < nr_pages; i++) {
- pages[i] = defrag_prepare_one_page(inode, start_index + i);
- if (IS_ERR(pages[i])) {
- ret = PTR_ERR(pages[i]);
- pages[i] = NULL;
- goto free_pages;
- }
- }
- for (i = 0; i < nr_pages; i++)
- wait_on_page_writeback(pages[i]);
-
- /* Lock the pages range */
- lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
- (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
- &cached_state);
- /*
- * Now we have a consistent view about the extent map, re-check
- * which range really needs to be defragged.
- *
- * And this time we have extent locked already, pass @locked = true
- * so that we won't relock the extent range and cause deadlock.
- */
- ret = defrag_collect_targets(inode, start, len, extent_thresh,
- newer_than, do_compress, true,
- &target_list);
- if (ret < 0)
- goto unlock_extent;
-
- list_for_each_entry(entry, &target_list, list) {
- ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
- &cached_state);
- if (ret < 0)
- break;
- }
-
- list_for_each_entry_safe(entry, tmp, &target_list, list) {
- list_del_init(&entry->list);
- kfree(entry);
- }
-unlock_extent:
- unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
- (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
- &cached_state);
-free_pages:
- for (i = 0; i < nr_pages; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
- }
- kfree(pages);
- return ret;
-}
-
-static int defrag_one_cluster(struct btrfs_inode *inode,
- struct file_ra_state *ra,
- u64 start, u32 len, u32 extent_thresh,
- u64 newer_than, bool do_compress,
- unsigned long *sectors_defragged,
- unsigned long max_sectors)
-{
- const u32 sectorsize = inode->root->fs_info->sectorsize;
- struct defrag_target_range *entry;
- struct defrag_target_range *tmp;
- LIST_HEAD(target_list);
- int ret;
-
- BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
- ret = defrag_collect_targets(inode, start, len, extent_thresh,
- newer_than, do_compress, false,
- &target_list);
- if (ret < 0)
- goto out;
-
- list_for_each_entry(entry, &target_list, list) {
- u32 range_len = entry->len;
- /* Reached the limit */
- if (max_sectors && max_sectors == *sectors_defragged)
- break;
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
- if (max_sectors)
- range_len = min_t(u32, range_len,
- (max_sectors - *sectors_defragged) * sectorsize);
+ ret = btrfs_mksubvol(parent, idmap, qname, root, readonly, inherit);
- if (ra)
- page_cache_sync_readahead(inode->vfs_inode.i_mapping,
- ra, NULL, entry->start >> PAGE_SHIFT,
- ((entry->start + range_len - 1) >> PAGE_SHIFT) -
- (entry->start >> PAGE_SHIFT) + 1);
- /*
- * Here we may not defrag any range if holes are punched before
- * we locked the pages.
- * But that's fine, it only affects the @sectors_defragged
- * accounting.
- */
- ret = defrag_one_range(inode, entry->start, range_len,
- extent_thresh, newer_than, do_compress);
- if (ret < 0)
- break;
- *sectors_defragged += range_len;
- }
+ atomic_dec(&root->snapshot_force_cow);
out:
- list_for_each_entry_safe(entry, tmp, &target_list, list) {
- list_del_init(&entry->list);
- kfree(entry);
- }
- return ret;
-}
-
-/*
- * Entry point to file defragmentation.
- *
- * @inode: inode to be defragged
- * @ra: readahead state (can be NUL)
- * @range: defrag options including range and flags
- * @newer_than: minimum transid to defrag
- * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
- * will be defragged.
- */
-int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
- struct btrfs_ioctl_defrag_range_args *range,
- u64 newer_than, unsigned long max_to_defrag)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- unsigned long sectors_defragged = 0;
- u64 isize = i_size_read(inode);
- u64 cur;
- u64 last_byte;
- bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
- bool ra_allocated = false;
- int compress_type = BTRFS_COMPRESS_ZLIB;
- int ret = 0;
- u32 extent_thresh = range->extent_thresh;
-
- if (isize == 0)
- return 0;
-
- if (range->start >= isize)
- return -EINVAL;
-
- if (do_compress) {
- if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
- return -EINVAL;
- if (range->compress_type)
- compress_type = range->compress_type;
- }
-
- if (extent_thresh == 0)
- extent_thresh = SZ_256K;
-
- if (range->start + range->len > range->start) {
- /* Got a specific range */
- last_byte = min(isize, range->start + range->len) - 1;
- } else {
- /* Defrag until file end */
- last_byte = isize - 1;
- }
-
- /*
- * If we were not given a ra, allocate a readahead context. As
- * readahead is just an optimization, defrag will work without it so
- * we don't error out.
- */
- if (!ra) {
- ra_allocated = true;
- ra = kzalloc(sizeof(*ra), GFP_KERNEL);
- if (ra)
- file_ra_state_init(ra, inode->i_mapping);
- }
-
- /* Align the range */
- cur = round_down(range->start, fs_info->sectorsize);
- last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
-
- while (cur < last_byte) {
- u64 cluster_end;
-
- /* The cluster size 256K should always be page aligned */
- BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
-
- /* We want the cluster end at page boundary when possible */
- cluster_end = (((cur >> PAGE_SHIFT) +
- (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
- cluster_end = min(cluster_end, last_byte);
-
- btrfs_inode_lock(inode, 0);
- if (IS_SWAPFILE(inode)) {
- ret = -ETXTBSY;
- btrfs_inode_unlock(inode, 0);
- break;
- }
- if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
- btrfs_inode_unlock(inode, 0);
- break;
- }
- if (do_compress)
- BTRFS_I(inode)->defrag_compress = compress_type;
- ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
- cluster_end + 1 - cur, extent_thresh,
- newer_than, do_compress,
- &sectors_defragged, max_to_defrag);
- btrfs_inode_unlock(inode, 0);
- if (ret < 0)
- break;
- cur = cluster_end + 1;
- }
-
- if (ra_allocated)
- kfree(ra);
- if (sectors_defragged) {
- /*
- * We have defragged some sectors, for compression case they
- * need to be written back immediately.
- */
- if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
- filemap_flush(inode->i_mapping);
- if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- filemap_flush(inode->i_mapping);
- }
- if (range->compress_type == BTRFS_COMPRESS_LZO)
- btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
- else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
- btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
- ret = sectors_defragged;
- }
- if (do_compress) {
- btrfs_inode_lock(inode, 0);
- BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
- btrfs_inode_unlock(inode, 0);
- }
+ btrfs_drew_read_unlock(&root->snapshot_lock);
return ret;
}
@@ -1618,17 +1019,14 @@ static noinline int btrfs_ioctl_resize(struct file *file,
void __user *arg)
{
BTRFS_DEV_LOOKUP_ARGS(args);
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 new_size;
u64 old_size;
u64 devid = 1;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
- struct btrfs_trans_handle *trans;
struct btrfs_device *device = NULL;
char *sizestr;
- char *retptr;
char *devstr = NULL;
int ret = 0;
int mod = 0;
@@ -1650,7 +1048,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
ret = PTR_ERR(vol_args);
goto out_drop;
}
- vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args_path(vol_args);
+ if (ret < 0)
+ goto out_free;
+
sizestr = vol_args->name;
cancel = (strcmp("cancel", sizestr) == 0);
ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
@@ -1693,6 +1094,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (!strcmp(sizestr, "max"))
new_size = bdev_nr_bytes(device->bdev);
else {
+ char *retptr;
+
if (sizestr[0] == '-') {
mod = -1;
sizestr++;
@@ -1740,6 +1143,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
new_size = round_down(new_size, fs_info->sectorsize);
if (new_size > old_size) {
+ struct btrfs_trans_handle *trans;
+
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -1752,9 +1157,9 @@ static noinline int btrfs_ioctl_resize(struct file *file,
} /* equal, nothing need to do */
if (ret == 0 && new_size != old_size)
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"resize device %s (devid %llu) from %llu to %llu",
- rcu_str_deref(device->name), device->devid,
+ btrfs_dev_name(device), device->devid,
old_size, new_size);
out_finish:
btrfs_exclop_finish(fs_info);
@@ -1766,13 +1171,13 @@ out_drop:
}
static noinline int __btrfs_ioctl_snap_create(struct file *file,
- struct user_namespace *mnt_userns,
- const char *name, unsigned long fd, int subvol,
+ struct mnt_idmap *idmap,
+ const char *name, unsigned long fd, bool subvol,
bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- int namelen;
int ret = 0;
+ struct qstr qname = QSTR_INIT(name, strlen(name));
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
@@ -1781,47 +1186,53 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
if (ret)
goto out;
- namelen = strlen(name);
if (strchr(name, '/')) {
ret = -EINVAL;
goto out_drop_write;
}
- if (name[0] == '.' &&
- (namelen == 1 || (name[1] == '.' && namelen == 2))) {
+ if (qname.name[0] == '.' &&
+ (qname.len == 1 || (qname.name[1] == '.' && qname.len == 2))) {
ret = -EEXIST;
goto out_drop_write;
}
if (subvol) {
- ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
- namelen, NULL, readonly, inherit);
+ ret = btrfs_mksubvol(file_dentry(file), idmap, &qname, NULL,
+ readonly, inherit);
} else {
- struct fd src = fdget(fd);
+ CLASS(fd, src)(fd);
struct inode *src_inode;
- if (!src.file) {
+ if (fd_empty(src)) {
ret = -EINVAL;
goto out_drop_write;
}
- src_inode = file_inode(src.file);
+ src_inode = file_inode(fd_file(src));
if (src_inode->i_sb != file_inode(file)->i_sb) {
btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
"Snapshot src from another FS");
ret = -EXDEV;
- } else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
+ } else if (!inode_owner_or_capable(idmap, src_inode)) {
/*
* Subvolume creation is not restricted, but snapshots
* are limited to own subvolumes only
*/
ret = -EPERM;
+ } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * Snapshots must be made with the src_inode referring
+ * to the subvolume inode, otherwise the permission
+ * checking above is useless because we may have
+ * permission on a lower directory but not the subvol
+ * itself.
+ */
+ ret = -EINVAL;
} else {
- ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
- name, namelen,
+ ret = btrfs_mksnapshot(file_dentry(file), idmap, &qname,
BTRFS_I(src_inode)->root,
readonly, inherit);
}
- fdput(src);
}
out_drop_write:
mnt_drop_write_file(file);
@@ -1830,7 +1241,7 @@ out:
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
- void __user *arg, int subvol)
+ void __user *arg, bool subvol)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@@ -1841,18 +1252,21 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
- vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args_path(vol_args);
+ if (ret < 0)
+ goto out;
- ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
+ ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file),
vol_args->name, vol_args->fd, subvol,
false, NULL);
+out:
kfree(vol_args);
return ret;
}
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
- void __user *arg, int subvol)
+ void __user *arg, bool subvol)
{
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
@@ -1865,7 +1279,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
- vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args);
+ if (ret < 0)
+ goto free_args;
if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
ret = -EOPNOTSUPP;
@@ -1875,7 +1291,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
- u64 nums;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file));
if (vol_args->size < sizeof(*inherit) ||
vol_args->size > PAGE_SIZE) {
@@ -1888,22 +1304,12 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
goto free_args;
}
- if (inherit->num_qgroups > PAGE_SIZE ||
- inherit->num_ref_copies > PAGE_SIZE ||
- inherit->num_excl_copies > PAGE_SIZE) {
- ret = -EINVAL;
- goto free_inherit;
- }
-
- nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
- 2 * inherit->num_excl_copies;
- if (vol_args->size != struct_size(inherit, qgroups, nums)) {
- ret = -EINVAL;
+ ret = btrfs_qgroup_check_inherit(fs_info, inherit, vol_args->size);
+ if (ret < 0)
goto free_inherit;
- }
}
- ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
+ ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file),
vol_args->name, vol_args->fd, subvol,
readonly, inherit);
if (ret)
@@ -1915,16 +1321,15 @@ free_args:
return ret;
}
-static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
+static noinline int btrfs_ioctl_subvol_getflags(struct btrfs_inode *inode,
void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u64 flags = 0;
- if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
down_read(&fs_info->subvol_sem);
@@ -1942,14 +1347,14 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 root_flags;
u64 flags;
int ret = 0;
- if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
+ if (!inode_owner_or_capable(file_mnt_idmap(file), inode))
return -EPERM;
ret = mnt_want_write_file(file);
@@ -1995,7 +1400,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
spin_unlock(&root->root_item_lock);
btrfs_warn(fs_info,
"Attempt to set subvolume %llu read-write during send",
- root->root_key.objectid);
+ btrfs_root_id(root));
ret = -EPERM;
goto out_drop_sem;
}
@@ -2027,8 +1432,8 @@ out:
return ret;
}
-static noinline int key_in_sk(struct btrfs_key *key,
- struct btrfs_ioctl_search_key *sk)
+static noinline bool key_in_sk(const struct btrfs_key *key,
+ const struct btrfs_ioctl_search_key *sk)
{
struct btrfs_key test;
int ret;
@@ -2039,7 +1444,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
ret = btrfs_comp_cpu_keys(key, &test);
if (ret < 0)
- return 0;
+ return false;
test.objectid = sk->max_objectid;
test.type = sk->max_type;
@@ -2047,14 +1452,14 @@ static noinline int key_in_sk(struct btrfs_key *key,
ret = btrfs_comp_cpu_keys(key, &test);
if (ret > 0)
- return 0;
- return 1;
+ return false;
+ return true;
}
static noinline int copy_to_sk(struct btrfs_path *path,
struct btrfs_key *key,
- struct btrfs_ioctl_search_key *sk,
- size_t *buf_size,
+ const struct btrfs_ioctl_search_key *sk,
+ u64 *buf_size,
char __user *ubuf,
unsigned long *sk_offset,
int *num_found)
@@ -2082,7 +1487,7 @@ static noinline int copy_to_sk(struct btrfs_path *path,
for (i = slot; i < nritems; i++) {
item_off = btrfs_item_ptr_offset(leaf, i);
- item_len = btrfs_item_size_nr(leaf, i);
+ item_len = btrfs_item_size(leaf, i);
btrfs_item_key_to_cpu(leaf, key, i);
if (!key_in_sk(key, sk))
@@ -2110,8 +1515,8 @@ static noinline int copy_to_sk(struct btrfs_path *path,
}
sh.objectid = key->objectid;
- sh.offset = key->offset;
sh.type = key->type;
+ sh.offset = key->offset;
sh.len = item_len;
sh.transid = found_transid;
@@ -2184,15 +1589,14 @@ out:
return ret;
}
-static noinline int search_ioctl(struct inode *inode,
+static noinline int search_ioctl(struct btrfs_root *root,
struct btrfs_ioctl_search_key *sk,
- size_t *buf_size,
+ u64 *buf_size,
char __user *ubuf)
{
- struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root;
+ struct btrfs_fs_info *info = root->fs_info;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
int num_found = 0;
unsigned long sk_offset = 0;
@@ -2207,14 +1611,13 @@ static noinline int search_ioctl(struct inode *inode,
return -ENOMEM;
if (sk->tree_id == 0) {
- /* search the root of the inode that was passed */
- root = btrfs_grab_root(BTRFS_I(inode)->root);
+ /* Search the root that we got passed. */
+ root = btrfs_grab_root(root);
} else {
+ /* Look up the root from the arguments. */
root = btrfs_get_fs_root(info, sk->tree_id, true);
- if (IS_ERR(root)) {
- btrfs_free_path(path);
+ if (IS_ERR(root))
return PTR_ERR(root);
- }
}
key.objectid = sk->min_objectid;
@@ -2222,16 +1625,19 @@ static noinline int search_ioctl(struct inode *inode,
key.offset = sk->min_offset;
while (1) {
- ret = -EFAULT;
- if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
+ /*
+ * Ensure that the whole user buffer is faulted in at sub-page
+ * granularity, otherwise the loop may live-lock.
+ */
+ if (fault_in_subpage_writeable(ubuf + sk_offset, *buf_size - sk_offset)) {
+ ret = -EFAULT;
break;
+ }
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
- if (ret != 0) {
- if (ret > 0)
- ret = 0;
- goto err;
- }
+ if (ret)
+ break;
+
ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
&sk_offset, &num_found);
btrfs_release_path(path);
@@ -2239,36 +1645,32 @@ static noinline int search_ioctl(struct inode *inode,
break;
}
+ /* Normalize return values from btrfs_search_forward() and copy_to_sk(). */
if (ret > 0)
ret = 0;
-err:
+
sk->nr_items = num_found;
btrfs_put_root(root);
- btrfs_free_path(path);
return ret;
}
-static noinline int btrfs_ioctl_tree_search(struct file *file,
- void __user *argp)
+static noinline int btrfs_ioctl_tree_search(struct btrfs_root *root,
+ void __user *argp)
{
- struct btrfs_ioctl_search_args __user *uargs;
+ struct btrfs_ioctl_search_args __user *uargs = argp;
struct btrfs_ioctl_search_key sk;
- struct inode *inode;
int ret;
- size_t buf_size;
+ u64 buf_size;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- uargs = (struct btrfs_ioctl_search_args __user *)argp;
-
if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
return -EFAULT;
buf_size = sizeof(uargs->buf);
- inode = file_inode(file);
- ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
+ ret = search_ioctl(root, &sk, &buf_size, uargs->buf);
/*
* In the original implementation an overflow is handled by returning a
@@ -2282,21 +1684,19 @@ static noinline int btrfs_ioctl_tree_search(struct file *file,
return ret;
}
-static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
+static noinline int btrfs_ioctl_tree_search_v2(struct btrfs_root *root,
void __user *argp)
{
- struct btrfs_ioctl_search_args_v2 __user *uarg;
+ struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
struct btrfs_ioctl_search_args_v2 args;
- struct inode *inode;
int ret;
- size_t buf_size;
- const size_t buf_limit = SZ_16M;
+ u64 buf_size;
+ const u64 buf_limit = SZ_16M;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* copy search header and buffer size */
- uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
if (copy_from_user(&args, uarg, sizeof(args)))
return -EFAULT;
@@ -2306,8 +1706,7 @@ static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
if (buf_size > buf_limit)
buf_size = buf_limit;
- inode = file_inode(file);
- ret = search_ioctl(inode, &args.key, &buf_size,
+ ret = search_ioctl(root, &args.key, &buf_size,
(char __user *)(&uarg->buf[0]));
if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
ret = -EFAULT;
@@ -2334,7 +1733,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
int total_len = 0;
struct btrfs_inode_ref *iref;
struct extent_buffer *l;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
name[0]='\0';
@@ -2395,28 +1794,25 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
ret = 0;
out:
btrfs_put_root(root);
- btrfs_free_path(path);
return ret;
}
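/*
 * Unprivileged variant of btrfs_search_path_in_tree(): build the path from
 * @dirid up towards the inode the ioctl was called on, checking read+exec
 * permission on every directory visited along the way.
 */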
-static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
+static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
struct inode *inode,
struct btrfs_ioctl_ino_lookup_user_args *args)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- struct super_block *sb = inode->i_sb;
- struct btrfs_key upper_limit = BTRFS_I(inode)->location;
- u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
+ u64 upper_limit = btrfs_ino(BTRFS_I(inode));
+ u64 treeid = btrfs_root_id(BTRFS_I(inode)->root);
u64 dirid = args->dirid;
unsigned long item_off;
unsigned long item_len;
struct btrfs_inode_ref *iref;
struct btrfs_root_ref *rref;
struct btrfs_root *root = NULL;
- struct btrfs_path *path;
- struct btrfs_key key, key2;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct btrfs_key key;
struct extent_buffer *leaf;
- struct inode *temp_inode;
char *ptr;
int slot;
int len;
@@ -2431,19 +1827,19 @@ static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
* If the bottom subvolume does not exist directly under upper_limit,
* construct the path in from the bottom up.
*/
- if (dirid != upper_limit.objectid) {
+ if (dirid != upper_limit) {
ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
root = btrfs_get_fs_root(fs_info, treeid, true);
- if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
- }
+ if (IS_ERR(root))
+ return PTR_ERR(root);
key.objectid = dirid;
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
while (1) {
+ struct btrfs_inode *temp_inode;
+
ret = btrfs_search_backwards(root, &key, path);
if (ret < 0)
goto out_put;
@@ -2468,45 +1864,32 @@ static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
read_extent_buffer(leaf, ptr,
(unsigned long)(iref + 1), len);
- /* Check the read+exec permission of this directory */
- ret = btrfs_previous_item(root, path, dirid,
- BTRFS_INODE_ITEM_KEY);
- if (ret < 0) {
- goto out_put;
- } else if (ret > 0) {
- ret = -ENOENT;
- goto out_put;
- }
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(leaf, &key2, slot);
- if (key2.objectid != dirid) {
- ret = -ENOENT;
- goto out_put;
- }
-
- temp_inode = btrfs_iget(sb, key2.objectid, root);
+ /*
+ * We don't need the path anymore, so release it and
+ * avoid deadlocks and lockdep warnings in case
+ * btrfs_iget() needs to look up the inode from its root
+ * btree and lock the same leaf.
+ */
+ btrfs_release_path(path);
+ temp_inode = btrfs_iget(key.offset, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out_put;
}
- ret = inode_permission(mnt_userns, temp_inode,
+ /* Check the read+exec permission of this directory. */
+ ret = inode_permission(idmap, &temp_inode->vfs_inode,
MAY_READ | MAY_EXEC);
- iput(temp_inode);
- if (ret) {
- ret = -EACCES;
+ iput(&temp_inode->vfs_inode);
+ if (ret)
goto out_put;
- }
- if (key.offset == upper_limit.objectid)
+ if (key.offset == upper_limit)
break;
if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
ret = -EACCES;
goto out_put;
}
- btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
@@ -2524,25 +1907,21 @@ static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
key.type = BTRFS_ROOT_REF_KEY;
key.offset = args->treeid;
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ return -ENOENT;
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
item_off = btrfs_item_ptr_offset(leaf, slot);
- item_len = btrfs_item_size_nr(leaf, slot);
+ item_len = btrfs_item_size(leaf, slot);
/* Check if dirid in ROOT_REF corresponds to passed dirid */
rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
- if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
- ret = -EINVAL;
- goto out;
- }
+ if (args->dirid != btrfs_root_ref_dirid(leaf, rref))
+ return -EINVAL;
/* Copy subvolume's name */
item_off += sizeof(struct btrfs_root_ref);
@@ -2552,30 +1931,26 @@ static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
out_put:
btrfs_put_root(root);
-out:
- btrfs_free_path(path);
+
return ret;
}
-static noinline int btrfs_ioctl_ino_lookup(struct file *file,
+static noinline int btrfs_ioctl_ino_lookup(struct btrfs_root *root,
void __user *argp)
{
struct btrfs_ioctl_ino_lookup_args *args;
- struct inode *inode;
int ret = 0;
args = memdup_user(argp, sizeof(*args));
if (IS_ERR(args))
return PTR_ERR(args);
- inode = file_inode(file);
-
/*
* Unprivileged query to obtain the containing subvolume root id. The
* path is reset so it's consistent with btrfs_search_path_in_tree.
*/
if (args->treeid == 0)
- args->treeid = BTRFS_I(inode)->root->root_key.objectid;
+ args->treeid = btrfs_root_id(root);
if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
args->name[0] = 0;
@@ -2587,7 +1962,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
goto out;
}
- ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
+ ret = btrfs_search_path_in_tree(root->fs_info,
args->treeid, args->objectid,
args->name);
@@ -2624,7 +1999,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
inode = file_inode(file);
if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
- BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
+ btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
/*
* The subvolume does not exist under the fd with which this is
* called.
@@ -2633,7 +2008,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
return -EACCES;
}
- ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);
+ ret = btrfs_search_path_in_tree_user(file_mnt_idmap(file), inode, args);
if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
ret = -EFAULT;
@@ -2643,7 +2018,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
}
/* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
-static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
+static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp)
{
struct btrfs_ioctl_get_subvol_info_args *subvol_info;
struct btrfs_fs_info *fs_info;
@@ -2655,7 +2030,6 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
struct extent_buffer *leaf;
unsigned long item_off;
unsigned long item_len;
- struct inode *inode;
int slot;
int ret = 0;
@@ -2669,11 +2043,10 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
return -ENOMEM;
}
- inode = file_inode(file);
fs_info = BTRFS_I(inode)->root->fs_info;
/* Get root_item of inode's subvolume */
- key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+ key.objectid = btrfs_root_id(BTRFS_I(inode)->root);
root = btrfs_get_fs_root(fs_info, key.objectid, true);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
@@ -2720,7 +2093,7 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
ret = btrfs_next_leaf(fs_info->tree_root, path);
if (ret < 0) {
goto out;
- } else if (ret > 0) {
+ } else if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -2738,7 +2111,7 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
item_off = btrfs_item_ptr_offset(leaf, slot)
+ sizeof(struct btrfs_root_ref);
- item_len = btrfs_item_size_nr(leaf, slot)
+ item_len = btrfs_item_size(leaf, slot)
- sizeof(struct btrfs_root_ref);
read_extent_buffer(leaf, subvol_info->name,
item_off, item_len);
@@ -2748,6 +2121,8 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
}
}
+ btrfs_free_path(path);
+ path = NULL;
if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
ret = -EFAULT;
@@ -2763,15 +2138,14 @@ out_free:
* Return ROOT_REF information of the subvolume containing this inode
* except the subvolume name.
*/
-static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
+static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
+ void __user *argp)
{
struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
struct btrfs_root_ref *rref;
- struct btrfs_root *root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *leaf;
- struct inode *inode;
u64 objectid;
int slot;
int ret;
@@ -2787,15 +2161,13 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
return PTR_ERR(rootrefs);
}
- inode = file_inode(file);
- root = BTRFS_I(inode)->root->fs_info->tree_root;
- objectid = BTRFS_I(inode)->root->root_key.objectid;
-
+ objectid = btrfs_root_id(root);
key.objectid = objectid;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = rootrefs->min_treeid;
found = 0;
+ root = root->fs_info->tree_root;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
goto out;
@@ -2804,7 +2176,7 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
goto out;
- } else if (ret > 0) {
+ } else if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
@@ -2833,13 +2205,15 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
ret = btrfs_next_item(root, path);
if (ret < 0) {
goto out;
- } else if (ret > 0) {
+ } else if (unlikely(ret > 0)) {
ret = -EUCLEAN;
goto out;
}
}
out:
+ btrfs_free_path(path);
+
if (!ret || ret == -EOVERFLOW) {
rootrefs->num_items = found;
/* update min_treeid for next search */
@@ -2851,7 +2225,6 @@ out:
}
kfree(rootrefs);
- btrfs_free_path(path);
return ret;
}
@@ -2861,27 +2234,33 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
bool destroy_v2)
{
struct dentry *parent = file->f_path.dentry;
- struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
struct dentry *dentry;
struct inode *dir = d_inode(parent);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *dest = NULL;
struct btrfs_ioctl_vol_args *vol_args = NULL;
struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
- struct user_namespace *mnt_userns = file_mnt_user_ns(file);
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
char *subvol_name, *subvol_name_ptr = NULL;
- int subvol_namelen;
- int err = 0;
+ int ret = 0;
bool destroy_parent = false;
+ /* We don't support snapshots with extent tree v2 yet. */
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info,
+ "extent tree v2 doesn't support snapshot deletion yet");
+ return -EOPNOTSUPP;
+ }
+
if (destroy_v2) {
vol_args2 = memdup_user(arg, sizeof(*vol_args2));
if (IS_ERR(vol_args2))
return PTR_ERR(vol_args2);
if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
- err = -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
goto out;
}
@@ -2890,29 +2269,31 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* name, same as v1 currently does.
*/
if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
- vol_args2->name[BTRFS_SUBVOL_NAME_MAX] = 0;
+ ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args2);
+ if (ret < 0)
+ goto out;
subvol_name = vol_args2->name;
- err = mnt_want_write_file(file);
- if (err)
+ ret = mnt_want_write_file(file);
+ if (ret)
goto out;
} else {
struct inode *old_dir;
if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
- err = mnt_want_write_file(file);
- if (err)
+ ret = mnt_want_write_file(file);
+ if (ret)
goto out;
dentry = btrfs_get_dentry(fs_info->sb,
BTRFS_FIRST_FREE_OBJECTID,
- vol_args2->subvolid, 0, 0);
+ vol_args2->subvolid, 0);
if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
+ ret = PTR_ERR(dentry);
goto out_drop_write;
}
@@ -2932,7 +2313,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
*/
dput(dentry);
if (IS_ERR(parent)) {
- err = PTR_ERR(parent);
+ ret = PTR_ERR(parent);
goto out_drop_write;
}
old_dir = dir;
@@ -2955,15 +2336,15 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* anywhere in the filesystem the user wouldn't be able
* to delete without an idmapped mount.
*/
- if (old_dir != dir && mnt_userns != &init_user_ns) {
- err = -EOPNOTSUPP;
+ if (old_dir != dir && idmap != &nop_mnt_idmap) {
+ ret = -EOPNOTSUPP;
goto free_parent;
}
subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
fs_info, vol_args2->subvolid);
if (IS_ERR(subvol_name_ptr)) {
- err = PTR_ERR(subvol_name_ptr);
+ ret = PTR_ERR(subvol_name_ptr);
goto free_parent;
}
/* subvol_name_ptr is already nul terminated */
@@ -2974,39 +2355,32 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
- vol_args->name[BTRFS_PATH_NAME_MAX] = 0;
+ ret = btrfs_check_ioctl_vol_args_path(vol_args);
+ if (ret < 0)
+ goto out;
+
subvol_name = vol_args->name;
- err = mnt_want_write_file(file);
- if (err)
+ ret = mnt_want_write_file(file);
+ if (ret)
goto out;
}
- subvol_namelen = strlen(subvol_name);
-
if (strchr(subvol_name, '/') ||
- strncmp(subvol_name, "..", subvol_namelen) == 0) {
- err = -EINVAL;
+ strcmp(subvol_name, "..") == 0) {
+ ret = -EINVAL;
goto free_subvol_name;
}
if (!S_ISDIR(dir->i_mode)) {
- err = -ENOTDIR;
+ ret = -ENOTDIR;
goto free_subvol_name;
}
- err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
- if (err == -EINTR)
- goto free_subvol_name;
- dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
+ dentry = start_removing_killable(idmap, parent, &QSTR(subvol_name));
if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- goto out_unlock_dir;
- }
-
- if (d_really_is_negative(dentry)) {
- err = -ENOENT;
- goto out_dput;
+ ret = PTR_ERR(dentry);
+ goto out_end_removing;
}
inode = d_inode(dentry);
@@ -3025,9 +2399,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* Users who want to delete empty subvols should try
* rmdir(2).
*/
- err = -EPERM;
+ ret = -EPERM;
if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
- goto out_dput;
+ goto out_end_removing;
/*
* Do not allow deletion if the parent dir is the same
@@ -3036,37 +2410,33 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* of the subvol, not a random directory contained
* within it.
*/
- err = -EINVAL;
+ ret = -EINVAL;
if (root == dest)
- goto out_dput;
+ goto out_end_removing;
- err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
- if (err)
- goto out_dput;
+ ret = inode_permission(idmap, inode, MAY_WRITE | MAY_EXEC);
+ if (ret)
+ goto out_end_removing;
}
/* check if subvolume may be deleted by a user */
- err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
- if (err)
- goto out_dput;
+ ret = btrfs_may_delete(idmap, dir, dentry, 1);
+ if (ret)
+ goto out_end_removing;
if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
- err = -EINVAL;
- goto out_dput;
+ ret = -EINVAL;
+ goto out_end_removing;
}
- btrfs_inode_lock(inode, 0);
- err = btrfs_delete_subvolume(dir, dentry);
- btrfs_inode_unlock(inode, 0);
- if (!err) {
- fsnotify_rmdir(dir, dentry);
- d_delete(dentry);
- }
+ btrfs_inode_lock(BTRFS_I(inode), 0);
+ ret = btrfs_delete_subvolume(BTRFS_I(dir), dentry);
+ btrfs_inode_unlock(BTRFS_I(inode), 0);
+ if (!ret)
+ d_delete_notify(dir, dentry);
-out_dput:
- dput(dentry);
-out_unlock_dir:
- btrfs_inode_unlock(dir, 0);
+out_end_removing:
+ end_removing(dentry);
free_subvol_name:
kfree(subvol_name_ptr);
free_parent:
@@ -3077,7 +2447,7 @@ out_drop_write:
out:
kfree(vol_args2);
kfree(vol_args);
- return err;
+ return ret;
}
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
@@ -3111,18 +2481,37 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
* running and allows defrag on files open in read-only mode.
*/
if (!capable(CAP_SYS_ADMIN) &&
- inode_permission(&init_user_ns, inode, MAY_WRITE)) {
+ inode_permission(&nop_mnt_idmap, inode, MAY_WRITE)) {
ret = -EPERM;
goto out;
}
+ /*
+ * Don't allow defrag on pre-content watched files, as it could
+ * populate the page cache with 0's via readahead.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (argp) {
if (copy_from_user(&range, argp, sizeof(range))) {
ret = -EFAULT;
goto out;
}
- /* compression requires us to start the IO */
- if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) &&
+ (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* Compression or no-compression requires us to start the IO. */
+ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS) ||
+ (range.flags & BTRFS_DEFRAG_RANGE_NOCOMPRESS)) {
range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
range.extent_thresh = (u32)-1;
}
@@ -3130,7 +2519,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
/* the rest are all set to zero by kzalloc */
range.len = (u64)-1;
}
- ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
+ ret = btrfs_defrag_file(BTRFS_I(file_inode(file)), &file->f_ra,
&range, BTRFS_OLDEST_GENERATION, 0);
if (ret > 0)
ret = 0;
@@ -3146,13 +2535,36 @@ out:
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
struct btrfs_ioctl_vol_args *vol_args;
+ bool restore_op = false;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD))
- return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info, "device add not supported on extent tree v2 yet");
+ return -EINVAL;
+ }
+
+ if (fs_info->fs_devices->temp_fsid) {
+ btrfs_err(fs_info,
+ "device add not supported on cloned temp-fsid mount");
+ return -EINVAL;
+ }
+
+ if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
+ if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
+ return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+
+ /*
+ * We can do the device add because we have a paused balance;
+ * change the exclusive op type and remember we should bring
+ * back the paused balance.
+ */
+ fs_info->exclusive_operation = BTRFS_EXCLOP_DEV_ADD;
+ btrfs_exclop_start_unlock(fs_info);
+ restore_op = true;
+ }
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
@@ -3160,15 +2572,22 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
goto out;
}
- vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args_path(vol_args);
+ if (ret < 0)
+ goto out_free;
+
ret = btrfs_init_new_device(fs_info, vol_args->name);
if (!ret)
btrfs_info(fs_info, "disk added %s", vol_args->name);
+out_free:
kfree(vol_args);
out:
- btrfs_exclop_finish(fs_info);
+ if (restore_op)
+ btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
+ else
+ btrfs_exclop_finish(fs_info);
return ret;
}
@@ -3176,10 +2595,9 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
{
BTRFS_DEV_LOOKUP_ARGS(args);
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_ioctl_vol_args_v2 *vol_args;
- struct block_device *bdev = NULL;
- fmode_t mode;
+ struct file *bdev_file = NULL;
int ret;
bool cancel = false;
@@ -3187,17 +2605,18 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
return -EPERM;
vol_args = memdup_user(arg, sizeof(*vol_args));
- if (IS_ERR(vol_args)) {
- ret = PTR_ERR(vol_args);
- goto out;
- }
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
ret = -EOPNOTSUPP;
goto out;
}
- vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args);
+ if (ret < 0)
+ goto out;
+
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
args.devid = vol_args->devid;
} else if (!strcmp("cancel", vol_args->name)) {
@@ -3218,7 +2637,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
goto err_drop;
/* Exclusive operation is now claimed */
- ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
+ ret = btrfs_rm_device(fs_info, &args, &bdev_file);
btrfs_exclop_finish(fs_info);
@@ -3232,8 +2651,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
}
err_drop:
mnt_drop_write_file(file);
- if (bdev)
- blkdev_put(bdev, mode);
+ if (bdev_file)
+ bdev_fput(bdev_file);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
@@ -3244,12 +2663,11 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
BTRFS_DEV_LOOKUP_ARGS(args);
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_ioctl_vol_args *vol_args;
- struct block_device *bdev = NULL;
- fmode_t mode;
+ struct file *bdev_file = NULL;
int ret;
- bool cancel;
+ bool cancel = false;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3258,7 +2676,10 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
- vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args_path(vol_args);
+ if (ret < 0)
+ goto out_free;
+
if (!strcmp("cancel", vol_args->name)) {
cancel = true;
} else {
@@ -3274,22 +2695,23 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
cancel);
if (ret == 0) {
- ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
+ ret = btrfs_rm_device(fs_info, &args, &bdev_file);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
btrfs_exclop_finish(fs_info);
}
mnt_drop_write_file(file);
- if (bdev)
- blkdev_put(bdev, mode);
+ if (bdev_file)
+ bdev_fput(bdev_file);
out:
btrfs_put_dev_args_from_path(&args);
+out_free:
kfree(vol_args);
return ret;
}
-static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
+static long btrfs_ioctl_fs_info(const struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_fs_info_args *fi_args;
@@ -3326,7 +2748,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
}
if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
- fi_args->generation = fs_info->generation;
+ fi_args->generation = btrfs_get_fs_generation(fs_info);
fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
}
@@ -3343,7 +2765,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
return ret;
}
-static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+static long btrfs_ioctl_dev_info(const struct btrfs_fs_info *fs_info,
void __user *arg)
{
BTRFS_DEV_LOOKUP_ARGS(args);
@@ -3370,13 +2792,11 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
di_args->bytes_used = btrfs_device_get_bytes_used(dev);
di_args->total_bytes = btrfs_device_get_total_bytes(dev);
memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
- if (dev->name) {
- strncpy(di_args->path, rcu_str_deref(dev->name),
- sizeof(di_args->path) - 1);
- di_args->path[sizeof(di_args->path) - 1] = 0;
- } else {
+ memcpy(di_args->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
+ if (dev->name)
+ strscpy(di_args->path, btrfs_dev_name(dev), sizeof(di_args->path));
+ else
di_args->path[0] = '\0';
- }
out:
rcu_read_unlock();
@@ -3390,13 +2810,14 @@ out:
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root *new_root;
struct btrfs_dir_item *di;
struct btrfs_trans_handle *trans;
struct btrfs_path *path = NULL;
struct btrfs_disk_key disk_key;
+ struct fscrypt_str name = FSTR_INIT("default", 7);
u64 objectid = 0;
u64 dir_id;
int ret;
@@ -3421,7 +2842,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = PTR_ERR(new_root);
goto out;
}
- if (!is_fstree(new_root->root_key.objectid)) {
+ if (!btrfs_is_fstree(btrfs_root_id(new_root))) {
ret = -ENOENT;
goto out_free;
}
@@ -3440,7 +2861,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
dir_id = btrfs_super_root_dir(fs_info->super_copy);
di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
- dir_id, "default", 7, 1);
+ dir_id, &name, 1);
if (IS_ERR_OR_NULL(di)) {
btrfs_release_path(path);
btrfs_end_transaction(trans);
@@ -3452,7 +2873,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
- btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_release_path(path);
btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
@@ -3483,10 +2903,10 @@ static void get_block_group_info(struct list_head *groups_list,
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_ioctl_space_args space_args;
+ struct btrfs_ioctl_space_args space_args = { 0 };
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
- struct btrfs_ioctl_space_info *dest_orig;
+ struct btrfs_ioctl_space_info AUTO_KFREE(dest_orig);
struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
static const u64 types[] = {
@@ -3607,9 +3027,8 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
(arg + sizeof(struct btrfs_ioctl_space_args));
if (copy_to_user(user_dest, dest_orig, alloc_size))
- ret = -EFAULT;
+ return -EFAULT;
- kfree(dest_orig);
out:
if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
ret = -EFAULT;
@@ -3622,7 +3041,13 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
{
struct btrfs_trans_handle *trans;
u64 transid;
- int ret;
+
+ /*
+ * Start orphan cleanup here for the given root in case it hasn't been
+ * started already by other means. Errors are handled in the other
+ * functions during transaction commit.
+ */
+ btrfs_orphan_cleanup(root);
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
@@ -3630,15 +3055,11 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
return PTR_ERR(trans);
/* No running transaction, don't bother */
- transid = root->fs_info->last_trans_committed;
+ transid = btrfs_get_last_trans_committed(root->fs_info);
goto out;
}
transid = trans->transid;
- ret = btrfs_commit_transaction_async(trans);
- if (ret) {
- btrfs_end_transaction(trans);
- return ret;
- }
+ btrfs_commit_transaction_async(trans);
out:
if (argp)
if (copy_to_user(argp, &transid, sizeof(transid)))
@@ -3649,30 +3070,39 @@ out:
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
void __user *argp)
{
- u64 transid;
+ /* By default wait for the current transaction. */
+ u64 transid = 0;
- if (argp) {
+ if (argp)
if (copy_from_user(&transid, argp, sizeof(transid)))
return -EFAULT;
- } else {
- transid = 0; /* current trans */
- }
+
return btrfs_wait_for_commit(fs_info, transid);
}
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file));
struct btrfs_ioctl_scrub_args *sa;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info, "scrub: extent tree v2 not yet supported");
+ return -EINVAL;
+ }
+
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa))
return PTR_ERR(sa);
+ if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
ret = mnt_want_write_file(file);
if (ret)
@@ -3768,6 +3198,11 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info, "device replace not supported on extent tree v2 yet");
+ return -EINVAL;
+ }
+
p = memdup_user(arg, sizeof(*p));
if (IS_ERR(p))
return PTR_ERR(p);
@@ -3812,7 +3247,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
u64 rel_ptr;
int size;
struct btrfs_ioctl_ino_path_args *ipa = NULL;
- struct inode_fs_paths *ipath = NULL;
+ struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
struct btrfs_path *path;
if (!capable(CAP_DAC_READ_SEARCH))
@@ -3849,6 +3284,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
ipath->fspath->val[i] = rel_ptr;
}
+ btrfs_free_path(path);
+ path = NULL;
ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
ipath->fspath, size);
if (ret) {
@@ -3858,32 +3295,11 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
out:
btrfs_free_path(path);
- free_ipath(ipath);
kfree(ipa);
return ret;
}
-static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
-{
- struct btrfs_data_container *inodes = ctx;
- const size_t c = 3 * sizeof(u64);
-
- if (inodes->bytes_left >= c) {
- inodes->bytes_left -= c;
- inodes->val[inodes->elem_cnt] = inum;
- inodes->val[inodes->elem_cnt + 1] = offset;
- inodes->val[inodes->elem_cnt + 2] = root;
- inodes->elem_cnt += 3;
- } else {
- inodes->bytes_missing += c - inodes->bytes_left;
- inodes->bytes_left = 0;
- inodes->elem_missed += 3;
- }
-
- return 0;
-}
-
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
void __user *arg, int version)
{
@@ -3891,7 +3307,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
int size;
struct btrfs_ioctl_logical_ino_args *loi;
struct btrfs_data_container *inodes = NULL;
- struct btrfs_path *path = NULL;
bool ignore_offset;
if (!capable(CAP_SYS_ADMIN))
@@ -3919,21 +3334,13 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
size = min_t(u32, loi->size, SZ_16M);
}
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
- inodes = NULL;
- goto out;
+ goto out_loi;
}
- ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
- build_ino_list, inodes, ignore_offset);
+ ret = iterate_inodes_from_logical(loi->logical, fs_info, inodes, ignore_offset);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
@@ -3945,7 +3352,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
ret = -EFAULT;
out:
- btrfs_free_path(path);
kvfree(inodes);
out_loi:
kfree(loi);
@@ -3976,13 +3382,79 @@ void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
spin_unlock(&fs_info->balance_lock);
}
+/*
+ * Try to acquire fs_info::balance_mutex as well as set BTRFS_EXCLOP_BALANCE as
+ * required.
+ *
+ * @fs_info: the filesystem
+ * @excl_acquired: ptr to boolean value which is set to false in case balance
+ * is being resumed
+ *
+ * Return 0 on success, in which case fs_info::balance_mutex is acquired and
+ * exclusive ops are blocked. In case of failure return an error code.
+ */
+static int btrfs_try_lock_balance(struct btrfs_fs_info *fs_info, bool *excl_acquired)
+{
+ int ret;
+
+ /*
+ * Exclusive operation is locked. Three possibilities:
+ * (1) some other op is running
+ * (2) balance is running
+ * (3) balance is paused -- special case (think resume)
+ */
+ while (1) {
+ if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
+ *excl_acquired = true;
+ mutex_lock(&fs_info->balance_mutex);
+ return 0;
+ }
+
+ mutex_lock(&fs_info->balance_mutex);
+ if (fs_info->balance_ctl) {
+ /* This is either (2) or (3) */
+ if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
+ /* This is (2) */
+ ret = -EINPROGRESS;
+ goto out_failure;
+
+ } else {
+ mutex_unlock(&fs_info->balance_mutex);
+ /*
+ * Lock released to allow other waiters to
+ * continue, we'll reexamine the status again.
+ */
+ mutex_lock(&fs_info->balance_mutex);
+
+ if (fs_info->balance_ctl &&
+ !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
+ /* This is (3) */
+ *excl_acquired = false;
+ return 0;
+ }
+ }
+ } else {
+ /* This is (1) */
+ ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+ goto out_failure;
+ }
+
+ mutex_unlock(&fs_info->balance_mutex);
+ }
+
+out_failure:
+ mutex_unlock(&fs_info->balance_mutex);
+ *excl_acquired = false;
+ return ret;
+}
+
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
struct btrfs_balance_control *bctl;
- bool need_unlock; /* for mut. excl. ops lock */
+ bool need_unlock = true;
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -3992,105 +3464,55 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
if (ret)
return ret;
-again:
- if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
- mutex_lock(&fs_info->balance_mutex);
- need_unlock = true;
- goto locked;
+ bargs = memdup_user(arg, sizeof(*bargs));
+ if (IS_ERR(bargs)) {
+ ret = PTR_ERR(bargs);
+ bargs = NULL;
+ goto out;
}
- /*
- * mut. excl. ops lock is locked. Three possibilities:
- * (1) some other op is running
- * (2) balance is running
- * (3) balance is paused -- special case (think resume)
- */
- mutex_lock(&fs_info->balance_mutex);
- if (fs_info->balance_ctl) {
- /* this is either (2) or (3) */
- if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
- mutex_unlock(&fs_info->balance_mutex);
- /*
- * Lock released to allow other waiters to continue,
- * we'll reexamine the status again.
- */
- mutex_lock(&fs_info->balance_mutex);
-
- if (fs_info->balance_ctl &&
- !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
- /* this is (3) */
- need_unlock = false;
- goto locked;
- }
-
- mutex_unlock(&fs_info->balance_mutex);
- goto again;
- } else {
- /* this is (2) */
- mutex_unlock(&fs_info->balance_mutex);
- ret = -EINPROGRESS;
- goto out;
- }
- } else {
- /* this is (1) */
- mutex_unlock(&fs_info->balance_mutex);
- ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+ ret = btrfs_try_lock_balance(fs_info, &need_unlock);
+ if (ret)
goto out;
- }
-locked:
+ lockdep_assert_held(&fs_info->balance_mutex);
- if (arg) {
- bargs = memdup_user(arg, sizeof(*bargs));
- if (IS_ERR(bargs)) {
- ret = PTR_ERR(bargs);
+ if (bargs->flags & BTRFS_BALANCE_RESUME) {
+ if (!fs_info->balance_ctl) {
+ ret = -ENOTCONN;
goto out_unlock;
}
- if (bargs->flags & BTRFS_BALANCE_RESUME) {
- if (!fs_info->balance_ctl) {
- ret = -ENOTCONN;
- goto out_bargs;
- }
+ bctl = fs_info->balance_ctl;
+ spin_lock(&fs_info->balance_lock);
+ bctl->flags |= BTRFS_BALANCE_RESUME;
+ spin_unlock(&fs_info->balance_lock);
+ btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE);
- bctl = fs_info->balance_ctl;
- spin_lock(&fs_info->balance_lock);
- bctl->flags |= BTRFS_BALANCE_RESUME;
- spin_unlock(&fs_info->balance_lock);
+ goto do_balance;
+ }
- goto do_balance;
- }
- } else {
- bargs = NULL;
+ if (bargs->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
+ ret = -EINVAL;
+ goto out_unlock;
}
if (fs_info->balance_ctl) {
ret = -EINPROGRESS;
- goto out_bargs;
+ goto out_unlock;
}
bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
if (!bctl) {
ret = -ENOMEM;
- goto out_bargs;
- }
-
- if (arg) {
- memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
- memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
- memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
-
- bctl->flags = bargs->flags;
- } else {
- /* balance everything - no filters */
- bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
+ goto out_unlock;
}
- if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
- ret = -EINVAL;
- goto out_bctl;
- }
+ memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
+ memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
+ memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
+ bctl->flags = bargs->flags;
do_balance:
/*
* Ownership of bctl and exclusive operation goes to btrfs_balance.
@@ -4103,21 +3525,19 @@ do_balance:
ret = btrfs_balance(fs_info, bctl, bargs);
bctl = NULL;
- if ((ret == 0 || ret == -ECANCELED) && arg) {
+ if (ret == 0 || ret == -ECANCELED) {
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
}
-out_bctl:
kfree(bctl);
-out_bargs:
- kfree(bargs);
out_unlock:
mutex_unlock(&fs_info->balance_mutex);
if (need_unlock)
btrfs_exclop_finish(fs_info);
out:
mnt_drop_write_file(file);
+ kfree(bargs);
return ret;
}
@@ -4139,7 +3559,7 @@ static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_ioctl_balance_args *bargs;
+ struct btrfs_ioctl_balance_args AUTO_KFREE(bargs);
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
@@ -4161,8 +3581,6 @@ static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
-
- kfree(bargs);
out:
mutex_unlock(&fs_info->balance_mutex);
return ret;
@@ -4171,7 +3589,7 @@ out:
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_ioctl_quota_ctl_args *sa;
int ret;
@@ -4188,14 +3606,43 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
goto drop_write;
}
- down_write(&fs_info->subvol_sem);
-
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
- ret = btrfs_quota_enable(fs_info);
+ case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
+ down_write(&fs_info->subvol_sem);
+ ret = btrfs_quota_enable(fs_info, sa);
+ up_write(&fs_info->subvol_sem);
break;
case BTRFS_QUOTA_CTL_DISABLE:
+ /*
+ * Lock the cleaner mutex to prevent races with concurrent
+ * relocation, because relocation may be building backrefs for
+ * blocks of the quota root while we are deleting the root. This
+ * is like dropping fs roots of deleted snapshots/subvolumes; we
+ * need the same protection.
+ *
+ * This also prevents races between concurrent tasks trying to
+ * disable quotas, because we will unlock and relock
+ * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ *
+ * We take this here because we have the dependency of
+ *
+ * inode_lock -> subvol_sem
+ *
+ * because of rename. With relocation we can prealloc extents,
+ * so that makes the dependency chain
+ *
+ * cleaner_mutex -> inode_lock -> subvol_sem
+ *
+ * so we must take the cleaner_mutex here before we take the
+ * subvol_sem. The deadlock can't actually happen, but this
+ * quiets lockdep.
+ */
+ mutex_lock(&fs_info->cleaner_mutex);
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_disable(fs_info);
+ up_write(&fs_info->subvol_sem);
+ mutex_unlock(&fs_info->cleaner_mutex);
break;
default:
ret = -EINVAL;
@@ -4203,7 +3650,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
}
kfree(sa);
- up_write(&fs_info->subvol_sem);
drop_write:
mnt_drop_write_file(file);
return ret;
@@ -4212,9 +3658,10 @@ drop_write:
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_assign_args *sa;
+ struct btrfs_qgroup_list *prealloc = NULL;
struct btrfs_trans_handle *trans;
int ret;
int err;
@@ -4222,6 +3669,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!btrfs_qgroup_enabled(fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -4232,28 +3682,45 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
goto drop_write;
}
+ if (sa->assign) {
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
+ /*
+ * Prealloc ownership is moved to the relation handler, where it's used
+ * or freed on error.
+ */
if (sa->assign) {
- ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
+ ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc);
+ prealloc = NULL;
} else {
ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
}
/* update qgroup status and info */
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
err = btrfs_run_qgroups(trans);
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (err < 0)
- btrfs_handle_fs_error(fs_info, err,
- "failed to update qgroup status and info");
+ btrfs_warn(fs_info,
+ "qgroup status update failed after %s relation, marked as inconsistent",
+ sa->assign ? "adding" : "deleting");
err = btrfs_end_transaction(trans);
if (err && !ret)
ret = err;
out:
+ kfree(prealloc);
kfree(sa);
drop_write:
mnt_drop_write_file(file);
@@ -4272,6 +3739,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!btrfs_qgroup_enabled(root->fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -4287,6 +3757,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
goto out;
}
+ if (sa->create && btrfs_is_fstree(sa->qgroupid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -4323,6 +3798,9 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!btrfs_qgroup_enabled(root->fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -4342,7 +3820,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
qgroupid = sa->qgroupid;
if (!qgroupid) {
/* take the current subvol as qgroup */
- qgroupid = root->root_key.objectid;
+ qgroupid = btrfs_root_id(root);
}
ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
@@ -4361,13 +3839,16 @@ drop_write:
static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_ioctl_quota_rescan_args *qsa;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!btrfs_qgroup_enabled(fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -4411,8 +3892,7 @@ static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
return 0;
}
-static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
- void __user *arg)
+static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -4421,11 +3901,11 @@ static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
}
static long _btrfs_ioctl_set_received_subvol(struct file *file,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct btrfs_ioctl_received_subvol_args *sa)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root_item *root_item = &root->root_item;
struct btrfs_trans_handle *trans;
@@ -4433,7 +3913,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
int ret = 0;
int received_uuid_changed;
- if (!inode_owner_or_capable(mnt_userns, inode))
+ if (!inode_owner_or_capable(idmap, inode))
return -EPERM;
ret = mnt_want_write_file(file);
@@ -4473,8 +3953,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
!btrfs_is_empty_uuid(root_item->received_uuid)) {
ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- root->root_key.objectid);
- if (ret && ret != -ENOENT) {
+ btrfs_root_id(root));
+ if (unlikely(ret && ret != -ENOENT)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
@@ -4497,8 +3977,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
ret = btrfs_uuid_tree_add(trans, sa->uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- root->root_key.objectid);
- if (ret < 0 && ret != -EEXIST) {
+ btrfs_root_id(root));
+ if (unlikely(ret < 0 && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
@@ -4538,7 +4018,7 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
args64->rtime.nsec = args32->rtime.nsec;
args64->flags = args32->flags;
- ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
+ ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_idmap(file), args64);
if (ret)
goto out;
@@ -4572,7 +4052,7 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
if (IS_ERR(sa))
return PTR_ERR(sa);
- ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), sa);
+ ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_idmap(file), sa);
if (ret)
goto out;
@@ -4613,7 +4093,7 @@ static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_trans_handle *trans;
@@ -4644,7 +4124,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
}
spin_lock(&fs_info->super_lock);
- strcpy(super_block->label, label);
+ strscpy(super_block->label, label);
spin_unlock(&fs_info->super_lock);
ret = btrfs_commit_transaction(trans);
@@ -4688,13 +4168,13 @@ static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
return 0;
}
-static int check_feature_bits(struct btrfs_fs_info *fs_info,
+static int check_feature_bits(const struct btrfs_fs_info *fs_info,
enum btrfs_feature_set set,
u64 change_mask, u64 flags, u64 supported_flags,
u64 safe_set, u64 safe_clear)
{
const char *type = btrfs_feature_set_name(set);
- char *names;
+ const char AUTO_KFREE(names);
u64 disallowed, unsupported;
u64 set_mask = flags & change_mask;
u64 clear_mask = ~flags & change_mask;
@@ -4702,12 +4182,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
unsupported = set_mask & ~supported_flags;
if (unsupported) {
names = btrfs_printable_features(set, unsupported);
- if (names) {
+ if (names)
btrfs_warn(fs_info,
"this kernel does not support the %s feature bit%s",
names, strchr(names, ',') ? "s" : "");
- kfree(names);
- } else
+ else
btrfs_warn(fs_info,
"this kernel does not support %s bits 0x%llx",
type, unsupported);
@@ -4717,12 +4196,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
disallowed = set_mask & ~safe_set;
if (disallowed) {
names = btrfs_printable_features(set, disallowed);
- if (names) {
+ if (names)
btrfs_warn(fs_info,
"can't set the %s feature bit%s while mounted",
names, strchr(names, ',') ? "s" : "");
- kfree(names);
- } else
+ else
btrfs_warn(fs_info,
"can't set %s bits 0x%llx while mounted",
type, disallowed);
@@ -4732,12 +4210,11 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info,
disallowed = clear_mask & ~safe_clear;
if (disallowed) {
names = btrfs_printable_features(set, disallowed);
- if (names) {
+ if (names)
btrfs_warn(fs_info,
"can't clear the %s feature bit%s while mounted",
names, strchr(names, ',') ? "s" : "");
- kfree(names);
- } else
+ else
btrfs_warn(fs_info,
"can't clear %s bits 0x%llx while mounted",
type, disallowed);
@@ -4756,7 +4233,7 @@ check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_ioctl_feature_flags flags[2];
@@ -4824,14 +4301,14 @@ out_drop_write:
return ret;
}
-static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
+static int _btrfs_ioctl_send(struct btrfs_root *root, void __user *argp, bool compat)
{
struct btrfs_ioctl_send_args *arg;
int ret;
if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_send_args_32 args32;
+ struct btrfs_ioctl_send_args_32 args32 = { 0 };
ret = copy_from_user(&args32, argp, sizeof(args32));
if (ret)
@@ -4844,6 +4321,7 @@ static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
arg->clone_sources = compat_ptr(args32.clone_sources);
arg->parent_root = args32.parent_root;
arg->flags = args32.flags;
+ arg->version = args32.version;
memcpy(arg->reserved, args32.reserved,
sizeof(args32.reserved));
#else
@@ -4854,22 +4332,868 @@ static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
if (IS_ERR(arg))
return PTR_ERR(arg);
}
- ret = btrfs_ioctl_send(file, arg);
+ ret = btrfs_ioctl_send(root, arg);
kfree(arg);
return ret;
}
+static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
+ bool compat)
+{
+ struct btrfs_ioctl_encoded_io_args args = { 0 };
+ size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args,
+ flags);
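+ /*
+ * User space fills in the members up to and including 'flags'; the
+ * remaining members are written by the kernel and copied back below.
+ */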
+ size_t copy_end;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(file));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct iov_iter iter;
+ loff_t pos;
+ struct kiocb kiocb;
+ ssize_t ret;
+ u64 disk_bytenr, disk_io_size;
+ struct extent_state *cached_state = NULL;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+
+ if (compat) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32,
+ flags);
+ if (copy_from_user(&args32, argp, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ args.iov = compat_ptr(args32.iov);
+ args.iovcnt = args32.iovcnt;
+ args.offset = args32.offset;
+ args.flags = args32.flags;
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ copy_end = copy_end_kernel;
+ if (copy_from_user(&args, argp, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+ if (args.flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
+
+ ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ &iov, &iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&iter) == 0) {
+ ret = 0;
+ goto out_iov;
+ }
+ pos = args.offset;
+ ret = rw_verify_area(READ, file, &pos, args.len);
+ if (ret < 0)
+ goto out_iov;
+
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = pos;
+
+ ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
+ &disk_bytenr, &disk_io_size);
+
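+ /*
+ * -EIOCBQUEUED from btrfs_encoded_read() means the data is not inline
+ * and must be read from disk; the extent range is still locked, so do
+ * the regular encoded read here and unlock afterwards.
+ */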
+ if (ret == -EIOCBQUEUED) {
+ bool unlocked = false;
+ u64 start, lockend, count;
+
+ start = ALIGN_DOWN(kiocb.ki_pos, fs_info->sectorsize);
+ lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
+
+ if (args.compression)
+ count = disk_io_size;
+ else
+ count = args.len;
+
+ ret = btrfs_encoded_read_regular(&kiocb, &iter, start, lockend,
+ &cached_state, disk_bytenr,
+ disk_io_size, count,
+ args.compression, &unlocked);
+
+ if (!unlocked) {
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ }
+ }
+
+ if (ret >= 0) {
+ fsnotify_access(file);
+ if (copy_to_user(argp + copy_end,
+ (char *)&args + copy_end_kernel,
+ sizeof(args) - copy_end_kernel))
+ ret = -EFAULT;
+ }
+
+out_iov:
+ kfree(iov);
+out_acct:
+ if (ret > 0)
+ add_rchar(current, ret);
+ inc_syscr(current);
+ return ret;
+}
+
+static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool compat)
+{
+ struct btrfs_ioctl_encoded_io_args args;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct iov_iter iter;
+ loff_t pos;
+ struct kiocb kiocb;
+ ssize_t ret;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out_acct;
+ }
+
+ if (compat) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, argp, sizeof(args32))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ args.iov = compat_ptr(args32.iov);
+ args.iovcnt = args32.iovcnt;
+ args.offset = args32.offset;
+ args.flags = args32.flags;
+ args.len = args32.len;
+ args.unencoded_len = args32.unencoded_len;
+ args.unencoded_offset = args32.unencoded_offset;
+ args.compression = args32.compression;
+ args.encryption = args32.encryption;
+ memcpy(args.reserved, args32.reserved, sizeof(args.reserved));
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ if (copy_from_user(&args, argp, sizeof(args))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
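+ /*
+ * Validate the arguments before doing any I/O: no flag bits, zeroed
+ * reserved fields, a supported compression/encryption combination and
+ * consistent length fields.
+ */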
+ ret = -EINVAL;
+ if (args.flags != 0)
+ goto out_acct;
+ if (memchr_inv(args.reserved, 0, sizeof(args.reserved)))
+ goto out_acct;
+ if (args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE &&
+ args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ goto out_acct;
+ if (args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES ||
+ args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES)
+ goto out_acct;
+ if (args.unencoded_offset > args.unencoded_len)
+ goto out_acct;
+ if (args.len > args.unencoded_len - args.unencoded_offset)
+ goto out_acct;
+
+ ret = import_iovec(ITER_SOURCE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ &iov, &iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&iter) == 0) {
+ ret = 0;
+ goto out_iov;
+ }
+ pos = args.offset;
+ ret = rw_verify_area(WRITE, file, &pos, args.len);
+ if (ret < 0)
+ goto out_iov;
+
+ init_sync_kiocb(&kiocb, file);
+ ret = kiocb_set_rw_flags(&kiocb, 0, WRITE);
+ if (ret)
+ goto out_iov;
+ kiocb.ki_pos = pos;
+
+ file_start_write(file);
+
+ ret = btrfs_do_write_iter(&kiocb, &iter, &args);
+ if (ret > 0)
+ fsnotify_modify(file);
+
+ file_end_write(file);
+out_iov:
+ kfree(iov);
+out_acct:
+ if (ret > 0)
+ add_wchar(current, ret);
+ inc_syscw(current);
+ return ret;
+}
+
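+/*
+ * Parsed encoded I/O arguments and iovec state for a command issued via
+ * io_uring, kept together so a single allocation can be attached to the
+ * command (see struct io_btrfs_cmd below).
+ */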
+struct btrfs_uring_encoded_data {
+ struct btrfs_ioctl_encoded_io_args args;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov;
+ struct iov_iter iter;
+};
+
+/*
+ * Context that's attached to an encoded read io_uring command, in cmd->pdu. It
+ * contains the fields in btrfs_uring_read_extent that are necessary to finish
+ * off and clean up the I/O in btrfs_uring_read_finished.
+ */
+struct btrfs_uring_priv {
+ struct io_uring_cmd *cmd;
+ struct page **pages;
+ unsigned long nr_pages;
+ struct kiocb iocb;
+ struct iovec *iov;
+ struct iov_iter iter;
+ struct extent_state *cached_state;
+ u64 count;
+ u64 start;
+ u64 lockend;
+ int err;
+ bool compressed;
+};
+
+struct io_btrfs_cmd {
+ struct btrfs_uring_encoded_data *data;
+ struct btrfs_uring_priv *priv;
+};
+
+static void btrfs_uring_read_finished(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
+ struct btrfs_uring_priv *priv = bc->priv;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ pgoff_t index;
+ u64 cur;
+ size_t page_offset;
+ ssize_t ret;
+
+ /* The inode lock has already been acquired in btrfs_uring_read_extent. */
+ btrfs_lockdep_inode_acquire(inode, i_rwsem);
+
+ if (priv->err) {
+ ret = priv->err;
+ goto out;
+ }
+
+ if (priv->compressed) {
+ index = 0;
+ page_offset = 0;
+ } else {
+ index = (priv->iocb.ki_pos - priv->start) >> PAGE_SHIFT;
+ page_offset = offset_in_page(priv->iocb.ki_pos - priv->start);
+ }
+ cur = 0;
+ while (cur < priv->count) {
+ size_t bytes = min_t(size_t, priv->count - cur, PAGE_SIZE - page_offset);
+
+ if (copy_page_to_iter(priv->pages[index], page_offset, bytes,
+ &priv->iter) != bytes) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ index++;
+ cur += bytes;
+ page_offset = 0;
+ }
+ ret = priv->count;
+
+out:
+ btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+
+ io_uring_cmd_done(cmd, ret, IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
+ add_rchar(current, ret);
+
+ for (index = 0; index < priv->nr_pages; index++)
+ __free_page(priv->pages[index]);
+
+ kfree(priv->pages);
+ kfree(priv->iov);
+ kfree(priv);
+ kfree(bc->data);
+}
+
+void btrfs_uring_read_extent_endio(void *ctx, int err)
+{
+ struct btrfs_uring_priv *priv = ctx;
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(priv->cmd, struct io_btrfs_cmd);
+
+ priv->err = err;
+ bc->priv = priv;
+
+ io_uring_cmd_complete_in_task(priv->cmd, btrfs_uring_read_finished);
+}
+
+static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
+ u64 start, u64 lockend,
+ struct extent_state *cached_state,
+ u64 disk_bytenr, u64 disk_io_size,
+ size_t count, bool compressed,
+ struct iovec *iov, struct io_uring_cmd *cmd)
+{
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct page **pages;
+ struct btrfs_uring_priv *priv = NULL;
+ unsigned long nr_pages;
+ int ret;
+
+ nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (!pages)
+ return -ENOMEM;
+ ret = btrfs_alloc_page_array(nr_pages, pages, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ priv = kmalloc(sizeof(*priv), GFP_NOFS);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ priv->iocb = *iocb;
+ priv->iov = iov;
+ priv->iter = *iter;
+ priv->count = count;
+ priv->cmd = cmd;
+ priv->cached_state = cached_state;
+ priv->compressed = compressed;
+ priv->nr_pages = nr_pages;
+ priv->pages = pages;
+ priv->start = start;
+ priv->lockend = lockend;
+ priv->err = 0;
+
+ ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
+ disk_io_size, pages, priv);
+ if (ret && ret != -EIOCBQUEUED)
+ goto out_fail;
+
+ /*
+ * If we return -EIOCBQUEUED, we're deferring the cleanup to
+ * btrfs_uring_read_finished(), which will handle unlocking the extent
+ * and inode and freeing the allocations.
+ */
+
+ /*
+ * We're returning to userspace with the inode lock held, and that's
+ * okay - it'll get unlocked in a worker thread. Call
+ * btrfs_lockdep_inode_release() to avoid confusing lockdep.
+ */
+ btrfs_lockdep_inode_release(inode, i_rwsem);
+
+ return -EIOCBQUEUED;
+
+out_fail:
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ kfree(priv);
+ return ret;
+}
+
+static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct file *file = cmd->file;
+ struct btrfs_inode *inode = BTRFS_I(file->f_inode);
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
+ size_t copy_end;
+ int ret;
+ u64 disk_bytenr, disk_io_size;
+ loff_t pos;
+ struct kiocb kiocb;
+ struct extent_state *cached_state = NULL;
+ u64 start, lockend;
+ void __user *sqe_addr;
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
+ struct btrfs_uring_encoded_data *data = NULL;
+
+ if (cmd->flags & IORING_URING_CMD_REISSUE)
+ data = bc->data;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+ sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
+#else
+ ret = -ENOTTY;
+ goto out_acct;
+#endif
+ } else {
+ copy_end = copy_end_kernel;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_acct;
+ }
+
+ bc->data = data;
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ if (data->args.flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_free;
+ }
+ }
+
+ pos = data->args.offset;
+ ret = rw_verify_area(READ, file, &pos, data->args.len);
+ if (ret < 0)
+ goto out_free;
+
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = pos;
+
+ if (issue_flags & IO_URING_F_NONBLOCK)
+ kiocb.ki_flags |= IOCB_NOWAIT;
+
+ start = ALIGN_DOWN(pos, fs_info->sectorsize);
+ lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
+
+ ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
+ &disk_bytenr, &disk_io_size);
+ if (ret == -EAGAIN)
+ goto out_acct;
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ goto out_free;
+
+ file_accessed(file);
+
+ if (copy_to_user(sqe_addr + copy_end,
+ (const char *)&data->args + copy_end_kernel,
+ sizeof(data->args) - copy_end_kernel)) {
+ if (ret == -EIOCBQUEUED) {
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ }
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ if (ret == -EIOCBQUEUED) {
+ u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size);
+
+ /* Match ioctl by not returning past EOF if uncompressed. */
+ if (!data->args.compression)
+ count = min_t(u64, count, data->args.len);
+
+ ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend,
+ cached_state, disk_bytenr, disk_io_size,
+ count, data->args.compression,
+ data->iov, cmd);
+
+ goto out_acct;
+ }
+
+out_free:
+ kfree(data->iov);
+
+out_acct:
+ if (ret > 0)
+ add_rchar(current, ret);
+ inc_syscr(current);
+
+ if (ret != -EIOCBQUEUED && ret != -EAGAIN)
+ kfree(data);
+
+ return ret;
+}
+
+static int btrfs_uring_encoded_write(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct file *file = cmd->file;
+ loff_t pos;
+ struct kiocb kiocb;
+ ssize_t ret;
+ void __user *sqe_addr;
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
+ struct btrfs_uring_encoded_data *data = NULL;
+
+ if (cmd->flags & IORING_URING_CMD_REISSUE)
+ data = bc->data;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+ sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
+
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out_acct;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_acct;
+ }
+
+ bc->data = data;
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, sqe_addr, sizeof(args32))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+ data->args.len = args32.len;
+ data->args.unencoded_len = args32.unencoded_len;
+ data->args.unencoded_offset = args32.unencoded_offset;
+ data->args.compression = args32.compression;
+ data->args.encryption = args32.encryption;
+ memcpy(data->args.reserved, args32.reserved,
+ sizeof(data->args.reserved));
+#else
+ ret = -ENOTTY;
+ goto out_acct;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, sizeof(data->args))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ ret = -EINVAL;
+ if (data->args.flags != 0)
+ goto out_acct;
+ if (memchr_inv(data->args.reserved, 0, sizeof(data->args.reserved)))
+ goto out_acct;
+ if (data->args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE &&
+ data->args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ goto out_acct;
+ if (data->args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES ||
+ data->args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES)
+ goto out_acct;
+ if (data->args.unencoded_offset > data->args.unencoded_len)
+ goto out_acct;
+ if (data->args.len > data->args.unencoded_len - data->args.unencoded_offset)
+ goto out_acct;
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_SOURCE, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_iov;
+ }
+ }
+
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out_acct;
+ }
+
+ pos = data->args.offset;
+ ret = rw_verify_area(WRITE, file, &pos, data->args.len);
+ if (ret < 0)
+ goto out_iov;
+
+ init_sync_kiocb(&kiocb, file);
+ ret = kiocb_set_rw_flags(&kiocb, 0, WRITE);
+ if (ret)
+ goto out_iov;
+ kiocb.ki_pos = pos;
+
+ file_start_write(file);
+
+ ret = btrfs_do_write_iter(&kiocb, &data->iter, &data->args);
+ if (ret > 0)
+ fsnotify_modify(file);
+
+ file_end_write(file);
+out_iov:
+ kfree(data->iov);
+out_acct:
+ if (ret > 0)
+ add_wchar(current, ret);
+ inc_syscw(current);
+
+ if (ret != -EAGAIN)
+ kfree(data);
+ return ret;
+}
+
+int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(cmd->file)))))
+ return -EIO;
+
+ switch (cmd->cmd_op) {
+ case BTRFS_IOC_ENCODED_READ:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_READ_32:
+#endif
+ return btrfs_uring_encoded_read(cmd, issue_flags);
+
+ case BTRFS_IOC_ENCODED_WRITE:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_WRITE_32:
+#endif
+ return btrfs_uring_encoded_write(cmd, issue_flags);
+ }
+
+ return -EINVAL;
+}
+
+static int btrfs_ioctl_subvol_sync(struct btrfs_fs_info *fs_info, void __user *argp)
+{
+ struct btrfs_root *root;
+ struct btrfs_ioctl_subvol_wait args = { 0 };
+ signed long sched_ret;
+ int refs;
+ u64 root_flags;
+ bool wait_for_deletion = false;
+ bool found = false;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ switch (args.mode) {
+ case BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED:
+ /*
+ * Wait for the first one deleted, i.e. wait until all
+ * previously queued deletions are cleaned.
+ */
+ spin_lock(&fs_info->trans_lock);
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_last_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ found = true;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (!found)
+ return -ENOENT;
+
+ fallthrough;
+ case BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE:
+ if ((0 < args.subvolid && args.subvolid < BTRFS_FIRST_FREE_OBJECTID) ||
+ BTRFS_LAST_FREE_OBJECTID < args.subvolid)
+ return -EINVAL;
+ break;
+ case BTRFS_SUBVOL_SYNC_COUNT:
+ spin_lock(&fs_info->trans_lock);
+ args.count = list_count_nodes(&fs_info->dead_roots);
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ case BTRFS_SUBVOL_SYNC_PEEK_FIRST:
+ spin_lock(&fs_info->trans_lock);
+ /* Last in the list was deleted first. */
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_last_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ } else {
+ args.subvolid = 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ case BTRFS_SUBVOL_SYNC_PEEK_LAST:
+ spin_lock(&fs_info->trans_lock);
+ /* First in the list was deleted last. */
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ } else {
+ args.subvolid = 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* 32-bit limitation: the fs_roots_radix key is not wide enough. */
+ if (sizeof(unsigned long) != sizeof(u64) && args.subvolid > U32_MAX)
+ return -EOVERFLOW;
+
+ while (1) {
+ /* Wait for the specific one. */
+ if (down_read_interruptible(&fs_info->subvol_sem) == -EINTR)
+ return -EINTR;
+ refs = -1;
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ root = radix_tree_lookup(&fs_info->fs_roots_radix,
+ (unsigned long)args.subvolid);
+ if (root) {
+ spin_lock(&root->root_item_lock);
+ refs = btrfs_root_refs(&root->root_item);
+ root_flags = btrfs_root_flags(&root->root_item);
+ spin_unlock(&root->root_item_lock);
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ up_read(&fs_info->subvol_sem);
+
+ /* Subvolume does not exist. */
+ if (!root)
+ return -ENOENT;
+
+ /* Subvolume not deleted at all. */
+ if (refs > 0)
+ return -EEXIST;
+ /* We've waited and now the subvolume is gone. */
+ if (wait_for_deletion && refs == -1) {
+ /* Return the one we waited for as the last one. */
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* Subvolume not found on the first try (deleted or never existed). */
+ if (refs == -1)
+ return -ENOENT;
+
+ wait_for_deletion = true;
+ ASSERT(root_flags & BTRFS_ROOT_SUBVOL_DEAD);
+ sched_ret = schedule_timeout_interruptible(HZ);
+ /* Early wake up or error. */
+ if (sched_ret != 0)
+ return -EINTR;
+ }
+
+ return 0;
+}
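+
+/*
+ * Illustrative userspace sketch for the handler above ("fd" is an open fd on
+ * the filesystem and "subvolid" the subvolume to wait for; the struct layout
+ * is assumed from the fields used in the handler, and error handling for
+ * -EEXIST (not yet deleted) or -ENOENT is omitted):
+ *
+ *	struct btrfs_ioctl_subvol_wait wait = {
+ *		.mode = BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE,
+ *		.subvolid = subvolid,
+ *	};
+ *
+ *	if (ioctl(fd, BTRFS_IOC_SUBVOL_SYNC_WAIT, &wait) != 0)
+ *		perror("BTRFS_IOC_SUBVOL_SYNC_WAIT");
+ */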
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_ioctl_shutdown(struct btrfs_fs_info *fs_info, unsigned long arg)
+{
+ int ret = 0;
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (u32 __user *)arg))
+ return -EFAULT;
+
+ if (flags >= BTRFS_SHUTDOWN_FLAGS_LAST)
+ return -EINVAL;
+
+ if (btrfs_is_shutdown(fs_info))
+ return 0;
+
+ switch (flags) {
+ case BTRFS_SHUTDOWN_FLAGS_LOGFLUSH:
+ case BTRFS_SHUTDOWN_FLAGS_DEFAULT:
+ ret = freeze_super(fs_info->sb, FREEZE_HOLDER_KERNEL, NULL);
+ if (ret)
+ return ret;
+ btrfs_force_shutdown(fs_info);
+ ret = thaw_super(fs_info->sb, FREEZE_HOLDER_KERNEL, NULL);
+ if (ret)
+ return ret;
+ break;
+ case BTRFS_SHUTDOWN_FLAGS_NOLOGFLUSH:
+ btrfs_force_shutdown(fs_info);
+ break;
+ }
+ return ret;
+}
+#endif
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
void __user *argp = (void __user *)arg;
switch (cmd) {
case FS_IOC_GETVERSION:
- return btrfs_ioctl_getversion(file, argp);
+ return btrfs_ioctl_getversion(inode, argp);
case FS_IOC_GETFSLABEL:
return btrfs_ioctl_get_fslabel(fs_info, argp);
case FS_IOC_SETFSLABEL:
@@ -4877,19 +5201,19 @@ long btrfs_ioctl(struct file *file, unsigned int
case FITRIM:
return btrfs_ioctl_fitrim(fs_info, argp);
case BTRFS_IOC_SNAP_CREATE:
- return btrfs_ioctl_snap_create(file, argp, 0);
+ return btrfs_ioctl_snap_create(file, argp, false);
case BTRFS_IOC_SNAP_CREATE_V2:
- return btrfs_ioctl_snap_create_v2(file, argp, 0);
+ return btrfs_ioctl_snap_create_v2(file, argp, false);
case BTRFS_IOC_SUBVOL_CREATE:
- return btrfs_ioctl_snap_create(file, argp, 1);
+ return btrfs_ioctl_snap_create(file, argp, true);
case BTRFS_IOC_SUBVOL_CREATE_V2:
- return btrfs_ioctl_snap_create_v2(file, argp, 1);
+ return btrfs_ioctl_snap_create_v2(file, argp, true);
case BTRFS_IOC_SNAP_DESTROY:
return btrfs_ioctl_snap_destroy(file, argp, false);
case BTRFS_IOC_SNAP_DESTROY_V2:
return btrfs_ioctl_snap_destroy(file, argp, true);
case BTRFS_IOC_SUBVOL_GETFLAGS:
- return btrfs_ioctl_subvol_getflags(file, argp);
+ return btrfs_ioctl_subvol_getflags(BTRFS_I(inode), argp);
case BTRFS_IOC_SUBVOL_SETFLAGS:
return btrfs_ioctl_subvol_setflags(file, argp);
case BTRFS_IOC_DEFAULT_SUBVOL:
@@ -4910,14 +5234,12 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_fs_info(fs_info, argp);
case BTRFS_IOC_DEV_INFO:
return btrfs_ioctl_dev_info(fs_info, argp);
- case BTRFS_IOC_BALANCE:
- return btrfs_ioctl_balance(file, NULL);
case BTRFS_IOC_TREE_SEARCH:
- return btrfs_ioctl_tree_search(file, argp);
+ return btrfs_ioctl_tree_search(root, argp);
case BTRFS_IOC_TREE_SEARCH_V2:
- return btrfs_ioctl_tree_search_v2(file, argp);
+ return btrfs_ioctl_tree_search_v2(root, argp);
case BTRFS_IOC_INO_LOOKUP:
- return btrfs_ioctl_ino_lookup(file, argp);
+ return btrfs_ioctl_ino_lookup(root, argp);
case BTRFS_IOC_INO_PATHS:
return btrfs_ioctl_ino_to_path(root, argp);
case BTRFS_IOC_LOGICAL_INO:
@@ -4934,11 +5256,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return ret;
ret = btrfs_sync_fs(inode->i_sb, 1);
/*
- * The transaction thread may want to do more work,
- * namely it pokes the cleaner kthread that will start
- * processing uncleaned subvols.
+ * There may be work for the cleaner kthread to do (subvolume
+ * deletion, delayed iputs, defrag inodes, etc), so wake it up.
*/
- wake_up_process(fs_info->transaction_kthread);
+ wake_up_process(fs_info->cleaner_kthread);
return ret;
}
case BTRFS_IOC_START_SYNC:
@@ -4964,10 +5285,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
case BTRFS_IOC_SEND:
- return _btrfs_ioctl_send(file, argp, false);
+ return _btrfs_ioctl_send(root, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
case BTRFS_IOC_SEND_32:
- return _btrfs_ioctl_send(file, argp, true);
+ return _btrfs_ioctl_send(root, argp, true);
#endif
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(fs_info, argp);
@@ -4984,7 +5305,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN_STATUS:
return btrfs_ioctl_quota_rescan_status(fs_info, argp);
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
- return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
+ return btrfs_ioctl_quota_rescan_wait(fs_info);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
@@ -4994,15 +5315,33 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_SET_FEATURES:
return btrfs_ioctl_set_features(file, argp);
case BTRFS_IOC_GET_SUBVOL_INFO:
- return btrfs_ioctl_get_subvol_info(file, argp);
+ return btrfs_ioctl_get_subvol_info(inode, argp);
case BTRFS_IOC_GET_SUBVOL_ROOTREF:
- return btrfs_ioctl_get_subvol_rootref(file, argp);
+ return btrfs_ioctl_get_subvol_rootref(root, argp);
case BTRFS_IOC_INO_LOOKUP_USER:
return btrfs_ioctl_ino_lookup_user(file, argp);
case FS_IOC_ENABLE_VERITY:
return fsverity_ioctl_enable(file, (const void __user *)argp);
case FS_IOC_MEASURE_VERITY:
return fsverity_ioctl_measure(file, argp);
+ case FS_IOC_READ_VERITY_METADATA:
+ return fsverity_ioctl_read_metadata(file, argp);
+ case BTRFS_IOC_ENCODED_READ:
+ return btrfs_ioctl_encoded_read(file, argp, false);
+ case BTRFS_IOC_ENCODED_WRITE:
+ return btrfs_ioctl_encoded_write(file, argp, false);
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_READ_32:
+ return btrfs_ioctl_encoded_read(file, argp, true);
+ case BTRFS_IOC_ENCODED_WRITE_32:
+ return btrfs_ioctl_encoded_write(file, argp, true);
+#endif
+ case BTRFS_IOC_SUBVOL_SYNC_WAIT:
+ return btrfs_ioctl_subvol_sync(fs_info, argp);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ case BTRFS_IOC_SHUTDOWN:
+ return btrfs_ioctl_shutdown(fs_info, arg);
+#endif
}
return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
new file mode 100644
index 000000000000..ccf6bed9cc24
--- /dev/null
+++ b/fs/btrfs/ioctl.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_IOCTL_H
+#define BTRFS_IOCTL_H
+
+#include <linux/types.h>
+
+struct file;
+struct dentry;
+struct mnt_idmap;
+struct file_kattr;
+struct io_uring_cmd;
+struct btrfs_inode;
+struct btrfs_fs_info;
+struct btrfs_ioctl_balance_args;
+
+long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int btrfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int btrfs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
+int btrfs_ioctl_get_supported_features(void __user *arg);
+void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode);
+void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_balance_args *bargs);
+int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void btrfs_uring_read_extent_endio(void *ctx, int err);
+
+#endif
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 313d9d685adb..0035851d72b0 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -8,12 +8,110 @@
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
-#include "misc.h"
+#include <trace/events/btrfs.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
/*
+ * Lockdep class keys for extent_buffer->lock's in this root. For a given
+ * eb, the lockdep key is determined by the btrfs_root it belongs to and
+ * the level the eb occupies in the tree.
+ *
+ * Different roots are used for different purposes and may nest inside each
+ * other and they require separate keysets. As lockdep keys should be
+ * static, assign keysets according to the purpose of the root as indicated
+ * by btrfs_root->root_key.objectid. This ensures that all special purpose
+ * roots have separate keysets.
+ *
+ * Lock-nesting across peer nodes is always done with the immediate parent
+ * node locked thus preventing deadlock. As lockdep doesn't know this, use
+ * subclass to avoid triggering lockdep warning in such cases.
+ *
+ * The key is set by the readpage_end_io_hook after the buffer has passed
+ * csum validation but before the pages are unlocked. It is also set by
+ * btrfs_init_new_buffer on freshly allocated blocks.
+ *
+ * We also add a check to make sure the highest level of the tree is the
+ * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
+ * needs updating as well.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if BTRFS_MAX_LEVEL != 8
+#error
+#endif
+
+#define DEFINE_LEVEL(stem, level) \
+ .names[level] = "btrfs-" stem "-0" #level,
+
+#define DEFINE_NAME(stem) \
+ DEFINE_LEVEL(stem, 0) \
+ DEFINE_LEVEL(stem, 1) \
+ DEFINE_LEVEL(stem, 2) \
+ DEFINE_LEVEL(stem, 3) \
+ DEFINE_LEVEL(stem, 4) \
+ DEFINE_LEVEL(stem, 5) \
+ DEFINE_LEVEL(stem, 6) \
+ DEFINE_LEVEL(stem, 7)
+
+static struct btrfs_lockdep_keyset {
+ u64 id; /* root objectid */
+ /* Longest entry: btrfs-block-group-00 */
+ char names[BTRFS_MAX_LEVEL][24];
+ struct lock_class_key keys[BTRFS_MAX_LEVEL];
+} btrfs_lockdep_keysets[] = {
+ { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") },
+ { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") },
+ { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") },
+ { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") },
+ { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") },
+ { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") },
+ { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") },
+ { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") },
+ { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") },
+ { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
+ { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
+ { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
+ { .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") },
+ { .id = 0, DEFINE_NAME("tree") },
+};
+
+#undef DEFINE_LEVEL
+#undef DEFINE_NAME
+
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
+{
+ struct btrfs_lockdep_keyset *ks;
+
+ ASSERT(level < ARRAY_SIZE(ks->keys));
+
+ /* Find the matching keyset; id 0 is the default entry. */
+ for (ks = btrfs_lockdep_keysets; ks->id; ks++)
+ if (ks->id == objectid)
+ break;
+
+ lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
+}
+
+void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
+{
+ if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
+ btrfs_set_buffer_lockdep_class(btrfs_root_id(root),
+ eb, btrfs_header_level(eb));
+}
+
+#endif
+
+#ifdef CONFIG_BTRFS_DEBUG
+static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner)
+{
+ eb->lock_owner = owner;
+}
+#else
+static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
+#endif
+
+/*
* Extent buffer locking
* =====================
*
@@ -30,14 +128,14 @@
*/
/*
- * __btrfs_tree_read_lock - lock extent buffer for read
+ * btrfs_tree_read_lock_nested - lock extent buffer for read
* @eb: the eb to be locked
* @nest: the nesting level to be used for lockdep
*
* This takes the read lock on the extent buffer, using the specified nesting
* level for lockdep purposes.
*/
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
+void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
u64 start_ns = 0;
@@ -45,43 +143,21 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
start_ns = ktime_get_ns();
down_read_nested(&eb->lock, nest);
- eb->lock_owner = current->pid;
trace_btrfs_tree_read_lock(eb, start_ns);
}
-void btrfs_tree_read_lock(struct extent_buffer *eb)
-{
- __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
-}
-
/*
* Try-lock for read.
*
- * Return 1 if the rwlock has been taken, 0 otherwise
+ * Return true if the rwlock has been taken, false otherwise
*/
-int btrfs_try_tree_read_lock(struct extent_buffer *eb)
+bool btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
if (down_read_trylock(&eb->lock)) {
- eb->lock_owner = current->pid;
trace_btrfs_try_tree_read_lock(eb);
- return 1;
+ return true;
}
- return 0;
-}
-
-/*
- * Try-lock for write.
- *
- * Return 1 if the rwlock has been taken, 0 otherwise
- */
-int btrfs_try_tree_write_lock(struct extent_buffer *eb)
-{
- if (down_write_trylock(&eb->lock)) {
- eb->lock_owner = current->pid;
- trace_btrfs_try_tree_write_lock(eb);
- return 1;
- }
- return 0;
+ return false;
}
/*
@@ -90,18 +166,18 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
trace_btrfs_tree_read_unlock(eb);
- eb->lock_owner = 0;
up_read(&eb->lock);
}
/*
- * __btrfs_tree_lock - lock eb for write
+ * Lock eb for write.
+ *
* @eb: the eb to lock
* @nest: the nesting to use for the lock
*
* Returns with the eb->lock write locked.
*/
-void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
+void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
__acquires(&eb->lock)
{
u64 start_ns = 0;
@@ -110,22 +186,17 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
start_ns = ktime_get_ns();
down_write_nested(&eb->lock, nest);
- eb->lock_owner = current->pid;
+ btrfs_set_eb_lock_owner(eb, current->pid);
trace_btrfs_tree_lock(eb, start_ns);
}
-void btrfs_tree_lock(struct extent_buffer *eb)
-{
- __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
-}
-
/*
* Release the write lock.
*/
void btrfs_tree_unlock(struct extent_buffer *eb)
{
trace_btrfs_tree_unlock(eb);
- eb->lock_owner = 0;
+ btrfs_set_eb_lock_owner(eb, 0);
up_write(&eb->lock);
}
@@ -167,6 +238,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
while (1) {
eb = btrfs_root_node(root);
+
+ btrfs_maybe_reset_lockdep_class(root, eb);
btrfs_tree_lock(eb);
if (eb == root->node)
break;
@@ -188,6 +261,8 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
while (1) {
eb = btrfs_root_node(root);
+
+ btrfs_maybe_reset_lockdep_class(root, eb);
btrfs_tree_read_lock(eb);
if (eb == root->node)
break;
@@ -198,6 +273,31 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
}
/*
+ * Loop around taking references on and locking the root node of the tree in
+ * nowait mode until we end up with a lock on the root node or returning to
+ * avoid blocking.
+ *
+ * Return: root extent buffer with read lock held or -EAGAIN.
+ */
+struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
+{
+ struct extent_buffer *eb;
+
+ while (1) {
+ eb = btrfs_root_node(root);
+ if (!btrfs_try_tree_read_lock(eb)) {
+ free_extent_buffer(eb);
+ return ERR_PTR(-EAGAIN);
+ }
+ if (eb == root->node)
+ break;
+ btrfs_tree_read_unlock(eb);
+ free_extent_buffer(eb);
+ }
+ return eb;
+}
+
+/*
* DREW locks
* ==========
*
@@ -211,24 +311,12 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
* acquire the lock.
*/
-int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
+void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
- int ret;
-
- ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
- if (ret)
- return ret;
-
atomic_set(&lock->readers, 0);
+ atomic_set(&lock->writers, 0);
init_waitqueue_head(&lock->pending_readers);
init_waitqueue_head(&lock->pending_writers);
-
- return 0;
-}
-
-void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
-{
- percpu_counter_destroy(&lock->writers);
}
/* Return true if acquisition is successful, false otherwise */
@@ -237,10 +325,10 @@ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
if (atomic_read(&lock->readers))
return false;
- percpu_counter_inc(&lock->writers);
+ atomic_inc(&lock->writers);
/* Ensure writers count is updated before we check for pending readers */
- smp_mb();
+ smp_mb__after_atomic();
if (atomic_read(&lock->readers)) {
btrfs_drew_write_unlock(lock);
return false;
@@ -260,8 +348,12 @@ void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
- percpu_counter_dec(&lock->writers);
- cond_wake_up(&lock->pending_readers);
+ /*
+ * atomic_dec_and_test() implies a full barrier, so woken up readers are
+ * guaranteed to see the decrement.
+ */
+ if (atomic_dec_and_test(&lock->writers))
+ wake_up(&lock->pending_readers);
}
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
@@ -269,15 +361,14 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
atomic_inc(&lock->readers);
/*
- * Ensure the pending reader count is perceieved BEFORE this reader
+ * Ensure the pending reader count is perceived BEFORE this reader
* goes to sleep in case of active writers. This guarantees new writers
* won't be allowed and that the current reader will be woken up when
* the last active writer finishes its jobs.
*/
smp_mb__after_atomic();
- wait_event(lock->pending_readers,
- percpu_counter_sum(&lock->writers) == 0);
+ wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index bbc45534ae9a..a4673e7d95d7 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -8,9 +8,14 @@
#include <linux/atomic.h>
#include <linux/wait.h>
+#include <linux/lockdep.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"
+struct extent_buffer;
+struct btrfs_path;
+struct btrfs_root;
+
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
@@ -69,7 +74,7 @@ enum btrfs_lock_nesting {
BTRFS_NESTING_NEW_ROOT,
/*
- * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so
+ * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
* add this in here and add a static_assert to keep us from going over
* the limit. As of this writing we're limited to 8, and we're
* definitely using 8, hence this check to keep us from messing up in
@@ -78,30 +83,129 @@ enum btrfs_lock_nesting {
BTRFS_NESTING_MAX,
};
+enum btrfs_lockdep_trans_states {
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
+ BTRFS_LOCKDEP_TRANS_UNBLOCKED,
+ BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
+ BTRFS_LOCKDEP_TRANS_COMPLETED,
+};
+
+/*
+ * Lockdep annotation for wait events.
+ *
+ * @owner: The struct where the lockdep map is defined
+ * @lock: The lockdep map corresponding to a wait event
+ *
+ * This macro is used to annotate a wait event. In this case a thread acquires
+ * the lockdep map as writer (exclusive lock) because it has to block until all
+ * the threads that hold the lock as readers signal the condition for the wait
+ * event and release their locks.
+ */
+#define btrfs_might_wait_for_event(owner, lock) \
+ do { \
+ rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_); \
+ rwsem_release(&owner->lock##_map, _THIS_IP_); \
+ } while (0)
+
+/*
+ * Protection for the resource/condition of a wait event.
+ *
+ * @owner: The struct where the lockdep map is defined
+ * @lock: The lockdep map corresponding to a wait event
+ *
+ * Many threads can modify the condition for the wait event at the same time
+ * and signal the threads that block on the wait event. The threads that modify
+ * the condition and do the signaling acquire the lock as readers (shared
+ * lock).
+ */
+#define btrfs_lockdep_acquire(owner, lock) \
+ rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)
+
+/*
+ * Used after signaling the condition for a wait event to release the lockdep
+ * map held by a reader thread.
+ */
+#define btrfs_lockdep_release(owner, lock) \
+ rwsem_release(&owner->lock##_map, _THIS_IP_)
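+
+/*
+ * Illustrative pairing of the annotations above. This is only a sketch:
+ * "fs_info" stands for the owner struct, and "pending_ordered", "nr_ordered"
+ * and "ordered_wq" are placeholder names for the lockdep map, condition and
+ * wait queue.
+ *
+ *	// One-time setup of the lockdep map:
+ *	btrfs_lockdep_init_map(fs_info, pending_ordered);
+ *
+ *	// A thread the waiter depends on holds the map as reader:
+ *	btrfs_lockdep_acquire(fs_info, pending_ordered);
+ *	...
+ *	atomic_dec(&fs_info->nr_ordered);	// satisfy the condition
+ *	wake_up(&fs_info->ordered_wq);		// signal the waiter
+ *	btrfs_lockdep_release(fs_info, pending_ordered);
+ *
+ *	// The thread that blocks annotates before waiting:
+ *	btrfs_might_wait_for_event(fs_info, pending_ordered);
+ *	wait_event(fs_info->ordered_wq, atomic_read(&fs_info->nr_ordered) == 0);
+ */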
+
+/*
+ * Used to account for the fact that when doing io_uring encoded I/O, we can
+ * return to userspace with the inode lock still held.
+ */
+#define btrfs_lockdep_inode_acquire(owner, lock) \
+ rwsem_acquire_read(&owner->vfs_inode.lock.dep_map, 0, 0, _THIS_IP_)
+
+#define btrfs_lockdep_inode_release(owner, lock) \
+ rwsem_release(&owner->vfs_inode.lock.dep_map, _THIS_IP_)
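+
+/*
+ * Sketch of the intended pairing for the inode lock annotations above,
+ * mirroring the encoded read path in ioctl.c: the submitting side drops the
+ * annotation before returning to userspace with the inode lock still held,
+ * and the completion task work re-acquires it before the lock is released.
+ *
+ *	// Submission path, about to return with the inode lock held:
+ *	btrfs_lockdep_inode_release(inode, i_rwsem);
+ *	return -EIOCBQUEUED;
+ *
+ *	// Completion task work, still holding the inode lock:
+ *	btrfs_lockdep_inode_acquire(inode, i_rwsem);
+ *	...
+ *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ */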
+
+/*
+ * Macros for the transaction states wait events, similar to the generic wait
+ * event macros.
+ */
+#define btrfs_might_wait_for_state(owner, i) \
+ do { \
+ rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
+ rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_); \
+ } while (0)
+
+#define btrfs_trans_state_lockdep_acquire(owner, i) \
+ rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)
+
+#define btrfs_trans_state_lockdep_release(owner, i) \
+ rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)
+
+/* Initialization of the lockdep map */
+#define btrfs_lockdep_init_map(owner, lock) \
+ do { \
+ static struct lock_class_key lock##_key; \
+ lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
+ } while (0)
+
+/* Initialization of the transaction states lockdep maps. */
+#define btrfs_state_lockdep_init_map(owner, lock, state) \
+ do { \
+ static struct lock_class_key lock##_key; \
+ lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
+ &lock##_key, 0); \
+ } while (0)
+
static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
"too many lock subclasses defined");
-struct btrfs_path;
+void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
+
+static inline void btrfs_tree_lock(struct extent_buffer *eb)
+{
+ btrfs_tree_lock_nested(eb, BTRFS_NESTING_NORMAL);
+}
-void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
-void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
-void btrfs_tree_read_lock(struct extent_buffer *eb);
+void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
+
+static inline void btrfs_tree_read_lock(struct extent_buffer *eb)
+{
+ btrfs_tree_read_lock_nested(eb, BTRFS_NESTING_NORMAL);
+}
+
void btrfs_tree_read_unlock(struct extent_buffer *eb);
-int btrfs_try_tree_read_lock(struct extent_buffer *eb);
-int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+bool btrfs_try_tree_read_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
+struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
lockdep_assert_held_write(&eb->lock);
}
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+{
+ lockdep_assert_held_read(&eb->lock);
+}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
#endif
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
@@ -118,17 +222,30 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
struct btrfs_drew_lock {
atomic_t readers;
- struct percpu_counter writers;
+ atomic_t writers;
wait_queue_head_t pending_writers;
wait_queue_head_t pending_readers;
};
-int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
-void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
+void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
+void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
+#else
+static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
+ struct extent_buffer *eb, int level)
+{
+}
+static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
+ struct extent_buffer *eb)
+{
+}
+#endif
+
#endif
diff --git a/fs/btrfs/lru_cache.c b/fs/btrfs/lru_cache.c
new file mode 100644
index 000000000000..fd88af17d8d9
--- /dev/null
+++ b/fs/btrfs/lru_cache.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include "lru_cache.h"
+#include "messages.h"
+
+/*
+ * Initialize a cache object.
+ *
+ * @cache: The cache.
+ * @max_size: Maximum size (number of entries) for the cache.
+ * Use 0 for unlimited size; in that case it's the user's
+ * responsibility to trim the cache.
+ */
+void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
+{
+ INIT_LIST_HEAD(&cache->lru_list);
+ mt_init(&cache->entries);
+ cache->size = 0;
+ cache->max_size = max_size;
+}
+
+static struct btrfs_lru_cache_entry *match_entry(struct list_head *head, u64 key,
+ u64 gen)
+{
+ struct btrfs_lru_cache_entry *entry;
+
+ list_for_each_entry(entry, head, list) {
+ if (entry->key == key && entry->gen == gen)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Look up an entry in the cache.
+ *
+ * @cache: The cache.
+ * @key: The key of the entry we are looking for.
+ * @gen: Generation associated with the key.
+ *
+ * Returns the entry associated with the key or NULL if none found.
+ */
+struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
+ u64 key, u64 gen)
+{
+ struct list_head *head;
+ struct btrfs_lru_cache_entry *entry;
+
+ head = mtree_load(&cache->entries, key);
+ if (!head)
+ return NULL;
+
+ entry = match_entry(head, key, gen);
+ if (entry)
+ list_move_tail(&entry->lru_list, &cache->lru_list);
+
+ return entry;
+}
+
+/*
+ * Remove an entry from the cache.
+ *
+ * @cache: The cache to remove from.
+ * @entry: The entry to remove from the cache.
+ *
+ * Note: this also frees the memory used by the entry.
+ */
+void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *entry)
+{
+ struct list_head *prev = entry->list.prev;
+
+ ASSERT(cache->size > 0);
+ ASSERT(!mtree_empty(&cache->entries));
+
+ list_del(&entry->list);
+ list_del(&entry->lru_list);
+
+ if (list_empty(prev)) {
+ struct list_head *head;
+
+ /*
+ * If the list the entry was linked in is now empty, the only node
+ * left is the head stored in the maple tree, so remove it from the
+ * tree and free it.
+ */
+ head = mtree_erase(&cache->entries, entry->key);
+ ASSERT(head == prev);
+ kfree(head);
+ }
+
+ kfree(entry);
+ cache->size--;
+}
+
+/*
+ * Store an entry in the cache.
+ *
+ * @cache: The cache.
+ * @entry: The entry to store.
+ *
+ * Returns 0 on success and < 0 on error.
+ */
+int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *new_entry,
+ gfp_t gfp)
+{
+ const u64 key = new_entry->key;
+ struct list_head *head;
+ int ret;
+
+ head = kmalloc(sizeof(*head), gfp);
+ if (!head)
+ return -ENOMEM;
+
+ ret = mtree_insert(&cache->entries, key, head, gfp);
+ if (ret == 0) {
+ INIT_LIST_HEAD(head);
+ list_add_tail(&new_entry->list, head);
+ } else if (ret == -EEXIST) {
+ kfree(head);
+ head = mtree_load(&cache->entries, key);
+ ASSERT(head != NULL);
+ if (match_entry(head, key, new_entry->gen) != NULL)
+ return -EEXIST;
+ list_add_tail(&new_entry->list, head);
+ } else if (ret < 0) {
+ kfree(head);
+ return ret;
+ }
+
+ if (cache->max_size > 0 && cache->size == cache->max_size) {
+ struct btrfs_lru_cache_entry *lru_entry;
+
+ lru_entry = list_first_entry(&cache->lru_list,
+ struct btrfs_lru_cache_entry,
+ lru_list);
+ btrfs_lru_cache_remove(cache, lru_entry);
+ }
+
+ list_add_tail(&new_entry->lru_list, &cache->lru_list);
+ cache->size++;
+
+ return 0;
+}
+
+/*
+ * Empty a cache.
+ *
+ * @cache: The cache to empty.
+ *
+ * Removes all entries from the cache.
+ */
+void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache)
+{
+ struct btrfs_lru_cache_entry *entry;
+ struct btrfs_lru_cache_entry *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)
+ btrfs_lru_cache_remove(cache, entry);
+
+ ASSERT(cache->size == 0);
+ ASSERT(mtree_empty(&cache->entries));
+}
diff --git a/fs/btrfs/lru_cache.h b/fs/btrfs/lru_cache.h
new file mode 100644
index 000000000000..07f1bb1c6aa3
--- /dev/null
+++ b/fs/btrfs/lru_cache.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_LRU_CACHE_H
+#define BTRFS_LRU_CACHE_H
+
+#include <linux/types.h>
+#include <linux/maple_tree.h>
+#include <linux/list.h>
+
+/*
+ * A cache entry. This is meant to be embedded in a structure of a user of
+ * this module. Similar to how struct list_head and struct rb_node are used.
+ *
+ * Note: it should be embedded as the first element in a struct (offset 0), and
+ * this module assumes it was allocated with kmalloc(), so it calls kfree() when
+ * it needs to free an entry.
+ */
+struct btrfs_lru_cache_entry {
+ struct list_head lru_list;
+ u64 key;
+ /*
+ * Optional generation associated with a key. Use 0 if not needed/used.
+ * Entries with the same key and different generations are stored in a
+ * linked list, so use this only for cases where there's a small number
+ * of different generations.
+ */
+ u64 gen;
+ /*
+ * The maple tree uses unsigned long type for the keys, which is 32 bits
+ * on 32-bit systems and 64 bits on 64-bit systems. So if we want to
+ * use something like inode numbers as keys, which are always a u64, we
+ * have to deal with this in a special way - we store the key in the
+ * entry itself, as a u64, and the values inserted into the maple tree
+ * are linked lists of entries - so in case we are on a 64-bit system,
+ * that list always has a single entry, while on 32-bit systems it
+ * may have more than one, with each entry having the same value for
+ * the lower 32 bits of the u64 key.
+ */
+ struct list_head list;
+};
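+
+/*
+ * Illustrative usage sketch (struct and variable names are hypothetical):
+ * embed the entry as the first member, allocate it with kmalloc() and let
+ * the cache free it on eviction or removal.
+ *
+ *	struct name_cache_entry {
+ *		struct btrfs_lru_cache_entry entry;	// must be at offset 0
+ *		int name_len;
+ *		char name[];
+ *	};
+ *
+ *	nce = kmalloc(sizeof(*nce) + name_len, GFP_KERNEL);
+ *	nce->entry.key = ino;
+ *	nce->entry.gen = gen;
+ *	ret = btrfs_lru_cache_store(&cache, &nce->entry, GFP_KERNEL);
+ *	if (ret)		// -EEXIST or -ENOMEM: entry was not taken over
+ *		kfree(nce);
+ */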
+
+struct btrfs_lru_cache {
+ struct list_head lru_list;
+ struct maple_tree entries;
+ /* Number of entries stored in the cache. */
+ unsigned int size;
+ /* Maximum number of entries the cache can have. */
+ unsigned int max_size;
+};
+
+#define btrfs_lru_cache_for_each_entry_safe(cache, entry, tmp) \
+ list_for_each_entry_safe_reverse((entry), (tmp), &(cache)->lru_list, lru_list)
+
+static inline struct btrfs_lru_cache_entry *btrfs_lru_cache_lru_entry(
+ struct btrfs_lru_cache *cache)
+{
+ return list_first_entry_or_null(&cache->lru_list,
+ struct btrfs_lru_cache_entry, lru_list);
+}
+
+void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size);
+struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
+ u64 key, u64 gen);
+int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *new_entry,
+ gfp_t gfp);
+void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *entry);
+void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache);
+
+#endif
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 65cb0766e62d..4758f66da449 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -13,8 +13,11 @@
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
+#include "messages.h"
#include "compression.h"
#include "ctree.h"
+#include "super.h"
+#include "btrfs_inode.h"
#define LZO_LEN 4
@@ -62,7 +65,14 @@ struct workspace {
struct list_head list;
};
-static struct workspace_manager wsm;
+static u32 workspace_buf_length(const struct btrfs_fs_info *fs_info)
+{
+ return lzo1x_worst_compress(fs_info->sectorsize);
+}
+static u32 workspace_cbuf_length(const struct btrfs_fs_info *fs_info)
+{
+ return lzo1x_worst_compress(fs_info->sectorsize);
+}
void lzo_free_workspace(struct list_head *ws)
{
@@ -74,7 +84,7 @@ void lzo_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *lzo_alloc_workspace(unsigned int level)
+struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info)
{
struct workspace *workspace;
@@ -82,9 +92,9 @@ struct list_head *lzo_alloc_workspace(unsigned int level)
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
- workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
+ workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
+ workspace->buf = kvmalloc(workspace_buf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
+ workspace->cbuf = kvmalloc(workspace_cbuf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);
if (!workspace->mem || !workspace->buf || !workspace->cbuf)
goto fail;
@@ -122,35 +132,40 @@ static inline size_t read_compress_length(const char *buf)
*
* Will allocate new pages when needed.
*/
-static int copy_compressed_data_to_page(char *compressed_data,
+static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
+ char *compressed_data,
size_t compressed_size,
- struct page **out_pages,
- u32 *cur_out,
- const u32 sectorsize)
+ struct folio **out_folios,
+ unsigned long max_nr_folio,
+ u32 *cur_out)
{
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
u32 sector_bytes_left;
u32 orig_out;
- struct page *cur_page;
+ struct folio *cur_folio;
char *kaddr;
+ if ((*cur_out >> min_folio_shift) >= max_nr_folio)
+ return -E2BIG;
+
/*
+ * We never allow a segment header to cross a sector boundary; the
+ * previous run should ensure we have enough space left inside the sector.
*/
ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
- cur_page = out_pages[*cur_out / PAGE_SIZE];
+ cur_folio = out_folios[*cur_out >> min_folio_shift];
/* Allocate a new page */
- if (!cur_page) {
- cur_page = alloc_page(GFP_NOFS);
- if (!cur_page)
+ if (!cur_folio) {
+ cur_folio = btrfs_alloc_compr_folio(fs_info);
+ if (!cur_folio)
return -ENOMEM;
- out_pages[*cur_out / PAGE_SIZE] = cur_page;
+ out_folios[*cur_out >> min_folio_shift] = cur_folio;
}
- kaddr = kmap(cur_page);
- write_compress_length(kaddr + offset_in_page(*cur_out),
- compressed_size);
+ kaddr = kmap_local_folio(cur_folio, offset_in_folio(cur_folio, *cur_out));
+ write_compress_length(kaddr, compressed_size);
*cur_out += LZO_LEN;
orig_out = *cur_out;
@@ -160,18 +175,22 @@ static int copy_compressed_data_to_page(char *compressed_data,
u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
orig_out + compressed_size - *cur_out);
- kunmap(cur_page);
- cur_page = out_pages[*cur_out / PAGE_SIZE];
+ kunmap_local(kaddr);
+
+ if ((*cur_out >> min_folio_shift) >= max_nr_folio)
+ return -E2BIG;
+
+ cur_folio = out_folios[*cur_out >> min_folio_shift];
/* Allocate a new page */
- if (!cur_page) {
- cur_page = alloc_page(GFP_NOFS);
- if (!cur_page)
+ if (!cur_folio) {
+ cur_folio = btrfs_alloc_compr_folio(fs_info);
+ if (!cur_folio)
return -ENOMEM;
- out_pages[*cur_out / PAGE_SIZE] = cur_page;
+ out_folios[*cur_out >> min_folio_shift] = cur_folio;
}
- kaddr = kmap(cur_page);
+ kaddr = kmap_local_folio(cur_folio, 0);
- memcpy(kaddr + offset_in_page(*cur_out),
+ memcpy(kaddr + offset_in_folio(cur_folio, *cur_out),
compressed_data + *cur_out - orig_out, copy_len);
*cur_out += copy_len;
@@ -191,18 +210,22 @@ static int copy_compressed_data_to_page(char *compressed_data,
*cur_out += sector_bytes_left;
out:
- kunmap(cur_page);
+ kunmap_local(kaddr);
return 0;
}
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
- unsigned long *total_in, unsigned long *total_out)
+int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct workspace *workspace = list_entry(ws, struct workspace, list);
- const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
- struct page *page_in = NULL;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct folio *folio_in = NULL;
char *sizes_ptr;
+ const unsigned long max_nr_folio = *out_folios;
int ret = 0;
/* Points to the file offset of input data */
u64 cur_in = start;
@@ -210,7 +233,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
u32 cur_out = 0;
u32 len = *total_out;
- *out_pages = 0;
+ ASSERT(max_nr_folio > 0);
+ *out_folios = 0;
*total_out = 0;
*total_in = 0;
@@ -227,28 +251,29 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
size_t out_len;
/* Get the input page first */
- if (!page_in) {
- page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
- ASSERT(page_in);
+ if (!folio_in) {
+ ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
+ if (ret < 0)
+ goto out;
}
/* Compress at most one sector of data each time */
in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
ASSERT(in_len);
- data_in = kmap(page_in);
- ret = lzo1x_1_compress(data_in +
- offset_in_page(cur_in), in_len,
+ data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
+ ret = lzo1x_1_compress(data_in, in_len,
workspace->cbuf, &out_len,
workspace->mem);
- kunmap(page_in);
- if (ret < 0) {
- pr_debug("BTRFS: lzo in loop returned %d\n", ret);
+ kunmap_local(data_in);
+ if (unlikely(ret < 0)) {
+ /* lzo1x_1_compress never fails. */
ret = -EIO;
goto out;
}
- ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
- pages, &cur_out, sectorsize);
+ ret = copy_compressed_data_to_page(fs_info, workspace->cbuf, out_len,
+ folios, max_nr_folio,
+ &cur_out);
if (ret < 0)
goto out;
@@ -263,15 +288,15 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
- /* Check if we have reached page boundary */
- if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
- put_page(page_in);
- page_in = NULL;
+ /* Check if we have reached folio boundary. */
+ if (IS_ALIGNED(cur_in, min_folio_size)) {
+ folio_put(folio_in);
+ folio_in = NULL;
}
}
/* Store the size of all chunks of compressed data */
- sizes_ptr = kmap_local_page(pages[0]);
+ sizes_ptr = kmap_local_folio(folios[0], 0);
write_compress_length(sizes_ptr, cur_out);
kunmap_local(sizes_ptr);
@@ -279,7 +304,9 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = cur_out;
*total_in = cur_in - start;
out:
- *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
+ if (folio_in)
+ folio_put(folio_in);
+ *out_folios = DIV_ROUND_UP(cur_out, min_folio_size);
return ret;
}
@@ -291,22 +318,19 @@ out:
static void copy_compressed_segment(struct compressed_bio *cb,
char *dest, u32 len, u32 *cur_in)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
u32 orig_in = *cur_in;
while (*cur_in < orig_in + len) {
- char *kaddr;
- struct page *cur_page;
- u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
- orig_in + len - *cur_in);
+ struct folio *cur_folio = cb->compressed_folios[*cur_in >> min_folio_shift];
+ u32 copy_len = min_t(u32, orig_in + len - *cur_in,
+ folio_size(cur_folio) - offset_in_folio(cur_folio, *cur_in));
ASSERT(copy_len);
- cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
- kaddr = kmap(cur_page);
- memcpy(dest + *cur_in - orig_in,
- kaddr + offset_in_page(*cur_in),
- copy_len);
- kunmap(cur_page);
+ memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
+ offset_in_folio(cur_folio, *cur_in), copy_len);
*cur_in += copy_len;
}
@@ -315,8 +339,9 @@ static void copy_compressed_segment(struct compressed_bio *cb,
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
+ const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
char *kaddr;
int ret;
/* Compressed data length, can be unaligned */
@@ -326,9 +351,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Bytes decompressed so far */
u32 cur_out = 0;
- kaddr = kmap(cb->compressed_pages[0]);
+ kaddr = kmap_local_folio(cb->compressed_folios[0], 0);
len_in = read_compress_length(kaddr);
- kunmap(cb->compressed_pages[0]);
+ kunmap_local(kaddr);
cur_in += LZO_LEN;
/*
@@ -338,17 +363,20 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
* and all sectors should be used.
* If this happens, it means the compressed extent is corrupted.
*/
- if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
- round_up(len_in, sectorsize) < cb->compressed_len) {
+ if (unlikely(len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
+ round_up(len_in, sectorsize) < cb->compressed_len)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
btrfs_err(fs_info,
- "invalid lzo header, lzo len %u compressed len %u",
- len_in, cb->compressed_len);
+"lzo header invalid, root %llu inode %llu offset %llu lzo len %u compressed len %u",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ cb->start, len_in, cb->compressed_len);
return -EUCLEAN;
}
/* Go through each lzo segment */
while (cur_in < len_in) {
- struct page *cur_page;
+ struct folio *cur_folio;
/* Length of the compressed segment */
u32 seg_len;
u32 sector_bytes_left;
@@ -360,23 +388,41 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
*/
ASSERT(cur_in / sectorsize ==
(cur_in + LZO_LEN - 1) / sectorsize);
- cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
- ASSERT(cur_page);
- kaddr = kmap(cur_page);
- seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
- kunmap(cur_page);
+ cur_folio = cb->compressed_folios[cur_in >> min_folio_shift];
+ ASSERT(cur_folio);
+ kaddr = kmap_local_folio(cur_folio, 0);
+ seg_len = read_compress_length(kaddr + offset_in_folio(cur_folio, cur_in));
+ kunmap_local(kaddr);
cur_in += LZO_LEN;
+ if (unlikely(seg_len > workspace_cbuf_length(fs_info))) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ /*
+ * seg_len shouldn't be larger than we have allocated
+ * for workspace->cbuf
+ */
+ btrfs_err(fs_info,
+ "lzo segment too big, root %llu inode %llu offset %llu len %u",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ cb->start, seg_len);
+ return -EIO;
+ }
+
/* Copy the compressed segment payload into workspace */
copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
/* Decompress the data */
ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
workspace->buf, &out_len);
- if (ret != LZO_E_OK) {
- btrfs_err(fs_info, "failed to decompress");
- ret = -EIO;
- goto out;
+ if (unlikely(ret != LZO_E_OK)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ btrfs_err(fs_info,
+ "lzo decompression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ cb->start);
+ return -EIO;
}
/* Copy the data into inode pages */
@@ -385,7 +431,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* All data read, exit */
if (ret == 0)
- goto out;
+ return 0;
ret = 0;
/* Check if the sector has enough space for a segment header */
@@ -396,76 +442,62 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Skip the padding zeros */
cur_in += sector_bytes_left;
}
-out:
- if (!ret)
- zero_fill_bio(cb->orig_bio);
- return ret;
+
+ return 0;
}
-int lzo_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+int lzo_decompress(struct list_head *ws, const u8 *data_in,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
+ const u32 sectorsize = fs_info->sectorsize;
size_t in_len;
size_t out_len;
- size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
+ size_t max_segment_len = workspace_buf_length(fs_info);
int ret = 0;
- char *kaddr;
- unsigned long bytes;
- if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
+ if (unlikely(srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2))
return -EUCLEAN;
in_len = read_compress_length(data_in);
- if (in_len != srclen)
+ if (unlikely(in_len != srclen))
return -EUCLEAN;
data_in += LZO_LEN;
in_len = read_compress_length(data_in);
- if (in_len != srclen - LZO_LEN * 2) {
+ if (unlikely(in_len != srclen - LZO_LEN * 2)) {
ret = -EUCLEAN;
goto out;
}
data_in += LZO_LEN;
- out_len = PAGE_SIZE;
+ out_len = sectorsize;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
- if (ret != LZO_E_OK) {
- pr_warn("BTRFS: decompress failed!\n");
+ if (unlikely(ret != LZO_E_OK)) {
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
+
+ btrfs_err(fs_info,
+ "lzo decompression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(dest_folio));
ret = -EIO;
goto out;
}
- if (out_len < start_byte) {
+ ASSERT(out_len <= sectorsize);
+ memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, out_len);
+ /* Early end, considered as an error. */
+ if (unlikely(out_len < destlen)) {
ret = -EIO;
- goto out;
+ folio_zero_range(dest_folio, dest_pgoff + out_len, destlen - out_len);
}
-
- /*
- * the caller is already checking against PAGE_SIZE, but lets
- * move this check closer to the memcpy/memset
- */
- destlen = min_t(unsigned long, destlen, PAGE_SIZE);
- bytes = min_t(unsigned long, destlen, out_len - start_byte);
-
- kaddr = kmap_local_page(dest_page);
- memcpy(kaddr, workspace->buf + start_byte, bytes);
-
- /*
- * btrfs_getblock is doing a zero on the tail of the page too,
- * but this will cover anything missing from the decompressed
- * data.
- */
- if (bytes < destlen)
- memset(kaddr+bytes, 0, destlen-bytes);
- kunmap_local(kaddr);
out:
return ret;
}
-const struct btrfs_compress_op btrfs_lzo_compress = {
- .workspace_manager = &wsm,
+const struct btrfs_compress_levels btrfs_lzo_compress = {
.max_level = 1,
.default_level = 1,
};
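/*
 * A minimal standalone user-space sketch of the segment framing that
 * read_compress_length()/write_compress_length() operate on, assuming
 * LZO_LEN is 4 and lengths are stored as little-endian u32 values as in the
 * helpers earlier in lzo.c; the layout shown is illustrative, not
 * authoritative.
 */
#include <stdint.h>
#include <stdio.h>

#define LZO_LEN 4

static void put_le32(uint8_t *buf, uint32_t val)
{
	buf[0] = val & 0xff;
	buf[1] = (val >> 8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *buf)
{
	return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
	       ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
}

int main(void)
{
	uint8_t sector[4096] = { 0 };
	uint32_t seg_len = 123;	/* hypothetical compressed segment size */

	/* Header with the total compressed size, then one segment header + payload. */
	put_le32(sector, LZO_LEN + LZO_LEN + seg_len);
	put_le32(sector + LZO_LEN, seg_len);

	printf("total=%u first segment=%u\n",
	       (unsigned int)get_le32(sector),
	       (unsigned int)get_le32(sector + LZO_LEN));
	return 0;
}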
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
new file mode 100644
index 000000000000..2f853de44473
--- /dev/null
+++ b/fs/btrfs/messages.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "fs.h"
+#include "messages.h"
+#include "discard.h"
+#include "super.h"
+
+#ifdef CONFIG_PRINTK
+
+#define STATE_STRING_PREFACE " state "
+#define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1)
+
+/*
+ * Characters to print to indicate error conditions or uncommon filesystem state.
+ * RO is not an error.
+ */
+static const char fs_state_chars[] = {
+ [BTRFS_FS_STATE_REMOUNTING] = 'M',
+ [BTRFS_FS_STATE_RO] = 0,
+ [BTRFS_FS_STATE_TRANS_ABORTED] = 'A',
+ [BTRFS_FS_STATE_LOG_REPLAY_ABORTED] = 'O',
+ [BTRFS_FS_STATE_DEV_REPLACING] = 'R',
+ [BTRFS_FS_STATE_DUMMY_FS_INFO] = 0,
+ [BTRFS_FS_STATE_NO_DATA_CSUMS] = 'C',
+ [BTRFS_FS_STATE_SKIP_META_CSUMS] = 'S',
+ [BTRFS_FS_STATE_LOG_CLEANUP_ERROR] = 'L',
+ [BTRFS_FS_STATE_EMERGENCY_SHUTDOWN] = 'E',
+};
+
+static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
+{
+ unsigned int bit;
+ bool states_printed = false;
+ unsigned long fs_state = READ_ONCE(info->fs_state);
+ char *curr = buf;
+
+ memcpy(curr, STATE_STRING_PREFACE, sizeof(STATE_STRING_PREFACE));
+ curr += sizeof(STATE_STRING_PREFACE) - 1;
+
+ if (BTRFS_FS_ERROR(info)) {
+ *curr++ = 'E';
+ states_printed = true;
+ }
+
+ for_each_set_bit(bit, &fs_state, sizeof(fs_state)) {
+ WARN_ON_ONCE(bit >= BTRFS_FS_STATE_COUNT);
+ if ((bit < BTRFS_FS_STATE_COUNT) && fs_state_chars[bit]) {
+ *curr++ = fs_state_chars[bit];
+ states_printed = true;
+ }
+ }
+
+ /* If no states were printed, reset the buffer */
+ if (!states_printed)
+ curr = buf;
+
+ *curr++ = 0;
+}
+#endif
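/*
 * A minimal standalone sketch of the suffix btrfs_state_to_string() builds,
 * assuming hypothetical bit positions; the real positions come from the
 * BTRFS_FS_STATE_* enum in fs.h.
 */
#include <stdio.h>

int main(void)
{
	/* Pretend REMOUNTING is bit 0 ('M') and TRANS_ABORTED is bit 2 ('A'). */
	const char chars[] = { 'M', 0, 'A' };
	unsigned long fs_state = (1UL << 0) | (1UL << 2);
	char buf[32] = " state ";
	char *curr = buf + 7;	/* strlen(" state ") */
	unsigned int bit;

	for (bit = 0; bit < 3; bit++)
		if ((fs_state & (1UL << bit)) && chars[bit])
			*curr++ = chars[bit];
	*curr = '\0';

	printf("%s\n", buf);	/* prints " state MA" */
	return 0;
}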
+
+/*
+ * Generally the error codes correspond to their respective errors, but there
+ * are a few special cases.
+ *
+ * EUCLEAN: Any sort of corruption that we encounter. The tree-checker for
+ * instance will return EUCLEAN if any of the blocks are corrupted in
+ * a way that is problematic. We want to reserve EUCLEAN for these
+ * sort of corruptions.
+ *
+ * EROFS: If we check BTRFS_FS_STATE_ERROR and fail out with a return error, we
+ * need to use EROFS for this case. We will have no idea of the
+ * original failure, that will have been reported at the time we tripped
+ * over the error. Each subsequent error that doesn't have any context
+ * of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
+ */
+const char * __attribute_const__ btrfs_decode_error(int error)
+{
+ char *errstr = "unknown";
+
+ switch (error) {
+ case -ENOENT: /* -2 */
+ errstr = "No such entry";
+ break;
+ case -EIO: /* -5 */
+ errstr = "IO failure";
+ break;
+ case -ENOMEM: /* -12*/
+ errstr = "Out of memory";
+ break;
+ case -EEXIST: /* -17 */
+ errstr = "Object already exists";
+ break;
+ case -ENOSPC: /* -28 */
+ errstr = "No space left";
+ break;
+ case -EROFS: /* -30 */
+ errstr = "Readonly filesystem";
+ break;
+ case -EOPNOTSUPP: /* -95 */
+ errstr = "Operation not supported";
+ break;
+ case -EUCLEAN: /* -117 */
+ errstr = "Filesystem corrupted";
+ break;
+ case -EDQUOT: /* -122 */
+ errstr = "Quota exceeded";
+ break;
+ }
+
+ return errstr;
+}
+
+/*
+ * Decodes expected errors from the caller and invokes the appropriate error
+ * response.
+ */
+__cold
+void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int error, const char *fmt, ...)
+{
+ struct super_block *sb = fs_info->sb;
+#ifdef CONFIG_PRINTK
+ char statestr[STATE_STRING_BUF_LEN];
+ const char *errstr;
+#endif
+
+#ifdef CONFIG_PRINTK_INDEX
+ printk_index_subsys_emit(
+ "BTRFS: error (device %s%s) in %s:%d: errno=%d %s", KERN_CRIT, fmt);
+#endif
+
+ /*
+ * Special case: if the error is EROFS, and we're already under
+ * SB_RDONLY, then it is safe here.
+ */
+ if (error == -EROFS && sb_rdonly(sb))
+ return;
+
+#ifdef CONFIG_PRINTK
+ errstr = btrfs_decode_error(error);
+ btrfs_state_to_string(fs_info, statestr);
+ if (fmt) {
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s (%pV)\n",
+ sb->s_id, statestr, function, line, error, errstr, &vaf);
+ va_end(args);
+ } else {
+ pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s\n",
+ sb->s_id, statestr, function, line, error, errstr);
+ }
+#endif
+
+ /*
+ * Today we only save the error info to memory. Long term we'll also
+ * send it down to the disk.
+ */
+ WRITE_ONCE(fs_info->fs_error, error);
+
+ /* Don't go through full error handling during mount. */
+ if (!(sb->s_flags & SB_BORN))
+ return;
+
+ if (sb_rdonly(sb))
+ return;
+
+ btrfs_discard_stop(fs_info);
+
+ /* Handle error by forcing the filesystem readonly. */
+ btrfs_set_sb_rdonly(sb);
+ btrfs_info(fs_info, "forced readonly");
+ /*
+ * Note that a running device replace operation is not canceled here
+ * although there is no way to update the progress. It would add the
+ * risk of a deadlock, therefore the canceling is omitted. The only
+ * penalty is that some I/O remains active until the procedure
+ * completes. The next time when the filesystem is mounted writable
+ * again, the device replace operation continues.
+ */
+}
+
+#ifdef CONFIG_PRINTK
+static const char * const logtypes[] = {
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notice",
+ "info",
+ "debug",
+};
+
+/*
+ * Use one ratelimit state per log level so that a flood of less important
+ * messages doesn't cause more important ones to be dropped.
+ */
+static struct ratelimit_state printk_limits[] = {
+ RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
+};
+
+void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+ char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
+ struct va_format vaf;
+ va_list args;
+ int kern_level;
+ const char *type = logtypes[4];
+ struct ratelimit_state *ratelimit = &printk_limits[4];
+
+#ifdef CONFIG_PRINTK_INDEX
+ printk_index_subsys_emit("%sBTRFS %s (device %s): ", NULL, fmt);
+#endif
+
+ va_start(args, fmt);
+
+ while ((kern_level = printk_get_level(fmt)) != 0) {
+ size_t size = printk_skip_level(fmt) - fmt;
+
+ if (kern_level >= '0' && kern_level <= '7') {
+ memcpy(lvl, fmt, size);
+ lvl[size] = '\0';
+ type = logtypes[kern_level - '0'];
+ ratelimit = &printk_limits[kern_level - '0'];
+ }
+ fmt += size;
+ }
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ /* Do not ratelimit if CONFIG_BTRFS_DEBUG is enabled. */
+ if (IS_ENABLED(CONFIG_BTRFS_DEBUG) || __ratelimit(ratelimit)) {
+ if (fs_info) {
+ char statestr[STATE_STRING_BUF_LEN];
+
+ btrfs_state_to_string(fs_info, statestr);
+ _printk("%sBTRFS %s (device %s%s): %pV\n", lvl, type,
+ fs_info->sb->s_id, statestr, &vaf);
+ } else {
+ _printk("%sBTRFS %s: %pV\n", lvl, type, &vaf);
+ }
+ }
+
+ va_end(args);
+}
+#endif
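/*
 * A minimal standalone sketch of the level-prefix handling done by
 * _btrfs_printk() above: a KERN_<LEVEL> header is "\001" followed by a digit
 * '0'..'7', which selects both the log type name and the ratelimit bucket.
 * The parsing below is a simplified local copy, not the real printk helpers.
 */
#include <stdio.h>

static const char * const level_names[] = {
	"emergency", "alert", "critical", "error",
	"warning", "notice", "info", "debug",
};

int main(void)
{
	const char *fmt = "\001" "3" "device %s not found";	/* KERN_ERR-style prefix */
	const char *type = level_names[4];			/* default: warning */

	if (fmt[0] == '\001' && fmt[1] >= '0' && fmt[1] <= '7') {
		type = level_names[fmt[1] - '0'];
		fmt += 2;	/* skip the level header */
	}

	printf("level=%s fmt=\"%s\"\n", type, fmt);
	return 0;
}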
+
+#if BITS_PER_LONG == 32
+void __cold btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info)
+{
+ if (!test_and_set_bit(BTRFS_FS_32BIT_WARN, &fs_info->flags)) {
+ btrfs_warn(fs_info, "reaching 32bit limit for logical addresses");
+ btrfs_warn(fs_info,
+"due to page cache limit on 32bit systems, btrfs can't access metadata at or beyond %lluT",
+ BTRFS_32BIT_MAX_FILE_SIZE >> 40);
+ btrfs_warn(fs_info,
+ "please consider upgrading to 64bit kernel/hardware");
+ }
+}
+
+void __cold btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info)
+{
+ if (!test_and_set_bit(BTRFS_FS_32BIT_ERROR, &fs_info->flags)) {
+ btrfs_err(fs_info, "reached 32bit limit for logical addresses");
+ btrfs_err(fs_info,
+"due to page cache limit on 32bit systems, metadata beyond %lluT can't be accessed",
+ BTRFS_32BIT_MAX_FILE_SIZE >> 40);
+ btrfs_err(fs_info,
+ "please consider upgrading to 64bit kernel/hardware");
+ }
+}
+#endif
+
+/*
+ * Decode unexpected, fatal errors from the caller, issue an alert, and either
+ * panic or BUGs, depending on mount options.
+ */
+__cold
+void __btrfs_panic(const struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int error, const char *fmt, ...)
+{
+ char *s_id = "<unknown>";
+ const char *errstr;
+ struct va_format vaf = { .fmt = fmt };
+ va_list args;
+
+ if (fs_info)
+ s_id = fs_info->sb->s_id;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ errstr = btrfs_decode_error(error);
+ if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
+ panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
+ s_id, function, line, &vaf, error, errstr);
+
+ btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
+ function, line, &vaf, error, errstr);
+ va_end(args);
+ /* Caller calls BUG() */
+}
diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
new file mode 100644
index 000000000000..d8c0bd17dcda
--- /dev/null
+++ b/fs/btrfs/messages.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_MESSAGES_H
+#define BTRFS_MESSAGES_H
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+struct btrfs_fs_info;
+
+/*
+ * We want to be able to override this in btrfs-progs.
+ */
+#ifdef __KERNEL__
+
+static inline __printf(2, 3) __cold
+void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PRINTK
+
+#define btrfs_printk(fs_info, fmt, args...) \
+ _btrfs_printk(fs_info, fmt, ##args)
+
+__printf(2, 3)
+__cold
+void _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
+
+#else
+
+#define btrfs_printk(fs_info, fmt, args...) \
+ btrfs_no_printk(fs_info, fmt, ##args)
+#endif
+
+/*
+ * Print a message with filesystem info, enclosed in RCU protection.
+ */
+#define btrfs_crit(fs_info, fmt, args...) \
+ btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args)
+#define btrfs_err(fs_info, fmt, args...) \
+ btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args)
+#define btrfs_warn(fs_info, fmt, args...) \
+ btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args)
+#define btrfs_info(fs_info, fmt, args...) \
+ btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args)
+
+/*
+ * Wrappers that use a ratelimited printk
+ */
+#define btrfs_crit_rl(fs_info, fmt, args...) \
+ btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args)
+#define btrfs_err_rl(fs_info, fmt, args...) \
+ btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args)
+#define btrfs_warn_rl(fs_info, fmt, args...) \
+ btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args)
+#define btrfs_info_rl(fs_info, fmt, args...) \
+ btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define btrfs_debug(fs_info, fmt, args...) \
+ _dynamic_func_call_no_desc(fmt, btrfs_printk_in_rcu, \
+ fs_info, KERN_DEBUG fmt, ##args)
+#define btrfs_debug_rl(fs_info, fmt, args...) \
+ _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \
+ fs_info, KERN_DEBUG fmt, ##args)
+#elif defined(DEBUG)
+#define btrfs_debug(fs_info, fmt, args...) \
+ btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
+#define btrfs_debug_rl(fs_info, fmt, args...) \
+ btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
+#else
+/* When printk() is no_printk(), expand to no-op. */
+#define btrfs_debug(fs_info, fmt, args...) do { (void)(fs_info); } while(0)
+#define btrfs_debug_rl(fs_info, fmt, args...) do { (void)(fs_info); } while(0)
+#endif
+
+#define btrfs_printk_in_rcu(fs_info, fmt, args...) \
+do { \
+ rcu_read_lock(); \
+ btrfs_printk(fs_info, fmt, ##args); \
+ rcu_read_unlock(); \
+} while (0)
+
+#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ rcu_read_lock(); \
+ if (__ratelimit(&_rs)) \
+ btrfs_printk(fs_info, fmt, ##args); \
+ rcu_read_unlock(); \
+} while (0)
+
+#ifdef CONFIG_BTRFS_ASSERT
+
+__printf(1, 2)
+static inline void verify_assert_printk_format(const char *fmt, ...)
+{
+ /* Stub to verify the assertion format string. */
+}
+
+/* Take the first token if any. */
+#define __FIRST_ARG(_, ...) _
+/*
+ * Skip the first token and return the rest; if it's empty, the comma is dropped.
+ * As ##__VA_ARGS__ cannot be at the beginning of the macro, __VA_OPT__ is needed;
+ * it has been supported since GCC 8 and Clang 12.
+ */
+#define __REST_ARGS(_, ... ) __VA_OPT__(,) __VA_ARGS__
+
+#if defined(CONFIG_CC_IS_CLANG) || GCC_VERSION >= 80000
+/*
+ * Assertion with optional printk() format.
+ *
+ * Accepted syntax:
+ * ASSERT(condition);
+ * ASSERT(condition, "string");
+ * ASSERT(condition, "variable=%d", variable);
+ *
+ * How it works:
+ * - if there's no format string, ""[0] evaluates at compile time to 0 and the
+ * true branch is executed
+ * - any non-empty format string with the "" prefix evaluates to != 0 at
+ * compile time and the false branch is executed
+ * - stringified condition is printed as %s so we don't accidentally mix format
+ * strings (the % operator)
+ * - there can be only one printk() call, so the format strings and arguments are
+ * spliced together:
+ * DEFAULT_FMT [USER_FMT], DEFAULT_ARGS [, USER_ARGS]
+ * - comma between DEFAULT_ARGS and USER_ARGS is handled by preprocessor
+ * (requires __VA_OPT__ support)
+ * - otherwise we could use __VA_OPT__(,) __VA_ARGS__ for the 2nd+ argument of args.
+ */
+#define ASSERT(cond, args...) \
+do { \
+ verify_assert_printk_format("check the format string" args); \
+ if (!likely(cond)) { \
+ if (("" __FIRST_ARG(args) [0]) == 0) { \
+ pr_err("assertion failed: %s :: %ld, in %s:%d\n", \
+ #cond, (long)(cond), __FILE__, __LINE__); \
+ } else { \
+ pr_err("assertion failed: %s :: %ld, in %s:%d (" __FIRST_ARG(args) ")\n", \
+ #cond, (long)(cond), __FILE__, __LINE__ __REST_ARGS(args)); \
+ } \
+ BUG(); \
+ } \
+} while(0)
+
+#else
+
+/* For GCC < 8.x only the simple output. */
+
+#define ASSERT(cond, args...) \
+do { \
+ verify_assert_printk_format("check the format string" args); \
+ if (!likely(cond)) { \
+ pr_err("assertion failed: %s :: %ld, in %s:%d\n", \
+ #cond, (long)(cond), __FILE__, __LINE__); \
+ BUG(); \
+ } \
+} while(0)
+
+#endif
+
+#else
+/* Compile check the @cond expression but don't generate any code. */
+#define ASSERT(cond, args...) BUILD_BUG_ON_INVALID(cond)
+#endif
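/*
 * Usage sketch for the ASSERT() forms documented above; 'ret' and 'nr_bytes'
 * are hypothetical values and this only shows the accepted syntax.
 */
static inline void assert_usage_example(int ret, u64 nr_bytes)
{
	ASSERT(ret == 0);
	ASSERT(ret == 0, "unexpected return value");
	ASSERT(nr_bytes > 0, "nr_bytes=%llu", nr_bytes);
}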
+
+#ifdef CONFIG_BTRFS_DEBUG
+/* Verbose warning only under debug build. */
+#define DEBUG_WARN(args...) WARN(1, KERN_ERR args)
+#else
+#define DEBUG_WARN(...) do {} while(0)
+#endif
+
+__printf(5, 6)
+__cold
+void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int error, const char *fmt, ...);
+
+const char * __attribute_const__ btrfs_decode_error(int error);
+
+#define btrfs_handle_fs_error(fs_info, error, fmt, args...) \
+ __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \
+ (error), fmt, ##args)
+
+__printf(5, 6)
+__cold
+void __btrfs_panic(const struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int error, const char *fmt, ...);
+/*
+ * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
+ * will panic(). Otherwise we BUG() here.
+ */
+#define btrfs_panic(fs_info, error, fmt, args...) \
+do { \
+ __btrfs_panic(fs_info, __func__, __LINE__, error, fmt, ##args); \
+ BUG(); \
+} while (0)
+
+#if BITS_PER_LONG == 32
+#define BTRFS_32BIT_MAX_FILE_SIZE (((u64)ULONG_MAX + 1) << PAGE_SHIFT)
+/*
+ * The warning threshold is 5/8th of the MAX_LFS_FILESIZE that limits the logical
+ * addresses of extents.
+ *
+ * For 4K page size it's about 10T, for 64K it's 160T.
+ */
+#define BTRFS_32BIT_EARLY_WARN_THRESHOLD (BTRFS_32BIT_MAX_FILE_SIZE * 5 / 8)
+void btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info);
+void btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info);
+#endif
+
+#endif
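/*
 * A minimal standalone sketch working out the 32-bit limits above for a 4K
 * page size: BTRFS_32BIT_MAX_FILE_SIZE is 16T and the early warning
 * threshold (5/8 of it) is 10T.  ULONG_MAX is hardcoded to the 32-bit value
 * here because the limits only exist on 32-bit kernels.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t max_file_size = ((uint64_t)0xffffffff + 1) << 12; /* 4K pages */
	const uint64_t early_warn = max_file_size * 5 / 8;

	printf("max=%lluT warn=%lluT\n",
	       (unsigned long long)(max_file_size >> 40),
	       (unsigned long long)(early_warn >> 40));	/* max=16T warn=10T */
	return 0;
}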
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 340f995652f2..12c5a9d6564f 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -3,12 +3,78 @@
#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H
+#include <linux/types.h>
+#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>
+#include <linux/bio.h>
-#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
+/*
+ * Convenience macros to define a pointer with the __free(kfree) and
+ * __free(kvfree) cleanup attributes and initialized to NULL.
+ */
+#define AUTO_KFREE(name) *name __free(kfree) = NULL
+#define AUTO_KVFREE(name) *name __free(kvfree) = NULL
+
+/*
+ * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
+ */
+#define ENUM_BIT(name) \
+ __ ## name ## _BIT, \
+ name = (1U << __ ## name ## _BIT), \
+ __ ## name ## _SEQ = __ ## name ## _BIT
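/*
 * Expansion sketch for ENUM_BIT(): the EXAMPLE_* names below are made up for
 * illustration only.
 */
enum example_flags {
	ENUM_BIT(EXAMPLE_FLAG_A),	/* __EXAMPLE_FLAG_A_BIT = 0, EXAMPLE_FLAG_A = 0x1 */
	ENUM_BIT(EXAMPLE_FLAG_B),	/* __EXAMPLE_FLAG_B_BIT = 1, EXAMPLE_FLAG_B = 0x2 */
	ENUM_BIT(EXAMPLE_FLAG_C),	/* __EXAMPLE_FLAG_C_BIT = 2, EXAMPLE_FLAG_C = 0x4 */
};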
+
+static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
+{
+ struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+ return bvec_phys(&bv);
+}
+
+/*
+ * Iterate bio using btrfs block size.
+ *
+ * This will handle large folios and highmem.
+ *
+ * @paddr: Physical memory address of each iteration
+ * @bio: The bio to iterate
+ * @iter: The bvec_iter (pointer) to use.
+ * @blocksize: The blocksize to iterate.
+ *
+ * This requires all folios in the bio to cover at least one block.
+ */
+#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize) \
+ for (; (iter)->bi_size && \
+ (paddr = bio_iter_phys((bio), (iter)), 1); \
+ bio_advance_iter_single((bio), (iter), (blocksize)))
+
+/* Initialize a bvec_iter to the size of the specified bio. */
+static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ u32 bio_size = 0;
+ int i;
+
+ bio_for_each_bvec_all(bvec, bio, i)
+ bio_size += bvec->bv_len;
+
+ return (struct bvec_iter) {
+ .bi_sector = 0,
+ .bi_size = bio_size,
+ .bi_idx = 0,
+ .bi_bvec_done = 0,
+ };
+}
+
+#define btrfs_bio_for_each_block_all(paddr, bio, blocksize) \
+ for (struct bvec_iter iter = init_bvec_iter_for_bio(bio); \
+ (iter).bi_size && \
+ (paddr = bio_iter_phys((bio), &(iter)), 1); \
+ bio_advance_iter_single((bio), &(iter), (blocksize)))
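/*
 * Usage sketch for the iterator above: count how many filesystem blocks a bio
 * covers.  The blocksize argument is assumed to be the btrfs block size;
 * normally each paddr would be handed to a per-block helper (e.g. checksum
 * verification) instead of just being counted.
 */
static inline u32 example_count_bio_blocks(struct bio *bio, u32 blocksize)
{
	phys_addr_t paddr;
	u32 nr_blocks = 0;

	btrfs_bio_for_each_block_all(paddr, bio, blocksize)
		nr_blocks++;

	return nr_blocks;
}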
static inline void cond_wake_up(struct wait_queue_head *wq)
{
@@ -32,22 +98,10 @@ static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
wake_up(wq);
}
-static inline u64 div_factor(u64 num, int factor)
+static inline u64 mult_perc(u64 num, u32 percent)
{
- if (factor == 10)
- return num;
- num *= factor;
- return div_u64(num, 10);
+ return div_u64(num * percent, 100);
}
-
-static inline u64 div_factor_fine(u64 num, int factor)
-{
- if (factor == 100)
- return num;
- num *= factor;
- return div_u64(num, 100);
-}
-
/* Copy of is_power_of_two that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
@@ -70,7 +124,7 @@ struct rb_simple_node {
u64 bytenr;
};
-static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
+static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
struct rb_node *node = root->rb_node;
struct rb_simple_node *entry;
@@ -88,28 +142,78 @@ static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
return NULL;
}
-static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
- struct rb_node *node)
+/*
+ * Search @root from an entry that starts or comes after @bytenr.
+ *
+ * @root: the root to search.
+ * @bytenr: bytenr to search from.
+ *
+ * Return the rb_node that starts at or after @bytenr. If there is no entry at
+ * or after @bytenr, return NULL.
+ */
+static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
+ u64 bytenr)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct rb_simple_node *entry;
+ struct rb_node *node = root->rb_node, *ret = NULL;
+ struct rb_simple_node *entry, *ret_entry = NULL;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct rb_simple_node, rb_node);
+ while (node) {
+ entry = rb_entry(node, struct rb_simple_node, rb_node);
- if (bytenr < entry->bytenr)
- p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
- p = &(*p)->rb_right;
- else
- return parent;
+ if (bytenr < entry->bytenr) {
+ if (!ret || entry->bytenr < ret_entry->bytenr) {
+ ret = node;
+ ret_entry = entry;
+ }
+
+ node = node->rb_left;
+ } else if (bytenr > entry->bytenr) {
+ node = node->rb_right;
+ } else {
+ return node;
+ }
}
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
+ return ret;
+}
+
+static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
+ struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);
+
+ if (new_entry->bytenr < existing_entry->bytenr)
+ return -1;
+ else if (new_entry->bytenr > existing_entry->bytenr)
+ return 1;
+
+ return 0;
+}
+
+static inline struct rb_node *rb_simple_insert(struct rb_root *root,
+ struct rb_simple_node *simple_node)
+{
+ return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
+}
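/*
 * Usage sketch for the rb_simple_node helpers: the example_entry wrapper type
 * and lookup function are made up for illustration only.
 */
struct example_entry {
	struct rb_simple_node simple;	/* embeds the rb_node and the bytenr key */
};

static inline struct example_entry *example_lookup_first(const struct rb_root *root,
							  u64 bytenr)
{
	struct rb_node *node = rb_simple_search_first(root, bytenr);

	return node ? rb_entry(node, struct example_entry, simple.rb_node) : NULL;
}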
+
+static inline bool bitmap_test_range_all_set(const unsigned long *addr,
+ unsigned long start,
+ unsigned long nbits)
+{
+ unsigned long found_zero;
+
+ found_zero = find_next_zero_bit(addr, start + nbits, start);
+ return (found_zero == start + nbits);
+}
+
+static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
+ unsigned long start,
+ unsigned long nbits)
+{
+ unsigned long found_set;
+
+ found_set = find_next_bit(addr, start + nbits, start);
+ return (found_set == start + nbits);
}
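/*
 * Usage sketch for the bitmap range helpers above: check whether 16 bits
 * starting at bit 8 of a (hypothetical) dirty bitmap are all set.
 */
static inline bool example_range_fully_dirty(const unsigned long *dirty_bitmap)
{
	return bitmap_test_range_all_set(dirty_bitmap, 8, 16);
}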
#endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6b51fd2ec5ac..5df02c707aee 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -7,6 +7,7 @@
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
+#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
@@ -17,6 +18,8 @@
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
+#include "file.h"
+#include "block-group.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
@@ -108,8 +111,8 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
return NULL;
}
-static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
- u64 len)
+static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
+ u64 len)
{
if (file_offset + len <= entry->file_offset ||
entry->file_offset + entry->num_bytes <= file_offset)
@@ -121,107 +124,127 @@ static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
* Look for the first ordered struct that has this offset, otherwise
* the first one less than this offset
*/
-static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
- u64 file_offset)
+static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
+ u64 file_offset)
{
- struct rb_root *root = &tree->tree;
struct rb_node *prev = NULL;
struct rb_node *ret;
struct btrfs_ordered_extent *entry;
- if (tree->last) {
- entry = rb_entry(tree->last, struct btrfs_ordered_extent,
+ if (inode->ordered_tree_last) {
+ entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
rb_node);
if (in_range(file_offset, entry->file_offset, entry->num_bytes))
- return tree->last;
+ return inode->ordered_tree_last;
}
- ret = __tree_search(root, file_offset, &prev);
+ ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
if (!ret)
ret = prev;
if (ret)
- tree->last = ret;
+ inode->ordered_tree_last = ret;
return ret;
}
-/*
- * Allocate and add a new ordered_extent into the per-inode tree.
- *
- * The tree is given a single reference on the ordered extent that was
- * inserted.
- */
-static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int type, int dio,
- int compress_type)
+static struct btrfs_ordered_extent *alloc_ordered_extent(
+ struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
+ u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
+ u64 offset, unsigned long flags, int compress_type)
{
- struct btrfs_root *root = inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
- struct rb_node *node;
struct btrfs_ordered_extent *entry;
int ret;
+ u64 qgroup_rsv = 0;
+ const bool is_nocow = (flags &
+ ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)));
+
+ /*
+ * For a NOCOW write we can free the qgroup reserve right now. For a COW
+ * one we transfer the reserved space from the inode's iotree into the
+ * ordered extent by calling btrfs_qgroup_release_data() and tracking
+ * the qgroup reserved amount in the ordered extent, so that later after
+ * completing the ordered extent, when running the data delayed ref it
+ * creates, we free the reserved data with btrfs_qgroup_free_refroot().
+ */
+ if (is_nocow)
+ ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
+ else
+ ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
+
+ if (ret < 0)
+ return ERR_PTR(ret);
- if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
- /* For nocow write, we can release the qgroup rsv right now */
- ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
- if (ret < 0)
- return ret;
- ret = 0;
- } else {
- /*
- * The ordered extent has reserved qgroup space, release now
- * and pass the reserved number for qgroup_record to free.
- */
- ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
- if (ret < 0)
- return ret;
- }
entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
- if (!entry)
- return -ENOMEM;
+ if (!entry) {
+ entry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
entry->file_offset = file_offset;
- entry->disk_bytenr = disk_bytenr;
entry->num_bytes = num_bytes;
+ entry->ram_bytes = ram_bytes;
+ entry->disk_bytenr = disk_bytenr;
entry->disk_num_bytes = disk_num_bytes;
+ entry->offset = offset;
entry->bytes_left = num_bytes;
- entry->inode = igrab(&inode->vfs_inode);
+ if (WARN_ON_ONCE(!igrab(&inode->vfs_inode))) {
+ kmem_cache_free(btrfs_ordered_extent_cache, entry);
+ entry = ERR_PTR(-ESTALE);
+ goto out;
+ }
+ entry->inode = inode;
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
- entry->qgroup_rsv = ret;
- entry->physical = (u64)-1;
-
- ASSERT(type == BTRFS_ORDERED_REGULAR ||
- type == BTRFS_ORDERED_NOCOW ||
- type == BTRFS_ORDERED_PREALLOC ||
- type == BTRFS_ORDERED_COMPRESSED);
- set_bit(type, &entry->flags);
-
- percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
- fs_info->delalloc_batch);
-
- if (dio)
- set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
-
- /* one ref for the tree */
+ entry->qgroup_rsv = qgroup_rsv;
+ entry->flags = flags;
refcount_set(&entry->refs, 1);
init_waitqueue_head(&entry->wait);
INIT_LIST_HEAD(&entry->list);
INIT_LIST_HEAD(&entry->log_list);
INIT_LIST_HEAD(&entry->root_extent_list);
INIT_LIST_HEAD(&entry->work_list);
+ INIT_LIST_HEAD(&entry->bioc_list);
init_completion(&entry->completion);
+ /*
+ * We don't need the count_max_extents here, we can assume that all of
+ * that work has been done at higher layers, so this is truly the
+ * smallest the extent is going to get.
+ */
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, 1);
+ spin_unlock(&inode->lock);
+
+out:
+ if (IS_ERR(entry) && !is_nocow)
+ btrfs_qgroup_free_refroot(inode->root->fs_info,
+ btrfs_root_id(inode->root),
+ qgroup_rsv, BTRFS_QGROUP_RSV_DATA);
+
+ return entry;
+}
+
+static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
+{
+ struct btrfs_inode *inode = entry->inode;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct rb_node *node;
+
trace_btrfs_ordered_extent_add(inode, entry);
- spin_lock_irq(&tree->lock);
- node = tree_insert(&tree->tree, file_offset,
+ percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
+ fs_info->delalloc_batch);
+
+ /* One ref for the tree. */
+ refcount_inc(&entry->refs);
+
+ spin_lock(&inode->ordered_tree_lock);
+ node = tree_insert(&inode->ordered_tree, entry->file_offset,
&entry->rb_node);
- if (node)
+ if (unlikely(node))
btrfs_panic(fs_info, -EEXIST,
"inconsistency in ordered tree at offset %llu",
- file_offset);
- spin_unlock_irq(&tree->lock);
+ entry->file_offset);
+ spin_unlock(&inode->ordered_tree_lock);
spin_lock(&root->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
@@ -234,52 +257,65 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
spin_unlock(&fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);
-
- /*
- * We don't need the count_max_extents here, we can assume that all of
- * that work has been done at higher layers, so this is truly the
- * smallest the extent is going to get.
- */
- spin_lock(&inode->lock);
- btrfs_mod_outstanding_extents(inode, 1);
- spin_unlock(&inode->lock);
-
- return 0;
}
-int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
- int type)
+/*
+ * Add an ordered extent to the per-inode tree.
+ *
+ * @inode: Inode that this extent is for.
+ * @file_offset: Logical offset in file where the extent starts.
+ * @num_bytes: Logical length of extent in file.
+ * @ram_bytes: Full length of unencoded data.
+ * @disk_bytenr: Offset of extent on disk.
+ * @disk_num_bytes: Size of extent on disk.
+ * @offset: Offset into unencoded data where file data starts.
+ * @flags: Flags specifying type of extent (1U << BTRFS_ORDERED_*).
+ * @compress_type: Compression algorithm used for data.
+ *
+ * Most of these parameters correspond to &struct btrfs_file_extent_item. The
+ * tree is given a single reference on the ordered extent that was inserted, and
+ * the returned pointer is given a second reference.
+ *
+ * Return: the new ordered extent or error pointer.
+ */
+struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
+ struct btrfs_inode *inode, u64 file_offset,
+ const struct btrfs_file_extent *file_extent, unsigned long flags)
{
- ASSERT(type == BTRFS_ORDERED_REGULAR ||
- type == BTRFS_ORDERED_NOCOW ||
- type == BTRFS_ORDERED_PREALLOC);
- return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
- num_bytes, disk_num_bytes, type, 0,
- BTRFS_COMPRESS_NONE);
-}
+ struct btrfs_ordered_extent *entry;
-int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int type)
-{
- ASSERT(type == BTRFS_ORDERED_REGULAR ||
- type == BTRFS_ORDERED_NOCOW ||
- type == BTRFS_ORDERED_PREALLOC);
- return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
- num_bytes, disk_num_bytes, type, 1,
- BTRFS_COMPRESS_NONE);
-}
+ ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
-int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int compress_type)
-{
- ASSERT(compress_type != BTRFS_COMPRESS_NONE);
- return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
- num_bytes, disk_num_bytes,
- BTRFS_ORDERED_COMPRESSED, 0,
- compress_type);
+ /*
+ * For regular writes, we just use the members in @file_extent.
+ *
+ * For NOCOW, we don't really care about the numbers except @start and
+ * file_extent->num_bytes, as we won't insert a file extent item at all.
+ *
+ * For PREALLOC, we do not use ordered extent members, but
+ * btrfs_mark_extent_written() handles everything.
+ *
+ * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents,
+ * or btrfs_split_ordered_extent() cannot handle it correctly.
+ */
+ if (flags & ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)))
+ entry = alloc_ordered_extent(inode, file_offset,
+ file_extent->num_bytes,
+ file_extent->num_bytes,
+ file_extent->disk_bytenr + file_extent->offset,
+ file_extent->num_bytes, 0, flags,
+ file_extent->compression);
+ else
+ entry = alloc_ordered_extent(inode, file_offset,
+ file_extent->num_bytes,
+ file_extent->ram_bytes,
+ file_extent->disk_bytenr,
+ file_extent->disk_num_bytes,
+ file_extent->offset, flags,
+ file_extent->compression);
+ if (!IS_ERR(entry))
+ insert_ordered_extent(entry);
+ return entry;
}
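/*
 * Usage sketch for btrfs_alloc_ordered_extent(): allocate a regular (COW)
 * ordered extent for a freshly reserved data extent.  The wrapper and its
 * field values are illustrative only.
 */
static struct btrfs_ordered_extent *example_new_cow_ordered(struct btrfs_inode *inode,
							     u64 file_offset,
							     u64 disk_bytenr,
							     u64 len)
{
	struct btrfs_file_extent file_extent = {
		.disk_bytenr = disk_bytenr,
		.disk_num_bytes = len,
		.num_bytes = len,
		.ram_bytes = len,
		.offset = 0,
		.compression = BTRFS_COMPRESS_NONE,
	};

	return btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
					  1U << BTRFS_ORDERED_REGULAR);
}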
/*
@@ -290,58 +326,172 @@ int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum)
{
- struct btrfs_ordered_inode_tree *tree;
+ struct btrfs_inode *inode = entry->inode;
- tree = &BTRFS_I(entry->inode)->ordered_tree;
- spin_lock_irq(&tree->lock);
+ spin_lock(&inode->ordered_tree_lock);
list_add_tail(&sum->list, &entry->list);
- spin_unlock_irq(&tree->lock);
+ spin_unlock(&inode->ordered_tree_lock);
+}
+
+void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
+{
+ if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
+}
+
+static void finish_ordered_fn(struct btrfs_work *work)
+{
+ struct btrfs_ordered_extent *ordered_extent;
+
+ ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
+ btrfs_finish_ordered_io(ordered_extent);
+}
+
+static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
+ struct folio *folio, u64 file_offset,
+ u64 len, bool uptodate)
+{
+ struct btrfs_inode *inode = ordered->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ lockdep_assert_held(&inode->ordered_tree_lock);
+
+ if (folio) {
+ ASSERT(folio->mapping);
+ ASSERT(folio_pos(folio) <= file_offset);
+ ASSERT(file_offset + len <= folio_next_pos(folio));
+
+ /*
+ * Ordered flag indicates whether we still have
+ * pending io unfinished for the ordered extent.
+ *
+ * If it's not set, we need to skip to next range.
+ */
+ if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
+ return false;
+ btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
+ }
+
+ /* Now we're fine to update the accounting. */
+ if (WARN_ON_ONCE(len > ordered->bytes_left)) {
+ btrfs_crit(fs_info,
+"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ ordered->file_offset, ordered->num_bytes,
+ len, ordered->bytes_left);
+ ordered->bytes_left = 0;
+ } else {
+ ordered->bytes_left -= len;
+ }
+
+ if (!uptodate)
+ set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
+
+ if (ordered->bytes_left)
+ return false;
+
+ /*
+ * All the IO of the ordered extent is finished, we need to queue
+ * the finish_func to be executed.
+ */
+ set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
+ cond_wake_up(&ordered->wait);
+ refcount_inc(&ordered->refs);
+ trace_btrfs_ordered_extent_mark_finished(inode, ordered);
+ return true;
+}
+
+static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
+{
+ struct btrfs_inode *inode = ordered->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
+ fs_info->endio_freespace_worker : fs_info->endio_write_workers;
+
+ btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
+ btrfs_queue_work(wq, &ordered->work);
+}
+
+void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
+ struct folio *folio, u64 file_offset, u64 len,
+ bool uptodate)
+{
+ struct btrfs_inode *inode = ordered->inode;
+ bool ret;
+
+ trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
+
+ spin_lock(&inode->ordered_tree_lock);
+ ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
+ uptodate);
+ spin_unlock(&inode->ordered_tree_lock);
+
+ /*
+ * If this is a COW write it means we created new extent maps for the
+ * range and they point to unwritten locations if we got an error either
+ * before submitting a bio or during IO.
+ *
+ * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
+ * are queuing its completion below. During completion, at
+ * btrfs_finish_one_ordered(), we will drop the extent maps for the
+ * unwritten extents.
+ *
+ * However because completion runs in a work queue we can end up having
+ * a fast fsync running before that. In the case of direct IO, once we
+ * unlock the inode the fsync might start, and we queue the completion
+ * before unlocking the inode. In the case of buffered IO when writeback
+ * finishes (end_bbio_data_write()) we queue the completion, so if the
+ * writeback was triggered by a fast fsync, the fsync might start
+ * logging before ordered extent completion runs in the work queue.
+ *
+ * The fast fsync will log file extent items based on the extent maps it
+ * finds, so if by the time it collects extent maps the ordered extent
+ * completion didn't happen yet, it will log file extent items that
+ * point to unwritten extents, resulting in a corruption if a crash
+ * happens and the log tree is replayed. Note that a fast fsync does not
+ * wait for completion of ordered extents in order to reduce latency.
+ *
+ * Set a flag in the inode so that the next fast fsync will wait for
+ * ordered extents to complete before starting to log.
+ */
+ if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
+ set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
+
+ if (ret)
+ btrfs_queue_ordered_fn(ordered);
}
/*
* Mark all ordered extents io inside the specified range finished.
*
- * @page: The invovled page for the opeartion.
- * For uncompressed buffered IO, the page status also needs to be
+ * @folio: The involved folio for the operation.
+ * For uncompressed buffered IO, the folio status also needs to be
* updated to indicate whether the pending ordered io is finished.
* Can be NULL for direct IO and compressed write.
* For these cases, callers are ensured they won't execute the
* endio function twice.
- * @finish_func: The function to be executed when all the IO of an ordered
- * extent are finished.
*
* This function is called for endio, thus the range must have ordered
- * extent(s) coveri it.
+ * extent(s) covering it.
*/
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
- struct page *page, u64 file_offset,
- u64 num_bytes, btrfs_func_t finish_func,
- bool uptodate)
+ struct folio *folio, u64 file_offset,
+ u64 num_bytes, bool uptodate)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_workqueue *wq;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- unsigned long flags;
u64 cur = file_offset;
+ const u64 end = file_offset + num_bytes;
- if (btrfs_is_free_space_inode(inode))
- wq = fs_info->endio_freespace_worker;
- else
- wq = fs_info->endio_write_workers;
+ trace_btrfs_writepage_end_io_hook(inode, file_offset, end - 1, uptodate);
- if (page)
- ASSERT(page->mapping && page_offset(page) <= file_offset &&
- file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);
-
- spin_lock_irqsave(&tree->lock, flags);
- while (cur < file_offset + num_bytes) {
+ spin_lock(&inode->ordered_tree_lock);
+ while (cur < end) {
u64 entry_end;
- u64 end;
- u32 len;
+ u64 this_end;
+ u64 len;
- node = tree_search(tree, cur);
+ node = ordered_tree_search(inode, cur);
/* No ordered extents at all */
if (!node)
break;
@@ -382,59 +532,18 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
* |
* cur
*/
- end = min(entry->file_offset + entry->num_bytes,
- file_offset + num_bytes) - 1;
- ASSERT(end + 1 - cur < U32_MAX);
- len = end + 1 - cur;
-
- if (page) {
- /*
- * Ordered (Private2) bit indicates whether we still
- * have pending io unfinished for the ordered extent.
- *
- * If there's no such bit, we need to skip to next range.
- */
- if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
- cur += len;
- continue;
- }
- btrfs_page_clear_ordered(fs_info, page, cur, len);
- }
-
- /* Now we're fine to update the accounting */
- if (unlikely(len > entry->bytes_left)) {
- WARN_ON(1);
- btrfs_crit(fs_info,
-"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
- inode->root->root_key.objectid,
- btrfs_ino(inode),
- entry->file_offset,
- entry->num_bytes,
- len, entry->bytes_left);
- entry->bytes_left = 0;
- } else {
- entry->bytes_left -= len;
- }
-
- if (!uptodate)
- set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
-
- /*
- * All the IO of the ordered extent is finished, we need to queue
- * the finish_func to be executed.
- */
- if (entry->bytes_left == 0) {
- set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
- cond_wake_up(&entry->wait);
- refcount_inc(&entry->refs);
- spin_unlock_irqrestore(&tree->lock, flags);
- btrfs_init_work(&entry->work, finish_func, NULL, NULL);
- btrfs_queue_work(wq, &entry->work);
- spin_lock_irqsave(&tree->lock, flags);
+ this_end = min(entry_end, end);
+ len = this_end - cur;
+ ASSERT(len < U32_MAX);
+
+ if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
+ spin_unlock(&inode->ordered_tree_lock);
+ btrfs_queue_ordered_fn(entry);
+ spin_lock(&inode->ordered_tree_lock);
}
cur += len;
}
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
}
/*
@@ -458,19 +567,17 @@ bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- unsigned long flags;
bool finished = false;
- spin_lock_irqsave(&tree->lock, flags);
+ spin_lock(&inode->ordered_tree_lock);
if (cached && *cached) {
entry = *cached;
goto have_entry;
}
- node = tree_search(tree, file_offset);
+ node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@@ -499,8 +606,9 @@ out:
if (finished && cached && entry) {
*cached = entry;
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
}
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
return finished;
}
@@ -510,23 +618,18 @@ out:
*/
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
- struct list_head *cur;
- struct btrfs_ordered_sum *sum;
-
- trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);
+ trace_btrfs_ordered_extent_put(entry->inode, entry);
if (refcount_dec_and_test(&entry->refs)) {
+ struct btrfs_ordered_sum *sum;
+ struct btrfs_ordered_sum *tmp;
+
ASSERT(list_empty(&entry->root_extent_list));
ASSERT(list_empty(&entry->log_list));
ASSERT(RB_EMPTY_NODE(&entry->rb_node));
- if (entry->inode)
- btrfs_add_delayed_iput(entry->inode);
- while (!list_empty(&entry->list)) {
- cur = entry->list.next;
- sum = list_entry(cur, struct btrfs_ordered_sum, list);
- list_del(&sum->list);
+ btrfs_add_delayed_iput(entry->inode);
+ list_for_each_entry_safe(sum, tmp, &entry->list, list)
kvfree(sum);
- }
kmem_cache_free(btrfs_ordered_extent_cache, entry);
}
}
@@ -538,33 +641,47 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry)
{
- struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = btrfs_inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
bool pending;
+ bool freespace_inode;
- /* This is paired with btrfs_add_ordered_extent. */
+ /*
+ * If this is a free space inode the thread has not acquired the ordered
+ * extents lockdep map.
+ */
+ freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
+
+ btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
+ /* This is paired with alloc_ordered_extent(). */
spin_lock(&btrfs_inode->lock);
btrfs_mod_outstanding_extents(btrfs_inode, -1);
spin_unlock(&btrfs_inode->lock);
- if (root != fs_info->tree_root)
- btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
- false);
+ if (root != fs_info->tree_root) {
+ u64 release;
+
+ if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
+ release = entry->disk_num_bytes;
+ else
+ release = entry->num_bytes;
+ btrfs_delalloc_release_metadata(btrfs_inode, release,
+ test_bit(BTRFS_ORDERED_IOERR,
+ &entry->flags));
+ }
percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
fs_info->delalloc_batch);
- tree = &btrfs_inode->ordered_tree;
- spin_lock_irq(&tree->lock);
+ spin_lock(&btrfs_inode->ordered_tree_lock);
node = &entry->rb_node;
- rb_erase(node, &tree->tree);
+ rb_erase(node, &btrfs_inode->ordered_tree);
RB_CLEAR_NODE(node);
- if (tree->last == node)
- tree->last = NULL;
+ if (btrfs_inode->ordered_tree_last == node)
+ btrfs_inode->ordered_tree_last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
- spin_unlock_irq(&tree->lock);
+ spin_unlock(&btrfs_inode->ordered_tree_lock);
/*
* The current running transaction is waiting on us, we need to let it
@@ -585,7 +702,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
- ASSERT(trans);
+ ASSERT(trans || BTRFS_FS_ERROR(fs_info));
if (trans) {
if (atomic_dec_and_test(&trans->pending_ordered))
wake_up(&trans->pending_wait);
@@ -593,6 +710,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
}
}
+ btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);
+
spin_lock(&root->ordered_extent_lock);
list_del_init(&entry->root_extent_list);
root->nr_ordered_extents--;
@@ -607,6 +726,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
}
spin_unlock(&root->ordered_extent_lock);
wake_up(&entry->wait);
+ if (!freespace_inode)
+ btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
@@ -614,16 +735,16 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
struct btrfs_ordered_extent *ordered;
ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
- btrfs_start_ordered_extent(ordered, 1);
+ btrfs_start_ordered_extent(ordered);
complete(&ordered->completion);
}
/*
- * wait for all the ordered extents in a root. This is done when balancing
- * space between drives.
+ * Wait for all the ordered extents in a root. Use @bg as range or do whole
+ * range if it's NULL.
*/
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
- const u64 range_start, const u64 range_len)
+ const struct btrfs_block_group *bg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(splice);
@@ -631,7 +752,17 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
LIST_HEAD(works);
struct btrfs_ordered_extent *ordered, *next;
u64 count = 0;
- const u64 range_end = range_start + range_len;
+ u64 range_start, range_len;
+ u64 range_end;
+
+ if (bg) {
+ range_start = bg->start;
+ range_len = bg->length;
+ } else {
+ range_start = 0;
+ range_len = U64_MAX;
+ }
+ range_end = range_start + range_len;
mutex_lock(&root->ordered_extent_mutex);
spin_lock(&root->ordered_extent_lock);
@@ -653,15 +784,15 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
spin_unlock(&root->ordered_extent_lock);
btrfs_init_work(&ordered->flush_work,
- btrfs_run_ordered_extent_work, NULL, NULL);
+ btrfs_run_ordered_extent_work, NULL);
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
cond_resched();
- spin_lock(&root->ordered_extent_lock);
if (nr != U64_MAX)
nr--;
count++;
+ spin_lock(&root->ordered_extent_lock);
}
list_splice_tail(&skipped, &root->ordered_extents);
list_splice_tail(&splice, &root->ordered_extents);
@@ -678,15 +809,17 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
return count;
}
+/*
+ * Wait for @nr ordered extents that intersect the @bg, or the whole range of
+ * the filesystem if @bg is NULL.
+ */
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
- const u64 range_start, const u64 range_len)
+ const struct btrfs_block_group *bg)
{
struct btrfs_root *root;
- struct list_head splice;
+ LIST_HEAD(splice);
u64 done;
- INIT_LIST_HEAD(&splice);
-
mutex_lock(&fs_info->ordered_operations_mutex);
spin_lock(&fs_info->ordered_root_lock);
list_splice_init(&fs_info->ordered_roots, &splice);
@@ -699,14 +832,13 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
&fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
- done = btrfs_wait_ordered_extents(root, nr,
- range_start, range_len);
+ done = btrfs_wait_ordered_extents(root, nr, bg);
btrfs_put_root(root);
- spin_lock(&fs_info->ordered_root_lock);
- if (nr != U64_MAX) {
+ if (nr != U64_MAX)
nr -= done;
- }
+
+ spin_lock(&fs_info->ordered_root_lock);
}
list_splice_tail(&splice, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
@@ -714,37 +846,56 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
}
/*
- * Used to start IO or wait for a given ordered extent to finish.
+ * Start IO and wait for a given ordered extent to finish.
*
- * If wait is one, this effectively waits on page writeback for all the pages
- * in the extent, and it waits on the io completion code to insert
- * metadata into the btree corresponding to the extent
+ * Wait on page writeback for all the pages in the extent but not in
+ * [@nowriteback_start, @nowriteback_start + @nowriteback_len) and the
+ * IO completion code to insert metadata into the btree corresponding to the extent.
*/
-void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
+void btrfs_start_ordered_extent_nowriteback(struct btrfs_ordered_extent *entry,
+ u64 nowriteback_start, u32 nowriteback_len)
{
u64 start = entry->file_offset;
u64 end = start + entry->num_bytes - 1;
- struct btrfs_inode *inode = BTRFS_I(entry->inode);
+ struct btrfs_inode *inode = entry->inode;
+ bool freespace_inode;
trace_btrfs_ordered_extent_start(inode, entry);
/*
+ * If this is a free space inode do not take the ordered extents lockdep
+ * map.
+ */
+ freespace_inode = btrfs_is_free_space_inode(inode);
+
+ /*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
* for the flusher thread to find them
*/
- if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
- filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
- if (wait) {
- wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
- &entry->flags));
+ if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) {
+ if (!nowriteback_len) {
+ filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
+ } else {
+ if (start < nowriteback_start)
+ filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start,
+ nowriteback_start - 1);
+ if (nowriteback_start + nowriteback_len < end)
+ filemap_fdatawrite_range(inode->vfs_inode.i_mapping,
+ nowriteback_start + nowriteback_len,
+ end);
+ }
}
+
+ if (!freespace_inode)
+ btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
+ wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}
/*
* Used to wait on ordered extents across a large range of bytes.
*/
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len)
{
int ret = 0;
int ret_wb = 0;
@@ -753,11 +904,11 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
struct btrfs_ordered_extent *ordered;
if (start + len < start) {
- orig_end = INT_LIMIT(loff_t);
+ orig_end = OFFSET_MAX;
} else {
orig_end = start + len - 1;
- if (orig_end > INT_LIMIT(loff_t))
- orig_end = INT_LIMIT(loff_t);
+ if (orig_end > OFFSET_MAX)
+ orig_end = OFFSET_MAX;
}
/* start IO across the range first to instantiate any delalloc
@@ -774,11 +925,11 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
* before the ordered extents complete - to avoid failures (-EEXIST)
* when adding the new ordered extents to the ordered tree.
*/
- ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+ ret_wb = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, orig_end);
end = orig_end;
while (1) {
- ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
+ ordered = btrfs_lookup_first_ordered_extent(inode, end);
if (!ordered)
break;
if (ordered->file_offset > orig_end) {
@@ -789,7 +940,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
btrfs_put_ordered_extent(ordered);
break;
}
- btrfs_start_ordered_extent(ordered, 1);
+ btrfs_start_ordered_extent(ordered);
end = ordered->file_offset;
/*
* If the ordered extent had an error save the error but don't
@@ -813,24 +964,23 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
u64 file_offset)
{
- struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- unsigned long flags;
- tree = &inode->ordered_tree;
- spin_lock_irqsave(&tree->lock, flags);
- node = tree_search(tree, file_offset);
+ spin_lock(&inode->ordered_tree_lock);
+ node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
entry = NULL;
- if (entry)
+ if (entry) {
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup(inode, entry);
+ }
out:
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -840,22 +990,20 @@ out:
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct btrfs_inode *inode, u64 file_offset, u64 len)
{
- struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
- node = tree_search(tree, file_offset);
+ spin_lock(&inode->ordered_tree_lock);
+ node = ordered_tree_search(inode, file_offset);
if (!node) {
- node = tree_search(tree, file_offset + len);
+ node = ordered_tree_search(inode, file_offset + len);
if (!node)
goto out;
}
while (1) {
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (range_overlaps(entry, file_offset, len))
+ if (btrfs_range_overlaps(entry, file_offset, len))
break;
if (entry->file_offset >= file_offset + len) {
@@ -868,9 +1016,11 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
break;
}
out:
- if (entry)
+ if (entry) {
refcount_inc(&entry->refs);
- spin_unlock_irq(&tree->lock);
+ trace_btrfs_ordered_extent_lookup_range(inode, entry);
+ }
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -881,13 +1031,12 @@ out:
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *n;
- ASSERT(inode_is_locked(&inode->vfs_inode));
+ btrfs_assert_inode_locked(inode);
- spin_lock_irq(&tree->lock);
- for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+ spin_lock(&inode->ordered_tree_lock);
+ for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
struct btrfs_ordered_extent *ordered;
ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
@@ -898,8 +1047,9 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
ASSERT(list_empty(&ordered->log_list));
list_add_tail(&ordered->log_list, list);
refcount_inc(&ordered->refs);
+ trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
}
- spin_unlock_irq(&tree->lock);
+ spin_unlock(&inode->ordered_tree_lock);
}
/*
@@ -909,20 +1059,19 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
- struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
- node = tree_search(tree, file_offset);
+ spin_lock(&inode->ordered_tree_lock);
+ node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
- spin_unlock_irq(&tree->lock);
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
@@ -938,15 +1087,14 @@ out:
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct btrfs_inode *inode, u64 file_offset, u64 len)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct rb_node *cur;
struct rb_node *prev;
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&tree->lock);
- node = tree->tree.rb_node;
+ spin_lock(&inode->ordered_tree_lock);
+ node = inode->ordered_tree.rb_node;
/*
* Here we don't want to use tree_search() which will use tree->last
* and screw up the search order.
@@ -984,35 +1132,39 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
}
if (prev) {
entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
- if (range_overlaps(entry, file_offset, len))
+ if (btrfs_range_overlaps(entry, file_offset, len))
goto out;
}
if (next) {
entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
- if (range_overlaps(entry, file_offset, len))
+ if (btrfs_range_overlaps(entry, file_offset, len))
goto out;
}
/* No ordered extent in the range */
entry = NULL;
out:
- if (entry)
+ if (entry) {
refcount_inc(&entry->refs);
- spin_unlock_irq(&tree->lock);
+ trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
+ }
+
+ spin_unlock(&inode->ordered_tree_lock);
return entry;
}
/*
- * btrfs_flush_ordered_range - Lock the passed range and ensures all pending
- * ordered extents in it are run to completion.
+ * Lock the passed range and ensure all pending ordered extents in it are run
+ * to completion.
*
* @inode: Inode whose ordered tree is to be searched
* @start: Beginning of range to flush
* @end: Last byte of range to lock
* @cached_state: If passed, will return the extent state responsible for the
- * locked range. It's the caller's responsibility to free the cached state.
+ * locked range. It's the caller's responsibility to free the
+ * cached state.
*
- * This function always returns with the given range locked, ensuring after it's
- * called no order extent can be pending.
+ * Always return with the given range locked, ensuring that after it's called
+ * no ordered extent can be pending.
*/
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
@@ -1026,7 +1178,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
cachedp = cached_state;
while (1) {
- lock_extent_bits(&inode->io_tree, start, end, cachedp);
+ btrfs_lock_extent(&inode->io_tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1);
if (!ordered) {
@@ -1039,104 +1191,161 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
refcount_dec(&cache->refs);
break;
}
- unlock_extent_cached(&inode->io_tree, start, end, cachedp);
- btrfs_start_ordered_extent(ordered, 1);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cachedp);
+ btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
}
-static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
- u64 len)
+/*
+ * Lock the passed range and ensure all pending ordered extents in it are run
+ * to completion in nowait mode.
+ *
+ * Return true if the range was locked and no pending ordered extents were
+ * found, otherwise false.
+ */
+bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state)
{
- struct inode *inode = ordered->inode;
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- u64 file_offset = ordered->file_offset + pos;
- u64 disk_bytenr = ordered->disk_bytenr + pos;
- u64 num_bytes = len;
- u64 disk_num_bytes = len;
- int type;
- unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
- int compress_type = ordered->compress_type;
- unsigned long weight;
- int ret;
+ struct btrfs_ordered_extent *ordered;
- weight = hweight_long(flags_masked);
- WARN_ON_ONCE(weight > 1);
- if (!weight)
- type = 0;
- else
- type = __ffs(flags_masked);
+ if (!btrfs_try_lock_extent(&inode->io_tree, start, end, cached_state))
+ return false;
- /*
- * The splitting extent is already counted and will be added again
- * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
- * double counting.
- */
- percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
- fs_info->delalloc_batch);
- if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
- WARN_ON_ONCE(1);
- ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
- file_offset, disk_bytenr, num_bytes,
- disk_num_bytes, compress_type);
- } else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
- ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
- disk_bytenr, num_bytes, disk_num_bytes, type);
- } else {
- ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
- disk_bytenr, num_bytes, disk_num_bytes, type);
- }
+ ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
+ if (!ordered)
+ return true;
- return ret;
+ btrfs_put_ordered_extent(ordered);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
+
+ return false;
}
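/*
 * Editorial sketch (not part of the patch): a typical caller of the nowait
 * helper above would bail out with -EAGAIN instead of blocking when an
 * ordered extent is still pending. The function name below is hypothetical
 * and only illustrates the intended calling convention.
 */
static int example_nowait_lock(struct btrfs_inode *inode, u64 start, u64 end,
			       struct extent_state **cached)
{
	/* Returns with the extent range locked only when nothing is pending. */
	if (!btrfs_try_lock_ordered_range(inode, start, end, cached))
		return -EAGAIN;
	return 0;
}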
-int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
- u64 post)
+/* Split out a new ordered extent for the first @len bytes of @ordered. */
+struct btrfs_ordered_extent *btrfs_split_ordered_extent(
+ struct btrfs_ordered_extent *ordered, u64 len)
{
- struct inode *inode = ordered->inode;
- struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
+ struct btrfs_inode *inode = ordered->inode;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 file_offset = ordered->file_offset;
+ u64 disk_bytenr = ordered->disk_bytenr;
+ unsigned long flags = ordered->flags;
+ struct btrfs_ordered_sum *sum, *tmpsum;
+ struct btrfs_ordered_extent *new;
struct rb_node *node;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- int ret = 0;
+ u64 offset = 0;
- spin_lock_irq(&tree->lock);
- /* Remove from tree once */
- node = &ordered->rb_node;
- rb_erase(node, &tree->tree);
- RB_CLEAR_NODE(node);
- if (tree->last == node)
- tree->last = NULL;
-
- ordered->file_offset += pre;
- ordered->disk_bytenr += pre;
- ordered->num_bytes -= (pre + post);
- ordered->disk_num_bytes -= (pre + post);
- ordered->bytes_left -= (pre + post);
-
- /* Re-insert the node */
- node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
- if (node)
- btrfs_panic(fs_info, -EEXIST,
- "zoned: inconsistency in ordered tree at offset %llu",
- ordered->file_offset);
+ trace_btrfs_ordered_extent_split(inode, ordered);
- spin_unlock_irq(&tree->lock);
+ ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));
- if (pre)
- ret = clone_ordered_extent(ordered, 0, pre);
- if (ret == 0 && post)
- ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
- post);
+ /*
+ * The entire bio must be covered by the ordered extent, but we can't
+ * reduce the original extent to a zero length either.
+ */
+ if (WARN_ON_ONCE(len >= ordered->num_bytes))
+ return ERR_PTR(-EINVAL);
+ /*
+ * If our ordered extent had an error there's no point in continuing.
+ * The error may have come from a transaction abort done either by this
+ * task or some other concurrent task, and the transaction abort path
+ * iterates over all existing ordered extents and sets the flag
+ * BTRFS_ORDERED_IOERR on them.
+ */
+ if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
+ const int fs_error = BTRFS_FS_ERROR(fs_info);
- return ret;
+ return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
+ }
+ /* We cannot split partially completed ordered extents. */
+ if (ordered->bytes_left) {
+ ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
+ if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
+ return ERR_PTR(-EINVAL);
+ }
+ /* We cannot split a compressed ordered extent. */
+ if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
+ return ERR_PTR(-EINVAL);
+
+ new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
+ len, 0, flags, ordered->compress_type);
+ if (IS_ERR(new))
+ return new;
+
+ /* One ref for the tree. */
+ refcount_inc(&new->refs);
+
+ /*
+ * Take the root's ordered_extent_lock to avoid a race with
+ * btrfs_wait_ordered_extents() when updating the disk_bytenr and
+ * disk_num_bytes fields of the ordered extent below.
+ *
+ * There's no concern about a previous caller of
+ * btrfs_wait_ordered_extents() getting the trimmed ordered extent
+ * before we insert the new one, because even if it got the ordered
+ * extent before it was trimmed and before the new one was inserted, the
+ * ordered extent could still be trimmed right before or during its use,
+ * and it would miss the new ordered extent either way.
+ * There's no way around this and it's harmless for current use cases,
+ * so we take the root's ordered_extent_lock to fix that race during
+ * trimming and silence tools like KCSAN.
+ */
+ spin_lock_irq(&root->ordered_extent_lock);
+ spin_lock(&inode->ordered_tree_lock);
+
+ /*
+ * We don't have overlapping ordered extents (that would imply double
+ * allocation of extents) and we checked above that the split length
+ * does not cross the ordered extent's num_bytes field, so there's
+ * no need to remove it and re-insert it in the tree.
+ */
+ ordered->file_offset += len;
+ ordered->disk_bytenr += len;
+ ordered->num_bytes -= len;
+ ordered->disk_num_bytes -= len;
+ ordered->ram_bytes -= len;
+
+ if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
+ ASSERT(ordered->bytes_left == 0);
+ new->bytes_left = 0;
+ } else {
+ ordered->bytes_left -= len;
+ }
+
+ if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
+ if (ordered->truncated_len > len) {
+ ordered->truncated_len -= len;
+ } else {
+ new->truncated_len = ordered->truncated_len;
+ ordered->truncated_len = 0;
+ }
+ }
+
+ list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
+ if (offset == len)
+ break;
+ list_move_tail(&sum->list, &new->list);
+ offset += sum->len;
+ }
+
+ node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
+ if (unlikely(node))
+ btrfs_panic(fs_info, -EEXIST,
+ "inconsistency in ordered tree at offset %llu after split",
+ new->file_offset);
+ spin_unlock(&inode->ordered_tree_lock);
+
+ list_add_tail(&new->root_extent_list, &root->ordered_extents);
+ root->nr_ordered_extents++;
+ spin_unlock_irq(&root->ordered_extent_lock);
+ return new;
}
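/*
 * Editorial sketch (not part of the patch): illustrates the split arithmetic
 * above. Splitting a 1 MiB ordered extent at bio_len = 256 KiB returns a new
 * extent covering the first 256 KiB at the original disk_bytenr, while the
 * original has file_offset/disk_bytenr advanced by 256 KiB and
 * num_bytes/disk_num_bytes/ram_bytes reduced by 256 KiB. The function name
 * below is hypothetical.
 */
static struct btrfs_ordered_extent *example_split_for_bio(
		struct btrfs_ordered_extent *ordered, u64 bio_len)
{
	struct btrfs_ordered_extent *new;

	/* btrfs_split_ordered_extent() rejects bio_len >= num_bytes. */
	ASSERT(bio_len < ordered->num_bytes);

	new = btrfs_split_ordered_extent(ordered, bio_len);
	if (IS_ERR(new))
		return new;

	/* "new" now tracks the first bio_len bytes; "ordered" starts after them. */
	return new;
}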
int __init ordered_data_init(void)
{
- btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
- sizeof(struct btrfs_ordered_extent), 0,
- SLAB_MEM_SPREAD,
- NULL);
+ btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
if (!btrfs_ordered_extent_cache)
return -ENOMEM;
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 4194e960ff61..1e6b0b182b29 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -6,21 +6,30 @@
#ifndef BTRFS_ORDERED_DATA_H
#define BTRFS_ORDERED_DATA_H
-/* one of these per inode */
-struct btrfs_ordered_inode_tree {
- spinlock_t lock;
- struct rb_root tree;
- struct rb_node *last;
-};
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+#include <linux/rbtree.h>
+#include <linux/wait.h>
+#include "async-thread.h"
+
+struct inode;
+struct page;
+struct extent_state;
+struct btrfs_block_group;
+struct btrfs_inode;
+struct btrfs_root;
+struct btrfs_fs_info;
struct btrfs_ordered_sum {
- /* bytenr is the start of this extent on disk */
- u64 bytenr;
-
/*
- * this is the length in bytes covered by the sums array below.
+ * Logical start address and length of the blocks covered by
+ * the sums array.
*/
- int len;
+ u64 logical;
+ u32 len;
+
struct list_head list;
/* last field is a variable length array of csums */
u8 sums[];
@@ -74,8 +83,18 @@ enum {
BTRFS_ORDERED_LOGGED_CSUM,
/* We wait for this extent to complete in the current transaction */
BTRFS_ORDERED_PENDING,
+ /* BTRFS_IOC_ENCODED_WRITE */
+ BTRFS_ORDERED_ENCODED,
};
+/* BTRFS_ORDERED_* flags that specify the type of the extent. */
+#define BTRFS_ORDERED_TYPE_FLAGS ((1UL << BTRFS_ORDERED_REGULAR) | \
+ (1UL << BTRFS_ORDERED_NOCOW) | \
+ (1UL << BTRFS_ORDERED_PREALLOC) | \
+ (1UL << BTRFS_ORDERED_COMPRESSED) | \
+ (1UL << BTRFS_ORDERED_DIRECT) | \
+ (1UL << BTRFS_ORDERED_ENCODED))
+
struct btrfs_ordered_extent {
/* logical offset in the file */
u64 file_offset;
@@ -84,21 +103,16 @@ struct btrfs_ordered_extent {
* These fields directly correspond to the same fields in
* btrfs_file_extent_item.
*/
- u64 disk_bytenr;
u64 num_bytes;
+ u64 ram_bytes;
+ u64 disk_bytenr;
u64 disk_num_bytes;
+ u64 offset;
/* number of bytes that still need writing */
u64 bytes_left;
/*
- * the end of the ordered extent which is behind it but
- * didn't update disk_i_size. Please see the comment of
- * btrfs_ordered_update_i_size();
- */
- u64 outstanding_isize;
-
- /*
* If we get truncated we need to adjust the file extent we enter for
* this ordered extent so that we do not expose stale data.
*/
@@ -117,7 +131,7 @@ struct btrfs_ordered_extent {
refcount_t refs;
/* the inode we belong to */
- struct inode *inode;
+ struct btrfs_inode *inode;
/* list of checksums for insertion when the extent io is done */
struct list_head list;
@@ -140,59 +154,52 @@ struct btrfs_ordered_extent {
struct btrfs_work flush_work;
struct list_head work_list;
- /*
- * Used to reverse-map physical address returned from ZONE_APPEND write
- * command in a workqueue context
- */
- u64 physical;
- struct block_device *bdev;
+ struct list_head bioc_list;
};
-/*
- * calculates the total size you need to allocate for an ordered sum
- * structure spanning 'bytes' in the file
- */
-static inline int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info,
- unsigned long bytes)
-{
- int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
-
- return sizeof(struct btrfs_ordered_sum) + num_sectors * fs_info->csum_size;
-}
-
-static inline void
-btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
-{
- spin_lock_init(&t->lock);
- t->tree = RB_ROOT;
- t->last = NULL;
-}
+int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent);
+int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry);
+void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
+ struct folio *folio, u64 file_offset, u64 len,
+ bool uptodate);
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
- struct page *page, u64 file_offset,
- u64 num_bytes, btrfs_func_t finish_func,
- bool uptodate);
+ struct folio *folio, u64 file_offset,
+ u64 num_bytes, bool uptodate);
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);
-int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
- int type);
-int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int type);
-int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
- u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int compress_type);
+
+/*
+ * This represents details about the target file extent item of a write operation.
+ */
+struct btrfs_file_extent {
+ u64 disk_bytenr;
+ u64 disk_num_bytes;
+ u64 num_bytes;
+ u64 ram_bytes;
+ u64 offset;
+ u8 compression;
+};
+
+struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
+ struct btrfs_inode *inode, u64 file_offset,
+ const struct btrfs_file_extent *file_extent, unsigned long flags);
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum);
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
u64 file_offset);
-void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait);
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+void btrfs_start_ordered_extent_nowriteback(struct btrfs_ordered_extent *entry,
+ u64 nowriteback_start, u32 nowriteback_len);
+static inline void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
+{
+ return btrfs_start_ordered_extent_nowriteback(entry, 0, 0);
+}
+
+int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset);
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
@@ -204,14 +211,17 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list);
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
- const u64 range_start, const u64 range_len);
+ const struct btrfs_block_group *bg);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
- const u64 range_start, const u64 range_len);
+ const struct btrfs_block_group *bg);
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
struct extent_state **cached_state);
-int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
- u64 post);
+bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state);
+struct btrfs_ordered_extent *btrfs_split_ordered_extent(
+ struct btrfs_ordered_extent *ordered, u64 len);
+void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered);
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index aa534108c1e2..9f3ad124104f 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -4,14 +4,13 @@
*/
#include "ctree.h"
-#include "disk-io.h"
+#include "orphan.h"
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
- int ret = 0;
key.objectid = BTRFS_ORPHAN_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
@@ -21,16 +20,13 @@ int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
-
- btrfs_free_path(path);
- return ret;
+ return btrfs_insert_empty_item(trans, root, path, &key, 0);
}
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret = 0;
@@ -44,15 +40,9 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
- if (ret) { /* JDM: Really? */
- ret = -ENOENT;
- goto out;
- }
-
- ret = btrfs_del_item(trans, root, path);
+ return ret;
+ if (ret)
+ return -ENOENT;
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, root, path);
}
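/*
 * Editorial note (not part of the patch): BTRFS_PATH_AUTO_FREE used above is
 * presumably built on the scope-based cleanup helpers from <linux/cleanup.h>,
 * roughly along these lines; the exact definition lives elsewhere in the tree.
 */
#include <linux/cleanup.h>

DEFINE_FREE(btrfs_free_path, struct btrfs_path *, if (_T) btrfs_free_path(_T))

#define BTRFS_PATH_AUTO_FREE(path_name)					\
	struct btrfs_path *path_name __free(btrfs_free_path) = NULL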
diff --git a/fs/btrfs/orphan.h b/fs/btrfs/orphan.h
new file mode 100644
index 000000000000..aa54a88a60de
--- /dev/null
+++ b/fs/btrfs/orphan.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_ORPHAN_H
+#define BTRFS_ORPHAN_H
+
+#include <linux/types.h>
+
+struct btrfs_trans_handle;
+struct btrfs_root;
+
+int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 offset);
+int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 offset);
+
+#endif
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index aae1027bd76a..f189bf09ce6a 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -3,13 +3,25 @@
* Copyright (C) 2007 Oracle. All rights reserved.
*/
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
+#include "file-item.h"
#include "print-tree.h"
+#include "accessors.h"
+#include "tree-checker.h"
+#include "volumes.h"
+#include "raid-stripe-tree.h"
+
+/*
+ * Large enough buffer size for the stringification of any key type, yet short
+ * enough to use the stack and avoid allocations.
+ */
+#define KEY_TYPE_BUF_SIZE 32
struct root_name_map {
u64 id;
- char name[16];
+ const char *name;
};
static const struct root_name_map root_map[] = {
@@ -23,7 +35,9 @@ static const struct root_name_map root_map[] = {
{ BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" },
{ BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" },
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" },
+ { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
+ { BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" },
};
const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
@@ -45,7 +59,7 @@ const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
return buf;
}
-static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
+static void print_chunk(const struct extent_buffer *eb, struct btrfs_chunk *chunk)
{
int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
int i;
@@ -58,7 +72,7 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
btrfs_stripe_offset_nr(eb, chunk, i));
}
}
-static void print_dev_item(struct extent_buffer *eb,
+static void print_dev_item(const struct extent_buffer *eb,
struct btrfs_dev_item *dev_item)
{
pr_info("\t\tdev item devid %llu total_bytes %llu bytes used %llu\n",
@@ -66,7 +80,7 @@ static void print_dev_item(struct extent_buffer *eb,
btrfs_device_total_bytes(eb, dev_item),
btrfs_device_bytes_used(eb, dev_item));
}
-static void print_extent_data_ref(struct extent_buffer *eb,
+static void print_extent_data_ref(const struct extent_buffer *eb,
struct btrfs_extent_data_ref *ref)
{
pr_cont("extent data backref root %llu objectid %llu offset %llu count %u\n",
@@ -76,23 +90,33 @@ static void print_extent_data_ref(struct extent_buffer *eb,
btrfs_extent_data_ref_count(eb, ref));
}
-static void print_extent_item(struct extent_buffer *eb, int slot, int type)
+static void print_extent_owner_ref(const struct extent_buffer *eb,
+ const struct btrfs_extent_owner_ref *ref)
+{
+ ASSERT(btrfs_fs_incompat(eb->fs_info, SIMPLE_QUOTA));
+ pr_cont("extent data owner root %llu\n", btrfs_extent_owner_ref_root_id(eb, ref));
+}
+
+static void print_extent_item(const struct extent_buffer *eb, int slot, int type)
{
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
+ struct btrfs_extent_owner_ref *oref;
struct btrfs_disk_key key;
unsigned long end;
unsigned long ptr;
- u32 item_size = btrfs_item_size_nr(eb, slot);
+ u32 item_size = btrfs_item_size(eb, slot);
u64 flags;
u64 offset;
int ref_index = 0;
if (unlikely(item_size < sizeof(*ei))) {
- btrfs_print_v0_err(eb->fs_info);
- btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+ btrfs_err(eb->fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
+ return;
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
@@ -107,7 +131,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_tree_block_key(eb, info, &key);
- pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
+ pr_info("\t\ttree block key " BTRFS_KEY_FMT " level %d\n",
btrfs_disk_key_objectid(&key), key.type,
btrfs_disk_key_offset(&key),
btrfs_tree_block_level(eb, info));
@@ -147,14 +171,18 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
pr_cont("shared data backref parent %llu count %u\n",
offset, btrfs_shared_data_ref_count(eb, sref));
/*
- * offset is supposed to be a tree block which
- * must be aligned to nodesize.
+ * Offset is supposed to be a tree block which must be
+ * aligned to sectorsize.
*/
- if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
+ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
pr_info(
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
break;
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
+ print_extent_owner_ref(eb, oref);
+ break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
eb->start, type);
@@ -165,11 +193,11 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
WARN_ON(ptr > end);
}
-static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
+static void print_uuid_item(const struct extent_buffer *l, unsigned long offset,
u32 item_size)
{
if (!IS_ALIGNED(item_size, sizeof(u64))) {
- pr_warn("BTRFS: uuid item with illegal size %lu!\n",
+ btrfs_warn(l->fs_info, "uuid item with illegal size %lu",
(unsigned long)item_size);
return;
}
@@ -183,34 +211,231 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
}
}
+static void print_raid_stripe_key(const struct extent_buffer *eb, u32 item_size,
+ struct btrfs_stripe_extent *stripe)
+{
+ const int num_stripes = btrfs_num_raid_stripes(item_size);
+
+ for (int i = 0; i < num_stripes; i++)
+ pr_info("\t\t\tstride %d devid %llu physical %llu\n",
+ i, btrfs_raid_stride_devid(eb, &stripe->strides[i]),
+ btrfs_raid_stride_physical(eb, &stripe->strides[i]));
+}
+
/*
* Helper to output refs and locking status of extent buffer. Useful to debug
* race condition related problems.
*/
-static void print_eb_refs_lock(struct extent_buffer *eb)
+static void print_eb_refs_lock(const struct extent_buffer *eb)
{
#ifdef CONFIG_BTRFS_DEBUG
btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
- atomic_read(&eb->refs), eb->lock_owner, current->pid);
+ refcount_read(&eb->refs), eb->lock_owner, current->pid);
#endif
}
-void btrfs_print_leaf(struct extent_buffer *l)
+static void print_timespec(const struct extent_buffer *eb,
+ struct btrfs_timespec *timespec,
+ const char *prefix, const char *suffix)
+{
+ const u64 secs = btrfs_timespec_sec(eb, timespec);
+ const u32 nsecs = btrfs_timespec_nsec(eb, timespec);
+
+ pr_info("%s%llu.%u%s", prefix, secs, nsecs, suffix);
+}
+
+static void print_inode_item(const struct extent_buffer *eb, int i)
+{
+ struct btrfs_inode_item *ii = btrfs_item_ptr(eb, i, struct btrfs_inode_item);
+
+ pr_info("\t\tinode generation %llu transid %llu size %llu nbytes %llu\n",
+ btrfs_inode_generation(eb, ii), btrfs_inode_transid(eb, ii),
+ btrfs_inode_size(eb, ii), btrfs_inode_nbytes(eb, ii));
+ pr_info("\t\tblock group %llu mode %o links %u uid %u gid %u\n",
+ btrfs_inode_block_group(eb, ii), btrfs_inode_mode(eb, ii),
+ btrfs_inode_nlink(eb, ii), btrfs_inode_uid(eb, ii),
+ btrfs_inode_gid(eb, ii));
+ pr_info("\t\trdev %llu sequence %llu flags 0x%llx\n",
+ btrfs_inode_rdev(eb, ii), btrfs_inode_sequence(eb, ii),
+ btrfs_inode_flags(eb, ii));
+ print_timespec(eb, &ii->atime, "\t\tatime ", "\n");
+ print_timespec(eb, &ii->ctime, "\t\tctime ", "\n");
+ print_timespec(eb, &ii->mtime, "\t\tmtime ", "\n");
+ print_timespec(eb, &ii->otime, "\t\totime ", "\n");
+}
+
+static void print_dir_item(const struct extent_buffer *eb, int i)
+{
+ const u32 size = btrfs_item_size(eb, i);
+ struct btrfs_dir_item *di = btrfs_item_ptr(eb, i, struct btrfs_dir_item);
+ u32 cur = 0;
+
+ while (cur < size) {
+ const u32 name_len = btrfs_dir_name_len(eb, di);
+ const u32 data_len = btrfs_dir_data_len(eb, di);
+ const u32 len = sizeof(*di) + name_len + data_len;
+ struct btrfs_key location;
+
+ btrfs_dir_item_key_to_cpu(eb, di, &location);
+ pr_info("\t\tlocation key " BTRFS_KEY_FMT " type %d\n",
+ BTRFS_KEY_FMT_VALUE(&location), btrfs_dir_ftype(eb, di));
+ pr_info("\t\ttransid %llu data_len %u name_len %u\n",
+ btrfs_dir_transid(eb, di), data_len, name_len);
+ di = (struct btrfs_dir_item *)((char *)di + len);
+ cur += len;
+ }
+}
+
+static void print_inode_ref_item(const struct extent_buffer *eb, int i)
+{
+ const u32 size = btrfs_item_size(eb, i);
+ struct btrfs_inode_ref *ref = btrfs_item_ptr(eb, i, struct btrfs_inode_ref);
+ u32 cur = 0;
+
+ while (cur < size) {
+ const u64 index = btrfs_inode_ref_index(eb, ref);
+ const u32 name_len = btrfs_inode_ref_name_len(eb, ref);
+ const u32 len = sizeof(*ref) + name_len;
+
+ pr_info("\t\tindex %llu name_len %u\n", index, name_len);
+ ref = (struct btrfs_inode_ref *)((char *)ref + len);
+ cur += len;
+ }
+}
+
+static void print_inode_extref_item(const struct extent_buffer *eb, int i)
+{
+ const u32 size = btrfs_item_size(eb, i);
+ struct btrfs_inode_extref *extref;
+ u32 cur = 0;
+
+ extref = btrfs_item_ptr(eb, i, struct btrfs_inode_extref);
+ while (cur < size) {
+ const u64 index = btrfs_inode_extref_index(eb, extref);
+ const u32 name_len = btrfs_inode_extref_name_len(eb, extref);
+ const u64 parent = btrfs_inode_extref_parent(eb, extref);
+ const u32 len = sizeof(*extref) + name_len;
+
+ pr_info("\t\tindex %llu parent %llu name_len %u\n",
+ index, parent, name_len);
+ extref = (struct btrfs_inode_extref *)((char *)extref + len);
+ cur += len;
+ }
+}
+
+static void print_dir_log_index_item(const struct extent_buffer *eb, int i)
+{
+ struct btrfs_dir_log_item *dlog;
+
+ dlog = btrfs_item_ptr(eb, i, struct btrfs_dir_log_item);
+ pr_info("\t\tdir log end %llu\n", btrfs_dir_log_end(eb, dlog));
+}
+
+static void print_extent_csum(const struct extent_buffer *eb, int i)
+{
+ const struct btrfs_fs_info *fs_info = eb->fs_info;
+ const u32 size = btrfs_item_size(eb, i);
+ const u32 csum_bytes = (size / fs_info->csum_size) * fs_info->sectorsize;
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(eb, &key, i);
+ pr_info("\t\trange start %llu end %llu length %u\n",
+ key.offset, key.offset + csum_bytes, csum_bytes);
+}
+
+static void print_file_extent_item(const struct extent_buffer *eb, int i)
+{
+ struct btrfs_file_extent_item *fi;
+
+ fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+ pr_info("\t\tgeneration %llu type %hhu\n",
+ btrfs_file_extent_generation(eb, fi),
+ btrfs_file_extent_type(eb, fi));
+
+ if (btrfs_file_extent_type(eb, fi) == BTRFS_FILE_EXTENT_INLINE) {
+ pr_info("\t\tinline extent data size %u ram_bytes %llu compression %hhu\n",
+ btrfs_file_extent_inline_item_len(eb, i),
+ btrfs_file_extent_ram_bytes(eb, fi),
+ btrfs_file_extent_compression(eb, fi));
+ return;
+ }
+
+ pr_info("\t\textent data disk bytenr %llu nr %llu\n",
+ btrfs_file_extent_disk_bytenr(eb, fi),
+ btrfs_file_extent_disk_num_bytes(eb, fi));
+ pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
+ btrfs_file_extent_offset(eb, fi),
+ btrfs_file_extent_num_bytes(eb, fi),
+ btrfs_file_extent_ram_bytes(eb, fi));
+ pr_info("\t\textent compression %hhu\n",
+ btrfs_file_extent_compression(eb, fi));
+}
+
+static void key_type_string(const struct btrfs_key *key, char *buf, int buf_size)
+{
+ static const char *key_to_str[256] = {
+ [BTRFS_INODE_ITEM_KEY] = "INODE_ITEM",
+ [BTRFS_INODE_REF_KEY] = "INODE_REF",
+ [BTRFS_INODE_EXTREF_KEY] = "INODE_EXTREF",
+ [BTRFS_DIR_ITEM_KEY] = "DIR_ITEM",
+ [BTRFS_DIR_INDEX_KEY] = "DIR_INDEX",
+ [BTRFS_DIR_LOG_ITEM_KEY] = "DIR_LOG_ITEM",
+ [BTRFS_DIR_LOG_INDEX_KEY] = "DIR_LOG_INDEX",
+ [BTRFS_XATTR_ITEM_KEY] = "XATTR_ITEM",
+ [BTRFS_VERITY_DESC_ITEM_KEY] = "VERITY_DESC_ITEM",
+ [BTRFS_VERITY_MERKLE_ITEM_KEY] = "VERITY_MERKLE_ITEM",
+ [BTRFS_ORPHAN_ITEM_KEY] = "ORPHAN_ITEM",
+ [BTRFS_ROOT_ITEM_KEY] = "ROOT_ITEM",
+ [BTRFS_ROOT_REF_KEY] = "ROOT_REF",
+ [BTRFS_ROOT_BACKREF_KEY] = "ROOT_BACKREF",
+ [BTRFS_EXTENT_ITEM_KEY] = "EXTENT_ITEM",
+ [BTRFS_METADATA_ITEM_KEY] = "METADATA_ITEM",
+ [BTRFS_TREE_BLOCK_REF_KEY] = "TREE_BLOCK_REF",
+ [BTRFS_SHARED_BLOCK_REF_KEY] = "SHARED_BLOCK_REF",
+ [BTRFS_EXTENT_DATA_REF_KEY] = "EXTENT_DATA_REF",
+ [BTRFS_SHARED_DATA_REF_KEY] = "SHARED_DATA_REF",
+ [BTRFS_EXTENT_OWNER_REF_KEY] = "EXTENT_OWNER_REF",
+ [BTRFS_EXTENT_CSUM_KEY] = "EXTENT_CSUM",
+ [BTRFS_EXTENT_DATA_KEY] = "EXTENT_DATA",
+ [BTRFS_BLOCK_GROUP_ITEM_KEY] = "BLOCK_GROUP_ITEM",
+ [BTRFS_FREE_SPACE_INFO_KEY] = "FREE_SPACE_INFO",
+ [BTRFS_FREE_SPACE_EXTENT_KEY] = "FREE_SPACE_EXTENT",
+ [BTRFS_FREE_SPACE_BITMAP_KEY] = "FREE_SPACE_BITMAP",
+ [BTRFS_CHUNK_ITEM_KEY] = "CHUNK_ITEM",
+ [BTRFS_DEV_ITEM_KEY] = "DEV_ITEM",
+ [BTRFS_DEV_EXTENT_KEY] = "DEV_EXTENT",
+ [BTRFS_TEMPORARY_ITEM_KEY] = "TEMPORARY_ITEM",
+ [BTRFS_DEV_REPLACE_KEY] = "DEV_REPLACE",
+ [BTRFS_STRING_ITEM_KEY] = "STRING_ITEM",
+ [BTRFS_QGROUP_STATUS_KEY] = "QGROUP_STATUS",
+ [BTRFS_QGROUP_RELATION_KEY] = "QGROUP_RELATION",
+ [BTRFS_QGROUP_INFO_KEY] = "QGROUP_INFO",
+ [BTRFS_QGROUP_LIMIT_KEY] = "QGROUP_LIMIT",
+ [BTRFS_PERSISTENT_ITEM_KEY] = "PERSISTENT_ITEM",
+ [BTRFS_UUID_KEY_SUBVOL] = "UUID_KEY_SUBVOL",
+ [BTRFS_UUID_KEY_RECEIVED_SUBVOL] = "UUID_KEY_RECEIVED_SUBVOL",
+ [BTRFS_RAID_STRIPE_KEY] = "RAID_STRIPE",
+ };
+
+ if (key->type == 0 && key->objectid == BTRFS_FREE_SPACE_OBJECTID)
+ scnprintf(buf, buf_size, "UNTYPED");
+ else if (key_to_str[key->type])
+ scnprintf(buf, buf_size, "%s", key_to_str[key->type]);
+ else
+ scnprintf(buf, buf_size, "UNKNOWN.%d", key->type);
+}
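/*
 * Editorial note (not part of the patch): the BTRFS_KEY_FMT and
 * BTRFS_KEY_FMT_VALUE macros used by the pr_info() calls in this file are
 * assumed to expand to the usual "(objectid type offset)" triple, roughly:
 */
#define BTRFS_KEY_FMT			"(%llu %u %llu)"
#define BTRFS_KEY_FMT_VALUE(key)	(key)->objectid, (key)->type, (key)->offset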
+
+void btrfs_print_leaf(const struct extent_buffer *l)
{
struct btrfs_fs_info *fs_info;
int i;
u32 type, nr;
- struct btrfs_item *item;
struct btrfs_root_item *ri;
- struct btrfs_dir_item *di;
- struct btrfs_inode_item *ii;
struct btrfs_block_group_item *bi;
- struct btrfs_file_extent_item *fi;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_dev_extent *dev_extent;
struct btrfs_key key;
- struct btrfs_key found_key;
if (!l)
return;
@@ -224,26 +449,35 @@ void btrfs_print_leaf(struct extent_buffer *l)
btrfs_leaf_free_space(l), btrfs_header_owner(l));
print_eb_refs_lock(l);
for (i = 0 ; i < nr ; i++) {
- item = btrfs_item_nr(i);
+ char key_buf[KEY_TYPE_BUF_SIZE];
+
btrfs_item_key_to_cpu(l, &key, i);
type = key.type;
- pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
- i, key.objectid, type, key.offset,
- btrfs_item_offset(l, item), btrfs_item_size(l, item));
+ key_type_string(&key, key_buf, KEY_TYPE_BUF_SIZE);
+
+ pr_info("\titem %d key (%llu %s %llu) itemoff %d itemsize %d\n",
+ i, key.objectid, key_buf, key.offset,
+ btrfs_item_offset(l, i), btrfs_item_size(l, i));
switch (type) {
case BTRFS_INODE_ITEM_KEY:
- ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
- pr_info("\t\tinode generation %llu size %llu mode %o\n",
- btrfs_inode_generation(l, ii),
- btrfs_inode_size(l, ii),
- btrfs_inode_mode(l, ii));
+ print_inode_item(l, i);
+ break;
+ case BTRFS_INODE_REF_KEY:
+ print_inode_ref_item(l, i);
+ break;
+ case BTRFS_INODE_EXTREF_KEY:
+ print_inode_extref_item(l, i);
break;
case BTRFS_DIR_ITEM_KEY:
- di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
- btrfs_dir_item_key_to_cpu(l, di, &found_key);
- pr_info("\t\tdir oid %llu type %u\n",
- found_key.objectid,
- btrfs_dir_type(l, di));
+ case BTRFS_DIR_INDEX_KEY:
+ case BTRFS_XATTR_ITEM_KEY:
+ print_dir_item(l, i);
+ break;
+ case BTRFS_DIR_LOG_INDEX_KEY:
+ print_dir_log_index_item(l, i);
+ break;
+ case BTRFS_EXTENT_CSUM_KEY:
+ print_extent_csum(l, i);
break;
case BTRFS_ROOT_ITEM_KEY:
ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
@@ -273,25 +507,7 @@ void btrfs_print_leaf(struct extent_buffer *l)
btrfs_shared_data_ref_count(l, sref));
break;
case BTRFS_EXTENT_DATA_KEY:
- fi = btrfs_item_ptr(l, i,
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(l, fi) ==
- BTRFS_FILE_EXTENT_INLINE) {
- pr_info("\t\tinline extent data size %llu\n",
- btrfs_file_extent_ram_bytes(l, fi));
- break;
- }
- pr_info("\t\textent data disk bytenr %llu nr %llu\n",
- btrfs_file_extent_disk_bytenr(l, fi),
- btrfs_file_extent_disk_num_bytes(l, fi));
- pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
- btrfs_file_extent_offset(l, fi),
- btrfs_file_extent_num_bytes(l, fi),
- btrfs_file_extent_ram_bytes(l, fi));
- break;
- case BTRFS_EXTENT_REF_V0_KEY:
- btrfs_print_v0_err(fs_info);
- btrfs_handle_fs_error(fs_info, -EINVAL, NULL);
+ print_file_extent_item(l, i);
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
@@ -347,13 +563,17 @@ void btrfs_print_leaf(struct extent_buffer *l)
case BTRFS_UUID_KEY_SUBVOL:
case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
print_uuid_item(l, btrfs_item_ptr_offset(l, i),
- btrfs_item_size_nr(l, i));
+ btrfs_item_size(l, i));
+ break;
+ case BTRFS_RAID_STRIPE_KEY:
+ print_raid_stripe_key(l, btrfs_item_size(l, i),
+ btrfs_item_ptr(l, i, struct btrfs_stripe_extent));
break;
}
}
}
-void btrfs_print_tree(struct extent_buffer *c, bool follow)
+void btrfs_print_tree(const struct extent_buffer *c, bool follow)
{
struct btrfs_fs_info *fs_info;
int i; u32 nr;
@@ -377,25 +597,26 @@ void btrfs_print_tree(struct extent_buffer *c, bool follow)
print_eb_refs_lock(c);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
- pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n",
- i, key.objectid, key.type, key.offset,
- btrfs_node_blockptr(c, i),
- btrfs_node_ptr_generation(c, i));
+ pr_info("\tkey %d " BTRFS_KEY_FMT " block %llu gen %llu\n",
+ i, BTRFS_KEY_FMT_VALUE(&key), btrfs_node_blockptr(c, i),
+ btrfs_node_ptr_generation(c, i));
}
if (!follow)
return;
for (i = 0; i < nr; i++) {
- struct btrfs_key first_key;
+ struct btrfs_tree_parent_check check = {
+ .level = level - 1,
+ .transid = btrfs_node_ptr_generation(c, i),
+ .owner_root = btrfs_header_owner(c),
+ .has_first_key = true
+ };
struct extent_buffer *next;
- btrfs_node_key_to_cpu(c, &first_key, i);
- next = read_tree_block(fs_info, btrfs_node_blockptr(c, i),
- btrfs_header_owner(c),
- btrfs_node_ptr_generation(c, i),
- level - 1, &first_key);
- if (IS_ERR(next)) {
+ btrfs_node_key_to_cpu(c, &check.first_key, i);
+ next = read_tree_block(fs_info, btrfs_node_blockptr(c, i), &check);
+ if (IS_ERR(next))
continue;
- } else if (!extent_buffer_uptodate(next)) {
+ if (!extent_buffer_uptodate(next)) {
free_extent_buffer(next);
continue;
}
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index 8c3e9319ec4e..d0e620bf5f5a 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -6,11 +6,16 @@
#ifndef BTRFS_PRINT_TREE_H
#define BTRFS_PRINT_TREE_H
+#include <linux/types.h>
+
/* Buffer size to contain tree name and possibly additional data (offset) */
#define BTRFS_ROOT_NAME_BUF_LEN 48
-void btrfs_print_leaf(struct extent_buffer *l);
-void btrfs_print_tree(struct extent_buffer *c, bool follow);
+struct extent_buffer;
+struct btrfs_key;
+
+void btrfs_print_leaf(const struct extent_buffer *l);
+void btrfs_print_tree(const struct extent_buffer *c, bool follow);
const char *btrfs_root_name(const struct btrfs_key *key, char *buf);
#endif
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index b1cb5a8c2999..adc956432d2f 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -4,12 +4,19 @@
*/
#include <linux/hashtable.h>
+#include <linux/xattr.h>
+#include "messages.h"
#include "props.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "ctree.h"
#include "xattr.h"
#include "compression.h"
+#include "space-info.h"
+#include "fs.h"
+#include "accessors.h"
+#include "super.h"
+#include "dir-item.h"
#define BTRFS_PROP_HANDLERS_HT_BITS 8
static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS);
@@ -17,9 +24,11 @@ static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS);
struct prop_handler {
struct hlist_node node;
const char *xattr_name;
- int (*validate)(const char *value, size_t len);
- int (*apply)(struct inode *inode, const char *value, size_t len);
- const char *(*extract)(struct inode *inode);
+ int (*validate)(const struct btrfs_inode *inode, const char *value,
+ size_t len);
+ int (*apply)(struct btrfs_inode *inode, const char *value, size_t len);
+ const char *(*extract)(const struct btrfs_inode *inode);
+ bool (*ignore)(const struct btrfs_inode *inode);
int inheritable;
};
@@ -55,7 +64,8 @@ find_prop_handler(const char *name,
return NULL;
}
-int btrfs_validate_prop(const char *name, const char *value, size_t value_len)
+int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name,
+ const char *value, size_t value_len)
{
const struct prop_handler *handler;
@@ -69,10 +79,32 @@ int btrfs_validate_prop(const char *name, const char *value, size_t value_len)
if (value_len == 0)
return 0;
- return handler->validate(value, value_len);
+ return handler->validate(inode, value, value_len);
}
-int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
+/*
+ * Check if a property should be ignored (not set) for an inode.
+ *
+ * @inode: The target inode.
+ * @name: The property's name.
+ *
+ * The caller must be sure the given property name is valid, for example by
+ * having previously called btrfs_validate_prop().
+ *
+ * Returns: true if the property should be ignored for the given inode,
+ * false if the property must not be ignored for the given inode.
+ */
+bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name)
+{
+ const struct prop_handler *handler;
+
+ handler = find_prop_handler(name, NULL);
+ ASSERT(handler != NULL);
+
+ return handler->ignore(inode);
+}
+
+int btrfs_set_prop(struct btrfs_trans_handle *trans, struct btrfs_inode *inode,
const char *name, const char *value, size_t value_len,
int flags)
{
@@ -84,7 +116,7 @@ int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
return -EINVAL;
if (value_len == 0) {
- ret = btrfs_setxattr(trans, inode, handler->xattr_name,
+ ret = btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name,
NULL, 0, flags);
if (ret)
return ret;
@@ -95,18 +127,18 @@ int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
return ret;
}
- ret = btrfs_setxattr(trans, inode, handler->xattr_name, value,
+ ret = btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name, value,
value_len, flags);
if (ret)
return ret;
ret = handler->apply(inode, value, value_len);
if (ret) {
- btrfs_setxattr(trans, inode, handler->xattr_name, NULL,
+ btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name, NULL,
0, flags);
return ret;
}
- set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
+ set_bit(BTRFS_INODE_HAS_PROPS, &inode->runtime_flags);
return 0;
}
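/*
 * Editorial sketch (not part of the patch): the xattr entry point is expected
 * to combine the helpers above, validating the value first and then silently
 * skipping inodes for which the property is pointless. The function name is
 * hypothetical.
 */
static int example_prop_setxattr(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode, const char *name,
				 const char *value, size_t len, int flags)
{
	int ret;

	ret = btrfs_validate_prop(inode, name, value, len);
	if (ret)
		return ret;
	if (btrfs_ignore_prop(inode, name))
		return 0;
	return btrfs_set_prop(trans, inode, name, value, len, flags);
}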
@@ -158,7 +190,7 @@ static int iterate_object_props(struct btrfs_root *root,
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
cur = 0;
- total_len = btrfs_item_size_nr(leaf, slot);
+ total_len = btrfs_item_size(leaf, slot);
while (cur < total_len) {
u32 name_len = btrfs_dir_name_len(leaf, di);
@@ -231,29 +263,31 @@ static void inode_prop_iterator(void *ctx,
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
- ret = handler->apply(inode, value, len);
+ ret = handler->apply(BTRFS_I(inode), value, len);
if (unlikely(ret))
btrfs_warn(root->fs_info,
"error applying prop %s to ino %llu (root %llu): %d",
handler->xattr_name, btrfs_ino(BTRFS_I(inode)),
- root->root_key.objectid, ret);
+ btrfs_root_id(root), ret);
else
set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
}
-int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path)
+int btrfs_load_inode_props(struct btrfs_inode *inode, struct btrfs_path *path)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 ino = btrfs_ino(BTRFS_I(inode));
- int ret;
-
- ret = iterate_object_props(root, path, ino, inode_prop_iterator, inode);
+ struct btrfs_root *root = inode->root;
+ u64 ino = btrfs_ino(inode);
- return ret;
+ return iterate_object_props(root, path, ino, inode_prop_iterator,
+ &inode->vfs_inode);
}
-static int prop_compression_validate(const char *value, size_t len)
+static int prop_compression_validate(const struct btrfs_inode *inode,
+ const char *value, size_t len)
{
+ if (!btrfs_inode_can_compress(inode))
+ return -EINVAL;
+
if (!value)
return 0;
@@ -267,26 +301,26 @@ static int prop_compression_validate(const char *value, size_t len)
return -EINVAL;
}
-static int prop_compression_apply(struct inode *inode, const char *value,
+static int prop_compression_apply(struct btrfs_inode *inode, const char *value,
size_t len)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int type;
/* Reset to defaults */
if (len == 0) {
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->prop_compress = BTRFS_COMPRESS_NONE;
+ inode->flags &= ~BTRFS_INODE_COMPRESS;
+ inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode->prop_compress = BTRFS_COMPRESS_NONE;
return 0;
}
/* Set NOCOMPRESS flag */
if ((len == 2 && strncmp("no", value, 2) == 0) ||
(len == 4 && strncmp("none", value, 4) == 0)) {
- BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->prop_compress = BTRFS_COMPRESS_NONE;
+ inode->flags |= BTRFS_INODE_NOCOMPRESS;
+ inode->flags &= ~BTRFS_INODE_COMPRESS;
+ inode->prop_compress = BTRFS_COMPRESS_NONE;
return 0;
}
@@ -303,20 +337,36 @@ static int prop_compression_apply(struct inode *inode, const char *value,
return -EINVAL;
}
- BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
- BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
- BTRFS_I(inode)->prop_compress = type;
+ inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ inode->flags |= BTRFS_INODE_COMPRESS;
+ inode->prop_compress = type;
return 0;
}
-static const char *prop_compression_extract(struct inode *inode)
+static bool prop_compression_ignore(const struct btrfs_inode *inode)
{
- switch (BTRFS_I(inode)->prop_compress) {
+ /*
+ * Compression only has effect for regular files, and for directories
+ * we set it just to propagate it to new files created inside them.
+ * Everything else (symlinks, devices, sockets, fifos) is pointless as
+ * it will do nothing, so don't waste metadata space on a compression
+ * xattr for anything that is neither a file nor a directory.
+ */
+ if (!S_ISREG(inode->vfs_inode.i_mode) &&
+ !S_ISDIR(inode->vfs_inode.i_mode))
+ return true;
+
+ return false;
+}
+
+static const char *prop_compression_extract(const struct btrfs_inode *inode)
+{
+ switch (inode->prop_compress) {
case BTRFS_COMPRESS_ZLIB:
case BTRFS_COMPRESS_LZO:
case BTRFS_COMPRESS_ZSTD:
- return btrfs_compress_type2str(BTRFS_I(inode)->prop_compress);
+ return btrfs_compress_type2str(inode->prop_compress);
default:
break;
}
@@ -330,22 +380,22 @@ static struct prop_handler prop_handlers[] = {
.validate = prop_compression_validate,
.apply = prop_compression_apply,
.extract = prop_compression_extract,
+ .ignore = prop_compression_ignore,
.inheritable = 1
},
};
-static int inherit_props(struct btrfs_trans_handle *trans,
- struct inode *inode,
- struct inode *parent)
+int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ const struct btrfs_inode *parent)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int i;
bool need_reserve = false;
- if (!test_bit(BTRFS_INODE_HAS_PROPS,
- &BTRFS_I(parent)->runtime_flags))
+ if (!test_bit(BTRFS_INODE_HAS_PROPS, &parent->runtime_flags))
return 0;
for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
@@ -356,6 +406,9 @@ static int inherit_props(struct btrfs_trans_handle *trans,
if (!h->inheritable)
continue;
+ if (h->ignore(inode))
+ continue;
+
value = h->extract(parent);
if (!value)
continue;
@@ -364,7 +417,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
* This is not strictly necessary as the property should be
* valid, but in case it isn't, don't propagate it further.
*/
- ret = h->validate(value, strlen(value));
+ ret = h->validate(inode, value, strlen(value));
if (ret)
continue;
@@ -377,22 +430,22 @@ static int inherit_props(struct btrfs_trans_handle *trans,
*/
if (need_reserve) {
num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
- ret = btrfs_block_rsv_add(root, trans->block_rsv,
- num_bytes, BTRFS_RESERVE_NO_FLUSH);
+ ret = btrfs_block_rsv_add(fs_info, trans->block_rsv,
+ num_bytes,
+ BTRFS_RESERVE_NO_FLUSH);
if (ret)
return ret;
}
- ret = btrfs_setxattr(trans, inode, h->xattr_name, value,
+ ret = btrfs_setxattr(trans, &inode->vfs_inode, h->xattr_name, value,
strlen(value), 0);
if (!ret) {
ret = h->apply(inode, value, strlen(value));
if (ret)
- btrfs_setxattr(trans, inode, h->xattr_name,
+ btrfs_setxattr(trans, &inode->vfs_inode, h->xattr_name,
NULL, 0, 0);
else
- set_bit(BTRFS_INODE_HAS_PROPS,
- &BTRFS_I(inode)->runtime_flags);
+ set_bit(BTRFS_INODE_HAS_PROPS, &inode->runtime_flags);
}
if (need_reserve) {
@@ -407,42 +460,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
return 0;
}
-int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
- struct inode *inode,
- struct inode *dir)
-{
- if (!dir)
- return 0;
-
- return inherit_props(trans, inode, dir);
-}
-
-int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *parent_root)
-{
- struct super_block *sb = root->fs_info->sb;
- struct inode *parent_inode, *child_inode;
- int ret;
-
- parent_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, parent_root);
- if (IS_ERR(parent_inode))
- return PTR_ERR(parent_inode);
-
- child_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, root);
- if (IS_ERR(child_inode)) {
- iput(parent_inode);
- return PTR_ERR(child_inode);
- }
-
- ret = inherit_props(trans, child_inode, parent_inode);
- iput(child_inode);
- iput(parent_inode);
-
- return ret;
-}
-
-void __init btrfs_props_init(void)
+int __init btrfs_props_init(void)
{
int i;
@@ -452,5 +470,6 @@ void __init btrfs_props_init(void)
hash_add(prop_handlers_ht, &p->node, h);
}
+ return 0;
}
diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h
index 40b2c65b518c..15d9a025c923 100644
--- a/fs/btrfs/props.h
+++ b/fs/btrfs/props.h
@@ -6,23 +6,26 @@
#ifndef BTRFS_PROPS_H
#define BTRFS_PROPS_H
-#include "ctree.h"
+#include <linux/types.h>
+#include <linux/compiler_types.h>
-void __init btrfs_props_init(void);
+struct btrfs_inode;
+struct btrfs_path;
+struct btrfs_trans_handle;
-int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
+int __init btrfs_props_init(void);
+
+int btrfs_set_prop(struct btrfs_trans_handle *trans, struct btrfs_inode *inode,
const char *name, const char *value, size_t value_len,
int flags);
-int btrfs_validate_prop(const char *name, const char *value, size_t value_len);
+int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name,
+ const char *value, size_t value_len);
+bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name);
-int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path);
+int btrfs_load_inode_props(struct btrfs_inode *inode, struct btrfs_path *path);
int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
- struct inode *inode,
- struct inode *dir);
-
-int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *parent_root);
+ struct btrfs_inode *inode,
+ const struct btrfs_inode *dir);
#endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index db680f5be745..9e2b53e90dcb 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -24,18 +24,30 @@
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "tree-checker.h"
-/* TODO XXX FIXME
- * - subvol delete -> delete when ref goes to 0? delete limits also?
- * - reorganize keys
- * - compressed
- * - sync
- * - copy also limits on subvol creation
- * - limit
- * - caches for ulists
- * - performance benchmarks
- * - check all ioctl parameters
- */
+enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
+{
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return BTRFS_QGROUP_MODE_DISABLED;
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
+ return BTRFS_QGROUP_MODE_SIMPLE;
+ return BTRFS_QGROUP_MODE_FULL;
+}
+
+bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
+{
+ return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
+}
+
+bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
+{
+ return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
+}
/*
* Helpers to access qgroup reservation
@@ -71,7 +83,7 @@ static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup, u64 num_bytes,
enum btrfs_qgroup_rsv_type type)
{
- trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
+ trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
qgroup->rsv.values[type] += num_bytes;
}
@@ -79,7 +91,7 @@ static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup, u64 num_bytes,
enum btrfs_qgroup_rsv_type type)
{
- trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
+ trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
if (qgroup->rsv.values[type] >= num_bytes) {
qgroup->rsv.values[type] -= num_bytes;
return;
@@ -95,7 +107,7 @@ static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *dest,
- struct btrfs_qgroup *src)
+ const struct btrfs_qgroup *src)
{
int i;
@@ -105,7 +117,7 @@ static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *dest,
- struct btrfs_qgroup *src)
+ const struct btrfs_qgroup *src)
{
int i;
@@ -129,101 +141,89 @@ static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
qg->new_refcnt += mod;
}
-static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
+static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
if (qg->old_refcnt < seq)
return 0;
return qg->old_refcnt - seq;
}
-static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
+static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
if (qg->new_refcnt < seq)
return 0;
return qg->new_refcnt - seq;
}
-/*
- * glue structure to represent the relations between qgroups.
- */
-struct btrfs_qgroup_list {
- struct list_head next_group;
- struct list_head next_member;
- struct btrfs_qgroup *group;
- struct btrfs_qgroup *member;
-};
-
-static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
-{
- return (u64)(uintptr_t)qg;
-}
-
-static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
-{
- return (struct btrfs_qgroup *)(uintptr_t)n->aux;
-}
-
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
+static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *qgroupid = key;
+ const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);
+
+ if (qgroup->qgroupid < *qgroupid)
+ return -1;
+ else if (qgroup->qgroupid > *qgroupid)
+ return 1;
+
+ return 0;
+}
+
/* must be called with qgroup_ioctl_lock held */
-static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
+static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
u64 qgroupid)
{
- struct rb_node *n = fs_info->qgroup_tree.rb_node;
- struct btrfs_qgroup *qgroup;
+ struct rb_node *node;
- while (n) {
- qgroup = rb_entry(n, struct btrfs_qgroup, node);
- if (qgroup->qgroupid < qgroupid)
- n = n->rb_left;
- else if (qgroup->qgroupid > qgroupid)
- n = n->rb_right;
- else
- return qgroup;
- }
- return NULL;
+ node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
+ return rb_entry_safe(node, struct btrfs_qgroup, node);
}
-/* must be called with qgroup_lock held */
+static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);
+
+ return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing);
+}
+
+/*
+ * Add a qgroup to the filesystem's qgroup tree.
+ *
+ * Must be called with qgroup_lock held and @prealloc preallocated.
+ *
+ * Ownership of @prealloc is transferred to this function, so the caller
+ * must no longer touch @prealloc.
+ */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *prealloc,
u64 qgroupid)
{
- struct rb_node **p = &fs_info->qgroup_tree.rb_node;
- struct rb_node *parent = NULL;
- struct btrfs_qgroup *qgroup;
+ struct rb_node *node;
- while (*p) {
- parent = *p;
- qgroup = rb_entry(parent, struct btrfs_qgroup, node);
+ /* Caller must have pre-allocated @prealloc. */
+ ASSERT(prealloc);
- if (qgroup->qgroupid < qgroupid)
- p = &(*p)->rb_left;
- else if (qgroup->qgroupid > qgroupid)
- p = &(*p)->rb_right;
- else
- return qgroup;
+ prealloc->qgroupid = qgroupid;
+ node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
+ if (node) {
+ kfree(prealloc);
+ return rb_entry(node, struct btrfs_qgroup, node);
}
- qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
- if (!qgroup)
- return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&prealloc->groups);
+ INIT_LIST_HEAD(&prealloc->members);
+ INIT_LIST_HEAD(&prealloc->dirty);
+ INIT_LIST_HEAD(&prealloc->iterator);
+ INIT_LIST_HEAD(&prealloc->nested_iterator);
- qgroup->qgroupid = qgroupid;
- INIT_LIST_HEAD(&qgroup->groups);
- INIT_LIST_HEAD(&qgroup->members);
- INIT_LIST_HEAD(&qgroup->dirty);
-
- rb_link_node(&qgroup->node, parent, p);
- rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
-
- return qgroup;
+ return prealloc;
}
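For reference, a self-contained sketch of the rb_find()/rb_find_add() pattern that the lookup and insert paths were converted to. The example type, the ascending key order and the helper names are assumptions for illustration; the patch itself reuses a single key comparator for both operations and keeps the tree's existing ordering.

#include <linux/rbtree.h>
#include <linux/slab.h>

struct example_item {
	struct rb_node rbnode;
	u64 id;
};

static int example_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 *id = key;
	const struct example_item *item = rb_entry(node, struct example_item, rbnode);

	if (*id < item->id)
		return -1;
	if (*id > item->id)
		return 1;
	return 0;
}

static int example_node_cmp(struct rb_node *new, const struct rb_node *existing)
{
	const struct example_item *item = rb_entry(new, struct example_item, rbnode);

	return example_key_cmp(&item->id, existing);
}

static struct example_item *example_lookup(struct rb_root *root, u64 id)
{
	struct rb_node *node = rb_find(&id, root, example_key_cmp);

	return rb_entry_safe(node, struct example_item, rbnode);
}

/* Insert a preallocated node; on collision free it and return the existing one. */
static struct example_item *example_insert(struct rb_root *root, struct example_item *new)
{
	struct rb_node *node = rb_find_add(&new->rbnode, root, example_node_cmp);

	if (node) {
		kfree(new);
		return rb_entry(node, struct example_item, rbnode);
	}
	return new;
}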
-static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
- struct btrfs_qgroup *qgroup)
+static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
struct btrfs_qgroup_list *list;
@@ -254,36 +254,60 @@ static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
return -ENOENT;
rb_erase(&qgroup->node, &fs_info->qgroup_tree);
- __del_qgroup_rb(fs_info, qgroup);
+ __del_qgroup_rb(qgroup);
return 0;
}
-/* must be called with qgroup_lock held */
+/*
+ * Add a relation between two qgroups.
+ *
+ * Must be called with qgroup_lock held. Ownership of @prealloc is
+ * transferred to this function and the caller must not touch it afterwards.
+ *
+ * Return: 0 on success
+ * -ENOENT if one of the qgroups is NULL
+ * <0 other errors
+ */
+static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
+ struct btrfs_qgroup *member,
+ struct btrfs_qgroup *parent)
+{
+ if (!member || !parent) {
+ kfree(prealloc);
+ return -ENOENT;
+ }
+
+ prealloc->group = parent;
+ prealloc->member = member;
+ list_add_tail(&prealloc->next_group, &member->groups);
+ list_add_tail(&prealloc->next_member, &parent->members);
+
+ return 0;
+}
+
+/*
+ * Add a relation specified by two qgroup IDs.
+ *
+ * Must be called with qgroup_lock held.
+ *
+ * Return: 0 on success
+ * -ENOENT if one of the ids does not exist
+ * <0 other errors
+ */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_list *prealloc,
u64 memberid, u64 parentid)
{
struct btrfs_qgroup *member;
struct btrfs_qgroup *parent;
- struct btrfs_qgroup_list *list;
member = find_qgroup_rb(fs_info, memberid);
parent = find_qgroup_rb(fs_info, parentid);
- if (!member || !parent)
- return -ENOENT;
-
- list = kzalloc(sizeof(*list), GFP_ATOMIC);
- if (!list)
- return -ENOMEM;
-
- list->group = parent;
- list->member = member;
- list_add_tail(&list->next_group, &member->groups);
- list_add_tail(&list->next_member, &parent->members);
- return 0;
+ return __add_relation_rb(prealloc, member, parent);
}
-/* must be called with qgroup_lock held */
+/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
u64 memberid, u64 parentid)
{
@@ -308,7 +332,7 @@ static int del_relation_rb(struct btrfs_fs_info *fs_info,
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
u64 rfer, u64 excl)
{
struct btrfs_qgroup *qgroup;
@@ -322,6 +346,38 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
}
#endif
+__printf(2, 3)
+static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+ const u64 old_flags = fs_info->qgroup_flags;
+
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ return;
+ fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
+ BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
+ BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
+ if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf);
+ va_end(args);
+ }
+}
+
+static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf, int slot,
+ struct btrfs_qgroup_status_item *ptr)
+{
+ ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
+ fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
+}
+
/*
* The full config is read in one go, only called from open_ctree()
* It doesn't use any locking, as at this point we're still single-threaded
@@ -338,15 +394,9 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
u64 flags = 0;
u64 rescan_progress = 0;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!fs_info->quota_root)
return 0;
- fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
- if (!fs_info->qgroup_ulist) {
- ret = -ENOMEM;
- goto out;
- }
-
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
@@ -388,14 +438,11 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
"old qgroup version, quota disabled");
goto out;
}
- if (btrfs_qgroup_status_generation(l, ptr) !=
- fs_info->generation) {
- flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_err(fs_info,
- "qgroup generation mismatch, marked as inconsistent");
- }
- fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
- ptr);
+ fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
+ qgroup_read_enable_gen(fs_info, l, slot, ptr);
+ else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation)
+ qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch");
rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
goto next1;
}
@@ -406,16 +453,37 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
qgroup = find_qgroup_rb(fs_info, found_key.offset);
if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
- (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
- btrfs_err(fs_info, "inconsistent qgroup config");
- flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- }
+ (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY))
+ qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config");
if (!qgroup) {
- qgroup = add_qgroup_rb(fs_info, found_key.offset);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
+ struct btrfs_qgroup *prealloc;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ if (!prealloc) {
+ ret = -ENOMEM;
goto out;
}
+ qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
+ /*
+ * If a qgroup exists for a subvolume ID, it is possible
+ * that the subvolume has been deleted, in which case
+ * reusing that ID would lead to incorrect accounting.
+ *
+ * Ensure that we skip any such subvolume IDs.
+ *
+ * We don't need to lock because this is only called
+ * during mount before we start doing things like creating
+ * subvolumes.
+ */
+ if (btrfs_is_fstree(qgroup->qgroupid) &&
+ qgroup->qgroupid > tree_root->free_objectid)
+ /*
+ * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
+ * as it will get checked on the next call to
+ * btrfs_get_free_objectid.
+ */
+ tree_root->free_objectid = qgroup->qgroupid + 1;
}
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
if (ret < 0)
@@ -466,6 +534,8 @@ next1:
if (ret)
goto out;
while (1) {
+ struct btrfs_qgroup_list *list = NULL;
+
slot = path->slots[0];
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &found_key, slot);
@@ -479,8 +549,14 @@ next1:
goto next2;
}
- ret = add_relation_rb(fs_info, found_key.objectid,
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
+ if (!list) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = add_relation_rb(fs_info, list, found_key.objectid,
found_key.offset);
+ list = NULL;
if (ret == -ENOENT) {
btrfs_warn(fs_info,
"orphan qgroup relation 0x%llx->0x%llx",
@@ -499,15 +575,12 @@ next2:
out:
btrfs_free_path(path);
fs_info->qgroup_flags |= flags;
- if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
- ret >= 0)
- ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
-
- if (ret < 0) {
- ulist_free(fs_info->qgroup_ulist);
- fs_info->qgroup_ulist = NULL;
+ if (ret >= 0) {
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+ ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
+ } else {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
btrfs_sysfs_del_qgroups(fs_info);
}
@@ -522,12 +595,12 @@ out:
* Return false if no reserved space is left.
* Return true if some reserved space is leaked.
*/
-bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
+bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
bool ret = false;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
return ret;
/*
* Since we're unmounting, there is no race and no need to grab qgroup
@@ -555,29 +628,30 @@ bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
/*
* This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
- * first two are in single-threaded paths.And for the third one, we have set
- * quota_root to be null with qgroup_lock held before, so it is safe to clean
- * up the in-memory structures without qgroup_lock held.
+ * the first two of which are in single-threaded paths.
*/
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
struct rb_node *n;
struct btrfs_qgroup *qgroup;
+ /*
+ * btrfs_quota_disable() can be called concurrently with
+ * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
+ * lock.
+ */
+ spin_lock(&fs_info->qgroup_lock);
while ((n = rb_first(&fs_info->qgroup_tree))) {
qgroup = rb_entry(n, struct btrfs_qgroup, node);
rb_erase(n, &fs_info->qgroup_tree);
- __del_qgroup_rb(fs_info, qgroup);
+ __del_qgroup_rb(qgroup);
+ spin_unlock(&fs_info->qgroup_lock);
btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
kfree(qgroup);
+ spin_lock(&fs_info->qgroup_lock);
}
- /*
- * We call btrfs_free_qgroup_config() when unmounting
- * filesystem and disabling quota, so we set qgroup_ulist
- * to be null here to avoid double free.
- */
- ulist_free(fs_info->qgroup_ulist);
- fs_info->qgroup_ulist = NULL;
+ spin_unlock(&fs_info->qgroup_lock);
+
btrfs_sysfs_del_qgroups(fs_info);
}
@@ -586,7 +660,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
{
int ret;
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -598,10 +672,6 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
key.offset = dst;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
-
- btrfs_mark_buffer_dirty(path->nodes[0]);
-
- btrfs_free_path(path);
return ret;
}
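Throughout these conversions the open-coded btrfs_free_path()/goto out cleanup is replaced with BTRFS_PATH_AUTO_FREE(); a minimal sketch of the pattern, assuming the macro declares a path pointer freed automatically when it goes out of scope (the lookup function below is hypothetical):

/* Hypothetical illustration: every early return frees @path automatically. */
static int example_lookup_item(struct btrfs_root *root, const struct btrfs_key *key)
{
	BTRFS_PATH_AUTO_FREE(path);
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;	/* no "goto out" or btrfs_free_path() needed */
	if (ret > 0)
		return -ENOENT;

	return 0;
}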
@@ -610,7 +680,7 @@ static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
{
int ret;
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -623,24 +693,19 @@ static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
- ret = btrfs_del_item(trans, quota_root, path);
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, quota_root, path);
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
struct btrfs_root *quota_root, u64 qgroupid)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_qgroup_info_item *qgroup_info;
struct btrfs_qgroup_limit_item *qgroup_limit;
struct extent_buffer *leaf;
@@ -666,7 +731,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*qgroup_info));
if (ret && ret != -EEXIST)
- goto out;
+ return ret;
leaf = path->nodes[0];
qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
@@ -677,15 +742,13 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
- btrfs_mark_buffer_dirty(leaf);
-
btrfs_release_path(path);
key.type = BTRFS_QGROUP_LIMIT_KEY;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*qgroup_limit));
if (ret && ret != -EEXIST)
- goto out;
+ return ret;
leaf = path->nodes[0];
qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
@@ -696,19 +759,14 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
- btrfs_mark_buffer_dirty(leaf);
-
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
int ret;
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -720,33 +778,27 @@ static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
key.offset = qgroupid;
ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
ret = btrfs_del_item(trans, quota_root, path);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
key.type = BTRFS_QGROUP_LIMIT_KEY;
ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
ret = btrfs_del_item(trans, quota_root, path);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -754,7 +806,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
struct btrfs_qgroup *qgroup)
{
struct btrfs_root *quota_root = trans->fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *l;
struct btrfs_qgroup_limit_item *qgroup_limit;
@@ -774,7 +826,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
ret = -ENOENT;
if (ret)
- goto out;
+ return ret;
l = path->nodes[0];
slot = path->slots[0];
@@ -785,10 +837,6 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
- btrfs_mark_buffer_dirty(l);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -797,7 +845,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *l;
struct btrfs_qgroup_info_item *qgroup_info;
@@ -820,7 +868,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
ret = -ENOENT;
if (ret)
- goto out;
+ return ret;
l = path->nodes[0];
slot = path->slots[0];
@@ -831,10 +879,6 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
- btrfs_mark_buffer_dirty(l);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -842,7 +886,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *l;
struct btrfs_qgroup_status_item *ptr;
@@ -862,20 +906,17 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
ret = -ENOENT;
if (ret)
- goto out;
+ return ret;
l = path->nodes[0];
slot = path->slots[0];
ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
- btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
+ btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAGS_MASK);
btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
btrfs_set_qgroup_status_rescan(l, ptr,
fs_info->qgroup_rescan_progress.objectid);
- btrfs_mark_buffer_dirty(l);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -885,7 +926,7 @@ out:
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *leaf = NULL;
int ret;
@@ -896,13 +937,13 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
return -ENOMEM;
key.objectid = 0;
- key.offset = 0;
key.type = 0;
+ key.offset = 0;
while (1) {
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ return ret;
leaf = path->nodes[0];
nr = btrfs_header_nritems(leaf);
if (!nr)
@@ -915,17 +956,16 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
path->slots[0] = 0;
ret = btrfs_del_items(trans, root, path, 0, nr);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+
+ return 0;
}
-int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{
struct btrfs_root *quota_root;
struct btrfs_root *tree_root = fs_info->tree_root;
@@ -935,21 +975,30 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_qgroup *qgroup = NULL;
+ struct btrfs_qgroup *prealloc = NULL;
struct btrfs_trans_handle *trans = NULL;
- struct ulist *ulist = NULL;
+ const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
int ret = 0;
int slot;
+ /*
+ * We need to have subvol_sem write locked, to prevent races between
+ * concurrent tasks trying to enable quotas, because we will unlock
+ * and relock qgroup_ioctl_lock before setting fs_info->quota_root
+ * and before setting BTRFS_FS_QUOTA_ENABLED.
+ */
+ lockdep_assert_held_write(&fs_info->subvol_sem);
+
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info,
+ "qgroups are currently unsupported in extent tree v2");
+ return -EINVAL;
+ }
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (fs_info->quota_root)
goto out;
- ulist = ulist_alloc(GFP_KERNEL);
- if (!ulist) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = btrfs_sysfs_add_qgroups(fs_info);
if (ret < 0)
goto out;
@@ -989,9 +1038,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
if (fs_info->quota_root)
goto out;
- fs_info->qgroup_ulist = ulist;
- ulist = NULL;
-
/*
* initially create the quota tree
*/
@@ -1003,7 +1049,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
}
path = btrfs_alloc_path();
- if (!path) {
+ if (unlikely(!path)) {
ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
goto out_free_root;
@@ -1015,7 +1061,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*ptr));
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1025,13 +1071,18 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
struct btrfs_qgroup_status_item);
btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
- fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
+ fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
+ if (simple) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
+ btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
+ } else {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ }
+ btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAGS_MASK);
btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
- btrfs_mark_buffer_dirty(leaf);
-
key.objectid = 0;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = 0;
@@ -1040,7 +1091,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
if (ret > 0)
goto out_add_root;
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1055,27 +1106,32 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
/* Release locks on tree_root before we access quota_root */
btrfs_release_path(path);
- ret = add_qgroup_item(trans, quota_root,
- found_key.offset);
- if (ret) {
+ /* We should not have a stray @prealloc pointer. */
+ ASSERT(prealloc == NULL);
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (unlikely(!prealloc)) {
+ ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
- qgroup = add_qgroup_rb(fs_info, found_key.offset);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
+ ret = add_qgroup_item(trans, quota_root,
+ found_key.offset);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
+
+ qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
+ prealloc = NULL;
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
ret = btrfs_search_slot_for_read(tree_root, &found_key,
path, 1, 0);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1089,7 +1145,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
}
}
ret = btrfs_next_item(tree_root, path);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -1100,25 +1156,40 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
out_add_root:
btrfs_release_path(path);
ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
- qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- btrfs_abort_transaction(trans, ret);
+ ASSERT(prealloc == NULL);
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
goto out_free_path;
}
+ qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
+ prealloc = NULL;
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
+ fs_info->qgroup_enable_gen = trans->transid;
+
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ /*
+ * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
+	 * a deadlock with tasks concurrently doing other qgroup operations, such as
+	 * adding/removing qgroups or adding/deleting qgroup relations, because all
+	 * qgroup operations first start or join a transaction and then lock the
+	 * qgroup_ioctl_lock mutex.
+	 * We are safe from a concurrent task trying to enable quotas by calling this
+	 * function, since we are serialized by fs_info->subvol_sem.
+ */
ret = btrfs_commit_transaction(trans);
trans = NULL;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
if (ret)
goto out_free_path;
@@ -1132,12 +1203,31 @@ out_add_root:
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
spin_unlock(&fs_info->qgroup_lock);
+ /* Skip rescan for simple qgroups. */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ goto out_free_path;
+
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
qgroup_rescan_zero_tracking(fs_info);
fs_info->qgroup_rescan_running = true;
btrfs_queue_work(fs_info->qgroup_rescan_workers,
&fs_info->qgroup_rescan_work);
+ } else {
+ /*
+ * We have set both BTRFS_FS_QUOTA_ENABLED and
+ * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
+ * -EINPROGRESS. That can happen because someone started the
+		 * rescan worker by calling the quota rescan ioctl before we
+		 * attempted to initialize the rescan worker. Failure due to
+		 * quotas being disabled in the meanwhile is not possible, because
+		 * we are holding a write lock on fs_info->subvol_sem, which
+		 * is also acquired when disabling quotas.
+		 * Ignore such an error; any other error would require undoing
+		 * everything we did in the transaction we just committed.
+ */
+ ASSERT(ret == -EINPROGRESS);
+ ret = 0;
}
out_free_path:
@@ -1146,32 +1236,101 @@ out_free_root:
if (ret)
btrfs_put_root(quota_root);
out:
- if (ret) {
- ulist_free(fs_info->qgroup_ulist);
- fs_info->qgroup_ulist = NULL;
+ if (ret)
btrfs_sysfs_del_qgroups(fs_info);
- }
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (ret && trans)
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_end_transaction(trans);
- ulist_free(ulist);
+
+ /*
+	 * At this point we either failed at allocating prealloc, or we
+	 * succeeded and passed its ownership to add_qgroup_rb(). In either
+	 * case, this must be NULL or there is something wrong.
+ */
+ ASSERT(prealloc == NULL);
+
return ret;
}
+/*
+ * It is possible to have outstanding ordered extents which reserved bytes
+ * before we disabled quotas. We need to fully flush delalloc and ordered
+ * extents, and commit the transaction, to ensure that we don't leak such
+ * reservations, only to have them come back if we re-enable quotas.
+ *
+ * - enable simple quotas
+ * - reserve space
+ * - release it, store rsv_bytes in OE
+ * - disable quotas
+ * - enable simple quotas (qgroup rsv are all 0)
+ * - OE finishes
+ * - run delayed refs
+ * - free rsv_bytes, resulting in miscounting or even underflow
+ */
+static int flush_reservations(struct btrfs_fs_info *fs_info)
+{
+ int ret;
+
+ ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
+ if (ret)
+ return ret;
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
+
+ return btrfs_commit_current_transaction(fs_info->tree_root);
+}
+
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *quota_root;
+ struct btrfs_root *quota_root = NULL;
struct btrfs_trans_handle *trans = NULL;
int ret = 0;
+ /*
+ * We need to have subvol_sem write locked to prevent races with
+ * snapshot creation.
+ */
+ lockdep_assert_held_write(&fs_info->subvol_sem);
+
+ /*
+ * Relocation will mess with backrefs, so make sure we have the
+	 * cleaner_mutex held to protect us from relocation.
+ */
+ lockdep_assert_held(&fs_info->cleaner_mutex);
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+
+ /*
+ * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
+ * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
+ * to lock that mutex while holding a transaction handle and the rescan
+ * worker needs to commit a transaction.
+ */
mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
+ * Request qgroup rescan worker to complete and wait for it. This wait
+ * must be done before transaction start for quota disable since it may
+ * deadlock with transaction by the qgroup rescan worker.
+ */
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
+
+ /*
+	 * We have nothing held here and no trans handle, so if there is an error
+	 * just set the quota enabled bit back and return the error, since we
+	 * didn't actually disable quotas.
+ */
+ ret = flush_reservations(fs_info);
+ if (ret) {
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ return ret;
+ }
+
+ /*
* 1 For the root item
*
* We should also reserve enough items for the quota tree deletion in
@@ -1186,50 +1345,55 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
goto out;
}
if (!fs_info->quota_root)
goto out;
- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
spin_unlock(&fs_info->qgroup_lock);
btrfs_free_qgroup_config(fs_info);
ret = btrfs_clean_quota_tree(trans, quota_root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_del_root(trans, &quota_root->root_key);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
+ spin_lock(&fs_info->trans_lock);
list_del(&quota_root->dirty_list);
+ spin_unlock(&fs_info->trans_lock);
btrfs_tree_lock(quota_root->node);
- btrfs_clean_tree_block(quota_root->node);
+ btrfs_clear_buffer_dirty(trans, quota_root->node);
btrfs_tree_unlock(quota_root->node);
- btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ quota_root->node, 0, 1);
- btrfs_put_root(quota_root);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
out:
+ btrfs_put_root(quota_root);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (ret && trans)
btrfs_end_transaction(trans);
else if (trans)
- ret = btrfs_end_transaction(trans);
-
+ ret = btrfs_commit_transaction(trans);
return ret;
}
@@ -1240,6 +1404,24 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
+static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
+{
+ if (!list_empty(&qgroup->iterator))
+ return;
+
+ list_add_tail(&qgroup->iterator, head);
+}
+
+static void qgroup_iterator_clean(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct btrfs_qgroup *qgroup;
+
+ qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
+ list_del_init(&qgroup->iterator);
+ }
+}
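The two helpers above replace the ulist-based upward walks; a hedged sketch of the intended usage (the function name is hypothetical, the walk mirrors __qgroup_excl_accounting() further down):

/* Hypothetical sketch: visit a qgroup and all of its ancestors. */
static void example_walk_parents(struct btrfs_qgroup *start)
{
	LIST_HEAD(worklist);
	struct btrfs_qgroup *qg;

	qgroup_iterator_add(&worklist, start);
	list_for_each_entry(qg, &worklist, iterator) {
		struct btrfs_qgroup_list *glist;

		/* ... update counters of @qg here ... */

		/* Appending while iterating is safe: the loop re-reads ->next. */
		list_for_each_entry(glist, &qg->groups, next_group)
			qgroup_iterator_add(&worklist, glist->group);
	}
	/* Re-initialize every ->iterator so the qgroups can be queued again. */
	qgroup_iterator_clean(&worklist);
}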
+
/*
* The easy accounting, we're updating qgroup relationship whose child qgroup
* only has exclusive extents.
@@ -1254,68 +1436,44 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
*
* Caller should hold fs_info->qgroup_lock.
*/
-static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
- struct ulist *tmp, u64 ref_root,
+static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
struct btrfs_qgroup *src, int sign)
{
struct btrfs_qgroup *qgroup;
- struct btrfs_qgroup_list *glist;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
+ LIST_HEAD(qgroup_list);
u64 num_bytes = src->excl;
+ u64 num_bytes_cmpr = src->excl_cmpr;
int ret = 0;
qgroup = find_qgroup_rb(fs_info, ref_root);
if (!qgroup)
goto out;
- qgroup->rfer += sign * num_bytes;
- qgroup->rfer_cmpr += sign * num_bytes;
-
- WARN_ON(sign < 0 && qgroup->excl < num_bytes);
- qgroup->excl += sign * num_bytes;
- qgroup->excl_cmpr += sign * num_bytes;
-
- if (sign > 0)
- qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
- else
- qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
-
- qgroup_dirty(fs_info, qgroup);
-
- /* Get all of the parent groups that contain this qgroup */
- list_for_each_entry(glist, &qgroup->groups, next_group) {
- ret = ulist_add(tmp, glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
- /* Iterate all of the parents and adjust their reference counts */
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(tmp, &uiter))) {
- qgroup = unode_aux_to_qgroup(unode);
qgroup->rfer += sign * num_bytes;
- qgroup->rfer_cmpr += sign * num_bytes;
+ qgroup->rfer_cmpr += sign * num_bytes_cmpr;
+
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
+ WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
qgroup->excl += sign * num_bytes;
+ qgroup->excl_cmpr += sign * num_bytes_cmpr;
+
if (sign > 0)
qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
else
qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
- qgroup->excl_cmpr += sign * num_bytes;
qgroup_dirty(fs_info, qgroup);
- /* Add any parents of the parents */
- list_for_each_entry(glist, &qgroup->groups, next_group) {
- ret = ulist_add(tmp, glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ /* Append parent qgroups to @qgroup_list. */
+ list_for_each_entry(glist, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
ret = 0;
out:
+ qgroup_iterator_clean(&qgroup_list);
return ret;
}
@@ -1332,24 +1490,19 @@ out:
* Return < 0 for other error.
*/
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
- struct ulist *tmp, u64 src, u64 dst,
- int sign)
+ u64 src, u64 dst, int sign)
{
struct btrfs_qgroup *qgroup;
int ret = 1;
- int err = 0;
qgroup = find_qgroup_rb(fs_info, src);
if (!qgroup)
goto out;
if (qgroup->excl == qgroup->rfer) {
- ret = 0;
- err = __qgroup_excl_accounting(fs_info, tmp, dst,
- qgroup, sign);
- if (err < 0) {
- ret = err;
+ ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
+ if (ret < 0)
goto out;
- }
+ ret = 0;
}
out:
if (ret)
@@ -1357,27 +1510,26 @@ out:
return ret;
}
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
- u64 dst)
+/*
+ * Add a relation between the @src and @dst qgroups. @prealloc is allocated by
+ * the caller and ownership is transferred here (either used or freed on error).
+ */
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
+ struct btrfs_qgroup_list *prealloc)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
- struct ulist *tmp;
- unsigned int nofs_flag;
int ret = 0;
+ ASSERT(prealloc);
+
/* Check the level of src and dst first */
- if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
+ if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
+ kfree(prealloc);
return -EINVAL;
-
- /* We hold a transaction handle open, must do a NOFS allocation. */
- nofs_flag = memalloc_nofs_save();
- tmp = ulist_alloc(GFP_KERNEL);
- memalloc_nofs_restore(nofs_flag);
- if (!tmp)
- return -ENOMEM;
+ }
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root) {
@@ -1410,16 +1562,17 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
}
spin_lock(&fs_info->qgroup_lock);
- ret = add_relation_rb(fs_info, src, dst);
+ ret = __add_relation_rb(prealloc, member, parent);
+ prealloc = NULL;
if (ret < 0) {
spin_unlock(&fs_info->qgroup_lock);
goto out;
}
- ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
+ ret = quick_update_accounting(fs_info, src, dst, 1);
spin_unlock(&fs_info->qgroup_lock);
out:
+ kfree(prealloc);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
- ulist_free(tmp);
return ret;
}
@@ -1430,19 +1583,10 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
- struct ulist *tmp;
bool found = false;
- unsigned int nofs_flag;
int ret = 0;
int ret2;
- /* We hold a transaction handle open, must do a NOFS allocation. */
- nofs_flag = memalloc_nofs_save();
- tmp = ulist_alloc(GFP_KERNEL);
- memalloc_nofs_restore(nofs_flag);
- if (!tmp)
- return -ENOMEM;
-
if (!fs_info->quota_root) {
ret = -ENOTCONN;
goto out;
@@ -1480,11 +1624,10 @@ delete_item:
if (found) {
spin_lock(&fs_info->qgroup_lock);
del_relation_rb(fs_info, src, dst);
- ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
+ ret = quick_update_accounting(fs_info, src, dst, -1);
spin_unlock(&fs_info->qgroup_lock);
}
out:
- ulist_free(tmp);
return ret;
}
@@ -1506,6 +1649,7 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup *prealloc = NULL;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
@@ -1520,24 +1664,81 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = add_qgroup_item(trans, quota_root, qgroupid);
if (ret)
goto out;
spin_lock(&fs_info->qgroup_lock);
- qgroup = add_qgroup_rb(fs_info, qgroupid);
+ qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
+ prealloc = NULL;
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- goto out;
- }
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ /*
+	 * At this point we either failed at allocating prealloc, or we
+	 * succeeded and passed its ownership to add_qgroup_rb(). In either
+	 * case, this must be NULL or there is something wrong.
+ */
+ ASSERT(prealloc == NULL);
return ret;
}
+/*
+ * Return 0 if we cannot delete the qgroup (it is not empty or it has children, etc.).
+ * Return >0 if we can delete the qgroup.
+ * Return <0 for other errors during tree search.
+ */
+static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
+{
+ struct btrfs_key key;
+ BTRFS_PATH_AUTO_FREE(path);
+
+ /*
+	 * Squota would never be inconsistent, but there can still be a case
+	 * where a dropped subvolume still has qgroup numbers, and squota
+	 * relies on such a qgroup for future accounting.
+ *
+ * So for squota, do not allow dropping any non-zero qgroup.
+ */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
+ (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
+ return 0;
+
+	/* For a higher level qgroup, we can only delete it if it has no children. */
+ if (btrfs_qgroup_level(qgroup->qgroupid)) {
+ if (!list_empty(&qgroup->members))
+ return 0;
+ return 1;
+ }
+
+ /*
+	 * For a level-0 qgroup, we can only delete it if there is no subvolume
+	 * associated with it.
+	 * This means that even if a subvolume is unlinked but not yet fully
+	 * dropped, we cannot delete the qgroup.
+ */
+ key.objectid = qgroup->qgroupid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = -1ULL;
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /*
+ * The @ret from btrfs_find_root() exactly matches our definition for
+	 * the return value, so it can be returned directly.
+ */
+ return btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
+}
+
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1557,6 +1758,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
+ ret = can_delete_qgroup(fs_info, qgroup);
+ if (ret < 0)
+ goto out;
+ if (ret == 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* Check if there are no children of this qgroup */
if (!list_empty(&qgroup->members)) {
ret = -EBUSY;
@@ -1577,6 +1786,44 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
}
spin_lock(&fs_info->qgroup_lock);
+ /*
+	 * Warn on reserved space. The qgroup should have no child qgroups nor
+	 * any corresponding subvolume.
+	 * Thus its reserved space should all be zero, no matter whether the
+	 * qgroup is consistent or which mode is in use.
+ */
+ if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
+ DEBUG_WARN();
+ btrfs_warn_rl(fs_info,
+"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
+
+ }
+ /*
+ * The same for rfer/excl numbers, but that's only if our qgroup is
+ * consistent and if it's in regular qgroup mode.
+	 * For simple mode it's not as accurate, thus we can hit non-zero values
+	 * very frequently.
+ */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
+ !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
+ if (qgroup->rfer || qgroup->excl ||
+ qgroup->rfer_cmpr || qgroup->excl_cmpr) {
+ DEBUG_WARN();
+ qgroup_mark_inconsistent(fs_info,
+ "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ qgroup->rfer, qgroup->rfer_cmpr,
+ qgroup->excl, qgroup->excl_cmpr);
+ }
+ }
del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
@@ -1592,6 +1839,41 @@ out:
return ret;
}
+int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
+{
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) ||
+ !fs_info->quota_root)
+ return 0;
+
+ /*
+	 * Commit the current transaction to make sure all the rfer/excl numbers
+ * get updated.
+ */
+ ret = btrfs_commit_current_transaction(fs_info->quota_root);
+ if (ret < 0)
+ return ret;
+
+ /* Start new trans to delete the qgroup info and limit items. */
+ trans = btrfs_start_transaction(fs_info->quota_root, 2);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ ret = btrfs_remove_qgroup(trans, subvolid);
+ btrfs_end_transaction(trans);
+ /*
+	 * Either it's squota and the subvolume still has numbers needed for
+	 * future accounting, in which case we cannot delete it and just skip it,
+	 * or the qgroup has already been removed by a qgroup rescan. In both
+	 * cases it is safe to ignore the error.
+ */
+ if (ret == -EBUSY || ret == -ENOENT)
+ ret = 0;
+ return ret;
+}
+
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit)
{
@@ -1658,59 +1940,103 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
spin_unlock(&fs_info->qgroup_lock);
ret = update_qgroup_limit_item(trans, qgroup);
- if (ret) {
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_info(fs_info, "unable to update quota limit for %llu",
- qgroupid);
- }
+ if (ret)
+ qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
+/*
+ * Inform qgroup to trace one dirty extent, whose info is recorded in @record,
+ * so qgroup can account for it at transaction commit time.
+ *
+ * No-lock version: the caller must hold the delayed ref lock and have allocated
+ * the memory, then call btrfs_qgroup_trace_extent_post() after exiting the lock
+ * context.
+ *
+ * Return 0 on successful insertion.
+ * Return >0 for an existing record; the caller can free @record safely.
+ * Return <0 for insertion failure; the caller can free @record safely.
+ */
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record)
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record,
+ u64 bytenr)
{
- struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_qgroup_extent_record *entry;
- u64 bytenr = record->bytenr;
-
- lockdep_assert_held(&delayed_refs->lock);
- trace_btrfs_qgroup_trace_extent(fs_info, record);
-
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
- node);
- if (bytenr < entry->bytenr) {
- p = &(*p)->rb_left;
- } else if (bytenr > entry->bytenr) {
- p = &(*p)->rb_right;
- } else {
- if (record->data_rsv && !entry->data_rsv) {
- entry->data_rsv = record->data_rsv;
- entry->data_rsv_refroot =
- record->data_rsv_refroot;
- }
- return 1;
+ struct btrfs_qgroup_extent_record *existing, *ret;
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
+
+ if (!btrfs_qgroup_full_accounting(fs_info))
+ return 1;
+
+#if BITS_PER_LONG == 32
+ if (bytenr >= MAX_LFS_FILESIZE) {
+ btrfs_err_rl(fs_info,
+"qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
+ bytenr);
+ btrfs_err_32bit_limit(fs_info);
+ return -EOVERFLOW;
+ }
+#endif
+
+ trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
+
+ xa_lock(&delayed_refs->dirty_extents);
+ existing = xa_load(&delayed_refs->dirty_extents, index);
+ if (existing) {
+ if (record->data_rsv && !existing->data_rsv) {
+ existing->data_rsv = record->data_rsv;
+ existing->data_rsv_refroot = record->data_rsv_refroot;
}
+ xa_unlock(&delayed_refs->dirty_extents);
+ return 1;
+ }
+
+ ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
+ xa_unlock(&delayed_refs->dirty_extents);
+ if (xa_is_err(ret)) {
+ qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret));
+ return xa_err(ret);
}
- rb_link_node(&record->node, parent_node, p);
- rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
return 0;
}
+/*
+ * Post handler after qgroup_trace_extent_nolock().
+ *
+ * NOTE: Currently qgroup does the expensive backref walk at transaction
+ * committing time with TRANS_STATE_COMMIT_DOING, which blocks new incoming
+ * transactions.
+ * This is designed to allow btrfs_find_all_roots() to get a correct new_roots
+ * result.
+ *
+ * However for old_roots there is no need to do the backref walk at that time,
+ * since we search commit roots for the backref walk and the result will always
+ * be correct.
+ *
+ * Due to the nature of the no-lock version, we can't do the backref walk there,
+ * so we must call btrfs_qgroup_trace_extent_post() after exiting the spinlock
+ * context.
+ *
+ * TODO: If we can fix and prove that btrfs_find_all_roots() gets a correct
+ * result using the current root, then we can move all the expensive backref
+ * walks out of transaction commit, but not for now as qgroup accounting would
+ * be wrong again.
+ */
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
- struct btrfs_qgroup_extent_record *qrecord)
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr)
{
- struct ulist *old_root;
- u64 bytenr = qrecord->bytenr;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_backref_walk_ctx ctx = {
+ .bytenr = bytenr,
+ .fs_info = fs_info,
+ };
int ret;
+ if (!btrfs_qgroup_full_accounting(fs_info))
+ return 0;
/*
* We are always called in a context where we are already holding a
* transaction handle. Often we are called when adding a data delayed
@@ -1732,13 +2058,13 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
*/
ASSERT(trans != NULL);
- ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
- true);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
+ return 0;
+
+ ret = btrfs_find_all_roots(&ctx, true);
if (ret < 0) {
- trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_warn(trans->fs_info,
-"error accounting new delayed refs extent (err code: %d), quota inconsistent",
- ret);
+ qgroup_mark_inconsistent(fs_info,
+ "error accounting new delayed refs extent: %d", ret);
return 0;
}
@@ -1749,40 +2075,61 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
*
* So modifying qrecord->old_roots is safe here
*/
- qrecord->old_roots = old_root;
+ qrecord->old_roots = ctx.roots;
return 0;
}
+/*
+ * Inform qgroup to trace one dirty extent, specified by @bytenr and
+ * @num_bytes, so qgroup can account for it at transaction commit time.
+ *
+ * Better encapsulated version that does the memory allocation and the backref
+ * walk on commit roots, so it can sleep.
+ *
+ * Return 0 if the operation is done.
+ * Return <0 on error, such as a memory allocation failure or an invalid
+ * parameter (NULL trans).
+ */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
- u64 num_bytes, gfp_t gfp_flag)
+ u64 num_bytes)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup_extent_record *record;
- struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
- || bytenr == 0 || num_bytes == 0)
+ if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
return 0;
- record = kzalloc(sizeof(*record), gfp_flag);
+ record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record)
return -ENOMEM;
- delayed_refs = &trans->transaction->delayed_refs;
- record->bytenr = bytenr;
+ if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
+ kfree(record);
+ return -ENOMEM;
+ }
+
record->num_bytes = num_bytes;
- record->old_roots = NULL;
- spin_lock(&delayed_refs->lock);
- ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
- spin_unlock(&delayed_refs->lock);
- if (ret > 0) {
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
+ if (ret) {
+ /* Clean up if insertion fails or item exists. */
+ xa_release(&delayed_refs->dirty_extents, index);
kfree(record);
return 0;
}
- return btrfs_qgroup_trace_extent_post(trans, record);
+ return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
}
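For reference, a self-contained sketch of the reserve-then-store xarray pattern used by the two tracing functions above (the function and parameter names are illustrative): reserving the slot with a sleeping allocation up front means the later store under xa_lock() needs no GFP_ATOMIC allocation and cannot fail with -ENOMEM.

#include <linux/xarray.h>

/* Hypothetical sketch: returns 0 if @record was stored, 1 if an entry existed. */
static int example_record_once(struct xarray *xa, unsigned long index, void *record)
{
	void *entry;

	/* May sleep; guarantees the slot exists before we take the lock. */
	if (xa_reserve(xa, index, GFP_NOFS))
		return -ENOMEM;

	xa_lock(xa);
	entry = xa_load(xa, index);
	if (entry) {
		/* Lost the race: keep the existing record. */
		xa_unlock(xa);
		xa_release(xa, index);	/* drop our unused reservation */
		return 1;
	}
	entry = __xa_store(xa, index, record, GFP_ATOMIC);
	xa_unlock(xa);

	return xa_is_err(entry) ? xa_err(entry) : 0;
}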
+/*
+ * Inform qgroup to trace all the data items of a leaf.
+ *
+ * Return 0 for success.
+ * Return <0 for error (ENOMEM).
+ */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct extent_buffer *eb)
{
@@ -1794,7 +2141,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
u64 bytenr, num_bytes;
/* We can be called directly from walk_up_proc() */
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
for (i = 0; i < nr; i++) {
@@ -1816,8 +2163,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
- ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
- GFP_NOFS);
+ ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
if (ret)
return ret;
}
@@ -1939,7 +2285,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
bool trace_leaf)
{
struct btrfs_key key;
- struct btrfs_path *src_path;
+ BTRFS_PATH_AUTO_FREE(src_path);
struct btrfs_fs_info *fs_info = trans->fs_info;
u32 nodesize = fs_info->nodesize;
int cur_level = root_level;
@@ -1951,10 +2297,8 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
return -EINVAL;
src_path = btrfs_alloc_path();
- if (!src_path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!src_path)
+ return -ENOMEM;
if (dst_level)
btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
@@ -1962,7 +2306,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
/* For src_path */
- atomic_inc(&src_eb->refs);
+ refcount_inc(&src_eb->refs);
src_path->nodes[root_level] = src_eb;
src_path->slots[root_level] = dst_path->slots[root_level];
src_path->locks[root_level] = 0;
@@ -1980,10 +2324,8 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
parent_slot = src_path->slots[cur_level + 1];
eb = btrfs_read_node_slot(eb, parent_slot);
- if (IS_ERR(eb)) {
- ret = PTR_ERR(eb);
- goto out;
- }
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
src_path->nodes[cur_level] = eb;
@@ -2004,10 +2346,8 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
&src_key, src_path->slots[cur_level]);
}
/* Content mismatch, something went wrong */
- if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
- ret = -ENOENT;
- goto out;
- }
+ if (btrfs_comp_cpu_keys(&dst_key, &src_key))
+ return -ENOENT;
cur_level--;
}
@@ -2016,24 +2356,22 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
* blocks for qgroup accounting.
*/
ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
- nodesize, GFP_NOFS);
+ nodesize);
if (ret < 0)
- goto out;
- ret = btrfs_qgroup_trace_extent(trans,
- dst_path->nodes[dst_level]->start,
- nodesize, GFP_NOFS);
+ return ret;
+ ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
+ nodesize);
if (ret < 0)
- goto out;
+ return ret;
/* Record leaf file extents */
if (dst_level == 0 && trace_leaf) {
ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
if (ret < 0)
- goto out;
+ return ret;
ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
}
-out:
- btrfs_free_path(src_path);
+
return ret;
}
@@ -2072,9 +2410,9 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
int i;
/* Level sanity check */
- if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
- root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
- root_level < cur_level) {
+ if (unlikely(cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
+ root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
+ root_level < cur_level)) {
btrfs_err_rl(fs_info,
"%s: bad levels, cur_level=%d root_level=%d",
__func__, cur_level, root_level);
@@ -2090,7 +2428,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
* dst_path->nodes[root_level] must be initialized before
* calling this function.
*/
- if (cur_level == root_level) {
+ if (unlikely(cur_level == root_level)) {
btrfs_err_rl(fs_info,
"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
__func__, root_level, root_level, cur_level);
@@ -2172,11 +2510,11 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
int level;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
/* Wrong parameter order */
- if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
+ if (unlikely(btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb))) {
btrfs_err_rl(fs_info,
"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
btrfs_header_generation(src_eb),
@@ -2184,7 +2522,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
return -EUCLEAN;
}
- if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
+ if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) {
ret = -EIO;
goto out;
}
@@ -2196,7 +2534,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
goto out;
}
/* For dst_path */
- atomic_inc(&dst_eb->refs);
+ refcount_inc(&dst_eb->refs);
dst_path->nodes[level] = dst_eb;
dst_path->slots[level] = 0;
dst_path->locks[level] = 0;
@@ -2211,10 +2549,20 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(dst_path);
if (ret < 0)
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
return ret;
}
+/*
+ * Inform qgroup to trace a whole subtree, including all its child tree
+ * blocks and data.
+ * The root tree block is specified by @root_eb.
+ *
+ * Normally used by relocation (tree block swap) and subvolume deletion.
+ *
+ * Return 0 for success
+ * Return <0 for error (ENOMEM or tree search error)
+ */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
@@ -2222,24 +2570,47 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret = 0;
int level;
+ u8 drop_subptree_thres;
struct extent_buffer *eb = root_eb;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
- BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
- BUG_ON(root_eb == NULL);
+ ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
+ ASSERT(root_eb != NULL);
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
+ spin_lock(&fs_info->qgroup_lock);
+ drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
+ spin_unlock(&fs_info->qgroup_lock);
+
+ /*
+ * This function only gets called for snapshot drop. If we hit a high
+ * level node here, it means we are going to change ownership for quite a
+ * lot of extents, which will greatly slow down btrfs_commit_transaction().
+ *
+ * So if we find a high tree here, we just skip the accounting and
+ * mark qgroup inconsistent.
+ */
+ if (root_level >= drop_subptree_thres) {
+ qgroup_mark_inconsistent(fs_info, "subtree level reached threshold");
+ return 0;
+ }
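+ /*
+  * Note: skipping here trades accounting accuracy for commit latency; the
+  * numbers are only brought back in sync by a later rescan, which is why
+  * the qgroup is marked inconsistent above.
+  */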
+
if (!extent_buffer_uptodate(root_eb)) {
- ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
+ struct btrfs_tree_parent_check check = {
+ .transid = root_gen,
+ .level = root_level
+ };
+
+ ret = btrfs_read_extent_buffer(root_eb, &check);
if (ret)
- goto out;
+ return ret;
}
if (root_level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
- goto out;
+ return ret;
}
path = btrfs_alloc_path();
@@ -2255,7 +2626,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
* walk back up the tree (adjusting slot pointers as we go)
* and restart the search process.
*/
- atomic_inc(&root_eb->refs); /* For path */
+ refcount_inc(&root_eb->refs); /* For path */
path->nodes[root_level] = root_eb;
path->slots[root_level] = 0;
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
@@ -2275,10 +2646,8 @@ walk_down:
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
eb = btrfs_read_node_slot(eb, parent_slot);
- if (IS_ERR(eb)) {
- ret = PTR_ERR(eb);
- goto out;
- }
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
path->nodes[level] = eb;
path->slots[level] = 0;
@@ -2287,17 +2656,16 @@ walk_down:
path->locks[level] = BTRFS_READ_LOCK;
ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
- fs_info->nodesize,
- GFP_NOFS);
+ fs_info->nodesize);
if (ret)
- goto out;
+ return ret;
}
if (level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans,
path->nodes[level]);
if (ret)
- goto out;
+ return ret;
/* Nonzero return here means we completed our search */
ret = adjust_slots_upwards(path, root_level);
@@ -2311,11 +2679,25 @@ walk_down:
level--;
}
- ret = 0;
-out:
- btrfs_free_path(path);
+ return 0;
+}
- return ret;
+static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
+{
+ if (!list_empty(&qgroup->nested_iterator))
+ return;
+
+ list_add_tail(&qgroup->nested_iterator, head);
+}
+
+static void qgroup_iterator_nested_clean(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct btrfs_qgroup *qgroup;
+
+ qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
+ list_del_init(&qgroup->nested_iterator);
+ }
}
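+
+/*
+ * The nested iterator helpers above, like qgroup_iterator_add() /
+ * qgroup_iterator_clean(), keep the iteration state inside the qgroups
+ * themselves (list heads linked into a caller provided list), so walking a
+ * qgroup hierarchy needs no memory allocation at all.  This is what lets the
+ * GFP_ATOMIC ulist allocations of the replaced code below go away.
+ */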
#define UPDATE_NEW 0
@@ -2323,57 +2705,41 @@ out:
/*
* Walk all of the roots that point to the bytenr and adjust their refcnts.
*/
-static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
- struct ulist *roots, struct ulist *tmp,
- struct ulist *qgroups, u64 seq, int update_old)
+static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
+ struct ulist *roots, struct list_head *qgroups,
+ u64 seq, bool update_old)
{
struct ulist_node *unode;
struct ulist_iterator uiter;
- struct ulist_node *tmp_unode;
- struct ulist_iterator tmp_uiter;
struct btrfs_qgroup *qg;
- int ret = 0;
if (!roots)
- return 0;
+ return;
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(roots, &uiter))) {
+ LIST_HEAD(tmp);
+
qg = find_qgroup_rb(fs_info, unode->val);
if (!qg)
continue;
- ulist_reinit(tmp);
- ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
- GFP_ATOMIC);
- if (ret < 0)
- return ret;
- ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
- if (ret < 0)
- return ret;
- ULIST_ITER_INIT(&tmp_uiter);
- while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ qgroup_iterator_nested_add(qgroups, qg);
+ qgroup_iterator_add(&tmp, qg);
+ list_for_each_entry(qg, &tmp, iterator) {
struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(tmp_unode);
if (update_old)
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
else
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
+
list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(qgroups, glist->group->qgroupid,
- qgroup_to_aux(glist->group),
- GFP_ATOMIC);
- if (ret < 0)
- return ret;
- ret = ulist_add(tmp, glist->group->qgroupid,
- qgroup_to_aux(glist->group),
- GFP_ATOMIC);
- if (ret < 0)
- return ret;
+ qgroup_iterator_nested_add(qgroups, glist->group);
+ qgroup_iterator_add(&tmp, glist->group);
}
}
+ qgroup_iterator_clean(&tmp);
}
- return 0;
}
/*
@@ -2412,27 +2778,21 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
* But this time we don't need to consider other things, and the code and
* logic are easy to understand now.
*/
-static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
- struct ulist *qgroups,
- u64 nr_old_roots,
- u64 nr_new_roots,
- u64 num_bytes, u64 seq)
+static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
+ struct list_head *qgroups, u64 nr_old_roots,
+ u64 nr_new_roots, u64 num_bytes, u64 seq)
{
- struct ulist_node *unode;
- struct ulist_iterator uiter;
struct btrfs_qgroup *qg;
- u64 cur_new_count, cur_old_count;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(qgroups, &uiter))) {
+ list_for_each_entry(qg, qgroups, nested_iterator) {
+ u64 cur_new_count, cur_old_count;
bool dirty = false;
- qg = unode_aux_to_qgroup(unode);
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
- trace_qgroup_update_counters(fs_info, qg, cur_old_count,
- cur_new_count);
+ trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count,
+ cur_new_count);
/* Rfer update part */
if (cur_old_count == 0 && cur_new_count > 0) {
@@ -2498,7 +2858,6 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
if (dirty)
qgroup_dirty(fs_info, qg);
}
- return 0;
}
/*
@@ -2527,7 +2886,7 @@ static int maybe_fs_roots(struct ulist *roots)
* trees.
* If it contains a non-fs tree, it won't be shared with fs/subvol trees.
*/
- return is_fstree(unode->val);
+ return btrfs_is_fstree(unode->val);
}
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
@@ -2535,8 +2894,7 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
struct ulist *new_roots)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct ulist *qgroups = NULL;
- struct ulist *tmp = NULL;
+ LIST_HEAD(qgroups);
u64 seq;
u64 nr_new_roots = 0;
u64 nr_old_roots = 0;
@@ -2546,7 +2904,8 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
* If quotas get disabled meanwhile, the resources need to be freed and
* we can't just exit here.
*/
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info) ||
+ fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
goto out_free;
if (new_roots) {
@@ -2564,22 +2923,9 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
if (nr_old_roots == 0 && nr_new_roots == 0)
goto out_free;
- BUG_ON(!fs_info->quota_root);
-
trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
num_bytes, nr_old_roots, nr_new_roots);
- qgroups = ulist_alloc(GFP_NOFS);
- if (!qgroups) {
- ret = -ENOMEM;
- goto out_free;
- }
- tmp = ulist_alloc(GFP_NOFS);
- if (!tmp) {
- ret = -ENOMEM;
- goto out_free;
- }
-
mutex_lock(&fs_info->qgroup_rescan_lock);
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
@@ -2594,29 +2940,27 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
seq = fs_info->qgroup_seq;
/* Update old refcnts using old_roots */
- ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
- UPDATE_OLD);
- if (ret < 0)
- goto out;
+ qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
/* Update new refcnts using new_roots */
- ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
- UPDATE_NEW);
- if (ret < 0)
- goto out;
+ qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
- qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
+ qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
num_bytes, seq);
/*
+ * We're done using the iterator, release all its qgroups while holding
+ * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
+ * and trigger use-after-free accesses to qgroups.
+ */
+ qgroup_iterator_nested_clean(&qgroups);
+
+ /*
* Bump qgroup_seq to avoid seq overlap
*/
fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
-out:
spin_unlock(&fs_info->qgroup_lock);
out_free:
- ulist_free(tmp);
- ulist_free(qgroups);
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
@@ -2628,81 +2972,109 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
struct btrfs_qgroup_extent_record *record;
struct btrfs_delayed_ref_root *delayed_refs;
struct ulist *new_roots = NULL;
- struct rb_node *node;
+ unsigned long index;
u64 num_dirty_extents = 0;
u64 qgroup_to_skip;
int ret = 0;
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ return 0;
+
delayed_refs = &trans->transaction->delayed_refs;
qgroup_to_skip = delayed_refs->qgroup_to_skip;
- while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
- record = rb_entry(node, struct btrfs_qgroup_extent_record,
- node);
+ xa_for_each(&delayed_refs->dirty_extents, index, record) {
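+ /*
+  * Dirty extent records are now kept in an xarray indexed by
+  * bytenr >> sectorsize_bits, so the extent start is reconstructed from
+  * the index here instead of being stored in the record itself.
+  */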
+ const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
num_dirty_extents++;
- trace_btrfs_qgroup_account_extents(fs_info, record);
+ trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
+
+ if (!ret && !(fs_info->qgroup_flags &
+ BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+
+ ctx.bytenr = bytenr;
+ ctx.fs_info = fs_info;
- if (!ret) {
/*
* Old roots should be searched when inserting qgroup
- * extent record
+ * extent record.
+ *
+ * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
+ * we may have some record inserted during
+ * NO_ACCOUNTING (thus no old_roots populated), but
+ * later we start rescan, which clears NO_ACCOUNTING,
+ * leaving some inserted records without old_roots
+ * populated.
+ *
+ * Those cases are rare and should not cause too much
+ * time spent during commit_transaction().
*/
- if (WARN_ON(!record->old_roots)) {
+ if (!record->old_roots) {
/* Search commit root to find old_roots */
- ret = btrfs_find_all_roots(NULL, fs_info,
- record->bytenr, 0,
- &record->old_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret < 0)
goto cleanup;
+ record->old_roots = ctx.roots;
+ ctx.roots = NULL;
}
- /* Free the reserved data space */
- btrfs_qgroup_free_refroot(fs_info,
- record->data_rsv_refroot,
- record->data_rsv,
- BTRFS_QGROUP_RSV_DATA);
/*
* Use BTRFS_SEQ_LAST as time_seq to do special search,
* which doesn't lock tree or delayed_refs and search
* current root. It's safe inside commit_transaction().
*/
- ret = btrfs_find_all_roots(trans, fs_info,
- record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
+ ctx.trans = trans;
+ ctx.time_seq = BTRFS_SEQ_LAST;
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret < 0)
goto cleanup;
+ new_roots = ctx.roots;
if (qgroup_to_skip) {
ulist_del(new_roots, qgroup_to_skip, 0);
ulist_del(record->old_roots, qgroup_to_skip,
0);
}
- ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+ ret = btrfs_qgroup_account_extent(trans, bytenr,
record->num_bytes,
record->old_roots,
new_roots);
record->old_roots = NULL;
new_roots = NULL;
}
+ /* Free the reserved data space */
+ btrfs_qgroup_free_refroot(fs_info,
+ record->data_rsv_refroot,
+ record->data_rsv,
+ BTRFS_QGROUP_RSV_DATA);
cleanup:
ulist_free(record->old_roots);
ulist_free(new_roots);
new_roots = NULL;
- rb_erase(node, &delayed_refs->dirty_extent_root);
+ xa_erase(&delayed_refs->dirty_extents, index);
kfree(record);
}
- trace_qgroup_num_dirty_extents(fs_info, trans->transid,
- num_dirty_extents);
+ trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents);
return ret;
}
/*
- * called from commit_transaction. Writes all changed qgroups to disk.
+ * Writes all changed qgroups to disk.
+ * Called by the transaction commit path and the qgroup assign ioctl.
*/
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret = 0;
+ /*
+ * In case we are called from the qgroup assign ioctl, assert that we
+ * are holding the qgroup_ioctl_lock; otherwise we can race with a quota
+ * disable operation (ioctl) and access a freed quota root.
+ */
+ if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
+ lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
+
if (!fs_info->quota_root)
return ret;
@@ -2715,15 +3087,15 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
spin_unlock(&fs_info->qgroup_lock);
ret = update_qgroup_info_item(trans, qgroup);
if (ret)
- fs_info->qgroup_flags |=
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info,
+ "qgroup info item update error %d", ret);
ret = update_qgroup_limit_item(trans, qgroup);
if (ret)
- fs_info->qgroup_flags |=
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info,
+ "qgroup limit item update error %d", ret);
spin_lock(&fs_info->qgroup_lock);
}
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (btrfs_qgroup_enabled(fs_info))
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
else
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -2731,11 +3103,165 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
ret = update_qgroup_status_item(trans);
if (ret)
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info,
+ "qgroup status item update error %d", ret);
return ret;
}
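+
+/*
+ * Sanity check a btrfs_qgroup_inherit structure, presumably supplied by the
+ * subvolume/snapshot creation ioctls, before it is used.  A short summary of
+ * the checks below (not authoritative documentation): only supported flags, a
+ * size that matches num_qgroups, no legacy rfer/excl copies, and, when
+ * qgroups are enabled, every listed qgroup must exist and be a higher level
+ * (non-zero level) qgroup.
+ */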
+int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_inherit *inherit,
+ size_t size)
+{
+ if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
+ return -EOPNOTSUPP;
+ if (size < sizeof(*inherit) || size > PAGE_SIZE)
+ return -EINVAL;
+
+ /*
+ * In the past we allowed btrfs_qgroup_inherit to specify copying
+ * rfer/excl numbers directly from other qgroups. This behavior has
+ * been disabled in userspace for a very long time, so disable it in
+ * the kernel as well, as it is known to mark qgroups inconsistent,
+ * and a rescan would wipe out the changes anyway.
+ *
+ * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
+ */
+ if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
+ return -EINVAL;
+
+ if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
+ return -EINVAL;
+
+ /*
+ * Skip the inherit source qgroups check if qgroups are not enabled.
+ * Qgroups can still be enabled later, causing problems, but in that
+ * case btrfs_qgroup_inherit() would just ignore those invalid ones.
+ */
+ if (!btrfs_qgroup_enabled(fs_info))
+ return 0;
+
+ /*
+ * Now check all the remaining qgroups; they should all:
+ *
+ * - Exist
+ * - Be higher level qgroups.
+ */
+ for (int i = 0; i < inherit->num_qgroups; i++) {
+ struct btrfs_qgroup *qgroup;
+ u64 qgroupid = inherit->qgroups[i];
+
+ if (btrfs_qgroup_level(qgroupid) == 0)
+ return -EINVAL;
+
+ spin_lock(&fs_info->qgroup_lock);
+ qgroup = find_qgroup_rb(fs_info, qgroupid);
+ if (!qgroup) {
+ spin_unlock(&fs_info->qgroup_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&fs_info->qgroup_lock);
+ }
+ return 0;
+}
+
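+/*
+ * Build an inherit structure when the caller did not supply one: the new
+ * subvolume joins exactly the qgroups that the qgroup of @inode_rootid
+ * (presumably the tree holding the new subvolume's parent directory) is a
+ * member of.  Used below only for simple quotas, so that accounting keeps
+ * following the existing hierarchy automatically.
+ */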
+static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
+ u64 inode_rootid,
+ struct btrfs_qgroup_inherit **inherit)
+{
+ int i = 0;
+ u64 num_qgroups = 0;
+ struct btrfs_qgroup *inode_qg;
+ struct btrfs_qgroup_list *qg_list;
+ struct btrfs_qgroup_inherit *res;
+ size_t struct_sz;
+ u64 *qgids;
+
+ if (*inherit)
+ return -EEXIST;
+
+ inode_qg = find_qgroup_rb(fs_info, inode_rootid);
+ if (!inode_qg)
+ return -ENOENT;
+
+ num_qgroups = list_count_nodes(&inode_qg->groups);
+
+ if (!num_qgroups)
+ return 0;
+
+ struct_sz = struct_size(res, qgroups, num_qgroups);
+ if (struct_sz == SIZE_MAX)
+ return -ERANGE;
+
+ res = kzalloc(struct_sz, GFP_NOFS);
+ if (!res)
+ return -ENOMEM;
+ res->num_qgroups = num_qgroups;
+ qgids = res->qgroups;
+
+ list_for_each_entry(qg_list, &inode_qg->groups, next_group)
+ qgids[i++] = qg_list->group->qgroupid;
+
+ *inherit = res;
+ return 0;
+}
+
+/*
+ * Check if we can skip rescan when inheriting qgroups. If @src has a single
+ * @parent, and that @parent owns all its bytes exclusively, we can skip
+ * the full rescan by just adding nodesize to the @parent's excl/rfer.
+ *
+ * Return <0 for fatal errors (like srcid/parentid having no qgroup).
+ * Return 0 if a quick inherit is done.
+ * Return >0 if a quick inherit is not possible, and a full rescan is needed.
+ */
+static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
+ u64 srcid, u64 parentid)
+{
+ struct btrfs_qgroup *src;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup_list *list;
+ int nr_parents = 0;
+
+ src = find_qgroup_rb(fs_info, srcid);
+ if (!src)
+ return -ENOENT;
+ parent = find_qgroup_rb(fs_info, parentid);
+ if (!parent)
+ return -ENOENT;
+
+ /*
+ * Source has no parent qgroup, but our new qgroup would have one.
+ * Qgroup numbers would become inconsistent.
+ */
+ if (list_empty(&src->groups))
+ return 1;
+
+ list_for_each_entry(list, &src->groups, next_group) {
+ /* The parent is not the same, quick update is not possible. */
+ if (list->group->qgroupid != parentid)
+ return 1;
+ nr_parents++;
+ /*
+ * More than one parent qgroup, we can't be sure about accounting
+ * consistency.
+ */
+ if (nr_parents > 1)
+ return 1;
+ }
+
+ /*
+ * The parent does not exclusively own all its bytes. We're not sure
+ * if the source has any bytes not fully owned by the parent.
+ */
+ if (parent->excl != parent->rfer)
+ return 1;
+
+ parent->excl += fs_info->nodesize;
+ parent->rfer += fs_info->nodesize;
+ return 0;
+}
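+
+/*
+ * Illustrative example (hypothetical ids): snapshotting subvolume 257 whose
+ * qgroup 0/257 is a member of 1/100 and nothing else, with the snapshot also
+ * being added to 1/100.  If 1/100 has rfer == excl, everything it references
+ * is exclusive to it, so the snapshot only adds one new tree root: bump
+ * 1/100's rfer/excl by nodesize and skip the rescan.  Any other topology
+ * makes qgroup_snapshot_quick_inherit() return >0 and forces a full rescan.
+ */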
+
/*
* Copy the accounting information between qgroups. This is necessary
* when a snapshot or a subvolume is created. Throwing an error will
@@ -2743,20 +3269,30 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
* when a readonly fs is a reasonable outcome.
*/
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
- u64 objectid, struct btrfs_qgroup_inherit *inherit)
+ u64 objectid, u64 inode_rootid,
+ struct btrfs_qgroup_inherit *inherit)
{
int ret = 0;
- int i;
u64 *i_qgroups;
bool committing = false;
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
+ struct btrfs_qgroup *prealloc = NULL;
+ struct btrfs_qgroup_list **qlist_prealloc = NULL;
+ bool free_inherit = false;
bool need_rescan = false;
u32 level_size = 0;
u64 nums;
+ if (!btrfs_qgroup_enabled(fs_info))
+ return 0;
+
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc)
+ return -ENOMEM;
+
/*
* There are only two callers of this function.
*
@@ -2776,8 +3312,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
if (!committing)
mutex_lock(&fs_info->qgroup_ioctl_lock);
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
- goto out;
quota_root = fs_info->quota_root;
if (!quota_root) {
@@ -2785,11 +3319,18 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto out;
}
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
+ ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
+ if (ret)
+ goto out;
+ free_inherit = true;
+ }
+
if (inherit) {
i_qgroups = (u64 *)(inherit + 1);
nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2 * inherit->num_excl_copies;
- for (i = 0; i < nums; ++i) {
+ for (int i = 0; i < nums; i++) {
srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
/*
@@ -2816,7 +3357,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
*/
if (inherit) {
i_qgroups = (u64 *)(inherit + 1);
- for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
+ for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
if (*i_qgroups == 0)
continue;
ret = add_qgroup_relation_item(trans, objectid,
@@ -2829,16 +3370,28 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto out;
}
ret = 0;
- }
+ qlist_prealloc = kcalloc(inherit->num_qgroups,
+ sizeof(struct btrfs_qgroup_list *),
+ GFP_NOFS);
+ if (!qlist_prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (int i = 0; i < inherit->num_qgroups; i++) {
+ qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
+ GFP_NOFS);
+ if (!qlist_prealloc[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
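+
+ /*
+  * Everything that may need memory (the new qgroup itself and the relation
+  * list entries) has been preallocated above, so the section below runs
+  * entirely under the qgroup_lock spinlock without any allocations.
+  */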
spin_lock(&fs_info->qgroup_lock);
- dstgroup = add_qgroup_rb(fs_info, objectid);
- if (IS_ERR(dstgroup)) {
- ret = PTR_ERR(dstgroup);
- goto unlock;
- }
+ dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
+ prealloc = NULL;
if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
dstgroup->lim_flags = inherit->lim.flags;
@@ -2847,17 +3400,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
dstgroup->rsv_excl = inherit->lim.rsv_excl;
- ret = update_qgroup_limit_item(trans, dstgroup);
- if (ret) {
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_info(fs_info,
- "unable to update quota limit for %llu",
- dstgroup->qgroupid);
- goto unlock;
- }
+ qgroup_dirty(fs_info, dstgroup);
}
- if (srcid) {
+ if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
srcgroup = find_qgroup_rb(fs_info, srcid);
if (!srcgroup)
goto unlock;
@@ -2884,29 +3430,40 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
qgroup_dirty(fs_info, dstgroup);
qgroup_dirty(fs_info, srcgroup);
+
+ /*
+ * If the source qgroup has a parent but the new one doesn't,
+ * we need a full rescan.
+ */
+ if (!inherit && !list_empty(&srcgroup->groups))
+ need_rescan = true;
}
if (!inherit)
goto unlock;
i_qgroups = (u64 *)(inherit + 1);
- for (i = 0; i < inherit->num_qgroups; ++i) {
+ for (int i = 0; i < inherit->num_qgroups; i++) {
if (*i_qgroups) {
- ret = add_relation_rb(fs_info, objectid, *i_qgroups);
+ ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
+ *i_qgroups);
+ qlist_prealloc[i] = NULL;
if (ret)
goto unlock;
}
+ if (srcid) {
+ /* Check if we can do a quick inherit. */
+ ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
+ if (ret < 0)
+ goto unlock;
+ if (ret > 0)
+ need_rescan = true;
+ ret = 0;
+ }
++i_qgroups;
-
- /*
- * If we're doing a snapshot, and adding the snapshot to a new
- * qgroup, the numbers are guaranteed to be incorrect.
- */
- if (srcid)
- need_rescan = true;
}
- for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
+ for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
struct btrfs_qgroup *src;
struct btrfs_qgroup *dst;
@@ -2927,7 +3484,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
/* Manually tweaking numbers certainly needs a rescan */
need_rescan = true;
}
- for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
+ for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
struct btrfs_qgroup *src;
struct btrfs_qgroup *dst;
@@ -2955,7 +3512,22 @@ out:
if (!committing)
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (need_rescan)
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan");
+ if (qlist_prealloc) {
+ for (int i = 0; i < inherit->num_qgroups; i++)
+ kfree(qlist_prealloc[i]);
+ kfree(qlist_prealloc);
+ }
+ if (free_inherit)
+ kfree(inherit);
+
+ /*
+ * At this point we either failed to allocate prealloc, or we
+ * succeeded and passed its ownership to add_qgroup_rb(). In any
+ * case, this needs to be NULL or something is wrong.
+ */
+ ASSERT(prealloc == NULL);
+
return ret;
}
@@ -2977,12 +3549,11 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
{
struct btrfs_qgroup *qgroup;
struct btrfs_fs_info *fs_info = root->fs_info;
- u64 ref_root = root->root_key.objectid;
+ u64 ref_root = btrfs_root_id(root);
int ret = 0;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
+ LIST_HEAD(qgroup_list);
- if (!is_fstree(ref_root))
+ if (!btrfs_is_fstree(ref_root))
return 0;
if (num_bytes == 0)
@@ -3000,49 +3571,28 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
if (!qgroup)
goto out;
- /*
- * in a first step, we check all affected qgroups if any limits would
- * be exceeded
- */
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- qgroup_to_aux(qgroup), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(unode);
-
- if (enforce && !qgroup_check_limits(qg, num_bytes)) {
+ if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
ret = -EDQUOT;
goto out;
}
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ list_for_each_entry(glist, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
+
ret = 0;
/*
* no limits exceeded, now record the reservation into all qgroups
*/
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
-
- qg = unode_aux_to_qgroup(unode);
-
- qgroup_rsv_add(fs_info, qg, num_bytes, type);
- }
+ list_for_each_entry(qgroup, &qgroup_list, iterator)
+ qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
out:
+ qgroup_iterator_clean(&qgroup_list);
spin_unlock(&fs_info->qgroup_lock);
return ret;
}
@@ -3061,11 +3611,9 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
enum btrfs_qgroup_rsv_type type)
{
struct btrfs_qgroup *qgroup;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
- int ret = 0;
+ LIST_HEAD(qgroup_list);
- if (!is_fstree(ref_root))
+ if (!btrfs_is_fstree(ref_root))
return;
if (num_bytes == 0)
@@ -3091,30 +3639,17 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
*/
num_bytes = qgroup->rsv.values[type];
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- qgroup_to_aux(qgroup), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(unode);
-
- qgroup_rsv_release(fs_info, qg, num_bytes, type);
-
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
+ qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
+ list_for_each_entry(glist, &qgroup->groups, next_group) {
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
}
-
out:
+ qgroup_iterator_clean(&qgroup_list);
spin_unlock(&fs_info->qgroup_lock);
}
@@ -3141,24 +3676,27 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *extent_root;
struct btrfs_key found;
struct extent_buffer *scratch_leaf = NULL;
- struct ulist *roots = NULL;
u64 num_bytes;
bool done;
int slot;
int ret;
+ if (!btrfs_qgroup_full_accounting(fs_info))
+ return 1;
+
mutex_lock(&fs_info->qgroup_rescan_lock);
- ret = btrfs_search_slot_for_read(fs_info->extent_root,
+ extent_root = btrfs_extent_root(fs_info,
+ fs_info->qgroup_rescan_progress.objectid);
+ ret = btrfs_search_slot_for_read(extent_root,
&fs_info->qgroup_rescan_progress,
path, 1, 0);
btrfs_debug(fs_info,
- "current progress key (%llu %u %llu), search_slot ret %d",
- fs_info->qgroup_rescan_progress.objectid,
- fs_info->qgroup_rescan_progress.type,
- fs_info->qgroup_rescan_progress.offset, ret);
+ "current progress key " BTRFS_KEY_FMT ", search_slot ret %d",
+ BTRFS_KEY_FMT_VALUE(&fs_info->qgroup_rescan_progress), ret);
if (ret) {
/*
@@ -3191,6 +3729,8 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
mutex_unlock(&fs_info->qgroup_rescan_lock);
for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+
btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
if (found.type != BTRFS_EXTENT_ITEM_KEY &&
found.type != BTRFS_METADATA_ITEM_KEY)
@@ -3200,13 +3740,15 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
else
num_bytes = found.offset;
- ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
- &roots, false);
+ ctx.bytenr = found.objectid;
+ ctx.fs_info = fs_info;
+
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret < 0)
goto out;
/* For rescan, just pass old_roots as NULL */
ret = btrfs_qgroup_account_extent(trans, found.objectid,
- num_bytes, NULL, roots);
+ num_bytes, NULL, ctx.roots);
if (ret < 0)
goto out;
}
@@ -3223,8 +3765,15 @@ out:
static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
- return btrfs_fs_closing(fs_info) ||
- test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+ if (btrfs_fs_closing(fs_info))
+ return true;
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+ return true;
+ if (!btrfs_qgroup_enabled(fs_info))
+ return true;
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
+ return true;
+ return false;
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
@@ -3233,33 +3782,36 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
qgroup_rescan_work);
struct btrfs_path *path;
struct btrfs_trans_handle *trans = NULL;
- int err = -ENOMEM;
int ret = 0;
bool stopped = false;
+ bool did_leaf_rescans = false;
+
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ return;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ ret = -ENOMEM;
goto out;
+ }
/*
* Rescan should only search for commit root, and any later difference
* should be recorded by qgroup
*/
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
- err = 0;
- while (!err && !(stopped = rescan_should_stop(fs_info))) {
+ while (!ret && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
break;
}
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
- err = -EINTR;
- } else {
- err = qgroup_rescan_leaf(trans, path);
- }
- if (err > 0)
+
+ ret = qgroup_rescan_leaf(trans, path);
+ did_leaf_rescans = true;
+
+ if (ret > 0)
btrfs_commit_transaction(trans);
else
btrfs_end_transaction(trans);
@@ -3269,39 +3821,48 @@ out:
btrfs_free_path(path);
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (err > 0 &&
+ if (ret > 0 &&
fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- } else if (err < 0) {
+ } else if (ret < 0 || stopped) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
}
mutex_unlock(&fs_info->qgroup_rescan_lock);
/*
- * only update status, since the previous part has already updated the
- * qgroup info.
+ * Only update status, since the previous part has already updated the
+ * qgroup info, and only if we did any actual work. This also prevents a
+ * race with a concurrent quota disable, which has already set
+ * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
+ * btrfs_quota_disable().
*/
- trans = btrfs_start_transaction(fs_info->quota_root, 1);
- if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ if (did_leaf_rescans) {
+ trans = btrfs_start_transaction(fs_info->quota_root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ btrfs_err(fs_info,
+ "fail to start transaction for status update: %d",
+ ret);
+ }
+ } else {
trans = NULL;
- btrfs_err(fs_info,
- "fail to start transaction for status update: %d",
- err);
}
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (!stopped)
+ if (!stopped ||
+ fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
if (trans) {
- ret = update_qgroup_status_item(trans);
- if (ret < 0) {
- err = ret;
- btrfs_err(fs_info, "fail to update qgroup status: %d",
- err);
+ int ret2 = update_qgroup_status_item(trans);
+
+ if (ret2 < 0) {
+ ret = ret2;
+ btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
}
}
fs_info->qgroup_rescan_running = false;
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
complete_all(&fs_info->qgroup_rescan_completion);
mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -3312,11 +3873,13 @@ out:
if (stopped) {
btrfs_info(fs_info, "qgroup scan paused");
- } else if (err >= 0) {
+ } else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
+ btrfs_info(fs_info, "qgroup scan cancelled");
+ } else if (ret >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",
- err > 0 ? " (inconsistency flag cleared)" : "");
+ ret > 0 ? " (inconsistency flag cleared)" : "");
} else {
- btrfs_err(fs_info, "qgroup scan failed with %d", err);
+ btrfs_err(fs_info, "qgroup scan failed with %d", ret);
}
}
@@ -3330,18 +3893,23 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
{
int ret = 0;
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
+ btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
+ return -EINVAL;
+ }
+
if (!init_flags) {
/* we're resuming qgroup rescan at mount time */
if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
- btrfs_warn(fs_info,
+ btrfs_debug(fs_info,
"qgroup rescan init failed, qgroup rescan is not queued");
ret = -EINVAL;
} else if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_ON)) {
- btrfs_warn(fs_info,
+ btrfs_debug(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
- ret = -EINVAL;
+ ret = -ENOTCONN;
}
if (ret)
@@ -3352,14 +3920,15 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
if (init_flags) {
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
- btrfs_warn(fs_info,
- "qgroup rescan is already in progress");
ret = -EINPROGRESS;
} else if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_ON)) {
- btrfs_warn(fs_info,
+ btrfs_debug(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
- ret = -EINVAL;
+ ret = -ENOTCONN;
+ } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
+ /* Quota disable is in progress */
+ ret = -EBUSY;
}
if (ret) {
@@ -3371,12 +3940,14 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
memset(&fs_info->qgroup_rescan_progress, 0,
sizeof(fs_info->qgroup_rescan_progress));
+ fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
+ BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
fs_info->qgroup_rescan_progress.objectid = progress_objectid;
init_completion(&fs_info->qgroup_rescan_completion);
mutex_unlock(&fs_info->qgroup_rescan_lock);
btrfs_init_work(&fs_info->qgroup_rescan_work,
- btrfs_qgroup_rescan_worker, NULL, NULL);
+ btrfs_qgroup_rescan_worker, NULL);
return 0;
}
@@ -3403,7 +3974,6 @@ int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
int ret = 0;
- struct btrfs_trans_handle *trans;
ret = qgroup_rescan_init(fs_info, 0, 1);
if (ret)
@@ -3420,12 +3990,7 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
* going to clear all tracking information for a clean start.
*/
- trans = btrfs_join_transaction(fs_info->fs_root);
- if (IS_ERR(trans)) {
- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
- return PTR_ERR(trans);
- }
- ret = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_current_transaction(fs_info->fs_root);
if (ret) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return ret;
@@ -3434,12 +3999,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
qgroup_rescan_zero_tracking(fs_info);
mutex_lock(&fs_info->qgroup_rescan_lock);
- fs_info->qgroup_rescan_running = true;
- btrfs_queue_work(fs_info->qgroup_rescan_workers,
- &fs_info->qgroup_rescan_work);
+ /*
+ * The rescan worker is only for full accounting qgroups; check if that is
+ * enabled, as it is pointless to queue it otherwise. A concurrent quota
+ * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
+ */
+ if (btrfs_qgroup_full_accounting(fs_info)) {
+ fs_info->qgroup_rescan_running = true;
+ btrfs_queue_work(fs_info->qgroup_rescan_workers,
+ &fs_info->qgroup_rescan_work);
+ } else {
+ ret = -ENOTCONN;
+ }
mutex_unlock(&fs_info->qgroup_rescan_lock);
- return 0;
+ return ret;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
@@ -3526,8 +4100,8 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode,
* Now the entry is in [start, start + len), revert the
* EXTENT_QGROUP_RESERVED bit.
*/
- clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
- entry_end, EXTENT_QGROUP_RESERVED);
+ clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end,
+ EXTENT_QGROUP_RESERVED, NULL);
if (!ret && clear_ret < 0)
ret = clear_ret;
@@ -3564,7 +4138,6 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode,
*/
static int try_flush_qgroup(struct btrfs_root *root)
{
- struct btrfs_trans_handle *trans;
int ret;
/* Can't hold an open transaction or we run the risk of deadlocking. */
@@ -3585,15 +4158,18 @@ static int try_flush_qgroup(struct btrfs_root *root)
ret = btrfs_start_delalloc_snapshot(root, true);
if (ret < 0)
goto out;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
+ /*
+ * After waiting for ordered extents, run delayed iputs in order to free
+ * space from unlinked files before committing the current transaction,
+ * as ordered extents may have been holding the last reference of an
+ * inode and they add a delayed iput when they complete.
+ */
+ btrfs_run_delayed_iputs(root->fs_info);
+ btrfs_wait_on_delayed_iputs(root->fs_info);
- ret = btrfs_commit_transaction(trans);
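+ /*
+  * btrfs_commit_current_transaction() is assumed to commit whatever
+  * transaction is currently running (or return right away if there is
+  * none), replacing the old join + commit pair above.
+  */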
+ ret = btrfs_commit_current_transaction(root);
out:
clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
wake_up(&root->qgroup_flush_wait);
@@ -3611,8 +4187,8 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
u64 to_reserve;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
- !is_fstree(root->root_key.objectid) || len == 0)
+ if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
+ !btrfs_is_fstree(btrfs_root_id(root)) || len == 0)
return 0;
/* @reserved parameter is mandatory for qgroup */
@@ -3627,8 +4203,9 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
reserved = *reserved_ret;
/* Record already reserved space */
orig_reserved = reserved->bytes_changed;
- ret = set_record_extent_bits(&inode->io_tree, start,
- start + len -1, EXTENT_QGROUP_RESERVED, reserved);
+ ret = btrfs_set_record_extent_bits(&inode->io_tree, start,
+ start + len - 1, EXTENT_QGROUP_RESERVED,
+ reserved);
/* Newly reserved space */
to_reserve = reserved->bytes_changed - orig_reserved;
@@ -3682,13 +4259,14 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
- struct extent_changeset *reserved, u64 start, u64 len)
+ struct extent_changeset *reserved,
+ u64 start, u64 len, u64 *freed_ret)
{
struct btrfs_root *root = inode->root;
struct ulist_node *unode;
struct ulist_iterator uiter;
struct extent_changeset changeset;
- int freed = 0;
+ u64 freed = 0;
int ret;
extent_changeset_init(&changeset);
@@ -3720,16 +4298,19 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
* EXTENT_QGROUP_RESERVED, we won't double free.
* So no need to rush.
*/
- ret = clear_record_extent_bits(&inode->io_tree, free_start,
- free_start + free_len - 1,
- EXTENT_QGROUP_RESERVED, &changeset);
+ ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start,
+ free_start + free_len - 1,
+ EXTENT_QGROUP_RESERVED,
+ &changeset);
if (ret < 0)
goto out;
freed += changeset.bytes_changed;
}
- btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
+ btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
BTRFS_QGROUP_RSV_DATA);
- ret = freed;
+ if (freed_ret)
+ *freed_ret = freed;
+ ret = 0;
out:
extent_changeset_release(&changeset);
return ret;
@@ -3737,22 +4318,25 @@ out:
static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len,
- int free)
+ u64 *released, int free)
{
struct extent_changeset changeset;
int trace_op = QGROUP_RELEASE;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
- return 0;
+ if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
+ return btrfs_clear_record_extent_bits(&inode->io_tree, start,
+ start + len - 1,
+ EXTENT_QGROUP_RESERVED, NULL);
+ }
/* In release case, we shouldn't have @reserved */
WARN_ON(!free && reserved);
if (free && reserved)
- return qgroup_free_reserved_data(inode, reserved, start, len);
+ return qgroup_free_reserved_data(inode, reserved, start, len, released);
extent_changeset_init(&changeset);
- ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
- EXTENT_QGROUP_RESERVED, &changeset);
+ ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
+ EXTENT_QGROUP_RESERVED, &changeset);
if (ret < 0)
goto out;
@@ -3762,9 +4346,10 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
changeset.bytes_changed, trace_op);
if (free)
btrfs_qgroup_free_refroot(inode->root->fs_info,
- inode->root->root_key.objectid,
+ btrfs_root_id(inode->root),
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
- ret = changeset.bytes_changed;
+ if (released)
+ *released = changeset.bytes_changed;
out:
extent_changeset_release(&changeset);
return ret;
@@ -3783,9 +4368,10 @@ out:
* NOTE: This function may sleep for memory allocation.
*/
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
- struct extent_changeset *reserved, u64 start, u64 len)
+ struct extent_changeset *reserved,
+ u64 start, u64 len, u64 *freed)
{
- return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
+ return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
}
/*
@@ -3803,9 +4389,9 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
*
* NOTE: This function may sleep for memory allocation.
*/
-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
+int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
{
- return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
+ return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
}
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
@@ -3854,12 +4440,12 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->root_key.objectid) || num_bytes == 0)
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
+ !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
+ trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type);
ret = qgroup_reserve(root, num_bytes, enforce, type);
if (ret < 0)
return ret;
@@ -3876,12 +4462,13 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
}
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
- enum btrfs_qgroup_rsv_type type, bool enforce)
+ enum btrfs_qgroup_rsv_type type, bool enforce,
+ bool noflush)
{
int ret;
ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
- if (ret <= 0 && ret != -EDQUOT)
+ if ((ret <= 0 && ret != -EDQUOT) || noflush)
return ret;
ret = try_flush_qgroup(root);
@@ -3890,18 +4477,22 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}
+/*
+ * Per-transaction meta reservations should all be freed at transaction
+ * commit time.
+ */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->root_key.objectid))
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
+ !btrfs_is_fstree(btrfs_root_id(root)))
return;
/* TODO: Update trace point to handle such free */
- trace_qgroup_meta_free_all_pertrans(root);
+ trace_btrfs_qgroup_meta_free_all_pertrans(root);
/* Special value -1 means to free all reserved space */
- btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
+ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
BTRFS_QGROUP_RSV_META_PERTRANS);
}
@@ -3910,8 +4501,8 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->root_key.objectid))
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
+ !btrfs_is_fstree(btrfs_root_id(root)))
return;
/*
@@ -3921,18 +4512,15 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
*/
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
- btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
- num_bytes, type);
+ trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type);
+ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
int num_bytes)
{
struct btrfs_qgroup *qgroup;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
- int ret = 0;
+ LIST_HEAD(qgroup_list);
if (num_bytes == 0)
return;
@@ -3943,46 +4531,45 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
qgroup = find_qgroup_rb(fs_info, ref_root);
if (!qgroup)
goto out;
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- qgroup_to_aux(qgroup), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
- struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(unode);
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
- qgroup_rsv_release(fs_info, qg, num_bytes,
+ qgroup_rsv_release(fs_info, qgroup, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
- qgroup_rsv_add(fs_info, qg, num_bytes,
- BTRFS_QGROUP_RSV_META_PERTRANS);
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ if (!sb_rdonly(fs_info->sb))
+ qgroup_rsv_add(fs_info, qgroup, num_bytes,
+ BTRFS_QGROUP_RSV_META_PERTRANS);
+
+ list_for_each_entry(glist, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
out:
+ qgroup_iterator_clean(&qgroup_list);
spin_unlock(&fs_info->qgroup_lock);
}
+/*
+ * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
+ *
+ * This is called when a preallocated meta reservation needs to be used,
+ * normally right after a btrfs_join_transaction() call.
+ */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->root_key.objectid))
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
+ !btrfs_is_fstree(btrfs_root_id(root)))
return;
/* Same as btrfs_qgroup_free_meta_prealloc() */
num_bytes = sub_root_meta_rsv(root, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
- trace_qgroup_meta_convert(root, num_bytes);
- qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
+ trace_btrfs_qgroup_meta_convert(root, num_bytes);
+ qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
+ if (!sb_rdonly(fs_info->sb))
+ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
}
/*
@@ -3997,8 +4584,8 @@ void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
int ret;
extent_changeset_init(&changeset);
- ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
- EXTENT_QGROUP_RESERVED, &changeset);
+ ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
+ EXTENT_QGROUP_RESERVED, &changeset);
WARN_ON(ret < 0);
if (WARN_ON(changeset.bytes_changed)) {
@@ -4009,7 +4596,7 @@ void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
btrfs_ino(inode), unode->val, unode->aux);
}
btrfs_qgroup_free_refroot(inode->root->fs_info,
- inode->root->root_key.objectid,
+ btrfs_root_id(inode->root),
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
}
@@ -4058,6 +4645,28 @@ out:
spin_unlock(&swapped_blocks->lock);
}
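+
+/*
+ * Comparators for the per-level swapped block rb-trees: the key variant
+ * compares a bytenr key against a node and is used with rb_find(), while the
+ * node variant reuses it for insertion via rb_find_add(), replacing the
+ * open-coded rb_node walks below.
+ */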
+static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *bytenr = key;
+ const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
+ struct btrfs_qgroup_swapped_block, node);
+
+ if (block->subvol_bytenr < *bytenr)
+ return -1;
+ else if (block->subvol_bytenr > *bytenr)
+ return 1;
+
+ return 0;
+}
+
+static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
+ struct btrfs_qgroup_swapped_block, node);
+
+ return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
+}
+
/*
* Add subtree roots record into @subvol_root.
*
@@ -4068,8 +4677,7 @@ out:
* BOTH POINTERS ARE BEFORE TREE SWAP
* @last_snapshot: last snapshot generation of the subvolume tree
*/
-int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
- struct btrfs_root *subvol_root,
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
@@ -4078,16 +4686,15 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = subvol_root->fs_info;
struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
struct btrfs_qgroup_swapped_block *block;
- struct rb_node **cur;
- struct rb_node *parent = NULL;
+ struct rb_node *node;
int level = btrfs_header_level(subvol_parent) - 1;
int ret = 0;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
- if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
- btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
+ if (unlikely(btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
+ btrfs_node_ptr_generation(reloc_parent, reloc_slot))) {
btrfs_err_rl(fs_info,
"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
__func__,
@@ -4128,47 +4735,32 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
/* Insert @block into @blocks */
spin_lock(&blocks->lock);
- cur = &blocks->blocks[level].rb_node;
- while (*cur) {
+ node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
+ if (node) {
struct btrfs_qgroup_swapped_block *entry;
- parent = *cur;
- entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
- node);
+ entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
- if (entry->subvol_bytenr < block->subvol_bytenr) {
- cur = &(*cur)->rb_left;
- } else if (entry->subvol_bytenr > block->subvol_bytenr) {
- cur = &(*cur)->rb_right;
- } else {
- if (entry->subvol_generation !=
- block->subvol_generation ||
- entry->reloc_bytenr != block->reloc_bytenr ||
- entry->reloc_generation !=
- block->reloc_generation) {
- /*
- * Duplicated but mismatch entry found.
- * Shouldn't happen.
- *
- * Marking qgroup inconsistent should be enough
- * for end users.
- */
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- ret = -EEXIST;
- }
- kfree(block);
- goto out_unlock;
+ if (entry->subvol_generation != block->subvol_generation ||
+ entry->reloc_bytenr != block->reloc_bytenr ||
+ entry->reloc_generation != block->reloc_generation) {
+ /*
+ * Duplicated but mismatched entry found. Shouldn't happen.
+ * Marking qgroup inconsistent should be enough for end
+ * users.
+ */
+ DEBUG_WARN("duplicated but mismatched entry found");
+ ret = -EEXIST;
}
+ kfree(block);
+ goto out_unlock;
}
- rb_link_node(&block->node, parent, cur);
- rb_insert_color(&block->node, &blocks->blocks[level]);
blocks->swapped = true;
out_unlock:
spin_unlock(&blocks->lock);
out:
if (ret < 0)
- fs_info->qgroup_flags |=
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
return ret;
}
@@ -4183,19 +4775,19 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct extent_buffer *subvol_eb)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_tree_parent_check check = { 0 };
struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
- struct btrfs_qgroup_swapped_block *block;
+ struct btrfs_qgroup_swapped_block AUTO_KFREE(block);
struct extent_buffer *reloc_eb = NULL;
struct rb_node *node;
- bool found = false;
bool swapped = false;
int level = btrfs_header_level(subvol_eb);
int ret = 0;
int i;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
- if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
+ if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root)
return 0;
spin_lock(&blocks->lock);
@@ -4203,23 +4795,14 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
spin_unlock(&blocks->lock);
return 0;
}
- node = blocks->blocks[level].rb_node;
-
- while (node) {
- block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
- if (block->subvol_bytenr < subvol_eb->start) {
- node = node->rb_left;
- } else if (block->subvol_bytenr > subvol_eb->start) {
- node = node->rb_right;
- } else {
- found = true;
- break;
- }
- }
- if (!found) {
+ node = rb_find(&subvol_eb->start, &blocks->blocks[level],
+ qgroup_swapped_block_bytenr_key_cmp);
+ if (!node) {
spin_unlock(&blocks->lock);
goto out;
}
+ block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
+
/* Found one, remove it from @blocks first and update blocks->swapped */
rb_erase(&block->node, &blocks->blocks[level]);
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
@@ -4231,16 +4814,19 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
blocks->swapped = swapped;
spin_unlock(&blocks->lock);
+ check.level = block->level;
+ check.transid = block->reloc_generation;
+ check.has_first_key = true;
+ memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
+
/* Read out reloc subtree root */
- reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
- block->reloc_generation, block->level,
- &block->first_key);
+ reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
if (IS_ERR(reloc_eb)) {
ret = PTR_ERR(reloc_eb);
reloc_eb = NULL;
goto free_out;
}
- if (!extent_buffer_uptodate(reloc_eb)) {
+ if (unlikely(!extent_buffer_uptodate(reloc_eb))) {
ret = -EIO;
goto free_out;
}
@@ -4248,14 +4834,12 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
block->last_snapshot, block->trace_leaf);
free_out:
- kfree(block);
free_extent_buffer(reloc_eb);
out:
if (ret < 0) {
- btrfs_err_rl(fs_info,
- "failed to account subtree at bytenr %llu: %d",
- subvol_eb->start, ret);
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ qgroup_mark_inconsistent(fs_info,
+ "failed to account subtree at bytenr %llu: %d",
+ subvol_eb->start, ret);
}
return ret;
}
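
The two hunks above replace the open-coded red-black tree walks with rb_find_add() and rb_find() driven by the comparator pair added earlier in this patch. A minimal, self-contained sketch of that comparator contract is below; the demo_* names are invented for the illustration and are not btrfs code, only the rb_find()/rb_find_add() calls are the real <linux/rbtree.h> API.

#include <linux/rbtree.h>
#include <linux/types.h>

/* Toy node keyed by a single bytenr, mirroring the comparators above. */
struct demo_block {
	struct rb_node node;
	u64 bytenr;
};

/* Key-vs-node comparator, suitable for rb_find(). */
static int demo_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 bytenr = *(const u64 *)key;
	const struct demo_block *block = rb_entry(node, struct demo_block, node);

	if (bytenr < block->bytenr)
		return -1;
	if (bytenr > block->bytenr)
		return 1;
	return 0;
}

/* Node-vs-node comparator, suitable for rb_find_add(). */
static int demo_node_cmp(struct rb_node *new_node, const struct rb_node *existing)
{
	const struct demo_block *block = rb_entry(new_node, struct demo_block, node);

	return demo_key_cmp(&block->bytenr, existing);
}

/* Insert @block, or return the node that already holds the same bytenr. */
static struct demo_block *demo_insert(struct rb_root *root, struct demo_block *block)
{
	struct rb_node *exist = rb_find_add(&block->node, root, demo_node_cmp);

	return exist ? rb_entry(exist, struct demo_block, node) : NULL;
}

/* Look up a bytenr without constructing a node first. */
static struct demo_block *demo_lookup(struct rb_root *root, u64 bytenr)
{
	struct rb_node *found = rb_find(&bytenr, root, demo_key_cmp);

	return found ? rb_entry(found, struct demo_block, node) : NULL;
}

As in the hunks above, a non-NULL return from rb_find_add() signals that the key already exists and leaves it to the caller to decide whether that is an error.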
@@ -4263,12 +4847,58 @@ out:
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
struct btrfs_qgroup_extent_record *entry;
- struct btrfs_qgroup_extent_record *next;
- struct rb_root *root;
+ unsigned long index;
- root = &trans->delayed_refs.dirty_extent_root;
- rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
+ xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
ulist_free(entry->old_roots);
kfree(entry);
}
+ xa_destroy(&trans->delayed_refs.dirty_extents);
+}
+
+int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
+ const struct btrfs_squota_delta *delta)
+{
+ int ret;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup *qg;
+ LIST_HEAD(qgroup_list);
+ u64 root = delta->root;
+ u64 num_bytes = delta->num_bytes;
+ const int sign = (delta->is_inc ? 1 : -1);
+
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
+ return 0;
+
+ if (!btrfs_is_fstree(root))
+ return 0;
+
+ /* If the extent predates enabling quotas, don't count it. */
+ if (delta->generation < fs_info->qgroup_enable_gen)
+ return 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ qgroup = find_qgroup_rb(fs_info, root);
+ if (!qgroup) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = 0;
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qg, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
+
+ qg->excl += num_bytes * sign;
+ qg->rfer += num_bytes * sign;
+ qgroup_dirty(fs_info, qg);
+
+ list_for_each_entry(glist, &qg->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
+ }
+ qgroup_iterator_clean(&qgroup_list);
+
+out:
+ spin_unlock(&fs_info->qgroup_lock);
+ return ret;
}
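
btrfs_record_squota_delta() above applies the same signed byte delta to the owning qgroup and to every ancestor reached through its membership lists. The standalone sketch below keeps only that arithmetic and assumes a simplified single-parent chain; the demo_* names and the array-based hierarchy are made up for the example and are not btrfs structures.

#include <stdint.h>
#include <stdio.h>

struct demo_qgroup {
	uint64_t rfer;
	uint64_t excl;
	int parent;			/* index of the parent group, -1 for none */
};

/* Apply a signed simple-quota delta to one group and all of its ancestors. */
static void demo_record_delta(struct demo_qgroup *groups, int idx,
			      uint64_t num_bytes, int is_inc)
{
	const int64_t sign = is_inc ? 1 : -1;

	for (; idx >= 0; idx = groups[idx].parent) {
		/* Unsigned wraparound turns the sign = -1 case into a subtraction. */
		groups[idx].excl += num_bytes * sign;
		groups[idx].rfer += num_bytes * sign;
	}
}

int main(void)
{
	struct demo_qgroup groups[] = {
		{ .parent = -1 },	/* e.g. qgroup 1/100 */
		{ .parent = 0 },	/* e.g. qgroup 0/257, member of 1/100 */
	};

	demo_record_delta(groups, 1, 16384, 1);	/* allocate a 16K extent */
	demo_record_delta(groups, 1, 4096, 0);	/* free a 4K extent */

	printf("subvol rfer=%llu excl=%llu\n",
	       (unsigned long long)groups[1].rfer,
	       (unsigned long long)groups[1].excl);
	return 0;
}

The kernel version additionally bails out when simple quotas are not enabled, when the root is not an fstree, or when the extent predates qgroup_enable_gen, as the hunk above shows.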
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 880e9df0dac1..a979fd59a4da 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -6,11 +6,25 @@
#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H
+#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
-#include "ulist.h"
-#include "delayed-ref.h"
+#include <linux/list.h>
+#include <uapi/linux/btrfs_tree.h>
+
+struct extent_buffer;
+struct extent_changeset;
+struct btrfs_delayed_extent_op;
+struct btrfs_fs_info;
+struct btrfs_root;
+struct btrfs_ioctl_quota_ctl_args;
+struct btrfs_trans_handle;
+struct btrfs_delayed_ref_root;
+struct btrfs_inode;
+struct btrfs_transaction;
+struct btrfs_block_group;
+struct btrfs_qgroup_swapped_blocks;
/*
* Btrfs qgroup overview
@@ -101,12 +115,27 @@
*/
/*
+ * These flags share the flags field of the btrfs_qgroup_status_item with the
+ * persisted flags defined in btrfs_tree.h.
+ *
+ * To minimize the chance of collision with new persisted status flags, these
+ * count backwards from the MSB.
+ */
+#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN (1ULL << 63)
+#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING (1ULL << 62)
+
+#define BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT (3)
+
+/*
* Record a dirty extent, and info qgroup to update quota on it
- * TODO: Use kmem cache to alloc it.
*/
struct btrfs_qgroup_extent_record {
- struct rb_node node;
- u64 bytenr;
+ /*
+ * The bytenr of the extent is given by its index in the dirty_extents
+ * xarray of struct btrfs_delayed_ref_root left shifted by
+ * fs_info->sectorsize_bits.
+ */
+
u64 num_bytes;
/*
@@ -216,6 +245,33 @@ struct btrfs_qgroup {
struct list_head groups; /* groups this group is member of */
struct list_head members; /* groups that are members of this group */
struct list_head dirty; /* dirty groups */
+
+ /*
+ * For qgroup iteration usage.
+ *
+ * The iteration list should always be empty until qgroup_iterator_add()
+ * is called, and should be reset to empty after the iteration is
+ * finished.
+ */
+ struct list_head iterator;
+
+ /*
+ * For nested iterator usage.
+ *
+ * Here we support at most one level of nested iterator calls like:
+ *
+ * LIST_HEAD(all_qgroups);
+ * {
+ * LIST_HEAD(local_qgroups);
+ * qgroup_iterator_add(local_qgroups, qg);
+ * qgroup_iterator_nested_add(all_qgroups, qg);
+ * do_some_work(local_qgroups);
+ * qgroup_iterator_clean(local_qgroups);
+ * }
+ * do_some_work(all_qgroups);
+ * qgroup_iterator_nested_clean(all_qgroups);
+ */
+ struct list_head nested_iterator;
struct rb_node node; /* tree of qgroups */
/*
@@ -231,6 +287,27 @@ struct btrfs_qgroup {
struct kobject kobj;
};
+/* Glue structure to represent the relations between qgroups. */
+struct btrfs_qgroup_list {
+ struct list_head next_group;
+ struct list_head next_member;
+ struct btrfs_qgroup *group;
+ struct btrfs_qgroup *member;
+};
+
+struct btrfs_squota_delta {
+ /* The fstree root this delta counts against. */
+ u64 root;
+ /* The number of bytes in the extent being counted. */
+ u64 num_bytes;
+ /* The generation the extent was created in. */
+ u64 generation;
+ /* Whether we are using or freeing the extent. */
+ bool is_inc;
+ /* Whether the extent is data or metadata. */
+ bool is_data;
+};
+
static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
@@ -239,102 +316,52 @@ static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
/*
* For qgroup event trace points only
*/
-#define QGROUP_RESERVE (1<<0)
-#define QGROUP_RELEASE (1<<1)
-#define QGROUP_FREE (1<<2)
+enum {
+ ENUM_BIT(QGROUP_RESERVE),
+ ENUM_BIT(QGROUP_RELEASE),
+ ENUM_BIT(QGROUP_FREE),
+};
+
+enum btrfs_qgroup_mode {
+ BTRFS_QGROUP_MODE_DISABLED,
+ BTRFS_QGROUP_MODE_FULL,
+ BTRFS_QGROUP_MODE_SIMPLE
+};
-int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
+enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info);
+bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info);
+bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info);
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
bool interruptible);
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
- u64 dst);
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
+ struct btrfs_qgroup_list *prealloc);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
-struct btrfs_delayed_extent_op;
-/*
- * Inform qgroup to trace one dirty extent, its info is recorded in @record.
- * So qgroup can account it at transaction committing time.
- *
- * No lock version, caller must acquire delayed ref lock and allocated memory,
- * then call btrfs_qgroup_trace_extent_post() after exiting lock context.
- *
- * Return 0 for success insert
- * Return >0 for existing record, caller can free @record safely.
- * Error is not possible
- */
int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record);
-
-/*
- * Post handler after qgroup_trace_extent_nolock().
- *
- * NOTE: Current qgroup does the expensive backref walk at transaction
- * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming
- * new transaction.
- * This is designed to allow btrfs_find_all_roots() to get correct new_roots
- * result.
- *
- * However for old_roots there is no need to do backref walk at that time,
- * since we search commit roots to walk backref and result will always be
- * correct.
- *
- * Due to the nature of no lock version, we can't do backref there.
- * So we must call btrfs_qgroup_trace_extent_post() after exiting
- * spinlock context.
- *
- * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
- * using current root, then we can move all expensive backref walk out of
- * transaction committing, but not now as qgroup accounting will be wrong again.
- */
+ struct btrfs_qgroup_extent_record *record,
+ u64 bytenr);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
- struct btrfs_qgroup_extent_record *qrecord);
-
-/*
- * Inform qgroup to trace one dirty extent, specified by @bytenr and
- * @num_bytes.
- * So qgroup can account it at commit trans time.
- *
- * Better encapsulated version, with memory allocation and backref walk for
- * commit roots.
- * So this can sleep.
- *
- * Return 0 if the operation is done.
- * Return <0 for error, like memory allocation failure or invalid parameter
- * (NULL trans)
- */
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
- u64 num_bytes, gfp_t gfp_flag);
-
-/*
- * Inform qgroup to trace all leaf items of data
- *
- * Return 0 for success
- * Return <0 for error(ENOMEM)
- */
+ u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct extent_buffer *eb);
-/*
- * Inform qgroup to trace a whole subtree, including all its child tree
- * blocks and data.
- * The root tree block is specified by @root_eb.
- *
- * Normally used by relocation(tree block swap) and subvolume deletion.
- *
- * Return 0 for success
- * Return <0 for error(ENOMEM or tree search error)
- */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
@@ -343,40 +370,48 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
+int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_inherit *inherit,
+ size_t size);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
- u64 objectid, struct btrfs_qgroup_inherit *inherit);
+ u64 objectid, u64 inode_rootid,
+ struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
u64 rfer, u64 excl);
#endif
/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len);
-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
+int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start,
- u64 len);
+ u64 len, u64 *freed);
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
- enum btrfs_qgroup_rsv_type type, bool enforce);
+ enum btrfs_qgroup_rsv_type type, bool enforce,
+ bool noflush);
/* Reserve metadata space for pertrans and prealloc type */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
int num_bytes, bool enforce)
{
return __btrfs_qgroup_reserve_meta(root, num_bytes,
- BTRFS_QGROUP_RSV_META_PERTRANS, enforce);
+ BTRFS_QGROUP_RSV_META_PERTRANS,
+ enforce, false);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
- int num_bytes, bool enforce)
+ int num_bytes, bool enforce,
+ bool noflush)
{
return __btrfs_qgroup_reserve_meta(root, num_bytes,
- BTRFS_QGROUP_RSV_META_PREALLOC, enforce);
+ BTRFS_QGROUP_RSV_META_PREALLOC,
+ enforce, noflush);
}
void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
@@ -398,20 +433,8 @@ static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
BTRFS_QGROUP_RSV_META_PREALLOC);
}
-/*
- * Per-transaction meta reservation should be all freed at transaction commit
- * time
- */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
-
-/*
- * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
- *
- * This is called when preallocated meta reservation needs to be used.
- * Normally after btrfs_join_transaction() call.
- */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
-
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);
/* btrfs_qgroup_swapped_blocks related functions */
@@ -419,8 +442,7 @@ void btrfs_qgroup_init_swapped_blocks(
struct btrfs_qgroup_swapped_blocks *swapped_blocks);
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
-int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
- struct btrfs_root *subvol_root,
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
@@ -428,6 +450,8 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
-bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
+bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info);
+int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
+ const struct btrfs_squota_delta *delta);
#endif
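
The BTRFS_QGROUP_RUNTIME_FLAG_* bits above share one u64 with the persisted status flags by counting down from bit 63, so runtime-only state can be masked off before the flags word is written back to the status item. The sketch below only illustrates that masking; every DEMO_* value is a placeholder picked for the example, not the real on-disk bit assignment.

#include <stdint.h>
#include <stdio.h>

/* Runtime-only flags occupy the top bits of the shared flags word. */
#define DEMO_RUNTIME_FLAG_CANCEL_RESCAN		(1ULL << 63)
#define DEMO_RUNTIME_FLAG_NO_ACCOUNTING		(1ULL << 62)
#define DEMO_RUNTIME_FLAGS	(DEMO_RUNTIME_FLAG_CANCEL_RESCAN | \
				 DEMO_RUNTIME_FLAG_NO_ACCOUNTING)

/* Persisted flags grow up from bit 0 (values illustrative only). */
#define DEMO_STATUS_FLAG_ON			(1ULL << 0)
#define DEMO_STATUS_FLAG_INCONSISTENT		(1ULL << 2)

int main(void)
{
	uint64_t flags = DEMO_STATUS_FLAG_ON | DEMO_STATUS_FLAG_INCONSISTENT |
			 DEMO_RUNTIME_FLAG_NO_ACCOUNTING;

	/* Only the persisted low bits make it into the on-disk status item. */
	uint64_t on_disk = flags & ~DEMO_RUNTIME_FLAGS;

	printf("in memory: 0x%016llx\n", (unsigned long long)flags);
	printf("on disk:   0x%016llx\n", (unsigned long long)on_disk);
	return 0;
}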
diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
new file mode 100644
index 000000000000..2987cb7c686e
--- /dev/null
+++ b/fs/btrfs/raid-stripe-tree.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/btrfs_tree.h>
+#include "ctree.h"
+#include "fs.h"
+#include "accessors.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "raid-stripe-tree.h"
+#include "volumes.h"
+#include "print-tree.h"
+
+static int btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *oldkey,
+ u64 newlen, u64 frontpad)
+{
+ struct btrfs_root *stripe_root = trans->fs_info->stripe_root;
+ struct btrfs_stripe_extent *extent, AUTO_KFREE(newitem);
+ struct extent_buffer *leaf;
+ int slot;
+ size_t item_size;
+ struct btrfs_key newkey = {
+ .objectid = oldkey->objectid + frontpad,
+ .type = BTRFS_RAID_STRIPE_KEY,
+ .offset = newlen,
+ };
+ int ret;
+
+ ASSERT(newlen > 0);
+ ASSERT(oldkey->type == BTRFS_RAID_STRIPE_KEY);
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ item_size = btrfs_item_size(leaf, slot);
+
+ newitem = kzalloc(item_size, GFP_NOFS);
+ if (!newitem)
+ return -ENOMEM;
+
+ extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+
+ for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
+ struct btrfs_raid_stride *stride = &extent->strides[i];
+ u64 phys;
+
+ phys = btrfs_raid_stride_physical(leaf, stride) + frontpad;
+ btrfs_set_stack_raid_stride_physical(&newitem->strides[i], phys);
+ }
+
+ ret = btrfs_del_item(trans, stripe_root, path);
+ if (ret)
+ return ret;
+
+ btrfs_release_path(path);
+ return btrfs_insert_item(trans, stripe_root, &newkey, newitem, item_size);
+}
+
+int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *stripe_root = fs_info->stripe_root;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ u64 found_start;
+ u64 found_end;
+ u64 end = start + length;
+ int slot;
+ int ret;
+
+ if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE) || !stripe_root)
+ return 0;
+
+ if (!btrfs_is_testing(fs_info)) {
+ struct btrfs_chunk_map *map;
+ bool use_rst;
+
+ map = btrfs_find_chunk_map(fs_info, start, length);
+ if (!map)
+ return -EINVAL;
+ use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
+ btrfs_free_chunk_map(map);
+ if (!use_rst)
+ return 0;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ key.objectid = start;
+ key.type = BTRFS_RAID_STRIPE_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(trans, stripe_root, &key, path, -1, 1);
+ if (ret < 0)
+ break;
+
+ if (path->slots[0] == btrfs_header_nritems(path->nodes[0]))
+ path->slots[0]--;
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ found_start = key.objectid;
+ found_end = found_start + key.offset;
+ ret = 0;
+
+ /*
+ * The stripe extent starts before the range we want to delete,
+ * but the range spans more than one stripe extent:
+ *
+ * |--- RAID Stripe Extent ---||--- RAID Stripe Extent ---|
+ * |--- keep ---|--- drop ---|
+ *
+ * This means we have to get the previous item, truncate its
+ * length and then restart the search.
+ */
+ if (found_start > start) {
+ if (slot == 0) {
+ ret = btrfs_previous_item(stripe_root, path, start,
+ BTRFS_RAID_STRIPE_KEY);
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
+ break;
+ }
+ } else {
+ path->slots[0]--;
+ }
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ found_start = key.objectid;
+ found_end = found_start + key.offset;
+ ASSERT(found_start <= start);
+ }
+
+ if (key.type != BTRFS_RAID_STRIPE_KEY)
+ break;
+
+ /* That stripe ends before we start, so we're done. */
+ if (found_end <= start)
+ break;
+
+ trace_btrfs_raid_extent_delete(fs_info, start, end,
+ found_start, found_end);
+
+ /*
+ * The stripe extent starts before the range we want to delete
+ * and ends after the range we want to delete, i.e. we're
+ * punching a hole in the stripe extent:
+ *
+ * |--- RAID Stripe Extent ---|
+ * | keep |--- drop ---| keep |
+ *
+ * This means we need to a) truncate the existing item and b)
+ * create a second item for the remaining range.
+ */
+ if (found_start < start && found_end > end) {
+ size_t item_size;
+ u64 diff_start = start - found_start;
+ u64 diff_end = found_end - end;
+ struct btrfs_stripe_extent *extent;
+ struct btrfs_key newkey = {
+ .objectid = end,
+ .type = BTRFS_RAID_STRIPE_KEY,
+ .offset = diff_end,
+ };
+
+ /* The "right" item. */
+ ret = btrfs_duplicate_item(trans, stripe_root, path, &newkey);
+ if (ret)
+ break;
+
+ item_size = btrfs_item_size(leaf, path->slots[0]);
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_stripe_extent);
+
+ for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
+ struct btrfs_raid_stride *stride = &extent->strides[i];
+ u64 phys;
+
+ phys = btrfs_raid_stride_physical(leaf, stride);
+ phys += diff_start + length;
+ btrfs_set_raid_stride_physical(leaf, stride, phys);
+ }
+
+ /* The "left" item. */
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ diff_start, 0);
+ break;
+ }
+
+ /*
+ * The stripe extent starts before the range we want to delete:
+ *
+ * |--- RAID Stripe Extent ---|
+ * |--- keep ---|--- drop ---|
+ *
+ * This means we have to duplicate the tree item, truncate the
+ * length to the new size and then re-insert the item.
+ */
+ if (found_start < start) {
+ u64 diff_start = start - found_start;
+
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ diff_start, 0);
+
+ start += (key.offset - diff_start);
+ length -= (key.offset - diff_start);
+ if (length == 0)
+ break;
+
+ btrfs_release_path(path);
+ continue;
+ }
+
+ /*
+ * The stripe extent ends after the range we want to delete:
+ *
+ * |--- RAID Stripe Extent ---|
+ * |--- drop ---|--- keep ---|
+ *
+ * This means we have to duplicate the tree item, truncate the
+ * length to the new size and then re-insert the item.
+ */
+ if (found_end > end) {
+ u64 diff_end = found_end - end;
+
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ key.offset - length,
+ length);
+ ASSERT(key.offset - diff_end == length);
+ break;
+ }
+
+ /* Finally we can delete the whole item, no more special cases. */
+ ret = btrfs_del_item(trans, stripe_root, path);
+ if (ret)
+ break;
+
+ start += key.offset;
+ length -= key.offset;
+ if (length == 0)
+ break;
+
+ btrfs_release_path(path);
+ }
+
+ return ret;
+}
+
+static int update_raid_extent_item(struct btrfs_trans_handle *trans,
+ struct btrfs_key *key,
+ struct btrfs_stripe_extent *stripe_extent,
+ const size_t item_size)
+{
+ BTRFS_PATH_AUTO_FREE(path);
+ struct extent_buffer *leaf;
+ int ret;
+ int slot;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = btrfs_search_slot(trans, trans->fs_info->stripe_root, key, path,
+ 0, 1);
+ if (ret)
+ return (ret == 1 ? ret : -EINVAL);
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ write_extent_buffer(leaf, stripe_extent, btrfs_item_ptr_offset(leaf, slot),
+ item_size);
+
+ return ret;
+}
+
+EXPORT_FOR_TESTS
+int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_io_context *bioc)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_key stripe_key;
+ struct btrfs_root *stripe_root = fs_info->stripe_root;
+ const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
+ struct btrfs_stripe_extent AUTO_KFREE(stripe_extent);
+ const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
+ int ret;
+
+ stripe_extent = kzalloc(item_size, GFP_NOFS);
+ if (unlikely(!stripe_extent)) {
+ btrfs_abort_transaction(trans, -ENOMEM);
+ btrfs_end_transaction(trans);
+ return -ENOMEM;
+ }
+
+ trace_btrfs_insert_one_raid_extent(fs_info, bioc->logical, bioc->size,
+ num_stripes);
+ for (int i = 0; i < num_stripes; i++) {
+ u64 devid = bioc->stripes[i].dev->devid;
+ u64 physical = bioc->stripes[i].physical;
+ struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
+
+ btrfs_set_stack_raid_stride_devid(raid_stride, devid);
+ btrfs_set_stack_raid_stride_physical(raid_stride, physical);
+ }
+
+ stripe_key.objectid = bioc->logical;
+ stripe_key.type = BTRFS_RAID_STRIPE_KEY;
+ stripe_key.offset = bioc->size;
+
+ ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
+ item_size);
+ if (ret == -EEXIST) {
+ ret = update_raid_extent_item(trans, &stripe_key, stripe_extent,
+ item_size);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ } else if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ }
+
+ return ret;
+}
+
+int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_extent *ordered_extent)
+{
+ struct btrfs_io_context *bioc;
+ int ret;
+
+ if (!btrfs_fs_incompat(trans->fs_info, RAID_STRIPE_TREE))
+ return 0;
+
+ list_for_each_entry(bioc, &ordered_extent->bioc_list, rst_ordered_entry) {
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret)
+ return ret;
+ }
+
+ while (!list_empty(&ordered_extent->bioc_list)) {
+ bioc = list_first_entry(&ordered_extent->bioc_list,
+ typeof(*bioc), rst_ordered_entry);
+ list_del(&bioc->rst_ordered_entry);
+ btrfs_put_bioc(bioc);
+ }
+
+ return 0;
+}
+
+int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 *length, u64 map_type,
+ u32 stripe_index, struct btrfs_io_stripe *stripe)
+{
+ struct btrfs_root *stripe_root = fs_info->stripe_root;
+ struct btrfs_stripe_extent *stripe_extent;
+ struct btrfs_key stripe_key;
+ struct btrfs_key found_key;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct extent_buffer *leaf;
+ const u64 end = logical + *length;
+ int num_stripes;
+ u64 offset;
+ u64 found_logical;
+ u64 found_length;
+ u64 found_end;
+ int slot;
+ int ret;
+
+ stripe_key.objectid = logical;
+ stripe_key.type = BTRFS_RAID_STRIPE_KEY;
+ stripe_key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ if (stripe->rst_search_commit_root) {
+ path->skip_locking = true;
+ path->search_commit_root = true;
+ }
+
+ ret = btrfs_search_slot(NULL, stripe_root, &stripe_key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ if (path->slots[0] != 0)
+ path->slots[0]--;
+ }
+
+ while (1) {
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+ found_logical = found_key.objectid;
+ found_length = found_key.offset;
+ found_end = found_logical + found_length;
+
+ if (found_logical > end) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ if (in_range(logical, found_logical, found_length))
+ break;
+
+ ret = btrfs_next_item(stripe_root, path);
+ if (ret)
+ goto out;
+ }
+
+ offset = logical - found_logical;
+
+ /*
+ * If we have a logically contiguous, but physically non-contiguous
+ * range, we need to split the bio. Record the length after which we
+ * must split the bio.
+ */
+ if (end > found_end)
+ *length -= end - found_end;
+
+ num_stripes = btrfs_num_raid_stripes(btrfs_item_size(leaf, slot));
+ stripe_extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+
+ for (int i = 0; i < num_stripes; i++) {
+ struct btrfs_raid_stride *stride = &stripe_extent->strides[i];
+ u64 devid = btrfs_raid_stride_devid(leaf, stride);
+ u64 physical = btrfs_raid_stride_physical(leaf, stride);
+
+ if (devid != stripe->dev->devid)
+ continue;
+
+ if ((map_type & BTRFS_BLOCK_GROUP_DUP) && stripe_index != i)
+ continue;
+
+ stripe->physical = physical + offset;
+
+ trace_btrfs_get_raid_extent_offset(fs_info, logical, *length,
+ stripe->physical, devid);
+
+ return 0;
+ }
+
+ /* If we're here, we haven't found the requested devid in the stripe. */
+ ret = -ENODATA;
+out:
+ if (ret > 0)
+ ret = -ENODATA;
+ if (ret && ret != -EIO && !stripe->rst_search_commit_root) {
+ btrfs_debug(fs_info,
+ "cannot find raid-stripe for logical [%llu, %llu] devid %llu, profile %s",
+ logical, logical + *length, stripe->dev->devid,
+ btrfs_bg_type_to_raid_name(map_type));
+ }
+
+ return ret;
+}
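
btrfs_get_raid_extent_offset() above resolves a logical range against a stripe extent keyed by (logical, BTRFS_RAID_STRIPE_KEY, length): the physical address is the matching stride's start plus the offset into the extent, and the length is clamped where the range runs past the extent end so the caller knows where the bio must be split. A rough userspace sketch of just that arithmetic follows; the demo_* structure is a stand-in for one stride of one extent, not the on-disk item format.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One stride of a stripe extent: key.objectid = logical, key.offset = length. */
struct demo_stripe_extent {
	uint64_t logical;
	uint64_t length;
	uint64_t devid;
	uint64_t physical;	/* start of this device's stride */
};

/*
 * Translate a logical range into a physical offset on @devid, clamping the
 * length when the range runs past the end of the stripe extent.
 */
static bool demo_map(const struct demo_stripe_extent *se, uint64_t devid,
		     uint64_t logical, uint64_t *length, uint64_t *physical)
{
	uint64_t end = logical + *length;
	uint64_t found_end = se->logical + se->length;

	if (se->devid != devid)
		return false;
	if (logical < se->logical || logical >= found_end)
		return false;

	if (end > found_end)			/* split point for the bio */
		*length -= end - found_end;

	*physical = se->physical + (logical - se->logical);
	return true;
}

int main(void)
{
	struct demo_stripe_extent se = {
		.logical = 1048576, .length = 65536, .devid = 1, .physical = 272629760,
	};
	uint64_t len = 131072;
	uint64_t phys;

	if (demo_map(&se, 1, 1052672, &len, &phys))
		printf("physical=%llu length=%llu\n",
		       (unsigned long long)phys, (unsigned long long)len);
	return 0;
}

With the numbers above the mapping lands 4K into the stride (physical 272633856) and the length is clamped to 61440 bytes, the remainder of the stripe extent.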
diff --git a/fs/btrfs/raid-stripe-tree.h b/fs/btrfs/raid-stripe-tree.h
new file mode 100644
index 000000000000..69942ad43140
--- /dev/null
+++ b/fs/btrfs/raid-stripe-tree.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef BTRFS_RAID_STRIPE_TREE_H
+#define BTRFS_RAID_STRIPE_TREE_H
+
+#include <linux/types.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "fs.h"
+#include "accessors.h"
+
+#define BTRFS_RST_SUPP_BLOCK_GROUP_MASK (BTRFS_BLOCK_GROUP_DUP | \
+ BTRFS_BLOCK_GROUP_RAID1_MASK | \
+ BTRFS_BLOCK_GROUP_RAID0 | \
+ BTRFS_BLOCK_GROUP_RAID10)
+
+struct btrfs_io_context;
+struct btrfs_io_stripe;
+struct btrfs_fs_info;
+struct btrfs_ordered_extent;
+struct btrfs_trans_handle;
+
+int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length);
+int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 *length, u64 map_type,
+ u32 stripe_index, struct btrfs_io_stripe *stripe);
+int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_extent *ordered_extent);
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_io_context *bioc);
+#endif
+
+static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
+ u64 map_type)
+{
+ u64 type = map_type & BTRFS_BLOCK_GROUP_TYPE_MASK;
+ u64 profile = map_type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+ if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE))
+ return false;
+
+ if (type != BTRFS_BLOCK_GROUP_DATA)
+ return false;
+
+ if (profile & BTRFS_RST_SUPP_BLOCK_GROUP_MASK)
+ return true;
+
+ return false;
+}
+
+static inline int btrfs_num_raid_stripes(u32 item_size)
+{
+ return item_size / sizeof(struct btrfs_raid_stride);
+}
+
+#endif
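
btrfs_need_stripe_tree_update() above gates RST updates on three conditions: the RAID_STRIPE_TREE incompat bit is set, the chunk is a data chunk, and its profile falls within BTRFS_RST_SUPP_BLOCK_GROUP_MASK. The sketch below replays that decision with made-up DEMO_* bit values so the masking is easy to follow; it is an illustration, not the kernel's block group flag layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; the real ones live in btrfs_tree.h. */
#define DEMO_BG_DATA		(1ULL << 0)
#define DEMO_BG_METADATA	(1ULL << 1)
#define DEMO_BG_RAID0		(1ULL << 3)
#define DEMO_BG_RAID1		(1ULL << 4)
#define DEMO_TYPE_MASK		(DEMO_BG_DATA | DEMO_BG_METADATA)
#define DEMO_RST_PROFILES	(DEMO_BG_RAID0 | DEMO_BG_RAID1)

/* Only a data block group with an RST-capable profile needs stripe-tree updates. */
static bool demo_need_rst_update(uint64_t map_type, bool rst_incompat_set)
{
	if (!rst_incompat_set)
		return false;
	if ((map_type & DEMO_TYPE_MASK) != DEMO_BG_DATA)
		return false;
	return (map_type & DEMO_RST_PROFILES) != 0;
}

int main(void)
{
	printf("data/raid1:     %d\n",
	       demo_need_rst_update(DEMO_BG_DATA | DEMO_BG_RAID1, true));
	printf("metadata/raid1: %d\n",
	       demo_need_rst_update(DEMO_BG_METADATA | DEMO_BG_RAID1, true));
	return 0;
}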
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0e239a4c3b26..f38d8305e46d 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -13,12 +13,14 @@
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
-#include "misc.h"
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
+#include "file-item.h"
+#include "btrfs_inode.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT 1
@@ -38,6 +40,85 @@
#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+static void dump_bioc(const struct btrfs_fs_info *fs_info, const struct btrfs_io_context *bioc)
+{
+ if (unlikely(!bioc)) {
+ btrfs_crit(fs_info, "bioc=NULL");
+ return;
+ }
+ btrfs_crit(fs_info,
+"bioc logical=%llu full_stripe=%llu size=%llu map_type=0x%llx mirror=%u replace_nr_stripes=%u replace_stripe_src=%d num_stripes=%u",
+ bioc->logical, bioc->full_stripe_logical, bioc->size,
+ bioc->map_type, bioc->mirror_num, bioc->replace_nr_stripes,
+ bioc->replace_stripe_src, bioc->num_stripes);
+ for (int i = 0; i < bioc->num_stripes; i++) {
+ btrfs_crit(fs_info, " nr=%d devid=%llu physical=%llu",
+ i, bioc->stripes[i].dev->devid,
+ bioc->stripes[i].physical);
+ }
+}
+
+static void btrfs_dump_rbio(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_raid_bio *rbio)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
+ return;
+
+ dump_bioc(fs_info, rbio->bioc);
+ btrfs_crit(fs_info,
+"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u sector_nsteps=%u scrubp=%u dbitmap=0x%lx",
+ rbio->flags, rbio->nr_sectors, rbio->nr_data,
+ rbio->real_stripes, rbio->stripe_nsectors,
+ rbio->sector_nsteps, rbio->scrubp, rbio->dbitmap);
+}
+
+#define ASSERT_RBIO(expr, rbio) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ } \
+ ASSERT((expr)); \
+})
+
+#define ASSERT_RBIO_STRIPE(expr, rbio, stripe_nr) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ btrfs_crit(__fs_info, "stripe_nr=%d", (stripe_nr)); \
+ } \
+ ASSERT((expr)); \
+})
+
+#define ASSERT_RBIO_SECTOR(expr, rbio, sector_nr) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ btrfs_crit(__fs_info, "sector_nr=%d", (sector_nr)); \
+ } \
+ ASSERT((expr)); \
+})
+
+#define ASSERT_RBIO_LOGICAL(expr, rbio, logical) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ btrfs_crit(__fs_info, "logical=%llu", (logical)); \
+ } \
+ ASSERT((expr)); \
+})
+
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
struct list_head hash_list;
@@ -52,146 +133,56 @@ struct btrfs_stripe_hash_table {
struct btrfs_stripe_hash table[];
};
-enum btrfs_rbio_ops {
- BTRFS_RBIO_WRITE,
- BTRFS_RBIO_READ_REBUILD,
- BTRFS_RBIO_PARITY_SCRUB,
- BTRFS_RBIO_REBUILD_MISSING,
-};
-
-struct btrfs_raid_bio {
- struct btrfs_io_context *bioc;
-
- /* while we're doing rmw on a stripe
- * we put it into a hash table so we can
- * lock the stripe and merge more rbios
- * into it.
- */
- struct list_head hash_list;
-
- /*
- * LRU list for the stripe cache
- */
- struct list_head stripe_cache;
-
- /*
- * for scheduling work in the helper threads
- */
- struct btrfs_work work;
-
- /*
- * bio list and bio_list_lock are used
- * to add more bios into the stripe
- * in hopes of avoiding the full rmw
- */
- struct bio_list bio_list;
- spinlock_t bio_list_lock;
-
- /* also protected by the bio_list_lock, the
- * plug list is used by the plugging code
- * to collect partial bios while plugged. The
- * stripe locking code also uses it to hand off
- * the stripe lock to the next pending IO
- */
- struct list_head plug_list;
-
- /*
- * flags that tell us if it is safe to
- * merge with this bio
- */
- unsigned long flags;
-
- /* size of each individual stripe on disk */
- int stripe_len;
-
- /* number of data stripes (no p/q) */
- int nr_data;
-
- int real_stripes;
-
- int stripe_npages;
- /*
- * set if we're doing a parity rebuild
- * for a read from higher up, which is handled
- * differently from a parity rebuild as part of
- * rmw
- */
- enum btrfs_rbio_ops operation;
-
- /* first bad stripe */
- int faila;
-
- /* second bad stripe (for raid6 use) */
- int failb;
-
- int scrubp;
- /*
- * number of pages needed to represent the full
- * stripe
- */
- int nr_pages;
-
- /*
- * size of all the bios in the bio_list. This
- * helps us decide if the rbio maps to a full
- * stripe or not
- */
- int bio_list_bytes;
-
- int generic_bio_cnt;
-
- refcount_t refs;
-
- atomic_t stripes_pending;
+/*
+ * The PFN may still be valid, but our paddrs are always block-size aligned,
+ * so an all-ones (-1) paddr can never be a valid one.
+ */
+#define INVALID_PADDR (~(phys_addr_t)0)
- atomic_t error;
- /*
- * these are two arrays of pointers. We allocate the
- * rbio big enough to hold them both and setup their
- * locations when the rbio is allocated
- */
+static void rmw_rbio_work(struct work_struct *work);
+static void rmw_rbio_work_locked(struct work_struct *work);
+static void index_rbio_pages(struct btrfs_raid_bio *rbio);
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
- /* pointers to pages that we allocated for
- * reading/writing stripes directly from the disk (including P/Q)
- */
- struct page **stripe_pages;
+static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
+static void scrub_rbio_work_locked(struct work_struct *work);
- /*
- * pointers to the pages in the bio_list. Stored
- * here for faster lookup
- */
- struct page **bio_pages;
+static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
+{
+ bitmap_free(rbio->error_bitmap);
+ kfree(rbio->stripe_pages);
+ kfree(rbio->bio_paddrs);
+ kfree(rbio->stripe_paddrs);
+ kfree(rbio->finish_pointers);
+}
- /*
- * bitmap to record which horizontal stripe has data
- */
- unsigned long *dbitmap;
+static void free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+ int i;
- /* allocated with real_stripes-many pointers for finish_*() calls */
- void **finish_pointers;
+ if (!refcount_dec_and_test(&rbio->refs))
+ return;
- /* allocated with stripe_npages-many bits for finish_*() calls */
- unsigned long *finish_pbitmap;
-};
+ WARN_ON(!list_empty(&rbio->stripe_cache));
+ WARN_ON(!list_empty(&rbio->hash_list));
+ WARN_ON(!bio_list_empty(&rbio->bio_list));
-static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
-static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
-static void rmw_work(struct btrfs_work *work);
-static void read_rebuild_work(struct btrfs_work *work);
-static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
-static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
-static void __free_raid_bio(struct btrfs_raid_bio *rbio);
-static void index_rbio_pages(struct btrfs_raid_bio *rbio);
-static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i]) {
+ __free_page(rbio->stripe_pages[i]);
+ rbio->stripe_pages[i] = NULL;
+ }
+ }
-static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
- int need_check);
-static void scrub_parity_work(struct btrfs_work *work);
+ btrfs_put_bioc(rbio->bioc);
+ free_raid_bio_pointers(rbio);
+ kfree(rbio);
+}
-static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
+static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
- btrfs_init_work(&rbio->work, work_func, NULL, NULL);
- btrfs_queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
+ INIT_WORK(&rbio->work, work_func);
+ queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}
/*
@@ -204,8 +195,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
struct btrfs_stripe_hash_table *x;
struct btrfs_stripe_hash *cur;
struct btrfs_stripe_hash *h;
- int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
- int i;
+ unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS;
if (info->stripe_hash_table)
return 0;
@@ -226,7 +216,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
h = table->table;
- for (i = 0; i < num_entries; i++) {
+ for (unsigned int i = 0; i < num_entries; i++) {
cur = h + i;
INIT_LIST_HEAD(&cur->hash_list);
spin_lock_init(&cur->lock);
@@ -237,9 +227,27 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
return 0;
}
+static void memcpy_from_bio_to_stripe(struct btrfs_raid_bio *rbio, unsigned int sector_nr)
+{
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
+
+ ASSERT(sector_nr < rbio->nr_sectors);
+ for (int i = 0; i < rbio->sector_nsteps; i++) {
+ unsigned int index = sector_nr * rbio->sector_nsteps + i;
+ phys_addr_t dst = rbio->stripe_paddrs[index];
+ phys_addr_t src = rbio->bio_paddrs[index];
+
+ ASSERT(dst != INVALID_PADDR);
+ ASSERT(src != INVALID_PADDR);
+
+ memcpy_page(phys_to_page(dst), offset_in_page(dst),
+ phys_to_page(src), offset_in_page(src), step);
+ }
+}
+
/*
* caching an rbio means to copy anything from the
- * bio_pages array into the stripe_pages array. We
+ * bio_sectors array into the stripe_pages array. We
* use the page uptodate bit in the stripe cache array
* to indicate if it has valid data
*
@@ -255,12 +263,21 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
if (ret)
return;
- for (i = 0; i < rbio->nr_pages; i++) {
- if (!rbio->bio_pages[i])
+ for (i = 0; i < rbio->nr_sectors; i++) {
+ /* Some range not covered by bio (partial write), skip it */
+ if (rbio->bio_paddrs[i * rbio->sector_nsteps] == INVALID_PADDR) {
+ /*
+ * Even if the sector is not covered by bio, if it is
+ * a data sector it should still be uptodate as it is
+ * read from disk.
+ */
+ if (i < rbio->nr_data * rbio->stripe_nsectors)
+ ASSERT(test_bit(i, rbio->stripe_uptodate_bitmap));
continue;
+ }
- copy_highpage(rbio->stripe_pages[i], rbio->bio_pages[i]);
- SetPageUptodate(rbio->stripe_pages[i]);
+ memcpy_from_bio_to_stripe(rbio, i);
+ set_bit(i, rbio->stripe_uptodate_bitmap);
}
set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
@@ -270,7 +287,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
*/
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
- u64 num = rbio->bioc->raid_map[0];
+ u64 num = rbio->bioc->full_stripe_logical;
/*
* we shift down quite a bit. We're using byte
@@ -283,32 +300,143 @@ static int rbio_bucket(struct btrfs_raid_bio *rbio)
return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
+/* Get the sector number of the first sector covered by @page_nr. */
+static u32 page_nr_to_sector_nr(struct btrfs_raid_bio *rbio, unsigned int page_nr)
+{
+ u32 sector_nr;
+
+ ASSERT(page_nr < rbio->nr_pages);
+
+ sector_nr = (page_nr << PAGE_SHIFT) >> rbio->bioc->fs_info->sectorsize_bits;
+ ASSERT(sector_nr < rbio->nr_sectors);
+ return sector_nr;
+}
+
+/*
+ * Get the number of sectors covered by @page_nr.
+ *
+ * For bs > ps cases, the result will always be 1.
+ * For bs <= ps cases, the result will be ps / bs.
+ */
+static u32 page_nr_to_num_sectors(struct btrfs_raid_bio *rbio, unsigned int page_nr)
+{
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ u32 nr_sectors;
+
+ ASSERT(page_nr < rbio->nr_pages);
+
+ nr_sectors = round_up(PAGE_SIZE, fs_info->sectorsize) >> fs_info->sectorsize_bits;
+ ASSERT(nr_sectors > 0);
+ return nr_sectors;
+}
+
+static __maybe_unused bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
+ unsigned int page_nr)
+{
+ const u32 sector_nr = page_nr_to_sector_nr(rbio, page_nr);
+ const u32 nr_bits = page_nr_to_num_sectors(rbio, page_nr);
+ int i;
+
+ ASSERT(page_nr < rbio->nr_pages);
+ ASSERT(sector_nr + nr_bits < rbio->nr_sectors);
+
+ for (i = sector_nr; i < sector_nr + nr_bits; i++) {
+ if (!test_bit(i, rbio->stripe_uptodate_bitmap))
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Update the stripe_paddrs[] array to point at the correct page and offset.
+ *
+ * Should be called every time any page pointer in stripe_pages[] is modified.
+ */
+static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
+{
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
+ u32 offset;
+ int i;
+
+ for (i = 0, offset = 0; i < rbio->nr_sectors * rbio->sector_nsteps;
+ i++, offset += step) {
+ int page_index = offset >> PAGE_SHIFT;
+
+ ASSERT(page_index < rbio->nr_pages);
+ if (!rbio->stripe_pages[page_index])
+ continue;
+
+ rbio->stripe_paddrs[i] = page_to_phys(rbio->stripe_pages[page_index]) +
+ offset_in_page(offset);
+ }
+}
+
+static void steal_rbio_page(struct btrfs_raid_bio *src,
+ struct btrfs_raid_bio *dest, int page_nr)
+{
+ const u32 sector_nr = page_nr_to_sector_nr(src, page_nr);
+ const u32 nr_bits = page_nr_to_num_sectors(src, page_nr);
+
+ ASSERT(page_nr < src->nr_pages);
+ ASSERT(sector_nr + nr_bits < src->nr_sectors);
+
+ if (dest->stripe_pages[page_nr])
+ __free_page(dest->stripe_pages[page_nr]);
+ dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
+ src->stripe_pages[page_nr] = NULL;
+
+ /* Also update the stripe_uptodate_bitmap bits. */
+ bitmap_set(dest->stripe_uptodate_bitmap, sector_nr, nr_bits);
+}
+
+static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
+{
+ const int sector_nr = page_nr_to_sector_nr(rbio, page_nr);
+
+ /*
+ * We have ensured PAGE_SIZE is aligned with sectorsize, thus
+ * we won't have a page which is half data half parity.
+ *
+ * Thus if the first sector of the page belongs to data stripes, then
+ * the full page belongs to data stripes.
+ */
+ return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
+}
+
/*
- * stealing an rbio means taking all the uptodate pages from the stripe
- * array in the source rbio and putting them into the destination rbio
+ * Stealing an rbio means taking all the uptodate pages from the stripe array
+ * in the source rbio and putting them into the destination rbio.
+ *
+ * This will also update the involved stripe_paddrs[] entries that refer to
+ * the old pages.
*/
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
int i;
- struct page *s;
- struct page *d;
if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
return;
for (i = 0; i < dest->nr_pages; i++) {
- s = src->stripe_pages[i];
- if (!s || !PageUptodate(s)) {
- continue;
- }
+ struct page *p = src->stripe_pages[i];
- d = dest->stripe_pages[i];
- if (d)
- __free_page(d);
+ /*
+ * We don't need to steal P/Q pages as they will always be
+ * regenerated for RMW or full write anyway.
+ */
+ if (!is_data_stripe_page(src, i))
+ continue;
- dest->stripe_pages[i] = s;
- src->stripe_pages[i] = NULL;
+ /*
+ * If @src already has RBIO_CACHE_READY_BIT, it should have
+ * all data stripe pages present and uptodate.
+ */
+ ASSERT(p);
+ ASSERT(full_page_sectors_uptodate(src, i));
+ steal_rbio_page(src, dest, i);
}
+ index_stripe_sectors(dest);
+ index_stripe_sectors(src);
}
/*
@@ -321,10 +449,11 @@ static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
static void merge_rbio(struct btrfs_raid_bio *dest,
struct btrfs_raid_bio *victim)
{
- bio_list_merge(&dest->bio_list, &victim->bio_list);
+ bio_list_merge_init(&dest->bio_list, &victim->bio_list);
dest->bio_list_bytes += victim->bio_list_bytes;
- dest->generic_bio_cnt += victim->generic_bio_cnt;
- bio_list_init(&victim->bio_list);
+ /* Also inherit the bitmaps from @victim. */
+ bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
+ dest->stripe_nsectors);
}
/*
@@ -385,7 +514,7 @@ static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
spin_unlock(&h->lock);
if (freeit)
- __free_raid_bio(rbio);
+ free_raid_bio(rbio);
}
/*
@@ -394,16 +523,15 @@ static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
struct btrfs_stripe_hash_table *table;
- unsigned long flags;
if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
return;
table = rbio->bioc->fs_info->stripe_hash_table;
- spin_lock_irqsave(&table->cache_lock, flags);
+ spin_lock(&table->cache_lock);
__remove_rbio_from_cache(rbio);
- spin_unlock_irqrestore(&table->cache_lock, flags);
+ spin_unlock(&table->cache_lock);
}
/*
@@ -412,19 +540,17 @@ static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
struct btrfs_stripe_hash_table *table;
- unsigned long flags;
struct btrfs_raid_bio *rbio;
table = info->stripe_hash_table;
- spin_lock_irqsave(&table->cache_lock, flags);
+ spin_lock(&table->cache_lock);
while (!list_empty(&table->stripe_cache)) {
- rbio = list_entry(table->stripe_cache.next,
- struct btrfs_raid_bio,
- stripe_cache);
+ rbio = list_first_entry(&table->stripe_cache,
+ struct btrfs_raid_bio, stripe_cache);
__remove_rbio_from_cache(rbio);
}
- spin_unlock_irqrestore(&table->cache_lock, flags);
+ spin_unlock(&table->cache_lock);
}
/*
@@ -454,14 +580,13 @@ void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
struct btrfs_stripe_hash_table *table;
- unsigned long flags;
if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
return;
table = rbio->bioc->fs_info->stripe_hash_table;
- spin_lock_irqsave(&table->cache_lock, flags);
+ spin_lock(&table->cache_lock);
spin_lock(&rbio->bio_list_lock);
/* bump our ref if we were not in the list before */
@@ -480,15 +605,15 @@ static void cache_rbio(struct btrfs_raid_bio *rbio)
if (table->cache_size > RBIO_CACHE_SIZE) {
struct btrfs_raid_bio *found;
- found = list_entry(table->stripe_cache.prev,
- struct btrfs_raid_bio,
- stripe_cache);
+ found = list_last_entry(&table->stripe_cache,
+ struct btrfs_raid_bio,
+ stripe_cache);
if (found != rbio)
__remove_rbio_from_cache(found);
}
- spin_unlock_irqrestore(&table->cache_lock, flags);
+ spin_unlock(&table->cache_lock);
}
/*
@@ -517,15 +642,14 @@ static void run_xor(void **pages, int src_cnt, ssize_t len)
*/
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
- unsigned long flags;
unsigned long size = rbio->bio_list_bytes;
int ret = 1;
- spin_lock_irqsave(&rbio->bio_list_lock, flags);
- if (size != rbio->nr_data * rbio->stripe_len)
+ spin_lock(&rbio->bio_list_lock);
+ if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
ret = 0;
- BUG_ON(size > rbio->nr_data * rbio->stripe_len);
- spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+ BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
+ spin_unlock(&rbio->bio_list_lock);
return ret;
}
@@ -558,7 +682,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
test_bit(RBIO_CACHE_BIT, &cur->flags))
return 0;
- if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
+ if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
return 0;
/* we can't merge with different operations */
@@ -575,64 +699,68 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
- if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
+ if (last->operation == BTRFS_RBIO_READ_REBUILD)
return 0;
- if (last->operation == BTRFS_RBIO_READ_REBUILD) {
- int fa = last->faila;
- int fb = last->failb;
- int cur_fa = cur->faila;
- int cur_fb = cur->failb;
+ return 1;
+}
- if (last->faila >= last->failb) {
- fa = last->failb;
- fb = last->faila;
- }
+/* Return the sector index for @stripe_nr and @sector_nr. */
+static unsigned int rbio_sector_index(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr,
+ unsigned int sector_nr)
+{
+ unsigned int ret;
- if (cur->faila >= cur->failb) {
- cur_fa = cur->failb;
- cur_fb = cur->faila;
- }
+ ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr);
+ ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr);
- if (fa != cur_fa || fb != cur_fb)
- return 0;
- }
- return 1;
+ ret = stripe_nr * rbio->stripe_nsectors + sector_nr;
+ ASSERT(ret < rbio->nr_sectors);
+ return ret;
}
-static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
- int index)
+/* Return the paddr array index for @stripe_nr, @sector_nr and @step_nr. */
+static unsigned int rbio_paddr_index(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr,
+ unsigned int sector_nr,
+ unsigned int step_nr)
{
- return stripe * rbio->stripe_npages + index;
+ unsigned int ret;
+
+ ASSERT_RBIO_SECTOR(step_nr < rbio->sector_nsteps, rbio, step_nr);
+
+ ret = rbio_sector_index(rbio, stripe_nr, sector_nr) * rbio->sector_nsteps + step_nr;
+ ASSERT(ret < rbio->nr_sectors * rbio->sector_nsteps);
+ return ret;
}
-/*
- * these are just the pages from the rbio array, not from anything
- * the FS sent down to us
- */
-static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
- int index)
+static phys_addr_t rbio_stripe_paddr(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr, unsigned int sector_nr,
+ unsigned int step_nr)
{
- return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
+ return rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr)];
}
-/*
- * helper to index into the pstripe
- */
-static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
+static phys_addr_t rbio_pstripe_paddr(const struct btrfs_raid_bio *rbio,
+ unsigned int sector_nr, unsigned int step_nr)
{
- return rbio_stripe_page(rbio, rbio->nr_data, index);
+ return rbio_stripe_paddr(rbio, rbio->nr_data, sector_nr, step_nr);
}
-/*
- * helper to index into the qstripe, returns null
- * if there is no qstripe
- */
-static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
+static phys_addr_t rbio_qstripe_paddr(const struct btrfs_raid_bio *rbio,
+ unsigned int sector_nr, unsigned int step_nr)
{
if (rbio->nr_data + 1 == rbio->real_stripes)
- return NULL;
- return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
+ return INVALID_PADDR;
+ return rbio_stripe_paddr(rbio, rbio->nr_data + 1, sector_nr, step_nr);
+}
+
+/* Return a paddr pointer into the rbio::stripe_paddrs[] for the specified sector. */
+static phys_addr_t *rbio_stripe_paddrs(const struct btrfs_raid_bio *rbio,
+ unsigned int stripe_nr, unsigned int sector_nr)
+{
+ return &rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)];
}
/*
@@ -662,16 +790,15 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
struct btrfs_stripe_hash *h;
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *pending;
- unsigned long flags;
struct btrfs_raid_bio *freeit = NULL;
struct btrfs_raid_bio *cache_drop = NULL;
int ret = 0;
h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
- spin_lock_irqsave(&h->lock, flags);
+ spin_lock(&h->lock);
list_for_each_entry(cur, &h->hash_list, hash_list) {
- if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
+ if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
continue;
spin_lock(&cur->bio_list_lock);
@@ -729,14 +856,16 @@ lockit:
refcount_inc(&rbio->refs);
list_add(&rbio->hash_list, &h->hash_list);
out:
- spin_unlock_irqrestore(&h->lock, flags);
+ spin_unlock(&h->lock);
if (cache_drop)
remove_rbio_from_cache(cache_drop);
if (freeit)
- __free_raid_bio(freeit);
+ free_raid_bio(freeit);
return ret;
}
+static void recover_rbio_work_locked(struct work_struct *work);
+
/*
* called as rmw or parity rebuild is completed. If the plug list has more
* rbios waiting for this stripe, the next one on the list will be started
@@ -745,7 +874,6 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
int bucket;
struct btrfs_stripe_hash *h;
- unsigned long flags;
int keep_cache = 0;
bucket = rbio_bucket(rbio);
@@ -754,7 +882,7 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
if (list_empty(&rbio->plug_list))
cache_rbio(rbio);
- spin_lock_irqsave(&h->lock, flags);
+ spin_lock(&h->lock);
spin_lock(&rbio->bio_list_lock);
if (!list_empty(&rbio->hash_list)) {
@@ -791,19 +919,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
list_add(&next->hash_list, &h->hash_list);
refcount_inc(&next->refs);
spin_unlock(&rbio->bio_list_lock);
- spin_unlock_irqrestore(&h->lock, flags);
+ spin_unlock(&h->lock);
- if (next->operation == BTRFS_RBIO_READ_REBUILD)
- start_async_work(next, read_rebuild_work);
- else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
- steal_rbio(rbio, next);
- start_async_work(next, read_rebuild_work);
+ if (next->operation == BTRFS_RBIO_READ_REBUILD) {
+ start_async_work(next, recover_rbio_work_locked);
} else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
- start_async_work(next, rmw_work);
+ start_async_work(next, rmw_rbio_work_locked);
} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
steal_rbio(rbio, next);
- start_async_work(next, scrub_parity_work);
+ start_async_work(next, scrub_rbio_work_locked);
}
goto done_nolock;
@@ -811,43 +936,21 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
}
done:
spin_unlock(&rbio->bio_list_lock);
- spin_unlock_irqrestore(&h->lock, flags);
+ spin_unlock(&h->lock);
done_nolock:
if (!keep_cache)
remove_rbio_from_cache(rbio);
}
-static void __free_raid_bio(struct btrfs_raid_bio *rbio)
-{
- int i;
-
- if (!refcount_dec_and_test(&rbio->refs))
- return;
-
- WARN_ON(!list_empty(&rbio->stripe_cache));
- WARN_ON(!list_empty(&rbio->hash_list));
- WARN_ON(!bio_list_empty(&rbio->bio_list));
-
- for (i = 0; i < rbio->nr_pages; i++) {
- if (rbio->stripe_pages[i]) {
- __free_page(rbio->stripe_pages[i]);
- rbio->stripe_pages[i] = NULL;
- }
- }
-
- btrfs_put_bioc(rbio->bioc);
- kfree(rbio);
-}
-
-static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
+static void rbio_endio_bio_list(struct bio *cur, blk_status_t status)
{
struct bio *next;
while (cur) {
next = cur->bi_next;
cur->bi_next = NULL;
- cur->bi_status = err;
+ cur->bi_status = status;
bio_endio(cur);
cur = next;
}
@@ -857,13 +960,22 @@ static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
* this frees the rbio and runs through all the bios in the
* bio_list and calls end_io on them
*/
-static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t status)
{
struct bio *cur = bio_list_get(&rbio->bio_list);
struct bio *extra;
- if (rbio->generic_bio_cnt)
- btrfs_bio_counter_sub(rbio->bioc->fs_info, rbio->generic_bio_cnt);
+ kfree(rbio->csum_buf);
+ bitmap_free(rbio->csum_bitmap);
+ rbio->csum_buf = NULL;
+ rbio->csum_bitmap = NULL;
+
+ /*
+ * Clear the data bitmap, as the rbio may be cached for later usage.
+ * Do this before unlock_stripe() so there will be no new bio for
+ * this rbio.
+ */
+ bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
/*
* At this moment, rbio->bio_list is empty, however since rbio does not
@@ -875,83 +987,70 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
*/
unlock_stripe(rbio);
extra = bio_list_get(&rbio->bio_list);
- __free_raid_bio(rbio);
+ free_raid_bio(rbio);
- rbio_endio_bio_list(cur, err);
+ rbio_endio_bio_list(cur, status);
if (extra)
- rbio_endio_bio_list(extra, err);
-}
-
-/*
- * end io function used by finish_rmw. When we finally
- * get here, we've written a full stripe
- */
-static void raid_write_end_io(struct bio *bio)
-{
- struct btrfs_raid_bio *rbio = bio->bi_private;
- blk_status_t err = bio->bi_status;
- int max_errors;
-
- if (err)
- fail_bio_stripe(rbio, bio);
-
- bio_put(bio);
-
- if (!atomic_dec_and_test(&rbio->stripes_pending))
- return;
-
- err = BLK_STS_OK;
-
- /* OK, we have read all the stripes we need to. */
- max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
- 0 : rbio->bioc->max_errors;
- if (atomic_read(&rbio->error) > max_errors)
- err = BLK_STS_IOERR;
-
- rbio_orig_end_io(rbio, err);
+ rbio_endio_bio_list(extra, status);
}
/*
- * the read/modify/write code wants to use the original bio for
- * any pages it included, and then use the rbio for everything
- * else. This function decides if a given index (stripe number)
- * and page number in that stripe fall inside the original bio
- * or the rbio.
+ * Get paddr pointer for the sector specified by its @stripe_nr and @sector_nr.
*
- * if you set bio_list_only, you'll get a NULL back for any ranges
- * that are outside the bio_list
+ * @rbio: The raid bio
+ * @stripe_nr: Stripe number, valid range [0, real_stripes)
+ * @sector_nr: Sector number inside the stripe,
+ * valid range [0, stripe_nsectors)
+ * @bio_list_only: Whether to use sectors inside the bio list only.
*
- * This doesn't take any refs on anything, you get a bare page pointer
- * and the caller must bump refs as required.
+ * The read/modify/write code wants to reuse the original bio pages as much
+ * as possible, and only falls back to the stripe_paddrs array otherwise.
*
- * You must call index_rbio_pages once before you can trust
- * the answers from this function.
+ * Return NULL if bio_list_only is set but the specified sector has no
+ * corresponding bio.
*/
-static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
- int index, int pagenr, int bio_list_only)
+static phys_addr_t *sector_paddrs_in_rbio(struct btrfs_raid_bio *rbio,
+ int stripe_nr, int sector_nr,
+ bool bio_list_only)
{
- int chunk_page;
- struct page *p = NULL;
+ phys_addr_t *ret = NULL;
+ const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, 0);
- chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
+ ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);
- spin_lock_irq(&rbio->bio_list_lock);
- p = rbio->bio_pages[chunk_page];
- spin_unlock_irq(&rbio->bio_list_lock);
-
- if (p || bio_list_only)
- return p;
-
- return rbio->stripe_pages[chunk_page];
+ scoped_guard(spinlock, &rbio->bio_list_lock) {
+ if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
+ /* Don't return sector without a valid page pointer */
+ if (rbio->bio_paddrs[index] != INVALID_PADDR)
+ ret = &rbio->bio_paddrs[index];
+ return ret;
+ }
+ }
+ return &rbio->stripe_paddrs[index];
}
/*
- * number of pages we need for the entire stripe across all the
- * drives
+ * Similar to sector_paddrs_in_rbio(), but with extra consideration for
+ * bs > ps cases, where we can have multiple steps for a fs block.
*/
-static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
+static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio,
+ int stripe_nr, int sector_nr, int step_nr,
+ bool bio_list_only)
{
- return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
+ phys_addr_t ret = INVALID_PADDR;
+ const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr);
+
+ ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);
+
+ scoped_guard(spinlock, &rbio->bio_list_lock) {
+ if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
+ /* Don't return sector without a valid page pointer */
+ if (rbio->bio_paddrs[index] != INVALID_PADDR)
+ ret = rbio->bio_paddrs[index];
+ return ret;
+ }
+ }
+ return rbio->stripe_paddrs[index];
}
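/*
 * Illustrative sketch, not part of the patch: both helpers above key off the
 * flat index returned by rbio_paddr_index().  Its exact layout is not shown
 * in this hunk, but the other call sites (e.g. "i * rbio->sector_nsteps")
 * suggest a sector-major order, which the hypothetical ex_paddr_index()
 * below assumes.
 */
static inline unsigned int ex_paddr_index(unsigned int stripe_nr,
					  unsigned int sector_nr,
					  unsigned int step_nr,
					  unsigned int stripe_nsectors,
					  unsigned int sector_nsteps)
{
	/* One slot per (stripe, sector, step), with steps as the innermost unit. */
	return (stripe_nr * stripe_nsectors + sector_nr) * sector_nsteps + step_nr;
}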
/*
@@ -959,134 +1058,223 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* this does not allocate any pages for rbio->pages.
*/
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
- struct btrfs_io_context *bioc,
- u64 stripe_len)
-{
+ struct btrfs_io_context *bioc)
+{
+ const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
+ const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
+ const unsigned int num_pages = stripe_npages * real_stripes;
+ const unsigned int stripe_nsectors =
+ BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
+ const unsigned int num_sectors = stripe_nsectors * real_stripes;
+ const unsigned int step = min(fs_info->sectorsize, PAGE_SIZE);
+ const unsigned int sector_nsteps = fs_info->sectorsize / step;
struct btrfs_raid_bio *rbio;
- int nr_data = 0;
- int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
- int num_pages = rbio_nr_pages(stripe_len, real_stripes);
- int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
- void *p;
-
- rbio = kzalloc(sizeof(*rbio) +
- sizeof(*rbio->stripe_pages) * num_pages +
- sizeof(*rbio->bio_pages) * num_pages +
- sizeof(*rbio->finish_pointers) * real_stripes +
- sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
- sizeof(*rbio->finish_pbitmap) *
- BITS_TO_LONGS(stripe_npages),
- GFP_NOFS);
+
+ /*
+ * For bs <= ps cases, ps must be aligned to bs.
+ * For bs > ps cases, bs must be aligned to ps.
+ */
+ ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize) ||
+ IS_ALIGNED(fs_info->sectorsize, PAGE_SIZE));
+ /*
+ * Our current stripe len should be fixed to 64k thus stripe_nsectors
+ * (at most 16) should be no larger than BITS_PER_LONG.
+ */
+ ASSERT(stripe_nsectors <= BITS_PER_LONG);
+
+ /*
+ * Real stripes must be between 2 (2 disks RAID5, aka RAID1) and 256
+ * (limited by u8).
+ */
+ ASSERT(real_stripes >= 2);
+ ASSERT(real_stripes <= U8_MAX);
+
+ rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
if (!rbio)
return ERR_PTR(-ENOMEM);
+ rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
+ GFP_NOFS);
+ rbio->bio_paddrs = kcalloc(num_sectors * sector_nsteps, sizeof(phys_addr_t), GFP_NOFS);
+ rbio->stripe_paddrs = kcalloc(num_sectors * sector_nsteps, sizeof(phys_addr_t), GFP_NOFS);
+ rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
+ rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
+ rbio->stripe_uptodate_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
+
+ if (!rbio->stripe_pages || !rbio->bio_paddrs || !rbio->stripe_paddrs ||
+ !rbio->finish_pointers || !rbio->error_bitmap || !rbio->stripe_uptodate_bitmap) {
+ free_raid_bio_pointers(rbio);
+ kfree(rbio);
+ return ERR_PTR(-ENOMEM);
+ }
+ for (int i = 0; i < num_sectors * sector_nsteps; i++) {
+ rbio->stripe_paddrs[i] = INVALID_PADDR;
+ rbio->bio_paddrs[i] = INVALID_PADDR;
+ }
bio_list_init(&rbio->bio_list);
+ init_waitqueue_head(&rbio->io_wait);
INIT_LIST_HEAD(&rbio->plug_list);
spin_lock_init(&rbio->bio_list_lock);
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
+ btrfs_get_bioc(bioc);
rbio->bioc = bioc;
- rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
+ rbio->nr_sectors = num_sectors;
rbio->real_stripes = real_stripes;
rbio->stripe_npages = stripe_npages;
- rbio->faila = -1;
- rbio->failb = -1;
+ rbio->stripe_nsectors = stripe_nsectors;
+ rbio->sector_nsteps = sector_nsteps;
refcount_set(&rbio->refs, 1);
- atomic_set(&rbio->error, 0);
atomic_set(&rbio->stripes_pending, 0);
- /*
- * the stripe_pages, bio_pages, etc arrays point to the extra
- * memory we allocated past the end of the rbio
- */
- p = rbio + 1;
-#define CONSUME_ALLOC(ptr, count) do { \
- ptr = p; \
- p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
- } while (0)
- CONSUME_ALLOC(rbio->stripe_pages, num_pages);
- CONSUME_ALLOC(rbio->bio_pages, num_pages);
- CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
- CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
- CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
-#undef CONSUME_ALLOC
-
- if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
- nr_data = real_stripes - 1;
- else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
- nr_data = real_stripes - 2;
- else
- BUG();
+ ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
+ rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
+ ASSERT(rbio->nr_data > 0);
- rbio->nr_data = nr_data;
return rbio;
}
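/*
 * Illustrative sketch, not part of the patch: the geometry computed in
 * alloc_rbio() above, redone as a standalone helper so the numbers are easy
 * to check.  The 64K stripe length and the example sizes are assumptions for
 * illustration only.  For 4K pages: a 4K block size gives 16 blocks of 1
 * step each per stripe, a 16K block size gives 4 blocks of 4 steps each.
 */
struct ex_rbio_geometry {
	unsigned int stripe_npages;
	unsigned int stripe_nsectors;
	unsigned int step;
	unsigned int sector_nsteps;
};

static struct ex_rbio_geometry ex_geometry(unsigned int page_size,
					   unsigned int blocksize)
{
	const unsigned int stripe_len = 64 * 1024;	/* assumed BTRFS_STRIPE_LEN */
	struct ex_rbio_geometry g;

	g.stripe_npages = stripe_len / page_size;
	g.stripe_nsectors = stripe_len / blocksize;
	g.step = blocksize < page_size ? blocksize : page_size;
	g.sector_nsteps = blocksize / g.step;
	return g;
}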
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
- int i;
- struct page *page;
+ int ret;
- for (i = 0; i < rbio->nr_pages; i++) {
- if (rbio->stripe_pages[i])
- continue;
- page = alloc_page(GFP_NOFS);
- if (!page)
- return -ENOMEM;
- rbio->stripe_pages[i] = page;
- }
+ ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false);
+ if (ret < 0)
+ return ret;
+ /* Mapping all sectors */
+ index_stripe_sectors(rbio);
return 0;
}
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
- int i;
- struct page *page;
+ const int data_pages = rbio->nr_data * rbio->stripe_npages;
+ int ret;
+
+ ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
+ rbio->stripe_pages + data_pages, false);
+ if (ret < 0)
+ return ret;
- i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
+ index_stripe_sectors(rbio);
+ return 0;
+}
- for (; i < rbio->nr_pages; i++) {
- if (rbio->stripe_pages[i])
- continue;
- page = alloc_page(GFP_NOFS);
- if (!page)
- return -ENOMEM;
- rbio->stripe_pages[i] = page;
+/*
+ * Return the total number of errors found in the vertical stripe of @sector_nr.
+ *
+ * @faila and @failb will also be updated to the first and second stripe
+ * number of the errors.
+ */
+static int get_rbio_vertical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
+ int *faila, int *failb)
+{
+ int stripe_nr;
+ int found_errors = 0;
+
+ if (faila || failb) {
+ /*
+ * Both @faila and @failb should be valid pointers if any of
+ * them is specified.
+ */
+ ASSERT(faila && failb);
+ *faila = -1;
+ *failb = -1;
}
+
+ for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
+ int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
+
+ if (test_bit(total_sector_nr, rbio->error_bitmap)) {
+ found_errors++;
+ if (faila) {
+ /* Update faila and failb. */
+ if (*faila < 0)
+ *faila = stripe_nr;
+ else if (*failb < 0)
+ *failb = stripe_nr;
+ }
+ }
+ }
+ return found_errors;
+}
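/*
 * Illustrative sketch, not part of the patch: rbio->error_bitmap is laid out
 * stripe-major, so the "vertical" stripe of a given sector_nr is one bit per
 * device at stride stripe_nsectors.  The miniature below mirrors the walk
 * above using a plain bool array; the ex_* names are hypothetical.
 */
static int ex_vertical_errors(const _Bool *errors, int real_stripes,
			      int stripe_nsectors, int sector_nr)
{
	int found = 0;

	for (int stripe = 0; stripe < real_stripes; stripe++) {
		/* Same index math as test_bit() on rbio->error_bitmap. */
		if (errors[stripe * stripe_nsectors + sector_nr])
			found++;
	}
	return found;
}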
+
+static int bio_add_paddrs(struct bio *bio, phys_addr_t *paddrs, unsigned int nr_steps,
+ unsigned int step)
+{
+ int added = 0;
+ int ret;
+
+ for (int i = 0; i < nr_steps; i++) {
+ ret = bio_add_page(bio, phys_to_page(paddrs[i]), step,
+ offset_in_page(paddrs[i]));
+ if (ret != step)
+ goto revert;
+ added += ret;
+ }
+ return added;
+revert:
+ /*
+ * We don't need to revert the bvecs: the bio will be submitted immediately,
+ * and as long as the size is reduced the extra bvecs will not be accessed.
+ */
+ bio->bi_iter.bi_size -= added;
return 0;
}
/*
- * add a single page from a specific stripe into our list of bios for IO
- * this will try to merge into existing bios if possible, and returns
- * zero if all went well.
+ * Add a single sector (described by @paddrs) into our list of bios for IO.
+ *
+ * Return 0 if everything went well.
+ * Return <0 for error, in which case no bytes will be added to @rbio.
*/
-static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
- struct bio_list *bio_list,
- struct page *page,
- int stripe_nr,
- unsigned long page_index,
- unsigned long bio_max_len)
+static int rbio_add_io_paddrs(struct btrfs_raid_bio *rbio, struct bio_list *bio_list,
+ phys_addr_t *paddrs, unsigned int stripe_nr,
+ unsigned int sector_nr, enum req_op op)
{
+ const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
+ const u32 step = min(sectorsize, PAGE_SIZE);
struct bio *last = bio_list->tail;
int ret;
struct bio *bio;
struct btrfs_io_stripe *stripe;
u64 disk_start;
+ /*
+ * Note: here stripe_nr has taken device replace into consideration,
+ * thus it can be larger than rbio->real_stripes.
+ * So here we check against bioc->num_stripes, not rbio->real_stripes.
+ */
+ ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes,
+ rbio, stripe_nr);
+ ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
+ rbio, sector_nr);
+ ASSERT(paddrs != NULL);
+
stripe = &rbio->bioc->stripes[stripe_nr];
- disk_start = stripe->physical + (page_index << PAGE_SHIFT);
+ disk_start = stripe->physical + sector_nr * sectorsize;
/* if the device is missing, just fail this stripe */
- if (!stripe->dev->bdev)
- return fail_rbio_index(rbio, stripe_nr);
+ if (!stripe->dev->bdev) {
+ int found_errors;
+
+ set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
+ rbio->error_bitmap);
+
+ /* Check if we have reached tolerance early. */
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr,
+ NULL, NULL);
+ if (unlikely(found_errors > rbio->bioc->max_errors))
+ return -EIO;
+ return 0;
+ }
/* see if we can add this page onto our existing bio */
if (last) {
- u64 last_end = last->bi_iter.bi_sector << 9;
+ u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT;
last_end += last->bi_iter.bi_size;
/*
@@ -1095,38 +1283,40 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
*/
if (last_end == disk_start && !last->bi_status &&
last->bi_bdev == stripe->dev->bdev) {
- ret = bio_add_page(last, page, PAGE_SIZE, 0);
- if (ret == PAGE_SIZE)
+ ret = bio_add_paddrs(last, paddrs, rbio->sector_nsteps, step);
+ if (ret == sectorsize)
return 0;
}
}
/* put a new bio on the list */
- bio = btrfs_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
- btrfs_bio(bio)->device = stripe->dev;
- bio->bi_iter.bi_size = 0;
- bio_set_dev(bio, stripe->dev->bdev);
- bio->bi_iter.bi_sector = disk_start >> 9;
-
- bio_add_page(bio, page, PAGE_SIZE, 0);
+ bio = bio_alloc(stripe->dev->bdev,
+ max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
+ op, GFP_NOFS);
+ bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
+ bio->bi_private = rbio;
+
+ ret = bio_add_paddrs(bio, paddrs, rbio->sector_nsteps, step);
+ ASSERT(ret == sectorsize);
bio_list_add(bio_list, bio);
return 0;
}
-/*
- * while we're doing the read/modify/write cycle, we could
- * have errors in reading pages off the disk. This checks
- * for errors and if we're not able to read the page it'll
- * trigger parity reconstruction. The rmw will be finished
- * after we've reconstructed the failed stripes
- */
-static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
+static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- if (rbio->faila >= 0 || rbio->failb >= 0) {
- BUG_ON(rbio->faila == rbio->real_stripes - 1);
- __raid56_parity_recover(rbio);
- } else {
- finish_rmw(rbio);
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 step_bits = min(fs_info->sectorsize_bits, PAGE_SHIFT);
+ struct bvec_iter iter = bio->bi_iter;
+ phys_addr_t paddr;
+ u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
+ rbio->bioc->full_stripe_logical;
+
+ btrfs_bio_for_each_block(paddr, bio, &iter, step) {
+ unsigned int index = (offset >> step_bits);
+
+ rbio->bio_paddrs[index] = paddr;
+ offset += step;
}
}
@@ -1141,479 +1331,437 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
struct bio *bio;
- u64 start;
- unsigned long stripe_offset;
- unsigned long page_index;
-
- spin_lock_irq(&rbio->bio_list_lock);
- bio_list_for_each(bio, &rbio->bio_list) {
- struct bio_vec bvec;
- struct bvec_iter iter;
- int i = 0;
-
- start = bio->bi_iter.bi_sector << 9;
- stripe_offset = start - rbio->bioc->raid_map[0];
- page_index = stripe_offset >> PAGE_SHIFT;
-
- if (bio_flagged(bio, BIO_CLONED))
- bio->bi_iter = btrfs_bio(bio)->iter;
-
- bio_for_each_segment(bvec, bio, iter) {
- rbio->bio_pages[page_index + i] = bvec.bv_page;
- i++;
- }
+
+ spin_lock(&rbio->bio_list_lock);
+ bio_list_for_each(bio, &rbio->bio_list)
+ index_one_bio(rbio, bio);
+
+ spin_unlock(&rbio->bio_list_lock);
+}
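/*
 * Illustrative sketch, not part of the patch: the bio_paddrs[] slot picked in
 * index_one_bio() above is just the byte offset of the block from the start
 * of the full stripe, divided by the step size.  The helper name and example
 * numbers below are hypothetical.
 */
static inline unsigned int ex_bio_paddr_index(unsigned long long logical,
					      unsigned long long full_stripe_logical,
					      unsigned int step_bits)
{
	return (unsigned int)((logical - full_stripe_logical) >> step_bits);
}
/* e.g. a block 24K into the full stripe with 4K steps lands in slot 6. */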
+
+static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
+ struct raid56_bio_trace_info *trace_info)
+{
+ const struct btrfs_io_context *bioc = rbio->bioc;
+ int i;
+
+ ASSERT(bioc);
+
+ /* We rely on bio->bi_bdev to find the stripe number. */
+ if (!bio->bi_bdev)
+ goto not_found;
+
+ for (i = 0; i < bioc->num_stripes; i++) {
+ if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
+ continue;
+ trace_info->stripe_nr = i;
+ trace_info->devid = bioc->stripes[i].dev->devid;
+ trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
+ bioc->stripes[i].physical;
+ return;
}
- spin_unlock_irq(&rbio->bio_list_lock);
+
+not_found:
+ trace_info->devid = -1;
+ trace_info->offset = -1;
+ trace_info->stripe_nr = -1;
}
-/*
- * this is called from one of two situations. We either
- * have a full stripe from the higher layers, or we've read all
- * the missing bits off disk.
- *
- * This will calculate the parity and then send down any
- * changed blocks.
- */
-static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
+static inline void bio_list_put(struct bio_list *bio_list)
{
- struct btrfs_io_context *bioc = rbio->bioc;
- void **pointers = rbio->finish_pointers;
- int nr_data = rbio->nr_data;
- int stripe;
- int pagenr;
- bool has_qstripe;
- struct bio_list bio_list;
struct bio *bio;
- int ret;
- bio_list_init(&bio_list);
+ while ((bio = bio_list_pop(bio_list)))
+ bio_put(bio);
+}
- if (rbio->real_stripes - rbio->nr_data == 1)
- has_qstripe = false;
- else if (rbio->real_stripes - rbio->nr_data == 2)
- has_qstripe = true;
- else
- BUG();
+static void assert_rbio(struct btrfs_raid_bio *rbio)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
+ return;
- /* at this point we either have a full stripe,
- * or we've read the full stripe from the drive.
- * recalculate the parity and write the new results.
- *
- * We're not allowed to add any new bios to the
- * bio list here, anyone else that wants to
- * change this stripe needs to do their own rmw.
+ /*
+ * At least two stripes (2 disks RAID5), and since real_stripes is U8,
+ * we won't go beyond 256 disks anyway.
*/
- spin_lock_irq(&rbio->bio_list_lock);
- set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
- spin_unlock_irq(&rbio->bio_list_lock);
-
- atomic_set(&rbio->error, 0);
+ ASSERT_RBIO(rbio->real_stripes >= 2, rbio);
+ ASSERT_RBIO(rbio->nr_data > 0, rbio);
/*
- * now that we've set rmw_locked, run through the
- * bio list one last time and map the page pointers
- *
- * We don't cache full rbios because we're assuming
- * the higher layers are unlikely to use this area of
- * the disk again soon. If they do use it again,
- * hopefully they will send another full bio.
+ * This is another check to make sure nr data stripes is smaller
+ * than total stripes.
*/
- index_rbio_pages(rbio);
- if (!rbio_is_full(rbio))
- cache_rbio_pages(rbio);
- else
- clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+ ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio);
+}
- for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- struct page *p;
- /* first collect one page from each data stripe */
- for (stripe = 0; stripe < nr_data; stripe++) {
- p = page_in_rbio(rbio, stripe, pagenr, 0);
- pointers[stripe] = kmap_local_page(p);
- }
+static inline void *kmap_local_paddr(phys_addr_t paddr)
+{
+ /* The sector's paddr must have a page mapped to it. */
+ ASSERT(paddr != INVALID_PADDR);
+
+ return kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
+}
- /* then add the parity stripe */
- p = rbio_pstripe_page(rbio, pagenr);
- SetPageUptodate(p);
- pointers[stripe++] = kmap_local_page(p);
+static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int sector_nr,
+ unsigned int step_nr)
+{
+ void **pointers = rbio->finish_pointers;
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
+ int stripe;
+ const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
- if (has_qstripe) {
+ /* First collect one sector from each data stripe */
+ for (stripe = 0; stripe < rbio->nr_data; stripe++)
+ pointers[stripe] = kmap_local_paddr(
+ sector_paddr_in_rbio(rbio, stripe, sector_nr, step_nr, 0));
- /*
- * raid6, add the qstripe and call the
- * library function to fill in our p/q
- */
- p = rbio_qstripe_page(rbio, pagenr);
- SetPageUptodate(p);
- pointers[stripe++] = kmap_local_page(p);
+ /* Then add the parity stripe */
+ pointers[stripe++] = kmap_local_paddr(rbio_pstripe_paddr(rbio, sector_nr, step_nr));
- raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
- pointers);
- } else {
- /* raid5 */
- copy_page(pointers[nr_data], pointers[0]);
- run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
- }
- for (stripe = stripe - 1; stripe >= 0; stripe--)
- kunmap_local(pointers[stripe]);
+ if (has_qstripe) {
+ /*
+ * RAID6, add the qstripe and call the library function
+ * to fill in our p/q
+ */
+ pointers[stripe++] = kmap_local_paddr(
+ rbio_qstripe_paddr(rbio, sector_nr, step_nr));
+
+ assert_rbio(rbio);
+ raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ } else {
+ /* raid5 */
+ memcpy(pointers[rbio->nr_data], pointers[0], step);
+ run_xor(pointers + 1, rbio->nr_data - 1, step);
}
+ for (stripe = stripe - 1; stripe >= 0; stripe--)
+ kunmap_local(pointers[stripe]);
+}
+
+/* Generate PQ for one vertical stripe. */
+static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
+{
+ const bool has_qstripe = (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6);
+
+ for (int i = 0; i < rbio->sector_nsteps; i++)
+ generate_pq_vertical_step(rbio, sectornr, i);
+
+ set_bit(rbio_sector_index(rbio, rbio->nr_data, sectornr),
+ rbio->stripe_uptodate_bitmap);
+ if (has_qstripe)
+ set_bit(rbio_sector_index(rbio, rbio->nr_data + 1, sectornr),
+ rbio->stripe_uptodate_bitmap);
+}
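/*
 * Illustrative sketch, not part of the patch: for RAID5 the step above boils
 * down to P = D0 ^ D1 ^ ... ^ Dn-1 over one step-sized buffer (run_xor() is
 * assumed to xor its source buffers into the destination that follows them,
 * which the memcpy() of D0 has already seeded), while the RAID6 Q column
 * comes from the shared raid6_call.gen_syndrome() library helper instead.
 */
static void ex_raid5_gen_parity(const unsigned char **data, int nr_data,
				unsigned char *parity, unsigned int len)
{
	for (unsigned int i = 0; i < len; i++) {
		unsigned char p = 0;

		for (int d = 0; d < nr_data; d++)
			p ^= data[d][i];
		parity[i] = p;
	}
}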
+
+static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
+ struct bio_list *bio_list)
+{
+ /* The total sector number inside the full stripe. */
+ int total_sector_nr;
+ int sectornr;
+ int stripe;
+ int ret;
+
+ ASSERT(bio_list_size(bio_list) == 0);
+
+ /* We should have at least one data sector. */
+ ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
/*
- * time to start writing. Make bios for everything from the
- * higher layers (the bio_list in our rbio) and our p/q. Ignore
- * everything else.
+ * Reset errors, as we may have errors inherited from a degraded
+ * write.
*/
- for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
- for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- struct page *page;
- if (stripe < rbio->nr_data) {
- page = page_in_rbio(rbio, stripe, pagenr, 1);
- if (!page)
- continue;
- } else {
- page = rbio_stripe_page(rbio, stripe, pagenr);
- }
+ bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
- ret = rbio_add_io_page(rbio, &bio_list,
- page, stripe, pagenr, rbio->stripe_len);
- if (ret)
- goto cleanup;
+ /*
+ * Start assembly. Make bios for everything from the higher layers (the
+ * bio_list in our rbio) and our P/Q. Ignore everything else.
+ */
+ for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
+ total_sector_nr++) {
+ phys_addr_t *paddrs;
+
+ stripe = total_sector_nr / rbio->stripe_nsectors;
+ sectornr = total_sector_nr % rbio->stripe_nsectors;
+
+ /* This vertical stripe has no data, skip it. */
+ if (!test_bit(sectornr, &rbio->dbitmap))
+ continue;
+
+ if (stripe < rbio->nr_data) {
+ paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
+ if (paddrs == NULL)
+ continue;
+ } else {
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
}
+
+ ret = rbio_add_io_paddrs(rbio, bio_list, paddrs, stripe,
+ sectornr, REQ_OP_WRITE);
+ if (ret)
+ goto error;
}
- if (likely(!bioc->num_tgtdevs))
- goto write_data;
+ if (likely(!rbio->bioc->replace_nr_stripes))
+ return 0;
- for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
- if (!bioc->tgtdev_map[stripe])
- continue;
+ /*
+ * Make a copy for the replace target device.
+ *
+ * Thus the source stripe number (in replace_stripe_src) should be valid.
+ */
+ ASSERT(rbio->bioc->replace_stripe_src >= 0);
- for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- struct page *page;
- if (stripe < rbio->nr_data) {
- page = page_in_rbio(rbio, stripe, pagenr, 1);
- if (!page)
- continue;
- } else {
- page = rbio_stripe_page(rbio, stripe, pagenr);
- }
+ for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
+ total_sector_nr++) {
+ phys_addr_t *paddrs;
- ret = rbio_add_io_page(rbio, &bio_list, page,
- rbio->bioc->tgtdev_map[stripe],
- pagenr, rbio->stripe_len);
- if (ret)
- goto cleanup;
+ stripe = total_sector_nr / rbio->stripe_nsectors;
+ sectornr = total_sector_nr % rbio->stripe_nsectors;
+
+ /*
+ * For RAID56, there is only one device that can be replaced,
+ * and replace_stripe_src indicates the stripe number we
+ * need to copy from.
+ */
+ if (stripe != rbio->bioc->replace_stripe_src) {
+ /*
+ * We can skip the whole stripe completely, note
+ * total_sector_nr will be increased by one anyway.
+ */
+ ASSERT(sectornr == 0);
+ total_sector_nr += rbio->stripe_nsectors - 1;
+ continue;
}
- }
-write_data:
- atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
- BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
+ /* This vertical stripe has no data, skip it. */
+ if (!test_bit(sectornr, &rbio->dbitmap))
+ continue;
- while ((bio = bio_list_pop(&bio_list))) {
- bio->bi_private = rbio;
- bio->bi_end_io = raid_write_end_io;
- bio->bi_opf = REQ_OP_WRITE;
+ if (stripe < rbio->nr_data) {
+ paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
+ if (paddrs == NULL)
+ continue;
+ } else {
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
+ }
- submit_bio(bio);
+ ret = rbio_add_io_paddrs(rbio, bio_list, paddrs,
+ rbio->real_stripes,
+ sectornr, REQ_OP_WRITE);
+ if (ret)
+ goto error;
}
- return;
-
-cleanup:
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
- while ((bio = bio_list_pop(&bio_list)))
- bio_put(bio);
+ return 0;
+error:
+ bio_list_put(bio_list);
+ return -EIO;
}
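/*
 * Illustrative sketch, not part of the patch: both loops above walk every
 * (stripe, sector) pair of the full stripe with a single counter and split
 * it back with a div/mod pair, as the hypothetical helper below shows.
 */
static inline void ex_split_total_sector_nr(int total_sector_nr,
					    int stripe_nsectors,
					    int *stripe, int *sectornr)
{
	*stripe = total_sector_nr / stripe_nsectors;
	*sectornr = total_sector_nr % stripe_nsectors;
}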
-/*
- * helper to find the stripe number for a given bio. Used to figure out which
- * stripe has failed. This expects the bio to correspond to a physical disk,
- * so it looks up based on physical sector numbers.
- */
-static int find_bio_stripe(struct btrfs_raid_bio *rbio,
- struct bio *bio)
+static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- u64 physical = bio->bi_iter.bi_sector;
- int i;
- struct btrfs_io_stripe *stripe;
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
+ rbio->bioc->full_stripe_logical;
+ int total_nr_sector = offset >> fs_info->sectorsize_bits;
- physical <<= 9;
+ ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
- for (i = 0; i < rbio->bioc->num_stripes; i++) {
- stripe = &rbio->bioc->stripes[i];
- if (in_range(physical, stripe->physical, rbio->stripe_len) &&
- stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
- return i;
+ bitmap_set(rbio->error_bitmap, total_nr_sector,
+ bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
+
+ /*
+ * Special handling for raid56_alloc_missing_rbio() used by
+ * scrub/replace. Unlike the call path in raid56_parity_recover(), they
+ * pass an empty bio here. Thus we have to find out the missing device
+ * and mark the stripe error instead.
+ */
+ if (bio->bi_iter.bi_size == 0) {
+ bool found_missing = false;
+ int stripe_nr;
+
+ for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
+ if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
+ found_missing = true;
+ bitmap_set(rbio->error_bitmap,
+ stripe_nr * rbio->stripe_nsectors,
+ rbio->stripe_nsectors);
+ }
}
+ ASSERT(found_missing);
}
- return -1;
}
/*
- * helper to find the stripe number for a given
- * bio (before mapping). Used to figure out which stripe has
- * failed. This looks up based on logical block numbers.
+ * Return the sector index inside the rbio->stripe_paddrs[] array.
+ *
+ * Return -1 if not found.
*/
-static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
- struct bio *bio)
+static int find_stripe_sector_nr(struct btrfs_raid_bio *rbio, phys_addr_t paddr)
{
- u64 logical = bio->bi_iter.bi_sector << 9;
- int i;
-
- for (i = 0; i < rbio->nr_data; i++) {
- u64 stripe_start = rbio->bioc->raid_map[i];
-
- if (in_range(logical, stripe_start, rbio->stripe_len))
+ for (int i = 0; i < rbio->nr_sectors; i++) {
+ if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == paddr)
return i;
}
return -1;
}
/*
- * returns -EIO if we had too many failures
+ * this sets each page in the bio uptodate. It should only be used on private
+ * rbio pages, nothing that comes in from the higher layers
*/
-static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
+static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- unsigned long flags;
- int ret = 0;
+ const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
+ const u32 step = min(sectorsize, PAGE_SIZE);
+ u32 offset = 0;
+ phys_addr_t paddr;
- spin_lock_irqsave(&rbio->bio_list_lock, flags);
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
- /* we already know this stripe is bad, move on */
- if (rbio->faila == failed || rbio->failb == failed)
- goto out;
+ btrfs_bio_for_each_block_all(paddr, bio, step) {
+ /* Hitting the first step of a sector. */
+ if (IS_ALIGNED(offset, sectorsize)) {
+ int sector_nr = find_stripe_sector_nr(rbio, paddr);
- if (rbio->faila == -1) {
- /* first failure on this rbio */
- rbio->faila = failed;
- atomic_inc(&rbio->error);
- } else if (rbio->failb == -1) {
- /* second failure on this rbio */
- rbio->failb = failed;
- atomic_inc(&rbio->error);
- } else {
- ret = -EIO;
+ ASSERT(sector_nr >= 0);
+ if (sector_nr >= 0)
+ set_bit(sector_nr, rbio->stripe_uptodate_bitmap);
+ }
+ offset += step;
}
-out:
- spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
-
- return ret;
}
-/*
- * helper to fail a stripe based on a physical disk
- * bio.
- */
-static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
- struct bio *bio)
+static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
- int failed = find_bio_stripe(rbio, bio);
-
- if (failed < 0)
- return -EIO;
+ phys_addr_t bvec_paddr = bvec_phys(bio_first_bvec_all(bio));
+ int i;
- return fail_rbio_index(rbio, failed);
+ for (i = 0; i < rbio->nr_sectors; i++) {
+ if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == bvec_paddr)
+ break;
+ if (rbio->bio_paddrs[i * rbio->sector_nsteps] == bvec_paddr)
+ break;
+ }
+ ASSERT(i < rbio->nr_sectors);
+ return i;
}
-/*
- * this sets each page in the bio uptodate. It should only be used on private
- * rbio pages, nothing that comes in from the higher layers
- */
-static void set_bio_pages_uptodate(struct bio *bio)
+static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
+ int total_sector_nr = get_bio_sector_nr(rbio, bio);
+ u32 bio_size = 0;
struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ int i;
- ASSERT(!bio_flagged(bio, BIO_CLONED));
+ bio_for_each_bvec_all(bvec, bio, i)
+ bio_size += bvec->bv_len;
- bio_for_each_segment_all(bvec, bio, iter_all)
- SetPageUptodate(bvec->bv_page);
+ /*
+ * Since we can have multiple bios touching the error_bitmap, we cannot
+ * call bitmap_set() without protection.
+ *
+ * Instead use set_bit() for each bit, as set_bit() itself is atomic.
+ */
+ for (i = total_sector_nr; i < total_sector_nr +
+ (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
+ set_bit(i, rbio->error_bitmap);
}
-/*
- * end io for the read phase of the rmw cycle. All the bios here are physical
- * stripe bios we've read from the disk so we can recalculate the parity of the
- * stripe.
- *
- * This will usually kick off finish_rmw once all the bios are read in, but it
- * may trigger parity reconstruction if we had any errors along the way
- */
-static void raid_rmw_end_io(struct bio *bio)
+/* Verify the data sectors at read time. */
+static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
{
- struct btrfs_raid_bio *rbio = bio->bi_private;
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ const u32 nr_steps = rbio->sector_nsteps;
+ int total_sector_nr = get_bio_sector_nr(rbio, bio);
+ u32 offset = 0;
+ phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
+ phys_addr_t paddr;
- if (bio->bi_status)
- fail_bio_stripe(rbio, bio);
- else
- set_bio_pages_uptodate(bio);
-
- bio_put(bio);
+ /* No data csum for the whole stripe, no need to verify. */
+ if (!rbio->csum_bitmap || !rbio->csum_buf)
+ return;
- if (!atomic_dec_and_test(&rbio->stripes_pending))
+ /* P/Q stripes, they have no data csum to verify against. */
+ if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
return;
- if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
- goto cleanup;
+ btrfs_bio_for_each_block_all(paddr, bio, step) {
+ u8 csum_buf[BTRFS_CSUM_SIZE];
+ u8 *expected_csum;
- /*
- * this will normally call finish_rmw to start our write
- * but if there are any failed stripes we'll reconstruct
- * from parity first
- */
- validate_rbio_for_rmw(rbio);
- return;
+ paddrs[(offset / step) % nr_steps] = paddr;
+ offset += step;
-cleanup:
+ /* Not yet covering the full fs block, continue to the next step. */
+ if (!IS_ALIGNED(offset, fs_info->sectorsize))
+ continue;
+
+ /* No csum for this sector, skip to the next sector. */
+ if (!test_bit(total_sector_nr, rbio->csum_bitmap))
+ continue;
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
+ expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size;
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf);
+ if (unlikely(memcmp(csum_buf, expected_csum, fs_info->csum_size) != 0))
+ set_bit(total_sector_nr, rbio->error_bitmap);
+ total_sector_nr++;
+ }
}
-/*
- * the stripe must be locked by the caller. It will
- * unlock after all the writes are done
- */
-static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
+static void raid_wait_read_end_io(struct bio *bio)
{
- int bios_to_read = 0;
- struct bio_list bio_list;
- int ret;
- int pagenr;
- int stripe;
- struct bio *bio;
+ struct btrfs_raid_bio *rbio = bio->bi_private;
- bio_list_init(&bio_list);
+ if (bio->bi_status) {
+ rbio_update_error_bitmap(rbio, bio);
+ } else {
+ set_bio_pages_uptodate(rbio, bio);
+ verify_bio_data_sectors(rbio, bio);
+ }
- ret = alloc_rbio_pages(rbio);
- if (ret)
- goto cleanup;
+ bio_put(bio);
+ if (atomic_dec_and_test(&rbio->stripes_pending))
+ wake_up(&rbio->io_wait);
+}
- index_rbio_pages(rbio);
+static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
+ struct bio_list *bio_list)
+{
+ struct bio *bio;
- atomic_set(&rbio->error, 0);
- /*
- * build a list of bios to read all the missing parts of this
- * stripe
- */
- for (stripe = 0; stripe < rbio->nr_data; stripe++) {
- for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- struct page *page;
- /*
- * we want to find all the pages missing from
- * the rbio and read them from the disk. If
- * page_in_rbio finds a page in the bio list
- * we don't need to read it off the stripe.
- */
- page = page_in_rbio(rbio, stripe, pagenr, 1);
- if (page)
- continue;
+ atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
+ while ((bio = bio_list_pop(bio_list))) {
+ bio->bi_end_io = raid_wait_read_end_io;
- page = rbio_stripe_page(rbio, stripe, pagenr);
- /*
- * the bio cache may have handed us an uptodate
- * page. If so, be happy and use it
- */
- if (PageUptodate(page))
- continue;
+ if (trace_raid56_read_enabled()) {
+ struct raid56_bio_trace_info trace_info = { 0 };
- ret = rbio_add_io_page(rbio, &bio_list, page,
- stripe, pagenr, rbio->stripe_len);
- if (ret)
- goto cleanup;
+ bio_get_trace_info(rbio, bio, &trace_info);
+ trace_raid56_read(rbio, bio, &trace_info);
}
- }
-
- bios_to_read = bio_list_size(&bio_list);
- if (!bios_to_read) {
- /*
- * this can happen if others have merged with
- * us, it means there is nothing left to read.
- * But if there are missing devices it may not be
- * safe to do the full stripe write yet.
- */
- goto finish;
- }
-
- /*
- * The bioc may be freed once we submit the last bio. Make sure not to
- * touch it after that.
- */
- atomic_set(&rbio->stripes_pending, bios_to_read);
- while ((bio = bio_list_pop(&bio_list))) {
- bio->bi_private = rbio;
- bio->bi_end_io = raid_rmw_end_io;
- bio->bi_opf = REQ_OP_READ;
-
- btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
-
submit_bio(bio);
}
- /* the actual write will happen once the reads are done */
- return 0;
-
-cleanup:
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
-
- while ((bio = bio_list_pop(&bio_list)))
- bio_put(bio);
-
- return -EIO;
-finish:
- validate_rbio_for_rmw(rbio);
- return 0;
+ wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
}
-/*
- * if the upper layers pass in a full stripe, we thank them by only allocating
- * enough pages to hold the parity, and sending it all down quickly.
- */
-static int full_stripe_write(struct btrfs_raid_bio *rbio)
+static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
+ const int data_pages = rbio->nr_data * rbio->stripe_npages;
int ret;
- ret = alloc_rbio_parity_pages(rbio);
- if (ret) {
- __free_raid_bio(rbio);
+ ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false);
+ if (ret < 0)
return ret;
- }
- ret = lock_stripe_add(rbio);
- if (ret == 0)
- finish_rmw(rbio);
+ index_stripe_sectors(rbio);
return 0;
}
/*
- * partial stripe writes get handed over to async helpers.
- * We're really hoping to merge a few more writes into this
- * rbio before calculating new parity
- */
-static int partial_stripe_write(struct btrfs_raid_bio *rbio)
-{
- int ret;
-
- ret = lock_stripe_add(rbio);
- if (ret == 0)
- start_async_work(rbio, rmw_work);
- return 0;
-}
-
-/*
- * sometimes while we were reading from the drive to
- * recalculate parity, enough new bios come into create
- * a full stripe. So we do a check here to see if we can
- * go directly to finish_rmw
- */
-static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
-{
- /* head off into rmw land if we don't have a full stripe */
- if (!rbio_is_full(rbio))
- return partial_stripe_write(rbio);
- return full_stripe_write(rbio);
-}
-
-/*
* We use plugging call backs to collect full stripes.
* Any time we get a partial stripe write while plugged
* we collect it into a list. When the unplug comes down,
@@ -1624,7 +1772,6 @@ struct btrfs_plug_cb {
struct blk_plug_cb cb;
struct btrfs_fs_info *info;
struct list_head rbio_list;
- struct btrfs_work work;
};
/*
@@ -1647,465 +1794,466 @@ static int plug_cmp(void *priv, const struct list_head *a,
return 0;
}
-static void run_plug(struct btrfs_plug_cb *plug)
+static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
+ struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *last = NULL;
- /*
- * sort our plug list then try to merge
- * everything we can in hopes of creating full
- * stripes.
- */
list_sort(NULL, &plug->rbio_list, plug_cmp);
+
while (!list_empty(&plug->rbio_list)) {
- cur = list_entry(plug->rbio_list.next,
- struct btrfs_raid_bio, plug_list);
+ cur = list_first_entry(&plug->rbio_list,
+ struct btrfs_raid_bio, plug_list);
list_del_init(&cur->plug_list);
if (rbio_is_full(cur)) {
- int ret;
-
- /* we have a full stripe, send it down */
- ret = full_stripe_write(cur);
- BUG_ON(ret);
+ /* We have a full stripe, queue it down. */
+ start_async_work(cur, rmw_rbio_work);
continue;
}
if (last) {
if (rbio_can_merge(last, cur)) {
merge_rbio(last, cur);
- __free_raid_bio(cur);
+ free_raid_bio(cur);
continue;
-
}
- __raid56_parity_write(last);
+ start_async_work(last, rmw_rbio_work);
}
last = cur;
}
- if (last) {
- __raid56_parity_write(last);
- }
+ if (last)
+ start_async_work(last, rmw_rbio_work);
kfree(plug);
}
-/*
- * if the unplug comes from schedule, we have to push the
- * work off to a helper thread
- */
-static void unplug_work(struct btrfs_work *work)
+/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
+static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
- struct btrfs_plug_cb *plug;
- plug = container_of(work, struct btrfs_plug_cb, work);
- run_plug(plug);
-}
+ const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
+ const u32 orig_len = orig_bio->bi_iter.bi_size;
+ const u32 sectorsize = fs_info->sectorsize;
+ u64 cur_logical;
-static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct btrfs_plug_cb *plug;
- plug = container_of(cb, struct btrfs_plug_cb, cb);
+ ASSERT_RBIO_LOGICAL(orig_logical >= full_stripe_start &&
+ orig_logical + orig_len <= full_stripe_start +
+ rbio->nr_data * BTRFS_STRIPE_LEN,
+ rbio, orig_logical);
- if (from_schedule) {
- btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
- btrfs_queue_work(plug->info->rmw_workers,
- &plug->work);
- return;
+ bio_list_add(&rbio->bio_list, orig_bio);
+ rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
+
+ /* Update the dbitmap. */
+ for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
+ cur_logical += sectorsize) {
+ int bit = ((u32)(cur_logical - full_stripe_start) >>
+ fs_info->sectorsize_bits) % rbio->stripe_nsectors;
+
+ set_bit(bit, &rbio->dbitmap);
}
- run_plug(plug);
}
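/*
 * Illustrative sketch, not part of the patch: the dbitmap bit set above is
 * the sector offset from the full stripe start, wrapped once per data
 * stripe.  The helper name and example numbers below are hypothetical.
 */
static inline int ex_dbitmap_bit(unsigned long long logical,
				 unsigned long long full_stripe_start,
				 unsigned int sectorsize_bits,
				 unsigned int stripe_nsectors)
{
	return (int)(((logical - full_stripe_start) >> sectorsize_bits) %
		     stripe_nsectors);
}
/* e.g. 64K stripes, 4K blocks: a block 68K into the full stripe is bit 1. */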
/*
* our main entry point for writes from the rest of the FS.
*/
-int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc,
- u64 stripe_len)
+void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
{
struct btrfs_fs_info *fs_info = bioc->fs_info;
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
- int ret;
- rbio = alloc_rbio(fs_info, bioc, stripe_len);
+ rbio = alloc_rbio(fs_info, bioc);
if (IS_ERR(rbio)) {
- btrfs_put_bioc(bioc);
- return PTR_ERR(rbio);
+ bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
+ bio_endio(bio);
+ return;
}
- bio_list_add(&rbio->bio_list, bio);
- rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->operation = BTRFS_RBIO_WRITE;
-
- btrfs_bio_counter_inc_noblocked(fs_info);
- rbio->generic_bio_cnt = 1;
+ rbio_add_bio(rbio, bio);
/*
- * don't plug on full rbios, just get them out the door
+ * Don't plug on full rbios, just get them out the door
* as quickly as we can
*/
- if (rbio_is_full(rbio)) {
- ret = full_stripe_write(rbio);
- if (ret)
- btrfs_bio_counter_dec(fs_info);
- return ret;
- }
-
- cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
- if (cb) {
- plug = container_of(cb, struct btrfs_plug_cb, cb);
- if (!plug->info) {
- plug->info = fs_info;
- INIT_LIST_HEAD(&plug->rbio_list);
+ if (!rbio_is_full(rbio)) {
+ cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
+ if (cb) {
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+ if (!plug->info) {
+ plug->info = fs_info;
+ INIT_LIST_HEAD(&plug->rbio_list);
+ }
+ list_add_tail(&rbio->plug_list, &plug->rbio_list);
+ return;
}
- list_add_tail(&rbio->plug_list, &plug->rbio_list);
- ret = 0;
- } else {
- ret = __raid56_parity_write(rbio);
- if (ret)
- btrfs_bio_counter_dec(fs_info);
}
- return ret;
+
+ /*
+ * Either we don't have any existing plug, or we're doing a full stripe,
+ * queue the rmw work now.
+ */
+ start_async_work(rbio, rmw_rbio_work);
}
-/*
- * all parity reconstruction happens here. We've read in everything
- * we can find from the drives and this does the heavy lifting of
- * sorting the good from the bad.
- */
-static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
+static int verify_one_sector(struct btrfs_raid_bio *rbio,
+ int stripe_nr, int sector_nr)
{
- int pagenr, stripe;
- void **pointers;
- void **unmap_array;
- int faila = -1, failb = -1;
- struct page *page;
- blk_status_t err;
- int i;
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ phys_addr_t *paddrs;
+ u8 csum_buf[BTRFS_CSUM_SIZE];
+ u8 *csum_expected;
- pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
- if (!pointers) {
- err = BLK_STS_RESOURCE;
- goto cleanup_io;
- }
+ if (!rbio->csum_bitmap || !rbio->csum_buf)
+ return 0;
+ /* No way to verify P/Q as they are not covered by data csum. */
+ if (stripe_nr >= rbio->nr_data)
+ return 0;
/*
- * Store copy of pointers that does not get reordered during
- * reconstruction so that kunmap_local works.
+ * If we're rebuilding a read, we have to use pages from the
+ * bio list if possible.
*/
- unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
- if (!unmap_array) {
- err = BLK_STS_RESOURCE;
- goto cleanup_pointers;
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
+ paddrs = sector_paddrs_in_rbio(rbio, stripe_nr, sector_nr, 0);
+ } else {
+ paddrs = rbio_stripe_paddrs(rbio, stripe_nr, sector_nr);
}
- faila = rbio->faila;
- failb = rbio->failb;
+ csum_expected = rbio->csum_buf +
+ (stripe_nr * rbio->stripe_nsectors + sector_nr) *
+ fs_info->csum_size;
+ btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf);
+ if (unlikely(memcmp(csum_buf, csum_expected, fs_info->csum_size) != 0))
+ return -EIO;
+ return 0;
+}
- if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
- spin_lock_irq(&rbio->bio_list_lock);
- set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
- spin_unlock_irq(&rbio->bio_list_lock);
- }
+static void recover_vertical_step(struct btrfs_raid_bio *rbio,
+ unsigned int sector_nr,
+ unsigned int step_nr,
+ int faila, int failb,
+ void **pointers, void **unmap_array)
+{
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
+ int stripe_nr;
- index_rbio_pages(rbio);
+ ASSERT(step_nr < rbio->sector_nsteps);
+ ASSERT(sector_nr < rbio->stripe_nsectors);
- for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- /*
- * Now we just use bitmap to mark the horizontal stripes in
- * which we have data when doing parity scrub.
- */
- if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
- !test_bit(pagenr, rbio->dbitmap))
- continue;
+ /*
+ * Setup our array of pointers with sectors from each stripe
+ *
+ * NOTE: store a duplicate array of pointers to preserve the
+ * pointer order.
+ */
+ for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
+ phys_addr_t paddr;
/*
- * Setup our array of pointers with pages from each stripe
- *
- * NOTE: store a duplicate array of pointers to preserve the
- * pointer order
+ * If we're rebuilding a read, we have to use pages from the
+ * bio list if possible.
*/
- for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
- /*
- * if we're rebuilding a read, we have to use
- * pages from the bio list
- */
- if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
- (stripe == faila || stripe == failb)) {
- page = page_in_rbio(rbio, stripe, pagenr, 0);
- } else {
- page = rbio_stripe_page(rbio, stripe, pagenr);
- }
- pointers[stripe] = kmap_local_page(page);
- unmap_array[stripe] = pointers[stripe];
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
+ paddr = sector_paddr_in_rbio(rbio, stripe_nr, sector_nr, step_nr, 0);
+ } else {
+ paddr = rbio_stripe_paddr(rbio, stripe_nr, sector_nr, step_nr);
}
+ pointers[stripe_nr] = kmap_local_paddr(paddr);
+ unmap_array[stripe_nr] = pointers[stripe_nr];
+ }
- /* all raid6 handling here */
- if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
- /*
- * single failure, rebuild from parity raid5
- * style
- */
- if (failb < 0) {
- if (faila == rbio->nr_data) {
- /*
- * Just the P stripe has failed, without
- * a bad data or Q stripe.
- * TODO, we should redo the xor here.
- */
- err = BLK_STS_IOERR;
- goto cleanup;
- }
+ /* All raid6 handling here */
+ if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
+ /* Single failure, rebuild from parity raid5 style */
+ if (failb < 0) {
+ if (faila == rbio->nr_data)
/*
- * a single failure in raid6 is rebuilt
- * in the pstripe code below
+ * Just the P stripe has failed, without
+ * a bad data or Q stripe.
+ * We have nothing to do, just skip the
+ * recovery for this stripe.
*/
- goto pstripe;
- }
-
- /* make sure our ps and qs are in order */
- if (faila > failb)
- swap(faila, failb);
-
- /* if the q stripe is failed, do a pstripe reconstruction
- * from the xors.
- * If both the q stripe and the P stripe are failed, we're
- * here due to a crc mismatch and we can't give them the
- * data they want
+ goto cleanup;
+ /*
+ * a single failure in raid6 is rebuilt
+ * in the pstripe code below
*/
- if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
- if (rbio->bioc->raid_map[faila] ==
- RAID5_P_STRIPE) {
- err = BLK_STS_IOERR;
- goto cleanup;
- }
+ goto pstripe;
+ }
+
+ /*
+ * If the q stripe is failed, do a pstripe reconstruction from
+ * the xors.
+ * If both the q stripe and the P stripe are failed, we're
+ * here due to a crc mismatch and we can't give them the
+ * data they want.
+ */
+ if (failb == rbio->real_stripes - 1) {
+ if (faila == rbio->real_stripes - 2)
/*
- * otherwise we have one bad data stripe and
- * a good P stripe. raid5!
+ * Only P and Q are corrupted.
+ * We only care about data stripes recovery,
+ * can skip this vertical stripe.
*/
- goto pstripe;
- }
+ goto cleanup;
+ /*
+ * Otherwise we have one bad data stripe and
+ * a good P stripe. raid5!
+ */
+ goto pstripe;
+ }
- if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
- raid6_datap_recov(rbio->real_stripes,
- PAGE_SIZE, faila, pointers);
- } else {
- raid6_2data_recov(rbio->real_stripes,
- PAGE_SIZE, faila, failb,
- pointers);
- }
+ if (failb == rbio->real_stripes - 2) {
+ raid6_datap_recov(rbio->real_stripes, step,
+ faila, pointers);
} else {
- void *p;
+ raid6_2data_recov(rbio->real_stripes, step,
+ faila, failb, pointers);
+ }
+ } else {
+ void *p;
- /* rebuild from P stripe here (raid5 or raid6) */
- BUG_ON(failb != -1);
+ /* Rebuild from P stripe here (raid5 or raid6). */
+ ASSERT(failb == -1);
pstripe:
- /* Copy parity block into failed block to start with */
- copy_page(pointers[faila], pointers[rbio->nr_data]);
-
- /* rearrange the pointer array */
- p = pointers[faila];
- for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
- pointers[stripe] = pointers[stripe + 1];
- pointers[rbio->nr_data - 1] = p;
-
- /* xor in the rest */
- run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
- }
- /* if we're doing this rebuild as part of an rmw, go through
- * and set all of our private rbio pages in the
- * failed stripes as uptodate. This way finish_rmw will
- * know they can be trusted. If this was a read reconstruction,
- * other endio functions will fiddle the uptodate bits
- */
- if (rbio->operation == BTRFS_RBIO_WRITE) {
- for (i = 0; i < rbio->stripe_npages; i++) {
- if (faila != -1) {
- page = rbio_stripe_page(rbio, faila, i);
- SetPageUptodate(page);
- }
- if (failb != -1) {
- page = rbio_stripe_page(rbio, failb, i);
- SetPageUptodate(page);
- }
- }
- }
- for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--)
- kunmap_local(unmap_array[stripe]);
+ /* Copy parity block into failed block to start with */
+ memcpy(pointers[faila], pointers[rbio->nr_data], step);
+
+ /* Rearrange the pointer array */
+ p = pointers[faila];
+ for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
+ stripe_nr++)
+ pointers[stripe_nr] = pointers[stripe_nr + 1];
+ pointers[rbio->nr_data - 1] = p;
+
+ /* Xor in the rest */
+ run_xor(pointers, rbio->nr_data - 1, step);
}
- err = BLK_STS_OK;
cleanup:
- kfree(unmap_array);
-cleanup_pointers:
- kfree(pointers);
+ for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
+ kunmap_local(unmap_array[stripe_nr]);
+}
+
+/*
+ * Recover a vertical stripe specified by @sector_nr.
+ * @*pointers are the pre-allocated pointers by the caller, so we don't
+ * need to allocate/free the pointers again and again.
+ */
+static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
+ void **pointers, void **unmap_array)
+{
+ int found_errors;
+ int faila;
+ int failb;
+ int ret = 0;
-cleanup_io:
/*
- * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
- * valid rbio which is consistent with ondisk content, thus such a
- * valid rbio can be cached to avoid further disk reads.
+ * Now we just use bitmap to mark the horizontal stripes in
+ * which we have data when doing parity scrub.
*/
- if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
- /*
- * - In case of two failures, where rbio->failb != -1:
- *
- * Do not cache this rbio since the above read reconstruction
- * (raid6_datap_recov() or raid6_2data_recov()) may have
- * changed some content of stripes which are not identical to
- * on-disk content any more, otherwise, a later write/recover
- * may steal stripe_pages from this rbio and end up with
- * corruptions or rebuild failures.
- *
- * - In case of single failure, where rbio->failb == -1:
- *
- * Cache this rbio iff the above read reconstruction is
- * executed without problems.
- */
- if (err == BLK_STS_OK && rbio->failb < 0)
- cache_rbio_pages(rbio);
- else
- clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
-
- rbio_orig_end_io(rbio, err);
- } else if (err == BLK_STS_OK) {
- rbio->faila = -1;
- rbio->failb = -1;
-
- if (rbio->operation == BTRFS_RBIO_WRITE)
- finish_rmw(rbio);
- else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
- finish_parity_scrub(rbio, 0);
- else
- BUG();
- } else {
- rbio_orig_end_io(rbio, err);
+ if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
+ !test_bit(sector_nr, &rbio->dbitmap))
+ return 0;
+
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr, &faila,
+ &failb);
+ /*
+ * No errors in the vertical stripe, skip it. Can happen for recovery
+ * which only part of a stripe failed csum check.
+ */
+ if (!found_errors)
+ return 0;
+
+ if (unlikely(found_errors > rbio->bioc->max_errors))
+ return -EIO;
+
+ for (int i = 0; i < rbio->sector_nsteps; i++)
+ recover_vertical_step(rbio, sector_nr, i, faila, failb,
+ pointers, unmap_array);
+ if (faila >= 0) {
+ ret = verify_one_sector(rbio, faila, sector_nr);
+ if (ret < 0)
+ return ret;
+
+ set_bit(rbio_sector_index(rbio, faila, sector_nr),
+ rbio->stripe_uptodate_bitmap);
+ }
+ if (failb >= 0) {
+ ret = verify_one_sector(rbio, failb, sector_nr);
+ if (ret < 0)
+ return ret;
+
+ set_bit(rbio_sector_index(rbio, failb, sector_nr),
+ rbio->stripe_uptodate_bitmap);
}
+ return ret;
}
-/*
- * This is called only for stripes we've read from disk to
- * reconstruct the parity.
- */
-static void raid_recover_end_io(struct bio *bio)
+static int recover_sectors(struct btrfs_raid_bio *rbio)
{
- struct btrfs_raid_bio *rbio = bio->bi_private;
+ void **pointers = NULL;
+ void **unmap_array = NULL;
+ int sectornr;
+ int ret = 0;
/*
- * we only read stripe pages off the disk, set them
- * up to date if there were no errors
+ * @pointers array stores the pointer for each sector.
+ *
+ * @unmap_array stores copy of pointers that does not get reordered
+ * during reconstruction so that kunmap_local works.
*/
- if (bio->bi_status)
- fail_bio_stripe(rbio, bio);
- else
- set_bio_pages_uptodate(bio);
- bio_put(bio);
+ pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
+ unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
+ if (!pointers || !unmap_array) {
+ ret = -ENOMEM;
+ goto out;
+ }
- if (!atomic_dec_and_test(&rbio->stripes_pending))
- return;
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
+ spin_lock(&rbio->bio_list_lock);
+ set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ spin_unlock(&rbio->bio_list_lock);
+ }
- if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
- else
- __raid_recover_end_io(rbio);
+ index_rbio_pages(rbio);
+
+ for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
+ ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
+ if (ret < 0)
+ break;
+ }
+
+out:
+ kfree(pointers);
+ kfree(unmap_array);
+ return ret;
}
-/*
- * reads everything we need off the disk to reconstruct
- * the parity. endio handlers trigger final reconstruction
- * when the IO is done.
- *
- * This is used both for reads from the higher layers and for
- * parity construction required to finish a rmw cycle.
- */
-static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+static void recover_rbio(struct btrfs_raid_bio *rbio)
{
- int bios_to_read = 0;
- struct bio_list bio_list;
- int ret;
- int pagenr;
- int stripe;
- struct bio *bio;
+ struct bio_list bio_list = BIO_EMPTY_LIST;
+ int total_sector_nr;
+ int ret = 0;
- bio_list_init(&bio_list);
+ /*
+ * Whether we're doing recovery for a read failure or for a degraded
+ * write, the caller should have set the error bitmap correctly.
+ */
+ ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
+ /* For recovery, we need to read all sectors including P/Q. */
ret = alloc_rbio_pages(rbio);
- if (ret)
- goto cleanup;
+ if (ret < 0)
+ goto out;
- atomic_set(&rbio->error, 0);
+ index_rbio_pages(rbio);
/*
- * read everything that hasn't failed. Thanks to the
- * stripe cache, it is possible that some or all of these
- * pages are going to be uptodate.
+ * Read everything that hasn't failed. However this time we do not
+ * trust any cached sector: it may contain stale data for a part of
+ * the stripe the higher layer is not reading, yet it would still be
+ * used as a reconstruction source.
+ *
+ * So in the recovery path we always re-read everything from disk.
*/
- for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
- if (rbio->faila == stripe || rbio->failb == stripe) {
- atomic_inc(&rbio->error);
- continue;
- }
-
- for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- struct page *p;
+ for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
+ total_sector_nr++) {
+ int stripe = total_sector_nr / rbio->stripe_nsectors;
+ int sectornr = total_sector_nr % rbio->stripe_nsectors;
+ phys_addr_t *paddrs;
+ /*
+ * Skip any range that has an error. It can be a range marked as
+ * having an error (e.g. a csum mismatch), or a range on a missing
+ * device.
+ */
+ if (!rbio->bioc->stripes[stripe].dev->bdev ||
+ test_bit(total_sector_nr, rbio->error_bitmap)) {
/*
- * the rmw code may have already read this
- * page in
+ * Also set the error bit for missing device, which
+ * may not yet have its error bit set.
*/
- p = rbio_stripe_page(rbio, stripe, pagenr);
- if (PageUptodate(p))
- continue;
-
- ret = rbio_add_io_page(rbio, &bio_list,
- rbio_stripe_page(rbio, stripe, pagenr),
- stripe, pagenr, rbio->stripe_len);
- if (ret < 0)
- goto cleanup;
+ set_bit(total_sector_nr, rbio->error_bitmap);
+ continue;
}
- }
- bios_to_read = bio_list_size(&bio_list);
- if (!bios_to_read) {
- /*
- * we might have no bios to read just because the pages
- * were up to date, or we might have no bios to read because
- * the devices were gone.
- */
- if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) {
- __raid_recover_end_io(rbio);
- return 0;
- } else {
- goto cleanup;
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
+ sectornr, REQ_OP_READ);
+ if (ret < 0) {
+ bio_list_put(&bio_list);
+ goto out;
}
}
- /*
- * The bioc may be freed once we submit the last bio. Make sure not to
- * touch it after that.
- */
- atomic_set(&rbio->stripes_pending, bios_to_read);
- while ((bio = bio_list_pop(&bio_list))) {
- bio->bi_private = rbio;
- bio->bi_end_io = raid_recover_end_io;
- bio->bi_opf = REQ_OP_READ;
+ submit_read_wait_bio_list(rbio, &bio_list);
+ ret = recover_sectors(rbio);
+out:
+ rbio_orig_end_io(rbio, errno_to_blk_status(ret));
+}
- btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
+static void recover_rbio_work(struct work_struct *work)
+{
+ struct btrfs_raid_bio *rbio;
- submit_bio(bio);
- }
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ if (!lock_stripe_add(rbio))
+ recover_rbio(rbio);
+}
- return 0;
+static void recover_rbio_work_locked(struct work_struct *work)
+{
+ recover_rbio(container_of(work, struct btrfs_raid_bio, work));
+}
-cleanup:
- if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
+static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
+{
+ bool found = false;
+ int sector_nr;
- while ((bio = bio_list_pop(&bio_list)))
- bio_put(bio);
+ /*
+ * This is for extra RAID6 recovery tries, thus the mirror number
+ * should be larger than 2.
+ * Mirror 1 means reading from the data stripes. Mirror 2 means
+ * rebuilding using RAID5 methods. (See the worked example after
+ * this function.)
+ */
+ ASSERT(mirror_num > 2);
+ for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
+ int found_errors;
+ int faila;
+ int failb;
+
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr,
+ &faila, &failb);
+ /* This vertical stripe doesn't have errors. */
+ if (!found_errors)
+ continue;
- return -EIO;
+ /*
+ * If we found errors, there should be only one error marked
+ * by previous set_rbio_range_error().
+ */
+ ASSERT(found_errors == 1);
+ found = true;
+
+ /* Now select another stripe to mark as error. */
+ failb = rbio->real_stripes - (mirror_num - 1);
+ if (failb <= faila)
+ failb--;
+
+ /* Set the extra bit in error bitmap. */
+ if (failb >= 0)
+ set_bit(failb * rbio->stripe_nsectors + sector_nr,
+ rbio->error_bitmap);
+ }
+
+ /* We should have found at least one vertical stripe with an error. */
+ ASSERT(found);
}
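
/*
 * Worked example (editorial illustration, not part of this patch): assume a
 * 4-device RAID6 full stripe, i.e. real_stripes == 4 and nr_data == 2, with P
 * at stripe 2 and Q at stripe 3, and the original error on data stripe
 * faila == 0:
 *
 *   mirror_num == 3: failb = 4 - (3 - 1) = 2, so the P stripe is additionally
 *                    marked bad and reconstruction must use Q
 *                    (the raid6_datap_recov() style path).
 *
 *   mirror_num == 4: failb = 4 - (4 - 1) = 1; since 1 > faila it is kept, so
 *                    the other data stripe is additionally marked bad and
 *                    reconstruction must use both P and Q
 *                    (the raid6_2data_recov() style path).
 *
 * The "if (failb <= faila) failb--" adjustment simply skips over the stripe
 * that already failed, so each retry selects a distinct extra stripe.
 */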
/*
@@ -2114,99 +2262,287 @@ cleanup:
* so we assume the bio they send down corresponds to a failed part
* of the drive.
*/
-int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
- u64 stripe_len, int mirror_num, int generic_io)
+void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
+ int mirror_num)
{
struct btrfs_fs_info *fs_info = bioc->fs_info;
struct btrfs_raid_bio *rbio;
- int ret;
-
- if (generic_io) {
- ASSERT(bioc->mirror_num == mirror_num);
- btrfs_bio(bio)->mirror_num = mirror_num;
- }
- rbio = alloc_rbio(fs_info, bioc, stripe_len);
+ rbio = alloc_rbio(fs_info, bioc);
if (IS_ERR(rbio)) {
- if (generic_io)
- btrfs_put_bioc(bioc);
- return PTR_ERR(rbio);
+ bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
+ bio_endio(bio);
+ return;
}
rbio->operation = BTRFS_RBIO_READ_REBUILD;
- bio_list_add(&rbio->bio_list, bio);
- rbio->bio_list_bytes = bio->bi_iter.bi_size;
-
- rbio->faila = find_logical_bio_stripe(rbio, bio);
- if (rbio->faila == -1) {
- btrfs_warn(fs_info,
-"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)",
- __func__, bio->bi_iter.bi_sector << 9,
- (u64)bio->bi_iter.bi_size, bioc->map_type);
- if (generic_io)
- btrfs_put_bioc(bioc);
- kfree(rbio);
- return -EIO;
- }
+ rbio_add_bio(rbio, bio);
- if (generic_io) {
- btrfs_bio_counter_inc_noblocked(fs_info);
- rbio->generic_bio_cnt = 1;
- } else {
- btrfs_get_bioc(bioc);
- }
+ set_rbio_range_error(rbio, bio);
/*
* Loop retry:
* for 'mirror_num == 2', reconstruct from all other stripes.
* for 'mirror_num > 2', select a stripe to fail on every retry.
*/
- if (mirror_num > 2) {
+ if (mirror_num > 2)
+ set_rbio_raid6_extra_error(rbio, mirror_num);
+
+ start_async_work(rbio, recover_rbio_work);
+}
+
+static void fill_data_csums(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
+ rbio->bioc->full_stripe_logical);
+ const u64 start = rbio->bioc->full_stripe_logical;
+ const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
+ fs_info->sectorsize_bits;
+ int ret;
+
+ /* The rbio should not have its csum buffer initialized. */
+ ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
+
+ /*
+ * Skip the csum search if:
+ *
+ * - The rbio doesn't belong to data block groups
+ * Then we are doing IO for tree blocks, no need to search csums.
+ *
+ * - The rbio belongs to mixed block groups
+ * This is to avoid a deadlock: we're already holding the full
+ * stripe lock, so if we trigger a metadata read that in turn
+ * needs to do raid56 recovery, we would deadlock.
+ */
+ if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
+ rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
+ return;
+
+ rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
+ fs_info->csum_size, GFP_NOFS);
+ rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
+ GFP_NOFS);
+ if (!rbio->csum_buf || !rbio->csum_bitmap) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
+ rbio->csum_buf, rbio->csum_bitmap);
+ if (ret < 0)
+ goto error;
+ if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
+ goto no_csum;
+ return;
+
+error:
+ /*
+ * We failed to allocate memory or grab the csum, but it's not fatal,
+ * we can still continue. But better to warn users that RMW is no
+ * longer safe for this particular sub-stripe write.
+ */
+ btrfs_warn_rl(fs_info,
+"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
+ rbio->bioc->full_stripe_logical, ret);
+no_csum:
+ kfree(rbio->csum_buf);
+ bitmap_free(rbio->csum_bitmap);
+ rbio->csum_buf = NULL;
+ rbio->csum_bitmap = NULL;
+}
+
+static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
+{
+ struct bio_list bio_list = BIO_EMPTY_LIST;
+ int total_sector_nr;
+ int ret = 0;
+
+ /*
+ * Fill the data csums we need for data verification. We need to fill
+ * the csum_bitmap/csum_buf first, as our endio function will try to
+ * verify the data sectors.
+ */
+ fill_data_csums(rbio);
+
+ /*
+ * Build a list of bios to read all sectors (including data and P/Q).
+ *
+ * This is to prepare for the later csum verification and recovery.
+ */
+ for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
+ total_sector_nr++) {
+ int stripe = total_sector_nr / rbio->stripe_nsectors;
+ int sectornr = total_sector_nr % rbio->stripe_nsectors;
+ phys_addr_t *paddrs;
+
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
+ sectornr, REQ_OP_READ);
+ if (ret) {
+ bio_list_put(&bio_list);
+ return ret;
+ }
+ }
+
+ /*
+ * We may or may not have corrupted sectors (due to missing devices or
+ * csum mismatches), just let recover_sectors() handle them all.
+ */
+ submit_read_wait_bio_list(rbio, &bio_list);
+ return recover_sectors(rbio);
+}
+
+static void raid_wait_write_end_io(struct bio *bio)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (bio->bi_status)
+ rbio_update_error_bitmap(rbio, bio);
+ bio_put(bio);
+ if (atomic_dec_and_test(&rbio->stripes_pending))
+ wake_up(&rbio->io_wait);
+}
+
+static void submit_write_bios(struct btrfs_raid_bio *rbio,
+ struct bio_list *bio_list)
+{
+ struct bio *bio;
+
+ atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
+ while ((bio = bio_list_pop(bio_list))) {
+ bio->bi_end_io = raid_wait_write_end_io;
+
+ if (trace_raid56_write_enabled()) {
+ struct raid56_bio_trace_info trace_info = { 0 };
+
+ bio_get_trace_info(rbio, bio, &trace_info);
+ trace_raid56_write(rbio, bio, &trace_info);
+ }
+ submit_bio(bio);
+ }
+}
+
+/*
+ * Determine if we need to read any sector from the disk.
+ * Should only be used in the RMW path, to skip the read for cached rbios.
+ */
+static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
+{
+ int i;
+
+ for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
+ phys_addr_t paddr = rbio->stripe_paddrs[i * rbio->sector_nsteps];
+
/*
- * 'mirror == 3' is to fail the p stripe and
- * reconstruct from the q stripe. 'mirror > 3' is to
- * fail a data stripe and reconstruct from p+q stripe.
+ * We have a sector which either has no page or is not uptodate,
+ * thus this rbio cannot be a cached one, as a cached rbio must
+ * have all its data sectors present and uptodate.
*/
- rbio->failb = rbio->real_stripes - (mirror_num - 1);
- ASSERT(rbio->failb > 0);
- if (rbio->failb <= rbio->faila)
- rbio->failb--;
+ if (paddr == INVALID_PADDR ||
+ !test_bit(i, rbio->stripe_uptodate_bitmap))
+ return true;
}
+ return false;
+}
- ret = lock_stripe_add(rbio);
+static void rmw_rbio(struct btrfs_raid_bio *rbio)
+{
+ struct bio_list bio_list;
+ int sectornr;
+ int ret = 0;
/*
- * __raid56_parity_recover will end the bio with
- * any errors it hits. We don't want to return
- * its error value up the stack because our caller
- * will end up calling bio_endio with any nonzero
- * return
+ * Allocate the pages for parity first, as P/Q pages will always be
+ * needed for both full-stripe and sub-stripe writes.
*/
- if (ret == 0)
- __raid56_parity_recover(rbio);
+ ret = alloc_rbio_parity_pages(rbio);
+ if (ret < 0)
+ goto out;
+
/*
- * our rbio has been added to the list of
- * rbios that will be handled after the
- * currently lock owner is done
+ * For a full stripe write, or when we have every data sector already
+ * cached, we can go to the write path immediately.
*/
- return 0;
+ if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
+ /*
+ * Now we're doing sub-stripe write, also need all data stripes
+ * to do the full RMW.
+ */
+ ret = alloc_rbio_data_pages(rbio);
+ if (ret < 0)
+ goto out;
+
+ index_rbio_pages(rbio);
+
+ ret = rmw_read_wait_recover(rbio);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+ * At this stage we're not allowed to add any new bios to the
+ * bio list any more; anyone else who wants to change this stripe
+ * needs to do their own RMW.
+ */
+ spin_lock(&rbio->bio_list_lock);
+ set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ spin_unlock(&rbio->bio_list_lock);
+
+ bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
+
+ index_rbio_pages(rbio);
+
+ /*
+ * We don't cache full rbios because we're assuming
+ * the higher layers are unlikely to use this area of
+ * the disk again soon. If they do use it again,
+ * hopefully they will send another full bio.
+ */
+ if (!rbio_is_full(rbio))
+ cache_rbio_pages(rbio);
+ else
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+ for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
+ generate_pq_vertical(rbio, sectornr);
+
+ bio_list_init(&bio_list);
+ ret = rmw_assemble_write_bios(rbio, &bio_list);
+ if (ret < 0)
+ goto out;
+
+ /* We should have at least one bio assembled. */
+ ASSERT(bio_list_size(&bio_list));
+ submit_write_bios(rbio, &bio_list);
+ wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
+
+ /* We may have more errors than our tolerance during the writes. */
+ for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
+ int found_errors;
+
+ found_errors = get_rbio_vertical_errors(rbio, sectornr, NULL, NULL);
+ if (unlikely(found_errors > rbio->bioc->max_errors)) {
+ ret = -EIO;
+ break;
+ }
+ }
+out:
+ rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}
-static void rmw_work(struct btrfs_work *work)
+static void rmw_rbio_work(struct work_struct *work)
{
struct btrfs_raid_bio *rbio;
rbio = container_of(work, struct btrfs_raid_bio, work);
- raid56_rmw_stripe(rbio);
+ if (lock_stripe_add(rbio) == 0)
+ rmw_rbio(rbio);
}
-static void read_rebuild_work(struct btrfs_work *work)
+static void rmw_rbio_work_locked(struct work_struct *work)
{
- struct btrfs_raid_bio *rbio;
-
- rbio = container_of(work, struct btrfs_raid_bio, work);
- __raid56_parity_recover(rbio);
+ rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
}
/*
@@ -2221,14 +2557,14 @@ static void read_rebuild_work(struct btrfs_work *work)
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
struct btrfs_io_context *bioc,
- u64 stripe_len, struct btrfs_device *scrub_dev,
+ struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
{
struct btrfs_fs_info *fs_info = bioc->fs_info;
struct btrfs_raid_bio *rbio;
int i;
- rbio = alloc_rbio(fs_info, bioc, stripe_len);
+ rbio = alloc_rbio(fs_info, bioc);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
@@ -2250,35 +2586,30 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
break;
}
}
- ASSERT(i < rbio->real_stripes);
-
- /* Now we just support the sectorsize equals to page size */
- ASSERT(fs_info->sectorsize == PAGE_SIZE);
- ASSERT(rbio->stripe_npages == stripe_nsectors);
- bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
-
- /*
- * We have already increased bio_counter when getting bioc, record it
- * so we can free it at rbio_orig_end_io().
- */
- rbio->generic_bio_cnt = 1;
+ ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i);
+ bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
return rbio;
}
-/* Used for both parity scrub and missing. */
-void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
- u64 logical)
+static int alloc_rbio_sector_pages(struct btrfs_raid_bio *rbio,
+ int sector_nr)
{
- int stripe_offset;
- int index;
+ const u32 step = min(PAGE_SIZE, rbio->bioc->fs_info->sectorsize);
+ const u32 base = sector_nr * rbio->sector_nsteps;
+
+ for (int i = base; i < base + rbio->sector_nsteps; i++) {
+ const unsigned int page_index = (i * step) >> PAGE_SHIFT;
+ struct page *page;
- ASSERT(logical >= rbio->bioc->raid_map[0]);
- ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
- rbio->stripe_len * rbio->nr_data);
- stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
- index = stripe_offset >> PAGE_SHIFT;
- rbio->bio_pages[index] = page;
+ if (rbio->stripe_pages[page_index])
+ continue;
+ page = alloc_page(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[page_index] = page;
+ }
+ return 0;
}
/*
@@ -2287,40 +2618,96 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
*/
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
- int i;
- int bit;
- int index;
- struct page *page;
+ int total_sector_nr;
- for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
- for (i = 0; i < rbio->real_stripes; i++) {
- index = i * rbio->stripe_npages + bit;
- if (rbio->stripe_pages[index])
- continue;
+ for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
+ total_sector_nr++) {
+ int sectornr = total_sector_nr % rbio->stripe_nsectors;
+ int ret;
- page = alloc_page(GFP_NOFS);
- if (!page)
- return -ENOMEM;
- rbio->stripe_pages[index] = page;
- }
+ if (!test_bit(sectornr, &rbio->dbitmap))
+ continue;
+ ret = alloc_rbio_sector_pages(rbio, total_sector_nr);
+ if (ret < 0)
+ return ret;
}
+ index_stripe_sectors(rbio);
return 0;
}
-static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
- int need_check)
+/* Return true if the content of the step matches the calculated one. */
+static bool verify_one_parity_step(struct btrfs_raid_bio *rbio,
+ void *pointers[], unsigned int sector_nr,
+ unsigned int step_nr)
+{
+ const unsigned int nr_data = rbio->nr_data;
+ const bool has_qstripe = (rbio->real_stripes - rbio->nr_data == 2);
+ const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
+ void *parity;
+ bool ret = false;
+
+ ASSERT(step_nr < rbio->sector_nsteps);
+
+ /* First collect one page from each data stripe. */
+ for (int stripe = 0; stripe < nr_data; stripe++)
+ pointers[stripe] = kmap_local_paddr(
+ sector_paddr_in_rbio(rbio, stripe, sector_nr,
+ step_nr, 0));
+
+ if (has_qstripe) {
+ assert_rbio(rbio);
+ /* RAID6, call the library function to fill in our P/Q. */
+ raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
+ } else {
+ /* RAID5. */
+ memcpy(pointers[nr_data], pointers[0], step);
+ run_xor(pointers + 1, nr_data - 1, step);
+ }
+
+ /* Check scrubbing parity and repair it. */
+ parity = kmap_local_paddr(rbio_stripe_paddr(rbio, rbio->scrubp, sector_nr, step_nr));
+ if (memcmp(parity, pointers[rbio->scrubp], step) != 0)
+ memcpy(parity, pointers[rbio->scrubp], step);
+ else
+ ret = true;
+ kunmap_local(parity);
+
+ for (int stripe = nr_data - 1; stripe >= 0; stripe--)
+ kunmap_local(pointers[stripe]);
+ return ret;
+}
+
+/*
+ * The @pointers array should have the P/Q parity already mapped.
+ */
+static void verify_one_parity_sector(struct btrfs_raid_bio *rbio,
+ void *pointers[], unsigned int sector_nr)
+{
+ bool found_error = false;
+
+ for (int step_nr = 0; step_nr < rbio->sector_nsteps; step_nr++) {
+ bool match;
+
+ match = verify_one_parity_step(rbio, pointers, sector_nr, step_nr);
+ if (!match)
+ found_error = true;
+ }
+ if (!found_error)
+ bitmap_clear(&rbio->dbitmap, sector_nr, 1);
+}
+
+static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
{
struct btrfs_io_context *bioc = rbio->bioc;
void **pointers = rbio->finish_pointers;
- unsigned long *pbitmap = rbio->finish_pbitmap;
+ unsigned long *pbitmap = &rbio->finish_pbitmap;
int nr_data = rbio->nr_data;
- int stripe;
- int pagenr;
+ int sectornr;
bool has_qstripe;
- struct page *p_page = NULL;
- struct page *q_page = NULL;
+ struct page *page;
+ phys_addr_t p_paddr = INVALID_PADDR;
+ phys_addr_t q_paddr = INVALID_PADDR;
struct bio_list bio_list;
- struct bio *bio;
int is_replace = 0;
int ret;
@@ -2333,9 +2720,13 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
else
BUG();
- if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
+ /*
+ * If replace is running and our P/Q stripe is being replaced, then we
+ * need to duplicate the final write to the replace target.
+ */
+ if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
is_replace = 1;
- bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
+ bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
}
/*
@@ -2345,82 +2736,52 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
*/
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
- if (!need_check)
- goto writeback;
-
- p_page = alloc_page(GFP_NOFS);
- if (!p_page)
- goto cleanup;
- SetPageUptodate(p_page);
+ page = alloc_page(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+ p_paddr = page_to_phys(page);
+ page = NULL;
+ pointers[nr_data] = kmap_local_paddr(p_paddr);
if (has_qstripe) {
/* RAID6, allocate and map temp space for the Q stripe */
- q_page = alloc_page(GFP_NOFS);
- if (!q_page) {
- __free_page(p_page);
- goto cleanup;
+ page = alloc_page(GFP_NOFS);
+ if (!page) {
+ __free_page(phys_to_page(p_paddr));
+ p_paddr = INVALID_PADDR;
+ return -ENOMEM;
}
- SetPageUptodate(q_page);
- pointers[rbio->real_stripes - 1] = kmap_local_page(q_page);
+ q_paddr = page_to_phys(page);
+ page = NULL;
+ pointers[rbio->real_stripes - 1] = kmap_local_paddr(q_paddr);
}
- atomic_set(&rbio->error, 0);
+ bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
/* Map the parity stripe just once */
- pointers[nr_data] = kmap_local_page(p_page);
-
- for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
- struct page *p;
- void *parity;
- /* first collect one page from each data stripe */
- for (stripe = 0; stripe < nr_data; stripe++) {
- p = page_in_rbio(rbio, stripe, pagenr, 0);
- pointers[stripe] = kmap_local_page(p);
- }
-
- if (has_qstripe) {
- /* RAID6, call the library function to fill in our P/Q */
- raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
- pointers);
- } else {
- /* raid5 */
- copy_page(pointers[nr_data], pointers[0]);
- run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
- }
- /* Check scrubbing parity and repair it */
- p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
- parity = kmap_local_page(p);
- if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
- copy_page(parity, pointers[rbio->scrubp]);
- else
- /* Parity is right, needn't writeback */
- bitmap_clear(rbio->dbitmap, pagenr, 1);
- kunmap_local(parity);
-
- for (stripe = nr_data - 1; stripe >= 0; stripe--)
- kunmap_local(pointers[stripe]);
- }
+ for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors)
+ verify_one_parity_sector(rbio, pointers, sectornr);
kunmap_local(pointers[nr_data]);
- __free_page(p_page);
- if (q_page) {
- kunmap_local(pointers[rbio->real_stripes - 1]);
- __free_page(q_page);
+ __free_page(phys_to_page(p_paddr));
+ p_paddr = INVALID_PADDR;
+ if (q_paddr != INVALID_PADDR) {
+ __free_page(phys_to_page(q_paddr));
+ q_paddr = INVALID_PADDR;
}
-writeback:
/*
* time to start writing. Make bios for everything from the
* higher layers (the bio_list in our rbio) and our p/q. Ignore
* everything else.
*/
- for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
- struct page *page;
+ for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
+ phys_addr_t *paddrs;
- page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
- ret = rbio_add_io_page(rbio, &bio_list,
- page, rbio->scrubp, pagenr, rbio->stripe_len);
+ paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->scrubp,
+ sectornr, REQ_OP_WRITE);
if (ret)
goto cleanup;
}
@@ -2428,41 +2789,28 @@ writeback:
if (!is_replace)
goto submit_write;
- for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
- struct page *page;
+ /*
+ * Replace is running and our parity stripe needs to be duplicated to
+ * the target device. Check we have a valid source stripe number.
+ */
+ ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio);
+ for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
+ phys_addr_t *paddrs;
- page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
- ret = rbio_add_io_page(rbio, &bio_list, page,
- bioc->tgtdev_map[rbio->scrubp],
- pagenr, rbio->stripe_len);
+ paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr);
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->real_stripes,
+ sectornr, REQ_OP_WRITE);
if (ret)
goto cleanup;
}
submit_write:
- nr_data = bio_list_size(&bio_list);
- if (!nr_data) {
- /* Every parity is right */
- rbio_orig_end_io(rbio, BLK_STS_OK);
- return;
- }
-
- atomic_set(&rbio->stripes_pending, nr_data);
-
- while ((bio = bio_list_pop(&bio_list))) {
- bio->bi_private = rbio;
- bio->bi_end_io = raid_write_end_io;
- bio->bi_opf = REQ_OP_WRITE;
-
- submit_bio(bio);
- }
- return;
+ submit_write_bios(rbio, &bio_list);
+ return 0;
cleanup:
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
-
- while ((bio = bio_list_pop(&bio_list)))
- bio_put(bio);
+ bio_list_put(&bio_list);
+ return ret;
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
@@ -2472,238 +2820,238 @@ static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
return 0;
}
-/*
- * While we're doing the parity check and repair, we could have errors
- * in reading pages off the disk. This checks for errors and if we're
- * not able to read the page it'll trigger parity reconstruction. The
- * parity scrub will be finished after we've reconstructed the failed
- * stripes
- */
-static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
+static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
- if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
- goto cleanup;
+ void **pointers = NULL;
+ void **unmap_array = NULL;
+ int sector_nr;
+ int ret = 0;
- if (rbio->faila >= 0 || rbio->failb >= 0) {
+ /*
+ * The @pointers array stores the pointer for each sector.
+ *
+ * The @unmap_array stores a copy of the pointers that does not get
+ * reordered during reconstruction, so that kunmap_local() works.
+ */
+ pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
+ unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
+ if (!pointers || !unmap_array) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
int dfail = 0, failp = -1;
+ int faila;
+ int failb;
+ int found_errors;
+
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr,
+ &faila, &failb);
+ if (unlikely(found_errors > rbio->bioc->max_errors)) {
+ ret = -EIO;
+ goto out;
+ }
+ if (found_errors == 0)
+ continue;
- if (is_data_stripe(rbio, rbio->faila))
- dfail++;
- else if (is_parity_stripe(rbio->faila))
- failp = rbio->faila;
+ /* We should have at least one error here. */
+ ASSERT(faila >= 0 || failb >= 0);
- if (is_data_stripe(rbio, rbio->failb))
+ if (is_data_stripe(rbio, faila))
dfail++;
- else if (is_parity_stripe(rbio->failb))
- failp = rbio->failb;
+ else if (is_parity_stripe(faila))
+ failp = faila;
+ if (is_data_stripe(rbio, failb))
+ dfail++;
+ else if (is_parity_stripe(failb))
+ failp = failb;
/*
- * Because we can not use a scrubbing parity to repair
- * the data, so the capability of the repair is declined.
- * (In the case of RAID5, we can not repair anything)
+ * Because we cannot use a scrubbing parity to repair the
+ * data, the repair capability is reduced. (In the case of
+ * RAID5, we cannot repair anything.)
*/
- if (dfail > rbio->bioc->max_errors - 1)
- goto cleanup;
-
+ if (unlikely(dfail > rbio->bioc->max_errors - 1)) {
+ ret = -EIO;
+ goto out;
+ }
/*
- * If all data is good, only parity is correctly, just
- * repair the parity.
+ * If all the data is good and only the parity is bad, just repair
+ * the parity, no need to recover data stripes.
*/
- if (dfail == 0) {
- finish_parity_scrub(rbio, 0);
- return;
- }
+ if (dfail == 0)
+ continue;
/*
* Here means we got one corrupted data stripe and one
- * corrupted parity on RAID6, if the corrupted parity
- * is scrubbing parity, luckily, use the other one to repair
- * the data, or we can not repair the data stripe.
+ * corrupted parity on RAID6. If the corrupted parity is the
+ * scrubbing parity, we can luckily use the other one to repair the
+ * data; otherwise we cannot repair the data stripe.
*/
- if (failp != rbio->scrubp)
- goto cleanup;
+ if (unlikely(failp != rbio->scrubp)) {
+ ret = -EIO;
+ goto out;
+ }
- __raid_recover_end_io(rbio);
- } else {
- finish_parity_scrub(rbio, 1);
+ ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
+ if (ret < 0)
+ goto out;
}
- return;
-
-cleanup:
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
+out:
+ kfree(pointers);
+ kfree(unmap_array);
+ return ret;
}
-/*
- * end io for the read phase of the rmw cycle. All the bios here are physical
- * stripe bios we've read from the disk so we can recalculate the parity of the
- * stripe.
- *
- * This will usually kick off finish_rmw once all the bios are read in, but it
- * may trigger parity reconstruction if we had any errors along the way
- */
-static void raid56_parity_scrub_end_io(struct bio *bio)
+static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
{
- struct btrfs_raid_bio *rbio = bio->bi_private;
+ struct bio_list bio_list = BIO_EMPTY_LIST;
+ int total_sector_nr;
+ int ret = 0;
- if (bio->bi_status)
- fail_bio_stripe(rbio, bio);
- else
- set_bio_pages_uptodate(bio);
+ /* Build a list of bios to read all the missing parts. */
+ for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
+ total_sector_nr++) {
+ int sectornr = total_sector_nr % rbio->stripe_nsectors;
+ int stripe = total_sector_nr / rbio->stripe_nsectors;
+ phys_addr_t *paddrs;
- bio_put(bio);
+ /* No data in the vertical stripe, no need to read. */
+ if (!test_bit(sectornr, &rbio->dbitmap))
+ continue;
- if (!atomic_dec_and_test(&rbio->stripes_pending))
- return;
+ /*
+ * We want to find all the sectors missing from the rbio and
+ * read them from the disk. If sector_paddrs_in_rbio() finds a sector
+ * in the bio list we don't need to read it off the stripe.
+ */
+ paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
+ if (paddrs == NULL)
+ continue;
- /*
- * this will normally call finish_rmw to start our write
- * but if there are any failed stripes we'll reconstruct
- * from parity first
- */
- validate_rbio_for_parity_scrub(rbio);
+ paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
+ /*
+ * The bio cache may have handed us an uptodate sector. If so,
+ * use it.
+ */
+ if (test_bit(rbio_sector_index(rbio, stripe, sectornr),
+ rbio->stripe_uptodate_bitmap))
+ continue;
+
+ ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
+ sectornr, REQ_OP_READ);
+ if (ret) {
+ bio_list_put(&bio_list);
+ return ret;
+ }
+ }
+
+ submit_read_wait_bio_list(rbio, &bio_list);
+ return 0;
}
-static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
+static void scrub_rbio(struct btrfs_raid_bio *rbio)
{
- int bios_to_read = 0;
- struct bio_list bio_list;
+ int sector_nr;
int ret;
- int pagenr;
- int stripe;
- struct bio *bio;
-
- bio_list_init(&bio_list);
ret = alloc_rbio_essential_pages(rbio);
if (ret)
- goto cleanup;
+ goto out;
- atomic_set(&rbio->error, 0);
- /*
- * build a list of bios to read all the missing parts of this
- * stripe
- */
- for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
- for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
- struct page *page;
- /*
- * we want to find all the pages missing from
- * the rbio and read them from the disk. If
- * page_in_rbio finds a page in the bio list
- * we don't need to read it off the stripe.
- */
- page = page_in_rbio(rbio, stripe, pagenr, 1);
- if (page)
- continue;
+ bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
- page = rbio_stripe_page(rbio, stripe, pagenr);
- /*
- * the bio cache may have handed us an uptodate
- * page. If so, be happy and use it
- */
- if (PageUptodate(page))
- continue;
-
- ret = rbio_add_io_page(rbio, &bio_list, page,
- stripe, pagenr, rbio->stripe_len);
- if (ret)
- goto cleanup;
- }
- }
+ ret = scrub_assemble_read_bios(rbio);
+ if (ret < 0)
+ goto out;
- bios_to_read = bio_list_size(&bio_list);
- if (!bios_to_read) {
- /*
- * this can happen if others have merged with
- * us, it means there is nothing left to read.
- * But if there are missing devices it may not be
- * safe to do the full stripe write yet.
- */
- goto finish;
- }
+ /* We may have some failures, recover the failed sectors first. */
+ ret = recover_scrub_rbio(rbio);
+ if (ret < 0)
+ goto out;
/*
- * The bioc may be freed once we submit the last bio. Make sure not to
- * touch it after that.
+ * We have every sector properly prepared. Now we can finish the
+ * scrub and write back the good content.
*/
- atomic_set(&rbio->stripes_pending, bios_to_read);
- while ((bio = bio_list_pop(&bio_list))) {
- bio->bi_private = rbio;
- bio->bi_end_io = raid56_parity_scrub_end_io;
- bio->bi_opf = REQ_OP_READ;
-
- btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
-
- submit_bio(bio);
+ ret = finish_parity_scrub(rbio);
+ wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
+ for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
+ int found_errors;
+
+ found_errors = get_rbio_vertical_errors(rbio, sector_nr, NULL, NULL);
+ if (unlikely(found_errors > rbio->bioc->max_errors)) {
+ ret = -EIO;
+ break;
+ }
}
- /* the actual write will happen once the reads are done */
- return;
-
-cleanup:
- rbio_orig_end_io(rbio, BLK_STS_IOERR);
-
- while ((bio = bio_list_pop(&bio_list)))
- bio_put(bio);
-
- return;
-
-finish:
- validate_rbio_for_parity_scrub(rbio);
+out:
+ rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}
-static void scrub_parity_work(struct btrfs_work *work)
+static void scrub_rbio_work_locked(struct work_struct *work)
{
- struct btrfs_raid_bio *rbio;
-
- rbio = container_of(work, struct btrfs_raid_bio, work);
- raid56_parity_scrub_stripe(rbio);
+ scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
}
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
- start_async_work(rbio, scrub_parity_work);
+ start_async_work(rbio, scrub_rbio_work_locked);
}
-/* The following code is used for dev replace of a missing RAID 5/6 device. */
-
-struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc,
- u64 length)
-{
- struct btrfs_fs_info *fs_info = bioc->fs_info;
- struct btrfs_raid_bio *rbio;
-
- rbio = alloc_rbio(fs_info, bioc, length);
- if (IS_ERR(rbio))
- return NULL;
+/*
+ * This is for scrub call sites where we already have correct data contents.
+ * This allows us to avoid reading the data stripes again.
+ *
+ * Unfortunately here we have to copy the folios rather than reuse the pages,
+ * because the rbio has its own page management for its cache.
+ * (A usage sketch follows this function.)
+ */
+void raid56_parity_cache_data_folios(struct btrfs_raid_bio *rbio,
+ struct folio **data_folios, u64 data_logical)
+{
+ struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
+ const u64 offset_in_full_stripe = data_logical -
+ rbio->bioc->full_stripe_logical;
+ unsigned int findex = 0;
+ unsigned int foffset = 0;
+ int ret;
- rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
- bio_list_add(&rbio->bio_list, bio);
/*
- * This is a special bio which is used to hold the completion handler
- * and make the scrub rbio is similar to the other types
+ * If we hit ENOMEM here but the later allocation at
+ * raid56_parity_submit_scrub_rbio() time succeeds, we just do
+ * the extra read, not a big deal.
+ *
+ * If we hit ENOMEM later at raid56_parity_submit_scrub_rbio() time,
+ * the bio will get a proper error number set.
*/
- ASSERT(!bio->bi_iter.bi_size);
+ ret = alloc_rbio_data_pages(rbio);
+ if (ret < 0)
+ return;
- rbio->faila = find_logical_bio_stripe(rbio, bio);
- if (rbio->faila == -1) {
- BUG();
- kfree(rbio);
- return NULL;
+ /* data_logical must be at stripe boundary and inside the full stripe. */
+ ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
+ ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));
+
+ for (unsigned int cur_off = offset_in_full_stripe;
+ cur_off < offset_in_full_stripe + BTRFS_STRIPE_LEN;
+ cur_off += PAGE_SIZE) {
+ const unsigned int pindex = cur_off >> PAGE_SHIFT;
+ void *kaddr;
+
+ kaddr = kmap_local_page(rbio->stripe_pages[pindex]);
+ memcpy_from_folio(kaddr, data_folios[findex], foffset, PAGE_SIZE);
+ kunmap_local(kaddr);
+
+ foffset += PAGE_SIZE;
+ ASSERT(foffset <= folio_size(data_folios[findex]));
+ if (foffset == folio_size(data_folios[findex])) {
+ findex++;
+ foffset = 0;
+ }
}
-
- /*
- * When we get bioc, we have already increased bio_counter, record it
- * so we can free it at rbio_orig_end_io()
- */
- rbio->generic_bio_cnt = 1;
-
- return rbio;
-}
-
-void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
-{
- if (!lock_stripe_add(rbio))
- start_async_work(rbio, read_rebuild_work);
+ bitmap_set(rbio->stripe_uptodate_bitmap,
+ offset_in_full_stripe >> fs_info->sectorsize_bits,
+ BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
}
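
/*
 * Illustrative usage sketch (editorial, not part of this patch): how a scrub
 * call site that already holds verified data for one data stripe might drive
 * the interfaces above.  Everything apart from the exported raid56_*()
 * functions (the caller-provided bio, bioc, scrub_dev, dbitmap, folio array
 * and the helper name itself) is an assumption for demonstration.
 */
static int example_scrub_one_stripe(struct bio *bio,
				    struct btrfs_io_context *bioc,
				    struct btrfs_device *scrub_dev,
				    unsigned long *dbitmap, int stripe_nsectors,
				    struct folio **data_folios, u64 data_logical)
{
	struct btrfs_raid_bio *rbio;

	/* Allocate the scrub rbio; NULL is returned on failure. */
	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev,
					      dbitmap, stripe_nsectors);
	if (!rbio)
		return -ENOMEM;

	/* Seed the rbio with the already verified data, avoiding a re-read. */
	raid56_parity_cache_data_folios(rbio, data_folios, data_logical);

	/* Queue the scrub; completion is reported through @bio's endio. */
	raid56_parity_submit_scrub_rbio(rbio);
	return 0;
}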
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 72c00fc284b5..1f463ecf7e41 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -7,47 +7,283 @@
#ifndef BTRFS_RAID56_H
#define BTRFS_RAID56_H
-static inline int nr_parity_stripes(const struct map_lookup *map)
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/bio.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+#include "volumes.h"
+
+struct page;
+struct btrfs_fs_info;
+
+enum btrfs_rbio_ops {
+ BTRFS_RBIO_WRITE,
+ BTRFS_RBIO_READ_REBUILD,
+ BTRFS_RBIO_PARITY_SCRUB,
+};
+
+/*
+ * Overview of btrfs_raid_bio.
+ *
+ * One btrfs_raid_bio represents a full stripe of RAID56, including both data
+ * and P/Q stripes. For now, each data and P/Q stripe is of a fixed length (64K).
+ *
+ * One btrfs_raid_bio can have one or more bios from higher layer, covering
+ * part or all of the data stripes.
+ *
+ * [PAGES FROM HIGHER LAYER BIOS]
+ * Higher layer bios are in the btrfs_raid_bio::bio_list.
+ *
+ * Pages from the bio_list are represented like the following:
+ *
+ * bio_list: |<- Bio 1 ->| |<- Bio 2 ->| ...
+ * bio_paddrs: [0] [1] [2] [3] [4] [5] ...
+ *
+ * If there is a bio covering a sector (one btrfs fs block), the corresponding
+ * entry in btrfs_raid_bio::bio_paddrs[] will hold the physical address
+ * (including the offset inside the page) of that sector's data in the bio.
+ *
+ * If there is no bio covering a sector, then btrfs_raid_bio::bio_paddrs[i] will
+ * be INVALID_PADDR.
+ *
+ * Each entry in bio_paddrs[] covers one step (i.e. min(sectorsize, PAGE_SIZE)) of data.
+ *
+ * [PAGES FOR INTERNAL USAGES]
+ * Pages not covered by any bio or belonging to P/Q stripes are stored in
+ * btrfs_raid_bio::stripe_pages[] and stripe_paddrs[], like the following:
+ *
+ * stripe_pages: |<- Page 0 ->|<- Page 1 ->| ...
+ * stripe_paddrs: [0] [1] [2] [3] [4] ...
+ *
+ * stripe_pages[] array stores all the pages covering the full stripe, including
+ * data and P/Q pages.
+ * stripe_pages[0] is the first page of the first data stripe.
+ * stripe_pages[BTRFS_STRIPE_LEN / PAGE_SIZE] is the first page of the second
+ * data stripe.
+ *
+ * Some pointers inside stripe_pages[] can be NULL, e.g. for a full stripe write
+ * (the bio covers all data stripes) there is no need to allocate pages for
+ * data stripes (can grab from bio_paddrs[]).
+ *
+ * If the corresponding page of stripe_paddrs[i] is not allocated, the value of
+ * stripe_paddrs[i] will be INVALID_PADDR.
+ *
+ * Each entry in stripe_paddrs[] covers one step of data.
+ *
+ * [LOCATING A SECTOR]
+ * To locate a sector for IO, we need the following info:
+ *
+ * - stripe_nr
+ * Starts from 0 (representing the first data stripe), ends at
+ * @nr_data (RAID5, P stripe) or @nr_data + 1 (RAID6, Q stripe).
+ *
+ * - sector_nr
+ * Starts from 0 (representing the first sector of the stripe), ends
+ * at BTRFS_STRIPE_LEN / sectorsize - 1.
+ *
+ * - step_nr
+ * A step is min(sectorsize, PAGE_SIZE).
+ *
+ * Starts from 0 (representing the first step of the sector), ends
+ * at @sector_nsteps - 1.
+ *
+ * Most call sites do not need to bother with this parameter.
+ * It only matters for bs > ps support, and only for vertical stripe
+ * related work (e.g. RMW/recover).
+ *
+ * - from which array
+ * Whether grabbing from stripe_paddrs[] (aka, internal pages) or from the
+ * bio_paddrs[] (aka, from the higher layer bios).
+ *
+ * For IO, a physical address is returned, so that we can extract the page and
+ * the offset inside the page for IO.
+ * The special value INVALID_PADDR indicates an invalid physical address,
+ * normally meaning there is no page allocated for the specified sector.
+ *
+ * (An illustrative index-calculation sketch follows the struct definition
+ *  below.)
+ */
+struct btrfs_raid_bio {
+ struct btrfs_io_context *bioc;
+
+ /*
+ * While we're doing RMW on a stripe we put it into a hash table so we
+ * can lock the stripe and merge more rbios into it.
+ */
+ struct list_head hash_list;
+
+ /* LRU list for the stripe cache */
+ struct list_head stripe_cache;
+
+ /* For scheduling work in the helper threads */
+ struct work_struct work;
+
+ /*
+ * bio_list and bio_list_lock are used to add more bios into the stripe
+ * in hopes of avoiding the full RMW
+ */
+ struct bio_list bio_list;
+ spinlock_t bio_list_lock;
+
+ /*
+ * Also protected by the bio_list_lock, the plug list is used by the
+ * plugging code to collect partial bios while plugged. The stripe
+ * locking code also uses it to hand off the stripe lock to the next
+ * pending IO.
+ */
+ struct list_head plug_list;
+
+ /* Flags that tell us if it is safe to merge with this bio. */
+ unsigned long flags;
+
+ /*
+ * Set if we're doing a parity rebuild for a read from higher up, which
+ * is handled differently from a parity rebuild as part of RMW.
+ */
+ enum btrfs_rbio_ops operation;
+
+ /* How many pages there are for the full stripe including P/Q */
+ u16 nr_pages;
+
+ /* How many sectors there are for the full stripe including P/Q */
+ u16 nr_sectors;
+
+ /* Number of data stripes (no p/q) */
+ u8 nr_data;
+
+ /* Number of all stripes (including P/Q) */
+ u8 real_stripes;
+
+ /* How many pages there are for each stripe */
+ u8 stripe_npages;
+
+ /* How many sectors there are for each stripe */
+ u8 stripe_nsectors;
+
+ /*
+ * How many steps there are for one sector.
+ *
+ * For bs > ps cases, it's sectorsize / PAGE_SIZE.
+ * For bs <= ps cases, it's always 1.
+ */
+ u8 sector_nsteps;
+
+ /* Stripe number that we're scrubbing */
+ u8 scrubp;
+
+ /*
+ * Size of all the bios in the bio_list. This helps us decide if the
+ * rbio maps to a full stripe or not.
+ */
+ int bio_list_bytes;
+
+ refcount_t refs;
+
+ atomic_t stripes_pending;
+
+ wait_queue_head_t io_wait;
+
+ /* Bitmap to record which horizontal stripe has data */
+ unsigned long dbitmap;
+
+ /* Allocated with stripe_nsectors-many bits for finish_*() calls */
+ unsigned long finish_pbitmap;
+
+ /*
+ * The following are arrays of pointers. We allocate the rbio big enough
+ * to hold them all and set up their locations when the rbio is
+ * allocated.
+ */
+
+ /*
+ * Pointers to pages that we allocated for reading/writing stripes
+ * directly from the disk (including P/Q).
+ */
+ struct page **stripe_pages;
+
+ /* Pointers to the sectors in the bio_list, for faster lookup */
+ phys_addr_t *bio_paddrs;
+
+ /* Pointers to the sectors in the stripe_pages[]. */
+ phys_addr_t *stripe_paddrs;
+
+ /* Each set bit means the corresponding sector in stripe_paddrs[] is uptodate. */
+ unsigned long *stripe_uptodate_bitmap;
+
+ /* Allocated with real_stripes-many pointers for finish_*() calls */
+ void **finish_pointers;
+
+ /*
+ * The bitmap recording where IO errors happened.
+ * Each bit corresponds to one sector in either the bio_paddrs[] or
+ * the stripe_paddrs[] array.
+ */
+ unsigned long *error_bitmap;
+
+ /*
+ * Checksum buffer if the rbio is for data. The buffer should cover
+ * all data sectors (excluding P/Q sectors).
+ */
+ u8 *csum_buf;
+
+ /*
+ * Each bit represents if the corresponding sector has data csum found.
+ * Should only cover data sectors (excluding P/Q sectors).
+ */
+ unsigned long *csum_bitmap;
+};
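
/*
 * Illustrative sketch (editorial, not part of this patch): one way the
 * [LOCATING A SECTOR] scheme described above can be turned into a flat array
 * index and a physical address.  The helper name and the exact index math are
 * assumptions for demonstration; INVALID_PADDR is the sentinel used
 * throughout this patch for "no page backing this step".
 */
static inline phys_addr_t example_locate_step(const struct btrfs_raid_bio *rbio,
					      unsigned int stripe_nr,
					      unsigned int sector_nr,
					      unsigned int step_nr)
{
	/* Stripes are laid out back to back; every step is one array entry. */
	const unsigned int index =
		(stripe_nr * rbio->stripe_nsectors + sector_nr) *
		rbio->sector_nsteps + step_nr;

	/* Data stripes may be backed directly by the higher layer bios. */
	if (stripe_nr < rbio->nr_data &&
	    rbio->bio_paddrs[index] != INVALID_PADDR)
		return rbio->bio_paddrs[index];

	/* Otherwise fall back to the internally allocated stripe pages. */
	return rbio->stripe_paddrs[index];
}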
+
+/*
+ * For trace event usage only. Records useful debug info for each bio submitted
+ * by RAID56 to each physical device.
+ *
+ * Whether the field is signed or not, (-1) always indicates that we
+ * could not determine the proper stripe number.
+ */
+struct raid56_bio_trace_info {
+ u64 devid;
+
+ /* The offset inside the stripe. (<= STRIPE_LEN) */
+ u32 offset;
+
+ /*
+ * Stripe number.
+ * 0 is the first data stripe, and nr_data for P stripe,
+ * nr_data + 1 for Q stripe.
+ * >= real_stripes for
+ */
+ u8 stripe_nr;
+};
+
+static inline int nr_data_stripes(const struct btrfs_chunk_map *map)
{
- if (map->type & BTRFS_BLOCK_GROUP_RAID5)
- return 1;
- else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
- return 2;
- else
- return 0;
+ return map->num_stripes - btrfs_nr_parity_stripes(map->type);
}
-static inline int nr_data_stripes(const struct map_lookup *map)
+static inline int nr_bioc_data_stripes(const struct btrfs_io_context *bioc)
{
- return map->num_stripes - nr_parity_stripes(map);
+ return bioc->num_stripes - btrfs_nr_parity_stripes(bioc->map_type);
}
+
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
((x) == RAID6_Q_STRIPE))
-struct btrfs_raid_bio;
struct btrfs_device;
-int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
- u64 stripe_len, int mirror_num, int generic_io);
-int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc,
- u64 stripe_len);
-
-void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
- u64 logical);
+void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
+ int mirror_num);
+void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
- struct btrfs_io_context *bioc, u64 stripe_len,
+ struct btrfs_io_context *bioc,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
-struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc,
- u64 length);
-void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
+void raid56_parity_cache_data_folios(struct btrfs_raid_bio *rbio,
+ struct folio **data_folios, u64 data_logical);
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
deleted file mode 100644
index 5c1a617eb25d..000000000000
--- a/fs/btrfs/rcu-string.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Red Hat. All rights reserved.
- */
-
-#ifndef BTRFS_RCU_STRING_H
-#define BTRFS_RCU_STRING_H
-
-struct rcu_string {
- struct rcu_head rcu;
- char str[];
-};
-
-static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
-{
- size_t len = strlen(src) + 1;
- struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
- (len * sizeof(char)), mask);
- if (!ret)
- return ret;
- strncpy(ret->str, src, len);
- return ret;
-}
-
-static inline void rcu_string_free(struct rcu_string *str)
-{
- if (str)
- kfree_rcu(str, rcu);
-}
-
-#define printk_in_rcu(fmt, ...) do { \
- rcu_read_lock(); \
- printk(fmt, __VA_ARGS__); \
- rcu_read_unlock(); \
-} while (0)
-
-#define printk_ratelimited_in_rcu(fmt, ...) do { \
- rcu_read_lock(); \
- printk_ratelimited(fmt, __VA_ARGS__); \
- rcu_read_unlock(); \
-} while (0)
-
-#define rcu_str_deref(rcu_str) ({ \
- struct rcu_string *__str = rcu_dereference(rcu_str); \
- __str->str; \
-})
-
-#endif
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
deleted file mode 100644
index eb96fdc3be25..000000000000
--- a/fs/btrfs/reada.c
+++ /dev/null
@@ -1,1086 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2011 STRATO. All rights reserved.
- */
-
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include "ctree.h"
-#include "volumes.h"
-#include "disk-io.h"
-#include "transaction.h"
-#include "dev-replace.h"
-#include "block-group.h"
-
-#undef DEBUG
-
-/*
- * This is the implementation for the generic read ahead framework.
- *
- * To trigger a readahead, btrfs_reada_add must be called. It will start
- * a read ahead for the given range [start, end) on tree root. The returned
- * handle can either be used to wait on the readahead to finish
- * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
- *
- * The read ahead works as follows:
- * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
- * reada_start_machine will then search for extents to prefetch and trigger
- * some reads. When a read finishes for a node, all contained node/leaf
- * pointers that lie in the given range will also be enqueued. The reads will
- * be triggered in sequential order, thus giving a big win over a naive
- * enumeration. It will also make use of multi-device layouts. Each disk
- * will have its on read pointer and all disks will by utilized in parallel.
- * Also will no two disks read both sides of a mirror simultaneously, as this
- * would waste seeking capacity. Instead both disks will read different parts
- * of the filesystem.
- * Any number of readaheads can be started in parallel. The read order will be
- * determined globally, i.e. 2 parallel readaheads will normally finish faster
- * than the 2 started one after another.
- */
-
-#define MAX_IN_FLIGHT 6
-
-struct reada_extctl {
- struct list_head list;
- struct reada_control *rc;
- u64 generation;
-};
-
-struct reada_extent {
- u64 logical;
- u64 owner_root;
- struct btrfs_key top;
- struct list_head extctl;
- int refcnt;
- spinlock_t lock;
- struct reada_zone *zones[BTRFS_MAX_MIRRORS];
- int nzones;
- int scheduled;
- int level;
-};
-
-struct reada_zone {
- u64 start;
- u64 end;
- u64 elems;
- struct list_head list;
- spinlock_t lock;
- int locked;
- struct btrfs_device *device;
- struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl
- * self */
- int ndevs;
- struct kref refcnt;
-};
-
-struct reada_machine_work {
- struct btrfs_work work;
- struct btrfs_fs_info *fs_info;
-};
-
-static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
-static void reada_control_release(struct kref *kref);
-static void reada_zone_release(struct kref *kref);
-static void reada_start_machine(struct btrfs_fs_info *fs_info);
-static void __reada_start_machine(struct btrfs_fs_info *fs_info);
-
-static int reada_add_block(struct reada_control *rc, u64 logical,
- struct btrfs_key *top, u64 owner_root,
- u64 generation, int level);
-
-/* recurses */
-/* in case of err, eb might be NULL */
-static void __readahead_hook(struct btrfs_fs_info *fs_info,
- struct reada_extent *re, struct extent_buffer *eb,
- int err)
-{
- int nritems;
- int i;
- u64 bytenr;
- u64 generation;
- struct list_head list;
-
- spin_lock(&re->lock);
- /*
- * just take the full list from the extent. afterwards we
- * don't need the lock anymore
- */
- list_replace_init(&re->extctl, &list);
- re->scheduled = 0;
- spin_unlock(&re->lock);
-
- /*
- * this is the error case, the extent buffer has not been
- * read correctly. We won't access anything from it and
- * just cleanup our data structures. Effectively this will
- * cut the branch below this node from read ahead.
- */
- if (err)
- goto cleanup;
-
- /*
- * FIXME: currently we just set nritems to 0 if this is a leaf,
- * effectively ignoring the content. In a next step we could
- * trigger more readahead depending from the content, e.g.
- * fetch the checksums for the extents in the leaf.
- */
- if (!btrfs_header_level(eb))
- goto cleanup;
-
- nritems = btrfs_header_nritems(eb);
- generation = btrfs_header_generation(eb);
- for (i = 0; i < nritems; i++) {
- struct reada_extctl *rec;
- u64 n_gen;
- struct btrfs_key key;
- struct btrfs_key next_key;
-
- btrfs_node_key_to_cpu(eb, &key, i);
- if (i + 1 < nritems)
- btrfs_node_key_to_cpu(eb, &next_key, i + 1);
- else
- next_key = re->top;
- bytenr = btrfs_node_blockptr(eb, i);
- n_gen = btrfs_node_ptr_generation(eb, i);
-
- list_for_each_entry(rec, &list, list) {
- struct reada_control *rc = rec->rc;
-
- /*
- * if the generation doesn't match, just ignore this
- * extctl. This will probably cut off a branch from
- * prefetch. Alternatively one could start a new (sub-)
- * prefetch for this branch, starting again from root.
- * FIXME: move the generation check out of this loop
- */
-#ifdef DEBUG
- if (rec->generation != generation) {
- btrfs_debug(fs_info,
- "generation mismatch for (%llu,%d,%llu) %llu != %llu",
- key.objectid, key.type, key.offset,
- rec->generation, generation);
- }
-#endif
- if (rec->generation == generation &&
- btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
- btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
- reada_add_block(rc, bytenr, &next_key,
- btrfs_header_owner(eb), n_gen,
- btrfs_header_level(eb) - 1);
- }
- }
-
-cleanup:
- /*
- * free extctl records
- */
- while (!list_empty(&list)) {
- struct reada_control *rc;
- struct reada_extctl *rec;
-
- rec = list_first_entry(&list, struct reada_extctl, list);
- list_del(&rec->list);
- rc = rec->rc;
- kfree(rec);
-
- kref_get(&rc->refcnt);
- if (atomic_dec_and_test(&rc->elems)) {
- kref_put(&rc->refcnt, reada_control_release);
- wake_up(&rc->wait);
- }
- kref_put(&rc->refcnt, reada_control_release);
-
- reada_extent_put(fs_info, re); /* one ref for each entry */
- }
-
- return;
-}
-
-int btree_readahead_hook(struct extent_buffer *eb, int err)
-{
- struct btrfs_fs_info *fs_info = eb->fs_info;
- int ret = 0;
- struct reada_extent *re;
-
- /* find extent */
- spin_lock(&fs_info->reada_lock);
- re = radix_tree_lookup(&fs_info->reada_tree,
- eb->start >> fs_info->sectorsize_bits);
- if (re)
- re->refcnt++;
- spin_unlock(&fs_info->reada_lock);
- if (!re) {
- ret = -1;
- goto start_machine;
- }
-
- __readahead_hook(fs_info, re, eb, err);
- reada_extent_put(fs_info, re); /* our ref */
-
-start_machine:
- reada_start_machine(fs_info);
- return ret;
-}
-
-static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
- struct btrfs_io_context *bioc)
-{
- struct btrfs_fs_info *fs_info = dev->fs_info;
- int ret;
- struct reada_zone *zone;
- struct btrfs_block_group *cache = NULL;
- u64 start;
- u64 end;
- int i;
-
- zone = NULL;
- spin_lock(&fs_info->reada_lock);
- ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
- logical >> fs_info->sectorsize_bits, 1);
- if (ret == 1 && logical >= zone->start && logical <= zone->end) {
- kref_get(&zone->refcnt);
- spin_unlock(&fs_info->reada_lock);
- return zone;
- }
-
- spin_unlock(&fs_info->reada_lock);
-
- cache = btrfs_lookup_block_group(fs_info, logical);
- if (!cache)
- return NULL;
-
- start = cache->start;
- end = start + cache->length - 1;
- btrfs_put_block_group(cache);
-
- zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- if (!zone)
- return NULL;
-
- ret = radix_tree_preload(GFP_KERNEL);
- if (ret) {
- kfree(zone);
- return NULL;
- }
-
- zone->start = start;
- zone->end = end;
- INIT_LIST_HEAD(&zone->list);
- spin_lock_init(&zone->lock);
- zone->locked = 0;
- kref_init(&zone->refcnt);
- zone->elems = 0;
- zone->device = dev; /* our device always sits at index 0 */
- for (i = 0; i < bioc->num_stripes; ++i) {
- /* bounds have already been checked */
- zone->devs[i] = bioc->stripes[i].dev;
- }
- zone->ndevs = bioc->num_stripes;
-
- spin_lock(&fs_info->reada_lock);
- ret = radix_tree_insert(&dev->reada_zones,
- (unsigned long)(zone->end >> fs_info->sectorsize_bits),
- zone);
-
- if (ret == -EEXIST) {
- kfree(zone);
- ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
- logical >> fs_info->sectorsize_bits, 1);
- if (ret == 1 && logical >= zone->start && logical <= zone->end)
- kref_get(&zone->refcnt);
- else
- zone = NULL;
- }
- spin_unlock(&fs_info->reada_lock);
- radix_tree_preload_end();
-
- return zone;
-}
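The zone radix tree above is keyed by the zone's end offset shifted down by sectorsize_bits, so the gang lookup starting at the logical offset returns the first zone whose end lies at or beyond it; only the start then needs an explicit check. A minimal user-space sketch of the same lookup over a sorted array (the struct and values are illustrative, not taken from the btrfs headers):

#include <stdint.h>
#include <stdio.h>

struct zone { uint64_t start, end; };

/*
 * Return the first zone whose end is >= logical (mirroring the gang lookup
 * keyed by "end >> sectorsize_bits"), or NULL if logical falls outside every
 * zone.  The array must be sorted by ascending end offset.
 */
static struct zone *find_zone(struct zone *zones, int nzones, uint64_t logical)
{
	for (int i = 0; i < nzones; i++) {
		if (zones[i].end >= logical)
			return (logical >= zones[i].start) ? &zones[i] : NULL;
	}
	return NULL;
}

int main(void)
{
	struct zone zones[] = {
		{ 0,          (1ULL << 30) - 1 }, /* first 1 GiB block group */
		{ 1ULL << 30, (2ULL << 30) - 1 }, /* second block group */
	};
	struct zone *z = find_zone(zones, 2, 5ULL << 20);

	if (z)
		printf("zone [%llu, %llu]\n",
		       (unsigned long long)z->start, (unsigned long long)z->end);
	return 0;
}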
-
-static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
- u64 logical,
- struct btrfs_key *top,
- u64 owner_root, int level)
-{
- int ret;
- struct reada_extent *re = NULL;
- struct reada_extent *re_exist = NULL;
- struct btrfs_io_context *bioc = NULL;
- struct btrfs_device *dev;
- struct btrfs_device *prev_dev;
- u64 length;
- int real_stripes;
- int nzones = 0;
- unsigned long index = logical >> fs_info->sectorsize_bits;
- int dev_replace_is_ongoing;
- int have_zone = 0;
-
- spin_lock(&fs_info->reada_lock);
- re = radix_tree_lookup(&fs_info->reada_tree, index);
- if (re)
- re->refcnt++;
- spin_unlock(&fs_info->reada_lock);
-
- if (re)
- return re;
-
- re = kzalloc(sizeof(*re), GFP_KERNEL);
- if (!re)
- return NULL;
-
- re->logical = logical;
- re->top = *top;
- INIT_LIST_HEAD(&re->extctl);
- spin_lock_init(&re->lock);
- re->refcnt = 1;
- re->owner_root = owner_root;
- re->level = level;
-
- /*
- * map block
- */
- length = fs_info->nodesize;
- ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
- &length, &bioc, 0);
- if (ret || !bioc || length < fs_info->nodesize)
- goto error;
-
- if (bioc->num_stripes > BTRFS_MAX_MIRRORS) {
- btrfs_err(fs_info,
- "readahead: more than %d copies not supported",
- BTRFS_MAX_MIRRORS);
- goto error;
- }
-
- real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
- for (nzones = 0; nzones < real_stripes; ++nzones) {
- struct reada_zone *zone;
-
- dev = bioc->stripes[nzones].dev;
-
- /* cannot read ahead on missing device. */
- if (!dev->bdev)
- continue;
-
- zone = reada_find_zone(dev, logical, bioc);
- if (!zone)
- continue;
-
- re->zones[re->nzones++] = zone;
- spin_lock(&zone->lock);
- if (!zone->elems)
- kref_get(&zone->refcnt);
- ++zone->elems;
- spin_unlock(&zone->lock);
- spin_lock(&fs_info->reada_lock);
- kref_put(&zone->refcnt, reada_zone_release);
- spin_unlock(&fs_info->reada_lock);
- }
- if (re->nzones == 0) {
- /* not a single zone found, error and out */
- goto error;
- }
-
- /* Insert extent in reada tree + all per-device trees, all or nothing */
- down_read(&fs_info->dev_replace.rwsem);
- ret = radix_tree_preload(GFP_KERNEL);
- if (ret) {
- up_read(&fs_info->dev_replace.rwsem);
- goto error;
- }
-
- spin_lock(&fs_info->reada_lock);
- ret = radix_tree_insert(&fs_info->reada_tree, index, re);
- if (ret == -EEXIST) {
- re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
- re_exist->refcnt++;
- spin_unlock(&fs_info->reada_lock);
- radix_tree_preload_end();
- up_read(&fs_info->dev_replace.rwsem);
- goto error;
- }
- if (ret) {
- spin_unlock(&fs_info->reada_lock);
- radix_tree_preload_end();
- up_read(&fs_info->dev_replace.rwsem);
- goto error;
- }
- radix_tree_preload_end();
- prev_dev = NULL;
- dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
- &fs_info->dev_replace);
- for (nzones = 0; nzones < re->nzones; ++nzones) {
- dev = re->zones[nzones]->device;
-
- if (dev == prev_dev) {
- /*
- * in case of DUP, just add the first zone. As both
- * are on the same device, there's nothing to gain
- * from adding both.
- * Also, it wouldn't work, as the tree is per device
- * and adding would fail with EEXIST
- */
- continue;
- }
- if (!dev->bdev)
- continue;
-
- if (test_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state))
- continue;
-
- if (dev_replace_is_ongoing &&
- dev == fs_info->dev_replace.tgtdev) {
- /*
- * as this device is selected for reading only as
- * a last resort, skip it for read ahead.
- */
- continue;
- }
- prev_dev = dev;
- ret = radix_tree_insert(&dev->reada_extents, index, re);
- if (ret) {
- while (--nzones >= 0) {
- dev = re->zones[nzones]->device;
- BUG_ON(dev == NULL);
- /* ignore whether the entry was inserted */
- radix_tree_delete(&dev->reada_extents, index);
- }
- radix_tree_delete(&fs_info->reada_tree, index);
- spin_unlock(&fs_info->reada_lock);
- up_read(&fs_info->dev_replace.rwsem);
- goto error;
- }
- have_zone = 1;
- }
- if (!have_zone)
- radix_tree_delete(&fs_info->reada_tree, index);
- spin_unlock(&fs_info->reada_lock);
- up_read(&fs_info->dev_replace.rwsem);
-
- if (!have_zone)
- goto error;
-
- btrfs_put_bioc(bioc);
- return re;
-
-error:
- for (nzones = 0; nzones < re->nzones; ++nzones) {
- struct reada_zone *zone;
-
- zone = re->zones[nzones];
- kref_get(&zone->refcnt);
- spin_lock(&zone->lock);
- --zone->elems;
- if (zone->elems == 0) {
- /*
- * no fs_info->reada_lock needed, as this can't be
- * the last ref
- */
- kref_put(&zone->refcnt, reada_zone_release);
- }
- spin_unlock(&zone->lock);
-
- spin_lock(&fs_info->reada_lock);
- kref_put(&zone->refcnt, reada_zone_release);
- spin_unlock(&fs_info->reada_lock);
- }
- btrfs_put_bioc(bioc);
- kfree(re);
- return re_exist;
-}
-
-static void reada_extent_put(struct btrfs_fs_info *fs_info,
- struct reada_extent *re)
-{
- int i;
- unsigned long index = re->logical >> fs_info->sectorsize_bits;
-
- spin_lock(&fs_info->reada_lock);
- if (--re->refcnt) {
- spin_unlock(&fs_info->reada_lock);
- return;
- }
-
- radix_tree_delete(&fs_info->reada_tree, index);
- for (i = 0; i < re->nzones; ++i) {
- struct reada_zone *zone = re->zones[i];
-
- radix_tree_delete(&zone->device->reada_extents, index);
- }
-
- spin_unlock(&fs_info->reada_lock);
-
- for (i = 0; i < re->nzones; ++i) {
- struct reada_zone *zone = re->zones[i];
-
- kref_get(&zone->refcnt);
- spin_lock(&zone->lock);
- --zone->elems;
- if (zone->elems == 0) {
- /* no fs_info->reada_lock needed, as this can't be
- * the last ref */
- kref_put(&zone->refcnt, reada_zone_release);
- }
- spin_unlock(&zone->lock);
-
- spin_lock(&fs_info->reada_lock);
- kref_put(&zone->refcnt, reada_zone_release);
- spin_unlock(&fs_info->reada_lock);
- }
-
- kfree(re);
-}
-
-static void reada_zone_release(struct kref *kref)
-{
- struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
- struct btrfs_fs_info *fs_info = zone->device->fs_info;
-
- lockdep_assert_held(&fs_info->reada_lock);
-
- radix_tree_delete(&zone->device->reada_zones,
- zone->end >> fs_info->sectorsize_bits);
-
- kfree(zone);
-}
-
-static void reada_control_release(struct kref *kref)
-{
- struct reada_control *rc = container_of(kref, struct reada_control,
- refcnt);
-
- kfree(rc);
-}
-
-static int reada_add_block(struct reada_control *rc, u64 logical,
- struct btrfs_key *top, u64 owner_root,
- u64 generation, int level)
-{
- struct btrfs_fs_info *fs_info = rc->fs_info;
- struct reada_extent *re;
- struct reada_extctl *rec;
-
- /* takes one ref */
- re = reada_find_extent(fs_info, logical, top, owner_root, level);
- if (!re)
- return -1;
-
- rec = kzalloc(sizeof(*rec), GFP_KERNEL);
- if (!rec) {
- reada_extent_put(fs_info, re);
- return -ENOMEM;
- }
-
- rec->rc = rc;
- rec->generation = generation;
- atomic_inc(&rc->elems);
-
- spin_lock(&re->lock);
- list_add_tail(&rec->list, &re->extctl);
- spin_unlock(&re->lock);
-
- /* leave the ref on the extent */
-
- return 0;
-}
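Each block queued for readahead bumps the reada_control's elems count, and the completion path drops it and wakes rc->wait once it reaches zero, which is what btrfs_reada_wait() later blocks on. A hedged pthread model of that counter-plus-waitqueue pattern (the names are made up for the example; the kernel uses atomics, krefs and wait_event_timeout() instead):

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for reada_control's elems counter and wait queue. */
struct reada_waiter {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	unsigned long   elems;
};

static void waiter_init(struct reada_waiter *w)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->done, NULL);
	w->elems = 0;
}

/* Called when a block is queued for readahead (reada_add_block). */
static void waiter_add(struct reada_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	w->elems++;
	pthread_mutex_unlock(&w->lock);
}

/* Called when a queued block has been read (the __readahead_hook cleanup). */
static void waiter_complete(struct reada_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	if (--w->elems == 0)
		pthread_cond_broadcast(&w->done);
	pthread_mutex_unlock(&w->lock);
}

/* Called by the issuer to wait for the whole prefetch (btrfs_reada_wait). */
static void waiter_wait(struct reada_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	while (w->elems)
		pthread_cond_wait(&w->done, &w->lock);
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	struct reada_waiter w;

	waiter_init(&w);
	waiter_add(&w);
	waiter_complete(&w);
	waiter_wait(&w);
	printf("all readahead requests completed\n");
	return 0;
}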
-
-/*
- * called with fs_info->reada_lock held
- */
-static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
-{
- int i;
- unsigned long index = zone->end >> zone->device->fs_info->sectorsize_bits;
-
- for (i = 0; i < zone->ndevs; ++i) {
- struct reada_zone *peer;
- peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
- if (peer && peer->device != zone->device)
- peer->locked = lock;
- }
-}
-
-/*
- * called with fs_info->reada_lock held
- */
-static int reada_pick_zone(struct btrfs_device *dev)
-{
- struct reada_zone *top_zone = NULL;
- struct reada_zone *top_locked_zone = NULL;
- u64 top_elems = 0;
- u64 top_locked_elems = 0;
- unsigned long index = 0;
- int ret;
-
- if (dev->reada_curr_zone) {
- reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
- kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
- dev->reada_curr_zone = NULL;
- }
- /* pick the zone with the most elements */
- while (1) {
- struct reada_zone *zone;
-
- ret = radix_tree_gang_lookup(&dev->reada_zones,
- (void **)&zone, index, 1);
- if (ret == 0)
- break;
- index = (zone->end >> dev->fs_info->sectorsize_bits) + 1;
- if (zone->locked) {
- if (zone->elems > top_locked_elems) {
- top_locked_elems = zone->elems;
- top_locked_zone = zone;
- }
- } else {
- if (zone->elems > top_elems) {
- top_elems = zone->elems;
- top_zone = zone;
- }
- }
- }
- if (top_zone)
- dev->reada_curr_zone = top_zone;
- else if (top_locked_zone)
- dev->reada_curr_zone = top_locked_zone;
- else
- return 0;
-
- dev->reada_next = dev->reada_curr_zone->start;
- kref_get(&dev->reada_curr_zone->refcnt);
- reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
-
- return 1;
-}
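reada_pick_zone() scans all zones on the device and prefers the unlocked zone with the most pending elements, falling back to the busiest locked one. A small user-space sketch of the same selection over a plain array (illustrative only; the kernel walks the per-device radix tree with gang lookups):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct zone { uint64_t elems; int locked; };

static struct zone *pick_zone(struct zone *zones, int nzones)
{
	struct zone *top = NULL, *top_locked = NULL;

	for (int i = 0; i < nzones; i++) {
		struct zone *z = &zones[i];

		if (z->elems == 0)	/* empty zones are never picked */
			continue;
		if (z->locked) {
			if (!top_locked || z->elems > top_locked->elems)
				top_locked = z;
		} else {
			if (!top || z->elems > top->elems)
				top = z;
		}
	}
	/* Prefer the busiest unlocked zone, then the busiest locked one. */
	return top ? top : top_locked;
}

int main(void)
{
	struct zone zones[] = { { 3, 1 }, { 7, 0 }, { 9, 1 } };
	struct zone *z = pick_zone(zones, 3);

	printf("picked zone with %llu elems\n", (unsigned long long)z->elems);
	return 0;
}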
-
-static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 owner_root, int level, int mirror_num,
- struct extent_buffer **eb)
-{
- struct extent_buffer *buf = NULL;
- int ret;
-
- buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
- if (IS_ERR(buf))
- return 0;
-
- set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
-
- ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
- if (ret) {
- free_extent_buffer_stale(buf);
- return ret;
- }
-
- if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
- free_extent_buffer_stale(buf);
- return -EIO;
- } else if (extent_buffer_uptodate(buf)) {
- *eb = buf;
- } else {
- free_extent_buffer(buf);
- }
- return 0;
-}
-
-static int reada_start_machine_dev(struct btrfs_device *dev)
-{
- struct btrfs_fs_info *fs_info = dev->fs_info;
- struct reada_extent *re = NULL;
- int mirror_num = 0;
- struct extent_buffer *eb = NULL;
- u64 logical;
- int ret;
- int i;
-
- spin_lock(&fs_info->reada_lock);
- if (dev->reada_curr_zone == NULL) {
- ret = reada_pick_zone(dev);
- if (!ret) {
- spin_unlock(&fs_info->reada_lock);
- return 0;
- }
- }
- /*
- * FIXME currently we issue the reads one extent at a time. If we have
- * a contiguous block of extents, we could also coalesce them or use
- * plugging to speed things up
- */
- ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- dev->reada_next >> fs_info->sectorsize_bits, 1);
- if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
- ret = reada_pick_zone(dev);
- if (!ret) {
- spin_unlock(&fs_info->reada_lock);
- return 0;
- }
- re = NULL;
- ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- dev->reada_next >> fs_info->sectorsize_bits, 1);
- }
- if (ret == 0) {
- spin_unlock(&fs_info->reada_lock);
- return 0;
- }
- dev->reada_next = re->logical + fs_info->nodesize;
- re->refcnt++;
-
- spin_unlock(&fs_info->reada_lock);
-
- spin_lock(&re->lock);
- if (re->scheduled || list_empty(&re->extctl)) {
- spin_unlock(&re->lock);
- reada_extent_put(fs_info, re);
- return 0;
- }
- re->scheduled = 1;
- spin_unlock(&re->lock);
-
- /*
- * find mirror num
- */
- for (i = 0; i < re->nzones; ++i) {
- if (re->zones[i]->device == dev) {
- mirror_num = i + 1;
- break;
- }
- }
- logical = re->logical;
-
- atomic_inc(&dev->reada_in_flight);
- ret = reada_tree_block_flagged(fs_info, logical, re->owner_root,
- re->level, mirror_num, &eb);
- if (ret)
- __readahead_hook(fs_info, re, NULL, ret);
- else if (eb)
- __readahead_hook(fs_info, re, eb, ret);
-
- if (eb)
- free_extent_buffer(eb);
-
- atomic_dec(&dev->reada_in_flight);
- reada_extent_put(fs_info, re);
-
- return 1;
-
-}
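Within the chosen zone the per-device cursor dev->reada_next walks forward: the next extent at or after the cursor is issued and the cursor then jumps one node size past it. A simplified model using a sorted array of extent start offsets (NODESIZE is an arbitrary placeholder for fs_info->nodesize):

#include <stdint.h>
#include <stdio.h>

#define NODESIZE 16384ULL	/* illustrative tree node size */

/*
 * Return the index of the next extent at or after *cursor and advance the
 * cursor one node past it, the way reada_start_machine_dev() advances
 * dev->reada_next.  Returns -1 when the cursor has run past the last extent
 * (the caller would then pick a new zone).
 */
static int next_extent(const uint64_t *extents, int nr, uint64_t *cursor)
{
	for (int i = 0; i < nr; i++) {
		if (extents[i] >= *cursor) {
			*cursor = extents[i] + NODESIZE;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	uint64_t extents[] = { 0, 16384, 65536 };
	uint64_t cursor = 0;
	int idx;

	while ((idx = next_extent(extents, 3, &cursor)) >= 0)
		printf("issue readahead for extent at %llu\n",
		       (unsigned long long)extents[idx]);
	return 0;
}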
-
-static void reada_start_machine_worker(struct btrfs_work *work)
-{
- struct reada_machine_work *rmw;
- int old_ioprio;
-
- rmw = container_of(work, struct reada_machine_work, work);
-
- old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
- task_nice_ioprio(current));
- set_task_ioprio(current, BTRFS_IOPRIO_READA);
- __reada_start_machine(rmw->fs_info);
- set_task_ioprio(current, old_ioprio);
-
- atomic_dec(&rmw->fs_info->reada_works_cnt);
-
- kfree(rmw);
-}
-
-/* Try to start up to 10k READA requests for a group of devices */
-static int reada_start_for_fsdevs(struct btrfs_fs_devices *fs_devices)
-{
- u64 enqueued;
- u64 total = 0;
- struct btrfs_device *device;
-
- do {
- enqueued = 0;
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
- if (atomic_read(&device->reada_in_flight) <
- MAX_IN_FLIGHT)
- enqueued += reada_start_machine_dev(device);
- }
- total += enqueued;
- } while (enqueued && total < 10000);
-
- return total;
-}
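reada_start_for_fsdevs() keeps sweeping the device list for as long as it makes progress, but caps the inline batch at roughly 10000 submissions so the caller is not held for too long; the remainder is finished by workers. A user-space sketch of that bounded loop (struct dev and the limits are illustrative stand-ins):

#include <stdio.h>

#define MAX_IN_FLIGHT 6		/* illustrative per-device cap */
#define BATCH_LIMIT   10000	/* stop seeding inline after this many */

struct dev { int in_flight; int pending; };

/* Pretend to submit one readahead on a device; returns 1 if one was started. */
static int start_one(struct dev *d)
{
	if (d->in_flight >= MAX_IN_FLIGHT || d->pending == 0)
		return 0;
	d->pending--;
	d->in_flight++;
	return 1;
}

/*
 * Sweep the devices until either no progress is made or the inline batch
 * limit is hit, mirroring the do/while loop in reada_start_for_fsdevs().
 */
static unsigned long start_batch(struct dev *devs, int ndevs)
{
	unsigned long total = 0, enqueued;

	do {
		enqueued = 0;
		for (int i = 0; i < ndevs; i++)
			enqueued += start_one(&devs[i]);
		total += enqueued;
	} while (enqueued && total < BATCH_LIMIT);

	return total;
}

int main(void)
{
	struct dev devs[2] = { { 0, 5 }, { 0, 3 } };

	printf("started %lu requests\n", start_batch(devs, 2));
	return 0;
}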
-
-static void __reada_start_machine(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
- int i;
- u64 enqueued = 0;
-
- mutex_lock(&fs_devices->device_list_mutex);
-
- enqueued += reada_start_for_fsdevs(fs_devices);
- list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
- enqueued += reada_start_for_fsdevs(seed_devs);
-
- mutex_unlock(&fs_devices->device_list_mutex);
- if (enqueued == 0)
- return;
-
- /*
- * If everything is already in the cache, this is effectively single
- * threaded. To (a) not hold the caller for too long and (b) utilize
- * more cores, we break the loop above after 10000 iterations and
- * enqueue the rest to workers to finish it. This distributes the load
- * across the cores.
- */
- for (i = 0; i < 2; ++i) {
- reada_start_machine(fs_info);
- if (atomic_read(&fs_info->reada_works_cnt) >
- BTRFS_MAX_MIRRORS * 2)
- break;
- }
-}
-
-static void reada_start_machine(struct btrfs_fs_info *fs_info)
-{
- struct reada_machine_work *rmw;
-
- rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
- if (!rmw) {
- /* FIXME we cannot handle this properly right now */
- BUG();
- }
- btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
- rmw->fs_info = fs_info;
-
- btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
- atomic_inc(&fs_info->reada_works_cnt);
-}
-
-#ifdef DEBUG
-static void dump_devs(struct btrfs_fs_info *fs_info, int all)
-{
- struct btrfs_device *device;
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
- unsigned long index;
- int ret;
- int i;
- int j;
- int cnt;
-
- spin_lock(&fs_info->reada_lock);
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
- btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
- atomic_read(&device->reada_in_flight));
- index = 0;
- while (1) {
- struct reada_zone *zone;
- ret = radix_tree_gang_lookup(&device->reada_zones,
- (void **)&zone, index, 1);
- if (ret == 0)
- break;
- pr_debug(" zone %llu-%llu elems %llu locked %d devs",
- zone->start, zone->end, zone->elems,
- zone->locked);
- for (j = 0; j < zone->ndevs; ++j) {
- pr_cont(" %lld",
- zone->devs[j]->devid);
- }
- if (device->reada_curr_zone == zone)
- pr_cont(" curr off %llu",
- device->reada_next - zone->start);
- pr_cont("\n");
- index = (zone->end >> fs_info->sectorsize_bits) + 1;
- }
- cnt = 0;
- index = 0;
- while (all) {
- struct reada_extent *re = NULL;
-
- ret = radix_tree_gang_lookup(&device->reada_extents,
- (void **)&re, index, 1);
- if (ret == 0)
- break;
- pr_debug(" re: logical %llu size %u empty %d scheduled %d",
- re->logical, fs_info->nodesize,
- list_empty(&re->extctl), re->scheduled);
-
- for (i = 0; i < re->nzones; ++i) {
- pr_cont(" zone %llu-%llu devs",
- re->zones[i]->start,
- re->zones[i]->end);
- for (j = 0; j < re->zones[i]->ndevs; ++j) {
- pr_cont(" %lld",
- re->zones[i]->devs[j]->devid);
- }
- }
- pr_cont("\n");
- index = (re->logical >> fs_info->sectorsize_bits) + 1;
- if (++cnt > 15)
- break;
- }
- }
-
- index = 0;
- cnt = 0;
- while (all) {
- struct reada_extent *re = NULL;
-
- ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
- index, 1);
- if (ret == 0)
- break;
- if (!re->scheduled) {
- index = (re->logical >> fs_info->sectorsize_bits) + 1;
- continue;
- }
- pr_debug("re: logical %llu size %u list empty %d scheduled %d",
- re->logical, fs_info->nodesize,
- list_empty(&re->extctl), re->scheduled);
- for (i = 0; i < re->nzones; ++i) {
- pr_cont(" zone %llu-%llu devs",
- re->zones[i]->start,
- re->zones[i]->end);
- for (j = 0; j < re->zones[i]->ndevs; ++j) {
- pr_cont(" %lld",
- re->zones[i]->devs[j]->devid);
- }
- }
- pr_cont("\n");
- index = (re->logical >> fs_info->sectorsize_bits) + 1;
- }
- spin_unlock(&fs_info->reada_lock);
-}
-#endif
-
-/*
- * interface
- */
-struct reada_control *btrfs_reada_add(struct btrfs_root *root,
- struct btrfs_key *key_start, struct btrfs_key *key_end)
-{
- struct reada_control *rc;
- u64 start;
- u64 generation;
- int ret;
- int level;
- struct extent_buffer *node;
- static struct btrfs_key max_key = {
- .objectid = (u64)-1,
- .type = (u8)-1,
- .offset = (u64)-1
- };
-
- rc = kzalloc(sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return ERR_PTR(-ENOMEM);
-
- rc->fs_info = root->fs_info;
- rc->key_start = *key_start;
- rc->key_end = *key_end;
- atomic_set(&rc->elems, 0);
- init_waitqueue_head(&rc->wait);
- kref_init(&rc->refcnt);
- kref_get(&rc->refcnt); /* one ref for having elements */
-
- node = btrfs_root_node(root);
- start = node->start;
- generation = btrfs_header_generation(node);
- level = btrfs_header_level(node);
- free_extent_buffer(node);
-
- ret = reada_add_block(rc, start, &max_key, root->root_key.objectid,
- generation, level);
- if (ret) {
- kfree(rc);
- return ERR_PTR(ret);
- }
-
- reada_start_machine(root->fs_info);
-
- return rc;
-}
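Readahead only descends into child blocks whose key range intersects [key_start, key_end], and the all-ones max_key above makes the upper bound effectively unbounded. Keys order lexicographically by (objectid, type, offset); a user-space sketch of that comparison and of the overlap test used in __readahead_hook():

#include <stdint.h>
#include <stdio.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

/* Lexicographic (objectid, type, offset) compare, like btrfs_comp_cpu_keys(). */
static int key_cmp(const struct key *a, const struct key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}

/*
 * A child block covering [key, next_key) is worth prefetching when it
 * overlaps the requested [start, end] range, the same test applied per
 * extctl in __readahead_hook().
 */
static int in_range(const struct key *key, const struct key *next_key,
		    const struct key *start, const struct key *end)
{
	return key_cmp(key, end) < 0 && key_cmp(next_key, start) > 0;
}

int main(void)
{
	struct key start = { 256, 1, 0 };
	struct key end   = { (uint64_t)-1, (uint8_t)-1, (uint64_t)-1 };
	struct key k     = { 300, 1, 0 };
	struct key next  = { 301, 1, 0 };

	printf("prefetch child: %d\n", in_range(&k, &next, &start, &end));
	return 0;
}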
-
-#ifdef DEBUG
-int btrfs_reada_wait(void *handle)
-{
- struct reada_control *rc = handle;
- struct btrfs_fs_info *fs_info = rc->fs_info;
-
- while (atomic_read(&rc->elems)) {
- if (!atomic_read(&fs_info->reada_works_cnt))
- reada_start_machine(fs_info);
- wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
- 5 * HZ);
- dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
- }
-
- dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
-
- kref_put(&rc->refcnt, reada_control_release);
-
- return 0;
-}
-#else
-int btrfs_reada_wait(void *handle)
-{
- struct reada_control *rc = handle;
- struct btrfs_fs_info *fs_info = rc->fs_info;
-
- while (atomic_read(&rc->elems)) {
- if (!atomic_read(&fs_info->reada_works_cnt))
- reada_start_machine(fs_info);
- wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
- (HZ + 9) / 10);
- }
-
- kref_put(&rc->refcnt, reada_control_release);
-
- return 0;
-}
-#endif
-
-void btrfs_reada_detach(void *handle)
-{
- struct reada_control *rc = handle;
-
- kref_put(&rc->refcnt, reada_control_release);
-}
-
-/*
- * Before removing a device (device replace or device remove ioctls), call this
- * function to wait for all existing readahead requests on the device and to
- * make sure no one queues more readahead requests for the device.
- *
- * Must be called while holding neither the device list mutex nor the device
- * replace semaphore, otherwise it will deadlock.
- */
-void btrfs_reada_remove_dev(struct btrfs_device *dev)
-{
- struct btrfs_fs_info *fs_info = dev->fs_info;
-
- /* Serialize with readahead extent creation at reada_find_extent(). */
- spin_lock(&fs_info->reada_lock);
- set_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state);
- spin_unlock(&fs_info->reada_lock);
-
- /*
- * There might be readahead requests added to the radix trees which
- * were not yet added to the readahead work queue. We need to start
- * them and wait for their completion, otherwise we can end up with
- * use-after-free problems when dropping the last reference on the
- * readahead extents and their zones, as they need to access the
- * device structure.
- */
- reada_start_machine(fs_info);
- btrfs_flush_workqueue(fs_info->readahead_workers);
-}
-
-/*
- * If, when removing a device (device replace or device remove ioctls), an error
- * happens after calling btrfs_reada_remove_dev(), call this to undo what that
- * function did. This is safe to call even if btrfs_reada_remove_dev() was not
- * called before.
- */
-void btrfs_reada_undo_remove_dev(struct btrfs_device *dev)
-{
- spin_lock(&dev->fs_info->reada_lock);
- clear_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state);
- spin_unlock(&dev->fs_info->reada_lock);
-}
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e2b9f8616501..e9224145d754 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -5,11 +5,14 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"
+#include "fs.h"
+#include "accessors.h"
/*
* Used to keep track the roots and number of refs each root has for a given
@@ -72,69 +75,70 @@ struct block_entry {
struct list_head actions;
};
+static int block_entry_bytenr_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *bytenr = key;
+ const struct block_entry *entry = rb_entry(node, struct block_entry, node);
+
+ if (entry->bytenr < *bytenr)
+ return 1;
+ else if (entry->bytenr > *bytenr)
+ return -1;
+
+ return 0;
+}
+
+static int block_entry_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct block_entry *new_entry = rb_entry(new, struct block_entry, node);
+
+ return block_entry_bytenr_key_cmp(&new_entry->bytenr, existing);
+}
+
static struct block_entry *insert_block_entry(struct rb_root *root,
struct block_entry *be)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent_node = NULL;
- struct block_entry *entry;
-
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct block_entry, node);
- if (entry->bytenr > be->bytenr)
- p = &(*p)->rb_left;
- else if (entry->bytenr < be->bytenr)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- rb_link_node(&be->node, parent_node, p);
- rb_insert_color(&be->node, root);
- return NULL;
+ struct rb_node *node;
+
+ node = rb_find_add(&be->node, root, block_entry_bytenr_cmp);
+ return rb_entry_safe(node, struct block_entry, node);
}
static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
- struct rb_node *n;
- struct block_entry *entry = NULL;
+ struct rb_node *node;
- n = root->rb_node;
- while (n) {
- entry = rb_entry(n, struct block_entry, node);
- if (entry->bytenr < bytenr)
- n = n->rb_right;
- else if (entry->bytenr > bytenr)
- n = n->rb_left;
- else
- return entry;
- }
- return NULL;
+ node = rb_find(&bytenr, root, block_entry_bytenr_key_cmp);
+ return rb_entry_safe(node, struct block_entry, node);
}
-static struct root_entry *insert_root_entry(struct rb_root *root,
- struct root_entry *re)
+static int root_entry_root_objectid_key_cmp(const void *key, const struct rb_node *node)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent_node = NULL;
- struct root_entry *entry;
+ const u64 *objectid = key;
+ const struct root_entry *entry = rb_entry(node, struct root_entry, node);
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct root_entry, node);
- if (entry->root_objectid > re->root_objectid)
- p = &(*p)->rb_left;
- else if (entry->root_objectid < re->root_objectid)
- p = &(*p)->rb_right;
- else
- return entry;
- }
+ if (entry->root_objectid < *objectid)
+ return 1;
+ else if (entry->root_objectid > *objectid)
+ return -1;
+
+ return 0;
+}
+
+static int root_entry_root_objectid_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct root_entry *new_entry = rb_entry(new, struct root_entry, node);
- rb_link_node(&re->node, parent_node, p);
- rb_insert_color(&re->node, root);
- return NULL;
+ return root_entry_root_objectid_key_cmp(&new_entry->root_objectid, existing);
+}
+
+static struct root_entry *insert_root_entry(struct rb_root *root,
+ struct root_entry *re)
+{
+ struct rb_node *node;
+ node = rb_find_add(&re->node, root, root_entry_root_objectid_cmp);
+ return rb_entry_safe(node, struct root_entry, node);
}
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
@@ -158,48 +162,29 @@ static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
return 0;
}
+static int ref_entry_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ struct ref_entry *new_entry = rb_entry(new, struct ref_entry, node);
+ struct ref_entry *existing_entry = rb_entry(existing, struct ref_entry, node);
+
+ return comp_refs(new_entry, existing_entry);
+}
+
static struct ref_entry *insert_ref_entry(struct rb_root *root,
struct ref_entry *ref)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent_node = NULL;
- struct ref_entry *entry;
- int cmp;
-
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct ref_entry, node);
- cmp = comp_refs(entry, ref);
- if (cmp > 0)
- p = &(*p)->rb_left;
- else if (cmp < 0)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- rb_link_node(&ref->node, parent_node, p);
- rb_insert_color(&ref->node, root);
- return NULL;
+ struct rb_node *node;
+ node = rb_find_add(&ref->node, root, ref_entry_cmp);
+ return rb_entry_safe(node, struct ref_entry, node);
}
static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
- struct rb_node *n;
- struct root_entry *entry = NULL;
+ struct rb_node *node;
- n = root->rb_node;
- while (n) {
- entry = rb_entry(n, struct root_entry, node);
- if (entry->root_objectid < objectid)
- n = n->rb_right;
- else if (entry->root_objectid > objectid)
- n = n->rb_left;
- else
- return entry;
- }
- return NULL;
+ node = rb_find(&objectid, root, root_entry_root_objectid_key_cmp);
+ return rb_entry_safe(node, struct root_entry, node);
}
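The rb_find()/rb_find_add() helpers expect a comparator that returns a negative value to descend left, a positive value to descend right and zero on a match, which is why the key comparators above return 1 when the node's value is smaller than the key. A user-space sketch of the same contract applied to a binary search over a sorted array (names invented for the example):

#include <stdint.h>
#include <stdio.h>

struct block_entry { uint64_t bytenr; };

/*
 * Same convention as the key comparators above: return > 0 when the entry
 * sorts before the key (descend right), < 0 when it sorts after (descend
 * left), 0 on a match.
 */
static int bytenr_key_cmp(const void *key, const struct block_entry *entry)
{
	const uint64_t *bytenr = key;

	if (entry->bytenr < *bytenr)
		return 1;
	if (entry->bytenr > *bytenr)
		return -1;
	return 0;
}

/* Binary search standing in for rb_find() on the sorted entries. */
static struct block_entry *find_entry(struct block_entry *entries, int nr,
				      uint64_t bytenr)
{
	int lo = 0, hi = nr - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		int cmp = bytenr_key_cmp(&bytenr, &entries[mid]);

		if (cmp == 0)
			return &entries[mid];
		if (cmp > 0)
			lo = mid + 1;	/* entry too small, go right */
		else
			hi = mid - 1;	/* entry too large, go left */
	}
	return NULL;
}

int main(void)
{
	struct block_entry entries[] = { { 4096 }, { 8192 }, { 16384 } };
	struct block_entry *e = find_entry(entries, 3, 8192);

	printf("found bytenr %llu\n", e ? (unsigned long long)e->bytenr : 0ULL);
	return 0;
}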
#ifdef CONFIG_STACKTRACE
@@ -435,10 +420,11 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct extent_buffer *leaf = path->nodes[0];
- u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 item_size = btrfs_item_size(leaf, slot);
unsigned long end, ptr;
u64 offset, flags, count;
- int type, ret;
+ int type;
+ int ret = 0;
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
@@ -482,6 +468,13 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
ret = add_shared_data_ref(fs_info, offset, count,
key->objectid, key->offset);
break;
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) {
+ btrfs_err(fs_info,
+ "found extent owner ref without simple quotas enabled");
+ ret = -EINVAL;
+ }
+ break;
default:
btrfs_err(fs_info, "invalid key type in iref");
ret = -EINVAL;
@@ -649,7 +642,7 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info,
}
/*
- * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
+ * Called when we modify a ref for a bytenr.
*
* This will add an action item to the given bytenr and do sanity checks to make
* sure we haven't messed something up. If we are making a new allocation and
@@ -657,7 +650,7 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info,
* our sanity checks pass as they are no longer needed.
*/
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *generic_ref)
+ const struct btrfs_ref *generic_ref)
{
struct ref_entry *ref = NULL, *exist;
struct ref_action *ra = NULL;
@@ -667,7 +660,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
int ret = 0;
bool metadata;
u64 bytenr = generic_ref->bytenr;
- u64 num_bytes = generic_ref->len;
+ u64 num_bytes = generic_ref->num_bytes;
u64 parent = generic_ref->parent;
u64 ref_root = 0;
u64 owner = 0;
@@ -678,11 +671,11 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
if (generic_ref->type == BTRFS_REF_METADATA) {
if (!parent)
- ref_root = generic_ref->tree_ref.owning_root;
+ ref_root = generic_ref->ref_root;
owner = generic_ref->tree_ref.level;
} else if (!parent) {
- ref_root = generic_ref->data_ref.owning_root;
- owner = generic_ref->data_ref.ino;
+ ref_root = generic_ref->ref_root;
+ owner = generic_ref->data_ref.objectid;
offset = generic_ref->data_ref.offset;
}
metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
@@ -788,6 +781,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra);
+ kfree(re);
goto out_unlock;
} else if (be->num_refs == 0) {
btrfs_err(fs_info,
@@ -797,6 +791,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra);
+ kfree(re);
goto out_unlock;
}
@@ -844,6 +839,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
+ rb_erase(&ref->node, &be->refs);
kfree(ref);
kfree(ra);
goto out_unlock;
@@ -881,8 +877,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
out_unlock:
spin_unlock(&fs_info->ref_verify_lock);
out:
- if (ret)
+ if (ret) {
+ btrfs_free_ref_cache(fs_info);
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ }
return ret;
}
@@ -972,7 +970,8 @@ void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
- struct btrfs_path *path;
+ struct btrfs_root *extent_root;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
int tree_block_level = 0;
u64 bytenr = 0, num_bytes = 0;
@@ -981,11 +980,19 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
if (!btrfs_test_opt(fs_info, REF_VERIFY))
return 0;
+ extent_root = btrfs_extent_root(fs_info, 0);
+ /* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */
+ if (!extent_root) {
+ btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling");
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- eb = btrfs_read_lock_root_node(fs_info->extent_root);
+ eb = btrfs_read_lock_root_node(extent_root);
level = btrfs_header_level(eb);
path->nodes[level] = eb;
path->slots[level] = 0;
@@ -998,7 +1005,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
 * would have had to add a ref key item which may appear on a
* different leaf from the original extent item.
*/
- ret = walk_down_tree(fs_info->extent_root, path, level,
+ ret = walk_down_tree(extent_root, path, level,
&bytenr, &num_bytes, &tree_block_level);
if (ret)
break;
@@ -1011,9 +1018,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
}
}
if (ret) {
- btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
btrfs_free_ref_cache(fs_info);
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
}
- btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/ref-verify.h b/fs/btrfs/ref-verify.h
index 855de37719b5..1ce544d53cc5 100644
--- a/fs/btrfs/ref-verify.h
+++ b/fs/btrfs/ref-verify.h
@@ -6,11 +6,20 @@
#ifndef BTRFS_REF_VERIFY_H
#define BTRFS_REF_VERIFY_H
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+#include <linux/types.h>
+#include <linux/rbtree_types.h>
+
+struct btrfs_fs_info;
+struct btrfs_ref;
+
+#ifdef CONFIG_BTRFS_DEBUG
+
+#include <linux/spinlock.h>
+
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info);
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info);
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *generic_ref);
+ const struct btrfs_ref *generic_ref);
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
u64 len);
@@ -30,7 +39,7 @@ static inline void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
}
static inline int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *generic_ref)
+ const struct btrfs_ref *generic_ref)
{
return 0;
}
@@ -44,6 +53,6 @@ static inline void btrfs_init_ref_verify(struct btrfs_fs_info *fs_info)
{
}
-#endif /* CONFIG_BTRFS_FS_REF_VERIFY */
+#endif /* CONFIG_BTRFS_DEBUG */
#endif
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index e0f93b357548..b5fe95baf92e 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -1,13 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/blkdev.h>
+#include <linux/fscrypt.h>
#include <linux/iversion.h>
-#include "compression.h"
#include "ctree.h"
+#include "fs.h"
+#include "messages.h"
+#include "compression.h"
#include "delalloc-space.h"
+#include "disk-io.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"
+#include "accessors.h"
+#include "file-item.h"
+#include "file.h"
+#include "super.h"
#define BTRFS_MAX_DEDUPE_LEN SZ_16M
@@ -16,14 +24,14 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
u64 endoff,
const u64 destoff,
const u64 olen,
- int no_time_update)
+ bool no_time_update)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
inode_inc_iversion(inode);
- if (!no_time_update)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ if (!no_time_update) {
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ }
/*
* We round up to the block size at eof when determining which
* extents to clone above, but shouldn't round up the file size.
@@ -35,15 +43,13 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (ret) {
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
- goto out;
+ return ret;
}
- ret = btrfs_end_transaction(trans);
-out:
- return ret;
+ return btrfs_end_transaction(trans);
}
static int copy_inline_to_page(struct btrfs_inode *inode,
@@ -59,7 +65,7 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
struct extent_changeset *data_reserved = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct address_space *mapping = inode->vfs_inode.i_mapping;
int ret;
@@ -76,20 +82,20 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
if (ret)
goto out;
- page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
- btrfs_alloc_write_mask(mapping));
- if (!page) {
- ret = -ENOMEM;
+ folio = __filemap_get_folio(mapping, file_offset >> PAGE_SHIFT,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ btrfs_alloc_write_mask(mapping));
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto out_unlock;
}
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0)
goto out_unlock;
- clear_extent_bit(&inode->io_tree, file_offset, range_end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, NULL);
+ btrfs_clear_extent_bit(&inode->io_tree, file_offset, range_end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, NULL);
ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
if (ret)
goto out_unlock;
@@ -108,16 +114,15 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
if (comp_type == BTRFS_COMPRESS_NONE) {
- memcpy_to_page(page, offset_in_page(file_offset), data_start,
- datal);
- flush_dcache_page(page);
+ memcpy_to_folio(folio, offset_in_folio(folio, file_offset), data_start,
+ datal);
} else {
- ret = btrfs_decompress(comp_type, data_start, page,
- offset_in_page(file_offset),
+ ret = btrfs_decompress(comp_type, data_start, folio,
+ offset_in_folio(folio, file_offset),
inline_size, datal);
if (ret)
goto out_unlock;
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
}
/*
@@ -132,18 +137,16 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
*
* So what's in the range [500, 4095] corresponds to zeroes.
*/
- if (datal < block_size) {
- memzero_page(page, datal, block_size - datal);
- flush_dcache_page(page);
- }
+ if (datal < block_size)
+ folio_zero_range(folio, datal, block_size - datal);
- btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
- btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
- btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
+ btrfs_folio_set_uptodate(fs_info, folio, file_offset, block_size);
+ btrfs_folio_clear_checked(fs_info, folio, file_offset, block_size);
+ btrfs_folio_set_dirty(fs_info, folio, file_offset, block_size);
out_unlock:
- if (page) {
- unlock_page(page);
- put_page(page);
+ if (!IS_ERR(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
}
if (ret)
btrfs_delalloc_release_space(inode, data_reserved, file_offset,
@@ -160,7 +163,7 @@ out:
* the source inode to destination inode when possible. When not possible we
* copy the inline extent's data into the respective page of the inode.
*/
-static int clone_copy_inline_extent(struct inode *dst,
+static int clone_copy_inline_extent(struct btrfs_inode *inode,
struct btrfs_path *path,
struct btrfs_key *new_key,
const u64 drop_start,
@@ -170,8 +173,8 @@ static int clone_copy_inline_extent(struct inode *dst,
char *inline_data,
struct btrfs_trans_handle **trans_out)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
- struct btrfs_root *root = BTRFS_I(dst)->root;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
const u64 aligned_end = ALIGN(new_key->offset + datal,
fs_info->sectorsize);
struct btrfs_trans_handle *trans = NULL;
@@ -180,12 +183,12 @@ static int clone_copy_inline_extent(struct inode *dst,
struct btrfs_key key;
if (new_key->offset > 0) {
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+ ret = copy_inline_to_page(inode, new_key->offset,
inline_data, size, datal, comp_type);
goto out;
}
- key.objectid = btrfs_ino(BTRFS_I(dst));
+ key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -200,7 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
goto copy_inline_extent;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
+ if (key.objectid == btrfs_ino(inode) &&
key.type == BTRFS_EXTENT_DATA_KEY) {
/*
* There's an implicit hole at file offset 0, copy the
@@ -209,7 +212,7 @@ static int clone_copy_inline_extent(struct inode *dst,
ASSERT(key.offset > 0);
goto copy_to_page;
}
- } else if (i_size_read(dst) <= datal) {
+ } else if (i_size_read(&inode->vfs_inode) <= datal) {
struct btrfs_file_extent_item *ei;
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -231,7 +234,7 @@ copy_inline_extent:
* We have no extent items, or we have an extent at offset 0 which may
 * or may not be inlined. All of these cases are handled the same way.
*/
- if (i_size_read(dst) > datal) {
+ if (i_size_read(&inode->vfs_inode) > datal) {
/*
* At the destination offset 0 we have either a hole, a regular
 * extent or an inline extent larger than the one we want to
@@ -265,20 +268,26 @@ copy_inline_extent:
drop_args.start = drop_start;
drop_args.end = aligned_end;
drop_args.drop_cache = true;
- ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
- if (ret)
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
- if (ret)
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
write_extent_buffer(path->nodes[0], inline_data,
btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]),
size);
- btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
- ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
+ btrfs_update_inode_bytes(inode, datal, drop_args.bytes_found);
+ btrfs_set_inode_full_sync(inode);
+ ret = btrfs_inode_set_file_extent_range(inode, 0, aligned_end);
+ if (unlikely(ret))
+ btrfs_abort_transaction(trans, ret);
out:
if (!ret && !trans) {
/*
@@ -293,10 +302,8 @@ out:
trans = NULL;
}
}
- if (ret && trans) {
- btrfs_abort_transaction(trans, ret);
+ if (ret && trans)
btrfs_end_transaction(trans);
- }
if (!ret)
*trans_out = trans;
@@ -313,37 +320,38 @@ copy_to_page:
*/
btrfs_release_path(path);
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+ ret = copy_inline_to_page(inode, new_key->offset,
inline_data, size, datal, comp_type);
goto out;
}
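When an inline extent shorter than a block is copied into the page cache, the remainder of the block must read back as zeroes, which is what the folio_zero_range() call in copy_inline_to_page() achieves. A minimal user-space sketch of that fill-and-zero step (BLOCK_SIZE is a stand-in for the filesystem block size):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 4096

/*
 * Copy "datal" bytes of (already decompressed) inline data into a block
 * buffer and zero the rest, so the range [datal, BLOCK_SIZE) reads back as
 * zeroes -- the same effect as memcpy_to_folio() + folio_zero_range() above.
 */
static void fill_block(unsigned char *block, const void *data, size_t datal)
{
	memcpy(block, data, datal);
	if (datal < BLOCK_SIZE)
		memset(block + datal, 0, BLOCK_SIZE - datal);
}

int main(void)
{
	unsigned char block[BLOCK_SIZE];
	const char data[] = "hello, inline extent";

	fill_block(block, data, sizeof(data) - 1);
	printf("byte 500 is %d, byte 4095 is %d\n", block[500], block[4095]);
	return 0;
}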
-/**
- * btrfs_clone() - clone a range from inode file to another
+/*
+ * Clone a range from inode file to another.
*
- * @src: Inode to clone from
- * @inode: Inode to clone to
- * @off: Offset within source to start clone from
- * @olen: Original length, passed by user, of range to clone
- * @olen_aligned: Block-aligned value of olen
- * @destoff: Offset within @inode to start clone
- * @no_time_update: Whether to update mtime/ctime on the target inode
+ * @src: Inode to clone from
+ * @inode: Inode to clone to
+ * @off: Offset within source to start clone from
+ * @olen: Original length, passed by user, of range to clone
+ * @olen_aligned: Block-aligned value of olen
+ * @destoff: Offset within @inode to start clone
+ * @no_time_update: Whether to update mtime/ctime on the target inode
*/
static int btrfs_clone(struct inode *src, struct inode *inode,
const u64 off, const u64 olen, const u64 olen_aligned,
- const u64 destoff, int no_time_update)
+ const u64 destoff, bool no_time_update)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path = NULL;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_trans_handle *trans;
- char *buf = NULL;
+ char AUTO_KVFREE(buf);
struct btrfs_key key;
u32 nritems;
int slot;
int ret;
const u64 len = olen_aligned;
u64 last_dest_end = destoff;
+ u64 prev_extent_end = off;
ret = -ENOMEM;
buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
@@ -351,10 +359,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
return ret;
path = btrfs_alloc_path();
- if (!path) {
- kvfree(buf);
+ if (!path)
return ret;
- }
path->reada = READA_FORWARD;
/* Clone data */
@@ -363,7 +369,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
key.offset = off;
while (1) {
- u64 next_key_min_offset = key.offset + 1;
struct btrfs_file_extent_item *extent;
u64 extent_gen;
int type;
@@ -431,15 +436,22 @@ process_slot:
* The first search might have left us at an extent item that
 * ends before our target range's start, which can happen if we have
* holes and NO_HOLES feature enabled.
+ *
+ * Subsequent searches may leave us on a file range we have
+ * processed before - this happens due to a race with ordered
+ * extent completion for a file range that is outside our source
+ * range, but that range was part of a file extent item that
+ * also covered a leading part of our source range.
*/
- if (key.offset + datal <= off) {
+ if (key.offset + datal <= prev_extent_end) {
path->slots[0]++;
goto process_slot;
} else if (key.offset >= off + len) {
break;
}
- next_key_min_offset = key.offset + datal;
- size = btrfs_item_size_nr(leaf, slot);
+
+ prev_extent_end = key.offset + datal;
+ size = btrfs_item_size(leaf, slot);
read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
size);
@@ -489,12 +501,14 @@ process_slot:
clone_info.file_offset = new_key.offset;
clone_info.extent_buf = buf;
clone_info.is_new_extent = false;
+ clone_info.update_times = !no_time_update;
ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
drop_start, new_key.offset + datal - 1,
&clone_info, &trans);
if (ret)
goto out;
- } else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ } else {
+ ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
/*
* Inline extents always have to start at file offset 0
* and can never be bigger then the sector size. We can
@@ -505,10 +519,14 @@ process_slot:
*/
ASSERT(key.offset == 0);
ASSERT(datal <= fs_info->sectorsize);
- if (key.offset != 0 || datal > fs_info->sectorsize)
- return -EUCLEAN;
+ if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
+ WARN_ON(key.offset != 0) ||
+ WARN_ON(datal > fs_info->sectorsize)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
- ret = clone_copy_inline_extent(inode, path, &new_key,
+ ret = clone_copy_inline_extent(BTRFS_I(inode), path, &new_key,
drop_start, datal, size,
comp, buf, &trans);
if (ret)
@@ -518,17 +536,22 @@ process_slot:
btrfs_release_path(path);
/*
- * If this is a new extent update the last_reflink_trans of both
- * inodes. This is used by fsync to make sure it does not log
- * multiple checksum items with overlapping ranges. For older
- * extents we don't need to do it since inode logging skips the
- * checksums for older extents. Also ignore holes and inline
- * extents because they don't have checksums in the csum tree.
+ * Whenever we share an extent we update the last_reflink_trans
+ * of each inode to the current transaction. This is needed to
+ * make sure fsync does not log multiple checksum items with
+ * overlapping ranges (because some extent items might refer
+ * only to sections of the original extent). For the destination
+ * inode we do this regardless of the generation of the extents
+ * or even if they are inline extents or explicit holes, to make
+ * sure a full fsync does not skip them. For the source inode,
+ * we only need to update last_reflink_trans in case it's a new
+ * extent that is not a hole or an inline extent, to deal with
+ * the checksums problem on fsync.
*/
- if (extent_gen == trans->transid && disko > 0) {
+ if (extent_gen == trans->transid && disko > 0)
BTRFS_I(src)->last_reflink_trans = trans->transid;
- BTRFS_I(inode)->last_reflink_trans = trans->transid;
- }
+
+ BTRFS_I(inode)->last_reflink_trans = trans->transid;
last_dest_end = ALIGN(new_key.offset + datal,
fs_info->sectorsize);
@@ -540,7 +563,7 @@ process_slot:
break;
btrfs_release_path(path);
- key.offset = next_key_min_offset;
+ key.offset = prev_extent_end;
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -575,8 +598,7 @@ process_slot:
* replaced file extent items.
*/
if (last_dest_end >= i_size_read(inode))
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ btrfs_set_inode_full_sync(BTRFS_I(inode));
ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
last_dest_end, destoff + len - 1, NULL, &trans);
@@ -588,60 +610,46 @@ process_slot:
}
out:
- btrfs_free_path(path);
- kvfree(buf);
clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
return ret;
}
-static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
-{
- unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
- unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
-}
-
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
-{
- if (inode1 < inode2) {
- swap(inode1, inode2);
- swap(loff1, loff2);
- } else if (inode1 == inode2 && loff2 < loff1) {
- swap(loff1, loff2);
- }
- lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
- lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
-}
-
-static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
+static void btrfs_double_mmap_lock(struct btrfs_inode *inode1, struct btrfs_inode *inode2)
{
if (inode1 < inode2)
swap(inode1, inode2);
- down_write(&BTRFS_I(inode1)->i_mmap_lock);
- down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
+ down_write(&inode1->i_mmap_lock);
+ down_write_nested(&inode2->i_mmap_lock, SINGLE_DEPTH_NESTING);
}
-static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
+static void btrfs_double_mmap_unlock(struct btrfs_inode *inode1, struct btrfs_inode *inode2)
{
- up_write(&BTRFS_I(inode1)->i_mmap_lock);
- up_write(&BTRFS_I(inode2)->i_mmap_lock);
+ up_write(&inode1->i_mmap_lock);
+ up_write(&inode2->i_mmap_lock);
}
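btrfs_double_mmap_lock() avoids an ABBA deadlock by always taking the two i_mmap_locks in a fixed order derived from the inode pointers (with down_write_nested() to keep lockdep happy about the second acquisition). A hedged pthread sketch of the same ordering rule:

#include <pthread.h>
#include <stdio.h>

struct inode_like { pthread_mutex_t mmap_lock; };

/*
 * Always take the lock of the higher-addressed object first (the kernel
 * helper swaps when inode1 < inode2), so two tasks locking the same pair in
 * opposite argument order cannot deadlock.
 */
static void double_lock(struct inode_like *a, struct inode_like *b)
{
	if (a < b) {
		struct inode_like *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->mmap_lock);
	pthread_mutex_lock(&b->mmap_lock);
}

static void double_unlock(struct inode_like *a, struct inode_like *b)
{
	pthread_mutex_unlock(&a->mmap_lock);
	pthread_mutex_unlock(&b->mmap_lock);
}

int main(void)
{
	struct inode_like x = { PTHREAD_MUTEX_INITIALIZER };
	struct inode_like y = { PTHREAD_MUTEX_INITIALIZER };

	double_lock(&x, &y);
	printf("both mmap locks held\n");
	double_unlock(&x, &y);
	return 0;
}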
-static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
- struct inode *dst, u64 dst_loff)
+static int btrfs_extent_same_range(struct btrfs_inode *src, u64 loff, u64 len,
+ struct btrfs_inode *dst, u64 dst_loff)
{
- const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
+ const u64 end = dst_loff + len - 1;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_fs_info *fs_info = src->root->fs_info;
+ const u64 bs = fs_info->sectorsize;
int ret;
/*
- * Lock destination range to serialize with concurrent readpages() and
- * source range to serialize with relocation.
+ * Lock destination range to serialize with concurrent readahead(), and
+ * we are safe from concurrency with relocation of source extents
+ * because we have already locked the inode's i_mmap_lock in exclusive
+ * mode.
*/
- btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
- ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
- btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
+ btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+ ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len,
+ ALIGN(len, bs), dst_loff, 1);
+ btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+
+ btrfs_btree_balance_dirty(fs_info);
return ret;
}
@@ -657,7 +665,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
if (root_dst->send_in_progress) {
btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
- root_dst->root_key.objectid,
+ btrfs_root_id(root_dst),
root_dst->send_in_progress);
spin_unlock(&root_dst->root_item_lock);
return -EAGAIN;
@@ -669,8 +677,8 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
for (i = 0; i < chunk_count; i++) {
- ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
- dst, dst_loff);
+ ret = btrfs_extent_same_range(BTRFS_I(src), loff, BTRFS_MAX_DEDUPE_LEN,
+ BTRFS_I(dst), dst_loff);
if (ret)
goto out;
@@ -679,7 +687,8 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
}
if (tail_len > 0)
- ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
+ ret = btrfs_extent_same_range(BTRFS_I(src), loff, tail_len,
+ BTRFS_I(dst), dst_loff);
out:
spin_lock(&root_dst->root_item_lock);
root_dst->dedupe_in_progress--;
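btrfs_extent_same() splits the requested length into BTRFS_MAX_DEDUPE_LEN (16M) chunks plus an optional tail and dedupes each piece separately. A user-space sketch of that split (the printf calls stand in for the per-range dedupe call):

#include <stdint.h>
#include <stdio.h>

#define MAX_DEDUPE_LEN (16ULL * 1024 * 1024)	/* BTRFS_MAX_DEDUPE_LEN */

/*
 * Walk a dedupe request in MAX_DEDUPE_LEN chunks plus an optional tail,
 * the same split btrfs_extent_same() performs before calling
 * btrfs_extent_same_range() for each piece.
 */
static void for_each_chunk(uint64_t loff, uint64_t dst_loff, uint64_t olen)
{
	uint64_t chunk_count = olen / MAX_DEDUPE_LEN;
	uint64_t tail_len = olen % MAX_DEDUPE_LEN;

	for (uint64_t i = 0; i < chunk_count; i++) {
		printf("dedupe %llu bytes: src %llu -> dst %llu\n",
		       (unsigned long long)MAX_DEDUPE_LEN,
		       (unsigned long long)loff, (unsigned long long)dst_loff);
		loff += MAX_DEDUPE_LEN;
		dst_loff += MAX_DEDUPE_LEN;
	}
	if (tail_len)
		printf("dedupe %llu byte tail: src %llu -> dst %llu\n",
		       (unsigned long long)tail_len,
		       (unsigned long long)loff, (unsigned long long)dst_loff);
}

int main(void)
{
	for_each_chunk(0, 0, 40ULL * 1024 * 1024);	/* two chunks + 8M tail */
	return 0;
}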
@@ -691,13 +700,15 @@ out:
static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
u64 off, u64 olen, u64 destoff)
{
+ struct extent_state *cached_state = NULL;
struct inode *inode = file_inode(file);
struct inode *src = file_inode(file_src);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
int ret;
int wb_ret;
u64 len = olen;
- u64 bs = fs_info->sb->s_blocksize;
+ u64 bs = fs_info->sectorsize;
+ u64 end;
/*
* VFS's generic_remap_file_range_prep() protects us from cloning the
@@ -723,26 +734,29 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* we found the previous extent covering eof and before we
* attempted to increment its reference count).
*/
- ret = btrfs_wait_ordered_range(inode, wb_start,
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), wb_start,
destoff - wb_start);
if (ret)
return ret;
}
/*
- * Lock destination range to serialize with concurrent readpages() and
- * source range to serialize with relocation.
+ * Lock destination range to serialize with concurrent readahead(), and
+ * we are safe from concurrency with relocation of source extents
+ * because we have already locked the inode's i_mmap_lock in exclusive
+ * mode.
*/
- btrfs_double_extent_lock(src, off, inode, destoff, len);
+ end = destoff + len - 1;
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
- btrfs_double_extent_unlock(src, off, inode, destoff, len);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
/*
* We may have copied an inline extent into a page of the destination
* range, so wait for writeback to complete before truncating pages
* from the page cache. This is a rare case.
*/
- wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
+ wb_ret = btrfs_wait_ordered_range(BTRFS_I(inode), destoff, len);
ret = ret ? ret : wb_ret;
/*
* Truncate page cache pages so that future reads will see the cloned
@@ -752,6 +766,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
round_down(destoff, PAGE_SIZE),
round_up(destoff + len, PAGE_SIZE) - 1);
+ btrfs_btree_balance_dirty(fs_info);
+
return ret;
}
@@ -759,27 +775,28 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags)
{
- struct inode *inode_in = file_inode(file_in);
- struct inode *inode_out = file_inode(file_out);
- u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
- bool same_inode = inode_out == inode_in;
+ struct btrfs_inode *inode_in = BTRFS_I(file_inode(file_in));
+ struct btrfs_inode *inode_out = BTRFS_I(file_inode(file_out));
+ u64 bs = inode_out->root->fs_info->sectorsize;
u64 wb_len;
int ret;
if (!(remap_flags & REMAP_FILE_DEDUP)) {
- struct btrfs_root *root_out = BTRFS_I(inode_out)->root;
+ struct btrfs_root *root_out = inode_out->root;
if (btrfs_root_readonly(root_out))
return -EROFS;
- if (file_in->f_path.mnt != file_out->f_path.mnt ||
- inode_in->i_sb != inode_out->i_sb)
- return -EXDEV;
+ ASSERT(inode_in->vfs_inode.i_sb == inode_out->vfs_inode.i_sb);
}
+ /* Can only reflink encrypted files if both files are encrypted. */
+ if (IS_ENCRYPTED(&inode_in->vfs_inode) != IS_ENCRYPTED(&inode_out->vfs_inode))
+ return -EINVAL;
+
/* Don't make the dst file partly checksummed */
- if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
+ if ((inode_in->flags & BTRFS_INODE_NODATASUM) !=
+ (inode_out->flags & BTRFS_INODE_NODATASUM)) {
return -EINVAL;
}
@@ -798,20 +815,11 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
* to complete so that new file extent items are in the fs tree.
*/
if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
- wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
+ wb_len = ALIGN(inode_in->vfs_inode.i_size, bs) - ALIGN_DOWN(pos_in, bs);
else
wb_len = ALIGN(*len, bs);
/*
- * Since we don't lock ranges, wait for ongoing lockless dio writes (as
- * any in progress could create its ordered extents after we wait for
- * existing ordered extents below).
- */
- inode_dio_wait(inode_in);
- if (!same_inode)
- inode_dio_wait(inode_out);
-
- /*
 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
*
* Btrfs' back references do not have a block level granularity, they
@@ -828,16 +836,14 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
* Also we don't need to check ASYNC_EXTENT, as async extent will be
* CoWed anyway, not affecting nocow part.
*/
- ret = filemap_flush(inode_in->i_mapping);
+ ret = filemap_flush(inode_in->vfs_inode.i_mapping);
if (ret < 0)
return ret;
- ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
- wb_len);
+ ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), wb_len);
if (ret < 0)
return ret;
- ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
- wb_len);
+ ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), wb_len);
if (ret < 0)
return ret;
@@ -859,18 +865,21 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
struct file *dst_file, loff_t destoff, loff_t len,
unsigned int remap_flags)
{
- struct inode *src_inode = file_inode(src_file);
- struct inode *dst_inode = file_inode(dst_file);
+ struct btrfs_inode *src_inode = BTRFS_I(file_inode(src_file));
+ struct btrfs_inode *dst_inode = BTRFS_I(file_inode(dst_file));
bool same_inode = dst_inode == src_inode;
int ret;
+ if (unlikely(btrfs_is_shutdown(inode_to_fs_info(file_inode(src_file)))))
+ return -EIO;
+
if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
return -EINVAL;
if (same_inode) {
btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
} else {
- lock_two_nondirectories(src_inode, dst_inode);
+ lock_two_nondirectories(&src_inode->vfs_inode, &dst_inode->vfs_inode);
btrfs_double_mmap_lock(src_inode, dst_inode);
}
@@ -880,7 +889,8 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
goto out_unlock;
if (remap_flags & REMAP_FILE_DEDUP)
- ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
+ ret = btrfs_extent_same(&src_inode->vfs_inode, off, len,
+ &dst_inode->vfs_inode, destoff);
else
ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
@@ -889,7 +899,8 @@ out_unlock:
btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
} else {
btrfs_double_mmap_unlock(src_inode, dst_inode);
- unlock_two_nondirectories(src_inode, dst_inode);
+ unlock_two_nondirectories(&src_inode->vfs_inode,
+ &dst_inode->vfs_inode);
}
/*
diff --git a/fs/btrfs/reflink.h b/fs/btrfs/reflink.h
index ecb309b4dad0..1e291f7d85c4 100644
--- a/fs/btrfs/reflink.h
+++ b/fs/btrfs/reflink.h
@@ -3,7 +3,9 @@
#ifndef BTRFS_REFLINK_H
#define BTRFS_REFLINK_H
-#include <linux/fs.h>
+#include <linux/types.h>
+
+struct file;
loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
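The reflink.h hunk above trades the full <linux/fs.h> include for <linux/types.h> plus a forward declaration. A minimal sketch of that header pattern, with hypothetical names (not btrfs code):

/* hypothetical_remap.h - sketch of the forward-declaration pattern above. */
#ifndef HYPOTHETICAL_REMAP_H
#define HYPOTHETICAL_REMAP_H

#include <linux/types.h>	/* only needed for loff_t */

struct file;			/* opaque: callers only pass pointers, so the
				 * heavy <linux/fs.h> stays out of every file
				 * that includes this header */

loff_t hypothetical_remap_range(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				loff_t len);

#endif /* HYPOTHETICAL_REMAP_H */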
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 33a0ee7ac590..5bfefc3e9c06 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -26,6 +26,17 @@
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
+#include "inode-item.h"
+#include "space-info.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "file-item.h"
+#include "relocation.h"
+#include "super.h"
+#include "tree-checker.h"
+#include "raid-stripe-tree.h"
/*
* Relocation overview
@@ -79,10 +90,15 @@
* map address of tree root to tree
*/
struct mapping_node {
- struct {
- struct rb_node rb_node;
- u64 bytenr;
- }; /* Use rb_simle_node for search/insert */
+ union {
+ /* Use rb_simple_node for search/insert */
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ };
+
+ struct rb_simple_node simple_node;
+ };
void *data;
};
@@ -95,14 +111,19 @@ struct mapping_tree {
* present a tree block to process
*/
struct tree_block {
- struct {
- struct rb_node rb_node;
- u64 bytenr;
- }; /* Use rb_simple_node for search/insert */
+ union {
+ /* Use rb_simple_node for search/insert */
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ };
+
+ struct rb_simple_node simple_node;
+ };
u64 owner;
struct btrfs_key key;
- unsigned int level:8;
- unsigned int key_ready:1;
+ u8 level;
+ bool key_ready;
};
#define MAX_EXTENTS 128
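The unions introduced above let the same fields be reached either through the anonymous struct (node->bytenr still works) or as one embedded simple node that generic rb-tree helpers can take by pointer. A stand-alone sketch of the layout with hypothetical names; the two union members must stay layout-identical for the aliasing to be safe:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

struct demo_mapping_node {
	union {
		/* Anonymous copy of the same layout, so n->bytenr keeps working. */
		struct {
			struct rb_node rb_node;
			u64 bytenr;
		};
		/* Aliased view handed to helpers as &n->simple_node. */
		struct demo_simple_node simple_node;
	};
	void *data;
};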
@@ -112,6 +133,13 @@ struct file_extent_cluster {
u64 end;
u64 boundary[MAX_EXTENTS];
unsigned int nr;
+ u64 owning_root;
+};
+
+/* Stages of data relocation. */
+enum reloc_stage {
+ MOVE_DATA_EXTENTS,
+ UPDATE_DATA_PTRS
};
struct reloc_control {
@@ -145,16 +173,12 @@ struct reloc_control {
u64 search_start;
u64 extents_found;
- unsigned int stage:8;
- unsigned int create_reloc_tree:1;
- unsigned int merge_reloc_tree:1;
- unsigned int found_file_extent:1;
+ enum reloc_stage stage;
+ bool create_reloc_tree;
+ bool merge_reloc_tree;
+ bool found_file_extent;
};
-/* stages of data relocation */
-#define MOVE_DATA_EXTENTS 0
-#define UPDATE_DATA_PTRS 1
-
static void mark_block_processed(struct reloc_control *rc,
struct btrfs_backref_node *node)
{
@@ -164,19 +188,13 @@ static void mark_block_processed(struct reloc_control *rc,
in_range(node->bytenr, rc->block_group->start,
rc->block_group->length)) {
blocksize = rc->extent_root->fs_info->nodesize;
- set_extent_bits(&rc->processed_blocks, node->bytenr,
- node->bytenr + blocksize - 1, EXTENT_DIRTY);
+ btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr,
+ node->bytenr + blocksize - 1, EXTENT_DIRTY,
+ NULL);
}
node->processed = 1;
}
-
-static void mapping_tree_init(struct mapping_tree *tree)
-{
- tree->rb_root = RB_ROOT;
- spin_lock_init(&tree->lock);
-}
-
/*
* walk up backref nodes until reach node presents tree root
*/
@@ -188,8 +206,8 @@ static struct btrfs_backref_node *walk_up_backref(
int idx = *index;
while (!list_empty(&node->upper)) {
- edge = list_entry(node->upper.next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
+ list[LOWER]);
edges[idx++] = edge;
node = edge->node[UPPER];
}
@@ -215,8 +233,8 @@ static struct btrfs_backref_node *walk_down_backref(
idx--;
continue;
}
- edge = list_entry(edge->list[LOWER].next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge,
+ list[LOWER]);
edges[idx - 1] = edge;
*index = idx;
return edge->node[UPPER];
@@ -225,71 +243,7 @@ static struct btrfs_backref_node *walk_down_backref(
return NULL;
}
-static void update_backref_node(struct btrfs_backref_cache *cache,
- struct btrfs_backref_node *node, u64 bytenr)
-{
- struct rb_node *rb_node;
- rb_erase(&node->rb_node, &cache->rb_root);
- node->bytenr = bytenr;
- rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
- if (rb_node)
- btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
-}
-
-/*
- * update backref cache after a transaction commit
- */
-static int update_backref_cache(struct btrfs_trans_handle *trans,
- struct btrfs_backref_cache *cache)
-{
- struct btrfs_backref_node *node;
- int level = 0;
-
- if (cache->last_trans == 0) {
- cache->last_trans = trans->transid;
- return 0;
- }
-
- if (cache->last_trans == trans->transid)
- return 0;
-
- /*
- * detached nodes are used to avoid unnecessary backref
- * lookup. transaction commit changes the extent tree.
- * so the detached nodes are no longer useful.
- */
- while (!list_empty(&cache->detached)) {
- node = list_entry(cache->detached.next,
- struct btrfs_backref_node, list);
- btrfs_backref_cleanup_node(cache, node);
- }
-
- while (!list_empty(&cache->changed)) {
- node = list_entry(cache->changed.next,
- struct btrfs_backref_node, list);
- list_del_init(&node->list);
- BUG_ON(node->pending);
- update_backref_node(cache, node, node->new_bytenr);
- }
-
- /*
- * some nodes can be left in the pending list if there were
- * errors during processing the pending nodes.
- */
- for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
- list_for_each_entry(node, &cache->pending[level], list) {
- BUG_ON(!node->pending);
- if (node->bytenr == node->new_bytenr)
- continue;
- update_backref_node(cache, node, node->new_bytenr);
- }
- }
-
- cache->last_trans = 0;
- return 1;
-}
-
-static bool reloc_root_is_dead(struct btrfs_root *root)
+static bool reloc_root_is_dead(const struct btrfs_root *root)
{
/*
* Pair with set_bit/clear_bit in clean_dirty_subvols and
@@ -310,7 +264,7 @@ static bool reloc_root_is_dead(struct btrfs_root *root)
* from no reloc root. But btrfs_should_ignore_reloc_root() below is a
* special case.
*/
-static bool have_reloc_root(struct btrfs_root *root)
+static bool have_reloc_root(const struct btrfs_root *root)
{
if (reloc_root_is_dead(root))
return false;
@@ -319,31 +273,30 @@ static bool have_reloc_root(struct btrfs_root *root)
return true;
}
-int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
+bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- return 0;
+ return false;
/* This root has been merged with its reloc tree, we can ignore it */
if (reloc_root_is_dead(root))
- return 1;
+ return true;
reloc_root = root->reloc_root;
if (!reloc_root)
- return 0;
+ return false;
if (btrfs_header_generation(reloc_root->commit_root) ==
root->fs_info->running_transaction->transid)
- return 0;
+ return false;
/*
- * if there is reloc tree and it was created in previous
- * transaction backref lookup can find the reloc tree,
- * so backref node for the fs tree root is useless for
- * relocation.
+ * If there is a reloc tree and it was created in a previous transaction,
+ * backref lookup can find the reloc tree, so the backref node for the fs
+ * tree root is useless for relocation.
*/
- return 1;
+ return true;
}
/*
@@ -361,7 +314,7 @@ struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
- root = (struct btrfs_root *)node->data;
+ root = node->data;
}
spin_unlock(&rc->reloc_root_tree.lock);
return btrfs_grab_root(root);
@@ -400,19 +353,13 @@ static bool handle_useless_nodes(struct reloc_control *rc,
if (cur == node)
ret = true;
- /* The node is the lowest node */
- if (cur->lowest) {
- list_del_init(&cur->lower);
- cur->lowest = 0;
- }
-
/* Cleanup the lower edges */
while (!list_empty(&cur->lower)) {
struct btrfs_backref_edge *edge;
struct btrfs_backref_node *lower;
- edge = list_entry(cur->lower.next,
- struct btrfs_backref_edge, list[UPPER]);
+ edge = list_first_entry(&cur->lower, struct btrfs_backref_edge,
+ list[UPPER]);
list_del(&edge->list[UPPER]);
list_del(&edge->list[LOWER]);
lower = edge->node[LOWER];
@@ -431,7 +378,6 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cache to avoid unnecessary backref lookup.
*/
if (cur->level > 0) {
- list_add(&cur->list, &cache->detached);
cur->detached = 1;
} else {
rb_erase(&cur->rb_node, &cache->rb_root);
@@ -456,6 +402,7 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cached.
*/
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct btrfs_key *node_key,
int level, u64 bytenr)
{
@@ -467,34 +414,31 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
struct btrfs_backref_node *node = NULL;
struct btrfs_backref_edge *edge;
int ret;
- int err = 0;
- iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
+ iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
if (!iter)
return ERR_PTR(-ENOMEM);
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
node = btrfs_backref_alloc_node(cache, bytenr, level);
if (!node) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
- node->lowest = 1;
cur = node;
/* Breadth-first search to build backref cache */
do {
- ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
- cur);
- if (ret < 0) {
- err = ret;
+ ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
+ node_key, cur);
+ if (ret < 0)
goto out;
- }
+
edge = list_first_entry_or_null(&cache->pending_edge,
struct btrfs_backref_edge, list[UPPER]);
/*
@@ -509,19 +453,18 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
/* Finish the upper linkage of newly added edges/nodes */
ret = btrfs_backref_finish_upper_links(cache, node);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
if (handle_useless_nodes(rc, node))
node = NULL;
out:
- btrfs_backref_iter_free(iter);
+ btrfs_free_path(iter->path);
+ kfree(iter);
btrfs_free_path(path);
- if (err) {
+ if (ret) {
btrfs_backref_error_cleanup(cache, node);
- return ERR_PTR(err);
+ return ERR_PTR(ret);
}
ASSERT(!node || !node->detached);
ASSERT(list_empty(&cache->useless_node) &&
@@ -530,98 +473,9 @@ out:
}
/*
- * helper to add backref node for the newly created snapshot.
- * the backref node is created by cloning backref node that
- * corresponds to root of source tree
- */
-static int clone_backref_node(struct btrfs_trans_handle *trans,
- struct reloc_control *rc,
- struct btrfs_root *src,
- struct btrfs_root *dest)
-{
- struct btrfs_root *reloc_root = src->reloc_root;
- struct btrfs_backref_cache *cache = &rc->backref_cache;
- struct btrfs_backref_node *node = NULL;
- struct btrfs_backref_node *new_node;
- struct btrfs_backref_edge *edge;
- struct btrfs_backref_edge *new_edge;
- struct rb_node *rb_node;
-
- if (cache->last_trans > 0)
- update_backref_cache(trans, cache);
-
- rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
- if (node->detached)
- node = NULL;
- else
- BUG_ON(node->new_bytenr != reloc_root->node->start);
- }
-
- if (!node) {
- rb_node = rb_simple_search(&cache->rb_root,
- reloc_root->commit_root->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct btrfs_backref_node,
- rb_node);
- BUG_ON(node->detached);
- }
- }
-
- if (!node)
- return 0;
-
- new_node = btrfs_backref_alloc_node(cache, dest->node->start,
- node->level);
- if (!new_node)
- return -ENOMEM;
-
- new_node->lowest = node->lowest;
- new_node->checked = 1;
- new_node->root = btrfs_grab_root(dest);
- ASSERT(new_node->root);
-
- if (!node->lowest) {
- list_for_each_entry(edge, &node->lower, list[UPPER]) {
- new_edge = btrfs_backref_alloc_edge(cache);
- if (!new_edge)
- goto fail;
-
- btrfs_backref_link_edge(new_edge, edge->node[LOWER],
- new_node, LINK_UPPER);
- }
- } else {
- list_add_tail(&new_node->lower, &cache->leaves);
- }
-
- rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
- &new_node->rb_node);
- if (rb_node)
- btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
-
- if (!new_node->lowest) {
- list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
- list_add_tail(&new_edge->list[LOWER],
- &new_edge->node[LOWER]->upper);
- }
- }
- return 0;
-fail:
- while (!list_empty(&new_node->lower)) {
- new_edge = list_entry(new_node->lower.next,
- struct btrfs_backref_edge, list[UPPER]);
- list_del(&new_edge->list[UPPER]);
- btrfs_backref_free_edge(cache, new_edge);
- }
- btrfs_backref_free_node(cache, new_node);
- return -ENOMEM;
-}
-
-/*
* helper to add 'address of tree root -> reloc tree' mapping
*/
-static int __must_check __add_reloc_root(struct btrfs_root *root)
+static int __add_reloc_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
@@ -636,8 +490,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->data = root;
spin_lock(&rc->reloc_root_tree.lock);
- rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
btrfs_err(fs_info,
@@ -658,7 +511,7 @@ static void __del_reloc_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
- struct mapping_node *node = NULL;
+ struct mapping_node AUTO_KFREE(node);
struct reloc_control *rc = fs_info->reloc_ctl;
bool put_ref = false;
@@ -691,7 +544,6 @@ static void __del_reloc_root(struct btrfs_root *root)
spin_unlock(&fs_info->trans_lock);
if (put_ref)
btrfs_put_root(root);
- kfree(node);
}
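AUTO_KFREE above reads as a scope-based cleanup annotation, which is why the explicit kfree(node) at the end of __del_reloc_root() disappears. A generic sketch of that pattern built on the compiler's cleanup attribute; the macro and function names here are hypothetical, not the btrfs ones:

#include <linux/slab.h>

static inline void demo_free_charp(char **p)
{
	kfree(*p);	/* kfree(NULL) is a no-op, so failed allocations are fine */
}
#define demo_auto_kfree __attribute__((cleanup(demo_free_charp)))

static int demo_use_buffer(void)
{
	char *buf demo_auto_kfree = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf; every return path frees it automatically ... */
	return 0;
}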
/*
@@ -720,8 +572,7 @@ static int __update_reloc_root(struct btrfs_root *root)
spin_lock(&rc->reloc_root_tree.lock);
node->bytenr = root->node->start;
- rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node)
btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
@@ -734,10 +585,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct extent_buffer *eb;
- struct btrfs_root_item *root_item;
+ struct btrfs_root_item AUTO_KFREE(root_item);
struct btrfs_key root_key;
int ret = 0;
- bool must_abort = false;
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
if (!root_item)
@@ -747,14 +597,32 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
root_key.type = BTRFS_ROOT_ITEM_KEY;
root_key.offset = objectid;
- if (root->root_key.objectid == objectid) {
+ if (btrfs_root_id(root) == objectid) {
u64 commit_root_gen;
+ /*
+ * Relocation will wait for the cleaner thread, and any half-dropped
+ * subvolume will be fully cleaned up at mount time.
+ * So here we shouldn't hit a subvolume with a non-zero drop_progress.
+ *
+ * If this isn't the case, error out since it can make us attempt to
+ * drop references for extents that were already dropped before.
+ */
+ if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
+ struct btrfs_key cpu_key;
+
+ btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
+ btrfs_err(fs_info,
+ "cannot relocate partially dropped subvolume %llu, drop progress key " BTRFS_KEY_FMT,
+ objectid, BTRFS_KEY_FMT_VALUE(&cpu_key));
+ return ERR_PTR(-EUCLEAN);
+ }
+
/* called by btrfs_init_reloc_root */
ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
BTRFS_TREE_RELOC_OBJECTID);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
/*
* Set the last_snapshot field to the generation of the commit
@@ -777,21 +645,20 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
ret = btrfs_copy_root(trans, root, root->node, &eb,
BTRFS_TREE_RELOC_OBJECTID);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
}
/*
* We have changed references at this point, we must abort the
- * transaction if anything fails.
+ * transaction if anything fails (i.e. 'goto abort').
*/
- must_abort = true;
memcpy(root_item, &root->root_item, sizeof(*root_item));
btrfs_set_root_bytenr(root_item, eb->start);
btrfs_set_root_level(root_item, btrfs_header_level(eb));
btrfs_set_root_generation(root_item, trans->transid);
- if (root->root_key.objectid == objectid) {
+ if (btrfs_root_id(root) == objectid) {
btrfs_set_root_refs(root_item, 0);
memset(&root_item->drop_progress, 0,
sizeof(struct btrfs_disk_key));
@@ -804,9 +671,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
ret = btrfs_insert_root(trans, fs_info->tree_root,
&root_key, root_item);
if (ret)
- goto fail;
-
- kfree(root_item);
+ goto abort;
reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
if (IS_ERR(reloc_root)) {
@@ -814,13 +679,11 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
goto abort;
}
set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
- reloc_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(reloc_root, trans->transid);
return reloc_root;
-fail:
- kfree(root_item);
+
abort:
- if (must_abort)
- btrfs_abort_transaction(trans, ret);
+ btrfs_abort_transaction(trans, ret);
return ERR_PTR(ret);
}
@@ -861,7 +724,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
*/
if (root->reloc_root) {
reloc_root = root->reloc_root;
- reloc_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(reloc_root, trans->transid);
return 0;
}
@@ -869,8 +732,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
* We are merging reloc roots, we do not need new reloc trees. Also
* reloc trees never need their own reloc tree.
*/
- if (!rc->create_reloc_tree ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
return 0;
if (!trans->reloc_reserved) {
@@ -878,7 +740,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
trans->block_rsv = rc->block_rsv;
clear_rsv = 1;
}
- reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
+ reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
if (clear_rsv)
trans->block_rsv = rsv;
if (IS_ERR(reloc_root))
@@ -920,7 +782,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
btrfs_grab_root(reloc_root);
/* root->reloc_root will stay until current relocation finished */
- if (fs_info->reloc_ctl->merge_reloc_tree &&
+ if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
/*
@@ -945,67 +807,13 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
}
/*
- * helper to find first cached inode with inode number >= objectid
- * in a subvolume
- */
-static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
-{
- struct rb_node *node;
- struct rb_node *prev;
- struct btrfs_inode *entry;
- struct inode *inode;
-
- spin_lock(&root->inode_lock);
-again:
- node = root->inode_tree.rb_node;
- prev = NULL;
- while (node) {
- prev = node;
- entry = rb_entry(node, struct btrfs_inode, rb_node);
-
- if (objectid < btrfs_ino(entry))
- node = node->rb_left;
- else if (objectid > btrfs_ino(entry))
- node = node->rb_right;
- else
- break;
- }
- if (!node) {
- while (prev) {
- entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= btrfs_ino(entry)) {
- node = prev;
- break;
- }
- prev = rb_next(prev);
- }
- }
- while (node) {
- entry = rb_entry(node, struct btrfs_inode, rb_node);
- inode = igrab(&entry->vfs_inode);
- if (inode) {
- spin_unlock(&root->inode_lock);
- return inode;
- }
-
- objectid = btrfs_ino(entry) + 1;
- if (cond_resched_lock(&root->inode_lock))
- goto again;
-
- node = rb_next(node);
- }
- spin_unlock(&root->inode_lock);
- return NULL;
-}
-
-/*
* get new location of data
*/
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
u64 bytenr, u64 num_bytes)
{
struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
int ret;
@@ -1014,15 +822,13 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
if (!path)
return -ENOMEM;
- bytenr -= BTRFS_I(reloc_inode)->index_cnt;
+ bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
ret = btrfs_lookup_file_extent(NULL, root, path,
btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ return ret;
+ if (ret > 0)
+ return -ENOENT;
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -1033,16 +839,11 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi));
- if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
- ret = -EINVAL;
- goto out;
- }
+ if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi))
+ return -EINVAL;
*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -1058,7 +859,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct inode *inode = NULL;
+ struct btrfs_inode *inode = NULL;
u64 parent;
u64 bytenr;
u64 new_bytenr = 0;
@@ -1068,13 +869,12 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
u32 i;
int ret = 0;
int first = 1;
- int dirty = 0;
if (rc->stage != UPDATE_DATA_PTRS)
return 0;
/* reloc trees always use full backref */
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
parent = leaf->start;
else
parent = 0;
@@ -1100,33 +900,40 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
continue;
/*
- * if we are modifying block in fs tree, wait for readpage
+ * if we are modifying block in fs tree, wait for read_folio
* to complete and drop the extent cache
*/
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
if (first) {
- inode = find_next_inode(root, key.objectid);
+ inode = btrfs_find_first_inode(root, key.objectid);
first = 0;
- } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
+ } else if (inode && btrfs_ino(inode) < key.objectid) {
btrfs_add_delayed_iput(inode);
- inode = find_next_inode(root, key.objectid);
+ inode = btrfs_find_first_inode(root, key.objectid);
}
- if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
+ if (inode && btrfs_ino(inode) == key.objectid) {
+ struct extent_state *cached_state = NULL;
+
end = key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
WARN_ON(!IS_ALIGNED(key.offset,
fs_info->sectorsize));
WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--;
- ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end);
- if (!ret)
+ /* Take mmap lock to serialize with reflinks. */
+ if (!down_read_trylock(&inode->i_mmap_lock))
+ continue;
+ ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
+ end, &cached_state);
+ if (!ret) {
+ up_read(&inode->i_mmap_lock);
continue;
+ }
- btrfs_drop_extent_cache(BTRFS_I(inode),
- key.offset, end, 1);
- unlock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end);
+ btrfs_drop_extent_map_range(inode, key.offset, end, true);
+ btrfs_unlock_extent(&inode->io_tree, key.offset, end,
+ &cached_state);
+ up_read(&inode->i_mmap_lock);
}
}
@@ -1141,41 +948,44 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
- dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
- num_bytes, parent);
- btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
- key.objectid, key.offset,
- root->root_key.objectid, false);
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = new_bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = parent;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_header_owner(leaf);
+ btrfs_init_data_ref(&ref, key.objectid, key.offset,
+ btrfs_root_id(root), false);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, parent);
- btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
- key.objectid, key.offset,
- root->root_key.objectid, false);
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = parent;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_header_owner(leaf);
+ btrfs_init_data_ref(&ref, key.objectid, key.offset,
+ btrfs_root_id(root), false);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
}
- if (dirty)
- btrfs_mark_buffer_dirty(leaf);
if (inode)
btrfs_add_delayed_iput(inode);
return ret;
}
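The hunks above drop the btrfs_init_generic_ref()/btrfs_init_data_ref() call pairs in favour of assigning the btrfs_ref fields directly, reusing one on-stack descriptor for the matching add and drop. A simplified sketch of that shape, with hypothetical types and helpers:

#include <linux/types.h>

enum demo_ref_action { DEMO_ADD_REF, DEMO_DROP_REF };

struct demo_ref {
	enum demo_ref_action action;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
};

int demo_inc_extent_ref(const struct demo_ref *ref);	/* hypothetical */
int demo_free_extent(const struct demo_ref *ref);	/* hypothetical */

static int demo_swap_extent_refs(u64 old_bytenr, u64 new_bytenr, u64 num_bytes,
				 u64 parent, u64 ref_root)
{
	struct demo_ref ref = {
		.num_bytes = num_bytes,		/* shared fields set once */
		.parent = parent,
		.ref_root = ref_root,
	};
	int ret;

	ref.action = DEMO_ADD_REF;		/* reference the new location */
	ref.bytenr = new_bytenr;
	ret = demo_inc_extent_ref(&ref);
	if (ret)
		return ret;

	ref.action = DEMO_DROP_REF;		/* drop the reference to the old one */
	ref.bytenr = old_bytenr;
	return demo_free_extent(&ref);
}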
-static noinline_for_stack
-int memcmp_node_keys(struct extent_buffer *eb, int slot,
- struct btrfs_path *path, int level)
+static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
+ int slot, const struct btrfs_path *path,
+ int level)
{
struct btrfs_disk_key key1;
struct btrfs_disk_key key2;
@@ -1215,8 +1025,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
int ret;
int slot;
- ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
- ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);
last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
@@ -1253,7 +1063,7 @@ again:
level = btrfs_header_level(parent);
ASSERT(level >= lowest_level);
- ret = btrfs_bin_search(parent, &key, &slot);
+ ret = btrfs_bin_search(parent, 0, &key, &slot);
if (ret < 0)
break;
if (ret && slot > 0)
@@ -1325,7 +1135,9 @@ again:
btrfs_release_path(path);
path->lowest_level = level;
+ set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
+ clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
path->lowest_level = 0;
if (ret) {
if (ret > 0)
@@ -1347,7 +1159,7 @@ again:
* The real subtree rescan is delayed until we have new
* CoW on the subtree root node before transaction commit.
*/
- ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
+ ret = btrfs_qgroup_add_swapped_blocks(dest,
rc->block_group, parent, slot,
path->nodes[level], path->slots[level],
last_snapshot);
@@ -1358,49 +1170,62 @@ again:
*/
btrfs_set_node_blockptr(parent, slot, new_bytenr);
btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
- btrfs_mark_buffer_dirty(parent);
btrfs_set_node_blockptr(path->nodes[level],
path->slots[level], old_bytenr);
btrfs_set_node_ptr_generation(path->nodes[level],
path->slots[level], old_ptr_gen);
- btrfs_mark_buffer_dirty(path->nodes[level]);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
- blocksize, path->nodes[level]->start);
- btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
- 0, true);
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = old_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = path->nodes[level]->start;
+ ref.owning_root = btrfs_root_id(src);
+ ref.ref_root = btrfs_root_id(src);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
- blocksize, 0);
- btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
- true);
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = new_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = 0;
+ ref.owning_root = btrfs_root_id(dest);
+ ref.ref_root = btrfs_root_id(dest);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
- blocksize, path->nodes[level]->start);
- btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
- 0, true);
+ /* We don't know the real owning_root, use 0. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = new_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = path->nodes[level]->start;
+ ref.owning_root = 0;
+ ref.ref_root = btrfs_root_id(src);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
- blocksize, 0);
- btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
- 0, true);
+ /* We don't know the real owning_root, use 0. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = old_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = 0;
+ ref.owning_root = 0;
+ ref.ref_root = btrfs_root_id(dest);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_free_extent(trans, &ref);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
break;
}
@@ -1502,35 +1327,38 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
* [min_key, max_key)
*/
static int invalidate_extent_cache(struct btrfs_root *root,
- struct btrfs_key *min_key,
- struct btrfs_key *max_key)
+ const struct btrfs_key *min_key,
+ const struct btrfs_key *max_key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode *inode = NULL;
+ struct btrfs_inode *inode = NULL;
u64 objectid;
u64 start, end;
u64 ino;
objectid = min_key->objectid;
while (1) {
+ struct extent_state *cached_state = NULL;
+
cond_resched();
- iput(inode);
+ if (inode)
+ iput(&inode->vfs_inode);
if (objectid > max_key->objectid)
break;
- inode = find_next_inode(root, objectid);
+ inode = btrfs_find_first_inode(root, objectid);
if (!inode)
break;
- ino = btrfs_ino(BTRFS_I(inode));
+ ino = btrfs_ino(inode);
if (ino > max_key->objectid) {
- iput(inode);
+ iput(&inode->vfs_inode);
break;
}
objectid = ino + 1;
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->vfs_inode.i_mode))
continue;
if (unlikely(min_key->objectid == ino)) {
@@ -1562,10 +1390,10 @@ static int invalidate_extent_cache(struct btrfs_root *root,
end = (u64)-1;
}
- /* the lock_extent waits for readpage to complete */
- lock_extent(&BTRFS_I(inode)->io_tree, start, end);
- btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
+ /* the lock_extent waits for read_folio to complete */
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_drop_extent_map_range(inode, start, end, true);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
}
return 0;
}
@@ -1600,7 +1428,7 @@ static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
int ret;
/* @root must be a subvolume tree root with a valid reloc tree */
- ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
ASSERT(reloc_root);
reloc_root_item = &reloc_root->root_item;
@@ -1629,7 +1457,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
reloc_dirty_list) {
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
/* Merged subvolume, cleanup its reloc root */
struct btrfs_root *reloc_root = root->reloc_root;
@@ -1647,7 +1475,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
* ->reloc_root. If it fails however we must
* drop the ref ourselves.
*/
- ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
+ ret2 = btrfs_drop_snapshot(reloc_root, false, true);
if (ret2 < 0) {
btrfs_put_root(reloc_root);
if (!ret)
@@ -1657,7 +1485,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
btrfs_put_root(root);
} else {
/* Orphan reloc tree, just clean it up */
- ret2 = btrfs_drop_snapshot(root, 0, 1);
+ ret2 = btrfs_drop_snapshot(root, false, true);
if (ret2 < 0) {
btrfs_put_root(root);
if (!ret)
@@ -1700,7 +1528,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_root_level(root_item);
- atomic_inc(&reloc_root->node->refs);
+ refcount_inc(&reloc_root->node->refs);
path->nodes[level] = reloc_root->node;
path->slots[level] = 0;
} else {
@@ -1736,7 +1564,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
memset(&next_key, 0, sizeof(next_key));
while (1) {
- ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
+ ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
+ min_reserved,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret)
goto out;
@@ -1757,7 +1586,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
* btrfs_update_reloc_root() and update our root item
* appropriately.
*/
- reloc_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(reloc_root, trans->transid);
trans->block_rsv = rc->block_rsv;
replaced = 0;
@@ -1855,7 +1684,7 @@ int prepare_to_merge(struct reloc_control *rc, int err)
again:
if (!err) {
num_bytes = rc->merging_rsv_size;
- ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
+ ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
BTRFS_RESERVE_FLUSH_ALL);
if (ret)
err = ret;
@@ -1878,11 +1707,11 @@ again:
}
}
- rc->merge_reloc_tree = 1;
+ rc->merge_reloc_tree = true;
while (!list_empty(&rc->reloc_roots)) {
- reloc_root = list_entry(rc->reloc_roots.next,
- struct btrfs_root, root_list);
+ reloc_root = list_first_entry(&rc->reloc_roots,
+ struct btrfs_root, root_list);
list_del_init(&reloc_root->root_list);
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
@@ -1898,7 +1727,39 @@ again:
err = PTR_ERR(root);
break;
}
- ASSERT(root->reloc_root == reloc_root);
+
+ if (unlikely(root->reloc_root != reloc_root)) {
+ if (root->reloc_root) {
+ btrfs_err(fs_info,
+"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
+ btrfs_root_id(root),
+ btrfs_root_id(root->reloc_root),
+ root->reloc_root->root_key.type,
+ root->reloc_root->root_key.offset,
+ btrfs_root_generation(
+ &root->reloc_root->root_item),
+ btrfs_root_id(reloc_root),
+ reloc_root->root_key.type,
+ reloc_root->root_key.offset,
+ btrfs_root_generation(
+ &reloc_root->root_item));
+ } else {
+ btrfs_err(fs_info,
+"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
+ btrfs_root_id(root),
+ btrfs_root_id(reloc_root),
+ reloc_root->root_key.type,
+ reloc_root->root_key.offset,
+ btrfs_root_generation(
+ &reloc_root->root_item));
+ }
+ list_add(&reloc_root->root_list, &reloc_roots);
+ btrfs_put_root(root);
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ if (!err)
+ err = -EUCLEAN;
+ break;
+ }
/*
* set reference count to 1, so btrfs_recover_relocation
@@ -1915,7 +1776,7 @@ again:
list_add(&reloc_root->root_list, &reloc_roots);
btrfs_put_root(root);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
if (!err)
err = ret;
@@ -1965,13 +1826,12 @@ again:
while (!list_empty(&reloc_roots)) {
found = 1;
- reloc_root = list_entry(reloc_roots.next,
- struct btrfs_root, root_list);
+ reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
false);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- if (IS_ERR(root)) {
+ if (WARN_ON(IS_ERR(root))) {
/*
* For recovery we read the fs roots on mount,
* and if we didn't find the root then we marked
@@ -1980,17 +1840,14 @@ again:
* memory. However there's no reason we can't
* handle the error properly here just in case.
*/
- ASSERT(0);
ret = PTR_ERR(root);
goto out;
}
- if (root->reloc_root != reloc_root) {
+ if (WARN_ON(root->reloc_root != reloc_root)) {
/*
- * This is actually impossible without something
- * going really wrong (like weird race condition
- * or cosmic rays).
+ * This can happen if on-disk metadata has some
+ * corruption, e.g. bad reloc tree key offset.
*/
- ASSERT(0);
ret = -EINVAL;
goto out;
}
@@ -2071,7 +1928,7 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root;
int ret;
- if (reloc_root->last_trans == trans->transid)
+ if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
return 0;
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
@@ -2085,11 +1942,11 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
* reloc root without a corresponding root this could return ENOENT.
*/
if (IS_ERR(root)) {
- ASSERT(0);
+ DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root));
return PTR_ERR(root);
}
- if (root->reloc_root != reloc_root) {
- ASSERT(0);
+ if (unlikely(root->reloc_root != reloc_root)) {
+ DEBUG_WARN("unexpected reloc root found");
btrfs_err(fs_info,
"root %llu has two reloc roots associated with it",
reloc_root->root_key.offset);
@@ -2113,100 +1970,72 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
int index = 0;
int ret;
- next = node;
- while (1) {
- cond_resched();
- next = walk_up_backref(next, edges, &index);
- root = next->root;
-
- /*
- * If there is no root, then our references for this block are
- * incomplete, as we should be able to walk all the way up to a
- * block that is owned by a root.
- *
- * This path is only for SHAREABLE roots, so if we come upon a
- * non-SHAREABLE root then we have backrefs that resolve
- * improperly.
- *
- * Both of these cases indicate file system corruption, or a bug
- * in the backref walking code.
- */
- if (!root) {
- ASSERT(0);
- btrfs_err(trans->fs_info,
- "bytenr %llu doesn't have a backref path ending in a root",
- node->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
- ASSERT(0);
- btrfs_err(trans->fs_info,
- "bytenr %llu has multiple refs with one ending in a non-shareable root",
- node->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
+ next = walk_up_backref(node, edges, &index);
+ root = next->root;
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
- ret = record_reloc_root_in_trans(trans, root);
- if (ret)
- return ERR_PTR(ret);
- break;
- }
+ /*
+ * If there is no root, then our references for this block are
+ * incomplete, as we should be able to walk all the way up to a block
+ * that is owned by a root.
+ *
+ * This path is only for SHAREABLE roots, so if we come upon a
+ * non-SHAREABLE root then we have backrefs that resolve improperly.
+ *
+ * Both of these cases indicate file system corruption, or a bug in the
+ * backref walking code.
+ */
+ if (unlikely(!root)) {
+ btrfs_err(trans->fs_info,
+ "bytenr %llu doesn't have a backref path ending in a root",
+ node->bytenr);
+ return ERR_PTR(-EUCLEAN);
+ }
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_err(trans->fs_info,
+ "bytenr %llu has multiple refs with one ending in a non-shareable root",
+ node->bytenr);
+ return ERR_PTR(-EUCLEAN);
+ }
- ret = btrfs_record_root_in_trans(trans, root);
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
+ ret = record_reloc_root_in_trans(trans, root);
if (ret)
return ERR_PTR(ret);
- root = root->reloc_root;
-
- /*
- * We could have raced with another thread which failed, so
- * root->reloc_root may not be set, return ENOENT in this case.
- */
- if (!root)
- return ERR_PTR(-ENOENT);
+ goto found;
+ }
- if (next->new_bytenr != root->node->start) {
- /*
- * We just created the reloc root, so we shouldn't have
- * ->new_bytenr set and this shouldn't be in the changed
- * list. If it is then we have multiple roots pointing
- * at the same bytenr which indicates corruption, or
- * we've made a mistake in the backref walking code.
- */
- ASSERT(next->new_bytenr == 0);
- ASSERT(list_empty(&next->list));
- if (next->new_bytenr || !list_empty(&next->list)) {
- btrfs_err(trans->fs_info,
- "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
- node->bytenr, next->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
+ ret = btrfs_record_root_in_trans(trans, root);
+ if (ret)
+ return ERR_PTR(ret);
+ root = root->reloc_root;
- next->new_bytenr = root->node->start;
- btrfs_put_root(next->root);
- next->root = btrfs_grab_root(root);
- ASSERT(next->root);
- list_add_tail(&next->list,
- &rc->backref_cache.changed);
- mark_block_processed(rc, next);
- break;
- }
+ /*
+ * We could have raced with another thread which failed, so
+ * root->reloc_root may not be set, return ENOENT in this case.
+ */
+ if (!root)
+ return ERR_PTR(-ENOENT);
- WARN_ON(1);
- root = NULL;
- next = walk_down_backref(edges, &index);
- if (!next || next->level <= node->level)
- break;
- }
- if (!root) {
+ if (unlikely(next->new_bytenr)) {
/*
- * This can happen if there's fs corruption or if there's a bug
- * in the backref lookup code.
+ * We just created the reloc root, so we shouldn't have
+ * ->new_bytenr set yet. If it is then we have multiple roots
+ * pointing at the same bytenr which indicates corruption, or
+ * we've made a mistake in the backref walking code.
*/
- ASSERT(0);
- return ERR_PTR(-ENOENT);
+ ASSERT(next->new_bytenr == 0);
+ btrfs_err(trans->fs_info,
+ "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
+ node->bytenr, next->bytenr);
+ return ERR_PTR(-EUCLEAN);
}
+ next->new_bytenr = root->node->start;
+ btrfs_put_root(next->root);
+ next->root = btrfs_grab_root(root);
+ ASSERT(next->root);
+ mark_block_processed(rc, next);
+found:
next = node;
/* setup backref node path for btrfs_reloc_cow_block */
while (1) {
@@ -2246,14 +2075,14 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
* This can occur if we have incomplete extent refs leading all
* the way up a particular path, in this case return -EUCLEAN.
*/
- if (!root)
+ if (unlikely(!root))
return ERR_PTR(-EUCLEAN);
/* No other choice for non-shareable tree */
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return root;
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
fs_root = root;
if (next != node)
@@ -2269,9 +2098,8 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
return fs_root;
}
-static noinline_for_stack
-u64 calcu_metadata_size(struct reloc_control *rc,
- struct btrfs_backref_node *node, int reserve)
+static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
+ struct btrfs_backref_node *node)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_backref_node *next = node;
@@ -2280,12 +2108,12 @@ u64 calcu_metadata_size(struct reloc_control *rc,
u64 num_bytes = 0;
int index = 0;
- BUG_ON(reserve && node->processed);
+ BUG_ON(node->processed);
while (next) {
cond_resched();
while (1) {
- if (next->processed && (reserve || next != node))
+ if (next->processed)
break;
num_bytes += fs_info->nodesize;
@@ -2293,8 +2121,8 @@ u64 calcu_metadata_size(struct reloc_control *rc,
if (list_empty(&next->upper))
break;
- edge = list_entry(next->upper.next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
+ list[LOWER]);
edges[index++] = edge;
next = edge->node[UPPER];
}
@@ -2303,17 +2131,11 @@ u64 calcu_metadata_size(struct reloc_control *rc,
return num_bytes;
}
-static int reserve_metadata_space(struct btrfs_trans_handle *trans,
- struct reloc_control *rc,
- struct btrfs_backref_node *node)
+static int refill_metadata_space(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc, u64 num_bytes)
{
- struct btrfs_root *root = rc->extent_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- u64 num_bytes;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- u64 tmp;
-
- num_bytes = calcu_metadata_size(rc, node, 1) * 2;
trans->block_rsv = rc->block_rsv;
rc->reserved_bytes += num_bytes;
@@ -2323,10 +2145,11 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
* If we get an enospc just kick back -EAGAIN so we know to drop the
* transaction and try to refill when we can flush all the things.
*/
- ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
- BTRFS_RESERVE_FLUSH_LIMIT);
+ ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
+ BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
+ u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
+
while (tmp <= rc->reserved_bytes)
tmp <<= 1;
/*
@@ -2344,6 +2167,16 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
return 0;
}
+static int reserve_metadata_space(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc,
+ struct btrfs_backref_node *node)
+{
+ u64 num_bytes;
+
+ num_bytes = calcu_metadata_size(rc, node) * 2;
+ return refill_metadata_space(trans, rc, num_bytes);
+}
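For scale, the -ENOSPC fallback in refill_metadata_space() grows a candidate reservation size geometrically: starting from a fixed floor it doubles until it exceeds what is already reserved, before kicking back -EAGAIN as the comment above describes. A small extraction of that sizing logic; the 64-node floor mirrors RELOCATION_RESERVED_NODES and is an assumption here:

#include <linux/types.h>

static u64 demo_next_refill(u64 nodesize, u64 already_reserved)
{
	u64 tmp = nodesize * 64;	/* assumed RELOCATION_RESERVED_NODES */

	while (tmp <= already_reserved)
		tmp <<= 1;
	return tmp;
}

/* Example: nodesize = 16 KiB with 3 MiB already reserved walks 1 MiB, 2 MiB,
 * 4 MiB, so the next attempt targets 4 MiB.
 */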
+
/*
* relocate a block tree, and then update pointers in upper level
* blocks that reference the block to point to the new location.
@@ -2376,8 +2209,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
path->lowest_level = node->level + 1;
rc->backref_cache.path[node->level] = node;
list_for_each_entry(edge, &node->upper, list[LOWER]) {
- struct btrfs_ref ref = { 0 };
-
cond_resched();
upper = edge->node[UPPER];
@@ -2389,7 +2220,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (upper->eb && !upper->locked) {
if (!lowest) {
- ret = btrfs_bin_search(upper->eb, key, &slot);
+ ret = btrfs_bin_search(upper->eb, 0, key, &slot);
if (ret < 0)
goto next;
BUG_ON(ret);
@@ -2423,7 +2254,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
slot = path->slots[upper->level];
btrfs_release_path(path);
} else {
- ret = btrfs_bin_search(upper->eb, key, &slot);
+ ret = btrfs_bin_search(upper->eb, 0, key, &slot);
if (ret < 0)
goto next;
BUG_ON(ret);
@@ -2431,7 +2262,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (lowest) {
- if (bytenr != node->bytenr) {
+ if (unlikely(bytenr != node->bytenr)) {
btrfs_err(root->fs_info,
"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
bytenr, node->bytenr, slot,
@@ -2465,23 +2296,28 @@ static int do_relocation(struct btrfs_trans_handle *trans,
*/
ASSERT(node->eb == eb);
} else {
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = node->eb->start,
+ .num_bytes = blocksize,
+ .parent = upper->eb->start,
+ .owning_root = btrfs_header_owner(upper->eb),
+ .ref_root = btrfs_header_owner(upper->eb),
+ };
+
btrfs_set_node_blockptr(upper->eb, slot,
node->eb->start);
btrfs_set_node_ptr_generation(upper->eb, slot,
trans->transid);
- btrfs_mark_buffer_dirty(upper->eb);
+ btrfs_mark_buffer_dirty(trans, upper->eb);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
- node->eb->start, blocksize,
- upper->eb->start);
btrfs_init_tree_ref(&ref, node->level,
- btrfs_header_owner(upper->eb),
- root->root_key.objectid, false);
+ btrfs_root_id(root), false);
ret = btrfs_inc_extent_ref(trans, &ref);
if (!ret)
ret = btrfs_drop_subtree(trans, root, eb,
upper->eb);
- if (ret)
+ if (unlikely(ret))
btrfs_abort_transaction(trans, ret);
}
next:
@@ -2495,7 +2331,7 @@ next:
if (!ret && node->pending) {
btrfs_backref_drop_node_buffer(node);
- list_move_tail(&node->list, &rc->backref_cache.changed);
+ list_del_init(&node->list);
node->pending = 0;
}
@@ -2532,8 +2368,8 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans,
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
while (!list_empty(&cache->pending[level])) {
- node = list_entry(cache->pending[level].next,
- struct btrfs_backref_node, list);
+ node = list_first_entry(&cache->pending[level],
+ struct btrfs_backref_node, list);
list_move_tail(&node->list, &list);
BUG_ON(!node->pending);
@@ -2571,8 +2407,8 @@ static void update_processed_blocks(struct reloc_control *rc,
if (list_empty(&next->upper))
break;
- edge = list_entry(next->upper.next,
- struct btrfs_backref_edge, list[LOWER]);
+ edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
+ list[LOWER]);
edges[index++] = edge;
next = edge->node[UPPER];
}
@@ -2584,8 +2420,8 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
u32 blocksize = rc->extent_root->fs_info->nodesize;
- if (test_range_bit(&rc->processed_blocks, bytenr,
- bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
+ if (btrfs_test_range_bit(&rc->processed_blocks, bytenr,
+ bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
return 1;
return 0;
}
@@ -2593,13 +2429,17 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
struct tree_block *block)
{
+ struct btrfs_tree_parent_check check = {
+ .level = block->level,
+ .owner_root = block->owner,
+ .transid = block->key.offset
+ };
struct extent_buffer *eb;
- eb = read_tree_block(fs_info, block->bytenr, block->owner,
- block->key.offset, block->level, NULL);
- if (IS_ERR(eb)) {
+ eb = read_tree_block(fs_info, block->bytenr, &check);
+ if (IS_ERR(eb))
return PTR_ERR(eb);
- } else if (!extent_buffer_uptodate(eb)) {
+ if (unlikely(!extent_buffer_uptodate(eb))) {
free_extent_buffer(eb);
return -EIO;
}
@@ -2608,7 +2448,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
else
btrfs_node_key_to_cpu(eb, &block->key, 0);
free_extent_buffer(eb);
- block->key_ready = 1;
+ block->key_ready = true;
return 0;
}
@@ -2654,8 +2494,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
/*
* This block was the root block of a root, and this is
* the first time we're processing the block and thus it
- * should not have had the ->new_bytenr modified and
- * should have not been included on the changed list.
+ * should not have had the ->new_bytenr modified.
*
* However in the case of corruption we could have
* multiple refs pointing to the same block improperly,
@@ -2665,8 +2504,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
* normal user in the case of corruption.
*/
ASSERT(node->new_bytenr == 0);
- ASSERT(list_empty(&node->list));
- if (node->new_bytenr || !list_empty(&node->list)) {
+ if (unlikely(node->new_bytenr)) {
btrfs_err(root->fs_info,
"bytenr %llu has improper references to it",
node->bytenr);
@@ -2689,17 +2527,12 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
btrfs_put_root(node->root);
node->root = btrfs_grab_root(root);
ASSERT(node->root);
- list_add_tail(&node->list, &rc->backref_cache.changed);
} else {
- path->lowest_level = node->level;
- if (root == root->fs_info->chunk_root)
- btrfs_reserve_chunk_metadata(trans, false);
- ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- btrfs_release_path(path);
- if (root == root->fs_info->chunk_root)
- btrfs_trans_release_chunk_metadata(trans);
- if (ret > 0)
- ret = 0;
+ btrfs_err(root->fs_info,
+ "bytenr %llu resolved to a non-shareable root",
+ node->bytenr);
+ ret = -EUCLEAN;
+ goto out;
}
if (!ret)
update_processed_blocks(rc, node);
@@ -2707,11 +2540,50 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
ret = do_relocation(trans, rc, node, key, path, 1);
}
out:
- if (ret || node->level == 0 || node->cowonly)
+ if (ret || node->level == 0)
btrfs_backref_cleanup_node(&rc->backref_cache, node);
return ret;
}
+static int relocate_cowonly_block(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc, struct tree_block *block,
+ struct btrfs_path *path)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root;
+ u64 num_bytes;
+ int nr_levels;
+ int ret;
+
+ root = btrfs_get_fs_root(fs_info, block->owner, true);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
+
+ num_bytes = fs_info->nodesize * nr_levels;
+ ret = refill_metadata_space(trans, rc, num_bytes);
+ if (ret) {
+ btrfs_put_root(root);
+ return ret;
+ }
+ path->lowest_level = block->level;
+ if (root == root->fs_info->chunk_root)
+ btrfs_reserve_chunk_metadata(trans, false);
+
+ ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1);
+ path->lowest_level = 0;
+ btrfs_release_path(path);
+
+ if (root == root->fs_info->chunk_root)
+ btrfs_trans_release_chunk_metadata(trans);
+ if (ret > 0)
+ ret = 0;
+ btrfs_put_root(root);
+
+ return ret;
+}
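relocate_cowonly_block() sizes its reservation from how deep the CoW must go: one node per level from the root's current level down to the target block, inclusive. A worked example under assumed numbers:

/* Assumed numbers: root node at level 2, target block at level 0,
 * nodesize = 16 KiB:
 *
 *	nr_levels = max(2 - 0, 0) + 1 = 3
 *	num_bytes = 3 * 16 KiB       = 48 KiB reserved before the CoW search
 */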
+
/*
* relocate a list of blocks
*/
@@ -2724,12 +2596,11 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct tree_block *block;
struct tree_block *next;
- int ret;
- int err = 0;
+ int ret = 0;
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out_free_blocks;
}
@@ -2744,112 +2615,79 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Get first keys */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready) {
- err = get_tree_block_key(fs_info, block);
- if (err)
+ ret = get_tree_block_key(fs_info, block);
+ if (ret)
goto out_free_path;
}
}
/* Do tree relocation */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
- node = build_backref_tree(rc, &block->key,
+ /*
+ * For COWonly blocks, or the data reloc tree, we only need to
+ * COW down to the block, there's no need to generate a backref
+ * tree.
+ */
+ if (block->owner &&
+ (!btrfs_is_fstree(block->owner) ||
+ block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
+ ret = relocate_cowonly_block(trans, rc, block, path);
+ if (ret)
+ break;
+ continue;
+ }
+
+ node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
- err = PTR_ERR(node);
+ ret = PTR_ERR(node);
goto out;
}
ret = relocate_tree_block(trans, rc, node, &block->key,
path);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
break;
- }
}
out:
- err = finish_pending_nodes(trans, rc, path, err);
+ ret = finish_pending_nodes(trans, rc, path, ret);
out_free_path:
btrfs_free_path(path);
out_free_blocks:
free_block_list(blocks);
- return err;
+ return ret;
}
-static noinline_for_stack int prealloc_file_extent_cluster(
- struct btrfs_inode *inode,
- struct file_extent_cluster *cluster)
+static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc)
{
+ const struct file_extent_cluster *cluster = &rc->cluster;
+ struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
u64 alloc_hint = 0;
u64 start;
u64 end;
- u64 offset = inode->index_cnt;
+ u64 offset = inode->reloc_block_group_start;
u64 num_bytes;
int nr;
int ret = 0;
- u64 i_size = i_size_read(&inode->vfs_inode);
u64 prealloc_start = cluster->start - offset;
u64 prealloc_end = cluster->end - offset;
u64 cur_offset = prealloc_start;
/*
- * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
- * This means the range [i_size, PAGE_END + 1) is filled with zeros by
- * btrfs_do_readpage() call of previously relocated file cluster.
+ * For the blocksize < folio size case (either bs < page size or large
+ * folios), all blocks beyond i_size are filled with zeros.
*
- * If the current cluster starts in the above range, btrfs_do_readpage()
- * will skip the read, and relocate_one_page() will later writeback
+ * If the current cluster covers the above range, btrfs_do_readpage()
+ * will skip the read, and relocate_one_folio() will later writeback
* the padding zeros as new data, causing data corruption.
*
- * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
+ * Here we have to invalidate the cache covering our cluster.
*/
- if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
- struct address_space *mapping = inode->vfs_inode.i_mapping;
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- const u32 sectorsize = fs_info->sectorsize;
- struct page *page;
-
- ASSERT(sectorsize < PAGE_SIZE);
- ASSERT(IS_ALIGNED(i_size, sectorsize));
-
- /*
- * Subpage can't handle page with DIRTY but without UPTODATE
- * bit as it can lead to the following deadlock:
- *
- * btrfs_readpage()
- * | Page already *locked*
- * |- btrfs_lock_and_flush_ordered_range()
- * |- btrfs_start_ordered_extent()
- * |- extent_write_cache_pages()
- * |- lock_page()
- * We try to lock the page we already hold.
- *
- * Here we just writeback the whole data reloc inode, so that
- * we will be ensured to have no dirty range in the page, and
- * are safe to clear the uptodate bits.
- *
- * This shouldn't cause too much overhead, as we need to write
- * the data back anyway.
- */
- ret = filemap_write_and_wait(mapping);
- if (ret < 0)
- return ret;
-
- clear_extent_bits(&inode->io_tree, i_size,
- round_up(i_size, PAGE_SIZE) - 1,
- EXTENT_UPTODATE);
- page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
- /*
- * If page is freed we don't need to do anything then, as we
- * will re-read the whole page anyway.
- */
- if (page) {
- btrfs_subpage_clear_uptodate(fs_info, page, i_size,
- round_up(i_size, PAGE_SIZE) - i_size);
- unlock_page(page);
- put_page(page);
- }
- }
+ ret = filemap_invalidate_inode(&inode->vfs_inode, true, prealloc_start,
+ prealloc_end);
+ if (ret < 0)
+ return ret;
BUG_ON(cluster->start != cluster->boundary[0]);
ret = btrfs_alloc_data_chunk_ondemand(inode,
@@ -2857,68 +2695,67 @@ static noinline_for_stack int prealloc_file_extent_cluster(
if (ret)
return ret;
- btrfs_inode_lock(&inode->vfs_inode, 0);
+ btrfs_inode_lock(inode, 0);
for (nr = 0; nr < cluster->nr; nr++) {
+ struct extent_state *cached_state = NULL;
+
start = cluster->boundary[nr] - offset;
if (nr + 1 < cluster->nr)
end = cluster->boundary[nr + 1] - 1 - offset;
else
end = cluster->end - offset;
- lock_extent(&inode->io_tree, start, end);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
num_bytes = end + 1 - start;
ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
num_bytes, num_bytes,
end + 1, &alloc_hint);
cur_offset = end + 1;
- unlock_extent(&inode->io_tree, start, end);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
if (ret)
break;
}
- btrfs_inode_unlock(&inode->vfs_inode, 0);
+ btrfs_inode_unlock(inode, 0);
if (cur_offset < prealloc_end)
- btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
- prealloc_end + 1 - cur_offset);
+ btrfs_free_reserved_data_space_noquota(inode,
+ prealloc_end + 1 - cur_offset);
return ret;
}
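/*
 * Illustrative sketch (editorial, not part of the patch): the data reloc
 * inode maps its block group linearly, so a logical bytenr inside the block
 * group becomes a file offset by subtracting reloc_block_group_start, which
 * is exactly the arithmetic behind prealloc_start/prealloc_end above.  The
 * helper name below is hypothetical.
 */
static inline u64 reloc_bytenr_to_file_offset(const struct btrfs_inode *inode,
					      u64 bytenr)
{
	/* E.g. block group at 1GiB, extent at 1GiB + 8MiB -> file offset 8MiB. */
	return bytenr - inode->reloc_block_group_start;
}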
-static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
- u64 start, u64 end, u64 block_start)
+static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc)
{
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
struct extent_map *em;
+ struct extent_state *cached_state = NULL;
+ u64 offset = inode->reloc_block_group_start;
+ u64 start = rc->cluster.start - offset;
+ u64 end = rc->cluster.end - offset;
int ret = 0;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em)
return -ENOMEM;
em->start = start;
em->len = end + 1 - start;
- em->block_len = em->len;
- em->block_start = block_start;
- set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ em->disk_bytenr = rc->cluster.start;
+ em->disk_num_bytes = em->len;
+ em->ram_bytes = em->len;
+ em->flags |= EXTENT_FLAG_PINNED;
+
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
+ ret = btrfs_replace_extent_map_range(inode, em, false);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_free_extent_map(em);
- lock_extent(&BTRFS_I(inode)->io_tree, start, end);
- while (1) {
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
- write_unlock(&em_tree->lock);
- if (ret != -EEXIST) {
- free_extent_map(em);
- break;
- }
- btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
- }
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
return ret;
}
/*
* Allow error injection to test balance/relocation cancellation
*/
-noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
+noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
{
return atomic_read(&fs_info->balance_cancel_req) ||
atomic_read(&fs_info->reloc_cancel_req) ||
@@ -2926,7 +2763,7 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
-static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
+static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
int cluster_nr)
{
/* Last extent, use cluster end directly */
@@ -2937,103 +2774,138 @@ static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
return cluster->boundary[cluster_nr + 1] - 1;
}
-static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
- struct file_extent_cluster *cluster,
- int *cluster_nr, unsigned long page_index)
+static int relocate_one_folio(struct reloc_control *rc,
+ struct file_ra_state *ra,
+ int *cluster_nr, u64 *file_offset_ret)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- u64 offset = BTRFS_I(inode)->index_cnt;
- const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
+ const struct file_extent_cluster *cluster = &rc->cluster;
+ struct inode *inode = rc->data_inode;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ const u64 orig_file_offset = *file_offset_ret;
+ u64 offset = BTRFS_I(inode)->reloc_block_group_start;
+ const pgoff_t last_index = (cluster->end - offset) >> PAGE_SHIFT;
+ const pgoff_t index = orig_file_offset >> PAGE_SHIFT;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
- struct page *page;
- u64 page_start;
- u64 page_end;
+ struct folio *folio;
+ u64 folio_start;
+ u64 folio_end;
u64 cur;
int ret;
+ const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
+
+ ASSERT(index <= last_index);
+again:
+ folio = filemap_lock_folio(inode->i_mapping, index);
+ if (IS_ERR(folio)) {
- ASSERT(page_index <= last_index);
- page = find_lock_page(inode->i_mapping, page_index);
- if (!page) {
- page_cache_sync_readahead(inode->i_mapping, ra, NULL,
- page_index, last_index + 1 - page_index);
- page = find_or_create_page(inode->i_mapping, page_index, mask);
- if (!page)
- return -ENOMEM;
+ /*
+ * On relocation we're doing readahead on the relocation inode,
+ * but if the filesystem is backed by a RAID stripe tree we can
+ * get ENOENT (e.g. due to preallocated extents not being
+ * mapped in the RST) from the lookup.
+ *
+ * But readahead doesn't handle the error and submits invalid
+ * reads to the device, causing assertion failures.
+ */
+ if (!use_rst)
+ page_cache_sync_readahead(inode->i_mapping, ra, NULL,
+ index, last_index + 1 - index);
+ folio = __filemap_get_folio(inode->i_mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mask);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
}
- ret = set_page_extent_mapped(page);
- if (ret < 0)
- goto release_page;
- if (PageReadahead(page))
- page_cache_async_readahead(inode->i_mapping, ra, NULL, page,
- page_index, last_index + 1 - page_index);
+ if (folio_test_readahead(folio) && !use_rst)
+ page_cache_async_readahead(inode->i_mapping, ra, NULL,
+ folio, last_index + 1 - index);
- if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (unlikely(!folio_test_uptodate(folio))) {
ret = -EIO;
- goto release_page;
+ goto release_folio;
+ }
+ if (folio->mapping != inode->i_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
}
}
- page_start = page_offset(page);
- page_end = page_start + PAGE_SIZE - 1;
+ /*
+ * We could have lost folio private when we dropped the lock to read the
+ * folio above, so make sure we call set_folio_extent_mapped() here to have
+ * any of the subpage blocksize state we need in place.
+ */
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0)
+ goto release_folio;
+
+ folio_start = folio_pos(folio);
+ folio_end = folio_start + folio_size(folio) - 1;
/*
* Start from the cluster, as for subpage case, the cluster can start
- * inside the page.
+ * inside the folio.
*/
- cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
- while (cur <= page_end) {
+ cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
+ while (cur <= folio_end) {
+ struct extent_state *cached_state = NULL;
u64 extent_start = cluster->boundary[*cluster_nr] - offset;
u64 extent_end = get_cluster_boundary_end(cluster,
*cluster_nr) - offset;
- u64 clamped_start = max(page_start, extent_start);
- u64 clamped_end = min(page_end, extent_end);
+ u64 clamped_start = max(folio_start, extent_start);
+ u64 clamped_end = min(folio_end, extent_end);
u32 clamped_len = clamped_end + 1 - clamped_start;
/* Reserve metadata for this range */
ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
- clamped_len);
+ clamped_len, clamped_len,
+ false);
if (ret)
- goto release_page;
+ goto release_folio;
/* Mark the range delalloc and dirty for later writeback */
- lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
+ clamped_end, &cached_state);
ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
- clamped_end, 0, NULL);
+ clamped_end, 0, &cached_state);
if (ret) {
- clear_extent_bits(&BTRFS_I(inode)->io_tree,
- clamped_start, clamped_end,
- EXTENT_LOCKED | EXTENT_BOUNDARY);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ clamped_start, clamped_end,
+ EXTENT_LOCKED | EXTENT_BOUNDARY,
+ &cached_state);
btrfs_delalloc_release_metadata(BTRFS_I(inode),
clamped_len, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
clamped_len);
- goto release_page;
+ goto release_folio;
}
- btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
+ btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
/*
- * Set the boundary if it's inside the page.
+ * Set the boundary if it's inside the folio.
* Data relocation requires the destination extents to have the
* same size as the source.
* EXTENT_BOUNDARY bit prevents current extent from being merged
* with previous extent.
*/
if (in_range(cluster->boundary[*cluster_nr] - offset,
- page_start, PAGE_SIZE)) {
+ folio_start, folio_size(folio))) {
u64 boundary_start = cluster->boundary[*cluster_nr] -
offset;
u64 boundary_end = boundary_start +
fs_info->sectorsize - 1;
- set_extent_bits(&BTRFS_I(inode)->io_tree,
- boundary_start, boundary_end,
- EXTENT_BOUNDARY);
+ btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree,
+ boundary_start, boundary_end,
+ EXTENT_BOUNDARY, NULL);
}
- unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
+ &cached_state);
btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
cur += clamped_len;
@@ -3045,28 +2917,29 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
break;
}
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(fs_info);
if (btrfs_should_cancel_balance(fs_info))
ret = -ECANCELED;
+ *file_offset_ret = folio_end + 1;
return ret;
-release_page:
- unlock_page(page);
- put_page(page);
+release_folio:
+ folio_unlock(folio);
+ folio_put(folio);
return ret;
}
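/*
 * Worked example (editorial, illustrative only): with a 4KiB sectorsize and a
 * cluster boundary at file offset 1MiB, only the range [1MiB, 1MiB + 4095]
 * gets EXTENT_BOUNDARY.  That single-sector marker is enough to stop delalloc
 * from merging the new extent with the previous one, keeping destination
 * extent sizes equal to the source extents as required by data relocation.
 */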
-static int relocate_file_extent_cluster(struct inode *inode,
- struct file_extent_cluster *cluster)
+static int relocate_file_extent_cluster(struct reloc_control *rc)
{
- u64 offset = BTRFS_I(inode)->index_cnt;
- unsigned long index;
- unsigned long last_index;
- struct file_ra_state *ra;
+ struct inode *inode = rc->data_inode;
+ const struct file_extent_cluster *cluster = &rc->cluster;
+ u64 offset = BTRFS_I(inode)->reloc_block_group_start;
+ u64 cur_file_offset = cluster->start - offset;
+ struct file_ra_state AUTO_KFREE(ra);
int cluster_nr = 0;
int ret = 0;
@@ -3077,43 +2950,73 @@ static int relocate_file_extent_cluster(struct inode *inode,
if (!ra)
return -ENOMEM;
- ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
+ ret = prealloc_file_extent_cluster(rc);
if (ret)
- goto out;
+ return ret;
file_ra_state_init(ra, inode->i_mapping);
- ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
- cluster->end - offset, cluster->start);
+ ret = setup_relocation_extent_mapping(rc);
if (ret)
- goto out;
+ return ret;
- last_index = (cluster->end - offset) >> PAGE_SHIFT;
- for (index = (cluster->start - offset) >> PAGE_SHIFT;
- index <= last_index && !ret; index++)
- ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
+ while (cur_file_offset < cluster->end - offset) {
+ ret = relocate_one_folio(rc, ra, &cluster_nr, &cur_file_offset);
+ if (ret)
+ break;
+ }
if (ret == 0)
WARN_ON(cluster_nr != cluster->nr);
-out:
- kfree(ra);
return ret;
}
-static noinline_for_stack
-int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
- struct file_extent_cluster *cluster)
+static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
+ const struct btrfs_key *extent_key)
{
+ struct inode *inode = rc->data_inode;
+ struct file_extent_cluster *cluster = &rc->cluster;
int ret;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
- ret = relocate_file_extent_cluster(inode, cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret)
return ret;
cluster->nr = 0;
}
- if (!cluster->nr)
+ /*
+ * Under simple quotas, we set root->relocation_src_root when we find
+ * the extent. If adjacent extents have different owners, we can't merge
+ * them while relocating. Handle this by storing the owning root that
+ * started a cluster and, if we see an extent from a different root,
+ * breaking cluster formation (just like the above case of non-adjacent
+ * extents).
+ *
+ * Without simple quotas, relocation_src_root is always 0, so we should
+ * never see a mismatch, and it should have no effect on relocation
+ * clusters.
+ */
+ if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
+ u64 tmp = root->relocation_src_root;
+
+ /*
+ * root->relocation_src_root is the state that actually affects
+ * the preallocation we do here, so set it to the root owning
+ * the cluster we need to relocate.
+ */
+ root->relocation_src_root = cluster->owning_root;
+ ret = relocate_file_extent_cluster(rc);
+ if (ret)
+ return ret;
+ cluster->nr = 0;
+ /* And reset it back for the current extent's owning root. */
+ root->relocation_src_root = tmp;
+ }
+
+ if (!cluster->nr) {
cluster->start = extent_key->objectid;
+ cluster->owning_root = root->relocation_src_root;
+ }
else
BUG_ON(cluster->nr >= MAX_EXTENTS);
cluster->end = extent_key->objectid + extent_key->offset - 1;
@@ -3121,7 +3024,7 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
cluster->nr++;
if (cluster->nr >= MAX_EXTENTS) {
- ret = relocate_file_extent_cluster(inode, cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret)
return ret;
cluster->nr = 0;
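/*
 * Illustrative example for the owning-root check above (editorial, simple
 * quotas only, subvolume ids are hypothetical): if adjacent extents in the
 * block group are owned by subvolumes 256 and 257, the cluster built for the
 * 256-owned extents is flushed with relocation_src_root temporarily set back
 * to 256 before a new cluster is started for the 257-owned extent, so the
 * preallocated replacement extents are attributed to the correct owner.
 */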
@@ -3134,7 +3037,7 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
* the major work is getting the generation and level of the block
*/
static int add_tree_block(struct reloc_control *rc,
- struct btrfs_key *extent_key,
+ const struct btrfs_key *extent_key,
struct btrfs_path *path,
struct rb_root *blocks)
{
@@ -3149,7 +3052,7 @@ static int add_tree_block(struct reloc_control *rc,
u64 owner = 0;
eb = path->nodes[0];
- item_size = btrfs_item_size_nr(eb, path->slots[0]);
+ item_size = btrfs_item_size(eb, path->slots[0]);
if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
item_size >= sizeof(*ei) + sizeof(*bi)) {
@@ -3198,12 +3101,13 @@ static int add_tree_block(struct reloc_control *rc,
if (type == BTRFS_TREE_BLOCK_REF_KEY)
owner = btrfs_extent_inline_ref_offset(eb, iref);
}
- } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
- btrfs_print_v0_err(eb->fs_info);
- btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
- return -EINVAL;
} else {
- BUG();
+ btrfs_print_leaf(eb);
+ btrfs_err(rc->block_group->fs_info,
+ "unrecognized tree backref at tree block %llu slot %u",
+ eb->start, path->slots[0]);
+ btrfs_release_path(path);
+ return -EUCLEAN;
}
btrfs_release_path(path);
@@ -3218,10 +3122,10 @@ static int add_tree_block(struct reloc_control *rc,
block->key.objectid = rc->extent_root->fs_info->nodesize;
block->key.offset = generation;
block->level = level;
- block->key_ready = 0;
+ block->key_ready = false;
block->owner = owner;
- rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
+ rb_node = rb_simple_insert(blocks, &block->simple_node);
if (rb_node)
btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
-EEXIST);
@@ -3237,7 +3141,7 @@ static int __add_tree_block(struct reloc_control *rc,
struct rb_root *blocks)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret;
bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
@@ -3261,11 +3165,11 @@ again:
key.offset = blocksize;
}
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0 && skinny) {
if (path->slots[0]) {
@@ -3292,31 +3196,29 @@ again:
"tree block extent item (%llu) is not found in extent tree",
bytenr);
WARN_ON(1);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- ret = add_tree_block(rc, &key, path, blocks);
-out:
- btrfs_free_path(path);
- return ret;
+ return add_tree_block(rc, &key, path, blocks);
}
-static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group *block_group,
+static int delete_block_group_cache(struct btrfs_block_group *block_group,
struct inode *inode,
u64 ino)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
+ struct btrfs_inode *btrfs_inode;
int ret = 0;
if (inode)
goto truncate;
- inode = btrfs_iget(fs_info->sb, ino, root);
- if (IS_ERR(inode))
+ btrfs_inode = btrfs_iget(ino, root);
+ if (IS_ERR(btrfs_inode))
return -ENOENT;
+ inode = &btrfs_inode->vfs_inode;
truncate:
ret = btrfs_check_trunc_cache_free_space(fs_info,
@@ -3376,38 +3278,40 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
}
if (!found)
return -ENOENT;
- ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
- space_cache_ino);
+ ret = delete_block_group_cache(block_group, NULL, space_cache_ino);
return ret;
}
/*
* helper to find all tree blocks that reference a given data extent
*/
-static noinline_for_stack
-int add_data_references(struct reloc_control *rc,
- struct btrfs_key *extent_key,
- struct btrfs_path *path,
- struct rb_root *blocks)
+static noinline_for_stack int add_data_references(struct reloc_control *rc,
+ const struct btrfs_key *extent_key,
+ struct btrfs_path *path,
+ struct rb_root *blocks)
{
- struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct ulist *leaves = NULL;
+ struct btrfs_backref_walk_ctx ctx = { 0 };
struct ulist_iterator leaf_uiter;
struct ulist_node *ref_node = NULL;
- const u32 blocksize = fs_info->nodesize;
+ const u32 blocksize = rc->extent_root->fs_info->nodesize;
int ret = 0;
btrfs_release_path(path);
- ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
- 0, &leaves, NULL, true);
+
+ ctx.bytenr = extent_key->objectid;
+ ctx.skip_inode_ref_list = true;
+ ctx.fs_info = rc->extent_root->fs_info;
+
+ ret = btrfs_find_all_leafs(&ctx);
if (ret < 0)
return ret;
ULIST_ITER_INIT(&leaf_uiter);
- while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
+ while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
+ struct btrfs_tree_parent_check check = { 0 };
struct extent_buffer *eb;
- eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL);
+ eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
@@ -3423,7 +3327,7 @@ int add_data_references(struct reloc_control *rc,
}
if (ret < 0)
free_block_list(blocks);
- ulist_free(leaves);
+ ulist_free(ctx.refs);
return ret;
}
@@ -3442,6 +3346,8 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
last = rc->block_group->start + rc->block_group->length;
while (1) {
+ bool block_found;
+
cond_resched();
if (rc->search_start >= last) {
ret = 1;
@@ -3452,8 +3358,8 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
0, 0);
if (ret < 0)
@@ -3492,11 +3398,11 @@ next:
goto next;
}
- ret = find_first_extent_bit(&rc->processed_blocks,
- key.objectid, &start, &end,
- EXTENT_DIRTY, NULL);
+ block_found = btrfs_find_first_extent_bit(&rc->processed_blocks,
+ key.objectid, &start, &end,
+ EXTENT_DIRTY, NULL);
- if (ret == 0 && start <= key.objectid) {
+ if (block_found && start <= key.objectid) {
btrfs_release_path(path);
rc->search_start = end + 1;
} else {
@@ -3550,13 +3456,13 @@ int prepare_to_relocate(struct reloc_control *rc)
rc->reserved_bytes = 0;
rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
RELOCATION_RESERVED_NODES;
- ret = btrfs_block_rsv_refill(rc->extent_root,
+ ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
rc->block_rsv, rc->block_rsv->size,
BTRFS_RESERVE_FLUSH_ALL);
if (ret)
return ret;
- rc->create_reloc_tree = 1;
+ rc->create_reloc_tree = true;
set_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root);
@@ -3569,7 +3475,12 @@ int prepare_to_relocate(struct reloc_control *rc)
*/
return PTR_ERR(trans);
}
- return btrfs_commit_transaction(trans);
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
+ unset_reloc_control(rc);
+
+ return ret;
}
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
@@ -3578,7 +3489,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
struct btrfs_trans_handle *trans = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_extent_item *ei;
u64 flags;
int ret;
@@ -3598,9 +3509,9 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
while (1) {
rc->reserved_bytes = 0;
- ret = btrfs_block_rsv_refill(rc->extent_root,
- rc->block_rsv, rc->block_rsv->size,
- BTRFS_RESERVE_FLUSH_ALL);
+ ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
+ rc->block_rsv->size,
+ BTRFS_RESERVE_FLUSH_ALL);
if (ret) {
err = ret;
break;
@@ -3613,11 +3524,9 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
break;
}
restart:
- if (update_backref_cache(trans, &rc->backref_cache)) {
- btrfs_end_transaction(trans);
- trans = NULL;
- continue;
- }
+ if (rc->backref_cache.last_trans != trans->transid)
+ btrfs_backref_release_cache(&rc->backref_cache);
+ rc->backref_cache.last_trans = trans->transid;
ret = find_next_extent(rc, path, &key);
if (ret < 0)
@@ -3631,6 +3540,21 @@ restart:
struct btrfs_extent_item);
flags = btrfs_extent_flags(path->nodes[0], ei);
+ /*
+ * If we are relocating a simple quota owned extent item, we
+ * need to note the owner on the reloc data root so that when
+ * we allocate the replacement item, we can attribute it to the
+ * correct eventual owner (rather than the reloc data root).
+ */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
+ struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
+ u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
+ path->nodes[0],
+ path->slots[0]);
+
+ root->relocation_src_root = owning_root_id;
+ }
+
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
ret = add_tree_block(rc, &key, path, &blocks);
} else if (rc->stage == UPDATE_DATA_PTRS &&
@@ -3663,9 +3587,8 @@ restart:
if (rc->stage == MOVE_DATA_EXTENTS &&
(flags & BTRFS_EXTENT_FLAG_DATA)) {
- rc->found_file_extent = 1;
- ret = relocate_data_extent(rc->data_inode,
- &key, &rc->cluster);
+ rc->found_file_extent = true;
+ ret = relocate_data_extent(rc, &key);
if (ret < 0) {
err = ret;
break;
@@ -3686,7 +3609,7 @@ restart:
}
btrfs_release_path(path);
- clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
+ btrfs_clear_extent_bit(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, NULL);
if (trans) {
btrfs_end_transaction_throttle(trans);
@@ -3694,13 +3617,12 @@ restart:
}
if (!err) {
- ret = relocate_file_extent_cluster(rc->data_inode,
- &rc->cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret < 0)
err = ret;
}
- rc->create_reloc_tree = 0;
+ rc->create_reloc_tree = false;
set_reloc_control(rc);
btrfs_backref_release_cache(&rc->backref_cache);
@@ -3718,7 +3640,7 @@ restart:
merge_reloc_roots(rc);
- rc->merge_reloc_tree = 0;
+ rc->merge_reloc_tree = false;
unset_reloc_control(rc);
btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
@@ -3736,14 +3658,13 @@ out_free:
if (ret < 0 && !err)
err = ret;
btrfs_free_block_rsv(fs_info, rc->block_rsv);
- btrfs_free_path(path);
return err;
}
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_inode_item *item;
struct extent_buffer *leaf;
int ret;
@@ -3754,7 +3675,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_inode(trans, root, path, objectid);
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
@@ -3764,16 +3685,13 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
- btrfs_mark_buffer_dirty(leaf);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static void delete_orphan_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret = 0;
@@ -3796,22 +3714,21 @@ static void delete_orphan_inode(struct btrfs_trans_handle *trans,
out:
if (ret)
btrfs_abort_transaction(trans, ret);
- btrfs_free_path(path);
}
/*
* helper to create inode for data relocation.
* the inode is in data relocation tree and its link count is 0
*/
-static noinline_for_stack
-struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group *group)
+static noinline_for_stack struct inode *create_reloc_inode(
+ const struct btrfs_block_group *group)
{
- struct inode *inode = NULL;
+ struct btrfs_fs_info *fs_info = group->fs_info;
+ struct btrfs_inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root;
u64 objectid;
- int err = 0;
+ int ret = 0;
root = btrfs_grab_root(fs_info->data_reloc_root);
trans = btrfs_start_transaction(root, 6);
@@ -3820,70 +3737,58 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
return ERR_CAST(trans);
}
- err = btrfs_get_free_objectid(root, &objectid);
- if (err)
+ ret = btrfs_get_free_objectid(root, &objectid);
+ if (ret)
goto out;
- err = __insert_orphan_inode(trans, root, objectid);
- if (err)
+ ret = __insert_orphan_inode(trans, root, objectid);
+ if (ret)
goto out;
- inode = btrfs_iget(fs_info->sb, objectid, root);
+ inode = btrfs_iget(objectid, root);
if (IS_ERR(inode)) {
delete_orphan_inode(trans, root, objectid);
- err = PTR_ERR(inode);
+ ret = PTR_ERR(inode);
inode = NULL;
goto out;
}
- BTRFS_I(inode)->index_cnt = group->start;
+ inode->reloc_block_group_start = group->start;
- err = btrfs_orphan_add(trans, BTRFS_I(inode));
+ ret = btrfs_orphan_add(trans, inode);
out:
btrfs_put_root(root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- if (err) {
+ if (ret) {
if (inode)
- iput(inode);
- inode = ERR_PTR(err);
+ iput(&inode->vfs_inode);
+ return ERR_PTR(ret);
}
- return inode;
+ return &inode->vfs_inode;
}
/*
* Mark start of chunk relocation that is cancellable. Check if the cancellation
* has been requested meanwhile and don't start in that case.
+ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
*
* Return:
* 0 success
* -EINPROGRESS operation is already in progress, that's probably a bug
* -ECANCELED cancellation request was set before the operation started
- * -EAGAIN can not start because there are ongoing send operations
*/
static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
{
- spin_lock(&fs_info->send_reloc_lock);
- if (fs_info->send_in_progress) {
- btrfs_warn_rl(fs_info,
-"cannot run relocation while send operations are in progress (%d in progress)",
- fs_info->send_in_progress);
- spin_unlock(&fs_info->send_reloc_lock);
- return -EAGAIN;
- }
if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
/* This should not happen */
- spin_unlock(&fs_info->send_reloc_lock);
btrfs_err(fs_info, "reloc already running, cannot start");
return -EINPROGRESS;
}
- spin_unlock(&fs_info->send_reloc_lock);
if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
btrfs_info(fs_info, "chunk relocation canceled on start");
- /*
- * On cancel, clear all requests but let the caller mark
- * the end after cleanup operations.
- */
+ /* On cancel, clear all requests. */
+ clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
atomic_set(&fs_info->reloc_cancel_req, 0);
return -ECANCELED;
}
@@ -3892,15 +3797,15 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
/*
* Mark end of chunk relocation that is cancellable and wake any waiters.
+ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
*/
static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
{
+ ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
/* Requested after start, clear bit first so any waiters can continue */
if (atomic_read(&fs_info->reloc_cancel_req) > 0)
btrfs_info(fs_info, "chunk relocation canceled during operation");
- spin_lock(&fs_info->send_reloc_lock);
clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
- spin_unlock(&fs_info->send_reloc_lock);
atomic_set(&fs_info->reloc_cancel_req, 0);
}
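/*
 * Usage sketch (editorial, illustrative only; the function name is
 * hypothetical): callers pair reloc_chunk_start() with reloc_chunk_end(),
 * and per the notes above must skip reloc_chunk_end() if start failed.
 */
static int example_run_relocation(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = reloc_chunk_start(fs_info);
	if (ret < 0)
		return ret;	/* Start failed: do not call reloc_chunk_end(). */

	/* ... do the cancellable relocation work here ... */

	reloc_chunk_end(fs_info);
	return 0;
}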
@@ -3914,10 +3819,10 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&rc->reloc_roots);
INIT_LIST_HEAD(&rc->dirty_subvol_roots);
- btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
- mapping_tree_init(&rc->reloc_root_tree);
- extent_io_tree_init(fs_info, &rc->processed_blocks,
- IO_TREE_RELOC_BLOCKS, NULL);
+ btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
+ rc->reloc_root_tree.rb_root = RB_ROOT;
+ spin_lock_init(&rc->reloc_root_tree.lock);
+ btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
return rc;
}
@@ -3936,19 +3841,17 @@ static void free_reloc_control(struct reloc_control *rc)
/*
* Print the block group being relocated
*/
-static void describe_relocation(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group *block_group)
+static void describe_relocation(struct btrfs_block_group *block_group)
{
- char buf[128] = {'\0'};
+ char buf[128] = "NONE";
btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
- btrfs_info(fs_info,
- "relocating block group %llu flags %s",
+ btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
block_group->start, buf);
}
-static const char *stage_to_string(int stage)
+static const char *stage_to_string(enum reloc_stage stage)
{
if (stage == MOVE_DATA_EXTENTS)
return "move data extents";
@@ -3960,21 +3863,45 @@ static const char *stage_to_string(int stage)
/*
* function to relocate all extents in a block group.
*/
-int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start,
+ bool verbose)
{
struct btrfs_block_group *bg;
- struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
struct reloc_control *rc;
struct inode *inode;
struct btrfs_path *path;
int ret;
- int rw = 0;
- int err = 0;
+ bool bg_is_ro = false;
+
+ /*
+ * This only gets set if we had a half-deleted snapshot on mount. We
+ * cannot allow relocation to start while we're still trying to clean up
+ * these pending deletions.
+ */
+ ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
+ if (ret)
+ return ret;
+
+ /* We may have been woken up by close_ctree, so bail if we're closing. */
+ if (btrfs_fs_closing(fs_info))
+ return -EINTR;
bg = btrfs_lookup_block_group(fs_info, group_start);
if (!bg)
return -ENOENT;
+ /*
+ * Relocation of a data block group creates ordered extents. Without
+ * sb_start_write(), we can freeze the filesystem while unfinished
+ * ordered extents are left. Such ordered extents can cause a deadlock
+ * e.g. when syncfs() is waiting for their completion but they can't
+ * finish because they block when joining a transaction, as the freeze
+ * locks are held in write mode.
+ */
+ if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
+ ASSERT(sb_write_started(fs_info->sb));
+
if (btrfs_pinned_by_swapfile(fs_info, bg)) {
btrfs_put_block_group(bg);
return -ETXTBSY;
@@ -3987,24 +3914,20 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
}
ret = reloc_chunk_start(fs_info);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_put_bg;
- }
rc->extent_root = extent_root;
rc->block_group = bg;
ret = btrfs_inc_block_group_ro(rc->block_group, true);
- if (ret) {
- err = ret;
+ if (ret)
goto out;
- }
- rw = 1;
+ bg_is_ro = true;
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -4012,41 +3935,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
btrfs_free_path(path);
if (!IS_ERR(inode))
- ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
+ ret = delete_block_group_cache(rc->block_group, inode, 0);
else
ret = PTR_ERR(inode);
- if (ret && ret != -ENOENT) {
- err = ret;
+ if (ret && ret != -ENOENT)
goto out;
- }
- rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
+ rc->data_inode = create_reloc_inode(rc->block_group);
if (IS_ERR(rc->data_inode)) {
- err = PTR_ERR(rc->data_inode);
+ ret = PTR_ERR(rc->data_inode);
rc->data_inode = NULL;
goto out;
}
- describe_relocation(fs_info, rc->block_group);
+ if (verbose)
+ describe_relocation(rc->block_group);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
- btrfs_wait_ordered_roots(fs_info, U64_MAX,
- rc->block_group->start,
- rc->block_group->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
ret = btrfs_zone_finish(rc->block_group);
WARN_ON(ret && ret != -EAGAIN);
while (1) {
- int finishes_stage;
+ enum reloc_stage finishes_stage;
mutex_lock(&fs_info->cleaner_mutex);
ret = relocate_block_group(rc);
mutex_unlock(&fs_info->cleaner_mutex);
- if (ret < 0)
- err = ret;
finishes_stage = rc->stage;
/*
@@ -4059,37 +3977,41 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
* out of the loop if we hit an error.
*/
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
- ret = btrfs_wait_ordered_range(rc->data_inode, 0,
- (u64)-1);
- if (ret)
- err = ret;
+ int wb_ret;
+
+ wb_ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
+ (u64)-1);
+ if (wb_ret && ret == 0)
+ ret = wb_ret;
invalidate_mapping_pages(rc->data_inode->i_mapping,
0, -1);
rc->stage = UPDATE_DATA_PTRS;
}
- if (err < 0)
+ if (ret < 0)
goto out;
if (rc->extents_found == 0)
break;
- btrfs_info(fs_info, "found %llu extents, stage: %s",
- rc->extents_found, stage_to_string(finishes_stage));
+ if (verbose)
+ btrfs_info(fs_info, "found %llu extents, stage: %s",
+ rc->extents_found,
+ stage_to_string(finishes_stage));
}
WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0);
WARN_ON(rc->block_group->used > 0);
out:
- if (err && rw)
+ if (ret && bg_is_ro)
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
+ reloc_chunk_end(fs_info);
out_put_bg:
btrfs_put_block_group(bg);
- reloc_chunk_end(fs_info);
free_reloc_control(rc);
- return err;
+ return ret;
}
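/*
 * Caller-side sketch (editorial, illustrative only; the function name is
 * hypothetical): relocating a data block group must run inside an active
 * write section so the filesystem cannot be frozen while ordered extents are
 * outstanding, which is what the sb_write_started() assertion above checks.
 */
static int example_relocate_data_bg(struct btrfs_fs_info *fs_info, u64 group_start)
{
	int ret;

	sb_start_write(fs_info->sb);
	ret = btrfs_relocate_block_group(fs_info, group_start, true);
	sb_end_write(fs_info->sb);
	return ret;
}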
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
@@ -4121,9 +4043,8 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
* this function resumes merging reloc trees with corresponding fs trees.
* this is important for keeping the sharing of tree blocks
*/
-int btrfs_recover_relocation(struct btrfs_root *root)
+int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(reloc_roots);
struct btrfs_key key;
struct btrfs_root *fs_root;
@@ -4132,8 +4053,8 @@ int btrfs_recover_relocation(struct btrfs_root *root)
struct extent_buffer *leaf;
struct reloc_control *rc = NULL;
struct btrfs_trans_handle *trans;
- int ret;
- int err = 0;
+ int ret2;
+ int ret = 0;
path = btrfs_alloc_path();
if (!path)
@@ -4147,15 +4068,14 @@ int btrfs_recover_relocation(struct btrfs_root *root)
while (1) {
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
path, 0, 0);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
if (ret > 0) {
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
+ ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
@@ -4164,9 +4084,9 @@ int btrfs_recover_relocation(struct btrfs_root *root)
key.type != BTRFS_ROOT_ITEM_KEY)
break;
- reloc_root = btrfs_read_tree_root(root, &key);
+ reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
if (IS_ERR(reloc_root)) {
- err = PTR_ERR(reloc_root);
+ ret = PTR_ERR(reloc_root);
goto out;
}
@@ -4178,15 +4098,12 @@ int btrfs_recover_relocation(struct btrfs_root *root)
reloc_root->root_key.offset, false);
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
- if (ret != -ENOENT) {
- err = ret;
+ if (ret != -ENOENT)
goto out;
- }
ret = mark_garbage_root(reloc_root);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
+ ret = 0;
} else {
btrfs_put_root(fs_root);
}
@@ -4204,31 +4121,28 @@ int btrfs_recover_relocation(struct btrfs_root *root)
rc = alloc_reloc_control(fs_info);
if (!rc) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
ret = reloc_chunk_start(fs_info);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_end;
- }
- rc->extent_root = fs_info->extent_root;
+ rc->extent_root = btrfs_extent_root(fs_info, 0);
set_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_unset;
}
- rc->merge_reloc_tree = 1;
+ rc->merge_reloc_tree = true;
while (!list_empty(&reloc_roots)) {
- reloc_root = list_entry(reloc_roots.next,
- struct btrfs_root, root_list);
+ reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
list_del(&reloc_root->root_list);
if (btrfs_root_refs(&reloc_root->root_item) == 0) {
@@ -4240,15 +4154,15 @@ int btrfs_recover_relocation(struct btrfs_root *root)
fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
false);
if (IS_ERR(fs_root)) {
- err = PTR_ERR(fs_root);
+ ret = PTR_ERR(fs_root);
list_add_tail(&reloc_root->root_list, &reloc_roots);
btrfs_end_transaction(trans);
goto out_unset;
}
- err = __add_reloc_root(reloc_root);
- ASSERT(err != -EEXIST);
- if (err) {
+ ret = __add_reloc_root(reloc_root);
+ ASSERT(ret != -EEXIST);
+ if (ret) {
list_add_tail(&reloc_root->root_list, &reloc_roots);
btrfs_put_root(fs_root);
btrfs_end_transaction(trans);
@@ -4258,8 +4172,8 @@ int btrfs_recover_relocation(struct btrfs_root *root)
btrfs_put_root(fs_root);
}
- err = btrfs_commit_transaction(trans);
- if (err)
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
goto out_unset;
merge_reloc_roots(rc);
@@ -4268,32 +4182,32 @@ int btrfs_recover_relocation(struct btrfs_root *root)
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_clean;
}
- err = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_transaction(trans);
out_clean:
- ret = clean_dirty_subvols(rc);
- if (ret < 0 && !err)
- err = ret;
+ ret2 = clean_dirty_subvols(rc);
+ if (ret2 < 0 && !ret)
+ ret = ret2;
out_unset:
unset_reloc_control(rc);
-out_end:
reloc_chunk_end(fs_info);
+out_end:
free_reloc_control(rc);
out:
free_reloc_roots(&reloc_roots);
btrfs_free_path(path);
- if (err == 0) {
+ if (ret == 0) {
/* cleanup orphan inode in data relocation tree */
fs_root = btrfs_grab_root(fs_info->data_reloc_root);
ASSERT(fs_root);
- err = btrfs_orphan_cleanup(fs_root);
+ ret = btrfs_orphan_cleanup(fs_root);
btrfs_put_root(fs_root);
}
- return err;
+ return ret;
}
/*
@@ -4302,27 +4216,27 @@ out:
* cloning checksum properly handles the nodatasum extents.
* it also saves CPU time to re-calculate the checksum.
*/
-int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
+int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
{
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_ordered_sum *sums;
- struct btrfs_ordered_extent *ordered;
- int ret;
- u64 disk_bytenr;
- u64 new_bytenr;
+ u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
+ struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
LIST_HEAD(list);
+ int ret;
- ordered = btrfs_lookup_ordered_extent(inode, file_pos);
- BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
-
- disk_bytenr = file_pos + inode->index_cnt;
- ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
- disk_bytenr + len - 1, &list, 0);
- if (ret)
- goto out;
+ ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
+ disk_bytenr + ordered->num_bytes - 1,
+ &list, false);
+ if (ret < 0) {
+ btrfs_mark_ordered_extent_error(ordered);
+ return ret;
+ }
while (!list_empty(&list)) {
- sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+ struct btrfs_ordered_sum *sums =
+ list_first_entry(&list, struct btrfs_ordered_sum, list);
+
list_del_init(&sums->list);
/*
@@ -4337,18 +4251,16 @@ int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
* disk_len vs real len like with real inodes since it's all
* disk length.
*/
- new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
- sums->bytenr = new_bytenr;
-
+ sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
btrfs_add_ordered_sum(ordered, sums);
}
-out:
- btrfs_put_ordered_extent(ordered);
- return ret;
+
+ return 0;
}
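/*
 * Worked example (editorial, illustrative only): if the source data sat at
 * logical 1GiB (disk_bytenr above) and the ordered extent's new location is
 * 5GiB (ordered->disk_bytenr), a csum item recorded for logical 1GiB + 64KiB
 * is rewritten to 5GiB + 64KiB by
 *
 *	sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
 *
 * so the cloned checksums describe the relocated copy of the data.
 */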
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf,
+ struct btrfs_root *root,
+ const struct extent_buffer *buf,
struct extent_buffer *cow)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4369,16 +4281,25 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
btrfs_root_last_snapshot(&root->root_item))
first_cow = 1;
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
- rc->create_reloc_tree) {
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
WARN_ON(!first_cow && level == 0);
node = rc->backref_cache.path[level];
- BUG_ON(node->bytenr != buf->start &&
- node->new_bytenr != buf->start);
+
+ /*
+ * If node->bytenr != buf->start and node->new_bytenr !=
+ * buf->start then we've got the wrong backref node for what we
+ * expected to see here and the cache is incorrect.
+ */
+ if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
+ btrfs_err(fs_info,
+"bytenr %llu was found but our backref cache was expecting %llu or %llu",
+ buf->start, node->bytenr, node->new_bytenr);
+ return -EUCLEAN;
+ }
btrfs_backref_drop_node_buffer(node);
- atomic_inc(&cow->refs);
+ refcount_inc(&cow->refs);
node->eb = cow;
node->new_bytenr = cow->start;
@@ -4463,8 +4384,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
}
new_root = pending->snap;
- reloc_root = create_reloc_root(trans, root->reloc_root,
- new_root->root_key.objectid);
+ reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
if (IS_ERR(reloc_root))
return PTR_ERR(reloc_root);
@@ -4476,8 +4396,21 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
return ret;
}
new_root->reloc_root = btrfs_grab_root(reloc_root);
+ return 0;
+}
- if (rc->create_reloc_tree)
- ret = clone_backref_node(trans, rc, root, reloc_root);
- return ret;
+/*
+ * Get the current bytenr for the block group which is being relocated.
+ *
+ * Return U64_MAX if there is no running relocation.
+ */
+u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
+{
+ u64 logical = U64_MAX;
+
+ lockdep_assert_held(&fs_info->reloc_mutex);
+
+ if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
+ logical = fs_info->reloc_ctl->block_group->start;
+ return logical;
}
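/*
 * Hypothetical caller sketch (editorial, not part of the patch): the
 * reloc_mutex must be held around the call, as asserted above.
 */
static inline bool example_reloc_running(struct btrfs_fs_info *fs_info)
{
	bool running;

	mutex_lock(&fs_info->reloc_mutex);
	running = (btrfs_get_reloc_bg_bytenr(fs_info) != U64_MAX);
	mutex_unlock(&fs_info->reloc_mutex);
	return running;
}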
diff --git a/fs/btrfs/relocation.h b/fs/btrfs/relocation.h
new file mode 100644
index 000000000000..5c36b3f84b57
--- /dev/null
+++ b/fs/btrfs/relocation.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_RELOCATION_H
+#define BTRFS_RELOCATION_H
+
+#include <linux/types.h>
+
+struct extent_buffer;
+struct btrfs_fs_info;
+struct btrfs_root;
+struct btrfs_trans_handle;
+struct btrfs_ordered_extent;
+struct btrfs_pending_snapshot;
+
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start,
+ bool verbose);
+int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root);
+int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
+int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
+int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered);
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ const struct extent_buffer *buf,
+ struct extent_buffer *cow);
+void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
+ u64 *bytes_to_reserve);
+int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+ struct btrfs_pending_snapshot *pending);
+int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info);
+struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr);
+bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root);
+u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info);
+
+#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 12ceb14a1141..6a7e297ab0a7 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -6,11 +6,15 @@
#include <linux/err.h>
#include <linux/uuid.h>
#include "ctree.h"
+#include "fs.h"
+#include "messages.h"
#include "transaction.h"
#include "disk-io.h"
-#include "print-tree.h"
#include "qgroup.h"
#include "space-info.h"
+#include "accessors.h"
+#include "root-tree.h"
+#include "orphan.h"
/*
* Read a root item from the tree. In case we detect a root item smaller then
@@ -25,7 +29,7 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
u32 len;
int need_reset = 0;
- len = btrfs_item_size_nr(eb, slot);
+ len = btrfs_item_size(eb, slot);
read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
min_t(u32, len, sizeof(*item)));
if (len < sizeof(*item))
@@ -46,7 +50,8 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
}
/*
- * btrfs_find_root - lookup the root by the key.
+ * Lookup the root by the key.
+ *
* root: the root of the root tree
* search_key: the key to search
* path: the path we search
@@ -76,7 +81,14 @@ int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
if (ret > 0)
goto out;
} else {
- BUG_ON(ret == 0); /* Logical error */
+ /*
+ * Key with offset -1 found; there would have to exist a root
+ * with such an id, but this is out of the valid range.
+ */
+ if (unlikely(ret == 0)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
@@ -118,7 +130,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *l;
int ret;
int slot;
@@ -131,22 +143,21 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
+ if (unlikely(ret > 0)) {
btrfs_crit(fs_info,
- "unable to find root key (%llu %u %llu) in tree %llu",
- key->objectid, key->type, key->offset,
- root->root_key.objectid);
+ "unable to find root key " BTRFS_KEY_FMT " in tree %llu",
+ BTRFS_KEY_FMT_VALUE(key), btrfs_root_id(root));
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
l = path->nodes[0];
slot = path->slots[0];
ptr = btrfs_item_ptr_offset(l, slot);
- old_len = btrfs_item_size_nr(l, slot);
+ old_len = btrfs_item_size(l, slot);
/*
* If this is the first time we update the root item which originated
@@ -157,22 +168,22 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, key, path,
-1, 1);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
ret = btrfs_del_item(trans, root, path);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path,
key, sizeof(*item));
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
l = path->nodes[0];
slot = path->slots[0];
@@ -186,9 +197,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
write_extent_buffer(l, item, ptr, sizeof(*item));
- btrfs_mark_buffer_dirty(path->nodes[0]);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -206,7 +214,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *tree_root = fs_info->tree_root;
struct extent_buffer *leaf;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root *root;
int err = 0;
@@ -278,13 +286,27 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state));
if (btrfs_root_refs(&root->root_item) == 0) {
+ struct btrfs_key drop_key;
+
+ btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress);
+ /*
+ * If we have a non-zero drop_progress then we know we
+ * made it partly through deleting this snapshot, and
+ * thus we need to make sure we block any balance from
+ * happening until this snapshot is completely dropped.
+ */
+ if (drop_key.objectid != 0 || drop_key.type != 0 ||
+ drop_key.offset != 0) {
+ set_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
+ set_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
+ }
+
set_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
btrfs_add_dead_root(root);
}
btrfs_put_root(root);
}
- btrfs_free_path(path);
return err;
}
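/*
 * Illustrative note (editorial, not part of the patch): the
 * BTRFS_FS_UNFINISHED_DROPS bit set here pairs with the wait_on_bit() added
 * to btrfs_relocate_block_group(), and is expected to be cleared with
 * clear_and_wake_up_bit() once the partially dropped snapshot has been fully
 * deleted, allowing balance/relocation to proceed.
 */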
@@ -293,7 +315,7 @@ int btrfs_del_root(struct btrfs_trans_handle *trans,
const struct btrfs_key *key)
{
struct btrfs_root *root = trans->fs_info->tree_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
path = btrfs_alloc_path();
@@ -301,28 +323,24 @@ int btrfs_del_root(struct btrfs_trans_handle *trans,
return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, -1, 1);
if (ret < 0)
- goto out;
-
- BUG_ON(ret != 0);
+ return ret;
+ if (unlikely(ret > 0))
+ /* The root must exist but we did not find it by the key. */
+ return -EUCLEAN;
- ret = btrfs_del_item(trans, root, path);
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, root, path);
}
int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
- u64 ref_id, u64 dirid, u64 *sequence, const char *name,
- int name_len)
-
+ u64 ref_id, u64 dirid, u64 *sequence,
+ const struct fscrypt_str *name)
{
struct btrfs_root *tree_root = trans->fs_info->tree_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
struct btrfs_key key;
unsigned long ptr;
- int err = 0;
int ret;
path = btrfs_alloc_path();
@@ -334,27 +352,26 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
key.offset = ref_id;
again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- BUG_ON(ret < 0);
- if (ret == 0) {
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
ptr = (unsigned long)(ref + 1);
if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
- (btrfs_root_ref_name_len(leaf, ref) != name_len) ||
- memcmp_extent_buffer(leaf, name, ptr, name_len)) {
- err = -ENOENT;
- goto out;
- }
+ (btrfs_root_ref_name_len(leaf, ref) != name->len) ||
+ memcmp_extent_buffer(leaf, name->name, ptr, name->len))
+ return -ENOENT;
+
*sequence = btrfs_root_ref_sequence(leaf, ref);
ret = btrfs_del_item(trans, tree_root, path);
- if (ret) {
- err = ret;
- goto out;
- }
- } else
- err = -ENOENT;
+ if (ret)
+ return ret;
+ } else {
+ return -ENOENT;
+ }
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(path);
@@ -364,9 +381,7 @@ again:
goto again;
}
-out:
- btrfs_free_path(path);
- return err;
+ return ret;
}
/*
@@ -385,13 +400,13 @@ out:
* Will return 0, -ENOMEM, or anything from the CoW path
*/
int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
- u64 ref_id, u64 dirid, u64 sequence, const char *name,
- int name_len)
+ u64 ref_id, u64 dirid, u64 sequence,
+ const struct fscrypt_str *name)
{
struct btrfs_root *tree_root = trans->fs_info->tree_root;
struct btrfs_key key;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
unsigned long ptr;
@@ -405,10 +420,9 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
key.offset = ref_id;
again:
ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
- sizeof(*ref) + name_len);
- if (ret) {
+ sizeof(*ref) + name->len);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- btrfs_free_path(path);
return ret;
}
@@ -416,10 +430,9 @@ again:
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
btrfs_set_root_ref_dirid(leaf, ref, dirid);
btrfs_set_root_ref_sequence(leaf, ref, sequence);
- btrfs_set_root_ref_name_len(leaf, ref, name_len);
+ btrfs_set_root_ref_name_len(leaf, ref, name->len);
ptr = (unsigned long)(ref + 1);
- write_extent_buffer(leaf, name, ptr, name_len);
- btrfs_mark_buffer_dirty(leaf);
+ write_extent_buffer(leaf, name->name, ptr, name->len);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(path);
@@ -429,7 +442,6 @@ again:
goto again;
}
- btrfs_free_path(path);
return 0;
}
@@ -466,7 +478,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
}
/*
- * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
+ * Reserve space for subvolume operation.
+ *
* root: the root of the parent directory
* rsv: block reservation
* items: the number of items that we need do reservation
@@ -489,11 +502,12 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+ if (btrfs_qgroup_enabled(fs_info)) {
/* One for parent inode, two for dir entries */
qgroup_num_bytes = 3 * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta_prealloc(root,
- qgroup_num_bytes, true);
+ qgroup_num_bytes, true,
+ false);
if (ret)
return ret;
}
@@ -501,7 +515,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
num_bytes = btrfs_calc_insert_metadata_size(fs_info, items);
rsv->space_info = btrfs_find_space_info(fs_info,
BTRFS_BLOCK_GROUP_METADATA);
- ret = btrfs_block_rsv_add(root, rsv, num_bytes,
+ ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes,
BTRFS_RESERVE_FLUSH_ALL);
if (ret == -ENOSPC && use_global_rsv)
@@ -517,13 +531,3 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
}
return ret;
}
-
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- u64 qgroup_to_release;
-
- btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
- btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
-}
diff --git a/fs/btrfs/root-tree.h b/fs/btrfs/root-tree.h
new file mode 100644
index 000000000000..8f5739e732b9
--- /dev/null
+++ b/fs/btrfs/root-tree.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_ROOT_TREE_H
+#define BTRFS_ROOT_TREE_H
+
+#include <linux/types.h>
+
+struct fscrypt_str;
+struct extent_buffer;
+struct btrfs_key;
+struct btrfs_root;
+struct btrfs_root_item;
+struct btrfs_path;
+struct btrfs_fs_info;
+struct btrfs_block_rsv;
+struct btrfs_trans_handle;
+
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int nitems, bool use_global_rsv);
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence,
+ const struct fscrypt_str *name);
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 *sequence,
+ const struct fscrypt_str *name);
+int btrfs_del_root(struct btrfs_trans_handle *trans, const struct btrfs_key *key);
+int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ const struct btrfs_key *key,
+ struct btrfs_root_item *item);
+int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_root_item *item);
+int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
+ struct btrfs_path *path, struct btrfs_root_item *root_item,
+ struct btrfs_key *root_key);
+int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
+void btrfs_set_root_node(struct btrfs_root_item *item,
+ struct extent_buffer *node);
+void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+void btrfs_update_root_times(struct btrfs_trans_handle *trans, struct btrfs_root *root);
+
+#endif
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index cf82ea6f54fb..a40ee41f42c6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,11 +16,14 @@
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
+#include "fs.h"
+#include "accessors.h"
+#include "file-item.h"
+#include "scrub.h"
+#include "raid-stripe-tree.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
@@ -35,148 +38,179 @@
* - add a mode to also read unallocated space
*/
-struct scrub_block;
struct scrub_ctx;
/*
- * the following three values only influence the performance.
- * The last one configures the number of parallel and outstanding I/O
- * operations. The first two values configure an upper limit for the number
- * of (dynamically allocated) pages that are added to a bio.
+ * The following value only influences performance.
+ *
+ * It determines how many stripes are submitted in one go, which is
+ * 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
+ */
+#define SCRUB_STRIPES_PER_GROUP 8
+
+/*
+ * How many groups we have for each sctx.
+ *
+ * This amounts to 8MiB in flight per device, the same value as the old scrub
+ * in-flight bio size limit.
*/
-#define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
-#define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
-#define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */
+#define SCRUB_GROUPS_PER_SCTX 16
+
+#define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
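+
+/*
+ * E.g. assuming the usual 64KiB BTRFS_STRIPE_LEN: one group covers
+ * 8 * 64KiB = 512KiB, and SCRUB_TOTAL_STRIPES gives 16 * 8 = 128 stripes,
+ * i.e. 128 * 64KiB = 8MiB in flight per sctx.
+ */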
/*
- * the following value times PAGE_SIZE needs to be large enough to match the
+ * The following value times PAGE_SIZE needs to be large enough to match the
* largest node/leaf/sector size that shall be supported.
- * Values larger than BTRFS_STRIPE_LEN are not supported.
*/
-#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
+#define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
-struct scrub_recover {
- refcount_t refs;
- struct btrfs_io_context *bioc;
- u64 map_length;
-};
+/* Represents one sector and the info needed to verify its content. */
+struct scrub_sector_verification {
+ union {
+ /*
+ * Csum pointer for data csum verification. Should point to a
+ * sector csum inside scrub_stripe::csums.
+ *
+ * NULL if this data sector has no csum.
+ */
+ u8 *csum;
-struct scrub_page {
- struct scrub_block *sblock;
- struct page *page;
- struct btrfs_device *dev;
- struct list_head list;
- u64 flags; /* extent flags */
- u64 generation;
- u64 logical;
- u64 physical;
- u64 physical_for_dev_replace;
- atomic_t refs;
- u8 mirror_num;
- int have_csum:1;
- int io_error:1;
- u8 csum[BTRFS_CSUM_SIZE];
-
- struct scrub_recover *recover;
+ /*
+ * Extra info for metadata verification. All sectors inside a
+ * tree block share the same generation.
+ */
+ u64 generation;
+ };
};
-struct scrub_bio {
- int index;
- struct scrub_ctx *sctx;
- struct btrfs_device *dev;
- struct bio *bio;
- blk_status_t status;
- u64 logical;
- u64 physical;
-#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
- struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
-#else
- struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
-#endif
- int page_count;
- int next_free;
- struct btrfs_work work;
+enum scrub_stripe_flags {
+ /* Set when @mirror_num, @dev, @physical and @logical are set. */
+ SCRUB_STRIPE_FLAG_INITIALIZED,
+
+ /* Set when the read-repair is finished. */
+ SCRUB_STRIPE_FLAG_REPAIR_DONE,
+
+ /*
+	 * Set for data stripes if the scrub is triggered from a P/Q stripe.
+	 * During such a scrub, we should not report errors in data stripes,
+	 * nor update the accounting.
+ */
+ SCRUB_STRIPE_FLAG_NO_REPORT,
};
-struct scrub_block {
- struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
- int page_count;
- atomic_t outstanding_pages;
- refcount_t refs; /* free mem on transition to zero */
- struct scrub_ctx *sctx;
- struct scrub_parity *sparity;
- struct {
- unsigned int header_error:1;
- unsigned int checksum_error:1;
- unsigned int no_io_error_seen:1;
- unsigned int generation_error:1; /* also sets header_error */
-
- /* The following is for the data used to check parity */
- /* It is for the data with checksum */
- unsigned int data_corrected:1;
- };
- struct btrfs_work work;
+/*
+ * We have multiple bitmaps for one scrub_stripe.
+ * However, each bitmap has at most (BTRFS_STRIPE_LEN / blocksize) bits,
+ * which is normally 16 and much smaller than BITS_PER_LONG (32 or 64).
+ *
+ * So to reduce memory usage for each scrub_stripe, we pack those bitmaps
+ * into a larger one.
+ *
+ * This enum records where each sub-bitmap is inside the larger one.
+ * Each sub-bitmap starts at bit scrub_bitmap_nr_##name * nr_sectors.
+ */
+enum {
+ /* Which blocks are covered by extent items. */
+ scrub_bitmap_nr_has_extent = 0,
+
+ /* Which blocks are metadata. */
+ scrub_bitmap_nr_is_metadata,
+
+ /*
+ * Which blocks have errors, including IO, csum, and metadata
+ * errors.
+	 * This sub-bitmap is the OR result of the next few error-related
+ * sub-bitmaps.
+ */
+ scrub_bitmap_nr_error,
+ scrub_bitmap_nr_io_error,
+ scrub_bitmap_nr_csum_error,
+ scrub_bitmap_nr_meta_error,
+ scrub_bitmap_nr_meta_gen_error,
+ scrub_bitmap_nr_last,
};
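+
+/*
+ * For example, assuming a 4KiB block size, nr_sectors is
+ * BTRFS_STRIPE_LEN / 4KiB = 16, so the sub-bitmaps above start at bits
+ * 0 (has_extent), 16 (is_metadata), 32 (error), 48 (io_error),
+ * 64 (csum_error), 80 (meta_error) and 96 (meta_gen_error) inside
+ * scrub_stripe::bitmaps.
+ */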
-/* Used for the chunks with parity stripe such RAID5/6 */
-struct scrub_parity {
- struct scrub_ctx *sctx;
+#define SCRUB_STRIPE_MAX_FOLIOS (BTRFS_STRIPE_LEN / PAGE_SIZE)
- struct btrfs_device *scrub_dev;
+/*
+ * Represents one contiguous range with a length of BTRFS_STRIPE_LEN.
+ */
+struct scrub_stripe {
+ struct scrub_ctx *sctx;
+ struct btrfs_block_group *bg;
- u64 logic_start;
+ struct folio *folios[SCRUB_STRIPE_MAX_FOLIOS];
+ struct scrub_sector_verification *sectors;
- u64 logic_end;
+ struct btrfs_device *dev;
+ u64 logical;
+ u64 physical;
- int nsectors;
+ u16 mirror_num;
- u32 stripe_len;
+ /* Should be BTRFS_STRIPE_LEN / sectorsize. */
+ u16 nr_sectors;
- refcount_t refs;
+ /*
+ * How many data/meta extents are in this stripe. Only for scrub status
+ * reporting purposes.
+ */
+ u16 nr_data_extents;
+ u16 nr_meta_extents;
- struct list_head spages;
+ atomic_t pending_io;
+ wait_queue_head_t io_wait;
+ wait_queue_head_t repair_wait;
- /* Work of parity check and repair */
- struct btrfs_work work;
+ /*
+	 * Indicates the state of the stripe. Bits are defined in the
+	 * scrub_stripe_flags enum.
+ */
+ unsigned long state;
- /* Mark the parity blocks which have data */
- unsigned long *dbitmap;
+ /* The large bitmap contains all the sub-bitmaps. */
+ unsigned long bitmaps[BITS_TO_LONGS(scrub_bitmap_nr_last *
+ (BTRFS_STRIPE_LEN / BTRFS_MIN_BLOCKSIZE))];
/*
- * Mark the parity blocks which have data, but errors happen when
- * read data or check data
+ * For writeback (repair or replace) error reporting.
+	 * This one is protected by a spinlock, thus it cannot be packed into
+ * the larger bitmap.
*/
- unsigned long *ebitmap;
+ unsigned long write_error_bitmap;
+
+ /* Writeback can be concurrent, thus we need to protect the bitmap. */
+ spinlock_t write_error_lock;
- unsigned long bitmap[];
+ /*
+ * Checksum for the whole stripe if this stripe is inside a data block
+ * group.
+ */
+ u8 *csums;
+
+ struct work_struct work;
};
struct scrub_ctx {
- struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
+ struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES];
+ struct scrub_stripe *raid56_data_stripes;
struct btrfs_fs_info *fs_info;
+ struct btrfs_path extent_path;
+ struct btrfs_path csum_path;
int first_free;
- int curr;
- atomic_t bios_in_flight;
- atomic_t workers_pending;
- spinlock_t list_lock;
- wait_queue_head_t list_wait;
- struct list_head csum_list;
+ int cur_stripe;
atomic_t cancel_req;
int readonly;
- int pages_per_rd_bio;
/* State of IO submission throttling affecting the associated device */
ktime_t throttle_deadline;
u64 throttle_sent;
- int is_dev_replace;
+ bool is_dev_replace;
u64 write_pointer;
- struct scrub_bio *wr_curr_bio;
struct mutex wr_lock;
- int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
struct btrfs_device *wr_tgtdev;
- bool flush_all_writes;
/*
* statistics
@@ -194,6 +228,90 @@ struct scrub_ctx {
refcount_t refs;
};
+#define scrub_calc_start_bit(stripe, name, block_nr) \
+({ \
+ unsigned int __start_bit; \
+ \
+ ASSERT(block_nr < stripe->nr_sectors, \
+ "nr_sectors=%u block_nr=%u", stripe->nr_sectors, block_nr); \
+ __start_bit = scrub_bitmap_nr_##name * stripe->nr_sectors + block_nr; \
+ __start_bit; \
+})
+
+#define IMPLEMENT_SCRUB_BITMAP_OPS(name) \
+static inline void scrub_bitmap_set_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr, \
+ unsigned int nr_blocks) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, \
+ name, block_nr); \
+ \
+ bitmap_set(stripe->bitmaps, start_bit, nr_blocks); \
+} \
+static inline void scrub_bitmap_clear_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr, \
+ unsigned int nr_blocks) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ bitmap_clear(stripe->bitmaps, start_bit, nr_blocks); \
+} \
+static inline bool scrub_bitmap_test_bit_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ return test_bit(start_bit, stripe->bitmaps); \
+} \
+static inline void scrub_bitmap_set_bit_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ set_bit(start_bit, stripe->bitmaps); \
+} \
+static inline void scrub_bitmap_clear_bit_##name(struct scrub_stripe *stripe, \
+ unsigned int block_nr) \
+{ \
+ const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \
+ block_nr); \
+ \
+ clear_bit(start_bit, stripe->bitmaps); \
+} \
+static inline unsigned long scrub_bitmap_read_##name(struct scrub_stripe *stripe) \
+{ \
+ const unsigned int nr_blocks = stripe->nr_sectors; \
+ \
+ ASSERT(nr_blocks > 0 && nr_blocks <= BITS_PER_LONG, \
+ "nr_blocks=%u BITS_PER_LONG=%u", \
+ nr_blocks, BITS_PER_LONG); \
+ \
+ return bitmap_read(stripe->bitmaps, nr_blocks * scrub_bitmap_nr_##name, \
+ stripe->nr_sectors); \
+} \
+static inline bool scrub_bitmap_empty_##name(struct scrub_stripe *stripe) \
+{ \
+ unsigned long bitmap = scrub_bitmap_read_##name(stripe); \
+ \
+ return bitmap_empty(&bitmap, stripe->nr_sectors); \
+} \
+static inline unsigned int scrub_bitmap_weight_##name(struct scrub_stripe *stripe) \
+{ \
+ unsigned long bitmap = scrub_bitmap_read_##name(stripe); \
+ \
+ return bitmap_weight(&bitmap, stripe->nr_sectors); \
+}
+IMPLEMENT_SCRUB_BITMAP_OPS(has_extent);
+IMPLEMENT_SCRUB_BITMAP_OPS(is_metadata);
+IMPLEMENT_SCRUB_BITMAP_OPS(error);
+IMPLEMENT_SCRUB_BITMAP_OPS(io_error);
+IMPLEMENT_SCRUB_BITMAP_OPS(csum_error);
+IMPLEMENT_SCRUB_BITMAP_OPS(meta_error);
+IMPLEMENT_SCRUB_BITMAP_OPS(meta_gen_error);
+
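+/*
+ * Usage sketch (assuming a 4KiB block size, i.e. nr_sectors == 16): the
+ * helpers generated above follow the plain bitmap API, e.g. recording a
+ * csum error on sector 3 and then checking the merged error bitmap:
+ *
+ *	scrub_bitmap_set_bit_csum_error(stripe, 3);
+ *	scrub_bitmap_set_bit_error(stripe, 3);
+ *	if (!scrub_bitmap_empty_error(stripe))
+ *		nr_bad = scrub_bitmap_weight_error(stripe);
+ */
+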
struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
@@ -203,73 +321,82 @@ struct scrub_warning {
struct btrfs_device *dev;
};
-struct full_stripe_lock {
- struct rb_node node;
- u64 logical;
- u64 refs;
- struct mutex mutex;
-};
+struct scrub_error_records {
+ /*
+ * Bitmap recording which blocks hit errors (IO/csum/...) during the
+ * initial read.
+ */
+ unsigned long init_error_bitmap;
-static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
- struct scrub_block *sblocks_for_recheck);
-static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
- struct scrub_block *sblock,
- int retry_failed_mirror);
-static void scrub_recheck_block_checksum(struct scrub_block *sblock);
-static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
- struct scrub_block *sblock_good);
-static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
- struct scrub_block *sblock_good,
- int page_num, int force_write);
-static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
-static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
- int page_num);
-static int scrub_checksum_data(struct scrub_block *sblock);
-static int scrub_checksum_tree_block(struct scrub_block *sblock);
-static int scrub_checksum_super(struct scrub_block *sblock);
-static void scrub_block_put(struct scrub_block *sblock);
-static void scrub_page_get(struct scrub_page *spage);
-static void scrub_page_put(struct scrub_page *spage);
-static void scrub_parity_get(struct scrub_parity *sparity);
-static void scrub_parity_put(struct scrub_parity *sparity);
-static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
- u64 physical, struct btrfs_device *dev, u64 flags,
- u64 gen, int mirror_num, u8 *csum,
- u64 physical_for_dev_replace);
-static void scrub_bio_end_io(struct bio *bio);
-static void scrub_bio_end_io_worker(struct btrfs_work *work);
-static void scrub_block_complete(struct scrub_block *sblock);
-static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
- u64 extent_logical, u32 extent_len,
- u64 *extent_physical,
- struct btrfs_device **extent_dev,
- int *extent_mirror_num);
-static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
- struct scrub_page *spage);
-static void scrub_wr_submit(struct scrub_ctx *sctx);
-static void scrub_wr_bio_end_io(struct bio *bio);
-static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
-static void scrub_put_ctx(struct scrub_ctx *sctx);
+ unsigned int nr_io_errors;
+ unsigned int nr_csum_errors;
+ unsigned int nr_meta_errors;
+ unsigned int nr_meta_gen_errors;
+};
-static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
+static void release_scrub_stripe(struct scrub_stripe *stripe)
{
- return spage->recover &&
- (spage->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+ if (!stripe)
+ return;
+
+ for (int i = 0; i < SCRUB_STRIPE_MAX_FOLIOS; i++) {
+ if (stripe->folios[i])
+ folio_put(stripe->folios[i]);
+ stripe->folios[i] = NULL;
+ }
+ kfree(stripe->sectors);
+ kfree(stripe->csums);
+ stripe->sectors = NULL;
+ stripe->csums = NULL;
+ stripe->sctx = NULL;
+ stripe->state = 0;
}
-static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
+static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
+ struct scrub_stripe *stripe)
{
- refcount_inc(&sctx->refs);
- atomic_inc(&sctx->bios_in_flight);
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ int ret;
+
+ memset(stripe, 0, sizeof(*stripe));
+
+ stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
+ stripe->state = 0;
+
+ init_waitqueue_head(&stripe->io_wait);
+ init_waitqueue_head(&stripe->repair_wait);
+ atomic_set(&stripe->pending_io, 0);
+ spin_lock_init(&stripe->write_error_lock);
+
+ ASSERT(BTRFS_STRIPE_LEN >> min_folio_shift <= SCRUB_STRIPE_MAX_FOLIOS);
+ ret = btrfs_alloc_folio_array(BTRFS_STRIPE_LEN >> min_folio_shift,
+ fs_info->block_min_order, stripe->folios);
+ if (ret < 0)
+ goto error;
+
+ stripe->sectors = kcalloc(stripe->nr_sectors,
+ sizeof(struct scrub_sector_verification),
+ GFP_KERNEL);
+ if (!stripe->sectors)
+ goto error;
+
+ stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
+ fs_info->csum_size, GFP_KERNEL);
+ if (!stripe->csums)
+ goto error;
+ return 0;
+error:
+ release_scrub_stripe(stripe);
+ return -ENOMEM;
}
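+
+/*
+ * E.g. assuming 4KiB pages and sectors, block_min_order 0, crc32c csums and
+ * the usual 64KiB BTRFS_STRIPE_LEN: nr_sectors = 64KiB / 4KiB = 16, so a
+ * stripe ends up with 16 order-0 folios, 16 scrub_sector_verification
+ * entries and a 16 * 4 = 64 byte csum buffer.
+ */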
-static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
+static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
{
- atomic_dec(&sctx->bios_in_flight);
- wake_up(&sctx->list_wait);
- scrub_put_ctx(sctx);
+ wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
}
+static void scrub_put_ctx(struct scrub_ctx *sctx);
+
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
while (atomic_read(&fs_info->scrub_pause_req)) {
@@ -302,234 +429,6 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
scrub_pause_off(fs_info);
}
-/*
- * Insert new full stripe lock into full stripe locks tree
- *
- * Return pointer to existing or newly inserted full_stripe_lock structure if
- * everything works well.
- * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
- *
- * NOTE: caller must hold full_stripe_locks_root->lock before calling this
- * function
- */
-static struct full_stripe_lock *insert_full_stripe_lock(
- struct btrfs_full_stripe_locks_tree *locks_root,
- u64 fstripe_logical)
-{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct full_stripe_lock *entry;
- struct full_stripe_lock *ret;
-
- lockdep_assert_held(&locks_root->lock);
-
- p = &locks_root->root.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct full_stripe_lock, node);
- if (fstripe_logical < entry->logical) {
- p = &(*p)->rb_left;
- } else if (fstripe_logical > entry->logical) {
- p = &(*p)->rb_right;
- } else {
- entry->refs++;
- return entry;
- }
- }
-
- /*
- * Insert new lock.
- */
- ret = kmalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return ERR_PTR(-ENOMEM);
- ret->logical = fstripe_logical;
- ret->refs = 1;
- mutex_init(&ret->mutex);
-
- rb_link_node(&ret->node, parent, p);
- rb_insert_color(&ret->node, &locks_root->root);
- return ret;
-}
-
-/*
- * Search for a full stripe lock of a block group
- *
- * Return pointer to existing full stripe lock if found
- * Return NULL if not found
- */
-static struct full_stripe_lock *search_full_stripe_lock(
- struct btrfs_full_stripe_locks_tree *locks_root,
- u64 fstripe_logical)
-{
- struct rb_node *node;
- struct full_stripe_lock *entry;
-
- lockdep_assert_held(&locks_root->lock);
-
- node = locks_root->root.rb_node;
- while (node) {
- entry = rb_entry(node, struct full_stripe_lock, node);
- if (fstripe_logical < entry->logical)
- node = node->rb_left;
- else if (fstripe_logical > entry->logical)
- node = node->rb_right;
- else
- return entry;
- }
- return NULL;
-}
-
-/*
- * Helper to get full stripe logical from a normal bytenr.
- *
- * Caller must ensure @cache is a RAID56 block group.
- */
-static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
-{
- u64 ret;
-
- /*
- * Due to chunk item size limit, full stripe length should not be
- * larger than U32_MAX. Just a sanity check here.
- */
- WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
-
- /*
- * round_down() can only handle power of 2, while RAID56 full
- * stripe length can be 64KiB * n, so we need to manually round down.
- */
- ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
- cache->full_stripe_len + cache->start;
- return ret;
-}
-
-/*
- * Lock a full stripe to avoid concurrency of recovery and read
- *
- * It's only used for profiles with parities (RAID5/6), for other profiles it
- * does nothing.
- *
- * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
- * So caller must call unlock_full_stripe() at the same context.
- *
- * Return <0 if encounters error.
- */
-static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
- bool *locked_ret)
-{
- struct btrfs_block_group *bg_cache;
- struct btrfs_full_stripe_locks_tree *locks_root;
- struct full_stripe_lock *existing;
- u64 fstripe_start;
- int ret = 0;
-
- *locked_ret = false;
- bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
- if (!bg_cache) {
- ASSERT(0);
- return -ENOENT;
- }
-
- /* Profiles not based on parity don't need full stripe lock */
- if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
- goto out;
- locks_root = &bg_cache->full_stripe_locks_root;
-
- fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
-
- /* Now insert the full stripe lock */
- mutex_lock(&locks_root->lock);
- existing = insert_full_stripe_lock(locks_root, fstripe_start);
- mutex_unlock(&locks_root->lock);
- if (IS_ERR(existing)) {
- ret = PTR_ERR(existing);
- goto out;
- }
- mutex_lock(&existing->mutex);
- *locked_ret = true;
-out:
- btrfs_put_block_group(bg_cache);
- return ret;
-}
-
-/*
- * Unlock a full stripe.
- *
- * NOTE: Caller must ensure it's the same context calling corresponding
- * lock_full_stripe().
- *
- * Return 0 if we unlock full stripe without problem.
- * Return <0 for error
- */
-static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
- bool locked)
-{
- struct btrfs_block_group *bg_cache;
- struct btrfs_full_stripe_locks_tree *locks_root;
- struct full_stripe_lock *fstripe_lock;
- u64 fstripe_start;
- bool freeit = false;
- int ret = 0;
-
- /* If we didn't acquire full stripe lock, no need to continue */
- if (!locked)
- return 0;
-
- bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
- if (!bg_cache) {
- ASSERT(0);
- return -ENOENT;
- }
- if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
- goto out;
-
- locks_root = &bg_cache->full_stripe_locks_root;
- fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
-
- mutex_lock(&locks_root->lock);
- fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
- /* Unpaired unlock_full_stripe() detected */
- if (!fstripe_lock) {
- WARN_ON(1);
- ret = -ENOENT;
- mutex_unlock(&locks_root->lock);
- goto out;
- }
-
- if (fstripe_lock->refs == 0) {
- WARN_ON(1);
- btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
- fstripe_lock->logical);
- } else {
- fstripe_lock->refs--;
- }
-
- if (fstripe_lock->refs == 0) {
- rb_erase(&fstripe_lock->node, &locks_root->root);
- freeit = true;
- }
- mutex_unlock(&locks_root->lock);
-
- mutex_unlock(&fstripe_lock->mutex);
- if (freeit)
- kfree(fstripe_lock);
-out:
- btrfs_put_block_group(bg_cache);
- return ret;
-}
-
-static void scrub_free_csums(struct scrub_ctx *sctx)
-{
- while (!list_empty(&sctx->csum_list)) {
- struct btrfs_ordered_sum *sum;
- sum = list_first_entry(&sctx->csum_list,
- struct btrfs_ordered_sum, list);
- list_del(&sum->list);
- kfree(sum);
- }
-}
-
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
int i;
@@ -537,28 +436,10 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
if (!sctx)
return;
- /* this can happen when scrub is cancelled */
- if (sctx->curr != -1) {
- struct scrub_bio *sbio = sctx->bios[sctx->curr];
-
- for (i = 0; i < sbio->page_count; i++) {
- WARN_ON(!sbio->pagev[i]->page);
- scrub_block_put(sbio->pagev[i]->sblock);
- }
- bio_put(sbio->bio);
- }
-
- for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
- struct scrub_bio *sbio = sctx->bios[i];
+ for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
+ release_scrub_stripe(&sctx->stripes[i]);
- if (!sbio)
- break;
- kfree(sbio);
- }
-
- kfree(sctx->wr_curr_bio);
- scrub_free_csums(sctx);
- kfree(sctx);
+ kvfree(sctx);
}
static void scrub_put_ctx(struct scrub_ctx *sctx)
@@ -568,57 +449,42 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
}
static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
- struct btrfs_fs_info *fs_info, int is_dev_replace)
+ struct btrfs_fs_info *fs_info, bool is_dev_replace)
{
struct scrub_ctx *sctx;
int i;
- sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
+	/*
+	 * Since sctx has inline 128 stripes, it can go beyond 64K easily.
+	 * Use kvzalloc().
+	 */
+ sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
if (!sctx)
goto nomem;
refcount_set(&sctx->refs, 1);
sctx->is_dev_replace = is_dev_replace;
- sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
- sctx->curr = -1;
sctx->fs_info = fs_info;
- INIT_LIST_HEAD(&sctx->csum_list);
- for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
- struct scrub_bio *sbio;
+ sctx->extent_path.search_commit_root = true;
+ sctx->extent_path.skip_locking = true;
+ sctx->csum_path.search_commit_root = true;
+ sctx->csum_path.skip_locking = true;
+ for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
+ int ret;
- sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
- if (!sbio)
+ ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
+ if (ret < 0)
goto nomem;
- sctx->bios[i] = sbio;
-
- sbio->index = i;
- sbio->sctx = sctx;
- sbio->page_count = 0;
- btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
- NULL);
-
- if (i != SCRUB_BIOS_PER_SCTX - 1)
- sctx->bios[i]->next_free = i + 1;
- else
- sctx->bios[i]->next_free = -1;
+ sctx->stripes[i].sctx = sctx;
}
sctx->first_free = 0;
- atomic_set(&sctx->bios_in_flight, 0);
- atomic_set(&sctx->workers_pending, 0);
atomic_set(&sctx->cancel_req, 0);
- spin_lock_init(&sctx->list_lock);
spin_lock_init(&sctx->stat_lock);
- init_waitqueue_head(&sctx->list_wait);
sctx->throttle_deadline = 0;
- WARN_ON(sctx->wr_curr_bio != NULL);
mutex_init(&sctx->wr_lock);
- sctx->wr_curr_bio = NULL;
if (is_dev_replace) {
WARN_ON(!fs_info->dev_replace.tgtdev);
- sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
- sctx->flush_all_writes = false;
}
return sctx;
@@ -628,8 +494,8 @@ nomem:
return ERR_PTR(-ENOMEM);
}
-static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
- void *warn_ctx)
+static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
+ u64 root, void *warn_ctx)
{
u32 nlink;
int ret;
@@ -639,7 +505,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
struct btrfs_inode_item *inode_item;
struct scrub_warning *swarn = warn_ctx;
struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
- struct inode_fs_paths *ipath = NULL;
+ struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
struct btrfs_root *local_root;
struct btrfs_key key;
@@ -693,1322 +559,834 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
* hold all of the paths here
*/
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
- btrfs_warn_in_rcu(fs_info,
-"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
+ btrfs_warn(fs_info,
+"scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu length %u links %u (path: %s)",
swarn->errstr, swarn->logical,
- rcu_str_deref(swarn->dev->name),
+ btrfs_dev_name(swarn->dev),
swarn->physical,
root, inum, offset,
fs_info->sectorsize, nlink,
(char *)(unsigned long)ipath->fspath->val[i]);
btrfs_put_root(local_root);
- free_ipath(ipath);
return 0;
err:
- btrfs_warn_in_rcu(fs_info,
- "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
+ btrfs_warn(fs_info,
+ "scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu: path resolving failed with ret=%d",
swarn->errstr, swarn->logical,
- rcu_str_deref(swarn->dev->name),
+ btrfs_dev_name(swarn->dev),
swarn->physical,
root, inum, offset, ret);
- free_ipath(ipath);
return 0;
}
-static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
+static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
+ bool is_super, u64 logical, u64 physical)
{
- struct btrfs_device *dev;
- struct btrfs_fs_info *fs_info;
- struct btrfs_path *path;
+ struct btrfs_fs_info *fs_info = dev->fs_info;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key found_key;
struct extent_buffer *eb;
struct btrfs_extent_item *ei;
struct scrub_warning swarn;
- unsigned long ptr = 0;
- u64 extent_item_pos;
u64 flags = 0;
- u64 ref_root;
u32 item_size;
- u8 ref_level = 0;
int ret;
- WARN_ON(sblock->page_count < 1);
- dev = sblock->pagev[0]->dev;
- fs_info = sblock->sctx->fs_info;
-
+ /* Super block error, no need to search extent tree. */
+ if (is_super) {
+ btrfs_warn(fs_info, "scrub: %s on device %s, physical %llu",
+ errstr, btrfs_dev_name(dev), physical);
+ return;
+ }
path = btrfs_alloc_path();
if (!path)
return;
- swarn.physical = sblock->pagev[0]->physical;
- swarn.logical = sblock->pagev[0]->logical;
+ swarn.physical = physical;
+ swarn.logical = logical;
swarn.errstr = errstr;
swarn.dev = NULL;
ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
&flags);
if (ret < 0)
- goto out;
+ return;
- extent_item_pos = swarn.logical - found_key.objectid;
swarn.extent_item_size = found_key.offset;
eb = path->nodes[0];
ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
- item_size = btrfs_item_size_nr(eb, path->slots[0]);
+ item_size = btrfs_item_size(eb, path->slots[0]);
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- do {
+ unsigned long ptr = 0;
+ u8 ref_level;
+ u64 ref_root;
+
+ while (true) {
ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
item_size, &ref_root,
&ref_level);
- btrfs_warn_in_rcu(fs_info,
-"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
- errstr, swarn.logical,
- rcu_str_deref(dev->name),
- swarn.physical,
- ref_level ? "node" : "leaf",
- ret < 0 ? -1 : ref_level,
- ret < 0 ? -1 : ref_root);
- } while (ret != 1);
+ if (ret < 0) {
+ btrfs_warn(fs_info,
+ "scrub: failed to resolve tree backref for logical %llu: %d",
+ swarn.logical, ret);
+ break;
+ }
+ if (ret > 0)
+ break;
+ btrfs_warn(fs_info,
+"scrub: %s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
+ errstr, swarn.logical, btrfs_dev_name(dev),
+ swarn.physical, (ref_level ? "node" : "leaf"),
+ ref_level, ref_root);
+ }
btrfs_release_path(path);
} else {
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+
btrfs_release_path(path);
- swarn.path = path;
- swarn.dev = dev;
- iterate_extent_inodes(fs_info, found_key.objectid,
- extent_item_pos, 1,
- scrub_print_warning_inode, &swarn, false);
- }
-out:
- btrfs_free_path(path);
-}
+ ctx.bytenr = found_key.objectid;
+ ctx.extent_item_pos = swarn.logical - found_key.objectid;
+ ctx.fs_info = fs_info;
-static inline void scrub_get_recover(struct scrub_recover *recover)
-{
- refcount_inc(&recover->refs);
-}
+ swarn.path = path;
+ swarn.dev = dev;
-static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
- struct scrub_recover *recover)
-{
- if (refcount_dec_and_test(&recover->refs)) {
- btrfs_bio_counter_dec(fs_info);
- btrfs_put_bioc(recover->bioc);
- kfree(recover);
+ iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
}
}
-/*
- * scrub_handle_errored_block gets called when either verification of the
- * pages failed or the bio failed to read, e.g. with EIO. In the latter
- * case, this function handles all pages in the bio, even though only one
- * may be bad.
- * The goal of this function is to repair the errored block by using the
- * contents of one of the mirrors.
- */
-static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
+static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
- struct scrub_ctx *sctx = sblock_to_check->sctx;
- struct btrfs_device *dev;
- struct btrfs_fs_info *fs_info;
- u64 logical;
- unsigned int failed_mirror_index;
- unsigned int is_metadata;
- unsigned int have_csum;
- struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
- struct scrub_block *sblock_bad;
- int ret;
- int mirror_index;
- int page_num;
- int success;
- bool full_stripe_locked;
- unsigned int nofs_flag;
- static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
+ int ret = 0;
+ u64 length;
- BUG_ON(sblock_to_check->page_count < 1);
- fs_info = sctx->fs_info;
- if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
- /*
- * if we find an error in a super block, we just report it.
- * They will get written with the next transaction commit
- * anyway
- */
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.super_errors;
- spin_unlock(&sctx->stat_lock);
+ if (!btrfs_is_zoned(sctx->fs_info))
return 0;
- }
- logical = sblock_to_check->pagev[0]->logical;
- BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
- failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
- is_metadata = !(sblock_to_check->pagev[0]->flags &
- BTRFS_EXTENT_FLAG_DATA);
- have_csum = sblock_to_check->pagev[0]->have_csum;
- dev = sblock_to_check->pagev[0]->dev;
-
- if (btrfs_is_zoned(fs_info) && !sctx->is_dev_replace)
- return btrfs_repair_one_zone(fs_info, logical);
- /*
- * We must use GFP_NOFS because the scrub task might be waiting for a
- * worker task executing this function and in turn a transaction commit
- * might be waiting the scrub task to pause (which needs to wait for all
- * the worker tasks to complete before pausing).
- * We do allocations in the workers through insert_full_stripe_lock()
- * and scrub_add_page_to_wr_bio(), which happens down the call chain of
- * this function.
- */
- nofs_flag = memalloc_nofs_save();
- /*
- * For RAID5/6, race can happen for a different device scrub thread.
- * For data corruption, Parity and Data threads will both try
- * to recovery the data.
- * Race can lead to doubly added csum error, or even unrecoverable
- * error.
- */
- ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
- if (ret < 0) {
- memalloc_nofs_restore(nofs_flag);
- spin_lock(&sctx->stat_lock);
- if (ret == -ENOMEM)
- sctx->stat.malloc_errors++;
- sctx->stat.read_errors++;
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- return ret;
- }
-
- /*
- * read all mirrors one after the other. This includes to
- * re-read the extent or metadata block that failed (that was
- * the cause that this fixup code is called) another time,
- * sector by sector this time in order to know which sectors
- * caused I/O errors and which ones are good (for all mirrors).
- * It is the goal to handle the situation when more than one
- * mirror contains I/O errors, but the errors do not
- * overlap, i.e. the data can be repaired by selecting the
- * sectors from those mirrors without I/O error on the
- * particular sectors. One example (with blocks >= 2 * sectorsize)
- * would be that mirror #1 has an I/O error on the first sector,
- * the second sector is good, and mirror #2 has an I/O error on
- * the second sector, but the first sector is good.
- * Then the first sector of the first mirror can be repaired by
- * taking the first sector of the second mirror, and the
- * second sector of the second mirror can be repaired by
- * copying the contents of the 2nd sector of the 1st mirror.
- * One more note: if the sectors of one mirror contain I/O
- * errors, the checksum cannot be verified. In order to get
- * the best data for repairing, the first attempt is to find
- * a mirror without I/O errors and with a validated checksum.
- * Only if this is not possible, the sectors are picked from
- * mirrors with I/O errors without considering the checksum.
- * If the latter is the case, at the end, the checksum of the
- * repaired area is verified in order to correctly maintain
- * the statistics.
- */
-
- sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
- sizeof(*sblocks_for_recheck), GFP_KERNEL);
- if (!sblocks_for_recheck) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- sctx->stat.read_errors++;
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
- goto out;
- }
-
- /* setup the context, map the logical blocks and alloc the pages */
- ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
- if (ret) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.read_errors++;
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
- goto out;
- }
- BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
- sblock_bad = sblocks_for_recheck + failed_mirror_index;
-
- /* build and submit the bios for the failed mirror, check checksums */
- scrub_recheck_block(fs_info, sblock_bad, 1);
-
- if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
- sblock_bad->no_io_error_seen) {
- /*
- * the error disappeared after reading page by page, or
- * the area was part of a huge bio and other parts of the
- * bio caused I/O errors, or the block layer merged several
- * read requests into one and the error is caused by a
- * different bio (usually one of the two latter cases is
- * the cause)
- */
- spin_lock(&sctx->stat_lock);
- sctx->stat.unverified_errors++;
- sblock_to_check->data_corrected = 1;
- spin_unlock(&sctx->stat_lock);
-
- if (sctx->is_dev_replace)
- scrub_write_block_to_dev_replace(sblock_bad);
- goto out;
- }
+ if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
+ return 0;
- if (!sblock_bad->no_io_error_seen) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.read_errors++;
- spin_unlock(&sctx->stat_lock);
- if (__ratelimit(&rs))
- scrub_print_warning("i/o error", sblock_to_check);
- btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
- } else if (sblock_bad->checksum_error) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.csum_errors++;
- spin_unlock(&sctx->stat_lock);
- if (__ratelimit(&rs))
- scrub_print_warning("checksum error", sblock_to_check);
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- } else if (sblock_bad->header_error) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.verify_errors++;
- spin_unlock(&sctx->stat_lock);
- if (__ratelimit(&rs))
- scrub_print_warning("checksum/header error",
- sblock_to_check);
- if (sblock_bad->generation_error)
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_GENERATION_ERRS);
- else
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- }
+ if (sctx->write_pointer < physical) {
+ length = physical - sctx->write_pointer;
- if (sctx->readonly) {
- ASSERT(!sctx->is_dev_replace);
- goto out;
+ ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
+ sctx->write_pointer, length);
+ if (!ret)
+ sctx->write_pointer = physical;
}
+ return ret;
+}
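+
+/*
+ * E.g. if the target zone's write pointer sits at 1MiB but the next copied
+ * extent starts at 1MiB + 512KiB, the 512KiB gap is zeroed out first so the
+ * zoned target keeps receiving purely sequential writes.
+ */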
- /*
- * now build and submit the bios for the other mirrors, check
- * checksums.
- * First try to pick the mirror which is completely without I/O
- * errors and also does not have a checksum error.
- * If one is found, and if a checksum is present, the full block
- * that is known to contain an error is rewritten. Afterwards
- * the block is known to be corrected.
- * If a mirror is found which is completely correct, and no
- * checksum is present, only those pages are rewritten that had
- * an I/O error in the block to be repaired, since it cannot be
- * determined, which copy of the other pages is better (and it
- * could happen otherwise that a correct page would be
- * overwritten by a bad one).
- */
- for (mirror_index = 0; ;mirror_index++) {
- struct scrub_block *sblock_other;
-
- if (mirror_index == failed_mirror_index)
- continue;
-
- /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
- if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
- if (mirror_index >= BTRFS_MAX_MIRRORS)
- break;
- if (!sblocks_for_recheck[mirror_index].page_count)
- break;
-
- sblock_other = sblocks_for_recheck + mirror_index;
- } else {
- struct scrub_recover *r = sblock_bad->pagev[0]->recover;
- int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
-
- if (mirror_index >= max_allowed)
- break;
- if (!sblocks_for_recheck[1].page_count)
- break;
+static void *scrub_stripe_get_kaddr(struct scrub_stripe *stripe, int sector_nr)
+{
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ u32 offset = (sector_nr << fs_info->sectorsize_bits);
+ const struct folio *folio = stripe->folios[offset >> min_folio_shift];
- ASSERT(failed_mirror_index == 0);
- sblock_other = sblocks_for_recheck + 1;
- sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
- }
+ /* stripe->folios[] is allocated by us and no highmem is allowed. */
+ ASSERT(folio);
+ ASSERT(!folio_test_highmem(folio));
+ return folio_address(folio) + offset_in_folio(folio, offset);
+}
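+
+/*
+ * E.g. with 4KiB pages, block_min_order 0 and a 4KiB sectorsize
+ * (min_folio_shift == 12), sector_nr 5 maps to offset 20KiB, i.e.
+ * folios[5] at offset 0 inside that folio.
+ */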
- /* build and submit the bios, check checksums */
- scrub_recheck_block(fs_info, sblock_other, 0);
+static phys_addr_t scrub_stripe_get_paddr(struct scrub_stripe *stripe, int sector_nr)
+{
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ u32 offset = (sector_nr << fs_info->sectorsize_bits);
+ const struct folio *folio = stripe->folios[offset >> min_folio_shift];
- if (!sblock_other->header_error &&
- !sblock_other->checksum_error &&
- sblock_other->no_io_error_seen) {
- if (sctx->is_dev_replace) {
- scrub_write_block_to_dev_replace(sblock_other);
- goto corrected_error;
- } else {
- ret = scrub_repair_block_from_good_copy(
- sblock_bad, sblock_other);
- if (!ret)
- goto corrected_error;
- }
- }
- }
+ /* stripe->folios[] is allocated by us and no highmem is allowed. */
+ ASSERT(folio);
+ ASSERT(!folio_test_highmem(folio));
+ /* And the range must be contained inside the folio. */
+ ASSERT(offset_in_folio(folio, offset) + fs_info->sectorsize <= folio_size(folio));
+ return page_to_phys(folio_page(folio, 0)) + offset_in_folio(folio, offset);
+}
- if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
- goto did_not_correct_error;
+static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
+{
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
+ const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
+ void *first_kaddr = scrub_stripe_get_kaddr(stripe, sector_nr);
+ struct btrfs_header *header = first_kaddr;
+ SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+ u8 on_disk_csum[BTRFS_CSUM_SIZE];
+ u8 calculated_csum[BTRFS_CSUM_SIZE];
/*
- * In case of I/O errors in the area that is supposed to be
- * repaired, continue by picking good copies of those sectors.
- * Select the good sectors from mirrors to rewrite bad sectors from
- * the area to fix. Afterwards verify the checksum of the block
- * that is supposed to be repaired. This verification step is
- * only done for the purpose of statistic counting and for the
- * final scrub report, whether errors remain.
- * A perfect algorithm could make use of the checksum and try
- * all possible combinations of sectors from the different mirrors
- * until the checksum verification succeeds. For example, when
- * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
- * of mirror #2 is readable but the final checksum test fails,
- * then the 2nd sector of mirror #3 could be tried, whether now
- * the final checksum succeeds. But this would be a rare
- * exception and is therefore not implemented. At least it is
- * avoided that the good copy is overwritten.
- * A more useful improvement would be to pick the sectors
- * without I/O error based on sector sizes (512 bytes on legacy
- * disks) instead of on sectorsize. Then maybe 512 byte of one
- * mirror could be repaired by taking 512 byte of a different
- * mirror, even if other 512 byte sectors in the same sectorsize
- * area are unreadable.
+	 * Here we don't have a good way to attach the pages (and subpages)
+	 * to a dummy extent buffer, thus we have to grab the members directly
+	 * from the pages.
*/
- success = 1;
- for (page_num = 0; page_num < sblock_bad->page_count;
- page_num++) {
- struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
- struct scrub_block *sblock_other = NULL;
-
- /* skip no-io-error page in scrub */
- if (!spage_bad->io_error && !sctx->is_dev_replace)
- continue;
-
- if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
- /*
- * In case of dev replace, if raid56 rebuild process
- * didn't work out correct data, then copy the content
- * in sblock_bad to make sure target device is identical
- * to source device, instead of writing garbage data in
- * sblock_for_recheck array to target device.
- */
- sblock_other = NULL;
- } else if (spage_bad->io_error) {
- /* try to find no-io-error page in mirrors */
- for (mirror_index = 0;
- mirror_index < BTRFS_MAX_MIRRORS &&
- sblocks_for_recheck[mirror_index].page_count > 0;
- mirror_index++) {
- if (!sblocks_for_recheck[mirror_index].
- pagev[page_num]->io_error) {
- sblock_other = sblocks_for_recheck +
- mirror_index;
- break;
- }
- }
- if (!sblock_other)
- success = 0;
- }
-
- if (sctx->is_dev_replace) {
- /*
- * did not find a mirror to fetch the page
- * from. scrub_write_page_to_dev_replace()
- * handles this case (page->io_error), by
- * filling the block with zeros before
- * submitting the write request
- */
- if (!sblock_other)
- sblock_other = sblock_bad;
-
- if (scrub_write_page_to_dev_replace(sblock_other,
- page_num) != 0) {
- atomic64_inc(
- &fs_info->dev_replace.num_write_errors);
- success = 0;
- }
- } else if (sblock_other) {
- ret = scrub_repair_page_from_good_copy(sblock_bad,
- sblock_other,
- page_num, 0);
- if (0 == ret)
- spage_bad->io_error = 0;
- else
- success = 0;
- }
+ memcpy(on_disk_csum, header->csum, fs_info->csum_size);
+
+ if (logical != btrfs_stack_header_bytenr(header)) {
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block %llu mirror %u has bad bytenr, has %llu want %llu",
+ logical, stripe->mirror_num,
+ btrfs_stack_header_bytenr(header), logical);
+ return;
}
-
- if (success && !sctx->is_dev_replace) {
- if (is_metadata || have_csum) {
- /*
- * need to verify the checksum now that all
- * sectors on disk are repaired (the write
- * request for data to be repaired is on its way).
- * Just be lazy and use scrub_recheck_block()
- * which re-reads the data before the checksum
- * is verified, but most likely the data comes out
- * of the page cache.
- */
- scrub_recheck_block(fs_info, sblock_bad, 1);
- if (!sblock_bad->header_error &&
- !sblock_bad->checksum_error &&
- sblock_bad->no_io_error_seen)
- goto corrected_error;
- else
- goto did_not_correct_error;
- } else {
-corrected_error:
- spin_lock(&sctx->stat_lock);
- sctx->stat.corrected_errors++;
- sblock_to_check->data_corrected = 1;
- spin_unlock(&sctx->stat_lock);
- btrfs_err_rl_in_rcu(fs_info,
- "fixed up error at logical %llu on dev %s",
- logical, rcu_str_deref(dev->name));
- }
- } else {
-did_not_correct_error:
- spin_lock(&sctx->stat_lock);
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- btrfs_err_rl_in_rcu(fs_info,
- "unable to fixup (regular) error at logical %llu on dev %s",
- logical, rcu_str_deref(dev->name));
+ if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
+ BTRFS_FSID_SIZE) != 0) {
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block %llu mirror %u has bad fsid, has %pU want %pU",
+ logical, stripe->mirror_num,
+ header->fsid, fs_info->fs_devices->fsid);
+ return;
}
-
-out:
- if (sblocks_for_recheck) {
- for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
- mirror_index++) {
- struct scrub_block *sblock = sblocks_for_recheck +
- mirror_index;
- struct scrub_recover *recover;
- int page_index;
-
- for (page_index = 0; page_index < sblock->page_count;
- page_index++) {
- sblock->pagev[page_index]->sblock = NULL;
- recover = sblock->pagev[page_index]->recover;
- if (recover) {
- scrub_put_recover(fs_info, recover);
- sblock->pagev[page_index]->recover =
- NULL;
- }
- scrub_page_put(sblock->pagev[page_index]);
- }
- }
- kfree(sblocks_for_recheck);
+ if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+ BTRFS_UUID_SIZE) != 0) {
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
+ logical, stripe->mirror_num,
+ header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
+ return;
}
- ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
- memalloc_nofs_restore(nofs_flag);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
-{
- if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
- return 2;
- else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
- return 3;
- else
- return (int)bioc->num_stripes;
-}
-
-static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
- u64 *raid_map,
- u64 mapped_length,
- int nstripes, int mirror,
- int *stripe_index,
- u64 *stripe_offset)
-{
- int i;
-
- if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- /* RAID5/6 */
- for (i = 0; i < nstripes; i++) {
- if (raid_map[i] == RAID6_Q_STRIPE ||
- raid_map[i] == RAID5_P_STRIPE)
- continue;
+ /* Now check tree block csum. */
+ shash->tfm = fs_info->csum_shash;
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, first_kaddr + BTRFS_CSUM_SIZE,
+ fs_info->sectorsize - BTRFS_CSUM_SIZE);
- if (logical >= raid_map[i] &&
- logical < raid_map[i] + mapped_length)
- break;
- }
+ for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
+ crypto_shash_update(shash, scrub_stripe_get_kaddr(stripe, i),
+ fs_info->sectorsize);
+ }
- *stripe_index = i;
- *stripe_offset = logical - raid_map[i];
- } else {
- /* The other RAID type */
- *stripe_index = mirror;
- *stripe_offset = 0;
+ crypto_shash_final(shash, calculated_csum);
+ if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
+ scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
+ btrfs_warn_rl(fs_info,
+"scrub: tree block %llu mirror %u has bad csum, has " BTRFS_CSUM_FMT " want " BTRFS_CSUM_FMT,
+ logical, stripe->mirror_num,
+ BTRFS_CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
+ BTRFS_CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
+ return;
}
+ if (stripe->sectors[sector_nr].generation !=
+ btrfs_stack_header_generation(header)) {
+ scrub_bitmap_set_meta_gen_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block %llu mirror %u has bad generation, has %llu want %llu",
+ logical, stripe->mirror_num,
+ btrfs_stack_header_generation(header),
+ stripe->sectors[sector_nr].generation);
+ return;
+ }
+ scrub_bitmap_clear_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_csum_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_meta_error(stripe, sector_nr, sectors_per_tree);
+ scrub_bitmap_clear_meta_gen_error(stripe, sector_nr, sectors_per_tree);
}
-static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
- struct scrub_block *sblocks_for_recheck)
+static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
{
- struct scrub_ctx *sctx = original_sblock->sctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- u64 length = original_sblock->page_count * fs_info->sectorsize;
- u64 logical = original_sblock->pagev[0]->logical;
- u64 generation = original_sblock->pagev[0]->generation;
- u64 flags = original_sblock->pagev[0]->flags;
- u64 have_csum = original_sblock->pagev[0]->have_csum;
- struct scrub_recover *recover;
- struct btrfs_io_context *bioc;
- u64 sublen;
- u64 mapped_length;
- u64 stripe_offset;
- int stripe_index;
- int page_index = 0;
- int mirror_index;
- int nmirrors;
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
+ const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
+ phys_addr_t paddr = scrub_stripe_get_paddr(stripe, sector_nr);
+ u8 csum_buf[BTRFS_CSUM_SIZE];
int ret;
- /*
- * note: the two members refs and outstanding_pages
- * are not used (and not set) in the blocks that are used for
- * the recheck procedure
- */
+ ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
+
+ /* Sector not utilized, skip it. */
+ if (!scrub_bitmap_test_bit_has_extent(stripe, sector_nr))
+ return;
- while (length > 0) {
- sublen = min_t(u64, length, fs_info->sectorsize);
- mapped_length = sublen;
- bioc = NULL;
+ /* IO error, no need to check. */
+ if (scrub_bitmap_test_bit_io_error(stripe, sector_nr))
+ return;
+ /* Metadata, verify the full tree block. */
+ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) {
/*
- * With a length of sectorsize, each returned stripe represents
- * one mirror
+		 * Check if the tree block crosses the stripe boundary. If it
+		 * does, we cannot verify it and only give a warning.
+ *
+ * This can only happen on a very old filesystem where chunks
+ * are not ensured to be stripe aligned.
*/
- btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
- logical, &mapped_length, &bioc);
- if (ret || !bioc || mapped_length < sublen) {
- btrfs_put_bioc(bioc);
- btrfs_bio_counter_dec(fs_info);
- return -EIO;
- }
-
- recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
- if (!recover) {
- btrfs_put_bioc(bioc);
- btrfs_bio_counter_dec(fs_info);
- return -ENOMEM;
- }
-
- refcount_set(&recover->refs, 1);
- recover->bioc = bioc;
- recover->map_length = mapped_length;
-
- BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
-
- nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
-
- for (mirror_index = 0; mirror_index < nmirrors;
- mirror_index++) {
- struct scrub_block *sblock;
- struct scrub_page *spage;
-
- sblock = sblocks_for_recheck + mirror_index;
- sblock->sctx = sctx;
-
- spage = kzalloc(sizeof(*spage), GFP_NOFS);
- if (!spage) {
-leave_nomem:
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- scrub_put_recover(fs_info, recover);
- return -ENOMEM;
- }
- scrub_page_get(spage);
- sblock->pagev[page_index] = spage;
- spage->sblock = sblock;
- spage->flags = flags;
- spage->generation = generation;
- spage->logical = logical;
- spage->have_csum = have_csum;
- if (have_csum)
- memcpy(spage->csum,
- original_sblock->pagev[0]->csum,
- sctx->fs_info->csum_size);
-
- scrub_stripe_index_and_offset(logical,
- bioc->map_type,
- bioc->raid_map,
- mapped_length,
- bioc->num_stripes -
- bioc->num_tgtdevs,
- mirror_index,
- &stripe_index,
- &stripe_offset);
- spage->physical = bioc->stripes[stripe_index].physical +
- stripe_offset;
- spage->dev = bioc->stripes[stripe_index].dev;
-
- BUG_ON(page_index >= original_sblock->page_count);
- spage->physical_for_dev_replace =
- original_sblock->pagev[page_index]->
- physical_for_dev_replace;
- /* for missing devices, dev->bdev is NULL */
- spage->mirror_num = mirror_index + 1;
- sblock->page_count++;
- spage->page = alloc_page(GFP_NOFS);
- if (!spage->page)
- goto leave_nomem;
-
- scrub_get_recover(recover);
- spage->recover = recover;
+ if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
+ btrfs_warn_rl(fs_info,
+ "scrub: tree block at %llu crosses stripe boundary %llu",
+ stripe->logical +
+ (sector_nr << fs_info->sectorsize_bits),
+ stripe->logical);
+ return;
}
- scrub_put_recover(fs_info, recover);
- length -= sublen;
- logical += sublen;
- page_index++;
+ scrub_verify_one_metadata(stripe, sector_nr);
+ return;
}
- return 0;
-}
+ /*
+	 * Data is easier: we just verify the data csum (if we have it). For
+	 * cases without a csum, we have no other choice but to trust it.
+ */
+ if (!sector->csum) {
+ scrub_bitmap_clear_bit_error(stripe, sector_nr);
+ return;
+ }
-static void scrub_bio_wait_endio(struct bio *bio)
-{
- complete(bio->bi_private);
+ ret = btrfs_check_block_csum(fs_info, paddr, csum_buf, sector->csum);
+ if (ret < 0) {
+ scrub_bitmap_set_bit_csum_error(stripe, sector_nr);
+ scrub_bitmap_set_bit_error(stripe, sector_nr);
+ } else {
+ scrub_bitmap_clear_bit_csum_error(stripe, sector_nr);
+ scrub_bitmap_clear_bit_error(stripe, sector_nr);
+ }
}
-static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
- struct bio *bio,
- struct scrub_page *spage)
+/* Verify specified sectors of a stripe. */
+static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
{
- DECLARE_COMPLETION_ONSTACK(done);
- int ret;
- int mirror_num;
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
+ int sector_nr;
- bio->bi_iter.bi_sector = spage->logical >> 9;
- bio->bi_private = &done;
- bio->bi_end_io = scrub_bio_wait_endio;
-
- mirror_num = spage->sblock->pagev[0]->mirror_num;
- ret = raid56_parity_recover(bio, spage->recover->bioc,
- spage->recover->map_length,
- mirror_num, 0);
- if (ret)
- return ret;
-
- wait_for_completion_io(&done);
- return blk_status_to_errno(bio->bi_status);
+ for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
+ scrub_verify_one_sector(stripe, sector_nr);
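+ /*
+ * A metadata sector is verified as a whole tree block, so skip the
+ * rest of its sectors (e.g. 3 more with 16K nodesize and 4K sectors).
+ */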
+ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr))
+ sector_nr += sectors_per_tree - 1;
+ }
}
-static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
- struct scrub_block *sblock)
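+/*
+ * Map the first bio_vec of a bbio back to its sector number inside the
+ * stripe, by matching its kernel address against the stripe's sectors.
+ */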
+static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
- struct scrub_page *first_page = sblock->pagev[0];
- struct bio *bio;
- int page_num;
-
- /* All pages in sblock belong to the same stripe on the same device. */
- ASSERT(first_page->dev);
- if (!first_page->dev->bdev)
- goto out;
-
- bio = btrfs_bio_alloc(BIO_MAX_VECS);
- bio_set_dev(bio, first_page->dev->bdev);
-
- for (page_num = 0; page_num < sblock->page_count; page_num++) {
- struct scrub_page *spage = sblock->pagev[page_num];
-
- WARN_ON(!spage->page);
- bio_add_page(bio, spage->page, PAGE_SIZE, 0);
- }
+ int i;
- if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
- bio_put(bio);
- goto out;
+ for (i = 0; i < stripe->nr_sectors; i++) {
+ if (scrub_stripe_get_kaddr(stripe, i) == bvec_virt(first_bvec))
+ break;
}
-
- bio_put(bio);
-
- scrub_recheck_block_checksum(sblock);
-
- return;
-out:
- for (page_num = 0; page_num < sblock->page_count; page_num++)
- sblock->pagev[page_num]->io_error = 1;
-
- sblock->no_io_error_seen = 0;
+ ASSERT(i < stripe->nr_sectors);
+ return i;
}
/*
- * this function will check the on disk data for checksum errors, header
- * errors and read I/O errors. If any I/O errors happen, the exact pages
- * which are errored are marked as being bad. The goal is to enable scrub
- * to take those pages that are not errored from all the mirrors so that
- * the pages that are errored in the just handled mirror can be repaired.
+ * Repair read is different from the regular read:
+ *
+ * - Only reads the failed sectors
+ * - May have extra blocksize limits
*/
-static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
- struct scrub_block *sblock,
- int retry_failed_mirror)
+static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
- int page_num;
-
- sblock->no_io_error_seen = 1;
-
- /* short cut for raid56 */
- if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
- return scrub_recheck_block_on_raid56(fs_info, sblock);
-
- for (page_num = 0; page_num < sblock->page_count; page_num++) {
- struct bio *bio;
- struct scrub_page *spage = sblock->pagev[page_num];
-
- if (spage->dev->bdev == NULL) {
- spage->io_error = 1;
- sblock->no_io_error_seen = 0;
- continue;
- }
-
- WARN_ON(!spage->page);
- bio = btrfs_bio_alloc(1);
- bio_set_dev(bio, spage->dev->bdev);
+ struct scrub_stripe *stripe = bbio->private;
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct bio_vec *bvec;
+ int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+ u32 bio_size = 0;
+ int i;
- bio_add_page(bio, spage->page, fs_info->sectorsize, 0);
- bio->bi_iter.bi_sector = spage->physical >> 9;
- bio->bi_opf = REQ_OP_READ;
+ ASSERT(sector_nr < stripe->nr_sectors);
- if (btrfsic_submit_bio_wait(bio)) {
- spage->io_error = 1;
- sblock->no_io_error_seen = 0;
- }
+ bio_for_each_bvec_all(bvec, &bbio->bio, i)
+ bio_size += bvec->bv_len;
- bio_put(bio);
+ if (bbio->bio.bi_status) {
+ scrub_bitmap_set_io_error(stripe, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
+ scrub_bitmap_set_error(stripe, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
+ } else {
+ scrub_bitmap_clear_io_error(stripe, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
}
-
- if (sblock->no_io_error_seen)
- scrub_recheck_block_checksum(sblock);
+ bio_put(&bbio->bio);
+ if (atomic_dec_and_test(&stripe->pending_io))
+ wake_up(&stripe->io_wait);
}
-static inline int scrub_check_fsid(u8 fsid[],
- struct scrub_page *spage)
+static int calc_next_mirror(int mirror, int num_copies)
{
- struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
- int ret;
-
- ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
- return !ret;
+ ASSERT(mirror <= num_copies);
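+ /* E.g. with num_copies == 3 the order is 1 -> 2 -> 3 -> 1 -> ... */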
+ return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}
-static void scrub_recheck_block_checksum(struct scrub_block *sblock)
+static void scrub_bio_add_sector(struct btrfs_bio *bbio, struct scrub_stripe *stripe,
+ int sector_nr)
{
- sblock->header_error = 0;
- sblock->checksum_error = 0;
- sblock->generation_error = 0;
+ struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ void *kaddr = scrub_stripe_get_kaddr(stripe, sector_nr);
+ int ret;
- if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
- scrub_checksum_data(sblock);
- else
- scrub_checksum_tree_block(sblock);
+ ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), fs_info->sectorsize,
+ offset_in_page(kaddr));
+ /*
+ * The caller should ensure the bbio has enough size.
+ * We cannot use __bio_add_page(), which does not do any merging.
+ *
+ * Meanwhile scrub_submit_initial_read() fully relies on the merging
+ * to create the minimal number of bio vectors, for the fs block size
+ * < page size case.
+ */
+ ASSERT(ret == fs_info->sectorsize);
}
-static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
- struct scrub_block *sblock_good)
+static struct btrfs_bio *alloc_scrub_bbio(struct btrfs_fs_info *fs_info,
+ unsigned int nr_vecs, blk_opf_t opf,
+ u64 logical,
+ btrfs_bio_end_io_t end_io, void *private)
{
- int page_num;
- int ret = 0;
-
- for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
- int ret_sub;
-
- ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
- sblock_good,
- page_num, 1);
- if (ret_sub)
- ret = ret_sub;
- }
+ struct btrfs_bio *bbio;
- return ret;
+ bbio = btrfs_bio_alloc(nr_vecs, opf, BTRFS_I(fs_info->btree_inode),
+ logical, end_io, private);
+ bbio->is_scrub = true;
+ bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ return bbio;
}
-static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
- struct scrub_block *sblock_good,
- int page_num, int force_write)
+static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
+ int mirror, int blocksize, bool wait)
{
- struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
- struct scrub_page *spage_good = sblock_good->pagev[page_num];
- struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
- const u32 sectorsize = fs_info->sectorsize;
-
- BUG_ON(spage_bad->page == NULL);
- BUG_ON(spage_good->page == NULL);
- if (force_write || sblock_bad->header_error ||
- sblock_bad->checksum_error || spage_bad->io_error) {
- struct bio *bio;
- int ret;
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct btrfs_bio *bbio = NULL;
+ const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
+ int i;
- if (!spage_bad->dev->bdev) {
- btrfs_warn_rl(fs_info,
- "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
- return -EIO;
+ ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num);
+ ASSERT(atomic_read(&stripe->pending_io) == 0,
+ "atomic_read(&stripe->pending_io)=%d", atomic_read(&stripe->pending_io));
+
+ for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
+ /* The current sector cannot be merged, submit the bio. */
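+ /* I.e. the previous sector is not in the error bitmap, or the bio has reached the blocksize limit. */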
+ if (bbio && ((i > 0 && !test_bit(i - 1, &old_error_bitmap)) ||
+ bbio->bio.bi_iter.bi_size >= blocksize)) {
+ ASSERT(bbio->bio.bi_iter.bi_size);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_bbio(bbio, mirror);
+ if (wait)
+ wait_scrub_stripe_io(stripe);
+ bbio = NULL;
}
- bio = btrfs_bio_alloc(1);
- bio_set_dev(bio, spage_bad->dev->bdev);
- bio->bi_iter.bi_sector = spage_bad->physical >> 9;
- bio->bi_opf = REQ_OP_WRITE;
-
- ret = bio_add_page(bio, spage_good->page, sectorsize, 0);
- if (ret != sectorsize) {
- bio_put(bio);
- return -EIO;
- }
+ if (!bbio)
+ bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_READ,
+ stripe->logical + (i << fs_info->sectorsize_bits),
+ scrub_repair_read_endio, stripe);
- if (btrfsic_submit_bio_wait(bio)) {
- btrfs_dev_stat_inc_and_print(spage_bad->dev,
- BTRFS_DEV_STAT_WRITE_ERRS);
- atomic64_inc(&fs_info->dev_replace.num_write_errors);
- bio_put(bio);
- return -EIO;
- }
- bio_put(bio);
+ scrub_bio_add_sector(bbio, stripe, i);
+ }
+ if (bbio) {
+ ASSERT(bbio->bio.bi_iter.bi_size);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_bbio(bbio, mirror);
+ if (wait)
+ wait_scrub_stripe_io(stripe);
}
-
- return 0;
}
-static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
+static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
+ struct scrub_stripe *stripe,
+ const struct scrub_error_records *errors)
{
- struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
- int page_num;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_device *dev = NULL;
+ const unsigned long extent_bitmap = scrub_bitmap_read_has_extent(stripe);
+ const unsigned long error_bitmap = scrub_bitmap_read_error(stripe);
+ u64 physical = 0;
+ int nr_data_sectors = 0;
+ int nr_meta_sectors = 0;
+ int nr_nodatacsum_sectors = 0;
+ int nr_repaired_sectors = 0;
+ int sector_nr;
+
+ if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
+ return;
/*
- * This block is used for the check of the parity on the source device,
- * so the data needn't be written into the destination device.
+ * Init the info needed for error reporting.
+ *
+ * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio()
+ * and thus does not need dev/physical itself, error reporting still needs them.
*/
- if (sblock->sparity)
- return;
-
- for (page_num = 0; page_num < sblock->page_count; page_num++) {
+ if (!bitmap_empty(&errors->init_error_bitmap, stripe->nr_sectors)) {
+ u64 mapped_len = fs_info->sectorsize;
+ struct btrfs_io_context *bioc = NULL;
+ int stripe_index = stripe->mirror_num - 1;
int ret;
- ret = scrub_write_page_to_dev_replace(sblock, page_num);
- if (ret)
- atomic64_inc(&fs_info->dev_replace.num_write_errors);
- }
-}
-
-static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
- int page_num)
-{
- struct scrub_page *spage = sblock->pagev[page_num];
-
- BUG_ON(spage->page == NULL);
- if (spage->io_error)
- clear_page(page_address(spage->page));
-
- return scrub_add_page_to_wr_bio(sblock->sctx, spage);
-}
-
-static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
-{
- int ret = 0;
- u64 length;
-
- if (!btrfs_is_zoned(sctx->fs_info))
- return 0;
-
- if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
- return 0;
-
- if (sctx->write_pointer < physical) {
- length = physical - sctx->write_pointer;
-
- ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
- sctx->write_pointer, length);
- if (!ret)
- sctx->write_pointer = physical;
+ /* For scrub, our mirror_num should always start at 1. */
+ ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
+ stripe->logical, &mapped_len, &bioc,
+ NULL, NULL);
+ /*
+ * If we failed, dev will be NULL, and later detailed reports
+ * will just be skipped.
+ */
+ if (ret < 0)
+ goto skip;
+ physical = bioc->stripes[stripe_index].physical;
+ dev = bioc->stripes[stripe_index].dev;
+ btrfs_put_bioc(bioc);
}
- return ret;
-}
-
-static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
- struct scrub_page *spage)
-{
- struct scrub_bio *sbio;
- int ret;
- const u32 sectorsize = sctx->fs_info->sectorsize;
- mutex_lock(&sctx->wr_lock);
-again:
- if (!sctx->wr_curr_bio) {
- sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
- GFP_KERNEL);
- if (!sctx->wr_curr_bio) {
- mutex_unlock(&sctx->wr_lock);
- return -ENOMEM;
- }
- sctx->wr_curr_bio->sctx = sctx;
- sctx->wr_curr_bio->page_count = 0;
- }
- sbio = sctx->wr_curr_bio;
- if (sbio->page_count == 0) {
- struct bio *bio;
+skip:
+ for_each_set_bit(sector_nr, &extent_bitmap, stripe->nr_sectors) {
+ bool repaired = false;
- ret = fill_writer_pointer_gap(sctx,
- spage->physical_for_dev_replace);
- if (ret) {
- mutex_unlock(&sctx->wr_lock);
- return ret;
+ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) {
+ nr_meta_sectors++;
+ } else {
+ nr_data_sectors++;
+ if (!stripe->sectors[sector_nr].csum)
+ nr_nodatacsum_sectors++;
}
- sbio->physical = spage->physical_for_dev_replace;
- sbio->logical = spage->logical;
- sbio->dev = sctx->wr_tgtdev;
- bio = sbio->bio;
- if (!bio) {
- bio = btrfs_bio_alloc(sctx->pages_per_wr_bio);
- sbio->bio = bio;
+ if (test_bit(sector_nr, &errors->init_error_bitmap) &&
+ !test_bit(sector_nr, &error_bitmap)) {
+ nr_repaired_sectors++;
+ repaired = true;
}
- bio->bi_private = sbio;
- bio->bi_end_io = scrub_wr_bio_end_io;
- bio_set_dev(bio, sbio->dev->bdev);
- bio->bi_iter.bi_sector = sbio->physical >> 9;
- bio->bi_opf = REQ_OP_WRITE;
- sbio->status = 0;
- } else if (sbio->physical + sbio->page_count * sectorsize !=
- spage->physical_for_dev_replace ||
- sbio->logical + sbio->page_count * sectorsize !=
- spage->logical) {
- scrub_wr_submit(sctx);
- goto again;
- }
+ /* Good sector from the beginning, nothing needs to be done. */
+ if (!test_bit(sector_nr, &errors->init_error_bitmap))
+ continue;
- ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0);
- if (ret != sectorsize) {
- if (sbio->page_count < 1) {
- bio_put(sbio->bio);
- sbio->bio = NULL;
- mutex_unlock(&sctx->wr_lock);
- return -EIO;
+ /*
+ * Report errors for the corrupted sectors. If a sector has been
+ * repaired, just output the "fixed up" message.
+ */
+ if (repaired) {
+ if (dev) {
+ btrfs_err_rl(fs_info,
+ "scrub: fixed up error at logical %llu on dev %s physical %llu",
+ stripe->logical, btrfs_dev_name(dev),
+ physical);
+ } else {
+ btrfs_err_rl(fs_info,
+ "scrub: fixed up error at logical %llu on mirror %u",
+ stripe->logical, stripe->mirror_num);
+ }
+ continue;
}
- scrub_wr_submit(sctx);
- goto again;
- }
-
- sbio->pagev[sbio->page_count] = spage;
- scrub_page_get(spage);
- sbio->page_count++;
- if (sbio->page_count == sctx->pages_per_wr_bio)
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
-
- return 0;
-}
-static void scrub_wr_submit(struct scrub_ctx *sctx)
-{
- struct scrub_bio *sbio;
+ /* The remaining cases are all unrepaired sectors. */
+ if (dev) {
+ btrfs_err_rl(fs_info,
+"scrub: unable to fixup (regular) error at logical %llu on dev %s physical %llu",
+ stripe->logical, btrfs_dev_name(dev),
+ physical);
+ } else {
+ btrfs_err_rl(fs_info,
+ "scrub: unable to fixup (regular) error at logical %llu on mirror %u",
+ stripe->logical, stripe->mirror_num);
+ }
- if (!sctx->wr_curr_bio)
- return;
+ if (scrub_bitmap_test_bit_io_error(stripe, sector_nr))
+ if (__ratelimit(&rs) && dev)
+ scrub_print_common_warning("i/o error", dev, false,
+ stripe->logical, physical);
+ if (scrub_bitmap_test_bit_csum_error(stripe, sector_nr))
+ if (__ratelimit(&rs) && dev)
+ scrub_print_common_warning("checksum error", dev, false,
+ stripe->logical, physical);
+ if (scrub_bitmap_test_bit_meta_error(stripe, sector_nr))
+ if (__ratelimit(&rs) && dev)
+ scrub_print_common_warning("header error", dev, false,
+ stripe->logical, physical);
+ if (scrub_bitmap_test_bit_meta_gen_error(stripe, sector_nr))
+ if (__ratelimit(&rs) && dev)
+ scrub_print_common_warning("generation error", dev, false,
+ stripe->logical, physical);
+ }
+
+ /* Update the device stats. */
+ for (int i = 0; i < errors->nr_io_errors; i++)
+ btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS);
+ for (int i = 0; i < errors->nr_csum_errors; i++)
+ btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
+ /* Generation mismatch errors are counted per metadata block, not per sector. */
+ for (int i = 0; i < errors->nr_meta_gen_errors;
+ i += (fs_info->nodesize >> fs_info->sectorsize_bits))
+ btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS);
- sbio = sctx->wr_curr_bio;
- sctx->wr_curr_bio = NULL;
- WARN_ON(!sbio->bio->bi_bdev);
- scrub_pending_bio_inc(sctx);
- /* process all writes in a single worker thread. Then the block layer
- * orders the requests before sending them to the driver which
- * doubled the write performance on spinning disks when measured
- * with Linux 3.5 */
- btrfsic_submit_bio(sbio->bio);
-
- if (btrfs_is_zoned(sctx->fs_info))
- sctx->write_pointer = sbio->physical + sbio->page_count *
- sctx->fs_info->sectorsize;
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
+ sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
+ sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
+ sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
+ sctx->stat.no_csum += nr_nodatacsum_sectors;
+ sctx->stat.read_errors += errors->nr_io_errors;
+ sctx->stat.csum_errors += errors->nr_csum_errors;
+ sctx->stat.verify_errors += errors->nr_meta_errors +
+ errors->nr_meta_gen_errors;
+ sctx->stat.uncorrectable_errors +=
+ bitmap_weight(&error_bitmap, stripe->nr_sectors);
+ sctx->stat.corrected_errors += nr_repaired_sectors;
+ spin_unlock(&sctx->stat_lock);
}
-static void scrub_wr_bio_end_io(struct bio *bio)
-{
- struct scrub_bio *sbio = bio->bi_private;
- struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
-
- sbio->status = bio->bi_status;
- sbio->bio = bio;
-
- btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
- btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
-}
+static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
+ unsigned long write_bitmap, bool dev_replace);
-static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
+/*
+ * The main entry point for all read-related scrub work, including:
+ *
+ * - Wait for the initial read to finish
+ * - Verify and locate any bad sectors
+ * - Go through the remaining mirrors and try to read as large a blocksize as
+ * possible
+ * - Go through all mirrors (including the failed mirror) sector-by-sector
+ * - Submit writeback for repaired sectors
+ *
+ * Writeback for dev-replace does not happen here; it needs extra
+ * synchronization for zoned devices.
+ */
+static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
- struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
- struct scrub_ctx *sctx = sbio->sctx;
+ struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
+ struct scrub_ctx *sctx = stripe->sctx;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct scrub_error_records errors = { 0 };
+ int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
+ stripe->bg->length);
+ unsigned long repaired;
+ unsigned long error;
+ int mirror;
int i;
- WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
- if (sbio->status) {
- struct btrfs_dev_replace *dev_replace =
- &sbio->sctx->fs_info->dev_replace;
+ ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num);
- for (i = 0; i < sbio->page_count; i++) {
- struct scrub_page *spage = sbio->pagev[i];
+ wait_scrub_stripe_io(stripe);
+ scrub_verify_one_stripe(stripe, scrub_bitmap_read_has_extent(stripe));
+ /* Save the initial failed bitmap for later repair and error reporting. */
+ errors.init_error_bitmap = scrub_bitmap_read_error(stripe);
+ errors.nr_io_errors = scrub_bitmap_weight_io_error(stripe);
+ errors.nr_csum_errors = scrub_bitmap_weight_csum_error(stripe);
+ errors.nr_meta_errors = scrub_bitmap_weight_meta_error(stripe);
+ errors.nr_meta_gen_errors = scrub_bitmap_weight_meta_gen_error(stripe);
- spage->io_error = 1;
- atomic64_inc(&dev_replace->num_write_errors);
- }
- }
-
- for (i = 0; i < sbio->page_count; i++)
- scrub_page_put(sbio->pagev[i]);
-
- bio_put(sbio->bio);
- kfree(sbio);
- scrub_pending_bio_dec(sctx);
-}
-
-static int scrub_checksum(struct scrub_block *sblock)
-{
- u64 flags;
- int ret;
+ if (bitmap_empty(&errors.init_error_bitmap, stripe->nr_sectors))
+ goto out;
/*
- * No need to initialize these stats currently,
- * because this function only use return value
- * instead of these stats value.
+ * Try all remaining mirrors.
*
- * Todo:
- * always use stats
+ * Here we still try to read as large a block as possible, as this is
+ * faster and we have extra safety nets to rely on.
*/
- sblock->header_error = 0;
- sblock->generation_error = 0;
- sblock->checksum_error = 0;
-
- WARN_ON(sblock->page_count < 1);
- flags = sblock->pagev[0]->flags;
- ret = 0;
- if (flags & BTRFS_EXTENT_FLAG_DATA)
- ret = scrub_checksum_data(sblock);
- else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
- ret = scrub_checksum_tree_block(sblock);
- else if (flags & BTRFS_EXTENT_FLAG_SUPER)
- (void)scrub_checksum_super(sblock);
- else
- WARN_ON(1);
- if (ret)
- scrub_handle_errored_block(sblock);
-
- return ret;
-}
-
-static int scrub_checksum_data(struct scrub_block *sblock)
-{
- struct scrub_ctx *sctx = sblock->sctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- u8 csum[BTRFS_CSUM_SIZE];
- struct scrub_page *spage;
- char *kaddr;
-
- BUG_ON(sblock->page_count < 1);
- spage = sblock->pagev[0];
- if (!spage->have_csum)
- return 0;
+ for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
+ mirror != stripe->mirror_num;
+ mirror = calc_next_mirror(mirror, num_copies)) {
+ const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
+
+ scrub_stripe_submit_repair_read(stripe, mirror,
+ BTRFS_STRIPE_LEN, false);
+ wait_scrub_stripe_io(stripe);
+ scrub_verify_one_stripe(stripe, old_error_bitmap);
+ if (scrub_bitmap_empty_error(stripe))
+ goto out;
+ }
- kaddr = page_address(spage->page);
+ /*
+ * Last safety net: try re-checking all mirrors, including the failed
+ * one, sector-by-sector.
+ *
+ * If one sector fails the drive's internal csum, the whole read
+ * containing the offending sector is marked as an error, thus here
+ * we read sector-by-sector.
+ *
+ * This can be slow, so we only try it as the last resort.
+ */
- shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
+ for (i = 0, mirror = stripe->mirror_num;
+ i < num_copies;
+ i++, mirror = calc_next_mirror(mirror, num_copies)) {
+ const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
+ scrub_stripe_submit_repair_read(stripe, mirror,
+ fs_info->sectorsize, true);
+ wait_scrub_stripe_io(stripe);
+ scrub_verify_one_stripe(stripe, old_error_bitmap);
+ if (scrub_bitmap_empty_error(stripe))
+ goto out;
+ }
+out:
+ error = scrub_bitmap_read_error(stripe);
/*
- * In scrub_pages() and scrub_pages_for_parity() we ensure each spage
- * only contains one sector of data.
+ * Submit the repaired sectors. For the zoned case, we cannot repair
+ * in-place, but instead queue the bg to be relocated.
*/
- crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
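+ /* Sectors that failed the initial read but now pass verification have been repaired. */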
+ bitmap_andnot(&repaired, &errors.init_error_bitmap, &error,
+ stripe->nr_sectors);
+ if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
+ if (btrfs_is_zoned(fs_info)) {
+ btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
+ } else {
+ scrub_write_sectors(sctx, stripe, repaired, false);
+ wait_scrub_stripe_io(stripe);
+ }
+ }
- if (memcmp(csum, spage->csum, fs_info->csum_size))
- sblock->checksum_error = 1;
- return sblock->checksum_error;
+ scrub_stripe_report_errors(sctx, stripe, &errors);
+ set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
+ wake_up(&stripe->repair_wait);
}
-static int scrub_checksum_tree_block(struct scrub_block *sblock)
+static void scrub_read_endio(struct btrfs_bio *bbio)
{
- struct scrub_ctx *sctx = sblock->sctx;
- struct btrfs_header *h;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- u8 calculated_csum[BTRFS_CSUM_SIZE];
- u8 on_disk_csum[BTRFS_CSUM_SIZE];
- /*
- * This is done in sectorsize steps even for metadata as there's a
- * constraint for nodesize to be aligned to sectorsize. This will need
- * to change so we don't misuse data and metadata units like that.
- */
- const u32 sectorsize = sctx->fs_info->sectorsize;
- const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
+ struct scrub_stripe *stripe = bbio->private;
+ struct bio_vec *bvec;
+ int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+ int num_sectors;
+ u32 bio_size = 0;
int i;
- struct scrub_page *spage;
- char *kaddr;
- BUG_ON(sblock->page_count < 1);
-
- /* Each member in pagev is just one block, not a full page */
- ASSERT(sblock->page_count == num_sectors);
-
- spage = sblock->pagev[0];
- kaddr = page_address(spage->page);
- h = (struct btrfs_header *)kaddr;
- memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
-
- /*
- * we don't use the getter functions here, as we
- * a) don't have an extent buffer and
- * b) the page is already kmapped
- */
- if (spage->logical != btrfs_stack_header_bytenr(h))
- sblock->header_error = 1;
+ ASSERT(sector_nr < stripe->nr_sectors);
+ bio_for_each_bvec_all(bvec, &bbio->bio, i)
+ bio_size += bvec->bv_len;
+ num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
- if (spage->generation != btrfs_stack_header_generation(h)) {
- sblock->header_error = 1;
- sblock->generation_error = 1;
+ if (bbio->bio.bi_status) {
+ scrub_bitmap_set_io_error(stripe, sector_nr, num_sectors);
+ scrub_bitmap_set_error(stripe, sector_nr, num_sectors);
+ } else {
+ scrub_bitmap_clear_io_error(stripe, sector_nr, num_sectors);
}
-
- if (!scrub_check_fsid(h->fsid, spage))
- sblock->header_error = 1;
-
- if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
- BTRFS_UUID_SIZE))
- sblock->header_error = 1;
-
- shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
- crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
- sectorsize - BTRFS_CSUM_SIZE);
-
- for (i = 1; i < num_sectors; i++) {
- kaddr = page_address(sblock->pagev[i]->page);
- crypto_shash_update(shash, kaddr, sectorsize);
+ bio_put(&bbio->bio);
+ if (atomic_dec_and_test(&stripe->pending_io)) {
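+ /* The last completed read for this stripe kicks off the verify/repair worker. */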
+ wake_up(&stripe->io_wait);
+ INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
+ queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
}
-
- crypto_shash_final(shash, calculated_csum);
- if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
- sblock->checksum_error = 1;
-
- return sblock->header_error || sblock->checksum_error;
}
-static int scrub_checksum_super(struct scrub_block *sblock)
+static void scrub_write_endio(struct btrfs_bio *bbio)
{
- struct btrfs_super_block *s;
- struct scrub_ctx *sctx = sblock->sctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- u8 calculated_csum[BTRFS_CSUM_SIZE];
- struct scrub_page *spage;
- char *kaddr;
- int fail_gen = 0;
- int fail_cor = 0;
-
- BUG_ON(sblock->page_count < 1);
- spage = sblock->pagev[0];
- kaddr = page_address(spage->page);
- s = (struct btrfs_super_block *)kaddr;
-
- if (spage->logical != btrfs_super_bytenr(s))
- ++fail_cor;
-
- if (spage->generation != btrfs_super_generation(s))
- ++fail_gen;
+ struct scrub_stripe *stripe = bbio->private;
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct bio_vec *bvec;
+ int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+ u32 bio_size = 0;
+ int i;
- if (!scrub_check_fsid(s->fsid, spage))
- ++fail_cor;
+ bio_for_each_bvec_all(bvec, &bbio->bio, i)
+ bio_size += bvec->bv_len;
- shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
- crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
- BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
+ if (bbio->bio.bi_status) {
+ unsigned long flags;
- if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
- ++fail_cor;
-
- if (fail_cor + fail_gen) {
- /*
- * if we find an error in a super block, we just report it.
- * They will get written with the next transaction commit
- * anyway
- */
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.super_errors;
- spin_unlock(&sctx->stat_lock);
- if (fail_cor)
- btrfs_dev_stat_inc_and_print(spage->dev,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- else
- btrfs_dev_stat_inc_and_print(spage->dev,
- BTRFS_DEV_STAT_GENERATION_ERRS);
+ spin_lock_irqsave(&stripe->write_error_lock, flags);
+ bitmap_set(&stripe->write_error_bitmap, sector_nr,
+ bio_size >> fs_info->sectorsize_bits);
+ spin_unlock_irqrestore(&stripe->write_error_lock, flags);
+ for (i = 0; i < (bio_size >> fs_info->sectorsize_bits); i++)
+ btrfs_dev_stat_inc_and_print(stripe->dev,
+ BTRFS_DEV_STAT_WRITE_ERRS);
}
+ bio_put(&bbio->bio);
- return fail_cor + fail_gen;
-}
-
-static void scrub_block_get(struct scrub_block *sblock)
-{
- refcount_inc(&sblock->refs);
+ if (atomic_dec_and_test(&stripe->pending_io))
+ wake_up(&stripe->io_wait);
}
-static void scrub_block_put(struct scrub_block *sblock)
+static void scrub_submit_write_bio(struct scrub_ctx *sctx,
+ struct scrub_stripe *stripe,
+ struct btrfs_bio *bbio, bool dev_replace)
{
- if (refcount_dec_and_test(&sblock->refs)) {
- int i;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ u32 bio_len = bbio->bio.bi_iter.bi_size;
+ u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
+ stripe->logical;
- if (sblock->sparity)
- scrub_parity_put(sblock->sparity);
+ fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
+ if (!btrfs_is_zoned(fs_info))
+ return;
+ /*
+ * For zoned writeback, the queue depth must be 1, thus we must wait for
+ * the write to finish before submitting the next one.
+ */
+ wait_scrub_stripe_io(stripe);
- for (i = 0; i < sblock->page_count; i++)
- scrub_page_put(sblock->pagev[i]);
- kfree(sblock);
- }
+ /*
+ * Also update the write pointer if the write finished
+ * successfully.
+ */
+ if (!test_bit(bio_off >> fs_info->sectorsize_bits,
+ &stripe->write_error_bitmap))
+ sctx->write_pointer += bio_len;
}
-static void scrub_page_get(struct scrub_page *spage)
+/*
+ * Submit the write bio(s) for the sectors specified by @write_bitmap.
+ *
+ * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
+ *
+ * - Only needs logical bytenr and mirror_num
+ * Just like the scrub read path
+ *
+ * - Only results in writes to the specified mirror
+ * Unlike the regular writeback path, which would write back to all stripes
+ *
+ * - Handles dev-replace and read-repair writeback differently
+ */
+static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
+ unsigned long write_bitmap, bool dev_replace)
{
- atomic_inc(&spage->refs);
-}
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct btrfs_bio *bbio = NULL;
+ int sector_nr;
-static void scrub_page_put(struct scrub_page *spage)
-{
- if (atomic_dec_and_test(&spage->refs)) {
- if (spage->page)
- __free_page(spage->page);
- kfree(spage);
+ for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
+ /* We should only write back sectors covered by an extent. */
+ ASSERT(scrub_bitmap_test_bit_has_extent(stripe, sector_nr));
+
+ /* Cannot merge with previous sector, submit the current one. */
+ if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
+ scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
+ bbio = NULL;
+ }
+ if (!bbio)
+ bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_WRITE,
+ stripe->logical + (sector_nr << fs_info->sectorsize_bits),
+ scrub_write_endio, stripe);
+ scrub_bio_add_sector(bbio, stripe, sector_nr);
}
+ if (bbio)
+ scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
}
/*
* Throttling of IO submission, bandwidth-limit based, the timeslice is 1
* second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
*/
-static void scrub_throttle(struct scrub_ctx *sctx)
+static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
+ unsigned int bio_size)
{
const int time_slice = 1000;
- struct scrub_bio *sbio;
- struct btrfs_device *device;
s64 delta;
ktime_t now;
u32 div;
u64 bwlimit;
- sbio = sctx->bios[sctx->curr];
- device = sbio->dev;
bwlimit = READ_ONCE(device->scrub_speed_max);
if (bwlimit == 0)
return;
@@ -2017,8 +1395,7 @@ static void scrub_throttle(struct scrub_ctx *sctx)
* Slice is divided into intervals when the IO is submitted, adjust by
* bwlimit and maximum of 64 intervals.
*/
- div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
- div = min_t(u32, 64, div);
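+ /* E.g. a 256MiB/s limit gives div == 16, i.e. throttle in 16MiB steps. */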
+ div = clamp(bwlimit / (16 * 1024 * 1024), 1, 64);
/* Start new epoch, set deadline */
now = ktime_get();
@@ -2030,7 +1407,7 @@ static void scrub_throttle(struct scrub_ctx *sctx)
/* Still in the time to send? */
if (ktime_before(now, sctx->throttle_deadline)) {
/* If current bio is within the limit, send it */
- sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
+ sctx->throttle_sent += bio_size;
if (sctx->throttle_sent <= div_u64(bwlimit, div))
return;
@@ -2052,1522 +1429,1177 @@ static void scrub_throttle(struct scrub_ctx *sctx)
sctx->throttle_deadline = 0;
}
-static void scrub_submit(struct scrub_ctx *sctx)
+/*
+ * Given a physical address, this will calculate its
+ * logical offset. If this is a parity stripe, it will return
+ * the left-most data stripe's logical offset.
+ *
+ * Return 0 if it is a data stripe, 1 if it is a parity stripe.
+ */
+static int get_raid56_logic_offset(u64 physical, int num,
+ struct btrfs_chunk_map *map, u64 *offset,
+ u64 *stripe_start)
{
- struct scrub_bio *sbio;
-
- if (sctx->curr == -1)
- return;
-
- scrub_throttle(sctx);
+ int i;
+ int j = 0;
+ u64 last_offset;
+ const int data_stripes = nr_data_stripes(map);
- sbio = sctx->bios[sctx->curr];
- sctx->curr = -1;
- scrub_pending_bio_inc(sctx);
- btrfsic_submit_bio(sbio->bio);
-}
+ last_offset = (physical - map->stripes[num].physical) * data_stripes;
+ if (stripe_start)
+ *stripe_start = last_offset;
-static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
- struct scrub_page *spage)
-{
- struct scrub_block *sblock = spage->sblock;
- struct scrub_bio *sbio;
- const u32 sectorsize = sctx->fs_info->sectorsize;
- int ret;
+ *offset = last_offset;
+ for (i = 0; i < data_stripes; i++) {
+ u32 stripe_nr;
+ u32 stripe_index;
+ u32 rot;
-again:
- /*
- * grab a fresh bio or wait for one to become available
- */
- while (sctx->curr == -1) {
- spin_lock(&sctx->list_lock);
- sctx->curr = sctx->first_free;
- if (sctx->curr != -1) {
- sctx->first_free = sctx->bios[sctx->curr]->next_free;
- sctx->bios[sctx->curr]->next_free = -1;
- sctx->bios[sctx->curr]->page_count = 0;
- spin_unlock(&sctx->list_lock);
- } else {
- spin_unlock(&sctx->list_lock);
- wait_event(sctx->list_wait, sctx->first_free != -1);
- }
- }
- sbio = sctx->bios[sctx->curr];
- if (sbio->page_count == 0) {
- struct bio *bio;
-
- sbio->physical = spage->physical;
- sbio->logical = spage->logical;
- sbio->dev = spage->dev;
- bio = sbio->bio;
- if (!bio) {
- bio = btrfs_bio_alloc(sctx->pages_per_rd_bio);
- sbio->bio = bio;
- }
+ *offset = last_offset + btrfs_stripe_nr_to_offset(i);
- bio->bi_private = sbio;
- bio->bi_end_io = scrub_bio_end_io;
- bio_set_dev(bio, sbio->dev->bdev);
- bio->bi_iter.bi_sector = sbio->physical >> 9;
- bio->bi_opf = REQ_OP_READ;
- sbio->status = 0;
- } else if (sbio->physical + sbio->page_count * sectorsize !=
- spage->physical ||
- sbio->logical + sbio->page_count * sectorsize !=
- spage->logical ||
- sbio->dev != spage->dev) {
- scrub_submit(sctx);
- goto again;
- }
+ stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
- sbio->pagev[sbio->page_count] = spage;
- ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0);
- if (ret != sectorsize) {
- if (sbio->page_count < 1) {
- bio_put(sbio->bio);
- sbio->bio = NULL;
- return -EIO;
- }
- scrub_submit(sctx);
- goto again;
+ /* Work out the disk rotation on this stripe-set */
+ rot = stripe_nr % map->num_stripes;
+ /* Calculate which stripe this data is located on */
+ rot += i;
+ stripe_index = rot % map->num_stripes;
+ if (stripe_index == num)
+ return 0;
+ if (stripe_index < num)
+ j++;
}
-
- scrub_block_get(sblock); /* one for the page added to the bio */
- atomic_inc(&sblock->outstanding_pages);
- sbio->page_count++;
- if (sbio->page_count == sctx->pages_per_rd_bio)
- scrub_submit(sctx);
-
- return 0;
+ *offset = last_offset + btrfs_stripe_nr_to_offset(j);
+ return 1;
}
-static void scrub_missing_raid56_end_io(struct bio *bio)
+/*
+ * Return 0 if the extent item range covers any byte of the range.
+ * Return <0 if the extent item is before @search_start.
+ * Return >0 if the extent item is after @search_start + @search_len.
+ */
+static int compare_extent_item_range(struct btrfs_path *path,
+ u64 search_start, u64 search_len)
{
- struct scrub_block *sblock = bio->bi_private;
- struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
-
- if (bio->bi_status)
- sblock->no_io_error_seen = 0;
+ struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
+ u64 len;
+ struct btrfs_key key;
- bio_put(bio);
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY, "key.type=%u", key.type);
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ len = fs_info->nodesize;
+ else
+ len = key.offset;
- btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
+ if (key.objectid + len <= search_start)
+ return -1;
+ if (key.objectid >= search_start + search_len)
+ return 1;
+ return 0;
}
-static void scrub_missing_raid56_worker(struct btrfs_work *work)
+/*
+ * Locate one extent item which covers any byte in range
+ * [@search_start, @search_start + @search_len)
+ *
+ * If the path is not initialized, we will initialize the search by doing
+ * a btrfs_search_slot().
+ * If the path is already initialized, we will use the path as the initial
+ * slot, to avoid duplicated btrfs_search_slot() calls.
+ *
+ * NOTE: If an extent item starts before @search_start, we will still
+ * return the extent item. This is for data extents crossing stripe boundaries.
+ *
+ * Return 0 if we found such an extent item, and @path will point to it.
+ * Return >0 if no such extent item can be found, and @path will be released.
+ * Return <0 if we hit a fatal error, and @path will be released.
+ */
+static int find_first_extent_item(struct btrfs_root *extent_root,
+ struct btrfs_path *path,
+ u64 search_start, u64 search_len)
{
- struct scrub_block *sblock = container_of(work, struct scrub_block, work);
- struct scrub_ctx *sctx = sblock->sctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- u64 logical;
- struct btrfs_device *dev;
+ struct btrfs_fs_info *fs_info = extent_root->fs_info;
+ struct btrfs_key key;
+ int ret;
- logical = sblock->pagev[0]->logical;
- dev = sblock->pagev[0]->dev;
+ /* Continue using the existing path */
+ if (path->nodes[0])
+ goto search_forward;
- if (sblock->no_io_error_seen)
- scrub_recheck_block_checksum(sblock);
+ key.objectid = search_start;
+ if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ else
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = (u64)-1;
- if (!sblock->no_io_error_seen) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.read_errors++;
- spin_unlock(&sctx->stat_lock);
- btrfs_err_rl_in_rcu(fs_info,
- "IO error rebuilding logical %llu for dev %s",
- logical, rcu_str_deref(dev->name));
- } else if (sblock->header_error || sblock->checksum_error) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- btrfs_err_rl_in_rcu(fs_info,
- "failed to rebuild valid logical %llu for dev %s",
- logical, rcu_str_deref(dev->name));
- } else {
- scrub_write_block_to_dev_replace(sblock);
+ ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (unlikely(ret == 0)) {
+ /*
+ * Key with offset -1 found: an extent item with such an offset
+ * would have to exist, but that is outside the valid range.
+ */
+ btrfs_release_path(path);
+ return -EUCLEAN;
}
- if (sctx->is_dev_replace && sctx->flush_all_writes) {
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
+ /*
+ * Here we intentionally pass 0 as @min_objectid, as there could be
+ * an extent item starting before @search_start.
+ */
+ ret = btrfs_previous_extent_item(extent_root, path, 0);
+ if (ret < 0)
+ return ret;
+ /*
+ * No matter whether we have found an extent item, the next loop will
+ * properly do every check on the key.
+ */
+search_forward:
+ while (true) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid >= search_start + search_len)
+ break;
+ if (key.type != BTRFS_METADATA_ITEM_KEY &&
+ key.type != BTRFS_EXTENT_ITEM_KEY)
+ goto next;
+
+ ret = compare_extent_item_range(path, search_start, search_len);
+ if (ret == 0)
+ return ret;
+ if (ret > 0)
+ break;
+next:
+ ret = btrfs_next_item(extent_root, path);
+ if (ret) {
+ /* Either no more items or a fatal error. */
+ btrfs_release_path(path);
+ return ret;
+ }
}
+ btrfs_release_path(path);
+ return 1;
+}
+
+static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
+ u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
+{
+ struct btrfs_key key;
+ struct btrfs_extent_item *ei;
- scrub_block_put(sblock);
- scrub_pending_bio_dec(sctx);
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
+ key.type == BTRFS_EXTENT_ITEM_KEY, "key.type=%u", key.type);
+ *extent_start_ret = key.objectid;
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ *size_ret = path->nodes[0]->fs_info->nodesize;
+ else
+ *size_ret = key.offset;
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
+ *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
+ *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
}
-static void scrub_missing_raid56_pages(struct scrub_block *sblock)
+static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
+ u64 physical, u64 physical_end)
{
- struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
- u64 length = sblock->page_count * PAGE_SIZE;
- u64 logical = sblock->pagev[0]->logical;
- struct btrfs_io_context *bioc = NULL;
- struct bio *bio;
- struct btrfs_raid_bio *rbio;
- int ret;
- int i;
+ int ret = 0;
- btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
- &length, &bioc);
- if (ret || !bioc || !bioc->raid_map)
- goto bioc_out;
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
- if (WARN_ON(!sctx->is_dev_replace ||
- !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
- /*
- * We shouldn't be scrubbing a missing device. Even for dev
- * replace, we should only get here for RAID 5/6. We either
- * managed to mount something with no mirrors remaining or
- * there's a bug in scrub_remap_extent()/btrfs_map_block().
- */
- goto bioc_out;
+ mutex_lock(&sctx->wr_lock);
+ if (sctx->write_pointer < physical_end) {
+ ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
+ physical,
+ sctx->write_pointer);
+ if (ret)
+ btrfs_err(fs_info, "scrub: zoned: failed to recover write pointer");
}
+ mutex_unlock(&sctx->wr_lock);
+ btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
- bio = btrfs_bio_alloc(BIO_MAX_VECS);
- bio->bi_iter.bi_sector = logical >> 9;
- bio->bi_private = sblock;
- bio->bi_end_io = scrub_missing_raid56_end_io;
-
- rbio = raid56_alloc_missing_rbio(bio, bioc, length);
- if (!rbio)
- goto rbio_out;
-
- for (i = 0; i < sblock->page_count; i++) {
- struct scrub_page *spage = sblock->pagev[i];
+ return ret;
+}
- raid56_add_scrub_pages(rbio, spage->page, spage->logical);
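+/*
+ * Mark every sector of @stripe covered by the given extent as having an
+ * extent; tree block sectors also record the extent generation for later
+ * verification.
+ */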
+static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
+ struct scrub_stripe *stripe,
+ u64 extent_start, u64 extent_len,
+ u64 extent_flags, u64 extent_gen)
+{
+ for (u64 cur_logical = max(stripe->logical, extent_start);
+ cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
+ extent_start + extent_len);
+ cur_logical += fs_info->sectorsize) {
+ const int nr_sector = (cur_logical - stripe->logical) >>
+ fs_info->sectorsize_bits;
+ struct scrub_sector_verification *sector =
+ &stripe->sectors[nr_sector];
+
+ scrub_bitmap_set_bit_has_extent(stripe, nr_sector);
+ if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ scrub_bitmap_set_bit_is_metadata(stripe, nr_sector);
+ sector->generation = extent_gen;
+ }
}
-
- btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
- scrub_block_get(sblock);
- scrub_pending_bio_inc(sctx);
- raid56_submit_missing_rbio(rbio);
- return;
-
-rbio_out:
- bio_put(bio);
-bioc_out:
- btrfs_bio_counter_dec(fs_info);
- btrfs_put_bioc(bioc);
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
}
-static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
- u64 physical, struct btrfs_device *dev, u64 flags,
- u64 gen, int mirror_num, u8 *csum,
- u64 physical_for_dev_replace)
+static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
{
- struct scrub_block *sblock;
- const u32 sectorsize = sctx->fs_info->sectorsize;
- int index;
+ ASSERT(stripe->nr_sectors);
+ bitmap_zero(stripe->bitmaps, scrub_bitmap_nr_last * stripe->nr_sectors);
+}
- sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
- if (!sblock) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- return -ENOMEM;
+/*
+ * Locate one stripe which has at least one extent in its range.
+ *
+ * Return 0 if we found such a stripe, and store its info into @stripe.
+ * Return >0 if there is no such stripe in the specified range.
+ * Return <0 for error.
+ */
+static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ struct btrfs_path *extent_path,
+ struct btrfs_path *csum_path,
+ struct btrfs_device *dev, u64 physical,
+ int mirror_num, u64 logical_start,
+ u32 logical_len,
+ struct scrub_stripe *stripe)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
+ struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
+ const u64 logical_end = logical_start + logical_len;
+ u64 cur_logical = logical_start;
+ u64 stripe_end;
+ u64 extent_start;
+ u64 extent_len;
+ u64 extent_flags;
+ u64 extent_gen;
+ int ret;
+
+ if (unlikely(!extent_root || !csum_root)) {
+ btrfs_err(fs_info, "scrub: no valid extent or csum root found");
+ return -EUCLEAN;
}
+ memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
+ stripe->nr_sectors);
+ scrub_stripe_reset_bitmaps(stripe);
- /* one ref inside this function, plus one for each page added to
- * a bio later on */
- refcount_set(&sblock->refs, 1);
- sblock->sctx = sctx;
- sblock->no_io_error_seen = 1;
+ /* The range must be inside the bg. */
+ ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length,
+ "bg->start=%llu logical_start=%llu logical_end=%llu end=%llu",
+ bg->start, logical_start, logical_end, bg->start + bg->length);
- for (index = 0; len > 0; index++) {
- struct scrub_page *spage;
- /*
- * Here we will allocate one page for one sector to scrub.
- * This is fine if PAGE_SIZE == sectorsize, but will cost
- * more memory for PAGE_SIZE > sectorsize case.
- */
- u32 l = min(sectorsize, len);
+ ret = find_first_extent_item(extent_root, extent_path, logical_start,
+ logical_len);
+ /* Either error or not found. */
+ if (ret)
+ goto out;
+ get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
+ &extent_gen);
+ if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ stripe->nr_meta_extents++;
+ if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
+ stripe->nr_data_extents++;
+ cur_logical = max(extent_start, cur_logical);
- spage = kzalloc(sizeof(*spage), GFP_KERNEL);
- if (!spage) {
-leave_nomem:
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- scrub_block_put(sblock);
- return -ENOMEM;
- }
- BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
- scrub_page_get(spage);
- sblock->pagev[index] = spage;
- spage->sblock = sblock;
- spage->dev = dev;
- spage->flags = flags;
- spage->generation = gen;
- spage->logical = logical;
- spage->physical = physical;
- spage->physical_for_dev_replace = physical_for_dev_replace;
- spage->mirror_num = mirror_num;
- if (csum) {
- spage->have_csum = 1;
- memcpy(spage->csum, csum, sctx->fs_info->csum_size);
- } else {
- spage->have_csum = 0;
+ /*
+ * Round down to stripe boundary.
+ *
+ * The extra calculation against bg->start is to handle block groups
+ * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
+ */
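+ /* E.g. for a bg starting at 1MiB + 32KiB, stripes stay BTRFS_STRIPE_LEN (64KiB) aligned relative to bg->start. */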
+ stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
+ bg->start;
+ stripe->physical = physical + stripe->logical - logical_start;
+ stripe->dev = dev;
+ stripe->bg = bg;
+ stripe->mirror_num = mirror_num;
+ stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
+
+ /* Fill the first extent info into stripe->sectors[] array. */
+ fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
+ extent_flags, extent_gen);
+ cur_logical = extent_start + extent_len;
+
+ /* Fill the extent info for the remaining sectors. */
+ while (cur_logical <= stripe_end) {
+ ret = find_first_extent_item(extent_root, extent_path, cur_logical,
+ stripe_end - cur_logical + 1);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ ret = 0;
+ break;
}
- sblock->page_count++;
- spage->page = alloc_page(GFP_KERNEL);
- if (!spage->page)
- goto leave_nomem;
- len -= l;
- logical += l;
- physical += l;
- physical_for_dev_replace += l;
+ get_extent_info(extent_path, &extent_start, &extent_len,
+ &extent_flags, &extent_gen);
+ if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ stripe->nr_meta_extents++;
+ if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
+ stripe->nr_data_extents++;
+ fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
+ extent_flags, extent_gen);
+ cur_logical = extent_start + extent_len;
}
- WARN_ON(sblock->page_count == 0);
- if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
+ /* Now fill the data csum. */
+ if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
+ int sector_nr;
+ unsigned long csum_bitmap = 0;
+
+ /* Csum space should have already been allocated. */
+ ASSERT(stripe->csums);
+
/*
- * This case should only be hit for RAID 5/6 device replace. See
- * the comment in scrub_missing_raid56_pages() for details.
+ * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
+ * should contain at most 16 sectors.
*/
- scrub_missing_raid56_pages(sblock);
- } else {
- for (index = 0; index < sblock->page_count; index++) {
- struct scrub_page *spage = sblock->pagev[index];
- int ret;
+ ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
- ret = scrub_add_page_to_rd_bio(sctx, spage);
- if (ret) {
- scrub_block_put(sblock);
- return ret;
- }
- }
+ ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
+ stripe->logical, stripe_end,
+ stripe->csums, &csum_bitmap);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ ret = 0;
- if (flags & BTRFS_EXTENT_FLAG_SUPER)
- scrub_submit(sctx);
+ for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
+ stripe->sectors[sector_nr].csum = stripe->csums +
+ sector_nr * fs_info->csum_size;
+ }
}
-
- /* last one frees, either here or in bio completion for last page */
- scrub_block_put(sblock);
- return 0;
+ set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
+out:
+ return ret;
}
-static void scrub_bio_end_io(struct bio *bio)
+static void scrub_reset_stripe(struct scrub_stripe *stripe)
{
- struct scrub_bio *sbio = bio->bi_private;
- struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
+ scrub_stripe_reset_bitmaps(stripe);
- sbio->status = bio->bi_status;
- sbio->bio = bio;
+ stripe->nr_meta_extents = 0;
+ stripe->nr_data_extents = 0;
+ stripe->state = 0;
- btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
+ for (int i = 0; i < stripe->nr_sectors; i++) {
+ stripe->sectors[i].csum = NULL;
+ stripe->sectors[i].generation = 0;
+ }
}
-static void scrub_bio_end_io_worker(struct btrfs_work *work)
+static u32 stripe_length(const struct scrub_stripe *stripe)
{
- struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
- struct scrub_ctx *sctx = sbio->sctx;
- int i;
-
- BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
- if (sbio->status) {
- for (i = 0; i < sbio->page_count; i++) {
- struct scrub_page *spage = sbio->pagev[i];
-
- spage->io_error = 1;
- spage->sblock->no_io_error_seen = 0;
- }
- }
-
- /* now complete the scrub_block items that have all pages completed */
- for (i = 0; i < sbio->page_count; i++) {
- struct scrub_page *spage = sbio->pagev[i];
- struct scrub_block *sblock = spage->sblock;
-
- if (atomic_dec_and_test(&sblock->outstanding_pages))
- scrub_block_complete(sblock);
- scrub_block_put(sblock);
- }
+ ASSERT(stripe->bg);
- bio_put(sbio->bio);
- sbio->bio = NULL;
- spin_lock(&sctx->list_lock);
- sbio->next_free = sctx->first_free;
- sctx->first_free = sbio->index;
- spin_unlock(&sctx->list_lock);
-
- if (sctx->is_dev_replace && sctx->flush_all_writes) {
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
- }
-
- scrub_pending_bio_dec(sctx);
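+ /* The last stripe of a block group can be shorter than BTRFS_STRIPE_LEN. */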
+ return min(BTRFS_STRIPE_LEN,
+ stripe->bg->start + stripe->bg->length - stripe->logical);
}
-static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
- unsigned long *bitmap,
- u64 start, u32 len)
+static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
{
- u64 offset;
- u32 nsectors;
- u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
-
- if (len >= sparity->stripe_len) {
- bitmap_set(bitmap, 0, sparity->nsectors);
- return;
- }
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct btrfs_bio *bbio = NULL;
+ unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
+ const unsigned long has_extent = scrub_bitmap_read_has_extent(stripe);
+ u64 stripe_len = BTRFS_STRIPE_LEN;
+ int mirror = stripe->mirror_num;
+ int i;
- start -= sparity->logic_start;
- start = div64_u64_rem(start, sparity->stripe_len, &offset);
- offset = offset >> sectorsize_bits;
- nsectors = len >> sectorsize_bits;
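+ /*
+ * Hold an extra pending_io reference so the read endios cannot queue the
+ * repair worker before all bios are submitted; it is dropped at the end of
+ * this function.
+ */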
+ atomic_inc(&stripe->pending_io);
- if (offset + nsectors <= sparity->nsectors) {
- bitmap_set(bitmap, offset, nsectors);
- return;
- }
+ for_each_set_bit(i, &has_extent, stripe->nr_sectors) {
+ /* We're beyond the chunk boundary, no need to read anymore. */
+ if (i >= nr_sectors)
+ break;
- bitmap_set(bitmap, offset, sparity->nsectors - offset);
- bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
-}
+ /* The current sector cannot be merged, submit the bio. */
+ if (bbio &&
+ ((i > 0 && !test_bit(i - 1, &has_extent)) ||
+ bbio->bio.bi_iter.bi_size >= stripe_len)) {
+ ASSERT(bbio->bio.bi_iter.bi_size);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_bbio(bbio, mirror);
+ bbio = NULL;
+ }
-static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
- u64 start, u32 len)
-{
- __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
-}
+ if (!bbio) {
+ struct btrfs_io_stripe io_stripe = {};
+ struct btrfs_io_context *bioc = NULL;
+ const u64 logical = stripe->logical +
+ (i << fs_info->sectorsize_bits);
+ int ret;
-static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
- u64 start, u32 len)
-{
- __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
-}
+ io_stripe.rst_search_commit_root = true;
+ stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
+ /*
+ * For RAID stripe tree (RST) cases, we need to manually
+ * split the bbio to follow the RST boundary.
+ */
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
+ &stripe_len, &bioc, &io_stripe, &mirror);
+ btrfs_put_bioc(bioc);
+ if (ret < 0) {
+ if (ret != -ENODATA) {
+ /*
+ * If btrfs_get_raid_extent_offset() earlier
+ * returned -ENODATA, there's no entry for the
+ * corresponding range in the stripe tree. If
+ * the range is still in the extent tree, it's
+ * a preallocated extent and not an error, so
+ * only other failures are recorded here.
+ */
+ scrub_bitmap_set_bit_io_error(stripe, i);
+ scrub_bitmap_set_bit_error(stripe, i);
+ }
+ continue;
+ }
-static void scrub_block_complete(struct scrub_block *sblock)
-{
- int corrupted = 0;
+ bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_READ,
+ logical, scrub_read_endio, stripe);
+ }
- if (!sblock->no_io_error_seen) {
- corrupted = 1;
- scrub_handle_errored_block(sblock);
- } else {
- /*
- * if has checksum error, write via repair mechanism in
- * dev replace case, otherwise write here in dev replace
- * case.
- */
- corrupted = scrub_checksum(sblock);
- if (!corrupted && sblock->sctx->is_dev_replace)
- scrub_write_block_to_dev_replace(sblock);
+ scrub_bio_add_sector(bbio, stripe, i);
}
- if (sblock->sparity && corrupted && !sblock->data_corrected) {
- u64 start = sblock->pagev[0]->logical;
- u64 end = sblock->pagev[sblock->page_count - 1]->logical +
- sblock->sctx->fs_info->sectorsize;
+ if (bbio) {
+ ASSERT(bbio->bio.bi_iter.bi_size);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_bbio(bbio, mirror);
+ }
- ASSERT(end - start <= U32_MAX);
- scrub_parity_mark_sectors_error(sblock->sparity,
- start, end - start);
+ if (atomic_dec_and_test(&stripe->pending_io)) {
+ wake_up(&stripe->io_wait);
+ INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
+ queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
}
}
-static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
+static void scrub_submit_initial_read(struct scrub_ctx *sctx,
+ struct scrub_stripe *stripe)
{
- sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
- list_del(&sum->list);
- kfree(sum);
-}
-
-/*
- * Find the desired csum for range [logical, logical + sectorsize), and store
- * the csum into @csum.
- *
- * The search source is sctx->csum_list, which is a pre-populated list
- * storing bytenr ordered csum ranges. We're responsible to cleanup any range
- * that is before @logical.
- *
- * Return 0 if there is no csum for the range.
- * Return 1 if there is csum for the range and copied to @csum.
- */
-static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
-{
- bool found = false;
-
- while (!list_empty(&sctx->csum_list)) {
- struct btrfs_ordered_sum *sum = NULL;
- unsigned long index;
- unsigned long num_sectors;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_bio *bbio;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
+ int mirror = stripe->mirror_num;
- sum = list_first_entry(&sctx->csum_list,
- struct btrfs_ordered_sum, list);
- /* The current csum range is beyond our range, no csum found */
- if (sum->bytenr > logical)
- break;
+ ASSERT(stripe->bg);
+ ASSERT(stripe->mirror_num > 0);
+ ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
- /*
- * The current sum is before our bytenr, since scrub is always
- * done in bytenr order, the csum will never be used anymore,
- * clean it up so that later calls won't bother with the range,
- * and continue search the next range.
- */
- if (sum->bytenr + sum->len <= logical) {
- drop_csum_range(sctx, sum);
- continue;
- }
+ if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
+ scrub_submit_extent_sector_read(stripe);
+ return;
+ }
- /* Now the csum range covers our bytenr, copy the csum */
- found = true;
- index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
- num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
+ bbio = alloc_scrub_bbio(fs_info, BTRFS_STRIPE_LEN >> min_folio_shift, REQ_OP_READ,
+ stripe->logical, scrub_read_endio, stripe);
+ /* Read the whole range inside the chunk boundary. */
+ for (unsigned int cur = 0; cur < nr_sectors; cur++)
+ scrub_bio_add_sector(bbio, stripe, cur);
+ atomic_inc(&stripe->pending_io);
- memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
- sctx->fs_info->csum_size);
+ /*
+ * For dev-replace, either user asks to avoid the source dev, or
+ * the device is missing, we try the next mirror instead.
+ */
+ if (sctx->is_dev_replace &&
+ (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
+ BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
+ !stripe->dev->bdev)) {
+ int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
+ stripe->bg->length);
- /* Cleanup the range if we're at the end of the csum range */
- if (index == num_sectors - 1)
- drop_csum_range(sctx, sum);
- break;
+ mirror = calc_next_mirror(mirror, num_copies);
}
- if (!found)
- return 0;
- return 1;
+ btrfs_submit_bbio(bbio, mirror);
}
-/* scrub extent tries to collect up to 64 kB for each bio */
-static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
- u64 logical, u32 len,
- u64 physical, struct btrfs_device *dev, u64 flags,
- u64 gen, int mirror_num, u64 physical_for_dev_replace)
+static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
{
- int ret;
- u8 csum[BTRFS_CSUM_SIZE];
- u32 blocksize;
-
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- blocksize = map->stripe_len;
- else
- blocksize = sctx->fs_info->sectorsize;
- spin_lock(&sctx->stat_lock);
- sctx->stat.data_extents_scrubbed++;
- sctx->stat.data_bytes_scrubbed += len;
- spin_unlock(&sctx->stat_lock);
- } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- blocksize = map->stripe_len;
- else
- blocksize = sctx->fs_info->nodesize;
- spin_lock(&sctx->stat_lock);
- sctx->stat.tree_extents_scrubbed++;
- sctx->stat.tree_bytes_scrubbed += len;
- spin_unlock(&sctx->stat_lock);
- } else {
- blocksize = sctx->fs_info->sectorsize;
- WARN_ON(1);
- }
+ const unsigned long error = scrub_bitmap_read_error(stripe);
+ int i;
- while (len) {
- u32 l = min(len, blocksize);
- int have_csum = 0;
+ for_each_set_bit(i, &error, stripe->nr_sectors) {
+ if (scrub_bitmap_test_bit_is_metadata(stripe, i)) {
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- /* push csums to sbio */
- have_csum = scrub_find_csum(sctx, logical, csum);
- if (have_csum == 0)
- ++sctx->stat.no_csum;
+ btrfs_err(fs_info,
+ "scrub: stripe %llu has unrepaired metadata sector at logical %llu",
+ stripe->logical,
+ stripe->logical + (i << fs_info->sectorsize_bits));
+ return true;
}
- ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
- mirror_num, have_csum ? csum : NULL,
- physical_for_dev_replace);
- if (ret)
- return ret;
- len -= l;
- logical += l;
- physical += l;
- physical_for_dev_replace += l;
}
- return 0;
+ return false;
}
-static int scrub_pages_for_parity(struct scrub_parity *sparity,
- u64 logical, u32 len,
- u64 physical, struct btrfs_device *dev,
- u64 flags, u64 gen, int mirror_num, u8 *csum)
+static void submit_initial_group_read(struct scrub_ctx *sctx,
+ unsigned int first_slot,
+ unsigned int nr_stripes)
{
- struct scrub_ctx *sctx = sparity->sctx;
- struct scrub_block *sblock;
- const u32 sectorsize = sctx->fs_info->sectorsize;
- int index;
+ struct blk_plug plug;
- ASSERT(IS_ALIGNED(len, sectorsize));
+ ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
+ ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
- sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
- if (!sblock) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- return -ENOMEM;
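+ /*
+ * Throttle the device I/O for the whole group before plugging and
+ * submitting the reads.
+ */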
+ scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
+ btrfs_stripe_nr_to_offset(nr_stripes));
+ blk_start_plug(&plug);
+ for (int i = 0; i < nr_stripes; i++) {
+ struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
+
+ /* Those stripes should be initialized. */
+ ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
+ scrub_submit_initial_read(sctx, stripe);
}
+ blk_finish_plug(&plug);
+}
- /* one ref inside this function, plus one for each page added to
- * a bio later on */
- refcount_set(&sblock->refs, 1);
- sblock->sctx = sctx;
- sblock->no_io_error_seen = 1;
- sblock->sparity = sparity;
- scrub_parity_get(sparity);
+static int flush_scrub_stripes(struct scrub_ctx *sctx)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct scrub_stripe *stripe;
+ const int nr_stripes = sctx->cur_stripe;
+ int ret = 0;
- for (index = 0; len > 0; index++) {
- struct scrub_page *spage;
+ if (!nr_stripes)
+ return 0;
- spage = kzalloc(sizeof(*spage), GFP_KERNEL);
- if (!spage) {
-leave_nomem:
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- scrub_block_put(sblock);
- return -ENOMEM;
- }
- BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
- /* For scrub block */
- scrub_page_get(spage);
- sblock->pagev[index] = spage;
- /* For scrub parity */
- scrub_page_get(spage);
- list_add_tail(&spage->list, &sparity->spages);
- spage->sblock = sblock;
- spage->dev = dev;
- spage->flags = flags;
- spage->generation = gen;
- spage->logical = logical;
- spage->physical = physical;
- spage->mirror_num = mirror_num;
- if (csum) {
- spage->have_csum = 1;
- memcpy(spage->csum, csum, sctx->fs_info->csum_size);
- } else {
- spage->have_csum = 0;
- }
- sblock->page_count++;
- spage->page = alloc_page(GFP_KERNEL);
- if (!spage->page)
- goto leave_nomem;
+ ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
+ /* Submit the stripes which are populated but not submitted. */
+ if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
+ const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
- /* Iterate over the stripe range in sectorsize steps */
- len -= sectorsize;
- logical += sectorsize;
- physical += sectorsize;
+ submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
}
- WARN_ON(sblock->page_count == 0);
- for (index = 0; index < sblock->page_count; index++) {
- struct scrub_page *spage = sblock->pagev[index];
- int ret;
+ for (int i = 0; i < nr_stripes; i++) {
+ stripe = &sctx->stripes[i];
- ret = scrub_add_page_to_rd_bio(sctx, spage);
- if (ret) {
- scrub_block_put(sblock);
- return ret;
- }
+ wait_event(stripe->repair_wait,
+ test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
}
- /* last one frees, either here or in bio completion for last page */
- scrub_block_put(sblock);
- return 0;
-}
+ /* Submit for dev-replace. */
+ if (sctx->is_dev_replace) {
+ /*
+ * For dev-replace, if we know there is something wrong with
+ * metadata, we should immediately abort.
+ */
+ for (int i = 0; i < nr_stripes; i++) {
+ if (unlikely(stripe_has_metadata_error(&sctx->stripes[i]))) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+ for (int i = 0; i < nr_stripes; i++) {
+ unsigned long good;
+ unsigned long has_extent;
+ unsigned long error;
-static int scrub_extent_for_parity(struct scrub_parity *sparity,
- u64 logical, u32 len,
- u64 physical, struct btrfs_device *dev,
- u64 flags, u64 gen, int mirror_num)
-{
- struct scrub_ctx *sctx = sparity->sctx;
- int ret;
- u8 csum[BTRFS_CSUM_SIZE];
- u32 blocksize;
+ stripe = &sctx->stripes[i];
- if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
- scrub_parity_mark_sectors_error(sparity, logical, len);
- return 0;
- }
+ ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- blocksize = sparity->stripe_len;
- } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- blocksize = sparity->stripe_len;
- } else {
- blocksize = sctx->fs_info->sectorsize;
- WARN_ON(1);
+ has_extent = scrub_bitmap_read_has_extent(stripe);
+ error = scrub_bitmap_read_error(stripe);
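+ /* Only write back sectors which have an extent and no remaining error. */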
+ bitmap_andnot(&good, &has_extent, &error, stripe->nr_sectors);
+ scrub_write_sectors(sctx, stripe, good, true);
+ }
}
- while (len) {
- u32 l = min(len, blocksize);
- int have_csum = 0;
+ /* Wait for the above writebacks to finish. */
+ for (int i = 0; i < nr_stripes; i++) {
+ stripe = &sctx->stripes[i];
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- /* push csums to sbio */
- have_csum = scrub_find_csum(sctx, logical, csum);
- if (have_csum == 0)
- goto skip;
- }
- ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
- flags, gen, mirror_num,
- have_csum ? csum : NULL);
- if (ret)
- return ret;
-skip:
- len -= l;
- logical += l;
- physical += l;
+ wait_scrub_stripe_io(stripe);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.last_physical = stripe->physical + stripe_length(stripe);
+ spin_unlock(&sctx->stat_lock);
+ scrub_reset_stripe(stripe);
}
- return 0;
+out:
+ sctx->cur_stripe = 0;
+ return ret;
}
-/*
- * Given a physical address, this will calculate it's
- * logical offset. if this is a parity stripe, it will return
- * the most left data stripe's logical offset.
- *
- * return 0 if it is a data stripe, 1 means parity stripe.
- */
-static int get_raid56_logic_offset(u64 physical, int num,
- struct map_lookup *map, u64 *offset,
- u64 *stripe_start)
+static void raid56_scrub_wait_endio(struct bio *bio)
{
- int i;
- int j = 0;
- u64 stripe_nr;
- u64 last_offset;
- u32 stripe_index;
- u32 rot;
- const int data_stripes = nr_data_stripes(map);
-
- last_offset = (physical - map->stripes[num].physical) * data_stripes;
- if (stripe_start)
- *stripe_start = last_offset;
-
- *offset = last_offset;
- for (i = 0; i < data_stripes; i++) {
- *offset = last_offset + i * map->stripe_len;
-
- stripe_nr = div64_u64(*offset, map->stripe_len);
- stripe_nr = div_u64(stripe_nr, data_stripes);
-
- /* Work out the disk rotation on this stripe-set */
- stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
- /* calculate which stripe this data locates */
- rot += i;
- stripe_index = rot % map->num_stripes;
- if (stripe_index == num)
- return 0;
- if (stripe_index < num)
- j++;
- }
- *offset = last_offset + j * map->stripe_len;
- return 1;
+ complete(bio->bi_private);
}
-static void scrub_free_parity(struct scrub_parity *sparity)
+static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
+ struct btrfs_device *dev, int mirror_num,
+ u64 logical, u32 length, u64 physical,
+ u64 *found_logical_ret)
{
- struct scrub_ctx *sctx = sparity->sctx;
- struct scrub_page *curr, *next;
- int nbits;
+ struct scrub_stripe *stripe;
+ int ret;
- nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
- if (nbits) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.read_errors += nbits;
- sctx->stat.uncorrectable_errors += nbits;
- spin_unlock(&sctx->stat_lock);
- }
+ /*
+ * There should always be one slot left, as the caller that fills the
+ * last slot must flush all queued stripes.
+ */
+ ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
- list_for_each_entry_safe(curr, next, &sparity->spages, list) {
- list_del_init(&curr->list);
- scrub_page_put(curr);
- }
+ /* @found_logical_ret must be specified. */
+ ASSERT(found_logical_ret);
- kfree(sparity);
-}
+ stripe = &sctx->stripes[sctx->cur_stripe];
+ scrub_reset_stripe(stripe);
+ ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
+ &sctx->csum_path, dev, physical,
+ mirror_num, logical, length, stripe);
+ /* Either >0 as no more extents or <0 for error. */
+ if (ret)
+ return ret;
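+ /*
+ * Report the logical bytenr of the filled stripe, so the caller can
+ * advance past it (see scrub_simple_mirror()).
+ */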
+ *found_logical_ret = stripe->logical;
+ sctx->cur_stripe++;
-static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
-{
- struct scrub_parity *sparity = container_of(work, struct scrub_parity,
- work);
- struct scrub_ctx *sctx = sparity->sctx;
+ /* We filled one group, submit it. */
+ if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
+ const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
+
+ submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
+ }
- scrub_free_parity(sparity);
- scrub_pending_bio_dec(sctx);
+ /* Last slot used, flush them all. */
+ if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
+ return flush_scrub_stripes(sctx);
+ return 0;
}
-static void scrub_parity_bio_endio(struct bio *bio)
+/*
+ * Return 0 if we should not cancel the scrub.
+ * Return <0 if we need to cancel the scrub, and the returned value
+ * indicates the reason:
+ * - -ECANCELED - Being explicitly canceled through ioctl.
+ * - -EINTR - Being interrupted by signal or fs/process freezing.
+ */
+static int should_cancel_scrub(const struct scrub_ctx *sctx)
{
- struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
- struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
-
- if (bio->bi_status)
- bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
- sparity->nsectors);
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
- bio_put(bio);
+ if (atomic_read(&fs_info->scrub_cancel_req) ||
+ atomic_read(&sctx->cancel_req))
+ return -ECANCELED;
- btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
- NULL);
- btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
+ /*
+ * The user (e.g. the fsfreeze command) or power management (PM)
+ * suspend/hibernate can freeze the fs. And PM suspend/hibernate will
+ * also freeze all user processes.
+ *
+ * A user process can only be frozen while it is in user space, thus
+ * we have to cancel the scrub run so that the process can return to
+ * user space.
+ *
+ * Furthermore we have to check both filesystem and process freezing,
+ * as PM can be configured to freeze the filesystems before processes.
+ *
+ * If we only check fs freezing, then a suspend without fs freezing
+ * will time out, as the process is still in kernel space.
+ *
+ * If we only check process freezing, then a suspend with fs freezing
+ * will time out, as the running scrub prevents the fs from being frozen.
+ */
+ if (fs_info->sb->s_writers.frozen > SB_UNFROZEN ||
+ freezing(current) || signal_pending(current))
+ return -EINTR;
+ return 0;
}
-static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
+static int scrub_raid56_cached_parity(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev,
+ struct btrfs_chunk_map *map,
+ u64 full_stripe_start,
+ unsigned long *extent_bitmap)
{
- struct scrub_ctx *sctx = sparity->sctx;
+ DECLARE_COMPLETION_ONSTACK(io_done);
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct bio *bio;
- struct btrfs_raid_bio *rbio;
struct btrfs_io_context *bioc = NULL;
- u64 length;
+ struct btrfs_raid_bio *rbio;
+ struct bio bio;
+ const int data_stripes = nr_data_stripes(map);
+ u64 length = btrfs_stripe_nr_to_offset(data_stripes);
int ret;
- if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
- sparity->nsectors))
- goto out;
-
- length = sparity->logic_end - sparity->logic_start;
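+ /*
+ * The on-stack bio is only used to get a completion callback once the
+ * parity scrub rbio finishes, so we can wait for it synchronously.
+ */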
+ bio_init(&bio, NULL, NULL, 0, REQ_OP_READ);
+ bio.bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
+ bio.bi_private = &io_done;
+ bio.bi_end_io = raid56_scrub_wait_endio;
btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
- &length, &bioc);
- if (ret || !bioc || !bioc->raid_map)
- goto bioc_out;
-
- bio = btrfs_bio_alloc(BIO_MAX_VECS);
- bio->bi_iter.bi_sector = sparity->logic_start >> 9;
- bio->bi_private = sparity;
- bio->bi_end_io = scrub_parity_bio_endio;
-
- rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, length,
- sparity->scrub_dev,
- sparity->dbitmap,
- sparity->nsectors);
- if (!rbio)
- goto rbio_out;
-
- scrub_pending_bio_inc(sctx);
- raid56_parity_submit_scrub_rbio(rbio);
- return;
-
-rbio_out:
- bio_put(bio);
-bioc_out:
- btrfs_bio_counter_dec(fs_info);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
+ &length, &bioc, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ /* For a RAID56 write there must be a @bioc allocated. */
+ ASSERT(bioc);
+ rbio = raid56_parity_alloc_scrub_rbio(&bio, bioc, scrub_dev, extent_bitmap,
+ BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
btrfs_put_bioc(bioc);
- bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
- sparity->nsectors);
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
+ if (!rbio) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Use the recovered stripes as a cache to avoid reading them from disk again. */
+ for (int i = 0; i < data_stripes; i++) {
+ struct scrub_stripe *stripe = &sctx->raid56_data_stripes[i];
+
+ raid56_parity_cache_data_folios(rbio, stripe->folios,
+ full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
+ }
+ raid56_parity_submit_scrub_rbio(rbio);
+ wait_for_completion_io(&io_done);
+ ret = blk_status_to_errno(bio.bi_status);
out:
- scrub_free_parity(sparity);
+ btrfs_bio_counter_dec(fs_info);
+ bio_uninit(&bio);
+ return ret;
}
-static inline int scrub_calc_parity_bitmap_len(int nsectors)
+static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev,
+ struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ u64 full_stripe_start)
{
- return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
-}
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_path extent_path = { 0 };
+ struct btrfs_path csum_path = { 0 };
+ struct scrub_stripe *stripe;
+ bool all_empty = true;
+ const int data_stripes = nr_data_stripes(map);
+ unsigned long extent_bitmap = 0;
+ int ret;
-static void scrub_parity_get(struct scrub_parity *sparity)
-{
- refcount_inc(&sparity->refs);
-}
+ ASSERT(sctx->raid56_data_stripes);
-static void scrub_parity_put(struct scrub_parity *sparity)
-{
- if (!refcount_dec_and_test(&sparity->refs))
- return;
+ ret = should_cancel_scrub(sctx);
+ if (ret < 0)
+ return ret;
- scrub_parity_check_and_repair(sparity);
-}
+ if (atomic_read(&fs_info->scrub_pause_req))
+ scrub_blocked_if_needed(fs_info);
-static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
- struct map_lookup *map,
- struct btrfs_device *sdev,
- struct btrfs_path *path,
- u64 logic_start,
- u64 logic_end)
-{
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_root *csum_root = fs_info->csum_root;
- struct btrfs_extent_item *extent;
- struct btrfs_io_context *bioc = NULL;
- u64 flags;
- int ret;
- int slot;
- struct extent_buffer *l;
- struct btrfs_key key;
- u64 generation;
- u64 extent_logical;
- u64 extent_physical;
- /* Check the comment in scrub_stripe() for why u32 is enough here */
- u32 extent_len;
- u64 mapped_length;
- struct btrfs_device *extent_dev;
- struct scrub_parity *sparity;
- int nsectors;
- int bitmap_len;
- int extent_mirror_num;
- int stop_loop = 0;
-
- ASSERT(map->stripe_len <= U32_MAX);
- nsectors = map->stripe_len >> fs_info->sectorsize_bits;
- bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
- sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
- GFP_NOFS);
- if (!sparity) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- return -ENOMEM;
+ spin_lock(&bg->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
+ spin_unlock(&bg->lock);
+ return 0;
}
+ spin_unlock(&bg->lock);
- ASSERT(map->stripe_len <= U32_MAX);
- sparity->stripe_len = map->stripe_len;
- sparity->nsectors = nsectors;
- sparity->sctx = sctx;
- sparity->scrub_dev = sdev;
- sparity->logic_start = logic_start;
- sparity->logic_end = logic_end;
- refcount_set(&sparity->refs, 1);
- INIT_LIST_HEAD(&sparity->spages);
- sparity->dbitmap = sparity->bitmap;
- sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
-
- ret = 0;
- while (logic_start < logic_end) {
- if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
- key.type = BTRFS_METADATA_ITEM_KEY;
- else
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.objectid = logic_start;
- key.offset = (u64)-1;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ /*
+ * For the data stripe search, we cannot reuse the same extent/csum
+ * paths, as the data stripe bytenr may be smaller than the previous
+ * extent's. Thus we have to use our own local extent/csum paths.
+ */
+ extent_path.search_commit_root = true;
+ extent_path.skip_locking = true;
+ csum_path.search_commit_root = true;
+ csum_path.skip_locking = true;
+
+ for (int i = 0; i < data_stripes; i++) {
+ int stripe_index;
+ int rot;
+ u64 physical;
+
+ stripe = &sctx->raid56_data_stripes[i];
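+ /*
+ * @rot is the number of full stripes before this one inside the
+ * chunk. Due to the RAID56 rotation, data stripe @i of this full
+ * stripe sits on device (i + rot) % num_stripes, @rot stripes into
+ * that device's extent.
+ */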
+ rot = div_u64(full_stripe_start - bg->start,
+ data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_index = (i + rot) % map->num_stripes;
+ physical = map->stripes[stripe_index].physical +
+ btrfs_stripe_nr_to_offset(rot);
+
+ scrub_reset_stripe(stripe);
+ set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
+ ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
+ map->stripes[stripe_index].dev, physical, 1,
+ full_stripe_start + btrfs_stripe_nr_to_offset(i),
+ BTRFS_STRIPE_LEN, stripe);
if (ret < 0)
goto out;
-
+ /*
+ * No extent in this data stripe; manually mark it initialized
+ * to keep the later read submission happy.
+ */
if (ret > 0) {
- ret = btrfs_previous_extent_item(root, path, 0);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- btrfs_release_path(path);
- ret = btrfs_search_slot(NULL, root, &key,
- path, 0, 0);
- if (ret < 0)
- goto out;
- }
+ stripe->logical = full_stripe_start +
+ btrfs_stripe_nr_to_offset(i);
+ stripe->dev = map->stripes[stripe_index].dev;
+ stripe->mirror_num = 1;
+ set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
}
+ }
- stop_loop = 0;
- while (1) {
- u64 bytes;
-
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto out;
-
- stop_loop = 1;
- break;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
+ /* Check if all data stripes are empty. */
+ for (int i = 0; i < data_stripes; i++) {
+ stripe = &sctx->raid56_data_stripes[i];
+ if (!scrub_bitmap_empty_has_extent(stripe)) {
+ all_empty = false;
+ break;
+ }
+ }
+ if (all_empty) {
+ ret = 0;
+ goto out;
+ }
- if (key.type != BTRFS_EXTENT_ITEM_KEY &&
- key.type != BTRFS_METADATA_ITEM_KEY)
- goto next;
+ for (int i = 0; i < data_stripes; i++) {
+ stripe = &sctx->raid56_data_stripes[i];
+ scrub_submit_initial_read(sctx, stripe);
+ }
+ for (int i = 0; i < data_stripes; i++) {
+ stripe = &sctx->raid56_data_stripes[i];
- if (key.type == BTRFS_METADATA_ITEM_KEY)
- bytes = fs_info->nodesize;
- else
- bytes = key.offset;
+ wait_event(stripe->repair_wait,
+ test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
+ }
+ /* For now, no zoned support for RAID56. */
+ ASSERT(!btrfs_is_zoned(sctx->fs_info));
- if (key.objectid + bytes <= logic_start)
- goto next;
+ /*
+ * Now all data stripes are properly verified. Check if we have any
+ * unrepaired sectors; if so, abort immediately, or we could further
+ * corrupt the P/Q stripes.
+ *
+ * During the loop, also populate extent_bitmap.
+ */
+ for (int i = 0; i < data_stripes; i++) {
+ unsigned long error;
+ unsigned long has_extent;
- if (key.objectid >= logic_end) {
- stop_loop = 1;
- break;
- }
+ stripe = &sctx->raid56_data_stripes[i];
- while (key.objectid >= logic_start + map->stripe_len)
- logic_start += map->stripe_len;
-
- extent = btrfs_item_ptr(l, slot,
- struct btrfs_extent_item);
- flags = btrfs_extent_flags(l, extent);
- generation = btrfs_extent_generation(l, extent);
-
- if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
- (key.objectid < logic_start ||
- key.objectid + bytes >
- logic_start + map->stripe_len)) {
- btrfs_err(fs_info,
- "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
- key.objectid, logic_start);
- spin_lock(&sctx->stat_lock);
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- goto next;
- }
-again:
- extent_logical = key.objectid;
- ASSERT(bytes <= U32_MAX);
- extent_len = bytes;
-
- if (extent_logical < logic_start) {
- extent_len -= logic_start - extent_logical;
- extent_logical = logic_start;
- }
+ error = scrub_bitmap_read_error(stripe);
+ has_extent = scrub_bitmap_read_has_extent(stripe);
- if (extent_logical + extent_len >
- logic_start + map->stripe_len)
- extent_len = logic_start + map->stripe_len -
- extent_logical;
-
- scrub_parity_mark_sectors_data(sparity, extent_logical,
- extent_len);
-
- mapped_length = extent_len;
- bioc = NULL;
- ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
- extent_logical, &mapped_length, &bioc,
- 0);
- if (!ret) {
- if (!bioc || mapped_length < extent_len)
- ret = -EIO;
- }
- if (ret) {
- btrfs_put_bioc(bioc);
- goto out;
- }
- extent_physical = bioc->stripes[0].physical;
- extent_mirror_num = bioc->mirror_num;
- extent_dev = bioc->stripes[0].dev;
- btrfs_put_bioc(bioc);
+ /*
+ * We should only check errors for sectors covered by an extent, as
+ * an empty data stripe may report errors (e.g. when its device is
+ * missing) that do not matter.
+ */
+ bitmap_and(&error, &error, &has_extent, stripe->nr_sectors);
+ if (unlikely(!bitmap_empty(&error, stripe->nr_sectors))) {
+ btrfs_err(fs_info,
+"scrub: unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
+ full_stripe_start, i, stripe->nr_sectors,
+ &error);
+ ret = -EIO;
+ goto out;
+ }
+ bitmap_or(&extent_bitmap, &extent_bitmap, &has_extent,
+ stripe->nr_sectors);
+ }
- ret = btrfs_lookup_csums_range(csum_root,
- extent_logical,
- extent_logical + extent_len - 1,
- &sctx->csum_list, 1);
- if (ret)
- goto out;
+ /* Now we can check and regenerate the P/Q stripe. */
+ ret = scrub_raid56_cached_parity(sctx, scrub_dev, map, full_stripe_start,
+ &extent_bitmap);
+out:
+ btrfs_release_path(&extent_path);
+ btrfs_release_path(&csum_path);
+ return ret;
+}
- ret = scrub_extent_for_parity(sparity, extent_logical,
- extent_len,
- extent_physical,
- extent_dev, flags,
- generation,
- extent_mirror_num);
+/*
+ * Scrub one range which can only have a simple mirror based profile.
+ * (This includes all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe
+ * in RAID0/RAID10.)
+ *
+ * Since we may need to handle a subset of a block group, we need the
+ * @logical_start and @logical_length parameters.
+ */
+static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ struct btrfs_block_group *bg,
+ u64 logical_start, u64 logical_length,
+ struct btrfs_device *device,
+ u64 physical, int mirror_num)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ const u64 logical_end = logical_start + logical_length;
+ u64 cur_logical = logical_start;
+ int ret = 0;
- scrub_free_csums(sctx);
+ /* The range must be inside the bg */
+ ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
- if (ret)
- goto out;
+ /* Go through each extent item inside the logical range. */
+ while (cur_logical < logical_end) {
+ u64 found_logical = U64_MAX;
+ u64 cur_physical = physical + cur_logical - logical_start;
- if (extent_logical + extent_len <
- key.objectid + bytes) {
- logic_start += map->stripe_len;
+ ret = should_cancel_scrub(sctx);
+ if (ret < 0)
+ break;
- if (logic_start >= logic_end) {
- stop_loop = 1;
- break;
- }
+ if (atomic_read(&fs_info->scrub_pause_req))
+ scrub_blocked_if_needed(fs_info);
- if (logic_start < key.objectid + bytes) {
- cond_resched();
- goto again;
- }
- }
-next:
- path->slots[0]++;
+ spin_lock(&bg->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
+ spin_unlock(&bg->lock);
+ ret = 0;
+ break;
}
+ spin_unlock(&bg->lock);
- btrfs_release_path(path);
-
- if (stop_loop)
+ ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
+ cur_logical, logical_end - cur_logical,
+ cur_physical, &found_logical);
+ if (ret > 0) {
+ /* No more extents, just update the accounting */
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.last_physical = physical + logical_length;
+ spin_unlock(&sctx->stat_lock);
+ ret = 0;
+ break;
+ }
+ if (ret < 0)
break;
- logic_start += map->stripe_len;
- }
-out:
- if (ret < 0) {
- ASSERT(logic_end - logic_start <= U32_MAX);
- scrub_parity_mark_sectors_error(sparity, logic_start,
- logic_end - logic_start);
- }
- scrub_parity_put(sparity);
- scrub_submit(sctx);
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
+ /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
+ ASSERT(found_logical != U64_MAX);
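+ /* Each queued stripe covers BTRFS_STRIPE_LEN bytes, continue right after it. */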
+ cur_logical = found_logical + BTRFS_STRIPE_LEN;
- btrfs_release_path(path);
- return ret < 0 ? ret : 0;
+ /* Don't hold the CPU for too long. */
+ cond_resched();
+ }
+ return ret;
}
-static void sync_replace_for_zoned(struct scrub_ctx *sctx)
+/* Calculate the full stripe length for simple stripe based profiles */
+static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
{
- if (!btrfs_is_zoned(sctx->fs_info))
- return;
+ ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10));
- sctx->flush_all_writes = true;
- scrub_submit(sctx);
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
+ return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
+}
+
+/* Get the logical bytenr for the stripe */
+static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
+ struct btrfs_block_group *bg,
+ int stripe_index)
+{
+ ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10));
+ ASSERT(stripe_index < map->num_stripes);
- wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
+ /*
+ * (stripe_index / sub_stripes) gives how many data stripes we need to
+ * skip.
+ */
+ return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
+ bg->start;
}
-static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
- u64 physical, u64 physical_end)
+/* Get the mirror number for the stripe */
+static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
{
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- int ret = 0;
+ ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10));
+ ASSERT(stripe_index < map->num_stripes);
- if (!btrfs_is_zoned(fs_info))
- return 0;
+ /* For RAID0 it's fixed to 1, for RAID10 it alternates 1,2,1,2,... */
+ return stripe_index % map->sub_stripes + 1;
+}
- wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
+static int scrub_simple_stripe(struct scrub_ctx *sctx,
+ struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct btrfs_device *device,
+ int stripe_index)
+{
+ const u64 logical_increment = simple_stripe_full_stripe_len(map);
+ const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
+ const u64 orig_physical = map->stripes[stripe_index].physical;
+ const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
+ u64 cur_logical = orig_logical;
+ u64 cur_physical = orig_physical;
+ int ret = 0;
- mutex_lock(&sctx->wr_lock);
- if (sctx->write_pointer < physical_end) {
- ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
- physical,
- sctx->write_pointer);
+ while (cur_logical < bg->start + bg->length) {
+ /*
+ * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
+ * just RAID1, so we can reuse scrub_simple_mirror() to scrub
+ * this stripe.
+ */
+ ret = scrub_simple_mirror(sctx, bg, cur_logical,
+ BTRFS_STRIPE_LEN, device, cur_physical,
+ mirror_num);
if (ret)
- btrfs_err(fs_info,
- "zoned: failed to recover write pointer");
+ return ret;
+ /* Skip to next stripe which belongs to the target device */
+ cur_logical += logical_increment;
+ /* For physical offset, we just go to next stripe */
+ cur_physical += BTRFS_STRIPE_LEN;
}
- mutex_unlock(&sctx->wr_lock);
- btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
-
return ret;
}
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
- struct map_lookup *map,
+ struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
struct btrfs_device *scrub_dev,
- int num, u64 base, u64 length,
- struct btrfs_block_group *cache)
+ int stripe_index)
{
- struct btrfs_path *path, *ppath;
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_root *csum_root = fs_info->csum_root;
- struct btrfs_extent_item *extent;
- struct blk_plug plug;
- u64 flags;
+ const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+ const u64 chunk_logical = bg->start;
int ret;
- int slot;
- u64 nstripes;
- struct extent_buffer *l;
- u64 physical;
+ int ret2;
+ u64 physical = map->stripes[stripe_index].physical;
+ const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
+ const u64 physical_end = physical + dev_stripe_len;
u64 logical;
u64 logic_end;
- u64 physical_end;
- u64 generation;
- int mirror_num;
- struct reada_control *reada1;
- struct reada_control *reada2;
- struct btrfs_key key;
- struct btrfs_key key_end;
- u64 increment = map->stripe_len;
+ /* The logical increment after finishing one stripe */
+ u64 increment;
+ /* Offset inside the chunk */
u64 offset;
- u64 extent_logical;
- u64 extent_physical;
- /*
- * Unlike chunk length, extent length should never go beyond
- * BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here.
- */
- u32 extent_len;
u64 stripe_logical;
- u64 stripe_end;
- struct btrfs_device *extent_dev;
- int extent_mirror_num;
- int stop_loop = 0;
-
- physical = map->stripes[num].physical;
- offset = 0;
- nstripes = div64_u64(length, map->stripe_len);
- mirror_num = 1;
- increment = map->stripe_len;
- if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- offset = map->stripe_len * num;
- increment = map->stripe_len * map->num_stripes;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
- int factor = map->num_stripes / map->sub_stripes;
- offset = map->stripe_len * (num / map->sub_stripes);
- increment = map->stripe_len * factor;
- mirror_num = num % map->sub_stripes + 1;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
- mirror_num = num % map->num_stripes + 1;
- } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- mirror_num = num % map->num_stripes + 1;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- get_raid56_logic_offset(physical, num, map, &offset, NULL);
- increment = map->stripe_len * nr_data_stripes(map);
- }
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- ppath = btrfs_alloc_path();
- if (!ppath) {
- btrfs_free_path(path);
- return -ENOMEM;
- }
- /*
- * work on commit root. The related disk blocks are static as
- * long as COW is applied. This means, it is save to rewrite
- * them to repair disk errors without any race conditions
- */
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ /* The extent path should have been released by now. */
+ ASSERT(sctx->extent_path.nodes[0] == NULL);
- ppath->search_commit_root = 1;
- ppath->skip_locking = 1;
- /*
- * trigger the readahead for extent tree csum tree and wait for
- * completion. During readahead, the scrub is officially paused
- * to not hold off transaction commits
- */
- logical = base + offset;
- physical_end = physical + nstripes * map->stripe_len;
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- get_raid56_logic_offset(physical_end, num,
- map, &logic_end, NULL);
- logic_end += base;
- } else {
- logic_end = logical + increment * nstripes;
- }
- wait_event(sctx->list_wait,
- atomic_read(&sctx->bios_in_flight) == 0);
scrub_blocked_if_needed(fs_info);
- /* FIXME it might be better to start readahead at commit root */
- key.objectid = logical;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = (u64)0;
- key_end.objectid = logic_end;
- key_end.type = BTRFS_METADATA_ITEM_KEY;
- key_end.offset = (u64)-1;
- reada1 = btrfs_reada_add(root, &key, &key_end);
-
- if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
- key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key.type = BTRFS_EXTENT_CSUM_KEY;
- key.offset = logical;
- key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key_end.type = BTRFS_EXTENT_CSUM_KEY;
- key_end.offset = logic_end;
- reada2 = btrfs_reada_add(csum_root, &key, &key_end);
- } else {
- reada2 = NULL;
- }
-
- if (!IS_ERR(reada1))
- btrfs_reada_wait(reada1);
- if (!IS_ERR_OR_NULL(reada2))
- btrfs_reada_wait(reada2);
-
-
- /*
- * collect all data csums for the stripe to avoid seeking during
- * the scrub. This might currently (crc32) end up to be about 1MB
- */
- blk_start_plug(&plug);
-
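+ /*
+ * For dev-replace onto a sequential (zoned) target, record the
+ * starting physical position as the write pointer, so the copied
+ * data is written sequentially.
+ */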
if (sctx->is_dev_replace &&
btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
mutex_lock(&sctx->wr_lock);
sctx->write_pointer = physical;
mutex_unlock(&sctx->wr_lock);
- sctx->flush_all_writes = true;
}
- /*
- * now find all extents for each stripe and scrub them
- */
- ret = 0;
- while (physical < physical_end) {
- /*
- * canceled?
- */
- if (atomic_read(&fs_info->scrub_cancel_req) ||
- atomic_read(&sctx->cancel_req)) {
- ret = -ECANCELED;
- goto out;
- }
- /*
- * check to see if we have to pause
- */
- if (atomic_read(&fs_info->scrub_pause_req)) {
- /* push queued extents */
- sctx->flush_all_writes = true;
- scrub_submit(sctx);
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
- wait_event(sctx->list_wait,
- atomic_read(&sctx->bios_in_flight) == 0);
- sctx->flush_all_writes = false;
- scrub_blocked_if_needed(fs_info);
- }
-
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- ret = get_raid56_logic_offset(physical, num, map,
- &logical,
- &stripe_logical);
- logical += base;
- if (ret) {
- /* it is parity strip */
- stripe_logical += base;
- stripe_end = stripe_logical + increment;
- ret = scrub_raid56_parity(sctx, map, scrub_dev,
- ppath, stripe_logical,
- stripe_end);
- if (ret)
- goto out;
- goto skip;
- }
- }
-
- if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
- key.type = BTRFS_METADATA_ITEM_KEY;
- else
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.objectid = logical;
- key.offset = (u64)-1;
+ /* Prepare the extra data stripes used by RAID56. */
+ if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ ASSERT(sctx->raid56_data_stripes == NULL);
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
+ sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
+ sizeof(struct scrub_stripe),
+ GFP_KERNEL);
+ if (!sctx->raid56_data_stripes) {
+ ret = -ENOMEM;
goto out;
-
- if (ret > 0) {
- ret = btrfs_previous_extent_item(root, path, 0);
+ }
+ for (int i = 0; i < nr_data_stripes(map); i++) {
+ ret = init_scrub_stripe(fs_info,
+ &sctx->raid56_data_stripes[i]);
if (ret < 0)
goto out;
- if (ret > 0) {
- /* there's no smaller item, so stick with the
- * larger one */
- btrfs_release_path(path);
- ret = btrfs_search_slot(NULL, root, &key,
- path, 0, 0);
- if (ret < 0)
- goto out;
- }
+ sctx->raid56_data_stripes[i].bg = bg;
+ sctx->raid56_data_stripes[i].sctx = sctx;
}
+ }
+ /*
+ * There used to be a big double loop that handled all profiles using
+ * the same routine, and it grew larger and harder to follow over time.
+ *
+ * So here we handle each profile separately, so that simpler profiles
+ * get a simpler scrubbing function.
+ */
+ if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID56_MASK))) {
+ /*
+ * The above check rules out all complex profiles; the remaining
+ * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
+ * mirrored duplication without striping.
+ *
+ * Only @physical and @mirror_num need to be calculated from
+ * @stripe_index.
+ */
+ ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length,
+ scrub_dev, map->stripes[stripe_index].physical,
+ stripe_index + 1);
+ offset = 0;
+ goto out;
+ }
+ if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
+ ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
+ offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
+ goto out;
+ }
- stop_loop = 0;
- while (1) {
- u64 bytes;
-
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto out;
-
- stop_loop = 1;
- break;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
-
- if (key.type != BTRFS_EXTENT_ITEM_KEY &&
- key.type != BTRFS_METADATA_ITEM_KEY)
- goto next;
-
- if (key.type == BTRFS_METADATA_ITEM_KEY)
- bytes = fs_info->nodesize;
- else
- bytes = key.offset;
-
- if (key.objectid + bytes <= logical)
- goto next;
-
- if (key.objectid >= logical + map->stripe_len) {
- /* out of this device extent */
- if (key.objectid >= logic_end)
- stop_loop = 1;
- break;
- }
-
- /*
- * If our block group was removed in the meanwhile, just
- * stop scrubbing since there is no point in continuing.
- * Continuing would prevent reusing its device extents
- * for new block groups for a long time.
- */
- spin_lock(&cache->lock);
- if (cache->removed) {
- spin_unlock(&cache->lock);
- ret = 0;
- goto out;
- }
- spin_unlock(&cache->lock);
-
- extent = btrfs_item_ptr(l, slot,
- struct btrfs_extent_item);
- flags = btrfs_extent_flags(l, extent);
- generation = btrfs_extent_generation(l, extent);
-
- if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
- (key.objectid < logical ||
- key.objectid + bytes >
- logical + map->stripe_len)) {
- btrfs_err(fs_info,
- "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
- key.objectid, logical);
- spin_lock(&sctx->stat_lock);
- sctx->stat.uncorrectable_errors++;
- spin_unlock(&sctx->stat_lock);
- goto next;
- }
-
-again:
- extent_logical = key.objectid;
- ASSERT(bytes <= U32_MAX);
- extent_len = bytes;
-
- /*
- * trim extent to this stripe
- */
- if (extent_logical < logical) {
- extent_len -= logical - extent_logical;
- extent_logical = logical;
- }
- if (extent_logical + extent_len >
- logical + map->stripe_len) {
- extent_len = logical + map->stripe_len -
- extent_logical;
- }
-
- extent_physical = extent_logical - logical + physical;
- extent_dev = scrub_dev;
- extent_mirror_num = mirror_num;
- if (sctx->is_dev_replace)
- scrub_remap_extent(fs_info, extent_logical,
- extent_len, &extent_physical,
- &extent_dev,
- &extent_mirror_num);
-
- if (flags & BTRFS_EXTENT_FLAG_DATA) {
- ret = btrfs_lookup_csums_range(csum_root,
- extent_logical,
- extent_logical + extent_len - 1,
- &sctx->csum_list, 1);
- if (ret)
- goto out;
- }
+ /* Only RAID56 goes through the old code */
+ ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+ ret = 0;
- ret = scrub_extent(sctx, map, extent_logical, extent_len,
- extent_physical, extent_dev, flags,
- generation, extent_mirror_num,
- extent_logical - logical + physical);
+ /* Calculate the logical end of the stripe */
+ get_raid56_logic_offset(physical_end, stripe_index,
+ map, &logic_end, NULL);
+ logic_end += chunk_logical;
- scrub_free_csums(sctx);
+ /* Initialize @offset in case we need to go to out: label */
+ get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
+ increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
+ /*
+ * Due to the rotation, for RAID56 it's better to iterate the stripes
+ * by their physical offset.
+ */
+ while (physical < physical_end) {
+ ret = get_raid56_logic_offset(physical, stripe_index, map,
+ &logical, &stripe_logical);
+ logical += chunk_logical;
+ if (ret) {
+ /* It is a parity stripe. */
+ stripe_logical += chunk_logical;
+ ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
+ map, stripe_logical);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN,
+ physical_end);
+ spin_unlock(&sctx->stat_lock);
if (ret)
goto out;
+ goto next;
+ }
- if (sctx->is_dev_replace)
- sync_replace_for_zoned(sctx);
-
- if (extent_logical + extent_len <
- key.objectid + bytes) {
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- /*
- * loop until we find next data stripe
- * or we have finished all stripes.
- */
-loop:
- physical += map->stripe_len;
- ret = get_raid56_logic_offset(physical,
- num, map, &logical,
- &stripe_logical);
- logical += base;
-
- if (ret && physical < physical_end) {
- stripe_logical += base;
- stripe_end = stripe_logical +
- increment;
- ret = scrub_raid56_parity(sctx,
- map, scrub_dev, ppath,
- stripe_logical,
- stripe_end);
- if (ret)
- goto out;
- goto loop;
- }
- } else {
- physical += map->stripe_len;
- logical += increment;
- }
- if (logical < key.objectid + bytes) {
- cond_resched();
- goto again;
- }
-
- if (physical >= physical_end) {
- stop_loop = 1;
- break;
- }
- }
+ /*
+ * Now we're at a data stripe, scrub each extent in the range.
+ *
+ * At this stage, if we ignore the repair part, inside each data
+ * stripe it is no different from the SINGLE profile.
+ * We can reuse scrub_simple_mirror() here, as the repair part
+ * is still based on @mirror_num.
+ */
+ ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN,
+ scrub_dev, physical, 1);
+ if (ret < 0)
+ goto out;
next:
- path->slots[0]++;
- }
- btrfs_release_path(path);
-skip:
logical += increment;
- physical += map->stripe_len;
+ physical += BTRFS_STRIPE_LEN;
spin_lock(&sctx->stat_lock);
- if (stop_loop)
- sctx->stat.last_physical = map->stripes[num].physical +
- length;
- else
- sctx->stat.last_physical = physical;
+ sctx->stat.last_physical = physical;
spin_unlock(&sctx->stat_lock);
- if (stop_loop)
- break;
}
out:
- /* push queued extents */
- scrub_submit(sctx);
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
+ ret2 = flush_scrub_stripes(sctx);
+ if (!ret)
+ ret = ret2;
+ btrfs_release_path(&sctx->extent_path);
+ btrfs_release_path(&sctx->csum_path);
- blk_finish_plug(&plug);
- btrfs_free_path(path);
- btrfs_free_path(ppath);
+ if (sctx->raid56_data_stripes) {
+ for (int i = 0; i < nr_data_stripes(map); i++)
+ release_scrub_stripe(&sctx->raid56_data_stripes[i]);
+ kfree(sctx->raid56_data_stripes);
+ sctx->raid56_data_stripes = NULL;
+ }
if (sctx->is_dev_replace && ret >= 0) {
- int ret2;
-
- ret2 = sync_write_pointer_for_zoned(sctx, base + offset,
- map->stripes[num].physical,
- physical_end);
+ ret2 = sync_write_pointer_for_zoned(sctx,
+ chunk_logical + offset,
+ map->stripes[stripe_index].physical,
+ physical_end);
if (ret2)
ret = ret2;
}
@@ -3576,53 +2608,44 @@ out:
}
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
+ struct btrfs_block_group *bg,
struct btrfs_device *scrub_dev,
- u64 chunk_offset, u64 length,
u64 dev_offset,
- struct btrfs_block_group *cache)
+ u64 dev_extent_len)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct extent_map_tree *map_tree = &fs_info->mapping_tree;
- struct map_lookup *map;
- struct extent_map *em;
+ struct btrfs_chunk_map *map;
int i;
int ret = 0;
- read_lock(&map_tree->lock);
- em = lookup_extent_mapping(map_tree, chunk_offset, 1);
- read_unlock(&map_tree->lock);
-
- if (!em) {
+ map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
+ if (!map) {
/*
* Might have been an unused block group deleted by the cleaner
* kthread or relocation.
*/
- spin_lock(&cache->lock);
- if (!cache->removed)
+ spin_lock(&bg->lock);
+ if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
ret = -EINVAL;
- spin_unlock(&cache->lock);
+ spin_unlock(&bg->lock);
return ret;
}
-
- map = em->map_lookup;
- if (em->start != chunk_offset)
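+ /*
+ * Sanity checks: the chunk map must start at the block group start
+ * and cover at least the device extent length being scrubbed.
+ */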
+ if (map->start != bg->start)
goto out;
-
- if (em->len < length)
+ if (map->chunk_len < dev_extent_len)
goto out;
for (i = 0; i < map->num_stripes; ++i) {
if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
map->stripes[i].physical == dev_offset) {
- ret = scrub_stripe(sctx, map, scrub_dev, i,
- chunk_offset, length, cache);
+ ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
if (ret)
goto out;
}
}
out:
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
@@ -3631,19 +2654,15 @@ static int finish_extent_writes_for_zoned(struct btrfs_root *root,
struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
- struct btrfs_trans_handle *trans;
if (!btrfs_is_zoned(fs_info))
return 0;
btrfs_wait_block_group_reservations(cache);
btrfs_wait_nocow_writers(cache);
- btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- return btrfs_commit_transaction(trans);
+ return btrfs_commit_current_transaction(root);
}
static noinline_for_stack
@@ -3651,10 +2670,9 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev, u64 start, u64 end)
{
struct btrfs_dev_extent *dev_extent = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->dev_root;
- u64 length;
u64 chunk_offset;
int ret = 0;
int ro_set;
@@ -3670,14 +2688,16 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
return -ENOMEM;
path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
key.objectid = scrub_dev->devid;
- key.offset = 0ull;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0ull;
while (1) {
+ u64 dev_extent_len;
+
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
break;
@@ -3714,9 +2734,9 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
break;
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
- length = btrfs_dev_extent_length(l, dev_extent);
+ dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
- if (found_key.offset + length <= start)
+ if (found_key.offset + dev_extent_len <= start)
goto skip;
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
@@ -3732,14 +2752,36 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!cache)
goto skip;
+ ASSERT(cache->start <= chunk_offset);
+ /*
+ * We are using the commit root to search for device extents, so
+ * that means we could have found a device extent item from a
+ * block group that was deleted in the current transaction. The
+ * logical start offset of the deleted block group, stored at
+ * @chunk_offset, might be part of the logical address range of
+ * a new block group (which uses different physical extents).
+ * In this case btrfs_lookup_block_group() has returned the new
+ * block group, and its start address is less than @chunk_offset.
+ *
+ * We skip such new block groups because it's pointless to process
+ * them: we search for extents using the commit root of the extent
+ * tree, so we won't find their extents anyway. For a device
+ * replace it's also fine to skip it, we won't miss copying them
+ * to the target device because we have the write duplication
+ * setup through the regular write path (by btrfs_map_block()),
+ * and we have committed a transaction when we started the device
+ * replace, right after setting up the device replace state.
+ */
+ if (cache->start < chunk_offset) {
+ btrfs_put_block_group(cache);
+ goto skip;
+ }
+
if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
- spin_lock(&cache->lock);
- if (!cache->to_copy) {
- spin_unlock(&cache->lock);
+ if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
btrfs_put_block_group(cache);
goto skip;
}
- spin_unlock(&cache->lock);
}
/*
@@ -3751,7 +2793,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* repair extents.
*/
spin_lock(&cache->lock);
- if (cache->removed) {
+ if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
spin_unlock(&cache->lock);
btrfs_put_block_group(cache);
goto skip;
@@ -3812,25 +2854,32 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (ret == 0) {
ro_set = 1;
- } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
+ } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
+ !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
/*
* btrfs_inc_block_group_ro return -ENOSPC when it
* failed in creating new chunk for metadata.
* It is not a problem for scrub, because
* metadata are always cowed, and our scrub paused
* commit_transactions.
+ *
+ * For RAID56 chunks, we have to mark them read-only
+ * for scrub, as we later use our own cache outside
+ * of the RAID56 layer.
+ * Thus the RAID56 bg must be marked RO to prevent
+ * RMW from corrupting our cache.
*/
ro_set = 0;
} else if (ret == -ETXTBSY) {
btrfs_warn(fs_info,
- "skipping scrub of block group %llu due to active swapfile",
+ "scrub: skipping scrub of block group %llu due to active swapfile",
cache->start);
scrub_pause_off(fs_info);
ret = 0;
goto skip_unfreeze;
} else {
- btrfs_warn(fs_info,
- "failed setting block group ro: %d", ret);
+ btrfs_warn(fs_info, "scrub: failed setting block group ro: %d",
+ ret);
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
scrub_pause_off(fs_info);
@@ -3844,52 +2893,18 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
if (sctx->is_dev_replace) {
btrfs_wait_nocow_writers(cache);
- btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
- cache->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
}
scrub_pause_off(fs_info);
down_write(&dev_replace->rwsem);
- dev_replace->cursor_right = found_key.offset + length;
+ dev_replace->cursor_right = found_key.offset + dev_extent_len;
dev_replace->cursor_left = found_key.offset;
dev_replace->item_needs_writeback = 1;
up_write(&dev_replace->rwsem);
- ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
- found_key.offset, cache);
-
- /*
- * flush, submit all pending read and write bios, afterwards
- * wait for them.
- * Note that in the dev replace case, a read request causes
- * write requests that are submitted in the read completion
- * worker. Therefore in the current situation, it is required
- * that all write requests are flushed, so that all read and
- * write requests are really completed when bios_in_flight
- * changes to 0.
- */
- sctx->flush_all_writes = true;
- scrub_submit(sctx);
- mutex_lock(&sctx->wr_lock);
- scrub_wr_submit(sctx);
- mutex_unlock(&sctx->wr_lock);
-
- wait_event(sctx->list_wait,
- atomic_read(&sctx->bios_in_flight) == 0);
-
- scrub_pause_on(fs_info);
-
- /*
- * must be called before we decrease @scrub_paused.
- * make sure we don't block transaction commit while
- * we are waiting pending workers finished.
- */
- wait_event(sctx->list_wait,
- atomic_read(&sctx->workers_pending) == 0);
- sctx->flush_all_writes = false;
-
- scrub_pause_off(fs_info);
-
+ ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
+ dev_extent_len);
if (sctx->is_dev_replace &&
!btrfs_finish_block_group_to_copy(dev_replace->srcdev,
cache, found_key.offset))
@@ -3911,8 +2926,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* balance is triggered or it becomes used and unused again.
*/
spin_lock(&cache->lock);
- if (!cache->removed && !cache->ro && cache->reserved == 0 &&
- cache->used == 0) {
+ if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
+ !cache->ro && cache->reserved == 0 && cache->used == 0) {
spin_unlock(&cache->lock);
if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
btrfs_discard_queue_work(&fs_info->discard_ctl,
@@ -3927,8 +2942,8 @@ skip_unfreeze:
btrfs_put_block_group(cache);
if (ret)
break;
- if (sctx->is_dev_replace &&
- atomic64_read(&dev_replace->num_write_errors) > 0) {
+ if (unlikely(sctx->is_dev_replace &&
+ atomic64_read(&dev_replace->num_write_errors) > 0)) {
ret = -EIO;
break;
}
@@ -3937,49 +2952,95 @@ skip_unfreeze:
break;
}
skip:
- key.offset = found_key.offset + length;
+ key.offset = found_key.offset + dev_extent_len;
btrfs_release_path(path);
}
- btrfs_free_path(path);
-
return ret;
}
+static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
+ struct page *page, u64 physical, u64 generation)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_super_block *sb = page_address(page);
+ int ret;
+
+ ret = bdev_rw_virt(dev->bdev, physical >> SECTOR_SHIFT, sb,
+ BTRFS_SUPER_INFO_SIZE, REQ_OP_READ);
+ if (ret < 0)
+ return ret;
+ ret = btrfs_check_super_csum(fs_info, sb);
+ if (unlikely(ret != 0)) {
+ btrfs_err_rl(fs_info,
+ "scrub: super block at physical %llu devid %llu has bad csum",
+ physical, dev->devid);
+ return -EIO;
+ }
+ if (unlikely(btrfs_super_generation(sb) != generation)) {
+ btrfs_err_rl(fs_info,
+"scrub: super block at physical %llu devid %llu has bad generation %llu expect %llu",
+ physical, dev->devid,
+ btrfs_super_generation(sb), generation);
+ return -EUCLEAN;
+ }
+
+ return btrfs_validate_super(fs_info, sb, -1);
+}
+
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev)
{
int i;
u64 bytenr;
u64 gen;
- int ret;
+ int ret = 0;
+ struct page *page;
struct btrfs_fs_info *fs_info = sctx->fs_info;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ return -ENOMEM;
+ }
+
/* Seed devices of a new filesystem have their own generation. */
if (scrub_dev->fs_devices != fs_info->fs_devices)
gen = scrub_dev->generation;
else
- gen = fs_info->last_trans_committed;
+ gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
+ if (ret == -ENOENT)
+ break;
+
+ if (ret) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.super_errors++;
+ spin_unlock(&sctx->stat_lock);
+ continue;
+ }
+
if (bytenr + BTRFS_SUPER_INFO_SIZE >
scrub_dev->commit_total_bytes)
break;
if (!btrfs_check_super_location(scrub_dev, bytenr))
continue;
- ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
- scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
- NULL, bytenr);
- if (ret)
- return ret;
+ ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
+ if (ret) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.super_errors++;
+ spin_unlock(&sctx->stat_lock);
+ }
}
- wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
-
+ __free_page(page);
return 0;
}
@@ -3987,34 +3048,22 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
&fs_info->scrub_lock)) {
- struct btrfs_workqueue *scrub_workers = NULL;
- struct btrfs_workqueue *scrub_wr_comp = NULL;
- struct btrfs_workqueue *scrub_parity = NULL;
-
- scrub_workers = fs_info->scrub_workers;
- scrub_wr_comp = fs_info->scrub_wr_completion_workers;
- scrub_parity = fs_info->scrub_parity_workers;
+ struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
fs_info->scrub_workers = NULL;
- fs_info->scrub_wr_completion_workers = NULL;
- fs_info->scrub_parity_workers = NULL;
mutex_unlock(&fs_info->scrub_lock);
- btrfs_destroy_workqueue(scrub_workers);
- btrfs_destroy_workqueue(scrub_wr_comp);
- btrfs_destroy_workqueue(scrub_parity);
+ if (scrub_workers)
+ destroy_workqueue(scrub_workers);
}
}
/*
* get a reference count on fs_info->scrub_workers. start worker if necessary
*/
-static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
- int is_dev_replace)
+static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
{
- struct btrfs_workqueue *scrub_workers = NULL;
- struct btrfs_workqueue *scrub_wr_comp = NULL;
- struct btrfs_workqueue *scrub_parity = NULL;
+ struct workqueue_struct *scrub_workers = NULL;
unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
int max_active = fs_info->thread_pool_size;
int ret = -ENOMEM;
@@ -4022,29 +3071,14 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
return 0;
- scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
- is_dev_replace ? 1 : max_active, 4);
+ scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
if (!scrub_workers)
- goto fail_scrub_workers;
-
- scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
- max_active, 2);
- if (!scrub_wr_comp)
- goto fail_scrub_wr_completion_workers;
-
- scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
- max_active, 2);
- if (!scrub_parity)
- goto fail_scrub_parity_workers;
+ return -ENOMEM;
mutex_lock(&fs_info->scrub_lock);
if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
- ASSERT(fs_info->scrub_workers == NULL &&
- fs_info->scrub_wr_completion_workers == NULL &&
- fs_info->scrub_parity_workers == NULL);
+ ASSERT(fs_info->scrub_workers == NULL);
fs_info->scrub_workers = scrub_workers;
- fs_info->scrub_wr_completion_workers = scrub_wr_comp;
- fs_info->scrub_parity_workers = scrub_parity;
refcount_set(&fs_info->scrub_workers_refcnt, 1);
mutex_unlock(&fs_info->scrub_lock);
return 0;
@@ -4054,63 +3088,47 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
mutex_unlock(&fs_info->scrub_lock);
ret = 0;
- btrfs_destroy_workqueue(scrub_parity);
-fail_scrub_parity_workers:
- btrfs_destroy_workqueue(scrub_wr_comp);
-fail_scrub_wr_completion_workers:
- btrfs_destroy_workqueue(scrub_workers);
-fail_scrub_workers:
+
+ destroy_workqueue(scrub_workers);
return ret;
}
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
- int readonly, int is_dev_replace)
+ bool readonly, bool is_dev_replace)
{
struct btrfs_dev_lookup_args args = { .devid = devid };
struct scrub_ctx *sctx;
int ret;
struct btrfs_device *dev;
unsigned int nofs_flag;
+ bool need_commit = false;
+
+ /* Set the basic fallback @last_physical before we have allocated a sctx. */
+ if (progress)
+ progress->last_physical = start;
if (btrfs_fs_closing(fs_info))
return -EAGAIN;
- if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
- /*
- * in this case scrub is unable to calculate the checksum
- * the way scrub is implemented. Do not handle this
- * situation at all because it won't ever happen.
- */
- btrfs_err(fs_info,
- "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
- fs_info->nodesize,
- BTRFS_STRIPE_LEN);
- return -EINVAL;
- }
+ /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
+ ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
- if (fs_info->nodesize >
- PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
- fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
- /*
- * would exhaust the array bounds of pagev member in
- * struct scrub_block
- */
- btrfs_err(fs_info,
- "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
- fs_info->nodesize,
- SCRUB_MAX_PAGES_PER_BLOCK,
- fs_info->sectorsize,
- SCRUB_MAX_PAGES_PER_BLOCK);
- return -EINVAL;
- }
+ /*
+ * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
+ * value (max nodesize / min sectorsize), thus nodesize should always
+ * be fine.
+ */
+ ASSERT(fs_info->nodesize <=
+ SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
/* Allocate outside of device_list_mutex */
sctx = scrub_setup_ctx(fs_info, is_dev_replace);
if (IS_ERR(sctx))
return PTR_ERR(sctx);
+ sctx->stat.last_physical = start;
- ret = scrub_workers_get(fs_info, is_dev_replace);
+ ret = scrub_workers_get(fs_info);
if (ret)
goto out_free_ctx;
@@ -4126,16 +3144,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (!is_dev_replace && !readonly &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- btrfs_err_in_rcu(fs_info,
- "scrub on devid %llu: filesystem on %s is not writable",
- devid, rcu_str_deref(dev->name));
+ btrfs_err(fs_info,
+ "scrub: devid %llu: filesystem on %s is not writable",
+ devid, btrfs_dev_name(dev));
ret = -EROFS;
goto out;
}
mutex_lock(&fs_info->scrub_lock);
- if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
- test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
+ if (unlikely(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state))) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EIO;
@@ -4169,7 +3187,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
/*
* In order to avoid deadlock with reclaim when there is a transaction
* trying to pause scrub, make sure we use GFP_NOFS for all the
- * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
+ * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
* invoked by our callees. The pausing request is done when the
* transaction commit starts, and it blocks the transaction until scrub
* is paused (done at specific points at scrub_stripe() or right above
@@ -4177,6 +3195,12 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
*/
nofs_flag = memalloc_nofs_save();
if (!is_dev_replace) {
+ u64 old_super_errors;
+
+ spin_lock(&sctx->stat_lock);
+ old_super_errors = sctx->stat.super_errors;
+ spin_unlock(&sctx->stat_lock);
+
btrfs_info(fs_info, "scrub: started on devid %llu", devid);
/*
* by holding device list mutex, we can
@@ -4185,18 +3209,25 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_lock(&fs_info->fs_devices->device_list_mutex);
ret = scrub_supers(sctx, dev);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+ spin_lock(&sctx->stat_lock);
+ /*
+ * Super block errors were found, but we cannot commit a transaction
+ * in the current context, since btrfs_commit_transaction() needs
+ * to pause the currently running scrub (held by ourselves).
+ */
+ if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
+ need_commit = true;
+ spin_unlock(&sctx->stat_lock);
}
if (!ret)
ret = scrub_enumerate_chunks(sctx, dev, start, end);
memalloc_nofs_restore(nofs_flag);
- wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
atomic_dec(&fs_info->scrubs_running);
wake_up(&fs_info->scrub_pause_wait);
- wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
-
if (progress)
memcpy(progress, &sctx->stat, sizeof(*progress));
@@ -4211,6 +3242,25 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
scrub_workers_put(fs_info);
scrub_put_ctx(sctx);
+ /*
+ * We found some super block errors earlier, so now that scrub has
+ * finished, try to force a transaction commit.
+ */
+ if (need_commit) {
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_start_transaction(fs_info->tree_root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ btrfs_err(fs_info,
+ "scrub: failed to start transaction to fix super block errors: %d", ret);
+ return ret;
+ }
+ ret = btrfs_commit_transaction(trans);
+ if (ret < 0)
+ btrfs_err(fs_info,
+ "scrub: failed to commit transaction to fix super block errors: %d", ret);
+ }
return ret;
out:
scrub_workers_put(fs_info);
@@ -4302,28 +3352,3 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
-
-static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
- u64 extent_logical, u32 extent_len,
- u64 *extent_physical,
- struct btrfs_device **extent_dev,
- int *extent_mirror_num)
-{
- u64 mapped_length;
- struct btrfs_io_context *bioc = NULL;
- int ret;
-
- mapped_length = extent_len;
- ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
- &mapped_length, &bioc, 0);
- if (ret || !bioc || mapped_length < extent_len ||
- !bioc->stripes[0].dev->bdev) {
- btrfs_put_bioc(bioc);
- return;
- }
-
- *extent_physical = bioc->stripes[0].physical;
- *extent_mirror_num = bioc->mirror_num;
- *extent_dev = bioc->stripes[0].dev;
- btrfs_put_bioc(bioc);
-}
diff --git a/fs/btrfs/scrub.h b/fs/btrfs/scrub.h
new file mode 100644
index 000000000000..aa68b6ebaf55
--- /dev/null
+++ b/fs/btrfs/scrub.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_SCRUB_H
+#define BTRFS_SCRUB_H
+
+#include <linux/types.h>
+
+struct btrfs_fs_info;
+struct btrfs_device;
+struct btrfs_scrub_progress;
+
+int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
+ u64 end, struct btrfs_scrub_progress *progress,
+ bool readonly, bool is_dev_replace);
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
+int btrfs_scrub_cancel(struct btrfs_fs_info *info);
+int btrfs_scrub_cancel_dev(struct btrfs_device *dev);
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
+ struct btrfs_scrub_progress *progress);
+
+#endif
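For orientation, here is a minimal, hypothetical sketch of how the interface declared above is typically driven from kernel code (not part of the patch; the caller name, the read-only policy and the progress reporting are illustrative assumptions):

/* Hypothetical caller: scrub one device read-only and report the totals. */
static int example_scrub_device(struct btrfs_fs_info *fs_info, u64 devid)
{
	struct btrfs_scrub_progress progress = { 0 };
	int ret;

	/* start = 0 and end = U64_MAX cover every dev extent on the device. */
	ret = btrfs_scrub_dev(fs_info, devid, 0, U64_MAX, &progress,
			      true /* readonly */, false /* is_dev_replace */);
	if (ret == 0)
		btrfs_info(fs_info,
	"scrub example: %llu data bytes, %llu tree bytes, %llu csum errors",
			   progress.data_bytes_scrubbed,
			   progress.tree_bytes_scrubbed,
			   progress.csum_errors);
	return ret;
}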
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 040324d71118..2522faa97478 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4,6 +4,7 @@
*/
#include <linux/bsearch.h>
+#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
@@ -15,15 +16,22 @@
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>
-
+#include <linux/fsverity.h>
#include "send.h"
+#include "ctree.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
-#include "xattr.h"
+#include "print-tree.h"
+#include "accessors.h"
+#include "dir-item.h"
+#include "file-item.h"
+#include "ioctl.h"
+#include "verity.h"
+#include "lru_cache.h"
/*
* Maximum number of references an extent can have in order for us to attempt to
@@ -31,7 +39,7 @@
* avoid hitting limitations of the backreference walking code (taking a lot of
* time and using too much memory for extents with large number of references).
*/
-#define SEND_MAX_EXTENT_REFS 64
+#define SEND_MAX_EXTENT_REFS 1024
/*
* A fs_path is a helper to dynamically build path names with unknown size.
@@ -39,28 +47,30 @@
* It allows fast adding of path elements on the right side (normal path) and
* fast adding to the left side (reversed path). A reversed path can also be
* unreversed if needed.
+ *
+ * The definition of struct fs_path relies on -fms-extensions to allow
+ * including a tagged struct as an anonymous member.
*/
+struct __fs_path {
+ char *start;
+ char *end;
+
+ char *buf;
+ unsigned short buf_len:15;
+ unsigned short reversed:1;
+};
+static_assert(sizeof(struct __fs_path) < 256);
struct fs_path {
- union {
- struct {
- char *start;
- char *end;
-
- char *buf;
- unsigned short buf_len:15;
- unsigned short reversed:1;
- char inline_buf[];
- };
- /*
- * Average path length does not exceed 200 bytes, we'll have
- * better packing in the slab and higher chance to satisfy
- * a allocation later during send.
- */
- char pad[256];
- };
+ struct __fs_path;
+ /*
+ * Average path length does not exceed 200 bytes, we'll have
+ * better packing in the slab and higher chance to satisfy
+ * an allocation later during send.
+ */
+ char inline_buf[256 - sizeof(struct __fs_path)];
};
#define FS_PATH_INLINE_SIZE \
- (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
+ sizeof_field(struct fs_path, inline_buf)
/* reused for each extent */
@@ -68,12 +78,61 @@ struct clone_root {
struct btrfs_root *root;
u64 ino;
u64 offset;
+ u64 num_bytes;
+ bool found_ref;
+};
+
+#define SEND_MAX_NAME_CACHE_SIZE 256
+
+/*
+ * Limit the root_ids array of struct backref_cache_entry to 17 elements.
+ * This makes the size of a cache entry to be exactly 192 bytes on x86_64, which
+ * can be satisfied from the kmalloc-192 slab, without wasting any space.
+ * The most common case is to have a single root for cloning, which corresponds
+ * to the send root. Having the user specify more than 16 clone roots is not
+ * common, and in such rare cases we simply don't use caching if the number of
+ * cloning roots that lead down to a leaf is more than 17.
+ */
+#define SEND_MAX_BACKREF_CACHE_ROOTS 17
- u64 found_refs;
+/*
+ * Max number of entries in the cache.
+ * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, the size in bytes, excluding
+ * maple tree's internal nodes, is 24K.
+ */
+#define SEND_MAX_BACKREF_CACHE_SIZE 128
+
+/*
+ * A backref cache entry maps a leaf to a list of IDs of roots from which the
+ * leaf is accessible and we can use for clone operations.
+ * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, each cache entry is 192 bytes (on
+ * x86_64).
+ */
+struct backref_cache_entry {
+ struct btrfs_lru_cache_entry entry;
+ u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
+ /* Number of valid elements in the root_ids array. */
+ int num_roots;
};
-#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
-#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
+/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
+static_assert(offsetof(struct backref_cache_entry, entry) == 0);
+
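As a quick check of the 192-byte claim above (an editorial sketch, not part of the patch, assuming the 48-byte struct btrfs_lru_cache_entry mentioned in the comments below): 48 + 17 * 8 for root_ids + 4 for num_roots gives 188 bytes, which pads to 192 and therefore exactly fills a kmalloc-192 object on x86_64. As a compile-time check this could be written as:

/* Hypothetical assertion; exact equality to 192 holds on 64-bit builds. */
static_assert(sizeof(struct backref_cache_entry) <= 192);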
+/*
+ * Max number of entries in the cache that stores directories that were already
+ * created. The cache uses raw struct btrfs_lru_cache_entry entries, so it uses
+ * at most 4096 bytes - sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but
+ * the kmalloc-64 slab is used, so we get 4096 bytes (64 bytes * 64).
+ */
+#define SEND_MAX_DIR_CREATED_CACHE_SIZE 64
+
+/*
+ * Max number of entries in the cache that stores directories for which we
+ * still have to send utimes commands at the end of the send operation. The
+ * cache uses raw struct btrfs_lru_cache_entry entries, so it uses at most
+ * 4096 bytes - sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but the
+ * kmalloc-64 slab is used, so we get 4096 bytes (64 bytes * 64).
+ */
+#define SEND_MAX_DIR_UTIMES_CACHE_SIZE 64
struct send_ctx {
struct file *send_filp;
@@ -81,8 +140,12 @@ struct send_ctx {
char *send_buf;
u32 send_size;
u32 send_max_size;
- u64 total_send_size;
- u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
+ /*
+ * Whether BTRFS_SEND_A_DATA attribute was already added to current
+ * command (since protocol v2, data must be the last attribute).
+ */
+ bool put_data;
+ struct page **send_buf_pages;
u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
/* Protocol version compatibility requested */
u32 proto;
@@ -98,31 +161,47 @@ struct send_ctx {
struct btrfs_key *cmp_key;
/*
+ * Keep track of the generation of the last transaction that was used
+ * for relocating a block group. This is periodically checked in order
+ * to detect if a relocation happened since the last check, so that we
+ * don't operate on stale extent buffers for nodes (level >= 1) or on
+ * stale disk_bytenr values of file extent items.
+ */
+ u64 last_reloc_trans;
+
+ /*
* infos of the currently processed inode. In case of deleted inodes,
* these are the values from the deleted inode.
*/
u64 cur_ino;
u64 cur_inode_gen;
- int cur_inode_new;
- int cur_inode_new_gen;
- int cur_inode_deleted;
u64 cur_inode_size;
u64 cur_inode_mode;
u64 cur_inode_rdev;
u64 cur_inode_last_extent;
u64 cur_inode_next_write_offset;
+ bool cur_inode_new;
+ bool cur_inode_new_gen;
+ bool cur_inode_deleted;
bool ignore_cur_inode;
+ bool cur_inode_needs_verity;
+ void *verity_descriptor;
u64 send_progress;
struct list_head new_refs;
struct list_head deleted_refs;
- struct radix_tree_root name_cache;
- struct list_head name_cache_list;
- int name_cache_size;
+ struct btrfs_lru_cache name_cache;
+ /*
+ * The inode we are currently processing. It's not NULL only when we
+ * need to issue write commands for data extents from this inode.
+ */
+ struct inode *cur_inode;
struct file_ra_state ra;
+ u64 page_cache_clear_start;
+ bool clean_page_cache;
/*
* We process inodes by their increasing order, so if before an
@@ -218,6 +297,17 @@ struct send_ctx {
* Indexed by the inode number of the directory to be deleted.
*/
struct rb_root orphan_dirs;
+
+ struct rb_root rbtree_new_refs;
+ struct rb_root rbtree_deleted_refs;
+
+ struct btrfs_lru_cache backref_cache;
+ u64 backref_cache_last_reloc_trans;
+
+ struct btrfs_lru_cache dir_created_cache;
+ struct btrfs_lru_cache dir_utimes_cache;
+
+ struct fs_path cur_inode_path;
};
struct pending_dir_move {
@@ -247,29 +337,28 @@ struct orphan_dir_info {
u64 ino;
u64 gen;
u64 last_dir_index_offset;
+ u64 dir_high_seq_ino;
};
struct name_cache_entry {
- struct list_head list;
/*
- * radix_tree has only 32bit entries but we need to handle 64bit inums.
- * We use the lower 32bit of the 64bit inum to store it in the tree. If
- * more then one inum would fall into the same entry, we use radix_list
- * to store the additional entries. radix_list is also used to store
- * entries where two entries have the same inum but different
- * generations.
+ * The key in the entry is an inode number, and the generation matches
+ * the inode's generation.
*/
- struct list_head radix_list;
- u64 ino;
- u64 gen;
+ struct btrfs_lru_cache_entry entry;
u64 parent_ino;
u64 parent_gen;
int ret;
int need_later_update;
+ /* Name length without NUL terminator. */
int name_len;
- char name[];
+ /* Not NUL terminated. */
+ char name[] __counted_by(name_len) __nonstring;
};
+/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
+static_assert(offsetof(struct name_cache_entry, entry) == 0);
+
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1
@@ -298,28 +387,28 @@ static void inconsistent_snapshot_error(struct send_ctx *sctx,
result_string = "updated";
break;
case BTRFS_COMPARE_TREE_SAME:
- ASSERT(0);
+ DEBUG_WARN("no change between trees");
result_string = "unchanged";
break;
default:
- ASSERT(0);
+ DEBUG_WARN("unexpected comparison result %d", result);
result_string = "unexpected";
}
btrfs_err(sctx->send_root->fs_info,
"Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
result_string, what, sctx->cmp_key->objectid,
- sctx->send_root->root_key.objectid,
- (sctx->parent_root ?
- sctx->parent_root->root_key.objectid : 0));
+ btrfs_root_id(sctx->send_root),
+ (sctx->parent_root ? btrfs_root_id(sctx->parent_root) : 0));
}
__maybe_unused
static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
{
switch (sctx->proto) {
- case 1: return cmd < __BTRFS_SEND_C_MAX_V1;
- case 2: return cmd < __BTRFS_SEND_C_MAX_V2;
+ case 1: return cmd <= BTRFS_SEND_C_MAX_V1;
+ case 2: return cmd <= BTRFS_SEND_C_MAX_V2;
+ case 3: return cmd <= BTRFS_SEND_C_MAX_V3;
default: return false;
}
}
@@ -340,15 +429,21 @@ static int need_send_hole(struct send_ctx *sctx)
static void fs_path_reset(struct fs_path *p)
{
- if (p->reversed) {
+ if (p->reversed)
p->start = p->buf + p->buf_len - 1;
- p->end = p->start;
- *p->start = 0;
- } else {
+ else
p->start = p->buf;
- p->end = p->start;
- *p->start = 0;
- }
+
+ p->end = p->start;
+ *p->start = 0;
+}
+
+static void init_path(struct fs_path *p)
+{
+ p->reversed = 0;
+ p->buf = p->inline_buf;
+ p->buf_len = FS_PATH_INLINE_SIZE;
+ fs_path_reset(p);
}
static struct fs_path *fs_path_alloc(void)
@@ -358,10 +453,7 @@ static struct fs_path *fs_path_alloc(void)
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
- p->reversed = 0;
- p->buf = p->inline_buf;
- p->buf_len = FS_PATH_INLINE_SIZE;
- fs_path_reset(p);
+ init_path(p);
return p;
}
@@ -386,7 +478,7 @@ static void fs_path_free(struct fs_path *p)
kfree(p);
}
-static int fs_path_len(struct fs_path *p)
+static inline int fs_path_len(const struct fs_path *p)
{
return p->end - p->start;
}
@@ -402,15 +494,18 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
if (p->buf_len >= len)
return 0;
- if (len > PATH_MAX) {
- WARN_ON(1);
- return -ENOMEM;
- }
+ if (WARN_ON(len > PATH_MAX))
+ return -ENAMETOOLONG;
- path_len = p->end - p->start;
+ path_len = fs_path_len(p);
old_buf_len = p->buf_len;
/*
+ * Allocate to the next largest kmalloc bucket size, to let
+ * the fast path happen most of the time.
+ */
+ len = kmalloc_size_roundup(len);
+ /*
* First time the inline_buf does not suffice
*/
if (p->buf == p->inline_buf) {
@@ -423,11 +518,7 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
if (!tmp_buf)
return -ENOMEM;
p->buf = tmp_buf;
- /*
- * The real size of the buffer is bigger, this will let the fast path
- * happen most of the time
- */
- p->buf_len = ksize(p->buf);
+ p->buf_len = len;
if (p->reversed) {
tmp_buf = p->buf + old_buf_len - path_len - 1;
@@ -447,12 +538,12 @@ static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
int ret;
int new_len;
- new_len = p->end - p->start + name_len;
+ new_len = fs_path_len(p) + name_len;
if (p->start != p->end)
new_len++;
ret = fs_path_ensure_buf(p, new_len);
if (ret < 0)
- goto out;
+ return ret;
if (p->reversed) {
if (p->start != p->end)
@@ -467,8 +558,7 @@ static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
*p->end = 0;
}
-out:
- return ret;
+ return 0;
}
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
@@ -478,25 +568,15 @@ static int fs_path_add(struct fs_path *p, const char *name, int name_len)
ret = fs_path_prepare_for_add(p, name_len, &prepared);
if (ret < 0)
- goto out;
+ return ret;
memcpy(prepared, name, name_len);
-out:
- return ret;
+ return 0;
}
-static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
+static inline int fs_path_add_path(struct fs_path *p, const struct fs_path *p2)
{
- int ret;
- char *prepared;
-
- ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
- if (ret < 0)
- goto out;
- memcpy(prepared, p2->start, p2->end - p2->start);
-
-out:
- return ret;
+ return fs_path_add(p, p2->start, fs_path_len(p2));
}
static int fs_path_add_from_extent_buffer(struct fs_path *p,
@@ -508,27 +588,21 @@ static int fs_path_add_from_extent_buffer(struct fs_path *p,
ret = fs_path_prepare_for_add(p, len, &prepared);
if (ret < 0)
- goto out;
+ return ret;
read_extent_buffer(eb, prepared, off, len);
-out:
- return ret;
+ return 0;
}
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
- int ret;
-
p->reversed = from->reversed;
fs_path_reset(p);
- ret = fs_path_add_path(p, from);
-
- return ret;
+ return fs_path_add_path(p, from);
}
-
static void fs_path_unreverse(struct fs_path *p)
{
char *tmp;
@@ -538,13 +612,21 @@ static void fs_path_unreverse(struct fs_path *p)
return;
tmp = p->start;
- len = p->end - p->start;
+ len = fs_path_len(p);
p->start = p->buf;
p->end = p->start + len;
memmove(p->start, tmp, len + 1);
p->reversed = 0;
}
+static inline bool is_current_inode_path(const struct send_ctx *sctx,
+ const struct fs_path *path)
+{
+ const struct fs_path *cur = &sctx->cur_inode_path;
+
+ return (strncmp(path->start, cur->start, fs_path_len(cur)) == 0);
+}
+
static struct btrfs_path *alloc_path_for_send(void)
{
struct btrfs_path *path;
@@ -552,9 +634,9 @@ static struct btrfs_path *alloc_path_for_send(void)
path = btrfs_alloc_path();
if (!path)
return NULL;
- path->search_commit_root = 1;
- path->skip_locking = 1;
- path->need_commit_sem = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
+ path->need_commit_sem = true;
return path;
}
@@ -565,15 +647,10 @@ static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
while (pos < len) {
ret = kernel_write(filp, buf + pos, len - pos, off);
- /* TODO handle that correctly */
- /*if (ret == -ERESTARTSYS) {
- continue;
- }*/
if (ret < 0)
return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0))
return -EIO;
- }
pos += ret;
}
@@ -586,6 +663,9 @@ static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
int total_len = sizeof(*hdr) + len;
int left = sctx->send_max_size - sctx->send_size;
+ if (WARN_ON_ONCE(sctx->put_data))
+ return -EINVAL;
+
if (unlikely(left < total_len))
return -EOVERFLOW;
@@ -606,6 +686,8 @@ static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
}
+TLV_PUT_DEFINE_INT(8)
+TLV_PUT_DEFINE_INT(32)
TLV_PUT_DEFINE_INT(64)
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
@@ -659,7 +741,7 @@ static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
#define TLV_PUT_PATH(sctx, attrtype, p) \
do { \
ret = tlv_put_string(sctx, attrtype, p->start, \
- p->end - p->start); \
+ fs_path_len((p))); \
if (ret < 0) \
goto tlv_put_failure; \
} while(0)
@@ -680,9 +762,8 @@ static int send_header(struct send_ctx *sctx)
{
struct btrfs_stream_header hdr;
- strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
- hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
-
+ strscpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
+ hdr.version = cpu_to_le32(sctx->proto);
return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
&sctx->send_off);
}
@@ -697,7 +778,12 @@ static int begin_cmd(struct send_ctx *sctx, int cmd)
if (WARN_ON(!sctx->send_buf))
return -EINVAL;
- BUG_ON(sctx->send_size);
+ if (unlikely(sctx->send_size != 0)) {
+ btrfs_err(sctx->send_root->fs_info,
+ "send: command header buffer not empty cmd %d offset %llu",
+ cmd, sctx->send_off);
+ return -EINVAL;
+ }
sctx->send_size += sizeof(*hdr);
hdr = (struct btrfs_cmd_header *)sctx->send_buf;
@@ -716,15 +802,14 @@ static int send_cmd(struct send_ctx *sctx)
put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
put_unaligned_le32(0, &hdr->crc);
- crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
put_unaligned_le32(crc, &hdr->crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
&sctx->send_off);
- sctx->total_send_size += sctx->send_size;
- sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
sctx->send_size = 0;
+ sctx->put_data = false;
return ret;
}
@@ -735,14 +820,11 @@ static int send_cmd(struct send_ctx *sctx)
static int send_rename(struct send_ctx *sctx,
struct fs_path *from, struct fs_path *to)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
@@ -750,7 +832,6 @@ static int send_rename(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -760,14 +841,11 @@ out:
static int send_link(struct send_ctx *sctx,
struct fs_path *path, struct fs_path *lnk)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
@@ -775,7 +853,6 @@ static int send_link(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -784,21 +861,17 @@ out:
*/
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_unlink %s", path->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -807,35 +880,46 @@ out:
*/
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
- btrfs_debug(fs_info, "send_rmdir %s", path->start);
-
ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
+struct btrfs_inode_info {
+ u64 size;
+ u64 gen;
+ u64 mode;
+ u64 uid;
+ u64 gid;
+ u64 rdev;
+ u64 fileattr;
+ u64 nlink;
+};
+
/*
* Helper function to retrieve some fields from an inode item.
*/
-static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
- u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
- u64 *gid, u64 *rdev)
+static int get_inode_info(struct btrfs_root *root, u64 ino,
+ struct btrfs_inode_info *info)
{
int ret;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_inode_item *ii;
struct btrfs_key key;
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
key.objectid = ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
@@ -846,44 +930,40 @@ static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
return ret;
}
+ if (!info)
+ return 0;
+
ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
- if (size)
- *size = btrfs_inode_size(path->nodes[0], ii);
- if (gen)
- *gen = btrfs_inode_generation(path->nodes[0], ii);
- if (mode)
- *mode = btrfs_inode_mode(path->nodes[0], ii);
- if (uid)
- *uid = btrfs_inode_uid(path->nodes[0], ii);
- if (gid)
- *gid = btrfs_inode_gid(path->nodes[0], ii);
- if (rdev)
- *rdev = btrfs_inode_rdev(path->nodes[0], ii);
+ info->size = btrfs_inode_size(path->nodes[0], ii);
+ info->gen = btrfs_inode_generation(path->nodes[0], ii);
+ info->mode = btrfs_inode_mode(path->nodes[0], ii);
+ info->uid = btrfs_inode_uid(path->nodes[0], ii);
+ info->gid = btrfs_inode_gid(path->nodes[0], ii);
+ info->rdev = btrfs_inode_rdev(path->nodes[0], ii);
+ info->nlink = btrfs_inode_nlink(path->nodes[0], ii);
+ /*
+ * Transfer the unchanged u64 value of btrfs_inode_item::flags, which is
+ * otherwise logically split into 32/32 parts.
+ */
+ info->fileattr = btrfs_inode_flags(path->nodes[0], ii);
- return ret;
+ return 0;
}
-static int get_inode_info(struct btrfs_root *root,
- u64 ino, u64 *size, u64 *gen,
- u64 *mode, u64 *uid, u64 *gid,
- u64 *rdev)
+static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen)
{
- struct btrfs_path *path;
int ret;
+ struct btrfs_inode_info info = { 0 };
- path = alloc_path_for_send();
- if (!path)
- return -ENOMEM;
- ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
- rdev);
- btrfs_free_path(path);
+ ASSERT(gen);
+
+ ret = get_inode_info(root, ino, &info);
+ *gen = info.gen;
return ret;
}
-typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
- struct fs_path *p,
- void *ctx);
+typedef int (*iterate_inode_ref_t)(u64 dir, struct fs_path *p, void *ctx);
/*
* Helper function to iterate the entries in ONE btrfs_inode_ref or
@@ -894,14 +974,13 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
* path must point to the INODE_REF or INODE_EXTREF when called.
*/
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *found_key, int resolve,
+ struct btrfs_key *found_key, bool resolve,
iterate_inode_ref_t iterate, void *ctx)
{
struct extent_buffer *eb = path->nodes[0];
- struct btrfs_item *item;
struct btrfs_inode_ref *iref;
struct btrfs_inode_extref *extref;
- struct btrfs_path *tmp_path;
+ BTRFS_PATH_AUTO_FREE(tmp_path);
struct fs_path *p;
u32 cur = 0;
u32 total;
@@ -909,8 +988,6 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
u32 name_len;
char *start;
int ret = 0;
- int num = 0;
- int index;
u64 dir;
unsigned long name_off;
unsigned long elem_size;
@@ -930,12 +1007,11 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
if (found_key->type == BTRFS_INODE_REF_KEY) {
ptr = (unsigned long)btrfs_item_ptr(eb, slot,
struct btrfs_inode_ref);
- item = btrfs_item_nr(slot);
- total = btrfs_item_size(eb, item);
+ total = btrfs_item_size(eb, slot);
elem_size = sizeof(*iref);
} else {
ptr = btrfs_item_ptr_offset(eb, slot);
- total = btrfs_item_size_nr(eb, slot);
+ total = btrfs_item_size(eb, slot);
elem_size = sizeof(*extref);
}
@@ -946,13 +1022,11 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
iref = (struct btrfs_inode_ref *)(ptr + cur);
name_len = btrfs_inode_ref_name_len(eb, iref);
name_off = (unsigned long)(iref + 1);
- index = btrfs_inode_ref_index(eb, iref);
dir = found_key->offset;
} else {
extref = (struct btrfs_inode_extref *)(ptr + cur);
name_len = btrfs_inode_extref_name_len(eb, extref);
name_off = (unsigned long)&extref->name;
- index = btrfs_inode_extref_index(eb, extref);
dir = btrfs_inode_extref_parent(eb, extref);
}
@@ -978,7 +1052,13 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
ret = PTR_ERR(start);
goto out;
}
- BUG_ON(start < p->buf);
+ if (unlikely(start < p->buf)) {
+ btrfs_err(root->fs_info,
+ "send: path ref buffer underflow for key " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(found_key));
+ ret = -EINVAL;
+ goto out;
+ }
}
p->start = start;
} else {
@@ -989,14 +1069,12 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
}
cur += elem_size + name_len;
- ret = iterate(num, dir, index, p, ctx);
+ ret = iterate(dir, p, ctx);
if (ret)
goto out;
- num++;
}
out:
- btrfs_free_path(tmp_path);
fs_path_free(p);
return ret;
}
@@ -1004,7 +1082,7 @@ out:
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
const char *name, int name_len,
const char *data, int data_len,
- u8 type, void *ctx);
+ void *ctx);
/*
* Helper function to iterate the entries in ONE btrfs_dir_item.
@@ -1018,7 +1096,6 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
{
int ret = 0;
struct extent_buffer *eb;
- struct btrfs_item *item;
struct btrfs_dir_item *di;
struct btrfs_key di_key;
char *buf = NULL;
@@ -1030,12 +1107,11 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
u32 total;
int slot;
int num;
- u8 type;
/*
* Start with a small buffer (1 page). If later we end up needing more
* space, which can happen for xattrs on a fs with a leaf size greater
- * then the page size, attempt to increase the buffer. Typically xattr
+ * than the page size, attempt to increase the buffer. Typically xattr
* values are small.
*/
buf_len = PATH_MAX;
@@ -1047,26 +1123,24 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
eb = path->nodes[0];
slot = path->slots[0];
- item = btrfs_item_nr(slot);
di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
cur = 0;
len = 0;
- total = btrfs_item_size(eb, item);
+ total = btrfs_item_size(eb, slot);
num = 0;
while (cur < total) {
name_len = btrfs_dir_name_len(eb, di);
data_len = btrfs_dir_data_len(eb, di);
- type = btrfs_dir_type(eb, di);
btrfs_dir_item_key_to_cpu(eb, di, &di_key);
- if (type == BTRFS_FT_XATTR) {
- if (name_len > XATTR_NAME_MAX) {
+ if (btrfs_dir_ftype(eb, di) == BTRFS_FT_XATTR) {
+ if (unlikely(name_len > XATTR_NAME_MAX)) {
ret = -ENAMETOOLONG;
goto out;
}
- if (name_len + data_len >
- BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
+ if (unlikely(name_len + data_len >
+ BTRFS_MAX_XATTR_SIZE(root->fs_info))) {
ret = -E2BIG;
goto out;
}
@@ -1074,7 +1148,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
/*
* Path too long
*/
- if (name_len + data_len > PATH_MAX) {
+ if (unlikely(name_len + data_len > PATH_MAX)) {
ret = -ENAMETOOLONG;
goto out;
}
@@ -1110,7 +1184,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
cur += len;
ret = iterate(num, &di_key, buf, name_len, buf + name_len,
- data_len, type, ctx);
+ data_len, ctx);
if (ret < 0)
goto out;
if (ret) {
@@ -1126,8 +1200,7 @@ out:
return ret;
}
-static int __copy_first_ref(int num, u64 dir, int index,
- struct fs_path *p, void *ctx)
+static int __copy_first_ref(u64 dir, struct fs_path *p, void *ctx)
{
int ret;
struct fs_path *pt = ctx;
@@ -1149,7 +1222,7 @@ static int get_inode_path(struct btrfs_root *root,
{
int ret;
struct btrfs_key key, found_key;
- struct btrfs_path *p;
+ BTRFS_PATH_AUTO_FREE(p);
p = alloc_path_for_send();
if (!p)
@@ -1163,28 +1236,20 @@ static int get_inode_path(struct btrfs_root *root,
ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
if (ret < 0)
- goto out;
- if (ret) {
- ret = 1;
- goto out;
- }
+ return ret;
+ if (ret)
+ return 1;
+
btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
if (found_key.objectid != ino ||
(found_key.type != BTRFS_INODE_REF_KEY &&
- found_key.type != BTRFS_INODE_EXTREF_KEY)) {
- ret = -ENOENT;
- goto out;
- }
+ found_key.type != BTRFS_INODE_EXTREF_KEY))
+ return -ENOENT;
- ret = iterate_inode_ref(root, p, &found_key, 1,
- __copy_first_ref, path);
+ ret = iterate_inode_ref(root, p, &found_key, true, __copy_first_ref, path);
if (ret < 0)
- goto out;
- ret = 0;
-
-out:
- btrfs_free_path(p);
- return ret;
+ return ret;
+ return 0;
}
struct backref_ctx {
@@ -1203,8 +1268,12 @@ struct backref_ctx {
/* may be truncated in case it's the last extent in a file */
u64 extent_len;
- /* Just to check for bugs in backref resolving */
- int found_itself;
+ /* The bytenr the file extent item we are processing refers to. */
+ u64 bytenr;
+ /* The owner (root id) of the data backref for the current extent. */
+ u64 backref_owner;
+ /* The offset of the data backref for the current extent. */
+ u64 backref_offset;
};
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
@@ -1212,9 +1281,9 @@ static int __clone_root_cmp_bsearch(const void *key, const void *elt)
u64 root = (u64)(uintptr_t)key;
const struct clone_root *cr = elt;
- if (root < cr->root->root_key.objectid)
+ if (root < btrfs_root_id(cr->root))
return -1;
- if (root > cr->root->root_key.objectid)
+ if (root > btrfs_root_id(cr->root))
return 1;
return 0;
}
@@ -1224,41 +1293,42 @@ static int __clone_root_cmp_sort(const void *e1, const void *e2)
const struct clone_root *cr1 = e1;
const struct clone_root *cr2 = e2;
- if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
+ if (btrfs_root_id(cr1->root) < btrfs_root_id(cr2->root))
return -1;
- if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
+ if (btrfs_root_id(cr1->root) > btrfs_root_id(cr2->root))
return 1;
return 0;
}
/*
* Called for every backref that is found for the current extent.
- * Results are collected in sctx->clone_roots->ino/offset/found_refs
+ * Results are collected in sctx->clone_roots->ino/offset.
*/
-static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
+static int iterate_backrefs(u64 ino, u64 offset, u64 num_bytes, u64 root_id,
+ void *ctx_)
{
struct backref_ctx *bctx = ctx_;
- struct clone_root *found;
+ struct clone_root *clone_root;
/* First check if the root is in the list of accepted clone sources */
- found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
- bctx->sctx->clone_roots_cnt,
- sizeof(struct clone_root),
- __clone_root_cmp_bsearch);
- if (!found)
+ clone_root = bsearch((void *)(uintptr_t)root_id, bctx->sctx->clone_roots,
+ bctx->sctx->clone_roots_cnt,
+ sizeof(struct clone_root),
+ __clone_root_cmp_bsearch);
+ if (!clone_root)
return 0;
- if (found->root == bctx->sctx->send_root &&
+ /* This is our own reference, bail out as we can't clone from it. */
+ if (clone_root->root == bctx->sctx->send_root &&
ino == bctx->cur_objectid &&
- offset == bctx->cur_offset) {
- bctx->found_itself = 1;
- }
+ offset == bctx->cur_offset)
+ return 0;
/*
* Make sure we don't consider clones from send_root that are
* behind the current inode/offset.
*/
- if (found->root == bctx->sctx->send_root) {
+ if (clone_root->root == bctx->sctx->send_root) {
/*
* If the source inode was not yet processed we can't issue a
* clone operation, as the source extent does not exist yet at
@@ -1279,21 +1349,191 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
}
bctx->found++;
- found->found_refs++;
- if (ino < found->ino) {
- found->ino = ino;
- found->offset = offset;
- } else if (found->ino == ino) {
+ clone_root->found_ref = true;
+
+ /*
+ * If the given backref refers to a file extent item with a larger
+ * number of bytes than what we found before, use the new one so that
+ * we clone more optimally and end up doing fewer writes and getting
+ * fewer exclusive, non-shared extents at the destination.
+ */
+ if (num_bytes > clone_root->num_bytes) {
+ clone_root->ino = ino;
+ clone_root->offset = offset;
+ clone_root->num_bytes = num_bytes;
+
/*
- * same extent found more then once in the same file.
+ * Found a perfect candidate, so there's no need to continue
+ * backref walking.
*/
- if (found->offset > offset + bctx->extent_len)
- found->offset = offset;
+ if (num_bytes >= bctx->extent_len)
+ return BTRFS_ITERATE_EXTENT_INODES_STOP;
}
return 0;
}
+static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
+ const u64 **root_ids_ret, int *root_count_ret)
+{
+ struct backref_ctx *bctx = ctx;
+ struct send_ctx *sctx = bctx->sctx;
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
+ const u64 key = leaf_bytenr >> fs_info->nodesize_bits;
+ struct btrfs_lru_cache_entry *raw_entry;
+ struct backref_cache_entry *entry;
+
+ if (sctx->backref_cache.size == 0)
+ return false;
+
+ /*
+ * If relocation happened since we first filled the cache, then we must
+ * empty the cache and can not use it, because even though we operate on
+ * read-only roots, their leaves and nodes may have been reallocated and
+ * now be used for different nodes/leaves of the same tree or some other
+ * tree.
+ *
+ * We are called from iterate_extent_inodes() while either holding a
+ * transaction handle or holding fs_info->commit_root_sem, so no need
+ * to take any lock here.
+ */
+ if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) {
+ btrfs_lru_cache_clear(&sctx->backref_cache);
+ return false;
+ }
+
+ raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key, 0);
+ if (!raw_entry)
+ return false;
+
+ entry = container_of(raw_entry, struct backref_cache_entry, entry);
+ *root_ids_ret = entry->root_ids;
+ *root_count_ret = entry->num_roots;
+
+ return true;
+}
+
+static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
+ void *ctx)
+{
+ struct backref_ctx *bctx = ctx;
+ struct send_ctx *sctx = bctx->sctx;
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
+ struct backref_cache_entry *new_entry;
+ struct ulist_iterator uiter;
+ struct ulist_node *node;
+ int ret;
+
+ /*
+ * We're called while holding a transaction handle or while holding
+ * fs_info->commit_root_sem (at iterate_extent_inodes()), so must do a
+ * NOFS allocation.
+ */
+ new_entry = kmalloc(sizeof(struct backref_cache_entry), GFP_NOFS);
+ /* No worries, cache is optional. */
+ if (!new_entry)
+ return;
+
+ new_entry->entry.key = leaf_bytenr >> fs_info->nodesize_bits;
+ new_entry->entry.gen = 0;
+ new_entry->num_roots = 0;
+ ULIST_ITER_INIT(&uiter);
+ while ((node = ulist_next(root_ids, &uiter)) != NULL) {
+ const u64 root_id = node->val;
+ struct clone_root *root;
+
+ root = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots,
+ sctx->clone_roots_cnt, sizeof(struct clone_root),
+ __clone_root_cmp_bsearch);
+ if (!root)
+ continue;
+
+ /* Too many roots, just exit, no worries as caching is optional. */
+ if (new_entry->num_roots >= SEND_MAX_BACKREF_CACHE_ROOTS) {
+ kfree(new_entry);
+ return;
+ }
+
+ new_entry->root_ids[new_entry->num_roots] = root_id;
+ new_entry->num_roots++;
+ }
+
+ /*
+ * We may have not added any roots to the new cache entry, which means
+ * none of the roots is part of the list of roots from which we are
+ * allowed to clone. Cache the new entry as it's still useful to avoid
+ * backref walking to determine which roots have a path to the leaf.
+ *
+ * Also use GFP_NOFS because we're called while holding a transaction
+ * handle or while holding fs_info->commit_root_sem.
+ */
+ ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry,
+ GFP_NOFS);
+ ASSERT(ret == 0 || ret == -ENOMEM);
+ if (ret) {
+ /* Caching is optional, no worries. */
+ kfree(new_entry);
+ return;
+ }
+
+ /*
+ * We are called from iterate_extent_inodes() while either holding a
+ * transaction handle or holding fs_info->commit_root_sem, so no need
+ * to take any lock here.
+ */
+ if (sctx->backref_cache.size == 1)
+ sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans;
+}
+
+static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
+ const struct extent_buffer *leaf, void *ctx)
+{
+ const u64 refs = btrfs_extent_refs(leaf, ei);
+ const struct backref_ctx *bctx = ctx;
+ const struct send_ctx *sctx = bctx->sctx;
+
+ if (bytenr == bctx->bytenr) {
+ const u64 flags = btrfs_extent_flags(leaf, ei);
+
+ if (WARN_ON(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
+ return -EUCLEAN;
+
+ /*
+ * If we have only one reference and only the send root as a
+ * clone source - meaning no clone roots were given in the
+ * struct btrfs_ioctl_send_args passed to the send ioctl - then
+ * it's our reference and there's no point in doing backref
+ * walking which is expensive, so exit early.
+ */
+ if (refs == 1 && sctx->clone_roots_cnt == 1)
+ return -ENOENT;
+ }
+
+ /*
+ * Backreference walking (iterate_extent_inodes() below) is currently
+ * too expensive when an extent has a large number of references, both
+ * in time spent and used memory. So for now just fallback to write
+ * operations instead of clone operations when an extent has more than
+ * a certain amount of references.
+ */
+ if (refs > SEND_MAX_EXTENT_REFS)
+ return -ENOENT;
+
+ return 0;
+}
+
+static bool skip_self_data_ref(u64 root, u64 ino, u64 offset, void *ctx)
+{
+ const struct backref_ctx *bctx = ctx;
+
+ if (ino == bctx->cur_objectid &&
+ root == bctx->backref_owner &&
+ offset == bctx->backref_offset)
+ return true;
+
+ return false;
+}
+
/*
* Given an inode, offset and extent item, it finds a good clone for a clone
* instruction. Returns -ENOENT when none could be found. The function makes
@@ -1312,81 +1552,36 @@ static int find_extent_clone(struct send_ctx *sctx,
struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
int extent_type;
- u64 logical;
u64 disk_byte;
u64 num_bytes;
- u64 extent_item_pos;
- u64 flags = 0;
struct btrfs_file_extent_item *fi;
struct extent_buffer *eb = path->nodes[0];
- struct backref_ctx backref_ctx = {0};
+ struct backref_ctx backref_ctx = { 0 };
+ struct btrfs_backref_walk_ctx backref_walk_ctx = { 0 };
struct clone_root *cur_clone_root;
- struct btrfs_key found_key;
- struct btrfs_path *tmp_path;
- struct btrfs_extent_item *ei;
int compressed;
u32 i;
- tmp_path = alloc_path_for_send();
- if (!tmp_path)
- return -ENOMEM;
-
- /* We only use this path under the commit sem */
- tmp_path->need_commit_sem = 0;
-
- if (data_offset >= ino_size) {
- /*
- * There may be extents that lie behind the file's size.
- * I at least had this in combination with snapshotting while
- * writing large files.
- */
- ret = 0;
- goto out;
- }
+ /*
+ * With fallocate we can get prealloc extents beyond the inode's i_size,
+ * so we don't do anything here because clone operations can not clone
+ * to a range beyond i_size without increasing the i_size of the
+ * destination inode.
+ */
+ if (data_offset >= ino_size)
+ return 0;
- fi = btrfs_item_ptr(eb, path->slots[0],
- struct btrfs_file_extent_item);
+ fi = btrfs_item_ptr(eb, path->slots[0], struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(eb, fi);
- if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- ret = -ENOENT;
- goto out;
- }
- compressed = btrfs_file_extent_compression(eb, fi);
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ return -ENOENT;
- num_bytes = btrfs_file_extent_num_bytes(eb, fi);
disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
- if (disk_byte == 0) {
- ret = -ENOENT;
- goto out;
- }
- logical = disk_byte + btrfs_file_extent_offset(eb, fi);
-
- down_read(&fs_info->commit_root_sem);
- ret = extent_from_logical(fs_info, disk_byte, tmp_path,
- &found_key, &flags);
- up_read(&fs_info->commit_root_sem);
-
- if (ret < 0)
- goto out;
- if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- ret = -EIO;
- goto out;
- }
+ if (disk_byte == 0)
+ return -ENOENT;
- ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
- struct btrfs_extent_item);
- /*
- * Backreference walking (iterate_extent_inodes() below) is currently
- * too expensive when an extent has a large number of references, both
- * in time spent and used memory. So for now just fallback to write
- * operations instead of clone operations when an extent has more than
- * a certain amount of references.
- */
- if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
- ret = -ENOENT;
- goto out;
- }
- btrfs_release_path(tmp_path);
+ compressed = btrfs_file_extent_compression(eb, fi);
+ num_bytes = btrfs_file_extent_num_bytes(eb, fi);
/*
* Setup the clone roots.
@@ -1395,64 +1590,105 @@ static int find_extent_clone(struct send_ctx *sctx,
cur_clone_root = sctx->clone_roots + i;
cur_clone_root->ino = (u64)-1;
cur_clone_root->offset = 0;
- cur_clone_root->found_refs = 0;
+ cur_clone_root->num_bytes = 0;
+ cur_clone_root->found_ref = false;
}
backref_ctx.sctx = sctx;
- backref_ctx.found = 0;
backref_ctx.cur_objectid = ino;
backref_ctx.cur_offset = data_offset;
- backref_ctx.found_itself = 0;
- backref_ctx.extent_len = num_bytes;
+ backref_ctx.bytenr = disk_byte;
+ /*
+ * Use the header owner and not the send root's id, because in case of a
+ * snapshot we can have shared subtrees.
+ */
+ backref_ctx.backref_owner = btrfs_header_owner(eb);
+ backref_ctx.backref_offset = data_offset - btrfs_file_extent_offset(eb, fi);
/*
* The last extent of a file may be too large due to page alignment.
* We need to adjust extent_len in this case so that the checks in
- * __iterate_backrefs work.
+ * iterate_backrefs() work.
*/
if (data_offset + num_bytes >= ino_size)
backref_ctx.extent_len = ino_size - data_offset;
+ else
+ backref_ctx.extent_len = num_bytes;
/*
* Now collect all backrefs.
*/
+ backref_walk_ctx.bytenr = disk_byte;
if (compressed == BTRFS_COMPRESS_NONE)
- extent_item_pos = logical - found_key.objectid;
- else
- extent_item_pos = 0;
- ret = iterate_extent_inodes(fs_info, found_key.objectid,
- extent_item_pos, 1, __iterate_backrefs,
- &backref_ctx, false);
+ backref_walk_ctx.extent_item_pos = btrfs_file_extent_offset(eb, fi);
+ backref_walk_ctx.fs_info = fs_info;
+ backref_walk_ctx.cache_lookup = lookup_backref_cache;
+ backref_walk_ctx.cache_store = store_backref_cache;
+ backref_walk_ctx.indirect_ref_iterator = iterate_backrefs;
+ backref_walk_ctx.check_extent_item = check_extent_item;
+ backref_walk_ctx.user_ctx = &backref_ctx;
+
+ /*
+ * If we have a single clone root, then it's the send root and we can tell
+ * the backref walking code to skip our own backref and not resolve it,
+ * since we can not use it for cloning - the source and destination
+ * ranges can't overlap and in case the leaf is shared through a subtree
+ * due to snapshots, we can't use those other roots since they are not
+ * in the list of clone roots.
+ */
+ if (sctx->clone_roots_cnt == 1)
+ backref_walk_ctx.skip_data_ref = skip_self_data_ref;
+ ret = iterate_extent_inodes(&backref_walk_ctx, true, iterate_backrefs,
+ &backref_ctx);
if (ret < 0)
- goto out;
+ return ret;
- if (!backref_ctx.found_itself) {
- /* found a bug in backref code? */
- ret = -EIO;
- btrfs_err(fs_info,
- "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
- ino, data_offset, disk_byte, found_key.objectid);
- goto out;
+ down_read(&fs_info->commit_root_sem);
+ if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
+ /*
+ * A transaction commit for a transaction in which block group
+ * relocation was done just happened.
+ * The disk_bytenr of the file extent item we processed is
+ * possibly stale, referring to the extent's location before
+ * relocation. So act as if we haven't found any clone sources
+ * and fallback to write commands, which will read the correct
+ * data from the new extent location. Otherwise we will fail
+ * below because we haven't found our own back reference or we
+ * could be getting incorrect sources in case the old extent
+ * was already reallocated after the relocation.
+ */
+ up_read(&fs_info->commit_root_sem);
+ return -ENOENT;
}
-
- btrfs_debug(fs_info,
- "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
- data_offset, ino, num_bytes, logical);
+ up_read(&fs_info->commit_root_sem);
if (!backref_ctx.found)
- btrfs_debug(fs_info, "no clones found");
+ return -ENOENT;
cur_clone_root = NULL;
for (i = 0; i < sctx->clone_roots_cnt; i++) {
- if (sctx->clone_roots[i].found_refs) {
- if (!cur_clone_root)
- cur_clone_root = sctx->clone_roots + i;
- else if (sctx->clone_roots[i].root == sctx->send_root)
- /* prefer clones from send_root over others */
- cur_clone_root = sctx->clone_roots + i;
- }
+ struct clone_root *clone_root = &sctx->clone_roots[i];
+
+ if (!clone_root->found_ref)
+ continue;
+
+ /*
+ * Choose the root from which we can clone more bytes, to
+ * minimize write operations and therefore have more extent
+ * sharing at the destination (the same as in the source).
+ */
+ if (!cur_clone_root ||
+ clone_root->num_bytes > cur_clone_root->num_bytes) {
+ cur_clone_root = clone_root;
+ /*
+ * We found an optimal clone candidate (any inode from
+ * any root is fine), so we're done.
+ */
+ if (clone_root->num_bytes >= backref_ctx.extent_len)
+ break;
+ }
}
if (cur_clone_root) {
@@ -1462,8 +1698,6 @@ static int find_extent_clone(struct send_ctx *sctx,
ret = -ENOENT;
}
-out:
- btrfs_free_path(tmp_path);
return ret;
}
@@ -1472,7 +1706,7 @@ static int read_symlink(struct btrfs_root *root,
struct fs_path *dest)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_file_extent_item *ei;
u8 type;
@@ -1489,38 +1723,45 @@ static int read_symlink(struct btrfs_root *root,
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- if (ret) {
+ return ret;
+ if (unlikely(ret)) {
/*
* An empty symlink inode. Can happen in rare error paths when
* creating a symlink (transaction committed before the inode
* eviction handler removed the symlink inode items and a crash
- * happened in between or the subvol was snapshoted in between).
+ * happened in between or the subvol was snapshotted in between).
* Print an informative message to dmesg/syslog so that the user
* can delete the symlink.
*/
btrfs_err(root->fs_info,
"Found empty symlink inode %llu at root %llu",
- ino, root->root_key.objectid);
- ret = -EIO;
- goto out;
+ ino, btrfs_root_id(root));
+ return -EIO;
}
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], ei);
+ if (unlikely(type != BTRFS_FILE_EXTENT_INLINE)) {
+ ret = -EUCLEAN;
+ btrfs_crit(root->fs_info,
+"send: found symlink extent that is not inline, ino %llu root %llu extent type %d",
+ ino, btrfs_root_id(root), type);
+ return ret;
+ }
compression = btrfs_file_extent_compression(path->nodes[0], ei);
- BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
- BUG_ON(compression);
+ if (unlikely(compression != BTRFS_COMPRESS_NONE)) {
+ ret = -EUCLEAN;
+ btrfs_crit(root->fs_info,
+"send: found symlink extent with compression, ino %llu root %llu compression type %d",
+ ino, btrfs_root_id(root), compression);
+ return ret;
+ }
off = btrfs_file_extent_inline_start(ei);
len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
- ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
}
/*
@@ -1531,8 +1772,7 @@ static int gen_unique_name(struct send_ctx *sctx,
u64 ino, u64 gen,
struct fs_path *dest)
{
- int ret = 0;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
char tmp[64];
int len;
@@ -1543,18 +1783,21 @@ static int gen_unique_name(struct send_ctx *sctx,
return -ENOMEM;
while (1) {
+ struct fscrypt_str tmp_name;
+
len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
ino, gen, idx);
ASSERT(len < sizeof(tmp));
+ tmp_name.name = tmp;
+ tmp_name.len = len;
di = btrfs_lookup_dir_item(NULL, sctx->send_root,
path, BTRFS_FIRST_FREE_OBJECTID,
- tmp, strlen(tmp), 0);
+ &tmp_name, 0);
btrfs_release_path(path);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
+ if (IS_ERR(di))
+ return PTR_ERR(di);
+
if (di) {
/* not unique, try again */
idx++;
@@ -1563,18 +1806,16 @@ static int gen_unique_name(struct send_ctx *sctx,
if (!sctx->parent_root) {
/* unique */
- ret = 0;
break;
}
di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
path, BTRFS_FIRST_FREE_OBJECTID,
- tmp, strlen(tmp), 0);
+ &tmp_name, 0);
btrfs_release_path(path);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
+ if (IS_ERR(di))
+ return PTR_ERR(di);
+
if (di) {
/* not unique, try again */
idx++;
@@ -1584,11 +1825,7 @@ static int gen_unique_name(struct send_ctx *sctx,
break;
}
- ret = fs_path_add(dest, tmp, strlen(tmp));
-
-out:
- btrfs_free_path(path);
- return ret;
+ return fs_path_add(dest, tmp, len);
}
enum inode_state {
@@ -1599,28 +1836,34 @@ enum inode_state {
inode_state_did_delete,
};
-static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
+static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen,
+ u64 *send_gen, u64 *parent_gen)
{
int ret;
int left_ret;
int right_ret;
u64 left_gen;
- u64 right_gen;
+ u64 right_gen = 0;
+ struct btrfs_inode_info info;
- ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
- NULL, NULL);
+ ret = get_inode_info(sctx->send_root, ino, &info);
if (ret < 0 && ret != -ENOENT)
- goto out;
- left_ret = ret;
+ return ret;
+ left_ret = (info.nlink == 0) ? -ENOENT : ret;
+ left_gen = info.gen;
+ if (send_gen)
+ *send_gen = ((left_ret == -ENOENT) ? 0 : info.gen);
if (!sctx->parent_root) {
right_ret = -ENOENT;
} else {
- ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
- NULL, NULL, NULL, NULL);
+ ret = get_inode_info(sctx->parent_root, ino, &info);
if (ret < 0 && ret != -ENOENT)
- goto out;
- right_ret = ret;
+ return ret;
+ right_ret = (info.nlink == 0) ? -ENOENT : ret;
+ right_gen = info.gen;
+ if (parent_gen)
+ *parent_gen = ((right_ret == -ENOENT) ? 0 : info.gen);
}
if (!left_ret && !right_ret) {
@@ -1661,30 +1904,27 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
ret = -ENOENT;
}
-out:
return ret;
}
-static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
+static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen,
+ u64 *send_gen, u64 *parent_gen)
{
int ret;
if (ino == BTRFS_FIRST_FREE_OBJECTID)
return 1;
- ret = get_cur_inode_state(sctx, ino, gen);
+ ret = get_cur_inode_state(sctx, ino, gen, send_gen, parent_gen);
if (ret < 0)
- goto out;
+ return ret;
if (ret == inode_state_no_change ||
ret == inode_state_did_create ||
ret == inode_state_will_delete)
- ret = 1;
- else
- ret = 0;
+ return 1;
-out:
- return ret;
+ return 0;
}
/*
@@ -1692,34 +1932,28 @@ out:
*/
static int lookup_dir_item_inode(struct btrfs_root *root,
u64 dir, const char *name, int name_len,
- u64 *found_inode,
- u8 *found_type)
+ u64 *found_inode)
{
int ret = 0;
struct btrfs_dir_item *di;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
+ struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);
path = alloc_path_for_send();
if (!path)
return -ENOMEM;
- di = btrfs_lookup_dir_item(NULL, root, path,
- dir, name, name_len, 0);
- if (IS_ERR_OR_NULL(di)) {
- ret = di ? PTR_ERR(di) : -ENOENT;
- goto out;
- }
+ di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
+ if (IS_ERR_OR_NULL(di))
+ return di ? PTR_ERR(di) : -ENOENT;
+
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
- if (key.type == BTRFS_ROOT_ITEM_KEY) {
- ret = -ENOENT;
- goto out;
- }
+ if (key.type == BTRFS_ROOT_ITEM_KEY)
+ return -ENOENT;
+
*found_inode = key.objectid;
- *found_type = btrfs_dir_type(path->nodes[0], di);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -1733,7 +1967,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int len;
u64 parent_dir;
@@ -1747,16 +1981,14 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
if (ret < 0)
- goto out;
+ return ret;
if (!ret)
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (ret || found_key.objectid != ino ||
(found_key.type != BTRFS_INODE_REF_KEY &&
- found_key.type != BTRFS_INODE_EXTREF_KEY)) {
- ret = -ENOENT;
- goto out;
- }
+ found_key.type != BTRFS_INODE_EXTREF_KEY))
+ return -ENOENT;
if (found_key.type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *iref;
@@ -1777,20 +2009,17 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
}
if (ret < 0)
- goto out;
+ return ret;
btrfs_release_path(path);
if (dir_gen) {
- ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
- NULL, NULL, NULL);
+ ret = get_inode_gen(root, parent_dir, dir_gen);
if (ret < 0)
- goto out;
+ return ret;
}
*dir = parent_dir;
-out:
- btrfs_free_path(path);
return ret;
}
@@ -1836,44 +2065,36 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
const char *name, int name_len,
u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
- int ret = 0;
- u64 gen;
+ int ret;
+ u64 parent_root_dir_gen;
u64 other_inode = 0;
- u8 other_type = 0;
+ struct btrfs_inode_info info;
if (!sctx->parent_root)
- goto out;
+ return 0;
- ret = is_inode_existent(sctx, dir, dir_gen);
+ ret = is_inode_existent(sctx, dir, dir_gen, NULL, &parent_root_dir_gen);
if (ret <= 0)
- goto out;
+ return 0;
/*
* If we have a parent root we need to verify that the parent dir was
* not deleted and then re-created, if it was then we have no overwrite
* and we can just unlink this entry.
+ *
+ * @parent_root_dir_gen was set to 0 if the inode does not exist in the
+ * parent root.
*/
- if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
- ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
- NULL, NULL, NULL);
- if (ret < 0 && ret != -ENOENT)
- goto out;
- if (ret) {
- ret = 0;
- goto out;
- }
- if (gen != dir_gen)
- goto out;
- }
+ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID &&
+ parent_root_dir_gen != dir_gen)
+ return 0;
ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
- &other_inode, &other_type);
- if (ret < 0 && ret != -ENOENT)
- goto out;
- if (ret) {
- ret = 0;
- goto out;
- }
+ &other_inode);
+ if (ret == -ENOENT)
+ return 0;
+ else if (ret < 0)
+ return ret;
/*
* Check if the overwritten ref was already processed. If yes, the ref
@@ -1882,19 +2103,17 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
*/
if (other_inode > sctx->send_progress ||
is_waiting_for_move(sctx, other_inode)) {
- ret = get_inode_info(sctx->parent_root, other_inode, NULL,
- who_gen, who_mode, NULL, NULL, NULL);
+ ret = get_inode_info(sctx->parent_root, other_inode, &info);
if (ret < 0)
- goto out;
+ return ret;
- ret = 1;
*who_ino = other_inode;
- } else {
- ret = 0;
+ *who_gen = info.gen;
+ *who_mode = info.mode;
+ return 1;
}
-out:
- return ret;
+ return 0;
}
/*
@@ -1909,50 +2128,43 @@ static int did_overwrite_ref(struct send_ctx *sctx,
u64 ino, u64 ino_gen,
const char *name, int name_len)
{
- int ret = 0;
- u64 gen;
+ int ret;
u64 ow_inode;
- u8 other_type;
+ u64 ow_gen = 0;
+ u64 send_root_dir_gen;
if (!sctx->parent_root)
- goto out;
+ return 0;
- ret = is_inode_existent(sctx, dir, dir_gen);
+ ret = is_inode_existent(sctx, dir, dir_gen, &send_root_dir_gen, NULL);
if (ret <= 0)
- goto out;
+ return ret;
- if (dir != BTRFS_FIRST_FREE_OBJECTID) {
- ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
- NULL, NULL, NULL);
- if (ret < 0 && ret != -ENOENT)
- goto out;
- if (ret) {
- ret = 0;
- goto out;
- }
- if (gen != dir_gen)
- goto out;
- }
+ /*
+ * @send_root_dir_gen was set to 0 if the inode does not exist in the
+ * send root.
+ */
+ if (dir != BTRFS_FIRST_FREE_OBJECTID && send_root_dir_gen != dir_gen)
+ return 0;
/* check if the ref was overwritten by another ref */
ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
- &ow_inode, &other_type);
- if (ret < 0 && ret != -ENOENT)
- goto out;
- if (ret) {
+ &ow_inode);
+ if (ret == -ENOENT) {
/* was never and will never be overwritten */
- ret = 0;
- goto out;
+ return 0;
+ } else if (ret < 0) {
+ return ret;
}
- ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
- NULL, NULL);
- if (ret < 0)
- goto out;
+ if (ow_inode == ino) {
+ ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen);
+ if (ret < 0)
+ return ret;
- if (ow_inode == ino && gen == ino_gen) {
- ret = 0;
- goto out;
+ /* It's the same inode, so no overwrite happened. */
+ if (ow_gen == ino_gen)
+ return 0;
}
/*
@@ -1961,15 +2173,20 @@ static int did_overwrite_ref(struct send_ctx *sctx,
* inode 'ino' to be orphanized, therefore check if ow_inode matches
* the current inode being processed.
*/
- if ((ow_inode < sctx->send_progress) ||
- (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
- gen == sctx->cur_inode_gen))
- ret = 1;
- else
- ret = 0;
+ if (ow_inode < sctx->send_progress)
+ return 1;
-out:
- return ret;
+ if (ino != sctx->cur_ino && ow_inode == sctx->cur_ino) {
+ if (ow_gen == 0) {
+ ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen);
+ if (ret < 0)
+ return ret;
+ }
+ if (ow_gen == sctx->cur_inode_gen)
+ return 1;
+ }
+
+ return 0;
}
/*
@@ -2003,113 +2220,16 @@ out:
return ret;
}
-/*
- * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
- * so we need to do some special handling in case we have clashes. This function
- * takes care of this with the help of name_cache_entry::radix_list.
- * In case of error, nce is kfreed.
- */
-static int name_cache_insert(struct send_ctx *sctx,
- struct name_cache_entry *nce)
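+/*
+ * Look up a cached name for the given inode number and generation in the send
+ * context's LRU name cache.
+ */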
+static inline struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
+ u64 ino, u64 gen)
{
- int ret = 0;
- struct list_head *nce_head;
-
- nce_head = radix_tree_lookup(&sctx->name_cache,
- (unsigned long)nce->ino);
- if (!nce_head) {
- nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
- if (!nce_head) {
- kfree(nce);
- return -ENOMEM;
- }
- INIT_LIST_HEAD(nce_head);
-
- ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
- if (ret < 0) {
- kfree(nce_head);
- kfree(nce);
- return ret;
- }
- }
- list_add_tail(&nce->radix_list, nce_head);
- list_add_tail(&nce->list, &sctx->name_cache_list);
- sctx->name_cache_size++;
-
- return ret;
-}
-
-static void name_cache_delete(struct send_ctx *sctx,
- struct name_cache_entry *nce)
-{
- struct list_head *nce_head;
-
- nce_head = radix_tree_lookup(&sctx->name_cache,
- (unsigned long)nce->ino);
- if (!nce_head) {
- btrfs_err(sctx->send_root->fs_info,
- "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
- nce->ino, sctx->name_cache_size);
- }
-
- list_del(&nce->radix_list);
- list_del(&nce->list);
- sctx->name_cache_size--;
-
- /*
- * We may not get to the final release of nce_head if the lookup fails
- */
- if (nce_head && list_empty(nce_head)) {
- radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
- kfree(nce_head);
- }
-}
+ struct btrfs_lru_cache_entry *entry;
-static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
- u64 ino, u64 gen)
-{
- struct list_head *nce_head;
- struct name_cache_entry *cur;
-
- nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
- if (!nce_head)
+ entry = btrfs_lru_cache_lookup(&sctx->name_cache, ino, gen);
+ if (!entry)
return NULL;
- list_for_each_entry(cur, nce_head, radix_list) {
- if (cur->ino == ino && cur->gen == gen)
- return cur;
- }
- return NULL;
-}
-
-/*
- * Remove some entries from the beginning of name_cache_list.
- */
-static void name_cache_clean_unused(struct send_ctx *sctx)
-{
- struct name_cache_entry *nce;
-
- if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
- return;
-
- while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
- nce = list_entry(sctx->name_cache_list.next,
- struct name_cache_entry, list);
- name_cache_delete(sctx, nce);
- kfree(nce);
- }
-}
-
-static void name_cache_free(struct send_ctx *sctx)
-{
- struct name_cache_entry *nce;
-
- while (!list_empty(&sctx->name_cache_list)) {
- nce = list_entry(sctx->name_cache_list.next,
- struct name_cache_entry, list);
- name_cache_delete(sctx, nce);
- kfree(nce);
- }
+ return container_of(entry, struct name_cache_entry, entry);
}
/*
@@ -2128,7 +2248,7 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
{
int ret;
int nce_ret;
- struct name_cache_entry *nce = NULL;
+ struct name_cache_entry *nce;
/*
* First check if we already did a call to this function with the same
@@ -2138,40 +2258,31 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
nce = name_cache_search(sctx, ino, gen);
if (nce) {
if (ino < sctx->send_progress && nce->need_later_update) {
- name_cache_delete(sctx, nce);
- kfree(nce);
+ btrfs_lru_cache_remove(&sctx->name_cache, &nce->entry);
nce = NULL;
} else {
- /*
- * Removes the entry from the list and adds it back to
- * the end. This marks the entry as recently used so
- * that name_cache_clean_unused does not remove it.
- */
- list_move_tail(&nce->list, &sctx->name_cache_list);
-
*parent_ino = nce->parent_ino;
*parent_gen = nce->parent_gen;
ret = fs_path_add(dest, nce->name, nce->name_len);
if (ret < 0)
- goto out;
- ret = nce->ret;
- goto out;
+ return ret;
+ return nce->ret;
}
}
/*
* If the inode is not existent yet, add the orphan name and return 1.
* This should only happen for the parent dir that we determine in
- * __record_new_ref
+ * record_new_ref_if_needed().
*/
- ret = is_inode_existent(sctx, ino, gen);
+ ret = is_inode_existent(sctx, ino, gen, NULL, NULL);
if (ret < 0)
- goto out;
+ return ret;
if (!ret) {
ret = gen_unique_name(sctx, ino, gen, dest);
if (ret < 0)
- goto out;
+ return ret;
ret = 1;
goto out_cache;
}
@@ -2187,21 +2298,21 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
ret = get_first_ref(sctx->parent_root, ino,
parent_ino, parent_gen, dest);
if (ret < 0)
- goto out;
+ return ret;
/*
* Check if the ref was overwritten by an inode's ref that was processed
* earlier. If yes, treat as orphan and return 1.
*/
ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
- dest->start, dest->end - dest->start);
+ dest->start, fs_path_len(dest));
if (ret < 0)
- goto out;
+ return ret;
if (ret) {
fs_path_reset(dest);
ret = gen_unique_name(sctx, ino, gen, dest);
if (ret < 0)
- goto out;
+ return ret;
ret = 1;
}
@@ -2209,31 +2320,29 @@ out_cache:
/*
* Store the result of the lookup in the name cache.
*/
- nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
- if (!nce) {
- ret = -ENOMEM;
- goto out;
- }
+ nce = kmalloc(sizeof(*nce) + fs_path_len(dest), GFP_KERNEL);
+ if (!nce)
+ return -ENOMEM;
- nce->ino = ino;
- nce->gen = gen;
+ nce->entry.key = ino;
+ nce->entry.gen = gen;
nce->parent_ino = *parent_ino;
nce->parent_gen = *parent_gen;
nce->name_len = fs_path_len(dest);
nce->ret = ret;
- strcpy(nce->name, dest->start);
+ memcpy(nce->name, dest->start, nce->name_len);
if (ino < sctx->send_progress)
nce->need_later_update = 0;
else
nce->need_later_update = 1;
- nce_ret = name_cache_insert(sctx, nce);
- if (nce_ret < 0)
- ret = nce_ret;
- name_cache_clean_unused(sctx);
+ nce_ret = btrfs_lru_cache_store(&sctx->name_cache, &nce->entry, GFP_KERNEL);
+ if (nce_ret < 0) {
+ kfree(nce);
+ return nce_ret;
+ }
-out:
return ret;
}
@@ -2270,6 +2379,14 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
u64 parent_inode = 0;
u64 parent_gen = 0;
int stop = 0;
+ const bool is_cur_inode = (ino == sctx->cur_ino && gen == sctx->cur_inode_gen);
+
+ if (is_cur_inode && fs_path_len(&sctx->cur_inode_path) > 0) {
+ if (dest != &sctx->cur_inode_path)
+ return fs_path_copy(dest, &sctx->cur_inode_path);
+
+ return 0;
+ }
name = fs_path_alloc();
if (!name) {
@@ -2321,8 +2438,12 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
out:
fs_path_free(name);
- if (!ret)
+ if (!ret) {
fs_path_unreverse(dest);
+ if (is_cur_inode && dest != &sctx->cur_inode_path)
+ ret = fs_path_copy(&sctx->cur_inode_path, dest);
+ }
+
return ret;
}
@@ -2334,11 +2455,11 @@ static int send_subvol_begin(struct send_ctx *sctx)
int ret;
struct btrfs_root *send_root = sctx->send_root;
struct btrfs_root *parent_root = sctx->parent_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
- char *name = NULL;
+ char AUTO_KFREE(name);
int namelen;
path = btrfs_alloc_path();
@@ -2346,30 +2467,25 @@ static int send_subvol_begin(struct send_ctx *sctx)
return -ENOMEM;
name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
- if (!name) {
- btrfs_free_path(path);
+ if (!name)
return -ENOMEM;
- }
- key.objectid = send_root->root_key.objectid;
+ key.objectid = btrfs_root_id(send_root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = 0;
ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
&key, path, 1, 0);
if (ret < 0)
- goto out;
- if (ret) {
- ret = -ENOENT;
- goto out;
- }
+ return ret;
+ if (ret)
+ return -ENOENT;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.type != BTRFS_ROOT_BACKREF_KEY ||
- key.objectid != send_root->root_key.objectid) {
- ret = -ENOENT;
- goto out;
+ key.objectid != btrfs_root_id(send_root)) {
+ return -ENOENT;
}
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
namelen = btrfs_root_ref_name_len(leaf, ref);
@@ -2379,11 +2495,11 @@ static int send_subvol_begin(struct send_ctx *sctx)
if (parent_root) {
ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
if (ret < 0)
- goto out;
+ return ret;
} else {
ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
if (ret < 0)
- goto out;
+ return ret;
}
TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
@@ -2411,31 +2527,63 @@ static int send_subvol_begin(struct send_ctx *sctx)
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- btrfs_free_path(path);
- kfree(name);
return ret;
}
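+/*
+ * Get the path of the inode currently being processed, computing it and
+ * caching it in the send context on first use so that later commands for the
+ * same inode can reuse it.
+ */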
+static struct fs_path *get_cur_inode_path(struct send_ctx *sctx)
+{
+ if (fs_path_len(&sctx->cur_inode_path) == 0) {
+ int ret;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ &sctx->cur_inode_path);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ return &sctx->cur_inode_path;
+}
+
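+/*
+ * Get the full path for the given inode and generation, reusing the cached
+ * path of the inode currently being processed when they match.
+ */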
+static struct fs_path *get_path_for_command(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ struct fs_path *path;
+ int ret;
+
+ if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
+ return get_cur_inode_path(sctx);
+
+ path = fs_path_alloc();
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_cur_path(sctx, ino, gen, path);
+ if (ret < 0) {
+ fs_path_free(path);
+ return ERR_PTR(ret);
+ }
+
+ return path;
+}
+
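+/*
+ * Free a path obtained from get_path_for_command(), unless it is the cached
+ * path of the current inode, which is owned by the send context.
+ */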
+static void free_path_for_command(const struct send_ctx *sctx, struct fs_path *path)
+{
+ if (path != &sctx->cur_inode_path)
+ fs_path_free(path);
+}
+
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
@@ -2443,29 +2591,23 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
@@ -2473,30 +2615,50 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
-static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
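+/* Send a BTRFS_SEND_C_FILEATTR command (a no-op for send stream protocol v1). */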
+static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
- ino, uid, gid);
+ if (sctx->proto < 2)
+ return 0;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
- ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
+ ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR);
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ free_path_for_command(sctx, p);
+ return ret;
+}
+
+static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
if (ret < 0)
goto out;
+
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
@@ -2505,26 +2667,23 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
tlv_put_failure:
out:
- fs_path_free(p);
+ free_path_for_command(sctx, p);
return ret;
}
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p = NULL;
struct btrfs_inode_item *ii;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
struct btrfs_key key;
int slot;
- btrfs_debug(fs_info, "send_utimes %llu", ino);
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_path_for_command(sctx, ino, gen);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
path = alloc_path_for_send();
if (!path) {
@@ -2549,50 +2708,103 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, ino, gen, p);
- if (ret < 0)
- goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
- /* TODO Add otime support when the otime patches get into upstream */
+ if (sctx->proto >= 2)
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_OTIME, eb, &ii->otime);
ret = send_cmd(sctx);
tlv_put_failure:
out:
- fs_path_free(p);
- btrfs_free_path(path);
+ free_path_for_command(sctx, p);
return ret;
}
/*
+ * If the cache is full, we can't remove entries from it and do a call to
+ * send_utimes() for each respective inode, because we might be finishing
+ * processing an inode that is a directory and it just got renamed, and existing
+ * entries in the cache may refer to inodes that have the directory in their
+ * full path - in which case we would generate outdated paths (pre-rename)
+ * for the inodes that the cache entries point to. Instead of pruning the
+ * cache when inserting, do it after we finish processing each inode at
+ * finish_inode_if_needed().
+ */
+static int cache_dir_utimes(struct send_ctx *sctx, u64 dir, u64 gen)
+{
+ struct btrfs_lru_cache_entry *entry;
+ int ret;
+
+ entry = btrfs_lru_cache_lookup(&sctx->dir_utimes_cache, dir, gen);
+ if (entry != NULL)
+ return 0;
+
+ /* Caching is optional, don't fail if we can't allocate memory. */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return send_utimes(sctx, dir, gen);
+
+ entry->key = dir;
+ entry->gen = gen;
+
+ ret = btrfs_lru_cache_store(&sctx->dir_utimes_cache, entry, GFP_KERNEL);
+ ASSERT(ret != -EEXIST);
+ if (ret) {
+ kfree(entry);
+ return send_utimes(sctx, dir, gen);
+ }
+
+ return 0;
+}
+
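+/*
+ * Flush the least recently used entries from the dir utimes cache, sending
+ * their utimes commands, until the cache size is back within
+ * SEND_MAX_DIR_UTIMES_CACHE_SIZE.
+ */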
+static int trim_dir_utimes_cache(struct send_ctx *sctx)
+{
+ while (sctx->dir_utimes_cache.size > SEND_MAX_DIR_UTIMES_CACHE_SIZE) {
+ struct btrfs_lru_cache_entry *lru;
+ int ret;
+
+ lru = btrfs_lru_cache_lru_entry(&sctx->dir_utimes_cache);
+ ASSERT(lru != NULL);
+
+ ret = send_utimes(sctx, lru->key, lru->gen);
+ if (ret)
+ return ret;
+
+ btrfs_lru_cache_remove(&sctx->dir_utimes_cache, lru);
+ }
+
+ return 0;
+}
+
+/*
* Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
* a valid path yet because we did not process the refs yet. So, the inode
* is created as orphan.
*/
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
int cmd;
+ struct btrfs_inode_info info;
u64 gen;
u64 mode;
u64 rdev;
- btrfs_debug(fs_info, "send_create_inode %llu", ino);
-
p = fs_path_alloc();
if (!p)
return -ENOMEM;
if (ino != sctx->cur_ino) {
- ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
- NULL, NULL, &rdev);
+ ret = get_inode_info(sctx->send_root, ino, &info);
if (ret < 0)
goto out;
+ gen = info.gen;
+ mode = info.mode;
+ rdev = info.rdev;
} else {
gen = sctx->cur_inode_gen;
mode = sctx->cur_inode_mode;
@@ -2652,6 +2864,23 @@ out:
return ret;
}
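+/*
+ * Remember that a directory was already created in the send stream, so that
+ * did_create_dir() can avoid searching the send root for it again.
+ */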
+static void cache_dir_created(struct send_ctx *sctx, u64 dir)
+{
+ struct btrfs_lru_cache_entry *entry;
+ int ret;
+
+ /* Caching is optional, ignore any failures. */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ entry->key = dir;
+ entry->gen = 0;
+ ret = btrfs_lru_cache_store(&sctx->dir_created_cache, entry, GFP_KERNEL);
+ if (ret < 0)
+ kfree(entry);
+}
+
/*
* We need some special handling for inodes that get processed before the parent
* directory got created. See process_recorded_refs for details.
@@ -2660,62 +2889,47 @@ out:
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
int ret = 0;
- struct btrfs_path *path = NULL;
+ int iter_ret = 0;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_key di_key;
- struct extent_buffer *eb;
struct btrfs_dir_item *di;
- int slot;
+
+ if (btrfs_lru_cache_lookup(&sctx->dir_created_cache, dir, 0))
+ return 1;
path = alloc_path_for_send();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
key.objectid = dir;
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- while (1) {
- eb = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(sctx->send_root, path);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = 0;
- break;
- }
- continue;
- }
+ btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) {
+ struct extent_buffer *eb = path->nodes[0];
- btrfs_item_key_to_cpu(eb, &found_key, slot);
if (found_key.objectid != key.objectid ||
found_key.type != key.type) {
ret = 0;
- goto out;
+ break;
}
- di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+ di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item);
btrfs_dir_item_key_to_cpu(eb, di, &di_key);
if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
di_key.objectid < sctx->send_progress) {
ret = 1;
- goto out;
+ cache_dir_created(sctx, dir);
+ break;
}
-
- path->slots[0]++;
}
+ /* Catch error found during iteration */
+ if (iter_ret < 0)
+ ret = iter_ret;
-out:
- btrfs_free_path(path);
return ret;
}
@@ -2737,7 +2951,12 @@ static int send_create_inode_if_needed(struct send_ctx *sctx)
return 0;
}
- return send_create_inode(sctx, sctx->cur_ino);
+ ret = send_create_inode(sctx, sctx->cur_ino);
+
+ if (ret == 0 && S_ISDIR(sctx->cur_inode_mode))
+ cache_dir_created(sctx, sctx->cur_ino);
+
+ return ret;
}
struct recorded_ref {
@@ -2747,48 +2966,50 @@ struct recorded_ref {
u64 dir;
u64 dir_gen;
int name_len;
+ struct rb_node node;
+ struct rb_root *root;
};
-static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
+static struct recorded_ref *recorded_ref_alloc(void)
{
- ref->full_path = path;
- ref->name = (char *)kbasename(ref->full_path->start);
- ref->name_len = ref->full_path->end - ref->name;
+ struct recorded_ref *ref;
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return NULL;
+ RB_CLEAR_NODE(&ref->node);
+ INIT_LIST_HEAD(&ref->list);
+ return ref;
}
-/*
- * We need to process new refs before deleted refs, but compare_tree gives us
- * everything mixed. So we first record all refs and later process them.
- * This function is a helper to record one ref.
- */
-static int __record_ref(struct list_head *head, u64 dir,
- u64 dir_gen, struct fs_path *path)
+static void recorded_ref_free(struct recorded_ref *ref)
{
- struct recorded_ref *ref;
-
- ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
- return -ENOMEM;
+ return;
+ if (!RB_EMPTY_NODE(&ref->node))
+ rb_erase(&ref->node, ref->root);
+ list_del(&ref->list);
+ fs_path_free(ref->full_path);
+ kfree(ref);
+}
- ref->dir = dir;
- ref->dir_gen = dir_gen;
- set_ref_path(ref, path);
- list_add_tail(&ref->list, head);
- return 0;
+static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
+{
+ ref->full_path = path;
+ ref->name = (char *)kbasename(ref->full_path->start);
+ ref->name_len = ref->full_path->end - ref->name;
}
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
struct recorded_ref *new;
- new = kmalloc(sizeof(*ref), GFP_KERNEL);
+ new = recorded_ref_alloc();
if (!new)
return -ENOMEM;
new->dir = ref->dir;
new->dir_gen = ref->dir_gen;
- new->full_path = NULL;
- INIT_LIST_HEAD(&new->list);
list_add_tail(&new->list, list);
return 0;
}
@@ -2798,10 +3019,8 @@ static void __free_recorded_refs(struct list_head *head)
struct recorded_ref *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct recorded_ref, list);
- fs_path_free(cur->full_path);
- list_del(&cur->list);
- kfree(cur);
+ cur = list_first_entry(head, struct recorded_ref, list);
+ recorded_ref_free(cur);
}
}
@@ -2831,6 +3050,11 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
goto out;
ret = send_rename(sctx, path, orphan);
+ if (ret < 0)
+ goto out;
+
+ if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
+ ret = fs_path_copy(&sctx->cur_inode_path, orphan);
out:
fs_path_free(orphan);
@@ -2865,6 +3089,7 @@ static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
odi->ino = dir_ino;
odi->gen = dir_gen;
odi->last_dir_index_offset = 0;
+ odi->dir_high_seq_ino = 0;
rb_link_node(&odi->node, parent, p);
rb_insert_color(&odi->node, &sctx->orphan_dirs);
@@ -2914,10 +3139,10 @@ static void free_orphan_dir_info(struct send_ctx *sctx,
* We check this by iterating all dir items and checking if the inode behind
* the dir item was already processed.
*/
-static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
- u64 send_progress)
+static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen)
{
int ret = 0;
+ int iter_ret = 0;
struct btrfs_root *root = sctx->parent_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -2925,6 +3150,8 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
struct btrfs_key loc;
struct btrfs_dir_item *di;
struct orphan_dir_info *odi = NULL;
+ u64 dir_high_seq_ino = 0;
+ u64 last_dir_index_offset = 0;
/*
* Don't try to rmdir the top/root subvolume dir.
@@ -2932,35 +3159,66 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
if (dir == BTRFS_FIRST_FREE_OBJECTID)
return 0;
+ odi = get_orphan_dir_info(sctx, dir, dir_gen);
+ if (odi && sctx->cur_ino < odi->dir_high_seq_ino)
+ return 0;
+
path = alloc_path_for_send();
if (!path)
return -ENOMEM;
- key.objectid = dir;
- key.type = BTRFS_DIR_INDEX_KEY;
- key.offset = 0;
+ if (!odi) {
+ /*
+ * Find the inode number associated with the last dir index
+ * entry. This is very likely the inode with the highest number
+ * of all inodes that have an entry in the directory. We can
+ * then use it so that future calls to can_rmdir(), made while
+ * processing inodes with a lower number, can skip searching the
+ * parent root b+tree for dir index keys.
+ */
+ key.objectid = dir;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = (u64)-1;
- odi = get_orphan_dir_info(sctx, dir, dir_gen);
- if (odi)
- key.offset = odi->last_dir_index_offset;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ goto out;
+ } else if (ret > 0) {
+ /* Can't happen, the root is never empty. */
+ ASSERT(path->slots[0] > 0);
+ if (WARN_ON(path->slots[0] == 0)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ path->slots[0]--;
+ }
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid != dir || key.type != BTRFS_DIR_INDEX_KEY) {
+ /* No index keys, dir can be removed. */
+ ret = 1;
+ goto out;
+ }
- while (1) {
+ di = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_dir_item);
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
+ dir_high_seq_ino = loc.objectid;
+ if (sctx->cur_ino < dir_high_seq_ino) {
+ ret = 0;
+ goto out;
+ }
+
+ btrfs_release_path(path);
+ }
+
+ key.objectid = dir;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = (odi ? odi->last_dir_index_offset : 0);
+
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
struct waiting_dir_move *dm;
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- else if (ret > 0)
- break;
- continue;
- }
- btrfs_item_key_to_cpu(path->nodes[0], &found_key,
- path->slots[0]);
if (found_key.objectid != key.objectid ||
found_key.type != key.type)
break;
@@ -2969,34 +3227,25 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
struct btrfs_dir_item);
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
+ dir_high_seq_ino = max(dir_high_seq_ino, loc.objectid);
+ last_dir_index_offset = found_key.offset;
+
dm = get_waiting_dir_move(sctx, loc.objectid);
if (dm) {
- odi = add_orphan_dir_info(sctx, dir, dir_gen);
- if (IS_ERR(odi)) {
- ret = PTR_ERR(odi);
- goto out;
- }
- odi->gen = dir_gen;
- odi->last_dir_index_offset = found_key.offset;
dm->rmdir_ino = dir;
dm->rmdir_gen = dir_gen;
ret = 0;
goto out;
}
- if (loc.objectid > send_progress) {
- odi = add_orphan_dir_info(sctx, dir, dir_gen);
- if (IS_ERR(odi)) {
- ret = PTR_ERR(odi);
- goto out;
- }
- odi->gen = dir_gen;
- odi->last_dir_index_offset = found_key.offset;
+ if (loc.objectid > sctx->cur_ino) {
ret = 0;
goto out;
}
-
- path->slots[0]++;
+ }
+ if (iter_ret < 0) {
+ ret = iter_ret;
+ goto out;
}
free_orphan_dir_info(sctx, odi);
@@ -3004,7 +3253,22 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
out:
btrfs_free_path(path);
- return ret;
+
+ if (ret)
+ return ret;
+
+ if (!odi) {
+ odi = add_orphan_dir_info(sctx, dir, dir_gen);
+ if (IS_ERR(odi))
+ return PTR_ERR(odi);
+
+ odi->gen = dir_gen;
+ }
+
+ odi->last_dir_index_offset = last_dir_index_offset;
+ odi->dir_high_seq_ino = max(odi->dir_high_seq_ino, dir_high_seq_ino);
+
+ return 0;
}
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
@@ -3289,7 +3553,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
}
gen = odi->gen;
- ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
+ ret = can_rmdir(sctx, rmdir_ino, gen);
if (ret < 0)
goto out;
if (!ret)
@@ -3309,7 +3573,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
}
finish:
- ret = send_utimes(sctx, pm->ino, pm->gen);
+ ret = cache_dir_utimes(sctx, pm->ino, pm->gen);
if (ret < 0)
goto out;
@@ -3321,8 +3585,7 @@ finish:
/*
* The parent inode might have been deleted in the send snapshot
*/
- ret = get_inode_info(sctx->send_root, cur->dir, NULL,
- NULL, NULL, NULL, NULL, NULL);
+ ret = get_inode_info(sctx->send_root, cur->dir, NULL);
if (ret == -ENOENT) {
ret = 0;
continue;
@@ -3330,7 +3593,7 @@ finish:
if (ret < 0)
goto out;
- ret = send_utimes(sctx, cur->dir, cur->dir_gen);
+ ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
}
@@ -3375,7 +3638,7 @@ static void tail_append_pending_moves(struct send_ctx *sctx,
static int apply_children_dir_moves(struct send_ctx *sctx)
{
struct pending_dir_move *pm;
- struct list_head stack;
+ LIST_HEAD(stack);
u64 parent_ino = sctx->cur_ino;
int ret = 0;
@@ -3383,7 +3646,6 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
if (!pm)
return 0;
- INIT_LIST_HEAD(&stack);
tail_append_pending_moves(sctx, pm, &stack);
while (!list_empty(&stack)) {
@@ -3447,8 +3709,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
- struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key di_key;
struct btrfs_dir_item *di;
@@ -3469,19 +3730,15 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = 0;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
- di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
+ di = btrfs_match_dir_item_name(path, parent_ref->name,
parent_ref->name_len);
- if (!di) {
- ret = 0;
- goto out;
- }
+ if (!di)
+ return 0;
/*
* di_key.objectid has the number of the inode that has a dentry in the
* parent directory with the same name that sctx->cur_ino is being
@@ -3491,28 +3748,22 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
* that it happens after that other inode is renamed.
*/
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
- if (di_key.type != BTRFS_INODE_ITEM_KEY) {
- ret = 0;
- goto out;
- }
+ if (di_key.type != BTRFS_INODE_ITEM_KEY)
+ return 0;
- ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
- &left_gen, NULL, NULL, NULL, NULL);
+ ret = get_inode_gen(sctx->parent_root, di_key.objectid, &left_gen);
if (ret < 0)
- goto out;
- ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
- &right_gen, NULL, NULL, NULL, NULL);
+ return ret;
+ ret = get_inode_gen(sctx->send_root, di_key.objectid, &right_gen);
if (ret < 0) {
if (ret == -ENOENT)
ret = 0;
- goto out;
+ return ret;
}
/* Different inode, no need to delay the rename of sctx->cur_ino */
- if (right_gen != left_gen) {
- ret = 0;
- goto out;
- }
+ if (right_gen != left_gen)
+ return 0;
wdm = get_waiting_dir_move(sctx, di_key.objectid);
if (wdm && !wdm->orphanized) {
@@ -3526,8 +3777,6 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
if (!ret)
ret = 1;
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -3564,7 +3813,7 @@ static int check_ino_in_path(struct btrfs_root *root,
}
/*
- * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
+ * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
* possible path (in case ino2 is not a directory and has multiple hard links).
* Return 1 if true, 0 if false and < 0 on error.
*/
@@ -3576,7 +3825,8 @@ static int is_ancestor(struct btrfs_root *root,
{
bool free_fs_path = false;
int ret = 0;
- struct btrfs_path *path = NULL;
+ int iter_ret = 0;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
if (!fs_path) {
@@ -3596,33 +3846,19 @@ static int is_ancestor(struct btrfs_root *root,
key.type = BTRFS_INODE_REF_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- while (true) {
+ btrfs_for_each_slot(root, &key, &key, path, iter_ret) {
struct extent_buffer *leaf = path->nodes[0];
int slot = path->slots[0];
u32 cur_offset = 0;
u32 item_size;
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
- continue;
- }
-
- btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid != ino2)
break;
if (key.type != BTRFS_INODE_REF_KEY &&
key.type != BTRFS_INODE_EXTREF_KEY)
break;
- item_size = btrfs_item_size_nr(leaf, slot);
+ item_size = btrfs_item_size(leaf, slot);
while (cur_offset < item_size) {
u64 parent;
u64 parent_gen;
@@ -3644,8 +3880,7 @@ static int is_ancestor(struct btrfs_root *root,
cur_offset = item_size;
}
- ret = get_inode_info(root, parent, NULL, &parent_gen,
- NULL, NULL, NULL, NULL);
+ ret = get_inode_gen(root, parent, &parent_gen);
if (ret < 0)
goto out;
ret = check_ino_in_path(root, ino1, ino1_gen,
@@ -3653,11 +3888,12 @@ static int is_ancestor(struct btrfs_root *root,
if (ret)
goto out;
}
- path->slots[0]++;
}
ret = 0;
- out:
- btrfs_free_path(path);
+ if (iter_ret < 0)
+ ret = iter_ret;
+
+out:
if (free_fs_path)
fs_path_free(fs_path);
return ret;
@@ -3733,9 +3969,7 @@ static int wait_for_parent_move(struct send_ctx *sctx,
memcmp(path_before->start, path_after->start, len1))) {
u64 parent_ino_gen;
- ret = get_inode_info(sctx->parent_root, ino, NULL,
- &parent_ino_gen, NULL, NULL, NULL,
- NULL);
+ ret = get_inode_gen(sctx->parent_root, ino, &parent_ino_gen);
if (ret < 0)
goto out;
if (ino_gen == parent_ino_gen) {
@@ -3839,7 +4073,7 @@ static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
*/
static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
{
- char *name;
+ char AUTO_KFREE(name);
int ret;
name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
@@ -3849,17 +4083,75 @@ static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
fs_path_reset(ref->full_path);
ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
if (ret < 0)
- goto out;
+ return ret;
ret = fs_path_add(ref->full_path, name, ref->name_len);
if (ret < 0)
- goto out;
+ return ret;
/* Update the reference's base name pointer. */
set_ref_path(ref, ref->full_path);
-out:
- kfree(name);
- return ret;
+
+ return 0;
+}
+
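+/* Compare recorded refs by (dir, dir_gen), for the check_dirs rbtree. */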
+static int rbtree_check_dir_ref_comp(const void *k, const struct rb_node *node)
+{
+ const struct recorded_ref *data = k;
+ const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
+
+ if (data->dir > ref->dir)
+ return 1;
+ if (data->dir < ref->dir)
+ return -1;
+ if (data->dir_gen > ref->dir_gen)
+ return 1;
+ if (data->dir_gen < ref->dir_gen)
+ return -1;
+ return 0;
+}
+
+static bool rbtree_check_dir_ref_less(struct rb_node *node, const struct rb_node *parent)
+{
+ const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
+
+ return rbtree_check_dir_ref_comp(entry, parent) < 0;
+}
+
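+/*
+ * Add a ref's parent directory to the check_dirs list and rbtree, unless an
+ * entry for the same (dir, dir_gen) was already recorded.
+ */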
+static int record_check_dir_ref_in_tree(struct rb_root *root,
+ struct recorded_ref *ref, struct list_head *list)
+{
+ struct recorded_ref *tmp_ref;
+ int ret;
+
+ if (rb_find(ref, root, rbtree_check_dir_ref_comp))
+ return 0;
+
+ ret = dup_ref(ref, list);
+ if (ret < 0)
+ return ret;
+
+ tmp_ref = list_last_entry(list, struct recorded_ref, list);
+ rb_add(&tmp_ref->node, root, rbtree_check_dir_ref_less);
+ tmp_ref->root = root;
+ return 0;
+}
+
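+/*
+ * Rename the inode currently being processed and keep both @current_path and
+ * the cached current inode path in sync with its new location.
+ */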
+static int rename_current_inode(struct send_ctx *sctx,
+ struct fs_path *current_path,
+ struct fs_path *new_path)
+{
+ int ret;
+
+ ret = send_rename(sctx, current_path, new_path);
+ if (ret < 0)
+ return ret;
+
+ ret = fs_path_copy(&sctx->cur_inode_path, new_path);
+ if (ret < 0)
+ return ret;
+
+ return fs_path_copy(current_path, new_path);
}
/*
@@ -3871,26 +4163,29 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
int ret = 0;
struct recorded_ref *cur;
struct recorded_ref *cur2;
- struct list_head check_dirs;
+ LIST_HEAD(check_dirs);
+ struct rb_root rbtree_check_dirs = RB_ROOT;
struct fs_path *valid_path = NULL;
u64 ow_inode = 0;
u64 ow_gen;
u64 ow_mode;
- int did_overwrite = 0;
- int is_orphan = 0;
- u64 last_dir_ino_rm = 0;
+ bool did_overwrite = false;
+ bool is_orphan = false;
bool can_rename = true;
bool orphanized_dir = false;
bool orphanized_ancestor = false;
- btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
-
/*
* This should never happen as the root dir always has the same ref
* which is always '..'
*/
- BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
- INIT_LIST_HEAD(&check_dirs);
+ if (unlikely(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID)) {
+ btrfs_err(fs_info,
+ "send: unexpected inode %llu in process_recorded_refs()",
+ sctx->cur_ino);
+ ret = -EINVAL;
+ goto out;
+ }
valid_path = fs_path_alloc();
if (!valid_path) {
@@ -3915,14 +4210,14 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (ret < 0)
goto out;
if (ret)
- did_overwrite = 1;
+ did_overwrite = true;
}
if (sctx->cur_inode_new || did_overwrite) {
ret = gen_unique_name(sctx, sctx->cur_ino,
sctx->cur_inode_gen, valid_path);
if (ret < 0)
goto out;
- is_orphan = 1;
+ is_orphan = true;
} else {
ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
valid_path);
@@ -3969,7 +4264,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* "testdir_2".
*/
list_for_each_entry(cur, &sctx->new_refs, list) {
- ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
+ ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
if (ret < 0)
goto out;
if (ret == inode_state_will_create)
@@ -4015,12 +4310,9 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* the source path when performing its rename
* operation.
*/
- if (is_waiting_for_move(sctx, ow_inode)) {
- wdm = get_waiting_dir_move(sctx,
- ow_inode);
- ASSERT(wdm);
+ wdm = get_waiting_dir_move(sctx, ow_inode);
+ if (wdm)
wdm->orphanized = true;
- }
/*
* Make sure we clear our orphanized inode's
@@ -4033,10 +4325,9 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* and get instead the orphan name.
*/
nce = name_cache_search(sctx, ow_inode, ow_gen);
- if (nce) {
- name_cache_delete(sctx, nce);
- kfree(nce);
- }
+ if (nce)
+ btrfs_lru_cache_remove(&sctx->name_cache,
+ &nce->entry);
/*
* ow_inode might currently be an ancestor of
@@ -4051,6 +4342,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (ret > 0) {
orphanized_ancestor = true;
fs_path_reset(valid_path);
+ fs_path_reset(&sctx->cur_inode_path);
ret = get_cur_path(sctx, sctx->cur_ino,
sctx->cur_inode_gen,
valid_path);
@@ -4085,7 +4377,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* parent directory out of order. But we need to check if this
* did already happen before due to other refs in the same dir.
*/
- ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
+ ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
if (ret < 0)
goto out;
if (ret == inode_state_will_create) {
@@ -4115,6 +4407,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_create_inode(sctx, cur->dir);
if (ret < 0)
goto out;
+ cache_dir_created(sctx, cur->dir);
}
}
@@ -4145,13 +4438,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* it depending on the inode mode.
*/
if (is_orphan && can_rename) {
- ret = send_rename(sctx, valid_path, cur->full_path);
- if (ret < 0)
- goto out;
- is_orphan = 0;
- ret = fs_path_copy(valid_path, cur->full_path);
+ ret = rename_current_inode(sctx, valid_path, cur->full_path);
if (ret < 0)
goto out;
+ is_orphan = false;
} else if (can_rename) {
if (S_ISDIR(sctx->cur_inode_mode)) {
/*
@@ -4159,10 +4449,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* dirs, we always have one new and one deleted
* ref. The deleted ref is ignored later.
*/
- ret = send_rename(sctx, valid_path,
- cur->full_path);
- if (!ret)
- ret = fs_path_copy(valid_path,
+ ret = rename_current_inode(sctx, valid_path,
cur->full_path);
if (ret < 0)
goto out;
@@ -4185,7 +4472,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
goto out;
}
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4197,8 +4484,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* later, we do this check again and rmdir it then if possible.
* See the use of check_dirs for more details.
*/
- ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
- sctx->cur_ino);
+ ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen);
if (ret < 0)
goto out;
if (ret) {
@@ -4210,11 +4496,11 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
sctx->cur_inode_gen, valid_path);
if (ret < 0)
goto out;
- is_orphan = 1;
+ is_orphan = true;
}
list_for_each_entry(cur, &sctx->deleted_refs, list) {
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4223,9 +4509,8 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
/*
* We have a moved dir. Add the old parent to check_dirs
*/
- cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
- list);
- ret = dup_ref(cur, &check_dirs);
+ cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
} else if (!S_ISDIR(sctx->cur_inode_mode)) {
@@ -4256,8 +4541,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
goto out;
+ if (is_current_inode_path(sctx, cur->full_path))
+ fs_path_reset(&sctx->cur_inode_path);
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4291,20 +4578,17 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (cur->dir > sctx->cur_ino)
continue;
- ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
+ ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
if (ret < 0)
goto out;
if (ret == inode_state_did_create ||
ret == inode_state_no_change) {
- /* TODO delayed utimes */
- ret = send_utimes(sctx, cur->dir, cur->dir_gen);
+ ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
- } else if (ret == inode_state_did_delete &&
- cur->dir != last_dir_ino_rm) {
- ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
- sctx->cur_ino);
+ } else if (ret == inode_state_did_delete) {
+ ret = can_rmdir(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
if (ret) {
@@ -4315,7 +4599,6 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_rmdir(sctx, valid_path);
if (ret < 0)
goto out;
- last_dir_ino_rm = cur->dir;
}
}
}
@@ -4329,204 +4612,171 @@ out:
return ret;
}
-static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
- void *ctx, struct list_head *refs)
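+/* Compare recorded refs by (dir, dir_gen, name), for the new/deleted refs rbtrees. */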
+static int rbtree_ref_comp(const void *k, const struct rb_node *node)
{
- int ret = 0;
- struct send_ctx *sctx = ctx;
- struct fs_path *p;
- u64 gen;
-
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
-
- ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
- NULL, NULL);
- if (ret < 0)
- goto out;
-
- ret = get_cur_path(sctx, dir, gen, p);
- if (ret < 0)
- goto out;
- ret = fs_path_add_path(p, name);
- if (ret < 0)
- goto out;
+ const struct recorded_ref *data = k;
+ const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
- ret = __record_ref(refs, dir, gen, p);
-
-out:
- if (ret)
- fs_path_free(p);
- return ret;
+ if (data->dir > ref->dir)
+ return 1;
+ if (data->dir < ref->dir)
+ return -1;
+ if (data->dir_gen > ref->dir_gen)
+ return 1;
+ if (data->dir_gen < ref->dir_gen)
+ return -1;
+ if (data->name_len > ref->name_len)
+ return 1;
+ if (data->name_len < ref->name_len)
+ return -1;
+ return strcmp(data->name, ref->name);
}
-static int __record_new_ref(int num, u64 dir, int index,
- struct fs_path *name,
- void *ctx)
+static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent)
{
- struct send_ctx *sctx = ctx;
- return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
-}
-
+ const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
-static int __record_deleted_ref(int num, u64 dir, int index,
- struct fs_path *name,
- void *ctx)
-{
- struct send_ctx *sctx = ctx;
- return record_ref(sctx->parent_root, dir, name, ctx,
- &sctx->deleted_refs);
+ return rbtree_ref_comp(entry, parent) < 0;
}
-static int record_new_ref(struct send_ctx *sctx)
+static int record_ref_in_tree(struct rb_root *root, struct list_head *refs,
+ struct fs_path *name, u64 dir, u64 dir_gen,
+ struct send_ctx *sctx)
{
- int ret;
+ int ret = 0;
+ struct fs_path *path = NULL;
+ struct recorded_ref *ref = NULL;
- ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
- sctx->cmp_key, 0, __record_new_ref, sctx);
- if (ret < 0)
+ path = fs_path_alloc();
+ if (!path) {
+ ret = -ENOMEM;
goto out;
- ret = 0;
-
-out:
- return ret;
-}
+ }
-static int record_deleted_ref(struct send_ctx *sctx)
-{
- int ret;
+ ref = recorded_ref_alloc();
+ if (!ref) {
+ ret = -ENOMEM;
+ goto out;
+ }
- ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, 0, __record_deleted_ref, sctx);
+ ret = get_cur_path(sctx, dir, dir_gen, path);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_add_path(path, name);
if (ret < 0)
goto out;
- ret = 0;
+ ref->dir = dir;
+ ref->dir_gen = dir_gen;
+ set_ref_path(ref, path);
+ list_add_tail(&ref->list, refs);
+ rb_add(&ref->node, root, rbtree_ref_less);
+ ref->root = root;
out:
+ if (ret) {
+ if (path && (!ref || !ref->full_path))
+ fs_path_free(path);
+ recorded_ref_free(ref);
+ }
return ret;
}
-struct find_ref_ctx {
- u64 dir;
- u64 dir_gen;
- struct btrfs_root *root;
- struct fs_path *name;
- int found_idx;
-};
-
-static int __find_iref(int num, u64 dir, int index,
- struct fs_path *name,
- void *ctx_)
+static int record_new_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
{
- struct find_ref_ctx *ctx = ctx_;
- u64 dir_gen;
int ret;
+ struct send_ctx *sctx = ctx;
+ struct rb_node *node = NULL;
+ struct recorded_ref data;
+ struct recorded_ref *ref;
+ u64 dir_gen;
- if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
- strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
- /*
- * To avoid doing extra lookups we'll only do this if everything
- * else matches.
- */
- ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
- NULL, NULL, NULL);
- if (ret)
- return ret;
- if (dir_gen != ctx->dir_gen)
- return 0;
- ctx->found_idx = num;
- return 1;
+ ret = get_inode_gen(sctx->send_root, dir, &dir_gen);
+ if (ret < 0)
+ return ret;
+
+ data.dir = dir;
+ data.dir_gen = dir_gen;
+ set_ref_path(&data, name);
+ node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp);
+ if (node) {
+ ref = rb_entry(node, struct recorded_ref, node);
+ recorded_ref_free(ref);
+ } else {
+ ret = record_ref_in_tree(&sctx->rbtree_new_refs,
+ &sctx->new_refs, name, dir, dir_gen,
+ sctx);
}
- return 0;
+
+ return ret;
}
-static int find_iref(struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *key,
- u64 dir, u64 dir_gen, struct fs_path *name)
+static int record_deleted_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
{
int ret;
- struct find_ref_ctx ctx;
-
- ctx.dir = dir;
- ctx.name = name;
- ctx.dir_gen = dir_gen;
- ctx.found_idx = -1;
- ctx.root = root;
+ struct send_ctx *sctx = ctx;
+ struct rb_node *node = NULL;
+ struct recorded_ref data;
+ struct recorded_ref *ref;
+ u64 dir_gen;
- ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
+ ret = get_inode_gen(sctx->parent_root, dir, &dir_gen);
if (ret < 0)
return ret;
- if (ctx.found_idx == -1)
- return -ENOENT;
+ data.dir = dir;
+ data.dir_gen = dir_gen;
+ set_ref_path(&data, name);
+ node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp);
+ if (node) {
+ ref = rb_entry(node, struct recorded_ref, node);
+ recorded_ref_free(ref);
+ } else {
+ ret = record_ref_in_tree(&sctx->rbtree_deleted_refs,
+ &sctx->deleted_refs, name, dir,
+ dir_gen, sctx);
+ }
- return ctx.found_idx;
+ return ret;
}
-static int __record_changed_new_ref(int num, u64 dir, int index,
- struct fs_path *name,
- void *ctx)
+static int record_new_ref(struct send_ctx *sctx)
{
- u64 dir_gen;
int ret;
- struct send_ctx *sctx = ctx;
- ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
- NULL, NULL, NULL);
- if (ret)
+ ret = iterate_inode_ref(sctx->send_root, sctx->left_path, sctx->cmp_key,
+ false, record_new_ref_if_needed, sctx);
+ if (ret < 0)
return ret;
- ret = find_iref(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, dir, dir_gen, name);
- if (ret == -ENOENT)
- ret = __record_new_ref(num, dir, index, name, sctx);
- else if (ret > 0)
- ret = 0;
-
- return ret;
+ return 0;
}
-static int __record_changed_deleted_ref(int num, u64 dir, int index,
- struct fs_path *name,
- void *ctx)
+static int record_deleted_ref(struct send_ctx *sctx)
{
- u64 dir_gen;
int ret;
- struct send_ctx *sctx = ctx;
- ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
- NULL, NULL, NULL);
- if (ret)
+ ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, sctx->cmp_key,
+ false, record_deleted_ref_if_needed, sctx);
+ if (ret < 0)
return ret;
- ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
- dir, dir_gen, name);
- if (ret == -ENOENT)
- ret = __record_deleted_ref(num, dir, index, name, sctx);
- else if (ret > 0)
- ret = 0;
-
- return ret;
+ return 0;
}
static int record_changed_ref(struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
- ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
- sctx->cmp_key, 0, __record_changed_new_ref, sctx);
+ ret = iterate_inode_ref(sctx->send_root, sctx->left_path, sctx->cmp_key,
+ false, record_new_ref_if_needed, sctx);
if (ret < 0)
- goto out;
- ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
+ return ret;
+ ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, sctx->cmp_key,
+ false, record_deleted_ref_if_needed, sctx);
if (ret < 0)
- goto out;
- ret = 0;
+ return ret;
-out:
- return ret;
+ return 0;
}
/*
@@ -4536,13 +4786,12 @@ out:
static int process_all_refs(struct send_ctx *sctx,
enum btrfs_compare_tree_result cmd)
{
- int ret;
+ int ret = 0;
+ int iter_ret = 0;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
- struct extent_buffer *eb;
- int slot;
iterate_inode_ref_t cb;
int pending_move = 0;
@@ -4552,49 +4801,33 @@ static int process_all_refs(struct send_ctx *sctx,
if (cmd == BTRFS_COMPARE_TREE_NEW) {
root = sctx->send_root;
- cb = __record_new_ref;
+ cb = record_new_ref_if_needed;
} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
root = sctx->parent_root;
- cb = __record_deleted_ref;
+ cb = record_deleted_ref_if_needed;
} else {
btrfs_err(sctx->send_root->fs_info,
"Wrong command %d in process_all_refs", cmd);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
key.objectid = sctx->cmp_key->objectid;
key.type = BTRFS_INODE_REF_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- while (1) {
- eb = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- else if (ret > 0)
- break;
- continue;
- }
-
- btrfs_item_key_to_cpu(eb, &found_key, slot);
-
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
if (found_key.objectid != key.objectid ||
(found_key.type != BTRFS_INODE_REF_KEY &&
found_key.type != BTRFS_INODE_EXTREF_KEY))
break;
- ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
+ ret = iterate_inode_ref(root, path, &found_key, false, cb, sctx);
if (ret < 0)
- goto out;
-
- path->slots[0]++;
+ return ret;
}
+ /* Catch error found during iteration */
+ if (iter_ret < 0)
+ return iter_ret;
+
btrfs_release_path(path);
/*
@@ -4602,22 +4835,23 @@ static int process_all_refs(struct send_ctx *sctx,
* re-creating this inode and will be renaming it into place once we
* rename the parent directory.
*/
- ret = process_recorded_refs(sctx, &pending_move);
-out:
- btrfs_free_path(path);
- return ret;
+ return process_recorded_refs(sctx, &pending_move);
}
static int send_set_xattr(struct send_ctx *sctx,
- struct fs_path *path,
const char *name, int name_len,
const char *data, int data_len)
{
- int ret = 0;
+ struct fs_path *path;
+ int ret;
+
+ path = get_cur_inode_path(sctx);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
@@ -4626,7 +4860,6 @@ static int send_set_xattr(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
@@ -4634,11 +4867,11 @@ static int send_remove_xattr(struct send_ctx *sctx,
struct fs_path *path,
const char *name, int name_len)
{
- int ret = 0;
+ int ret;
ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
@@ -4646,28 +4879,20 @@ static int send_remove_xattr(struct send_ctx *sctx,
ret = send_cmd(sctx);
tlv_put_failure:
-out:
return ret;
}
static int __process_new_xattr(int num, struct btrfs_key *di_key,
- const char *name, int name_len,
- const char *data, int data_len,
- u8 type, void *ctx)
+ const char *name, int name_len, const char *data,
+ int data_len, void *ctx)
{
- int ret;
struct send_ctx *sctx = ctx;
- struct fs_path *p;
struct posix_acl_xattr_header dummy_acl;
/* Capabilities are emitted by finish_inode_if_needed */
if (!strncmp(name, XATTR_NAME_CAPS, name_len))
return 0;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
-
/*
* This hack is needed because empty acls are stored as zero byte
* data in xattrs. The problem with that is that receiving these zero byte
@@ -4684,49 +4909,27 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
}
}
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
-
- ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
-
-out:
- fs_path_free(p);
- return ret;
+ return send_set_xattr(sctx, name, name_len, data, data_len);
}
static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
const char *name, int name_len,
- const char *data, int data_len,
- u8 type, void *ctx)
+ const char *data, int data_len, void *ctx)
{
- int ret;
struct send_ctx *sctx = ctx;
struct fs_path *p;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
-
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
-
- ret = send_remove_xattr(sctx, p, name, name_len);
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
-out:
- fs_path_free(p);
- return ret;
+ return send_remove_xattr(sctx, p, name, name_len);
}
static int process_new_xattr(struct send_ctx *sctx)
{
- int ret = 0;
-
- ret = iterate_dir_item(sctx->send_root, sctx->left_path,
- __process_new_xattr, sctx);
-
- return ret;
+ return iterate_dir_item(sctx->send_root, sctx->left_path,
+ __process_new_xattr, sctx);
}
static int process_deleted_xattr(struct send_ctx *sctx)
@@ -4741,12 +4944,11 @@ struct find_xattr_ctx {
int found_idx;
char *found_data;
int found_data_len;
+ bool copy_data;
};
-static int __find_xattr(int num, struct btrfs_key *di_key,
- const char *name, int name_len,
- const char *data, int data_len,
- u8 type, void *vctx)
+static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
+ int name_len, const char *data, int data_len, void *vctx)
{
struct find_xattr_ctx *ctx = vctx;
@@ -4754,9 +4956,11 @@ static int __find_xattr(int num, struct btrfs_key *di_key,
strncmp(name, ctx->name, name_len) == 0) {
ctx->found_idx = num;
ctx->found_data_len = data_len;
- ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
- if (!ctx->found_data)
- return -ENOMEM;
+ if (ctx->copy_data) {
+ ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
+ if (!ctx->found_data)
+ return -ENOMEM;
+ }
return 1;
}
return 0;
@@ -4776,6 +4980,7 @@ static int find_xattr(struct btrfs_root *root,
ctx.found_idx = -1;
ctx.found_data = NULL;
ctx.found_data_len = 0;
+ ctx.copy_data = (data != NULL);
ret = iterate_dir_item(root, path, __find_xattr, &ctx);
if (ret < 0)
@@ -4787,7 +4992,7 @@ static int find_xattr(struct btrfs_root *root,
*data = ctx.found_data;
*data_len = ctx.found_data_len;
} else {
- kfree(ctx.found_data);
+ ASSERT(ctx.found_data == NULL);
}
return ctx.found_idx;
}
@@ -4796,37 +5001,36 @@ static int find_xattr(struct btrfs_root *root,
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
const char *name, int name_len,
const char *data, int data_len,
- u8 type, void *ctx)
+ void *ctx)
{
int ret;
struct send_ctx *sctx = ctx;
- char *found_data = NULL;
- int found_data_len = 0;
+ char AUTO_KFREE(found_data);
+ int found_data_len = 0;
ret = find_xattr(sctx->parent_root, sctx->right_path,
sctx->cmp_key, name, name_len, &found_data,
&found_data_len);
if (ret == -ENOENT) {
ret = __process_new_xattr(num, di_key, name, name_len, data,
- data_len, type, ctx);
+ data_len, ctx);
} else if (ret >= 0) {
if (data_len != found_data_len ||
memcmp(data, found_data, data_len)) {
ret = __process_new_xattr(num, di_key, name, name_len,
- data, data_len, type, ctx);
+ data, data_len, ctx);
} else {
ret = 0;
}
}
- kfree(found_data);
return ret;
}
static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
const char *name, int name_len,
const char *data, int data_len,
- u8 type, void *ctx)
+ void *ctx)
{
int ret;
struct send_ctx *sctx = ctx;
@@ -4835,7 +5039,7 @@ static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
name, name_len, NULL, NULL);
if (ret == -ENOENT)
ret = __process_deleted_xattr(num, di_key, name, name_len, data,
- data_len, type, ctx);
+ data_len, ctx);
else if (ret >= 0)
ret = 0;
@@ -4844,28 +5048,25 @@ static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
static int process_changed_xattr(struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
ret = iterate_dir_item(sctx->send_root, sctx->left_path,
__process_changed_new_xattr, sctx);
if (ret < 0)
- goto out;
- ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
- __process_changed_deleted_xattr, sctx);
+ return ret;
-out:
- return ret;
+ return iterate_dir_item(sctx->parent_root, sctx->right_path,
+ __process_changed_deleted_xattr, sctx);
}
static int process_all_new_xattrs(struct send_ctx *sctx)
{
- int ret;
+ int ret = 0;
+ int iter_ret = 0;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
- struct extent_buffer *eb;
- int slot;
path = alloc_path_for_send();
if (!path)
@@ -4876,40 +5077,89 @@ static int process_all_new_xattrs(struct send_ctx *sctx)
key.objectid = sctx->cmp_key->objectid;
key.type = BTRFS_XATTR_ITEM_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- while (1) {
- eb = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = 0;
- break;
- }
- continue;
- }
-
- btrfs_item_key_to_cpu(eb, &found_key, slot);
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
if (found_key.objectid != key.objectid ||
found_key.type != key.type) {
ret = 0;
- goto out;
+ break;
}
ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
if (ret < 0)
- goto out;
+ break;
+ }
+ /* Catch error found during iteration */
+ if (iter_ret < 0)
+ ret = iter_ret;
- path->slots[0]++;
+ return ret;
+}
+
+static int send_verity(struct send_ctx *sctx, struct fs_path *path,
+ struct fsverity_descriptor *desc)
+{
+ int ret;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_ENABLE_VERITY);
+ if (ret < 0)
+ return ret;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_U8(sctx, BTRFS_SEND_A_VERITY_ALGORITHM,
+ le8_to_cpu(desc->hash_algorithm));
+ TLV_PUT_U32(sctx, BTRFS_SEND_A_VERITY_BLOCK_SIZE,
+ 1U << le8_to_cpu(desc->log_blocksize));
+ TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SALT_DATA, desc->salt,
+ le8_to_cpu(desc->salt_size));
+ TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SIG_DATA, desc->signature,
+ le32_to_cpu(desc->sig_size));
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+ return ret;
+}
+
+static int process_verity(struct send_ctx *sctx)
+{
+ int ret = 0;
+ struct btrfs_inode *inode;
+ struct fs_path *p;
+
+ inode = btrfs_iget(sctx->cur_ino, sctx->send_root);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ ret = btrfs_get_verity_descriptor(&inode->vfs_inode, NULL, 0);
+ if (ret < 0)
+ goto iput;
+
+ if (unlikely(ret > FS_VERITY_MAX_DESCRIPTOR_SIZE)) {
+ ret = -EMSGSIZE;
+ goto iput;
+ }
+ if (!sctx->verity_descriptor) {
+ sctx->verity_descriptor = kvmalloc(FS_VERITY_MAX_DESCRIPTOR_SIZE,
+ GFP_KERNEL);
+ if (!sctx->verity_descriptor) {
+ ret = -ENOMEM;
+ goto iput;
+ }
}
-out:
- btrfs_free_path(path);
+ ret = btrfs_get_verity_descriptor(&inode->vfs_inode, sctx->verity_descriptor, ret);
+ if (ret < 0)
+ goto iput;
+
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto iput;
+ }
+
+ ret = send_verity(sctx, p, sctx->verity_descriptor);
+iput:
+ iput(&inode->vfs_inode);
return ret;
}
@@ -4920,14 +5170,28 @@ static inline u64 max_send_read_size(const struct send_ctx *sctx)
static int put_data_header(struct send_ctx *sctx, u32 len)
{
- struct btrfs_tlv_header *hdr;
+ if (WARN_ON_ONCE(sctx->put_data))
+ return -EINVAL;
+ sctx->put_data = true;
+ if (sctx->proto >= 2) {
+ /*
+ * Since v2, the data attribute header doesn't include a length,
+ * it is implicitly to the end of the command.
+ */
+ if (unlikely(sctx->send_max_size - sctx->send_size < sizeof(__le16) + len))
+ return -EOVERFLOW;
+ put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size);
+ sctx->send_size += sizeof(__le16);
+ } else {
+ struct btrfs_tlv_header *hdr;
- if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
- return -EOVERFLOW;
- hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
- put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
- put_unaligned_le16(len, &hdr->tlv_len);
- sctx->send_size += sizeof(*hdr);
+ if (unlikely(sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len))
+ return -EOVERFLOW;
+ hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
+ put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
+ put_unaligned_le16(len, &hdr->tlv_len);
+ sctx->send_size += sizeof(*hdr);
+ }
return 0;
}
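As a side note on the hunk above: put_data_header() now emits two different framings for the data attribute. Below is a minimal userspace sketch of the difference, assuming a little-endian host; EXAMPLE_A_DATA and the helper names are illustrative stand-ins, not taken from the patch.

/*
 * Illustrative sketch (not part of the patch): v1 writes a 4-byte TLV
 * header (attribute id + explicit length), while v2 writes only the
 * 2-byte attribute id and the data implicitly runs to the end of the
 * command.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EXAMPLE_A_DATA 21	/* assumed stand-in for BTRFS_SEND_A_DATA */

struct example_tlv_header {
	uint16_t tlv_type;
	uint16_t tlv_len;
} __attribute__((packed));

/* v1 framing: explicit length, payload follows the header. */
static size_t put_data_header_v1(uint8_t *buf, size_t pos, uint16_t len)
{
	struct example_tlv_header hdr = { EXAMPLE_A_DATA, len };

	memcpy(buf + pos, &hdr, sizeof(hdr));
	return pos + sizeof(hdr);
}

/* v2 framing: only the attribute id, the length is implicit. */
static size_t put_data_header_v2(uint8_t *buf, size_t pos)
{
	uint16_t attr = EXAMPLE_A_DATA;

	memcpy(buf + pos, &attr, sizeof(attr));
	return pos + sizeof(attr);
}

int main(void)
{
	uint8_t buf[64];

	/* 4 bytes of header overhead for v1, 2 bytes for v2. */
	printf("v1: %zu bytes, v2: %zu bytes\n",
	       put_data_header_v1(buf, 0, 16), put_data_header_v2(buf, 0));
	return 0;
}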
@@ -4935,70 +5199,69 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
{
struct btrfs_root *root = sctx->send_root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode *inode;
- struct page *page;
- pgoff_t index = offset >> PAGE_SHIFT;
- pgoff_t last_index;
- unsigned pg_offset = offset_in_page(offset);
+ u64 cur = offset;
+ const u64 end = offset + len;
+ const pgoff_t last_index = ((end - 1) >> PAGE_SHIFT);
+ struct address_space *mapping = sctx->cur_inode->i_mapping;
int ret;
ret = put_data_header(sctx, len);
if (ret)
return ret;
- inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- last_index = (offset + len - 1) >> PAGE_SHIFT;
-
- /* initial readahead */
- memset(&sctx->ra, 0, sizeof(struct file_ra_state));
- file_ra_state_init(&sctx->ra, inode->i_mapping);
-
- while (index <= last_index) {
- unsigned cur_len = min_t(unsigned, len,
- PAGE_SIZE - pg_offset);
-
- page = find_lock_page(inode->i_mapping, index);
- if (!page) {
- page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
- NULL, index, last_index + 1 - index);
-
- page = find_or_create_page(inode->i_mapping, index,
- GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
+ while (cur < end) {
+ pgoff_t index = (cur >> PAGE_SHIFT);
+ unsigned int cur_len;
+ unsigned int pg_offset;
+ struct folio *folio;
+
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR(folio)) {
+ page_cache_sync_readahead(mapping,
+ &sctx->ra, NULL, index,
+ last_index + 1 - index);
+
+ folio = filemap_grab_folio(mapping, index);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
break;
}
}
-
- if (PageReadahead(page)) {
- page_cache_async_readahead(inode->i_mapping, &sctx->ra,
- NULL, page, index, last_index + 1 - index);
- }
-
- if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- put_page(page);
+ pg_offset = offset_in_folio(folio, cur);
+ cur_len = min_t(unsigned int, end - cur, folio_size(folio) - pg_offset);
+
+ if (folio_test_readahead(folio))
+ page_cache_async_readahead(mapping, &sctx->ra, NULL, folio,
+ last_index + 1 - index);
+
+ if (!folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ folio_unlock(folio);
+ btrfs_err(fs_info,
+ "send: IO error at offset %llu for inode %llu root %llu",
+ folio_pos(folio), sctx->cur_ino,
+ btrfs_root_id(sctx->send_root));
+ folio_put(folio);
ret = -EIO;
break;
}
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ continue;
+ }
}
- memcpy_from_page(sctx->send_buf + sctx->send_size, page,
- pg_offset, cur_len);
- unlock_page(page);
- put_page(page);
- index++;
- pg_offset = 0;
- len -= cur_len;
+ memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
+ pg_offset, cur_len);
+ folio_unlock(folio);
+ folio_put(folio);
+ cur += cur_len;
sctx->send_size += cur_len;
}
- iput(inode);
+
return ret;
}
@@ -5008,35 +5271,26 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
*/
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
-
- btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
if (ret < 0)
- goto out;
-
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
+ return ret;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
ret = put_file_data(sctx, offset, len);
if (ret < 0)
- goto out;
+ return ret;
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- fs_path_free(p);
return ret;
}
@@ -5049,12 +5303,12 @@ static int send_clone(struct send_ctx *sctx,
{
int ret = 0;
struct fs_path *p;
+ struct fs_path *cur_inode_path;
u64 gen;
- btrfs_debug(sctx->send_root->fs_info,
- "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
- offset, len, clone_root->root->root_key.objectid,
- clone_root->ino, clone_root->offset);
+ cur_inode_path = get_cur_inode_path(sctx);
+ if (IS_ERR(cur_inode_path))
+ return PTR_ERR(cur_inode_path);
p = fs_path_alloc();
if (!p)
@@ -5064,17 +5318,12 @@ static int send_clone(struct send_ctx *sctx,
if (ret < 0)
goto out;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto out;
-
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, cur_inode_path);
if (clone_root->root == sctx->send_root) {
- ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
- &gen, NULL, NULL, NULL, NULL);
+ ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen);
if (ret < 0)
goto out;
ret = get_cur_path(sctx, clone_root->ino, gen, p);
@@ -5122,27 +5371,45 @@ static int send_update_extent(struct send_ctx *sctx,
int ret = 0;
struct fs_path *p;
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
if (ret < 0)
- goto out;
+ return ret;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+ return ret;
+}
+
+static int send_fallocate(struct send_ctx *sctx, u32 mode, u64 offset, u64 len)
+{
+ struct fs_path *path;
+ int ret;
+
+ path = get_cur_inode_path(sctx);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_FALLOCATE);
if (ret < 0)
- goto out;
+ return ret;
- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_U32(sctx, BTRFS_SEND_A_FALLOCATE_MODE, mode);
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
ret = send_cmd(sctx);
tlv_put_failure:
-out:
- fs_path_free(p);
return ret;
}
@@ -5154,6 +5421,14 @@ static int send_hole(struct send_ctx *sctx, u64 end)
int ret = 0;
/*
+ * Starting with send stream v2 we have fallocate and can use it to
+ * punch holes instead of sending writes full of zeroes.
+ */
+ if (proto_cmd_ok(sctx, BTRFS_SEND_C_FALLOCATE))
+ return send_fallocate(sctx, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ offset, end - offset);
+
+ /*
* A hole that starts at EOF or beyond it. Since we do not yet support
* fallocate (for extent preallocation and hole punching), sending a
* write of zeroes starting at EOF or beyond would later require issuing
@@ -5171,12 +5446,10 @@ static int send_hole(struct send_ctx *sctx, u64 end)
if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
return send_update_extent(sctx, offset, end - offset);
- p = fs_path_alloc();
- if (!p)
- return -ENOMEM;
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
- if (ret < 0)
- goto tlv_put_failure;
+ p = get_cur_inode_path(sctx);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
while (offset < end) {
u64 len = min(end - offset, read_size);
@@ -5197,20 +5470,233 @@ static int send_hole(struct send_ctx *sctx, u64 end)
}
sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
- fs_path_free(p);
return ret;
}
-static int send_extent_data(struct send_ctx *sctx,
- const u64 offset,
- const u64 len)
+static int send_encoded_inline_extent(struct send_ctx *sctx,
+ struct btrfs_path *path, u64 offset,
+ u64 len)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
+ struct fs_path *fspath;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *ei;
+ u64 ram_bytes;
+ size_t inline_size;
+ int ret;
+
+ fspath = get_cur_inode_path(sctx);
+ if (IS_ERR(fspath))
+ return PTR_ERR(fspath);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
+ if (ret < 0)
+ return ret;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei);
+ inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
+ min(key.offset + ram_bytes - offset, len));
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, ram_bytes);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset);
+ ret = btrfs_encoded_io_compression_from_extent(fs_info,
+ btrfs_file_extent_compression(leaf, ei));
+ if (ret < 0)
+ return ret;
+ TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
+
+ ret = put_data_header(sctx, inline_size);
+ if (ret < 0)
+ return ret;
+ read_extent_buffer(leaf, sctx->send_buf + sctx->send_size,
+ btrfs_file_extent_inline_start(ei), inline_size);
+ sctx->send_size += inline_size;
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+ return ret;
+}
+
+static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
+ u64 offset, u64 len)
+{
+ struct btrfs_root *root = sctx->send_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_inode *inode;
+ struct fs_path *fspath;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *ei;
+ u64 disk_bytenr, disk_num_bytes;
+ u32 data_offset;
+ struct btrfs_cmd_header *hdr;
+ u32 crc;
+ int ret;
+
+ inode = btrfs_iget(sctx->cur_ino, root);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ fspath = get_cur_inode_path(sctx);
+ if (IS_ERR(fspath)) {
+ ret = PTR_ERR(fspath);
+ goto out;
+ }
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
+ if (ret < 0)
+ goto out;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, ei);
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
+ min(key.offset + btrfs_file_extent_num_bytes(leaf, ei) - offset,
+ len));
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN,
+ btrfs_file_extent_ram_bytes(leaf, ei));
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET,
+ offset - key.offset + btrfs_file_extent_offset(leaf, ei));
+ ret = btrfs_encoded_io_compression_from_extent(fs_info,
+ btrfs_file_extent_compression(leaf, ei));
+ if (ret < 0)
+ goto out;
+ TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
+ TLV_PUT_U32(sctx, BTRFS_SEND_A_ENCRYPTION, 0);
+
+ ret = put_data_header(sctx, disk_num_bytes);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We want to do I/O directly into the send buffer, so get the next page
+ * boundary in the send buffer. This means that there may be a gap
+ * between the beginning of the command and the file data.
+ */
+ data_offset = PAGE_ALIGN(sctx->send_size);
+ if (unlikely(data_offset > sctx->send_max_size ||
+ sctx->send_max_size - data_offset < disk_num_bytes)) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ /*
+ * Note that send_buf is a mapping of send_buf_pages, so this is really
+ * reading into send_buf.
+ */
+ ret = btrfs_encoded_read_regular_fill_pages(inode,
+ disk_bytenr, disk_num_bytes,
+ sctx->send_buf_pages +
+ (data_offset >> PAGE_SHIFT),
+ NULL);
+ if (ret)
+ goto out;
+
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+ hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
+ hdr->crc = 0;
+ crc = crc32c(0, sctx->send_buf, sctx->send_size);
+ crc = crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
+ hdr->crc = cpu_to_le32(crc);
+
+ ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
+ &sctx->send_off);
+ if (!ret) {
+ ret = write_buf(sctx->send_filp, sctx->send_buf + data_offset,
+ disk_num_bytes, &sctx->send_off);
+ }
+ sctx->send_size = 0;
+ sctx->put_data = false;
+
+tlv_put_failure:
+out:
+ iput(&inode->vfs_inode);
+ return ret;
+}
+
+static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
+ const u64 offset, const u64 len)
+{
+ const u64 end = offset + len;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *ei;
u64 read_size = max_send_read_size(sctx);
u64 sent = 0;
if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
return send_update_extent(sctx, offset, len);
+ ei = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) &&
+ btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
+ bool is_inline = (btrfs_file_extent_type(leaf, ei) ==
+ BTRFS_FILE_EXTENT_INLINE);
+
+ /*
+ * Send the compressed extent unless the compressed data is
+ * larger than the decompressed data. This can happen if we're
+ * not sending the entire extent, either because it has been
+ * partially overwritten/truncated or because this is a part of
+ * the extent that we couldn't clone in clone_range().
+ */
+ if (is_inline &&
+ btrfs_file_extent_inline_item_len(leaf,
+ path->slots[0]) <= len) {
+ return send_encoded_inline_extent(sctx, path, offset,
+ len);
+ } else if (!is_inline &&
+ btrfs_file_extent_disk_num_bytes(leaf, ei) <= len) {
+ return send_encoded_extent(sctx, path, offset, len);
+ }
+ }
+
+ if (sctx->cur_inode == NULL) {
+ struct btrfs_inode *btrfs_inode;
+ struct btrfs_root *root = sctx->send_root;
+
+ btrfs_inode = btrfs_iget(sctx->cur_ino, root);
+ if (IS_ERR(btrfs_inode))
+ return PTR_ERR(btrfs_inode);
+
+ sctx->cur_inode = &btrfs_inode->vfs_inode;
+ memset(&sctx->ra, 0, sizeof(struct file_ra_state));
+ file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping);
+
+ /*
+ * It's very likely there are no pages from this inode in the page
+ * cache, so after reading extents and sending their data, we clean
+ * the page cache to avoid trashing the page cache (adding pressure
+ * to the page cache and forcing eviction of other data more useful
+ * for applications).
+ *
+ * We decide if we should clean the page cache simply by checking
+ * if the inode's mapping nrpages is 0 when we first open it, and
+ * not by using something like filemap_range_has_page() before
+ * reading an extent because when we ask the readahead code to
+ * read a given file range, it may (and almost always does) read
+ * pages from beyond that range (see the documentation for
+ * page_cache_sync_readahead()), so it would not be reliable,
+ * because after reading the first extent future calls to
+ * filemap_range_has_page() would return true because the readahead
+ * on the previous extent resulted in reading pages of the current
+ * extent as well.
+ */
+ sctx->clean_page_cache = (sctx->cur_inode->i_mapping->nrpages == 0);
+ sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE);
+ }
+
while (sent < len) {
u64 size = min(len - sent, read_size);
int ret;
@@ -5220,6 +5706,37 @@ static int send_extent_data(struct send_ctx *sctx,
return ret;
sent += size;
}
+
+ if (sctx->clean_page_cache && PAGE_ALIGNED(end)) {
+ /*
+ * Always operate only on ranges that are a multiple of the page
+ * size. This is not only to prevent zeroing parts of a page in
+ * the case of subpage sector size, but also to guarantee we evict
+ * pages, as passing a range that is smaller than page size does
+ * not evict the respective page (only zeroes part of its content).
+ *
+ * Always start from the end offset of the last range cleared.
+ * This is because the readahead code may (and very often does)
+ * read pages beyond the range we request for readahead. So if
+ * we have an extent layout like this:
+ *
+ * [ extent A ] [ extent B ] [ extent C ]
+ *
+ * When we ask page_cache_sync_readahead() to read extent A, it
+ * may also trigger reads for pages of extent B. If we are doing
+ * an incremental send and extent B has not changed between the
+ * parent and send snapshots, some or all of its pages may end
+ * up being read and placed in the page cache. So when truncating
+ * the page cache we always start from the end offset of the
+ * previously processed extent up to the end of the current
+ * extent.
+ */
+ truncate_inode_pages_range(&sctx->cur_inode->i_data,
+ sctx->page_cache_clear_start,
+ end - 1);
+ sctx->page_cache_clear_start = end;
+ }
+
return 0;
}
@@ -5232,12 +5749,11 @@ static int send_extent_data(struct send_ctx *sctx,
*/
static int send_capabilities(struct send_ctx *sctx)
{
- struct fs_path *fspath = NULL;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *di;
struct extent_buffer *leaf;
unsigned long data_ptr;
- char *buf = NULL;
+ char AUTO_KFREE(buf);
int buf_len;
int ret = 0;
@@ -5249,48 +5765,34 @@ static int send_capabilities(struct send_ctx *sctx)
XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
if (!di) {
/* There is no xattr for this inode */
- goto out;
+ return 0;
} else if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
+ return PTR_ERR(di);
}
leaf = path->nodes[0];
buf_len = btrfs_dir_data_len(leaf, di);
- fspath = fs_path_alloc();
buf = kmalloc(buf_len, GFP_KERNEL);
- if (!fspath || !buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
- if (ret < 0)
- goto out;
+ if (!buf)
+ return -ENOMEM;
data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
read_extent_buffer(leaf, buf, data_ptr, buf_len);
- ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
+ ret = send_set_xattr(sctx, XATTR_NAME_CAPS,
strlen(XATTR_NAME_CAPS), buf, buf_len);
-out:
- kfree(buf);
- fs_path_free(fspath);
- btrfs_free_path(path);
return ret;
}
-static int clone_range(struct send_ctx *sctx,
- struct clone_root *clone_root,
- const u64 disk_byte,
- u64 data_offset,
- u64 offset,
- u64 len)
+static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
+ struct clone_root *clone_root, const u64 disk_byte,
+ u64 data_offset, u64 offset, u64 len)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret;
+ struct btrfs_inode_info info;
u64 clone_src_i_size = 0;
/*
@@ -5310,7 +5812,7 @@ static int clone_range(struct send_ctx *sctx,
*/
if (clone_root->offset == 0 &&
len == sctx->send_root->fs_info->sectorsize)
- return send_extent_data(sctx, offset, len);
+ return send_extent_data(sctx, dst_path, offset, len);
path = alloc_path_for_send();
if (!path)
@@ -5320,11 +5822,11 @@ static int clone_range(struct send_ctx *sctx,
* There are inodes that have extents that lie behind its i_size. Don't
* accept clones from these extents.
*/
- ret = __get_inode_info(clone_root->root, path, clone_root->ino,
- &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
+ ret = get_inode_info(clone_root->root, clone_root->ino, &info);
btrfs_release_path(path);
if (ret < 0)
- goto out;
+ return ret;
+ clone_src_i_size = info.size;
/*
* We can't send a clone operation for the entire range if we find
@@ -5353,7 +5855,7 @@ static int clone_range(struct send_ctx *sctx,
key.offset = clone_root->offset;
ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0 && path->slots[0] > 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
if (key.objectid == clone_root->ino &&
@@ -5369,11 +5871,12 @@ static int clone_range(struct send_ctx *sctx,
u64 ext_len;
u64 clone_len;
u64 clone_data_offset;
+ bool crossed_src_i_size = false;
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(clone_root->root, path);
if (ret < 0)
- goto out;
+ return ret;
else if (ret > 0)
break;
continue;
@@ -5407,9 +5910,10 @@ static int clone_range(struct send_ctx *sctx,
if (hole_len > len)
hole_len = len;
- ret = send_extent_data(sctx, offset, hole_len);
+ ret = send_extent_data(sctx, dst_path, offset,
+ hole_len);
if (ret < 0)
- goto out;
+ return ret;
len -= hole_len;
if (len == 0)
@@ -5425,8 +5929,10 @@ static int clone_range(struct send_ctx *sctx,
if (key.offset >= clone_src_i_size)
break;
- if (key.offset + ext_len > clone_src_i_size)
+ if (key.offset + ext_len > clone_src_i_size) {
ext_len = clone_src_i_size - key.offset;
+ crossed_src_i_size = true;
+ }
clone_data_offset = btrfs_file_extent_offset(leaf, ei);
if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
@@ -5478,20 +5984,41 @@ static int clone_range(struct send_ctx *sctx,
ret = send_clone(sctx, offset, slen,
clone_root);
if (ret < 0)
- goto out;
+ return ret;
}
- ret = send_extent_data(sctx, offset + slen,
+ ret = send_extent_data(sctx, dst_path,
+ offset + slen,
clone_len - slen);
} else {
ret = send_clone(sctx, offset, clone_len,
clone_root);
}
+ } else if (crossed_src_i_size && clone_len < len) {
+ /*
+ * If we are at i_size of the clone source inode and we
+ * cannot clone from it, terminate the loop. This is
+ * to avoid sending two write operations, one with a
+ * length matching clone_len and the final one after
+ * this loop with a length of len - clone_len.
+ *
+ * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
+ * was passed to the send ioctl), this helps avoid
+ * sending an encoded write for an offset that is not
+ * sector size aligned, in case the i_size of the source
+ * inode is not sector size aligned. That will make the
+ * receiver fall back to decompression of the data and
+ * writing it using regular buffered IO, therefore while
+ * not incorrect, it's not optimal due to decompression and
+ * possible re-compression at the receiver.
+ */
+ break;
} else {
- ret = send_extent_data(sctx, offset, clone_len);
+ ret = send_extent_data(sctx, dst_path, offset,
+ clone_len);
}
if (ret < 0)
- goto out;
+ return ret;
len -= clone_len;
if (len == 0)
@@ -5519,11 +6046,9 @@ next:
}
if (len > 0)
- ret = send_extent_data(sctx, offset, len);
+ ret = send_extent_data(sctx, dst_path, offset, len);
else
ret = 0;
-out:
- btrfs_free_path(path);
return ret;
}
@@ -5535,26 +6060,73 @@ static int send_write_or_clone(struct send_ctx *sctx,
int ret = 0;
u64 offset = key->offset;
u64 end;
- u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
+ u64 bs = sctx->send_root->fs_info->sectorsize;
+ struct btrfs_file_extent_item *ei;
+ u64 disk_byte;
+ u64 data_offset;
+ u64 num_bytes;
+ struct btrfs_inode_info info = { 0 };
end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
if (offset >= end)
return 0;
- if (clone_root && IS_ALIGNED(end, bs)) {
- struct btrfs_file_extent_item *ei;
- u64 disk_byte;
- u64 data_offset;
+ num_bytes = end - offset;
- ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
- data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
- ret = clone_range(sctx, clone_root, disk_byte, data_offset,
- offset, end - offset);
- } else {
- ret = send_extent_data(sctx, offset, end - offset);
+ if (!clone_root)
+ goto write_data;
+
+ if (IS_ALIGNED(end, bs))
+ goto clone_data;
+
+ /*
+ * If the extent end is not aligned, we can clone if the extent ends at
+ * the i_size of the inode and the clone range ends at the i_size of the
+ * source inode; otherwise the clone operation fails with -EINVAL.
+ */
+ if (end != sctx->cur_inode_size)
+ goto write_data;
+
+ ret = get_inode_info(clone_root->root, clone_root->ino, &info);
+ if (ret < 0)
+ return ret;
+
+ if (clone_root->offset + num_bytes == info.size) {
+ /*
+ * The final size of our file matches the end offset, but it may
+ * be that its current size is larger, so we have to truncate it
+ * to any value between the start offset of the range and the
+ * final i_size, otherwise the clone operation is invalid
+ * because it's unaligned and it ends before the current EOF.
+ * We do this truncate to the final i_size when we finish
+ * processing the inode, but it's too late by then. And here we
+ * truncate to the start offset of the range because it's always
+ * sector size aligned while if it were the final i_size it
+ * would result in dirtying part of a page, filling part of a
+ * page with zeroes and then having the clone operation at the
+ * receiver trigger IO and wait for it due to the dirty page.
+ */
+ if (sctx->parent_root != NULL) {
+ ret = send_truncate(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, offset);
+ if (ret < 0)
+ return ret;
+ }
+ goto clone_data;
}
+
+write_data:
+ ret = send_extent_data(sctx, path, offset, num_bytes);
+ sctx->cur_inode_next_write_offset = end;
+ return ret;
+
+clone_data:
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
+ data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
+ ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset,
+ num_bytes);
sctx->cur_inode_next_write_offset = end;
return ret;
}
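To make the reworked control flow of send_write_or_clone() above easier to follow, here is a hedged, self-contained sketch of the same write-vs-clone decision. The enum and function names are assumptions for illustration only; in the patch, the unaligned-tail case additionally issues a truncate to the range start before cloning on incremental sends.

/*
 * Illustrative decision sketch (not from the patch): choose between
 * writing the extent data and cloning it, mirroring send_write_or_clone().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum example_action { EX_WRITE_DATA, EX_CLONE, EX_TRUNCATE_THEN_CLONE };

static enum example_action choose_action(bool have_clone_root,
					 uint64_t end, uint64_t block_size,
					 uint64_t cur_inode_size,
					 uint64_t clone_offset,
					 uint64_t num_bytes,
					 uint64_t clone_src_size,
					 bool incremental)
{
	if (!have_clone_root)
		return EX_WRITE_DATA;

	/* A block-aligned end can always be cloned. */
	if ((end % block_size) == 0)
		return EX_CLONE;

	/*
	 * An unaligned end can only be cloned when both the destination
	 * range and the clone source range end at their respective i_size,
	 * otherwise the clone ioctl fails with -EINVAL.
	 */
	if (end != cur_inode_size || clone_offset + num_bytes != clone_src_size)
		return EX_WRITE_DATA;

	/* Incremental sends truncate to the range start first, then clone. */
	return incremental ? EX_TRUNCATE_THEN_CLONE : EX_CLONE;
}

int main(void)
{
	/* Example: 4K blocks, extent ends unaligned at i_size 10000 on both sides. */
	enum example_action a = choose_action(true, 10000, 4096, 10000, 0,
					      10000, 10000, true);

	printf("action: %d\n", a);
	return 0;
}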
@@ -5565,7 +6137,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
{
int ret = 0;
struct btrfs_key key;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
int slot;
struct btrfs_key found_key;
@@ -5591,10 +6163,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
left_type = btrfs_file_extent_type(eb, ei);
- if (left_type != BTRFS_FILE_EXTENT_REG) {
- ret = 0;
- goto out;
- }
+ if (left_type != BTRFS_FILE_EXTENT_REG)
+ return 0;
+
left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
left_len = btrfs_file_extent_num_bytes(eb, ei);
left_offset = btrfs_file_extent_offset(eb, ei);
@@ -5626,11 +6197,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
key.offset = ekey->offset;
ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- if (ret) {
- ret = 0;
- goto out;
- }
+ return ret;
+ if (ret)
+ return 0;
/*
* Handle special case where the right side has no extents at all.
@@ -5639,11 +6208,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
slot = path->slots[0];
btrfs_item_key_to_cpu(eb, &found_key, slot);
if (found_key.objectid != key.objectid ||
- found_key.type != key.type) {
+ found_key.type != key.type)
/* If we're a hole then just pretend nothing changed */
- ret = (left_disknr) ? 0 : 1;
- goto out;
- }
+ return (left_disknr ? 0 : 1);
/*
* We're now on 2a, 2b or 7.
@@ -5653,10 +6220,8 @@ static int is_extent_unchanged(struct send_ctx *sctx,
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
right_type = btrfs_file_extent_type(eb, ei);
if (right_type != BTRFS_FILE_EXTENT_REG &&
- right_type != BTRFS_FILE_EXTENT_INLINE) {
- ret = 0;
- goto out;
- }
+ right_type != BTRFS_FILE_EXTENT_INLINE)
+ return 0;
if (right_type == BTRFS_FILE_EXTENT_INLINE) {
right_len = btrfs_file_extent_ram_bytes(eb, ei);
@@ -5669,11 +6234,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
* Are we at extent 8? If yes, we know the extent is changed.
* This may only happen on the first iteration.
*/
- if (found_key.offset + right_len <= ekey->offset) {
+ if (found_key.offset + right_len <= ekey->offset)
/* If we're a hole just pretend nothing changed */
- ret = (left_disknr) ? 0 : 1;
- goto out;
- }
+ return (left_disknr ? 0 : 1);
/*
* We just wanted to see if when we have an inline extent, what
@@ -5683,10 +6246,8 @@ static int is_extent_unchanged(struct send_ctx *sctx,
* compressed extent representing data with a size matching
* the page size (currently the same as sector size).
*/
- if (right_type == BTRFS_FILE_EXTENT_INLINE) {
- ret = 0;
- goto out;
- }
+ if (right_type == BTRFS_FILE_EXTENT_INLINE)
+ return 0;
right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
right_offset = btrfs_file_extent_offset(eb, ei);
@@ -5706,17 +6267,15 @@ static int is_extent_unchanged(struct send_ctx *sctx,
*/
if (left_disknr != right_disknr ||
left_offset_fixed != right_offset ||
- left_gen != right_gen) {
- ret = 0;
- goto out;
- }
+ left_gen != right_gen)
+ return 0;
/*
* Go to the next extent.
*/
ret = btrfs_next_item(sctx->parent_root, path);
if (ret < 0)
- goto out;
+ return ret;
if (!ret) {
eb = path->nodes[0];
slot = path->slots[0];
@@ -5727,10 +6286,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
key.offset += right_len;
break;
}
- if (found_key.offset != key.offset + right_len) {
- ret = 0;
- goto out;
- }
+ if (found_key.offset != key.offset + right_len)
+ return 0;
+
key = found_key;
}
@@ -5743,15 +6301,12 @@ static int is_extent_unchanged(struct send_ctx *sctx,
else
ret = 0;
-
-out:
- btrfs_free_path(path);
return ret;
}
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = sctx->send_root;
struct btrfs_key key;
int ret;
@@ -5767,15 +6322,13 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
key.offset = offset;
ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
if (ret < 0)
- goto out;
+ return ret;
ret = 0;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
- goto out;
+ return ret;
sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -5783,7 +6336,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
const u64 start,
const u64 end)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root *root = sctx->parent_root;
u64 search_start = start;
@@ -5798,7 +6351,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
key.offset = search_start;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0 && path->slots[0] > 0)
path->slots[0]--;
@@ -5811,8 +6364,8 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
- else if (ret > 0)
+ return ret;
+ if (ret > 0)
break;
continue;
}
@@ -5834,15 +6387,11 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
search_start = extent_end;
goto next;
}
- ret = 0;
- goto out;
+ return 0;
next:
path->slots[0]++;
}
- ret = 1;
-out:
- btrfs_free_path(path);
- return ret;
+ return 1;
}
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
@@ -5853,21 +6402,18 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
return 0;
- if (sctx->cur_inode_last_extent == (u64)-1) {
- ret = get_last_extent(sctx, key->offset - 1);
- if (ret)
- return ret;
- }
-
- if (path->slots[0] == 0 &&
- sctx->cur_inode_last_extent < key->offset) {
- /*
- * We might have skipped entire leafs that contained only
- * file extent items for our current inode. These leafs have
- * a generation number smaller (older) than the one in the
- * current leaf and the leaf our last extent came from, and
- * are located between these 2 leafs.
- */
+ /*
+ * Get last extent's end offset (exclusive) if we haven't determined it
+ * yet (we're processing the first file extent item that is new), or if
+ * we're at the first slot of a leaf and the last extent's end is less
+ * than the current extent's offset, because we might have skipped
+ * entire leaves that contained only file extent items for our current
+ * inode. These leaves have a generation number smaller (older) than the
+ * one in the current leaf and the leaf our last extent came from, and
+ * are located between these 2 leaves.
+ */
+ if ((sctx->cur_inode_last_extent == (u64)-1) ||
+ (path->slots[0] == 0 && sctx->cur_inode_last_extent < key->offset)) {
ret = get_last_extent(sctx, key->offset - 1);
if (ret)
return ret;
@@ -5950,13 +6496,12 @@ out:
static int process_all_extents(struct send_ctx *sctx)
{
- int ret;
+ int ret = 0;
+ int iter_ret = 0;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
- struct extent_buffer *eb;
- int slot;
root = sctx->send_root;
path = alloc_path_for_send();
@@ -5966,46 +6511,25 @@ static int process_all_extents(struct send_ctx *sctx)
key.objectid = sctx->cmp_key->objectid;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- while (1) {
- eb = path->nodes[0];
- slot = path->slots[0];
-
- if (slot >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = 0;
- break;
- }
- continue;
- }
-
- btrfs_item_key_to_cpu(eb, &found_key, slot);
-
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
if (found_key.objectid != key.objectid ||
found_key.type != key.type) {
ret = 0;
- goto out;
+ break;
}
ret = process_extent(sctx, path, &found_key);
if (ret < 0)
- goto out;
-
- path->slots[0]++;
+ break;
}
+ /* Catch error found during iteration */
+ if (iter_ret < 0)
+ ret = iter_ret;
-out:
- btrfs_free_path(path);
return ret;
}
-static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
+static int process_recorded_refs_if_needed(struct send_ctx *sctx, bool at_end,
int *pending_move,
int *refs_processed)
{
@@ -6028,17 +6552,21 @@ out:
return ret;
}
-static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+static int finish_inode_if_needed(struct send_ctx *sctx, bool at_end)
{
int ret = 0;
+ struct btrfs_inode_info info;
u64 left_mode;
u64 left_uid;
u64 left_gid;
+ u64 left_fileattr;
u64 right_mode;
u64 right_uid;
u64 right_gid;
+ u64 right_fileattr;
int need_chmod = 0;
int need_chown = 0;
+ bool need_fileattr = false;
int need_truncate = 1;
int pending_move = 0;
int refs_processed = 0;
@@ -6070,11 +6598,13 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
goto out;
if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
goto out;
-
- ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
- &left_mode, &left_uid, &left_gid, NULL);
+ ret = get_inode_info(sctx->send_root, sctx->cur_ino, &info);
if (ret < 0)
goto out;
+ left_mode = info.mode;
+ left_uid = info.uid;
+ left_gid = info.gid;
+ left_fileattr = info.fileattr;
if (!sctx->parent_root || sctx->cur_inode_new) {
need_chown = 1;
@@ -6085,16 +6615,21 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
} else {
u64 old_size;
- ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
- &old_size, NULL, &right_mode, &right_uid,
- &right_gid, NULL);
+ ret = get_inode_info(sctx->parent_root, sctx->cur_ino, &info);
if (ret < 0)
goto out;
+ old_size = info.size;
+ right_mode = info.mode;
+ right_uid = info.uid;
+ right_gid = info.gid;
+ right_fileattr = info.fileattr;
if (left_uid != right_uid || left_gid != right_gid)
need_chown = 1;
if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
need_chmod = 1;
+ if (!S_ISLNK(sctx->cur_inode_mode) && left_fileattr != right_fileattr)
+ need_fileattr = true;
if ((old_size == sctx->cur_inode_size) ||
(sctx->cur_inode_size > old_size &&
sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
@@ -6110,11 +6645,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
if (ret)
goto out;
}
- if (sctx->cur_inode_last_extent <
- sctx->cur_inode_size) {
- ret = send_hole(sctx, sctx->cur_inode_size);
- if (ret)
+ if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
+ ret = range_is_hole_in_parent(sctx,
+ sctx->cur_inode_last_extent,
+ sctx->cur_inode_size);
+ if (ret < 0) {
goto out;
+ } else if (ret == 0) {
+ ret = send_hole(sctx, sctx->cur_inode_size);
+ if (ret < 0)
+ goto out;
+ } else {
+ /* Range is already a hole, skip. */
+ ret = 0;
+ }
}
}
if (need_truncate) {
@@ -6138,6 +6682,19 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
if (ret < 0)
goto out;
}
+ if (need_fileattr) {
+ ret = send_fileattr(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ left_fileattr);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY)
+ && sctx->cur_inode_needs_verity) {
+ ret = process_verity(sctx);
+ if (ret < 0)
+ goto out;
+ }
ret = send_capabilities(sctx);
if (ret < 0)
@@ -6159,100 +6716,51 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
* it's moved/renamed, therefore we don't need to do it here.
*/
sctx->send_progress = sctx->cur_ino + 1;
- ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
+
+ /*
+ * If the current inode is a non-empty directory, delay issuing
+ * the utimes command for it, as it's very likely we have inodes
+ * with a higher number inside it. We want to issue the utimes
+ * command only after adding all dentries to it.
+ */
+ if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_size > 0)
+ ret = cache_dir_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
+ else
+ ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
+
if (ret < 0)
goto out;
}
out:
- return ret;
-}
-
-struct parent_paths_ctx {
- struct list_head *refs;
- struct send_ctx *sctx;
-};
-
-static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
- void *ctx)
-{
- struct parent_paths_ctx *ppctx = ctx;
+ if (!ret)
+ ret = trim_dir_utimes_cache(sctx);
- return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
- ppctx->refs);
+ return ret;
}
-/*
- * Issue unlink operations for all paths of the current inode found in the
- * parent snapshot.
- */
-static int btrfs_unlink_all_paths(struct send_ctx *sctx)
+static void close_current_inode(struct send_ctx *sctx)
{
- LIST_HEAD(deleted_refs);
- struct btrfs_path *path;
- struct btrfs_key key;
- struct parent_paths_ctx ctx;
- int ret;
-
- path = alloc_path_for_send();
- if (!path)
- return -ENOMEM;
-
- key.objectid = sctx->cur_ino;
- key.type = BTRFS_INODE_REF_KEY;
- key.offset = 0;
- ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- ctx.refs = &deleted_refs;
- ctx.sctx = sctx;
-
- while (true) {
- struct extent_buffer *eb = path->nodes[0];
- int slot = path->slots[0];
-
- if (slot >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(sctx->parent_root, path);
- if (ret < 0)
- goto out;
- else if (ret > 0)
- break;
- continue;
- }
-
- btrfs_item_key_to_cpu(eb, &key, slot);
- if (key.objectid != sctx->cur_ino)
- break;
- if (key.type != BTRFS_INODE_REF_KEY &&
- key.type != BTRFS_INODE_EXTREF_KEY)
- break;
+ u64 i_size;
- ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
- record_parent_ref, &ctx);
- if (ret < 0)
- goto out;
+ if (sctx->cur_inode == NULL)
+ return;
- path->slots[0]++;
- }
+ i_size = i_size_read(sctx->cur_inode);
- while (!list_empty(&deleted_refs)) {
- struct recorded_ref *ref;
+ /*
+ * If we are doing an incremental send, we may have extents between the
+ * last processed extent and the i_size that have not been processed
+ * because they haven't changed, but we may have read some of their pages
+ * through readahead (see the comments at send_extent_data()).
+ */
+ if (sctx->clean_page_cache && sctx->page_cache_clear_start < i_size)
+ truncate_inode_pages_range(&sctx->cur_inode->i_data,
+ sctx->page_cache_clear_start,
+ round_up(i_size, PAGE_SIZE) - 1);
- ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
- ret = send_unlink(sctx, ref->full_path);
- if (ret < 0)
- goto out;
- fs_path_free(ref->full_path);
- list_del(&ref->list);
- kfree(ref);
- }
- ret = 0;
-out:
- btrfs_free_path(path);
- if (ret)
- __free_recorded_refs(&deleted_refs);
- return ret;
+ iput(sctx->cur_inode);
+ sctx->cur_inode = NULL;
}
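/*
 * Illustrative example (editorial note, not part of the patch; the numbers
 * are assumptions): if the last processed extent left page_cache_clear_start
 * at 1M and i_size is 1M + 100K, the truncate above drops cached pages in the
 * byte range [1M, round_up(1M + 100K, PAGE_SIZE) - 1], so pages brought in
 * only by readahead while sending are not left behind in the page cache.
 */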
static int changed_inode(struct send_ctx *sctx,
@@ -6265,11 +6773,14 @@ static int changed_inode(struct send_ctx *sctx,
u64 left_gen = 0;
u64 right_gen = 0;
+ close_current_inode(sctx);
+
sctx->cur_ino = key->objectid;
- sctx->cur_inode_new_gen = 0;
+ sctx->cur_inode_new_gen = false;
sctx->cur_inode_last_extent = (u64)-1;
sctx->cur_inode_next_write_offset = 0;
sctx->ignore_cur_inode = false;
+ fs_path_reset(&sctx->cur_inode_path);
/*
* Set send_progress to current inode. This will tell all get_cur_xxx
@@ -6307,7 +6818,7 @@ static int changed_inode(struct send_ctx *sctx,
*/
if (left_gen != right_gen &&
sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
- sctx->cur_inode_new_gen = 1;
+ sctx->cur_inode_new_gen = true;
}
/*
@@ -6319,28 +6830,39 @@ static int changed_inode(struct send_ctx *sctx,
* file descriptor against it or turning a RO snapshot into RW mode,
* keep an open file descriptor against a file, delete it and then
* turn the snapshot back to RO mode before using it for a send
- * operation. So if we find such cases, ignore the inode and all its
- * items completely if it's a new inode, or if it's a changed inode
- * make sure all its previous paths (from the parent snapshot) are all
- * unlinked and all other the inode items are ignored.
+ * operation. The former is what the receiver operation does.
+ * Therefore, if we want to send these snapshots soon after they're
+ * received, we need to handle orphan inodes as well. Moreover, orphans
+ * can appear not only in the send snapshot but also in the parent
+ * snapshot. Here are several cases:
+ *
+ * Case 1: BTRFS_COMPARE_TREE_NEW
+ * | send snapshot | action
+ * --------------------------------
+ * nlink | 0 | ignore
+ *
+ * Case 2: BTRFS_COMPARE_TREE_DELETED
+ * | parent snapshot | action
+ * ----------------------------------
+ * nlink | 0 | as usual
+ * Note: No unlinks will be sent because there are no paths for it.
+ *
+ * Case 3: BTRFS_COMPARE_TREE_CHANGED
+ * | | parent snapshot | send snapshot | action
+ * -----------------------------------------------------------------------
+ * subcase 1 | nlink | 0 | 0 | ignore
+ * subcase 2 | nlink | >0 | 0 | new_gen(deletion)
+ * subcase 3 | nlink | 0 | >0 | new_gen(creation)
+ *
*/
- if (result == BTRFS_COMPARE_TREE_NEW ||
- result == BTRFS_COMPARE_TREE_CHANGED) {
- u32 nlinks;
-
- nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
- if (nlinks == 0) {
+ if (result == BTRFS_COMPARE_TREE_NEW) {
+ if (btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii) == 0) {
sctx->ignore_cur_inode = true;
- if (result == BTRFS_COMPARE_TREE_CHANGED)
- ret = btrfs_unlink_all_paths(sctx);
goto out;
}
- }
-
- if (result == BTRFS_COMPARE_TREE_NEW) {
sctx->cur_inode_gen = left_gen;
- sctx->cur_inode_new = 1;
- sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_new = true;
+ sctx->cur_inode_deleted = false;
sctx->cur_inode_size = btrfs_inode_size(
sctx->left_path->nodes[0], left_ii);
sctx->cur_inode_mode = btrfs_inode_mode(
@@ -6351,13 +6873,23 @@ static int changed_inode(struct send_ctx *sctx,
ret = send_create_inode_if_needed(sctx);
} else if (result == BTRFS_COMPARE_TREE_DELETED) {
sctx->cur_inode_gen = right_gen;
- sctx->cur_inode_new = 0;
- sctx->cur_inode_deleted = 1;
+ sctx->cur_inode_new = false;
+ sctx->cur_inode_deleted = true;
sctx->cur_inode_size = btrfs_inode_size(
sctx->right_path->nodes[0], right_ii);
sctx->cur_inode_mode = btrfs_inode_mode(
sctx->right_path->nodes[0], right_ii);
} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ u32 new_nlinks, old_nlinks;
+
+ new_nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
+ old_nlinks = btrfs_inode_nlink(sctx->right_path->nodes[0], right_ii);
+ if (new_nlinks == 0 && old_nlinks == 0) {
+ sctx->ignore_cur_inode = true;
+ goto out;
+ } else if (new_nlinks == 0 || old_nlinks == 0) {
+ sctx->cur_inode_new_gen = true;
+ }
/*
* We need to do some special handling in case the inode was
* reported as changed with a changed generation number. This
@@ -6369,58 +6901,66 @@ static int changed_inode(struct send_ctx *sctx,
/*
* First, process the inode as if it was deleted.
*/
- sctx->cur_inode_gen = right_gen;
- sctx->cur_inode_new = 0;
- sctx->cur_inode_deleted = 1;
- sctx->cur_inode_size = btrfs_inode_size(
- sctx->right_path->nodes[0], right_ii);
- sctx->cur_inode_mode = btrfs_inode_mode(
- sctx->right_path->nodes[0], right_ii);
- ret = process_all_refs(sctx,
- BTRFS_COMPARE_TREE_DELETED);
- if (ret < 0)
- goto out;
+ if (old_nlinks > 0) {
+ sctx->cur_inode_gen = right_gen;
+ sctx->cur_inode_new = false;
+ sctx->cur_inode_deleted = true;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->right_path->nodes[0], right_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->right_path->nodes[0], right_ii);
+ ret = process_all_refs(sctx,
+ BTRFS_COMPARE_TREE_DELETED);
+ if (ret < 0)
+ goto out;
+ }
/*
* Now process the inode as if it was new.
*/
- sctx->cur_inode_gen = left_gen;
- sctx->cur_inode_new = 1;
- sctx->cur_inode_deleted = 0;
- sctx->cur_inode_size = btrfs_inode_size(
- sctx->left_path->nodes[0], left_ii);
- sctx->cur_inode_mode = btrfs_inode_mode(
- sctx->left_path->nodes[0], left_ii);
- sctx->cur_inode_rdev = btrfs_inode_rdev(
- sctx->left_path->nodes[0], left_ii);
- ret = send_create_inode_if_needed(sctx);
- if (ret < 0)
- goto out;
+ if (new_nlinks > 0) {
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = true;
+ sctx->cur_inode_deleted = false;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0],
+ left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0],
+ left_ii);
+ sctx->cur_inode_rdev = btrfs_inode_rdev(
+ sctx->left_path->nodes[0],
+ left_ii);
+ ret = send_create_inode_if_needed(sctx);
+ if (ret < 0)
+ goto out;
- ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
- if (ret < 0)
- goto out;
- /*
- * Advance send_progress now as we did not get into
- * process_recorded_refs_if_needed in the new_gen case.
- */
- sctx->send_progress = sctx->cur_ino + 1;
+ ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
+ if (ret < 0)
+ goto out;
+ /*
+ * Advance send_progress now as we did not get
+ * into process_recorded_refs_if_needed in the
+ * new_gen case.
+ */
+ sctx->send_progress = sctx->cur_ino + 1;
- /*
- * Now process all extents and xattrs of the inode as if
- * they were all new.
- */
- ret = process_all_extents(sctx);
- if (ret < 0)
- goto out;
- ret = process_all_new_xattrs(sctx);
- if (ret < 0)
- goto out;
+ /*
+ * Now process all extents and xattrs of the
+ * inode as if they were all new.
+ */
+ ret = process_all_extents(sctx);
+ if (ret < 0)
+ goto out;
+ ret = process_all_new_xattrs(sctx);
+ if (ret < 0)
+ goto out;
+ }
} else {
sctx->cur_inode_gen = left_gen;
- sctx->cur_inode_new = 0;
- sctx->cur_inode_new_gen = 0;
- sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_new = false;
+ sctx->cur_inode_new_gen = false;
+ sctx->cur_inode_deleted = false;
sctx->cur_inode_size = btrfs_inode_size(
sctx->left_path->nodes[0], left_ii);
sctx->cur_inode_mode = btrfs_inode_mode(
@@ -6447,7 +6987,7 @@ static int changed_ref(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) {
inconsistent_snapshot_error(sctx, result, "reference");
return -EIO;
}
@@ -6475,7 +7015,7 @@ static int changed_xattr(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ if (unlikely(sctx->cur_ino != sctx->cmp_key->objectid)) {
inconsistent_snapshot_error(sctx, result, "xattr");
return -EIO;
}
@@ -6527,18 +7067,25 @@ static int changed_extent(struct send_ctx *sctx,
return ret;
}
+static int changed_verity(struct send_ctx *sctx, enum btrfs_compare_tree_result result)
+{
+ if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+ if (result == BTRFS_COMPARE_TREE_NEW)
+ sctx->cur_inode_needs_verity = true;
+ }
+ return 0;
+}
+
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
u64 orig_gen, new_gen;
int ret;
- ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
- NULL, NULL);
+ ret = get_inode_gen(sctx->send_root, dir, &new_gen);
if (ret)
return ret;
- ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
- NULL, NULL, NULL);
+ ret = get_inode_gen(sctx->parent_root, dir, &orig_gen);
if (ret)
return ret;
@@ -6566,7 +7113,7 @@ static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
}
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
while (cur_offset < item_size) {
extref = (struct btrfs_inode_extref *)(ptr +
@@ -6595,7 +7142,51 @@ static int changed_cb(struct btrfs_path *left_path,
enum btrfs_compare_tree_result result,
struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
+
+ /*
+ * We can not hold the commit root semaphore here. This is because, in the
+ * case of sending and receiving to the same filesystem using a pipe,
+ * holding it could result in a deadlock:
+ *
+ * 1) The task running send blocks on the pipe because it's full;
+ *
+ * 2) The task running receive, which is the only consumer of the pipe,
+ * is waiting for a transaction commit (for example due to a space
+ * reservation when doing a write or triggering a transaction commit
+ * when creating a subvolume);
+ *
+ * 3) The transaction is waiting to write lock the commit root semaphore,
+ * but can not acquire it since it's being held at 1).
+ *
+ * Down this call chain we write to the pipe through kernel_write().
+ * The same type of problem can also happen when sending to a file that
+ * is stored in the same filesystem - when reserving space for a write
+ * into the file, we can trigger a transaction commit.
+ *
+ * Our caller has supplied us with clones of leaves from the send and
+ * parent roots, so we're safe here from a concurrent relocation and
+ * further reallocation of metadata extents while we are here. Below we
+ * also assert that the leaves are clones.
+ */
+ lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem);
+
+ /*
+ * We always have a send root, so left_path is never NULL. We will not
+ * have a leaf when we have reached the end of the send root but have
+ * not yet reached the end of the parent root.
+ */
+ if (left_path->nodes[0])
+ ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
+ &left_path->nodes[0]->bflags));
+ /*
+ * When doing a full send we don't have a parent root, so right_path is
+ * NULL. When doing an incremental send, we may have reached the end of
+ * the parent root already, so we don't have a leaf at right_path.
+ */
+ if (right_path && right_path->nodes[0])
+ ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
+ &right_path->nodes[0]->bflags));
if (result == BTRFS_COMPARE_TREE_SAME) {
if (key->type == BTRFS_INODE_REF_KEY ||
@@ -6611,7 +7202,6 @@ static int changed_cb(struct btrfs_path *left_path,
return 0;
}
result = BTRFS_COMPARE_TREE_CHANGED;
- ret = 0;
}
sctx->left_path = left_path;
@@ -6637,20 +7227,55 @@ static int changed_cb(struct btrfs_path *left_path,
ret = changed_xattr(sctx, result);
else if (key->type == BTRFS_EXTENT_DATA_KEY)
ret = changed_extent(sctx, result);
+ else if (key->type == BTRFS_VERITY_DESC_ITEM_KEY &&
+ key->offset == 0)
+ ret = changed_verity(sctx, result);
}
out:
return ret;
}
+static int search_key_again(const struct send_ctx *sctx,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key)
+{
+ int ret;
+
+ if (!path->need_commit_sem)
+ lockdep_assert_held_read(&root->fs_info->commit_root_sem);
+
+ /*
+ * Roots used for send operations are readonly and no one can add,
+ * update or remove keys from them, so we should be able to find our
+ * key again. The only exception is deduplication, which can operate on
+ * readonly roots and add, update or remove keys to/from them - but at
+ * the moment we don't allow it to run in parallel with send.
+ */
+ ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ ASSERT(ret <= 0);
+ if (unlikely(ret > 0)) {
+ btrfs_print_tree(path->nodes[path->lowest_level], false);
+ btrfs_err(root->fs_info,
+"send: key " BTRFS_KEY_FMT" not found in %s root %llu, lowest_level %d, slot %d",
+ BTRFS_KEY_FMT_VALUE(key),
+ (root == sctx->parent_root ? "parent" : "send"),
+ btrfs_root_id(root), path->lowest_level,
+ path->slots[path->lowest_level]);
+ return -EUCLEAN;
+ }
+
+ return ret;
+}
+
static int full_send_tree(struct send_ctx *sctx)
{
int ret;
struct btrfs_root *send_root = sctx->send_root;
struct btrfs_key key;
- struct btrfs_path *path;
- struct extent_buffer *eb;
- int slot;
+ struct btrfs_fs_info *fs_info = send_root->fs_info;
+ BTRFS_PATH_AUTO_FREE(path);
path = alloc_path_for_send();
if (!path)
@@ -6661,25 +7286,49 @@ static int full_send_tree(struct send_ctx *sctx)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
+ down_read(&fs_info->commit_root_sem);
+ sctx->last_reloc_trans = fs_info->last_reloc_trans;
+ up_read(&fs_info->commit_root_sem);
+
ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret)
goto out_finish;
while (1) {
- eb = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(eb, &key, slot);
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
ret = changed_cb(path, NULL, &key,
BTRFS_COMPARE_TREE_NEW, sctx);
if (ret < 0)
- goto out;
+ return ret;
+
+ down_read(&fs_info->commit_root_sem);
+ if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
+ sctx->last_reloc_trans = fs_info->last_reloc_trans;
+ up_read(&fs_info->commit_root_sem);
+ /*
+ * A transaction used for relocating a block group was
+ * committed or is about to finish its commit. Release
+ * our path (leaf) and restart the search, so that we
+ * avoid operating on any file extent items that are
+ * stale, with a disk_bytenr that reflects a pre
+ * relocation value. This way we avoid, as much as
+ * possible, falling back to regular writes when checking
+ * if we can clone file ranges.
+ */
+ btrfs_release_path(path);
+ ret = search_key_again(sctx, send_root, path, &key);
+ if (ret < 0)
+ return ret;
+ } else {
+ up_read(&fs_info->commit_root_sem);
+ }
ret = btrfs_next_item(send_root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret) {
ret = 0;
break;
@@ -6687,11 +7336,21 @@ static int full_send_tree(struct send_ctx *sctx)
}
out_finish:
- ret = finish_inode_if_needed(sctx, 1);
+ return finish_inode_if_needed(sctx, 1);
+}
-out:
- btrfs_free_path(path);
- return ret;
+static int replace_node_with_clone(struct btrfs_path *path, int level)
+{
+ struct extent_buffer *clone;
+
+ clone = btrfs_clone_extent_buffer(path->nodes[level]);
+ if (!clone)
+ return -ENOMEM;
+
+ free_extent_buffer(path->nodes[level]);
+ path->nodes[level] = clone;
+
+ return 0;
}
static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
@@ -6703,7 +7362,9 @@ static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen
u64 reada_max;
u64 reada_done = 0;
- BUG_ON(*level == 0);
+ lockdep_assert_held_read(&parent->fs_info->commit_root_sem);
+ ASSERT(*level != 0);
+
eb = btrfs_read_node_slot(parent, slot);
if (IS_ERR(eb))
return PTR_ERR(eb);
@@ -6726,6 +7387,10 @@ static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen
path->nodes[*level - 1] = eb;
path->slots[*level - 1] = 0;
(*level)--;
+
+ if (*level == 0)
+ return replace_node_with_clone(path, 0);
+
return 0;
}
@@ -6739,8 +7404,10 @@ static int tree_move_next_or_upnext(struct btrfs_path *path,
path->slots[*level]++;
while (path->slots[*level] >= nritems) {
- if (*level == root_level)
+ if (*level == root_level) {
+ path->slots[*level] = nritems - 1;
return -1;
+ }
/* move upnext */
path->slots[*level] = 0;
@@ -6772,14 +7439,20 @@ static int tree_advance(struct btrfs_path *path,
} else {
ret = tree_move_down(path, level, reada_min_gen);
}
- if (ret >= 0) {
- if (*level == 0)
- btrfs_item_key_to_cpu(path->nodes[*level], key,
- path->slots[*level]);
- else
- btrfs_node_key_to_cpu(path->nodes[*level], key,
- path->slots[*level]);
- }
+
+ /*
+ * Even if we have reached the end of a tree (ret is -1), update the key
+ * anyway, so that in case we need to restart due to a block group
+ * relocation, we can assert that the last key of the root node still
+ * exists in the tree.
+ */
+ if (*level == 0)
+ btrfs_item_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ else
+ btrfs_node_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+
return ret;
}
@@ -6791,8 +7464,8 @@ static int tree_compare_item(struct btrfs_path *left_path,
int len1, len2;
unsigned long off1, off2;
- len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
- len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
+ len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]);
+ len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]);
if (len1 != len2)
return 1;
@@ -6809,6 +7482,97 @@ static int tree_compare_item(struct btrfs_path *left_path,
}
/*
+ * A transaction used for relocating a block group was committed or is about to
+ * finish its commit. Release our paths and restart the search, so that we are
+ * not using stale extent buffers:
+ *
+ * 1) For levels > 0, we are only holding references of extent buffers, without
+ * any locks on them, which does not prevent them from having been relocated
+ * and reallocated after the last time we released the commit root semaphore.
+ * The exceptions are the root nodes, for which we always have a clone, see
+ * the comment at btrfs_compare_trees();
+ *
+ * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so
+ * we are safe from the concurrent relocation and reallocation. However they
+ * can have file extent items with a pre relocation disk_bytenr value, so we
+ * restart the search from the current commit roots and clone the new leaves so
+ * that we get the post relocation disk_bytenr values. Not doing so could
+ * make us clone the wrong data in case there are new extents using the old
+ * disk_bytenr that happen to be shared.
+ */
+static int restart_after_relocation(struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ const struct btrfs_key *left_key,
+ const struct btrfs_key *right_key,
+ int left_level,
+ int right_level,
+ const struct send_ctx *sctx)
+{
+ int root_level;
+ int ret;
+
+ lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem);
+
+ btrfs_release_path(left_path);
+ btrfs_release_path(right_path);
+
+ /*
+ * Since keys can not be added to or removed from our roots because they
+ * are readonly and we do not allow deduplication to run in parallel
+ * (which can add, remove or change keys), the layout of the trees should
+ * not change.
+ */
+ left_path->lowest_level = left_level;
+ ret = search_key_again(sctx, sctx->send_root, left_path, left_key);
+ if (ret < 0)
+ return ret;
+
+ right_path->lowest_level = right_level;
+ ret = search_key_again(sctx, sctx->parent_root, right_path, right_key);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the lowest level nodes are leaves, clone them so that they can be
+ * safely used by changed_cb() while not under the protection of the
+ * commit root semaphore, even if relocation and reallocation happens in
+ * parallel.
+ */
+ if (left_level == 0) {
+ ret = replace_node_with_clone(left_path, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (right_level == 0) {
+ ret = replace_node_with_clone(right_path, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Now clone the root nodes (unless they happen to be the leaves we have
+ * already cloned). This is to protect against concurrent snapshotting of
+ * the send and parent roots (see the comment at btrfs_compare_trees()).
+ */
+ root_level = btrfs_header_level(sctx->send_root->commit_root);
+ if (root_level > 0) {
+ ret = replace_node_with_clone(left_path, root_level);
+ if (ret < 0)
+ return ret;
+ }
+
+ root_level = btrfs_header_level(sctx->parent_root->commit_root);
+ if (root_level > 0) {
+ ret = replace_node_with_clone(right_path, root_level);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
* This function compares two trees and calls the provided callback for
* every changed/new/deleted item it finds.
* If shared tree blocks are encountered, whole subtrees are skipped, making
@@ -6827,8 +7591,8 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
struct btrfs_fs_info *fs_info = left_root->fs_info;
int ret;
int cmp;
- struct btrfs_path *left_path = NULL;
- struct btrfs_path *right_path = NULL;
+ BTRFS_PATH_AUTO_FREE(left_path);
+ BTRFS_PATH_AUTO_FREE(right_path);
struct btrfs_key left_key;
struct btrfs_key right_key;
char *tmp_buf = NULL;
@@ -6836,10 +7600,10 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
int right_root_level;
int left_level;
int right_level;
- int left_end_reached;
- int right_end_reached;
- int advance_left;
- int advance_right;
+ int left_end_reached = 0;
+ int right_end_reached = 0;
+ int advance_left = 0;
+ int advance_right = 0;
u64 left_blockptr;
u64 right_blockptr;
u64 left_gen;
@@ -6863,10 +7627,10 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- left_path->search_commit_root = 1;
- left_path->skip_locking = 1;
- right_path->search_commit_root = 1;
- right_path->skip_locking = 1;
+ left_path->search_commit_root = true;
+ left_path->skip_locking = true;
+ right_path->search_commit_root = true;
+ right_path->skip_locking = true;
/*
* Strategy: Go to the first items of both trees. Then do
@@ -6907,12 +7671,18 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
down_read(&fs_info->commit_root_sem);
left_level = btrfs_header_level(left_root->commit_root);
left_root_level = left_level;
+ /*
+ * We clone the root node of the send and parent roots to prevent races
+ * with snapshot creation of these roots. Snapshot creation COWs the
+ * root node of a tree, so after the transaction is committed the old
+ * extent can be reallocated while this send operation is still ongoing.
+ * So we clone them, under the commit root semaphore, to be race free.
+ */
left_path->nodes[left_level] =
btrfs_clone_extent_buffer(left_root->commit_root);
if (!left_path->nodes[left_level]) {
- up_read(&fs_info->commit_root_sem);
ret = -ENOMEM;
- goto out;
+ goto out_unlock;
}
right_level = btrfs_header_level(right_root->commit_root);
@@ -6920,9 +7690,8 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
right_path->nodes[right_level] =
btrfs_clone_extent_buffer(right_root->commit_root);
if (!right_path->nodes[right_level]) {
- up_read(&fs_info->commit_root_sem);
ret = -ENOMEM;
- goto out;
+ goto out_unlock;
}
/*
* Our right root is the parent root, while the left root is the "send"
@@ -6932,7 +7701,6 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
* will need to read them at some point.
*/
reada_min_gen = btrfs_header_generation(right_root->commit_root);
- up_read(&fs_info->commit_root_sem);
if (left_level == 0)
btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -6947,11 +7715,26 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
btrfs_node_key_to_cpu(right_path->nodes[right_level],
&right_key, right_path->slots[right_level]);
- left_end_reached = right_end_reached = 0;
- advance_left = advance_right = 0;
+ sctx->last_reloc_trans = fs_info->last_reloc_trans;
while (1) {
- cond_resched();
+ if (need_resched() ||
+ rwsem_is_contended(&fs_info->commit_root_sem)) {
+ up_read(&fs_info->commit_root_sem);
+ cond_resched();
+ down_read(&fs_info->commit_root_sem);
+ }
+
+ if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
+ ret = restart_after_relocation(left_path, right_path,
+ &left_key, &right_key,
+ left_level, right_level,
+ sctx);
+ if (ret < 0)
+ goto out_unlock;
+ sctx->last_reloc_trans = fs_info->last_reloc_trans;
+ }
+
if (advance_left && !left_end_reached) {
ret = tree_advance(left_path, &left_level,
left_root_level,
@@ -6960,7 +7743,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
if (ret == -1)
left_end_reached = ADVANCE;
else if (ret < 0)
- goto out;
+ goto out_unlock;
advance_left = 0;
}
if (advance_right && !right_end_reached) {
@@ -6971,54 +7754,55 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
if (ret == -1)
right_end_reached = ADVANCE;
else if (ret < 0)
- goto out;
+ goto out_unlock;
advance_right = 0;
}
if (left_end_reached && right_end_reached) {
ret = 0;
- goto out;
+ goto out_unlock;
} else if (left_end_reached) {
if (right_level == 0) {
+ up_read(&fs_info->commit_root_sem);
ret = changed_cb(left_path, right_path,
&right_key,
BTRFS_COMPARE_TREE_DELETED,
sctx);
if (ret < 0)
goto out;
+ down_read(&fs_info->commit_root_sem);
}
advance_right = ADVANCE;
continue;
} else if (right_end_reached) {
if (left_level == 0) {
+ up_read(&fs_info->commit_root_sem);
ret = changed_cb(left_path, right_path,
&left_key,
BTRFS_COMPARE_TREE_NEW,
sctx);
if (ret < 0)
goto out;
+ down_read(&fs_info->commit_root_sem);
}
advance_left = ADVANCE;
continue;
}
if (left_level == 0 && right_level == 0) {
+ up_read(&fs_info->commit_root_sem);
cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
if (cmp < 0) {
ret = changed_cb(left_path, right_path,
&left_key,
BTRFS_COMPARE_TREE_NEW,
sctx);
- if (ret < 0)
- goto out;
advance_left = ADVANCE;
} else if (cmp > 0) {
ret = changed_cb(left_path, right_path,
&right_key,
BTRFS_COMPARE_TREE_DELETED,
sctx);
- if (ret < 0)
- goto out;
advance_right = ADVANCE;
} else {
enum btrfs_compare_tree_result result;
@@ -7032,11 +7816,13 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
result = BTRFS_COMPARE_TREE_SAME;
ret = changed_cb(left_path, right_path,
&left_key, result, sctx);
- if (ret < 0)
- goto out;
advance_left = ADVANCE;
advance_right = ADVANCE;
}
+
+ if (ret < 0)
+ goto out;
+ down_read(&fs_info->commit_root_sem);
} else if (left_level == right_level) {
cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
if (cmp < 0) {
@@ -7076,9 +7862,9 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
}
}
+out_unlock:
+ up_read(&fs_info->commit_root_sem);
out:
- btrfs_free_path(left_path);
- btrfs_free_path(right_path);
kvfree(tmp_buf);
return ret;
}
@@ -7130,38 +7916,22 @@ out:
*/
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
- int i;
- struct btrfs_trans_handle *trans = NULL;
-
-again:
- if (sctx->parent_root &&
- sctx->parent_root->node != sctx->parent_root->commit_root)
- goto commit_trans;
-
- for (i = 0; i < sctx->clone_roots_cnt; i++)
- if (sctx->clone_roots[i].root->node !=
- sctx->clone_roots[i].root->commit_root)
- goto commit_trans;
-
- if (trans)
- return btrfs_end_transaction(trans);
+ struct btrfs_root *root = sctx->parent_root;
- return 0;
+ if (root && root->node != root->commit_root)
+ return btrfs_commit_current_transaction(root);
-commit_trans:
- /* Use any root, all fs roots will get their commit roots updated. */
- if (!trans) {
- trans = btrfs_join_transaction(sctx->send_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- goto again;
+ for (int i = 0; i < sctx->clone_roots_cnt; i++) {
+ root = sctx->clone_roots[i].root;
+ if (root->node != root->commit_root)
+ return btrfs_commit_current_transaction(root);
}
- return btrfs_commit_transaction(trans);
+ return 0;
}
/*
- * Make sure any existing dellaloc is flushed for any root used by a send
+ * Make sure any existing delalloc is flushed for any root used by a send
* operation so that we do not miss any data and we do not race with writeback
* finishing and changing a tree while send is using the tree. This could
* happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
@@ -7178,7 +7948,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
ret = btrfs_start_delalloc_snapshot(root, false);
if (ret)
return ret;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
}
for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -7186,7 +7956,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
ret = btrfs_start_delalloc_snapshot(root, false);
if (ret)
return ret;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
}
return 0;
@@ -7203,7 +7973,7 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
if (root->send_in_progress < 0)
btrfs_err(root->fs_info,
"send_in_progress unbalanced %d root %llu",
- root->send_in_progress, root->root_key.objectid);
+ root->send_in_progress, btrfs_root_id(root));
spin_unlock(&root->root_item_lock);
}
@@ -7211,13 +7981,12 @@ static void dedupe_in_progress_warn(const struct btrfs_root *root)
{
btrfs_warn_rl(root->fs_info,
"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
- root->root_key.objectid, root->dedupe_in_progress);
+ btrfs_root_id(root), root->dedupe_in_progress);
}
-long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
+long btrfs_ioctl_send(struct btrfs_root *send_root, const struct btrfs_ioctl_send_args *arg)
{
int ret = 0;
- struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
struct send_ctx *sctx = NULL;
@@ -7226,6 +7995,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
int clone_sources_to_rollback = 0;
size_t alloc_size;
int sort_clone_roots = 0;
+ struct btrfs_lru_cache_entry *entry;
+ struct btrfs_lru_cache_entry *tmp;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -7235,7 +8006,20 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
* making it RW. This also protects against deletion.
*/
spin_lock(&send_root->root_item_lock);
- if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
+ /*
+ * Unlikely but possible, if the subvolume is marked for deletion but
+ * is slow to remove the directory entry, send can still be started.
+ */
+ if (btrfs_root_dead(send_root)) {
+ spin_unlock(&send_root->root_item_lock);
+ return -EPERM;
+ }
+ /* Userspace tools do the checks and warn the user if it's not RO. */
+ if (!btrfs_root_readonly(send_root)) {
+ spin_unlock(&send_root->root_item_lock);
+ return -EPERM;
+ }
+ if (send_root->dedupe_in_progress) {
dedupe_in_progress_warn(send_root);
spin_unlock(&send_root->root_item_lock);
return -EAGAIN;
@@ -7244,27 +8028,18 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
spin_unlock(&send_root->root_item_lock);
/*
- * Userspace tools do the checks and warn the user if it's
- * not RO.
- */
- if (!btrfs_root_readonly(send_root)) {
- ret = -EPERM;
- goto out;
- }
-
- /*
* Check that we don't overflow at later allocations, we request
* clone_sources_count + 1 items, and compare to unsigned long inside
- * access_ok.
+ * access_ok. Also set an upper limit for allocation size so this can't
+ * easily exhaust memory. Max number of clone sources is about 200K.
*/
- if (arg->clone_sources_count >
- ULONG_MAX / sizeof(struct clone_root) - 1) {
+ if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
ret = -EINVAL;
goto out;
}
if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
- ret = -EINVAL;
+ ret = -EOPNOTSUPP;
goto out;
}
@@ -7274,10 +8049,25 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
goto out;
}
+ init_path(&sctx->cur_inode_path);
INIT_LIST_HEAD(&sctx->new_refs);
INIT_LIST_HEAD(&sctx->deleted_refs);
- INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
- INIT_LIST_HEAD(&sctx->name_cache_list);
+
+ btrfs_lru_cache_init(&sctx->name_cache, SEND_MAX_NAME_CACHE_SIZE);
+ btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE);
+ btrfs_lru_cache_init(&sctx->dir_created_cache,
+ SEND_MAX_DIR_CREATED_CACHE_SIZE);
+ /*
+ * This cache is periodically trimmed to a fixed size elsewhere, see
+ * cache_dir_utimes() and trim_dir_utimes_cache().
+ */
+ btrfs_lru_cache_init(&sctx->dir_utimes_cache, 0);
+
+ sctx->pending_dir_moves = RB_ROOT;
+ sctx->waiting_dir_moves = RB_ROOT;
+ sctx->orphan_dirs = RB_ROOT;
+ sctx->rbtree_new_refs = RB_ROOT;
+ sctx->rbtree_deleted_refs = RB_ROOT;
sctx->flags = arg->flags;
@@ -7291,38 +8081,52 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
} else {
sctx->proto = 1;
}
+ if ((arg->flags & BTRFS_SEND_FLAG_COMPRESSED) && sctx->proto < 2) {
+ ret = -EINVAL;
+ goto out;
+ }
sctx->send_filp = fget(arg->send_fd);
- if (!sctx->send_filp) {
+ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
ret = -EBADF;
goto out;
}
sctx->send_root = send_root;
- /*
- * Unlikely but possible, if the subvolume is marked for deletion but
- * is slow to remove the directory entry, send can still be started
- */
- if (btrfs_root_dead(sctx->send_root)) {
- ret = -EPERM;
- goto out;
- }
-
sctx->clone_roots_cnt = arg->clone_sources_count;
- sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
- sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
+ if (sctx->proto >= 2) {
+ u32 send_buf_num_pages;
+
+ sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V2;
+ sctx->send_buf = vmalloc(sctx->send_max_size);
+ if (!sctx->send_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT;
+ sctx->send_buf_pages = kcalloc(send_buf_num_pages,
+ sizeof(*sctx->send_buf_pages),
+ GFP_KERNEL);
+ if (!sctx->send_buf_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < send_buf_num_pages; i++) {
+ sctx->send_buf_pages[i] =
+ vmalloc_to_page(sctx->send_buf + (i << PAGE_SHIFT));
+ }
+ } else {
+ sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1;
+ sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
+ }
if (!sctx->send_buf) {
ret = -ENOMEM;
goto out;
}
- sctx->pending_dir_moves = RB_ROOT;
- sctx->waiting_dir_moves = RB_ROOT;
- sctx->orphan_dirs = RB_ROOT;
-
- sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
- arg->clone_sources_count + 1,
+ sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
+ sizeof(*sctx->clone_roots),
GFP_KERNEL);
if (!sctx->clone_roots) {
ret = -ENOMEM;
@@ -7425,24 +8229,17 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
if (ret)
goto out;
- spin_lock(&fs_info->send_reloc_lock);
- if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
- spin_unlock(&fs_info->send_reloc_lock);
- btrfs_warn_rl(fs_info,
- "cannot run send because a relocation operation is in progress");
- ret = -EAGAIN;
- goto out;
- }
- fs_info->send_in_progress++;
- spin_unlock(&fs_info->send_reloc_lock);
-
ret = send_subvol(sctx);
- spin_lock(&fs_info->send_reloc_lock);
- fs_info->send_in_progress--;
- spin_unlock(&fs_info->send_reloc_lock);
if (ret < 0)
goto out;
+ btrfs_lru_cache_for_each_entry_safe(&sctx->dir_utimes_cache, entry, tmp) {
+ ret = send_utimes(sctx, entry->key, entry->gen);
+ if (ret < 0)
+ goto out;
+ btrfs_lru_cache_remove(&sctx->dir_utimes_cache, entry);
+ }
+
if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
ret = begin_cmd(sctx, BTRFS_SEND_C_END);
if (ret < 0)
@@ -7518,9 +8315,19 @@ out:
fput(sctx->send_filp);
kvfree(sctx->clone_roots);
+ kfree(sctx->send_buf_pages);
kvfree(sctx->send_buf);
+ kvfree(sctx->verity_descriptor);
+
+ close_current_inode(sctx);
+
+ btrfs_lru_cache_clear(&sctx->name_cache);
+ btrfs_lru_cache_clear(&sctx->backref_cache);
+ btrfs_lru_cache_clear(&sctx->dir_created_cache);
+ btrfs_lru_cache_clear(&sctx->dir_utimes_cache);
- name_cache_free(sctx);
+ if (sctx->cur_inode_path.buf != sctx->cur_inode_path.inline_buf)
+ kfree(sctx->cur_inode_path.buf);
kfree(sctx);
}
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 23bcefc84e49..652bb28f63d4 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -7,12 +7,28 @@
#ifndef BTRFS_SEND_H
#define BTRFS_SEND_H
-#include "ctree.h"
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/align.h>
+
+struct btrfs_root;
+struct btrfs_ioctl_send_args;
#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
-#define BTRFS_SEND_STREAM_VERSION 1
+/* Conditional support for the upcoming protocol version. */
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+#define BTRFS_SEND_STREAM_VERSION 3
+#else
+#define BTRFS_SEND_STREAM_VERSION 2
+#endif
-#define BTRFS_SEND_BUF_SIZE SZ_64K
+/*
+ * In send stream v1, no command is larger than 64K. In send stream v2, no
+ * limit should be assumed, the buffer size is set to be a header with
+ * compressed extent size.
+ */
+#define BTRFS_SEND_BUF_SIZE_V1 SZ_64K
+#define BTRFS_SEND_BUF_SIZE_V2 ALIGN(SZ_16K + BTRFS_MAX_COMPRESSED, PAGE_SIZE)
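/*
 * Illustrative note (editorial, not part of the patch): assuming
 * BTRFS_MAX_COMPRESSED is SZ_128K and PAGE_SIZE is SZ_4K (both depend on the
 * build), the v2 buffer size works out to ALIGN(16K + 128K, 4K) = 144K,
 * i.e. room for a command header plus one maximally sized compressed extent
 * of an encoded write.
 */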
enum btrfs_tlv_type {
BTRFS_TLV_U8,
@@ -46,87 +62,126 @@ struct btrfs_tlv_header {
/* commands */
enum btrfs_send_cmd {
- BTRFS_SEND_C_UNSPEC,
+ BTRFS_SEND_C_UNSPEC = 0,
/* Version 1 */
- BTRFS_SEND_C_SUBVOL,
- BTRFS_SEND_C_SNAPSHOT,
+ BTRFS_SEND_C_SUBVOL = 1,
+ BTRFS_SEND_C_SNAPSHOT = 2,
- BTRFS_SEND_C_MKFILE,
- BTRFS_SEND_C_MKDIR,
- BTRFS_SEND_C_MKNOD,
- BTRFS_SEND_C_MKFIFO,
- BTRFS_SEND_C_MKSOCK,
- BTRFS_SEND_C_SYMLINK,
+ BTRFS_SEND_C_MKFILE = 3,
+ BTRFS_SEND_C_MKDIR = 4,
+ BTRFS_SEND_C_MKNOD = 5,
+ BTRFS_SEND_C_MKFIFO = 6,
+ BTRFS_SEND_C_MKSOCK = 7,
+ BTRFS_SEND_C_SYMLINK = 8,
- BTRFS_SEND_C_RENAME,
- BTRFS_SEND_C_LINK,
- BTRFS_SEND_C_UNLINK,
- BTRFS_SEND_C_RMDIR,
+ BTRFS_SEND_C_RENAME = 9,
+ BTRFS_SEND_C_LINK = 10,
+ BTRFS_SEND_C_UNLINK = 11,
+ BTRFS_SEND_C_RMDIR = 12,
- BTRFS_SEND_C_SET_XATTR,
- BTRFS_SEND_C_REMOVE_XATTR,
+ BTRFS_SEND_C_SET_XATTR = 13,
+ BTRFS_SEND_C_REMOVE_XATTR = 14,
- BTRFS_SEND_C_WRITE,
- BTRFS_SEND_C_CLONE,
+ BTRFS_SEND_C_WRITE = 15,
+ BTRFS_SEND_C_CLONE = 16,
- BTRFS_SEND_C_TRUNCATE,
- BTRFS_SEND_C_CHMOD,
- BTRFS_SEND_C_CHOWN,
- BTRFS_SEND_C_UTIMES,
+ BTRFS_SEND_C_TRUNCATE = 17,
+ BTRFS_SEND_C_CHMOD = 18,
+ BTRFS_SEND_C_CHOWN = 19,
+ BTRFS_SEND_C_UTIMES = 20,
- BTRFS_SEND_C_END,
- BTRFS_SEND_C_UPDATE_EXTENT,
- __BTRFS_SEND_C_MAX_V1,
+ BTRFS_SEND_C_END = 21,
+ BTRFS_SEND_C_UPDATE_EXTENT = 22,
+ BTRFS_SEND_C_MAX_V1 = 22,
/* Version 2 */
- __BTRFS_SEND_C_MAX_V2,
-
+ BTRFS_SEND_C_FALLOCATE = 23,
+ BTRFS_SEND_C_FILEATTR = 24,
+ BTRFS_SEND_C_ENCODED_WRITE = 25,
+ BTRFS_SEND_C_MAX_V2 = 25,
+
+ /* Version 3 */
+ BTRFS_SEND_C_ENABLE_VERITY = 26,
+ BTRFS_SEND_C_MAX_V3 = 26,
/* End */
- __BTRFS_SEND_C_MAX,
+ BTRFS_SEND_C_MAX = 26,
};
-#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
/* attributes in send stream */
enum {
- BTRFS_SEND_A_UNSPEC,
-
- BTRFS_SEND_A_UUID,
- BTRFS_SEND_A_CTRANSID,
-
- BTRFS_SEND_A_INO,
- BTRFS_SEND_A_SIZE,
- BTRFS_SEND_A_MODE,
- BTRFS_SEND_A_UID,
- BTRFS_SEND_A_GID,
- BTRFS_SEND_A_RDEV,
- BTRFS_SEND_A_CTIME,
- BTRFS_SEND_A_MTIME,
- BTRFS_SEND_A_ATIME,
- BTRFS_SEND_A_OTIME,
-
- BTRFS_SEND_A_XATTR_NAME,
- BTRFS_SEND_A_XATTR_DATA,
-
- BTRFS_SEND_A_PATH,
- BTRFS_SEND_A_PATH_TO,
- BTRFS_SEND_A_PATH_LINK,
-
- BTRFS_SEND_A_FILE_OFFSET,
- BTRFS_SEND_A_DATA,
-
- BTRFS_SEND_A_CLONE_UUID,
- BTRFS_SEND_A_CLONE_CTRANSID,
- BTRFS_SEND_A_CLONE_PATH,
- BTRFS_SEND_A_CLONE_OFFSET,
- BTRFS_SEND_A_CLONE_LEN,
-
- __BTRFS_SEND_A_MAX,
+ BTRFS_SEND_A_UNSPEC = 0,
+
+ /* Version 1 */
+ BTRFS_SEND_A_UUID = 1,
+ BTRFS_SEND_A_CTRANSID = 2,
+
+ BTRFS_SEND_A_INO = 3,
+ BTRFS_SEND_A_SIZE = 4,
+ BTRFS_SEND_A_MODE = 5,
+ BTRFS_SEND_A_UID = 6,
+ BTRFS_SEND_A_GID = 7,
+ BTRFS_SEND_A_RDEV = 8,
+ BTRFS_SEND_A_CTIME = 9,
+ BTRFS_SEND_A_MTIME = 10,
+ BTRFS_SEND_A_ATIME = 11,
+ BTRFS_SEND_A_OTIME = 12,
+
+ BTRFS_SEND_A_XATTR_NAME = 13,
+ BTRFS_SEND_A_XATTR_DATA = 14,
+
+ BTRFS_SEND_A_PATH = 15,
+ BTRFS_SEND_A_PATH_TO = 16,
+ BTRFS_SEND_A_PATH_LINK = 17,
+
+ BTRFS_SEND_A_FILE_OFFSET = 18,
+ /*
+ * As of send stream v2, this attribute is special: it must be the last
+ * attribute in a command, its header contains only the type, and its
+ * length is implicitly the remaining length of the command (see the
+ * illustrative note after this enum).
+ */
+ BTRFS_SEND_A_DATA = 19,
+
+ BTRFS_SEND_A_CLONE_UUID = 20,
+ BTRFS_SEND_A_CLONE_CTRANSID = 21,
+ BTRFS_SEND_A_CLONE_PATH = 22,
+ BTRFS_SEND_A_CLONE_OFFSET = 23,
+ BTRFS_SEND_A_CLONE_LEN = 24,
+
+ BTRFS_SEND_A_MAX_V1 = 24,
+
+ /* Version 2 */
+ BTRFS_SEND_A_FALLOCATE_MODE = 25,
+
+ /*
+ * File attributes from the FS_*_FL namespace (i_flags, xflags),
+ * translated to BTRFS_INODE_* bits (BTRFS_INODE_FLAG_MASK) and stored
+ * in btrfs_inode_item::flags (represented by btrfs_inode::flags and
+ * btrfs_inode::ro_flags).
+ */
+ BTRFS_SEND_A_FILEATTR = 26,
+
+ BTRFS_SEND_A_UNENCODED_FILE_LEN = 27,
+ BTRFS_SEND_A_UNENCODED_LEN = 28,
+ BTRFS_SEND_A_UNENCODED_OFFSET = 29,
+ /*
+ * COMPRESSION and ENCRYPTION default to NONE (0) if omitted from
+ * BTRFS_SEND_C_ENCODED_WRITE.
+ */
+ BTRFS_SEND_A_COMPRESSION = 30,
+ BTRFS_SEND_A_ENCRYPTION = 31,
+ BTRFS_SEND_A_MAX_V2 = 31,
+
+ /* Version 3 */
+ BTRFS_SEND_A_VERITY_ALGORITHM = 32,
+ BTRFS_SEND_A_VERITY_BLOCK_SIZE = 33,
+ BTRFS_SEND_A_VERITY_SALT_DATA = 34,
+ BTRFS_SEND_A_VERITY_SIG_DATA = 35,
+ BTRFS_SEND_A_MAX_V3 = 35,
+
+ __BTRFS_SEND_A_MAX = 35,
};
-#define BTRFS_SEND_A_MAX (__BTRFS_SEND_A_MAX - 1)
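/*
 * Illustrative sketch (editorial, not part of the patch) of how a stream v2
 * receiver could recover the implicit length of BTRFS_SEND_A_DATA; the names
 * cmd_len and attr_offset are hypothetical:
 *
 *	data_len = cmd_len - (attr_offset + sizeof(__le16));
 *
 * where cmd_len is the command payload length from the command header,
 * attr_offset is the offset of the DATA attribute inside that payload and
 * the __le16 is the attribute's type-only header.
 */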
-#ifdef __KERNEL__
-long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg);
-#endif
+long btrfs_ioctl_send(struct btrfs_root *send_root, const struct btrfs_ioctl_send_args *arg);
#endif
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 48d77f360a24..6babbe333741 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/spinlock.h>
+#include <linux/minmax.h>
#include "misc.h"
#include "ctree.h"
#include "space-info.h"
@@ -9,6 +11,11 @@
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "zoned.h"
+#include "delayed-inode.h"
/*
* HOW DOES SPACE RESERVATION WORK
@@ -44,11 +51,11 @@
* num_bytes we want to reserve.
*
* ->reserve
- * space_info->bytes_may_reserve += num_bytes
+ * space_info->bytes_may_use += num_bytes
*
* ->extent allocation
* Call btrfs_add_reserved_bytes() which does
- * space_info->bytes_may_reserve -= num_bytes
+ * space_info->bytes_may_use -= num_bytes
* space_info->bytes_reserved += extent_bytes
*
* ->insert reference
@@ -61,7 +68,7 @@
* Assume we are unable to simply make the reservation because we do not have
* enough space
*
- * -> __reserve_bytes
+ * -> reserve_bytes
* create a reserve_ticket with ->bytes set to our reservation, add it to
* the tail of space_info->tickets, kick async flush thread
*
@@ -122,6 +129,14 @@
* churn a lot and we can avoid making some extent tree modifications if we
* are able to delay for as long as possible.
*
+ * RESET_ZONES
+ * This state works only for the zoned mode. On the zoned mode, we cannot
+ * reuse a region that was once allocated and then freed until we reset the
+ * zone, due to the sequential write zone requirement. The RESET_ZONES state
+ * resets the zones of an unused block group and lets us reuse the space.
+ * Reusing is faster than removing the block group and allocating another
+ * block group on the zones.
+ *
* ALLOC_CHUNK
* We will skip this the first time through space reservation, because of
* overcommit and we don't want to have a lot of useless metadata space when
@@ -158,15 +173,14 @@
* thing with or without extra unallocated space.
*/
-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
- bool may_use_included)
-{
- ASSERT(s_info);
- return s_info->bytes_used + s_info->bytes_reserved +
- s_info->bytes_pinned + s_info->bytes_readonly +
- s_info->bytes_zone_unusable +
- (may_use_included ? s_info->bytes_may_use : 0);
-}
+struct reserve_ticket {
+ u64 bytes;
+ int error;
+ bool steal;
+ struct list_head list;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
/*
* after adding space to the filesystem, we need to clear the full flags
@@ -178,21 +192,53 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
struct btrfs_space_info *found;
list_for_each_entry(found, head, list)
- found->full = 0;
+ found->full = false;
}
-static int create_space_info(struct btrfs_fs_info *info, u64 flags)
+/*
+ * Block groups with more than this value (percents) of unusable space will be
+ * scheduled for background reclaim.
+ */
+#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
+
+#define BTRFS_UNALLOC_BLOCK_GROUP_TARGET (10ULL)
+
+/*
+ * Calculate chunk size depending on volume type (regular or zoned).
+ */
+static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
+ if (btrfs_is_zoned(fs_info))
+ return fs_info->zone_size;
- struct btrfs_space_info *space_info;
- int i;
- int ret;
+ ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK, "flags=%llu", flags);
- space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
- if (!space_info)
- return -ENOMEM;
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ return BTRFS_MAX_DATA_CHUNK_SIZE;
+ else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ return SZ_32M;
+
+ /* Handle BTRFS_BLOCK_GROUP_METADATA */
+ if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
+ return SZ_1G;
+
+ return SZ_256M;
+}
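/*
 * Illustrative note (editorial, not part of the patch): on a regular
 * (non-zoned) filesystem with more than 50G of writable space, the defaults
 * computed above would be BTRFS_MAX_DATA_CHUNK_SIZE for data (10G in current
 * code, an assumption here), 1G for metadata and 32M for system chunks;
 * smaller filesystems fall back to 256M metadata chunks.
 */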
- for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+/*
+ * Update default chunk size.
+ */
+void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
+ u64 chunk_size)
+{
+ WRITE_ONCE(space_info->chunk_size, chunk_size);
+}
+
+static void init_space_info(struct btrfs_fs_info *info,
+ struct btrfs_space_info *space_info, u64 flags)
+{
+ space_info->fs_info = info;
+ for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++)
INIT_LIST_HEAD(&space_info->block_groups[i]);
init_rwsem(&space_info->groups_sem);
spin_lock_init(&space_info->lock);
@@ -202,8 +248,68 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
INIT_LIST_HEAD(&space_info->tickets);
INIT_LIST_HEAD(&space_info->priority_tickets);
space_info->clamp = 1;
+ btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
+ space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY;
- ret = btrfs_sysfs_add_space_info_type(info, space_info);
+ if (btrfs_is_zoned(info))
+ space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
+}
+
+static int create_space_info_sub_group(struct btrfs_space_info *parent, u64 flags,
+ enum btrfs_space_info_sub_group id, int index)
+{
+ struct btrfs_fs_info *fs_info = parent->fs_info;
+ struct btrfs_space_info *sub_group;
+ int ret;
+
+ ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY,
+ "parent->subgroup_id=%d", parent->subgroup_id);
+ ASSERT(id != BTRFS_SUB_GROUP_PRIMARY, "id=%d", id);
+
+ sub_group = kzalloc(sizeof(*sub_group), GFP_NOFS);
+ if (!sub_group)
+ return -ENOMEM;
+
+ init_space_info(fs_info, sub_group, flags);
+ parent->sub_group[index] = sub_group;
+ sub_group->parent = parent;
+ sub_group->subgroup_id = id;
+
+ ret = btrfs_sysfs_add_space_info_type(sub_group);
+ if (ret) {
+ kfree(sub_group);
+ parent->sub_group[index] = NULL;
+ }
+ return ret;
+}
+
+static int create_space_info(struct btrfs_fs_info *info, u64 flags)
+{
+ struct btrfs_space_info *space_info;
+ int ret = 0;
+
+ space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
+ if (!space_info)
+ return -ENOMEM;
+
+ init_space_info(info, space_info, flags);
+
+ if (btrfs_is_zoned(info)) {
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ ret = create_space_info_sub_group(space_info, flags,
+ BTRFS_SUB_GROUP_DATA_RELOC,
+ 0);
+ else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ ret = create_space_info_sub_group(space_info, flags,
+ BTRFS_SUB_GROUP_TREELOG,
+ 0);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = btrfs_sysfs_add_space_info_type(space_info);
if (ret)
return ret;
@@ -251,30 +357,32 @@ out:
return ret;
}
-void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
- u64 total_bytes, u64 bytes_used,
- u64 bytes_readonly, u64 bytes_zone_unusable,
- struct btrfs_space_info **space_info)
+void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+ struct btrfs_block_group *block_group)
{
- struct btrfs_space_info *found;
- int factor;
+ struct btrfs_space_info *space_info = block_group->space_info;
+ int factor, index;
- factor = btrfs_bg_type_to_factor(flags);
-
- found = btrfs_find_space_info(info, flags);
- ASSERT(found);
- spin_lock(&found->lock);
- found->total_bytes += total_bytes;
- found->disk_total += total_bytes * factor;
- found->bytes_used += bytes_used;
- found->disk_used += bytes_used * factor;
- found->bytes_readonly += bytes_readonly;
- found->bytes_zone_unusable += bytes_zone_unusable;
- if (total_bytes > 0)
- found->full = 0;
- btrfs_try_granting_tickets(info, found);
- spin_unlock(&found->lock);
- *space_info = found;
+ factor = btrfs_bg_type_to_factor(block_group->flags);
+
+ spin_lock(&space_info->lock);
+ space_info->total_bytes += block_group->length;
+ space_info->disk_total += block_group->length * factor;
+ space_info->bytes_used += block_group->used;
+ space_info->disk_used += block_group->used * factor;
+ space_info->bytes_readonly += block_group->bytes_super;
+ btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable);
+ if (block_group->length > 0)
+ space_info->full = false;
+ btrfs_try_granting_tickets(space_info);
+ spin_unlock(&space_info->lock);
+
+ block_group->space_info = space_info;
+
+ index = btrfs_bg_flags_to_raid_index(block_group->flags);
+ down_write(&space_info->groups_sem);
+ list_add_tail(&block_group->list, &space_info->block_groups[index]);
+ up_write(&space_info->groups_sem);
}
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
@@ -292,12 +400,35 @@ struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
return NULL;
}
-static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- enum btrfs_reserve_flush_enum flush)
+static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *data_sinfo;
+ u64 data_chunk_size;
+
+ /*
+ * Calculate the data_chunk_size, space_info->chunk_size is the
+ * "optimal" chunk size based on the fs size. However when we actually
+ * allocate the chunk we will strip this down further, making it no
+ * more than 10% of the disk or 1G, whichever is smaller.
+ *
+ * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size)
+ * as it is.
+ */
+ data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
+ if (btrfs_is_zoned(fs_info))
+ return data_sinfo->chunk_size;
+ data_chunk_size = min(data_sinfo->chunk_size,
+ mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
+ return min_t(u64, data_chunk_size, SZ_1G);
+}
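/*
 * Illustrative example (editorial, not part of the patch; numbers are
 * assumptions): on a non-zoned 2T filesystem with a 10G data chunk_size,
 * 10% of total_rw_bytes is 200G, so the helper above returns
 * min(10G, 200G, 1G) = 1G as the effective data chunk size.
 */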
+
+static u64 calc_available_free_space(const struct btrfs_space_info *space_info,
+ enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 profile;
u64 avail;
+ u64 data_chunk_size;
int factor;
if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
@@ -315,6 +446,27 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
*/
factor = btrfs_bg_type_to_factor(profile);
avail = div_u64(avail, factor);
+ if (avail == 0)
+ return 0;
+
+ data_chunk_size = calc_effective_data_chunk_size(fs_info);
+
+ /*
+ * Since data allocations immediately use block groups as part of the
+ * reservation, because we assume that data reservations will == actual
+ * usage, we could potentially overcommit and then immediately have that
+ * available space used by a data allocation, which could put us in a
+ * bind when we get close to filling the file system.
+ *
+ * To handle this simply remove the data_chunk_size from the available
+ * space. If we are relatively empty this won't affect our ability to
+ * overcommit much, and if we're very close to full it'll keep us from
+ * getting into a position where we've given ourselves very little
+ * metadata wiggle room.
+ */
+ if (avail <= data_chunk_size)
+ return 0;
+ avail -= data_chunk_size;
/*
* If we aren't flushing all things, let us overcommit up to
@@ -325,47 +477,91 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
avail >>= 3;
else
avail >>= 1;
+
+ /*
+ * On the zoned mode, we always allocate one zone as one chunk.
+ * Returning non-zone-size-aligned bytes here will put less
+ * pressure on the async metadata reclaim process and make it
+ * over-commit too much, leading to ENOSPC. Align down to the
+ * zone size to avoid that.
+ */
+ if (btrfs_is_zoned(fs_info))
+ avail = ALIGN_DOWN(avail, fs_info->zone_size);
+
return avail;
}
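/*
 * Illustrative example (editorial, not part of the patch; numbers are
 * assumptions): with 12G of unallocated space on a single (factor 1) profile
 * and a 1G effective data chunk size, the code above leaves 11G and then
 * returns either 11G >> 3 (~1.4G) or 11G >> 1 (5.5G) of overcommit headroom,
 * depending on the flush mode of the reservation.
 */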
-int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 bytes,
- enum btrfs_reserve_flush_enum flush)
+static inline bool check_can_overcommit(const struct btrfs_space_info *space_info,
+ u64 space_info_used_bytes, u64 bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ const u64 avail = calc_available_free_space(space_info, flush);
+
+ return (space_info_used_bytes + bytes < space_info->total_bytes + avail);
+}
+
+static inline bool can_overcommit(const struct btrfs_space_info *space_info,
+ u64 space_info_used_bytes, u64 bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ /* Don't overcommit when in mixed mode. */
+ if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
+ return false;
+
+ return check_can_overcommit(space_info, space_info_used_bytes, bytes, flush);
+}
+
+bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes,
+ enum btrfs_reserve_flush_enum flush)
{
- u64 avail;
u64 used;
/* Don't overcommit when in mixed mode */
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
- return 0;
+ return false;
used = btrfs_space_info_used(space_info, true);
- avail = calc_available_free_space(fs_info, space_info, flush);
- if (used + bytes < space_info->total_bytes + avail)
- return 1;
- return 0;
+ return check_can_overcommit(space_info, used, bytes, flush);
}
static void remove_ticket(struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket)
+ struct reserve_ticket *ticket, int error)
{
+ lockdep_assert_held(&space_info->lock);
+
if (!list_empty(&ticket->list)) {
list_del_init(&ticket->list);
- ASSERT(space_info->reclaim_size >= ticket->bytes);
+ ASSERT(space_info->reclaim_size >= ticket->bytes,
+ "space_info->reclaim_size=%llu ticket->bytes=%llu",
+ space_info->reclaim_size, ticket->bytes);
space_info->reclaim_size -= ticket->bytes;
}
+
+ spin_lock(&ticket->lock);
+ /*
+ * If we are called from a task waiting on the ticket, it may happen
+ * that before it sets an error on the ticket, a reclaim task was able
+ * to satisfy the ticket. In that case ignore the error.
+ */
+ if (error && ticket->bytes > 0)
+ ticket->error = error;
+ else
+ ticket->bytes = 0;
+
+ wake_up(&ticket->wait);
+ spin_unlock(&ticket->lock);
}
/*
* This is for space we already have accounted in space_info->bytes_may_use, so
* basically when we're returning space from block_rsv's.
*/
-void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+void btrfs_try_granting_tickets(struct btrfs_space_info *space_info)
{
struct list_head *head;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
+ u64 used = btrfs_space_info_used(space_info, true);
lockdep_assert_held(&space_info->lock);
@@ -373,21 +569,18 @@ void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
again:
while (!list_empty(head)) {
struct reserve_ticket *ticket;
- u64 used = btrfs_space_info_used(space_info, true);
+ u64 used_after;
ticket = list_first_entry(head, struct reserve_ticket, list);
+ used_after = used + ticket->bytes;
/* Check and see if our ticket can be satisfied now. */
- if ((used + ticket->bytes <= space_info->total_bytes) ||
- btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
- flush)) {
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info,
- ticket->bytes);
- remove_ticket(space_info, ticket);
- ticket->bytes = 0;
+ if (used_after <= space_info->total_bytes ||
+ can_overcommit(space_info, used, ticket->bytes, flush)) {
+ btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes);
+ remove_ticket(space_info, ticket, 0);
space_info->tickets_id++;
- wake_up(&ticket->wait);
+ used = used_after;
} else {
break;
}
@@ -409,39 +602,61 @@ do { \
spin_unlock(&__rsv->lock); \
} while (0)
-static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info)
+static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
{
+ switch (space_info->flags) {
+ case BTRFS_BLOCK_GROUP_SYSTEM:
+ return "SYSTEM";
+ case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
+ return "DATA+METADATA";
+ case BTRFS_BLOCK_GROUP_DATA:
+ return "DATA";
+ case BTRFS_BLOCK_GROUP_METADATA:
+ return "METADATA";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
+{
+ DUMP_BLOCK_RSV(fs_info, global_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
+}
+
+static void __btrfs_dump_space_info(const struct btrfs_space_info *info)
+{
+ const struct btrfs_fs_info *fs_info = info->fs_info;
+ const char *flag_str = space_info_flag_to_str(info);
lockdep_assert_held(&info->lock);
/* The free space could be negative in case of overcommit */
- btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
- info->flags,
+ btrfs_info(fs_info,
+ "space_info %s (sub-group id %d) has %lld free, is %sfull",
+ flag_str, info->subgroup_id,
(s64)(info->total_bytes - btrfs_space_info_used(info, true)),
info->full ? "" : "not ");
btrfs_info(fs_info,
- "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
+"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
info->total_bytes, info->bytes_used, info->bytes_pinned,
info->bytes_reserved, info->bytes_may_use,
info->bytes_readonly, info->bytes_zone_unusable);
-
- DUMP_BLOCK_RSV(fs_info, global_block_rsv);
- DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
- DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
- DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
- DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
-
}
-void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info, u64 bytes,
- int dump_block_groups)
+void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes,
+ bool dump_block_groups)
{
+ struct btrfs_fs_info *fs_info = info->fs_info;
struct btrfs_block_group *cache;
+ u64 total_avail = 0;
int index = 0;
spin_lock(&info->lock);
- __btrfs_dump_space_info(fs_info, info);
+ __btrfs_dump_space_info(info);
+ dump_global_block_rsv(fs_info);
spin_unlock(&info->lock);
if (!dump_block_groups)
@@ -450,21 +665,29 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
down_read(&info->groups_sem);
again:
list_for_each_entry(cache, &info->block_groups[index], list) {
+ u64 avail;
+
spin_lock(&cache->lock);
+ avail = cache->length - cache->used - cache->pinned -
+ cache->reserved - cache->bytes_super - cache->zone_unusable;
btrfs_info(fs_info,
- "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
- cache->start, cache->length, cache->used, cache->pinned,
- cache->reserved, cache->zone_unusable,
- cache->ro ? "[readonly]" : "");
+"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
+ cache->start, cache->length, cache->used, cache->pinned,
+ cache->reserved, cache->delalloc_bytes,
+ cache->bytes_super, cache->zone_unusable,
+ avail, cache->ro ? "[readonly]" : "");
spin_unlock(&cache->lock);
btrfs_dump_free_space(cache, bytes);
+ total_avail += avail;
}
if (++index < BTRFS_NR_RAID_TYPES)
goto again;
up_read(&info->groups_sem);
+
+ btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
}
-static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
+static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
u64 to_reclaim)
{
u64 bytes;
@@ -477,16 +700,14 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
return nr;
}
-#define EXTENT_SIZE_PER_ITEM SZ_256K
-
/*
* shrink metadata reservation for delalloc
*/
-static void shrink_delalloc(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static void shrink_delalloc(struct btrfs_space_info *space_info,
u64 to_reclaim, bool wait_ordered,
bool for_preempt)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
u64 ordered_bytes;
@@ -519,7 +740,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
}
- trans = (struct btrfs_trans_handle *)current->journal_info;
+ trans = current->journal_info;
/*
* If we are doing more ordered than delalloc we need to just wait on
@@ -578,7 +799,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
skip_async:
loops++;
if (wait_ordered && !trans) {
- btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, items, NULL);
} else {
time_left = schedule_timeout_killable(1);
if (time_left)
@@ -613,11 +834,11 @@ skip_async:
* and may fail for various reasons. The caller is supposed to examine the
* state of @space_info to detect the outcome.
*/
-static void flush_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 num_bytes,
- enum btrfs_flush_state state, bool for_preempt)
+static void flush_space(struct btrfs_space_info *space_info, u64 num_bytes,
+ enum btrfs_flush_state state, bool for_preempt)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
int nr;
int ret = 0;
@@ -630,9 +851,11 @@ static void flush_space(struct btrfs_fs_info *fs_info,
else
nr = -1;
- trans = btrfs_join_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ if (ret == -ENOENT)
+ ret = 0;
break;
}
ret = btrfs_run_delayed_items_nr(trans, nr);
@@ -643,21 +866,22 @@ static void flush_space(struct btrfs_fs_info *fs_info,
case FLUSH_DELALLOC_FULL:
if (state == FLUSH_DELALLOC_FULL)
num_bytes = U64_MAX;
- shrink_delalloc(fs_info, space_info, num_bytes,
+ shrink_delalloc(space_info, num_bytes,
state != FLUSH_DELALLOC, for_preempt);
break;
case FLUSH_DELAYED_REFS_NR:
case FLUSH_DELAYED_REFS:
- trans = btrfs_join_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ if (ret == -ENOENT)
+ ret = 0;
break;
}
if (state == FLUSH_DELAYED_REFS_NR)
- nr = calc_reclaim_items_nr(fs_info, num_bytes);
+ btrfs_run_delayed_refs(trans, num_bytes);
else
- nr = 0;
- btrfs_run_delayed_refs(trans, nr);
+ btrfs_run_delayed_refs(trans, 0);
btrfs_end_transaction(trans);
break;
case ALLOC_CHUNK:
@@ -667,11 +891,12 @@ static void flush_space(struct btrfs_fs_info *fs_info,
ret = PTR_ERR(trans);
break;
}
- ret = btrfs_chunk_alloc(trans,
+ ret = btrfs_chunk_alloc(trans, space_info,
btrfs_get_alloc_profile(fs_info, space_info->flags),
(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
CHUNK_ALLOC_FORCE);
btrfs_end_transaction(trans);
+
if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
@@ -686,12 +911,17 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
case COMMIT_TRANS:
ASSERT(current->journal_info == NULL);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
- ret = btrfs_commit_transaction(trans);
+ /*
+ * We don't want to start a new transaction, just attach to the
+ * current one or wait for it to fully commit in case its commit is
+ * happening at the moment. Note: we don't use a nostart join
+ * because that does not wait for a transaction to fully commit
+ * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
+ */
+ ret = btrfs_commit_current_transaction(root);
+ break;
+ case RESET_ZONES:
+ ret = btrfs_reset_unused_block_groups(space_info, num_bytes);
break;
default:
ret = -ENOSPC;
@@ -703,9 +933,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
return;
}
-static inline u64
-btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static u64 btrfs_calc_reclaim_metadata_size(const struct btrfs_space_info *space_info)
{
u64 used;
u64 avail;
@@ -713,8 +941,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
lockdep_assert_held(&space_info->lock);
- avail = calc_available_free_space(fs_info, space_info,
- BTRFS_RESERVE_FLUSH_ALL);
+ avail = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
used = btrfs_space_info_used(space_info, true);
/*
@@ -729,14 +956,25 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
return to_reclaim;
}
-static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static bool need_preemptive_reclaim(const struct btrfs_space_info *space_info)
{
- u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
u64 ordered, delalloc;
- u64 thresh = div_factor_fine(space_info->total_bytes, 90);
+ u64 thresh;
u64 used;
+ lockdep_assert_held(&space_info->lock);
+
+ /*
+ * We have tickets queued, bail so we don't compete with the async
+ * flushers.
+ */
+ if (space_info->reclaim_size)
+ return false;
+
+ thresh = mult_perc(space_info->total_bytes, 90);
+
/* If we're just plain full then async reclaim just slows us down. */
if ((space_info->bytes_used + space_info->bytes_reserved +
global_rsv_size) >= thresh)
@@ -757,13 +995,6 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
return false;
/*
- * We have tickets queued, bail so we don't compete with the async
- * flushers.
- */
- if (space_info->reclaim_size)
- return false;
-
- /*
* If we have over half of the free space occupied by reservations or
* pinned then we want to start flushing.
*
@@ -792,8 +1023,7 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
* much delalloc we need for the background flusher to kick in.
*/
- thresh = calc_available_free_space(fs_info, space_info,
- BTRFS_RESERVE_FLUSH_ALL);
+ thresh = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_readonly + global_rsv_size;
if (used < space_info->total_bytes)
@@ -828,8 +1058,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
if (ordered >= delalloc)
- used += fs_info->delayed_refs_rsv.reserved +
- fs_info->delayed_block_rsv.reserved;
+ used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
+ btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
else
used += space_info->bytes_may_use - global_rsv_size;
@@ -837,37 +1067,41 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
-static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static bool steal_from_global_rsv(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 min_bytes;
+ lockdep_assert_held(&space_info->lock);
+
+ if (!ticket->steal)
+ return false;
+
if (global_rsv->space_info != space_info)
return false;
spin_lock(&global_rsv->lock);
- min_bytes = div_factor(global_rsv->size, 1);
+ min_bytes = mult_perc(global_rsv->size, 10);
if (global_rsv->reserved < min_bytes + ticket->bytes) {
spin_unlock(&global_rsv->lock);
return false;
}
global_rsv->reserved -= ticket->bytes;
- remove_ticket(space_info, ticket);
- ticket->bytes = 0;
- wake_up(&ticket->wait);
- space_info->tickets_id++;
if (global_rsv->reserved < global_rsv->size)
- global_rsv->full = 0;
+ global_rsv->full = false;
spin_unlock(&global_rsv->lock);
+ remove_ticket(space_info, ticket, 0);
+ space_info->tickets_id++;
+
return true;
}
/*
- * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
- * @fs_info - fs_info for this fs
+ * We've exhausted our flushing, start failing tickets.
+ *
* @space_info - the space info we were flushing
*
* We call this when we've exhausted our flushing ability and haven't made
@@ -880,73 +1114,66 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
* other tickets, or if it stumbles across a ticket that was smaller than the
* first ticket.
*/
-static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static bool maybe_fail_all_tickets(struct btrfs_space_info *space_info)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct reserve_ticket *ticket;
u64 tickets_id = space_info->tickets_id;
- const bool aborted = BTRFS_FS_ERROR(fs_info);
+ const int abort_error = BTRFS_FS_ERROR(fs_info);
trace_btrfs_fail_all_tickets(fs_info, space_info);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
- __btrfs_dump_space_info(fs_info, space_info);
+ __btrfs_dump_space_info(space_info);
}
while (!list_empty(&space_info->tickets) &&
tickets_id == space_info->tickets_id) {
ticket = list_first_entry(&space_info->tickets,
struct reserve_ticket, list);
+ if (unlikely(abort_error)) {
+ remove_ticket(space_info, ticket, abort_error);
+ } else {
+ if (steal_from_global_rsv(space_info, ticket))
+ return true;
- if (!aborted && ticket->steal &&
- steal_from_global_rsv(fs_info, space_info, ticket))
- return true;
-
- if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_info(fs_info, "failing ticket with %llu bytes",
- ticket->bytes);
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
+ btrfs_info(fs_info, "failing ticket with %llu bytes",
+ ticket->bytes);
- remove_ticket(space_info, ticket);
- if (aborted)
- ticket->error = -EIO;
- else
- ticket->error = -ENOSPC;
- wake_up(&ticket->wait);
+ remove_ticket(space_info, ticket, -ENOSPC);
- /*
- * We're just throwing tickets away, so more flushing may not
- * trip over btrfs_try_granting_tickets, so we need to call it
- * here to see if we can make progress with the next ticket in
- * the list.
- */
- if (!aborted)
- btrfs_try_granting_tickets(fs_info, space_info);
+ /*
+ * We're just throwing tickets away, so more flushing may
+ * not trip over btrfs_try_granting_tickets, so we need
+ * to call it here to see if we can make progress with
+ * the next ticket in the list.
+ */
+ btrfs_try_granting_tickets(space_info);
+ }
}
return (tickets_id != space_info->tickets_id);
}
-/*
- * This is for normal flushers, we can wait all goddamned day if we want to. We
- * will loop and continuously try to flush as long as we are making progress.
- * We count progress as clearing off tickets each time we have to loop.
- */
-static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+static void do_async_reclaim_metadata_space(struct btrfs_space_info *space_info)
{
- struct btrfs_fs_info *fs_info;
- struct btrfs_space_info *space_info;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 to_reclaim;
enum btrfs_flush_state flush_state;
int commit_cycles = 0;
u64 last_tickets_id;
+ enum btrfs_flush_state final_state;
- fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
- space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ if (btrfs_is_zoned(fs_info))
+ final_state = RESET_ZONES;
+ else
+ final_state = COMMIT_TRANS;
spin_lock(&space_info->lock);
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
if (!to_reclaim) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
@@ -955,15 +1182,14 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
flush_state = FLUSH_DELAYED_ITEMS_NR;
do {
- flush_space(fs_info, space_info, to_reclaim, flush_state, false);
+ flush_space(space_info, to_reclaim, flush_state, false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
- space_info);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
if (last_tickets_id == space_info->tickets_id) {
flush_state++;
} else {
@@ -994,21 +1220,40 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
flush_state++;
- if (flush_state > COMMIT_TRANS) {
+ if (flush_state > final_state) {
commit_cycles++;
if (commit_cycles > 2) {
- if (maybe_fail_all_tickets(fs_info, space_info)) {
+ if (maybe_fail_all_tickets(space_info)) {
flush_state = FLUSH_DELAYED_ITEMS_NR;
commit_cycles--;
} else {
- space_info->flush = 0;
+ space_info->flush = false;
}
} else {
flush_state = FLUSH_DELAYED_ITEMS_NR;
}
}
spin_unlock(&space_info->lock);
- } while (flush_state <= COMMIT_TRANS);
+ } while (flush_state <= final_state);
+}
+
+/*
+ * This is for normal flushers, it can wait as much time as needed. We will
+ * loop and continuously try to flush as long as we are making progress. We
+ * count progress as clearing off tickets each time we have to loop.
+ */
+static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *space_info;
+
+ fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
+ space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ do_async_reclaim_metadata_space(space_info);
+ for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
+ if (space_info->sub_group[i])
+ do_async_reclaim_metadata_space(space_info->sub_group[i]);
+ }
}
/*
@@ -1038,14 +1283,15 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
trans_rsv = &fs_info->trans_block_rsv;
spin_lock(&space_info->lock);
- while (need_preemptive_reclaim(fs_info, space_info)) {
+ while (need_preemptive_reclaim(space_info)) {
enum btrfs_flush_state flush;
u64 delalloc_size = 0;
u64 to_reclaim, block_rsv_size;
- u64 global_rsv_size = global_rsv->reserved;
-
- loops++;
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
+ const u64 bytes_may_use = space_info->bytes_may_use;
+ const u64 bytes_pinned = space_info->bytes_pinned;
+ spin_unlock(&space_info->lock);
/*
* We don't have a precise counter for the metadata being
* reserved for delalloc, so we'll approximate it by subtracting
@@ -1054,12 +1300,11 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* assume it's tied up in delalloc reservations.
*/
block_rsv_size = global_rsv_size +
- delayed_block_rsv->reserved +
- delayed_refs_rsv->reserved +
- trans_rsv->reserved;
- if (block_rsv_size < space_info->bytes_may_use)
- delalloc_size = space_info->bytes_may_use - block_rsv_size;
- spin_unlock(&space_info->lock);
+ btrfs_block_rsv_reserved(delayed_block_rsv) +
+ btrfs_block_rsv_reserved(delayed_refs_rsv) +
+ btrfs_block_rsv_reserved(trans_rsv);
+ if (block_rsv_size < bytes_may_use)
+ delalloc_size = bytes_may_use - block_rsv_size;
/*
* We don't want to include the global_rsv in our calculation,
@@ -1076,20 +1321,22 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
if (delalloc_size > block_rsv_size) {
to_reclaim = delalloc_size;
flush = FLUSH_DELALLOC;
- } else if (space_info->bytes_pinned >
- (delayed_block_rsv->reserved +
- delayed_refs_rsv->reserved)) {
- to_reclaim = space_info->bytes_pinned;
+ } else if (bytes_pinned >
+ (btrfs_block_rsv_reserved(delayed_block_rsv) +
+ btrfs_block_rsv_reserved(delayed_refs_rsv))) {
+ to_reclaim = bytes_pinned;
flush = COMMIT_TRANS;
- } else if (delayed_block_rsv->reserved >
- delayed_refs_rsv->reserved) {
- to_reclaim = delayed_block_rsv->reserved;
+ } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
+ btrfs_block_rsv_reserved(delayed_refs_rsv)) {
+ to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
flush = FLUSH_DELAYED_ITEMS_NR;
} else {
- to_reclaim = delayed_refs_rsv->reserved;
+ to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
flush = FLUSH_DELAYED_REFS_NR;
}
+ loops++;
+
/*
* We don't want to reclaim everything, just a portion, so scale
* down the to_reclaim by 1/4. If it takes us down to 0,
@@ -1098,7 +1345,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
to_reclaim >>= 2;
if (!to_reclaim)
to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
- flush_space(fs_info, space_info, to_reclaim, flush, true);
+ flush_space(space_info, to_reclaim, flush, true);
cond_resched();
spin_lock(&space_info->lock);
}
@@ -1131,13 +1378,17 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* If we are freeing inodes, we want to make sure all delayed iputs have
* completed, because they could have been on an inode with i_nlink == 0, and
* thus have been truncated and freed up space. But again this space is not
- * immediately re-usable, it comes in the form of a delayed ref, which must be
+ * immediately reusable, it comes in the form of a delayed ref, which must be
* run and then the transaction must be committed.
*
* COMMIT_TRANS
* This is where we reclaim all of the pinned space generated by running the
* iputs
*
+ * RESET_ZONES
+ * This state works only for the zoned mode. We scan the unused block group
+ * list and reset the zones and reuse the block group.
+ *
* ALLOC_CHUNK_FORCE
* For data we start with alloc chunk force, however we could have been full
* before, and then the transaction commit could have freed new block groups,
@@ -1147,22 +1398,19 @@ static const enum btrfs_flush_state data_flush_states[] = {
FLUSH_DELALLOC_FULL,
RUN_DELAYED_IPUTS,
COMMIT_TRANS,
+ RESET_ZONES,
ALLOC_CHUNK_FORCE,
};
-static void btrfs_async_reclaim_data_space(struct work_struct *work)
+static void do_async_reclaim_data_space(struct btrfs_space_info *space_info)
{
- struct btrfs_fs_info *fs_info;
- struct btrfs_space_info *space_info;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 last_tickets_id;
enum btrfs_flush_state flush_state = 0;
- fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
- space_info = fs_info->data_sinfo;
-
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
@@ -1170,27 +1418,27 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
spin_unlock(&space_info->lock);
while (!space_info->full) {
- flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
+ flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
/* Something happened, fail everything and bail. */
- if (BTRFS_FS_ERROR(fs_info))
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
goto aborted_fs;
last_tickets_id = space_info->tickets_id;
spin_unlock(&space_info->lock);
}
while (flush_state < ARRAY_SIZE(data_flush_states)) {
- flush_space(fs_info, space_info, U64_MAX,
+ flush_space(space_info, U64_MAX,
data_flush_states[flush_state], false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
- space_info->flush = 0;
+ space_info->flush = false;
spin_unlock(&space_info->lock);
return;
}
@@ -1204,16 +1452,16 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
if (flush_state >= ARRAY_SIZE(data_flush_states)) {
if (space_info->full) {
- if (maybe_fail_all_tickets(fs_info, space_info))
+ if (maybe_fail_all_tickets(space_info))
flush_state = 0;
else
- space_info->flush = 0;
+ space_info->flush = false;
} else {
flush_state = 0;
}
/* Something happened, fail everything and bail. */
- if (BTRFS_FS_ERROR(fs_info))
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
goto aborted_fs;
}
@@ -1222,11 +1470,24 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
return;
aborted_fs:
- maybe_fail_all_tickets(fs_info, space_info);
- space_info->flush = 0;
+ maybe_fail_all_tickets(space_info);
+ space_info->flush = false;
spin_unlock(&space_info->lock);
}
+static void btrfs_async_reclaim_data_space(struct work_struct *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *space_info;
+
+ fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
+ space_info = fs_info->data_sinfo;
+ do_async_reclaim_data_space(space_info);
+ for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++)
+ if (space_info->sub_group[i])
+ do_async_reclaim_data_space(space_info->sub_group[i]);
+}
+
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
@@ -1238,6 +1499,7 @@ void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
static const enum btrfs_flush_state priority_flush_states[] = {
FLUSH_DELAYED_ITEMS_NR,
FLUSH_DELAYED_ITEMS,
+ RESET_ZONES,
ALLOC_CHUNK,
};
@@ -1251,65 +1513,105 @@ static const enum btrfs_flush_state evict_flush_states[] = {
FLUSH_DELALLOC_FULL,
ALLOC_CHUNK,
COMMIT_TRANS,
+ RESET_ZONES,
};
-static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket,
- const enum btrfs_flush_state *states,
- int states_nr)
+static bool is_ticket_served(struct reserve_ticket *ticket)
+{
+ bool ret;
+
+ spin_lock(&ticket->lock);
+ ret = (ticket->bytes == 0);
+ spin_unlock(&ticket->lock);
+
+ return ret;
+}
+
+static void priority_reclaim_metadata_space(struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket,
+ const enum btrfs_flush_state *states,
+ int states_nr)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 to_reclaim;
- int flush_state;
+ int flush_state = 0;
- spin_lock(&space_info->lock);
- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
- if (!to_reclaim) {
- spin_unlock(&space_info->lock);
+ /*
+ * This is the priority reclaim path, so to_reclaim could be >0 still
+ * because we may have only satisfied the priority tickets and still
+ * left non-priority tickets on the list. We would then have
+ * to_reclaim but ->bytes == 0.
+ */
+ if (is_ticket_served(ticket))
return;
- }
+
+ spin_lock(&space_info->lock);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
spin_unlock(&space_info->lock);
- flush_state = 0;
- do {
- flush_space(fs_info, space_info, to_reclaim, states[flush_state],
- false);
- flush_state++;
- spin_lock(&space_info->lock);
- if (ticket->bytes == 0) {
- spin_unlock(&space_info->lock);
+ while (flush_state < states_nr) {
+ flush_space(space_info, to_reclaim, states[flush_state], false);
+ if (is_ticket_served(ticket))
return;
- }
- spin_unlock(&space_info->lock);
- } while (flush_state < states_nr);
+ flush_state++;
+ }
+
+ spin_lock(&space_info->lock);
+ /*
+ * Attempt to steal from the global rsv if we can, except if the fs was
+ * turned into error mode due to a transaction abort when flushing space
+ * above. In that case fail with the abort error instead of returning
+ * success to the caller even if we could steal from the global rsv - this
+ * is just to have the caller fail immediately instead of later when trying
+ * to modify the fs, making it easier to debug -ENOSPC problems.
+ */
+ if (unlikely(BTRFS_FS_ERROR(fs_info)))
+ remove_ticket(space_info, ticket, BTRFS_FS_ERROR(fs_info));
+ else if (!steal_from_global_rsv(space_info, ticket))
+ remove_ticket(space_info, ticket, -ENOSPC);
+
+ /*
+ * We must run try_granting_tickets here because we could be a large
+ * ticket in front of a smaller ticket that can now be satisfied with
+ * the available space.
+ */
+ btrfs_try_granting_tickets(space_info);
+ spin_unlock(&space_info->lock);
}
-static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static void priority_reclaim_data_space(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
+ /* We could have been granted before we got here. */
+ if (is_ticket_served(ticket))
+ return;
+
+ spin_lock(&space_info->lock);
while (!space_info->full) {
- flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
- spin_lock(&space_info->lock);
- if (ticket->bytes == 0) {
- spin_unlock(&space_info->lock);
- return;
- }
spin_unlock(&space_info->lock);
+ flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
+ if (is_ticket_served(ticket))
+ return;
+ spin_lock(&space_info->lock);
}
+
+ remove_ticket(space_info, ticket, -ENOSPC);
+ btrfs_try_granting_tickets(space_info);
+ spin_unlock(&space_info->lock);
}
-static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static void wait_reserve_ticket(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
DEFINE_WAIT(wait);
- int ret = 0;
- spin_lock(&space_info->lock);
+ spin_lock(&ticket->lock);
while (ticket->bytes > 0 && ticket->error == 0) {
+ int ret;
+
ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
+ spin_unlock(&ticket->lock);
if (ret) {
/*
* Delete us from the list. After we unlock the space
@@ -1319,24 +1621,23 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
* despite getting an error, resulting in a space leak
* (bytes_may_use counter of our space_info).
*/
- remove_ticket(space_info, ticket);
- ticket->error = -EINTR;
- break;
+ spin_lock(&space_info->lock);
+ remove_ticket(space_info, ticket, -EINTR);
+ spin_unlock(&space_info->lock);
+ return;
}
- spin_unlock(&space_info->lock);
schedule();
finish_wait(&ticket->wait, &wait);
- spin_lock(&space_info->lock);
+ spin_lock(&ticket->lock);
}
- spin_unlock(&space_info->lock);
+ spin_unlock(&ticket->lock);
}
-/**
- * Do the appropriate flushing and waiting for a ticket
+/*
+ * Do the appropriate flushing and waiting for a ticket.
*
- * @fs_info: the filesystem
* @space_info: space info for the reservation
* @ticket: ticket for the reservation
* @start_ns: timestamp when the reservation started
@@ -1346,8 +1647,7 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
* This does the work of figuring out how to flush for the ticket, waiting for
* the reservation, and returning the appropriate error if there is one.
*/
-static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static int handle_reserve_ticket(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket,
u64 start_ns, u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
@@ -1358,45 +1658,27 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
case BTRFS_RESERVE_FLUSH_DATA:
case BTRFS_RESERVE_FLUSH_ALL:
case BTRFS_RESERVE_FLUSH_ALL_STEAL:
- wait_reserve_ticket(fs_info, space_info, ticket);
+ wait_reserve_ticket(space_info, ticket);
break;
case BTRFS_RESERVE_FLUSH_LIMIT:
- priority_reclaim_metadata_space(fs_info, space_info, ticket,
+ priority_reclaim_metadata_space(space_info, ticket,
priority_flush_states,
ARRAY_SIZE(priority_flush_states));
break;
case BTRFS_RESERVE_FLUSH_EVICT:
- priority_reclaim_metadata_space(fs_info, space_info, ticket,
+ priority_reclaim_metadata_space(space_info, ticket,
evict_flush_states,
ARRAY_SIZE(evict_flush_states));
break;
case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
- priority_reclaim_data_space(fs_info, space_info, ticket);
+ priority_reclaim_data_space(space_info, ticket);
break;
default:
- ASSERT(0);
+ ASSERT(0, "flush=%d", flush);
break;
}
- spin_lock(&space_info->lock);
ret = ticket->error;
- if (ticket->bytes || ticket->error) {
- /*
- * We were a priority ticket, so we need to delete ourselves
- * from the list. Because we could have other priority tickets
- * behind us that require less space, run
- * btrfs_try_granting_tickets() to see if their reservations can
- * now be made.
- */
- if (!list_empty(&ticket->list)) {
- remove_ticket(space_info, ticket);
- btrfs_try_granting_tickets(fs_info, space_info);
- }
-
- if (!ret)
- ret = -ENOSPC;
- }
- spin_unlock(&space_info->lock);
ASSERT(list_empty(&ticket->list));
/*
* Check that we can't have an error set if the reservation succeeded,
@@ -1404,9 +1686,10 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
* releasing reserved space (if an error happens the expectation is that
* space wasn't reserved at all).
*/
- ASSERT(!(ticket->bytes == 0 && ticket->error));
- trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
- start_ns, flush, ticket->error);
+ ASSERT(!(ticket->bytes == 0 && ticket->error),
+ "ticket->bytes=%llu ticket->error=%d", ticket->bytes, ticket->error);
+ trace_btrfs_reserve_ticket(space_info->fs_info, space_info->flags,
+ orig_bytes, start_ns, flush, ticket->error);
return ret;
}
@@ -1420,9 +1703,9 @@ static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}
-static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static inline void maybe_clamp_preempt(struct btrfs_space_info *space_info)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
@@ -1438,10 +1721,25 @@ static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
space_info->clamp = min(space_info->clamp + 1, 8);
}
-/**
- * Try to reserve bytes from the block_rsv's space
+static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
+{
+ return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
+ flush == BTRFS_RESERVE_FLUSH_EVICT);
+}
+
+/*
+ * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to
+ * fail as quickly as possible.
+ */
+static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
+{
+ return (flush != BTRFS_RESERVE_NO_FLUSH &&
+ flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
+}
+
+/*
+ * Try to reserve bytes from the block_rsv's space.
*
- * @fs_info: the filesystem
* @space_info: space info we want to allocate from
* @orig_bytes: number of bytes we want
* @flush: whether or not we can flush to make our reservation
@@ -1453,19 +1751,30 @@ static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-static int __reserve_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 orig_bytes,
- enum btrfs_reserve_flush_enum flush)
+static int reserve_bytes(struct btrfs_space_info *space_info, u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
struct work_struct *async_work;
struct reserve_ticket ticket;
u64 start_ns = 0;
u64 used;
- int ret = 0;
+ int ret = -ENOSPC;
bool pending_tickets;
- ASSERT(orig_bytes);
- ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
+ ASSERT(orig_bytes, "orig_bytes=%llu", orig_bytes);
+ /*
+ * If we have a transaction handle (current->journal_info != NULL), then
+ * the flush method must not be BTRFS_RESERVE_FLUSH_ALL* or
+ * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
+ * flushing methods can trigger transaction commits.
+ */
+ if (current->journal_info) {
+ /* One assert per line for easier debugging. */
+ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL, "flush=%d", flush);
+ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL, "flush=%d", flush);
+ ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT, "flush=%d", flush);
+ }
if (flush == BTRFS_RESERVE_FLUSH_DATA)
async_work = &fs_info->async_data_reclaim_work;
@@ -1473,7 +1782,6 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
async_work = &fs_info->async_reclaim_work;
spin_lock(&space_info->lock);
- ret = -ENOSPC;
used = btrfs_space_info_used(space_info, true);
/*
@@ -1493,25 +1801,38 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
*/
if (!pending_tickets &&
((used + orig_bytes <= space_info->total_bytes) ||
- btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
- btrfs_space_info_update_bytes_may_use(fs_info, space_info,
- orig_bytes);
+ can_overcommit(space_info, used, orig_bytes, flush))) {
+ btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
ret = 0;
}
/*
+ * Things are dire, we need to make a reservation so we don't abort. We
+ * will let this reservation go through as long as we have actual space
+ * left to allocate for the block.
+ */
+ if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
+ used -= space_info->bytes_may_use;
+ if (used + orig_bytes <= space_info->total_bytes) {
+ btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
+ ret = 0;
+ }
+ }
+
+ /*
* If we couldn't make a reservation then setup our reservation ticket
* and kick the async worker if it's not already running.
*
* If we are a priority flusher then we just need to add our ticket to
* the list and we will do our own flushing further down.
*/
- if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
+ if (ret && can_ticket(flush)) {
ticket.bytes = orig_bytes;
ticket.error = 0;
space_info->reclaim_size += ticket.bytes;
init_waitqueue_head(&ticket.wait);
- ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
+ spin_lock_init(&ticket.lock);
+ ticket.steal = can_steal(flush);
if (trace_btrfs_reserve_ticket_enabled())
start_ns = ktime_get_ns();
@@ -1527,21 +1848,20 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* preemptive flushing in order to keep up with
* the workload.
*/
- maybe_clamp_preempt(fs_info, space_info);
+ maybe_clamp_preempt(space_info);
- space_info->flush = 1;
+ space_info->flush = true;
trace_btrfs_trigger_flush(fs_info,
space_info->flags,
orig_bytes, flush,
"enospc");
- queue_work(system_unbound_wq, async_work);
+ queue_work(system_dfl_wq, async_work);
}
} else {
list_add_tail(&ticket.list,
&space_info->priority_tickets);
}
} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
- used += orig_bytes;
/*
* We will do the space reservation dance during log replay,
* which means we won't have fs_info->fs_root set, so don't do
@@ -1549,26 +1869,24 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
*/
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
!work_busy(&fs_info->preempt_reclaim_work) &&
- need_preemptive_reclaim(fs_info, space_info)) {
+ need_preemptive_reclaim(space_info)) {
trace_btrfs_trigger_flush(fs_info, space_info->flags,
orig_bytes, flush, "preempt");
- queue_work(system_unbound_wq,
+ queue_work(system_dfl_wq,
&fs_info->preempt_reclaim_work);
}
}
spin_unlock(&space_info->lock);
- if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
+ if (!ret || !can_ticket(flush))
return ret;
- return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
- orig_bytes, flush);
+ return handle_reserve_ticket(space_info, &ticket, start_ns, orig_bytes, flush);
}
-/**
- * Trye to reserve metadata bytes from the block_rsv's space
+/*
+ * Try to reserve metadata bytes from the block_rsv's space.
*
- * @root: the root we're allocating for
- * @block_rsv: block_rsv we're allocating for
+ * @space_info: the space_info we're allocating for
* @orig_bytes: number of bytes we want
* @flush: whether or not we can flush to make our reservation
*
@@ -1579,60 +1897,329 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
+int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
int ret;
- ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
- if (ret == -ENOSPC &&
- unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
- if (block_rsv != global_rsv &&
- !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
- ret = 0;
- }
+ ret = reserve_bytes(space_info, orig_bytes, flush);
if (ret == -ENOSPC) {
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+
trace_btrfs_space_reservation(fs_info, "space_info:enospc",
- block_rsv->space_info->flags,
- orig_bytes, 1);
+ space_info->flags, orig_bytes, 1);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_dump_space_info(fs_info, block_rsv->space_info,
- orig_bytes, 0);
+ btrfs_dump_space_info(space_info, orig_bytes, false);
}
return ret;
}
-/**
- * Try to reserve data bytes for an allocation
+/*
+ * Try to reserve data bytes for an allocation.
*
- * @fs_info: the filesystem
+ * @space_info: the space_info we're allocating for
* @bytes: number of bytes we need
* @flush: how we are allowed to flush
*
* This will reserve bytes from the data space info. If there is not enough
* space then we will attempt to flush space as specified by flush.
*/
-int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
int ret;
ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
- flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
- ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
+ flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
+ flush == BTRFS_RESERVE_NO_FLUSH, "flush=%d", flush);
+ ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA,
+ "current->journal_info=0x%lx flush=%d",
+ (unsigned long)current->journal_info, flush);
- ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
+ ret = reserve_bytes(space_info, bytes, flush);
if (ret == -ENOSPC) {
trace_btrfs_space_reservation(fs_info, "space_info:enospc",
- data_sinfo->flags, bytes, 1);
+ space_info->flags, bytes, 1);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
+ btrfs_dump_space_info(space_info, bytes, false);
}
return ret;
}
+
+/* Dump all the space infos when we abort a transaction due to ENOSPC. */
+__cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *space_info;
+
+ btrfs_info(fs_info, "dumping space info:");
+ list_for_each_entry(space_info, &fs_info->space_info, list) {
+ spin_lock(&space_info->lock);
+ __btrfs_dump_space_info(space_info);
+ spin_unlock(&space_info->lock);
+ }
+ dump_global_block_rsv(fs_info);
+}
+
+/*
+ * Account the unused space of all the readonly block groups in the space_info.
+ * Takes mirrors into account.
+ */
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
+{
+ struct btrfs_block_group *block_group;
+ u64 free_bytes = 0;
+ int factor;
+
+ /* It's df, we don't care if it's racy */
+ if (data_race(list_empty(&sinfo->ro_bgs)))
+ return 0;
+
+ spin_lock(&sinfo->lock);
+ list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
+ spin_lock(&block_group->lock);
+
+ if (!block_group->ro) {
+ spin_unlock(&block_group->lock);
+ continue;
+ }
+
+ factor = btrfs_bg_type_to_factor(block_group->flags);
+ free_bytes += (block_group->length -
+ block_group->used) * factor;
+
+ spin_unlock(&block_group->lock);
+ }
+ spin_unlock(&sinfo->lock);
+
+ return free_bytes;
+}
+
+static u64 calc_pct_ratio(u64 x, u64 y)
+{
+ int ret;
+
+ if (!y)
+ return 0;
+again:
+ ret = check_mul_overflow(100, x, &x);
+ if (ret)
+ goto lose_precision;
+ return div64_u64(x, y);
+lose_precision:
+ x >>= 10;
+ y >>= 10;
+ if (!y)
+ y = 1;
+ goto again;
+}
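/*
 * Editorial example (not part of the patch): calc_pct_ratio() returns the
 * integer percentage 100 * x / y, falling back to a lower-precision path
 * when the multiplication would overflow 64 bits.
 */
static void example_calc_pct_ratio(void)
{
        /* Small values: 100 * 30 / 120 = 25. */
        u64 small = calc_pct_ratio(30, 120);

        /*
         * Huge values: 100 * x would overflow, so both operands are shifted
         * right by 10 bits until the multiplication fits; precision is lost
         * but the result is still approximately 50 here.
         */
        u64 big = calc_pct_ratio(U64_MAX / 2, U64_MAX);

        (void)small;
        (void)big;
}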
+
+/*
+ * A reasonable buffer for unallocated space is 10 data block_groups.
+ * If we claw this back repeatedly, we can still achieve efficient
+ * utilization when near full, and not do too much reclaim while
+ * always maintaining a solid buffer for workloads that quickly
+ * allocate and pressure the unallocated space.
+ */
+static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
+{
+ u64 chunk_sz = calc_effective_data_chunk_size(fs_info);
+
+ return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
+}
+
+/*
+ * The fundamental goal of automatic reclaim is to protect the filesystem's
+ * unallocated space and thus minimize the probability of the filesystem going
+ * read only when a metadata allocation failure causes a transaction abort.
+ *
+ * However, relocations happen into the space_info's unused space, therefore
+ * automatic reclaim must also back off as that space runs low. There is no
+ * value in doing trivial "relocations" of re-writing the same block group
+ * into a fresh one.
+ *
+ * Furthermore, we want to avoid doing too much reclaim even if there are good
+ * candidates. This is because the allocator is pretty good at filling up the
+ * holes with writes. So we want to do just enough reclaim to try and stay
+ * safe from running out of unallocated space but not be wasteful about it.
+ *
+ * Therefore, the dynamic reclaim threshold is calculated as follows:
+ * - calculate a target unallocated amount of 10 block group sized chunks
+ * - ratchet up the intensity of reclaim depending on how far we are from
+ * that target by using a formula of unalloc / target to set the threshold.
+ *
+ * Typically with 10 block groups as the target, the discrete values this comes
+ * out to are 0, 10, 20, ... , 80, 90, and 99.
+ */
+static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
+ u64 target = calc_unalloc_target(fs_info);
+ u64 alloc = space_info->total_bytes;
+ u64 used = btrfs_space_info_used(space_info, false);
+ u64 unused = alloc - used;
+ u64 want = target > unalloc ? target - unalloc : 0;
+ u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
+
+ /* If we have no unused space, don't bother, it won't work anyway. */
+ if (unused < data_chunk_size)
+ return 0;
+
+ /* Cast to int is OK because want <= target. */
+ return calc_pct_ratio(want, target);
+}
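/*
 * Worked example (editorial, with assumed values): suppose the effective
 * data chunk size is 1GiB, so the unallocated target is 10GiB. With 4GiB
 * still unallocated we want 6GiB back, giving a reclaim threshold of
 * 100 * 6 / 10 = 60, i.e. block groups below 60% usage become candidates,
 * provided the space_info has at least one chunk worth of unused space.
 */
static int example_dynamic_threshold(void)
{
        const u64 chunk = SZ_1G;        /* assumed effective data chunk size */
        const u64 target = 10 * chunk;  /* BTRFS_UNALLOC_BLOCK_GROUP_TARGET chunks */
        const u64 unalloc = 4 * chunk;  /* unallocated space left on the fs */
        const u64 want = target - unalloc;

        return calc_pct_ratio(want, target);    /* 60 */
}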
+
+int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info)
+{
+ lockdep_assert_held(&space_info->lock);
+
+ if (READ_ONCE(space_info->dynamic_reclaim))
+ return calc_dynamic_reclaim_threshold(space_info);
+ return READ_ONCE(space_info->bg_reclaim_threshold);
+}
+
+/*
+ * Under "urgent" reclaim, we will reclaim even fresh block groups that have
+ * recently seen successful allocations, as we are desperate to reclaim
+ * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs.
+ */
+static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
+ u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
+
+ return unalloc < data_chunk_size;
+}
+
+static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
+{
+ struct btrfs_block_group *bg;
+ int thresh_pct;
+ bool try_again = true;
+ bool urgent;
+
+ spin_lock(&space_info->lock);
+ urgent = is_reclaim_urgent(space_info);
+ thresh_pct = btrfs_calc_reclaim_threshold(space_info);
+ spin_unlock(&space_info->lock);
+
+ down_read(&space_info->groups_sem);
+again:
+ list_for_each_entry(bg, &space_info->block_groups[raid], list) {
+ u64 thresh;
+ bool reclaim = false;
+
+ btrfs_get_block_group(bg);
+ spin_lock(&bg->lock);
+ thresh = mult_perc(bg->length, thresh_pct);
+ if (bg->used < thresh && bg->reclaim_mark) {
+ try_again = false;
+ reclaim = true;
+ }
+ bg->reclaim_mark++;
+ spin_unlock(&bg->lock);
+ if (reclaim)
+ btrfs_mark_bg_to_reclaim(bg);
+ btrfs_put_block_group(bg);
+ }
+
+ /*
+ * In situations where we are very motivated to reclaim (low unalloc)
+ * use two passes to make the reclaim mark check best effort.
+ *
+ * If we have any staler groups, we don't touch the fresher ones, but if we
+ * really need a block group, do take a fresh one.
+ */
+ if (try_again && urgent) {
+ try_again = false;
+ goto again;
+ }
+
+ up_read(&space_info->groups_sem);
+}
+
+void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
+{
+ u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);
+
+ lockdep_assert_held(&space_info->lock);
+ space_info->reclaimable_bytes += bytes;
+
+ if (space_info->reclaimable_bytes >= chunk_sz)
+ btrfs_set_periodic_reclaim_ready(space_info, true);
+}
+
+void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
+{
+ lockdep_assert_held(&space_info->lock);
+ if (!READ_ONCE(space_info->periodic_reclaim))
+ return;
+ if (ready != space_info->periodic_reclaim_ready) {
+ space_info->periodic_reclaim_ready = ready;
+ if (!ready)
+ space_info->reclaimable_bytes = 0;
+ }
+}
+
+static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
+{
+ bool ret;
+
+ if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ return false;
+ if (!READ_ONCE(space_info->periodic_reclaim))
+ return false;
+
+ spin_lock(&space_info->lock);
+ ret = space_info->periodic_reclaim_ready;
+ btrfs_set_periodic_reclaim_ready(space_info, false);
+ spin_unlock(&space_info->lock);
+
+ return ret;
+}
+
+void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
+{
+ int raid;
+ struct btrfs_space_info *space_info;
+
+ list_for_each_entry(space_info, &fs_info->space_info, list) {
+ if (!btrfs_should_periodic_reclaim(space_info))
+ continue;
+ for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
+ do_reclaim_sweep(space_info, raid);
+ }
+}
+
+void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+
+ lockdep_assert_held(&space_info->lock);
+
+ /* Prioritize the global reservation to receive the freed space. */
+ if (global_rsv->space_info != space_info)
+ goto grant;
+
+ spin_lock(&global_rsv->lock);
+ if (!global_rsv->full) {
+ u64 to_add = min(len, global_rsv->size - global_rsv->reserved);
+
+ global_rsv->reserved += to_add;
+ btrfs_space_info_update_bytes_may_use(space_info, to_add);
+ if (global_rsv->reserved >= global_rsv->size)
+ global_rsv->full = true;
+ len -= to_add;
+ }
+ spin_unlock(&global_rsv->lock);
+
+grant:
+ /* Add to any tickets we may have. */
+ if (len)
+ btrfs_try_granting_tickets(space_info);
+}
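/*
 * Editorial sketch (hypothetical caller): returning 1MiB of freed metadata
 * space while holding the space_info lock, as the real call sites do. The
 * global rsv absorbs as much of it as it is missing; if anything is left
 * over, the queued reservation tickets are retried against the updated
 * counters.
 */
static void example_return_free_space(struct btrfs_space_info *space_info)
{
        lockdep_assert_held(&space_info->lock);
        btrfs_return_free_space(space_info, SZ_1M);
}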
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index cb5056472e79..446c0614ad4a 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -3,7 +3,113 @@
#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H
+#include <trace/events/btrfs.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/lockdep.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include "volumes.h"
+
+struct btrfs_fs_info;
+struct btrfs_block_group;
+
+/*
+ * Different levels of flushing space when doing space reservations.
+ *
+ * The higher the level, the more methods we try in order to reclaim space.
+ */
+enum btrfs_reserve_flush_enum {
+ /* If we are in the transaction, we can't flush anything. */
+ BTRFS_RESERVE_NO_FLUSH,
+
+ /*
+ * Flush space by:
+ * - Running delayed inode items
+ * - Allocating a new chunk
+ */
+ BTRFS_RESERVE_FLUSH_LIMIT,
+
+ /*
+ * Flush space by:
+ * - Running delayed inode items
+ * - Running delayed refs
+ * - Running delalloc and waiting for ordered extents
+ * - Allocating a new chunk
+ * - Committing transaction
+ */
+ BTRFS_RESERVE_FLUSH_EVICT,
+
+ /*
+ * Flush space by above mentioned methods and by:
+ * - Running delayed iputs
+ * - Committing transaction
+ *
+ * Can be interrupted by a fatal signal.
+ */
+ BTRFS_RESERVE_FLUSH_DATA,
+ BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
+ BTRFS_RESERVE_FLUSH_ALL,
+
+ /*
+ * Pretty much the same as FLUSH_ALL, but can also steal space from
+ * global rsv.
+ *
+ * Can be interrupted by a fatal signal.
+ */
+ BTRFS_RESERVE_FLUSH_ALL_STEAL,
+
+ /*
+ * This is for btrfs_use_block_rsv only. We have exhausted our block
+ * rsv and our global block rsv. This can happen for things like
+ * delalloc where we are overwriting a lot of extents with a single
+ * extent and didn't reserve enough space. Alternatively it can happen
+ * with delalloc where we reserve 1 extents worth for a large extent but
+ * fragmentation leads to multiple extents being created. This will
+ * give us the reservation in the case of
+ *
+ * if (num_bytes < (space_info->total_bytes -
+ * btrfs_space_info_used(space_info, false))
+ *
+ * Which ignores bytes_may_use. This is potentially dangerous, but our
+ * reservation system is generally pessimistic so is able to absorb this
+ * style of mistake.
+ */
+ BTRFS_RESERVE_FLUSH_EMERGENCY,
+};
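/*
 * Editorial sketch (hypothetical helper): picking a flush mode for a
 * metadata reservation. Callers holding a transaction handle must avoid
 * the modes that can commit a transaction (FLUSH_ALL, FLUSH_ALL_STEAL,
 * FLUSH_EVICT), so they fall back to NO_FLUSH or FLUSH_LIMIT.
 */
static inline int example_reserve_metadata(struct btrfs_space_info *space_info,
                                           u64 bytes, bool have_trans_handle)
{
        enum btrfs_reserve_flush_enum flush;

        flush = have_trans_handle ? BTRFS_RESERVE_NO_FLUSH
                                  : BTRFS_RESERVE_FLUSH_ALL_STEAL;
        return btrfs_reserve_metadata_bytes(space_info, bytes, flush);
}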
+
+/*
+ * Please be aware that the order of enum values will be the order of the reclaim
+ * process in btrfs_async_reclaim_metadata_space().
+ */
+enum btrfs_flush_state {
+ FLUSH_DELAYED_ITEMS_NR = 1,
+ FLUSH_DELAYED_ITEMS = 2,
+ FLUSH_DELAYED_REFS_NR = 3,
+ FLUSH_DELAYED_REFS = 4,
+ FLUSH_DELALLOC = 5,
+ FLUSH_DELALLOC_WAIT = 6,
+ FLUSH_DELALLOC_FULL = 7,
+ ALLOC_CHUNK = 8,
+ ALLOC_CHUNK_FORCE = 9,
+ RUN_DELAYED_IPUTS = 10,
+ COMMIT_TRANS = 11,
+ RESET_ZONES = 12,
+};
+
+enum btrfs_space_info_sub_group {
+ BTRFS_SUB_GROUP_PRIMARY,
+ BTRFS_SUB_GROUP_DATA_RELOC,
+ BTRFS_SUB_GROUP_TREELOG,
+};
+
+#define BTRFS_SPACE_INFO_SUB_GROUP_MAX 1
struct btrfs_space_info {
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *parent;
+ struct btrfs_space_info *sub_group[BTRFS_SPACE_INFO_SUB_GROUP_MAX];
+ int subgroup_id;
spinlock_t lock;
u64 total_bytes; /* total bytes in the space,
@@ -23,16 +129,24 @@ struct btrfs_space_info {
u64 max_extent_size; /* This will hold the maximum extent size of
the space info if we had an ENOSPC in the
allocator. */
+ /* Chunk size in bytes */
+ u64 chunk_size;
+
+ /*
+ * Once a block group drops below this threshold (percents) we'll
+ * schedule it for reclaim.
+ */
+ int bg_reclaim_threshold;
int clamp; /* Used to scale our threshold for preemptive
flushing. The value is >> clamp, so turns
out to be a 2^clamp divisor. */
- unsigned int full:1; /* indicates that we cannot allocate any more
+ bool full; /* indicates that we cannot allocate any more
chunks for this space */
- unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
+ bool chunk_alloc; /* set if we are allocating a chunk */
- unsigned int flush:1; /* set if we are trying to make space */
+ bool flush; /* set if we are trying to make space */
unsigned int force_alloc; /* set if we need to force a chunk
alloc for this space */
@@ -67,17 +181,50 @@ struct btrfs_space_info {
struct kobject kobj;
struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
-};
-struct reserve_ticket {
- u64 bytes;
- int error;
- bool steal;
- struct list_head list;
- wait_queue_head_t wait;
+ /*
+ * Monotonically increasing counter of block group reclaim attempts
+ * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_count
+ */
+ u64 reclaim_count;
+
+ /*
+ * Monotonically increasing counter of reclaimed bytes
+ * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_bytes
+ */
+ u64 reclaim_bytes;
+
+ /*
+ * Monotonically increasing counter of reclaim errors
+ * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_errors
+ */
+ u64 reclaim_errors;
+
+ /*
+ * If true, use the dynamic reclaim threshold, instead of the
+ * fixed bg_reclaim_threshold.
+ */
+ bool dynamic_reclaim;
+
+ /*
+ * Periodically check all block groups against the reclaim
+ * threshold in the cleaner thread.
+ */
+ bool periodic_reclaim;
+
+ /*
+ * Periodic reclaim should be a no-op if a space_info hasn't
+ * freed any space since the last time we tried.
+ */
+ bool periodic_reclaim_ready;
+
+ /*
+ * Net bytes freed or allocated since the last reclaim pass.
+ */
+ s64 reclaimable_bytes;
};
-static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
+static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info)
{
return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
@@ -89,10 +236,10 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
*/
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \
static inline void \
-btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
- struct btrfs_space_info *sinfo, \
+btrfs_space_info_update_##name(struct btrfs_space_info *sinfo, \
s64 bytes) \
{ \
+ struct btrfs_fs_info *fs_info = sinfo->fs_info; \
const u64 abs_bytes = (bytes < 0) ? -bytes : bytes; \
lockdep_assert_held(&sinfo->lock); \
trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \
@@ -109,40 +256,55 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
+DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
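/*
 * Editorial note: after this change the helpers generated by
 * DECLARE_SPACE_INFO_UPDATE() take only the space_info; roughly
 * (body abbreviated):
 *
 *      static inline void
 *      btrfs_space_info_update_bytes_may_use(struct btrfs_space_info *sinfo,
 *                                            s64 bytes)
 *      {
 *              struct btrfs_fs_info *fs_info = sinfo->fs_info;
 *
 *              lockdep_assert_held(&sinfo->lock);
 *              trace_update_bytes_may_use(fs_info, sinfo,
 *                                         sinfo->bytes_may_use, bytes);
 *              ...
 *              sinfo->bytes_may_use += bytes;
 *      }
 */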
+
+static inline u64 btrfs_space_info_used(const struct btrfs_space_info *s_info,
+ bool may_use_included)
+{
+ lockdep_assert_held(&s_info->lock);
+
+ return s_info->bytes_used + s_info->bytes_reserved +
+ s_info->bytes_pinned + s_info->bytes_readonly +
+ s_info->bytes_zone_unusable +
+ (may_use_included ? s_info->bytes_may_use : 0);
+}
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
-void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
- u64 total_bytes, u64 bytes_used,
- u64 bytes_readonly, u64 bytes_zone_unusable,
- struct btrfs_space_info **space_info);
+void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+ struct btrfs_block_group *block_group);
+void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
+ u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
- bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
-void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info, u64 bytes,
- int dump_block_groups);
-int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
+void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes,
+ bool dump_block_groups);
+int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush);
-void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info);
-int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 bytes,
- enum btrfs_reserve_flush_enum flush);
+void btrfs_try_granting_tickets(struct btrfs_space_info *space_info);
+bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes,
+ enum btrfs_reserve_flush_enum flush);
static inline void btrfs_space_info_free_bytes_may_use(
- struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes)
{
spin_lock(&space_info->lock);
- btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
- btrfs_try_granting_tickets(fs_info, space_info);
+ btrfs_space_info_update_bytes_may_use(space_info, -num_bytes);
+ btrfs_try_granting_tickets(space_info);
spin_unlock(&space_info->lock);
}
-int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush);
+void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
+void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
+
+void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
+void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
+int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
+void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);
+void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len);
+
#endif /* BTRFS_SPACE_INFO_H */
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
deleted file mode 100644
index f429256f56db..000000000000
--- a/fs/btrfs/struct-funcs.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2007 Oracle. All rights reserved.
- */
-
-#include <asm/unaligned.h>
-
-#include "ctree.h"
-
-static bool check_setget_bounds(const struct extent_buffer *eb,
- const void *ptr, unsigned off, int size)
-{
- const unsigned long member_offset = (unsigned long)ptr + off;
-
- if (member_offset > eb->len) {
- btrfs_warn(eb->fs_info,
- "bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
- (unsigned long)ptr, eb->start, member_offset, size);
- return false;
- }
- if (member_offset + size > eb->len) {
- btrfs_warn(eb->fs_info,
- "bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
- (unsigned long)ptr, eb->start, member_offset, size);
- return false;
- }
-
- return true;
-}
-
-/*
- * Macro templates that define helpers to read/write extent buffer data of a
- * given size, that are also used via ctree.h for access to item members by
- * specialized helpers.
- *
- * Generic helpers:
- * - btrfs_set_8 (for 8/16/32/64)
- * - btrfs_get_8 (for 8/16/32/64)
- *
- * Generic helpers with a token (cached address of the most recently accessed
- * page):
- * - btrfs_set_token_8 (for 8/16/32/64)
- * - btrfs_get_token_8 (for 8/16/32/64)
- *
- * The set/get functions handle data spanning two pages transparently, in case
- * metadata block size is larger than page. Every pointer to metadata items is
- * an offset into the extent buffer page array, cast to a specific type. This
- * gives us all the type checking.
- *
- * The extent buffer pages stored in the array pages do not form a contiguous
- * phyusical range, but the API functions assume the linear offset to the range
- * from 0 to metadata node size.
- */
-
-#define DEFINE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off) \
-{ \
- const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long idx = get_eb_page_index(member_offset); \
- const unsigned long oip = get_eb_offset_in_page(token->eb, \
- member_offset); \
- const int size = sizeof(u##bits); \
- u8 lebytes[sizeof(u##bits)]; \
- const int part = PAGE_SIZE - oip; \
- \
- ASSERT(token); \
- ASSERT(token->kaddr); \
- ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
- if (token->offset <= member_offset && \
- member_offset + size <= token->offset + PAGE_SIZE) { \
- return get_unaligned_le##bits(token->kaddr + oip); \
- } \
- token->kaddr = page_address(token->eb->pages[idx]); \
- token->offset = idx << PAGE_SHIFT; \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE ) \
- return get_unaligned_le##bits(token->kaddr + oip); \
- \
- memcpy(lebytes, token->kaddr + oip, part); \
- token->kaddr = page_address(token->eb->pages[idx + 1]); \
- token->offset = (idx + 1) << PAGE_SHIFT; \
- memcpy(lebytes + part, token->kaddr, size - part); \
- return get_unaligned_le##bits(lebytes); \
-} \
-u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
- const void *ptr, unsigned long off) \
-{ \
- const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
- const unsigned long idx = get_eb_page_index(member_offset); \
- char *kaddr = page_address(eb->pages[idx]); \
- const int size = sizeof(u##bits); \
- const int part = PAGE_SIZE - oip; \
- u8 lebytes[sizeof(u##bits)]; \
- \
- ASSERT(check_setget_bounds(eb, ptr, off, size)); \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
- return get_unaligned_le##bits(kaddr + oip); \
- \
- memcpy(lebytes, kaddr + oip, part); \
- kaddr = page_address(eb->pages[idx + 1]); \
- memcpy(lebytes + part, kaddr, size - part); \
- return get_unaligned_le##bits(lebytes); \
-} \
-void btrfs_set_token_##bits(struct btrfs_map_token *token, \
- const void *ptr, unsigned long off, \
- u##bits val) \
-{ \
- const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long idx = get_eb_page_index(member_offset); \
- const unsigned long oip = get_eb_offset_in_page(token->eb, \
- member_offset); \
- const int size = sizeof(u##bits); \
- u8 lebytes[sizeof(u##bits)]; \
- const int part = PAGE_SIZE - oip; \
- \
- ASSERT(token); \
- ASSERT(token->kaddr); \
- ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
- if (token->offset <= member_offset && \
- member_offset + size <= token->offset + PAGE_SIZE) { \
- put_unaligned_le##bits(val, token->kaddr + oip); \
- return; \
- } \
- token->kaddr = page_address(token->eb->pages[idx]); \
- token->offset = idx << PAGE_SHIFT; \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
- put_unaligned_le##bits(val, token->kaddr + oip); \
- return; \
- } \
- put_unaligned_le##bits(val, lebytes); \
- memcpy(token->kaddr + oip, lebytes, part); \
- token->kaddr = page_address(token->eb->pages[idx + 1]); \
- token->offset = (idx + 1) << PAGE_SHIFT; \
- memcpy(token->kaddr, lebytes + part, size - part); \
-} \
-void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
- unsigned long off, u##bits val) \
-{ \
- const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
- const unsigned long idx = get_eb_page_index(member_offset); \
- char *kaddr = page_address(eb->pages[idx]); \
- const int size = sizeof(u##bits); \
- const int part = PAGE_SIZE - oip; \
- u8 lebytes[sizeof(u##bits)]; \
- \
- ASSERT(check_setget_bounds(eb, ptr, off, size)); \
- if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
- put_unaligned_le##bits(val, kaddr + oip); \
- return; \
- } \
- \
- put_unaligned_le##bits(val, lebytes); \
- memcpy(kaddr + oip, lebytes, part); \
- kaddr = page_address(eb->pages[idx + 1]); \
- memcpy(kaddr, lebytes + part, size - part); \
-}
-
-DEFINE_BTRFS_SETGET_BITS(8)
-DEFINE_BTRFS_SETGET_BITS(16)
-DEFINE_BTRFS_SETGET_BITS(32)
-DEFINE_BTRFS_SETGET_BITS(64)
-
-void btrfs_node_key(const struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr)
-{
- unsigned long ptr = btrfs_node_key_ptr_offset(nr);
- read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
- struct btrfs_key_ptr, key, disk_key);
-}
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 29bd8c7a7706..f82e71f5d88b 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
-#include "ctree.h"
+#include "messages.h"
#include "subpage.h"
#include "btrfs_inode.h"
/*
- * Subpage (sectorsize < PAGE_SIZE) support overview:
+ * Subpage (block size < folio size) support overview:
*
* Limitations:
*
@@ -49,7 +49,7 @@
* Implementation:
*
* - Common
- * Both metadata and data will use a new structure, btrfs_subpage, to
+ * Both metadata and data will use a new structure, btrfs_folio_state, to
* record the status of each sector inside a page. This provides the extra
* granularity needed.
*
@@ -63,398 +63,342 @@
* This means a slightly higher tree locking latency.
*/
-void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
+int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, enum btrfs_folio_type type)
{
- unsigned int cur = 0;
- unsigned int nr_bits;
+ struct btrfs_folio_state *bfs;
- ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));
-
- nr_bits = PAGE_SIZE / sectorsize;
- subpage_info->bitmap_nr_bits = nr_bits;
-
- subpage_info->uptodate_offset = cur;
- cur += nr_bits;
-
- subpage_info->error_offset = cur;
- cur += nr_bits;
-
- subpage_info->dirty_offset = cur;
- cur += nr_bits;
-
- subpage_info->writeback_offset = cur;
- cur += nr_bits;
-
- subpage_info->ordered_offset = cur;
- cur += nr_bits;
-
- subpage_info->checked_offset = cur;
- cur += nr_bits;
-
- subpage_info->total_nr_bits = cur;
-}
-
-int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
- struct page *page, enum btrfs_subpage_type type)
-{
- struct btrfs_subpage *subpage;
+ /* For metadata we don't support large folio yet. */
+ if (type == BTRFS_SUBPAGE_METADATA)
+ ASSERT(!folio_test_large(folio));
/*
- * We have cases like a dummy extent buffer page, which is not mappped
+ * We have cases like a dummy extent buffer page, which is not mapped
* and doesn't need to be locked.
*/
- if (page->mapping)
- ASSERT(PageLocked(page));
+ if (folio->mapping)
+ ASSERT(folio_test_locked(folio));
- /* Either not subpage, or the page already has private attached */
- if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
+ /* Either not subpage, or the folio already has private attached. */
+ if (folio_test_private(folio))
+ return 0;
+ if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
+ return 0;
+ if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
return 0;
- subpage = btrfs_alloc_subpage(fs_info, type);
- if (IS_ERR(subpage))
- return PTR_ERR(subpage);
+ bfs = btrfs_alloc_folio_state(fs_info, folio_size(folio), type);
+ if (IS_ERR(bfs))
+ return PTR_ERR(bfs);
- attach_page_private(page, subpage);
+ folio_attach_private(folio, bfs);
return 0;
}
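A minimal sketch of a data call site pairing attach and detach; the two wrapper
functions below are hypothetical, and only btrfs_attach_folio_state() and
btrfs_detach_folio_state() come from this patch.

static int example_prepare_data_folio(const struct btrfs_fs_info *fs_info,
				      struct folio *folio)
{
	int ret;

	/* Mapped folios must be locked before attaching private state. */
	folio_lock(folio);
	ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
	folio_unlock(folio);
	return ret;
}

static void example_release_data_folio(const struct btrfs_fs_info *fs_info,
				       struct folio *folio)
{
	/* No-op unless the folio actually has btrfs_folio_state attached. */
	btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
}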
-void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
- struct page *page)
+void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
+ enum btrfs_folio_type type)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- /* Either not subpage, or already detached */
- if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
+ /* Either not subpage, or the private is already detached. */
+ if (!folio_test_private(folio))
+ return;
+ if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
+ return;
+ if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
return;
- subpage = (struct btrfs_subpage *)detach_page_private(page);
- ASSERT(subpage);
- btrfs_free_subpage(subpage);
+ bfs = folio_detach_private(folio);
+ ASSERT(bfs);
+ btrfs_free_folio_state(bfs);
}
-struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
- enum btrfs_subpage_type type)
+struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
+ size_t fsize, enum btrfs_folio_type type)
{
- struct btrfs_subpage *ret;
+ struct btrfs_folio_state *ret;
unsigned int real_size;
- ASSERT(fs_info->sectorsize < PAGE_SIZE);
+ ASSERT(fs_info->sectorsize < fsize);
real_size = struct_size(ret, bitmaps,
- BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
+ BITS_TO_LONGS(btrfs_bitmap_nr_max *
+ (fsize >> fs_info->sectorsize_bits)));
ret = kzalloc(real_size, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
spin_lock_init(&ret->lock);
- if (type == BTRFS_SUBPAGE_METADATA) {
+ if (type == BTRFS_SUBPAGE_METADATA)
atomic_set(&ret->eb_refs, 0);
- } else {
- atomic_set(&ret->readers, 0);
- atomic_set(&ret->writers, 0);
- }
+ else
+ atomic_set(&ret->nr_locked, 0);
return ret;
}
-void btrfs_free_subpage(struct btrfs_subpage *subpage)
-{
- kfree(subpage);
-}
-
/*
* Increase the eb_refs of current subpage.
*
* This is important for eb allocation, to prevent race with last eb freeing
* of the same page.
* With the eb_refs increased before the eb inserted into radix tree,
- * detach_extent_buffer_page() won't detach the page private while we're still
+ * detach_extent_buffer_page() won't detach the folio private while we're still
* allocating the extent buffer.
*/
-void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
- struct page *page)
+void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- if (fs_info->sectorsize == PAGE_SIZE)
+ if (!btrfs_meta_is_subpage(fs_info))
return;
- ASSERT(PagePrivate(page) && page->mapping);
- lockdep_assert_held(&page->mapping->private_lock);
+ ASSERT(folio_test_private(folio) && folio->mapping);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
- subpage = (struct btrfs_subpage *)page->private;
- atomic_inc(&subpage->eb_refs);
+ bfs = folio_get_private(folio);
+ atomic_inc(&bfs->eb_refs);
}
-void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
- struct page *page)
+void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
- struct btrfs_subpage *subpage;
+ struct btrfs_folio_state *bfs;
- if (fs_info->sectorsize == PAGE_SIZE)
+ if (!btrfs_meta_is_subpage(fs_info))
return;
- ASSERT(PagePrivate(page) && page->mapping);
- lockdep_assert_held(&page->mapping->private_lock);
+ ASSERT(folio_test_private(folio) && folio->mapping);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
- subpage = (struct btrfs_subpage *)page->private;
- ASSERT(atomic_read(&subpage->eb_refs));
- atomic_dec(&subpage->eb_refs);
+ bfs = folio_get_private(folio);
+ ASSERT(atomic_read(&bfs->eb_refs));
+ atomic_dec(&bfs->eb_refs);
}
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
/* Basic checks */
- ASSERT(PagePrivate(page) && page->private);
+ ASSERT(folio_test_private(folio) && folio_get_private(folio));
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
- IS_ALIGNED(len, fs_info->sectorsize));
+ IS_ALIGNED(len, fs_info->sectorsize), "start=%llu len=%u", start, len);
/*
* The range check only works for mapped page, we can still have
* unmapped page like dummy extent buffer pages.
*/
- if (page->mapping)
- ASSERT(page_offset(page) <= start &&
- start + len <= page_offset(page) + PAGE_SIZE);
+ if (folio->mapping)
+ ASSERT(folio_pos(folio) <= start &&
+ start + len <= folio_next_pos(folio),
+ "start=%llu len=%u folio_pos=%llu folio_size=%zu",
+ start, len, folio_pos(folio), folio_size(folio));
}
-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- const int nbits = len >> fs_info->sectorsize_bits;
-
- btrfs_subpage_assert(fs_info, page, start, len);
-
- atomic_add(nbits, &subpage->readers);
-}
-
-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- const int nbits = len >> fs_info->sectorsize_bits;
- bool is_data;
- bool last;
-
- btrfs_subpage_assert(fs_info, page, start, len);
- is_data = is_data_inode(page->mapping->host);
- ASSERT(atomic_read(&subpage->readers) >= nbits);
- last = atomic_sub_and_test(nbits, &subpage->readers);
-
- /*
- * For data we need to unlock the page if the last read has finished.
- *
- * And please don't replace @last with atomic_sub_and_test() call
- * inside if () condition.
- * As we want the atomic_sub_and_test() to be always executed.
- */
- if (is_data && last)
- unlock_page(page);
-}
+#define subpage_calc_start_bit(fs_info, folio, name, start, len) \
+({ \
+ unsigned int __start_bit; \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ btrfs_subpage_assert(fs_info, folio, start, len); \
+ __start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
+ __start_bit += __bpf * btrfs_bitmap_nr_##name; \
+ __start_bit; \
+})
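To make the bitmap layout concrete, here is a self-contained sketch mirroring the
arithmetic of subpage_calc_start_bit() with illustrative numbers (not from the patch):
4K blocks in a 16K folio give four blocks per folio, so each named bitmap occupies a
four-bit window.

static unsigned int example_start_bit(void)
{
	const unsigned int sectorsize_bits = 12;	/* 4K blocks */
	const unsigned int blocks_per_folio = 4;	/* 16K folio */
	const unsigned int bitmap_nr_dirty = 1;		/* see enum in subpage.h */
	const u64 folio_start = 65536;			/* folio_pos(folio), 64K */
	const u64 start = 73728;			/* range start, 72K */
	unsigned int start_bit;

	/* Block index of @start inside the folio: (72K - 64K) / 4K = 2. */
	start_bit = (start - folio_start) >> sectorsize_bits;
	/* Skip the uptodate window to reach the dirty window: 2 + 4 * 1 = 6. */
	start_bit += blocks_per_folio * bitmap_nr_dirty;
	return start_bit;
}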
-static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
+static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
u64 orig_start = *start;
u32 orig_len = *len;
- *start = max_t(u64, page_offset(page), orig_start);
+ *start = max_t(u64, folio_pos(folio), orig_start);
/*
* For certain call sites like btrfs_drop_pages(), we may have pages
* beyond the target range. In that case, just set @len to 0, subpage
* helpers can handle @len == 0 without any problem.
*/
- if (page_offset(page) >= orig_start + orig_len)
+ if (folio_pos(folio) >= orig_start + orig_len)
*len = 0;
else
- *len = min_t(u64, page_offset(page) + PAGE_SIZE,
- orig_start + orig_len) - *start;
+ *len = min_t(u64, folio_next_pos(folio), orig_start + orig_len) - *start;
}
-void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- const int nbits = (len >> fs_info->sectorsize_bits);
- int ret;
-
- btrfs_subpage_assert(fs_info, page, start, len);
-
- ASSERT(atomic_read(&subpage->readers) == 0);
- ret = atomic_add_return(nbits, &subpage->writers);
- ASSERT(ret == nbits);
-}
-
-bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
const int nbits = (len >> fs_info->sectorsize_bits);
+ unsigned long flags;
+ unsigned int cleared = 0;
+ int bit = start_bit;
+ bool last;
- btrfs_subpage_assert(fs_info, page, start, len);
+ btrfs_subpage_assert(fs_info, folio, start, len);
+ spin_lock_irqsave(&bfs->lock, flags);
/*
* We have call sites passing @lock_page into
* extent_clear_unlock_delalloc() for compression path.
*
* This @locked_page is locked by plain lock_page(), thus its
- * subpage::writers is 0. Handle them in a special way.
+ * subpage::locked is 0. Handle them in a special way.
*/
- if (atomic_read(&subpage->writers) == 0)
+ if (atomic_read(&bfs->nr_locked) == 0) {
+ spin_unlock_irqrestore(&bfs->lock, flags);
return true;
+ }
- ASSERT(atomic_read(&subpage->writers) >= nbits);
- return atomic_sub_and_test(nbits, &subpage->writers);
+ for_each_set_bit_from(bit, bfs->bitmaps, start_bit + nbits) {
+ clear_bit(bit, bfs->bitmaps);
+ cleared++;
+ }
+ ASSERT(atomic_read(&bfs->nr_locked) >= cleared,
+ "atomic_read(&bfs->nr_locked)=%d cleared=%d",
+ atomic_read(&bfs->nr_locked), cleared);
+ last = atomic_sub_and_test(cleared, &bfs->nr_locked);
+ spin_unlock_irqrestore(&bfs->lock, flags);
+ return last;
}
/*
- * Lock a page for delalloc page writeback.
+ * Handle different locked folios:
*
- * Return -EAGAIN if the page is not properly initialized.
- * Return 0 with the page locked, and writer counter updated.
+ * - Non-subpage folio
+ * Just unlock it.
*
- * Even with 0 returned, the page still need extra check to make sure
- * it's really the correct page, as the caller is using
- * find_get_pages_contig(), which can race with page invalidating.
+ * - folio locked but without any subpage locked
+ * This happens either before writepage_delalloc() or when the delalloc
+ * range has already been handled by a previous folio.
+ * We can simply unlock it.
+ *
+ * - folio locked with subpage range locked.
+ * We go through the locked sectors inside the range, clear their locked
+ * bitmap, decrease the locked sector count, and unlock the folio if that
+ * was the last locked range.
*/
-int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
- lock_page(page);
- return 0;
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+
+ ASSERT(folio_test_locked(folio));
+
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio)) {
+ folio_unlock(folio);
+ return;
}
- lock_page(page);
- if (!PagePrivate(page) || !page->private) {
- unlock_page(page);
- return -EAGAIN;
+
+ /*
+ * For the subpage case, there are two types of locked folios: with or
+ * without a locked sector count.
+ *
+ * Since we own the folio lock, no one else can touch subpage::locked,
+ * so we are safe to do several atomic operations without the spinlock.
+ */
+ if (atomic_read(&bfs->nr_locked) == 0) {
+ /* No subpage lock, locked by plain lock_page(). */
+ folio_unlock(folio);
+ return;
}
- btrfs_subpage_clamp_range(page, &start, &len);
- btrfs_subpage_start_writer(fs_info, page, start, len);
- return 0;
-}
-void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
-{
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
- return unlock_page(page);
- btrfs_subpage_clamp_range(page, &start, &len);
- if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
- unlock_page(page);
+ btrfs_subpage_clamp_range(folio, &start, &len);
+ if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
+ folio_unlock(folio);
}
-static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
- unsigned int nbits)
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, unsigned long bitmap)
{
- unsigned int found_zero;
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
+ const int start_bit = blocks_per_folio * btrfs_bitmap_nr_locked;
+ unsigned long flags;
+ bool last = false;
+ int cleared = 0;
+ int bit;
- found_zero = find_next_zero_bit(addr, start + nbits, start);
- if (found_zero == start + nbits)
- return true;
- return false;
-}
+ if (!btrfs_is_subpage(fs_info, folio)) {
+ folio_unlock(folio);
+ return;
+ }
-static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
- unsigned int nbits)
-{
- unsigned int found_set;
+ if (atomic_read(&bfs->nr_locked) == 0) {
+ /* No subpage lock, locked by plain lock_page(). */
+ folio_unlock(folio);
+ return;
+ }
- found_set = find_next_bit(addr, start + nbits, start);
- if (found_set == start + nbits)
- return true;
- return false;
+ spin_lock_irqsave(&bfs->lock, flags);
+ for_each_set_bit(bit, &bitmap, blocks_per_folio) {
+ if (test_and_clear_bit(bit + start_bit, bfs->bitmaps))
+ cleared++;
+ }
+ ASSERT(atomic_read(&bfs->nr_locked) >= cleared,
+ "atomic_read(&bfs->nr_locked)=%d cleared=%d",
+ atomic_read(&bfs->nr_locked), cleared);
+ last = atomic_sub_and_test(cleared, &bfs->nr_locked);
+ spin_unlock_irqrestore(&bfs->lock, flags);
+ if (last)
+ folio_unlock(folio);
}
-#define subpage_calc_start_bit(fs_info, page, name, start, len) \
+#define subpage_test_bitmap_all_set(fs_info, folio, name) \
({ \
- unsigned int start_bit; \
+ struct btrfs_folio_state *__bfs = folio_get_private(folio); \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
\
- btrfs_subpage_assert(fs_info, page, start, len); \
- start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
- start_bit += fs_info->subpage_info->name##_offset; \
- start_bit; \
+ bitmap_test_range_all_set(__bfs->bitmaps, \
+ __bpf * btrfs_bitmap_nr_##name, __bpf); \
})
-#define subpage_test_bitmap_all_set(fs_info, subpage, name) \
- bitmap_test_range_all_set(subpage->bitmaps, \
- fs_info->subpage_info->name##_offset, \
- fs_info->subpage_info->bitmap_nr_bits)
-
-#define subpage_test_bitmap_all_zero(fs_info, subpage, name) \
- bitmap_test_range_all_zero(subpage->bitmaps, \
- fs_info->subpage_info->name##_offset, \
- fs_info->subpage_info->bitmap_nr_bits)
+#define subpage_test_bitmap_all_zero(fs_info, folio, name) \
+({ \
+ struct btrfs_folio_state *__bfs = folio_get_private(folio); \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ bitmap_test_range_all_zero(__bfs->bitmaps, \
+ __bpf * btrfs_bitmap_nr_##name, __bpf); \
+})
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
uptodate, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
- SetPageUptodate(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_set(fs_info, folio, uptodate))
+ folio_mark_uptodate(folio);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
uptodate, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- ClearPageUptodate(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
-void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
- error, start, len);
- unsigned long flags;
-
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- SetPageError(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
-void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
- error, start, len);
- unsigned long flags;
-
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
- ClearPageError(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ folio_clear_uptodate(folio);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
dirty, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- spin_unlock_irqrestore(&subpage->lock, flags);
- set_page_dirty(page);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ spin_unlock_irqrestore(&bfs->lock, flags);
+ folio_mark_dirty(folio);
}
/*
@@ -468,119 +412,129 @@ void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
* extra handling for tree blocks.
*/
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
dirty, start, len);
unsigned long flags;
bool last = false;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_zero(fs_info, folio, dirty))
last = true;
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
return last;
}
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
bool last;
- last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
+ last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
if (last)
- clear_page_dirty_for_io(page);
+ folio_clear_dirty_for_io(folio);
}
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
writeback, start, len);
unsigned long flags;
+ bool keep_write;
+
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- set_page_writeback(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ /*
+ * Don't clear the TOWRITE tag when starting writeback on a still-dirty
+ * folio. Doing so can cause WB_SYNC_ALL writepages() to overlook it,
+ * assume writeback is complete, and exit too early, violating sync
+ * ordering guarantees.
+ */
+ keep_write = folio_test_dirty(folio);
+ if (!folio_test_writeback(folio))
+ __folio_start_writeback(folio, keep_write);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
writeback, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
- ASSERT(PageWriteback(page));
- end_page_writeback(page);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_zero(fs_info, folio, writeback)) {
+ ASSERT(folio_test_writeback(folio));
+ folio_end_writeback(folio);
}
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
ordered, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- SetPageOrdered(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ folio_set_ordered(folio);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
ordered, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
- ClearPageOrdered(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_zero(fs_info, folio, ordered))
+ folio_clear_ordered(folio);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
checked, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
- SetPageChecked(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ if (subpage_test_bitmap_all_set(fs_info, folio, checked))
+ folio_set_checked(folio);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len)
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+ struct btrfs_folio_state *bfs = folio_get_private(folio);
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
checked, start, len);
unsigned long flags;
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- ClearPageChecked(page);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ spin_lock_irqsave(&bfs->lock, flags);
+ bitmap_clear(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+ folio_clear_checked(folio);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
/*
@@ -589,22 +543,21 @@ void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
*/
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name) \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+ struct folio *folio, u64 start, u32 len) \
{ \
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
- unsigned int start_bit = subpage_calc_start_bit(fs_info, page, \
+ struct btrfs_folio_state *bfs = folio_get_private(folio); \
+ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, \
name, start, len); \
unsigned long flags; \
bool ret; \
\
- spin_lock_irqsave(&subpage->lock, flags); \
- ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit, \
+ spin_lock_irqsave(&bfs->lock, flags); \
+ ret = bitmap_test_range_all_set(bfs->bitmaps, start_bit, \
len >> fs_info->sectorsize_bits); \
- spin_unlock_irqrestore(&subpage->lock, flags); \
+ spin_unlock_irqrestore(&bfs->lock, flags); \
return ret; \
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
-IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
@@ -615,131 +568,261 @@ IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
* in. We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
* back to regular sectorsize branch.
*/
-#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func, \
- test_page_func) \
-void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func, \
+ folio_clear_func, folio_test_func) \
+void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len) \
{ \
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
- set_page_func(page); \
+ if (unlikely(!fs_info) || \
+ !btrfs_is_subpage(fs_info, folio)) { \
+ folio_set_func(folio); \
return; \
} \
- btrfs_subpage_set_##name(fs_info, page, start, len); \
+ btrfs_subpage_set_##name(fs_info, folio, start, len); \
} \
-void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len) \
{ \
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
- clear_page_func(page); \
+ if (unlikely(!fs_info) || \
+ !btrfs_is_subpage(fs_info, folio)) { \
+ folio_clear_func(folio); \
return; \
} \
- btrfs_subpage_clear_##name(fs_info, page, start, len); \
+ btrfs_subpage_clear_##name(fs_info, folio, start, len); \
} \
-bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len) \
{ \
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
- return test_page_func(page); \
- return btrfs_subpage_test_##name(fs_info, page, start, len); \
+ if (unlikely(!fs_info) || \
+ !btrfs_is_subpage(fs_info, folio)) \
+ return folio_test_func(folio); \
+ return btrfs_subpage_test_##name(fs_info, folio, start, len); \
} \
-void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len) \
{ \
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
- set_page_func(page); \
+ if (unlikely(!fs_info) || \
+ !btrfs_is_subpage(fs_info, folio)) { \
+ folio_set_func(folio); \
return; \
} \
- btrfs_subpage_clamp_range(page, &start, &len); \
- btrfs_subpage_set_##name(fs_info, page, start, len); \
+ btrfs_subpage_clamp_range(folio, &start, &len); \
+ btrfs_subpage_set_##name(fs_info, folio, start, len); \
} \
-void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len) \
{ \
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
- clear_page_func(page); \
+ if (unlikely(!fs_info) || \
+ !btrfs_is_subpage(fs_info, folio)) { \
+ folio_clear_func(folio); \
return; \
} \
- btrfs_subpage_clamp_range(page, &start, &len); \
- btrfs_subpage_clear_##name(fs_info, page, start, len); \
+ btrfs_subpage_clamp_range(folio, &start, &len); \
+ btrfs_subpage_clear_##name(fs_info, folio, start, len); \
} \
-bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len) \
+bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || \
+ !btrfs_is_subpage(fs_info, folio)) \
+ return folio_test_func(folio); \
+ btrfs_subpage_clamp_range(folio, &start, &len); \
+ return btrfs_subpage_test_##name(fs_info, folio, start, len); \
+} \
+void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb) \
+{ \
+ if (!btrfs_meta_is_subpage(eb->fs_info)) { \
+ folio_set_func(folio); \
+ return; \
+ } \
+ btrfs_subpage_set_##name(eb->fs_info, folio, eb->start, eb->len); \
+} \
+void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb) \
+{ \
+ if (!btrfs_meta_is_subpage(eb->fs_info)) { \
+ folio_clear_func(folio); \
+ return; \
+ } \
+ btrfs_subpage_clear_##name(eb->fs_info, folio, eb->start, eb->len); \
+} \
+bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb) \
+{ \
+ if (!btrfs_meta_is_subpage(eb->fs_info)) \
+ return folio_test_func(folio); \
+ return btrfs_subpage_test_##name(eb->fs_info, folio, eb->start, eb->len); \
+}
+IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
+ folio_test_uptodate);
+IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
+ folio_test_dirty);
+IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
+ folio_test_writeback);
+IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
+ folio_test_ordered);
+IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
+ folio_test_checked);
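For readability, this is roughly what one instantiation of the template above expands
to, hand-expanded for the dirty test helper (name = dirty, folio_test_func =
folio_test_dirty); it is a sketch of the expansion, not generated output.

bool btrfs_folio_test_dirty(const struct btrfs_fs_info *fs_info,
			    struct folio *folio, u64 start, u32 len)
{
	/* Non-subpage folios fall back to the plain folio flag. */
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio))
		return folio_test_dirty(folio);
	return btrfs_subpage_test_dirty(fs_info, folio, start, len);
}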
+
+#define GET_SUBPAGE_BITMAP(fs_info, folio, name, dst) \
+{ \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ const struct btrfs_folio_state *__bfs = folio_get_private(folio); \
+ \
+ ASSERT(__bpf <= BITS_PER_LONG); \
+ *dst = bitmap_read(__bfs->bitmaps, \
+ __bpf * btrfs_bitmap_nr_##name, __bpf); \
+}
+
+#define SUBPAGE_DUMP_BITMAP(fs_info, folio, name, start, len) \
{ \
- if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
- return test_page_func(page); \
- btrfs_subpage_clamp_range(page, &start, &len); \
- return btrfs_subpage_test_##name(fs_info, page, start, len); \
+ unsigned long bitmap; \
+ const unsigned int __bpf = btrfs_blocks_per_folio(fs_info, folio); \
+ \
+ GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap); \
+ btrfs_warn(fs_info, \
+ "dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
+ start, len, folio_pos(folio), __bpf, &bitmap); \
}
-IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
- PageUptodate);
-IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
-IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
- PageDirty);
-IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
- PageWriteback);
-IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
- PageOrdered);
-IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
/*
* Make sure not only the page dirty bit is cleared, but also subpage dirty bit
* is cleared.
*/
-void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
- struct page *page)
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ struct btrfs_folio_state *bfs;
+ unsigned int start_bit;
+ unsigned int nbits;
+ unsigned long flags;
if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
return;
- ASSERT(!PageDirty(page));
- if (fs_info->sectorsize == PAGE_SIZE)
+ if (!btrfs_is_subpage(fs_info, folio)) {
+ ASSERT(!folio_test_dirty(folio));
return;
+ }
- ASSERT(PagePrivate(page) && page->private);
- ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
+ start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
+ nbits = len >> fs_info->sectorsize_bits;
+ bfs = folio_get_private(folio);
+ ASSERT(bfs);
+ spin_lock_irqsave(&bfs->lock, flags);
+ if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len);
+ ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
+ }
+ ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
/*
- * Handle different locked pages with different page sizes:
+ * This is for a folio already locked by plain lock_page()/folio_lock(), which
+ * doesn't have any subpage awareness.
*
- * - Page locked by plain lock_page()
- * It should not have any subpage::writers count.
- * Can be unlocked by unlock_page().
- * This is the most common locked page for __extent_writepage() called
- * inside extent_write_cache_pages() or extent_write_full_page().
- * Rarer cases include the @locked_page from extent_write_locked_range().
+ * This populates the involved subpage ranges so that subpage helpers can
+ * properly unlock them.
+ */
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
+{
+ struct btrfs_folio_state *bfs;
+ unsigned long flags;
+ unsigned int start_bit;
+ unsigned int nbits;
+ int ret;
+
+ ASSERT(folio_test_locked(folio));
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio))
+ return;
+
+ bfs = folio_get_private(folio);
+ start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
+ nbits = len >> fs_info->sectorsize_bits;
+ spin_lock_irqsave(&bfs->lock, flags);
+ /* Target range should not yet be locked. */
+ if (unlikely(!bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len);
+ ASSERT(bitmap_test_range_all_zero(bfs->bitmaps, start_bit, nbits));
+ }
+ bitmap_set(bfs->bitmaps, start_bit, nbits);
+ ret = atomic_add_return(nbits, &bfs->nr_locked);
+ ASSERT(ret <= btrfs_blocks_per_folio(fs_info, folio));
+ spin_unlock_irqrestore(&bfs->lock, flags);
+}
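A minimal sketch of how the lock bitmap helpers pair up for a subpage data range; the
caller function is hypothetical and assumes btrfs_attach_folio_state() already ran for
subpage folios, while the two helpers are the ones defined in this file.

static void example_lock_range(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	/* The folio itself is locked the usual way. */
	folio_lock(folio);
	/* Record which blocks of the folio this caller holds locked. */
	btrfs_folio_set_lock(fs_info, folio, start, len);

	/* ... submit IO or hand the range off to async work here ... */

	/*
	 * Clear the recorded bits; the folio lock is dropped once the last
	 * locked range of this folio is released.
	 */
	btrfs_folio_end_lock(fs_info, folio, start, len);
}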
+
+/*
+ * Clear the dirty flag for the folio.
*
- * - Page locked by lock_delalloc_pages()
- * There is only one caller, all pages except @locked_page for
- * extent_write_locked_range().
- * In this case, we have to call subpage helper to handle the case.
+ * If the affected folio is no longer dirty, return true. Otherwise return false.
*/
-void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
- u64 start, u32 len)
+bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb)
{
- struct btrfs_subpage *subpage;
+ bool last;
- ASSERT(PageLocked(page));
- /* For regular page size case, we just unlock the page */
- if (fs_info->sectorsize == PAGE_SIZE)
- return unlock_page(page);
+ if (!btrfs_meta_is_subpage(eb->fs_info)) {
+ folio_clear_dirty_for_io(folio);
+ return true;
+ }
- ASSERT(PagePrivate(page) && page->private);
- subpage = (struct btrfs_subpage *)page->private;
+ last = btrfs_subpage_clear_and_test_dirty(eb->fs_info, folio, eb->start, eb->len);
+ if (last) {
+ folio_clear_dirty_for_io(folio);
+ return true;
+ }
+ return false;
+}
- /*
- * For subpage case, there are two types of locked page. With or
- * without writers number.
- *
- * Since we own the page lock, no one else could touch subpage::writers
- * and we are safe to do several atomic operations without spinlock.
- */
- if (atomic_read(&subpage->writers))
- /* No writers, locked by plain lock_page() */
- return unlock_page(page);
+void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
+{
+ struct btrfs_folio_state *bfs;
+ const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
+ unsigned long uptodate_bitmap;
+ unsigned long dirty_bitmap;
+ unsigned long writeback_bitmap;
+ unsigned long ordered_bitmap;
+ unsigned long checked_bitmap;
+ unsigned long locked_bitmap;
+ unsigned long flags;
+
+ ASSERT(folio_test_private(folio) && folio_get_private(folio));
+ ASSERT(blocks_per_folio > 1);
+ bfs = folio_get_private(folio);
+
+ spin_lock_irqsave(&bfs->lock, flags);
+ GET_SUBPAGE_BITMAP(fs_info, folio, uptodate, &uptodate_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, dirty, &dirty_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, writeback, &writeback_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, ordered, &ordered_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, checked, &checked_bitmap);
+ GET_SUBPAGE_BITMAP(fs_info, folio, locked, &locked_bitmap);
+ spin_unlock_irqrestore(&bfs->lock, flags);
+
+ dump_page(folio_page(folio, 0), "btrfs folio state dump");
+ btrfs_warn(fs_info,
+"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
+ start, len, folio_pos(folio),
+ blocks_per_folio, &uptodate_bitmap,
+ blocks_per_folio, &dirty_bitmap,
+ blocks_per_folio, &locked_bitmap,
+ blocks_per_folio, &writeback_bitmap,
+ blocks_per_folio, &ordered_bitmap,
+ blocks_per_folio, &checked_bitmap);
+}
+
+void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
+ struct folio *folio,
+ unsigned long *ret_bitmap)
+{
+ struct btrfs_folio_state *bfs;
+ unsigned long flags;
+
+ ASSERT(folio_test_private(folio) && folio_get_private(folio));
+ ASSERT(btrfs_blocks_per_folio(fs_info, folio) > 1);
+ bfs = folio_get_private(folio);
- /* Have writers, use proper subpage helper to end it */
- btrfs_page_end_writer_lock(fs_info, page, start, len);
+ spin_lock_irqsave(&bfs->lock, flags);
+ GET_SUBPAGE_BITMAP(fs_info, folio, dirty, ret_bitmap);
+ spin_unlock_irqrestore(&bfs->lock, flags);
}
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index 7accb5c40d33..d81a0ade559f 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -4,153 +4,209 @@
#define BTRFS_SUBPAGE_H
#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/sizes.h>
+#include "btrfs_inode.h"
+
+struct address_space;
+struct folio;
/*
- * Extra info for subpapge bitmap.
+ * Extra info for subpage bitmap.
*
- * For subpage we pack all uptodate/error/dirty/writeback/ordered bitmaps into
+ * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
* one larger bitmap.
*
* This structure records how they are organized in the bitmap:
*
- * /- uptodate_offset /- error_offset /- dirty_offset
+ * /- uptodate /- dirty /- ordered
* | | |
* v v v
- * |u|u|u|u|........|u|u|e|e|.......|e|e| ... |o|o|
- * |<- bitmap_nr_bits ->|
- * |<--------------- total_nr_bits ---------------->|
+ * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
+ * |< sectors_per_page >|
+ *
+ * Unlike regular macro-like enums, we do not use upper-case names here, as
+ * these names are used in various macros to define function names.
*/
-struct btrfs_subpage_info {
- /* Number of bits for each bitmap */
- unsigned int bitmap_nr_bits;
+enum {
+ btrfs_bitmap_nr_uptodate = 0,
+ btrfs_bitmap_nr_dirty,
- /* Total number of bits for the whole bitmap */
- unsigned int total_nr_bits;
+ /*
+ * This can be changed to atomic eventually. But that change will rely
+ * on the async delalloc range rework for the locked bitmap, as async
+ * delalloc can unlock its range and mark blocks writeback at random
+ * times.
+ */
+ btrfs_bitmap_nr_writeback,
/*
- * *_start indicates where the bitmap starts, the length is always
- * @bitmap_size, which is calculated from PAGE_SIZE / sectorsize.
+ * The ordered and checked flags are for COW fixup, already marked
+ * deprecated, and will be removed eventually.
*/
- unsigned int uptodate_offset;
- unsigned int error_offset;
- unsigned int dirty_offset;
- unsigned int writeback_offset;
- unsigned int ordered_offset;
- unsigned int checked_offset;
+ btrfs_bitmap_nr_ordered,
+ btrfs_bitmap_nr_checked,
+
+ /*
+ * The locked bit is for async delalloc range (compression), currently
+ * async extent is queued with the range locked, until the compression
+ * is done.
+ * So an async extent can unlock the range at any random timing.
+ *
+ * This will need a rework on the async extent lifespan (mark writeback
+ * and do compression) before deprecating this flag.
+ */
+ btrfs_bitmap_nr_locked,
+ btrfs_bitmap_nr_max
};
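As an illustration of how this enum sizes the per-folio bitmap, mirroring the
struct_size() computation in btrfs_alloc_folio_state() with illustrative numbers: a
64K folio with 4K blocks has 16 blocks per folio, so btrfs_bitmap_nr_max * 16 = 96
bits are needed, i.e. two unsigned longs on a 64-bit build.

static unsigned int example_bitmap_longs(void)
{
	const unsigned int blocks_per_folio = 16;	/* 64K folio, 4K blocks */

	/* One window of blocks_per_folio bits per named bitmap above. */
	return BITS_TO_LONGS(btrfs_bitmap_nr_max * blocks_per_folio);
}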
/*
* Structure to trace status of each sector inside a page, attached to
* page::private for both data and metadata inodes.
*/
-struct btrfs_subpage {
+struct btrfs_folio_state {
/* Common members for both data and metadata pages */
spinlock_t lock;
- /*
- * Both data and metadata needs to track how many readers are for the
- * page.
- * Data relies on @readers to unlock the page when last reader finished.
- * While metadata doesn't need page unlock, it needs to prevent
- * page::private get cleared before the last end_page_read().
- */
- atomic_t readers;
union {
/*
* Structures only used by metadata
*
* @eb_refs should only be operated under private_lock, as it
- * manages whether the subpage can be detached.
+ * manages whether the btrfs_folio_state can be detached.
*/
atomic_t eb_refs;
- /* Structures only used by data */
- atomic_t writers;
+ /*
+ * Structures only used by data.
+ *
+ * How many sectors inside the folio are locked.
+ */
+ atomic_t nr_locked;
};
unsigned long bitmaps[];
};
-enum btrfs_subpage_type {
+enum btrfs_folio_type {
BTRFS_SUBPAGE_METADATA,
BTRFS_SUBPAGE_DATA,
};
-void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
-int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
- struct page *page, enum btrfs_subpage_type type);
-void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
- struct page *page);
+/*
+ * Subpage support for metadata is more complex, as we can have dummy extent
+ * buffers, where folios have no mapping to determine the owning inode.
+ *
+ * Thankfully we only need to check if node size is smaller than page size.
+ * Even with larger folio support, we will only allocate a folio as large as
+ * node size.
+ * Thus if nodesize < PAGE_SIZE, we know metadata needs the subpage routines.
+ */
+static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
+{
+ return fs_info->nodesize < PAGE_SIZE;
+}
+static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
+ struct folio *folio)
+{
+ if (folio->mapping && folio->mapping->host)
+ ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
+ return fs_info->sectorsize < folio_size(folio);
+}
+
+int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, enum btrfs_folio_type type);
+void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
+ enum btrfs_folio_type type);
/* Allocate additional data where page represents more than one sector */
-struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
- enum btrfs_subpage_type type);
-void btrfs_free_subpage(struct btrfs_subpage *subpage);
-
-void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
- struct page *page);
-void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
- struct page *page);
-
-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
-
-void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
-bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
-int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
-void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
-
+struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
+ size_t fsize, enum btrfs_folio_type type);
+static inline void btrfs_free_folio_state(struct btrfs_folio_state *bfs)
+{
+ kfree(bfs);
+}
+
+void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
+void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
+
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, unsigned long bitmap);
/*
* Template for subpage related operations.
*
- * btrfs_subpage_*() are for call sites where the page has subpage attached and
- * the range is ensured to be inside the page.
+ * btrfs_subpage_*() are for call sites where the folio has subpage attached and
+ * the range is ensured to be inside the folio's single page.
*
- * btrfs_page_*() are for call sites where the page can either be subpage
- * specific or regular page. The function will handle both cases.
- * But the range still needs to be inside the page.
+ * btrfs_folio_*() are for call sites where the folio can be either a subpage
+ * folio or a regular one. The functions handle both cases, but the range
+ * still needs to be inside a single page.
*
- * btrfs_page_clamp_*() are similar to btrfs_page_*(), except the range doesn't
+ * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range doesn't
* need to be inside the page. Those functions will truncate the range
* automatically.
+ *
+ * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
+ *
+ * For metadata, one should use btrfs_meta_folio_*() helpers instead, and there
+ * is no clamp version for metadata helpers, as we either go subpage
+ * (nodesize < PAGE_SIZE) or go regular folio helpers (nodesize >= PAGE_SIZE,
+ * and our folio is never larger than nodesize).
*/
#define DECLARE_BTRFS_SUBPAGE_OPS(name) \
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
+ struct folio *folio, u64 start, u32 len); \
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
+ struct folio *folio, u64 start, u32 len); \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
-void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
-void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
-bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
-void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
-void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len); \
-bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
- struct page *page, u64 start, u32 len);
+ struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len); \
+bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len); \
+bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct folio *folio, u64 start, u32 len); \
+void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
+void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
+bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);
DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
-DECLARE_BTRFS_SUBPAGE_OPS(error);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
-bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 start, u32 len);
+/*
+ * Helper for error cleanup, where a folio will have its dirty flag cleared,
+ * with writeback started and finished.
+ */
+static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
+ struct folio *locked_folio,
+ u64 start, u32 len)
+{
+ btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
+ btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
+ btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
+}
-void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
- struct page *page);
-void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
- u64 start, u32 len);
+bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
+void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
+ struct folio *folio,
+ unsigned long *ret_bitmap);
+void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
#endif
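A minimal usage sketch of the naming convention documented in the template comment above (hypothetical caller, not part of the patch; only helpers declared in this header are used, and the caller name is made up for illustration):

/* Hypothetical data-path caller sketching the clamp vs. plain variants. */
static void example_mark_range_uptodate(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	/*
	 * The range may start before or end after this folio; the clamp
	 * variant truncates it to the folio for us.  A caller that already
	 * guarantees [start, start + len) is inside the folio could use
	 * btrfs_folio_set_uptodate() instead.
	 */
	btrfs_folio_clamp_set_uptodate(fs_info, folio, start, len);
}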
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a1c54a2c787c..1999533b52be 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -23,22 +23,23 @@
#include <linux/miscdevice.h>
#include <linux/magic.h>
#include <linux/slab.h>
-#include <linux/cleancache.h>
#include <linux/ratelimit.h>
#include <linux/crc32c.h>
#include <linux/btrfs.h>
+#include <linux/security.h>
+#include <linux/fs_parser.h>
+#include "messages.h"
#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "print-tree.h"
+#include "direct-io.h"
#include "props.h"
#include "xattr.h"
-#include "volumes.h"
+#include "bio.h"
#include "export.h"
#include "compression.h"
-#include "rcu-string.h"
#include "dev-replace.h"
#include "free-space-cache.h"
#include "backref.h"
@@ -49,299 +50,49 @@
#include "block-group.h"
#include "discard.h"
#include "qgroup.h"
+#include "raid56.h"
+#include "fs.h"
+#include "accessors.h"
+#include "defrag.h"
+#include "dir-item.h"
+#include "ioctl.h"
+#include "scrub.h"
+#include "verity.h"
+#include "super.h"
+#include "extent-tree.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
static const struct super_operations btrfs_super_ops;
-
-/*
- * Types for mounting the default subvolume and a subvolume explicitly
- * requested by subvol=/path. That way the callchain is straightforward and we
- * don't have to play tricks with the mount options and recursive calls to
- * btrfs_mount.
- *
- * The new btrfs_root_fs_type also servers as a tag for the bdev_holder.
- */
static struct file_system_type btrfs_fs_type;
-static struct file_system_type btrfs_root_fs_type;
-
-static int btrfs_remount(struct super_block *sb, int *flags, char *data);
-
-/*
- * Generally the error codes correspond to their respective errors, but there
- * are a few special cases.
- *
- * EUCLEAN: Any sort of corruption that we encounter. The tree-checker for
- * instance will return EUCLEAN if any of the blocks are corrupted in
- * a way that is problematic. We want to reserve EUCLEAN for these
- * sort of corruptions.
- *
- * EROFS: If we check BTRFS_FS_STATE_ERROR and fail out with a return error, we
- * need to use EROFS for this case. We will have no idea of the
- * original failure, that will have been reported at the time we tripped
- * over the error. Each subsequent error that doesn't have any context
- * of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
- */
-const char * __attribute_const__ btrfs_decode_error(int errno)
-{
- char *errstr = "unknown";
-
- switch (errno) {
- case -ENOENT: /* -2 */
- errstr = "No such entry";
- break;
- case -EIO: /* -5 */
- errstr = "IO failure";
- break;
- case -ENOMEM: /* -12*/
- errstr = "Out of memory";
- break;
- case -EEXIST: /* -17 */
- errstr = "Object already exists";
- break;
- case -ENOSPC: /* -28 */
- errstr = "No space left";
- break;
- case -EROFS: /* -30 */
- errstr = "Readonly filesystem";
- break;
- case -EOPNOTSUPP: /* -95 */
- errstr = "Operation not supported";
- break;
- case -EUCLEAN: /* -117 */
- errstr = "Filesystem corrupted";
- break;
- case -EDQUOT: /* -122 */
- errstr = "Quota exceeded";
- break;
- }
-
- return errstr;
-}
-/*
- * __btrfs_handle_fs_error decodes expected errors from the caller and
- * invokes the appropriate error response.
- */
-__cold
-void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...)
+static void btrfs_put_super(struct super_block *sb)
{
- struct super_block *sb = fs_info->sb;
-#ifdef CONFIG_PRINTK
- const char *errstr;
-#endif
-
- /*
- * Special case: if the error is EROFS, and we're already
- * under SB_RDONLY, then it is safe here.
- */
- if (errno == -EROFS && sb_rdonly(sb))
- return;
-
-#ifdef CONFIG_PRINTK
- errstr = btrfs_decode_error(errno);
- if (fmt) {
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
- sb->s_id, function, line, errno, errstr, &vaf);
- va_end(args);
- } else {
- pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
- sb->s_id, function, line, errno, errstr);
- }
-#endif
-
- /*
- * Today we only save the error info to memory. Long term we'll
- * also send it down to the disk
- */
- set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
-
- /* Don't go through full error handling during mount */
- if (!(sb->s_flags & SB_BORN))
- return;
-
- if (sb_rdonly(sb))
- return;
-
- btrfs_discard_stop(fs_info);
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- /* btrfs handle error by forcing the filesystem readonly */
- btrfs_set_sb_rdonly(sb);
- btrfs_info(fs_info, "forced readonly");
- /*
- * Note that a running device replace operation is not canceled here
- * although there is no way to update the progress. It would add the
- * risk of a deadlock, therefore the canceling is omitted. The only
- * penalty is that some I/O remains active until the procedure
- * completes. The next time when the filesystem is mounted writable
- * again, the device replace operation continues.
- */
+ btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
+ close_ctree(fs_info);
}
-#ifdef CONFIG_PRINTK
-static const char * const logtypes[] = {
- "emergency",
- "alert",
- "critical",
- "error",
- "warning",
- "notice",
- "info",
- "debug",
-};
-
-
-/*
- * Use one ratelimit state per log level so that a flood of less important
- * messages doesn't cause more important ones to be dropped.
- */
-static struct ratelimit_state printk_limits[] = {
- RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
- RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
+/* Store the mount options related information. */
+struct btrfs_fs_context {
+ char *subvol_name;
+ u64 subvol_objectid;
+ u64 max_inline;
+ u32 commit_interval;
+ u32 metadata_ratio;
+ u32 thread_pool_size;
+ unsigned long long mount_opt;
+ unsigned long compress_type:4;
+ int compress_level;
+ refcount_t refs;
};
-void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
-{
- char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
- struct va_format vaf;
- va_list args;
- int kern_level;
- const char *type = logtypes[4];
- struct ratelimit_state *ratelimit = &printk_limits[4];
-
- va_start(args, fmt);
-
- while ((kern_level = printk_get_level(fmt)) != 0) {
- size_t size = printk_skip_level(fmt) - fmt;
-
- if (kern_level >= '0' && kern_level <= '7') {
- memcpy(lvl, fmt, size);
- lvl[size] = '\0';
- type = logtypes[kern_level - '0'];
- ratelimit = &printk_limits[kern_level - '0'];
- }
- fmt += size;
- }
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (__ratelimit(ratelimit)) {
- if (fs_info)
- printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
- fs_info->sb->s_id, &vaf);
- else
- printk("%sBTRFS %s: %pV\n", lvl, type, &vaf);
- }
-
- va_end(args);
-}
-#endif
-
-#if BITS_PER_LONG == 32
-void __cold btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info)
-{
- if (!test_and_set_bit(BTRFS_FS_32BIT_WARN, &fs_info->flags)) {
- btrfs_warn(fs_info, "reaching 32bit limit for logical addresses");
- btrfs_warn(fs_info,
-"due to page cache limit on 32bit systems, btrfs can't access metadata at or beyond %lluT",
- BTRFS_32BIT_MAX_FILE_SIZE >> 40);
- btrfs_warn(fs_info,
- "please consider upgrading to 64bit kernel/hardware");
- }
-}
-
-void __cold btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info)
-{
- if (!test_and_set_bit(BTRFS_FS_32BIT_ERROR, &fs_info->flags)) {
- btrfs_err(fs_info, "reached 32bit limit for logical addresses");
- btrfs_err(fs_info,
-"due to page cache limit on 32bit systems, metadata beyond %lluT can't be accessed",
- BTRFS_32BIT_MAX_FILE_SIZE >> 40);
- btrfs_err(fs_info,
- "please consider upgrading to 64bit kernel/hardware");
- }
-}
-#endif
-
-/*
- * We only mark the transaction aborted and then set the file system read-only.
- * This will prevent new transactions from starting or trying to join this
- * one.
- *
- * This means that error recovery at the call site is limited to freeing
- * any local memory allocations and passing the error code up without
- * further cleanup. The transaction should complete as it normally would
- * in the call path but will return -EIO.
- *
- * We'll complete the cleanup in btrfs_end_transaction and
- * btrfs_commit_transaction.
- */
-__cold
-void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
- const char *function,
- unsigned int line, int errno)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
-
- WRITE_ONCE(trans->aborted, errno);
- WRITE_ONCE(trans->transaction->aborted, errno);
- /* Wake up anybody who may be waiting on this transaction */
- wake_up(&fs_info->transaction_wait);
- wake_up(&fs_info->transaction_blocked_wait);
- __btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
-}
-/*
- * __btrfs_panic decodes unexpected, fatal errors from the caller,
- * issues an alert, and either panics or BUGs, depending on mount options.
- */
-__cold
-void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...)
-{
- char *s_id = "<unknown>";
- const char *errstr;
- struct va_format vaf = { .fmt = fmt };
- va_list args;
-
- if (fs_info)
- s_id = fs_info->sb->s_id;
-
- va_start(args, fmt);
- vaf.va = &args;
-
- errstr = btrfs_decode_error(errno);
- if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
- panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
- s_id, function, line, &vaf, errno, errstr);
-
- btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
- function, line, &vaf, errno, errstr);
- va_end(args);
- /* Caller calls BUG() */
-}
-
-static void btrfs_put_super(struct super_block *sb)
-{
- close_ctree(btrfs_sb(sb));
-}
+static void btrfs_emit_options(struct btrfs_fs_info *info,
+ struct btrfs_fs_context *old);
enum {
- Opt_acl, Opt_noacl,
+ Opt_acl,
Opt_clear_cache,
Opt_commit_interval,
Opt_compress,
@@ -351,817 +102,701 @@ enum {
Opt_degraded,
Opt_device,
Opt_fatal_errors,
- Opt_flushoncommit, Opt_noflushoncommit,
+ Opt_flushoncommit,
Opt_max_inline,
- Opt_barrier, Opt_nobarrier,
- Opt_datacow, Opt_nodatacow,
- Opt_datasum, Opt_nodatasum,
- Opt_defrag, Opt_nodefrag,
- Opt_discard, Opt_nodiscard,
+ Opt_barrier,
+ Opt_datacow,
+ Opt_datasum,
+ Opt_defrag,
+ Opt_discard,
Opt_discard_mode,
- Opt_norecovery,
Opt_ratio,
Opt_rescan_uuid_tree,
Opt_skip_balance,
- Opt_space_cache, Opt_no_space_cache,
+ Opt_space_cache,
Opt_space_cache_version,
- Opt_ssd, Opt_nossd,
- Opt_ssd_spread, Opt_nossd_spread,
+ Opt_ssd,
+ Opt_ssd_spread,
Opt_subvol,
Opt_subvol_empty,
Opt_subvolid,
Opt_thread_pool,
- Opt_treelog, Opt_notreelog,
+ Opt_treelog,
Opt_user_subvol_rm_allowed,
+ Opt_norecovery,
/* Rescue options */
Opt_rescue,
Opt_usebackuproot,
- Opt_nologreplay,
- Opt_ignorebadroots,
- Opt_ignoredatacsums,
- Opt_rescue_all,
-
- /* Deprecated options */
- Opt_recovery,
- Opt_inode_cache, Opt_noinode_cache,
/* Debugging options */
- Opt_check_integrity,
- Opt_check_integrity_including_extent_data,
- Opt_check_integrity_print_mask,
- Opt_enospc_debug, Opt_noenospc_debug,
+ Opt_enospc_debug,
#ifdef CONFIG_BTRFS_DEBUG
- Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
Opt_ref_verify,
+ Opt_ref_tracker,
#endif
Opt_err,
};
-static const match_table_t tokens = {
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_clear_cache, "clear_cache"},
- {Opt_commit_interval, "commit=%u"},
- {Opt_compress, "compress"},
- {Opt_compress_type, "compress=%s"},
- {Opt_compress_force, "compress-force"},
- {Opt_compress_force_type, "compress-force=%s"},
- {Opt_degraded, "degraded"},
- {Opt_device, "device=%s"},
- {Opt_fatal_errors, "fatal_errors=%s"},
- {Opt_flushoncommit, "flushoncommit"},
- {Opt_noflushoncommit, "noflushoncommit"},
- {Opt_inode_cache, "inode_cache"},
- {Opt_noinode_cache, "noinode_cache"},
- {Opt_max_inline, "max_inline=%s"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_datacow, "datacow"},
- {Opt_nodatacow, "nodatacow"},
- {Opt_datasum, "datasum"},
- {Opt_nodatasum, "nodatasum"},
- {Opt_defrag, "autodefrag"},
- {Opt_nodefrag, "noautodefrag"},
- {Opt_discard, "discard"},
- {Opt_discard_mode, "discard=%s"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_norecovery, "norecovery"},
- {Opt_ratio, "metadata_ratio=%u"},
- {Opt_rescan_uuid_tree, "rescan_uuid_tree"},
- {Opt_skip_balance, "skip_balance"},
- {Opt_space_cache, "space_cache"},
- {Opt_no_space_cache, "nospace_cache"},
- {Opt_space_cache_version, "space_cache=%s"},
- {Opt_ssd, "ssd"},
- {Opt_nossd, "nossd"},
- {Opt_ssd_spread, "ssd_spread"},
- {Opt_nossd_spread, "nossd_spread"},
- {Opt_subvol, "subvol=%s"},
- {Opt_subvol_empty, "subvol="},
- {Opt_subvolid, "subvolid=%s"},
- {Opt_thread_pool, "thread_pool=%u"},
- {Opt_treelog, "treelog"},
- {Opt_notreelog, "notreelog"},
- {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+enum {
+ Opt_fatal_errors_panic,
+ Opt_fatal_errors_bug,
+};
- /* Rescue options */
- {Opt_rescue, "rescue=%s"},
- /* Deprecated, with alias rescue=nologreplay */
- {Opt_nologreplay, "nologreplay"},
- /* Deprecated, with alias rescue=usebackuproot */
- {Opt_usebackuproot, "usebackuproot"},
+static const struct constant_table btrfs_parameter_fatal_errors[] = {
+ { "panic", Opt_fatal_errors_panic },
+ { "bug", Opt_fatal_errors_bug },
+ {}
+};
+
+enum {
+ Opt_discard_sync,
+ Opt_discard_async,
+};
- /* Deprecated options */
- {Opt_recovery, "recovery"},
+static const struct constant_table btrfs_parameter_discard[] = {
+ { "sync", Opt_discard_sync },
+ { "async", Opt_discard_async },
+ {}
+};
+
+enum {
+ Opt_space_cache_v1,
+ Opt_space_cache_v2,
+};
+
+static const struct constant_table btrfs_parameter_space_cache[] = {
+ { "v1", Opt_space_cache_v1 },
+ { "v2", Opt_space_cache_v2 },
+ {}
+};
+
+enum {
+ Opt_rescue_usebackuproot,
+ Opt_rescue_nologreplay,
+ Opt_rescue_ignorebadroots,
+ Opt_rescue_ignoredatacsums,
+ Opt_rescue_ignoremetacsums,
+ Opt_rescue_ignoresuperflags,
+ Opt_rescue_parameter_all,
+};
+
+static const struct constant_table btrfs_parameter_rescue[] = {
+ { "usebackuproot", Opt_rescue_usebackuproot },
+ { "nologreplay", Opt_rescue_nologreplay },
+ { "ignorebadroots", Opt_rescue_ignorebadroots },
+ { "ibadroots", Opt_rescue_ignorebadroots },
+ { "ignoredatacsums", Opt_rescue_ignoredatacsums },
+ { "ignoremetacsums", Opt_rescue_ignoremetacsums},
+ { "ignoresuperflags", Opt_rescue_ignoresuperflags},
+ { "idatacsums", Opt_rescue_ignoredatacsums },
+ { "imetacsums", Opt_rescue_ignoremetacsums},
+ { "isuperflags", Opt_rescue_ignoresuperflags},
+ { "all", Opt_rescue_parameter_all },
+ {}
+};
- /* Debugging options */
- {Opt_check_integrity, "check_int"},
- {Opt_check_integrity_including_extent_data, "check_int_data"},
- {Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
- {Opt_enospc_debug, "enospc_debug"},
- {Opt_noenospc_debug, "noenospc_debug"},
#ifdef CONFIG_BTRFS_DEBUG
- {Opt_fragment_data, "fragment=data"},
- {Opt_fragment_metadata, "fragment=metadata"},
- {Opt_fragment_all, "fragment=all"},
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- {Opt_ref_verify, "ref_verify"},
-#endif
- {Opt_err, NULL},
+enum {
+ Opt_fragment_parameter_data,
+ Opt_fragment_parameter_metadata,
+ Opt_fragment_parameter_all,
};
-static const match_table_t rescue_tokens = {
- {Opt_usebackuproot, "usebackuproot"},
- {Opt_nologreplay, "nologreplay"},
- {Opt_ignorebadroots, "ignorebadroots"},
- {Opt_ignorebadroots, "ibadroots"},
- {Opt_ignoredatacsums, "ignoredatacsums"},
- {Opt_ignoredatacsums, "idatacsums"},
- {Opt_rescue_all, "all"},
- {Opt_err, NULL},
+static const struct constant_table btrfs_parameter_fragment[] = {
+ { "data", Opt_fragment_parameter_data },
+ { "metadata", Opt_fragment_parameter_metadata },
+ { "all", Opt_fragment_parameter_all },
+ {}
};
+#endif
-static bool check_ro_option(struct btrfs_fs_info *fs_info, unsigned long opt,
- const char *opt_name)
-{
- if (fs_info->mount_opt & opt) {
- btrfs_err(fs_info, "%s must be used with ro mount option",
- opt_name);
- return true;
- }
- return false;
-}
+static const struct fs_parameter_spec btrfs_fs_parameters[] = {
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_flag_no("autodefrag", Opt_defrag),
+ fsparam_flag_no("barrier", Opt_barrier),
+ fsparam_flag("clear_cache", Opt_clear_cache),
+ fsparam_u32("commit", Opt_commit_interval),
+ fsparam_flag("compress", Opt_compress),
+ fsparam_string("compress", Opt_compress_type),
+ fsparam_flag("compress-force", Opt_compress_force),
+ fsparam_string("compress-force", Opt_compress_force_type),
+ fsparam_flag_no("datacow", Opt_datacow),
+ fsparam_flag_no("datasum", Opt_datasum),
+ fsparam_flag("degraded", Opt_degraded),
+ fsparam_string("device", Opt_device),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_enum("discard", Opt_discard_mode, btrfs_parameter_discard),
+ fsparam_enum("fatal_errors", Opt_fatal_errors, btrfs_parameter_fatal_errors),
+ fsparam_flag_no("flushoncommit", Opt_flushoncommit),
+ fsparam_string("max_inline", Opt_max_inline),
+ fsparam_u32("metadata_ratio", Opt_ratio),
+ fsparam_flag("rescan_uuid_tree", Opt_rescan_uuid_tree),
+ fsparam_flag("skip_balance", Opt_skip_balance),
+ fsparam_flag_no("space_cache", Opt_space_cache),
+ fsparam_enum("space_cache", Opt_space_cache_version, btrfs_parameter_space_cache),
+ fsparam_flag_no("ssd", Opt_ssd),
+ fsparam_flag_no("ssd_spread", Opt_ssd_spread),
+ fsparam_string("subvol", Opt_subvol),
+ fsparam_flag("subvol=", Opt_subvol_empty),
+ fsparam_u64("subvolid", Opt_subvolid),
+ fsparam_u32("thread_pool", Opt_thread_pool),
+ fsparam_flag_no("treelog", Opt_treelog),
+ fsparam_flag("user_subvol_rm_allowed", Opt_user_subvol_rm_allowed),
+
+ /* Rescue options. */
+ fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue),
+ /* Deprecated, with alias rescue=usebackuproot */
+ __fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL),
+ /* For compatibility only, alias for "rescue=nologreplay". */
+ fsparam_flag("norecovery", Opt_norecovery),
-static int parse_rescue_options(struct btrfs_fs_info *info, const char *options)
-{
- char *opts;
- char *orig;
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int ret = 0;
+ /* Debugging options. */
+ fsparam_flag_no("enospc_debug", Opt_enospc_debug),
+#ifdef CONFIG_BTRFS_DEBUG
+ fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment),
+ fsparam_flag("ref_tracker", Opt_ref_tracker),
+ fsparam_flag("ref_verify", Opt_ref_verify),
+#endif
+ {}
+};
- opts = kstrdup(options, GFP_KERNEL);
- if (!opts)
- return -ENOMEM;
- orig = opts;
+static bool btrfs_match_compress_type(const char *string, const char *type, bool may_have_level)
+{
+ const int len = strlen(type);
- while ((p = strsep(&opts, ":")) != NULL) {
- int token;
+ return (strncmp(string, type, len) == 0) &&
+ ((may_have_level && string[len] == ':') || string[len] == '\0');
+}
- if (!*p)
- continue;
- token = match_token(p, rescue_tokens, args);
- switch (token){
- case Opt_usebackuproot:
- btrfs_info(info,
- "trying to use backup root at mount time");
- btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
- break;
- case Opt_nologreplay:
- btrfs_set_and_info(info, NOLOGREPLAY,
- "disabling log replay at mount time");
- break;
- case Opt_ignorebadroots:
- btrfs_set_and_info(info, IGNOREBADROOTS,
- "ignoring bad roots");
- break;
- case Opt_ignoredatacsums:
- btrfs_set_and_info(info, IGNOREDATACSUMS,
- "ignoring data csums");
- break;
- case Opt_rescue_all:
- btrfs_info(info, "enabling all of the rescue options");
- btrfs_set_and_info(info, IGNOREDATACSUMS,
- "ignoring data csums");
- btrfs_set_and_info(info, IGNOREBADROOTS,
- "ignoring bad roots");
- btrfs_set_and_info(info, NOLOGREPLAY,
- "disabling log replay at mount time");
- break;
- case Opt_err:
- btrfs_info(info, "unrecognized rescue option '%s'", p);
- ret = -EINVAL;
- goto out;
- default:
- break;
- }
+static int btrfs_parse_compress(struct btrfs_fs_context *ctx,
+ const struct fs_parameter *param, int opt)
+{
+ const char *string = param->string;
+ int ret;
+ /*
+ * Provide the same semantics as older kernels that don't use fs
+ * context: specifying the "compress" option clears "force-compress"
+ * without the need to pass "compress-force=[no|none]" before
+ * specifying "compress".
+ */
+ if (opt != Opt_compress_force && opt != Opt_compress_force_type)
+ btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
+
+ if (opt == Opt_compress || opt == Opt_compress_force) {
+ ctx->compress_type = BTRFS_COMPRESS_ZLIB;
+ ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "zlib", true)) {
+ ctx->compress_type = BTRFS_COMPRESS_ZLIB;
+ ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, string + 4,
+ &ctx->compress_level);
+ if (ret < 0)
+ goto error;
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "lzo", true)) {
+ ctx->compress_type = BTRFS_COMPRESS_LZO;
+ ret = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, string + 3,
+ &ctx->compress_level);
+ if (ret < 0)
+ goto error;
+ if (string[3] == ':' && string[4])
+ btrfs_warn(NULL, "Compression level ignored for LZO");
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "zstd", true)) {
+ ctx->compress_type = BTRFS_COMPRESS_ZSTD;
+ ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, string + 4,
+ &ctx->compress_level);
+ if (ret < 0)
+ goto error;
+ btrfs_set_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ } else if (btrfs_match_compress_type(string, "no", false) ||
+ btrfs_match_compress_type(string, "none", false)) {
+ ctx->compress_level = 0;
+ ctx->compress_type = 0;
+ btrfs_clear_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
+ } else {
+ ret = -EINVAL;
+ goto error;
}
-out:
- kfree(orig);
+ return 0;
+error:
+ btrfs_err(NULL, "failed to parse compression option '%s'", string);
return ret;
+
}
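For reference, how a few common option strings map onto the context state under the parser above (illustration only, not part of the patch):
/*
 * Illustration only:
 *   compress=zstd:3      -> COMPRESS set, compress_type ZSTD, compress_level 3
 *   compress-force=lzo   -> COMPRESS and FORCE_COMPRESS set, compress_type LZO
 *   compress=no          -> COMPRESS and FORCE_COMPRESS cleared
 * Enabling any compression type also clears NODATACOW and NODATASUM, since
 * compressed extents require COW and checksums.
 */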
-/*
- * Regular mount options parser. Everything that is needed only when
- * reading in a new superblock is parsed here.
- * XXX JDM: This needs to be cleaned up for remount.
- */
-int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
- unsigned long new_flags)
+static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- char *p, *num;
- int intarg;
- int ret = 0;
- char *compress_type;
- bool compress_force = false;
- enum btrfs_compression_type saved_compress_type;
- int saved_compress_level;
- bool saved_compress_force;
- int no_compress = 0;
-
- if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
- btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
- else if (btrfs_free_space_cache_v1_active(info)) {
- if (btrfs_is_zoned(info)) {
- btrfs_info(info,
- "zoned: clearing existing space cache");
- btrfs_set_super_cache_generation(info->super_copy, 0);
- } else {
- btrfs_set_opt(info->mount_opt, SPACE_CACHE);
- }
- }
+ struct btrfs_fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- /*
- * Even the options are empty, we still need to do extra check
- * against new flags
- */
- if (!options)
- goto check;
+ opt = fs_parse(fc, btrfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
+ switch (opt) {
+ case Opt_degraded:
+ btrfs_set_opt(ctx->mount_opt, DEGRADED);
+ break;
+ case Opt_subvol_empty:
+ /*
+ * This exists because we used to allow it by accident, so we're
+ * keeping it to maintain ABI. See 37becec95ac3 ("Btrfs: allow
+ * empty subvol= again").
+ */
+ break;
+ case Opt_subvol:
+ kfree(ctx->subvol_name);
+ ctx->subvol_name = kstrdup(param->string, GFP_KERNEL);
+ if (!ctx->subvol_name)
+ return -ENOMEM;
+ break;
+ case Opt_subvolid:
+ ctx->subvol_objectid = result.uint_64;
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_degraded:
- btrfs_info(info, "allowing degraded mounts");
- btrfs_set_opt(info->mount_opt, DEGRADED);
- break;
- case Opt_subvol:
- case Opt_subvol_empty:
- case Opt_subvolid:
- case Opt_device:
- /*
- * These are parsed by btrfs_parse_subvol_options or
- * btrfs_parse_device_options and can be ignored here.
- */
- break;
- case Opt_nodatasum:
- btrfs_set_and_info(info, NODATASUM,
- "setting nodatasum");
- break;
- case Opt_datasum:
- if (btrfs_test_opt(info, NODATASUM)) {
- if (btrfs_test_opt(info, NODATACOW))
- btrfs_info(info,
- "setting datasum, datacow enabled");
- else
- btrfs_info(info, "setting datasum");
- }
- btrfs_clear_opt(info->mount_opt, NODATACOW);
- btrfs_clear_opt(info->mount_opt, NODATASUM);
- break;
- case Opt_nodatacow:
- if (!btrfs_test_opt(info, NODATACOW)) {
- if (!btrfs_test_opt(info, COMPRESS) ||
- !btrfs_test_opt(info, FORCE_COMPRESS)) {
- btrfs_info(info,
- "setting nodatacow, compression disabled");
- } else {
- btrfs_info(info, "setting nodatacow");
- }
- }
- btrfs_clear_opt(info->mount_opt, COMPRESS);
- btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
- btrfs_set_opt(info->mount_opt, NODATACOW);
- btrfs_set_opt(info->mount_opt, NODATASUM);
- break;
- case Opt_datacow:
- btrfs_clear_and_info(info, NODATACOW,
- "setting datacow");
- break;
- case Opt_compress_force:
- case Opt_compress_force_type:
- compress_force = true;
- fallthrough;
- case Opt_compress:
- case Opt_compress_type:
- saved_compress_type = btrfs_test_opt(info,
- COMPRESS) ?
- info->compress_type : BTRFS_COMPRESS_NONE;
- saved_compress_force =
- btrfs_test_opt(info, FORCE_COMPRESS);
- saved_compress_level = info->compress_level;
- if (token == Opt_compress ||
- token == Opt_compress_force ||
- strncmp(args[0].from, "zlib", 4) == 0) {
- compress_type = "zlib";
-
- info->compress_type = BTRFS_COMPRESS_ZLIB;
- info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
- /*
- * args[0] contains uninitialized data since
- * for these tokens we don't expect any
- * parameter.
- */
- if (token != Opt_compress &&
- token != Opt_compress_force)
- info->compress_level =
- btrfs_compress_str2level(
- BTRFS_COMPRESS_ZLIB,
- args[0].from + 4);
- btrfs_set_opt(info->mount_opt, COMPRESS);
- btrfs_clear_opt(info->mount_opt, NODATACOW);
- btrfs_clear_opt(info->mount_opt, NODATASUM);
- no_compress = 0;
- } else if (strncmp(args[0].from, "lzo", 3) == 0) {
- compress_type = "lzo";
- info->compress_type = BTRFS_COMPRESS_LZO;
- info->compress_level = 0;
- btrfs_set_opt(info->mount_opt, COMPRESS);
- btrfs_clear_opt(info->mount_opt, NODATACOW);
- btrfs_clear_opt(info->mount_opt, NODATASUM);
- btrfs_set_fs_incompat(info, COMPRESS_LZO);
- no_compress = 0;
- } else if (strncmp(args[0].from, "zstd", 4) == 0) {
- compress_type = "zstd";
- info->compress_type = BTRFS_COMPRESS_ZSTD;
- info->compress_level =
- btrfs_compress_str2level(
- BTRFS_COMPRESS_ZSTD,
- args[0].from + 4);
- btrfs_set_opt(info->mount_opt, COMPRESS);
- btrfs_clear_opt(info->mount_opt, NODATACOW);
- btrfs_clear_opt(info->mount_opt, NODATASUM);
- btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
- no_compress = 0;
- } else if (strncmp(args[0].from, "no", 2) == 0) {
- compress_type = "no";
- info->compress_level = 0;
- info->compress_type = 0;
- btrfs_clear_opt(info->mount_opt, COMPRESS);
- btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
- compress_force = false;
- no_compress++;
- } else {
- ret = -EINVAL;
- goto out;
- }
+ /* subvolid=0 means give me the original fs_tree. */
+ if (!ctx->subvol_objectid)
+ ctx->subvol_objectid = BTRFS_FS_TREE_OBJECTID;
+ break;
+ case Opt_device: {
+ struct btrfs_device *device;
- if (compress_force) {
- btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
- } else {
- /*
- * If we remount from compress-force=xxx to
- * compress=xxx, we need clear FORCE_COMPRESS
- * flag, otherwise, there is no way for users
- * to disable forcible compression separately.
- */
- btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
- }
- if (no_compress == 1) {
- btrfs_info(info, "use no compression");
- } else if ((info->compress_type != saved_compress_type) ||
- (compress_force != saved_compress_force) ||
- (info->compress_level != saved_compress_level)) {
- btrfs_info(info, "%s %s compression, level %d",
- (compress_force) ? "force" : "use",
- compress_type, info->compress_level);
- }
- compress_force = false;
- break;
- case Opt_ssd:
- btrfs_set_and_info(info, SSD,
- "enabling ssd optimizations");
- btrfs_clear_opt(info->mount_opt, NOSSD);
- break;
- case Opt_ssd_spread:
- btrfs_set_and_info(info, SSD,
- "enabling ssd optimizations");
- btrfs_set_and_info(info, SSD_SPREAD,
- "using spread ssd allocation scheme");
- btrfs_clear_opt(info->mount_opt, NOSSD);
- break;
- case Opt_nossd:
- btrfs_set_opt(info->mount_opt, NOSSD);
- btrfs_clear_and_info(info, SSD,
- "not using ssd optimizations");
- fallthrough;
- case Opt_nossd_spread:
- btrfs_clear_and_info(info, SSD_SPREAD,
- "not using spread ssd allocation scheme");
- break;
- case Opt_barrier:
- btrfs_clear_and_info(info, NOBARRIER,
- "turning on barriers");
- break;
- case Opt_nobarrier:
- btrfs_set_and_info(info, NOBARRIER,
- "turning off barriers");
- break;
- case Opt_thread_pool:
- ret = match_int(&args[0], &intarg);
- if (ret) {
- goto out;
- } else if (intarg == 0) {
- ret = -EINVAL;
- goto out;
- }
- info->thread_pool_size = intarg;
- break;
- case Opt_max_inline:
- num = match_strdup(&args[0]);
- if (num) {
- info->max_inline = memparse(num, NULL);
- kfree(num);
-
- if (info->max_inline) {
- info->max_inline = min_t(u64,
- info->max_inline,
- info->sectorsize);
- }
- btrfs_info(info, "max_inline at %llu",
- info->max_inline);
- } else {
- ret = -ENOMEM;
- goto out;
- }
- break;
- case Opt_acl:
+ mutex_lock(&uuid_mutex);
+ device = btrfs_scan_one_device(param->string, false);
+ mutex_unlock(&uuid_mutex);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+ break;
+ }
+ case Opt_datasum:
+ if (result.negated) {
+ btrfs_set_opt(ctx->mount_opt, NODATASUM);
+ } else {
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ btrfs_clear_opt(ctx->mount_opt, NODATASUM);
+ }
+ break;
+ case Opt_datacow:
+ if (result.negated) {
+ btrfs_clear_opt(ctx->mount_opt, COMPRESS);
+ btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
+ btrfs_set_opt(ctx->mount_opt, NODATACOW);
+ btrfs_set_opt(ctx->mount_opt, NODATASUM);
+ } else {
+ btrfs_clear_opt(ctx->mount_opt, NODATACOW);
+ }
+ break;
+ case Opt_compress_force:
+ case Opt_compress_force_type:
+ btrfs_set_opt(ctx->mount_opt, FORCE_COMPRESS);
+ fallthrough;
+ case Opt_compress:
+ case Opt_compress_type:
+ if (btrfs_parse_compress(ctx, param, opt))
+ return -EINVAL;
+ break;
+ case Opt_ssd:
+ if (result.negated) {
+ btrfs_set_opt(ctx->mount_opt, NOSSD);
+ btrfs_clear_opt(ctx->mount_opt, SSD);
+ btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD);
+ } else {
+ btrfs_set_opt(ctx->mount_opt, SSD);
+ btrfs_clear_opt(ctx->mount_opt, NOSSD);
+ }
+ break;
+ case Opt_ssd_spread:
+ if (result.negated) {
+ btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD);
+ } else {
+ btrfs_set_opt(ctx->mount_opt, SSD);
+ btrfs_set_opt(ctx->mount_opt, SSD_SPREAD);
+ btrfs_clear_opt(ctx->mount_opt, NOSSD);
+ }
+ break;
+ case Opt_barrier:
+ if (result.negated)
+ btrfs_set_opt(ctx->mount_opt, NOBARRIER);
+ else
+ btrfs_clear_opt(ctx->mount_opt, NOBARRIER);
+ break;
+ case Opt_thread_pool:
+ if (result.uint_32 == 0) {
+ btrfs_err(NULL, "invalid value 0 for thread_pool");
+ return -EINVAL;
+ }
+ ctx->thread_pool_size = result.uint_32;
+ break;
+ case Opt_max_inline:
+ ctx->max_inline = memparse(param->string, NULL);
+ break;
+ case Opt_acl:
+ if (result.negated) {
+ fc->sb_flags &= ~SB_POSIXACL;
+ } else {
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- info->sb->s_flags |= SB_POSIXACL;
- break;
+ fc->sb_flags |= SB_POSIXACL;
#else
- btrfs_err(info, "support for ACL not compiled in!");
- ret = -EINVAL;
- goto out;
+ btrfs_err(NULL, "support for ACL not compiled in");
+ return -EINVAL;
#endif
- case Opt_noacl:
- info->sb->s_flags &= ~SB_POSIXACL;
- break;
- case Opt_notreelog:
- btrfs_set_and_info(info, NOTREELOG,
- "disabling tree log");
- break;
- case Opt_treelog:
- btrfs_clear_and_info(info, NOTREELOG,
- "enabling tree log");
- break;
- case Opt_norecovery:
- case Opt_nologreplay:
- btrfs_warn(info,
- "'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
- btrfs_set_and_info(info, NOLOGREPLAY,
- "disabling log replay at mount time");
- break;
- case Opt_flushoncommit:
- btrfs_set_and_info(info, FLUSHONCOMMIT,
- "turning on flush-on-commit");
- break;
- case Opt_noflushoncommit:
- btrfs_clear_and_info(info, FLUSHONCOMMIT,
- "turning off flush-on-commit");
- break;
- case Opt_ratio:
- ret = match_int(&args[0], &intarg);
- if (ret)
- goto out;
- info->metadata_ratio = intarg;
- btrfs_info(info, "metadata ratio %u",
- info->metadata_ratio);
- break;
- case Opt_discard:
- case Opt_discard_mode:
- if (token == Opt_discard ||
- strcmp(args[0].from, "sync") == 0) {
- btrfs_clear_opt(info->mount_opt, DISCARD_ASYNC);
- btrfs_set_and_info(info, DISCARD_SYNC,
- "turning on sync discard");
- } else if (strcmp(args[0].from, "async") == 0) {
- btrfs_clear_opt(info->mount_opt, DISCARD_SYNC);
- btrfs_set_and_info(info, DISCARD_ASYNC,
- "turning on async discard");
- } else {
- ret = -EINVAL;
- goto out;
- }
- break;
- case Opt_nodiscard:
- btrfs_clear_and_info(info, DISCARD_SYNC,
- "turning off discard");
- btrfs_clear_and_info(info, DISCARD_ASYNC,
- "turning off async discard");
- break;
- case Opt_space_cache:
- case Opt_space_cache_version:
- if (token == Opt_space_cache ||
- strcmp(args[0].from, "v1") == 0) {
- btrfs_clear_opt(info->mount_opt,
- FREE_SPACE_TREE);
- btrfs_set_and_info(info, SPACE_CACHE,
- "enabling disk space caching");
- } else if (strcmp(args[0].from, "v2") == 0) {
- btrfs_clear_opt(info->mount_opt,
- SPACE_CACHE);
- btrfs_set_and_info(info, FREE_SPACE_TREE,
- "enabling free space tree");
- } else {
- ret = -EINVAL;
- goto out;
- }
- break;
- case Opt_rescan_uuid_tree:
- btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
- break;
- case Opt_no_space_cache:
- if (btrfs_test_opt(info, SPACE_CACHE)) {
- btrfs_clear_and_info(info, SPACE_CACHE,
- "disabling disk space caching");
- }
- if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
- btrfs_clear_and_info(info, FREE_SPACE_TREE,
- "disabling free space tree");
- }
- break;
- case Opt_inode_cache:
- case Opt_noinode_cache:
- btrfs_warn(info,
- "the 'inode_cache' option is deprecated and has no effect since 5.11");
- break;
- case Opt_clear_cache:
- btrfs_set_and_info(info, CLEAR_CACHE,
- "force clearing of disk cache");
- break;
- case Opt_user_subvol_rm_allowed:
- btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
+ }
+ /*
+ * VFS limits the ability to toggle ACL on and off via remount,
+ * despite every file system allowing this. This seems to be
+ * an oversight since we all do, but it'll fail if we're
+ * remounting. So don't set the mask here, we'll check it in
+ * btrfs_reconfigure and do the toggling ourselves.
+ */
+ if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE)
+ fc->sb_flags_mask |= SB_POSIXACL;
+ break;
+ case Opt_treelog:
+ if (result.negated)
+ btrfs_set_opt(ctx->mount_opt, NOTREELOG);
+ else
+ btrfs_clear_opt(ctx->mount_opt, NOTREELOG);
+ break;
+ case Opt_norecovery:
+ btrfs_info(NULL,
+"'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'");
+ btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
+ break;
+ case Opt_flushoncommit:
+ if (result.negated)
+ btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT);
+ else
+ btrfs_set_opt(ctx->mount_opt, FLUSHONCOMMIT);
+ break;
+ case Opt_ratio:
+ ctx->metadata_ratio = result.uint_32;
+ break;
+ case Opt_discard:
+ if (result.negated) {
+ btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC);
+ btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
+ btrfs_set_opt(ctx->mount_opt, NODISCARD);
+ } else {
+ btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC);
+ btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
+ }
+ break;
+ case Opt_discard_mode:
+ switch (result.uint_32) {
+ case Opt_discard_sync:
+ btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
+ btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC);
break;
- case Opt_enospc_debug:
- btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+ case Opt_discard_async:
+ btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC);
+ btrfs_set_opt(ctx->mount_opt, DISCARD_ASYNC);
break;
- case Opt_noenospc_debug:
- btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
+ default:
+ btrfs_err(NULL, "unrecognized discard mode value %s",
+ param->key);
+ return -EINVAL;
+ }
+ btrfs_clear_opt(ctx->mount_opt, NODISCARD);
+ break;
+ case Opt_space_cache:
+ if (result.negated) {
+ btrfs_set_opt(ctx->mount_opt, NOSPACECACHE);
+ btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE);
+ btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
+ } else {
+ btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
+ btrfs_set_opt(ctx->mount_opt, SPACE_CACHE);
+ }
+ break;
+ case Opt_space_cache_version:
+ switch (result.uint_32) {
+ case Opt_space_cache_v1:
+ btrfs_set_opt(ctx->mount_opt, SPACE_CACHE);
+ btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
break;
- case Opt_defrag:
- btrfs_set_and_info(info, AUTO_DEFRAG,
- "enabling auto defrag");
+ case Opt_space_cache_v2:
+ btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE);
+ btrfs_set_opt(ctx->mount_opt, FREE_SPACE_TREE);
break;
- case Opt_nodefrag:
- btrfs_clear_and_info(info, AUTO_DEFRAG,
- "disabling auto defrag");
+ default:
+ btrfs_err(NULL, "unrecognized space_cache value %s",
+ param->key);
+ return -EINVAL;
+ }
+ break;
+ case Opt_rescan_uuid_tree:
+ btrfs_set_opt(ctx->mount_opt, RESCAN_UUID_TREE);
+ break;
+ case Opt_clear_cache:
+ btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE);
+ break;
+ case Opt_user_subvol_rm_allowed:
+ btrfs_set_opt(ctx->mount_opt, USER_SUBVOL_RM_ALLOWED);
+ break;
+ case Opt_enospc_debug:
+ if (result.negated)
+ btrfs_clear_opt(ctx->mount_opt, ENOSPC_DEBUG);
+ else
+ btrfs_set_opt(ctx->mount_opt, ENOSPC_DEBUG);
+ break;
+ case Opt_defrag:
+ if (result.negated)
+ btrfs_clear_opt(ctx->mount_opt, AUTO_DEFRAG);
+ else
+ btrfs_set_opt(ctx->mount_opt, AUTO_DEFRAG);
+ break;
+ case Opt_usebackuproot:
+ btrfs_warn(NULL,
+ "'usebackuproot' is deprecated, use 'rescue=usebackuproot' instead");
+ btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT);
+
+ /* If we're loading the backup roots we can't trust the space cache. */
+ btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE);
+ break;
+ case Opt_skip_balance:
+ btrfs_set_opt(ctx->mount_opt, SKIP_BALANCE);
+ break;
+ case Opt_fatal_errors:
+ switch (result.uint_32) {
+ case Opt_fatal_errors_panic:
+ btrfs_set_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR);
break;
- case Opt_recovery:
- case Opt_usebackuproot:
- btrfs_warn(info,
- "'%s' is deprecated, use 'rescue=usebackuproot' instead",
- token == Opt_recovery ? "recovery" :
- "usebackuproot");
- btrfs_info(info,
- "trying to use backup root at mount time");
- btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
+ case Opt_fatal_errors_bug:
+ btrfs_clear_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR);
break;
- case Opt_skip_balance:
- btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
+ default:
+ btrfs_err(NULL, "unrecognized fatal_errors value %s",
+ param->key);
+ return -EINVAL;
+ }
+ break;
+ case Opt_commit_interval:
+ ctx->commit_interval = result.uint_32;
+ if (ctx->commit_interval > BTRFS_WARNING_COMMIT_INTERVAL) {
+ btrfs_warn(NULL, "excessive commit interval %u, use with care",
+ ctx->commit_interval);
+ }
+ if (ctx->commit_interval == 0)
+ ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
+ break;
+ case Opt_rescue:
+ switch (result.uint_32) {
+ case Opt_rescue_usebackuproot:
+ btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT);
break;
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- case Opt_check_integrity_including_extent_data:
- btrfs_info(info,
- "enabling check integrity including extent data");
- btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
- btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
+ case Opt_rescue_nologreplay:
+ btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
break;
- case Opt_check_integrity:
- btrfs_info(info, "enabling check integrity");
- btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
+ case Opt_rescue_ignorebadroots:
+ btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
break;
- case Opt_check_integrity_print_mask:
- ret = match_int(&args[0], &intarg);
- if (ret)
- goto out;
- info->check_integrity_print_mask = intarg;
- btrfs_info(info, "check_integrity_print_mask 0x%x",
- info->check_integrity_print_mask);
+ case Opt_rescue_ignoredatacsums:
+ btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
break;
-#else
- case Opt_check_integrity_including_extent_data:
- case Opt_check_integrity:
- case Opt_check_integrity_print_mask:
- btrfs_err(info,
- "support for check_integrity* not compiled in!");
- ret = -EINVAL;
- goto out;
-#endif
- case Opt_fatal_errors:
- if (strcmp(args[0].from, "panic") == 0)
- btrfs_set_opt(info->mount_opt,
- PANIC_ON_FATAL_ERROR);
- else if (strcmp(args[0].from, "bug") == 0)
- btrfs_clear_opt(info->mount_opt,
- PANIC_ON_FATAL_ERROR);
- else {
- ret = -EINVAL;
- goto out;
- }
+ case Opt_rescue_ignoremetacsums:
+ btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS);
break;
- case Opt_commit_interval:
- intarg = 0;
- ret = match_int(&args[0], &intarg);
- if (ret)
- goto out;
- if (intarg == 0) {
- btrfs_info(info,
- "using default commit interval %us",
- BTRFS_DEFAULT_COMMIT_INTERVAL);
- intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
- } else if (intarg > 300) {
- btrfs_warn(info, "excessive commit interval %d",
- intarg);
- }
- info->commit_interval = intarg;
+ case Opt_rescue_ignoresuperflags:
+ btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS);
break;
- case Opt_rescue:
- ret = parse_rescue_options(info, args[0].from);
- if (ret < 0)
- goto out;
+ case Opt_rescue_parameter_all:
+ btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
+ btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS);
+ btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS);
+ btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
+ btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
break;
+ default:
+ btrfs_info(NULL, "unrecognized rescue option '%s'",
+ param->key);
+ return -EINVAL;
+ }
+ break;
#ifdef CONFIG_BTRFS_DEBUG
- case Opt_fragment_all:
- btrfs_info(info, "fragmenting all space");
- btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
- btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
+ case Opt_fragment:
+ switch (result.uint_32) {
+ case Opt_fragment_parameter_all:
+ btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA);
+ btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA);
break;
- case Opt_fragment_metadata:
- btrfs_info(info, "fragmenting metadata");
- btrfs_set_opt(info->mount_opt,
- FRAGMENT_METADATA);
+ case Opt_fragment_parameter_metadata:
+ btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA);
break;
- case Opt_fragment_data:
- btrfs_info(info, "fragmenting data");
- btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
+ case Opt_fragment_parameter_data:
+ btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA);
break;
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- case Opt_ref_verify:
- btrfs_info(info, "doing ref verification");
- btrfs_set_opt(info->mount_opt, REF_VERIFY);
- break;
-#endif
- case Opt_err:
- btrfs_err(info, "unrecognized mount option '%s'", p);
- ret = -EINVAL;
- goto out;
default:
- break;
+ btrfs_info(NULL, "unrecognized fragment option '%s'",
+ param->key);
+ return -EINVAL;
}
+ break;
+ case Opt_ref_verify:
+ btrfs_set_opt(ctx->mount_opt, REF_VERIFY);
+ break;
+ case Opt_ref_tracker:
+ btrfs_set_opt(ctx->mount_opt, REF_TRACKER);
+ break;
+#endif
+ default:
+ btrfs_err(NULL, "unrecognized mount option '%s'", param->key);
+ return -EINVAL;
}
-check:
- /* We're read-only, don't have to check. */
- if (new_flags & SB_RDONLY)
- goto out;
- if (check_ro_option(info, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
- check_ro_option(info, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
- check_ro_option(info, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums"))
- ret = -EINVAL;
-out:
- if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
- !btrfs_test_opt(info, FREE_SPACE_TREE) &&
- !btrfs_test_opt(info, CLEAR_CACHE)) {
- btrfs_err(info, "cannot disable free space tree");
- ret = -EINVAL;
-
- }
- if (!ret)
- ret = btrfs_check_mountopts_zoned(info);
- if (!ret && btrfs_test_opt(info, SPACE_CACHE))
- btrfs_info(info, "disk space caching is enabled");
- if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
- btrfs_info(info, "using free space tree");
- return ret;
+ return 0;
}
/*
- * Parse mount options that are required early in the mount process.
- *
- * All other options will be parsed on much later in the mount process and
- * only when we need to allocate a new super block.
+ * Some options only have meaning at mount time and shouldn't persist across
+ * remounts, or be displayed. Clear these at the end of mount and remount code
+ * paths.
*/
-static int btrfs_parse_device_options(const char *options, fmode_t flags,
- void *holder)
+static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
{
- substring_t args[MAX_OPT_ARGS];
- char *device_name, *opts, *orig, *p;
- struct btrfs_device *device = NULL;
- int error = 0;
+ btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
+ btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
+ btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE);
+}
- lockdep_assert_held(&uuid_mutex);
+static bool check_ro_option(const struct btrfs_fs_info *fs_info,
+ unsigned long long mount_opt, unsigned long long opt,
+ const char *opt_name)
+{
+ if (mount_opt & opt) {
+ btrfs_err(fs_info, "%s must be used with ro mount option",
+ opt_name);
+ return true;
+ }
+ return false;
+}
- if (!options)
- return 0;
+bool btrfs_check_options(const struct btrfs_fs_info *info,
+ unsigned long long *mount_opt,
+ unsigned long flags)
+{
+ bool ret = true;
- /*
- * strsep changes the string, duplicate it because btrfs_parse_options
- * gets called later
- */
- opts = kstrdup(options, GFP_KERNEL);
- if (!opts)
- return -ENOMEM;
- orig = opts;
+ if (!(flags & SB_RDONLY) &&
+ (check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums") ||
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREMETACSUMS, "ignoremetacsums") ||
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNORESUPERFLAGS, "ignoresuperflags")))
+ ret = false;
- while ((p = strsep(&opts, ",")) != NULL) {
- int token;
+ if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
+ !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE) &&
+ !btrfs_raw_test_opt(*mount_opt, CLEAR_CACHE)) {
+ btrfs_err(info, "cannot disable free-space-tree");
+ ret = false;
+ }
+ if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
+ !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) {
+ btrfs_err(info, "cannot disable free-space-tree with block-group-tree feature");
+ ret = false;
+ }
- if (!*p)
- continue;
+ if (btrfs_check_mountopts_zoned(info, mount_opt))
+ ret = false;
- token = match_token(p, tokens, args);
- if (token == Opt_device) {
- device_name = match_strdup(&args[0]);
- if (!device_name) {
- error = -ENOMEM;
- goto out;
- }
- device = btrfs_scan_one_device(device_name, flags,
- holder);
- kfree(device_name);
- if (IS_ERR(device)) {
- error = PTR_ERR(device);
- goto out;
- }
+ if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {
+ if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
+ btrfs_warn(info,
+"space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2");
}
}
-out:
- kfree(orig);
- return error;
+ return ret;
}
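As an example of the read-only checks above (illustration only, not part of the patch):
/*
 * Illustration only: "mount -o ro,rescue=ignorebadroots" passes
 * btrfs_check_options(), while the same rescue option on a read-write
 * mount fails with "ignorebadroots must be used with ro mount option".
 */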
/*
- * Parse mount options that are related to subvolume id
+ * This is subtle: we only call this during open_ctree(). We need to pre-load
+ * the mount options with the on-disk settings. Before the new mount API took
+ * effect we would do this on mount and remount. With the new mount API we'll
+ * only do this on the initial mount.
*
- * The value is later passed to mount_subvol()
+ * This isn't a change in behavior, because we're using the current state of the
+ * file system to set the current mount options. If you mounted with special
+ * options to disable these features and then remounted we wouldn't revert the
+ * settings, because mounting without these features cleared the on-disk
+ * settings, so this being called on re-mount is not needed.
*/
-static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
- u64 *subvol_objectid)
+void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
{
- substring_t args[MAX_OPT_ARGS];
- char *opts, *orig, *p;
- int error = 0;
- u64 subvolid;
-
- if (!options)
- return 0;
+ if (fs_info->sectorsize < PAGE_SIZE) {
+ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
+ if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
+ btrfs_info(fs_info,
+ "forcing free space tree for sector size %u with page size %lu",
+ fs_info->sectorsize, PAGE_SIZE);
+ btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
+ }
+ }
/*
- * strsep changes the string, duplicate it because
- * btrfs_parse_device_options gets called later
+ * At this point our mount options are populated, so we only mess with
+ * these settings if we don't have any settings already.
*/
- opts = kstrdup(options, GFP_KERNEL);
- if (!opts)
- return -ENOMEM;
- orig = opts;
+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
+ return;
- while ((p = strsep(&opts, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
+ if (btrfs_is_zoned(fs_info) &&
+ btrfs_free_space_cache_v1_active(fs_info)) {
+ btrfs_info(fs_info, "zoned: clearing existing space cache");
+ btrfs_set_super_cache_generation(fs_info->super_copy, 0);
+ return;
+ }
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_subvol:
- kfree(*subvol_name);
- *subvol_name = match_strdup(&args[0]);
- if (!*subvol_name) {
- error = -ENOMEM;
- goto out;
- }
- break;
- case Opt_subvolid:
- error = match_u64(&args[0], &subvolid);
- if (error)
- goto out;
+ if (btrfs_test_opt(fs_info, SPACE_CACHE))
+ return;
- /* we want the original fs_tree */
- if (subvolid == 0)
- subvolid = BTRFS_FS_TREE_OBJECTID;
+ if (btrfs_test_opt(fs_info, NOSPACECACHE))
+ return;
- *subvol_objectid = subvolid;
- break;
- default:
- break;
- }
- }
+ /*
+ * At this point we don't have explicit options set by the user, set
+ * them ourselves based on the state of the file system.
+ */
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
+ else if (btrfs_free_space_cache_v1_active(fs_info))
+ btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
+}
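+
+/*
+ * Rough summary of the precedence implemented above (derived from the checks
+ * themselves, not a separate spec): subpage filesystems (sectorsize <
+ * PAGE_SIZE) are forced onto the free space tree, explicit free-space-tree /
+ * space_cache / nospacecache options are honored as given, a zoned filesystem
+ * with a stale v1 cache gets its cache generation cleared, and otherwise the
+ * on-disk state decides. E.g. a filesystem whose superblock has the compat_ro
+ * FREE_SPACE_TREE bit set and is mounted with no options ends up with
+ * FREE_SPACE_TREE set here.
+ */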
-out:
- kfree(orig);
- return error;
+static void set_device_specific_options(struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_test_opt(fs_info, NOSSD) &&
+ !fs_info->fs_devices->rotating)
+ btrfs_set_opt(fs_info->mount_opt, SSD);
+
+ /*
+ * For devices supporting discard turn on discard=async automatically,
+ * unless it's already set or disabled. This could be turned off by
+ * nodiscard for the same mount.
+ *
+ * The zoned mode piggybacks on the discard functionality for
+ * resetting a zone. There is no reason to delay the zone reset as it is
+ * fast enough. So, do not enable async discard for zoned mode.
+ */
+ if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
+ btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
+ btrfs_test_opt(fs_info, NODISCARD)) &&
+ fs_info->fs_devices->discardable &&
+ !btrfs_is_zoned(fs_info))
+ btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC);
}
char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
@@ -1172,17 +807,15 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
struct btrfs_root_ref *root_ref;
struct btrfs_inode_ref *inode_ref;
struct btrfs_key key;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
char *name = NULL, *ptr;
u64 dirid;
int len;
int ret;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!path)
+ return ERR_PTR(-ENOMEM);
name = kmalloc(PATH_MAX, GFP_KERNEL);
if (!name) {
@@ -1270,7 +903,6 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
fs_root = NULL;
}
- btrfs_free_path(path);
if (ptr == name + PATH_MAX - 1) {
name[0] = '/';
name[1] = '\0';
@@ -1281,7 +913,6 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
err:
btrfs_put_root(fs_root);
- btrfs_free_path(path);
kfree(name);
return ERR_PTR(ret);
}
@@ -1290,8 +921,9 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
{
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_dir_item *di;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key location;
+ struct fscrypt_str name = FSTR_INIT("default", 7);
u64 dir_id;
path = btrfs_alloc_path();
@@ -1304,9 +936,8 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
* to mount.
*/
dir_id = btrfs_super_root_dir(fs_info->super_copy);
- di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
+ di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0);
if (IS_ERR(di)) {
- btrfs_free_path(path);
return PTR_ERR(di);
}
if (!di) {
@@ -1315,72 +946,67 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
* it's always been there, but don't freak out, just try and
* mount the top-level subvolume.
*/
- btrfs_free_path(path);
*objectid = BTRFS_FS_TREE_OBJECTID;
return 0;
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
- btrfs_free_path(path);
*objectid = location.objectid;
return 0;
}
static int btrfs_fill_super(struct super_block *sb,
- struct btrfs_fs_devices *fs_devices,
- void *data)
+ struct btrfs_fs_devices *fs_devices)
{
- struct inode *inode;
+ struct btrfs_inode *inode;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- int err;
+ int ret;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_magic = BTRFS_SUPER_MAGIC;
sb->s_op = &btrfs_super_ops;
- sb->s_d_op = &btrfs_dentry_operations;
+ set_default_d_op(sb, &btrfs_dentry_operations);
sb->s_export_op = &btrfs_export_ops;
#ifdef CONFIG_FS_VERITY
sb->s_vop = &btrfs_verityops;
#endif
sb->s_xattr = btrfs_xattr_handlers;
sb->s_time_gran = 1;
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- sb->s_flags |= SB_POSIXACL;
-#endif
- sb->s_flags |= SB_I_VERSION;
- sb->s_iflags |= SB_I_CGROUPWB;
+ sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
- err = super_setup_bdi(sb);
- if (err) {
+ ret = super_setup_bdi(sb);
+ if (ret) {
btrfs_err(fs_info, "super_setup_bdi failed");
- return err;
+ return ret;
}
- err = open_ctree(sb, fs_devices, (char *)data);
- if (err) {
- btrfs_err(fs_info, "open_ctree failed");
- return err;
+ ret = open_ctree(sb, fs_devices);
+ if (ret) {
+ btrfs_err(fs_info, "open_ctree failed: %d", ret);
+ return ret;
}
- inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
+ btrfs_emit_options(fs_info, NULL);
+
+ inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ ret = PTR_ERR(inode);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
goto fail_close;
}
- sb->s_root = d_make_root(inode);
+ sb->s_root = d_make_root(&inode->vfs_inode);
if (!sb->s_root) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto fail_close;
}
- cleancache_init_fs(sb);
sb->s_flags |= SB_ACTIVE;
return 0;
fail_close:
close_ctree(fs_info);
- return err;
+ return ret;
}
int btrfs_sync_fs(struct super_block *sb, int wait)
@@ -1396,7 +1022,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
@@ -1406,7 +1032,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
* Exit unless we have some pending changes
* that need to go through commit
*/
- if (fs_info->pending_changes == 0)
+ if (!test_bit(BTRFS_FS_NEED_TRANS_COMMIT,
+ &fs_info->flags))
return 0;
/*
* A non-blocking test if the fs is frozen. We must not
@@ -1458,7 +1085,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_printf(seq, ",compress-force=%s", compress_type);
else
seq_printf(seq, ",compress=%s", compress_type);
- if (info->compress_level)
+ if (info->compress_level && info->compress_type != BTRFS_COMPRESS_LZO)
seq_printf(seq, ":%d", info->compress_level);
}
if (btrfs_test_opt(info, NOSSD))
@@ -1477,6 +1104,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
print_rescue_option(seq, "ignorebadroots", &printed);
if (btrfs_test_opt(info, IGNOREDATACSUMS))
print_rescue_option(seq, "ignoredatacsums", &printed);
+ if (btrfs_test_opt(info, IGNOREMETACSUMS))
+ print_rescue_option(seq, "ignoremetacsums", &printed);
+ if (btrfs_test_opt(info, IGNORESUPERFLAGS))
+ print_rescue_option(seq, "ignoresuperflags", &printed);
if (btrfs_test_opt(info, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD_SYNC))
@@ -1503,15 +1134,6 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",autodefrag");
if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(info, CHECK_INTEGRITY_DATA))
- seq_puts(seq, ",check_int_data");
- else if (btrfs_test_opt(info, CHECK_INTEGRITY))
- seq_puts(seq, ",check_int");
- if (info->check_integrity_print_mask)
- seq_printf(seq, ",check_int_print_mask=%d",
- info->check_integrity_print_mask);
-#endif
if (info->metadata_ratio)
seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
@@ -1526,42 +1148,26 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
#endif
if (btrfs_test_opt(info, REF_VERIFY))
seq_puts(seq, ",ref_verify");
- seq_printf(seq, ",subvolid=%llu",
- BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+ if (btrfs_test_opt(info, REF_TRACKER))
+ seq_puts(seq, ",ref_tracker");
+ seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
subvol_name = btrfs_get_subvol_name_from_objectid(info,
- BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+ btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
if (!IS_ERR(subvol_name)) {
- seq_puts(seq, ",subvol=");
- seq_escape(seq, subvol_name, " \t\n\\");
+ seq_show_option(seq, "subvol", subvol_name);
kfree(subvol_name);
}
return 0;
}
-static int btrfs_test_super(struct super_block *s, void *data)
-{
- struct btrfs_fs_info *p = data;
- struct btrfs_fs_info *fs_info = btrfs_sb(s);
-
- return fs_info->fs_devices == p->fs_devices;
-}
-
-static int btrfs_set_super(struct super_block *s, void *data)
-{
- int err = set_anon_super(s, data);
- if (!err)
- s->s_fs_info = data;
- return err;
-}
-
/*
* subvolumes are identified by ino 256
*/
-static inline int is_subvolume_inode(struct inode *inode)
+static inline bool is_subvolume_inode(struct inode *inode)
{
if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
- return 1;
- return 0;
+ return true;
+ return false;
}
static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
@@ -1597,7 +1203,7 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
struct super_block *s = root->d_sb;
struct btrfs_fs_info *fs_info = btrfs_sb(s);
struct inode *root_inode = d_inode(root);
- u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
+ u64 root_objectid = btrfs_root_id(BTRFS_I(root_inode)->root);
ret = 0;
if (!is_subvolume_inode(root_inode)) {
@@ -1629,198 +1235,6 @@ out:
return root;
}
-/*
- * Find a superblock for the given device / mount point.
- *
- * Note: This is based on mount_bdev from fs/super.c with a few additions
- * for multiple device setup. Make sure to keep it in sync.
- */
-static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
- int flags, const char *device_name, void *data)
-{
- struct block_device *bdev = NULL;
- struct super_block *s;
- struct btrfs_device *device = NULL;
- struct btrfs_fs_devices *fs_devices = NULL;
- struct btrfs_fs_info *fs_info = NULL;
- void *new_sec_opts = NULL;
- fmode_t mode = FMODE_READ;
- int error = 0;
-
- if (!(flags & SB_RDONLY))
- mode |= FMODE_WRITE;
-
- if (data) {
- error = security_sb_eat_lsm_opts(data, &new_sec_opts);
- if (error)
- return ERR_PTR(error);
- }
-
- /*
- * Setup a dummy root and fs_info for test/set super. This is because
- * we don't actually fill this stuff out until open_ctree, but we need
- * then open_ctree will properly initialize the file system specific
- * settings later. btrfs_init_fs_info initializes the static elements
- * of the fs_info (locks and such) to make cleanup easier if we find a
- * superblock with our given fs_devices later on at sget() time.
- */
- fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
- if (!fs_info) {
- error = -ENOMEM;
- goto error_sec_opts;
- }
- btrfs_init_fs_info(fs_info);
-
- fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
- fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
- if (!fs_info->super_copy || !fs_info->super_for_commit) {
- error = -ENOMEM;
- goto error_fs_info;
- }
-
- mutex_lock(&uuid_mutex);
- error = btrfs_parse_device_options(data, mode, fs_type);
- if (error) {
- mutex_unlock(&uuid_mutex);
- goto error_fs_info;
- }
-
- device = btrfs_scan_one_device(device_name, mode, fs_type);
- if (IS_ERR(device)) {
- mutex_unlock(&uuid_mutex);
- error = PTR_ERR(device);
- goto error_fs_info;
- }
-
- fs_devices = device->fs_devices;
- fs_info->fs_devices = fs_devices;
-
- error = btrfs_open_devices(fs_devices, mode, fs_type);
- mutex_unlock(&uuid_mutex);
- if (error)
- goto error_fs_info;
-
- if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
- error = -EACCES;
- goto error_close_devices;
- }
-
- bdev = fs_devices->latest_dev->bdev;
- s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
- fs_info);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto error_close_devices;
- }
-
- if (s->s_root) {
- btrfs_close_devices(fs_devices);
- btrfs_free_fs_info(fs_info);
- if ((flags ^ s->s_flags) & SB_RDONLY)
- error = -EBUSY;
- } else {
- snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
- btrfs_sb(s)->bdev_holder = fs_type;
- if (!strstr(crc32c_impl(), "generic"))
- set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
- error = btrfs_fill_super(s, fs_devices, data);
- }
- if (!error)
- error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
- security_free_mnt_opts(&new_sec_opts);
- if (error) {
- deactivate_locked_super(s);
- return ERR_PTR(error);
- }
-
- return dget(s->s_root);
-
-error_close_devices:
- btrfs_close_devices(fs_devices);
-error_fs_info:
- btrfs_free_fs_info(fs_info);
-error_sec_opts:
- security_free_mnt_opts(&new_sec_opts);
- return ERR_PTR(error);
-}
-
-/*
- * Mount function which is called by VFS layer.
- *
- * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
- * which needs vfsmount* of device's root (/). This means device's root has to
- * be mounted internally in any case.
- *
- * Operation flow:
- * 1. Parse subvol id related options for later use in mount_subvol().
- *
- * 2. Mount device's root (/) by calling vfs_kern_mount().
- *
- * NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
- * first place. In order to avoid calling btrfs_mount() again, we use
- * different file_system_type which is not registered to VFS by
- * register_filesystem() (btrfs_root_fs_type). As a result,
- * btrfs_mount_root() is called. The return value will be used by
- * mount_subtree() in mount_subvol().
- *
- * 3. Call mount_subvol() to get the dentry of subvolume. Since there is
- * "btrfs subvolume set-default", mount_subvol() is called always.
- */
-static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
- const char *device_name, void *data)
-{
- struct vfsmount *mnt_root;
- struct dentry *root;
- char *subvol_name = NULL;
- u64 subvol_objectid = 0;
- int error = 0;
-
- error = btrfs_parse_subvol_options(data, &subvol_name,
- &subvol_objectid);
- if (error) {
- kfree(subvol_name);
- return ERR_PTR(error);
- }
-
- /* mount device's root (/) */
- mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
- if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
- if (flags & SB_RDONLY) {
- mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
- flags & ~SB_RDONLY, device_name, data);
- } else {
- mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
- flags | SB_RDONLY, device_name, data);
- if (IS_ERR(mnt_root)) {
- root = ERR_CAST(mnt_root);
- kfree(subvol_name);
- goto out;
- }
-
- down_write(&mnt_root->mnt_sb->s_umount);
- error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
- up_write(&mnt_root->mnt_sb->s_umount);
- if (error < 0) {
- root = ERR_PTR(error);
- mntput(mnt_root);
- kfree(subvol_name);
- goto out;
- }
- }
- }
- if (IS_ERR(mnt_root)) {
- root = ERR_CAST(mnt_root);
- kfree(subvol_name);
- goto out;
- }
-
- /* mount_subvol() will free subvol_name and mnt_root */
- root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
-
-out:
- return root;
-}
-
static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
u32 new_pool_size, u32 old_pool_size)
{
@@ -1835,20 +1249,15 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
- new_pool_size);
+ workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
+ workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
- new_pool_size);
}
static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
- unsigned long old_opts, int flags)
+ unsigned long long old_opts, int flags)
{
if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
(!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
@@ -1862,12 +1271,12 @@ static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
}
static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
- unsigned long old_opts)
+ unsigned long long old_opts)
{
const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
/*
- * We need to cleanup all defragable inodes if the autodefragment is
+ * We need to cleanup all defraggable inodes if the autodefragment is
 * disabled or the filesystem is read only.
*/
if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
@@ -1888,192 +1297,284 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
}
-static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+static int btrfs_remount_rw(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- unsigned old_flags = sb->s_flags;
- unsigned long old_opts = fs_info->mount_opt;
- unsigned long old_compress_type = fs_info->compress_type;
- u64 old_max_inline = fs_info->max_inline;
- u32 old_thread_pool_size = fs_info->thread_pool_size;
- u32 old_metadata_ratio = fs_info->metadata_ratio;
int ret;
- sync_filesystem(sb);
- set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+ if (BTRFS_FS_ERROR(fs_info)) {
+ btrfs_err(fs_info,
+ "remounting read-write after error is not allowed");
+ return -EINVAL;
+ }
+
+ if (fs_info->fs_devices->rw_devices == 0)
+ return -EACCES;
- if (data) {
- void *new_sec_opts = NULL;
+ if (!btrfs_check_rw_degradable(fs_info, NULL)) {
+ btrfs_warn(fs_info,
+ "too many missing devices, writable remount is not allowed");
+ return -EACCES;
+ }
- ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
- if (!ret)
- ret = security_sb_remount(sb, new_sec_opts);
- security_free_mnt_opts(&new_sec_opts);
- if (ret)
- goto restore;
+ if (btrfs_super_log_root(fs_info->super_copy) != 0) {
+ btrfs_warn(fs_info,
+ "mount required to replay tree-log, cannot remount read-write");
+ return -EINVAL;
}
- ret = btrfs_parse_options(fs_info, data, *flags);
+ /*
+ * NOTE: when remounting with a change that does writes, don't put it
+ * anywhere above this point, as we are not sure to be safe to write
+ * until we pass the above checks.
+ */
+ ret = btrfs_start_pre_rw_mount(fs_info);
if (ret)
- goto restore;
+ return ret;
- btrfs_remount_begin(fs_info, old_opts, *flags);
- btrfs_resize_thread_pool(fs_info,
- fs_info->thread_pool_size, old_thread_pool_size);
+ btrfs_clear_sb_rdonly(fs_info->sb);
- if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
- (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
- (!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
- btrfs_warn(fs_info,
- "remount supports changing free space tree only from ro to rw");
- /* Make sure free space cache options match the state on disk */
- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
- btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
- btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
- }
- if (btrfs_free_space_cache_v1_active(fs_info)) {
- btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
- btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
- }
- }
+ set_bit(BTRFS_FS_OPEN, &fs_info->flags);
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
- goto out;
+ /*
+ * If we've gone from readonly -> read-write, we need to get our
+ * sync/async discard lists in the right state.
+ */
+ btrfs_discard_resume(fs_info);
- if (*flags & SB_RDONLY) {
- /*
- * this also happens on 'umount -rf' or on shutdown, when
- * the filesystem is busy.
- */
- cancel_work_sync(&fs_info->async_reclaim_work);
- cancel_work_sync(&fs_info->async_data_reclaim_work);
+ return 0;
+}
- btrfs_discard_cleanup(fs_info);
+static int btrfs_remount_ro(struct btrfs_fs_info *fs_info)
+{
+ /*
+ * This also happens on 'umount -rf' or on shutdown, when the
+ * filesystem is busy.
+ */
+ cancel_work_sync(&fs_info->async_reclaim_work);
+ cancel_work_sync(&fs_info->async_data_reclaim_work);
- /* wait for the uuid_scan task to finish */
- down(&fs_info->uuid_tree_rescan_sem);
- /* avoid complains from lockdep et al. */
- up(&fs_info->uuid_tree_rescan_sem);
+ btrfs_discard_cleanup(fs_info);
- btrfs_set_sb_rdonly(sb);
+ /* Wait for the uuid_scan task to finish */
+ down(&fs_info->uuid_tree_rescan_sem);
+ /* Avoid complaints from lockdep et al. */
+ up(&fs_info->uuid_tree_rescan_sem);
- /*
- * Setting SB_RDONLY will put the cleaner thread to
- * sleep at the next loop if it's already active.
- * If it's already asleep, we'll leave unused block
- * groups on disk until we're mounted read-write again
- * unless we clean them up here.
- */
- btrfs_delete_unused_bgs(fs_info);
+ btrfs_set_sb_rdonly(fs_info->sb);
- /*
- * The cleaner task could be already running before we set the
- * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
- * We must make sure that after we finish the remount, i.e. after
- * we call btrfs_commit_super(), the cleaner can no longer start
- * a transaction - either because it was dropping a dead root,
- * running delayed iputs or deleting an unused block group (the
- * cleaner picked a block group from the list of unused block
- * groups before we were able to in the previous call to
- * btrfs_delete_unused_bgs()).
- */
- wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
- TASK_UNINTERRUPTIBLE);
+ /*
+ * Setting SB_RDONLY will put the cleaner thread to sleep at the next
+ * loop if it's already active. If it's already asleep, we'll leave
+ * unused block groups on disk until we're mounted read-write again
+ * unless we clean them up here.
+ */
+ btrfs_delete_unused_bgs(fs_info);
- /*
- * We've set the superblock to RO mode, so we might have made
- * the cleaner task sleep without running all pending delayed
- * iputs. Go through all the delayed iputs here, so that if an
- * unmount happens without remounting RW we don't end up at
- * finishing close_ctree() with a non-empty list of delayed
- * iputs.
- */
- btrfs_run_delayed_iputs(fs_info);
+ /*
+ * The cleaner task could be already running before we set the flag
+ * BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock). We must make
+ * sure that after we finish the remount, i.e. after we call
+ * btrfs_commit_super(), the cleaner can no longer start a transaction
+ * - either because it was dropping a dead root, running delayed iputs
+ * or deleting an unused block group (the cleaner picked a block
+ * group from the list of unused block groups before we were able to
+ * in the previous call to btrfs_delete_unused_bgs()).
+ */
+ wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, TASK_UNINTERRUPTIBLE);
- btrfs_dev_replace_suspend_for_unmount(fs_info);
- btrfs_scrub_cancel(fs_info);
- btrfs_pause_balance(fs_info);
+ /*
+ * We've set the superblock to RO mode, so we might have made the
+ * cleaner task sleep without running all pending delayed iputs. Go
+ * through all the delayed iputs here, so that if an unmount happens
+ * without remounting RW we don't end up at finishing close_ctree()
+ * with a non-empty list of delayed iputs.
+ */
+ btrfs_run_delayed_iputs(fs_info);
- /*
- * Pause the qgroup rescan worker if it is running. We don't want
- * it to be still running after we are in RO mode, as after that,
- * by the time we unmount, it might have left a transaction open,
- * so we would leak the transaction and/or crash.
- */
- btrfs_qgroup_wait_for_completion(fs_info, false);
+ btrfs_dev_replace_suspend_for_unmount(fs_info);
+ btrfs_scrub_cancel(fs_info);
+ btrfs_pause_balance(fs_info);
- ret = btrfs_commit_super(fs_info);
- if (ret)
- goto restore;
- } else {
- if (BTRFS_FS_ERROR(fs_info)) {
- btrfs_err(fs_info,
- "Remounting read-write after error is not allowed");
- ret = -EINVAL;
- goto restore;
- }
- if (fs_info->fs_devices->rw_devices == 0) {
- ret = -EACCES;
- goto restore;
- }
+ /*
+ * Pause the qgroup rescan worker if it is running. We don't want it to
+ * be still running after we are in RO mode, as after that, by the time
+ * we unmount, it might have left a transaction open, so we would leak
+ * the transaction and/or crash.
+ */
+ btrfs_qgroup_wait_for_completion(fs_info, false);
- if (!btrfs_check_rw_degradable(fs_info, NULL)) {
- btrfs_warn(fs_info,
- "too many missing devices, writable remount is not allowed");
- ret = -EACCES;
- goto restore;
- }
+ return btrfs_commit_super(fs_info);
+}
- if (btrfs_super_log_root(fs_info->super_copy) != 0) {
- btrfs_warn(fs_info,
- "mount required to replay tree-log, cannot remount read-write");
- ret = -EINVAL;
- goto restore;
- }
+static void btrfs_ctx_to_info(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx)
+{
+ fs_info->max_inline = ctx->max_inline;
+ fs_info->commit_interval = ctx->commit_interval;
+ fs_info->metadata_ratio = ctx->metadata_ratio;
+ fs_info->thread_pool_size = ctx->thread_pool_size;
+ fs_info->mount_opt = ctx->mount_opt;
+ fs_info->compress_type = ctx->compress_type;
+ fs_info->compress_level = ctx->compress_level;
+}
- /*
- * NOTE: when remounting with a change that does writes, don't
- * put it anywhere above this point, as we are not sure to be
- * safe to write until we pass the above checks.
- */
- ret = btrfs_start_pre_rw_mount(fs_info);
- if (ret)
- goto restore;
+static void btrfs_info_to_ctx(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx)
+{
+ ctx->max_inline = fs_info->max_inline;
+ ctx->commit_interval = fs_info->commit_interval;
+ ctx->metadata_ratio = fs_info->metadata_ratio;
+ ctx->thread_pool_size = fs_info->thread_pool_size;
+ ctx->mount_opt = fs_info->mount_opt;
+ ctx->compress_type = fs_info->compress_type;
+ ctx->compress_level = fs_info->compress_level;
+}
+
+#define btrfs_info_if_set(fs_info, old_ctx, opt, fmt, args...) \
+do { \
+ if ((!old_ctx || !btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \
+ btrfs_raw_test_opt(fs_info->mount_opt, opt)) \
+ btrfs_info(fs_info, fmt, ##args); \
+} while (0)
+
+#define btrfs_info_if_unset(fs_info, old_ctx, opt, fmt, args...) \
+do { \
+ if ((old_ctx && btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \
+ !btrfs_raw_test_opt(fs_info->mount_opt, opt)) \
+ btrfs_info(fs_info, fmt, ##args); \
+} while (0)
+
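+/*
+ * Helpers for btrfs_emit_options() below: log a message only when an option
+ * changed relative to @old_ctx. A NULL @old_ctx means this is the initial
+ * mount (see btrfs_fill_super()), so every option that is currently set gets
+ * logged once.
+ */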
+static void btrfs_emit_options(struct btrfs_fs_info *info,
+ struct btrfs_fs_context *old)
+{
+ btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
+ btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
+ btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
+ btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
+ btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
+ btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
+ btrfs_info_if_set(info, old, NOTREELOG, "disabling tree log");
+ btrfs_info_if_set(info, old, NOLOGREPLAY, "disabling log replay at mount time");
+ btrfs_info_if_set(info, old, FLUSHONCOMMIT, "turning on flush-on-commit");
+ btrfs_info_if_set(info, old, DISCARD_SYNC, "turning on sync discard");
+ btrfs_info_if_set(info, old, DISCARD_ASYNC, "turning on async discard");
+ btrfs_info_if_set(info, old, FREE_SPACE_TREE, "enabling free space tree");
+ btrfs_info_if_set(info, old, SPACE_CACHE, "enabling disk space caching");
+ btrfs_info_if_set(info, old, CLEAR_CACHE, "force clearing of disk cache");
+ btrfs_info_if_set(info, old, AUTO_DEFRAG, "enabling auto defrag");
+ btrfs_info_if_set(info, old, FRAGMENT_DATA, "fragmenting data");
+ btrfs_info_if_set(info, old, FRAGMENT_METADATA, "fragmenting metadata");
+ btrfs_info_if_set(info, old, REF_VERIFY, "doing ref verification");
+ btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time");
+ btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots");
+ btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums");
+ btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");
+ btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");
+
+ btrfs_info_if_unset(info, old, NODATASUM, "setting datasum");
+ btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
+ btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
+ btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
+ btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");
+ btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
+ btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
+ btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
+ btrfs_info_if_unset(info, old, AUTO_DEFRAG, "disabling auto defrag");
+ btrfs_info_if_unset(info, old, COMPRESS, "use no compression");
+
+ /* Did the compression settings change? */
+ if (btrfs_test_opt(info, COMPRESS) &&
+ (!old ||
+ old->compress_type != info->compress_type ||
+ old->compress_level != info->compress_level ||
+ (!btrfs_raw_test_opt(old->mount_opt, FORCE_COMPRESS) &&
+ btrfs_raw_test_opt(info->mount_opt, FORCE_COMPRESS)))) {
+ const char *compress_type = btrfs_compress_type2str(info->compress_type);
+
+ btrfs_info(info, "%s %s compression, level %d",
+ btrfs_test_opt(info, FORCE_COMPRESS) ? "force" : "use",
+ compress_type, info->compress_level);
+ }
+
+ if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
+ btrfs_info(info, "max_inline set to %llu", info->max_inline);
+}
+
+static int btrfs_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_fs_context *ctx = fc->fs_private;
+ struct btrfs_fs_context old_ctx;
+ int ret = 0;
+ bool mount_reconfigure = (fc->s_fs_info != NULL);
+
+ btrfs_info_to_ctx(fs_info, &old_ctx);
+
+ /*
+ * This is our "bind mount" trick, we don't want to allow the user to do
+ * anything other than mount a different ro/rw and a different subvol,
+ * all of the mount options should be maintained.
+ */
+ if (mount_reconfigure)
+ ctx->mount_opt = old_ctx.mount_opt;
+
+ sync_filesystem(sb);
+ set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+
+ if (!btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags))
+ return -EINVAL;
+
+ ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY));
+ if (ret < 0)
+ return ret;
- btrfs_clear_sb_rdonly(sb);
+ btrfs_ctx_to_info(fs_info, ctx);
+ btrfs_remount_begin(fs_info, old_ctx.mount_opt, fc->sb_flags);
+ btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size,
+ old_ctx.thread_pool_size);
- set_bit(BTRFS_FS_OPEN, &fs_info->flags);
+ if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
+ (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ (!sb_rdonly(sb) || (fc->sb_flags & SB_RDONLY))) {
+ btrfs_warn(fs_info,
+ "remount supports changing free space tree only from RO to RW");
+ /* Make sure free space cache options match the state on disk. */
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+ btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
+ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
+ }
+ if (btrfs_free_space_cache_v1_active(fs_info)) {
+ btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
+ btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
+ }
}
-out:
+
+ ret = 0;
+ if (!sb_rdonly(sb) && (fc->sb_flags & SB_RDONLY))
+ ret = btrfs_remount_ro(fs_info);
+ else if (sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY))
+ ret = btrfs_remount_rw(fs_info);
+ if (ret)
+ goto restore;
+
/*
- * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
- * since the absence of the flag means it can be toggled off by remount.
+ * If we set the mask during parameter parsing, the VFS would reject the
+ * remount. Here we can set the mask and the value will be updated
+ * appropriately.
*/
- *flags |= SB_I_VERSION;
+ if ((fc->sb_flags & SB_POSIXACL) != (sb->s_flags & SB_POSIXACL))
+ fc->sb_flags_mask |= SB_POSIXACL;
+ btrfs_emit_options(fs_info, &old_ctx);
wake_up_process(fs_info->transaction_kthread);
- btrfs_remount_cleanup(fs_info, old_opts);
+ btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
btrfs_clear_oneshot_options(fs_info);
clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
return 0;
-
restore:
- /* We've hit an error - don't reset SB_RDONLY */
- if (sb_rdonly(sb))
- old_flags |= SB_RDONLY;
- if (!(old_flags & SB_RDONLY))
- clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
- sb->s_flags = old_flags;
- fs_info->mount_opt = old_opts;
- fs_info->compress_type = old_compress_type;
- fs_info->max_inline = old_max_inline;
- btrfs_resize_thread_pool(fs_info,
- old_thread_pool_size, fs_info->thread_pool_size);
- fs_info->metadata_ratio = old_metadata_ratio;
- btrfs_remount_cleanup(fs_info, old_opts);
+ btrfs_ctx_to_info(fs_info, &old_ctx);
+ btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
-
return ret;
}
@@ -2109,7 +1610,7 @@ static inline void btrfs_descending_sort_devices(
static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
u64 *free_bytes)
{
- struct btrfs_device_info *devices_info;
+ struct btrfs_device_info AUTO_KFREE(devices_info);
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
u64 type;
@@ -2145,12 +1646,8 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
if (type & BTRFS_BLOCK_GROUP_RAID0)
num_stripes = nr_devices;
- else if (type & BTRFS_BLOCK_GROUP_RAID1)
- num_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
- num_stripes = 3;
- else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
- num_stripes = 4;
+ else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK)
+ num_stripes = rattr->ncopies;
else if (type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = 4;
@@ -2174,17 +1671,13 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
/*
- * In order to avoid overwriting the superblock on the drive,
- * btrfs starts at an offset of at least 1MB when doing chunk
- * allocation.
- *
- * This ensures we have at least min_stripe_size free space
- * after excluding 1MB.
+ * Ensure we have at least min_stripe_size on top of the
+ * reserved space on the device.
*/
- if (avail_space <= SZ_1M + min_stripe_size)
+ if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size)
continue;
- avail_space -= SZ_1M;
+ avail_space -= BTRFS_DEVICE_RANGE_RESERVED;
devices_info[i].dev = device;
devices_info[i].max_avail = avail_space;
@@ -2215,7 +1708,6 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
nr_devices--;
}
- kfree(devices_info);
*free_bytes = avail_space;
return 0;
}
@@ -2265,7 +1757,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
/*
- * Metadata in mixed block goup profiles are accounted in data
+ * Metadata in mixed block group profiles are accounted in data
*/
if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA)
@@ -2321,11 +1813,11 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
- total_free_meta - thresh < block_rsv->size)
+ (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;
- buf->f_bsize = dentry->d_sb->s_blocksize;
+ buf->f_bsize = fs_info->sectorsize;
buf->f_namelen = BTRFS_NAME_LEN;
/* We treat it as constant endianness (it doesn't matter _which_)
@@ -2334,14 +1826,303 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
/* Mask in the root object ID too, to disambiguate subvols */
- buf->f_fsid.val[0] ^=
- BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
- buf->f_fsid.val[1] ^=
- BTRFS_I(d_inode(dentry))->root->root_key.objectid;
+ buf->f_fsid.val[0] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root) >> 32;
+ buf->f_fsid.val[1] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root);
+
+ return 0;
+}
+
+static int btrfs_fc_test_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct btrfs_fs_info *p = fc->s_fs_info;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+ return fs_info->fs_devices == p->fs_devices;
+}
+
+static int btrfs_get_tree_super(struct fs_context *fc)
+{
+ struct btrfs_fs_info *fs_info = fc->s_fs_info;
+ struct btrfs_fs_context *ctx = fc->fs_private;
+ struct btrfs_fs_devices *fs_devices = NULL;
+ struct btrfs_device *device;
+ struct super_block *sb;
+ blk_mode_t mode = sb_open_mode(fc->sb_flags);
+ int ret;
+
+ btrfs_ctx_to_info(fs_info, ctx);
+ mutex_lock(&uuid_mutex);
+
+ /*
+ * With 'true' passed to btrfs_scan_one_device() (mount time) we expect
+ * either a valid device or an error.
+ */
+ device = btrfs_scan_one_device(fc->source, true);
+ ASSERT(device != NULL);
+ if (IS_ERR(device)) {
+ mutex_unlock(&uuid_mutex);
+ return PTR_ERR(device);
+ }
+ fs_devices = device->fs_devices;
+ /*
+ * We cannot hold uuid_mutex while calling sget_fc(), as it would lead to a
+ * locking order reversal with s_umount.
+ *
+ * So here we increase the holding count of fs_devices; this ensures the
+ * fs_devices itself won't be freed.
+ */
+ btrfs_fs_devices_inc_holding(fs_devices);
+ fs_info->fs_devices = fs_devices;
+ mutex_unlock(&uuid_mutex);
+
+
+ sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc);
+ if (IS_ERR(sb)) {
+ mutex_lock(&uuid_mutex);
+ btrfs_fs_devices_dec_holding(fs_devices);
+ /*
+ * Since the fs_devices is not opened, it can be freed at any
+ * time after unlocking uuid_mutex. We need to avoid a double
+ * free through put_fs_context()->btrfs_free_fs_info().
+ * So here we reset fs_info->fs_devices to NULL and let the
+ * regular fs_devices reclaim path handle it.
+ *
+ * This applies to all later branches where no fs_devices is
+ * opened.
+ */
+ fs_info->fs_devices = NULL;
+ mutex_unlock(&uuid_mutex);
+ return PTR_ERR(sb);
+ }
+
+ if (sb->s_root) {
+ /*
+ * Not the first mount of the fs, thus we got an existing super block.
+ * Will reuse the returned super block, fs_info and fs_devices.
+ *
+ * fc->s_fs_info is not touched and will be later freed by
+ * put_fs_context() through btrfs_free_fs_context().
+ */
+ ASSERT(fc->s_fs_info == fs_info);
+
+ mutex_lock(&uuid_mutex);
+ btrfs_fs_devices_dec_holding(fs_devices);
+ fs_info->fs_devices = NULL;
+ mutex_unlock(&uuid_mutex);
+ /*
+ * At this stage we may have RO flag mismatch between
+ * fc->sb_flags and sb->s_flags. Caller should detect such
+ * mismatch and reconfigure with sb->s_umount rwsem held if
+ * needed.
+ */
+ } else {
+ struct block_device *bdev;
+ /*
+ * This is the first mount of the fs, thus a new superblock: fc->s_fs_info
+ * must be NULL, and the ownership of our fs_info and fs_devices is
+ * transferred to the super block.
+ */
+ ASSERT(fc->s_fs_info == NULL);
+
+ mutex_lock(&uuid_mutex);
+ btrfs_fs_devices_dec_holding(fs_devices);
+ ret = btrfs_open_devices(fs_devices, mode, sb);
+ if (ret < 0)
+ fs_info->fs_devices = NULL;
+ mutex_unlock(&uuid_mutex);
+ if (ret < 0) {
+ deactivate_locked_super(sb);
+ return ret;
+ }
+ if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
+ deactivate_locked_super(sb);
+ return -EACCES;
+ }
+ set_device_specific_options(fs_info);
+ bdev = fs_devices->latest_dev->bdev;
+ snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
+ shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
+ ret = btrfs_fill_super(sb, fs_devices);
+ if (ret) {
+ deactivate_locked_super(sb);
+ return ret;
+ }
+ }
+
+ btrfs_clear_oneshot_options(fs_info);
+
+ fc->root = dget(sb->s_root);
return 0;
}
+/*
+ * Ever since commit 0723a0473fb4 ("btrfs: allow mounting btrfs subvolumes
+ * with different ro/rw options") the following works:
+ *
+ * (i) mount /dev/sda3 -o subvol=foo,ro /mnt/foo
+ * (ii) mount /dev/sda3 -o subvol=bar,rw /mnt/bar
+ *
+ * which looks nice and innocent but is actually pretty intricate and deserves
+ * a long comment.
+ *
+ * On another filesystem a subvolume mount is close to something like:
+ *
+ * (iii) # create rw superblock + initial mount
+ * mount -t xfs /dev/sdb /opt/
+ *
+ * # create ro bind mount
+ * mount --bind -o ro /opt/foo /mnt/foo
+ *
+ * # unmount initial mount
+ * umount /opt
+ *
+ * Of course, there's some special subvolume sauce and there's the fact that the
+ * sb->s_root dentry is really swapped after mount_subtree(). But conceptually
+ * it's very close and will help us understand the issue.
+ *
+ * The old mount API didn't cleanly distinguish between a mount being made ro
+ * and a superblock being made ro. The only way to change the ro state of
+ * either object was by passing MS_RDONLY. If a new mount was created via
+ * mount(2) such as:
+ *
+ * mount("/dev/sdb", "/mnt", "xfs", MS_RDONLY, NULL);
+ *
+ * the MS_RDONLY flag being specified had two effects:
+ *
+ * (1) MNT_READONLY was raised -> the resulting mount got
+ * @mnt->mnt_flags |= MNT_READONLY raised.
+ *
+ * (2) MS_RDONLY was passed to the filesystem's mount method and the filesystem
+ * made the superblock ro. Note how SB_RDONLY has the same value as
+ * MS_RDONLY and is raised whenever MS_RDONLY is passed through mount(2).
+ *
+ * Creating a subtree mount via (iii) ends up leaving a rw superblock with a
+ * subtree mounted ro.
+ *
+ * But consider the effect of the old mount API on btrfs subvolume mounting,
+ * which combines the distinct steps in (iii) into a single step.
+ *
+ * By issuing (i) both the mount and the superblock are turned ro. Now when (ii)
+ * is issued the superblock is ro and thus even if the mount created for (ii) is
+ * rw it wouldn't help. Hence, btrfs needed to transition the superblock from ro
+ * to rw for (ii) which it did using an internal remount call.
+ *
+ * IOW, subvolume mounting was inherently complicated due to the ambiguity of
+ * MS_RDONLY in mount(2). Note that because of this ambiguity mount(8) always translates
+ * "ro" to MS_RDONLY. IOW, in both (i) and (ii) "ro" becomes MS_RDONLY when
+ * passed by mount(8) to mount(2).
+ *
+ * Enter the new mount API. The new mount API disambiguates making a mount ro
+ * and making a superblock ro.
+ *
+ * (3) To turn a mount ro the MOUNT_ATTR_RDONLY flag can be used with either
+ * fsmount() or mount_setattr(); this is a pure VFS level change for a
+ * specific mount or mount tree that is never seen by the filesystem itself.
+ *
+ * (4) To turn a superblock ro the "ro" flag must be used with
+ * fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem
+ * in fc->sb_flags.
+ *
+ * But, currently the util-linux mount command already utilizes the new mount
+ * API and is still setting fsconfig(FSCONFIG_SET_FLAG, "ro") no matter if it's
+ * btrfs or not, setting the whole super block RO. To make per-subvolume mounting
+ * work with different options work we need to keep backward compatibility.
+ */
+static int btrfs_reconfigure_for_mount(struct fs_context *fc)
+{
+ int ret = 0;
+
+ if (!(fc->sb_flags & SB_RDONLY) && (fc->root->d_sb->s_flags & SB_RDONLY))
+ ret = btrfs_reconfigure(fc);
+
+ return ret;
+}
+
+static int btrfs_get_tree_subvol(struct fs_context *fc)
+{
+ struct btrfs_fs_info *fs_info = NULL;
+ struct btrfs_fs_context *ctx = fc->fs_private;
+ struct fs_context *dup_fc;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
+ int ret = 0;
+
+ /*
+ * Setup a dummy root and fs_info for test/set super. This is because
+ * we don't actually fill this stuff out until open_ctree, but we need
+ * it for searching for existing supers, so this lets us do that and
+ * then open_ctree will properly initialize the file system specific
+ * settings later. btrfs_init_fs_info initializes the static elements
+ * of the fs_info (locks and such) to make cleanup easier if we find a
+ * superblock with our given fs_devices later on at sget() time.
+ */
+ fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
+ if (!fs_info)
+ return -ENOMEM;
+
+ fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
+ fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
+ if (!fs_info->super_copy || !fs_info->super_for_commit) {
+ /*
+ * Don't call btrfs_free_fs_info() to free it, as it is still only
+ * partially initialized.
+ */
+ kfree(fs_info->super_copy);
+ kfree(fs_info->super_for_commit);
+ kvfree(fs_info);
+ return -ENOMEM;
+ }
+ btrfs_init_fs_info(fs_info);
+
+ dup_fc = vfs_dup_fs_context(fc);
+ if (IS_ERR(dup_fc)) {
+ btrfs_free_fs_info(fs_info);
+ return PTR_ERR(dup_fc);
+ }
+
+ /*
+ * When we do the sget_fc this gets transferred to the sb, so we only
+ * need to set it on the dup_fc as this is what creates the super block.
+ */
+ dup_fc->s_fs_info = fs_info;
+
+ ret = btrfs_get_tree_super(dup_fc);
+ if (ret)
+ goto error;
+
+ ret = btrfs_reconfigure_for_mount(dup_fc);
+ up_write(&dup_fc->root->d_sb->s_umount);
+ if (ret)
+ goto error;
+ mnt = vfs_create_mount(dup_fc);
+ put_fs_context(dup_fc);
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+
+ /*
+ * This frees ->subvol_name, because if it isn't set we have to
+ * allocate a buffer to hold the subvol_name, so we just drop our
+ * reference to it here.
+ */
+ dentry = mount_subvol(ctx->subvol_name, ctx->subvol_objectid, mnt);
+ ctx->subvol_name = NULL;
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ fc->root = dentry;
+ return 0;
+error:
+ put_fs_context(dup_fc);
+ return ret;
+}
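+
+/*
+ * Rough flow of the subvolume mount path above (a readability summary of the
+ * code, not a separate contract): btrfs_get_tree_subvol() allocates a
+ * preliminary fs_info, duplicates the fs_context and lets
+ * btrfs_get_tree_super() create or reuse the device superblock (ownership of
+ * that fs_info transfers to the sb on a first mount),
+ * btrfs_reconfigure_for_mount() flips a reused ro superblock to rw when the
+ * caller asked for rw, vfs_create_mount() wraps it in a mount, and
+ * mount_subvol() finally resolves the requested subvolume's dentry on that
+ * mount.
+ */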
+
+static int btrfs_get_tree(struct fs_context *fc)
+{
+ ASSERT(fc->s_fs_info == NULL);
+
+ return btrfs_get_tree_subvol(fc);
+}
+
static void btrfs_kill_super(struct super_block *sb)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -2349,22 +2130,86 @@ static void btrfs_kill_super(struct super_block *sb)
btrfs_free_fs_info(fs_info);
}
-static struct file_system_type btrfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "btrfs",
- .mount = btrfs_mount,
- .kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
-};
+static void btrfs_free_fs_context(struct fs_context *fc)
+{
+ struct btrfs_fs_context *ctx = fc->fs_private;
+ struct btrfs_fs_info *fs_info = fc->s_fs_info;
+
+ if (fs_info)
+ btrfs_free_fs_info(fs_info);
+
+ if (ctx && refcount_dec_and_test(&ctx->refs)) {
+ kfree(ctx->subvol_name);
+ kfree(ctx);
+ }
+}
+
+static int btrfs_dup_fs_context(struct fs_context *fc, struct fs_context *src_fc)
+{
+ struct btrfs_fs_context *ctx = src_fc->fs_private;
+
+ /*
+ * Give a ref to our ctx to this dup, as we want to keep it around for
+ * our original fc so we can have the subvolume name or objectid.
+ *
+ * We unset ->source in the original fc because the dup needs it for
+ * mounting, and then once we free the dup it'll free ->source, so we
+ * need to make sure we're only pointing to it in one fc.
+ */
+ refcount_inc(&ctx->refs);
+ fc->fs_private = ctx;
+ fc->source = src_fc->source;
+ src_fc->source = NULL;
+ return 0;
+}
-static struct file_system_type btrfs_root_fs_type = {
- .owner = THIS_MODULE,
- .name = "btrfs",
- .mount = btrfs_mount_root,
- .kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
+static const struct fs_context_operations btrfs_fs_context_ops = {
+ .parse_param = btrfs_parse_param,
+ .reconfigure = btrfs_reconfigure,
+ .get_tree = btrfs_get_tree,
+ .dup = btrfs_dup_fs_context,
+ .free = btrfs_free_fs_context,
};
+static int btrfs_init_fs_context(struct fs_context *fc)
+{
+ struct btrfs_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct btrfs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ refcount_set(&ctx->refs, 1);
+ fc->fs_private = ctx;
+ fc->ops = &btrfs_fs_context_ops;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ btrfs_info_to_ctx(btrfs_sb(fc->root->d_sb), ctx);
+ } else {
+ ctx->thread_pool_size =
+ min_t(unsigned long, num_online_cpus() + 2, 8);
+ ctx->max_inline = BTRFS_DEFAULT_MAX_INLINE;
+ ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
+ }
+
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
+ fc->sb_flags |= SB_POSIXACL;
+#endif
+ fc->sb_flags |= SB_I_VERSION;
+
+ return 0;
+}
+
+static struct file_system_type btrfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "btrfs",
+ .init_fs_context = btrfs_init_fs_context,
+ .parameters = btrfs_fs_parameters,
+ .kill_sb = btrfs_kill_super,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA |
+ FS_ALLOW_IDMAP | FS_MGTIME,
+ };
+
MODULE_ALIAS_FS("btrfs");
static int btrfs_control_open(struct inode *inode, struct file *file)
@@ -2386,6 +2231,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
{
struct btrfs_ioctl_vol_args *vol;
struct btrfs_device *device = NULL;
+ dev_t devt = 0;
int ret = -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
@@ -2394,26 +2240,39 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
vol = memdup_user((void __user *)arg, sizeof(*vol));
if (IS_ERR(vol))
return PTR_ERR(vol);
- vol->name[BTRFS_PATH_NAME_MAX] = '\0';
+ ret = btrfs_check_ioctl_vol_args_path(vol);
+ if (ret < 0)
+ goto out;
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
mutex_lock(&uuid_mutex);
- device = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_root_fs_type);
+ /*
+ * Scanning outside of a mount can return NULL, which would turn
+ * into a 0 error code.
+ */
+ device = btrfs_scan_one_device(vol->name, false);
ret = PTR_ERR_OR_ZERO(device);
mutex_unlock(&uuid_mutex);
break;
case BTRFS_IOC_FORGET_DEV:
- ret = btrfs_forget_devices(vol->name);
+ if (vol->name[0] != 0) {
+ ret = lookup_bdev(vol->name, &devt);
+ if (ret)
+ break;
+ }
+ ret = btrfs_forget_devices(devt);
break;
case BTRFS_IOC_DEVICES_READY:
mutex_lock(&uuid_mutex);
- device = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_root_fs_type);
- if (IS_ERR(device)) {
+ /*
+ * Scanning outside of a mount can return NULL, which would turn
+ * into a 0 error code.
+ */
+ device = btrfs_scan_one_device(vol->name, false);
+ if (IS_ERR_OR_NULL(device)) {
mutex_unlock(&uuid_mutex);
- ret = PTR_ERR(device);
+ ret = PTR_ERR_OR_ZERO(device);
break;
}
ret = !(device->fs_devices->num_devices ==
@@ -2425,15 +2284,14 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
break;
}
+out:
kfree(vol);
return ret;
}
static int btrfs_freeze(struct super_block *sb)
{
- struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- struct btrfs_root *root = fs_info->tree_root;
set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
/*
@@ -2442,21 +2300,91 @@ static int btrfs_freeze(struct super_block *sb)
* we want to avoid on a frozen filesystem), or do the commit
* ourselves.
*/
- trans = btrfs_attach_transaction_barrier(root);
- if (IS_ERR(trans)) {
- /* no transaction, don't bother */
- if (PTR_ERR(trans) == -ENOENT)
- return 0;
- return PTR_ERR(trans);
+ return btrfs_commit_current_transaction(fs_info->tree_root);
+}
+
+static int check_dev_super(struct btrfs_device *dev)
+{
+ struct btrfs_fs_info *fs_info = dev->fs_info;
+ struct btrfs_super_block *sb;
+ u64 last_trans;
+ u16 csum_type;
+ int ret = 0;
+
+ /* This should be called with fs still frozen. */
+ ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags));
+
+ /* Missing dev, no need to check. */
+ if (!dev->bdev)
+ return 0;
+
+ /* Only need to check the primary super block. */
+ sb = btrfs_read_disk_super(dev->bdev, 0, true);
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
+
+ /* Verify the checksum. */
+ csum_type = btrfs_super_csum_type(sb);
+ if (unlikely(csum_type != btrfs_super_csum_type(fs_info->super_copy))) {
+ btrfs_err(fs_info, "csum type changed, has %u expect %u",
+ csum_type, btrfs_super_csum_type(fs_info->super_copy));
+ ret = -EUCLEAN;
+ goto out;
}
- return btrfs_commit_transaction(trans);
+
+ if (unlikely(btrfs_check_super_csum(fs_info, sb))) {
+ btrfs_err(fs_info, "csum for on-disk super block no longer matches");
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ /* btrfs_validate_super() includes fsid check against super->fsid. */
+ ret = btrfs_validate_super(fs_info, sb, 0);
+ if (ret < 0)
+ goto out;
+
+ last_trans = btrfs_get_last_trans_committed(fs_info);
+ if (unlikely(btrfs_super_generation(sb) != last_trans)) {
+ btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
+ btrfs_super_generation(sb), last_trans);
+ ret = -EUCLEAN;
+ goto out;
+ }
+out:
+ btrfs_release_disk_super(sb);
+ return ret;
}
static int btrfs_unfreeze(struct super_block *sb)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_device *device;
+ int ret = 0;
+ /*
+ * Make sure the fs is not changed by accident (like hibernation then
+ * modified by other OS).
+ * If we found anything wrong, we mark the fs error immediately.
+ *
+ * And since the fs is frozen, no one can modify the fs yet, thus
+ * we don't need to hold device_list_mutex.
+ */
+ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ ret = check_dev_super(device);
+ if (ret < 0) {
+ btrfs_handle_fs_error(fs_info, ret,
+ "super block on devid %llu got modified unexpectedly",
+ device->devid);
+ break;
+ }
+ }
clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
+
+ /*
+ * We still return 0 to allow the VFS layer to unfreeze the fs even if the
+ * above checks failed. Since the fs is either fine or read-only, we're
+ * safe to continue without causing further damage.
+ */
return 0;
}
@@ -2470,12 +2398,93 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
* the end of RCU grace period.
*/
rcu_read_lock();
- seq_escape(m, rcu_str_deref(fs_info->fs_devices->latest_dev->name), " \t\n\\");
+ seq_escape(m, btrfs_dev_name(fs_info->fs_devices->latest_dev), " \t\n\\");
rcu_read_unlock();
return 0;
}
+static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ const s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+ trace_btrfs_extent_map_shrinker_count(fs_info, nr);
+
+ return nr;
+}
+
+static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
+{
+ const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+ btrfs_free_extent_maps(fs_info, nr_to_scan);
+
+ /* The extent map shrinker runs asynchronously, so always return 0. */
+ return 0;
+}
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_remove_bdev(struct super_block *sb, struct block_device *bdev)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_device *device;
+ struct btrfs_dev_lookup_args lookup_args = { .devt = bdev->bd_dev };
+ bool can_rw;
+
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ device = btrfs_find_device(fs_info->fs_devices, &lookup_args);
+ if (!device) {
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ /* Device not found, should not affect the running fs, just give a warning. */
+ btrfs_warn(fs_info, "unable to find btrfs device for block device '%pg'", bdev);
+ return 0;
+ }
+ /*
+ * The to-be-removed device is already missing?
+ *
+ * That's weird, but no special handling is needed and we can exit right now.
+ */
+ if (unlikely(test_and_set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))) {
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ btrfs_warn(fs_info, "btrfs device id %llu is already missing", device->devid);
+ return 0;
+ }
+
+ device->fs_devices->missing_devices++;
+ if (test_and_clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
+ list_del_init(&device->dev_alloc_list);
+ WARN_ON(device->fs_devices->rw_devices < 1);
+ device->fs_devices->rw_devices--;
+ }
+ can_rw = btrfs_check_rw_degradable(fs_info, device);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ /*
+ * Now the device is considered missing, so btrfs_dev_name() won't give a
+ * meaningful result anymore; only output the devid.
+ */
+ if (unlikely(!can_rw)) {
+ btrfs_crit(fs_info,
+ "btrfs device id %llu has gone missing, can not maintain read-write",
+ device->devid);
+ return -EIO;
+ }
+ btrfs_warn(fs_info,
+ "btrfs device id %llu has gone missing, continue as degraded",
+ device->devid);
+ btrfs_set_opt(fs_info->mount_opt, DEGRADED);
+ return 0;
+}
+
+static void btrfs_shutdown(struct super_block *sb)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+ btrfs_force_shutdown(fs_info);
+}
+#endif
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
@@ -2487,9 +2496,14 @@ static const struct super_operations btrfs_super_ops = {
.destroy_inode = btrfs_destroy_inode,
.free_inode = btrfs_free_inode,
.statfs = btrfs_statfs,
- .remount_fs = btrfs_remount,
.freeze_fs = btrfs_freeze,
.unfreeze_fs = btrfs_unfreeze,
+ .nr_cached_objects = btrfs_nr_cached_objects,
+ .free_cached_objects = btrfs_free_cached_objects,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ .remove_bdev = btrfs_remove_bdev,
+ .shutdown = btrfs_shutdown,
+#endif
};
static const struct file_operations btrfs_ctl_fops = {
@@ -2519,21 +2533,18 @@ static __cold void btrfs_interface_exit(void)
misc_deregister(&btrfs_misc);
}
-static void __init btrfs_print_mod_info(void)
+static int __init btrfs_print_mod_info(void)
{
static const char options[] = ""
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ ", experimental=on"
+#endif
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
#ifdef CONFIG_BTRFS_ASSERT
", assert=on"
#endif
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- ", integrity-checker=on"
-#endif
-#ifdef CONFIG_BTRFS_FS_REF_VERIFY
- ", ref-verify=on"
-#endif
#ifdef CONFIG_BLK_DEV_ZONED
", zoned=yes"
#else
@@ -2545,128 +2556,151 @@ static void __init btrfs_print_mod_info(void)
", fsverity=no"
#endif
;
- pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
-}
-
-static int __init init_btrfs_fs(void)
-{
- int err;
-
- btrfs_props_init();
-
- err = btrfs_init_sysfs();
- if (err)
- return err;
-
- btrfs_init_compress();
-
- err = btrfs_init_cachep();
- if (err)
- goto free_compress;
-
- err = extent_io_init();
- if (err)
- goto free_cachep;
- err = extent_state_cache_init();
- if (err)
- goto free_extent_io;
-
- err = extent_map_init();
- if (err)
- goto free_extent_state_cache;
-
- err = ordered_data_init();
- if (err)
- goto free_extent_map;
-
- err = btrfs_delayed_inode_init();
- if (err)
- goto free_ordered_data;
-
- err = btrfs_auto_defrag_init();
- if (err)
- goto free_delayed_inode;
-
- err = btrfs_delayed_ref_init();
- if (err)
- goto free_auto_defrag;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ if (btrfs_get_mod_read_policy() == NULL)
+ pr_info("Btrfs loaded%s\n", options);
+ else
+ pr_info("Btrfs loaded%s, read_policy=%s\n",
+ options, btrfs_get_mod_read_policy());
+#else
+ pr_info("Btrfs loaded%s\n", options);
+#endif
- err = btrfs_prelim_ref_init();
- if (err)
- goto free_delayed_ref;
+ return 0;
+}
- err = btrfs_end_io_wq_init();
- if (err)
- goto free_prelim_ref;
+static int register_btrfs(void)
+{
+ return register_filesystem(&btrfs_fs_type);
+}
- err = btrfs_interface_init();
- if (err)
- goto free_end_io_wq;
+static void unregister_btrfs(void)
+{
+ unregister_filesystem(&btrfs_fs_type);
+}
- btrfs_print_mod_info();
+/* Helper structure for long init/exit functions. */
+struct init_sequence {
+ int (*init_func)(void);
+ /* Can be NULL if the init_func doesn't need cleanup. */
+ void (*exit_func)(void);
+};
- err = btrfs_run_sanity_tests();
- if (err)
- goto unregister_ioctl;
+static const struct init_sequence mod_init_seq[] = {
+ {
+ .init_func = btrfs_props_init,
+ .exit_func = NULL,
+ }, {
+ .init_func = btrfs_init_sysfs,
+ .exit_func = btrfs_exit_sysfs,
+ }, {
+ .init_func = btrfs_init_compress,
+ .exit_func = btrfs_exit_compress,
+ }, {
+ .init_func = btrfs_init_cachep,
+ .exit_func = btrfs_destroy_cachep,
+ }, {
+ .init_func = btrfs_init_dio,
+ .exit_func = btrfs_destroy_dio,
+ }, {
+ .init_func = btrfs_transaction_init,
+ .exit_func = btrfs_transaction_exit,
+ }, {
+ .init_func = btrfs_ctree_init,
+ .exit_func = btrfs_ctree_exit,
+ }, {
+ .init_func = btrfs_free_space_init,
+ .exit_func = btrfs_free_space_exit,
+ }, {
+ .init_func = btrfs_extent_state_init_cachep,
+ .exit_func = btrfs_extent_state_free_cachep,
+ }, {
+ .init_func = extent_buffer_init_cachep,
+ .exit_func = extent_buffer_free_cachep,
+ }, {
+ .init_func = btrfs_bioset_init,
+ .exit_func = btrfs_bioset_exit,
+ }, {
+ .init_func = btrfs_extent_map_init,
+ .exit_func = btrfs_extent_map_exit,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ }, {
+ .init_func = btrfs_read_policy_init,
+ .exit_func = NULL,
+#endif
+ }, {
+ .init_func = ordered_data_init,
+ .exit_func = ordered_data_exit,
+ }, {
+ .init_func = btrfs_delayed_inode_init,
+ .exit_func = btrfs_delayed_inode_exit,
+ }, {
+ .init_func = btrfs_auto_defrag_init,
+ .exit_func = btrfs_auto_defrag_exit,
+ }, {
+ .init_func = btrfs_delayed_ref_init,
+ .exit_func = btrfs_delayed_ref_exit,
+ }, {
+ .init_func = btrfs_prelim_ref_init,
+ .exit_func = btrfs_prelim_ref_exit,
+ }, {
+ .init_func = btrfs_interface_init,
+ .exit_func = btrfs_interface_exit,
+ }, {
+ .init_func = btrfs_print_mod_info,
+ .exit_func = NULL,
+ }, {
+ .init_func = btrfs_run_sanity_tests,
+ .exit_func = NULL,
+ }, {
+ .init_func = register_btrfs,
+ .exit_func = unregister_btrfs,
+ }
+};
- err = register_filesystem(&btrfs_fs_type);
- if (err)
- goto unregister_ioctl;
+static bool mod_init_result[ARRAY_SIZE(mod_init_seq)];
- return 0;
+static __always_inline void btrfs_exit_btrfs_fs(void)
+{
+ int i;
-unregister_ioctl:
- btrfs_interface_exit();
-free_end_io_wq:
- btrfs_end_io_wq_exit();
-free_prelim_ref:
- btrfs_prelim_ref_exit();
-free_delayed_ref:
- btrfs_delayed_ref_exit();
-free_auto_defrag:
- btrfs_auto_defrag_exit();
-free_delayed_inode:
- btrfs_delayed_inode_exit();
-free_ordered_data:
- ordered_data_exit();
-free_extent_map:
- extent_map_exit();
-free_extent_state_cache:
- extent_state_cache_exit();
-free_extent_io:
- extent_io_exit();
-free_cachep:
- btrfs_destroy_cachep();
-free_compress:
- btrfs_exit_compress();
- btrfs_exit_sysfs();
-
- return err;
+ for (i = ARRAY_SIZE(mod_init_seq) - 1; i >= 0; i--) {
+ if (!mod_init_result[i])
+ continue;
+ if (mod_init_seq[i].exit_func)
+ mod_init_seq[i].exit_func();
+ mod_init_result[i] = false;
+ }
}
static void __exit exit_btrfs_fs(void)
{
- btrfs_destroy_cachep();
- btrfs_delayed_ref_exit();
- btrfs_auto_defrag_exit();
- btrfs_delayed_inode_exit();
- btrfs_prelim_ref_exit();
- ordered_data_exit();
- extent_map_exit();
- extent_state_cache_exit();
- extent_io_exit();
- btrfs_interface_exit();
- btrfs_end_io_wq_exit();
- unregister_filesystem(&btrfs_fs_type);
- btrfs_exit_sysfs();
+ btrfs_exit_btrfs_fs();
btrfs_cleanup_fs_uuids();
- btrfs_exit_compress();
+}
+
+static int __init init_btrfs_fs(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mod_init_seq); i++) {
+ ASSERT(!mod_init_result[i]);
+ ret = mod_init_seq[i].init_func();
+ if (ret < 0) {
+ btrfs_exit_btrfs_fs();
+ return ret;
+ }
+ mod_init_result[i] = true;
+ }
+ return 0;
}
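The table-driven sequence above replaces the old goto-unwind chain: each entry's init_func runs in order, successes are recorded in mod_init_result[], and btrfs_exit_btrfs_fs() walks the table backwards calling only the matching exit_func for steps that completed. A compact user-space sketch of the same pattern (all names invented):

/* Hedged, self-contained illustration of the init table + reverse rollback. */
#include <stdbool.h>
#include <stddef.h>

struct step { int (*init)(void); void (*exit)(void); };

static int  a_init(void) { return 0; }	/* stand-ins for real subsystems */
static void a_exit(void) { }
static int  b_init(void) { return 0; }
static void b_exit(void) { }

static const struct step steps[] = { { a_init, a_exit }, { b_init, b_exit } };
static bool done[sizeof(steps) / sizeof(steps[0])];

static void rollback(void)
{
	/* Tear down in reverse order, only for steps that actually succeeded. */
	for (int i = (int)(sizeof(steps) / sizeof(steps[0])) - 1; i >= 0; i--) {
		if (done[i] && steps[i].exit)
			steps[i].exit();
		done[i] = false;
	}
}

static int init_all(void)
{
	for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		if (steps[i].init() < 0) {
			rollback();
			return -1;
		}
		done[i] = true;
	}
	return 0;
}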
late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)
+MODULE_DESCRIPTION("B-Tree File System (BTRFS)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
MODULE_SOFTDEP("pre: xxhash64");
diff --git a/fs/btrfs/super.h b/fs/btrfs/super.h
new file mode 100644
index 000000000000..d80a86acfbbe
--- /dev/null
+++ b/fs/btrfs/super.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_SUPER_H
+#define BTRFS_SUPER_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include "fs.h"
+
+struct super_block;
+struct btrfs_fs_info;
+
+bool btrfs_check_options(const struct btrfs_fs_info *info,
+ unsigned long long *mount_opt,
+ unsigned long flags);
+int btrfs_sync_fs(struct super_block *sb, int wait);
+char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+ u64 subvol_objectid);
+void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info);
+
+static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+static inline void btrfs_set_sb_rdonly(struct super_block *sb)
+{
+ sb->s_flags |= SB_RDONLY;
+ set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
+}
+
+static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
+{
+ sb->s_flags &= ~SB_RDONLY;
+ clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
+}
+
+#endif
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index f9eff3b0f77c..1f64c132b387 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -9,8 +9,10 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/bug.h>
+#include <linux/list.h>
+#include <linux/string_choices.h>
#include <crypto/hash.h>
-
+#include "messages.h"
#include "ctree.h"
#include "discard.h"
#include "disk-io.h"
@@ -21,6 +23,10 @@
#include "space-info.h"
#include "block-group.h"
#include "qgroup.h"
+#include "misc.h"
+#include "fs.h"
+#include "accessors.h"
+#include "zoned.h"
/*
* Structure name Path
@@ -34,12 +40,12 @@
* qgroup_attrs /sys/fs/btrfs/<uuid>/qgroups/<level>_<qgroupid>
* space_info_attrs /sys/fs/btrfs/<uuid>/allocation/<bg-type>
* raid_attrs /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile>
+ * discard_attrs /sys/fs/btrfs/<uuid>/discard
*
* When built with BTRFS_CONFIG_DEBUG:
*
* btrfs_debug_feature_attrs /sys/fs/btrfs/debug
* btrfs_debug_mount_attrs /sys/fs/btrfs/<uuid>/debug
- * discard_debug_attrs /sys/fs/btrfs/<uuid>/debug/discard
*/
struct btrfs_feature_attr {
@@ -61,6 +67,10 @@ struct raid_kobject {
.store = _store, \
}
+#define BTRFS_ATTR_W(_prefix, _name, _store) \
+ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
+ __INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
+
#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \
static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
__INIT_KOBJ_ATTR(_name, 0644, _show, _store)
@@ -92,6 +102,7 @@ static struct btrfs_feature_attr btrfs_attr_features_##_name = { \
static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj);
+static struct kobject *get_btrfs_kobj(struct kobject *kobj);
static struct btrfs_feature_attr *to_btrfs_feature_attr(struct kobj_attribute *a)
{
@@ -151,8 +162,7 @@ static int can_modify_feature(struct btrfs_feature_attr *fa)
clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
break;
default:
- pr_warn("btrfs: sysfs: unknown feature set %d\n",
- fa->feature_set);
+ btrfs_warn(NULL, "sysfs: unknown feature set %d", fa->feature_set);
return 0;
}
@@ -242,7 +252,7 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
/*
* We don't want to do full transaction commit from inside sysfs
*/
- btrfs_set_pending(fs_info, COMMIT);
+ set_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
wake_up_process(fs_info->transaction_kthread);
return count;
@@ -270,23 +280,28 @@ static umode_t btrfs_feature_visible(struct kobject *kobj,
return mode;
}
-BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF);
BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL);
BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS);
BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO);
BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD);
-BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
+BTRFS_FEAT_ATTR_COMPAT_RO(block_group_tree, BLOCK_GROUP_TREE);
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
-/* Remove once support for zoned allocation is feature complete */
-#ifdef CONFIG_BTRFS_DEBUG
+BTRFS_FEAT_ATTR_INCOMPAT(simple_quota, SIMPLE_QUOTA);
+#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+/* Remove once support for extent tree v2 is feature complete */
+BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
+/* Remove once support for raid stripe tree is feature complete. */
+BTRFS_FEAT_ATTR_INCOMPAT(raid_stripe_tree, RAID_STRIPE_TREE);
+#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
#endif
@@ -294,17 +309,15 @@ BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
/*
* Features which depend on feature bits and may differ between each fs.
*
- * /sys/fs/btrfs/features - all available features implemeted by this version
+ * /sys/fs/btrfs/features - all available features implemented by this version
* /sys/fs/btrfs/UUID/features - features of the fs which are enabled or
* can be changed on a mounted filesystem.
*/
static struct attribute *btrfs_supported_feature_attrs[] = {
- BTRFS_FEAT_ATTR_PTR(mixed_backref),
BTRFS_FEAT_ATTR_PTR(default_subvol),
BTRFS_FEAT_ATTR_PTR(mixed_groups),
BTRFS_FEAT_ATTR_PTR(compress_lzo),
BTRFS_FEAT_ATTR_PTR(compress_zstd),
- BTRFS_FEAT_ATTR_PTR(big_metadata),
BTRFS_FEAT_ATTR_PTR(extended_iref),
BTRFS_FEAT_ATTR_PTR(raid56),
BTRFS_FEAT_ATTR_PTR(skinny_metadata),
@@ -312,9 +325,15 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(metadata_uuid),
BTRFS_FEAT_ATTR_PTR(free_space_tree),
BTRFS_FEAT_ATTR_PTR(raid1c34),
-#ifdef CONFIG_BTRFS_DEBUG
+ BTRFS_FEAT_ATTR_PTR(block_group_tree),
+ BTRFS_FEAT_ATTR_PTR(simple_quota),
+#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_PTR(zoned),
#endif
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
+ BTRFS_FEAT_ATTR_PTR(raid_stripe_tree),
+#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_PTR(verity),
#endif
@@ -367,6 +386,8 @@ static const char *rescue_opts[] = {
"nologreplay",
"ignorebadroots",
"ignoredatacsums",
+ "ignoremetacsums",
+ "ignoresuperflags",
"all",
};
@@ -390,19 +411,35 @@ static ssize_t supported_sectorsizes_show(struct kobject *kobj,
char *buf)
{
ssize_t ret = 0;
+ bool has_output = false;
- /* 4K sector size is also supported with 64K page size */
- if (PAGE_SIZE == SZ_64K)
- ret += sysfs_emit_at(buf, ret, "%u ", SZ_4K);
-
- /* Only sectorsize == PAGE_SIZE is now supported */
- ret += sysfs_emit_at(buf, ret, "%lu\n", PAGE_SIZE);
-
+ for (u32 cur = BTRFS_MIN_BLOCKSIZE; cur <= BTRFS_MAX_BLOCKSIZE; cur *= 2) {
+ if (!btrfs_supported_blocksize(cur))
+ continue;
+ if (has_output)
+ ret += sysfs_emit_at(buf, ret, " ");
+ ret += sysfs_emit_at(buf, ret, "%u", cur);
+ has_output = true;
+ }
+ ret += sysfs_emit_at(buf, ret, "\n");
return ret;
}
BTRFS_ATTR(static_feature, supported_sectorsizes,
supported_sectorsizes_show);
+static ssize_t acl_show(struct kobject *kobj, struct kobj_attribute *a, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", IS_ENABLED(CONFIG_BTRFS_FS_POSIX_ACL));
+}
+BTRFS_ATTR(static_feature, acl, acl_show);
+
+static ssize_t temp_fsid_supported_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ return sysfs_emit(buf, "0\n");
+}
+BTRFS_ATTR(static_feature, temp_fsid, temp_fsid_supported_show);
+
/*
* Features which only depend on kernel version.
*
@@ -410,11 +447,13 @@ BTRFS_ATTR(static_feature, supported_sectorsizes,
* btrfs_supported_feature_attrs.
*/
static struct attribute *btrfs_supported_static_feature_attrs[] = {
+ BTRFS_ATTR_PTR(static_feature, acl),
BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
BTRFS_ATTR_PTR(static_feature, supported_checksums),
BTRFS_ATTR_PTR(static_feature, send_stream_version),
BTRFS_ATTR_PTR(static_feature, supported_rescue_options),
BTRFS_ATTR_PTR(static_feature, supported_sectorsizes),
+ BTRFS_ATTR_PTR(static_feature, temp_fsid),
NULL
};
@@ -423,12 +462,10 @@ static const struct attribute_group btrfs_static_feature_attr_group = {
.attrs = btrfs_supported_static_feature_attrs,
};
-#ifdef CONFIG_BTRFS_DEBUG
-
/*
* Discard statistics and tunables
*/
-#define discard_to_fs_info(_kobj) to_fs_info((_kobj)->parent->parent)
+#define discard_to_fs_info(_kobj) to_fs_info(get_btrfs_kobj(_kobj))
static ssize_t btrfs_discardable_bytes_show(struct kobject *kobj,
struct kobj_attribute *a,
@@ -577,11 +614,11 @@ BTRFS_ATTR_RW(discard, max_discard_size, btrfs_discard_max_discard_size_show,
btrfs_discard_max_discard_size_store);
/*
- * Per-filesystem debugging of discard (when mounted with discard=async).
+ * Per-filesystem stats for discard (when mounted with discard=async).
*
- * Path: /sys/fs/btrfs/<uuid>/debug/discard/
+ * Path: /sys/fs/btrfs/<uuid>/discard/
*/
-static const struct attribute *discard_debug_attrs[] = {
+static const struct attribute *discard_attrs[] = {
BTRFS_ATTR_PTR(discard, discardable_bytes),
BTRFS_ATTR_PTR(discard, discardable_extents),
BTRFS_ATTR_PTR(discard, discard_bitmap_bytes),
@@ -593,6 +630,8 @@ static const struct attribute *discard_debug_attrs[] = {
NULL,
};
+#ifdef CONFIG_BTRFS_DEBUG
+
/*
* Per-filesystem runtime debugging exported via sysfs.
*
@@ -692,7 +731,7 @@ static void release_raid_kobj(struct kobject *kobj)
kfree(to_raid_kobj(kobj));
}
-static struct kobj_type btrfs_raid_ktype = {
+static const struct kobj_type btrfs_raid_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_raid_kobj,
.default_groups = raid_groups,
@@ -708,6 +747,151 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \
} \
BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
+static ssize_t btrfs_chunk_size_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_space_info *sinfo = to_space_info(kobj);
+
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(sinfo->chunk_size));
+}
+
+/*
+ * Store new chunk size in space info. Can be called on a read-only filesystem.
+ *
+ * If the new chunk size value is larger than 10% of free space it is reduced
+ * to match that limit. Alignment must be to 256M and the system chunk size
+ * cannot be set.
+ */
+static ssize_t btrfs_chunk_size_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj));
+ char *retptr;
+ u64 val;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!fs_info->fs_devices)
+ return -EINVAL;
+
+ if (btrfs_is_zoned(fs_info))
+ return -EINVAL;
+
+ /* System block type must not be changed. */
+ if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ return -EPERM;
+
+ val = memparse(buf, &retptr);
+ /* There could be a trailing '\n'; also catch any typos after the value. */
+ retptr = skip_spaces(retptr);
+ if (*retptr != 0 || val == 0)
+ return -EINVAL;
+
+ val = min(val, BTRFS_MAX_DATA_CHUNK_SIZE);
+
+ /* Limit stripe size to 10% of available space. */
+ val = min(mult_perc(fs_info->fs_devices->total_rw_bytes, 10), val);
+
+ /* Must be multiple of 256M. */
+ val &= ~((u64)SZ_256M - 1);
+
+ /* Must be at least 256M. */
+ if (val < SZ_256M)
+ return -EINVAL;
+
+ btrfs_update_space_info_chunk_size(space_info, val);
+
+ return len;
+}
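The store handler above normalizes the user-supplied size before committing it: clamp to the maximum data chunk size, clamp to 10% of the writable space, align down to a 256M boundary, and reject anything that ends up below 256M. A plain-C sketch of that arithmetic (the 10 GiB maximum is an assumption about BTRFS_MAX_DATA_CHUNK_SIZE, verify against the real definition):

#include <stdint.h>

#define DEMO_SZ_256M             (256ULL * 1024 * 1024)
#define DEMO_MAX_DATA_CHUNK_SIZE (10ULL * 1024 * 1024 * 1024)	/* assumed */

/* Returns the normalized chunk size, or 0 if the request must be rejected. */
static uint64_t demo_normalize_chunk_size(uint64_t requested, uint64_t total_rw_bytes)
{
	uint64_t val = requested;
	uint64_t ten_percent = total_rw_bytes / 10;

	if (val > DEMO_MAX_DATA_CHUNK_SIZE)
		val = DEMO_MAX_DATA_CHUNK_SIZE;
	if (val > ten_percent)			/* limit to 10% of available space */
		val = ten_percent;
	val &= ~(DEMO_SZ_256M - 1);		/* align down to 256M */
	return (val < DEMO_SZ_256M) ? 0 : val;
}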
+
+static ssize_t btrfs_size_classes_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_space_info *sinfo = to_space_info(kobj);
+ struct btrfs_block_group *bg;
+ u32 none = 0;
+ u32 small = 0;
+ u32 medium = 0;
+ u32 large = 0;
+
+ for (int i = 0; i < BTRFS_NR_RAID_TYPES; ++i) {
+ down_read(&sinfo->groups_sem);
+ list_for_each_entry(bg, &sinfo->block_groups[i], list) {
+ if (!btrfs_block_group_should_use_size_class(bg))
+ continue;
+ switch (bg->size_class) {
+ case BTRFS_BG_SZ_NONE:
+ none++;
+ break;
+ case BTRFS_BG_SZ_SMALL:
+ small++;
+ break;
+ case BTRFS_BG_SZ_MEDIUM:
+ medium++;
+ break;
+ case BTRFS_BG_SZ_LARGE:
+ large++;
+ break;
+ }
+ }
+ up_read(&sinfo->groups_sem);
+ }
+ return sysfs_emit(buf, "none %u\n"
+ "small %u\n"
+ "medium %u\n"
+ "large %u\n",
+ none, small, medium, large);
+}
+
+#ifdef CONFIG_BTRFS_DEBUG
+/*
+ * Request chunk allocation with current chunk size.
+ */
+static ssize_t btrfs_force_chunk_alloc_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj));
+ struct btrfs_trans_handle *trans;
+ bool val;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (sb_rdonly(fs_info->sb))
+ return -EROFS;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return -EINVAL;
+
+ /*
+ * This is unsafe to call from a sysfs context and may cause unexpected
+ * problems.
+ */
+ trans = btrfs_start_transaction(fs_info->tree_root, 0);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ ret = btrfs_force_chunk_alloc(trans, space_info->flags);
+ btrfs_end_transaction(trans);
+
+ if (ret == 1)
+ return len;
+
+ return -ENOSPC;
+}
+BTRFS_ATTR_W(space_info, force_chunk_alloc, btrfs_force_chunk_alloc_store);
+
+#endif
+
SPACE_INFO_ATTR(flags);
SPACE_INFO_ATTR(total_bytes);
SPACE_INFO_ATTR(bytes_used);
@@ -718,6 +902,117 @@ SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(bytes_zone_unusable);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
+SPACE_INFO_ATTR(reclaim_count);
+SPACE_INFO_ATTR(reclaim_bytes);
+SPACE_INFO_ATTR(reclaim_errors);
+BTRFS_ATTR_RW(space_info, chunk_size, btrfs_chunk_size_show, btrfs_chunk_size_store);
+BTRFS_ATTR(space_info, size_classes, btrfs_size_classes_show);
+
+static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ ssize_t ret;
+
+ spin_lock(&space_info->lock);
+ ret = sysfs_emit(buf, "%d\n", btrfs_calc_reclaim_threshold(space_info));
+ spin_unlock(&space_info->lock);
+ return ret;
+}
+
+static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ int thresh;
+ int ret;
+
+ if (READ_ONCE(space_info->dynamic_reclaim))
+ return -EINVAL;
+
+ ret = kstrtoint(buf, 10, &thresh);
+ if (ret)
+ return ret;
+
+ if (thresh < 0 || thresh > 100)
+ return -EINVAL;
+
+ WRITE_ONCE(space_info->bg_reclaim_threshold, thresh);
+
+ return len;
+}
+
+BTRFS_ATTR_RW(space_info, bg_reclaim_threshold,
+ btrfs_sinfo_bg_reclaim_threshold_show,
+ btrfs_sinfo_bg_reclaim_threshold_store);
+
+static ssize_t btrfs_sinfo_dynamic_reclaim_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+
+ return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->dynamic_reclaim));
+}
+
+static ssize_t btrfs_sinfo_dynamic_reclaim_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ int dynamic_reclaim;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &dynamic_reclaim);
+ if (ret)
+ return ret;
+
+ if (dynamic_reclaim < 0)
+ return -EINVAL;
+
+ WRITE_ONCE(space_info->dynamic_reclaim, dynamic_reclaim != 0);
+
+ return len;
+}
+
+BTRFS_ATTR_RW(space_info, dynamic_reclaim,
+ btrfs_sinfo_dynamic_reclaim_show,
+ btrfs_sinfo_dynamic_reclaim_store);
+
+static ssize_t btrfs_sinfo_periodic_reclaim_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+
+ return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->periodic_reclaim));
+}
+
+static ssize_t btrfs_sinfo_periodic_reclaim_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ int periodic_reclaim;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &periodic_reclaim);
+ if (ret)
+ return ret;
+
+ if (periodic_reclaim < 0)
+ return -EINVAL;
+
+ WRITE_ONCE(space_info->periodic_reclaim, periodic_reclaim != 0);
+
+ return len;
+}
+
+BTRFS_ATTR_RW(space_info, periodic_reclaim,
+ btrfs_sinfo_periodic_reclaim_show,
+ btrfs_sinfo_periodic_reclaim_store);
/*
* Allocation information about block group types.
@@ -735,6 +1030,17 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
BTRFS_ATTR_PTR(space_info, disk_used),
BTRFS_ATTR_PTR(space_info, disk_total),
+ BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
+ BTRFS_ATTR_PTR(space_info, dynamic_reclaim),
+ BTRFS_ATTR_PTR(space_info, chunk_size),
+ BTRFS_ATTR_PTR(space_info, size_classes),
+ BTRFS_ATTR_PTR(space_info, reclaim_count),
+ BTRFS_ATTR_PTR(space_info, reclaim_bytes),
+ BTRFS_ATTR_PTR(space_info, reclaim_errors),
+ BTRFS_ATTR_PTR(space_info, periodic_reclaim),
+#ifdef CONFIG_BTRFS_DEBUG
+ BTRFS_ATTR_PTR(space_info, force_chunk_alloc),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(space_info);
@@ -745,7 +1051,7 @@ static void space_info_release(struct kobject *kobj)
kfree(sinfo);
}
-static struct kobj_type space_info_ktype = {
+static const struct kobj_type space_info_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = space_info_release,
.default_groups = space_info_groups,
@@ -806,7 +1112,7 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
/*
* We don't want to do full transaction commit from inside sysfs
*/
- btrfs_set_pending(fs_info, COMMIT);
+ set_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
wake_up_process(fs_info->transaction_kthread);
return len;
@@ -818,7 +1124,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
+ return sysfs_emit(buf, "%u\n", fs_info->nodesize);
}
BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -828,17 +1134,117 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
+static ssize_t btrfs_commit_stats_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ u64 now = ktime_get_ns();
+ u64 start_time = fs_info->commit_stats.critical_section_start_time;
+ u64 pending = 0;
+
+ if (start_time)
+ pending = now - start_time;
+
+ return sysfs_emit(buf,
+ "commits %llu\n"
+ "cur_commit_ms %llu\n"
+ "last_commit_ms %llu\n"
+ "max_commit_ms %llu\n"
+ "total_commit_ms %llu\n",
+ fs_info->commit_stats.commit_count,
+ div_u64(pending, NSEC_PER_MSEC),
+ div_u64(fs_info->commit_stats.last_commit_dur, NSEC_PER_MSEC),
+ div_u64(fs_info->commit_stats.max_commit_dur, NSEC_PER_MSEC),
+ div_u64(fs_info->commit_stats.total_commit_dur, NSEC_PER_MSEC));
+}
+
+static ssize_t btrfs_commit_stats_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ unsigned long val;
+ int ret;
+
+ if (!fs_info)
+ return -EPERM;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val)
+ return -EINVAL;
+
+ WRITE_ONCE(fs_info->commit_stats.max_commit_dur, 0);
+
+ return len;
+}
+BTRFS_ATTR_RW(, commit_stats, btrfs_commit_stats_show, btrfs_commit_stats_store);
+
+static ssize_t btrfs_zoned_stats_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ struct btrfs_block_group *bg;
+ size_t ret = 0;
+
+ if (!btrfs_is_zoned(fs_info))
+ return ret;
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ ret += sysfs_emit_at(buf, ret, "active block-groups: %zu\n",
+ list_count_nodes(&fs_info->zone_active_bgs));
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+
+ mutex_lock(&fs_info->reclaim_bgs_lock);
+ spin_lock(&fs_info->unused_bgs_lock);
+ ret += sysfs_emit_at(buf, ret, "\treclaimable: %zu\n",
+ list_count_nodes(&fs_info->reclaim_bgs));
+ ret += sysfs_emit_at(buf, ret, "\tunused: %zu\n",
+ list_count_nodes(&fs_info->unused_bgs));
+ spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+
+ ret += sysfs_emit_at(buf, ret, "\tneed reclaim: %s\n",
+ str_true_false(btrfs_zoned_should_reclaim(fs_info)));
+
+ if (fs_info->data_reloc_bg)
+ ret += sysfs_emit_at(buf, ret,
+ "data relocation block-group: %llu\n",
+ fs_info->data_reloc_bg);
+ if (fs_info->treelog_bg)
+ ret += sysfs_emit_at(buf, ret,
+ "tree-log block-group: %llu\n",
+ fs_info->treelog_bg);
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ ret += sysfs_emit_at(buf, ret, "active zones:\n");
+ list_for_each_entry(bg, &fs_info->zone_active_bgs, active_bg_list) {
+ ret += sysfs_emit_at(buf, ret,
+ "\tstart: %llu, wp: %llu used: %llu, reserved: %llu, unusable: %llu\n",
+ bg->start, bg->alloc_offset, bg->used,
+ bg->reserved, bg->zone_unusable);
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ return ret;
+}
+BTRFS_ATTR(, zoned_stats, btrfs_zoned_stats_show);
+
static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
@@ -859,7 +1265,7 @@ static ssize_t quota_override_store(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
unsigned long knob;
- int err;
+ int ret;
if (!fs_info)
return -EPERM;
@@ -867,9 +1273,9 @@ static ssize_t quota_override_store(struct kobject *kobj,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- err = kstrtoul(buf, 10, &knob);
- if (err)
- return err;
+ ret = kstrtoul(buf, 10, &knob);
+ if (ret)
+ return ret;
if (knob > 1)
return -EINVAL;
@@ -919,6 +1325,9 @@ static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj,
case BTRFS_EXCLOP_BALANCE:
str = "balance\n";
break;
+ case BTRFS_EXCLOP_BALANCE_PAUSED:
+ str = "balance paused\n";
+ break;
case BTRFS_EXCLOP_DEV_ADD:
str = "device add\n";
break;
@@ -947,50 +1356,119 @@ static ssize_t btrfs_generation_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%llu\n", fs_info->generation);
+ return sysfs_emit(buf, "%llu\n", btrfs_get_fs_generation(fs_info));
}
BTRFS_ATTR(, generation, btrfs_generation_show);
-/*
- * Look for an exact string @string in @buffer with possible leading or
- * trailing whitespace
- */
-static bool strmatch(const char *buffer, const char *string)
+static ssize_t btrfs_temp_fsid_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+
+ return sysfs_emit(buf, "%d\n", fs_info->fs_devices->temp_fsid);
+}
+BTRFS_ATTR(, temp_fsid, btrfs_temp_fsid_show);
+
+static const char *btrfs_read_policy_name[] = {
+ "pid",
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ "round-robin",
+ "devid",
+#endif
+};
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+
+/* Global module configuration parameters. */
+static char *read_policy;
+char *btrfs_get_mod_read_policy(void)
{
- const size_t len = strlen(string);
+ return read_policy;
+}
+
+/* Set perms to 0 to disable the /sys/module/btrfs/parameters/read_policy interface. */
+module_param(read_policy, charp, 0);
+MODULE_PARM_DESC(read_policy,
+"Global read policy: pid (default), round-robin[:<min_contig_read>], devid[:<devid>]");
+#endif
+
+int btrfs_read_policy_to_enum(const char *str, s64 *value_ret)
+{
+ char param[32];
+ char __maybe_unused *value_str;
+
+ if (!str || strlen(str) == 0)
+ return 0;
+
+ strscpy(param, str);
- /* Skip leading whitespace */
- buffer = skip_spaces(buffer);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* Separate value from input in policy:value format. */
+ value_str = strchr(param, ':');
+ if (value_str) {
+ char *retptr;
- /* Match entire string, check if the rest is whitespace or empty */
- if (strncmp(string, buffer, len) == 0 &&
- strlen(skip_spaces(buffer + len)) == 0)
- return true;
+ *value_str = 0;
+ value_str++;
+ if (!value_ret)
+ return -EINVAL;
- return false;
+ *value_ret = memparse(value_str, &retptr);
+ /* Catch any trailing typos after the value. */
+ retptr = skip_spaces(retptr);
+ if (*retptr != 0 || *value_ret <= 0)
+ return -EINVAL;
+ }
+#endif
+
+ return sysfs_match_string(btrfs_read_policy_name, param);
}
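btrfs_read_policy_to_enum() accepts either a bare policy name or "policy:value", per the MODULE_PARM_DESC above ("pid", "round-robin[:<min_contig_read>]", "devid[:<devid>]"). A plain-C sketch of the split-and-match it performs (demo_parse_policy() is illustrative only; the real code relies on sysfs_match_string() and memparse(), which also accepts k/m/g suffixes):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static const char * const demo_policies[] = { "pid", "round-robin", "devid" };

/* Returns the policy index, or -1 on error; *value is set if a ':' part exists. */
static int demo_parse_policy(const char *input, int64_t *value)
{
	char buf[32];
	char *sep;

	strncpy(buf, input, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	sep = strchr(buf, ':');
	if (sep) {
		*sep = '\0';
		*value = strtoll(sep + 1, NULL, 0);	/* e.g. "devid:2" -> 2 */
		if (*value <= 0)
			return -1;
	}
	for (size_t i = 0; i < sizeof(demo_policies) / sizeof(demo_policies[0]); i++)
		if (strcmp(buf, demo_policies[i]) == 0)
			return (int)i;
	return -1;
}

With this format, writing e.g. "devid:2" to /sys/fs/btrfs/<UUID>/read_policy selects the devid policy and pins reads to devid 2, provided that device exists; the validation for that lives in btrfs_read_policy_store() below.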
-static const char * const btrfs_read_policy_name[] = { "pid" };
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+int __init btrfs_read_policy_init(void)
+{
+ s64 value;
+
+ if (btrfs_read_policy_to_enum(read_policy, &value) == -EINVAL) {
+ btrfs_err(NULL, "invalid read policy or value %s", read_policy);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
static ssize_t btrfs_read_policy_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
+ const enum btrfs_read_policy policy = READ_ONCE(fs_devices->read_policy);
ssize_t ret = 0;
int i;
for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
- if (fs_devices->read_policy == i)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s[%s]",
- (ret == 0 ? "" : " "),
- btrfs_read_policy_name[i]);
- else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
- (ret == 0 ? "" : " "),
- btrfs_read_policy_name[i]);
+ if (ret != 0)
+ ret += sysfs_emit_at(buf, ret, " ");
+
+ if (i == policy)
+ ret += sysfs_emit_at(buf, ret, "[");
+
+ ret += sysfs_emit_at(buf, ret, "%s", btrfs_read_policy_name[i]);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ if (i == BTRFS_READ_POLICY_RR)
+ ret += sysfs_emit_at(buf, ret, ":%u",
+ READ_ONCE(fs_devices->rr_min_contig_read));
+
+ if (i == BTRFS_READ_POLICY_DEVID)
+ ret += sysfs_emit_at(buf, ret, ":%llu",
+ READ_ONCE(fs_devices->read_devid));
+#endif
+ if (i == policy)
+ ret += sysfs_emit_at(buf, ret, "]");
}
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += sysfs_emit_at(buf, ret, "\n");
return ret;
}
@@ -1000,21 +1478,80 @@ static ssize_t btrfs_read_policy_store(struct kobject *kobj,
const char *buf, size_t len)
{
struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
- int i;
+ int index;
+ s64 value = -1;
- for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
- if (strmatch(buf, btrfs_read_policy_name[i])) {
- if (i != fs_devices->read_policy) {
- fs_devices->read_policy = i;
- btrfs_info(fs_devices->fs_info,
- "read policy set to '%s'",
- btrfs_read_policy_name[i]);
+ index = btrfs_read_policy_to_enum(buf, &value);
+ if (index < 0)
+ return -EINVAL;
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* If moving from RR then disable collecting fs stats. */
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR && index != BTRFS_READ_POLICY_RR)
+ fs_devices->collect_fs_stats = false;
+
+ if (index == BTRFS_READ_POLICY_RR) {
+ if (value != -1) {
+ const u32 sectorsize = fs_devices->fs_info->sectorsize;
+
+ if (!IS_ALIGNED(value, sectorsize)) {
+ u64 temp_value = round_up(value, sectorsize);
+
+ btrfs_debug(fs_devices->fs_info,
+"read_policy: min contig read %lld should be multiple of sectorsize %u, rounded to %llu",
+ value, sectorsize, temp_value);
+ value = temp_value;
}
- return len;
+ } else {
+ value = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
}
+
+ if (index != READ_ONCE(fs_devices->read_policy) ||
+ value != READ_ONCE(fs_devices->rr_min_contig_read)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ WRITE_ONCE(fs_devices->rr_min_contig_read, value);
+
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s:%lld'",
+ btrfs_read_policy_name[index], value);
+ }
+
+ fs_devices->collect_fs_stats = true;
+
+ return len;
+ }
+
+ if (index == BTRFS_READ_POLICY_DEVID) {
+ if (value != -1) {
+ BTRFS_DEV_LOOKUP_ARGS(args);
+
+ /* Validate input devid. */
+ args.devid = value;
+ if (btrfs_find_device(fs_devices, &args) == NULL)
+ return -EINVAL;
+ } else {
+ /* Set default devid to the devid of the latest device. */
+ value = fs_devices->latest_dev->devid;
+ }
+
+ if (index != READ_ONCE(fs_devices->read_policy) ||
+ value != READ_ONCE(fs_devices->read_devid)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ WRITE_ONCE(fs_devices->read_devid, value);
+
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s:%llu'",
+ btrfs_read_policy_name[index], value);
+ }
+
+ return len;
+ }
+#endif
+ if (index != READ_ONCE(fs_devices->read_policy)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s'",
+ btrfs_read_policy_name[index]);
}
- return -EINVAL;
+ return len;
}
BTRFS_ATTR_RW(, read_policy, btrfs_read_policy_show, btrfs_read_policy_store);
@@ -1023,11 +1560,8 @@ static ssize_t btrfs_bg_reclaim_threshold_show(struct kobject *kobj,
char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- ssize_t ret;
- ret = sysfs_emit(buf, "%d\n", READ_ONCE(fs_info->bg_reclaim_threshold));
-
- return ret;
+ return sysfs_emit(buf, "%d\n", READ_ONCE(fs_info->bg_reclaim_threshold));
}
static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj,
@@ -1042,8 +1576,13 @@ static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj,
if (ret)
return ret;
+#ifdef CONFIG_BTRFS_DEBUG
+ if (thresh != 0 && (thresh > 100))
+ return -EINVAL;
+#else
if (thresh != 0 && (thresh <= 50 || thresh > 100))
return -EINVAL;
+#endif
WRITE_ONCE(fs_info->bg_reclaim_threshold, thresh);
@@ -1052,6 +1591,47 @@ static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj,
BTRFS_ATTR_RW(, bg_reclaim_threshold, btrfs_bg_reclaim_threshold_show,
btrfs_bg_reclaim_threshold_store);
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static ssize_t btrfs_offload_csum_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
+
+ switch (READ_ONCE(fs_devices->offload_csum_mode)) {
+ case BTRFS_OFFLOAD_CSUM_AUTO:
+ return sysfs_emit(buf, "auto\n");
+ case BTRFS_OFFLOAD_CSUM_FORCE_ON:
+ return sysfs_emit(buf, "1\n");
+ case BTRFS_OFFLOAD_CSUM_FORCE_OFF:
+ return sysfs_emit(buf, "0\n");
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static ssize_t btrfs_offload_csum_store(struct kobject *kobj,
+ struct kobj_attribute *a, const char *buf,
+ size_t len)
+{
+ struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
+ int ret;
+ bool val;
+
+ ret = kstrtobool(buf, &val);
+ if (ret == 0)
+ WRITE_ONCE(fs_devices->offload_csum_mode,
+ val ? BTRFS_OFFLOAD_CSUM_FORCE_ON : BTRFS_OFFLOAD_CSUM_FORCE_OFF);
+ else if (ret == -EINVAL && sysfs_streq(buf, "auto"))
+ WRITE_ONCE(fs_devices->offload_csum_mode, BTRFS_OFFLOAD_CSUM_AUTO);
+ else
+ return -EINVAL;
+
+ return len;
+}
+BTRFS_ATTR_RW(, offload_csum, btrfs_offload_csum_show, btrfs_offload_csum_store);
+#endif
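The offload_csum store handler accepts a boolean to force checksum offload on or off, and the literal "auto" to restore the default behavior. A small sketch of that tri-state parse (the boolean spellings below are a simplification of what kstrtobool() accepts):

#include <string.h>

enum demo_csum_mode { DEMO_CSUM_AUTO, DEMO_CSUM_FORCE_ON, DEMO_CSUM_FORCE_OFF };

/* Returns 0 and sets *mode on success, -1 for unrecognized input. */
static int demo_parse_csum_mode(const char *buf, enum demo_csum_mode *mode)
{
	if (!strcmp(buf, "1") || !strcmp(buf, "y") || !strcmp(buf, "on"))
		*mode = DEMO_CSUM_FORCE_ON;
	else if (!strcmp(buf, "0") || !strcmp(buf, "n") || !strcmp(buf, "off"))
		*mode = DEMO_CSUM_FORCE_OFF;
	else if (!strcmp(buf, "auto"))
		*mode = DEMO_CSUM_AUTO;
	else
		return -1;
	return 0;
}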
+
/*
* Per-filesystem information and stats.
*
@@ -1069,6 +1649,12 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, generation),
BTRFS_ATTR_PTR(, read_policy),
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
+ BTRFS_ATTR_PTR(, commit_stats),
+ BTRFS_ATTR_PTR(, temp_fsid),
+ BTRFS_ATTR_PTR(, zoned_stats),
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ BTRFS_ATTR_PTR(, offload_csum),
+#endif
NULL,
};
@@ -1080,7 +1666,7 @@ static void btrfs_release_fsid_kobj(struct kobject *kobj)
complete(&fs_devs->kobj_unregister);
}
-static struct kobj_type btrfs_ktype = {
+static const struct kobj_type btrfs_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = btrfs_release_fsid_kobj,
};
@@ -1099,11 +1685,26 @@ static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
return to_fs_devs(kobj)->fs_info;
}
+static struct kobject *get_btrfs_kobj(struct kobject *kobj)
+{
+ while (kobj) {
+ if (kobj->ktype == &btrfs_ktype)
+ return kobj;
+ kobj = kobj->parent;
+ }
+ return NULL;
+}
+
#define NUM_FEATURE_BITS 64
#define BTRFS_FEATURE_NAME_MAX 13
static char btrfs_unknown_feature_names[FEAT_MAX][NUM_FEATURE_BITS][BTRFS_FEATURE_NAME_MAX];
static struct btrfs_feature_attr btrfs_feature_attrs[FEAT_MAX][NUM_FEATURE_BITS];
+static_assert(ARRAY_SIZE(btrfs_unknown_feature_names) ==
+ ARRAY_SIZE(btrfs_feature_attrs));
+static_assert(ARRAY_SIZE(btrfs_unknown_feature_names[0]) ==
+ ARRAY_SIZE(btrfs_feature_attrs[0]));
+
static const u64 supported_feature_masks[FEAT_MAX] = {
[FEAT_COMPAT] = BTRFS_FEATURE_COMPAT_SUPP,
[FEAT_COMPAT_RO] = BTRFS_FEATURE_COMPAT_RO_SUPP,
@@ -1212,13 +1813,12 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info)
kobject_del(fs_info->space_info_kobj);
kobject_put(fs_info->space_info_kobj);
}
-#ifdef CONFIG_BTRFS_DEBUG
- if (fs_info->discard_debug_kobj) {
- sysfs_remove_files(fs_info->discard_debug_kobj,
- discard_debug_attrs);
- kobject_del(fs_info->discard_debug_kobj);
- kobject_put(fs_info->discard_debug_kobj);
+ if (fs_info->discard_kobj) {
+ sysfs_remove_files(fs_info->discard_kobj, discard_attrs);
+ kobject_del(fs_info->discard_kobj);
+ kobject_put(fs_info->discard_kobj);
}
+#ifdef CONFIG_BTRFS_DEBUG
if (fs_info->debug_kobj) {
sysfs_remove_files(fs_info->debug_kobj, btrfs_debug_mount_attrs);
kobject_del(fs_info->debug_kobj);
@@ -1272,11 +1872,6 @@ static void init_feature_attrs(void)
struct btrfs_feature_attr *fa;
int set, i;
- BUILD_BUG_ON(ARRAY_SIZE(btrfs_unknown_feature_names) !=
- ARRAY_SIZE(btrfs_feature_attrs));
- BUILD_BUG_ON(ARRAY_SIZE(btrfs_unknown_feature_names[0]) !=
- ARRAY_SIZE(btrfs_feature_attrs[0]));
-
memset(btrfs_feature_attrs, 0, sizeof(btrfs_feature_attrs));
memset(btrfs_unknown_feature_names, 0,
sizeof(btrfs_unknown_feature_names));
@@ -1399,16 +1994,35 @@ void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info)
kobject_put(&space_info->kobj);
}
-static const char *alloc_name(u64 flags)
+static const char *alloc_name(struct btrfs_space_info *space_info)
{
+ u64 flags = space_info->flags;
+
switch (flags) {
case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
return "mixed";
case BTRFS_BLOCK_GROUP_METADATA:
- return "metadata";
+ switch (space_info->subgroup_id) {
+ case BTRFS_SUB_GROUP_PRIMARY:
+ return "metadata";
+ case BTRFS_SUB_GROUP_TREELOG:
+ return "metadata-treelog";
+ default:
+ WARN_ON_ONCE(1);
+ return "metadata (unknown sub-group)";
+ }
case BTRFS_BLOCK_GROUP_DATA:
- return "data";
+ switch (space_info->subgroup_id) {
+ case BTRFS_SUB_GROUP_PRIMARY:
+ return "data";
+ case BTRFS_SUB_GROUP_DATA_RELOC:
+ return "data-reloc";
+ default:
+ WARN_ON_ONCE(1);
+ return "data (unknown sub-group)";
+ }
case BTRFS_BLOCK_GROUP_SYSTEM:
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY);
return "system";
default:
WARN_ON(1);
@@ -1420,14 +2034,13 @@ static const char *alloc_name(u64 flags)
* Create a sysfs entry for a space info type at path
* /sys/fs/btrfs/UUID/allocation/TYPE
*/
-int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+int btrfs_sysfs_add_space_info_type(struct btrfs_space_info *space_info)
{
int ret;
ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
- fs_info->space_info_kobj, "%s",
- alloc_name(space_info->flags));
+ space_info->fs_info->space_info_kobj, "%s",
+ alloc_name(space_info));
if (ret) {
kobject_put(&space_info->kobj);
return ret;
@@ -1518,6 +2131,10 @@ static ssize_t btrfs_devinfo_scrub_speed_max_store(struct kobject *kobj,
unsigned long long limit;
limit = memparse(buf, &endptr);
+ /* There could be a trailing '\n'; also catch any typos after the value. */
+ endptr = skip_spaces(endptr);
+ if (*endptr != 0)
+ return -EINVAL;
WRITE_ONCE(device->scrub_speed_max, limit);
return len;
}
@@ -1537,6 +2154,16 @@ static ssize_t btrfs_devinfo_writeable_show(struct kobject *kobj,
}
BTRFS_ATTR(devid, writeable, btrfs_devinfo_writeable_show);
+static ssize_t btrfs_devinfo_fsid_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_device *device = container_of(kobj, struct btrfs_device,
+ devid_kobj);
+
+ return sysfs_emit(buf, "%pU\n", device->fs_devices->fsid);
+}
+BTRFS_ATTR(devid, fsid, btrfs_devinfo_fsid_show);
+
static ssize_t btrfs_devinfo_error_stats_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
@@ -1572,6 +2199,7 @@ BTRFS_ATTR(devid, error_stats, btrfs_devinfo_error_stats_show);
*/
static struct attribute *devid_attrs[] = {
BTRFS_ATTR_PTR(devid, error_stats),
+ BTRFS_ATTR_PTR(devid, fsid),
BTRFS_ATTR_PTR(devid, in_fs_metadata),
BTRFS_ATTR_PTR(devid, missing),
BTRFS_ATTR_PTR(devid, replace_target),
@@ -1590,7 +2218,7 @@ static void btrfs_release_devid_kobj(struct kobject *kobj)
complete(&device->kobj_unregister);
}
-static struct kobj_type devid_ktype = {
+static const struct kobj_type devid_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = devid_groups,
.release = btrfs_release_devid_kobj,
@@ -1674,7 +2302,7 @@ void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action)
ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
if (ret)
- pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
+ btrfs_warn(NULL, "sending event %d to kobject: '%s' (%p): failed",
action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
&disk_to_dev(bdev->bd_disk)->kobj);
}
@@ -1717,15 +2345,15 @@ static struct kset *btrfs_kset;
*/
int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs)
{
- int error;
+ int ret;
init_completion(&fs_devs->kobj_unregister);
fs_devs->fsid_kobj.kset = btrfs_kset;
- error = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL,
- "%pU", fs_devs->fsid);
- if (error) {
+ ret = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL,
+ "%pU", fs_devs->fsid);
+ if (ret) {
kobject_put(&fs_devs->fsid_kobj);
- return error;
+ return ret;
}
fs_devs->devices_kobj = kobject_create_and_add("devices",
@@ -1751,75 +2379,192 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs)
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
{
- int error;
+ int ret;
struct btrfs_fs_devices *fs_devs = fs_info->fs_devices;
struct kobject *fsid_kobj = &fs_devs->fsid_kobj;
- error = btrfs_sysfs_add_fs_devices(fs_devs);
- if (error)
- return error;
+ ret = btrfs_sysfs_add_fs_devices(fs_devs);
+ if (ret)
+ return ret;
- error = sysfs_create_files(fsid_kobj, btrfs_attrs);
- if (error) {
+ ret = sysfs_create_files(fsid_kobj, btrfs_attrs);
+ if (ret) {
btrfs_sysfs_remove_fs_devices(fs_devs);
- return error;
+ return ret;
}
- error = sysfs_create_group(fsid_kobj,
- &btrfs_feature_attr_group);
- if (error)
+ ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
+ if (ret)
goto failure;
#ifdef CONFIG_BTRFS_DEBUG
fs_info->debug_kobj = kobject_create_and_add("debug", fsid_kobj);
if (!fs_info->debug_kobj) {
- error = -ENOMEM;
+ ret = -ENOMEM;
goto failure;
}
- error = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs);
- if (error)
+ ret = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs);
+ if (ret)
goto failure;
+#endif
/* Discard directory */
- fs_info->discard_debug_kobj = kobject_create_and_add("discard",
- fs_info->debug_kobj);
- if (!fs_info->discard_debug_kobj) {
- error = -ENOMEM;
+ fs_info->discard_kobj = kobject_create_and_add("discard", fsid_kobj);
+ if (!fs_info->discard_kobj) {
+ ret = -ENOMEM;
goto failure;
}
- error = sysfs_create_files(fs_info->discard_debug_kobj,
- discard_debug_attrs);
- if (error)
+ ret = sysfs_create_files(fs_info->discard_kobj, discard_attrs);
+ if (ret)
goto failure;
-#endif
- error = addrm_unknown_feature_attrs(fs_info, true);
- if (error)
+ ret = addrm_unknown_feature_attrs(fs_info, true);
+ if (ret)
goto failure;
- error = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi");
- if (error)
+ ret = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi");
+ if (ret)
goto failure;
fs_info->space_info_kobj = kobject_create_and_add("allocation",
fsid_kobj);
if (!fs_info->space_info_kobj) {
- error = -ENOMEM;
+ ret = -ENOMEM;
goto failure;
}
- error = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs);
- if (error)
+ ret = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs);
+ if (ret)
goto failure;
return 0;
failure:
btrfs_sysfs_remove_mounted(fs_info);
- return error;
+ return ret;
+}
+
+static ssize_t qgroup_enabled_show(struct kobject *qgroups_kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+ bool enabled;
+
+ spin_lock(&fs_info->qgroup_lock);
+ enabled = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON;
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+BTRFS_ATTR(qgroups, enabled, qgroup_enabled_show);
+
+static ssize_t qgroup_mode_show(struct kobject *qgroups_kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+ ssize_t ret = 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ ASSERT(btrfs_qgroup_enabled(fs_info));
+ switch (btrfs_qgroup_mode(fs_info)) {
+ case BTRFS_QGROUP_MODE_FULL:
+ ret = sysfs_emit(buf, "qgroup\n");
+ break;
+ case BTRFS_QGROUP_MODE_SIMPLE:
+ ret = sysfs_emit(buf, "squota\n");
+ break;
+ default:
+ btrfs_warn(fs_info, "unexpected qgroup mode %d\n",
+ btrfs_qgroup_mode(fs_info));
+ break;
+ }
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+BTRFS_ATTR(qgroups, mode, qgroup_mode_show);
+
+static ssize_t qgroup_inconsistent_show(struct kobject *qgroups_kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+ bool inconsistent;
+
+ spin_lock(&fs_info->qgroup_lock);
+ inconsistent = (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return sysfs_emit(buf, "%d\n", inconsistent);
+}
+BTRFS_ATTR(qgroups, inconsistent, qgroup_inconsistent_show);
+
+static ssize_t qgroup_drop_subtree_thres_show(struct kobject *qgroups_kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+ u8 result;
+
+ spin_lock(&fs_info->qgroup_lock);
+ result = fs_info->qgroup_drop_subtree_thres;
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return sysfs_emit(buf, "%d\n", result);
}
+static ssize_t qgroup_drop_subtree_thres_store(struct kobject *qgroups_kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+ u8 new_thres;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &new_thres);
+ if (ret)
+ return -EINVAL;
+
+ if (new_thres > BTRFS_MAX_LEVEL)
+ return -EINVAL;
+
+ spin_lock(&fs_info->qgroup_lock);
+ fs_info->qgroup_drop_subtree_thres = new_thres;
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return len;
+}
+BTRFS_ATTR_RW(qgroups, drop_subtree_threshold, qgroup_drop_subtree_thres_show,
+ qgroup_drop_subtree_thres_store);
+
+/*
+ * Qgroups global info
+ *
+ * Path: /sys/fs/btrfs/<uuid>/qgroups/
+ */
+static struct attribute *qgroups_attrs[] = {
+ BTRFS_ATTR_PTR(qgroups, enabled),
+ BTRFS_ATTR_PTR(qgroups, inconsistent),
+ BTRFS_ATTR_PTR(qgroups, drop_subtree_threshold),
+ BTRFS_ATTR_PTR(qgroups, mode),
+ NULL
+};
+ATTRIBUTE_GROUPS(qgroups);
+
+static void qgroups_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_type qgroups_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = qgroups_groups,
+ .release = qgroups_release,
+};
+
static inline struct btrfs_fs_info *qgroup_kobj_to_fs_info(struct kobject *kobj)
{
return to_fs_info(kobj->parent->parent);
@@ -1884,7 +2629,7 @@ static void qgroup_release(struct kobject *kobj)
memset(&qgroup->kobj, 0, sizeof(*kobj));
}
-static struct kobj_type qgroup_ktype = {
+static const struct kobj_type qgroup_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = qgroup_release,
.default_groups = qgroup_groups,
@@ -1896,7 +2641,7 @@ int btrfs_sysfs_add_one_qgroup(struct btrfs_fs_info *fs_info,
struct kobject *qgroups_kobj = fs_info->qgroups_kobj;
int ret;
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return 0;
if (qgroup->kobj.state_initialized)
return 0;
@@ -1917,7 +2662,7 @@ void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info)
struct btrfs_qgroup *qgroup;
struct btrfs_qgroup *next;
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return;
rbtree_postorder_for_each_entry_safe(qgroup, next,
@@ -1938,18 +2683,22 @@ int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info)
struct btrfs_qgroup *next;
int ret = 0;
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return 0;
ASSERT(fsid_kobj);
if (fs_info->qgroups_kobj)
return 0;
- fs_info->qgroups_kobj = kobject_create_and_add("qgroups", fsid_kobj);
- if (!fs_info->qgroups_kobj) {
- ret = -ENOMEM;
+ fs_info->qgroups_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!fs_info->qgroups_kobj)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(fs_info->qgroups_kobj, &qgroups_ktype,
+ fsid_kobj, "qgroups");
+ if (ret < 0)
goto out;
- }
+
rbtree_postorder_for_each_entry_safe(qgroup, next,
&fs_info->qgroup_tree, node) {
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
@@ -1966,7 +2715,7 @@ out:
void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup)
{
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return;
if (qgroup->kobj.state_initialized) {
@@ -1979,36 +2728,23 @@ void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
* Change per-fs features in /sys/fs/btrfs/UUID/features to match current
* values in superblock. Call after any changes to incompat/compat_ro flags
*/
-void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
- u64 bit, enum btrfs_feature_set set)
+void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_devices *fs_devs;
struct kobject *fsid_kobj;
- u64 __maybe_unused features;
- int __maybe_unused ret;
+ int ret;
if (!fs_info)
return;
- /*
- * See 14e46e04958df74 and e410e34fad913dd, feature bit updates are not
- * safe when called from some contexts (eg. balance)
- */
- features = get_features(fs_info, set);
- ASSERT(bit & supported_feature_masks[set]);
-
- fs_devs = fs_info->fs_devices;
- fsid_kobj = &fs_devs->fsid_kobj;
-
+ fsid_kobj = &fs_info->fs_devices->fsid_kobj;
if (!fsid_kobj->state_initialized)
return;
- /*
- * FIXME: this is too heavy to update just one value, ideally we'd like
- * to use sysfs_update_group but some refactoring is needed first.
- */
- sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group);
- ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
+ ret = sysfs_update_group(fsid_kobj, &btrfs_feature_attr_group);
+ if (ret < 0)
+ btrfs_warn(fs_info,
+ "failed to update /sys/fs/btrfs/%pU/features: %d",
+ fs_info->fs_devices->fsid, ret);
}
int __init btrfs_init_sysfs(void)
@@ -2030,8 +2766,11 @@ int __init btrfs_init_sysfs(void)
#ifdef CONFIG_BTRFS_DEBUG
ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group);
- if (ret)
- goto out2;
+ if (ret) {
+ sysfs_unmerge_group(&btrfs_kset->kobj,
+ &btrfs_static_feature_attr_group);
+ goto out_remove_group;
+ }
#endif
return 0;
@@ -2054,4 +2793,3 @@ void __cold btrfs_exit_sysfs(void)
#endif
kset_unregister(btrfs_kset);
}
-
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index bacef43f7267..05498e5346c3 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -3,8 +3,18 @@
#ifndef BTRFS_SYSFS_H
#define BTRFS_SYSFS_H
+#include <linux/types.h>
+#include <linux/compiler_types.h>
#include <linux/kobject.h>
+struct block_device;
+struct btrfs_fs_info;
+struct btrfs_device;
+struct btrfs_fs_devices;
+struct btrfs_block_group;
+struct btrfs_space_info;
+struct btrfs_qgroup;
+
enum btrfs_feature_set {
FEAT_COMPAT,
FEAT_COMPAT_RO,
@@ -19,8 +29,7 @@ void btrfs_sysfs_remove_device(struct btrfs_device *device);
int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs);
void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices);
-void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
- u64 bit, enum btrfs_feature_set set);
+void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info);
void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action);
int __init btrfs_init_sysfs(void);
@@ -28,8 +37,7 @@ void __cold btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache);
-int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info);
+int btrfs_sysfs_add_space_info_type(struct btrfs_space_info *space_info);
void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info);
void btrfs_sysfs_update_devid(struct btrfs_device *device);
@@ -39,5 +47,11 @@ void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info);
int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup);
+int btrfs_read_policy_to_enum(const char *str, s64 *value);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+int __init btrfs_read_policy_init(void);
+char *btrfs_get_mod_read_policy(void);
+#endif
#endif
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 3a4099a2bf05..b576897d71cc 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -16,6 +16,7 @@
#include "../disk-io.h"
#include "../qgroup.h"
#include "../block-group.h"
+#include "../fs.h"
static struct vfsmount *test_mnt = NULL;
@@ -27,6 +28,9 @@ const char *test_error[] = {
[TEST_ALLOC_INODE] = "cannot allocate inode",
[TEST_ALLOC_BLOCK_GROUP] = "cannot allocate block group",
[TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map",
+ [TEST_ALLOC_CHUNK_MAP] = "cannot allocate chunk map",
+ [TEST_ALLOC_IO_CONTEXT] = "cannot allocate io context",
+ [TEST_ALLOC_TRANSACTION] = "cannot allocate transaction",
};
static const struct super_operations btrfs_test_super_ops = {
@@ -59,10 +63,8 @@ struct inode *btrfs_new_test_inode(void)
return NULL;
inode->i_mode = S_IFREG;
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
- BTRFS_I(inode)->location.offset = 0;
- inode_init_owner(&init_user_ns, inode, NULL, S_IFREG);
+ btrfs_set_inode_number(BTRFS_I(inode), BTRFS_FIRST_FREE_OBJECTID);
+ inode_init_owner(&nop_mnt_idmap, inode, NULL, S_IFREG);
return inode;
}
@@ -100,7 +102,7 @@ struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info)
if (!dev)
return ERR_PTR(-ENOMEM);
- extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
+ btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, 0);
INIT_LIST_HEAD(&dev->dev_list);
list_add(&dev->dev_list, &fs_info->fs_devices->devices);
@@ -109,7 +111,7 @@ struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info)
static void btrfs_free_dummy_device(struct btrfs_device *dev)
{
- extent_io_tree_release(&dev->alloc_state);
+ btrfs_extent_io_tree_release(&dev->alloc_state);
kfree(dev);
}
@@ -141,6 +143,11 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
+
+ /* CRC32C csum size. */
+ fs_info->csum_size = 4;
+ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) /
+ fs_info->csum_size;
set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
test_mnt->mnt_sb->s_fs_info = fs_info;
@@ -150,40 +157,27 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
- struct radix_tree_iter iter;
- void **slot;
struct btrfs_device *dev, *tmp;
+ struct extent_buffer *eb;
+ unsigned long index;
if (!fs_info)
return;
- if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
- &fs_info->fs_state)))
+ if (WARN_ON(!btrfs_is_testing(fs_info)))
return;
test_mnt->mnt_sb->s_fs_info = NULL;
- spin_lock(&fs_info->buffer_lock);
- radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
- struct extent_buffer *eb;
-
- eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
- if (!eb)
- continue;
- /* Shouldn't happen but that kind of thinking creates CVE's */
- if (radix_tree_exception(eb)) {
- if (radix_tree_deref_retry(eb))
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- slot = radix_tree_iter_resume(slot, &iter);
- spin_unlock(&fs_info->buffer_lock);
- free_extent_buffer_stale(eb);
- spin_lock(&fs_info->buffer_lock);
+ xa_lock_irq(&fs_info->buffer_tree);
+ xa_for_each(&fs_info->buffer_tree, index, eb) {
+ xa_unlock_irq(&fs_info->buffer_tree);
+ free_extent_buffer(eb);
+ xa_lock_irq(&fs_info->buffer_tree);
}
- spin_unlock(&fs_info->buffer_lock);
+ xa_unlock_irq(&fs_info->buffer_tree);
- btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ btrfs_mapping_tree_free(fs_info);
list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,
dev_list) {
btrfs_free_dummy_device(dev);
@@ -199,11 +193,12 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
void btrfs_free_dummy_root(struct btrfs_root *root)
{
- if (!root)
+ if (IS_ERR_OR_NULL(root))
return;
/* Will be freed by btrfs_free_fs_roots */
if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
return;
+ btrfs_global_root_delete(root);
btrfs_put_root(root);
}
@@ -241,11 +236,20 @@ void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
{
if (!cache)
return;
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
kfree(cache->free_space_ctl);
kfree(cache);
}
+void btrfs_init_dummy_transaction(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info)
+{
+ memset(trans, 0, sizeof(*trans));
+ trans->fs_info = fs_info;
+ xa_init(&trans->delayed_refs.head_refs);
+ xa_init(&trans->delayed_refs.dirty_extents);
+ spin_lock_init(&trans->delayed_refs.lock);
+}
+
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
@@ -291,6 +295,12 @@ int btrfs_run_sanity_tests(void)
ret = btrfs_test_free_space_tree(sectorsize, nodesize);
if (ret)
goto out;
+ ret = btrfs_test_raid_stripe_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_delayed_refs(sectorsize, nodesize);
+ if (ret)
+ goto out;
}
}
ret = btrfs_test_extent_map();
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 7a2d7ffbe30e..4307bdaa6749 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_TESTS_H
#define BTRFS_TESTS_H
+#include <linux/types.h>
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_run_sanity_tests(void);
@@ -23,12 +25,16 @@ enum {
TEST_ALLOC_INODE,
TEST_ALLOC_BLOCK_GROUP,
TEST_ALLOC_EXTENT_MAP,
+ TEST_ALLOC_CHUNK_MAP,
+ TEST_ALLOC_IO_CONTEXT,
+ TEST_ALLOC_TRANSACTION,
};
extern const char *test_error[];
struct btrfs_root;
struct btrfs_trans_handle;
+struct btrfs_transaction;
int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
@@ -36,7 +42,9 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
+int btrfs_test_raid_stripe_tree(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_map(void);
+int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
@@ -46,6 +54,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long lengt
void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
+void btrfs_init_dummy_transaction(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info);
struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info);
#else
static inline int btrfs_run_sanity_tests(void)
diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c
new file mode 100644
index 000000000000..e2248acb906b
--- /dev/null
+++ b/fs/btrfs/tests/delayed-refs-tests.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/sizes.h>
+#include "btrfs-tests.h"
+#include "../transaction.h"
+#include "../delayed-ref.h"
+#include "../extent-tree.h"
+
+#define FAKE_ROOT_OBJECTID 256
+#define FAKE_BYTENR 0
+#define FAKE_LEVEL 1
+#define FAKE_INO 256
+#define FAKE_FILE_OFFSET 0
+#define FAKE_PARENT SZ_1M
+
+struct ref_head_check {
+ u64 bytenr;
+ u64 num_bytes;
+ int ref_mod;
+ int total_ref_mod;
+ int must_insert;
+};
+
+struct ref_node_check {
+ u64 bytenr;
+ u64 num_bytes;
+ int ref_mod;
+ enum btrfs_delayed_ref_action action;
+ u8 type;
+ u64 parent;
+ u64 root;
+ u64 owner;
+ u64 offset;
+};
+
+static enum btrfs_ref_type ref_type_from_disk_ref_type(u8 type)
+{
+ if ((type == BTRFS_TREE_BLOCK_REF_KEY) ||
+ (type == BTRFS_SHARED_BLOCK_REF_KEY))
+ return BTRFS_REF_METADATA;
+ return BTRFS_REF_DATA;
+}
+
+static void delete_delayed_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref_head(head);
+}
+
+static void delete_delayed_ref_node(struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_ref_node *node)
+{
+ rb_erase_cached(&node->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&node->ref_node);
+ if (!list_empty(&node->add_list))
+ list_del_init(&node->add_list);
+ btrfs_put_delayed_ref(node);
+}
+
+static int validate_ref_head(struct btrfs_delayed_ref_head *head,
+ struct ref_head_check *check)
+{
+ if (head->bytenr != check->bytenr) {
+ test_err("invalid bytenr have: %llu want: %llu", head->bytenr,
+ check->bytenr);
+ return -EINVAL;
+ }
+
+ if (head->num_bytes != check->num_bytes) {
+ test_err("invalid num_bytes have: %llu want: %llu",
+ head->num_bytes, check->num_bytes);
+ return -EINVAL;
+ }
+
+ if (head->ref_mod != check->ref_mod) {
+ test_err("invalid ref_mod have: %d want: %d", head->ref_mod,
+ check->ref_mod);
+ return -EINVAL;
+ }
+
+ if (head->total_ref_mod != check->total_ref_mod) {
+ test_err("invalid total_ref_mod have: %d want: %d",
+ head->total_ref_mod, check->total_ref_mod);
+ return -EINVAL;
+ }
+
+ if (head->must_insert_reserved != check->must_insert) {
+ test_err("invalid must_insert have: %d want: %d",
+ head->must_insert_reserved, check->must_insert);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_ref_node(struct btrfs_delayed_ref_node *node,
+ struct ref_node_check *check)
+{
+ if (node->bytenr != check->bytenr) {
+ test_err("invalid bytenr have: %llu want: %llu", node->bytenr,
+ check->bytenr);
+ return -EINVAL;
+ }
+
+ if (node->num_bytes != check->num_bytes) {
+ test_err("invalid num_bytes have: %llu want: %llu",
+ node->num_bytes, check->num_bytes);
+ return -EINVAL;
+ }
+
+ if (node->ref_mod != check->ref_mod) {
+ test_err("invalid ref_mod have: %d want: %d", node->ref_mod,
+ check->ref_mod);
+ return -EINVAL;
+ }
+
+ if (node->action != check->action) {
+ test_err("invalid action have: %d want: %d", node->action,
+ check->action);
+ return -EINVAL;
+ }
+
+ if (node->parent != check->parent) {
+ test_err("invalid parent have: %llu want: %llu", node->parent,
+ check->parent);
+ return -EINVAL;
+ }
+
+ if (node->ref_root != check->root) {
+ test_err("invalid root have: %llu want: %llu", node->ref_root,
+ check->root);
+ return -EINVAL;
+ }
+
+ if (node->type != check->type) {
+ test_err("invalid type have: %d want: %d", node->type,
+ check->type);
+ return -EINVAL;
+ }
+
+ if (btrfs_delayed_ref_owner(node) != check->owner) {
+ test_err("invalid owner have: %llu want: %llu",
+ btrfs_delayed_ref_owner(node), check->owner);
+ return -EINVAL;
+ }
+
+ if (btrfs_delayed_ref_offset(node) != check->offset) {
+ test_err("invalid offset have: %llu want: %llu",
+ btrfs_delayed_ref_offset(node), check->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int simple_test(struct btrfs_trans_handle *trans,
+ struct ref_head_check *head_check,
+ struct ref_node_check *node_check)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = ref_type_from_disk_ref_type(node_check->type),
+ .action = node_check->action,
+ .parent = node_check->parent,
+ .ref_root = node_check->root,
+ .bytenr = node_check->bytenr,
+ .num_bytes = fs_info->nodesize,
+ };
+ int ret;
+
+ if (ref.type == BTRFS_REF_METADATA)
+ btrfs_init_tree_ref(&ref, node_check->owner, node_check->root,
+ false);
+ else
+ btrfs_init_data_ref(&ref, node_check->owner, node_check->offset,
+ node_check->root, true);
+
+ if (ref.type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, head_check))
+ goto out;
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, node_check))
+ goto out;
+ ret = 0;
+out:
+ btrfs_unselect_ref_head(delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+/*
+ * These are simple tests: make sure that our btrfs_refs get turned into the
+ * appropriate btrfs_delayed_ref_node based on their settings and action.
+ */
+static int simple_tests(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .total_ref_mod = 1,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .type = BTRFS_TREE_BLOCK_REF_KEY,
+ .parent = 0,
+ .root = FAKE_ROOT_OBJECTID,
+ .owner = FAKE_LEVEL,
+ .offset = 0,
+ };
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add tree block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add extent data failed");
+ return -EINVAL;
+ }
+
+ node_check.parent = FAKE_PARENT;
+ node_check.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add shared block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_SHARED_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add shared data failed");
+ return -EINVAL;
+ }
+
+ head_check.ref_mod = -1;
+ head_check.total_ref_mod = -1;
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.type = BTRFS_TREE_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+ node_check.parent = 0;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop tree block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop extent data failed");
+ return -EINVAL;
+ }
+
+ node_check.parent = FAKE_PARENT;
+ node_check.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop shared block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_SHARED_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop shared data failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Merge tests: validate that delayed ref merging works correctly, that the ref
+ * counts end up as expected, and that delayed refs are deleted once they are no
+ * longer needed.
+ */
+static int merge_tests(struct btrfs_trans_handle *trans,
+ enum btrfs_ref_type type)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = type,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .parent = 0,
+ .ref_root = FAKE_ROOT_OBJECTID,
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ };
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 0,
+ .total_ref_mod = 0,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 2,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .parent = 0,
+ .root = FAKE_ROOT_OBJECTID,
+ };
+ int ret;
+
+ /*
+ * First add a ref and then drop it, make sure we get a head ref with a
+ * 0 total ref mod and no nodes.
+ */
+ if (type == BTRFS_REF_METADATA) {
+ node_check.type = BTRFS_TREE_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);
+ } else {
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+ btrfs_init_data_ref(&ref, FAKE_INO, FAKE_FILE_OFFSET,
+ FAKE_ROOT_OBJECTID, true);
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("single add and drop failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Add a ref, then add another ref, make sure we get a head ref with a
+ * 2 total ref mod and 1 node.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ head_check.ref_mod = 2;
+ head_check.total_ref_mod = 2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double add failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Add two drop refs, make sure they are merged properly. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ head_check.ref_mod = -2;
+ head_check.total_ref_mod = -2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double drop failed");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Add multiple refs, then drop until we go negative again. */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 10; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 12; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = -2;
+ head_check.total_ref_mod = -2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double drop failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Drop multiple refs, then add until we go positive again. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 10; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 12; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = 2;
+ head_check.total_ref_mod = 2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("add and drop to positive failed");
+ goto out;
+ }
+
+ node_check.action = BTRFS_ADD_DELAYED_REF;
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Add a bunch of refs with different roots and parents, then drop them
+ * all, make sure everything is properly merged.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 50; i++) {
+ if (!(i % 2)) {
+ ref.parent = 0;
+ ref.ref_root = FAKE_ROOT_OBJECTID + i;
+ } else {
+ ref.parent = FAKE_PARENT + (i * fs_info->nodesize);
+ }
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 50; i++) {
+ if (!(i % 2)) {
+ ref.parent = 0;
+ ref.ref_root = FAKE_ROOT_OBJECTID + i;
+ } else {
+ ref.parent = FAKE_PARENT + (i * fs_info->nodesize);
+ }
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = 0;
+ head_check.total_ref_mod = 0;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("add and drop multiple failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ ret = 0;
+out:
+ if (!IS_ERR_OR_NULL(head))
+ btrfs_unselect_ref_head(&trans->transaction->delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+/*
+ * Basic test to validate that we always get the add operations first, followed
+ * by any delete operations.
+ */
+static int select_delayed_refs_test(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = BTRFS_REF_METADATA,
+ .action = BTRFS_DROP_DELAYED_REF,
+ .parent = 0,
+ .ref_root = FAKE_ROOT_OBJECTID,
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ };
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 0,
+ .total_ref_mod = 0,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .type = BTRFS_TREE_BLOCK_REF_KEY,
+ .parent = 0,
+ .owner = FAKE_LEVEL,
+ .offset = 0,
+ };
+ int ret;
+
+ /* Add the drop first. */
+ btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ /*
+ * Now add the add, and make it a different root so it's logically later
+ * in the rb tree.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 1;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ head = NULL;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("head check failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.root = FAKE_ROOT_OBJECTID + 1;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Now we're going to do the same thing, but we're going to have an add
+ * that gets deleted because of a merge, and make sure we still have
+ * another add in place.
+ */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 1;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 2;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ head = NULL;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("head check failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_ADD_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID + 2;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+ ret = 0;
+out:
+ if (head)
+ btrfs_unselect_ref_head(delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_transaction *transaction;
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info;
+ int ret;
+
+ test_msg("running delayed refs tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+ transaction = kmalloc(sizeof(*transaction), GFP_KERNEL);
+ if (!transaction) {
+ test_std_err(TEST_ALLOC_TRANSACTION);
+ ret = -ENOMEM;
+ goto out_free_fs_info;
+ }
+ btrfs_init_dummy_trans(&trans, fs_info);
+ btrfs_init_dummy_transaction(transaction, fs_info);
+ trans.transaction = transaction;
+
+ ret = simple_tests(&trans);
+ if (!ret) {
+ test_msg("running delayed refs merge tests on metadata refs");
+ ret = merge_tests(&trans, BTRFS_REF_METADATA);
+ }
+
+ if (!ret) {
+ test_msg("running delayed refs merge tests on data refs");
+ ret = merge_tests(&trans, BTRFS_REF_DATA);
+ }
+
+ if (!ret)
+ ret = select_delayed_refs_test(&trans);
+
+ kfree(transaction);
+out_free_fs_info:
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
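
For reference, the core pattern the new selftest above repeats — queue delayed refs, pull the head, check the resulting ref mods, then clean up — condenses to roughly the sketch below. example_add_drop_merges_to_zero() is illustrative only, reuses the FAKE_* constants and includes from the top of delayed-refs-tests.c, and assumes the dummy trans handle was prepared exactly as in btrfs_test_delayed_refs():

/*
 * Illustrative sketch, not part of the patch: an ADD and a DROP of the same
 * extent should merge so the head survives with a zero ref mod and no nodes.
 */
static int example_add_drop_merges_to_zero(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_ref ref = {
		.type = BTRFS_REF_METADATA,
		.action = BTRFS_ADD_DELAYED_REF,
		.ref_root = FAKE_ROOT_OBJECTID,
		.bytenr = FAKE_BYTENR,
		.num_bytes = trans->fs_info->nodesize,
	};
	int ret;

	btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);

	/* Queue the ADD, then the matching DROP. */
	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
	if (ret)
		goto out;
	ref.action = BTRFS_DROP_DELAYED_REF;
	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
	if (ret)
		goto out;

	head = btrfs_select_ref_head(trans->fs_info, delayed_refs);
	if (IS_ERR_OR_NULL(head)) {
		ret = -EINVAL;
		goto out;
	}

	/* The two refs cancel out: ref_mod and total_ref_mod are both 0. */
	ret = (head->ref_mod == 0 && head->total_ref_mod == 0) ? 0 : -EINVAL;
	btrfs_unselect_ref_head(delayed_refs, head);
out:
	btrfs_destroy_delayed_refs(trans->transaction);
	return ret;
}
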
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index 2a95f7224e18..6a43a64ba55a 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -8,6 +8,7 @@
#include "../ctree.h"
#include "../extent_io.h"
#include "../disk-io.h"
+#include "../accessors.h"
static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
{
@@ -15,7 +16,6 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
struct btrfs_path *path = NULL;
struct btrfs_root *root = NULL;
struct extent_buffer *eb;
- struct btrfs_item *item;
char *value = "mary had a little lamb";
char *split1 = "mary had a little";
char *split2 = " lamb";
@@ -48,7 +48,8 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- path->nodes[0] = eb = alloc_dummy_extent_buffer(fs_info, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, nodesize);
+ path->nodes[0] = eb;
if (!eb) {
test_std_err(TEST_ALLOC_EXTENT_BUFFER);
ret = -ENOMEM;
@@ -60,8 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
key.type = BTRFS_EXTENT_CSUM_KEY;
key.offset = 0;
- btrfs_setup_item_for_insert(root, path, &key, value_len);
- item = btrfs_item_nr(0);
+ /*
+ * Passing a NULL trans handle is fine here, as we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
value_len);
@@ -90,8 +94,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- item = btrfs_item_nr(0);
- if (btrfs_item_size(eb, item) != strlen(split1)) {
+ if (btrfs_item_size(eb, 0) != strlen(split1)) {
test_err("invalid len in the first split");
ret = -EINVAL;
goto out;
@@ -115,8 +118,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- item = btrfs_item_nr(1);
- if (btrfs_item_size(eb, item) != strlen(split2)) {
+ if (btrfs_item_size(eb, 1) != strlen(split2)) {
test_err("invalid len in the second split");
ret = -EINVAL;
goto out;
@@ -147,8 +149,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- item = btrfs_item_nr(0);
- if (btrfs_item_size(eb, item) != strlen(split3)) {
+ if (btrfs_item_size(eb, 0) != strlen(split3)) {
test_err("invalid len in the first split");
ret = -EINVAL;
goto out;
@@ -171,8 +172,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- item = btrfs_item_nr(1);
- if (btrfs_item_size(eb, item) != strlen(split4)) {
+ if (btrfs_item_size(eb, 1) != strlen(split4)) {
test_err("invalid len in the second split");
ret = -EINVAL;
goto out;
@@ -195,8 +195,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- item = btrfs_item_nr(2);
- if (btrfs_item_size(eb, item) != strlen(split2)) {
+ if (btrfs_item_size(eb, 2) != strlen(split2)) {
test_err("invalid len in the second split");
ret = -EINVAL;
goto out;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index c2e72e7a8ff0..a0187d6163df 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -4,65 +4,116 @@
*/
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
+#include "../disk-io.h"
#include "../btrfs_inode.h"
-#define PROCESS_UNLOCK (1 << 0)
-#define PROCESS_RELEASE (1 << 1)
-#define PROCESS_TEST_LOCKED (1 << 2)
+#define PROCESS_UNLOCK (1U << 0)
+#define PROCESS_RELEASE (1U << 1)
+#define PROCESS_TEST_LOCKED (1U << 2)
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
unsigned long flags)
{
int ret;
- struct page *pages[16];
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- unsigned long nr_pages = end_index - index + 1;
+ struct folio_batch fbatch;
+ pgoff_t index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
int i;
int count = 0;
int loops = 0;
- while (nr_pages > 0) {
- ret = find_get_pages_contig(inode->i_mapping, index,
- min_t(unsigned long, nr_pages,
- ARRAY_SIZE(pages)), pages);
+ folio_batch_init(&fbatch);
+
+ while (index <= end_index) {
+ ret = filemap_get_folios_contig(inode->i_mapping, &index,
+ end_index, &fbatch);
for (i = 0; i < ret; i++) {
+ struct folio *folio = fbatch.folios[i];
+
if (flags & PROCESS_TEST_LOCKED &&
- !PageLocked(pages[i]))
+ !folio_test_locked(folio))
count++;
- if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
- unlock_page(pages[i]);
- put_page(pages[i]);
+ if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
+ folio_unlock(folio);
if (flags & PROCESS_RELEASE)
- put_page(pages[i]);
+ folio_put(folio);
}
- nr_pages -= ret;
- index += ret;
+ folio_batch_release(&fbatch);
cond_resched();
loops++;
if (loops > 100000) {
printk(KERN_ERR
- "stuck in a loop, start %llu, end %llu, nr_pages %lu, ret %d\n",
- start, end, nr_pages, ret);
+ "stuck in a loop, start %llu, end %llu, ret %d\n",
+ start, end, ret);
break;
}
}
+
return count;
}
-static int test_find_delalloc(u32 sectorsize)
+#define STATE_FLAG_STR_LEN 256
+
+#define PRINT_ONE_FLAG(state, dest, cur, name) \
+({ \
+ if (state->state & EXTENT_##name) \
+ cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur, \
+ "%s" #name, cur == 0 ? "" : "|"); \
+})
+
+static void extent_flag_to_str(const struct extent_state *state, char *dest)
+{
+ int cur = 0;
+
+ dest[0] = 0;
+ PRINT_ONE_FLAG(state, dest, cur, DIRTY);
+ PRINT_ONE_FLAG(state, dest, cur, LOCKED);
+ PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG1);
+ PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG2);
+ PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
+ PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
+ PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
+ PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
+ PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
+ PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
+ PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
+ PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
+ PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
+}
+
+static void dump_extent_io_tree(const struct extent_io_tree *tree)
+{
+ struct rb_node *node;
+ char flags_str[STATE_FLAG_STR_LEN];
+
+ node = rb_first(&tree->state);
+ test_msg("io tree content:");
+ while (node) {
+ struct extent_state *state;
+
+ state = rb_entry(node, struct extent_state, rb_node);
+ extent_flag_to_str(state, flags_str);
+ test_msg(" start=%llu len=%llu flags=%s", state->start,
+ state->end + 1 - state->start, flags_str);
+ node = rb_next(node);
+ }
+}
+
+static int test_find_delalloc(u32 sectorsize, u32 nodesize)
{
- struct inode *inode;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_root *root = NULL;
+ struct inode *inode = NULL;
struct extent_io_tree *tmp;
struct page *page;
struct page *locked_page = NULL;
- unsigned long index = 0;
/* In this test we need at least 2 file extents at its maximum size */
u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
u64 total_dirty = 2 * max_bytes;
@@ -72,25 +123,40 @@ static int test_find_delalloc(u32 sectorsize)
test_msg("running find delalloc tests");
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+
inode = btrfs_new_test_inode();
if (!inode) {
test_std_err(TEST_ALLOC_INODE);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
tmp = &BTRFS_I(inode)->io_tree;
+ BTRFS_I(inode)->root = root;
/*
* Passing NULL as we don't have fs_info but tracepoints are not used
* at this point
*/
- extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST, NULL);
+ btrfs_extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
/*
* First go through and create and mark all of our pages dirty, we pin
* everything to make sure our pages don't get evicted and screw up our
* test.
*/
- for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
+ for (pgoff_t index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
if (!page) {
test_err("failed to allocate test page");
@@ -110,10 +176,10 @@ static int test_find_delalloc(u32 sectorsize)
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_delalloc(tmp, 0, sectorsize - 1, 0, NULL);
+ btrfs_set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
start = 0;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("should have found at least one delalloc");
@@ -124,7 +190,7 @@ static int test_find_delalloc(u32 sectorsize)
sectorsize - 1, start, end);
goto out_bits;
}
- unlock_extent(tmp, start, end);
+ btrfs_unlock_extent(tmp, start, end, NULL);
unlock_page(locked_page);
put_page(locked_page);
@@ -141,10 +207,10 @@ static int test_find_delalloc(u32 sectorsize)
test_err("couldn't find the locked page");
goto out_bits;
}
- set_extent_delalloc(tmp, sectorsize, max_bytes - 1, 0, NULL);
+ btrfs_set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("couldn't find delalloc in our range");
@@ -160,7 +226,7 @@ static int test_find_delalloc(u32 sectorsize)
test_err("there were unlocked pages in the range");
goto out_bits;
}
- unlock_extent(tmp, start, end);
+ btrfs_unlock_extent(tmp, start, end, NULL);
/* locked_page was unlocked above */
put_page(locked_page);
@@ -178,7 +244,7 @@ static int test_find_delalloc(u32 sectorsize)
}
start = test_start;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (found) {
test_err("found range when we shouldn't have");
@@ -196,10 +262,10 @@ static int test_find_delalloc(u32 sectorsize)
*
* We are re-using our test_start from above since it works out well.
*/
- set_extent_delalloc(tmp, max_bytes, total_dirty - 1, 0, NULL);
+ btrfs_set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("didn't find our range");
@@ -215,7 +281,7 @@ static int test_find_delalloc(u32 sectorsize)
test_err("pages in range were not all locked");
goto out_bits;
}
- unlock_extent(tmp, start, end);
+ btrfs_unlock_extent(tmp, start, end, NULL);
/*
* Now to test where we run into a page that is no longer dirty in the
@@ -240,7 +306,7 @@ static int test_find_delalloc(u32 sectorsize)
* this changes at any point in the future we will need to fix this
* tests expected behavior.
*/
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("didn't find our range");
@@ -258,96 +324,153 @@ static int test_find_delalloc(u32 sectorsize)
}
ret = 0;
out_bits:
- clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
+ if (ret)
+ dump_extent_io_tree(tmp);
+ btrfs_clear_extent_bit(tmp, 0, total_dirty - 1, (unsigned)-1, NULL);
out:
if (locked_page)
put_page(locked_page);
process_page_range(inode, 0, total_dirty - 1,
PROCESS_UNLOCK | PROCESS_RELEASE);
iput(inode);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
-static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
- unsigned long len)
+static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
unsigned long i;
- for (i = 0; i < len * BITS_PER_BYTE; i++) {
- int bit, bit1;
+ for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
+ bool bit_set, bit1_set;
+
+ bit_set = test_bit(i, bitmap);
+ bit1_set = extent_buffer_test_bit(eb, 0, i);
+ if (bit1_set != bit_set) {
+ u8 has;
+ u8 expect;
- bit = !!test_bit(i, bitmap);
- bit1 = !!extent_buffer_test_bit(eb, 0, i);
- if (bit1 != bit) {
- test_err("bits do not match");
+ read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
+ expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));
+
+ test_err(
+ "bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
+ i, i / BITS_PER_BYTE, has, expect);
return -EINVAL;
}
- bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
- i % BITS_PER_BYTE);
- if (bit1 != bit) {
- test_err("offset bits do not match");
+ bit1_set = extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
+ i % BITS_PER_BYTE);
+ if (bit1_set != bit_set) {
+ u8 has;
+ u8 expect;
+
+ read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
+ expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));
+
+ test_err(
+ "bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
+ i / BITS_PER_BYTE, i % BITS_PER_BYTE,
+ i / BITS_PER_BYTE, has, expect);
return -EINVAL;
}
}
return 0;
}
-static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
- unsigned long len)
+static int test_bitmap_set(const char *name, unsigned long *bitmap,
+ struct extent_buffer *eb,
+ unsigned long byte_start, unsigned long bit_start,
+ unsigned long bit_len)
+{
+ int ret;
+
+ bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
+ extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret < 0)
+ test_err("%s test failed", name);
+ return ret;
+}
+
+static int test_bitmap_clear(const char *name, unsigned long *bitmap,
+ struct extent_buffer *eb,
+ unsigned long byte_start, unsigned long bit_start,
+ unsigned long bit_len)
+{
+ int ret;
+
+ bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
+ extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret < 0)
+ test_err("%s test failed", name);
+ return ret;
+}
+static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
unsigned long i, j;
+ unsigned long byte_len = eb->len;
u32 x;
int ret;
- memset(bitmap, 0, len);
- memzero_extent_buffer(eb, 0, len);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
- test_err("bitmap was not zeroed");
- return -EINVAL;
- }
+ ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
- bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("setting all bits failed");
+ ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
+ if (ret < 0)
return ret;
- }
- bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("clearing all bits failed");
+ ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
+ if (ret < 0)
return ret;
- }
/* Straddling pages test */
- if (len > PAGE_SIZE) {
- bitmap_set(bitmap,
- (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
- sizeof(long) * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("setting straddling pages failed");
+ if (byte_len > PAGE_SIZE) {
+ ret = test_bitmap_set("cross page set", bitmap, eb,
+ PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (ret < 0)
return ret;
- }
- bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
- bitmap_clear(bitmap,
- (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+ ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross page clear", bitmap, eb,
+ PAGE_SIZE - sizeof(long) / 2, 0,
sizeof(long) * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("clearing straddling pages failed");
+ if (ret < 0)
return ret;
- }
}
/*
@@ -355,9 +478,12 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
* something repetitive that could miss some hypothetical off-by-n bug.
*/
x = 0;
- bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
- for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
+ ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
for (j = 0; j < 32; j++) {
if (x & (1U << j)) {
@@ -367,7 +493,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
}
}
- ret = check_eb_bitmap(bitmap, eb, len);
+ ret = check_eb_bitmap(bitmap, eb);
if (ret) {
test_err("random bit pattern failed");
return ret;
@@ -379,7 +505,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
- unsigned long *bitmap = NULL;
+ unsigned long AUTO_KFREE(bitmap);
struct extent_buffer *eb = NULL;
int ret;
@@ -398,14 +524,14 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
goto out;
}
- eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, 0);
if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM;
goto out;
}
- ret = __test_eb_bitmaps(bitmap, eb, nodesize);
+ ret = __test_eb_bitmaps(bitmap, eb);
if (ret)
goto out;
@@ -415,17 +541,16 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
* Test again for case where the tree block is sectorsize aligned but
* not nodesize aligned.
*/
- eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, sectorsize);
if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM;
goto out;
}
- ret = __test_eb_bitmaps(bitmap, eb, nodesize);
+ ret = __test_eb_bitmaps(bitmap, eb);
out:
free_extent_buffer(eb);
- kfree(bitmap);
btrfs_free_dummy_fs_info(fs_info);
return ret;
}
@@ -438,10 +563,10 @@ static int test_find_first_clear_extent_bit(void)
test_msg("running find_first_clear_extent_bit test");
- extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL);
+ btrfs_extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);
/* Test correct handling of empty tree */
- find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
+ btrfs_find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
if (start != 0 || end != -1) {
test_err(
"error getting a range from completely empty tree: start %llu end %llu",
@@ -452,11 +577,11 @@ static int test_find_first_clear_extent_bit(void)
* Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between
* 4M-32M
*/
- set_extent_bits(&tree, SZ_1M, SZ_4M - 1,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
- find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != 0 || end != SZ_1M - 1) {
test_err("error finding beginning range: start %llu end %llu",
@@ -465,14 +590,14 @@ static int test_find_first_clear_extent_bit(void)
}
/* Now add 32M-64M so that we have a hole between 4M-32M */
- set_extent_bits(&tree, SZ_32M, SZ_64M - 1,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
/*
* Request first hole starting at 12M, we should get 4M-32M
*/
- find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding trimmed range: start %llu end %llu",
@@ -484,8 +609,8 @@ static int test_find_first_clear_extent_bit(void)
* Search in the middle of allocated range, should get the next one
* available, which happens to be unallocated -> 4M-32M
*/
- find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
- CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding next unalloc range: start %llu end %llu",
@@ -497,9 +622,9 @@ static int test_find_first_clear_extent_bit(void)
* Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
* being unset in this range, we should get the entry in range 64M-72M
*/
- set_extent_bits(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED);
- find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
- CHUNK_TRIMMED);
+ btrfs_set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
+ CHUNK_TRIMMED);
if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
test_err("error finding exact range: start %llu end %llu",
@@ -507,8 +632,8 @@ static int test_find_first_clear_extent_bit(void)
goto out;
}
- find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
- CHUNK_TRIMMED);
+ btrfs_find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
+ CHUNK_TRIMMED);
/*
* Search in the middle of set range whose immediate neighbour doesn't
@@ -524,7 +649,7 @@ static int test_find_first_clear_extent_bit(void)
* Search beyond any known range, shall return after last known range
* and end should be -1
*/
- find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
+ btrfs_find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
if (start != SZ_64M + SZ_8M || end != -1) {
test_err(
"error handling beyond end of range search: start %llu end %llu",
@@ -534,18 +659,160 @@ static int test_find_first_clear_extent_bit(void)
ret = 0;
out:
- clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);
+ if (ret)
+ dump_extent_io_tree(&tree);
+ btrfs_clear_extent_bit(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
return ret;
}
+static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
+ const char *test_name)
+{
+ for (int i = 0; i < eb->len; i++) {
+ struct page *page = folio_page(eb->folios[i >> PAGE_SHIFT], 0);
+ void *addr = page_address(page) + offset_in_page(i);
+
+ if (memcmp(addr, memory + i, 1) != 0) {
+ test_err("%s failed", test_name);
+ test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
+ i, *(u8 *)addr, *(u8 *)(memory + i));
+ return;
+ }
+ }
+}
+
+static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
+ const char *test_name)
+{
+ for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
+ void *eb_addr = folio_address(eb->folios[i]);
+
+ if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
+ dump_eb_and_memory_contents(eb, memory, test_name);
+ return -EUCLEAN;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Initialize both the memory buffer and the extent buffer to the same randomly
+ * generated contents.
+ */
+static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
+{
+ get_random_bytes(memory, eb->len);
+ write_extent_buffer(eb, memory, 0, eb->len);
+}
+
+static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info;
+ struct extent_buffer *eb = NULL;
+ void *memory = NULL;
+ int ret;
+
+ test_msg("running extent buffer memory operation tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ memory = kvzalloc(nodesize, GFP_KERNEL);
+ if (!memory) {
+ test_err("failed to allocate memory");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = alloc_dummy_extent_buffer(fs_info, SZ_1M);
+ if (!eb) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_eb_and_memory(eb, memory);
+ ret = verify_eb_and_memory(eb, memory, "full eb write");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory, memory + 16, 16);
+ memcpy_extent_buffer(eb, 0, 16, 16);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory, memory + 2048, 16);
+ memcpy_extent_buffer(eb, 0, 2048, 16);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ memcpy(memory, memory + 2048, 2048);
+ memcpy_extent_buffer(eb, 0, 2048, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 512, memory + 256, 512);
+ memmove_extent_buffer(eb, 512, 256, 512);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 2048, memory + 512, 2048);
+ memmove_extent_buffer(eb, 2048, 512, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ memmove(memory + 512, memory + 2048, 2048);
+ memmove_extent_buffer(eb, 512, 2048, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
+ if (ret < 0)
+ goto out;
+
+ if (nodesize > PAGE_SIZE) {
+ memcpy(memory, memory + 4096 - 128, 256);
+ memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
+ memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
+ memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
+ memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ }
+out:
+ free_extent_buffer(eb);
+ kvfree(memory);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
int ret;
test_msg("running extent I/O tests");
- ret = test_find_delalloc(sectorsize);
+ ret = test_find_delalloc(sectorsize, nodesize);
if (ret)
goto out;
@@ -554,6 +821,10 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
goto out;
ret = test_eb_bitmaps(sectorsize, nodesize);
+ if (ret)
+ goto out;
+
+ ret = test_eb_mem_ops(sectorsize, nodesize);
out:
return ret;
}
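
The new test_eb_mem_ops() above exercises the extent buffer copy/move helpers by mirroring every operation on a plain memory buffer and then comparing the two byte for byte. As a minimal sketch of that pattern, using only helpers that already appear in the hunk (memcpy_extent_buffer(), memmove_extent_buffer(), verify_eb_and_memory()); this is an illustration of the test pattern, not additional test code:

/* Mirror one non-overlapping copy on both representations and compare them. */
static int check_mirrored_memcpy(struct extent_buffer *eb, void *memory,
				 unsigned long dst, unsigned long src,
				 unsigned long len, const char *name)
{
	memcpy(memory + dst, memory + src, len);	/* plain buffer */
	memcpy_extent_buffer(eb, dst, src, len);	/* same copy inside the eb */
	return verify_eb_and_memory(eb, memory, name);	/* byte-for-byte compare */
}

/* Overlapping ranges use the memmove variants instead. */
static int check_mirrored_memmove(struct extent_buffer *eb, void *memory,
				  unsigned long dst, unsigned long src,
				  unsigned long len, const char *name)
{
	memmove(memory + dst, memory + src, len);
	memmove_extent_buffer(eb, dst, src, len);
	return verify_eb_and_memory(eb, memory, name);
}
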
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 319fed82d741..0b9f25dd1a68 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -6,32 +6,41 @@
#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
+#include "../btrfs_inode.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../block-group.h"
-static void free_extent_map_tree(struct extent_map_tree *em_tree)
+static int free_extent_map_tree(struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
struct rb_node *node;
+ int ret = 0;
- while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
- node = rb_first_cached(&em_tree->map);
+ write_lock(&em_tree->lock);
+ while (!RB_EMPTY_ROOT(&em_tree->root)) {
+ node = rb_first(&em_tree->root);
em = rb_entry(node, struct extent_map, rb_node);
- remove_extent_mapping(em_tree, em);
+ btrfs_remove_extent_mapping(inode, em);
#ifdef CONFIG_BTRFS_DEBUG
if (refcount_read(&em->refs) != 1) {
+ ret = -EINVAL;
test_err(
-"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d",
- em->start, em->len, em->block_start,
- em->block_len, refcount_read(&em->refs));
+"em leak: em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu offset %llu) refs %d",
+ em->start, em->len, em->disk_bytenr,
+ em->disk_num_bytes, em->offset,
+ refcount_read(&em->refs));
refcount_set(&em->refs, 1);
}
#endif
- free_extent_map(em);
+ btrfs_free_extent_map(em);
}
+ write_unlock(&em_tree->lock);
+
+ return ret;
}
/*
@@ -50,15 +59,16 @@ static void free_extent_map_tree(struct extent_map_tree *em_tree)
* ->add_extent_mapping(0, 16K)
* -> #handle -EEXIST
*/
-static int test_case_1(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 start = 0;
u64 len = SZ_8K;
int ret;
+ int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -67,19 +77,20 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
/* Add [0, 16K) */
em->start = 0;
em->len = SZ_16K;
- em->block_start = 0;
- em->block_len = SZ_16K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [0, 16K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Add [16K, 20K) following [0, 16K) */
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -88,18 +99,19 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
em->start = SZ_16K;
em->len = SZ_4K;
- em->block_start = SZ_32K; /* avoid merging */
- em->block_len = SZ_4K;
+ em->disk_bytenr = SZ_32K; /* avoid merging */
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_4K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [16K, 20K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -109,27 +121,35 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
/* Add [0, 8K), should return [0, 16K) instead. */
em->start = start;
em->len = len;
- em->block_start = start;
- em->block_len = len;
+ em->disk_bytenr = start;
+ em->disk_num_bytes = len;
+ em->ram_bytes = len;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret) {
test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
goto out;
}
- if (em &&
- (em->start != 0 || extent_map_end(em) != SZ_16K ||
- em->block_start != 0 || em->block_len != SZ_16K)) {
+ if (!em) {
+ test_err("case1 [%llu %llu]: no extent map returned",
+ start, start + len);
+ ret = -ENOENT;
+ goto out;
+ }
+ if (em->start != 0 || btrfs_extent_map_end(em) != SZ_16K ||
+ em->disk_bytenr != 0 || em->disk_num_bytes != SZ_16K) {
test_err(
-"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
+"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu",
start, start + len, ret, em->start, em->len,
- em->block_start, em->block_len);
+ em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
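
test_case_1 above depends on a detail that is easy to miss: btrfs_add_extent_mapping() takes the extent map by double pointer, and when the requested range overlaps an existing mapping it can hand that existing map back through *em while still returning 0 (the -EEXIST handling the scenario comments refer to). A hedged usage sketch of that contract, using only names from the hunks above; this is a fragment to illustrate the checks the test performs, not a drop-in helper:

	struct extent_map *em;
	bool got_existing;
	int ret;

	em = btrfs_alloc_extent_map();
	if (!em)
		return -ENOMEM;

	/* Candidate mapping for [0, 8K), overlapping the existing [0, 16K). */
	em->start = 0;
	em->len = SZ_8K;
	em->disk_bytenr = 0;
	em->disk_num_bytes = SZ_8K;
	em->ram_bytes = SZ_8K;

	write_lock(&inode->extent_tree.lock);
	/* On overlap, *em may be replaced with the mapping already in the tree. */
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&inode->extent_tree.lock);

	/* True when the pre-existing [0, 16K) mapping was handed back. */
	got_existing = (ret == 0 && em && em->start == 0 &&
			btrfs_extent_map_end(em) == SZ_16K);

	/* The caller always drops its reference; the tree keeps its own. */
	btrfs_free_extent_map(em);

	return got_existing ? 0 : -EINVAL;
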
@@ -140,13 +160,14 @@ out:
* Reading the inline ending up with EEXIST, ie. read an inline
* extent and discard page cache and read it again.
*/
-static int test_case_2(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
+ int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -155,19 +176,20 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
/* Add [0, 1K) */
em->start = 0;
em->len = SZ_1K;
- em->block_start = EXTENT_MAP_INLINE;
- em->block_len = (u64)-1;
+ em->disk_bytenr = EXTENT_MAP_INLINE;
+ em->disk_num_bytes = 0;
+ em->ram_bytes = SZ_1K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [0, 1K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Add [4K, 8K) following [0, 1K) */
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -176,18 +198,19 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
em->start = SZ_4K;
em->len = SZ_4K;
- em->block_start = SZ_4K;
- em->block_len = SZ_4K;
+ em->disk_bytenr = SZ_4K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_4K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [4K, 8K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -197,39 +220,47 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
/* Add [0, 1K) */
em->start = 0;
em->len = SZ_1K;
- em->block_start = EXTENT_MAP_INLINE;
- em->block_len = (u64)-1;
+ em->disk_bytenr = EXTENT_MAP_INLINE;
+ em->disk_num_bytes = 0;
+ em->ram_bytes = SZ_1K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret) {
test_err("case2 [0 1K]: ret %d", ret);
goto out;
}
- if (em &&
- (em->start != 0 || extent_map_end(em) != SZ_1K ||
- em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) {
+ if (!em) {
+ test_err("case2 [0 1K]: no extent map returned");
+ ret = -ENOENT;
+ goto out;
+ }
+ if (em->start != 0 || btrfs_extent_map_end(em) != SZ_1K ||
+ em->disk_bytenr != EXTENT_MAP_INLINE) {
test_err(
-"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
- ret, em->start, em->len, em->block_start,
- em->block_len);
+"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu",
+ ret, em->start, em->len, em->disk_bytenr);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
static int __test_case_3(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree, u64 start)
+ struct btrfs_inode *inode, u64 start)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 len = SZ_4K;
int ret;
+ int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -238,18 +269,19 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
/* Add [4K, 8K) */
em->start = SZ_4K;
em->len = SZ_4K;
- em->block_start = SZ_4K;
- em->block_len = SZ_4K;
+ em->disk_bytenr = SZ_4K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_4K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [4K, 8K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -259,32 +291,40 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
/* Add [0, 16K) */
em->start = 0;
em->len = SZ_16K;
- em->block_start = 0;
- em->block_len = SZ_16K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
if (ret) {
- test_err("case3 [0x%llx 0x%llx): ret %d",
+ test_err("case3 [%llu %llu): ret %d",
start, start + len, ret);
goto out;
}
+ if (!em) {
+ test_err("case3 [%llu %llu): no extent map returned",
+ start, start + len);
+ ret = -ENOENT;
+ goto out;
+ }
/*
* Since bytes within em are contiguous, em->block_start is identical to
* em->start.
*/
- if (em &&
- (start < em->start || start + len > extent_map_end(em) ||
- em->start != em->block_start || em->len != em->block_len)) {
+ if (start < em->start || start + len > btrfs_extent_map_end(em) ||
+ em->start != btrfs_extent_map_block_start(em)) {
test_err(
-"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
+"case3 [%llu %llu): ret %d em (start %llu len %llu disk_bytenr %llu block_len %llu)",
start, start + len, ret, em->start, em->len,
- em->block_start, em->block_len);
+ em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
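
The contiguity check above used to compare em->start with the removed em->block_start field and now goes through btrfs_extent_map_block_start(). As a rough guide to what that accessor derives from the renamed fields, here is a simplified sketch; this is an assumption-level summary, the real helper lives in fs/btrfs/extent_map.h and may handle more cases:

/* Simplified sketch of the accessor used in the check above. */
static inline u64 sketch_extent_map_block_start(const struct extent_map *em)
{
	/* Holes and inline extents carry a sentinel value in disk_bytenr. */
	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
	    em->disk_bytenr == EXTENT_MAP_INLINE)
		return em->disk_bytenr;
	/* Compressed mappings point at the start of the compressed extent. */
	if (em->flags & (EXTENT_FLAG_COMPRESS_ZLIB | EXTENT_FLAG_COMPRESS_LZO |
			 EXTENT_FLAG_COMPRESS_ZSTD))
		return em->disk_bytenr;
	/* Regular extents: physical extent start plus the offset into it. */
	return em->disk_bytenr + em->offset;
}
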
@@ -305,30 +345,31 @@ out:
* -> add_extent_mapping()
* -> add_extent_mapping()
*/
-static int test_case_3(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_3(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
int ret;
- ret = __test_case_3(fs_info, em_tree, 0);
+ ret = __test_case_3(fs_info, inode, 0);
if (ret)
return ret;
- ret = __test_case_3(fs_info, em_tree, SZ_8K);
+ ret = __test_case_3(fs_info, inode, SZ_8K);
if (ret)
return ret;
- ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));
+ ret = __test_case_3(fs_info, inode, (12 * SZ_1K));
return ret;
}
static int __test_case_4(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree, u64 start)
+ struct btrfs_inode *inode, u64 start)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 len = SZ_4K;
int ret;
+ int ret2;
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
return -ENOMEM;
@@ -337,18 +378,19 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
/* Add [0K, 8K) */
em->start = 0;
em->len = SZ_8K;
- em->block_start = 0;
- em->block_len = SZ_8K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_8K;
+ em->ram_bytes = SZ_8K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [0, 8K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -358,18 +400,19 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
/* Add [8K, 32K) */
em->start = SZ_8K;
em->len = 24 * SZ_1K;
- em->block_start = SZ_16K; /* avoid merging */
- em->block_len = 24 * SZ_1K;
+ em->disk_bytenr = SZ_16K; /* avoid merging */
+ em->disk_num_bytes = 24 * SZ_1K;
+ em->ram_bytes = 24 * SZ_1K;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [8K, 32K)");
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = alloc_extent_map();
+ em = btrfs_alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
ret = -ENOMEM;
@@ -378,26 +421,35 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
/* Add [0K, 32K) */
em->start = 0;
em->len = SZ_32K;
- em->block_start = 0;
- em->block_len = SZ_32K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_32K;
+ em->ram_bytes = SZ_32K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
if (ret) {
- test_err("case4 [0x%llx 0x%llx): ret %d",
- start, len, ret);
+ test_err("case4 [%llu %llu): ret %d",
+ start, start + len, ret);
goto out;
}
- if (em && (start < em->start || start + len > extent_map_end(em))) {
+ if (!em) {
+ test_err("case4 [%llu %llu): no extent map returned",
+ start, start + len);
+ ret = -ENOENT;
+ goto out;
+ }
+ if (start < em->start || start + len > btrfs_extent_map_end(em)) {
test_err(
-"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
- start, len, ret, em->start, em->len, em->block_start,
- em->block_len);
+"case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu)",
+ start, start + len, ret, em->start, em->len,
+ em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
@@ -427,15 +479,519 @@ out:
* # handle -EEXIST when adding
* # [0, 32K)
*/
-static int test_case_4(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_4(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
int ret;
- ret = __test_case_4(fs_info, em_tree, 0);
+ ret = __test_case_4(fs_info, inode, 0);
if (ret)
return ret;
- ret = __test_case_4(fs_info, em_tree, SZ_4K);
+ ret = __test_case_4(fs_info, inode, SZ_4K);
+
+ return ret;
+}
+
+static int add_compressed_extent(struct btrfs_inode *inode,
+ u64 start, u64 len, u64 block_start)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ int ret;
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ em->start = start;
+ em->len = len;
+ em->disk_bytenr = block_start;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = len;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ btrfs_free_extent_map(em);
+ if (ret < 0) {
+ test_err("cannot add extent map [%llu, %llu)", start, start + len);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct extent_range {
+ u64 start;
+ u64 len;
+};
+
+/* The valid states of the tree after every drop, as described below. */
+static const struct extent_range valid_ranges[][7] = {
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 3, .len = SZ_4K * 3}, /* [12k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
+ { .start = SZ_32K, .len = SZ_4K}, /* [32k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K}, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
+ }
+};
+
+static int validate_range(struct extent_map_tree *em_tree, int index)
+{
+ struct rb_node *n;
+ int i;
+
+ for (i = 0, n = rb_first(&em_tree->root);
+ valid_ranges[index][i].len && n;
+ i++, n = rb_next(n)) {
+ struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);
+
+ if (entry->start != valid_ranges[index][i].start) {
+ test_err("mapping has start %llu expected %llu",
+ entry->start, valid_ranges[index][i].start);
+ return -EINVAL;
+ }
+
+ if (entry->len != valid_ranges[index][i].len) {
+ test_err("mapping has len %llu expected %llu",
+ entry->len, valid_ranges[index][i].len);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * We exited because we don't have any more entries in the extent_map
+ * but we still expect more valid entries.
+ */
+ if (valid_ranges[index][i].len) {
+ test_err("missing an entry");
+ return -EINVAL;
+ }
+
+ /* We exited the loop but still have entries in the extent map. */
+ if (n) {
+ test_err("we have a left over entry in the extent map we didn't expect");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Test scenario:
+ *
+ * Test the various edge cases of btrfs_drop_extent_map_range, create the
+ * following ranges
+ *
+ * [0, 12k)[12k, 24k)[24k, 36k)[36k, 40k)[40k,64k)
+ *
+ * And then we'll drop:
+ *
+ * [8k, 12k) - test the single front split
+ * [12k, 20k) - test the single back split
+ * [28k, 32k) - test the double split
+ * [32k, 64k) - test whole em dropping
+ *
+ * They'll have the EXTENT_FLAG_COMPRESS_ZLIB flag set to keep the em tree from
+ * merging the ems.
+ */
+static int test_case_5(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
+{
+ u64 start, end;
+ int ret;
+ int ret2;
+
+ test_msg("Running btrfs_drop_extent_map_range tests");
+
+ /* [0, 12k) */
+ ret = add_compressed_extent(inode, 0, SZ_4K * 3, 0);
+ if (ret) {
+ test_err("cannot add extent range [0, 12K)");
+ goto out;
+ }
+
+ /* [12k, 24k) */
+ ret = add_compressed_extent(inode, SZ_4K * 3, SZ_4K * 3, SZ_4K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [24k, 36k) */
+ ret = add_compressed_extent(inode, SZ_4K * 6, SZ_4K * 3, SZ_8K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [36k, 40k) */
+ ret = add_compressed_extent(inode, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [40k, 64k) */
+ ret = add_compressed_extent(inode, SZ_4K * 10, SZ_4K * 6, SZ_16K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* Drop [8k, 12k) */
+ start = SZ_8K;
+ end = (3 * SZ_4K) - 1;
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 0);
+ if (ret)
+ goto out;
+
+ /* Drop [12k, 20k) */
+ start = SZ_4K * 3;
+ end = SZ_16K + SZ_4K - 1;
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 1);
+ if (ret)
+ goto out;
+
+ /* Drop [28k, 32k) */
+ start = SZ_32K - SZ_4K;
+ end = SZ_32K - 1;
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 2);
+ if (ret)
+ goto out;
+
+ /* Drop [32k, 64k) */
+ start = SZ_32K;
+ end = SZ_64K - 1;
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 3);
+ if (ret)
+ goto out;
+out:
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
+}
+
+/*
+ * Test the btrfs_add_extent_mapping helper which will attempt to create an em
+ * for areas between two existing ems. Validate it doesn't do this when there
+ * are two unmerged ems side by side.
+ */
+static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em = NULL;
+ int ret;
+ int ret2;
+
+ ret = add_compressed_extent(inode, 0, SZ_4K, 0);
+ if (ret)
+ goto out;
+
+ ret = add_compressed_extent(inode, SZ_4K, SZ_4K, 0);
+ if (ret)
+ goto out;
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->disk_bytenr = SZ_16K;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, 0, SZ_8K);
+ write_unlock(&em_tree->lock);
+
+ if (ret != 0) {
+ test_err("got an error when adding our em: %d", ret);
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (em->start != 0) {
+ test_err("unexpected em->start at %llu, wanted 0", em->start);
+ goto out;
+ }
+ if (em->len != SZ_4K) {
+ test_err("unexpected em->len %llu, expected 4K", em->len);
+ goto out;
+ }
+ ret = 0;
+out:
+ btrfs_free_extent_map(em);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
+}
+
+/*
+ * Regression test for btrfs_drop_extent_map_range. Calling with skip_pinned ==
+ * true would mess up the start/end calculations and subsequent splits would be
+ * incorrect.
+ */
+static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ int ret;
+ int ret2;
+
+ test_msg("Running btrfs_drop_extent_cache with pinned");
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* [0, 16K), pinned */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_16K;
+ em->flags |= (EXTENT_FLAG_PINNED | EXTENT_FLAG_COMPRESS_ZLIB);
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("couldn't add extent map");
+ goto out;
+ }
+ btrfs_free_extent_map(em);
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* [32K, 48K), not pinned */
+ em->start = SZ_32K;
+ em->len = SZ_16K;
+ em->disk_bytenr = SZ_32K;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("couldn't add extent map");
+ goto out;
+ }
+ btrfs_free_extent_map(em);
+
+ /*
+ * Drop [0, 36K). This should skip the pinned [0, 16K) extent and then split
+ * the [32K, 48K) extent.
+ */
+ btrfs_drop_extent_map_range(inode, 0, (36 * SZ_1K) - 1, true);
+
+ /* Make sure our extent maps look sane. */
+ ret = -EINVAL;
+
+ read_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, 0, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ test_err("didn't find an em at 0 as expected");
+ goto out;
+ }
+
+ if (em->start != 0) {
+ test_err("em->start is %llu, expected 0", em->start);
+ goto out;
+ }
+
+ if (em->len != SZ_16K) {
+ test_err("em->len is %llu, expected 16K", em->len);
+ goto out;
+ }
+
+ btrfs_free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (em) {
+ test_err("found an em when we weren't expecting one");
+ goto out;
+ }
+
+ read_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ test_err("didn't find an em at 32K as expected");
+ goto out;
+ }
+
+ if (em->start != (36 * SZ_1K)) {
+ test_err("em->start is %llu, expected 36K", em->start);
+ goto out;
+ }
+
+ if (em->len != (12 * SZ_1K)) {
+ test_err("em->len is %llu, expected 12K", em->len);
+ goto out;
+ }
+
+ if (btrfs_extent_map_block_start(em) != SZ_32K + SZ_4K) {
+ test_err("em->block_start is %llu, expected 36K",
+ btrfs_extent_map_block_start(em));
+ goto out;
+ }
+
+ btrfs_free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+ em = btrfs_lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
+ read_unlock(&em_tree->lock);
+ if (em) {
+ test_err("found an unexpected em above 48K");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ btrfs_free_extent_map(em);
+ /* Unpin our extent to prevent warning when removing it below. */
+ ret2 = btrfs_unpin_extent_cache(inode, 0, SZ_16K, 0);
+ if (ret == 0)
+ ret = ret2;
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
+}
+
+/*
+ * Test a regression for compressed extent map adjustment when we attempt to
+ * add an extent map that is partially overlapped by another existing extent
+ * map. The resulting extent map's offset was left unchanged even though its
+ * start was incremented.
+ */
+static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ int ret;
+ int ret2;
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Compressed extent for the file range [120K, 128K). */
+ em->start = SZ_1K * 120;
+ em->len = SZ_8K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_8K;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ btrfs_free_extent_map(em);
+ if (ret < 0) {
+ test_err("couldn't add extent map for range [120K, 128K)");
+ goto out;
+ }
+
+ em = btrfs_alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Compressed extent for the file range [108K, 144K), which overlaps
+ * with the [120K, 128K) we previously inserted.
+ */
+ em->start = SZ_1K * 108;
+ em->len = SZ_1K * 36;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_1K * 36;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+
+ /*
+ * Try to add the extent map but with a search range of [140K, 144K),
+ * this should succeed and adjust the extent map to the range
+ * [128K, 144K), with a length of 16K and an offset of 20K.
+ *
+ * This simulates a scenario where in the subvolume tree of an inode we
+ * have a compressed file extent item for the range [108K, 144K) and we
+ * have an overlapping compressed extent map for the range [120K, 128K),
+ * which was created by an encoded write, but its ordered extent was not
+ * yet completed, so the subvolume tree doesn't have yet the file extent
+ * item for that range - we only have the extent map in the inode's
+ * extent map tree.
+ */
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K);
+ write_unlock(&em_tree->lock);
+ btrfs_free_extent_map(em);
+ if (ret < 0) {
+ test_err("couldn't add extent map for range [108K, 144K)");
+ goto out;
+ }
+
+ if (em->start != SZ_128K) {
+ test_err("unexpected extent map start %llu (should be 128K)", em->start);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->len != SZ_16K) {
+ test_err("unexpected extent map length %llu (should be 16K)", em->len);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->offset != SZ_1K * 20) {
+ test_err("unexpected extent map offset %llu (should be 20K)", em->offset);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
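
The expected values checked at the end of test_case_8 follow directly from the two ranges set up above; spelled out as a worked restatement of the comments (nothing here is taken from extent_map.c itself):

/*
 * Existing mapping:           [120K, 128K)
 * New compressed file extent: [108K, 144K)  (its data begins at file offset 108K)
 *
 * After trimming the part already covered, the inserted mapping must be:
 *   em->start  = 128K                  (first byte not covered already)
 *   em->len    = 144K - 128K = 16K     (what is left of the file extent)
 *   em->offset = 128K - 108K = 20K     (bytes skipped from the extent's start)
 */
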
@@ -456,35 +1012,22 @@ struct rmap_test_vector {
static int test_rmap_block(struct btrfs_fs_info *fs_info,
struct rmap_test_vector *test)
{
- struct extent_map *em;
- struct map_lookup *map = NULL;
- u64 *logical = NULL;
+ struct btrfs_chunk_map *map;
+ u64 AUTO_KFREE(logical);
int i, out_ndaddrs, out_stripe_len;
int ret;
- em = alloc_extent_map();
- if (!em) {
- test_std_err(TEST_ALLOC_EXTENT_MAP);
- return -ENOMEM;
- }
-
- map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL);
+ map = btrfs_alloc_chunk_map(test->num_stripes, GFP_KERNEL);
if (!map) {
- kfree(em);
- test_std_err(TEST_ALLOC_EXTENT_MAP);
+ test_std_err(TEST_ALLOC_CHUNK_MAP);
return -ENOMEM;
}
- set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
/* Start at 4GiB logical address */
- em->start = SZ_4G;
- em->len = test->data_stripe_size * test->num_data_stripes;
- em->block_len = em->len;
- em->orig_block_len = test->data_stripe_size;
- em->map_lookup = map;
-
+ map->start = SZ_4G;
+ map->chunk_len = test->data_stripe_size * test->num_data_stripes;
+ map->stripe_size = test->data_stripe_size;
map->num_stripes = test->num_stripes;
- map->stripe_len = BTRFS_STRIPE_LEN;
map->type = test->raid_type;
for (i = 0; i < map->num_stripes; i++) {
@@ -499,15 +1042,14 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
map->stripes[i].physical = test->data_stripe_phys_start[i];
}
- write_lock(&fs_info->mapping_tree.lock);
- ret = add_extent_mapping(&fs_info->mapping_tree, em, 0);
- write_unlock(&fs_info->mapping_tree.lock);
+ ret = btrfs_add_chunk_map(fs_info, map);
if (ret) {
- test_err("error adding block group mapping to mapping tree");
- goto out_free;
+ test_err("error adding chunk map to mapping tree");
+ btrfs_free_chunk_map(map);
+ return ret;
}
- ret = btrfs_rmap_block(fs_info, em->start, NULL, btrfs_sb_offset(1),
+ ret = btrfs_rmap_block(fs_info, map->start, btrfs_sb_offset(1),
&logical, &out_ndaddrs, &out_stripe_len);
if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
test_err("didn't rmap anything but expected %d",
@@ -536,29 +1078,22 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
ret = 0;
out:
- write_lock(&fs_info->mapping_tree.lock);
- remove_extent_mapping(&fs_info->mapping_tree, em);
- write_unlock(&fs_info->mapping_tree.lock);
- /* For us */
- free_extent_map(em);
-out_free:
- /* For the tree */
- free_extent_map(em);
- kfree(logical);
+ btrfs_remove_chunk_map(fs_info, map);
return ret;
}
int btrfs_test_extent_map(void)
{
struct btrfs_fs_info *fs_info = NULL;
- struct extent_map_tree *em_tree;
+ struct inode *inode;
+ struct btrfs_root *root = NULL;
int ret = 0, i;
struct rmap_test_vector rmap_tests[] = {
{
/*
* Test a chunk with 2 data stripes one of which
* intersects the physical address of the super block
- * is correctly recognised.
+ * is correctly recognized.
*/
.raid_type = BTRFS_BLOCK_GROUP_RAID1,
.physical_start = SZ_64M - SZ_4M,
@@ -600,24 +1135,47 @@ int btrfs_test_extent_map(void)
return -ENOMEM;
}
- em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
- if (!em_tree) {
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
ret = -ENOMEM;
goto out;
}
- extent_map_tree_init(em_tree);
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ root = NULL;
+ goto out;
+ }
+
+ BTRFS_I(inode)->root = root;
- ret = test_case_1(fs_info, em_tree);
+ ret = test_case_1(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
+ ret = test_case_2(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
+ ret = test_case_3(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
+ ret = test_case_4(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
+ ret = test_case_5(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
+ ret = test_case_6(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_2(fs_info, em_tree);
+ ret = test_case_7(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_3(fs_info, em_tree);
+ ret = test_case_8(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_4(fs_info, em_tree);
test_msg("running rmap tests");
for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
@@ -627,7 +1185,8 @@ int btrfs_test_extent_map(void)
}
out:
- kfree(em_tree);
+ iput(inode);
+ btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
return ret;
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 8f05c1eb833f..ebf68fcd2149 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -82,7 +82,7 @@ static int test_extents(struct btrfs_block_group *cache)
}
/* Cleanup */
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
return 0;
}
@@ -149,7 +149,7 @@ static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize)
return -1;
}
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
return 0;
}
@@ -230,7 +230,7 @@ static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
return -1;
}
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
/* Now with the extent entry offset into the bitmap */
ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1);
@@ -266,7 +266,7 @@ static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
* [ bitmap ]
* [ del ]
*/
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1);
if (ret) {
test_err("couldn't add bitmap %d", ret);
@@ -291,7 +291,7 @@ static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
return -1;
}
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
/*
* This blew up before, we have part of the free space in a bitmap and
@@ -317,7 +317,7 @@ static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
return ret;
}
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
return 0;
}
@@ -629,7 +629,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
if (ret)
return ret;
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
/*
* Now test a similar scenario, but where our extent entry is located
@@ -819,11 +819,189 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
return ret;
cache->free_space_ctl->op = orig_free_space_ops;
- __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ btrfs_remove_free_space_cache(cache);
return 0;
}
+static bool bytes_index_use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ return true;
+}
+
+static int test_bytes_index(struct btrfs_block_group *cache, u32 sectorsize)
+{
+ const struct btrfs_free_space_op test_free_space_ops = {
+ .use_bitmap = bytes_index_use_bitmap,
+ };
+ const struct btrfs_free_space_op *orig_free_space_ops;
+ struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+ struct btrfs_free_space *entry;
+ struct rb_node *node;
+ u64 offset, max_extent_size, bytes;
+ int ret, i;
+
+ test_msg("running bytes index tests");
+
+ /* First just validate that it does everything in order. */
+ offset = 0;
+ for (i = 0; i < 10; i++) {
+ bytes = (i + 1) * SZ_1M;
+ ret = test_add_free_space_entry(cache, offset, bytes, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+ offset += bytes + sectorsize;
+ }
+
+ for (node = rb_first_cached(&ctl->free_space_bytes), i = 9; node;
+ node = rb_next(node), i--) {
+ entry = rb_entry(node, struct btrfs_free_space, bytes_index);
+ bytes = (i + 1) * SZ_1M;
+ if (entry->bytes != bytes) {
+ test_err("invalid bytes index order, found %llu expected %llu",
+ entry->bytes, bytes);
+ return -EINVAL;
+ }
+ }
+
+ /* Now validate bitmaps do the correct thing. */
+ btrfs_remove_free_space_cache(cache);
+ for (i = 0; i < 2; i++) {
+ offset = i * BITS_PER_BITMAP * sectorsize;
+ bytes = (i + 1) * SZ_1M;
+ ret = test_add_free_space_entry(cache, offset, bytes, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry");
+ return ret;
+ }
+ }
+
+ for (node = rb_first_cached(&ctl->free_space_bytes), i = 1; node;
+ node = rb_next(node), i--) {
+ entry = rb_entry(node, struct btrfs_free_space, bytes_index);
+ bytes = (i + 1) * SZ_1M;
+ if (entry->bytes != bytes) {
+ test_err("invalid bytes index order, found %llu expected %llu",
+ entry->bytes, bytes);
+ return -EINVAL;
+ }
+ }
+
+ /* Now validate bitmaps with different ->max_extent_size. */
+ btrfs_remove_free_space_cache(cache);
+ orig_free_space_ops = cache->free_space_ctl->op;
+ cache->free_space_ctl->op = &test_free_space_ops;
+
+ ret = test_add_free_space_entry(cache, 0, sectorsize, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry");
+ return ret;
+ }
+
+ offset = BITS_PER_BITMAP * sectorsize;
+ ret = test_add_free_space_entry(cache, offset, sectorsize, 1);
+ if (ret) {
+ test_err("couldn't add bitmap_entry");
+ return ret;
+ }
+
+ /*
+ * Now set a bunch of sectorsize extents in the first entry so its
+ * ->bytes is large.
+ */
+ for (i = 2; i < 20; i += 2) {
+ offset = sectorsize * i;
+ ret = btrfs_add_free_space(cache, offset, sectorsize);
+ if (ret) {
+ test_err("error populating sparse bitmap %d", ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Now set a contiguous extent in the second bitmap so its
+ * ->max_extent_size is larger than the first bitmap's.
+ */
+ offset = (BITS_PER_BITMAP * sectorsize) + sectorsize;
+ ret = btrfs_add_free_space(cache, offset, sectorsize);
+ if (ret) {
+ test_err("error adding contiguous extent %d", ret);
+ return ret;
+ }
+
+ /*
+ * Since we don't set ->max_extent_size unless we search, everything
+ * should be indexed on ->bytes.
+ */
+ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
+ struct btrfs_free_space, bytes_index);
+ if (entry->bytes != (10 * sectorsize)) {
+ test_err("error, wrong entry in the first slot in bytes_index");
+ return -EINVAL;
+ }
+
+ max_extent_size = 0;
+ offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 3,
+ 0, &max_extent_size);
+ if (offset != 0) {
+ test_err("found space to alloc even though we don't have enough space");
+ return -EINVAL;
+ }
+
+ if (max_extent_size != (2 * sectorsize)) {
+ test_err("got the wrong max_extent size %llu expected %llu",
+ max_extent_size, (unsigned long long)(2 * sectorsize));
+ return -EINVAL;
+ }
+
+ /*
+ * The search should have re-arranged the bytes index to use the
+ * ->max_extent_size, validate it's now what we expect it to be.
+ */
+ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
+ struct btrfs_free_space, bytes_index);
+ if (entry->bytes != (2 * sectorsize)) {
+ test_err("error, the bytes index wasn't recalculated properly");
+ return -EINVAL;
+ }
+
+ /* Add another sectorsize to re-arrange the tree back to ->bytes. */
+ offset = (BITS_PER_BITMAP * sectorsize) - sectorsize;
+ ret = btrfs_add_free_space(cache, offset, sectorsize);
+ if (ret) {
+ test_err("error adding extent to the sparse entry %d", ret);
+ return ret;
+ }
+
+ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
+ struct btrfs_free_space, bytes_index);
+ if (entry->bytes != (11 * sectorsize)) {
+ test_err("error, wrong entry in the first slot in bytes_index");
+ return -EINVAL;
+ }
+
+ /*
+ * Now make sure we find our correct entry after searching that will
+ * result in a re-arranging of the tree.
+ */
+ max_extent_size = 0;
+ offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 2,
+ 0, &max_extent_size);
+ if (offset != (BITS_PER_BITMAP * sectorsize)) {
+ test_err("error, found %llu instead of %llu for our alloc",
+ offset,
+ (unsigned long long)(BITS_PER_BITMAP * sectorsize));
+ return -EINVAL;
+ }
+
+ cache->free_space_ctl->op = orig_free_space_ops;
+ btrfs_remove_free_space_cache(cache);
+ return 0;
+}
+
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
@@ -858,7 +1036,10 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
goto out;
}
- root->fs_info->extent_root = root;
+ root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ btrfs_global_root_insert(root);
ret = test_extents(cache);
if (ret)
@@ -871,6 +1052,9 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
goto out;
ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
+ if (ret)
+ goto out;
+ ret = test_bytes_index(cache, sectorsize);
out:
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
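
The magic sizes in test_bytes_index() come straight from the entries the test builds; the following is a worked summary of the bitmap phase, restating what the code above already does, with the re-keying behaviour taken from the test's own comments:

/*
 * First bitmap:  1 sector at offset 0 plus 9 isolated sectors at offsets
 *                2, 4, ..., 18 (in units of sectorsize)
 *                -> ->bytes = 10 * sectorsize, largest run = 1 sector
 * Second bitmap: 2 adjacent sectors starting at BITS_PER_BITMAP * sectorsize
 *                -> ->bytes = 2 * sectorsize, largest run = 2 sectors
 *
 * 1) Initially the index is keyed on ->bytes, so the first bitmap sorts first
 *    (10 * sectorsize at the head of free_space_bytes).
 * 2) The failed 3-sector search records ->max_extent_size and re-keys on it,
 *    so the second bitmap (2 * sectorsize run) moves to the head.
 * 3) Adding one more sector to the first bitmap re-keys it back on ->bytes
 *    (now 11 * sectorsize), putting it at the head again.
 * 4) The final 2-sector allocation is therefore satisfied from the second
 *    bitmap, at offset BITS_PER_BITMAP * sectorsize.
 */
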
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 2c783d2f5228..c8822edd32e2 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -10,6 +10,7 @@
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../block-group.h"
+#include "../accessors.h"
struct free_space_extent {
u64 start;
@@ -31,7 +32,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
unsigned int i;
int ret;
- info = search_free_space_info(trans, cache, path, 0);
+ info = btrfs_search_free_space_info(trans, cache, path, 0);
if (IS_ERR(info)) {
test_err("could not find free space info");
ret = PTR_ERR(info);
@@ -56,7 +57,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
goto invalid;
offset = key.objectid;
while (offset < key.objectid + key.offset) {
- bit = free_space_test_bit(cache, path, offset);
+ bit = btrfs_free_space_test_bit(cache, path, offset);
if (prev_bit == 0 && bit == 1) {
extent_start = offset;
} else if (prev_bit == 1 && bit == 0) {
@@ -114,7 +115,7 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
u32 flags;
int ret;
- info = search_free_space_info(trans, cache, path, 0);
+ info = btrfs_search_free_space_info(trans, cache, path, 0);
if (IS_ERR(info)) {
test_err("could not find free space info");
btrfs_release_path(path);
@@ -130,13 +131,13 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
/* Flip it to the other format and check that for good measure. */
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
- ret = convert_free_space_to_extents(trans, cache, path);
+ ret = btrfs_convert_free_space_to_extents(trans, cache, path);
if (ret) {
test_err("could not convert to extents");
return ret;
}
} else {
- ret = convert_free_space_to_bitmaps(trans, cache, path);
+ ret = btrfs_convert_free_space_to_bitmaps(trans, cache, path);
if (ret) {
test_err("could not convert to bitmaps");
return ret;
@@ -169,9 +170,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
const struct free_space_extent extents[] = {};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start,
- cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -192,8 +192,8 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, alignment);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -215,7 +215,7 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
cache->start + cache->length - alignment,
alignment);
if (ret) {
@@ -239,9 +239,9 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start + alignment,
- alignment);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start + alignment,
+ alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -262,23 +262,22 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->start,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + alignment,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -299,24 +298,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 2 * alignment,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + alignment,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -337,29 +335,29 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->start,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 2 * alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -382,29 +380,29 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
};
int ret;
- ret = __remove_from_free_space_tree(trans, cache, path,
- cache->start, cache->length);
+ ret = __btrfs_remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->start,
- alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 4 * alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 4 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path,
- cache->start + 2 * alignment, alignment);
+ ret = __btrfs_add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -446,7 +444,10 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
btrfs_set_super_compat_ro_flags(root->fs_info->super_copy,
BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE);
- root->fs_info->free_space_root = root;
+ root->root_key.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ btrfs_global_root_insert(root);
root->fs_info->tree_root = root;
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
@@ -467,7 +468,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
}
cache->bitmap_low_thresh = 0;
cache->bitmap_high_thresh = (u32)-1;
- cache->needs_free_space = 1;
+ set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
cache->fs_info = root->fs_info;
btrfs_init_dummy_trans(&trans, root->fs_info);
@@ -479,14 +480,14 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
goto out;
}
- ret = add_block_group_free_space(&trans, cache);
+ ret = btrfs_add_block_group_free_space(&trans, cache);
if (ret) {
test_err("could not add block group free space");
goto out;
}
if (bitmaps) {
- ret = convert_free_space_to_bitmaps(&trans, cache, path);
+ ret = btrfs_convert_free_space_to_bitmaps(&trans, cache, path);
if (ret) {
test_err("could not convert block group to bitmaps");
goto out;
@@ -497,7 +498,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
if (ret)
goto out;
- ret = remove_block_group_free_space(&trans, cache);
+ ret = btrfs_remove_block_group_free_space(&trans, cache);
if (ret) {
test_err("could not remove block group free space");
goto out;
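
Both free-space test files above replace the old dedicated root pointers (fs_info->extent_root, fs_info->free_space_root) with an entry in the per-filesystem global root tree: the dummy root is given a proper root_key and registered with btrfs_global_root_insert(), which is how the code under test now locates these trees. A small sketch of the registration plus the matching lookup; the registration lines are the ones from the hunks, while the lookup via btrfs_global_root() is an assumption about the counterpart API:

	struct btrfs_key key = {
		.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};
	struct btrfs_root *found;

	/* Registration, as done in run_test() above. */
	root->root_key = key;
	btrfs_global_root_insert(root);

	/* Assumed lookup counterpart used by the code under test. */
	found = btrfs_global_root(fs_info, &key);
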
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index cac89c388131..a4c2b7748b95 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -11,6 +11,7 @@
#include "../extent_io.h"
#include "../volumes.h"
#include "../compression.h"
+#include "../accessors.h"
static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
u64 ram_bytes, u64 offset, u64 disk_bytenr,
@@ -33,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = start;
- btrfs_setup_item_for_insert(root, &path, &key, value_len);
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, 1);
btrfs_set_file_extent_type(leaf, fi, type);
@@ -63,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- btrfs_setup_item_for_insert(root, &path, &key, value_len);
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
}
/*
@@ -72,8 +81,8 @@ static void insert_inode_item_key(struct btrfs_root *root)
* diagram of how the extents will look though this may not be possible we still
* want to make sure everything acts normally (the last number is not inclusive)
*
- * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
- * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split]
+ * [0 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
+ * [inline][hole but no extent][ hole ][ regular ][regular1 split]
*
* [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
* [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written]
@@ -90,19 +99,12 @@ static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
u64 disk_bytenr = SZ_1M;
u64 offset = 0;
- /* First we want a hole */
- insert_extent(root, offset, 5, 5, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
- slot);
- slot++;
- offset += 5;
-
/*
- * Now we want an inline extent, I don't think this is possible but hey
- * why not? Also keep in mind if we have an inline extent it counts as
- * the whole first page. If we were to expand it we would have to cow
- * and we wouldn't have an inline extent anymore.
+ * Tree-checker has strict limits on inline extents that they can only
+ * exist at file offset 0, thus we can only have one inline file extent
+ * at most.
*/
- insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
+ insert_extent(root, offset, 6, 6, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
slot);
slot++;
offset = sectorsize;
@@ -115,7 +117,7 @@ static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
/* Now for a regular extent */
insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
- disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
+ disk_bytenr, sectorsize - 1, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
disk_bytenr += sectorsize;
offset += sectorsize - 1;
@@ -209,9 +211,9 @@ static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
}
-static unsigned long prealloc_only = 0;
-static unsigned long compressed_only = 0;
-static unsigned long vacancy_only = 0;
+static u32 prealloc_only = 0;
+static u32 compressed_only = 0;
+static u32 vacancy_only = 0;
static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
{
@@ -256,18 +258,18 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
/* First with no extents */
BTRFS_I(inode)->root = root;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize);
if (IS_ERR(em)) {
em = NULL;
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
- free_extent_map(em);
- btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
+ btrfs_free_extent_map(em);
+ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
/*
* All of the magic numbers are based on the mapping setup in
@@ -276,46 +278,34 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
*/
setup_file_extents(root, sectorsize);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, (u64)-1);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_INLINE) {
+ test_err("expected an inline, got %llu", em->disk_bytenr);
goto out;
}
- if (em->start != 0 || em->len != 5) {
- test_err(
- "unexpected extent wanted start 0 len 5, got start %llu len %llu",
- em->start, em->len);
- goto out;
- }
- if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
- goto out;
- }
- offset = em->start + em->len;
- free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
- if (IS_ERR(em)) {
- test_err("got an error when we shouldn't have");
- goto out;
- }
- if (em->block_start != EXTENT_MAP_INLINE) {
- test_err("expected an inline, got %llu", em->block_start);
- goto out;
- }
-
- if (em->start != offset || em->len != (sectorsize - 5)) {
+ /*
+ * For inline extent, we always round up the em to sectorsize, as
+ * they are either:
+ *
+ * a) a hidden hole
+ * The range will be zeroed at inline extent read time.
+ *
+ * b) a file extent with unaligned bytenr
+ * Tree checker will reject it.
+ */
+ if (em->start != 0 || em->len != sectorsize) {
test_err(
- "unexpected extent wanted start %llu len 1, got start %llu len %llu",
- offset, em->start, em->len);
+ "unexpected extent wanted start 0 len %u, got start %llu len %llu",
+ sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
/*
@@ -324,15 +314,15 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
* this?
*/
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 4) {
@@ -342,20 +332,20 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Regular extent */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize - 1) {
@@ -365,25 +355,24 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* The next 3 are split extents */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -393,26 +382,25 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- disk_bytenr = em->block_start;
+ disk_bytenr = btrfs_extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -422,19 +410,19 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -444,31 +432,31 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("wrong orig offset, want %llu, have %llu",
- orig_start, em->orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("wrong offset, em->start=%llu em->offset=%llu orig_start=%llu",
+ em->start, em->offset, orig_start);
goto out;
}
disk_bytenr += (em->start - orig_start);
- if (em->block_start != disk_bytenr) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr) {
test_err("wrong block start, want %llu, have %llu",
- disk_bytenr, em->block_start);
+ disk_bytenr, btrfs_extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Prealloc extent */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -478,26 +466,25 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != prealloc_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
prealloc_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* The next 3 are a half written prealloc extent */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -507,27 +494,26 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != prealloc_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
prealloc_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- disk_bytenr = em->block_start;
+ disk_bytenr = btrfs_extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_HOLE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_HOLE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -537,30 +523,29 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("unexpected orig offset, wanted %llu, have %llu",
- orig_start, em->orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("unexpected offset, wanted %llu, have %llu",
+ em->start - orig_start, em->offset);
goto out;
}
- if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr + em->offset) {
test_err("unexpected block start, wanted %llu, have %llu",
- disk_bytenr + (em->start - em->orig_start),
- em->block_start);
+ disk_bytenr + em->offset, btrfs_extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -570,32 +555,31 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != prealloc_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
prealloc_only, em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("wrong orig offset, want %llu, have %llu", orig_start,
- em->orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("wrong offset, em->start=%llu em->offset=%llu orig_start=%llu",
+ em->start, em->offset, orig_start);
goto out;
}
- if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr + em->offset) {
test_err("unexpected block start, wanted %llu, have %llu",
- disk_bytenr + (em->start - em->orig_start),
- em->block_start);
+ disk_bytenr + em->offset, btrfs_extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Now for the compressed extent */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -605,31 +589,30 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != compressed_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
compressed_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu",
- em->start, em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+ if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
test_err("unexpected compress type, wanted %d, got %d",
- BTRFS_COMPRESS_ZLIB, em->compress_type);
+ BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* Split compressed extent */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -639,32 +622,31 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != compressed_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
compressed_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu",
- em->start, em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+ if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
test_err("unexpected compress type, wanted %d, got %d",
- BTRFS_COMPRESS_ZLIB, em->compress_type);
+ BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
goto out;
}
- disk_bytenr = em->block_start;
+ disk_bytenr = btrfs_extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -674,25 +656,24 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != disk_bytenr) {
+ if (btrfs_extent_map_block_start(em) != disk_bytenr) {
test_err("block start does not match, want %llu got %llu",
- disk_bytenr, em->block_start);
+ disk_bytenr, btrfs_extent_map_block_start(em));
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -702,31 +683,31 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != compressed_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
compressed_only, em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("wrong orig offset, want %llu, have %llu",
- em->start, orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("wrong offset, em->start=%llu em->offset=%llu orig_start=%llu",
+ em->start, em->offset, orig_start);
goto out;
}
- if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+ if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
test_err("unexpected compress type, wanted %d, got %d",
- BTRFS_COMPRESS_ZLIB, em->compress_type);
+ BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
/* A hole between regular extents but no hole extent */
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset + 6, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -736,24 +717,23 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, SZ_4M);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, SZ_4M);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole extent, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole extent, got %llu", em->disk_bytenr);
goto out;
}
/*
@@ -768,25 +748,24 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != vacancy_only) {
- test_err("unexpected flags set, want %lu have %lu",
+ test_err("unexpected flags set, want %u have %u",
vacancy_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -796,18 +775,17 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, want 0 have %lu", em->flags);
+ test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong orig offset, want 0, have %llu", em->offset);
goto out;
}
ret = 0;
out:
if (!IS_ERR(em))
- free_extent_map(em);
+ btrfs_free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -860,13 +838,13 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
insert_inode_item_key(root);
insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 2 * sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != 0 || em->len != sectorsize) {
@@ -876,19 +854,20 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != vacancy_only) {
- test_err("wrong flags, wanted %lu, have %lu", vacancy_only,
+ test_err("wrong flags, wanted %u, have %u", vacancy_only,
em->flags);
goto out;
}
- free_extent_map(em);
+ btrfs_free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, 2 * sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, sectorsize, 2 * sectorsize);
if (IS_ERR(em)) {
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != sectorsize) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (btrfs_extent_map_block_start(em) != sectorsize) {
+ test_err("expected a real extent, got %llu",
+ btrfs_extent_map_block_start(em));
goto out;
}
if (em->start != sectorsize || em->len != sectorsize) {
@@ -898,14 +877,14 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
goto out;
}
if (em->flags != 0) {
- test_err("unexpected flags set, wanted 0 got %lu",
+ test_err("unexpected flags set, wanted 0 got %u",
em->flags);
goto out;
}
ret = 0;
out:
if (!IS_ERR(em))
- free_extent_map(em);
+ btrfs_free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -971,11 +950,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
- ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
- BTRFS_MAX_EXTENT_SIZE >> 1,
- (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, 0, 0, NULL);
+ ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ BTRFS_MAX_EXTENT_SIZE >> 1,
+ (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1039,11 +1017,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
- ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
- BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, 0, 0, NULL);
+ ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1074,9 +1051,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* Empty */
- ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, 0, 0, NULL);
+ ret = btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1090,9 +1066,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = 0;
out:
if (ret)
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_UPTODATE, 0, 0, NULL);
+ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW, NULL);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -1105,8 +1080,8 @@ int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
test_msg("running inode tests");
- set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only);
- set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
+ compressed_only |= EXTENT_FLAG_COMPRESS_ZLIB;
+ prealloc_only |= EXTENT_FLAG_PREALLOC;
ret = test_btrfs_get_extent(sectorsize, nodesize);
if (ret)
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 19ba7d5b7d8f..05cfda8af422 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -10,6 +10,8 @@
#include "../disk-io.h"
#include "../qgroup.h"
#include "../backref.h"
+#include "../fs.h"
+#include "../accessors.h"
static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u64 parent, u64 root_objectid)
@@ -18,7 +20,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
struct btrfs_extent_item *item;
struct btrfs_extent_inline_ref *iref;
struct btrfs_tree_block_info *block_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
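+ /* Auto-freed on return; the explicit btrfs_free_path() calls below are gone. */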
struct extent_buffer *leaf;
struct btrfs_key ins;
u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
@@ -39,7 +41,6 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
ret = btrfs_insert_empty_item(&trans, root, path, &ins, size);
if (ret) {
test_err("couldn't insert ref %d", ret);
- btrfs_free_path(path);
return ret;
}
@@ -59,7 +60,6 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
- btrfs_free_path(path);
return 0;
}
@@ -68,7 +68,7 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 refs;
int ret;
@@ -88,7 +88,6 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_err("couldn't find extent ref");
- btrfs_free_path(path);
return ret;
}
@@ -110,7 +109,6 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
ret = btrfs_insert_empty_item(&trans, root, path, &key, 0);
if (ret)
test_err("failed to insert backref");
- btrfs_free_path(path);
return ret;
}
@@ -119,7 +117,7 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
{
struct btrfs_trans_handle trans;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
btrfs_init_dummy_trans(&trans, NULL);
@@ -137,11 +135,9 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
if (ret) {
test_err("didn't find our key %d", ret);
- btrfs_free_path(path);
return ret;
}
btrfs_del_item(&trans, root, path);
- btrfs_free_path(path);
return 0;
}
@@ -150,7 +146,7 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
u64 refs;
int ret;
@@ -170,7 +166,6 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_err("couldn't find extent ref");
- btrfs_free_path(path);
return ret;
}
@@ -196,13 +191,13 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
return ret;
}
btrfs_del_item(&trans, root, path);
- btrfs_free_path(path);
return ret;
}
static int test_no_shared_qgroup(struct btrfs_root *root,
u32 sectorsize, u32 nodesize)
{
+ struct btrfs_backref_walk_ctx ctx = { 0 };
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
struct ulist *old_roots = NULL;
@@ -218,30 +213,38 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
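+ /* Set up the backref walk context used by all the root lookups in this test. */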
+ ctx.bytenr = nodesize;
+ ctx.trans = &trans;
+ ctx.fs_info = fs_info;
+
/*
* Since the test trans doesn't have the complicated delayed refs,
* we can only call btrfs_qgroup_account_extent() directly to test
* quota.
*/
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
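+ /*
+ * The walk returns its result via ctx.roots; take ownership of the
+ * ulist and clear the pointer so the context can be reused below.
+ */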
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
BTRFS_FS_TREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
new_roots);
@@ -250,32 +253,38 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
+ /* btrfs_qgroup_account_extent() always frees the ulists passed to it. */
+ old_roots = NULL;
+ new_roots = NULL;
+
if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
nodesize, nodesize)) {
test_err("qgroup counts didn't match expected values");
return -EINVAL;
}
- old_roots = NULL;
- new_roots = NULL;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
ret = remove_extent_item(root, nodesize, nodesize);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return -EINVAL;
+ }
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
new_roots);
@@ -300,6 +309,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
static int test_multiple_refs(struct btrfs_root *root,
u32 sectorsize, u32 nodesize)
{
+ struct btrfs_backref_walk_ctx ctx = { 0 };
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
struct ulist *old_roots = NULL;
@@ -320,25 +330,33 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
+ ctx.bytenr = nodesize;
+ ctx.trans = &trans;
+ ctx.fs_info = fs_info;
+
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
BTRFS_FS_TREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
new_roots);
@@ -353,25 +371,29 @@ static int test_multiple_refs(struct btrfs_root *root,
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
ret = add_tree_ref(root, nodesize, nodesize, 0,
BTRFS_FIRST_FREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
new_roots);
@@ -392,25 +414,29 @@ static int test_multiple_refs(struct btrfs_root *root,
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
ret = remove_extent_ref(root, nodesize, nodesize, 0,
BTRFS_FIRST_FREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ ret = btrfs_find_all_roots(&ctx, false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
new_roots);
@@ -455,7 +481,10 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
}
/* We are using this root as our extent root */
- root->fs_info->extent_root = root;
+ root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
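+ /* Register the dummy root as the extent tree in the global roots set. */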
+ btrfs_global_root_insert(root);
/*
* Some of the paths we test assume we have a filled out fs_info, so we
diff --git a/fs/btrfs/tests/raid-stripe-tree-tests.c b/fs/btrfs/tests/raid-stripe-tree-tests.c
new file mode 100644
index 000000000000..a7bc58a5c1e2
--- /dev/null
+++ b/fs/btrfs/tests/raid-stripe-tree-tests.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/sizes.h>
+#include "../fs.h"
+#include "../disk-io.h"
+#include "../transaction.h"
+#include "../volumes.h"
+#include "../raid-stripe-tree.h"
+#include "btrfs-tests.h"
+
+#define RST_TEST_NUM_DEVICES (2)
+#define RST_TEST_RAID1_TYPE (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
+
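+/* Not provided by linux/sizes.h, define a 48K helper for the tests below. */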
+#define SZ_48K (SZ_32K + SZ_16K)
+
+typedef int (*test_func_t)(struct btrfs_trans_handle *trans);
+
+static struct btrfs_device *btrfs_device_by_devid(struct btrfs_fs_devices *fs_devices,
+ u64 devid)
+{
+ struct btrfs_device *dev;
+
+ list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+ if (dev->devid == devid)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/*
+ * Test creating a range of three extents and then punch a hole in the middle,
+ * deleting all of the middle extents and partially deleting the "book ends".
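+ *
+ * Layout before the hole punch: extent1 [1M, 2M), extent2 [2M, 3M),
+ * extent3 [3M, 4M); the punched hole is [1M+256K, 3M+256K).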
+ */
+static int test_punch_hole_3extents(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 len1 = SZ_1M;
+ u64 logical2 = logical1 + len1;
+ u64 len2 = SZ_1M;
+ u64 logical3 = logical2 + len2;
+ u64 len3 = SZ_1M;
+ u64 hole_start = logical1 + SZ_256K;
+ u64 hole_len = SZ_2M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+
+ /* Prepare for the test, 1st create 3 x 1M extents. */
+ bioc->map_type = map_type;
+ bioc->size = len1;
+
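+ /* RAID1 over both test devices: mirror copies are placed 1G apart (devid * 1G). */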
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ bioc->size = len2;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical3;
+ bioc->size = len3;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical3 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ /*
+ * Delete a range starting at logical1 + 256K and 2M in length. Extent
+ * 1 is truncated to 256K length, extent 2 is completely dropped and
+ * extent 3 loses its first 256K, so its start moves 256K to the right.
+ */
+ ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ hole_start, hole_start + hole_len);
+ goto out;
+ }
+
+ /* Get the first extent and check its size. */
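+ /* &len1 is also an output: it is updated to the length of the extent found. */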
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len1 != SZ_256K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_256K, len1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the second extent and check it's absent. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded should fail",
+ logical2, logical2 + len2);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the third extent and check its size. */
+ logical3 += SZ_256K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical3) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical3, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len3 != SZ_1M - SZ_256K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_1M - SZ_256K, len3);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1, len1);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical3, len3);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+static int test_delete_two_extents(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 len1 = SZ_1M;
+ u64 logical2 = logical1 + len1;
+ u64 len2 = SZ_1M;
+ u64 logical3 = logical2 + len2;
+ u64 len3 = SZ_1M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+
+ /* Prepare for the test, 1st create 3 x 1M extents. */
+ bioc->map_type = map_type;
+ bioc->size = len1;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ bioc->size = len2;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical3;
+ bioc->size = len3;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical3 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ /*
+ * Delete a range starting at logical1 and 2M in length. Extents 1
+ * and 2 are dropped and extent 3 is kept as is.
+ */
+ ret = btrfs_delete_raid_extent(trans, logical1, len1 + len2);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1 + len2);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical2, logical2 + len2);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical3) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical3, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len3 != SZ_1M) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_1M, len3);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical3, len3);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/* Test punching a hole into a single RAID stripe-extent. */
+static int test_punch_hole(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 hole_start = logical1 + SZ_32K;
+ u64 hole_len = SZ_64K;
+ u64 logical2 = hole_start + hole_len;
+ u64 len = SZ_1M;
+ u64 len1 = SZ_32K;
+ u64 len2 = len - len1 - hole_len;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len, map_type, 0,
+ &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical1,
+ logical1 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_1M) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_1M, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ hole_start, hole_start + hole_len);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len1 != SZ_32K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_32K, len1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical2,
+ logical2 + len2);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical2) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical2, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len2 != len - len1 - hole_len) {
+ test_err("invalid length, expected %llu, got %llu",
+ len - len1 - hole_len, len2);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check for the absence of a stripe extent in the punched hole. */
+ ret = btrfs_get_raid_extent_offset(fs_info, hole_start, &hole_len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ ret = -EINVAL;
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ hole_start, hole_start + SZ_64K);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1, len1);
+ if (ret)
+ goto out;
+
+ ret = btrfs_delete_raid_extent(trans, logical2, len2);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test two adjacent 1M RST writes and then delete a 1M range that starts in
+ * the first item and spans into the second item. This is similar to
+ * test_front_delete(), but spanning multiple items.
+ */
+static int test_front_delete_prev_item(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 logical2 = SZ_2M;
+ u64 len = SZ_1M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ /* Insert RAID extent 1. */
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ /* Insert RAID extent 2, directly adjacent to it. */
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1 + SZ_512K, SZ_1M);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1 + SZ_512K, (u64)SZ_1M);
+ goto out;
+ }
+
+ /* Verify item 1 is truncated to 512K. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len, map_type, 0,
+ &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical1,
+ logical1 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_512K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_512K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Verify item 2's start is moved by 512K. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2 + SZ_512K, &len,
+ map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical2 + SZ_512K, logical2 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical2 + SZ_512K) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical2 + SZ_512K, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_512K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_512K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Verify there's a hole at [1M+512K, 2M+512K]. */
+ len = SZ_1M;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1 + SZ_512K, &len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID [%llu, %llu] succeeded, should fail",
+ logical1 + SZ_512K, logical1 + SZ_512K + len);
+ goto out;
+ }
+
+ /* Clean up after us. */
+ ret = btrfs_delete_raid_extent(trans, logical1, SZ_512K);
+ if (ret)
+ goto out;
+
+ ret = btrfs_delete_raid_extent(trans, logical2 + SZ_512K, SZ_512K);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
+ * delete the first 16K, making the new start address 1M+16K.
+ */
+static int test_front_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, SZ_16K);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + SZ_16K);
+ goto out;
+ }
+
+ len -= SZ_16K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_16K, &len,
+ map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical + SZ_16K, logical + SZ_64K);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical + SZ_16K) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical + SZ_16K, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_48K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_48K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ ret = -EINVAL;
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical, logical + SZ_16K);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical + SZ_16K, SZ_48K);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
+ * truncate the stripe extent down to 48K.
+ */
+static int test_tail_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ if (!io_stripe.dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical + SZ_48K, SZ_16K);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical + SZ_48K, logical + SZ_64K);
+ goto out;
+ }
+
+ len = SZ_48K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_48K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_48K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = SZ_16K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_48K, &len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded should fail",
+ logical + SZ_48K, logical + SZ_64K);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, len);
+ if (ret)
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
+ * overwrite the whole range, giving it a new physical address at an offset of
+ * 1G. The intent of this test is to exercise the 'update_raid_extent_item()'
+ * function called by btrfs_insert_one_raid_extent().
+ */
+static int test_create_update_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ if (!io_stripe.dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = SZ_1G + logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("updating RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical + SZ_1G) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical + SZ_1G, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, len);
+ if (ret)
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a simple 64K RST write on a 2 disk RAID1 at a logical address of 1M.
+ * The "physical" copy on device 0 is at 1M, on device 1 it is at 1G+1M.
+ */
+static int test_simple_create_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bioc->map_type = map_type;
+ bioc->size = SZ_64K;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ if (!io_stripe.dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, len);
+ if (ret)
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+static const test_func_t tests[] = {
+ test_simple_create_delete,
+ test_create_update_delete,
+ test_tail_delete,
+ test_front_delete,
+ test_front_delete_prev_item,
+ test_punch_hole,
+ test_punch_hole_3extents,
+ test_delete_two_extents,
+};
+
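+/*
+ * Set up a dummy fs_info with a raid stripe tree root backed by a test extent
+ * buffer, create RST_TEST_NUM_DEVICES dummy devices (devids 0..N-1) and run a
+ * single test case against a dummy transaction handle.
+ */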
+static int run_test(test_func_t test, u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_root *root = NULL;
+ int ret;
+
+ fs_info = btrfs_alloc_dummy_fs_info(sectorsize, nodesize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ btrfs_set_super_incompat_flags(root->fs_info->super_copy,
+ BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE);
+ root->root_key.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ fs_info->stripe_root = root;
+ root->fs_info->tree_root = root;
+
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
+ if (IS_ERR(root->node)) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = PTR_ERR(root->node);
+ goto out;
+ }
+ btrfs_set_header_level(root->node, 0);
+ btrfs_set_header_nritems(root->node, 0);
+ root->alloc_bytenr += 2 * nodesize;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_device *dev;
+
+ dev = btrfs_alloc_dummy_device(fs_info);
+ if (IS_ERR(dev)) {
+ test_err("cannot allocate device");
+ ret = PTR_ERR(dev);
+ goto out;
+ }
+ dev->devid = i;
+ }
+
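+ /* Attach a dummy transaction to the root and run the test case. */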
+ btrfs_init_dummy_trans(&trans, root->fs_info);
+ ret = test(&trans);
+ if (ret)
+ goto out;
+
+out:
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+
+ return ret;
+}
+
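+/*
+ * Entry point for the raid-stripe-tree self-tests, presumably called from the
+ * btrfs sanity test harness when CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled;
+ * runs each test case in turn for the given sector and node sizes.
+ */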
+int btrfs_test_raid_stripe_tree(u32 sectorsize, u32 nodesize)
+{
+ int ret = 0;
+
+ test_msg("running raid-stripe-tree tests");
+ for (int i = 0; i < ARRAY_SIZE(tests); i++) {
+ ret = run_test(tests[i], sectorsize, nodesize);
+ if (ret) {
+ test_err("test-case %ps failed with %d\n", tests[i], ret);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 1c3a1189c0bd..05ee4391c83a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -6,10 +6,12 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
+#include <linux/timekeeping.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
@@ -21,9 +23,19 @@
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
-#include "zoned.h"
-
-#define BTRFS_ROOT_TRANS_TAG 0
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "dir-item.h"
+#include "uuid-tree.h"
+#include "ioctl.h"
+#include "relocation.h"
+#include "scrub.h"
+#include "ordered-data.h"
+#include "delayed-inode.h"
+
+static struct kmem_cache *btrfs_trans_handle_cachep;
/*
* Transaction states and transitions
@@ -42,12 +54,17 @@
* | Call btrfs_commit_transaction() on any trans handle attached to
* | transaction N
* V
- * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * Transaction N [[TRANS_STATE_COMMIT_PREP]]
+ * |
+ * | If there are simultaneous calls to btrfs_commit_transaction() one will win
+ * | the race and the rest will wait for the winner to commit the transaction.
* |
- * | Will wait for previous running transaction to completely finish if there
- * | is one
+ * | The winner will wait for previous running transaction to completely finish
+ * | if there is one.
* |
- * | Then one of the following happes:
+ * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * |
+ * | Then one of the following happens:
* | - Wait for all other trans handle holders to release.
* | The btrfs_commit_transaction() caller will do the commit work.
* | - Wait for current transaction to be committed by others.
@@ -88,7 +105,7 @@
* | attached to transid N+1. |
* | |
* | To next stage: |
- * | Until all tree blocks are super blocks are |
+ * | Until all tree blocks and super blocks are |
* | written to block devices |
* V |
* Transaction N [[TRANS_STATE_COMPLETED]] V
@@ -98,6 +115,7 @@
*/
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
+ [TRANS_STATE_COMMIT_PREP] = 0U,
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
@@ -122,13 +140,10 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
- WARN_ON(refcount_read(&transaction->use_count) == 0);
if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
- WARN_ON(!RB_EMPTY_ROOT(
- &transaction->delayed_refs.href_root.rb_root));
- WARN_ON(!RB_EMPTY_ROOT(
- &transaction->delayed_refs.dirty_extent_root));
+ WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs));
+ WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
"pending csums is %llu",
@@ -146,7 +161,13 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
cache = list_first_entry(&transaction->deleted_bgs,
struct btrfs_block_group,
bg_list);
+ /*
+ * Not strictly necessary to lock, as no other task will be using a
+ * block_group on the deleted_bgs list during a transaction abort.
+ */
+ spin_lock(&transaction->fs_info->unused_bgs_lock);
list_del_init(&cache->bg_list);
+ spin_unlock(&transaction->fs_info->unused_bgs_lock);
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
}
@@ -160,15 +181,25 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root, *tmp;
- struct btrfs_caching_control *caching_ctl, *next;
+
+ /*
+ * At this point no one can be using this transaction to modify any tree
+ * and no one can start another transaction to modify any tree either.
+ */
+ ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING,
+ "cur_trans->state=%d", cur_trans->state);
down_write(&fs_info->commit_root_sem);
+
+ if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
+ fs_info->last_reloc_trans = trans->transid;
+
list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
dirty_list) {
list_del_init(&root->dirty_list);
free_extent_buffer(root->commit_root);
root->commit_root = btrfs_root_node(root);
- extent_io_tree_release(&root->dirty_log_pages);
+ btrfs_extent_io_tree_release(&root->dirty_log_pages);
btrfs_qgroup_clean_swapped_blocks(root);
}
@@ -185,46 +216,6 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
}
spin_unlock(&cur_trans->dropped_roots_lock);
- /*
- * We have to update the last_byte_to_unpin under the commit_root_sem,
- * at the same time we swap out the commit roots.
- *
- * This is because we must have a real view of the last spot the caching
- * kthreads were while caching. Consider the following views of the
- * extent tree for a block group
- *
- * commit root
- * +----+----+----+----+----+----+----+
- * |\\\\| |\\\\|\\\\| |\\\\|\\\\|
- * +----+----+----+----+----+----+----+
- * 0 1 2 3 4 5 6 7
- *
- * new commit root
- * +----+----+----+----+----+----+----+
- * | | | |\\\\| | |\\\\|
- * +----+----+----+----+----+----+----+
- * 0 1 2 3 4 5 6 7
- *
- * If the cache_ctl->progress was at 3, then we are only allowed to
- * unpin [0,1) and [2,3], because the caching thread has already
- * processed those extents. We are not allowed to unpin [5,6), because
- * the caching thread will re-start it's search from 3, and thus find
- * the hole from [4,6) to add to the free space cache.
- */
- spin_lock(&fs_info->block_group_cache_lock);
- list_for_each_entry_safe(caching_ctl, next,
- &fs_info->caching_block_groups, list) {
- struct btrfs_block_group *cache = caching_ctl->block_group;
-
- if (btrfs_block_group_done(cache)) {
- cache->last_byte_to_unpin = (u64)-1;
- list_del_init(&caching_ctl->list);
- btrfs_put_caching_control(caching_ctl);
- } else {
- cache->last_byte_to_unpin = caching_ctl->progress;
- }
- }
- spin_unlock(&fs_info->block_group_cache_lock);
up_write(&fs_info->commit_root_sem);
}
@@ -291,8 +282,10 @@ loop:
cur_trans = fs_info->running_transaction;
if (cur_trans) {
if (TRANS_ABORTED(cur_trans)) {
+ const int abort_error = cur_trans->aborted;
+
spin_unlock(&fs_info->trans_lock);
- return cur_trans->aborted;
+ return abort_error;
}
if (btrfs_blocked_trans_types[cur_trans->state] & type) {
spin_unlock(&fs_info->trans_lock);
@@ -302,15 +295,18 @@ loop:
atomic_inc(&cur_trans->num_writers);
extwriter_counter_inc(cur_trans, type);
spin_unlock(&fs_info->trans_lock);
+ btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
+ btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
return 0;
}
spin_unlock(&fs_info->trans_lock);
/*
- * If we are ATTACH, we just want to catch the current transaction,
- * and commit it. If there is no transaction, just return ENOENT.
+ * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
+ * current transaction, and commit it. If there is no transaction, just
+ * return ENOENT.
*/
- if (type == TRANS_ATTACH)
+ if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
return -ENOENT;
/*
@@ -323,16 +319,23 @@ loop:
if (!cur_trans)
return -ENOMEM;
+ btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
+ btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
+
spin_lock(&fs_info->trans_lock);
if (fs_info->running_transaction) {
/*
* someone started a transaction after we unlocked. Make sure
* to redo the checks above
*/
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
kfree(cur_trans);
goto loop;
} else if (BTRFS_FS_ERROR(fs_info)) {
spin_unlock(&fs_info->trans_lock);
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
kfree(cur_trans);
return -EROFS;
}
@@ -355,9 +358,8 @@ loop:
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
- cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
- cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
- atomic_set(&cur_trans->delayed_refs.num_entries, 0);
+ xa_init(&cur_trans->delayed_refs.head_refs);
+ xa_init(&cur_trans->delayed_refs.dirty_extents);
/*
* although the tree mod log is per file system and not per transaction,
@@ -382,14 +384,12 @@ loop:
spin_lock_init(&cur_trans->dirty_bgs_lock);
INIT_LIST_HEAD(&cur_trans->deleted_bgs);
spin_lock_init(&cur_trans->dropped_roots_lock);
- INIT_LIST_HEAD(&cur_trans->releasing_ebs);
- spin_lock_init(&cur_trans->releasing_ebs_lock);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
- extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
- IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
- extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
- IO_TREE_FS_PINNED_EXTENTS, NULL);
- fs_info->generation++;
+ btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
+ IO_TREE_TRANS_DIRTY_PAGES);
+ btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
+ IO_TREE_FS_PINNED_EXTENTS);
+ btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
cur_trans->transid = fs_info->generation;
fs_info->running_transaction = cur_trans;
cur_trans->aborted = 0;
@@ -406,14 +406,13 @@ loop:
*/
static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- int force)
+ bool force)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- root->last_trans < trans->transid) || force) {
- WARN_ON(root == fs_info->extent_root);
+ btrfs_get_root_last_trans(root) < trans->transid) || force) {
WARN_ON(!force && root->commit_root != root->node);
/*
@@ -429,15 +428,15 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
smp_wmb();
spin_lock(&fs_info->fs_roots_radix_lock);
- if (root->last_trans == trans->transid && !force) {
+ if (btrfs_get_root_last_trans(root) == trans->transid && !force) {
spin_unlock(&fs_info->fs_roots_radix_lock);
return 0;
}
radix_tree_tag_set(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&fs_info->fs_roots_radix_lock);
- root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(root, trans->transid);
/* this is pretty tricky. We don't want to
* take the relocation lock in btrfs_record_root_in_trans
@@ -480,7 +479,7 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
/* Make sure we don't try to update the root at commit time */
spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&fs_info->fs_roots_radix_lock);
}
@@ -499,7 +498,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
* and barriers
*/
smp_rmb();
- if (root->last_trans == trans->transid &&
+ if (btrfs_get_root_last_trans(root) == trans->transid &&
!test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
return 0;
@@ -531,6 +530,7 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info)
refcount_inc(&cur_trans->use_count);
spin_unlock(&fs_info->trans_lock);
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
wait_event(fs_info->transaction_wait,
cur_trans->state >= TRANS_STATE_UNBLOCKED ||
TRANS_ABORTED(cur_trans));
@@ -540,15 +540,15 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info)
}
}
-static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
+static bool may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
- return 0;
+ return false;
if (type == TRANS_START)
- return 1;
+ return true;
- return 0;
+ return false;
}
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
@@ -557,13 +557,42 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root)
if (!fs_info->reloc_ctl ||
!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
root->reloc_root)
return false;
return true;
}
+static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
+ enum btrfs_reserve_flush_enum flush,
+ u64 num_bytes,
+ u64 *delayed_refs_bytes)
+{
+ struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
+ u64 bytes = num_bytes + *delayed_refs_bytes;
+ int ret;
+
+ /*
+ * We want to reserve all the bytes we may need all at once, so we only
+ * do 1 enospc flushing cycle per transaction start.
+ */
+ ret = btrfs_reserve_metadata_bytes(si, bytes, flush);
+
+ /*
+ * If we are an emergency flush, which can steal from the global block
+ * reserve, then attempt to not reserve space for the delayed refs, as
+ * we will consume space for them from the global block reserve.
+ */
+ if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
+ bytes -= *delayed_refs_bytes;
+ *delayed_refs_bytes = 0;
+ ret = btrfs_reserve_metadata_bytes(si, bytes, flush);
+ }
+
+ return ret;
+}
+
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
unsigned int type, enum btrfs_reserve_flush_enum flush,
@@ -571,10 +600,12 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
u64 qgroup_reserved = 0;
+ u64 delayed_refs_bytes = 0;
bool reloc_reserved = false;
bool do_chunk_alloc = false;
int ret;
@@ -597,28 +628,27 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* the appropriate flushing if need be.
*/
if (num_items && root != fs_info->chunk_root) {
- struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
- u64 delayed_refs_bytes = 0;
-
qgroup_reserved = num_items * fs_info->nodesize;
- ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
- enforce_qgroups);
+ /*
+ * Use prealloc for now, as there might be a currently running
+ * transaction that could free this reserved space prematurely
+ * by committing.
+ */
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
+ enforce_qgroups, false);
if (ret)
return ERR_PTR(ret);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
/*
- * We want to reserve all the bytes we may need all at once, so
- * we only do 1 enospc flushing cycle per transaction start. We
- * accomplish this by simply assuming we'll do 2 x num_items
- * worth of delayed refs updates in this trans handle, and
- * refill that amount for whatever is missing in the reserve.
+ * If we plan to insert/update/delete "num_items" from a btree,
+ * we will also generate delayed refs for extent buffers in the
+ * respective btree paths, so reserve space for the delayed refs
+ * that will be generated by the caller as it modifies btrees.
+ * Try to reserve them to avoid excessive use of the global
+ * block reserve.
*/
- num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
- if (flush == BTRFS_RESERVE_FLUSH_ALL &&
- delayed_refs_rsv->full == 0) {
- delayed_refs_bytes = num_bytes;
- num_bytes <<= 1;
- }
+ delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);
/*
* Do the reservation for the relocation root creation
@@ -628,19 +658,17 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
reloc_reserved = true;
}
- ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
+ ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
+ &delayed_refs_bytes);
if (ret)
goto reserve_fail;
- if (delayed_refs_bytes) {
- btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
- delayed_refs_bytes);
- num_bytes -= delayed_refs_bytes;
- }
- if (rsv->space_info->force_alloc)
+ btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);
+
+ if (trans_rsv->space_info->force_alloc)
do_chunk_alloc = true;
} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
- !delayed_refs_rsv->full) {
+ !btrfs_block_rsv_full(delayed_refs_rsv)) {
/*
* Some people call with btrfs_start_transaction(root, 0)
* because they can be throttled, but have some other mechanism
@@ -692,12 +720,12 @@ again:
h->transid = cur_trans->transid;
h->transaction = cur_trans;
- h->root = root;
refcount_set(&h->use_count, 1);
h->fs_info = root->fs_info;
h->type = type;
INIT_LIST_HEAD(&h->new_bgs);
+ btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS);
smp_mb();
if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
@@ -710,8 +738,17 @@ again:
if (num_bytes) {
trace_btrfs_space_reservation(fs_info, "transaction",
h->transid, num_bytes, 1);
- h->block_rsv = &fs_info->trans_block_rsv;
+ h->block_rsv = trans_rsv;
h->bytes_reserved = num_bytes;
+ if (delayed_refs_bytes > 0) {
+ trace_btrfs_space_reservation(fs_info,
+ "local_delayed_refs_rsv",
+ h->transid,
+ delayed_refs_bytes, 1);
+ h->delayed_refs_bytes_reserved = delayed_refs_bytes;
+ btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
+ delayed_refs_bytes = 0;
+ }
h->reloc_reserved = reloc_reserved;
}
@@ -726,9 +763,10 @@ got_it:
* value here.
*/
if (do_chunk_alloc && num_bytes) {
- u64 flags = h->block_rsv->space_info->flags;
+ struct btrfs_space_info *space_info = h->block_rsv->space_info;
+ u64 flags = space_info->flags;
- btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
+ btrfs_chunk_alloc(h, space_info, btrfs_get_alloc_profile(fs_info, flags),
CHUNK_ALLOC_NO_FORCE);
}
@@ -748,8 +786,15 @@ got_it:
* not just freed.
*/
btrfs_end_transaction(h);
- return ERR_PTR(ret);
+ goto reserve_fail;
}
+ /*
+ * Now that we have found a transaction to be a part of, convert the
+ * qgroup reservation from prealloc to pertrans. A different transaction
+ * can't race in and free our pertrans out from under us.
+ */
+ if (qgroup_reserved)
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
return h;
@@ -759,10 +804,11 @@ join_fail:
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
if (num_bytes)
- btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
- num_bytes, NULL);
+ btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
+ if (delayed_refs_bytes)
+ btrfs_space_info_free_bytes_may_use(trans_rsv->space_info, delayed_refs_bytes);
reserve_fail:
- btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
}
@@ -795,7 +841,10 @@ struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *
/*
* Similar to regular join but it never starts a transaction when none is
- * running or after waiting for the current one to finish.
+ * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED.
+ * This is similar to btrfs_attach_transaction() but it allows the join to
+ * happen if the transaction commit already started but it's not yet in the
+ * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING).
*/
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
@@ -804,7 +853,7 @@ struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *roo
}
/*
- * btrfs_attach_transaction() - catch the running transaction
+ * Catch the running transaction.
*
* It is used when we want to commit the current the transaction, but
* don't want to start a new one.
@@ -823,7 +872,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
}
/*
- * btrfs_attach_transaction_barrier() - catch the running transaction
+ * Catch the running transaction.
*
* It is similar to the above function, the difference is this one
* will wait for all the inactive transactions until they fully
@@ -836,8 +885,13 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH, true);
- if (trans == ERR_PTR(-ENOENT))
- btrfs_wait_for_commit(root->fs_info, 0);
+ if (trans == ERR_PTR(-ENOENT)) {
+ int ret;
+
+ ret = btrfs_wait_for_commit(root->fs_info, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
return trans;
}
@@ -846,7 +900,46 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
static noinline void wait_for_commit(struct btrfs_transaction *commit,
const enum btrfs_trans_state min_state)
{
- wait_event(commit->commit_wait, commit->state >= min_state);
+ struct btrfs_fs_info *fs_info = commit->fs_info;
+ u64 transid = commit->transid;
+ bool put = false;
+
+ /*
+ * At the moment this function is called with min_state either being
+ * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
+ */
+ if (min_state == TRANS_STATE_COMPLETED)
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
+ else
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
+
+ while (1) {
+ wait_event(commit->commit_wait, commit->state >= min_state);
+ if (put)
+ btrfs_put_transaction(commit);
+
+ if (min_state < TRANS_STATE_COMPLETED)
+ break;
+
+ /*
+ * A transaction isn't really completed until all of the
+ * previous transactions are completed, but with fsync we can
+ * end up with SUPER_COMMITTED transactions before a COMPLETED
+ * transaction. Wait for those.
+ */
+
+ spin_lock(&fs_info->trans_lock);
+ commit = list_first_entry_or_null(&fs_info->trans_list,
+ struct btrfs_transaction,
+ list);
+ if (!commit || commit->transid > transid) {
+ spin_unlock(&fs_info->trans_lock);
+ break;
+ }
+ refcount_inc(&commit->use_count);
+ put = true;
+ spin_unlock(&fs_info->trans_lock);
+ }
}
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
@@ -855,7 +948,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
int ret = 0;
if (transid) {
- if (transid <= fs_info->last_trans_committed)
+ if (transid <= btrfs_get_last_trans_committed(fs_info))
goto out;
/* find specified transaction */
@@ -879,7 +972,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
* raced with btrfs_commit_transaction
*/
if (!cur_trans) {
- if (transid > fs_info->last_trans_committed)
+ if (transid > btrfs_get_last_trans_committed(fs_info))
ret = -EINVAL;
goto out;
}
@@ -902,6 +995,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
}
wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
+ ret = cur_trans->aborted;
btrfs_put_transaction(cur_trans);
out:
return ret;
@@ -912,16 +1006,6 @@ void btrfs_throttle(struct btrfs_fs_info *fs_info)
wait_current_trans(fs_info);
}
-static bool should_end_transaction(struct btrfs_trans_handle *trans)
-{
- struct btrfs_fs_info *fs_info = trans->fs_info;
-
- if (btrfs_check_space_for_delayed_refs(fs_info))
- return true;
-
- return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
-}
-
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_transaction *cur_trans = trans->transaction;
@@ -930,7 +1014,10 @@ bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
return true;
- return should_end_transaction(trans);
+ if (btrfs_check_space_for_delayed_refs(trans->fs_info))
+ return true;
+
+ return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50);
}
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
@@ -939,12 +1026,20 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
if (!trans->block_rsv) {
- ASSERT(!trans->bytes_reserved);
+ ASSERT(trans->bytes_reserved == 0,
+ "trans->bytes_reserved=%llu", trans->bytes_reserved);
+ ASSERT(trans->delayed_refs_bytes_reserved == 0,
+ "trans->delayed_refs_bytes_reserved=%llu",
+ trans->delayed_refs_bytes_reserved);
return;
}
- if (!trans->bytes_reserved)
+ if (!trans->bytes_reserved) {
+ ASSERT(trans->delayed_refs_bytes_reserved == 0,
+ "trans->delayed_refs_bytes_reserved=%llu",
+ trans->delayed_refs_bytes_reserved);
return;
+ }
ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
trace_btrfs_space_reservation(fs_info, "transaction",
@@ -952,6 +1047,16 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
btrfs_block_rsv_release(fs_info, trans->block_rsv,
trans->bytes_reserved, NULL);
trans->bytes_reserved = 0;
+
+ if (!trans->delayed_refs_bytes_reserved)
+ return;
+
+ trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
+ trans->transid,
+ trans->delayed_refs_bytes_reserved, 0);
+ btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
+ trans->delayed_refs_bytes_reserved, NULL);
+ trans->delayed_refs_bytes_reserved = 0;
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
@@ -959,7 +1064,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- int err = 0;
+ int ret = 0;
if (refcount_read(&trans->use_count) > 1) {
refcount_dec(&trans->use_count);
@@ -983,6 +1088,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
extwriter_counter_dec(cur_trans, trans->type);
cond_wake_up(&cur_trans->writer_wait);
+
+ btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
+ btrfs_lockdep_release(info, btrfs_trans_num_writers);
+
btrfs_put_transaction(cur_trans);
if (current->journal_info == trans)
@@ -994,13 +1103,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
wake_up_process(info->transaction_kthread);
if (TRANS_ABORTED(trans))
- err = trans->aborted;
+ ret = trans->aborted;
else
- err = -EROFS;
+ ret = -EROFS;
}
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- return err;
+ return ret;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans)
@@ -1021,21 +1130,19 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark)
{
- int err = 0;
- int werr = 0;
+ int ret = 0;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
- atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
- while (!find_first_extent_bit(dirty_pages, start, &start, &end,
- mark, &cached_state)) {
+ while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark, &cached_state)) {
bool wait_writeback = false;
- err = convert_extent_bit(dirty_pages, start, end,
- EXTENT_NEED_WAIT,
- mark, &cached_state);
+ ret = btrfs_convert_extent_bit(dirty_pages, start, end,
+ EXTENT_NEED_WAIT,
+ mark, &cached_state);
/*
* convert_extent_bit can return -ENOMEM, which is most of the
* time a temporary error. So when it happens, ignore the error
@@ -1049,23 +1156,22 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
* We cleanup any entries left in the io tree when committing
* the transaction (through extent_io_tree_release()).
*/
- if (err == -ENOMEM) {
- err = 0;
+ if (ret == -ENOMEM) {
+ ret = 0;
wait_writeback = true;
}
- if (!err)
- err = filemap_fdatawrite_range(mapping, start, end);
- if (err)
- werr = err;
- else if (wait_writeback)
- werr = filemap_fdatawait_range(mapping, start, end);
- free_extent_state(cached_state);
+ if (!ret)
+ ret = filemap_fdatawrite_range(mapping, start, end);
+ if (!ret && wait_writeback)
+ btrfs_btree_wait_writeback_range(fs_info, start, end);
+ btrfs_free_extent_state(cached_state);
+ if (ret)
+ break;
cached_state = NULL;
cond_resched();
start = end + 1;
}
- atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
- return werr;
+ return ret;
}
/*
@@ -1077,15 +1183,13 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
- int err = 0;
- int werr = 0;
- struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
+ int ret = 0;
- while (!find_first_extent_bit(dirty_pages, start, &start, &end,
- EXTENT_NEED_WAIT, &cached_state)) {
+ while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
+ EXTENT_NEED_WAIT, &cached_state)) {
/*
* Ignore -ENOMEM errors returned by clear_extent_bit().
* When committing the transaction, we'll remove any entries
@@ -1094,37 +1198,35 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
* concurrently - we do it only at transaction commit time when
* it's safe to do it (through extent_io_tree_release()).
*/
- err = clear_extent_bit(dirty_pages, start, end,
- EXTENT_NEED_WAIT, 0, 0, &cached_state);
- if (err == -ENOMEM)
- err = 0;
- if (!err)
- err = filemap_fdatawait_range(mapping, start, end);
- if (err)
- werr = err;
- free_extent_state(cached_state);
+ ret = btrfs_clear_extent_bit(dirty_pages, start, end,
+ EXTENT_NEED_WAIT, &cached_state);
+ if (ret == -ENOMEM)
+ ret = 0;
+ if (!ret)
+ btrfs_btree_wait_writeback_range(fs_info, start, end);
+ btrfs_free_extent_state(cached_state);
+ if (ret)
+ break;
cached_state = NULL;
cond_resched();
start = end + 1;
}
- if (err)
- werr = err;
- return werr;
+ return ret;
}
static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
bool errors = false;
- int err;
+ int ret;
- err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+ ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
errors = true;
- if (errors && !err)
- err = -EIO;
- return err;
+ if (errors && !ret)
+ ret = -EIO;
+ return ret;
}
int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
@@ -1132,22 +1234,23 @@ int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
struct btrfs_fs_info *fs_info = log_root->fs_info;
struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
bool errors = false;
- int err;
+ int ret;
- ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID,
+ "root_id(log_root)=%llu", btrfs_root_id(log_root));
- err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
- if ((mark & EXTENT_DIRTY) &&
+ ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+ if ((mark & EXTENT_DIRTY_LOG1) &&
test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
errors = true;
- if ((mark & EXTENT_NEW) &&
+ if ((mark & EXTENT_DIRTY_LOG2) &&
test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
errors = true;
- if (errors && !err)
- err = -EIO;
- return err;
+ if (errors && !ret)
+ ret = -EIO;
+ return ret;
}
/*
@@ -1170,7 +1273,7 @@ static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
blk_finish_plug(&plug);
ret2 = btrfs_wait_extents(fs_info, dirty_pages);
- extent_io_tree_release(&trans->transaction->dirty_pages);
+ btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);
if (ret)
return ret;
@@ -1232,10 +1335,16 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
struct list_head *io_bgs = &trans->transaction->io_bgs;
- struct list_head *next;
struct extent_buffer *eb;
int ret;
+ /*
+ * At this point no one can be using this transaction to modify any tree
+ * and no one can start another transaction to modify any tree either.
+ */
+ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
+ "trans->transaction->state=%d", trans->transaction->state);
+
eb = btrfs_lock_root_node(fs_info->tree_root);
ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
0, &eb, BTRFS_NESTING_COW);
@@ -1262,21 +1371,20 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
again:
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
struct btrfs_root *root;
- next = fs_info->dirty_cowonly_roots.next;
- list_del_init(next);
- root = list_entry(next, struct btrfs_root, dirty_list);
+
+ root = list_first_entry(&fs_info->dirty_cowonly_roots,
+ struct btrfs_root, dirty_list);
clear_bit(BTRFS_ROOT_DIRTY, &root->state);
+ list_move_tail(&root->dirty_list,
+ &trans->transaction->switch_commits);
- if (root != fs_info->extent_root)
- list_add_tail(&root->dirty_list,
- &trans->transaction->switch_commits);
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
}
/* Now flush any delayed refs generated by updating all of the roots */
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
return ret;
@@ -1291,7 +1399,7 @@ again:
* so we want to keep this flushing in this loop to make sure
* everything gets run.
*/
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
return ret;
}
@@ -1299,9 +1407,6 @@ again:
if (!list_empty(&fs_info->dirty_cowonly_roots))
goto again;
- list_add_tail(&fs_info->extent_root->dirty_list,
- &trans->transaction->switch_commits);
-
/* Update dev-replace pointer once everything is committed */
fs_info->dev_replace.committed_cursor_left =
fs_info->dev_replace.cursor_left_last_write_of_item;
@@ -1310,6 +1415,32 @@ again:
}
/*
+ * If we had a pending drop we need to see if there are any others left in our
+ * dead roots list, and if not clear our bit and wake any waiters.
+ */
+void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
+{
+ /*
+ * We put the drop in progress roots at the front of the list, so if the
+ * first entry doesn't have UNFINISHED_DROP set we can wake everybody
+ * up.
+ */
+ spin_lock(&fs_info->trans_lock);
+ if (!list_empty(&fs_info->dead_roots)) {
+ struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root,
+ root_list);
+ if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
+ spin_unlock(&fs_info->trans_lock);
+ return;
+ }
+ }
+ spin_unlock(&fs_info->trans_lock);
+
+ btrfs_wake_unfinished_drop(fs_info);
+}
+
+/*
* dead roots are old snapshots that need to be deleted. This allocates
* a dirty root struct and adds it into the list of dead roots that need to
* be deleted
@@ -1321,13 +1452,19 @@ void btrfs_add_dead_root(struct btrfs_root *root)
spin_lock(&fs_info->trans_lock);
if (list_empty(&root->root_list)) {
btrfs_grab_root(root);
- list_add_tail(&root->root_list, &fs_info->dead_roots);
+
+ /* We want to process the partially complete drops first. */
+ if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
+ list_add(&root->root_list, &fs_info->dead_roots);
+ else
+ list_add_tail(&root->root_list, &fs_info->dead_roots);
}
spin_unlock(&fs_info->trans_lock);
}
/*
- * update all the cowonly tree roots on disk
+ * Update each subvolume root and its relocation root, if it exists, in the tree
+ * of tree roots. Also free log roots if they exist.
*/
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
@@ -1336,6 +1473,13 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
int i;
int ret;
+ /*
+ * At this point no one can be using this transaction to modify any tree
+ * and no one can start another transaction to modify any tree either.
+ */
+ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
+ "trans->transaction->state=%d", trans->transaction->state);
+
spin_lock(&fs_info->fs_roots_radix_lock);
while (1) {
ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
@@ -1348,9 +1492,24 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
struct btrfs_root *root = gang[i];
int ret2;
+ /*
+ * At this point we can neither have tasks logging inodes
+ * from a root nor trying to commit a log tree.
+ */
+ ASSERT(atomic_read(&root->log_writers) == 0,
+ "atomic_read(&root->log_writers)=%d",
+ atomic_read(&root->log_writers));
+ ASSERT(atomic_read(&root->log_commit[0]) == 0,
+ "atomic_read(&root->log_commit[0])=%d",
+ atomic_read(&root->log_commit[0]));
+ ASSERT(atomic_read(&root->log_commit[1]) == 0,
+ "atomic_read(&root->log_commit[1])=%d",
+ atomic_read(&root->log_commit[1]));
+
radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
+ btrfs_qgroup_free_meta_all_pertrans(root);
spin_unlock(&fs_info->fs_roots_radix_lock);
btrfs_free_log(trans, root);
@@ -1375,7 +1534,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
if (ret2)
return ret2;
spin_lock(&fs_info->fs_roots_radix_lock);
- btrfs_qgroup_free_meta_all_pertrans(root);
}
}
spin_unlock(&fs_info->fs_roots_radix_lock);
@@ -1383,45 +1541,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
}
/*
- * defrag a given btree.
- * Every leaf in the btree is read and defragged.
- */
-int btrfs_defrag_root(struct btrfs_root *root)
-{
- struct btrfs_fs_info *info = root->fs_info;
- struct btrfs_trans_handle *trans;
- int ret;
-
- if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
- return 0;
-
- while (1) {
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
-
- ret = btrfs_defrag_leaves(trans, root);
-
- btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty(info);
- cond_resched();
-
- if (btrfs_fs_closing(info) || ret != -EAGAIN)
- break;
-
- if (btrfs_defrag_cancelled(info)) {
- btrfs_debug(info, "defrag_root cancelled");
- ret = -EAGAIN;
- break;
- }
- }
- clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
- return ret;
-}
-
-/*
* Do all special snapshot related qgroup dirty hack.
*
* Will do all needed qgroup inherit and dirty hack like switch commit
@@ -1438,11 +1557,10 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
int ret;
/*
- * Save some performance in the case that qgroups are not
- * enabled. If this check races with the ioctl, rescan will
- * kick in anyway.
+ * Save some performance in the case that qgroups are not enabled. If
+ * this check races with the ioctl, rescan will kick in anyway.
*/
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
/*
@@ -1466,18 +1584,12 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* for now flush the delayed refs to narrow the race window where the
* qgroup counters could end up wrong.
*/
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret) {
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
return ret;
}
- /*
- * We are going to commit transaction, see btrfs_commit_transaction()
- * comment for reason locking tree_log_mutex
- */
- mutex_lock(&fs_info->tree_log_mutex);
-
ret = commit_fs_roots(trans);
if (ret)
goto out;
@@ -1486,8 +1598,8 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
goto out;
/* Now qgroup are all updated, we can inherit it to new qgroups */
- ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
- inherit);
+ ret = btrfs_qgroup_inherit(trans, btrfs_root_id(src), dst_objectid,
+ btrfs_root_id(parent), inherit);
if (ret < 0)
goto out;
@@ -1513,8 +1625,6 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
"Error while writing out transaction for qgroup");
out:
- mutex_unlock(&fs_info->tree_log_mutex);
-
/*
* Force parent root to be updated, as we recorded it before so its
* last_trans == cur_transid.
@@ -1546,10 +1656,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_root *root = pending->root;
struct btrfs_root *parent_root;
struct btrfs_block_rsv *rsv;
- struct inode *parent_inode;
- struct btrfs_path *path;
+ struct btrfs_inode *parent_inode = pending->dir;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dir_item *dir_item;
- struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
struct timespec64 cur_time;
@@ -1558,6 +1667,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
u64 index = 0;
u64 objectid;
u64 root_flags;
+ unsigned int nofs_flags;
+ struct fscrypt_name fname;
ASSERT(pending->path);
path = pending->path;
@@ -1565,9 +1676,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ASSERT(pending->root_item);
new_root_item = pending->root_item;
+ /*
+ * We're inside a transaction and must make sure that any potential
+ * allocations with GFP_KERNEL in fscrypt won't recurse back to
+ * the filesystem.
+ */
+ nofs_flags = memalloc_nofs_save();
+ pending->error = fscrypt_setup_filename(&parent_inode->vfs_inode,
+ &pending->dentry->d_name, 0,
+ &fname);
+ memalloc_nofs_restore(nofs_flags);
+ if (pending->error)
+ goto free_pending;
+
pending->error = btrfs_get_free_objectid(tree_root, &objectid);
if (pending->error)
- goto no_free_objectid;
+ goto free_fname;
/*
* Make qgroup to skip current new snapshot's qgroupid, as it is
@@ -1578,7 +1702,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_reloc_pre_snapshot(pending, &to_reserve);
if (to_reserve > 0) {
- pending->error = btrfs_block_rsv_add(root,
+ pending->error = btrfs_block_rsv_add(fs_info,
&pending->block_rsv,
to_reserve,
BTRFS_RESERVE_NO_FLUSH);
@@ -1586,35 +1710,31 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto clear_skip_qgroup;
}
- key.objectid = objectid;
- key.offset = (u64)-1;
- key.type = BTRFS_ROOT_ITEM_KEY;
-
rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
trans->bytes_reserved = trans->block_rsv->reserved;
trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid,
trans->bytes_reserved, 1);
- dentry = pending->dentry;
- parent_inode = pending->dir;
- parent_root = BTRFS_I(parent_inode)->root;
+ parent_root = parent_inode->root;
ret = record_root_in_trans(trans, parent_root, 0);
if (ret)
goto fail;
- cur_time = current_time(parent_inode);
+ cur_time = current_time(&parent_inode->vfs_inode);
/*
* insert the directory item
*/
- ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
- BUG_ON(ret); /* -ENOMEM */
+ ret = btrfs_set_inode_index(parent_inode, &index);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
/* check if there is a file/dir which has the same name. */
dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
- btrfs_ino(BTRFS_I(parent_inode)),
- dentry->d_name.name,
- dentry->d_name.len, 0);
+ btrfs_ino(parent_inode),
+ &fname.disk_name, 0);
if (dir_item != NULL && !IS_ERR(dir_item)) {
pending->error = -EEXIST;
goto dir_item_existed;
@@ -1625,6 +1745,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
+ ret = btrfs_create_qgroup(trans, objectid);
+ if (ret && ret != -EEXIST) {
+ if (unlikely(ret != -ENOTCONN || btrfs_qgroup_enabled(fs_info))) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
+ }
+
/*
* pull in the delayed directory update
* and the delayed inode item
@@ -1632,13 +1760,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* snapshot
*/
ret = btrfs_run_delayed_items(trans);
- if (ret) { /* Transaction aborted */
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = record_root_in_trans(trans, root, 0);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1673,7 +1801,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
old = btrfs_lock_root_node(root);
ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
BTRFS_NESTING_COW);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_tree_unlock(old);
free_extent_buffer(old);
btrfs_abort_transaction(trans, ret);
@@ -1684,21 +1812,23 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/* clean up in any case */
btrfs_tree_unlock(old);
free_extent_buffer(old);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
/* see comments in should_cow_block() */
set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
- smp_wmb();
+ smp_mb__after_atomic();
btrfs_set_root_node(new_root_item, tmp);
/* record when the snapshot was created in key.offset */
+ key.objectid = objectid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = trans->transid;
ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1707,16 +1837,16 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* insert root back/forward references
*/
ret = btrfs_add_root_ref(trans, objectid,
- parent_root->root_key.objectid,
- btrfs_ino(BTRFS_I(parent_inode)), index,
- dentry->d_name.name, dentry->d_name.len);
- if (ret) {
+ btrfs_root_id(parent_root),
+ btrfs_ino(parent_inode), index,
+ &fname.disk_name);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
key.offset = (u64)-1;
- pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
+ pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
pending->snap = NULL;
@@ -1725,7 +1855,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
ret = btrfs_reloc_post_snapshot(trans, pending);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1736,34 +1866,36 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* To co-operate with that hack, we do hack again.
* Or snapshot will be greatly slowed down by a subtree qgroup rescan
*/
- ret = qgroup_account_snapshot(trans, root, parent_root,
- pending->inherit, objectid);
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
+ ret = qgroup_account_snapshot(trans, root, parent_root,
+ pending->inherit, objectid);
+ else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ ret = btrfs_qgroup_inherit(trans, btrfs_root_id(root), objectid,
+ btrfs_root_id(parent_root), pending->inherit);
if (ret < 0)
goto fail;
- ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
- dentry->d_name.len, BTRFS_I(parent_inode),
- &key, BTRFS_FT_DIR, index);
- /* We have check then name at the beginning, so it is impossible. */
- BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
- if (ret) {
+ ret = btrfs_insert_dir_item(trans, &fname.disk_name,
+ parent_inode, &key, BTRFS_FT_DIR,
+ index);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
- btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
- dentry->d_name.len * 2);
- parent_inode->i_mtime = parent_inode->i_ctime =
- current_time(parent_inode);
- ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
- if (ret) {
+ btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
+ fname.disk_name.len * 2);
+ inode_set_mtime_to_ts(&parent_inode->vfs_inode,
+ inode_set_ctime_current(&parent_inode->vfs_inode));
+ ret = btrfs_update_inode_fallback(trans, parent_inode);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
BTRFS_UUID_KEY_SUBVOL,
objectid);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1771,7 +1903,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
- if (ret && ret != -EEXIST) {
+ if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1784,10 +1916,11 @@ dir_item_existed:
trans->bytes_reserved = 0;
clear_skip_qgroup:
btrfs_clear_skip_qgroup(trans);
-no_free_objectid:
+free_fname:
+ fscrypt_free_filename(&fname);
+free_pending:
kfree(new_root_item);
pending->root_item = NULL;
- btrfs_free_path(path);
pending->path = NULL;
return ret;
@@ -1835,19 +1968,6 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
super->uuid_tree_generation = root_item->generation;
}
-int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
-{
- struct btrfs_transaction *trans;
- int ret = 0;
-
- spin_lock(&info->trans_lock);
- trans = info->running_transaction;
- if (trans)
- ret = (trans->state >= TRANS_STATE_COMMIT_START);
- spin_unlock(&info->trans_lock);
- return ret;
-}
-
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
struct btrfs_transaction *trans;
@@ -1861,50 +1981,14 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
return ret;
}
-/*
- * commit transactions asynchronously. once btrfs_commit_transaction_async
- * returns, any subsequent transaction will not be allowed to join.
- */
-struct btrfs_async_commit {
- struct btrfs_trans_handle *newtrans;
- struct work_struct work;
-};
-
-static void do_async_commit(struct work_struct *work)
-{
- struct btrfs_async_commit *ac =
- container_of(work, struct btrfs_async_commit, work);
-
- /*
- * We've got freeze protection passed with the transaction.
- * Tell lockdep about it.
- */
- if (ac->newtrans->type & __TRANS_FREEZABLE)
- __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
-
- current->journal_info = ac->newtrans;
-
- btrfs_commit_transaction(ac->newtrans);
- kfree(ac);
-}
-
-int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
+void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_async_commit *ac;
struct btrfs_transaction *cur_trans;
- ac = kmalloc(sizeof(*ac), GFP_NOFS);
- if (!ac)
- return -ENOMEM;
-
- INIT_WORK(&ac->work, do_async_commit);
- ac->newtrans = btrfs_join_transaction(trans->root);
- if (IS_ERR(ac->newtrans)) {
- int err = PTR_ERR(ac->newtrans);
- kfree(ac);
- return err;
- }
+ /* Kick the transaction kthread. */
+ set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
+ wake_up_process(fs_info->transaction_kthread);
/* take transaction reference */
cur_trans = trans->transaction;
@@ -1913,27 +1997,34 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
btrfs_end_transaction(trans);
/*
- * Tell lockdep we've released the freeze rwsem, since the
- * async commit thread will be the one to unlock it.
- */
- if (ac->newtrans->type & __TRANS_FREEZABLE)
- __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
-
- schedule_work(&ac->work);
- /*
* Wait for the current transaction commit to start and block
* subsequent transaction joins
*/
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
wait_event(fs_info->transaction_blocked_wait,
cur_trans->state >= TRANS_STATE_COMMIT_START ||
TRANS_ABORTED(cur_trans));
- if (current->journal_info == trans)
- current->journal_info = NULL;
-
btrfs_put_transaction(cur_trans);
- return 0;
}
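A sketch of the consumer side, assuming it mirrors the real transaction_kthread() in disk-io.c (details may differ): the kthread now commits either when the commit interval has elapsed or when the async path above set BTRFS_FS_COMMIT_TRANS.
	/* 'cur' is fs_info->running_transaction, sampled under trans_lock. */
	time64_t now = ktime_get_seconds();
	if (cur->state < TRANS_STATE_COMMIT_PREP &&
	    !test_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
	    now - cur->start_time < fs_info->commit_interval)
		goto sleep;	/* nothing to commit yet */
	trans = btrfs_attach_transaction(fs_info->tree_root);
	if (!IS_ERR(trans))
		btrfs_commit_transaction(trans);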
+/*
+ * If there is a running transaction commit it or if it's already committing,
+ * wait for its commit to complete. Does not start and commit a new transaction
+ * if there isn't any running.
+ */
+int btrfs_commit_current_transaction(struct btrfs_root *root)
+{
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_attach_transaction_barrier(root);
+ if (IS_ERR(trans)) {
+ int ret = PTR_ERR(trans);
+
+ return (ret == -ENOENT) ? 0 : ret;
+ }
+
+ return btrfs_commit_transaction(trans);
+}
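A minimal caller sketch (hypothetical, not taken from this series): flush whatever transaction is currently running and treat "no running transaction" as success, exactly as the -ENOENT translation above guarantees.
	int ret;
	ret = btrfs_commit_current_transaction(root);
	if (ret < 0)
		return ret;
	/* Anything that was part of a running transaction is now on disk. */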
static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
@@ -1956,6 +2047,12 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
if (cur_trans == fs_info->running_transaction) {
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&fs_info->trans_lock);
+
+ /*
+	 * The committing thread has already released the lockdep map as a
+	 * reader in btrfs_commit_transaction().
+ */
+ btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
@@ -1974,7 +2071,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
spin_unlock(&fs_info->trans_lock);
- btrfs_cleanup_one_transaction(trans->transaction, fs_info);
+ btrfs_cleanup_one_transaction(trans->transaction);
spin_lock(&fs_info->trans_lock);
if (cur_trans == fs_info->running_transaction)
@@ -1986,11 +2083,24 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
btrfs_put_transaction(cur_trans);
btrfs_put_transaction(cur_trans);
- trace_btrfs_transaction_commit(trans->root);
+ trace_btrfs_transaction_commit(fs_info);
if (current->journal_info == trans)
current->journal_info = NULL;
- btrfs_scrub_cancel(fs_info);
+
+ /*
+ * If relocation is running, we can't cancel scrub because that will
+ * result in a deadlock. Before relocating a block group, relocation
+ * pauses scrub, then starts and commits a transaction before unpausing
+ * scrub. If the transaction commit is being done by the relocation
+ * task or triggered by another task and the relocation task is waiting
+ * for the commit, and we end up here due to an error in the commit
+ * path, then calling btrfs_scrub_cancel() will deadlock, as we are
+ * asking for scrub to stop while having it asked to be paused higher
+ * above in relocation code.
+ */
+ if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
+ btrfs_scrub_cancel(fs_info);
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
@@ -2005,31 +2115,83 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_block_group *block_group, *tmp;
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
+ /*
+ * Not strictly necessary to lock, as no other task will be using a
+ * block_group on the new_bgs list during a transaction abort.
+ */
+ spin_lock(&fs_info->unused_bgs_lock);
list_del_init(&block_group->bg_list);
+ btrfs_put_block_group(block_group);
+ spin_unlock(&fs_info->unused_bgs_lock);
}
}
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
/*
- * We use writeback_inodes_sb here because if we used
+ * We use try_to_writeback_inodes_sb() here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
* Currently are holding the fs freeze lock, if we do an async flush
* we'll do btrfs_join_transaction() and deadlock because we need to
* wait for the fs freeze lock. Using the direct flushing we benefit
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
+ *
+ * Note that try_to_writeback_inodes_sb() will only trigger writeback
+ * if it can read lock sb->s_umount. It will always be able to lock it,
+ * except when the filesystem is being unmounted or being frozen, but in
+ * those cases sync_filesystem() is called, which results in calling
+ * writeback_inodes_sb() while holding a write lock on sb->s_umount.
+ * Note that we don't call writeback_inodes_sb() directly, because it
+ * will emit a warning if sb->s_umount is not locked.
*/
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+ try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
return 0;
}
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
+}
+
+/*
+ * Add the pending snapshot associated with the given transaction handle to
+ * the transaction's list of pending snapshots. This must be called after the
+ * transaction commit has started
+ * and while holding fs_info->trans_lock.
+ * This serves to guarantee a caller of btrfs_commit_transaction() that it can
+ * safely free the pending snapshot pointer in case btrfs_commit_transaction()
+ * returns an error.
+ */
+static void add_pending_snapshot(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_transaction *cur_trans = trans->transaction;
+
+ if (!trans->pending_snapshot)
+ return;
+
+ lockdep_assert_held(&trans->fs_info->trans_lock);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP,
+ "cur_trans->state=%d", cur_trans->state);
+
+ list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
+}
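The pending snapshot is handed to the commit through the transaction handle instead of being queued by the creator itself. A hedged sketch of the expected creator pattern ('pending' is assumed to be an allocated and filled struct btrfs_pending_snapshot; the real snapshot ioctl path may differ):
	trans->pending_snapshot = pending;
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		/*
		 * Safe per the guarantee documented above: on error the caller
		 * still owns the pending snapshot and may free it.
		 */
		kfree(pending);
	}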
+
+static void update_commit_stats(struct btrfs_fs_info *fs_info)
+{
+ ktime_t now = ktime_get_ns();
+ ktime_t interval = now - fs_info->commit_stats.critical_section_start_time;
+
+ ASSERT(fs_info->commit_stats.critical_section_start_time);
+
+ fs_info->commit_stats.commit_count++;
+ fs_info->commit_stats.last_commit_dur = interval;
+ fs_info->commit_stats.max_commit_dur =
+ max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
+ fs_info->commit_stats.total_commit_dur += interval;
+ fs_info->commit_stats.critical_section_start_time = 0;
}
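A hypothetical helper, not part of the patch, showing how the recorded stats reduce to an average commit duration (assuming the commit_stats fields are u64 and using div64_u64() from linux/math64.h):
static u64 example_avg_commit_dur_ns(const struct btrfs_fs_info *fs_info)
{
	u64 count = fs_info->commit_stats.commit_count;

	if (!count)
		return 0;
	return div64_u64(fs_info->commit_stats.total_commit_dur, count);
}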
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2039,13 +2201,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
struct btrfs_transaction *prev_trans = NULL;
int ret;
- ASSERT(refcount_read(&trans->use_count) == 1);
+ ASSERT(refcount_read(&trans->use_count) == 1,
+ "refcount_read(&trans->use_count)=%d", refcount_read(&trans->use_count));
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
+
+ clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
/* Stop the commit early if ->aborted is set */
if (TRANS_ABORTED(cur_trans)) {
ret = cur_trans->aborted;
- btrfs_end_transaction(trans);
- return ret;
+ goto lockdep_trans_commit_start_release;
}
btrfs_trans_release_metadata(trans);
@@ -2062,10 +2227,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* Any running threads may add more while we are here.
*/
ret = btrfs_run_delayed_refs(trans, 0);
- if (ret) {
- btrfs_end_transaction(trans);
- return ret;
- }
+ if (ret)
+ goto lockdep_trans_commit_start_release;
}
btrfs_create_pending_block_groups(trans);
@@ -2094,22 +2257,25 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (run_it) {
ret = btrfs_start_dirty_block_groups(trans);
- if (ret) {
- btrfs_end_transaction(trans);
- return ret;
- }
+ if (ret)
+ goto lockdep_trans_commit_start_release;
}
}
spin_lock(&fs_info->trans_lock);
- if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+ add_pending_snapshot(trans);
+
spin_unlock(&fs_info->trans_lock);
refcount_inc(&cur_trans->use_count);
if (trans->in_fsync)
want_state = TRANS_STATE_SUPER_COMMITTED;
+
+ btrfs_trans_state_lockdep_release(fs_info,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
ret = btrfs_end_transaction(trans);
wait_for_commit(cur_trans, want_state);
@@ -2121,17 +2287,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
}
- cur_trans->state = TRANS_STATE_COMMIT_START;
+ cur_trans->state = TRANS_STATE_COMMIT_PREP;
wake_up(&fs_info->transaction_blocked_wait);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
- if (cur_trans->list.prev != &fs_info->trans_list) {
+ if (!list_is_first(&cur_trans->list, &fs_info->trans_list)) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
if (trans->in_fsync)
want_state = TRANS_STATE_SUPER_COMMITTED;
- prev_trans = list_entry(cur_trans->list.prev,
- struct btrfs_transaction, list);
+ prev_trans = list_prev_entry(cur_trans, list);
if (prev_trans->state < want_state) {
refcount_inc(&prev_trans->use_count);
spin_unlock(&fs_info->trans_lock);
@@ -2142,12 +2308,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_put_transaction(prev_trans);
if (ret)
- goto cleanup_transaction;
- } else {
- spin_unlock(&fs_info->trans_lock);
+ goto lockdep_release;
+ spin_lock(&fs_info->trans_lock);
}
} else {
- spin_unlock(&fs_info->trans_lock);
/*
* The previous transaction was aborted and was already removed
* from the list of transactions at fs_info->trans_list. So we
@@ -2155,28 +2319,47 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* corrupt state (pointing to trees with unwritten nodes/leafs).
*/
if (BTRFS_FS_ERROR(fs_info)) {
+ spin_unlock(&fs_info->trans_lock);
ret = -EROFS;
- goto cleanup_transaction;
+ goto lockdep_release;
}
}
+ cur_trans->state = TRANS_STATE_COMMIT_START;
+ wake_up(&fs_info->transaction_blocked_wait);
+ spin_unlock(&fs_info->trans_lock);
+
+ /*
+ * Get the time spent on the work done by the commit thread and not
+ * the time spent waiting on a previous commit
+ */
+ fs_info->commit_stats.critical_section_start_time = ktime_get_ns();
extwriter_counter_dec(cur_trans, trans->type);
ret = btrfs_start_delalloc_flush(fs_info);
if (ret)
- goto cleanup_transaction;
+ goto lockdep_release;
ret = btrfs_run_delayed_items(trans);
if (ret)
- goto cleanup_transaction;
+ goto lockdep_release;
+ /*
+ * The thread has started/joined the transaction thus it holds the
+ * lockdep map as a reader. It has to release it before acquiring the
+ * lockdep map as a writer.
+ */
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
+ btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
wait_event(cur_trans->writer_wait,
extwriter_counter_read(cur_trans) == 0);
/* some pending stuffs might be added after the previous flush. */
ret = btrfs_run_delayed_items(trans);
- if (ret)
+ if (ret) {
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
goto cleanup_transaction;
+ }
btrfs_wait_delalloc_flush(fs_info);
@@ -2185,6 +2368,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* transaction. Otherwise if this transaction commits before the ordered
* extents complete we lose logged data after a power failure.
*/
+ btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
wait_event(cur_trans->pending_wait,
atomic_read(&cur_trans->pending_ordered) == 0);
@@ -2195,13 +2379,40 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&fs_info->trans_lock);
+ add_pending_snapshot(trans);
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&fs_info->trans_lock);
+
+ /*
+ * The thread has started/joined the transaction thus it holds the
+ * lockdep map as a reader. It has to release it before acquiring the
+ * lockdep map as a writer.
+ */
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
+ btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
+ /*
+ * Make lockdep happy by acquiring the state locks after
+ * btrfs_trans_num_writers is released. If we acquired the state locks
+ * before releasing the btrfs_trans_num_writers lock then lockdep would
+ * complain because we did not follow the reverse order unlocking rule.
+ */
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
+
+ /*
+ * We've started the commit, clear the flag in case we were triggered to
+ * do an async commit but somebody else started before the transaction
+ * kthread could do the work.
+ */
+ clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
+
if (TRANS_ABORTED(cur_trans)) {
ret = cur_trans->aborted;
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
goto scrub_continue;
}
/*
@@ -2227,14 +2438,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* them.
*
* We needn't worry that this operation will corrupt the snapshots,
- * because all the tree which are snapshoted will be forced to COW
+ * because all the trees which are snapshotted will be forced to COW
* the nodes and leaves.
*/
ret = btrfs_run_delayed_items(trans);
if (ret)
goto unlock_reloc;
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
goto unlock_reloc;
@@ -2246,30 +2457,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
WARN_ON(cur_trans != trans->transaction);
- /* btrfs_commit_tree_roots is responsible for getting the
- * various roots consistent with each other. Every pointer
- * in the tree of tree roots has to point to the most up to date
- * root for every subvolume and other tree. So, we have to keep
- * the tree logging code from jumping in and changing any
- * of the trees.
- *
- * At this point in the commit, there can't be any tree-log
- * writers, but a little lower down we drop the trans mutex
- * and let new people in. By holding the tree_log_mutex
- * from now until after the super is written, we avoid races
- * with the tree-log code.
- */
- mutex_lock(&fs_info->tree_log_mutex);
-
ret = commit_fs_roots(trans);
if (ret)
- goto unlock_tree_log;
-
- /*
- * Since the transaction is done, we can apply the pending changes
- * before the next transaction.
- */
- btrfs_apply_pending_changes(fs_info);
+ goto unlock_reloc;
/* commit_fs_roots gets rid of all the tree log roots, it is now
* safe to free the root of tree log roots
@@ -2282,11 +2472,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
*/
ret = btrfs_qgroup_account_extents(trans);
if (ret < 0)
- goto unlock_tree_log;
+ goto unlock_reloc;
ret = commit_cowonly_roots(trans);
if (ret)
- goto unlock_tree_log;
+ goto unlock_reloc;
/*
* The tasks which save the space cache and inode cache may also
@@ -2294,7 +2484,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
*/
if (TRANS_ABORTED(cur_trans)) {
ret = cur_trans->aborted;
- goto unlock_tree_log;
+ goto unlock_reloc;
}
cur_trans = fs_info->running_transaction;
@@ -2309,6 +2499,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
list_add_tail(&fs_info->chunk_root->dirty_list,
&cur_trans->switch_commits);
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_set_root_node(&fs_info->block_group_root->root_item,
+ fs_info->block_group_root->node);
+ list_add_tail(&fs_info->block_group_root->dirty_list,
+ &cur_trans->switch_commits);
+ }
+
switch_commit_roots(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs));
@@ -2327,6 +2524,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_trans_release_chunk_metadata(trans);
+ /*
+ * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
+ * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
+ * make sure that before we commit our superblock, no other task can
+ * start a new transaction and commit a log tree before we commit our
+ * superblock. Anyone trying to commit a log tree locks this mutex before
+ * writing its superblock.
+ */
+ mutex_lock(&fs_info->tree_log_mutex);
+
spin_lock(&fs_info->trans_lock);
cur_trans->state = TRANS_STATE_UNBLOCKED;
fs_info->running_transaction = NULL;
@@ -2334,26 +2541,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
mutex_unlock(&fs_info->reloc_mutex);
wake_up(&fs_info->transaction_wait);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
+
+ /* If we have features changed, wake up the cleaner to update sysfs. */
+ if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
+ fs_info->cleaner_kthread)
+ wake_up_process(fs_info->cleaner_kthread);
ret = btrfs_write_and_wait_transaction(trans);
if (ret) {
btrfs_handle_fs_error(fs_info, ret,
"Error while writing out transaction");
- /*
- * reloc_mutex has been unlocked, tree_log_mutex is still held
- * but we can't jump to unlock_tree_log causing double unlock
- */
mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue;
}
- /*
- * At this point, we should have written all the tree blocks allocated
- * in this transaction. So it's now safe to free the redirtyied extent
- * buffers.
- */
- btrfs_free_redirty_list(cur_trans);
-
ret = write_all_supers(fs_info, 0);
/*
* the super is written, we can safely allow the tree-loggers
@@ -2363,25 +2565,30 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto scrub_continue;
+ update_commit_stats(fs_info);
/*
* We needn't acquire the lock here because there is no other task
* which can change it.
*/
cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
wake_up(&cur_trans->commit_wait);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
- btrfs_finish_extent_commit(trans);
+ ret = btrfs_finish_extent_commit(trans);
+ if (ret)
+ goto scrub_continue;
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(fs_info);
- fs_info->last_trans_committed = cur_trans->transid;
+ btrfs_set_last_trans_committed(fs_info, cur_trans->transid);
/*
* We needn't acquire the lock here because there is no other task
* which can change it.
*/
cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
spin_lock(&fs_info->trans_lock);
list_del_init(&cur_trans->list);
@@ -2393,7 +2600,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(fs_info->sb);
- trace_btrfs_transaction_commit(trans->root);
+ trace_btrfs_transaction_commit(fs_info);
btrfs_scrub_continue(fs_info);
@@ -2404,11 +2611,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
-unlock_tree_log:
- mutex_unlock(&fs_info->tree_log_mutex);
unlock_reloc:
mutex_unlock(&fs_info->reloc_mutex);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
scrub_continue:
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
btrfs_scrub_continue(fs_info);
cleanup_transaction:
btrfs_trans_release_metadata(trans);
@@ -2421,6 +2629,16 @@ cleanup_transaction:
cleanup_transaction(trans, ret);
return ret;
+
+lockdep_release:
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
+ btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
+ goto cleanup_transaction;
+
+lockdep_trans_commit_start_release:
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
+ btrfs_end_transaction(trans);
+ return ret;
}
/*
@@ -2433,10 +2651,10 @@ cleanup_transaction:
* because btrfs_commit_super will poke cleaner thread and it will process it a
* few seconds later.
*/
-int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
+int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *root;
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
if (list_empty(&fs_info->dead_roots)) {
@@ -2448,35 +2666,58 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
- btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
+ btrfs_debug(fs_info, "cleaner removing %llu", btrfs_root_id(root));
btrfs_kill_all_delayed_nodes(root);
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
- ret = btrfs_drop_snapshot(root, 0, 0);
+ ret = btrfs_drop_snapshot(root, false, false);
else
- ret = btrfs_drop_snapshot(root, 1, 0);
+ ret = btrfs_drop_snapshot(root, true, false);
btrfs_put_root(root);
return (ret < 0) ? 0 : 1;
}
-void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
+/*
+ * We only mark the transaction aborted and then set the file system read-only.
+ * This will prevent new transactions from starting or trying to join this
+ * one.
+ *
+ * This means that error recovery at the call site is limited to freeing
+ * any local memory allocations and passing the error code up without
+ * further cleanup. The transaction should complete as it normally would
+ * in the call path but will return -EIO.
+ *
+ * We'll complete the cleanup in btrfs_end_transaction and
+ * btrfs_commit_transaction.
+ */
+void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ const char *function,
+ unsigned int line, int error, bool first_hit)
{
- unsigned long prev;
- unsigned long bit;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
- prev = xchg(&fs_info->pending_changes, 0);
- if (!prev)
- return;
+ WRITE_ONCE(trans->aborted, error);
+ WRITE_ONCE(trans->transaction->aborted, error);
+ if (first_hit && error == -ENOSPC)
+ btrfs_dump_space_info_for_trans_abort(fs_info);
+ /* Wake up anybody who may be waiting on this transaction */
+ wake_up(&fs_info->transaction_wait);
+ wake_up(&fs_info->transaction_blocked_wait);
+ __btrfs_handle_fs_error(fs_info, function, line, error, NULL);
+}
- bit = 1 << BTRFS_PENDING_COMMIT;
- if (prev & bit)
- btrfs_debug(fs_info, "pending commit done");
- prev &= ~bit;
+int __init btrfs_transaction_init(void)
+{
+ btrfs_trans_handle_cachep = KMEM_CACHE(btrfs_trans_handle, SLAB_TEMPORARY);
+ if (!btrfs_trans_handle_cachep)
+ return -ENOMEM;
+ return 0;
+}
- if (prev)
- btrfs_warn(fs_info,
- "unknown pending changes left 0x%lx, ignoring", prev);
+void __cold btrfs_transaction_exit(void)
+{
+ kmem_cache_destroy(btrfs_trans_handle_cachep);
}
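A hedged sketch of how the new constructor/destructor pair is expected to be wired into module init and exit (the real sequencing lives in super.c and may differ; later_init_step() is hypothetical):
	ret = btrfs_transaction_init();
	if (ret)
		return ret;
	ret = later_init_step();
	if (ret) {
		btrfs_transaction_exit();
		return ret;
	}
	/* On module unload, the matching btrfs_transaction_exit() runs. */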
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index ba45065f9451..18ef069197e5 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -6,13 +6,35 @@
#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H
+#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/list.h>
+#include <linux/time64.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
-#include "ctree.h"
+
+struct dentry;
+struct inode;
+struct btrfs_pending_snapshot;
+struct btrfs_fs_info;
+struct btrfs_root_item;
+struct btrfs_root;
+struct btrfs_path;
+
+/*
+ * Signal that a direct IO write is in progress, to avoid deadlock for sync
+ * direct IO writes when fsync is called during the direct IO write path.
+ */
+#define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1)
+
+/* Radix-tree tag for roots that are part of the transaction. */
+#define BTRFS_ROOT_TRANS_TAG 0
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
+ TRANS_STATE_COMMIT_PREP,
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
@@ -93,19 +115,17 @@ struct btrfs_transaction {
*/
atomic_t pending_ordered;
wait_queue_head_t pending_wait;
-
- spinlock_t releasing_ebs_lock;
- struct list_head releasing_ebs;
};
-#define __TRANS_FREEZABLE (1U << 0)
-
-#define __TRANS_START (1U << 9)
-#define __TRANS_ATTACH (1U << 10)
-#define __TRANS_JOIN (1U << 11)
-#define __TRANS_JOIN_NOLOCK (1U << 12)
-#define __TRANS_DUMMY (1U << 13)
-#define __TRANS_JOIN_NOSTART (1U << 14)
+enum {
+ ENUM_BIT(__TRANS_FREEZABLE),
+ ENUM_BIT(__TRANS_START),
+ ENUM_BIT(__TRANS_ATTACH),
+ ENUM_BIT(__TRANS_JOIN),
+ ENUM_BIT(__TRANS_JOIN_NOLOCK),
+ ENUM_BIT(__TRANS_DUMMY),
+ ENUM_BIT(__TRANS_JOIN_NOSTART),
+};
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
@@ -118,11 +138,15 @@ struct btrfs_transaction {
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
+ u64 delayed_refs_bytes_reserved;
u64 chunk_bytes_reserved;
unsigned long delayed_ref_updates;
+ unsigned long delayed_ref_csum_deletions;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ /* Set by a task that wants to create a snapshot. */
+ struct btrfs_pending_snapshot *pending_snapshot;
refcount_t use_count;
unsigned int type;
/*
@@ -135,9 +159,9 @@ struct btrfs_trans_handle {
bool removing_chunk;
bool reloc_reserved;
bool in_fsync;
- struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct list_head new_bgs;
+ struct btrfs_block_rsv delayed_rsv;
};
/*
@@ -150,7 +174,7 @@ struct btrfs_trans_handle {
struct btrfs_pending_snapshot {
struct dentry *dentry;
- struct inode *dir;
+ struct btrfs_inode *dir;
struct btrfs_root *root;
struct btrfs_root_item *root_item;
struct btrfs_root *snap;
@@ -171,7 +195,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
{
spin_lock(&inode->lock);
inode->last_trans = trans->transaction->transid;
- inode->last_sub_trans = inode->root->log_transid;
+ inode->last_sub_trans = btrfs_get_root_log_transid(inode->root);
inode->last_log_commit = inode->last_sub_trans - 1;
spin_unlock(&inode->lock);
}
@@ -199,6 +223,48 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
+/*
+ * We want the transaction abort to print stack trace only for errors where the
+ * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
+ * caused by external factors.
+ */
+static inline bool btrfs_abort_should_print_stack(int error)
+{
+ switch (error) {
+ case -EIO:
+ case -EROFS:
+ case -ENOMEM:
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Call btrfs_abort_transaction as early as possible when an error condition is
+ * detected, that way the exact stack trace is reported for some errors.
+ */
+#define btrfs_abort_transaction(trans, error) \
+do { \
+ bool __first = false; \
+ /* Report first abort since mount */ \
+ if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
+ &((trans)->fs_info->fs_state))) { \
+ __first = true; \
+ if (WARN(btrfs_abort_should_print_stack(error), \
+ KERN_ERR \
+ "BTRFS: Transaction aborted (error %d)\n", \
+ (error))) { \
+ /* Stack trace printed. */ \
+ } else { \
+ btrfs_err((trans)->fs_info, \
+ "Transaction aborted (error %d)", \
+ (error)); \
+ } \
+ } \
+ __btrfs_abort_transaction((trans), __func__, \
+ __LINE__, (error), __first); \
+} while (0)
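The call-site pattern this macro is designed for is the same one used throughout the hunks above: abort at the first detected error so the printed stack trace points at the cause, then unwind.
	ret = btrfs_run_delayed_items(trans);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}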
+
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
unsigned int num_items);
@@ -214,10 +280,11 @@ struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_defrag_root(struct btrfs_root *root);
-int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
+void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
+int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
-int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
+void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
+int btrfs_commit_current_transaction(struct btrfs_root *root);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
@@ -227,11 +294,15 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
-int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
-void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
+void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ const char *function,
+ unsigned int line, int error, bool first_hit);
+
+int __init btrfs_transaction_init(void);
+void __cold btrfs_transaction_exit(void);
#endif
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 7733e8ac0a69..c21c21adf61e 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -18,13 +18,18 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/error-injection.h>
+#include "messages.h"
#include "ctree.h"
#include "tree-checker.h"
-#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
#include "misc.h"
-#include "btrfs_inode.h"
+#include "fs.h"
+#include "accessors.h"
+#include "file-item.h"
+#include "inode-item.h"
+#include "dir-item.h"
+#include "extent-tree.h"
/*
* Error message should follow the following format:
@@ -60,6 +65,7 @@ static void generic_err(const struct extent_buffer *eb, int slot,
vaf.fmt = fmt;
vaf.va = &args;
+ dump_page(folio_page(eb->folios[0], 0), "eb page dump");
btrfs_crit(fs_info,
"corrupt %s: root=%llu block=%llu slot=%d, %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -87,6 +93,7 @@ static void file_extent_err(const struct extent_buffer *eb, int slot,
vaf.fmt = fmt;
vaf.va = &args;
+ dump_page(folio_page(eb->folios[0], 0), "eb page dump");
btrfs_crit(fs_info,
"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -147,6 +154,7 @@ static void dir_item_err(const struct extent_buffer *eb, int slot,
vaf.fmt = fmt;
vaf.va = &args;
+ dump_page(folio_page(eb->folios[0], 0), "eb page dump");
btrfs_crit(fs_info,
"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -175,15 +183,16 @@ static bool check_prev_ino(struct extent_buffer *leaf,
/* Only these key->types needs to be checked */
ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_INODE_EXTREF_KEY ||
key->type == BTRFS_DIR_INDEX_KEY ||
key->type == BTRFS_DIR_ITEM_KEY ||
- key->type == BTRFS_EXTENT_DATA_KEY);
+ key->type == BTRFS_EXTENT_DATA_KEY, "key->type=%u", key->type);
/*
* Only subvolume trees along with their reloc trees need this check.
* Things like log tree doesn't follow this ino requirement.
*/
- if (!is_fstree(btrfs_header_owner(leaf)))
+ if (!btrfs_is_fstree(btrfs_header_owner(leaf)))
return true;
if (key->objectid == prev_key->objectid)
@@ -202,7 +211,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_file_extent_item *fi;
u32 sectorsize = fs_info->sectorsize;
- u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 item_size = btrfs_item_size(leaf, slot);
u64 extent_end;
if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
@@ -332,6 +341,24 @@ static int check_extent_data_item(struct extent_buffer *leaf,
}
}
+ /*
+ * For non-compressed data extents, ram_bytes should match its
+ * disk_num_bytes.
+ * However we do not really utilize ram_bytes in this case, so this check
+ * is only done for DEBUG builds, to help developers catch such
+ * unexpected behaviors.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_DEBUG) &&
+ btrfs_file_extent_compression(leaf, fi) == BTRFS_COMPRESS_NONE &&
+ btrfs_file_extent_disk_bytenr(leaf, fi)) {
+ if (WARN_ON(btrfs_file_extent_ram_bytes(leaf, fi) !=
+ btrfs_file_extent_disk_num_bytes(leaf, fi)))
+ file_extent_err(leaf, slot,
+"mismatch ram_bytes (%llu) and disk_num_bytes (%llu) for non-compressed extent",
+ btrfs_file_extent_ram_bytes(leaf, fi),
+ btrfs_file_extent_disk_num_bytes(leaf, fi));
+ }
+
return 0;
}
@@ -354,17 +381,17 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
key->offset, sectorsize);
return -EUCLEAN;
}
- if (unlikely(!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize))) {
+ if (unlikely(!IS_ALIGNED(btrfs_item_size(leaf, slot), csumsize))) {
generic_err(leaf, slot,
"unaligned item size for csum item, have %u should be aligned to %u",
- btrfs_item_size_nr(leaf, slot), csumsize);
+ btrfs_item_size(leaf, slot), csumsize);
return -EUCLEAN;
}
if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
u64 prev_csum_end;
u32 prev_item_size;
- prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
+ prev_item_size = btrfs_item_size(leaf, slot - 1);
prev_csum_end = (prev_item_size / csumsize) * sectorsize;
prev_csum_end += prev_key->offset;
if (unlikely(prev_csum_end > key->offset)) {
@@ -442,6 +469,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
btrfs_item_key_to_cpu(leaf, &item_key, slot);
is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);
+ /*
+ * Bad rootid for reloc trees.
+ *
+ * Reloc trees are only for subvolume trees, other trees only need
+ * to be COWed to be relocated.
+ */
+ if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
+ !btrfs_is_fstree(key->offset))) {
+ generic_err(leaf, slot,
+ "invalid reloc tree for root %lld, root id is not a subvolume tree",
+ key->offset);
+ return -EUCLEAN;
+ }
+
/* No such tree id */
if (unlikely(key->objectid == 0)) {
if (is_root_item)
@@ -453,7 +494,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
}
/* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */
- if (unlikely(!is_fstree(key->objectid) && !is_root_item)) {
+ if (unlikely(!btrfs_is_fstree(key->objectid) && !is_root_item)) {
dir_item_err(leaf, slot,
"invalid location key objectid, have %llu expect [%llu, %llu]",
key->objectid, BTRFS_FIRST_FREE_OBJECTID,
@@ -483,7 +524,7 @@ static int check_dir_item(struct extent_buffer *leaf,
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_dir_item *di;
- u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 item_size = btrfs_item_size(leaf, slot);
u32 cur = 0;
if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
@@ -528,10 +569,11 @@ static int check_dir_item(struct extent_buffer *leaf,
}
/* dir type check */
- dir_type = btrfs_dir_type(leaf, di);
- if (unlikely(dir_type >= BTRFS_FT_MAX)) {
+ dir_type = btrfs_dir_ftype(leaf, di);
+ if (unlikely(dir_type <= BTRFS_FT_UNKNOWN ||
+ dir_type >= BTRFS_FT_MAX)) {
dir_item_err(leaf, slot,
- "invalid dir item type, have %u expect [0, %u)",
+ "invalid dir item type, have %u expect (0, %u)",
dir_type, BTRFS_FT_MAX);
return -EUCLEAN;
}
@@ -594,7 +636,7 @@ static int check_dir_item(struct extent_buffer *leaf,
*/
if (key->type == BTRFS_DIR_ITEM_KEY ||
key->type == BTRFS_XATTR_ITEM_KEY) {
- char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+ char namebuf[MAX(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
read_extent_buffer(leaf, namebuf,
(unsigned long)(di + 1), name_len);
@@ -628,6 +670,7 @@ static void block_group_err(const struct extent_buffer *eb, int slot,
vaf.fmt = fmt;
vaf.va = &args;
+ dump_page(folio_page(eb->folios[0], 0), "eb page dump");
btrfs_crit(fs_info,
"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -639,8 +682,10 @@ static void block_group_err(const struct extent_buffer *eb, int slot,
static int check_block_group_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_block_group_item bgi;
- u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 item_size = btrfs_item_size(leaf, slot);
+ u64 chunk_objectid;
u64 flags;
u64 type;
@@ -663,8 +708,23 @@ static int check_block_group_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
sizeof(bgi));
- if (unlikely(btrfs_stack_block_group_chunk_objectid(&bgi) !=
- BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
+ chunk_objectid = btrfs_stack_block_group_chunk_objectid(&bgi);
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ /*
+ * We don't init the nr_global_roots until we load the global
+ * roots, so this could be 0 at mount time. If it's 0 we'll
+ * just assume we're fine, and later we'll check against our
+ * actual value.
+ */
+ if (unlikely(fs_info->nr_global_roots &&
+ chunk_objectid >= fs_info->nr_global_roots)) {
+ block_group_err(leaf, slot,
+ "invalid block group global root id, have %llu, needs to be <= %llu",
+ chunk_objectid,
+ fs_info->nr_global_roots);
+ return -EUCLEAN;
+ }
+ } else if (unlikely(chunk_objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
block_group_err(leaf, slot,
"invalid block group chunk objectid, have %llu expect %llu",
btrfs_stack_block_group_chunk_objectid(&bgi),
@@ -705,22 +765,19 @@ static int check_block_group_item(struct extent_buffer *leaf,
return 0;
}
-__printf(4, 5)
+__printf(5, 6)
__cold
-static void chunk_err(const struct extent_buffer *leaf,
+static void chunk_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
const struct btrfs_chunk *chunk, u64 logical,
const char *fmt, ...)
{
- const struct btrfs_fs_info *fs_info = leaf->fs_info;
- bool is_sb;
+ bool is_sb = !leaf;
struct va_format vaf;
va_list args;
int i;
int slot = -1;
- /* Only superblock eb is able to have such small offset */
- is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
-
if (!is_sb) {
/*
* Get the slot number by iterating through all slots, this
@@ -753,13 +810,17 @@ static void chunk_err(const struct extent_buffer *leaf,
/*
* The common chunk check which could also work on super block sys chunk array.
*
+ * If @leaf is NULL, then @chunk must be an on-stack chunk item.
+ * (This is the case for the superblock sys_chunk array, where fs_info->sectorsize is unreliable.)
+ *
* Return -EUCLEAN if anything is corrupted.
* Return 0 if everything is OK.
*/
-int btrfs_check_chunk_valid(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical)
+int btrfs_check_chunk_valid(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ u32 sectorsize)
{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
u64 length;
u64 chunk_end;
u64 stripe_len;
@@ -767,86 +828,109 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
u16 sub_stripes;
u64 type;
u64 features;
+ u32 chunk_sector_size;
bool mixed = false;
int raid_index;
int nparity;
int ncopies;
- length = btrfs_chunk_length(leaf, chunk);
- stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
- num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
- type = btrfs_chunk_type(leaf, chunk);
+ if (leaf) {
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+ chunk_sector_size = btrfs_chunk_sector_size(leaf, chunk);
+ } else {
+ length = btrfs_stack_chunk_length(chunk);
+ stripe_len = btrfs_stack_chunk_stripe_len(chunk);
+ num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+ sub_stripes = btrfs_stack_chunk_sub_stripes(chunk);
+ type = btrfs_stack_chunk_type(chunk);
+ chunk_sector_size = btrfs_stack_chunk_sector_size(chunk);
+ }
raid_index = btrfs_bg_flags_to_raid_index(type);
ncopies = btrfs_raid_array[raid_index].ncopies;
nparity = btrfs_raid_array[raid_index].nparity;
if (unlikely(!num_stripes)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes, have %u", num_stripes);
return -EUCLEAN;
}
if (unlikely(num_stripes < ncopies)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes < ncopies, have %u < %d",
num_stripes, ncopies);
return -EUCLEAN;
}
if (unlikely(nparity && num_stripes == nparity)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes == nparity, have %u == %d",
num_stripes, nparity);
return -EUCLEAN;
}
- if (unlikely(!IS_ALIGNED(logical, fs_info->sectorsize))) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(!IS_ALIGNED(logical, sectorsize))) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical, have %llu should aligned to %u",
- logical, fs_info->sectorsize);
+ logical, sectorsize);
return -EUCLEAN;
}
- if (unlikely(btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize)) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(chunk_sector_size != sectorsize)) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk sectorsize, have %u expect %u",
- btrfs_chunk_sector_size(leaf, chunk),
- fs_info->sectorsize);
+ chunk_sector_size, sectorsize);
return -EUCLEAN;
}
- if (unlikely(!length || !IS_ALIGNED(length, fs_info->sectorsize))) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(!length || !IS_ALIGNED(length, sectorsize))) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
logical, length);
return -EUCLEAN;
}
if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk stripe length: %llu",
stripe_len);
return -EUCLEAN;
}
+ /*
+ * We artificially limit the chunk size, so that the number of stripes
+ * inside a chunk can be fit into a U32. The current limit (256G) is
+ * way too large for real world usage anyway, and it's also much larger
+ * than our existing limit (10G).
+ *
+ * Thus it should be a good way to catch obvious bitflips.
+ */
+ if (unlikely(length >= btrfs_stripe_nr_to_offset(U32_MAX))) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "chunk length too large: have %llu limit %llu",
+ length, btrfs_stripe_nr_to_offset(U32_MAX));
+ return -EUCLEAN;
+ }
if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"unrecognized chunk type: 0x%llx",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
- BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- btrfs_chunk_type(leaf, chunk));
+ BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
return -EUCLEAN;
}
if (unlikely(!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EUCLEAN;
}
if (unlikely((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
type, BTRFS_BLOCK_GROUP_TYPE_MASK);
return -EUCLEAN;
@@ -855,7 +939,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
if (unlikely((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
(type & (BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DATA)))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"system chunk with data or metadata type: 0x%llx",
type);
return -EUCLEAN;
@@ -868,7 +952,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
if (!mixed) {
if (unlikely((type & BTRFS_BLOCK_GROUP_METADATA) &&
(type & BTRFS_BLOCK_GROUP_DATA))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"mixed chunk type in non-mixed mode: 0x%llx", type);
return -EUCLEAN;
}
@@ -890,7 +974,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -910,14 +994,15 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
struct btrfs_chunk *chunk,
struct btrfs_key *key, int slot)
{
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
int num_stripes;
- if (unlikely(btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk))) {
- chunk_err(leaf, chunk, key->offset,
+ if (unlikely(btrfs_item_size(leaf, slot) < sizeof(struct btrfs_chunk))) {
+ chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect [%zu, %u)",
- btrfs_item_size_nr(leaf, slot),
+ btrfs_item_size(leaf, slot),
sizeof(struct btrfs_chunk),
- BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
+ BTRFS_LEAF_DATA_SIZE(fs_info));
return -EUCLEAN;
}
@@ -927,15 +1012,16 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
goto out;
if (unlikely(btrfs_chunk_item_size(num_stripes) !=
- btrfs_item_size_nr(leaf, slot))) {
- chunk_err(leaf, chunk, key->offset,
+ btrfs_item_size(leaf, slot))) {
+ chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect %lu",
- btrfs_item_size_nr(leaf, slot),
+ btrfs_item_size(leaf, slot),
btrfs_chunk_item_size(num_stripes));
return -EUCLEAN;
}
out:
- return btrfs_check_chunk_valid(leaf, chunk, key->offset);
+ return btrfs_check_chunk_valid(fs_info, leaf, chunk, key->offset,
+ fs_info->sectorsize);
}
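A sketch of the other expected caller, the superblock sys_chunk array validation (assumed to live in volumes.c and possibly differing in detail): no extent buffer is available there and fs_info->sectorsize may not be set yet, so @leaf is NULL and the sector size comes from the superblock itself.
	/* 'chunk' points into a local copy of super_copy->sys_chunk_array. */
	ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, logical,
				      btrfs_super_sectorsize(fs_info->super_copy));
	if (ret < 0)
		return ret;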
__printf(3, 4)
@@ -953,6 +1039,7 @@ static void dev_item_err(const struct extent_buffer *eb, int slot,
vaf.fmt = fmt;
vaf.va = &args;
+ dump_page(folio_page(eb->folios[0], 0), "eb page dump");
btrfs_crit(eb->fs_info,
"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -965,6 +1052,7 @@ static int check_dev_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
struct btrfs_dev_item *ditem;
+ const u32 item_size = btrfs_item_size(leaf, slot);
if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
dev_item_err(leaf, slot,
@@ -972,6 +1060,13 @@ static int check_dev_item(struct extent_buffer *leaf,
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
+
+ if (unlikely(item_size != sizeof(*ditem))) {
+ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*ditem));
+ return -EUCLEAN;
+ }
+
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
dev_item_err(leaf, slot,
@@ -1007,6 +1102,7 @@ static int check_inode_item(struct extent_buffer *leaf,
struct btrfs_inode_item *iitem;
u64 super_gen = btrfs_super_generation(fs_info->super_copy);
u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
+ const u32 item_size = btrfs_item_size(leaf, slot);
u32 mode;
int ret;
u32 flags;
@@ -1016,6 +1112,12 @@ static int check_inode_item(struct extent_buffer *leaf,
if (unlikely(ret < 0))
return ret;
+ if (unlikely(item_size != sizeof(*iitem))) {
+ generic_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*iitem));
+ return -EUCLEAN;
+ }
+
iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
/* Here we use super block generation + 1 to handle log tree */
@@ -1095,12 +1197,12 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
if (unlikely(ret < 0))
return ret;
- if (unlikely(btrfs_item_size_nr(leaf, slot) != sizeof(ri) &&
- btrfs_item_size_nr(leaf, slot) !=
+ if (unlikely(btrfs_item_size(leaf, slot) != sizeof(ri) &&
+ btrfs_item_size(leaf, slot) !=
btrfs_legacy_root_item_size())) {
generic_err(leaf, slot,
"invalid root item size, have %u expect %zu or %u",
- btrfs_item_size_nr(leaf, slot), sizeof(ri),
+ btrfs_item_size(leaf, slot), sizeof(ri),
btrfs_legacy_root_item_size());
return -EUCLEAN;
}
@@ -1108,10 +1210,10 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
/*
* For legacy root item, the members starting at generation_v2 will be
* all filled with 0.
- * And since we allow geneartion_v2 as 0, it will still pass the check.
+ * And since we allow generation_v2 as 0, it will still pass the check.
*/
read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
- btrfs_item_size_nr(leaf, slot));
+ btrfs_item_size(leaf, slot));
/* Generation related */
if (unlikely(btrfs_root_generation(&ri) >
@@ -1193,6 +1295,7 @@ static void extent_err(const struct extent_buffer *eb, int slot,
vaf.fmt = fmt;
vaf.va = &args;
+ dump_page(folio_page(eb->folios[0], 0), "eb page dump");
btrfs_crit(eb->fs_info,
"corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
@@ -1200,15 +1303,31 @@ static void extent_err(const struct extent_buffer *eb, int slot,
va_end(args);
}
+static bool is_valid_dref_root(u64 rootid)
+{
+ /*
+ * The following tree root objectids are allowed to have a data backref:
+ * - subvolume trees
+ * - data reloc tree
+ * - tree root (for the v1 space cache)
+ */
+ return btrfs_is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
+ rootid == BTRFS_ROOT_TREE_OBJECTID;
+}
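For illustration, the new check accepts and rejects root ids as follows:
	/*
	 * is_valid_dref_root(BTRFS_FS_TREE_OBJECTID)          -> true
	 * is_valid_dref_root(BTRFS_FIRST_FREE_OBJECTID)       -> true (subvolume)
	 * is_valid_dref_root(BTRFS_DATA_RELOC_TREE_OBJECTID)  -> true
	 * is_valid_dref_root(BTRFS_ROOT_TREE_OBJECTID)        -> true (v1 space cache)
	 * is_valid_dref_root(BTRFS_EXTENT_TREE_OBJECTID)      -> false
	 */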
+
static int check_extent_item(struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
bool is_tree_block = false;
unsigned long ptr; /* Current pointer inside inline refs */
unsigned long end; /* Extent item end */
- const u32 item_size = btrfs_item_size_nr(leaf, slot);
+ const u32 item_size = btrfs_item_size(leaf, slot);
+ u8 last_type = 0;
+ u64 last_seq = U64_MAX;
u64 flags;
u64 generation;
u64 total_refs; /* Total refs in btrfs_extent_item */
@@ -1255,6 +1374,18 @@ static int check_extent_item(struct extent_buffer *leaf,
* 2.2) Ref type specific data
* Either using btrfs_extent_inline_ref::offset, or specific
* data structure.
+ *
+ * All above inline items should follow the order:
+ *
+ * - All btrfs_extent_inline_ref::type should be in an ascending
+ * order
+ *
+ * - Within the same type, the items should follow a descending
+ * order by their sequence number. The sequence number is
+ * determined by:
+ * * btrfs_extent_inline_ref::offset for all types other than
+ * EXTENT_DATA_REF
+ * * hash_extent_data_ref() for EXTENT_DATA_REF
*/
if (unlikely(item_size < sizeof(*ei))) {
extent_err(leaf, slot,
@@ -1336,6 +1467,9 @@ static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
+ u64 seq;
+ u64 dref_root;
+ u64 dref_objectid;
u64 dref_offset;
u64 inline_offset;
u8 inline_type;
@@ -1349,10 +1483,11 @@ static int check_extent_item(struct extent_buffer *leaf,
iref = (struct btrfs_extent_inline_ref *)ptr;
inline_type = btrfs_extent_inline_ref_type(leaf, iref);
inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
+ seq = inline_offset;
if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
- ptr, inline_type, end);
+ ptr, btrfs_extent_inline_ref_size(inline_type), end);
return -EUCLEAN;
}
@@ -1378,7 +1513,26 @@ static int check_extent_item(struct extent_buffer *leaf,
*/
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+ dref_root = btrfs_extent_data_ref_root(leaf, dref);
+ dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref);
dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+ seq = hash_extent_data_ref(
+ btrfs_extent_data_ref_root(leaf, dref),
+ btrfs_extent_data_ref_objectid(leaf, dref),
+ btrfs_extent_data_ref_offset(leaf, dref));
+ if (unlikely(!is_valid_dref_root(dref_root))) {
+ extent_err(leaf, slot,
+ "invalid data ref root value %llu",
+ dref_root);
+ return -EUCLEAN;
+ }
+ if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid data ref objectid value %llu",
+ dref_objectid);
+ return -EUCLEAN;
+ }
if (unlikely(!IS_ALIGNED(dref_offset,
fs_info->sectorsize))) {
extent_err(leaf, slot,
@@ -1386,6 +1540,11 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_extent_data_ref_count(leaf, dref);
break;
/* Contains parent bytenr and ref count */
@@ -1398,13 +1557,39 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ break;
default:
extent_err(leaf, slot, "unknown inline ref type: %u",
inline_type);
return -EUCLEAN;
}
+ if (unlikely(inline_type < last_type)) {
+ extent_err(leaf, slot,
+ "inline ref out-of-order: has type %u, prev type %u",
+ inline_type, last_type);
+ return -EUCLEAN;
+ }
+ /* Type changed, allow the sequence to start from U64_MAX again. */
+ if (inline_type > last_type)
+ last_seq = U64_MAX;
+ if (unlikely(seq > last_seq)) {
+ extent_err(leaf, slot,
+"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx",
+ inline_type, inline_offset, seq,
+ last_type, last_seq);
+ return -EUCLEAN;
+ }
+ last_type = inline_type;
+ last_seq = seq;
ptr += btrfs_extent_inline_ref_size(inline_type);
}
/* No padding is allowed */
@@ -1421,6 +1606,25 @@ static int check_extent_item(struct extent_buffer *leaf,
total_refs, inline_refs);
return -EUCLEAN;
}
+
+ if ((prev_key->type == BTRFS_EXTENT_ITEM_KEY) ||
+ (prev_key->type == BTRFS_METADATA_ITEM_KEY)) {
+ u64 prev_end = prev_key->objectid;
+
+ if (prev_key->type == BTRFS_METADATA_ITEM_KEY)
+ prev_end += fs_info->nodesize;
+ else
+ prev_end += prev_key->offset;
+
+ if (unlikely(prev_end > key->objectid)) {
+ extent_err(leaf, slot,
+ "previous extent " BTRFS_KEY_FMT " overlaps current extent " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(prev_key),
+ BTRFS_KEY_FMT_VALUE(key));
+ return -EUCLEAN;
+ }
+ }
+
return 0;
}
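The loop above enforces two ordering invariants on inline refs: the ref type must never decrease, and within one type the sequence value (the raw offset, or a hash of root/objectid/offset for data refs) must never increase, restarting from U64_MAX whenever the type changes. A minimal, self-contained userspace sketch of that invariant follows; the struct, function, and constant names are illustrative only and are not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one decoded inline ref: its type and its
 * ordering key ("seq"): the raw offset, or a hash for data refs. */
struct inline_ref {
	uint8_t type;
	uint64_t seq;
};

/* Return true if the refs follow the order the checker expects:
 * ascending by type, descending by seq within each type. */
static bool inline_refs_ordered(const struct inline_ref *refs, size_t nr)
{
	uint8_t last_type = 0;
	uint64_t last_seq = UINT64_MAX;

	for (size_t i = 0; i < nr; i++) {
		if (refs[i].type < last_type)
			return false;		/* type went backwards */
		if (refs[i].type > last_type)
			last_seq = UINT64_MAX;	/* new type, restart window */
		if (refs[i].seq > last_seq)
			return false;		/* seq increased within a type */
		last_type = refs[i].type;
		last_seq = refs[i].seq;
	}
	return true;
}

int main(void)
{
	const struct inline_ref good[] = { {1, 900}, {1, 100}, {3, 50} };
	const struct inline_ref bad[]  = { {1, 100}, {1, 900} };

	printf("good ordered: %d\n", inline_refs_ordered(good, 3));
	printf("bad ordered:  %d\n", inline_refs_ordered(bad, 2));
	return 0;
}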
@@ -1429,13 +1633,23 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
{
u32 expect_item_size = 0;
- if (key->type == BTRFS_SHARED_DATA_REF_KEY)
+ if (key->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_shared_data_ref *sref;
+
+ sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref);
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
+
expect_item_size = sizeof(struct btrfs_shared_data_ref);
+ }
- if (unlikely(btrfs_item_size_nr(leaf, slot) != expect_item_size)) {
+ if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
generic_err(leaf, slot,
"invalid item size, have %u expect %u for key type %u",
- btrfs_item_size_nr(leaf, slot),
+ btrfs_item_size(leaf, slot),
expect_item_size, key->type);
return -EUCLEAN;
}
@@ -1460,12 +1674,12 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
{
struct btrfs_extent_data_ref *dref;
unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
- const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);
+ const unsigned long end = ptr + btrfs_item_size(leaf, slot);
- if (unlikely(btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0)) {
+ if (unlikely(btrfs_item_size(leaf, slot) % sizeof(*dref) != 0)) {
generic_err(leaf, slot,
"invalid item size, have %u expect aligned to %zu for key type %u",
- btrfs_item_size_nr(leaf, slot),
+ btrfs_item_size(leaf, slot),
sizeof(*dref), key->type);
return -EUCLEAN;
}
@@ -1476,6 +1690,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
return -EUCLEAN;
}
for (; ptr < end; ptr += sizeof(*dref)) {
+ u64 root;
+ u64 objectid;
u64 offset;
/*
@@ -1483,13 +1699,33 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
* overflow from the leaf due to hash collisions.
*/
dref = (struct btrfs_extent_data_ref *)ptr;
+ root = btrfs_extent_data_ref_root(leaf, dref);
+ objectid = btrfs_extent_data_ref_objectid(leaf, dref);
offset = btrfs_extent_data_ref_offset(leaf, dref);
+ if (unlikely(!is_valid_dref_root(root))) {
+ extent_err(leaf, slot,
+ "invalid extent data backref root value %llu",
+ root);
+ return -EUCLEAN;
+ }
+ if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+ objectid);
+ return -EUCLEAN;
+ }
if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
offset, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
}
return 0;
}
@@ -1507,23 +1743,23 @@ static int check_inode_ref(struct extent_buffer *leaf,
if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
return -EUCLEAN;
/* namelen can't be 0, so item_size == sizeof() is also invalid */
- if (unlikely(btrfs_item_size_nr(leaf, slot) <= sizeof(*iref))) {
+ if (unlikely(btrfs_item_size(leaf, slot) <= sizeof(*iref))) {
inode_ref_err(leaf, slot,
"invalid item size, have %u expect (%zu, %u)",
- btrfs_item_size_nr(leaf, slot),
+ btrfs_item_size(leaf, slot),
sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
return -EUCLEAN;
}
ptr = btrfs_item_ptr_offset(leaf, slot);
- end = ptr + btrfs_item_size_nr(leaf, slot);
+ end = ptr + btrfs_item_size(leaf, slot);
while (ptr < end) {
u16 namelen;
- if (unlikely(ptr + sizeof(iref) > end)) {
+ if (unlikely(ptr + sizeof(*iref) > end)) {
inode_ref_err(leaf, slot,
"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
- ptr, end, sizeof(iref));
+ ptr, end, sizeof(*iref));
return -EUCLEAN;
}
@@ -1546,12 +1782,131 @@ static int check_inode_ref(struct extent_buffer *leaf,
return 0;
}
+static int check_inode_extref(struct extent_buffer *leaf,
+ struct btrfs_key *key, struct btrfs_key *prev_key,
+ int slot)
+{
+ unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
+ unsigned long end = ptr + btrfs_item_size(leaf, slot);
+
+ if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
+ return -EUCLEAN;
+
+ while (ptr < end) {
+ struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ptr;
+ u16 namelen;
+
+ if (unlikely(ptr + sizeof(*extref) > end)) {
+ inode_ref_err(leaf, slot,
+ "inode extref overflow, ptr %lu end %lu inode_extref size %zu",
+ ptr, end, sizeof(*extref));
+ return -EUCLEAN;
+ }
+
+ namelen = btrfs_inode_extref_name_len(leaf, extref);
+ if (unlikely(ptr + sizeof(*extref) + namelen > end)) {
+ inode_ref_err(leaf, slot,
+ "inode extref overflow, ptr %lu end %lu namelen %u",
+ ptr, end, namelen);
+ return -EUCLEAN;
+ }
+ ptr += sizeof(*extref) + namelen;
+ }
+ return 0;
+}
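check_inode_extref() above follows the usual pattern for walking variable-length records packed into one item: check that the fixed header fits before reading it, then check that the name it describes also fits, then advance. The following standalone sketch shows the same pattern; the record layout and names are made up for illustration and are not the on-disk inode extref format.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical record header: a length-prefixed name follows it. */
struct ref_hdr {
	uint16_t name_len;
};

/* Walk a packed buffer of [header][name] records, rejecting any record
 * whose header or name would run past the end of the buffer. */
static bool walk_refs(const uint8_t *buf, size_t size)
{
	size_t ptr = 0;

	while (ptr < size) {
		struct ref_hdr hdr;

		if (ptr + sizeof(hdr) > size)
			return false;		/* header overflows item */
		memcpy(&hdr, buf + ptr, sizeof(hdr));
		if (ptr + sizeof(hdr) + hdr.name_len > size)
			return false;		/* name overflows item */
		ptr += sizeof(hdr) + hdr.name_len;
	}
	return true;
}

int main(void)
{
	uint8_t buf[16];
	struct ref_hdr hdr = { .name_len = 3 };
	size_t size = 0;

	memcpy(buf + size, &hdr, sizeof(hdr));
	size += sizeof(hdr);
	memcpy(buf + size, "abc", 3);
	size += 3;
	printf("well-formed: %d\n", walk_refs(buf, size));

	/* Corrupt the header so the name runs past the end of the item. */
	hdr.name_len = 100;
	memcpy(buf, &hdr, sizeof(hdr));
	printf("overflowing: %d\n", walk_refs(buf, size));
	return 0;
}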
+
+static int check_raid_stripe_extent(const struct extent_buffer *leaf,
+ const struct btrfs_key *key, int slot)
+{
+ if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
+ generic_err(leaf, slot,
+"invalid key objectid for raid stripe extent, have %llu expect aligned to %u",
+ key->objectid, leaf->fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+
+ if (unlikely(!btrfs_fs_incompat(leaf->fs_info, RAID_STRIPE_TREE))) {
+ generic_err(leaf, slot,
+ "RAID_STRIPE_EXTENT present but RAID_STRIPE_TREE incompat bit unset");
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
+static int check_dev_extent_item(const struct extent_buffer *leaf,
+ const struct btrfs_key *key,
+ int slot,
+ struct btrfs_key *prev_key)
+{
+ struct btrfs_dev_extent *de;
+ const u32 sectorsize = leaf->fs_info->sectorsize;
+
+ de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+ /* Basic fixed member checks. */
+ if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) !=
+ BTRFS_CHUNK_TREE_OBJECTID)) {
+ generic_err(leaf, slot,
+ "invalid dev extent chunk tree id, has %llu expect %llu",
+ btrfs_dev_extent_chunk_tree(leaf, de),
+ BTRFS_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+ if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
+ generic_err(leaf, slot,
+ "invalid dev extent chunk objectid, has %llu expect %llu",
+ btrfs_dev_extent_chunk_objectid(leaf, de),
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+ /* Alignment check. */
+ if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
+ generic_err(leaf, slot,
+ "invalid dev extent key.offset, has %llu not aligned to %u",
+ key->offset, sectorsize);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de),
+ sectorsize))) {
+ generic_err(leaf, slot,
+ "invalid dev extent chunk offset, has %llu not aligned to %u",
+ btrfs_dev_extent_chunk_offset(leaf, de),
+ sectorsize);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de),
+ sectorsize))) {
+ generic_err(leaf, slot,
+ "invalid dev extent length, has %llu not aligned to %u",
+ btrfs_dev_extent_length(leaf, de), sectorsize);
+ return -EUCLEAN;
+ }
+ /* Overlap check with previous dev extent. */
+ if (slot && prev_key->objectid == key->objectid &&
+ prev_key->type == key->type) {
+ struct btrfs_dev_extent *prev_de;
+ u64 prev_len;
+
+ prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent);
+ prev_len = btrfs_dev_extent_length(leaf, prev_de);
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+ return 0;
+}
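The overlap test above (and the extent-item overlap test earlier) reduces to the same rule: for items sorted by start offset, the previous item's end must not pass the current item's start. A small self-contained sketch of that rule, with illustrative names and a simplified range struct:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative range: start offset and length, like a dev extent
 * (key.offset + length) or an extent item (objectid + offset). */
struct range {
	uint64_t start;
	uint64_t len;
};

/* Given ranges already sorted by start, verify that no range spills into
 * the next one: prev.start + prev.len must not exceed cur.start. */
static bool ranges_disjoint(const struct range *r, size_t nr)
{
	for (size_t i = 1; i < nr; i++) {
		if (r[i - 1].start + r[i - 1].len > r[i].start)
			return false;
	}
	return true;
}

int main(void)
{
	const struct range ok[]  = { {0, 4096}, {4096, 8192}, {16384, 4096} };
	const struct range bad[] = { {0, 8192}, {4096, 4096} };

	printf("ok disjoint:  %d\n", ranges_disjoint(ok, 3));
	printf("bad disjoint: %d\n", ranges_disjoint(bad, 2));
	return 0;
}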
+
/*
* Common point to switch the item-specific validation.
*/
-static int check_leaf_item(struct extent_buffer *leaf,
- struct btrfs_key *key, int slot,
- struct btrfs_key *prev_key)
+static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
+ struct btrfs_key *key,
+ int slot,
+ struct btrfs_key *prev_key)
{
int ret = 0;
struct btrfs_chunk *chunk;
@@ -1571,6 +1926,9 @@ static int check_leaf_item(struct extent_buffer *leaf,
case BTRFS_INODE_REF_KEY:
ret = check_inode_ref(leaf, key, prev_key, slot);
break;
+ case BTRFS_INODE_EXTREF_KEY:
+ ret = check_inode_extref(leaf, key, prev_key, slot);
+ break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
ret = check_block_group_item(leaf, key, slot);
break;
@@ -1581,6 +1939,9 @@ static int check_leaf_item(struct extent_buffer *leaf,
case BTRFS_DEV_ITEM_KEY:
ret = check_dev_item(leaf, key, slot);
break;
+ case BTRFS_DEV_EXTENT_KEY:
+ ret = check_dev_extent_item(leaf, key, slot, prev_key);
+ break;
case BTRFS_INODE_ITEM_KEY:
ret = check_inode_item(leaf, key, slot);
break;
@@ -1589,7 +1950,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
break;
case BTRFS_EXTENT_ITEM_KEY:
case BTRFS_METADATA_ITEM_KEY:
- ret = check_extent_item(leaf, key, slot);
+ ret = check_extent_item(leaf, key, slot, prev_key);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
case BTRFS_SHARED_DATA_REF_KEY:
@@ -1599,11 +1960,17 @@ static int check_leaf_item(struct extent_buffer *leaf,
case BTRFS_EXTENT_DATA_REF_KEY:
ret = check_extent_data_ref(leaf, key, slot);
break;
+ case BTRFS_RAID_STRIPE_KEY:
+ ret = check_raid_stripe_extent(leaf, key, slot);
+ break;
}
- return ret;
+
+ if (unlikely(ret))
+ return BTRFS_TREE_BLOCK_INVALID_ITEM;
+ return BTRFS_TREE_BLOCK_CLEAN;
}
-static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
+enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
/* No valid key type is 0, so all key should be larger than this key */
@@ -1616,7 +1983,12 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
generic_err(leaf, 0,
"invalid level for leaf, have %d expect 0",
btrfs_header_level(leaf));
- return -EUCLEAN;
+ return BTRFS_TREE_BLOCK_INVALID_LEVEL;
+ }
+
+ if (unlikely(!btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_WRITTEN))) {
+ generic_err(leaf, 0, "invalid flag for leaf, WRITTEN not set");
+ return BTRFS_TREE_BLOCK_WRITTEN_NOT_SET;
}
/*
@@ -1633,26 +2005,38 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
/* These trees must never be empty */
if (unlikely(owner == BTRFS_ROOT_TREE_OBJECTID ||
owner == BTRFS_CHUNK_TREE_OBJECTID ||
- owner == BTRFS_EXTENT_TREE_OBJECTID ||
owner == BTRFS_DEV_TREE_OBJECTID ||
owner == BTRFS_FS_TREE_OBJECTID ||
owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
generic_err(leaf, 0,
"invalid root, root %llu must never be empty",
owner);
- return -EUCLEAN;
+ return BTRFS_TREE_BLOCK_INVALID_NRITEMS;
}
+
/* Unknown tree */
if (unlikely(owner == 0)) {
generic_err(leaf, 0,
"invalid owner, root 0 is not defined");
- return -EUCLEAN;
+ return BTRFS_TREE_BLOCK_INVALID_OWNER;
}
- return 0;
+
+ /* EXTENT_TREE_V2 can have empty extent trees. */
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
+ return BTRFS_TREE_BLOCK_CLEAN;
+
+ if (unlikely(owner == BTRFS_EXTENT_TREE_OBJECTID)) {
+ generic_err(leaf, 0,
+ "invalid root, root %llu must never be empty",
+ owner);
+ return BTRFS_TREE_BLOCK_INVALID_NRITEMS;
+ }
+
+ return BTRFS_TREE_BLOCK_CLEAN;
}
if (unlikely(nritems == 0))
- return 0;
+ return BTRFS_TREE_BLOCK_CLEAN;
/*
* Check the following things to make sure this is a good leaf, and
@@ -1667,20 +2051,22 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
*/
for (slot = 0; slot < nritems; slot++) {
u32 item_end_expected;
- int ret;
+ u64 item_data_end;
+ enum btrfs_tree_block_status ret;
btrfs_item_key_to_cpu(leaf, &key, slot);
/* Make sure the keys are in the right order */
if (unlikely(btrfs_comp_cpu_keys(&prev_key, &key) >= 0)) {
generic_err(leaf, slot,
- "bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
- prev_key.objectid, prev_key.type,
- prev_key.offset, key.objectid, key.type,
- key.offset);
- return -EUCLEAN;
+ "bad key order, prev " BTRFS_KEY_FMT " current " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&prev_key),
+ BTRFS_KEY_FMT_VALUE(&key));
+ return BTRFS_TREE_BLOCK_BAD_KEY_ORDER;
}
+ item_data_end = (u64)btrfs_item_offset(leaf, slot) +
+ btrfs_item_size(leaf, slot);
/*
* Make sure the offset and ends are right, remember that the
* item data starts at the end of the leaf and grows towards the
@@ -1689,14 +2075,13 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
if (slot == 0)
item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
else
- item_end_expected = btrfs_item_offset_nr(leaf,
+ item_end_expected = btrfs_item_offset(leaf,
slot - 1);
- if (unlikely(btrfs_item_end_nr(leaf, slot) != item_end_expected)) {
+ if (unlikely(item_data_end != item_end_expected)) {
generic_err(leaf, slot,
- "unexpected item end, have %u expect %u",
- btrfs_item_end_nr(leaf, slot),
- item_end_expected);
- return -EUCLEAN;
+ "unexpected item end, have %llu expect %u",
+ item_data_end, item_end_expected);
+ return BTRFS_TREE_BLOCK_INVALID_OFFSETS;
}
/*
@@ -1704,56 +2089,49 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
* just in case all the items are consistent to each other, but
* all point outside of the leaf.
*/
- if (unlikely(btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(fs_info))) {
+ if (unlikely(item_data_end > BTRFS_LEAF_DATA_SIZE(fs_info))) {
generic_err(leaf, slot,
- "slot end outside of leaf, have %u expect range [0, %u]",
- btrfs_item_end_nr(leaf, slot),
- BTRFS_LEAF_DATA_SIZE(fs_info));
- return -EUCLEAN;
+ "slot end outside of leaf, have %llu expect range [0, %u]",
+ item_data_end, BTRFS_LEAF_DATA_SIZE(fs_info));
+ return BTRFS_TREE_BLOCK_INVALID_OFFSETS;
}
/* Also check if the item pointer overlaps with btrfs item. */
if (unlikely(btrfs_item_ptr_offset(leaf, slot) <
- btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item))) {
+ btrfs_item_nr_offset(leaf, slot) + sizeof(struct btrfs_item))) {
generic_err(leaf, slot,
"slot overlaps with its data, item end %lu data start %lu",
- btrfs_item_nr_offset(slot) +
+ btrfs_item_nr_offset(leaf, slot) +
sizeof(struct btrfs_item),
btrfs_item_ptr_offset(leaf, slot));
- return -EUCLEAN;
+ return BTRFS_TREE_BLOCK_INVALID_OFFSETS;
}
- if (check_item_data) {
- /*
- * Check if the item size and content meet other
- * criteria
- */
- ret = check_leaf_item(leaf, &key, slot, &prev_key);
- if (unlikely(ret < 0))
- return ret;
- }
+ /* Check if the item size and content meet other criteria. */
+ ret = check_leaf_item(leaf, &key, slot, &prev_key);
+ if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN))
+ return ret;
prev_key.objectid = key.objectid;
prev_key.type = key.type;
prev_key.offset = key.offset;
}
- return 0;
+ return BTRFS_TREE_BLOCK_CLEAN;
}
-int btrfs_check_leaf_full(struct extent_buffer *leaf)
+int btrfs_check_leaf(struct extent_buffer *leaf)
{
- return check_leaf(leaf, true);
-}
-ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO);
+ enum btrfs_tree_block_status ret;
-int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
-{
- return check_leaf(leaf, false);
+ ret = __btrfs_check_leaf(leaf);
+ if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN))
+ return -EUCLEAN;
+ return 0;
}
+ALLOW_ERROR_INJECTION(btrfs_check_leaf, ERRNO);
-int btrfs_check_node(struct extent_buffer *node)
+enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node)
{
struct btrfs_fs_info *fs_info = node->fs_info;
unsigned long nr = btrfs_header_nritems(node);
@@ -1761,13 +2139,17 @@ int btrfs_check_node(struct extent_buffer *node)
int slot;
int level = btrfs_header_level(node);
u64 bytenr;
- int ret = 0;
+
+ if (unlikely(!btrfs_header_flag(node, BTRFS_HEADER_FLAG_WRITTEN))) {
+ generic_err(node, 0, "invalid flag for node, WRITTEN not set");
+ return BTRFS_TREE_BLOCK_WRITTEN_NOT_SET;
+ }
if (unlikely(level <= 0 || level >= BTRFS_MAX_LEVEL)) {
generic_err(node, 0,
"invalid level for node, have %d expect [1, %d]",
level, BTRFS_MAX_LEVEL - 1);
- return -EUCLEAN;
+ return BTRFS_TREE_BLOCK_INVALID_LEVEL;
}
if (unlikely(nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info))) {
btrfs_crit(fs_info,
@@ -1775,7 +2157,7 @@ int btrfs_check_node(struct extent_buffer *node)
btrfs_header_owner(node), node->start,
nr == 0 ? "small" : "large", nr,
BTRFS_NODEPTRS_PER_BLOCK(fs_info));
- return -EUCLEAN;
+ return BTRFS_TREE_BLOCK_INVALID_NRITEMS;
}
for (slot = 0; slot < nr - 1; slot++) {
@@ -1786,28 +2168,144 @@ int btrfs_check_node(struct extent_buffer *node)
if (unlikely(!bytenr)) {
generic_err(node, slot,
"invalid NULL node pointer");
- ret = -EUCLEAN;
- goto out;
+ return BTRFS_TREE_BLOCK_INVALID_BLOCKPTR;
}
if (unlikely(!IS_ALIGNED(bytenr, fs_info->sectorsize))) {
generic_err(node, slot,
"unaligned pointer, have %llu should be aligned to %u",
bytenr, fs_info->sectorsize);
- ret = -EUCLEAN;
- goto out;
+ return BTRFS_TREE_BLOCK_INVALID_BLOCKPTR;
}
if (unlikely(btrfs_comp_cpu_keys(&key, &next_key) >= 0)) {
generic_err(node, slot,
- "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
- key.objectid, key.type, key.offset,
- next_key.objectid, next_key.type,
- next_key.offset);
- ret = -EUCLEAN;
- goto out;
+ "bad key order, current " BTRFS_KEY_FMT " next " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&key),
+ BTRFS_KEY_FMT_VALUE(&next_key));
+ return BTRFS_TREE_BLOCK_BAD_KEY_ORDER;
}
}
-out:
- return ret;
+ return BTRFS_TREE_BLOCK_CLEAN;
+}
+
+int btrfs_check_node(struct extent_buffer *node)
+{
+ enum btrfs_tree_block_status ret;
+
+ ret = __btrfs_check_node(node);
+ if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN))
+ return -EUCLEAN;
+ return 0;
}
ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);
+
+int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner)
+{
+ const bool is_subvol = btrfs_is_fstree(root_owner);
+ const u64 eb_owner = btrfs_header_owner(eb);
+
+ /*
+ * Skip dummy fs, as selftests don't create unique ebs for each dummy
+ * root.
+ */
+ if (btrfs_is_testing(eb->fs_info))
+ return 0;
+ /*
+ * There are several call sites (backref walking, qgroup, and data
+ * reloc) passing 0 as @root_owner, as they are not holding the
+ * tree root. In that case, we can not do a reliable ownership check,
+ * so just exit.
+ */
+ if (root_owner == 0)
+ return 0;
+ /*
+ * These trees use key.offset as their owner; our callers don't have
+ * the extra capacity to pass key.offset here, so we just skip them.
+ */
+ if (root_owner == BTRFS_TREE_LOG_OBJECTID ||
+ root_owner == BTRFS_TREE_RELOC_OBJECTID)
+ return 0;
+
+ if (!is_subvol) {
+ /* For non-subvolume trees, the eb owner should match root owner */
+ if (unlikely(root_owner != eb_owner)) {
+ btrfs_crit(eb->fs_info,
+"corrupted %s, root=%llu block=%llu owner mismatch, have %llu expect %llu",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ root_owner, btrfs_header_bytenr(eb), eb_owner,
+ root_owner);
+ return -EUCLEAN;
+ }
+ return 0;
+ }
+
+ /*
+ * For subvolume trees, owners can mismatch, but they should all belong
+ * to subvolume trees.
+ */
+ if (unlikely(is_subvol != btrfs_is_fstree(eb_owner))) {
+ btrfs_crit(eb->fs_info,
+"corrupted %s, root=%llu block=%llu owner mismatch, have %llu expect [%llu, %llu]",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ root_owner, btrfs_header_bytenr(eb), eb_owner,
+ BTRFS_FIRST_FREE_OBJECTID, BTRFS_LAST_FREE_OBJECTID);
+ return -EUCLEAN;
+ }
+ return 0;
+}
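The ownership policy implemented above can be summarized as: global (non-subvolume) trees require an exact owner match, while subvolume trees only require that both owners are subvolume ids, since snapshots share blocks across subvolume trees. A standalone sketch of that policy follows; the objectid bounds and helper names are simplified stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the first/last subvolume objectids. */
#define FIRST_FREE_OBJECTID	256ULL
#define LAST_FREE_OBJECTID	((1ULL << 48) - 256)

static bool is_fstree(uint64_t objectid)
{
	return objectid >= FIRST_FREE_OBJECTID && objectid <= LAST_FREE_OBJECTID;
}

/* Return true if a block owned by eb_owner may live in a tree whose root
 * is root_owner: exact match for global trees, same-class match for
 * subvolume trees. */
static bool eb_owner_ok(uint64_t root_owner, uint64_t eb_owner)
{
	if (!is_fstree(root_owner))
		return root_owner == eb_owner;
	return is_fstree(eb_owner);
}

int main(void)
{
	printf("%d\n", eb_owner_ok(2, 2));	/* global tree, exact match */
	printf("%d\n", eb_owner_ok(2, 257));	/* global tree, mismatch */
	printf("%d\n", eb_owner_ok(257, 260));	/* block shared between subvolumes */
	return 0;
}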
+
+int btrfs_verify_level_key(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ int found_level;
+ struct btrfs_key found_key;
+ int ret;
+
+ found_level = btrfs_header_level(eb);
+ if (unlikely(found_level != check->level)) {
+ DEBUG_WARN();
+ btrfs_err(fs_info,
+"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
+ eb->start, check->level, found_level);
+ return -EUCLEAN;
+ }
+
+ if (!check->has_first_key)
+ return 0;
+
+ /*
+ * For live tree blocks (new tree blocks in the current transaction),
+ * we need proper lock context to avoid races, which is impossible here.
+ * So we only check tree blocks which are read from disk, whose
+ * generation <= fs_info->last_trans_committed.
+ */
+ if (btrfs_header_generation(eb) > btrfs_get_last_trans_committed(fs_info))
+ return 0;
+
+ /* We have @first_key, so this @eb must have at least one item */
+ if (unlikely(btrfs_header_nritems(eb) == 0)) {
+ btrfs_err(fs_info,
+ "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
+ eb->start);
+ DEBUG_WARN();
+ return -EUCLEAN;
+ }
+
+ if (found_level)
+ btrfs_node_key_to_cpu(eb, &found_key, 0);
+ else
+ btrfs_item_key_to_cpu(eb, &found_key, 0);
+
+ ret = btrfs_comp_cpu_keys(&check->first_key, &found_key);
+ if (unlikely(ret)) {
+ DEBUG_WARN();
+ btrfs_err(fs_info,
+"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
+ eb->start, check->transid, check->first_key.objectid,
+ check->first_key.type, check->first_key.offset,
+ found_key.objectid, found_key.type,
+ found_key.offset);
+ }
+ return ret;
+}
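Both the key-order checks and the first-key check above rely on comparing keys as an (objectid, type, offset) tuple, lexicographically. A minimal sketch of such a comparator, with illustrative struct and function names (not the kernel's btrfs_comp_cpu_keys):

#include <stdint.h>
#include <stdio.h>

/* Illustrative CPU-order key with the same three fields as a btrfs key. */
struct demo_key {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
};

/* Lexicographic compare over (objectid, type, offset); returns <0, 0 or >0
 * like memcmp. This is the ordering the key checks above rely on. */
static int demo_comp_keys(const struct demo_key *a, const struct demo_key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}

int main(void)
{
	const struct demo_key parent_first = { 256, 1, 0 };
	const struct demo_key child_first  = { 256, 1, 0 };
	const struct demo_key later        = { 256, 12, 5 };

	printf("%d\n", demo_comp_keys(&parent_first, &child_first)); /* 0: match */
	printf("%d\n", demo_comp_keys(&parent_first, &later));       /* -1 */
	return 0;
}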
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
index 32fecc9dc1dd..eb201f4ec3c7 100644
--- a/fs/btrfs/tree-checker.h
+++ b/fs/btrfs/tree-checker.h
@@ -6,24 +6,73 @@
#ifndef BTRFS_TREE_CHECKER_H
#define BTRFS_TREE_CHECKER_H
-#include "ctree.h"
-#include "extent_io.h"
+#include <linux/types.h>
+#include <uapi/linux/btrfs_tree.h>
-/*
- * Comprehensive leaf checker.
- * Will check not only the item pointers, but also every possible member
- * in item data.
- */
-int btrfs_check_leaf_full(struct extent_buffer *leaf);
+struct extent_buffer;
+struct btrfs_fs_info;
+struct btrfs_chunk;
+struct btrfs_key;
+
+/* All the extra info needed to verify the parentness of a tree block. */
+struct btrfs_tree_parent_check {
+ /*
+ * The owner check against the tree block.
+ *
+ * Can be 0 to skip the owner check.
+ */
+ u64 owner_root;
+
+ /*
+ * Expected transid, can be 0 to skip the check, but such a skip
+ * should only be used for backref walk related code.
+ */
+ u64 transid;
+
+ /*
+ * The expected first key.
+ *
+ * This check can be skipped if @has_first_key is false; such a skip
+ * can happen in cases where we don't have the parent node key,
+ * e.g. reading the tree root, doing backref walk.
+ */
+ struct btrfs_key first_key;
+ bool has_first_key;
+
+ /* The expected level. Should always be set. */
+ u8 level;
+};
+
+enum btrfs_tree_block_status {
+ BTRFS_TREE_BLOCK_CLEAN,
+ BTRFS_TREE_BLOCK_INVALID_NRITEMS,
+ BTRFS_TREE_BLOCK_INVALID_PARENT_KEY,
+ BTRFS_TREE_BLOCK_BAD_KEY_ORDER,
+ BTRFS_TREE_BLOCK_INVALID_LEVEL,
+ BTRFS_TREE_BLOCK_INVALID_FREE_SPACE,
+ BTRFS_TREE_BLOCK_INVALID_OFFSETS,
+ BTRFS_TREE_BLOCK_INVALID_BLOCKPTR,
+ BTRFS_TREE_BLOCK_INVALID_ITEM,
+ BTRFS_TREE_BLOCK_INVALID_OWNER,
+ BTRFS_TREE_BLOCK_WRITTEN_NOT_SET,
+};
/*
- * Less strict leaf checker.
- * Will only check item pointers, not reading item data.
+ * Exported simply for btrfs-progs which wants to have the
+ * btrfs_tree_block_status return codes.
*/
-int btrfs_check_leaf_relaxed(struct extent_buffer *leaf);
+enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf);
+enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node);
+
+int btrfs_check_leaf(struct extent_buffer *leaf);
int btrfs_check_node(struct extent_buffer *node);
-int btrfs_check_chunk_valid(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical);
+int btrfs_check_chunk_valid(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ u32 sectorsize);
+int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner);
+int btrfs_verify_level_key(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check);
#endif
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
deleted file mode 100644
index 7c45d960b53c..000000000000
--- a/fs/btrfs/tree-defrag.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2007 Oracle. All rights reserved.
- */
-
-#include <linux/sched.h>
-#include "ctree.h"
-#include "disk-io.h"
-#include "print-tree.h"
-#include "transaction.h"
-#include "locking.h"
-
-/*
- * Defrag all the leaves in a given btree.
- * Read all the leaves and try to get key order to
- * better reflect disk order
- */
-
-int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- struct btrfs_path *path = NULL;
- struct btrfs_key key;
- int ret = 0;
- int wret;
- int level;
- int next_key_ret = 0;
- u64 last_ret = 0;
-
- if (root->fs_info->extent_root == root) {
- /*
- * there's recursion here right now in the tree locking,
- * we can't defrag the extent root without deadlock
- */
- goto out;
- }
-
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- goto out;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- level = btrfs_header_level(root->node);
-
- if (level == 0)
- goto out;
-
- if (root->defrag_progress.objectid == 0) {
- struct extent_buffer *root_node;
- u32 nritems;
-
- root_node = btrfs_lock_root_node(root);
- nritems = btrfs_header_nritems(root_node);
- root->defrag_max.objectid = 0;
- /* from above we know this is not a leaf */
- btrfs_node_key_to_cpu(root_node, &root->defrag_max,
- nritems - 1);
- btrfs_tree_unlock(root_node);
- free_extent_buffer(root_node);
- memset(&key, 0, sizeof(key));
- } else {
- memcpy(&key, &root->defrag_progress, sizeof(key));
- }
-
- path->keep_locks = 1;
-
- ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = 0;
- goto out;
- }
- btrfs_release_path(path);
- /*
- * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
- * leafs from path->nodes[1], so set lowest_level to 1 to avoid later
- * a deadlock (attempting to write lock an already write locked leaf).
- */
- path->lowest_level = 1;
- wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
-
- if (wret < 0) {
- ret = wret;
- goto out;
- }
- if (!path->nodes[1]) {
- ret = 0;
- goto out;
- }
- /*
- * The node at level 1 must always be locked when our path has
- * keep_locks set and lowest_level is 1, regardless of the value of
- * path->slots[1].
- */
- BUG_ON(path->locks[1] == 0);
- ret = btrfs_realloc_node(trans, root,
- path->nodes[1], 0,
- &last_ret,
- &root->defrag_progress);
- if (ret) {
- WARN_ON(ret == -EAGAIN);
- goto out;
- }
- /*
- * Now that we reallocated the node we can find the next key. Note that
- * btrfs_find_next_key() can release our path and do another search
- * without COWing, this is because even with path->keep_locks = 1,
- * btrfs_search_slot() / ctree.c:unlock_up() does not keeps a lock on a
- * node when path->slots[node_level - 1] does not point to the last
- * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
- * we search for the next key after reallocating our node.
- */
- path->slots[1] = btrfs_header_nritems(path->nodes[1]);
- next_key_ret = btrfs_find_next_key(root, path, &key, 1,
- BTRFS_OLDEST_GENERATION);
- if (next_key_ret == 0) {
- memcpy(&root->defrag_progress, &key, sizeof(key));
- ret = -EAGAIN;
- }
-out:
- btrfs_free_path(path);
- if (ret == -EAGAIN) {
- if (root->defrag_max.objectid > root->defrag_progress.objectid)
- goto done;
- if (root->defrag_max.type > root->defrag_progress.type)
- goto done;
- if (root->defrag_max.offset > root->defrag_progress.offset)
- goto done;
- ret = 0;
- }
-done:
- if (ret != -EAGAIN)
- memset(&root->defrag_progress, 0,
- sizeof(root->defrag_progress));
-
- return ret;
-}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8ab33caf016f..fff37c8d96a4 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -13,13 +13,25 @@
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
-#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
-#include "zoned.h"
+#include "inode-item.h"
+#include "fs.h"
+#include "accessors.h"
+#include "extent-tree.h"
+#include "root-tree.h"
+#include "dir-item.h"
+#include "file-item.h"
+#include "file.h"
+#include "orphan.h"
+#include "print-tree.h"
+#include "tree-checker.h"
+#include "delayed-inode.h"
+
+#define MAX_CONFLICT_INODES 10
/* magic values for the inode_only field in btrfs_log_inode:
*
@@ -30,8 +42,6 @@
enum {
LOG_INODE_ALL,
LOG_INODE_EXISTS,
- LOG_OTHER_INODE,
- LOG_OTHER_INODE_ALL,
};
/*
@@ -93,18 +103,135 @@ enum {
LOG_WALK_REPLAY_ALL,
};
+/*
+ * The walk control struct is used to pass state down the chain when processing
+ * the log tree. The stage field tells us which part of the log tree processing
+ * we are currently doing.
+ */
+struct walk_control {
+ /*
+ * Signal that we are freeing the metadata extents of a log tree.
+ * This is used at transaction commit time while freeing a log tree.
+ */
+ bool free;
+
+ /*
+ * Signal that we are pinning the metadata extents of a log tree and the
+ * data extents its leaves point to (if using mixed block groups).
+ * This happens in the first stage of log replay to ensure that during
+ * replay, while we are modifying subvolume trees, we don't overwrite
+ * the metadata extents of log trees.
+ */
+ bool pin;
+
+ /* What stage of the replay code we're currently in. */
+ int stage;
+
+ /*
+ * Ignore any items from the inode currently being processed. Needs
+ * to be set every time we find a BTRFS_INODE_ITEM_KEY.
+ */
+ bool ignore_cur_inode;
+
+ /*
+ * The root we are currently replaying to. This is NULL for the replay
+ * stage LOG_WALK_PIN_ONLY.
+ */
+ struct btrfs_root *root;
+
+ /* The log tree we are currently processing (not NULL for any stage). */
+ struct btrfs_root *log;
+
+ /* The transaction handle used for replaying all log trees. */
+ struct btrfs_trans_handle *trans;
+
+ /*
+ * The function that gets used to process blocks we find in the tree.
+ * Note the extent_buffer might not be up to date when it is passed in,
+ * and it must be checked or read if you need the data inside it.
+ */
+ int (*process_func)(struct extent_buffer *eb,
+ struct walk_control *wc, u64 gen, int level);
+
+ /*
+ * The following are used only when stage is >= LOG_WALK_REPLAY_INODES
+ * and by the replay_one_buffer() callback.
+ */
+
+ /* The current log leaf being processed. */
+ struct extent_buffer *log_leaf;
+ /* The key being processed of the current log leaf. */
+ struct btrfs_key log_key;
+ /* The slot being processed of the current log leaf. */
+ int log_slot;
+
+ /* A path used for searches and modifications to subvolume trees. */
+ struct btrfs_path *subvol_path;
+};
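The struct above packages everything a recursive log-tree walk needs: mode flags, the replay stage, the roots involved, and a per-block callback, so the walker itself stays generic. A toy sketch of that control-struct-plus-callback shape follows (flat list instead of a tree, all names invented):

#include <stdio.h>

/* Illustrative walk state: a stage plus a per-node callback, mirroring the
 * shape of the walk_control above. */
struct demo_walk {
	int stage;
	int (*process)(int node, struct demo_walk *wc);
};

static int print_node(int node, struct demo_walk *wc)
{
	printf("stage %d: node %d\n", wc->stage, node);
	return 0;
}

/* Drive the callback over a flat list of "nodes"; the real walker descends
 * a tree, but the control-struct pattern is the same. */
static int walk_all(struct demo_walk *wc, const int *nodes, int nr)
{
	for (int i = 0; i < nr; i++) {
		int ret = wc->process(nodes[i], wc);

		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	const int nodes[] = { 1, 2, 3 };
	struct demo_walk wc = { .stage = 0, .process = print_node };

	return walk_all(&wc, nodes, 3);
}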
+
+static void do_abort_log_replay(struct walk_control *wc, const char *function,
+ unsigned int line, int error, const char *fmt, ...)
+{
+ struct btrfs_fs_info *fs_info = wc->trans->fs_info;
+ struct va_format vaf;
+ va_list args;
+
+ /*
+ * Do nothing if we already aborted, to avoid dumping leaves again which
+ * can be verbose. Furthermore, only the first call is useful since it
+ * is where we have a problem. Note that we do not use the flag
+ * BTRFS_FS_STATE_TRANS_ABORTED because log replay calls functions that
+ * are outside of tree-log.c that can abort transactions (such as
+ * btrfs_add_link() for example), so if that happens we still want to
+ * dump all log replay specific information below.
+ */
+ if (test_and_set_bit(BTRFS_FS_STATE_LOG_REPLAY_ABORTED, &fs_info->fs_state))
+ return;
+
+ btrfs_abort_transaction(wc->trans, error);
+
+ if (wc->subvol_path->nodes[0]) {
+ btrfs_crit(fs_info,
+ "subvolume (root %llu) leaf currently being processed:",
+ btrfs_root_id(wc->root));
+ btrfs_print_leaf(wc->subvol_path->nodes[0]);
+ }
+
+ if (wc->log_leaf) {
+ btrfs_crit(fs_info,
+"log tree (for root %llu) leaf currently being processed (slot %d key " BTRFS_KEY_FMT "):",
+ btrfs_root_id(wc->root), wc->log_slot,
+ BTRFS_KEY_FMT_VALUE(&wc->log_key));
+ btrfs_print_leaf(wc->log_leaf);
+ }
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "log replay failed in %s:%u for root %llu, stage %d, with error %d: %pV",
+ function, line, btrfs_root_id(wc->root), wc->stage, error, &vaf);
+
+ va_end(args);
+}
+
+/*
+ * Use this for aborting a transaction during log replay while we are down the
+ * call chain of replay_one_buffer(), so that we get a lot more useful
+ * information for debugging issues when compared to a plain call to
+ * btrfs_abort_transaction().
+ */
+#define btrfs_abort_log_replay(wc, error, fmt, args...) \
+ do_abort_log_replay((wc), __func__, __LINE__, (error), fmt, ##args)
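btrfs_abort_log_replay() is a macro so that __func__ and __LINE__ are captured at the call site rather than inside the helper. The sketch below shows the same wrapper pattern in plain userspace C; the names are invented and it relies on the GNU ##__VA_ARGS__ extension, much as the kernel macro relies on ##args.

#include <stdarg.h>
#include <stdio.h>

/* Variadic helper that prints one diagnostic with the call site attached
 * (userspace stand-in for a kernel-style abort helper; names are made up). */
static void report_failure(const char *function, unsigned int line,
			   int error, const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "failure in %s:%u, error %d: ", function, line, error);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
}

/* The macro captures __func__/__LINE__ at the call site; ##__VA_ARGS__ is a
 * GNU/clang extension that drops the trailing comma when no args are given. */
#define report(error, fmt, ...) \
	report_failure(__func__, __LINE__, (error), fmt, ##__VA_ARGS__)

int main(void)
{
	report(-5, "failed to replay key (%llu %u %llu)", 256ULL, 1U, 0ULL);
	return 0;
}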
+
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
int inode_only,
struct btrfs_log_ctx *ctx);
-static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, u64 objectid);
-static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- u64 dirid, int del_all);
+static int link_to_fixup_dir(struct walk_control *wc, u64 objectid);
+static noinline int replay_dir_deletes(struct walk_control *wc,
+ u64 dirid, bool del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);
/*
@@ -130,6 +257,28 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
* and once to do all the other items.
*/
+static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+{
+ unsigned int nofs_flag;
+ struct btrfs_inode *inode;
+
+ /* Only meant to be called for subvolume roots and not for log roots. */
+ ASSERT(btrfs_is_fstree(btrfs_root_id(root)), "root_id=%llu", btrfs_root_id(root));
+
+ /*
+ * We're holding a transaction handle whether we are logging or
+ * replaying a log tree, so we must make sure NOFS semantics apply
+ * because btrfs_alloc_inode() may be triggered and it uses GFP_KERNEL
+ * to allocate an inode, which can recurse back into the filesystem and
+ * attempt a transaction commit, resulting in a deadlock.
+ */
+ nofs_flag = memalloc_nofs_save();
+ inode = btrfs_iget(objectid, root);
+ memalloc_nofs_restore(nofs_flag);
+
+ return inode;
+}
+
/*
* start a sub transaction and setup the log tree
* this increments the log tree writer count to make the people
@@ -170,7 +319,7 @@ again:
int index = (root->log_transid + 1) % 2;
if (btrfs_need_log_full_commit(trans)) {
- ret = -EAGAIN;
+ ret = BTRFS_LOG_FORCE_COMMIT;
goto out;
}
@@ -193,7 +342,7 @@ again:
* writing.
*/
if (zoned && !created) {
- ret = -EAGAIN;
+ ret = BTRFS_LOG_FORCE_COMMIT;
goto out;
}
@@ -269,77 +418,14 @@ void btrfs_end_log_trans(struct btrfs_root *root)
}
}
-static int btrfs_write_tree_block(struct extent_buffer *buf)
-{
- return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
- buf->start + buf->len - 1);
-}
-
-static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
-{
- filemap_fdatawait_range(buf->pages[0]->mapping,
- buf->start, buf->start + buf->len - 1);
-}
-
-/*
- * the walk control struct is used to pass state down the chain when
- * processing the log tree. The stage field tells us which part
- * of the log tree processing we are currently doing. The others
- * are state fields used for that specific part
- */
-struct walk_control {
- /* should we free the extent on disk when done? This is used
- * at transaction commit time while freeing a log tree
- */
- int free;
-
- /* should we write out the extent buffer? This is used
- * while flushing the log tree to disk during a sync
- */
- int write;
-
- /* should we wait for the extent buffer io to finish? Also used
- * while flushing the log tree to disk for a sync
- */
- int wait;
-
- /* pin only walk, we record which extents on disk belong to the
- * log trees
- */
- int pin;
-
- /* what stage of the replay code we're currently in */
- int stage;
-
- /*
- * Ignore any items from the inode currently being processed. Needs
- * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
- * the LOG_WALK_REPLAY_INODES stage.
- */
- bool ignore_cur_inode;
-
- /* the root we are currently replaying */
- struct btrfs_root *replay_dest;
-
- /* the trans handle for the current replay */
- struct btrfs_trans_handle *trans;
-
- /* the function that gets used to process blocks we find in the
- * tree. Note the extent_buffer might not be up to date when it is
- * passed in, and it must be checked or read if you need the data
- * inside it
- */
- int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
- struct walk_control *wc, u64 gen, int level);
-};
-
/*
* process_func used to pin down extents, write them or wait on them
*/
-static int process_one_buffer(struct btrfs_root *log,
- struct extent_buffer *eb,
+static int process_one_buffer(struct extent_buffer *eb,
struct walk_control *wc, u64 gen, int level)
{
+ struct btrfs_root *log = wc->log;
+ struct btrfs_trans_handle *trans = wc->trans;
struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
@@ -348,93 +434,114 @@ static int process_one_buffer(struct btrfs_root *log,
* pin down any logged extents, so we have to read the block.
*/
if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
- ret = btrfs_read_buffer(eb, gen, level, NULL);
- if (ret)
+ struct btrfs_tree_parent_check check = {
+ .level = level,
+ .transid = gen
+ };
+
+ ret = btrfs_read_extent_buffer(eb, &check);
+ if (unlikely(ret)) {
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
+ }
}
- if (wc->pin)
- ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
- eb->len);
+ if (wc->pin) {
+ ASSERT(trans != NULL);
+ ret = btrfs_pin_extent_for_log_replay(trans, eb);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
- if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
- if (wc->pin && btrfs_header_level(eb) == 0)
+ if (btrfs_buffer_uptodate(eb, gen, false) && level == 0) {
ret = btrfs_exclude_logged_extents(eb);
- if (wc->write)
- btrfs_write_tree_block(eb);
- if (wc->wait)
- btrfs_wait_tree_block_writeback(eb);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ }
}
return ret;
}
-static int do_overwrite_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+/*
+ * Item overwrite used by log replay. The given log tree leaf, slot and key
+ * from the walk_control structure all refer to the source data we are copying
+ * out.
+ *
+ * The given root is for the tree we are copying into, and path is a scratch
+ * path for use in this function (it should be released on entry and will be
+ * released on exit).
+ *
+ * If the key is already in the destination tree the existing item is
+ * overwritten. If the existing item isn't big enough, it is extended.
+ * If it is too large, it is truncated.
+ *
+ * If the key isn't in the destination yet, a new item is inserted.
+ */
+static int overwrite_item(struct walk_control *wc)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
int ret;
u32 item_size;
u64 saved_i_size = 0;
int save_old_i_size = 0;
unsigned long src_ptr;
unsigned long dst_ptr;
- int overwrite_root = 0;
- bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
-
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
- overwrite_root = 1;
-
- item_size = btrfs_item_size_nr(eb, slot);
- src_ptr = btrfs_item_ptr_offset(eb, slot);
-
- /* Our caller must have done a search for the key for us. */
- ASSERT(path->nodes[0] != NULL);
+ struct extent_buffer *dst_eb;
+ int dst_slot;
+ const bool is_inode_item = (wc->log_key.type == BTRFS_INODE_ITEM_KEY);
/*
- * And the slot must point to the exact key or the slot where the key
- * should be at (the first item with a key greater than 'key')
+ * This is only used during log replay, so the root is always from a
+ * fs/subvolume tree. In case we ever need to support a log root, then
+ * we'll have to clone the leaf in the path, release the path and use
+ * the leaf before writing into the log tree. See the comments at
+ * copy_items() for more details.
*/
- if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
- struct btrfs_key found_key;
+ ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID, "root_id=%llu", btrfs_root_id(root));
- btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
- ret = btrfs_comp_cpu_keys(&found_key, key);
- ASSERT(ret >= 0);
- } else {
- ret = 1;
+ item_size = btrfs_item_size(wc->log_leaf, wc->log_slot);
+ src_ptr = btrfs_item_ptr_offset(wc->log_leaf, wc->log_slot);
+
+ /* Look for the key in the destination tree. */
+ ret = btrfs_search_slot(NULL, root, &wc->log_key, wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search subvolume tree for key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&wc->log_key),
+ btrfs_root_id(root));
+ return ret;
}
+ dst_eb = wc->subvol_path->nodes[0];
+ dst_slot = wc->subvol_path->slots[0];
+
if (ret == 0) {
char *src_copy;
- char *dst_copy;
- u32 dst_size = btrfs_item_size_nr(path->nodes[0],
- path->slots[0]);
+ const u32 dst_size = btrfs_item_size(dst_eb, dst_slot);
+
if (dst_size != item_size)
goto insert;
if (item_size == 0) {
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
- dst_copy = kmalloc(item_size, GFP_NOFS);
src_copy = kmalloc(item_size, GFP_NOFS);
- if (!dst_copy || !src_copy) {
- btrfs_release_path(path);
- kfree(dst_copy);
- kfree(src_copy);
+ if (!src_copy) {
+ btrfs_abort_log_replay(wc, -ENOMEM,
+ "failed to allocate memory for log leaf item");
return -ENOMEM;
}
- read_extent_buffer(eb, src_copy, src_ptr, item_size);
-
- dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
- read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
- item_size);
- ret = memcmp(dst_copy, src_copy, item_size);
+ read_extent_buffer(wc->log_leaf, src_copy, src_ptr, item_size);
+ dst_ptr = btrfs_item_ptr_offset(dst_eb, dst_slot);
+ ret = memcmp_extent_buffer(dst_eb, src_copy, dst_ptr, item_size);
- kfree(dst_copy);
kfree(src_copy);
/*
* they have the same contents, just return, this saves
@@ -443,7 +550,7 @@ static int do_overwrite_item(struct btrfs_trans_handle *trans,
* sync
*/
if (ret == 0) {
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
@@ -451,28 +558,28 @@ static int do_overwrite_item(struct btrfs_trans_handle *trans,
* We need to load the old nbytes into the inode so when we
* replay the extents we've logged we get the right nbytes.
*/
- if (inode_item) {
+ if (is_inode_item) {
struct btrfs_inode_item *item;
u64 nbytes;
u32 mode;
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ item = btrfs_item_ptr(dst_eb, dst_slot,
struct btrfs_inode_item);
- nbytes = btrfs_inode_nbytes(path->nodes[0], item);
- item = btrfs_item_ptr(eb, slot,
+ nbytes = btrfs_inode_nbytes(dst_eb, item);
+ item = btrfs_item_ptr(wc->log_leaf, wc->log_slot,
struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, nbytes);
+ btrfs_set_inode_nbytes(wc->log_leaf, item, nbytes);
/*
* If this is a directory we need to reset the i_size to
* 0 so that we can set it up properly when replaying
* the rest of the items in this log.
*/
- mode = btrfs_inode_mode(eb, item);
+ mode = btrfs_inode_mode(wc->log_leaf, item);
if (S_ISDIR(mode))
- btrfs_set_inode_size(eb, item, 0);
+ btrfs_set_inode_size(wc->log_leaf, item, 0);
}
- } else if (inode_item) {
+ } else if (is_inode_item) {
struct btrfs_inode_item *item;
u32 mode;
@@ -480,40 +587,43 @@ static int do_overwrite_item(struct btrfs_trans_handle *trans,
* New inode, set nbytes to 0 so that the nbytes comes out
* properly when we replay the extents.
*/
- item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, 0);
+ item = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_inode_item);
+ btrfs_set_inode_nbytes(wc->log_leaf, item, 0);
/*
* If this is a directory we need to reset the i_size to 0 so
* that we can set it up properly when replaying the rest of
* the items in this log.
*/
- mode = btrfs_inode_mode(eb, item);
+ mode = btrfs_inode_mode(wc->log_leaf, item);
if (S_ISDIR(mode))
- btrfs_set_inode_size(eb, item, 0);
+ btrfs_set_inode_size(wc->log_leaf, item, 0);
}
insert:
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
/* try to insert the key into the destination tree */
- path->skip_release_on_error = 1;
- ret = btrfs_insert_empty_item(trans, root, path,
- key, item_size);
- path->skip_release_on_error = 0;
+ wc->subvol_path->skip_release_on_error = true;
+ ret = btrfs_insert_empty_item(trans, root, wc->subvol_path, &wc->log_key, item_size);
+ wc->subvol_path->skip_release_on_error = false;
+
+ dst_eb = wc->subvol_path->nodes[0];
+ dst_slot = wc->subvol_path->slots[0];
/* make sure any existing item is the correct size */
if (ret == -EEXIST || ret == -EOVERFLOW) {
- u32 found_size;
- found_size = btrfs_item_size_nr(path->nodes[0],
- path->slots[0]);
+ const u32 found_size = btrfs_item_size(dst_eb, dst_slot);
+
if (found_size > item_size)
- btrfs_truncate_item(path, item_size, 1);
+ btrfs_truncate_item(trans, wc->subvol_path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(path, item_size - found_size);
+ btrfs_extend_item(trans, wc->subvol_path, item_size - found_size);
} else if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert item for key " BTRFS_KEY_FMT,
+ BTRFS_KEY_FMT_VALUE(&wc->log_key));
return ret;
}
- dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
- path->slots[0]);
+ dst_ptr = btrfs_item_ptr_offset(dst_eb, dst_slot);
/* don't overwrite an existing inode if the generation number
* was logged as zero. This is done when the tree logging code
@@ -524,16 +634,15 @@ insert:
* state of the tree found in the subvolume, and i_size is modified
* as it goes
*/
- if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
+ if (is_inode_item && ret == -EEXIST) {
struct btrfs_inode_item *src_item;
struct btrfs_inode_item *dst_item;
src_item = (struct btrfs_inode_item *)src_ptr;
dst_item = (struct btrfs_inode_item *)dst_ptr;
- if (btrfs_inode_generation(eb, src_item) == 0) {
- struct extent_buffer *dst_eb = path->nodes[0];
- const u64 ino_size = btrfs_inode_size(eb, src_item);
+ if (btrfs_inode_generation(wc->log_leaf, src_item) == 0) {
+ const u64 ino_size = btrfs_inode_size(wc->log_leaf, src_item);
/*
* For regular files an ino_size == 0 is used only when
@@ -542,89 +651,55 @@ insert:
* case don't set the size of the inode in the fs/subvol
* tree, otherwise we would be throwing valid data away.
*/
- if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
+ if (S_ISREG(btrfs_inode_mode(wc->log_leaf, src_item)) &&
S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
ino_size != 0)
btrfs_set_inode_size(dst_eb, dst_item, ino_size);
goto no_copy;
}
- if (overwrite_root &&
- S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
- S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
+ if (S_ISDIR(btrfs_inode_mode(wc->log_leaf, src_item)) &&
+ S_ISDIR(btrfs_inode_mode(dst_eb, dst_item))) {
save_old_i_size = 1;
- saved_i_size = btrfs_inode_size(path->nodes[0],
- dst_item);
+ saved_i_size = btrfs_inode_size(dst_eb, dst_item);
}
}
- copy_extent_buffer(path->nodes[0], eb, dst_ptr,
- src_ptr, item_size);
+ copy_extent_buffer(dst_eb, wc->log_leaf, dst_ptr, src_ptr, item_size);
if (save_old_i_size) {
struct btrfs_inode_item *dst_item;
+
dst_item = (struct btrfs_inode_item *)dst_ptr;
- btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
+ btrfs_set_inode_size(dst_eb, dst_item, saved_i_size);
}
/* make sure the generation is filled in */
- if (key->type == BTRFS_INODE_ITEM_KEY) {
+ if (is_inode_item) {
struct btrfs_inode_item *dst_item;
+
dst_item = (struct btrfs_inode_item *)dst_ptr;
- if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
- btrfs_set_inode_generation(path->nodes[0], dst_item,
- trans->transid);
- }
+ if (btrfs_inode_generation(dst_eb, dst_item) == 0)
+ btrfs_set_inode_generation(dst_eb, dst_item, trans->transid);
}
no_copy:
- btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
-/*
- * Item overwrite used by replay and tree logging. eb, slot and key all refer
- * to the src data we are copying out.
- *
- * root is the tree we are copying into, and path is a scratch
- * path for use in this function (it should be released on entry and
- * will be released on exit).
- *
- * If the key is already in the destination tree the existing item is
- * overwritten. If the existing item isn't big enough, it is extended.
- * If it is too large, it is truncated.
- *
- * If the key isn't in the destination yet, a new item is inserted.
- */
-static int overwrite_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
+ struct fscrypt_str *name)
{
- int ret;
+ char *buf;
- /* Look for the key in the destination tree. */
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
- if (ret < 0)
- return ret;
-
- return do_overwrite_item(trans, root, path, eb, slot, key);
-}
-
-/*
- * simple helper to read an inode off the disk from a given root
- * This can only be called for subvolume roots and not for the log
- */
-static noinline struct inode *read_one_inode(struct btrfs_root *root,
- u64 objectid)
-{
- struct inode *inode;
+ buf = kmalloc(len, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
- inode = btrfs_iget(root->fs_info->sb, objectid, root);
- if (IS_ERR(inode))
- inode = NULL;
- return inode;
+ read_extent_buffer(eb, buf, (unsigned long)start, len);
+ name->name = buf;
+ name->len = len;
+ return 0;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
@@ -639,51 +714,53 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
* The extent is inserted into the file, dropping any existing extents
* from the file that overlap the new one.
*/
-static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static noinline int replay_one_extent(struct walk_control *wc)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_fs_info *fs_info = root->fs_info;
int found_type;
u64 extent_end;
- u64 start = key->offset;
+ const u64 start = wc->log_key.offset;
u64 nbytes = 0;
+ u64 csum_start;
+ u64 csum_end;
+ LIST_HEAD(ordered_sums);
+ u64 offset;
+ unsigned long dest_offset;
+ struct btrfs_key ins;
struct btrfs_file_extent_item *item;
- struct inode *inode = NULL;
- unsigned long size;
+ struct btrfs_inode *inode = NULL;
int ret = 0;
- item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
- found_type = btrfs_file_extent_type(eb, item);
+ item = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_file_extent_item);
+ found_type = btrfs_file_extent_type(wc->log_leaf, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- nbytes = btrfs_file_extent_num_bytes(eb, item);
- extent_end = start + nbytes;
-
- /*
- * We don't add to the inodes nbytes if we are prealloc or a
- * hole.
- */
- if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
- nbytes = 0;
+ extent_end = start + btrfs_file_extent_num_bytes(wc->log_leaf, item);
+ /* Holes don't take up space. */
+ if (btrfs_file_extent_disk_bytenr(wc->log_leaf, item) != 0)
+ nbytes = btrfs_file_extent_num_bytes(wc->log_leaf, item);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- size = btrfs_file_extent_ram_bytes(eb, item);
- nbytes = btrfs_file_extent_ram_bytes(eb, item);
- extent_end = ALIGN(start + size,
- fs_info->sectorsize);
+ nbytes = btrfs_file_extent_ram_bytes(wc->log_leaf, item);
+ extent_end = ALIGN(start + nbytes, fs_info->sectorsize);
} else {
- ret = 0;
- goto out;
- }
-
- inode = read_one_inode(root, key->objectid);
- if (!inode) {
- ret = -EIO;
- goto out;
+ btrfs_abort_log_replay(wc, -EUCLEAN,
+ "unexpected extent type=%d root=%llu inode=%llu offset=%llu",
+ found_type, btrfs_root_id(root),
+ wc->log_key.objectid, wc->log_key.offset);
+ return -EUCLEAN;
+ }
+
+ inode = btrfs_iget_logging(wc->log_key.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get inode %llu for root %llu",
+ wc->log_key.objectid, btrfs_root_id(root));
+ return ret;
}
/*
@@ -691,224 +768,299 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* file. This must be done before the btrfs_drop_extents run
* so we don't try to drop this extent.
*/
- ret = btrfs_lookup_file_extent(trans, root, path,
- btrfs_ino(BTRFS_I(inode)), start, 0);
+ ret = btrfs_lookup_file_extent(trans, root, wc->subvol_path,
+ btrfs_ino(inode), start, 0);
if (ret == 0 &&
(found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
- struct btrfs_file_extent_item cmp1;
- struct btrfs_file_extent_item cmp2;
- struct btrfs_file_extent_item *existing;
- struct extent_buffer *leaf;
-
- leaf = path->nodes[0];
- existing = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
+ struct btrfs_file_extent_item existing;
+ unsigned long ptr;
- read_extent_buffer(eb, &cmp1, (unsigned long)item,
- sizeof(cmp1));
- read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
- sizeof(cmp2));
+ ptr = btrfs_item_ptr_offset(leaf, wc->subvol_path->slots[0]);
+ read_extent_buffer(leaf, &existing, ptr, sizeof(existing));
/*
* we already have a pointer to this exact extent,
* we don't have to do anything
*/
- if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
- btrfs_release_path(path);
+ if (memcmp_extent_buffer(wc->log_leaf, &existing, (unsigned long)item,
+ sizeof(existing)) == 0) {
+ btrfs_release_path(wc->subvol_path);
goto out;
}
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
/* drop any overlapping extents */
drop_args.start = start;
drop_args.end = extent_end;
drop_args.drop_cache = true;
- ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
- if (ret)
+ drop_args.path = wc->subvol_path;
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to drop extents for inode %llu range [%llu, %llu) root %llu",
+ wc->log_key.objectid, start, extent_end,
+ btrfs_root_id(root));
goto out;
+ }
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- u64 offset;
- unsigned long dest_offset;
- struct btrfs_key ins;
-
- if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
- btrfs_fs_incompat(fs_info, NO_HOLES))
- goto update_inode;
-
- ret = btrfs_insert_empty_item(trans, root, path, key,
- sizeof(*item));
+ if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ /* inline extents are easy, we just overwrite them */
+ ret = overwrite_item(wc);
if (ret)
goto out;
- dest_offset = btrfs_item_ptr_offset(path->nodes[0],
- path->slots[0]);
- copy_extent_buffer(path->nodes[0], eb, dest_offset,
- (unsigned long)item, sizeof(*item));
+ goto update_inode;
+ }
- ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
- ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
- ins.type = BTRFS_EXTENT_ITEM_KEY;
- offset = key->offset - btrfs_file_extent_offset(eb, item);
+ /*
+ * If not an inline extent, it can only be a regular or prealloc one.
+ * We have checked that above and returned -EUCLEAN if not.
+ */
- /*
- * Manually record dirty extent, as here we did a shallow
- * file extent item copy and skip normal backref update,
- * but modifying extent tree all by ourselves.
- * So need to manually record dirty extent for qgroup,
- * as the owner of the file extent changed from log tree
- * (doesn't affect qgroup) to fs/file tree(affects qgroup)
- */
- ret = btrfs_qgroup_trace_extent(trans,
- btrfs_file_extent_disk_bytenr(eb, item),
- btrfs_file_extent_disk_num_bytes(eb, item),
- GFP_NOFS);
- if (ret < 0)
+ /* A hole and NO_HOLES feature enabled, nothing else to do. */
+ if (btrfs_file_extent_disk_bytenr(wc->log_leaf, item) == 0 &&
+ btrfs_fs_incompat(fs_info, NO_HOLES))
+ goto update_inode;
+
+ ret = btrfs_insert_empty_item(trans, root, wc->subvol_path,
+ &wc->log_key, sizeof(*item));
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert item with key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&wc->log_key),
+ btrfs_root_id(root));
+ goto out;
+ }
+ dest_offset = btrfs_item_ptr_offset(wc->subvol_path->nodes[0],
+ wc->subvol_path->slots[0]);
+ copy_extent_buffer(wc->subvol_path->nodes[0], wc->log_leaf, dest_offset,
+ (unsigned long)item, sizeof(*item));
+
+ /*
+ * We have an explicit hole and NO_HOLES is not enabled. We have added
+ * the hole file extent item to the subvolume tree, so we don't have
+ * anything else to do other than update the file extent item range and
+ * update the inode item.
+ */
+ if (btrfs_file_extent_disk_bytenr(wc->log_leaf, item) == 0) {
+ btrfs_release_path(wc->subvol_path);
+ goto update_inode;
+ }
+
+ ins.objectid = btrfs_file_extent_disk_bytenr(wc->log_leaf, item);
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+ ins.offset = btrfs_file_extent_disk_num_bytes(wc->log_leaf, item);
+ offset = wc->log_key.offset - btrfs_file_extent_offset(wc->log_leaf, item);
+
+ /*
+ * Manually record dirty extent, as here we did a shallow file extent
+ * item copy and skip normal backref update, but modifying extent tree
+ * all by ourselves. So need to manually record dirty extent for qgroup,
+ * as the owner of the file extent changed from log tree (doesn't affect
+ * qgroup) to fs/file tree (affects qgroup).
+ */
+ ret = btrfs_qgroup_trace_extent(trans, ins.objectid, ins.offset);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to trace extent for bytenr %llu disk_num_bytes %llu inode %llu root %llu",
+ ins.objectid, ins.offset,
+ wc->log_key.objectid, btrfs_root_id(root));
+ goto out;
+ }
+
+ /*
+ * Is this extent already allocated in the extent tree?
+ * If so, just add a reference.
+ */
+ ret = btrfs_lookup_data_extent(fs_info, ins.objectid, ins.offset);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to lookup data extent for bytenr %llu disk_num_bytes %llu inode %llu root %llu",
+ ins.objectid, ins.offset,
+ wc->log_key.objectid, btrfs_root_id(root));
+ goto out;
+ } else if (ret == 0) {
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = ins.objectid,
+ .num_bytes = ins.offset,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+
+ btrfs_init_data_ref(&ref, wc->log_key.objectid, offset, 0, false);
+ ret = btrfs_inc_extent_ref(trans, &ref);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to increment data extent for bytenr %llu disk_num_bytes %llu inode %llu root %llu",
+ ins.objectid, ins.offset,
+ wc->log_key.objectid,
+ btrfs_root_id(root));
goto out;
+ }
+ } else {
+ /* Insert the extent pointer in the extent tree. */
+ ret = btrfs_alloc_logged_file_extent(trans, btrfs_root_id(root),
+ wc->log_key.objectid, offset, &ins);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to allocate logged data extent for bytenr %llu disk_num_bytes %llu offset %llu inode %llu root %llu",
+ ins.objectid, ins.offset, offset,
+ wc->log_key.objectid, btrfs_root_id(root));
+ goto out;
+ }
+ }
- if (ins.objectid > 0) {
- struct btrfs_ref ref = { 0 };
- u64 csum_start;
- u64 csum_end;
- LIST_HEAD(ordered_sums);
+ btrfs_release_path(wc->subvol_path);
- /*
- * is this extent already allocated in the extent
- * allocation tree? If so, just add a reference
- */
- ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
- ins.offset);
- if (ret < 0) {
- goto out;
- } else if (ret == 0) {
- btrfs_init_generic_ref(&ref,
- BTRFS_ADD_DELAYED_REF,
- ins.objectid, ins.offset, 0);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- key->objectid, offset, 0, false);
- ret = btrfs_inc_extent_ref(trans, &ref);
- if (ret)
- goto out;
- } else {
- /*
- * insert the extent pointer in the extent
- * allocation tree
- */
- ret = btrfs_alloc_logged_file_extent(trans,
- root->root_key.objectid,
- key->objectid, offset, &ins);
- if (ret)
- goto out;
- }
- btrfs_release_path(path);
+ if (btrfs_file_extent_compression(wc->log_leaf, item)) {
+ csum_start = ins.objectid;
+ csum_end = csum_start + ins.offset;
+ } else {
+ csum_start = ins.objectid + btrfs_file_extent_offset(wc->log_leaf, item);
+ csum_end = csum_start + btrfs_file_extent_num_bytes(wc->log_leaf, item);
+ }
- if (btrfs_file_extent_compression(eb, item)) {
- csum_start = ins.objectid;
- csum_end = csum_start + ins.offset;
- } else {
- csum_start = ins.objectid +
- btrfs_file_extent_offset(eb, item);
- csum_end = csum_start +
- btrfs_file_extent_num_bytes(eb, item);
- }
+ ret = btrfs_lookup_csums_list(root->log_root, csum_start, csum_end - 1,
+ &ordered_sums, false);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookups csums for range [%llu, %llu) inode %llu root %llu",
+ csum_start, csum_end, wc->log_key.objectid,
+ btrfs_root_id(root));
+ goto out;
+ }
+ ret = 0;
+ /*
+ * Now delete all existing csums in the csum root that cover our range.
+ * We do this because we can have an extent that is completely
+ * referenced by one file extent item and partially referenced by
+ * another file extent item (like after using the clone or extent_same
+ * ioctls). In this case if we end up doing the replay of the one that
+ * partially references the extent first, and we do not do the csum
+ * deletion below, we can get 2 csum items in the csum tree that overlap
+ * each other. For example, imagine our log has the two following file
+ * extent items:
+ *
+ * key (257 EXTENT_DATA 409600)
+ * extent data disk byte 12845056 nr 102400
+ * extent data offset 20480 nr 20480 ram 102400
+ *
+ * key (257 EXTENT_DATA 819200)
+ * extent data disk byte 12845056 nr 102400
+ * extent data offset 0 nr 102400 ram 102400
+ *
+ * Where the second one fully references the 100K extent that starts at
+ * disk byte 12845056, and the log tree has a single csum item that
+ * covers the entire range of the extent:
+ *
+ * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+ *
+ * After the first file extent item is replayed, the csum tree gets the
+ * following csum item:
+ *
+ * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+ *
+ * Which covers the 20K sub-range starting at offset 20K of our extent.
+ * Now when we replay the second file extent item, if we do not delete
+ * existing csum items that cover any of its blocks, we end up getting
+ * two csum items in our csum tree that overlap each other:
+ *
+ * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+ * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+ *
+ * Which is a problem, because after this anyone trying to look up
+ * the checksum of any block of our extent starting at an offset of 40K
+ * or higher, will end up looking at the second csum item only, which
+ * does not contain the checksum for any block starting at offset 40K or
+ * higher of our extent.
+ */
+ while (!list_empty(&ordered_sums)) {
+ struct btrfs_ordered_sum *sums;
+ struct btrfs_root *csum_root;
- ret = btrfs_lookup_csums_range(root->log_root,
- csum_start, csum_end - 1,
- &ordered_sums, 0);
+ sums = list_first_entry(&ordered_sums, struct btrfs_ordered_sum, list);
+ csum_root = btrfs_csum_root(fs_info, sums->logical);
+ if (!ret) {
+ ret = btrfs_del_csums(trans, csum_root, sums->logical,
+ sums->len);
if (ret)
- goto out;
- /*
- * Now delete all existing cums in the csum root that
- * cover our range. We do this because we can have an
- * extent that is completely referenced by one file
- * extent item and partially referenced by another
- * file extent item (like after using the clone or
- * extent_same ioctls). In this case if we end up doing
- * the replay of the one that partially references the
- * extent first, and we do not do the csum deletion
- * below, we can get 2 csum items in the csum tree that
- * overlap each other. For example, imagine our log has
- * the two following file extent items:
- *
- * key (257 EXTENT_DATA 409600)
- * extent data disk byte 12845056 nr 102400
- * extent data offset 20480 nr 20480 ram 102400
- *
- * key (257 EXTENT_DATA 819200)
- * extent data disk byte 12845056 nr 102400
- * extent data offset 0 nr 102400 ram 102400
- *
- * Where the second one fully references the 100K extent
- * that starts at disk byte 12845056, and the log tree
- * has a single csum item that covers the entire range
- * of the extent:
- *
- * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
- *
- * After the first file extent item is replayed, the
- * csum tree gets the following csum item:
- *
- * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
- *
- * Which covers the 20K sub-range starting at offset 20K
- * of our extent. Now when we replay the second file
- * extent item, if we do not delete existing csum items
- * that cover any of its blocks, we end up getting two
- * csum items in our csum tree that overlap each other:
- *
- * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
- * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
- *
- * Which is a problem, because after this anyone trying
- * to lookup up for the checksum of any block of our
- * extent starting at an offset of 40K or higher, will
- * end up looking at the second csum item only, which
- * does not contain the checksum for any block starting
- * at offset 40K or higher of our extent.
- */
- while (!list_empty(&ordered_sums)) {
- struct btrfs_ordered_sum *sums;
- sums = list_entry(ordered_sums.next,
- struct btrfs_ordered_sum,
- list);
- if (!ret)
- ret = btrfs_del_csums(trans,
- fs_info->csum_root,
- sums->bytenr,
- sums->len);
- if (!ret)
- ret = btrfs_csum_file_blocks(trans,
- fs_info->csum_root, sums);
- list_del(&sums->list);
- kfree(sums);
- }
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete csums for range [%llu, %llu) inode %llu root %llu",
+ sums->logical,
+ sums->logical + sums->len,
+ wc->log_key.objectid,
+ btrfs_root_id(root));
+ }
+ if (!ret) {
+ ret = btrfs_csum_file_blocks(trans, csum_root, sums);
if (ret)
- goto out;
- } else {
- btrfs_release_path(path);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to add csums for range [%llu, %llu) inode %llu root %llu",
+ sums->logical,
+ sums->logical + sums->len,
+ wc->log_key.objectid,
+ btrfs_root_id(root));
}
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- /* inline extents are easy, we just overwrite them */
- ret = overwrite_item(trans, root, path, eb, slot, key);
- if (ret)
- goto out;
+ list_del(&sums->list);
+ kfree(sums);
}
-
- ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
- extent_end - start);
if (ret)
goto out;
update_inode:
- btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_inode_set_file_extent_range(inode, start, extent_end - start);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to set file extent range [%llu, %llu) inode %llu root %llu",
+ start, extent_end, wc->log_key.objectid,
+ btrfs_root_id(root));
+ goto out;
+ }
+
+ btrfs_update_inode_bytes(inode, nbytes, drop_args.bytes_found);
+ ret = btrfs_update_inode(trans, inode);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ wc->log_key.objectid, btrfs_root_id(root));
out:
- if (inode)
- iput(inode);
+ iput(&inode->vfs_inode);
+ return ret;
+}
+
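As a reading aid for the csum_start/csum_end selection earlier in this function, here is a standalone userspace sketch (illustrative only, not kernel code and not part of this patch) reusing the numbers from the overlapping-csums example above: for a compressed extent the checksums cover the whole on-disk extent, while for an uncompressed one they cover only the sub-range referenced by the file extent item.

/*
 * Illustrative sketch: which disk byte range needs csum replay for a
 * compressed vs. an uncompressed file extent item.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static void csum_range(bool compressed, uint64_t disk_bytenr,
		       uint64_t disk_num_bytes, uint64_t extent_offset,
		       uint64_t num_bytes)
{
	uint64_t csum_start, csum_end;

	if (compressed) {
		/* Checksums cover the data as stored on disk. */
		csum_start = disk_bytenr;
		csum_end = csum_start + disk_num_bytes;
	} else {
		/* Only the referenced sub-range needs csum replay. */
		csum_start = disk_bytenr + extent_offset;
		csum_end = csum_start + num_bytes;
	}
	printf("%s: csum range [%llu, %llu)\n",
	       compressed ? "compressed" : "uncompressed",
	       (unsigned long long)csum_start, (unsigned long long)csum_end);
}

int main(void)
{
	/* 100K extent at disk byte 12845056; the item references 20K at offset 20K. */
	csum_range(false, 12845056, 102400, 20480, 20480); /* [12865536, 12886016) */
	csum_range(true, 12845056, 102400, 20480, 20480);  /* [12845056, 12947456) */
	return 0;
}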
+static int unlink_inode_for_log_replay(struct walk_control *wc,
+ struct btrfs_inode *dir,
+ struct btrfs_inode *inode,
+ const struct fscrypt_str *name)
+{
+ struct btrfs_trans_handle *trans = wc->trans;
+ int ret;
+
+ ret = btrfs_unlink_inode(trans, dir, inode, name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to unlink inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir), name->len,
+ name->name, btrfs_root_id(inode->root));
+ return ret;
+ }
+ /*
+ * Whenever we need to check if a name exists or not, we check the
+ * fs/subvolume tree. So after an unlink we must run delayed items, so
+ * that future checks for a name during log replay see that the name
+ * does not exist anymore.
+ */
+ ret = btrfs_run_delayed_items(trans);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+"failed to run delayed items current inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir), name->len,
+ name->name, btrfs_root_id(inode->root));
+
return ret;
}
@@ -920,49 +1072,48 @@ out:
* This is a helper function to do the unlink of a specific directory
* item
*/
-static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
+static noinline int drop_one_dir_item(struct walk_control *wc,
struct btrfs_inode *dir,
struct btrfs_dir_item *di)
{
struct btrfs_root *root = dir->root;
- struct inode *inode;
- char *name;
- int name_len;
- struct extent_buffer *leaf;
+ struct btrfs_inode *inode;
+ struct fscrypt_str name;
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
struct btrfs_key location;
int ret;
- leaf = path->nodes[0];
-
btrfs_dir_item_key_to_cpu(leaf, di, &location);
- name_len = btrfs_dir_name_len(leaf, di);
- name = kmalloc(name_len, GFP_NOFS);
- if (!name)
- return -ENOMEM;
+ ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for dir %llu root %llu",
+ btrfs_ino(dir), btrfs_root_id(root));
+ return ret;
+ }
- read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
- inode = read_one_inode(root, location.objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(location.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to open inode %llu parent dir %llu name %.*s root %llu",
+ location.objectid, btrfs_ino(dir),
+ name.len, name.name, btrfs_root_id(root));
+ inode = NULL;
goto out;
}
- ret = link_to_fixup_dir(trans, root, path, location.objectid);
+ ret = link_to_fixup_dir(wc, location.objectid);
if (ret)
goto out;
- ret = btrfs_unlink_inode(trans, dir, BTRFS_I(inode), name,
- name_len);
- if (ret)
- goto out;
- else
- ret = btrfs_run_delayed_items(trans);
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &name);
out:
- kfree(name);
- iput(inode);
+ kfree(name.name);
+ if (inode)
+ iput(&inode->vfs_inode);
return ret;
}
@@ -976,14 +1127,14 @@ out:
static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path,
u64 dirid, u64 objectid, u64 index,
- const char *name, int name_len)
+ struct fscrypt_str *name)
{
struct btrfs_dir_item *di;
struct btrfs_key location;
int ret = 0;
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
- index, name, name_len, 0);
+ index, name, 0);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
@@ -996,7 +1147,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
}
btrfs_release_path(path);
- di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
+ di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, 0);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
@@ -1023,9 +1174,9 @@ out:
static noinline int backref_in_log(struct btrfs_root *log,
struct btrfs_key *key,
u64 ref_objectid,
- const char *name, int namelen)
+ const struct fscrypt_str *name)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
path = btrfs_alloc_path();
@@ -1033,229 +1184,253 @@ static noinline int backref_in_log(struct btrfs_root *log,
return -ENOMEM;
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret == 1) {
- ret = 0;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret == 1)
+ return 0;
if (key->type == BTRFS_INODE_EXTREF_KEY)
ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
path->slots[0],
- ref_objectid,
- name, namelen);
+ ref_objectid, name);
else
ret = !!btrfs_find_name_in_backref(path->nodes[0],
- path->slots[0],
- name, namelen);
-out:
- btrfs_free_path(path);
+ path->slots[0], name);
return ret;
}
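The switch to BTRFS_PATH_AUTO_FREE(path) above is what lets backref_in_log() drop its out label and the explicit btrfs_free_path() call. Below is a minimal userspace sketch of the pattern it presumably relies on (the GCC/Clang cleanup attribute); the names free_bufp and AUTO_FREE_BUF are invented for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Destructor run automatically when the annotated variable leaves scope. */
static void free_bufp(char **p)
{
	free(*p);
	printf("buffer freed automatically\n");
}

/* Toy stand-in for a macro like BTRFS_PATH_AUTO_FREE(name). */
#define AUTO_FREE_BUF(name) \
	char *name __attribute__((cleanup(free_bufp))) = NULL

static int use_buffer(void)
{
	AUTO_FREE_BUF(buf);

	buf = malloc(64);
	if (!buf)
		return -1;
	/* Any early return below still frees buf via the cleanup handler. */
	return 0;
}

int main(void)
{
	return use_buffer() ? 1 : 0;
}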
-static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_root *log_root,
+static int unlink_refs_not_in_log(struct walk_control *wc,
+ struct btrfs_key *search_key,
struct btrfs_inode *dir,
- struct btrfs_inode *inode,
- u64 inode_objectid, u64 parent_objectid,
- u64 ref_index, char *name, int namelen,
- int *search_done)
+ struct btrfs_inode *inode)
{
- int ret;
- char *victim_name;
- int victim_name_len;
- struct extent_buffer *leaf;
- struct btrfs_dir_item *di;
- struct btrfs_key search_key;
- struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
+ unsigned long ptr;
+ unsigned long ptr_end;
-again:
- /* Search old style refs */
- search_key.objectid = inode_objectid;
- search_key.type = BTRFS_INODE_REF_KEY;
- search_key.offset = parent_objectid;
- ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
- if (ret == 0) {
+ /*
+ * Check all the names in this back reference to see if they are in the
+ * log. If so, we allow them to stay; otherwise they must be unlinked as
+ * a conflict.
+ */
+ ptr = btrfs_item_ptr_offset(leaf, wc->subvol_path->slots[0]);
+ ptr_end = ptr + btrfs_item_size(leaf, wc->subvol_path->slots[0]);
+ while (ptr < ptr_end) {
+ struct fscrypt_str victim_name;
struct btrfs_inode_ref *victim_ref;
- unsigned long ptr;
- unsigned long ptr_end;
-
- leaf = path->nodes[0];
-
- /* are we trying to overwrite a back ref for the root directory
- * if so, just jump out, we're done
- */
- if (search_key.objectid == search_key.offset)
- return 1;
-
- /* check all the names in this back reference to see
- * if they are in the log. if so, we allow them to stay
- * otherwise they must be unlinked as a conflict
- */
- ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
- while (ptr < ptr_end) {
- victim_ref = (struct btrfs_inode_ref *)ptr;
- victim_name_len = btrfs_inode_ref_name_len(leaf,
- victim_ref);
- victim_name = kmalloc(victim_name_len, GFP_NOFS);
- if (!victim_name)
- return -ENOMEM;
+ int ret;
- read_extent_buffer(leaf, victim_name,
- (unsigned long)(victim_ref + 1),
- victim_name_len);
+ victim_ref = (struct btrfs_inode_ref *)ptr;
+ ret = read_alloc_one_name(leaf, (victim_ref + 1),
+ btrfs_inode_ref_name_len(leaf, victim_ref),
+ &victim_name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for inode %llu parent dir %llu root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ btrfs_root_id(inode->root));
+ return ret;
+ }
- ret = backref_in_log(log_root, &search_key,
- parent_objectid, victim_name,
- victim_name_len);
+ ret = backref_in_log(wc->log, search_key, btrfs_ino(dir), &victim_name);
+ if (ret) {
if (ret < 0) {
- kfree(victim_name);
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if backref is in log tree for inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ victim_name.len, victim_name.name,
+ btrfs_root_id(inode->root));
+ kfree(victim_name.name);
return ret;
- } else if (!ret) {
- inc_nlink(&inode->vfs_inode);
- btrfs_release_path(path);
-
- ret = btrfs_unlink_inode(trans, dir, inode,
- victim_name, victim_name_len);
- kfree(victim_name);
- if (ret)
- return ret;
- ret = btrfs_run_delayed_items(trans);
- if (ret)
- return ret;
- *search_done = 1;
- goto again;
}
- kfree(victim_name);
-
- ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
+ kfree(victim_name.name);
+ ptr = (unsigned long)(victim_ref + 1) + victim_name.len;
+ continue;
}
- /*
- * NOTE: we have searched root tree and checked the
- * corresponding ref, it does not need to check again.
- */
- *search_done = 1;
+ inc_nlink(&inode->vfs_inode);
+ btrfs_release_path(wc->subvol_path);
+
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &victim_name);
+ kfree(victim_name.name);
+ if (ret)
+ return ret;
+ return -EAGAIN;
}
- btrfs_release_path(path);
- /* Same search but for extended refs */
- extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
- inode_objectid, parent_objectid, 0,
- 0);
- if (!IS_ERR_OR_NULL(extref)) {
- u32 item_size;
- u32 cur_offset = 0;
- unsigned long base;
- struct inode *victim_parent;
+ return 0;
+}
- leaf = path->nodes[0];
+static int unlink_extrefs_not_in_log(struct walk_control *wc,
+ struct btrfs_key *search_key,
+ struct btrfs_inode *dir,
+ struct btrfs_inode *inode)
+{
+ struct extent_buffer *leaf = wc->subvol_path->nodes[0];
+ const unsigned long base = btrfs_item_ptr_offset(leaf, wc->subvol_path->slots[0]);
+ const u32 item_size = btrfs_item_size(leaf, wc->subvol_path->slots[0]);
+ u32 cur_offset = 0;
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
- base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ while (cur_offset < item_size) {
+ struct btrfs_root *log_root = wc->log;
+ struct btrfs_inode_extref *extref;
+ struct fscrypt_str victim_name;
+ int ret;
- while (cur_offset < item_size) {
- extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ victim_name.len = btrfs_inode_extref_name_len(leaf, extref);
- victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
+ if (btrfs_inode_extref_parent(leaf, extref) != btrfs_ino(dir))
+ goto next;
- if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
- goto next;
+ ret = read_alloc_one_name(leaf, &extref->name, victim_name.len,
+ &victim_name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for inode %llu parent dir %llu root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ btrfs_root_id(inode->root));
+ return ret;
+ }
- victim_name = kmalloc(victim_name_len, GFP_NOFS);
- if (!victim_name)
- return -ENOMEM;
- read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
- victim_name_len);
-
- search_key.objectid = inode_objectid;
- search_key.type = BTRFS_INODE_EXTREF_KEY;
- search_key.offset = btrfs_extref_hash(parent_objectid,
- victim_name,
- victim_name_len);
- ret = backref_in_log(log_root, &search_key,
- parent_objectid, victim_name,
- victim_name_len);
+ search_key->objectid = btrfs_ino(inode);
+ search_key->type = BTRFS_INODE_EXTREF_KEY;
+ search_key->offset = btrfs_extref_hash(btrfs_ino(dir),
+ victim_name.name,
+ victim_name.len);
+ ret = backref_in_log(log_root, search_key, btrfs_ino(dir), &victim_name);
+ if (ret) {
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if backref is in log tree for inode %llu parent dir %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ victim_name.len, victim_name.name,
+ btrfs_root_id(inode->root));
+ kfree(victim_name.name);
return ret;
- } else if (!ret) {
- ret = -ENOENT;
- victim_parent = read_one_inode(root,
- parent_objectid);
- if (victim_parent) {
- inc_nlink(&inode->vfs_inode);
- btrfs_release_path(path);
-
- ret = btrfs_unlink_inode(trans,
- BTRFS_I(victim_parent),
- inode,
- victim_name,
- victim_name_len);
- if (!ret)
- ret = btrfs_run_delayed_items(
- trans);
- }
- iput(victim_parent);
- kfree(victim_name);
- if (ret)
- return ret;
- *search_done = 1;
- goto again;
}
- kfree(victim_name);
+ kfree(victim_name.name);
next:
- cur_offset += victim_name_len + sizeof(*extref);
+ cur_offset += victim_name.len + sizeof(*extref);
+ continue;
}
- *search_done = 1;
+
+ inc_nlink(&inode->vfs_inode);
+ btrfs_release_path(wc->subvol_path);
+
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &victim_name);
+ kfree(victim_name.name);
+ if (ret)
+ return ret;
+ return -EAGAIN;
}
- btrfs_release_path(path);
+
+ return 0;
+}
+
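A toy, self-contained model (not part of the patch; every name in it is invented) of the -EAGAIN restart convention used by unlink_refs_not_in_log() and unlink_extrefs_not_in_log(), and consumed by the "goto again" in __add_inode_ref() below: unlinking a conflicting name releases the path and may shift items, so the only safe continuation is to redo the search from scratch.

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define NR_NAMES 4

static const char *in_log[] = { "a", "c" };
static char names[NR_NAMES][8] = { "a", "b", "c", "d" };
static int nr_names = NR_NAMES;

static int name_in_log(const char *name)
{
	for (size_t i = 0; i < sizeof(in_log) / sizeof(in_log[0]); i++)
		if (strcmp(in_log[i], name) == 0)
			return 1;
	return 0;
}

/* Remove the first name not found in the log; -EAGAIN means "restart". */
static int unlink_refs_not_in_log_toy(void)
{
	for (int i = 0; i < nr_names; i++) {
		if (name_in_log(names[i]))
			continue;
		/* "Unlink": remove the entry, shifting the rest down. */
		memmove(&names[i], &names[i + 1],
			(nr_names - i - 1) * sizeof(names[0]));
		nr_names--;
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	int ret;

again:
	ret = unlink_refs_not_in_log_toy();
	if (ret == -EAGAIN)
		goto again;

	for (int i = 0; i < nr_names; i++)
		printf("kept %s\n", names[i]);	/* prints "a" and "c" */
	return 0;
}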
+static inline int __add_inode_ref(struct walk_control *wc,
+ struct btrfs_inode *dir,
+ struct btrfs_inode *inode,
+ u64 ref_index, struct fscrypt_str *name)
+{
+ int ret;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_dir_item *di;
+ struct btrfs_key search_key;
+ struct btrfs_inode_extref *extref;
+
+again:
+ /* Search old style refs */
+ search_key.objectid = btrfs_ino(inode);
+ search_key.type = BTRFS_INODE_REF_KEY;
+ search_key.offset = btrfs_ino(dir);
+ ret = btrfs_search_slot(NULL, root, &search_key, wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search subvolume tree for key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&search_key),
+ btrfs_root_id(root));
+ return ret;
+ } else if (ret == 0) {
+ /*
+ * Are we trying to overwrite a back ref for the root directory?
+ * If so, we're done.
+ */
+ if (search_key.objectid == search_key.offset)
+ return 1;
+
+ ret = unlink_refs_not_in_log(wc, &search_key, dir, inode);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+ }
+ btrfs_release_path(wc->subvol_path);
+
+ /* Same search but for extended refs */
+ extref = btrfs_lookup_inode_extref(root, wc->subvol_path, name,
+ btrfs_ino(inode), btrfs_ino(dir));
+ if (IS_ERR(extref)) {
+ return PTR_ERR(extref);
+ } else if (extref) {
+ ret = unlink_extrefs_not_in_log(wc, &search_key, dir, inode);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+ }
+ btrfs_release_path(wc->subvol_path);
/* look for a conflicting sequence number */
- di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
- ref_index, name, namelen, 0);
+ di = btrfs_lookup_dir_index_item(trans, root, wc->subvol_path, btrfs_ino(dir),
+ ref_index, name, 0);
if (IS_ERR(di)) {
- return PTR_ERR(di);
+ ret = PTR_ERR(di);
+ btrfs_abort_log_replay(wc, ret,
+"failed to lookup dir index item for dir %llu ref_index %llu name %.*s root %llu",
+ btrfs_ino(dir), ref_index, name->len,
+ name->name, btrfs_root_id(root));
+ return ret;
} else if (di) {
- ret = drop_one_dir_item(trans, path, dir, di);
+ ret = drop_one_dir_item(wc, dir, di);
if (ret)
return ret;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
/* look for a conflicting name */
- di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
- name, namelen, 0);
+ di = btrfs_lookup_dir_item(trans, root, wc->subvol_path, btrfs_ino(dir), name, 0);
if (IS_ERR(di)) {
- return PTR_ERR(di);
+ ret = PTR_ERR(di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir item for dir %llu name %.*s root %llu",
+ btrfs_ino(dir), name->len, name->name,
+ btrfs_root_id(root));
+ return ret;
} else if (di) {
- ret = drop_one_dir_item(trans, path, dir, di);
+ ret = drop_one_dir_item(wc, dir, di);
if (ret)
return ret;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return 0;
}
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
- u32 *namelen, char **name, u64 *index,
+ struct fscrypt_str *name, u64 *index,
u64 *parent_objectid)
{
struct btrfs_inode_extref *extref;
+ int ret;
extref = (struct btrfs_inode_extref *)ref_ptr;
- *namelen = btrfs_inode_extref_name_len(eb, extref);
- *name = kmalloc(*namelen, GFP_NOFS);
- if (*name == NULL)
- return -ENOMEM;
-
- read_extent_buffer(eb, *name, (unsigned long)&extref->name,
- *namelen);
+ ret = read_alloc_one_name(eb, &extref->name,
+ btrfs_inode_extref_name_len(eb, extref), name);
+ if (ret)
+ return ret;
if (index)
*index = btrfs_inode_extref_index(eb, extref);
@@ -1266,18 +1441,17 @@ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
- u32 *namelen, char **name, u64 *index)
+ struct fscrypt_str *name, u64 *index)
{
struct btrfs_inode_ref *ref;
+ int ret;
ref = (struct btrfs_inode_ref *)ref_ptr;
- *namelen = btrfs_inode_ref_name_len(eb, ref);
- *name = kmalloc(*namelen, GFP_NOFS);
- if (*name == NULL)
- return -ENOMEM;
-
- read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
+ ret = read_alloc_one_name(eb, ref + 1, btrfs_inode_ref_name_len(eb, ref),
+ name);
+ if (ret)
+ return ret;
if (index)
*index = btrfs_inode_ref_index(eb, ref);
@@ -1292,231 +1466,133 @@ static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
* proper unlink of that name (that is, remove its entry from the inode
* reference item and both dir index keys).
*/
-static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_inode *inode,
- struct extent_buffer *log_eb,
- int log_slot,
- struct btrfs_key *key)
+static int unlink_old_inode_refs(struct walk_control *wc, struct btrfs_inode *inode)
{
+ struct btrfs_root *root = wc->root;
int ret;
unsigned long ref_ptr;
unsigned long ref_end;
struct extent_buffer *eb;
again:
- btrfs_release_path(path);
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ btrfs_release_path(wc->subvol_path);
+ ret = btrfs_search_slot(NULL, root, &wc->log_key, wc->subvol_path, 0, 0);
if (ret > 0) {
ret = 0;
goto out;
}
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search subvolume tree for key " BTRFS_KEY_FMT " root %llu",
+ BTRFS_KEY_FMT_VALUE(&wc->log_key),
+ btrfs_root_id(root));
goto out;
+ }
- eb = path->nodes[0];
- ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
- ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
+ eb = wc->subvol_path->nodes[0];
+ ref_ptr = btrfs_item_ptr_offset(eb, wc->subvol_path->slots[0]);
+ ref_end = ref_ptr + btrfs_item_size(eb, wc->subvol_path->slots[0]);
while (ref_ptr < ref_end) {
- char *name = NULL;
- int namelen;
+ struct fscrypt_str name;
u64 parent_id;
- if (key->type == BTRFS_INODE_EXTREF_KEY) {
- ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+ if (wc->log_key.type == BTRFS_INODE_EXTREF_KEY) {
+ ret = extref_get_fields(eb, ref_ptr, &name,
NULL, &parent_id);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get extref details for inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
+ goto out;
+ }
} else {
- parent_id = key->offset;
- ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
- NULL);
+ parent_id = wc->log_key.offset;
+ ret = ref_get_fields(eb, ref_ptr, &name, NULL);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get ref details for inode %llu parent_id %llu root %llu",
+ btrfs_ino(inode), parent_id,
+ btrfs_root_id(root));
+ goto out;
+ }
}
- if (ret)
- goto out;
- if (key->type == BTRFS_INODE_EXTREF_KEY)
- ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
- parent_id, name,
- namelen);
+ if (wc->log_key.type == BTRFS_INODE_EXTREF_KEY)
+ ret = !!btrfs_find_name_in_ext_backref(wc->log_leaf, wc->log_slot,
+ parent_id, &name);
else
- ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
- name, namelen);
+ ret = !!btrfs_find_name_in_backref(wc->log_leaf, wc->log_slot,
+ &name);
if (!ret) {
- struct inode *dir;
-
- btrfs_release_path(path);
- dir = read_one_inode(root, parent_id);
- if (!dir) {
- ret = -ENOENT;
- kfree(name);
+ struct btrfs_inode *dir;
+
+ btrfs_release_path(wc->subvol_path);
+ dir = btrfs_iget_logging(parent_id, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ kfree(name.name);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ parent_id, btrfs_root_id(root));
goto out;
}
- ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
- inode, name, namelen);
- kfree(name);
- iput(dir);
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &name);
+ kfree(name.name);
+ iput(&dir->vfs_inode);
if (ret)
goto out;
goto again;
}
- kfree(name);
- ref_ptr += namelen;
- if (key->type == BTRFS_INODE_EXTREF_KEY)
+ kfree(name.name);
+ ref_ptr += name.len;
+ if (wc->log_key.type == BTRFS_INODE_EXTREF_KEY)
ref_ptr += sizeof(struct btrfs_inode_extref);
else
ref_ptr += sizeof(struct btrfs_inode_ref);
}
ret = 0;
out:
- btrfs_release_path(path);
- return ret;
-}
-
-static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
- const u8 ref_type, const char *name,
- const int namelen)
-{
- struct btrfs_key key;
- struct btrfs_path *path;
- const u64 parent_id = btrfs_ino(BTRFS_I(dir));
- int ret;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- key.objectid = btrfs_ino(BTRFS_I(inode));
- key.type = ref_type;
- if (key.type == BTRFS_INODE_REF_KEY)
- key.offset = parent_id;
- else
- key.offset = btrfs_extref_hash(parent_id, name, namelen);
-
- ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = 0;
- goto out;
- }
- if (key.type == BTRFS_INODE_EXTREF_KEY)
- ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
- path->slots[0], parent_id, name, namelen);
- else
- ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
- name, namelen);
-
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-static int add_link(struct btrfs_trans_handle *trans,
- struct inode *dir, struct inode *inode, const char *name,
- int namelen, u64 ref_index)
-{
- struct btrfs_root *root = BTRFS_I(dir)->root;
- struct btrfs_dir_item *dir_item;
- struct btrfs_key key;
- struct btrfs_path *path;
- struct inode *other_inode = NULL;
- int ret;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- dir_item = btrfs_lookup_dir_item(NULL, root, path,
- btrfs_ino(BTRFS_I(dir)),
- name, namelen, 0);
- if (!dir_item) {
- btrfs_release_path(path);
- goto add_link;
- } else if (IS_ERR(dir_item)) {
- ret = PTR_ERR(dir_item);
- goto out;
- }
-
- /*
- * Our inode's dentry collides with the dentry of another inode which is
- * in the log but not yet processed since it has a higher inode number.
- * So delete that other dentry.
- */
- btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
- btrfs_release_path(path);
- other_inode = read_one_inode(root, key.objectid);
- if (!other_inode) {
- ret = -ENOENT;
- goto out;
- }
- ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(other_inode),
- name, namelen);
- if (ret)
- goto out;
- /*
- * If we dropped the link count to 0, bump it so that later the iput()
- * on the inode will not free it. We will fixup the link count later.
- */
- if (other_inode->i_nlink == 0)
- inc_nlink(other_inode);
-
- ret = btrfs_run_delayed_items(trans);
- if (ret)
- goto out;
-add_link:
- ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
- name, namelen, 0, ref_index);
-out:
- iput(other_inode);
- btrfs_free_path(path);
-
+ btrfs_release_path(wc->subvol_path);
return ret;
}
/*
- * replay one inode back reference item found in the log tree.
- * eb, slot and key refer to the buffer and key found in the log tree.
- * root is the destination we are replaying into, and path is for temp
- * use by this function. (it should be released on return).
+ * Replay one inode back reference item found in the log tree.
+ * Path is for temporary use by this function (it should be released on return).
*/
-static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+static noinline int add_inode_ref(struct walk_control *wc)
{
- struct inode *dir = NULL;
- struct inode *inode = NULL;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_inode *dir = NULL;
+ struct btrfs_inode *inode = NULL;
unsigned long ref_ptr;
unsigned long ref_end;
- char *name = NULL;
- int namelen;
+ struct fscrypt_str name = { 0 };
int ret;
- int search_done = 0;
- int log_ref_ver = 0;
+ const bool is_extref_item = (wc->log_key.type == BTRFS_INODE_EXTREF_KEY);
u64 parent_objectid;
u64 inode_objectid;
u64 ref_index = 0;
int ref_struct_size;
- ref_ptr = btrfs_item_ptr_offset(eb, slot);
- ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
+ ref_ptr = btrfs_item_ptr_offset(wc->log_leaf, wc->log_slot);
+ ref_end = ref_ptr + btrfs_item_size(wc->log_leaf, wc->log_slot);
- if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ if (is_extref_item) {
struct btrfs_inode_extref *r;
ref_struct_size = sizeof(struct btrfs_inode_extref);
- log_ref_ver = 1;
r = (struct btrfs_inode_extref *)ref_ptr;
- parent_objectid = btrfs_inode_extref_parent(eb, r);
+ parent_objectid = btrfs_inode_extref_parent(wc->log_leaf, r);
} else {
ref_struct_size = sizeof(struct btrfs_inode_ref);
- parent_objectid = key->offset;
+ parent_objectid = wc->log_key.offset;
}
- inode_objectid = key->objectid;
+ inode_objectid = wc->log_key.objectid;
/*
* it is possible that we didn't log all the parent directories
@@ -1524,43 +1600,93 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* copy the back ref in. The link count fixup code will take
* care of the rest
*/
- dir = read_one_inode(root, parent_objectid);
- if (!dir) {
- ret = -ENOENT;
+ dir = btrfs_iget_logging(parent_objectid, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ if (ret == -ENOENT)
+ ret = 0;
+ else
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ parent_objectid, btrfs_root_id(root));
+ dir = NULL;
goto out;
}
- inode = read_one_inode(root, inode_objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(inode_objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ inode_objectid, btrfs_root_id(root));
+ inode = NULL;
goto out;
}
while (ref_ptr < ref_end) {
- if (log_ref_ver) {
- ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+ if (is_extref_item) {
+ ret = extref_get_fields(wc->log_leaf, ref_ptr, &name,
&ref_index, &parent_objectid);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get extref details for inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
+ goto out;
+ }
/*
* parent object can change from one array
* item to another.
*/
- if (!dir)
- dir = read_one_inode(root, parent_objectid);
if (!dir) {
- ret = -ENOENT;
- goto out;
+ dir = btrfs_iget_logging(parent_objectid, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ dir = NULL;
+ /*
+ * A new parent dir may not have been
+ * logged and may not exist in the
+ * subvolume tree; see the comment above,
+ * before the loop, about getting the
+ * first parent dir.
+ */
+ if (ret == -ENOENT) {
+ /*
+ * The next extref may refer to
+ * another parent dir that
+ * exists, so continue.
+ */
+ ret = 0;
+ goto next;
+ } else {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ parent_objectid,
+ btrfs_root_id(root));
+ }
+ goto out;
+ }
}
} else {
- ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
- &ref_index);
+ ret = ref_get_fields(wc->log_leaf, ref_ptr, &name, &ref_index);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get ref details for inode %llu parent_objectid %llu root %llu",
+ btrfs_ino(inode),
+ parent_objectid,
+ btrfs_root_id(root));
+ goto out;
+ }
}
- if (ret)
- goto out;
- ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
- btrfs_ino(BTRFS_I(inode)), ref_index,
- name, namelen);
+ ret = inode_in_dir(root, wc->subvol_path, btrfs_ino(dir),
+ btrfs_ino(inode), ref_index, &name);
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if inode %llu is in dir %llu ref_index %llu name %.*s root %llu",
+ btrfs_ino(inode), btrfs_ino(dir),
+ ref_index, name.len, name.name,
+ btrfs_root_id(root));
goto out;
} else if (ret == 0) {
/*
@@ -1570,65 +1696,42 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* overwrite any existing back reference, and we don't
* want to create dangling pointers in the directory.
*/
-
- if (!search_done) {
- ret = __add_inode_ref(trans, root, path, log,
- BTRFS_I(dir),
- BTRFS_I(inode),
- inode_objectid,
- parent_objectid,
- ref_index, name, namelen,
- &search_done);
- if (ret) {
- if (ret == 1)
- ret = 0;
- goto out;
- }
- }
-
- /*
- * If a reference item already exists for this inode
- * with the same parent and name, but different index,
- * drop it and the corresponding directory index entries
- * from the parent before adding the new reference item
- * and dir index entries, otherwise we would fail with
- * -EEXIST returned from btrfs_add_link() below.
- */
- ret = btrfs_inode_ref_exists(inode, dir, key->type,
- name, namelen);
- if (ret > 0) {
- ret = btrfs_unlink_inode(trans,
- BTRFS_I(dir),
- BTRFS_I(inode),
- name, namelen);
- /*
- * If we dropped the link count to 0, bump it so
- * that later the iput() on the inode will not
- * free it. We will fixup the link count later.
- */
- if (!ret && inode->i_nlink == 0)
- inc_nlink(inode);
- }
- if (ret < 0)
+ ret = __add_inode_ref(wc, dir, inode, ref_index, &name);
+ if (ret) {
+ if (ret == 1)
+ ret = 0;
goto out;
+ }
/* insert our name */
- ret = add_link(trans, dir, inode, name, namelen,
- ref_index);
- if (ret)
+ ret = btrfs_add_link(trans, dir, inode, &name, 0, ref_index);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to add link for inode %llu in dir %llu ref_index %llu name %.*s root %llu",
+ btrfs_ino(inode),
+ btrfs_ino(dir), ref_index,
+ name.len, name.name,
+ btrfs_root_id(root));
goto out;
+ }
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (ret)
+ ret = btrfs_update_inode(trans, inode);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
goto out;
+ }
}
/* Else, ret == 1, we already have a perfect match, we're done. */
- ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
- kfree(name);
- name = NULL;
- if (log_ref_ver) {
- iput(dir);
+next:
+ ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len;
+ kfree(name.name);
+ name.name = NULL;
+ if (is_extref_item && dir) {
+ iput(&dir->vfs_inode);
dir = NULL;
}
}
@@ -1641,23 +1744,23 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* dir index entries exist for a name but there is no inode reference
* item with the same name.
*/
- ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
- key);
+ ret = unlink_old_inode_refs(wc, inode);
if (ret)
goto out;
/* finally write the back reference in the inode */
- ret = overwrite_item(trans, root, path, eb, slot, key);
+ ret = overwrite_item(wc);
out:
- btrfs_release_path(path);
- kfree(name);
- iput(dir);
- iput(inode);
+ btrfs_release_path(wc->subvol_path);
+ kfree(name.name);
+ if (dir)
+ iput(&dir->vfs_inode);
+ if (inode)
+ iput(&inode->vfs_inode);
return ret;
}
-static int count_inode_extrefs(struct btrfs_root *root,
- struct btrfs_inode *inode, struct btrfs_path *path)
+static int count_inode_extrefs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret = 0;
int name_len;
@@ -1671,13 +1774,13 @@ static int count_inode_extrefs(struct btrfs_root *root,
struct extent_buffer *leaf;
while (1) {
- ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
- &extref, &offset);
+ ret = btrfs_find_one_extref(inode->root, inode_objectid, offset,
+ path, &extref, &offset);
if (ret)
break;
leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ item_size = btrfs_item_size(leaf, path->slots[0]);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
cur_offset = 0;
@@ -1700,8 +1803,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
return nlink;
}
-static int count_inode_refs(struct btrfs_root *root,
- struct btrfs_inode *inode, struct btrfs_path *path)
+static int count_inode_refs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret;
struct btrfs_key key;
@@ -1716,7 +1818,7 @@ static int count_inode_refs(struct btrfs_root *root,
key.offset = (u64)-1;
while (1) {
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0);
if (ret < 0)
break;
if (ret > 0) {
@@ -1731,7 +1833,7 @@ process_slot:
key.type != BTRFS_INODE_REF_KEY)
break;
ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
- ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
+ ptr_end = ptr + btrfs_item_size(path->nodes[0],
path->slots[0]);
while (ptr < ptr_end) {
struct btrfs_inode_ref *ref;
@@ -1767,26 +1869,22 @@ process_slot:
* number of back refs found. If it goes down to zero, the iput
* will free the inode.
*/
-static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode)
+static noinline int fixup_inode_link_count(struct walk_control *wc,
+ struct btrfs_inode *inode)
{
- struct btrfs_path *path;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = inode->root;
int ret;
u64 nlink = 0;
- u64 ino = btrfs_ino(BTRFS_I(inode));
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ const u64 ino = btrfs_ino(inode);
- ret = count_inode_refs(root, BTRFS_I(inode), path);
+ ret = count_inode_refs(inode, wc->subvol_path);
if (ret < 0)
goto out;
nlink = ret;
- ret = count_inode_extrefs(root, BTRFS_I(inode), path);
+ ret = count_inode_extrefs(inode, wc->subvol_path);
if (ret < 0)
goto out;
@@ -1794,18 +1892,18 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
ret = 0;
- if (nlink != inode->i_nlink) {
- set_nlink(inode, nlink);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ if (nlink != inode->vfs_inode.i_nlink) {
+ set_nlink(&inode->vfs_inode, nlink);
+ ret = btrfs_update_inode(trans, inode);
if (ret)
goto out;
}
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(inode->vfs_inode.i_mode))
+ inode->index_cnt = (u64)-1;
- if (inode->i_nlink == 0) {
- if (S_ISDIR(inode->i_mode)) {
- ret = replay_dir_deletes(trans, root, NULL, path,
- ino, 1);
+ if (inode->vfs_inode.i_nlink == 0) {
+ if (S_ISDIR(inode->vfs_inode.i_mode)) {
+ ret = replay_dir_deletes(wc, ino, true);
if (ret)
goto out;
}
@@ -1815,62 +1913,63 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
}
out:
- btrfs_free_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
-static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path)
+static noinline int fixup_inode_link_counts(struct walk_control *wc)
{
int ret;
struct btrfs_key key;
- struct inode *inode;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = (u64)-1;
while (1) {
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_inode *inode;
+
+ ret = btrfs_search_slot(trans, root, &key, wc->subvol_path, -1, 1);
if (ret < 0)
break;
if (ret == 1) {
ret = 0;
- if (path->slots[0] == 0)
+ if (wc->subvol_path->slots[0] == 0)
break;
- path->slots[0]--;
+ wc->subvol_path->slots[0]--;
}
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ btrfs_item_key_to_cpu(wc->subvol_path->nodes[0], &key, wc->subvol_path->slots[0]);
if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
key.type != BTRFS_ORPHAN_ITEM_KEY)
break;
- ret = btrfs_del_item(trans, root, path);
+ ret = btrfs_del_item(trans, root, wc->subvol_path);
if (ret)
break;
- btrfs_release_path(path);
- inode = read_one_inode(root, key.offset);
- if (!inode) {
- ret = -EIO;
+ btrfs_release_path(wc->subvol_path);
+ inode = btrfs_iget_logging(key.offset, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
break;
}
- ret = fixup_inode_link_count(trans, root, inode);
- iput(inode);
+ ret = fixup_inode_link_count(wc, inode);
+ iput(&inode->vfs_inode);
if (ret)
break;
/*
* fixup on a directory may create new entries,
- * make sure we always look for the highset possible
+ * make sure we always look for the highest possible
* offset
*/
key.offset = (u64)-1;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
@@ -1880,36 +1979,50 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
* count when replay is done. The link count is incremented here
* so the inode won't go away until we check it
*/
-static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- u64 objectid)
+static noinline int link_to_fixup_dir(struct walk_control *wc, u64 objectid)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
struct btrfs_key key;
int ret = 0;
- struct inode *inode;
-
- inode = read_one_inode(root, objectid);
- if (!inode)
- return -EIO;
+ struct btrfs_inode *inode;
+ struct inode *vfs_inode;
+
+ inode = btrfs_iget_logging(objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ objectid, btrfs_root_id(root));
+ return ret;
+ }
+ vfs_inode = &inode->vfs_inode;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = objectid;
- ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
+ ret = btrfs_insert_empty_item(trans, root, wc->subvol_path, &key, 0);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
if (ret == 0) {
- if (!inode->i_nlink)
- set_nlink(inode, 1);
+ if (!vfs_inode->i_nlink)
+ set_nlink(vfs_inode, 1);
else
- inc_nlink(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ inc_nlink(vfs_inode);
+ ret = btrfs_update_inode(trans, inode);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ objectid, btrfs_root_id(root));
} else if (ret == -EEXIST) {
ret = 0;
+ } else {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert fixup item for inode %llu root %llu",
+ objectid, btrfs_root_id(root));
}
- iput(inode);
+ iput(vfs_inode);
return ret;
}
@@ -1922,33 +2035,59 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 dirid, u64 index,
- char *name, int name_len,
+ const struct fscrypt_str *name,
struct btrfs_key *location)
{
- struct inode *inode;
- struct inode *dir;
+ struct btrfs_inode *inode;
+ struct btrfs_inode *dir;
int ret;
- inode = read_one_inode(root, location->objectid);
- if (!inode)
- return -ENOENT;
+ inode = btrfs_iget_logging(location->objectid, root);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- dir = read_one_inode(root, dirid);
- if (!dir) {
- iput(inode);
- return -EIO;
+ dir = btrfs_iget_logging(dirid, root);
+ if (IS_ERR(dir)) {
+ iput(&inode->vfs_inode);
+ return PTR_ERR(dir);
}
- ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
- name_len, 1, index);
+ ret = btrfs_add_link(trans, dir, inode, name, 1, index);
/* FIXME, put inode into FIXUP list */
- iput(inode);
- iput(dir);
+ iput(&inode->vfs_inode);
+ iput(&dir->vfs_inode);
return ret;
}
+static int delete_conflicting_dir_entry(struct walk_control *wc,
+ struct btrfs_inode *dir,
+ struct btrfs_dir_item *dst_di,
+ const struct btrfs_key *log_key,
+ u8 log_flags,
+ bool exists)
+{
+ struct btrfs_key found_key;
+
+ btrfs_dir_item_key_to_cpu(wc->subvol_path->nodes[0], dst_di, &found_key);
+ /* The existing dentry points to the same inode, don't delete it. */
+ if (found_key.objectid == log_key->objectid &&
+ found_key.type == log_key->type &&
+ found_key.offset == log_key->offset &&
+ btrfs_dir_flags(wc->subvol_path->nodes[0], dst_di) == log_flags)
+ return 1;
+
+ /*
+ * Don't drop the conflicting directory entry if the inode for the new
+ * entry doesn't exist.
+ */
+ if (!exists)
+ return 0;
+
+ return drop_one_dir_item(wc, dir, dst_di);
+}
+
/*
* take a single entry in a log directory item and replay it into
* the subvolume.
@@ -1965,119 +2104,123 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
* Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
* non-existing inode) and 1 if the name was replayed.
*/
-static noinline int replay_one_name(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb,
- struct btrfs_dir_item *di,
- struct btrfs_key *key)
+static noinline int replay_one_name(struct walk_control *wc, struct btrfs_dir_item *di)
{
- char *name;
- int name_len;
- struct btrfs_dir_item *dst_di;
- struct btrfs_key found_key;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct fscrypt_str name = { 0 };
+ struct btrfs_dir_item *dir_dst_di;
+ struct btrfs_dir_item *index_dst_di;
+ bool dir_dst_matches = false;
+ bool index_dst_matches = false;
struct btrfs_key log_key;
- struct inode *dir;
- u8 log_type;
+ struct btrfs_key search_key;
+ struct btrfs_inode *dir;
+ u8 log_flags;
bool exists;
int ret;
- bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
+ bool update_size = true;
bool name_added = false;
- dir = read_one_inode(root, key->objectid);
- if (!dir)
- return -EIO;
+ dir = btrfs_iget_logging(wc->log_key.objectid, root);
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ wc->log_key.objectid, btrfs_root_id(root));
+ return ret;
+ }
- name_len = btrfs_dir_name_len(eb, di);
- name = kmalloc(name_len, GFP_NOFS);
- if (!name) {
- ret = -ENOMEM;
+ ret = read_alloc_one_name(wc->log_leaf, di + 1,
+ btrfs_dir_name_len(wc->log_leaf, di), &name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for dir %llu root %llu",
+ btrfs_ino(dir), btrfs_root_id(root));
goto out;
}
- log_type = btrfs_dir_type(eb, di);
- read_extent_buffer(eb, name, (unsigned long)(di + 1),
- name_len);
-
- btrfs_dir_item_key_to_cpu(eb, di, &log_key);
- ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
- btrfs_release_path(path);
- if (ret < 0)
+ log_flags = btrfs_dir_flags(wc->log_leaf, di);
+ btrfs_dir_item_key_to_cpu(wc->log_leaf, di, &log_key);
+ ret = btrfs_lookup_inode(trans, root, wc->subvol_path, &log_key, 0);
+ btrfs_release_path(wc->subvol_path);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ log_key.objectid, btrfs_root_id(root));
goto out;
+ }
exists = (ret == 0);
ret = 0;
- if (key->type == BTRFS_DIR_ITEM_KEY) {
- dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
- name, name_len, 1);
- } else if (key->type == BTRFS_DIR_INDEX_KEY) {
- dst_di = btrfs_lookup_dir_index_item(trans, root, path,
- key->objectid,
- key->offset, name,
- name_len, 1);
- } else {
- /* Corruption */
- ret = -EINVAL;
+ dir_dst_di = btrfs_lookup_dir_item(trans, root, wc->subvol_path,
+ wc->log_key.objectid, &name, 1);
+ if (IS_ERR(dir_dst_di)) {
+ ret = PTR_ERR(dir_dst_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir item for dir %llu name %.*s root %llu",
+ wc->log_key.objectid, name.len, name.name,
+ btrfs_root_id(root));
goto out;
+ } else if (dir_dst_di) {
+ ret = delete_conflicting_dir_entry(wc, dir, dir_dst_di,
+ &log_key, log_flags, exists);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete conflicting entry for dir %llu name %.*s root %llu",
+ btrfs_ino(dir), name.len, name.name,
+ btrfs_root_id(root));
+ goto out;
+ }
+ dir_dst_matches = (ret == 1);
}
- if (IS_ERR(dst_di)) {
- ret = PTR_ERR(dst_di);
+ btrfs_release_path(wc->subvol_path);
+
+ index_dst_di = btrfs_lookup_dir_index_item(trans, root, wc->subvol_path,
+ wc->log_key.objectid,
+ wc->log_key.offset, &name, 1);
+ if (IS_ERR(index_dst_di)) {
+ ret = PTR_ERR(index_dst_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir index item for dir %llu name %.*s root %llu",
+ wc->log_key.objectid, name.len, name.name,
+ btrfs_root_id(root));
goto out;
- } else if (!dst_di) {
- /* we need a sequence number to insert, so we only
- * do inserts for the BTRFS_DIR_INDEX_KEY types
- */
- if (key->type != BTRFS_DIR_INDEX_KEY)
+ } else if (index_dst_di) {
+ ret = delete_conflicting_dir_entry(wc, dir, index_dst_di,
+ &log_key, log_flags, exists);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete conflicting entry for dir %llu name %.*s root %llu",
+ btrfs_ino(dir), name.len, name.name,
+ btrfs_root_id(root));
goto out;
- goto insert;
+ }
+ index_dst_matches = (ret == 1);
}
- btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
- /* the existing item matches the logged item */
- if (found_key.objectid == log_key.objectid &&
- found_key.type == log_key.type &&
- found_key.offset == log_key.offset &&
- btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
- update_size = false;
- goto out;
- }
+ btrfs_release_path(wc->subvol_path);
- /*
- * don't drop the conflicting directory entry if the inode
- * for the new entry doesn't exist
- */
- if (!exists)
- goto out;
-
- ret = drop_one_dir_item(trans, path, BTRFS_I(dir), dst_di);
- if (ret)
+ if (dir_dst_matches && index_dst_matches) {
+ ret = 0;
+ update_size = false;
goto out;
-
- if (key->type == BTRFS_DIR_INDEX_KEY)
- goto insert;
-out:
- btrfs_release_path(path);
- if (!ret && update_size) {
- btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
- ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
}
- kfree(name);
- iput(dir);
- if (!ret && name_added)
- ret = 1;
- return ret;
-insert:
/*
* Check if the inode reference exists in the log for the given name,
* inode and parent inode
*/
- found_key.objectid = log_key.objectid;
- found_key.type = BTRFS_INODE_REF_KEY;
- found_key.offset = key->objectid;
- ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_REF_KEY;
+ search_key.offset = wc->log_key.objectid;
+ ret = backref_in_log(root->log_root, &search_key, 0, &name);
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if ref item is logged for inode %llu dir %llu name %.*s root %llu",
+ search_key.objectid, btrfs_ino(dir),
+ name.len, name.name, btrfs_root_id(root));
goto out;
} else if (ret) {
/* The dentry will be added later. */
@@ -2086,12 +2229,15 @@ insert:
goto out;
}
- found_key.objectid = log_key.objectid;
- found_key.type = BTRFS_INODE_EXTREF_KEY;
- found_key.offset = key->objectid;
- ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
- name_len);
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+ search_key.offset = btrfs_extref_hash(wc->log_key.objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, wc->log_key.objectid, &name);
if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+"failed to check if extref item is logged for inode %llu dir %llu name %.*s root %llu",
+ search_key.objectid, btrfs_ino(dir),
+ name.len, name.name, btrfs_root_id(root));
goto out;
} else if (ret) {
/* The dentry will be added later. */
@@ -2099,96 +2245,84 @@ insert:
update_size = false;
goto out;
}
- btrfs_release_path(path);
- ret = insert_one_name(trans, root, key->objectid, key->offset,
- name, name_len, &log_key);
- if (ret && ret != -ENOENT && ret != -EEXIST)
+ ret = insert_one_name(trans, root, wc->log_key.objectid, wc->log_key.offset,
+ &name, &log_key);
+ if (ret && ret != -ENOENT && ret != -EEXIST) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to insert name %.*s for inode %llu dir %llu root %llu",
+ name.len, name.name, log_key.objectid,
+ btrfs_ino(dir), btrfs_root_id(root));
goto out;
+ }
if (!ret)
name_added = true;
update_size = false;
ret = 0;
- goto out;
+
+out:
+ if (!ret && update_size) {
+ btrfs_i_size_write(dir, dir->vfs_inode.i_size + name.len * 2);
+ ret = btrfs_update_inode(trans, dir);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update dir inode %llu root %llu",
+ btrfs_ino(dir), btrfs_root_id(root));
+ }
+ kfree(name.name);
+ iput(&dir->vfs_inode);
+ if (!ret && name_added)
+ ret = 1;
+ return ret;
}
-/*
- * find all the names in a directory item and reconcile them into
- * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
- * one name in a directory item, but the same code gets used for
- * both directory index types
- */
-static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+/* Replay one dir item from a BTRFS_DIR_INDEX_KEY key. */
+static noinline int replay_one_dir_item(struct walk_control *wc)
{
- int ret = 0;
- u32 item_size = btrfs_item_size_nr(eb, slot);
+ int ret;
struct btrfs_dir_item *di;
- int name_len;
- unsigned long ptr;
- unsigned long ptr_end;
- struct btrfs_path *fixup_path = NULL;
- ptr = btrfs_item_ptr_offset(eb, slot);
- ptr_end = ptr + item_size;
- while (ptr < ptr_end) {
- di = (struct btrfs_dir_item *)ptr;
- name_len = btrfs_dir_name_len(eb, di);
- ret = replay_one_name(trans, root, path, eb, di, key);
- if (ret < 0)
- break;
- ptr = (unsigned long)(di + 1);
- ptr += name_len;
+ /* We only log dir index keys, which only contain a single dir item. */
+ ASSERT(wc->log_key.type == BTRFS_DIR_INDEX_KEY,
+ "wc->log_key.type=%u", wc->log_key.type);
- /*
- * If this entry refers to a non-directory (directories can not
- * have a link count > 1) and it was added in the transaction
- * that was not committed, make sure we fixup the link count of
- * the inode it the entry points to. Otherwise something like
- * the following would result in a directory pointing to an
- * inode with a wrong link that does not account for this dir
- * entry:
- *
- * mkdir testdir
- * touch testdir/foo
- * touch testdir/bar
- * sync
- *
- * ln testdir/bar testdir/bar_link
- * ln testdir/foo testdir/foo_link
- * xfs_io -c "fsync" testdir/bar
- *
- * <power failure>
- *
- * mount fs, log replay happens
- *
- * File foo would remain with a link count of 1 when it has two
- * entries pointing to it in the directory testdir. This would
- * make it impossible to ever delete the parent directory has
- * it would result in stale dentries that can never be deleted.
- */
- if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
- struct btrfs_key di_key;
+ di = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_dir_item);
+ ret = replay_one_name(wc, di);
+ if (ret < 0)
+ return ret;
- if (!fixup_path) {
- fixup_path = btrfs_alloc_path();
- if (!fixup_path) {
- ret = -ENOMEM;
- break;
- }
- }
+ /*
+	 * If this entry refers to a non-directory (directories cannot have a
+	 * link count > 1) and it was added in a transaction that was not
+	 * committed, make sure we fix up the link count of the inode the entry
+	 * points to. Otherwise something like the following would result in a
+	 * directory pointing to an inode with a wrong link count that does not
+	 * account for this dir entry:
+ *
+ * mkdir testdir
+ * touch testdir/foo
+ * touch testdir/bar
+ * sync
+ *
+ * ln testdir/bar testdir/bar_link
+ * ln testdir/foo testdir/foo_link
+ * xfs_io -c "fsync" testdir/bar
+ *
+ * <power failure>
+ *
+ * mount fs, log replay happens
+ *
+ * File foo would remain with a link count of 1 when it has two entries
+ * pointing to it in the directory testdir. This would make it impossible
+	 * to ever delete the parent directory as it would result in stale
+ * dentries that can never be deleted.
+ */
+ if (ret == 1 && btrfs_dir_ftype(wc->log_leaf, di) != BTRFS_FT_DIR) {
+ struct btrfs_key di_key;
- btrfs_dir_item_key_to_cpu(eb, di, &di_key);
- ret = link_to_fixup_dir(trans, root, fixup_path,
- di_key.objectid);
- if (ret)
- break;
- }
- ret = 0;
+ btrfs_dir_item_key_to_cpu(wc->log_leaf, di, &di_key);
+ ret = link_to_fixup_dir(wc, di_key.objectid);
}
- btrfs_free_path(fixup_path);
+
return ret;
}
@@ -2205,7 +2339,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
*/
static noinline int find_dir_range(struct btrfs_root *root,
struct btrfs_path *path,
- u64 dirid, int key_type,
+ u64 dirid,
u64 *start_ret, u64 *end_ret)
{
struct btrfs_key key;
@@ -2218,7 +2352,7 @@ static noinline int find_dir_range(struct btrfs_root *root,
return 1;
key.objectid = dirid;
- key.type = key_type;
+ key.type = BTRFS_DIR_LOG_INDEX_KEY;
key.offset = *start_ret;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -2232,7 +2366,7 @@ static noinline int find_dir_range(struct btrfs_root *root,
if (ret != 0)
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.type != key_type || key.objectid != dirid) {
+ if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
ret = 1;
goto next;
}
@@ -2259,7 +2393,7 @@ next:
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.type != key_type || key.objectid != dirid) {
+ if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
ret = 1;
goto out;
}
@@ -2279,162 +2413,157 @@ out:
* item is not in the log, the item is removed and the inode it points
* to is unlinked
*/
-static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *log,
- struct btrfs_path *path,
+static noinline int check_item_in_log(struct walk_control *wc,
struct btrfs_path *log_path,
- struct inode *dir,
- struct btrfs_key *dir_key)
+ struct btrfs_inode *dir,
+ struct btrfs_key *dir_key,
+ bool force_remove)
{
- struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = dir->root;
int ret;
struct extent_buffer *eb;
int slot;
- u32 item_size;
struct btrfs_dir_item *di;
- struct btrfs_dir_item *log_di;
- int name_len;
- unsigned long ptr;
- unsigned long ptr_end;
- char *name;
- struct inode *inode;
+ struct fscrypt_str name = { 0 };
+ struct btrfs_inode *inode = NULL;
struct btrfs_key location;
-again:
- eb = path->nodes[0];
- slot = path->slots[0];
- item_size = btrfs_item_size_nr(eb, slot);
- ptr = btrfs_item_ptr_offset(eb, slot);
- ptr_end = ptr + item_size;
- while (ptr < ptr_end) {
- di = (struct btrfs_dir_item *)ptr;
- name_len = btrfs_dir_name_len(eb, di);
- name = kmalloc(name_len, GFP_NOFS);
- if (!name) {
- ret = -ENOMEM;
- goto out;
- }
- read_extent_buffer(eb, name, (unsigned long)(di + 1),
- name_len);
- log_di = NULL;
- if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
- log_di = btrfs_lookup_dir_item(trans, log, log_path,
- dir_key->objectid,
- name, name_len, 0);
- } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
- log_di = btrfs_lookup_dir_index_item(trans, log,
- log_path,
- dir_key->objectid,
- dir_key->offset,
- name, name_len, 0);
- }
- if (!log_di) {
- btrfs_dir_item_key_to_cpu(eb, di, &location);
- btrfs_release_path(path);
- btrfs_release_path(log_path);
- inode = read_one_inode(root, location.objectid);
- if (!inode) {
- kfree(name);
- return -EIO;
- }
+ /*
+ * Currently we only log dir index keys. Even if we replay a log created
+ * by an older kernel that logged both dir index and dir item keys, all
+	 * we need to do is process the dir index keys; we (and our caller) can
+ * safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY).
+ */
+ ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY, "dir_key->type=%u", dir_key->type);
- ret = link_to_fixup_dir(trans, root,
- path, location.objectid);
- if (ret) {
- kfree(name);
- iput(inode);
- goto out;
- }
+ eb = wc->subvol_path->nodes[0];
+ slot = wc->subvol_path->slots[0];
+ di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+ ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate name for dir %llu index %llu root %llu",
+ btrfs_ino(dir), dir_key->offset,
+ btrfs_root_id(root));
+ goto out;
+ }
- inc_nlink(inode);
- ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
- BTRFS_I(inode), name, name_len);
- if (!ret)
- ret = btrfs_run_delayed_items(trans);
- kfree(name);
- iput(inode);
- if (ret)
- goto out;
+ if (!force_remove) {
+ struct btrfs_dir_item *log_di;
- /* there might still be more names under this key
- * check and repeat if required
- */
- ret = btrfs_search_slot(NULL, root, dir_key, path,
- 0, 0);
- if (ret == 0)
- goto again;
+ log_di = btrfs_lookup_dir_index_item(trans, wc->log, log_path,
+ dir_key->objectid,
+ dir_key->offset, &name, 0);
+ if (IS_ERR(log_di)) {
+ ret = PTR_ERR(log_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir index item for dir %llu index %llu name %.*s root %llu",
+ btrfs_ino(dir), dir_key->offset,
+ name.len, name.name,
+ btrfs_root_id(root));
+ goto out;
+ } else if (log_di) {
+ /* The dentry exists in the log, we have nothing to do. */
ret = 0;
goto out;
- } else if (IS_ERR(log_di)) {
- kfree(name);
- return PTR_ERR(log_di);
}
- btrfs_release_path(log_path);
- kfree(name);
+ }
- ptr = (unsigned long)(di + 1);
- ptr += name_len;
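+	/* The entry is not in the log tree (or we must remove everything), so unlink it. */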
+ btrfs_dir_item_key_to_cpu(eb, di, &location);
+ btrfs_release_path(wc->subvol_path);
+ btrfs_release_path(log_path);
+ inode = btrfs_iget_logging(location.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ inode = NULL;
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ location.objectid, btrfs_root_id(root));
+ goto out;
}
- ret = 0;
+
+ ret = link_to_fixup_dir(wc, location.objectid);
+ if (ret)
+ goto out;
+
+ inc_nlink(&inode->vfs_inode);
+ ret = unlink_inode_for_log_replay(wc, dir, inode, &name);
+ /*
+ * Unlike dir item keys, dir index keys can only have one name (entry) in
+ * them, as there are no key collisions since each key has a unique offset
+ * (an index number), so we're done.
+ */
out:
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
btrfs_release_path(log_path);
+ kfree(name.name);
+ if (inode)
+ iput(&inode->vfs_inode);
return ret;
}
-static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- const u64 ino)
+static int replay_xattr_deletes(struct walk_control *wc)
{
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_root *root = wc->root;
+ struct btrfs_root *log = wc->log;
struct btrfs_key search_key;
- struct btrfs_path *log_path;
- int i;
+ BTRFS_PATH_AUTO_FREE(log_path);
+ const u64 ino = wc->log_key.objectid;
int nritems;
int ret;
log_path = btrfs_alloc_path();
- if (!log_path)
+ if (!log_path) {
+ btrfs_abort_log_replay(wc, -ENOMEM, "failed to allocate path");
return -ENOMEM;
+ }
search_key.objectid = ino;
search_key.type = BTRFS_XATTR_ITEM_KEY;
search_key.offset = 0;
again:
- ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
- if (ret < 0)
+ ret = btrfs_search_slot(NULL, root, &search_key, wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search xattrs for inode %llu root %llu",
+ ino, btrfs_root_id(root));
goto out;
+ }
process_leaf:
- nritems = btrfs_header_nritems(path->nodes[0]);
- for (i = path->slots[0]; i < nritems; i++) {
+ nritems = btrfs_header_nritems(wc->subvol_path->nodes[0]);
+ for (int i = wc->subvol_path->slots[0]; i < nritems; i++) {
struct btrfs_key key;
struct btrfs_dir_item *di;
struct btrfs_dir_item *log_di;
u32 total_size;
u32 cur;
- btrfs_item_key_to_cpu(path->nodes[0], &key, i);
+ btrfs_item_key_to_cpu(wc->subvol_path->nodes[0], &key, i);
if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
ret = 0;
goto out;
}
- di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
- total_size = btrfs_item_size_nr(path->nodes[0], i);
+ di = btrfs_item_ptr(wc->subvol_path->nodes[0], i, struct btrfs_dir_item);
+ total_size = btrfs_item_size(wc->subvol_path->nodes[0], i);
cur = 0;
while (cur < total_size) {
- u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
- u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
+ u16 name_len = btrfs_dir_name_len(wc->subvol_path->nodes[0], di);
+ u16 data_len = btrfs_dir_data_len(wc->subvol_path->nodes[0], di);
u32 this_len = sizeof(*di) + name_len + data_len;
char *name;
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
ret = -ENOMEM;
+ btrfs_abort_log_replay(wc, ret,
+ "failed to allocate memory for name of length %u",
+ name_len);
goto out;
}
- read_extent_buffer(path->nodes[0], name,
+ read_extent_buffer(wc->subvol_path->nodes[0], name,
(unsigned long)(di + 1), name_len);
log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
@@ -2442,40 +2571,59 @@ process_leaf:
btrfs_release_path(log_path);
if (!log_di) {
/* Doesn't exist in log tree, so delete it. */
- btrfs_release_path(path);
- di = btrfs_lookup_xattr(trans, root, path, ino,
+ btrfs_release_path(wc->subvol_path);
+ di = btrfs_lookup_xattr(trans, root, wc->subvol_path, ino,
name, name_len, -1);
- kfree(name);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup xattr with name %.*s for inode %llu root %llu",
+ name_len, name, ino,
+ btrfs_root_id(root));
+ kfree(name);
goto out;
}
ASSERT(di);
ret = btrfs_delete_one_dir_name(trans, root,
- path, di);
- if (ret)
+ wc->subvol_path, di);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to delete xattr with name %.*s for inode %llu root %llu",
+ name_len, name, ino,
+ btrfs_root_id(root));
+ kfree(name);
goto out;
- btrfs_release_path(path);
+ }
+ btrfs_release_path(wc->subvol_path);
+ kfree(name);
search_key = key;
goto again;
}
- kfree(name);
if (IS_ERR(log_di)) {
ret = PTR_ERR(log_di);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup xattr in log tree with name %.*s for inode %llu root %llu",
+ name_len, name, ino,
+ btrfs_root_id(root));
+ kfree(name);
goto out;
}
+ kfree(name);
cur += this_len;
di = (struct btrfs_dir_item *)((char *)di + this_len);
}
}
- ret = btrfs_next_leaf(root, path);
+ ret = btrfs_next_leaf(root, wc->subvol_path);
if (ret > 0)
ret = 0;
else if (ret == 0)
goto process_leaf;
+ else
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get next leaf in subvolume root %llu",
+ btrfs_root_id(root));
out:
- btrfs_free_path(log_path);
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
return ret;
}
@@ -2490,103 +2638,113 @@ out:
* Anything we don't find in the log is unlinked and removed from the
* directory.
*/
-static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_root *log,
- struct btrfs_path *path,
- u64 dirid, int del_all)
+static noinline int replay_dir_deletes(struct walk_control *wc,
+ u64 dirid, bool del_all)
{
+ struct btrfs_root *root = wc->root;
+ struct btrfs_root *log = (del_all ? NULL : wc->log);
u64 range_start;
u64 range_end;
- int key_type = BTRFS_DIR_LOG_ITEM_KEY;
int ret = 0;
struct btrfs_key dir_key;
struct btrfs_key found_key;
- struct btrfs_path *log_path;
- struct inode *dir;
+ BTRFS_PATH_AUTO_FREE(log_path);
+ struct btrfs_inode *dir;
dir_key.objectid = dirid;
- dir_key.type = BTRFS_DIR_ITEM_KEY;
+ dir_key.type = BTRFS_DIR_INDEX_KEY;
log_path = btrfs_alloc_path();
- if (!log_path)
+ if (!log_path) {
+ btrfs_abort_log_replay(wc, -ENOMEM, "failed to allocate path");
return -ENOMEM;
+ }
- dir = read_one_inode(root, dirid);
- /* it isn't an error if the inode isn't there, that can happen
- * because we replay the deletes before we copy in the inode item
- * from the log
+ dir = btrfs_iget_logging(dirid, root);
+ /*
+	 * It isn't an error if the inode isn't there; that can happen because
+ * we replay the deletes before we copy in the inode item from the log.
*/
- if (!dir) {
- btrfs_free_path(log_path);
- return 0;
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ if (ret == -ENOENT)
+ ret = 0;
+ else
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup dir inode %llu root %llu",
+ dirid, btrfs_root_id(root));
+ return ret;
}
-again:
+
range_start = 0;
range_end = 0;
while (1) {
if (del_all)
range_end = (u64)-1;
else {
- ret = find_dir_range(log, path, dirid, key_type,
+ ret = find_dir_range(log, wc->subvol_path, dirid,
&range_start, &range_end);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to find range for dir %llu in log tree root %llu",
+ dirid, btrfs_root_id(root));
goto out;
- else if (ret > 0)
+ } else if (ret > 0) {
break;
+ }
}
dir_key.offset = range_start;
while (1) {
int nritems;
- ret = btrfs_search_slot(NULL, root, &dir_key, path,
- 0, 0);
- if (ret < 0)
+ ret = btrfs_search_slot(NULL, root, &dir_key,
+ wc->subvol_path, 0, 0);
+ if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to search root %llu for key " BTRFS_KEY_FMT,
+ btrfs_root_id(root),
+ BTRFS_KEY_FMT_VALUE(&dir_key));
goto out;
+ }
- nritems = btrfs_header_nritems(path->nodes[0]);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 1)
+ nritems = btrfs_header_nritems(wc->subvol_path->nodes[0]);
+ if (wc->subvol_path->slots[0] >= nritems) {
+ ret = btrfs_next_leaf(root, wc->subvol_path);
+ if (ret == 1) {
break;
- else if (ret < 0)
+ } else if (ret < 0) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to get next leaf in subvolume root %llu",
+ btrfs_root_id(root));
goto out;
+ }
}
- btrfs_item_key_to_cpu(path->nodes[0], &found_key,
- path->slots[0]);
+ btrfs_item_key_to_cpu(wc->subvol_path->nodes[0], &found_key,
+ wc->subvol_path->slots[0]);
if (found_key.objectid != dirid ||
- found_key.type != dir_key.type)
- goto next_type;
+ found_key.type != dir_key.type) {
+ ret = 0;
+ goto out;
+ }
if (found_key.offset > range_end)
break;
- ret = check_item_in_log(trans, log, path,
- log_path, dir,
- &found_key);
+ ret = check_item_in_log(wc, log_path, dir, &found_key, del_all);
if (ret)
goto out;
if (found_key.offset == (u64)-1)
break;
dir_key.offset = found_key.offset + 1;
}
- btrfs_release_path(path);
+ btrfs_release_path(wc->subvol_path);
if (range_end == (u64)-1)
break;
range_start = range_end + 1;
}
-
-next_type:
ret = 0;
- if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
- key_type = BTRFS_DIR_LOG_INDEX_KEY;
- dir_key.type = BTRFS_DIR_INDEX_KEY;
- btrfs_release_path(path);
- goto again;
- }
out:
- btrfs_release_path(path);
- btrfs_free_path(log_path);
- iput(dir);
+ btrfs_release_path(wc->subvol_path);
+ iput(&dir->vfs_inode);
return ret;
}
@@ -2601,48 +2759,70 @@ out:
* only in the log (references come from either directory items or inode
* back refs).
*/
-static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+static int replay_one_buffer(struct extent_buffer *eb,
struct walk_control *wc, u64 gen, int level)
{
int nritems;
- struct btrfs_path *path;
- struct btrfs_root *root = wc->replay_dest;
- struct btrfs_key key;
- int i;
+ struct btrfs_tree_parent_check check = {
+ .transid = gen,
+ .level = level
+ };
+ struct btrfs_root *root = wc->root;
+ struct btrfs_trans_handle *trans = wc->trans;
int ret;
- ret = btrfs_read_buffer(eb, gen, level, NULL);
- if (ret)
- return ret;
-
- level = btrfs_header_level(eb);
-
if (level != 0)
return 0;
- path = btrfs_alloc_path();
- if (!path)
+ /*
+	 * Set to NULL since it was not yet read, so that in case we abort log
+	 * replay on error we have no valid log tree leaf to dump.
+ */
+ wc->log_leaf = NULL;
+ ret = btrfs_read_extent_buffer(eb, &check);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to read log tree leaf %llu for root %llu",
+ eb->start, btrfs_root_id(root));
+ return ret;
+ }
+
+ ASSERT(wc->subvol_path == NULL);
+ wc->subvol_path = btrfs_alloc_path();
+ if (!wc->subvol_path) {
+ btrfs_abort_log_replay(wc, -ENOMEM, "failed to allocate path");
return -ENOMEM;
+ }
+
+ wc->log_leaf = eb;
nritems = btrfs_header_nritems(eb);
- for (i = 0; i < nritems; i++) {
- btrfs_item_key_to_cpu(eb, &key, i);
+ for (wc->log_slot = 0; wc->log_slot < nritems; wc->log_slot++) {
+ struct btrfs_inode_item *inode_item;
- /* inode keys are done during the first stage */
- if (key.type == BTRFS_INODE_ITEM_KEY &&
- wc->stage == LOG_WALK_REPLAY_INODES) {
- struct btrfs_inode_item *inode_item;
- u32 mode;
+ btrfs_item_key_to_cpu(eb, &wc->log_key, wc->log_slot);
- inode_item = btrfs_item_ptr(eb, i,
- struct btrfs_inode_item);
+ if (wc->log_key.type == BTRFS_INODE_ITEM_KEY) {
+ inode_item = btrfs_item_ptr(eb, wc->log_slot,
+ struct btrfs_inode_item);
/*
- * If we have a tmpfile (O_TMPFILE) that got fsync'ed
- * and never got linked before the fsync, skip it, as
- * replaying it is pointless since it would be deleted
- * later. We skip logging tmpfiles, but it's always
- * possible we are replaying a log created with a kernel
- * that used to log tmpfiles.
+ * An inode with no links is either:
+ *
+ * 1) A tmpfile (O_TMPFILE) that got fsync'ed and never
+ * got linked before the fsync, skip it, as replaying
+ * it is pointless since it would be deleted later.
+ * We skip logging tmpfiles, but it's always possible
+ * we are replaying a log created with a kernel that
+ * used to log tmpfiles;
+ *
+ * 2) A non-tmpfile which got its last link deleted
+ * while holding an open fd on it and later got
+ * fsynced through that fd. We always log the
+ * parent inodes when inode->last_unlink_trans is
+ * set to the current transaction, so ignore all the
+ * inode items for this inode. We will delete the
+ * inode when processing the parent directory with
+ * replay_dir_deletes().
*/
if (btrfs_inode_nlink(eb, inode_item) == 0) {
wc->ignore_cur_inode = true;
@@ -2650,19 +2830,23 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
} else {
wc->ignore_cur_inode = false;
}
- ret = replay_xattr_deletes(wc->trans, root, log,
- path, key.objectid);
+ }
+
+ /* Inode keys are done during the first stage. */
+ if (wc->log_key.type == BTRFS_INODE_ITEM_KEY &&
+ wc->stage == LOG_WALK_REPLAY_INODES) {
+ u32 mode;
+
+ ret = replay_xattr_deletes(wc);
if (ret)
break;
mode = btrfs_inode_mode(eb, inode_item);
if (S_ISDIR(mode)) {
- ret = replay_dir_deletes(wc->trans,
- root, log, path, key.objectid, 0);
+ ret = replay_dir_deletes(wc, wc->log_key.objectid, false);
if (ret)
break;
}
- ret = overwrite_item(wc->trans, root, path,
- eb, i, &key);
+ ret = overwrite_item(wc);
if (ret)
break;
@@ -2676,36 +2860,48 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
*/
if (S_ISREG(mode)) {
struct btrfs_drop_extents_args drop_args = { 0 };
- struct inode *inode;
+ struct btrfs_inode *inode;
u64 from;
- inode = read_one_inode(root, key.objectid);
- if (!inode) {
- ret = -EIO;
+ inode = btrfs_iget_logging(wc->log_key.objectid, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ btrfs_abort_log_replay(wc, ret,
+ "failed to lookup inode %llu root %llu",
+ wc->log_key.objectid,
+ btrfs_root_id(root));
break;
}
- from = ALIGN(i_size_read(inode),
+ from = ALIGN(i_size_read(&inode->vfs_inode),
root->fs_info->sectorsize);
drop_args.start = from;
drop_args.end = (u64)-1;
drop_args.drop_cache = true;
- ret = btrfs_drop_extents(wc->trans, root,
- BTRFS_I(inode),
- &drop_args);
- if (!ret) {
- inode_sub_bytes(inode,
+ drop_args.path = wc->subvol_path;
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ if (ret) {
+ btrfs_abort_log_replay(wc, ret,
+ "failed to drop extents for inode %llu root %llu offset %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root),
+ from);
+ } else {
+ inode_sub_bytes(&inode->vfs_inode,
drop_args.bytes_found);
/* Update the inode's nbytes. */
- ret = btrfs_update_inode(wc->trans,
- root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, inode);
+ if (ret)
+ btrfs_abort_log_replay(wc, ret,
+ "failed to update inode %llu root %llu",
+ btrfs_ino(inode),
+ btrfs_root_id(root));
}
- iput(inode);
+ iput(&inode->vfs_inode);
if (ret)
break;
}
- ret = link_to_fixup_dir(wc->trans, root,
- path, key.objectid);
+ ret = link_to_fixup_dir(wc, wc->log_key.objectid);
if (ret)
break;
}
@@ -2713,10 +2909,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
if (wc->ignore_cur_inode)
continue;
- if (key.type == BTRFS_DIR_INDEX_KEY &&
+ if (wc->log_key.type == BTRFS_DIR_INDEX_KEY &&
wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
- ret = replay_one_dir_item(wc->trans, root, path,
- eb, i, &key);
+ ret = replay_one_dir_item(wc);
if (ret)
break;
}
@@ -2725,72 +2920,84 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
continue;
/* these keys are simply copied */
- if (key.type == BTRFS_XATTR_ITEM_KEY) {
- ret = overwrite_item(wc->trans, root, path,
- eb, i, &key);
+ if (wc->log_key.type == BTRFS_XATTR_ITEM_KEY) {
+ ret = overwrite_item(wc);
if (ret)
break;
- } else if (key.type == BTRFS_INODE_REF_KEY ||
- key.type == BTRFS_INODE_EXTREF_KEY) {
- ret = add_inode_ref(wc->trans, root, log, path,
- eb, i, &key);
- if (ret && ret != -ENOENT)
- break;
- ret = 0;
- } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
- ret = replay_one_extent(wc->trans, root, path,
- eb, i, &key);
+ } else if (wc->log_key.type == BTRFS_INODE_REF_KEY ||
+ wc->log_key.type == BTRFS_INODE_EXTREF_KEY) {
+ ret = add_inode_ref(wc);
if (ret)
break;
- } else if (key.type == BTRFS_DIR_ITEM_KEY) {
- ret = replay_one_dir_item(wc->trans, root, path,
- eb, i, &key);
+ } else if (wc->log_key.type == BTRFS_EXTENT_DATA_KEY) {
+ ret = replay_one_extent(wc);
if (ret)
break;
}
+ /*
+ * We don't log BTRFS_DIR_ITEM_KEY keys anymore, only the
+ * BTRFS_DIR_INDEX_KEY items which we use to derive the
+ * BTRFS_DIR_ITEM_KEY items. If we are replaying a log from an
+ * older kernel with such keys, ignore them.
+ */
}
- btrfs_free_path(path);
+ btrfs_free_path(wc->subvol_path);
+ wc->subvol_path = NULL;
return ret;
}
-/*
- * Correctly adjust the reserved bytes occupied by a log tree extent buffer
- */
-static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
+static int clean_log_buffer(struct btrfs_trans_handle *trans,
+ struct extent_buffer *eb)
{
- struct btrfs_block_group *cache;
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct btrfs_block_group *bg;
- cache = btrfs_lookup_block_group(fs_info, start);
- if (!cache) {
- btrfs_err(fs_info, "unable to find block group for %llu", start);
- return;
+ btrfs_tree_lock(eb);
+ btrfs_clear_buffer_dirty(trans, eb);
+ wait_on_extent_buffer_writeback(eb);
+ btrfs_tree_unlock(eb);
+
+ if (trans) {
+ int ret;
+
+ ret = btrfs_pin_reserved_extent(trans, eb);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->reserved -= fs_info->nodesize;
- cache->space_info->bytes_reserved -= fs_info->nodesize;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
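+	/*
+	 * Without a transaction, just release the reserved space accounting
+	 * for this buffer from its block group.
+	 */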
+ bg = btrfs_lookup_block_group(fs_info, eb->start);
+ if (!bg) {
+ btrfs_err(fs_info, "unable to find block group for %llu", eb->start);
+ btrfs_handle_fs_error(fs_info, -ENOENT, NULL);
+ return -ENOENT;
+ }
+
+ spin_lock(&bg->space_info->lock);
+ spin_lock(&bg->lock);
+ bg->reserved -= fs_info->nodesize;
+ bg->space_info->bytes_reserved -= fs_info->nodesize;
+ spin_unlock(&bg->lock);
+ spin_unlock(&bg->space_info->lock);
+
+ btrfs_put_block_group(bg);
- btrfs_put_block_group(cache);
+ return 0;
}
-static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, int *level,
- struct walk_control *wc)
+static noinline int walk_down_log_tree(struct btrfs_path *path, int *level,
+ struct walk_control *wc)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_trans_handle *trans = wc->trans;
+ struct btrfs_fs_info *fs_info = wc->log->fs_info;
u64 bytenr;
u64 ptr_gen;
struct extent_buffer *next;
struct extent_buffer *cur;
- u32 blocksize;
int ret = 0;
while (*level > 0) {
- struct btrfs_key first_key;
+ struct btrfs_tree_parent_check check = { 0 };
cur = path->nodes[*level];
@@ -2802,18 +3009,25 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
- blocksize = fs_info->nodesize;
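+		/*
+		 * Fill in the expected generation, level and first key used to
+		 * validate the child buffer when it is read below.
+		 */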
+ check.transid = ptr_gen;
+ check.level = *level - 1;
+ check.has_first_key = true;
+ btrfs_node_key_to_cpu(cur, &check.first_key, path->slots[*level]);
next = btrfs_find_create_tree_block(fs_info, bytenr,
btrfs_header_owner(cur),
*level - 1);
- if (IS_ERR(next))
- return PTR_ERR(next);
+ if (IS_ERR(next)) {
+ ret = PTR_ERR(next);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
+ return ret;
+ }
if (*level == 1) {
- ret = wc->process_func(root, next, wc, ptr_gen,
- *level - 1);
+ ret = wc->process_func(next, wc, ptr_gen, *level - 1);
if (ret) {
free_extent_buffer(next);
return ret;
@@ -2821,38 +3035,32 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
path->slots[*level]++;
if (wc->free) {
- ret = btrfs_read_buffer(next, ptr_gen,
- *level - 1, &first_key);
+ ret = btrfs_read_extent_buffer(next, &check);
if (ret) {
free_extent_buffer(next);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
}
- if (trans) {
- btrfs_tree_lock(next);
- btrfs_clean_tree_block(next);
- btrfs_wait_tree_block_writeback(next);
- btrfs_tree_unlock(next);
- ret = btrfs_pin_reserved_extent(trans,
- bytenr, blocksize);
- if (ret) {
- free_extent_buffer(next);
- return ret;
- }
- btrfs_redirty_list_add(
- trans->transaction, next);
- } else {
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
- clear_extent_buffer_dirty(next);
- unaccount_log_buffer(fs_info, bytenr);
+ ret = clean_log_buffer(trans, next);
+ if (ret) {
+ free_extent_buffer(next);
+ return ret;
}
}
free_extent_buffer(next);
continue;
}
- ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
+ ret = btrfs_read_extent_buffer(next, &check);
if (ret) {
free_extent_buffer(next);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
}
@@ -2869,12 +3077,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
return 0;
}
-static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, int *level,
- struct walk_control *wc)
+static noinline int walk_up_log_tree(struct btrfs_path *path, int *level,
+ struct walk_control *wc)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int i;
int slot;
int ret;
@@ -2887,34 +3092,16 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(*level == 0);
return 0;
} else {
- ret = wc->process_func(root, path->nodes[*level], wc,
+ ret = wc->process_func(path->nodes[*level], wc,
btrfs_header_generation(path->nodes[*level]),
*level);
if (ret)
return ret;
if (wc->free) {
- struct extent_buffer *next;
-
- next = path->nodes[*level];
-
- if (trans) {
- btrfs_tree_lock(next);
- btrfs_clean_tree_block(next);
- btrfs_wait_tree_block_writeback(next);
- btrfs_tree_unlock(next);
- ret = btrfs_pin_reserved_extent(trans,
- path->nodes[*level]->start,
- path->nodes[*level]->len);
- if (ret)
- return ret;
- } else {
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
- clear_extent_buffer_dirty(next);
-
- unaccount_log_buffer(fs_info,
- path->nodes[*level]->start);
- }
+ ret = clean_log_buffer(wc->trans, path->nodes[*level]);
+ if (ret)
+ return ret;
}
free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = NULL;
@@ -2929,14 +3116,13 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
* the tree freeing any blocks that have a ref count of zero after being
* decremented.
*/
-static int walk_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *log, struct walk_control *wc)
+static int walk_log_tree(struct walk_control *wc)
{
- struct btrfs_fs_info *fs_info = log->fs_info;
+ struct btrfs_root *log = wc->log;
int ret = 0;
int wret;
int level;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int orig_level;
path = btrfs_alloc_path();
@@ -2946,58 +3132,34 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
level = btrfs_header_level(log->node);
orig_level = level;
path->nodes[level] = log->node;
- atomic_inc(&log->node->refs);
+ refcount_inc(&log->node->refs);
path->slots[level] = 0;
while (1) {
- wret = walk_down_log_tree(trans, log, path, &level, wc);
+ wret = walk_down_log_tree(path, &level, wc);
if (wret > 0)
break;
- if (wret < 0) {
- ret = wret;
- goto out;
- }
+ if (wret < 0)
+ return wret;
- wret = walk_up_log_tree(trans, log, path, &level, wc);
+ wret = walk_up_log_tree(path, &level, wc);
if (wret > 0)
break;
- if (wret < 0) {
- ret = wret;
- goto out;
- }
+ if (wret < 0)
+ return wret;
}
/* was the root node processed? if not, catch it here */
if (path->nodes[orig_level]) {
- ret = wc->process_func(log, path->nodes[orig_level], wc,
+ ret = wc->process_func(path->nodes[orig_level], wc,
btrfs_header_generation(path->nodes[orig_level]),
orig_level);
if (ret)
- goto out;
- if (wc->free) {
- struct extent_buffer *next;
-
- next = path->nodes[orig_level];
-
- if (trans) {
- btrfs_tree_lock(next);
- btrfs_clean_tree_block(next);
- btrfs_wait_tree_block_writeback(next);
- btrfs_tree_unlock(next);
- ret = btrfs_pin_reserved_extent(trans,
- next->start, next->len);
- if (ret)
- goto out;
- } else {
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
- clear_extent_buffer_dirty(next);
- unaccount_log_buffer(fs_info, next->start);
- }
- }
+ return ret;
+ if (wc->free)
+ ret = clean_log_buffer(wc->trans, path->nodes[orig_level]);
}
-out:
- btrfs_free_path(path);
return ret;
}
@@ -3065,6 +3227,52 @@ static void wait_for_writer(struct btrfs_root *root)
finish_wait(&root->log_writer_wait, &wait);
}
+void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct btrfs_inode *inode)
+{
+ ctx->log_ret = 0;
+ ctx->log_transid = 0;
+ ctx->log_new_dentries = false;
+ ctx->logging_new_name = false;
+ ctx->logging_new_delayed_dentries = false;
+ ctx->logged_before = false;
+ ctx->inode = inode;
+ INIT_LIST_HEAD(&ctx->list);
+ INIT_LIST_HEAD(&ctx->ordered_extents);
+ INIT_LIST_HEAD(&ctx->conflict_inodes);
+ ctx->num_conflict_inodes = 0;
+ ctx->logging_conflict_inodes = false;
+ ctx->scratch_eb = NULL;
+}
+
+void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_inode *inode = ctx->inode;
+
+ if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
+ !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
+ return;
+
+ /*
+	 * Don't care about allocation failure. This is just an optimization;
+	 * if we fail to allocate here, we will try again later if needed.
+ */
+ ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0);
+}
+
+void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_ordered_extent *tmp;
+
+ btrfs_assert_inode_locked(ctx->inode);
+
+ list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
+ list_del_init(&ordered->log_list);
+ btrfs_put_ordered_extent(ordered);
+ }
+}
+
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
@@ -3090,10 +3298,9 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
}
/*
- * btrfs_sync_log does sends a given tree log down to the disk and
- * updates the super blocks to record it. When this call is done,
- * you know that any inodes previously logged are safely on disk only
- * if it returns 0.
+ * Sends a given tree log down to the disk and updates the super blocks to
+ * record it. When this call is done, you know that any inodes previously
+ * logged are safely on disk only if it returns 0.
*
* Any other return value means you need to call btrfs_commit_transaction.
* Some of the edge cases for fsyncing directories that have had unlinks
@@ -3131,7 +3338,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
return ctx->log_ret;
}
- ASSERT(log_transid == root->log_transid);
+ ASSERT(log_transid == root->log_transid,
+ "log_transid=%d root->log_transid=%d", log_transid, root->log_transid);
atomic_set(&root->log_commit[index1], 1);
/* wait for previous tree log sync to complete */
@@ -3154,15 +3362,15 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* bail out if we need to do a full commit */
if (btrfs_need_log_full_commit(trans)) {
- ret = -EAGAIN;
+ ret = BTRFS_LOG_FORCE_COMMIT;
mutex_unlock(&root->log_mutex);
goto out;
}
if (log_transid % 2 == 0)
- mark = EXTENT_DIRTY;
+ mark = EXTENT_DIRTY_LOG1;
else
- mark = EXTENT_NEW;
+ mark = EXTENT_DIRTY_LOG2;
/* we start IO on all the marked extents here, but we don't actually
* wait for them until later.
@@ -3182,7 +3390,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = 0;
if (ret) {
blk_finish_plug(&plug);
- btrfs_abort_transaction(trans, ret);
btrfs_set_log_full_commit(trans);
mutex_unlock(&root->log_mutex);
goto out;
@@ -3204,7 +3411,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_root_node(&log->root_item, log->node);
memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
- root->log_transid++;
+ btrfs_set_root_log_transid(root, root->log_transid + 1);
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
@@ -3220,6 +3427,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
if (ret) {
mutex_unlock(&fs_info->tree_root->log_mutex);
+ blk_finish_plug(&plug);
goto out;
}
}
@@ -3241,20 +3449,15 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
*/
ret = update_log_root(trans, log, &new_root_item);
if (ret) {
- if (!list_empty(&root_log_ctx.list))
- list_del_init(&root_log_ctx.list);
-
+ list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug);
btrfs_set_log_full_commit(trans);
-
- if (ret != -ENOSPC) {
- btrfs_abort_transaction(trans, ret);
- mutex_unlock(&log_root_tree->log_mutex);
- goto out;
- }
+ if (ret != -ENOSPC)
+ btrfs_err(fs_info,
+ "failed to update log for root %llu ret %d",
+ btrfs_root_id(root), ret);
btrfs_wait_tree_log_extents(log, mark);
mutex_unlock(&log_root_tree->log_mutex);
- ret = -EAGAIN;
goto out;
}
@@ -3266,7 +3469,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
- index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
ret = btrfs_wait_tree_log_extents(log, mark);
@@ -3277,7 +3479,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = root_log_ctx.log_ret;
goto out;
}
- ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
+ ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid,
+ "root_log_ctx.log_transid=%d log_root_tree->log_transid=%d",
+ root_log_ctx.log_transid, log_root_tree->log_transid);
atomic_set(&log_root_tree->log_commit[index2], 1);
if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
@@ -3293,13 +3497,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
blk_finish_plug(&plug);
btrfs_wait_tree_log_extents(log, mark);
mutex_unlock(&log_root_tree->log_mutex);
- ret = -EAGAIN;
+ ret = BTRFS_LOG_FORCE_COMMIT;
goto out_wake_log_root;
}
ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
- EXTENT_DIRTY | EXTENT_NEW);
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
blk_finish_plug(&plug);
/*
* As described above, -EAGAIN indicates a hole in the extents. We
@@ -3313,14 +3517,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out_wake_log_root;
} else if (ret) {
btrfs_set_log_full_commit(trans);
- btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
ret = btrfs_wait_tree_log_extents(log, mark);
if (!ret)
ret = btrfs_wait_tree_log_extents(log_root_tree,
- EXTENT_NEW | EXTENT_DIRTY);
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
if (ret) {
btrfs_set_log_full_commit(trans);
mutex_unlock(&log_root_tree->log_mutex);
@@ -3368,7 +3571,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
ret = write_all_supers(fs_info, 1);
mutex_unlock(&fs_info->tree_log_mutex);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_set_log_full_commit(trans);
btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
@@ -3382,8 +3585,10 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* someone else already started it. We use <= and not < because the
* first log transaction has an ID of 0.
*/
- ASSERT(root->last_log_commit <= log_transid);
- root->last_log_commit = log_transid;
+ ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid,
+ "last_log_commit(root)=%d log_transid=%d",
+ btrfs_get_root_last_log_commit(root), log_transid);
+ btrfs_set_root_last_log_commit(root, log_transid);
out_wake_log_root:
mutex_lock(&log_root_tree->log_mutex);
@@ -3420,13 +3625,38 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
{
int ret;
struct walk_control wc = {
- .free = 1,
- .process_func = process_one_buffer
+ .free = true,
+ .process_func = process_one_buffer,
+ .log = log,
+ .trans = trans,
};
if (log->node) {
- ret = walk_log_tree(trans, log, &wc);
+ ret = walk_log_tree(&wc);
if (ret) {
+ /*
+ * We weren't able to traverse the entire log tree, the
+ * typical scenario is getting an -EIO when reading an
+ * extent buffer of the tree, due to a previous writeback
+			 * failure of that buffer.
+ */
+ set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
+ &log->fs_info->fs_state);
+
+ /*
+ * Some extent buffers of the log tree may still be dirty
+ * and not yet written back to storage, because we may
+ * have updates to a log tree without syncing a log tree,
+ * such as during rename and link operations. So flush
+ * them out and wait for their writeback to complete, so
+ * that we properly cleanup their state and pages.
+ */
+ btrfs_write_marked_extents(log->fs_info,
+ &log->dirty_log_pages,
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
+ btrfs_wait_tree_log_extents(log,
+ EXTENT_DIRTY_LOG1 | EXTENT_DIRTY_LOG2);
+
if (trans)
btrfs_abort_transaction(trans, ret);
else
@@ -3434,12 +3664,9 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
}
}
- clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
- EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
- extent_io_tree_release(&log->log_csum_range);
+ btrfs_extent_io_tree_release(&log->dirty_log_pages);
+ btrfs_extent_io_tree_release(&log->log_csum_range);
- if (trans && log->node)
- btrfs_redirty_list_add(trans->transaction, log->node);
btrfs_put_root(log);
}
@@ -3468,36 +3695,185 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
return 0;
}
+static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
+{
+ bool ret = false;
+
+ /*
+ * Do this only if ->logged_trans is still 0 to prevent races with
+ * concurrent logging as we may see the inode not logged when
+ * inode_logged() is called but it gets logged after inode_logged() did
+ * not find it in the log tree and we end up setting ->logged_trans to a
+ * value less than trans->transid after the concurrent logging task has
+ * set it to trans->transid. As a consequence, subsequent rename, unlink
+ * and link operations may end up not logging new names and removing old
+ * names from the log.
+ */
+ spin_lock(&inode->lock);
+ if (inode->logged_trans == 0)
+ inode->logged_trans = trans->transid - 1;
+ else if (inode->logged_trans == trans->transid)
+ ret = true;
+ spin_unlock(&inode->lock);
+
+ return ret;
+}
+
/*
- * Check if an inode was logged in the current transaction. This may often
- * return some false positives, because logged_trans is an in memory only field,
- * not persisted anywhere. This is meant to be used in contexts where a false
- * positive has no functional consequences.
+ * Check if an inode was logged in the current transaction. This correctly deals
+ * with the case where the inode was logged but has a logged_trans of 0, which
+ * happens if the inode is evicted and loaded again, as logged_trans is an
+ * in-memory only field (not persisted).
+ *
+ * Returns 1 if the inode was logged before in the transaction, 0 if it was not,
+ * and < 0 on error.
*/
-static bool inode_logged(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode)
+static int inode_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *path_in)
{
- if (inode->logged_trans == trans->transid)
- return true;
+ struct btrfs_path *path = path_in;
+ struct btrfs_key key;
+ int ret;
+ /*
+ * Quick lockless call, since once ->logged_trans is set to the current
+ * transaction, we never set it to a lower value anywhere else.
+ */
+ if (data_race(inode->logged_trans) == trans->transid)
+ return 1;
+
+ /*
+ * If logged_trans is not 0 and not trans->transid, then we know the
+ * inode was not logged in this transaction, so we can return false
+ * right away. We take the lock to avoid a race caused by load/store
+ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
+ * in this function further below - an update to trans->transid can be
+ * teared into two 32 bits updates for example, in which case we could
+ * see a positive value that is not trans->transid and assume the inode
+ * was not logged when it was.
+ */
+ spin_lock(&inode->lock);
+ if (inode->logged_trans == trans->transid) {
+ spin_unlock(&inode->lock);
+ return 1;
+ } else if (inode->logged_trans > 0) {
+ spin_unlock(&inode->lock);
+ return 0;
+ }
+ spin_unlock(&inode->lock);
+
+ /*
+ * If no log tree was created for this root in this transaction, then
+ * the inode can not have been logged in this transaction. In that case
+ * set logged_trans to anything greater than 0 and less than the current
+ * transaction's ID, to avoid the search below in a future call in case
+ * a log tree gets created after this.
+ */
if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
- return false;
+ return mark_inode_as_not_logged(trans, inode);
/*
- * The inode's logged_trans is always 0 when we load it (because it is
- * not persisted in the inode item or elsewhere). So if it is 0, the
- * inode was last modified in the current transaction then the inode may
- * have been logged before in the current transaction, then evicted and
- * loaded again in the current transaction - or may have never been logged
- * in the current transaction, but since we can not be sure, we have to
- * assume it was, otherwise our callers can leave an inconsistent log.
+ * We have a log tree and the inode's logged_trans is 0. We can't tell
+ * for sure if the inode was logged before in this transaction by looking
+ * only at logged_trans. We could be pessimistic and assume it was, but
+ * that can lead to unnecessarily logging an inode during rename and link
+ * operations, and then further updating the log in followup rename and
+ * link operations, specially if it's a directory, which adds latency
+ * visible to applications doing a series of rename or link operations.
+ *
+ * A logged_trans of 0 here can mean several things:
+ *
+ * 1) The inode was never logged since the filesystem was mounted, and may
+	 *    or may not have been evicted and loaded again;
+ *
+ * 2) The inode was logged in a previous transaction, then evicted and
+ * then loaded again;
+ *
+ * 3) The inode was logged in the current transaction, then evicted and
+ * then loaded again.
+ *
+ * For cases 1) and 2) we don't want to return true, but we need to detect
+ * case 3) and return true. So we do a search in the log root for the inode
+ * item.
*/
- if (inode->logged_trans == 0 &&
- inode->last_trans == trans->transid &&
- !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
- return true;
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ }
+
+ ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
+
+ if (path_in)
+ btrfs_release_path(path);
+ else
+ btrfs_free_path(path);
+
+ /*
+ * Logging an inode always results in logging its inode item. So if we
+ * did not find the item we know the inode was not logged for sure.
+ */
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ /*
+		 * Set logged_trans to a value greater than 0 and less than the
+ * current transaction to avoid doing the search in future calls.
+ */
+ return mark_inode_as_not_logged(trans, inode);
+ }
+
+ /*
+ * The inode was previously logged and then evicted, set logged_trans to
+ * the current transaction's ID, to avoid future tree searches as long as
+ * the inode is not evicted again.
+ */
+ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
+ spin_unlock(&inode->lock);
+
+ return 1;
+}
+
+/*
+ * Delete a directory entry from the log if it exists.
+ *
+ * Returns < 0 on error
+ *          1 if the entry does not exist
+ * 0 if the entry existed and was successfully deleted
+ */
+static int del_logged_dentry(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log,
+ struct btrfs_path *path,
+ u64 dir_ino,
+ const struct fscrypt_str *name,
+ u64 index)
+{
+ struct btrfs_dir_item *di;
- return false;
+ /*
+ * We only log dir index items of a directory, so we don't need to look
+ * for dir item keys.
+ */
+ di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
+ index, name, -1);
+ if (IS_ERR(di))
+ return PTR_ERR(di);
+ else if (!di)
+ return 1;
+
+ /*
+ * We do not need to update the size field of the directory's
+ * inode item because on log replay we update the field to reflect
+ * all existing entries in the directory (see overwrite_item()).
+ */
+ return btrfs_del_item(trans, log, path);
}
/*
@@ -3522,96 +3898,67 @@ static bool inode_logged(struct btrfs_trans_handle *trans,
* or the entire directory.
*/
void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
+ const struct fscrypt_str *name,
struct btrfs_inode *dir, u64 index)
{
- struct btrfs_root *log;
- struct btrfs_dir_item *di;
- struct btrfs_path *path;
+ struct btrfs_root *root = dir->root;
+ BTRFS_PATH_AUTO_FREE(path);
int ret;
- int err = 0;
- u64 dir_ino = btrfs_ino(dir);
- if (!inode_logged(trans, dir))
+ ret = inode_logged(trans, dir, NULL);
+ if (ret == 0)
return;
-
- ret = join_running_log_trans(root);
- if (ret)
+ if (ret < 0) {
+ btrfs_set_log_full_commit(trans);
return;
+ }
- mutex_lock(&dir->log_mutex);
-
- log = root->log_root;
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
- goto out_unlock;
+ btrfs_set_log_full_commit(trans);
+ return;
}
- di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
- name, name_len, -1);
- if (IS_ERR(di)) {
- err = PTR_ERR(di);
- goto fail;
- }
- if (di) {
- ret = btrfs_delete_one_dir_name(trans, log, path, di);
- if (ret) {
- err = ret;
- goto fail;
- }
- }
- btrfs_release_path(path);
- di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
- index, name, name_len, -1);
- if (IS_ERR(di)) {
- err = PTR_ERR(di);
- goto fail;
- }
- if (di) {
- ret = btrfs_delete_one_dir_name(trans, log, path, di);
- if (ret) {
- err = ret;
- goto fail;
- }
- }
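+	/*
+	 * The directory was logged in this transaction, so there must be a
+	 * running log transaction that we can join.
+	 */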
+ ret = join_running_log_trans(root);
+ ASSERT(ret == 0, "join_running_log_trans() ret=%d", ret);
+ if (WARN_ON(ret))
+ return;
- /*
- * We do not need to update the size field of the directory's inode item
- * because on log replay we update the field to reflect all existing
- * entries in the directory (see overwrite_item()).
- */
-fail:
- btrfs_free_path(path);
-out_unlock:
+ mutex_lock(&dir->log_mutex);
+
+ ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir),
+ name, index);
mutex_unlock(&dir->log_mutex);
- if (err < 0)
+ if (ret < 0)
btrfs_set_log_full_commit(trans);
btrfs_end_log_trans(root);
}
/* see comments for btrfs_del_dir_entries_in_log */
void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
- struct btrfs_inode *inode, u64 dirid)
+ const struct fscrypt_str *name,
+ struct btrfs_inode *inode,
+ struct btrfs_inode *dir)
{
- struct btrfs_root *log;
- u64 index;
+ struct btrfs_root *root = dir->root;
int ret;
- if (!inode_logged(trans, inode))
+ ret = inode_logged(trans, inode, NULL);
+ if (ret == 0)
+ return;
+ else if (ret < 0) {
+ btrfs_set_log_full_commit(trans);
return;
+ }
ret = join_running_log_trans(root);
- if (ret)
+ ASSERT(ret == 0, "join_running_log_trans() ret=%d", ret);
+ if (WARN_ON(ret))
return;
- log = root->log_root;
mutex_lock(&inode->log_mutex);
- ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
- dirid, &index);
+ ret = btrfs_del_inode_ref(trans, root->log_root, name, btrfs_ino(inode),
+ btrfs_ino(dir), NULL);
mutex_unlock(&inode->log_mutex);
if (ret < 0 && ret != -ENOENT)
btrfs_set_log_full_commit(trans);
@@ -3626,7 +3973,7 @@ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
struct btrfs_root *log,
struct btrfs_path *path,
- int key_type, u64 dirid,
+ u64 dirid,
u64 first_offset, u64 last_offset)
{
int ret;
@@ -3634,46 +3981,62 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
struct btrfs_dir_log_item *item;
key.objectid = dirid;
+ key.type = BTRFS_DIR_LOG_INDEX_KEY;
key.offset = first_offset;
- if (key_type == BTRFS_DIR_ITEM_KEY)
- key.type = BTRFS_DIR_LOG_ITEM_KEY;
- else
- key.type = BTRFS_DIR_LOG_INDEX_KEY;
ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
- if (ret)
+ /*
+ * -EEXIST is fine and can happen sporadically when we are logging a
+ * directory and have concurrent insertions in the subvolume's tree for
+ * items from other inodes and that result in pushing off some dir items
+	 * from one leaf to another in order to accommodate the new items.
+ * This results in logging the same dir index range key.
+ */
+ if (ret && ret != -EEXIST)
return ret;
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_dir_log_item);
+ if (ret == -EEXIST) {
+ const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item);
+
+ /*
+ * btrfs_del_dir_entries_in_log() might have been called during
+ * an unlink between the initial insertion of this key and the
+ * current update, or we might be logging a single entry deletion
+ * during a rename, so set the new last_offset to the max value.
+ */
+ last_offset = max(last_offset, curr_end);
+ }
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
- btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_release_path(path);
return 0;
}
static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
- struct btrfs_root *log,
+ struct btrfs_inode *inode,
struct extent_buffer *src,
struct btrfs_path *dst_path,
int start_slot,
int count)
{
- char *ins_data = NULL;
+ struct btrfs_root *log = inode->root->log_root;
+ char AUTO_KFREE(ins_data);
struct btrfs_item_batch batch;
struct extent_buffer *dst;
unsigned long src_offset;
unsigned long dst_offset;
+ u64 last_index;
struct btrfs_key key;
u32 item_size;
int ret;
int i;
- ASSERT(count > 0);
+ ASSERT(count > 0, "count=%d", count);
batch.nr = count;
if (count == 1) {
btrfs_item_key_to_cpu(src, &key, start_slot);
- item_size = btrfs_item_size_nr(src, start_slot);
+ item_size = btrfs_item_size(src, start_slot);
batch.keys = &key;
batch.data_sizes = &item_size;
batch.total_data_size = item_size;
@@ -3681,8 +4044,7 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
struct btrfs_key *ins_keys;
u32 *ins_sizes;
- ins_data = kmalloc(count * sizeof(u32) +
- count * sizeof(struct btrfs_key), GFP_NOFS);
+ ins_data = kmalloc_array(count, sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
if (!ins_data)
return -ENOMEM;
@@ -3696,14 +4058,14 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
const int slot = start_slot + i;
btrfs_item_key_to_cpu(src, &ins_keys[i], slot);
- ins_sizes[i] = btrfs_item_size_nr(src, slot);
+ ins_sizes[i] = btrfs_item_size(src, slot);
batch.total_data_size += ins_sizes[i];
}
}
ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
if (ret)
- goto out;
+ return ret;
dst = dst_path->nodes[0];
/*
@@ -3720,47 +4082,114 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
btrfs_release_path(dst_path);
-out:
- kfree(ins_data);
+
+ last_index = batch.keys[count - 1].offset;
+ ASSERT(last_index > inode->last_dir_index_offset,
+ "last_index=%llu inode->last_dir_index_offset=%llu",
+ last_index, inode->last_dir_index_offset);
+
+ /*
+ * If for some unexpected reason the last item's index is not greater
+ * than the last index we logged, warn and force a transaction commit.
+ */
+ if (WARN_ON(last_index <= inode->last_dir_index_offset))
+ ret = BTRFS_LOG_FORCE_COMMIT;
+ else
+ inode->last_dir_index_offset = last_index;
+
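+ /*
+ * Record the offset of the first dir index key we log, if not recorded
+ * yet, so that log_new_dir_dentries() knows from which index to start
+ * when logging the inodes of new dentries.
+ */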
+ if (btrfs_get_first_dir_index_to_log(inode) == 0)
+ btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset);
return ret;
}
+static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx)
+{
+ const int slot = path->slots[0];
+
+ if (ctx->scratch_eb) {
+ copy_extent_buffer_full(ctx->scratch_eb, path->nodes[0]);
+ } else {
+ ctx->scratch_eb = btrfs_clone_extent_buffer(path->nodes[0]);
+ if (!ctx->scratch_eb)
+ return -ENOMEM;
+ }
+
+ btrfs_release_path(path);
+ path->nodes[0] = ctx->scratch_eb;
+ path->slots[0] = slot;
+ /*
+ * Add extra ref to scratch eb so that it is not freed when callers
+ * release the path, so we can reuse it later if needed.
+ */
+ refcount_inc(&ctx->scratch_eb->refs);
+
+ return 0;
+}
+
static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_path *path,
struct btrfs_path *dst_path,
- int key_type,
- struct btrfs_log_ctx *ctx)
+ struct btrfs_log_ctx *ctx,
+ u64 *last_old_dentry_offset)
{
struct btrfs_root *log = inode->root->log_root;
- struct extent_buffer *src = path->nodes[0];
- const int nritems = btrfs_header_nritems(src);
+ struct extent_buffer *src;
+ const int nritems = btrfs_header_nritems(path->nodes[0]);
const u64 ino = btrfs_ino(inode);
- const bool inode_logged_before = inode_logged(trans, inode);
- u64 last_logged_key_offset;
bool last_found = false;
int batch_start = 0;
int batch_size = 0;
- int i;
+ int ret;
- if (key_type == BTRFS_DIR_ITEM_KEY)
- last_logged_key_offset = inode->last_dir_item_offset;
- else
- last_logged_key_offset = inode->last_dir_index_offset;
+ /*
+ * We need to clone the leaf, release the read lock on it, and use the
+ * clone before modifying the log tree. See the comment at copy_items()
+ * about why we need to do this.
+ */
+ ret = clone_leaf(path, ctx);
+ if (ret < 0)
+ return ret;
- for (i = path->slots[0]; i < nritems; i++) {
+ src = path->nodes[0];
+
+ for (int i = path->slots[0]; i < nritems; i++) {
+ struct btrfs_dir_item *di;
struct btrfs_key key;
- int ret;
btrfs_item_key_to_cpu(src, &key, i);
- if (key.objectid != ino || key.type != key_type) {
+ if (key.objectid != ino || key.type != BTRFS_DIR_INDEX_KEY) {
last_found = true;
break;
}
- ctx->last_dir_item_offset = key.offset;
+ di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
+
+ /*
+ * Skip ranges of items that consist only of dir item keys created
+ * in past transactions. However if we find a gap, we must log a
+ * dir index range item for that gap, so that index keys in that
+ * gap are deleted during log replay.
+ */
+ if (btrfs_dir_transid(src, di) < trans->transid) {
+ if (key.offset > *last_old_dentry_offset + 1) {
+ ret = insert_dir_log_key(trans, log, dst_path,
+ ino, *last_old_dentry_offset + 1,
+ key.offset - 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ *last_old_dentry_offset = key.offset;
+ continue;
+ }
+
+ /* If we logged this dir index item before, we can skip it. */
+ if (key.offset <= inode->last_dir_index_offset)
+ continue;
+
/*
* We must make sure that when we log a directory entry, the
* corresponding inode, after log replay, has a matching link
@@ -3784,69 +4213,20 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
* resulting in -ENOTEMPTY errors.
*/
if (!ctx->log_new_dentries) {
- struct btrfs_dir_item *di;
struct btrfs_key di_key;
- di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
btrfs_dir_item_key_to_cpu(src, di, &di_key);
- if ((btrfs_dir_transid(src, di) == trans->transid ||
- btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
- di_key.type != BTRFS_ROOT_ITEM_KEY)
+ if (di_key.type != BTRFS_ROOT_ITEM_KEY)
ctx->log_new_dentries = true;
}
- if (!inode_logged_before)
- goto add_to_batch;
-
- /*
- * If we were logged before and have logged dir items, we can skip
- * checking if any item with a key offset larger than the last one
- * we logged is in the log tree, saving time and avoiding adding
- * contention on the log tree.
- */
- if (key.offset > last_logged_key_offset)
- goto add_to_batch;
- /*
- * Check if the key was already logged before. If not we can add
- * it to a batch for bulk insertion.
- */
- ret = btrfs_search_slot(NULL, log, &key, dst_path, 0, 0);
- if (ret < 0) {
- return ret;
- } else if (ret > 0) {
- btrfs_release_path(dst_path);
- goto add_to_batch;
- }
-
- /*
- * Item exists in the log. Overwrite the item in the log if it
- * has different content or do nothing if it has exactly the same
- * content. And then flush the current batch if any - do it after
- * overwriting the current item, or we would deadlock otherwise,
- * since we are holding a path for the existing item.
- */
- ret = do_overwrite_item(trans, log, dst_path, src, i, &key);
- if (ret < 0)
- return ret;
-
- if (batch_size > 0) {
- ret = flush_dir_items_batch(trans, log, src, dst_path,
- batch_start, batch_size);
- if (ret < 0)
- return ret;
- batch_size = 0;
- }
- continue;
-add_to_batch:
if (batch_size == 0)
batch_start = i;
batch_size++;
}
if (batch_size > 0) {
- int ret;
-
- ret = flush_dir_items_batch(trans, log, src, dst_path,
+ ret = flush_dir_items_batch(trans, inode, src, dst_path,
batch_start, batch_size);
if (ret < 0)
return ret;
@@ -3863,21 +4243,20 @@ add_to_batch:
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_path *path,
- struct btrfs_path *dst_path, int key_type,
+ struct btrfs_path *dst_path,
struct btrfs_log_ctx *ctx,
u64 min_offset, u64 *last_offset_ret)
{
struct btrfs_key min_key;
struct btrfs_root *root = inode->root;
struct btrfs_root *log = root->log_root;
- int err = 0;
int ret;
- u64 first_offset = min_offset;
+ u64 last_old_dentry_offset = min_offset - 1;
u64 last_offset = (u64)-1;
u64 ino = btrfs_ino(inode);
min_key.objectid = ino;
- min_key.type = key_type;
+ min_key.type = BTRFS_DIR_INDEX_KEY;
min_key.offset = min_offset;
ret = btrfs_search_forward(root, &min_key, path, trans->transid);
@@ -3886,9 +4265,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
* we didn't find anything from this transaction, see if there
* is anything at all
*/
- if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
+ if (ret != 0 || min_key.objectid != ino ||
+ min_key.type != BTRFS_DIR_INDEX_KEY) {
min_key.objectid = ino;
- min_key.type = key_type;
+ min_key.type = BTRFS_DIR_INDEX_KEY;
min_key.offset = (u64)-1;
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
@@ -3896,7 +4276,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
return ret;
}
- ret = btrfs_previous_item(root, path, ino, key_type);
+ ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
/* if ret == 0 there are items for this type,
* create a range to tell us the last key of this type.
@@ -3905,43 +4285,65 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
*/
if (ret == 0) {
struct btrfs_key tmp;
+
btrfs_item_key_to_cpu(path->nodes[0], &tmp,
path->slots[0]);
- if (key_type == tmp.type)
- first_offset = max(min_offset, tmp.offset) + 1;
+ if (tmp.type == BTRFS_DIR_INDEX_KEY)
+ last_old_dentry_offset = tmp.offset;
+ } else if (ret > 0) {
+ ret = 0;
}
+
goto done;
}
/* go backward to find any previous key */
- ret = btrfs_previous_item(root, path, ino, key_type);
+ ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
if (ret == 0) {
struct btrfs_key tmp;
+
btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
- if (key_type == tmp.type) {
- first_offset = tmp.offset;
- ret = overwrite_item(trans, log, dst_path,
- path->nodes[0], path->slots[0],
- &tmp);
- if (ret) {
- err = ret;
- goto done;
- }
- }
+ /*
+ * The dir index key before the first one we found that needs to
+ * be logged might be in a previous leaf, and there might be a
+ * gap between these keys, meaning that we had deletions that
+ * happened. So the key range item we log (key type
+ * BTRFS_DIR_LOG_INDEX_KEY) must cover a range that starts at the
+ * previous key's offset plus 1, so that those deletes are replayed.
+ */
+ if (tmp.type == BTRFS_DIR_INDEX_KEY)
+ last_old_dentry_offset = tmp.offset;
+ } else if (ret < 0) {
+ goto done;
}
+
btrfs_release_path(path);
/*
- * Find the first key from this transaction again. See the note for
- * log_new_dir_dentries, if we're logging a directory recursively we
- * won't be holding its i_mutex, which means we can modify the directory
- * while we're logging it. If we remove an entry between our first
- * search and this search we'll not find the key again and can just
- * bail.
+ * Find the first key from this transaction again or the one we were at
+ * in the loop below in case we had to reschedule. We may be logging the
+ * directory without holding its VFS lock, which happens when logging new
+ * dentries (through log_new_dir_dentries()) or in some cases when we
+ * need to log the parent directory of an inode. This means a dir index
+ * key might be deleted from the inode's root, and therefore we may not
+ * find it anymore. If we can't find it, just move to the next key. We
+ * can not bail out and ignore, because if we do that we will simply
+ * not log dir index keys that come after the one that was just deleted
+ * and we can end up logging a dir index range that ends at (u64)-1
+ * (@last_offset is initialized to that), resulting in removing dir
+ * entries we should not remove at log replay time.
*/
search:
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
- if (ret != 0)
+ if (ret > 0) {
+ ret = btrfs_next_item(root, path);
+ if (ret > 0) {
+ /* There are no more keys in the inode's root. */
+ ret = 0;
+ goto done;
+ }
+ }
+ if (ret < 0)
goto done;
/*
@@ -3949,11 +4351,11 @@ search:
* from our directory
*/
while (1) {
- ret = process_dir_items_leaf(trans, inode, path, dst_path,
- key_type, ctx);
+ ret = process_dir_items_leaf(trans, inode, path, dst_path, ctx,
+ &last_old_dentry_offset);
if (ret != 0) {
- if (ret < 0)
- err = ret;
+ if (ret > 0)
+ ret = 0;
goto done;
}
path->slots[0] = btrfs_header_nritems(path->nodes[0]);
@@ -3964,25 +4366,28 @@ search:
*/
ret = btrfs_next_leaf(root, path);
if (ret) {
- if (ret == 1)
+ if (ret == 1) {
last_offset = (u64)-1;
- else
- err = ret;
+ ret = 0;
+ }
goto done;
}
btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]);
- if (min_key.objectid != ino || min_key.type != key_type) {
+ if (min_key.objectid != ino || min_key.type != BTRFS_DIR_INDEX_KEY) {
last_offset = (u64)-1;
goto done;
}
if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
- ret = overwrite_item(trans, log, dst_path,
- path->nodes[0], path->slots[0],
- &min_key);
- if (ret)
- err = ret;
- else
- last_offset = min_key.offset;
+ /*
+ * The next leaf was not changed in the current transaction
+ * and has at least one dir index key.
+ * We check for the next key because there might have been
+ * one or more deletions between the last key we logged and
+ * that next key. So the key range item we log (key type
+ * BTRFS_DIR_LOG_INDEX_KEY) must end at the next key's
+ * offset minus 1, so that those deletes are replayed.
+ */
+ last_offset = min_key.offset - 1;
goto done;
}
if (need_resched()) {
@@ -3995,18 +4400,91 @@ done:
btrfs_release_path(path);
btrfs_release_path(dst_path);
- if (err == 0) {
+ if (ret == 0) {
*last_offset_ret = last_offset;
/*
- * insert the log range keys to indicate where the log
- * is valid
+ * If the leaf was changed in the current transaction but all its dir
+ * items are from a past transaction, the last item in the leaf is a
+ * dir item and there's no gap between that last dir item and the
+ * first one on the next leaf (which did not change in the current
+ * transaction), then we don't need to log a range -
+ * last_old_dentry_offset is equal to last_offset.
*/
- ret = insert_dir_log_key(trans, log, path, key_type,
- ino, first_offset, last_offset);
- if (ret)
- err = ret;
+ ASSERT(last_old_dentry_offset <= last_offset,
+ "last_old_dentry_offset=%llu last_offset=%llu",
+ last_old_dentry_offset, last_offset);
+ if (last_old_dentry_offset < last_offset)
+ ret = insert_dir_log_key(trans, log, path, ino,
+ last_old_dentry_offset + 1,
+ last_offset);
+ }
+
+ return ret;
+}
+
+/*
+ * If the inode was logged before and it was evicted, then its
+ * last_dir_index_offset is 0, so we don't know the value of the last index
+ * key offset. If that's the case, search for it and update the inode. This
+ * is to avoid lookups in the log tree every time we try to insert a dir index
+ * key from a leaf changed in the current transaction, and to allow us to always
+ * do batch insertions of dir index keys.
+ */
+static int update_last_dir_index_offset(struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ const struct btrfs_log_ctx *ctx)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_key key;
+ int ret;
+
+ lockdep_assert_held(&inode->log_mutex);
+
+ if (inode->last_dir_index_offset != 0)
+ return 0;
+
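+ /*
+ * The inode was not logged before in the current transaction, so there
+ * are no dir index keys for it in the log tree - start right before the
+ * first valid index value.
+ */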
+ if (!ctx->logged_before) {
+ inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1;
+ return 0;
}
- return err;
+
+ key.objectid = ino;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = (u64)-1;
+
+ ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
+ /*
+ * An error happened or we actually have an index key with an offset
+ * value of (u64)-1. Bail out, we're done.
+ */
+ if (ret <= 0)
+ goto out;
+
+ ret = 0;
+ inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1;
+
+ /*
+ * No dir index items, bail out and leave last_dir_index_offset with
+ * the value right before the first valid index value.
+ */
+ if (path->slots[0] == 0)
+ goto out;
+
+ /*
+ * btrfs_search_slot() left us at one slot beyond the slot with the last
+ * index key, or beyond the last key of the directory that is not an
+ * index key. If we have an index key before, set last_dir_index_offset
+ * to its offset value, otherwise leave it with a value right before the
+ * first valid index value, as it means we have an empty directory.
+ */
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
+ if (key.objectid == ino && key.type == BTRFS_DIR_INDEX_KEY)
+ inode->last_dir_index_offset = key.offset;
+
+out:
+ btrfs_release_path(path);
+
+ return ret;
}
/*
@@ -4030,35 +4508,16 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
u64 min_key;
u64 max_key;
int ret;
- int key_type = BTRFS_DIR_ITEM_KEY;
- /*
- * If this is the first time we are being logged in the current
- * transaction, or we were logged before but the inode was evicted and
- * reloaded later, in which case its logged_trans is 0, reset the values
- * of the last logged key offsets. Note that we don't use the helper
- * function inode_logged() here - that is because the function returns
- * true after an inode eviction, assuming the worst case as it can not
- * know for sure if the inode was logged before. So we can not skip key
- * searches in the case the inode was evicted, because it may not have
- * been logged in this transaction and may have been logged in a past
- * transaction, so we need to reset the last dir item and index offsets
- * to (u64)-1.
- */
- if (inode->logged_trans != trans->transid) {
- inode->last_dir_item_offset = (u64)-1;
- inode->last_dir_index_offset = (u64)-1;
- }
-again:
- min_key = 0;
+ ret = update_last_dir_index_offset(inode, path, ctx);
+ if (ret)
+ return ret;
+
+ min_key = BTRFS_DIR_START_INDEX;
max_key = 0;
- if (key_type == BTRFS_DIR_ITEM_KEY)
- ctx->last_dir_item_offset = inode->last_dir_item_offset;
- else
- ctx->last_dir_item_offset = inode->last_dir_index_offset;
while (1) {
- ret = log_dir_items(trans, inode, path, dst_path, key_type,
+ ret = log_dir_items(trans, inode, path, dst_path,
ctx, min_key, &max_key);
if (ret)
return ret;
@@ -4067,13 +4526,6 @@ again:
min_key = max_key + 1;
}
- if (key_type == BTRFS_DIR_ITEM_KEY) {
- inode->last_dir_item_offset = ctx->last_dir_item_offset;
- key_type = BTRFS_DIR_INDEX_KEY;
- goto again;
- } else {
- inode->last_dir_index_offset = ctx->last_dir_item_offset;
- }
return 0;
}
@@ -4094,23 +4546,20 @@ static int drop_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_key found_key;
int start_slot;
- if (!inode_logged(trans, inode))
- return 0;
-
key.objectid = btrfs_ino(inode);
key.type = max_key_type;
key.offset = (u64)-1;
while (1) {
ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
- BUG_ON(ret == 0); /* Logic error */
- if (ret < 0)
- break;
-
- if (path->slots[0] == 0)
+ if (ret < 0) {
break;
+ } else if (ret > 0) {
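+ /*
+ * The key was not found, move back to the previous item in
+ * the leaf, if any.
+ */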
+ if (path->slots[0] == 0)
+ break;
+ path->slots[0]--;
+ }
- path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
@@ -4119,7 +4568,7 @@ static int drop_inode_items(struct btrfs_trans_handle *trans,
found_key.offset = 0;
found_key.type = 0;
- ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
+ ret = btrfs_bin_search(path->nodes[0], 0, &found_key, &start_slot);
if (ret < 0)
break;
@@ -4144,60 +4593,53 @@ static int truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
u64 new_size, u32 min_type)
{
- int ret;
-
- do {
- ret = btrfs_truncate_inode_items(trans, log_root, inode,
- new_size, min_type, NULL);
- } while (ret == -EAGAIN);
+ struct btrfs_truncate_control control = {
+ .new_size = new_size,
+ .ino = btrfs_ino(inode),
+ .min_type = min_type,
+ .skip_ref_updates = true,
+ };
- return ret;
+ return btrfs_truncate_inode_items(trans, log_root, &control);
}
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
- struct inode *inode, int log_inode_only,
+ struct inode *inode, bool log_inode_only,
u64 logged_isize)
{
- struct btrfs_map_token token;
u64 flags;
- btrfs_init_map_token(&token, leaf);
-
if (log_inode_only) {
/* set the generation to zero so the recovery code
* can tell the difference between a logging
* just to say 'this inode exists' and a logging
* to say 'update this inode with these values'
*/
- btrfs_set_token_inode_generation(&token, item, 0);
- btrfs_set_token_inode_size(&token, item, logged_isize);
+ btrfs_set_inode_generation(leaf, item, 0);
+ btrfs_set_inode_size(leaf, item, logged_isize);
} else {
- btrfs_set_token_inode_generation(&token, item,
- BTRFS_I(inode)->generation);
- btrfs_set_token_inode_size(&token, item, inode->i_size);
+ btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
+ btrfs_set_inode_size(leaf, item, inode->i_size);
}
- btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
- btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
- btrfs_set_token_inode_mode(&token, item, inode->i_mode);
- btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
+ btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+ btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
+ btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+
+ btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
- btrfs_set_token_timespec_sec(&token, &item->atime,
- inode->i_atime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode->i_atime.tv_nsec);
+ btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
- btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode->i_mtime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode->i_mtime.tv_nsec);
+ btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
+ btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
- btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode->i_ctime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode->i_ctime.tv_nsec);
+ btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
+ btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
/*
* We do not need to set the nbytes field, in fact during a fast fsync
@@ -4208,13 +4650,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* inode item in subvolume tree as needed (see overwrite_item()).
*/
- btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
- btrfs_set_token_inode_transid(&token, item, trans->transid);
- btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
+ btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
+ btrfs_set_inode_transid(leaf, item, trans->transid);
+ btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
BTRFS_I(inode)->ro_flags);
- btrfs_set_token_inode_flags(&token, item, flags);
- btrfs_set_token_inode_block_group(&token, item, 0);
+ btrfs_set_inode_flags(leaf, item, flags);
+ btrfs_set_inode_block_group(leaf, item, 0);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
@@ -4222,8 +4664,10 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, bool inode_item_dropped)
{
struct btrfs_inode_item *inode_item;
+ struct btrfs_key key;
int ret;
+ btrfs_get_inode_key(inode, &key);
/*
* If we are doing a fast fsync and the inode was logged before in the
* current transaction, then we know the inode was previously logged and
@@ -4235,7 +4679,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
* already exists can also result in unnecessarily splitting a leaf.
*/
if (!inode_item_dropped && inode->logged_trans == trans->transid) {
- ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
+ ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
ASSERT(ret <= 0);
if (ret > 0)
ret = -ENOENT;
@@ -4249,7 +4693,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
* the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
* flags and set ->logged_trans to 0.
*/
- ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
+ ret = btrfs_insert_empty_item(trans, log, path, &key,
sizeof(*inode_item));
ASSERT(ret != -EEXIST);
}
@@ -4258,7 +4702,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
- 0, 0);
+ false, 0);
btrfs_release_path(path);
return 0;
}
@@ -4268,7 +4712,7 @@ static int log_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *log_root,
struct btrfs_ordered_sum *sums)
{
- const u64 lock_end = sums->bytenr + sums->len - 1;
+ const u64 lock_end = sums->logical + sums->len - 1;
struct extent_state *cached_state = NULL;
int ret;
@@ -4286,8 +4730,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
* file which happens to refer to the same extent as well. Such races
* can leave checksum items in the log with overlapping ranges.
*/
- ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
- lock_end, &cached_state);
+ ret = btrfs_lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+ &cached_state);
if (ret)
return ret;
/*
@@ -4299,12 +4743,12 @@ static int log_csums(struct btrfs_trans_handle *trans,
* some checksums missing in the fs/subvolume tree. So just delete (or
* trim and adjust) any existing csum items in the log for this range.
*/
- ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
+ ret = btrfs_del_csums(trans, log_root, sums->logical, sums->len);
if (!ret)
ret = btrfs_csum_file_blocks(trans, log_root, sums);
- unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
- &cached_state);
+ btrfs_unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+ &cached_state);
return ret;
}
@@ -4314,28 +4758,55 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
struct btrfs_path *dst_path,
struct btrfs_path *src_path,
int start_slot, int nr, int inode_only,
- u64 logged_isize)
+ u64 logged_isize, struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- unsigned long src_offset;
- unsigned long dst_offset;
struct btrfs_root *log = inode->root->log_root;
struct btrfs_file_extent_item *extent;
- struct btrfs_inode_item *inode_item;
- struct extent_buffer *src = src_path->nodes[0];
+ struct extent_buffer *src;
int ret;
struct btrfs_key *ins_keys;
u32 *ins_sizes;
struct btrfs_item_batch batch;
- char *ins_data;
- int i;
- struct list_head ordered_sums;
- int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
+ char AUTO_KFREE(ins_data);
+ int dst_index;
+ const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM);
+ const u64 i_size = i_size_read(&inode->vfs_inode);
- INIT_LIST_HEAD(&ordered_sums);
+ /*
+ * To keep lockdep happy and avoid deadlocks, clone the source leaf and
+ * use the clone. This is because otherwise we would be changing the log
+ * tree, to insert items from the subvolume tree or insert csum items,
+ * while holding a read lock on a leaf from the subvolume tree, which
+ * creates a nasty lock dependency when COWing log tree nodes/leaves:
+ *
+ * 1) Modifying the log tree triggers an extent buffer allocation while
+ * holding a write lock on a parent extent buffer from the log tree.
+ * Allocating the pages for an extent buffer, or the extent buffer
+ * struct, can trigger inode eviction and finally the inode eviction
+ * will trigger a release/remove of a delayed node, which requires
+ * taking the delayed node's mutex;
+ *
+ * 2) Allocating a metadata extent for a log tree can trigger the async
+ * reclaim thread and make us wait for it to release enough space and
+ * unblock our reservation ticket. The reclaim thread can start
+ * flushing delayed items, and that in turn results in the need to
+ * lock delayed node mutexes and in the need to write lock extent
+ * buffers of a subvolume tree - all this while holding a write lock
+ * on the parent extent buffer in the log tree.
+ *
+ * So one task in scenario 1) running in parallel with another task in
+ * scenario 2) could lead to a deadlock, one wanting to lock a delayed
+ * node mutex while having a read lock on a leaf from the subvolume,
+ * while the other is holding the delayed node's mutex and wants to
+ * write lock the same subvolume leaf for flushing delayed items.
+ */
+ ret = clone_leaf(src_path, ctx);
+ if (ret < 0)
+ return ret;
- ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
- nr * sizeof(u32), GFP_NOFS);
+ src = src_path->nodes[0];
+
+ ins_data = kmalloc_array(nr, sizeof(struct btrfs_key) + sizeof(u32), GFP_NOFS);
if (!ins_data)
return -ENOMEM;
@@ -4344,28 +4815,153 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
batch.keys = ins_keys;
batch.data_sizes = ins_sizes;
batch.total_data_size = 0;
- batch.nr = nr;
+ batch.nr = 0;
+
+ dst_index = 0;
+ for (int i = 0; i < nr; i++) {
+ const int src_slot = start_slot + i;
+ struct btrfs_root *csum_root;
+ struct btrfs_ordered_sum *sums;
+ struct btrfs_ordered_sum *sums_next;
+ LIST_HEAD(ordered_sums);
+ u64 disk_bytenr;
+ u64 disk_num_bytes;
+ u64 extent_offset;
+ u64 extent_num_bytes;
+ bool is_old_extent;
+
+ btrfs_item_key_to_cpu(src, &ins_keys[dst_index], src_slot);
+
+ if (ins_keys[dst_index].type != BTRFS_EXTENT_DATA_KEY)
+ goto add_to_batch;
+
+ extent = btrfs_item_ptr(src, src_slot,
+ struct btrfs_file_extent_item);
+
+ is_old_extent = (btrfs_file_extent_generation(src, extent) <
+ trans->transid);
+
+ /*
+ * Don't copy extents from past generations. That would make us
+ * log a lot more metadata for common cases like doing only a
+ * few random writes into a file and then fsync it for the first
+ * time or after the full sync flag is set on the inode. We can
+ * get leaves full of extent items, most of which are from past
+ * generations, so we can skip them - as long as the inode has
+ * not been the target of a reflink operation in this transaction,
+ * as in that case it might have had file extent items with old
+ * generations copied into it. We also must always log prealloc
+ * extents that start at or beyond eof, otherwise we would lose
+ * them on log replay.
+ */
+ if (is_old_extent &&
+ ins_keys[dst_index].offset < i_size &&
+ inode->last_reflink_trans < trans->transid)
+ continue;
+
+ if (skip_csum)
+ goto add_to_batch;
+
+ /* Only regular extents have checksums. */
+ if (btrfs_file_extent_type(src, extent) != BTRFS_FILE_EXTENT_REG)
+ goto add_to_batch;
+
+ /*
+ * If it's an extent created in a past transaction, then its
+ * checksums are already accessible from the committed csum tree,
+ * no need to log them.
+ */
+ if (is_old_extent)
+ goto add_to_batch;
+
+ disk_bytenr = btrfs_file_extent_disk_bytenr(src, extent);
+ /* If it's an explicit hole, there are no checksums. */
+ if (disk_bytenr == 0)
+ goto add_to_batch;
+
+ disk_num_bytes = btrfs_file_extent_disk_num_bytes(src, extent);
+
+ if (btrfs_file_extent_compression(src, extent)) {
+ extent_offset = 0;
+ extent_num_bytes = disk_num_bytes;
+ } else {
+ extent_offset = btrfs_file_extent_offset(src, extent);
+ extent_num_bytes = btrfs_file_extent_num_bytes(src, extent);
+ }
+
+ csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr);
+ disk_bytenr += extent_offset;
+ ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
+ disk_bytenr + extent_num_bytes - 1,
+ &ordered_sums, false);
+ if (ret < 0)
+ return ret;
+ ret = 0;
- for (i = 0; i < nr; i++) {
- ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
- batch.total_data_size += ins_sizes[i];
- btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
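+ /*
+ * Even if logging a csum item fails, keep iterating so that all the
+ * remaining entries in the list are freed.
+ */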
+ list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) {
+ if (!ret)
+ ret = log_csums(trans, inode, log, sums);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+ if (ret)
+ return ret;
+
+add_to_batch:
+ ins_sizes[dst_index] = btrfs_item_size(src, src_slot);
+ batch.total_data_size += ins_sizes[dst_index];
+ batch.nr++;
+ dst_index++;
}
+
+ /*
+ * We have a leaf full of old extent items that don't need to be logged,
+ * so we don't need to do anything.
+ */
+ if (batch.nr == 0)
+ return 0;
+
ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
- if (ret) {
- kfree(ins_data);
+ if (ret)
return ret;
- }
- for (i = 0; i < nr; i++, dst_path->slots[0]++) {
- dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
- dst_path->slots[0]);
+ dst_index = 0;
+ for (int i = 0; i < nr; i++) {
+ const int src_slot = start_slot + i;
+ const int dst_slot = dst_path->slots[0] + dst_index;
+ struct btrfs_key key;
+ unsigned long src_offset;
+ unsigned long dst_offset;
- src_offset = btrfs_item_ptr_offset(src, start_slot + i);
+ /*
+ * We're done - all the remaining items in the source leaf
+ * correspond to old file extent items.
+ */
+ if (dst_index >= batch.nr)
+ break;
+
+ btrfs_item_key_to_cpu(src, &key, src_slot);
+
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ goto copy_item;
+
+ extent = btrfs_item_ptr(src, src_slot,
+ struct btrfs_file_extent_item);
- if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
- inode_item = btrfs_item_ptr(dst_path->nodes[0],
- dst_path->slots[0],
+ /* See the comment in the previous loop, same logic. */
+ if (btrfs_file_extent_generation(src, extent) < trans->transid &&
+ key.offset < i_size &&
+ inode->last_reflink_trans < trans->transid)
+ continue;
+
+copy_item:
+ dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], dst_slot);
+ src_offset = btrfs_item_ptr_offset(src, src_slot);
+
+ if (key.type == BTRFS_INODE_ITEM_KEY) {
+ struct btrfs_inode_item *inode_item;
+
+ inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_slot,
struct btrfs_inode_item);
fill_inode_item(trans, dst_path->nodes[0], inode_item,
&inode->vfs_inode,
@@ -4373,69 +4969,13 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
logged_isize);
} else {
copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
- src_offset, ins_sizes[i]);
+ src_offset, ins_sizes[dst_index]);
}
- /* take a reference on file data extents so that truncates
- * or deletes of this inode don't have to relog the inode
- * again
- */
- if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
- !skip_csum) {
- int found_type;
- extent = btrfs_item_ptr(src, start_slot + i,
- struct btrfs_file_extent_item);
-
- if (btrfs_file_extent_generation(src, extent) < trans->transid)
- continue;
-
- found_type = btrfs_file_extent_type(src, extent);
- if (found_type == BTRFS_FILE_EXTENT_REG) {
- u64 ds, dl, cs, cl;
- ds = btrfs_file_extent_disk_bytenr(src,
- extent);
- /* ds == 0 is a hole */
- if (ds == 0)
- continue;
-
- dl = btrfs_file_extent_disk_num_bytes(src,
- extent);
- cs = btrfs_file_extent_offset(src, extent);
- cl = btrfs_file_extent_num_bytes(src,
- extent);
- if (btrfs_file_extent_compression(src,
- extent)) {
- cs = 0;
- cl = dl;
- }
-
- ret = btrfs_lookup_csums_range(
- fs_info->csum_root,
- ds + cs, ds + cs + cl - 1,
- &ordered_sums, 0);
- if (ret)
- break;
- }
- }
+ dst_index++;
}
- btrfs_mark_buffer_dirty(dst_path->nodes[0]);
btrfs_release_path(dst_path);
- kfree(ins_data);
-
- /*
- * we have to do this after the loop above to avoid changing the
- * log tree while trying to change the log tree.
- */
- while (!list_empty(&ordered_sums)) {
- struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
- struct btrfs_ordered_sum,
- list);
- if (!ret)
- ret = log_csums(trans, inode, log, sums);
- list_del(&sums->list);
- kfree(sums);
- }
return ret;
}
@@ -4462,16 +5002,18 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_ordered_extent *ordered;
+ struct btrfs_root *csum_root;
+ u64 block_start;
u64 csum_offset;
u64 csum_len;
- u64 mod_start = em->mod_start;
- u64 mod_len = em->mod_len;
+ u64 mod_start = em->start;
+ u64 mod_len = em->len;
LIST_HEAD(ordered_sums);
int ret = 0;
if (inode->flags & BTRFS_INODE_NODATASUM ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
- em->block_start == EXTENT_MAP_HOLE)
+ (em->flags & EXTENT_FLAG_PREALLOC) ||
+ em->disk_bytenr == EXTENT_MAP_HOLE)
return 0;
list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
@@ -4533,26 +5075,28 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
return 0;
/* If we're compressed we have to save the entire range of csums. */
- if (em->compress_type) {
+ if (btrfs_extent_map_is_compressed(em)) {
csum_offset = 0;
- csum_len = max(em->block_len, em->orig_block_len);
+ csum_len = em->disk_num_bytes;
} else {
csum_offset = mod_start - em->start;
csum_len = mod_len;
}
/* block start is already adjusted for the file extent offset. */
- ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
- em->block_start + csum_offset,
- em->block_start + csum_offset +
- csum_len - 1, &ordered_sums, 0);
- if (ret)
+ block_start = btrfs_extent_map_block_start(em);
+ csum_root = btrfs_csum_root(trans->fs_info, block_start);
+ ret = btrfs_lookup_csums_list(csum_root, block_start + csum_offset,
+ block_start + csum_offset + csum_len - 1,
+ &ordered_sums, false);
+ if (ret < 0)
return ret;
+ ret = 0;
while (!list_empty(&ordered_sums)) {
- struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
- struct btrfs_ordered_sum,
- list);
+ struct btrfs_ordered_sum *sums = list_first_entry(&ordered_sums,
+ struct btrfs_ordered_sum,
+ list);
if (!ret)
ret = log_csums(trans, inode, log_root, sums);
list_del(&sums->list);
@@ -4570,14 +5114,36 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_root *log = inode->root->log_root;
- struct btrfs_file_extent_item *fi;
+ struct btrfs_file_extent_item fi = { 0 };
struct extent_buffer *leaf;
- struct btrfs_map_token token;
struct btrfs_key key;
- u64 extent_offset = em->start - em->orig_start;
+ enum btrfs_compression_type compress_type;
+ u64 extent_offset = em->offset;
+ u64 block_start = btrfs_extent_map_block_start(em);
u64 block_len;
int ret;
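+ /*
+ * Fill the file extent item in a stack buffer and later copy it into
+ * the log tree leaf with a single write_extent_buffer() call, instead
+ * of setting each field directly in the destination leaf.
+ */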
+ btrfs_set_stack_file_extent_generation(&fi, trans->transid);
+ if (em->flags & EXTENT_FLAG_PREALLOC)
+ btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_PREALLOC);
+ else
+ btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG);
+
+ block_len = em->disk_num_bytes;
+ compress_type = btrfs_extent_map_compression(em);
+ if (compress_type != BTRFS_COMPRESS_NONE) {
+ btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start);
+ btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
+ } else if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start - extent_offset);
+ btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
+ }
+
+ btrfs_set_stack_file_extent_offset(&fi, extent_offset);
+ btrfs_set_stack_file_extent_num_bytes(&fi, em->len);
+ btrfs_set_stack_file_extent_ram_bytes(&fi, em->ram_bytes);
+ btrfs_set_stack_file_extent_compression(&fi, compress_type);
+
ret = log_extent_csums(trans, inode, log, em, ctx);
if (ret)
return ret;
@@ -4591,12 +5157,12 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
* are small, with a root at level 2 or 3 at most, due to their short
* life span.
*/
- if (inode_logged(trans, inode)) {
+ if (ctx->logged_before) {
drop_args.path = path;
drop_args.start = em->start;
drop_args.end = em->start + em->len;
drop_args.replace_extent = true;
- drop_args.extent_item_size = sizeof(*fi);
+ drop_args.extent_item_size = sizeof(fi);
ret = btrfs_drop_extents(trans, log, inode, &drop_args);
if (ret)
return ret;
@@ -4608,45 +5174,14 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
key.offset = em->start;
ret = btrfs_insert_empty_item(trans, log, path, &key,
- sizeof(*fi));
+ sizeof(fi));
if (ret)
return ret;
}
leaf = path->nodes[0];
- btrfs_init_map_token(&token, leaf);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
-
- btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- btrfs_set_token_file_extent_type(&token, fi,
- BTRFS_FILE_EXTENT_PREALLOC);
- else
- btrfs_set_token_file_extent_type(&token, fi,
- BTRFS_FILE_EXTENT_REG);
-
- block_len = max(em->block_len, em->orig_block_len);
- if (em->compress_type != BTRFS_COMPRESS_NONE) {
- btrfs_set_token_file_extent_disk_bytenr(&token, fi,
- em->block_start);
- btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
- } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- btrfs_set_token_file_extent_disk_bytenr(&token, fi,
- em->block_start -
- extent_offset);
- btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
- } else {
- btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
- btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
- }
-
- btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
- btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
- btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
- btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
- btrfs_set_token_file_extent_encryption(&token, fi, 0);
- btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
- btrfs_mark_buffer_dirty(leaf);
+ write_extent_buffer(leaf, &fi,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(fi));
btrfs_release_path(path);
@@ -4655,7 +5190,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
/*
* Log all prealloc extents beyond the inode's i_size to make sure we do not
- * lose them after doing a fast fsync and replaying the log. We scan the
+ * lose them after doing a full/fast fsync and replaying the log. We scan the
* subvolume's root instead of iterating the inode's extent map tree because
* otherwise we can log incorrect extent items based on extent map conversion.
* That can happen due to the fact that extent maps are merged when they
@@ -4663,19 +5198,20 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
*/
static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ struct btrfs_log_ctx *ctx)
{
struct btrfs_root *root = inode->root;
struct btrfs_key key;
const u64 i_size = i_size_read(&inode->vfs_inode);
const u64 ino = btrfs_ino(inode);
- struct btrfs_path *dst_path = NULL;
+ BTRFS_PATH_AUTO_FREE(dst_path);
bool dropped_extents = false;
u64 truncate_offset = i_size;
struct extent_buffer *leaf;
int slot;
int ins_nr = 0;
- int start_slot;
+ int start_slot = 0;
int ret;
if (!(inode->flags & BTRFS_INODE_PREALLOC))
@@ -4729,7 +5265,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
if (slot >= btrfs_header_nritems(leaf)) {
if (ins_nr > 0) {
ret = copy_items(trans, inode, dst_path, path,
- start_slot, ins_nr, 1, 0);
+ start_slot, ins_nr, 1, 0, ctx);
if (ret < 0)
goto out;
ins_nr = 0;
@@ -4753,18 +5289,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
path->slots[0]++;
continue;
}
- if (!dropped_extents) {
- /*
- * Avoid logging extent items logged in past fsync calls
- * and leading to duplicate keys in the log tree.
- */
+ /*
+ * Avoid overlapping items in the log tree. The first time we
+ * get here, get rid of everything from a past fsync. After
+ * that, if the current extent starts before the end of the last
+ * extent we copied, truncate the last one. This can happen if
+ * an ordered extent completion modifies the subvolume tree
+ * while btrfs_next_leaf() has the tree unlocked.
+ */
+ if (!dropped_extents || key.offset < truncate_offset) {
ret = truncate_inode_items(trans, root->log_root, inode,
- truncate_offset,
+ min(key.offset, truncate_offset),
BTRFS_EXTENT_DATA_KEY);
if (ret)
goto out;
dropped_extents = true;
}
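+ /*
+ * Track the end of the extent we are about to copy, so that the
+ * next iteration can detect an overlap with it.
+ */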
+ truncate_offset = btrfs_file_extent_end(path);
if (ins_nr == 0)
start_slot = slot;
ins_nr++;
@@ -4779,10 +5320,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
}
if (ins_nr > 0)
ret = copy_items(trans, inode, dst_path, path,
- start_slot, ins_nr, 1, 0);
+ start_slot, ins_nr, 1, 0, ctx);
out:
btrfs_release_path(path);
- btrfs_free_path(dst_path);
return ret;
}
@@ -4794,13 +5334,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_extent *tmp;
struct extent_map *em, *n;
- struct list_head extents;
+ LIST_HEAD(extents);
struct extent_map_tree *tree = &inode->extent_tree;
int ret = 0;
int num = 0;
- INIT_LIST_HEAD(&extents);
-
write_lock(&tree->lock);
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
@@ -4821,13 +5359,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
continue;
/* We log prealloc extents beyond eof later. */
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
+ if ((em->flags & EXTENT_FLAG_PREALLOC) &&
em->start >= i_size_read(&inode->vfs_inode))
continue;
/* Need a ref to keep it from getting evicted from cache */
refcount_inc(&em->refs);
- set_bit(EXTENT_FLAG_LOGGING, &em->flags);
+ em->flags |= EXTENT_FLAG_LOGGING;
list_add_tail(&em->list, &extents);
num++;
}
@@ -4835,7 +5373,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
list_sort(NULL, &extents, extent_cmp);
process:
while (!list_empty(&extents)) {
- em = list_entry(extents.next, struct extent_map, list);
+ em = list_first_entry(&extents, struct extent_map, list);
list_del_init(&em->list);
@@ -4844,8 +5382,8 @@ process:
* private list.
*/
if (ret) {
- clear_em_logging(tree, em);
- free_extent_map(em);
+ btrfs_clear_em_logging(inode, em);
+ btrfs_free_extent_map(em);
continue;
}
@@ -4853,15 +5391,14 @@ process:
ret = log_one_extent(trans, inode, em, path, ctx);
write_lock(&tree->lock);
- clear_em_logging(tree, em);
- free_extent_map(em);
+ btrfs_clear_em_logging(inode, em);
+ btrfs_free_extent_map(em);
}
WARN_ON(!list_empty(&extents));
write_unlock(&tree->lock);
- btrfs_release_path(path);
if (!ret)
- ret = btrfs_log_prealloc_extents(trans, inode, path);
+ ret = btrfs_log_prealloc_extents(trans, inode, path, ctx);
if (ret)
return ret;
@@ -4877,12 +5414,12 @@ process:
set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- spin_lock_irq(&inode->ordered_tree.lock);
+ spin_lock(&inode->ordered_tree_lock);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
atomic_inc(&trans->transaction->pending_ordered);
}
- spin_unlock_irq(&inode->ordered_tree.lock);
+ spin_unlock(&inode->ordered_tree_lock);
}
btrfs_put_ordered_extent(ordered);
}
@@ -4942,7 +5479,8 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_path *path,
- struct btrfs_path *dst_path)
+ struct btrfs_path *dst_path,
+ struct btrfs_log_ctx *ctx)
{
struct btrfs_root *root = inode->root;
int ret;
@@ -4971,7 +5509,7 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
if (slot >= nritems) {
if (ins_nr > 0) {
ret = copy_items(trans, inode, dst_path, path,
- start_slot, ins_nr, 1, 0);
+ start_slot, ins_nr, 1, 0, ctx);
if (ret < 0)
return ret;
ins_nr = 0;
@@ -4997,7 +5535,7 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
}
if (ins_nr > 0) {
ret = copy_items(trans, inode, dst_path, path,
- start_slot, ins_nr, 1, 0);
+ start_slot, ins_nr, 1, 0, ctx);
if (ret < 0)
return ret;
}
@@ -5068,10 +5606,9 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
* leafs from the log root.
*/
btrfs_release_path(path);
- ret = btrfs_insert_file_extent(trans, root->log_root,
- ino, prev_extent_end, 0,
- 0, hole_len, 0, hole_len,
- 0, 0, 0);
+ ret = btrfs_insert_hole_extent(trans, root->log_root,
+ ino, prev_extent_end,
+ hole_len);
if (ret < 0)
return ret;
@@ -5100,10 +5637,8 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
- ret = btrfs_insert_file_extent(trans, root->log_root,
- ino, prev_extent_end, 0, 0,
- hole_len, 0, hole_len,
- 0, 0, 0);
+ ret = btrfs_insert_hole_extent(trans, root->log_root, ino,
+ prev_extent_end, hole_len);
if (ret < 0)
return ret;
}
@@ -5159,19 +5694,18 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
struct btrfs_inode *inode,
u64 *other_ino, u64 *other_parent)
{
- int ret;
- struct btrfs_path *search_path;
- char *name = NULL;
+ BTRFS_PATH_AUTO_FREE(search_path);
+ char AUTO_KFREE(name);
u32 name_len = 0;
- u32 item_size = btrfs_item_size_nr(eb, slot);
+ u32 item_size = btrfs_item_size(eb, slot);
u32 cur_offset = 0;
unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
search_path = btrfs_alloc_path();
if (!search_path)
return -ENOMEM;
- search_path->search_commit_root = 1;
- search_path->skip_locking = 1;
+ search_path->search_commit_root = true;
+ search_path->skip_locking = true;
while (cur_offset < item_size) {
u64 parent;
@@ -5179,6 +5713,7 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
u32 this_len;
unsigned long name_ptr;
struct btrfs_dir_item *di;
+ struct fscrypt_str name_str;
if (key->type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *iref;
@@ -5203,17 +5738,18 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
char *new_name;
new_name = krealloc(name, this_name_len, GFP_NOFS);
- if (!new_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_name)
+ return -ENOMEM;
name_len = this_name_len;
name = new_name;
}
read_extent_buffer(eb, name, name_ptr, this_name_len);
+
+ name_str.name = name;
+ name_str.len = this_name_len;
di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
- parent, name, this_name_len, 0);
+ parent, &name_str, 0);
if (di && !IS_ERR(di)) {
struct btrfs_key di_key;
@@ -5221,28 +5757,243 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
di, &di_key);
if (di_key.type == BTRFS_INODE_ITEM_KEY) {
if (di_key.objectid != key->objectid) {
- ret = 1;
*other_ino = di_key.objectid;
*other_parent = parent;
+ return 1;
} else {
- ret = 0;
+ return 0;
}
} else {
- ret = -EAGAIN;
+ return -EAGAIN;
}
- goto out;
} else if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
+ return PTR_ERR(di);
}
btrfs_release_path(search_path);
cur_offset += this_len;
}
- ret = 0;
+
+ return 0;
+}
+
+/*
+ * Check if we need to log an inode. This is used in contexts where while
+ * logging an inode we need to log another inode (either that it exists or in
+ * full mode). This is used instead of btrfs_inode_in_log() because the latter
+ * requires the inode to be in the log and have the log transaction committed,
+ * while here we do not care if the log transaction was already committed - our
+ * caller will commit the log later - and we want to avoid logging an inode
+ * multiple times when multiple tasks have joined the same log transaction.
+ */
+static bool need_log_inode(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
+{
+ /*
+ * If a directory was not modified, no dentries added or removed, we can
+ * and should avoid logging it.
+ */
+ if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
+ return false;
+
+ /*
+ * If this inode does not have new/updated/deleted xattrs since the last
+ * time it was logged and is flagged as logged in the current transaction,
+ * we can skip logging it. As for new/deleted names, those are updated in
+ * the log by link/unlink/rename operations.
+ * In case the inode was logged and then evicted and reloaded, its
+ * logged_trans will be 0, in which case we have to fully log it since
+ * logged_trans is a transient field, not persisted.
+ */
+ if (inode_logged(trans, inode, NULL) == 1 &&
+ !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
+ return false;
+
+ return true;
+}
+
+struct btrfs_dir_list {
+ u64 ino;
+ struct list_head list;
+};
+
+/*
+ * Log the inodes of the new dentries of a directory.
+ * See process_dir_items_leaf() for details about why it is needed.
+ * This is a recursive operation - if an existing dentry corresponds to a
+ * directory, that directory's new entries are logged too (same behaviour as
+ * ext3/4, xfs, f2fs, nilfs2). Note that when logging the inodes
+ * the dentries point to, we do not acquire their VFS lock, otherwise lockdep
+ * complains about the following circular lock dependency / possible deadlock:
+ *
+ * CPU0 CPU1
+ * ---- ----
+ * lock(&type->i_mutex_dir_key#3/2);
+ * lock(sb_internal#2);
+ * lock(&type->i_mutex_dir_key#3/2);
+ * lock(&sb->s_type->i_mutex_key#14);
+ *
+ * Where sb_internal is the lock (a counter that works as a lock) acquired by
+ * sb_start_intwrite() in btrfs_start_transaction().
+ * Not acquiring the VFS lock of the inodes is still safe because:
+ *
+ * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
+ * that while logging the inode new references (names) are added or removed
+ * from the inode, leaving the logged inode item with a link count that does
+ * not match the number of logged inode reference items. This is fine because
+ * at log replay time we compute the real number of links and correct the
+ * link count in the inode item (see replay_one_buffer() and
+ * link_to_fixup_dir());
+ *
+ * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
+ * while logging the inode's items new index items (key type
+ * BTRFS_DIR_INDEX_KEY) are added to fs/subvol tree and the logged inode item
+ * has a size that doesn't match the sum of the lengths of all the logged
+ * names - this is ok, not a problem, because at log replay time we set the
+ * directory's i_size to the correct value (see replay_one_name() and
+ * overwrite_item()).
+ */
+static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *start_inode,
+ struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_root *root = start_inode->root;
+ struct btrfs_path *path;
+ LIST_HEAD(dir_list);
+ struct btrfs_dir_list *dir_elem;
+ u64 ino = btrfs_ino(start_inode);
+ struct btrfs_inode *curr_inode = start_inode;
+ int ret = 0;
+
+ /*
+ * If we are logging a new name, as part of a link or rename operation,
+ * don't bother logging new dentries, as we just want to log the names
+ * of an inode and that any new parents exist.
+ */
+ if (ctx->logging_new_name)
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /* Pairs with btrfs_add_delayed_iput below. */
+ ihold(&curr_inode->vfs_inode);
+
+ while (true) {
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ u64 next_index;
+ bool continue_curr_inode = true;
+ int iter_ret;
+
+ key.objectid = ino;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = btrfs_get_first_dir_index_to_log(curr_inode);
+ next_index = key.offset;
+again:
+ btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_dir_item *di;
+ struct btrfs_key di_key;
+ struct btrfs_inode *di_inode;
+ int log_mode = LOG_INODE_EXISTS;
+ int type;
+
+ if (found_key.objectid != ino ||
+ found_key.type != BTRFS_DIR_INDEX_KEY) {
+ continue_curr_inode = false;
+ break;
+ }
+
+ next_index = found_key.offset + 1;
+
+ di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+ type = btrfs_dir_ftype(leaf, di);
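+ /*
+ * Skip dentries created in past transactions and dentries that point
+ * to subvolume roots - we only care about inodes behind new dentries
+ * from the current transaction.
+ */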
+ if (btrfs_dir_transid(leaf, di) < trans->transid)
+ continue;
+ btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
+ if (di_key.type == BTRFS_ROOT_ITEM_KEY)
+ continue;
+
+ btrfs_release_path(path);
+ di_inode = btrfs_iget_logging(di_key.objectid, root);
+ if (IS_ERR(di_inode)) {
+ ret = PTR_ERR(di_inode);
+ goto out;
+ }
+
+ if (!need_log_inode(trans, di_inode)) {
+ btrfs_add_delayed_iput(di_inode);
+ break;
+ }
+
+ ctx->log_new_dentries = false;
+ if (type == BTRFS_FT_DIR)
+ log_mode = LOG_INODE_ALL;
+ ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
+ btrfs_add_delayed_iput(di_inode);
+ if (ret)
+ goto out;
+ if (ctx->log_new_dentries) {
+ dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
+ if (!dir_elem) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dir_elem->ino = di_key.objectid;
+ list_add_tail(&dir_elem->list, &dir_list);
+ }
+ break;
+ }
+
+ btrfs_release_path(path);
+
+ if (iter_ret < 0) {
+ ret = iter_ret;
+ goto out;
+ } else if (iter_ret > 0) {
+ continue_curr_inode = false;
+ } else {
+ key = found_key;
+ }
+
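+ /*
+ * There may be more dir index keys for the current directory, resume
+ * the search right after the last key we processed.
+ */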
+ if (continue_curr_inode && key.offset < (u64)-1) {
+ key.offset++;
+ goto again;
+ }
+
+ btrfs_set_first_dir_index_to_log(curr_inode, next_index);
+
+ if (list_empty(&dir_list))
+ break;
+
+ dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, list);
+ ino = dir_elem->ino;
+ list_del(&dir_elem->list);
+ kfree(dir_elem);
+
+ btrfs_add_delayed_iput(curr_inode);
+
+ curr_inode = btrfs_iget_logging(ino, root);
+ if (IS_ERR(curr_inode)) {
+ ret = PTR_ERR(curr_inode);
+ curr_inode = NULL;
+ break;
+ }
+ }
out:
- btrfs_free_path(search_path);
- kfree(name);
+ btrfs_free_path(path);
+ if (curr_inode)
+ btrfs_add_delayed_iput(curr_inode);
+
+ if (ret) {
+ struct btrfs_dir_list *next;
+
+ list_for_each_entry_safe(dir_elem, next, &dir_list, list)
+ kfree(dir_elem);
+ }
+
return ret;
}
@@ -5252,105 +6003,251 @@ struct btrfs_ino_list {
struct list_head list;
};
-static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_log_ctx *ctx,
- u64 ino, u64 parent)
+static void free_conflicting_inodes(struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_ino_list *curr;
+ struct btrfs_ino_list *next;
+
+ list_for_each_entry_safe(curr, next, &ctx->conflict_inodes, list) {
+ list_del(&curr->list);
+ kfree(curr);
+ }
+}
+
+static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino,
+ struct btrfs_path *path)
+{
+ struct btrfs_key key;
+ int ret;
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ path->search_commit_root = true;
+ path->skip_locking = true;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (WARN_ON_ONCE(ret > 0)) {
+ /*
+ * We have previously found the inode through the commit root
+ * so this should not happen. If it does, just error out and
+ * fall back to a transaction commit.
+ */
+ ret = -ENOENT;
+ } else if (ret == 0) {
+ struct btrfs_inode_item *item;
+
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ if (S_ISDIR(btrfs_inode_mode(path->nodes[0], item)))
+ ret = 1;
+ }
+
+ btrfs_release_path(path);
+ path->search_commit_root = false;
+ path->skip_locking = false;
+
+ return ret;
+}
+
+static int add_conflicting_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 ino, u64 parent,
+ struct btrfs_log_ctx *ctx)
{
struct btrfs_ino_list *ino_elem;
- LIST_HEAD(inode_list);
- int ret = 0;
+ struct btrfs_inode *inode;
+
+ /*
+ * It's rare to have a lot of conflicting inodes; in practice it is not
+ * common to have more than 1 or 2. We don't want to collect too many,
+ * as we could end up logging too many inodes (even if only in
+ * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction
+ * commits.
+ */
+ if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
+ return BTRFS_LOG_FORCE_COMMIT;
+
+ inode = btrfs_iget_logging(ino, root);
+ /*
+ * If the other inode that had a conflicting dir entry was deleted in
+ * the current transaction then we either:
+ *
+ * 1) Log the parent directory (later after adding it to the list) if
+ * the inode is a directory. This is because it may be a deleted
+ * subvolume/snapshot or it may be a regular directory that had
+ * deleted subvolumes/snapshots (or subdirectories that had them),
+ * and at the moment we can't deal with dropping subvolumes/snapshots
+ * during log replay. So we just log the parent, which will result in
+ * a fallback to a transaction commit if we are dealing with those
+ * cases (last_unlink_trans will match the current transaction);
+ *
+ * 2) Do nothing if it's not a directory. During log replay we simply
+ * unlink the conflicting dentry from the parent directory and then
+ * add the dentry for our inode. This way we can avoid logging the
+ * parent directory (and maybe falling back to a transaction commit in
+ * case it has a last_unlink_trans == trans->transid, due to moving
+ * some inode from it to some other directory).
+ */
+ if (IS_ERR(inode)) {
+ int ret = PTR_ERR(inode);
+
+ if (ret != -ENOENT)
+ return ret;
+
+ ret = conflicting_inode_is_dir(root, ino, path);
+ /* Not a directory or we got an error. */
+ if (ret <= 0)
+ return ret;
+
+ /* Conflicting inode is a directory, so we'll log its parent. */
+ ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
+ if (!ino_elem)
+ return -ENOMEM;
+ ino_elem->ino = ino;
+ ino_elem->parent = parent;
+ list_add_tail(&ino_elem->list, &ctx->conflict_inodes);
+ ctx->num_conflict_inodes++;
+
+ return 0;
+ }
+
+ /*
+ * If the inode was already logged skip it - otherwise we can hit an
+ * infinite loop. Example:
+ *
+ * From the commit root (previous transaction) we have the following
+ * inodes:
+ *
+ * inode 257 a directory
+ * inode 258 with references "zz" and "zz_link" on inode 257
+ * inode 259 with reference "a" on inode 257
+ *
+ * And in the current (uncommitted) transaction we have:
+ *
+ * inode 257 a directory, unchanged
+ * inode 258 with references "a" and "a2" on inode 257
+ * inode 259 with reference "zz_link" on inode 257
+ * inode 261 with reference "zz" on inode 257
+ *
+ * When logging inode 261 the following infinite loop could
+ * happen if we don't skip already logged inodes:
+ *
+ * - we detect inode 258 as a conflicting inode, with inode 261
+ * on reference "zz", and log it;
+ *
+ * - we detect inode 259 as a conflicting inode, with inode 258
+ * on reference "a", and log it;
+ *
+ * - we detect inode 258 as a conflicting inode, with inode 259
+ * on reference "zz_link", and log it - again! After this we
+ * repeat the above steps forever.
+ *
+ * Here we can use need_log_inode() because we only need to log the
+ * inode in LOG_INODE_EXISTS mode and rename operations update the log,
+ * so that the log ends up with the new name and without the old name.
+ */
+ if (!need_log_inode(trans, inode)) {
+ btrfs_add_delayed_iput(inode);
+ return 0;
+ }
+
+ btrfs_add_delayed_iput(inode);
ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
if (!ino_elem)
return -ENOMEM;
ino_elem->ino = ino;
ino_elem->parent = parent;
- list_add_tail(&ino_elem->list, &inode_list);
+ list_add_tail(&ino_elem->list, &ctx->conflict_inodes);
+ ctx->num_conflict_inodes++;
- while (!list_empty(&inode_list)) {
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_key key;
- struct inode *inode;
-
- ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
- list);
- ino = ino_elem->ino;
- parent = ino_elem->parent;
- list_del(&ino_elem->list);
- kfree(ino_elem);
- if (ret)
- continue;
+ return 0;
+}
- btrfs_release_path(path);
+static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_log_ctx *ctx)
+{
+ int ret = 0;
+
+ /*
+ * Conflicting inodes are logged by the first call to btrfs_log_inode(),
+ * otherwise we could have unbounded recursion of btrfs_log_inode()
+ * calls. This check guarantees we can have only 1 level of recursion.
+ */
+ if (ctx->logging_conflict_inodes)
+ return 0;
+
+ ctx->logging_conflict_inodes = true;
+
+ /*
+ * New conflicting inodes may be found and added to the list while we
+ * are logging a conflicting inode, so keep iterating while the list is
+ * not empty.
+ */
+ while (!list_empty(&ctx->conflict_inodes)) {
+ struct btrfs_ino_list *curr;
+ struct btrfs_inode *inode;
+ u64 ino;
+ u64 parent;
+
+ curr = list_first_entry(&ctx->conflict_inodes,
+ struct btrfs_ino_list, list);
+ ino = curr->ino;
+ parent = curr->parent;
+ list_del(&curr->list);
+ kfree(curr);
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
/*
* If the other inode that had a conflicting dir entry was
* deleted in the current transaction, we need to log its parent
- * directory.
+ * directory. See the comment at add_conflicting_inode().
*/
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- inode = btrfs_iget(fs_info->sb, parent, root);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- } else {
- ret = btrfs_log_inode(trans,
- BTRFS_I(inode),
- LOG_OTHER_INODE_ALL,
- ctx);
- btrfs_add_delayed_iput(inode);
- }
+ if (ret != -ENOENT)
+ break;
+
+ inode = btrfs_iget_logging(parent, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ break;
}
+
+ /*
+ * Always log the directory; we cannot make this
+ * conditional on need_log_inode() because the directory
+ * might have been logged in LOG_INODE_EXISTS mode or
+ * the dir index of the conflicting inode may not be in a
+ * dir index key range logged for the directory. So we
+ * must make sure the deletion is recorded.
+ */
+ ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
+ btrfs_add_delayed_iput(inode);
+ if (ret)
+ break;
continue;
}
+
/*
- * If the inode was already logged skip it - otherwise we can
- * hit an infinite loop. Example:
- *
- * From the commit root (previous transaction) we have the
- * following inodes:
- *
- * inode 257 a directory
- * inode 258 with references "zz" and "zz_link" on inode 257
- * inode 259 with reference "a" on inode 257
- *
- * And in the current (uncommitted) transaction we have:
+ * Here we can use need_log_inode() because we only need to log
+ * the inode in LOG_INODE_EXISTS mode and rename operations
+ * update the log, so that the log ends up with the new name and
+ * without the old name.
*
- * inode 257 a directory, unchanged
- * inode 258 with references "a" and "a2" on inode 257
- * inode 259 with reference "zz_link" on inode 257
- * inode 261 with reference "zz" on inode 257
- *
- * When logging inode 261 the following infinite loop could
- * happen if we don't skip already logged inodes:
- *
- * - we detect inode 258 as a conflicting inode, with inode 261
- * on reference "zz", and log it;
- *
- * - we detect inode 259 as a conflicting inode, with inode 258
- * on reference "a", and log it;
- *
- * - we detect inode 258 as a conflicting inode, with inode 259
- * on reference "zz_link", and log it - again! After this we
- * repeat the above steps forever.
+ * We did this check at add_conflicting_inode(), but we repeat it
+ * here because some other task may have logged the inode after
+ * that, in which case we can avoid logging it again.
*/
- spin_lock(&BTRFS_I(inode)->lock);
- /*
- * Check the inode's logged_trans only instead of
- * btrfs_inode_in_log(). This is because the last_log_commit of
- * the inode is not updated when we only log that it exists (see
- * btrfs_log_inode()).
- */
- if (BTRFS_I(inode)->logged_trans == trans->transid) {
- spin_unlock(&BTRFS_I(inode)->lock);
+ if (!need_log_inode(trans, inode)) {
btrfs_add_delayed_iput(inode);
continue;
}
- spin_unlock(&BTRFS_I(inode)->lock);
+
/*
* We are safe logging the other inode without acquiring its
* lock as long as we log with the LOG_INODE_EXISTS mode. We
@@ -5358,67 +6255,16 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* well because during a rename we pin the log and update the
* log with the new name before we unpin it.
*/
- ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_OTHER_INODE, ctx);
- if (ret) {
- btrfs_add_delayed_iput(inode);
- continue;
- }
-
- key.objectid = ino;
- key.type = BTRFS_INODE_REF_KEY;
- key.offset = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0) {
- btrfs_add_delayed_iput(inode);
- continue;
- }
-
- while (true) {
- struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
- u64 other_ino = 0;
- u64 other_parent = 0;
-
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- break;
- } else if (ret > 0) {
- ret = 0;
- break;
- }
- continue;
- }
-
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != ino ||
- (key.type != BTRFS_INODE_REF_KEY &&
- key.type != BTRFS_INODE_EXTREF_KEY)) {
- ret = 0;
- break;
- }
-
- ret = btrfs_check_ref_name_override(leaf, slot, &key,
- BTRFS_I(inode), &other_ino,
- &other_parent);
- if (ret < 0)
- break;
- if (ret > 0) {
- ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
- if (!ino_elem) {
- ret = -ENOMEM;
- break;
- }
- ino_elem->ino = other_ino;
- ino_elem->parent = other_parent;
- list_add_tail(&ino_elem->list, &inode_list);
- ret = 0;
- }
- path->slots[0]++;
- }
+ ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
btrfs_add_delayed_iput(inode);
+ if (ret)
+ break;
}
+ ctx->logging_conflict_inodes = false;
+ if (ret)
+ free_conflicting_inodes(ctx);
+
return ret;
}
@@ -5429,11 +6275,11 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_path *dst_path,
const u64 logged_isize,
- const bool recursive_logging,
const int inode_only,
struct btrfs_log_ctx *ctx,
bool *need_log_inode_item)
{
+ const u64 i_size = i_size_read(&inode->vfs_inode);
struct btrfs_root *root = inode->root;
int ins_start_slot = 0;
int ins_nr = 0;
@@ -5454,13 +6300,21 @@ again:
if (min_key->type > max_key->type)
break;
- if (min_key->type == BTRFS_INODE_ITEM_KEY)
+ if (min_key->type == BTRFS_INODE_ITEM_KEY) {
*need_log_inode_item = false;
-
- if ((min_key->type == BTRFS_INODE_REF_KEY ||
- min_key->type == BTRFS_INODE_EXTREF_KEY) &&
- inode->generation == trans->transid &&
- !recursive_logging) {
+ } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
+ min_key->offset >= i_size) {
+ /*
+ * Extents at and beyond eof are logged with
+ * btrfs_log_prealloc_extents().
+ * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
+ * and no keys greater than that, so bail out.
+ */
+ break;
+ } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
+ min_key->type == BTRFS_INODE_EXTREF_KEY) &&
+ (inode->generation == trans->transid ||
+ ctx->logging_conflict_inodes)) {
u64 other_ino = 0;
u64 other_parent = 0;
@@ -5470,7 +6324,7 @@ again:
if (ret < 0) {
return ret;
} else if (ret > 0 &&
- other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
+ other_ino != btrfs_ino(ctx->inode)) {
if (ins_nr > 0) {
ins_nr++;
} else {
@@ -5479,27 +6333,26 @@ again:
}
ret = copy_items(trans, inode, dst_path, path,
ins_start_slot, ins_nr,
- inode_only, logged_isize);
+ inode_only, logged_isize, ctx);
if (ret < 0)
return ret;
ins_nr = 0;
- ret = log_conflicting_inodes(trans, root, path,
- ctx, other_ino, other_parent);
+ btrfs_release_path(path);
+ ret = add_conflicting_inode(trans, root, path,
+ other_ino,
+ other_parent, ctx);
if (ret)
return ret;
- btrfs_release_path(path);
goto next_key;
}
- }
-
- /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
- if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
+ } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
+ /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
if (ins_nr == 0)
goto next_slot;
ret = copy_items(trans, inode, dst_path, path,
ins_start_slot,
- ins_nr, inode_only, logged_isize);
+ ins_nr, inode_only, logged_isize, ctx);
if (ret < 0)
return ret;
ins_nr = 0;
@@ -5516,7 +6369,7 @@ again:
}
ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
- ins_nr, inode_only, logged_isize);
+ ins_nr, inode_only, logged_isize, ctx);
if (ret < 0)
return ret;
ins_nr = 1;
@@ -5531,7 +6384,7 @@ next_slot:
if (ins_nr) {
ret = copy_items(trans, inode, dst_path, path,
ins_start_slot, ins_nr, inode_only,
- logged_isize);
+ logged_isize, ctx);
if (ret < 0)
return ret;
ins_nr = 0;
@@ -5546,10 +6399,396 @@ next_key:
} else {
break;
}
+
+ /*
+ * We may process many leaves full of items for our inode, so
+ * avoid monopolizing a cpu for too long by rescheduling while
+ * not holding locks on any tree.
+ */
+ cond_resched();
}
- if (ins_nr)
+ if (ins_nr) {
ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
- ins_nr, inode_only, logged_isize);
+ ins_nr, inode_only, logged_isize, ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
+ /*
+ * Release the path because otherwise we might attempt to double
+ * lock the same leaf with btrfs_log_prealloc_extents() below.
+ */
+ btrfs_release_path(path);
+ ret = btrfs_log_prealloc_extents(trans, inode, dst_path, ctx);
+ }
+
+ return ret;
+}
+
+static int insert_delayed_items_batch(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log,
+ struct btrfs_path *path,
+ const struct btrfs_item_batch *batch,
+ const struct btrfs_delayed_item *first_item)
+{
+ const struct btrfs_delayed_item *curr = first_item;
+ int ret;
+
+ ret = btrfs_insert_empty_items(trans, log, path, batch);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < batch->nr; i++) {
+ char *data_ptr;
+
+ data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
+ write_extent_buffer(path->nodes[0], &curr->data,
+ (unsigned long)data_ptr, curr->data_len);
+ curr = list_next_entry(curr, log_list);
+ path->slots[0]++;
+ }
+
+ btrfs_release_path(path);
+
+ return 0;
+}
+
+static int log_delayed_insertion_items(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ const struct list_head *delayed_ins_list,
+ struct btrfs_log_ctx *ctx)
+{
+ /* 195 (4095 bytes of keys and sizes) fits in a single 4K page. */
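+ /*
+ * Each entry needs a packed struct btrfs_key (17 bytes) plus a u32
+ * size, so 195 * (17 + 4) = 4095 bytes.
+ */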
+ const int max_batch_size = 195;
+ const int leaf_data_size = BTRFS_LEAF_DATA_SIZE(trans->fs_info);
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_root *log = inode->root->log_root;
+ struct btrfs_item_batch batch = {
+ .nr = 0,
+ .total_data_size = 0,
+ };
+ const struct btrfs_delayed_item *first = NULL;
+ const struct btrfs_delayed_item *curr;
+ char *ins_data;
+ struct btrfs_key *ins_keys;
+ u32 *ins_sizes;
+ u64 curr_batch_size = 0;
+ int batch_idx = 0;
+ int ret;
+
+ /* We are adding dir index items to the log tree. */
+ lockdep_assert_held(&inode->log_mutex);
+
+ /*
+ * We collect delayed items before copying index keys from the subvolume
+ * to the log tree. However just after we collected them, they may have
+ * been flushed (all of them or just some of them), and therefore we
+ * could have copied them from the subvolume tree to the log tree.
+ * So find the first delayed item that was not yet logged (they are
+ * sorted by index number).
+ */
+ list_for_each_entry(curr, delayed_ins_list, log_list) {
+ if (curr->index > inode->last_dir_index_offset) {
+ first = curr;
+ break;
+ }
+ }
+
+ /* Empty list or all delayed items were already logged. */
+ if (!first)
+ return 0;
+
+ ins_data = kmalloc_array(max_batch_size, sizeof(u32) + sizeof(struct btrfs_key), GFP_NOFS);
+ if (!ins_data)
+ return -ENOMEM;
+ ins_sizes = (u32 *)ins_data;
+ batch.data_sizes = ins_sizes;
+ ins_keys = (struct btrfs_key *)(ins_data + max_batch_size * sizeof(u32));
+ batch.keys = ins_keys;
+
+ curr = first;
+ while (!list_entry_is_head(curr, delayed_ins_list, log_list)) {
+ const u32 curr_size = curr->data_len + sizeof(struct btrfs_item);
+
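+ /*
+ * Flush the current batch if this item would no longer fit in a
+ * leaf or if we reached the maximum number of items per batch.
+ */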
+ if (curr_batch_size + curr_size > leaf_data_size ||
+ batch.nr == max_batch_size) {
+ ret = insert_delayed_items_batch(trans, log, path,
+ &batch, first);
+ if (ret)
+ goto out;
+ batch_idx = 0;
+ batch.nr = 0;
+ batch.total_data_size = 0;
+ curr_batch_size = 0;
+ first = curr;
+ }
+
+ ins_sizes[batch_idx] = curr->data_len;
+ ins_keys[batch_idx].objectid = ino;
+ ins_keys[batch_idx].type = BTRFS_DIR_INDEX_KEY;
+ ins_keys[batch_idx].offset = curr->index;
+ curr_batch_size += curr_size;
+ batch.total_data_size += curr->data_len;
+ batch.nr++;
+ batch_idx++;
+ curr = list_next_entry(curr, log_list);
+ }
+
+ ASSERT(batch.nr >= 1, "batch.nr=%d", batch.nr);
+ ret = insert_delayed_items_batch(trans, log, path, &batch, first);
+
+ curr = list_last_entry(delayed_ins_list, struct btrfs_delayed_item,
+ log_list);
+ inode->last_dir_index_offset = curr->index;
+out:
+ kfree(ins_data);
+
+ return ret;
+}
+
+static int log_delayed_deletions_full(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ const struct list_head *delayed_del_list,
+ struct btrfs_log_ctx *ctx)
+{
+ const u64 ino = btrfs_ino(inode);
+ const struct btrfs_delayed_item *curr;
+
+ curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item,
+ log_list);
+
+ while (!list_entry_is_head(curr, delayed_del_list, log_list)) {
+ u64 first_dir_index = curr->index;
+ u64 last_dir_index;
+ const struct btrfs_delayed_item *next;
+ int ret;
+
+ /*
+ * Find a range of consecutive dir index items to delete. This
+ * way we log a single dir range item spanning several contiguous
+ * dir index items instead of logging one range item per dir index item.
+ */
+ next = list_next_entry(curr, log_list);
+ while (!list_entry_is_head(next, delayed_del_list, log_list)) {
+ if (next->index != curr->index + 1)
+ break;
+ curr = next;
+ next = list_next_entry(next, log_list);
+ }
+
+ last_dir_index = curr->index;
+ ASSERT(last_dir_index >= first_dir_index,
+ "last_dir_index=%llu first_dir_index=%llu",
+ last_dir_index, first_dir_index);
+
+ ret = insert_dir_log_key(trans, inode->root->log_root, path,
+ ino, first_dir_index, last_dir_index);
+ if (ret)
+ return ret;
+ curr = list_next_entry(curr, log_list);
+ }
+
+ return 0;
+}
+
+static int batch_delete_dir_index_items(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ const struct list_head *delayed_del_list,
+ const struct btrfs_delayed_item *first,
+ const struct btrfs_delayed_item **last_ret)
+{
+ const struct btrfs_delayed_item *next;
+ struct extent_buffer *leaf = path->nodes[0];
+ const int last_slot = btrfs_header_nritems(leaf) - 1;
+ int slot = path->slots[0] + 1;
+ const u64 ino = btrfs_ino(inode);
+
+ next = list_next_entry(first, log_list);
+
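+ /*
+ * Extend the deletion over the following leaf slots as long as they
+ * match the next delayed items, so a single btrfs_del_items() call
+ * removes the whole contiguous range.
+ */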
+ while (slot < last_slot &&
+ !list_entry_is_head(next, delayed_del_list, log_list)) {
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != ino ||
+ key.type != BTRFS_DIR_INDEX_KEY ||
+ key.offset != next->index)
+ break;
+
+ slot++;
+ *last_ret = next;
+ next = list_next_entry(next, log_list);
+ }
+
+ return btrfs_del_items(trans, inode->root->log_root, path,
+ path->slots[0], slot - path->slots[0]);
+}
+
+static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ const struct list_head *delayed_del_list,
+ struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_root *log = inode->root->log_root;
+ const struct btrfs_delayed_item *curr;
+ u64 last_range_start = 0;
+ u64 last_range_end = 0;
+ struct btrfs_key key;
+
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_DIR_INDEX_KEY;
+ curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item,
+ log_list);
+
+ while (!list_entry_is_head(curr, delayed_del_list, log_list)) {
+ const struct btrfs_delayed_item *last = curr;
+ u64 first_dir_index = curr->index;
+ u64 last_dir_index;
+ bool deleted_items = false;
+ int ret;
+
+ key.offset = curr->index;
+ ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ ret = batch_delete_dir_index_items(trans, inode, path,
+ delayed_del_list, curr,
+ &last);
+ if (ret)
+ return ret;
+ deleted_items = true;
+ }
+
+ btrfs_release_path(path);
+
+ /*
+ * If we deleted items from the leaf, it means we have a range
+ * item logging their range, so no need to add one or update an
+ * existing one. Otherwise we have to log a dir range item.
+ */
+ if (deleted_items)
+ goto next_batch;
+
+ last_dir_index = last->index;
+ ASSERT(last_dir_index >= first_dir_index,
+ "last_dir_index=%llu first_dir_index=%llu",
+ last_dir_index, first_dir_index);
+ /*
+ * If this range starts right after where the previous one ends,
+ * then we want to reuse the previous range item and change its
+ * end offset to the end of this range. This is just to minimize
+ * leaf space usage, by avoiding adding a new range item.
+ */
+ if (last_range_end != 0 && first_dir_index == last_range_end + 1)
+ first_dir_index = last_range_start;
+
+ ret = insert_dir_log_key(trans, log, path, key.objectid,
+ first_dir_index, last_dir_index);
+ if (ret)
+ return ret;
+
+ last_range_start = first_dir_index;
+ last_range_end = last_dir_index;
+next_batch:
+ curr = list_next_entry(last, log_list);
+ }
+
+ return 0;
+}
+
+static int log_delayed_deletion_items(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ const struct list_head *delayed_del_list,
+ struct btrfs_log_ctx *ctx)
+{
+ /*
+ * We are deleting dir index items from the log tree or adding range
+ * items to it.
+ */
+ lockdep_assert_held(&inode->log_mutex);
+
+ if (list_empty(delayed_del_list))
+ return 0;
+
+ if (ctx->logged_before)
+ return log_delayed_deletions_incremental(trans, inode, path,
+ delayed_del_list, ctx);
+
+ return log_delayed_deletions_full(trans, inode, path, delayed_del_list,
+ ctx);
+}
+
+/*
+ * Similar logic to log_new_dir_dentries(), but it iterates over the delayed
+ * items instead of the subvolume tree.
+ */
+static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ const struct list_head *delayed_ins_list,
+ struct btrfs_log_ctx *ctx)
+{
+ const bool orig_log_new_dentries = ctx->log_new_dentries;
+ struct btrfs_delayed_item *item;
+ int ret = 0;
+
+ /*
+ * We don't need the log mutex here, and avoiding it also prevents
+ * potential deadlocks or lockdep warnings due to nesting of delayed
+ * inode mutexes and log mutexes.
+ */
+ lockdep_assert_not_held(&inode->log_mutex);
+
+ ASSERT(!ctx->logging_new_delayed_dentries,
+ "ctx->logging_new_delayed_dentries=%d", ctx->logging_new_delayed_dentries);
+ ctx->logging_new_delayed_dentries = true;
+
+ list_for_each_entry(item, delayed_ins_list, log_list) {
+ struct btrfs_dir_item *dir_item;
+ struct btrfs_inode *di_inode;
+ struct btrfs_key key;
+ int log_mode = LOG_INODE_EXISTS;
+
+ dir_item = (struct btrfs_dir_item *)item->data;
+ btrfs_disk_key_to_cpu(&key, &dir_item->location);
+
+ if (key.type == BTRFS_ROOT_ITEM_KEY)
+ continue;
+
+ di_inode = btrfs_iget_logging(key.objectid, inode->root);
+ if (IS_ERR(di_inode)) {
+ ret = PTR_ERR(di_inode);
+ break;
+ }
+
+ if (!need_log_inode(trans, di_inode)) {
+ btrfs_add_delayed_iput(di_inode);
+ continue;
+ }
+
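+ /*
+ * For directories log everything, so that below we can log any new
+ * dentries they may have; for other inodes it's enough to log that
+ * they exist.
+ */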
+ if (btrfs_stack_dir_ftype(dir_item) == BTRFS_FT_DIR)
+ log_mode = LOG_INODE_ALL;
+
+ ctx->log_new_dentries = false;
+ ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
+
+ if (!ret && ctx->log_new_dentries)
+ ret = log_new_dir_dentries(trans, di_inode, ctx);
+
+ btrfs_add_delayed_iput(di_inode);
+
+ if (ret)
+ break;
+ }
+
+ ctx->log_new_dentries = orig_log_new_dentries;
+ ctx->logging_new_delayed_dentries = false;
return ret;
}
@@ -5578,16 +6817,17 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_key min_key;
struct btrfs_key max_key;
struct btrfs_root *log = inode->root->log_root;
- int err = 0;
- int ret = 0;
+ int ret;
bool fast_search = false;
u64 ino = btrfs_ino(inode);
struct extent_map_tree *em_tree = &inode->extent_tree;
u64 logged_isize = 0;
bool need_log_inode_item = true;
bool xattrs_logged = false;
- bool recursive_logging = false;
bool inode_item_dropped = true;
+ bool full_dir_logging = false;
+ LIST_HEAD(delayed_ins_list);
+ LIST_HEAD(delayed_del_list);
path = btrfs_alloc_path();
if (!path)
@@ -5615,27 +6855,69 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
+ if (S_ISDIR(inode->vfs_inode.i_mode) && inode_only == LOG_INODE_ALL)
+ full_dir_logging = true;
+
/*
- * Only run delayed items if we are a directory. We want to make sure
- * all directory indexes hit the fs/subvolume tree so we can find them
- * and figure out which index ranges have to be logged.
+ * If we are logging a directory while we are logging dentries of the
+ * delayed items of some other inode, then we need to flush the delayed
+ * items of this directory and not log the delayed items directly. This
+ * is to prevent more than one level of recursion into btrfs_log_inode()
+ * by having something like this:
+ *
+ * $ mkdir -p a/b/c/d/e/f/g/h/...
+ * $ xfs_io -c "fsync" a
+ *
+ * Where all directories in the path did not exist before and are
+ * created in the current transaction.
+ * So in such a case we directly log the delayed items of the main
+ * directory ("a") without flushing them first, while for each of its
+ * subdirectories we flush their delayed items before logging them.
+ * This prevents a potential unbounded recursion like this:
+ *
+ * btrfs_log_inode()
+ * log_new_delayed_dentries()
+ * btrfs_log_inode()
+ * log_new_delayed_dentries()
+ * btrfs_log_inode()
+ * log_new_delayed_dentries()
+ * (...)
+ *
+ * We have thresholds for the maximum number of delayed items to have in
+ * memory, and once they are hit, the items are flushed asynchronously.
+ * However the limit is quite high, so let's prevent deep levels of
+ * recursion from happening by limiting the maximum depth to 1.
*/
- if (S_ISDIR(inode->vfs_inode.i_mode)) {
- err = btrfs_commit_inode_delayed_items(trans, inode);
- if (err)
+ if (full_dir_logging && ctx->logging_new_delayed_dentries) {
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+ if (ret)
goto out;
}
- if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
- recursive_logging = true;
- if (inode_only == LOG_OTHER_INODE)
- inode_only = LOG_INODE_EXISTS;
- else
- inode_only = LOG_INODE_ALL;
- mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
- } else {
- mutex_lock(&inode->log_mutex);
- }
+ mutex_lock(&inode->log_mutex);
+
+ /*
+ * For symlinks, we must always log their content, which is stored in an
+ * inline extent, otherwise we could end up with an empty symlink after
+ * log replay, which is invalid on Linux (symlink(2) returns -ENOENT if
+ * one attempts to create an empty symlink).
+ * We don't need to worry about flushing delalloc, because the inline
+ * extent is created when the symlink is created (we never have delalloc
+ * for symlinks).
+ */
+ if (S_ISLNK(inode->vfs_inode.i_mode))
+ inode_only = LOG_INODE_ALL;
+
+ /*
+ * Before logging the inode item, cache the value returned by
+ * inode_logged(), because later we need to know whether
+ * the inode was previously logged in this transaction.
+ */
+ ret = inode_logged(trans, inode, path);
+ if (ret < 0)
+ goto out_unlock;
+ ctx->logged_before = (ret == 1);
+ ret = 0;
/*
* This is for cases where logging a directory could result in losing a
@@ -5644,11 +6926,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* to know the file was moved from A to B, so logging just A would
* result in losing the file after a log replay.
*/
- if (S_ISDIR(inode->vfs_inode.i_mode) &&
- inode_only == LOG_INODE_ALL &&
- inode->last_unlink_trans >= trans->transid) {
- btrfs_set_log_full_commit(trans);
- err = 1;
+ if (full_dir_logging && inode->last_unlink_trans >= trans->transid) {
+ ret = BTRFS_LOG_FORCE_COMMIT;
goto out_unlock;
}
@@ -5657,14 +6936,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* copies of everything.
*/
if (S_ISDIR(inode->vfs_inode.i_mode)) {
- int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
-
clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
- if (inode_only == LOG_INODE_EXISTS)
- max_key_type = BTRFS_XATTR_ITEM_KEY;
- ret = drop_inode_items(trans, log, path, inode, max_key_type);
+ if (ctx->logged_before)
+ ret = drop_inode_items(trans, log, path, inode,
+ BTRFS_XATTR_ITEM_KEY);
} else {
- if (inode_only == LOG_INODE_EXISTS && inode_logged(trans, inode)) {
+ if (inode_only == LOG_INODE_EXISTS && ctx->logged_before) {
/*
* Make sure the new inode item we write to the log has
* the same isize as the current one (if it exists).
@@ -5678,22 +6955,23 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* (zeroes), as if an expanding truncate happened,
* instead of getting a file of 4Kb only.
*/
- err = logged_inode_size(log, inode, path, &logged_isize);
- if (err)
+ ret = logged_inode_size(log, inode, path, &logged_isize);
+ if (ret)
goto out_unlock;
}
if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&inode->runtime_flags)) {
if (inode_only == LOG_INODE_EXISTS) {
max_key.type = BTRFS_XATTR_ITEM_KEY;
- ret = drop_inode_items(trans, log, path, inode,
- max_key.type);
+ if (ctx->logged_before)
+ ret = drop_inode_items(trans, log, path,
+ inode, max_key.type);
} else {
clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&inode->runtime_flags);
clear_bit(BTRFS_INODE_COPY_EVERYTHING,
&inode->runtime_flags);
- if (inode_logged(trans, inode))
+ if (ctx->logged_before)
ret = truncate_inode_items(trans, log,
inode, 0, 0);
}
@@ -5703,8 +6981,9 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (inode_only == LOG_INODE_ALL)
fast_search = true;
max_key.type = BTRFS_XATTR_ITEM_KEY;
- ret = drop_inode_items(trans, log, path, inode,
- max_key.type);
+ if (ctx->logged_before)
+ ret = drop_inode_items(trans, log, path, inode,
+ max_key.type);
} else {
if (inode_only == LOG_INODE_ALL)
fast_search = true;
@@ -5713,37 +6992,58 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
}
}
- if (ret) {
- err = ret;
+ if (ret)
goto out_unlock;
+
+ /*
+ * If we are logging a directory in full mode, collect the delayed items
+ * before iterating the subvolume tree, so that we don't miss any new
+ * dir index items in case they get flushed while or right after we are
+ * iterating the subvolume tree.
+ */
+ if (full_dir_logging && !ctx->logging_new_delayed_dentries)
+ btrfs_log_get_delayed_items(inode, &delayed_ins_list,
+ &delayed_del_list);
+
+ /*
+ * If we are fsyncing a file with 0 hard links, then commit the delayed
+ * inode because the last inode ref (or extref) item may still be in the
+ * subvolume tree, and if we logged it the file would still exist after a
+ * log replay. So commit the delayed inode to delete that last ref and
+ * skip logging the inode.
+ */
+ if (inode->vfs_inode.i_nlink == 0) {
+ ret = btrfs_commit_inode_delayed_inode(inode);
+ if (ret)
+ goto out_unlock;
}
- err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
+ ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
path, dst_path, logged_isize,
- recursive_logging, inode_only, ctx,
+ inode_only, ctx,
&need_log_inode_item);
- if (err)
+ if (ret)
goto out_unlock;
btrfs_release_path(path);
btrfs_release_path(dst_path);
- err = btrfs_log_all_xattrs(trans, inode, path, dst_path);
- if (err)
+ ret = btrfs_log_all_xattrs(trans, inode, path, dst_path, ctx);
+ if (ret)
goto out_unlock;
xattrs_logged = true;
if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
btrfs_release_path(path);
btrfs_release_path(dst_path);
- err = btrfs_log_holes(trans, inode, path);
- if (err)
+ ret = btrfs_log_holes(trans, inode, path);
+ if (ret)
goto out_unlock;
}
log_extents:
btrfs_release_path(path);
btrfs_release_path(dst_path);
if (need_log_inode_item) {
- err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
- if (err)
+ ret = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
+ if (ret)
goto out_unlock;
/*
* If we are doing a fast fsync and the inode was logged before
@@ -5754,18 +7054,16 @@ log_extents:
* BTRFS_INODE_COPY_EVERYTHING set.
*/
if (!xattrs_logged && inode->logged_trans < trans->transid) {
- err = btrfs_log_all_xattrs(trans, inode, path, dst_path);
- if (err)
+ ret = btrfs_log_all_xattrs(trans, inode, path, dst_path, ctx);
+ if (ret)
goto out_unlock;
btrfs_release_path(path);
}
}
if (fast_search) {
ret = btrfs_log_changed_extents(trans, inode, dst_path, ctx);
- if (ret) {
- err = ret;
+ if (ret)
goto out_unlock;
- }
} else if (inode_only == LOG_INODE_ALL) {
struct extent_map *em, *n;
@@ -5775,12 +7073,18 @@ log_extents:
write_unlock(&em_tree->lock);
}
- if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
+ if (full_dir_logging) {
ret = log_directory_changes(trans, inode, path, dst_path, ctx);
- if (ret) {
- err = ret;
+ if (ret)
+ goto out_unlock;
+ ret = log_delayed_insertion_items(trans, inode, path,
+ &delayed_ins_list, ctx);
+ if (ret)
+ goto out_unlock;
+ ret = log_delayed_deletion_items(trans, inode, path,
+ &delayed_del_list, ctx);
+ if (ret)
goto out_unlock;
- }
}
spin_lock(&inode->lock);
@@ -5816,232 +7120,37 @@ log_extents:
* a power failure unless the log was synced as part of an fsync
* against any other unrelated inode.
*/
- if (inode_only != LOG_INODE_EXISTS)
+ if (!ctx->logging_new_name && inode_only != LOG_INODE_EXISTS)
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
+
+ /*
+ * Reset the last_reflink_trans so that the next fsync does not need to
+ * go through the slower path when logging extents and their checksums.
+ */
+ if (inode_only == LOG_INODE_ALL)
+ inode->last_reflink_trans = 0;
+
out_unlock:
mutex_unlock(&inode->log_mutex);
out:
btrfs_free_path(path);
btrfs_free_path(dst_path);
- return err;
-}
-/*
- * Check if we need to log an inode. This is used in contexts where while
- * logging an inode we need to log another inode (either that it exists or in
- * full mode). This is used instead of btrfs_inode_in_log() because the later
- * requires the inode to be in the log and have the log transaction committed,
- * while here we do not care if the log transaction was already committed - our
- * caller will commit the log later - and we want to avoid logging an inode
- * multiple times when multiple tasks have joined the same log transaction.
- */
-static bool need_log_inode(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode)
-{
- /*
- * If a directory was not modified, no dentries added or removed, we can
- * and should avoid logging it.
- */
- if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
- return false;
-
- /*
- * If this inode does not have new/updated/deleted xattrs since the last
- * time it was logged and is flagged as logged in the current transaction,
- * we can skip logging it. As for new/deleted names, those are updated in
- * the log by link/unlink/rename operations.
- * In case the inode was logged and then evicted and reloaded, its
- * logged_trans will be 0, in which case we have to fully log it since
- * logged_trans is a transient field, not persisted.
- */
- if (inode->logged_trans == trans->transid &&
- !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
- return false;
-
- return true;
-}
-
-struct btrfs_dir_list {
- u64 ino;
- struct list_head list;
-};
-
-/*
- * Log the inodes of the new dentries of a directory. See log_dir_items() for
- * details about the why it is needed.
- * This is a recursive operation - if an existing dentry corresponds to a
- * directory, that directory's new entries are logged too (same behaviour as
- * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
- * the dentries point to we do not lock their i_mutex, otherwise lockdep
- * complains about the following circular lock dependency / possible deadlock:
- *
- * CPU0 CPU1
- * ---- ----
- * lock(&type->i_mutex_dir_key#3/2);
- * lock(sb_internal#2);
- * lock(&type->i_mutex_dir_key#3/2);
- * lock(&sb->s_type->i_mutex_key#14);
- *
- * Where sb_internal is the lock (a counter that works as a lock) acquired by
- * sb_start_intwrite() in btrfs_start_transaction().
- * Not locking i_mutex of the inodes is still safe because:
- *
- * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
- * that while logging the inode new references (names) are added or removed
- * from the inode, leaving the logged inode item with a link count that does
- * not match the number of logged inode reference items. This is fine because
- * at log replay time we compute the real number of links and correct the
- * link count in the inode item (see replay_one_buffer() and
- * link_to_fixup_dir());
- *
- * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
- * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
- * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
- * has a size that doesn't match the sum of the lengths of all the logged
- * names. This does not result in a problem because if a dir_item key is
- * logged but its matching dir_index key is not logged, at log replay time we
- * don't use it to replay the respective name (see replay_one_name()). On the
- * other hand if only the dir_index key ends up being logged, the respective
- * name is added to the fs/subvol tree with both the dir_item and dir_index
- * keys created (see replay_one_name()).
- * The directory's inode item with a wrong i_size is not a problem as well,
- * since we don't use it at log replay time to set the i_size in the inode
- * item of the fs/subvol tree (see overwrite_item()).
- */
-static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *start_inode,
- struct btrfs_log_ctx *ctx)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_root *log = root->log_root;
- struct btrfs_path *path;
- LIST_HEAD(dir_list);
- struct btrfs_dir_list *dir_elem;
- int ret = 0;
-
- /*
- * If we are logging a new name, as part of a link or rename operation,
- * don't bother logging new dentries, as we just want to log the names
- * of an inode and that any new parents exist.
- */
- if (ctx->logging_new_name)
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
- if (!dir_elem) {
- btrfs_free_path(path);
- return -ENOMEM;
- }
- dir_elem->ino = btrfs_ino(start_inode);
- list_add_tail(&dir_elem->list, &dir_list);
-
- while (!list_empty(&dir_list)) {
- struct extent_buffer *leaf;
- struct btrfs_key min_key;
- int nritems;
- int i;
-
- dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
- list);
- if (ret)
- goto next_dir_inode;
-
- min_key.objectid = dir_elem->ino;
- min_key.type = BTRFS_DIR_ITEM_KEY;
- min_key.offset = 0;
-again:
- btrfs_release_path(path);
- ret = btrfs_search_forward(log, &min_key, path, trans->transid);
- if (ret < 0) {
- goto next_dir_inode;
- } else if (ret > 0) {
- ret = 0;
- goto next_dir_inode;
- }
-
-process_leaf:
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- for (i = path->slots[0]; i < nritems; i++) {
- struct btrfs_dir_item *di;
- struct btrfs_key di_key;
- struct inode *di_inode;
- struct btrfs_dir_list *new_dir_elem;
- int log_mode = LOG_INODE_EXISTS;
- int type;
-
- btrfs_item_key_to_cpu(leaf, &min_key, i);
- if (min_key.objectid != dir_elem->ino ||
- min_key.type != BTRFS_DIR_ITEM_KEY)
- goto next_dir_inode;
-
- di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
- type = btrfs_dir_type(leaf, di);
- if (btrfs_dir_transid(leaf, di) < trans->transid &&
- type != BTRFS_FT_DIR)
- continue;
- btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
- if (di_key.type == BTRFS_ROOT_ITEM_KEY)
- continue;
-
- btrfs_release_path(path);
- di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
- if (IS_ERR(di_inode)) {
- ret = PTR_ERR(di_inode);
- goto next_dir_inode;
- }
+ if (ret)
+ free_conflicting_inodes(ctx);
+ else
+ ret = log_conflicting_inodes(trans, inode->root, ctx);
- if (!need_log_inode(trans, BTRFS_I(di_inode))) {
- btrfs_add_delayed_iput(di_inode);
- break;
- }
+ if (full_dir_logging && !ctx->logging_new_delayed_dentries) {
+ if (!ret)
+ ret = log_new_delayed_dentries(trans, inode,
+ &delayed_ins_list, ctx);
- ctx->log_new_dentries = false;
- if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
- log_mode = LOG_INODE_ALL;
- ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
- log_mode, ctx);
- btrfs_add_delayed_iput(di_inode);
- if (ret)
- goto next_dir_inode;
- if (ctx->log_new_dentries) {
- new_dir_elem = kmalloc(sizeof(*new_dir_elem),
- GFP_NOFS);
- if (!new_dir_elem) {
- ret = -ENOMEM;
- goto next_dir_inode;
- }
- new_dir_elem->ino = di_key.objectid;
- list_add_tail(&new_dir_elem->list, &dir_list);
- }
- break;
- }
- if (i == nritems) {
- ret = btrfs_next_leaf(log, path);
- if (ret < 0) {
- goto next_dir_inode;
- } else if (ret > 0) {
- ret = 0;
- goto next_dir_inode;
- }
- goto process_leaf;
- }
- if (min_key.offset < (u64)-1) {
- min_key.offset++;
- goto again;
- }
-next_dir_inode:
- list_del(&dir_elem->list);
- kfree(dir_elem);
+ btrfs_log_put_delayed_items(inode, &delayed_ins_list,
+ &delayed_del_list);
}
- btrfs_free_path(path);
return ret;
}
@@ -6049,9 +7158,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_root *root = inode->root;
const u64 ino = btrfs_ino(inode);
@@ -6059,15 +7167,15 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->skip_locking = 1;
- path->search_commit_root = 1;
+ path->skip_locking = true;
+ path->search_commit_root = true;
key.objectid = ino;
key.type = BTRFS_INODE_REF_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
while (true) {
struct extent_buffer *leaf = path->nodes[0];
@@ -6079,8 +7187,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
- else if (ret > 0)
+ return ret;
+ if (ret > 0)
break;
continue;
}
@@ -6090,32 +7198,27 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
break;
- item_size = btrfs_item_size_nr(leaf, slot);
+ item_size = btrfs_item_size(leaf, slot);
ptr = btrfs_item_ptr_offset(leaf, slot);
while (cur_offset < item_size) {
- struct btrfs_key inode_key;
- struct inode *dir_inode;
-
- inode_key.type = BTRFS_INODE_ITEM_KEY;
- inode_key.offset = 0;
+ u64 dir_id;
+ struct btrfs_inode *dir_inode;
if (key.type == BTRFS_INODE_EXTREF_KEY) {
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)
(ptr + cur_offset);
- inode_key.objectid = btrfs_inode_extref_parent(
- leaf, extref);
+ dir_id = btrfs_inode_extref_parent(leaf, extref);
cur_offset += sizeof(*extref);
cur_offset += btrfs_inode_extref_name_len(leaf,
extref);
} else {
- inode_key.objectid = key.offset;
+ dir_id = key.offset;
cur_offset = item_size;
}
- dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
- root);
+ dir_inode = btrfs_iget_logging(dir_id, root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -6139,32 +7242,25 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
* at both parents and the old parent B would still
* exist.
*/
- if (IS_ERR(dir_inode)) {
- ret = PTR_ERR(dir_inode);
- goto out;
- }
+ if (IS_ERR(dir_inode))
+ return PTR_ERR(dir_inode);
- if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
+ if (!need_log_inode(trans, dir_inode)) {
btrfs_add_delayed_iput(dir_inode);
continue;
}
ctx->log_new_dentries = false;
- ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
- LOG_INODE_ALL, ctx);
+ ret = btrfs_log_inode(trans, dir_inode, LOG_INODE_ALL, ctx);
if (!ret && ctx->log_new_dentries)
- ret = log_new_dir_dentries(trans, root,
- BTRFS_I(dir_inode), ctx);
+ ret = log_new_dir_dentries(trans, dir_inode, ctx);
btrfs_add_delayed_iput(dir_inode);
if (ret)
- goto out;
+ return ret;
}
path->slots[0]++;
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int log_new_ancestors(struct btrfs_trans_handle *trans,
@@ -6177,11 +7273,10 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
while (true) {
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
+ struct extent_buffer *leaf;
+ int slot;
struct btrfs_key search_key;
- struct inode *inode;
+ struct btrfs_inode *inode;
u64 ino;
int ret = 0;
@@ -6192,14 +7287,13 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
search_key.objectid = found_key.offset;
search_key.type = BTRFS_INODE_ITEM_KEY;
search_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (BTRFS_I(inode)->generation >= trans->transid &&
- need_log_inode(trans, BTRFS_I(inode)))
- ret = btrfs_log_inode(trans, BTRFS_I(inode),
- LOG_INODE_EXISTS, ctx);
+ if (inode->generation >= trans->transid &&
+ need_log_inode(trans, inode))
+ ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
btrfs_add_delayed_iput(inode);
if (ret)
return ret;
@@ -6277,7 +7371,7 @@ static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
const u64 ino = btrfs_ino(inode);
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key search_key;
int ret;
@@ -6298,7 +7392,7 @@ static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
again:
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret == 0)
path->slots[0]++;
@@ -6310,8 +7404,8 @@ again:
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
- else if (ret > 0)
+ return ret;
+ if (ret > 0)
break;
continue;
}
@@ -6328,10 +7422,8 @@ again:
* this loop, etc). So just return some error to fallback to
* a transaction commit.
*/
- if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
- ret = -EMLINK;
- goto out;
- }
+ if (found_key.type == BTRFS_INODE_EXTREF_KEY)
+ return -EMLINK;
/*
* Logging ancestors needs to do more searches on the fs/subvol
@@ -6343,14 +7435,11 @@ again:
ret = log_new_ancestors(trans, root, path, ctx);
if (ret)
- goto out;
+ return ret;
btrfs_release_path(path);
goto again;
}
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -6368,33 +7457,29 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
- bool log_dentries = false;
+ bool log_dentries;
- if (btrfs_test_opt(fs_info, NOTREELOG)) {
- ret = 1;
- goto end_no_trans;
- }
+ if (btrfs_test_opt(fs_info, NOTREELOG))
+ return BTRFS_LOG_FORCE_COMMIT;
- if (btrfs_root_refs(&root->root_item) == 0) {
- ret = 1;
- goto end_no_trans;
- }
+ if (btrfs_root_refs(&root->root_item) == 0)
+ return BTRFS_LOG_FORCE_COMMIT;
/*
- * Skip already logged inodes or inodes corresponding to tmpfiles
- * (since logging them is pointless, a link count of 0 means they
- * will never be accessible).
+ * If we're logging an inode from a subvolume created in the current
+ * transaction we must force a commit since the root is not persisted.
*/
- if ((btrfs_inode_in_log(inode, trans->transid) &&
- list_empty(&ctx->ordered_extents)) ||
- inode->vfs_inode.i_nlink == 0) {
- ret = BTRFS_NO_LOG_SYNC;
- goto end_no_trans;
- }
+ if (btrfs_root_generation(&root->root_item) == trans->transid)
+ return BTRFS_LOG_FORCE_COMMIT;
+
+ /* Skip inodes that were already logged and have no new extents. */
+ if (btrfs_inode_in_log(inode, trans->transid) &&
+ list_empty(&ctx->ordered_extents))
+ return BTRFS_NO_LOG_SYNC;
ret = start_log_trans(trans, root, ctx);
if (ret)
- goto end_no_trans;
+ return ret;
ret = btrfs_log_inode(trans, inode, inode_only, ctx);
if (ret)
@@ -6413,8 +7498,11 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_trans;
}
- if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries)
- log_dentries = true;
+ /*
+ * Track if we need to log dentries because ctx->log_new_dentries can
+ * be modified in the call chains below.
+ */
+ log_dentries = ctx->log_new_dentries;
/*
* On unlink we must make sure all our current and old parent directory
@@ -6468,19 +7556,17 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_trans;
if (log_dentries)
- ret = log_new_dir_dentries(trans, root, inode, ctx);
- else
- ret = 0;
+ ret = log_new_dir_dentries(trans, inode, ctx);
end_trans:
if (ret < 0) {
btrfs_set_log_full_commit(trans);
- ret = 1;
+ ret = BTRFS_LOG_FORCE_COMMIT;
}
if (ret)
btrfs_remove_log_ctx(root, ctx);
btrfs_end_log_trans(root);
-end_no_trans:
+
return ret;
}
@@ -6514,8 +7600,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
struct btrfs_path *path;
struct btrfs_trans_handle *trans;
struct btrfs_key key;
- struct btrfs_key found_key;
- struct btrfs_root *log;
struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
struct walk_control wc = {
.process_func = process_one_buffer,
@@ -6535,23 +7619,27 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
}
wc.trans = trans;
- wc.pin = 1;
+ wc.pin = true;
+ wc.log = log_root_tree;
- ret = walk_log_tree(trans, log_root_tree, &wc);
- if (ret) {
+ ret = walk_log_tree(&wc);
+ wc.log = NULL;
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error;
}
again:
key.objectid = BTRFS_TREE_LOG_OBJECTID;
- key.offset = (u64)-1;
key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
while (1) {
+ struct btrfs_key found_key;
+
ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
goto error;
}
@@ -6566,17 +7654,22 @@ again:
if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
break;
- log = btrfs_read_tree_root(log_root_tree, &found_key);
- if (IS_ERR(log)) {
- ret = PTR_ERR(log);
+ wc.log = btrfs_read_tree_root(log_root_tree, &found_key);
+ if (IS_ERR(wc.log)) {
+ ret = PTR_ERR(wc.log);
+ wc.log = NULL;
btrfs_abort_transaction(trans, ret);
goto error;
}
- wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
- true);
- if (IS_ERR(wc.replay_dest)) {
- ret = PTR_ERR(wc.replay_dest);
+ wc.root = btrfs_get_fs_root(fs_info, found_key.offset, true);
+ if (IS_ERR(wc.root)) {
+ ret = PTR_ERR(wc.root);
+ wc.root = NULL;
+ if (unlikely(ret != -ENOENT)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error;
+ }
/*
* We didn't find the subvol, likely because it was
@@ -6589,38 +7682,37 @@ again:
* block from being modified, and we'll just bail for
* each subsequent pass.
*/
- if (ret == -ENOENT)
- ret = btrfs_pin_extent_for_log_replay(trans,
- log->node->start,
- log->node->len);
- btrfs_put_root(log);
+ ret = btrfs_pin_extent_for_log_replay(trans, wc.log->node);
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto error;
+ }
+ goto next;
+ }
- if (!ret)
- goto next;
+ wc.root->log_root = wc.log;
+ ret = btrfs_record_root_in_trans(trans, wc.root);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- goto error;
+ goto next;
}
- wc.replay_dest->log_root = log;
- ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
- if (ret)
- /* The loop needs to continue due to the root refs */
+ ret = walk_log_tree(&wc);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
- else
- ret = walk_log_tree(trans, log, &wc);
-
- if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
- ret = fixup_inode_link_counts(trans, wc.replay_dest,
- path);
- if (ret)
- btrfs_abort_transaction(trans, ret);
+ goto next;
}
- if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
- struct btrfs_root *root = wc.replay_dest;
-
- btrfs_release_path(path);
+ if (wc.stage == LOG_WALK_REPLAY_ALL) {
+ struct btrfs_root *root = wc.root;
+ wc.subvol_path = path;
+ ret = fixup_inode_link_counts(&wc);
+ wc.subvol_path = NULL;
+ if (unlikely(ret)) {
+ btrfs_abort_transaction(trans, ret);
+ goto next;
+ }
/*
* We have just replayed everything, and the highest
* objectid of fs roots probably has changed in case
@@ -6630,17 +7722,21 @@ again:
* could only happen during mount.
*/
ret = btrfs_init_root_free_objectid(root);
- if (ret)
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
+ goto next;
+ }
}
-
- wc.replay_dest->log_root = NULL;
- btrfs_put_root(wc.replay_dest);
- btrfs_put_root(log);
+next:
+ if (wc.root) {
+ wc.root->log_root = NULL;
+ btrfs_put_root(wc.root);
+ }
+ btrfs_put_root(wc.log);
+ wc.log = NULL;
if (ret)
goto error;
-next:
if (found_key.offset == 0)
break;
key.offset = found_key.offset - 1;
@@ -6649,7 +7745,7 @@ next:
/* step one is to pin it all, step two is to replay just inodes */
if (wc.pin) {
- wc.pin = 0;
+ wc.pin = false;
wc.process_func = replay_one_buffer;
wc.stage = LOG_WALK_REPLAY_INODES;
goto again;
@@ -6667,14 +7763,13 @@ next:
if (ret)
return ret;
- log_root_tree->log_root = NULL;
clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
- btrfs_put_root(log_root_tree);
return 0;
error:
if (wc.trans)
btrfs_end_transaction(wc.trans);
+ btrfs_put_root(wc.log);
clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
btrfs_free_path(path);
return ret;
@@ -6693,7 +7788,7 @@ error:
*/
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
- int for_rename)
+ bool for_rename)
{
/*
* when we're logging a file, if it hasn't been renamed
@@ -6709,18 +7804,25 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
inode->last_unlink_trans = trans->transid;
mutex_unlock(&inode->log_mutex);
+ if (!for_rename)
+ return;
+
/*
- * if this directory was already logged any new
- * names for this file/dir will get recorded
+ * If this directory was already logged, any new names will be logged
+ * with btrfs_log_new_name() and old names will be deleted from the log
+ * tree with btrfs_del_dir_entries_in_log() or with
+ * btrfs_del_inode_ref_in_log().
*/
- if (dir->logged_trans == trans->transid)
+ if (inode_logged(trans, dir, NULL) == 1)
return;
/*
- * if the inode we're about to unlink was logged,
- * the log will be properly updated for any new names
+ * If the inode we're about to unlink was logged before, the log will be
+ * properly updated with the new name with btrfs_log_new_name() and the
+ * old name removed with btrfs_del_dir_entries_in_log() or with
+ * btrfs_del_inode_ref_in_log().
*/
- if (inode->logged_trans == trans->transid)
+ if (inode_logged(trans, inode, NULL) == 1)
return;
/*
@@ -6730,13 +7832,6 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
* properly. So, we have to be conservative and force commits
* so the new name gets discovered.
*/
- if (for_rename)
- goto record;
-
- /* we can safely do the unlink without any special recording */
- return;
-
-record:
mutex_lock(&dir->log_mutex);
dir->last_unlink_trans = trans->transid;
mutex_unlock(&dir->log_mutex);
@@ -6763,14 +7858,57 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
}
/*
- * Call this after adding a new name for a file and it will properly
- * update the log to reflect the new name.
+ * Call this when creating a subvolume in a directory.
+ * Because we don't commit a transaction when creating a subvolume, we can't
+ * allow the directory pointing to the subvolume to be logged with an entry that
+ * points to an unpersisted root if we are still in the transaction used to
+ * create the subvolume, so make any attempt to log the directory result in a
+ * full log sync.
+ * Also we don't need to worry about renames, since btrfs_rename() marks the log
+ * for full commit when renaming a subvolume.
+ *
+ * Must be called before creating the subvolume entry in its parent directory.
+ */
+void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *dir)
+{
+ mutex_lock(&dir->log_mutex);
+ dir->last_unlink_trans = trans->transid;
+ mutex_unlock(&dir->log_mutex);
+}
+
+/*
+ * Update the log after adding a new name for an inode.
+ *
+ * @trans: Transaction handle.
+ * @old_dentry: The dentry associated with the old name and the old
+ * parent directory.
+ * @old_dir: The inode of the previous parent directory for the case
+ * of a rename. For a link operation, it must be NULL.
+ * @old_dir_index: The index number associated with the old name, meaningful
+ * only for rename operations (when @old_dir is not NULL).
+ * Ignored for link operations.
+ * @parent: The dentry associated with the directory under which the
+ * new name is located.
+ *
+ * Call this after adding a new name for an inode, as a result of a link or
+ * rename operation, and it will properly update the log to reflect the new name.
*/
void btrfs_log_new_name(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode, struct btrfs_inode *old_dir,
- struct dentry *parent)
+ struct dentry *old_dentry, struct btrfs_inode *old_dir,
+ u64 old_dir_index, struct dentry *parent)
{
+ struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry));
+ struct btrfs_root *root = inode->root;
struct btrfs_log_ctx ctx;
+ bool log_pinned = false;
+ int ret;
+
+ /* The inode has a new name (ref/extref), so make sure we log it. */
+ set_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
+
+ btrfs_init_log_ctx(&ctx, inode);
+ ctx.logging_new_name = true;
/*
* this will force the logging code to walk the dentry chain
@@ -6783,30 +7921,110 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* if this inode hasn't been logged and directory we're renaming it
* from hasn't been logged, we don't need to log it
*/
- if (!inode_logged(trans, inode) &&
- (!old_dir || !inode_logged(trans, old_dir)))
- return;
+ ret = inode_logged(trans, inode, NULL);
+ if (ret < 0) {
+ goto out;
+ } else if (ret == 0) {
+ if (!old_dir)
+ return;
+ /*
+ * If the inode was not logged and we are doing a rename (old_dir is not
+ * NULL), check if old_dir was logged - if it was not we can return and
+ * do nothing.
+ */
+ ret = inode_logged(trans, old_dir, NULL);
+ if (ret < 0)
+ goto out;
+ else if (ret == 0)
+ return;
+ }
+ ret = 0;
+
+ /*
+ * Now that we know we need to update the log, allocate the scratch eb
+ * for the context before joining a log transaction below, as this can
+ * take time and therefore we could delay log commits from other tasks.
+ */
+ btrfs_init_log_ctx_scratch_eb(&ctx);
/*
* If we are doing a rename (old_dir is not NULL) from a directory that
- * was previously logged, make sure the next log attempt on the directory
- * is not skipped and logs the inode again. This is because the log may
- * not currently be authoritative for a range including the old
- * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
- * sure after a log replay we do not end up with both the new and old
- * dentries around (in case the inode is a directory we would have a
- * directory with two hard links and 2 inode references for different
- * parents). The next log attempt of old_dir will happen at
- * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
- * below, because we have previously set inode->last_unlink_trans to the
- * current transaction ID, either here or at btrfs_record_unlink_dir() in
- * case inode is a directory.
+ * was previously logged, make sure that on log replay we get the old
+ * dir entry deleted. This is needed because we will also log the new
+ * name of the renamed inode, so we need to make sure that after log
+ * replay we don't end up with both the new and old dir entries existing.
*/
- if (old_dir)
- old_dir->logged_trans = 0;
+ if (old_dir && old_dir->logged_trans == trans->transid) {
+ struct btrfs_root *log = old_dir->root->log_root;
+ struct btrfs_path *path;
+ struct fscrypt_name fname;
+
+ ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX,
+ "old_dir_index=%llu", old_dir_index);
+
+ ret = fscrypt_setup_filename(&old_dir->vfs_inode,
+ &old_dentry->d_name, 0, &fname);
+ if (ret)
+ goto out;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ fscrypt_free_filename(&fname);
+ goto out;
+ }
+
+ /*
+ * We have two inodes to update in the log, the old directory and
+ * the inode that got renamed, so we must pin the log to prevent
+ * anyone from syncing the log until we have updated both inodes
+ * in the log.
+ */
+ ret = join_running_log_trans(root);
+ /*
+ * At least one of the inodes was logged before, so this should
+ * not fail, but if it does, it's not serious, just bail out and
+ * mark the log for a full commit.
+ */
+ if (WARN_ON_ONCE(ret < 0)) {
+ btrfs_free_path(path);
+ fscrypt_free_filename(&fname);
+ goto out;
+ }
+
+ log_pinned = true;
+
+ /*
+ * Another concurrent task might be logging the old directory,
+ * as that can be triggered when logging another inode that had or
+ * still has a dentry in the old directory. We lock the old
+ * directory's log_mutex to ensure the deletion of the old
+ * name is persisted, because during directory logging we
+ * delete all BTRFS_DIR_LOG_INDEX_KEY keys and the deletion of
+ * the old name's dir index item is in the delayed items, so
+ * it could be missed by an in progress directory logging.
+ */
+ mutex_lock(&old_dir->log_mutex);
+ ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir),
+ &fname.disk_name, old_dir_index);
+ if (ret > 0) {
+ /*
+ * The dentry does not exist in the log, so record its
+ * deletion.
+ */
+ btrfs_release_path(path);
+ ret = insert_dir_log_key(trans, log, path,
+ btrfs_ino(old_dir),
+ old_dir_index, old_dir_index);
+ }
+ mutex_unlock(&old_dir->log_mutex);
+
+ btrfs_free_path(path);
+ fscrypt_free_filename(&fname);
+ if (ret < 0)
+ goto out;
+ }
- btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
- ctx.logging_new_name = true;
/*
* We don't care about the return value. If we fail to log the new name
* then we know the next attempt to sync the log will fallback to a full
@@ -6815,5 +8033,18 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* inconsistent state after a rename operation.
*/
btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
+ ASSERT(list_empty(&ctx.conflict_inodes));
+out:
+ /*
+ * If an error happened, mark the log for a full commit, because either it's
+ * not consistent and up to date or we couldn't find out whether one of the
+ * inodes was logged before in this transaction. Do it before unpinning
+ * the log, to avoid any races with someone else trying to commit it.
+ */
+ if (ret < 0)
+ btrfs_set_log_full_commit(trans);
+ if (log_pinned)
+ btrfs_end_log_trans(root);
+ free_extent_buffer(ctx.scratch_eb);
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index f6811c3df38a..41e47fda036d 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -6,49 +6,57 @@
#ifndef BTRFS_TREE_LOG_H
#define BTRFS_TREE_LOG_H
-#include "ctree.h"
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/fscrypt.h>
#include "transaction.h"
+struct inode;
+struct dentry;
+struct btrfs_ordered_extent;
+struct btrfs_root;
+struct btrfs_trans_handle;
+
/* return value for btrfs_log_dentry_safe that means we don't need to log it at all */
#define BTRFS_NO_LOG_SYNC 256
+/*
+ * We can't use the tree log for whatever reason, force a transaction commit.
+ * We use a negative value because there are functions throughout the logging code
+ * that need to return an error (< 0 value), false (0) or true (1). Any negative
+ * value will do, as it will cause the log to be marked for a full sync.
+ */
+#define BTRFS_LOG_FORCE_COMMIT (-(MAX_ERRNO + 1))
+
struct btrfs_log_ctx {
int log_ret;
int log_transid;
bool log_new_dentries;
bool logging_new_name;
- /* Tracks the last logged dir item/index key offset. */
- u64 last_dir_item_offset;
- struct inode *inode;
+ bool logging_new_delayed_dentries;
+ /* Indicate if the inode being logged was logged before. */
+ bool logged_before;
+ struct btrfs_inode *inode;
struct list_head list;
/* Only used for fast fsyncs. */
struct list_head ordered_extents;
+ struct list_head conflict_inodes;
+ int num_conflict_inodes;
+ bool logging_conflict_inodes;
+ /*
+ * Used for fsyncs that need to copy items from the subvolume tree to
+ * the log tree (full sync flag set or copy everything flag set) to
+ * avoid allocating a temporary extent buffer while holding a lock on
+ * an extent buffer of the subvolume tree and under the log transaction.
+ * Also helps to avoid allocating and freeing a temporary extent buffer
+ * in case we need to process multiple leaves from the subvolume tree.
+ */
+ struct extent_buffer *scratch_eb;
};
-static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
- struct inode *inode)
-{
- ctx->log_ret = 0;
- ctx->log_transid = 0;
- ctx->log_new_dentries = false;
- ctx->logging_new_name = false;
- ctx->inode = inode;
- INIT_LIST_HEAD(&ctx->list);
- INIT_LIST_HEAD(&ctx->ordered_extents);
-}
-
-static inline void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
-{
- struct btrfs_ordered_extent *ordered;
- struct btrfs_ordered_extent *tmp;
-
- ASSERT(inode_is_locked(ctx->inode));
-
- list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
- list_del_init(&ordered->log_list);
- btrfs_put_ordered_extent(ordered);
- }
-}
+void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct btrfs_inode *inode);
+void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx);
+void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx);
static inline void btrfs_set_log_full_commit(struct btrfs_trans_handle *trans)
{
@@ -71,22 +79,23 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct dentry *dentry,
struct btrfs_log_ctx *ctx);
void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
+ const struct fscrypt_str *name,
struct btrfs_inode *dir, u64 index);
void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
- struct btrfs_inode *inode, u64 dirid);
+ const struct fscrypt_str *name,
+ struct btrfs_inode *inode,
+ struct btrfs_inode *dir);
void btrfs_end_log_trans(struct btrfs_root *root);
void btrfs_pin_log_trans(struct btrfs_root *root);
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
- int for_rename);
+ bool for_rename);
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir);
+void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *dir);
void btrfs_log_new_name(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode, struct btrfs_inode *old_dir,
- struct dentry *parent);
+ struct dentry *old_dentry, struct btrfs_inode *old_dir,
+ u64 old_dir_index, struct dentry *parent);
#endif
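The BTRFS_LOG_FORCE_COMMIT value introduced above works because -(MAX_ERRNO + 1) is more negative than any valid errno, so a caller that wants to tell the sentinel apart from a real error can do so, while any negative value still marks the log for a full sync. Below is a minimal sketch, not part of the patch, of how such a return value could be classified; handle_log_ret() is a hypothetical helper, not an existing btrfs function.

#include <linux/err.h>		/* MAX_ERRNO */

#define BTRFS_LOG_FORCE_COMMIT	(-(MAX_ERRNO + 1))

/* Classify a logging return value: force-commit sentinel, real error, or success. */
static int handle_log_ret(int ret)
{
	if (ret == BTRFS_LOG_FORCE_COMMIT)
		return 1;	/* caller must fall back to a full transaction commit */
	if (ret < 0)
		return ret;	/* a genuine -errno from the logging code */
	return 0;		/* logged successfully */
}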
diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c
index 8a3a14686d3e..9e8cb3b7c064 100644
--- a/fs/btrfs/tree-mod-log.c
+++ b/fs/btrfs/tree-mod-log.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include "messages.h"
#include "tree-mod-log.h"
#include "disk-io.h"
+#include "fs.h"
+#include "accessors.h"
+#include "tree-checker.h"
struct tree_mod_root {
u64 logical;
@@ -23,24 +27,35 @@ struct tree_mod_elem {
/* This is used for BTRFS_MOD_LOG_KEY* and BTRFS_MOD_LOG_ROOT_REPLACE. */
u64 generation;
- /* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */
- struct btrfs_disk_key key;
- u64 blockptr;
-
- /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
- struct {
- int dst_slot;
- int nr_items;
- } move;
-
- /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
- struct tree_mod_root old_root;
+ union {
+ /*
+ * This is used for the following op types:
+ *
+ * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING
+ * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING
+ * BTRFS_MOD_LOG_KEY_REMOVE
+ * BTRFS_MOD_LOG_KEY_REPLACE
+ */
+ struct {
+ struct btrfs_disk_key key;
+ u64 blockptr;
+ } slot_change;
+
+ /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
+ struct {
+ int dst_slot;
+ int nr_items;
+ } move;
+
+ /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
+ struct tree_mod_root old_root;
+ };
};
/*
* Pull a new tree mod seq number for our operation.
*/
-static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
+static u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
return atomic64_inc_return(&fs_info->tree_mod_seq);
}
@@ -160,18 +175,41 @@ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info,
return 0;
}
+static inline bool skip_eb_logging(const struct extent_buffer *eb)
+{
+ const u64 owner = btrfs_header_owner(eb);
+
+ if (btrfs_header_level(eb) == 0)
+ return true;
+
+ /*
+ * Tree mod logging exists so that there's a consistent view of the
+ * extents and backrefs of inodes even if, while a task is iterating over
+ * them, other tasks are modifying subvolume trees and the extent tree
+ * (including running delayed refs). So we only need to log extent
+ * buffers from the extent tree and subvolume trees.
+ */
+
+ if (owner == BTRFS_EXTENT_TREE_OBJECTID)
+ return false;
+
+ if (btrfs_is_fstree(owner))
+ return false;
+
+ return true;
+}
+
/*
* Determines if logging can be omitted. Returns true if it can. Otherwise, it
* returns false with the tree_mod_log_lock acquired. The caller must hold
* this until all tree mod log insertions are recorded in the rb tree and then
* write unlock fs_info::tree_mod_log_lock.
*/
-static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb)
+static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent_buffer *eb)
{
if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
return true;
- if (eb && btrfs_header_level(eb) == 0)
+ if (eb && skip_eb_logging(eb))
return true;
write_lock(&fs_info->tree_mod_log_lock);
@@ -184,33 +222,34 @@ static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info,
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
-static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb)
+static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *eb)
{
if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
return false;
- if (eb && btrfs_header_level(eb) == 0)
+ if (eb && skip_eb_logging(eb))
return false;
return true;
}
-static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb,
+static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb,
int slot,
- enum btrfs_mod_log_op op,
- gfp_t flags)
+ enum btrfs_mod_log_op op)
{
struct tree_mod_elem *tm;
- tm = kzalloc(sizeof(*tm), flags);
+ /* Can't be one of these types, due to union in struct tree_mod_elem. */
+ ASSERT(op != BTRFS_MOD_LOG_MOVE_KEYS);
+ ASSERT(op != BTRFS_MOD_LOG_ROOT_REPLACE);
+
+ tm = kzalloc(sizeof(*tm), GFP_NOFS);
if (!tm)
return NULL;
tm->logical = eb->start;
- if (op != BTRFS_MOD_LOG_KEY_ADD) {
- btrfs_node_key(eb, &tm->key, slot);
- tm->blockptr = btrfs_node_blockptr(eb, slot);
- }
+ btrfs_node_key(eb, &tm->slot_change.key, slot);
+ tm->slot_change.blockptr = btrfs_node_blockptr(eb, slot);
tm->op = op;
tm->slot = slot;
tm->generation = btrfs_node_ptr_generation(eb, slot);
@@ -219,25 +258,36 @@ static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb,
return tm;
}
-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
- enum btrfs_mod_log_op op, gfp_t flags)
+int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot,
+ enum btrfs_mod_log_op op)
{
struct tree_mod_elem *tm;
- int ret;
+ int ret = 0;
if (!tree_mod_need_log(eb->fs_info, eb))
return 0;
- tm = alloc_tree_mod_elem(eb, slot, op, flags);
+ tm = alloc_tree_mod_elem(eb, slot, op);
if (!tm)
- return -ENOMEM;
+ ret = -ENOMEM;
if (tree_mod_dont_log(eb->fs_info, eb)) {
kfree(tm);
+ /*
+ * Don't error if we failed to allocate memory because we don't
+ * need to log.
+ */
return 0;
+ } else if (ret != 0) {
+ /*
+ * We previously failed to allocate memory and we need to log,
+ * so we have to fail.
+ */
+ goto out_unlock;
}
ret = tree_mod_log_insert(eb->fs_info, tm);
+out_unlock:
write_unlock(&eb->fs_info->tree_mod_log_lock);
if (ret)
kfree(tm);
@@ -245,7 +295,27 @@ int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
return ret;
}
-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
+static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb,
+ int dst_slot, int src_slot,
+ int nr_items)
+{
+ struct tree_mod_elem *tm;
+
+ tm = kzalloc(sizeof(*tm), GFP_NOFS);
+ if (!tm)
+ return ERR_PTR(-ENOMEM);
+
+ tm->logical = eb->start;
+ tm->slot = src_slot;
+ tm->move.dst_slot = dst_slot;
+ tm->move.nr_items = nr_items;
+ tm->op = BTRFS_MOD_LOG_MOVE_KEYS;
+ RB_CLEAR_NODE(&tm->node);
+
+ return tm;
+}
+
+int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb,
int dst_slot, int src_slot,
int nr_items)
{
@@ -259,35 +329,46 @@ int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
return 0;
tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
- if (!tm_list)
- return -ENOMEM;
-
- tm = kzalloc(sizeof(*tm), GFP_NOFS);
- if (!tm) {
+ if (!tm_list) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
- tm->logical = eb->start;
- tm->slot = src_slot;
- tm->move.dst_slot = dst_slot;
- tm->move.nr_items = nr_items;
- tm->op = BTRFS_MOD_LOG_MOVE_KEYS;
+ tm = tree_mod_log_alloc_move(eb, dst_slot, src_slot, nr_items);
+ if (IS_ERR(tm)) {
+ ret = PTR_ERR(tm);
+ tm = NULL;
+ goto lock;
+ }
for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
- BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
+ BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING);
if (!tm_list[i]) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
}
- if (tree_mod_dont_log(eb->fs_info, eb))
+lock:
+ if (tree_mod_dont_log(eb->fs_info, eb)) {
+ /*
+ * Don't error if we failed to allocate memory because we don't
+ * need to log.
+ */
+ ret = 0;
goto free_tms;
+ }
locked = true;
/*
+ * We previously failed to allocate memory and we need to log, so we
+ * have to fail.
+ */
+ if (ret != 0)
+ goto free_tms;
+
+ /*
* When we override something during the move, we log these removals.
* This can only happen when we move towards the beginning of the
* buffer, i.e. dst_slot < src_slot.
@@ -307,10 +388,12 @@ int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
return 0;
free_tms:
- for (i = 0; i < nr_items; i++) {
- if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
- rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
- kfree(tm_list[i]);
+ if (tm_list) {
+ for (i = 0; i < nr_items; i++) {
+ if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
+ rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
+ kfree(tm_list[i]);
+ }
}
if (locked)
write_unlock(&eb->fs_info->tree_mod_log_lock);
@@ -320,9 +403,9 @@ free_tms:
return ret;
}
-static inline int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
- struct tree_mod_elem **tm_list,
- int nritems)
+static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+ struct tree_mod_elem **tm_list,
+ int nritems)
{
int i, j;
int ret;
@@ -360,14 +443,14 @@ int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
GFP_NOFS);
if (!tm_list) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
for (i = 0; i < nritems; i++) {
tm_list[i] = alloc_tree_mod_elem(old_root, i,
- BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
+ BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING);
if (!tm_list[i]) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
}
}
@@ -375,7 +458,7 @@ int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
tm = kzalloc(sizeof(*tm), GFP_NOFS);
if (!tm) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
tm->logical = new_root->start;
@@ -384,14 +467,28 @@ int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
tm->generation = btrfs_header_generation(old_root);
tm->op = BTRFS_MOD_LOG_ROOT_REPLACE;
- if (tree_mod_dont_log(fs_info, NULL))
+lock:
+ if (tree_mod_dont_log(fs_info, NULL)) {
+ /*
+ * Don't error if we failed to allocate memory because we don't
+ * need to log.
+ */
+ ret = 0;
goto free_tms;
+ } else if (ret != 0) {
+ /*
+ * We previously failed to allocate memory and we need to log,
+ * so we have to fail.
+ */
+ goto out_unlock;
+ }
if (tm_list)
ret = tree_mod_log_free_eb(fs_info, tm_list, nritems);
if (!ret)
ret = tree_mod_log_insert(fs_info, tm);
+out_unlock:
write_unlock(&fs_info->tree_mod_log_lock);
if (ret)
goto free_tms;
@@ -475,7 +572,7 @@ static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info,
}
int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
- struct extent_buffer *src,
+ const struct extent_buffer *src,
unsigned long dst_offset,
unsigned long src_offset,
int nr_items)
@@ -483,9 +580,14 @@ int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
struct btrfs_fs_info *fs_info = dst->fs_info;
int ret = 0;
struct tree_mod_elem **tm_list = NULL;
- struct tree_mod_elem **tm_list_add, **tm_list_rem;
+ struct tree_mod_elem **tm_list_add = NULL;
+ struct tree_mod_elem **tm_list_rem = NULL;
int i;
bool locked = false;
+ struct tree_mod_elem *dst_move_tm = NULL;
+ struct tree_mod_elem *src_move_tm = NULL;
+ u32 dst_move_nr_items = btrfs_header_nritems(dst) - dst_offset;
+ u32 src_move_nr_items = btrfs_header_nritems(src) - (src_offset + nr_items);
if (!tree_mod_need_log(fs_info, NULL))
return 0;
@@ -495,31 +597,72 @@ int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
GFP_NOFS);
- if (!tm_list)
- return -ENOMEM;
+ if (!tm_list) {
+ ret = -ENOMEM;
+ goto lock;
+ }
+
+ if (dst_move_nr_items) {
+ dst_move_tm = tree_mod_log_alloc_move(dst, dst_offset + nr_items,
+ dst_offset, dst_move_nr_items);
+ if (IS_ERR(dst_move_tm)) {
+ ret = PTR_ERR(dst_move_tm);
+ dst_move_tm = NULL;
+ goto lock;
+ }
+ }
+ if (src_move_nr_items) {
+ src_move_tm = tree_mod_log_alloc_move(src, src_offset,
+ src_offset + nr_items,
+ src_move_nr_items);
+ if (IS_ERR(src_move_tm)) {
+ ret = PTR_ERR(src_move_tm);
+ src_move_tm = NULL;
+ goto lock;
+ }
+ }
tm_list_add = tm_list;
tm_list_rem = tm_list + nr_items;
for (i = 0; i < nr_items; i++) {
tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
- BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
+ BTRFS_MOD_LOG_KEY_REMOVE);
if (!tm_list_rem[i]) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
- BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
+ BTRFS_MOD_LOG_KEY_ADD);
if (!tm_list_add[i]) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
}
- if (tree_mod_dont_log(fs_info, NULL))
+lock:
+ if (tree_mod_dont_log(fs_info, NULL)) {
+ /*
+ * Don't error if we failed to allocate memory because we don't
+ * need to log.
+ */
+ ret = 0;
goto free_tms;
+ }
locked = true;
+ /*
+ * We previously failed to allocate memory and we need to log, so we
+ * have to fail.
+ */
+ if (ret != 0)
+ goto free_tms;
+
+ if (dst_move_tm) {
+ ret = tree_mod_log_insert(fs_info, dst_move_tm);
+ if (ret)
+ goto free_tms;
+ }
for (i = 0; i < nr_items; i++) {
ret = tree_mod_log_insert(fs_info, tm_list_rem[i]);
if (ret)
@@ -528,6 +671,11 @@ int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
if (ret)
goto free_tms;
}
+ if (src_move_tm) {
+ ret = tree_mod_log_insert(fs_info, src_move_tm);
+ if (ret)
+ goto free_tms;
+ }
write_unlock(&fs_info->tree_mod_log_lock);
kfree(tm_list);
@@ -535,10 +683,18 @@ int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
return 0;
free_tms:
- for (i = 0; i < nr_items * 2; i++) {
- if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
- rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
- kfree(tm_list[i]);
+ if (dst_move_tm && !RB_EMPTY_NODE(&dst_move_tm->node))
+ rb_erase(&dst_move_tm->node, &fs_info->tree_mod_log);
+ kfree(dst_move_tm);
+ if (src_move_tm && !RB_EMPTY_NODE(&src_move_tm->node))
+ rb_erase(&src_move_tm->node, &fs_info->tree_mod_log);
+ kfree(src_move_tm);
+ if (tm_list) {
+ for (i = 0; i < nr_items * 2; i++) {
+ if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
+ rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
+ kfree(tm_list[i]);
+ }
}
if (locked)
write_unlock(&fs_info->tree_mod_log_lock);
@@ -559,22 +715,38 @@ int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb)
nritems = btrfs_header_nritems(eb);
tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
- if (!tm_list)
- return -ENOMEM;
+ if (!tm_list) {
+ ret = -ENOMEM;
+ goto lock;
+ }
for (i = 0; i < nritems; i++) {
tm_list[i] = alloc_tree_mod_elem(eb, i,
- BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
+ BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING);
if (!tm_list[i]) {
ret = -ENOMEM;
- goto free_tms;
+ goto lock;
}
}
- if (tree_mod_dont_log(eb->fs_info, eb))
+lock:
+ if (tree_mod_dont_log(eb->fs_info, eb)) {
+ /*
+ * Don't error if we failed to allocate memory because we don't
+ * need to log.
+ */
+ ret = 0;
goto free_tms;
+ } else if (ret != 0) {
+ /*
+ * We previously failed to allocate memory and we need to log,
+ * so we have to fail.
+ */
+ goto out_unlock;
+ }
ret = tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
+out_unlock:
write_unlock(&eb->fs_info->tree_mod_log_lock);
if (ret)
goto free_tms;
@@ -583,9 +755,11 @@ int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb)
return 0;
free_tms:
- for (i = 0; i < nritems; i++)
- kfree(tm_list[i]);
- kfree(tm_list);
+ if (tm_list) {
+ for (i = 0; i < nritems; i++)
+ kfree(tm_list[i]);
+ kfree(tm_list);
+ }
return ret;
}
@@ -661,10 +835,27 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
unsigned long o_dst;
unsigned long o_src;
unsigned long p_size = sizeof(struct btrfs_key_ptr);
+ /*
+ * max_slot tracks the maximum valid slot of the rewind eb at every
+ * step of the rewind. This is in contrast with 'n' which eventually
+ * matches the number of items, but can be wrong during moves or if
+ * removes overlap on already valid slots (which is probably a separate
+ * bug). We do this to validate the offsets of memmoves for rewinding
+ * moves and detect invalid memmoves.
+ *
+ * Since a rewind eb can start empty, max_slot is a signed integer with
+ * a special meaning for -1, which is that no slot is valid to move out
+ * of. Any other negative value is invalid.
+ */
+ int max_slot;
+ int move_src_end_slot;
+ int move_dst_end_slot;
n = btrfs_header_nritems(eb);
+ max_slot = n - 1;
read_lock(&fs_info->tree_mod_log_lock);
while (tm && tm->seq >= time_seq) {
+ ASSERT(max_slot >= -1);
/*
* All the operations are recorded with the operator used for
* the modification. As we're going backwards, we do the
@@ -676,28 +867,53 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
fallthrough;
case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING:
case BTRFS_MOD_LOG_KEY_REMOVE:
- btrfs_set_node_key(eb, &tm->key, tm->slot);
- btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+ btrfs_set_node_key(eb, &tm->slot_change.key, tm->slot);
+ btrfs_set_node_blockptr(eb, tm->slot, tm->slot_change.blockptr);
btrfs_set_node_ptr_generation(eb, tm->slot,
tm->generation);
n++;
+ if (tm->slot > max_slot)
+ max_slot = tm->slot;
break;
case BTRFS_MOD_LOG_KEY_REPLACE:
BUG_ON(tm->slot >= n);
- btrfs_set_node_key(eb, &tm->key, tm->slot);
- btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+ btrfs_set_node_key(eb, &tm->slot_change.key, tm->slot);
+ btrfs_set_node_blockptr(eb, tm->slot, tm->slot_change.blockptr);
btrfs_set_node_ptr_generation(eb, tm->slot,
tm->generation);
break;
case BTRFS_MOD_LOG_KEY_ADD:
+ /*
+ * It is possible we could have already removed keys
+ * behind the known max slot, so this will be an
+ * overestimate. In practice, the copy operation
+ * inserts them in increasing order, and overestimating
+ * just means we miss some warnings, so it's OK. It
+ * isn't worth carefully tracking the full array of
+ * valid slots to check against when moving.
+ */
+ if (tm->slot == max_slot)
+ max_slot--;
/* if a move operation is needed it's in the log */
n--;
break;
case BTRFS_MOD_LOG_MOVE_KEYS:
- o_dst = btrfs_node_key_ptr_offset(tm->slot);
- o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
+ ASSERT(tm->move.nr_items > 0);
+ move_src_end_slot = tm->move.dst_slot + tm->move.nr_items - 1;
+ move_dst_end_slot = tm->slot + tm->move.nr_items - 1;
+ o_dst = btrfs_node_key_ptr_offset(eb, tm->slot);
+ o_src = btrfs_node_key_ptr_offset(eb, tm->move.dst_slot);
+ if (WARN_ON(move_src_end_slot > max_slot ||
+ tm->move.nr_items <= 0)) {
+ btrfs_warn(fs_info,
+"move from invalid tree mod log slot eb %llu slot %d dst_slot %d nr_items %d seq %llu n %u max_slot %d",
+ eb->start, tm->slot,
+ tm->move.dst_slot, tm->move.nr_items,
+ tm->seq, n, max_slot);
+ }
memmove_extent_buffer(eb, o_dst, o_src,
tm->move.nr_items * p_size);
+ max_slot = move_dst_end_slot;
break;
case BTRFS_MOD_LOG_ROOT_REPLACE:
/*
@@ -730,7 +946,6 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
* is freed (its refcount is decremented).
*/
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
struct extent_buffer *eb,
u64 time_seq)
{
@@ -819,10 +1034,15 @@ struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq)
tm = tree_mod_log_search(fs_info, logical, time_seq);
if (old_root && tm && tm->op != BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+ struct btrfs_tree_parent_check check = { 0 };
+
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- old = read_tree_block(fs_info, logical, root->root_key.objectid,
- 0, level, NULL);
+
+ check.level = level;
+ check.owner_root = btrfs_root_id(root);
+
+ old = read_tree_block(fs_info, logical, &check);
if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
if (!IS_ERR(old))
free_extent_buffer(old);
diff --git a/fs/btrfs/tree-mod-log.h b/fs/btrfs/tree-mod-log.h
index 12605d19621b..1c12566040db 100644
--- a/fs/btrfs/tree-mod-log.h
+++ b/fs/btrfs/tree-mod-log.h
@@ -1,9 +1,15 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_TREE_MOD_LOG_H
#define BTRFS_TREE_MOD_LOG_H
-#include "ctree.h"
+#include <linux/list.h>
+
+struct extent_buffer;
+struct btrfs_fs_info;
+struct btrfs_path;
+struct btrfs_root;
+struct btrfs_seq_list;
/* Represents a tree mod log user. */
struct btrfs_seq_list {
@@ -31,21 +37,20 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
struct extent_buffer *new_root,
bool log_removal);
-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
- enum btrfs_mod_log_op op, gfp_t flags);
+int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot,
+ enum btrfs_mod_log_op op);
int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb);
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
struct extent_buffer *eb,
u64 time_seq);
struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq);
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
- struct extent_buffer *src,
+ const struct extent_buffer *src,
unsigned long dst_offset,
unsigned long src_offset,
int nr_items);
-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
+int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb,
int dst_slot, int src_slot,
int nr_items);
u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info);
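The anonymous union added to struct tree_mod_elem above overlaps the three per-op payloads, since only one of them is meaningful for any given op. The standalone userspace sketch below, not part of the patch, uses simplified stand-in layouts (not the real btrfs structures) purely to show how overlapping the members shrinks each allocation.

#include <stdio.h>
#include <stdint.h>

struct disk_key { uint64_t objectid; uint8_t type; uint64_t offset; };
struct mod_root { uint64_t logical; uint8_t level; };

struct elem_old {			/* every payload gets its own storage */
	struct disk_key key;
	uint64_t blockptr;
	struct { int dst_slot; int nr_items; } move;
	struct mod_root old_root;
};

struct elem_new {			/* payloads overlap; only one is valid per op */
	union {
		struct { struct disk_key key; uint64_t blockptr; } slot_change;
		struct { int dst_slot; int nr_items; } move;
		struct mod_root old_root;
	};
};

int main(void)
{
	printf("old payload size: %zu bytes\n", sizeof(struct elem_old));
	printf("new payload size: %zu bytes\n", sizeof(struct elem_new));
	return 0;
}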
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 3374c9e9be67..7e16a253fb35 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -5,8 +5,8 @@
*/
#include <linux/slab.h>
+#include "messages.h"
#include "ulist.h"
-#include "ctree.h"
/*
* ulist is a generic data structure to hold a collection of unique u64
@@ -37,8 +37,9 @@
* loop would be similar to the above.
*/
-/**
- * ulist_init - freshly initialize a ulist
+/*
+ * Freshly initialize a ulist.
+ *
* @ulist: the ulist to initialize
*
* Note: don't use this function to init an already used ulist, use
@@ -49,10 +50,12 @@ void ulist_init(struct ulist *ulist)
INIT_LIST_HEAD(&ulist->nodes);
ulist->root = RB_ROOT;
ulist->nnodes = 0;
+ ulist->prealloc = NULL;
}
-/**
- * ulist_release - free up additionally allocated memory for the ulist
+/*
+ * Free up additionally allocated memory for the ulist.
+ *
* @ulist: the ulist from which to free the additional memory
*
* This is useful in cases where the base 'struct ulist' has been statically
@@ -66,12 +69,15 @@ void ulist_release(struct ulist *ulist)
list_for_each_entry_safe(node, next, &ulist->nodes, list) {
kfree(node);
}
+ kfree(ulist->prealloc);
+ ulist->prealloc = NULL;
ulist->root = RB_ROOT;
INIT_LIST_HEAD(&ulist->nodes);
}
-/**
- * ulist_reinit - prepare a ulist for reuse
+/*
+ * Prepare a ulist for reuse.
+ *
* @ulist: ulist to be reused
*
* Free up all additional memory allocated for the list elements and reinit
@@ -83,8 +89,9 @@ void ulist_reinit(struct ulist *ulist)
ulist_init(ulist);
}
-/**
- * ulist_alloc - dynamically allocate a ulist
+/*
+ * Dynamically allocate a ulist.
+ *
* @gfp_mask: allocation flags for the base allocation
*
* The allocated ulist will be returned in an initialized state.
@@ -101,8 +108,15 @@ struct ulist *ulist_alloc(gfp_t gfp_mask)
return ulist;
}
-/**
- * ulist_free - free dynamically allocated ulist
+void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)
+{
+ if (!ulist->prealloc)
+ ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);
+}
+
+/*
+ * Free dynamically allocated ulist.
+ *
* @ulist: ulist to free
*
* It is not necessary to call ulist_release before.
@@ -115,21 +129,25 @@ void ulist_free(struct ulist *ulist)
kfree(ulist);
}
+static int ulist_node_val_key_cmp(const void *key, const struct rb_node *node)
+{
+ const u64 *val = key;
+ const struct ulist_node *unode = rb_entry(node, struct ulist_node, rb_node);
+
+ if (unode->val < *val)
+ return 1;
+ else if (unode->val > *val)
+ return -1;
+
+ return 0;
+}
+
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
- struct rb_node *n = ulist->root.rb_node;
- struct ulist_node *u = NULL;
-
- while (n) {
- u = rb_entry(n, struct ulist_node, rb_node);
- if (u->val < val)
- n = n->rb_right;
- else if (u->val > val)
- n = n->rb_left;
- else
- return u;
- }
- return NULL;
+ struct rb_node *node;
+
+ node = rb_find(&val, &ulist->root, ulist_node_val_key_cmp);
+ return rb_entry_safe(node, struct ulist_node, rb_node);
}
static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
@@ -141,30 +159,26 @@ static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
ulist->nnodes--;
}
+static int ulist_node_val_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ const struct ulist_node *unode = rb_entry(new, struct ulist_node, rb_node);
+
+ return ulist_node_val_key_cmp(&unode->val, existing);
+}
+
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
- struct rb_node **p = &ulist->root.rb_node;
- struct rb_node *parent = NULL;
- struct ulist_node *cur = NULL;
-
- while (*p) {
- parent = *p;
- cur = rb_entry(parent, struct ulist_node, rb_node);
-
- if (cur->val < ins->val)
- p = &(*p)->rb_right;
- else if (cur->val > ins->val)
- p = &(*p)->rb_left;
- else
- return -EEXIST;
- }
- rb_link_node(&ins->rb_node, parent, p);
- rb_insert_color(&ins->rb_node, &ulist->root);
+ struct rb_node *node;
+
+ node = rb_find_add(&ins->rb_node, &ulist->root, ulist_node_val_cmp);
+ if (node)
+ return -EEXIST;
return 0;
}
-/**
- * ulist_add - add an element to the ulist
+/*
+ * Add an element to the ulist.
+ *
* @ulist: ulist to add the element to
* @val: value to add to ulist
* @aux: auxiliary value to store along with val
@@ -200,9 +214,15 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
*old_aux = node->aux;
return 0;
}
- node = kmalloc(sizeof(*node), gfp_mask);
- if (!node)
- return -ENOMEM;
+
+ if (ulist->prealloc) {
+ node = ulist->prealloc;
+ ulist->prealloc = NULL;
+ } else {
+ node = kmalloc(sizeof(*node), gfp_mask);
+ if (!node)
+ return -ENOMEM;
+ }
node->val = val;
node->aux = aux;
@@ -216,7 +236,8 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
}
/*
- * ulist_del - delete one node from ulist
+ * Delete one node from ulist.
+ *
* @ulist: ulist to remove node from
* @val: value to delete
* @aux: aux to delete
@@ -242,8 +263,9 @@ int ulist_del(struct ulist *ulist, u64 val, u64 aux)
return 0;
}
-/**
- * ulist_next - iterate ulist
+/*
+ * Iterate ulist.
+ *
* @ulist: ulist to iterate
* @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
*
@@ -258,7 +280,7 @@ int ulist_del(struct ulist *ulist, u64 val, u64 aux)
* It is allowed to call ulist_add during an enumeration. Newly added items
* are guaranteed to show up in the running enumeration.
*/
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
+struct ulist_node *ulist_next(const struct ulist *ulist, struct ulist_iterator *uiter)
{
struct ulist_node *node;
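The ulist changes above replace the open-coded rbtree walks with rb_find() and rb_find_add(). Those helpers call the comparator as cmp(key, node) and descend left on a negative result and right on a positive one, which is why ulist_node_val_key_cmp() returns 1 when the node's value is smaller than the key. A minimal sketch of that convention follows, with illustrative names; it mirrors the patch rather than adding new btrfs behaviour.

#include <linux/rbtree.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_node {
	struct rb_node rb_node;
	u64 val;
};

/* cmp(key, node): < 0 means the key sorts before @node, > 0 after it. */
static int example_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 *val = key;
	const struct example_node *entry = rb_entry(node, struct example_node, rb_node);

	if (entry->val < *val)
		return 1;
	if (entry->val > *val)
		return -1;
	return 0;
}

static int example_node_cmp(struct rb_node *new, const struct rb_node *existing)
{
	const struct example_node *entry = rb_entry(new, struct example_node, rb_node);

	return example_key_cmp(&entry->val, existing);
}

/* rb_find_add() returns the existing node on a duplicate, NULL on insert. */
static int example_insert(struct rb_root *root, struct example_node *ins)
{
	if (rb_find_add(&ins->rb_node, root, example_node_cmp))
		return -EEXIST;
	return 0;
}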
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 02fda0a2d4ce..c62a372f1462 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -7,6 +7,7 @@
#ifndef BTRFS_ULIST_H
#define BTRFS_ULIST_H
+#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -40,12 +41,14 @@ struct ulist {
struct list_head nodes;
struct rb_root root;
+ struct ulist_node *prealloc;
};
void ulist_init(struct ulist *ulist);
void ulist_release(struct ulist *ulist);
void ulist_reinit(struct ulist *ulist);
struct ulist *ulist_alloc(gfp_t gfp_mask);
+void ulist_prealloc(struct ulist *ulist, gfp_t mask);
void ulist_free(struct ulist *ulist);
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
@@ -66,7 +69,7 @@ static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
#endif
}
-struct ulist_node *ulist_next(struct ulist *ulist,
+struct ulist_node *ulist_next(const struct ulist *ulist,
struct ulist_iterator *uiter);
#define ULIST_ITER_INIT(uiter) ((uiter)->cur_list = NULL)
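The new prealloc pointer lets a caller reserve one ulist_node while allocation is still easy, so that a later ulist_add() can consume the reserved node instead of calling kmalloc() at an awkward time. A hedged sketch of that pattern follows; the surrounding function and its calling context are invented for illustration only.

#include <linux/gfp.h>
#include "ulist.h"

/* Hypothetical caller: reserve a node up front, add the value later. */
static int example_record_root(struct ulist *roots, u64 root_id)
{
	/*
	 * May allocate and is allowed to sleep here; do it before entering
	 * any context where a fresh kmalloc() would be undesirable.
	 */
	ulist_prealloc(roots, GFP_KERNEL);

	/*
	 * If the preallocated node is still available, ulist_add() consumes
	 * it and no new allocation happens; otherwise it falls back to the
	 * given gfp mask.
	 */
	return ulist_add(roots, root_id, 0, GFP_ATOMIC);
}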
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 74023c8a783f..e3a1310fa7d5 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -3,15 +3,19 @@
* Copyright (C) STRATO AG 2013. All rights reserved.
*/
+#include <linux/kthread.h>
#include <linux/uuid.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include "messages.h"
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
-#include "print-tree.h"
+#include "fs.h"
+#include "accessors.h"
+#include "uuid-tree.h"
+#include "ioctl.h"
-
-static void btrfs_uuid_to_key(u8 *uuid, u8 type, struct btrfs_key *key)
+static void btrfs_uuid_to_key(const u8 *uuid, u8 type, struct btrfs_key *key)
{
key->type = type;
key->objectid = get_unaligned_le64(uuid);
@@ -19,40 +23,34 @@ static void btrfs_uuid_to_key(u8 *uuid, u8 type, struct btrfs_key *key)
}
/* return -ENOENT for !found, < 0 for errors, or 0 if an item was found */
-static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
+static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, const u8 *uuid,
u8 type, u64 subid)
{
int ret;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *eb;
int slot;
u32 item_size;
unsigned long offset;
struct btrfs_key key;
- if (WARN_ON_ONCE(!uuid_root)) {
- ret = -ENOENT;
- goto out;
- }
+ if (WARN_ON_ONCE(!uuid_root))
+ return -ENOENT;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
btrfs_uuid_to_key(uuid, type, &key);
ret = btrfs_search_slot(NULL, uuid_root, &key, path, 0, 0);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -ENOENT;
eb = path->nodes[0];
slot = path->slots[0];
- item_size = btrfs_item_size_nr(eb, slot);
+ item_size = btrfs_item_size(eb, slot);
offset = btrfs_item_ptr_offset(eb, slot);
ret = -ENOENT;
@@ -60,7 +58,7 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
btrfs_warn(uuid_root->fs_info,
"uuid item with illegal size %lu!",
(unsigned long)item_size);
- goto out;
+ return ret;
}
while (item_size) {
__le64 data;
@@ -74,18 +72,16 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
item_size -= sizeof(data);
}
-out:
- btrfs_free_path(path);
return ret;
}
-int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid_cpu)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
int slot;
@@ -96,22 +92,18 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
if (ret != -ENOENT)
return ret;
- if (WARN_ON_ONCE(!uuid_root)) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(!uuid_root))
+ return -EINVAL;
btrfs_uuid_to_key(uuid, type, &key);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
ret = btrfs_insert_empty_item(trans, uuid_root, path, &key,
sizeof(subid_le));
- if (ret >= 0) {
+ if (ret == 0) {
/* Add an item for the type for the first time */
eb = path->nodes[0];
slot = path->slots[0];
@@ -121,35 +113,30 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
- btrfs_extend_item(path, sizeof(subid_le));
+ btrfs_extend_item(trans, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
- offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
+ offset += btrfs_item_size(eb, slot) - sizeof(subid_le);
} else {
btrfs_warn(fs_info,
"insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
ret, key.objectid, key.offset, type);
- goto out;
+ return ret;
}
- ret = 0;
subid_le = cpu_to_le64(subid_cpu);
write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
- btrfs_mark_buffer_dirty(eb);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
-int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
int slot;
@@ -159,39 +146,32 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
unsigned long move_src;
unsigned long move_len;
- if (WARN_ON_ONCE(!uuid_root)) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(!uuid_root))
+ return -EINVAL;
btrfs_uuid_to_key(uuid, type, &key);
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(fs_info, "error %d while searching for uuid item!",
ret);
- goto out;
- }
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
+ return ret;
}
+ if (ret > 0)
+ return -ENOENT;
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
- item_size = btrfs_item_size_nr(eb, slot);
+ item_size = btrfs_item_size(eb, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) {
btrfs_warn(fs_info, "uuid item with illegal size %lu!",
(unsigned long)item_size);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
while (item_size) {
__le64 read_subid;
@@ -203,26 +183,20 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
item_size -= sizeof(read_subid);
}
- if (!item_size) {
- ret = -ENOENT;
- goto out;
- }
+ if (!item_size)
+ return -ENOENT;
- item_size = btrfs_item_size_nr(eb, slot);
- if (item_size == sizeof(subid)) {
- ret = btrfs_del_item(trans, uuid_root, path);
- goto out;
- }
+ item_size = btrfs_item_size(eb, slot);
+ if (item_size == sizeof(subid))
+ return btrfs_del_item(trans, uuid_root, path);
move_dst = offset;
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
- btrfs_truncate_item(path, item_size - sizeof(subid), 1);
+ btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
@@ -254,7 +228,7 @@ out:
* < 0 if an error occurred
*/
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
- u8 *uuid, u8 type, u64 subvolid)
+ const u8 *uuid, u8 type, u64 subvolid)
{
int ret = 0;
struct btrfs_root *subvol_root;
@@ -291,7 +265,7 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root = fs_info->uuid_root;
struct btrfs_key key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
int ret = 0;
struct extent_buffer *leaf;
int slot;
@@ -299,10 +273,8 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
unsigned long offset;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!path)
+ return -ENOMEM;
key.objectid = 0;
key.type = 0;
@@ -310,17 +282,15 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
again_search_slot:
ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
- if (ret) {
- if (ret > 0)
- ret = 0;
- goto out;
- }
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
while (1) {
- if (btrfs_fs_closing(fs_info)) {
- ret = -EINTR;
- goto out;
- }
+ if (btrfs_fs_closing(fs_info))
+ return -EINTR;
+
cond_resched();
leaf = path->nodes[0];
slot = path->slots[0];
@@ -331,7 +301,7 @@ again_search_slot:
goto skip;
offset = btrfs_item_ptr_offset(leaf, slot);
- item_size = btrfs_item_size_nr(leaf, slot);
+ item_size = btrfs_item_size(leaf, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) {
btrfs_warn(fs_info,
"uuid item with illegal size %lu!",
@@ -351,7 +321,7 @@ again_search_slot:
ret = btrfs_check_uuid_tree_entry(fs_info, uuid,
key.type, subid_cpu);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0) {
btrfs_release_path(path);
ret = btrfs_uuid_iter_rem(root, uuid, key.type,
@@ -367,7 +337,7 @@ again_search_slot:
goto again_search_slot;
}
if (ret < 0 && ret != -ENOENT)
- goto out;
+ return ret;
key.offset++;
goto again_search_slot;
}
@@ -384,7 +354,182 @@ skip:
break;
}
+ return ret;
+}
+
+int btrfs_uuid_scan_kthread(void *data)
+{
+ struct btrfs_fs_info *fs_info = data;
+ struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_key key;
+ struct btrfs_path *path = NULL;
+ int ret = 0;
+ struct extent_buffer *eb;
+ int slot;
+ struct btrfs_root_item root_item;
+ u32 item_size;
+ struct btrfs_trans_handle *trans = NULL;
+ bool closing = false;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key.objectid = 0;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = 0;
+
+ while (1) {
+ if (btrfs_fs_closing(fs_info)) {
+ closing = true;
+ break;
+ }
+ ret = btrfs_search_forward(root, &key, path,
+ BTRFS_OLDEST_GENERATION);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ break;
+ }
+
+ if (key.type != BTRFS_ROOT_ITEM_KEY ||
+ (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
+ key.objectid != BTRFS_FS_TREE_OBJECTID) ||
+ key.objectid > BTRFS_LAST_FREE_OBJECTID)
+ goto skip;
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ item_size = btrfs_item_size(eb, slot);
+ if (item_size < sizeof(root_item))
+ goto skip;
+
+ read_extent_buffer(eb, &root_item,
+ btrfs_item_ptr_offset(eb, slot),
+ (int)sizeof(root_item));
+ if (btrfs_root_refs(&root_item) == 0)
+ goto skip;
+
+ if (!btrfs_is_empty_uuid(root_item.uuid) ||
+ !btrfs_is_empty_uuid(root_item.received_uuid)) {
+ if (trans)
+ goto update_tree;
+
+ btrfs_release_path(path);
+ /*
+ * 1 - subvol uuid item
+ * 1 - received_subvol uuid item
+ */
+ trans = btrfs_start_transaction(fs_info->uuid_root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ continue;
+ } else {
+ goto skip;
+ }
+update_tree:
+ btrfs_release_path(path);
+ if (!btrfs_is_empty_uuid(root_item.uuid)) {
+ ret = btrfs_uuid_tree_add(trans, root_item.uuid,
+ BTRFS_UUID_KEY_SUBVOL,
+ key.objectid);
+ if (ret < 0) {
+ btrfs_warn(fs_info, "uuid_tree_add failed %d",
+ ret);
+ break;
+ }
+ }
+
+ if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
+ ret = btrfs_uuid_tree_add(trans,
+ root_item.received_uuid,
+ BTRFS_UUID_KEY_RECEIVED_SUBVOL,
+ key.objectid);
+ if (ret < 0) {
+ btrfs_warn(fs_info, "uuid_tree_add failed %d",
+ ret);
+ break;
+ }
+ }
+
+skip:
+ btrfs_release_path(path);
+ if (trans) {
+ ret = btrfs_end_transaction(trans);
+ trans = NULL;
+ if (ret)
+ break;
+ }
+
+ if (key.offset < (u64)-1) {
+ key.offset++;
+ } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
+ key.offset = 0;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ } else if (key.objectid < (u64)-1) {
+ key.offset = 0;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.objectid++;
+ } else {
+ break;
+ }
+ cond_resched();
+ }
+
out:
btrfs_free_path(path);
- return ret;
+ if (trans && !IS_ERR(trans))
+ btrfs_end_transaction(trans);
+ if (ret)
+ btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
+ else if (!closing)
+ set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
+ up(&fs_info->uuid_tree_rescan_sem);
+ return 0;
+}
+
+int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *uuid_root;
+ struct task_struct *task;
+ int ret;
+
+ /*
+ * 1 - root node
+ * 1 - root item
+ */
+ trans = btrfs_start_transaction(tree_root, 2);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
+ if (IS_ERR(uuid_root)) {
+ ret = PTR_ERR(uuid_root);
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
+
+ fs_info->uuid_root = uuid_root;
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
+ return ret;
+
+ down(&fs_info->uuid_tree_rescan_sem);
+ task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
+ if (IS_ERR(task)) {
+ /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
+ btrfs_warn(fs_info, "failed to start uuid_scan task");
+ up(&fs_info->uuid_tree_rescan_sem);
+ return PTR_ERR(task);
+ }
+
+ return 0;
}
diff --git a/fs/btrfs/uuid-tree.h b/fs/btrfs/uuid-tree.h
new file mode 100644
index 000000000000..c60ad20325cc
--- /dev/null
+++ b/fs/btrfs/uuid-tree.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_UUID_TREE_H
+#define BTRFS_UUID_TREE_H
+
+#include <linux/types.h>
+
+struct btrfs_trans_handle;
+struct btrfs_fs_info;
+
+int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
+ u64 subid);
+int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
+ u64 subid);
+int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
+int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
+int btrfs_uuid_scan_kthread(void *data);
+
+#endif
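btrfs_uuid_to_key() above splits the 16-byte UUID into two little-endian 64-bit halves: the first eight bytes become the key objectid and the remaining eight become the key offset, while the item type records whether the entry is a subvolume or received-subvolume UUID. The small userspace sketch below, not part of the patch, demonstrates that mapping; the memcpy-based read assumes a little-endian host and stands in for get_unaligned_le64().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t read_le64(const uint8_t *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));	/* demo only: assumes a little-endian host */
	return v;
}

int main(void)
{
	const uint8_t uuid[16] = {
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
	};
	uint64_t objectid = read_le64(uuid);		/* first 8 bytes */
	uint64_t offset = read_le64(uuid + 8);		/* last 8 bytes */

	printf("objectid=0x%016llx offset=0x%016llx\n",
	       (unsigned long long)objectid, (unsigned long long)offset);
	return 0;
}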
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index 4968535dfff0..a2ac3fb68bc8 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -10,11 +10,16 @@
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/sched/mm.h>
+#include "messages.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
-#include "disk-io.h"
#include "locking.h"
+#include "fs.h"
+#include "accessors.h"
+#include "ioctl.h"
+#include "verity.h"
+#include "orphan.h"
/*
* Implementation of the interface defined in struct fsverity_operations.
@@ -104,7 +109,7 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = inode->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int count = 0;
int ret;
@@ -116,10 +121,8 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
while (1) {
/* 1 for the item being dropped */
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
/*
* Walk backwards through all the items until we find one that
@@ -138,7 +141,7 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
path->slots[0]--;
} else if (ret < 0) {
btrfs_end_transaction(trans);
- goto out;
+ return ret;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -156,17 +159,14 @@ static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
if (ret) {
btrfs_end_transaction(trans);
- goto out;
+ return ret;
}
count++;
btrfs_release_path(path);
btrfs_end_transaction(trans);
}
- ret = count;
btrfs_end_transaction(trans);
-out:
- btrfs_free_path(path);
- return ret;
+ return count;
}
/*
@@ -212,7 +212,7 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
const char *src, u64 len)
{
struct btrfs_trans_handle *trans;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -228,10 +228,8 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
while (len > 0) {
/* 1 for the new item being inserted */
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
key.objectid = btrfs_ino(inode);
key.type = key_type;
@@ -262,7 +260,6 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
btrfs_end_transaction(trans);
}
- btrfs_free_path(path);
return ret;
}
@@ -279,7 +276,7 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
* page and ignore dest, but it must still be non-NULL to avoid the
* counting-only behavior.
* @len: length in bytes to read
- * @dest_page: copy into this page instead of the dest buffer
+ * @dest_folio: copy into this folio instead of the dest buffer
*
* Helper function to read items from the btree. This returns the number of
* bytes read or < 0 for errors. We can return short reads if the items don't
@@ -289,9 +286,9 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
* Returns number of bytes read or a negative error code on failure.
*/
static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
- char *dest, u64 len, struct page *dest_page)
+ char *dest, u64 len, struct folio *dest_folio)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -309,7 +306,7 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
if (!path)
return -ENOMEM;
- if (dest_page)
+ if (dest_folio)
path->reada = READA_FORWARD;
key.objectid = btrfs_ino(inode);
@@ -333,7 +330,7 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
if (key.objectid != btrfs_ino(inode) || key.type != key_type)
break;
- item_end = btrfs_item_size_nr(leaf, path->slots[0]) + key.offset;
+ item_end = btrfs_item_size(leaf, path->slots[0]) + key.offset;
if (copied > 0) {
/*
@@ -366,15 +363,15 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
copy_offset = offset - key.offset;
if (dest) {
- if (dest_page)
- kaddr = kmap_local_page(dest_page);
+ if (dest_folio)
+ kaddr = kmap_local_folio(dest_folio, 0);
data = btrfs_item_ptr(leaf, path->slots[0], void);
read_extent_buffer(leaf, kaddr + dest_offset,
(unsigned long)data + copy_offset,
copy_bytes);
- if (dest_page)
+ if (dest_folio)
kunmap_local(kaddr);
}
@@ -399,7 +396,6 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
}
}
out:
- btrfs_free_path(path);
if (!ret)
ret = copied;
return ret;
@@ -455,7 +451,7 @@ static int rollback_verity(struct btrfs_inode *inode)
struct btrfs_root *root = inode->root;
int ret;
- ASSERT(inode_is_locked(&inode->vfs_inode));
+ btrfs_assert_inode_locked(inode);
truncate_inode_pages(inode->vfs_inode.i_mapping, inode->vfs_inode.i_size);
clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
ret = btrfs_drop_verity_items(inode);
@@ -480,14 +476,14 @@ static int rollback_verity(struct btrfs_inode *inode)
goto out;
}
inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
- btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
+ btrfs_sync_inode_flags_to_i_flags(inode);
+ ret = btrfs_update_inode(trans, inode);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = del_orphan(trans, inode);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -547,8 +543,8 @@ static int finish_verity(struct btrfs_inode *inode, const void *desc,
goto out;
}
inode->ro_flags |= BTRFS_INODE_RO_VERITY;
- btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, inode);
+ btrfs_sync_inode_flags_to_i_flags(inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret)
goto end_trans;
ret = del_orphan(trans, inode);
@@ -580,7 +576,10 @@ static int btrfs_begin_enable_verity(struct file *filp)
struct btrfs_trans_handle *trans;
int ret;
- ASSERT(inode_is_locked(file_inode(filp)));
+ btrfs_assert_inode_locked(inode);
+
+ if (IS_ENCRYPTED(&inode->vfs_inode))
+ return -EOPNOTSUPP;
if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
return -EBUSY;
@@ -628,7 +627,7 @@ static int btrfs_end_enable_verity(struct file *filp, const void *desc,
int ret = 0;
int rollback_ret;
- ASSERT(inode_is_locked(file_inode(filp)));
+ btrfs_assert_inode_locked(inode);
if (desc == NULL)
goto rollback;
@@ -659,8 +658,7 @@ rollback:
*
* Returns the size on success or a negative error code on failure.
*/
-static int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
- size_t buf_size)
+int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size)
{
u64 true_size;
int ret = 0;
@@ -672,11 +670,11 @@ static int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
if (ret < 0)
return ret;
- if (item.reserved[0] != 0 || item.reserved[1] != 0)
+ if (unlikely(item.reserved[0] != 0 || item.reserved[1] != 0))
return -EUCLEAN;
true_size = btrfs_stack_verity_descriptor_size(&item);
- if (true_size > INT_MAX)
+ if (unlikely(true_size > INT_MAX))
return -EUCLEAN;
if (buf_size == 0)
@@ -710,7 +708,7 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
{
- struct page *page;
+ struct folio *folio;
u64 off = (u64)index << PAGE_SHIFT;
loff_t merkle_pos = merkle_file_pos(inode);
int ret;
@@ -721,29 +719,36 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
return ERR_PTR(-EFBIG);
index += merkle_pos >> PAGE_SHIFT;
again:
- page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
- if (page) {
- if (PageUptodate(page))
- return page;
+ folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+ if (!IS_ERR(folio)) {
+ if (folio_test_uptodate(folio))
+ goto out;
- lock_page(page);
- /*
- * We only insert uptodate pages, so !Uptodate has to be
- * an error
- */
- if (!PageUptodate(page)) {
- unlock_page(page);
- put_page(page);
+ folio_lock(folio);
+ /* If it's not uptodate after we have the lock, we got a read error. */
+ if (!folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
return ERR_PTR(-EIO);
}
- unlock_page(page);
- return page;
+ folio_unlock(folio);
+ goto out;
}
- page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
- if (!page)
+ folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
+ 0, NULL);
+ if (!folio)
return ERR_PTR(-ENOMEM);
+ ret = filemap_add_folio(inode->i_mapping, folio, index, GFP_NOFS);
+ if (ret) {
+ folio_put(folio);
+ /* Did someone else insert a folio here? */
+ if (ret == -EEXIST)
+ goto again;
+ return ERR_PTR(ret);
+ }
+
/*
* Merkle item keys are indexed from byte 0 in the merkle tree.
* They have the form:
@@ -751,60 +756,48 @@ again:
* [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
*/
ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
- page_address(page), PAGE_SIZE, page);
+ folio_address(folio), PAGE_SIZE, folio);
if (ret < 0) {
- put_page(page);
+ folio_put(folio);
return ERR_PTR(ret);
}
if (ret < PAGE_SIZE)
- memzero_page(page, ret, PAGE_SIZE - ret);
+ folio_zero_segment(folio, ret, PAGE_SIZE);
- SetPageUptodate(page);
- ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
- if (!ret) {
- /* Inserted and ready for fsverity */
- unlock_page(page);
- } else {
- put_page(page);
- /* Did someone race us into inserting this page? */
- if (ret == -EEXIST)
- goto again;
- page = ERR_PTR(ret);
- }
- return page;
+out:
+ return folio_file_page(folio, index);
}
/*
* fsverity op that writes a Merkle tree block into the btree.
*
- * @inode: inode to write a Merkle tree block for
- * @buf: Merkle tree data block to write
- * @index: index of the block in the Merkle tree
- * @log_blocksize: log base 2 of the Merkle tree block size
- *
- * Note that the block size could be different from the page size, so it is not
- * safe to assume that index is a page index.
+ * @inode: inode to write a Merkle tree block for
+ * @buf: Merkle tree block to write
+ * @pos: the position of the block in the Merkle tree (in bytes)
+ * @size: the Merkle tree block size (in bytes)
*
* Returns 0 on success or negative error code on failure
*/
static int btrfs_write_merkle_tree_block(struct inode *inode, const void *buf,
- u64 index, int log_blocksize)
+ u64 pos, unsigned int size)
{
- u64 off = index << log_blocksize;
- u64 len = 1ULL << log_blocksize;
loff_t merkle_pos = merkle_file_pos(inode);
if (merkle_pos < 0)
return merkle_pos;
- if (merkle_pos > inode->i_sb->s_maxbytes - off - len)
+ if (merkle_pos > inode->i_sb->s_maxbytes - pos - size)
return -EFBIG;
return write_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY,
- off, buf, len);
+ pos, buf, size);
}
const struct fsverity_operations btrfs_verityops = {
+ .inode_info_offs = (int)offsetof(struct btrfs_inode, i_verity_info) -
+ (int)offsetof(struct btrfs_inode, vfs_inode),
.begin_enable_verity = btrfs_begin_enable_verity,
.end_enable_verity = btrfs_end_enable_verity,
.get_verity_descriptor = btrfs_get_verity_descriptor,
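/*
 * Editorial sketch, not part of the patch above: the fsverity hook
 * btrfs_write_merkle_tree_block() changed from an (index, log_blocksize)
 * interface to a byte-based (pos, size) one. The hypothetical helper below
 * only illustrates how a block index under the old convention maps to the
 * new arguments (e.g. index 3 with a 4KiB block size gives pos == 12288 and
 * size == 4096), so callers no longer have to assume the Merkle tree block
 * size equals PAGE_SIZE.
 */
#include <linux/types.h>

static inline void merkle_index_to_pos(u64 index, int log_blocksize,
				       u64 *pos, unsigned int *size)
{
	*pos = index << log_blocksize;	/* byte offset within the Merkle tree */
	*size = 1U << log_blocksize;	/* Merkle tree block size in bytes */
}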
diff --git a/fs/btrfs/verity.h b/fs/btrfs/verity.h
new file mode 100644
index 000000000000..d696659e43e4
--- /dev/null
+++ b/fs/btrfs/verity.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_VERITY_H
+#define BTRFS_VERITY_H
+
+struct inode;
+struct btrfs_inode;
+
+#ifdef CONFIG_FS_VERITY
+
+#include <linux/fsverity.h>
+
+extern const struct fsverity_operations btrfs_verityops;
+
+int btrfs_drop_verity_items(struct btrfs_inode *inode);
+int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size);
+
+#else
+
+#include <linux/errno.h>
+
+static inline int btrfs_drop_verity_items(struct btrfs_inode *inode)
+{
+ return 0;
+}
+
+static inline int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
+ size_t buf_size)
+{
+ return -EPERM;
+}
+
+#endif
+
+#endif
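/*
 * Editorial sketch, not part of the patch: the !CONFIG_FS_VERITY stubs in
 * the header above return 0 / -EPERM so that call sites need no #ifdefs.
 * The hypothetical caller below (illustrative only, the function name is
 * not from the patch) compiles and behaves sensibly with or without
 * fs-verity support built in:
 */
#include "verity.h"

static int example_drop_verity(struct btrfs_inode *inode)
{
	/* With !CONFIG_FS_VERITY this is a no-op that returns 0. */
	return btrfs_drop_verity_items(inode);
}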
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 61ac57bcbf1a..ae1742a35e76 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5,27 +5,19 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
-#include <linux/bio.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
-#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
-#include "ctree.h"
-#include "extent_map.h"
#include "disk-io.h"
+#include "extent-tree.h"
#include "transaction.h"
-#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
-#include "async-thread.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
@@ -33,6 +25,30 @@
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
+#include "fs.h"
+#include "accessors.h"
+#include "uuid-tree.h"
+#include "ioctl.h"
+#include "relocation.h"
+#include "scrub.h"
+#include "super.h"
+#include "raid-stripe-tree.h"
+
+#define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
+ BTRFS_BLOCK_GROUP_RAID10 | \
+ BTRFS_BLOCK_GROUP_RAID56_MASK)
+
+struct btrfs_io_geometry {
+ u32 stripe_index;
+ u32 stripe_nr;
+ int mirror_num;
+ int num_stripes;
+ u64 stripe_offset;
+ u64 raid56_full_stripe_start;
+ int max_errors;
+ enum btrfs_map_op op;
+ bool use_rst;
+};
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
@@ -160,24 +176,12 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
*/
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
- if (flags & BTRFS_BLOCK_GROUP_RAID10)
- return BTRFS_RAID_RAID10;
- else if (flags & BTRFS_BLOCK_GROUP_RAID1)
- return BTRFS_RAID_RAID1;
- else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
- return BTRFS_RAID_RAID1C3;
- else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
- return BTRFS_RAID_RAID1C4;
- else if (flags & BTRFS_BLOCK_GROUP_DUP)
- return BTRFS_RAID_DUP;
- else if (flags & BTRFS_BLOCK_GROUP_RAID0)
- return BTRFS_RAID_RAID0;
- else if (flags & BTRFS_BLOCK_GROUP_RAID5)
- return BTRFS_RAID_RAID5;
- else if (flags & BTRFS_BLOCK_GROUP_RAID6)
- return BTRFS_RAID_RAID6;
-
- return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
+ const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+
+ if (!profile)
+ return BTRFS_RAID_SINGLE;
+
+ return BTRFS_BG_FLAG_TO_INDEX(profile);
}
const char *btrfs_bg_type_to_raid_name(u64 flags)
@@ -190,6 +194,13 @@ const char *btrfs_bg_type_to_raid_name(u64 flags)
return btrfs_raid_array[index].raid_name;
}
+int btrfs_nr_parity_stripes(u64 type)
+{
+ enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);
+
+ return btrfs_raid_array[index].nparity;
+}
+
/*
* Fill @buf with textual description of @bg_flags, no more than @size_buf
* bytes including terminating null byte.
@@ -202,10 +213,8 @@ void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
u64 flags = bg_flags;
u32 size_bp = size_buf;
- if (!flags) {
- strcpy(bp, "NONE");
+ if (!flags)
return;
- }
#define DESCRIBE_FLAG(flag, desc) \
do { \
@@ -246,13 +255,7 @@ out_overflow:;
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
-static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
-static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
- enum btrfs_map_op op,
- u64 logical, u64 *length,
- struct btrfs_io_context **bioc_ret,
- int mirror_num, int need_raid_map);
/*
* Device locking
@@ -362,16 +365,16 @@ struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
}
/*
- * alloc_fs_devices - allocate struct btrfs_fs_devices
- * @fsid: if not NULL, copy the UUID to fs_devices::fsid
- * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
+ * Allocate new btrfs_fs_devices structure identified by a fsid.
+ *
+ * @fsid: if not NULL, copy the UUID to fs_devices::fsid and to
+ * fs_devices::metadata_fsid
*
* Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
* The returned struct is not linked onto any lists and can be destroyed with
* kfree() right away.
*/
-static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
- const u8 *metadata_fsid)
+static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
struct btrfs_fs_devices *fs_devs;
@@ -385,23 +388,24 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
INIT_LIST_HEAD(&fs_devs->alloc_list);
INIT_LIST_HEAD(&fs_devs->fs_list);
INIT_LIST_HEAD(&fs_devs->seed_list);
- if (fsid)
- memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
- if (metadata_fsid)
- memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
- else if (fsid)
+ if (fsid) {
+ memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
+ }
return fs_devs;
}
-void btrfs_free_device(struct btrfs_device *device)
+static void btrfs_free_device(struct btrfs_device *device)
{
WARN_ON(!list_empty(&device->post_commit_list));
- rcu_string_free(device->name);
- extent_io_tree_release(&device->alloc_state);
- bio_put(device->flush_bio);
+ /*
+ * No need to call kfree_rcu() nor do RCU lock/unlock, nothing is
+ * reading the device name.
+ */
+ kfree(rcu_dereference_raw(device->name));
+ btrfs_extent_io_tree_release(&device->alloc_state);
btrfs_destroy_dev_zone_info(device);
kfree(device);
}
@@ -409,10 +413,12 @@ void btrfs_free_device(struct btrfs_device *device)
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
+
WARN_ON(fs_devices->opened);
+ WARN_ON(fs_devices->holding);
while (!list_empty(&fs_devices->devices)) {
- device = list_entry(fs_devices->devices.next,
- struct btrfs_device, dev_list);
+ device = list_first_entry(&fs_devices->devices,
+ struct btrfs_device, dev_list);
list_del(&device->dev_list);
btrfs_free_device(device);
}
@@ -424,146 +430,110 @@ void __exit btrfs_cleanup_fs_uuids(void)
struct btrfs_fs_devices *fs_devices;
while (!list_empty(&fs_uuids)) {
- fs_devices = list_entry(fs_uuids.next,
- struct btrfs_fs_devices, fs_list);
+ fs_devices = list_first_entry(&fs_uuids, struct btrfs_fs_devices,
+ fs_list);
list_del(&fs_devices->fs_list);
free_fs_devices(fs_devices);
}
}
-static noinline struct btrfs_fs_devices *find_fsid(
- const u8 *fsid, const u8 *metadata_fsid)
+static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
+ const u8 *fsid, const u8 *metadata_fsid)
{
- struct btrfs_fs_devices *fs_devices;
+ if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
+ return false;
- ASSERT(fsid);
+ if (!metadata_fsid)
+ return true;
- /* Handle non-split brain cases */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (metadata_fsid) {
- if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
- && memcmp(metadata_fsid, fs_devices->metadata_uuid,
- BTRFS_FSID_SIZE) == 0)
- return fs_devices;
- } else {
- if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
- return fs_devices;
- }
- }
- return NULL;
+ if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
+ return false;
+
+ return true;
}
-static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
- struct btrfs_super_block *disk_super)
+static noinline struct btrfs_fs_devices *find_fsid(
+ const u8 *fsid, const u8 *metadata_fsid)
{
-
struct btrfs_fs_devices *fs_devices;
- /*
- * Handle scanned device having completed its fsid change but
- * belonging to a fs_devices that was created by first scanning
- * a device which didn't have its fsid/metadata_uuid changed
- * at all and the CHANGING_FSID_V2 flag set.
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (fs_devices->fsid_change &&
- memcmp(disk_super->metadata_uuid, fs_devices->fsid,
- BTRFS_FSID_SIZE) == 0 &&
- memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
- BTRFS_FSID_SIZE) == 0) {
- return fs_devices;
- }
- }
- /*
- * Handle scanned device having completed its fsid change but
- * belonging to a fs_devices that was created by a device that
- * has an outdated pair of fsid/metadata_uuid and
- * CHANGING_FSID_V2 flag set.
- */
+ ASSERT(fsid);
+
+ /* Handle non-split brain cases */
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (fs_devices->fsid_change &&
- memcmp(fs_devices->metadata_uuid,
- fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
- memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
- BTRFS_FSID_SIZE) == 0) {
+ if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
return fs_devices;
- }
}
-
- return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
+ return NULL;
}
-
static int
-btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
- int flush, struct block_device **bdev,
+btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
+ int flush, struct file **bdev_file,
struct btrfs_super_block **disk_super)
{
+ struct block_device *bdev;
int ret;
- *bdev = blkdev_get_by_path(device_path, flags, holder);
+ *bdev_file = bdev_file_open_by_path(device_path, flags, holder, &fs_holder_ops);
- if (IS_ERR(*bdev)) {
- ret = PTR_ERR(*bdev);
+ if (IS_ERR(*bdev_file)) {
+ ret = PTR_ERR(*bdev_file);
+ btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
+ device_path, flags, ret);
goto error;
}
+ bdev = file_bdev(*bdev_file);
if (flush)
- sync_blockdev(*bdev);
- ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
- if (ret) {
- blkdev_put(*bdev, flags);
- goto error;
+ sync_blockdev(bdev);
+ if (holder) {
+ ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
+ if (ret) {
+ bdev_fput(*bdev_file);
+ goto error;
+ }
}
- invalidate_bdev(*bdev);
- *disk_super = btrfs_read_dev_super(*bdev);
+ invalidate_bdev(bdev);
+ *disk_super = btrfs_read_disk_super(bdev, 0, false);
if (IS_ERR(*disk_super)) {
ret = PTR_ERR(*disk_super);
- blkdev_put(*bdev, flags);
+ bdev_fput(*bdev_file);
goto error;
}
return 0;
error:
- *bdev = NULL;
+ *disk_super = NULL;
+ *bdev_file = NULL;
return ret;
}
-static bool device_path_matched(const char *path, struct btrfs_device *device)
-{
- int found;
-
- rcu_read_lock();
- found = strcmp(rcu_str_deref(device->name), path);
- rcu_read_unlock();
-
- return found == 0;
-}
-
/*
- * Search and remove all stale (devices which are not mounted) devices.
- * When both inputs are NULL, it will search and release all stale devices.
- * path: Optional. When provided will it release all unmounted devices
- * matching this path only.
- * skip_dev: Optional. Will skip this device when searching for the stale
- * devices.
- * Return: 0 for success or if @path is NULL.
- * -EBUSY if @path is a mounted device.
- * -ENOENT if @path does not match any device in the list.
+ * Search and remove all stale devices (which are not mounted). When both
+ * inputs are NULL, it will search and release all stale devices.
+ *
+ * @devt: Optional. When provided, it will release all unmounted devices
+ * matching this devt only.
+ * @skip_device: Optional. Will skip this device when searching for the stale
+ * devices.
+ *
+ * Return: 0 for success or if @devt is 0.
+ * -EBUSY if @devt is a mounted device.
+ * -ENOENT if @devt does not match any device in the list.
*/
-static int btrfs_free_stale_devices(const char *path,
- struct btrfs_device *skip_device)
+static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
struct btrfs_device *device, *tmp_device;
- int ret = 0;
+ int ret;
+ bool freed = false;
lockdep_assert_held(&uuid_mutex);
- if (path)
- ret = -ENOENT;
-
+ /* Return good status if there is no instance of devt. */
+ ret = 0;
list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
mutex_lock(&fs_devices->device_list_mutex);
@@ -571,13 +541,10 @@ static int btrfs_free_stale_devices(const char *path,
&fs_devices->devices, dev_list) {
if (skip_device && skip_device == device)
continue;
- if (path && !device->name)
- continue;
- if (path && !device_path_matched(path, device))
+ if (devt && devt != device->devt)
continue;
- if (fs_devices->opened) {
- /* for an already deleted device return 0 */
- if (path && ret != 0)
+ if (fs_devices->opened || fs_devices->holding) {
+ if (devt)
ret = -EBUSY;
break;
}
@@ -587,7 +554,7 @@ static int btrfs_free_stale_devices(const char *path,
list_del(&device->dev_list);
btrfs_free_device(device);
- ret = 0;
+ freed = true;
}
mutex_unlock(&fs_devices->device_list_mutex);
@@ -598,20 +565,91 @@ static int btrfs_free_stale_devices(const char *path,
}
}
+ /* If there is at least one freed device return 0. */
+ if (freed)
+ return 0;
+
return ret;
}
+static struct btrfs_fs_devices *find_fsid_by_device(
+ struct btrfs_super_block *disk_super,
+ dev_t devt, bool *same_fsid_diff_dev)
+{
+ struct btrfs_fs_devices *fsid_fs_devices;
+ struct btrfs_fs_devices *devt_fs_devices;
+ const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
+ bool found_by_devt = false;
+
+ /* Find the fs_device by the usual method, if found use it. */
+ fsid_fs_devices = find_fsid(disk_super->fsid,
+ has_metadata_uuid ? disk_super->metadata_uuid : NULL);
+
+ /* The temp_fsid feature is supported only with single device filesystem. */
+ if (btrfs_super_num_devices(disk_super) != 1)
+ return fsid_fs_devices;
+
+ /*
+ * A seed device is an integral component of the sprout device, which
+ * functions as a multi-device filesystem. So the temp-fsid feature is
+ * not supported.
+ */
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
+ return fsid_fs_devices;
+
+ /* Try to find a fs_devices by matching devt. */
+ list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
+ struct btrfs_device *device;
+
+ list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
+ if (device->devt == devt) {
+ found_by_devt = true;
+ break;
+ }
+ }
+ if (found_by_devt)
+ break;
+ }
+
+ if (found_by_devt) {
+ /* Existing device. */
+ if (fsid_fs_devices == NULL) {
+ if (devt_fs_devices->opened == 0) {
+ /* Stale device. */
+ return NULL;
+ } else {
+ /* temp_fsid is mounting a subvol. */
+ return devt_fs_devices;
+ }
+ } else {
+ /* Regular or temp_fsid device mounting a subvol. */
+ return devt_fs_devices;
+ }
+ } else {
+ /* New device. */
+ if (fsid_fs_devices == NULL) {
+ return NULL;
+ } else {
+ /* sb::fsid is already used, create a new temp_fsid. */
+ *same_fsid_diff_dev = true;
+ return NULL;
+ }
+ }
+
+ /* Not reached. */
+}
+
/*
* This is only used on mount, and we are protected from competing things
* messing with our fs_devices by the uuid_mutex, thus we do not need the
* fs_devices->device_list_mutex here.
*/
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
- struct btrfs_device *device, fmode_t flags,
+ struct btrfs_device *device, blk_mode_t flags,
void *holder)
{
- struct request_queue *q;
- struct block_device *bdev;
+ struct file *bdev_file;
struct btrfs_super_block *disk_super;
u64 devid;
int ret;
@@ -621,8 +659,8 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
if (!device->name)
return -EINVAL;
- ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
- &bdev, &disk_super);
+ ret = btrfs_get_bdev_and_sb(rcu_dereference_raw(device->name), flags, holder, 1,
+ &bdev_file, &disk_super);
if (ret)
return ret;
@@ -638,27 +676,39 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
if (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
- pr_err(
- "BTRFS: Invalid seeding and uuid-changed device detected\n");
+ btrfs_err(NULL,
+ "invalid seeding and uuid-changed device detected");
goto error_free_page;
}
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
fs_devices->seeding = true;
} else {
- if (bdev_read_only(bdev))
+ if (bdev_read_only(file_bdev(bdev_file)))
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
else
set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
}
- q = bdev_get_queue(bdev);
- if (!blk_queue_nonrot(q))
+ if (!bdev_nonrot(file_bdev(bdev_file)))
fs_devices->rotating = true;
- device->bdev = bdev;
+ if (bdev_max_discard_sectors(file_bdev(bdev_file)))
+ fs_devices->discardable = true;
+
+ device->bdev_file = bdev_file;
+ device->bdev = file_bdev(bdev_file);
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
- device->mode = flags;
+
+ if (device->devt != device->bdev->bd_dev) {
+ btrfs_warn(NULL,
+ "device %s maj:min changed from %d:%d to %d:%d",
+ rcu_dereference_raw(device->name), MAJOR(device->devt),
+ MINOR(device->devt), MAJOR(device->bdev->bd_dev),
+ MINOR(device->bdev->bd_dev));
+
+ device->devt = device->bdev->bd_dev;
+ }
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
@@ -672,95 +722,54 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
error_free_page:
btrfs_release_disk_super(disk_super);
- blkdev_put(bdev, flags);
+ bdev_fput(bdev_file);
return -EINVAL;
}
-/*
- * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
- * being created with a disk that has already completed its fsid change. Such
- * disk can belong to an fs which has its FSID changed or to one which doesn't.
- * Handle both cases here.
- */
-static struct btrfs_fs_devices *find_fsid_inprogress(
- struct btrfs_super_block *disk_super)
+const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
- struct btrfs_fs_devices *fs_devices;
+ bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
- BTRFS_FSID_SIZE) != 0 &&
- memcmp(fs_devices->metadata_uuid, disk_super->fsid,
- BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
- return fs_devices;
- }
- }
-
- return find_fsid(disk_super->fsid, NULL);
+ return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
-
-static struct btrfs_fs_devices *find_fsid_changed(
- struct btrfs_super_block *disk_super)
+static bool is_same_device(struct btrfs_device *device, const char *new_path)
{
- struct btrfs_fs_devices *fs_devices;
-
- /*
- * Handles the case where scanned device is part of an fs that had
- * multiple successful changes of FSID but currently device didn't
- * observe it. Meaning our fsid will be different than theirs. We need
- * to handle two subcases :
- * 1 - The fs still continues to have different METADATA/FSID uuids.
- * 2 - The fs is switched back to its original FSID (METADATA/FSID
- * are equal).
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- /* Changed UUIDs */
- if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
- BTRFS_FSID_SIZE) != 0 &&
- memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
- BTRFS_FSID_SIZE) == 0 &&
- memcmp(fs_devices->fsid, disk_super->fsid,
- BTRFS_FSID_SIZE) != 0)
- return fs_devices;
-
- /* Unchanged UUIDs */
- if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
- BTRFS_FSID_SIZE) == 0 &&
- memcmp(fs_devices->fsid, disk_super->metadata_uuid,
- BTRFS_FSID_SIZE) == 0)
- return fs_devices;
- }
+ struct path old = { .mnt = NULL, .dentry = NULL };
+ struct path new = { .mnt = NULL, .dentry = NULL };
+ char AUTO_KFREE(old_path);
+ bool is_same = false;
+ int ret;
- return NULL;
-}
+ if (!device->name)
+ goto out;
-static struct btrfs_fs_devices *find_fsid_reverted_metadata(
- struct btrfs_super_block *disk_super)
-{
- struct btrfs_fs_devices *fs_devices;
+ old_path = kzalloc(PATH_MAX, GFP_NOFS);
+ if (!old_path)
+ goto out;
- /*
- * Handle the case where the scanned device is part of an fs whose last
- * metadata UUID change reverted it to the original FSID. At the same
- * time * fs_devices was first created by another constitutent device
- * which didn't fully observe the operation. This results in an
- * btrfs_fs_devices created with metadata/fsid different AND
- * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
- * fs_devices equal to the FSID of the disk.
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
- BTRFS_FSID_SIZE) != 0 &&
- memcmp(fs_devices->metadata_uuid, disk_super->fsid,
- BTRFS_FSID_SIZE) == 0 &&
- fs_devices->fsid_change)
- return fs_devices;
- }
+ rcu_read_lock();
+ ret = strscpy(old_path, rcu_dereference(device->name), PATH_MAX);
+ rcu_read_unlock();
+ if (ret < 0)
+ goto out;
- return NULL;
+ ret = kern_path(old_path, LOOKUP_FOLLOW, &old);
+ if (ret)
+ goto out;
+ ret = kern_path(new_path, LOOKUP_FOLLOW, &new);
+ if (ret)
+ goto out;
+ if (path_equal(&old, &new))
+ is_same = true;
+out:
+ path_put(&old);
+ path_put(&new);
+ return is_same;
}
+
/*
* Add new device to list of registered devices
*
@@ -774,39 +783,47 @@ static noinline struct btrfs_device *device_list_add(const char *path,
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices = NULL;
- struct rcu_string *name;
+ const char *name;
u64 found_transid = btrfs_super_generation(disk_super);
u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
+ dev_t path_devt;
+ int ret;
+ bool same_fsid_diff_dev = false;
bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
- bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
- BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
- if (fsid_change_in_progress) {
- if (!has_metadata_uuid)
- fs_devices = find_fsid_inprogress(disk_super);
- else
- fs_devices = find_fsid_changed(disk_super);
- } else if (has_metadata_uuid) {
- fs_devices = find_fsid_with_metadata_uuid(disk_super);
- } else {
- fs_devices = find_fsid_reverted_metadata(disk_super);
- if (!fs_devices)
- fs_devices = find_fsid(disk_super->fsid, NULL);
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
+ btrfs_err(NULL,
+"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
+ path);
+ return ERR_PTR(-EAGAIN);
}
+ ret = lookup_bdev(path, &path_devt);
+ if (ret) {
+ btrfs_err(NULL, "failed to lookup block device for path %s: %d",
+ path, ret);
+ return ERR_PTR(ret);
+ }
- if (!fs_devices) {
- if (has_metadata_uuid)
- fs_devices = alloc_fs_devices(disk_super->fsid,
- disk_super->metadata_uuid);
- else
- fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
+ fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);
+ if (!fs_devices) {
+ fs_devices = alloc_fs_devices(disk_super->fsid);
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
- fs_devices->fsid_change = fsid_change_in_progress;
+ if (has_metadata_uuid)
+ memcpy(fs_devices->metadata_uuid,
+ disk_super->metadata_uuid, BTRFS_FSID_SIZE);
+
+ if (same_fsid_diff_dev) {
+ generate_random_uuid(fs_devices->fsid);
+ fs_devices->temp_fsid = true;
+ btrfs_info(NULL, "device %s (%d:%d) using temp-fsid %pU",
+ path, MAJOR(path_devt), MINOR(path_devt),
+ fs_devices->fsid);
+ }
mutex_lock(&fs_devices->device_list_mutex);
list_add(&fs_devices->fs_list, &fs_uuids);
@@ -821,49 +838,38 @@ static noinline struct btrfs_device *device_list_add(const char *path,
mutex_lock(&fs_devices->device_list_mutex);
device = btrfs_find_device(fs_devices, &args);
- /*
- * If this disk has been pulled into an fs devices created by
- * a device which had the CHANGING_FSID_V2 flag then replace the
- * metadata_uuid/fsid values of the fs_devices.
- */
- if (fs_devices->fsid_change &&
- found_transid > fs_devices->latest_generation) {
+ if (found_transid > fs_devices->latest_generation) {
memcpy(fs_devices->fsid, disk_super->fsid,
BTRFS_FSID_SIZE);
-
- if (has_metadata_uuid)
- memcpy(fs_devices->metadata_uuid,
- disk_super->metadata_uuid,
- BTRFS_FSID_SIZE);
- else
- memcpy(fs_devices->metadata_uuid,
- disk_super->fsid, BTRFS_FSID_SIZE);
-
- fs_devices->fsid_change = false;
+ memcpy(fs_devices->metadata_uuid,
+ btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
}
}
if (!device) {
+ unsigned int nofs_flag;
+
if (fs_devices->opened) {
+ btrfs_err(NULL,
+"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
+ path, MAJOR(path_devt), MINOR(path_devt),
+ fs_devices->fsid, current->comm,
+ task_pid_nr(current));
mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-EBUSY);
}
+ nofs_flag = memalloc_nofs_save();
device = btrfs_alloc_device(NULL, &devid,
- disk_super->dev_item.uuid);
+ disk_super->dev_item.uuid, path);
+ memalloc_nofs_restore(nofs_flag);
if (IS_ERR(device)) {
mutex_unlock(&fs_devices->device_list_mutex);
/* we can safely leave the fs_devices entry around */
return device;
}
- name = rcu_string_strdup(path, GFP_NOFS);
- if (!name) {
- btrfs_free_device(device);
- mutex_unlock(&fs_devices->device_list_mutex);
- return ERR_PTR(-ENOMEM);
- }
- rcu_assign_pointer(device->name, name);
+ device->devt = path_devt;
list_add_rcu(&device->dev_list, &fs_devices->devices);
fs_devices->num_devices++;
@@ -873,16 +879,20 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (disk_super->label[0])
pr_info(
- "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
+"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
disk_super->label, devid, found_transid, path,
+ MAJOR(path_devt), MINOR(path_devt),
current->comm, task_pid_nr(current));
else
pr_info(
- "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
+"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
disk_super->fsid, devid, found_transid, path,
+ MAJOR(path_devt), MINOR(path_devt),
current->comm, task_pid_nr(current));
- } else if (!device->name || strcmp(device->name->str, path)) {
+ } else if (!device->name || !is_same_device(device, path)) {
+ const char *old_name;
+
/*
* When FS is already mounted.
* 1. If you are here and if the device->name is NULL that
@@ -918,56 +928,54 @@ static noinline struct btrfs_device *device_list_add(const char *path,
* generation are equal.
*/
mutex_unlock(&fs_devices->device_list_mutex);
+ btrfs_err(NULL,
+"device %s already registered with a higher generation, found %llu expect %llu",
+ path, found_transid, device->generation);
return ERR_PTR(-EEXIST);
}
/*
* We are going to replace the device path for a given devid,
* make sure it's the same device if the device is mounted
+ *
+ * NOTE: the device->fs_info may not be reliable here so pass
+ * in a NULL to message helpers instead. This avoids a possible
+ * use-after-free when the fs_info and fs_info->sb are already
+ * torn down.
*/
if (device->bdev) {
- int error;
- dev_t path_dev;
-
- error = lookup_bdev(path, &path_dev);
- if (error) {
+ if (device->devt != path_devt) {
mutex_unlock(&fs_devices->device_list_mutex);
- return ERR_PTR(error);
- }
-
- if (device->bdev->bd_dev != path_dev) {
- mutex_unlock(&fs_devices->device_list_mutex);
- /*
- * device->fs_info may not be reliable here, so
- * pass in a NULL instead. This avoids a
- * possible use-after-free when the fs_info and
- * fs_info->sb are already torn down.
- */
- btrfs_warn_in_rcu(NULL,
+ btrfs_warn(NULL,
"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
path, devid, found_transid,
current->comm,
task_pid_nr(current));
return ERR_PTR(-EEXIST);
}
- btrfs_info_in_rcu(device->fs_info,
+ btrfs_info(NULL,
"devid %llu device path %s changed to %s scanned by %s (%d)",
- devid, rcu_str_deref(device->name),
+ devid, btrfs_dev_name(device),
path, current->comm,
task_pid_nr(current));
}
- name = rcu_string_strdup(path, GFP_NOFS);
+ name = kstrdup(path, GFP_NOFS);
if (!name) {
mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-ENOMEM);
}
- rcu_string_free(device->name);
+ rcu_read_lock();
+ old_name = rcu_dereference(device->name);
+ rcu_read_unlock();
rcu_assign_pointer(device->name, name);
+ kfree_rcu_mightsleep(old_name);
+
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
fs_devices->missing_devices--;
clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
}
+ device->devt = path_devt;
}
/*
@@ -997,35 +1005,39 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
lockdep_assert_held(&uuid_mutex);
- fs_devices = alloc_fs_devices(orig->fsid, NULL);
+ fs_devices = alloc_fs_devices(orig->fsid);
if (IS_ERR(fs_devices))
return fs_devices;
fs_devices->total_devices = orig->total_devices;
list_for_each_entry(orig_dev, &orig->devices, dev_list) {
- struct rcu_string *name;
+ const char *dev_path = NULL;
+
+ /*
+ * This is ok to do without taking the RCU read lock because we hold the
+ * uuid mutex so nothing we touch in here is going to disappear.
+ */
+ if (orig_dev->name)
+ dev_path = rcu_dereference_raw(orig_dev->name);
device = btrfs_alloc_device(NULL, &orig_dev->devid,
- orig_dev->uuid);
+ orig_dev->uuid, dev_path);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto error;
}
- /*
- * This is ok to do without rcu read locked because we hold the
- * uuid mutex so nothing we touch in here is going to disappear.
- */
- if (orig_dev->name) {
- name = rcu_string_strdup(orig_dev->name->str,
- GFP_KERNEL);
- if (!name) {
+ if (orig_dev->zone_info) {
+ struct btrfs_zoned_device_info *zone_info;
+
+ zone_info = btrfs_clone_dev_zone_info(orig_dev);
+ if (!zone_info) {
btrfs_free_device(device);
ret = -ENOMEM;
goto error;
}
- rcu_assign_pointer(device->name, name);
+ device->zone_info = zone_info;
}
list_add(&device->dev_list, &fs_devices->devices);
@@ -1064,9 +1076,10 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
if (device->devid == BTRFS_DEV_REPLACE_DEVID)
continue;
- if (device->bdev) {
- blkdev_put(device->bdev, device->mode);
+ if (device->bdev_file) {
+ bdev_fput(device->bdev_file);
device->bdev = NULL;
+ device->bdev_file = NULL;
fs_devices->open_devices--;
}
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
@@ -1111,7 +1124,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
invalidate_bdev(device->bdev);
}
- blkdev_put(device->bdev, device->mode);
+ bdev_fput(device->bdev_file);
}
static void btrfs_close_one_device(struct btrfs_device *device)
@@ -1136,13 +1149,14 @@ static void btrfs_close_one_device(struct btrfs_device *device)
if (device->bdev) {
fs_devices->open_devices--;
device->bdev = NULL;
+ device->bdev_file = NULL;
}
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
btrfs_destroy_dev_zone_info(device);
device->fs_info = NULL;
atomic_set(&device->dev_stats_ccnt, 0);
- extent_io_tree_release(&device->alloc_state);
+ btrfs_extent_io_tree_release(&device->alloc_state);
/*
* Reset the flush error record. We might have a transient flush error
@@ -1158,11 +1172,10 @@ static void btrfs_close_one_device(struct btrfs_device *device)
device->last_flush_error = 0;
/* Verify the device is back in a pristine state */
- ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
- ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
- ASSERT(list_empty(&device->dev_alloc_list));
- ASSERT(list_empty(&device->post_commit_list));
- ASSERT(atomic_read(&device->reada_in_flight) == 0);
+ WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
+ WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
+ WARN_ON(!list_empty(&device->dev_alloc_list));
+ WARN_ON(!list_empty(&device->post_commit_list));
}
static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
@@ -1191,9 +1204,22 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
mutex_lock(&uuid_mutex);
close_fs_devices(fs_devices);
- if (!fs_devices->opened)
+ if (!fs_devices->opened && !fs_devices->holding) {
list_splice_init(&fs_devices->seed_list, &list);
+ /*
+ * If the struct btrfs_fs_devices is not assembled with any
+ * other device, it can be re-initialized during the next mount
+ * without needing the device-scan step. Therefore, it can be
+ * fully freed.
+ */
+ if (fs_devices->num_devices == 1) {
+ list_del(&fs_devices->fs_list);
+ free_fs_devices(fs_devices);
+ }
+ }
+
+
list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
close_fs_devices(fs_devices);
list_del(&fs_devices->seed_list);
@@ -1203,36 +1229,58 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
}
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
- fmode_t flags, void *holder)
+ blk_mode_t flags, void *holder)
{
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
struct btrfs_device *tmp_device;
-
- flags |= FMODE_EXCL;
+ s64 __maybe_unused value = 0;
+ int ret = 0;
list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
dev_list) {
- int ret;
+ int ret2;
- ret = btrfs_open_one_device(fs_devices, device, flags, holder);
- if (ret == 0 &&
+ ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
+ if (ret2 == 0 &&
(!latest_dev || device->generation > latest_dev->generation)) {
latest_dev = device;
- } else if (ret == -ENODATA) {
+ } else if (ret2 == -ENODATA) {
fs_devices->num_devices--;
list_del(&device->dev_list);
btrfs_free_device(device);
}
+ if (ret == 0 && ret2 != 0)
+ ret = ret2;
}
- if (fs_devices->open_devices == 0)
+
+ if (fs_devices->open_devices == 0) {
+ if (ret)
+ return ret;
return -EINVAL;
+ }
fs_devices->opened = 1;
fs_devices->latest_dev = latest_dev;
fs_devices->total_rw_bytes = 0;
fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ fs_devices->rr_min_contig_read = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
+ fs_devices->read_devid = latest_dev->devid;
+ fs_devices->read_policy = btrfs_read_policy_to_enum(btrfs_get_mod_read_policy(),
+ &value);
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
+ fs_devices->collect_fs_stats = true;
+
+ if (value) {
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
+ fs_devices->rr_min_contig_read = value;
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_DEVID)
+ fs_devices->read_devid = value;
+ }
+#else
fs_devices->read_policy = BTRFS_READ_POLICY_PID;
+#endif
return 0;
}
@@ -1253,7 +1301,7 @@ static int devid_cmp(void *priv, const struct list_head *a,
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
- fmode_t flags, void *holder)
+ blk_mode_t flags, void *holder)
{
int ret;
@@ -1284,111 +1332,172 @@ void btrfs_release_disk_super(struct btrfs_super_block *super)
put_page(page);
}
-static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
- u64 bytenr, u64 bytenr_orig)
+struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
+ int copy_num, bool drop_cache)
{
- struct btrfs_super_block *disk_super;
+ struct btrfs_super_block *super;
struct page *page;
- void *p;
- pgoff_t index;
+ u64 bytenr, bytenr_orig;
+ struct address_space *mapping = bdev->bd_mapping;
+ int ret;
- /* make sure our super fits in the device */
- if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
- return ERR_PTR(-EINVAL);
+ bytenr_orig = btrfs_sb_offset(copy_num);
+ ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ERR_PTR(ret);
+ }
- /* make sure our super fits in the page */
- if (sizeof(*disk_super) > PAGE_SIZE)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
return ERR_PTR(-EINVAL);
- /* make sure our super doesn't straddle pages on disk */
- index = bytenr >> PAGE_SHIFT;
- if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
- return ERR_PTR(-EINVAL);
+ if (drop_cache) {
+ /* This should only be called with the primary sb. */
+ ASSERT(copy_num == 0);
- /* pull in the page with our super */
- page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
+ /*
+ * Drop the page of the primary superblock, so later read will
+ * always read from the device.
+ */
+ invalidate_inode_pages2_range(mapping, bytenr >> PAGE_SHIFT,
+ (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
+ }
+ page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
if (IS_ERR(page))
return ERR_CAST(page);
- p = page_address(page);
-
- /* align our pointer to the offset of the super block */
- disk_super = p + offset_in_page(bytenr);
-
- if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
- btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
- btrfs_release_disk_super(p);
+ super = page_address(page);
+ if (btrfs_super_magic(super) != BTRFS_MAGIC ||
+ btrfs_super_bytenr(super) != bytenr_orig) {
+ btrfs_release_disk_super(super);
return ERR_PTR(-EINVAL);
}
- if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
- disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
+ /*
+ * Make sure the last byte of label is properly NUL terminated. We use
+ * '%s' to print the label; if it is not properly NUL terminated we can
+ * access memory beyond the label.
+ */
+ if (super->label[0] && super->label[BTRFS_LABEL_SIZE - 1])
+ super->label[BTRFS_LABEL_SIZE - 1] = 0;
- return disk_super;
+ return super;
}
-int btrfs_forget_devices(const char *path)
+int btrfs_forget_devices(dev_t devt)
{
int ret;
mutex_lock(&uuid_mutex);
- ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
+ ret = btrfs_free_stale_devices(devt, NULL);
mutex_unlock(&uuid_mutex);
return ret;
}
+static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
+ const char *path, dev_t devt,
+ bool mount_arg_dev)
+{
+ struct btrfs_fs_devices *fs_devices;
+
+ /*
+ * Do not skip device registration for mounted devices with matching
+ * maj:min but different paths. Booting without initrd relies on
+ * /dev/root initially, later replaced with the actual root device.
+ * A successful scan ensures grub2-probe selects the correct device.
+ */
+ list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
+ struct btrfs_device *device;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+
+ if (!fs_devices->opened) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+ continue;
+ }
+
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (device->bdev && (device->bdev->bd_dev == devt) &&
+ strcmp(rcu_dereference_raw(device->name), path) != 0) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ /* Do not skip registration. */
+ return false;
+ }
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+ }
+
+ if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
+ !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
+ return true;
+
+ return false;
+}
+
/*
* Look for a btrfs signature on a device. This may be called out of the mount path
* and we are not allowed to call set_blocksize during the scan. The superblock
- * is read via pagecache
+ * is read via pagecache.
+ *
+ * With @mount_arg_dev it's a scan during mount time that will always register
+ * the device or return an error. Multi-device and seeding devices are registered
+ * in both cases.
*/
-struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
- void *holder)
+struct btrfs_device *btrfs_scan_one_device(const char *path,
+ bool mount_arg_dev)
{
struct btrfs_super_block *disk_super;
bool new_device_added = false;
struct btrfs_device *device = NULL;
- struct block_device *bdev;
- u64 bytenr, bytenr_orig;
- int ret;
+ struct file *bdev_file;
+ dev_t devt;
lockdep_assert_held(&uuid_mutex);
/*
- * we would like to check all the supers, but that would make
- * a btrfs mount succeed after a mkfs from a different FS.
- * So, we need to add a special mount option to scan for
- * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+ * Avoid an exclusive open here, as the systemd-udev may initiate the
+ * device scan which may race with the user's mount or mkfs command,
+ * resulting in failure.
+ * Since the device scan is solely for reading purposes, there is no
+ * need for an exclusive open. Additionally, the devices are read again
+ * during the mount process. It is ok to get some inconsistent
+ * values temporarily, as the device paths of the fsid are the only
+ * required information for assembling the volume.
*/
- flags |= FMODE_EXCL;
-
- bdev = blkdev_get_by_path(path, flags, holder);
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
-
- bytenr_orig = btrfs_sb_offset(0);
- ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
- if (ret)
- return ERR_PTR(ret);
+ bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev_file))
+ return ERR_CAST(bdev_file);
- disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
+ disk_super = btrfs_read_disk_super(file_bdev(bdev_file), 0, false);
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
goto error_bdev_put;
}
- device = device_list_add(path, disk_super, &new_device_added);
- if (!IS_ERR(device)) {
- if (new_device_added)
- btrfs_free_stale_devices(path, device);
+ devt = file_bdev(bdev_file)->bd_dev;
+ if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
+ btrfs_debug(NULL, "skip registering single non-seed device %s (%d:%d)",
+ path, MAJOR(devt), MINOR(devt));
+
+ btrfs_free_stale_devices(devt, NULL);
+
+ device = NULL;
+ goto free_disk_super;
}
+ device = device_list_add(path, disk_super, &new_device_added);
+ if (!IS_ERR(device) && new_device_added)
+ btrfs_free_stale_devices(device->devt, device);
+
+free_disk_super:
btrfs_release_disk_super(disk_super);
error_bdev_put:
- blkdev_put(bdev, flags);
+ bdev_fput(bdev_file);
return device;
}
@@ -1404,13 +1513,13 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
lockdep_assert_held(&device->fs_info->chunk_mutex);
- if (!find_first_extent_bit(&device->alloc_state, *start,
- &physical_start, &physical_end,
- CHUNK_ALLOCATED, NULL)) {
+ if (btrfs_find_first_extent_bit(&device->alloc_state, *start,
+ &physical_start, &physical_end,
+ CHUNK_ALLOCATED, NULL)) {
if (in_range(physical_start, *start, len) ||
in_range(*start, physical_start,
- physical_end - physical_start)) {
+ physical_end + 1 - physical_start)) {
*start = physical_end + 1;
return true;
}
@@ -1418,25 +1527,21 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
return false;
}
-static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
+static u64 dev_extent_search_start(struct btrfs_device *device)
{
switch (device->fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
- /*
- * We don't want to overwrite the superblock on the drive nor
- * any area used by the boot loader (grub for example), so we
- * make sure to start at an offset of at least 1MB.
- */
- return max_t(u64, start, SZ_1M);
+ return BTRFS_DEVICE_RANGE_RESERVED;
case BTRFS_CHUNK_ALLOC_ZONED:
/*
* We don't care about the starting region like regular
* allocator, because we anyway use/reserve the first two zones
* for superblock logging.
*/
- return ALIGN(start, device->zone_info->zone_size);
- default:
- BUG();
+ return 0;
}
}
@@ -1449,7 +1554,8 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
int ret;
bool changed = false;
- ASSERT(IS_ALIGNED(*hole_start, zone_size));
+ ASSERT(IS_ALIGNED(*hole_start, zone_size),
+ "hole_start=%llu zone_size=%llu", *hole_start, zone_size);
while (*hole_size > 0) {
pos = btrfs_find_allocatable_zones(device, *hole_start,
@@ -1484,8 +1590,9 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
return changed;
}
-/**
- * dev_extent_hole_check - check if specified hole is suitable for allocation
+/*
+ * Check if specified hole is suitable for allocation.
+ *
* @device: the device which we have the hole
* @hole_start: starting position of the hole
* @hole_size: the size of the hole
@@ -1514,6 +1621,9 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
}
switch (device->fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
/* No extra check */
break;
@@ -1528,8 +1638,6 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
continue;
}
break;
- default:
- BUG();
}
break;
@@ -1539,7 +1647,8 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
}
/*
- * find_free_dev_extent_start - find free space in the specified device
+ * Find free space in the specified device.
+ *
* @device: the device which we search the free space in
* @num_bytes: the size of the free space that we need
* @search_start: the position from which to begin the search
@@ -1547,9 +1656,8 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
* @len: the size of the free space. that we find, or the size
* of the max free space if we don't find suitable free space
*
- * this uses a pretty simple search, the expectation is that it is
- * called very infrequently and that a given device has a small number
- * of extents
+ * This does a pretty simple search, the expectation is that it is called very
+ * infrequently and that a given device has a small number of extents.
*
* @start is used to store the start of the free space if we find. But if we
* don't find suitable free space, it will be used to store the start position
@@ -1565,36 +1673,35 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
* correct usable device space, as device extent freed in current transaction
* is not reported as available.
*/
-static int find_free_dev_extent_start(struct btrfs_device *device,
- u64 num_bytes, u64 search_start, u64 *start,
- u64 *len)
+static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
+ u64 *start, u64 *len)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
struct btrfs_dev_extent *dev_extent;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
+ u64 search_start;
u64 hole_size;
u64 max_hole_start;
- u64 max_hole_size;
+ u64 max_hole_size = 0;
u64 extent_end;
u64 search_end = device->total_bytes;
int ret;
int slot;
struct extent_buffer *l;
- search_start = dev_extent_search_start(device, search_start);
+ search_start = dev_extent_search_start(device);
+ max_hole_start = search_start;
WARN_ON(device->zone_info &&
!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- max_hole_start = search_start;
- max_hole_size = 0;
-
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
again:
if (search_start >= search_end ||
test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
@@ -1603,18 +1710,18 @@ again:
}
path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- path->skip_locking = 1;
+ path->search_commit_root = true;
+ path->skip_locking = true;
key.objectid = device->devid;
- key.offset = search_start;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = search_start;
ret = btrfs_search_backwards(root, &key, path);
if (ret < 0)
goto out;
- while (1) {
+ while (search_start < search_end) {
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
@@ -1637,6 +1744,9 @@ again:
if (key.type != BTRFS_DEV_EXTENT_KEY)
goto next;
+ if (key.offset > search_end)
+ break;
+
if (key.offset > search_start) {
hole_size = key.offset - search_start;
dev_extent_hole_check(device, &search_start, &hole_size,
@@ -1697,21 +1807,16 @@ next:
else
ret = 0;
+ ASSERT(max_hole_start + max_hole_size <= search_end,
+ "max_hole_start=%llu max_hole_size=%llu search_end=%llu",
+ max_hole_start, max_hole_size, search_end);
out:
- btrfs_free_path(path);
*start = max_hole_start;
if (len)
*len = max_hole_size;
return ret;
}
-int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
- u64 *start, u64 *len)
-{
- /* FIXME use last free of some kind */
- return find_free_dev_extent_start(device, num_bytes, 0, start, len);
-}
-
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start, u64 *dev_extent_len)
@@ -1719,7 +1824,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf = NULL;
@@ -1730,15 +1835,15 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
return -ENOMEM;
key.objectid = device->devid;
- key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = start;
again:
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
ret = btrfs_previous_item(root, path, key.objectid,
BTRFS_DEV_EXTENT_KEY);
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1753,7 +1858,7 @@ again:
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
} else {
- goto out;
+ return ret;
}
*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
@@ -1761,26 +1866,23 @@ again:
ret = btrfs_del_item(trans, root, path);
if (ret == 0)
set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
-out:
- btrfs_free_path(path);
return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
- struct extent_map_tree *em_tree;
- struct extent_map *em;
struct rb_node *n;
u64 ret = 0;
- em_tree = &fs_info->mapping_tree;
- read_lock(&em_tree->lock);
- n = rb_last(&em_tree->map.rb_root);
+ read_lock(&fs_info->mapping_tree_lock);
+ n = rb_last(&fs_info->mapping_tree.rb_root);
if (n) {
- em = rb_entry(n, struct extent_map, rb_node);
- ret = em->start + em->len;
+ struct btrfs_chunk_map *map;
+
+ map = rb_entry(n, struct btrfs_chunk_map, rb_node);
+ ret = map->start + map->chunk_len;
}
- read_unlock(&em_tree->lock);
+ read_unlock(&fs_info->mapping_tree_lock);
return ret;
}
@@ -1791,7 +1893,7 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
path = btrfs_alloc_path();
if (!path)
@@ -1803,13 +1905,12 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
if (ret < 0)
- goto error;
+ return ret;
- if (ret == 0) {
+ if (unlikely(ret == 0)) {
/* Corruption */
btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
- ret = -EUCLEAN;
- goto error;
+ return -EUCLEAN;
}
ret = btrfs_previous_item(fs_info->chunk_root, path,
@@ -1822,10 +1923,7 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
path->slots[0]);
*devid_ret = found_key.offset + 1;
}
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -1836,7 +1934,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -1855,7 +1953,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
&key, sizeof(*dev_item));
btrfs_trans_release_chunk_metadata(trans);
if (ret)
- goto out;
+ return ret;
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
@@ -1880,12 +1978,8 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_fsid(dev_item);
write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
ptr, BTRFS_FSID_SIZE);
- btrfs_mark_buffer_dirty(leaf);
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
/*
@@ -1897,35 +1991,25 @@ out:
static void update_dev_time(const char *device_path)
{
struct path path;
- struct timespec64 now;
- int ret;
- ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
- if (ret)
- return;
-
- now = current_time(d_inode(path.dentry));
- inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
- path_put(&path);
+ if (!kern_path(device_path, LOOKUP_FOLLOW, &path)) {
+ vfs_utimes(&path, NULL);
+ path_put(&path);
+ }
}
-static int btrfs_rm_dev_item(struct btrfs_device *device)
+static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device)
{
struct btrfs_root *root = device->fs_info->chunk_root;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
- struct btrfs_trans_handle *trans;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- btrfs_free_path(path);
- return PTR_ERR(trans);
- }
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
@@ -1933,25 +2017,12 @@ static int btrfs_rm_dev_item(struct btrfs_device *device)
btrfs_reserve_chunk_metadata(trans, false);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
btrfs_trans_release_chunk_metadata(trans);
- if (ret) {
- if (ret > 0)
- ret = -ENOENT;
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
- goto out;
- }
-
- ret = btrfs_del_item(trans, root, path);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
- }
+ if (ret > 0)
+ return -ENOENT;
+ if (ret < 0)
+ return ret;
-out:
- btrfs_free_path(path);
- if (!ret)
- ret = btrfs_commit_transaction(trans);
- return ret;
+ return btrfs_del_item(trans, root, path);
}
/*
@@ -2034,7 +2105,7 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
down_read(&fs_info->dev_replace.rwsem);
if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
- ASSERT(num_devices > 1);
+ ASSERT(num_devices > 1, "num_devices=%llu", num_devices);
num_devices--;
}
up_read(&fs_info->dev_replace.rwsem);
@@ -2042,61 +2113,66 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
return num_devices;
}
-void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
- struct block_device *bdev,
- const char *device_path)
+static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
+ struct block_device *bdev, int copy_num)
{
struct btrfs_super_block *disk_super;
- int copy_num;
+ const size_t len = sizeof(disk_super->magic);
+ const u64 bytenr = btrfs_sb_offset(copy_num);
+ int ret;
- if (!bdev)
+ disk_super = btrfs_read_disk_super(bdev, copy_num, false);
+ if (IS_ERR(disk_super))
return;
- for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
- struct page *page;
- int ret;
-
- disk_super = btrfs_read_dev_one_super(bdev, copy_num);
- if (IS_ERR(disk_super))
- continue;
+ memset(&disk_super->magic, 0, len);
+ folio_mark_dirty(virt_to_folio(disk_super));
+ btrfs_release_disk_super(disk_super);
- if (bdev_is_zoned(bdev)) {
- btrfs_reset_sb_log_zones(bdev, copy_num);
- continue;
- }
+ ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
+ if (ret)
+ btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
+ copy_num, ret);
+}
- memset(&disk_super->magic, 0, sizeof(disk_super->magic));
+void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device)
+{
+ int copy_num;
+ struct block_device *bdev = device->bdev;
- page = virt_to_page(disk_super);
- set_page_dirty(page);
- lock_page(page);
- /* write_on_page() unlocks the page */
- ret = write_one_page(page);
- if (ret)
- btrfs_warn(fs_info,
- "error clearing superblock number %d (%d)",
- copy_num, ret);
- btrfs_release_disk_super(disk_super);
+ if (!bdev)
+ return;
+ for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
+ if (bdev_is_zoned(bdev))
+ btrfs_reset_sb_log_zones(bdev, copy_num);
+ else
+ btrfs_scratch_superblock(fs_info, bdev, copy_num);
}
/* Notify udev that device has changed */
btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
/* Update ctime/mtime for device path for libblkid */
- update_dev_time(device_path);
+ update_dev_time(rcu_dereference_raw(device->name));
}
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
- struct block_device **bdev, fmode_t *mode)
+ struct file **bdev_file)
{
+ struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
u64 num_devices;
int ret = 0;
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
+ return -EINVAL;
+ }
+
/*
* The device list in fs_devices is accessed without locks (neither
* uuid_mutex nor device_list_mutex) as it won't change on a mounted
@@ -2106,7 +2182,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
if (ret)
- goto out;
+ return ret;
device = btrfs_find_device(fs_info->fs_devices, args);
if (!device) {
@@ -2114,27 +2190,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
else
ret = -ENOENT;
- goto out;
+ return ret;
}
if (btrfs_pinned_by_swapfile(fs_info, device)) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"cannot remove device %s (devid %llu) due to active swapfile",
- rcu_str_deref(device->name), device->devid);
- ret = -ETXTBSY;
- goto out;
+ btrfs_dev_name(device), device->devid);
+ return -ETXTBSY;
}
- if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
- ret = BTRFS_ERROR_DEV_TGT_REPLACE;
- goto out;
- }
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
+ return BTRFS_ERROR_DEV_TGT_REPLACE;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
- fs_info->fs_devices->rw_devices == 1) {
- ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
- goto out;
- }
+ fs_info->fs_devices->rw_devices == 1)
+ return BTRFS_ERROR_DEV_ONLY_WRITABLE;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
@@ -2144,19 +2215,25 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
}
ret = btrfs_shrink_device(device, 0);
- if (!ret)
- btrfs_reada_remove_dev(device);
if (ret)
goto error_undo;
- /*
- * TODO: the superblock still includes this device in its num_devices
- * counter although write_all_supers() is not locked out. This
- * could give a filesystem state which requires a degraded mount.
- */
- ret = btrfs_rm_dev_item(device);
- if (ret)
+ trans = btrfs_start_transaction(fs_info->chunk_root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
goto error_undo;
+ }
+
+ ret = btrfs_rm_dev_item(trans, device);
+ if (unlikely(ret)) {
+ /* Any error in dev item removal is critical */
+ btrfs_crit(fs_info,
+ "failed to remove device item for devid %llu: %d",
+ device->devid, ret);
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
btrfs_scrub_cancel_dev(device);
@@ -2191,7 +2268,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
btrfs_assign_next_active_device(device, NULL);
- if (device->bdev) {
+ if (device->bdev_file) {
cur_devices->open_devices--;
/* remove sysfs entry */
btrfs_sysfs_remove_device(device);
@@ -2207,21 +2284,19 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
* free the device.
*
* We cannot call btrfs_close_bdev() here because we're holding the sb
- * write lock, and blkdev_put() will pull in the ->open_mutex on the
- * block device and it's dependencies. Instead just flush the device
- * and let the caller do the final blkdev_put.
+ * write lock, and bdev_fput() on the block device will pull in the
+ * ->open_mutex on the block device and its dependencies. Instead
+ * just flush the device and let the caller do the final bdev_release.
*/
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
- btrfs_scratch_superblocks(fs_info, device->bdev,
- device->name->str);
+ btrfs_scratch_superblocks(fs_info, device);
if (device->bdev) {
sync_blockdev(device->bdev);
invalidate_bdev(device->bdev);
}
}
- *bdev = device->bdev;
- *mode = device->mode;
+ *bdev_file = device->bdev_file;
synchronize_rcu();
btrfs_free_device(device);
@@ -2234,16 +2309,16 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
*/
if (cur_devices->num_devices == 0) {
list_del_init(&cur_devices->seed_list);
- ASSERT(cur_devices->opened == 1);
+ ASSERT(cur_devices->opened == 1, "opened=%d", cur_devices->opened);
cur_devices->opened--;
free_fs_devices(cur_devices);
}
-out:
+ ret = btrfs_commit_transaction(trans);
+
return ret;
error_undo:
- btrfs_reada_undo_remove_dev(device);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_add(&device->dev_alloc_list,
@@ -2251,7 +2326,7 @@ error_undo:
device->fs_devices->rw_devices++;
mutex_unlock(&fs_info->chunk_mutex);
}
- goto out;
+ return ret;
}
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
@@ -2327,16 +2402,15 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
- tgtdev->name->str);
+ btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev);
btrfs_close_bdev(tgtdev);
synchronize_rcu();
btrfs_free_device(tgtdev);
}
-/**
- * Populate args from device at path
+/*
+ * Populate args from device at path.
*
* @fs_info: the filesystem
* @args: the args to populate
@@ -2358,7 +2432,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
const char *path)
{
struct btrfs_super_block *disk_super;
- struct block_device *bdev;
+ struct file *bdev_file;
int ret;
if (!path || !path[0])
@@ -2375,10 +2449,13 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
return -ENOMEM;
}
- ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
- &bdev, &disk_super);
- if (ret)
+ ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
+ &bdev_file, &disk_super);
+ if (ret) {
+ btrfs_put_dev_args_from_path(args);
return ret;
+ }
+
args->devid = btrfs_stack_device_id(&disk_super->dev_item);
memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
if (btrfs_fs_incompat(fs_info, METADATA_UUID))
@@ -2386,7 +2463,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
else
memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
btrfs_release_disk_super(disk_super);
- blkdev_put(bdev, FMODE_READ);
+ bdev_fput(bdev_file);
return 0;
}
@@ -2429,29 +2506,23 @@ struct btrfs_device *btrfs_find_device_by_devspec(
return device;
}
-/*
- * does all the dirty work required for changing file system's UUID.
- */
-static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
+static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
struct btrfs_fs_devices *seed_devices;
- struct btrfs_super_block *disk_super = fs_info->super_copy;
- struct btrfs_device *device;
- u64 super_flags;
lockdep_assert_held(&uuid_mutex);
if (!fs_devices->seeding)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/*
* Private copy of the seed devices, anchored at
* fs_info->fs_devices->seed_list
*/
- seed_devices = alloc_fs_devices(NULL, NULL);
+ seed_devices = alloc_fs_devices(NULL);
if (IS_ERR(seed_devices))
- return PTR_ERR(seed_devices);
+ return seed_devices;
/*
* It's necessary to retain a copy of the original seed fs_devices in
@@ -2462,7 +2533,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
old_devices = clone_fs_devices(fs_devices);
if (IS_ERR(old_devices)) {
kfree(seed_devices);
- return PTR_ERR(old_devices);
+ return old_devices;
}
list_add(&old_devices->fs_list, &fs_uuids);
@@ -2473,7 +2544,41 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&seed_devices->alloc_list);
mutex_init(&seed_devices->device_list_mutex);
- mutex_lock(&fs_devices->device_list_mutex);
+ return seed_devices;
+}
+
+/*
+ * Splice seed devices into the sprout fs_devices.
+ * Generate a new fsid for the sprouted read-write filesystem.
+ */
+static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
+ struct btrfs_fs_devices *seed_devices)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_super_block *disk_super = fs_info->super_copy;
+ struct btrfs_device *device;
+ u64 super_flags;
+
+ /*
+ * We are updating the fsid; the thread leading to device_list_add()
+ * could race, so uuid_mutex is needed.
+ */
+ lockdep_assert_held(&uuid_mutex);
+
+ /*
+ * The threads listed below may traverse dev_list but can do that without
+ * device_list_mutex:
+ * - All device ops and balance - as we are in btrfs_exclop_start.
+ * - Various dev_list readers - are using RCU.
+ * - btrfs_ioctl_fitrim() - is using RCU.
+ *
+ * The following read-only threads do use device_list_mutex:
+ * - Readonly scrub btrfs_scrub_dev()
+ * - Readonly scrub btrfs_scrub_progress()
+ * - btrfs_get_dev_stats()
+ */
+ lockdep_assert_held(&fs_devices->device_list_mutex);
+
list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
synchronize_rcu);
list_for_each_entry(device, &seed_devices->devices, dev_list)
@@ -2489,13 +2594,10 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
generate_random_uuid(fs_devices->fsid);
memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
- mutex_unlock(&fs_devices->device_list_mutex);
super_flags = btrfs_super_flags(disk_super) &
~BTRFS_SUPER_FLAG_SEEDING;
btrfs_set_super_flags(disk_super, super_flags);
-
- return 0;
}
/*
@@ -2506,7 +2608,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
BTRFS_DEV_LOOKUP_ARGS(args);
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->chunk_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_dev_item *dev_item;
struct btrfs_device *device;
@@ -2520,15 +2622,15 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
return -ENOMEM;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
- key.offset = 0;
key.type = BTRFS_DEV_ITEM_KEY;
+ key.offset = 0;
while (1) {
btrfs_reserve_chunk_metadata(trans, false);
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
btrfs_trans_release_chunk_metadata(trans);
if (ret < 0)
- goto error;
+ return ret;
leaf = path->nodes[0];
next_slot:
@@ -2537,7 +2639,7 @@ next_slot:
if (ret > 0)
break;
if (ret < 0)
- goto error;
+ return ret;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
@@ -2561,62 +2663,61 @@ next_slot:
device = btrfs_find_device(fs_info->fs_devices, &args);
BUG_ON(!device); /* Logic error */
- if (device->fs_devices->seeding) {
+ if (device->fs_devices->seeding)
btrfs_set_device_generation(leaf, dev_item,
device->generation);
- btrfs_mark_buffer_dirty(leaf);
- }
path->slots[0]++;
goto next_slot;
}
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
struct btrfs_root *root = fs_info->dev_root;
- struct request_queue *q;
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
- struct block_device *bdev;
+ struct file *bdev_file;
struct super_block *sb = fs_info->sb;
- struct rcu_string *name;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_fs_devices *seed_devices = NULL;
u64 orig_super_total_bytes;
u64 orig_super_num_devices;
- int seeding_dev = 0;
int ret = 0;
+ bool seeding_dev = false;
bool locked = false;
if (sb_rdonly(sb) && !fs_devices->seeding)
return -EROFS;
- bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
- fs_info->bdev_holder);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
+ fs_info->sb, &fs_holder_ops);
+ if (IS_ERR(bdev_file))
+ return PTR_ERR(bdev_file);
+
+ if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) {
+ ret = -EINVAL;
+ goto error;
+ }
- if (!btrfs_check_device_zone_type(fs_info, bdev)) {
+ if (bdev_nr_bytes(file_bdev(bdev_file)) <= BTRFS_DEVICE_RANGE_RESERVED) {
ret = -EINVAL;
goto error;
}
if (fs_devices->seeding) {
- seeding_dev = 1;
+ seeding_dev = true;
down_write(&sb->s_umount);
mutex_lock(&uuid_mutex);
locked = true;
}
- sync_blockdev(bdev);
+ sync_blockdev(file_bdev(bdev_file));
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
- if (device->bdev == bdev) {
+ if (device->bdev == file_bdev(bdev_file)) {
ret = -EEXIST;
rcu_read_unlock();
goto error;
@@ -2624,24 +2725,21 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
rcu_read_unlock();
- device = btrfs_alloc_device(fs_info, NULL, NULL);
+ device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
if (IS_ERR(device)) {
/* we can safely leave the fs_devices entry around */
ret = PTR_ERR(device);
goto error;
}
- name = rcu_string_strdup(device_path, GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
- goto error_free_device;
- }
- rcu_assign_pointer(device->name, name);
-
device->fs_info = fs_info;
- device->bdev = bdev;
+ device->bdev_file = bdev_file;
+ device->bdev = file_bdev(bdev_file);
+ ret = lookup_bdev(device_path, &device->devt);
+ if (ret)
+ goto error_free_device;
- ret = btrfs_get_dev_zone_info(device);
+ ret = btrfs_get_dev_zone_info(device, false);
if (ret)
goto error_free_device;
@@ -2651,36 +2749,39 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
goto error_free_zone;
}
- q = bdev_get_queue(bdev);
set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
device->generation = trans->transid;
device->io_width = fs_info->sectorsize;
device->io_align = fs_info->sectorsize;
device->sector_size = fs_info->sectorsize;
device->total_bytes =
- round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
+ round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
device->disk_total_bytes = device->total_bytes;
device->commit_total_bytes = device->total_bytes;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
- device->mode = FMODE_EXCL;
device->dev_stats_valid = 1;
- set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
+ set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE);
if (seeding_dev) {
- btrfs_clear_sb_rdonly(sb);
- ret = btrfs_prepare_sprout(fs_info);
- if (ret) {
+ /* GFP_KERNEL allocation must not be under device_list_mutex */
+ seed_devices = btrfs_init_sprout(fs_info);
+ if (IS_ERR(seed_devices)) {
+ ret = PTR_ERR(seed_devices);
btrfs_abort_transaction(trans, ret);
goto error_trans;
}
+ }
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ if (seeding_dev) {
+ btrfs_setup_sprout(fs_info, seed_devices);
btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
device);
}
device->fs_devices = fs_devices;
- mutex_lock(&fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
list_add_rcu(&device->dev_list, &fs_devices->devices);
list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
@@ -2692,7 +2793,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
- if (!blk_queue_nonrot(q))
+ if (!bdev_nonrot(device->bdev))
fs_devices->rotating = true;
orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
@@ -2721,28 +2822,28 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
mutex_lock(&fs_info->chunk_mutex);
ret = init_first_rw_device(trans);
mutex_unlock(&fs_info->chunk_mutex);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
}
ret = btrfs_add_dev_item(trans, device);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
if (seeding_dev) {
ret = btrfs_finish_sprout(trans);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
}
/*
* fs_devices now represents the newly sprouted filesystem and
- * its fsid has been changed by btrfs_prepare_sprout
+ * its fsid has been changed by btrfs_setup_sprout().
*/
btrfs_sysfs_update_sprout_fsid(fs_devices);
}
@@ -2779,7 +2880,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
* We can ignore the return value as it typically returns -EINVAL and
* only succeeds if the device was an alien.
*/
- btrfs_forget_devices(device_path);
+ btrfs_forget_devices(device->devt);
/* Update ctime/mtime for blkid or udev */
update_dev_time(device_path);
@@ -2805,8 +2906,6 @@ error_sysfs:
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
- if (seeding_dev)
- btrfs_set_sb_rdonly(sb);
if (trans)
btrfs_end_transaction(trans);
error_free_zone:
@@ -2814,7 +2913,7 @@ error_free_zone:
error_free_device:
btrfs_free_device(device);
error:
- blkdev_put(bdev, FMODE_EXCL);
+ bdev_fput(bdev_file);
if (locked) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
@@ -2826,7 +2925,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = device->fs_info->chunk_root;
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
@@ -2842,12 +2941,10 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
- goto out;
+ return ret;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
+ if (ret > 0)
+ return -ENOENT;
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
@@ -2861,10 +2958,6 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
btrfs_device_get_disk_total_bytes(device));
btrfs_set_device_bytes_used(leaf, dev_item,
btrfs_device_get_bytes_used(device));
- btrfs_mark_buffer_dirty(leaf);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -2895,6 +2988,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
btrfs_set_super_total_bytes(super_copy,
round_down(old_total + diff, fs_info->sectorsize));
device->fs_devices->total_rw_bytes += diff;
+ atomic64_add(diff, &fs_info->free_chunk_space);
btrfs_device_set_total_bytes(device, new_size);
btrfs_device_set_disk_total_bytes(device, new_size);
@@ -2916,7 +3010,7 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->chunk_root;
int ret;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
path = btrfs_alloc_path();
@@ -2924,25 +3018,26 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
return -ENOMEM;
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- key.offset = chunk_offset;
key.type = BTRFS_CHUNK_ITEM_KEY;
+ key.offset = chunk_offset;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
- else if (ret > 0) { /* Logic error or corruption */
- btrfs_handle_fs_error(fs_info, -ENOENT,
- "Failed lookup while freeing chunk.");
- ret = -ENOENT;
- goto out;
+ return ret;
+ if (unlikely(ret > 0)) {
+ /* Logic error or corruption */
+ btrfs_err(fs_info, "failed to lookup chunk %llu when freeing",
+ chunk_offset);
+ btrfs_abort_transaction(trans, -ENOENT);
+ return -EUCLEAN;
}
ret = btrfs_del_item(trans, root, path);
- if (ret < 0)
- btrfs_handle_fs_error(fs_info, ret,
- "Failed to delete chunk item.");
-out:
- btrfs_free_path(path);
+ if (unlikely(ret < 0)) {
+ btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
return ret;
}
@@ -2992,44 +3087,118 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
return ret;
}
+struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node;
+ struct rb_node *prev = NULL;
+ struct rb_node *orig_prev;
+ struct btrfs_chunk_map *map;
+ struct btrfs_chunk_map *prev_map = NULL;
+
+ while (node) {
+ map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+ prev = node;
+ prev_map = map;
+
+ if (logical < map->start) {
+ node = node->rb_left;
+ } else if (logical >= map->start + map->chunk_len) {
+ node = node->rb_right;
+ } else {
+ refcount_inc(&map->refs);
+ return map;
+ }
+ }
+
+ if (!prev)
+ return NULL;
+
+ orig_prev = prev;
+ while (prev && logical >= prev_map->start + prev_map->chunk_len) {
+ prev = rb_next(prev);
+ prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
+ }
+
+ if (!prev) {
+ prev = orig_prev;
+ prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
+ while (prev && logical < prev_map->start) {
+ prev = rb_prev(prev);
+ prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
+ }
+ }
+
+ if (prev) {
+ u64 end = logical + length;
+
+ /*
+ * Caller can pass a U64_MAX length when it wants to get any
+ * chunk starting at an offset of 'logical' or higher, so deal
+ * with underflow by resetting the end offset to U64_MAX.
+ */
+ if (end < logical)
+ end = U64_MAX;
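/*
 * Editorial example (not part of the patch): with logical = 4096 and
 * length = U64_MAX, logical + length wraps around to 4095, i.e. below
 * logical, so end is reset to U64_MAX and the overlap check below
 * effectively covers the range [logical, U64_MAX].
 */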
+
+ if (end > prev_map->start &&
+ logical < prev_map->start + prev_map->chunk_len) {
+ refcount_inc(&prev_map->refs);
+ return prev_map;
+ }
+ }
+
+ return NULL;
+}
+
+struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ struct btrfs_chunk_map *map;
+
+ read_lock(&fs_info->mapping_tree_lock);
+ map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
+ read_unlock(&fs_info->mapping_tree_lock);
+
+ return map;
+}
+
/*
- * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
+ * Find the mapping containing the given logical extent.
+ *
* @logical: Logical block offset in bytes.
* @length: Length of extent in bytes.
*
* Return: Chunk mapping or ERR_PTR.
*/
-struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length)
+struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
{
- struct extent_map_tree *em_tree;
- struct extent_map *em;
+ struct btrfs_chunk_map *map;
- em_tree = &fs_info->mapping_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, logical, length);
- read_unlock(&em_tree->lock);
+ map = btrfs_find_chunk_map(fs_info, logical, length);
- if (!em) {
- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+ if (unlikely(!map)) {
+ btrfs_crit(fs_info,
+ "unable to find chunk map for logical %llu length %llu",
logical, length);
return ERR_PTR(-EINVAL);
}
- if (em->start > logical || em->start + em->len < logical) {
+ if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
btrfs_crit(fs_info,
- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
- logical, length, em->start, em->start + em->len);
- free_extent_map(em);
+ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
+ logical, logical + length, map->start,
+ map->start + map->chunk_len);
+ btrfs_free_chunk_map(map);
return ERR_PTR(-EINVAL);
}
- /* callers are responsible for dropping em's ref. */
- return em;
+ /* Callers are responsible for dropping the reference. */
+ return map;
}
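The following is an editorial usage sketch, not part of the patch: the helper
name chunk_len_at() is invented for illustration, and it assumes only the
btrfs_get_chunk_map()/btrfs_free_chunk_map() interface and the map fields shown
above, to demonstrate the take-reference/drop-reference contract.

/* Illustrative only: return the length of the chunk covering @logical, 0 if none. */
static u64 chunk_len_at(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_chunk_map *map;
	u64 len;

	/* Returns a referenced map covering @logical, or an ERR_PTR. */
	map = btrfs_get_chunk_map(fs_info, logical, 1);
	if (IS_ERR(map))
		return 0;

	len = map->chunk_len;
	/* The lookup took a reference; the caller must drop it. */
	btrfs_free_chunk_map(map);
	return len;
}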
static int remove_chunk_item(struct btrfs_trans_handle *trans,
- struct map_lookup *map, u64 chunk_offset)
+ struct btrfs_chunk_map *map, u64 chunk_offset)
{
int i;
@@ -3054,23 +3223,22 @@ static int remove_chunk_item(struct btrfs_trans_handle *trans,
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
u64 dev_extent_len = 0;
int i, ret = 0;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
- em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
- if (IS_ERR(em)) {
+ map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
+ if (IS_ERR(map)) {
/*
* This is a logic error, but we don't want to just rely on the
* user having built with ASSERT enabled, so if ASSERT doesn't
* do anything we still error out.
*/
- ASSERT(0);
- return PTR_ERR(em);
+ DEBUG_WARN("errr %ld reading chunk map at offset %llu",
+ PTR_ERR(map), chunk_offset);
+ return PTR_ERR(map);
}
- map = em->map_lookup;
/*
* First delete the device extent items from the devices btree.
@@ -3088,7 +3256,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
ret = btrfs_free_dev_extent(trans, device,
map->stripes[i].physical,
&dev_extent_len);
- if (ret) {
+ if (unlikely(ret)) {
mutex_unlock(&fs_devices->device_list_mutex);
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3100,6 +3268,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
device->bytes_used - dev_extent_len);
atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
btrfs_clear_space_info_full(fs_info);
+
+ if (list_empty(&device->post_commit_list)) {
+ list_add_tail(&device->post_commit_list,
+ &trans->transaction->dev_update_list);
+ }
+
mutex_unlock(&fs_info->chunk_mutex);
}
}
@@ -3149,8 +3323,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
if (ret == -ENOSPC) {
const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
struct btrfs_block_group *sys_bg;
+ struct btrfs_space_info *space_info;
+
+ space_info = btrfs_find_space_info(fs_info, sys_flags);
+ if (unlikely(!space_info)) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
- sys_bg = btrfs_create_chunk(trans, sys_flags);
+ sys_bg = btrfs_create_chunk(trans, space_info, sys_flags);
if (IS_ERR(sys_bg)) {
ret = PTR_ERR(sys_bg);
btrfs_abort_transaction(trans, ret);
@@ -3158,26 +3340,26 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
ret = remove_chunk_item(trans, map, chunk_offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- } else if (ret) {
+ } else if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
+ trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
- if (ret) {
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3192,8 +3374,8 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
*/
btrfs_trans_release_chunk_metadata(trans);
- ret = btrfs_remove_block_group(trans, chunk_offset, em);
- if (ret) {
+ ret = btrfs_remove_block_group(trans, map);
+ if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3204,11 +3386,12 @@ out:
trans->removing_chunk = false;
}
/* once for us */
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
-int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
+int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ bool verbose)
{
struct btrfs_root *root = fs_info->chunk_root;
struct btrfs_trans_handle *trans;
@@ -3216,6 +3399,12 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
u64 length;
int ret;
+ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+ btrfs_err(fs_info,
+ "relocate: not supported on extent tree v2 yet");
+ return -EINVAL;
+ }
+
/*
* Prevent races with automatic removal of unused block groups.
* After we relocate and before we remove the chunk with offset
@@ -3232,10 +3421,17 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
/* step one, relocate all the extents inside this chunk */
btrfs_scrub_pause(fs_info);
- ret = btrfs_relocate_block_group(fs_info, chunk_offset);
+ ret = btrfs_relocate_block_group(fs_info, chunk_offset, true);
btrfs_scrub_continue(fs_info);
- if (ret)
+ if (ret) {
+ /*
+ * If we had a transaction abort, stop all running scrubs.
+ * See transaction.c:cleanup_transaction() why we do it here.
+ */
+ if (BTRFS_FS_ERROR(fs_info))
+ btrfs_scrub_cancel(fs_info);
return ret;
+ }
block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
if (!block_group)
@@ -3278,7 +3474,7 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *chunk_root = fs_info->chunk_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_chunk *chunk;
struct btrfs_key key;
@@ -3294,24 +3490,34 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
again:
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
+ key.offset = (u64)-1;
while (1) {
mutex_lock(&fs_info->reclaim_bgs_lock);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0) {
mutex_unlock(&fs_info->reclaim_bgs_lock);
- goto error;
+ return ret;
+ }
+ if (unlikely(ret == 0)) {
+ /*
+ * On the first search we would find chunk tree with
+ * offset -1, which is not possible. On subsequent
+ * loops this would find an existing item on an invalid
+ * offset (one less than the previous one, wrong
+ * alignment and size).
+ */
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ return -EUCLEAN;
}
- BUG_ON(ret == 0); /* Corruption */
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
if (ret)
mutex_unlock(&fs_info->reclaim_bgs_lock);
if (ret < 0)
- goto error;
+ return ret;
if (ret > 0)
break;
@@ -3324,7 +3530,8 @@ again:
btrfs_release_path(path);
if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = btrfs_relocate_chunk(fs_info, found_key.offset);
+ ret = btrfs_relocate_chunk(fs_info, found_key.offset,
+ true);
if (ret == -ENOSPC)
failed++;
else
@@ -3344,8 +3551,6 @@ again:
} else if (WARN_ON(failed && retried)) {
ret = -ENOSPC;
}
-error:
- btrfs_free_path(path);
return ret;
}
@@ -3391,6 +3596,44 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
return 0;
}
+static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
+ const struct btrfs_disk_balance_args *disk)
+{
+ memset(cpu, 0, sizeof(*cpu));
+
+ cpu->profiles = le64_to_cpu(disk->profiles);
+ cpu->usage = le64_to_cpu(disk->usage);
+ cpu->devid = le64_to_cpu(disk->devid);
+ cpu->pstart = le64_to_cpu(disk->pstart);
+ cpu->pend = le64_to_cpu(disk->pend);
+ cpu->vstart = le64_to_cpu(disk->vstart);
+ cpu->vend = le64_to_cpu(disk->vend);
+ cpu->target = le64_to_cpu(disk->target);
+ cpu->flags = le64_to_cpu(disk->flags);
+ cpu->limit = le64_to_cpu(disk->limit);
+ cpu->stripes_min = le32_to_cpu(disk->stripes_min);
+ cpu->stripes_max = le32_to_cpu(disk->stripes_max);
+}
+
+static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
+ const struct btrfs_balance_args *cpu)
+{
+ memset(disk, 0, sizeof(*disk));
+
+ disk->profiles = cpu_to_le64(cpu->profiles);
+ disk->usage = cpu_to_le64(cpu->usage);
+ disk->devid = cpu_to_le64(cpu->devid);
+ disk->pstart = cpu_to_le64(cpu->pstart);
+ disk->pend = cpu_to_le64(cpu->pend);
+ disk->vstart = cpu_to_le64(cpu->vstart);
+ disk->vend = cpu_to_le64(cpu->vend);
+ disk->target = cpu_to_le64(cpu->target);
+ disk->flags = cpu_to_le64(cpu->flags);
+ disk->limit = cpu_to_le64(cpu->limit);
+ disk->stripes_min = cpu_to_le32(cpu->stripes_min);
+ disk->stripes_max = cpu_to_le32(cpu->stripes_max);
+}
+
static int insert_balance_item(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl)
{
@@ -3433,10 +3676,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
btrfs_set_balance_meta(leaf, item, &disk_bargs);
btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
btrfs_set_balance_sys(leaf, item, &disk_bargs);
-
btrfs_set_balance_flags(leaf, item, bctl->flags);
-
- btrfs_mark_buffer_dirty(leaf);
out:
btrfs_free_path(path);
err = btrfs_commit_transaction(trans);
@@ -3535,7 +3775,7 @@ static void reset_balance_state(struct btrfs_fs_info *fs_info)
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
int ret;
- BUG_ON(!fs_info->balance_ctl);
+ ASSERT(fs_info->balance_ctl);
spin_lock(&fs_info->balance_lock);
fs_info->balance_ctl = NULL;
@@ -3551,26 +3791,25 @@ static void reset_balance_state(struct btrfs_fs_info *fs_info)
* Balance filters. Return 1 if chunk should be filtered out
* (should not be balanced).
*/
-static int chunk_profiles_filter(u64 chunk_type,
- struct btrfs_balance_args *bargs)
+static bool chunk_profiles_filter(u64 chunk_type, struct btrfs_balance_args *bargs)
{
chunk_type = chunk_to_extended(chunk_type) &
BTRFS_EXTENDED_PROFILE_MASK;
if (bargs->profiles & chunk_type)
- return 0;
+ return false;
- return 1;
+ return true;
}
-static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used;
u64 user_thresh_min;
u64 user_thresh_max;
- int ret = 1;
+ bool ret = true;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = cache->used;
@@ -3578,30 +3817,28 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
if (bargs->usage_min == 0)
user_thresh_min = 0;
else
- user_thresh_min = div_factor_fine(cache->length,
- bargs->usage_min);
+ user_thresh_min = mult_perc(cache->length, bargs->usage_min);
if (bargs->usage_max == 0)
user_thresh_max = 1;
else if (bargs->usage_max > 100)
user_thresh_max = cache->length;
else
- user_thresh_max = div_factor_fine(cache->length,
- bargs->usage_max);
+ user_thresh_max = mult_perc(cache->length, bargs->usage_max);
if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
- ret = 0;
+ ret = false;
btrfs_put_block_group(cache);
return ret;
}
-static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
- u64 chunk_offset, struct btrfs_balance_args *bargs)
+static bool chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
- int ret = 1;
+ bool ret = true;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = cache->used;
@@ -3611,18 +3848,17 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
else if (bargs->usage > 100)
user_thresh = cache->length;
else
- user_thresh = div_factor_fine(cache->length, bargs->usage);
+ user_thresh = mult_perc(cache->length, bargs->usage);
if (chunk_used < user_thresh)
- ret = 0;
+ ret = false;
btrfs_put_block_group(cache);
return ret;
}
-static int chunk_devid_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- struct btrfs_balance_args *bargs)
+static bool chunk_devid_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_stripe *stripe;
int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3631,10 +3867,10 @@ static int chunk_devid_filter(struct extent_buffer *leaf,
for (i = 0; i < num_stripes; i++) {
stripe = btrfs_stripe_nr(chunk, i);
if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static u64 calc_data_stripes(u64 type, int num_stripes)
@@ -3647,9 +3883,8 @@ static u64 calc_data_stripes(u64 type, int num_stripes)
}
/* [pstart, pend) */
-static int chunk_drange_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- struct btrfs_balance_args *bargs)
+static bool chunk_drange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_stripe *stripe;
int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3660,7 +3895,7 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
int i;
if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
- return 0;
+ return false;
type = btrfs_chunk_type(leaf, chunk);
factor = calc_data_stripes(type, num_stripes);
@@ -3676,56 +3911,53 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
if (stripe_offset < bargs->pend &&
stripe_offset + stripe_length > bargs->pstart)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
/* [vstart, vend) */
-static int chunk_vrange_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static bool chunk_vrange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ u64 chunk_offset, struct btrfs_balance_args *bargs)
{
if (chunk_offset < bargs->vend &&
chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
/* at least part of the chunk is inside this vrange */
- return 0;
+ return false;
- return 1;
+ return true;
}
-static int chunk_stripes_range_filter(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk,
- struct btrfs_balance_args *bargs)
+static bool chunk_stripes_range_filter(struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk,
+ struct btrfs_balance_args *bargs)
{
int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
if (bargs->stripes_min <= num_stripes
&& num_stripes <= bargs->stripes_max)
- return 0;
+ return false;
- return 1;
+ return true;
}
-static int chunk_soft_convert_filter(u64 chunk_type,
- struct btrfs_balance_args *bargs)
+static bool chunk_soft_convert_filter(u64 chunk_type, struct btrfs_balance_args *bargs)
{
if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
- return 0;
+ return false;
chunk_type = chunk_to_extended(chunk_type) &
BTRFS_EXTENDED_PROFILE_MASK;
if (bargs->target == chunk_type)
- return 1;
+ return true;
- return 0;
+ return false;
}
-static int should_balance_chunk(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 chunk_offset)
+static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
@@ -3735,7 +3967,7 @@ static int should_balance_chunk(struct extent_buffer *leaf,
/* type filter */
if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
(bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
- return 0;
+ return false;
}
if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
@@ -3748,46 +3980,46 @@ static int should_balance_chunk(struct extent_buffer *leaf,
/* profiles filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
chunk_profiles_filter(chunk_type, bargs)) {
- return 0;
+ return false;
}
/* usage filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
chunk_usage_filter(fs_info, chunk_offset, bargs)) {
- return 0;
+ return false;
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
- return 0;
+ return false;
}
/* devid filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
chunk_devid_filter(leaf, chunk, bargs)) {
- return 0;
+ return false;
}
/* drange filter, makes sense only with devid filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
chunk_drange_filter(leaf, chunk, bargs)) {
- return 0;
+ return false;
}
/* vrange filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
- return 0;
+ return false;
}
/* stripes filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
chunk_stripes_range_filter(leaf, chunk, bargs)) {
- return 0;
+ return false;
}
/* soft profile changing mode */
if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
chunk_soft_convert_filter(chunk_type, bargs)) {
- return 0;
+ return false;
}
/*
@@ -3795,7 +4027,7 @@ static int should_balance_chunk(struct extent_buffer *leaf,
*/
if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
if (bargs->limit == 0)
- return 0;
+ return false;
else
bargs->limit--;
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
@@ -3805,12 +4037,12 @@ static int should_balance_chunk(struct extent_buffer *leaf,
* about the count of all chunks that satisfy the filters.
*/
if (bargs->limit_max == 0)
- return 0;
+ return false;
else
bargs->limit_max--;
}
- return 1;
+ return true;
}
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
@@ -3819,7 +4051,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
struct btrfs_root *chunk_root = fs_info->chunk_root;
u64 chunk_type;
struct btrfs_chunk *chunk;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf;
@@ -3857,8 +4089,8 @@ again:
bctl->sys.limit = limit_sys;
}
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
+ key.offset = (u64)-1;
while (1) {
if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
@@ -3962,7 +4194,7 @@ again:
}
}
- ret = btrfs_relocate_chunk(fs_info, found_key.offset);
+ ret = btrfs_relocate_chunk(fs_info, found_key.offset, true);
mutex_unlock(&fs_info->reclaim_bgs_lock);
if (ret == -ENOSPC) {
enospc_errors++;
@@ -3990,7 +4222,6 @@ loop:
goto again;
}
error:
- btrfs_free_path(path);
if (enospc_errors) {
btrfs_info(fs_info, "%d enospc errors during balance",
enospc_errors);
@@ -4001,12 +4232,13 @@ error:
return ret;
}
-/**
- * alloc_profile_is_valid - see if a given profile is valid and reduced
- * @flags: profile to validate
- * @extended: if true @flags is treated as an extended profile
+/*
+ * See if a given profile is valid and reduced.
+ *
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
*/
-static int alloc_profile_is_valid(u64 flags, int extended)
+static int alloc_profile_is_valid(u64 flags, bool extended)
{
u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -4024,14 +4256,6 @@ static int alloc_profile_is_valid(u64 flags, int extended)
return has_single_bit_set(flags);
}
-static inline int balance_need_close(struct btrfs_fs_info *fs_info)
-{
- /* cancel requested || normal exit path */
- return atomic_read(&fs_info->balance_cancel_req) ||
- (atomic_read(&fs_info->balance_pause_req) == 0 &&
- atomic_read(&fs_info->balance_cancel_req) == 0);
-}
-
/*
* Validate target profile against allowed profiles and return true if it's OK.
* Otherwise print the error message and return false.
@@ -4043,13 +4267,6 @@ static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
return true;
- if (fs_info->sectorsize < PAGE_SIZE &&
- bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- btrfs_err(fs_info,
- "RAID56 is not yet supported for sectorsize %u with page size %lu",
- fs_info->sectorsize, PAGE_SIZE);
- return false;
- }
/* Profile is valid and does not have bits outside of the allowed set */
if (alloc_profile_is_valid(bargs->target, 1) &&
(bargs->target & ~allowed) == 0)
@@ -4162,7 +4379,7 @@ static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
u32 size_buf = 1024;
char tmp_buf[192] = {'\0'};
- char *buf;
+ char AUTO_KFREE(buf);
char *bp;
u32 size_bp = size_buf;
int ret;
@@ -4210,12 +4427,10 @@ out_overflow:
btrfs_info(fs_info, "balance: %s %s",
(bctl->flags & BTRFS_BALANCE_RESUME) ?
"resume" : "start", buf);
-
- kfree(buf);
}
/*
- * Should be called with balance mutexe held
+ * Should be called with balance mutex held
*/
int btrfs_balance(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl,
@@ -4228,6 +4443,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
u64 num_devices;
unsigned seq;
bool reducing_redundancy;
+ bool paused = false;
int i;
if (btrfs_fs_closing(fs_info) ||
@@ -4355,8 +4571,11 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
ret = __btrfs_balance(fs_info);
mutex_lock(&fs_info->balance_mutex);
- if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
+ if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
btrfs_info(fs_info, "balance: paused");
+ btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
+ paused = true;
+ }
/*
* Balance can be canceled by:
*
@@ -4384,8 +4603,8 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
btrfs_update_ioctl_balance_args(fs_info, bargs);
}
- if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
- balance_need_close(fs_info)) {
+ /* We didn't pause, we can clean everything up. */
+ if (!paused) {
reset_balance_state(fs_info);
btrfs_exclop_finish(fs_info);
}
@@ -4408,6 +4627,8 @@ static int balance_kthread(void *data)
struct btrfs_fs_info *fs_info = data;
int ret = 0;
+ guard(super_write)(fs_info->sb);
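/*
 * Editorial note (not part of the patch): guard(super_write) is a scope-based
 * lock guard (cleanup.h style); it takes the superblock's write-side lock
 * (presumably sb->s_umount) here and drops it automatically when
 * balance_kthread() returns, so no explicit unlock is needed.
 */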
+
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl)
ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
@@ -4432,6 +4653,11 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
return 0;
}
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED,
+ "exclusive_operation=%d", fs_info->exclusive_operation);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
+ spin_unlock(&fs_info->super_lock);
/*
* A ro->rw remount sequence should continue with the paused balance
* regardless of who pauses it, system or the user as of now, so set
@@ -4450,7 +4676,7 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
struct btrfs_balance_control *bctl;
struct btrfs_balance_item *item;
struct btrfs_disk_balance_args disk_bargs;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
int ret;
@@ -4465,17 +4691,14 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0) { /* ret = -ENOENT; */
- ret = 0;
- goto out;
+ return 0;
}
bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
- if (!bctl) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!bctl)
+ return -ENOMEM;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
@@ -4500,7 +4723,7 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
* is in a paused state and must have fs_info::balance_ctl properly
* set up.
*/
- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
+ if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
btrfs_warn(fs_info,
"balance: cannot set exclusive op status, resume manually");
@@ -4512,8 +4735,6 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
fs_info->balance_ctl = bctl;
spin_unlock(&fs_info->balance_lock);
mutex_unlock(&fs_info->balance_mutex);
-out:
- btrfs_free_path(path);
return ret;
}
@@ -4589,190 +4810,12 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
}
}
- BUG_ON(fs_info->balance_ctl ||
- test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
+ ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
atomic_dec(&fs_info->balance_cancel_req);
mutex_unlock(&fs_info->balance_mutex);
return 0;
}
-int btrfs_uuid_scan_kthread(void *data)
-{
- struct btrfs_fs_info *fs_info = data;
- struct btrfs_root *root = fs_info->tree_root;
- struct btrfs_key key;
- struct btrfs_path *path = NULL;
- int ret = 0;
- struct extent_buffer *eb;
- int slot;
- struct btrfs_root_item root_item;
- u32 item_size;
- struct btrfs_trans_handle *trans = NULL;
- bool closing = false;
-
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
- key.objectid = 0;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = 0;
-
- while (1) {
- if (btrfs_fs_closing(fs_info)) {
- closing = true;
- break;
- }
- ret = btrfs_search_forward(root, &key, path,
- BTRFS_OLDEST_GENERATION);
- if (ret) {
- if (ret > 0)
- ret = 0;
- break;
- }
-
- if (key.type != BTRFS_ROOT_ITEM_KEY ||
- (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
- key.objectid != BTRFS_FS_TREE_OBJECTID) ||
- key.objectid > BTRFS_LAST_FREE_OBJECTID)
- goto skip;
-
- eb = path->nodes[0];
- slot = path->slots[0];
- item_size = btrfs_item_size_nr(eb, slot);
- if (item_size < sizeof(root_item))
- goto skip;
-
- read_extent_buffer(eb, &root_item,
- btrfs_item_ptr_offset(eb, slot),
- (int)sizeof(root_item));
- if (btrfs_root_refs(&root_item) == 0)
- goto skip;
-
- if (!btrfs_is_empty_uuid(root_item.uuid) ||
- !btrfs_is_empty_uuid(root_item.received_uuid)) {
- if (trans)
- goto update_tree;
-
- btrfs_release_path(path);
- /*
- * 1 - subvol uuid item
- * 1 - received_subvol uuid item
- */
- trans = btrfs_start_transaction(fs_info->uuid_root, 2);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
- continue;
- } else {
- goto skip;
- }
-update_tree:
- btrfs_release_path(path);
- if (!btrfs_is_empty_uuid(root_item.uuid)) {
- ret = btrfs_uuid_tree_add(trans, root_item.uuid,
- BTRFS_UUID_KEY_SUBVOL,
- key.objectid);
- if (ret < 0) {
- btrfs_warn(fs_info, "uuid_tree_add failed %d",
- ret);
- break;
- }
- }
-
- if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
- ret = btrfs_uuid_tree_add(trans,
- root_item.received_uuid,
- BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- key.objectid);
- if (ret < 0) {
- btrfs_warn(fs_info, "uuid_tree_add failed %d",
- ret);
- break;
- }
- }
-
-skip:
- btrfs_release_path(path);
- if (trans) {
- ret = btrfs_end_transaction(trans);
- trans = NULL;
- if (ret)
- break;
- }
-
- if (key.offset < (u64)-1) {
- key.offset++;
- } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
- key.offset = 0;
- key.type = BTRFS_ROOT_ITEM_KEY;
- } else if (key.objectid < (u64)-1) {
- key.offset = 0;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.objectid++;
- } else {
- break;
- }
- cond_resched();
- }
-
-out:
- btrfs_free_path(path);
- if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans);
- if (ret)
- btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
- else if (!closing)
- set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
- up(&fs_info->uuid_tree_rescan_sem);
- return 0;
-}
-
-int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_trans_handle *trans;
- struct btrfs_root *tree_root = fs_info->tree_root;
- struct btrfs_root *uuid_root;
- struct task_struct *task;
- int ret;
-
- /*
- * 1 - root node
- * 1 - root item
- */
- trans = btrfs_start_transaction(tree_root, 2);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
- if (IS_ERR(uuid_root)) {
- ret = PTR_ERR(uuid_root);
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
- return ret;
- }
-
- fs_info->uuid_root = uuid_root;
-
- ret = btrfs_commit_transaction(trans);
- if (ret)
- return ret;
-
- down(&fs_info->uuid_tree_rescan_sem);
- task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
- if (IS_ERR(task)) {
- /* fs_info->update_uuid_tree_gen remains 0 in all error case */
- btrfs_warn(fs_info, "failed to start uuid_scan task");
- up(&fs_info->uuid_tree_rescan_sem);
- return PTR_ERR(task);
- }
-
- return 0;
-}
-
/*
* shrinking a device means finding all of the device extents past
* the new size, and then following the back refs to the chunks.
@@ -4798,6 +4841,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
u64 old_size = btrfs_device_get_total_bytes(device);
u64 diff;
u64 start;
+ u64 free_diff = 0;
new_size = round_down(new_size, fs_info->sectorsize);
start = new_size;
@@ -4823,7 +4867,19 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
btrfs_device_set_total_bytes(device, new_size);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes -= diff;
- atomic64_sub(diff, &fs_info->free_chunk_space);
+
+ /*
+ * The new free_chunk_space is new_size - used, so we have to
+ * subtract the delta of the old free_chunk_space which included
+ * old_size - used. If used > new_size then just subtract this
+ * entire device's free space.
+ */
+ if (device->bytes_used < new_size)
+ free_diff = (old_size - device->bytes_used) -
+ (new_size - device->bytes_used);
+ else
+ free_diff = old_size - device->bytes_used;
+ atomic64_sub(free_diff, &fs_info->free_chunk_space);
}
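/*
 * Editor's illustrative sketch (standalone userspace C, not part of this
 * patch): the free_diff computed above is the shrink amount clamped so we
 * never subtract more than the device's currently free bytes
 * (old_size - bytes_used). All names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t shrink_free_diff(uint64_t old_size, uint64_t new_size,
				 uint64_t bytes_used)
{
	/* used < new_size: free space shrinks by exactly old_size - new_size */
	if (bytes_used < new_size)
		return (old_size - bytes_used) - (new_size - bytes_used);
	/* otherwise all of the device's remaining free space goes away */
	return old_size - bytes_used;
}

int main(void)
{
	/* 100G device shrunk to 40G with 60G used: only 40G of free space is lost */
	printf("%llu\n", (unsigned long long)shrink_free_diff(100ULL << 30,
							      40ULL << 30,
							      60ULL << 30));
	return 0;
}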
/*
@@ -4843,8 +4899,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
again:
key.objectid = device->devid;
- key.offset = (u64)-1;
key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = (u64)-1;
do {
mutex_lock(&fs_info->reclaim_bgs_lock);
@@ -4898,7 +4954,7 @@ again:
goto done;
}
- ret = btrfs_relocate_chunk(fs_info, chunk_offset);
+ ret = btrfs_relocate_chunk(fs_info, chunk_offset, true);
mutex_unlock(&fs_info->reclaim_bgs_lock);
if (ret == -ENOSPC) {
failed++;
@@ -4930,8 +4986,8 @@ again:
mutex_lock(&fs_info->chunk_mutex);
/* Clear all state bits beyond the shrunk device size */
- clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
- CHUNK_STATE_MASK);
+ btrfs_clear_extent_bit(&device->alloc_state, new_size, (u64)-1,
+ CHUNK_STATE_MASK, NULL);
btrfs_device_set_disk_total_bytes(device, new_size);
if (list_empty(&device->post_commit_list))
@@ -4947,7 +5003,7 @@ again:
/* Now btrfs_update_device() will change the on-disk size. */
ret = btrfs_update_device(trans, device);
btrfs_trans_release_chunk_metadata(trans);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
} else {
@@ -4958,9 +5014,10 @@ done:
if (ret) {
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, old_size);
- if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes += diff;
- atomic64_add(diff, &fs_info->free_chunk_space);
+ atomic64_add(free_diff, &fs_info->free_chunk_space);
+ }
mutex_unlock(&fs_info->chunk_mutex);
}
return ret;
@@ -5057,37 +5114,29 @@ struct alloc_chunk_ctl {
u64 stripe_size;
u64 chunk_size;
int ndevs;
+ /* Space_info the block group is going to belong to. */
+ struct btrfs_space_info *space_info;
};
static void init_alloc_chunk_ctl_policy_regular(
struct btrfs_fs_devices *fs_devices,
struct alloc_chunk_ctl *ctl)
{
- u64 type = ctl->type;
+ struct btrfs_space_info *space_info;
- if (type & BTRFS_BLOCK_GROUP_DATA) {
- ctl->max_stripe_size = SZ_1G;
- ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
- } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
- /* For larger filesystems, use larger metadata chunks */
- if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
- ctl->max_stripe_size = SZ_1G;
- else
- ctl->max_stripe_size = SZ_256M;
- ctl->max_chunk_size = ctl->max_stripe_size;
- } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ctl->max_stripe_size = SZ_32M;
- ctl->max_chunk_size = 2 * ctl->max_stripe_size;
- ctl->devs_max = min_t(int, ctl->devs_max,
- BTRFS_MAX_DEVS_SYS_CHUNK);
- } else {
- BUG();
- }
+ space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
+ ASSERT(space_info);
+
+ ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
+ ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
+
+ if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
+ ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
/* We don't want a chunk larger than 10% of writable space */
- ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
+ ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
ctl->max_chunk_size);
- ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
+ ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
}
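/*
 * Editor's illustrative sketch (standalone userspace C, not kernel code):
 * the regular policy above clamps max_chunk_size to 10% of the writable
 * space. This assumes mult_perc(x, p) behaves like x * p / 100, which is
 * stated here only as an assumption for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_chunk_size(uint64_t total_rw_bytes, uint64_t requested)
{
	uint64_t ten_percent = total_rw_bytes * 10 / 100;	/* mult_perc(total_rw_bytes, 10) */

	return ten_percent < requested ? ten_percent : requested;
}

int main(void)
{
	/* A 4 GiB filesystem caps a 10 GiB requested chunk size at roughly 409 MiB. */
	printf("%llu\n", (unsigned long long)clamp_chunk_size(4ULL << 30, 10ULL << 30));
	return 0;
}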
static void init_alloc_chunk_ctl_policy_zoned(
@@ -5116,7 +5165,7 @@ static void init_alloc_chunk_ctl_policy_zoned(
}
/* We don't want a chunk larger than 10% of writable space */
- limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
+ limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
zone_size),
min_chunk_size);
ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
@@ -5140,14 +5189,15 @@ static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
ctl->ndevs = 0;
switch (fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
break;
case BTRFS_CHUNK_ALLOC_ZONED:
init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
break;
- default:
- BUG();
}
}
@@ -5265,6 +5315,9 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
ctl->stripe_size);
}
+ /* Stripe size should not go beyond 1G. */
+ ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
+
/* Align to BTRFS_STRIPE_LEN */
ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
ctl->chunk_size = ctl->stripe_size * data_stripes;
@@ -5283,20 +5336,24 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
* It should hold because:
* dev_extent_min == dev_extent_want == zone_size * dev_stripes
*/
- ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
+ ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min,
+ "ndevs=%d max_avail=%llu dev_extent_min=%llu", ctl->ndevs,
+ devices_info[ctl->ndevs - 1].max_avail, ctl->dev_extent_min);
ctl->stripe_size = zone_size;
ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
- /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */
+ /* stripe_size is fixed in zoned filesystem. Reduce ndevs instead. */
if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
ctl->stripe_size) + ctl->nparity,
ctl->dev_stripes);
ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
- ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
+ ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size,
+ "stripe_size=%llu data_stripes=%d max_chunk_size=%llu",
+ ctl->stripe_size, data_stripes, ctl->max_chunk_size);
}
ctl->chunk_size = ctl->stripe_size * data_stripes;
@@ -5329,79 +5386,151 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
switch (fs_devices->chunk_alloc_policy) {
+ default:
+ btrfs_warn_unknown_chunk_allocation(fs_devices->chunk_alloc_policy);
+ fallthrough;
case BTRFS_CHUNK_ALLOC_REGULAR:
return decide_stripe_size_regular(ctl, devices_info);
case BTRFS_CHUNK_ALLOC_ZONED:
return decide_stripe_size_zoned(ctl, devices_info);
- default:
- BUG();
}
}
+static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
+{
+ for (int i = 0; i < map->num_stripes; i++) {
+ struct btrfs_io_stripe *stripe = &map->stripes[i];
+ struct btrfs_device *device = stripe->dev;
+
+ btrfs_set_extent_bit(&device->alloc_state, stripe->physical,
+ stripe->physical + map->stripe_size - 1,
+ bits | EXTENT_NOWAIT, NULL);
+ }
+}
+
+static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
+{
+ for (int i = 0; i < map->num_stripes; i++) {
+ struct btrfs_io_stripe *stripe = &map->stripes[i];
+ struct btrfs_device *device = stripe->dev;
+
+ btrfs_clear_extent_bit(&device->alloc_state, stripe->physical,
+ stripe->physical + map->stripe_size - 1,
+ bits | EXTENT_NOWAIT, NULL);
+ }
+}
+
+void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
+{
+ write_lock(&fs_info->mapping_tree_lock);
+ rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
+ RB_CLEAR_NODE(&map->rb_node);
+ chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
+ write_unlock(&fs_info->mapping_tree_lock);
+
+ /* Once for the tree reference. */
+ btrfs_free_chunk_map(map);
+}
+
+static int btrfs_chunk_map_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_chunk_map *new_map =
+ rb_entry(new, struct btrfs_chunk_map, rb_node);
+ const struct btrfs_chunk_map *exist_map =
+ rb_entry(exist, struct btrfs_chunk_map, rb_node);
+
+ if (new_map->start == exist_map->start)
+ return 0;
+ if (new_map->start < exist_map->start)
+ return -1;
+ return 1;
+}
+
+EXPORT_FOR_TESTS
+int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
+{
+ struct rb_node *exist;
+
+ write_lock(&fs_info->mapping_tree_lock);
+ exist = rb_find_add_cached(&map->rb_node, &fs_info->mapping_tree,
+ btrfs_chunk_map_cmp);
+
+ if (exist) {
+ write_unlock(&fs_info->mapping_tree_lock);
+ return -EEXIST;
+ }
+ chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
+ chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
+ write_unlock(&fs_info->mapping_tree_lock);
+
+ return 0;
+}
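/*
 * Editor's illustrative sketch (standalone userspace C, not kernel code):
 * btrfs_add_chunk_map() keys the mapping tree purely by map->start and
 * refuses duplicates with -EEXIST. The sorted-array insert below mimics
 * that ordering contract; the kernel uses rb_find_add_cached() on an
 * rbtree instead. All demo_* names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_EEXIST 17

struct demo_map { uint64_t start; };

static int demo_add(struct demo_map *maps, int *nr, struct demo_map new_map)
{
	int i = 0;

	while (i < *nr && maps[i].start < new_map.start)
		i++;
	if (i < *nr && maps[i].start == new_map.start)
		return -DEMO_EEXIST;	/* same start: duplicate chunk map */
	memmove(&maps[i + 1], &maps[i], (*nr - i) * sizeof(*maps));
	maps[i] = new_map;
	(*nr)++;
	return 0;
}

int main(void)
{
	struct demo_map maps[8];
	int nr = 0;

	demo_add(maps, &nr, (struct demo_map){ .start = 1 << 20 });
	demo_add(maps, &nr, (struct demo_map){ .start = 0 });
	printf("%d\n", demo_add(maps, &nr, (struct demo_map){ .start = 0 })); /* -17 */
	return 0;
}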
+
+EXPORT_FOR_TESTS
+struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
+{
+ struct btrfs_chunk_map *map;
+
+ map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
+ if (!map)
+ return NULL;
+
+ refcount_set(&map->refs, 1);
+ RB_CLEAR_NODE(&map->rb_node);
+
+ return map;
+}
+
static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
{
struct btrfs_fs_info *info = trans->fs_info;
- struct map_lookup *map = NULL;
- struct extent_map_tree *em_tree;
+ struct btrfs_chunk_map *map;
struct btrfs_block_group *block_group;
- struct extent_map *em;
u64 start = ctl->start;
u64 type = ctl->type;
int ret;
- int i;
- int j;
- map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
+ map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
if (!map)
return ERR_PTR(-ENOMEM);
+
+ map->start = start;
+ map->chunk_len = ctl->chunk_size;
+ map->stripe_size = ctl->stripe_size;
+ map->type = type;
+ map->io_align = BTRFS_STRIPE_LEN;
+ map->io_width = BTRFS_STRIPE_LEN;
+ map->sub_stripes = ctl->sub_stripes;
map->num_stripes = ctl->num_stripes;
- for (i = 0; i < ctl->ndevs; ++i) {
- for (j = 0; j < ctl->dev_stripes; ++j) {
+ for (int i = 0; i < ctl->ndevs; i++) {
+ for (int j = 0; j < ctl->dev_stripes; j++) {
int s = i * ctl->dev_stripes + j;
map->stripes[s].dev = devices_info[i].dev;
map->stripes[s].physical = devices_info[i].dev_offset +
j * ctl->stripe_size;
}
}
- map->stripe_len = BTRFS_STRIPE_LEN;
- map->io_align = BTRFS_STRIPE_LEN;
- map->io_width = BTRFS_STRIPE_LEN;
- map->type = type;
- map->sub_stripes = ctl->sub_stripes;
trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
- em = alloc_extent_map();
- if (!em) {
- kfree(map);
- return ERR_PTR(-ENOMEM);
- }
- set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
- em->map_lookup = map;
- em->start = start;
- em->len = ctl->chunk_size;
- em->block_start = 0;
- em->block_len = em->len;
- em->orig_block_len = ctl->stripe_size;
-
- em_tree = &info->mapping_tree;
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = btrfs_add_chunk_map(info, map);
if (ret) {
- write_unlock(&em_tree->lock);
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ERR_PTR(ret);
}
- write_unlock(&em_tree->lock);
- block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
- if (IS_ERR(block_group))
- goto error_del_extent;
+ block_group = btrfs_make_block_group(trans, ctl->space_info, type, start,
+ ctl->chunk_size);
+ if (IS_ERR(block_group)) {
+ btrfs_remove_chunk_map(info, map);
+ return block_group;
+ }
- for (i = 0; i < map->num_stripes; i++) {
+ for (int i = 0; i < map->num_stripes; i++) {
struct btrfs_device *dev = map->stripes[i].dev;
btrfs_device_set_bytes_used(dev,
@@ -5414,39 +5543,26 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
atomic64_sub(ctl->stripe_size * map->num_stripes,
&info->free_chunk_space);
- free_extent_map(em);
check_raid56_incompat_flag(info, type);
check_raid1c34_incompat_flag(info, type);
return block_group;
-
-error_del_extent:
- write_lock(&em_tree->lock);
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
-
- /* One for our allocation */
- free_extent_map(em);
- /* One for the tree reference */
- free_extent_map(em);
-
- return block_group;
}
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
- u64 type)
+ struct btrfs_space_info *space_info,
+ u64 type)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
- struct btrfs_device_info *devices_info = NULL;
+ struct btrfs_device_info AUTO_KFREE(devices_info);
struct alloc_chunk_ctl ctl;
- struct btrfs_block_group *block_group;
int ret;
lockdep_assert_held(&info->chunk_mutex);
if (!alloc_profile_is_valid(type, 0)) {
- ASSERT(0);
+ DEBUG_WARN("invalid alloc profile for type %llu", type);
return ERR_PTR(-EINVAL);
}
@@ -5458,12 +5574,13 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
btrfs_err(info, "invalid chunk type 0x%llx requested", type);
- ASSERT(0);
+ DEBUG_WARN();
return ERR_PTR(-EINVAL);
}
ctl.start = find_next_chunk(info);
ctl.type = type;
+ ctl.space_info = space_info;
init_alloc_chunk_ctl(fs_devices, &ctl);
devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
@@ -5472,22 +5589,14 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
return ERR_PTR(-ENOMEM);
ret = gather_device_info(fs_devices, &ctl, devices_info);
- if (ret < 0) {
- block_group = ERR_PTR(ret);
- goto out;
- }
+ if (ret < 0)
+ return ERR_PTR(ret);
ret = decide_stripe_size(fs_devices, &ctl, devices_info);
- if (ret < 0) {
- block_group = ERR_PTR(ret);
- goto out;
- }
-
- block_group = create_chunk(trans, &ctl, devices_info);
+ if (ret < 0)
+ return ERR_PTR(ret);
-out:
- kfree(devices_info);
- return block_group;
+ return create_chunk(trans, &ctl, devices_info);
}
/*
@@ -5502,13 +5611,11 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
struct btrfs_block_group *bg)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_key key;
struct btrfs_chunk *chunk;
struct btrfs_stripe *stripe;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
size_t item_size;
int i;
int ret;
@@ -5537,18 +5644,17 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
*/
lockdep_assert_held(&fs_info->chunk_mutex);
- em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
btrfs_abort_transaction(trans, ret);
return ret;
}
- map = em->map_lookup;
item_size = btrfs_chunk_item_size(map->num_stripes);
chunk = kzalloc(item_size, GFP_NOFS);
- if (!chunk) {
+ if (unlikely(!chunk)) {
ret = -ENOMEM;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -5574,12 +5680,12 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
}
btrfs_set_stack_chunk_length(chunk, bg->length);
- btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
- btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
+ btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
+ btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
btrfs_set_stack_chunk_type(chunk, map->type);
btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
- btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
- btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
+ btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
+ btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
@@ -5591,7 +5697,7 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- bg->chunk_item_inserted = 1;
+ set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
@@ -5601,7 +5707,7 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
out:
kfree(chunk);
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
@@ -5610,7 +5716,9 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
struct btrfs_fs_info *fs_info = trans->fs_info;
u64 alloc_profile;
struct btrfs_block_group *meta_bg;
+ struct btrfs_space_info *meta_space_info;
struct btrfs_block_group *sys_bg;
+ struct btrfs_space_info *sys_space_info;
/*
* When adding a new device for sprouting, the seed device is read-only
@@ -5634,19 +5742,29 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
*/
alloc_profile = btrfs_metadata_alloc_profile(fs_info);
- meta_bg = btrfs_create_chunk(trans, alloc_profile);
+ meta_space_info = btrfs_find_space_info(fs_info, alloc_profile);
+ if (!meta_space_info) {
+ DEBUG_WARN();
+ return -EINVAL;
+ }
+ meta_bg = btrfs_create_chunk(trans, meta_space_info, alloc_profile);
if (IS_ERR(meta_bg))
return PTR_ERR(meta_bg);
alloc_profile = btrfs_system_alloc_profile(fs_info);
- sys_bg = btrfs_create_chunk(trans, alloc_profile);
+ sys_space_info = btrfs_find_space_info(fs_info, alloc_profile);
+ if (!sys_space_info) {
+ DEBUG_WARN();
+ return -EINVAL;
+ }
+ sys_bg = btrfs_create_chunk(trans, sys_space_info, alloc_profile);
if (IS_ERR(sys_bg))
return PTR_ERR(sys_bg);
return 0;
}
-static inline int btrfs_chunk_max_errors(struct map_lookup *map)
+static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
{
const int index = btrfs_bg_flags_to_raid_index(map->type);
@@ -5655,17 +5773,15 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
int miss_ndevs = 0;
int i;
bool ret = true;
- em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
- if (IS_ERR(em))
+ map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
+ if (IS_ERR(map))
return false;
- map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
if (test_bit(BTRFS_DEV_STATE_MISSING,
&map->stripes[i].dev->dev_state)) {
@@ -5686,37 +5802,57 @@ bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
if (miss_ndevs > btrfs_chunk_max_errors(map))
ret = false;
end:
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
-void btrfs_mapping_tree_free(struct extent_map_tree *tree)
+void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
{
- struct extent_map *em;
+ write_lock(&fs_info->mapping_tree_lock);
+ while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
+ struct btrfs_chunk_map *map;
+ struct rb_node *node;
- while (1) {
- write_lock(&tree->lock);
- em = lookup_extent_mapping(tree, 0, (u64)-1);
- if (em)
- remove_extent_mapping(tree, em);
- write_unlock(&tree->lock);
- if (!em)
- break;
- /* once for us */
- free_extent_map(em);
- /* once for the tree */
- free_extent_map(em);
+ node = rb_first_cached(&fs_info->mapping_tree);
+ map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+ rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
+ RB_CLEAR_NODE(&map->rb_node);
+ chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
+ /* Once for the tree ref. */
+ btrfs_free_chunk_map(map);
+ cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
}
+ write_unlock(&fs_info->mapping_tree_lock);
+}
+
+static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
+{
+ enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);
+
+ if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+ return 2;
+
+ /*
+ * There could be two corrupted data stripes; we need to retry in a loop
+ * in order to rebuild the correct data.
+ *
+ * Fail a stripe at a time on every retry except the stripe under
+ * reconstruction.
+ */
+ if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ return map->num_stripes;
+
+ /* Non-RAID56, use their ncopies from btrfs_raid_array. */
+ return btrfs_raid_array[index].ncopies;
}
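/*
 * Editor's illustrative sketch (standalone userspace C, not the kernel
 * helper): number of copies reported per profile. For non-RAID56 profiles
 * the real code reads ncopies from btrfs_raid_array; the literal values
 * below are assumptions used only for this example.
 */
#include <stdio.h>

enum { DEMO_SINGLE, DEMO_RAID1, DEMO_RAID1C3, DEMO_RAID5, DEMO_RAID6, DEMO_DUP };

static int demo_num_copies(int profile, int num_stripes)
{
	switch (profile) {
	case DEMO_RAID5:
		return 2;		/* the data plus one rebuild from parity */
	case DEMO_RAID6:
		return num_stripes;	/* retry, failing a different stripe each time */
	case DEMO_RAID1:
	case DEMO_DUP:
		return 2;
	case DEMO_RAID1C3:
		return 3;
	default:
		return 1;		/* SINGLE / RAID0 */
	}
}

int main(void)
{
	printf("raid6 copies on 4 stripes: %d\n", demo_num_copies(DEMO_RAID6, 4));
	return 0;
}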
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
int ret;
- em = btrfs_get_chunk_map(fs_info, logical, len);
- if (IS_ERR(em))
+ map = btrfs_get_chunk_map(fs_info, logical, len);
+ if (IS_ERR(map))
/*
* We could return errors for these cases, but that could get
* ugly and we'd probably do the same thing which is just not do
@@ -5725,99 +5861,137 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
*/
return 1;
- map = em->map_lookup;
- if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
- ret = map->num_stripes;
- else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
- ret = map->sub_stripes;
- else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
- ret = 2;
- else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
- /*
- * There could be two corrupted data stripes, we need
- * to loop retry in order to rebuild the correct data.
- *
- * Fail a stripe at a time on every retry except the
- * stripe under reconstruction.
- */
- ret = map->num_stripes;
- else
- ret = 1;
- free_extent_map(em);
-
- down_read(&fs_info->dev_replace.rwsem);
- if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
- fs_info->dev_replace.tgtdev)
- ret++;
- up_read(&fs_info->dev_replace.rwsem);
-
+ ret = btrfs_chunk_map_num_copies(map);
+ btrfs_free_chunk_map(map);
return ret;
}
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical)
{
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
unsigned long len = fs_info->sectorsize;
- em = btrfs_get_chunk_map(fs_info, logical, len);
+ if (!btrfs_fs_incompat(fs_info, RAID56))
+ return len;
- if (!WARN_ON(IS_ERR(em))) {
- map = em->map_lookup;
+ map = btrfs_get_chunk_map(fs_info, logical, len);
+
+ if (!WARN_ON(IS_ERR(map))) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- len = map->stripe_len * nr_data_stripes(map);
- free_extent_map(em);
+ len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
+ btrfs_free_chunk_map(map);
}
return len;
}
-int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_read_preferred(struct btrfs_chunk_map *map, int first, int num_stripes)
{
- struct extent_map *em;
- struct map_lookup *map;
- int ret = 0;
+ for (int index = first; index < first + num_stripes; index++) {
+ const struct btrfs_device *device = map->stripes[index].dev;
- em = btrfs_get_chunk_map(fs_info, logical, len);
+ if (device->devid == READ_ONCE(device->fs_devices->read_devid))
+ return index;
+ }
- if(!WARN_ON(IS_ERR(em))) {
- map = em->map_lookup;
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- ret = 1;
- free_extent_map(em);
+ /* If no read-preferred device is set use the first stripe. */
+ return first;
+}
+
+struct stripe_mirror {
+ u64 devid;
+ int num;
+};
+
+static int btrfs_cmp_devid(const void *a, const void *b)
+{
+ const struct stripe_mirror *s1 = (const struct stripe_mirror *)a;
+ const struct stripe_mirror *s2 = (const struct stripe_mirror *)b;
+
+ if (s1->devid < s2->devid)
+ return -1;
+ if (s1->devid > s2->devid)
+ return 1;
+ return 0;
+}
+
+/*
+ * Select a stripe for reading using the round-robin algorithm.
+ *
+ * 1. Compute the read cycle as the total sectors read divided by the minimum
+ * sectors per device.
+ * 2. Determine the stripe number for the current read by taking the modulus
+ * of the read cycle with the total number of stripes:
+ *
+ * stripe index = (total sectors / min sectors per dev) % num stripes
+ *
+ * The calculated stripe index is then used to select the corresponding device
+ * from the list of devices, which is ordered by devid.
+ */
+static int btrfs_read_rr(const struct btrfs_chunk_map *map, int first, int num_stripes)
+{
+ struct stripe_mirror stripes[BTRFS_RAID1_MAX_MIRRORS] = { 0 };
+ struct btrfs_device *device = map->stripes[first].dev;
+ struct btrfs_fs_info *fs_info = device->fs_devices->fs_info;
+ unsigned int read_cycle;
+ unsigned int total_reads;
+ unsigned int min_reads_per_dev;
+
+ total_reads = percpu_counter_sum(&fs_info->stats_read_blocks);
+ min_reads_per_dev = READ_ONCE(fs_info->fs_devices->rr_min_contig_read) >>
+ fs_info->sectorsize_bits;
+
+ for (int index = 0, i = first; i < first + num_stripes; i++) {
+ stripes[index].devid = map->stripes[i].dev->devid;
+ stripes[index].num = i;
+ index++;
}
- return ret;
+ sort(stripes, num_stripes, sizeof(struct stripe_mirror),
+ btrfs_cmp_devid, NULL);
+
+ read_cycle = total_reads / min_reads_per_dev;
+ return stripes[read_cycle % num_stripes].num;
}
+#endif
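/*
 * Editor's illustrative sketch (standalone userspace C, not the kernel
 * implementation) of the round-robin policy documented above:
 * stripe = (total sectors read / min contiguous sectors per device)
 *          % number of mirrors, applied to the mirrors sorted by devid.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int demo_read_rr(uint64_t total_reads,
				 unsigned int min_reads_per_dev,
				 unsigned int num_stripes)
{
	unsigned int read_cycle = total_reads / min_reads_per_dev;

	return read_cycle % num_stripes;	/* index into devid-sorted mirrors */
}

int main(void)
{
	/* 64 sectors (256K of 4K sectors) per device: mirrors alternate every cycle. */
	for (uint64_t reads = 0; reads < 256; reads += 64)
		printf("reads=%llu -> mirror %u\n",
		       (unsigned long long)reads, demo_read_rr(reads, 64, 2));
	return 0;
}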
static int find_live_mirror(struct btrfs_fs_info *fs_info,
- struct map_lookup *map, int first,
- int dev_replace_is_ongoing)
+ struct btrfs_chunk_map *map, int first,
+ bool dev_replace_is_ongoing)
{
+ const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy);
int i;
int num_stripes;
int preferred_mirror;
int tolerance;
struct btrfs_device *srcdev;
- ASSERT((map->type &
- (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
+ ASSERT((map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)),
+ "type=%llu", map->type);
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = map->sub_stripes;
else
num_stripes = map->num_stripes;
- switch (fs_info->fs_devices->read_policy) {
+ switch (policy) {
default:
/* Shouldn't happen, just warn and use pid instead of failing */
- btrfs_warn_rl(fs_info,
- "unknown read_policy type %u, reset to pid",
- fs_info->fs_devices->read_policy);
- fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
+ btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid",
+ policy);
+ WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID);
fallthrough;
case BTRFS_READ_POLICY_PID:
preferred_mirror = first + (current->pid % num_stripes);
break;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ case BTRFS_READ_POLICY_RR:
+ preferred_mirror = btrfs_read_rr(map, first, num_stripes);
+ break;
+ case BTRFS_READ_POLICY_DEVID:
+ preferred_mirror = btrfs_read_preferred(map, first, num_stripes);
+ break;
+#endif
}
if (dev_replace_is_ongoing &&
@@ -5849,49 +6023,23 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
return preferred_mirror;
}
-/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
-static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
+EXPORT_FOR_TESTS
+struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
+ u64 logical, u16 total_stripes)
{
- int i;
- int again = 1;
-
- while (again) {
- again = 0;
- for (i = 0; i < num_stripes - 1; i++) {
- /* Swap if parity is on a smaller index */
- if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
- swap(bioc->stripes[i], bioc->stripes[i + 1]);
- swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
- again = 1;
- }
- }
- }
-}
+ struct btrfs_io_context *bioc;
-static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
- int total_stripes,
- int real_stripes)
-{
- struct btrfs_io_context *bioc = kzalloc(
- /* The size of btrfs_io_context */
- sizeof(struct btrfs_io_context) +
- /* Plus the variable array for the stripes */
- sizeof(struct btrfs_io_stripe) * (total_stripes) +
- /* Plus the variable array for the tgt dev */
- sizeof(int) * (real_stripes) +
- /*
- * Plus the raid_map, which includes both the tgt dev
- * and the stripes.
- */
- sizeof(u64) * (total_stripes),
- GFP_NOFS|__GFP_NOFAIL);
+ bioc = kzalloc(struct_size(bioc, stripes, total_stripes), GFP_NOFS);
+
+ if (!bioc)
+ return NULL;
- atomic_set(&bioc->error, 0);
refcount_set(&bioc->refs, 1);
bioc->fs_info = fs_info;
- bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
- bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
+ bioc->replace_stripe_src = -1;
+ bioc->full_stripe_logical = (u64)-1;
+ bioc->logical = logical;
return bioc;
}
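/*
 * Editor's illustrative sketch (standalone userspace C, hypothetical demo_*
 * types): the io_context above is now a single allocation sized with
 * struct_size(), i.e. the header plus a flexible array of stripes, instead
 * of the old hand-summed stripes/tgtdev_map/raid_map arrays.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_stripe { uint64_t physical; };

struct demo_io_context {
	uint16_t num_stripes;
	struct demo_stripe stripes[];	/* flexible array member */
};

static struct demo_io_context *demo_alloc_io_context(uint16_t total_stripes)
{
	struct demo_io_context *ioc;

	/* struct_size(ioc, stripes, total_stripes) in kernel terms */
	ioc = calloc(1, sizeof(*ioc) + (size_t)total_stripes * sizeof(ioc->stripes[0]));
	if (!ioc)
		return NULL;
	ioc->num_stripes = total_stripes;
	return ioc;
}

int main(void)
{
	struct demo_io_context *ioc = demo_alloc_io_context(4);

	printf("%u stripes allocated\n", ioc ? ioc->num_stripes : 0);
	free(ioc);
	return 0;
}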
@@ -5910,75 +6058,66 @@ void btrfs_put_bioc(struct btrfs_io_context *bioc)
kfree(bioc);
}
-/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
* Please note that, discard won't be sent to target device of device
* replace.
*/
-static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
- u64 logical, u64 *length_ret,
- struct btrfs_io_context **bioc_ret)
+struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 *length_ret,
+ u32 *num_stripes)
{
- struct extent_map *em;
- struct map_lookup *map;
- struct btrfs_io_context *bioc;
+ struct btrfs_chunk_map *map;
+ struct btrfs_discard_stripe *stripes;
u64 length = *length_ret;
u64 offset;
- u64 stripe_nr;
- u64 stripe_nr_end;
+ u32 stripe_nr;
+ u32 stripe_nr_end;
+ u32 stripe_cnt;
u64 stripe_end_offset;
- u64 stripe_cnt;
- u64 stripe_len;
u64 stripe_offset;
- u64 num_stripes;
u32 stripe_index;
u32 factor = 0;
u32 sub_stripes = 0;
- u64 stripes_per_dev = 0;
+ u32 stripes_per_dev = 0;
u32 remaining_stripes = 0;
u32 last_stripe = 0;
- int ret = 0;
+ int ret;
int i;
- /* Discard always returns a bioc. */
- ASSERT(bioc_ret);
-
- em = btrfs_get_chunk_map(fs_info, logical, length);
- if (IS_ERR(em))
- return PTR_ERR(em);
+ map = btrfs_get_chunk_map(fs_info, logical, length);
+ if (IS_ERR(map))
+ return ERR_CAST(map);
- map = em->map_lookup;
/* we don't discard raid56 yet */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = -EOPNOTSUPP;
- goto out;
+ goto out_free_map;
}
- offset = logical - em->start;
- length = min_t(u64, em->start + em->len - logical, length);
+ offset = logical - map->start;
+ length = min_t(u64, map->start + map->chunk_len - logical, length);
*length_ret = length;
- stripe_len = map->stripe_len;
/*
* stripe_nr counts the total number of stripes we have to stride
* to get to this block
*/
- stripe_nr = div64_u64(offset, stripe_len);
+ stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
/* stripe_offset is the offset of this block in its stripe */
- stripe_offset = offset - stripe_nr * stripe_len;
+ stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);
- stripe_nr_end = round_up(offset + length, map->stripe_len);
- stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
+ stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
+ BTRFS_STRIPE_LEN_SHIFT;
stripe_cnt = stripe_nr_end - stripe_nr;
- stripe_end_offset = stripe_nr_end * map->stripe_len -
+ stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
(offset + length);
/*
* after this, stripe_nr is the number of stripes on this
* device we have to walk to find the data, and stripe_index is
* the number of our device in the stripe array
*/
- num_stripes = 1;
+ *num_stripes = 1;
stripe_index = 0;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
@@ -5988,41 +6127,41 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
sub_stripes = map->sub_stripes;
factor = map->num_stripes / sub_stripes;
- num_stripes = min_t(u64, map->num_stripes,
+ *num_stripes = min_t(u64, map->num_stripes,
sub_stripes * stripe_cnt);
- stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ stripe_index = stripe_nr % factor;
+ stripe_nr /= factor;
stripe_index *= sub_stripes;
- stripes_per_dev = div_u64_rem(stripe_cnt, factor,
- &remaining_stripes);
- div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
- last_stripe *= sub_stripes;
+
+ remaining_stripes = stripe_cnt % factor;
+ stripes_per_dev = stripe_cnt / factor;
+ last_stripe = ((stripe_nr_end - 1) % factor) * sub_stripes;
} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
BTRFS_BLOCK_GROUP_DUP)) {
- num_stripes = map->num_stripes;
+ *num_stripes = map->num_stripes;
} else {
- stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
- &stripe_index);
+ stripe_index = stripe_nr % map->num_stripes;
+ stripe_nr /= map->num_stripes;
}
- bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0);
- if (!bioc) {
+ stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
+ if (!stripes) {
ret = -ENOMEM;
- goto out;
+ goto out_free_map;
}
- for (i = 0; i < num_stripes; i++) {
- bioc->stripes[i].physical =
+ for (i = 0; i < *num_stripes; i++) {
+ stripes[i].physical =
map->stripes[stripe_index].physical +
- stripe_offset + stripe_nr * map->stripe_len;
- bioc->stripes[i].dev = map->stripes[stripe_index].dev;
+ stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
+ stripes[i].dev = map->stripes[stripe_index].dev;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
- bioc->stripes[i].length = stripes_per_dev *
- map->stripe_len;
+ stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);
if (i / sub_stripes < remaining_stripes)
- bioc->stripes[i].length += map->stripe_len;
+ stripes[i].length += BTRFS_STRIPE_LEN;
/*
* Special for the first stripe and
@@ -6033,17 +6172,17 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
* off end_off
*/
if (i < sub_stripes)
- bioc->stripes[i].length -= stripe_offset;
+ stripes[i].length -= stripe_offset;
if (stripe_index >= last_stripe &&
stripe_index <= (last_stripe +
sub_stripes - 1))
- bioc->stripes[i].length -= stripe_end_offset;
+ stripes[i].length -= stripe_end_offset;
if (i == sub_stripes - 1)
stripe_offset = 0;
} else {
- bioc->stripes[i].length = length;
+ stripes[i].length = length;
}
stripe_index++;
@@ -6053,89 +6192,11 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
}
}
- *bioc_ret = bioc;
- bioc->map_type = map->type;
- bioc->num_stripes = num_stripes;
-out:
- free_extent_map(em);
- return ret;
-}
-
-/*
- * In dev-replace case, for repair case (that's the only case where the mirror
- * is selected explicitly when calling btrfs_map_block), blocks left of the
- * left cursor can also be read from the target drive.
- *
- * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
- * array of stripes.
- * For READ, it also needs to be supported using the same mirror number.
- *
- * If the requested block is not left of the left cursor, EIO is returned. This
- * can happen because btrfs_num_copies() returns one more in the dev-replace
- * case.
- */
-static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length,
- u64 srcdev_devid, int *mirror_num,
- u64 *physical)
-{
- struct btrfs_io_context *bioc = NULL;
- int num_stripes;
- int index_srcdev = 0;
- int found = 0;
- u64 physical_of_found = 0;
- int i;
- int ret = 0;
-
- ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
- logical, &length, &bioc, 0, 0);
- if (ret) {
- ASSERT(bioc == NULL);
- return ret;
- }
-
- num_stripes = bioc->num_stripes;
- if (*mirror_num > num_stripes) {
- /*
- * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
- * that means that the requested area is not left of the left
- * cursor
- */
- btrfs_put_bioc(bioc);
- return -EIO;
- }
-
- /*
- * process the rest of the function using the mirror_num of the source
- * drive. Therefore look it up first. At the end, patch the device
- * pointer to the one of the target drive.
- */
- for (i = 0; i < num_stripes; i++) {
- if (bioc->stripes[i].dev->devid != srcdev_devid)
- continue;
-
- /*
- * In case of DUP, in order to keep it simple, only add the
- * mirror with the lowest physical address
- */
- if (found &&
- physical_of_found <= bioc->stripes[i].physical)
- continue;
-
- index_srcdev = i;
- found = 1;
- physical_of_found = bioc->stripes[i].physical;
- }
-
- btrfs_put_bioc(bioc);
-
- ASSERT(found);
- if (!found)
- return -EIO;
-
- *mirror_num = index_srcdev + 1;
- *physical = physical_of_found;
- return ret;
+ btrfs_free_chunk_map(map);
+ return stripes;
+out_free_map:
+ btrfs_free_chunk_map(map);
+ return ERR_PTR(ret);
}
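/*
 * Editor's illustrative sketch (standalone userspace C, not the kernel
 * function) of the stripe bookkeeping at the top of btrfs_map_discard():
 * with a fixed 64 KiB stripe length the offset splits into stripe_nr and
 * stripe_offset by shift and mask, and the discard range spans
 * stripe_nr_end - stripe_nr stripes.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_STRIPE_LEN_SHIFT	16			/* assume 64 KiB stripes */
#define DEMO_STRIPE_LEN		(1ULL << DEMO_STRIPE_LEN_SHIFT)

int main(void)
{
	uint64_t offset = 3 * DEMO_STRIPE_LEN + 4096;	/* start inside stripe 3 */
	uint64_t length = 5 * DEMO_STRIPE_LEN;

	uint32_t stripe_nr = offset >> DEMO_STRIPE_LEN_SHIFT;
	uint64_t stripe_offset = offset & (DEMO_STRIPE_LEN - 1);
	uint32_t stripe_nr_end = (offset + length + DEMO_STRIPE_LEN - 1) >>
				 DEMO_STRIPE_LEN_SHIFT;
	uint32_t stripe_cnt = stripe_nr_end - stripe_nr;
	uint64_t stripe_end_offset = ((uint64_t)stripe_nr_end << DEMO_STRIPE_LEN_SHIFT) -
				     (offset + length);

	printf("stripe_nr=%u stripe_offset=%llu cnt=%u end_off=%llu\n",
	       stripe_nr, (unsigned long long)stripe_offset, stripe_cnt,
	       (unsigned long long)stripe_end_offset);
	return 0;
}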
static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
@@ -6149,662 +6210,536 @@ static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
cache = btrfs_lookup_block_group(fs_info, logical);
- spin_lock(&cache->lock);
- ret = cache->to_copy;
- spin_unlock(&cache->lock);
+ ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
btrfs_put_block_group(cache);
return ret;
}
-static void handle_ops_on_dev_replace(enum btrfs_map_op op,
- struct btrfs_io_context **bioc_ret,
+static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
struct btrfs_dev_replace *dev_replace,
u64 logical,
- int *num_stripes_ret, int *max_errors_ret)
+ struct btrfs_io_geometry *io_geom)
{
- struct btrfs_io_context *bioc = *bioc_ret;
u64 srcdev_devid = dev_replace->srcdev->devid;
- int tgtdev_indexes = 0;
- int num_stripes = *num_stripes_ret;
- int max_errors = *max_errors_ret;
+ /*
+ * At this stage, num_stripes is still the real number of stripes,
+ * excluding the duplicated stripes.
+ */
+ int num_stripes = io_geom->num_stripes;
+ int max_errors = io_geom->max_errors;
+ int nr_extra_stripes = 0;
int i;
- if (op == BTRFS_MAP_WRITE) {
- int index_where_to_add;
+ /*
+ * A block group which has "to_copy" set will eventually be copied by
+ * the dev-replace process. We can avoid cloning IO here.
+ */
+ if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
+ return;
- /*
- * A block group which have "to_copy" set will eventually
- * copied by dev-replace process. We can avoid cloning IO here.
- */
- if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
- return;
+ /*
+ * Duplicate the write operations while the dev-replace procedure is
+ * running. Since the copying of the old disk to the new disk takes
+ * place at run time while the filesystem is mounted writable, the
+ * regular write operations to the old disk have to be duplicated to go
+ * to the new disk as well.
+ *
+ * Note that device->missing is handled by the caller, and that the
+ * write to the old disk is already set up in the stripes array.
+ */
+ for (i = 0; i < num_stripes; i++) {
+ struct btrfs_io_stripe *old = &bioc->stripes[i];
+ struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];
- /*
- * duplicate the write operations while the dev replace
- * procedure is running. Since the copying of the old disk to
- * the new disk takes place at run time while the filesystem is
- * mounted writable, the regular write operations to the old
- * disk have to be duplicated to go to the new disk as well.
- *
- * Note that device->missing is handled by the caller, and that
- * the write to the old disk is already set up in the stripes
- * array.
- */
- index_where_to_add = num_stripes;
- for (i = 0; i < num_stripes; i++) {
- if (bioc->stripes[i].dev->devid == srcdev_devid) {
- /* write to new disk, too */
- struct btrfs_io_stripe *new =
- bioc->stripes + index_where_to_add;
- struct btrfs_io_stripe *old =
- bioc->stripes + i;
-
- new->physical = old->physical;
- new->length = old->length;
- new->dev = dev_replace->tgtdev;
- bioc->tgtdev_map[i] = index_where_to_add;
- index_where_to_add++;
- max_errors++;
- tgtdev_indexes++;
- }
- }
- num_stripes = index_where_to_add;
- } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
- int index_srcdev = 0;
- int found = 0;
- u64 physical_of_found = 0;
+ if (old->dev->devid != srcdev_devid)
+ continue;
+
+ new->physical = old->physical;
+ new->dev = dev_replace->tgtdev;
+ if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
+ bioc->replace_stripe_src = i;
+ nr_extra_stripes++;
+ }
+
+ /* We can only have at most 2 extra nr_stripes (for DUP). */
+ ASSERT(nr_extra_stripes <= 2, "nr_extra_stripes=%d", nr_extra_stripes);
+ /*
+ * For GET_READ_MIRRORS, we can return at most one extra stripe for the
+ * replace target. If we have two extra stripes, only keep the one with
+ * the smaller physical address.
+ */
+ if (io_geom->op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
+ struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
+ struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];
+
+ /* Only DUP can have two extra stripes. */
+ ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP,
+ "map_type=%llu", bioc->map_type);
/*
- * During the dev-replace procedure, the target drive can also
- * be used to read data in case it is needed to repair a corrupt
- * block elsewhere. This is possible if the requested area is
- * left of the left cursor. In this area, the target drive is a
- * full copy of the source drive.
+ * Swap the last two stripes and reduce @nr_extra_stripes.
+ * The extra stripe would still be there, but won't be accessed.
*/
- for (i = 0; i < num_stripes; i++) {
- if (bioc->stripes[i].dev->devid == srcdev_devid) {
- /*
- * In case of DUP, in order to keep it simple,
- * only add the mirror with the lowest physical
- * address
- */
- if (found &&
- physical_of_found <= bioc->stripes[i].physical)
- continue;
- index_srcdev = i;
- found = 1;
- physical_of_found = bioc->stripes[i].physical;
- }
- }
- if (found) {
- struct btrfs_io_stripe *tgtdev_stripe =
- bioc->stripes + num_stripes;
-
- tgtdev_stripe->physical = physical_of_found;
- tgtdev_stripe->length =
- bioc->stripes[index_srcdev].length;
- tgtdev_stripe->dev = dev_replace->tgtdev;
- bioc->tgtdev_map[index_srcdev] = num_stripes;
-
- tgtdev_indexes++;
- num_stripes++;
+ if (first->physical > second->physical) {
+ swap(second->physical, first->physical);
+ swap(second->dev, first->dev);
+ nr_extra_stripes--;
}
}
- *num_stripes_ret = num_stripes;
- *max_errors_ret = max_errors;
- bioc->num_tgtdevs = tgtdev_indexes;
- *bioc_ret = bioc;
-}
-
-static bool need_full_stripe(enum btrfs_map_op op)
-{
- return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
+ io_geom->num_stripes = num_stripes + nr_extra_stripes;
+ io_geom->max_errors = max_errors + nr_extra_stripes;
+ bioc->replace_nr_stripes = nr_extra_stripes;
}
-/*
- * Calculate the geometry of a particular (address, len) tuple. This
- * information is used to calculate how big a particular bio can get before it
- * straddles a stripe.
- *
- * @fs_info: the filesystem
- * @em: mapping containing the logical extent
- * @op: type of operation - write or read
- * @logical: address that we want to figure out the geometry of
- * @io_geom: pointer used to return values
- *
- * Returns < 0 in case a chunk for the given logical address cannot be found,
- * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
- */
-int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
- enum btrfs_map_op op, u64 logical,
- struct btrfs_io_geometry *io_geom)
+static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
+ struct btrfs_io_geometry *io_geom)
{
- struct map_lookup *map;
- u64 len;
- u64 offset;
- u64 stripe_offset;
- u64 stripe_nr;
- u64 stripe_len;
- u64 raid56_full_stripe_start = (u64)-1;
- int data_stripes;
-
- ASSERT(op != BTRFS_MAP_DISCARD);
-
- map = em->map_lookup;
- /* Offset of this logical address in the chunk */
- offset = logical - em->start;
- /* Len of a stripe in a chunk */
- stripe_len = map->stripe_len;
- /* Stripe where this block falls in */
- stripe_nr = div64_u64(offset, stripe_len);
- /* Offset of stripe in the chunk */
- stripe_offset = stripe_nr * stripe_len;
- if (offset < stripe_offset) {
- btrfs_crit(fs_info,
-"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
- stripe_offset, offset, em->start, logical, stripe_len);
- return -EINVAL;
- }
-
- /* stripe_offset is the offset of this block in its stripe */
- stripe_offset = offset - stripe_offset;
- data_stripes = nr_data_stripes(map);
+ /*
+ * Stripe_nr is the stripe where this block falls. stripe_offset is
+ * the offset of this block in its stripe.
+ */
+ io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
+ io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
+ ASSERT(io_geom->stripe_offset < U32_MAX,
+ "stripe_offset=%llu", io_geom->stripe_offset);
- if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
- u64 max_len = stripe_len - stripe_offset;
+ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ unsigned long full_stripe_len =
+ btrfs_stripe_nr_to_offset(nr_data_stripes(map));
/*
- * In case of raid56, we need to know the stripe aligned start
+ * For full stripe start, we use the previously calculated
+ * @stripe_nr: align it down to nr_data_stripes, then convert it back
+ * to a byte offset.
+ *
+ * By doing this we avoid u64 division completely. Note that we have
+ * to use rounddown(), not round_down(), as nr_data_stripes is not
+ * guaranteed to be a power of 2.
*/
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- unsigned long full_stripe_len = stripe_len * data_stripes;
- raid56_full_stripe_start = offset;
+ io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
+ rounddown(io_geom->stripe_nr, nr_data_stripes(map)));
+
+ ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset,
+ "raid56_full_stripe_start=%llu full_stripe_len=%lu offset=%llu",
+ io_geom->raid56_full_stripe_start, full_stripe_len, offset);
+ ASSERT(io_geom->raid56_full_stripe_start <= offset,
+ "raid56_full_stripe_start=%llu offset=%llu",
+ io_geom->raid56_full_stripe_start, offset);
+ /*
+ * For writes to RAID56, allow to write a full stripe set, but
+ * no straddling of stripe sets.
+ */
+ if (io_geom->op == BTRFS_MAP_WRITE)
+ return full_stripe_len - (offset - io_geom->raid56_full_stripe_start);
+ }
- /*
- * Allow a write of a full stripe, but make sure we
- * don't allow straddling of stripes
- */
- raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
- full_stripe_len);
- raid56_full_stripe_start *= full_stripe_len;
+ /*
+ * For other RAID types and for RAID56 reads, allow a single stripe (on
+ * a single disk).
+ */
+ if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
+ return BTRFS_STRIPE_LEN - io_geom->stripe_offset;
+ return U64_MAX;
+}
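/*
 * Editor's illustrative sketch (standalone userspace C, 64 KiB stripe length
 * assumed): the RAID56 branch above derives the full-stripe start by rounding
 * stripe_nr down to a multiple of nr_data_stripes and converting back to a
 * byte offset, so no 64-bit division on the byte offset is needed.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_STRIPE_LEN_SHIFT	16

static uint64_t demo_full_stripe_start(uint64_t offset, uint32_t nr_data_stripes)
{
	uint32_t stripe_nr = offset >> DEMO_STRIPE_LEN_SHIFT;

	/* rounddown(): nr_data_stripes need not be a power of two */
	stripe_nr -= stripe_nr % nr_data_stripes;
	return (uint64_t)stripe_nr << DEMO_STRIPE_LEN_SHIFT;
}

int main(void)
{
	/* RAID5 over 4 devices: 3 data stripes, i.e. a 192 KiB full stripe. */
	printf("%llu\n", (unsigned long long)demo_full_stripe_start(5ULL << 16, 3));
	return 0;
}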
- /*
- * For writes to RAID[56], allow a full stripeset across
- * all disks. For other RAID types and for RAID[56]
- * reads, just allow a single stripe (on a single disk).
- */
- if (op == BTRFS_MAP_WRITE) {
- max_len = stripe_len * data_stripes -
- (offset - raid56_full_stripe_start);
- }
- }
- len = min_t(u64, em->len - offset, max_len);
- } else {
- len = em->len - offset;
- }
+static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
+ u64 *length, struct btrfs_io_stripe *dst,
+ struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom)
+{
+ dst->dev = map->stripes[io_geom->stripe_index].dev;
- io_geom->len = len;
- io_geom->offset = offset;
- io_geom->stripe_len = stripe_len;
- io_geom->stripe_nr = stripe_nr;
- io_geom->stripe_offset = stripe_offset;
- io_geom->raid56_stripe_offset = raid56_full_stripe_start;
+ if (io_geom->op == BTRFS_MAP_READ && io_geom->use_rst)
+ return btrfs_get_raid_extent_offset(fs_info, logical, length,
+ map->type,
+ io_geom->stripe_index, dst);
+ dst->physical = map->stripes[io_geom->stripe_index].physical +
+ io_geom->stripe_offset +
+ btrfs_stripe_nr_to_offset(io_geom->stripe_nr);
return 0;
}
-static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
- enum btrfs_map_op op,
- u64 logical, u64 *length,
- struct btrfs_io_context **bioc_ret,
- int mirror_num, int need_raid_map)
+static bool is_single_device_io(struct btrfs_fs_info *fs_info,
+ const struct btrfs_io_stripe *smap,
+ const struct btrfs_chunk_map *map,
+ int num_alloc_stripes,
+ struct btrfs_io_geometry *io_geom)
{
- struct extent_map *em;
- struct map_lookup *map;
- u64 stripe_offset;
- u64 stripe_nr;
- u64 stripe_len;
- u32 stripe_index;
- int data_stripes;
- int i;
- int ret = 0;
- int num_stripes;
- int max_errors = 0;
- int tgtdev_indexes = 0;
- struct btrfs_io_context *bioc = NULL;
- struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- int dev_replace_is_ongoing = 0;
- int num_alloc_stripes;
- int patch_the_first_stripe_for_dev_replace = 0;
- u64 physical_to_patch_in_first_stripe = 0;
- u64 raid56_full_stripe_start = (u64)-1;
- struct btrfs_io_geometry geom;
-
- ASSERT(bioc_ret);
- ASSERT(op != BTRFS_MAP_DISCARD);
+ if (!smap)
+ return false;
- em = btrfs_get_chunk_map(fs_info, logical, *length);
- ASSERT(!IS_ERR(em));
+ if (num_alloc_stripes != 1)
+ return false;
- ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
- if (ret < 0)
- return ret;
+ if (io_geom->use_rst && io_geom->op != BTRFS_MAP_READ)
+ return false;
- map = em->map_lookup;
+ if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && io_geom->mirror_num > 1)
+ return false;
- *length = geom.len;
- stripe_len = geom.stripe_len;
- stripe_nr = geom.stripe_nr;
- stripe_offset = geom.stripe_offset;
- raid56_full_stripe_start = geom.raid56_stripe_offset;
- data_stripes = nr_data_stripes(map);
+ return true;
+}
- down_read(&dev_replace->rwsem);
- dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
- /*
- * Hold the semaphore for read during the whole operation, write is
- * requested at commit time but must wait.
- */
- if (!dev_replace_is_ongoing)
- up_read(&dev_replace->rwsem);
+static void map_blocks_raid0(const struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom)
+{
+ io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
+ io_geom->stripe_nr /= map->num_stripes;
+ if (io_geom->op == BTRFS_MAP_READ)
+ io_geom->mirror_num = 1;
+}
- if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
- !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
- ret = get_extra_mirror_from_replace(fs_info, logical, *length,
- dev_replace->srcdev->devid,
- &mirror_num,
- &physical_to_patch_in_first_stripe);
- if (ret)
- goto out;
- else
- patch_the_first_stripe_for_dev_replace = 1;
- } else if (mirror_num > map->num_stripes) {
- mirror_num = 0;
+static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
+ struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom,
+ bool dev_replace_is_ongoing)
+{
+ if (io_geom->op != BTRFS_MAP_READ) {
+ io_geom->num_stripes = map->num_stripes;
+ return;
}
- num_stripes = 1;
- stripe_index = 0;
- if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
- &stripe_index);
- if (!need_full_stripe(op))
- mirror_num = 1;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
- if (need_full_stripe(op))
- num_stripes = map->num_stripes;
- else if (mirror_num)
- stripe_index = mirror_num - 1;
- else {
- stripe_index = find_live_mirror(fs_info, map, 0,
- dev_replace_is_ongoing);
- mirror_num = stripe_index + 1;
- }
-
- } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (need_full_stripe(op)) {
- num_stripes = map->num_stripes;
- } else if (mirror_num) {
- stripe_index = mirror_num - 1;
- } else {
- mirror_num = 1;
- }
-
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
- u32 factor = map->num_stripes / map->sub_stripes;
-
- stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
- stripe_index *= map->sub_stripes;
-
- if (need_full_stripe(op))
- num_stripes = map->sub_stripes;
- else if (mirror_num)
- stripe_index += mirror_num - 1;
- else {
- int old_stripe_index = stripe_index;
- stripe_index = find_live_mirror(fs_info, map,
- stripe_index,
- dev_replace_is_ongoing);
- mirror_num = stripe_index - old_stripe_index + 1;
- }
-
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
- /* push stripe_nr back to the start of the full stripe */
- stripe_nr = div64_u64(raid56_full_stripe_start,
- stripe_len * data_stripes);
-
- /* RAID[56] write or recovery. Return all stripes */
- num_stripes = map->num_stripes;
- max_errors = nr_parity_stripes(map);
-
- *length = map->stripe_len;
- stripe_index = 0;
- stripe_offset = 0;
- } else {
- /*
- * Mirror #0 or #1 means the original data block.
- * Mirror #2 is RAID5 parity block.
- * Mirror #3 is RAID6 Q block.
- */
- stripe_nr = div_u64_rem(stripe_nr,
- data_stripes, &stripe_index);
- if (mirror_num > 1)
- stripe_index = data_stripes + mirror_num - 2;
-
- /* We distribute the parity blocks across stripes */
- div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
- &stripe_index);
- if (!need_full_stripe(op) && mirror_num <= 1)
- mirror_num = 1;
- }
- } else {
- /*
- * after this, stripe_nr is the number of stripes on this
- * device we have to walk to find the data, and stripe_index is
- * the number of our device in the stripe array
- */
- stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
- &stripe_index);
- mirror_num = stripe_index + 1;
- }
- if (stripe_index >= map->num_stripes) {
- btrfs_crit(fs_info,
- "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
- stripe_index, map->num_stripes);
- ret = -EINVAL;
- goto out;
+ if (io_geom->mirror_num) {
+ io_geom->stripe_index = io_geom->mirror_num - 1;
+ return;
}
- num_alloc_stripes = num_stripes;
- if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
- if (op == BTRFS_MAP_WRITE)
- num_alloc_stripes <<= 1;
- if (op == BTRFS_MAP_GET_READ_MIRRORS)
- num_alloc_stripes++;
- tgtdev_indexes = num_stripes;
- }
+ io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
+ dev_replace_is_ongoing);
+ io_geom->mirror_num = io_geom->stripe_index + 1;
+}
- bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
- if (!bioc) {
- ret = -ENOMEM;
- goto out;
+static void map_blocks_dup(const struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom)
+{
+ if (io_geom->op != BTRFS_MAP_READ) {
+ io_geom->num_stripes = map->num_stripes;
+ return;
}
- for (i = 0; i < num_stripes; i++) {
- bioc->stripes[i].physical = map->stripes[stripe_index].physical +
- stripe_offset + stripe_nr * map->stripe_len;
- bioc->stripes[i].dev = map->stripes[stripe_index].dev;
- stripe_index++;
+ if (io_geom->mirror_num) {
+ io_geom->stripe_index = io_geom->mirror_num - 1;
+ return;
}
- /* Build raid_map */
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
- (need_full_stripe(op) || mirror_num > 1)) {
- u64 tmp;
- unsigned rot;
-
- /* Work out the disk rotation on this stripe-set */
- div_u64_rem(stripe_nr, num_stripes, &rot);
+ io_geom->mirror_num = 1;
+}
- /* Fill in the logical address of each stripe */
- tmp = stripe_nr * data_stripes;
- for (i = 0; i < data_stripes; i++)
- bioc->raid_map[(i + rot) % num_stripes] =
- em->start + (tmp + i) * map->stripe_len;
+static void map_blocks_raid10(struct btrfs_fs_info *fs_info,
+ struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom,
+ bool dev_replace_is_ongoing)
+{
+ u32 factor = map->num_stripes / map->sub_stripes;
+ int old_stripe_index;
- bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
- if (map->type & BTRFS_BLOCK_GROUP_RAID6)
- bioc->raid_map[(i + rot + 1) % num_stripes] =
- RAID6_Q_STRIPE;
+ io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
+ io_geom->stripe_nr /= factor;
- sort_parity_stripes(bioc, num_stripes);
+ if (io_geom->op != BTRFS_MAP_READ) {
+ io_geom->num_stripes = map->sub_stripes;
+ return;
}
- if (need_full_stripe(op))
- max_errors = btrfs_chunk_max_errors(map);
-
- if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
- need_full_stripe(op)) {
- handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
- &num_stripes, &max_errors);
+ if (io_geom->mirror_num) {
+ io_geom->stripe_index += io_geom->mirror_num - 1;
+ return;
}
- *bioc_ret = bioc;
- bioc->map_type = map->type;
- bioc->num_stripes = num_stripes;
- bioc->max_errors = max_errors;
- bioc->mirror_num = mirror_num;
+ old_stripe_index = io_geom->stripe_index;
+ io_geom->stripe_index = find_live_mirror(fs_info, map,
+ io_geom->stripe_index,
+ dev_replace_is_ongoing);
+ io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1;
+}
+
+static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom,
+ u64 logical, u64 *length)
+{
+ int data_stripes = nr_data_stripes(map);
/*
- * this is the case that REQ_READ && dev_replace_is_ongoing &&
- * mirror_num == num_stripes + 1 && dev_replace target drive is
- * available as a mirror
+ * Needs full stripe mapping.
+ *
+ * Push stripe_nr back to the start of the full stripe. For those cases
+ * needing a full stripe, @stripe_nr is the full stripe number.
+ *
+ * Originally we would compute raid56_full_stripe_start / full_stripe_len,
+ * but that can be expensive. Here we just divide @stripe_nr by
+ * @data_stripes.
*/
- if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
- WARN_ON(num_stripes > 1);
- bioc->stripes[0].dev = dev_replace->tgtdev;
- bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
- bioc->mirror_num = map->num_stripes + 1;
- }
-out:
- if (dev_replace_is_ongoing) {
- lockdep_assert_held(&dev_replace->rwsem);
- /* Unlock and let waiting writers proceed */
- up_read(&dev_replace->rwsem);
- }
- free_extent_map(em);
- return ret;
+ io_geom->stripe_nr /= data_stripes;
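+
+ /*
+ * Example: RAID5 over 3 devices has data_stripes=2, so a stripe_nr of
+ * 7 (the 8th 64K data stripe of the chunk) falls into full stripe 3.
+ */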
+
+ /* RAID[56] write or recovery. Return all stripes */
+ io_geom->num_stripes = map->num_stripes;
+ io_geom->max_errors = btrfs_chunk_max_errors(map);
+
+ /* Return the length to the full stripe end. */
+ *length = min(logical + *length,
+ io_geom->raid56_full_stripe_start + map->start +
+ btrfs_stripe_nr_to_offset(data_stripes)) -
+ logical;
+ io_geom->stripe_index = 0;
+ io_geom->stripe_offset = 0;
}
-int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
- u64 logical, u64 *length,
- struct btrfs_io_context **bioc_ret, int mirror_num)
+static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom)
{
- if (op == BTRFS_MAP_DISCARD)
- return __btrfs_map_block_for_discard(fs_info, logical,
- length, bioc_ret);
+ int data_stripes = nr_data_stripes(map);
+
+ ASSERT(io_geom->mirror_num <= 1, "mirror_num=%d", io_geom->mirror_num);
+ /* Just grab the data stripe directly. */
+ io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
+ io_geom->stripe_nr /= data_stripes;
+
+ /* We distribute the parity blocks across stripes. */
+ io_geom->stripe_index =
+ (io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;
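+ /*
+ * Example: RAID5 over 3 devices (data_stripes=2). A data stripe_nr of
+ * 5 gives stripe_index = 1 within full stripe 2; rotating by the full
+ * stripe number places it on device (2 + 1) % 3 = 0.
+ */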
- return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
- mirror_num, 0);
+ if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1)
+ io_geom->mirror_num = 1;
}
-/* For Scrub/replace */
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
- u64 logical, u64 *length,
- struct btrfs_io_context **bioc_ret)
+static void map_blocks_single(const struct btrfs_chunk_map *map,
+ struct btrfs_io_geometry *io_geom)
{
- return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
+ io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
+ io_geom->stripe_nr /= map->num_stripes;
+ io_geom->mirror_num = io_geom->stripe_index + 1;
}
-static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
+/*
+ * Map one logical range to one or more physical ranges.
+ *
+ * @length: (Mandatory) mapped length of this run.
+ * One logical range can be split into different segments
+ * due to factors like zones and RAID0/5/6/10 stripe
+ * boundaries.
+ *
+ * @bioc_ret: (Mandatory) returned btrfs_io_context structure,
+ * which has one or more physical ranges (btrfs_io_stripe)
+ * recorded inside.
+ * The caller should call btrfs_put_bioc() to free it after use.
+ *
+ * @smap: (Optional) single physical range optimization.
+ * If the map request can be fulfilled by one single
+ * physical range and this parameter is not NULL, then
+ * @bioc_ret will be NULL and @smap will be updated.
+ *
+ * @mirror_num_ret: (Optional) returned mirror number if the original
+ * value is 0.
+ *
+ * Mirror number 0 means to choose any live mirrors.
+ *
+ * For non-RAID56 profiles, non-zero mirror_num means
+ * the Nth mirror (e.g. mirror_num 1 means the first
+ * copy).
+ *
+ * For RAID56 profile, mirror 1 means rebuild from P and
+ * the remaining data stripes.
+ *
+ * For the RAID6 profile, mirror > 2 means marking another
+ * data/P stripe as bad and rebuilding from the remaining
+ * stripes.
+ */
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+ u64 logical, u64 *length,
+ struct btrfs_io_context **bioc_ret,
+ struct btrfs_io_stripe *smap, int *mirror_num_ret)
{
- bio->bi_private = bioc->private;
- bio->bi_end_io = bioc->end_io;
- bio_endio(bio);
+ struct btrfs_chunk_map *map;
+ struct btrfs_io_geometry io_geom = { 0 };
+ u64 map_offset;
+ int ret = 0;
+ int num_copies;
+ struct btrfs_io_context *bioc = NULL;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ bool dev_replace_is_ongoing = false;
+ u16 num_alloc_stripes;
+ u64 max_len;
- btrfs_put_bioc(bioc);
-}
+ ASSERT(bioc_ret);
-static void btrfs_end_bio(struct bio *bio)
-{
- struct btrfs_io_context *bioc = bio->bi_private;
- int is_orig_bio = 0;
+ io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
+ io_geom.num_stripes = 1;
+ io_geom.stripe_index = 0;
+ io_geom.op = op;
- if (bio->bi_status) {
- atomic_inc(&bioc->error);
- if (bio->bi_status == BLK_STS_IOERR ||
- bio->bi_status == BLK_STS_TARGET) {
- struct btrfs_device *dev = btrfs_bio(bio)->device;
+ map = btrfs_get_chunk_map(fs_info, logical, *length);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
- ASSERT(dev->bdev);
- if (btrfs_op(bio) == BTRFS_MAP_WRITE)
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_WRITE_ERRS);
- else if (!(bio->bi_opf & REQ_RAHEAD))
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_READ_ERRS);
- if (bio->bi_opf & REQ_PREFLUSH)
- btrfs_dev_stat_inc_and_print(dev,
- BTRFS_DEV_STAT_FLUSH_ERRS);
- }
- }
+ num_copies = btrfs_chunk_map_num_copies(map);
+ if (io_geom.mirror_num > num_copies)
+ return -EINVAL;
- if (bio == bioc->orig_bio)
- is_orig_bio = 1;
+ map_offset = logical - map->start;
+ io_geom.raid56_full_stripe_start = (u64)-1;
+ max_len = btrfs_max_io_len(map, map_offset, &io_geom);
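+ /* Clamp the mapped length to both the chunk end and the per-stripe limit. */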
+ *length = min_t(u64, map->chunk_len - map_offset, max_len);
+ io_geom.use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
- btrfs_bio_counter_dec(bioc->fs_info);
+ if (dev_replace->replace_task != current)
+ down_read(&dev_replace->rwsem);
- if (atomic_dec_and_test(&bioc->stripes_pending)) {
- if (!is_orig_bio) {
- bio_put(bio);
- bio = bioc->orig_bio;
- }
+ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ /*
+ * Hold the semaphore for read during the whole operation; the write
+ * lock is requested at commit time but must wait.
+ */
+ if (!dev_replace_is_ongoing && dev_replace->replace_task != current)
+ up_read(&dev_replace->rwsem);
- btrfs_bio(bio)->mirror_num = bioc->mirror_num;
- /* only send an error to the higher layers if it is
- * beyond the tolerance of the btrfs bio
+ switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case BTRFS_BLOCK_GROUP_RAID0:
+ map_blocks_raid0(map, &io_geom);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID1:
+ case BTRFS_BLOCK_GROUP_RAID1C3:
+ case BTRFS_BLOCK_GROUP_RAID1C4:
+ map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+ map_blocks_dup(map, &io_geom);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID10:
+ map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID5:
+ case BTRFS_BLOCK_GROUP_RAID6:
+ if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)
+ map_blocks_raid56_write(map, &io_geom, logical, length);
+ else
+ map_blocks_raid56_read(map, &io_geom);
+ break;
+ default:
+ /*
+ * After this, stripe_nr is the number of stripes on this
+ * device we have to walk to find the data, and stripe_index is
+ * the index of our device in the stripe array.
*/
- if (atomic_read(&bioc->error) > bioc->max_errors) {
- bio->bi_status = BLK_STS_IOERR;
- } else {
- /*
- * this bio is actually up to date, we didn't
- * go over the max number of errors
- */
- bio->bi_status = BLK_STS_OK;
- }
-
- btrfs_end_bioc(bioc, bio);
- } else if (!is_orig_bio) {
- bio_put(bio);
+ map_blocks_single(map, &io_geom);
+ break;
+ }
+ if (io_geom.stripe_index >= map->num_stripes) {
+ btrfs_crit(fs_info,
+ "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
+ io_geom.stripe_index, map->num_stripes);
+ ret = -EINVAL;
+ goto out;
}
-}
-static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
- u64 physical, struct btrfs_device *dev)
-{
- struct btrfs_fs_info *fs_info = bioc->fs_info;
+ num_alloc_stripes = io_geom.num_stripes;
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
+ op != BTRFS_MAP_READ)
+ /*
+ * For the replace case, we need to add extra stripes for the
+ * duplicated stripes.
+ *
+ * For both WRITE and GET_READ_MIRRORS, we may have at most
+ * 2 more stripes (for the DUP profile, otherwise 1).
+ */
+ num_alloc_stripes += 2;
- bio->bi_private = bioc;
- btrfs_bio(bio)->device = dev;
- bio->bi_end_io = btrfs_end_bio;
- bio->bi_iter.bi_sector = physical >> 9;
/*
- * For zone append writing, bi_sector must point the beginning of the
- * zone
+ * If this I/O maps to a single device, try to return the device and
+ * physical block information on the stack instead of allocating an
+ * I/O context structure.
*/
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- if (btrfs_dev_is_sequential(dev, physical)) {
- u64 zone_start = round_down(physical, fs_info->zone_size);
-
- bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
- } else {
- bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
- bio->bi_opf |= REQ_OP_WRITE;
- }
+ if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, &io_geom)) {
+ ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
+ if (mirror_num_ret)
+ *mirror_num_ret = io_geom.mirror_num;
+ *bioc_ret = NULL;
+ goto out;
}
- btrfs_debug_in_rcu(fs_info,
- "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
- bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
- (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
- dev->devid, bio->bi_iter.bi_size);
- bio_set_dev(bio, dev->bdev);
-
- btrfs_bio_counter_inc_noblocked(fs_info);
-
- btrfsic_submit_bio(bio);
-}
-static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
-{
- atomic_inc(&bioc->error);
- if (atomic_dec_and_test(&bioc->stripes_pending)) {
- /* Should be the original bio. */
- WARN_ON(bio != bioc->orig_bio);
-
- btrfs_bio(bio)->mirror_num = bioc->mirror_num;
- bio->bi_iter.bi_sector = logical >> 9;
- if (atomic_read(&bioc->error) > bioc->max_errors)
- bio->bi_status = BLK_STS_IOERR;
- else
- bio->bi_status = BLK_STS_OK;
- btrfs_end_bioc(bioc, bio);
+ bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
+ if (!bioc) {
+ ret = -ENOMEM;
+ goto out;
}
-}
-
-blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num)
-{
- struct btrfs_device *dev;
- struct bio *first_bio = bio;
- u64 logical = bio->bi_iter.bi_sector << 9;
- u64 length = 0;
- u64 map_length;
- int ret;
- int dev_nr;
- int total_devs;
- struct btrfs_io_context *bioc = NULL;
-
- length = bio->bi_iter.bi_size;
- map_length = length;
+ bioc->map_type = map->type;
+ bioc->use_rst = io_geom.use_rst;
- btrfs_bio_counter_inc_blocked(fs_info);
- ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
- &map_length, &bioc, mirror_num, 1);
- if (ret) {
- btrfs_bio_counter_dec(fs_info);
- return errno_to_blk_status(ret);
- }
-
- total_devs = bioc->num_stripes;
- bioc->orig_bio = first_bio;
- bioc->private = first_bio->bi_private;
- bioc->end_io = first_bio->bi_end_io;
- atomic_set(&bioc->stripes_pending, bioc->num_stripes);
-
- if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
- ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
- /* In this case, map_length has been set to the length of
- a single stripe; not the whole write */
- if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
- ret = raid56_parity_write(bio, bioc, map_length);
- } else {
- ret = raid56_parity_recover(bio, bioc, map_length,
- mirror_num, 1);
+ /*
+ * For a RAID56 full map, we need to make sure stripes[] follows the
+ * rule that the data stripes come first, in order, followed by P and Q
+ * (if present).
+ *
+ * It's still mostly the same as other profiles, just with extra rotation.
+ */
+ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
+ (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
+ /*
+ * For RAID56 @stripe_nr is already the number of full stripes
+ * before us, which is also the rotation value (it still needs a
+ * modulo with num_stripes).
+ *
+ * In this case, we just add @stripe_nr to @i and then do the
+ * modulo, saving one modulo call.
+ */
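+ /*
+ * Example: with num_stripes=3 and full stripe number stripe_nr=2, the
+ * loop below fills bioc->stripes[0..2] from map stripes 2, 0 and 1,
+ * i.e. data stripe 0, data stripe 1 and then P.
+ */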
+ bioc->full_stripe_logical = map->start +
+ btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
+ nr_data_stripes(map));
+ for (int i = 0; i < io_geom.num_stripes; i++) {
+ struct btrfs_io_stripe *dst = &bioc->stripes[i];
+ u32 stripe_index;
+
+ stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
+ dst->dev = map->stripes[stripe_index].dev;
+ dst->physical =
+ map->stripes[stripe_index].physical +
+ io_geom.stripe_offset +
+ btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
+ }
+ } else {
+ /*
+ * For all other non-RAID56 profiles, just copy the target
+ * stripe into the bioc.
+ */
+ for (int i = 0; i < io_geom.num_stripes; i++) {
+ ret = set_io_stripe(fs_info, logical, length,
+ &bioc->stripes[i], map, &io_geom);
+ if (ret < 0)
+ break;
+ io_geom.stripe_index++;
}
-
- btrfs_bio_counter_dec(fs_info);
- return errno_to_blk_status(ret);
}
- if (map_length < length) {
- btrfs_crit(fs_info,
- "mapping failed logical %llu bio len %llu len %llu",
- logical, length, map_length);
- BUG();
+ if (ret) {
+ *bioc_ret = NULL;
+ btrfs_put_bioc(bioc);
+ goto out;
}
- for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
- dev = bioc->stripes[dev_nr].dev;
- if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
- &dev->dev_state) ||
- (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
- !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
- bioc_error(bioc, first_bio, logical);
- continue;
- }
+ if (op != BTRFS_MAP_READ)
+ io_geom.max_errors = btrfs_chunk_max_errors(map);
- if (dev_nr < total_devs - 1)
- bio = btrfs_bio_clone(first_bio);
- else
- bio = first_bio;
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
+ op != BTRFS_MAP_READ) {
+ handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom);
+ }
- submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
+ *bioc_ret = bioc;
+ bioc->num_stripes = io_geom.num_stripes;
+ bioc->max_errors = io_geom.max_errors;
+ bioc->mirror_num = io_geom.mirror_num;
+
+out:
+ if (dev_replace_is_ongoing && dev_replace->replace_task != current) {
+ lockdep_assert_held(&dev_replace->rwsem);
+ /* Unlock and let waiting writers proceed */
+ up_read(&dev_replace->rwsem);
}
- btrfs_bio_counter_dec(fs_info);
- return BLK_STS_OK;
+ btrfs_free_chunk_map(map);
+ return ret;
}
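+
+/*
+ * Minimal usage sketch of the interface above (hypothetical caller, not taken
+ * from in-tree code; submit_read() is only a placeholder). Assuming @fs_info,
+ * @logical and a u64 @length are already set up for a read:
+ *
+ *	struct btrfs_io_context *bioc = NULL;
+ *	struct btrfs_io_stripe smap = { 0 };
+ *	int mirror_num = 0;
+ *	int ret;
+ *
+ *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &length,
+ *			      &bioc, &smap, &mirror_num);
+ *	if (ret)
+ *		return ret;
+ *	if (bioc) {
+ *		... use bioc->stripes[0 .. bioc->num_stripes - 1] ...
+ *		btrfs_put_bioc(bioc);
+ *	} else {
+ *		submit_read(smap.dev, smap.physical, length);
+ *	}
+ */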
static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
@@ -6820,18 +6755,20 @@ static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
const struct btrfs_device *device)
{
- ASSERT((args->devid != (u64)-1) || args->missing);
+ if (args->devt)
+ return device->devt == args->devt;
+ if (args->missing) {
+ if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
+ !device->bdev)
+ return true;
+ return false;
+ }
- if ((args->devid != (u64)-1) && device->devid != args->devid)
+ if (device->devid != args->devid)
return false;
if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
return false;
- if (!args->missing)
- return true;
- if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
- !device->bdev)
- return true;
- return false;
+ return true;
}
/*
@@ -6878,8 +6815,9 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
* always do NOFS because we use it in a lot of other GFP_KERNEL safe
* places.
*/
+
nofs_flag = memalloc_nofs_save();
- device = btrfs_alloc_device(NULL, &devid, dev_uuid);
+ device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(device))
return device;
@@ -6894,22 +6832,24 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
return device;
}
-/**
- * btrfs_alloc_device - allocate struct btrfs_device
+/*
+ * Allocate new device struct, set up devid and UUID.
+ *
* @fs_info: used only for generating a new devid, can be NULL if
* devid is provided (i.e. @devid != NULL).
* @devid: a pointer to devid for this device. If NULL a new devid
* is generated.
* @uuid: a pointer to UUID for this device. If NULL a new UUID
* is generated.
+ * @path: a pointer to device path if available, NULL otherwise.
*
* Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
* on error. Returned struct is not linked onto any lists and must be
* destroyed with btrfs_free_device.
*/
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
- const u64 *devid,
- const u8 *uuid)
+ const u64 *devid, const u8 *uuid,
+ const char *path)
{
struct btrfs_device *dev;
u64 tmp;
@@ -6921,27 +6861,13 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
if (!dev)
return ERR_PTR(-ENOMEM);
- /*
- * Preallocate a bio that's always going to be used for flushing device
- * barriers and matches the device lifespan
- */
- dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
- if (!dev->flush_bio) {
- kfree(dev);
- return ERR_PTR(-ENOMEM);
- }
-
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_alloc_list);
INIT_LIST_HEAD(&dev->post_commit_list);
- atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
btrfs_device_data_ordered_init(dev);
- INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
- INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
- extent_io_tree_init(fs_info, &dev->alloc_state,
- IO_TREE_DEVICE_ALLOC_STATE, NULL);
+ btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
if (devid)
tmp = *devid;
@@ -6961,6 +6887,17 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
else
generate_random_uuid(dev->uuid);
+ if (path) {
+ const char *name;
+
+ name = kstrdup(path, GFP_KERNEL);
+ if (!name) {
+ btrfs_free_device(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+ rcu_assign_pointer(dev->name, name);
+ }
+
return dev;
}
@@ -6975,11 +6912,11 @@ static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
devid, uuid);
}
-static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
+u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
{
- const int data_stripes = calc_data_stripes(type, num_stripes);
+ const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
- return div_u64(chunk_len, data_stripes);
+ return div_u64(map->chunk_len, data_stripes);
}
#if BITS_PER_LONG == 32
@@ -7022,19 +6959,39 @@ static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
}
#endif
+static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
+ u64 devid, u8 *uuid)
+{
+ struct btrfs_device *dev;
+
+ if (!btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_report_missing_device(fs_info, devid, uuid, true);
+ return ERR_PTR(-ENOENT);
+ }
+
+ dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
+ if (IS_ERR(dev)) {
+ btrfs_err(fs_info, "failed to init missing device %llu: %ld",
+ devid, PTR_ERR(dev));
+ return dev;
+ }
+ btrfs_report_missing_device(fs_info, devid, uuid, false);
+
+ return dev;
+}
+
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
BTRFS_DEV_LOOKUP_ARGS(args);
struct btrfs_fs_info *fs_info = leaf->fs_info;
- struct extent_map_tree *map_tree = &fs_info->mapping_tree;
- struct map_lookup *map;
- struct extent_map *em;
+ struct btrfs_chunk_map *map;
u64 logical;
u64 length;
u64 devid;
u64 type;
u8 uuid[BTRFS_UUID_SIZE];
+ int index;
int num_stripes;
int ret;
int i;
@@ -7042,6 +6999,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
type = btrfs_chunk_type(leaf, chunk);
+ index = btrfs_bg_flags_to_raid_index(type);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
#if BITS_PER_LONG == 32
@@ -7051,54 +7009,37 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif
- /*
- * Only need to verify chunk item if we're reading from sys chunk array,
- * as chunk item in tree block is already verified by tree-checker.
- */
- if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
- ret = btrfs_check_chunk_valid(leaf, chunk, logical);
- if (ret)
- return ret;
- }
-
- read_lock(&map_tree->lock);
- em = lookup_extent_mapping(map_tree, logical, 1);
- read_unlock(&map_tree->lock);
+ map = btrfs_find_chunk_map(fs_info, logical, 1);
/* already mapped? */
- if (em && em->start <= logical && em->start + em->len > logical) {
- free_extent_map(em);
+ if (map && map->start <= logical && map->start + map->chunk_len > logical) {
+ btrfs_free_chunk_map(map);
return 0;
- } else if (em) {
- free_extent_map(em);
+ } else if (map) {
+ btrfs_free_chunk_map(map);
}
- em = alloc_extent_map();
- if (!em)
- return -ENOMEM;
- map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
- if (!map) {
- free_extent_map(em);
+ map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
+ if (!map)
return -ENOMEM;
- }
-
- set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
- em->map_lookup = map;
- em->start = logical;
- em->len = length;
- em->orig_start = 0;
- em->block_start = 0;
- em->block_len = em->len;
+ map->start = logical;
+ map->chunk_len = length;
map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk);
- map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = type;
- map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ /*
+ * We can't use the sub_stripes value here, as profiles other than
+ * RAID10 may have it set to 0 on filesystems created by older mkfs
+ * (< v5.4), which can cause divide-by-zero errors later.
+ * Since sub_stripes is currently fixed for each profile, use the
+ * trusted value from btrfs_raid_array instead.
+ */
+ map->sub_stripes = btrfs_raid_array[index].sub_stripes;
map->verified_stripes = 0;
- em->orig_block_len = calc_stripe_length(type, em->len,
- map->num_stripes);
+ map->stripe_size = btrfs_calc_stripe_length(map);
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
@@ -7109,39 +7050,27 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
BTRFS_UUID_SIZE);
args.uuid = uuid;
map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
- if (!map->stripes[i].dev &&
- !btrfs_test_opt(fs_info, DEGRADED)) {
- free_extent_map(em);
- btrfs_report_missing_device(fs_info, devid, uuid, true);
- return -ENOENT;
- }
if (!map->stripes[i].dev) {
- map->stripes[i].dev =
- add_missing_dev(fs_info->fs_devices, devid,
- uuid);
+ map->stripes[i].dev = handle_missing_device(fs_info,
+ devid, uuid);
if (IS_ERR(map->stripes[i].dev)) {
- free_extent_map(em);
- btrfs_err(fs_info,
- "failed to init missing dev %llu: %ld",
- devid, PTR_ERR(map->stripes[i].dev));
- return PTR_ERR(map->stripes[i].dev);
+ ret = PTR_ERR(map->stripes[i].dev);
+ btrfs_free_chunk_map(map);
+ return ret;
}
- btrfs_report_missing_device(fs_info, devid, uuid, false);
}
+
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
&(map->stripes[i].dev->dev_state));
-
}
- write_lock(&map_tree->lock);
- ret = add_extent_mapping(map_tree, em, 0);
- write_unlock(&map_tree->lock);
+ ret = btrfs_add_chunk_map(fs_info, map);
if (ret < 0) {
btrfs_err(fs_info,
"failed to add chunk map, start=%llu len=%llu: %d",
- em->start, em->len, ret);
+ map->start, map->chunk_len, ret);
+ btrfs_free_chunk_map(map);
}
- free_extent_map(em);
return ret;
}
@@ -7186,10 +7115,14 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices = find_fsid(fsid, NULL);
if (!fs_devices) {
- if (!btrfs_test_opt(fs_info, DEGRADED))
+ if (!btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_err(fs_info,
+ "failed to find fsid %pU when attempting to open seed devices",
+ fsid);
return ERR_PTR(-ENOENT);
+ }
- fs_devices = alloc_fs_devices(fsid, NULL);
+ fs_devices = alloc_fs_devices(fsid);
if (IS_ERR(fs_devices))
return fs_devices;
@@ -7206,7 +7139,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
if (IS_ERR(fs_devices))
return fs_devices;
- ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
+ ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->sb);
if (ret) {
free_fs_devices(fs_devices);
return ERR_PTR(ret);
@@ -7235,7 +7168,8 @@ static int read_one_dev(struct extent_buffer *leaf,
u8 fs_uuid[BTRFS_FSID_SIZE];
u8 dev_uuid[BTRFS_UUID_SIZE];
- devid = args.devid = btrfs_device_id(leaf, dev_item);
+ devid = btrfs_device_id(leaf, dev_item);
+ args.devid = devid;
read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
@@ -7335,46 +7269,26 @@ static int read_one_dev(struct extent_buffer *leaf,
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *root = fs_info->tree_root;
struct btrfs_super_block *super_copy = fs_info->super_copy;
struct extent_buffer *sb;
- struct btrfs_disk_key *disk_key;
- struct btrfs_chunk *chunk;
u8 *array_ptr;
unsigned long sb_array_offset;
int ret = 0;
- u32 num_stripes;
u32 array_size;
- u32 len = 0;
u32 cur_offset;
- u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
+
/*
- * This will create extent buffer of nodesize, superblock size is
- * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
- * overallocate but we can keep it as-is, only the first page is used.
+ * We allocate a dummy extent buffer just so we can use the extent
+ * buffer accessors. There will be unused space after
+ * BTRFS_SUPER_INFO_SIZE, but that's fine; we will not go beyond the
+ * system chunk array anyway.
*/
- sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
- root->root_key.objectid, 0);
- if (IS_ERR(sb))
- return PTR_ERR(sb);
+ sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
+ if (!sb)
+ return -ENOMEM;
set_extent_buffer_uptodate(sb);
- /*
- * The sb extent buffer is artificial and just used to read the system array.
- * set_extent_buffer_uptodate() call does not properly mark all it's
- * pages up-to-date when the page is larger: extent does not cover the
- * whole page and consequently check_page_uptodate does not find all
- * the page's extents up-to-date (the hole beyond sb),
- * write_extent_buffer then triggers a WARN_ON.
- *
- * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
- * but sb spans only this function. Add an explicit SetPageUptodate call
- * to silence the warning eg. on PowerPC 64.
- */
- if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
- SetPageUptodate(sb->pages[0]);
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
array_size = btrfs_super_sys_array_size(super_copy);
@@ -7384,10 +7298,15 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
cur_offset = 0;
while (cur_offset < array_size) {
- disk_key = (struct btrfs_disk_key *)array_ptr;
- len = sizeof(*disk_key);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ struct btrfs_chunk *chunk;
+ struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)array_ptr;
+ u32 len = sizeof(*disk_key);
+
+ /*
+ * The sys_chunk_array has already been verified at super block
+ * read time. Only do ASSERT()s for basic checks.
+ */
+ ASSERT(cur_offset + len <= array_size);
btrfs_disk_key_to_cpu(&key, disk_key);
@@ -7395,44 +7314,14 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
sb_array_offset += len;
cur_offset += len;
- if (key.type != BTRFS_CHUNK_ITEM_KEY) {
- btrfs_err(fs_info,
- "unexpected item type %u in sys_array at offset %u",
- (u32)key.type, cur_offset);
- ret = -EIO;
- break;
- }
+ ASSERT(key.type == BTRFS_CHUNK_ITEM_KEY);
chunk = (struct btrfs_chunk *)sb_array_offset;
- /*
- * At least one btrfs_chunk with one stripe must be present,
- * exact stripe count check comes afterwards
- */
- len = btrfs_chunk_item_size(1);
- if (cur_offset + len > array_size)
- goto out_short_read;
-
- num_stripes = btrfs_chunk_num_stripes(sb, chunk);
- if (!num_stripes) {
- btrfs_err(fs_info,
- "invalid number of stripes %u in sys_array at offset %u",
- num_stripes, cur_offset);
- ret = -EIO;
- break;
- }
+ ASSERT(btrfs_chunk_type(sb, chunk) & BTRFS_BLOCK_GROUP_SYSTEM);
- type = btrfs_chunk_type(sb, chunk);
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
- btrfs_err(fs_info,
- "invalid chunk type %llu in sys_array at offset %u",
- type, cur_offset);
- ret = -EIO;
- break;
- }
+ len = btrfs_chunk_item_size(btrfs_chunk_num_stripes(sb, chunk));
- len = btrfs_chunk_item_size(num_stripes);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ ASSERT(cur_offset + len <= array_size);
ret = read_one_chunk(&key, sb, chunk);
if (ret)
@@ -7445,13 +7334,6 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return ret;
-
-out_short_read:
- btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
- len, cur_offset);
- clear_extent_buffer_uptodate(sb);
- free_extent_buffer_stale(sb);
- return -EIO;
}
/*
@@ -7465,26 +7347,21 @@ out_short_read:
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
struct btrfs_device *failing_dev)
{
- struct extent_map_tree *map_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- u64 next_start = 0;
+ struct btrfs_chunk_map *map;
+ u64 next_start;
bool ret = true;
- read_lock(&map_tree->lock);
- em = lookup_extent_mapping(map_tree, 0, (u64)-1);
- read_unlock(&map_tree->lock);
+ map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
/* No chunk at all? Return false anyway */
- if (!em) {
+ if (!map) {
ret = false;
goto out;
}
- while (em) {
- struct map_lookup *map;
+ while (map) {
int missing = 0;
int max_tolerated;
int i;
- map = em->map_lookup;
max_tolerated =
btrfs_get_num_tolerated_disk_barrier_failures(
map->type);
@@ -7502,18 +7379,15 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
if (!failing_dev)
btrfs_warn(fs_info,
"chunk %llu missing %d devices, max tolerance is %d for writable mount",
- em->start, missing, max_tolerated);
- free_extent_map(em);
+ map->start, missing, max_tolerated);
+ btrfs_free_chunk_map(map);
ret = false;
goto out;
}
- next_start = extent_map_end(em);
- free_extent_map(em);
+ next_start = map->start + map->chunk_len;
+ btrfs_free_chunk_map(map);
- read_lock(&map_tree->lock);
- em = lookup_extent_mapping(map_tree, next_start,
- (u64)(-1) - next_start);
- read_unlock(&map_tree->lock);
+ map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
}
out:
return ret;
@@ -7531,12 +7405,13 @@ static void readahead_tree_node_children(struct extent_buffer *node)
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root = fs_info->chunk_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
struct btrfs_key key;
struct btrfs_key found_key;
int ret;
int slot;
+ int iter_ret = 0;
u64 total_dev = 0;
u64 last_ra_node = 0;
@@ -7559,42 +7434,39 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
fs_info->fs_devices->total_rw_bytes = 0;
/*
+ * Lockdep complains about possible circular locking dependency between
+ * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
+ * used for freeze protection of a fs (struct super_block.s_writers),
+ * which we take when starting a transaction, and extent buffers of the
+ * chunk tree if we call read_one_dev() while holding a lock on an
+ * extent buffer of the chunk tree. Since we are mounting the filesystem
+ * and at this point there can't be any concurrent task modifying the
+ * chunk tree, to keep it simple, just skip locking on the chunk tree.
+ */
+ ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
+ path->skip_locking = true;
+
+ /*
* Read all device items, and then all the chunk items. All
* device items are found before any chunk item (their object id
* is smaller than the lowest possible object id for a chunk
* item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
*/
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
- key.offset = 0;
key.type = 0;
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto error;
- while (1) {
- struct extent_buffer *node;
+ key.offset = 0;
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
+ struct extent_buffer *node = path->nodes[1];
leaf = path->nodes[0];
slot = path->slots[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto error;
- break;
- }
- /*
- * The nodes on level 1 are not locked but we don't need to do
- * that during mount time as nothing else can access the tree
- */
- node = path->nodes[1];
+
if (node) {
if (last_ra_node != node->start) {
readahead_tree_node_children(node);
last_ra_node = node->start;
}
}
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.type == BTRFS_DEV_ITEM_KEY) {
struct btrfs_dev_item *dev_item;
dev_item = btrfs_item_ptr(leaf, slot,
@@ -7614,13 +7486,16 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
* requirement for chunk allocation, see the comment on
* top of btrfs_chunk_alloc() for details.
*/
- ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
ret = read_one_chunk(&found_key, leaf, chunk);
if (ret)
goto error;
}
- path->slots[0]++;
+ }
+ /* Catch error found during iteration */
+ if (iter_ret < 0) {
+ ret = iter_ret;
+ goto error;
}
/*
@@ -7628,12 +7503,12 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
* do another round of validation checks.
*/
if (total_dev != fs_info->fs_devices->total_devices) {
- btrfs_err(fs_info,
- "super_num_devices %llu mismatch with num_devices %llu found here",
+ btrfs_warn(fs_info,
+"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
btrfs_super_num_devices(fs_info->super_copy),
total_dev);
- ret = -EINVAL;
- goto error;
+ fs_info->fs_devices->total_devices = total_dev;
+ btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
}
if (btrfs_super_total_bytes(fs_info->super_copy) <
fs_info->fs_devices->total_rw_bytes) {
@@ -7647,29 +7522,32 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
ret = 0;
error:
mutex_unlock(&uuid_mutex);
-
- btrfs_free_path(path);
return ret;
}
-void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
+int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
struct btrfs_device *device;
-
- fs_devices->fs_info = fs_info;
+ int ret = 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list)
device->fs_info = fs_info;
list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
- list_for_each_entry(device, &seed_devs->devices, dev_list)
+ list_for_each_entry(device, &seed_devs->devices, dev_list) {
device->fs_info = fs_info;
+ ret = btrfs_get_dev_zone_info(device, false);
+ if (ret)
+ break;
+ }
seed_devs->fs_info = fs_info;
}
mutex_unlock(&fs_devices->device_list_mutex);
+
+ return ret;
}
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
@@ -7720,7 +7598,7 @@ static int btrfs_device_init_dev_stats(struct btrfs_device *device,
}
slot = path->slots[0];
eb = path->nodes[0];
- item_size = btrfs_item_size_nr(eb, slot);
+ item_size = btrfs_item_size(eb, slot);
ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
@@ -7743,7 +7621,7 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
struct btrfs_device *device;
- struct btrfs_path *path = NULL;
+ BTRFS_PATH_AUTO_FREE(path);
int ret = 0;
path = btrfs_alloc_path();
@@ -7765,8 +7643,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
}
out:
mutex_unlock(&fs_devices->device_list_mutex);
-
- btrfs_free_path(path);
return ret;
}
@@ -7775,7 +7651,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct extent_buffer *eb;
struct btrfs_dev_stats_item *ptr;
@@ -7791,21 +7667,21 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
return -ENOMEM;
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"error %d while searching for dev_stats item for device %s",
- ret, rcu_str_deref(device->name));
- goto out;
+ ret, btrfs_dev_name(device));
+ return ret;
}
if (ret == 0 &&
- btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
+ btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
/* need to delete old one and insert a new one */
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"delete too small dev_stats item for device %s failed %d",
- rcu_str_deref(device->name), ret);
- goto out;
+ btrfs_dev_name(device), ret);
+ return ret;
}
ret = 1;
}
@@ -7816,10 +7692,10 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
- btrfs_warn_in_rcu(fs_info,
+ btrfs_warn(fs_info,
"insert dev_stats item for device %s failed %d",
- rcu_str_deref(device->name), ret);
- goto out;
+ btrfs_dev_name(device), ret);
+ return ret;
}
}
@@ -7828,10 +7704,6 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
btrfs_set_dev_stats_value(eb, ptr, i,
btrfs_dev_stat_read(device, i));
- btrfs_mark_buffer_dirty(eb);
-
-out:
- btrfs_free_path(path);
return ret;
}
@@ -7878,16 +7750,12 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
btrfs_dev_stat_inc(dev, index);
- btrfs_dev_stat_print_on_error(dev);
-}
-static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
-{
if (!dev->dev_stats_valid)
return;
- btrfs_err_rl_in_rcu(dev->fs_info,
+ btrfs_err_rl(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
- rcu_str_deref(dev->name),
+ btrfs_dev_name(dev),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
@@ -7905,9 +7773,9 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
if (i == BTRFS_DEV_STAT_VALUES_MAX)
return; /* all values == 0, suppress message */
- btrfs_info_in_rcu(dev->fs_info,
+ btrfs_info(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
- rcu_str_deref(dev->name),
+ btrfs_dev_name(dev),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
@@ -7965,7 +7833,7 @@ void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
struct btrfs_device *curr, *next;
- ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
+ ASSERT(trans->state == TRANS_STATE_COMMIT_DOING, "state=%d", trans->state);
if (list_empty(&trans->dev_update_list))
return;
@@ -7995,27 +7863,20 @@ int btrfs_bg_type_to_factor(u64 flags)
return btrfs_raid_array[index].ncopies;
}
-
-
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
u64 chunk_offset, u64 devid,
u64 physical_offset, u64 physical_len)
{
struct btrfs_dev_lookup_args args = { .devid = devid };
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
struct btrfs_device *dev;
u64 stripe_len;
bool found = false;
int ret = 0;
int i;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, chunk_offset, 1);
- read_unlock(&em_tree->lock);
-
- if (!em) {
+ map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
+ if (unlikely(!map)) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
physical_offset, devid);
@@ -8023,25 +7884,34 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
goto out;
}
- map = em->map_lookup;
- stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
- if (physical_len != stripe_len) {
+ stripe_len = btrfs_calc_stripe_length(map);
+ if (unlikely(physical_len != stripe_len)) {
btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
- physical_offset, devid, em->start, physical_len,
+ physical_offset, devid, map->start, physical_len,
stripe_len);
ret = -EUCLEAN;
goto out;
}
+ /*
+ * Very old mkfs.btrfs (before v4.15) will not respect the reserved
+ * space. Although the kernel can handle it without problems, it is
+ * better to warn the users.
+ */
+ if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
+ btrfs_warn(fs_info,
+ "devid %llu physical %llu len %llu inside the reserved space",
+ devid, physical_offset, physical_len);
+
for (i = 0; i < map->num_stripes; i++) {
- if (map->stripes[i].dev->devid == devid &&
- map->stripes[i].physical == physical_offset) {
+ if (unlikely(map->stripes[i].dev->devid == devid &&
+ map->stripes[i].physical == physical_offset)) {
found = true;
if (map->verified_stripes >= map->num_stripes) {
btrfs_err(fs_info,
"too many dev extents for chunk %llu found",
- em->start);
+ map->start);
ret = -EUCLEAN;
goto out;
}
@@ -8049,7 +7919,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
break;
}
}
- if (!found) {
+ if (unlikely(!found)) {
btrfs_err(fs_info,
"dev extent physical offset %llu devid %llu has no corresponding chunk",
physical_offset, devid);
@@ -8058,13 +7928,13 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
/* Make sure no dev extent is beyond device boundary */
dev = btrfs_find_device(fs_info->fs_devices, &args);
- if (!dev) {
+ if (unlikely(!dev)) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
ret = -EUCLEAN;
goto out;
}
- if (physical_offset + physical_len > dev->disk_total_bytes) {
+ if (unlikely(physical_offset + physical_len > dev->disk_total_bytes)) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
devid, physical_offset, physical_len,
@@ -8076,8 +7946,8 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
if (dev->zone_info) {
u64 zone_size = dev->zone_info->zone_size;
- if (!IS_ALIGNED(physical_offset, zone_size) ||
- !IS_ALIGNED(physical_len, zone_size)) {
+ if (unlikely(!IS_ALIGNED(physical_offset, zone_size) ||
+ !IS_ALIGNED(physical_len, zone_size))) {
btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
devid, physical_offset, physical_len);
@@ -8087,32 +7957,30 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
out:
- free_extent_map(em);
+ btrfs_free_chunk_map(map);
return ret;
}
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct extent_map *em;
struct rb_node *node;
int ret = 0;
- read_lock(&em_tree->lock);
- for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
- em = rb_entry(node, struct extent_map, rb_node);
- if (em->map_lookup->num_stripes !=
- em->map_lookup->verified_stripes) {
+ read_lock(&fs_info->mapping_tree_lock);
+ for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
+ struct btrfs_chunk_map *map;
+
+ map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+ if (unlikely(map->num_stripes != map->verified_stripes)) {
btrfs_err(fs_info,
"chunk %llu has missing dev extent, have %d expect %d",
- em->start, em->map_lookup->verified_stripes,
- em->map_lookup->num_stripes);
+ map->start, map->verified_stripes, map->num_stripes);
ret = -EUCLEAN;
goto out;
}
}
out:
- read_unlock(&em_tree->lock);
+ read_unlock(&fs_info->mapping_tree_lock);
return ret;
}
@@ -8125,7 +7993,7 @@ out:
*/
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
u64 prev_devid = 0;
@@ -8156,17 +8024,15 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
path->reada = READA_FORWARD;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
+ return ret;
/* No dev extents at all? Not good */
- if (ret > 0) {
- ret = -EUCLEAN;
- goto out;
- }
+ if (unlikely(ret > 0))
+ return -EUCLEAN;
}
while (1) {
struct extent_buffer *leaf = path->nodes[0];
@@ -8188,24 +8054,23 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
physical_len = btrfs_dev_extent_length(leaf, dext);
/* Check if this dev extent overlaps with the previous one */
- if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
+ if (unlikely(devid == prev_devid && physical_offset < prev_dev_ext_end)) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
devid, physical_offset, prev_dev_ext_end);
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
physical_offset, physical_len);
if (ret < 0)
- goto out;
+ return ret;
prev_devid = devid;
prev_dev_ext_end = physical_offset + physical_len;
ret = btrfs_next_item(root, path);
if (ret < 0)
- goto out;
+ return ret;
if (ret > 0) {
ret = 0;
break;
@@ -8213,10 +8078,7 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
}
/* Ensure all chunks have corresponding dev extents */
- ret = verify_chunk_dev_extent_mapping(fs_info);
-out:
- btrfs_free_path(path);
- return ret;
+ return verify_chunk_dev_extent_mapping(fs_info);
}
/*
@@ -8245,7 +8107,7 @@ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
static int relocating_repair_kthread(void *data)
{
- struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
+ struct btrfs_block_group *cache = data;
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 target;
int ret = 0;
@@ -8253,6 +8115,8 @@ static int relocating_repair_kthread(void *data)
target = cache->start;
btrfs_put_block_group(cache);
+ guard(super_write)(fs_info->sb);
+
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
btrfs_info(fs_info,
"zoned: skip relocating block group %llu to repair: EBUSY",
@@ -8267,7 +8131,7 @@ static int relocating_repair_kthread(void *data)
if (!cache)
goto out;
- if (!cache->relocating_repair)
+ if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
goto out;
ret = btrfs_may_alloc_data_chunk(fs_info, target);
@@ -8277,7 +8141,7 @@ static int relocating_repair_kthread(void *data)
btrfs_info(fs_info,
"zoned: relocating block group %llu to repair IO failure",
target);
- ret = btrfs_relocate_chunk(fs_info, target);
+ ret = btrfs_relocate_chunk(fs_info, target, true);
out:
if (cache)
@@ -8288,29 +8152,102 @@ out:
return ret;
}
-int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
+bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
struct btrfs_block_group *cache;
+ if (!btrfs_is_zoned(fs_info))
+ return false;
+
/* Do not attempt to repair in degraded state */
if (btrfs_test_opt(fs_info, DEGRADED))
- return 0;
+ return true;
cache = btrfs_lookup_block_group(fs_info, logical);
if (!cache)
- return 0;
+ return true;
- spin_lock(&cache->lock);
- if (cache->relocating_repair) {
- spin_unlock(&cache->lock);
+ if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
btrfs_put_block_group(cache);
- return 0;
+ return true;
}
- cache->relocating_repair = 1;
- spin_unlock(&cache->lock);
kthread_run(relocating_repair_kthread, cache,
"btrfs-relocating-repair");
+ return true;
+}
+
+static void map_raid56_repair_block(struct btrfs_io_context *bioc,
+ struct btrfs_io_stripe *smap,
+ u64 logical)
+{
+ int data_stripes = nr_bioc_data_stripes(bioc);
+ int i;
+
+ for (i = 0; i < data_stripes; i++) {
+ u64 stripe_start = bioc->full_stripe_logical +
+ btrfs_stripe_nr_to_offset(i);
+
+ if (logical >= stripe_start &&
+ logical < stripe_start + BTRFS_STRIPE_LEN)
+ break;
+ }
+ ASSERT(i < data_stripes, "i=%d data_stripes=%d", i, data_stripes);
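+ /*
+ * @i is now the data stripe covering @logical; the masked term below
+ * is the offset of @logical within that 64K (BTRFS_STRIPE_LEN) stripe.
+ */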
+ smap->dev = bioc->stripes[i].dev;
+ smap->physical = bioc->stripes[i].physical +
+ ((logical - bioc->full_stripe_logical) &
+ BTRFS_STRIPE_LEN_MASK);
+}
+
+/*
+ * Map a repair write into a single device.
+ *
+ * A repair write is triggered by read time repair or scrub, which would only
+ * update the contents of a single device.
+ * Not update any other mirrors nor go through RMW path.
+ *
+ * Callers should ensure:
+ *
+ * - Call btrfs_bio_counter_inc_blocked() first
+ * - The range does not cross stripe boundary
+ * - Has a valid @mirror_num passed in.
+ */
+int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
+ struct btrfs_io_stripe *smap, u64 logical,
+ u32 length, int mirror_num)
+{
+ struct btrfs_io_context *bioc = NULL;
+ u64 map_length = length;
+ int mirror_ret = mirror_num;
+ int ret;
+
+ ASSERT(mirror_num > 0, "mirror_num=%d", mirror_num);
+
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
+ &bioc, smap, &mirror_ret);
+ if (ret < 0)
+ return ret;
+
+ /* The map range should not cross stripe boundary. */
+ ASSERT(map_length >= length, "map_length=%llu length=%u", map_length, length);
+
+ /* Already mapped to single stripe. */
+ if (!bioc)
+ goto out;
+
+ /* Map the RAID56 multi-stripe writes to a single one. */
+ if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ map_raid56_repair_block(bioc, smap, logical);
+ goto out;
+ }
+
+ ASSERT(mirror_num <= bioc->num_stripes,
+ "mirror_num=%d num_stripes=%d", mirror_num, bioc->num_stripes);
+ smap->dev = bioc->stripes[mirror_num - 1].dev;
+ smap->physical = bioc->stripes[mirror_num - 1].physical;
+out:
+ btrfs_put_bioc(bioc);
+ ASSERT(smap->dev);
return 0;
}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 3b8130680749..34b854c1a303 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -6,30 +6,78 @@
#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H
-#include <linux/bio.h>
+#include <linux/blk_types.h>
+#include <linux/blkdev.h>
+#include <linux/sizes.h>
+#include <linux/atomic.h>
#include <linux/sort.h>
-#include <linux/btrfs.h>
-#include "async-thread.h"
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/log2.h>
+#include <linux/kobject.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+#include <linux/rbtree.h>
+#include <uapi/linux/btrfs.h>
+#include <uapi/linux/btrfs_tree.h>
+#include "messages.h"
+#include "extent-io-tree.h"
+
+struct block_device;
+struct bdev_handle;
+struct btrfs_fs_info;
+struct btrfs_block_group;
+struct btrfs_trans_handle;
+struct btrfs_transaction;
+struct btrfs_zoned_device_info;
#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+/*
+ * Arbitrary maximum size of one discard request, to limit the potentially
+ * long time spent in blkdev_issue_discard().
+ */
+#define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G)
+
extern struct mutex uuid_mutex;
-#define BTRFS_STRIPE_LEN SZ_64K
-
-struct btrfs_io_geometry {
- /* remaining bytes before crossing a stripe */
- u64 len;
- /* offset of logical address in chunk */
- u64 offset;
- /* length of single IO stripe */
- u64 stripe_len;
- /* number of stripe where address falls */
- u64 stripe_nr;
- /* offset of address in stripe */
- u64 stripe_offset;
- /* offset of raid56 stripe into the chunk */
- u64 raid56_stripe_offset;
+#define BTRFS_STRIPE_LEN SZ_64K
+#define BTRFS_STRIPE_LEN_SHIFT (16)
+#define BTRFS_STRIPE_LEN_MASK (BTRFS_STRIPE_LEN - 1)
+
+static_assert(ilog2(BTRFS_STRIPE_LEN) == BTRFS_STRIPE_LEN_SHIFT);
+
+/* Used by sanity check for btrfs_raid_types. */
+#define const_ffs(n) (__builtin_ctzll(n) + 1)
+
+/*
+ * The conversion from BTRFS_BLOCK_GROUP_* bits to btrfs_raid_type requires
+ * RAID0 to always be the lowest profile bit.
+ * Although it's part of the on-disk format and should never change, do
+ * extra compile-time sanity checks.
+ */
+static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) <
+ const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0));
+static_assert(ilog2(BTRFS_BLOCK_GROUP_RAID0) > ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK));
+
+/* ilog2() can handle both constants and variables */
+#define BTRFS_BG_FLAG_TO_INDEX(profile) \
+ ilog2((profile) >> (ilog2(BTRFS_BLOCK_GROUP_RAID0) - 1))
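+/*
+ * E.g. the lowest profile bit (RAID0) is shifted down to bit 1, so it maps to
+ * index 1, leaving index 0 free for SINGLE, which has no on-disk bit.
+ */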
+
+enum btrfs_raid_types {
+ /* SINGLE is the special one as it doesn't have on-disk bit. */
+ BTRFS_RAID_SINGLE = 0,
+
+ BTRFS_RAID_RAID0 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0),
+ BTRFS_RAID_RAID1 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1),
+ BTRFS_RAID_DUP = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_DUP),
+ BTRFS_RAID_RAID10 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID10),
+ BTRFS_RAID_RAID5 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID5),
+ BTRFS_RAID_RAID6 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID6),
+ BTRFS_RAID_RAID1C3 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C3),
+ BTRFS_RAID_RAID1C4 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C4),
+
+ BTRFS_NR_RAID_TYPES
};
/*
@@ -52,7 +100,10 @@ struct btrfs_io_geometry {
#define BTRFS_DEV_STATE_FLUSH_SENT (4)
#define BTRFS_DEV_STATE_NO_READA (5)
-struct btrfs_zoned_device_info;
+/* Special value encoding failure to write primary super block. */
+#define BTRFS_SUPER_PRIMARY_WRITE_ERROR (INT_MAX / 2)
+
+struct btrfs_fs_devices;
struct btrfs_device {
struct list_head dev_list; /* device_list_mutex */
@@ -61,17 +112,21 @@ struct btrfs_device {
struct btrfs_fs_devices *fs_devices;
struct btrfs_fs_info *fs_info;
- struct rcu_string __rcu *name;
+ /* Device path or NULL if missing. */
+ const char __rcu *name;
u64 generation;
+ struct file *bdev_file;
struct block_device *bdev;
struct btrfs_zoned_device_info *zone_info;
- /* the mode sent to blkdev_get */
- fmode_t mode;
-
+ /*
+ * Device's major-minor number. Must be set even if the device is not
+ * opened (bdev == NULL), unless the device is missing.
+ */
+ dev_t devt;
unsigned long dev_state;
blk_status_t last_flush_error;
@@ -99,6 +154,12 @@ struct btrfs_device {
/* type and info about this device */
u64 type;
+ /*
+ * Counter of super block write errors; values larger than
+ * BTRFS_SUPER_PRIMARY_WRITE_ERROR encode a primary super block write failure.
+ */
+ atomic_t sb_write_errors;
+
/* minimal io size for this device */
u32 sector_size;
@@ -116,20 +177,13 @@ struct btrfs_device {
/* bytes used on the current transaction */
u64 commit_bytes_used;
- /* for sending down flush barriers */
- struct bio *flush_bio;
+ /* Bio used for flushing device barriers */
+ struct bio flush_bio;
struct completion flush_wait;
/* per-device scrub information */
struct scrub_ctx *scrub_ctx;
- /* readahead state */
- atomic_t reada_in_flight;
- u64 reada_next;
- struct reada_zone *reada_curr_zone;
- struct radix_tree_root reada_zones;
- struct radix_tree_root reada_extents;
-
/* disk I/O failure stats. For detailed description refer to
* enum btrfs_dev_stat_values in ioctl.h */
int dev_stats_valid;
@@ -149,6 +203,31 @@ struct btrfs_device {
};
/*
+ * Block group or device which contains an active swapfile. Used for preventing
+ * unsafe operations while a swapfile is active.
+ *
+ * These are sorted on (ptr, inode) (note that a block group or device can
+ * contain more than one swapfile). We compare the pointer values because we
+ * don't actually care what the object is, we just need a quick check whether
+ * the object exists in the rbtree.
+ */
+struct btrfs_swapfile_pin {
+ struct rb_node node;
+ void *ptr;
+ struct inode *inode;
+ /*
+ * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
+ * points to a struct btrfs_device.
+ */
+ bool is_block_group;
+ /*
+ * Only used when 'is_block_group' is true and it is the number of
+ * extents used by a swapfile for this block group ('ptr' field).
+ */
+ int bg_extent_count;
+};
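A minimal sketch (not the kernel comparator) of the (ptr, inode) ordering described above; the pointers are compared purely as addresses because only existence in the tree matters:

/* Illustrative stand-in types; the real entries live in an rbtree. */
#include <stdint.h>

struct swapfile_pin_demo {
	void *ptr;    /* block group or device, compared by address only */
	void *inode;  /* the swapfile's inode */
};

static int swapfile_pin_cmp(const struct swapfile_pin_demo *a,
			    const struct swapfile_pin_demo *b)
{
	if (a->ptr != b->ptr)
		return (uintptr_t)a->ptr < (uintptr_t)b->ptr ? -1 : 1;
	if (a->inode != b->inode)
		return (uintptr_t)a->inode < (uintptr_t)b->inode ? -1 : 1;
	return 0;
}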
+
+/*
* If we read those variants at the context of their own lock, we needn't
* use the following helpers, reading them directly is safe.
*/
@@ -220,6 +299,9 @@ enum btrfs_chunk_allocation_policy {
BTRFS_CHUNK_ALLOC_ZONED,
};
+#define BTRFS_DEFAULT_RR_MIN_CONTIG_READ (SZ_256K)
+/* Keep in sync with raid_attr table, current maximum is RAID1C4. */
+#define BTRFS_RAID1_MAX_MIRRORS (4)
/*
* Read policies for mirrored block group profiles, read picks the stripe based
* on these policies.
@@ -227,13 +309,62 @@ enum btrfs_chunk_allocation_policy {
enum btrfs_read_policy {
/* Use process PID to choose the stripe */
BTRFS_READ_POLICY_PID,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* Balancing RAID1 reads across all striped devices (round-robin). */
+ BTRFS_READ_POLICY_RR,
+ /* Read from a specific device. */
+ BTRFS_READ_POLICY_DEVID,
+#endif
BTRFS_NR_READ_POLICY,
};
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+/*
+ * Checksum mode - offload it to workqueues or do it synchronously in
+ * btrfs_submit_chunk().
+ */
+enum btrfs_offload_csum_mode {
+ /*
+ * Automatically choose between offloading the checksum and doing it
+ * synchronously: do it synchronously if the checksum implementation is
+ * fast, otherwise offload it to workqueues.
+ */
+ BTRFS_OFFLOAD_CSUM_AUTO,
+ /* Always offload checksum to workqueues. */
+ BTRFS_OFFLOAD_CSUM_FORCE_ON,
+ /* Never offload checksum to workqueues. */
+ BTRFS_OFFLOAD_CSUM_FORCE_OFF,
+};
+#endif
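A minimal sketch of the AUTO decision described above (the csum_is_fast predicate is hypothetical; the real code bases the choice on whether the checksum implementation is fast enough to run inline):

#include <stdbool.h>

enum demo_offload_csum_mode { DEMO_CSUM_AUTO, DEMO_CSUM_FORCE_ON, DEMO_CSUM_FORCE_OFF };

/* Returns true when checksumming should be pushed to a workqueue. */
static bool demo_should_offload_csum(enum demo_offload_csum_mode mode, bool csum_is_fast)
{
	switch (mode) {
	case DEMO_CSUM_FORCE_ON:
		return true;
	case DEMO_CSUM_FORCE_OFF:
		return false;
	case DEMO_CSUM_AUTO:
	default:
		/* Offload only when doing it inline would be slow. */
		return !csum_is_fast;
	}
}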
+
struct btrfs_fs_devices {
u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
+
+ /*
+ * UUID written into the btree blocks:
+ *
+ * - If metadata_uuid != fsid then super block must have
+ * BTRFS_FEATURE_INCOMPAT_METADATA_UUID flag set.
+ *
+ * - The following shall be true at all times:
+ * - metadata_uuid == btrfs_header::fsid
+ * - metadata_uuid == btrfs_dev_item::fsid
+ *
+ * - Relations between fsid and metadata_uuid in sb and fs_devices:
+ * - Normal:
+ * fs_devices->fsid == fs_devices->metadata_uuid == sb->fsid
+ * sb->metadata_uuid == 0
+ *
+ * - When the BTRFS_FEATURE_INCOMPAT_METADATA_UUID flag is set:
+ * fs_devices->fsid == sb->fsid
+ * fs_devices->metadata_uuid == sb->metadata_uuid
+ *
+ * - When in-memory fs_devices->temp_fsid is true
+ * fs_devices->fsid = random
+ * fs_devices->metadata_uuid == sb->fsid
+ */
u8 metadata_uuid[BTRFS_FSID_SIZE];
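As a hedged sketch of the relations documented above, this is roughly how a super block reader could pick the UUID that has to match btrfs_header::fsid (field and flag names here are simplified stand-ins; the in-tree helper for this is btrfs_sb_fsid_ptr(), declared further below):

#include <stdint.h>

struct demo_super {
	uint8_t fsid[16];
	uint8_t metadata_uuid[16];
	uint64_t incompat_flags;
};

/* Assumed flag value standing in for BTRFS_FEATURE_INCOMPAT_METADATA_UUID. */
#define DEMO_INCOMPAT_METADATA_UUID (1ULL << 10)

static const uint8_t *demo_metadata_uuid(const struct demo_super *sb)
{
	/* With the incompat flag set, tree blocks carry sb->metadata_uuid ... */
	if (sb->incompat_flags & DEMO_INCOMPAT_METADATA_UUID)
		return sb->metadata_uuid;
	/* ... otherwise fsid and metadata UUID are one and the same. */
	return sb->fsid;
}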
- bool fsid_change;
+
struct list_head fs_list;
/*
@@ -271,32 +402,45 @@ struct btrfs_fs_devices {
*/
struct btrfs_device *latest_dev;
- /* all of the devices in the FS, protected by a mutex
- * so we can safely walk it to write out the supers without
- * worrying about add/remove by the multi-device code.
- * Scrubbing super can kick off supers writing by holding
- * this mutex lock.
+ /*
+ * All of the devices in the filesystem, protected by a mutex so we can
+ * safely walk it to write out the super blocks without worrying about
+ * adding/removing by the multi-device code. Scrubbing the super block
+ * can kick off super block writes while holding this mutex.
*/
struct mutex device_list_mutex;
/* List of all devices, protected by device_list_mutex */
struct list_head devices;
- /*
- * Devices which can satisfy space allocation. Protected by
- * chunk_mutex
- */
+ /* Devices which can satisfy space allocation. Protected by chunk_mutex. */
struct list_head alloc_list;
struct list_head seed_list;
- bool seeding;
+ /* Count fs-devices opened. */
int opened;
- /* set when we find or add a device that doesn't have the
- * nonrot flag set
+ /*
+ * Counter of the processes that are holding this fs_devices but have
+ * not yet opened it.
+ * This is for mount handling, as we can only open the fs_devices
+ * after a super block is created. But we cannot take uuid_mutex
+ * during sget_fc(), thus we have to hold the fs_devices (meaning it
+ * cannot be released) until a super block is returned.
*/
+ int holding;
+
+ /* Set when we find or add a device that doesn't have the nonrot flag set. */
bool rotating;
+ /* Devices support TRIM/discard commands. */
+ bool discardable;
+ /* The filesystem is a seed filesystem. */
+ bool seeding;
+ /* The mount needs to use a randomly generated fsid. */
+ bool temp_fsid;
+ /* Enable/disable the filesystem stats tracking. */
+ bool collect_fs_stats;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@@ -307,11 +451,23 @@ struct btrfs_fs_devices {
enum btrfs_chunk_allocation_policy chunk_alloc_policy;
- /* Policy used to read the mirrored stripes */
+ /* Policy used to read the mirrored stripes. */
enum btrfs_read_policy read_policy;
-};
-#define BTRFS_BIO_INLINE_CSUM_SIZE 64
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /*
+ * Minimum contiguous reads before switching to next device, the unit
+ * is one block/sectorsize.
+ */
+ u32 rr_min_contig_read;
+
+ /* Device to be used for reading in case of RAID1. */
+ u64 read_devid;
+
+ /* Checksum mode - offload it or do it synchronously. */
+ enum btrfs_offload_csum_mode offload_csum_mode;
+#endif
+};
#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \
- sizeof(struct btrfs_chunk)) \
@@ -322,48 +478,23 @@ struct btrfs_fs_devices {
- 2 * sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
-/*
- * Additional info to pass along bio.
- *
- * Mostly for btrfs specific features like csum and mirror_num.
- */
-struct btrfs_bio {
- unsigned int mirror_num;
-
- /* @device is for stripe IO submission. */
- struct btrfs_device *device;
- u8 *csum;
- u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
- struct bvec_iter iter;
-
- /*
- * This member must come last, bio_alloc_bioset will allocate enough
- * bytes for entire btrfs_bio but relies on bio being last.
- */
- struct bio bio;
+struct btrfs_io_stripe {
+ struct btrfs_device *dev;
+ /* Block mapping. */
+ u64 physical;
+ bool rst_search_commit_root;
+ /* For the endio handler. */
+ struct btrfs_io_context *bioc;
};
-static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
-{
- return container_of(bio, struct btrfs_bio, bio);
-}
-
-static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
-{
- if (bbio->csum != bbio->csum_inline) {
- kfree(bbio->csum);
- bbio->csum = NULL;
- }
-}
-
-struct btrfs_io_stripe {
+struct btrfs_discard_stripe {
struct btrfs_device *dev;
u64 physical;
- u64 length; /* only used for discard mappings */
+ u64 length;
};
/*
- * Context for IO subsmission for device stripe.
+ * Context for IO submission for device stripe.
*
* - Track the unfinished mirrors for mirror based profiles
* Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
@@ -380,24 +511,85 @@ struct btrfs_io_stripe {
*/
struct btrfs_io_context {
refcount_t refs;
- atomic_t stripes_pending;
struct btrfs_fs_info *fs_info;
- u64 map_type; /* get from map_lookup->type */
- bio_end_io_t *end_io;
+ /* Taken from struct btrfs_chunk_map::type. */
+ u64 map_type;
struct bio *orig_bio;
- void *private;
atomic_t error;
- int max_errors;
- int num_stripes;
- int mirror_num;
- int num_tgtdevs;
- int *tgtdev_map;
+ u16 max_errors;
+ bool use_rst;
+
+ u64 logical;
+ u64 size;
+ /* Raid stripe tree ordered entry. */
+ struct list_head rst_ordered_entry;
+
/*
- * logical block numbers for the start of each stripe
- * The last one or two are p/q. These are sorted,
- * so raid_map[0] is the start of our full stripe
+ * The total number of stripes, including the extra duplicated
+ * stripe for replace.
+ */
+ u16 num_stripes;
+
+ /*
+ * The mirror_num of this bioc.
+ *
+ * This is for reads which use 0 as mirror_num, thus we should return a
+ * valid mirror_num (>0) for the reader.
*/
- u64 *raid_map;
+ u16 mirror_num;
+
+ /*
+ * The following two members are for dev-replace case only.
+ *
+ * @replace_nr_stripes: Number of duplicated stripes which need to be
+ * written to the replace target.
+ * Should be <= 2 (2 for DUP, otherwise <= 1).
+ * @replace_stripe_src: The array indicates where the duplicated stripes
+ * are from.
+ *
+ * The @replace_stripe_src[] array is mostly for RAID56 cases.
+ * Non-RAID56 stripes share the same contents for the mapped range,
+ * thus there is no need to track where the duplicated ones come from.
+ *
+ * But for the RAID56 case, all stripes contain different contents, thus
+ * we need a way to know the mapping.
+ *
+ * Here is an example for the two members, using a RAID5 write:
+ *
+ * num_stripes: 4 (3 + 1 duplicated write)
+ * stripes[0]: dev = devid 1, physical = X
+ * stripes[1]: dev = devid 2, physical = Y
+ * stripes[2]: dev = devid 3, physical = Z
+ * stripes[3]: dev = devid 0, physical = Y
+ *
+ * replace_nr_stripes = 1
+ * replace_stripe_src = 1 <- Means stripes[1] is involved in replace.
+ * The duplicated stripe index would be
+ * (@num_stripes - 1).
+ *
+ * Note that we can still have replace_nr_stripes = 2 for DUP.
+ * In that case, all stripes share the same content, thus we don't
+ * need to care about the @replace_stripe_src value at all.
+ */
+ u16 replace_nr_stripes;
+ s16 replace_stripe_src;
+ /*
+ * Logical bytenr of the full stripe start, only for RAID56 cases.
+ *
+ * When this value is set to something other than (u64)-1, the stripes[] should
+ * follow this pattern:
+ *
+ * (real_stripes = num_stripes - replace_nr_stripes)
+ * (data_stripes = (is_raid6) ? (real_stripes - 2) : (real_stripes - 1))
+ *
+ * stripes[0]: The first data stripe
+ * stripes[1]: The second data stripe
+ * ...
+ * stripes[data_stripes - 1]: The last data stripe
+ * stripes[data_stripes]: The P stripe
+ * stripes[data_stripes + 1]: The Q stripe (only for RAID6).
+ */
+ u64 full_stripe_logical;
struct btrfs_io_stripe stripes[];
};
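Following the full_stripe_logical layout above, a small sketch (hypothetical helper, illustration only) of how the data, P and Q stripe indices fall out once the replace stripes are excluded:

#include <stdbool.h>

struct demo_stripe_layout {
	int data_stripes; /* stripes[0 .. data_stripes - 1] */
	int p_stripe;     /* stripes[data_stripes] */
	int q_stripe;     /* stripes[data_stripes + 1] for RAID6, else -1 */
};

static struct demo_stripe_layout demo_raid56_layout(int num_stripes,
						    int replace_nr_stripes,
						    bool is_raid6)
{
	struct demo_stripe_layout l;
	int real_stripes = num_stripes - replace_nr_stripes;

	l.data_stripes = real_stripes - (is_raid6 ? 2 : 1);
	l.p_stripe = l.data_stripes;
	l.q_stripe = is_raid6 ? l.data_stripes + 1 : -1;
	return l;
}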
@@ -425,22 +617,33 @@ struct btrfs_raid_attr {
extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];
-struct map_lookup {
+struct btrfs_chunk_map {
+ struct rb_node rb_node;
+ /* For mount time dev extent verification. */
+ int verified_stripes;
+ refcount_t refs;
+ u64 start;
+ u64 chunk_len;
+ u64 stripe_size;
u64 type;
int io_align;
int io_width;
- u64 stripe_len;
int num_stripes;
int sub_stripes;
- int verified_stripes; /* For mount time dev extent verification */
struct btrfs_io_stripe stripes[];
};
-#define map_lookup_size(n) (sizeof(struct map_lookup) + \
- (sizeof(struct btrfs_io_stripe) * (n)))
+#define btrfs_chunk_map_size(n) (sizeof(struct btrfs_chunk_map) + \
+ (sizeof(struct btrfs_io_stripe) * (n)))
+
+static inline void btrfs_free_chunk_map(struct btrfs_chunk_map *map)
+{
+ if (map && refcount_dec_and_test(&map->refs)) {
+ ASSERT(RB_EMPTY_NODE(&map->rb_node));
+ kfree(map);
+ }
+}
-struct btrfs_balance_args;
-struct btrfs_balance_progress;
struct btrfs_balance_control {
struct btrfs_balance_args data;
struct btrfs_balance_args meta;
@@ -458,6 +661,11 @@ struct btrfs_dev_lookup_args {
u64 devid;
u8 *uuid;
u8 *fsid;
+ /*
+ * If devt is specified, all other members will be ignored as it is
+ * enough to uniquely locate a device.
+ */
+ dev_t devt;
bool missing;
};
@@ -470,15 +678,12 @@ struct btrfs_dev_lookup_args {
enum btrfs_map_op {
BTRFS_MAP_READ,
BTRFS_MAP_WRITE,
- BTRFS_MAP_DISCARD,
BTRFS_MAP_GET_READ_MIRRORS,
};
-static inline enum btrfs_map_op btrfs_op(struct bio *bio)
+static inline enum btrfs_map_op btrfs_op(const struct bio *bio)
{
switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- return BTRFS_MAP_DISCARD;
case REQ_OP_WRITE:
case REQ_OP_ZONE_APPEND:
return BTRFS_MAP_WRITE;
@@ -490,29 +695,46 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
}
}
+static inline unsigned long btrfs_chunk_item_size(int num_stripes)
+{
+ ASSERT(num_stripes);
+ return sizeof(struct btrfs_chunk) +
+ sizeof(struct btrfs_stripe) * (num_stripes - 1);
+}
+
+/*
+ * Do the type-safe conversion from stripe_nr to an offset inside the chunk.
+ *
+ * @stripe_nr is u32; left-shifting it can overflow u32 for chunks larger
+ * than 4G. This does the proper type cast to avoid overflow.
+ */
+static inline u64 btrfs_stripe_nr_to_offset(u32 stripe_nr)
+{
+ return (u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT;
+}
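A quick userspace demonstration of the overflow the comment warns about, assuming the upstream 64KiB stripe length (shift of 16): without the u64 cast the shift wraps to zero for a chunk offset of exactly 4GiB.

#include <stdio.h>
#include <stdint.h>

#define DEMO_STRIPE_LEN_SHIFT 16 /* assumed: ilog2(64KiB) */

int main(void)
{
	uint32_t stripe_nr = 1U << 16;                                 /* 65536 stripes == 4GiB */
	uint64_t wrong = stripe_nr << DEMO_STRIPE_LEN_SHIFT;           /* 32-bit shift wraps to 0 */
	uint64_t right = (uint64_t)stripe_nr << DEMO_STRIPE_LEN_SHIFT; /* 4GiB as intended */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}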
+
void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
- struct btrfs_io_context **bioc_ret, int mirror_num);
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
- u64 logical, u64 *length,
- struct btrfs_io_context **bioc_ret);
-int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
- enum btrfs_map_op op, u64 logical,
- struct btrfs_io_geometry *io_geom);
+ struct btrfs_io_context **bioc_ret,
+ struct btrfs_io_stripe *smap, int *mirror_num_ret);
+int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
+ struct btrfs_io_stripe *smap, u64 logical,
+ u32 length, int mirror_num);
+struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 *length_ret,
+ u32 *num_stripes);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
- u64 type);
-void btrfs_mapping_tree_free(struct extent_map_tree *tree);
-blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num);
+ struct btrfs_space_info *space_info,
+ u64 type);
+void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
- fmode_t flags, void *holder);
-struct btrfs_device *btrfs_scan_one_device(const char *path,
- fmode_t flags, void *holder);
-int btrfs_forget_devices(const char *path);
+ blk_mode_t flags, void *holder);
+struct btrfs_device *btrfs_scan_one_device(const char *path, bool mount_arg_dev);
+int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
void btrfs_assign_next_active_device(struct btrfs_device *device,
@@ -524,13 +746,12 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
const char *path);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
- const u64 *devid,
- const u8 *uuid);
+ const u64 *devid, const u8 *uuid,
+ const char *path);
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
-void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
- struct block_device **bdev, fmode_t *mode);
+ struct file **bdev_file);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
@@ -546,31 +767,41 @@ void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
-int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
+int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ bool verbose);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
-int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
-int btrfs_uuid_scan_kthread(void *data);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
-int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
- u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats);
-void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
+int btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
-int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
- u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
+u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map);
+int btrfs_nr_parity_stripes(u64 type);
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
-struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length);
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp);
+int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
+#endif
+
+struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length);
+struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length);
+struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length);
+void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
+struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
+ int copy_num, bool drop_cache);
void btrfs_release_disk_super(struct btrfs_super_block *super);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
@@ -624,19 +855,52 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
atomic_inc(&dev->dev_stats_ccnt);
}
+static inline const char *btrfs_dev_name(const struct btrfs_device *device)
+{
+ if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ return "<missing disk>";
+ else
+ return rcu_dereference(device->name);
+}
+
+static inline void btrfs_warn_unknown_chunk_allocation(enum btrfs_chunk_allocation_policy pol)
+{
+ WARN_ONCE(1, "unknown allocation policy %d, fallback to regular", pol);
+}
+
+static inline void btrfs_fs_devices_inc_holding(struct btrfs_fs_devices *fs_devices)
+{
+ lockdep_assert_held(&uuid_mutex);
+ ASSERT(fs_devices->holding >= 0);
+ fs_devices->holding++;
+}
+
+static inline void btrfs_fs_devices_dec_holding(struct btrfs_fs_devices *fs_devices)
+{
+ lockdep_assert_held(&uuid_mutex);
+ ASSERT(fs_devices->holding > 0);
+ fs_devices->holding--;
+}
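A userspace sketch of the pattern the holding counter enables (names are illustrative, not the actual mount path): take a hold while the list lock is owned, drop the lock for the window where it cannot be held, then re-take it and release the hold.

#include <pthread.h>

struct demo_fs_devices {
	int holding; /* protected by the lock below, like fs_devices::holding */
	int opened;
};

static pthread_mutex_t demo_uuid_mutex = PTHREAD_MUTEX_INITIALIZER;

static void demo_mount_window(struct demo_fs_devices *fsd,
			      void (*get_super_block)(struct demo_fs_devices *))
{
	pthread_mutex_lock(&demo_uuid_mutex);
	fsd->holding++;                     /* keep fsd alive; it cannot be released */
	pthread_mutex_unlock(&demo_uuid_mutex);

	get_super_block(fsd);               /* the lock must not be held here */

	pthread_mutex_lock(&demo_uuid_mutex);
	fsd->holding--;                     /* safe to release/free again */
	pthread_mutex_unlock(&demo_uuid_mutex);
}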
+
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
struct btrfs_device *failing_dev);
-void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
- struct block_device *bdev,
- const char *device_path);
+void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device);
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
-int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
+bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
+
+bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
+const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb);
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
+ u64 logical, u16 total_stripes);
+#endif
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 2837b4c8424d..ab55d10bd71f 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -13,21 +13,24 @@
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "ctree.h"
+#include "fs.h"
+#include "messages.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "xattr.h"
#include "disk-io.h"
#include "props.h"
#include "locking.h"
+#include "accessors.h"
+#include "dir-item.h"
-int btrfs_getxattr(struct inode *inode, const char *name,
+int btrfs_getxattr(const struct inode *inode, const char *name,
void *buffer, size_t size)
{
struct btrfs_dir_item *di;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct extent_buffer *leaf;
- int ret = 0;
unsigned long data_ptr;
path = btrfs_alloc_path();
@@ -37,26 +40,19 @@ int btrfs_getxattr(struct inode *inode, const char *name,
/* lookup the xattr by name */
di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(BTRFS_I(inode)),
name, strlen(name), 0);
- if (!di) {
- ret = -ENODATA;
- goto out;
- } else if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
+ if (!di)
+ return -ENODATA;
+ if (IS_ERR(di))
+ return PTR_ERR(di);
leaf = path->nodes[0];
/* if size is 0, that means we want the size of the attr */
- if (!size) {
- ret = btrfs_dir_data_len(leaf, di);
- goto out;
- }
+ if (!size)
+ return btrfs_dir_data_len(leaf, di);
/* now get the data out of our dir_item */
- if (btrfs_dir_data_len(leaf, di) > size) {
- ret = -ERANGE;
- goto out;
- }
+ if (btrfs_dir_data_len(leaf, di) > size)
+ return -ERANGE;
/*
* The way things are packed into the leaf is like this
@@ -69,11 +65,7 @@ int btrfs_getxattr(struct inode *inode, const char *name,
btrfs_dir_name_len(leaf, di));
read_extent_buffer(leaf, buffer, data_ptr,
btrfs_dir_data_len(leaf, di));
- ret = btrfs_dir_data_len(leaf, di);
-
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_dir_data_len(leaf, di);
}
int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
@@ -81,8 +73,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
{
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
size_t name_len = strlen(name);
int ret = 0;
@@ -94,7 +85,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->skip_release_on_error = 1;
+ path->skip_release_on_error = true;
if (!value) {
di = btrfs_lookup_xattr(trans, root, path,
@@ -116,7 +107,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
* locks the inode's i_mutex before calling setxattr or removexattr.
*/
if (flags & XATTR_REPLACE) {
- ASSERT(inode_is_locked(inode));
+ btrfs_assert_inode_locked(BTRFS_I(inode));
di = btrfs_lookup_xattr(NULL, root, path,
btrfs_ino(BTRFS_I(inode)), name, name_len, 0);
if (!di)
@@ -139,14 +130,14 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
*/
ret = 0;
btrfs_assert_tree_write_locked(path->nodes[0]);
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ di = btrfs_match_dir_item_name(path, name, name_len);
if (!di && !(flags & XATTR_REPLACE)) {
ret = -ENOSPC;
goto out;
}
} else if (ret == -EEXIST) {
ret = 0;
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ di = btrfs_match_dir_item_name(path, name, name_len);
ASSERT(di); /* logic error */
} else if (ret) {
goto out;
@@ -168,9 +159,8 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
const int slot = path->slots[0];
struct extent_buffer *leaf = path->nodes[0];
const u16 old_data_len = btrfs_dir_data_len(leaf, di);
- const u32 item_size = btrfs_item_size_nr(leaf, slot);
+ const u32 item_size = btrfs_item_size(leaf, slot);
const u32 data_size = sizeof(*di) + name_len + size;
- struct btrfs_item *item;
unsigned long data_ptr;
char *ptr;
@@ -185,25 +175,23 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
- btrfs_extend_item(path, size - old_data_len);
+ btrfs_extend_item(trans, path, size - old_data_len);
else if (size < old_data_len)
- btrfs_truncate_item(path, data_size, 1);
+ btrfs_truncate_item(trans, path, data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
- btrfs_extend_item(path, data_size);
+ btrfs_extend_item(trans, path, data_size);
}
- item = btrfs_item_nr(slot);
ptr = btrfs_item_ptr(leaf, slot, char);
- ptr += btrfs_item_size(leaf, item) - data_size;
+ ptr += btrfs_item_size(leaf, slot) - data_size;
di = (struct btrfs_dir_item *)ptr;
btrfs_set_dir_data_len(leaf, di, size);
data_ptr = ((unsigned long)(di + 1)) + name_len;
write_extent_buffer(leaf, value, data_ptr, size);
- btrfs_mark_buffer_dirty(leaf);
} else {
/*
* Insert, and we had space for the xattr, so path->slots[0] is
@@ -212,7 +200,6 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
*/
}
out:
- btrfs_free_path(path);
if (!ret) {
set_bit(BTRFS_INODE_COPY_EVERYTHING,
&BTRFS_I(inode)->runtime_flags);
@@ -262,9 +249,10 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
goto out;
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- BUG_ON(ret);
+ inode_set_ctime_current(inode);
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
out:
if (start_trans)
btrfs_end_transaction(trans);
@@ -273,10 +261,12 @@ out:
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
+ struct btrfs_key found_key;
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
+ int iter_ret = 0;
int ret = 0;
size_t total_size = 0, size_left = size;
@@ -295,47 +285,26 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
path->reada = READA_FORWARD;
/* search for our xattrs */
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto err;
-
- while (1) {
+ btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
struct extent_buffer *leaf;
int slot;
struct btrfs_dir_item *di;
- struct btrfs_key found_key;
u32 item_size;
u32 cur;
leaf = path->nodes[0];
slot = path->slots[0];
- /* this is where we start walking through the path */
- if (slot >= btrfs_header_nritems(leaf)) {
- /*
- * if we've reached the last slot in this leaf we need
- * to go to the next leaf and reset everything
- */
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto err;
- else if (ret > 0)
- break;
- continue;
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
-
/* check to make sure this item is what we want */
if (found_key.objectid != key.objectid)
break;
if (found_key.type > BTRFS_XATTR_ITEM_KEY)
break;
if (found_key.type < BTRFS_XATTR_ITEM_KEY)
- goto next_item;
+ continue;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
- item_size = btrfs_item_size_nr(leaf, slot);
+ item_size = btrfs_item_size(leaf, slot);
cur = 0;
while (cur < item_size) {
u16 name_len = btrfs_dir_name_len(leaf, di);
@@ -352,8 +321,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
goto next;
if (!buffer || (name_len + 1) > size_left) {
- ret = -ERANGE;
- goto err;
+ iter_ret = -ERANGE;
+ break;
}
read_extent_buffer(leaf, buffer, name_ptr, name_len);
@@ -365,13 +334,12 @@ next:
cur += this_len;
di = (struct btrfs_dir_item *)((char *)di + this_len);
}
-next_item:
- path->slots[0]++;
}
- ret = total_size;
-err:
- btrfs_free_path(path);
+ if (iter_ret < 0)
+ ret = iter_ret;
+ else
+ ret = total_size;
return ret;
}
@@ -385,17 +353,67 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
}
static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{
+ if (btrfs_root_readonly(BTRFS_I(inode)->root))
+ return -EROFS;
+
name = xattr_full_name(handler, name);
return btrfs_setxattr_trans(inode, name, buffer, size, flags);
}
+static int btrfs_xattr_handler_get_security(const struct xattr_handler *handler,
+ struct dentry *unused,
+ struct inode *inode,
+ const char *name, void *buffer,
+ size_t size)
+{
+ int ret;
+ bool is_cap = false;
+
+ name = xattr_full_name(handler, name);
+
+ /*
+ * security.capability results are not cached, so the VFS calls into us
+ * constantly to see if there's a capability xattr. Cache the result
+ * here in order to avoid wasting time doing lookups for xattrs we know
+ * don't exist.
+ */
+ if (strcmp(name, XATTR_NAME_CAPS) == 0) {
+ is_cap = true;
+ if (test_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags))
+ return -ENODATA;
+ }
+
+ ret = btrfs_getxattr(inode, name, buffer, size);
+ if (ret == -ENODATA && is_cap)
+ set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
+ return ret;
+}
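The handler above is a small negative cache; a userspace sketch of the same idea (the flag and lookup callback are stand-ins):

#include <errno.h>
#include <stddef.h>

struct demo_inode {
	int no_cap_xattr; /* set once a capability lookup came back empty */
};

static int demo_get_caps(struct demo_inode *inode,
			 int (*lookup)(struct demo_inode *, void *, size_t),
			 void *buf, size_t size)
{
	int ret;

	if (inode->no_cap_xattr)
		return -ENODATA;         /* answered from the cache, no tree walk */

	ret = lookup(inode, buf, size);
	if (ret == -ENODATA)
		inode->no_cap_xattr = 1; /* remember the miss for next time */
	return ret;
}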
+
+static int btrfs_xattr_handler_set_security(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *unused,
+ struct inode *inode,
+ const char *name,
+ const void *buffer,
+ size_t size, int flags)
+{
+ if (btrfs_root_readonly(BTRFS_I(inode)->root))
+ return -EROFS;
+
+ name = xattr_full_name(handler, name);
+ if (strcmp(name, XATTR_NAME_CAPS) == 0)
+ clear_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
+
+ return btrfs_setxattr_trans(inode, name, buffer, size, flags);
+}
+
static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -405,20 +423,24 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
struct btrfs_root *root = BTRFS_I(inode)->root;
name = xattr_full_name(handler, name);
- ret = btrfs_validate_prop(name, value, size);
+ ret = btrfs_validate_prop(BTRFS_I(inode), name, value, size);
if (ret)
return ret;
+ if (btrfs_ignore_prop(BTRFS_I(inode), name))
+ return 0;
+
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_set_prop(trans, inode, name, value, size, flags);
+ ret = btrfs_set_prop(trans, BTRFS_I(inode), name, value, size, flags);
if (!ret) {
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- BUG_ON(ret);
+ inode_set_ctime_current(inode);
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
}
btrfs_end_transaction(trans);
@@ -428,8 +450,8 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
static const struct xattr_handler btrfs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
- .get = btrfs_xattr_handler_get,
- .set = btrfs_xattr_handler_set,
+ .get = btrfs_xattr_handler_get_security,
+ .set = btrfs_xattr_handler_set_security,
};
static const struct xattr_handler btrfs_trusted_xattr_handler = {
@@ -450,12 +472,8 @@ static const struct xattr_handler btrfs_btrfs_xattr_handler = {
.set = btrfs_xattr_handler_set_prop,
};
-const struct xattr_handler *btrfs_xattr_handlers[] = {
+const struct xattr_handler * const btrfs_xattr_handlers[] = {
&btrfs_security_xattr_handler,
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
&btrfs_trusted_xattr_handler,
&btrfs_user_xattr_handler,
&btrfs_btrfs_xattr_handler,
@@ -469,7 +487,7 @@ static int btrfs_initxattrs(struct inode *inode,
const struct xattr *xattr;
unsigned int nofs_flag;
char *name;
- int err = 0;
+ int ret = 0;
/*
* We're holding a transaction handle, so use a NOFS memory allocation
@@ -477,22 +495,27 @@ static int btrfs_initxattrs(struct inode *inode,
*/
nofs_flag = memalloc_nofs_save();
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
- strlen(xattr->name) + 1, GFP_KERNEL);
+ const size_t name_len = XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1;
+
+ name = kmalloc(name_len, GFP_KERNEL);
if (!name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
break;
}
- strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
- err = btrfs_setxattr(trans, inode, name, xattr->value,
+ scnprintf(name, name_len, "%s%s", XATTR_SECURITY_PREFIX, xattr->name);
+
+ if (strcmp(name, XATTR_NAME_CAPS) == 0)
+ clear_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
+
+ ret = btrfs_setxattr(trans, inode, name, xattr->value,
xattr->value_len, 0);
kfree(name);
- if (err < 0)
+ if (ret < 0)
break;
}
memalloc_nofs_restore(nofs_flag);
- return err;
+ return ret;
}
int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 1cd3fc0a8f17..0ce10e4ec836 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -6,11 +6,17 @@
#ifndef BTRFS_XATTR_H
#define BTRFS_XATTR_H
-#include <linux/xattr.h>
+#include <linux/types.h>
-extern const struct xattr_handler *btrfs_xattr_handlers[];
+struct dentry;
+struct inode;
+struct qstr;
+struct xattr_handler;
+struct btrfs_trans_handle;
-int btrfs_getxattr(struct inode *inode, const char *name,
+extern const struct xattr_handler * const btrfs_xattr_handlers[];
+
+int btrfs_getxattr(const struct inode *inode, const char *name,
void *buffer, size_t size);
int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
const char *name, const void *value, size_t size, int flags);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 767a0c6c9694..6caba8be7c84 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -18,7 +18,10 @@
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
+#include "btrfs_inode.h"
#include "compression.h"
+#include "fs.h"
+#include "subpage.h"
/* workspace buffer size for s390 zlib hardware support */
#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
@@ -31,11 +34,9 @@ struct workspace {
int level;
};
-static struct workspace_manager wsm;
-
-struct list_head *zlib_get_workspace(unsigned int level)
+struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int level)
{
- struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
+ struct list_head *ws = btrfs_get_workspace(fs_info, BTRFS_COMPRESS_ZLIB, level);
struct workspace *workspace = list_entry(ws, struct workspace, list);
workspace->level = level;
@@ -52,8 +53,25 @@ void zlib_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *zlib_alloc_workspace(unsigned int level)
+/*
+ * For s390 hardware acceleration, the buffer size should be at least
+ * ZLIB_DFLTCC_BUF_SIZE to achieve the best performance.
+ *
+ * But if bs > ps we can have large enough folios that meet the s390 hardware
+ * handling.
+ */
+static bool need_special_buffer(struct btrfs_fs_info *fs_info)
+{
+ if (!zlib_deflate_dfltcc_enabled())
+ return false;
+ if (btrfs_min_folio_size(fs_info) >= ZLIB_DFLTCC_BUF_SIZE)
+ return false;
+ return true;
+}
+
+struct list_head *zlib_alloc_workspace(struct btrfs_fs_info *fs_info, unsigned int level)
{
+ const u32 blocksize = fs_info->sectorsize;
struct workspace *workspace;
int workspacesize;
@@ -63,22 +81,18 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
- workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+ workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL | __GFP_NOWARN);
workspace->level = level;
workspace->buf = NULL;
- /*
- * In case of s390 zlib hardware support, allocate lager workspace
- * buffer. If allocator fails, fall back to a single page buffer.
- */
- if (zlib_deflate_dfltcc_enabled()) {
+ if (need_special_buffer(fs_info)) {
workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
__GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN | GFP_NOIO);
workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
}
if (!workspace->buf) {
- workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- workspace->buf_size = PAGE_SIZE;
+ workspace->buf = kmalloc(blocksize, GFP_KERNEL);
+ workspace->buf_size = blocksize;
}
if (!workspace->strm.workspace || !workspace->buf)
goto fail;
@@ -91,29 +105,75 @@ fail:
return ERR_PTR(-ENOMEM);
}
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
- unsigned long *total_in, unsigned long *total_out)
+/*
+ * Helper for S390x with hardware zlib compression support.
+ *
+ * That hardware acceleration requires a buffer size larger than a single page
+ * to get ideal performance, thus we need to do the memory copy rather than
+ * use the page cache directly as input buffer.
+ */
+static int copy_data_into_buffer(struct address_space *mapping,
+ struct workspace *workspace, u64 filepos,
+ unsigned long length)
+{
+ u64 cur = filepos;
+
+ /* It's only for hardware accelerated zlib code. */
+ ASSERT(zlib_deflate_dfltcc_enabled());
+
+ while (cur < filepos + length) {
+ struct folio *folio;
+ void *data_in;
+ unsigned int offset;
+ unsigned long copy_length;
+ int ret;
+
+ ret = btrfs_compress_filemap_get_folio(mapping, cur, &folio);
+ if (ret < 0)
+ return ret;
+
+ offset = offset_in_folio(folio, cur);
+ copy_length = min(folio_size(folio) - offset,
+ filepos + length - cur);
+
+ data_in = kmap_local_folio(folio, offset);
+ memcpy(workspace->buf + cur - filepos, data_in, copy_length);
+ kunmap_local(data_in);
+ cur += copy_length;
+ }
+ return 0;
+}
+
+int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
int ret;
- char *data_in;
- char *cpage_out;
- int nr_pages = 0;
- struct page *in_page = NULL;
- struct page *out_page = NULL;
- unsigned long bytes_left;
- unsigned int in_buf_pages;
+ char *data_in = NULL;
+ char *cfolio_out;
+ int nr_folios = 0;
+ struct folio *in_folio = NULL;
+ struct folio *out_folio = NULL;
unsigned long len = *total_out;
- unsigned long nr_dest_pages = *out_pages;
- const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+ unsigned long nr_dest_folios = *out_folios;
+ const unsigned long max_out = nr_dest_folios << min_folio_shift;
+ const u32 blocksize = fs_info->sectorsize;
+ const u64 orig_end = start + len;
- *out_pages = 0;
+ *out_folios = 0;
*total_out = 0;
*total_in = 0;
- if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
- pr_warn("BTRFS: deflateInit failed\n");
+ ret = zlib_deflateInit(&workspace->strm, workspace->level);
+ if (unlikely(ret != Z_OK)) {
+ btrfs_err(fs_info,
+ "zlib compression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
ret = -EIO;
goto out;
}
@@ -121,19 +181,19 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- cpage_out = kmap(out_page);
- pages[0] = out_page;
- nr_pages = 1;
+ cfolio_out = folio_address(out_folio);
+ folios[0] = out_folio;
+ nr_folios = 1;
workspace->strm.next_in = workspace->buf;
workspace->strm.avail_in = 0;
- workspace->strm.next_out = cpage_out;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.next_out = cfolio_out;
+ workspace->strm.avail_out = min_folio_size;
while (workspace->strm.total_in < len) {
/*
@@ -141,51 +201,56 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
* the workspace buffer if required.
*/
if (workspace->strm.avail_in == 0) {
- bytes_left = len - workspace->strm.total_in;
- in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
- workspace->buf_size / PAGE_SIZE);
- if (in_buf_pages > 1) {
- int i;
-
- for (i = 0; i < in_buf_pages; i++) {
- if (in_page) {
- kunmap(in_page);
- put_page(in_page);
- }
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
- data_in = kmap(in_page);
- memcpy(workspace->buf + i * PAGE_SIZE,
- data_in, PAGE_SIZE);
- start += PAGE_SIZE;
- }
+ unsigned long bytes_left = len - workspace->strm.total_in;
+ unsigned int copy_length = min(bytes_left, workspace->buf_size);
+
+ /*
+ * For s390 hardware-accelerated zlib, when our folio is smaller
+ * than copy_length, we need to fill the buffer so that
+ * we can take full advantage of hardware acceleration.
+ */
+ if (need_special_buffer(fs_info)) {
+ ret = copy_data_into_buffer(mapping, workspace,
+ start, copy_length);
+ if (ret < 0)
+ goto out;
+ start += copy_length;
workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = copy_length;
} else {
- if (in_page) {
- kunmap(in_page);
- put_page(in_page);
+ unsigned int cur_len;
+
+ if (data_in) {
+ kunmap_local(data_in);
+ folio_put(in_folio);
+ data_in = NULL;
}
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
- data_in = kmap(in_page);
- start += PAGE_SIZE;
+ ret = btrfs_compress_filemap_get_folio(mapping,
+ start, &in_folio);
+ if (ret < 0)
+ goto out;
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ data_in = kmap_local_folio(in_folio,
+ offset_in_folio(in_folio, start));
+ start += cur_len;
workspace->strm.next_in = data_in;
+ workspace->strm.avail_in = cur_len;
}
- workspace->strm.avail_in = min(bytes_left,
- (unsigned long) workspace->buf_size);
}
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
- if (ret != Z_OK) {
- pr_debug("BTRFS: deflate in loop returned %d\n",
- ret);
+ if (unlikely(ret != Z_OK)) {
+ btrfs_warn(fs_info,
+ "zlib compression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
zlib_deflateEnd(&workspace->strm);
ret = -EIO;
goto out;
}
/* we're making it bigger, give up */
- if (workspace->strm.total_in > 8192 &&
+ if (workspace->strm.total_in > blocksize * 2 &&
workspace->strm.total_in <
workspace->strm.total_out) {
ret = -E2BIG;
@@ -196,22 +261,20 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
* the stream end if required
*/
if (workspace->strm.avail_out == 0) {
- kunmap(out_page);
- if (nr_pages == nr_dest_pages) {
- out_page = NULL;
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- cpage_out = kmap(out_page);
- pages[nr_pages] = out_page;
- nr_pages++;
- workspace->strm.avail_out = PAGE_SIZE;
- workspace->strm.next_out = cpage_out;
+ cfolio_out = folio_address(out_folio);
+ folios[nr_folios] = out_folio;
+ nr_folios++;
+ workspace->strm.avail_out = min_folio_size;
+ workspace->strm.next_out = cfolio_out;
}
/* we're all done */
if (workspace->strm.total_in >= len)
@@ -228,28 +291,26 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = zlib_deflate(&workspace->strm, Z_FINISH);
if (ret == Z_STREAM_END)
break;
- if (ret != Z_OK && ret != Z_BUF_ERROR) {
+ if (unlikely(ret != Z_OK && ret != Z_BUF_ERROR)) {
zlib_deflateEnd(&workspace->strm);
ret = -EIO;
goto out;
} else if (workspace->strm.avail_out == 0) {
- /* get another page for the stream end */
- kunmap(out_page);
- if (nr_pages == nr_dest_pages) {
- out_page = NULL;
+ /* Get another folio for the stream end. */
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- cpage_out = kmap(out_page);
- pages[nr_pages] = out_page;
- nr_pages++;
- workspace->strm.avail_out = PAGE_SIZE;
- workspace->strm.next_out = cpage_out;
+ cfolio_out = folio_address(out_folio);
+ folios[nr_folios] = out_folio;
+ nr_folios++;
+ workspace->strm.avail_out = min_folio_size;
+ workspace->strm.next_out = cfolio_out;
}
}
zlib_deflateEnd(&workspace->strm);
@@ -263,33 +324,33 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = workspace->strm.total_out;
*total_in = workspace->strm.total_in;
out:
- *out_pages = nr_pages;
- if (out_page)
- kunmap(out_page);
-
- if (in_page) {
- kunmap(in_page);
- put_page(in_page);
+ *out_folios = nr_folios;
+ if (data_in) {
+ kunmap_local(data_in);
+ folio_put(in_folio);
}
+
return ret;
}
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
int ret = 0, ret2;
int wbits = MAX_WBITS;
char *data_in;
size_t total_out = 0;
- unsigned long page_in_index = 0;
+ unsigned long folio_in_index = 0;
size_t srclen = cb->compressed_len;
- unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
unsigned long buf_start;
- struct page **pages_in = cb->compressed_pages;
+ struct folio **folios_in = cb->compressed_folios;
- data_in = kmap(pages_in[page_in_index]);
+ data_in = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->strm.next_in = data_in;
- workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->strm.avail_in = min_t(size_t, srclen, min_folio_size);
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
@@ -307,9 +368,14 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->strm.avail_in -= 2;
}
- if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
- pr_warn("BTRFS: inflateInit failed\n");
- kunmap(pages_in[page_in_index]);
+ ret = zlib_inflateInit2(&workspace->strm, wbits);
+ if (unlikely(ret != Z_OK)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ kunmap_local(data_in);
+ btrfs_err(inode->root->fs_info,
+ "zlib decompression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
return -EIO;
}
while (workspace->strm.total_in < srclen) {
@@ -336,44 +402,42 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
if (workspace->strm.avail_in == 0) {
unsigned long tmp;
- kunmap(pages_in[page_in_index]);
- page_in_index++;
- if (page_in_index >= total_pages_in) {
+ kunmap_local(data_in);
+ folio_in_index++;
+ if (folio_in_index >= total_folios_in) {
data_in = NULL;
break;
}
- data_in = kmap(pages_in[page_in_index]);
+ data_in = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->strm.next_in = data_in;
tmp = srclen - workspace->strm.total_in;
- workspace->strm.avail_in = min(tmp, PAGE_SIZE);
+ workspace->strm.avail_in = min(tmp, min_folio_size);
}
}
- if (ret != Z_STREAM_END)
+ if (unlikely(ret != Z_STREAM_END)) {
+ btrfs_err(cb->bbio.inode->root->fs_info,
+ "zlib decompression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(cb->bbio.inode->root),
+ btrfs_ino(cb->bbio.inode), cb->start);
ret = -EIO;
- else
+ } else {
ret = 0;
+ }
done:
zlib_inflateEnd(&workspace->strm);
if (data_in)
- kunmap(pages_in[page_in_index]);
- if (!ret)
- zero_fill_bio(cb->orig_bio);
+ kunmap_local(data_in);
return ret;
}
-int zlib_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+int zlib_decompress(struct list_head *ws, const u8 *data_in,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
int wbits = MAX_WBITS;
- unsigned long bytes_left;
- unsigned long total_out = 0;
- unsigned long pg_offset = 0;
-
- destlen = min_t(unsigned long, destlen, PAGE_SIZE);
- bytes_left = destlen;
+ unsigned long to_copy;
workspace->strm.next_in = data_in;
workspace->strm.avail_in = srclen;
@@ -393,70 +457,50 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
workspace->strm.avail_in -= 2;
}
- if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
- pr_warn("BTRFS: inflateInit failed\n");
+ ret = zlib_inflateInit2(&workspace->strm, wbits);
+ if (unlikely(ret != Z_OK)) {
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
+
+ btrfs_err(inode->root->fs_info,
+ "zlib decompression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(dest_folio));
return -EIO;
}
- while (bytes_left > 0) {
- unsigned long buf_start;
- unsigned long buf_offset;
- unsigned long bytes;
-
- ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
- if (ret != Z_OK && ret != Z_STREAM_END)
- break;
-
- buf_start = total_out;
- total_out = workspace->strm.total_out;
-
- if (total_out == buf_start) {
- ret = -EIO;
- break;
- }
-
- if (total_out <= start_byte)
- goto next;
-
- if (total_out > start_byte && buf_start < start_byte)
- buf_offset = start_byte - buf_start;
- else
- buf_offset = 0;
-
- bytes = min(PAGE_SIZE - pg_offset,
- PAGE_SIZE - (buf_offset % PAGE_SIZE));
- bytes = min(bytes, bytes_left);
+ /*
+ * Everything (in/out buf) should be at most one sector, so there should
+ * be no need to switch any input/output buffer.
+ */
+ ret = zlib_inflate(&workspace->strm, Z_FINISH);
+ to_copy = min(workspace->strm.total_out, destlen);
+ if (ret != Z_STREAM_END)
+ goto out;
- memcpy_to_page(dest_page, pg_offset,
- workspace->buf + buf_offset, bytes);
+ memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, to_copy);
- pg_offset += bytes;
- bytes_left -= bytes;
-next:
- workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = workspace->buf_size;
- }
+out:
+ if (unlikely(to_copy != destlen)) {
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
- if (ret != Z_STREAM_END && bytes_left != 0)
+ btrfs_err(inode->root->fs_info,
+"zlib decompression failed, error %d root %llu inode %llu offset %llu decompressed %lu expected %zu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(dest_folio), to_copy, destlen);
ret = -EIO;
- else
+ } else {
ret = 0;
+ }
zlib_inflateEnd(&workspace->strm);
- /*
- * this should only happen if zlib returned fewer bytes than we
- * expected. btrfs_get_block is responsible for zeroing from the
- * end of the inline extent (destlen) to the end of the page
- */
- if (pg_offset < destlen) {
- memzero_page(dest_page, pg_offset, destlen - pg_offset);
- }
+ if (unlikely(to_copy < destlen))
+ folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
return ret;
}
-const struct btrfs_compress_op btrfs_zlib_compress = {
- .workspace_manager = &wsm,
+const struct btrfs_compress_levels btrfs_zlib_compress = {
+ .min_level = 1,
.max_level = 9,
.default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
};
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 67d932d70798..359a98e6de85 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -5,15 +5,19 @@
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
+#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
-#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
-#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"
+#include "fs.h"
+#include "accessors.h"
+#include "bio.h"
+#include "transaction.h"
+#include "sysfs.h"
/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES 4096
@@ -33,12 +37,15 @@
#define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
-#define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
-#define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
+#define BTRFS_SB_LOG_FIRST_SHIFT ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
+#define BTRFS_SB_LOG_SECOND_SHIFT ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2
+/* Default number of max active zones when the device has no limits. */
+#define BTRFS_DEFAULT_MAX_ACTIVE_ZONES 128
+
/*
* Minimum of active zones we need:
*
@@ -50,14 +57,19 @@
#define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
/*
- * Maximum supported zone size. Currently, SMR disks have a zone size of
- * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
- * expect the zone size to become larger than 8GiB in the near future.
+ * Minimum / maximum supported zone size. Currently, SMR disks have a zone
+ * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
+ * We do not expect the zone size to become larger than 8GiB or smaller than
+ * 4MiB in the near future.
*/
#define BTRFS_MAX_ZONE_SIZE SZ_8G
+#define BTRFS_MIN_ZONE_SIZE SZ_4M
#define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
+static void wait_eb_writebacks(struct btrfs_block_group *block_group);
+static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
+
static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
return (zone->cond == BLK_ZONE_COND_FULL) ||
@@ -79,10 +91,10 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
bool empty[BTRFS_NR_SB_LOG_ZONES];
bool full[BTRFS_NR_SB_LOG_ZONES];
sector_t sector;
- int i;
- for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
- ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
+ for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+ ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL,
+ "zones[%d].type=%d", i, zones[i].type);
empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
full[i] = sb_zone_is_full(&zones[i]);
}
@@ -91,9 +103,9 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
* Possible states of log buffer zones
*
* Empty[0] In use[0] Full[0]
- * Empty[1] * x 0
- * In use[1] 0 x 0
- * Full[1] 1 1 C
+ * Empty[1] * 0 1
+ * In use[1] x x 1
+ * Full[1] 0 0 C
*
* Log position:
* *: Special case, no superblock is written
@@ -110,16 +122,14 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
return -ENOENT;
} else if (full[0] && full[1]) {
/* Compare two super blocks */
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
struct page *page[BTRFS_NR_SB_LOG_ZONES];
struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
- int i;
-
- for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
- u64 bytenr;
- bytenr = ((zones[i].start + zones[i].len)
- << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
+ for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+ u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
+ u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
+ BTRFS_SUPER_INFO_SIZE;
page[i] = read_cache_page_gfp(mapping,
bytenr >> PAGE_SHIFT, GFP_NOFS);
@@ -131,12 +141,13 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
super[i] = page_address(page[i]);
}
- if (super[0]->generation > super[1]->generation)
+ if (btrfs_super_generation(super[0]) >
+ btrfs_super_generation(super[1]))
sector = zones[1].start;
else
sector = zones[0].start;
- for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
+ for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
btrfs_release_disk_super(super[i]);
} else if (!full[0] && (empty[1] || full[1])) {
sector = zones[0].wp;
@@ -154,16 +165,16 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
*/
static inline u32 sb_zone_number(int shift, int mirror)
{
- u64 zone;
+ u64 zone = U64_MAX;
- ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
+ ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX, "mirror=%d", mirror);
switch (mirror) {
case 0: zone = 0; break;
case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
}
- ASSERT(zone <= U32_MAX);
+ ASSERT(zone <= U32_MAX, "zone=%llu", zone);
return (u32)zone;
}
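A quick worked example of the zone numbers computed above, assuming 256MiB zones (shift == 28) and the 512GiB / 4096GiB log offsets defined earlier: mirror 1 lands in zone 1 << (39 - 28) == 2048 and mirror 2 in zone 1 << (42 - 28) == 16384.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int shift = 28;                          /* assumed: ilog2(256MiB zone) */
	uint64_t mirror1 = 1ULL << (39 - shift); /* 512GiB offset  -> zone 2048  */
	uint64_t mirror2 = 1ULL << (42 - shift); /* 4096GiB offset -> zone 16384 */

	printf("mirror1=%llu mirror2=%llu\n",
	       (unsigned long long)mirror1, (unsigned long long)mirror2);
	return 0;
}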
@@ -213,6 +224,7 @@ static int emulate_report_zones(struct btrfs_device *device, u64 pos,
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
struct blk_zone *zones, unsigned int *nr_zones)
{
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
int ret;
if (!*nr_zones)
@@ -224,26 +236,64 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
return 0;
}
- ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
- copy_zone_info_cb, zones);
+ /* Check cache */
+ if (zinfo->zone_cache) {
+ unsigned int i;
+ u32 zno;
+
+ ASSERT(IS_ALIGNED(pos, zinfo->zone_size),
+ "pos=%llu zinfo->zone_size=%llu", pos, zinfo->zone_size);
+ zno = pos >> zinfo->zone_size_shift;
+ /*
+ * We cannot report zones beyond the device's last zone, so cap
+ * *nr_zones at the number of zones remaining up to the end.
+ */
+ *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
+
+ for (i = 0; i < *nr_zones; i++) {
+ struct blk_zone *zone_info;
+
+ zone_info = &zinfo->zone_cache[zno + i];
+ if (!zone_info->len)
+ break;
+ }
+
+ if (i == *nr_zones) {
+ /* Cache hit on all the zones */
+ memcpy(zones, zinfo->zone_cache + zno,
+ sizeof(*zinfo->zone_cache) * *nr_zones);
+ return 0;
+ }
+ }
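+ /*
+ * A partial hit (a cached entry with zero length) falls through to the
+ * device report below, which then also refreshes the cache for the
+ * reported range.
+ */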
+
+ ret = blkdev_report_zones_cached(device->bdev, pos >> SECTOR_SHIFT,
+ *nr_zones, copy_zone_info_cb, zones);
if (ret < 0) {
- btrfs_err_in_rcu(device->fs_info,
+ btrfs_err(device->fs_info,
"zoned: failed to read zone %llu on %s (devid %llu)",
- pos, rcu_str_deref(device->name),
+ pos, rcu_dereference(device->name),
device->devid);
return ret;
}
*nr_zones = ret;
- if (!ret)
+ if (unlikely(!ret))
return -EIO;
+ /* Populate cache */
+ if (zinfo->zone_cache) {
+ u32 zno = pos >> zinfo->zone_size_shift;
+
+ memcpy(zinfo->zone_cache + zno, zones,
+ sizeof(*zinfo->zone_cache) * *nr_zones);
+ }
+
return 0;
}
/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -260,28 +310,21 @@ static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
+ return ret;
/* No dev extents at all? Not good */
- if (ret > 0) {
- ret = -EUCLEAN;
- goto out;
- }
+ if (unlikely(ret > 0))
+ return -EUCLEAN;
}
leaf = path->nodes[0];
dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
- ret = 0;
-
-out:
- btrfs_free_path(path);
-
- return ret;
+ return 0;
}
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
@@ -300,7 +343,7 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
if (!device->bdev)
continue;
- ret = btrfs_get_dev_zone_info(device);
+ ret = btrfs_get_dev_zone_info(device, true);
if (ret)
break;
}
@@ -309,12 +352,11 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
return ret;
}
-int btrfs_get_dev_zone_info(struct btrfs_device *device)
+int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_zoned_device_info *zone_info = NULL;
struct block_device *bdev = device->bdev;
- struct request_queue *queue = bdev_get_queue(bdev);
unsigned int max_active_zones;
unsigned int nactive;
sector_t nr_sectors;
@@ -339,6 +381,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (!zone_info)
return -ENOMEM;
+ device->zone_info = zone_info;
+
if (!bdev_is_zoned(bdev)) {
if (!fs_info->zone_size) {
ret = calculate_emulated_zone_size(fs_info);
@@ -352,18 +396,24 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
zone_sectors = bdev_zone_sectors(bdev);
}
- /* Check if it's power of 2 (see is_power_of_2) */
- ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
+ ASSERT(is_power_of_two_u64(zone_sectors));
zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
/* We reject devices with a zone size larger than 8GB */
if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"zoned: %s: zone size %llu larger than supported maximum %llu",
- rcu_str_deref(device->name),
+ rcu_dereference(device->name),
zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
ret = -EINVAL;
goto out;
+ } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
+ btrfs_err(fs_info,
+ "zoned: %s: zone size %llu smaller than supported minimum %u",
+ rcu_dereference(device->name),
+ zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
+ ret = -EINVAL;
+ goto out;
}
nr_sectors = bdev_nr_sectors(bdev);
@@ -372,11 +422,14 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (!IS_ALIGNED(nr_sectors, zone_sectors))
zone_info->nr_zones++;
- max_active_zones = queue_max_active_zones(queue);
+ max_active_zones = min_not_zero(bdev_max_active_zones(bdev),
+ bdev_max_open_zones(bdev));
+ if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)
+ max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES;
if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
- btrfs_err_in_rcu(fs_info,
+ btrfs_err(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
- rcu_str_deref(device->name), max_active_zones,
+ rcu_dereference(device->name), max_active_zones,
BTRFS_MIN_ACTIVE_ZONES);
ret = -EINVAL;
goto out;
@@ -401,12 +454,29 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
goto out;
}
- zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
+ zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
if (!zones) {
ret = -ENOMEM;
goto out;
}
+ /*
+ * Enable zone cache only for a zoned device. On a non-zoned device, we
+ * fill the zone info with emulated CONVENTIONAL zones, so no need to
+ * use the cache.
+ */
+ if (populate_cache && bdev_is_zoned(device->bdev)) {
+ zone_info->zone_cache = vcalloc(zone_info->nr_zones,
+ sizeof(struct blk_zone));
+ if (!zone_info->zone_cache) {
+ btrfs_err(device->fs_info,
+ "zoned: failed to allocate zone cache for %s",
+ rcu_dereference(device->name));
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
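+ /*
+ * The cache is only requested on the mount path (populate_cache is
+ * true there); presumably it is dropped again once the initial zone
+ * scan is done, so the vcalloc'ed array does not stay around for the
+ * whole lifetime of the filesystem.
+ */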
+
/* Get zones type */
nactive = 0;
while (sector < nr_sectors) {
@@ -426,6 +496,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_ACTIVE:
__set_bit(nreported, zone_info->active_zones);
nactive++;
break;
@@ -435,28 +506,35 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
}
- if (nreported != zone_info->nr_zones) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(nreported != zone_info->nr_zones)) {
+ btrfs_err(device->fs_info,
"inconsistent number of zones on %s (%u/%u)",
- rcu_str_deref(device->name), nreported,
+ rcu_dereference(device->name), nreported,
zone_info->nr_zones);
ret = -EIO;
goto out;
}
if (max_active_zones) {
- if (nactive > max_active_zones) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(nactive > max_active_zones)) {
+ if (bdev_max_active_zones(bdev) == 0) {
+ max_active_zones = 0;
+ zone_info->max_active_zones = 0;
+ goto validate;
+ }
+ btrfs_err(device->fs_info,
"zoned: %u active zones on %s exceeds max_active_zones %u",
- nactive, rcu_str_deref(device->name),
+ nactive, rcu_dereference(device->name),
max_active_zones);
ret = -EIO;
goto out;
}
atomic_set(&zone_info->active_zones_left,
max_active_zones - nactive);
+ set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
}
+validate:
/* Validate superblock log */
nr_zones = BTRFS_NR_SB_LOG_ZONES;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -475,8 +553,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (ret)
goto out;
- if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(nr_zones != BTRFS_NR_SB_LOG_ZONES)) {
+ btrfs_err(device->fs_info,
"zoned: failed to read super block log zone info at devid %llu zone %u",
device->devid, sb_zone);
ret = -EUCLEAN;
@@ -493,8 +571,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
ret = sb_write_pointer(device->bdev,
&zone_info->sb_zones[sb_pos], &sb_wp);
- if (ret != -ENOENT && ret) {
- btrfs_err_in_rcu(device->fs_info,
+ if (unlikely(ret != -ENOENT && ret)) {
+ btrfs_err(device->fs_info,
"zoned: super block log zone corrupted devid %llu zone %u",
device->devid, sb_zone);
ret = -EUCLEAN;
@@ -503,48 +581,26 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
}
- kfree(zones);
-
- device->zone_info = zone_info;
+ kvfree(zones);
- switch (bdev_zoned_model(bdev)) {
- case BLK_ZONED_HM:
+ if (bdev_is_zoned(bdev)) {
model = "host-managed zoned";
emulated = "";
- break;
- case BLK_ZONED_HA:
- model = "host-aware zoned";
- emulated = "";
- break;
- case BLK_ZONED_NONE:
+ } else {
model = "regular";
emulated = "emulated ";
- break;
- default:
- /* Just in case */
- btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
- bdev_zoned_model(bdev),
- rcu_str_deref(device->name));
- ret = -EOPNOTSUPP;
- goto out_free_zone_info;
}
- btrfs_info_in_rcu(fs_info,
+ btrfs_info(fs_info,
"%s block device %s, %u %szones of %llu bytes",
- model, rcu_str_deref(device->name), zone_info->nr_zones,
+ model, rcu_dereference(device->name), zone_info->nr_zones,
emulated, zone_info->zone_size);
return 0;
out:
- kfree(zones);
-out_free_zone_info:
- bitmap_free(zone_info->active_zones);
- bitmap_free(zone_info->empty_zones);
- bitmap_free(zone_info->seq_zones);
- kfree(zone_info);
- device->zone_info = NULL;
-
+ kvfree(zones);
+ btrfs_destroy_dev_zone_info(device);
return ret;
}
@@ -558,12 +614,52 @@ void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
bitmap_free(zone_info->active_zones);
bitmap_free(zone_info->seq_zones);
bitmap_free(zone_info->empty_zones);
+ vfree(zone_info->zone_cache);
kfree(zone_info);
device->zone_info = NULL;
}
-int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
- struct blk_zone *zone)
+struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
+{
+ struct btrfs_zoned_device_info *zone_info;
+
+ zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
+ if (!zone_info)
+ return NULL;
+
+ zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+ if (!zone_info->seq_zones)
+ goto out;
+
+ bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
+ zone_info->nr_zones);
+
+ zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+ if (!zone_info->empty_zones)
+ goto out;
+
+ bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
+ zone_info->nr_zones);
+
+ zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+ if (!zone_info->active_zones)
+ goto out;
+
+ bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
+ zone_info->nr_zones);
+ zone_info->zone_cache = NULL;
+
+ return zone_info;
+
+out:
+ bitmap_free(zone_info->seq_zones);
+ bitmap_free(zone_info->empty_zones);
+ bitmap_free(zone_info->active_zones);
+ kfree(zone_info);
+ return NULL;
+}
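+
+/*
+ * The clone above intentionally starts without a zone_cache: only the static
+ * layout (zone counts, sizes and the three bitmaps) is duplicated, presumably
+ * for a consumer such as a device replace target that needs a snapshot of the
+ * source device's zone layout rather than a reporting cache.
+ */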
+
+static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
{
unsigned int nr_zones = 1;
int ret;
@@ -575,75 +671,66 @@ int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
return 0;
}
+static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_device *device;
+
+ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ if (device->bdev && bdev_is_zoned(device->bdev)) {
+ btrfs_err(fs_info,
+ "zoned: mode not enabled but zoned device found: %pg",
+ device->bdev);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct queue_limits *lim = &fs_info->limits;
struct btrfs_device *device;
- u64 zoned_devices = 0;
- u64 nr_devices = 0;
u64 zone_size = 0;
- const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
- int ret = 0;
+ int ret;
- /* Count zoned devices */
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
- enum blk_zoned_model model;
+ /*
+ * Host-managed devices can't be used without the ZONED flag. With the
+ * ZONED flag, all devices can be used, with zone emulation where needed.
+ */
+ if (!btrfs_fs_incompat(fs_info, ZONED))
+ return btrfs_check_for_zoned_device(fs_info);
+
+ blk_set_stacking_limits(lim);
+
+ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ struct btrfs_zoned_device_info *zone_info = device->zone_info;
if (!device->bdev)
continue;
- model = bdev_zoned_model(device->bdev);
- /*
- * A Host-Managed zoned device must be used as a zoned device.
- * A Host-Aware zoned device and a non-zoned devices can be
- * treated as a zoned device, if ZONED flag is enabled in the
- * superblock.
- */
- if (model == BLK_ZONED_HM ||
- (model == BLK_ZONED_HA && incompat_zoned) ||
- (model == BLK_ZONED_NONE && incompat_zoned)) {
- struct btrfs_zoned_device_info *zone_info =
- device->zone_info;
-
- zone_info = device->zone_info;
- zoned_devices++;
- if (!zone_size) {
- zone_size = zone_info->zone_size;
- } else if (zone_info->zone_size != zone_size) {
- btrfs_err(fs_info,
+ if (!zone_size) {
+ zone_size = zone_info->zone_size;
+ } else if (zone_info->zone_size != zone_size) {
+ btrfs_err(fs_info,
"zoned: unequal block device zone sizes: have %llu found %llu",
- device->zone_info->zone_size,
- zone_size);
- ret = -EINVAL;
- goto out;
- }
+ zone_info->zone_size, zone_size);
+ return -EINVAL;
}
- nr_devices++;
- }
-
- if (!zoned_devices && !incompat_zoned)
- goto out;
-
- if (!zoned_devices && incompat_zoned) {
- /* No zoned block device found on ZONED filesystem */
- btrfs_err(fs_info,
- "zoned: no zoned devices found on a zoned filesystem");
- ret = -EINVAL;
- goto out;
- }
- if (zoned_devices && !incompat_zoned) {
- btrfs_err(fs_info,
- "zoned: mode not enabled but zoned device found");
- ret = -EINVAL;
- goto out;
+ /*
+ * With zone emulation, a non-zoned device can be part of a zoned
+ * filesystem. Such a device has no valid max zone append size, so only
+ * stack the limits of actual zoned devices.
+ */
+ if (bdev_is_zoned(device->bdev))
+ blk_stack_limits(lim, bdev_limits(device->bdev), 0);
}
- if (zoned_devices != nr_devices) {
- btrfs_err(fs_info,
- "zoned: cannot mix zoned and regular devices");
- ret = -EINVAL;
- goto out;
+ ret = blk_validate_limits(lim);
+ if (ret) {
+ btrfs_err(fs_info, "zoned: failed to validate queue limits");
+ return ret;
}
/*
@@ -655,33 +742,46 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
btrfs_err(fs_info,
"zoned: zone size %llu not aligned to stripe %u",
zone_size, BTRFS_STRIPE_LEN);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
btrfs_err(fs_info, "zoned: mixed block groups not supported");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
fs_info->zone_size = zone_size;
+ /*
+ * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
+ * Technically, we can have multiple pages per segment. But since we
+ * add the pages one by one to a bio, and we cannot increase the
+ * metadata reservation even if that increases the number of extents,
+ * it is safe to stick with the limit.
+ */
+ fs_info->max_zone_append_size = ALIGN_DOWN(
+ min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
+ (u64)lim->max_sectors << SECTOR_SHIFT,
+ (u64)lim->max_segments << PAGE_SHIFT),
+ fs_info->sectorsize);
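+ /*
+ * Purely illustrative numbers: with hypothetical limits of
+ * max_zone_append_sectors = 1024 (512KiB), max_sectors = 2560
+ * (1280KiB), max_segments = 256 with 4KiB pages (1MiB) and a 4KiB
+ * sectorsize, the result above is 512KiB.
+ */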
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
+ fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
+ fs_info->max_zone_append_size);
+
/*
* Check mount options here, because the zoned status (derived from
* fs_info->zone_size) may have just changed.
*/
- ret = btrfs_check_mountopts_zoned(fs_info);
+ ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
if (ret)
- goto out;
+ return ret;
btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
-out:
- return ret;
+ return 0;
}
-int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
+int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
+ unsigned long long *mount_opt)
{
if (!btrfs_is_zoned(info))
return 0;
@@ -690,16 +790,22 @@ int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
* Space cache writing is not COWed. Disable that to avoid write errors
* in sequential zones.
*/
- if (btrfs_test_opt(info, SPACE_CACHE)) {
+ if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
btrfs_err(info, "zoned: space cache v1 is not supported");
return -EINVAL;
}
- if (btrfs_test_opt(info, NODATACOW)) {
+ if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) {
btrfs_err(info, "zoned: NODATACOW not supported");
return -EINVAL;
}
+ if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) {
+ btrfs_info(info,
+ "zoned: async discard ignored and disabled for zoned mode");
+ btrfs_clear_opt(*mount_opt, DISCARD_ASYNC);
+ }
+
return 0;
}
@@ -727,11 +833,14 @@ static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
reset = &zones[1];
if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
+ unsigned int nofs_flags;
+
ASSERT(sb_zone_is_full(reset));
+ nofs_flags = memalloc_nofs_save();
ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- reset->start, reset->len,
- GFP_NOFS);
+ reset->start, reset->len);
+ memalloc_nofs_restore(nofs_flags);
if (ret)
return ret;
@@ -790,12 +899,12 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
if (sb_zone + 1 >= nr_zones)
return -ENOENT;
- ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
- BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
- zones);
+ ret = blkdev_report_zones_cached(bdev, zone_start_sector(sb_zone, bdev),
+ BTRFS_NR_SB_LOG_ZONES,
+ copy_zone_info_cb, zones);
if (ret < 0)
return ret;
- if (ret != BTRFS_NR_SB_LOG_ZONES)
+ if (unlikely(ret != BTRFS_NR_SB_LOG_ZONES))
return -EIO;
return sb_log_location(bdev, zones, rw, bytenr_ret);
@@ -877,11 +986,14 @@ int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
* explicit ZONE_FINISH is not necessary.
*/
if (zone->wp != zone->start + zone->capacity) {
+ unsigned int nofs_flags;
int ret;
+ nofs_flags = memalloc_nofs_save();
ret = blkdev_zone_mgmt(device->bdev,
REQ_OP_ZONE_FINISH, zone->start,
- zone->len, GFP_NOFS);
+ zone->len);
+ memalloc_nofs_restore(nofs_flags);
if (ret)
return ret;
}
@@ -893,17 +1005,19 @@ int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
}
/* All the zones are FULL. Should not reach here. */
- ASSERT(0);
+ DEBUG_WARN("unexpected state, all zones full");
return -EIO;
}
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
+ unsigned int nofs_flags;
sector_t zone_sectors;
sector_t nr_sectors;
u8 zone_sectors_shift;
u32 sb_zone;
u32 nr_zones;
+ int ret;
zone_sectors = bdev_zone_sectors(bdev);
zone_sectors_shift = ilog2(zone_sectors);
@@ -914,13 +1028,16 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
if (sb_zone + 1 >= nr_zones)
return -ENOENT;
- return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- zone_start_sector(sb_zone, bdev),
- zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ zone_start_sector(sb_zone, bdev),
+ zone_sectors * BTRFS_NR_SB_LOG_ZONES);
+ memalloc_nofs_restore(nofs_flags);
+ return ret;
}
-/**
- * btrfs_find_allocatable_zones - find allocatable zones within a given region
+/*
+ * Find allocatable zones within a given region.
*
* @device: the device to allocate a region on
* @hole_start: the start of the hole in which to allocate the region
@@ -941,8 +1058,10 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
bool have_sb;
int i;
- ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
- ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size),
+ "hole_start=%llu zinfo->zone_size=%llu", hole_start, zinfo->zone_size);
+ ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size),
+ "num_bytes=%llu zinfo->zone_size=%llu", num_bytes, zinfo->zone_size);
while (pos < hole_end) {
begin = pos >> shift;
@@ -953,7 +1072,7 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
/* Check if zones in the region are all empty */
if (btrfs_dev_is_sequential(device, pos) &&
- find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
+ !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
pos += zinfo->zone_size;
continue;
}
@@ -1027,12 +1146,14 @@ static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
u64 length, u64 *bytes)
{
+ unsigned int nofs_flags;
int ret;
*bytes = 0;
+ nofs_flags = memalloc_nofs_save();
ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
- physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
- GFP_NOFS);
+ physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
+ memalloc_nofs_restore(nofs_flags);
if (ret)
return ret;
@@ -1052,23 +1173,25 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
struct btrfs_zoned_device_info *zinfo = device->zone_info;
const u8 shift = zinfo->zone_size_shift;
unsigned long begin = start >> shift;
- unsigned long end = (start + size) >> shift;
+ unsigned long nbits = size >> shift;
u64 pos;
int ret;
- ASSERT(IS_ALIGNED(start, zinfo->zone_size));
- ASSERT(IS_ALIGNED(size, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(start, zinfo->zone_size),
+ "start=%llu, zinfo->zone_size=%llu", start, zinfo->zone_size);
+ ASSERT(IS_ALIGNED(size, zinfo->zone_size),
+ "size=%llu, zinfo->zone_size=%llu", size, zinfo->zone_size);
- if (end > zinfo->nr_zones)
+ if (begin + nbits > zinfo->nr_zones)
return -ERANGE;
/* All the zones are conventional */
- if (find_next_bit(zinfo->seq_zones, begin, end) == end)
+ if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
return 0;
/* All the zones are sequential and empty */
- if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end &&
- find_next_zero_bit(zinfo->empty_zones, begin, end) == end)
+ if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
+ bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
return 0;
for (pos = start; pos < start + size; pos += zinfo->zone_size) {
@@ -1079,10 +1202,10 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
continue;
/* Free regions should be empty */
- btrfs_warn_in_rcu(
+ btrfs_warn(
device->fs_info,
"zoned: resetting device %s (devid %llu) zone %llu for allocation",
- rcu_str_deref(device->name), device->devid, pos >> shift);
+ rcu_dereference(device->name), device->devid, pos >> shift);
WARN_ON_ONCE(1);
ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
@@ -1101,16 +1224,31 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
* offset.
*/
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
- u64 *offset_ret)
+ u64 *offset_ret, bool new)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_path *path;
+ struct btrfs_root *root;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
int ret;
u64 length;
+ /*
+ * Avoid tree lookups for a new block group; they are pointless, as the
+ * allocation offset must always be 0.
+ *
+ * Also, we have a lock chain of extent buffer lock -> chunk mutex. For
+ * a new block group, this function is called from btrfs_make_block_group(),
+ * which is already holding the chunk mutex. Thus, we must not take extent
+ * buffer locks here (as the tree search would), to avoid a deadlock.
+ */
+ if (new) {
+ *offset_ret = 0;
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1119,12 +1257,13 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
key.type = 0;
key.offset = 0;
+ root = btrfs_extent_root(fs_info, key.objectid);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
/* We should not find the exact match */
- if (!ret)
+ if (unlikely(!ret))
ret = -EUCLEAN;
if (ret < 0)
- goto out;
+ return ret;
ret = btrfs_previous_extent_item(root, path, cache->start);
if (ret) {
@@ -1132,7 +1271,7 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
ret = 0;
*offset_ret = 0;
}
- goto out;
+ return ret;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
@@ -1142,232 +1281,451 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
else
length = fs_info->nodesize;
- if (!(found_key.objectid >= cache->start &&
- found_key.objectid + length <= cache->start + cache->length)) {
- ret = -EUCLEAN;
- goto out;
+ if (unlikely(!(found_key.objectid >= cache->start &&
+ found_key.objectid + length <= cache->start + cache->length))) {
+ return -EUCLEAN;
}
*offset_ret = found_key.objectid + length - cache->start;
- ret = 0;
-
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
-int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+struct zone_info {
+ u64 physical;
+ u64 capacity;
+ u64 alloc_offset;
+};
+
+static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ struct zone_info *info, unsigned long *active,
+ struct btrfs_chunk_map *map, bool new)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
- struct extent_map_tree *em_tree = &fs_info->mapping_tree;
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
struct btrfs_device *device;
- u64 logical = cache->start;
- u64 length = cache->length;
- u64 physical = 0;
- int ret;
- int i;
+ int dev_replace_is_ongoing = 0;
unsigned int nofs_flag;
- u64 *alloc_offsets = NULL;
- u64 *caps = NULL;
- unsigned long *active = NULL;
- u64 last_alloc = 0;
- u32 num_sequential = 0, num_conventional = 0;
+ struct blk_zone zone;
+ int ret;
- if (!btrfs_is_zoned(fs_info))
+ info->physical = map->stripes[zone_idx].physical;
+
+ down_read(&dev_replace->rwsem);
+ device = map->stripes[zone_idx].dev;
+
+ if (!device->bdev) {
+ up_read(&dev_replace->rwsem);
+ info->alloc_offset = WP_MISSING_DEV;
return 0;
+ }
- /* Sanity check */
- if (!IS_ALIGNED(length, fs_info->zone_size)) {
+ /* Consider a zone as active if we can allow any number of active zones. */
+ if (!device->zone_info->max_active_zones)
+ __set_bit(zone_idx, active);
+
+ if (!btrfs_dev_is_sequential(device, info->physical)) {
+ up_read(&dev_replace->rwsem);
+ info->alloc_offset = WP_CONVENTIONAL;
+ info->capacity = device->zone_info->zone_size;
+ return 0;
+ }
+
+ ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
+
+ /* This zone will be used for allocation, so mark this zone non-empty. */
+ btrfs_dev_clear_zone_empty(device, info->physical);
+
+ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+ btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
+
+ /*
+ * The group is mapped to a sequential zone. Get the zone write pointer
+ * to determine the allocation offset within the zone.
+ */
+ WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+
+ if (new) {
+ sector_t capacity;
+
+ capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
+ up_read(&dev_replace->rwsem);
+ info->alloc_offset = 0;
+ info->capacity = capacity << SECTOR_SHIFT;
+
+ return 0;
+ }
+
+ nofs_flag = memalloc_nofs_save();
+ ret = btrfs_get_dev_zone(device, info->physical, &zone);
+ memalloc_nofs_restore(nofs_flag);
+ if (ret) {
+ up_read(&dev_replace->rwsem);
+ if (ret != -EIO && ret != -EOPNOTSUPP)
+ return ret;
+ info->alloc_offset = WP_MISSING_DEV;
+ return 0;
+ }
+
+ if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
btrfs_err(fs_info,
- "zoned: block group %llu len %llu unaligned to zone size %llu",
- logical, length, fs_info->zone_size);
+ "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+ zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
+ device->devid);
+ up_read(&dev_replace->rwsem);
return -EIO;
}
- /* Get the chunk mapping */
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, logical, length);
- read_unlock(&em_tree->lock);
+ info->capacity = (zone.capacity << SECTOR_SHIFT);
+
+ switch (zone.cond) {
+ case BLK_ZONE_COND_OFFLINE:
+ case BLK_ZONE_COND_READONLY:
+ btrfs_err(fs_info,
+ "zoned: offline/readonly zone %llu on device %s (devid %llu)",
+ (info->physical >> device->zone_info->zone_size_shift),
+ rcu_dereference(device->name), device->devid);
+ info->alloc_offset = WP_MISSING_DEV;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ info->alloc_offset = 0;
+ break;
+ case BLK_ZONE_COND_FULL:
+ info->alloc_offset = info->capacity;
+ break;
+ default:
+ /* Partially used zone. */
+ info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
+ __set_bit(zone_idx, active);
+ break;
+ }
+
+ up_read(&dev_replace->rwsem);
+
+ return 0;
+}
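+
+/*
+ * Note on the sentinels stored in info->alloc_offset above: WP_MISSING_DEV
+ * and WP_CONVENTIONAL are pseudo write pointer values (defined in zoned.h,
+ * typically (u64)-1 and (u64)-2) marking a stripe whose device is missing or
+ * whose zone is conventional. The DUP/RAID loaders below substitute
+ * last_alloc for WP_CONVENTIONAL entries before comparing write pointers.
+ */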
- if (!em)
+static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
+ struct zone_info *info,
+ unsigned long *active)
+{
+ if (unlikely(info->alloc_offset == WP_MISSING_DEV)) {
+ btrfs_err(bg->fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ info->physical);
+ return -EIO;
+ }
+
+ bg->alloc_offset = info->alloc_offset;
+ bg->zone_capacity = info->capacity;
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ return 0;
+}
+
+static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+ unsigned long *active,
+ u64 last_alloc)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
return -EINVAL;
+ }
- map = em->map_lookup;
+ bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
- cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
- if (!cache->physical_map) {
- ret = -ENOMEM;
- goto out;
+ if (unlikely(zone_info[0].alloc_offset == WP_MISSING_DEV)) {
+ btrfs_err(bg->fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ zone_info[0].physical);
+ return -EIO;
+ }
+ if (unlikely(zone_info[1].alloc_offset == WP_MISSING_DEV)) {
+ btrfs_err(bg->fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ zone_info[1].physical);
+ return -EIO;
}
- alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
- if (!alloc_offsets) {
- ret = -ENOMEM;
- goto out;
+ if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
+ zone_info[0].alloc_offset = last_alloc;
+
+ if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
+ zone_info[1].alloc_offset = last_alloc;
+
+ if (unlikely(zone_info[0].alloc_offset != zone_info[1].alloc_offset)) {
+ btrfs_err(bg->fs_info,
+ "zoned: write pointer offset mismatch of zones in DUP profile");
+ return -EIO;
}
- caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
- if (!caps) {
- ret = -ENOMEM;
- goto out;
+ if (test_bit(0, active) != test_bit(1, active)) {
+ if (unlikely(!btrfs_zone_activate(bg)))
+ return -EIO;
+ } else if (test_bit(0, active)) {
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
- active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
- if (!active) {
- ret = -ENOMEM;
- goto out;
+ bg->alloc_offset = zone_info[0].alloc_offset;
+ return 0;
+}
+
+static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+ unsigned long *active,
+ u64 last_alloc)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ int i;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
}
+ /* In case a device is missing we have a cap of 0, so don't use it. */
+ bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
+
for (i = 0; i < map->num_stripes; i++) {
- bool is_sequential;
- struct blk_zone zone;
- struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- int dev_replace_is_ongoing = 0;
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
- device = map->stripes[i].dev;
- physical = map->stripes[i].physical;
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ zone_info[i].alloc_offset = last_alloc;
- if (device->bdev == NULL) {
- alloc_offsets[i] = WP_MISSING_DEV;
- continue;
+ if (unlikely((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
+ !btrfs_test_opt(fs_info, DEGRADED))) {
+ btrfs_err(fs_info,
+ "zoned: write pointer offset mismatch of zones in %s profile",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EIO;
}
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (unlikely(!btrfs_test_opt(fs_info, DEGRADED) &&
+ !btrfs_zone_activate(bg))) {
+ return -EIO;
+ }
+ } else {
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+ }
- is_sequential = btrfs_dev_is_sequential(device, physical);
- if (is_sequential)
- num_sequential++;
- else
- num_conventional++;
+ if (zone_info[0].alloc_offset != WP_MISSING_DEV)
+ bg->alloc_offset = zone_info[0].alloc_offset;
+ else
+ bg->alloc_offset = zone_info[i - 1].alloc_offset;
+
+ return 0;
+}
- if (!is_sequential) {
- alloc_offsets[i] = WP_CONVENTIONAL;
+static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+ unsigned long *active,
+ u64 last_alloc)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
+ if (last_alloc) {
+ u32 factor = map->num_stripes;
+
+ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ }
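+ /*
+ * Example (assuming both stripes sit on conventional zones): with two
+ * stripes and a 64KiB BTRFS_STRIPE_LEN, last_alloc = 192KiB gives
+ * stripe_nr = 1, stripe_offset = 0 and stripe_index = 1, so stripe 0
+ * ends up at 64KiB + 64KiB = 128KiB and stripe 1 at 64KiB, summing
+ * back to 192KiB in the loop below.
+ */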
+
+ for (int i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
+
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > i)
+ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+ else if (stripe_index == i)
+ zone_info[i].alloc_offset += stripe_offset;
}
- /*
- * This zone will be used for allocation, so mark this zone
- * non-empty.
- */
- btrfs_dev_clear_zone_empty(device, physical);
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (unlikely(!btrfs_zone_activate(bg)))
+ return -EIO;
+ } else {
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+ bg->zone_capacity += zone_info[i].capacity;
+ bg->alloc_offset += zone_info[i].alloc_offset;
+ }
- down_read(&dev_replace->rwsem);
- dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
- if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
- btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical);
- up_read(&dev_replace->rwsem);
+ return 0;
+}
- /*
- * The group is mapped to a sequential zone. Get the zone write
- * pointer to determine the allocation offset within the zone.
- */
- WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
- nofs_flag = memalloc_nofs_save();
- ret = btrfs_get_dev_zone(device, physical, &zone);
- memalloc_nofs_restore(nofs_flag);
- if (ret == -EIO || ret == -EOPNOTSUPP) {
- ret = 0;
- alloc_offsets[i] = WP_MISSING_DEV;
+static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+ unsigned long *active,
+ u64 last_alloc)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
+ if (last_alloc) {
+ u32 factor = map->num_stripes / map->sub_stripes;
+
+ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ }
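+ /*
+ * Same last_alloc decomposition as in the RAID0 case, except that the
+ * factor counts stripe groups (num_stripes / sub_stripes), so the
+ * sub-stripe copies within one group share the same offset.
+ */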
+
+ for (int i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
- } else if (ret) {
- goto out;
- }
- if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
- btrfs_err_in_rcu(fs_info,
- "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
- zone.start << SECTOR_SHIFT,
- rcu_str_deref(device->name), device->devid);
- ret = -EIO;
- goto out;
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (unlikely(!btrfs_zone_activate(bg)))
+ return -EIO;
+ } else {
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
- caps[i] = (zone.capacity << SECTOR_SHIFT);
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
- switch (zone.cond) {
- case BLK_ZONE_COND_OFFLINE:
- case BLK_ZONE_COND_READONLY:
- btrfs_err(fs_info,
- "zoned: offline/readonly zone %llu on device %s (devid %llu)",
- physical >> device->zone_info->zone_size_shift,
- rcu_str_deref(device->name), device->devid);
- alloc_offsets[i] = WP_MISSING_DEV;
- break;
- case BLK_ZONE_COND_EMPTY:
- alloc_offsets[i] = 0;
- break;
- case BLK_ZONE_COND_FULL:
- alloc_offsets[i] = caps[i];
- break;
- default:
- /* Partially used zone */
- alloc_offsets[i] =
- ((zone.wp - zone.start) << SECTOR_SHIFT);
- __set_bit(i, active);
- break;
+ if (stripe_index > (i / map->sub_stripes))
+ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+ else if (stripe_index == (i / map->sub_stripes))
+ zone_info[i].alloc_offset += stripe_offset;
}
- /*
- * Consider a zone as active if we can allow any number of
- * active zones.
- */
- if (!device->zone_info->max_active_zones)
- __set_bit(i, active);
+ if ((i % map->sub_stripes) == 0) {
+ bg->zone_capacity += zone_info[i].capacity;
+ bg->alloc_offset += zone_info[i].alloc_offset;
+ }
}
- if (num_sequential > 0)
- cache->seq_zone = true;
+ return 0;
+}
- if (num_conventional > 0) {
- /*
- * Avoid calling calculate_alloc_pointer() for new BG. It
- * is no use for new BG. It must be always 0.
- *
- * Also, we have a lock chain of extent buffer lock ->
- * chunk mutex. For new BG, this function is called from
- * btrfs_make_block_group() which is already taking the
- * chunk mutex. Thus, we cannot call
- * calculate_alloc_pointer() which takes extent buffer
- * locks to avoid deadlock.
- */
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_chunk_map *map;
+ u64 logical = cache->start;
+ u64 length = cache->length;
+ struct zone_info AUTO_KFREE(zone_info);
+ int ret;
+ int i;
+ unsigned long *active = NULL;
+ u64 last_alloc = 0;
+ u32 num_sequential = 0, num_conventional = 0;
+ u64 profile;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ /* Sanity check */
+ if (unlikely(!IS_ALIGNED(length, fs_info->zone_size))) {
+ btrfs_err(fs_info,
+ "zoned: block group %llu len %llu unaligned to zone size %llu",
+ logical, length, fs_info->zone_size);
+ return -EIO;
+ }
+
+ map = btrfs_find_chunk_map(fs_info, logical, length);
+ if (!map)
+ return -EINVAL;
- /* Zone capacity is always zone size in emulation */
- cache->zone_capacity = cache->length;
- if (new) {
- cache->alloc_offset = 0;
+ cache->physical_map = map;
+
+ zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
+ if (!zone_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
+ if (!active) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < map->num_stripes; i++) {
+ ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
+ if (ret)
goto out;
- }
- ret = calculate_alloc_pointer(cache, &last_alloc);
- if (ret || map->num_stripes == num_conventional) {
- if (!ret)
- cache->alloc_offset = last_alloc;
- else
- btrfs_err(fs_info,
+
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ num_conventional++;
+ else
+ num_sequential++;
+ }
+
+ if (num_sequential > 0)
+ set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
+
+ if (num_conventional > 0) {
+ ret = calculate_alloc_pointer(cache, &last_alloc, new);
+ if (ret) {
+ btrfs_err(fs_info,
"zoned: failed to determine allocation offset of bg %llu",
- cache->start);
+ cache->start);
+ goto out;
+ } else if (map->num_stripes == num_conventional) {
+ cache->alloc_offset = last_alloc;
+ cache->zone_capacity = cache->length;
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
goto out;
}
}
- switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+ switch (profile) {
case 0: /* single */
- if (alloc_offsets[0] == WP_MISSING_DEV) {
- btrfs_err(fs_info,
- "zoned: cannot recover write pointer for zone %llu",
- physical);
- ret = -EIO;
- goto out;
- }
- cache->alloc_offset = alloc_offsets[0];
- cache->zone_capacity = caps[0];
- cache->zone_is_active = test_bit(0, active);
+ ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
break;
case BTRFS_BLOCK_GROUP_DUP:
+ ret = btrfs_load_block_group_dup(cache, map, zone_info, active,
+ last_alloc);
+ break;
case BTRFS_BLOCK_GROUP_RAID1:
+ case BTRFS_BLOCK_GROUP_RAID1C3:
+ case BTRFS_BLOCK_GROUP_RAID1C4:
+ ret = btrfs_load_block_group_raid1(cache, map, zone_info,
+ active, last_alloc);
+ break;
case BTRFS_BLOCK_GROUP_RAID0:
+ ret = btrfs_load_block_group_raid0(cache, map, zone_info,
+ active, last_alloc);
+ break;
case BTRFS_BLOCK_GROUP_RAID10:
+ ret = btrfs_load_block_group_raid10(cache, map, zone_info,
+ active, last_alloc);
+ break;
case BTRFS_BLOCK_GROUP_RAID5:
case BTRFS_BLOCK_GROUP_RAID6:
- /* non-single profiles are not supported yet */
default:
btrfs_err(fs_info, "zoned: profile %s not yet supported",
btrfs_bg_type_to_raid_name(map->type));
@@ -1375,22 +1733,33 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
}
- if (cache->zone_is_active) {
- btrfs_get_block_group(cache);
- spin_lock(&fs_info->zone_active_bgs_lock);
- list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
- spin_unlock(&fs_info->zone_active_bgs_lock);
+ if (ret == -EIO && profile != 0 && profile != BTRFS_BLOCK_GROUP_RAID0 &&
+ profile != BTRFS_BLOCK_GROUP_RAID10) {
+ /*
+ * Detected a broken write pointer. Make this block group unallocatable
+ * by setting the allocation pointer to the end of the allocatable
+ * region. Relocating this block group will fix the mismatch.
+ *
+ * Currently, we cannot handle the RAID0 or RAID10 cases like this,
+ * because we don't have a proper zone_capacity value. But reading from
+ * such a block group won't work anyway because of the missing stripe.
+ */
+ cache->alloc_offset = cache->zone_capacity;
}
out:
- if (cache->alloc_offset > fs_info->zone_size) {
- btrfs_err(fs_info,
- "zoned: invalid write pointer %llu in block group %llu",
- cache->alloc_offset, cache->start);
- ret = -EIO;
+ /* Reject non-SINGLE data profiles without RST. */
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
+ (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ ret = -EINVAL;
}
- if (cache->alloc_offset > cache->zone_capacity) {
+ if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
cache->alloc_offset, cache->zone_capacity,
@@ -1406,17 +1775,20 @@ out:
ret = -EIO;
}
- if (!ret)
+ if (!ret) {
cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
- if (ret) {
- kfree(cache->physical_map);
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
+ btrfs_get_block_group(cache);
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_add_tail(&cache->active_bg_list,
+ &fs_info->zone_active_bgs);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+ } else {
+ btrfs_free_chunk_map(cache->physical_map);
cache->physical_map = NULL;
}
bitmap_free(active);
- kfree(caps);
- kfree(alloc_offsets);
- free_extent_map(em);
return ret;
}
@@ -1434,50 +1806,15 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
free = cache->zone_capacity - cache->alloc_offset;
/* We only need ->free_space in ALLOC_SEQ block groups */
- cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
cache->free_space_ctl->free_space = free;
cache->zone_unusable = unusable;
}
-void btrfs_redirty_list_add(struct btrfs_transaction *trans,
- struct extent_buffer *eb)
-{
- struct btrfs_fs_info *fs_info = eb->fs_info;
-
- if (!btrfs_is_zoned(fs_info) ||
- btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
- !list_empty(&eb->release_list))
- return;
-
- set_extent_buffer_dirty(eb);
- set_extent_bits_nowait(&trans->dirty_pages, eb->start,
- eb->start + eb->len - 1, EXTENT_DIRTY);
- memzero_extent_buffer(eb, 0, eb->len);
- set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
-
- spin_lock(&trans->releasing_ebs_lock);
- list_add_tail(&eb->release_list, &trans->releasing_ebs);
- spin_unlock(&trans->releasing_ebs_lock);
- atomic_inc(&eb->refs);
-}
-
-void btrfs_free_redirty_list(struct btrfs_transaction *trans)
-{
- spin_lock(&trans->releasing_ebs_lock);
- while (!list_empty(&trans->releasing_ebs)) {
- struct extent_buffer *eb;
-
- eb = list_first_entry(&trans->releasing_ebs,
- struct extent_buffer, release_list);
- list_del_init(&eb->release_list);
- free_extent_buffer(eb);
- }
- spin_unlock(&trans->releasing_ebs_lock);
-}
-
-bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
+bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
+ u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
+ struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_group *cache;
bool ret = false;
@@ -1485,11 +1822,14 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
if (!btrfs_is_zoned(fs_info))
return false;
- if (!is_data_inode(&inode->vfs_inode))
+ if (!is_data_inode(inode))
+ return false;
+
+ if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
return false;
/*
- * Using REQ_OP_ZONE_APPNED for relocation can break assumptions on the
+ * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
* extent layout the relocation code has.
* Furthermore we have set aside own block-group from which only the
* relocation "process" can allocate and make sure only one process at a
@@ -1504,123 +1844,229 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
if (!cache)
return false;
- ret = cache->seq_zone;
+ ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
btrfs_put_block_group(cache);
return ret;
}
-void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
- struct bio *bio)
+void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
- struct btrfs_ordered_extent *ordered;
- const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ struct btrfs_ordered_sum *sum = bbio->sums;
- if (bio_op(bio) != REQ_OP_ZONE_APPEND)
- return;
+ if (physical < bbio->orig_physical)
+ sum->logical -= bbio->orig_physical - physical;
+ else
+ sum->logical += physical - bbio->orig_physical;
+}
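+
+/*
+ * With REQ_OP_ZONE_APPEND the device picks the actual write position and
+ * reports it back in the bio's sector. btrfs_record_physical_zoned() above
+ * therefore shifts the checksum entry prepared at submission time (for
+ * orig_physical) by the same delta, so it points at the logical address
+ * where the data really landed.
+ */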
- ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
- if (WARN_ON(!ordered))
- return;
+static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
+ u64 logical)
+{
+ struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
+ struct extent_map *em;
- ordered->physical = physical;
- ordered->bdev = bio->bi_bdev;
+ ordered->disk_bytenr = logical;
- btrfs_put_ordered_extent(ordered);
+ write_lock(&em_tree->lock);
+ em = btrfs_search_extent_mapping(em_tree, ordered->file_offset,
+ ordered->num_bytes);
+ /* The em should be a new COW extent, thus it should not have an offset. */
+ ASSERT(em->offset == 0, "em->offset=%llu", em->offset);
+ em->disk_bytenr = logical;
+ btrfs_free_extent_map(em);
+ write_unlock(&em_tree->lock);
}
-void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
+static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
+ u64 logical, u64 len)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_ordered_extent *new;
+
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
+ btrfs_split_extent_map(ordered->inode, ordered->file_offset,
+ ordered->num_bytes, len, logical))
+ return false;
+
+ new = btrfs_split_ordered_extent(ordered, len);
+ if (IS_ERR(new))
+ return false;
+ new->disk_bytenr = logical;
+ btrfs_finish_one_ordered(new);
+ return true;
+}
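+
+/*
+ * btrfs_zoned_split_ordered() above carves a [logical, logical + len) chunk
+ * off the front of an ordered extent so that each physically contiguous zone
+ * append result can be completed as its own ordered extent;
+ * btrfs_finish_ordered_zoned() below walks the checksum list to find those
+ * contiguous runs.
+ */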
+
+void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
+{
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
struct btrfs_ordered_sum *sum;
- u64 orig_logical = ordered->disk_bytenr;
- u64 *logical = NULL;
- int nr, stripe_len;
+ u64 logical, len;
- /* Zoned devices should not have partitions. So, we can assume it is 0 */
- ASSERT(!bdev_is_partition(ordered->bdev));
- if (WARN_ON(!ordered->bdev))
+ /*
+ * A write to a pre-allocated region is used for data relocation and thus
+ * uses a regular WRITE operation, so no split or rewrite is necessary.
+ */
+ if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
return;
- if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
- ordered->physical, &logical, &nr,
- &stripe_len)))
- goto out;
+ ASSERT(!list_empty(&ordered->list));
+ /* The list can only be empty in the pre-alloc case handled above. */
+ sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
+ logical = sum->logical;
+ len = sum->len;
- WARN_ON(nr != 1);
+ while (len < ordered->disk_num_bytes) {
+ sum = list_next_entry(sum, list);
+ if (sum->logical == logical + len) {
+ len += sum->len;
+ continue;
+ }
+ if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
+ set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
+ btrfs_err(fs_info, "failed to split ordered extent");
+ goto out;
+ }
+ logical = sum->logical;
+ len = sum->len;
+ }
- if (orig_logical == *logical)
- goto out;
+ if (ordered->disk_bytenr != logical)
+ btrfs_rewrite_logical_zoned(ordered, logical);
- ordered->disk_bytenr = *logical;
+out:
+ /*
+ * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
+ * were allocated by btrfs_alloc_dummy_sum only to record the logical
+ * addresses and don't contain actual checksums. We thus must free them
+ * here so that we don't attempt to log the csums later.
+ */
+ if ((inode->flags & BTRFS_INODE_NODATASUM) ||
+ test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
+ while ((sum = list_first_entry_or_null(&ordered->list,
+ typeof(*sum), list))) {
+ list_del(&sum->list);
+ kfree(sum);
+ }
+ }
+}
- em_tree = &inode->extent_tree;
- write_lock(&em_tree->lock);
- em = search_extent_mapping(em_tree, ordered->file_offset,
- ordered->num_bytes);
- em->block_start = *logical;
- free_extent_map(em);
- write_unlock(&em_tree->lock);
+static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
+ struct btrfs_block_group **active_bg)
+{
+ const struct writeback_control *wbc = ctx->wbc;
+ struct btrfs_block_group *block_group = ctx->zoned_bg;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
- list_for_each_entry(sum, &ordered->list, list) {
- if (*logical < orig_logical)
- sum->bytenr -= orig_logical - *logical;
- else
- sum->bytenr += *logical - orig_logical;
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
+ return true;
+
+ if (fs_info->treelog_bg == block_group->start) {
+ if (!btrfs_zone_activate(block_group)) {
+ int ret_fin = btrfs_zone_finish_one_bg(fs_info);
+
+ if (ret_fin != 1 || !btrfs_zone_activate(block_group))
+ return false;
+ }
+ } else if (*active_bg != block_group) {
+ struct btrfs_block_group *tgt = *active_bg;
+
+ /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
+ lockdep_assert_held(&fs_info->zoned_meta_io_lock);
+
+ if (tgt) {
+ /*
+ * If there is unsent IO left in the allocated area, we cannot
+ * wait for it, as doing so may cause a deadlock.
+ */
+ if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
+ if (wbc->sync_mode == WB_SYNC_NONE ||
+ (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
+ return false;
+ }
+
+ /* Pivot active metadata/system block group. */
+ btrfs_zoned_meta_io_unlock(fs_info);
+ wait_eb_writebacks(tgt);
+ do_zone_finish(tgt, true);
+ btrfs_zoned_meta_io_lock(fs_info);
+ if (*active_bg == tgt) {
+ btrfs_put_block_group(tgt);
+ *active_bg = NULL;
+ }
+ }
+ if (!btrfs_zone_activate(block_group))
+ return false;
+ if (*active_bg != block_group) {
+ ASSERT(*active_bg == NULL);
+ *active_bg = block_group;
+ btrfs_get_block_group(block_group);
+ }
}
-out:
- kfree(logical);
+ return true;
}
-bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret)
+/*
+ * Check if @ctx->eb is aligned to the write pointer.
+ *
+ * Return:
+ * 0: @ctx->eb is at the write pointer. You can write it.
+ * -EAGAIN: There is a hole. The caller should handle the case.
+ * -EBUSY: There is a hole, but the caller can just bail out.
+ */
+int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct btrfs_eb_write_context *ctx)
{
- struct btrfs_block_group *cache;
- bool ret = true;
+ const struct writeback_control *wbc = ctx->wbc;
+ const struct extent_buffer *eb = ctx->eb;
+ struct btrfs_block_group *block_group = ctx->zoned_bg;
if (!btrfs_is_zoned(fs_info))
- return true;
+ return 0;
- cache = *cache_ret;
+ if (block_group) {
+ if (block_group->start > eb->start ||
+ block_group->start + block_group->length <= eb->start) {
+ btrfs_put_block_group(block_group);
+ block_group = NULL;
+ ctx->zoned_bg = NULL;
+ }
+ }
- if (cache && (eb->start < cache->start ||
- cache->start + cache->length <= eb->start)) {
- btrfs_put_block_group(cache);
- cache = NULL;
- *cache_ret = NULL;
+ if (!block_group) {
+ block_group = btrfs_lookup_block_group(fs_info, eb->start);
+ if (!block_group)
+ return 0;
+ ctx->zoned_bg = block_group;
}
- if (!cache)
- cache = btrfs_lookup_block_group(fs_info, eb->start);
+ if (block_group->meta_write_pointer == eb->start) {
+ struct btrfs_block_group **tgt;
- if (cache) {
- if (cache->meta_write_pointer != eb->start) {
- btrfs_put_block_group(cache);
- cache = NULL;
- ret = false;
- } else {
- cache->meta_write_pointer = eb->start + eb->len;
- }
+ if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
+ return 0;
- *cache_ret = cache;
+ if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ tgt = &fs_info->active_system_bg;
+ else
+ tgt = &fs_info->active_meta_bg;
+ if (check_bg_is_active(ctx, tgt))
+ return 0;
}
- return ret;
-}
-
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
- struct extent_buffer *eb)
-{
- if (!btrfs_is_zoned(eb->fs_info) || !cache)
- return;
+ /*
+ * Since we may have released fs_info->zoned_meta_io_lock, someone else may
+ * already have started writing this eb. In that case, we can just bail out.
+ */
+ if (block_group->meta_write_pointer > eb->start)
+ return -EBUSY;
- ASSERT(cache->meta_write_pointer == eb->start + eb->len);
- cache->meta_write_pointer = eb->start;
+ /* If for_sync, this hole will be filled by a transaction commit. */
+ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+ return -EAGAIN;
+ return -EBUSY;
}
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
@@ -1641,15 +2087,17 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
int nmirrors;
int i, ret;
- ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
- &mapped_length, &bioc);
- if (ret || !bioc || mapped_length < PAGE_SIZE) {
- btrfs_put_bioc(bioc);
- return -EIO;
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
+ &mapped_length, &bioc, NULL, NULL);
+ if (unlikely(ret || !bioc || mapped_length < PAGE_SIZE)) {
+ ret = -EIO;
+ goto out_put_bioc;
}
- if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- return -EINVAL;
+ if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ ret = -EINVAL;
+ goto out_put_bioc;
+ }
nofs_flag = memalloc_nofs_save();
nmirrors = (int)bioc->num_stripes;
@@ -1668,7 +2116,8 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
break;
}
memalloc_nofs_restore(nofs_flag);
-
+out_put_bioc:
+ btrfs_put_bioc(bioc);
return ret;
}
@@ -1698,35 +2147,14 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
if (physical_pos == wp)
return 0;
- if (physical_pos > wp)
+ if (unlikely(physical_pos > wp))
return -EUCLEAN;
length = wp - physical_pos;
return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}
-struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length)
-{
- struct btrfs_device *device;
- struct extent_map *em;
- struct map_lookup *map;
-
- em = btrfs_get_chunk_map(fs_info, logical, length);
- if (IS_ERR(em))
- return ERR_CAST(em);
-
- map = em->map_lookup;
- /* We only support single profile for now */
- ASSERT(map->num_stripes == 1);
- device = map->stripes[0].dev;
-
- free_extent_map(em);
-
- return device;
-}
-
-/**
+/*
* Activate block group and underlying device zones
*
* @block_group: the block group to activate
@@ -1736,52 +2164,76 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct map_lookup *map;
+ struct btrfs_chunk_map *map;
struct btrfs_device *device;
u64 physical;
+ const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
bool ret;
+ int i;
if (!btrfs_is_zoned(block_group->fs_info))
return true;
map = block_group->physical_map;
- /* Currently support SINGLE profile only */
- ASSERT(map->num_stripes == 1);
- device = map->stripes[0].dev;
- physical = map->stripes[0].physical;
-
- if (device->zone_info->max_active_zones == 0)
- return true;
+ spin_lock(&fs_info->zone_active_bgs_lock);
spin_lock(&block_group->lock);
-
- if (block_group->zone_is_active) {
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
ret = true;
goto out_unlock;
}
- /* No space left */
- if (block_group->alloc_offset == block_group->zone_capacity) {
- ret = false;
- goto out_unlock;
+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {
+ /* The caller should check if the block group is full. */
+ if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {
+ ret = false;
+ goto out_unlock;
+ }
+ } else {
+ /* Since it is already written, it should have been active. */
+ WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);
}
- if (!btrfs_dev_set_active_zone(device, physical)) {
- /* Cannot activate the zone */
- ret = false;
- goto out_unlock;
+ for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_zoned_device_info *zinfo;
+ int reserved = 0;
+
+ device = map->stripes[i].dev;
+ physical = map->stripes[i].physical;
+ zinfo = device->zone_info;
+
+ if (!device->bdev)
+ continue;
+
+ if (zinfo->max_active_zones == 0)
+ continue;
+
+ if (is_data)
+ reserved = zinfo->reserved_active_zones;
+ /*
+ * For the data block group, leave active zones for one
+ * metadata block group and one system block group.
+ */
+ if (atomic_read(&zinfo->active_zones_left) <= reserved) {
+ ret = false;
+ goto out_unlock;
+ }
+
+ if (!btrfs_dev_set_active_zone(device, physical)) {
+ /* Cannot activate the zone */
+ ret = false;
+ goto out_unlock;
+ }
+ if (!is_data)
+ zinfo->reserved_active_zones--;
}
/* Successfully activated all the zones */
- block_group->zone_is_active = 1;
-
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
spin_unlock(&block_group->lock);
/* For the active block group list */
btrfs_get_block_group(block_group);
-
- spin_lock(&fs_info->zone_active_bgs_lock);
- ASSERT(list_empty(&block_group->active_bg_list));
list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
spin_unlock(&fs_info->zone_active_bgs_lock);
@@ -1789,179 +2241,295 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
out_unlock:
spin_unlock(&block_group->lock);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
return ret;
}
-int btrfs_zone_finish(struct btrfs_block_group *block_group)
+static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct map_lookup *map;
- struct btrfs_device *device;
- u64 physical;
- int ret = 0;
+ const u64 end = block_group->start + block_group->length;
+ struct extent_buffer *eb;
+ unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);
- if (!btrfs_is_zoned(fs_info))
- return 0;
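+ /*
+ * Scan the buffer tree for extent buffers in this block group and wait
+ * for their writeback to finish, dropping the RCU lock while sleeping.
+ */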
+ rcu_read_lock();
+ xa_for_each_start(&fs_info->buffer_tree, index, eb, start) {
+ if (eb->start < block_group->start)
+ continue;
+ if (eb->start >= end)
+ break;
+ rcu_read_unlock();
+ wait_on_extent_buffer_writeback(eb);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+}
- map = block_group->physical_map;
- /* Currently support SINGLE profile only */
- ASSERT(map->num_stripes == 1);
+static int call_zone_finish(struct btrfs_block_group *block_group,
+ struct btrfs_io_stripe *stripe)
+{
+ struct btrfs_device *device = stripe->dev;
+ const u64 physical = stripe->physical;
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ int ret;
- device = map->stripes[0].dev;
- physical = map->stripes[0].physical;
+ if (!device->bdev)
+ return 0;
- if (device->zone_info->max_active_zones == 0)
+ if (zinfo->max_active_zones == 0)
return 0;
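+ /* Only a sequential-required zone needs an explicit ZONE_FINISH. */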
+ if (btrfs_dev_is_sequential(device, physical)) {
+ unsigned int nofs_flags;
+
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+ physical >> SECTOR_SHIFT,
+ zinfo->zone_size >> SECTOR_SHIFT);
+ memalloc_nofs_restore(nofs_flags);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ zinfo->reserved_active_zones++;
+ btrfs_dev_clear_active_zone(device, physical);
+
+ return 0;
+}
+
+static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_chunk_map *map;
+ const bool is_metadata = (block_group->flags &
+ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ int ret = 0;
+ int i;
+
spin_lock(&block_group->lock);
- if (!block_group->zone_is_active) {
+ if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
return 0;
}
/* Check if we have unwritten allocated space */
- if ((block_group->flags &
- (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
- block_group->alloc_offset > block_group->meta_write_pointer) {
+ if (is_metadata &&
+ block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
spin_unlock(&block_group->lock);
return -EAGAIN;
}
- spin_unlock(&block_group->lock);
-
- ret = btrfs_inc_block_group_ro(block_group, false);
- if (ret)
- return ret;
-
- /* Ensure all writes in this block group finish */
- btrfs_wait_block_group_reservations(block_group);
- /* No need to wait for NOCOW writers. Zoned mode does not allow that. */
- btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
- block_group->length);
-
- spin_lock(&block_group->lock);
/*
- * Bail out if someone already deactivated the block group, or
- * allocated space is left in the block group.
+ * If we are sure that the block group is full (= no more room left for
+ * new allocation) and the IO for the last usable block is completed, we
+ * don't need to wait for the other IOs. This holds because we ensure
+ * the sequential IO submissions using the ZONE_APPEND command for data
+ * and block_group->meta_write_pointer for metadata.
*/
- if (!block_group->zone_is_active) {
+ if (!fully_written) {
+ if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ spin_unlock(&block_group->lock);
+ return -EAGAIN;
+ }
spin_unlock(&block_group->lock);
- btrfs_dec_block_group_ro(block_group);
- return 0;
- }
- if (block_group->reserved) {
- spin_unlock(&block_group->lock);
- btrfs_dec_block_group_ro(block_group);
- return -EAGAIN;
+ ret = btrfs_inc_block_group_ro(block_group, false);
+ if (ret)
+ return ret;
+
+ /* Ensure all writes in this block group finish */
+ btrfs_wait_block_group_reservations(block_group);
+ /* No need to wait for NOCOW writers. Zoned mode does not allow that */
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
+ /* Wait for extent buffers to be written. */
+ if (is_metadata)
+ wait_eb_writebacks(block_group);
+
+ spin_lock(&block_group->lock);
+
+ /*
+ * Bail out if someone already deactivated the block group, or
+ * allocated space is left in the block group.
+ */
+ if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+ &block_group->runtime_flags)) {
+ spin_unlock(&block_group->lock);
+ btrfs_dec_block_group_ro(block_group);
+ return 0;
+ }
+
+ if (block_group->reserved ||
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ &block_group->runtime_flags)) {
+ spin_unlock(&block_group->lock);
+ btrfs_dec_block_group_ro(block_group);
+ return -EAGAIN;
+ }
}
- block_group->zone_is_active = 0;
+ clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
block_group->alloc_offset = block_group->zone_capacity;
+ if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
+ block_group->meta_write_pointer = block_group->start +
+ block_group->zone_capacity;
block_group->free_space_ctl->free_space = 0;
btrfs_clear_treelog_bg(block_group);
+ btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
- ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
- physical >> SECTOR_SHIFT,
- device->zone_info->zone_size >> SECTOR_SHIFT,
- GFP_NOFS);
- btrfs_dec_block_group_ro(block_group);
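+ /*
+ * Take the dev_replace rwsem so the stripe devices cannot change under
+ * us while each zone is finished.
+ */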
+ down_read(&dev_replace->rwsem);
+ map = block_group->physical_map;
+ for (i = 0; i < map->num_stripes; i++) {
- if (!ret) {
- btrfs_dev_clear_active_zone(device, physical);
+ ret = call_zone_finish(block_group, &map->stripes[i]);
+ if (ret) {
+ up_read(&dev_replace->rwsem);
+ return ret;
+ }
+ }
+ up_read(&dev_replace->rwsem);
- spin_lock(&fs_info->zone_active_bgs_lock);
- ASSERT(!list_empty(&block_group->active_bg_list));
- list_del_init(&block_group->active_bg_list);
- spin_unlock(&fs_info->zone_active_bgs_lock);
+ if (!fully_written)
+ btrfs_dec_block_group_ro(block_group);
- /* For active_bg_list */
- btrfs_put_block_group(block_group);
- }
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ ASSERT(!list_empty(&block_group->active_bg_list));
+ list_del_init(&block_group->active_bg_list);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
- return ret;
+ /* For active_bg_list */
+ btrfs_put_block_group(block_group);
+
+ clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
+
+ return 0;
}
-bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, int raid_index)
+int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
+ if (!btrfs_is_zoned(block_group->fs_info))
+ return 0;
+
+ return do_zone_finish(block_group, false);
+}
+
+bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
+{
+ struct btrfs_fs_info *fs_info = fs_devices->fs_info;
struct btrfs_device *device;
bool ret = false;
- if (!btrfs_is_zoned(fs_devices->fs_info))
+ if (!btrfs_is_zoned(fs_info))
return true;
- /* Non-single profiles are not supported yet */
- if (raid_index != BTRFS_RAID_SINGLE)
+ if (test_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags))
return false;
/* Check if there is a device with active zones left */
- mutex_lock(&fs_devices->device_list_mutex);
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ mutex_lock(&fs_info->chunk_mutex);
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ int reserved = 0;
if (!device->bdev)
continue;
- if (!zinfo->max_active_zones ||
- atomic_read(&zinfo->active_zones_left)) {
+ if (!zinfo->max_active_zones) {
ret = true;
break;
}
+
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ reserved = zinfo->reserved_active_zones;
+
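+ /*
+ * On top of the reserved zones, a SINGLE block group needs one free
+ * active zone and a DUP block group needs two.
+ */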
+ switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case 0: /* single */
+ ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+ ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
+ break;
+ }
+ if (ret)
+ break;
}
- mutex_unlock(&fs_devices->device_list_mutex);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ mutex_unlock(&fs_info->chunk_mutex);
+
+ if (!ret)
+ set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
return ret;
}
-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
+int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
struct btrfs_block_group *block_group;
- struct map_lookup *map;
- struct btrfs_device *device;
- u64 physical;
+ u64 min_alloc_bytes;
if (!btrfs_is_zoned(fs_info))
- return;
+ return 0;
block_group = btrfs_lookup_block_group(fs_info, logical);
- ASSERT(block_group);
-
- if (logical + length < block_group->start + block_group->zone_capacity)
- goto out;
+ if (WARN_ON_ONCE(!block_group))
+ return -ENOENT;
- spin_lock(&block_group->lock);
+ /* No MIXED_BG on zoned btrfs. */
+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
+ min_alloc_bytes = fs_info->sectorsize;
+ else
+ min_alloc_bytes = fs_info->nodesize;
- if (!block_group->zone_is_active) {
- spin_unlock(&block_group->lock);
+ /* Bail out if we can allocate more data from this block group. */
+ if (logical + length + min_alloc_bytes <=
+ block_group->start + block_group->zone_capacity)
goto out;
- }
- block_group->zone_is_active = 0;
- /* We should have consumed all the free space */
- ASSERT(block_group->alloc_offset == block_group->zone_capacity);
- ASSERT(block_group->free_space_ctl->free_space == 0);
- btrfs_clear_treelog_bg(block_group);
- spin_unlock(&block_group->lock);
+ do_zone_finish(block_group, true);
- map = block_group->physical_map;
- device = map->stripes[0].dev;
- physical = map->stripes[0].physical;
+out:
+ btrfs_put_block_group(block_group);
+ return 0;
+}
- if (!device->zone_info->max_active_zones)
- goto out;
+static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
+{
+ int ret;
+ struct btrfs_block_group *bg =
+ container_of(work, struct btrfs_block_group, zone_finish_work);
- btrfs_dev_clear_active_zone(device, physical);
+ wait_on_extent_buffer_writeback(bg->last_eb);
+ free_extent_buffer(bg->last_eb);
+ ret = do_zone_finish(bg, true);
+ if (ret)
+ btrfs_handle_fs_error(bg->fs_info, ret,
+ "Failed to finish block-group's zone");
+ btrfs_put_block_group(bg);
+}
- spin_lock(&fs_info->zone_active_bgs_lock);
- ASSERT(!list_empty(&block_group->active_bg_list));
- list_del_init(&block_group->active_bg_list);
- spin_unlock(&fs_info->zone_active_bgs_lock);
+void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ struct extent_buffer *eb)
+{
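+ /*
+ * Only schedule zone finishing when the block group is on a sequential
+ * zone and there is no room left for another extent buffer of the same
+ * size after this one.
+ */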
+ if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
+ eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
+ return;
- btrfs_put_block_group(block_group);
+ if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
+ btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
+ bg->start);
+ return;
+ }
-out:
- btrfs_put_block_group(block_group);
+ /* For the work */
+ btrfs_get_block_group(bg);
+ refcount_inc(&eb->refs);
+ bg->last_eb = eb;
+ INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
+ queue_work(system_dfl_wq, &bg->zone_finish_work);
}
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
@@ -1973,3 +2541,446 @@ void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
fs_info->data_reloc_bg = 0;
spin_unlock(&fs_info->relocation_bg_lock);
}
+
+void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
+ struct btrfs_space_info *space_info = data_sinfo;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_block_group *bg;
+ struct list_head *bg_list;
+ u64 alloc_flags;
+ bool first = true;
+ bool did_chunk_alloc = false;
+ int index;
+ int ret;
+
+ if (!btrfs_is_zoned(fs_info))
+ return;
+
+ if (fs_info->data_reloc_bg)
+ return;
+
+ if (sb_rdonly(fs_info->sb))
+ return;
+
+ alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);
+ index = btrfs_bg_flags_to_raid_index(alloc_flags);
+
+ /* Scan the data space_info to find empty block groups. Take the second one. */
+again:
+ bg_list = &space_info->block_groups[index];
+ list_for_each_entry(bg, bg_list, list) {
+ if (bg->alloc_offset != 0)
+ continue;
+
+ if (first) {
+ first = false;
+ continue;
+ }
+
+ if (space_info == data_sinfo) {
+ /* Migrate the block group to the data relocation space_info. */
+ struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0];
+ int factor;
+
+ ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
+ "reloc_sinfo->subgroup_id=%d", reloc_sinfo->subgroup_id);
+ factor = btrfs_bg_type_to_factor(bg->flags);
+
+ down_write(&space_info->groups_sem);
+ list_del_init(&bg->list);
+ /* The first empty block group is still on the list since we took the second one. */
+ ASSERT(!list_empty(&space_info->block_groups[index]));
+ up_write(&space_info->groups_sem);
+
+ spin_lock(&space_info->lock);
+ space_info->total_bytes -= bg->length;
+ space_info->disk_total -= bg->length * factor;
+ space_info->disk_total -= bg->zone_unusable;
+ /* No allocation has ever happened in this block group. */
+ ASSERT(bg->used == 0, "bg->used=%llu", bg->used);
+ /* No super block in a block group on the zoned setup. */
+ ASSERT(bg->bytes_super == 0, "bg->bytes_super=%llu", bg->bytes_super);
+ spin_unlock(&space_info->lock);
+
+ bg->space_info = reloc_sinfo;
+ if (reloc_sinfo->block_group_kobjs[index] == NULL)
+ btrfs_sysfs_add_block_group_type(bg);
+
+ btrfs_add_bg_to_space_info(fs_info, bg);
+ }
+
+ fs_info->data_reloc_bg = bg->start;
+ set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &bg->runtime_flags);
+ btrfs_zone_activate(bg);
+
+ return;
+ }
+
+ if (did_chunk_alloc)
+ return;
+
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return;
+
+ /* Allocate new BG in the data relocation space_info. */
+ space_info = data_sinfo->sub_group[0];
+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC,
+ "space_info->subgroup_id=%d", space_info->subgroup_id);
+ ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
+ btrfs_end_transaction(trans);
+ if (ret == 1) {
+ /*
+ * We allocated a new block group in the data relocation space_info. We
+ * can take that one.
+ */
+ first = false;
+ did_chunk_alloc = true;
+ goto again;
+ }
+}
+
+void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *device;
+
+ if (!btrfs_is_zoned(fs_info))
+ return;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (device->zone_info) {
+ vfree(device->zone_info->zone_cache);
+ device->zone_info->zone_cache = NULL;
+ }
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+}
+
+bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *device;
+ u64 total = btrfs_super_total_bytes(fs_info->super_copy);
+ u64 used = 0;
+ u64 factor;
+
+ ASSERT(btrfs_is_zoned(fs_info));
+
+ if (fs_info->bg_reclaim_threshold == 0)
+ return false;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (!device->bdev)
+ continue;
+
+ used += device->bytes_used;
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
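+ /* Reclaim once the used percentage crosses the configured threshold. */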
+ factor = div64_u64(used * 100, total);
+ return factor >= fs_info->bg_reclaim_threshold;
+}
+
+void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
+ u64 length)
+{
+ struct btrfs_block_group *block_group;
+
+ if (!btrfs_is_zoned(fs_info))
+ return;
+
+ block_group = btrfs_lookup_block_group(fs_info, logical);
+ /* It should be called on a previous data relocation block group. */
+ ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
+
+ spin_lock(&block_group->lock);
+ if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
+ goto out;
+
+ /* All relocation extents are written. */
+ if (block_group->start + block_group->alloc_offset == logical + length) {
+ /*
+ * Now, release this block group for further allocations and
+ * zone finish.
+ */
+ clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ &block_group->runtime_flags);
+ }
+
+out:
+ spin_unlock(&block_group->lock);
+ btrfs_put_block_group(block_group);
+}
+
+int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_group *block_group;
+ struct btrfs_block_group *min_bg = NULL;
+ u64 min_avail = U64_MAX;
+ int ret;
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_for_each_entry(block_group, &fs_info->zone_active_bgs,
+ active_bg_list) {
+ u64 avail;
+
+ spin_lock(&block_group->lock);
+ if (block_group->reserved || block_group->alloc_offset == 0 ||
+ !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ spin_unlock(&block_group->lock);
+ continue;
+ }
+
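+ /* Track the active data block group with the least space left to allocate. */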
+ avail = block_group->zone_capacity - block_group->alloc_offset;
+ if (min_avail > avail) {
+ if (min_bg)
+ btrfs_put_block_group(min_bg);
+ min_bg = block_group;
+ min_avail = avail;
+ btrfs_get_block_group(min_bg);
+ }
+ spin_unlock(&block_group->lock);
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+
+ if (!min_bg)
+ return 0;
+
+ ret = btrfs_zone_finish(min_bg);
+ btrfs_put_block_group(min_bg);
+
+ return ret < 0 ? ret : 1;
+}
+
+int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ struct btrfs_block_group *bg;
+ int index;
+
+ if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
+ return 0;
+
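+ /*
+ * Try to activate an inactive, non-full block group. If none can be
+ * activated and @do_finish is set, finish one active block group to
+ * release an active zone and retry.
+ */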
+ for (;;) {
+ int ret;
+ bool need_finish = false;
+
+ down_read(&space_info->groups_sem);
+ for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
+ list_for_each_entry(bg, &space_info->block_groups[index],
+ list) {
+ if (!spin_trylock(&bg->lock))
+ continue;
+ if (btrfs_zoned_bg_is_full(bg) ||
+ test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+ &bg->runtime_flags)) {
+ spin_unlock(&bg->lock);
+ continue;
+ }
+ spin_unlock(&bg->lock);
+
+ if (btrfs_zone_activate(bg)) {
+ up_read(&space_info->groups_sem);
+ return 1;
+ }
+
+ need_finish = true;
+ }
+ }
+ up_read(&space_info->groups_sem);
+
+ if (!do_finish || !need_finish)
+ break;
+
+ ret = btrfs_zone_finish_one_bg(fs_info);
+ if (ret == 0)
+ break;
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Reserve zones for one metadata block group, one tree-log block group, and one
+ * system block group.
+ */
+void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_block_group *block_group;
+ struct btrfs_device *device;
+ /* Reserve zones for a normal SINGLE metadata block group and a tree-log block group. */
+ unsigned int metadata_reserve = 2;
+ /* Reserve a zone for a SINGLE system block group. */
+ unsigned int system_reserve = 1;
+
+ if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
+ return;
+
+ /*
+ * This function is called from the mount context. So, there is no
+ * parallel process touching the bits. No need for read_seqretry().
+ */
+ if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
+ metadata_reserve = 4;
+ if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
+ system_reserve = 2;
+
+ /* Apply the reservation on all the devices. */
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (!device->bdev)
+ continue;
+
+ device->zone_info->reserved_active_zones =
+ metadata_reserve + system_reserve;
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ /* Release reservation for currently active block groups. */
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+ struct btrfs_chunk_map *map = block_group->physical_map;
+
+ if (!(block_group->flags &
+ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+ continue;
+
+ for (int i = 0; i < map->num_stripes; i++)
+ map->stripes[i].dev->zone_info->reserved_active_zones--;
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+}
+
+/*
+ * Reset the zones of unused block groups from @space_info->bytes_zone_unusable.
+ *
+ * @space_info: the space to work on
+ * @num_bytes: the number of bytes to reclaim
+ *
+ * This one resets the zones of a block group, so we can reuse the region
+ * without removing the block group. On the other hand, btrfs_delete_unused_bgs()
+ * just removes a block group and frees up the underlying zones. So, we still
+ * need to allocate a new block group to reuse the zones.
+ *
+ * Resetting is faster than deleting/recreating a block group. It is similar
+ * to freeing logical space in the regular (non-zoned) mode. However, we
+ * cannot change the block group's profile with this operation.
+ */
+int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ const sector_t zone_size_sectors = fs_info->zone_size >> SECTOR_SHIFT;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ while (num_bytes > 0) {
+ struct btrfs_chunk_map *map;
+ struct btrfs_block_group *bg = NULL;
+ bool found = false;
+ u64 reclaimed = 0;
+
+ /*
+ * Here, we choose a fully zone_unusable block group. It's
+ * technically possible to reset a partly zone_unusable block
+ * group, which still has some free space left. However,
+ * handling that needs to cope with the allocation side, which
+ * makes the logic more complex. So, let's handle the easy case
+ * for now.
+ */
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_for_each_entry(bg, &fs_info->unused_bgs, bg_list) {
+ if ((bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != space_info->flags)
+ continue;
+
+ /*
+ * Use trylock to avoid locking order violation. In
+ * btrfs_reclaim_bgs_work(), the lock order is
+ * &bg->lock -> &fs_info->unused_bgs_lock. We skip a
+ * block group if we cannot take its lock.
+ */
+ if (!spin_trylock(&bg->lock))
+ continue;
+ if (btrfs_is_block_group_used(bg) || bg->zone_unusable < bg->length) {
+ spin_unlock(&bg->lock);
+ continue;
+ }
+ spin_unlock(&bg->lock);
+ found = true;
+ break;
+ }
+ if (!found) {
+ spin_unlock(&fs_info->unused_bgs_lock);
+ return 0;
+ }
+
+ list_del_init(&bg->bg_list);
+ btrfs_put_block_group(bg);
+ spin_unlock(&fs_info->unused_bgs_lock);
+
+ /*
+ * Since the block group is fully zone_unusable and we cannot
+ * allocate from this block group anymore, we don't need to set
+ * this block group read-only.
+ */
+
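+ /* Reset the zone of every stripe backing this block group. */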
+ down_read(&fs_info->dev_replace.rwsem);
+ map = bg->physical_map;
+ for (int i = 0; i < map->num_stripes; i++) {
+ struct btrfs_io_stripe *stripe = &map->stripes[i];
+ unsigned int nofs_flags;
+ int ret;
+
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(stripe->dev->bdev, REQ_OP_ZONE_RESET,
+ stripe->physical >> SECTOR_SHIFT,
+ zone_size_sectors);
+ memalloc_nofs_restore(nofs_flags);
+
+ if (ret) {
+ up_read(&fs_info->dev_replace.rwsem);
+ return ret;
+ }
+ }
+ up_read(&fs_info->dev_replace.rwsem);
+
+ spin_lock(&space_info->lock);
+ spin_lock(&bg->lock);
+ ASSERT(!btrfs_is_block_group_used(bg));
+ if (bg->ro) {
+ spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
+ continue;
+ }
+
+ reclaimed = bg->alloc_offset;
+ bg->zone_unusable = bg->length - bg->zone_capacity;
+ bg->alloc_offset = 0;
+ /*
+ * This holds because we currently only reset block groups that were
+ * fully used and then freed.
+ */
+ ASSERT(reclaimed == bg->zone_capacity,
+ "reclaimed=%llu bg->zone_capacity=%llu", reclaimed, bg->zone_capacity);
+ bg->free_space_ctl->free_space += reclaimed;
+ space_info->bytes_zone_unusable -= reclaimed;
+ spin_unlock(&bg->lock);
+ btrfs_return_free_space(space_info, reclaimed);
+ spin_unlock(&space_info->lock);
+
+ if (num_bytes <= reclaimed)
+ break;
+ num_bytes -= reclaimed;
+ }
+
+ return 0;
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index e53ab7b96437..5cefdeb08b7b 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -4,16 +4,28 @@
#define BTRFS_ZONED_H
#include <linux/types.h>
+#include <linux/atomic.h>
#include <linux/blkdev.h>
+#include <linux/blkzoned.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "messages.h"
#include "volumes.h"
#include "disk-io.h"
#include "block-group.h"
+#include "btrfs_inode.h"
-/*
- * Block groups with more than this value (percents) of unusable space will be
- * scheduled for background reclaim.
- */
-#define BTRFS_DEFAULT_RECLAIM_THRESH 75
+struct block_device;
+struct extent_buffer;
+struct btrfs_bio;
+struct btrfs_ordered_extent;
+struct btrfs_fs_info;
+struct btrfs_space_info;
+struct btrfs_eb_write_context;
+struct btrfs_fs_devices;
+
+#define BTRFS_DEFAULT_RECLAIM_THRESH (75)
struct btrfs_zoned_device_info {
/*
@@ -24,21 +36,29 @@ struct btrfs_zoned_device_info {
u8 zone_size_shift;
u32 nr_zones;
unsigned int max_active_zones;
+ /*
+ * Reserved active zones for one metadata and one system block group.
+ * It can vary per-device depending on the allocation status.
+ */
+ int reserved_active_zones;
atomic_t active_zones_left;
unsigned long *seq_zones;
unsigned long *empty_zones;
unsigned long *active_zones;
+ struct blk_zone *zone_cache;
struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
};
+void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered);
+
#ifdef CONFIG_BLK_DEV_ZONED
-int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
- struct blk_zone *zone);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
-int btrfs_get_dev_zone_info(struct btrfs_device *device);
+int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
+struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
-int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
+int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
+ unsigned long long *mount_opt);
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
u64 *bytenr_ret);
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
@@ -52,49 +72,55 @@ int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
-void btrfs_redirty_list_add(struct btrfs_transaction *trans,
- struct extent_buffer *eb);
-void btrfs_free_redirty_list(struct btrfs_transaction *trans);
-bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start);
-void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
- struct bio *bio);
-void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
-bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret);
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
- struct extent_buffer *eb);
+bool btrfs_use_zone_append(struct btrfs_bio *bbio);
+void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
+int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct btrfs_eb_write_context *ctx);
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
u64 physical_start, u64 physical_pos);
-struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length);
bool btrfs_zone_activate(struct btrfs_block_group *block_group);
int btrfs_zone_finish(struct btrfs_block_group *block_group);
-bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
- int raid_index);
-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
+bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
+int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
u64 length);
+void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ struct extent_buffer *eb);
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
+void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info);
+void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
+bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info);
+void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
+ u64 length);
+int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
+int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish);
+void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
+int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes);
#else /* CONFIG_BLK_DEV_ZONED */
-static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
- struct blk_zone *zone)
-{
- return 0;
-}
static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
return 0;
}
-static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
+static inline int btrfs_get_dev_zone_info(struct btrfs_device *device,
+ bool populate_cache)
{
return 0;
}
static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { }
+/*
+ * In case the kernel is compiled without CONFIG_BLK_DEV_ZONED we'll never call
+ * into btrfs_clone_dev_zone_info() so it's safe to return NULL here.
+ */
+static inline struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(
+ struct btrfs_device *orig_dev)
+{
+ return NULL;
+}
+
static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
{
if (!btrfs_is_zoned(fs_info))
@@ -104,7 +130,8 @@ static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
return -EOPNOTSUPP;
}
-static inline int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
+static inline int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
+ unsigned long long *mount_opt)
{
return 0;
}
@@ -161,34 +188,19 @@ static inline int btrfs_load_block_group_zone_info(
static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { }
-static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
- struct extent_buffer *eb) { }
-static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
-
-static inline bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
+static inline bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
return false;
}
-static inline void btrfs_record_physical_zoned(struct inode *inode,
- u64 file_offset, struct bio *bio)
+static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
}
-static inline void btrfs_rewrite_logical_zoned(
- struct btrfs_ordered_extent *ordered) { }
-
-static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret)
-{
- return true;
-}
-
-static inline void btrfs_revert_meta_write_pointer(
- struct btrfs_block_group *cache,
- struct extent_buffer *eb)
+static inline int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct btrfs_eb_write_context *ctx)
{
+ return 0;
}
static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
@@ -204,13 +216,6 @@ static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev,
return -EOPNOTSUPP;
}
-static inline struct btrfs_device *btrfs_zoned_get_device(
- struct btrfs_fs_info *fs_info,
- u64 logical, u64 length)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
static inline bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
return true;
@@ -222,16 +227,54 @@ static inline int btrfs_zone_finish(struct btrfs_block_group *block_group)
}
static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
- int raid_index)
+ u64 flags)
{
return true;
}
-static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length) { }
+static inline int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ return 0;
+}
+
+static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ struct extent_buffer *eb) { }
static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
+static inline void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info) { }
+
+static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
+
+static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
+{
+ return false;
+}
+
+static inline void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length) { }
+
+static inline int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+{
+ return 1;
+}
+
+static inline int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info,
+ bool do_finish)
+{
+ /* Consider all block groups to be active. */
+ return 0;
+}
+
+static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
+
+static inline int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ return 0;
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
@@ -295,8 +338,8 @@ static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_i
(bdev_zone_sectors(bdev) << SECTOR_SHIFT);
}
- /* Do not allow Host Manged zoned device */
- return bdev_zoned_model(bdev) != BLK_ZONED_HM;
+ /* Do not allow Host Managed zoned device. */
+ return !bdev_is_zoned(bdev);
}
static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos)
@@ -350,4 +393,26 @@ static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg)
spin_unlock(&fs_info->treelog_bg_lock);
}
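+
+/*
+ * Serialize data relocation writes on zoned filesystems: take the fs-wide
+ * zoned_data_reloc_io_lock when the inode belongs to the data relocation root.
+ */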
+static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode)
+{
+ struct btrfs_root *root = inode->root;
+
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
+ mutex_lock(&root->fs_info->zoned_data_reloc_io_lock);
+}
+
+static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
+{
+ struct btrfs_root *root = inode->root;
+
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
+ mutex_unlock(&root->fs_info->zoned_data_reloc_io_lock);
+}
+
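+/* A zoned block group is full once its allocation offset reaches the zone capacity. */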
+static inline bool btrfs_zoned_bg_is_full(const struct btrfs_block_group *bg)
+{
+ ASSERT(btrfs_is_zoned(bg->fs_info));
+ return (bg->alloc_offset == bg->zone_capacity);
+}
+
#endif
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index f06b68040352..c9cddcfa337b 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -18,20 +18,23 @@
#include <linux/slab.h>
#include <linux/zstd.h>
#include "misc.h"
+#include "fs.h"
+#include "btrfs_inode.h"
#include "compression.h"
-#include "ctree.h"
+#include "super.h"
#define ZSTD_BTRFS_MAX_WINDOWLOG 17
-#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
+#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
+#define ZSTD_BTRFS_MIN_LEVEL -15
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
-static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
+static zstd_parameters zstd_get_btrfs_parameters(int level,
size_t src_len)
{
- ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
+ zstd_parameters params = zstd_get_params(level, src_len);
if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
@@ -43,13 +46,14 @@ struct workspace {
void *mem;
size_t size;
char *buf;
- unsigned int level;
- unsigned int req_level;
+ int level;
+ int req_level;
unsigned long last_used; /* jiffies */
struct list_head list;
struct list_head lru_list;
- ZSTD_inBuffer in_buf;
- ZSTD_outBuffer out_buf;
+ zstd_in_buffer in_buf;
+ zstd_out_buffer out_buf;
+ zstd_parameters params;
};
/*
@@ -73,7 +77,6 @@ struct workspace {
*/
struct zstd_workspace_manager {
- const struct btrfs_compress_op *ops;
spinlock_t lock;
struct list_head lru_list;
struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
@@ -82,8 +85,6 @@ struct zstd_workspace_manager {
struct timer_list timer;
};
-static struct zstd_workspace_manager wsm;
-
static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
static inline struct workspace *list_to_workspace(struct list_head *list)
@@ -91,31 +92,39 @@ static inline struct workspace *list_to_workspace(struct list_head *list)
return container_of(list, struct workspace, list);
}
-void zstd_free_workspace(struct list_head *ws);
-struct list_head *zstd_alloc_workspace(unsigned int level);
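+/*
+ * Map a compression level to a workspace slot: level N uses slot N - 1 and
+ * all the negative (fast) levels share slot 0.
+ */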
+static inline int clip_level(int level)
+{
+ return max(0, level - 1);
+}
+
/*
- * zstd_reclaim_timer_fn - reclaim timer
+ * Timer callback to free unused workspaces.
+ *
- * @t: timer
+ * @timer: timer
*
* This scans the lru_list and attempts to reclaim any workspace that hasn't
* been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
+ *
+ * The context is softirq and does not need the _bh locking primitives.
*/
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
+ struct zstd_workspace_manager *zwsm =
+ container_of(timer, struct zstd_workspace_manager, timer);
unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
struct list_head *pos, *next;
- spin_lock_bh(&wsm.lock);
+ spin_lock(&zwsm->lock);
- if (list_empty(&wsm.lru_list)) {
- spin_unlock_bh(&wsm.lock);
+ if (list_empty(&zwsm->lru_list)) {
+ spin_unlock(&zwsm->lock);
return;
}
- list_for_each_prev_safe(pos, next, &wsm.lru_list) {
+ list_for_each_prev_safe(pos, next, &zwsm->lru_list) {
struct workspace *victim = container_of(pos, struct workspace,
lru_list);
- unsigned int level;
+ int level;
if (time_after(victim->last_used, reclaim_threshold))
break;
@@ -129,19 +138,19 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
list_del(&victim->list);
zstd_free_workspace(&victim->list);
- if (list_empty(&wsm.idle_ws[level - 1]))
- clear_bit(level - 1, &wsm.active_map);
+ if (list_empty(&zwsm->idle_ws[level]))
+ clear_bit(level, &zwsm->active_map);
}
- if (!list_empty(&wsm.lru_list))
- mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
+ if (!list_empty(&zwsm->lru_list))
+ mod_timer(&zwsm->timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
- spin_unlock_bh(&wsm.lock);
+ spin_unlock(&zwsm->lock);
}
/*
- * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
+ * Calculate monotonic memory bounds.
*
* It is possible based on the level configurations that a higher level
* workspace uses less memory than a lower level workspace. In order to reuse
@@ -152,69 +161,79 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
static void zstd_calc_ws_mem_sizes(void)
{
size_t max_size = 0;
- unsigned int level;
+ int level;
- for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
- ZSTD_parameters params =
+ for (level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
+ if (level == 0)
+ continue;
+ zstd_parameters params =
zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
size_t level_size =
max_t(size_t,
- ZSTD_CStreamWorkspaceBound(params.cParams),
- ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
max_size = max_t(size_t, max_size, level_size);
- zstd_ws_mem_sizes[level - 1] = max_size;
+ /* Use level 1 workspace size for all the fast mode negative levels. */
+ zstd_ws_mem_sizes[clip_level(level)] = max_size;
}
}
-void zstd_init_workspace_manager(void)
+int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info)
{
+ struct zstd_workspace_manager *zwsm;
struct list_head *ws;
- int i;
+ ASSERT(fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] == NULL);
+ zwsm = kzalloc(sizeof(*zwsm), GFP_KERNEL);
+ if (!zwsm)
+ return -ENOMEM;
zstd_calc_ws_mem_sizes();
+ spin_lock_init(&zwsm->lock);
+ init_waitqueue_head(&zwsm->wait);
+ timer_setup(&zwsm->timer, zstd_reclaim_timer_fn, 0);
- wsm.ops = &btrfs_zstd_compress;
- spin_lock_init(&wsm.lock);
- init_waitqueue_head(&wsm.wait);
- timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
-
- INIT_LIST_HEAD(&wsm.lru_list);
- for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
- INIT_LIST_HEAD(&wsm.idle_ws[i]);
+ INIT_LIST_HEAD(&zwsm->lru_list);
+ for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
+ INIT_LIST_HEAD(&zwsm->idle_ws[i]);
+ fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = zwsm;
- ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
+ ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
if (IS_ERR(ws)) {
- pr_warn(
- "BTRFS: cannot preallocate zstd compression workspace\n");
+ btrfs_warn(NULL, "cannot preallocate zstd compression workspace");
} else {
- set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
- list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
+ set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &zwsm->active_map);
+ list_add(ws, &zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
}
+ return 0;
}
-void zstd_cleanup_workspace_manager(void)
+void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct workspace *workspace;
- int i;
- spin_lock_bh(&wsm.lock);
- for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
- while (!list_empty(&wsm.idle_ws[i])) {
- workspace = container_of(wsm.idle_ws[i].next,
+ if (!zwsm)
+ return;
+ fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = NULL;
+ spin_lock_bh(&zwsm->lock);
+ for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
+ while (!list_empty(&zwsm->idle_ws[i])) {
+ workspace = container_of(zwsm->idle_ws[i].next,
struct workspace, list);
list_del(&workspace->list);
list_del(&workspace->lru_list);
zstd_free_workspace(&workspace->list);
}
}
- spin_unlock_bh(&wsm.lock);
-
- del_timer_sync(&wsm.timer);
+ spin_unlock_bh(&zwsm->lock);
+ timer_delete_sync(&zwsm->timer);
+ kfree(zwsm);
}
/*
- * zstd_find_workspace - find workspace
+ * Find workspace for given level.
+ *
* @level: compression level
*
* This iterates over the set bits in the active_map beginning at the requested
@@ -224,35 +243,38 @@ void zstd_cleanup_workspace_manager(void)
* offer the opportunity to reclaim the workspace in favor of allocating an
* appropriately sized one in the future.
*/
-static struct list_head *zstd_find_workspace(unsigned int level)
+static struct list_head *zstd_find_workspace(struct btrfs_fs_info *fs_info, int level)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct list_head *ws;
struct workspace *workspace;
- int i = level - 1;
+ int i = clip_level(level);
- spin_lock_bh(&wsm.lock);
- for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
- if (!list_empty(&wsm.idle_ws[i])) {
- ws = wsm.idle_ws[i].next;
+ ASSERT(zwsm);
+ spin_lock_bh(&zwsm->lock);
+ for_each_set_bit_from(i, &zwsm->active_map, ZSTD_BTRFS_MAX_LEVEL) {
+ if (!list_empty(&zwsm->idle_ws[i])) {
+ ws = zwsm->idle_ws[i].next;
workspace = list_to_workspace(ws);
list_del_init(ws);
/* keep its place if it's a lower level using this */
workspace->req_level = level;
- if (level == workspace->level)
+ if (clip_level(level) == workspace->level)
list_del(&workspace->lru_list);
- if (list_empty(&wsm.idle_ws[i]))
- clear_bit(i, &wsm.active_map);
- spin_unlock_bh(&wsm.lock);
+ if (list_empty(&zwsm->idle_ws[i]))
+ clear_bit(i, &zwsm->active_map);
+ spin_unlock_bh(&zwsm->lock);
return ws;
}
}
- spin_unlock_bh(&wsm.lock);
+ spin_unlock_bh(&zwsm->lock);
return NULL;
}
/*
- * zstd_get_workspace - zstd's get_workspace
+ * Zstd get_workspace for level.
+ *
* @level: compression level
*
* If @level is 0, then any compression level can be used. Therefore, we begin
@@ -260,30 +282,33 @@ static struct list_head *zstd_find_workspace(unsigned int level)
* attempt to allocate a new workspace. If we fail to allocate one due to
* memory pressure, go to sleep waiting for the max level workspace to free up.
*/
-struct list_head *zstd_get_workspace(unsigned int level)
+struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct list_head *ws;
unsigned int nofs_flag;
+ ASSERT(zwsm);
+
/* level == 0 means we can use any workspace */
if (!level)
level = 1;
again:
- ws = zstd_find_workspace(level);
+ ws = zstd_find_workspace(fs_info, level);
if (ws)
return ws;
nofs_flag = memalloc_nofs_save();
- ws = zstd_alloc_workspace(level);
+ ws = zstd_alloc_workspace(fs_info, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(ws)) {
DEFINE_WAIT(wait);
- prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&zwsm->wait, &wait, TASK_UNINTERRUPTIBLE);
schedule();
- finish_wait(&wsm.wait, &wait);
+ finish_wait(&zwsm->wait, &wait);
goto again;
}
@@ -292,7 +317,8 @@ again:
}
/*
- * zstd_put_workspace - zstd put_workspace
+ * Zstd put_workspace.
+ *
* @ws: list_head for the workspace
*
* When putting back a workspace, we only need to update the LRU if we are of
@@ -301,34 +327,36 @@ again:
* isn't set, it is also set here. Only the max level workspace tries and wakes
* up waiting workspaces.
*/
-void zstd_put_workspace(struct list_head *ws)
+void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws)
{
+ struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
struct workspace *workspace = list_to_workspace(ws);
- spin_lock_bh(&wsm.lock);
+ ASSERT(zwsm);
+ spin_lock_bh(&zwsm->lock);
/* A node is only taken off the lru if we are the corresponding level */
- if (workspace->req_level == workspace->level) {
+ if (clip_level(workspace->req_level) == workspace->level) {
/* Hide a max level workspace from reclaim */
- if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
+ if (list_empty(&zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
INIT_LIST_HEAD(&workspace->lru_list);
} else {
workspace->last_used = jiffies;
- list_add(&workspace->lru_list, &wsm.lru_list);
- if (!timer_pending(&wsm.timer))
- mod_timer(&wsm.timer,
+ list_add(&workspace->lru_list, &zwsm->lru_list);
+ if (!timer_pending(&zwsm->timer))
+ mod_timer(&zwsm->timer,
jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
}
}
- set_bit(workspace->level - 1, &wsm.active_map);
- list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
+ set_bit(workspace->level, &zwsm->active_map);
+ list_add(&workspace->list, &zwsm->idle_ws[workspace->level]);
workspace->req_level = 0;
- spin_unlock_bh(&wsm.lock);
+ spin_unlock_bh(&zwsm->lock);
- if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
- cond_wake_up(&wsm.wait);
+ if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL))
+ cond_wake_up(&zwsm->wait);
}
void zstd_free_workspace(struct list_head *ws)
@@ -340,20 +368,22 @@ void zstd_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *zstd_alloc_workspace(unsigned int level)
+struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
{
+ const u32 blocksize = fs_info->sectorsize;
struct workspace *workspace;
workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->size = zstd_ws_mem_sizes[level - 1];
- workspace->level = level;
+ /* Use level 1 workspace size for all the fast mode negative levels. */
+ workspace->size = zstd_ws_mem_sizes[clip_level(level)];
+ workspace->level = clip_level(level);
workspace->req_level = level;
workspace->last_used = jiffies;
- workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
- workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
+ workspace->buf = kmalloc(blocksize, GFP_KERNEL);
if (!workspace->mem || !workspace->buf)
goto fail;
@@ -366,69 +396,82 @@ fail:
return ERR_PTR(-ENOMEM);
}
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
- unsigned long *total_in, unsigned long *total_out)
+int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct workspace *workspace = list_entry(ws, struct workspace, list);
- ZSTD_CStream *stream;
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ zstd_cstream *stream;
int ret = 0;
- int nr_pages = 0;
- struct page *in_page = NULL; /* The current page to read */
- struct page *out_page = NULL; /* The current page to write to */
+ int nr_folios = 0;
+ struct folio *in_folio = NULL; /* The current folio to read. */
+ struct folio *out_folio = NULL; /* The current folio to write to. */
unsigned long tot_in = 0;
unsigned long tot_out = 0;
unsigned long len = *total_out;
- const unsigned long nr_dest_pages = *out_pages;
- unsigned long max_out = nr_dest_pages * PAGE_SIZE;
- ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
- len);
-
- *out_pages = 0;
+ const unsigned long nr_dest_folios = *out_folios;
+ const u64 orig_end = start + len;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+ unsigned long max_out = nr_dest_folios * min_folio_size;
+ unsigned int cur_len;
+
+ workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
+ *out_folios = 0;
*total_out = 0;
*total_in = 0;
/* Initialize the stream */
- stream = ZSTD_initCStream(params, len, workspace->mem,
+ stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
workspace->size);
- if (!stream) {
- pr_warn("BTRFS: ZSTD_initCStream failed\n");
+ if (unlikely(!stream)) {
+ btrfs_err(fs_info,
+ "zstd compression init level %d failed, root %llu inode %llu offset %llu",
+ workspace->req_level, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start);
ret = -EIO;
goto out;
}
- /* map in the first page of input data */
+ /* Map in the first folio of input data. */
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap(in_page);
+ ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
+ if (ret < 0)
+ goto out;
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start));
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
-
+ workspace->in_buf.size = cur_len;
/* Allocate and map in the output buffer */
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ folios[nr_folios++] = out_folio;
+ workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
while (1) {
size_t ret2;
- ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
+ ret2 = zstd_compress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (unlikely(zstd_is_error(ret2))) {
+ btrfs_warn(fs_info,
+"zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
+ workspace->req_level, zstd_get_error_code(ret2),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
ret = -EIO;
goto out;
}
/* Check to see if we are making it bigger */
- if (tot_in + workspace->in_buf.pos > 8192 &&
+ if (tot_in + workspace->in_buf.pos > blocksize * 2 &&
tot_in + workspace->in_buf.pos <
tot_out + workspace->out_buf.pos) {
ret = -E2BIG;
@@ -444,24 +487,21 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Check if we need more output space */
if (workspace->out_buf.pos == workspace->out_buf.size) {
- tot_out += PAGE_SIZE;
- max_out -= PAGE_SIZE;
- kunmap(out_page);
- if (nr_pages == nr_dest_pages) {
- out_page = NULL;
+ tot_out += min_folio_size;
+ max_out -= min_folio_size;
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ folios[nr_folios++] = out_folio;
+ workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out,
- PAGE_SIZE);
+ workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
}
/* We've reached the end of the input */
@@ -472,25 +512,32 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Check if we need more input */
if (workspace->in_buf.pos == workspace->in_buf.size) {
- tot_in += PAGE_SIZE;
- kunmap(in_page);
- put_page(in_page);
-
- start += PAGE_SIZE;
- len -= PAGE_SIZE;
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap(in_page);
+ tot_in += workspace->in_buf.size;
+ kunmap_local(workspace->in_buf.src);
+ workspace->in_buf.src = NULL;
+ folio_put(in_folio);
+ start += cur_len;
+ len -= cur_len;
+ ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
+ if (ret < 0)
+ goto out;
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ workspace->in_buf.src = kmap_local_folio(in_folio,
+ offset_in_folio(in_folio, start));
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.size = cur_len;
}
}
while (1) {
size_t ret2;
- ret2 = ZSTD_endStream(stream, &workspace->out_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_endStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ ret2 = zstd_end_stream(stream, &workspace->out_buf);
+ if (unlikely(zstd_is_error(ret2))) {
+ btrfs_err(fs_info,
+"zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
+ workspace->req_level, zstd_get_error_code(ret2),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
ret = -EIO;
goto out;
}
@@ -504,23 +551,21 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
- tot_out += PAGE_SIZE;
- max_out -= PAGE_SIZE;
- kunmap(out_page);
- if (nr_pages == nr_dest_pages) {
- out_page = NULL;
+ tot_out += min_folio_size;
+ max_out -= min_folio_size;
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ folios[nr_folios++] = out_folio;
+ workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
}
if (tot_out >= tot_in) {
@@ -532,53 +577,61 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_in = tot_in;
*total_out = tot_out;
out:
- *out_pages = nr_pages;
- /* Cleanup */
- if (in_page) {
- kunmap(in_page);
- put_page(in_page);
+ *out_folios = nr_folios;
+ if (workspace->in_buf.src) {
+ kunmap_local(workspace->in_buf.src);
+ folio_put(in_folio);
}
- if (out_page)
- kunmap(out_page);
return ret;
}
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
+ struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
struct workspace *workspace = list_entry(ws, struct workspace, list);
- struct page **pages_in = cb->compressed_pages;
+ struct folio **folios_in = cb->compressed_folios;
size_t srclen = cb->compressed_len;
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
int ret = 0;
- unsigned long page_in_index = 0;
- unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ const u32 blocksize = fs_info->sectorsize;
+ const unsigned int min_folio_size = btrfs_min_folio_size(fs_info);
+ unsigned long folio_in_index = 0;
+ unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
unsigned long buf_start;
unsigned long total_out = 0;
- stream = ZSTD_initDStream(
+ stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
- if (!stream) {
- pr_debug("BTRFS: ZSTD_initDStream failed\n");
+ if (unlikely(!stream)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression init failed, root %llu inode %llu offset %llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
ret = -EIO;
goto done;
}
- workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);
workspace->out_buf.dst = workspace->buf;
workspace->out_buf.pos = 0;
- workspace->out_buf.size = PAGE_SIZE;
+ workspace->out_buf.size = blocksize;
while (1) {
size_t ret2;
- ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (unlikely(zstd_is_error(ret2))) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression failed, error %d root %llu inode %llu offset %llu",
+ zstd_get_error_code(ret2), btrfs_root_id(inode->root),
+ btrfs_ino(inode), cb->start);
ret = -EIO;
goto done;
}
@@ -599,108 +652,86 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
break;
if (workspace->in_buf.pos == workspace->in_buf.size) {
- kunmap(pages_in[page_in_index++]);
- if (page_in_index >= total_pages_in) {
+ kunmap_local(workspace->in_buf.src);
+ folio_in_index++;
+ if (unlikely(folio_in_index >= total_folios_in)) {
workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
- srclen -= PAGE_SIZE;
- workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ srclen -= min_folio_size;
+ workspace->in_buf.src =
+ kmap_local_folio(folios_in[folio_in_index], 0);
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);
}
}
ret = 0;
- zero_fill_bio(cb->orig_bio);
done:
if (workspace->in_buf.src)
- kunmap(pages_in[page_in_index]);
+ kunmap_local(workspace->in_buf.src);
return ret;
}
-int zstd_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+int zstd_decompress(struct list_head *ws, const u8 *data_in,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- ZSTD_DStream *stream;
+ struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb);
+ const u32 sectorsize = fs_info->sectorsize;
+ zstd_dstream *stream;
int ret = 0;
- size_t ret2;
- unsigned long total_out = 0;
- unsigned long pg_offset = 0;
+ unsigned long to_copy = 0;
- stream = ZSTD_initDStream(
+ stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
- if (!stream) {
- pr_warn("BTRFS: ZSTD_initDStream failed\n");
+ if (unlikely(!stream)) {
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression init failed, root %llu inode %llu offset %llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(dest_folio));
ret = -EIO;
goto finish;
}
- destlen = min_t(size_t, destlen, PAGE_SIZE);
-
workspace->in_buf.src = data_in;
workspace->in_buf.pos = 0;
workspace->in_buf.size = srclen;
workspace->out_buf.dst = workspace->buf;
workspace->out_buf.pos = 0;
- workspace->out_buf.size = PAGE_SIZE;
-
- ret2 = 1;
- while (pg_offset < destlen
- && workspace->in_buf.pos < workspace->in_buf.size) {
- unsigned long buf_start;
- unsigned long buf_offset;
- unsigned long bytes;
-
- /* Check if the frame is over and we still need more input */
- if (ret2 == 0) {
- pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
- ret = -EIO;
- goto finish;
- }
- ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
- &workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
- ret = -EIO;
- goto finish;
- }
-
- buf_start = total_out;
- total_out += workspace->out_buf.pos;
- workspace->out_buf.pos = 0;
-
- if (total_out <= start_byte)
- continue;
-
- if (total_out > start_byte && buf_start < start_byte)
- buf_offset = start_byte - buf_start;
- else
- buf_offset = 0;
-
- bytes = min_t(unsigned long, destlen - pg_offset,
- workspace->out_buf.size - buf_offset);
-
- memcpy_to_page(dest_page, pg_offset,
- workspace->out_buf.dst + buf_offset, bytes);
-
- pg_offset += bytes;
+ workspace->out_buf.size = sectorsize;
+
+ /*
+ * Since neither the input nor the output buffer exceeds one sector,
+ * a single call should complete the decompression.
+ */
+ ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
+ if (unlikely(zstd_is_error(ret))) {
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression failed, error %d root %llu inode %llu offset %llu",
+ zstd_get_error_code(ret), btrfs_root_id(inode->root),
+ btrfs_ino(inode), folio_pos(dest_folio));
+ goto finish;
}
- ret = 0;
+ to_copy = workspace->out_buf.pos;
+ memcpy_to_folio(dest_folio, dest_pgoff, workspace->out_buf.dst, to_copy);
finish:
- if (pg_offset < destlen) {
- memzero_page(dest_page, pg_offset, destlen - pg_offset);
+ /* Error or early end. */
+ if (unlikely(to_copy < destlen)) {
+ ret = -EIO;
+ folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
}
return ret;
}
-const struct btrfs_compress_op btrfs_zstd_compress = {
- /* ZSTD uses own workspace manager */
- .workspace_manager = NULL,
+const struct btrfs_compress_levels btrfs_zstd_compress = {
+ .min_level = ZSTD_BTRFS_MIN_LEVEL,
.max_level = ZSTD_BTRFS_MAX_LEVEL,
.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};
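/*
 * Illustrative sketch, not part of the patch above: the same
 * zstd_in_buffer/zstd_out_buffer bookkeeping that zstd_compress_folios()
 * performs per folio, reduced to one flat input and one flat output buffer.
 * Assumes the caller provides a workspace sized via
 * zstd_cstream_workspace_bound(); the function name is hypothetical.
 */
#include <linux/errno.h>
#include <linux/zstd.h>

static int example_zstd_compress_buf(const void *src, size_t src_len,
				     void *dst, size_t dst_len,
				     void *wksp, size_t wksp_len,
				     int level, size_t *out_len)
{
	zstd_parameters params = zstd_get_params(level, src_len);
	zstd_in_buffer in = { .src = src, .size = src_len, .pos = 0 };
	zstd_out_buffer out = { .dst = dst, .size = dst_len, .pos = 0 };
	zstd_cstream *stream;

	stream = zstd_init_cstream(&params, src_len, wksp, wksp_len);
	if (!stream)
		return -EIO;

	/* Feed input until it is fully consumed, as the folio loop does. */
	while (in.pos < in.size) {
		size_t ret = zstd_compress_stream(stream, &out, &in);

		if (zstd_is_error(ret))
			return -EIO;
		if (out.pos == out.size)
			return -E2BIG;	/* would need another output folio */
	}

	/* Flush the frame; zstd_end_stream() returns 0 once it is complete. */
	while (1) {
		size_t ret = zstd_end_stream(stream, &out);

		if (zstd_is_error(ret))
			return -EIO;
		if (ret == 0)
			break;
		if (out.pos == out.size)
			return -E2BIG;
	}

	*out_len = out.pos;
	return 0;
}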
diff --git a/fs/buffer.c b/fs/buffer.c
index 46bc589b7a03..838c0c571022 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -48,19 +48,21 @@
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
+#include <linux/fsverity.h>
+#include <linux/sched/isolation.h>
#include "internal.h"
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
- enum rw_hint hint, struct writeback_control *wbc);
+static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+ enum rw_hint hint, struct writeback_control *wbc);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
inline void touch_buffer(struct buffer_head *bh)
{
trace_block_touch_buffer(bh);
- mark_page_accessed(bh->b_page);
+ folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);
@@ -79,26 +81,26 @@ void unlock_buffer(struct buffer_head *bh)
EXPORT_SYMBOL(unlock_buffer);
/*
- * Returns if the page has dirty or writeback buffers. If all the buffers
- * are unlocked and clean then the PageDirty information is stale. If
- * any of the pages are locked, it is assumed they are locked for IO.
+ * Returns whether the folio has dirty or writeback buffers. If all the buffers
+ * are unlocked and clean then the folio_test_dirty information is stale. If
+ * any of the buffers are locked, it is assumed they are locked for IO.
*/
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct buffer_head *head, *bh;
*dirty = false;
*writeback = false;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
- if (!page_has_buffers(page))
+ head = folio_buffers(folio);
+ if (!head)
return;
- if (PageWriteback(page))
+ if (folio_test_writeback(folio))
*writeback = true;
- head = page_buffers(page);
bh = head;
do {
if (buffer_locked(bh))
@@ -110,7 +112,6 @@ void buffer_check_dirty_writeback(struct page *page,
bh = bh->b_this_page;
} while (bh != head);
}
-EXPORT_SYMBOL(buffer_check_dirty_writeback);
/*
* Block until a buffer comes unlocked. This doesn't stop it
@@ -152,12 +153,12 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
/*
* Default synchronous end-of-IO handler.. Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
+ * unlock the buffer.
*/
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
- __end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
+ __end_buffer_read_notouch(bh, uptodate);
}
EXPORT_SYMBOL(end_buffer_read_sync);
@@ -175,38 +176,46 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
}
EXPORT_SYMBOL(end_buffer_write_sync);
-/*
- * Various filesystems appear to want __find_get_block to be non-blocking.
- * But it's the page lock which protects the buffers. To get around this,
- * we get exclusion from try_to_free_buffers with the blockdev mapping's
- * private_lock.
- *
- * Hack idea: for the blockdev mapping, private_lock contention
- * may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock.
- */
static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block)
+__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
- struct inode *bd_inode = bdev->bd_inode;
- struct address_space *bd_mapping = bd_inode->i_mapping;
+ struct address_space *bd_mapping = bdev->bd_mapping;
+ const int blkbits = bd_mapping->host->i_blkbits;
struct buffer_head *ret = NULL;
pgoff_t index;
struct buffer_head *bh;
struct buffer_head *head;
- struct page *page;
+ struct folio *folio;
int all_mapped = 1;
static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
- index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
- page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
- if (!page)
+ index = ((loff_t)block << blkbits) / PAGE_SIZE;
+ folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
+ if (IS_ERR(folio))
goto out;
- spin_lock(&bd_mapping->private_lock);
- if (!page_has_buffers(page))
+ /*
+ * Folio lock protects the buffers. Callers that cannot block
+ * will fall back to serializing vs try_to_free_buffers() via
+ * the i_private_lock.
+ */
+ if (atomic)
+ spin_lock(&bd_mapping->i_private_lock);
+ else
+ folio_lock(folio);
+
+ head = folio_buffers(folio);
+ if (!head)
goto out_unlock;
- head = page_buffers(page);
+ /*
+ * Upon a noref migration, the folio lock serializes here;
+ * otherwise bail.
+ */
+ if (test_bit_acquire(BH_Migrate, &head->b_state)) {
+ WARN_ON(!atomic);
+ goto out_unlock;
+ }
+
bh = head;
do {
if (!buffer_mapped(bh))
@@ -232,11 +241,14 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
(unsigned long long)block,
(unsigned long long)bh->b_blocknr,
bh->b_state, bh->b_size, bdev,
- 1 << bd_inode->i_blkbits);
+ 1 << blkbits);
}
out_unlock:
- spin_unlock(&bd_mapping->private_lock);
- put_page(page);
+ if (atomic)
+ spin_unlock(&bd_mapping->i_private_lock);
+ else
+ folio_unlock(folio);
+ folio_put(folio);
out:
return ret;
}
@@ -246,18 +258,17 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
unsigned long flags;
struct buffer_head *first;
struct buffer_head *tmp;
- struct page *page;
- int page_uptodate = 1;
+ struct folio *folio;
+ int folio_uptodate = 1;
BUG_ON(!buffer_async_read(bh));
- page = bh->b_page;
+ folio = bh->b_folio;
if (uptodate) {
set_buffer_uptodate(bh);
} else {
clear_buffer_uptodate(bh);
buffer_io_error(bh, ", async page read");
- SetPageError(page);
}
/*
@@ -265,14 +276,14 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*/
- first = page_buffers(page);
+ first = folio_buffers(folio);
spin_lock_irqsave(&first->b_uptodate_lock, flags);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
do {
if (!buffer_uptodate(tmp))
- page_uptodate = 0;
+ folio_uptodate = 0;
if (buffer_async_read(tmp)) {
BUG_ON(!buffer_locked(tmp));
goto still_busy;
@@ -281,53 +292,87 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
} while (tmp != bh);
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- /*
- * If none of the buffers had errors and they are all
- * uptodate then we can set the page uptodate.
- */
- if (page_uptodate && !PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
+ folio_end_read(folio, folio_uptodate);
return;
still_busy:
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- return;
}
-struct decrypt_bh_ctx {
+struct postprocess_bh_ctx {
struct work_struct work;
struct buffer_head *bh;
};
+static void verify_bh(struct work_struct *work)
+{
+ struct postprocess_bh_ctx *ctx =
+ container_of(work, struct postprocess_bh_ctx, work);
+ struct buffer_head *bh = ctx->bh;
+ bool valid;
+
+ valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
+ end_buffer_async_read(bh, valid);
+ kfree(ctx);
+}
+
+static bool need_fsverity(struct buffer_head *bh)
+{
+ struct folio *folio = bh->b_folio;
+ struct inode *inode = folio->mapping->host;
+
+ return fsverity_active(inode) &&
+ /* needed by ext4 */
+ folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+}
+
static void decrypt_bh(struct work_struct *work)
{
- struct decrypt_bh_ctx *ctx =
- container_of(work, struct decrypt_bh_ctx, work);
+ struct postprocess_bh_ctx *ctx =
+ container_of(work, struct postprocess_bh_ctx, work);
struct buffer_head *bh = ctx->bh;
int err;
- err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
+ err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
bh_offset(bh));
+ if (err == 0 && need_fsverity(bh)) {
+ /*
+ * We use different work queues for decryption and for verity
+ * because verity may require reading metadata pages that need
+ * decryption, and we shouldn't recurse to the same workqueue.
+ */
+ INIT_WORK(&ctx->work, verify_bh);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
end_buffer_async_read(bh, err == 0);
kfree(ctx);
}
/*
- * I/O completion handler for block_read_full_page() - pages
+ * I/O completion handler for block_read_full_folio() - pages
* which come unlocked at the end of I/O.
*/
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
- /* Decrypt if needed */
- if (uptodate &&
- fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
- struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ struct inode *inode = bh->b_folio->mapping->host;
+ bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
+ bool verify = need_fsverity(bh);
+
+ /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
+ if (uptodate && (decrypt || verify)) {
+ struct postprocess_bh_ctx *ctx =
+ kmalloc(sizeof(*ctx), GFP_ATOMIC);
if (ctx) {
- INIT_WORK(&ctx->work, decrypt_bh);
ctx->bh = bh;
- fscrypt_enqueue_decrypt_work(&ctx->work);
+ if (decrypt) {
+ INIT_WORK(&ctx->work, decrypt_bh);
+ fscrypt_enqueue_decrypt_work(&ctx->work);
+ } else {
+ INIT_WORK(&ctx->work, verify_bh);
+ fsverity_enqueue_verify_work(&ctx->work);
+ }
return;
}
uptodate = 0;
@@ -336,29 +381,28 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
}
/*
- * Completion handler for block_write_full_page() - pages which are unlocked
- * during I/O, and which have PageWriteback cleared upon I/O completion.
+ * Completion handler for block_write_full_folio() - folios which are unlocked
+ * during I/O, and which have the writeback flag cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first;
struct buffer_head *tmp;
- struct page *page;
+ struct folio *folio;
BUG_ON(!buffer_async_write(bh));
- page = bh->b_page;
+ folio = bh->b_folio;
if (uptodate) {
set_buffer_uptodate(bh);
} else {
buffer_io_error(bh, ", lost async page write");
mark_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
- SetPageError(page);
}
- first = page_buffers(page);
+ first = folio_buffers(folio);
spin_lock_irqsave(&first->b_uptodate_lock, flags);
clear_buffer_async_write(bh);
@@ -372,14 +416,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
tmp = tmp->b_this_page;
}
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- end_page_writeback(page);
+ folio_end_writeback(folio);
return;
still_busy:
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- return;
}
-EXPORT_SYMBOL(end_buffer_async_write);
/*
* If a page's buffers are under async read-in (end_buffer_async_read
@@ -429,27 +471,27 @@ EXPORT_SYMBOL(mark_buffer_async_write);
* a successful fsync(). For example, ext2 indirect blocks need to be
* written back and waited upon before fsync() returns.
*
- * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
+ * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
* inode_has_buffers() and invalidate_inode_buffers() are provided for the
- * management of a list of dependent buffers at ->i_mapping->private_list.
+ * management of a list of dependent buffers at ->i_mapping->i_private_list.
*
* Locking is a little subtle: try_to_free_buffers() will remove buffers
* from their controlling inode's queue when they are being freed. But
* try_to_free_buffers() will be operating against the *blockdev* mapping
* at the time, not against the S_ISREG file which depends on those buffers.
- * So the locking for private_list is via the private_lock in the address_space
+ * So the locking for i_private_list is via the i_private_lock in the address_space
* which backs the buffers. Which is different from the address_space
* against which the buffers are listed. So for a particular address_space,
- * mapping->private_lock does *not* protect mapping->private_list! In fact,
- * mapping->private_list will always be protected by the backing blockdev's
- * ->private_lock.
+ * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
+ * mapping->i_private_list will always be protected by the backing blockdev's
+ * ->i_private_lock.
*
* Which introduces a requirement: all buffers on an address_space's
- * ->private_list must be from the same address_space: the blockdev's.
+ * ->i_private_list must be from the same address_space: the blockdev's.
*
- * address_spaces which do not place buffers at ->private_list via these
- * utility functions are free to use private_lock and private_list for
- * whatever they want. The only requirement is that list_empty(private_list)
+ * address_spaces which do not place buffers at ->i_private_list via these
+ * utility functions are free to use i_private_lock and i_private_list for
+ * whatever they want. The only requirement is that list_empty(i_private_list)
* be true at clear_inode() time.
*
* FIXME: clear_inode should not call invalidate_inode_buffers(). The
@@ -472,7 +514,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
*/
/*
- * The buffer's backing address_space's private_lock must be held
+ * The buffer's backing address_space's i_private_lock must be held
*/
static void __remove_assoc_queue(struct buffer_head *bh)
{
@@ -483,7 +525,7 @@ static void __remove_assoc_queue(struct buffer_head *bh)
int inode_has_buffers(struct inode *inode)
{
- return !list_empty(&inode->i_data.private_list);
+ return !list_empty(&inode->i_data.i_private_list);
}
/*
@@ -491,8 +533,8 @@ int inode_has_buffers(struct inode *inode)
* all already-submitted IO to complete, but does not queue any new
* writes to the disk.
*
- * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
- * you dirty the buffers, and then use osync_inode_buffers to wait for
+ * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
+ * as you dirty the buffers, and then use osync_inode_buffers to wait for
* completion. Any other dirty buffers which are not yet queued for
* write will not be flushed to disk by the osync.
*/
@@ -521,17 +563,11 @@ repeat:
return err;
}
-void emergency_thaw_bdev(struct super_block *sb)
-{
- while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
- printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
-}
-
/**
* sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
* @mapping: the mapping which wants those buffers written
*
- * Starts I/O against the buffers at mapping->private_list, and waits upon
+ * Starts I/O against the buffers at mapping->i_private_list, and waits upon
* that I/O.
*
* Basically, this is a convenience function for fsync().
@@ -540,16 +576,86 @@ void emergency_thaw_bdev(struct super_block *sb)
*/
int sync_mapping_buffers(struct address_space *mapping)
{
- struct address_space *buffer_mapping = mapping->private_data;
+ struct address_space *buffer_mapping = mapping->i_private_data;
- if (buffer_mapping == NULL || list_empty(&mapping->private_list))
+ if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
return 0;
- return fsync_buffers_list(&buffer_mapping->private_lock,
- &mapping->private_list);
+ return fsync_buffers_list(&buffer_mapping->i_private_lock,
+ &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
+/**
+ * generic_buffers_fsync_noflush - generic buffer fsync implementation
+ * for simple filesystems with no inode lock
+ *
+ * @file: file to synchronize
+ * @start: start offset in bytes
+ * @end: end offset in bytes (inclusive)
+ * @datasync: only synchronize essential metadata if true
+ *
+ * This is a generic implementation of the fsync method for simple
+ * filesystems which track all non-inode metadata in the buffers list
+ * hanging off the address_space structure.
+ */
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync)
+{
+ struct inode *inode = file->f_mapping->host;
+ int err;
+ int ret;
+
+ err = file_write_and_wait_range(file, start, end);
+ if (err)
+ return err;
+
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
+ goto out;
+ if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
+ goto out;
+
+ err = sync_inode_metadata(inode, 1);
+ if (ret == 0)
+ ret = err;
+
+out:
+ /* check and advance again to catch errors after syncing out buffers */
+ err = file_check_and_advance_wb_err(file);
+ if (ret == 0)
+ ret = err;
+ return ret;
+}
+EXPORT_SYMBOL(generic_buffers_fsync_noflush);
+
+/**
+ * generic_buffers_fsync - generic buffer fsync implementation
+ * for simple filesystems with no inode lock
+ *
+ * @file: file to synchronize
+ * @start: start offset in bytes
+ * @end: end offset in bytes (inclusive)
+ * @datasync: only synchronize essential metadata if true
+ *
+ * This is a generic implementation of the fsync method for simple
+ * filesystems which track all non-inode metadata in the buffers list
+ * hanging off the address_space structure. This also makes sure that
+ * a device cache flush operation is called at the end.
+ */
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+
+ ret = generic_buffers_fsync_noflush(file, start, end, datasync);
+ if (!ret)
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
+ return ret;
+}
+EXPORT_SYMBOL(generic_buffers_fsync);
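/*
 * Illustrative sketch, not part of the patch above: how a simple
 * buffer_head based filesystem could wire generic_buffers_fsync() into
 * its file_operations. The "examplefs" names are hypothetical; the other
 * methods are the stock generic_file_* helpers.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	/* Writes data and associated buffers, then issues a device cache flush. */
	return generic_buffers_fsync(file, start, end, datasync);
}

static const struct file_operations examplefs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= examplefs_fsync,
};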
+
/*
* Called when we've recently written block `bblock', and it is known that
* `bblock' was for a buffer_boundary() buffer. This means that the block at
@@ -559,10 +665,12 @@ EXPORT_SYMBOL(sync_mapping_buffers);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize)
{
- struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
+ struct buffer_head *bh;
+
+ bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
if (bh) {
if (buffer_dirty(bh))
- ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+ write_dirty_buffer(bh, 0);
put_bh(bh);
}
}
@@ -570,60 +678,64 @@ void write_boundary_block(struct block_device *bdev,
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
struct address_space *mapping = inode->i_mapping;
- struct address_space *buffer_mapping = bh->b_page->mapping;
+ struct address_space *buffer_mapping = bh->b_folio->mapping;
mark_buffer_dirty(bh);
- if (!mapping->private_data) {
- mapping->private_data = buffer_mapping;
+ if (!mapping->i_private_data) {
+ mapping->i_private_data = buffer_mapping;
} else {
- BUG_ON(mapping->private_data != buffer_mapping);
+ BUG_ON(mapping->i_private_data != buffer_mapping);
}
if (!bh->b_assoc_map) {
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
list_move_tail(&bh->b_assoc_buffers,
- &mapping->private_list);
+ &mapping->i_private_list);
bh->b_assoc_map = mapping;
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
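/*
 * Illustrative sketch, not part of the patch above: dirtying a metadata
 * block and associating it with the owning inode via
 * mark_buffer_dirty_inode(), so that a later sync_mapping_buffers() (for
 * example from generic_buffers_fsync()) writes it out through the
 * i_private_list/i_private_lock machinery described earlier. Names are
 * hypothetical.
 */
#include <linux/buffer_head.h>

static void examplefs_dirty_indirect_block(struct inode *inode,
					   struct buffer_head *bh)
{
	/* Queues bh on inode->i_mapping->i_private_list under the blockdev's lock. */
	mark_buffer_dirty_inode(bh, inode);
}

static int examplefs_sync_metadata(struct inode *inode)
{
	/* Writes out and waits upon everything queued above. */
	return sync_mapping_buffers(inode->i_mapping);
}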
-/*
- * Add a page to the dirty page list.
- *
- * It is a sad fact of life that this function is called from several places
- * deeply under spinlocking. It may not sleep.
- *
- * If the page has buffers, the uptodate buffers are set dirty, to preserve
- * dirty-state coherency between the page and the buffers. It the page does
- * not have buffers then when they are later attached they will all be set
- * dirty.
- *
- * The buffers are dirtied before the page is dirtied. There's a small race
- * window in which a writepage caller may see the page cleanness but not the
- * buffer dirtiness. That's fine. If this code were to set the page dirty
- * before the buffers, a concurrent writepage caller could clear the page dirty
- * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
- * page on the dirty page list.
- *
- * We use private_lock to lock against try_to_free_buffers while using the
- * page's buffer list. Also use this to protect against clean buffers being
- * added to the page after it was set dirty.
- *
- * FIXME: may need to call ->reservepage here as well. That's rather up to the
- * address_space though.
+/**
+ * block_dirty_folio - Mark a folio as dirty.
+ * @mapping: The address space containing this folio.
+ * @folio: The folio to mark dirty.
+ *
+ * Filesystems which use buffer_heads can use this function as their
+ * ->dirty_folio implementation. Some filesystems need to do a little
+ * work before calling this function. Filesystems which do not use
+ * buffer_heads should call filemap_dirty_folio() instead.
+ *
+ * If the folio has buffers, the uptodate buffers are set dirty, to
+ * preserve dirty-state coherency between the folio and the buffers.
+ * Buffers added to a dirty folio are created dirty.
+ *
+ * The buffers are dirtied before the folio is dirtied. There's a small
+ * race window in which writeback may see the folio cleanness but not the
+ * buffer dirtiness. That's fine. If this code were to set the folio
+ * dirty before the buffers, writeback could clear the folio dirty flag,
+ * see a bunch of clean buffers and we'd end up with dirty buffers/clean
+ * folio on the dirty folio list.
+ *
+ * We use i_private_lock to lock against try_to_free_buffers() while
+ * using the folio's buffer list. This also prevents clean buffers
+ * being added to the folio after it was set dirty.
+ *
+ * Context: May only be called from process context. Does not sleep.
+ * Caller must ensure that @folio cannot be truncated during this call,
+ * typically by holding the folio lock or having a page in the folio
+ * mapped and holding the page table lock.
+ *
+ * Return: True if the folio was dirtied; false if it was already dirtied.
*/
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- int newly_dirty;
- struct address_space *mapping = page_mapping(page);
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
+ struct buffer_head *head;
+ bool newly_dirty;
- spin_lock(&mapping->private_lock);
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
+ spin_lock(&mapping->i_private_lock);
+ head = folio_buffers(folio);
+ if (head) {
struct buffer_head *bh = head;
do {
@@ -635,21 +747,18 @@ int __set_page_dirty_buffers(struct page *page)
* Lock out page's memcg migration to keep PageDirty
* synchronized with per-memcg dirty page counters.
*/
- lock_page_memcg(page);
- newly_dirty = !TestSetPageDirty(page);
- spin_unlock(&mapping->private_lock);
+ newly_dirty = !folio_test_set_dirty(folio);
+ spin_unlock(&mapping->i_private_lock);
if (newly_dirty)
- __set_page_dirty(page, mapping, 1);
-
- unlock_page_memcg(page);
+ __folio_mark_dirty(folio, mapping, 1);
if (newly_dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return newly_dirty;
}
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
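/*
 * Illustrative sketch, not part of the patch above: buffer_head based
 * filesystems typically plug block_dirty_folio() straight into their
 * address_space_operations, alongside block_invalidate_folio() (converted
 * later in this patch). "examplefs" is hypothetical; read and write
 * methods are omitted for brevity.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static const struct address_space_operations examplefs_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	/* .read_folio, .writepages etc. would use the block_* helpers too */
};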
/*
* Write out and wait upon a list of buffers.
@@ -673,12 +782,11 @@ EXPORT_SYMBOL(__set_page_dirty_buffers);
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
- struct list_head tmp;
struct address_space *mapping;
int err = 0, err2;
struct blk_plug plug;
+ LIST_HEAD(tmp);
- INIT_LIST_HEAD(&tmp);
blk_start_plug(&plug);
spin_lock(lock);
@@ -730,7 +838,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
smp_mb();
if (buffer_dirty(bh)) {
list_add(&bh->b_assoc_buffers,
- &mapping->private_list);
+ &mapping->i_private_list);
bh->b_assoc_map = mapping;
}
spin_unlock(lock);
@@ -754,21 +862,20 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* probably unmounting the fs, but that doesn't mean we have already
* done a sync(). Just drop the buffers from the inode list.
*
- * NOTE: we take the inode's blockdev's mapping's private_lock. Which
- * assumes that all the buffers are against the blockdev. Not true
- * for reiserfs.
+ * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
+ * assumes that all the buffers are against the blockdev.
*/
void invalidate_inode_buffers(struct inode *inode)
{
if (inode_has_buffers(inode)) {
struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->private_data;
+ struct list_head *list = &mapping->i_private_list;
+ struct address_space *buffer_mapping = mapping->i_private_data;
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
while (!list_empty(list))
__remove_assoc_queue(BH_ENTRY(list->next));
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
}
EXPORT_SYMBOL(invalidate_inode_buffers);
@@ -785,10 +892,10 @@ int remove_inode_buffers(struct inode *inode)
if (inode_has_buffers(inode)) {
struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->private_data;
+ struct list_head *list = &mapping->i_private_list;
+ struct address_space *buffer_mapping = mapping->i_private_data;
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
while (!list_empty(list)) {
struct buffer_head *bh = BH_ENTRY(list->next);
if (buffer_dirty(bh)) {
@@ -797,13 +904,13 @@ int remove_inode_buffers(struct inode *inode)
}
__remove_assoc_queue(bh);
}
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
return ret;
}
/*
- * Create the appropriate buffers when given a page for data area and
+ * Create the appropriate buffers when given a folio for data area and
* the size of each buffer.. Use the bh->b_this_page linked list to
* follow the buffers created. Return NULL if unable to create more
* buffers.
@@ -811,23 +918,19 @@ int remove_inode_buffers(struct inode *inode)
* The retry flag is used to differentiate async IO (paging, swapping)
* which may not fail from ordinary buffer allocations.
*/
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry)
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+ gfp_t gfp)
{
struct buffer_head *bh, *head;
- gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
long offset;
struct mem_cgroup *memcg, *old_memcg;
- if (retry)
- gfp |= __GFP_NOFAIL;
-
- /* The page lock pins the memcg */
- memcg = page_memcg(page);
+ /* The folio lock pins the memcg */
+ memcg = folio_memcg(folio);
old_memcg = set_active_memcg(memcg);
head = NULL;
- offset = PAGE_SIZE;
+ offset = folio_size(folio);
while ((offset -= size) >= 0) {
bh = alloc_buffer_head(gfp);
if (!bh)
@@ -839,8 +942,8 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bh->b_size = size;
- /* Link the buffer to its page */
- set_bh_page(bh, page, offset);
+ /* Link the buffer to its folio */
+ folio_set_bh(bh, folio, offset);
}
out:
set_active_memcg(old_memcg);
@@ -859,10 +962,18 @@ no_grow:
goto out;
}
+EXPORT_SYMBOL_GPL(folio_alloc_buffers);
+
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
+{
+ gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
+
+ return folio_alloc_buffers(page_folio(page), size, gfp);
+}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
-static inline void
-link_dev_buffers(struct page *page, struct buffer_head *head)
+static inline void link_dev_buffers(struct folio *folio,
+ struct buffer_head *head)
{
struct buffer_head *bh, *tail;
@@ -872,7 +983,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
- attach_page_private(page, head);
+ folio_attach_private(folio, head);
}
static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
@@ -888,15 +999,15 @@ static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
}
/*
- * Initialise the state of a blockdev page's buffers.
+ * Initialise the state of a blockdev folio's buffers.
*/
-static sector_t
-init_page_buffers(struct page *page, struct block_device *bdev,
- sector_t block, int size)
+static sector_t folio_init_buffers(struct folio *folio,
+ struct block_device *bdev, unsigned size)
{
- struct buffer_head *head = page_buffers(page);
+ struct buffer_head *head = folio_buffers(folio);
struct buffer_head *bh = head;
- int uptodate = PageUptodate(page);
+ bool uptodate = folio_test_uptodate(folio);
+ sector_t block = div_u64(folio_pos(folio), size);
sector_t end_block = blkdev_max_block(bdev, size);
do {
@@ -921,126 +1032,114 @@ init_page_buffers(struct page *page, struct block_device *bdev,
}
/*
- * Create the page-cache page that contains the requested block.
+ * Create the page-cache folio that contains the requested block.
*
* This is used purely for blockdev mappings.
+ *
+ * Returns false if we have a failure which cannot be cured by retrying
+ * without sleeping. Returns true if we succeeded, or the caller should retry.
*/
-static int
-grow_dev_page(struct block_device *bdev, sector_t block,
- pgoff_t index, int size, int sizebits, gfp_t gfp)
+static bool grow_dev_folio(struct block_device *bdev, sector_t block,
+ pgoff_t index, unsigned size, gfp_t gfp)
{
- struct inode *inode = bdev->bd_inode;
- struct page *page;
+ struct address_space *mapping = bdev->bd_mapping;
+ struct folio *folio;
struct buffer_head *bh;
- sector_t end_block;
- int ret = 0;
- gfp_t gfp_mask;
-
- gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
+ sector_t end_block = 0;
- /*
- * XXX: __getblk_slow() can not really deal with failure and
- * will endlessly loop on improvised global reclaim. Prefer
- * looping in the allocator rather than here, at least that
- * code knows what it's doing.
- */
- gfp_mask |= __GFP_NOFAIL;
-
- page = find_or_create_page(inode->i_mapping, index, gfp_mask);
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (IS_ERR(folio))
+ return false;
- BUG_ON(!PageLocked(page));
-
- if (page_has_buffers(page)) {
- bh = page_buffers(page);
+ bh = folio_buffers(folio);
+ if (bh) {
if (bh->b_size == size) {
- end_block = init_page_buffers(page, bdev,
- (sector_t)index << sizebits,
- size);
- goto done;
+ end_block = folio_init_buffers(folio, bdev, size);
+ goto unlock;
+ }
+
+ /*
+ * Retrying may succeed; for example the folio may finish
+ * writeback, or buffers may be cleaned. This should not
+ * happen very often; maybe we have old buffers attached to
+ * this blockdev's page cache and we're trying to change
+ * the block size?
+ */
+ if (!try_to_free_buffers(folio)) {
+ end_block = ~0ULL;
+ goto unlock;
}
- if (!try_to_free_buffers(page))
- goto failed;
}
- /*
- * Allocate some buffers for this page
- */
- bh = alloc_page_buffers(page, size, true);
+ bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
+ if (!bh)
+ goto unlock;
/*
- * Link the page to the buffers and initialise them. Take the
+ * Link the folio to the buffers and initialise them. Take the
* lock to be atomic wrt __find_get_block(), which does not
- * run under the page lock.
+ * run under the folio lock.
*/
- spin_lock(&inode->i_mapping->private_lock);
- link_dev_buffers(page, bh);
- end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
- size);
- spin_unlock(&inode->i_mapping->private_lock);
-done:
- ret = (block < end_block) ? 1 : -ENXIO;
-failed:
- unlock_page(page);
- put_page(page);
- return ret;
+ spin_lock(&mapping->i_private_lock);
+ link_dev_buffers(folio, bh);
+ end_block = folio_init_buffers(folio, bdev, size);
+ spin_unlock(&mapping->i_private_lock);
+unlock:
+ folio_unlock(folio);
+ folio_put(folio);
+ return block < end_block;
}
/*
- * Create buffers for the specified block device block's page. If
- * that page was dirty, the buffers are set dirty also.
+ * Create buffers for the specified block device block's folio. If
+ * that folio was dirty, the buffers are set dirty also. Returns false
+ * if we've hit a permanent error.
*/
-static int
-grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
+static bool grow_buffers(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp)
{
- pgoff_t index;
- int sizebits;
-
- sizebits = PAGE_SHIFT - __ffs(size);
- index = block >> sizebits;
+ loff_t pos;
/*
- * Check for a block which wants to lie outside our maximum possible
- * pagecache index. (this comparison is done using sector_t types).
+ * Check for a block which lies outside our maximum possible
+ * pagecache index.
*/
- if (unlikely(index != block >> sizebits)) {
- printk(KERN_ERR "%s: requested out-of-range block %llu for "
- "device %pg\n",
+ if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
+ printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
__func__, (unsigned long long)block,
bdev);
- return -EIO;
+ return false;
}
- /* Create a page with the proper size buffers.. */
- return grow_dev_page(bdev, block, index, size, sizebits, gfp);
+ /* Create a folio with the proper size buffers */
+ return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
unsigned size, gfp_t gfp)
{
- /* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
- (size < 512 || size > PAGE_SIZE))) {
- printk(KERN_ERR "getblk(): invalid block size %d requested\n",
- size);
- printk(KERN_ERR "logical block size: %d\n",
- bdev_logical_block_size(bdev));
+ bool blocking = gfpflags_allow_blocking(gfp);
- dump_stack();
+ if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) {
+ printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n",
+ size, bdev_logical_block_size(bdev));
return NULL;
}
for (;;) {
struct buffer_head *bh;
- int ret;
- bh = __find_get_block(bdev, block, size);
+ if (!grow_buffers(bdev, block, size, gfp))
+ return NULL;
+
+ if (blocking)
+ bh = __find_get_block_nonatomic(bdev, block, size);
+ else
+ bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
-
- ret = grow_buffers(bdev, block, size, gfp);
- if (ret < 0)
- return NULL;
}
}
@@ -1063,8 +1162,8 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* Also. When blockdev buffers are explicitly read with bread(), they
* individually become uptodate. But their backing page remains not
* uptodate - even if all of its buffers are uptodate. A subsequent
- * block_read_full_page() against that page will discover all the uptodate
- * buffers, will set the page uptodate and will perform no I/O.
+ * block_read_full_folio() against that folio will discover all the uptodate
+ * buffers, will set the folio uptodate and will perform no I/O.
*/
/**
@@ -1076,7 +1175,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* and then attach the address_space's inode to its superblock's dirty
* inode list.
*
- * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
+ * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
* i_pages lock and mapping->host->i_lock.
*/
void mark_buffer_dirty(struct buffer_head *bh)
@@ -1098,16 +1197,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
}
if (!test_set_buffer_dirty(bh)) {
- struct page *page = bh->b_page;
+ struct folio *folio = bh->b_folio;
struct address_space *mapping = NULL;
- lock_page_memcg(page);
- if (!TestSetPageDirty(page)) {
- mapping = page_mapping(page);
+ if (!folio_test_set_dirty(folio)) {
+ mapping = folio->mapping;
if (mapping)
- __set_page_dirty(page, mapping, 0);
+ __folio_mark_dirty(folio, mapping, 0);
}
- unlock_page_memcg(page);
if (mapping)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
@@ -1116,53 +1213,48 @@ EXPORT_SYMBOL(mark_buffer_dirty);
void mark_buffer_write_io_error(struct buffer_head *bh)
{
- struct super_block *sb;
-
set_buffer_write_io_error(bh);
/* FIXME: do we need to set this in both places? */
- if (bh->b_page && bh->b_page->mapping)
- mapping_set_error(bh->b_page->mapping, -EIO);
+ if (bh->b_folio && bh->b_folio->mapping)
+ mapping_set_error(bh->b_folio->mapping, -EIO);
if (bh->b_assoc_map)
mapping_set_error(bh->b_assoc_map, -EIO);
- rcu_read_lock();
- sb = READ_ONCE(bh->b_bdev->bd_super);
- if (sb)
- errseq_set(&sb->s_wb_err, -EIO);
- rcu_read_unlock();
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
-/*
- * Decrement a buffer_head's reference count. If all buffers against a page
- * have zero reference count, are clean and unlocked, and if the page is clean
- * and unlocked then try_to_free_buffers() may strip the buffers from the page
- * in preparation for freeing it (sometimes, rarely, buffers are removed from
- * a page but it ends up not being freed, and buffers may later be reattached).
+/**
+ * __brelse - Release a buffer.
+ * @bh: The buffer to release.
+ *
+ * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
*/
-void __brelse(struct buffer_head * buf)
+void __brelse(struct buffer_head *bh)
{
- if (atomic_read(&buf->b_count)) {
- put_bh(buf);
+ if (atomic_read(&bh->b_count)) {
+ put_bh(bh);
return;
}
WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);
-/*
- * bforget() is like brelse(), except it discards any
- * potentially dirty data.
+/**
+ * __bforget - Discard any dirty data in a buffer.
+ * @bh: The buffer to forget.
+ *
+ * This variant of bforget() can be called if @bh is guaranteed to not
+ * be NULL.
*/
void __bforget(struct buffer_head *bh)
{
clear_buffer_dirty(bh);
if (bh->b_assoc_map) {
- struct address_space *buffer_mapping = bh->b_page->mapping;
+ struct address_space *buffer_mapping = bh->b_folio->mapping;
- spin_lock(&buffer_mapping->private_lock);
+ spin_lock(&buffer_mapping->i_private_lock);
list_del_init(&bh->b_assoc_buffers);
bh->b_assoc_map = NULL;
- spin_unlock(&buffer_mapping->private_lock);
+ spin_unlock(&buffer_mapping->i_private_lock);
}
__brelse(bh);
}
@@ -1177,7 +1269,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
} else {
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ, 0, bh);
+ submit_bh(REQ_OP_READ, bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
@@ -1235,16 +1327,18 @@ static void bh_lru_install(struct buffer_head *bh)
int i;
check_irqs_on();
+ bh_lru_lock();
+
/*
* the refcount of a buffer_head in the bh_lru prevents dropping the
* attached page (i.e., via try_to_free_buffers()), which could cause
* page migration to fail.
* Skip putting the upcoming bh into the bh_lru until migration is done.
*/
- if (lru_cache_disabled())
+ if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
+ bh_lru_unlock();
return;
-
- bh_lru_lock();
+ }
b = this_cpu_ptr(&bh_lrus);
for (i = 0; i < BH_LRU_SIZE; i++) {
@@ -1271,6 +1365,10 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
check_irqs_on();
bh_lru_lock();
+ if (cpu_is_isolated(smp_processor_id())) {
+ bh_lru_unlock();
+ return NULL;
+ }
for (i = 0; i < BH_LRU_SIZE; i++) {
struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
@@ -1296,16 +1394,18 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
/*
* Perform a pagecache lookup for the matching buffer. If it's there, refresh
* it in the LRU and mark it as accessed. If it is not present then return
- * NULL
+ * NULL. In atomic context, NULL may also be returned if the buffer is
+ * being migrated; in that case the folio is not marked accessed either.
*/
-struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+static struct buffer_head *
+find_get_block_common(struct block_device *bdev, sector_t block,
+ unsigned size, bool atomic)
{
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
if (bh == NULL) {
/* __find_get_block_slow will mark the page accessed */
- bh = __find_get_block_slow(bdev, block);
+ bh = __find_get_block_slow(bdev, block, atomic);
if (bh)
bh_lru_install(bh);
} else
@@ -1313,70 +1413,106 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
return bh;
}
+
+struct buffer_head *
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+{
+ return find_get_block_common(bdev, block, size, true);
+}
EXPORT_SYMBOL(__find_get_block);
-/*
- * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
- * which corresponds to the passed block_device, block and size. The
- * returned buffer has its reference count incremented.
- *
- * __getblk_gfp() will lock up the machine if grow_dev_page's
- * try_to_free_buffers() attempt is failing. FIXME, perhaps?
- */
+/* same as __find_get_block() but allows sleeping contexts */
struct buffer_head *
-__getblk_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
+__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
+ unsigned size)
{
- struct buffer_head *bh = __find_get_block(bdev, block, size);
+ return find_get_block_common(bdev, block, size, false);
+}
+EXPORT_SYMBOL(__find_get_block_nonatomic);
- might_sleep();
- if (bh == NULL)
- bh = __getblk_slow(bdev, block, size, gfp);
- return bh;
+/**
+ * bdev_getblk - Get a buffer_head in a block device's buffer cache.
+ * @bdev: The block device.
+ * @block: The block number.
+ * @size: The size of buffer_heads for this @bdev.
+ * @gfp: The memory allocation flags to use.
+ *
+ * The returned buffer head has its reference count incremented, but is
+ * not locked. The caller should call brelse() when it has finished
+ * with the buffer. The buffer may not be uptodate. If needed, the
+ * caller can bring it uptodate either by reading it or overwriting it.
+ *
+ * Return: The buffer head, or NULL if memory could not be allocated.
+ */
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp)
+{
+ struct buffer_head *bh;
+
+ if (gfpflags_allow_blocking(gfp))
+ bh = __find_get_block_nonatomic(bdev, block, size);
+ else
+ bh = __find_get_block(bdev, block, size);
+
+ might_alloc(gfp);
+ if (bh)
+ return bh;
+
+ return __getblk_slow(bdev, block, size, gfp);
}
-EXPORT_SYMBOL(__getblk_gfp);
+EXPORT_SYMBOL(bdev_getblk);
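/*
 * Illustrative sketch, not part of the patch above: looking up (or
 * creating) a buffer_head with bdev_getblk() and reading it in when it is
 * not yet uptodate. The function name is hypothetical and error handling
 * is simplified.
 */
#include <linux/gfp.h>
#include <linux/buffer_head.h>

static struct buffer_head *example_read_meta_block(struct block_device *bdev,
						   sector_t block,
						   unsigned int size)
{
	struct buffer_head *bh;

	bh = bdev_getblk(bdev, block, size, GFP_NOFS | __GFP_MOVABLE);
	if (!bh)
		return NULL;

	if (!buffer_uptodate(bh) && bh_read(bh, 0) < 0) {
		brelse(bh);
		return NULL;
	}
	return bh;		/* caller must brelse() when done */
}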
/*
* Do async read-ahead on a buffer..
*/
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
- struct buffer_head *bh = __getblk(bdev, block, size);
- if (likely(bh)) {
- ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
- brelse(bh);
- }
-}
-EXPORT_SYMBOL(__breadahead);
+ struct buffer_head *bh = bdev_getblk(bdev, block, size,
+ GFP_NOWAIT | __GFP_MOVABLE);
-void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
- gfp_t gfp)
-{
- struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
if (likely(bh)) {
- ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+ bh_readahead(bh, REQ_RAHEAD);
brelse(bh);
}
}
-EXPORT_SYMBOL(__breadahead_gfp);
+EXPORT_SYMBOL(__breadahead);
/**
- * __bread_gfp() - reads a specified block and returns the bh
- * @bdev: the block_device to read from
- * @block: number of block
- * @size: size (in bytes) to read
- * @gfp: page allocation flag
- *
- * Reads a specified block, and returns buffer head that contains it.
- * The page cache can be allocated from non-movable area
- * not to prevent page migration if you set gfp to zero.
- * It returns NULL if the block was unreadable.
+ * __bread_gfp() - Read a block.
+ * @bdev: The block device to read from.
+ * @block: Block number in units of block size.
+ * @size: The block size of this device in bytes.
+ * @gfp: Not page allocation flags; see below.
+ *
+ * You are not expected to call this function. You should use one of
+ * sb_bread(), sb_bread_unmovable() or __bread().
+ *
+ * Read a specified block, and return the buffer head that refers to it.
+ * If @gfp is 0, the memory will be allocated using the block device's
+ * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
+ * allocated from a movable area. Do not pass in a complete set of
+ * GFP flags.
+ *
+ * The returned buffer head has its refcount increased. The caller should
+ * call brelse() when it has finished with the buffer.
+ *
+ * Context: May sleep waiting for I/O.
+ * Return: NULL if the block was unreadable.
*/
-struct buffer_head *
-__bread_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
+struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp)
{
- struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
+ struct buffer_head *bh;
+
+ gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
+
+ /*
+ * Prefer looping in the allocator rather than here, at least that
+ * code knows what it's doing.
+ */
+ gfp |= __GFP_NOFAIL;
+
+ bh = bdev_getblk(bdev, block, size, gfp);
if (likely(bh) && !buffer_uptodate(bh))
bh = __bread_slow(bh);
@@ -1439,20 +1575,20 @@ void invalidate_bh_lrus_cpu(void)
bh_lru_unlock();
}
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset)
+void folio_set_bh(struct buffer_head *bh, struct folio *folio,
+ unsigned long offset)
{
- bh->b_page = page;
- BUG_ON(offset >= PAGE_SIZE);
- if (PageHighMem(page))
+ bh->b_folio = folio;
+ BUG_ON(offset >= folio_size(folio));
+ if (folio_test_highmem(folio))
/*
* This catches illegal uses and preserves the offset:
*/
bh->b_data = (char *)(0 + offset);
else
- bh->b_data = page_address(page) + offset;
+ bh->b_data = folio_address(folio) + offset;
}
-EXPORT_SYMBOL(set_bh_page);
+EXPORT_SYMBOL(folio_set_bh);
/*
* Called when truncating a buffer on a page completely.
@@ -1465,58 +1601,53 @@ EXPORT_SYMBOL(set_bh_page);
static void discard_buffer(struct buffer_head * bh)
{
- unsigned long b_state, b_state_old;
+ unsigned long b_state;
lock_buffer(bh);
clear_buffer_dirty(bh);
bh->b_bdev = NULL;
- b_state = bh->b_state;
- for (;;) {
- b_state_old = cmpxchg(&bh->b_state, b_state,
- (b_state & ~BUFFER_FLAGS_DISCARD));
- if (b_state_old == b_state)
- break;
- b_state = b_state_old;
- }
+ b_state = READ_ONCE(bh->b_state);
+ do {
+ } while (!try_cmpxchg_relaxed(&bh->b_state, &b_state,
+ b_state & ~BUFFER_FLAGS_DISCARD));
unlock_buffer(bh);
}
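/*
 * Illustrative sketch, not part of the patch above: the try_cmpxchg()
 * family loop pattern used in discard_buffer(). On failure try_cmpxchg()
 * updates "old" with the current value, so the loop body stays empty and
 * no explicit re-read is needed. The function name is hypothetical.
 */
#include <linux/atomic.h>

static void example_clear_state_bits(unsigned long *state, unsigned long mask)
{
	unsigned long old = READ_ONCE(*state);

	do {
	} while (!try_cmpxchg(state, &old, old & ~mask));
}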
/**
- * block_invalidatepage - invalidate part or all of a buffer-backed page
- *
- * @page: the page which is affected
+ * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
+ * @folio: The folio which is affected.
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
- * block_invalidatepage() is called when all or part of the page has become
+ * block_invalidate_folio() is called when all or part of the folio has been
* invalidated by a truncate operation.
*
- * block_invalidatepage() does not have to release all buffers, but it must
+ * block_invalidate_folio() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O
* is underway against any of the blocks which are outside the truncation
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
struct buffer_head *head, *bh, *next;
- unsigned int curr_off = 0;
- unsigned int stop = length + offset;
+ size_t curr_off = 0;
+ size_t stop = length + offset;
- BUG_ON(!PageLocked(page));
- if (!page_has_buffers(page))
- goto out;
+ BUG_ON(!folio_test_locked(folio));
/*
* Check for overflow
*/
- BUG_ON(stop > PAGE_SIZE || stop < length);
+ BUG_ON(stop > folio_size(folio) || stop < length);
+
+ head = folio_buffers(folio);
+ if (!head)
+ return;
- head = page_buffers(page);
bh = head;
do {
- unsigned int next_off = curr_off + bh->b_size;
+ size_t next_off = curr_off + bh->b_size;
next = bh->b_this_page;
/*
@@ -1535,29 +1666,29 @@ void block_invalidatepage(struct page *page, unsigned int offset,
} while (bh != head);
/*
- * We release buffers only if the entire page is being invalidated.
+ * We release buffers only if the entire folio is being invalidated.
* The get_block cached value has been unconditionally invalidated,
* so real IO is not possible anymore.
*/
- if (length == PAGE_SIZE)
- try_to_release_page(page, 0);
+ if (length == folio_size(folio))
+ filemap_release_folio(folio, 0);
out:
- return;
+ folio_clear_mappedtodisk(folio);
}
-EXPORT_SYMBOL(block_invalidatepage);
-
+EXPORT_SYMBOL(block_invalidate_folio);
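/*
 * Usage sketch: for a filesystem whose folio state is carried entirely by
 * buffer heads, the helper above can be wired straight into the
 * address_space_operations table (hypothetical "myfs" table, other
 * methods omitted).
 */
static const struct address_space_operations myfs_aops = {
	.invalidate_folio	= block_invalidate_folio,
	/* ... read/write methods omitted ... */
};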
/*
* We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
- * is already excluded via the page lock.
+ * block_dirty_folio() via i_private_lock. try_to_free_buffers
+ * is already excluded via the folio lock.
*/
-void create_empty_buffers(struct page *page,
- unsigned long blocksize, unsigned long b_state)
+struct buffer_head *create_empty_buffers(struct folio *folio,
+ unsigned long blocksize, unsigned long b_state)
{
struct buffer_head *bh, *head, *tail;
+ gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
- head = alloc_page_buffers(page, blocksize, true);
+ head = folio_alloc_buffers(folio, blocksize, gfp);
bh = head;
do {
bh->b_state |= b_state;
@@ -1566,19 +1697,21 @@ void create_empty_buffers(struct page *page,
} while (bh);
tail->b_this_page = head;
- spin_lock(&page->mapping->private_lock);
- if (PageUptodate(page) || PageDirty(page)) {
+ spin_lock(&folio->mapping->i_private_lock);
+ if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
bh = head;
do {
- if (PageDirty(page))
+ if (folio_test_dirty(folio))
set_buffer_dirty(bh);
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
set_buffer_uptodate(bh);
bh = bh->b_this_page;
} while (bh != head);
}
- attach_page_private(page, head);
- spin_unlock(&page->mapping->private_lock);
+ folio_attach_private(folio, head);
+ spin_unlock(&folio->mapping->i_private_lock);
+
+ return head;
}
EXPORT_SYMBOL(create_empty_buffers);
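/*
 * Usage sketch: with the buffer head now returned directly, callers no
 * longer need a second folio_buffers() lookup after attaching empty
 * buffers.  The folio must be locked, as for folio_create_buffers() later
 * in this file.
 */
static struct buffer_head *myfs_folio_buffers(struct folio *folio,
					      struct inode *inode)
{
	struct buffer_head *head = folio_buffers(folio);

	if (!head)
		head = create_empty_buffers(folio,
				1 << READ_ONCE(inode->i_blkbits), 0);
	return head;
}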
@@ -1604,34 +1737,34 @@ EXPORT_SYMBOL(create_empty_buffers);
*/
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
- struct inode *bd_inode = bdev->bd_inode;
- struct address_space *bd_mapping = bd_inode->i_mapping;
- struct pagevec pvec;
- pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
+ struct address_space *bd_mapping = bdev->bd_mapping;
+ const int blkbits = bd_mapping->host->i_blkbits;
+ struct folio_batch fbatch;
+ pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
pgoff_t end;
int i, count;
struct buffer_head *bh;
struct buffer_head *head;
- end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
- pagevec_init(&pvec);
- while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
- count = pagevec_count(&pvec);
+ end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
+ folio_batch_init(&fbatch);
+ while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
+ count = folio_batch_count(&fbatch);
for (i = 0; i < count; i++) {
- struct page *page = pvec.pages[i];
+ struct folio *folio = fbatch.folios[i];
- if (!page_has_buffers(page))
+ if (!folio_buffers(folio))
continue;
/*
- * We use page lock instead of bd_mapping->private_lock
+ * We use folio lock instead of bd_mapping->i_private_lock
* to pin buffers here since we can afford to sleep and
* it scales better than a global spinlock lock.
*/
- lock_page(page);
- /* Recheck when the page is locked which pins bhs */
- if (!page_has_buffers(page))
+ folio_lock(folio);
+ /* Recheck when the folio is locked which pins bhs */
+ head = folio_buffers(folio);
+ if (!head)
goto unlock_page;
- head = page_buffers(page);
bh = head;
do {
if (!buffer_mapped(bh) || (bh->b_blocknr < block))
@@ -1645,9 +1778,9 @@ next:
bh = bh->b_this_page;
} while (bh != head);
unlock_page:
- unlock_page(page);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
/* End of range already reached? */
if (index > end || !index)
@@ -1656,27 +1789,19 @@ unlock_page:
}
EXPORT_SYMBOL(clean_bdev_aliases);
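/*
 * Usage sketch: a get_block_t implementation that has just allocated a
 * fresh on-disk block typically marks the buffer new and clears any stale
 * alias the block device's page cache may still hold for that block;
 * clean_bdev_bh_alias() is the single-block wrapper around the routine
 * above.  "myfs" names are hypothetical.
 */
static void myfs_map_new_block(struct inode *inode, struct buffer_head *bh,
			       sector_t phys_block)
{
	map_bh(bh, inode->i_sb, phys_block);
	set_buffer_new(bh);
	clean_bdev_bh_alias(bh);
}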
-/*
- * Size is a power-of-two in the range 512..PAGE_SIZE,
- * and the case we care about most is PAGE_SIZE.
- *
- * So this *could* possibly be written with those
- * constraints in mind (relevant mostly if some
- * architecture has a slow bit-scan instruction)
- */
-static inline int block_size_bits(unsigned int blocksize)
+static struct buffer_head *folio_create_buffers(struct folio *folio,
+ struct inode *inode,
+ unsigned int b_state)
{
- return ilog2(blocksize);
-}
+ struct buffer_head *bh;
-static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
-{
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
- if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
- b_state);
- return page_buffers(page);
+ bh = folio_buffers(folio);
+ if (!bh)
+ bh = create_empty_buffers(folio,
+ 1 << READ_ONCE(inode->i_blkbits), b_state);
+ return bh;
}
/*
@@ -1693,52 +1818,50 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
*/
/*
- * While block_write_full_page is writing back the dirty buffers under
+ * While block_write_full_folio is writing back the dirty buffers under
* the page lock, whoever dirtied the buffers may decide to clean them
* again at any time. We handle that by only looking at the buffer
* state inside lock_buffer().
*
- * If block_write_full_page() is called for regular writeback
+ * If block_write_full_folio() is called for regular writeback
* (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
* locked buffer. This only can happen if someone has written the buffer
* directly, with submit_bh(). At the address_space level PageWriteback
* prevents this contention from occurring.
*
- * If block_write_full_page() is called with wbc->sync_mode ==
+ * If block_write_full_folio() is called with wbc->sync_mode ==
* WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
* causes the writes to be flagged as synchronous writes.
*/
-int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler)
+int __block_write_full_folio(struct inode *inode, struct folio *folio,
+ get_block_t *get_block, struct writeback_control *wbc)
{
int err;
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
- unsigned int blocksize, bbits;
+ size_t blocksize;
int nr_underway = 0;
- int write_flags = wbc_to_write_flags(wbc);
+ blk_opf_t write_flags = wbc_to_write_flags(wbc);
- head = create_page_buffers(page, inode,
- (1 << BH_Dirty)|(1 << BH_Uptodate));
+ head = folio_create_buffers(folio, inode,
+ (1 << BH_Dirty) | (1 << BH_Uptodate));
/*
- * Be very careful. We have no exclusion from __set_page_dirty_buffers
+ * Be very careful. We have no exclusion from block_dirty_folio
* here, and the (potentially unmapped) buffers may become dirty at
* any time. If a buffer becomes dirty here after we've inspected it
- * then we just miss that fact, and the page stays dirty.
+ * then we just miss that fact, and the folio stays dirty.
*
- * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+ * Buffers outside i_size may be dirtied by block_dirty_folio;
* handle that here by just cleaning them.
*/
bh = head;
blocksize = bh->b_size;
- bbits = block_size_bits(blocksize);
- block = (sector_t)page->index << (PAGE_SHIFT - bbits);
- last_block = (i_size_read(inode) - 1) >> bbits;
+ block = div_u64(folio_pos(folio), blocksize);
+ last_block = div_u64(i_size_read(inode) - 1, blocksize);
/*
* Get all the dirty buffers mapped to disk addresses and
@@ -1748,11 +1871,11 @@ int __block_write_full_page(struct inode *inode, struct page *page,
if (block > last_block) {
/*
* mapped buffers outside i_size will occur, because
- * this page can be outside i_size when there is a
+ * this folio can be outside i_size when there is a
* truncate in progress.
*/
/*
- * The buffer was zeroed by block_write_full_page()
+ * The buffer was zeroed by block_write_full_folio()
*/
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
@@ -1778,7 +1901,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
continue;
/*
* If it's a fully non-blocking write attempt and we cannot
- * lock the buffer then redirty the page. Note that this can
+ * lock the buffer then redirty the folio. Note that this can
* potentially cause a busy-wait loop from writeback threads
* and kswapd activity, but those code paths have their own
* higher-level throttling.
@@ -1786,46 +1909,47 @@ int __block_write_full_page(struct inode *inode, struct page *page,
if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
continue;
}
if (test_clear_buffer_dirty(bh)) {
- mark_buffer_async_write_endio(bh, handler);
+ mark_buffer_async_write_endio(bh,
+ end_buffer_async_write);
} else {
unlock_buffer(bh);
}
} while ((bh = bh->b_this_page) != head);
/*
- * The page and its buffers are protected by PageWriteback(), so we can
- * drop the bh refcounts early.
+ * The folio and its buffers are protected by the writeback flag,
+ * so we can drop the bh refcounts early.
*/
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
- inode->i_write_hint, wbc);
+ submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
+ inode->i_write_hint, wbc);
nr_underway++;
}
bh = next;
} while (bh != head);
- unlock_page(page);
+ folio_unlock(folio);
err = 0;
done:
if (nr_underway == 0) {
/*
- * The page was marked dirty, but the buffers were
+ * The folio was marked dirty, but the buffers were
* clean. Someone wrote them back by hand with
- * ll_rw_block/submit_bh. A rare case.
+ * write_dirty_buffer/submit_bh. A rare case.
*/
- end_page_writeback(page);
+ folio_end_writeback(folio);
/*
- * The page and buffer_heads can be released at any time from
+ * The folio and buffer_heads can be released at any time from
* here on.
*/
}
@@ -1836,7 +1960,7 @@ recover:
* ENOSPC, or some other error. We may already have added some
* blocks to the file, so we need to write these out to avoid
* exposing stale data.
- * The page is currently locked and not marked for writeback
+ * The folio is currently locked and not marked for writeback
*/
bh = head;
/* Recovery: lock and submit the mapped buffers */
@@ -1844,62 +1968,63 @@ recover:
if (buffer_mapped(bh) && buffer_dirty(bh) &&
!buffer_delay(bh)) {
lock_buffer(bh);
- mark_buffer_async_write_endio(bh, handler);
+ mark_buffer_async_write_endio(bh,
+ end_buffer_async_write);
} else {
/*
* The buffer may have been set dirty during
- * attachment to a dirty page.
+ * attachment to a dirty folio.
*/
clear_buffer_dirty(bh);
}
} while ((bh = bh->b_this_page) != head);
- SetPageError(page);
- BUG_ON(PageWriteback(page));
- mapping_set_error(page->mapping, err);
- set_page_writeback(page);
+ BUG_ON(folio_test_writeback(folio));
+ mapping_set_error(folio->mapping, err);
+ folio_start_writeback(folio);
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
- inode->i_write_hint, wbc);
+ submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
+ inode->i_write_hint, wbc);
nr_underway++;
}
bh = next;
} while (bh != head);
- unlock_page(page);
+ folio_unlock(folio);
goto done;
}
-EXPORT_SYMBOL(__block_write_full_page);
+EXPORT_SYMBOL(__block_write_full_folio);
/*
- * If a page has any new buffers, zero them out here, and mark them uptodate
+ * If a folio has any new buffers, zero them out here, and mark them uptodate
* and dirty so they'll be written out (in order to prevent uninitialised
* block data from leaking). And clear the new bit.
*/
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
{
- unsigned int block_start, block_end;
+ size_t block_start, block_end;
struct buffer_head *head, *bh;
- BUG_ON(!PageLocked(page));
- if (!page_has_buffers(page))
+ BUG_ON(!folio_test_locked(folio));
+ head = folio_buffers(folio);
+ if (!head)
return;
- bh = head = page_buffers(page);
+ bh = head;
block_start = 0;
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
- if (!PageUptodate(page)) {
- unsigned start, size;
+ if (!folio_test_uptodate(folio)) {
+ size_t start, xend;
start = max(from, block_start);
- size = min(to, block_end) - start;
+ xend = min(to, block_end);
- zero_user(page, start, size);
+ folio_zero_segment(folio, start, xend);
set_buffer_uptodate(bh);
}
@@ -1912,13 +2037,13 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
bh = bh->b_this_page;
} while (bh != head);
}
-EXPORT_SYMBOL(page_zero_new_buffers);
+EXPORT_SYMBOL(folio_zero_new_buffers);
-static void
+static int
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
const struct iomap *iomap)
{
- loff_t offset = block << inode->i_blkbits;
+ loff_t offset = (loff_t)block << inode->i_blkbits;
bh->b_bdev = iomap->bdev;
@@ -1928,7 +2053,8 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
* current block, then do not map the buffer and let the caller
* handle it.
*/
- BUG_ON(offset >= iomap->offset + iomap->length);
+ if (offset >= iomap->offset + iomap->length)
+ return -EIO;
switch (iomap->type) {
case IOMAP_HOLE:
@@ -1940,7 +2066,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
set_buffer_new(bh);
- break;
+ return 0;
case IOMAP_DELALLOC:
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
@@ -1948,7 +2074,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
set_buffer_uptodate(bh);
set_buffer_mapped(bh);
set_buffer_delay(bh);
- break;
+ return 0;
case IOMAP_UNWRITTEN:
/*
* For unwritten regions, we always need to ensure that regions
@@ -1960,43 +2086,52 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
fallthrough;
case IOMAP_MAPPED:
if ((iomap->flags & IOMAP_F_NEW) ||
- offset >= i_size_read(inode))
+ offset >= i_size_read(inode)) {
+ /*
+ * This can happen if truncating the block device races
+ * with the check in the caller as i_size updates on
+ * block devices aren't synchronized by i_rwsem for
+ * block devices.
+ */
+ if (S_ISBLK(inode->i_mode))
+ return -EIO;
set_buffer_new(bh);
+ }
bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
inode->i_blkbits;
set_buffer_mapped(bh);
- break;
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
}
}
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap)
{
- unsigned from = pos & (PAGE_SIZE - 1);
- unsigned to = from + len;
- struct inode *inode = page->mapping->host;
- unsigned block_start, block_end;
+ size_t from = offset_in_folio(folio, pos);
+ size_t to = from + len;
+ struct inode *inode = folio->mapping->host;
+ size_t block_start, block_end;
sector_t block;
int err = 0;
- unsigned blocksize, bbits;
+ size_t blocksize;
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
- BUG_ON(!PageLocked(page));
- BUG_ON(from > PAGE_SIZE);
- BUG_ON(to > PAGE_SIZE);
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(to > folio_size(folio));
BUG_ON(from > to);
- head = create_page_buffers(page, inode, 0);
+ head = folio_create_buffers(folio, inode, 0);
blocksize = head->b_size;
- bbits = block_size_bits(blocksize);
-
- block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ block = div_u64(folio_pos(folio), blocksize);
- for(bh = head, block_start = 0; bh != head || !block_start;
+ for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
@@ -2006,30 +2141,29 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- if (get_block) {
+ if (get_block)
err = get_block(inode, block, bh, 1);
- if (err)
- break;
- } else {
- iomap_to_bh(inode, block, bh, iomap);
- }
+ else
+ err = iomap_to_bh(inode, block, bh, iomap);
+ if (err)
+ break;
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
- zero_user_segments(page,
+ folio_zero_segments(folio,
to, block_end,
block_start, from);
continue;
}
}
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
@@ -2037,7 +2171,7 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+ bh_read_nowait(bh, 0);
*wait_bh++=bh;
}
}
@@ -2050,26 +2184,27 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err))
- page_zero_new_buffers(page, from, to);
+ folio_zero_new_buffers(folio, from, to);
return err;
}
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block)
{
- return __block_write_begin_int(page, pos, len, get_block, NULL);
+ return __block_write_begin_int(folio, pos, len, get_block, NULL);
}
EXPORT_SYMBOL(__block_write_begin);
-static int __block_commit_write(struct inode *inode, struct page *page,
- unsigned from, unsigned to)
+void block_commit_write(struct folio *folio, size_t from, size_t to)
{
- unsigned block_start, block_end;
- int partial = 0;
+ size_t block_start, block_end;
+ bool partial = false;
unsigned blocksize;
struct buffer_head *bh, *head;
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
+ if (!bh)
+ return;
blocksize = bh->b_size;
block_start = 0;
@@ -2077,7 +2212,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
- partial = 1;
+ partial = true;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
@@ -2091,14 +2226,14 @@ static int __block_commit_write(struct inode *inode, struct page *page,
/*
* If this is a partial write which happened to make all buffers
- * uptodate then we can optimize away a bogus readpage() for
- * the next read(). Here we 'discover' whether the page went
+ * uptodate then we can optimize away a bogus read_folio() for
+ * the next read(). Here we 'discover' whether the folio went
* uptodate as a result of this (potentially partial) write.
*/
if (!partial)
- SetPageUptodate(page);
- return 0;
+ folio_mark_uptodate(folio);
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_write_begin takes care of the basic task of block allocation and
@@ -2107,79 +2242,76 @@ static int __block_commit_write(struct inode *inode, struct page *page,
* The filesystem needs to handle block truncation upon failure.
*/
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block)
+ struct folio **foliop, get_block_t *get_block)
{
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
int status;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- status = __block_write_begin(page, pos, len, get_block);
+ status = __block_write_begin_int(folio, pos, len, get_block, NULL);
if (unlikely(status)) {
- unlock_page(page);
- put_page(page);
- page = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
}
- *pagep = page;
+ *foliop = folio;
return status;
}
EXPORT_SYMBOL(block_write_begin);
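/*
 * Usage sketch: a minimal ->write_begin built on the folio-based helper
 * above, using the method signature implied by the callers later in this
 * file.  myfs_get_block is a purely hypothetical get_block_t that maps
 * file blocks 1:1 onto disk blocks; a real filesystem would do block
 * allocation here.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int myfs_write_begin(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, myfs_get_block);
}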
-int block_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+int block_write_end(loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio)
{
- struct inode *inode = mapping->host;
- unsigned start;
-
- start = pos & (PAGE_SIZE - 1);
+ size_t start = pos - folio_pos(folio);
if (unlikely(copied < len)) {
/*
- * The buffers that were written will now be uptodate, so we
- * don't have to worry about a readpage reading them and
- * overwriting a partial write. However if we have encountered
- * a short write and only partially written into a buffer, it
- * will not be marked uptodate, so a readpage might come in and
- * destroy our partial write.
+ * The buffers that were written will now be uptodate, so
+ * we don't have to worry about a read_folio reading them
+ * and overwriting a partial write. However if we have
+ * encountered a short write and only partially written
+ * into a buffer, it will not be marked uptodate, so a
+ * read_folio might come in and destroy our partial write.
*
* Do the simplest thing, and just treat any short write to a
- * non uptodate page as a zero-length write, and force the
+ * non uptodate folio as a zero-length write, and force the
* caller to redo the whole thing.
*/
- if (!PageUptodate(page))
+ if (!folio_test_uptodate(folio))
copied = 0;
- page_zero_new_buffers(page, start+copied, start+len);
+ folio_zero_new_buffers(folio, start+copied, start+len);
}
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
/* This could be a short (even 0-length) commit */
- __block_commit_write(inode, page, start, start+copied);
+ block_commit_write(folio, start, start + copied);
return copied;
}
EXPORT_SYMBOL(block_write_end);
-int generic_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+int generic_write_end(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
bool i_size_changed = false;
- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ copied = block_write_end(pos, len, copied, folio);
/*
* No need to use i_size_read() here, the i_size cannot change under us
* because we hold i_rwsem.
*
- * But it's important to update i_size while still holding page lock:
+ * But it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
if (pos + copied > inode->i_size) {
@@ -2187,8 +2319,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
i_size_changed = true;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
@@ -2205,29 +2337,27 @@ int generic_write_end(struct file *file, struct address_space *mapping,
EXPORT_SYMBOL(generic_write_end);
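/*
 * Usage sketch: the matching ->write_end.  A filesystem with no extra
 * bookkeeping can point the method directly at generic_write_end(); the
 * wrapper form is shown only to make the argument order visible
 * (hypothetical "myfs" names again).
 */
static int myfs_write_end(const struct kiocb *iocb,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	return generic_write_end(iocb, mapping, pos, len, copied,
				 folio, fsdata);
}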
/*
- * block_is_partially_uptodate checks whether buffers within a page are
+ * block_is_partially_uptodate checks whether buffers within a folio are
* uptodate or not.
*
- * Returns true if all buffers which correspond to a file portion
- * we want to read are uptodate.
+ * Returns true if all buffers which correspond to the specified part
+ * of the folio are uptodate.
*/
-int block_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count)
+bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
unsigned block_start, block_end, blocksize;
unsigned to;
struct buffer_head *bh, *head;
- int ret = 1;
+ bool ret = true;
- if (!page_has_buffers(page))
- return 0;
-
- head = page_buffers(page);
+ head = folio_buffers(folio);
+ if (!head)
+ return false;
blocksize = head->b_size;
- to = min_t(unsigned, PAGE_SIZE - from, count);
+ to = min_t(unsigned, folio_size(folio) - from, count);
to = from + to;
- if (from < blocksize && to > PAGE_SIZE - blocksize)
- return 0;
+ if (from < blocksize && to > folio_size(folio) - blocksize)
+ return false;
bh = head;
block_start = 0;
@@ -2235,7 +2365,7 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
block_end = block_start + blocksize;
if (block_end > from && block_start < to) {
if (!buffer_uptodate(bh)) {
- ret = 0;
+ ret = false;
break;
}
if (block_end >= to)
@@ -2250,30 +2380,32 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
EXPORT_SYMBOL(block_is_partially_uptodate);
/*
- * Generic "read page" function for block devices that have the normal
+ * Generic "read_folio" function for block devices that have the normal
* get_block functionality. This is most of the block device filesystems.
- * Reads the page asynchronously --- the unlock_buffer() and
+ * Reads the folio asynchronously --- the unlock_buffer() and
* set/clear_buffer_uptodate() functions propagate buffer state into the
- * page struct once IO has completed.
+ * folio once IO has completed.
*/
-int block_read_full_page(struct page *page, get_block_t *get_block)
+int block_read_full_folio(struct folio *folio, get_block_t *get_block)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
sector_t iblock, lblock;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- unsigned int blocksize, bbits;
- int nr, i;
+ struct buffer_head *bh, *head, *prev = NULL;
+ size_t blocksize;
int fully_mapped = 1;
+ bool page_error = false;
+ loff_t limit = i_size_read(inode);
- head = create_page_buffers(page, inode, 0);
+ /* This is needed for ext4. */
+ if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
+ limit = inode->i_sb->s_maxbytes;
+
+ head = folio_create_buffers(folio, inode, 0);
blocksize = head->b_size;
- bbits = block_size_bits(blocksize);
- iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
- lblock = (i_size_read(inode)+blocksize-1) >> bbits;
+ iblock = div_u64(folio_pos(folio), blocksize);
+ lblock = div_u64(limit + blocksize - 1, blocksize);
bh = head;
- nr = 0;
- i = 0;
do {
if (buffer_uptodate(bh))
@@ -2287,10 +2419,11 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
- SetPageError(page);
+ page_error = true;
}
if (!buffer_mapped(bh)) {
- zero_user(page, i * blocksize, blocksize);
+ folio_zero_range(folio, bh_offset(bh),
+ blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
@@ -2302,45 +2435,36 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
if (buffer_uptodate(bh))
continue;
}
- arr[nr++] = bh;
- } while (i++, iblock++, (bh = bh->b_this_page) != head);
-
- if (fully_mapped)
- SetPageMappedToDisk(page);
-
- if (!nr) {
- /*
- * All buffers are uptodate - we can set the page uptodate
- * as well. But not if get_block() returned an error.
- */
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
- }
- /* Stage two: lock the buffers */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
lock_buffer(bh);
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+
mark_buffer_async_read(bh);
- }
+ if (prev)
+ submit_bh(REQ_OP_READ, prev);
+ prev = bh;
+ } while (iblock++, (bh = bh->b_this_page) != head);
+
+ if (fully_mapped)
+ folio_set_mappedtodisk(folio);
/*
- * Stage 3: start the IO. Check for uptodateness
- * inside the buffer lock in case another process reading
- * the underlying blockdev brought it uptodate (the sct fix).
+ * All buffers are uptodate or get_block() returned an error
+ * when trying to map them - we must finish the read because
+ * end_buffer_async_read() will never be called on any buffer
+ * in this folio.
*/
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (buffer_uptodate(bh))
- end_buffer_async_read(bh, 1);
- else
- submit_bh(REQ_OP_READ, 0, bh);
- }
+ if (prev)
+ submit_bh(REQ_OP_READ, prev);
+ else
+ folio_end_read(folio, !page_error);
+
return 0;
}
-EXPORT_SYMBOL(block_read_full_page);
+EXPORT_SYMBOL(block_read_full_folio);
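/*
 * Usage sketch: with a get_block_t callback (the hypothetical
 * myfs_get_block from the write_begin sketch earlier), the helper can
 * back a filesystem's ->read_folio method directly; pairing it with
 * block_is_partially_uptodate lets small reads over uptodate buffers
 * skip the full-folio read entirely.
 */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}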
/* utility function for filesystems that need to do work on expanding
* truncates. Uses filesystem pagecache writes to allow the filesystem to
@@ -2349,20 +2473,20 @@ EXPORT_SYMBOL(block_read_full_page);
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
- void *fsdata;
+ const struct address_space_operations *aops = mapping->a_ops;
+ struct folio *folio;
+ void *fsdata = NULL;
int err;
err = inode_newsize_ok(inode, size);
if (err)
goto out;
- err = pagecache_write_begin(NULL, mapping, size, 0,
- AOP_FLAG_CONT_EXPAND, &page, &fsdata);
+ err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
if (err)
goto out;
- err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
+ err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
BUG_ON(err > 0);
out:
@@ -2370,13 +2494,15 @@ out:
}
EXPORT_SYMBOL(generic_cont_expand_simple);
-static int cont_expand_zero(struct file *file, struct address_space *mapping,
+static int cont_expand_zero(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
+ const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode);
- struct page *page;
- void *fsdata;
+ struct folio *folio;
+ void *fsdata = NULL;
pgoff_t index, curidx;
loff_t curpos;
unsigned zerofrom, offset, len;
@@ -2393,13 +2519,13 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = PAGE_SIZE - zerofrom;
- err = pagecache_write_begin(file, mapping, curpos, len, 0,
- &page, &fsdata);
+ err = aops->write_begin(iocb, mapping, curpos, len,
+ &folio, &fsdata);
if (err)
goto out;
- zero_user(page, zerofrom, len);
- err = pagecache_write_end(file, mapping, curpos, len, len,
- page, fsdata);
+ folio_zero_range(folio, offset_in_folio(folio, curpos), len);
+ err = aops->write_end(iocb, mapping, curpos, len, len,
+ folio, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
@@ -2426,13 +2552,13 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = offset - zerofrom;
- err = pagecache_write_begin(file, mapping, curpos, len, 0,
- &page, &fsdata);
+ err = aops->write_begin(iocb, mapping, curpos, len,
+ &folio, &fsdata);
if (err)
goto out;
- zero_user(page, zerofrom, len);
- err = pagecache_write_end(file, mapping, curpos, len, len,
- page, fsdata);
+ folio_zero_range(folio, offset_in_folio(folio, curpos), len);
+ err = aops->write_end(iocb, mapping, curpos, len, len,
+ folio, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
@@ -2446,17 +2572,16 @@ out:
* For moronic filesystems that do not allow holes in file.
* We may have to extend the file.
*/
-int cont_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block, loff_t *bytes)
+int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata, get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
unsigned int blocksize = i_blocksize(inode);
unsigned int zerofrom;
int err;
- err = cont_expand_zero(file, mapping, pos, bytes);
+ err = cont_expand_zero(iocb, mapping, pos, bytes);
if (err)
return err;
@@ -2466,18 +2591,10 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- return block_write_begin(mapping, pos, len, flags, pagep, get_block);
+ return block_write_begin(mapping, pos, len, foliop, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
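/*
 * Usage sketch, loosely modelled on filesystems that cannot represent
 * holes: the "valid size" cursor handed in via @bytes is normally a
 * per-inode field; a file-scope stand-in is used here purely so the
 * sketch is self-contained.
 */
static loff_t myfs_valid_size;	/* stand-in for a per-inode field */

static int myfs_cont_write_begin(const struct kiocb *iocb,
				 struct address_space *mapping,
				 loff_t pos, unsigned len,
				 struct folio **foliop, void **fsdata)
{
	return cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
				myfs_get_block, &myfs_valid_size);
}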
-int block_commit_write(struct page *page, unsigned from, unsigned to)
-{
- struct inode *inode = page->mapping->host;
- __block_commit_write(inode,page,from,to);
- return 0;
-}
-EXPORT_SYMBOL(block_commit_write);
-
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
@@ -2486,7 +2603,7 @@ EXPORT_SYMBOL(block_commit_write);
* holes and correct delalloc and unwritten extent mapping on filesystems that
* support these features.
*
- * We are not allowed to take the i_mutex here so we have to play games to
+ * We are not allowed to take the i_rwsem here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
* truncate writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
@@ -2499,410 +2616,73 @@ EXPORT_SYMBOL(block_commit_write);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vma->vm_file);
unsigned long end;
loff_t size;
int ret;
- lock_page(page);
+ folio_lock(folio);
size = i_size_read(inode);
- if ((page->mapping != inode->i_mapping) ||
- (page_offset(page) > size)) {
+ if ((folio->mapping != inode->i_mapping) ||
+ (folio_pos(folio) >= size)) {
/* We overload EFAULT to mean page got truncated */
ret = -EFAULT;
goto out_unlock;
}
- /* page is wholly or partially inside EOF */
- if (((page->index + 1) << PAGE_SHIFT) > size)
- end = size & ~PAGE_MASK;
- else
- end = PAGE_SIZE;
-
- ret = __block_write_begin(page, 0, end, get_block);
- if (!ret)
- ret = block_commit_write(page, 0, end);
+ end = folio_size(folio);
+ /* folio is wholly or partially inside EOF */
+ if (folio_pos(folio) + end > size)
+ end = size - folio_pos(folio);
- if (unlikely(ret < 0))
+ ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
+ if (unlikely(ret))
goto out_unlock;
- set_page_dirty(page);
- wait_for_stable_page(page);
- return 0;
-out_unlock:
- unlock_page(page);
- return ret;
-}
-EXPORT_SYMBOL(block_page_mkwrite);
-/*
- * nobh_write_begin()'s prereads are special: the buffer_heads are freed
- * immediately, while under the page lock. So it needs a special end_io
- * handler which does not touch the bh after unlocking it.
- */
-static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
-{
- __end_buffer_read_notouch(bh, uptodate);
-}
-
-/*
- * Attach the singly-linked list of buffers created by nobh_write_begin, to
- * the page (converting it to circular linked list and taking care of page
- * dirty races).
- */
-static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
-{
- struct buffer_head *bh;
-
- BUG_ON(!PageLocked(page));
-
- spin_lock(&page->mapping->private_lock);
- bh = head;
- do {
- if (PageDirty(page))
- set_buffer_dirty(bh);
- if (!bh->b_this_page)
- bh->b_this_page = head;
- bh = bh->b_this_page;
- } while (bh != head);
- attach_page_private(page, head);
- spin_unlock(&page->mapping->private_lock);
-}
-
-/*
- * On entry, the page is fully not uptodate.
- * On exit the page is fully uptodate in the areas outside (from,to)
- * The filesystem needs to handle block truncation upon failure.
- */
-int nobh_write_begin(struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
-{
- struct inode *inode = mapping->host;
- const unsigned blkbits = inode->i_blkbits;
- const unsigned blocksize = 1 << blkbits;
- struct buffer_head *head, *bh;
- struct page *page;
- pgoff_t index;
- unsigned from, to;
- unsigned block_in_page;
- unsigned block_start, block_end;
- sector_t block_in_file;
- int nr_reads = 0;
- int ret = 0;
- int is_mapped_to_disk = 1;
-
- index = pos >> PAGE_SHIFT;
- from = pos & (PAGE_SIZE - 1);
- to = from + len;
-
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
- *pagep = page;
- *fsdata = NULL;
-
- if (page_has_buffers(page)) {
- ret = __block_write_begin(page, pos, len, get_block);
- if (unlikely(ret))
- goto out_release;
- return ret;
- }
-
- if (PageMappedToDisk(page))
- return 0;
-
- /*
- * Allocate buffers so that we can keep track of state, and potentially
- * attach them to the page if an error occurs. In the common case of
- * no error, they will just be freed again without ever being attached
- * to the page (which is all OK, because we're under the page lock).
- *
- * Be careful: the buffer linked list is a NULL terminated one, rather
- * than the circular one we're used to.
- */
- head = alloc_page_buffers(page, blocksize, false);
- if (!head) {
- ret = -ENOMEM;
- goto out_release;
- }
-
- block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
-
- /*
- * We loop across all blocks in the page, whether or not they are
- * part of the affected region. This is so we can discover if the
- * page is fully mapped-to-disk.
- */
- for (block_start = 0, block_in_page = 0, bh = head;
- block_start < PAGE_SIZE;
- block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
- int create;
-
- block_end = block_start + blocksize;
- bh->b_state = 0;
- create = 1;
- if (block_start >= to)
- create = 0;
- ret = get_block(inode, block_in_file + block_in_page,
- bh, create);
- if (ret)
- goto failed;
- if (!buffer_mapped(bh))
- is_mapped_to_disk = 0;
- if (buffer_new(bh))
- clean_bdev_bh_alias(bh);
- if (PageUptodate(page)) {
- set_buffer_uptodate(bh);
- continue;
- }
- if (buffer_new(bh) || !buffer_mapped(bh)) {
- zero_user_segments(page, block_start, from,
- to, block_end);
- continue;
- }
- if (buffer_uptodate(bh))
- continue; /* reiserfs does this */
- if (block_start < from || block_end > to) {
- lock_buffer(bh);
- bh->b_end_io = end_buffer_read_nobh;
- submit_bh(REQ_OP_READ, 0, bh);
- nr_reads++;
- }
- }
-
- if (nr_reads) {
- /*
- * The page is locked, so these buffers are protected from
- * any VM or truncate activity. Hence we don't need to care
- * for the buffer_head refcounts.
- */
- for (bh = head; bh; bh = bh->b_this_page) {
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- ret = -EIO;
- }
- if (ret)
- goto failed;
- }
-
- if (is_mapped_to_disk)
- SetPageMappedToDisk(page);
-
- *fsdata = head; /* to be released by nobh_write_end */
+ block_commit_write(folio, 0, end);
+ folio_mark_dirty(folio);
+ folio_wait_stable(folio);
return 0;
-
-failed:
- BUG_ON(!ret);
- /*
- * Error recovery is a bit difficult. We need to zero out blocks that
- * were newly allocated, and dirty them to ensure they get written out.
- * Buffers need to be attached to the page at this point, otherwise
- * the handling of potential IO errors during writeout would be hard
- * (could try doing synchronous writeout, but what if that fails too?)
- */
- attach_nobh_buffers(page, head);
- page_zero_new_buffers(page, from, to);
-
-out_release:
- unlock_page(page);
- put_page(page);
- *pagep = NULL;
-
- return ret;
-}
-EXPORT_SYMBOL(nobh_write_begin);
-
-int nobh_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = page->mapping->host;
- struct buffer_head *head = fsdata;
- struct buffer_head *bh;
- BUG_ON(fsdata != NULL && page_has_buffers(page));
-
- if (unlikely(copied < len) && head)
- attach_nobh_buffers(page, head);
- if (page_has_buffers(page))
- return generic_write_end(file, mapping, pos, len,
- copied, page, fsdata);
-
- SetPageUptodate(page);
- set_page_dirty(page);
- if (pos+copied > inode->i_size) {
- i_size_write(inode, pos+copied);
- mark_inode_dirty(inode);
- }
-
- unlock_page(page);
- put_page(page);
-
- while (head) {
- bh = head;
- head = head->b_this_page;
- free_buffer_head(bh);
- }
-
- return copied;
-}
-EXPORT_SYMBOL(nobh_write_end);
-
-/*
- * nobh_writepage() - based on block_full_write_page() except
- * that it tries to operate without attaching bufferheads to
- * the page.
- */
-int nobh_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc)
-{
- struct inode * const inode = page->mapping->host;
- loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = i_size >> PAGE_SHIFT;
- unsigned offset;
- int ret;
-
- /* Is the page fully inside i_size? */
- if (page->index < end_index)
- goto out;
-
- /* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_SIZE-1);
- if (page->index >= end_index+1 || !offset) {
- unlock_page(page);
- return 0; /* don't care */
- }
-
- /*
- * The page straddles i_size. It must be zeroed out on each and every
- * writepage invocation because it may be mmapped. "A file is mapped
- * in multiples of the page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when mapped, and
- * writes to that region are not written out to the file."
- */
- zero_user_segment(page, offset, PAGE_SIZE);
-out:
- ret = mpage_writepage(page, get_block, wbc);
- if (ret == -EAGAIN)
- ret = __block_write_full_page(inode, page, get_block, wbc,
- end_buffer_async_write);
+out_unlock:
+ folio_unlock(folio);
return ret;
}
-EXPORT_SYMBOL(nobh_writepage);
-
-int nobh_truncate_page(struct address_space *mapping,
- loff_t from, get_block_t *get_block)
-{
- pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize;
- sector_t iblock;
- unsigned length, pos;
- struct inode *inode = mapping->host;
- struct page *page;
- struct buffer_head map_bh;
- int err;
-
- blocksize = i_blocksize(inode);
- length = offset & (blocksize - 1);
-
- /* Block boundary? Nothing to do */
- if (!length)
- return 0;
-
- length = blocksize - length;
- iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
-
- page = grab_cache_page(mapping, index);
- err = -ENOMEM;
- if (!page)
- goto out;
-
- if (page_has_buffers(page)) {
-has_buffers:
- unlock_page(page);
- put_page(page);
- return block_truncate_page(mapping, from, get_block);
- }
-
- /* Find the buffer that contains "offset" */
- pos = blocksize;
- while (offset >= pos) {
- iblock++;
- pos += blocksize;
- }
-
- map_bh.b_size = blocksize;
- map_bh.b_state = 0;
- err = get_block(inode, iblock, &map_bh, 0);
- if (err)
- goto unlock;
- /* unmapped? It's a hole - nothing to do */
- if (!buffer_mapped(&map_bh))
- goto unlock;
-
- /* Ok, it's mapped. Make sure it's up-to-date */
- if (!PageUptodate(page)) {
- err = mapping->a_ops->readpage(NULL, page);
- if (err) {
- put_page(page);
- goto out;
- }
- lock_page(page);
- if (!PageUptodate(page)) {
- err = -EIO;
- goto unlock;
- }
- if (page_has_buffers(page))
- goto has_buffers;
- }
- zero_user(page, offset, length);
- set_page_dirty(page);
- err = 0;
-
-unlock:
- unlock_page(page);
- put_page(page);
-out:
- return err;
-}
-EXPORT_SYMBOL(nobh_truncate_page);
+EXPORT_SYMBOL(block_page_mkwrite);
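/*
 * Usage sketch: a ->page_mkwrite handler built on the helper above.  On
 * success the folio is returned locked, hence VM_FAULT_LOCKED.  Real
 * callers typically bracket the call with sb_start_pagefault()/
 * sb_end_pagefault() to stay out of a frozen filesystem; the errno
 * mapping below follows the usual convention and is not mandated by the
 * helper itself.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(inode->i_sb);

	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}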
int block_truncate_page(struct address_space *mapping,
loff_t from, get_block_t *get_block)
{
pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize;
sector_t iblock;
- unsigned length, pos;
+ size_t offset, length, pos;
struct inode *inode = mapping->host;
- struct page *page;
+ struct folio *folio;
struct buffer_head *bh;
- int err;
+ int err = 0;
blocksize = i_blocksize(inode);
- length = offset & (blocksize - 1);
+ length = from & (blocksize - 1);
/* Block boundary? Nothing to do */
if (!length)
return 0;
length = blocksize - length;
- iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
-
- page = grab_cache_page(mapping, index);
- err = -ENOMEM;
- if (!page)
- goto out;
+ iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
+
+ folio = filemap_grab_folio(mapping, index);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
+ bh = folio_buffers(folio);
+ if (!bh)
+ bh = create_empty_buffers(folio, blocksize, 0);
/* Find the buffer that contains "offset" */
- bh = page_buffers(page);
+ offset = offset_in_folio(folio, from);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
@@ -2910,7 +2690,6 @@ int block_truncate_page(struct address_space *mapping,
pos += blocksize;
}
- err = 0;
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
@@ -2922,65 +2701,57 @@ int block_truncate_page(struct address_space *mapping,
}
/* Ok, it's mapped. Make sure it's up-to-date */
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
- err = -EIO;
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
- wait_on_buffer(bh);
+ err = bh_read(bh, 0);
/* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
+ if (err < 0)
goto unlock;
}
- zero_user(page, offset, length);
+ folio_zero_range(folio, offset, length);
mark_buffer_dirty(bh);
- err = 0;
unlock:
- unlock_page(page);
- put_page(page);
-out:
+ folio_unlock(folio);
+ folio_put(folio);
+
return err;
}
EXPORT_SYMBOL(block_truncate_page);
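/*
 * Usage sketch: zeroing the now-partial tail block when an inode is
 * shrunk, typically called from the filesystem's truncate path before the
 * blocks beyond the new size are freed (hypothetical "myfs" helper).
 */
static int myfs_truncate_tail(struct inode *inode, loff_t newsize)
{
	return block_truncate_page(inode->i_mapping, newsize,
				   myfs_get_block);
}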
/*
- * The generic ->writepage function for buffer-backed address_spaces
+ * The generic write folio function for buffer-backed address_spaces
*/
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc)
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block)
{
- struct inode * const inode = page->mapping->host;
+ struct inode * const inode = folio->mapping->host;
loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = i_size >> PAGE_SHIFT;
- unsigned offset;
-
- /* Is the page fully inside i_size? */
- if (page->index < end_index)
- return __block_write_full_page(inode, page, get_block, wbc,
- end_buffer_async_write);
-
- /* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_SIZE-1);
- if (page->index >= end_index+1 || !offset) {
- unlock_page(page);
+
+ /* Is the folio fully inside i_size? */
+ if (folio_next_pos(folio) <= i_size)
+ return __block_write_full_folio(inode, folio, get_block, wbc);
+
+ /* Is the folio fully outside i_size? (truncate in progress) */
+ if (folio_pos(folio) >= i_size) {
+ folio_unlock(folio);
return 0; /* don't care */
}
/*
- * The page straddles i_size. It must be zeroed out on each and every
- * writepage invocation because it may be mmapped. "A file is mapped
+ * The folio straddles i_size. It must be zeroed out on each and every
+ * writeback invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when mapped, and
+ * the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_SIZE);
- return __block_write_full_page(inode, page, get_block, wbc,
- end_buffer_async_write);
+ folio_zero_segment(folio, offset_in_folio(folio, i_size),
+ folio_size(folio));
+ return __block_write_full_folio(inode, folio, get_block, wbc);
}
-EXPORT_SYMBOL(block_write_full_page);
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
@@ -3006,9 +2777,11 @@ static void end_bio_bh_io_sync(struct bio *bio)
bio_put(bio);
}
-static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
- enum rw_hint write_hint, struct writeback_control *wbc)
+static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+ enum rw_hint write_hint,
+ struct writeback_control *wbc)
{
+ const enum req_op op = opf & REQ_OP_MASK;
struct bio *bio;
BUG_ON(!buffer_locked(bh));
@@ -3023,100 +2796,41 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
clear_buffer_write_io_error(bh);
- bio = bio_alloc(GFP_NOIO, 1);
+ if (buffer_meta(bh))
+ opf |= REQ_META;
+ if (buffer_prio(bh))
+ opf |= REQ_PRIO;
+
+ bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
- bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
- BUG_ON(bio->bi_iter.bi_size != bh->b_size);
+ bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
- if (buffer_meta(bh))
- op_flags |= REQ_META;
- if (buffer_prio(bh))
- op_flags |= REQ_PRIO;
- bio_set_op_attrs(bio, op, op_flags);
-
/* Take care of bh's that straddle the end of the device */
guard_bio_eod(bio);
if (wbc) {
wbc_init_bio(wbc, bio);
- wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
+ wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
}
submit_bio(bio);
- return 0;
}
-int submit_bh(int op, int op_flags, struct buffer_head *bh)
+void submit_bh(blk_opf_t opf, struct buffer_head *bh)
{
- return submit_bh_wbc(op, op_flags, bh, 0, NULL);
+ submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
}
EXPORT_SYMBOL(submit_bh);
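/*
 * Usage sketch: with the combined blk_opf_t argument, the operation and
 * its modifier flags travel in a single value.  A hypothetical caller
 * issuing a high-priority read of a buffer it owns would look like this.
 */
static void myfs_submit_prio_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ | REQ_PRIO, bh);
}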
-/**
- * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @op: whether to %READ or %WRITE
- * @op_flags: req_flag_bits
- * @nr: number of &struct buffer_heads in the array
- * @bhs: array of pointers to &struct buffer_head
- *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
- * @op_flags contains flags modifying the detailed I/O behavior, most notably
- * %REQ_RAHEAD.
- *
- * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a write
- * request, and any buffer that appears to be up-to-date when doing read
- * request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean
- * until the buffer gets unlocked).
- *
- * ll_rw_block sets b_end_io to simple completion handler that marks
- * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters.
- *
- * All of the buffers must be for the same device, and must also be a
- * multiple of the current approved size for the device.
- */
-void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
-{
- int i;
-
- for (i = 0; i < nr; i++) {
- struct buffer_head *bh = bhs[i];
-
- if (!trylock_buffer(bh))
- continue;
- if (op == WRITE) {
- if (test_clear_buffer_dirty(bh)) {
- bh->b_end_io = end_buffer_write_sync;
- get_bh(bh);
- submit_bh(op, op_flags, bh);
- continue;
- }
- } else {
- if (!buffer_uptodate(bh)) {
- bh->b_end_io = end_buffer_read_sync;
- get_bh(bh);
- submit_bh(op, op_flags, bh);
- continue;
- }
- }
- unlock_buffer(bh);
- }
-}
-EXPORT_SYMBOL(ll_rw_block);
-
-void write_dirty_buffer(struct buffer_head *bh, int op_flags)
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
{
lock_buffer(bh);
if (!test_clear_buffer_dirty(bh)) {
@@ -3125,7 +2839,7 @@ void write_dirty_buffer(struct buffer_head *bh, int op_flags)
}
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(REQ_OP_WRITE, op_flags, bh);
+ submit_bh(REQ_OP_WRITE | op_flags, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);
@@ -3134,10 +2848,8 @@ EXPORT_SYMBOL(write_dirty_buffer);
* and then start new I/O and then wait upon it. The caller must have a ref on
* the buffer_head.
*/
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
{
- int ret = 0;
-
WARN_ON(atomic_read(&bh->b_count) < 1);
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
@@ -3152,14 +2864,14 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
+ submit_bh(REQ_OP_WRITE | op_flags, bh);
wait_on_buffer(bh);
- if (!ret && !buffer_uptodate(bh))
- ret = -EIO;
+ if (!buffer_uptodate(bh))
+ return -EIO;
} else {
unlock_buffer(bh);
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(__sync_dirty_buffer);
@@ -3169,36 +2881,16 @@ int sync_dirty_buffer(struct buffer_head *bh)
}
EXPORT_SYMBOL(sync_dirty_buffer);
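/*
 * Usage sketch: modify an on-disk metadata block and wait for it to reach
 * the device.  A journalling filesystem would normally route this through
 * its journal rather than a synchronous buffer write; "myfs" names are
 * hypothetical.
 */
static int myfs_update_meta_block(struct super_block *sb, sector_t block,
				  const void *src, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, block);
	int err;

	if (!bh)
		return -EIO;

	lock_buffer(bh);
	memcpy(bh->b_data, src, min_t(size_t, len, bh->b_size));
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}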
-/*
- * try_to_free_buffers() checks if all the buffers on this particular page
- * are unused, and releases them if so.
- *
- * Exclusion against try_to_free_buffers may be obtained by either
- * locking the page or by holding its mapping's private_lock.
- *
- * If the page is dirty but all the buffers are clean then we need to
- * be sure to mark the page clean as well. This is because the page
- * may be against a block device, and a later reattachment of buffers
- * to a dirty page will set *all* buffers dirty. Which would corrupt
- * filesystem data on the same device.
- *
- * The same applies to regular filesystem pages: if all the buffers are
- * clean then we set the page clean and proceed. To do that, we require
- * total exclusion from __set_page_dirty_buffers(). That is obtained with
- * private_lock.
- *
- * try_to_free_buffers() is non-blocking.
- */
static inline int buffer_busy(struct buffer_head *bh)
{
return atomic_read(&bh->b_count) |
(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}
-static int
-drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
+static bool
+drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
{
- struct buffer_head *head = page_buffers(page);
+ struct buffer_head *head = folio_buffers(folio);
struct buffer_head *bh;
bh = head;
@@ -3216,47 +2908,71 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
bh = next;
} while (bh != head);
*buffers_to_free = head;
- detach_page_private(page);
- return 1;
+ folio_detach_private(folio);
+ return true;
failed:
- return 0;
+ return false;
}
-int try_to_free_buffers(struct page *page)
+/**
+ * try_to_free_buffers - Release buffers attached to this folio.
+ * @folio: The folio.
+ *
+ * If any buffers are in use (dirty, under writeback, elevated refcount),
+ * no buffers will be freed.
+ *
+ * If the folio is dirty but all the buffers are clean then we need to
+ * be sure to mark the folio clean as well. This is because the folio
+ * may be against a block device, and a later reattachment of buffers
+ * to a dirty folio will set *all* buffers dirty. Which would corrupt
+ * filesystem data on the same device.
+ *
+ * The same applies to regular filesystem folios: if all the buffers are
+ * clean then we set the folio clean and proceed. To do that, we require
+ * total exclusion from block_dirty_folio(). That is obtained with
+ * i_private_lock.
+ *
+ * Exclusion against try_to_free_buffers may be obtained by either
+ * locking the folio or by holding its mapping's i_private_lock.
+ *
+ * Context: Process context. @folio must be locked. Will not sleep.
+ * Return: true if all buffers attached to this folio were freed.
+ */
+bool try_to_free_buffers(struct folio *folio)
{
- struct address_space * const mapping = page->mapping;
+ struct address_space * const mapping = folio->mapping;
struct buffer_head *buffers_to_free = NULL;
- int ret = 0;
+ bool ret = 0;
- BUG_ON(!PageLocked(page));
- if (PageWriteback(page))
- return 0;
+ BUG_ON(!folio_test_locked(folio));
+ if (folio_test_writeback(folio))
+ return false;
if (mapping == NULL) { /* can this still happen? */
- ret = drop_buffers(page, &buffers_to_free);
+ ret = drop_buffers(folio, &buffers_to_free);
goto out;
}
- spin_lock(&mapping->private_lock);
- ret = drop_buffers(page, &buffers_to_free);
+ spin_lock(&mapping->i_private_lock);
+ ret = drop_buffers(folio, &buffers_to_free);
/*
* If the filesystem writes its buffers by hand (eg ext3)
- * then we can have clean buffers against a dirty page. We
- * clean the page here; otherwise the VM will never notice
+ * then we can have clean buffers against a dirty folio. We
+ * clean the folio here; otherwise the VM will never notice
* that the filesystem did any IO at all.
*
* Also, during truncate, discard_buffer will have marked all
- * the page's buffers clean. We discover that here and clean
- * the page also.
+ * the folio's buffers clean. We discover that here and clean
+ * the folio also.
*
- * private_lock must be held over this entire operation in order
- * to synchronise against __set_page_dirty_buffers and prevent the
+ * i_private_lock must be held over this entire operation in order
+ * to synchronise against block_dirty_folio and prevent the
* dirty bit from being lost.
*/
if (ret)
- cancel_dirty_page(page);
- spin_unlock(&mapping->private_lock);
+ folio_cancel_dirty(folio);
+ spin_unlock(&mapping->i_private_lock);
out:
if (buffers_to_free) {
struct buffer_head *bh = buffers_to_free;
@@ -3274,13 +2990,13 @@ EXPORT_SYMBOL(try_to_free_buffers);
/*
* Buffer-head allocation
*/
-static struct kmem_cache *bh_cachep __read_mostly;
+static struct kmem_cache *bh_cachep __ro_after_init;
/*
* Once the number of bh's in the machine exceeds this level, we start
* stripping them in writeback.
*/
-static unsigned long max_buffer_heads;
+static unsigned long max_buffer_heads __ro_after_init;
int buffer_heads_over_limit;
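As a concrete illustration of the locking contract documented in the new try_to_free_buffers() kernel-doc above: the usual caller is an address_space ->release_folio hook, which the VM invokes with the folio already locked. The sketch below is hypothetical and not part of this patch; the function name is invented.

/* Illustrative only: a filesystem ->release_folio that funnels into
 * try_to_free_buffers().  The VM holds the folio lock here, which is
 * exactly what the kernel-doc above requires.
 */
static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
	/* No private buffer_heads attached - nothing to free. */
	if (!folio_buffers(folio))
		return true;

	/* Buffers under writeback are still busy; refuse for now. */
	if (folio_test_writeback(folio))
		return false;

	return try_to_free_buffers(folio);
}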
@@ -3364,41 +3080,77 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
- * bh_submit_read - Submit a locked buffer for reading
+ * __bh_read - Submit read for a locked buffer
* @bh: struct buffer_head
+ * @op_flags: extra REQ_* flags to combine with REQ_OP_READ
+ * @wait: wait until the read completes
*
- * Returns zero on success and -EIO on error.
+ * Returns zero on success or if @wait is false, and -EIO on error.
*/
-int bh_submit_read(struct buffer_head *bh)
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
- BUG_ON(!buffer_locked(bh));
+ int ret = 0;
- if (buffer_uptodate(bh)) {
- unlock_buffer(bh);
- return 0;
- }
+ BUG_ON(!buffer_locked(bh));
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ, 0, bh);
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return 0;
- return -EIO;
+ submit_bh(REQ_OP_READ | op_flags, bh);
+ if (wait) {
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh))
+ ret = -EIO;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__bh_read);
+
+/**
+ * __bh_read_batch - Submit read for a batch of unlocked buffers
+ * @nr: number of entries in the buffer batch
+ * @bhs: a batch of struct buffer_head
+ * @op_flags: extra REQ_* flags to combine with REQ_OP_READ
+ * @force_lock: wait for the buffer lock if set, otherwise skip any buffer
+ * that cannot be locked immediately.
+ *
+ * Reads are submitted asynchronously; completion is signalled per buffer by
+ * end_buffer_read_sync(), so there is no return value.
+ */
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct buffer_head *bh = bhs[i];
+
+ if (buffer_uptodate(bh))
+ continue;
+
+ if (force_lock)
+ lock_buffer(bh);
+ else
+ if (!trylock_buffer(bh))
+ continue;
+
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+
+ bh->b_end_io = end_buffer_read_sync;
+ get_bh(bh);
+ submit_bh(REQ_OP_READ | op_flags, bh);
+ }
}
-EXPORT_SYMBOL(bh_submit_read);
+EXPORT_SYMBOL(__bh_read_batch);
void __init buffer_init(void)
{
unsigned long nrpages;
int ret;
- bh_cachep = kmem_cache_create("buffer_head",
- sizeof(struct buffer_head), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
- SLAB_MEM_SPREAD),
- NULL);
-
+ bh_cachep = KMEM_CACHE(buffer_head,
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
*/
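With bh_submit_read() removed above, callers use __bh_read() and __bh_read_batch() directly. A rough usage sketch under those assumptions; the example function names are invented, only the two helpers come from this patch.

#include <linux/buffer_head.h>

/* Synchronous single-buffer read, roughly what bh_submit_read() callers did. */
static int example_read_bh_sync(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);	/* raced with another reader */
		return 0;
	}
	/* end_buffer_read_sync() unlocks the buffer on completion. */
	return __bh_read(bh, 0, true);
}

/* Fire-and-forget readahead for a batch of buffers. */
static void example_readahead_bhs(struct buffer_head *bhs[], int nr)
{
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
}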
diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig
index 6827b40f7ddc..c5a070550ee3 100644
--- a/fs/cachefiles/Kconfig
+++ b/fs/cachefiles/Kconfig
@@ -2,7 +2,7 @@
config CACHEFILES
tristate "Filesystem caching on files"
- depends on FSCACHE && BLOCK
+ depends on NETFS_SUPPORT && FSCACHE && BLOCK
help
This permits use of a mounted filesystem as a cache for other
filesystems - primarily networking filesystems - thus allowing fast
@@ -19,3 +19,22 @@ config CACHEFILES_DEBUG
caching on files module. If this is set, the debugging output may be
enabled by setting bits in /sys/modules/cachefiles/parameter/debug or
by including a debugging specifier in /etc/cachefilesd.conf.
+
+config CACHEFILES_ERROR_INJECTION
+ bool "Provide error injection for cachefiles"
+ depends on CACHEFILES && SYSCTL
+ help
+ This permits error injection to be enabled in cachefiles whilst a
+ cache is in service.
+
+config CACHEFILES_ONDEMAND
+ bool "Support for on-demand read"
+ depends on CACHEFILES
+ default n
+ help
+ This permits userspace to enable the cachefiles on-demand read mode.
+ In this mode, when a cache miss occurs, responsibility for fetching
+ the data lies with the cachefiles backend instead of with the netfs
+ and is delegated to userspace.
+
+ If unsure, say N.
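Later hunks in this patch test the new mode via cachefiles_in_ondemand_mode(), whose definition lives in internal.h and is not shown in this excerpt. A plausible shape, assuming it simply checks the CACHEFILES_ONDEMAND_MODE flag set by the bind command further down, would be:

static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
{
	/* Assumed definition - internal.h is not part of this excerpt. */
	return IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND) &&
		test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
}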
diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile
index 02fd17731769..c37a7a9af10b 100644
--- a/fs/cachefiles/Makefile
+++ b/fs/cachefiles/Makefile
@@ -4,15 +4,18 @@
#
cachefiles-y := \
- bind.o \
+ cache.o \
daemon.o \
interface.o \
io.o \
key.o \
main.o \
namei.o \
- rdwr.o \
security.o \
+ volume.o \
xattr.o
+cachefiles-$(CONFIG_CACHEFILES_ERROR_INJECTION) += error_inject.o
+cachefiles-$(CONFIG_CACHEFILES_ONDEMAND) += ondemand.o
+
obj-$(CONFIG_CACHEFILES) := cachefiles.o
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
deleted file mode 100644
index d463d89f5db8..000000000000
--- a/fs/cachefiles/bind.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Bind and unbind a cache from the filesystem backing it
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/namei.h>
-#include <linux/mount.h>
-#include <linux/statfs.h>
-#include <linux/ctype.h>
-#include <linux/xattr.h>
-#include "internal.h"
-
-static int cachefiles_daemon_add_cache(struct cachefiles_cache *caches);
-
-/*
- * bind a directory as a cache
- */
-int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
-{
- _enter("{%u,%u,%u,%u,%u,%u},%s",
- cache->frun_percent,
- cache->fcull_percent,
- cache->fstop_percent,
- cache->brun_percent,
- cache->bcull_percent,
- cache->bstop_percent,
- args);
-
- /* start by checking things over */
- ASSERT(cache->fstop_percent >= 0 &&
- cache->fstop_percent < cache->fcull_percent &&
- cache->fcull_percent < cache->frun_percent &&
- cache->frun_percent < 100);
-
- ASSERT(cache->bstop_percent >= 0 &&
- cache->bstop_percent < cache->bcull_percent &&
- cache->bcull_percent < cache->brun_percent &&
- cache->brun_percent < 100);
-
- if (*args) {
- pr_err("'bind' command doesn't take an argument\n");
- return -EINVAL;
- }
-
- if (!cache->rootdirname) {
- pr_err("No cache directory specified\n");
- return -EINVAL;
- }
-
- /* don't permit already bound caches to be re-bound */
- if (test_bit(CACHEFILES_READY, &cache->flags)) {
- pr_err("Cache already bound\n");
- return -EBUSY;
- }
-
- /* make sure we have copies of the tag and dirname strings */
- if (!cache->tag) {
- /* the tag string is released by the fops->release()
- * function, so we don't release it on error here */
- cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
- if (!cache->tag)
- return -ENOMEM;
- }
-
- /* add the cache */
- return cachefiles_daemon_add_cache(cache);
-}
-
-/*
- * add a cache
- */
-static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
-{
- struct cachefiles_object *fsdef;
- struct path path;
- struct kstatfs stats;
- struct dentry *graveyard, *cachedir, *root;
- const struct cred *saved_cred;
- int ret;
-
- _enter("");
-
- /* we want to work under the module's security ID */
- ret = cachefiles_get_security_ID(cache);
- if (ret < 0)
- return ret;
-
- cachefiles_begin_secure(cache, &saved_cred);
-
- /* allocate the root index object */
- ret = -ENOMEM;
-
- fsdef = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
- if (!fsdef)
- goto error_root_object;
-
- ASSERTCMP(fsdef->backer, ==, NULL);
-
- atomic_set(&fsdef->usage, 1);
- fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
-
- /* look up the directory at the root of the cache */
- ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
- if (ret < 0)
- goto error_open_root;
-
- cache->mnt = path.mnt;
- root = path.dentry;
-
- ret = -EINVAL;
- if (mnt_user_ns(path.mnt) != &init_user_ns) {
- pr_warn("File cache on idmapped mounts not supported");
- goto error_unsupported;
- }
-
- /* check parameters */
- ret = -EOPNOTSUPP;
- if (d_is_negative(root) ||
- !d_backing_inode(root)->i_op->lookup ||
- !d_backing_inode(root)->i_op->mkdir ||
- !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
- !root->d_sb->s_op->statfs ||
- !root->d_sb->s_op->sync_fs)
- goto error_unsupported;
-
- ret = -EROFS;
- if (sb_rdonly(root->d_sb))
- goto error_unsupported;
-
- /* determine the security of the on-disk cache as this governs
- * security ID of files we create */
- ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
- if (ret < 0)
- goto error_unsupported;
-
- /* get the cache size and blocksize */
- ret = vfs_statfs(&path, &stats);
- if (ret < 0)
- goto error_unsupported;
-
- ret = -ERANGE;
- if (stats.f_bsize <= 0)
- goto error_unsupported;
-
- ret = -EOPNOTSUPP;
- if (stats.f_bsize > PAGE_SIZE)
- goto error_unsupported;
-
- cache->bsize = stats.f_bsize;
- cache->bshift = 0;
- if (stats.f_bsize < PAGE_SIZE)
- cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize);
-
- _debug("blksize %u (shift %u)",
- cache->bsize, cache->bshift);
-
- _debug("size %llu, avail %llu",
- (unsigned long long) stats.f_blocks,
- (unsigned long long) stats.f_bavail);
-
- /* set up caching limits */
- do_div(stats.f_files, 100);
- cache->fstop = stats.f_files * cache->fstop_percent;
- cache->fcull = stats.f_files * cache->fcull_percent;
- cache->frun = stats.f_files * cache->frun_percent;
-
- _debug("limits {%llu,%llu,%llu} files",
- (unsigned long long) cache->frun,
- (unsigned long long) cache->fcull,
- (unsigned long long) cache->fstop);
-
- stats.f_blocks >>= cache->bshift;
- do_div(stats.f_blocks, 100);
- cache->bstop = stats.f_blocks * cache->bstop_percent;
- cache->bcull = stats.f_blocks * cache->bcull_percent;
- cache->brun = stats.f_blocks * cache->brun_percent;
-
- _debug("limits {%llu,%llu,%llu} blocks",
- (unsigned long long) cache->brun,
- (unsigned long long) cache->bcull,
- (unsigned long long) cache->bstop);
-
- /* get the cache directory and check its type */
- cachedir = cachefiles_get_directory(cache, root, "cache");
- if (IS_ERR(cachedir)) {
- ret = PTR_ERR(cachedir);
- goto error_unsupported;
- }
-
- fsdef->dentry = cachedir;
- fsdef->fscache.cookie = NULL;
-
- ret = cachefiles_check_object_type(fsdef);
- if (ret < 0)
- goto error_unsupported;
-
- /* get the graveyard directory */
- graveyard = cachefiles_get_directory(cache, root, "graveyard");
- if (IS_ERR(graveyard)) {
- ret = PTR_ERR(graveyard);
- goto error_unsupported;
- }
-
- cache->graveyard = graveyard;
-
- /* publish the cache */
- fscache_init_cache(&cache->cache,
- &cachefiles_cache_ops,
- "%s",
- fsdef->dentry->d_sb->s_id);
-
- fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
- &cache->cache);
-
- ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
- if (ret < 0)
- goto error_add_cache;
-
- /* done */
- set_bit(CACHEFILES_READY, &cache->flags);
- dput(root);
-
- pr_info("File cache on %s registered\n", cache->cache.identifier);
-
- /* check how much space the cache has */
- cachefiles_has_space(cache, 0, 0);
- cachefiles_end_secure(cache, saved_cred);
- return 0;
-
-error_add_cache:
- dput(cache->graveyard);
- cache->graveyard = NULL;
-error_unsupported:
- mntput(cache->mnt);
- cache->mnt = NULL;
- dput(fsdef->dentry);
- fsdef->dentry = NULL;
- dput(root);
-error_open_root:
- kmem_cache_free(cachefiles_object_jar, fsdef);
-error_root_object:
- cachefiles_end_secure(cache, saved_cred);
- pr_err("Failed to register: %d\n", ret);
- return ret;
-}
-
-/*
- * unbind a cache on fd release
- */
-void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
-{
- _enter("");
-
- if (test_bit(CACHEFILES_READY, &cache->flags)) {
- pr_info("File cache on %s unregistering\n",
- cache->cache.identifier);
-
- fscache_withdraw_cache(&cache->cache);
- }
-
- dput(cache->graveyard);
- mntput(cache->mnt);
-
- kfree(cache->rootdirname);
- kfree(cache->secctx);
- kfree(cache->tag);
-
- _leave("");
-}
diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
new file mode 100644
index 000000000000..9fb06dc16520
--- /dev/null
+++ b/fs/cachefiles/cache.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Manage high-level VFS aspects of a cache.
+ *
+ * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/statfs.h>
+#include <linux/namei.h>
+#include <trace/events/fscache.h>
+#include "internal.h"
+
+/*
+ * Bring a cache online.
+ */
+int cachefiles_add_cache(struct cachefiles_cache *cache)
+{
+ struct fscache_cache *cache_cookie;
+ struct path path;
+ struct kstatfs stats;
+ struct dentry *graveyard, *cachedir, *root;
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("");
+
+ cache_cookie = fscache_acquire_cache(cache->tag);
+ if (IS_ERR(cache_cookie))
+ return PTR_ERR(cache_cookie);
+
+ /* we want to work under the module's security ID */
+ ret = cachefiles_get_security_ID(cache);
+ if (ret < 0)
+ goto error_getsec;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+
+ /* look up the directory at the root of the cache */
+ ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
+ if (ret < 0)
+ goto error_open_root;
+
+ cache->mnt = path.mnt;
+ root = path.dentry;
+
+ ret = -EINVAL;
+ if (is_idmapped_mnt(path.mnt)) {
+ pr_warn("File cache on idmapped mounts not supported");
+ goto error_unsupported;
+ }
+
+ /* Check features of the backing filesystem:
+ * - Directories must support looking up and directory creation
+ * - We create tmpfiles to handle invalidation
+ * - We use xattrs to store metadata
+ * - We need to be able to query the amount of space available
+ * - We want to be able to sync the filesystem when stopping the cache
+ * - We use DIO to/from pages, so the blocksize mustn't be too big.
+ */
+ ret = -EOPNOTSUPP;
+ if (d_is_negative(root) ||
+ !d_backing_inode(root)->i_op->lookup ||
+ !d_backing_inode(root)->i_op->mkdir ||
+ !d_backing_inode(root)->i_op->tmpfile ||
+ !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
+ !root->d_sb->s_op->statfs ||
+ !root->d_sb->s_op->sync_fs ||
+ root->d_sb->s_blocksize > PAGE_SIZE)
+ goto error_unsupported;
+
+ ret = -EROFS;
+ if (sb_rdonly(root->d_sb))
+ goto error_unsupported;
+
+ /* determine the security of the on-disk cache as this governs
+ * security ID of files we create */
+ ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
+ if (ret < 0)
+ goto error_unsupported;
+
+ /* get the cache size and blocksize */
+ ret = vfs_statfs(&path, &stats);
+ if (ret < 0)
+ goto error_unsupported;
+
+ ret = -ERANGE;
+ if (stats.f_bsize <= 0)
+ goto error_unsupported;
+
+ ret = -EOPNOTSUPP;
+ if (stats.f_bsize > PAGE_SIZE)
+ goto error_unsupported;
+
+ cache->bsize = stats.f_bsize;
+ cache->bshift = ilog2(stats.f_bsize);
+
+ _debug("blksize %u (shift %u)",
+ cache->bsize, cache->bshift);
+
+ _debug("size %llu, avail %llu",
+ (unsigned long long) stats.f_blocks,
+ (unsigned long long) stats.f_bavail);
+
+ /* set up caching limits */
+ do_div(stats.f_files, 100);
+ cache->fstop = stats.f_files * cache->fstop_percent;
+ cache->fcull = stats.f_files * cache->fcull_percent;
+ cache->frun = stats.f_files * cache->frun_percent;
+
+ _debug("limits {%llu,%llu,%llu} files",
+ (unsigned long long) cache->frun,
+ (unsigned long long) cache->fcull,
+ (unsigned long long) cache->fstop);
+
+ do_div(stats.f_blocks, 100);
+ cache->bstop = stats.f_blocks * cache->bstop_percent;
+ cache->bcull = stats.f_blocks * cache->bcull_percent;
+ cache->brun = stats.f_blocks * cache->brun_percent;
+
+ _debug("limits {%llu,%llu,%llu} blocks",
+ (unsigned long long) cache->brun,
+ (unsigned long long) cache->bcull,
+ (unsigned long long) cache->bstop);
+
+ /* get the cache directory and check its type */
+ cachedir = cachefiles_get_directory(cache, root, "cache", NULL);
+ if (IS_ERR(cachedir)) {
+ ret = PTR_ERR(cachedir);
+ goto error_unsupported;
+ }
+
+ cache->store = cachedir;
+
+ /* get the graveyard directory */
+ graveyard = cachefiles_get_directory(cache, root, "graveyard", NULL);
+ if (IS_ERR(graveyard)) {
+ ret = PTR_ERR(graveyard);
+ goto error_unsupported;
+ }
+
+ cache->graveyard = graveyard;
+ cache->cache = cache_cookie;
+
+ ret = fscache_add_cache(cache_cookie, &cachefiles_cache_ops, cache);
+ if (ret < 0)
+ goto error_add_cache;
+
+ /* done */
+ set_bit(CACHEFILES_READY, &cache->flags);
+ dput(root);
+
+ pr_info("File cache on %s registered\n", cache_cookie->name);
+
+ /* check how much space the cache has */
+ cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
+ cachefiles_end_secure(cache, saved_cred);
+ _leave(" = 0 [%px]", cache->cache);
+ return 0;
+
+error_add_cache:
+ cachefiles_put_directory(cache->graveyard);
+ cache->graveyard = NULL;
+error_unsupported:
+ cachefiles_put_directory(cache->store);
+ cache->store = NULL;
+ mntput(cache->mnt);
+ cache->mnt = NULL;
+ dput(root);
+error_open_root:
+ cachefiles_end_secure(cache, saved_cred);
+ put_cred(cache->cache_cred);
+ cache->cache_cred = NULL;
+error_getsec:
+ fscache_relinquish_cache(cache_cookie);
+ cache->cache = NULL;
+ pr_err("Failed to register: %d\n", ret);
+ return ret;
+}
+
+/*
+ * See if we have space for a number of pages and/or a number of files in the
+ * cache
+ */
+int cachefiles_has_space(struct cachefiles_cache *cache,
+ unsigned fnr, unsigned bnr,
+ enum cachefiles_has_space_for reason)
+{
+ struct kstatfs stats;
+ u64 b_avail, b_writing;
+ int ret;
+
+ struct path path = {
+ .mnt = cache->mnt,
+ .dentry = cache->mnt->mnt_root,
+ };
+
+ //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
+ // (unsigned long long) cache->frun,
+ // (unsigned long long) cache->fcull,
+ // (unsigned long long) cache->fstop,
+ // (unsigned long long) cache->brun,
+ // (unsigned long long) cache->bcull,
+ // (unsigned long long) cache->bstop,
+ // fnr, bnr);
+
+ /* find out how many pages of blockdev are available */
+ memset(&stats, 0, sizeof(stats));
+
+ ret = vfs_statfs(&path, &stats);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(NULL, d_inode(path.dentry), ret,
+ cachefiles_trace_statfs_error);
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "statfs failed");
+ _leave(" = %d", ret);
+ return ret;
+ }
+
+ b_avail = stats.f_bavail;
+ b_writing = atomic_long_read(&cache->b_writing);
+ if (b_avail > b_writing)
+ b_avail -= b_writing;
+ else
+ b_avail = 0;
+
+ //_debug("avail %llu,%llu",
+ // (unsigned long long)stats.f_ffree,
+ // (unsigned long long)b_avail);
+
+ /* see if there is sufficient space */
+ if (stats.f_ffree > fnr)
+ stats.f_ffree -= fnr;
+ else
+ stats.f_ffree = 0;
+
+ if (b_avail > bnr)
+ b_avail -= bnr;
+ else
+ b_avail = 0;
+
+ ret = -ENOBUFS;
+ if (stats.f_ffree < cache->fstop ||
+ b_avail < cache->bstop)
+ goto stop_and_begin_cull;
+
+ ret = 0;
+ if (stats.f_ffree < cache->fcull ||
+ b_avail < cache->bcull)
+ goto begin_cull;
+
+ if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
+ stats.f_ffree >= cache->frun &&
+ b_avail >= cache->brun &&
+ test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
+ ) {
+ _debug("cease culling");
+ cachefiles_state_changed(cache);
+ }
+
+ //_leave(" = 0");
+ return 0;
+
+stop_and_begin_cull:
+ switch (reason) {
+ case cachefiles_has_space_for_write:
+ fscache_count_no_write_space();
+ break;
+ case cachefiles_has_space_for_create:
+ fscache_count_no_create_space();
+ break;
+ default:
+ break;
+ }
+begin_cull:
+ if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
+ _debug("### CULL CACHE ###");
+ cachefiles_state_changed(cache);
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Mark all the objects as being out of service and queue them all for cleanup.
+ */
+static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
+{
+ struct cachefiles_object *object;
+ unsigned int count = 0;
+
+ _enter("");
+
+ spin_lock(&cache->object_list_lock);
+
+ while (!list_empty(&cache->object_list)) {
+ object = list_first_entry(&cache->object_list,
+ struct cachefiles_object, cache_link);
+ cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
+ list_del_init(&object->cache_link);
+ fscache_withdraw_cookie(object->cookie);
+ count++;
+ if ((count & 63) == 0) {
+ spin_unlock(&cache->object_list_lock);
+ cond_resched();
+ spin_lock(&cache->object_list_lock);
+ }
+ }
+
+ spin_unlock(&cache->object_list_lock);
+ _leave(" [%u objs]", count);
+}
+
+/*
+ * Withdraw fscache volumes.
+ */
+static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
+{
+ struct list_head *cur;
+ struct cachefiles_volume *volume;
+ struct fscache_volume *vcookie;
+
+ _enter("");
+retry:
+ spin_lock(&cache->object_list_lock);
+ list_for_each(cur, &cache->volumes) {
+ volume = list_entry(cur, struct cachefiles_volume, cache_link);
+
+ if (atomic_read(&volume->vcookie->n_accesses) == 0)
+ continue;
+
+ vcookie = fscache_try_get_volume(volume->vcookie,
+ fscache_volume_get_withdraw);
+ if (vcookie) {
+ spin_unlock(&cache->object_list_lock);
+ fscache_withdraw_volume(vcookie);
+ fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+ goto retry;
+ }
+ }
+ spin_unlock(&cache->object_list_lock);
+
+ _leave("");
+}
+
+/*
+ * Withdraw cachefiles volumes.
+ */
+static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+{
+ _enter("");
+
+ for (;;) {
+ struct fscache_volume *vcookie = NULL;
+ struct cachefiles_volume *volume = NULL;
+
+ spin_lock(&cache->object_list_lock);
+ if (!list_empty(&cache->volumes)) {
+ volume = list_first_entry(&cache->volumes,
+ struct cachefiles_volume, cache_link);
+ vcookie = fscache_try_get_volume(volume->vcookie,
+ fscache_volume_get_withdraw);
+ if (!vcookie) {
+ spin_unlock(&cache->object_list_lock);
+ cpu_relax();
+ continue;
+ }
+ list_del_init(&volume->cache_link);
+ }
+ spin_unlock(&cache->object_list_lock);
+ if (!volume)
+ break;
+
+ cachefiles_withdraw_volume(volume);
+ fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+ }
+
+ _leave("");
+}
+
+/*
+ * Sync a cache to backing disk.
+ */
+static void cachefiles_sync_cache(struct cachefiles_cache *cache)
+{
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("%s", cache->cache->name);
+
+ /* make sure all pages pinned by operations on behalf of the netfs are
+ * written to disc */
+ cachefiles_begin_secure(cache, &saved_cred);
+ down_read(&cache->mnt->mnt_sb->s_umount);
+ ret = sync_filesystem(cache->mnt->mnt_sb);
+ up_read(&cache->mnt->mnt_sb->s_umount);
+ cachefiles_end_secure(cache, saved_cred);
+
+ if (ret == -EIO)
+ cachefiles_io_error(cache,
+ "Attempt to sync backing fs superblock returned error %d",
+ ret);
+}
+
+/*
+ * Withdraw cache objects.
+ */
+void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
+{
+ struct fscache_cache *fscache = cache->cache;
+
+ pr_info("File cache on %s unregistering\n", fscache->name);
+
+ fscache_withdraw_cache(fscache);
+ cachefiles_withdraw_fscache_volumes(cache);
+
+ /* we now have to destroy all the active objects pertaining to this
+ * cache - which we do by passing them off to the thread pool to be
+ * disposed of */
+ cachefiles_withdraw_objects(cache);
+ fscache_wait_for_objects(fscache);
+
+ cachefiles_withdraw_volumes(cache);
+ cachefiles_sync_cache(cache);
+ cache->cache = NULL;
+ fscache_relinquish_cache(fscache);
+}
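To make the limit arithmetic in cachefiles_add_cache() concrete: the statfs totals are divided by 100 and then scaled by the configured percentages. With, say, 1,000,000 inodes on the backing filesystem and fstop/fcull/frun set to 1%/5%/7% (illustrative values, not the driver defaults), the file limits work out as fstop = 10,000, fcull = 50,000 and frun = 70,000. cachefiles_has_space() then returns -ENOBUFS and requests culling once free inodes drop below 10,000, starts culling (while still reporting space available) below 50,000, and clears the culling state again once free inodes climb back above 70,000. The block limits are derived the same way from stats.f_blocks.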
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 752c1e43416f..1806bff8e59b 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -15,6 +15,7 @@
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
+#include <linux/security.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -41,6 +42,8 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
+static void cachefiles_daemon_unbind(struct cachefiles_cache *);
static unsigned long cachefiles_open;
@@ -73,12 +76,16 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
{ "inuse", cachefiles_daemon_inuse },
{ "secctx", cachefiles_daemon_secctx },
{ "tag", cachefiles_daemon_tag },
+#ifdef CONFIG_CACHEFILES_ONDEMAND
+ { "copen", cachefiles_ondemand_copen },
+ { "restore", cachefiles_ondemand_restore },
+#endif
{ "", NULL }
};
/*
- * do various checks
+ * Prepare a cache for caching.
*/
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
@@ -102,9 +109,13 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
}
mutex_init(&cache->daemon_mutex);
- cache->active_nodes = RB_ROOT;
- rwlock_init(&cache->active_lock);
init_waitqueue_head(&cache->daemon_pollwq);
+ INIT_LIST_HEAD(&cache->volumes);
+ INIT_LIST_HEAD(&cache->object_list);
+ spin_lock_init(&cache->object_list_lock);
+ refcount_set(&cache->unbind_pincount, 1);
+ xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
+ xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);
/* set default caching limits
* - limit at 1% free space and/or free files
@@ -123,8 +134,56 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
return 0;
}
+void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+{
+ struct xarray *xa = &cache->reqs;
+ struct cachefiles_req *req;
+ unsigned long index;
+
+ /*
+ * Make sure the following two operations won't be reordered.
+ * 1) set CACHEFILES_DEAD bit
+ * 2) flush requests in the xarray
+ * Otherwise the request may be enqueued after xarray has been
+ * flushed, leaving the orphan request never being completed.
+ *
+ * CPU 1 CPU 2
+ * ===== =====
+ * flush requests in the xarray
+ * test CACHEFILES_DEAD bit
+ * enqueue the request
+ * set CACHEFILES_DEAD bit
+ */
+ smp_mb();
+
+ xa_lock(xa);
+ xa_for_each(xa, index, req) {
+ req->error = -EIO;
+ complete(&req->done);
+ __xa_erase(xa, index);
+ }
+ xa_unlock(xa);
+
+ xa_destroy(&cache->reqs);
+ xa_destroy(&cache->ondemand_ids);
+}
+
+void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
+{
+ if (refcount_dec_and_test(&cache->unbind_pincount)) {
+ cachefiles_daemon_unbind(cache);
+ cachefiles_open = 0;
+ kfree(cache);
+ }
+}
+
+void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
+{
+ refcount_inc(&cache->unbind_pincount);
+}
+
/*
- * release a cache
+ * Release a cache.
*/
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
@@ -136,40 +195,29 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file)
set_bit(CACHEFILES_DEAD, &cache->flags);
- cachefiles_daemon_unbind(cache);
-
- ASSERT(!cache->active_nodes.rb_node);
+ if (cachefiles_in_ondemand_mode(cache))
+ cachefiles_flush_reqs(cache);
/* clean up the control file interface */
cache->cachefilesd = NULL;
file->private_data = NULL;
- cachefiles_open = 0;
- kfree(cache);
+ cachefiles_put_unbind_pincount(cache);
_leave("");
return 0;
}
-/*
- * read the cache state
- */
-static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
- size_t buflen, loff_t *pos)
+static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
+ char __user *_buffer, size_t buflen)
{
- struct cachefiles_cache *cache = file->private_data;
unsigned long long b_released;
unsigned f_released;
char buffer[256];
int n;
- //_enter(",,%zu,", buflen);
-
- if (!test_bit(CACHEFILES_READY, &cache->flags))
- return 0;
-
/* check how much space the cache has */
- cachefiles_has_space(cache, 0, 0);
+ cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
/* summarise */
f_released = atomic_xchg(&cache->f_released, 0);
@@ -206,7 +254,26 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
}
/*
- * command the cache
+ * Read the cache state.
+ */
+static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
+ size_t buflen, loff_t *pos)
+{
+ struct cachefiles_cache *cache = file->private_data;
+
+ //_enter(",,%zu,", buflen);
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags))
+ return 0;
+
+ if (cachefiles_in_ondemand_mode(cache))
+ return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
+ else
+ return cachefiles_do_daemon_read(cache, _buffer, buflen);
+}
+
+/*
+ * Take a command from cachefilesd, parse it and act on it.
*/
static ssize_t cachefiles_daemon_write(struct file *file,
const char __user *_data,
@@ -225,7 +292,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
if (test_bit(CACHEFILES_DEAD, &cache->flags))
return -EIO;
- if (datalen < 0 || datalen > PAGE_SIZE - 1)
+ if (datalen > PAGE_SIZE - 1)
return -EOPNOTSUPP;
/* drag the command string into the kernel so we can parse it */
@@ -284,20 +351,35 @@ found_command:
}
/*
- * poll for culling state
+ * Poll for culling state
* - use EPOLLOUT to indicate culling state
*/
static __poll_t cachefiles_daemon_poll(struct file *file,
struct poll_table_struct *poll)
{
struct cachefiles_cache *cache = file->private_data;
+ XA_STATE(xas, &cache->reqs, 0);
+ struct cachefiles_req *req;
__poll_t mask;
poll_wait(file, &cache->daemon_pollwq, poll);
mask = 0;
- if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
- mask |= EPOLLIN;
+ if (cachefiles_in_ondemand_mode(cache)) {
+ if (!xa_empty(&cache->reqs)) {
+ xas_lock(&xas);
+ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+ if (!cachefiles_ondemand_is_reopening_read(req)) {
+ mask |= EPOLLIN;
+ break;
+ }
+ }
+ xas_unlock(&xas);
+ }
+ } else {
+ if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
+ mask |= EPOLLIN;
+ }
if (test_bit(CACHEFILES_CULLING, &cache->flags))
mask |= EPOLLOUT;
@@ -306,7 +388,7 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
}
/*
- * give a range error for cache space constraints
+ * Give a range error for cache space constraints
* - can be tail-called
*/
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
@@ -318,7 +400,7 @@ static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
}
/*
- * set the percentage of files at which to stop culling
+ * Set the percentage of files at which to stop culling
* - command: "frun <N>%"
*/
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
@@ -342,7 +424,7 @@ static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of files at which to start culling
+ * Set the percentage of files at which to start culling
* - command: "fcull <N>%"
*/
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
@@ -366,7 +448,7 @@ static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of files at which to stop allocating
+ * Set the percentage of files at which to stop allocating
* - command: "fstop <N>%"
*/
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
@@ -382,7 +464,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
- if (fstop < 0 || fstop >= cache->fcull_percent)
+ if (fstop >= cache->fcull_percent)
return cachefiles_daemon_range_error(cache, args);
cache->fstop_percent = fstop;
@@ -390,7 +472,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of blocks at which to stop culling
+ * Set the percentage of blocks at which to stop culling
* - command: "brun <N>%"
*/
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
@@ -414,7 +496,7 @@ static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of blocks at which to start culling
+ * Set the percentage of blocks at which to start culling
* - command: "bcull <N>%"
*/
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
@@ -438,7 +520,7 @@ static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of blocks at which to stop allocating
+ * Set the percentage of blocks at which to stop allocating
* - command: "bstop <N>%"
*/
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
@@ -454,7 +536,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
- if (bstop < 0 || bstop >= cache->bcull_percent)
+ if (bstop >= cache->bcull_percent)
return cachefiles_daemon_range_error(cache, args);
cache->bstop_percent = bstop;
@@ -462,7 +544,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
}
/*
- * set the cache directory
+ * Set the cache directory
* - command: "dir <name>"
*/
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
@@ -490,12 +572,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
}
/*
- * set the cache security context
+ * Set the cache security context
* - command: "secctx <ctx>"
*/
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
- char *secctx;
+ int err;
_enter(",%s", args);
@@ -504,21 +586,21 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
return -EINVAL;
}
- if (cache->secctx) {
+ if (cache->have_secid) {
pr_err("Second security context specified\n");
return -EINVAL;
}
- secctx = kstrdup(args, GFP_KERNEL);
- if (!secctx)
- return -ENOMEM;
+ err = security_secctx_to_secid(args, strlen(args), &cache->secid);
+ if (err)
+ return err;
- cache->secctx = secctx;
+ cache->have_secid = true;
return 0;
}
/*
- * set the cache tag
+ * Set the cache tag
* - command: "tag <name>"
*/
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
@@ -544,7 +626,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
}
/*
- * request a node in the cache be culled from the current working directory
+ * Request a node in the cache be culled from the current working directory
* - command: "cull <name>"
*/
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
@@ -568,7 +650,6 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
return -EIO;
}
- /* extract the directory dentry from the cwd */
get_fs_pwd(current->fs, &path);
if (!d_can_lookup(path.dentry))
@@ -593,7 +674,7 @@ inval:
}
/*
- * set debugging mode
+ * Set debugging mode
* - command: "debug <mask>"
*/
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
@@ -616,7 +697,7 @@ inval:
}
/*
- * find out whether an object in the current working directory is in use or not
+ * Find out whether an object in the current working directory is in use or not
* - command: "inuse <name>"
*/
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
@@ -640,7 +721,6 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
return -EIO;
}
- /* extract the directory dentry from the cwd */
get_fs_pwd(current->fs, &path);
if (!d_can_lookup(path.dentry))
@@ -665,84 +745,83 @@ inval:
}
/*
- * see if we have space for a number of pages and/or a number of files in the
- * cache
+ * Bind a directory as a cache
*/
-int cachefiles_has_space(struct cachefiles_cache *cache,
- unsigned fnr, unsigned bnr)
+static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
- struct kstatfs stats;
- struct path path = {
- .mnt = cache->mnt,
- .dentry = cache->mnt->mnt_root,
- };
- int ret;
+ _enter("{%u,%u,%u,%u,%u,%u},%s",
+ cache->frun_percent,
+ cache->fcull_percent,
+ cache->fstop_percent,
+ cache->brun_percent,
+ cache->bcull_percent,
+ cache->bstop_percent,
+ args);
+
+ if (cache->fstop_percent >= cache->fcull_percent ||
+ cache->fcull_percent >= cache->frun_percent ||
+ cache->frun_percent >= 100)
+ return -ERANGE;
+
+ if (cache->bstop_percent >= cache->bcull_percent ||
+ cache->bcull_percent >= cache->brun_percent ||
+ cache->brun_percent >= 100)
+ return -ERANGE;
+
+ if (!cache->rootdirname) {
+ pr_err("No cache directory specified\n");
+ return -EINVAL;
+ }
+
+ /* Don't permit already bound caches to be re-bound */
+ if (test_bit(CACHEFILES_READY, &cache->flags)) {
+ pr_err("Cache already bound\n");
+ return -EBUSY;
+ }
- //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
- // (unsigned long long) cache->frun,
- // (unsigned long long) cache->fcull,
- // (unsigned long long) cache->fstop,
- // (unsigned long long) cache->brun,
- // (unsigned long long) cache->bcull,
- // (unsigned long long) cache->bstop,
- // fnr, bnr);
-
- /* find out how many pages of blockdev are available */
- memset(&stats, 0, sizeof(stats));
-
- ret = vfs_statfs(&path, &stats);
- if (ret < 0) {
- if (ret == -EIO)
- cachefiles_io_error(cache, "statfs failed");
- _leave(" = %d", ret);
- return ret;
+ if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
+ if (!strcmp(args, "ondemand")) {
+ set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
+ } else if (*args) {
+ pr_err("Invalid argument to the 'bind' command\n");
+ return -EINVAL;
+ }
+ } else if (*args) {
+ pr_err("'bind' command doesn't take an argument\n");
+ return -EINVAL;
}
- stats.f_bavail >>= cache->bshift;
+ /* Make sure we have copies of the tag string */
+ if (!cache->tag) {
+ /*
+ * The tag string is released by the fops->release()
+ * function, so we don't release it on error here
+ */
+ cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
+ if (!cache->tag)
+ return -ENOMEM;
+ }
- //_debug("avail %llu,%llu",
- // (unsigned long long) stats.f_ffree,
- // (unsigned long long) stats.f_bavail);
+ return cachefiles_add_cache(cache);
+}
- /* see if there is sufficient space */
- if (stats.f_ffree > fnr)
- stats.f_ffree -= fnr;
- else
- stats.f_ffree = 0;
+/*
+ * Unbind a cache.
+ */
+static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+{
+ _enter("");
- if (stats.f_bavail > bnr)
- stats.f_bavail -= bnr;
- else
- stats.f_bavail = 0;
-
- ret = -ENOBUFS;
- if (stats.f_ffree < cache->fstop ||
- stats.f_bavail < cache->bstop)
- goto begin_cull;
-
- ret = 0;
- if (stats.f_ffree < cache->fcull ||
- stats.f_bavail < cache->bcull)
- goto begin_cull;
-
- if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
- stats.f_ffree >= cache->frun &&
- stats.f_bavail >= cache->brun &&
- test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
- ) {
- _debug("cease culling");
- cachefiles_state_changed(cache);
- }
+ if (test_bit(CACHEFILES_READY, &cache->flags))
+ cachefiles_withdraw_cache(cache);
- //_leave(" = 0");
- return 0;
+ cachefiles_put_directory(cache->graveyard);
+ cachefiles_put_directory(cache->store);
+ mntput(cache->mnt);
+ put_cred(cache->cache_cred);
-begin_cull:
- if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
- _debug("### CULL CACHE ###");
- cachefiles_state_changed(cache);
- }
+ kfree(cache->rootdirname);
+ kfree(cache->tag);
- _leave(" = %d", ret);
- return ret;
+ _leave("");
}
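The smp_mb() comment in cachefiles_flush_reqs() above relies on a mirrored ordering on the request-enqueue side (fs/cachefiles/ondemand.c, not included in this excerpt). The following is only a sketch of what that side must look like for the ordering argument to hold; the function name and the exact xarray calls are illustrative, not the literal ondemand.c code.

static int example_enqueue_req(struct cachefiles_cache *cache,
			       struct cachefiles_req *req)
{
	u32 id;
	int ret;

	/* Publish the request first ... */
	ret = xa_alloc(&cache->reqs, &id, req, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... then test the DEAD flag; pairs with the smp_mb() in
	 * cachefiles_flush_reqs().  Either the flusher sees this request
	 * in the xarray, or we see the DEAD bit and back the request out.
	 */
	smp_mb();
	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		xa_erase(&cache->reqs, id);
		return -EIO;
	}

	wake_up_all(&cache->daemon_pollwq);
	return 0;
}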
diff --git a/fs/cachefiles/error_inject.c b/fs/cachefiles/error_inject.c
new file mode 100644
index 000000000000..e341ade47dd8
--- /dev/null
+++ b/fs/cachefiles/error_inject.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Error injection handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/sysctl.h>
+#include "internal.h"
+
+unsigned int cachefiles_error_injection_state;
+
+static struct ctl_table_header *cachefiles_sysctl;
+static const struct ctl_table cachefiles_sysctls[] = {
+ {
+ .procname = "error_injection",
+ .data = &cachefiles_error_injection_state,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+};
+
+int __init cachefiles_register_error_injection(void)
+{
+ cachefiles_sysctl = register_sysctl("cachefiles", cachefiles_sysctls);
+ if (!cachefiles_sysctl)
+ return -ENOMEM;
+ return 0;
+
+}
+
+void cachefiles_unregister_error_injection(void)
+{
+ unregister_sysctl_table(cachefiles_sysctl);
+}
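The sysctl above only exposes cachefiles_error_injection_state; the cachefiles_inject_*_error() helpers that consume it (used in interface.c below) are declared in internal.h, which this excerpt does not include. One plausible shape, with the bit assignments assumed rather than taken from the patch:

/* Assumed convention: bit 1 injects -EIO, bit 0 injects -ENOSPC on writes. */
static inline int cachefiles_inject_write_error(void)
{
	return cachefiles_error_injection_state & 2 ? -EIO :
	       cachefiles_error_injection_state & 1 ? -ENOSPC : 0;
}

static inline int cachefiles_inject_remove_error(void)
{
	return cachefiles_error_injection_state & 2 ? -EIO : 0;
}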
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index da28ac1fa225..a08250d244ea 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -1,572 +1,461 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache interface to CacheFiles
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/slab.h>
#include <linux/mount.h>
+#include <linux/xattr.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/falloc.h>
+#include <trace/events/fscache.h>
#include "internal.h"
-struct cachefiles_lookup_data {
- struct cachefiles_xattr *auxdata; /* auxiliary data */
- char *key; /* key path */
-};
-
-static int cachefiles_attr_changed(struct fscache_object *_object);
+static atomic_t cachefiles_object_debug_id;
/*
- * allocate an object record for a cookie lookup and prepare the lookup data
+ * Allocate a cache object record.
*/
-static struct fscache_object *cachefiles_alloc_object(
- struct fscache_cache *_cache,
- struct fscache_cookie *cookie)
+static
+struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
{
- struct cachefiles_lookup_data *lookup_data;
+ struct fscache_volume *vcookie = cookie->volume;
+ struct cachefiles_volume *volume = vcookie->cache_priv;
struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct cachefiles_xattr *auxdata;
- unsigned keylen, auxlen;
- void *buffer, *p;
- char *key;
- cache = container_of(_cache, struct cachefiles_cache, cache);
+ _enter("{%s},%x,", vcookie->key, cookie->debug_id);
- _enter("{%s},%x,", cache->cache.identifier, cookie->debug_id);
-
- lookup_data = kmalloc(sizeof(*lookup_data), cachefiles_gfp);
- if (!lookup_data)
- goto nomem_lookup_data;
-
- /* create a new object record and a temporary leaf image */
- object = kmem_cache_alloc(cachefiles_object_jar, cachefiles_gfp);
+ object = kmem_cache_zalloc(cachefiles_object_jar, GFP_KERNEL);
if (!object)
- goto nomem_object;
-
- ASSERTCMP(object->backer, ==, NULL);
+ return NULL;
- BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
- atomic_set(&object->usage, 1);
-
- fscache_object_init(&object->fscache, cookie, &cache->cache);
+ if (cachefiles_ondemand_init_obj_info(object, volume)) {
+ kmem_cache_free(cachefiles_object_jar, object);
+ return NULL;
+ }
- object->type = cookie->def->type;
+ refcount_set(&object->ref, 1);
- /* get hold of the raw key
- * - stick the length on the front and leave space on the back for the
- * encoder
- */
- buffer = kmalloc((2 + 512) + 3, cachefiles_gfp);
- if (!buffer)
- goto nomem_buffer;
-
- keylen = cookie->key_len;
- if (keylen <= sizeof(cookie->inline_key))
- p = cookie->inline_key;
- else
- p = cookie->key;
- memcpy(buffer + 2, p, keylen);
-
- *(uint16_t *)buffer = keylen;
- ((char *)buffer)[keylen + 2] = 0;
- ((char *)buffer)[keylen + 3] = 0;
- ((char *)buffer)[keylen + 4] = 0;
-
- /* turn the raw key into something that can work with as a filename */
- key = cachefiles_cook_key(buffer, keylen + 2, object->type);
- if (!key)
- goto nomem_key;
-
- /* get hold of the auxiliary data and prepend the object type */
- auxdata = buffer;
- auxlen = cookie->aux_len;
- if (auxlen) {
- if (auxlen <= sizeof(cookie->inline_aux))
- p = cookie->inline_aux;
- else
- p = cookie->aux;
- memcpy(auxdata->data, p, auxlen);
- }
+ spin_lock_init(&object->lock);
+ INIT_LIST_HEAD(&object->cache_link);
+ object->volume = volume;
+ object->debug_id = atomic_inc_return(&cachefiles_object_debug_id);
+ object->cookie = fscache_get_cookie(cookie, fscache_cookie_get_attach_object);
- auxdata->len = auxlen + 1;
- auxdata->type = cookie->type;
-
- lookup_data->auxdata = auxdata;
- lookup_data->key = key;
- object->lookup_data = lookup_data;
-
- _leave(" = %x [%p]", object->fscache.debug_id, lookup_data);
- return &object->fscache;
-
-nomem_key:
- kfree(buffer);
-nomem_buffer:
- BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
- kmem_cache_free(cachefiles_object_jar, object);
- fscache_object_destroyed(&cache->cache);
-nomem_object:
- kfree(lookup_data);
-nomem_lookup_data:
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ fscache_count_object(vcookie->cache);
+ trace_cachefiles_ref(object->debug_id, cookie->debug_id, 1,
+ cachefiles_obj_new);
+ return object;
}
/*
- * attempt to look up the nominated node in this cache
- * - return -ETIMEDOUT to be scheduled again
+ * Note that an object has been seen.
*/
-static int cachefiles_lookup_object(struct fscache_object *_object)
+void cachefiles_see_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why)
{
- struct cachefiles_lookup_data *lookup_data;
- struct cachefiles_object *parent, *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- int ret;
-
- _enter("{OBJ%x}", _object->debug_id);
-
- cache = container_of(_object->cache, struct cachefiles_cache, cache);
- parent = container_of(_object->parent,
- struct cachefiles_object, fscache);
- object = container_of(_object, struct cachefiles_object, fscache);
- lookup_data = object->lookup_data;
-
- ASSERTCMP(lookup_data, !=, NULL);
-
- /* look up the key, creating any missing bits */
- cachefiles_begin_secure(cache, &saved_cred);
- ret = cachefiles_walk_to_object(parent, object,
- lookup_data->key,
- lookup_data->auxdata);
- cachefiles_end_secure(cache, saved_cred);
-
- /* polish off by setting the attributes of non-index files */
- if (ret == 0 &&
- object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
- cachefiles_attr_changed(&object->fscache);
-
- if (ret < 0 && ret != -ETIMEDOUT) {
- if (ret != -ENOBUFS)
- pr_warn("Lookup failed error %d\n", ret);
- fscache_object_lookup_error(&object->fscache);
- }
-
- _leave(" [%d]", ret);
- return ret;
+ trace_cachefiles_ref(object->debug_id, object->cookie->debug_id,
+ refcount_read(&object->ref), why);
}
/*
- * indication of lookup completion
+ * Increment the usage count on an object.
*/
-static void cachefiles_lookup_complete(struct fscache_object *_object)
+struct cachefiles_object *cachefiles_grab_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why)
{
- struct cachefiles_object *object;
-
- object = container_of(_object, struct cachefiles_object, fscache);
+ int r;
- _enter("{OBJ%x,%p}", object->fscache.debug_id, object->lookup_data);
-
- if (object->lookup_data) {
- kfree(object->lookup_data->key);
- kfree(object->lookup_data->auxdata);
- kfree(object->lookup_data);
- object->lookup_data = NULL;
- }
+ __refcount_inc(&object->ref, &r);
+ trace_cachefiles_ref(object->debug_id, object->cookie->debug_id, r, why);
+ return object;
}
/*
- * increment the usage count on an inode object (may fail if unmounting)
+ * dispose of a reference to an object
*/
-static
-struct fscache_object *cachefiles_grab_object(struct fscache_object *_object,
- enum fscache_obj_ref_trace why)
+void cachefiles_put_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why)
{
- struct cachefiles_object *object =
- container_of(_object, struct cachefiles_object, fscache);
- int u;
+ unsigned int object_debug_id = object->debug_id;
+ unsigned int cookie_debug_id = object->cookie->debug_id;
+ struct fscache_cache *cache;
+ bool done;
+ int r;
+
+ done = __refcount_dec_and_test(&object->ref, &r);
+ trace_cachefiles_ref(object_debug_id, cookie_debug_id, r, why);
+ if (done) {
+ _debug("- kill object OBJ%x", object_debug_id);
- _enter("{OBJ%x,%d}", _object->debug_id, atomic_read(&object->usage));
+ ASSERTCMP(object->file, ==, NULL);
-#ifdef CACHEFILES_DEBUG_SLAB
- ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
-#endif
+ kfree(object->d_name);
+ cachefiles_ondemand_deinit_obj_info(object);
+ cache = object->volume->cache->cache;
+ fscache_put_cookie(object->cookie, fscache_cookie_put_object);
+ object->cookie = NULL;
+ kmem_cache_free(cachefiles_object_jar, object);
+ fscache_uncount_object(cache);
+ }
- u = atomic_inc_return(&object->usage);
- trace_cachefiles_ref(object, _object->cookie,
- (enum cachefiles_obj_ref_trace)why, u);
- return &object->fscache;
+ _leave("");
}
/*
- * update the auxiliary data for an object object on disk
+ * Adjust the size of a cache file if necessary to match the DIO size. We keep
+ * the EOF marker a multiple of DIO blocks so that we don't fall back to doing
+ * non-DIO for a partial block straddling the EOF, but we also have to be
+ * careful of someone expanding the file and accidentally accreting the
+ * padding.
*/
-static void cachefiles_update_object(struct fscache_object *_object)
+static int cachefiles_adjust_size(struct cachefiles_object *object)
{
- struct cachefiles_object *object;
- struct cachefiles_xattr *auxdata;
- struct cachefiles_cache *cache;
- struct fscache_cookie *cookie;
- const struct cred *saved_cred;
- const void *aux;
- unsigned auxlen;
+ struct iattr newattrs;
+ struct file *file = object->file;
+ uint64_t ni_size;
+ loff_t oi_size;
+ int ret;
- _enter("{OBJ%x}", _object->debug_id);
+ ni_size = object->cookie->object_size;
+ ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
- object = container_of(_object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache, struct cachefiles_cache,
- cache);
+ _enter("{OBJ%x},[%llu]",
+ object->debug_id, (unsigned long long) ni_size);
- if (!fscache_use_cookie(_object)) {
- _leave(" [relinq]");
- return;
- }
+ if (!file)
+ return -ENOBUFS;
- cookie = object->fscache.cookie;
- auxlen = cookie->aux_len;
+ oi_size = i_size_read(file_inode(file));
+ if (oi_size == ni_size)
+ return 0;
- if (!auxlen) {
- fscache_unuse_cookie(_object);
- _leave(" [no aux]");
- return;
- }
+ inode_lock(file_inode(file));
- auxdata = kmalloc(2 + auxlen + 3, cachefiles_gfp);
- if (!auxdata) {
- fscache_unuse_cookie(_object);
- _leave(" [nomem]");
- return;
+ /* if there's an extension to a partial page at the end of the backing
+ * file, we need to discard the partial page so that we pick up new
+ * data after it */
+ if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+ _debug("discard tail %llx", oi_size);
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = oi_size & PAGE_MASK;
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = notify_change(&nop_mnt_idmap, file->f_path.dentry,
+ &newattrs, NULL);
+ if (ret < 0)
+ goto truncate_failed;
}
- aux = (auxlen <= sizeof(cookie->inline_aux)) ?
- cookie->inline_aux : cookie->aux;
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = ni_size;
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = notify_change(&nop_mnt_idmap, file->f_path.dentry,
+ &newattrs, NULL);
- memcpy(auxdata->data, aux, auxlen);
- fscache_unuse_cookie(_object);
+truncate_failed:
+ inode_unlock(file_inode(file));
- auxdata->len = auxlen + 1;
- auxdata->type = cookie->type;
+ if (ret < 0)
+ trace_cachefiles_io_error(NULL, file_inode(file), ret,
+ cachefiles_trace_notify_change_error);
+ if (ret == -EIO) {
+ cachefiles_io_error_obj(object, "Size set failed");
+ ret = -ENOBUFS;
+ }
- cachefiles_begin_secure(cache, &saved_cred);
- cachefiles_update_object_xattr(object, auxdata);
- cachefiles_end_secure(cache, saved_cred);
- kfree(auxdata);
- _leave("");
+ _leave(" = %d", ret);
+ return ret;
}
/*
- * discard the resources pinned by an object and effect retirement if
- * requested
+ * Attempt to look up the nominated node in this cache
*/
-static void cachefiles_drop_object(struct fscache_object *_object)
+static bool cachefiles_lookup_cookie(struct fscache_cookie *cookie)
{
struct cachefiles_object *object;
- struct cachefiles_cache *cache;
+ struct cachefiles_cache *cache = cookie->volume->cache->cache_priv;
const struct cred *saved_cred;
- struct inode *inode;
- blkcnt_t i_blocks = 0;
+ bool success;
- ASSERT(_object);
+ object = cachefiles_alloc_object(cookie);
+ if (!object)
+ goto fail;
- object = container_of(_object, struct cachefiles_object, fscache);
+ _enter("{OBJ%x}", object->debug_id);
- _enter("{OBJ%x,%d}",
- object->fscache.debug_id, atomic_read(&object->usage));
+ if (!cachefiles_cook_key(object))
+ goto fail_put;
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ cookie->cache_priv = object;
-#ifdef CACHEFILES_DEBUG_SLAB
- ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
-#endif
+ cachefiles_begin_secure(cache, &saved_cred);
- /* We need to tidy the object up if we did in fact manage to open it.
- * It's possible for us to get here before the object is fully
- * initialised if the parent goes away or the object gets retired
- * before we set it up.
- */
- if (object->dentry) {
- /* delete retired objects */
- if (test_bit(FSCACHE_OBJECT_RETIRED, &object->fscache.flags) &&
- _object != cache->cache.fsdef
- ) {
- _debug("- retire object OBJ%x", object->fscache.debug_id);
- inode = d_backing_inode(object->dentry);
- if (inode)
- i_blocks = inode->i_blocks;
-
- cachefiles_begin_secure(cache, &saved_cred);
- cachefiles_delete_object(cache, object);
- cachefiles_end_secure(cache, saved_cred);
- }
+ success = cachefiles_look_up_object(object);
+ if (!success)
+ goto fail_withdraw;
- /* close the filesystem stuff attached to the object */
- if (object->backer != object->dentry)
- dput(object->backer);
- object->backer = NULL;
- }
+ cachefiles_see_object(object, cachefiles_obj_see_lookup_cookie);
+
+ spin_lock(&cache->object_list_lock);
+ list_add(&object->cache_link, &cache->object_list);
+ spin_unlock(&cache->object_list_lock);
+ cachefiles_adjust_size(object);
- /* note that the object is now inactive */
- if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
- cachefiles_mark_object_inactive(cache, object, i_blocks);
+ cachefiles_end_secure(cache, saved_cred);
+ _leave(" = t");
+ return true;
- dput(object->dentry);
- object->dentry = NULL;
+fail_withdraw:
+ cachefiles_end_secure(cache, saved_cred);
+ cachefiles_see_object(object, cachefiles_obj_see_lookup_failed);
+ fscache_caching_failed(cookie);
+ _debug("failed c=%08x o=%08x", cookie->debug_id, object->debug_id);
+ /* The caller holds an access count on the cookie, so we need them to
+ * drop it before we can withdraw the object.
+ */
+ return false;
- _leave("");
+fail_put:
+ cachefiles_put_object(object, cachefiles_obj_put_alloc_fail);
+fail:
+ return false;
}
/*
- * dispose of a reference to an object
+ * Shorten the backing object to discard any dirty data and free up
+ * any unused granules.
*/
-void cachefiles_put_object(struct fscache_object *_object,
- enum fscache_obj_ref_trace why)
+static bool cachefiles_shorten_object(struct cachefiles_object *object,
+ struct file *file, loff_t new_size)
{
- struct cachefiles_object *object;
- struct fscache_cache *cache;
- int u;
-
- ASSERT(_object);
-
- object = container_of(_object, struct cachefiles_object, fscache);
-
- _enter("{OBJ%x,%d}",
- object->fscache.debug_id, atomic_read(&object->usage));
-
-#ifdef CACHEFILES_DEBUG_SLAB
- ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
-#endif
-
- ASSERTIFCMP(object->fscache.parent,
- object->fscache.parent->n_children, >, 0);
-
- u = atomic_dec_return(&object->usage);
- trace_cachefiles_ref(object, _object->cookie,
- (enum cachefiles_obj_ref_trace)why, u);
- ASSERTCMP(u, !=, -1);
- if (u == 0) {
- _debug("- kill object OBJ%x", object->fscache.debug_id);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct inode *inode = file_inode(file);
+ loff_t i_size, dio_size;
+ int ret;
- ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
- ASSERTCMP(object->fscache.parent, ==, NULL);
- ASSERTCMP(object->backer, ==, NULL);
- ASSERTCMP(object->dentry, ==, NULL);
- ASSERTCMP(object->fscache.n_ops, ==, 0);
- ASSERTCMP(object->fscache.n_children, ==, 0);
+ dio_size = round_up(new_size, CACHEFILES_DIO_BLOCK_SIZE);
+ i_size = i_size_read(inode);
+
+ trace_cachefiles_trunc(object, inode, i_size, dio_size,
+ cachefiles_trunc_shrink);
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = vfs_truncate(&file->f_path, dio_size);
+ if (ret < 0) {
+ trace_cachefiles_io_error(object, file_inode(file), ret,
+ cachefiles_trace_trunc_error);
+ cachefiles_io_error_obj(object, "Trunc-to-size failed %d", ret);
+ cachefiles_remove_object_xattr(cache, object, file->f_path.dentry);
+ return false;
+ }
- if (object->lookup_data) {
- kfree(object->lookup_data->key);
- kfree(object->lookup_data->auxdata);
- kfree(object->lookup_data);
- object->lookup_data = NULL;
+ if (new_size < dio_size) {
+ trace_cachefiles_trunc(object, inode, dio_size, new_size,
+ cachefiles_trunc_dio_adjust);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_fallocate(file, FALLOC_FL_ZERO_RANGE,
+ new_size, dio_size - new_size);
+ if (ret < 0) {
+ trace_cachefiles_io_error(object, file_inode(file), ret,
+ cachefiles_trace_fallocate_error);
+ cachefiles_io_error_obj(object, "Trunc-to-dio-size failed %d", ret);
+ cachefiles_remove_object_xattr(cache, object, file->f_path.dentry);
+ return false;
}
-
- cache = object->fscache.cache;
- fscache_object_destroy(&object->fscache);
- kmem_cache_free(cachefiles_object_jar, object);
- fscache_object_destroyed(cache);
}
- _leave("");
+ return true;
}
/*
- * sync a cache
+ * Resize the backing object.
*/
-static void cachefiles_sync_cache(struct fscache_cache *_cache)
+static void cachefiles_resize_cookie(struct netfs_cache_resources *cres,
+ loff_t new_size)
{
- struct cachefiles_cache *cache;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct fscache_cookie *cookie = object->cookie;
const struct cred *saved_cred;
- int ret;
+ struct file *file = cachefiles_cres_file(cres);
+ loff_t old_size = cookie->object_size;
- _enter("%s", _cache->tag->name);
+ _enter("%llu->%llu", old_size, new_size);
- cache = container_of(_cache, struct cachefiles_cache, cache);
-
- /* make sure all pages pinned by operations on behalf of the netfs are
- * written to disc */
- cachefiles_begin_secure(cache, &saved_cred);
- down_read(&cache->mnt->mnt_sb->s_umount);
- ret = sync_filesystem(cache->mnt->mnt_sb);
- up_read(&cache->mnt->mnt_sb->s_umount);
- cachefiles_end_secure(cache, saved_cred);
+ if (new_size < old_size) {
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_shorten_object(object, file, new_size);
+ cachefiles_end_secure(cache, saved_cred);
+ object->cookie->object_size = new_size;
+ return;
+ }
- if (ret == -EIO)
- cachefiles_io_error(cache,
- "Attempt to sync backing fs superblock"
- " returned error %d",
- ret);
+ /* The file is being expanded. We don't need to do anything in
+ * particular. cookie->initial_size doesn't change, so the point before
+ * which we have to download doesn't change either.
+ */
+ cookie->object_size = new_size;
}
/*
- * check if the backing cache is updated to FS-Cache
- * - called by FS-Cache when evaluates if need to invalidate the cache
+ * Commit changes to the object as we drop it.
*/
-static int cachefiles_check_consistency(struct fscache_operation *op)
+static void cachefiles_commit_object(struct cachefiles_object *object,
+ struct cachefiles_cache *cache)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- int ret;
-
- _enter("{OBJ%x}", op->object->debug_id);
-
- object = container_of(op->object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ bool update = false;
- cachefiles_begin_secure(cache, &saved_cred);
- ret = cachefiles_check_auxdata(object);
- cachefiles_end_secure(cache, saved_cred);
+ if (test_and_clear_bit(FSCACHE_COOKIE_LOCAL_WRITE, &object->cookie->flags))
+ update = true;
+ if (test_and_clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags))
+ update = true;
+ if (update)
+ cachefiles_set_object_xattr(object);
- _leave(" = %d", ret);
- return ret;
+ if (test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags))
+ cachefiles_commit_tmpfile(cache, object);
}
/*
- * notification the attributes on an object have changed
- * - called with reads/writes excluded by FS-Cache
+ * Finalise an object and close the VFS structs that we have.
*/
-static int cachefiles_attr_changed(struct fscache_object *_object)
+static void cachefiles_clean_up_object(struct cachefiles_object *object,
+ struct cachefiles_cache *cache)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- struct iattr newattrs;
- uint64_t ni_size;
- loff_t oi_size;
- int ret;
-
- ni_size = _object->store_limit_l;
-
- _enter("{OBJ%x},[%llu]",
- _object->debug_id, (unsigned long long) ni_size);
-
- object = container_of(_object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- if (ni_size == object->i_size)
- return 0;
+ struct file *file;
+
+ if (test_bit(FSCACHE_COOKIE_RETIRED, &object->cookie->flags)) {
+ if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ cachefiles_see_object(object, cachefiles_obj_see_clean_delete);
+ _debug("- inval object OBJ%x", object->debug_id);
+ cachefiles_delete_object(object, FSCACHE_OBJECT_WAS_RETIRED);
+ } else {
+ cachefiles_see_object(object, cachefiles_obj_see_clean_drop_tmp);
+ _debug("- inval object OBJ%x tmpfile", object->debug_id);
+ }
+ } else {
+ cachefiles_see_object(object, cachefiles_obj_see_clean_commit);
+ cachefiles_commit_object(object, cache);
+ }
- if (!object->backer)
- return -ENOBUFS;
+ cachefiles_unmark_inode_in_use(object, object->file);
- ASSERT(d_is_reg(object->backer));
+ spin_lock(&object->lock);
+ file = object->file;
+ object->file = NULL;
+ spin_unlock(&object->lock);
- fscache_set_store_limit(&object->fscache, ni_size);
+ if (file)
+ fput(file);
+}
- oi_size = i_size_read(d_backing_inode(object->backer));
- if (oi_size == ni_size)
- return 0;
+/*
+ * Withdraw caching for a cookie.
+ */
+static void cachefiles_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ struct cachefiles_object *object = cookie->cache_priv;
+ struct cachefiles_cache *cache = object->volume->cache;
+ const struct cred *saved_cred;
- cachefiles_begin_secure(cache, &saved_cred);
- inode_lock(d_inode(object->backer));
+ _enter("o=%x", object->debug_id);
+ cachefiles_see_object(object, cachefiles_obj_see_withdraw_cookie);
- /* if there's an extension to a partial page at the end of the backing
- * file, we need to discard the partial page so that we pick up new
- * data after it */
- if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
- _debug("discard tail %llx", oi_size);
- newattrs.ia_valid = ATTR_SIZE;
- newattrs.ia_size = oi_size & PAGE_MASK;
- ret = notify_change(&init_user_ns, object->backer, &newattrs, NULL);
- if (ret < 0)
- goto truncate_failed;
+ if (!list_empty(&object->cache_link)) {
+ spin_lock(&cache->object_list_lock);
+ cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
+ list_del_init(&object->cache_link);
+ spin_unlock(&cache->object_list_lock);
}
- newattrs.ia_valid = ATTR_SIZE;
- newattrs.ia_size = ni_size;
- ret = notify_change(&init_user_ns, object->backer, &newattrs, NULL);
+ cachefiles_ondemand_clean_object(object);
-truncate_failed:
- inode_unlock(d_inode(object->backer));
- cachefiles_end_secure(cache, saved_cred);
-
- if (ret == -EIO) {
- fscache_set_store_limit(&object->fscache, 0);
- cachefiles_io_error_obj(object, "Size set failed");
- ret = -ENOBUFS;
+ if (object->file) {
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_clean_up_object(object, cache);
+ cachefiles_end_secure(cache, saved_cred);
}
- _leave(" = %d", ret);
- return ret;
+ cookie->cache_priv = NULL;
+ cachefiles_put_object(object, cachefiles_obj_put_detach);
}
/*
- * Invalidate an object
+ * Invalidate the storage associated with a cookie.
*/
-static void cachefiles_invalidate_object(struct fscache_operation *op)
+static bool cachefiles_invalidate_cookie(struct fscache_cookie *cookie)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- struct path path;
- uint64_t ni_size;
- int ret;
-
- object = container_of(op->object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- ni_size = op->object->store_limit_l;
-
- _enter("{OBJ%x},[%llu]",
- op->object->debug_id, (unsigned long long)ni_size);
-
- if (object->backer) {
- ASSERT(d_is_reg(object->backer));
+ struct cachefiles_object *object = cookie->cache_priv;
+ struct file *new_file, *old_file;
+ bool old_tmpfile;
- fscache_set_store_limit(&object->fscache, ni_size);
+ _enter("o=%x,[%llu]", object->debug_id, object->cookie->object_size);
- path.dentry = object->backer;
- path.mnt = cache->mnt;
+ old_tmpfile = test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
- cachefiles_begin_secure(cache, &saved_cred);
- ret = vfs_truncate(&path, 0);
- if (ret == 0)
- ret = vfs_truncate(&path, ni_size);
- cachefiles_end_secure(cache, saved_cred);
+ if (!object->file) {
+ fscache_resume_after_invalidation(cookie);
+ _leave(" = t [light]");
+ return true;
+ }
- if (ret != 0) {
- fscache_set_store_limit(&object->fscache, 0);
- if (ret == -EIO)
- cachefiles_io_error_obj(object,
- "Invalidate failed");
+ new_file = cachefiles_create_tmpfile(object);
+ if (IS_ERR(new_file))
+ goto failed;
+
+ /* Substitute the VFS target */
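+ /* The old backing file is swapped for a fresh, empty tmpfile under
+ * object->lock, so new I/O sees no cached data while I/O already in
+ * flight against the old file can still complete using its own file
+ * reference.
+ */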
+ _debug("sub");
+ spin_lock(&object->lock);
+
+ old_file = object->file;
+ object->file = new_file;
+ object->content_info = CACHEFILES_CONTENT_NO_DATA;
+ set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
+
+ spin_unlock(&object->lock);
+ _debug("subbed");
+
+ /* Allow I/O to take place again */
+ fscache_resume_after_invalidation(cookie);
+
+ if (old_file) {
+ if (!old_tmpfile) {
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *fan = volume->fanout[(u8)cookie->key_hash];
+ struct dentry *obj;
+
+ obj = start_removing_dentry(fan, old_file->f_path.dentry);
+ if (!IS_ERR(obj))
+ cachefiles_bury_object(volume->cache, object,
+ fan, obj,
+ FSCACHE_OBJECT_INVALIDATED);
}
+ fput(old_file);
}
- fscache_op_complete(op, true);
- _leave("");
-}
+ _leave(" = t");
+ return true;
-/*
- * dissociate a cache from all the pages it was backing
- */
-static void cachefiles_dissociate_pages(struct fscache_cache *cache)
-{
- _enter("");
+failed:
+ _leave(" = f");
+ return false;
}
const struct fscache_cache_ops cachefiles_cache_ops = {
.name = "cachefiles",
- .alloc_object = cachefiles_alloc_object,
- .lookup_object = cachefiles_lookup_object,
- .lookup_complete = cachefiles_lookup_complete,
- .grab_object = cachefiles_grab_object,
- .update_object = cachefiles_update_object,
- .invalidate_object = cachefiles_invalidate_object,
- .drop_object = cachefiles_drop_object,
- .put_object = cachefiles_put_object,
- .sync_cache = cachefiles_sync_cache,
- .attr_changed = cachefiles_attr_changed,
- .read_or_alloc_page = cachefiles_read_or_alloc_page,
- .read_or_alloc_pages = cachefiles_read_or_alloc_pages,
- .allocate_page = cachefiles_allocate_page,
- .allocate_pages = cachefiles_allocate_pages,
- .write_page = cachefiles_write_page,
- .uncache_page = cachefiles_uncache_page,
- .dissociate_pages = cachefiles_dissociate_pages,
- .check_consistency = cachefiles_check_consistency,
- .begin_read_operation = cachefiles_begin_read_operation,
+ .acquire_volume = cachefiles_acquire_volume,
+ .free_volume = cachefiles_free_volume,
+ .lookup_cookie = cachefiles_lookup_cookie,
+ .withdraw_cookie = cachefiles_withdraw_cookie,
+ .invalidate_cookie = cachefiles_invalidate_cookie,
+ .begin_operation = cachefiles_begin_operation,
+ .resize_cookie = cachefiles_resize_cookie,
+ .prepare_to_write = cachefiles_prepare_to_write,
};
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 0a511c36dab8..b62cd3e9a18e 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General netfs cache on cache files internal defs
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -13,58 +13,93 @@
#include <linux/fscache-cache.h>
-#include <linux/timer.h>
-#include <linux/wait_bit.h>
#include <linux/cred.h>
-#include <linux/workqueue.h>
#include <linux/security.h>
+#include <linux/xarray.h>
+#include <linux/cachefiles.h>
+
+#define CACHEFILES_DIO_BLOCK_SIZE 4096
struct cachefiles_cache;
struct cachefiles_object;
-extern unsigned cachefiles_debug;
-#define CACHEFILES_DEBUG_KENTER 1
-#define CACHEFILES_DEBUG_KLEAVE 2
-#define CACHEFILES_DEBUG_KDEBUG 4
+enum cachefiles_content {
+ /* These values are saved on disk */
+ CACHEFILES_CONTENT_NO_DATA = 0, /* No content stored */
+ CACHEFILES_CONTENT_SINGLE = 1, /* Content is monolithic, all is present */
+ CACHEFILES_CONTENT_ALL = 2, /* Content is all present, no map */
+ CACHEFILES_CONTENT_BACKFS_MAP = 3, /* Content is piecemeal, mapped through backing fs */
+ CACHEFILES_CONTENT_DIRTY = 4, /* Content is dirty (only seen on disk) */
+ nr__cachefiles_content
+};
-#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
+/*
+ * Cached volume representation.
+ */
+struct cachefiles_volume {
+ struct cachefiles_cache *cache;
+ struct list_head cache_link; /* Link in cache->volumes */
+ struct fscache_volume *vcookie; /* The netfs's representation */
+ struct dentry *dentry; /* The volume dentry */
+ struct dentry *fanout[256]; /* Fanout subdirs */
+};
+
+enum cachefiles_object_state {
+ CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
+ CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
+ CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
+ CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
+};
+
+struct cachefiles_ondemand_info {
+ struct work_struct ondemand_work;
+ int ondemand_id;
+ enum cachefiles_object_state state;
+ struct cachefiles_object *object;
+ spinlock_t lock;
+};
/*
- * node records
+ * Backing file state.
*/
struct cachefiles_object {
- struct fscache_object fscache; /* fscache handle */
- struct cachefiles_lookup_data *lookup_data; /* cached lookup data */
- struct dentry *dentry; /* the file/dir representing this object */
- struct dentry *backer; /* backing file */
- loff_t i_size; /* object size */
+ struct fscache_cookie *cookie; /* Netfs data storage object cookie */
+ struct cachefiles_volume *volume; /* Cache volume that holds this object */
+ struct list_head cache_link; /* Link in cache->*_list */
+ struct file *file; /* The file representing this object */
+ char *d_name; /* Backing file name */
+ int debug_id;
+ spinlock_t lock;
+ refcount_t ref;
+ enum cachefiles_content content_info:8; /* Info about content presence */
unsigned long flags;
-#define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */
- atomic_t usage; /* object usage count */
- uint8_t type; /* object type */
- uint8_t new; /* T if object new */
- spinlock_t work_lock;
- struct rb_node active_node; /* link in active tree (dentry is key) */
+#define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */
+#ifdef CONFIG_CACHEFILES_ONDEMAND
+ struct cachefiles_ondemand_info *ondemand;
+#endif
};
-extern struct kmem_cache *cachefiles_object_jar;
+#define CACHEFILES_ONDEMAND_ID_CLOSED -1
/*
* Cache files cache definition
*/
struct cachefiles_cache {
- struct fscache_cache cache; /* FS-Cache record */
+ struct fscache_cache *cache; /* Cache cookie */
struct vfsmount *mnt; /* mountpoint holding the cache */
+ struct dentry *store; /* Directory into which live objects go */
struct dentry *graveyard; /* directory into which dead objects go */
struct file *cachefilesd; /* manager daemon handle */
+ struct list_head volumes; /* List of volume objects */
+ struct list_head object_list; /* List of active objects */
+ spinlock_t object_list_lock; /* Lock for volumes and object_list */
const struct cred *cache_cred; /* security override for accessing cache */
struct mutex daemon_mutex; /* command serialisation mutex */
wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
- struct rb_root active_nodes; /* active nodes (can't be culled) */
- rwlock_t active_lock; /* lock for active_nodes */
atomic_t gravecounter; /* graveyard uniquifier */
atomic_t f_released; /* number of objects released lately */
atomic_long_t b_released; /* number of blocks released lately */
+ atomic_long_t b_writing; /* Number of blocks being written */
unsigned frun_percent; /* when to stop culling (% files) */
unsigned fcull_percent; /* when to start culling (% files) */
unsigned fstop_percent; /* when to stop allocating (% files) */
@@ -72,7 +107,7 @@ struct cachefiles_cache {
unsigned bcull_percent; /* when to start culling (% blocks) */
unsigned bstop_percent; /* when to stop allocating (% blocks) */
unsigned bsize; /* cache's block size */
- unsigned bshift; /* min(ilog2(PAGE_SIZE / bsize), 0) */
+ unsigned bshift; /* ilog2(bsize) */
uint64_t frun; /* when to stop culling */
uint64_t fcull; /* when to start culling */
uint64_t fstop; /* when to stop allocating */
@@ -84,44 +119,49 @@ struct cachefiles_cache {
#define CACHEFILES_DEAD 1 /* T if cache dead */
#define CACHEFILES_CULLING 2 /* T if cull engaged */
#define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */
+#define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */
char *rootdirname; /* name of cache root directory */
- char *secctx; /* LSM security context */
char *tag; /* cache binding tag */
+ refcount_t unbind_pincount; /* refcount to do daemon unbind */
+ struct xarray reqs; /* xarray of pending on-demand requests */
+ unsigned long req_id_next;
+ struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
+ u32 ondemand_id_next;
+ u32 msg_id_next;
+ u32 secid; /* LSM security id */
+ bool have_secid; /* whether "secid" was set */
};
-/*
- * backing file read tracking
- */
-struct cachefiles_one_read {
- wait_queue_entry_t monitor; /* link into monitored waitqueue */
- struct page *back_page; /* backing file page we're waiting for */
- struct page *netfs_page; /* netfs page we're going to fill */
- struct fscache_retrieval *op; /* retrieval op covering this */
- struct list_head op_link; /* link in op's todo list */
-};
+static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+{
+ return IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND) &&
+ test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
+}
-/*
- * backing file write tracking
- */
-struct cachefiles_one_write {
- struct page *netfs_page; /* netfs page to copy */
- struct cachefiles_object *object;
- struct list_head obj_link; /* link in object's lists */
- fscache_rw_complete_t end_io_func;
- void *context;
+struct cachefiles_req {
+ struct cachefiles_object *object;
+ struct completion done;
+ refcount_t ref;
+ int error;
+ struct cachefiles_msg msg;
};
-/*
- * auxiliary data xattr buffer
- */
-struct cachefiles_xattr {
- uint16_t len;
- uint8_t type;
- uint8_t data[];
-};
+#define CACHEFILES_REQ_NEW XA_MARK_1
#include <trace/events/cachefiles.h>
+static inline
+struct file *cachefiles_cres_file(struct netfs_cache_resources *cres)
+{
+ return cres->cache_priv2;
+}
+
+static inline
+struct cachefiles_object *cachefiles_cres_object(struct netfs_cache_resources *cres)
+{
+ return fscache_cres_cookie(cres)->cache_priv;
+}
+
/*
* note change of state for daemon
*/
@@ -132,74 +172,215 @@ static inline void cachefiles_state_changed(struct cachefiles_cache *cache)
}
/*
- * bind.c
+ * cache.c
*/
-extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
-extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);
+extern int cachefiles_add_cache(struct cachefiles_cache *cache);
+extern void cachefiles_withdraw_cache(struct cachefiles_cache *cache);
+
+enum cachefiles_has_space_for {
+ cachefiles_has_space_check,
+ cachefiles_has_space_for_write,
+ cachefiles_has_space_for_create,
+};
+extern int cachefiles_has_space(struct cachefiles_cache *cache,
+ unsigned fnr, unsigned bnr,
+ enum cachefiles_has_space_for reason);
/*
* daemon.c
*/
extern const struct file_operations cachefiles_daemon_fops;
+extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
+extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
+extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
-extern int cachefiles_has_space(struct cachefiles_cache *cache,
- unsigned fnr, unsigned bnr);
+/*
+ * error_inject.c
+ */
+#ifdef CONFIG_CACHEFILES_ERROR_INJECTION
+extern unsigned int cachefiles_error_injection_state;
+extern int cachefiles_register_error_injection(void);
+extern void cachefiles_unregister_error_injection(void);
+
+#else
+#define cachefiles_error_injection_state 0
+
+static inline int cachefiles_register_error_injection(void)
+{
+ return 0;
+}
+
+static inline void cachefiles_unregister_error_injection(void)
+{
+}
+#endif
+
+
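+/* The injection state is a small bitmask: bit 0 makes writes fail with
+ * -ENOSPC, bit 1 makes reads, writes and removals fail with -EIO.
+ */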
+static inline int cachefiles_inject_read_error(void)
+{
+ return cachefiles_error_injection_state & 2 ? -EIO : 0;
+}
+
+static inline int cachefiles_inject_write_error(void)
+{
+ return cachefiles_error_injection_state & 2 ? -EIO :
+ cachefiles_error_injection_state & 1 ? -ENOSPC :
+ 0;
+}
+
+static inline int cachefiles_inject_remove_error(void)
+{
+ return cachefiles_error_injection_state & 2 ? -EIO : 0;
+}
/*
* interface.c
*/
extern const struct fscache_cache_ops cachefiles_cache_ops;
+extern void cachefiles_see_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why);
+extern struct cachefiles_object *cachefiles_grab_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why);
+extern void cachefiles_put_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why);
-void cachefiles_put_object(struct fscache_object *_object,
- enum fscache_obj_ref_trace why);
+/*
+ * io.c
+ */
+extern bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
+extern int __cachefiles_prepare_write(struct cachefiles_object *object,
+ struct file *file,
+ loff_t *_start, size_t *_len, size_t upper_len,
+ bool no_space_allocated_yet);
+extern int __cachefiles_write(struct cachefiles_object *object,
+ struct file *file,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
/*
* key.c
*/
-extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type);
+extern bool cachefiles_cook_key(struct cachefiles_object *object);
+
+/*
+ * main.c
+ */
+extern struct kmem_cache *cachefiles_object_jar;
/*
* namei.c
*/
-extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
- struct cachefiles_object *object,
- blkcnt_t i_blocks);
-extern int cachefiles_delete_object(struct cachefiles_cache *cache,
- struct cachefiles_object *object);
-extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
- struct cachefiles_object *object,
- const char *key,
- struct cachefiles_xattr *auxdata);
+extern void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ struct file *file);
+extern int cachefiles_bury_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dir,
+ struct dentry *rep,
+ enum fscache_why_object_killed why);
+extern int cachefiles_delete_object(struct cachefiles_object *object,
+ enum fscache_why_object_killed why);
+extern bool cachefiles_look_up_object(struct cachefiles_object *object);
extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
struct dentry *dir,
- const char *name);
+ const char *name,
+ bool *_is_new);
+extern void cachefiles_put_directory(struct dentry *dir);
extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
char *filename);
extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
struct dentry *dir, char *filename);
+extern struct file *cachefiles_create_tmpfile(struct cachefiles_object *object);
+extern bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
+ struct cachefiles_object *object);
/*
- * rdwr.c
+ * ondemand.c
*/
-extern int cachefiles_read_or_alloc_page(struct fscache_retrieval *,
- struct page *, gfp_t);
-extern int cachefiles_read_or_alloc_pages(struct fscache_retrieval *,
- struct list_head *, unsigned *,
- gfp_t);
-extern int cachefiles_allocate_page(struct fscache_retrieval *, struct page *,
- gfp_t);
-extern int cachefiles_allocate_pages(struct fscache_retrieval *,
- struct list_head *, unsigned *, gfp_t);
-extern int cachefiles_write_page(struct fscache_storage *, struct page *);
-extern void cachefiles_uncache_page(struct fscache_object *, struct page *);
+#ifdef CONFIG_CACHEFILES_ONDEMAND
+extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ char __user *_buffer, size_t buflen);
+
+extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
+ char *args);
+
+extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache,
+ char *args);
+
+extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
+extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
+
+extern int cachefiles_ondemand_read(struct cachefiles_object *object,
+ loff_t pos, size_t len);
+
+extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
+ struct cachefiles_volume *volume);
+extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj);
+
+#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE) \
+static inline bool \
+cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
+{ \
+ return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+} \
+ \
+static inline void \
+cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
+{ \
+ object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+}
-/*
- * rdwr2.c
- */
-extern int cachefiles_begin_read_operation(struct netfs_read_request *,
- struct fscache_retrieval *);
+CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
+CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
+CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
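+/* Each invocation above generates an is/set pair, e.g. the "open" one gives
+ * cachefiles_ondemand_object_is_open() and cachefiles_ondemand_set_object_open().
+ */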
+
+static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+{
+ return cachefiles_ondemand_object_is_reopening(req->object) &&
+ req->msg.opcode == CACHEFILES_OP_READ;
+}
+
+#else
+static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ char __user *_buffer, size_t buflen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+{
+ return 0;
+}
+
+static inline void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+{
+}
+
+static inline int cachefiles_ondemand_read(struct cachefiles_object *object,
+ loff_t pos, size_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
+ struct cachefiles_volume *volume)
+{
+ return 0;
+}
+static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
+{
+}
+
+static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+{
+ return false;
+}
+#endif
/*
* security.c
@@ -222,44 +403,55 @@ static inline void cachefiles_end_secure(struct cachefiles_cache *cache,
}
/*
+ * volume.c
+ */
+void cachefiles_acquire_volume(struct fscache_volume *volume);
+void cachefiles_free_volume(struct fscache_volume *volume);
+void cachefiles_withdraw_volume(struct cachefiles_volume *volume);
+
+/*
* xattr.c
*/
-extern int cachefiles_check_object_type(struct cachefiles_object *object);
-extern int cachefiles_set_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata);
-extern int cachefiles_update_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata);
-extern int cachefiles_check_auxdata(struct cachefiles_object *object);
-extern int cachefiles_check_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata);
+extern int cachefiles_set_object_xattr(struct cachefiles_object *object);
+extern int cachefiles_check_auxdata(struct cachefiles_object *object,
+ struct file *file);
extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
struct dentry *dentry);
-
+extern void cachefiles_prepare_to_write(struct fscache_cookie *cookie);
+extern bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume);
+extern int cachefiles_check_volume_xattr(struct cachefiles_volume *volume);
/*
- * error handling
+ * Error handling
*/
-
#define cachefiles_io_error(___cache, FMT, ...) \
do { \
pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
- fscache_io_error(&(___cache)->cache); \
+ fscache_io_error((___cache)->cache); \
set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+ if (cachefiles_in_ondemand_mode(___cache)) \
+ cachefiles_flush_reqs(___cache); \
} while (0)
#define cachefiles_io_error_obj(object, FMT, ...) \
do { \
struct cachefiles_cache *___cache; \
\
- ___cache = container_of((object)->fscache.cache, \
- struct cachefiles_cache, cache); \
- cachefiles_io_error(___cache, FMT, ##__VA_ARGS__); \
+ ___cache = (object)->volume->cache; \
+ cachefiles_io_error(___cache, FMT " [o=%08x]", ##__VA_ARGS__, \
+ (object)->debug_id); \
} while (0)
/*
- * debug tracing
+ * Debug tracing
*/
+extern unsigned cachefiles_debug;
+#define CACHEFILES_DEBUG_KENTER 1
+#define CACHEFILES_DEBUG_KLEAVE 2
+#define CACHEFILES_DEBUG_KDEBUG 4
+
#define dbgprintk(FMT, ...) \
printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index effe37ef8629..3e0576d9db1d 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -9,8 +9,11 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
+#include <linux/bio.h>
+#include <linux/falloc.h>
#include <linux/sched/mm.h>
-#include <linux/netfs.h>
+#include <trace/events/fscache.h>
+#include <trace/events/netfs.h>
#include "internal.h"
struct cachefiles_kiocb {
@@ -21,14 +24,18 @@ struct cachefiles_kiocb {
size_t skipped;
size_t len;
};
+ struct cachefiles_object *object;
netfs_io_terminated_t term_func;
void *term_func_priv;
bool was_async;
+ unsigned int inval_counter; /* Copy of cookie->inval_counter */
+ u64 b_writing;
};
static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
if (refcount_dec_and_test(&ki->ki_refcnt)) {
+ cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
fput(ki->iocb.ki_filp);
kfree(ki);
}
@@ -40,13 +47,23 @@ static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
+ struct inode *inode = file_inode(ki->iocb.ki_filp);
_enter("%ld", ret);
+ if (ret < 0)
+ trace_cachefiles_io_error(ki->object, inode, ret,
+ cachefiles_trace_read_error);
+
if (ki->term_func) {
- if (ret >= 0)
- ret += ki->skipped;
- ki->term_func(ki->term_func_priv, ret, ki->was_async);
+ if (ret >= 0) {
+ if (ki->object->cookie->inval_counter == ki->inval_counter)
+ ki->skipped += ret;
+ else
+ ret = -ESTALE;
+ }
+
+ ki->term_func(ki->term_func_priv, ret);
}
cachefiles_put_kiocb(ki);
@@ -58,16 +75,24 @@ static void cachefiles_read_complete(struct kiocb *iocb, long ret)
static int cachefiles_read(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
- bool seek_data,
+ enum netfs_read_from_hole read_hole,
netfs_io_terminated_t term_func,
void *term_func_priv)
{
+ struct cachefiles_object *object;
struct cachefiles_kiocb *ki;
- struct file *file = cres->cache_priv2;
+ struct file *file;
unsigned int old_nofs;
ssize_t ret = -ENOBUFS;
size_t len = iov_iter_count(iter), skipped = 0;
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ goto presubmission_error;
+
+ fscache_count_read();
+ object = cachefiles_cres_object(cres);
+ file = cachefiles_cres_file(cres);
+
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file_inode(file)));
@@ -75,10 +100,12 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
/* If the caller asked us to seek for data before doing the read, then
* we should do that now. If we find a gap, we fill it with zeros.
*/
- if (seek_data) {
+ if (read_hole != NETFS_READ_HOLE_IGNORE) {
loff_t off = start_pos, off2;
- off2 = vfs_llseek(file, off, SEEK_DATA);
+ off2 = cachefiles_inject_read_error();
+ if (off2 == 0)
+ off2 = vfs_llseek(file, off, SEEK_DATA);
if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
skipped = 0;
ret = off2;
@@ -90,6 +117,10 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
* in the region, so clear the rest of the buffer and
* return success.
*/
+ ret = -ENODATA;
+ if (read_hole == NETFS_READ_HOLE_FAIL)
+ goto presubmission_error;
+
iov_iter_zero(len, iter);
skipped = len;
ret = 0;
@@ -100,7 +131,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
iov_iter_zero(skipped, iter);
}
- ret = -ENOBUFS;
+ ret = -ENOMEM;
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki)
goto presubmission_error;
@@ -109,9 +140,10 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
ki->iocb.ki_filp = file;
ki->iocb.ki_pos = start_pos + skipped;
ki->iocb.ki_flags = IOCB_DIRECT;
- ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file));
ki->iocb.ki_ioprio = get_current_ioprio();
ki->skipped = skipped;
+ ki->object = object;
+ ki->inval_counter = cres->inval_counter;
ki->term_func = term_func;
ki->term_func_priv = term_func_priv;
ki->was_async = true;
@@ -120,9 +152,13 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
ki->iocb.ki_complete = cachefiles_read_complete;
get_file(ki->iocb.ki_filp);
+ cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
+ trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
old_nofs = memalloc_nofs_save();
- ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
memalloc_nofs_restore(old_nofs);
switch (ret) {
case -EIOCBQUEUED:
@@ -152,82 +188,148 @@ in_progress:
presubmission_error:
if (term_func)
- term_func(term_func_priv, ret < 0 ? ret : skipped, false);
+ term_func(term_func_priv, ret < 0 ? ret : skipped);
return ret;
}
/*
+ * Query the occupancy of the cache in a region, returning where the next chunk
+ * of data starts and how long it is.
+ */
+static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len)
+{
+ struct cachefiles_object *object;
+ struct file *file;
+ loff_t off, off2;
+
+ *_data_start = -1;
+ *_data_len = 0;
+
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ return -ENOBUFS;
+
+ object = cachefiles_cres_object(cres);
+ file = cachefiles_cres_file(cres);
+ granularity = max_t(size_t, object->volume->cache->bsize, granularity);
+
+ _enter("%pD,%li,%llx,%zx/%llx",
+ file, file_inode(file)->i_ino, start, len,
+ i_size_read(file_inode(file)));
+
+ off = cachefiles_inject_read_error();
+ if (off == 0)
+ off = vfs_llseek(file, start, SEEK_DATA);
+ if (off == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off < 0 && off >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+ if (round_up(off, granularity) >= start + len)
+ return -ENODATA; /* No data in range */
+
+ off2 = cachefiles_inject_read_error();
+ if (off2 == 0)
+ off2 = vfs_llseek(file, off, SEEK_HOLE);
+ if (off2 == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+
+ /* Round away partial blocks */
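+ /* (e.g. data found at 0x0f00 with the following hole at 0x2100 is
+ * reported, assuming 4KiB granules, as the single granule 0x1000-0x1fff)
+ */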
+ off = round_up(off, granularity);
+ off2 = round_down(off2, granularity);
+ if (off2 <= off)
+ return -ENODATA;
+
+ *_data_start = off;
+ if (off2 > start + len)
+ *_data_len = len;
+ else
+ *_data_len = off2 - off;
+ return 0;
+}
+
+/*
* Handle completion of a write to the cache.
*/
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
+ struct cachefiles_object *object = ki->object;
struct inode *inode = file_inode(ki->iocb.ki_filp);
_enter("%ld", ret);
- /* Tell lockdep we inherited freeze protection from submission thread */
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- __sb_end_write(inode->i_sb, SB_FREEZE_WRITE);
+ if (ki->was_async)
+ kiocb_end_write(iocb);
- if (ki->term_func)
- ki->term_func(ki->term_func_priv, ret, ki->was_async);
+ if (ret < 0)
+ trace_cachefiles_io_error(object, inode, ret,
+ cachefiles_trace_write_error);
+ atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
+ set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
+ if (ki->term_func)
+ ki->term_func(ki->term_func_priv, ret);
cachefiles_put_kiocb(ki);
}
/*
* Initiate a write to the cache.
*/
-static int cachefiles_write(struct netfs_cache_resources *cres,
- loff_t start_pos,
- struct iov_iter *iter,
- netfs_io_terminated_t term_func,
- void *term_func_priv)
+int __cachefiles_write(struct cachefiles_object *object,
+ struct file *file,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
+ struct cachefiles_cache *cache;
struct cachefiles_kiocb *ki;
- struct inode *inode;
- struct file *file = cres->cache_priv2;
unsigned int old_nofs;
- ssize_t ret = -ENOBUFS;
+ ssize_t ret;
size_t len = iov_iter_count(iter);
+ fscache_count_write();
+ cache = object->volume->cache;
+
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file_inode(file)));
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
- if (!ki)
- goto presubmission_error;
+ if (!ki) {
+ if (term_func)
+ term_func(term_func_priv, -ENOMEM);
+ return -ENOMEM;
+ }
refcount_set(&ki->ki_refcnt, 2);
ki->iocb.ki_filp = file;
ki->iocb.ki_pos = start_pos;
ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE;
- ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file));
ki->iocb.ki_ioprio = get_current_ioprio();
+ ki->object = object;
ki->start = start_pos;
ki->len = len;
ki->term_func = term_func;
ki->term_func_priv = term_func_priv;
ki->was_async = true;
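+ /* Number of whole cache blocks covered by this write, i.e.
+ * DIV_ROUND_UP(len, 1 << bshift).
+ */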
+ ki->b_writing = (len + (1 << cache->bshift) - 1) >> cache->bshift;
if (ki->term_func)
ki->iocb.ki_complete = cachefiles_write_complete;
-
- /* Open-code file_start_write here to grab freeze protection, which
- * will be released by another thread in aio_complete_rw(). Fool
- * lockdep by telling it the lock got released so that it doesn't
- * complain about the held lock when we return to userspace.
- */
- inode = file_inode(file);
- __sb_start_write(inode->i_sb, SB_FREEZE_WRITE);
- __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+ atomic_long_add(ki->b_writing, &cache->b_writing);
get_file(ki->iocb.ki_filp);
+ cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
+ trace_cachefiles_write(object, file_inode(file), ki->iocb.ki_pos, len);
old_nofs = memalloc_nofs_save();
- ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
memalloc_nofs_restore(old_nofs);
switch (ret) {
case -EIOCBQUEUED:
@@ -245,8 +347,6 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
default:
ki->was_async = false;
cachefiles_write_complete(&ki->iocb, ret);
- if (ret > 0)
- ret = 0;
break;
}
@@ -254,96 +354,361 @@ in_progress:
cachefiles_put_kiocb(ki);
_leave(" = %zd", ret);
return ret;
+}
-presubmission_error:
- if (term_func)
- term_func(term_func_priv, -ENOMEM, false);
- return -ENOMEM;
+static int cachefiles_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
+{
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
+ if (term_func)
+ term_func(term_func_priv, -ENOBUFS);
+ trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite);
+ return -ENOBUFS;
+ }
+
+ return __cachefiles_write(cachefiles_cres_object(cres),
+ cachefiles_cres_file(cres),
+ start_pos, iter,
+ term_func, term_func_priv);
}
-/*
- * Prepare a read operation, shortening it to a cached/uncached
- * boundary as appropriate.
- */
-static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq,
- loff_t i_size)
+static inline enum netfs_io_source
+cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
+ loff_t start, size_t *_len, loff_t i_size,
+ unsigned long *_flags, ino_t netfs_ino)
{
- struct fscache_retrieval *op = subreq->rreq->cache_resources.cache_priv;
- struct cachefiles_object *object;
+ enum cachefiles_prepare_read_trace why;
+ struct cachefiles_object *object = NULL;
struct cachefiles_cache *cache;
+ struct fscache_cookie *cookie = fscache_cres_cookie(cres);
const struct cred *saved_cred;
- struct file *file = subreq->rreq->cache_resources.cache_priv2;
+ struct file *file = cachefiles_cres_file(cres);
+ enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
+ size_t len = *_len;
loff_t off, to;
+ ino_t ino = file ? file_inode(file)->i_ino : 0;
+ int rc;
- _enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size);
+ _enter("%zx @%llx/%llx", len, start, i_size);
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ if (start >= i_size) {
+ ret = NETFS_FILL_WITH_ZEROES;
+ why = cachefiles_trace_read_after_eof;
+ goto out_no_object;
+ }
- if (!file)
- goto cache_fail_nosec;
+ if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
+ __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
+ why = cachefiles_trace_read_no_data;
+ if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
+ goto out_no_object;
+ }
- if (subreq->start >= i_size)
- return NETFS_FILL_WITH_ZEROES;
+ /* The object and the file may be being created in the background. */
+ if (!file) {
+ why = cachefiles_trace_read_no_file;
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ goto out_no_object;
+ file = cachefiles_cres_file(cres);
+ if (!file)
+ goto out_no_object;
+ ino = file_inode(file)->i_ino;
+ }
+ object = cachefiles_cres_object(cres);
+ cache = object->volume->cache;
cachefiles_begin_secure(cache, &saved_cred);
-
- off = vfs_llseek(file, subreq->start, SEEK_DATA);
+retry:
+ off = cachefiles_inject_read_error();
+ if (off == 0)
+ off = vfs_llseek(file, start, SEEK_DATA);
if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
- if (off == (loff_t)-ENXIO)
+ if (off == (loff_t)-ENXIO) {
+ why = cachefiles_trace_read_seek_nxio;
goto download_and_store;
- goto cache_fail;
+ }
+ trace_cachefiles_io_error(object, file_inode(file), off,
+ cachefiles_trace_seek_error);
+ why = cachefiles_trace_read_seek_error;
+ goto out;
}
- if (off >= subreq->start + subreq->len)
+ if (off >= start + len) {
+ why = cachefiles_trace_read_found_hole;
goto download_and_store;
+ }
- if (off > subreq->start) {
+ if (off > start) {
off = round_up(off, cache->bsize);
- subreq->len = off - subreq->start;
+ len = off - start;
+ *_len = len;
+ why = cachefiles_trace_read_found_part;
goto download_and_store;
}
- to = vfs_llseek(file, subreq->start, SEEK_HOLE);
- if (to < 0 && to >= (loff_t)-MAX_ERRNO)
- goto cache_fail;
+ to = cachefiles_inject_read_error();
+ if (to == 0)
+ to = vfs_llseek(file, start, SEEK_HOLE);
+ if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
+ trace_cachefiles_io_error(object, file_inode(file), to,
+ cachefiles_trace_seek_error);
+ why = cachefiles_trace_read_seek_error;
+ goto out;
+ }
- if (to < subreq->start + subreq->len) {
- if (subreq->start + subreq->len >= i_size)
+ if (to < start + len) {
+ if (start + len >= i_size)
to = round_up(to, cache->bsize);
else
to = round_down(to, cache->bsize);
- subreq->len = to - subreq->start;
+ len = to - start;
+ *_len = len;
}
- cachefiles_end_secure(cache, saved_cred);
- return NETFS_READ_FROM_CACHE;
+ why = cachefiles_trace_read_have_data;
+ ret = NETFS_READ_FROM_CACHE;
+ goto out;
download_and_store:
- if (cachefiles_has_space(cache, 0, (subreq->len + PAGE_SIZE - 1) / PAGE_SIZE) == 0)
- __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
-cache_fail:
+ __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
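+ /* In on-demand mode, ask the daemon to fetch this range into the cache
+ * file and then retry the seek; if that fails, the read is abandoned as
+ * invalid.
+ */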
+ if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
+ rc = cachefiles_ondemand_read(object, start, len);
+ if (!rc) {
+ __clear_bit(NETFS_SREQ_ONDEMAND, _flags);
+ goto retry;
+ }
+ ret = NETFS_INVALID_READ;
+ }
+out:
cachefiles_end_secure(cache, saved_cred);
-cache_fail_nosec:
- return NETFS_DOWNLOAD_FROM_SERVER;
+out_no_object:
+ trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino, netfs_ino);
+ return ret;
+}
+
+/*
+ * Prepare a read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
+ unsigned long long i_size)
+{
+ return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
+ subreq->start, &subreq->len, i_size,
+ &subreq->flags, subreq->rreq->inode->i_ino);
+}
+
+/*
+ * Prepare an on-demand read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+static enum netfs_io_source
+cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
+ loff_t start, size_t *_len, loff_t i_size,
+ unsigned long *_flags, ino_t ino)
+{
+ return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, ino);
}
/*
* Prepare for a write to occur.
*/
-static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size)
+int __cachefiles_prepare_write(struct cachefiles_object *object,
+ struct file *file,
+ loff_t *_start, size_t *_len, size_t upper_len,
+ bool no_space_allocated_yet)
{
- loff_t start = *_start;
- size_t len = *_len, down;
+ struct cachefiles_cache *cache = object->volume->cache;
+ loff_t start = *_start, pos;
+ size_t len = *_len;
+ int ret;
/* Round to DIO size */
- down = start - round_down(start, PAGE_SIZE);
- *_start = start - down;
- *_len = round_up(down + len, PAGE_SIZE);
- return 0;
+ start = round_down(*_start, PAGE_SIZE);
+ if (start != *_start || *_len > upper_len) {
+ /* Probably asked to cache a streaming write written into the
+ * pagecache when the cookie was temporarily out of service due to
+ * culling.
+ */
+ fscache_count_dio_misfit();
+ return -ENOBUFS;
+ }
+
+ *_len = round_up(len, PAGE_SIZE);
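+ /* E.g. with 4KiB pages, a 10000-byte request starting on a page
+ * boundary is rounded up to 12288 bytes so whole DIO blocks are written.
+ */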
+
+ /* We need to work out whether there's sufficient disk space to perform
+ * the write - but we can skip that check if we have space already
+ * allocated.
+ */
+ if (no_space_allocated_yet)
+ goto check_space;
+
+ pos = cachefiles_inject_read_error();
+ if (pos == 0)
+ pos = vfs_llseek(file, start, SEEK_DATA);
+ if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
+ if (pos == -ENXIO)
+ goto check_space; /* Unallocated tail */
+ trace_cachefiles_io_error(object, file_inode(file), pos,
+ cachefiles_trace_seek_error);
+ return pos;
+ }
+ if ((u64)pos >= (u64)start + *_len)
+ goto check_space; /* Unallocated region */
+
+ /* We have a block that's at least partially filled - if we're low on
+ * space, we need to see if it's fully allocated. If it's not, we may
+ * want to cull it.
+ */
+ if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
+ cachefiles_has_space_check) == 0)
+ return 0; /* Enough space to simply overwrite the whole block */
+
+ pos = cachefiles_inject_read_error();
+ if (pos == 0)
+ pos = vfs_llseek(file, start, SEEK_HOLE);
+ if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
+ trace_cachefiles_io_error(object, file_inode(file), pos,
+ cachefiles_trace_seek_error);
+ return pos;
+ }
+ if ((u64)pos >= (u64)start + *_len)
+ return 0; /* Fully allocated */
+
+ /* Partially allocated, but insufficient space: cull. */
+ fscache_count_no_write_space();
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, *_len);
+ if (ret < 0) {
+ trace_cachefiles_io_error(object, file_inode(file), ret,
+ cachefiles_trace_fallocate_error);
+ cachefiles_io_error_obj(object,
+ "CacheFiles: fallocate failed (%d)\n", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+
+check_space:
+ return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
+ cachefiles_has_space_for_write);
+}
+
+static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet)
+{
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ const struct cred *saved_cred;
+ int ret;
+
+ if (!cachefiles_cres_file(cres)) {
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+ return -ENOBUFS;
+ if (!cachefiles_cres_file(cres))
+ return -ENOBUFS;
+ }
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
+ _start, _len, upper_len,
+ no_space_allocated_yet);
+ cachefiles_end_secure(cache, saved_cred);
+ return ret;
+}
+
+static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &wreq->cache_resources;
+ struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
+
+ _enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
+
+ stream->sreq_max_len = MAX_RW_COUNT;
+ stream->sreq_max_segs = BIO_MAX_VECS;
+
+ if (!cachefiles_cres_file(cres)) {
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+ return netfs_prepare_write_failed(subreq);
+ if (!cachefiles_cres_file(cres))
+ return netfs_prepare_write_failed(subreq);
+ }
+}
+
+static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &wreq->cache_resources;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
+ const struct cred *saved_cred;
+ size_t off, pre, post, len = subreq->len;
+ loff_t start = subreq->start;
+ int ret;
+
+ _enter("W=%x[%x] %llx-%llx",
+ wreq->debug_id, subreq->debug_index, start, start + len - 1);
+
+ /* We need to start on the cache granularity boundary */
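+ /* (e.g. a subrequest starting at 0x1200 skips pre = 0xe00 bytes so the
+ * cache write begins at 0x2000)
+ */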
+ off = start & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+ if (off) {
+ pre = CACHEFILES_DIO_BLOCK_SIZE - off;
+ if (pre >= len) {
+ fscache_count_dio_misfit();
+ netfs_write_subrequest_terminated(subreq, len);
+ return;
+ }
+ subreq->transferred += pre;
+ start += pre;
+ len -= pre;
+ iov_iter_advance(&subreq->io_iter, pre);
+ }
+
+ /* We also need to end on the cache granularity boundary */
+ if (start + len == wreq->i_size) {
+ size_t part = len % CACHEFILES_DIO_BLOCK_SIZE;
+ size_t need = CACHEFILES_DIO_BLOCK_SIZE - part;
+
+ if (part && stream->submit_extendable_to >= need) {
+ len += need;
+ subreq->len += need;
+ subreq->io_iter.count += need;
+ }
+ }
+
+ post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+ if (post) {
+ len -= post;
+ if (len == 0) {
+ fscache_count_dio_misfit();
+ netfs_write_subrequest_terminated(subreq, post);
+ return;
+ }
+ iov_iter_truncate(&subreq->io_iter, len);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cache_prepare);
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
+ &start, &len, len, true);
+ cachefiles_end_secure(cache, saved_cred);
+ if (ret < 0) {
+ netfs_write_subrequest_terminated(subreq, ret);
+ return;
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cache_write);
+ cachefiles_write(&subreq->rreq->cache_resources,
+ subreq->start, &subreq->io_iter,
+ netfs_write_subrequest_terminated, subreq);
}
/*
@@ -351,70 +716,47 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
*/
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
{
- struct fscache_retrieval *op = cres->cache_priv;
- struct file *file = cres->cache_priv2;
-
- _enter("");
+ struct file *file = cachefiles_cres_file(cres);
if (file)
fput(file);
- if (op) {
- fscache_op_complete(&op->op, false);
- fscache_put_retrieval(op);
- }
-
- _leave("");
+ fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
}
static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.end_operation = cachefiles_end_operation,
.read = cachefiles_read,
.write = cachefiles_write,
+ .issue_write = cachefiles_issue_write,
.prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write,
+ .prepare_write_subreq = cachefiles_prepare_write_subreq,
+ .prepare_ondemand_read = cachefiles_prepare_ondemand_read,
+ .query_occupancy = cachefiles_query_occupancy,
};
/*
* Open the cache file when beginning a cache operation.
*/
-int cachefiles_begin_read_operation(struct netfs_read_request *rreq,
- struct fscache_retrieval *op)
+bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct path path;
- struct file *file;
-
- _enter("");
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- path.mnt = cache->mnt;
- path.dentry = object->backer;
- file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
- d_inode(object->backer), cache->cache_cred);
- if (IS_ERR(file))
- return PTR_ERR(file);
- if (!S_ISREG(file_inode(file)->i_mode))
- goto error_file;
- if (unlikely(!file->f_op->read_iter) ||
- unlikely(!file->f_op->write_iter)) {
- pr_notice("Cache does not support read_iter and write_iter\n");
- goto error_file;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+
+ if (!cachefiles_cres_file(cres)) {
+ cres->ops = &cachefiles_netfs_cache_ops;
+ if (object->file) {
+ spin_lock(&object->lock);
+ if (!cres->cache_priv2 && object->file)
+ cres->cache_priv2 = get_file(object->file);
+ spin_unlock(&object->lock);
+ }
}
- fscache_get_retrieval(op);
- rreq->cache_resources.cache_priv = op;
- rreq->cache_resources.cache_priv2 = file;
- rreq->cache_resources.ops = &cachefiles_netfs_cache_ops;
- rreq->cache_resources.debug_id = object->fscache.debug_id;
- _leave("");
- return 0;
+ if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
+ pr_err("failed to get cres->file\n");
+ return false;
+ }
-error_file:
- fput(file);
- return -EIO;
+ return true;
}
diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c
index 7f94efc97e23..aae86af48ed5 100644
--- a/fs/cachefiles/key.c
+++ b/fs/cachefiles/key.c
@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Key to pathname encoder
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/slab.h>
#include "internal.h"
-static const char cachefiles_charmap[64] =
+static const char cachefiles_charmap[64] __nonstring =
"0123456789" /* 0 - 9 */
"abcdefghijklmnopqrstuvwxyz" /* 10 - 35 */
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" /* 36 - 61 */
@@ -22,134 +22,116 @@ static const char cachefiles_filecharmap[256] = {
[48 ... 127] = 1, /* '0' -> '~' */
};
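+/* Work out how many hex digits are needed to render x (e.g. 0x1ff needs 3
+ * and 0x10000 needs 5; zero needs none and is omitted entirely).
+ */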
+static inline unsigned int how_many_hex_digits(unsigned int x)
+{
+ return x ? round_up(ilog2(x) + 1, 4) / 4 : 0;
+}
+
/*
* turn the raw key into something cooked
- * - the raw key should include the length in the two bytes at the front
- * - the key may be up to 514 bytes in length (including the length word)
+ * - the key may be up to NAME_MAX in length
* - "base64" encode the strange keys, mapping 3 bytes of raw to four of
* cooked
* - need to cut the cooked key into 252 char lengths (189 raw bytes)
*/
-char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
+bool cachefiles_cook_key(struct cachefiles_object *object)
{
- unsigned char csum, ch;
- unsigned int acc;
- char *key;
- int loop, len, max, seg, mark, print;
+ const u8 *key = fscache_get_key(object->cookie), *kend;
+ unsigned char ch;
+ unsigned int acc, i, n, nle, nbe, keylen = object->cookie->key_len;
+ unsigned int b64len, len, print, pad;
+ char *name, sep;
- _enter(",%d", keylen);
+ _enter(",%u,%*phN", keylen, keylen, key);
- BUG_ON(keylen < 2 || keylen > 514);
+ BUG_ON(keylen > NAME_MAX - 3);
- csum = raw[0] + raw[1];
print = 1;
- for (loop = 2; loop < keylen; loop++) {
- ch = raw[loop];
- csum += ch;
+ for (i = 0; i < keylen; i++) {
+ ch = key[i];
print &= cachefiles_filecharmap[ch];
}
+ /* If the path is usable ASCII, then we render it directly */
if (print) {
- /* if the path is usable ASCII, then we render it directly */
- max = keylen - 2;
- max += 2; /* two base64'd length chars on the front */
- max += 5; /* @checksum/M */
- max += 3 * 2; /* maximum number of segment dividers (".../M")
- * is ((514 + 251) / 252) = 3
- */
- max += 1; /* NUL on end */
- } else {
- /* calculate the maximum length of the cooked key */
- keylen = (keylen + 2) / 3;
-
- max = keylen * 4;
- max += 5; /* @checksum/M */
- max += 3 * 2; /* maximum number of segment dividers (".../M")
- * is ((514 + 188) / 189) = 3
- */
- max += 1; /* NUL on end */
+ len = 1 + keylen;
+ name = kmalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ return false;
+
+ name[0] = 'D'; /* Data object type, string encoding */
+ memcpy(name + 1, key, keylen);
+ goto success;
}
- max += 1; /* 2nd NUL on end */
-
- _debug("max: %d", max);
-
- key = kmalloc(max, cachefiles_gfp);
- if (!key)
- return NULL;
-
- len = 0;
-
- /* build the cooked key */
- sprintf(key, "@%02x%c+", (unsigned) csum, 0);
- len = 5;
- mark = len - 1;
-
- if (print) {
- acc = *(uint16_t *) raw;
- raw += 2;
-
- key[len + 1] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len] = cachefiles_charmap[acc & 63];
- len += 2;
-
- seg = 250;
- for (loop = keylen; loop > 0; loop--) {
- if (seg <= 0) {
- key[len++] = '\0';
- mark = len;
- key[len++] = '+';
- seg = 252;
- }
-
- key[len++] = *raw++;
- ASSERT(len < max);
- }
-
- switch (type) {
- case FSCACHE_COOKIE_TYPE_INDEX: type = 'I'; break;
- case FSCACHE_COOKIE_TYPE_DATAFILE: type = 'D'; break;
- default: type = 'S'; break;
- }
- } else {
- seg = 252;
- for (loop = keylen; loop > 0; loop--) {
- if (seg <= 0) {
- key[len++] = '\0';
- mark = len;
- key[len++] = '+';
- seg = 252;
- }
-
- acc = *raw++;
- acc |= *raw++ << 8;
- acc |= *raw++ << 16;
-
- _debug("acc: %06x", acc);
-
- key[len++] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len++] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len++] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len++] = cachefiles_charmap[acc & 63];
-
- ASSERT(len < max);
- }
+ /* See if it makes sense to encode it as "hex,hex,hex" for each 32-bit
+ * chunk. We rely on the key having been padded out to a whole number
+ * of 32-bit words.
+ */
+ n = round_up(keylen, 4);
+ nbe = nle = 0;
+ for (i = 0; i < n; i += 4) {
+ u32 be = be32_to_cpu(*(__be32 *)(key + i));
+ u32 le = le32_to_cpu(*(__le32 *)(key + i));
+
+ nbe += 1 + how_many_hex_digits(be);
+ nle += 1 + how_many_hex_digits(le);
+ }
- switch (type) {
- case FSCACHE_COOKIE_TYPE_INDEX: type = 'J'; break;
- case FSCACHE_COOKIE_TYPE_DATAFILE: type = 'E'; break;
- default: type = 'T'; break;
+ b64len = DIV_ROUND_UP(keylen, 3);
+ pad = b64len * 3 - keylen;
+ b64len = 2 + b64len * 4; /* Length if we base64-encode it */
+ _debug("len=%u nbe=%u nle=%u b64=%u", keylen, nbe, nle, b64len);
+ if (nbe < b64len || nle < b64len) {
+ unsigned int nlen = min(nbe, nle) + 1;
+ name = kmalloc(nlen, GFP_KERNEL);
+ if (!name)
+ return false;
+ sep = (nbe <= nle) ? 'S' : 'T'; /* Encoding indicator */
+ len = 0;
+ for (i = 0; i < n; i += 4) {
+ u32 x;
+ if (nbe <= nle)
+ x = be32_to_cpu(*(__be32 *)(key + i));
+ else
+ x = le32_to_cpu(*(__le32 *)(key + i));
+ name[len++] = sep;
+ if (x != 0)
+ len += snprintf(name + len, nlen - len, "%x", x);
+ sep = ',';
}
+ goto success;
}
- key[mark] = type;
- key[len++] = 0;
- key[len] = 0;
+ /* We need to base64-encode it */
+ name = kmalloc(b64len + 1, GFP_KERNEL);
+ if (!name)
+ return false;
+
+ name[0] = 'E';
+ name[1] = '0' + pad;
+ len = 2;
+ kend = key + keylen;
+ do {
+ acc = *key++;
+ if (key < kend) {
+ acc |= *key++ << 8;
+ if (key < kend)
+ acc |= *key++ << 16;
+ }
- _leave(" = %s %d", key, len);
- return key;
+ name[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ name[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ name[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ name[len++] = cachefiles_charmap[acc & 63];
+ } while (key < kend);
+
+success:
+ name[len] = 0;
+ object->d_name = name;
+ _leave(" = %s", object->d_name);
+ return true;
}
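To make the three naming schemes above easier to follow, here is a small user-space model of the length comparison that cachefiles_cook_key() performs. It is an illustrative sketch only: the printable-character test is a simplified subset of cachefiles_filecharmap, and short keys are zero-padded to whole 32-bit words, matching the padding the kernel code says it relies on.

#include <stdio.h>
#include <stdint.h>

/* Number of hex digits needed for x; 0 for x == 0 (cf. how_many_hex_digits()). */
static unsigned int hex_digits(uint32_t x)
{
	unsigned int n = 0;

	while (x) {
		n++;
		x >>= 4;
	}
	return n;
}

int main(void)
{
	/* Example binary key; in the kernel this comes from the fscache cookie. */
	const uint8_t key[] = { 0x01, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef };
	unsigned int keylen = sizeof(key);
	unsigned int i, j, n, nbe = 0, nle = 0, b64len;
	int printable = 1;

	for (i = 0; i < keylen; i++)
		printable &= (key[i] >= '0' && key[i] <= '~');	/* simplified charmap */

	n = (keylen + 3) & ~3u;			/* round up to whole 32-bit words */
	for (i = 0; i < n; i += 4) {
		uint32_t be = 0, le = 0;

		for (j = 0; j < 4; j++) {
			uint8_t b = (i + j < keylen) ? key[i + j] : 0;

			be = (be << 8) | b;
			le |= (uint32_t)b << (8 * j);
		}
		nbe += 1 + hex_digits(be);	/* separator plus the digits */
		nle += 1 + hex_digits(le);
	}
	b64len = 2 + 4 * ((keylen + 2) / 3);	/* 'E', pad digit, 4 chars per 3 bytes */

	if (printable)
		printf("'D' + raw key: %u chars\n", 1 + keylen);
	else if (nbe < b64len || nle < b64len)
		printf("'%c' + hex words: %u chars\n",
		       nbe <= nle ? 'S' : 'T', nbe <= nle ? nbe : nle);
	else
		printf("'E' + base64: %u chars\n", b64len);
	return 0;
}

For the sample key above this picks the little-endian hex form ('T'), since "T1,efbeadde" is shorter than either the big-endian rendering or the base64 rendering.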
diff --git a/fs/cachefiles/main.c b/fs/cachefiles/main.c
index 9c8d34c49b12..3f369c6f816d 100644
--- a/fs/cachefiles/main.c
+++ b/fs/cachefiles/main.c
@@ -2,7 +2,7 @@
/* Network filesystem caching backend to use cache files on a premounted
* filesystem
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -18,6 +18,8 @@
#include <linux/statfs.h>
#include <linux/sysctl.h>
#include <linux/miscdevice.h>
+#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#define CREATE_TRACE_POINTS
#include "internal.h"
@@ -37,14 +39,6 @@ static struct miscdevice cachefiles_dev = {
.fops = &cachefiles_daemon_fops,
};
-static void cachefiles_object_init_once(void *_object)
-{
- struct cachefiles_object *object = _object;
-
- memset(object, 0, sizeof(*object));
- spin_lock_init(&object->work_lock);
-}
-
/*
* initialise the fs caching module
*/
@@ -52,6 +46,9 @@ static int __init cachefiles_init(void)
{
int ret;
+ ret = cachefiles_register_error_injection();
+ if (ret < 0)
+ goto error_einj;
ret = misc_register(&cachefiles_dev);
if (ret < 0)
goto error_dev;
@@ -61,9 +58,7 @@ static int __init cachefiles_init(void)
cachefiles_object_jar =
kmem_cache_create("cachefiles_object_jar",
sizeof(struct cachefiles_object),
- 0,
- SLAB_HWCACHE_ALIGN,
- cachefiles_object_init_once);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!cachefiles_object_jar) {
pr_notice("Failed to allocate an object jar\n");
goto error_object_jar;
@@ -75,6 +70,8 @@ static int __init cachefiles_init(void)
error_object_jar:
misc_deregister(&cachefiles_dev);
error_dev:
+ cachefiles_unregister_error_injection();
+error_einj:
pr_err("failed to register: %d\n", ret);
return ret;
}
@@ -90,6 +87,7 @@ static void __exit cachefiles_exit(void)
kmem_cache_destroy(cachefiles_object_jar);
misc_deregister(&cachefiles_dev);
+ cachefiles_unregister_error_injection();
}
module_exit(cachefiles_exit);
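Dropping the slab constructor above shifts responsibility for initialising each object to the allocation site. A rough sketch of the constructor-less style, using hypothetical names (example_jar, struct example_obj, example_alloc) rather than anything from this patch:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	spinlock_t lock;
	/* ... */
};

static struct kmem_cache *example_jar;

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj;

	/* Constructor-less cache, as the hunk above now creates: */
	if (!example_jar)
		example_jar = kmem_cache_create("example_jar",
						sizeof(struct example_obj),
						0, SLAB_HWCACHE_ALIGN, NULL);
	if (!example_jar)
		return NULL;

	/* The allocation site now does the per-object setup that the removed
	 * init-once constructor used to do. */
	obj = kmem_cache_zalloc(example_jar, GFP_KERNEL);
	if (obj)
		spin_lock_init(&obj->lock);
	return obj;
}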
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index a9aca5ab5970..e5ec90dccc27 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -1,295 +1,274 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/fsnotify.h>
-#include <linux/quotaops.h>
-#include <linux/xattr.h>
-#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/security.h>
-#include <linux/slab.h>
#include "internal.h"
-#define CACHEFILES_KEYBUF_SIZE 512
-
/*
- * dump debugging info about an object
+ * Mark the backing file as being a cache file if it's not already in use. The
+ * mark tells the culling request command that it's not allowed to cull the
+ * file or directory. The caller must hold the inode lock.
*/
-static noinline
-void __cachefiles_printk_object(struct cachefiles_object *object,
- const char *prefix)
+static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
+ struct inode *inode)
{
- struct fscache_cookie *cookie;
- const u8 *k;
- unsigned loop;
-
- pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
- pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
- prefix, object->fscache.state->name,
- object->fscache.flags, work_busy(&object->fscache.work),
- object->fscache.events, object->fscache.event_mask);
- pr_err("%sops=%u inp=%u exc=%u\n",
- prefix, object->fscache.n_ops, object->fscache.n_in_progress,
- object->fscache.n_exclusive);
- pr_err("%sparent=%x\n",
- prefix, object->fscache.parent ? object->fscache.parent->debug_id : 0);
-
- spin_lock(&object->fscache.lock);
- cookie = object->fscache.cookie;
- if (cookie) {
- pr_err("%scookie=%x [pr=%x nd=%p fl=%lx]\n",
- prefix,
- cookie->debug_id,
- cookie->parent ? cookie->parent->debug_id : 0,
- cookie->netfs_data,
- cookie->flags);
- pr_err("%skey=[%u] '", prefix, cookie->key_len);
- k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
- cookie->inline_key : cookie->key;
- for (loop = 0; loop < cookie->key_len; loop++)
- pr_cont("%02x", k[loop]);
- pr_cont("'\n");
+ bool can_use = false;
+
+ if (!(inode->i_flags & S_KERNEL_FILE)) {
+ inode->i_flags |= S_KERNEL_FILE;
+ trace_cachefiles_mark_active(object, inode);
+ can_use = true;
} else {
- pr_err("%scookie=NULL\n", prefix);
+ trace_cachefiles_mark_failed(object, inode);
}
- spin_unlock(&object->fscache.lock);
+
+ return can_use;
+}
+
+static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
+ struct inode *inode)
+{
+ bool can_use;
+
+ inode_lock(inode);
+ can_use = __cachefiles_mark_inode_in_use(object, inode);
+ inode_unlock(inode);
+ return can_use;
}
/*
- * dump debugging info about a pair of objects
+ * Unmark a backing inode. The caller must hold the inode lock.
*/
-static noinline void cachefiles_printk_object(struct cachefiles_object *object,
- struct cachefiles_object *xobject)
+static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ struct inode *inode)
{
- if (object)
- __cachefiles_printk_object(object, "");
- if (xobject)
- __cachefiles_printk_object(xobject, "x");
+ inode->i_flags &= ~S_KERNEL_FILE;
+ trace_cachefiles_mark_inactive(object, inode);
+}
+
+static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
+ struct inode *inode)
+{
+ inode_lock(inode);
+ __cachefiles_unmark_inode_in_use(object, inode);
+ inode_unlock(inode);
}
/*
- * mark the owner of a dentry, if there is one, to indicate that that dentry
- * has been preemptively deleted
- * - the caller must hold the i_mutex on the dentry's parent as required to
- * call vfs_unlink(), vfs_rmdir() or vfs_rename()
+ * Unmark a backing inode and tell cachefilesd that there's something that can
+ * be culled.
*/
-static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
- struct dentry *dentry,
- enum fscache_why_object_killed why)
+void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ struct file *file)
{
- struct cachefiles_object *object;
- struct rb_node *p;
-
- _enter(",'%pd'", dentry);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct inode *inode = file_inode(file);
- write_lock(&cache->active_lock);
+ cachefiles_do_unmark_inode_in_use(object, inode);
- p = cache->active_nodes.rb_node;
- while (p) {
- object = rb_entry(p, struct cachefiles_object, active_node);
- if (object->dentry > dentry)
- p = p->rb_left;
- else if (object->dentry < dentry)
- p = p->rb_right;
- else
- goto found_dentry;
+ if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ atomic_long_add(inode->i_blocks, &cache->b_released);
+ if (atomic_inc_return(&cache->f_released))
+ cachefiles_state_changed(cache);
}
+}
- write_unlock(&cache->active_lock);
- trace_cachefiles_mark_buried(NULL, dentry, why);
- _leave(" [no owner]");
- return;
+/*
+ * get a subdirectory
+ */
+struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ const char *dirname,
+ bool *_is_new)
+{
+ struct dentry *subdir;
+ struct path path;
+ int ret;
- /* found the dentry for */
-found_dentry:
- kdebug("preemptive burial: OBJ%x [%s] %pd",
- object->fscache.debug_id,
- object->fscache.state->name,
- dentry);
+ _enter(",,%s", dirname);
- trace_cachefiles_mark_buried(object, dentry, why);
+ /* search the current directory for the element name */
- if (fscache_object_is_live(&object->fscache)) {
- pr_err("\n");
- pr_err("Error: Can't preemptively bury live object\n");
- cachefiles_printk_object(object, NULL);
- } else {
- if (why != FSCACHE_OBJECT_IS_STALE)
- fscache_object_mark_killed(&object->fscache, why);
+retry:
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ subdir = start_creating(&nop_mnt_idmap, dir, &QSTR(dirname));
+ else
+ subdir = ERR_PTR(ret);
+ trace_cachefiles_lookup(NULL, dir, subdir);
+ if (IS_ERR(subdir)) {
+ trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
+ PTR_ERR(subdir),
+ cachefiles_trace_lookup_error);
+ if (PTR_ERR(subdir) == -ENOMEM)
+ goto nomem_d_alloc;
+ goto lookup_error;
}
- write_unlock(&cache->active_lock);
- _leave(" [owner marked]");
-}
+ _debug("subdir -> %pd %s",
+ subdir, d_backing_inode(subdir) ? "positive" : "negative");
-/*
- * record the fact that an object is now active
- */
-static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
- struct cachefiles_object *object)
-{
- struct cachefiles_object *xobject;
- struct rb_node **_p, *_parent = NULL;
- struct dentry *dentry;
+ /* we need to create the subdir if it doesn't exist yet */
+ if (d_is_negative(subdir)) {
+ ret = cachefiles_has_space(cache, 1, 0,
+ cachefiles_has_space_for_create);
+ if (ret < 0)
+ goto mkdir_error;
- _enter(",%x", object->fscache.debug_id);
+ _debug("attempt mkdir");
-try_again:
- write_lock(&cache->active_lock);
+ path.mnt = cache->mnt;
+ path.dentry = dir;
+ ret = security_path_mkdir(&path, subdir, 0700);
+ if (ret < 0)
+ goto mkdir_error;
+ ret = cachefiles_inject_write_error();
+ if (ret == 0) {
+ subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700, NULL);
+ } else {
+ end_creating(subdir);
+ subdir = ERR_PTR(ret);
+ }
+ if (IS_ERR(subdir)) {
+ trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
+ cachefiles_trace_mkdir_error);
+ goto mkdir_error;
+ }
+ trace_cachefiles_mkdir(dir, subdir);
- dentry = object->dentry;
- trace_cachefiles_mark_active(object, dentry);
+ if (unlikely(d_unhashed(subdir) || d_is_negative(subdir))) {
+ end_creating(subdir);
+ goto retry;
+ }
+ ASSERT(d_backing_inode(subdir));
- if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
- pr_err("Error: Object already active\n");
- cachefiles_printk_object(object, NULL);
- BUG();
+ _debug("mkdir -> %pd{ino=%lu}",
+ subdir, d_backing_inode(subdir)->i_ino);
+ if (_is_new)
+ *_is_new = true;
}
- _p = &cache->active_nodes.rb_node;
- while (*_p) {
- _parent = *_p;
- xobject = rb_entry(_parent,
- struct cachefiles_object, active_node);
+ /* Tell rmdir() it's not allowed to delete the subdir */
+ inode_lock(d_inode(subdir));
+ end_creating_keep(subdir);
- ASSERT(xobject != object);
-
- if (xobject->dentry > dentry)
- _p = &(*_p)->rb_left;
- else if (xobject->dentry < dentry)
- _p = &(*_p)->rb_right;
- else
- goto wait_for_old_object;
+ if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
+ pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
+ subdir, d_inode(subdir)->i_ino);
+ goto mark_error;
}
- rb_link_node(&object->active_node, _parent, _p);
- rb_insert_color(&object->active_node, &cache->active_nodes);
+ inode_unlock(d_inode(subdir));
- write_unlock(&cache->active_lock);
- _leave(" = 0");
- return 0;
+ /* we need to make sure the subdir is a directory */
+ ASSERT(d_backing_inode(subdir));
- /* an old object from a previous incarnation is hogging the slot - we
- * need to wait for it to be destroyed */
-wait_for_old_object:
- trace_cachefiles_wait_active(object, dentry, xobject);
- clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
-
- if (fscache_object_is_live(&xobject->fscache)) {
- pr_err("\n");
- pr_err("Error: Unexpected object collision\n");
- cachefiles_printk_object(object, xobject);
- }
- atomic_inc(&xobject->usage);
- write_unlock(&cache->active_lock);
-
- if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
- wait_queue_head_t *wq;
-
- signed long timeout = 60 * HZ;
- wait_queue_entry_t wait;
- bool requeue;
-
- /* if the object we're waiting for is queued for processing,
- * then just put ourselves on the queue behind it */
- if (work_pending(&xobject->fscache.work)) {
- _debug("queue OBJ%x behind OBJ%x immediately",
- object->fscache.debug_id,
- xobject->fscache.debug_id);
- goto requeue;
- }
+ if (!d_can_lookup(subdir)) {
+ pr_err("%s is not a directory\n", dirname);
+ ret = -EIO;
+ goto check_error;
+ }
- /* otherwise we sleep until either the object we're waiting for
- * is done, or the fscache_object is congested */
- wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
- init_wait(&wait);
- requeue = false;
- do {
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
- if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
- break;
-
- requeue = fscache_object_sleep_till_congested(&timeout);
- } while (timeout > 0 && !requeue);
- finish_wait(wq, &wait);
-
- if (requeue &&
- test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
- _debug("queue OBJ%x behind OBJ%x after wait",
- object->fscache.debug_id,
- xobject->fscache.debug_id);
- goto requeue;
- }
+ ret = -EPERM;
+ if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
+ !d_backing_inode(subdir)->i_op->lookup ||
+ !d_backing_inode(subdir)->i_op->mkdir ||
+ !d_backing_inode(subdir)->i_op->rename ||
+ !d_backing_inode(subdir)->i_op->rmdir ||
+ !d_backing_inode(subdir)->i_op->unlink)
+ goto check_error;
- if (timeout <= 0) {
- pr_err("\n");
- pr_err("Error: Overlong wait for old active object to go away\n");
- cachefiles_printk_object(object, xobject);
- goto requeue;
- }
- }
+ _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
+ return subdir;
- ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
+check_error:
+ cachefiles_put_directory(subdir);
+ _leave(" = %d [check]", ret);
+ return ERR_PTR(ret);
- cache->cache.ops->put_object(&xobject->fscache,
- (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
- goto try_again;
+mark_error:
+ inode_unlock(d_inode(subdir));
+ dput(subdir);
+ return ERR_PTR(-EBUSY);
-requeue:
- cache->cache.ops->put_object(&xobject->fscache,
- (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
- _leave(" = -ETIMEDOUT");
- return -ETIMEDOUT;
+mkdir_error:
+ end_creating(subdir);
+ pr_err("mkdir %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+lookup_error:
+ ret = PTR_ERR(subdir);
+ pr_err("Lookup %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+nomem_d_alloc:
+ inode_unlock(d_inode(dir));
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
}
/*
- * Mark an object as being inactive.
+ * Put a subdirectory.
*/
-void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
- struct cachefiles_object *object,
- blkcnt_t i_blocks)
+void cachefiles_put_directory(struct dentry *dir)
{
- struct dentry *dentry = object->dentry;
- struct inode *inode = d_backing_inode(dentry);
-
- trace_cachefiles_mark_inactive(object, dentry, inode);
+ if (dir) {
+ cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
+ dput(dir);
+ }
+}
- write_lock(&cache->active_lock);
- rb_erase(&object->active_node, &cache->active_nodes);
- clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
- write_unlock(&cache->active_lock);
+/*
+ * Remove a regular file from the cache.
+ */
+static int cachefiles_unlink(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dir, struct dentry *dentry,
+ enum fscache_why_object_killed why)
+{
+ struct path path = {
+ .mnt = cache->mnt,
+ .dentry = dir,
+ };
+ int ret;
- wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
+ trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
+ ret = security_path_unlink(&path, dentry);
+ if (ret < 0) {
+ cachefiles_io_error(cache, "Unlink security error");
+ return ret;
+ }
- /* This object can now be culled, so we need to let the daemon know
- * that there is something it can remove if it needs to.
- */
- atomic_long_add(i_blocks, &cache->b_released);
- if (atomic_inc_return(&cache->f_released))
- cachefiles_state_changed(cache);
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0) {
+ ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "Unlink failed");
+ }
+ if (ret != 0)
+ trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
+ cachefiles_trace_unlink_error);
+ return ret;
}
/*
- * delete an object representation from the cache
- * - file backed objects are unlinked
- * - directory backed objects are stuffed into the graveyard for userspace to
+ * Delete an object representation from the cache
+ * - File backed objects are unlinked
+ * - Directory backed objects are stuffed into the graveyard for userspace to
* delete
- * - unlocks the directory mutex
+ * On entry, dir must be locked; it will be unlocked on exit.
+ * On entry, rep must carry at least two references; one is dropped on exit.
*/
-static int cachefiles_bury_object(struct cachefiles_cache *cache,
- struct cachefiles_object *object,
- struct dentry *dir,
- struct dentry *rep,
- bool preemptive,
- enum fscache_why_object_killed why)
+int cachefiles_bury_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dir,
+ struct dentry *rep,
+ enum fscache_why_object_killed why)
{
struct dentry *grave, *trap;
struct path path, path_to_graveyard;
@@ -298,28 +277,16 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
_enter(",'%pd','%pd'", dir, rep);
+ if (rep->d_parent != dir) {
+ end_removing(rep);
+ _leave(" = -ESTALE");
+ return -ESTALE;
+ }
+
/* non-directories can just be unlinked */
if (!d_is_dir(rep)) {
- _debug("unlink stale object");
-
- path.mnt = cache->mnt;
- path.dentry = dir;
- ret = security_path_unlink(&path, rep);
- if (ret < 0) {
- cachefiles_io_error(cache, "Unlink security error");
- } else {
- trace_cachefiles_unlink(object, rep, why);
- ret = vfs_unlink(&init_user_ns, d_inode(dir), rep,
- NULL);
-
- if (preemptive)
- cachefiles_mark_object_buried(cache, rep, why);
- }
-
- inode_unlock(d_inode(dir));
-
- if (ret == -EIO)
- cachefiles_io_error(cache, "Unlink failed");
+ ret = cachefiles_unlink(cache, object, dir, rep, why);
+ end_removing(rep);
_leave(" = %d", ret);
return ret;
@@ -327,7 +294,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
/* directories have to be moved to the graveyard */
_debug("move stale object to graveyard");
- inode_unlock(d_inode(dir));
+ end_removing(rep);
try_again:
/* first step is to make up a grave dentry in the graveyard */
@@ -337,6 +304,8 @@ try_again:
/* do the multiway lock magic */
trap = lock_rename(cache->graveyard, dir);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
/* do some checks before getting the grave dentry */
if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
@@ -365,17 +334,19 @@ try_again:
return -EIO;
}
- grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
+ grave = lookup_one(&nop_mnt_idmap, &QSTR(nbuffer), cache->graveyard);
if (IS_ERR(grave)) {
unlock_rename(cache->graveyard, dir);
+ trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
+ PTR_ERR(grave),
+ cachefiles_trace_lookup_error);
if (PTR_ERR(grave) == -ENOMEM) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- cachefiles_io_error(cache, "Lookup error %ld",
- PTR_ERR(grave));
+ cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
return -EIO;
}
@@ -412,23 +383,25 @@ try_again:
cachefiles_io_error(cache, "Rename security error %d", ret);
} else {
struct renamedata rd = {
- .old_mnt_userns = &init_user_ns,
- .old_dir = d_inode(dir),
+ .mnt_idmap = &nop_mnt_idmap,
+ .old_parent = dir,
.old_dentry = rep,
- .new_mnt_userns = &init_user_ns,
- .new_dir = d_inode(cache->graveyard),
+ .new_parent = cache->graveyard,
.new_dentry = grave,
};
- trace_cachefiles_rename(object, rep, grave, why);
- ret = vfs_rename(&rd);
+ trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ ret = vfs_rename(&rd);
+ if (ret != 0)
+ trace_cachefiles_vfs_error(object, d_inode(dir), ret,
+ cachefiles_trace_rename_error);
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache,
"Rename failed with error %d", ret);
-
- if (preemptive)
- cachefiles_mark_object_buried(cache, rep, why);
}
+ __cachefiles_unmark_inode_in_use(object, d_inode(rep));
unlock_rename(cache->graveyard, dir);
dput(grave);
_leave(" = 0");
@@ -436,493 +409,365 @@ try_again:
}
/*
- * delete an object representation from the cache
+ * Delete a cache file.
*/
-int cachefiles_delete_object(struct cachefiles_cache *cache,
- struct cachefiles_object *object)
+int cachefiles_delete_object(struct cachefiles_object *object,
+ enum fscache_why_object_killed why)
{
- struct dentry *dir;
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *dentry = object->file->f_path.dentry;
+ struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
int ret;
- _enter(",OBJ%x{%pd}", object->fscache.debug_id, object->dentry);
-
- ASSERT(object->dentry);
- ASSERT(d_backing_inode(object->dentry));
- ASSERT(object->dentry->d_parent);
+ _enter(",OBJ%x{%pD}", object->debug_id, object->file);
- dir = dget_parent(object->dentry);
-
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
-
- if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
- /* object allocation for the same key preemptively deleted this
- * object's file so that it could create its own file */
- _debug("object preemptively buried");
- inode_unlock(d_inode(dir));
- ret = 0;
- } else {
- /* we need to check that our parent is _still_ our parent - it
- * may have been renamed */
- if (dir == object->dentry->d_parent) {
- ret = cachefiles_bury_object(cache, object, dir,
- object->dentry, false,
- FSCACHE_OBJECT_WAS_RETIRED);
- } else {
- /* it got moved, presumably by cachefilesd culling it,
- * so it's no longer in the key path and we can ignore
- * it */
- inode_unlock(d_inode(dir));
- ret = 0;
- }
- }
-
- dput(dir);
- _leave(" = %d", ret);
+ dentry = start_removing_dentry(fan, dentry);
+ if (IS_ERR(dentry))
+ ret = PTR_ERR(dentry);
+ else
+ ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
+ end_removing(dentry);
return ret;
}
/*
- * walk from the parent object to the child object through the backing
- * filesystem, creating directories as we go
+ * Create a temporary file and leave it unattached and un-xattr'd until the
+ * time comes to discard the object from memory.
*/
-int cachefiles_walk_to_object(struct cachefiles_object *parent,
- struct cachefiles_object *object,
- const char *key,
- struct cachefiles_xattr *auxdata)
+struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
- struct cachefiles_cache *cache;
- struct dentry *dir, *next = NULL;
- struct inode *inode;
- struct path path;
- const char *name;
- int ret, nlen;
-
- _enter("OBJ%x{%pd},OBJ%x,%s,",
- parent->fscache.debug_id, parent->dentry,
- object->fscache.debug_id, key);
-
- cache = container_of(parent->fscache.cache,
- struct cachefiles_cache, cache);
- path.mnt = cache->mnt;
-
- ASSERT(parent->dentry);
- ASSERT(d_backing_inode(parent->dentry));
-
- if (!(d_is_dir(parent->dentry))) {
- // TODO: convert file to dir
- _leave("looking up in none directory");
- return -ENOBUFS;
+ struct cachefiles_volume *volume = object->volume;
+ struct cachefiles_cache *cache = volume->cache;
+ const struct cred *saved_cred;
+ struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
+ struct file *file;
+ const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
+ uint64_t ni_size;
+ long ret;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+
+ ret = cachefiles_inject_write_error();
+ if (ret == 0) {
+ file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
+ S_IFREG | 0600,
+ O_RDWR | O_LARGEFILE | O_DIRECT,
+ cache->cache_cred);
+ ret = PTR_ERR_OR_ZERO(file);
+ }
+ if (ret) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), ret,
+ cachefiles_trace_tmpfile_error);
+ if (ret == -EIO)
+ cachefiles_io_error_obj(object, "Failed to create tmpfile");
+ goto err;
}
- dir = dget(parent->dentry);
-
-advance:
- /* attempt to transit the first directory component */
- name = key;
- nlen = strlen(key);
+ trace_cachefiles_tmpfile(object, file_inode(file));
- /* key ends in a double NUL */
- key = key + nlen + 1;
- if (!*key)
- key = NULL;
+ /* This is a newly created file with no other possible user */
+ if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
+ WARN_ON(1);
-lookup_again:
- /* search the current directory for the element name */
- _debug("lookup '%s'", name);
-
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
-
- next = lookup_one_len(name, dir, nlen);
- if (IS_ERR(next)) {
- trace_cachefiles_lookup(object, next, NULL);
- goto lookup_error;
- }
+ ret = cachefiles_ondemand_init_object(object);
+ if (ret < 0)
+ goto err_unuse;
- inode = d_backing_inode(next);
- trace_cachefiles_lookup(object, next, inode);
- _debug("next -> %pd %s", next, inode ? "positive" : "negative");
-
- if (!key)
- object->new = !inode;
-
- /* if this element of the path doesn't exist, then the lookup phase
- * failed, and we can release any readers in the certain knowledge that
- * there's nothing for them to actually read */
- if (d_is_negative(next))
- fscache_object_lookup_negative(&object->fscache);
-
- /* we need to create the object if it's negative */
- if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
- /* index objects and intervening tree levels must be subdirs */
- if (d_is_negative(next)) {
- ret = cachefiles_has_space(cache, 1, 0);
- if (ret < 0)
- goto no_space_error;
-
- path.dentry = dir;
- ret = security_path_mkdir(&path, next, 0);
- if (ret < 0)
- goto create_error;
- ret = vfs_mkdir(&init_user_ns, d_inode(dir), next, 0);
- if (!key)
- trace_cachefiles_mkdir(object, next, ret);
- if (ret < 0)
- goto create_error;
-
- if (unlikely(d_unhashed(next))) {
- dput(next);
- inode_unlock(d_inode(dir));
- goto lookup_again;
- }
- ASSERT(d_backing_inode(next));
-
- _debug("mkdir -> %pd{ino=%lu}",
- next, d_backing_inode(next)->i_ino);
-
- } else if (!d_can_lookup(next)) {
- pr_err("inode %lu is not a directory\n",
- d_backing_inode(next)->i_ino);
- ret = -ENOBUFS;
- goto error;
- }
+ ni_size = object->cookie->object_size;
+ ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
- } else {
- /* non-index objects start out life as files */
- if (d_is_negative(next)) {
- ret = cachefiles_has_space(cache, 1, 0);
- if (ret < 0)
- goto no_space_error;
-
- path.dentry = dir;
- ret = security_path_mknod(&path, next, S_IFREG, 0);
- if (ret < 0)
- goto create_error;
- ret = vfs_create(&init_user_ns, d_inode(dir), next,
- S_IFREG, true);
- trace_cachefiles_create(object, next, ret);
- if (ret < 0)
- goto create_error;
-
- ASSERT(d_backing_inode(next));
-
- _debug("create -> %pd{ino=%lu}",
- next, d_backing_inode(next)->i_ino);
-
- } else if (!d_can_lookup(next) &&
- !d_is_reg(next)
- ) {
- pr_err("inode %lu is not a file or directory\n",
- d_backing_inode(next)->i_ino);
- ret = -ENOBUFS;
- goto error;
+ if (ni_size > 0) {
+ trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
+ cachefiles_trunc_expand_tmpfile);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_truncate(&file->f_path, ni_size);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(
+ object, file_inode(file), ret,
+ cachefiles_trace_trunc_error);
+ goto err_unuse;
}
}
- /* process the next component */
- if (key) {
- _debug("advance");
- inode_unlock(d_inode(dir));
- dput(dir);
- dir = next;
- next = NULL;
- goto advance;
+ ret = -EINVAL;
+ if (unlikely(!file->f_op->read_iter) ||
+ unlikely(!file->f_op->write_iter)) {
+ fput(file);
+ pr_notice("Cache does not support read_iter and write_iter\n");
+ goto err_unuse;
}
+out:
+ cachefiles_end_secure(cache, saved_cred);
+ return file;
- /* we've found the object we were looking for */
- object->dentry = next;
-
- /* if we've found that the terminal object exists, then we need to
- * check its attributes and delete it if it's out of date */
- if (!object->new) {
- _debug("validate '%pd'", next);
-
- ret = cachefiles_check_object_xattr(object, auxdata);
- if (ret == -ESTALE) {
- /* delete the object (the deleter drops the directory
- * mutex) */
- object->dentry = NULL;
+err_unuse:
+ cachefiles_do_unmark_inode_in_use(object, file_inode(file));
+ fput(file);
+err:
+ file = ERR_PTR(ret);
+ goto out;
+}
- ret = cachefiles_bury_object(cache, object, dir, next,
- true,
- FSCACHE_OBJECT_IS_STALE);
- dput(next);
- next = NULL;
+/*
+ * Create a new file.
+ */
+static bool cachefiles_create_file(struct cachefiles_object *object)
+{
+ struct file *file;
+ int ret;
- if (ret < 0)
- goto delete_error;
+ ret = cachefiles_has_space(object->volume->cache, 1, 0,
+ cachefiles_has_space_for_create);
+ if (ret < 0)
+ return false;
- _debug("redo lookup");
- fscache_object_retrying_stale(&object->fscache);
- goto lookup_again;
- }
- }
+ file = cachefiles_create_tmpfile(object);
+ if (IS_ERR(file))
+ return false;
- /* note that we're now using this object */
- ret = cachefiles_mark_object_active(cache, object);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
+ set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
+ _debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
+ object->file = file;
+ return true;
+}
- inode_unlock(d_inode(dir));
- dput(dir);
- dir = NULL;
+/*
+ * Open an existing file, checking its attributes and replacing it if it is
+ * stale.
+ */
+static bool cachefiles_open_file(struct cachefiles_object *object,
+ struct dentry *dentry)
+{
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct file *file;
+ struct path path;
+ int ret;
- if (ret == -ETIMEDOUT)
- goto mark_active_timed_out;
+ _enter("%pd", dentry);
- _debug("=== OBTAINED_OBJECT ===");
+ if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
+ pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
+ dentry, d_inode(dentry)->i_ino);
+ return false;
+ }
- if (object->new) {
- /* attach data to a newly constructed terminal object */
- ret = cachefiles_set_object_xattr(object, auxdata);
- if (ret < 0)
- goto check_error;
- } else {
- /* always update the atime on an object we've just looked up
- * (this is used to keep track of culling, and atimes are only
- * updated by read, write and readdir but not lookup or
- * open) */
- path.dentry = next;
- touch_atime(&path);
- }
-
- /* open a file interface onto a data file */
- if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
- if (d_is_reg(object->dentry)) {
- const struct address_space_operations *aops;
-
- ret = -EPERM;
- aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
- if (!aops->bmap)
- goto check_error;
- if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
- goto check_error;
-
- object->backer = object->dentry;
- } else {
- BUG(); // TODO: open file in data-class subdir
- }
+ /* We need to open a file interface onto the data file now rather than on
+ * demand, because writeback invoked from do_exit() sees
+ * current->fs == NULL, which breaks the d_path() call made during ext4 open.
+ */
+ path.mnt = cache->mnt;
+ path.dentry = dentry;
+ file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT, cache->cache_cred);
+ if (IS_ERR(file)) {
+ trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
+ PTR_ERR(file),
+ cachefiles_trace_open_error);
+ goto error;
}
- object->new = 0;
- fscache_obtained_object(&object->fscache);
+ if (unlikely(!file->f_op->read_iter) ||
+ unlikely(!file->f_op->write_iter)) {
+ pr_notice("Cache does not support read_iter and write_iter\n");
+ goto error_fput;
+ }
+ _debug("file -> %pd positive", dentry);
- _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
- return 0;
+ ret = cachefiles_ondemand_init_object(object);
+ if (ret < 0)
+ goto error_fput;
-no_space_error:
- fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
-create_error:
- _debug("create error %d", ret);
- if (ret == -EIO)
- cachefiles_io_error(cache, "Create/mkdir failed");
- goto error;
+ ret = cachefiles_check_auxdata(object, file);
+ if (ret < 0)
+ goto check_failed;
-mark_active_timed_out:
- _debug("mark active timed out");
- goto release_dentry;
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);
-check_error:
- _debug("check error %d", ret);
- cachefiles_mark_object_inactive(
- cache, object, d_backing_inode(object->dentry)->i_blocks);
-release_dentry:
- dput(object->dentry);
- object->dentry = NULL;
- goto error_out;
-
-delete_error:
- _debug("delete error %d", ret);
- goto error_out2;
+ object->file = file;
-lookup_error:
- _debug("lookup error %ld", PTR_ERR(next));
- ret = PTR_ERR(next);
- if (ret == -EIO)
- cachefiles_io_error(cache, "Lookup failed");
- next = NULL;
+ /* Always update the atime on an object we've just looked up (this is
+ * used to keep track of culling, and atimes are only updated by read,
+ * write and readdir but not lookup or open).
+ */
+ touch_atime(&file->f_path);
+ return true;
+
+check_failed:
+ fscache_cookie_lookup_negative(object->cookie);
+ cachefiles_unmark_inode_in_use(object, file);
+ fput(file);
+ if (ret == -ESTALE)
+ return cachefiles_create_file(object);
+ return false;
+
+error_fput:
+ fput(file);
error:
- inode_unlock(d_inode(dir));
- dput(next);
-error_out2:
- dput(dir);
-error_out:
- _leave(" = error %d", -ret);
- return ret;
+ cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
+ return false;
}
/*
- * get a subdirectory
+ * walk from the parent object to the child object through the backing
+ * filesystem, creating directories as we go
*/
-struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
- struct dentry *dir,
- const char *dirname)
+bool cachefiles_look_up_object(struct cachefiles_object *object)
{
- struct dentry *subdir;
- struct path path;
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
int ret;
- _enter(",,%s", dirname);
-
- /* search the current directory for the element name */
- inode_lock(d_inode(dir));
-
-retry:
- subdir = lookup_one_len(dirname, dir, strlen(dirname));
- if (IS_ERR(subdir)) {
- if (PTR_ERR(subdir) == -ENOMEM)
- goto nomem_d_alloc;
- goto lookup_error;
+ _enter("OBJ%x,%s,", object->debug_id, object->d_name);
+
+ /* Look up path "cache/vol/fanout/file". */
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
+ &QSTR(object->d_name), fan);
+ else
+ dentry = ERR_PTR(ret);
+ trace_cachefiles_lookup(object, fan, dentry);
+ if (IS_ERR(dentry)) {
+ if (dentry == ERR_PTR(-ENOENT))
+ goto new_file;
+ if (dentry == ERR_PTR(-EIO))
+ cachefiles_io_error_obj(object, "Lookup failed");
+ return false;
+ }
+
+ if (!d_is_reg(dentry)) {
+ pr_err("%pd is not a file\n", dentry);
+ struct dentry *de = start_removing_dentry(fan, dentry);
+ if (IS_ERR(de))
+ ret = PTR_ERR(de);
+ else
+ ret = cachefiles_bury_object(volume->cache, object,
+ fan, de,
+ FSCACHE_OBJECT_IS_WEIRD);
+ dput(dentry);
+ if (ret < 0)
+ return false;
+ goto new_file;
}
- _debug("subdir -> %pd %s",
- subdir, d_backing_inode(subdir) ? "positive" : "negative");
+ ret = cachefiles_open_file(object, dentry);
+ dput(dentry);
+ if (!ret)
+ return false;
- /* we need to create the subdir if it doesn't exist yet */
- if (d_is_negative(subdir)) {
- ret = cachefiles_has_space(cache, 1, 0);
- if (ret < 0)
- goto mkdir_error;
+ _leave(" = t [%lu]", file_inode(object->file)->i_ino);
+ return true;
- _debug("attempt mkdir");
+new_file:
+ fscache_cookie_lookup_negative(object->cookie);
+ return cachefiles_create_file(object);
+}
- path.mnt = cache->mnt;
- path.dentry = dir;
- ret = security_path_mkdir(&path, subdir, 0700);
- if (ret < 0)
- goto mkdir_error;
- ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
- if (ret < 0)
- goto mkdir_error;
+/*
+ * Attempt to link a temporary file into its rightful place in the cache.
+ */
+bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
+ struct cachefiles_object *object)
+{
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
+ bool success = false;
+ int ret;
- if (unlikely(d_unhashed(subdir))) {
- dput(subdir);
- goto retry;
- }
- ASSERT(d_backing_inode(subdir));
+ _enter(",%pD", object->file);
- _debug("mkdir -> %pd{ino=%lu}",
- subdir, d_backing_inode(subdir)->i_ino);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ dentry = start_creating(&nop_mnt_idmap, fan, &QSTR(object->d_name));
+ else
+ dentry = ERR_PTR(ret);
+ if (IS_ERR(dentry)) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
+ cachefiles_trace_lookup_error);
+ _debug("lookup fail %ld", PTR_ERR(dentry));
+ goto out;
}
- inode_unlock(d_inode(dir));
+ /*
+ * This loop will only execute more than once if some other thread
+ * races to create the object we are trying to create.
+ */
+ while (!d_is_negative(dentry)) {
+ ret = cachefiles_unlink(volume->cache, object, fan, dentry,
+ FSCACHE_OBJECT_IS_STALE);
+ if (ret < 0)
+ goto out_end;
- /* we need to make sure the subdir is a directory */
- ASSERT(d_backing_inode(subdir));
+ end_creating(dentry);
- if (!d_can_lookup(subdir)) {
- pr_err("%s is not a directory\n", dirname);
- ret = -EIO;
- goto check_error;
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ dentry = start_creating(&nop_mnt_idmap, fan,
+ &QSTR(object->d_name));
+ else
+ dentry = ERR_PTR(ret);
+ if (IS_ERR(dentry)) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
+ cachefiles_trace_lookup_error);
+ _debug("lookup fail %ld", PTR_ERR(dentry));
+ goto out;
+ }
}
- ret = -EPERM;
- if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
- !d_backing_inode(subdir)->i_op->lookup ||
- !d_backing_inode(subdir)->i_op->mkdir ||
- !d_backing_inode(subdir)->i_op->create ||
- !d_backing_inode(subdir)->i_op->rename ||
- !d_backing_inode(subdir)->i_op->rmdir ||
- !d_backing_inode(subdir)->i_op->unlink)
- goto check_error;
-
- _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
- return subdir;
-
-check_error:
- dput(subdir);
- _leave(" = %d [check]", ret);
- return ERR_PTR(ret);
-
-mkdir_error:
- inode_unlock(d_inode(dir));
- dput(subdir);
- pr_err("mkdir %s failed with error %d\n", dirname, ret);
- return ERR_PTR(ret);
-
-lookup_error:
- inode_unlock(d_inode(dir));
- ret = PTR_ERR(subdir);
- pr_err("Lookup %s failed with error %d\n", dirname, ret);
- return ERR_PTR(ret);
-
-nomem_d_alloc:
- inode_unlock(d_inode(dir));
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
+ d_inode(fan), dentry, NULL);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), ret,
+ cachefiles_trace_link_error);
+ _debug("link fail %d", ret);
+ } else {
+ trace_cachefiles_link(object, file_inode(object->file));
+ spin_lock(&object->lock);
+ /* TODO: Do we want to switch the file pointer to the new dentry? */
+ clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
+ spin_unlock(&object->lock);
+ success = true;
+ }
+
+out_end:
+ end_creating(dentry);
+out:
+ _leave(" = %u", success);
+ return success;
}
/*
- * find out if an object is in use or not
- * - if finds object and it's not in use:
- * - returns a pointer to the object and a reference on it
- * - returns with the directory locked
+ * Look up an inode to be checked or culled. Return -EBUSY if the inode is
+ * marked in use.
*/
-static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
- struct dentry *dir,
- char *filename)
+static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ char *filename)
{
- struct cachefiles_object *object;
- struct rb_node *_n;
struct dentry *victim;
- int ret;
+ int ret = -ENOENT;
- //_enter(",%pd/,%s",
- // dir, filename);
+ victim = start_removing(&nop_mnt_idmap, dir, &QSTR(filename));
- /* look up the victim */
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
-
- victim = lookup_one_len(filename, dir, strlen(filename));
if (IS_ERR(victim))
goto lookup_error;
-
- //_debug("victim -> %pd %s",
- // victim, d_backing_inode(victim) ? "positive" : "negative");
-
- /* if the object is no longer there then we probably retired the object
- * at the netfs's request whilst the cull was in progress
- */
- if (d_is_negative(victim)) {
- inode_unlock(d_inode(dir));
- dput(victim);
- _leave(" = -ENOENT [absent]");
- return ERR_PTR(-ENOENT);
- }
-
- /* check to see if we're using this object */
- read_lock(&cache->active_lock);
-
- _n = cache->active_nodes.rb_node;
-
- while (_n) {
- object = rb_entry(_n, struct cachefiles_object, active_node);
-
- if (object->dentry > victim)
- _n = _n->rb_left;
- else if (object->dentry < victim)
- _n = _n->rb_right;
- else
- goto object_in_use;
- }
-
- read_unlock(&cache->active_lock);
-
- //_leave(" = %pd", victim);
+ if (d_inode(victim)->i_flags & S_KERNEL_FILE)
+ goto lookup_busy;
return victim;
-object_in_use:
- read_unlock(&cache->active_lock);
- inode_unlock(d_inode(dir));
- dput(victim);
- //_leave(" = -EBUSY [in use]");
- return ERR_PTR(-EBUSY);
+lookup_busy:
+ ret = -EBUSY;
+ end_removing(victim);
+ return ERR_PTR(ret);
lookup_error:
- inode_unlock(d_inode(dir));
ret = PTR_ERR(victim);
- if (ret == -ENOENT) {
- /* file or dir now absent - probably retired by netfs */
- _leave(" = -ESTALE [absent]");
- return ERR_PTR(-ESTALE);
- }
+ if (ret == -ENOENT)
+ return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
if (ret == -EIO) {
cachefiles_io_error(cache, "Lookup failed");
@@ -931,59 +776,55 @@ lookup_error:
ret = -EIO;
}
- _leave(" = %d", ret);
return ERR_PTR(ret);
}
/*
- * cull an object if it's not in use
+ * Cull an object if it's not in use
* - called only by cache manager daemon
*/
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
char *filename)
{
struct dentry *victim;
+ struct inode *inode;
int ret;
_enter(",%pd/,%s", dir, filename);
- victim = cachefiles_check_active(cache, dir, filename);
+ victim = cachefiles_lookup_for_cull(cache, dir, filename);
if (IS_ERR(victim))
return PTR_ERR(victim);
- _debug("victim -> %pd %s",
- victim, d_backing_inode(victim) ? "positive" : "negative");
-
- /* okay... the victim is not being used so we can cull it
- * - start by marking it as stale
- */
- _debug("victim is cullable");
-
- ret = cachefiles_remove_object_xattr(cache, victim);
+ /* check to see if someone is using this object */
+ inode = d_inode(victim);
+ inode_lock(inode);
+ if (inode->i_flags & S_KERNEL_FILE) {
+ ret = -EBUSY;
+ } else {
+ /* Stop the cache from picking it back up */
+ inode->i_flags |= S_KERNEL_FILE;
+ ret = 0;
+ }
+ inode_unlock(inode);
if (ret < 0)
goto error_unlock;
- /* actually remove the victim (drops the dir mutex) */
- _debug("bury");
-
- ret = cachefiles_bury_object(cache, NULL, dir, victim, false,
+ ret = cachefiles_bury_object(cache, NULL, dir, victim,
FSCACHE_OBJECT_WAS_CULLED);
+ dput(victim);
if (ret < 0)
goto error;
- dput(victim);
+ fscache_count_culled();
_leave(" = 0");
return 0;
error_unlock:
- inode_unlock(d_inode(dir));
+ end_removing(victim);
error:
- dput(victim);
- if (ret == -ENOENT) {
- /* file or dir now absent - probably retired by netfs */
- _leave(" = -ESTALE [absent]");
- return -ESTALE;
- }
+ if (ret == -ENOENT)
+ return -ESTALE; /* Probably got retired by the netfs */
if (ret != -ENOMEM) {
pr_err("Internal error: %d\n", ret);
@@ -995,7 +836,7 @@ error:
}
/*
- * find out if an object is in use or not
+ * Find out if an object is in use or not
* - called only by cache manager daemon
* - returns -EBUSY or 0 to indicate whether an object is in use or not
*/
@@ -1003,16 +844,13 @@ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
char *filename)
{
struct dentry *victim;
+ int ret = 0;
- //_enter(",%pd/,%s",
- // dir, filename);
-
- victim = cachefiles_check_active(cache, dir, filename);
+ victim = cachefiles_lookup_for_cull(cache, dir, filename);
if (IS_ERR(victim))
return PTR_ERR(victim);
inode_unlock(d_inode(dir));
dput(victim);
- //_leave(" = 0");
- return 0;
+ return ret;
}
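The expression volume->fanout[(u8)object->cookie->key_hash], repeated in several of the functions above, is the step that spreads objects across the volume's fanout directories. A hypothetical helper that names the idea (not present in the patch):

/* Pick the fanout directory for an object: the low eight bits of the
 * cookie's key hash select one of 256 subdirectories, so no single
 * directory accumulates every cache file in the volume. */
static inline struct dentry *cachefiles_fan_dir(struct cachefiles_object *object)
{
	return object->volume->fanout[(u8)object->cookie->key_hash];
}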
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
new file mode 100644
index 000000000000..a7ed86fa98bb
--- /dev/null
+++ b/fs/cachefiles/ondemand.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/anon_inodes.h>
+#include <linux/uio.h>
+#include "internal.h"
+
+struct ondemand_anon_file {
+ struct file *file;
+ int fd;
+};
+
+static inline void cachefiles_req_put(struct cachefiles_req *req)
+{
+ if (refcount_dec_and_test(&req->ref))
+ kfree(req);
+}
+
+static int cachefiles_ondemand_fd_release(struct inode *inode,
+ struct file *file)
+{
+ struct cachefiles_object *object = file->private_data;
+ struct cachefiles_cache *cache;
+ struct cachefiles_ondemand_info *info;
+ int object_id;
+ struct cachefiles_req *req;
+ XA_STATE(xas, NULL, 0);
+
+ if (!object)
+ return 0;
+
+ info = object->ondemand;
+ cache = object->volume->cache;
+ xas.xa = &cache->reqs;
+
+ xa_lock(&cache->reqs);
+ spin_lock(&info->lock);
+ object_id = info->ondemand_id;
+ info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+ cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&info->lock);
+
+ /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
+ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+ if (req->msg.object_id == object_id &&
+ req->msg.opcode == CACHEFILES_OP_CLOSE) {
+ complete(&req->done);
+ xas_store(&xas, NULL);
+ }
+ }
+ xa_unlock(&cache->reqs);
+
+ xa_erase(&cache->ondemand_ids, object_id);
+ trace_cachefiles_ondemand_fd_release(object, object_id);
+ cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
+ cachefiles_put_unbind_pincount(cache);
+ return 0;
+}
+
+static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
+ struct iov_iter *iter)
+{
+ struct cachefiles_object *object = kiocb->ki_filp->private_data;
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct file *file;
+ size_t len = iter->count, aligned_len = len;
+ loff_t pos = kiocb->ki_pos;
+ const struct cred *saved_cred;
+ int ret;
+
+ spin_lock(&object->lock);
+ file = object->file;
+ if (!file) {
+ spin_unlock(&object->lock);
+ return -ENOBUFS;
+ }
+ get_file(file);
+ spin_unlock(&object->lock);
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = __cachefiles_prepare_write(object, file, &pos, &aligned_len, len, true);
+ cachefiles_end_secure(cache, saved_cred);
+ if (ret < 0)
+ goto out;
+
+ trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
+ ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
+ if (ret > 0)
+ kiocb->ki_pos += ret;
+
+out:
+ fput(file);
+ return ret;
+}
+
+static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
+ int whence)
+{
+ struct cachefiles_object *object = filp->private_data;
+ struct file *file;
+ loff_t ret;
+
+ spin_lock(&object->lock);
+ file = object->file;
+ if (!file) {
+ spin_unlock(&object->lock);
+ return -ENOBUFS;
+ }
+ get_file(file);
+ spin_unlock(&object->lock);
+
+ ret = vfs_llseek(file, pos, whence);
+ fput(file);
+
+ return ret;
+}
+
+static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long id)
+{
+ struct cachefiles_object *object = filp->private_data;
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct cachefiles_req *req;
+ XA_STATE(xas, &cache->reqs, id);
+
+ if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
+ return -EINVAL;
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return -EOPNOTSUPP;
+
+ xa_lock(&cache->reqs);
+ req = xas_load(&xas);
+ if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
+ req->object != object) {
+ xa_unlock(&cache->reqs);
+ return -EINVAL;
+ }
+ xas_store(&xas, NULL);
+ xa_unlock(&cache->reqs);
+
+ trace_cachefiles_ondemand_cread(object, id);
+ complete(&req->done);
+ return 0;
+}
+
+static const struct file_operations cachefiles_ondemand_fd_fops = {
+ .owner = THIS_MODULE,
+ .release = cachefiles_ondemand_fd_release,
+ .write_iter = cachefiles_ondemand_fd_write_iter,
+ .llseek = cachefiles_ondemand_fd_llseek,
+ .unlocked_ioctl = cachefiles_ondemand_fd_ioctl,
+};
+
+/*
+ * OPEN request Completion (copen)
+ * - command: "copen <id>,<cache_size>"
+ * <cache_size> indicates the object size if >=0, error code if negative
+ */
+int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+{
+ struct cachefiles_req *req;
+ struct fscache_cookie *cookie;
+ struct cachefiles_ondemand_info *info;
+ char *pid, *psize;
+ unsigned long id;
+ long size;
+ int ret;
+ XA_STATE(xas, &cache->reqs, 0);
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return -EOPNOTSUPP;
+
+ if (!*args) {
+ pr_err("Empty id specified\n");
+ return -EINVAL;
+ }
+
+ pid = args;
+ psize = strchr(args, ',');
+ if (!psize) {
+ pr_err("Cache size is not specified\n");
+ return -EINVAL;
+ }
+
+ *psize = 0;
+ psize++;
+
+ ret = kstrtoul(pid, 0, &id);
+ if (ret)
+ return ret;
+
+ xa_lock(&cache->reqs);
+ xas.xa_index = id;
+ req = xas_load(&xas);
+ if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
+ !req->object->ondemand->ondemand_id) {
+ xa_unlock(&cache->reqs);
+ return -EINVAL;
+ }
+ xas_store(&xas, NULL);
+ xa_unlock(&cache->reqs);
+
+ info = req->object->ondemand;
+ /* fail OPEN request if copen format is invalid */
+ ret = kstrtol(psize, 0, &size);
+ if (ret) {
+ req->error = ret;
+ goto out;
+ }
+
+ /* fail OPEN request if daemon reports an error */
+ if (size < 0) {
+ if (!IS_ERR_VALUE(size)) {
+ req->error = -EINVAL;
+ ret = -EINVAL;
+ } else {
+ req->error = size;
+ ret = 0;
+ }
+ goto out;
+ }
+
+ spin_lock(&info->lock);
+ /*
+ * Was the anonymous fd closed before copen? Fail the request.
+ *
+ * t1 | t2
+ * ---------------------------------------------------------
+ * cachefiles_ondemand_copen
+ * req = xa_erase(&cache->reqs, id)
+ * // Anon fd is maliciously closed.
+ * cachefiles_ondemand_fd_release
+ * xa_lock(&cache->reqs)
+ * cachefiles_ondemand_set_object_close(object)
+ * xa_unlock(&cache->reqs)
+ * cachefiles_ondemand_set_object_open
+ * // No one will ever close it again.
+ * cachefiles_ondemand_daemon_read
+ * cachefiles_ondemand_select_req
+ *
+ * The daemon then gets a READ request whose anonymous fd is already
+ * closed; it cannot issue a cread ioctl against a closed fd and so hangs.
+ */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
+ spin_unlock(&info->lock);
+ req->error = -EBADFD;
+ goto out;
+ }
+ cookie = req->object->cookie;
+ cookie->object_size = size;
+ if (size)
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ else
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ trace_cachefiles_ondemand_copen(req->object, id, size);
+
+ cachefiles_ondemand_set_object_open(req->object);
+ spin_unlock(&info->lock);
+ wake_up_all(&cache->daemon_pollwq);
+
+out:
+ spin_lock(&info->lock);
+ /* Set the object back to closed so it does not stay stuck in the reopening state */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
+ cachefiles_ondemand_set_object_close(req->object);
+ spin_unlock(&info->lock);
+ complete(&req->done);
+ return ret;
+}
+
+int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
+{
+ struct cachefiles_req *req;
+
+ XA_STATE(xas, &cache->reqs, 0);
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return -EOPNOTSUPP;
+
+ /*
+ * Reset all requests to the CACHEFILES_REQ_NEW state so that requests
+ * that had only been processed halfway when the user daemon crashed
+ * can be reprocessed after it recovers.
+ */
+ xas_lock(&xas);
+ xas_for_each(&xas, req, ULONG_MAX)
+ xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+ xas_unlock(&xas);
+
+ wake_up_all(&cache->daemon_pollwq);
+ return 0;
+}
+
+static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
+ struct ondemand_anon_file *anon_file)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct cachefiles_open *load;
+ u32 object_id;
+ int ret;
+
+ object = cachefiles_grab_object(req->object,
+ cachefiles_obj_get_ondemand_fd);
+ cache = object->volume->cache;
+
+ ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
+ XA_LIMIT(1, INT_MAX),
+ &cache->ondemand_id_next, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+
+ anon_file->fd = get_unused_fd_flags(O_WRONLY);
+ if (anon_file->fd < 0) {
+ ret = anon_file->fd;
+ goto err_free_id;
+ }
+
+ anon_file->file = anon_inode_getfile_fmode("[cachefiles]",
+ &cachefiles_ondemand_fd_fops, object,
+ O_WRONLY, FMODE_PWRITE | FMODE_LSEEK);
+ if (IS_ERR(anon_file->file)) {
+ ret = PTR_ERR(anon_file->file);
+ goto err_put_fd;
+ }
+
+ spin_lock(&object->ondemand->lock);
+ if (object->ondemand->ondemand_id > 0) {
+ spin_unlock(&object->ondemand->lock);
+ /* Pair with check in cachefiles_ondemand_fd_release(). */
+ anon_file->file->private_data = NULL;
+ ret = -EEXIST;
+ goto err_put_file;
+ }
+
+ load = (void *)req->msg.data;
+ load->fd = anon_file->fd;
+ object->ondemand->ondemand_id = object_id;
+ spin_unlock(&object->ondemand->lock);
+
+ cachefiles_get_unbind_pincount(cache);
+ trace_cachefiles_ondemand_open(object, &req->msg, load);
+ return 0;
+
+err_put_file:
+ fput(anon_file->file);
+ anon_file->file = NULL;
+err_put_fd:
+ put_unused_fd(anon_file->fd);
+ anon_file->fd = ret;
+err_free_id:
+ xa_erase(&cache->ondemand_ids, object_id);
+err:
+ spin_lock(&object->ondemand->lock);
+ /* Avoid marking an opened object as closed. */
+ if (object->ondemand->ondemand_id <= 0)
+ cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&object->ondemand->lock);
+ cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
+ return ret;
+}
+
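+/*
+ * Work item used to reopen a closed object by resending an OPEN request
+ * when new READ requests arrive for it (queued by
+ * cachefiles_ondemand_select_req()).
+ */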
+static void ondemand_object_worker(struct work_struct *work)
+{
+ struct cachefiles_ondemand_info *info =
+ container_of(work, struct cachefiles_ondemand_info, ondemand_work);
+
+ cachefiles_ondemand_init_object(info->object);
+}
+
+/*
+ * If there are any inflight or subsequent READ requests for a
+ * closed object, reopen it.
+ * Skip READ requests whose object is still being reopened.
+ */
+static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
+ unsigned long xa_max)
+{
+ struct cachefiles_req *req;
+ struct cachefiles_object *object;
+ struct cachefiles_ondemand_info *info;
+
+ xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
+ if (req->msg.opcode != CACHEFILES_OP_READ)
+ return req;
+ object = req->object;
+ info = object->ondemand;
+ if (cachefiles_ondemand_object_is_close(object)) {
+ cachefiles_ondemand_set_object_reopening(object);
+ queue_work(fscache_wq, &info->ondemand_work);
+ continue;
+ }
+ if (cachefiles_ondemand_object_is_reopening(object))
+ continue;
+ return req;
+ }
+ return NULL;
+}
+
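+/*
+ * Complete a request exactly once: only the caller that successfully removes
+ * the request from the xarray sets the error and completes it, so the daemon
+ * and a killed waiter cannot both finish the same request.
+ */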
+static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
+ struct xa_state *xas, int err)
+{
+ if (unlikely(!xas || !req))
+ return false;
+
+ if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
+ return false;
+
+ req->error = err;
+ complete(&req->done);
+ return true;
+}
+
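+/*
+ * Copy one pending request into the daemon's read buffer. For an OPEN
+ * request this also installs the anonymous fd into the daemon's fd table.
+ */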
+ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ char __user *_buffer, size_t buflen)
+{
+ struct cachefiles_req *req;
+ struct cachefiles_msg *msg;
+ size_t n;
+ int ret = 0;
+ struct ondemand_anon_file anon_file;
+ XA_STATE(xas, &cache->reqs, cache->req_id_next);
+
+ xa_lock(&cache->reqs);
+ /*
+ * Cyclically search for a request that has not yet been processed,
+ * so that no request is handed out twice and request distribution
+ * stays fair.
+ */
+ req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
+ if (!req && cache->req_id_next > 0) {
+ xas_set(&xas, 0);
+ req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
+ }
+ if (!req) {
+ xa_unlock(&cache->reqs);
+ return 0;
+ }
+
+ msg = &req->msg;
+ n = msg->len;
+
+ if (n > buflen) {
+ xa_unlock(&cache->reqs);
+ return -EMSGSIZE;
+ }
+
+ xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+ cache->req_id_next = xas.xa_index + 1;
+ refcount_inc(&req->ref);
+ cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
+ xa_unlock(&cache->reqs);
+
+ if (msg->opcode == CACHEFILES_OP_OPEN) {
+ ret = cachefiles_ondemand_get_fd(req, &anon_file);
+ if (ret)
+ goto out;
+ }
+
+ msg->msg_id = xas.xa_index;
+ msg->object_id = req->object->ondemand->ondemand_id;
+
+ if (copy_to_user(_buffer, msg, n) != 0)
+ ret = -EFAULT;
+
+ if (msg->opcode == CACHEFILES_OP_OPEN) {
+ if (ret < 0) {
+ fput(anon_file.file);
+ put_unused_fd(anon_file.fd);
+ goto out;
+ }
+ fd_install(anon_file.fd, anon_file.file);
+ }
+out:
+ cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+ /* Remove the request on error; a CLOSE request expects no reply */
+ if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
+ cachefiles_ondemand_finish_req(req, &xas, ret);
+ cachefiles_req_put(req);
+ return ret ? ret : n;
+}
+
+typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
+
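+/*
+ * Allocate a request of the given opcode, publish it in cache->reqs for the
+ * daemon to read, then wait (killably) for the daemon to complete it.
+ */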
+static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ enum cachefiles_opcode opcode,
+ size_t data_len,
+ init_req_fn init_req,
+ void *private)
+{
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct cachefiles_req *req = NULL;
+ XA_STATE(xas, &cache->reqs, 0);
+ int ret;
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return 0;
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ refcount_set(&req->ref, 1);
+ req->object = object;
+ init_completion(&req->done);
+ req->msg.opcode = opcode;
+ req->msg.len = sizeof(struct cachefiles_msg) + data_len;
+
+ ret = init_req(req, private);
+ if (ret)
+ goto out;
+
+ do {
+ /*
+ * Stop enqueuing the request when the daemon is dying. The
+ * following two operations need to be atomic as a whole:
+ * 1) check cache state, and
+ * 2) enqueue request if cache is alive.
+ * Otherwise the request may be enqueued after the xarray has been
+ * flushed, leaving an orphan request that is never completed.
+ *
+ * CPU 1 CPU 2
+ * ===== =====
+ * test CACHEFILES_DEAD bit
+ * set CACHEFILES_DEAD bit
+ * flush requests in the xarray
+ * enqueue the request
+ */
+ xas_lock(&xas);
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
+ cachefiles_ondemand_object_is_dropping(object)) {
+ xas_unlock(&xas);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* coupled with the barrier in cachefiles_flush_reqs() */
+ smp_mb();
+
+ if (opcode == CACHEFILES_OP_CLOSE &&
+ !cachefiles_ondemand_object_is_open(object)) {
+ WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
+ xas_unlock(&xas);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Cyclically find a free slot in the xarray to avoid msg_id reuse,
+ * which could otherwise let the daemon successfully copen a stale
+ * msg_id.
+ */
+ xas.xa_index = cache->msg_id_next;
+ xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART) {
+ xas.xa_index = 0;
+ xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
+ }
+ if (xas.xa_node == XAS_RESTART)
+ xas_set_err(&xas, -EBUSY);
+
+ xas_store(&xas, req);
+ if (xas_valid(&xas)) {
+ cache->msg_id_next = xas.xa_index + 1;
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ ret = xas_error(&xas);
+ if (ret)
+ goto out;
+
+ wake_up_all(&cache->daemon_pollwq);
+wait:
+ ret = wait_for_completion_killable(&req->done);
+ if (!ret) {
+ ret = req->error;
+ } else {
+ ret = -EINTR;
+ if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
+ /* Someone will complete it soon. */
+ cpu_relax();
+ goto wait;
+ }
+ }
+ cachefiles_req_put(req);
+ return ret;
+out:
+ /*
+ * Reset the object to the close state in the error handling path.
+ * If an error occurs after the anonymous fd has been created,
+ * cachefiles_ondemand_fd_release() will set the object to close.
+ */
+ if (opcode == CACHEFILES_OP_OPEN &&
+ !cachefiles_ondemand_object_is_dropping(object))
+ cachefiles_ondemand_set_object_close(object);
+ kfree(req);
+ return ret;
+}
+
+static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
+ void *private)
+{
+ struct cachefiles_object *object = req->object;
+ struct fscache_cookie *cookie = object->cookie;
+ struct fscache_volume *volume = object->volume->vcookie;
+ struct cachefiles_open *load = (void *)req->msg.data;
+ size_t volume_key_size, cookie_key_size;
+ void *volume_key, *cookie_key;
+
+ /*
+ * Volume key is a NUL-terminated string. key[0] stores strlen() of the
+ * string, followed by the content of the string (excluding '\0').
+ */
+ volume_key_size = volume->key[0] + 1;
+ volume_key = volume->key + 1;
+
+ /* Cookie key is binary data, which is netfs specific. */
+ cookie_key_size = cookie->key_len;
+ cookie_key = fscache_get_key(cookie);
+
+ if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
+ pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
+ return -EINVAL;
+ }
+
+ load->volume_key_size = volume_key_size;
+ load->cookie_key_size = cookie_key_size;
+ memcpy(load->data, volume_key, volume_key_size);
+ memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);
+
+ return 0;
+}
+
+static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
+ void *private)
+{
+ struct cachefiles_object *object = req->object;
+
+ if (!cachefiles_ondemand_object_is_open(object))
+ return -ENOENT;
+
+ trace_cachefiles_ondemand_close(object, &req->msg);
+ return 0;
+}
+
+struct cachefiles_read_ctx {
+ loff_t off;
+ size_t len;
+};
+
+static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
+ void *private)
+{
+ struct cachefiles_object *object = req->object;
+ struct cachefiles_read *load = (void *)req->msg.data;
+ struct cachefiles_read_ctx *read_ctx = private;
+
+ load->off = read_ctx->off;
+ load->len = read_ctx->len;
+ trace_cachefiles_ondemand_read(object, &req->msg, load);
+ return 0;
+}
+
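+/*
+ * Send an OPEN request for this object unless it is already open or the
+ * cache is not in on-demand mode.
+ */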
+int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+{
+ struct fscache_cookie *cookie = object->cookie;
+ struct fscache_volume *volume = object->volume->vcookie;
+ size_t volume_key_size, cookie_key_size, data_len;
+
+ if (!object->ondemand)
+ return 0;
+
+ /*
+ * CacheFiles will first check the cache file under the root cache
+ * directory. If the coherency check fails, it will fall back to
+ * creating a new tmpfile as the cache file. Reuse the previously
+ * allocated object ID, if any.
+ */
+ if (cachefiles_ondemand_object_is_open(object))
+ return 0;
+
+ volume_key_size = volume->key[0] + 1;
+ cookie_key_size = cookie->key_len;
+ data_len = sizeof(struct cachefiles_open) +
+ volume_key_size + cookie_key_size;
+
+ return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
+ data_len, cachefiles_ondemand_init_open_req, NULL);
+}
+
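+/*
+ * Send a CLOSE request for the object and cancel any of its still-pending
+ * requests before the object is dropped.
+ */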
+void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+{
+ unsigned long index;
+ struct cachefiles_req *req;
+ struct cachefiles_cache *cache;
+
+ if (!object->ondemand)
+ return;
+
+ cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
+ cachefiles_ondemand_init_close_req, NULL);
+
+ if (!object->ondemand->ondemand_id)
+ return;
+
+ /* Cancel all requests for the object that is being dropped. */
+ cache = object->volume->cache;
+ xa_lock(&cache->reqs);
+ cachefiles_ondemand_set_object_dropping(object);
+ xa_for_each(&cache->reqs, index, req) {
+ if (req->object == object) {
+ req->error = -EIO;
+ complete(&req->done);
+ __xa_erase(&cache->reqs, index);
+ }
+ }
+ xa_unlock(&cache->reqs);
+
+ /* Wait for ondemand_object_worker() to finish to avoid a use-after-free. */
+ cancel_work_sync(&object->ondemand->ondemand_work);
+}
+
+int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
+ struct cachefiles_volume *volume)
+{
+ if (!cachefiles_in_ondemand_mode(volume->cache))
+ return 0;
+
+ object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
+ GFP_KERNEL);
+ if (!object->ondemand)
+ return -ENOMEM;
+
+ object->ondemand->object = object;
+ spin_lock_init(&object->ondemand->lock);
+ INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
+ return 0;
+}
+
+void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
+{
+ kfree(object->ondemand);
+ object->ondemand = NULL;
+}
+
+int cachefiles_ondemand_read(struct cachefiles_object *object,
+ loff_t pos, size_t len)
+{
+ struct cachefiles_read_ctx read_ctx = {pos, len};
+
+ return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
+ sizeof(struct cachefiles_read),
+ cachefiles_ondemand_init_read_req, &read_ctx);
+}
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
deleted file mode 100644
index fcf4f3b72923..000000000000
--- a/fs/cachefiles/rdwr.c
+++ /dev/null
@@ -1,972 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Storage object read/write
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <linux/file.h>
-#include <linux/swap.h>
-#include "internal.h"
-
-/*
- * detect wake up events generated by the unlocking of pages in which we're
- * interested
- * - we use this to detect read completion of backing pages
- * - the caller holds the waitqueue lock
- */
-static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
- int sync, void *_key)
-{
- struct cachefiles_one_read *monitor =
- container_of(wait, struct cachefiles_one_read, monitor);
- struct cachefiles_object *object;
- struct fscache_retrieval *op = monitor->op;
- struct wait_page_key *key = _key;
- struct folio *folio = wait->private;
-
- ASSERT(key);
-
- _enter("{%lu},%u,%d,{%p,%u}",
- monitor->netfs_page->index, mode, sync,
- key->folio, key->bit_nr);
-
- if (key->folio != folio || key->bit_nr != PG_locked)
- return 0;
-
- _debug("--- monitor %p %lx ---", folio, folio->flags);
-
- if (!folio_test_uptodate(folio) && !folio_test_error(folio)) {
- /* unlocked, not uptodate and not erronous? */
- _debug("page probably truncated");
- }
-
- /* remove from the waitqueue */
- list_del(&wait->entry);
-
- /* move onto the action list and queue for FS-Cache thread pool */
- ASSERT(op);
-
- /* We need to temporarily bump the usage count as we don't own a ref
- * here otherwise cachefiles_read_copier() may free the op between the
- * monitor being enqueued on the op->to_do list and the op getting
- * enqueued on the work queue.
- */
- fscache_get_retrieval(op);
-
- object = container_of(op->op.object, struct cachefiles_object, fscache);
- spin_lock(&object->work_lock);
- list_add_tail(&monitor->op_link, &op->to_do);
- fscache_enqueue_retrieval(op);
- spin_unlock(&object->work_lock);
-
- fscache_put_retrieval(op);
- return 0;
-}
-
-/*
- * handle a probably truncated page
- * - check to see if the page is still relevant and reissue the read if
- * possible
- * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
- * must wait again and 0 if successful
- */
-static int cachefiles_read_reissue(struct cachefiles_object *object,
- struct cachefiles_one_read *monitor)
-{
- struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
- struct page *backpage = monitor->back_page, *backpage2;
- int ret;
-
- _enter("{ino=%lx},{%lx,%lx}",
- d_backing_inode(object->backer)->i_ino,
- backpage->index, backpage->flags);
-
- /* skip if the page was truncated away completely */
- if (backpage->mapping != bmapping) {
- _leave(" = -ENODATA [mapping]");
- return -ENODATA;
- }
-
- backpage2 = find_get_page(bmapping, backpage->index);
- if (!backpage2) {
- _leave(" = -ENODATA [gone]");
- return -ENODATA;
- }
-
- if (backpage != backpage2) {
- put_page(backpage2);
- _leave(" = -ENODATA [different]");
- return -ENODATA;
- }
-
- /* the page is still there and we already have a ref on it, so we don't
- * need a second */
- put_page(backpage2);
-
- INIT_LIST_HEAD(&monitor->op_link);
- folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
-
- if (trylock_page(backpage)) {
- ret = -EIO;
- if (PageError(backpage))
- goto unlock_discard;
- ret = 0;
- if (PageUptodate(backpage))
- goto unlock_discard;
-
- _debug("reissue read");
- ret = bmapping->a_ops->readpage(NULL, backpage);
- if (ret < 0)
- goto discard;
- }
-
- /* but the page may have been read before the monitor was installed, so
- * the monitor may miss the event - so we have to ensure that we do get
- * one in such a case */
- if (trylock_page(backpage)) {
- _debug("jumpstart %p {%lx}", backpage, backpage->flags);
- unlock_page(backpage);
- }
-
- /* it'll reappear on the todo list */
- _leave(" = -EINPROGRESS");
- return -EINPROGRESS;
-
-unlock_discard:
- unlock_page(backpage);
-discard:
- spin_lock_irq(&object->work_lock);
- list_del(&monitor->op_link);
- spin_unlock_irq(&object->work_lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * copy data from backing pages to netfs pages to complete a read operation
- * - driven by FS-Cache's thread pool
- */
-static void cachefiles_read_copier(struct fscache_operation *_op)
-{
- struct cachefiles_one_read *monitor;
- struct cachefiles_object *object;
- struct fscache_retrieval *op;
- int error, max;
-
- op = container_of(_op, struct fscache_retrieval, op);
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
-
- _enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);
-
- max = 8;
- spin_lock_irq(&object->work_lock);
-
- while (!list_empty(&op->to_do)) {
- monitor = list_entry(op->to_do.next,
- struct cachefiles_one_read, op_link);
- list_del(&monitor->op_link);
-
- spin_unlock_irq(&object->work_lock);
-
- _debug("- copy {%lu}", monitor->back_page->index);
-
- recheck:
- if (test_bit(FSCACHE_COOKIE_INVALIDATING,
- &object->fscache.cookie->flags)) {
- error = -ESTALE;
- } else if (PageUptodate(monitor->back_page)) {
- copy_highpage(monitor->netfs_page, monitor->back_page);
- fscache_mark_page_cached(monitor->op,
- monitor->netfs_page);
- error = 0;
- } else if (!PageError(monitor->back_page)) {
- /* the page has probably been truncated */
- error = cachefiles_read_reissue(object, monitor);
- if (error == -EINPROGRESS)
- goto next;
- goto recheck;
- } else {
- cachefiles_io_error_obj(
- object,
- "Readpage failed on backing file %lx",
- (unsigned long) monitor->back_page->flags);
- error = -EIO;
- }
-
- put_page(monitor->back_page);
-
- fscache_end_io(op, monitor->netfs_page, error);
- put_page(monitor->netfs_page);
- fscache_retrieval_complete(op, 1);
- fscache_put_retrieval(op);
- kfree(monitor);
-
- next:
- /* let the thread pool have some air occasionally */
- max--;
- if (max < 0 || need_resched()) {
- if (!list_empty(&op->to_do))
- fscache_enqueue_retrieval(op);
- _leave(" [maxed out]");
- return;
- }
-
- spin_lock_irq(&object->work_lock);
- }
-
- spin_unlock_irq(&object->work_lock);
- _leave("");
-}
-
-/*
- * read the corresponding page to the given set from the backing file
- * - an uncertain page is simply discarded, to be tried again another time
- */
-static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
- struct fscache_retrieval *op,
- struct page *netpage)
-{
- struct cachefiles_one_read *monitor;
- struct address_space *bmapping;
- struct page *newpage, *backpage;
- int ret;
-
- _enter("");
-
- _debug("read back %p{%lu,%d}",
- netpage, netpage->index, page_count(netpage));
-
- monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
- if (!monitor)
- goto nomem;
-
- monitor->netfs_page = netpage;
- monitor->op = fscache_get_retrieval(op);
-
- init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
-
- /* attempt to get hold of the backing page */
- bmapping = d_backing_inode(object->backer)->i_mapping;
- newpage = NULL;
-
- for (;;) {
- backpage = find_get_page(bmapping, netpage->index);
- if (backpage)
- goto backing_page_already_present;
-
- if (!newpage) {
- newpage = __page_cache_alloc(cachefiles_gfp);
- if (!newpage)
- goto nomem_monitor;
- }
-
- ret = add_to_page_cache_lru(newpage, bmapping,
- netpage->index, cachefiles_gfp);
- if (ret == 0)
- goto installed_new_backing_page;
- if (ret != -EEXIST)
- goto nomem_page;
- }
-
- /* we've installed a new backing page, so now we need to start
- * it reading */
-installed_new_backing_page:
- _debug("- new %p", newpage);
-
- backpage = newpage;
- newpage = NULL;
-
-read_backing_page:
- ret = bmapping->a_ops->readpage(NULL, backpage);
- if (ret < 0)
- goto read_error;
-
- /* set the monitor to transfer the data across */
-monitor_backing_page:
- _debug("- monitor add");
-
- /* install the monitor */
- get_page(monitor->netfs_page);
- get_page(backpage);
- monitor->back_page = backpage;
- monitor->monitor.private = backpage;
- folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
- monitor = NULL;
-
- /* but the page may have been read before the monitor was installed, so
- * the monitor may miss the event - so we have to ensure that we do get
- * one in such a case */
- if (trylock_page(backpage)) {
- _debug("jumpstart %p {%lx}", backpage, backpage->flags);
- unlock_page(backpage);
- }
- goto success;
-
- /* if the backing page is already present, it can be in one of
- * three states: read in progress, read failed or read okay */
-backing_page_already_present:
- _debug("- present");
-
- if (newpage) {
- put_page(newpage);
- newpage = NULL;
- }
-
- if (PageError(backpage))
- goto io_error;
-
- if (PageUptodate(backpage))
- goto backing_page_already_uptodate;
-
- if (!trylock_page(backpage))
- goto monitor_backing_page;
- _debug("read %p {%lx}", backpage, backpage->flags);
- goto read_backing_page;
-
- /* the backing page is already up to date, attach the netfs
- * page to the pagecache and LRU and copy the data across */
-backing_page_already_uptodate:
- _debug("- uptodate");
-
- fscache_mark_page_cached(op, netpage);
-
- copy_highpage(netpage, backpage);
- fscache_end_io(op, netpage, 0);
- fscache_retrieval_complete(op, 1);
-
-success:
- _debug("success");
- ret = 0;
-
-out:
- if (backpage)
- put_page(backpage);
- if (monitor) {
- fscache_put_retrieval(monitor->op);
- kfree(monitor);
- }
- _leave(" = %d", ret);
- return ret;
-
-read_error:
- _debug("read error %d", ret);
- if (ret == -ENOMEM) {
- fscache_retrieval_complete(op, 1);
- goto out;
- }
-io_error:
- cachefiles_io_error_obj(object, "Page read error on backing file");
- fscache_retrieval_complete(op, 1);
- ret = -ENOBUFS;
- goto out;
-
-nomem_page:
- put_page(newpage);
-nomem_monitor:
- fscache_put_retrieval(monitor->op);
- kfree(monitor);
-nomem:
- fscache_retrieval_complete(op, 1);
- _leave(" = -ENOMEM");
- return -ENOMEM;
-}
-
-/*
- * read a page from the cache or allocate a block in which to store it
- * - cache withdrawal is prevented by the caller
- * - returns -EINTR if interrupted
- * - returns -ENOMEM if ran out of memory
- * - returns -ENOBUFS if no buffers can be made available
- * - returns -ENOBUFS if page is beyond EOF
- * - if the page is backed by a block in the cache:
- * - a read will be started which will call the callback on completion
- * - 0 will be returned
- * - else if the page is unbacked:
- * - the metadata will be retained
- * - -ENODATA will be returned
- */
-int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct inode *inode;
- sector_t block;
- unsigned shift;
- int ret, ret2;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("{%p},{%lx},,,", object, page->index);
-
- if (!object->backer)
- goto enobufs;
-
- inode = d_backing_inode(object->backer);
- ASSERT(S_ISREG(inode->i_mode));
-
- /* calculate the shift required to use bmap */
- shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
-
- op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
- op->op.flags |= FSCACHE_OP_ASYNC;
- op->op.processor = cachefiles_read_copier;
-
- /* we assume the absence or presence of the first block is a good
- * enough indication for the page as a whole
- * - TODO: don't use bmap() for this as it is _not_ actually good
- * enough for this as it doesn't indicate errors, but it's all we've
- * got for the moment
- */
- block = page->index;
- block <<= shift;
-
- ret2 = bmap(inode, &block);
- ASSERT(ret2 == 0);
-
- _debug("%llx -> %llx",
- (unsigned long long) (page->index << shift),
- (unsigned long long) block);
-
- if (block) {
- /* submit the apparently valid page to the backing fs to be
- * read from disk */
- ret = cachefiles_read_backing_file_one(object, op, page);
- } else if (cachefiles_has_space(cache, 0, 1) == 0) {
- /* there's space in the cache we can use */
- fscache_mark_page_cached(op, page);
- fscache_retrieval_complete(op, 1);
- ret = -ENODATA;
- } else {
- goto enobufs;
- }
-
- _leave(" = %d", ret);
- return ret;
-
-enobufs:
- fscache_retrieval_complete(op, 1);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-
-/*
- * read the corresponding pages to the given set from the backing file
- * - any uncertain pages are simply discarded, to be tried again another time
- */
-static int cachefiles_read_backing_file(struct cachefiles_object *object,
- struct fscache_retrieval *op,
- struct list_head *list)
-{
- struct cachefiles_one_read *monitor = NULL;
- struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
- struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
- int ret = 0;
-
- _enter("");
-
- list_for_each_entry_safe(netpage, _n, list, lru) {
- list_del(&netpage->lru);
-
- _debug("read back %p{%lu,%d}",
- netpage, netpage->index, page_count(netpage));
-
- if (!monitor) {
- monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
- if (!monitor)
- goto nomem;
-
- monitor->op = fscache_get_retrieval(op);
- init_waitqueue_func_entry(&monitor->monitor,
- cachefiles_read_waiter);
- }
-
- for (;;) {
- backpage = find_get_page(bmapping, netpage->index);
- if (backpage)
- goto backing_page_already_present;
-
- if (!newpage) {
- newpage = __page_cache_alloc(cachefiles_gfp);
- if (!newpage)
- goto nomem;
- }
-
- ret = add_to_page_cache_lru(newpage, bmapping,
- netpage->index,
- cachefiles_gfp);
- if (ret == 0)
- goto installed_new_backing_page;
- if (ret != -EEXIST)
- goto nomem;
- }
-
- /* we've installed a new backing page, so now we need
- * to start it reading */
- installed_new_backing_page:
- _debug("- new %p", newpage);
-
- backpage = newpage;
- newpage = NULL;
-
- reread_backing_page:
- ret = bmapping->a_ops->readpage(NULL, backpage);
- if (ret < 0)
- goto read_error;
-
- /* add the netfs page to the pagecache and LRU, and set the
- * monitor to transfer the data across */
- monitor_backing_page:
- _debug("- monitor add");
-
- ret = add_to_page_cache_lru(netpage, op->mapping,
- netpage->index, cachefiles_gfp);
- if (ret < 0) {
- if (ret == -EEXIST) {
- put_page(backpage);
- backpage = NULL;
- put_page(netpage);
- netpage = NULL;
- fscache_retrieval_complete(op, 1);
- continue;
- }
- goto nomem;
- }
-
- /* install a monitor */
- get_page(netpage);
- monitor->netfs_page = netpage;
-
- get_page(backpage);
- monitor->back_page = backpage;
- monitor->monitor.private = backpage;
- folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
- monitor = NULL;
-
- /* but the page may have been read before the monitor was
- * installed, so the monitor may miss the event - so we have to
- * ensure that we do get one in such a case */
- if (trylock_page(backpage)) {
- _debug("2unlock %p {%lx}", backpage, backpage->flags);
- unlock_page(backpage);
- }
-
- put_page(backpage);
- backpage = NULL;
-
- put_page(netpage);
- netpage = NULL;
- continue;
-
- /* if the backing page is already present, it can be in one of
- * three states: read in progress, read failed or read okay */
- backing_page_already_present:
- _debug("- present %p", backpage);
-
- if (PageError(backpage))
- goto io_error;
-
- if (PageUptodate(backpage))
- goto backing_page_already_uptodate;
-
- _debug("- not ready %p{%lx}", backpage, backpage->flags);
-
- if (!trylock_page(backpage))
- goto monitor_backing_page;
-
- if (PageError(backpage)) {
- _debug("error %lx", backpage->flags);
- unlock_page(backpage);
- goto io_error;
- }
-
- if (PageUptodate(backpage))
- goto backing_page_already_uptodate_unlock;
-
- /* we've locked a page that's neither up to date nor erroneous,
- * so we need to attempt to read it again */
- goto reread_backing_page;
-
- /* the backing page is already up to date, attach the netfs
- * page to the pagecache and LRU and copy the data across */
- backing_page_already_uptodate_unlock:
- _debug("uptodate %lx", backpage->flags);
- unlock_page(backpage);
- backing_page_already_uptodate:
- _debug("- uptodate");
-
- ret = add_to_page_cache_lru(netpage, op->mapping,
- netpage->index, cachefiles_gfp);
- if (ret < 0) {
- if (ret == -EEXIST) {
- put_page(backpage);
- backpage = NULL;
- put_page(netpage);
- netpage = NULL;
- fscache_retrieval_complete(op, 1);
- continue;
- }
- goto nomem;
- }
-
- copy_highpage(netpage, backpage);
-
- put_page(backpage);
- backpage = NULL;
-
- fscache_mark_page_cached(op, netpage);
-
- /* the netpage is unlocked and marked up to date here */
- fscache_end_io(op, netpage, 0);
- put_page(netpage);
- netpage = NULL;
- fscache_retrieval_complete(op, 1);
- continue;
- }
-
- netpage = NULL;
-
- _debug("out");
-
-out:
- /* tidy up */
- if (newpage)
- put_page(newpage);
- if (netpage)
- put_page(netpage);
- if (backpage)
- put_page(backpage);
- if (monitor) {
- fscache_put_retrieval(op);
- kfree(monitor);
- }
-
- list_for_each_entry_safe(netpage, _n, list, lru) {
- list_del(&netpage->lru);
- put_page(netpage);
- fscache_retrieval_complete(op, 1);
- }
-
- _leave(" = %d", ret);
- return ret;
-
-nomem:
- _debug("nomem");
- ret = -ENOMEM;
- goto record_page_complete;
-
-read_error:
- _debug("read error %d", ret);
- if (ret == -ENOMEM)
- goto record_page_complete;
-io_error:
- cachefiles_io_error_obj(object, "Page read error on backing file");
- ret = -ENOBUFS;
-record_page_complete:
- fscache_retrieval_complete(op, 1);
- goto out;
-}
-
-/*
- * read a list of pages from the cache or allocate blocks in which to store
- * them
- */
-int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct list_head backpages;
- struct pagevec pagevec;
- struct inode *inode;
- struct page *page, *_n;
- unsigned shift, nrbackpages;
- int ret, ret2, space;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("{OBJ%x,%d},,%d,,",
- object->fscache.debug_id, atomic_read(&op->op.usage),
- *nr_pages);
-
- if (!object->backer)
- goto all_enobufs;
-
- space = 1;
- if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
- space = 0;
-
- inode = d_backing_inode(object->backer);
- ASSERT(S_ISREG(inode->i_mode));
-
- /* calculate the shift required to use bmap */
- shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
-
- pagevec_init(&pagevec);
-
- op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
- op->op.flags |= FSCACHE_OP_ASYNC;
- op->op.processor = cachefiles_read_copier;
-
- INIT_LIST_HEAD(&backpages);
- nrbackpages = 0;
-
- ret = space ? -ENODATA : -ENOBUFS;
- list_for_each_entry_safe(page, _n, pages, lru) {
- sector_t block;
-
- /* we assume the absence or presence of the first block is a
- * good enough indication for the page as a whole
- * - TODO: don't use bmap() for this as it is _not_ actually
- * good enough for this as it doesn't indicate errors, but
- * it's all we've got for the moment
- */
- block = page->index;
- block <<= shift;
-
- ret2 = bmap(inode, &block);
- ASSERT(ret2 == 0);
-
- _debug("%llx -> %llx",
- (unsigned long long) (page->index << shift),
- (unsigned long long) block);
-
- if (block) {
- /* we have data - add it to the list to give to the
- * backing fs */
- list_move(&page->lru, &backpages);
- (*nr_pages)--;
- nrbackpages++;
- } else if (space && pagevec_add(&pagevec, page) == 0) {
- fscache_mark_pages_cached(op, &pagevec);
- fscache_retrieval_complete(op, 1);
- ret = -ENODATA;
- } else {
- fscache_retrieval_complete(op, 1);
- }
- }
-
- if (pagevec_count(&pagevec) > 0)
- fscache_mark_pages_cached(op, &pagevec);
-
- if (list_empty(pages))
- ret = 0;
-
- /* submit the apparently valid pages to the backing fs to be read from
- * disk */
- if (nrbackpages > 0) {
- ret2 = cachefiles_read_backing_file(object, op, &backpages);
- if (ret2 == -ENOMEM || ret2 == -EINTR)
- ret = ret2;
- }
-
- _leave(" = %d [nr=%u%s]",
- ret, *nr_pages, list_empty(pages) ? " empty" : "");
- return ret;
-
-all_enobufs:
- fscache_retrieval_complete(op, *nr_pages);
- return -ENOBUFS;
-}
-
-/*
- * allocate a block in the cache in which to store a page
- * - cache withdrawal is prevented by the caller
- * - returns -EINTR if interrupted
- * - returns -ENOMEM if ran out of memory
- * - returns -ENOBUFS if no buffers can be made available
- * - returns -ENOBUFS if page is beyond EOF
- * - otherwise:
- * - the metadata will be retained
- * - 0 will be returned
- */
-int cachefiles_allocate_page(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- int ret;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("%p,{%lx},", object, page->index);
-
- ret = cachefiles_has_space(cache, 0, 1);
- if (ret == 0)
- fscache_mark_page_cached(op, page);
- else
- ret = -ENOBUFS;
-
- fscache_retrieval_complete(op, 1);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * allocate blocks in the cache in which to store a set of pages
- * - cache withdrawal is prevented by the caller
- * - returns -EINTR if interrupted
- * - returns -ENOMEM if ran out of memory
- * - returns -ENOBUFS if some buffers couldn't be made available
- * - returns -ENOBUFS if some pages are beyond EOF
- * - otherwise:
- * - -ENODATA will be returned
- * - metadata will be retained for any page marked
- */
-int cachefiles_allocate_pages(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct pagevec pagevec;
- struct page *page;
- int ret;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("%p,,,%d,", object, *nr_pages);
-
- ret = cachefiles_has_space(cache, 0, *nr_pages);
- if (ret == 0) {
- pagevec_init(&pagevec);
-
- list_for_each_entry(page, pages, lru) {
- if (pagevec_add(&pagevec, page) == 0)
- fscache_mark_pages_cached(op, &pagevec);
- }
-
- if (pagevec_count(&pagevec) > 0)
- fscache_mark_pages_cached(op, &pagevec);
- ret = -ENODATA;
- } else {
- ret = -ENOBUFS;
- }
-
- fscache_retrieval_complete(op, *nr_pages);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * request a page be stored in the cache
- * - cache withdrawal is prevented by the caller
- * - this request may be ignored if there's no cache block available, in which
- * case -ENOBUFS will be returned
- * - if the op is in progress, 0 will be returned
- */
-int cachefiles_write_page(struct fscache_storage *op, struct page *page)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct file *file;
- struct path path;
- loff_t pos, eof;
- size_t len;
- void *data;
- int ret = -ENOBUFS;
-
- ASSERT(op != NULL);
- ASSERT(page != NULL);
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
-
- _enter("%p,%p{%lx},,,", object, page, page->index);
-
- if (!object->backer) {
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
- }
-
- ASSERT(d_is_reg(object->backer));
-
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- pos = (loff_t)page->index << PAGE_SHIFT;
-
- /* We mustn't write more data than we have, so we have to beware of a
- * partial page at EOF.
- */
- eof = object->fscache.store_limit_l;
- if (pos >= eof)
- goto error;
-
- /* write the page to the backing filesystem and let it store it in its
- * own time */
- path.mnt = cache->mnt;
- path.dentry = object->backer;
- file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto error_2;
- }
-
- len = PAGE_SIZE;
- if (eof & ~PAGE_MASK) {
- if (eof - pos < PAGE_SIZE) {
- _debug("cut short %llx to %llx",
- pos, eof);
- len = eof - pos;
- ASSERTCMP(pos + len, ==, eof);
- }
- }
-
- data = kmap(page);
- ret = kernel_write(file, data, len, &pos);
- kunmap(page);
- fput(file);
- if (ret != len)
- goto error_eio;
-
- _leave(" = 0");
- return 0;
-
-error_eio:
- ret = -EIO;
-error_2:
- if (ret == -EIO)
- cachefiles_io_error_obj(object,
- "Write page to backing file failed");
-error:
- _leave(" = -ENOBUFS [%d]", ret);
- return -ENOBUFS;
-}
-
-/*
- * detach a backing block from a page
- * - cache withdrawal is prevented by the caller
- */
-void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
- __releases(&object->fscache.cookie->lock)
-{
- struct cachefiles_object *object;
-
- object = container_of(_object, struct cachefiles_object, fscache);
-
- _enter("%p,{%lu}", object, page->index);
-
- spin_unlock(&object->fscache.cookie->lock);
-}
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index aec13fd94692..fc6611886b3b 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles security management
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
struct cred *new;
int ret;
- _enter("{%s}", cache->secctx);
+ _enter("{%u}", cache->have_secid ? cache->secid : 0);
new = prepare_kernel_cred(current);
if (!new) {
@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
goto error;
}
- if (cache->secctx) {
- ret = set_security_override_from_ctx(new, cache->secctx);
+ if (cache->have_secid) {
+ ret = set_security_override(new, cache->secid);
if (ret < 0) {
put_cred(new);
pr_err("Security denies permission to nominate security context: error %d\n",
diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
new file mode 100644
index 000000000000..90ba926f488e
--- /dev/null
+++ b/fs/cachefiles/volume.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Volume handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include "internal.h"
+#include <trace/events/fscache.h>
+
+/*
+ * Allocate and set up a volume representation. We make sure all the fanout
+ * directories are created and pinned.
+ */
+void cachefiles_acquire_volume(struct fscache_volume *vcookie)
+{
+ struct cachefiles_volume *volume;
+ struct cachefiles_cache *cache = vcookie->cache->cache_priv;
+ const struct cred *saved_cred;
+ struct dentry *vdentry, *fan;
+ size_t len;
+ char *name;
+ bool is_new = false;
+ int ret, n_accesses, i;
+
+ _enter("");
+
+ volume = kzalloc(sizeof(struct cachefiles_volume), GFP_KERNEL);
+ if (!volume)
+ return;
+ volume->vcookie = vcookie;
+ volume->cache = cache;
+ INIT_LIST_HEAD(&volume->cache_link);
+
+ cachefiles_begin_secure(cache, &saved_cred);
+
+ len = vcookie->key[0];
+ name = kmalloc(len + 3, GFP_NOFS);
+ if (!name)
+ goto error_vol;
+ name[0] = 'I';
+ memcpy(name + 1, vcookie->key + 1, len);
+ name[len + 1] = 0;
+
+retry:
+ vdentry = cachefiles_get_directory(cache, cache->store, name, &is_new);
+ if (IS_ERR(vdentry))
+ goto error_name;
+ volume->dentry = vdentry;
+
+ if (is_new) {
+ if (!cachefiles_set_volume_xattr(volume))
+ goto error_dir;
+ } else {
+ ret = cachefiles_check_volume_xattr(volume);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ goto error_dir;
+ vdentry = start_removing_dentry(cache->store, vdentry);
+ if (!IS_ERR(vdentry))
+ cachefiles_bury_object(cache, NULL, cache->store,
+ vdentry,
+ FSCACHE_VOLUME_IS_WEIRD);
+ cachefiles_put_directory(volume->dentry);
+ cond_resched();
+ goto retry;
+ }
+ }
+
+ for (i = 0; i < 256; i++) {
+ sprintf(name, "@%02x", i);
+ fan = cachefiles_get_directory(cache, vdentry, name, NULL);
+ if (IS_ERR(fan))
+ goto error_fan;
+ volume->fanout[i] = fan;
+ }
+
+ cachefiles_end_secure(cache, saved_cred);
+
+ vcookie->cache_priv = volume;
+ n_accesses = atomic_inc_return(&vcookie->n_accesses); /* Stop wakeups on dec-to-0 */
+ trace_fscache_access_volume(vcookie->debug_id, 0,
+ refcount_read(&vcookie->ref),
+ n_accesses, fscache_access_cache_pin);
+
+ spin_lock(&cache->object_list_lock);
+ list_add(&volume->cache_link, &volume->cache->volumes);
+ spin_unlock(&cache->object_list_lock);
+
+ kfree(name);
+ return;
+
+error_fan:
+ for (i = 0; i < 256; i++)
+ cachefiles_put_directory(volume->fanout[i]);
+error_dir:
+ cachefiles_put_directory(volume->dentry);
+error_name:
+ kfree(name);
+error_vol:
+ kfree(volume);
+ cachefiles_end_secure(cache, saved_cred);
+}
+
+/*
+ * Release a volume representation.
+ */
+static void __cachefiles_free_volume(struct cachefiles_volume *volume)
+{
+ int i;
+
+ _enter("");
+
+ volume->vcookie->cache_priv = NULL;
+
+ for (i = 0; i < 256; i++)
+ cachefiles_put_directory(volume->fanout[i]);
+ cachefiles_put_directory(volume->dentry);
+ kfree(volume);
+}
+
+void cachefiles_free_volume(struct fscache_volume *vcookie)
+{
+ struct cachefiles_volume *volume = vcookie->cache_priv;
+
+ if (volume) {
+ spin_lock(&volume->cache->object_list_lock);
+ list_del_init(&volume->cache_link);
+ spin_unlock(&volume->cache->object_list_lock);
+ __cachefiles_free_volume(volume);
+ }
+}
+
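+/*
+ * Withdraw a volume: write back its coherency xattr and then free the
+ * in-memory representation.
+ */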
+void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
+{
+ cachefiles_set_volume_xattr(volume);
+ __cachefiles_free_volume(volume);
+}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 9e82de668595..52383b1d0ba6 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles extended attribute management
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -15,310 +15,290 @@
#include <linux/slab.h>
#include "internal.h"
+#define CACHEFILES_COOKIE_TYPE_DATA 1
+
+struct cachefiles_xattr {
+ __be64 object_size; /* Actual size of the object */
+ __be64 zero_point; /* Size after which server has no data not written by us */
+ __u8 type; /* Type of object */
+ __u8 content; /* Content presence (enum cachefiles_content) */
+ __u8 data[]; /* netfs coherency data */
+} __packed;
+
static const char cachefiles_xattr_cache[] =
XATTR_USER_PREFIX "CacheFiles.cache";
+struct cachefiles_vol_xattr {
+ __be32 reserved; /* Reserved, should be 0 */
+ __u8 data[]; /* netfs volume coherency data */
+} __packed;
+
/*
- * check the type label on an object
- * - done using xattrs
+ * set the state xattr on a cache file
*/
-int cachefiles_check_object_type(struct cachefiles_object *object)
+int cachefiles_set_object_xattr(struct cachefiles_object *object)
{
- struct dentry *dentry = object->dentry;
- char type[3], xtype[3];
+ struct cachefiles_xattr *buf;
+ struct dentry *dentry;
+ struct file *file = object->file;
+ unsigned int len = object->cookie->aux_len;
int ret;
- ASSERT(dentry);
- ASSERT(d_backing_inode(dentry));
+ if (!file)
+ return -ESTALE;
+ dentry = file->f_path.dentry;
+
+ _enter("%x,#%d", object->debug_id, len);
- if (!object->fscache.cookie)
- strcpy(type, "C3");
- else
- snprintf(type, 3, "%02x", object->fscache.cookie->def->type);
+ buf = kmalloc(sizeof(struct cachefiles_xattr) + len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- _enter("%x{%s}", object->fscache.debug_id, type);
+ buf->object_size = cpu_to_be64(object->cookie->object_size);
+ buf->zero_point = 0;
+ buf->type = CACHEFILES_COOKIE_TYPE_DATA;
+ buf->content = object->content_info;
+ if (test_bit(FSCACHE_COOKIE_LOCAL_WRITE, &object->cookie->flags))
+ buf->content = CACHEFILES_CONTENT_DIRTY;
+ if (len > 0)
+ memcpy(buf->data, fscache_get_aux(object->cookie), len);
- /* attempt to install a type label directly */
- ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache, type,
- 2, XATTR_CREATE);
+ ret = cachefiles_inject_write_error();
if (ret == 0) {
- _debug("SET"); /* we succeeded */
- goto error;
- }
-
- if (ret != -EEXIST) {
- pr_err("Can't set xattr on %pd [%lu] (err %d)\n",
- dentry, d_backing_inode(dentry)->i_ino,
- -ret);
- goto error;
+ ret = mnt_want_write_file(file);
+ if (ret == 0) {
+ ret = vfs_setxattr(&nop_mnt_idmap, dentry,
+ cachefiles_xattr_cache, buf,
+ sizeof(struct cachefiles_xattr) + len, 0);
+ mnt_drop_write_file(file);
+ }
}
-
- /* read the current type label */
- ret = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, xtype,
- 3);
if (ret < 0) {
- if (ret == -ERANGE)
- goto bad_type_length;
-
- pr_err("Can't read xattr on %pd [%lu] (err %d)\n",
- dentry, d_backing_inode(dentry)->i_ino,
- -ret);
- goto error;
+ trace_cachefiles_vfs_error(object, file_inode(file), ret,
+ cachefiles_trace_setxattr_error);
+ trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
+ buf->content,
+ cachefiles_coherency_set_fail);
+ if (ret != -ENOMEM)
+ cachefiles_io_error_obj(
+ object,
+ "Failed to set xattr with error %d", ret);
+ } else {
+ trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
+ buf->content,
+ cachefiles_coherency_set_ok);
}
- /* check the type is what we're expecting */
- if (ret != 2)
- goto bad_type_length;
-
- if (xtype[0] != type[0] || xtype[1] != type[1])
- goto bad_type;
-
- ret = 0;
-
-error:
+ kfree(buf);
_leave(" = %d", ret);
return ret;
-
-bad_type_length:
- pr_err("Cache object %lu type xattr length incorrect\n",
- d_backing_inode(dentry)->i_ino);
- ret = -EIO;
- goto error;
-
-bad_type:
- xtype[2] = 0;
- pr_err("Cache object %pd [%lu] type %s not %s\n",
- dentry, d_backing_inode(dentry)->i_ino,
- xtype, type);
- ret = -EIO;
- goto error;
}
/*
- * set the state xattr on a cache file
+ * check the consistency between the backing cache and the FS-Cache cookie
*/
-int cachefiles_set_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata)
+int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file)
{
- struct dentry *dentry = object->dentry;
- int ret;
-
- ASSERT(dentry);
+ struct cachefiles_xattr *buf;
+ struct dentry *dentry = file->f_path.dentry;
+ unsigned int len = object->cookie->aux_len, tlen;
+ const void *p = fscache_get_aux(object->cookie);
+ enum cachefiles_coherency_trace why;
+ ssize_t xlen;
+ int ret = -ESTALE;
- _enter("%p,#%d", object, auxdata->len);
+ tlen = sizeof(struct cachefiles_xattr) + len;
+ buf = kmalloc(tlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- /* attempt to install the cache metadata directly */
- _debug("SET #%u", auxdata->len);
+ xlen = cachefiles_inject_read_error();
+ if (xlen == 0)
+ xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
+ if (xlen != tlen) {
+ if (xlen < 0) {
+ ret = xlen;
+ trace_cachefiles_vfs_error(object, file_inode(file), xlen,
+ cachefiles_trace_getxattr_error);
+ }
+ if (xlen == -EIO)
+ cachefiles_io_error_obj(
+ object,
+ "Failed to read aux with error %zd", xlen);
+ why = cachefiles_coherency_check_xattr;
+ goto out;
+ }
- clear_bit(FSCACHE_COOKIE_AUX_UPDATED, &object->fscache.cookie->flags);
- ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len, XATTR_CREATE);
- if (ret < 0 && ret != -ENOMEM)
- cachefiles_io_error_obj(
- object,
- "Failed to set xattr with error %d", ret);
+ if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
+ why = cachefiles_coherency_check_type;
+ } else if (memcmp(buf->data, p, len) != 0) {
+ why = cachefiles_coherency_check_aux;
+ } else if (be64_to_cpu(buf->object_size) != object->cookie->object_size) {
+ why = cachefiles_coherency_check_objsize;
+ } else if (buf->content == CACHEFILES_CONTENT_DIRTY) {
+ // TODO: Begin conflict resolution
+ pr_warn("Dirty object in cache\n");
+ why = cachefiles_coherency_check_dirty;
+ } else {
+ why = cachefiles_coherency_check_ok;
+ ret = 0;
+ }
- _leave(" = %d", ret);
+out:
+ trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
+ buf->content, why);
+ kfree(buf);
return ret;
}
/*
- * update the state xattr on a cache file
+ * remove the object's xattr to mark it stale
*/
-int cachefiles_update_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata)
+int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dentry)
{
- struct dentry *dentry = object->dentry;
int ret;
- if (!dentry)
- return -ESTALE;
-
- _enter("%x,#%d", object->fscache.debug_id, auxdata->len);
-
- /* attempt to install the cache metadata directly */
- _debug("SET #%u", auxdata->len);
-
- clear_bit(FSCACHE_COOKIE_AUX_UPDATED, &object->fscache.cookie->flags);
- ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len, XATTR_REPLACE);
- if (ret < 0 && ret != -ENOMEM)
- cachefiles_io_error_obj(
- object,
- "Failed to update xattr with error %d", ret);
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0) {
+ ret = mnt_want_write(cache->mnt);
+ if (ret == 0) {
+ ret = vfs_removexattr(&nop_mnt_idmap, dentry,
+ cachefiles_xattr_cache);
+ mnt_drop_write(cache->mnt);
+ }
+ }
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(object, d_inode(dentry), ret,
+ cachefiles_trace_remxattr_error);
+ if (ret == -ENOENT || ret == -ENODATA)
+ ret = 0;
+ else if (ret != -ENOMEM)
+ cachefiles_io_error(cache,
+ "Can't remove xattr from %lu"
+ " (error %d)",
+ d_backing_inode(dentry)->i_ino, -ret);
+ }
_leave(" = %d", ret);
return ret;
}
/*
- * check the consistency between the backing cache and the FS-Cache cookie
+ * Stick a marker on the cache object to indicate that it's dirty.
*/
-int cachefiles_check_auxdata(struct cachefiles_object *object)
+void cachefiles_prepare_to_write(struct fscache_cookie *cookie)
{
- struct cachefiles_xattr *auxbuf;
- enum fscache_checkaux validity;
- struct dentry *dentry = object->dentry;
- ssize_t xlen;
- int ret;
-
- ASSERT(dentry);
- ASSERT(d_backing_inode(dentry));
- ASSERT(object->fscache.cookie->def->check_aux);
+ const struct cred *saved_cred;
+ struct cachefiles_object *object = cookie->cache_priv;
+ struct cachefiles_cache *cache = object->volume->cache;
- auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
- if (!auxbuf)
- return -ENOMEM;
+ _enter("c=%08x", object->cookie->debug_id);
- xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxbuf->type, 512 + 1);
- ret = -ESTALE;
- if (xlen < 1 ||
- auxbuf->type != object->fscache.cookie->def->type)
- goto error;
-
- xlen--;
- validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen,
- i_size_read(d_backing_inode(dentry)));
- if (validity != FSCACHE_CHECKAUX_OKAY)
- goto error;
-
- ret = 0;
-error:
- kfree(auxbuf);
- return ret;
+ if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_set_object_xattr(object);
+ cachefiles_end_secure(cache, saved_cred);
+ }
}
/*
- * check the state xattr on a cache file
- * - return -ESTALE if the object should be deleted
+ * Set the state xattr on a volume directory.
*/
-int cachefiles_check_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata)
+bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
{
- struct cachefiles_xattr *auxbuf;
- struct dentry *dentry = object->dentry;
+ struct cachefiles_vol_xattr *buf;
+ unsigned int len = volume->vcookie->coherency_len;
+ const void *p = volume->vcookie->coherency;
+ struct dentry *dentry = volume->dentry;
int ret;
- _enter("%p,#%d", object, auxdata->len);
+ _enter("%x,#%d", volume->vcookie->debug_id, len);
- ASSERT(dentry);
- ASSERT(d_backing_inode(dentry));
+ len += sizeof(*buf);
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return false;
+ buf->reserved = cpu_to_be32(0);
+ memcpy(buf->data, p, volume->vcookie->coherency_len);
- auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, cachefiles_gfp);
- if (!auxbuf) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
+ ret = cachefiles_inject_write_error();
+ if (ret == 0) {
+ ret = mnt_want_write(volume->cache->mnt);
+ if (ret == 0) {
+ ret = vfs_setxattr(&nop_mnt_idmap, dentry,
+ cachefiles_xattr_cache,
+ buf, len, 0);
+ mnt_drop_write(volume->cache->mnt);
+ }
}
-
- /* read the current type label */
- ret = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxbuf->type, 512 + 1);
if (ret < 0) {
- if (ret == -ENODATA)
- goto stale; /* no attribute - power went off
- * mid-cull? */
-
- if (ret == -ERANGE)
- goto bad_type_length;
-
- cachefiles_io_error_obj(object,
- "Can't read xattr on %lu (err %d)",
- d_backing_inode(dentry)->i_ino, -ret);
- goto error;
+ trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
+ cachefiles_trace_setxattr_error);
+ trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino,
+ cachefiles_coherency_vol_set_fail);
+ if (ret != -ENOMEM)
+ cachefiles_io_error(
+ volume->cache, "Failed to set xattr with error %d", ret);
+ } else {
+ trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino,
+ cachefiles_coherency_vol_set_ok);
}
- /* check the on-disk object */
- if (ret < 1)
- goto bad_type_length;
-
- if (auxbuf->type != auxdata->type)
- goto stale;
-
- auxbuf->len = ret;
-
- /* consult the netfs */
- if (object->fscache.cookie->def->check_aux) {
- enum fscache_checkaux result;
- unsigned int dlen;
-
- dlen = auxbuf->len - 1;
-
- _debug("checkaux %s #%u",
- object->fscache.cookie->def->name, dlen);
-
- result = fscache_check_aux(&object->fscache,
- &auxbuf->data, dlen,
- i_size_read(d_backing_inode(dentry)));
-
- switch (result) {
- /* entry okay as is */
- case FSCACHE_CHECKAUX_OKAY:
- goto okay;
-
- /* entry requires update */
- case FSCACHE_CHECKAUX_NEEDS_UPDATE:
- break;
-
- /* entry requires deletion */
- case FSCACHE_CHECKAUX_OBSOLETE:
- goto stale;
-
- default:
- BUG();
- }
-
- /* update the current label */
- ret = vfs_setxattr(&init_user_ns, dentry,
- cachefiles_xattr_cache, &auxdata->type,
- auxdata->len, XATTR_REPLACE);
- if (ret < 0) {
- cachefiles_io_error_obj(object,
- "Can't update xattr on %lu"
- " (error %d)",
- d_backing_inode(dentry)->i_ino, -ret);
- goto error;
- }
- }
-
-okay:
- ret = 0;
-
-error:
- kfree(auxbuf);
+ kfree(buf);
_leave(" = %d", ret);
- return ret;
-
-bad_type_length:
- pr_err("Cache object %lu xattr length incorrect\n",
- d_backing_inode(dentry)->i_ino);
- ret = -EIO;
- goto error;
-
-stale:
- ret = -ESTALE;
- goto error;
+ return ret == 0;
}
/*
- * remove the object's xattr to mark it stale
+ * Check the consistency between the backing cache and the volume cookie.
*/
-int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
- struct dentry *dentry)
+int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
{
- int ret;
+ struct cachefiles_vol_xattr *buf;
+ struct dentry *dentry = volume->dentry;
+ unsigned int len = volume->vcookie->coherency_len;
+ const void *p = volume->vcookie->coherency;
+ enum cachefiles_coherency_trace why;
+ ssize_t xlen;
+ int ret = -ESTALE;
- ret = vfs_removexattr(&init_user_ns, dentry, cachefiles_xattr_cache);
- if (ret < 0) {
- if (ret == -ENOENT || ret == -ENODATA)
- ret = 0;
- else if (ret != -ENOMEM)
- cachefiles_io_error(cache,
- "Can't remove xattr from %lu"
- " (error %d)",
- d_backing_inode(dentry)->i_ino, -ret);
+ _enter("");
+
+ len += sizeof(*buf);
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ xlen = cachefiles_inject_read_error();
+ if (xlen == 0)
+ xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
+ if (xlen != len) {
+ if (xlen < 0) {
+ ret = xlen;
+ trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
+ cachefiles_trace_getxattr_error);
+ if (xlen == -EIO)
+ cachefiles_io_error(
+ volume->cache,
+ "Failed to read xattr with error %zd", xlen);
+ }
+ why = cachefiles_coherency_vol_check_xattr;
+ } else if (buf->reserved != cpu_to_be32(0)) {
+ why = cachefiles_coherency_vol_check_resv;
+ } else if (memcmp(buf->data, p, len - sizeof(*buf)) != 0) {
+ why = cachefiles_coherency_vol_check_cmp;
+ } else {
+ why = cachefiles_coherency_vol_check_ok;
+ ret = 0;
}
+ trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino, why);
+ kfree(buf);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 94df854147d3..3e7def3d31c1 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,10 +3,11 @@ config CEPH_FS
tristate "Ceph distributed file system"
depends on INET
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
select NETFS_SUPPORT
+ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
default n
help
Choose Y or M here to include support for mounting the
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 50c635dc7f71..1f77ca04c426 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -12,3 +12,4 @@ ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
+ceph-$(CONFIG_FS_ENCRYPTION) += crypto.o
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index f4fc8e0b847c..1564eacc253d 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include "super.h"
+#include "mds_client.h"
static inline void ceph_set_cached_acl(struct inode *inode,
int type, struct posix_acl *acl)
@@ -31,6 +32,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
struct posix_acl *ceph_get_acl(struct inode *inode, int type, bool rcu)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int size;
unsigned int retry_cnt = 0;
const char *name;
@@ -72,8 +74,8 @@ retry:
} else if (size == -ENODATA || size == 0) {
acl = NULL;
} else {
- pr_err_ratelimited("get acl %llx.%llx failed, err=%d\n",
- ceph_vinop(inode), size);
+ pr_err_ratelimited_client(cl, "%llx.%llx failed, err=%d\n",
+ ceph_vinop(inode), size);
acl = ERR_PTR(-EIO);
}
@@ -85,14 +87,15 @@ retry:
return acl;
}
-int ceph_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
int ret = 0, size = 0;
const char *name = NULL;
char *value = NULL;
struct iattr newattrs;
- struct timespec64 old_ctime = inode->i_ctime;
+ struct inode *inode = d_inode(dentry);
+ struct timespec64 old_ctime = inode_get_ctime(inode);
umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
if (ceph_snap(inode) != CEPH_NOSNAP) {
@@ -104,7 +107,7 @@ int ceph_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- ret = posix_acl_update_mode(&init_user_ns, inode,
+ ret = posix_acl_update_mode(idmap, inode,
&new_mode, &acl);
if (ret)
goto out;
@@ -139,7 +142,7 @@ int ceph_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- ret = __ceph_setattr(inode, &newattrs);
+ ret = __ceph_setattr(idmap, inode, &newattrs, NULL);
if (ret)
goto out_free;
}
@@ -150,7 +153,7 @@ int ceph_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
newattrs.ia_ctime = old_ctime;
newattrs.ia_mode = old_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- __ceph_setattr(inode, &newattrs);
+ __ceph_setattr(idmap, inode, &newattrs, NULL);
}
goto out_free;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 04bbe853bcb1..63b75d214210 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -4,8 +4,8 @@
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/pagemap.h>
-#include <linux/writeback.h> /* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
@@ -13,11 +13,13 @@
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
+#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>
@@ -63,7 +65,7 @@
(CONGESTION_ON_THRESH(congestion_kb) >> 2))
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
- struct folio *folio, void **_fsdata);
+ struct folio **foliop, void **_fsdata);
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
@@ -76,26 +78,27 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page)
* Dirty a page. Optimistically adjust accounting, on the assumption
* that we won't race with invalidate. If we do, readjust.
*/
-static int ceph_set_page_dirty(struct page *page)
+static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- struct address_space *mapping = page->mapping;
- struct inode *inode;
+ struct inode *inode = mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
- if (PageDirty(page)) {
- dout("%p set_page_dirty %p idx %lu -- already dirty\n",
- mapping->host, page, page->index);
- BUG_ON(!PagePrivate(page));
- return 0;
+ if (folio_test_dirty(folio)) {
+ doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
+ ceph_vinop(inode), folio, folio->index);
+ VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
+ return false;
}
- inode = mapping->host;
+ atomic64_inc(&mdsc->dirty_folios);
+
ci = ceph_inode(inode);
/* dirty the head */
spin_lock(&ci->i_ceph_lock);
- BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
if (__ceph_have_pending_cap_snap(ci)) {
struct ceph_cap_snap *capsnap =
list_last_entry(&ci->i_cap_snaps,
@@ -111,272 +114,467 @@ static int ceph_set_page_dirty(struct page *page)
if (ci->i_wrbuffer_ref == 0)
ihold(inode);
++ci->i_wrbuffer_ref;
- dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
- "snapc %p seq %lld (%d snaps)\n",
- mapping->host, page, page->index,
- ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
- ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
- snapc, snapc->seq, snapc->num_snaps);
+ doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
+ "snapc %p seq %lld (%d snaps)\n",
+ ceph_vinop(inode), folio, folio->index,
+ ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
+ ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
+ snapc, snapc->seq, snapc->num_snaps);
spin_unlock(&ci->i_ceph_lock);
/*
- * Reference snap context in page->private. Also set
- * PagePrivate so that we get invalidatepage callback.
+ * Reference snap context in folio->private. Also set
+ * PagePrivate so that we get invalidate_folio callback.
*/
- BUG_ON(PagePrivate(page));
- attach_page_private(page, snapc);
+ VM_WARN_ON_FOLIO(folio->private, folio);
+ folio_attach_private(folio, snapc);
- return __set_page_dirty_nobuffers(page);
+ return ceph_fscache_dirty_folio(mapping, folio);
}
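The dirty/invalidate pairing above hands a counted reference to folio->private on first dirty and takes it back on invalidation. A minimal, self-contained sketch of that ownership rule (hypothetical types, not the ceph ones):

#include <stddef.h>
#include <stdlib.h>

struct ctx { int refcount; };          /* stands in for a snap context */
struct folio_like { void *private; };

static struct ctx *ctx_get(struct ctx *c) { c->refcount++; return c; }
static void ctx_put(struct ctx *c) { if (--c->refcount == 0) free(c); }

/* Dirtying: stash a counted reference in ->private exactly once. */
static void attach(struct folio_like *f, struct ctx *c)
{
	if (!f->private)
		f->private = ctx_get(c);
}

/* Invalidate or writeback completion: take the reference back and drop it. */
static void detach(struct folio_like *f)
{
	struct ctx *c = f->private;

	if (c) {
		f->private = NULL;
		ctx_put(c);
	}
}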
/*
- * If we are truncating the full page (i.e. offset == 0), adjust the
- * dirty page counters appropriately. Only called if there is private
- * data on the page.
+ * If we are truncating the full folio (i.e. offset == 0), adjust the
+ * dirty folio counters appropriately. Only called if there is private
+ * data on the folio.
*/
-static void ceph_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ceph_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
{
- struct inode *inode;
- struct ceph_inode_info *ci;
+ struct inode *inode = folio->mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- wait_on_page_fscache(page);
- inode = page->mapping->host;
- ci = ceph_inode(inode);
-
- if (offset != 0 || length != thp_size(page)) {
- dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
- inode, page, page->index, offset, length);
+ if (offset != 0 || length != folio_size(folio)) {
+ doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
+ ceph_vinop(inode), folio->index, offset, length);
return;
}
- WARN_ON(!PageLocked(page));
- if (!PagePrivate(page))
- return;
-
- dout("%p invalidatepage %p idx %lu full dirty page\n",
- inode, page, page->index);
+ WARN_ON(!folio_test_locked(folio));
+ if (folio_test_private(folio)) {
+ doutc(cl, "%llx.%llx idx %lu full dirty page\n",
+ ceph_vinop(inode), folio->index);
- snapc = detach_page_private(page);
- ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
- ceph_put_snap_context(snapc);
-}
-
-static int ceph_releasepage(struct page *page, gfp_t gfp)
-{
- dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
- page, page->index, PageDirty(page) ? "" : "not ");
-
- if (PageFsCache(page)) {
- if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
- return 0;
- wait_on_page_fscache(page);
+ snapc = folio_detach_private(folio);
+ ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
+ ceph_put_snap_context(snapc);
}
- return !PagePrivate(page);
+
+ netfs_invalidate_folio(folio, offset, length);
}
-static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
+static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
- struct inode *inode = rreq->mapping->host;
+ struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_layout *lo = &ci->i_layout;
+ unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
+ loff_t end = rreq->start + rreq->len, new_end;
+ struct ceph_netfs_request_data *priv = rreq->netfs_priv;
+ unsigned long max_len;
u32 blockoff;
- u64 blockno;
- /* Expand the start downward */
- blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
- rreq->start = blockno * lo->stripe_unit;
- rreq->len += blockoff;
+ if (priv) {
+ /* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
+ if (priv->file_ra_disabled)
+ max_pages = 0;
+ else
+ max_pages = priv->file_ra_pages;
+
+ }
- /* Now, round up the length to the next block */
- rreq->len = roundup(rreq->len, lo->stripe_unit);
-}
+ /* Readahead is disabled */
+ if (!max_pages)
+ return;
-static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
-{
- struct inode *inode = subreq->rreq->mapping->host;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_inode_info *ci = ceph_inode(inode);
- u64 objno, objoff;
- u32 xlen;
+ max_len = max_pages << PAGE_SHIFT;
- /* Truncate the extent at the end of the current block */
- ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
- &objno, &objoff, &xlen);
- subreq->len = min(xlen, fsc->mount_options->rsize);
- return true;
+ /*
+ * Try to expand the length forward by rounding it up to the next
+ * block, but do not exceed the file size, unless the original
+ * request already exceeds it.
+ */
+ new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
+ if (new_end > end && new_end <= rreq->start + max_len)
+ rreq->len = new_end - rreq->start;
+
+ /* Try to expand the start downward */
+ div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
+ if (rreq->len + blockoff <= max_len) {
+ rreq->start -= blockoff;
+ rreq->len += blockoff;
+ }
}
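The expansion above rounds the window out to stripe boundaries while respecting i_size and the readahead cap. A runnable arithmetic sketch with assumed values (4 MiB stripe unit, 512 KiB cap); here the forward expansion is skipped because the stripe exceeds the cap, and only the start is pulled back:

#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t stripe  = 4ULL << 20;            /* stripe_unit           */
	uint64_t start   = (4ULL << 20) + 12288;  /* request start         */
	uint64_t len     = 8192;                  /* request length        */
	uint64_t i_size  = 100ULL << 20;          /* file size             */
	uint64_t max_len = 128ULL << 12;          /* 128 pages = 512 KiB   */

	/* Expand forward to the stripe boundary, capped at i_size and max_len. */
	uint64_t end = start + len;
	uint64_t new_end = MIN(ROUND_UP(end, stripe), i_size);
	if (new_end > end && new_end <= start + max_len)
		len = new_end - start;

	/* Expand backward to the stripe boundary if the result still fits. */
	uint64_t blockoff = start % stripe;
	if (len + blockoff <= max_len) {
		start -= blockoff;
		len += blockoff;
	}

	/* Prints start=4194304 len=20480: only the start was pulled back. */
	printf("start=%llu len=%llu\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}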
static void finish_netfs_read(struct ceph_osd_request *req)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
+ struct inode *inode = req->r_inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
- struct netfs_read_subrequest *subreq = req->r_priv;
- int num_pages;
+ struct netfs_io_subrequest *subreq = req->r_priv;
+ struct ceph_osd_req_op *op = &req->r_ops[0];
int err = req->r_result;
+ bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, osd_data->length, err);
- dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
- subreq->len, i_size_read(req->r_inode));
+ doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
+ subreq->len, i_size_read(req->r_inode));
/* no object means success but no data */
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
err = 0;
- else if (err == -EBLOCKLISTED)
+ } else if (err == -EBLOCKLISTED) {
fsc->blocklisted = true;
+ }
- if (err >= 0 && err < subreq->len)
+ if (err >= 0) {
+ if (sparse && err > 0)
+ err = ceph_sparse_ext_map_end(op);
+ if (err < subreq->len &&
+ subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
+ subreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (IS_ENCRYPTED(inode) && err > 0) {
+ err = ceph_fscrypt_decrypt_extents(inode,
+ osd_data->pages, subreq->start,
+ op->extent.sparse_ext,
+ op->extent.sparse_ext_cnt);
+ if (err > subreq->len)
+ err = subreq->len;
+ }
+ if (err > 0)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ }
+
+ if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
+ ceph_put_page_vector(osd_data->pages,
+ calc_pages_for(osd_data->alignment,
+ osd_data->length), false);
+ }
+ if (err > 0) {
+ subreq->transferred = err;
+ err = 0;
+ }
+ subreq->error = err;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+ netfs_read_subreq_terminated(subreq);
+ iput(req->r_inode);
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
+}
+
+static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct inode *inode = rreq->inode;
+ struct ceph_mds_reply_info_parsed *rinfo;
+ struct ceph_mds_reply_info_in *iinfo;
+ struct ceph_mds_request *req;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ ssize_t err = 0;
+ size_t len;
+ int mode;
+
+ if (rreq->origin != NETFS_UNBUFFERED_READ &&
+ rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
- netfs_subreq_terminated(subreq, err, true);
+ if (subreq->start >= inode->i_size)
+ goto out;
- num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
- ceph_put_page_vector(osd_data->pages, num_pages, false);
- iput(req->r_inode);
+ /* We need to fetch the inline data. */
+ mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+ req->r_ino1 = ci->i_vino;
+ req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
+ req->r_num_caps = 2;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err < 0)
+ goto out;
+
+ rinfo = &req->r_reply_info;
+ iinfo = &rinfo->targeti;
+ if (iinfo->inline_version == CEPH_INLINE_NONE) {
+ /* The data got uninlined */
+ ceph_mdsc_put_request(req);
+ return false;
+ }
+
+ len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
+ err = copy_to_iter(iinfo->inline_data + subreq->start, len, &subreq->io_iter);
+ if (err == 0) {
+ err = -EFAULT;
+ } else {
+ subreq->transferred += err;
+ err = 0;
+ }
+
+ ceph_mdsc_put_request(req);
+out:
+ subreq->error = err;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+ netfs_read_subreq_terminated(subreq);
+ return true;
}
-static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
+static int ceph_netfs_prepare_read(struct netfs_io_subrequest *subreq)
{
- struct netfs_read_request *rreq = subreq->rreq;
- struct inode *inode = rreq->mapping->host;
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_osd_request *req;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ u64 objno, objoff;
+ u32 xlen;
+
+ /* Truncate the extent at the end of the current block */
+ ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
+ &objno, &objoff, &xlen);
+ rreq->io_streams[0].sreq_max_len = umin(xlen, fsc->mount_options->rsize);
+ return 0;
+}
+
+static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct inode *inode = rreq->inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
- struct iov_iter iter;
- struct page **pages;
- size_t page_off;
- int err = 0;
- u64 len = subreq->len;
+ int err;
+ u64 len;
+ bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
+ u64 off = subreq->start;
+ int extent_cnt;
- req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
- 0, 1, CEPH_OSD_OP_READ,
- CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
- NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
+ if (ceph_inode_is_shutdown(inode)) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
+ return;
+
+ // TODO: This rounding here is slightly dodgy. It *should* work, for
+ // now, as the cache only deals in blocks that are a multiple of
+ // PAGE_SIZE and fscrypt blocks are at most PAGE_SIZE. What needs to
+ // happen is for the fscrypt driving to be moved into netfslib and the
+ // data in the cache also to be stored encrypted.
+ len = subreq->len;
+ ceph_fscrypt_adjust_off_and_len(inode, &off, &len);
+
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
+ off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
+ CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
if (IS_ERR(req)) {
err = PTR_ERR(req);
req = NULL;
goto out;
}
- dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
- iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
- err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
- if (err < 0) {
- dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
- goto out;
+ if (sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, len);
+ err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
+ if (err)
+ goto out;
}
- /* should always give us a page-aligned read */
- WARN_ON_ONCE(page_off);
- len = err;
+ doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
+ ceph_vinop(inode), subreq->start, subreq->len, len);
+
+ /*
+ * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
+ * encrypted inodes. We'd need infrastructure that handles an iov_iter
+ * instead of page arrays, and we don't have that as of yet. Once the
+ * dust settles on the write helpers and encrypt/decrypt routines for
+ * netfs, we should be able to rework this.
+ */
+ if (IS_ENCRYPTED(inode)) {
+ struct page **pages;
+ size_t page_off;
+
+ /*
+ * FIXME: io_iter.count needs to be corrected to aligned
+ * length. Otherwise, iov_iter_get_pages_alloc2() operates
+ * with the initial unaligned length value. As a result,
+ * ceph_msg_data_cursor_init() triggers BUG_ON() in the case
+ * if msg->sparse_read_total > msg->data_length.
+ */
+ subreq->io_iter.count = len;
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
+ err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
+ if (err < 0) {
+ doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
+ ceph_vinop(inode), err);
+ goto out;
+ }
+
+ /* should always give us a page-aligned read */
+ WARN_ON_ONCE(page_off);
+ len = err;
+ err = 0;
+
+ osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
+ false);
+ } else {
+ osd_req_op_extent_osd_iter(req, 0, &subreq->io_iter);
+ }
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ err = -EIO;
+ goto out;
+ }
req->r_callback = finish_netfs_read;
req->r_priv = subreq;
req->r_inode = inode;
ihold(inode);
- err = ceph_osdc_start_request(req->r_osdc, req, false);
- if (err)
- iput(inode);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ ceph_osdc_start_request(req->r_osdc, req);
out:
ceph_osdc_put_request(req);
- if (err)
- netfs_subreq_terminated(subreq, err, false);
- dout("%s: result %d\n", __func__, err);
+ if (err) {
+ subreq->error = err;
+ netfs_read_subreq_terminated(subreq);
+ }
+ doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}
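Encrypted reads must cover whole fscrypt blocks, which is why off and len are adjusted before building the OSD request. A hedged sketch of such an adjustment, assuming 4 KiB blocks; the helper below is illustrative, not the kernel ceph_fscrypt_adjust_off_and_len:

#include <stdint.h>

#define FSCRYPT_BLOCK_SIZE 4096u   /* assumed block granularity */

/*
 * Widen [*off, *off + *len) to the enclosing block-aligned range, which is
 * what reading or writing whole encryption blocks requires.
 */
static void adjust_off_and_len(uint64_t *off, uint64_t *len)
{
	uint64_t start = *off & ~(uint64_t)(FSCRYPT_BLOCK_SIZE - 1);
	uint64_t end   = (*off + *len + FSCRYPT_BLOCK_SIZE - 1) &
			 ~(uint64_t)(FSCRYPT_BLOCK_SIZE - 1);

	*off = start;
	*len = end - start;
}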
-static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
+ struct inode *inode = rreq->inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ int got = 0, want = CEPH_CAP_FILE_CACHE;
+ struct ceph_netfs_request_data *priv;
+ int ret = 0;
+
+ /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+ __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+
+ if (rreq->origin != NETFS_READAHEAD)
+ return 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
+
+ if (file) {
+ struct ceph_rw_context *rw_ctx;
+ struct ceph_file_info *fi = file->private_data;
+
+ priv->file_ra_pages = file->f_ra.ra_pages;
+ priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;
+
+ rw_ctx = ceph_find_rw_context(fi);
+ if (rw_ctx) {
+ rreq->netfs_priv = priv;
+ return 0;
+ }
+ }
+
+ /*
+ * readahead callers do not necessarily hold Fcb caps
+ * (e.g. fadvise, madvise).
+ */
+ ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
+ if (ret < 0) {
+ doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
+ goto out;
+ }
+
+ if (!(got & want)) {
+ doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
+ ret = -EACCES;
+ goto out;
+ }
+ if (ret == 0) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ priv->caps = got;
+ rreq->netfs_priv = priv;
+ rreq->io_streams[0].sreq_max_len = fsc->mount_options->rsize;
+
+out:
+ if (ret < 0) {
+ if (got)
+ ceph_put_cap_refs(ceph_inode(inode), got);
+ kfree(priv);
+ }
+
+ return ret;
}
-static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
+static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
- struct inode *inode = mapping->host;
- struct ceph_inode_info *ci = ceph_inode(inode);
- int got = (uintptr_t)priv;
+ struct ceph_netfs_request_data *priv = rreq->netfs_priv;
- if (got)
- ceph_put_cap_refs(ci, got);
+ if (!priv)
+ return;
+
+ if (priv->caps)
+ ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
+ kfree(priv);
+ rreq->netfs_priv = NULL;
}
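init_request and free_request above form an acquire-on-setup, release-on-teardown pair for the request's private data (cap references included). The same ownership rule in a minimal userspace sketch with hypothetical names:

#include <stdlib.h>

struct req_priv { int caps; };     /* stands in for the netfs private data */
struct request  { void *priv; };

static int init_request(struct request *rq, int caps_held)
{
	struct req_priv *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;
	p->caps = caps_held;       /* record what must be put back later */
	rq->priv = p;
	return 0;
}

static void free_request(struct request *rq)
{
	struct req_priv *p = rq->priv;

	if (!p)
		return;
	/* release whatever init_request acquired, then drop the record */
	free(p);
	rq->priv = NULL;
}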
-static const struct netfs_read_request_ops ceph_netfs_read_ops = {
- .init_rreq = ceph_init_rreq,
- .is_cache_enabled = ceph_is_cache_enabled,
- .begin_cache_operation = ceph_begin_cache_operation,
- .issue_op = ceph_netfs_issue_op,
+const struct netfs_request_ops ceph_netfs_ops = {
+ .init_request = ceph_init_request,
+ .free_request = ceph_netfs_free_request,
+ .prepare_read = ceph_netfs_prepare_read,
+ .issue_read = ceph_netfs_issue_read,
.expand_readahead = ceph_netfs_expand_readahead,
- .clamp_length = ceph_netfs_clamp_length,
.check_write_begin = ceph_netfs_check_write_begin,
- .cleanup = ceph_readahead_cleanup,
};
-/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *subpage)
+#ifdef CONFIG_CEPH_FSCACHE
+static void ceph_set_page_fscache(struct page *page)
{
- struct folio *folio = page_folio(subpage);
- struct inode *inode = file_inode(file);
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_vino vino = ceph_vino(inode);
- size_t len = folio_size(folio);
- u64 off = folio_file_pos(folio);
-
- if (ci->i_inline_version != CEPH_INLINE_NONE) {
- /*
- * Uptodate inline data should have been added
- * into page cache while getting Fcr caps.
- */
- if (off == 0) {
- folio_unlock(folio);
- return -EINVAL;
- }
- zero_user_segment(&folio->page, 0, folio_size(folio));
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- return 0;
- }
-
- dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n",
- vino.ino, vino.snap, file, off, len, folio, folio_index(folio));
-
- return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
+ folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
}
-static void ceph_readahead(struct readahead_control *ractl)
+static void ceph_fscache_write_terminated(void *priv, ssize_t error)
{
- struct inode *inode = file_inode(ractl->file);
- struct ceph_file_info *fi = ractl->file->private_data;
- struct ceph_rw_context *rw_ctx;
- int got = 0;
- int ret = 0;
+ struct inode *inode = priv;
- if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
- return;
+ if (IS_ERR_VALUE(error) && error != -ENOBUFS)
+ ceph_fscache_invalidate(inode, false);
+}
- rw_ctx = ceph_find_rw_context(fi);
- if (!rw_ctx) {
- /*
- * readahead callers do not necessarily hold Fcb caps
- * (e.g. fadvise, madvise).
- */
- int want = CEPH_CAP_FILE_CACHE;
+static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
- ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
- if (ret < 0)
- dout("start_read %p, error getting cap\n", inode);
- else if (!(got & want))
- dout("start_read %p, no cache cap\n", inode);
+ fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
+ ceph_fscache_write_terminated, inode, true, caching);
+}
+#else
+static inline void ceph_set_page_fscache(struct page *page)
+{
+}
- if (ret <= 0)
- return;
- }
- netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
+static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
+{
}
+#endif /* CONFIG_CEPH_FSCACHE */
struct ceph_writeback_ctl
{
@@ -384,7 +582,36 @@ struct ceph_writeback_ctl
u64 truncate_size;
u32 truncate_seq;
bool size_stable;
+
bool head_snapc;
+ struct ceph_snap_context *snapc;
+ struct ceph_snap_context *last_snapc;
+
+ bool done;
+ bool should_loop;
+ bool range_whole;
+ pgoff_t start_index;
+ pgoff_t index;
+ pgoff_t end;
+ xa_mark_t tag;
+
+ pgoff_t strip_unit_end;
+ unsigned int wsize;
+ unsigned int nr_folios;
+ unsigned int max_pages;
+ unsigned int locked_pages;
+
+ int op_idx;
+ int num_ops;
+ u64 offset;
+ u64 len;
+
+ struct folio_batch fbatch;
+ unsigned int processed_in_fbatch;
+
+ bool from_pool;
+ struct page **pages;
+ struct page **data_pages;
};
/*
@@ -396,13 +623,14 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
struct ceph_snap_context *page_snapc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
- capsnap->context, capsnap->dirty_pages);
+ doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
+ capsnap, capsnap->context, capsnap->dirty_pages);
if (!capsnap->dirty_pages)
continue;
@@ -434,8 +662,8 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
}
if (!snapc && ci->i_wrbuffer_ref_head) {
snapc = ceph_get_snap_context(ci->i_head_snapc);
- dout(" head snapc %p has %d dirty pages\n",
- snapc, ci->i_wrbuffer_ref_head);
+ doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
+ ci->i_wrbuffer_ref_head);
if (ctl) {
ctl->i_size = i_size_read(inode);
ctl->truncate_size = ci->i_truncate_size;
@@ -452,10 +680,12 @@ static u64 get_writepages_data_length(struct inode *inode,
struct page *page, u64 start)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_snap_context *snapc = page_snap_context(page);
+ struct ceph_snap_context *snapc;
struct ceph_cap_snap *capsnap = NULL;
u64 end = i_size_read(inode);
+ u64 ret;
+ snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
if (snapc != ci->i_head_snapc) {
bool found = false;
spin_lock(&ci->i_ceph_lock);
@@ -470,91 +700,130 @@ static u64 get_writepages_data_length(struct inode *inode,
spin_unlock(&ci->i_ceph_lock);
WARN_ON(!found);
}
- if (end > page_offset(page) + thp_size(page))
- end = page_offset(page) + thp_size(page);
- return end > start ? end - start : 0;
+ if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
+ end = ceph_fscrypt_page_offset(page) + thp_size(page);
+ ret = end > start ? end - start : 0;
+ if (ret && fscrypt_is_bounce_page(page))
+ ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
+ return ret;
}
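The length returned above is clamped to EOF and, for encrypted bounce pages, rounded back up to a full crypto block. A small numeric sketch, assuming a 4 KiB page and a 4096-byte CEPH_FSCRYPT_BLOCK_SIZE:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ              4096u
#define FSCRYPT_BLOCK_SIZE   4096u   /* assumed value for the example */
#define ROUND_UP(x, a)       ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	uint64_t start   = 1u << 20;           /* page offset in the file   */
	uint64_t i_size  = (1u << 20) + 1000;  /* EOF lands inside the page */
	int encrypted    = 1;

	uint64_t end = start + PAGE_SZ;
	if (end > i_size)
		end = i_size;                  /* clamp to EOF */

	uint64_t len = end > start ? end - start : 0;
	if (len && encrypted)
		len = ROUND_UP(len, FSCRYPT_BLOCK_SIZE);

	printf("len=%llu\n", (unsigned long long)len);   /* 1000 -> 4096 */
	return 0;
}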
/*
- * Write a single page, but leave the page locked.
+ * Write a folio, but leave it locked.
*
* If we get a write error, mark the mapping for error, but still adjust the
- * dirty page accounting (i.e., page is no longer dirty).
+ * dirty page accounting (i.e., folio is no longer dirty).
*/
-static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+static int write_folio_nounlock(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
+ struct page *page = &folio->page;
+ struct inode *inode = folio->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_snap_context *snapc, *oldest;
- loff_t page_off = page_offset(page);
+ loff_t page_off = folio_pos(folio);
int err;
- loff_t len = thp_size(page);
+ loff_t len = folio_size(folio);
+ loff_t wlen;
struct ceph_writeback_ctl ceph_wbc;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
+ bool caching = ceph_is_cache_enabled(inode);
+ struct page *bounce_page = NULL;
- dout("writepage %p idx %lu\n", page, page->index);
+ doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
+ folio->index);
+
+ if (ceph_inode_is_shutdown(inode))
+ return -EIO;
/* verify this is a writeable snap context */
- snapc = page_snap_context(page);
+ snapc = page_snap_context(&folio->page);
if (!snapc) {
- dout("writepage %p page %p not dirty?\n", inode, page);
+ doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
+ folio);
return 0;
}
oldest = get_oldest_context(inode, &ceph_wbc, snapc);
if (snapc->seq > oldest->seq) {
- dout("writepage %p page %p snapc %p not writeable - noop\n",
- inode, page, snapc);
+ doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
+ ceph_vinop(inode), folio, snapc);
/* we should only noop if called by kswapd */
WARN_ON(!(current->flags & PF_MEMALLOC));
ceph_put_snap_context(oldest);
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return 0;
}
ceph_put_snap_context(oldest);
/* is this a partial page at end of file? */
if (page_off >= ceph_wbc.i_size) {
- dout("%p page eof %llu\n", page, ceph_wbc.i_size);
- page->mapping->a_ops->invalidatepage(page, 0, thp_size(page));
+ doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
+ ceph_vinop(inode), folio->index, ceph_wbc.i_size);
+ folio_invalidate(folio, 0, folio_size(folio));
return 0;
}
if (ceph_wbc.i_size < page_off + len)
len = ceph_wbc.i_size - page_off;
- dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
- inode, page, page->index, page_off, len, snapc, snapc->seq);
+ wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
+ doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
+ ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
+ snapc->seq);
if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
- set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
+ fsc->write_congested = true;
- set_page_writeback(page);
- req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
- CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
- ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
- true);
+ req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
+ page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE, snapc,
+ ceph_wbc.truncate_seq,
+ ceph_wbc.truncate_size, true);
if (IS_ERR(req)) {
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ folio_redirty_for_writepage(wbc, folio);
return PTR_ERR(req);
}
- /* it may be a short write due to an object boundary */
- WARN_ON_ONCE(len > thp_size(page));
- osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
- dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);
+ if (wlen < len)
+ len = wlen;
+
+ folio_start_writeback(folio);
+ if (caching)
+ ceph_set_page_fscache(&folio->page);
+ ceph_fscache_write_to_cache(inode, page_off, len, caching);
+
+ if (IS_ENCRYPTED(inode)) {
+ bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
+ CEPH_FSCRYPT_BLOCK_SIZE, 0,
+ GFP_NOFS);
+ if (IS_ERR(bounce_page)) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
+ ceph_osdc_put_request(req);
+ return PTR_ERR(bounce_page);
+ }
+ }
- req->r_mtime = inode->i_mtime;
- err = ceph_osdc_start_request(osdc, req, true);
- if (!err)
- err = ceph_osdc_wait_request(osdc, req);
+ /* it may be a short write due to an object boundary */
+ WARN_ON_ONCE(len > folio_size(folio));
+ osd_req_op_extent_osd_data_pages(req, 0,
+ bounce_page ? &bounce_page : &page, wlen, 0,
+ false, false);
+ doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
+ ceph_vinop(inode), page_off, len, wlen,
+ IS_ENCRYPTED(inode) ? "" : "not ");
+
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(osdc, req);
+ err = ceph_osdc_wait_request(osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, err);
-
+ fscrypt_free_bounce_page(bounce_page);
ceph_osdc_put_request(req);
if (err == 0)
err = len;
@@ -565,48 +834,33 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
wbc = &tmp_wbc;
if (err == -ERESTARTSYS) {
/* killed by SIGKILL */
- dout("writepage interrupted page %p\n", page);
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ doutc(cl, "%llx.%llx interrupted page %p\n",
+ ceph_vinop(inode), folio);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
return err;
}
if (err == -EBLOCKLISTED)
fsc->blocklisted = true;
- dout("writepage setting page/mapping error %d %p\n",
- err, page);
+ doutc(cl, "%llx.%llx setting mapping error %d %p\n",
+ ceph_vinop(inode), err, folio);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
- dout("writepage cleaned page %p\n", page);
+ doutc(cl, "%llx.%llx cleaned page %p\n",
+ ceph_vinop(inode), folio);
err = 0; /* vfs expects us to return 0 */
}
- oldest = detach_page_private(page);
+ oldest = folio_detach_private(folio);
WARN_ON_ONCE(oldest != snapc);
- end_page_writeback(page);
+ folio_end_writeback(folio);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc); /* page's reference */
if (atomic_long_dec_return(&fsc->writeback_count) <
CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
- clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
-
- return err;
-}
+ fsc->write_congested = false;
-static int ceph_writepage(struct page *page, struct writeback_control *wbc)
-{
- int err;
- struct inode *inode = page->mapping->host;
- BUG_ON(!inode);
- ihold(inode);
- err = writepage_nounlock(page, wbc);
- if (err == -ERESTARTSYS) {
- /* direct memory reclaimer was killed by SIGKILL. return 0
- * to prevent caller from setting mapping/page error */
- err = 0;
- }
- unlock_page(page);
- iput(inode);
return err;
}
@@ -620,6 +874,7 @@ static void writepages_finish(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_osd_data *osd_data;
struct page *page;
int num_pages, total_pages = 0;
@@ -627,11 +882,12 @@ static void writepages_finish(struct ceph_osd_request *req)
int rc = req->r_result;
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
unsigned int len = 0;
bool remove_page;
- dout("writepages_finish %p rc %d\n", inode, rc);
+ doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
if (rc < 0) {
mapping_set_error(mapping, rc);
ceph_set_error_write(ci);
@@ -652,8 +908,13 @@ static void writepages_finish(struct ceph_osd_request *req)
/* clean all pages */
for (i = 0; i < req->r_num_ops; i++) {
- if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
+ if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
+ pr_warn_client(cl,
+ "%llx.%llx incorrect op %d req %p index %d tid %llu\n",
+ ceph_vinop(inode), req->r_ops[i].op, req, i,
+ req->r_tid);
break;
+ }
osd_data = osd_req_op_extent_osd_data(req, i);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
@@ -663,27 +924,38 @@ static void writepages_finish(struct ceph_osd_request *req)
total_pages += num_pages;
for (j = 0; j < num_pages; j++) {
page = osd_data->pages[j];
+ if (fscrypt_is_bounce_page(page)) {
+ page = fscrypt_pagecache_page(page);
+ fscrypt_free_bounce_page(osd_data->pages[j]);
+ osd_data->pages[j] = page;
+ }
BUG_ON(!page);
WARN_ON(!PageUptodate(page));
if (atomic_long_dec_return(&fsc->writeback_count) <
CONGESTION_OFF_THRESH(
fsc->mount_options->congestion_kb))
- clear_bdi_congested(inode_to_bdi(inode),
- BLK_RW_ASYNC);
+ fsc->write_congested = false;
ceph_put_snap_context(detach_page_private(page));
end_page_writeback(page);
- dout("unlocking %p\n", page);
+
+ if (atomic64_dec_return(&mdsc->dirty_folios) <= 0) {
+ wake_up_all(&mdsc->flush_end_wq);
+ WARN_ON(atomic64_read(&mdsc->dirty_folios) < 0);
+ }
+
+ doutc(cl, "unlocking %p\n", page);
if (remove_page)
- generic_error_remove_page(inode->i_mapping,
- page);
+ generic_error_remove_folio(inode->i_mapping,
+ page_folio(page));
unlock_page(page);
}
- dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
- inode, osd_data->length, rc >= 0 ? num_pages : 0);
+ doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
+ ceph_vinop(inode), osd_data->length,
+ rc >= 0 ? num_pages : 0);
release_pages(osd_data->pages, num_pages);
}
@@ -699,365 +971,748 @@ static void writepages_finish(struct ceph_osd_request *req)
else
kfree(osd_data->pages);
ceph_osdc_put_request(req);
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
-/*
- * initiate async writeback
- */
-static int ceph_writepages_start(struct address_space *mapping,
- struct writeback_control *wbc)
+static inline
+bool is_forced_umount(struct address_space *mapping)
{
struct inode *inode = mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_vino vino = ceph_vino(inode);
- pgoff_t index, start_index, end = -1;
- struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
- struct pagevec pvec;
- int rc = 0;
- unsigned int wsize = i_blocksize(inode);
- struct ceph_osd_request *req = NULL;
- struct ceph_writeback_ctl ceph_wbc;
- bool should_loop, range_whole = false;
- bool done = false;
-
- dout("writepages_start %p (mode=%s)\n", inode,
- wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
- (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
+ if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
- pr_warn_ratelimited(
- "writepage_start %p %lld forced umount\n",
- inode, ceph_ino(inode));
+ pr_warn_ratelimited_client(cl,
+ "%llx.%llx %lld forced umount\n",
+ ceph_vinop(inode), ceph_ino(inode));
}
mapping_set_error(mapping, -EIO);
- return -EIO; /* we're in a forced umount, don't write! */
+ return true;
}
+
+ return false;
+}
+
+static inline
+unsigned int ceph_define_write_size(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ unsigned int wsize = i_blocksize(inode);
+
if (fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
- pagevec_init(&pvec);
+ return wsize;
+}
- start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
- index = start_index;
+static inline
+void ceph_folio_batch_init(struct ceph_writeback_ctl *ceph_wbc)
+{
+ folio_batch_init(&ceph_wbc->fbatch);
+ ceph_wbc->processed_in_fbatch = 0;
+}
+
+static inline
+void ceph_folio_batch_reinit(struct ceph_writeback_ctl *ceph_wbc)
+{
+ folio_batch_release(&ceph_wbc->fbatch);
+ ceph_folio_batch_init(ceph_wbc);
+}
+
+static inline
+void ceph_init_writeback_ctl(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ ceph_wbc->snapc = NULL;
+ ceph_wbc->last_snapc = NULL;
+
+ ceph_wbc->strip_unit_end = 0;
+ ceph_wbc->wsize = ceph_define_write_size(mapping);
+
+ ceph_wbc->nr_folios = 0;
+ ceph_wbc->max_pages = 0;
+ ceph_wbc->locked_pages = 0;
+
+ ceph_wbc->done = false;
+ ceph_wbc->should_loop = false;
+ ceph_wbc->range_whole = false;
+
+ ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
+ ceph_wbc->index = ceph_wbc->start_index;
+ ceph_wbc->end = -1;
+
+ ceph_wbc->tag = wbc_to_tag(wbc);
+
+ ceph_wbc->op_idx = -1;
+ ceph_wbc->num_ops = 0;
+ ceph_wbc->offset = 0;
+ ceph_wbc->len = 0;
+ ceph_wbc->from_pool = false;
+
+ ceph_folio_batch_init(ceph_wbc);
+
+ ceph_wbc->pages = NULL;
+ ceph_wbc->data_pages = NULL;
+}
+
+static inline
+int ceph_define_writeback_range(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
-retry:
/* find oldest snap context with dirty data */
- snapc = get_oldest_context(inode, &ceph_wbc, NULL);
- if (!snapc) {
+ ceph_wbc->snapc = get_oldest_context(inode, ceph_wbc, NULL);
+ if (!ceph_wbc->snapc) {
/* hmm, why does writepages get called when there
is no dirty data? */
- dout(" no snap context with dirty data?\n");
- goto out;
+ doutc(cl, " no snap context with dirty data?\n");
+ return -ENODATA;
}
- dout(" oldest snapc is %p seq %lld (%d snaps)\n",
- snapc, snapc->seq, snapc->num_snaps);
- should_loop = false;
- if (ceph_wbc.head_snapc && snapc != last_snapc) {
+ doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n",
+ ceph_wbc->snapc, ceph_wbc->snapc->seq,
+ ceph_wbc->snapc->num_snaps);
+
+ ceph_wbc->should_loop = false;
+
+ if (ceph_wbc->head_snapc && ceph_wbc->snapc != ceph_wbc->last_snapc) {
/* where to start/end? */
if (wbc->range_cyclic) {
- index = start_index;
- end = -1;
- if (index > 0)
- should_loop = true;
- dout(" cyclic, start at %lu\n", index);
+ ceph_wbc->index = ceph_wbc->start_index;
+ ceph_wbc->end = -1;
+ if (ceph_wbc->index > 0)
+ ceph_wbc->should_loop = true;
+ doutc(cl, " cyclic, start at %lu\n", ceph_wbc->index);
} else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
+ ceph_wbc->index = wbc->range_start >> PAGE_SHIFT;
+ ceph_wbc->end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = true;
- dout(" not cyclic, %lu to %lu\n", index, end);
+ ceph_wbc->range_whole = true;
+ doutc(cl, " not cyclic, %lu to %lu\n",
+ ceph_wbc->index, ceph_wbc->end);
}
- } else if (!ceph_wbc.head_snapc) {
+ } else if (!ceph_wbc->head_snapc) {
/* Do not respect wbc->range_{start,end}. Dirty pages
* in that range can be associated with newer snapc.
* They are not writeable until we write all dirty pages
* associated with 'snapc' get written */
- if (index > 0)
- should_loop = true;
- dout(" non-head snapc, range whole\n");
+ if (ceph_wbc->index > 0)
+ ceph_wbc->should_loop = true;
+ doutc(cl, " non-head snapc, range whole\n");
}
- ceph_put_snap_context(last_snapc);
- last_snapc = snapc;
+ ceph_put_snap_context(ceph_wbc->last_snapc);
+ ceph_wbc->last_snapc = ceph_wbc->snapc;
- while (!done && index <= end) {
- int num_ops = 0, op_idx;
- unsigned i, pvec_pages, max_pages, locked_pages = 0;
- struct page **pages = NULL, **data_pages;
- struct page *page;
- pgoff_t strip_unit_end = 0;
- u64 offset = 0, len = 0;
- bool from_pool = false;
+ return 0;
+}
- max_pages = wsize >> PAGE_SHIFT;
+static inline
+bool has_writeback_done(struct ceph_writeback_ctl *ceph_wbc)
+{
+ return ceph_wbc->done && ceph_wbc->index > ceph_wbc->end;
+}
-get_more_pages:
- pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
- end, PAGECACHE_TAG_DIRTY);
- dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
- if (!pvec_pages && !locked_pages)
- break;
- for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
- page = pvec.pages[i];
- dout("? %p idx %lu\n", page, page->index);
- if (locked_pages == 0)
- lock_page(page); /* first page */
- else if (!trylock_page(page))
- break;
+static inline
+bool can_next_page_be_processed(struct ceph_writeback_ctl *ceph_wbc,
+ unsigned index)
+{
+ return index < ceph_wbc->nr_folios &&
+ ceph_wbc->locked_pages < ceph_wbc->max_pages;
+}
- /* only dirty pages, or our accounting breaks */
- if (unlikely(!PageDirty(page)) ||
- unlikely(page->mapping != mapping)) {
- dout("!dirty or !mapping %p\n", page);
- unlock_page(page);
- continue;
- }
- /* only if matching snap context */
- pgsnapc = page_snap_context(page);
- if (pgsnapc != snapc) {
- dout("page snapc %p %lld != oldest %p %lld\n",
- pgsnapc, pgsnapc->seq, snapc, snapc->seq);
- if (!should_loop &&
- !ceph_wbc.head_snapc &&
- wbc->sync_mode != WB_SYNC_NONE)
- should_loop = true;
- unlock_page(page);
- continue;
- }
- if (page_offset(page) >= ceph_wbc.i_size) {
- dout("%p page eof %llu\n",
- page, ceph_wbc.i_size);
- if ((ceph_wbc.size_stable ||
- page_offset(page) >= i_size_read(inode)) &&
- clear_page_dirty_for_io(page))
- mapping->a_ops->invalidatepage(page,
- 0, thp_size(page));
- unlock_page(page);
- continue;
- }
- if (strip_unit_end && (page->index > strip_unit_end)) {
- dout("end of strip unit %p\n", page);
- unlock_page(page);
- break;
- }
- if (PageWriteback(page)) {
- if (wbc->sync_mode == WB_SYNC_NONE) {
- dout("%p under writeback\n", page);
- unlock_page(page);
- continue;
- }
- dout("waiting on writeback %p\n", page);
- wait_on_page_writeback(page);
- }
+static
+int ceph_check_page_before_write(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_snap_context *pgsnapc;
+
+ /* only dirty folios, or our accounting breaks */
+ if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
+ doutc(cl, "!dirty or !mapping %p\n", folio);
+ return -ENODATA;
+ }
- if (!clear_page_dirty_for_io(page)) {
- dout("%p !clear_page_dirty_for_io\n", page);
- unlock_page(page);
- continue;
- }
+ /* only if matching snap context */
+ pgsnapc = page_snap_context(&folio->page);
+ if (pgsnapc != ceph_wbc->snapc) {
+ doutc(cl, "folio snapc %p %lld != oldest %p %lld\n",
+ pgsnapc, pgsnapc->seq,
+ ceph_wbc->snapc, ceph_wbc->snapc->seq);
- /*
- * We have something to write. If this is
- * the first locked page this time through,
- * calculate max possinle write size and
- * allocate a page array
- */
- if (locked_pages == 0) {
- u64 objnum;
- u64 objoff;
- u32 xlen;
-
- /* prepare async write request */
- offset = (u64)page_offset(page);
- ceph_calc_file_object_mapping(&ci->i_layout,
- offset, wsize,
- &objnum, &objoff,
- &xlen);
- len = xlen;
-
- num_ops = 1;
- strip_unit_end = page->index +
- ((len - 1) >> PAGE_SHIFT);
-
- BUG_ON(pages);
- max_pages = calc_pages_for(0, (u64)len);
- pages = kmalloc_array(max_pages,
- sizeof(*pages),
- GFP_NOFS);
- if (!pages) {
- from_pool = true;
- pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
- BUG_ON(!pages);
- }
-
- len = 0;
- } else if (page->index !=
- (offset + len) >> PAGE_SHIFT) {
- if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
- CEPH_OSD_MAX_OPS)) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- break;
- }
-
- num_ops++;
- offset = (u64)page_offset(page);
- len = 0;
- }
+ if (!ceph_wbc->should_loop && !ceph_wbc->head_snapc &&
+ wbc->sync_mode != WB_SYNC_NONE)
+ ceph_wbc->should_loop = true;
+
+ return -ENODATA;
+ }
+
+ if (folio_pos(folio) >= ceph_wbc->i_size) {
+ doutc(cl, "folio at %lu beyond eof %llu\n",
+ folio->index, ceph_wbc->i_size);
+
+ if ((ceph_wbc->size_stable ||
+ folio_pos(folio) >= i_size_read(inode)) &&
+ folio_clear_dirty_for_io(folio))
+ folio_invalidate(folio, 0, folio_size(folio));
+
+ return -ENODATA;
+ }
+
+ if (ceph_wbc->strip_unit_end &&
+ (folio->index > ceph_wbc->strip_unit_end)) {
+ doutc(cl, "end of strip unit %p\n", folio);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static inline
+void __ceph_allocate_page_array(struct ceph_writeback_ctl *ceph_wbc,
+ unsigned int max_pages)
+{
+ ceph_wbc->pages = kmalloc_array(max_pages,
+ sizeof(*ceph_wbc->pages),
+ GFP_NOFS);
+ if (!ceph_wbc->pages) {
+ ceph_wbc->from_pool = true;
+ ceph_wbc->pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
+ BUG_ON(!ceph_wbc->pages);
+ }
+}
- /* note position of first page in pvec */
- dout("%p will write page %p idx %lu\n",
- inode, page, page->index);
+static inline
+void ceph_allocate_page_array(struct address_space *mapping,
+ struct ceph_writeback_ctl *ceph_wbc,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objnum;
+ u64 objoff;
+ u32 xlen;
+
+ /* prepare async write request */
+ ceph_wbc->offset = (u64)folio_pos(folio);
+ ceph_calc_file_object_mapping(&ci->i_layout,
+ ceph_wbc->offset, ceph_wbc->wsize,
+ &objnum, &objoff, &xlen);
+
+ ceph_wbc->num_ops = 1;
+ ceph_wbc->strip_unit_end = folio->index + ((xlen - 1) >> PAGE_SHIFT);
+
+ BUG_ON(ceph_wbc->pages);
+ ceph_wbc->max_pages = calc_pages_for(0, (u64)xlen);
+ __ceph_allocate_page_array(ceph_wbc, ceph_wbc->max_pages);
+
+ ceph_wbc->len = 0;
+}
+
+static inline
+bool is_folio_index_contiguous(const struct ceph_writeback_ctl *ceph_wbc,
+ const struct folio *folio)
+{
+ return folio->index == (ceph_wbc->offset + ceph_wbc->len) >> PAGE_SHIFT;
+}
+
+static inline
+bool is_num_ops_too_big(struct ceph_writeback_ctl *ceph_wbc)
+{
+ return ceph_wbc->num_ops >=
+ (ceph_wbc->from_pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS);
+}
+
+static inline
+bool is_write_congestion_happened(struct ceph_fs_client *fsc)
+{
+ return atomic_long_inc_return(&fsc->writeback_count) >
+ CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb);
+}
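Congestion handling is hysteretic: the flag is raised once writeback_count passes the on-threshold and only cleared after it falls a quarter below it (see the CONGESTION_*_THRESH macros near the top of this file). A sketch under the assumption that the on-threshold is congestion_kb converted to 4 KiB pages:

#include <stdbool.h>

static unsigned long on_thresh(unsigned long congestion_kb)
{
	return congestion_kb >> 2;          /* KiB -> 4 KiB pages (assumed) */
}

static unsigned long off_thresh(unsigned long congestion_kb)
{
	unsigned long on = on_thresh(congestion_kb);

	return on - (on >> 2);              /* clear only after a 25% drop */
}

/* Update the congested flag with hysteresis around the two thresholds. */
static bool update_congested(unsigned long dirty_pages,
			     unsigned long congestion_kb, bool congested)
{
	if (dirty_pages > on_thresh(congestion_kb))
		return true;
	if (dirty_pages < off_thresh(congestion_kb))
		return false;
	return congested;                   /* in between: keep current state */
}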
- if (atomic_long_inc_return(&fsc->writeback_count) >
- CONGESTION_ON_THRESH(
- fsc->mount_options->congestion_kb)) {
- set_bdi_congested(inode_to_bdi(inode),
- BLK_RW_ASYNC);
+static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc, struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct page **pages = ceph_wbc->pages;
+ unsigned int index = ceph_wbc->locked_pages;
+ gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;
+
+ if (IS_ENCRYPTED(inode)) {
+ pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
+ PAGE_SIZE,
+ 0,
+ gfp_flags);
+ if (IS_ERR(pages[index])) {
+ int err = PTR_ERR(pages[index]);
+
+ if (err == -EINVAL) {
+ pr_err_client(cl, "inode->i_blkbits=%hhu\n",
+ inode->i_blkbits);
}
+ /* better not fail on first page! */
+ BUG_ON(ceph_wbc->locked_pages == 0);
+
+ pages[index] = NULL;
+ return err;
+ }
+ } else {
+ pages[index] = &folio->page;
+ }
+
+ ceph_wbc->locked_pages++;
+
+ return 0;
+}
- pages[locked_pages++] = page;
- pvec.pages[i] = NULL;
+static
+int ceph_process_folio_batch(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct folio *folio = NULL;
+ unsigned i;
+ int rc = 0;
+
+ for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
+ folio = ceph_wbc->fbatch.folios[i];
- len += thp_size(page);
+ if (!folio)
+ continue;
+
+ doutc(cl, "? %p idx %lu, folio_test_writeback %#x, "
+ "folio_test_dirty %#x, folio_test_locked %#x\n",
+ folio, folio->index, folio_test_writeback(folio),
+ folio_test_dirty(folio),
+ folio_test_locked(folio));
+
+ if (folio_test_writeback(folio) ||
+ folio_test_private_2(folio) /* [DEPRECATED] */) {
+ doutc(cl, "waiting on writeback %p\n", folio);
+ folio_wait_writeback(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+ continue;
}
- /* did we get anything? */
- if (!locked_pages)
- goto release_pvec_pages;
- if (i) {
- unsigned j, n = 0;
- /* shift unused page to beginning of pvec */
- for (j = 0; j < pvec_pages; j++) {
- if (!pvec.pages[j])
- continue;
- if (n < j)
- pvec.pages[n] = pvec.pages[j];
- n++;
- }
- pvec.nr = n;
+ if (ceph_wbc->locked_pages == 0)
+ folio_lock(folio);
+ else if (!folio_trylock(folio))
+ break;
- if (pvec_pages && i == pvec_pages &&
- locked_pages < max_pages) {
- dout("reached end pvec, trying for more\n");
- pagevec_release(&pvec);
- goto get_more_pages;
+ rc = ceph_check_page_before_write(mapping, wbc,
+ ceph_wbc, folio);
+ if (rc == -ENODATA) {
+ rc = 0;
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ continue;
+ } else if (rc == -E2BIG) {
+ rc = 0;
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ break;
+ }
+
+ if (!folio_clear_dirty_for_io(folio)) {
+ doutc(cl, "%p !folio_clear_dirty_for_io\n", folio);
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ continue;
+ }
+
+ /*
+ * We have something to write. If this is
+ * the first locked page this time through,
+ * calculate max possible write size and
+ * allocate a page array
+ */
+ if (ceph_wbc->locked_pages == 0) {
+ ceph_allocate_page_array(mapping, ceph_wbc, folio);
+ } else if (!is_folio_index_contiguous(ceph_wbc, folio)) {
+ if (is_num_ops_too_big(ceph_wbc)) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ break;
}
+
+ ceph_wbc->num_ops++;
+ ceph_wbc->offset = (u64)folio_pos(folio);
+ ceph_wbc->len = 0;
+ }
+
+ /* note position of first page in fbatch */
+ doutc(cl, "%llx.%llx will write folio %p idx %lu\n",
+ ceph_vinop(inode), folio, folio->index);
+
+ fsc->write_congested = is_write_congestion_happened(fsc);
+
+ rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
+ folio);
+ if (rc) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ break;
}
+ ceph_wbc->fbatch.folios[i] = NULL;
+ ceph_wbc->len += folio_size(folio);
+ }
+
+ ceph_wbc->processed_in_fbatch = i;
+
+ return rc;
+}
+
+static inline
+void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
+{
+ unsigned j, n = 0;
+
+ /* shift unused page to beginning of fbatch */
+ for (j = 0; j < folio_batch_count(fbatch); j++) {
+ if (!fbatch->folios[j])
+ continue;
+
+ if (n < j) {
+ fbatch->folios[n] = fbatch->folios[j];
+ }
+
+ n++;
+ }
+
+ fbatch->nr = n;
+}
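ceph_shift_unused_folios_left compacts the batch in place: surviving entries slide to the front in order and the count is trimmed. The same compaction on a plain pointer array, runnable as-is:

#include <assert.h>
#include <stddef.h>

/* Slide non-NULL entries to the front, preserving order; return new count. */
static size_t compact(void **arr, size_t nr)
{
	size_t n = 0;

	for (size_t j = 0; j < nr; j++) {
		if (!arr[j])
			continue;
		if (n < j)
			arr[n] = arr[j];
		n++;
	}
	return n;
}

int main(void)
{
	int a = 1, b = 2, c = 3;
	void *batch[5] = { &a, NULL, &b, NULL, &c };

	size_t nr = compact(batch, 5);

	assert(nr == 3);
	assert(batch[0] == &a && batch[1] == &b && batch[2] == &c);
	return 0;
}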
+
+static
+int ceph_submit_write(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_vino vino = ceph_vino(inode);
+ struct ceph_osd_request *req = NULL;
+ struct page *page = NULL;
+ bool caching = ceph_is_cache_enabled(inode);
+ u64 offset;
+ u64 len;
+ unsigned i;
+
new_request:
- offset = page_offset(pages[0]);
- len = wsize;
+ offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
+ len = ceph_wbc->wsize;
+ req = ceph_osdc_new_request(&fsc->client->osdc,
+ &ci->i_layout, vino,
+ offset, &len, 0, ceph_wbc->num_ops,
+ CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
+ ceph_wbc->snapc, ceph_wbc->truncate_seq,
+ ceph_wbc->truncate_size, false);
+ if (IS_ERR(req)) {
req = ceph_osdc_new_request(&fsc->client->osdc,
- &ci->i_layout, vino,
- offset, &len, 0, num_ops,
- CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
- snapc, ceph_wbc.truncate_seq,
- ceph_wbc.truncate_size, false);
- if (IS_ERR(req)) {
- req = ceph_osdc_new_request(&fsc->client->osdc,
- &ci->i_layout, vino,
- offset, &len, 0,
- min(num_ops,
- CEPH_OSD_SLAB_OPS),
- CEPH_OSD_OP_WRITE,
- CEPH_OSD_FLAG_WRITE,
- snapc, ceph_wbc.truncate_seq,
- ceph_wbc.truncate_size, true);
- BUG_ON(IS_ERR(req));
+ &ci->i_layout, vino,
+ offset, &len, 0,
+ min(ceph_wbc->num_ops,
+ CEPH_OSD_SLAB_OPS),
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE,
+ ceph_wbc->snapc,
+ ceph_wbc->truncate_seq,
+ ceph_wbc->truncate_size,
+ true);
+ BUG_ON(IS_ERR(req));
+ }
+
+ page = ceph_wbc->pages[ceph_wbc->locked_pages - 1];
+ BUG_ON(len < ceph_fscrypt_page_offset(page) + thp_size(page) - offset);
+
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ for (i = 0; i < folio_batch_count(&ceph_wbc->fbatch); i++) {
+ struct folio *folio = ceph_wbc->fbatch.folios[i];
+
+ if (!folio)
+ continue;
+
+ page = &folio->page;
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
}
- BUG_ON(len < page_offset(pages[locked_pages - 1]) +
- thp_size(page) - offset);
-
- req->r_callback = writepages_finish;
- req->r_inode = inode;
-
- /* Format the osd request message and submit the write */
- len = 0;
- data_pages = pages;
- op_idx = 0;
- for (i = 0; i < locked_pages; i++) {
- u64 cur_offset = page_offset(pages[i]);
- if (offset + len != cur_offset) {
- if (op_idx + 1 == req->r_num_ops)
- break;
- osd_req_op_extent_dup_last(req, op_idx,
- cur_offset - offset);
- dout("writepages got pages at %llu~%llu\n",
- offset, len);
- osd_req_op_extent_osd_data_pages(req, op_idx,
- data_pages, len, 0,
- from_pool, false);
- osd_req_op_extent_update(req, op_idx, len);
-
- len = 0;
- offset = cur_offset;
- data_pages = pages + i;
- op_idx++;
- }
- set_page_writeback(pages[i]);
- len += thp_size(page);
+ for (i = 0; i < ceph_wbc->locked_pages; i++) {
+ page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
+
+ if (!page)
+ continue;
+
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
}
- if (ceph_wbc.size_stable) {
- len = min(len, ceph_wbc.i_size - offset);
- } else if (i == locked_pages) {
- /* writepages_finish() clears writeback pages
- * according to the data length, so make sure
- * data length covers all locked pages */
- u64 min_len = len + 1 - thp_size(page);
- len = get_writepages_data_length(inode, pages[i - 1],
- offset);
- len = max(len, min_len);
+ ceph_osdc_put_request(req);
+ return -EIO;
+ }
+
+ req->r_callback = writepages_finish;
+ req->r_inode = inode;
+
+ /* Format the osd request message and submit the write */
+ len = 0;
+ ceph_wbc->data_pages = ceph_wbc->pages;
+ ceph_wbc->op_idx = 0;
+ for (i = 0; i < ceph_wbc->locked_pages; i++) {
+ u64 cur_offset;
+
+ page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
+ cur_offset = page_offset(page);
+
+ /*
+ * Discontinuity in page range? Ceph can handle that by just passing
+ * multiple extents in the write op.
+ */
+ if (offset + len != cur_offset) {
+ /* If it's full, stop here */
+ if (ceph_wbc->op_idx + 1 == req->r_num_ops)
+ break;
+
+ /* Kick off an fscache write with what we have so far. */
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ /* Start a new extent */
+ osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
+ cur_offset - offset);
+
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
+
+ osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
+ ceph_wbc->data_pages,
+ len, 0,
+ ceph_wbc->from_pool,
+ false);
+ osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+
+ len = 0;
+ offset = cur_offset;
+ ceph_wbc->data_pages = ceph_wbc->pages + i;
+ ceph_wbc->op_idx++;
}
- dout("writepages got pages at %llu~%llu\n", offset, len);
-
- osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
- 0, from_pool, false);
- osd_req_op_extent_update(req, op_idx, len);
-
- BUG_ON(op_idx + 1 != req->r_num_ops);
-
- from_pool = false;
- if (i < locked_pages) {
- BUG_ON(num_ops <= req->r_num_ops);
- num_ops -= req->r_num_ops;
- locked_pages -= i;
-
- /* allocate new pages array for next request */
- data_pages = pages;
- pages = kmalloc_array(locked_pages, sizeof(*pages),
- GFP_NOFS);
- if (!pages) {
- from_pool = true;
- pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
- BUG_ON(!pages);
+
+ set_page_writeback(page);
+
+ if (caching)
+ ceph_set_page_fscache(page);
+
+ len += thp_size(page);
+ }
+
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ if (ceph_wbc->size_stable) {
+ len = min(len, ceph_wbc->i_size - offset);
+ } else if (i == ceph_wbc->locked_pages) {
+ /* writepages_finish() clears writeback pages
+ * according to the data length, so make sure
+ * data length covers all locked pages */
+ u64 min_len = len + 1 - thp_size(page);
+ len = get_writepages_data_length(inode,
+ ceph_wbc->pages[i - 1],
+ offset);
+ len = max(len, min_len);
+ }
+
+ if (IS_ENCRYPTED(inode))
+ len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
+
+ if (IS_ENCRYPTED(inode) &&
+ ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) {
+ pr_warn_client(cl,
+ "bad encrypted write offset=%lld len=%llu\n",
+ offset, len);
+ }
+
+ osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
+ ceph_wbc->data_pages, len,
+ 0, ceph_wbc->from_pool, false);
+ osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+
+ BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);
+
+ ceph_wbc->from_pool = false;
+ if (i < ceph_wbc->locked_pages) {
+ BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
+ ceph_wbc->num_ops -= req->r_num_ops;
+ ceph_wbc->locked_pages -= i;
+
+ /* allocate new pages array for next request */
+ ceph_wbc->data_pages = ceph_wbc->pages;
+ __ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
+ memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
+ ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
+ memset(ceph_wbc->data_pages + i, 0,
+ ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
+ } else {
+ BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
+ /* request message now owns the pages array */
+ ceph_wbc->pages = NULL;
+ }
+
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ req = NULL;
+
+ wbc->nr_to_write -= i;
+ if (ceph_wbc->pages)
+ goto new_request;
+
+ return 0;
+}
+
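+/*
+ * For a data-integrity sync that scanned the whole file (and is not
+ * writing the head snap context), wait for folios still under
+ * writeback against the current snapc before the caller loops back
+ * to pick up pages dirtied against the next snap context.
+ */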
+static
+void ceph_wait_until_current_writes_complete(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct page *page;
+ unsigned i, nr;
+
+ if (wbc->sync_mode != WB_SYNC_NONE &&
+ ceph_wbc->start_index == 0 && /* all dirty pages were checked */
+ !ceph_wbc->head_snapc) {
+ ceph_wbc->index = 0;
+
+ while ((ceph_wbc->index <= ceph_wbc->end) &&
+ (nr = filemap_get_folios_tag(mapping,
+ &ceph_wbc->index,
+ (pgoff_t)-1,
+ PAGECACHE_TAG_WRITEBACK,
+ &ceph_wbc->fbatch))) {
+ for (i = 0; i < nr; i++) {
+ page = &ceph_wbc->fbatch.folios[i]->page;
+ if (page_snap_context(page) != ceph_wbc->snapc)
+ continue;
+ wait_on_page_writeback(page);
}
- memcpy(pages, data_pages + i,
- locked_pages * sizeof(*pages));
- memset(data_pages + i, 0,
- locked_pages * sizeof(*pages));
- } else {
- BUG_ON(num_ops != req->r_num_ops);
- index = pages[i - 1]->index + 1;
- /* request message now owns the pages array */
- pages = NULL;
+
+ folio_batch_release(&ceph_wbc->fbatch);
+ cond_resched();
}
+ }
+}
- req->r_mtime = inode->i_mtime;
- rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
- BUG_ON(rc);
- req = NULL;
+/*
+ * initiate async writeback
+ */
+static int ceph_writepages_start(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_writeback_ctl ceph_wbc;
+ int rc = 0;
+
+ if (wbc->sync_mode == WB_SYNC_NONE && fsc->write_congested)
+ return 0;
+
+ doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
+ wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
+ (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+
+ if (is_forced_umount(mapping)) {
+ /* we're in a forced umount, don't write! */
+ return -EIO;
+ }
+
+ ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
- wbc->nr_to_write -= i;
- if (pages)
- goto new_request;
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ rc = -EIO;
+ goto out;
+ }
+
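+ /*
+ * Entry point for (re)walking the writeback range; we jump back
+ * here after waiting for in-flight writes when the range has to
+ * be scanned again from the start of the file.
+ */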
+retry:
+ rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
+ if (rc == -ENODATA) {
+ /* hmm, why does writepages get called when there
+ is no dirty data? */
+ rc = 0;
+ goto dec_osd_stopping_blocker;
+ }
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
+
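+ /*
+ * Main loop: gather dirty folios tagged for writeback in batches
+ * and push each filled page array out through ceph_submit_write().
+ */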
+ while (!has_writeback_done(&ceph_wbc)) {
+ ceph_wbc.locked_pages = 0;
+ ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;
+
+get_more_pages:
+ ceph_folio_batch_reinit(&ceph_wbc);
+
+ ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
+ &ceph_wbc.index,
+ ceph_wbc.end,
+ ceph_wbc.tag,
+ &ceph_wbc.fbatch);
+ doutc(cl, "pagevec_lookup_range_tag for tag %#x got %d\n",
+ ceph_wbc.tag, ceph_wbc.nr_folios);
+
+ if (!ceph_wbc.nr_folios && !ceph_wbc.locked_pages)
+ break;
+
+process_folio_batch:
+ rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
+ ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
+ if (rc)
+ goto release_folios;
+
+ /* did we get anything? */
+ if (!ceph_wbc.locked_pages)
+ goto release_folios;
+
+ if (ceph_wbc.processed_in_fbatch) {
+ if (folio_batch_count(&ceph_wbc.fbatch) == 0 &&
+ ceph_wbc.locked_pages < ceph_wbc.max_pages) {
+ doutc(cl, "reached end fbatch, trying for more\n");
+ goto get_more_pages;
+ }
+ }
+
+ rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
+ if (rc)
+ goto release_folios;
+
+ ceph_wbc.locked_pages = 0;
+ ceph_wbc.strip_unit_end = 0;
+
+ if (folio_batch_count(&ceph_wbc.fbatch) > 0) {
+ ceph_wbc.nr_folios =
+ folio_batch_count(&ceph_wbc.fbatch);
+ goto process_folio_batch;
+ }
/*
* We stop writing back only if we are not doing
@@ -1066,58 +1721,44 @@ new_request:
* we tagged for writeback prior to entering this loop.
*/
if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
- done = true;
+ ceph_wbc.done = true;
-release_pvec_pages:
- dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
- pvec.nr ? pvec.pages[0] : NULL);
- pagevec_release(&pvec);
+release_folios:
+ doutc(cl, "folio_batch release on %d folios (%p)\n",
+ (int)ceph_wbc.fbatch.nr,
+ ceph_wbc.fbatch.nr ? ceph_wbc.fbatch.folios[0] : NULL);
+ folio_batch_release(&ceph_wbc.fbatch);
}
- if (should_loop && !done) {
+ if (ceph_wbc.should_loop && !ceph_wbc.done) {
/* more to do; loop back to beginning of file */
- dout("writepages looping back to beginning of file\n");
- end = start_index - 1; /* OK even when start_index == 0 */
+ doutc(cl, "looping back to beginning of file\n");
+ /* OK even when start_index == 0 */
+ ceph_wbc.end = ceph_wbc.start_index - 1;
/* to write dirty pages associated with next snapc,
* we need to wait until current writes complete */
- if (wbc->sync_mode != WB_SYNC_NONE &&
- start_index == 0 && /* all dirty pages were checked */
- !ceph_wbc.head_snapc) {
- struct page *page;
- unsigned i, nr;
- index = 0;
- while ((index <= end) &&
- (nr = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_WRITEBACK))) {
- for (i = 0; i < nr; i++) {
- page = pvec.pages[i];
- if (page_snap_context(page) != snapc)
- continue;
- wait_on_page_writeback(page);
- }
- pagevec_release(&pvec);
- cond_resched();
- }
- }
+ ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);
- start_index = 0;
- index = 0;
+ ceph_wbc.start_index = 0;
+ ceph_wbc.index = 0;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
+ if (wbc->range_cyclic || (ceph_wbc.range_whole && wbc->nr_to_write > 0))
+ mapping->writeback_index = ceph_wbc.index;
+
+dec_osd_stopping_blocker:
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
out:
- ceph_osdc_put_request(req);
- ceph_put_snap_context(last_snapc);
- dout("writepages dend - startone, rc = %d\n", rc);
+ ceph_put_snap_context(ceph_wbc.last_snapc);
+ doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
+ rc);
+
return rc;
}
-
-
/*
* See if a given @snapc is either writeable, or already written.
*/
@@ -1133,53 +1774,56 @@ static int context_is_writeable_or_written(struct inode *inode,
/**
* ceph_find_incompatible - find an incompatible context and return it
- * @page: page being dirtied
+ * @folio: folio being dirtied
*
- * We are only allowed to write into/dirty a page if the page is
+ * We are only allowed to write into/dirty a folio if the folio is
* clean, or already dirty within the same snap context. Returns a
* conflicting context if there is one, NULL if there isn't, or a
* negative error code on other errors.
*
- * Must be called with page lock held.
+ * Must be called with folio lock held.
*/
static struct ceph_snap_context *
-ceph_find_incompatible(struct page *page)
+ceph_find_incompatible(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct inode *inode = folio->mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- dout(" page %p forced umount\n", page);
- return ERR_PTR(-EIO);
+ if (ceph_inode_is_shutdown(inode)) {
+ doutc(cl, " %llx.%llx folio %p is shutdown\n",
+ ceph_vinop(inode), folio);
+ return ERR_PTR(-ESTALE);
}
for (;;) {
struct ceph_snap_context *snapc, *oldest;
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- snapc = page_snap_context(page);
+ snapc = page_snap_context(&folio->page);
if (!snapc || snapc == ci->i_head_snapc)
break;
/*
- * this page is already dirty in another (older) snap
+ * this folio is already dirty in another (older) snap
* context! is it writeable now?
*/
oldest = get_oldest_context(inode, NULL, NULL);
if (snapc->seq > oldest->seq) {
/* not writeable -- return it for the caller to deal with */
ceph_put_snap_context(oldest);
- dout(" page %p snapc %p not current or oldest\n", page, snapc);
+ doutc(cl, " %llx.%llx folio %p snapc %p not current or oldest\n",
+ ceph_vinop(inode), folio, snapc);
return ceph_get_snap_context(snapc);
}
ceph_put_snap_context(oldest);
- /* yay, writeable, do it now (without dropping page lock) */
- dout(" page %p snapc %p not current, but oldest\n", page, snapc);
- if (clear_page_dirty_for_io(page)) {
- int r = writepage_nounlock(page, NULL);
+ /* yay, writeable, do it now (without dropping folio lock) */
+ doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
+ ceph_vinop(inode), folio, snapc);
+ if (folio_clear_dirty_for_io(folio)) {
+ int r = write_folio_nounlock(folio, NULL);
if (r < 0)
return ERR_PTR(r);
}
@@ -1188,18 +1832,19 @@ ceph_find_incompatible(struct page *page)
}
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
- struct folio *folio, void **_fsdata)
+ struct folio **foliop, void **_fsdata)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- snapc = ceph_find_incompatible(folio_page(folio, 0));
+ snapc = ceph_find_incompatible(*foliop);
if (snapc) {
int r;
- folio_unlock(folio);
- folio_put(folio);
+ folio_unlock(*foliop);
+ folio_put(*foliop);
+ *foliop = NULL;
if (IS_ERR(snapc))
return PTR_ERR(snapc);
@@ -1216,76 +1861,41 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
* We are only allowed to write into/dirty the page if the page is
* clean, or already dirty within the same snap context.
*/
-static int ceph_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned aop_flags,
- struct page **pagep, void **fsdata)
+static int ceph_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct folio *folio = NULL;
- pgoff_t index = pos >> PAGE_SHIFT;
int r;
- /*
- * Uninlining should have already been done and everything updated, EXCEPT
- * for inline_version sent to the MDS.
- */
- if (ci->i_inline_version != CEPH_INLINE_NONE) {
- unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
- if (aop_flags & AOP_FLAG_NOFS)
- fgp_flags |= FGP_NOFS;
- folio = __filemap_get_folio(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping));
- if (!folio)
- return -ENOMEM;
+ r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
+ if (r < 0)
+ return r;
- /*
- * The inline_version on a new inode is set to 1. If that's the
- * case, then the folio is brand new and isn't yet Uptodate.
- */
- r = 0;
- if (index == 0 && ci->i_inline_version != 1) {
- if (!folio_test_uptodate(folio)) {
- WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n",
- ci->i_inline_version);
- r = -EINVAL;
- }
- goto out;
- }
- zero_user_segment(&folio->page, 0, folio_size(folio));
- folio_mark_uptodate(folio);
- goto out;
- }
-
- r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
- &ceph_netfs_read_ops, NULL);
-out:
- if (r == 0)
- folio_wait_fscache(folio);
- if (r < 0) {
- if (folio)
- folio_put(folio);
- } else {
- WARN_ON_ONCE(!folio_test_locked(folio));
- *pagep = &folio->page;
- }
- return r;
+ folio_wait_private_2(*foliop); /* [DEPRECATED] */
+ WARN_ON_ONCE(!folio_test_locked(*foliop));
+ return 0;
}
/*
* we don't do anything in here that simple_write_end doesn't do
* except adjust dirty page accounting
*/
-static int ceph_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *subpage, void *fsdata)
+static int ceph_write_end(const struct kiocb *iocb,
+ struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(subpage);
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
bool check_cap = false;
- dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
- inode, folio, (int)pos, (int)copied, (int)len);
+ doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
+ file, folio, (int)pos, (int)copied, (int)len);
if (!folio_test_uptodate(folio)) {
/* just return that nothing was copied on a short copy */
@@ -1307,33 +1917,22 @@ out:
folio_put(folio);
if (check_cap)
- ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);
return copied;
}
-/*
- * we set .direct_IO to indicate direct io is supported, but since we
- * intercept O_DIRECT reads and writes early, this function should
- * never get called.
- */
-static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
const struct address_space_operations ceph_aops = {
- .readpage = ceph_readpage,
- .readahead = ceph_readahead,
- .writepage = ceph_writepage,
+ .read_folio = netfs_read_folio,
+ .readahead = netfs_readahead,
.writepages = ceph_writepages_start,
.write_begin = ceph_write_begin,
.write_end = ceph_write_end,
- .set_page_dirty = ceph_set_page_dirty,
- .invalidatepage = ceph_invalidatepage,
- .releasepage = ceph_releasepage,
- .direct_IO = ceph_direct_io,
+ .dirty_folio = ceph_dirty_folio,
+ .invalidate_folio = ceph_invalidate_folio,
+ .release_folio = netfs_release_folio,
+ .direct_IO = noop_direct_IO,
+ .migrate_folio = filemap_migrate_folio,
};
static void ceph_block_sigs(sigset_t *oldset)
@@ -1356,16 +1955,20 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
int want, got, err;
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (ceph_inode_is_shutdown(inode))
+ return ret;
+
ceph_block_sigs(&oldset);
- dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
- inode, ceph_vinop(inode), off);
+ doutc(cl, "%llx.%llx %llu trying to get caps\n",
+ ceph_vinop(inode), off);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
@@ -1376,17 +1979,17 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
if (err < 0)
goto out_restore;
- dout("filemap_fault %p %llu got cap refs on %s\n",
- inode, off, ceph_cap_string(got));
+ doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
+ off, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
- ci->i_inline_version == CEPH_INLINE_NONE) {
+ !ceph_has_inline_data(ci)) {
CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
ceph_add_rw_context(fi, &rw_ctx);
ret = filemap_fault(vmf);
ceph_del_rw_context(fi, &rw_ctx);
- dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
- inode, off, ceph_cap_string(got), ret);
+ doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
+ ceph_vinop(inode), off, ceph_cap_string(got), ret);
} else
err = -EAGAIN;
@@ -1427,8 +2030,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
filemap_invalidate_unlock_shared(mapping);
- dout("filemap_fault %p %llu read inline data ret %x\n",
- inode, off, ret);
+ doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
+ ceph_vinop(inode), off, ret);
}
out_restore:
ceph_restore_sigs(&oldset);
@@ -1442,17 +2045,21 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct ceph_cap_flush *prealloc_cf;
- struct page *page = vmf->page;
- loff_t off = page_offset(page);
+ struct folio *folio = page_folio(vmf->page);
+ loff_t off = folio_pos(folio);
loff_t size = i_size_read(inode);
size_t len;
int want, got, err;
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (ceph_inode_is_shutdown(inode))
+ return ret;
+
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return VM_FAULT_OOM;
@@ -1460,26 +2067,13 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
ceph_block_sigs(&oldset);
- if (ci->i_inline_version != CEPH_INLINE_NONE) {
- struct page *locked_page = NULL;
- if (off == 0) {
- lock_page(page);
- locked_page = page;
- }
- err = ceph_uninline_data(vma->vm_file, locked_page);
- if (locked_page)
- unlock_page(locked_page);
- if (err < 0)
- goto out_free;
- }
-
- if (off + thp_size(page) <= size)
- len = thp_size(page);
+ if (off + folio_size(folio) <= size)
+ len = folio_size(folio);
else
- len = offset_in_thp(page, size);
+ len = offset_in_folio(folio, size);
- dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
- inode, ceph_vinop(inode), off, len, size);
+ doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
+ ceph_vinop(inode), off, len, size);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
else
@@ -1490,33 +2084,33 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
if (err < 0)
goto out_free;
- dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
- inode, off, len, ceph_cap_string(got));
+ doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
+ off, len, ceph_cap_string(got));
- /* Update time before taking page lock */
+ /* Update time before taking folio lock */
file_update_time(vma->vm_file);
inode_inc_iversion_raw(inode);
do {
struct ceph_snap_context *snapc;
- lock_page(page);
+ folio_lock(folio);
- if (page_mkwrite_check_truncate(page, inode) < 0) {
- unlock_page(page);
+ if (folio_mkwrite_check_truncate(folio, inode) < 0) {
+ folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
break;
}
- snapc = ceph_find_incompatible(page);
+ snapc = ceph_find_incompatible(folio);
if (!snapc) {
- /* success. we'll keep the page locked. */
- set_page_dirty(page);
+ /* success. we'll keep the folio locked. */
+ folio_mark_dirty(folio);
ret = VM_FAULT_LOCKED;
break;
}
- unlock_page(page);
+ folio_unlock(folio);
if (IS_ERR(snapc)) {
ret = VM_FAULT_SIGBUS;
@@ -1529,11 +2123,9 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
ceph_put_snap_context(snapc);
} while (err == 0);
- if (ret == VM_FAULT_LOCKED ||
- ci->i_inline_version != CEPH_INLINE_NONE) {
+ if (ret == VM_FAULT_LOCKED) {
int dirty;
spin_lock(&ci->i_ceph_lock);
- ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
&prealloc_cf);
spin_unlock(&ci->i_ceph_lock);
@@ -1541,8 +2133,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
__mark_inode_dirty(inode, dirty);
}
- dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
- inode, off, len, ceph_cap_string(got), ret);
+ doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
+ ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
ceph_put_cap_refs_async(ci, got);
out_free:
ceph_restore_sigs(&oldset);
@@ -1556,6 +2148,7 @@ out_free:
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -1576,8 +2169,8 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
- dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
- inode, ceph_vinop(inode), len, locked_page);
+ doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
+ ceph_vinop(inode), len, locked_page);
if (len > 0) {
void *kaddr = kmap_atomic(page);
@@ -1597,82 +2190,69 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
-int ceph_uninline_data(struct file *filp, struct page *locked_page)
+int ceph_uninline_data(struct file *file)
{
- struct inode *inode = file_inode(filp);
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_osd_request *req;
- struct page *page = NULL;
- u64 len, inline_version;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_osd_request *req = NULL;
+ struct ceph_cap_flush *prealloc_cf = NULL;
+ struct folio *folio = NULL;
+ u64 inline_version = CEPH_INLINE_NONE;
+ struct page *pages[1];
int err = 0;
- bool from_pagecache = false;
+ u64 len;
spin_lock(&ci->i_ceph_lock);
inline_version = ci->i_inline_version;
spin_unlock(&ci->i_ceph_lock);
- dout("uninline_data %p %llx.%llx inline_version %llu\n",
- inode, ceph_vinop(inode), inline_version);
+ doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
+ inline_version);
- if (inline_version == 1 || /* initial version, no data */
- inline_version == CEPH_INLINE_NONE)
+ if (ceph_inode_is_shutdown(inode)) {
+ err = -EIO;
goto out;
-
- if (locked_page) {
- page = locked_page;
- WARN_ON(!PageUptodate(page));
- } else if (ceph_caps_issued(ci) &
- (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
- page = find_get_page(inode->i_mapping, 0);
- if (page) {
- if (PageUptodate(page)) {
- from_pagecache = true;
- lock_page(page);
- } else {
- put_page(page);
- page = NULL;
- }
- }
}
- if (page) {
- len = i_size_read(inode);
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- } else {
- page = __page_cache_alloc(GFP_NOFS);
- if (!page) {
- err = -ENOMEM;
- goto out;
- }
- err = __ceph_do_getattr(inode, page,
- CEPH_STAT_CAP_INLINE_DATA, true);
- if (err < 0) {
- /* no inline data */
- if (err == -ENODATA)
- err = 0;
- goto out;
- }
- len = err;
+ if (inline_version == CEPH_INLINE_NONE)
+ return 0;
+
+ prealloc_cf = ceph_alloc_cap_flush();
+ if (!prealloc_cf)
+ return -ENOMEM;
+
+ if (inline_version == 1) /* initial version, no data */
+ goto out_uninline;
+
+ folio = read_mapping_folio(inode->i_mapping, 0, file);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
+ goto out;
}
+ folio_lock(folio);
+
+ len = i_size_read(inode);
+ if (len > folio_size(folio))
+ len = folio_size(folio);
+
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode), 0, &len, 0, 1,
CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
NULL, 0, 0, false);
if (IS_ERR(req)) {
err = PTR_ERR(req);
- goto out;
+ goto out_unlock;
}
- req->r_mtime = inode->i_mtime;
- err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!err)
- err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_osdc_put_request(req);
if (err < 0)
- goto out;
+ goto out_unlock;
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode), 0, &len, 1, 3,
@@ -1681,10 +2261,11 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
ci->i_truncate_size, false);
if (IS_ERR(req)) {
err = PTR_ERR(req);
- goto out;
+ goto out_unlock;
}
- osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
+ pages[0] = folio_page(folio, 0);
+ osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);
{
__le64 xattr_buf = cpu_to_le64(inline_version);
@@ -1694,7 +2275,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
CEPH_OSD_CMPXATTR_OP_GT,
CEPH_OSD_CMPXATTR_MODE_U64);
if (err)
- goto out_put;
+ goto out_put_req;
}
{
@@ -1705,32 +2286,43 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
"inline_version",
xattr_buf, xattr_len, 0, 0);
if (err)
- goto out_put;
+ goto out_put_req;
}
- req->r_mtime = inode->i_mtime;
- err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!err)
- err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, err);
-out_put:
+out_uninline:
+ if (!err) {
+ int dirty;
+
+ /* Set to CAP_INLINE_NONE and dirty the caps */
+ down_read(&fsc->mdsc->snap_rwsem);
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
+ dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
+ spin_unlock(&ci->i_ceph_lock);
+ up_read(&fsc->mdsc->snap_rwsem);
+ if (dirty)
+ __mark_inode_dirty(inode, dirty);
+ }
+out_put_req:
ceph_osdc_put_request(req);
if (err == -ECANCELED)
err = 0;
-out:
- if (page && page != locked_page) {
- if (from_pagecache) {
- unlock_page(page);
- put_page(page);
- } else
- __free_pages(page, 0);
+out_unlock:
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
}
-
- dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
- inode, ceph_vinop(inode), inline_version, err);
+out:
+ ceph_free_cap_flush(prealloc_cf);
+ doutc(cl, "%llx.%llx inline_version %llu = %d\n",
+ ceph_vinop(inode), inline_version, err);
return err;
}
@@ -1739,14 +2331,13 @@ static const struct vm_operations_struct ceph_vmops = {
.page_mkwrite = ceph_page_mkwrite,
};
-int ceph_mmap(struct file *file, struct vm_area_struct *vma)
+int ceph_mmap_prepare(struct vm_area_desc *desc)
{
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping = desc->file->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio)
return -ENOEXEC;
- file_accessed(file);
- vma->vm_ops = &ceph_vmops;
+ desc->vm_ops = &ceph_vmops;
return 0;
}
@@ -1758,8 +2349,9 @@ enum {
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
s64 pool, struct ceph_string *pool_ns)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
struct rb_node **p, *parent;
struct ceph_pool_perm *perm;
@@ -1794,10 +2386,10 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
goto out;
if (pool_ns)
- dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
- pool, (int)pool_ns->len, pool_ns->str);
+ doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
+ (int)pool_ns->len, pool_ns->str);
else
- dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
+ doutc(cl, "pool %lld no perm cached\n", pool);
down_write(&mdsc->pool_perm_rwsem);
p = &mdsc->pool_perm_tree.rb_node;
@@ -1871,15 +2463,13 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
0, false, true);
- err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
+ ceph_osdc_start_request(&fsc->client->osdc, rd_req);
- wr_req->r_mtime = ci->vfs_inode.i_mtime;
- err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
+ wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
+ ceph_osdc_start_request(&fsc->client->osdc, wr_req);
- if (!err)
- err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
- if (!err2)
- err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
+ err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
+ err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
if (err >= 0 || err == -ENOENT)
have |= POOL_READ;
@@ -1899,7 +2489,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
}
pool_ns_len = pool_ns ? pool_ns->len : 0;
- perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
+ perm = kmalloc(struct_size(perm, pool_ns, pool_ns_len + 1), GFP_NOFS);
if (!perm) {
err = -ENOMEM;
goto out_unlock;
@@ -1924,15 +2514,16 @@ out:
if (!err)
err = have;
if (pool_ns)
- dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
- pool, (int)pool_ns->len, pool_ns->str, err);
+ doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
+ (int)pool_ns->len, pool_ns->str, err);
else
- dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
+ doutc(cl, "pool %lld result = %d\n", pool, err);
return err;
}
int ceph_pool_perm_check(struct inode *inode, int need)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_string *pool_ns;
s64 pool;
@@ -1945,13 +2536,13 @@ int ceph_pool_perm_check(struct inode *inode, int need)
if (ci->i_vino.snap != CEPH_NOSNAP) {
/*
* Pool permission check needs to write to the first object.
- * But for snapshot, head of the first object may have alread
+ * But for snapshot, head of the first object may have already
* been deleted. Skip check to avoid creating orphan object.
*/
return 0;
}
- if (ceph_test_mount_opt(ceph_inode_to_client(inode),
+ if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
NOPOOLPERM))
return 0;
@@ -1962,13 +2553,11 @@ int ceph_pool_perm_check(struct inode *inode, int need)
check:
if (flags & CEPH_I_POOL_PERM) {
if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
- dout("ceph_pool_perm_check pool %lld no read perm\n",
- pool);
+ doutc(cl, "pool %lld no read perm\n", pool);
return -EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
- dout("ceph_pool_perm_check pool %lld no write perm\n",
- pool);
+ doutc(cl, "pool %lld no write perm\n", pool);
return -EPERM;
}
return 0;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 9cfadbb86568..f678bab189d8 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -12,216 +12,101 @@
#include "super.h"
#include "cache.h"
-struct ceph_aux_inode {
- u64 version;
- u64 mtime_sec;
- u64 mtime_nsec;
-};
-
-struct fscache_netfs ceph_cache_netfs = {
- .name = "ceph",
- .version = 0,
-};
-
-static DEFINE_MUTEX(ceph_fscache_lock);
-static LIST_HEAD(ceph_fscache_list);
-
-struct ceph_fscache_entry {
- struct list_head list;
- struct fscache_cookie *fscache;
- size_t uniq_len;
- /* The following members must be last */
- struct ceph_fsid fsid;
- char uniquifier[];
-};
-
-static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
- .name = "CEPH.fsid",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-int __init ceph_fscache_register(void)
-{
- return fscache_register_netfs(&ceph_cache_netfs);
-}
-
-void ceph_fscache_unregister(void)
-{
- fscache_unregister_netfs(&ceph_cache_netfs);
-}
-
-int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
+void ceph_fscache_register_inode_cookie(struct inode *inode)
{
- const struct ceph_fsid *fsid = &fsc->client->fsid;
- const char *fscache_uniq = fsc->mount_options->fscache_uniq;
- size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
- struct ceph_fscache_entry *ent;
- int err = 0;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
- mutex_lock(&ceph_fscache_lock);
- list_for_each_entry(ent, &ceph_fscache_list, list) {
- if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
- continue;
- if (ent->uniq_len != uniq_len)
- continue;
- if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
- continue;
-
- errorfc(fc, "fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
- fsid);
- err = -EBUSY;
- goto out_unlock;
- }
+ /* No caching for filesystem? */
+ if (!fsc->fscache)
+ return;
- ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
- if (!ent) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ /* Regular files only */
+ if (!S_ISREG(inode->i_mode))
+ return;
- memcpy(&ent->fsid, fsid, sizeof(*fsid));
- if (uniq_len > 0) {
- memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
- ent->uniq_len = uniq_len;
- }
+ /* Only new inodes! */
+ if (!(inode_state_read_once(inode) & I_NEW))
+ return;
- fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
- &ceph_fscache_fsid_object_def,
- &ent->fsid, sizeof(ent->fsid) + uniq_len,
- NULL, 0,
- fsc, 0, true);
+ WARN_ON_ONCE(ci->netfs.cache);
- if (fsc->fscache) {
- ent->fscache = fsc->fscache;
- list_add_tail(&ent->list, &ceph_fscache_list);
- } else {
- kfree(ent);
- errorfc(fc, "unable to register fscache cookie for fsid %pU",
- fsid);
- /* all other fs ignore this error */
- }
-out_unlock:
- mutex_unlock(&ceph_fscache_lock);
- return err;
+ ci->netfs.cache =
+ fscache_acquire_cookie(fsc->fscache, 0,
+ &ci->i_vino, sizeof(ci->i_vino),
+ &ci->i_version, sizeof(ci->i_version),
+ i_size_read(inode));
+ if (ci->netfs.cache)
+ mapping_set_release_always(inode->i_mapping);
}
-static enum fscache_checkaux ceph_fscache_inode_check_aux(
- void *cookie_netfs_data, const void *data, uint16_t dlen,
- loff_t object_size)
+void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
{
- struct ceph_aux_inode aux;
- struct ceph_inode_info* ci = cookie_netfs_data;
- struct inode* inode = &ci->vfs_inode;
-
- if (dlen != sizeof(aux) ||
- i_size_read(inode) != object_size)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&aux, 0, sizeof(aux));
- aux.version = ci->i_version;
- aux.mtime_sec = inode->i_mtime.tv_sec;
- aux.mtime_nsec = inode->i_mtime.tv_nsec;
+ fscache_relinquish_cookie(ceph_fscache_cookie(ci), false);
+}
- if (memcmp(data, &aux, sizeof(aux)) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
+void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
- dout("ceph inode 0x%p cached okay\n", ci);
- return FSCACHE_CHECKAUX_OKAY;
+ fscache_use_cookie(ceph_fscache_cookie(ci), will_modify);
}
-static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
- .name = "CEPH.inode",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = ceph_fscache_inode_check_aux,
-};
-
-void ceph_fscache_register_inode_cookie(struct inode *inode)
+void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_aux_inode aux;
- /* No caching for filesystem */
- if (!fsc->fscache)
- return;
+ if (update) {
+ loff_t i_size = i_size_read(inode);
- /* Only cache for regular files that are read only */
- if (!S_ISREG(inode->i_mode))
- return;
-
- inode_lock_nested(inode, I_MUTEX_CHILD);
- if (!ci->fscache) {
- memset(&aux, 0, sizeof(aux));
- aux.version = ci->i_version;
- aux.mtime_sec = inode->i_mtime.tv_sec;
- aux.mtime_nsec = inode->i_mtime.tv_nsec;
- ci->fscache = fscache_acquire_cookie(fsc->fscache,
- &ceph_fscache_inode_object_def,
- &ci->i_vino, sizeof(ci->i_vino),
- &aux, sizeof(aux),
- ci, i_size_read(inode), false);
+ fscache_unuse_cookie(ceph_fscache_cookie(ci),
+ &ci->i_version, &i_size);
+ } else {
+ fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL);
}
- inode_unlock(inode);
}
-void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+void ceph_fscache_update(struct inode *inode)
{
- struct fscache_cookie* cookie;
-
- if ((cookie = ci->fscache) == NULL)
- return;
-
- ci->fscache = NULL;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ loff_t i_size = i_size_read(inode);
- fscache_relinquish_cookie(cookie, &ci->i_vino, false);
+ fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size);
}
-static bool ceph_fscache_can_enable(void *data)
+void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
{
- struct inode *inode = data;
- return !inode_is_open_for_write(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ fscache_invalidate(ceph_fscache_cookie(ci),
+ &ci->i_version, i_size_read(inode),
+ dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
}
-void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
{
- struct ceph_inode_info *ci = ceph_inode(inode);
+ const struct ceph_fsid *fsid = &fsc->client->fsid;
+ const char *fscache_uniq = fsc->mount_options->fscache_uniq;
+ size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
+ char *name;
+ int err = 0;
- if (!fscache_cookie_valid(ci->fscache))
- return;
+ name = kasprintf(GFP_KERNEL, "ceph,%pU%s%s", fsid, uniq_len ? "," : "",
+ uniq_len ? fscache_uniq : "");
+ if (!name)
+ return -ENOMEM;
- if (inode_is_open_for_write(inode)) {
- dout("fscache_file_set_cookie %p %p disabling cache\n",
- inode, filp);
- fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
- } else {
- fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
- ceph_fscache_can_enable, inode);
- if (fscache_cookie_enabled(ci->fscache)) {
- dout("fscache_file_set_cookie %p %p enabling cache\n",
- inode, filp);
- }
+ fsc->fscache = fscache_acquire_volume(name, NULL, NULL, 0);
+ if (IS_ERR_OR_NULL(fsc->fscache)) {
+ errorfc(fc, "Unable to register fscache cookie for %s", name);
+ err = fsc->fscache ? PTR_ERR(fsc->fscache) : -EOPNOTSUPP;
+ fsc->fscache = NULL;
}
+ kfree(name);
+ return err;
}
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
- if (fscache_cookie_valid(fsc->fscache)) {
- struct ceph_fscache_entry *ent;
- bool found = false;
-
- mutex_lock(&ceph_fscache_lock);
- list_for_each_entry(ent, &ceph_fscache_list, list) {
- if (ent->fscache == fsc->fscache) {
- list_del(&ent->list);
- kfree(ent);
- found = true;
- break;
- }
- }
- WARN_ON_ONCE(!found);
- mutex_unlock(&ceph_fscache_lock);
-
- __fscache_relinquish_cookie(fsc->fscache, NULL, false);
- }
- fsc->fscache = NULL;
+ fscache_relinquish_volume(fsc->fscache, NULL, false);
}
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 058ea2a04376..20efac020394 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -12,106 +12,106 @@
#include <linux/netfs.h>
#ifdef CONFIG_CEPH_FSCACHE
-
-extern struct fscache_netfs ceph_cache_netfs;
-
-int ceph_fscache_register(void);
-void ceph_fscache_unregister(void);
+#include <linux/fscache.h>
int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc);
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
void ceph_fscache_register_inode_cookie(struct inode *inode);
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
-void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
-void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
+void ceph_fscache_use_cookie(struct inode *inode, bool will_modify);
+void ceph_fscache_unuse_cookie(struct inode *inode, bool update);
+
+void ceph_fscache_update(struct inode *inode);
+void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
+
+static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
{
- ci->fscache = NULL;
+ return netfs_i_cookie(&ci->netfs);
}
-static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
+static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
{
- return ci->fscache;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
+
+ if (cookie) {
+ ceph_fscache_use_cookie(inode, true);
+ fscache_resize_cookie(cookie, to);
+ ceph_fscache_unuse_cookie(inode, true);
+ }
}
-static inline void ceph_fscache_invalidate(struct inode *inode)
+static inline int ceph_fscache_unpin_writeback(struct inode *inode,
+ struct writeback_control *wbc)
{
- fscache_invalidate(ceph_inode(inode)->fscache);
+ return netfs_unpin_writeback(inode, wbc);
}
+#define ceph_fscache_dirty_folio netfs_dirty_folio
+
static inline bool ceph_is_cache_enabled(struct inode *inode)
{
- struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(inode));
-
- if (!cookie)
- return false;
- return fscache_cookie_enabled(cookie);
+ return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode)));
}
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+#else /* CONFIG_CEPH_FSCACHE */
+static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
+ struct fs_context *fc)
{
- struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
-
- return fscache_begin_read_operation(rreq, cookie);
+ return 0;
}
-#else
-static inline int ceph_fscache_register(void)
+static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
- return 0;
}
-static inline void ceph_fscache_unregister(void)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
{
}
-static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
- struct fs_context *fc)
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
- return 0;
}
-static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
+static inline void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
{
}
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
+static inline void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
{
}
-static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
+static inline void ceph_fscache_update(struct inode *inode)
{
- return NULL;
}
-static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+static inline void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
{
}
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
{
+ return NULL;
}
-static inline void ceph_fscache_file_set_cookie(struct inode *inode,
- struct file *filp)
+static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
{
}
-static inline void ceph_fscache_invalidate(struct inode *inode)
+static inline int ceph_fscache_unpin_writeback(struct inode *inode,
+ struct writeback_control *wbc)
{
+ return 0;
}
+#define ceph_fscache_dirty_folio filemap_dirty_folio
+
static inline bool ceph_is_cache_enabled(struct inode *inode)
{
return false;
}
+#endif /* CONFIG_CEPH_FSCACHE */
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
-{
- return -ENOBUFS;
-}
#endif
-
-#endif /* _CEPH_CACHE_H */
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 8f537f1d9d1d..b1a8ff612c41 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -9,10 +9,13 @@
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/iversion.h>
+#include <linux/filelock.h>
+#include <linux/jiffies.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "crypto.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
@@ -184,10 +187,10 @@ static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
mdsc->caps_avail_count += nr_caps;
}
- dout("%s: caps %d = %d used + %d resv + %d avail\n",
- __func__,
- mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(mdsc->fsc->client,
+ "caps %d = %d used + %d resv + %d avail\n",
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
mdsc->caps_reserve_count +
mdsc->caps_avail_count);
@@ -200,6 +203,7 @@ static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx, int need)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int i, j;
struct ceph_cap *cap;
int have;
@@ -210,7 +214,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s;
LIST_HEAD(newcaps);
- dout("reserve caps ctx=%p need=%d\n", ctx, need);
+ doutc(cl, "ctx=%p need=%d\n", ctx, need);
/* first reserve any caps that are already allocated */
spin_lock(&mdsc->caps_list_lock);
@@ -270,8 +274,8 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
continue;
}
- pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
- ctx, need, have + alloc);
+ pr_warn_client(cl, "ctx=%p ENOMEM need=%d got=%d\n", ctx, need,
+ have + alloc);
err = -ENOMEM;
break;
}
@@ -296,20 +300,21 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
spin_unlock(&mdsc->caps_list_lock);
- dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
- ctx, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(cl, "ctx=%p %d = %d used + %d resv + %d avail\n", ctx,
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
return err;
}
void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx)
{
+ struct ceph_client *cl = mdsc->fsc->client;
bool reclaim = false;
if (!ctx->count)
return;
- dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
+ doutc(cl, "ctx=%p count=%d\n", ctx, ctx->count);
spin_lock(&mdsc->caps_list_lock);
__ceph_unreserve_caps(mdsc, ctx->count);
ctx->count = 0;
@@ -326,6 +331,7 @@ void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap *cap = NULL;
/* temporary, until we do something about cap import/export */
@@ -357,9 +363,9 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
}
spin_lock(&mdsc->caps_list_lock);
- dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
- ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(cl, "ctx=%p (%d) %d = %d used + %d resv + %d avail\n", ctx,
+ ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
BUG_ON(!ctx->count);
BUG_ON(ctx->count > mdsc->caps_reserve_count);
BUG_ON(list_empty(&mdsc->caps_list));
@@ -380,10 +386,12 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
+ struct ceph_client *cl = mdsc->fsc->client;
+
spin_lock(&mdsc->caps_list_lock);
- dout("put_cap %p %d = %d used + %d resv + %d avail\n",
- cap, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ doutc(cl, "%p %d = %d used + %d resv + %d avail\n", cap,
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
mdsc->caps_use_count--;
/*
* Keep some preallocated caps around (ceph_min_count), to
@@ -430,7 +438,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
*
* Called with i_ceph_lock held.
*/
-static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
+struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
struct ceph_cap *cap;
struct rb_node *n = ci->i_caps.rb_node;
@@ -489,11 +497,13 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+
ci->i_hold_caps_max = round_jiffies(jiffies +
opt->caps_wanted_delay_max * HZ);
- dout("__cap_set_timeouts %p %lu\n", &ci->vfs_inode,
- ci->i_hold_caps_max - jiffies);
+ doutc(mdsc->fsc->client, "%p %llx.%llx %lu\n", inode,
+ ceph_vinop(inode), ci->i_hold_caps_max - jiffies);
}
/*
@@ -507,8 +517,11 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->vfs_inode,
- ci->i_ceph_flags, ci->i_hold_caps_max);
+ struct inode *inode = &ci->netfs.inode;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx flags 0x%lx at %lu\n",
+ inode, ceph_vinop(inode), ci->i_ceph_flags,
+ ci->i_hold_caps_max);
if (!mdsc->stopping) {
spin_lock(&mdsc->cap_delay_lock);
if (!list_empty(&ci->i_cap_delay_list)) {
@@ -531,7 +544,9 @@ no_change:
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
+ struct inode *inode = &ci->netfs.inode;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode));
spin_lock(&mdsc->cap_delay_lock);
ci->i_ceph_flags |= CEPH_I_FLUSH;
if (!list_empty(&ci->i_cap_delay_list))
@@ -548,7 +563,9 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
+ struct inode *inode = &ci->netfs.inode;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (list_empty(&ci->i_cap_delay_list))
return;
spin_lock(&mdsc->cap_delay_lock);
@@ -560,6 +577,9 @@ static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
unsigned issued)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
unsigned had = __ceph_caps_issued(ci, NULL);
lockdep_assert_held(&ci->i_ceph_lock);
@@ -568,7 +588,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
* Each time we receive FILE_CACHE anew, we increment
* i_rdcache_gen.
*/
- if (S_ISREG(ci->vfs_inode.i_mode) &&
+ if (S_ISREG(ci->netfs.inode.i_mode) &&
(issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
(had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
ci->i_rdcache_gen++;
@@ -583,14 +603,14 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
if (issued & CEPH_CAP_FILE_SHARED)
atomic_inc(&ci->i_shared_gen);
- if (S_ISDIR(ci->vfs_inode.i_mode)) {
- dout(" marking %p NOT complete\n", &ci->vfs_inode);
+ if (S_ISDIR(ci->netfs.inode.i_mode)) {
+ doutc(cl, " marking %p NOT complete\n", inode);
__ceph_dir_clear_complete(ci);
}
}
/* Wipe saved layout if we're losing DIR_CREATE caps */
- if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
+ if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
!(issued & CEPH_CAP_DIR_CREATE)) {
ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
@@ -602,8 +622,8 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
* @ci: inode to be moved
* @session: new auth caps session
*/
-static void change_auth_cap_ses(struct ceph_inode_info *ci,
- struct ceph_mds_session *session)
+void change_auth_cap_ses(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session)
{
lockdep_assert_held(&ci->i_ceph_lock);
@@ -633,7 +653,8 @@ void ceph_add_cap(struct inode *inode,
unsigned seq, unsigned mseq, u64 realmino, int flags,
struct ceph_cap **new_cap)
{
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap;
int mds = session->s_mds;
@@ -642,8 +663,9 @@ void ceph_add_cap(struct inode *inode,
lockdep_assert_held(&ci->i_ceph_lock);
- dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
- session->s_mds, cap_id, ceph_cap_string(issued), seq);
+ doutc(cl, "%p %llx.%llx mds%d cap %llx %s seq %d\n", inode,
+ ceph_vinop(inode), session->s_mds, cap_id,
+ ceph_cap_string(issued), seq);
gen = atomic_read(&session->s_cap_gen);
@@ -721,9 +743,9 @@ void ceph_add_cap(struct inode *inode,
actual_wanted = __ceph_caps_wanted(ci);
if ((wanted & ~actual_wanted) ||
(issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
- dout(" issued %s, mds wanted %s, actual %s, queueing\n",
- ceph_cap_string(issued), ceph_cap_string(wanted),
- ceph_cap_string(actual_wanted));
+ doutc(cl, "issued %s, mds wanted %s, actual %s, queueing\n",
+ ceph_cap_string(issued), ceph_cap_string(wanted),
+ ceph_cap_string(actual_wanted));
__cap_delay_requeue(mdsc, ci);
}
@@ -740,9 +762,9 @@ void ceph_add_cap(struct inode *inode,
WARN_ON(ci->i_auth_cap == cap);
}
- dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
- inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
- ceph_cap_string(issued|cap->issued), seq, mds);
+ doutc(cl, "inode %p %llx.%llx cap %p %s now %s seq %d mds%d\n",
+ inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
+ ceph_cap_string(issued|cap->issued), seq, mds);
cap->cap_id = cap_id;
cap->issued = issued;
cap->implemented |= issued;
@@ -754,6 +776,7 @@ void ceph_add_cap(struct inode *inode,
cap->issue_seq = seq;
cap->mseq = mseq;
cap->cap_gen = gen;
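+ /* wake anyone blocked on this inode's cap wait queue now that the new cap is installed */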
+ wake_up_all(&ci->i_cap_wq);
}
/*
@@ -763,6 +786,8 @@ void ceph_add_cap(struct inode *inode,
*/
static int __cap_is_valid(struct ceph_cap *cap)
{
+ struct inode *inode = &cap->ci->netfs.inode;
+ struct ceph_client *cl = cap->session->s_mdsc->fsc->client;
unsigned long ttl;
u32 gen;
@@ -770,9 +795,9 @@ static int __cap_is_valid(struct ceph_cap *cap)
ttl = cap->session->s_cap_ttl;
if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
- dout("__cap_is_valid %p cap %p issued %s "
- "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
- cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
+ doutc(cl, "%p %llx.%llx cap %p issued %s but STALE (gen %u vs %u)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued), cap->cap_gen, gen);
return 0;
}
@@ -786,6 +811,8 @@ static int __cap_is_valid(struct ceph_cap *cap)
*/
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int have = ci->i_snap_caps;
struct ceph_cap *cap;
struct rb_node *p;
@@ -796,8 +823,8 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
cap = rb_entry(p, struct ceph_cap, ci_node);
if (!__cap_is_valid(cap))
continue;
- dout("__ceph_caps_issued %p cap %p issued %s\n",
- &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
+ doutc(cl, "%p %llx.%llx cap %p issued %s\n", inode,
+ ceph_vinop(inode), cap, ceph_cap_string(cap->issued));
have |= cap->issued;
if (implemented)
*implemented |= cap->implemented;
@@ -840,16 +867,18 @@ int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
*/
static void __touch_cap(struct ceph_cap *cap)
{
+ struct inode *inode = &cap->ci->netfs.inode;
struct ceph_mds_session *s = cap->session;
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
spin_lock(&s->s_cap_lock);
if (!s->s_cap_iterator) {
- dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
- s->s_mds);
+ doutc(cl, "%p %llx.%llx cap %p mds%d\n", inode,
+ ceph_vinop(inode), cap, s->s_mds);
list_move_tail(&cap->session_caps, &s->s_caps);
} else {
- dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
- &cap->ci->vfs_inode, cap, s->s_mds);
+ doutc(cl, "%p %llx.%llx cap %p mds%d NOP, iterating over caps\n",
+ inode, ceph_vinop(inode), cap, s->s_mds);
}
spin_unlock(&s->s_cap_lock);
}
@@ -861,15 +890,16 @@ static void __touch_cap(struct ceph_cap *cap)
*/
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap *cap;
struct rb_node *p;
int have = ci->i_snap_caps;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
- " (mask %s)\n", ceph_ino(&ci->vfs_inode),
- ceph_cap_string(have),
- ceph_cap_string(mask));
+ doutc(cl, "mask %p %llx.%llx snap issued %s (mask %s)\n",
+ inode, ceph_vinop(inode), ceph_cap_string(have),
+ ceph_cap_string(mask));
return 1;
}
@@ -878,10 +908,10 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
if (!__cap_is_valid(cap))
continue;
if ((cap->issued & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
- " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap,
- ceph_cap_string(cap->issued),
- ceph_cap_string(mask));
+ doutc(cl, "mask %p %llx.%llx cap %p issued %s (mask %s)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(mask));
if (touch)
__touch_cap(cap);
return 1;
@@ -890,10 +920,10 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
/* does a combination of caps satisfy mask? */
have |= cap->issued;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
- " (mask %s)\n", ceph_ino(&ci->vfs_inode),
- ceph_cap_string(cap->issued),
- ceph_cap_string(mask));
+ doutc(cl, "mask %p %llx.%llx combo issued %s (mask %s)\n",
+ inode, ceph_vinop(inode),
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(mask));
if (touch) {
struct rb_node *q;
@@ -919,7 +949,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
int touch)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
int r;
r = __ceph_caps_issued_mask(ci, mask, touch);
@@ -948,19 +978,6 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
return 0;
}
-int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
-{
- struct inode *inode = &ci->vfs_inode;
- int ret;
-
- spin_lock(&ci->i_ceph_lock);
- ret = __ceph_caps_revoking_other(ci, NULL, mask);
- spin_unlock(&ci->i_ceph_lock);
- dout("ceph_caps_revoking %p %s = %d\n", inode,
- ceph_cap_string(mask), ret);
- return ret;
-}
-
int __ceph_caps_used(struct ceph_inode_info *ci)
{
int used = 0;
@@ -969,8 +986,8 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
if (ci->i_rd_ref)
used |= CEPH_CAP_FILE_RD;
if (ci->i_rdcache_ref ||
- (S_ISREG(ci->vfs_inode.i_mode) &&
- ci->vfs_inode.i_data.nrpages))
+ (S_ISREG(ci->netfs.inode.i_mode) &&
+ ci->netfs.inode.i_data.nrpages))
used |= CEPH_CAP_FILE_CACHE;
if (ci->i_wr_ref)
used |= CEPH_CAP_FILE_WR;
@@ -993,11 +1010,11 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
struct ceph_mount_options *opt =
- ceph_inode_to_client(&ci->vfs_inode)->mount_options;
+ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
- if (S_ISDIR(ci->vfs_inode.i_mode)) {
+ if (S_ISDIR(ci->netfs.inode.i_mode)) {
int want = 0;
/* use used_cutoff here, to keep dir's wanted caps longer */
@@ -1050,7 +1067,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
int __ceph_caps_wanted(struct ceph_inode_info *ci)
{
int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
- if (S_ISDIR(ci->vfs_inode.i_mode)) {
+ if (S_ISDIR(ci->netfs.inode.i_mode)) {
/* we want EXCL if holding caps of dir ops */
if (w & CEPH_CAP_ANY_DIR_OPS)
w |= CEPH_CAP_FILE_EXCL;
@@ -1104,21 +1121,23 @@ int ceph_is_any_caps(struct inode *inode)
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
struct ceph_mds_session *session = cap->session;
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_inode_info *ci = cap->ci;
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc;
int removed = 0;
/* 'ci' being NULL means the remove has already occurred */
if (!ci) {
- dout("%s: cap inode is NULL\n", __func__);
+ doutc(cl, "inode is NULL\n");
return;
}
lockdep_assert_held(&ci->i_ceph_lock);
- dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+ doutc(cl, "%p from %p %llx.%llx\n", cap, inode, ceph_vinop(inode));
- mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
+ mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc;
/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
@@ -1129,8 +1148,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
spin_lock(&session->s_cap_lock);
if (session->s_cap_iterator == cap) {
/* not yet, we are iterating over this very cap */
- dout("__ceph_remove_cap delaying %p removal from session %p\n",
- cap, cap->session);
+ doutc(cl, "delaying %p removal from session %p\n", cap,
+ cap->session);
} else {
list_del_init(&cap->session_caps);
session->s_nr_caps--;
@@ -1169,30 +1188,31 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
* keep i_snap_realm.
*/
if (ci->i_wr_ref == 0 && ci->i_snap_realm)
- ceph_change_snap_realm(&ci->vfs_inode, NULL);
+ ceph_change_snap_realm(&ci->netfs.inode, NULL);
__cap_delay_cancel(mdsc, ci);
}
}
-void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ bool queue_release)
{
struct ceph_inode_info *ci = cap->ci;
struct ceph_fs_client *fsc;
/* 'ci' being NULL means the remove has already occurred */
if (!ci) {
- dout("%s: cap inode is NULL\n", __func__);
+ doutc(mdsc->fsc->client, "inode is NULL\n");
return;
}
lockdep_assert_held(&ci->i_ceph_lock);
- fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
WARN_ON_ONCE(ci->i_auth_cap == cap &&
!list_empty(&ci->i_dirty_item) &&
!fsc->blocklisted &&
- READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
+ !ceph_inode_is_shutdown(&ci->netfs.inode));
__ceph_remove_cap(cap, queue_release);
}
@@ -1214,31 +1234,31 @@ struct cap_msg_args {
umode_t mode;
bool inline_data;
bool wake;
+ bool encrypted;
+ u32 fscrypt_auth_len;
+ u8 fscrypt_auth[sizeof(struct ceph_fscrypt_auth)]; // for context
};
-/*
- * cap struct size + flock buffer size + inline version + inline data size +
- * osd_epoch_barrier + oldest_flush_tid
- */
-#define CAP_MSG_SIZE (sizeof(struct ceph_mds_caps) + \
- 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4)
-
/* Marshal up the cap msg to the MDS */
static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
{
struct ceph_mds_caps *fc;
void *p;
- struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;
-
- dout("%s %s %llx %llx caps %s wanted %s dirty %s seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu xattr_ver %llu xattr_len %d\n",
- __func__, ceph_cap_op_name(arg->op), arg->cid, arg->ino,
- ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
- ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
- arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
- arg->size, arg->max_size, arg->xattr_version,
- arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
-
- msg->hdr.version = cpu_to_le16(10);
+ struct ceph_mds_client *mdsc = arg->session->s_mdsc;
+ struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
+
+ doutc(mdsc->fsc->client,
+ "%s %llx %llx caps %s wanted %s dirty %s seq %u/%u"
+ " tid %llu/%llu mseq %u follows %lld size %llu/%llu"
+ " xattr_ver %llu xattr_len %d\n",
+ ceph_cap_op_name(arg->op), arg->cid, arg->ino,
+ ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
+ ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
+ arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
+ arg->size, arg->max_size, arg->xattr_version,
+ arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
+
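+ /* v11 adds the dirstat fields and v12 the fscrypt fields encoded below */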
+ msg->hdr.version = cpu_to_le16(12);
msg->hdr.tid = cpu_to_le64(arg->flush_tid);
fc = msg->front.iov_base;
@@ -1255,7 +1275,13 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
fc->ino = cpu_to_le64(arg->ino);
fc->snap_follows = cpu_to_le64(arg->follows);
- fc->size = cpu_to_le64(arg->size);
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (arg->encrypted)
+ fc->size = cpu_to_le64(round_up(arg->size,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ else
+#endif
+ fc->size = cpu_to_le64(arg->size);
fc->max_size = cpu_to_le64(arg->max_size);
ceph_encode_timespec64(&fc->mtime, &arg->mtime);
ceph_encode_timespec64(&fc->atime, &arg->atime);
@@ -1309,6 +1335,27 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
/* Advisory flags (version 10) */
ceph_encode_32(&p, arg->flags);
+
+ /* dirstats (version 11) - these are r/o on the client */
+ ceph_encode_64(&p, 0);
+ ceph_encode_64(&p, 0);
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ /*
+ * fscrypt_auth and fscrypt_file (version 12)
+ *
+ * fscrypt_auth holds the crypto context (if any). fscrypt_file
+ * tracks the real i_size as an __le64 field (and we use a rounded-up
+ * i_size in the traditional size field).
+ */
+ ceph_encode_32(&p, arg->fscrypt_auth_len);
+ ceph_encode_copy(&p, arg->fscrypt_auth, arg->fscrypt_auth_len);
+ ceph_encode_32(&p, sizeof(__le64));
+ ceph_encode_64(&p, arg->size);
+#else /* CONFIG_FS_ENCRYPTION */
+ ceph_encode_32(&p, 0);
+ ceph_encode_32(&p, 0);
+#endif /* CONFIG_FS_ENCRYPTION */
}
/*
@@ -1316,6 +1363,8 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
*/
void __ceph_remove_caps(struct ceph_inode_info *ci)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct rb_node *p;
/* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
@@ -1325,7 +1374,7 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
p = rb_next(p);
- ceph_remove_cap(cap, true);
+ ceph_remove_cap(mdsc, cap, true);
}
spin_unlock(&ci->i_ceph_lock);
}
@@ -1343,7 +1392,8 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
int flushing, u64 flush_tid, u64 oldest_flush_tid)
{
struct ceph_inode_info *ci = cap->ci;
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int held, revoking;
lockdep_assert_held(&ci->i_ceph_lock);
@@ -1352,10 +1402,10 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
revoking = cap->implemented & ~cap->issued;
retain &= ~revoking;
- dout("%s %p cap %p session %p %s -> %s (revoking %s)\n",
- __func__, inode, cap, cap->session,
- ceph_cap_string(held), ceph_cap_string(held & retain),
- ceph_cap_string(revoking));
+ doutc(cl, "%p %llx.%llx cap %p session %p %s -> %s (revoking %s)\n",
+ inode, ceph_vinop(inode), cap, cap->session,
+ ceph_cap_string(held), ceph_cap_string(held & retain),
+ ceph_cap_string(revoking));
BUG_ON((retain & CEPH_CAP_PIN) == 0);
ci->i_ceph_flags &= ~CEPH_I_FLUSH;
@@ -1376,7 +1426,6 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
arg->follows = flushing ? ci->i_head_snapc->seq : 0;
arg->flush_tid = flush_tid;
arg->oldest_flush_tid = oldest_flush_tid;
-
arg->size = i_size_read(inode);
ci->i_reported_size = arg->size;
arg->max_size = ci->i_wanted_max_size;
@@ -1390,15 +1439,15 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
if (flushing & CEPH_CAP_XATTR_EXCL) {
arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
arg->xattr_version = ci->i_xattrs.version;
- arg->xattr_buf = ci->i_xattrs.blob;
+ arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob);
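+ /* the matching ceph_buffer_put() happens in __send_cap() once the message has been sent */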
} else {
arg->xattr_buf = NULL;
arg->old_xattr_buf = NULL;
}
- arg->mtime = inode->i_mtime;
- arg->atime = inode->i_atime;
- arg->ctime = inode->i_ctime;
+ arg->mtime = inode_get_mtime(inode);
+ arg->atime = inode_get_atime(inode);
+ arg->ctime = inode_get_ctime(inode);
arg->btime = ci->i_btime;
arg->change_attr = inode_peek_iversion_raw(inode);
@@ -1430,7 +1479,38 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
}
}
arg->flags = flags;
+ arg->encrypted = IS_ENCRYPTED(inode);
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (ci->fscrypt_auth_len &&
+ WARN_ON_ONCE(ci->fscrypt_auth_len > sizeof(struct ceph_fscrypt_auth))) {
+ /* Don't set this if it's too big */
+ arg->fscrypt_auth_len = 0;
+ } else {
+ arg->fscrypt_auth_len = ci->fscrypt_auth_len;
+ memcpy(arg->fscrypt_auth, ci->fscrypt_auth,
+ min_t(size_t, ci->fscrypt_auth_len,
+ sizeof(arg->fscrypt_auth)));
+ }
+#endif /* CONFIG_FS_ENCRYPTION */
+}
+
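+/*
+ * CAP_MSG_FIXED_FIELDS is the fixed trailer encoded by encode_cap_msg()
+ * after struct ceph_mds_caps: the fields the old CAP_MSG_SIZE covered,
+ * plus the two 64-bit dirstat fields (v11) and the fscrypt_auth/
+ * fscrypt_file length words (v12). With encryption enabled there is an
+ * extra 8 bytes for the __le64 real i_size, and cap_msg_size() adds the
+ * variable-length fscrypt_auth blob on top.
+ */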
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#define CAP_MSG_FIXED_FIELDS (sizeof(struct ceph_mds_caps) + \
+ 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + 8 + 4 + 4 + 8)
+
+static inline int cap_msg_size(struct cap_msg_args *arg)
+{
+ return CAP_MSG_FIXED_FIELDS + arg->fscrypt_auth_len;
}
+#else
+#define CAP_MSG_FIXED_FIELDS (sizeof(struct ceph_mds_caps) + \
+ 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + 8 + 4 + 4)
+
+static inline int cap_msg_size(struct cap_msg_args *arg)
+{
+ return CAP_MSG_FIXED_FIELDS;
+}
+#endif /* CONFIG_FS_ENCRYPTION */
/*
* Send a cap msg on the given inode.
@@ -1440,13 +1520,17 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
{
struct ceph_msg *msg;
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(arg), GFP_NOFS,
+ false);
if (!msg) {
- pr_err("error allocating cap msg: ino (%llx.%llx) flushing %s tid %llu, requeuing cap.\n",
- ceph_vinop(inode), ceph_cap_string(arg->dirty),
- arg->flush_tid);
+ pr_err_client(cl,
+ "error allocating cap msg: ino (%llx.%llx)"
+ " flushing %s tid %llu, requeuing cap.\n",
+ ceph_vinop(inode), ceph_cap_string(arg->dirty),
+ arg->flush_tid);
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(arg->session->s_mdsc, ci);
spin_unlock(&ci->i_ceph_lock);
@@ -1456,6 +1540,7 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
encode_cap_msg(msg, arg);
ceph_con_send(&arg->session->s_con, msg);
ceph_buffer_put(arg->old_xattr_buf);
+ ceph_buffer_put(arg->xattr_buf);
if (arg->wake)
wake_up_all(&ci->i_cap_wq);
}
@@ -1468,10 +1553,6 @@ static inline int __send_flush_snap(struct inode *inode,
struct cap_msg_args arg;
struct ceph_msg *msg;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
- if (!msg)
- return -ENOMEM;
-
arg.session = session;
arg.ino = ceph_vino(inode).ino;
arg.cid = 0;
@@ -1508,6 +1589,15 @@ static inline int __send_flush_snap(struct inode *inode,
arg.inline_data = capsnap->inline_data;
arg.flags = 0;
arg.wake = false;
+ arg.encrypted = IS_ENCRYPTED(inode);
+
+ /* No fscrypt_auth changes from a capsnap. */
+ arg.fscrypt_auth_len = 0;
+
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(&arg),
+ GFP_NOFS, false);
+ if (!msg)
+ return -ENOMEM;
encode_cap_msg(msg, &arg);
ceph_con_send(&arg.session->s_con, msg);
@@ -1528,13 +1618,15 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap_snap *capsnap;
u64 oldest_flush_tid = 0;
u64 first_tid = 1, last_tid = 0;
- dout("__flush_snaps %p session %p\n", inode, session);
+ doutc(cl, "%p %llx.%llx session %p\n", inode, ceph_vinop(inode),
+ session);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
/*
@@ -1549,7 +1641,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
/* only flush each capsnap once */
if (capsnap->cap_flush.tid > 0) {
- dout(" already flushed %p, skipping\n", capsnap);
+ doutc(cl, "already flushed %p, skipping\n", capsnap);
continue;
}
@@ -1577,18 +1669,19 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
while (first_tid <= last_tid) {
struct ceph_cap *cap = ci->i_auth_cap;
- struct ceph_cap_flush *cf;
+ struct ceph_cap_flush *cf = NULL, *iter;
int ret;
if (!(cap && cap->session == session)) {
- dout("__flush_snaps %p auth cap %p not mds%d, "
- "stop\n", inode, cap, session->s_mds);
+ doutc(cl, "%p %llx.%llx auth cap %p not mds%d, stop\n",
+ inode, ceph_vinop(inode), cap, session->s_mds);
break;
}
ret = -ENOENT;
- list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
- if (cf->tid >= first_tid) {
+ list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) {
+ if (iter->tid >= first_tid) {
+ cf = iter;
ret = 0;
break;
}
@@ -1602,15 +1695,17 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
refcount_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
- dout("__flush_snaps %p capsnap %p tid %llu %s\n",
- inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));
+ doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n", inode,
+ ceph_vinop(inode), capsnap, cf->tid,
+ ceph_cap_string(capsnap->dirty));
ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
oldest_flush_tid);
if (ret < 0) {
- pr_err("__flush_snaps: error sending cap flushsnap, "
- "ino (%llx.%llx) tid %llu follows %llu\n",
- ceph_vinop(inode), cf->tid, capsnap->follows);
+ pr_err_client(cl, "error sending cap flushsnap, "
+ "ino (%llx.%llx) tid %llu follows %llu\n",
+ ceph_vinop(inode), cf->tid,
+ capsnap->follows);
}
ceph_put_cap_snap(capsnap);
@@ -1621,28 +1716,30 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
void ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession)
{
- struct inode *inode = &ci->vfs_inode;
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_mds_session *session = NULL;
+ bool need_put = false;
int mds;
- dout("ceph_flush_snaps %p\n", inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (psession)
session = *psession;
retry:
spin_lock(&ci->i_ceph_lock);
if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
- dout(" no capsnap needs flush, doing nothing\n");
+ doutc(cl, " no capsnap needs flush, doing nothing\n");
goto out;
}
if (!ci->i_auth_cap) {
- dout(" no auth cap (migrating?), doing nothing\n");
+ doutc(cl, " no auth cap (migrating?), doing nothing\n");
goto out;
}
mds = ci->i_auth_cap->session->s_mds;
if (session && session->s_mds != mds) {
- dout(" oops, wrong session %p mutex\n", session);
+ doutc(cl, " oops, wrong session %p mutex\n", session);
ceph_put_mds_session(session);
session = NULL;
}
@@ -1668,8 +1765,13 @@ out:
ceph_put_mds_session(session);
/* we flushed them all; remove this inode from the queue */
spin_lock(&mdsc->snap_flush_lock);
+ if (!list_empty(&ci->i_snap_flush_item))
+ need_put = true;
list_del_init(&ci->i_snap_flush_item);
spin_unlock(&mdsc->snap_flush_lock);
+
+ if (need_put)
+ iput(inode);
}
/*
@@ -1681,23 +1783,25 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
struct ceph_cap_flush **pcf)
{
struct ceph_mds_client *mdsc =
- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
- struct inode *inode = &ci->vfs_inode;
+ ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int was = ci->i_dirty_caps;
int dirty = 0;
lockdep_assert_held(&ci->i_ceph_lock);
if (!ci->i_auth_cap) {
- pr_warn("__mark_dirty_caps %p %llx mask %s, "
- "but no auth cap (session was closed?)\n",
- inode, ceph_ino(inode), ceph_cap_string(mask));
+ pr_warn_client(cl, "%p %llx.%llx mask %s, "
+ "but no auth cap (session was closed?)\n",
+ inode, ceph_vinop(inode),
+ ceph_cap_string(mask));
return 0;
}
- dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
- ceph_cap_string(mask), ceph_cap_string(was),
- ceph_cap_string(was | mask));
+ doutc(cl, "%p %llx.%llx %s dirty %s -> %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(mask),
+ ceph_cap_string(was), ceph_cap_string(was | mask));
ci->i_dirty_caps |= mask;
if (was == 0) {
struct ceph_mds_session *session = ci->i_auth_cap->session;
@@ -1710,8 +1814,9 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
ci->i_head_snapc = ceph_get_snap_context(
ci->i_snap_realm->cached_context);
}
- dout(" inode %p now dirty snapc %p auth cap %p\n",
- &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
+ doutc(cl, "%p %llx.%llx now dirty snapc %p auth cap %p\n",
+ inode, ceph_vinop(inode), ci->i_head_snapc,
+ ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
list_add(&ci->i_dirty_item, &session->s_cap_dirty);
@@ -1804,7 +1909,8 @@ static u64 __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session, bool wake,
u64 *oldest_flush_tid)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap_flush *cf = NULL;
int flushing;
@@ -1815,13 +1921,13 @@ static u64 __mark_caps_flushing(struct inode *inode,
BUG_ON(!ci->i_prealloc_cap_flush);
flushing = ci->i_dirty_caps;
- dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
- ceph_cap_string(flushing),
- ceph_cap_string(ci->i_flushing_caps),
- ceph_cap_string(ci->i_flushing_caps | flushing));
+ doutc(cl, "flushing %s, flushing_caps %s -> %s\n",
+ ceph_cap_string(flushing),
+ ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(ci->i_flushing_caps | flushing));
ci->i_flushing_caps |= flushing;
ci->i_dirty_caps = 0;
- dout(" inode %p now !dirty\n", inode);
+ doutc(cl, "%p %llx.%llx now !dirty\n", inode, ceph_vinop(inode));
swap(cf, ci->i_prealloc_cap_flush);
cf->caps = flushing;
@@ -1852,29 +1958,31 @@ static int try_nonblocking_invalidate(struct inode *inode)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u32 invalidating_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
- ceph_fscache_invalidate(inode);
+ ceph_fscache_invalidate(inode, false);
invalidate_mapping_pages(&inode->i_data, 0, -1);
spin_lock(&ci->i_ceph_lock);
if (inode->i_data.nrpages == 0 &&
invalidating_gen == ci->i_rdcache_gen) {
/* success. */
- dout("try_nonblocking_invalidate %p success\n", inode);
+ doutc(cl, "%p %llx.%llx success\n", inode,
+ ceph_vinop(inode));
/* save any racing async invalidate some trouble */
ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
return 0;
}
- dout("try_nonblocking_invalidate %p failed\n", inode);
+ doutc(cl, "%p %llx.%llx failed\n", inode, ceph_vinop(inode));
return -1;
}
bool __ceph_should_report_size(struct ceph_inode_info *ci)
{
- loff_t size = i_size_read(&ci->vfs_inode);
+ loff_t size = i_size_read(&ci->netfs.inode);
/* mds will adjust max size according to the reported size */
if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
return false;
@@ -1895,12 +2003,14 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci)
* CHECK_CAPS_AUTHONLY - we should only check the auth cap
* CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
* further delay.
+ * CHECK_CAPS_FLUSH_FORCE - we should flush any caps immediately, without
+ * further delay.
*/
-void ceph_check_caps(struct ceph_inode_info *ci, int flags,
- struct ceph_mds_session *session)
+void ceph_check_caps(struct ceph_inode_info *ci, int flags)
{
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap *cap;
u64 flush_tid, oldest_flush_tid;
int file_wanted, used, cap_used;
@@ -1910,11 +2020,18 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct rb_node *p;
bool queue_invalidate = false;
bool tried_invalidate = false;
-
- if (session)
- ceph_get_mds_session(session);
+ bool queue_writeback = false;
+ struct ceph_mds_session *session = NULL;
spin_lock(&ci->i_ceph_lock);
+ if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
+ ci->i_ceph_flags |= CEPH_I_ASYNC_CHECK_CAPS;
+
+ /* Don't send messages until we get async create reply */
+ spin_unlock(&ci->i_ceph_lock);
+ return;
+ }
+
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
retry:
@@ -1968,15 +2085,17 @@ retry:
}
}
- dout("check_caps %p file_want %s used %s dirty %s flushing %s"
- " issued %s revoking %s retain %s %s%s\n", inode,
- ceph_cap_string(file_wanted),
+ doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
+ "flushing %s issued %s revoking %s retain %s %s%s%s%s\n",
+ inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
ceph_cap_string(issued), ceph_cap_string(revoking),
ceph_cap_string(retain),
(flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
- (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
+ (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
+ (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "",
+ (flags & CHECK_CAPS_FLUSH_FORCE) ? " FLUSH_FORCE" : "");
/*
* If we no longer need to hold onto old our caps, and we may
@@ -1990,9 +2109,10 @@ retry:
(revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
!tried_invalidate) {
- dout("check_caps trying to invalidate on %p\n", inode);
+ doutc(cl, "trying to invalidate on %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
if (try_nonblocking_invalidate(inode) < 0) {
- dout("check_caps queuing invalidate\n");
+ doutc(cl, "queuing invalidate\n");
queue_invalidate = true;
ci->i_rdcache_revoking = ci->i_rdcache_gen;
}
@@ -2020,46 +2140,68 @@ retry:
cap_used &= ~ci->i_auth_cap->issued;
revoking = cap->implemented & ~cap->issued;
- dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
- cap->mds, cap, ceph_cap_string(cap_used),
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->implemented),
- ceph_cap_string(revoking));
+ doutc(cl, " mds%d cap %p used %s issued %s implemented %s revoking %s\n",
+ cap->mds, cap, ceph_cap_string(cap_used),
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->implemented),
+ ceph_cap_string(revoking));
+
+ /* completed revocation? going down and there are no caps? */
+ if (revoking) {
+ if ((revoking & cap_used) == 0) {
+ doutc(cl, "completed revocation of %s\n",
+ ceph_cap_string(cap->implemented & ~cap->issued));
+ goto ack;
+ }
+
+ /*
+ * If the "i_wrbuffer_ref" was increased by mmap or generic
+ * cache write just before the ceph_check_caps() is called,
+ * the Fb capability revoking will fail this time. Then we
+ * must wait for the BDI's delayed work to flush the dirty
+ * pages and to release the "i_wrbuffer_ref", which will cost
+ * at most 5 seconds. That means the MDS needs to wait at
+ * most 5 seconds to finished the Fb capability's revocation.
+ *
+ * Let's queue a writeback for it.
+ */
+ if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
+ (revoking & CEPH_CAP_FILE_BUFFER))
+ queue_writeback = true;
+ }
+
+ if (flags & CHECK_CAPS_FLUSH_FORCE) {
+ doutc(cl, "force to flush caps\n");
+ goto ack;
+ }
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
if (ci->i_wanted_max_size > ci->i_max_size &&
ci->i_wanted_max_size > ci->i_requested_max_size) {
- dout("requesting new max_size\n");
+ doutc(cl, "requesting new max_size\n");
goto ack;
}
/* approaching file_max? */
if (__ceph_should_report_size(ci)) {
- dout("i_size approaching max_size\n");
+ doutc(cl, "i_size approaching max_size\n");
goto ack;
}
}
/* flush anything dirty? */
if (cap == ci->i_auth_cap) {
if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
- dout("flushing dirty caps\n");
+ doutc(cl, "flushing dirty caps\n");
goto ack;
}
if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
- dout("flushing snap caps\n");
+ doutc(cl, "flushing snap caps\n");
goto ack;
}
}
- /* completed revocation? going down and there are no caps? */
- if (revoking && (revoking & cap_used) == 0) {
- dout("completed revocation of %s\n",
- ceph_cap_string(cap->implemented & ~cap->issued));
- goto ack;
- }
-
/* want more caps from mds? */
if (want & ~cap->mds_wanted) {
if (want & ~(cap->mds_wanted | cap->issued))
@@ -2127,6 +2269,8 @@ ack:
spin_unlock(&ci->i_ceph_lock);
ceph_put_mds_session(session);
+ if (queue_writeback)
+ ceph_queue_writeback(inode);
if (queue_invalidate)
ceph_queue_invalidate(inode);
}
@@ -2136,7 +2280,7 @@ ack:
*/
static int try_flush_caps(struct inode *inode, u64 *ptid)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
int flushing = 0;
u64 flush_tid = 0, oldest_flush_tid = 0;
@@ -2210,11 +2354,12 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
}
/*
- * wait for any unsafe requests to complete.
+ * flush the mdlog and wait for any unsafe requests to complete.
*/
-static int unsafe_request_wait(struct inode *inode)
+static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req1 = NULL, *req2 = NULL;
int ret, err = 0;
@@ -2240,33 +2385,29 @@ static int unsafe_request_wait(struct inode *inode)
* to wait for the journal logs to be flushed by the MDSes periodically.
*/
if (req1 || req2) {
- struct ceph_mds_session **sessions = NULL;
- struct ceph_mds_session *s;
struct ceph_mds_request *req;
- unsigned int max;
+ struct ceph_mds_session **sessions;
+ struct ceph_mds_session *s;
+ unsigned int max_sessions;
int i;
- /*
- * The mdsc->max_sessions is unlikely to be changed
- * mostly, here we will retry it by reallocating the
- * sessions arrary memory to get rid of the mdsc->mutex
- * lock.
- */
-retry:
- max = mdsc->max_sessions;
- sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO);
- if (!sessions)
- return -ENOMEM;
+ mutex_lock(&mdsc->mutex);
+ max_sessions = mdsc->max_sessions;
+
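+ /* with mdsc->mutex held, max_sessions cannot change, so the array sized here stays large enough for any s->s_mds we index below */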
+ sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL);
+ if (!sessions) {
+ mutex_unlock(&mdsc->mutex);
+ err = -ENOMEM;
+ goto out;
+ }
spin_lock(&ci->i_unsafe_lock);
if (req1) {
list_for_each_entry(req, &ci->i_unsafe_dirops,
r_unsafe_dir_item) {
s = req->r_session;
- if (unlikely(s->s_mds >= max)) {
- spin_unlock(&ci->i_unsafe_lock);
- goto retry;
- }
+ if (!s)
+ continue;
if (!sessions[s->s_mds]) {
s = ceph_get_mds_session(s);
sessions[s->s_mds] = s;
@@ -2277,10 +2418,8 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_iops,
r_unsafe_target_item) {
s = req->r_session;
- if (unlikely(s->s_mds >= max)) {
- spin_unlock(&ci->i_unsafe_lock);
- goto retry;
- }
+ if (!s)
+ continue;
if (!sessions[s->s_mds]) {
s = ceph_get_mds_session(s);
sessions[s->s_mds] = s;
@@ -2292,14 +2431,15 @@ retry:
/* the auth MDS */
spin_lock(&ci->i_ceph_lock);
if (ci->i_auth_cap) {
- s = ci->i_auth_cap->session;
- if (!sessions[s->s_mds])
- sessions[s->s_mds] = ceph_get_mds_session(s);
+ s = ci->i_auth_cap->session;
+ if (!sessions[s->s_mds])
+ sessions[s->s_mds] = ceph_get_mds_session(s);
}
spin_unlock(&ci->i_ceph_lock);
+ mutex_unlock(&mdsc->mutex);
/* send flush mdlog request to MDSes */
- for (i = 0; i < max; i++) {
+ for (i = 0; i < max_sessions; i++) {
s = sessions[i];
if (s) {
send_flush_mdlog(s);
@@ -2309,22 +2449,27 @@ retry:
kfree(sessions);
}
- dout("unsafe_request_wait %p wait on tid %llu %llu\n",
- inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
+ doutc(cl, "%p %llx.%llx wait on tid %llu %llu\n", inode,
+ ceph_vinop(inode), req1 ? req1->r_tid : 0ULL,
+ req2 ? req2->r_tid : 0ULL);
if (req1) {
ret = !wait_for_completion_timeout(&req1->r_safe_completion,
ceph_timeout_jiffies(req1->r_timeout));
if (ret)
err = -EIO;
- ceph_mdsc_put_request(req1);
}
if (req2) {
ret = !wait_for_completion_timeout(&req2->r_safe_completion,
ceph_timeout_jiffies(req2->r_timeout));
if (ret)
err = -EIO;
- ceph_mdsc_put_request(req2);
}
+
+out:
+ if (req1)
+ ceph_mdsc_put_request(req1);
+ if (req2)
+ ceph_mdsc_put_request(req2);
return err;
}
@@ -2332,11 +2477,13 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
u64 flush_tid;
int ret, err;
int dirty;
- dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
+ doutc(cl, "%p %llx.%llx%s\n", inode, ceph_vinop(inode),
+ datasync ? " datasync" : "");
ret = file_write_and_wait_range(file, start, end);
if (datasync)
@@ -2347,9 +2494,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
dirty = try_flush_caps(inode, &flush_tid);
- dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
+ doutc(cl, "dirty caps are %s\n", ceph_cap_string(dirty));
- err = unsafe_request_wait(inode);
+ err = flush_mdlog_and_wait_inode_unsafe_requests(inode);
/*
* only wait on non-file metadata writeback (the mds
@@ -2368,7 +2515,8 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (err < 0)
ret = err;
out:
- dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
+ doutc(cl, "%p %llx.%llx%s result=%d\n", inode, ceph_vinop(inode),
+ datasync ? " datasync" : "", ret);
return ret;
}
@@ -2381,20 +2529,25 @@ out:
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
u64 flush_tid;
int err = 0;
int dirty;
int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync);
- dout("write_inode %p wait=%d\n", inode, wait);
+ doutc(cl, "%p %llx.%llx wait=%d\n", inode, ceph_vinop(inode), wait);
+ ceph_fscache_unpin_writeback(inode, wbc);
if (wait) {
+ err = ceph_wait_on_async_create(inode);
+ if (err)
+ return err;
dirty = try_flush_caps(inode, &flush_tid);
if (dirty)
err = wait_event_interruptible(ci->i_cap_wq,
caps_are_flushed(inode, flush_tid));
} else {
struct ceph_mds_client *mdsc =
- ceph_sb_to_client(inode->i_sb)->mdsc;
+ ceph_sb_to_fs_client(inode->i_sb)->mdsc;
spin_lock(&ci->i_ceph_lock);
if (__ceph_caps_dirty(ci))
@@ -2411,13 +2564,18 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap *cap;
struct ceph_cap_flush *cf;
int ret;
u64 first_tid = 0;
u64 last_snap_flush = 0;
+ /* Don't do anything until create reply comes in */
+ if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE)
+ return;
+
ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
@@ -2433,8 +2591,8 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n",
- inode, cap, session->s_mds);
+ pr_err_client(cl, "%p auth cap %p not mds%d ???\n",
+ inode, cap, session->s_mds);
break;
}
@@ -2443,8 +2601,9 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
if (!cf->is_capsnap) {
struct cap_msg_args arg;
- dout("kick_flushing_caps %p cap %p tid %llu %s\n",
- inode, cap, cf->tid, ceph_cap_string(cf->caps));
+ doutc(cl, "%p %llx.%llx cap %p tid %llu %s\n",
+ inode, ceph_vinop(inode), cap, cf->tid,
+ ceph_cap_string(cf->caps));
__prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH,
(cf->tid < last_snap_flush ?
CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0),
@@ -2458,9 +2617,9 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_snap *capsnap =
container_of(cf, struct ceph_cap_snap,
cap_flush);
- dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
- inode, capsnap, cf->tid,
- ceph_cap_string(capsnap->dirty));
+ doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n",
+ inode, ceph_vinop(inode), capsnap, cf->tid,
+ ceph_cap_string(capsnap->dirty));
refcount_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
@@ -2468,11 +2627,10 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
oldest_flush_tid);
if (ret < 0) {
- pr_err("kick_flushing_caps: error sending "
- "cap flushsnap, ino (%llx.%llx) "
- "tid %llu follows %llu\n",
- ceph_vinop(inode), cf->tid,
- capsnap->follows);
+ pr_err_client(cl, "error sending cap flushsnap,"
+ " %p %llx.%llx tid %llu follows %llu\n",
+ inode, ceph_vinop(inode), cf->tid,
+ capsnap->follows);
}
ceph_put_cap_snap(capsnap);
@@ -2485,22 +2643,26 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
u64 oldest_flush_tid;
- dout("early_kick_flushing_caps mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
spin_lock(&mdsc->cap_dirty_lock);
oldest_flush_tid = __get_oldest_flush_tid(mdsc);
spin_unlock(&mdsc->cap_dirty_lock);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+ struct inode *inode = &ci->netfs.inode;
+
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n",
- &ci->vfs_inode, cap, session->s_mds);
+ pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n",
+ inode, ceph_vinop(inode), cap,
+ session->s_mds);
spin_unlock(&ci->i_ceph_lock);
continue;
}
@@ -2533,24 +2695,28 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
u64 oldest_flush_tid;
lockdep_assert_held(&session->s_mutex);
- dout("kick_flushing_caps mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
spin_lock(&mdsc->cap_dirty_lock);
oldest_flush_tid = __get_oldest_flush_tid(mdsc);
spin_unlock(&mdsc->cap_dirty_lock);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+ struct inode *inode = &ci->netfs.inode;
+
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n",
- &ci->vfs_inode, cap, session->s_mds);
+ pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n",
+ inode, ceph_vinop(inode), cap,
+ session->s_mds);
spin_unlock(&ci->i_ceph_lock);
continue;
}
@@ -2567,11 +2733,13 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
{
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_cap *cap = ci->i_auth_cap;
+ struct inode *inode = &ci->netfs.inode;
lockdep_assert_held(&ci->i_ceph_lock);
- dout("%s %p flushing %s\n", __func__, &ci->vfs_inode,
- ceph_cap_string(ci->i_flushing_caps));
+ doutc(mdsc->fsc->client, "%p %llx.%llx flushing %s\n",
+ inode, ceph_vinop(inode),
+ ceph_cap_string(ci->i_flushing_caps));
if (!list_empty(&ci->i_cap_flush_list)) {
u64 oldest_flush_tid;
@@ -2593,6 +2761,9 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
bool snap_rwsem_locked)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
lockdep_assert_held(&ci->i_ceph_lock);
if (got & CEPH_CAP_PIN)
@@ -2613,10 +2784,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
}
if (got & CEPH_CAP_FILE_BUFFER) {
if (ci->i_wb_ref == 0)
- ihold(&ci->vfs_inode);
+ ihold(inode);
ci->i_wb_ref++;
- dout("%s %p wb %d -> %d (?)\n", __func__,
- &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
+ doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode,
+ ceph_vinop(inode), ci->i_wb_ref-1, ci->i_wb_ref);
}
}
@@ -2628,10 +2799,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
* requested from the MDS.
*
* Returns 0 if caps were not able to be acquired (yet), 1 if succeed,
- * or a negative error code. There are 3 speical error codes:
- * -EAGAIN: need to sleep but non-blocking is specified
- * -EFBIG: ask caller to call check_max_size() and try again.
- * -ESTALE: ask caller to call ceph_renew_caps() and try again.
+ * or a negative error code. There are 3 special error codes:
+ * -EAGAIN: need to sleep but non-blocking is specified
+ * -EFBIG: ask caller to call check_max_size() and try again.
+ * -EUCLEAN: ask caller to call ceph_renew_caps() and try again.
*/
enum {
/* first 8 bits are reserved for CEPH_FILE_MODE_FOO */
@@ -2643,20 +2814,23 @@ static int try_get_cap_refs(struct inode *inode, int need, int want,
loff_t endoff, int flags, int *got)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int ret = 0;
int have, implemented;
bool snap_rwsem_locked = false;
- dout("get_cap_refs %p need %s want %s\n", inode,
- ceph_cap_string(need), ceph_cap_string(want));
+ doutc(cl, "%p %llx.%llx need %s want %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(need),
+ ceph_cap_string(want));
again:
spin_lock(&ci->i_ceph_lock);
if ((flags & CHECK_FILELOCK) &&
(ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
- dout("try_get_cap_refs %p error filelock\n", inode);
+ doutc(cl, "%p %llx.%llx error filelock\n", inode,
+ ceph_vinop(inode));
ret = -EIO;
goto out_unlock;
}
@@ -2676,10 +2850,10 @@ again:
if (have & need & CEPH_CAP_FILE_WR) {
if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
- dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
- inode, endoff, ci->i_max_size);
+ doutc(cl, "%p %llx.%llx endoff %llu > maxsize %llu\n",
+ inode, ceph_vinop(inode), endoff, ci->i_max_size);
if (endoff > ci->i_requested_max_size)
- ret = ci->i_auth_cap ? -EFBIG : -ESTALE;
+ ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
goto out_unlock;
}
/*
@@ -2687,7 +2861,8 @@ again:
* can get a final snapshot value for size+mtime.
*/
if (__ceph_have_pending_cap_snap(ci)) {
- dout("get_cap_refs %p cap_snap_pending\n", inode);
+ doutc(cl, "%p %llx.%llx cap_snap_pending\n", inode,
+ ceph_vinop(inode));
goto out_unlock;
}
}
@@ -2698,13 +2873,17 @@ again:
* on transition from wanted -> needed caps. This is needed
* for WRBUFFER|WR -> WR to avoid a new WR sync write from
* going before a prior buffered writeback happens.
+ *
+ * For RDCACHE|RD -> RD, there is no need to wait and we can
+ * just exclude the revoking caps and fall back to a sync read.
*/
int not = want & ~(have & need);
int revoking = implemented & ~have;
- dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
- inode, ceph_cap_string(have), ceph_cap_string(not),
- ceph_cap_string(revoking));
- if ((revoking & not) == 0) {
+ int exclude = revoking & not;
+ doutc(cl, "%p %llx.%llx have %s but not %s (revoking %s)\n",
+ inode, ceph_vinop(inode), ceph_cap_string(have),
+ ceph_cap_string(not), ceph_cap_string(revoking));
+ if (!exclude || !(exclude & CEPH_CAP_FILE_BUFFER)) {
if (!snap_rwsem_locked &&
!ci->i_head_snapc &&
(need & CEPH_CAP_FILE_WR)) {
@@ -2726,7 +2905,7 @@ again:
snap_rwsem_locked = true;
}
if ((have & want) == want)
- *got = need | want;
+ *got = need | (want & ~exclude);
else
*got = need;
ceph_take_cap_refs(ci, *got, true);
@@ -2743,28 +2922,31 @@ again:
spin_unlock(&s->s_cap_lock);
}
if (session_readonly) {
- dout("get_cap_refs %p need %s but mds%d readonly\n",
- inode, ceph_cap_string(need), ci->i_auth_cap->mds);
+ doutc(cl, "%p %llx.%llx need %s but mds%d readonly\n",
+ inode, ceph_vinop(inode), ceph_cap_string(need),
+ ci->i_auth_cap->mds);
ret = -EROFS;
goto out_unlock;
}
- if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- dout("get_cap_refs %p forced umount\n", inode);
- ret = -EIO;
+ if (ceph_inode_is_shutdown(inode)) {
+ doutc(cl, "%p %llx.%llx inode is shutdown\n",
+ inode, ceph_vinop(inode));
+ ret = -ESTALE;
goto out_unlock;
}
mds_wanted = __ceph_caps_mds_wanted(ci, false);
if (need & ~mds_wanted) {
- dout("get_cap_refs %p need %s > mds_wanted %s\n",
- inode, ceph_cap_string(need),
- ceph_cap_string(mds_wanted));
- ret = -ESTALE;
+ doutc(cl, "%p %llx.%llx need %s > mds_wanted %s\n",
+ inode, ceph_vinop(inode), ceph_cap_string(need),
+ ceph_cap_string(mds_wanted));
+ ret = -EUCLEAN;
goto out_unlock;
}
- dout("get_cap_refs %p have %s need %s\n", inode,
- ceph_cap_string(have), ceph_cap_string(need));
+ doutc(cl, "%p %llx.%llx have %s need %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(have),
+ ceph_cap_string(need));
}
out_unlock:
@@ -2779,8 +2961,8 @@ out_unlock:
else if (ret == 1)
ceph_update_cap_hit(&mdsc->metric);
- dout("get_cap_refs %p ret %d got %s\n", inode,
- ret, ceph_cap_string(*got));
+ doutc(cl, "%p %llx.%llx ret %d got %s\n", inode,
+ ceph_vinop(inode), ret, ceph_cap_string(*got));
return ret;
}
@@ -2792,13 +2974,14 @@ out_unlock:
static void check_max_size(struct inode *inode, loff_t endoff)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int check = 0;
/* do we need to explicitly request a larger max_size? */
spin_lock(&ci->i_ceph_lock);
if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
- dout("write %p at large endoff %llu, req max_size\n",
- inode, endoff);
+ doutc(cl, "write %p %llx.%llx at large endoff %llu, req max_size\n",
+ inode, ceph_vinop(inode), endoff);
ci->i_wanted_max_size = endoff;
}
/* duplicate ceph_check_caps()'s logic */
@@ -2809,7 +2992,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
check = 1;
spin_unlock(&ci->i_ceph_lock);
if (check)
- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
}
static inline int get_used_fmode(int caps)
@@ -2843,7 +3026,7 @@ int ceph_try_get_caps(struct inode *inode, int need, int want,
ret = try_get_cap_refs(inode, need, want, 0, flags, got);
/* three special error codes */
- if (ret == -EAGAIN || ret == -EFBIG || ret == -ESTALE)
+ if (ret == -EAGAIN || ret == -EFBIG || ret == -EUCLEAN)
ret = 0;
return ret;
}
@@ -2853,19 +3036,18 @@ int ceph_try_get_caps(struct inode *inode, int need, int want,
* due to a small max_size, make sure we check_max_size (and possibly
* ask the mds) so we don't get hung up indefinitely.
*/
-int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got)
+int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
+ int want, loff_t endoff, int *got)
{
- struct ceph_file_info *fi = filp->private_data;
- struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int ret, _got, flags;
ret = ceph_pool_perm_check(inode, need);
if (ret < 0)
return ret;
- if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ if (fi && (fi->fmode & CEPH_FILE_MODE_WR) &&
fi->filp_gen != READ_ONCE(fsc->filp_gen))
return -EBADF;
@@ -2873,17 +3055,20 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
while (true) {
flags &= CEPH_FILE_MODE_MASK;
- if (atomic_read(&fi->num_locks))
+ if (vfs_inode_has_locks(inode))
flags |= CHECK_FILELOCK;
_got = 0;
ret = try_get_cap_refs(inode, need, want, endoff,
flags, &_got);
WARN_ON_ONCE(ret == -EAGAIN);
if (!ret) {
+#ifdef CONFIG_DEBUG_FS
struct ceph_mds_client *mdsc = fsc->mdsc;
struct cap_wait cw;
+#endif
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+#ifdef CONFIG_DEBUG_FS
cw.ino = ceph_ino(inode);
cw.tgid = current->tgid;
cw.need = need;
@@ -2892,6 +3077,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
spin_lock(&mdsc->caps_list_lock);
list_add(&cw.list, &mdsc->cap_wait_list);
spin_unlock(&mdsc->caps_list_lock);
+#endif
/* make sure used fmode not timeout */
ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS);
@@ -2910,15 +3096,17 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
remove_wait_queue(&ci->i_cap_wq, &wait);
ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS);
+#ifdef CONFIG_DEBUG_FS
spin_lock(&mdsc->caps_list_lock);
list_del(&cw.list);
spin_unlock(&mdsc->caps_list_lock);
+#endif
if (ret == -EAGAIN)
continue;
}
- if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ if (fi && (fi->fmode & CEPH_FILE_MODE_WR) &&
fi->filp_gen != READ_ONCE(fsc->filp_gen)) {
if (ret >= 0 && _got)
ceph_put_cap_refs(ci, _got);
@@ -2926,7 +3114,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
}
if (ret < 0) {
- if (ret == -EFBIG || ret == -ESTALE) {
+ if (ret == -EFBIG || ret == -EUCLEAN) {
int ret2 = ceph_wait_on_async_create(inode);
if (ret2 < 0)
return ret2;
@@ -2935,7 +3123,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
check_max_size(inode, endoff);
continue;
}
- if (ret == -ESTALE) {
+ if (ret == -EUCLEAN) {
/* session was killed, try renew caps */
ret = ceph_renew_caps(inode, flags);
if (ret == 0)
@@ -2944,8 +3132,8 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
return ret;
}
- if (S_ISREG(ci->vfs_inode.i_mode) &&
- ci->i_inline_version != CEPH_INLINE_NONE &&
+ if (S_ISREG(ci->netfs.inode.i_mode) &&
+ ceph_has_inline_data(ci) &&
(_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
i_size_read(inode) > 0) {
struct page *page =
@@ -2981,6 +3169,15 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
return 0;
}
+int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff,
+ int *got)
+{
+ struct ceph_file_info *fi = filp->private_data;
+ struct inode *inode = file_inode(filp);
+
+ return __ceph_get_caps(inode, fi, need, want, endoff, got);
+}
+
/*
* Take cap refs. Caller must already know we hold at least one ref
* on the caps in question or we don't know this is safe.
@@ -3000,10 +3197,12 @@ void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
if (!capsnap->need_flush &&
!capsnap->writing && !capsnap->dirty_pages) {
- dout("dropping cap_snap %p follows %llu\n",
- capsnap, capsnap->follows);
+ doutc(cl, "%p follows %llu\n", capsnap, capsnap->follows);
BUG_ON(capsnap->cap_flush.tid > 0);
ceph_put_snap_context(capsnap->context);
if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
@@ -3018,7 +3217,6 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
enum put_cap_refs_mode {
PUT_CAP_REFS_SYNC = 0,
- PUT_CAP_REFS_NO_CHECK,
PUT_CAP_REFS_ASYNC,
};
@@ -3034,7 +3232,8 @@ enum put_cap_refs_mode {
static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
enum put_cap_refs_mode mode)
{
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int last = 0, put = 0, flushsnaps = 0, wake = 0;
bool check_flushsnaps = false;
@@ -3057,11 +3256,17 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
put++;
check_flushsnaps = true;
}
- dout("put_cap_refs %p wb %d -> %d (?)\n",
- inode, ci->i_wb_ref+1, ci->i_wb_ref);
+ doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode,
+ ceph_vinop(inode), ci->i_wb_ref+1, ci->i_wb_ref);
}
if (had & CEPH_CAP_FILE_WR) {
if (--ci->i_wr_ref == 0) {
+ /*
+ * The Fb caps will always be taken and released
+ * together with the Fw caps.
+ */
+ WARN_ON_ONCE(ci->i_wb_ref);
+
last++;
check_flushsnaps = true;
if (ci->i_wrbuffer_ref_head == 0 &&
@@ -3092,13 +3297,13 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
}
spin_unlock(&ci->i_ceph_lock);
- dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
- last ? " last" : "", put ? " put" : "");
+ doutc(cl, "%p %llx.%llx had %s%s%s\n", inode, ceph_vinop(inode),
+ ceph_cap_string(had), last ? " last" : "", put ? " put" : "");
switch (mode) {
case PUT_CAP_REFS_SYNC:
if (last)
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, 0);
else if (flushsnaps)
ceph_flush_snaps(ci, NULL);
break;
@@ -3127,11 +3332,6 @@ void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had)
__ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC);
}
-void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
-{
- __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK);
-}
-
/*
* Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
* context. Adjust per-snap dirty page accounting as appropriate.
@@ -3142,11 +3342,11 @@ void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc)
{
- struct inode *inode = &ci->vfs_inode;
- struct ceph_cap_snap *capsnap = NULL;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_cap_snap *capsnap = NULL, *iter;
int put = 0;
bool last = false;
- bool found = false;
bool flush_snaps = false;
bool complete_capsnap = false;
@@ -3167,20 +3367,19 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
ceph_put_snap_context(ci->i_head_snapc);
ci->i_head_snapc = NULL;
}
- dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
- inode,
- ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
- ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
- last ? " LAST" : "");
+ doutc(cl, "on %p %llx.%llx head %d/%d -> %d/%d %s\n",
+ inode, ceph_vinop(inode), ci->i_wrbuffer_ref+nr,
+ ci->i_wrbuffer_ref_head+nr, ci->i_wrbuffer_ref,
+ ci->i_wrbuffer_ref_head, last ? " LAST" : "");
} else {
- list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- if (capsnap->context == snapc) {
- found = true;
+ list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
+ if (iter->context == snapc) {
+ capsnap = iter;
break;
}
}
- if (!found) {
+ if (!capsnap) {
/*
* The capsnap should already be removed when removing
* auth cap in the case of a forced unmount.
@@ -3201,20 +3400,19 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
}
}
}
- dout("put_wrbuffer_cap_refs on %p cap_snap %p "
- " snap %lld %d/%d -> %d/%d %s%s\n",
- inode, capsnap, capsnap->context->seq,
- ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
- ci->i_wrbuffer_ref, capsnap->dirty_pages,
- last ? " (wrbuffer last)" : "",
- complete_capsnap ? " (complete capsnap)" : "");
+ doutc(cl, "%p %llx.%llx cap_snap %p snap %lld %d/%d -> %d/%d %s%s\n",
+ inode, ceph_vinop(inode), capsnap, capsnap->context->seq,
+ ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
+ ci->i_wrbuffer_ref, capsnap->dirty_pages,
+ last ? " (wrbuffer last)" : "",
+ complete_capsnap ? " (complete capsnap)" : "");
}
unlock:
spin_unlock(&ci->i_ceph_lock);
if (last) {
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, 0);
} else if (flush_snaps) {
ceph_flush_snaps(ci, NULL);
}
@@ -3230,9 +3428,10 @@ unlock:
*/
static void invalidate_aliases(struct inode *inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct dentry *dn, *prev = NULL;
- dout("invalidate_aliases inode %p\n", inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
d_prune_aliases(inode);
/*
* For non-directory inode, d_find_alias() only returns
@@ -3271,6 +3470,9 @@ struct cap_extra_info {
/* currently issued */
int issued;
struct timespec64 btime;
+ u8 *fscrypt_auth;
+ u32 fscrypt_auth_len;
+ u64 fscrypt_file_size;
};
/*
@@ -3288,6 +3490,7 @@ static void handle_cap_grant(struct inode *inode,
__releases(ci->i_ceph_lock)
__releases(session->s_mdsc->snap_rwsem)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int seq = le32_to_cpu(grant->seq);
int newcaps = le32_to_cpu(grant->caps);
@@ -3302,11 +3505,22 @@ static void handle_cap_grant(struct inode *inode,
bool queue_invalidate = false;
bool deleted_inode = false;
bool fill_inline = false;
+ bool revoke_wait = false;
+ int flags = 0;
+
+ /*
+ * If there is at least one crypto block then we'll trust
+ * fscrypt_file_size. If the real length of the file is 0, then
+ * ignore it (it has probably been truncated down to 0 by the MDS).
+ */
+ if (IS_ENCRYPTED(inode) && size)
+ size = extra_info->fscrypt_file_size;
- dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
- inode, cap, session->s_mds, seq, ceph_cap_string(newcaps));
- dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
- i_size_read(inode));
+ doutc(cl, "%p %llx.%llx cap %p mds%d seq %d %s\n", inode,
+ ceph_vinop(inode), cap, session->s_mds, seq,
+ ceph_cap_string(newcaps));
+ doutc(cl, " size %llu max_size %llu, i_size %llu\n", size,
+ max_size, i_size_read(inode));
/*
@@ -3366,16 +3580,25 @@ static void handle_cap_grant(struct inode *inode,
inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
ci->i_btime = extra_info->btime;
- dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
- from_kuid(&init_user_ns, inode->i_uid),
- from_kgid(&init_user_ns, inode->i_gid));
+ doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
+ ceph_vinop(inode), inode->i_mode,
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kgid(&init_user_ns, inode->i_gid));
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len ||
+ memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth,
+ ci->fscrypt_auth_len))
+ pr_warn_ratelimited_client(cl,
+ "cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n",
+ ci->fscrypt_auth_len,
+ extra_info->fscrypt_auth_len);
+#endif
}
if ((newcaps & CEPH_CAP_LINK_SHARED) &&
(extra_info->issued & CEPH_CAP_LINK_EXCL) == 0) {
set_nlink(inode, le32_to_cpu(grant->nlink));
- if (inode->i_nlink == 0 &&
- (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
+ if (inode->i_nlink == 0)
deleted_inode = true;
}
@@ -3385,8 +3608,8 @@ static void handle_cap_grant(struct inode *inode,
u64 version = le64_to_cpu(grant->xattr_version);
if (version > ci->i_xattrs.version) {
- dout(" got new xattrs v%llu on %p len %d\n",
- version, inode, len);
+ doutc(cl, " got new xattrs v%llu on %p %llx.%llx len %d\n",
+ version, inode, ceph_vinop(inode), len);
if (ci->i_xattrs.blob)
ceph_buffer_put(ci->i_xattrs.blob);
ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
@@ -3437,8 +3660,8 @@ static void handle_cap_grant(struct inode *inode,
if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
if (max_size != ci->i_max_size) {
- dout("max_size %lld -> %llu\n",
- ci->i_max_size, max_size);
+ doutc(cl, "max_size %lld -> %llu\n", ci->i_max_size,
+ max_size);
ci->i_max_size = max_size;
if (max_size >= ci->i_wanted_max_size) {
ci->i_wanted_max_size = 0; /* reset */
@@ -3452,10 +3675,9 @@ static void handle_cap_grant(struct inode *inode,
wanted = __ceph_caps_wanted(ci);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
- dout(" my wanted = %s, used = %s, dirty %s\n",
- ceph_cap_string(wanted),
- ceph_cap_string(used),
- ceph_cap_string(dirty));
+ doutc(cl, " my wanted = %s, used = %s, dirty %s\n",
+ ceph_cap_string(wanted), ceph_cap_string(used),
+ ceph_cap_string(dirty));
if ((was_stale || le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) &&
(wanted & ~(cap->mds_wanted | newcaps))) {
@@ -3476,29 +3698,34 @@ static void handle_cap_grant(struct inode *inode,
if (cap->issued & ~newcaps) {
int revoking = cap->issued & ~newcaps;
- dout("revocation: %s -> %s (revoking %s)\n",
- ceph_cap_string(cap->issued),
- ceph_cap_string(newcaps),
- ceph_cap_string(revoking));
+ doutc(cl, "revocation: %s -> %s (revoking %s)\n",
+ ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
+ ceph_cap_string(revoking));
if (S_ISREG(inode->i_mode) &&
- (revoking & used & CEPH_CAP_FILE_BUFFER))
+ (revoking & used & CEPH_CAP_FILE_BUFFER)) {
writeback = true; /* initiate writeback; will delay ack */
- else if (queue_invalidate &&
+ revoke_wait = true;
+ } else if (queue_invalidate &&
revoking == CEPH_CAP_FILE_CACHE &&
- (newcaps & CEPH_CAP_FILE_LAZYIO) == 0)
- ; /* do nothing yet, invalidation will be queued */
- else if (cap == ci->i_auth_cap)
+ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) {
+ revoke_wait = true; /* do nothing yet, invalidation will be queued */
+ } else if (cap == ci->i_auth_cap) {
check_caps = 1; /* check auth cap only */
- else
+ } else {
check_caps = 2; /* check all caps */
+ }
+ /* If there are new caps, try to wake up the waiters */
+ if (~cap->issued & newcaps)
+ wake = true;
cap->issued = newcaps;
cap->implemented |= newcaps;
} else if (cap->issued == newcaps) {
- dout("caps unchanged: %s -> %s\n",
- ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
+ doutc(cl, "caps unchanged: %s -> %s\n",
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps));
} else {
- dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
- ceph_cap_string(newcaps));
+ doutc(cl, "grant: %s -> %s\n", ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps));
/* non-auth MDS is revoking the newly grant caps ? */
if (cap == ci->i_auth_cap &&
__ceph_caps_revoking_other(ci, cap, newcaps))
@@ -3512,6 +3739,16 @@ static void handle_cap_grant(struct inode *inode,
}
BUG_ON(cap->issued & ~cap->implemented);
+ /* don't let check_caps skip sending a response to MDS for revoke msgs */
+ if (!revoke_wait && le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ cap->mds_wanted = 0;
+ flags |= CHECK_CAPS_FLUSH_FORCE;
+ if (cap == ci->i_auth_cap)
+ check_caps = 1; /* check auth cap only */
+ else
+ check_caps = 2; /* check all caps */
+ }
+
if (extra_info->inline_version > 0 &&
extra_info->inline_version >= ci->i_inline_version) {
ci->i_inline_version = extra_info->inline_version;
@@ -3520,24 +3757,23 @@ static void handle_cap_grant(struct inode *inode,
fill_inline = true;
}
- if (ci->i_auth_cap == cap &&
- le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
- if (newcaps & ~extra_info->issued)
- wake = true;
+ if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
+ if (ci->i_auth_cap == cap) {
+ if (newcaps & ~extra_info->issued)
+ wake = true;
- if (ci->i_requested_max_size > max_size ||
- !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
- /* re-request max_size if necessary */
- ci->i_requested_max_size = 0;
- wake = true;
- }
+ if (ci->i_requested_max_size > max_size ||
+ !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
+ /* re-request max_size if necessary */
+ ci->i_requested_max_size = 0;
+ wake = true;
+ }
- ceph_kick_flushing_inode_caps(session, ci);
- spin_unlock(&ci->i_ceph_lock);
+ ceph_kick_flushing_inode_caps(session, ci);
+ }
up_read(&session->s_mdsc->snap_rwsem);
- } else {
- spin_unlock(&ci->i_ceph_lock);
}
+ spin_unlock(&ci->i_ceph_lock);
if (fill_inline)
ceph_fill_inline_data(inode, NULL, extra_info->inline_data,
@@ -3562,10 +3798,9 @@ static void handle_cap_grant(struct inode *inode,
mutex_unlock(&session->s_mutex);
if (check_caps == 1)
- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL,
- session);
+ ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
else if (check_caps == 2)
- ceph_check_caps(ci, CHECK_CAPS_NOINVAL, session);
+ ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL);
}
/*
@@ -3579,7 +3814,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
__releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap_flush *cf, *tmp_cf;
LIST_HEAD(to_remove);
unsigned seq = le32_to_cpu(m->seq);
@@ -3616,11 +3852,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
}
}
- dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
- " flushing %s -> %s\n",
- inode, session->s_mds, seq, ceph_cap_string(dirty),
- ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
- ceph_cap_string(ci->i_flushing_caps & ~cleaned));
+ doutc(cl, "%p %llx.%llx mds%d seq %d on %s cleaned %s, flushing %s -> %s\n",
+ inode, ceph_vinop(inode), session->s_mds, seq,
+ ceph_cap_string(dirty), ceph_cap_string(cleaned),
+ ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(ci->i_flushing_caps & ~cleaned));
if (list_empty(&to_remove) && !cleaned)
goto out;
@@ -3636,18 +3872,21 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
if (list_empty(&ci->i_cap_flush_list)) {
list_del_init(&ci->i_flushing_item);
if (!list_empty(&session->s_cap_flushing)) {
- dout(" mds%d still flushing cap on %p\n",
- session->s_mds,
- &list_first_entry(&session->s_cap_flushing,
- struct ceph_inode_info,
- i_flushing_item)->vfs_inode);
+ struct inode *inode =
+ &list_first_entry(&session->s_cap_flushing,
+ struct ceph_inode_info,
+ i_flushing_item)->netfs.inode;
+ doutc(cl, " mds%d still flushing cap on %p %llx.%llx\n",
+ session->s_mds, inode, ceph_vinop(inode));
}
}
mdsc->num_cap_flushing--;
- dout(" inode %p now !flushing\n", inode);
+ doutc(cl, " %p %llx.%llx now !flushing\n", inode,
+ ceph_vinop(inode));
if (ci->i_dirty_caps == 0) {
- dout(" inode %p now clean\n", inode);
+ doutc(cl, " %p %llx.%llx now clean\n", inode,
+ ceph_vinop(inode));
BUG_ON(!list_empty(&ci->i_dirty_item));
drop = true;
if (ci->i_wr_ref == 0 &&
@@ -3685,12 +3924,14 @@ void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
bool *wake_ci, bool *wake_mdsc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
bool ret;
lockdep_assert_held(&ci->i_ceph_lock);
- dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci);
+ doutc(cl, "removing capsnap %p, %p %llx.%llx ci %p\n", capsnap,
+ inode, ceph_vinop(inode), ci);
list_del_init(&capsnap->ci_item);
ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
@@ -3729,37 +3970,38 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
struct ceph_mds_session *session)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
u64 follows = le64_to_cpu(m->snap_follows);
- struct ceph_cap_snap *capsnap;
- bool flushed = false;
+ struct ceph_cap_snap *capsnap = NULL, *iter;
bool wake_ci = false;
bool wake_mdsc = false;
- dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
- inode, ci, session->s_mds, follows);
+ doutc(cl, "%p %llx.%llx ci %p mds%d follows %lld\n", inode,
+ ceph_vinop(inode), ci, session->s_mds, follows);
spin_lock(&ci->i_ceph_lock);
- list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- if (capsnap->follows == follows) {
- if (capsnap->cap_flush.tid != flush_tid) {
- dout(" cap_snap %p follows %lld tid %lld !="
- " %lld\n", capsnap, follows,
- flush_tid, capsnap->cap_flush.tid);
+ list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
+ if (iter->follows == follows) {
+ if (iter->cap_flush.tid != flush_tid) {
+ doutc(cl, " cap_snap %p follows %lld "
+ "tid %lld != %lld\n", iter,
+ follows, flush_tid,
+ iter->cap_flush.tid);
break;
}
- flushed = true;
+ capsnap = iter;
break;
} else {
- dout(" skipping cap_snap %p follows %lld\n",
- capsnap, capsnap->follows);
+ doutc(cl, " skipping cap_snap %p follows %lld\n",
+ iter, iter->follows);
}
}
- if (flushed)
+ if (capsnap)
ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
spin_unlock(&ci->i_ceph_lock);
- if (flushed) {
+ if (capsnap) {
ceph_put_snap_context(capsnap->context);
ceph_put_cap_snap(capsnap);
if (wake_ci)
@@ -3777,9 +4019,11 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
*/
static bool handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
- struct ceph_mds_session *session)
+ struct ceph_mds_session *session,
+ struct cap_extra_info *extra_info)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int mds = session->s_mds;
int seq = le32_to_cpu(trunc->seq);
u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
@@ -3794,8 +4038,16 @@ static bool handle_cap_trunc(struct inode *inode,
issued |= implemented | dirty;
- dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
- inode, mds, seq, truncate_size, truncate_seq);
+ /*
+ * If there is at least one crypto block then we'll trust
+ * fscrypt_file_size. If the real length of the file is 0, then
+ * ignore it (it has probably been truncated down to 0 by the MDS).
+ */
+ if (IS_ENCRYPTED(inode) && size)
+ size = extra_info->fscrypt_file_size;
+
+ doutc(cl, "%p %llx.%llx mds%d seq %d to %lld truncate seq %d\n",
+ inode, ceph_vinop(inode), mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
return queue_trunc;
@@ -3813,36 +4065,37 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
struct ceph_mds_cap_peer *ph,
struct ceph_mds_session *session)
{
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *tsession = NULL;
struct ceph_cap *cap, *tcap, *new_cap = NULL;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 t_cap_id;
- unsigned mseq = le32_to_cpu(ex->migrate_seq);
- unsigned t_seq, t_mseq;
+ u32 t_issue_seq, t_mseq;
int target, issued;
int mds = session->s_mds;
if (ph) {
t_cap_id = le64_to_cpu(ph->cap_id);
- t_seq = le32_to_cpu(ph->seq);
+ t_issue_seq = le32_to_cpu(ph->issue_seq);
t_mseq = le32_to_cpu(ph->mseq);
target = le32_to_cpu(ph->mds);
} else {
- t_cap_id = t_seq = t_mseq = 0;
+ t_cap_id = t_issue_seq = t_mseq = 0;
target = -1;
}
- dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
- inode, ci, mds, mseq, target);
+ doutc(cl, " cap %llx.%llx export to peer %d piseq %u pmseq %u\n",
+ ceph_vinop(inode), target, t_issue_seq, t_mseq);
retry:
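+ /* ceph_remove_cap() below may drop the inode's snap realm, which requires snap_rwsem */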
+ down_read(&mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
goto out_unlock;
if (target < 0) {
- ceph_remove_cap(cap, false);
+ ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
}
@@ -3853,23 +4106,25 @@ retry:
issued = cap->issued;
if (issued != cap->implemented)
- pr_err_ratelimited("handle_cap_export: issued != implemented: "
- "ino (%llx.%llx) mds%d seq %d mseq %d "
- "issued %s implemented %s\n",
- ceph_vinop(inode), mds, cap->seq, cap->mseq,
- ceph_cap_string(issued),
- ceph_cap_string(cap->implemented));
+ pr_err_ratelimited_client(cl, "issued != implemented: "
+ "%p %llx.%llx mds%d seq %d mseq %d"
+ " issued %s implemented %s\n",
+ inode, ceph_vinop(inode), mds,
+ cap->seq, cap->mseq,
+ ceph_cap_string(issued),
+ ceph_cap_string(cap->implemented));
tcap = __get_cap_for_mds(ci, target);
if (tcap) {
/* already have caps from the target */
if (tcap->cap_id == t_cap_id &&
- ceph_seq_cmp(tcap->seq, t_seq) < 0) {
- dout(" updating import cap %p mds%d\n", tcap, target);
+ ceph_seq_cmp(tcap->seq, t_issue_seq) < 0) {
+ doutc(cl, " updating import cap %p mds%d\n", tcap,
+ target);
tcap->cap_id = t_cap_id;
- tcap->seq = t_seq - 1;
- tcap->issue_seq = t_seq - 1;
+ tcap->seq = t_issue_seq - 1;
+ tcap->issue_seq = t_issue_seq - 1;
tcap->issued |= issued;
tcap->implemented |= issued;
if (cap == ci->i_auth_cap) {
@@ -3877,14 +4132,14 @@ retry:
change_auth_cap_ses(ci, tcap->session);
}
}
- ceph_remove_cap(cap, false);
+ ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
} else if (tsession) {
- /* add placeholder for the export tagert */
+ /* add placeholder for the export target */
int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
tcap = new_cap;
ceph_add_cap(inode, tsession, t_cap_id, issued, 0,
- t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
+ t_issue_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
if (!list_empty(&ci->i_cap_flush_list) &&
ci->i_auth_cap == tcap) {
@@ -3894,11 +4149,12 @@ retry:
spin_unlock(&mdsc->cap_dirty_lock);
}
- ceph_remove_cap(cap, false);
+ ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
}
spin_unlock(&ci->i_ceph_lock);
+ up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex);
/* open target session */
@@ -3924,6 +4180,7 @@ retry:
out_unlock:
spin_unlock(&ci->i_ceph_lock);
+ up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex);
if (tsession) {
mutex_unlock(&tsession->s_mutex);
@@ -3945,6 +4202,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
struct ceph_cap **target_cap, int *old_issued)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_cap *cap, *ocap, *new_cap = NULL;
int mds = session->s_mds;
int issued;
@@ -3955,18 +4213,22 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
u64 realmino = le64_to_cpu(im->realm);
u64 cap_id = le64_to_cpu(im->cap_id);
u64 p_cap_id;
+ u32 piseq = 0;
+ u32 pmseq = 0;
int peer;
if (ph) {
p_cap_id = le64_to_cpu(ph->cap_id);
peer = le32_to_cpu(ph->mds);
+ piseq = le32_to_cpu(ph->issue_seq);
+ pmseq = le32_to_cpu(ph->mseq);
} else {
p_cap_id = 0;
peer = -1;
}
- dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
- inode, ci, mds, mseq, peer);
+ doutc(cl, " cap %llx.%llx import from peer %d piseq %u pmseq %u\n",
+ ceph_vinop(inode), peer, piseq, pmseq);
retry:
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -3992,26 +4254,70 @@ retry:
ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
if (ocap && ocap->cap_id == p_cap_id) {
- dout(" remove export cap %p mds%d flags %d\n",
- ocap, peer, ph->flags);
+ doutc(cl, " remove export cap %p mds%d flags %d\n",
+ ocap, peer, ph->flags);
if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
- (ocap->seq != le32_to_cpu(ph->seq) ||
- ocap->mseq != le32_to_cpu(ph->mseq))) {
- pr_err_ratelimited("handle_cap_import: "
- "mismatched seq/mseq: ino (%llx.%llx) "
- "mds%d seq %d mseq %d importer mds%d "
- "has peer seq %d mseq %d\n",
- ceph_vinop(inode), peer, ocap->seq,
- ocap->mseq, mds, le32_to_cpu(ph->seq),
- le32_to_cpu(ph->mseq));
+ (ocap->seq != piseq ||
+ ocap->mseq != pmseq)) {
+ pr_err_ratelimited_client(cl, "mismatched seq/mseq: "
+ "%p %llx.%llx mds%d seq %d mseq %d"
+ " importer mds%d has peer seq %d mseq %d\n",
+ inode, ceph_vinop(inode), peer,
+ ocap->seq, ocap->mseq, mds, piseq, pmseq);
}
- ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
+ ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
*old_issued = issued;
*target_cap = cap;
}
+#ifdef CONFIG_FS_ENCRYPTION
+static int parse_fscrypt_fields(void **p, void *end,
+ struct cap_extra_info *extra)
+{
+ u32 len;
+
+ ceph_decode_32_safe(p, end, extra->fscrypt_auth_len, bad);
+ if (extra->fscrypt_auth_len) {
+ ceph_decode_need(p, end, extra->fscrypt_auth_len, bad);
+ extra->fscrypt_auth = kmalloc(extra->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (!extra->fscrypt_auth)
+ return -ENOMEM;
+ ceph_decode_copy_safe(p, end, extra->fscrypt_auth,
+ extra->fscrypt_auth_len, bad);
+ }
+
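+ /* fscrypt_file field: if present, its first 8 bytes carry the real file size */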
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len >= sizeof(u64)) {
+ ceph_decode_64_safe(p, end, extra->fscrypt_file_size, bad);
+ len -= sizeof(u64);
+ }
+ ceph_decode_skip_n(p, end, len, bad);
+ return 0;
+bad:
+ return -EIO;
+}
+#else
+static int parse_fscrypt_fields(void **p, void *end,
+ struct cap_extra_info *extra)
+{
+ u32 len;
+
+ /* Don't care about these fields unless we're encryption-capable */
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len)
+ ceph_decode_skip_n(p, end, len, bad);
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len)
+ ceph_decode_skip_n(p, end, len, bad);
+ return 0;
+bad:
+ return -EIO;
+}
+#endif
+
/*
* Handle a caps message from the MDS.
*
@@ -4022,6 +4328,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
@@ -4030,15 +4337,18 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_snap_realm *realm = NULL;
int op;
int msg_version = le16_to_cpu(msg->hdr.version);
- u32 seq, mseq;
+ u32 seq, mseq, issue_seq;
struct ceph_vino vino;
void *snaptrace;
size_t snaptrace_len;
void *p, *end;
struct cap_extra_info extra_info = {};
bool queue_trunc;
+ bool close_sessions = false;
+ bool do_cap_release = false;
- dout("handle_caps from mds%d\n", session->s_mds);
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
/* decode */
end = msg->front.iov_base + msg->front.iov_len;
@@ -4050,6 +4360,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
vino.snap = CEPH_NOSNAP;
seq = le32_to_cpu(h->seq);
mseq = le32_to_cpu(h->migrate_seq);
+ issue_seq = le32_to_cpu(h->issue_seq);
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
@@ -4130,34 +4441,34 @@ void ceph_handle_caps(struct ceph_mds_session *session,
ceph_decode_64_safe(&p, end, extra_info.nsubdirs, bad);
}
+ if (msg_version >= 12) {
+ if (parse_fscrypt_fields(&p, end, &extra_info))
+ goto bad;
+ }
+
/* lookup ino */
inode = ceph_find_inode(mdsc->fsc->sb, vino);
- ci = ceph_inode(inode);
- dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
- vino.snap, inode);
+ doutc(cl, " caps mds%d op %s ino %llx.%llx inode %p seq %u iseq %u mseq %u\n",
+ session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode,
+ seq, issue_seq, mseq);
mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
- dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
- (unsigned)seq);
if (!inode) {
- dout(" i don't have ino %llx\n", vino.ino);
+ doutc(cl, " i don't have ino %llx\n", vino.ino);
- if (op == CEPH_CAP_OP_IMPORT) {
- cap = ceph_get_cap(mdsc, NULL);
- cap->cap_ino = vino.ino;
- cap->queue_release = 1;
- cap->cap_id = le64_to_cpu(h->cap_id);
- cap->mseq = mseq;
- cap->seq = seq;
- cap->issue_seq = seq;
- spin_lock(&session->s_cap_lock);
- __ceph_queue_cap_release(session, cap);
- spin_unlock(&session->s_cap_lock);
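+ /* the MDS expects an answer for these ops; queue a release for the cap we don't have */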
+ switch (op) {
+ case CEPH_CAP_OP_IMPORT:
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT:
+ do_cap_release = true;
+ break;
+ default:
+ break;
}
goto flush_cap_releases;
}
+ ci = ceph_inode(inode);
/* these will work even if we don't have a cap yet */
switch (op) {
@@ -4174,9 +4485,13 @@ void ceph_handle_caps(struct ceph_mds_session *session,
realm = NULL;
if (snaptrace_len) {
down_write(&mdsc->snap_rwsem);
- ceph_update_snap_trace(mdsc, snaptrace,
- snaptrace + snaptrace_len,
- false, &realm);
+ if (ceph_update_snap_trace(mdsc, snaptrace,
+ snaptrace + snaptrace_len,
+ false, &realm)) {
+ up_write(&mdsc->snap_rwsem);
+ close_sessions = true;
+ goto done;
+ }
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
@@ -4195,10 +4510,18 @@ void ceph_handle_caps(struct ceph_mds_session *session,
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ceph_inode(inode), session->s_mds);
if (!cap) {
- dout(" no cap on %p ino %llx.%llx from mds%d\n",
- inode, ceph_ino(inode), ceph_snap(inode),
- session->s_mds);
+ doutc(cl, " no cap on %p ino %llx.%llx from mds%d\n",
+ inode, ceph_ino(inode), ceph_snap(inode),
+ session->s_mds);
spin_unlock(&ci->i_ceph_lock);
+ switch (op) {
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT:
+ do_cap_release = true;
+ break;
+ default:
+ break;
+ }
goto flush_cap_releases;
}
@@ -4218,7 +4541,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
break;
case CEPH_CAP_OP_TRUNC:
- queue_trunc = handle_cap_trunc(inode, h, session);
+ queue_trunc = handle_cap_trunc(inode, h, session,
+ &extra_info);
spin_unlock(&ci->i_ceph_lock);
if (queue_trunc)
ceph_queue_vmtruncate(inode);
@@ -4226,8 +4550,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
default:
spin_unlock(&ci->i_ceph_lock);
- pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
- ceph_cap_op_name(op));
+ pr_err_client(cl, "unknown cap op %d %s\n", op,
+ ceph_cap_op_name(op));
}
done:
@@ -4235,7 +4559,15 @@ done:
done_unlocked:
iput(inode);
out:
+ ceph_dec_mds_stopping_blocker(mdsc);
+
ceph_put_string(extra_info.pool_ns);
+
+ /* Defer closing the sessions after s_mutex lock being released */
+ if (close_sessions)
+ ceph_mdsc_close_sessions(mdsc);
+
+ kfree(extra_info.fscrypt_auth);
return;
flush_cap_releases:
@@ -4244,11 +4576,23 @@ flush_cap_releases:
* along for the mds (who clearly thinks we still have this
* cap).
*/
- ceph_flush_cap_releases(mdsc, session);
+ if (do_cap_release) {
+ cap = ceph_get_cap(mdsc, NULL);
+ cap->cap_ino = vino.ino;
+ cap->queue_release = 1;
+ cap->cap_id = le64_to_cpu(h->cap_id);
+ cap->mseq = mseq;
+ cap->seq = seq;
+ cap->issue_seq = seq;
+ spin_lock(&session->s_cap_lock);
+ __ceph_queue_cap_release(session, cap);
+ spin_unlock(&session->s_cap_lock);
+ }
+ ceph_flush_session_cap_releases(mdsc, session);
goto done;
bad:
- pr_err("ceph_handle_caps: corrupt message\n");
+ pr_err_client(cl, "corrupt message\n");
ceph_msg_dump(msg);
goto out;
}
@@ -4262,6 +4606,7 @@ bad:
*/
unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_mount_options *opt = mdsc->fsc->mount_options;
@@ -4269,14 +4614,14 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
unsigned long loop_start = jiffies;
unsigned long delay = 0;
- dout("check_delayed_caps\n");
+ doutc(cl, "begin\n");
spin_lock(&mdsc->cap_delay_lock);
while (!list_empty(&mdsc->cap_delay_list)) {
ci = list_first_entry(&mdsc->cap_delay_list,
struct ceph_inode_info,
i_cap_delay_list);
if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
- dout("%s caps added recently. Exiting loop", __func__);
+ doutc(cl, "caps added recently. Exiting loop");
delay = ci->i_hold_caps_max;
break;
}
@@ -4285,16 +4630,26 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
break;
list_del_init(&ci->i_cap_delay_list);
- inode = igrab(&ci->vfs_inode);
+ inode = igrab(&ci->netfs.inode);
if (inode) {
spin_unlock(&mdsc->cap_delay_lock);
- dout("check_delayed_caps on %p\n", inode);
- ceph_check_caps(ci, 0, NULL);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ ceph_check_caps(ci, 0);
iput(inode);
spin_lock(&mdsc->cap_delay_lock);
}
+
+ /*
+ * Make sure that having too many dirty caps or general
+ * slowness doesn't block the mdsc delayed work,
+ * preventing send_renew_caps() from running.
+ */
+ if (time_after_eq(jiffies, loop_start + 5 * HZ))
+ break;
}
spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "done\n");
return delay;
}
@@ -4305,24 +4660,26 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
static void flush_dirty_session_caps(struct ceph_mds_session *s)
{
struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *inode;
- dout("flush_dirty_caps\n");
+ doutc(cl, "begin\n");
spin_lock(&mdsc->cap_dirty_lock);
while (!list_empty(&s->s_cap_dirty)) {
ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
i_dirty_item);
- inode = &ci->vfs_inode;
+ inode = &ci->netfs.inode;
ihold(inode);
- dout("flush_dirty_caps %p\n", inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
spin_unlock(&mdsc->cap_dirty_lock);
- ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
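+ /* let any pending async create complete before flushing this inode's caps */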
+ ceph_wait_on_async_create(inode);
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
iput(inode);
spin_lock(&mdsc->cap_dirty_lock);
}
spin_unlock(&mdsc->cap_dirty_lock);
- dout("flush_dirty_caps done\n");
+ doutc(cl, "done\n");
}
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
@@ -4330,6 +4687,28 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true);
}
+/*
+ * Flush all cap releases to the mds
+ */
+static void flush_cap_releases(struct ceph_mds_session *s)
+{
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+ spin_lock(&s->s_cap_lock);
+ if (s->s_num_cap_releases)
+ ceph_flush_session_cap_releases(mdsc, s);
+ spin_unlock(&s->s_cap_lock);
+ doutc(cl, "done\n");
+}
+
+void ceph_flush_cap_releases(struct ceph_mds_client *mdsc)
+{
+ ceph_mdsc_iterate_sessions(mdsc, flush_cap_releases, true);
+}
+
void __ceph_touch_fmode(struct ceph_inode_info *ci,
struct ceph_mds_client *mdsc, int fmode)
{
@@ -4347,9 +4726,9 @@ void __ceph_touch_fmode(struct ceph_inode_info *ci,
void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
int bits = (fmode << 1) | 1;
- bool is_opened = false;
+ bool already_opened = false;
int i;
if (count == 1)
@@ -4357,19 +4736,19 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
spin_lock(&ci->i_ceph_lock);
for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
- if (bits & (1 << i))
- ci->i_nr_by_mode[i] += count;
-
/*
- * If any of the mode ref is larger than 1,
+ * If any of the mode ref is larger than 0,
* that means it has been already opened by
* others. Just skip checking the PIN ref.
*/
- if (i && ci->i_nr_by_mode[i] > 1)
- is_opened = true;
+ if (i && ci->i_nr_by_mode[i])
+ already_opened = true;
+
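+ /* bump the refs only after the check above, so this open doesn't count itself */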
+ if (bits & (1 << i))
+ ci->i_nr_by_mode[i] += count;
}
- if (!is_opened)
+ if (!already_opened)
percpu_counter_inc(&mdsc->metric.opened_inodes);
spin_unlock(&ci->i_ceph_lock);
}
@@ -4381,7 +4760,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
*/
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
int bits = (fmode << 1) | 1;
bool is_closed = true;
int i;
@@ -4427,8 +4806,23 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
if (__ceph_caps_dirty(ci)) {
struct ceph_mds_client *mdsc =
- ceph_inode_to_client(inode)->mdsc;
- __cap_delay_requeue_front(mdsc, ci);
+ ceph_inode_to_fs_client(inode)->mdsc;
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ spin_lock(&mdsc->cap_delay_lock);
+ ci->i_ceph_flags |= CEPH_I_FLUSH;
+ if (!list_empty(&ci->i_cap_delay_list))
+ list_del_init(&ci->i_cap_delay_list);
+ list_add_tail(&ci->i_cap_delay_list,
+ &mdsc->cap_unlink_delay_list);
+ spin_unlock(&mdsc->cap_delay_lock);
+
+ /*
+ * Fire the work immediately, because the MDS may be
+ * waiting for the caps release.
+ */
+ ceph_queue_cap_unlink_work(mdsc);
}
}
spin_unlock(&ci->i_ceph_lock);
@@ -4447,6 +4841,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
int mds, int drop, int unless, int force)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap *cap;
struct ceph_mds_request_release *rel = *p;
int used, dirty;
@@ -4456,9 +4851,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
- dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
- inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
- ceph_cap_string(unless));
+ doutc(cl, "%p %llx.%llx mds%d used|dirty %s drop %s unless %s\n",
+ inode, ceph_vinop(inode), mds, ceph_cap_string(used|dirty),
+ ceph_cap_string(drop), ceph_cap_string(unless));
/* only drop unused, clean caps */
drop &= ~(used | dirty);
@@ -4480,12 +4875,13 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
if (force || (cap->issued & drop)) {
if (cap->issued & drop) {
int wanted = __ceph_caps_wanted(ci);
- dout("encode_inode_release %p cap %p "
- "%s -> %s, wanted %s -> %s\n", inode, cap,
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->issued & ~drop),
- ceph_cap_string(cap->mds_wanted),
- ceph_cap_string(wanted));
+ doutc(cl, "%p %llx.%llx cap %p %s -> %s, "
+ "wanted %s -> %s\n", inode,
+ ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->issued & ~drop),
+ ceph_cap_string(cap->mds_wanted),
+ ceph_cap_string(wanted));
cap->issued &= ~drop;
cap->implemented &= ~drop;
@@ -4494,9 +4890,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
!(wanted & CEPH_CAP_ANY_FILE_WR))
ci->i_requested_max_size = 0;
} else {
- dout("encode_inode_release %p cap %p %s"
- " (force)\n", inode, cap,
- ceph_cap_string(cap->issued));
+ doutc(cl, "%p %llx.%llx cap %p %s (force)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued));
}
rel->ino = cpu_to_le64(ceph_ino(inode));
@@ -4511,24 +4907,40 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
*p += sizeof(*rel);
ret = 1;
} else {
- dout("encode_inode_release %p cap %p %s (noop)\n",
- inode, cap, ceph_cap_string(cap->issued));
+ doutc(cl, "%p %llx.%llx cap %p %s (noop)\n",
+ inode, ceph_vinop(inode), cap,
+ ceph_cap_string(cap->issued));
}
}
spin_unlock(&ci->i_ceph_lock);
return ret;
}
+/**
+ * ceph_encode_dentry_release - encode a dentry release into an outgoing request
+ * @p: outgoing request buffer
+ * @dentry: dentry to release
+ * @dir: dir to release it from
+ * @mds: mds that we're speaking to
+ * @drop: caps being dropped
+ * @unless: unless we have these caps
+ *
+ * Encode a dentry release into an outgoing request buffer. Returns 1 if the
+ * thing was released, or a negative error code otherwise.
+ */
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
struct inode *dir,
int mds, int drop, int unless)
{
- struct dentry *parent = NULL;
struct ceph_mds_request_release *rel = *p;
struct ceph_dentry_info *di = ceph_dentry(dentry);
+ struct ceph_client *cl;
int force = 0;
int ret;
+ /* This shouldn't happen */
+ BUG_ON(!dir);
+
/*
* force an record for the directory caps if we have a dentry lease.
* this is racy (can't take i_ceph_lock and d_lock together), but it
@@ -4538,25 +4950,149 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
spin_lock(&dentry->d_lock);
if (di->lease_session && di->lease_session->s_mds == mds)
force = 1;
- if (!dir) {
- parent = dget(dentry->d_parent);
- dir = d_inode(parent);
- }
spin_unlock(&dentry->d_lock);
ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
- dput(parent);
+ cl = ceph_inode_to_client(dir);
spin_lock(&dentry->d_lock);
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
- dout("encode_dentry_release %p mds%d seq %d\n",
- dentry, mds, (int)di->lease_seq);
- rel->dname_len = cpu_to_le32(dentry->d_name.len);
- memcpy(*p, dentry->d_name.name, dentry->d_name.len);
- *p += dentry->d_name.len;
+ int len = dentry->d_name.len;
+ doutc(cl, "%p mds%d seq %d\n", dentry, mds,
+ (int)di->lease_seq);
rel->dname_seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
+ memcpy(*p, dentry->d_name.name, len);
+ spin_unlock(&dentry->d_lock);
+ if (IS_ENCRYPTED(dir) && fscrypt_has_encryption_key(dir)) {
+ len = ceph_encode_encrypted_dname(dir, *p, len);
+ if (len < 0)
+ return len;
+ }
+ rel->dname_len = cpu_to_le32(len);
+ *p += len;
+ } else {
+ spin_unlock(&dentry->d_lock);
}
- spin_unlock(&dentry->d_lock);
return ret;
}
+
+static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = mdsc->fsc->client;
+ struct ceph_cap_snap *capsnap;
+ int capsnap_release = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ doutc(cl, "removing capsnaps, ci is %p, %p %llx.%llx\n",
+ ci, inode, ceph_vinop(inode));
+
+ while (!list_empty(&ci->i_cap_snaps)) {
+ capsnap = list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap, ci_item);
+ __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
+ ceph_put_snap_context(capsnap->context);
+ ceph_put_cap_snap(capsnap);
+ capsnap_release++;
+ }
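+ /* wake up anyone waiting for these capsnap flushes to complete */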
+ wake_up_all(&ci->i_cap_wq);
+ wake_up_all(&mdsc->cap_flushing_wq);
+ return capsnap_release;
+}
+
+int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
+{
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client *cl = fsc->client;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_auth;
+ bool dirty_dropped = false;
+ int iputs = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ doutc(cl, "removing cap %p, ci is %p, %p %llx.%llx\n",
+ cap, ci, inode, ceph_vinop(inode));
+
+ is_auth = (cap == ci->i_auth_cap);
+ __ceph_remove_cap(cap, false);
+ if (is_auth) {
+ struct ceph_cap_flush *cf;
+
+ if (ceph_inode_is_shutdown(inode)) {
+ if (inode->i_data.nrpages > 0)
+ *invalidate = true;
+ if (ci->i_wrbuffer_ref > 0)
+ mapping_set_error(&inode->i_data, -EIO);
+ }
+
+ spin_lock(&mdsc->cap_dirty_lock);
+
+ /* trash all of the cap flushes for this inode */
+ while (!list_empty(&ci->i_cap_flush_list)) {
+ cf = list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ list_del_init(&cf->g_list);
+ list_del_init(&cf->i_list);
+ if (!cf->is_capsnap)
+ ceph_free_cap_flush(cf);
+ }
+
+ if (!list_empty(&ci->i_dirty_item)) {
+ pr_warn_ratelimited_client(cl,
+ " dropping dirty %s state for %p %llx.%llx\n",
+ ceph_cap_string(ci->i_dirty_caps),
+ inode, ceph_vinop(inode));
+ ci->i_dirty_caps = 0;
+ list_del_init(&ci->i_dirty_item);
+ dirty_dropped = true;
+ }
+ if (!list_empty(&ci->i_flushing_item)) {
+ pr_warn_ratelimited_client(cl,
+ " dropping dirty+flushing %s state for %p %llx.%llx\n",
+ ceph_cap_string(ci->i_flushing_caps),
+ inode, ceph_vinop(inode));
+ ci->i_flushing_caps = 0;
+ list_del_init(&ci->i_flushing_item);
+ mdsc->num_cap_flushing--;
+ dirty_dropped = true;
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+
+ if (dirty_dropped) {
+ mapping_set_error(inode->i_mapping, -EIO);
+
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
+ }
+
+ if (atomic_read(&ci->i_filelock_ref) > 0) {
+ /* make further file lock syscall return -EIO */
+ ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
+ pr_warn_ratelimited_client(cl,
+ " dropping file locks for %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
+ }
+
+ if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
+ cf = ci->i_prealloc_cap_flush;
+ ci->i_prealloc_cap_flush = NULL;
+ if (!cf->is_capsnap)
+ ceph_free_cap_flush(cf);
+ }
+
+ if (!list_empty(&ci->i_cap_snaps))
+ iputs = remove_capsnaps(mdsc, inode);
+ }
+ if (dirty_dropped)
+ ++iputs;
+ return iputs;
+}
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
new file mode 100644
index 000000000000..0ea4db650f85
--- /dev/null
+++ b/fs/ceph/crypto.c
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The base64 encode/decode code was copied from fscrypt:
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ * Written by Uday Savagaonkar, 2014.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+#include <linux/ceph/ceph_debug.h>
+#include <linux/xattr.h>
+#include <linux/fscrypt.h>
+#include <linux/ceph/striper.h>
+
+#include "super.h"
+#include "mds_client.h"
+#include "crypto.h"
+
+static int ceph_crypt_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fscrypt_auth *cfa = (struct ceph_fscrypt_auth *)ci->fscrypt_auth;
+ u32 ctxlen;
+
+ /* Nonexistent or too short? */
+ if (!cfa || (ci->fscrypt_auth_len < (offsetof(struct ceph_fscrypt_auth, cfa_blob) + 1)))
+ return -ENOBUFS;
+
+ /* Some format we don't recognize? */
+ if (le32_to_cpu(cfa->cfa_version) != CEPH_FSCRYPT_AUTH_VERSION)
+ return -ENOBUFS;
+
+ ctxlen = le32_to_cpu(cfa->cfa_blob_len);
+ if (len < ctxlen)
+ return -ERANGE;
+
+ memcpy(ctx, cfa->cfa_blob, ctxlen);
+ return ctxlen;
+}
+
+static int ceph_crypt_set_context(struct inode *inode, const void *ctx,
+ size_t len, void *fs_data)
+{
+ int ret;
+ struct iattr attr = { };
+ struct ceph_iattr cia = { };
+ struct ceph_fscrypt_auth *cfa;
+
+ WARN_ON_ONCE(fs_data);
+
+ if (len > FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ return -EINVAL;
+
+ cfa = kzalloc(sizeof(*cfa), GFP_KERNEL);
+ if (!cfa)
+ return -ENOMEM;
+
+ cfa->cfa_version = cpu_to_le32(CEPH_FSCRYPT_AUTH_VERSION);
+ cfa->cfa_blob_len = cpu_to_le32(len);
+ memcpy(cfa->cfa_blob, ctx, len);
+
+ cia.fscrypt_auth = cfa;
+
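+ /* send the new fscrypt_auth blob to the MDS via a setattr request */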
+ ret = __ceph_setattr(&nop_mnt_idmap, inode, &attr, &cia);
+ if (ret == 0)
+ inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
+ kfree(cia.fscrypt_auth);
+ return ret;
+}
+
+static bool ceph_crypt_empty_dir(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ return ci->i_rsubdirs + ci->i_rfiles == 1;
+}
+
+static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
+{
+ return ceph_sb_to_fs_client(sb)->fsc_dummy_enc_policy.policy;
+}
+
+static struct fscrypt_operations ceph_fscrypt_ops = {
+ .inode_info_offs = (int)offsetof(struct ceph_inode_info, i_crypt_info) -
+ (int)offsetof(struct ceph_inode_info, netfs.inode),
+ .needs_bounce_pages = 1,
+ .get_context = ceph_crypt_get_context,
+ .set_context = ceph_crypt_set_context,
+ .get_dummy_policy = ceph_get_dummy_policy,
+ .empty_dir = ceph_crypt_empty_dir,
+};
+
+void ceph_fscrypt_set_ops(struct super_block *sb)
+{
+ fscrypt_set_ops(sb, &ceph_fscrypt_ops);
+}
+
+void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc)
+{
+ fscrypt_free_dummy_policy(&fsc->fsc_dummy_enc_policy);
+}
+
+int ceph_fscrypt_prepare_context(struct inode *dir, struct inode *inode,
+ struct ceph_acl_sec_ctx *as)
+{
+ int ret, ctxsize;
+ bool encrypted = false;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ ret = fscrypt_prepare_new_inode(dir, inode, &encrypted);
+ if (ret)
+ return ret;
+ if (!encrypted)
+ return 0;
+
+ as->fscrypt_auth = kzalloc(sizeof(*as->fscrypt_auth), GFP_KERNEL);
+ if (!as->fscrypt_auth)
+ return -ENOMEM;
+
+ ctxsize = fscrypt_context_for_new_inode(as->fscrypt_auth->cfa_blob,
+ inode);
+ if (ctxsize < 0)
+ return ctxsize;
+
+ as->fscrypt_auth->cfa_version = cpu_to_le32(CEPH_FSCRYPT_AUTH_VERSION);
+ as->fscrypt_auth->cfa_blob_len = cpu_to_le32(ctxsize);
+
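+ /* keep a copy on the inode; ceph_crypt_get_context() reads it back from here */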
+ WARN_ON_ONCE(ci->fscrypt_auth);
+ kfree(ci->fscrypt_auth);
+ ci->fscrypt_auth_len = ceph_fscrypt_auth_len(as->fscrypt_auth);
+ ci->fscrypt_auth = kmemdup(as->fscrypt_auth, ci->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (!ci->fscrypt_auth)
+ return -ENOMEM;
+
+ inode->i_flags |= S_ENCRYPTED;
+
+ return 0;
+}
+
+void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as)
+{
+ swap(req->r_fscrypt_auth, as->fscrypt_auth);
+}
+
+/*
+ * User-created snapshots can't start with '_'. Snapshots that start with this
+ * character are special (hint: they aren't real snapshots) and use the
+ * following format:
+ *
+ * _<SNAPSHOT-NAME>_<INODE-NUMBER>
+ *
+ * where:
+ * - <SNAPSHOT-NAME> - the real snapshot name that may need to be decrypted,
+ * - <INODE-NUMBER> - the inode number (in decimal) for the actual snapshot
+ *
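+ * For example, an entry named "_mysnap_1099511627776" (illustrative values)
+ * refers to a snapshot called "mysnap" on the directory whose inode number
+ * is 1099511627776.
+ *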
+ * This function parses these snapshot names and returns the inode for
+ * <INODE-NUMBER>. 'name_len' will also be set to the <SNAPSHOT-NAME>
+ * length.
+ */
+static struct inode *parse_longname(const struct inode *parent,
+ const char *name, int *name_len)
+{
+ struct ceph_client *cl = ceph_inode_to_client(parent);
+ struct inode *dir = NULL;
+ struct ceph_vino vino = { .snap = CEPH_NOSNAP };
+ char *name_end, *inode_number;
+ int ret = -EIO;
+ /* NUL-terminate */
+ char *str __free(kfree) = kmemdup_nul(name, *name_len, GFP_KERNEL);
+ if (!str)
+ return ERR_PTR(-ENOMEM);
+ /* Skip initial '_' */
+ str++;
+ name_end = strrchr(str, '_');
+ if (!name_end) {
+ doutc(cl, "failed to parse long snapshot name: %s\n", str);
+ return ERR_PTR(-EIO);
+ }
+ *name_len = (name_end - str);
+ if (*name_len <= 0) {
+ pr_err_client(cl, "failed to parse long snapshot name\n");
+ return ERR_PTR(-EIO);
+ }
+
+ /* Get the inode number */
+ inode_number = name_end + 1;
+ ret = kstrtou64(inode_number, 10, &vino.ino);
+ if (ret) {
+ doutc(cl, "failed to parse inode number: %s\n", str);
+ return ERR_PTR(ret);
+ }
+
+ /* And finally the inode */
+ dir = ceph_find_inode(parent->i_sb, vino);
+ if (!dir) {
+ /* This can happen if we're not mounting cephfs on the root */
+ dir = ceph_get_inode(parent->i_sb, vino, NULL);
+ if (IS_ERR(dir))
+ doutc(cl, "can't find inode %s (%s)\n", inode_number, name);
+ }
+ return dir;
+}
+
+int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int elen)
+{
+ struct ceph_client *cl = ceph_inode_to_client(parent);
+ struct inode *dir = parent;
+ char *p = buf;
+ u32 len;
+ int name_len = elen;
+ int ret;
+ u8 *cryptbuf = NULL;
+
+ /* Handle the special case of snapshot names that start with '_' */
+ if (ceph_snap(dir) == CEPH_SNAPDIR && *p == '_') {
+ dir = parse_longname(parent, p, &name_len);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ p++; /* skip initial '_' */
+ }
+
+ if (!fscrypt_has_encryption_key(dir))
+ goto out;
+
+ /*
+ * Convert cleartext d_name to ciphertext. If result is longer than
+ * CEPH_NOHASH_NAME_MAX, sha256 the remaining bytes.
+ *
+ * See: fscrypt_setup_filename
+ */
+ if (!fscrypt_fname_encrypted_size(dir, name_len, NAME_MAX, &len)) {
+ elen = -ENAMETOOLONG;
+ goto out;
+ }
+
+ /* Allocate a buffer appropriate to hold the result */
+ cryptbuf = kmalloc(len > CEPH_NOHASH_NAME_MAX ? NAME_MAX : len,
+ GFP_KERNEL);
+ if (!cryptbuf) {
+ elen = -ENOMEM;
+ goto out;
+ }
+
+ ret = fscrypt_fname_encrypt(dir,
+ &(struct qstr)QSTR_INIT(p, name_len),
+ cryptbuf, len);
+ if (ret) {
+ elen = ret;
+ goto out;
+ }
+
+ /* hash the end if the name is long enough */
+ if (len > CEPH_NOHASH_NAME_MAX) {
+ u8 hash[SHA256_DIGEST_SIZE];
+ u8 *extra = cryptbuf + CEPH_NOHASH_NAME_MAX;
+
+ /*
+ * hash the extra bytes and overwrite crypttext beyond that
+ * point with it
+ */
+ sha256(extra, len - CEPH_NOHASH_NAME_MAX, hash);
+ memcpy(extra, hash, SHA256_DIGEST_SIZE);
+ len = CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE;
+ }
+
+ /* base64 encode the encrypted name */
+ elen = base64_encode(cryptbuf, len, p, false, BASE64_IMAP);
+ doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, p);
+
+ /* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */
+ WARN_ON(elen > 240);
+ if (dir != parent) // leading _ is already there; append _<inum>
+ elen += 1 + sprintf(p + elen, "_%ld", dir->i_ino);
+
+out:
+ kfree(cryptbuf);
+ if (dir != parent) {
+ if ((inode_state_read_once(dir) & I_NEW))
+ discard_new_inode(dir);
+ else
+ iput(dir);
+ }
+ return elen;
+}
+
+/**
+ * ceph_fname_to_usr - convert a filename for userland presentation
+ * @fname: ceph_fname to be converted
+ * @tname: temporary name buffer to use for conversion (may be NULL)
+ * @oname: where converted name should be placed
+ * @is_nokey: set to true if key wasn't available during conversion (may be NULL)
+ *
+ * Given a filename (usually from the MDS), format it for presentation to
+ * userland. If the parent directory is not encrypted, just pass it back as-is.
+ *
+ * Otherwise, base64 decode the string, and then ask fscrypt to format it
+ * for userland presentation.
+ *
+ * Returns 0 on success or negative error code on error.
+ */
+int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
+ struct fscrypt_str *oname, bool *is_nokey)
+{
+ struct inode *dir = fname->dir;
+ struct fscrypt_str _tname = FSTR_INIT(NULL, 0);
+ struct fscrypt_str iname;
+ char *name = fname->name;
+ int name_len = fname->name_len;
+ int ret;
+
+ /* Sanity check that the resulting name will fit in the buffer */
+ if (fname->name_len > NAME_MAX || fname->ctext_len > NAME_MAX)
+ return -EIO;
+
+ /* Handle the special case of snapshot names that start with '_' */
+ if ((ceph_snap(dir) == CEPH_SNAPDIR) && (name_len > 0) &&
+ (name[0] == '_')) {
+ dir = parse_longname(dir, name, &name_len);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ name++; /* skip initial '_' */
+ }
+
+ if (!IS_ENCRYPTED(dir)) {
+ oname->name = fname->name;
+ oname->len = fname->name_len;
+ ret = 0;
+ goto out_inode;
+ }
+
+ ret = ceph_fscrypt_prepare_readdir(dir);
+ if (ret)
+ goto out_inode;
+
+ /*
+ * Use the raw dentry name as sent by the MDS instead of
+ * generating a nokey name via fscrypt.
+ */
+ if (!fscrypt_has_encryption_key(dir)) {
+ if (fname->no_copy)
+ oname->name = fname->name;
+ else
+ memcpy(oname->name, fname->name, fname->name_len);
+ oname->len = fname->name_len;
+ if (is_nokey)
+ *is_nokey = true;
+ ret = 0;
+ goto out_inode;
+ }
+
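+ /* no raw crypttext from the MDS, so base64-decode the name first */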
+ if (fname->ctext_len == 0) {
+ int declen;
+
+ if (!tname) {
+ ret = fscrypt_fname_alloc_buffer(NAME_MAX, &_tname);
+ if (ret)
+ goto out_inode;
+ tname = &_tname;
+ }
+
+ declen = base64_decode(name, name_len,
+ tname->name, false, BASE64_IMAP);
+ if (declen <= 0) {
+ ret = -EIO;
+ goto out;
+ }
+ iname.name = tname->name;
+ iname.len = declen;
+ } else {
+ iname.name = fname->ctext;
+ iname.len = fname->ctext_len;
+ }
+
+ ret = fscrypt_fname_disk_to_usr(dir, 0, 0, &iname, oname);
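+ /* long snapshot name: re-add the leading '_' and the trailing _<inode number> */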
+ if (!ret && (dir != fname->dir)) {
+ char tmp_buf[BASE64_CHARS(NAME_MAX)];
+
+ name_len = snprintf(tmp_buf, sizeof(tmp_buf), "_%.*s_%ld",
+ oname->len, oname->name, dir->i_ino);
+ memcpy(oname->name, tmp_buf, name_len);
+ oname->len = name_len;
+ }
+
+out:
+ fscrypt_fname_free_buffer(&_tname);
+out_inode:
+ if (dir != fname->dir) {
+ if ((inode_state_read_once(dir) & I_NEW))
+ discard_new_inode(dir);
+ else
+ iput(dir);
+ }
+ return ret;
+}
+
+/**
+ * ceph_fscrypt_prepare_readdir - simple __fscrypt_prepare_readdir() wrapper
+ * @dir: directory inode for readdir prep
+ *
+ * Simple wrapper around __fscrypt_prepare_readdir() that will mark directory as
+ * non-complete if this call results in having the directory unlocked.
+ *
+ * Returns:
+ * 1 - if directory was locked and key is now loaded (i.e. dir is unlocked)
+ * 0 - if directory is still locked
+ * < 0 - if __fscrypt_prepare_readdir() fails
+ */
+int ceph_fscrypt_prepare_readdir(struct inode *dir)
+{
+ bool had_key = fscrypt_has_encryption_key(dir);
+ int err;
+
+ if (!IS_ENCRYPTED(dir))
+ return 0;
+
+ err = __fscrypt_prepare_readdir(dir);
+ if (err)
+ return err;
+ if (!had_key && fscrypt_has_encryption_key(dir)) {
+ /* directory just got unlocked, mark it as not complete */
+ ceph_dir_clear_complete(dir);
+ return 1;
+ }
+ return 0;
+}
+
+int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
+ doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
+ ceph_vinop(inode), len, offs, lblk_num);
+ return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num);
+}
+
+int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
+ doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
+ ceph_vinop(inode), len, offs, lblk_num);
+ return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num);
+}
+
+/**
+ * ceph_fscrypt_decrypt_pages - decrypt an array of pages
+ * @inode: pointer to inode associated with these pages
+ * @page: pointer to page array
+ * @off: offset into the file that the read data starts
+ * @len: max length to decrypt
+ *
+ * Decrypt an array of fscrypt'ed pages and return the amount of
+ * data decrypted. Any data in the page prior to the start of the
+ * first complete block in the read is ignored. Any incomplete
+ * crypto blocks at the end of the array are ignored (and should
+ * probably be zeroed by the caller).
+ *
+ * Returns the length of the decrypted data or a negative errno.
+ */
+int ceph_fscrypt_decrypt_pages(struct inode *inode, struct page **page,
+ u64 off, int len)
+{
+ int i, num_blocks;
+ u64 baseblk = off >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ int ret = 0;
+
+ /*
+ * We can't deal with partial blocks on an encrypted file, so mask off
+ * the last bit.
+ */
+ num_blocks = ceph_fscrypt_blocks(off, len & CEPH_FSCRYPT_BLOCK_MASK);
+
+ /* Decrypt each block */
+ for (i = 0; i < num_blocks; ++i) {
+ int blkoff = i << CEPH_FSCRYPT_BLOCK_SHIFT;
+ int pgidx = blkoff >> PAGE_SHIFT;
+ unsigned int pgoffs = offset_in_page(blkoff);
+ int fret;
+
+ fret = ceph_fscrypt_decrypt_block_inplace(inode, page[pgidx],
+ CEPH_FSCRYPT_BLOCK_SIZE, pgoffs,
+ baseblk + i);
+ if (fret < 0) {
+ if (ret == 0)
+ ret = fret;
+ break;
+ }
+ ret += CEPH_FSCRYPT_BLOCK_SIZE;
+ }
+ return ret;
+}
+
+/**
+ * ceph_fscrypt_decrypt_extents: decrypt received extents in given buffer
+ * @inode: inode associated with pages being decrypted
+ * @page: pointer to page array
+ * @off: offset into the file that the data in page[0] starts
+ * @map: pointer to extent array
+ * @ext_cnt: length of extent array
+ *
+ * Given an extent map and a page array, decrypt the received data in-place,
+ * skipping holes. Returns the offset into buffer of end of last decrypted
+ * block.
+ */
+int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
+ u64 off, struct ceph_sparse_extent *map,
+ u32 ext_cnt)
+{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ int i, ret = 0;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objno, objoff;
+ u32 xlen;
+
+ /* Nothing to do for empty array */
+ if (ext_cnt == 0) {
+ doutc(cl, "%p %llx.%llx empty array, ret 0\n", inode,
+ ceph_vinop(inode));
+ return 0;
+ }
+
+ ceph_calc_file_object_mapping(&ci->i_layout, off, map[0].len,
+ &objno, &objoff, &xlen);
+
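+ /* each extent offset is relative to the start of the object */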
+ for (i = 0; i < ext_cnt; ++i) {
+ struct ceph_sparse_extent *ext = &map[i];
+ int pgsoff = ext->off - objoff;
+ int pgidx = pgsoff >> PAGE_SHIFT;
+ int fret;
+
+ if ((ext->off | ext->len) & ~CEPH_FSCRYPT_BLOCK_MASK) {
+ pr_warn_client(cl,
+ "%p %llx.%llx bad encrypted sparse extent "
+ "idx %d off %llx len %llx\n",
+ inode, ceph_vinop(inode), i, ext->off,
+ ext->len);
+ return -EIO;
+ }
+ fret = ceph_fscrypt_decrypt_pages(inode, &page[pgidx],
+ off + pgsoff, ext->len);
+ doutc(cl, "%p %llx.%llx [%d] 0x%llx~0x%llx fret %d\n", inode,
+ ceph_vinop(inode), i, ext->off, ext->len, fret);
+ if (fret < 0) {
+ if (ret == 0)
+ ret = fret;
+ break;
+ }
+ ret = pgsoff + fret;
+ }
+ doutc(cl, "ret %d\n", ret);
+ return ret;
+}
+
+/**
+ * ceph_fscrypt_encrypt_pages - encrypt an array of pages
+ * @inode: pointer to inode associated with these pages
+ * @page: pointer to page array
+ * @off: offset into the file that the data starts
+ * @len: max length to encrypt
+ *
+ * Encrypt an array of cleartext pages and return the amount of
+ * data encrypted. Any data in the page prior to the start of the
+ * first complete block in the write is ignored. Any incomplete
+ * crypto blocks at the end of the array are ignored.
+ *
+ * Returns the length of the encrypted data or a negative errno.
+ */
+int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
+ int len)
+{
+ int i, num_blocks;
+ u64 baseblk = off >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ int ret = 0;
+
+ /*
+ * We can't deal with partial blocks on an encrypted file, so round the
+ * length down to a whole number of crypto blocks.
+ */
+ num_blocks = ceph_fscrypt_blocks(off, len & CEPH_FSCRYPT_BLOCK_MASK);
+
+ /* Encrypt each block */
+ for (i = 0; i < num_blocks; ++i) {
+ int blkoff = i << CEPH_FSCRYPT_BLOCK_SHIFT;
+ int pgidx = blkoff >> PAGE_SHIFT;
+ unsigned int pgoffs = offset_in_page(blkoff);
+ int fret;
+
+ fret = ceph_fscrypt_encrypt_block_inplace(inode, page[pgidx],
+ CEPH_FSCRYPT_BLOCK_SIZE, pgoffs,
+ baseblk + i);
+ if (fret < 0) {
+ if (ret == 0)
+ ret = fret;
+ break;
+ }
+ ret += CEPH_FSCRYPT_BLOCK_SIZE;
+ }
+ return ret;
+}
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
new file mode 100644
index 000000000000..b748e2060bc9
--- /dev/null
+++ b/fs/ceph/crypto.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ceph fscrypt functionality
+ */
+
+#ifndef _CEPH_CRYPTO_H
+#define _CEPH_CRYPTO_H
+
+#include <crypto/sha2.h>
+#include <linux/fscrypt.h>
+#include <linux/base64.h>
+
+#define CEPH_FSCRYPT_BLOCK_SHIFT 12
+#define CEPH_FSCRYPT_BLOCK_SIZE (_AC(1, UL) << CEPH_FSCRYPT_BLOCK_SHIFT)
+#define CEPH_FSCRYPT_BLOCK_MASK (~(CEPH_FSCRYPT_BLOCK_SIZE-1))
+
+struct ceph_fs_client;
+struct ceph_acl_sec_ctx;
+struct ceph_mds_request;
+
+struct ceph_fname {
+ struct inode *dir;
+ char *name; // b64 encoded, possibly hashed
+ unsigned char *ctext; // binary crypttext (if any)
+ u32 name_len; // length of name buffer
+ u32 ctext_len; // length of crypttext
+ bool no_copy;
+};
+
+/*
+ * Header for the encrypted file when truncating its size. This is
+ * sent to the MDS, which updates the encrypted last block and then
+ * truncates the size.
+ */
+struct ceph_fscrypt_truncate_size_header {
+ __u8 ver;
+ __u8 compat;
+
+ /*
+ * It will be sizeof(assert_ver + file_offset + block_size)
+ * if the last block is empty because it falls in a file
+ * hole. Otherwise, data_len also includes CEPH_FSCRYPT_BLOCK_SIZE.
+ */
+ __le32 data_len;
+
+ __le64 change_attr;
+ __le64 file_offset;
+ __le32 block_size;
+} __packed;
+
+struct ceph_fscrypt_auth {
+ __le32 cfa_version;
+ __le32 cfa_blob_len;
+ u8 cfa_blob[FSCRYPT_SET_CONTEXT_MAX_SIZE];
+} __packed;
+
+#define CEPH_FSCRYPT_AUTH_VERSION 1
+static inline u32 ceph_fscrypt_auth_len(struct ceph_fscrypt_auth *fa)
+{
+ u32 ctxsize = le32_to_cpu(fa->cfa_blob_len);
+
+ return offsetof(struct ceph_fscrypt_auth, cfa_blob) + ctxsize;
+}
+
+#ifdef CONFIG_FS_ENCRYPTION
+/*
+ * We want to encrypt filenames when creating them, but the encrypted
+ * versions of those names may have illegal characters in them. To mitigate
+ * that, we base64 encode them, but that gives us a result that can exceed
+ * NAME_MAX.
+ *
+ * Follow a similar scheme to fscrypt itself, and cap the filename to a
+ * smaller size. If the ciphertext name is longer than the value below, then
+ * sha256 hash the remaining bytes.
+ *
+ * For the fscrypt_nokey_name struct the dirhash[2] member is useless in ceph
+ * so the corresponding struct will be:
+ *
+ * struct fscrypt_ceph_nokey_name {
+ * u8 bytes[157];
+ * u8 sha256[SHA256_DIGEST_SIZE];
+ * }; // 180 bytes => 240 bytes base64-encoded, which is <= NAME_MAX (255)
+ *
+ * (240 bytes is the maximum size allowed for snapshot names to take into
+ * account the format: '_<SNAPSHOT-NAME>_<INODE-NUMBER>'.)
+ *
+ * Note that for long names that end up having their tail portion hashed, we
+ * must also store the full encrypted name (in the dentry's alternate_name
+ * field).
+ */
+#define CEPH_NOHASH_NAME_MAX (180 - SHA256_DIGEST_SIZE)
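+
+/*
+ * (Base64 expands every 3 raw bytes into 4 characters, so the 180-byte
+ * worst case described above encodes to 180 / 3 * 4 = 240 characters,
+ * which fits within NAME_MAX.)
+ */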
+
+void ceph_fscrypt_set_ops(struct super_block *sb);
+
+void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc);
+
+int ceph_fscrypt_prepare_context(struct inode *dir, struct inode *inode,
+ struct ceph_acl_sec_ctx *as);
+void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as);
+int ceph_encode_encrypted_dname(struct inode *parent, char *buf, int len);
+
+static inline int ceph_fname_alloc_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+ if (!IS_ENCRYPTED(parent))
+ return 0;
+ return fscrypt_fname_alloc_buffer(NAME_MAX, fname);
+}
+
+static inline void ceph_fname_free_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+ if (IS_ENCRYPTED(parent))
+ fscrypt_fname_free_buffer(fname);
+}
+
+int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
+ struct fscrypt_str *oname, bool *is_nokey);
+int ceph_fscrypt_prepare_readdir(struct inode *dir);
+
+static inline unsigned int ceph_fscrypt_blocks(u64 off, u64 len)
+{
+ /* crypto blocks cannot span more than one page */
+ BUILD_BUG_ON(CEPH_FSCRYPT_BLOCK_SHIFT > PAGE_SHIFT);
+
+ return ((off+len+CEPH_FSCRYPT_BLOCK_SIZE-1) >> CEPH_FSCRYPT_BLOCK_SHIFT) -
+ (off >> CEPH_FSCRYPT_BLOCK_SHIFT);
+}
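+
+/*
+ * Worked example with the 4 KiB block size above: off=5000, len=3000
+ * covers bytes 5000..7999, all inside fscrypt block 1, so this returns 1,
+ * while off=5000, len=4000 covers bytes 5000..8999, spanning blocks 1
+ * and 2, and returns 2.
+ */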
+
+/*
+ * If we have an encrypted inode then we must adjust the offset and
+ * range of the on-the-wire read to cover an entire encryption block.
+ * The copy will be done using the original offset and length, after
+ * we've decrypted the result.
+ */
+static inline void ceph_fscrypt_adjust_off_and_len(struct inode *inode,
+ u64 *off, u64 *len)
+{
+ if (IS_ENCRYPTED(inode)) {
+ *len = ceph_fscrypt_blocks(*off, *len) * CEPH_FSCRYPT_BLOCK_SIZE;
+ *off &= CEPH_FSCRYPT_BLOCK_MASK;
+ }
+}
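+
+/*
+ * Continuing the example above: a 3000-byte read at offset 5000 on an
+ * encrypted inode is widened to a 4096-byte on-the-wire read at offset
+ * 4096 (the whole crypto block containing the requested range); after
+ * decryption the caller copies out only the originally requested bytes.
+ */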
+
+int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num);
+int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num);
+int ceph_fscrypt_decrypt_pages(struct inode *inode, struct page **page,
+ u64 off, int len);
+int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
+ u64 off, struct ceph_sparse_extent *map,
+ u32 ext_cnt);
+int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
+ int len);
+
+static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
+{
+ return fscrypt_is_bounce_page(page) ? fscrypt_pagecache_page(page) : page;
+}
+
+#else /* CONFIG_FS_ENCRYPTION */
+
+static inline void ceph_fscrypt_set_ops(struct super_block *sb)
+{
+}
+
+static inline void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc)
+{
+}
+
+static inline int ceph_fscrypt_prepare_context(struct inode *dir,
+ struct inode *inode,
+ struct ceph_acl_sec_ctx *as)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as_ctx)
+{
+}
+
+static inline int ceph_encode_encrypted_dname(struct inode *parent, char *buf,
+ int len)
+{
+ return len;
+}
+
+static inline int ceph_fname_alloc_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+ return 0;
+}
+
+static inline void ceph_fname_free_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+}
+
+static inline int ceph_fname_to_usr(const struct ceph_fname *fname,
+ struct fscrypt_str *tname,
+ struct fscrypt_str *oname, bool *is_nokey)
+{
+ oname->name = fname->name;
+ oname->len = fname->name_len;
+ return 0;
+}
+
+static inline int ceph_fscrypt_prepare_readdir(struct inode *dir)
+{
+ return 0;
+}
+
+static inline void ceph_fscrypt_adjust_off_and_len(struct inode *inode,
+ u64 *off, u64 *len)
+{
+}
+
+static inline int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_decrypt_pages(struct inode *inode,
+ struct page **page, u64 off,
+ int len)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_decrypt_extents(struct inode *inode,
+ struct page **page, u64 off,
+ struct ceph_sparse_extent *map,
+ u32 ext_cnt)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_encrypt_pages(struct inode *inode,
+ struct page **page, u64 off,
+ int len)
+{
+ return 0;
+}
+
+static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
+{
+ return page;
+}
+#endif /* CONFIG_FS_ENCRYPTION */
+
+static inline loff_t ceph_fscrypt_page_offset(struct page *page)
+{
+ return page_offset(ceph_fscrypt_pagecache_page(page));
+}
+
+#endif /* _CEPH_CRYPTO_H */
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 38b78b45811f..f3fe786b4143 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -55,8 +55,6 @@ static int mdsc_show(struct seq_file *s, void *p)
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
- int pathlen = 0;
- u64 pathbase;
char *path;
mutex_lock(&mdsc->mutex);
@@ -81,8 +79,8 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_dentry->d_lock);
@@ -91,7 +89,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
@@ -100,8 +98,8 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
- path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
@@ -111,7 +109,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_old_dentry,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
@@ -146,82 +144,92 @@ static int mdsc_show(struct seq_file *s, void *p)
name, total, avg, _min, max, sum); \
}
-static int metric_show(struct seq_file *s, void *p)
+static int metrics_file_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
- struct ceph_mds_client *mdsc = fsc->mdsc;
- struct ceph_client_metric *m = &mdsc->metric;
- int nr_caps = 0;
- s64 total, sum, avg, min, max, sq;
- u64 sum_sz, avg_sz, min_sz, max_sz;
+ struct ceph_client_metric *m = &fsc->mdsc->metric;
- sum = percpu_counter_sum(&m->total_inodes);
seq_printf(s, "item total\n");
seq_printf(s, "------------------------------------------\n");
- seq_printf(s, "%-35s%lld / %lld\n", "opened files / total inodes",
- atomic64_read(&m->opened_files), sum);
- seq_printf(s, "%-35s%lld / %lld\n", "pinned i_caps / total inodes",
- atomic64_read(&m->total_caps), sum);
- seq_printf(s, "%-35s%lld / %lld\n", "opened inodes / total inodes",
- percpu_counter_sum(&m->opened_inodes), sum);
-
- seq_printf(s, "\n");
+ seq_printf(s, "%-35s%lld\n", "total inodes",
+ percpu_counter_sum(&m->total_inodes));
+ seq_printf(s, "%-35s%lld\n", "opened files",
+ atomic64_read(&m->opened_files));
+ seq_printf(s, "%-35s%lld\n", "pinned i_caps",
+ atomic64_read(&m->total_caps));
+ seq_printf(s, "%-35s%lld\n", "opened inodes",
+ percpu_counter_sum(&m->opened_inodes));
+ return 0;
+}
+
+static const char * const metric_str[] = {
+ "read",
+ "write",
+ "metadata",
+ "copyfrom"
+};
+static int metrics_latency_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *cm = &fsc->mdsc->metric;
+ struct ceph_metric *m;
+ s64 total, avg, min, max, sq;
+ int i;
+
seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n");
seq_printf(s, "-----------------------------------------------------------------------------------\n");
- spin_lock(&m->read_metric_lock);
- total = m->total_reads;
- sum = m->read_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->read_latency_min;
- max = m->read_latency_max;
- sq = m->read_latency_sq_sum;
- spin_unlock(&m->read_metric_lock);
- CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
-
- spin_lock(&m->write_metric_lock);
- total = m->total_writes;
- sum = m->write_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->write_latency_min;
- max = m->write_latency_max;
- sq = m->write_latency_sq_sum;
- spin_unlock(&m->write_metric_lock);
- CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
-
- spin_lock(&m->metadata_metric_lock);
- total = m->total_metadatas;
- sum = m->metadata_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->metadata_latency_min;
- max = m->metadata_latency_max;
- sq = m->metadata_latency_sq_sum;
- spin_unlock(&m->metadata_metric_lock);
- CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
-
- seq_printf(s, "\n");
+ for (i = 0; i < METRIC_MAX; i++) {
+ m = &cm->metric[i];
+ spin_lock(&m->lock);
+ total = m->total;
+ avg = m->latency_avg;
+ min = m->latency_min;
+ max = m->latency_max;
+ sq = m->latency_sq_sum;
+ spin_unlock(&m->lock);
+ CEPH_LAT_METRIC_SHOW(metric_str[i], total, avg, min, max, sq);
+ }
+
+ return 0;
+}
+
+static int metrics_size_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *cm = &fsc->mdsc->metric;
+ struct ceph_metric *m;
+ s64 total;
+ u64 sum, avg, min, max;
+ int i;
+
seq_printf(s, "item total avg_sz(bytes) min_sz(bytes) max_sz(bytes) total_sz(bytes)\n");
seq_printf(s, "----------------------------------------------------------------------------------------\n");
- spin_lock(&m->read_metric_lock);
- total = m->total_reads;
- sum_sz = m->read_size_sum;
- avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
- min_sz = m->read_size_min;
- max_sz = m->read_size_max;
- spin_unlock(&m->read_metric_lock);
- CEPH_SZ_METRIC_SHOW("read", total, avg_sz, min_sz, max_sz, sum_sz);
-
- spin_lock(&m->write_metric_lock);
- total = m->total_writes;
- sum_sz = m->write_size_sum;
- avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
- min_sz = m->write_size_min;
- max_sz = m->write_size_max;
- spin_unlock(&m->write_metric_lock);
- CEPH_SZ_METRIC_SHOW("write", total, avg_sz, min_sz, max_sz, sum_sz);
-
- seq_printf(s, "\n");
+ for (i = 0; i < METRIC_MAX; i++) {
+ /* skip 'metadata' as it doesn't use the size metric */
+ if (i == METRIC_METADATA)
+ continue;
+ m = &cm->metric[i];
+ spin_lock(&m->lock);
+ total = m->total;
+ sum = m->size_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->size_min;
+ max = m->size_max;
+ spin_unlock(&m->lock);
+ CEPH_SZ_METRIC_SHOW(metric_str[i], total, avg, min, max, sum);
+ }
+
+ return 0;
+}
+
+static int metrics_caps_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *m = &fsc->mdsc->metric;
+ int nr_caps = 0;
+
seq_printf(s, "item total miss hit\n");
seq_printf(s, "-------------------------------------------------\n");
@@ -238,14 +246,20 @@ static int metric_show(struct seq_file *s, void *p)
return 0;
}
-static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
+static int caps_show_cb(struct inode *inode, int mds, void *p)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct seq_file *s = p;
-
- seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
- cap->session->s_mds,
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->implemented));
+ struct ceph_cap *cap;
+
+ spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (cap)
+ seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
+ cap->session->s_mds,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->implemented));
+ spin_unlock(&ci->i_ceph_lock);
return 0;
}
@@ -341,7 +355,7 @@ static int status_show(struct seq_file *s, void *p)
seq_printf(s, "instance: %s.%lld %s/%u\n", ENTITY_NAME(inst->name),
ceph_pr_addr(client_addr), le32_to_cpu(client_addr->nonce));
- seq_printf(s, "blocklisted: %s\n", fsc->blocklisted ? "true" : "false");
+ seq_printf(s, "blocklisted: %s\n", str_true_false(fsc->blocklisted));
return 0;
}
@@ -350,8 +364,11 @@ DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
-DEFINE_SHOW_ATTRIBUTE(metric);
DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(metrics_file);
+DEFINE_SHOW_ATTRIBUTE(metrics_latency);
+DEFINE_SHOW_ATTRIBUTE(metrics_size);
+DEFINE_SHOW_ATTRIBUTE(metrics_caps);
/*
@@ -379,21 +396,23 @@ DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
- dout("ceph_fs_debugfs_cleanup\n");
+ doutc(fsc->client, "begin\n");
debugfs_remove(fsc->debugfs_bdi);
debugfs_remove(fsc->debugfs_congestion_kb);
debugfs_remove(fsc->debugfs_mdsmap);
debugfs_remove(fsc->debugfs_mds_sessions);
debugfs_remove(fsc->debugfs_caps);
- debugfs_remove(fsc->debugfs_metric);
+ debugfs_remove(fsc->debugfs_status);
debugfs_remove(fsc->debugfs_mdsc);
+ debugfs_remove_recursive(fsc->debugfs_metrics_dir);
+ doutc(fsc->client, "done\n");
}
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
- char name[100];
+ char name[NAME_MAX];
- dout("ceph_fs_debugfs_init\n");
+ doutc(fsc->client, "begin\n");
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
@@ -426,12 +445,6 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc,
&mdsc_fops);
- fsc->debugfs_metric = debugfs_create_file("metrics",
- 0400,
- fsc->client->debugfs_dir,
- fsc,
- &metric_fops);
-
fsc->debugfs_caps = debugfs_create_file("caps",
0400,
fsc->client->debugfs_dir,
@@ -443,6 +456,19 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc->client->debugfs_dir,
fsc,
&status_fops);
+
+ fsc->debugfs_metrics_dir = debugfs_create_dir("metrics",
+ fsc->client->debugfs_dir);
+
+ debugfs_create_file("file", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_file_fops);
+ debugfs_create_file("latency", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_latency_fops);
+ debugfs_create_file("size", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_size_fops);
+ debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_caps_fops);
+ doutc(fsc->client, "done\n");
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 133dbd9338e7..86d7aa594ea9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -9,6 +9,7 @@
#include "super.h"
#include "mds_client.h"
+#include "crypto.h"
/*
* Directory operations: readdir, lookup, create, link, unlink,
@@ -108,7 +109,9 @@ static int fpos_cmp(loff_t l, loff_t r)
* regardless of what dir changes take place on the
* server.
*/
-static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
+static int note_last_dentry(struct ceph_fs_client *fsc,
+ struct ceph_dir_file_info *dfi,
+ const char *name,
int len, unsigned next_offset)
{
char *buf = kmalloc(len+1, GFP_KERNEL);
@@ -119,7 +122,7 @@ static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
memcpy(dfi->last_name, name, len);
dfi->last_name[len] = 0;
dfi->next_offset = next_offset;
- dout("note_last_dentry '%s'\n", dfi->last_name);
+ doutc(fsc->client, "'%s'\n", dfi->last_name);
return 0;
}
@@ -129,6 +132,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
struct ceph_readdir_cache_control *cache_ctl)
{
struct inode *dir = d_inode(parent);
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct dentry *dentry;
unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
loff_t ptr_pos = idx * sizeof(struct dentry *);
@@ -137,17 +141,18 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
if (ptr_pos >= i_size_read(dir))
return NULL;
- if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
+ if (!cache_ctl->folio || ptr_pgoff != cache_ctl->folio->index) {
ceph_readdir_cache_release(cache_ctl);
- cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
- if (!cache_ctl->page) {
- dout(" page %lu not found\n", ptr_pgoff);
+ cache_ctl->folio = filemap_lock_folio(&dir->i_data, ptr_pgoff);
+ if (IS_ERR(cache_ctl->folio)) {
+ cache_ctl->folio = NULL;
+ doutc(cl, " folio %lu not found\n", ptr_pgoff);
return ERR_PTR(-EAGAIN);
}
/* reading/filling the cache are serialized by
- i_mutex, no need to use page lock */
- unlock_page(cache_ctl->page);
- cache_ctl->dentries = kmap(cache_ctl->page);
+ i_rwsem, no need to use folio lock */
+ folio_unlock(cache_ctl->folio);
+ cache_ctl->dentries = kmap_local_folio(cache_ctl->folio, 0);
}
cache_ctl->index = idx & idx_mask;
@@ -155,7 +160,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
rcu_read_lock();
spin_lock(&parent->d_lock);
/* check i_size again here, because empty directory can be
- * marked as complete while not holding the i_mutex. */
+ * marked as complete while not holding the i_rwsem. */
if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
dentry = cache_ctl->dentries[cache_ctl->index];
else
@@ -170,7 +175,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
/*
* When possible, we try to satisfy a readdir by peeking at the
* dcache. We make this work by carefully ordering dentries on
- * d_child when we initially get results back from the MDS, and
+ * d_children when we initially get results back from the MDS, and
* falling back to a "normal" sync readdir if any dentries in the dir
* are dropped.
*
@@ -184,13 +189,16 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
struct ceph_dir_file_info *dfi = file->private_data;
struct dentry *parent = file->f_path.dentry;
struct inode *dir = d_inode(parent);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(dir);
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct dentry *dentry, *last = NULL;
struct ceph_dentry_info *di;
struct ceph_readdir_cache_control cache_ctl = {};
u64 idx = 0;
int err = 0;
- dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);
+ doutc(cl, "%p %llx.%llx v%u at %llx\n", dir, ceph_vinop(dir),
+ (unsigned)shared_gen, ctx->pos);
/* search start position */
if (ctx->pos > 2) {
@@ -200,7 +208,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
dentry = __dcache_find_get_entry(parent, idx + step,
&cache_ctl);
if (!dentry) {
- /* use linar search */
+ /* use linear search */
idx = 0;
break;
}
@@ -220,7 +228,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
dput(dentry);
}
- dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
+ doutc(cl, "%p %llx.%llx cache idx %llu\n", dir,
+ ceph_vinop(dir), idx);
}
@@ -241,7 +250,9 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
di = ceph_dentry(dentry);
if (d_unhashed(dentry) ||
d_really_is_negative(dentry) ||
- di->lease_shared_gen != shared_gen) {
+ di->lease_shared_gen != shared_gen ||
+ ((dentry->d_flags & DCACHE_NOKEY_NAME) &&
+ fscrypt_has_encryption_key(dir))) {
spin_unlock(&dentry->d_lock);
dput(dentry);
err = -EAGAIN;
@@ -254,8 +265,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
spin_unlock(&dentry->d_lock);
if (emit_dentry) {
- dout(" %llx dentry %p %pd %p\n", di->offset,
- dentry, dentry, d_inode(dentry));
+ doutc(cl, " %llx dentry %p %pd %p\n", di->offset,
+ dentry, dentry, d_inode(dentry));
ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
@@ -278,7 +289,8 @@ out:
if (last) {
int ret;
di = ceph_dentry(last);
- ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len,
+ ret = note_last_dentry(fsc, dfi, last->d_name.name,
+ last->d_name.len,
fpos_off(di->offset) + 1);
if (ret < 0)
err = ret;
@@ -307,20 +319,23 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_dir_file_info *dfi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client *cl = fsc->client;
int i;
int err;
unsigned frag = -1;
struct ceph_mds_reply_info_parsed *rinfo;
- dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
+ doutc(cl, "%p %llx.%llx file %p pos %llx\n", inode,
+ ceph_vinop(inode), file, ctx->pos);
if (dfi->file_info.flags & CEPH_F_ATEND)
return 0;
/* always start with . and .. */
if (ctx->pos == 0) {
- dout("readdir off 0 -> '.'\n");
+ doutc(cl, "%p %llx.%llx off 0 -> '.'\n", inode,
+ ceph_vinop(inode));
if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
inode->i_mode >> 12))
return 0;
@@ -334,12 +349,17 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
ino = ceph_present_inode(dentry->d_parent->d_inode);
spin_unlock(&dentry->d_lock);
- dout("readdir off 1 -> '..'\n");
+ doutc(cl, "%p %llx.%llx off 1 -> '..'\n", inode,
+ ceph_vinop(inode));
if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
return 0;
ctx->pos = 2;
}
+ err = ceph_fscrypt_prepare_readdir(inode);
+ if (err < 0)
+ return err;
+
spin_lock(&ci->i_ceph_lock);
/* request Fx cap. if have Fx, we don't need to release Fs cap
* for later create/unlink. */
@@ -384,11 +404,12 @@ more:
frag = fpos_frag(ctx->pos);
}
- dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
- ceph_vinop(inode), frag, dfi->last_name);
+ doutc(cl, "fetching %p %llx.%llx frag %x offset '%s'\n",
+ inode, ceph_vinop(inode), frag, dfi->last_name);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
+
err = ceph_alloc_readdir_reply_buffer(req, inode);
if (err) {
ceph_mdsc_put_request(req);
@@ -402,11 +423,20 @@ more:
req->r_inode_drop = CEPH_CAP_FILE_EXCL;
}
if (dfi->last_name) {
- req->r_path2 = kstrdup(dfi->last_name, GFP_KERNEL);
+ int len = strlen(dfi->last_name);
+
+ req->r_path2 = kzalloc(NAME_MAX + 1, GFP_KERNEL);
if (!req->r_path2) {
ceph_mdsc_put_request(req);
return -ENOMEM;
}
+ memcpy(req->r_path2, dfi->last_name, len);
+
+ err = ceph_encode_encrypted_dname(inode, req->r_path2, len);
+ if (err < 0) {
+ ceph_mdsc_put_request(req);
+ return err;
+ }
} else if (is_hash_order(ctx->pos)) {
req->r_args.readdir.offset_hash =
cpu_to_le32(fpos_hash(ctx->pos));
@@ -428,12 +458,12 @@ more:
ceph_mdsc_put_request(req);
return err;
}
- dout("readdir got and parsed readdir result=%d on "
- "frag %x, end=%d, complete=%d, hash_order=%d\n",
- err, frag,
- (int)req->r_reply_info.dir_end,
- (int)req->r_reply_info.dir_complete,
- (int)req->r_reply_info.hash_order);
+ doutc(cl, "%p %llx.%llx got and parsed readdir result=%d"
+ "on frag %x, end=%d, complete=%d, hash_order=%d\n",
+ inode, ceph_vinop(inode), err, frag,
+ (int)req->r_reply_info.dir_end,
+ (int)req->r_reply_info.dir_complete,
+ (int)req->r_reply_info.hash_order);
rinfo = &req->r_reply_info;
if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
@@ -463,7 +493,8 @@ more:
dfi->dir_ordered_count = req->r_dir_ordered_cnt;
}
} else {
- dout("readdir !did_prepopulate\n");
+ doutc(cl, "%p %llx.%llx !did_prepopulate\n", inode,
+ ceph_vinop(inode));
/* disable readdir cache */
dfi->readdir_cache_idx = -1;
/* preclude from marking dir complete */
@@ -476,10 +507,13 @@ more:
rinfo->dir_entries + (rinfo->dir_nr-1);
unsigned next_offset = req->r_reply_info.dir_end ?
2 : (fpos_off(rde->offset) + 1);
- err = note_last_dentry(dfi, rde->name, rde->name_len,
- next_offset);
- if (err)
+ err = note_last_dentry(fsc, dfi, rde->name,
+ rde->name_len, next_offset);
+ if (err) {
+ ceph_mdsc_put_request(dfi->last_readdir);
+ dfi->last_readdir = NULL;
return err;
+ }
} else if (req->r_reply_info.dir_end) {
dfi->next_offset = 2;
/* keep last name */
@@ -487,9 +521,9 @@ more:
}
rinfo = &dfi->last_readdir->r_reply_info;
- dout("readdir frag %x num %d pos %llx chunk first %llx\n",
- dfi->frag, rinfo->dir_nr, ctx->pos,
- rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
+ doutc(cl, "%p %llx.%llx frag %x num %d pos %llx chunk first %llx\n",
+ inode, ceph_vinop(inode), dfi->frag, rinfo->dir_nr, ctx->pos,
+ rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
i = 0;
/* search start position */
@@ -508,21 +542,35 @@ more:
for (; i < rinfo->dir_nr; i++) {
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
- BUG_ON(rde->offset < ctx->pos);
+ if (rde->offset < ctx->pos) {
+ pr_warn_client(cl,
+ "%p %llx.%llx rde->offset 0x%llx ctx->pos 0x%llx\n",
+ inode, ceph_vinop(inode), rde->offset, ctx->pos);
+ return -EIO;
+ }
+
+ if (WARN_ON_ONCE(!rde->inode.in))
+ return -EIO;
ctx->pos = rde->offset;
- dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
- i, rinfo->dir_nr, ctx->pos,
- rde->name_len, rde->name, &rde->inode.in);
-
- BUG_ON(!rde->inode.in);
+ doutc(cl, "%p %llx.%llx (%d/%d) -> %llx '%.*s' %p\n", inode,
+ ceph_vinop(inode), i, rinfo->dir_nr, ctx->pos,
+ rde->name_len, rde->name, &rde->inode.in);
if (!dir_emit(ctx, rde->name, rde->name_len,
ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
le32_to_cpu(rde->inode.in->mode) >> 12)) {
- dout("filldir stopping us...\n");
+ /*
+ * NOTE: there is no need to put 'dfi->last_readdir' here, because
+ * when dir_emit stops us it most likely ran out of buffer space;
+ * the next readdir call will simply continue from this point.
+ */
+ doutc(cl, "filldir stopping us...\n");
return 0;
}
+
+ /* Reset the lengths to their original allocated vals */
ctx->pos++;
}
@@ -549,7 +597,8 @@ more:
kfree(dfi->last_name);
dfi->last_name = NULL;
}
- dout("readdir next frag is %x\n", frag);
+ doutc(cl, "%p %llx.%llx next frag is %x\n", inode,
+ ceph_vinop(inode), frag);
goto more;
}
dfi->file_info.flags |= CEPH_F_ATEND;
@@ -564,21 +613,23 @@ more:
spin_lock(&ci->i_ceph_lock);
if (dfi->dir_ordered_count ==
atomic64_read(&ci->i_ordered_count)) {
- dout(" marking %p complete and ordered\n", inode);
+ doutc(cl, " marking %p %llx.%llx complete and ordered\n",
+ inode, ceph_vinop(inode));
/* use i_size to track number of entries in
* readdir cache */
BUG_ON(dfi->readdir_cache_idx < 0);
i_size_write(inode, dfi->readdir_cache_idx *
sizeof(struct dentry*));
} else {
- dout(" marking %p complete\n", inode);
+ doutc(cl, " marking %llx.%llx complete\n",
+ ceph_vinop(inode));
}
__ceph_dir_set_complete(ci, dfi->dir_release_count,
dfi->dir_ordered_count);
spin_unlock(&ci->i_ceph_lock);
}
-
- dout("readdir %p file %p done.\n", inode, file);
+ doutc(cl, "%p %llx.%llx file %p done.\n", inode, ceph_vinop(inode),
+ file);
return 0;
}
@@ -608,7 +659,7 @@ static bool need_reset_readdir(struct ceph_dir_file_info *dfi, loff_t new_pos)
return true;
if (is_hash_order(new_pos)) {
/* no need to reset last_name for a forward seek when
- * dentries are sotred in hash order */
+ * dentries are sorted in hash order */
} else if (dfi->frag != fpos_frag(new_pos)) {
return true;
}
@@ -624,6 +675,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct ceph_dir_file_info *dfi = file->private_data;
struct inode *inode = file->f_mapping->host;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
loff_t retval;
inode_lock(inode);
@@ -643,7 +695,8 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
if (offset >= 0) {
if (need_reset_readdir(dfi, offset)) {
- dout("dir_llseek dropping %p content\n", file);
+ doutc(cl, "%p %llx.%llx dropping %p content\n",
+ inode, ceph_vinop(inode), file);
reset_readdir(dfi);
} else if (is_hash_order(offset) && offset > file->f_pos) {
/* for hash offset, we don't know if a forward seek
@@ -654,7 +707,6 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
if (offset != file->f_pos) {
file->f_pos = offset;
- file->f_version = 0;
dfi->file_info.flags &= ~CEPH_F_ATEND;
}
retval = offset;
@@ -670,8 +722,9 @@ out:
struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *dentry)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
- struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
+ struct ceph_client *cl = ceph_inode_to_client(parent);
/* .snap dir? */
if (ceph_snap(parent) == CEPH_NOSNAP &&
@@ -680,8 +733,9 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct inode *inode = ceph_get_snapdir(parent);
res = d_splice_alias(inode, dentry);
- dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n",
- dentry, dentry, inode, res);
+ doutc(cl, "ENOENT on snapdir %p '%pd', linking to "
+ "snapdir %p %llx.%llx. Spliced dentry %p\n",
+ dentry, dentry, inode, ceph_vinop(inode), res);
if (res)
dentry = res;
}
@@ -702,12 +756,15 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
+
if (err == -ENOENT) {
/* no trace? */
err = 0;
if (!req->r_reply_info.head->is_dentry) {
- dout("ENOENT and no trace, dentry %p inode %p\n",
- dentry, d_inode(dentry));
+ doutc(cl,
+ "ENOENT and no trace, dentry %p inode %llx.%llx\n",
+ dentry, ceph_vinop(d_inode(dentry)));
if (d_really_is_positive(dentry)) {
d_drop(dentry);
err = -ENOENT;
@@ -738,26 +795,40 @@ static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_request *req;
int op;
int mask;
int err;
- dout("lookup %p dentry %p '%pd'\n",
- dir, dentry, dentry);
+ doutc(cl, "%p %llx.%llx/'%pd' dentry %p\n", dir, ceph_vinop(dir),
+ dentry, dentry);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
+ if (IS_ENCRYPTED(dir)) {
+ bool had_key = fscrypt_has_encryption_key(dir);
+
+ err = fscrypt_prepare_lookup_partial(dir, dentry);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /* mark directory as incomplete if it has been unlocked */
+ if (!had_key && fscrypt_has_encryption_key(dir))
+ ceph_dir_clear_complete(dir);
+ }
+
/* can we conclude ENOENT locally? */
if (d_really_is_negative(dentry)) {
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
spin_lock(&ci->i_ceph_lock);
- dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags);
+ doutc(cl, " dir %llx.%llx flags are 0x%lx\n",
+ ceph_vinop(dir), ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
dentry->d_name.len) &&
@@ -767,7 +838,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
__ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
spin_unlock(&ci->i_ceph_lock);
- dout(" dir %p complete, -ENOENT\n", dir);
+ doutc(cl, " dir %llx.%llx complete, -ENOENT\n",
+ ceph_vinop(dir));
d_add(dentry, NULL);
di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
return NULL;
@@ -805,7 +877,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
}
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
- dout("lookup result=%p\n", dentry);
+ doutc(cl, "result=%p\n", dentry);
return dentry;
}
@@ -836,10 +908,11 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
return PTR_ERR(result);
}
-static int ceph_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
int err;
@@ -847,41 +920,51 @@ static int ceph_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_quota_is_max_files_exceeded(dir)) {
err = -EDQUOT;
goto out;
}
- err = ceph_pre_init_acls(dir, &mode, &as_ctx);
- if (err < 0)
- goto out;
- err = ceph_security_init_secctx(dentry, mode, &as_ctx);
- if (err < 0)
- goto out;
-
- dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
- dir, dentry, mode, rdev);
+ doutc(cl, "%p %llx.%llx/'%pd' dentry %p mode 0%ho rdev %d\n",
+ dir, ceph_vinop(dir), dentry, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
}
+
+ req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(req->r_new_inode)) {
+ err = PTR_ERR(req->r_new_inode);
+ req->r_new_inode = NULL;
+ goto out_req;
+ }
+
+ if (S_ISREG(mode) && IS_ENCRYPTED(dir))
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+out_req:
ceph_mdsc_put_request(req);
out:
if (!err)
@@ -892,59 +975,116 @@ out:
return err;
}
-static int ceph_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int ceph_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
- return ceph_mknod(mnt_userns, dir, dentry, mode, 0);
+ return ceph_mknod(idmap, dir, dentry, mode, 0);
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
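+/*
+ * Encrypt the symlink target and store it, base64-encoded, in
+ * req->r_path2, which is what gets sent to the MDS as the symlink
+ * body for an encrypted inode.
+ */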
+static int prep_encrypted_symlink_target(struct ceph_mds_request *req,
+ const char *dest)
+{
+ int err;
+ int len = strlen(dest);
+ struct fscrypt_str osd_link = FSTR_INIT(NULL, 0);
+
+ err = fscrypt_prepare_symlink(req->r_parent, dest, len, PATH_MAX,
+ &osd_link);
+ if (err)
+ goto out;
+
+ err = fscrypt_encrypt_symlink(req->r_new_inode, dest, len, &osd_link);
+ if (err)
+ goto out;
+
+ req->r_path2 = kmalloc(BASE64_CHARS(osd_link.len) + 1, GFP_KERNEL);
+ if (!req->r_path2) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ len = base64_encode(osd_link.name, osd_link.len,
+ req->r_path2, false, BASE64_IMAP);
+ req->r_path2[len] = '\0';
+out:
+ fscrypt_fname_free_buffer(&osd_link);
+ return err;
+}
+#else
+static int prep_encrypted_symlink_target(struct ceph_mds_request *req,
+ const char *dest)
+{
+ return -EOPNOTSUPP;
}
+#endif
-static int ceph_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *dest)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
+ umode_t mode = S_IFLNK | 0777;
int err;
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_quota_is_max_files_exceeded(dir)) {
err = -EDQUOT;
goto out;
}
- err = ceph_security_init_secctx(dentry, S_IFLNK | 0777, &as_ctx);
- if (err < 0)
- goto out;
-
- dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
+ doutc(cl, "%p %llx.%llx/'%pd' to '%s'\n", dir, ceph_vinop(dir), dentry,
+ dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
}
- req->r_path2 = kstrdup(dest, GFP_KERNEL);
- if (!req->r_path2) {
- err = -ENOMEM;
- ceph_mdsc_put_request(req);
- goto out;
+
+ req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(req->r_new_inode)) {
+ err = PTR_ERR(req->r_new_inode);
+ req->r_new_inode = NULL;
+ goto out_req;
}
+
req->r_parent = dir;
ihold(dir);
+ if (IS_ENCRYPTED(req->r_new_inode)) {
+ err = prep_encrypted_symlink_target(req, dest);
+ if (err)
+ goto out_req;
+ } else {
+ req->r_path2 = kstrdup(dest, GFP_KERNEL);
+ if (!req->r_path2) {
+ err = -ENOMEM;
+ goto out_req;
+ }
+ }
+
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+out_req:
ceph_mdsc_put_request(req);
out:
if (err)
@@ -953,86 +1093,122 @@ out:
return err;
}
-static int ceph_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
- int err = -EROFS;
+ struct dentry *ret;
+ int err;
int op;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return ERR_PTR(err);
+
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
op = CEPH_MDS_OP_MKSNAP;
- dout("mksnap dir %p snap '%pd' dn %p\n", dir,
- dentry, dentry);
+ doutc(cl, "mksnap %llx.%llx/'%pd' dentry %p\n",
+ ceph_vinop(dir), dentry, dentry);
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
+ doutc(cl, "mkdir %llx.%llx/'%pd' dentry %p mode 0%ho\n",
+ ceph_vinop(dir), dentry, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
+ ret = ERR_PTR(-EROFS);
goto out;
}
if (op == CEPH_MDS_OP_MKDIR &&
ceph_quota_is_max_files_exceeded(dir)) {
- err = -EDQUOT;
+ ret = ERR_PTR(-EDQUOT);
goto out;
}
-
- mode |= S_IFDIR;
- err = ceph_pre_init_acls(dir, &mode, &as_ctx);
- if (err < 0)
- goto out;
- err = ceph_security_init_secctx(dentry, mode, &as_ctx);
- if (err < 0)
+ if ((op == CEPH_MDS_OP_MKSNAP) && IS_ENCRYPTED(dir) &&
+ !fscrypt_has_encryption_key(dir)) {
+ ret = ERR_PTR(-ENOKEY);
goto out;
+ }
+
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
- err = PTR_ERR(req);
+ ret = ERR_CAST(req);
goto out;
}
+ mode |= S_IFDIR;
+ req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(req->r_new_inode)) {
+ ret = ERR_CAST(req->r_new_inode);
+ req->r_new_inode = NULL;
+ goto out_req;
+ }
+
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ if (op == CEPH_MDS_OP_MKDIR)
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_args.mkdir.mode = cpu_to_le32(mode);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err &&
!req->r_reply_info.head->is_target &&
!req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+ ret = ERR_PTR(err);
+out_req:
+ if (!IS_ERR(ret) && req->r_dentry != dentry)
+ /* Some other dentry was spliced in */
+ ret = dget(req->r_dentry);
ceph_mdsc_put_request(req);
out:
- if (!err)
+ if (!IS_ERR(ret)) {
+ if (ret)
+ dentry = ret;
ceph_init_inode_acls(d_inode(dentry), &as_ctx);
- else
+ } else {
d_drop(dentry);
+ }
ceph_release_acl_sec_ctx(&as_ctx);
- return err;
+ return ret;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int err;
+ if (dentry->d_flags & DCACHE_DISCONNECTED)
+ return -EINVAL;
+
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- dout("link in dir %p old_dentry %p dentry %p\n", dir,
- old_dentry, dentry);
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
+
+ doutc(cl, "%p %llx.%llx/'%pd' to '%pd'\n", dir, ceph_vinop(dir),
+ old_dentry, dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
@@ -1041,10 +1217,16 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_old_dentry = dget(old_dentry);
+ /*
+ * The old_dentry may be a DCACHE_DISCONNECTED dentry, in which
+ * case we just pass the ino# to the MDSs.
+ */
+ if (old_dentry->d_flags & DCACHE_DISCONNECTED)
+ req->r_ino2 = ceph_vino(d_inode(old_dentry));
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_SHARED on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
@@ -1062,33 +1244,50 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct dentry *dentry = req->r_dentry;
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
int result = req->r_err ? req->r_err :
le32_to_cpu(req->r_reply_info.head->result);
+ if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
+ pr_warn_client(cl,
+ "dentry %p:%pd async unlink bit is not set\n",
+ dentry, dentry);
+
+ spin_lock(&fsc->async_unlink_conflict_lock);
+ hash_del_rcu(&di->hnode);
+ spin_unlock(&fsc->async_unlink_conflict_lock);
+
+ spin_lock(&dentry->d_lock);
+ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags);
+ spin_unlock(&dentry->d_lock);
+
+ synchronize_rcu();
+
if (result == -EJUKEBOX)
goto out;
/* If op failed, mark everyone involved for errors */
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
/* mark error on parent + clear complete */
mapping_set_error(req->r_parent->i_mapping, result);
ceph_dir_clear_complete(req->r_parent);
/* drop the dentry -- we don't know its status */
- if (!d_unhashed(req->r_dentry))
- d_drop(req->r_dentry);
+ if (!d_unhashed(dentry))
+ d_drop(dentry);
/* mark inode itself for an error (since metadata is bogus) */
mapping_set_error(req->r_old_inode->i_mapping, result);
- pr_warn("ceph: async unlink failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n",
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
}
out:
iput(req->r_old_inode);
@@ -1136,25 +1335,54 @@ static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
*/
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = d_inode(dentry);
struct ceph_mds_request *req;
bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
+ struct dentry *dn;
int err = -EROFS;
int op;
+ char *path;
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* rmdir .snap/foo is RMSNAP */
- dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
+ doutc(cl, "rmsnap %llx.%llx/'%pd' dn\n", ceph_vinop(dir),
+ dentry);
op = CEPH_MDS_OP_RMSNAP;
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout("unlink/rmdir dir %p dn %p inode %p\n",
- dir, dentry, inode);
+ doutc(cl, "unlink/rmdir %llx.%llx/'%pd' inode %llx.%llx\n",
+ ceph_vinop(dir), dentry, ceph_vinop(inode));
op = d_is_dir(dentry) ?
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
} else
goto out;
+
+ dn = d_find_alias(dir);
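+ /*
+ * Check MDS auth caps for write access on the parent directory
+ * before going async: -EACCES is returned to the caller, while any
+ * other failure (including failing to build the path) simply falls
+ * back to a synchronous unlink.
+ */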
+ if (!dn) {
+ try_async = false;
+ } else {
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
+ if (IS_ERR(path)) {
+ try_async = false;
+ err = 0;
+ } else {
+ err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
+ }
+ ceph_mdsc_free_path_info(&path_info);
+ dput(dn);
+
+ /* For non-EACCES errors, let the MDS do the auth check itself */
+ if (err == -EACCES) {
+ return err;
+ } else if (err < 0) {
+ try_async = false;
+ err = 0;
+ }
+ }
+
retry:
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
@@ -1165,19 +1393,31 @@ retry:
req->r_num_caps = 2;
req->r_parent = dir;
ihold(dir);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
if (try_async && op == CEPH_MDS_OP_UNLINK &&
(req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
- dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
- dentry->d_name.len, dentry->d_name.name,
- ceph_cap_string(req->r_dir_caps));
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
+ doutc(cl, "async unlink on %llx.%llx/'%pd' caps=%s",
+ ceph_vinop(dir), dentry,
+ ceph_cap_string(req->r_dir_caps));
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
req->r_callback = ceph_async_unlink_cb;
req->r_old_inode = d_inode(dentry);
ihold(req->r_old_inode);
+
+ spin_lock(&dentry->d_lock);
+ di->flags |= CEPH_DENTRY_ASYNC_UNLINK;
+ spin_unlock(&dentry->d_lock);
+
+ spin_lock(&fsc->async_unlink_conflict_lock);
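+ /*
+ * Register this dentry in the async unlink conflict hash so that a
+ * subsequent create of the same name waits in
+ * ceph_wait_on_conflict_unlink() until this async unlink finishes.
+ */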
+ hash_add_rcu(fsc->async_unlink_conflict, &di->hnode,
+ dentry->d_name.hash);
+ spin_unlock(&fsc->async_unlink_conflict_lock);
+
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err) {
/*
@@ -1186,10 +1426,20 @@ retry:
*/
drop_nlink(inode);
d_delete(dentry);
- } else if (err == -EJUKEBOX) {
- try_async = false;
- ceph_mdsc_put_request(req);
- goto retry;
+ } else {
+ spin_lock(&fsc->async_unlink_conflict_lock);
+ hash_del_rcu(&di->hnode);
+ spin_unlock(&fsc->async_unlink_conflict_lock);
+
+ spin_lock(&dentry->d_lock);
+ di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
+ spin_unlock(&dentry->d_lock);
+
+ if (err == -EJUKEBOX) {
+ try_async = false;
+ ceph_mdsc_put_request(req);
+ goto retry;
+ }
}
} else {
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
@@ -1203,11 +1453,12 @@ out:
return err;
}
-static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int op = CEPH_MDS_OP_RENAME;
int err;
@@ -1228,8 +1479,18 @@ static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
(!ceph_quota_is_same_realm(old_dir, new_dir)))
return -EXDEV;
- dout("rename dir %p dentry %p to dir %p dentry %p\n",
- old_dir, old_dentry, new_dir, new_dentry);
+ err = ceph_wait_on_conflict_unlink(new_dentry);
+ if (err)
+ return err;
+
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
+ doutc(cl, "%llx.%llx/'%pd' to %llx.%llx/'%pd'\n",
+ ceph_vinop(old_dir), old_dentry, ceph_vinop(new_dir),
+ new_dentry);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1241,9 +1502,9 @@ static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
req->r_parent = new_dir;
ihold(new_dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_RDCACHE on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
@@ -1274,9 +1535,10 @@ static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
{
struct dentry *dn = di->dentry;
- struct ceph_mds_client *mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);
+ doutc(cl, "%p %p '%pd'\n", di, dn, dn);
di->flags |= CEPH_DENTRY_LEASE_LIST;
if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
@@ -1284,7 +1546,6 @@ void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
return;
}
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
list_move_tail(&di->lease_list, &mdsc->dentry_leases);
spin_unlock(&mdsc->dentry_list_lock);
@@ -1308,10 +1569,10 @@ static void __dentry_dir_lease_touch(struct ceph_mds_client* mdsc,
void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
{
struct dentry *dn = di->dentry;
- struct ceph_mds_client *mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
- di, dn, dn, di->offset);
+ doutc(cl, "%p %p '%pd' (offset 0x%llx)\n", di, dn, dn, di->offset);
if (!list_empty(&di->lease_list)) {
if (di->flags & CEPH_DENTRY_LEASE_LIST) {
@@ -1331,9 +1592,8 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
return;
}
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
- __dentry_dir_lease_touch(mdsc, di),
+ __dentry_dir_lease_touch(mdsc, di);
spin_unlock(&mdsc->dentry_list_lock);
}
@@ -1345,7 +1605,7 @@ static void __dentry_lease_unlist(struct ceph_dentry_info *di)
if (list_empty(&di->lease_list))
return;
- mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
+ mdsc = ceph_sb_to_fs_client(di->dentry->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
list_del_init(&di->lease_list);
spin_unlock(&mdsc->dentry_list_lock);
@@ -1365,10 +1625,12 @@ struct ceph_lease_walk_control {
unsigned long dir_lease_ttl;
};
+static int __dir_lease_check(const struct dentry *, struct ceph_lease_walk_control *);
+static int __dentry_lease_check(const struct dentry *);
+
static unsigned long
__dentry_leases_walk(struct ceph_mds_client *mdsc,
- struct ceph_lease_walk_control *lwc,
- int (*check)(struct dentry*, void*))
+ struct ceph_lease_walk_control *lwc)
{
struct ceph_dentry_info *di, *tmp;
struct dentry *dentry, *last = NULL;
@@ -1396,7 +1658,10 @@ __dentry_leases_walk(struct ceph_mds_client *mdsc,
goto next;
}
- ret = check(dentry, lwc);
+ if (lwc->dir_lease)
+ ret = __dir_lease_check(dentry, lwc);
+ else
+ ret = __dentry_lease_check(dentry);
if (ret & TOUCH) {
/* move it into tail of dir lease list */
__dentry_dir_lease_touch(mdsc, di);
@@ -1453,7 +1718,7 @@ next:
return freed;
}
-static int __dentry_lease_check(struct dentry *dentry, void *arg)
+static int __dentry_lease_check(const struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
int ret;
@@ -1468,9 +1733,9 @@ static int __dentry_lease_check(struct dentry *dentry, void *arg)
return DELETE;
}
-static int __dir_lease_check(struct dentry *dentry, void *arg)
+static int __dir_lease_check(const struct dentry *dentry,
+ struct ceph_lease_walk_control *lwc)
{
- struct ceph_lease_walk_control *lwc = arg;
struct ceph_dentry_info *di = ceph_dentry(dentry);
int ret = __dir_lease_try_check(dentry);
@@ -1509,7 +1774,7 @@ int ceph_trim_dentries(struct ceph_mds_client *mdsc)
lwc.dir_lease = false;
lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE * 2;
- freed = __dentry_leases_walk(mdsc, &lwc, __dentry_lease_check);
+ freed = __dentry_leases_walk(mdsc, &lwc);
if (!lwc.nr_to_scan) /* more invalid leases */
return -EAGAIN;
@@ -1519,7 +1784,7 @@ int ceph_trim_dentries(struct ceph_mds_client *mdsc)
lwc.dir_lease = true;
lwc.expire_dir_lease = freed < count;
lwc.dir_lease_ttl = mdsc->fsc->mount_options->caps_wanted_delay_max * HZ;
- freed +=__dentry_leases_walk(mdsc, &lwc, __dir_lease_check);
+ freed += __dentry_leases_walk(mdsc, &lwc);
if (!lwc.nr_to_scan) /* more to check */
return -EAGAIN;
@@ -1572,6 +1837,8 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
{
struct ceph_dentry_info *di;
struct ceph_mds_session *session = NULL;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
u32 seq = 0;
int valid = 0;
@@ -1604,7 +1871,7 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
CEPH_MDS_LEASE_RENEW, seq);
ceph_put_mds_session(session);
}
- dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
+ doutc(cl, "dentry %p = %d\n", dentry, valid);
return valid;
}
@@ -1647,6 +1914,7 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
struct ceph_mds_client *mdsc)
{
struct ceph_inode_info *ci = ceph_inode(dir);
+ struct ceph_client *cl = mdsc->fsc->client;
int valid;
int shared_gen;
@@ -1668,42 +1936,39 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
valid = 0;
spin_unlock(&dentry->d_lock);
}
- dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
- dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
+ doutc(cl, "dir %p %llx.%llx v%u dentry %p '%pd' = %d\n", dir,
+ ceph_vinop(dir), (unsigned)atomic_read(&ci->i_shared_gen),
+ dentry, dentry, valid);
return valid;
}
/*
* Check if cached dentry can be trusted.
*/
-static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int ceph_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
int valid = 0;
- struct dentry *parent;
- struct inode *dir, *inode;
- struct ceph_mds_client *mdsc;
+ struct inode *inode;
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- inode = d_inode_rcu(dentry);
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- inode = d_inode(dentry);
- }
+ valid = fscrypt_d_revalidate(dir, name, dentry, flags);
+ if (valid <= 0)
+ return valid;
- dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
- dentry, inode, ceph_dentry(dentry)->offset);
+ inode = d_inode_rcu(dentry);
- mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
+ doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
+ dentry, dentry, inode, ceph_dentry(dentry)->offset,
+ !!(dentry->d_flags & DCACHE_NOKEY_NAME));
+
+ mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc;
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
- dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
- dentry, inode);
+ doutc(cl, "%p '%pd' inode %p is SNAPPED\n", dentry,
+ dentry, inode);
valid = 1;
} else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
valid = 1;
@@ -1738,6 +2003,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
req->r_parent = dir;
ihold(dir);
+ req->r_dname = name;
+
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
mask |= CEPH_CAP_XATTR_SHARED;
@@ -1758,19 +2025,16 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
break;
}
ceph_mdsc_put_request(req);
- dout("d_revalidate %p lookup result=%d\n",
- dentry, err);
+ doutc(cl, "%p '%pd', lookup result=%d\n", dentry,
+ dentry, err);
}
} else {
percpu_counter_inc(&mdsc->metric.d_lease_hit);
}
- dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
+ doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid");
if (!valid)
ceph_dir_clear_complete(dir);
-
- if (!(flags & LOOKUP_RCU))
- dput(parent);
return valid;
}
@@ -1788,7 +2052,7 @@ static int ceph_d_delete(const struct dentry *dentry)
return 0;
if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
return 0;
- /* vaild lease? */
+ /* valid lease? */
di = ceph_dentry(dentry);
if (di) {
if (__dentry_lease_is_valid(di))
@@ -1805,9 +2069,9 @@ static int ceph_d_delete(const struct dentry *dentry)
static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
- dout("d_release %p\n", dentry);
+ doutc(fsc->client, "dentry %p '%pd'\n", dentry, dentry);
atomic64_dec(&fsc->mdsc->metric.total_dentries);
@@ -1828,10 +2092,12 @@ static void ceph_d_release(struct dentry *dentry)
*/
static void ceph_d_prune(struct dentry *dentry)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *dir_ci;
struct ceph_dentry_info *di;
- dout("ceph_d_prune %pd %p\n", dentry, dentry);
+ doutc(cl, "dentry %p '%pd'\n", dentry, dentry);
/* do we have a valid parent? */
if (IS_ROOT(dentry))
@@ -1874,7 +2140,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
int left;
const int bufsize = 1024;
- if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
+ if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode->i_sb), DIRSTAT))
return -EISDIR;
if (!dfi->dir_info) {
@@ -1890,7 +2156,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
" rfiles: %20lld\n"
" rsubdirs: %20lld\n"
"rbytes: %20lld\n"
- "rctime: %10lld.%09ld\n",
+ "rctime: %ptSp\n",
ci->i_files + ci->i_subdirs,
ci->i_files,
ci->i_subdirs,
@@ -1898,8 +2164,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
ci->i_rfiles,
ci->i_rsubdirs,
ci->i_rbytes,
- ci->i_rctime.tv_sec,
- ci->i_rctime.tv_nsec);
+ &ci->i_rctime);
}
if (*ppos >= dfi->dir_info_len)
@@ -1937,9 +2202,10 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
}
}
+WRAP_DIR_ITER(ceph_readdir) // FIXME!
const struct file_operations ceph_dir_fops = {
.read = ceph_read_dir,
- .iterate = ceph_readdir,
+ .iterate_shared = shared_ceph_readdir,
.llseek = ceph_dir_llseek,
.open = ceph_open,
.release = ceph_release,
@@ -1951,7 +2217,7 @@ const struct file_operations ceph_dir_fops = {
};
const struct file_operations ceph_snapdir_fops = {
- .iterate = ceph_readdir,
+ .iterate_shared = shared_ceph_readdir,
.llseek = ceph_dir_llseek,
.open = ceph_open,
.release = ceph_release,
@@ -1963,7 +2229,7 @@ const struct inode_operations ceph_dir_iops = {
.getattr = ceph_getattr,
.setattr = ceph_setattr,
.listxattr = ceph_listxattr,
- .get_acl = ceph_get_acl,
+ .get_inode_acl = ceph_get_acl,
.set_acl = ceph_set_acl,
.mknod = ceph_mknod,
.symlink = ceph_symlink,
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 1d65934c1262..b2f2af104679 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -3,10 +3,11 @@
#include <linux/exportfs.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "super.h"
#include "mds_client.h"
+#include "crypto.h"
/*
* Basic fh
@@ -32,11 +33,19 @@ struct ceph_nfs_snapfh {
u32 hash;
} __attribute__ ((packed));
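+/* exportfs expresses file handle lengths in 32-bit words, not bytes */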
+#define BYTES_PER_U32 (sizeof(u32))
+#define CEPH_FH_BASIC_SIZE \
+ (sizeof(struct ceph_nfs_fh) / BYTES_PER_U32)
+#define CEPH_FH_WITH_PARENT_SIZE \
+ (sizeof(struct ceph_nfs_confh) / BYTES_PER_U32)
+#define CEPH_FH_SNAPPED_INODE_SIZE \
+ (sizeof(struct ceph_nfs_snapfh) / BYTES_PER_U32)
+
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
- static const int snap_handle_length =
- sizeof(struct ceph_nfs_snapfh) >> 2;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ static const int snap_handle_length = CEPH_FH_SNAPPED_INODE_SIZE;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
u64 snapid = ceph_snap(inode);
int ret;
@@ -78,17 +87,16 @@ static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
*max_len = snap_handle_length;
ret = FILEID_BTRFS_WITH_PARENT;
out:
- dout("encode_snapfh %llx.%llx ret=%d\n", ceph_vinop(inode), ret);
+ doutc(cl, "%p %llx.%llx ret=%d\n", inode, ceph_vinop(inode), ret);
return ret;
}
static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
- static const int handle_length =
- sizeof(struct ceph_nfs_fh) >> 2;
- static const int connected_handle_length =
- sizeof(struct ceph_nfs_confh) >> 2;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ static const int handle_length = CEPH_FH_BASIC_SIZE;
+ static const int connected_handle_length = CEPH_FH_WITH_PARENT_SIZE;
int type;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -104,15 +112,15 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
if (parent_inode) {
struct ceph_nfs_confh *cfh = (void *)rawfh;
- dout("encode_fh %llx with parent %llx\n",
- ceph_ino(inode), ceph_ino(parent_inode));
+ doutc(cl, "%p %llx.%llx with parent %p %llx.%llx\n", inode,
+ ceph_vinop(inode), parent_inode, ceph_vinop(parent_inode));
cfh->ino = ceph_ino(inode);
cfh->parent_ino = ceph_ino(parent_inode);
*max_len = connected_handle_length;
type = FILEID_INO32_GEN_PARENT;
} else {
struct ceph_nfs_fh *fh = (void *)rawfh;
- dout("encode_fh %llx\n", ceph_ino(inode));
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
fh->ino = ceph_ino(inode);
*max_len = handle_length;
type = FILEID_INO32_GEN;
@@ -122,7 +130,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct inode *inode;
struct ceph_vino vino;
int err;
@@ -157,6 +165,11 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
ceph_mdsc_put_request(req);
if (!inode)
return err < 0 ? ERR_PTR(err) : ERR_PTR(-ESTALE);
+ } else {
+ if (ceph_inode_is_shutdown(inode)) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
}
return inode;
}
@@ -176,6 +189,7 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino)
static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
{
struct inode *inode = __lookup_inode(sb, ino);
+ struct ceph_inode_info *ci = ceph_inode(inode);
int err;
if (IS_ERR(inode))
@@ -187,7 +201,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
return ERR_PTR(err);
}
/* -ESTALE if inode as been unlinked and no file is open */
- if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
+ if ((inode->i_nlink == 0) && !__ceph_is_file_opened(ci)) {
iput(inode);
return ERR_PTR(-ESTALE);
}
@@ -198,7 +212,8 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
struct ceph_nfs_snapfh *sfh,
bool want_parent)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct inode *inode;
struct ceph_vino vino;
@@ -223,8 +238,13 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
return ERR_PTR(-ESTALE);
inode = ceph_find_inode(sb, vino);
- if (inode)
+ if (inode) {
+ if (ceph_inode_is_shutdown(inode)) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return d_obtain_alias(inode);
+ }
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
USE_ANY_MDS);
@@ -266,14 +286,11 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
ceph_mdsc_put_request(req);
if (want_parent) {
- dout("snapfh_to_parent %llx.%llx\n err=%d\n",
- vino.ino, vino.snap, err);
+ doutc(cl, "%llx.%llx err=%d\n", vino.ino, vino.snap, err);
} else {
- dout("snapfh_to_dentry %llx.%llx parent %llx hash %x err=%d",
- vino.ino, vino.snap, sfh->parent_ino, sfh->hash, err);
+ doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino,
+ vino.snap, sfh->parent_ino, sfh->hash, err);
}
- if (IS_ERR(inode))
- return ERR_CAST(inode);
/* see comments in ceph_get_parent() */
return unlinked ? d_obtain_root(inode) : d_obtain_alias(inode);
}
@@ -285,6 +302,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
struct fid *fid,
int fh_len, int fh_type)
{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_nfs_fh *fh = (void *)fid->raw;
if (fh_type == FILEID_BTRFS_WITH_PARENT) {
@@ -295,17 +313,17 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
if (fh_type != FILEID_INO32_GEN &&
fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
- if (fh_len < sizeof(*fh) / 4)
+ if (fh_len < sizeof(*fh) / BYTES_PER_U32)
return NULL;
- dout("fh_to_dentry %llx\n", fh->ino);
+ doutc(fsc->client, "%llx\n", fh->ino);
return __fh_to_dentry(sb, fh->ino);
}
static struct dentry *__get_parent(struct super_block *sb,
struct dentry *child, u64 ino)
{
- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct ceph_mds_request *req;
struct inode *inode;
int mask;
@@ -351,6 +369,7 @@ static struct dentry *__get_parent(struct super_block *sb,
static struct dentry *ceph_get_parent(struct dentry *child)
{
struct inode *inode = d_inode(child);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct dentry *dn;
if (ceph_snap(inode) != CEPH_NOSNAP) {
@@ -379,9 +398,9 @@ static struct dentry *ceph_get_parent(struct dentry *child)
}
dir = snapdir;
}
- /* If directory has already been deleted, futher get_parent
+ /* If directory has already been deleted, further get_parent
* will fail. Do not mark snapdir dentry as disconnected,
- * this prevent exportfs from doing futher get_parent. */
+ * this prevents exportfs from doing further get_parent. */
if (unlinked)
dn = d_obtain_root(dir);
else
@@ -390,8 +409,8 @@ static struct dentry *ceph_get_parent(struct dentry *child)
dn = __get_parent(child->d_sb, child, 0);
}
out:
- dout("get_parent %p ino %llx.%llx err=%ld\n",
- child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
+ doutc(cl, "child %p %p %llx.%llx err=%ld\n", child, inode,
+ ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
return dn;
}
@@ -402,6 +421,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
struct fid *fid,
int fh_len, int fh_type)
{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_nfs_confh *cfh = (void *)fid->raw;
struct dentry *dentry;
@@ -412,10 +432,10 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
if (fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
- if (fh_len < sizeof(*cfh) / 4)
+ if (fh_len < sizeof(*cfh) / BYTES_PER_U32)
return NULL;
- dout("fh_to_parent %llx\n", cfh->parent_ino);
+ doutc(fsc->client, "%llx\n", cfh->parent_ino);
dentry = __get_parent(sb, NULL, cfh->ino);
if (unlikely(dentry == ERR_PTR(-ENOENT)))
dentry = __fh_to_dentry(sb, cfh->parent_ino);
@@ -427,7 +447,7 @@ static int __get_snap_name(struct dentry *parent, char *name,
{
struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_request *req = NULL;
char *last_name = NULL;
unsigned next_offset = 2;
@@ -437,7 +457,13 @@ static int __get_snap_name(struct dentry *parent, char *name,
goto out;
if (ceph_snap(inode) == CEPH_SNAPDIR) {
if (ceph_snap(dir) == CEPH_NOSNAP) {
- strcpy(name, fsc->mount_options->snapdir_name);
+ /*
+ * .get_name() from struct export_operations
+ * assumes that its 'name' parameter is pointing
+ * to a NAME_MAX+1 sized buffer
+ */
+ strscpy(name, fsc->mount_options->snapdir_name,
+ NAME_MAX + 1);
err = 0;
}
goto out;
@@ -514,8 +540,8 @@ out:
if (req)
ceph_mdsc_put_request(req);
kfree(last_name);
- dout("get_snap_name %p ino %llx.%llx err=%d\n",
- child, ceph_vinop(inode), err);
+ doutc(fsc->client, "child dentry %p %p %llx.%llx err=%d\n", child,
+ inode, ceph_vinop(inode), err);
return err;
}
@@ -524,42 +550,61 @@ static int ceph_get_name(struct dentry *parent, char *name,
{
struct ceph_mds_client *mdsc;
struct ceph_mds_request *req;
+ struct inode *dir = d_inode(parent);
struct inode *inode = d_inode(child);
+ struct ceph_mds_reply_info_parsed *rinfo;
int err;
if (ceph_snap(inode) != CEPH_NOSNAP)
return __get_snap_name(parent, name, child);
- mdsc = ceph_inode_to_client(inode)->mdsc;
+ mdsc = ceph_inode_to_fs_client(inode)->mdsc;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
USE_ANY_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- inode_lock(d_inode(parent));
-
+ inode_lock(dir);
req->r_inode = inode;
ihold(inode);
req->r_ino2 = ceph_vino(d_inode(parent));
- req->r_parent = d_inode(parent);
- ihold(req->r_parent);
+ req->r_parent = dir;
+ ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ inode_unlock(dir);
- inode_unlock(d_inode(parent));
+ if (err)
+ goto out;
- if (!err) {
- struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ rinfo = &req->r_reply_info;
+ if (!IS_ENCRYPTED(dir)) {
memcpy(name, rinfo->dname, rinfo->dname_len);
name[rinfo->dname_len] = 0;
- dout("get_name %p ino %llx.%llx name %s\n",
- child, ceph_vinop(inode), name);
} else {
- dout("get_name %p ino %llx.%llx err %d\n",
- child, ceph_vinop(inode), err);
- }
+ struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+ struct ceph_fname fname = { .dir = dir,
+ .name = rinfo->dname,
+ .ctext = rinfo->altname,
+ .name_len = rinfo->dname_len,
+ .ctext_len = rinfo->altname_len };
+
+ err = ceph_fname_alloc_buffer(dir, &oname);
+ if (err < 0)
+ goto out;
+ err = ceph_fname_to_usr(&fname, NULL, &oname, NULL);
+ if (!err) {
+ memcpy(name, oname.name, oname.len);
+ name[oname.len] = 0;
+ }
+ ceph_fname_free_buffer(dir, &oname);
+ }
+out:
+ doutc(mdsc->fsc->client, "child dentry %p %p %llx.%llx err %d %s%s\n",
+ child, inode, ceph_vinop(inode), err, err ? "" : "name ",
+ err ? "" : name);
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b129ea551378..983390069f73 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -12,6 +12,7 @@
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
+#include <linux/splice.h>
#include "super.h"
#include "mds_client.h"
@@ -19,8 +20,9 @@
#include "io.h"
#include "metric.h"
-static __le32 ceph_flags_sys2wire(u32 flags)
+static __le32 ceph_flags_sys2wire(struct ceph_mds_client *mdsc, u32 flags)
{
+ struct ceph_client *cl = mdsc->fsc->client;
u32 wire_flags = 0;
switch (flags & O_ACCMODE) {
@@ -48,7 +50,7 @@ static __le32 ceph_flags_sys2wire(u32 flags)
#undef ceph_sys2wire
if (flags)
- dout("unused open flags: %x\n", flags);
+ doutc(cl, "unused open flags: %x\n", flags);
return cpu_to_le32(wire_flags);
}
@@ -95,23 +97,18 @@ static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
size_t start;
int idx = 0;
- bytes = iov_iter_get_pages(iter, pages, maxsize - size,
+ bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
ITER_GET_BVECS_PAGES, &start);
if (bytes < 0)
return size ?: bytes;
- iov_iter_advance(iter, bytes);
size += bytes;
for ( ; bytes; idx++, bvec_idx++) {
- struct bio_vec bv = {
- .bv_page = pages[idx],
- .bv_len = min_t(int, bytes, PAGE_SIZE - start),
- .bv_offset = start,
- };
-
- bvecs[bvec_idx] = bv;
- bytes -= bv.bv_len;
+ int len = min_t(int, bytes, PAGE_SIZE - start);
+
+ bvec_set_page(&bvecs[bvec_idx], pages[idx], len, start);
+ bytes -= len;
start = 0;
}
}
@@ -194,7 +191,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
if (IS_ERR(req))
goto out;
req->r_fmode = ceph_flags_to_mode(flags);
- req->r_args.open.flags = ceph_flags_sys2wire(flags);
+ req->r_args.open.flags = ceph_flags_sys2wire(mdsc, flags);
req->r_args.open.mode = cpu_to_le32(create_mode);
out:
return req;
@@ -204,10 +201,14 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
int fmode, bool isdir)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mount_options *opt =
+ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_file_info *fi;
+ int ret;
- dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
- inode->i_mode, isdir ? "dir" : "regular");
+ doutc(cl, "%p %llx.%llx %p 0%o (%s)\n", inode, ceph_vinop(inode),
+ file, inode->i_mode, isdir ? "dir" : "regular");
BUG_ON(inode->i_fop->release != ceph_release);
if (isdir) {
@@ -225,6 +226,9 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
if (!fi)
return -ENOMEM;
+ if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
+ fi->flags |= CEPH_F_SYNC;
+
file->private_data = fi;
}
@@ -233,9 +237,23 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
- fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
+ fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
+
+ if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
+ ret = ceph_uninline_data(file);
+ if (ret < 0)
+ goto error;
+ }
return 0;
+
+error:
+ ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
+ ceph_put_fmode(ci, fi->fmode, 1);
+ kmem_cache_free(ceph_file_cachep, fi);
+ /* wake up anyone waiting for caps on this inode */
+ wake_up_all(&ci->i_cap_wq);
+ return ret;
}
/*
@@ -244,12 +262,12 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
*/
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int ret = 0;
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
- ceph_fscache_register_inode_cookie(inode);
- ceph_fscache_file_set_cookie(inode, file);
+ ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
fallthrough;
case S_IFDIR:
ret = ceph_init_file_info(inode, file, fmode,
@@ -257,13 +275,13 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
break;
case S_IFLNK:
- dout("init_file %p %p 0%o (symlink)\n", inode, file,
- inode->i_mode);
+ doutc(cl, "%p %llx.%llx %p 0%o (symlink)\n", inode,
+ ceph_vinop(inode), file, inode->i_mode);
break;
default:
- dout("init_file %p %p 0%o (special)\n", inode, file,
- inode->i_mode);
+ doutc(cl, "%p %llx.%llx %p 0%o (special)\n", inode,
+ ceph_vinop(inode), file, inode->i_mode);
/*
* we need to drop the open ref now, since we don't
* have .release set to ceph_release.
@@ -282,6 +300,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
int ceph_renew_caps(struct inode *inode, int fmode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
int err, flags, wanted;
@@ -293,9 +312,10 @@ int ceph_renew_caps(struct inode *inode, int fmode)
(!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
int issued = __ceph_caps_issued(ci, NULL);
spin_unlock(&ci->i_ceph_lock);
- dout("renew caps %p want %s issued %s updating mds_wanted\n",
- inode, ceph_cap_string(wanted), ceph_cap_string(issued));
- ceph_check_caps(ci, 0, NULL);
+ doutc(cl, "%p %llx.%llx want %s issued %s updating mds_wanted\n",
+ inode, ceph_vinop(inode), ceph_cap_string(wanted),
+ ceph_cap_string(issued));
+ ceph_check_caps(ci, 0);
return 0;
}
spin_unlock(&ci->i_ceph_lock);
@@ -325,7 +345,8 @@ int ceph_renew_caps(struct inode *inode, int fmode)
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
out:
- dout("renew caps %p open result=%d\n", inode, err);
+ doutc(cl, "%p %llx.%llx open result=%d\n", inode, ceph_vinop(inode),
+ err);
return err < 0 ? err : 0;
}
@@ -338,28 +359,64 @@ out:
int ceph_open(struct inode *inode, struct file *file)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct ceph_file_info *fi = file->private_data;
int err;
int flags, fmode, wanted;
+ struct dentry *dentry;
+ char *path;
+ bool do_sync = false;
+ int mask = MAY_READ;
if (fi) {
- dout("open file %p is already opened\n", file);
+ doutc(cl, "file %p is already opened\n", file);
return 0;
}
/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
flags = file->f_flags & ~(O_CREAT|O_EXCL);
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode)) {
flags = O_DIRECTORY; /* mds likes to know */
+ } else if (S_ISREG(inode->i_mode)) {
+ err = fscrypt_file_open(inode, file);
+ if (err)
+ return err;
+ }
- dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
- ceph_vinop(inode), file, flags, file->f_flags);
+ doutc(cl, "%p %llx.%llx file %p flags %d (%d)\n", inode,
+ ceph_vinop(inode), file, flags, file->f_flags);
fmode = ceph_flags_to_mode(flags);
wanted = ceph_caps_for_mode(fmode);
+ if (fmode & CEPH_FILE_MODE_WR)
+ mask |= MAY_WRITE;
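+ /* Pre-check path access against the MDS auth caps: -EACCES fails the
+ * open outright; any other failure forces the open request so the MDS
+ * does the check instead. */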
+ dentry = d_find_alias(inode);
+ if (!dentry) {
+ do_sync = true;
+ } else {
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
+ if (IS_ERR(path)) {
+ do_sync = true;
+ err = 0;
+ } else {
+ err = ceph_mds_check_access(mdsc, path, mask);
+ }
+ ceph_mdsc_free_path_info(&path_info);
+ dput(dentry);
+
+ /* For errors other than -EACCES, let the MDS do the auth check */
+ if (err == -EACCES) {
+ return err;
+ } else if (err < 0) {
+ do_sync = true;
+ err = 0;
+ }
+ }
+
/* snapped files are read-only */
if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
return -EROFS;
@@ -375,14 +432,14 @@ int ceph_open(struct inode *inode, struct file *file)
* asynchronously.
*/
spin_lock(&ci->i_ceph_lock);
- if (__ceph_is_any_real_caps(ci) &&
+ if (!do_sync && __ceph_is_any_real_caps(ci) &&
(((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
int mds_wanted = __ceph_caps_mds_wanted(ci, true);
int issued = __ceph_caps_issued(ci, NULL);
- dout("open %p fmode %d want %s issued %s using existing\n",
- inode, fmode, ceph_cap_string(wanted),
- ceph_cap_string(issued));
+ doutc(cl, "open %p fmode %d want %s issued %s using existing\n",
+ inode, fmode, ceph_cap_string(wanted),
+ ceph_cap_string(issued));
__ceph_touch_fmode(ci, mdsc, fmode);
spin_unlock(&ci->i_ceph_lock);
@@ -390,10 +447,10 @@ int ceph_open(struct inode *inode, struct file *file)
if ((issued & wanted) != wanted &&
(mds_wanted & wanted) != wanted &&
ceph_snap(inode) != CEPH_SNAPDIR)
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, 0);
return ceph_init_file(inode, file, fmode);
- } else if (ceph_snap(inode) != CEPH_NOSNAP &&
+ } else if (!do_sync && ceph_snap(inode) != CEPH_NOSNAP &&
(ci->i_snap_caps & wanted) == wanted) {
__ceph_touch_fmode(ci, mdsc, fmode);
spin_unlock(&ci->i_ceph_lock);
@@ -402,7 +459,7 @@ int ceph_open(struct inode *inode, struct file *file)
spin_unlock(&ci->i_ceph_lock);
- dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
+ doutc(cl, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -416,7 +473,7 @@ int ceph_open(struct inode *inode, struct file *file)
if (!err)
err = ceph_init_file(inode, file, req->r_fmode);
ceph_mdsc_put_request(req);
- dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
+ doutc(cl, "open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
return err;
}
@@ -496,6 +553,7 @@ no_async:
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_mds_session *s = NULL;
@@ -506,65 +564,93 @@ static void restore_deleg_ino(struct inode *dir, u64 ino)
if (s) {
int err = ceph_restore_deleg_ino(s, ino);
if (err)
- pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
+ pr_warn_client(cl,
+ "unable to restore delegated ino 0x%llx to session: %d\n",
ino, err);
ceph_put_mds_session(s);
}
}
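+/*
+ * Clear the async-create flag, wake anyone waiting on it, and run any
+ * cap check that was deferred while the create was in flight.
+ */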
+static void wake_async_create_waiters(struct inode *inode,
+ struct ceph_mds_session *session)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool check_cap = false;
+
+ spin_lock(&ci->i_ceph_lock);
+ if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
+ clear_and_wake_up_bit(CEPH_ASYNC_CREATE_BIT, &ci->i_ceph_flags);
+
+ if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
+ ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
+ check_cap = true;
+ }
+ }
+ ceph_kick_flushing_inode_caps(session, ci);
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (check_cap)
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+}
+
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
+ struct dentry *dentry = req->r_dentry;
+ struct inode *dinode = d_inode(dentry);
+ struct inode *tinode = req->r_target_inode;
int result = req->r_err ? req->r_err :
le32_to_cpu(req->r_reply_info.head->result);
+ WARN_ON_ONCE(dinode && tinode && dinode != tinode);
+
+ /* MDS changed -- caller must resubmit */
if (result == -EJUKEBOX)
goto out;
mapping_set_error(req->r_parent->i_mapping, result);
if (result) {
- struct dentry *dentry = req->r_dentry;
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
+
+ pr_warn_client(cl,
+ "async create failure path=(%llx)%s result=%d!\n",
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
ceph_dir_clear_complete(req->r_parent);
if (!d_unhashed(dentry))
d_drop(dentry);
- /* FIXME: start returning I/O errors on all accesses? */
- pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ if (dinode) {
+ mapping_set_error(dinode->i_mapping, result);
+ ceph_inode_shutdown(dinode);
+ wake_async_create_waiters(dinode, req->r_session);
+ }
}
- if (req->r_target_inode) {
- struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
- u64 ino = ceph_vino(req->r_target_inode).ino;
+ if (tinode) {
+ u64 ino = ceph_vino(tinode).ino;
if (req->r_deleg_ino != ino)
- pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
- __func__, req->r_err, req->r_deleg_ino, ino);
- mapping_set_error(req->r_target_inode->i_mapping, result);
+ pr_warn_client(cl,
+ "inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
+ req->r_err, req->r_deleg_ino, ino);
- spin_lock(&ci->i_ceph_lock);
- if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
- ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
- wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
- }
- ceph_kick_flushing_inode_caps(req->r_session, ci);
- spin_unlock(&ci->i_ceph_lock);
- } else {
- pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
- req->r_deleg_ino);
+ mapping_set_error(tinode->i_mapping, result);
+ wake_async_create_waiters(tinode, req->r_session);
+ } else if (!result) {
+ pr_warn_client(cl, "no req->r_target_inode for 0x%llx\n",
+ req->r_deleg_ino);
}
out:
ceph_mdsc_release_dir_caps(req);
}
-static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
+static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
+ struct dentry *dentry,
struct file *file, umode_t mode,
struct ceph_mds_request *req,
struct ceph_acl_sec_ctx *as_ctx,
@@ -575,25 +661,29 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_mds_reply_inode in = { };
struct ceph_mds_reply_info_in iinfo = { .in = &in };
struct ceph_inode_info *ci = ceph_inode(dir);
- struct inode *inode;
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
struct timespec64 now;
+ struct ceph_string *pool_ns;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
ktime_get_real_ts64(&now);
- inode = ceph_get_inode(dentry->d_sb, vino);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
iinfo.inline_version = CEPH_INLINE_NONE;
iinfo.change_attr = 1;
ceph_encode_timespec64(&iinfo.btime, &now);
- iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
- iinfo.xattr_data = xattr_buf;
- memset(iinfo.xattr_data, 0, iinfo.xattr_len);
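+ /* use the xattr blob prepared for this create (ACLs/security), if any */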
+ if (req->r_pagelist) {
+ iinfo.xattr_len = req->r_pagelist->length;
+ iinfo.xattr_data = req->r_pagelist->mapped_tail;
+ } else {
+ /* fake it */
+ iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
+ iinfo.xattr_data = xattr_buf;
+ memset(iinfo.xattr_data, 0, iinfo.xattr_len);
+ }
in.ino = cpu_to_le64(vino.ino);
in.snapid = cpu_to_le64(CEPH_NOSNAP);
@@ -603,37 +693,54 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
in.cap.flags = CEPH_CAP_FLAG_AUTH;
in.ctime = in.mtime = in.atime = iinfo.btime;
- in.mode = cpu_to_le32((u32)mode);
in.truncate_seq = cpu_to_le32(1);
in.truncate_size = cpu_to_le64(-1ULL);
in.xattr_version = cpu_to_le64(1);
- in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
- in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
- dir->i_gid : current_fsgid()));
+ in.uid = cpu_to_le32(from_kuid(&init_user_ns,
+ mapped_fsuid(req->r_mnt_idmap,
+ &init_user_ns)));
+ if (dir->i_mode & S_ISGID) {
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
+
+ /* Directories always inherit the setgid bit. */
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ } else {
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns,
+ mapped_fsgid(req->r_mnt_idmap,
+ &init_user_ns)));
+ }
+ in.mode = cpu_to_le32((u32)mode);
+
in.nlink = cpu_to_le32(1);
in.max_size = cpu_to_le64(lo->stripe_unit);
ceph_file_layout_to_legacy(lo, &in.layout);
+ /* lo is private, so pool_ns can't change */
+ pool_ns = rcu_dereference_raw(lo->pool_ns);
+ if (pool_ns) {
+ iinfo.pool_ns_len = pool_ns->len;
+ iinfo.pool_ns_data = pool_ns->str;
+ }
down_read(&mdsc->snap_rwsem);
ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
req->r_fmode, NULL);
up_read(&mdsc->snap_rwsem);
if (ret) {
- dout("%s failed to fill inode: %d\n", __func__, ret);
+ doutc(cl, "failed to fill inode: %d\n", ret);
ceph_dir_clear_complete(dir);
if (!d_unhashed(dentry))
d_drop(dentry);
- if (inode->i_state & I_NEW)
- discard_new_inode(inode);
+ discard_new_inode(inode);
} else {
struct dentry *dn;
- dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
- vino.ino, ceph_ino(dir), dentry->d_name.name);
+ doutc(cl, "d_adding new inode 0x%llx to 0x%llx/%s\n",
+ vino.ino, ceph_ino(dir), dentry->d_name.name);
ceph_dir_clear_ordered(dir);
ceph_init_inode_acls(inode, as_ctx);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
/*
* If it's not I_NEW, then someone created this before
* we got here. Assume the server is aware of it at
@@ -652,6 +759,11 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
file->f_mode |= FMODE_CREATED;
ret = finish_open(file, dentry, ceph_open);
}
+
+ spin_lock(&dentry->d_lock);
+ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_CREATE_BIT, &di->flags);
+ spin_unlock(&dentry->d_lock);
+
return ret;
}
@@ -662,36 +774,83 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
+ struct inode *new_inode = NULL;
struct dentry *dn;
struct ceph_acl_sec_ctx as_ctx = {};
bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
int mask;
int err;
+ char *path;
- dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
- dir, dentry, dentry,
- d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
+ doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
+ dir, ceph_vinop(dir), dentry, dentry,
+ d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
if (dentry->d_name.len > NAME_MAX)
return -ENAMETOOLONG;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+ /*
+ * Do not truncate the file, since atomic_open is called before the
+ * permission check. The caller will do the truncation afterward.
+ */
+ flags &= ~O_TRUNC;
+
+ dn = d_find_alias(dir);
+ if (!dn) {
+ try_async = false;
+ } else {
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
+ if (IS_ERR(path)) {
+ try_async = false;
+ err = 0;
+ } else {
+ int fmode = ceph_flags_to_mode(flags);
+
+ mask = MAY_READ;
+ if (fmode & CEPH_FILE_MODE_WR)
+ mask |= MAY_WRITE;
+ err = ceph_mds_check_access(mdsc, path, mask);
+ }
+ ceph_mdsc_free_path_info(&path_info);
+ dput(dn);
+
+ /* For errors other than -EACCES, let the MDS do the auth check */
+ if (err == -EACCES) {
+ return err;
+ } else if (err < 0) {
+ try_async = false;
+ err = 0;
+ }
+ }
+
+retry:
if (flags & O_CREAT) {
if (ceph_quota_is_max_files_exceeded(dir))
return -EDQUOT;
- err = ceph_pre_init_acls(dir, &mode, &as_ctx);
- if (err < 0)
- return err;
- err = ceph_security_init_secctx(dentry, mode, &as_ctx);
- if (err < 0)
+
+ new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(new_inode)) {
+ err = PTR_ERR(new_inode);
goto out_ctx;
+ }
+ /* Async create can't handle more than a page of xattrs */
+ if (as_ctx.pagelist &&
+ !list_is_singular(&as_ctx.pagelist->head))
+ try_async = false;
} else if (!d_in_lookup(dentry)) {
/* If it's not being looked up, it's negative */
return -ENOENT;
}
-retry:
+
/* do the open */
req = prepare_open_request(dir->i_sb, flags, mode);
if (IS_ERR(req)) {
@@ -705,43 +864,76 @@ retry:
mask |= CEPH_CAP_XATTR_SHARED;
req->r_args.open.mask = cpu_to_le32(mask);
req->r_parent = dir;
+ if (req->r_op == CEPH_MDS_OP_CREATE)
+ req->r_mnt_idmap = mnt_idmap_get(idmap);
ihold(dir);
+ if (IS_ENCRYPTED(dir)) {
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+ err = fscrypt_prepare_lookup_partial(dir, dentry);
+ if (err < 0)
+ goto out_req;
+ }
if (flags & O_CREAT) {
struct ceph_file_layout lo;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
- if (try_async &&
- (req->r_dir_caps =
- try_prep_async_create(dir, dentry, &lo,
- &req->r_deleg_ino))) {
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
+ if (try_async && (req->r_dir_caps =
+ try_prep_async_create(dir, dentry, &lo,
+ &req->r_deleg_ino))) {
+ struct ceph_vino vino = { .ino = req->r_deleg_ino,
+ .snap = CEPH_NOSNAP };
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
req->r_callback = ceph_async_create_cb;
+
+ /* Hash inode before RPC */
+ new_inode = ceph_get_inode(dir->i_sb, vino, new_inode);
+ if (IS_ERR(new_inode)) {
+ err = PTR_ERR(new_inode);
+ new_inode = NULL;
+ goto out_req;
+ }
+ WARN_ON_ONCE(!(inode_state_read_once(new_inode) & I_NEW));
+
+ spin_lock(&dentry->d_lock);
+ di->flags |= CEPH_DENTRY_ASYNC_CREATE;
+ spin_unlock(&dentry->d_lock);
+
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err) {
- err = ceph_finish_async_create(dir, dentry,
- file, mode, req,
- &as_ctx, &lo);
+ err = ceph_finish_async_create(dir, new_inode,
+ dentry, file,
+ mode, req,
+ &as_ctx, &lo);
+ new_inode = NULL;
} else if (err == -EJUKEBOX) {
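+ /* MDS declined the async create: undo our state and retry synchronously */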
restore_deleg_ino(dir, req->r_deleg_ino);
ceph_mdsc_put_request(req);
+ discard_new_inode(new_inode);
+ ceph_release_acl_sec_ctx(&as_ctx);
+ memset(&as_ctx, 0, sizeof(as_ctx));
+ new_inode = NULL;
try_async = false;
+ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto retry;
}
+ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto out_req;
}
}
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- err = ceph_mdsc_do_request(mdsc,
- (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
- req);
+ req->r_new_inode = new_inode;
+ new_inode = NULL;
+ err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
if (err == -ENOENT) {
dentry = ceph_handle_snapdir(req, dentry);
if (IS_ERR(dentry)) {
@@ -766,10 +958,18 @@ retry:
goto out_req;
if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
/* make vfs retry on splice, ENOENT, or symlink */
- dout("atomic_open finish_no_open on dn %p\n", dn);
+ doutc(cl, "finish_no_open on dn %p\n", dn);
err = finish_no_open(file, dn);
} else {
- dout("atomic_open finish_open on dn %p\n", dn);
+ if (IS_ENCRYPTED(dir) &&
+ !fscrypt_has_permitted_context(dir, d_inode(dentry))) {
+ pr_warn_client(cl,
+ "Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
+ ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
+ goto out_req;
+ }
+
+ doutc(cl, "finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
struct inode *newino = d_inode(dentry);
@@ -781,19 +981,22 @@ retry:
}
out_req:
ceph_mdsc_put_request(req);
+ iput(new_inode);
out_ctx:
ceph_release_acl_sec_ctx(&as_ctx);
- dout("atomic_open result=%d\n", err);
+ doutc(cl, "result=%d\n", err);
return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (S_ISDIR(inode->i_mode)) {
struct ceph_dir_file_info *dfi = file->private_data;
- dout("release inode %p dir file %p\n", inode, file);
+ doutc(cl, "%p %llx.%llx dir file %p\n", inode,
+ ceph_vinop(inode), file);
WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
ceph_put_fmode(ci, dfi->file_info.fmode, 1);
@@ -805,9 +1008,11 @@ int ceph_release(struct inode *inode, struct file *file)
kmem_cache_free(ceph_dir_file_cachep, dfi);
} else {
struct ceph_file_info *fi = file->private_data;
- dout("release inode %p regular file %p\n", inode, file);
+ doutc(cl, "%p %llx.%llx regular file %p\n", inode,
+ ceph_vinop(inode), file);
WARN_ON(!list_empty(&fi->rw_contexts));
+ ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
ceph_put_fmode(ci, fi->fmode, 1);
kmem_cache_free(ceph_file_cachep, fi);
@@ -834,22 +1039,28 @@ enum {
* If we get a short result from the OSD, check against i_size; we need to
* only return a short read to the caller if we hit EOF.
*/
-static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
- int *retry_op)
+ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ struct iov_iter *to, int *retry_op,
+ u64 *last_objver)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
ssize_t ret;
- u64 off = iocb->ki_pos;
+ u64 off = *ki_pos;
u64 len = iov_iter_count(to);
+ u64 i_size = i_size_read(inode);
+ bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
+ u64 objver = 0;
+
+ doutc(cl, "on inode %p %llx.%llx %llx~%llx\n", inode,
+ ceph_vinop(inode), *ki_pos, len);
- dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
- (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+ if (ceph_inode_is_shutdown(inode))
+ return -EIO;
- if (!len)
+ if (!len || !i_size)
return 0;
/*
* flush any page cache pages in this range. this
@@ -868,14 +1079,25 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
struct page **pages;
int num_pages;
size_t page_off;
- u64 i_size;
bool more;
- int idx;
+ int idx = 0;
size_t left;
+ struct ceph_osd_req_op *op;
+ u64 read_off = off;
+ u64 read_len = len;
+ int extent_cnt;
+
+ /* determine new offset/length if encrypted */
+ ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
+
+ doutc(cl, "orig %llu~%llu reading %llu~%llu", off, len,
+ read_off, read_len);
req = ceph_osdc_new_request(osdc, &ci->i_layout,
- ci->i_vino, off, &len, 0, 1,
- CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
+ ci->i_vino, read_off, &read_len, 0, 1,
+ sparse ? CEPH_OSD_OP_SPARSE_READ :
+ CEPH_OSD_OP_READ,
+ CEPH_OSD_FLAG_READ,
NULL, ci->i_truncate_seq,
ci->i_truncate_size, false);
if (IS_ERR(req)) {
@@ -883,10 +1105,23 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
break;
}
+ /* adjust len downward if the request truncated the len */
+ if (off + len > read_off + read_len)
+ len = read_off + read_len - off;
more = len < iov_iter_count(to);
- num_pages = calc_pages_for(off, len);
- page_off = off & ~PAGE_MASK;
+ op = &req->r_ops[0];
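+ /* sparse reads need an extent map allocated on the read op */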
+ if (sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
+ num_pages = calc_pages_for(read_off, read_len);
+ page_off = offset_in_page(off);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
ceph_osdc_put_request(req);
@@ -894,74 +1129,133 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
break;
}
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
- false, false);
- ret = ceph_osdc_start_request(osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(osdc, req);
+ osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
+ offset_in_page(read_off),
+ false, true);
+
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
ceph_update_read_metrics(&fsc->mdsc->metric,
req->r_start_latency,
req->r_end_latency,
- len, ret);
+ read_len, ret);
- ceph_osdc_put_request(req);
+ if (ret > 0)
+ objver = req->r_version;
i_size = i_size_read(inode);
- dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
- off, len, ret, i_size, (more ? " MORE" : ""));
+ doutc(cl, "%llu~%llu got %zd i_size %llu%s\n", off, len,
+ ret, i_size, (more ? " MORE" : ""));
- if (ret == -ENOENT)
+ /* Fix it to go to end of extent map */
+ if (sparse && ret >= 0)
+ ret = ceph_sparse_ext_map_end(op);
+ else if (ret == -ENOENT)
ret = 0;
- if (ret >= 0 && ret < len && (off + ret < i_size)) {
+
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ if (ret == -EBLOCKLISTED)
+ fsc->blocklisted = true;
+ break;
+ }
+
+ if (IS_ENCRYPTED(inode)) {
+ int fret;
+
+ fret = ceph_fscrypt_decrypt_extents(inode, pages,
+ read_off, op->extent.sparse_ext,
+ op->extent.sparse_ext_cnt);
+ if (fret < 0) {
+ ret = fret;
+ ceph_osdc_put_request(req);
+ break;
+ }
+
+ /* account for any partial block at the beginning */
+ fret -= (off - read_off);
+
+ /*
+ * Short read after big offset adjustment?
+ * Nothing is usable, just call it a zero
+ * len read.
+ */
+ fret = max(fret, 0);
+
+ /* account for partial block at the end */
+ ret = min_t(ssize_t, fret, len);
+ }
+
+ /* Short read but not EOF? Zero out the remainder. */
+ if (ret < len && (off + ret < i_size)) {
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
- dout("sync_read zero gap %llu~%llu\n",
- off + ret, off + ret + zlen);
+
+ doutc(cl, "zero gap %llu~%llu\n", off + ret,
+ off + ret + zlen);
ceph_zero_page_vector_range(zoff, zlen, pages);
ret += zlen;
}
- idx = 0;
- left = ret > 0 ? ret : 0;
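+ /* never copy out past EOF; data beyond i_size is not valid */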
+ if (off + ret > i_size)
+ left = (i_size > off) ? i_size - off : 0;
+ else
+ left = ret;
+
while (left > 0) {
- size_t len, copied;
- page_off = off & ~PAGE_MASK;
- len = min_t(size_t, left, PAGE_SIZE - page_off);
+ size_t plen, copied;
+
+ plen = min_t(size_t, left, PAGE_SIZE - page_off);
SetPageUptodate(pages[idx]);
copied = copy_page_to_iter(pages[idx++],
- page_off, len, to);
+ page_off, plen, to);
off += copied;
left -= copied;
- if (copied < len) {
+ page_off = 0;
+ if (copied < plen) {
ret = -EFAULT;
break;
}
}
- ceph_release_page_vector(pages, num_pages);
- if (ret < 0) {
- if (ret == -EBLOCKLISTED)
- fsc->blocklisted = true;
- break;
- }
+ ceph_osdc_put_request(req);
if (off >= i_size || !more)
break;
}
- if (off > iocb->ki_pos) {
- if (ret >= 0 &&
- iov_iter_count(to) > 0 && off >= i_size_read(inode))
+ if (ret > 0) {
+ if (off >= i_size) {
*retry_op = CHECK_EOF;
- ret = off - iocb->ki_pos;
- iocb->ki_pos = off;
- }
+ ret = i_size - *ki_pos;
+ *ki_pos = i_size;
+ } else {
+ ret = off - *ki_pos;
+ *ki_pos = off;
+ }
- dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
+ if (last_objver)
+ *last_objver = objver;
+ }
+ doutc(cl, "result %zd retry_op %d\n", ret, *retry_op);
return ret;
}
+static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
+ int *retry_op)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+
+ doutc(cl, "on file %p %llx~%zx %s\n", file, iocb->ki_pos,
+ iov_iter_count(to),
+ (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+
+ return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
+}
+
struct ceph_aio_request {
struct kiocb *iocb;
size_t total_len;
@@ -985,6 +1279,7 @@ static void ceph_aio_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
struct ceph_aio_request *aio_req)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int ret;
@@ -998,7 +1293,7 @@ static void ceph_aio_complete(struct inode *inode,
if (!ret)
ret = aio_req->total_len;
- dout("ceph_aio_complete %p rc %d\n", inode, ret);
+ doutc(cl, "%p %llx.%llx rc %d\n", inode, ceph_vinop(inode), ret);
if (ret >= 0 && aio_req->write) {
int dirty;
@@ -1006,11 +1301,10 @@ static void ceph_aio_complete(struct inode *inode,
loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
if (endoff > i_size_read(inode)) {
if (ceph_inode_set_size(inode, endoff))
- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
}
spin_lock(&ci->i_ceph_lock);
- ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
&aio_req->prealloc_cf);
spin_unlock(&ci->i_ceph_lock);
@@ -1034,13 +1328,17 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct inode *inode = req->r_inode;
struct ceph_aio_request *aio_req = req->r_priv;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+ struct ceph_osd_req_op *op = &req->r_ops[0];
struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
unsigned int len = osd_data->bvec_pos.iter.bi_size;
+ bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
- dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
+ doutc(cl, "req %p inode %p %llx.%llx, rc %d bytes %u\n", req,
+ inode, ceph_vinop(inode), rc, len);
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
@@ -1050,12 +1348,14 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
if (aio_work) {
INIT_WORK(&aio_work->work, ceph_aio_retry_work);
aio_work->req = req;
- queue_work(ceph_inode_to_client(inode)->inode_wq,
+ queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
&aio_work->work);
return;
}
rc = -ENOMEM;
} else if (!aio_req->write) {
+ if (sparse && rc >= 0)
+ rc = ceph_sparse_ext_map_end(op);
if (rc == -ENOENT)
rc = 0;
if (rc >= 0 && len > rc) {
@@ -1076,7 +1376,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
aio_req->total_len = rc + zlen;
}
- iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
+ iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
osd_data->num_bvecs, len);
iov_iter_advance(&i, rc);
iov_iter_zero(zlen, &i);
@@ -1159,7 +1459,7 @@ static void ceph_aio_retry_work(struct work_struct *work)
req->r_inode = inode;
req->r_priv = aio_req;
- ret = ceph_osdc_start_request(req->r_osdc, req, false);
+ ceph_osdc_start_request(req->r_osdc, req);
out:
if (ret < 0) {
req->r_result = ret;
@@ -1178,7 +1478,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_client_metric *metric = &fsc->mdsc->metric;
struct ceph_vino vino;
struct ceph_osd_request *req;
@@ -1191,21 +1492,27 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
bool write = iov_iter_rw(iter) == WRITE;
- bool should_dirty = !write && iter_is_iovec(iter);
+ bool should_dirty = !write && user_backed_iter(iter);
+ bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
- dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
- (write ? "write" : "read"), file, pos, (unsigned)count,
- snapc, snapc ? snapc->seq : 0);
+ doutc(cl, "sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
+ (write ? "write" : "read"), file, pos, (unsigned)count,
+ snapc, snapc ? snapc->seq : 0);
if (write) {
- int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
+ int ret2;
+
+ ceph_fscache_invalidate(inode, true);
+
+ ret2 = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
(pos + count - 1) >> PAGE_SHIFT);
if (ret2 < 0)
- dout("invalidate_inode_pages2_range returned %d\n", ret2);
+ doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+ ret2);
flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
} else {
@@ -1215,6 +1522,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
while (iov_iter_count(iter) > 0) {
u64 size = iov_iter_count(iter);
ssize_t len;
+ struct ceph_osd_req_op *op;
+ int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;
+ int extent_cnt;
if (write)
size = min_t(u64, size, fsc->mount_options->wsize);
@@ -1225,8 +1535,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
vino, pos, &size, 0,
1,
- write ? CEPH_OSD_OP_WRITE :
- CEPH_OSD_OP_READ,
+ write ? CEPH_OSD_OP_WRITE : readop,
flags, snapc,
ci->i_truncate_seq,
ci->i_truncate_size,
@@ -1236,6 +1545,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
break;
}
+ op = &req->r_ops[0];
+ if (!write && sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
if (len < 0) {
ceph_osdc_put_request(req);
@@ -1245,6 +1564,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (len != size)
osd_req_op_extent_update(req, 0, len);
+ osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
/*
* To simplify error handling, allow AIO when IO within i_size
* or IO can be satisfied by single OSD request.
@@ -1276,8 +1597,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req->r_mtime = mtime;
}
- osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
-
if (aio_req) {
aio_req->total_len += len;
aio_req->num_reqs++;
@@ -1292,9 +1611,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
continue;
}
- ret = ceph_osdc_start_request(req->r_osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_start_request(req->r_osdc, req);
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
if (write)
ceph_update_write_metrics(metric, req->r_start_latency,
@@ -1305,14 +1623,17 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
size = i_size_read(inode);
if (!write) {
- if (ret == -ENOENT)
+ if (sparse && ret >= 0)
+ ret = ceph_sparse_ext_map_end(op);
+ else if (ret == -ENOENT)
ret = 0;
+
if (ret >= 0 && ret < len && pos + ret < size) {
struct iov_iter i;
int zlen = min_t(size_t, len - ret,
size - pos - ret);
- iov_iter_bvec(&i, READ, bvecs, num_pages, len);
+ iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
iov_iter_advance(&i, ret);
iov_iter_zero(zlen, &i);
ret += zlen;
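
When a read comes back short (a sparse read, a hole, or EOF inside the request), the code above zero-fills the unread tail of the buffer, but only up to i_size. A small userspace sketch of that arithmetic, with a made-up helper name and plain integer types standing in for the kernel's:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: how many bytes to zero after a short read.
 * 'ret' is what the OSD returned, 'len' what was requested,
 * 'pos' the file offset and 'size' the current i_size.
 */
static uint64_t zero_tail_len(int64_t ret, uint64_t len, uint64_t pos,
			      uint64_t size)
{
	uint64_t zlen, avail;

	if (ret < 0 || (uint64_t)ret >= len || pos + (uint64_t)ret >= size)
		return 0;			/* nothing to zero */

	zlen = len - (uint64_t)ret;		/* unread part of the request */
	avail = size - pos - (uint64_t)ret;	/* file bytes left past ret */
	return zlen < avail ? zlen : avail;
}

int main(void)
{
	/* read 16K at offset 0 of a 10K file; the OSD returned only 4K */
	printf("zero %llu bytes\n",
	       (unsigned long long)zero_tail_len(4096, 16384, 0, 10240));
	return 0;	/* prints "zero 6144 bytes": pad to i_size, not to len */
}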
@@ -1333,8 +1654,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (write && pos > size) {
if (ceph_inode_set_size(inode, pos))
ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_AUTHONLY,
- NULL);
+ CHECK_CAPS_AUTHONLY);
}
}
@@ -1357,8 +1677,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
r_private_item);
list_del_init(&req->r_private_item);
if (ret >= 0)
- ret = ceph_osdc_start_request(req->r_osdc,
- req, false);
+ ceph_osdc_start_request(req->r_osdc, req);
if (ret < 0) {
req->r_result = ret;
ceph_aio_complete_req(req);
@@ -1388,14 +1707,14 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_vino vino;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
struct page **pages;
u64 len;
int num_pages;
int written = 0;
- int flags;
int ret;
bool check_caps = false;
struct timespec64 mtime = current_time(inode);
@@ -1404,94 +1723,363 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
- dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
- file, pos, (unsigned)count, snapc, snapc->seq);
+ doutc(cl, "on file %p %lld~%u snapc %p seq %lld\n", file, pos,
+ (unsigned)count, snapc, snapc->seq);
ret = filemap_write_and_wait_range(inode->i_mapping,
pos, pos + count - 1);
if (ret < 0)
return ret;
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_SHIFT,
- (pos + count - 1) >> PAGE_SHIFT);
- if (ret < 0)
- dout("invalidate_inode_pages2_range returned %d\n", ret);
-
- flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
+ ceph_fscache_invalidate(inode, false);
while ((len = iov_iter_count(from)) > 0) {
size_t left;
int n;
-
- vino = ceph_vino(inode);
- req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len, 0, 1,
- CEPH_OSD_OP_WRITE, flags, snapc,
- ci->i_truncate_seq,
- ci->i_truncate_size,
- false);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- break;
- }
+ u64 write_pos = pos;
+ u64 write_len = len;
+ u64 objnum, objoff;
+ u32 xlen;
+ u64 assert_ver = 0;
+ bool rmw;
+ bool first, last;
+ struct iov_iter saved_iter = *from;
+ size_t off;
+
+ ceph_fscrypt_adjust_off_and_len(inode, &write_pos, &write_len);
+
+ /* clamp the length to the end of first object */
+ ceph_calc_file_object_mapping(&ci->i_layout, write_pos,
+ write_len, &objnum, &objoff,
+ &xlen);
+ write_len = xlen;
+
+ /* adjust len downward if it goes beyond current object */
+ if (pos + len > write_pos + write_len)
+ len = write_pos + write_len - pos;
/*
- * write from beginning of first page,
- * regardless of io alignment
+ * If we had to adjust the length or position to align with a
+ * crypto block, then we must do a read/modify/write cycle. We
+ * use a version assertion to redrive the thing if something
+ * changes in between.
*/
- num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ first = pos != write_pos;
+ last = (pos + len) != (write_pos + write_len);
+ rmw = first || last;
+
+ doutc(cl, "ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
+ ci->i_vino.ino, pos, len, write_pos, write_len,
+ rmw ? "" : "no ");
+ /*
+ * The data is emplaced into the page as it would be if it were
+ * in an array of pagecache pages.
+ */
+ num_pages = calc_pages_for(write_pos, write_len);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
- goto out;
+ break;
+ }
+
+ /* Do we need to preload the pages? */
+ if (rmw) {
+ u64 first_pos = write_pos;
+ u64 last_pos = (write_pos + write_len) - CEPH_FSCRYPT_BLOCK_SIZE;
+ u64 read_len = CEPH_FSCRYPT_BLOCK_SIZE;
+ struct ceph_osd_req_op *op;
+
+ /* We should only need to do this for encrypted inodes */
+ WARN_ON_ONCE(!IS_ENCRYPTED(inode));
+
+ /* No need to do two reads if first and last blocks are same */
+ if (first && last_pos == first_pos)
+ last = false;
+
+ /*
+ * Allocate a read request for one or two extents,
+ * depending on how the request was aligned.
+ */
+ req = ceph_osdc_new_request(osdc, &ci->i_layout,
+ ci->i_vino, first ? first_pos : last_pos,
+ &read_len, 0, (first && last) ? 2 : 1,
+ CEPH_OSD_OP_SPARSE_READ, CEPH_OSD_FLAG_READ,
+ NULL, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
+ if (IS_ERR(req)) {
+ ceph_release_page_vector(pages, num_pages);
+ ret = PTR_ERR(req);
+ break;
+ }
+
+ /* Something is misaligned! */
+ if (read_len != CEPH_FSCRYPT_BLOCK_SIZE) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ ret = -EIO;
+ break;
+ }
+
+ /* Add extent for first block? */
+ op = &req->r_ops[0];
+
+ if (first) {
+ osd_req_op_extent_osd_data_pages(req, 0, pages,
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(first_pos),
+ false, false);
+ /* We only expect a single extent here */
+ ret = __ceph_alloc_sparse_ext_map(op, 1);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+
+ /* Add extent for last block */
+ if (last) {
+ /* Init the other extent if first extent has been used */
+ if (first) {
+ op = &req->r_ops[1];
+ osd_req_op_extent_init(req, 1,
+ CEPH_OSD_OP_SPARSE_READ,
+ last_pos, CEPH_FSCRYPT_BLOCK_SIZE,
+ ci->i_truncate_size,
+ ci->i_truncate_seq);
+ }
+
+ ret = __ceph_alloc_sparse_ext_map(op, 1);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+
+ osd_req_op_extent_osd_data_pages(req, first ? 1 : 0,
+ &pages[num_pages - 1],
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(last_pos),
+ false, false);
+ }
+
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
+
+ /* FIXME: length field is wrong if there are 2 extents */
+ ceph_update_read_metrics(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ read_len, ret);
+
+ /* Ok if object is not already present */
+ if (ret == -ENOENT) {
+ /*
+ * If there is no object, then we can't assert
+ * on its version. Set it to 0, and we'll use an
+ * exclusive create instead.
+ */
+ ceph_osdc_put_request(req);
+ ret = 0;
+
+ /*
+ * zero out the soon-to-be uncopied parts of the
+ * first and last pages.
+ */
+ if (first)
+ zero_user_segment(pages[0], 0,
+ offset_in_page(first_pos));
+ if (last)
+ zero_user_segment(pages[num_pages - 1],
+ offset_in_page(last_pos),
+ PAGE_SIZE);
+ } else {
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+
+ op = &req->r_ops[0];
+ if (op->extent.sparse_ext_cnt == 0) {
+ if (first)
+ zero_user_segment(pages[0], 0,
+ offset_in_page(first_pos));
+ else
+ zero_user_segment(pages[num_pages - 1],
+ offset_in_page(last_pos),
+ PAGE_SIZE);
+ } else if (op->extent.sparse_ext_cnt != 1 ||
+ ceph_sparse_ext_map_end(op) !=
+ CEPH_FSCRYPT_BLOCK_SIZE) {
+ ret = -EIO;
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+
+ if (first && last) {
+ op = &req->r_ops[1];
+ if (op->extent.sparse_ext_cnt == 0) {
+ zero_user_segment(pages[num_pages - 1],
+ offset_in_page(last_pos),
+ PAGE_SIZE);
+ } else if (op->extent.sparse_ext_cnt != 1 ||
+ ceph_sparse_ext_map_end(op) !=
+ CEPH_FSCRYPT_BLOCK_SIZE) {
+ ret = -EIO;
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+
+ /* Grab assert version. It must be non-zero. */
+ assert_ver = req->r_version;
+ WARN_ON_ONCE(ret > 0 && assert_ver == 0);
+
+ ceph_osdc_put_request(req);
+ if (first) {
+ ret = ceph_fscrypt_decrypt_block_inplace(inode,
+ pages[0], CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(first_pos),
+ first_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
+ if (ret < 0) {
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+ if (last) {
+ ret = ceph_fscrypt_decrypt_block_inplace(inode,
+ pages[num_pages - 1],
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(last_pos),
+ last_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
+ if (ret < 0) {
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+ }
}
left = len;
+ off = offset_in_page(pos);
for (n = 0; n < num_pages; n++) {
- size_t plen = min_t(size_t, left, PAGE_SIZE);
- ret = copy_page_from_iter(pages[n], 0, plen, from);
+ size_t plen = min_t(size_t, left, PAGE_SIZE - off);
+
+ /* copy the data */
+ ret = copy_page_from_iter(pages[n], off, plen, from);
if (ret != plen) {
ret = -EFAULT;
break;
}
+ off = 0;
left -= ret;
}
-
if (ret < 0) {
+ doutc(cl, "write failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
- goto out;
+ break;
}
- req->r_inode = inode;
+ if (IS_ENCRYPTED(inode)) {
+ ret = ceph_fscrypt_encrypt_pages(inode, pages,
+ write_pos, write_len);
+ if (ret < 0) {
+ doutc(cl, "encryption failed with %d\n", ret);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
- false, true);
+ req = ceph_osdc_new_request(osdc, &ci->i_layout,
+ ci->i_vino, write_pos, &write_len,
+ rmw ? 1 : 0, rmw ? 2 : 1,
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE,
+ snapc, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ doutc(cl, "write op %lld~%llu\n", write_pos, write_len);
+ osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
+ offset_in_page(write_pos), false,
+ true);
+ req->r_inode = inode;
req->r_mtime = mtime;
- ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+ /* Set up the assertion */
+ if (rmw) {
+ /*
+ * Set up the assertion. If we don't have a version
+ * number, then the object doesn't exist yet. Use an
+ * exclusive create instead of a version assertion in
+ * that case.
+ */
+ if (assert_ver) {
+ osd_req_op_init(req, 0, CEPH_OSD_OP_ASSERT_VER, 0);
+ req->r_ops[0].assert_ver.ver = assert_ver;
+ } else {
+ osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE,
+ CEPH_OSD_OP_FLAG_EXCL);
+ }
+ }
+
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, ret);
-out:
ceph_osdc_put_request(req);
if (ret != 0) {
+ doutc(cl, "osd write returned %d\n", ret);
+ /* Version changed! Must re-do the rmw cycle */
+ if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
+ (!assert_ver && ret == -EEXIST)) {
+ /* We should only ever see this on a rmw */
+ WARN_ON_ONCE(!rmw);
+
+ /* The version should never go backward */
+ WARN_ON_ONCE(ret == -EOVERFLOW);
+
+ *from = saved_iter;
+
+ /* FIXME: limit number of times we loop? */
+ continue;
+ }
ceph_set_error_write(ci);
break;
}
ceph_clear_error_write(ci);
+
+ /*
+ * We successfully wrote to a range of the file. Declare
+ * that region of the pagecache invalid.
+ */
+ ret = invalidate_inode_pages2_range(
+ inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ (pos + len - 1) >> PAGE_SHIFT);
+ if (ret < 0) {
+ doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+ ret);
+ ret = 0;
+ }
pos += len;
written += len;
+ doutc(cl, "written %d\n", written);
if (pos > i_size_read(inode)) {
check_caps = ceph_inode_set_size(inode, pos);
if (check_caps)
ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_AUTHONLY,
- NULL);
+ CHECK_CAPS_AUTHONLY);
}
}
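
The copy loop earlier in this function now stages the caller's data at its natural in-page offset (off = offset_in_page(pos) for the first page, 0 afterwards), so the page array mirrors the corresponding pagecache layout. A minimal sketch of that split, assuming 4K pages and using memcpy in place of copy_page_from_iter(); the helper name is made up:

#include <stdint.h>
#include <string.h>

#define PG_SIZE 4096u	/* stand-in for PAGE_SIZE */

/*
 * Illustrative only: copy 'len' bytes from 'src' into an array of
 * page-sized buffers (caller provides enough of them) so the data
 * lands at the same in-page offsets it would occupy in the pagecache
 * for file offset 'pos'.
 */
static void stage_pages(uint8_t **pages, const uint8_t *src,
			uint64_t pos, size_t len)
{
	size_t off = pos % PG_SIZE;	/* offset within the first page */
	size_t left = len;
	size_t n = 0;

	while (left > 0) {
		size_t plen = PG_SIZE - off;

		if (plen > left)
			plen = left;
		memcpy(pages[n] + off, src, plen);
		src += plen;
		left -= plen;
		off = 0;	/* only the first page starts mid-page */
		n++;
	}
}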
@@ -1500,6 +2088,7 @@ out:
ret = written;
iocb->ki_pos = pos;
}
+ doutc(cl, "returning %d\n", ret);
return ret;
}
@@ -1518,26 +2107,31 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
ssize_t ret;
- int want, got = 0;
+ int want = 0, got = 0;
int retry_op = 0, read = 0;
again:
- dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
- inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ doutc(cl, "%llu~%u trying to get caps on %p %llx.%llx\n",
+ iocb->ki_pos, (unsigned)len, inode, ceph_vinop(inode));
- if (direct_lock)
- ceph_start_io_direct(inode);
- else
- ceph_start_io_read(inode);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+ ret = direct_lock ? ceph_start_io_direct(inode) :
+ ceph_start_io_read(inode);
+ if (ret)
+ return ret;
+
+ if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
+ want |= CEPH_CAP_FILE_CACHE;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
- want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
- else
- want = CEPH_CAP_FILE_CACHE;
+ want |= CEPH_CAP_FILE_LAZYIO;
+
ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
if (ret < 0) {
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_end_io_direct(inode);
else
ceph_end_io_read(inode);
@@ -1548,12 +2142,14 @@ again:
(iocb->ki_flags & IOCB_DIRECT) ||
(fi->flags & CEPH_F_SYNC)) {
- dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
- inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
- ceph_cap_string(got));
+ doutc(cl, "sync %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+ ceph_cap_string(got));
- if (ci->i_inline_version == CEPH_INLINE_NONE) {
- if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
+ if (!ceph_has_inline_data(ci)) {
+ if (!retry_op &&
+ (iocb->ki_flags & IOCB_DIRECT) &&
+ !IS_ENCRYPTED(inode)) {
ret = ceph_direct_read_write(iocb, to,
NULL, NULL);
if (ret >= 0 && ret < len)
@@ -1566,16 +2162,16 @@ again:
}
} else {
CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
- dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
- inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
- ceph_cap_string(got));
+ doutc(cl, "async %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+ ceph_cap_string(got));
ceph_add_rw_context(fi, &rw_ctx);
ret = generic_file_read_iter(iocb, to);
ceph_del_rw_context(fi, &rw_ctx);
}
- dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
- inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+ doutc(cl, "%p %llx.%llx dropping cap refs on %s = %d\n",
+ inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
ceph_put_cap_refs(ci, got);
if (direct_lock)
@@ -1587,14 +2183,16 @@ again:
int statret;
struct page *page = NULL;
loff_t i_size;
+ int mask = CEPH_STAT_CAP_SIZE;
if (retry_op == READ_INLINE) {
page = __page_cache_alloc(GFP_KERNEL);
if (!page)
return -ENOMEM;
+
+ mask = CEPH_STAT_CAP_INLINE_DATA;
}
- statret = __ceph_do_getattr(inode, page,
- CEPH_STAT_CAP_INLINE_DATA, !!page);
+ statret = __ceph_do_getattr(inode, page, mask, !!page);
if (statret < 0) {
if (page)
__free_page(page);
@@ -1635,8 +2233,8 @@ again:
/* hit EOF or hole? */
if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) {
- dout("sync_read hit hole, ppos %lld < size %lld"
- ", reading more\n", iocb->ki_pos, i_size);
+ doutc(cl, "may hit hole, ppos %lld < size %lld, reading more\n",
+ iocb->ki_pos, i_size);
read += ret;
len -= ret;
@@ -1652,6 +2250,71 @@ again:
}
/*
+ * Wrap filemap_splice_read with checks for cap bits on the inode.
+ * Atomically grab references, so that those bits are not released
+ * back to the MDS mid-read.
+ */
+static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct ceph_file_info *fi = in->private_data;
+ struct inode *inode = file_inode(in);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ ssize_t ret;
+ int want = 0, got = 0;
+ CEPH_DEFINE_RW_CONTEXT(rw_ctx, 0);
+
+ dout("splice_read %p %llx.%llx %llu~%zu trying to get caps on %p\n",
+ inode, ceph_vinop(inode), *ppos, len, inode);
+
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
+ if (ceph_has_inline_data(ci) ||
+ (fi->flags & CEPH_F_SYNC))
+ return copy_splice_read(in, ppos, pipe, len, flags);
+
+ ret = ceph_start_io_read(inode);
+ if (ret)
+ return ret;
+
+ want = CEPH_CAP_FILE_CACHE;
+ if (fi->fmode & CEPH_FILE_MODE_LAZY)
+ want |= CEPH_CAP_FILE_LAZYIO;
+
+ ret = ceph_get_caps(in, CEPH_CAP_FILE_RD, want, -1, &got);
+ if (ret < 0)
+ goto out_end;
+
+ if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) == 0) {
+ dout("splice_read/sync %p %llx.%llx %llu~%zu got cap refs on %s\n",
+ inode, ceph_vinop(inode), *ppos, len,
+ ceph_cap_string(got));
+
+ ceph_put_cap_refs(ci, got);
+ ceph_end_io_read(inode);
+ return copy_splice_read(in, ppos, pipe, len, flags);
+ }
+
+ dout("splice_read %p %llx.%llx %llu~%zu got cap refs on %s\n",
+ inode, ceph_vinop(inode), *ppos, len, ceph_cap_string(got));
+
+ rw_ctx.caps = got;
+ ceph_add_rw_context(fi, &rw_ctx);
+ ret = filemap_splice_read(in, ppos, pipe, len, flags);
+ ceph_del_rw_context(fi, &rw_ctx);
+
+ dout("splice_read %p %llx.%llx dropping cap refs on %s = %zd\n",
+ inode, ceph_vinop(inode), ceph_cap_string(got), ret);
+
+ ceph_put_cap_refs(ci, got);
+out_end:
+ ceph_end_io_read(inode);
+ return ret;
+}
+
+/*
* Take cap references to avoid releasing caps to MDS mid-write.
*
* If we are synchronous, and write with an old snap context, the OSD
@@ -1667,17 +2330,21 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
- int err, want, got;
+ int err, want = 0, got;
bool direct_lock = false;
u32 map_flags;
u64 pool_flags;
loff_t pos;
loff_t limit = max(i_size_read(inode), fsc->max_file_size);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@@ -1689,13 +2356,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
direct_lock = true;
retry_snap:
- if (direct_lock)
- ceph_start_io_direct(inode);
- else
- ceph_start_io_write(inode);
-
- /* We can write back this queue in page reclaim */
- current->backing_dev_info = inode_to_bdi(inode);
+ err = direct_lock ? ceph_start_io_direct(inode) :
+ ceph_start_io_write(inode);
+ if (err)
+ goto out_unlocked;
if (iocb->ki_flags & IOCB_APPEND) {
err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
@@ -1735,18 +2399,13 @@ retry_snap:
if (err)
goto out;
- if (ci->i_inline_version != CEPH_INLINE_NONE) {
- err = ceph_uninline_data(file, NULL);
- if (err < 0)
- goto out;
- }
-
- dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
- inode, ceph_vinop(inode), pos, count, i_size_read(inode));
+ doutc(cl, "%p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
+ inode, ceph_vinop(inode), pos, count,
+ i_size_read(inode));
+ if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
+ want |= CEPH_CAP_FILE_BUFFER;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
- want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
- else
- want = CEPH_CAP_FILE_BUFFER;
+ want |= CEPH_CAP_FILE_LAZYIO;
got = 0;
err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
if (err < 0)
@@ -1758,8 +2417,8 @@ retry_snap:
inode_inc_iversion_raw(inode);
- dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
- inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
+ doutc(cl, "%p %llx.%llx %llu~%zd got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
@@ -1782,7 +2441,7 @@ retry_snap:
/* we might need to revert back to that point */
data = *from;
- if (iocb->ki_flags & IOCB_DIRECT)
+ if ((iocb->ki_flags & IOCB_DIRECT) && !IS_ENCRYPTED(inode))
written = ceph_direct_read_write(iocb, &data, snapc,
&prealloc_cf);
else
@@ -1802,9 +2461,7 @@ retry_snap:
* are pending vmtruncate. So write and vmtruncate
* can not run at the same time
*/
- written = generic_perform_write(file, from, pos);
- if (likely(written >= 0))
- iocb->ki_pos = pos + written;
+ written = generic_perform_write(iocb, from);
ceph_end_io_write(inode);
}
@@ -1812,24 +2469,23 @@ retry_snap:
int dirty;
spin_lock(&ci->i_ceph_lock);
- ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
&prealloc_cf);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}
- dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
- inode, ceph_vinop(inode), pos, (unsigned)count,
- ceph_cap_string(got));
+ doutc(cl, "%p %llx.%llx %llu~%u dropping cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, (unsigned)count,
+ ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
if (written == -EOLDSNAPC) {
- dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
- inode, ceph_vinop(inode), pos, (unsigned)count);
+ doutc(cl, "%p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
+ inode, ceph_vinop(inode), pos, (unsigned)count);
goto retry_snap;
}
@@ -1850,7 +2506,6 @@ out:
ceph_end_io_write(inode);
out_unlocked:
ceph_free_cap_flush(prealloc_cf);
- current->backing_dev_info = NULL;
return written ? written : err;
}
@@ -1859,72 +2514,30 @@ out_unlocked:
*/
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
- struct inode *inode = file->f_mapping->host;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- loff_t i_size;
- loff_t ret;
-
- inode_lock(inode);
-
if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
+ struct inode *inode = file_inode(file);
+ int ret;
+
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
if (ret < 0)
- goto out;
- }
-
- i_size = i_size_read(inode);
- switch (whence) {
- case SEEK_END:
- offset += i_size;
- break;
- case SEEK_CUR:
- /*
- * Here we special-case the lseek(fd, 0, SEEK_CUR)
- * position-querying operation. Avoid rewriting the "same"
- * f_pos value back to the file because a concurrent read(),
- * write() or lseek() might have altered it
- */
- if (offset == 0) {
- ret = file->f_pos;
- goto out;
- }
- offset += file->f_pos;
- break;
- case SEEK_DATA:
- if (offset < 0 || offset >= i_size) {
- ret = -ENXIO;
- goto out;
- }
- break;
- case SEEK_HOLE:
- if (offset < 0 || offset >= i_size) {
- ret = -ENXIO;
- goto out;
- }
- offset = i_size;
- break;
+ return ret;
}
-
- ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
-
-out:
- inode_unlock(inode);
- return ret;
+ return generic_file_llseek(file, offset, whence);
}
-static inline void ceph_zero_partial_page(
- struct inode *inode, loff_t offset, unsigned size)
+static inline void ceph_zero_partial_page(struct inode *inode,
+ loff_t offset, size_t size)
{
- struct page *page;
- pgoff_t index = offset >> PAGE_SHIFT;
+ struct folio *folio;
- page = find_lock_page(inode->i_mapping, index);
- if (page) {
- wait_on_page_writeback(page);
- zero_user(page, offset & (PAGE_SIZE - 1), size);
- unlock_page(page);
- put_page(page);
- }
+ folio = filemap_lock_folio(inode->i_mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
+ folio_wait_writeback(folio);
+ folio_zero_range(folio, offset_in_folio(folio, offset), size);
+ folio_unlock(folio);
+ folio_put(folio);
}
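
ceph_zero_partial_page() above handles only the sub-page pieces of a punched range. As a rough illustration of how an (offset, length) range breaks down into a partial head, whole pages, and a partial tail (4K pages assumed; this is not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

#define PG_SIZE 4096ull	/* stand-in for PAGE_SIZE */

/* Illustrative split of a byte range into head/middle/tail pieces. */
static void split_range(uint64_t offset, uint64_t length)
{
	uint64_t end = offset + length;
	uint64_t head = offset % PG_SIZE ? PG_SIZE - offset % PG_SIZE : 0;
	uint64_t tail, middle;

	if (head > length)
		head = length;		/* range ends inside the first page */

	tail = (end - (offset + head)) % PG_SIZE;
	middle = length - head - tail;

	printf("head %llu bytes, %llu whole pages, tail %llu bytes\n",
	       (unsigned long long)head,
	       (unsigned long long)(middle / PG_SIZE),
	       (unsigned long long)tail);
}

int main(void)
{
	split_range(6000, 20000);	/* head 2192, 4 whole pages, tail 1424 */
	return 0;
}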
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
@@ -1953,12 +2566,15 @@ static int ceph_zero_partial_object(struct inode *inode,
loff_t offset, loff_t *length)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_osd_request *req;
int ret = 0;
loff_t zero = 0;
int op;
+ if (ceph_inode_is_shutdown(inode))
+ return -EIO;
+
if (!length) {
op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
length = &zero;
@@ -1977,13 +2593,11 @@ static int ceph_zero_partial_object(struct inode *inode,
goto out;
}
- req->r_mtime = inode->i_mtime;
- ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!ret) {
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
- if (ret == -ENOENT)
- ret = 0;
- }
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ if (ret == -ENOENT)
+ ret = 0;
ceph_osdc_put_request(req);
out:
@@ -1997,7 +2611,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
s32 stripe_unit = ci->i_layout.stripe_unit;
s32 stripe_count = ci->i_layout.stripe_count;
s32 object_size = ci->i_layout.object_size;
- u64 object_set_size = object_size * stripe_count;
+ u64 object_set_size = (u64) object_size * stripe_count;
u64 nearly, t;
/* round offset up to next period boundary */
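
The added (u64) cast is the functional change here: object_size and stripe_count are both s32, so without it the multiplication is done in 32-bit arithmetic and can wrap before the result is widened. A tiny standalone demonstration, with made-up layout values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t object_size = 64 * 1024 * 1024;	/* 64 MiB objects */
	int32_t stripe_count = 64;

	/*
	 * In the kernel these are s32; a plain 32-bit multiply overflows
	 * (undefined behaviour for signed types). Shown with uint32_t so
	 * the wraparound is at least well-defined and printable.
	 */
	uint64_t wrong = (uint32_t)object_size * (uint32_t)stripe_count;
	/* widening one operand first keeps the full 4 GiB result */
	uint64_t right = (uint64_t)object_size * stripe_count;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;	/* wrong=0, right=4294967296 */
}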
@@ -2043,18 +2657,25 @@ static long ceph_fallocate(struct file *file, int mode,
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap_flush *prealloc_cf;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int want, got = 0;
int dirty;
int ret = 0;
loff_t endoff = 0;
loff_t size;
+ doutc(cl, "%p %llx.%llx mode %x, offset %llu length %llu\n",
+ inode, ceph_vinop(inode), mode, offset, length);
+
if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
+ if (IS_ENCRYPTED(inode))
+ return -EOPNOTSUPP;
+
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return -ENOMEM;
@@ -2066,12 +2687,6 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
}
- if (ci->i_inline_version != CEPH_INLINE_NONE) {
- ret = ceph_uninline_data(file, NULL);
- if (ret < 0)
- goto unlock;
- }
-
size = i_size_read(inode);
/* Are we punching a hole beyond EOF? */
@@ -2089,13 +2704,17 @@ static long ceph_fallocate(struct file *file, int mode,
if (ret < 0)
goto unlock;
+ ret = file_modified(file);
+ if (ret)
+ goto put_caps;
+
filemap_invalidate_lock(inode->i_mapping);
+ ceph_fscache_invalidate(inode, false);
ceph_zero_pagecache_range(inode, offset, length);
ret = ceph_zero_objects(inode, offset, length);
if (!ret) {
spin_lock(&ci->i_ceph_lock);
- ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
&prealloc_cf);
spin_unlock(&ci->i_ceph_lock);
@@ -2104,6 +2723,7 @@ static long ceph_fallocate(struct file *file, int mode,
}
filemap_invalidate_unlock(inode->i_mapping);
+put_caps:
ceph_put_cap_refs(ci, got);
unlock:
inode_unlock(inode);
@@ -2174,6 +2794,7 @@ static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
loff_t src_off, loff_t dst_off, size_t len)
{
+ struct ceph_client *cl = ceph_inode_to_client(src_inode);
loff_t size, endoff;
size = i_size_read(src_inode);
@@ -2184,8 +2805,8 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
* inode.
*/
if (src_off + len > size) {
- dout("Copy beyond EOF (%llu + %zu > %llu)\n",
- src_off, len, size);
+ doutc(cl, "Copy beyond EOF (%llu + %zu > %llu)\n", src_off,
+ len, size);
return -EOPNOTSUPP;
}
size = i_size_read(dst_inode);
@@ -2200,6 +2821,54 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
return 0;
}
+static struct ceph_osd_request *
+ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
+ u64 src_snapid,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ struct ceph_object_id *dst_oid,
+ struct ceph_object_locator *dst_oloc,
+ u32 truncate_seq, u64 truncate_size)
+{
+ struct ceph_osd_request *req;
+ int ret;
+ u32 src_fadvise_flags =
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
+ u32 dst_fadvise_flags =
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
+
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->r_flags = CEPH_OSD_FLAG_WRITE;
+
+ ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
+ ceph_oid_copy(&req->r_t.base_oid, dst_oid);
+
+ ret = osd_req_op_copy_from_init(req, src_snapid, 0,
+ src_oid, src_oloc,
+ src_fadvise_flags,
+ dst_fadvise_flags,
+ truncate_seq,
+ truncate_size,
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
+ if (ret)
+ goto out;
+
+ ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return req;
+
+out:
+ ceph_osdc_put_request(req);
+ return ERR_PTR(ret);
+}
+
static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
struct ceph_inode_info *dst_ci, u64 *dst_off,
struct ceph_fs_client *fsc,
@@ -2207,16 +2876,20 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
{
struct ceph_object_locator src_oloc, dst_oloc;
struct ceph_object_id src_oid, dst_oid;
- size_t bytes = 0;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *req;
+ ssize_t bytes = 0;
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
u32 object_size = src_ci->i_layout.object_size;
+ struct ceph_client *cl = fsc->client;
int ret;
src_oloc.pool = src_ci->i_layout.pool_id;
src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
dst_oloc.pool = dst_ci->i_layout.pool_id;
dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
+ osdc = &fsc->client->osdc;
while (len >= object_size) {
ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
@@ -2232,24 +2905,30 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
ceph_oid_printf(&dst_oid, "%llx.%08llx",
dst_ci->i_vino.ino, dst_objnum);
/* Do an object remote copy */
- ret = ceph_osdc_copy_from(&fsc->client->osdc,
- src_ci->i_vino.snap, 0,
- &src_oid, &src_oloc,
- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
- &dst_oid, &dst_oloc,
- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
- dst_ci->i_truncate_seq,
- dst_ci->i_truncate_size,
- CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
+ req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
+ &src_oid, &src_oloc,
+ &dst_oid, &dst_oloc,
+ dst_ci->i_truncate_seq,
+ dst_ci->i_truncate_size);
+ if (IS_ERR(req))
+ ret = PTR_ERR(req);
+ else {
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
+ ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ object_size, ret);
+ ceph_osdc_put_request(req);
+ }
if (ret) {
if (ret == -EOPNOTSUPP) {
fsc->have_copy_from2 = false;
- pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
+ pr_notice_client(cl,
+ "OSDs don't support copy-from2; disabling copy offload\n");
}
- dout("ceph_osdc_copy_from returned %d\n", ret);
- if (!bytes)
+ doutc(cl, "returned %d\n", ret);
+ if (bytes <= 0)
bytes = ret;
goto out;
}
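
Copy offload is only attempted when both files use a simple layout (stripe_count == 1, checked earlier in __ceph_copy_file_range) and only for whole objects, so mapping a file offset to an object is plain division. A minimal sketch of that mapping and of the "%llx.%08llx" object name format used above; the helper name and values are made up:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: with stripe_count == 1, a file offset maps to
 * (object number, offset within that object) by dividing by object_size.
 */
static void map_offset(uint64_t off, uint32_t object_size,
		       uint64_t *objnum, uint64_t *objoff)
{
	*objnum = off / object_size;
	*objoff = off % object_size;
}

int main(void)
{
	uint64_t ino = 0x10000001234ull;	/* made-up inode number */
	uint64_t objnum, objoff;

	map_offset(12 * 1024 * 1024, 4 * 1024 * 1024, &objnum, &objoff);
	/* RADOS object names are "<ino hex>.<objnum, 8 hex digits>" */
	printf("%llx.%08llx (objoff=%llu)\n",
	       (unsigned long long)ino, (unsigned long long)objnum,
	       (unsigned long long)objoff);
	return 0;	/* prints 10000001234.00000003 (objoff=0) */
}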
@@ -2274,7 +2953,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
struct ceph_inode_info *src_ci = ceph_inode(src_inode);
struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
struct ceph_cap_flush *prealloc_cf;
- struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
+ struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
+ struct ceph_client *cl = src_fsc->client;
loff_t size;
ssize_t ret = -EIO, bytes;
u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
@@ -2282,7 +2962,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
int src_got = 0, dst_got = 0, err, dirty;
if (src_inode->i_sb != dst_inode->i_sb) {
- struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
+ struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
if (ceph_fsid_compare(&src_fsc->client->fsid,
&dst_fsc->client->fsid)) {
@@ -2317,10 +2997,14 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
(src_ci->i_layout.stripe_count != 1) ||
(dst_ci->i_layout.stripe_count != 1) ||
(src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
- dout("Invalid src/dst files layout\n");
+ doutc(cl, "Invalid src/dst files layout\n");
return -EOPNOTSUPP;
}
+ /* Every encrypted inode gets its own key, so we can't offload them */
+ if (IS_ENCRYPTED(src_inode) || IS_ENCRYPTED(dst_inode))
+ return -EOPNOTSUPP;
+
if (len < src_ci->i_layout.object_size)
return -EOPNOTSUPP; /* no remote copy will be done */
@@ -2331,12 +3015,12 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
/* Start by sync'ing the source and destination files */
ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
if (ret < 0) {
- dout("failed to write src file (%zd)\n", ret);
+ doutc(cl, "failed to write src file (%zd)\n", ret);
goto out;
}
ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
if (ret < 0) {
- dout("failed to write dst file (%zd)\n", ret);
+ doutc(cl, "failed to write dst file (%zd)\n", ret);
goto out;
}
@@ -2348,7 +3032,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
err = get_rd_wr_caps(src_file, &src_got,
dst_file, (dst_off + len), &dst_got);
if (err < 0) {
- dout("get_rd_wr_caps returned %d\n", err);
+ doutc(cl, "get_rd_wr_caps returned %d\n", err);
ret = -EOPNOTSUPP;
goto out;
}
@@ -2358,11 +3042,13 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
goto out_caps;
/* Drop dst file cached pages */
+ ceph_fscache_invalidate(dst_inode, false);
ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
dst_off >> PAGE_SHIFT,
(dst_off + len) >> PAGE_SHIFT);
if (ret < 0) {
- dout("Failed to invalidate inode pages (%zd)\n", ret);
+ doutc(cl, "Failed to invalidate inode pages (%zd)\n",
+ ret);
ret = 0; /* XXX */
}
ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
@@ -2383,18 +3069,18 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* starting at the src_off
*/
if (src_objoff) {
- dout("Initial partial copy of %u bytes\n", src_objlen);
+ doutc(cl, "Initial partial copy of %u bytes\n", src_objlen);
/*
* we need to temporarily drop all caps as we'll be calling
* {read,write}_iter, which will get caps again.
*/
put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
- ret = do_splice_direct(src_file, &src_off, dst_file,
- &dst_off, src_objlen, flags);
+ ret = splice_file_range(src_file, &src_off, dst_file, &dst_off,
+ src_objlen);
/* Abort on short copies or on error */
- if (ret < src_objlen) {
- dout("Failed partial copy (%zd)\n", ret);
+ if (ret < (long)src_objlen) {
+ doutc(cl, "Failed partial copy (%zd)\n", ret);
goto out;
}
len -= ret;
@@ -2416,7 +3102,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
ret = bytes;
goto out_caps;
}
- dout("Copied %zu bytes out of %zu\n", bytes, len);
+ doutc(cl, "Copied %zu bytes out of %zu\n", bytes, len);
len -= bytes;
ret += bytes;
@@ -2427,11 +3113,10 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
/* Let the MDS know about dst file size change */
if (ceph_inode_set_size(dst_inode, dst_off) ||
ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
- ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
+ ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
}
/* Mark Fw dirty */
spin_lock(&dst_ci->i_ceph_lock);
- dst_ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
spin_unlock(&dst_ci->i_ceph_lock);
if (dirty)
@@ -2445,13 +3130,13 @@ out_caps:
* there were errors in remote object copies (len >= object_size).
*/
if (len && (len < src_ci->i_layout.object_size)) {
- dout("Final partial copy of %zu bytes\n", len);
- bytes = do_splice_direct(src_file, &src_off, dst_file,
- &dst_off, len, flags);
+ doutc(cl, "Final partial copy of %zu bytes\n", len);
+ bytes = splice_file_range(src_file, &src_off, dst_file,
+ &dst_off, len);
if (bytes > 0)
ret += bytes;
else
- dout("Failed partial copy (%zd)\n", bytes);
+ doutc(cl, "Failed partial copy (%zd)\n", bytes);
}
out:
@@ -2470,8 +3155,8 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
len, flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src_file, src_off, dst_file,
- dst_off, len, flags);
+ ret = splice_copy_file_range(src_file, src_off, dst_file,
+ dst_off, len);
return ret;
}
@@ -2481,12 +3166,12 @@ const struct file_operations ceph_file_fops = {
.llseek = ceph_llseek,
.read_iter = ceph_read_iter,
.write_iter = ceph_write_iter,
- .mmap = ceph_mmap,
+ .mmap_prepare = ceph_mmap_prepare,
.fsync = ceph_fsync,
.lock = ceph_lock,
.setlease = simple_nosetlease,
.flock = ceph_flock,
- .splice_read = generic_file_splice_read,
+ .splice_read = ceph_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = ceph_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1c7574105478..2966f88310e3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -14,10 +14,12 @@
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/iversion.h>
+#include <linux/fscrypt.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "crypto.h"
#include <linux/ceph/decode.h>
/*
@@ -33,6 +35,7 @@
*/
static const struct inode_operations ceph_symlink_iops;
+static const struct inode_operations ceph_encrypted_symlink_iops;
static void ceph_inode_work(struct work_struct *work);
@@ -52,60 +55,213 @@ static int ceph_set_ino_cb(struct inode *inode, void *data)
return 0;
}
-struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
+/*
+ * Check if the parent inode matches the vino from directory reply info
+ */
+static inline bool ceph_vino_matches_parent(struct inode *parent,
+ struct ceph_vino vino)
+{
+ return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap;
+}
+
+/*
+ * Validate that the directory inode referenced by @req->r_parent matches the
+ * inode number and snapshot id contained in the reply's directory record. If
+ * they do not match – which can theoretically happen if the parent dentry was
+ * moved between the time the request was issued and the reply arrived – fall
+ * back to looking up the correct inode in the inode cache.
+ *
+ * A reference is *always* returned. Callers that receive a different inode
+ * than the original @parent are responsible for dropping the extra reference
+ * once the reply has been processed.
+ */
+static struct inode *ceph_get_reply_dir(struct super_block *sb,
+ struct inode *parent,
+ struct ceph_mds_reply_info_parsed *rinfo)
{
+ struct ceph_vino vino;
+
+ if (unlikely(!rinfo->diri.in))
+ return parent; /* nothing to compare against */
+
+ /* If we didn't have a cached parent inode to begin with, just bail out. */
+ if (!parent)
+ return NULL;
+
+ vino.ino = le64_to_cpu(rinfo->diri.in->ino);
+ vino.snap = le64_to_cpu(rinfo->diri.in->snapid);
+
+ if (likely(ceph_vino_matches_parent(parent, vino)))
+ return parent; /* matches – use the original reference */
+
+ /* Mismatch – this should be rare. Emit a WARN and obtain the correct inode. */
+ WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n",
+ ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap);
+
+ return ceph_get_inode(sb, vino, NULL);
+}
+
+/**
+ * ceph_new_inode - allocate a new inode in advance of an expected create
+ * @dir: parent directory for new inode
+ * @dentry: dentry that may eventually point to new inode
+ * @mode: mode of new inode
+ * @as_ctx: pointer to inherited security context
+ *
+ * Allocate a new inode in advance of an operation to create a new inode.
+ * This allocates the inode and sets up the acl_sec_ctx with appropriate
+ * info for the new inode.
+ *
+ * Returns a pointer to the new inode or an ERR_PTR.
+ */
+struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
+ umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
+{
+ int err;
+ struct inode *inode;
+
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
+
+ if (!S_ISLNK(*mode)) {
+ err = ceph_pre_init_acls(dir, mode, as_ctx);
+ if (err < 0)
+ goto out_err;
+ }
+
+ inode_state_assign_raw(inode, 0);
+ inode->i_mode = *mode;
+
+ err = ceph_security_init_secctx(dentry, *mode, as_ctx);
+ if (err < 0)
+ goto out_err;
+
+ /*
+ * We'll skip setting fscrypt context for snapshots, leaving that for
+ * the handle_reply().
+ */
+ if (ceph_snap(dir) != CEPH_SNAPDIR) {
+ err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
+ if (err)
+ goto out_err;
+ }
+
+ return inode;
+out_err:
+ iput(inode);
+ return ERR_PTR(err);
+}
+
+void ceph_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as_ctx)
+{
+ if (as_ctx->pagelist) {
+ req->r_pagelist = as_ctx->pagelist;
+ as_ctx->pagelist = NULL;
+ }
+ ceph_fscrypt_as_ctx_to_req(req, as_ctx);
+}
+
+/**
+ * ceph_get_inode - find or create/hash a new inode
+ * @sb: superblock to search and allocate in
+ * @vino: vino to search for
+ * @newino: optional new inode to insert if one isn't found (may be NULL)
+ *
+ * Search for or insert a new inode into the hash for the given vino, and
+ * return a reference to it. If new is non-NULL, its reference is consumed.
+ */
+struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
+ struct inode *newino)
+{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct inode *inode;
if (ceph_vino_is_reserved(vino))
return ERR_PTR(-EREMOTEIO);
- inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
- ceph_set_ino_cb, &vino);
- if (!inode)
+ if (newino) {
+ inode = inode_insert5(newino, (unsigned long)vino.ino,
+ ceph_ino_compare, ceph_set_ino_cb, &vino);
+ if (inode != newino)
+ iput(newino);
+ } else {
+ inode = iget5_locked(sb, (unsigned long)vino.ino,
+ ceph_ino_compare, ceph_set_ino_cb, &vino);
+ }
+
+ if (!inode) {
+ doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
return ERR_PTR(-ENOMEM);
+ }
- dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
- ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
+ doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
+ ceph_present_inode(inode), ceph_vinop(inode), inode,
+ !!(inode_state_read_once(inode) & I_NEW));
return inode;
}
/*
- * get/constuct snapdir inode for a given directory
+ * get/construct snapdir inode for a given directory
*/
struct inode *ceph_get_snapdir(struct inode *parent)
{
+ struct ceph_client *cl = ceph_inode_to_client(parent);
struct ceph_vino vino = {
.ino = ceph_ino(parent),
.snap = CEPH_SNAPDIR,
};
- struct inode *inode = ceph_get_inode(parent->i_sb, vino);
+ struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
struct ceph_inode_info *ci = ceph_inode(inode);
+ int ret = -ENOTDIR;
if (IS_ERR(inode))
return inode;
if (!S_ISDIR(parent->i_mode)) {
- pr_warn_once("bad snapdir parent type (mode=0%o)\n",
- parent->i_mode);
- return ERR_PTR(-ENOTDIR);
+ pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
+ parent->i_mode);
+ goto err;
}
- if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
- pr_warn_once("bad snapdir inode type (mode=0%o)\n",
- inode->i_mode);
- return ERR_PTR(-ENOTDIR);
+ if (!(inode_state_read_once(inode) & I_NEW) && !S_ISDIR(inode->i_mode)) {
+ pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
+ inode->i_mode);
+ goto err;
}
inode->i_mode = parent->i_mode;
inode->i_uid = parent->i_uid;
inode->i_gid = parent->i_gid;
- inode->i_mtime = parent->i_mtime;
- inode->i_ctime = parent->i_ctime;
- inode->i_atime = parent->i_atime;
+ inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
+ inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
+ inode_set_atime_to_ts(inode, inode_get_atime(parent));
ci->i_rbytes = 0;
ci->i_btime = ceph_inode(parent)->i_btime;
- if (inode->i_state & I_NEW) {
+#ifdef CONFIG_FS_ENCRYPTION
+ /* if encrypted, just borrow fscrypt_auth from parent */
+ if (IS_ENCRYPTED(parent)) {
+ struct ceph_inode_info *pci = ceph_inode(parent);
+
+ ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
+ pci->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (ci->fscrypt_auth) {
+ inode->i_flags |= S_ENCRYPTED;
+ ci->fscrypt_auth_len = pci->fscrypt_auth_len;
+ } else {
+ doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+#endif
+ if (inode_state_read_once(inode) & I_NEW) {
inode->i_op = &ceph_snapdir_iops;
inode->i_fop = &ceph_snapdir_fops;
ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
@@ -113,6 +269,12 @@ struct inode *ceph_get_snapdir(struct inode *parent)
}
return inode;
+err:
+ if ((inode_state_read_once(inode) & I_NEW))
+ discard_new_inode(inode);
+ else
+ iput(inode);
+ return ERR_PTR(ret);
}
const struct inode_operations ceph_file_iops = {
@@ -120,7 +282,7 @@ const struct inode_operations ceph_file_iops = {
.setattr = ceph_setattr,
.getattr = ceph_getattr,
.listxattr = ceph_listxattr,
- .get_acl = ceph_get_acl,
+ .get_inode_acl = ceph_get_acl,
.set_acl = ceph_set_acl,
};
@@ -139,6 +301,8 @@ const struct inode_operations ceph_file_iops = {
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
u32 f)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_frag *frag;
@@ -169,8 +333,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
rb_link_node(&frag->node, parent, p);
rb_insert_color(&frag->node, &ci->i_fragtree);
- dout("get_or_create_frag added %llx.%llx frag %x\n",
- ceph_vinop(&ci->vfs_inode), f);
+ doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
return frag;
}
@@ -203,6 +366,7 @@ struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
struct ceph_inode_frag *pfrag, int *found)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
u32 t = ceph_frag_make(0, 0);
struct ceph_inode_frag *frag;
unsigned nway, i;
@@ -226,8 +390,8 @@ static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
/* choose child */
nway = 1 << frag->split_by;
- dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
- frag->split_by, nway);
+ doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
+ frag->split_by, nway);
for (i = 0; i < nway; i++) {
n = ceph_frag_make_child(t, frag->split_by, i);
if (ceph_frag_contains_value(n, v)) {
@@ -237,7 +401,7 @@ static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
}
BUG_ON(i == nway);
}
- dout("choose_frag(%x) = %x\n", v, t);
+ doutc(cl, "frag(%x) = %x\n", v, t);
return t;
}
@@ -261,6 +425,7 @@ static int ceph_fill_dirfrag(struct inode *inode,
struct ceph_mds_reply_dirfrag *dirinfo)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_frag *frag;
u32 id = le32_to_cpu(dirinfo->frag);
int mds = le32_to_cpu(dirinfo->auth);
@@ -285,14 +450,14 @@ static int ceph_fill_dirfrag(struct inode *inode,
goto out;
if (frag->split_by == 0) {
/* tree leaf, remove */
- dout("fill_dirfrag removed %llx.%llx frag %x"
- " (no ref)\n", ceph_vinop(inode), id);
+ doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
+ inode, ceph_vinop(inode), id);
rb_erase(&frag->node, &ci->i_fragtree);
kfree(frag);
} else {
/* tree branch, keep and clear */
- dout("fill_dirfrag cleared %llx.%llx frag %x"
- " referral\n", ceph_vinop(inode), id);
+ doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
+ inode, ceph_vinop(inode), id);
frag->mds = -1;
frag->ndist = 0;
}
@@ -305,8 +470,9 @@ static int ceph_fill_dirfrag(struct inode *inode,
if (IS_ERR(frag)) {
/* this is not the end of the world; we can continue
with bad/inaccurate delegation info */
- pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
- ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
+ pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
+ inode, ceph_vinop(inode),
+ le32_to_cpu(dirinfo->frag));
err = -ENOMEM;
goto out;
}
@@ -315,8 +481,8 @@ static int ceph_fill_dirfrag(struct inode *inode,
frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
for (i = 0; i < frag->ndist; i++)
frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
- dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
- ceph_vinop(inode), frag->frag, frag->ndist);
+ doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
+ ceph_vinop(inode), frag->frag, frag->ndist);
out:
mutex_unlock(&ci->i_fragtree_mutex);
@@ -344,6 +510,7 @@ static int ceph_fill_fragtree(struct inode *inode,
struct ceph_frag_tree_head *fragtree,
struct ceph_mds_reply_dirfrag *dirinfo)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_inode_frag *frag, *prev_frag = NULL;
struct rb_node *rb_node;
@@ -356,7 +523,7 @@ static int ceph_fill_fragtree(struct inode *inode,
if (nsplits != ci->i_fragtree_nsplits) {
update = true;
} else if (nsplits) {
- i = prandom_u32() % nsplits;
+ i = get_random_u32_below(nsplits);
id = le32_to_cpu(fragtree->splits[i].frag);
if (!__ceph_find_frag(ci, id))
update = true;
@@ -379,15 +546,15 @@ static int ceph_fill_fragtree(struct inode *inode,
frag_tree_split_cmp, NULL);
}
- dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
rb_node = rb_first(&ci->i_fragtree);
for (i = 0; i < nsplits; i++) {
id = le32_to_cpu(fragtree->splits[i].frag);
split_by = le32_to_cpu(fragtree->splits[i].by);
if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
- pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
- "frag %x split by %d\n", ceph_vinop(inode),
- i, nsplits, id, split_by);
+ pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
+ "frag %x split by %d\n", inode,
+ ceph_vinop(inode), i, nsplits, id, split_by);
continue;
}
frag = NULL;
@@ -419,7 +586,7 @@ static int ceph_fill_fragtree(struct inode *inode,
if (frag->split_by == 0)
ci->i_fragtree_nsplits++;
frag->split_by = split_by;
- dout(" frag %x split by %d\n", frag->frag, frag->split_by);
+ doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
prev_frag = frag;
}
while (rb_node) {
@@ -444,14 +611,18 @@ out_unlock:
*/
struct inode *ceph_alloc_inode(struct super_block *sb)
{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_inode_info *ci;
int i;
- ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
+ ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
if (!ci)
return NULL;
- dout("alloc_inode %p\n", &ci->vfs_inode);
+ doutc(fsc->client, "%p\n", &ci->netfs.inode);
+
+ /* Set parameters for the netfs library */
+ netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
spin_lock_init(&ci->i_ceph_lock);
@@ -508,6 +679,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_truncate_seq = 0;
ci->i_truncate_size = 0;
ci->i_truncate_pending = 0;
+ ci->i_truncate_pagecache_size = 0;
ci->i_max_size = 0;
ci->i_reported_size = 0;
@@ -538,10 +710,12 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
INIT_WORK(&ci->i_work, ceph_inode_work);
ci->i_work_mask = 0;
memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
-
- ceph_fscache_inode_init(ci);
-
- return &ci->vfs_inode;
+#ifdef CONFIG_FS_ENCRYPTION
+ ci->i_crypt_info = NULL;
+ ci->fscrypt_auth = NULL;
+ ci->fscrypt_auth_len = 0;
+#endif
+ return &ci->netfs.inode;
}
void ceph_free_inode(struct inode *inode)
@@ -549,6 +723,10 @@ void ceph_free_inode(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
kfree(ci->i_symlink);
+#ifdef CONFIG_FS_ENCRYPTION
+ kfree(ci->fscrypt_auth);
+#endif
+ fscrypt_free_inode(inode);
kmem_cache_free(ceph_inode_cachep, ci);
}
@@ -556,21 +734,26 @@ void ceph_evict_inode(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_frag *frag;
struct rb_node *n;
- dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+ doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
percpu_counter_dec(&mdsc->metric.total_inodes);
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
+ if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
+ ceph_fscache_unuse_cookie(inode, true);
clear_inode(inode);
ceph_fscache_unregister_inode_cookie(ci);
+ fscrypt_put_encryption_info(inode);
__ceph_remove_caps(ci);
- if (__ceph_has_any_quota(ci))
+ if (__ceph_has_quota(ci, QUOTA_GET_ANY))
ceph_adjust_quota_realms_count(inode, false);
/*
@@ -579,8 +762,8 @@ void ceph_evict_inode(struct inode *inode)
*/
if (ci->i_snap_realm) {
if (ceph_snap(inode) == CEPH_NOSNAP) {
- dout(" dropping residual ref to snap realm %p\n",
- ci->i_snap_realm);
+ doutc(cl, " dropping residual ref to snap realm %p\n",
+ ci->i_snap_realm);
ceph_change_snap_realm(inode, NULL);
} else {
ceph_put_snapid_map(mdsc, ci->i_snapid_map);
@@ -621,29 +804,34 @@ static inline blkcnt_t calc_inode_blocks(u64 size)
int ceph_fill_file_size(struct inode *inode, int issued,
u32 truncate_seq, u64 truncate_size, u64 size)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int queue_trunc = 0;
loff_t isize = i_size_read(inode);
if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
(truncate_seq == ci->i_truncate_seq && size > isize)) {
- dout("size %lld -> %llu\n", isize, size);
+ doutc(cl, "size %lld -> %llu\n", isize, size);
if (size > 0 && S_ISDIR(inode->i_mode)) {
- pr_err("fill_file_size non-zero size for directory\n");
+ pr_err_client(cl, "non-zero size for directory\n");
size = 0;
}
i_size_write(inode, size);
inode->i_blocks = calc_inode_blocks(size);
+ /*
+ * If we're expanding, then we should be able to just update
+ * the existing cookie.
+ */
+ if (size > isize)
+ ceph_fscache_update(inode);
ci->i_reported_size = size;
if (truncate_seq != ci->i_truncate_seq) {
- dout("truncate_seq %u -> %u\n",
- ci->i_truncate_seq, truncate_seq);
+ doutc(cl, "truncate_seq %u -> %u\n",
+ ci->i_truncate_seq, truncate_seq);
ci->i_truncate_seq = truncate_seq;
/* the MDS should have revoked these caps */
- WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR |
+ WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
CEPH_CAP_FILE_LAZYIO));
/*
* If we hold relevant caps, or in the case where we're
@@ -660,16 +848,28 @@ int ceph_fill_file_size(struct inode *inode, int issued,
}
}
}
- if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
- ci->i_truncate_size != truncate_size) {
- dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
- truncate_size);
- ci->i_truncate_size = truncate_size;
- }
- if (queue_trunc)
- ceph_fscache_invalidate(inode);
+ /*
+ * It's possible that the new sizes of the two consecutive
+ * size truncations will be in the same fscrypt last block,
+ * and we need to truncate the corresponding page caches
+ * anyway.
+ */
+ if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
+ doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
+ ci->i_truncate_size, truncate_size,
+ !!IS_ENCRYPTED(inode));
+
+ ci->i_truncate_size = truncate_size;
+ if (IS_ENCRYPTED(inode)) {
+ doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
+ ci->i_truncate_pagecache_size, size);
+ ci->i_truncate_pagecache_size = size;
+ } else {
+ ci->i_truncate_pagecache_size = truncate_size;
+ }
+ }
return queue_trunc;
}
@@ -677,7 +877,11 @@ void ceph_fill_file_time(struct inode *inode, int issued,
u64 time_warp_seq, struct timespec64 *ctime,
struct timespec64 *mtime, struct timespec64 *atime)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct timespec64 iatime = inode_get_atime(inode);
+ struct timespec64 ictime = inode_get_ctime(inode);
+ struct timespec64 imtime = inode_get_mtime(inode);
int warn = 0;
if (issued & (CEPH_CAP_FILE_EXCL|
@@ -686,39 +890,28 @@ void ceph_fill_file_time(struct inode *inode, int issued,
CEPH_CAP_AUTH_EXCL|
CEPH_CAP_XATTR_EXCL)) {
if (ci->i_version == 0 ||
- timespec64_compare(ctime, &inode->i_ctime) > 0) {
- dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
- inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
- ctime->tv_sec, ctime->tv_nsec);
- inode->i_ctime = *ctime;
+ timespec64_compare(ctime, &ictime) > 0) {
+ doutc(cl, "ctime %ptSp -> %ptSp inc w/ cap\n", &ictime, ctime);
+ inode_set_ctime_to_ts(inode, *ctime);
}
if (ci->i_version == 0 ||
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
/* the MDS did a utimes() */
- dout("mtime %lld.%09ld -> %lld.%09ld "
- "tw %d -> %d\n",
- inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
- mtime->tv_sec, mtime->tv_nsec,
- ci->i_time_warp_seq, (int)time_warp_seq);
-
- inode->i_mtime = *mtime;
- inode->i_atime = *atime;
+ doutc(cl, "mtime %ptSp -> %ptSp tw %d -> %d\n", &imtime, mtime,
+ ci->i_time_warp_seq, (int)time_warp_seq);
+
+ inode_set_mtime_to_ts(inode, *mtime);
+ inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else if (time_warp_seq == ci->i_time_warp_seq) {
/* nobody did utimes(); take the max */
- if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
- dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
- inode->i_mtime.tv_sec,
- inode->i_mtime.tv_nsec,
- mtime->tv_sec, mtime->tv_nsec);
- inode->i_mtime = *mtime;
+ if (timespec64_compare(mtime, &imtime) > 0) {
+ doutc(cl, "mtime %ptSp -> %ptSp inc\n", &imtime, mtime);
+ inode_set_mtime_to_ts(inode, *mtime);
}
- if (timespec64_compare(atime, &inode->i_atime) > 0) {
- dout("atime %lld.%09ld -> %lld.%09ld inc\n",
- inode->i_atime.tv_sec,
- inode->i_atime.tv_nsec,
- atime->tv_sec, atime->tv_nsec);
- inode->i_atime = *atime;
+ if (timespec64_compare(atime, &iatime) > 0) {
+ doutc(cl, "atime %ptSp -> %ptSp inc\n", &iatime, atime);
+ inode_set_atime_to_ts(inode, *atime);
}
} else if (issued & CEPH_CAP_FILE_EXCL) {
/* we did a utimes(); ignore mds values */
@@ -728,19 +921,53 @@ void ceph_fill_file_time(struct inode *inode, int issued,
} else {
/* we have no write|excl caps; whatever the MDS says is true */
if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
- inode->i_ctime = *ctime;
- inode->i_mtime = *mtime;
- inode->i_atime = *atime;
+ inode_set_ctime_to_ts(inode, *ctime);
+ inode_set_mtime_to_ts(inode, *mtime);
+ inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else {
warn = 1;
}
}
if (warn) /* time_warp_seq shouldn't go backwards */
- dout("%p mds time_warp_seq %llu < %u\n",
- inode, time_warp_seq, ci->i_time_warp_seq);
+ doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
+ time_warp_seq, ci->i_time_warp_seq);
}
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
+ const char *encsym,
+ int enclen, u8 **decsym)
+{
+ struct ceph_client *cl = mdsc->fsc->client;
+ int declen;
+ u8 *sym;
+
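+ /* base64 never expands the data, so enclen + 1 bytes also covers the trailing NUL */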
+ sym = kmalloc(enclen + 1, GFP_NOFS);
+ if (!sym)
+ return -ENOMEM;
+
+ declen = base64_decode(encsym, enclen, sym, false, BASE64_IMAP);
+ if (declen < 0) {
+ pr_err_client(cl,
+ "can't decode symlink (%d). Content: %.*s\n",
+ declen, enclen, encsym);
+ kfree(sym);
+ return -EIO;
+ }
+ sym[declen] = '\0';
+ *decsym = sym;
+ return declen;
+}
+#else
+static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
+ const char *encsym,
+ int symlen, u8 **decsym)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/*
* Populate an inode based on info from mds. May be called on new or
* existing inodes.
@@ -752,6 +979,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
struct ceph_cap_reservation *caps_reservation)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_reply_inode *info = iinfo->in;
struct ceph_inode_info *ci = ceph_inode(inode);
int issued, new_issued, info_caps;
@@ -770,25 +998,26 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
lockdep_assert_held(&mdsc->snap_rwsem);
- dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
- inode, ceph_vinop(inode), le64_to_cpu(info->version),
- ci->i_version);
+ doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
+ le64_to_cpu(info->version), ci->i_version);
/* Once I_NEW is cleared, we can't change type or dev numbers */
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
inode->i_mode = mode;
} else {
if (inode_wrong_type(inode, mode)) {
- pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
- ceph_vinop(inode), inode->i_mode, mode);
+ pr_warn_once_client(cl,
+ "inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
+ ceph_vinop(inode), inode->i_mode, mode);
return -ESTALE;
}
if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
- pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
- ceph_vinop(inode), MAJOR(inode->i_rdev),
- MINOR(inode->i_rdev), MAJOR(rdev),
- MINOR(rdev));
+ pr_warn_once_client(cl,
+ "dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
+ ceph_vinop(inode), MAJOR(inode->i_rdev),
+ MINOR(inode->i_rdev), MAJOR(rdev),
+ MINOR(rdev));
return -ESTALE;
}
}
@@ -810,8 +1039,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (iinfo->xattr_len > 4) {
xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
if (!xattr_blob)
- pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
- iinfo->xattr_len);
+ pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
+ iinfo->xattr_len);
}
if (iinfo->pool_ns_len > 0)
@@ -846,27 +1075,42 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
issued |= __ceph_caps_dirty(ci);
new_issued = ~issued & info_caps;
- /* directories have fl_stripe_unit set to zero */
- if (le32_to_cpu(info->layout.fl_stripe_unit))
- inode->i_blkbits =
- fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
- else
- inode->i_blkbits = CEPH_BLOCK_SHIFT;
-
__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
+#ifdef CONFIG_FS_ENCRYPTION
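+ /* adopt the fscrypt_auth blob from the MDS reply; once set it is never changed */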
+ if (iinfo->fscrypt_auth_len &&
+ ((inode_state_read_once(inode) & I_NEW) || (ci->fscrypt_auth_len == 0))) {
+ kfree(ci->fscrypt_auth);
+ ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
+ ci->fscrypt_auth = iinfo->fscrypt_auth;
+ iinfo->fscrypt_auth = NULL;
+ iinfo->fscrypt_auth_len = 0;
+ inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
+ }
+#endif
+
if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
(issued & CEPH_CAP_AUTH_EXCL) == 0) {
inode->i_mode = mode;
inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
- dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
- from_kuid(&init_user_ns, inode->i_uid),
- from_kgid(&init_user_ns, inode->i_gid));
+ doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
+ ceph_vinop(inode), inode->i_mode,
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kgid(&init_user_ns, inode->i_gid));
ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
}
+ /* directories have fl_stripe_unit set to zero */
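+ /* encrypted inodes do their I/O in CEPH_FSCRYPT_BLOCK_SIZE units, so report that as the block size */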
+ if (IS_ENCRYPTED(inode))
+ inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
+ else if (le32_to_cpu(info->layout.fl_stripe_unit))
+ inode->i_blkbits =
+ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ else
+ inode->i_blkbits = CEPH_BLOCK_SHIFT;
+
if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
(issued & CEPH_CAP_LINK_EXCL) == 0)
set_nlink(inode, le32_to_cpu(info->nlink));
@@ -888,6 +1132,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (new_version ||
(new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
+ u64 size = le64_to_cpu(info->size);
s64 old_pool = ci->i_layout.pool_id;
struct ceph_string *old_ns;
@@ -901,15 +1146,28 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
pool_ns = old_ns;
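+ /* for encrypted files the MDS reports a block-aligned size; the real size is carried in fscrypt_file */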
+ if (IS_ENCRYPTED(inode) && size &&
+ iinfo->fscrypt_file_len == sizeof(__le64)) {
+ u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
+
+ if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
+ size = fsize;
+ } else {
+ pr_warn_client(cl,
+ "fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
+ size, fsize);
+ }
+ }
+
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(info->truncate_seq),
le64_to_cpu(info->truncate_size),
- le64_to_cpu(info->size));
+ size);
/* only update max_size on auth cap */
if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
ci->i_max_size != le64_to_cpu(info->max_size)) {
- dout("max_size %lld -> %llu\n", ci->i_max_size,
- le64_to_cpu(info->max_size));
+ doutc(cl, "max_size %lld -> %llu\n",
+ ci->i_max_size, le64_to_cpu(info->max_size));
ci->i_max_size = le64_to_cpu(info->max_size);
}
}
@@ -964,26 +1222,45 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
inode->i_fop = &ceph_file_fops;
break;
case S_IFLNK:
- inode->i_op = &ceph_symlink_iops;
if (!ci->i_symlink) {
u32 symlen = iinfo->symlink_len;
char *sym;
spin_unlock(&ci->i_ceph_lock);
- if (symlen != i_size_read(inode)) {
- pr_err("%s %llx.%llx BAD symlink "
- "size %lld\n", __func__,
- ceph_vinop(inode),
- i_size_read(inode));
+ if (IS_ENCRYPTED(inode)) {
+ if (symlen != i_size_read(inode))
+ pr_err_client(cl,
+ "%p %llx.%llx BAD symlink size %lld\n",
+ inode, ceph_vinop(inode),
+ i_size_read(inode));
+
+ err = decode_encrypted_symlink(mdsc, iinfo->symlink,
+ symlen, (u8 **)&sym);
+ if (err < 0) {
+ pr_err_client(cl,
+ "decoding encrypted symlink failed: %d\n",
+ err);
+ goto out;
+ }
+ symlen = err;
i_size_write(inode, symlen);
inode->i_blocks = calc_inode_blocks(symlen);
- }
+ } else {
+ if (symlen != i_size_read(inode)) {
+ pr_err_client(cl,
+ "%p %llx.%llx BAD symlink size %lld\n",
+ inode, ceph_vinop(inode),
+ i_size_read(inode));
+ i_size_write(inode, symlen);
+ inode->i_blocks = calc_inode_blocks(symlen);
+ }
- err = -ENOMEM;
- sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
- if (!sym)
- goto out;
+ err = -ENOMEM;
+ sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
+ if (!sym)
+ goto out;
+ }
spin_lock(&ci->i_ceph_lock);
if (!ci->i_symlink)
@@ -991,15 +1268,25 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
else
kfree(sym); /* lost a race */
}
- inode->i_link = ci->i_symlink;
+
+ if (IS_ENCRYPTED(inode)) {
+ /*
+ * Encrypted symlinks need to be decrypted before we can
+ * cache their targets in i_link. Don't touch it here.
+ */
+ inode->i_op = &ceph_encrypted_symlink_iops;
+ } else {
+ inode->i_link = ci->i_symlink;
+ inode->i_op = &ceph_symlink_iops;
+ }
break;
case S_IFDIR:
inode->i_op = &ceph_dir_iops;
inode->i_fop = &ceph_dir_fops;
break;
default:
- pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
- ceph_vinop(inode), inode->i_mode);
+ pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
+ ceph_vinop(inode), inode->i_mode);
}
/* were we issued a capability? */
@@ -1020,7 +1307,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
(info_caps & CEPH_CAP_FILE_SHARED) &&
(issued & CEPH_CAP_FILE_EXCL) == 0 &&
!__ceph_dir_is_complete(ci)) {
- dout(" marking %p complete (empty)\n", inode);
+ doutc(cl, " marking %p complete (empty)\n",
+ inode);
i_size_write(inode, 0);
__ceph_dir_set_complete(ci,
atomic64_read(&ci->i_release_count),
@@ -1029,8 +1317,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
wake = true;
} else {
- dout(" %p got snap_caps %s\n", inode,
- ceph_cap_string(info_caps));
+ doutc(cl, " %p got snap_caps %s\n", inode,
+ ceph_cap_string(info_caps));
ci->i_snap_caps |= info_caps;
}
}
@@ -1039,20 +1327,22 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
iinfo->inline_version >= ci->i_inline_version) {
int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
ci->i_inline_version = iinfo->inline_version;
- if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ if (ceph_has_inline_data(ci) &&
(locked_page || (info_caps & cache_caps)))
fill_inline = true;
}
if (cap_fmode >= 0) {
if (!info_caps)
- pr_warn("mds issued no caps on %llx.%llx\n",
- ceph_vinop(inode));
+ pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
+ ceph_vinop(inode));
__ceph_touch_fmode(ci, mdsc, cap_fmode);
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_fscache_register_inode_cookie(inode);
+
if (fill_inline)
ceph_fill_inline_data(inode, locked_page,
iinfo->inline_data, iinfo->inline_len);
@@ -1091,14 +1381,14 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
unsigned long from_time,
struct ceph_mds_session **old_lease_session)
{
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
unsigned mask = le16_to_cpu(lease->mask);
long unsigned duration = le32_to_cpu(lease->duration_ms);
long unsigned ttl = from_time + (duration * HZ) / 1000;
long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
- dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
- dentry, duration, ttl);
+ doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
/* only track leases on regular dentries */
if (ceph_snap(dir) != CEPH_NOSNAP)
@@ -1195,10 +1485,11 @@ out_unlock:
/*
* splice a dentry to an inode.
- * caller must hold directory i_mutex for this to be safe.
+ * caller must hold directory i_rwsem for this to be safe.
*/
static int splice_dentry(struct dentry **pdn, struct inode *in)
{
+ struct ceph_client *cl = ceph_inode_to_client(in);
struct dentry *dn = *pdn;
struct dentry *realdn;
@@ -1230,23 +1521,21 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
d_drop(dn);
realdn = d_splice_alias(in, dn);
if (IS_ERR(realdn)) {
- pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
- PTR_ERR(realdn), dn, in, ceph_vinop(in));
+ pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
+ PTR_ERR(realdn), dn, in, ceph_vinop(in));
return PTR_ERR(realdn);
}
if (realdn) {
- dout("dn %p (%d) spliced with %p (%d) "
- "inode %p ino %llx.%llx\n",
- dn, d_count(dn),
- realdn, d_count(realdn),
- d_inode(realdn), ceph_vinop(d_inode(realdn)));
+ doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
+ dn, d_count(dn), realdn, d_count(realdn),
+ d_inode(realdn), ceph_vinop(d_inode(realdn)));
dput(dn);
*pdn = realdn;
} else {
BUG_ON(!ceph_dentry(dn));
- dout("dn %p attached to %p ino %llx.%llx\n",
- dn, d_inode(dn), ceph_vinop(d_inode(dn)));
+ doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
+ d_inode(dn), ceph_vinop(d_inode(dn)));
}
return 0;
}
@@ -1268,24 +1557,33 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct inode *in = NULL;
struct ceph_vino tvino, dvino;
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ struct ceph_client *cl = fsc->client;
+ struct inode *parent_dir = NULL;
int err = 0;
- dout("fill_trace %p is_dentry %d is_target %d\n", req,
- rinfo->head->is_dentry, rinfo->head->is_target);
+ doutc(cl, "%p is_dentry %d is_target %d\n", req,
+ rinfo->head->is_dentry, rinfo->head->is_target);
if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
- dout("fill_trace reply is empty!\n");
+ doutc(cl, "reply is empty!\n");
if (rinfo->head->result == 0 && req->r_parent)
ceph_invalidate_dir_request(req);
return 0;
}
if (rinfo->head->is_dentry) {
- struct inode *dir = req->r_parent;
-
- if (dir) {
- err = ceph_fill_inode(dir, NULL, &rinfo->diri,
+ /*
+ * r_parent may be stale when R_PARENT_LOCKED is not set,
+ * so we need to look up the correct inode.
+ */
+ parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
+ if (unlikely(IS_ERR(parent_dir))) {
+ err = PTR_ERR(parent_dir);
+ goto done;
+ }
+ if (parent_dir) {
+ err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri,
rinfo->dirfrag, session, -1,
&req->r_caps_reservation);
if (err < 0)
@@ -1294,48 +1592,74 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
WARN_ON_ONCE(1);
}
- if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
+ if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
+ bool is_nokey = false;
struct qstr dname;
struct dentry *dn, *parent;
+ struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+ struct ceph_fname fname = { .dir = parent_dir,
+ .name = rinfo->dname,
+ .ctext = rinfo->altname,
+ .name_len = rinfo->dname_len,
+ .ctext_len = rinfo->altname_len };
BUG_ON(!rinfo->head->is_target);
BUG_ON(req->r_dentry);
- parent = d_find_any_alias(dir);
+ parent = d_find_any_alias(parent_dir);
BUG_ON(!parent);
- dname.name = rinfo->dname;
- dname.len = rinfo->dname_len;
+ err = ceph_fname_alloc_buffer(parent_dir, &oname);
+ if (err < 0) {
+ dput(parent);
+ goto done;
+ }
+
+ err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
+ if (err < 0) {
+ dput(parent);
+ ceph_fname_free_buffer(parent_dir, &oname);
+ goto done;
+ }
+ dname.name = oname.name;
+ dname.len = oname.len;
dname.hash = full_name_hash(parent, dname.name, dname.len);
tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
dn = d_lookup(parent, &dname);
- dout("d_lookup on parent=%p name=%.*s got %p\n",
- parent, dname.len, dname.name, dn);
+ doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
+ parent, dname.len, dname.name, dn);
if (!dn) {
dn = d_alloc(parent, &dname);
- dout("d_alloc %p '%.*s' = %p\n", parent,
- dname.len, dname.name, dn);
+ doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
+ dname.len, dname.name, dn);
if (!dn) {
dput(parent);
+ ceph_fname_free_buffer(parent_dir, &oname);
err = -ENOMEM;
goto done;
}
+ if (is_nokey) {
+ spin_lock(&dn->d_lock);
+ dn->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dn->d_lock);
+ }
err = 0;
} else if (d_really_is_positive(dn) &&
(ceph_ino(d_inode(dn)) != tvino.ino ||
ceph_snap(d_inode(dn)) != tvino.snap)) {
- dout(" dn %p points to wrong inode %p\n",
- dn, d_inode(dn));
- ceph_dir_clear_ordered(dir);
+ doutc(cl, " dn %p points to wrong inode %p\n",
+ dn, d_inode(dn));
+ ceph_dir_clear_ordered(parent_dir);
d_delete(dn);
dput(dn);
goto retry_lookup;
}
+ ceph_fname_free_buffer(parent_dir, &oname);
req->r_dentry = dn;
dput(parent);
@@ -1354,16 +1678,16 @@ retry_lookup:
rinfo->head->result == 0) ? req->r_fmode : -1,
&req->r_caps_reservation);
if (err < 0) {
- pr_err("ceph_fill_inode badness %p %llx.%llx\n",
- in, ceph_vinop(in));
+ pr_err_client(cl, "badness %p %llx.%llx\n", in,
+ ceph_vinop(in));
req->r_target_inode = NULL;
- if (in->i_state & I_NEW)
+ if (inode_state_read_once(in) & I_NEW)
discard_new_inode(in);
else
iput(in);
goto done;
}
- if (in->i_state & I_NEW)
+ if (inode_state_read_once(in) & I_NEW)
unlock_new_inode(in);
}
@@ -1405,36 +1729,32 @@ retry_lookup:
have_lease = have_dir_cap ||
le32_to_cpu(rinfo->dlease->duration_ms);
if (!have_lease)
- dout("fill_trace no dentry lease or dir cap\n");
+ doutc(cl, "no dentry lease or dir cap\n");
/* rename? */
if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
struct inode *olddir = req->r_old_dentry_dir;
BUG_ON(!olddir);
- dout(" src %p '%pd' dst %p '%pd'\n",
- req->r_old_dentry,
- req->r_old_dentry,
- dn, dn);
- dout("fill_trace doing d_move %p -> %p\n",
- req->r_old_dentry, dn);
+ doutc(cl, " src %p '%pd' dst %p '%pd'\n",
+ req->r_old_dentry, req->r_old_dentry, dn, dn);
+ doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
/* d_move screws up sibling dentries' offsets */
ceph_dir_clear_ordered(dir);
ceph_dir_clear_ordered(olddir);
d_move(req->r_old_dentry, dn);
- dout(" src %p '%pd' dst %p '%pd'\n",
- req->r_old_dentry,
- req->r_old_dentry,
- dn, dn);
+ doutc(cl, " src %p '%pd' dst %p '%pd'\n",
+ req->r_old_dentry, req->r_old_dentry, dn, dn);
/* ensure target dentry is invalidated, despite
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(dn);
- dout("dn %p gets new offset %lld\n", req->r_old_dentry,
- ceph_dentry(req->r_old_dentry)->offset);
+ doutc(cl, "dn %p gets new offset %lld\n",
+ req->r_old_dentry,
+ ceph_dentry(req->r_old_dentry)->offset);
/* swap r_dentry and r_old_dentry in case that
* splice_dentry() gets called later. This is safe
@@ -1446,18 +1766,25 @@ retry_lookup:
/* null dentry? */
if (!rinfo->head->is_target) {
- dout("fill_trace null dentry\n");
+ doutc(cl, "null dentry\n");
if (d_really_is_positive(dn)) {
- dout("d_delete %p\n", dn);
+ doutc(cl, "d_delete %p\n", dn);
ceph_dir_clear_ordered(dir);
d_delete(dn);
} else if (have_lease) {
if (d_unhashed(dn))
d_add(dn, NULL);
+ }
+
+ if (!d_unhashed(dn) && have_lease)
update_dentry_lease(dir, dn,
rinfo->dlease, session,
req->r_request_started);
- }
+ goto done;
+ }
+
+ if (unlikely(!in)) {
+ err = -EINVAL;
goto done;
}
@@ -1470,9 +1797,9 @@ retry_lookup:
goto done;
dn = req->r_dentry; /* may have spliced */
} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
- dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
- dn, d_inode(dn), ceph_vinop(d_inode(dn)),
- ceph_vinop(in));
+ doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
+ dn, d_inode(dn), ceph_vinop(d_inode(dn)),
+ ceph_vinop(in));
d_invalidate(dn);
have_lease = false;
}
@@ -1482,7 +1809,7 @@ retry_lookup:
rinfo->dlease, session,
req->r_request_started);
}
- dout(" final dn %p\n", dn);
+ doutc(cl, " final dn %p\n", dn);
} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req->r_op == CEPH_MDS_OP_MKSNAP) &&
test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
@@ -1493,14 +1820,21 @@ retry_lookup:
BUG_ON(!dir);
BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
BUG_ON(!req->r_dentry);
- dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
+ doutc(cl, " linking snapped dir %p to dn %p\n", in,
+ req->r_dentry);
ceph_dir_clear_ordered(dir);
+
+ if (unlikely(!in)) {
+ err = -EINVAL;
+ goto done;
+ }
+
ihold(in);
err = splice_dentry(&req->r_dentry, in);
if (err < 0)
goto done;
} else if (rinfo->head->is_dentry && req->r_dentry) {
- /* parent inode is not locked, be carefull */
+ /* parent inode is not locked, be careful */
struct ceph_vino *ptvino = NULL;
dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
@@ -1515,7 +1849,10 @@ retry_lookup:
&dvino, ptvino);
}
done:
- dout("fill_trace done err=%d\n", err);
+ /* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */
+ if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent))
+ iput(parent_dir);
+ doutc(cl, "done err=%d\n", err);
return err;
}
@@ -1526,6 +1863,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
int i, err = 0;
for (i = 0; i < rinfo->dir_nr; i++) {
@@ -1537,23 +1875,23 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
vino.ino = le64_to_cpu(rde->inode.in->ino);
vino.snap = le64_to_cpu(rde->inode.in->snapid);
- in = ceph_get_inode(req->r_dentry->d_sb, vino);
+ in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
if (IS_ERR(in)) {
err = PTR_ERR(in);
- dout("new_inode badness got %d\n", err);
+ doutc(cl, "badness got %d\n", err);
continue;
}
rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
-1, &req->r_caps_reservation);
if (rc < 0) {
- pr_err("ceph_fill_inode badness on %p got %d\n",
- in, rc);
+ pr_err_client(cl, "inode badness on %p got %d\n", in,
+ rc);
err = rc;
- if (in->i_state & I_NEW) {
+ if (inode_state_read_once(in) & I_NEW) {
ihold(in);
discard_new_inode(in);
}
- } else if (in->i_state & I_NEW) {
+ } else if (inode_state_read_once(in) & I_NEW) {
unlock_new_inode(in);
}
@@ -1565,10 +1903,9 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
- if (ctl->page) {
- kunmap(ctl->page);
- put_page(ctl->page);
- ctl->page = NULL;
+ if (ctl->folio) {
+ folio_release_kmap(ctl->folio, ctl->dentries);
+ ctl->folio = NULL;
}
}
@@ -1576,36 +1913,43 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
struct ceph_readdir_cache_control *ctl,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_inode_info *ci = ceph_inode(dir);
unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
unsigned idx = ctl->index % nsize;
pgoff_t pgoff = ctl->index / nsize;
- if (!ctl->page || pgoff != page_index(ctl->page)) {
+ if (!ctl->folio || pgoff != ctl->folio->index) {
ceph_readdir_cache_release(ctl);
+ fgf_t fgf = FGP_LOCK;
+
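+ /* only the first index of a folio may create it; later indexes just look it up */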
if (idx == 0)
- ctl->page = grab_cache_page(&dir->i_data, pgoff);
- else
- ctl->page = find_lock_page(&dir->i_data, pgoff);
- if (!ctl->page) {
+ fgf |= FGP_ACCESSED | FGP_CREAT;
+
+ ctl->folio = __filemap_get_folio(&dir->i_data, pgoff,
+ fgf, mapping_gfp_mask(&dir->i_data));
+ if (IS_ERR(ctl->folio)) {
+ int err = PTR_ERR(ctl->folio);
+
+ ctl->folio = NULL;
ctl->index = -1;
- return idx == 0 ? -ENOMEM : 0;
+ return idx == 0 ? err : 0;
}
/* reading/filling the cache are serialized by
- * i_mutex, no need to use page lock */
- unlock_page(ctl->page);
- ctl->dentries = kmap(ctl->page);
+ * i_rwsem, no need to use folio lock */
+ folio_unlock(ctl->folio);
+ ctl->dentries = kmap_local_folio(ctl->folio, 0);
if (idx == 0)
memset(ctl->dentries, 0, PAGE_SIZE);
}
if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
- dout("readdir cache dn %p idx %d\n", dn, ctl->index);
+ doutc(cl, "dn %p idx %d\n", dn, ctl->index);
ctl->dentries[idx] = dn;
ctl->index++;
} else {
- dout("disable readdir cache\n");
+ doutc(cl, "disable readdir cache\n");
ctl->index = -1;
}
return 0;
@@ -1615,8 +1959,10 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
struct dentry *parent = req->r_dentry;
- struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
+ struct inode *inode = d_inode(parent);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct qstr dname;
struct dentry *dn;
struct inode *in;
@@ -1644,19 +1990,18 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
if (rinfo->dir_dir &&
le32_to_cpu(rinfo->dir_dir->frag) != frag) {
- dout("readdir_prepopulate got new frag %x -> %x\n",
- frag, le32_to_cpu(rinfo->dir_dir->frag));
+ doutc(cl, "got new frag %x -> %x\n", frag,
+ le32_to_cpu(rinfo->dir_dir->frag));
frag = le32_to_cpu(rinfo->dir_dir->frag);
if (!rinfo->hash_order)
req->r_readdir_offset = 2;
}
if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
- dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
- rinfo->dir_nr, parent);
+ doutc(cl, "%d items under SNAPDIR dn %p\n",
+ rinfo->dir_nr, parent);
} else {
- dout("readdir_prepopulate %d items under dn %p\n",
- rinfo->dir_nr, parent);
+ doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
if (rinfo->dir_dir)
ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
@@ -1689,9 +2034,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
tvino.snap = le64_to_cpu(rde->inode.in->snapid);
if (rinfo->hash_order) {
- u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
- rde->name, rde->name_len);
- hash = ceph_frag_value(hash);
+ u32 hash = ceph_frag_value(rde->raw_hash);
if (hash != last_hash)
fpos_offset = 2;
last_hash = hash;
@@ -1702,24 +2045,29 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
retry_lookup:
dn = d_lookup(parent, &dname);
- dout("d_lookup on parent=%p name=%.*s got %p\n",
- parent, dname.len, dname.name, dn);
+ doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
+ parent, dname.len, dname.name, dn);
if (!dn) {
dn = d_alloc(parent, &dname);
- dout("d_alloc %p '%.*s' = %p\n", parent,
- dname.len, dname.name, dn);
+ doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
+ dname.len, dname.name, dn);
if (!dn) {
- dout("d_alloc badness\n");
+ doutc(cl, "d_alloc badness\n");
err = -ENOMEM;
goto out;
}
+ if (rde->is_nokey) {
+ spin_lock(&dn->d_lock);
+ dn->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dn->d_lock);
+ }
} else if (d_really_is_positive(dn) &&
(ceph_ino(d_inode(dn)) != tvino.ino ||
ceph_snap(d_inode(dn)) != tvino.snap)) {
struct ceph_dentry_info *di = ceph_dentry(dn);
- dout(" dn %p points to wrong inode %p\n",
- dn, d_inode(dn));
+ doutc(cl, " dn %p points to wrong inode %p\n",
+ dn, d_inode(dn));
spin_lock(&dn->d_lock);
if (di->offset > 0 &&
@@ -1739,9 +2087,9 @@ retry_lookup:
if (d_really_is_positive(dn)) {
in = d_inode(dn);
} else {
- in = ceph_get_inode(parent->d_sb, tvino);
+ in = ceph_get_inode(parent->d_sb, tvino, NULL);
if (IS_ERR(in)) {
- dout("new_inode badness\n");
+ doutc(cl, "new_inode badness\n");
d_drop(dn);
dput(dn);
err = PTR_ERR(in);
@@ -1752,9 +2100,10 @@ retry_lookup:
ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
-1, &req->r_caps_reservation);
if (ret < 0) {
- pr_err("ceph_fill_inode badness on %p\n", in);
+ pr_err_client(cl, "badness on %p %llx.%llx\n", in,
+ ceph_vinop(in));
if (d_really_is_negative(dn)) {
- if (in->i_state & I_NEW) {
+ if (inode_state_read_once(in) & I_NEW) {
ihold(in);
discard_new_inode(in);
}
@@ -1764,13 +2113,13 @@ retry_lookup:
err = ret;
goto next_item;
}
- if (in->i_state & I_NEW)
+ if (inode_state_read_once(in) & I_NEW)
unlock_new_inode(in);
if (d_really_is_negative(dn)) {
if (ceph_security_xattr_deadlock(in)) {
- dout(" skip splicing dn %p to inode %p"
- " (security xattr deadlock)\n", dn, in);
+ doutc(cl, " skip splicing dn %p to inode %p"
+ " (security xattr deadlock)\n", dn, in);
iput(in);
skipped++;
goto next_item;
@@ -1802,54 +2151,62 @@ out:
req->r_readdir_cache_idx = cache_ctl.index;
}
ceph_readdir_cache_release(&cache_ctl);
- dout("readdir_prepopulate done\n");
+ doutc(cl, "done\n");
return err;
}
bool ceph_inode_set_size(struct inode *inode, loff_t size)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
bool ret;
spin_lock(&ci->i_ceph_lock);
- dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
+ doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
i_size_write(inode, size);
+ ceph_fscache_update(inode);
inode->i_blocks = calc_inode_blocks(size);
ret = __ceph_should_report_size(ci);
spin_unlock(&ci->i_ceph_lock);
+
return ret;
}
void ceph_queue_inode_work(struct inode *inode, int work_bit)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
struct ceph_inode_info *ci = ceph_inode(inode);
set_bit(work_bit, &ci->i_work_mask);
ihold(inode);
if (queue_work(fsc->inode_wq, &ci->i_work)) {
- dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
+ doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
+ ceph_vinop(inode), ci->i_work_mask);
} else {
- dout("queue_inode_work %p already queued, mask=%lx\n",
- inode, ci->i_work_mask);
+ doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
+ inode, ceph_vinop(inode), ci->i_work_mask);
iput(inode);
}
}
static void ceph_do_invalidate_pages(struct inode *inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
u32 orig_gen;
int check = 0;
+ ceph_fscache_invalidate(inode, false);
+
mutex_lock(&ci->i_truncate_mutex);
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
- inode, ceph_ino(inode));
+ if (ceph_inode_is_shutdown(inode)) {
+ pr_warn_ratelimited_client(cl,
+ "%p %llx.%llx is shut down\n", inode,
+ ceph_vinop(inode));
mapping_set_error(inode->i_mapping, -EIO);
truncate_pagecache(inode, 0);
mutex_unlock(&ci->i_truncate_mutex);
@@ -1857,8 +2214,8 @@ static void ceph_do_invalidate_pages(struct inode *inode)
}
spin_lock(&ci->i_ceph_lock);
- dout("invalidate_pages %p gen %d revoking %d\n", inode,
- ci->i_rdcache_gen, ci->i_rdcache_revoking);
+ doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
+ ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
check = 1;
@@ -1869,22 +2226,22 @@ static void ceph_do_invalidate_pages(struct inode *inode)
orig_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
- ceph_fscache_invalidate(inode);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
- pr_err("invalidate_pages %p fails\n", inode);
+ pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
+ ceph_vinop(inode));
}
spin_lock(&ci->i_ceph_lock);
if (orig_gen == ci->i_rdcache_gen &&
orig_gen == ci->i_rdcache_revoking) {
- dout("invalidate_pages %p gen %d successful\n", inode,
- ci->i_rdcache_gen);
+ doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
+ ceph_vinop(inode), ci->i_rdcache_gen);
ci->i_rdcache_revoking--;
check = 1;
} else {
- dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
- inode, orig_gen, ci->i_rdcache_gen,
- ci->i_rdcache_revoking);
+ doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
+ inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
+ ci->i_rdcache_revoking);
if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
check = 1;
}
@@ -1892,7 +2249,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
mutex_unlock(&ci->i_truncate_mutex);
out:
if (check)
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, 0);
}
/*
@@ -1901,6 +2258,7 @@ out:
*/
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u64 to;
int wrbuffer_refs, finish = 0;
@@ -1909,7 +2267,8 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_truncate_pending == 0) {
- dout("__do_pending_vmtruncate %p none pending\n", inode);
+ doutc(cl, "%p %llx.%llx none pending\n", inode,
+ ceph_vinop(inode));
spin_unlock(&ci->i_ceph_lock);
mutex_unlock(&ci->i_truncate_mutex);
return;
@@ -1921,8 +2280,8 @@ retry:
*/
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
spin_unlock(&ci->i_ceph_lock);
- dout("__do_pending_vmtruncate %p flushing snaps first\n",
- inode);
+ doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
+ ceph_vinop(inode));
filemap_write_and_wait_range(&inode->i_data, 0,
inode->i_sb->s_maxbytes);
goto retry;
@@ -1931,16 +2290,17 @@ retry:
/* there should be no reader or writer */
WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
- to = ci->i_truncate_size;
+ to = ci->i_truncate_pagecache_size;
wrbuffer_refs = ci->i_wrbuffer_ref;
- dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
- ci->i_truncate_pending, to);
+ doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
+ ci->i_truncate_pending, to);
spin_unlock(&ci->i_ceph_lock);
+ ceph_fscache_resize(inode, to);
truncate_pagecache(inode, to);
spin_lock(&ci->i_ceph_lock);
- if (to == ci->i_truncate_size) {
+ if (to == ci->i_truncate_pagecache_size) {
ci->i_truncate_pending = 0;
finish = 1;
}
@@ -1951,7 +2311,7 @@ retry:
mutex_unlock(&ci->i_truncate_mutex);
if (wrbuffer_refs == 0)
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, 0);
wake_up_all(&ci->i_cap_wq);
}
@@ -1960,10 +2320,11 @@ static void ceph_inode_work(struct work_struct *work)
{
struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
i_work);
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
- dout("writeback %p\n", inode);
+ doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
filemap_fdatawrite(&inode->i_data);
}
if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
@@ -1973,7 +2334,7 @@ static void ceph_inode_work(struct work_struct *work)
__ceph_do_pending_vmtruncate(inode);
if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, 0);
if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
ceph_flush_snaps(ci, NULL);
@@ -1981,6 +2342,32 @@ static void ceph_inode_work(struct work_struct *work)
iput(inode);
}
+static const char *ceph_encrypted_get_link(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
+ done);
+}
+
+static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
+ const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
+{
+ int ret;
+
+ ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
+ if (ret)
+ return ret;
+ return fscrypt_symlink_getattr(path, stat);
+}
+
/*
* symlinks
*/
@@ -1991,20 +2378,200 @@ static const struct inode_operations ceph_symlink_iops = {
.listxattr = ceph_listxattr,
};
-int __ceph_setattr(struct inode *inode, struct iattr *attr)
+static const struct inode_operations ceph_encrypted_symlink_iops = {
+ .get_link = ceph_encrypted_get_link,
+ .setattr = ceph_setattr,
+ .getattr = ceph_encrypted_symlink_getattr,
+ .listxattr = ceph_listxattr,
+};
+
+/*
+ * Transfer the encrypted last block to the MDS so that it can
+ * update it when truncating to a smaller size.
+ *
+ * A PAGE_SIZE smaller than CEPH_FSCRYPT_BLOCK_SIZE is not
+ * supported.
+ */
+static int fill_fscrypt_truncate(struct inode *inode,
+ struct ceph_mds_request *req,
+ struct iattr *attr)
+{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
+ loff_t pos, orig_pos = round_down(attr->ia_size,
+ CEPH_FSCRYPT_BLOCK_SIZE);
+ u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ struct ceph_pagelist *pagelist = NULL;
+ struct kvec iov = {0};
+ struct iov_iter iter;
+ struct page *page = NULL;
+ struct ceph_fscrypt_truncate_size_header header;
+ int retry_op = 0;
+ int len = CEPH_FSCRYPT_BLOCK_SIZE;
+ loff_t i_size = i_size_read(inode);
+ int got, ret, issued;
+ u64 objver;
+
+ ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
+ if (ret < 0)
+ return ret;
+
+ issued = __ceph_caps_issued(ci, NULL);
+
+ doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
+ i_size, attr->ia_size, ceph_cap_string(got),
+ ceph_cap_string(issued));
+
+ /* Try to writeback the dirty pagecaches */
+ if (issued & (CEPH_CAP_FILE_BUFFER)) {
+ loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
+
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ orig_pos, lend);
+ if (ret < 0)
+ goto out;
+ }
+
+ page = __page_cache_alloc(GFP_KERNEL);
+ if (page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pagelist = ceph_pagelist_alloc(GFP_KERNEL);
+ if (!pagelist) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ iov.iov_base = kmap_local_page(page);
+ iov.iov_len = len;
+ iov_iter_kvec(&iter, READ, &iov, 1, len);
+
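+ /* read back the last block (decrypted); the tail past the new EOF is zeroed and the block re-encrypted below */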
+ pos = orig_pos;
+ ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
+ if (ret < 0)
+ goto out;
+
+ /* Insert the header first */
+ header.ver = 1;
+ header.compat = 1;
+ header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
+
+ /*
+ * Always set block_size to CEPH_FSCRYPT_BLOCK_SIZE, since the
+ * MDS may need it to perform the truncate.
+ */
+ header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
+
+ /*
+ * If we hit a hole here, just skip attaching the fscrypt
+ * payload to the request: once encryption is enabled the file
+ * is split into CEPH_FSCRYPT_BLOCK_SIZE blocks, so any hole
+ * must be a multiple of the block size.
+ *
+ * If the RADOS object doesn't exist, objver will be 0.
+ */
+ if (!objver) {
+ doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
+
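+ /* hole: the header carries only change_attr (8) + file_offset (8) + block_size (4), no block data */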
+ header.data_len = cpu_to_le32(8 + 8 + 4);
+ header.file_offset = 0;
+ ret = 0;
+ } else {
+ header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
+ header.file_offset = cpu_to_le64(orig_pos);
+
+ doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
+ CEPH_FSCRYPT_BLOCK_SIZE);
+
+ /* truncate and zero out the extra contents for the last block */
+ memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
+
+ /* encrypt the last block */
+ ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ 0, block);
+ if (ret)
+ goto out;
+ }
+
+ /* Insert the header */
+ ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
+ if (ret)
+ goto out;
+
+ if (header.block_size) {
+ /* Append the last block contents to pagelist */
+ ret = ceph_pagelist_append(pagelist, iov.iov_base,
+ CEPH_FSCRYPT_BLOCK_SIZE);
+ if (ret)
+ goto out;
+ }
+ req->r_pagelist = pagelist;
+out:
+ doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
+ ceph_vinop(inode), ceph_cap_string(got));
+ ceph_put_cap_refs(ci, got);
+ if (iov.iov_base)
+ kunmap_local(iov.iov_base);
+ if (page)
+ __free_pages(page, 0);
+ if (ret && pagelist)
+ ceph_pagelist_release(pagelist);
+ return ret;
+}
+
+int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *attr, struct ceph_iattr *cia)
{
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_cap_flush *prealloc_cf;
+ loff_t isize = i_size_read(inode);
int issued;
int release = 0, dirtied = 0;
int mask = 0;
int err = 0;
int inode_dirty_flags = 0;
bool lock_snap_rwsem = false;
+ bool fill_fscrypt;
+ int truncate_retry = 20; /* The RMW will take around 50ms */
+ struct dentry *dentry;
+ char *path;
+ bool do_sync = false;
+
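+ /* if we can't verify write access against the MDS auth caps here, force the change to go through the MDS (do_sync) */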
+ dentry = d_find_alias(inode);
+ if (!dentry) {
+ do_sync = true;
+ } else {
+ struct ceph_path_info path_info;
+ path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
+ if (IS_ERR(path)) {
+ do_sync = true;
+ err = 0;
+ } else {
+ err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
+ }
+ ceph_mdsc_free_path_info(&path_info);
+ dput(dentry);
+
+ /* for errors other than -EACCES, let the MDS do the auth check */
+ if (err == -EACCES) {
+ return err;
+ } else if (err < 0) {
+ do_sync = true;
+ err = 0;
+ }
+ }
+retry:
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return -ENOMEM;
@@ -2016,6 +2583,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
return PTR_ERR(req);
}
+ fill_fscrypt = false;
spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
@@ -2030,42 +2598,86 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
}
- dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
+ doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
+ ceph_cap_string(issued));
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (cia && cia->fscrypt_auth) {
+ u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
+
+ if (len > sizeof(*cia->fscrypt_auth)) {
+ err = -EINVAL;
+ spin_unlock(&ci->i_ceph_lock);
+ goto out;
+ }
+
+ doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
+ ceph_vinop(inode), ci->fscrypt_auth_len, len);
+
+ /* It should never be re-set once set */
+ WARN_ON_ONCE(ci->fscrypt_auth);
+
+ if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
+ dirtied |= CEPH_CAP_AUTH_EXCL;
+ kfree(ci->fscrypt_auth);
+ ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
+ ci->fscrypt_auth_len = len;
+ } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
+ ci->fscrypt_auth_len != len ||
+ memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
+ req->r_fscrypt_auth = cia->fscrypt_auth;
+ mask |= CEPH_SETATTR_FSCRYPT_AUTH;
+ release |= CEPH_CAP_AUTH_SHARED;
+ }
+ cia->fscrypt_auth = NULL;
+ }
+#else
+ if (cia && cia->fscrypt_auth) {
+ err = -EINVAL;
+ spin_unlock(&ci->i_ceph_lock);
+ goto out;
+ }
+#endif /* CONFIG_FS_ENCRYPTION */
if (ia_valid & ATTR_UID) {
- dout("setattr %p uid %d -> %d\n", inode,
- from_kuid(&init_user_ns, inode->i_uid),
- from_kuid(&init_user_ns, attr->ia_uid));
- if (issued & CEPH_CAP_AUTH_EXCL) {
- inode->i_uid = attr->ia_uid;
+ kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
+
+ doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
+ ceph_vinop(inode),
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kuid(&init_user_ns, attr->ia_uid));
+ if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
+ inode->i_uid = fsuid;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
- !uid_eq(attr->ia_uid, inode->i_uid)) {
+ !uid_eq(fsuid, inode->i_uid)) {
req->r_args.setattr.uid = cpu_to_le32(
- from_kuid(&init_user_ns, attr->ia_uid));
+ from_kuid(&init_user_ns, fsuid));
mask |= CEPH_SETATTR_UID;
release |= CEPH_CAP_AUTH_SHARED;
}
}
if (ia_valid & ATTR_GID) {
- dout("setattr %p gid %d -> %d\n", inode,
- from_kgid(&init_user_ns, inode->i_gid),
- from_kgid(&init_user_ns, attr->ia_gid));
- if (issued & CEPH_CAP_AUTH_EXCL) {
- inode->i_gid = attr->ia_gid;
+ kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
+
+ doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
+ ceph_vinop(inode),
+ from_kgid(&init_user_ns, inode->i_gid),
+ from_kgid(&init_user_ns, attr->ia_gid));
+ if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
+ inode->i_gid = fsgid;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
- !gid_eq(attr->ia_gid, inode->i_gid)) {
+ !gid_eq(fsgid, inode->i_gid)) {
req->r_args.setattr.gid = cpu_to_le32(
- from_kgid(&init_user_ns, attr->ia_gid));
+ from_kgid(&init_user_ns, fsgid));
mask |= CEPH_SETATTR_GID;
release |= CEPH_CAP_AUTH_SHARED;
}
}
if (ia_valid & ATTR_MODE) {
- dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
- attr->ia_mode);
- if (issued & CEPH_CAP_AUTH_EXCL) {
+ doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
+ ceph_vinop(inode), inode->i_mode, attr->ia_mode);
+ if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
inode->i_mode = attr->ia_mode;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
@@ -2078,20 +2690,21 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
if (ia_valid & ATTR_ATIME) {
- dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
- inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
- attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
- if (issued & CEPH_CAP_FILE_EXCL) {
+ struct timespec64 atime = inode_get_atime(inode);
+
+ doutc(cl, "%p %llx.%llx atime %ptSp -> %ptSp\n",
+ inode, ceph_vinop(inode), &atime, &attr->ia_atime);
+ if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
ci->i_time_warp_seq++;
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
dirtied |= CEPH_CAP_FILE_EXCL;
- } else if ((issued & CEPH_CAP_FILE_WR) &&
- timespec64_compare(&inode->i_atime,
- &attr->ia_atime) < 0) {
- inode->i_atime = attr->ia_atime;
+ } else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
+ timespec64_compare(&atime,
+ &attr->ia_atime) < 0) {
+ inode_set_atime_to_ts(inode, attr->ia_atime);
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
+ !timespec64_equal(&atime, &attr->ia_atime)) {
ceph_encode_timespec64(&req->r_args.setattr.atime,
&attr->ia_atime);
mask |= CEPH_SETATTR_ATIME;
@@ -2100,39 +2713,72 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
}
if (ia_valid & ATTR_SIZE) {
- loff_t isize = i_size_read(inode);
-
- dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
- if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size > isize) {
- i_size_write(inode, attr->ia_size);
- inode->i_blocks = calc_inode_blocks(attr->ia_size);
- ci->i_reported_size = attr->ia_size;
- dirtied |= CEPH_CAP_FILE_EXCL;
- ia_valid |= ATTR_MTIME;
+ doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
+ ceph_vinop(inode), isize, attr->ia_size);
+ /*
+ * The RMW is only needed when the new size is smaller and not
+ * aligned to CEPH_FSCRYPT_BLOCK_SIZE.
+ */
+ if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
+ (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
+ mask |= CEPH_SETATTR_SIZE;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+ mask |= CEPH_SETATTR_FSCRYPT_FILE;
+ req->r_args.setattr.size =
+ cpu_to_le64(round_up(attr->ia_size,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_args.setattr.old_size =
+ cpu_to_le64(round_up(isize,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_fscrypt_file = attr->ia_size;
+ fill_fscrypt = true;
+ } else if (!do_sync && (issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
+ if (attr->ia_size > isize) {
+ i_size_write(inode, attr->ia_size);
+ inode->i_blocks = calc_inode_blocks(attr->ia_size);
+ ci->i_reported_size = attr->ia_size;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ ia_valid |= ATTR_MTIME;
+ }
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
attr->ia_size != isize) {
- req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
- req->r_args.setattr.old_size = cpu_to_le64(isize);
mask |= CEPH_SETATTR_SIZE;
release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
+ if (IS_ENCRYPTED(inode) && attr->ia_size) {
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+ mask |= CEPH_SETATTR_FSCRYPT_FILE;
+ req->r_args.setattr.size =
+ cpu_to_le64(round_up(attr->ia_size,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_args.setattr.old_size =
+ cpu_to_le64(round_up(isize,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_fscrypt_file = attr->ia_size;
+ } else {
+ req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
+ req->r_args.setattr.old_size = cpu_to_le64(isize);
+ req->r_fscrypt_file = 0;
+ }
}
}
if (ia_valid & ATTR_MTIME) {
- dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
- inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
- attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
- if (issued & CEPH_CAP_FILE_EXCL) {
+ struct timespec64 mtime = inode_get_mtime(inode);
+
+ doutc(cl, "%p %llx.%llx mtime %ptSp -> %ptSp\n",
+ inode, ceph_vinop(inode), &mtime, &attr->ia_mtime);
+ if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
ci->i_time_warp_seq++;
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
dirtied |= CEPH_CAP_FILE_EXCL;
- } else if ((issued & CEPH_CAP_FILE_WR) &&
- timespec64_compare(&inode->i_mtime,
- &attr->ia_mtime) < 0) {
- inode->i_mtime = attr->ia_mtime;
+ } else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
+ timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
+ !timespec64_equal(&mtime, &attr->ia_mtime)) {
ceph_encode_timespec64(&req->r_args.setattr.mtime,
&attr->ia_mtime);
mask |= CEPH_SETATTR_MTIME;
@@ -2143,12 +2789,12 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
/* these do nothing */
if (ia_valid & ATTR_CTIME) {
+ struct timespec64 ictime = inode_get_ctime(inode);
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
- dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
- inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
- attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
- only ? "ctime only" : "ignored");
+ doutc(cl, "%p %llx.%llx ctime %ptSp -> %ptSp (%s)\n",
+ inode, ceph_vinop(inode), &ictime, &attr->ia_ctime,
+ only ? "ctime only" : "ignored");
if (only) {
/*
* if kernel wants to dirty ctime but nothing else,
@@ -2166,23 +2812,26 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
}
if (ia_valid & ATTR_FILE)
- dout("setattr %p ATTR_FILE ... hrm!\n", inode);
+ doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
+ ceph_vinop(inode));
if (dirtied) {
inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
&prealloc_cf);
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
+ inode_inc_iversion_raw(inode);
}
release &= issued;
spin_unlock(&ci->i_ceph_lock);
- if (lock_snap_rwsem)
+ if (lock_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
+ lock_snap_rwsem = false;
+ }
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
-
if (mask) {
req->r_inode = inode;
ihold(inode);
@@ -2190,10 +2839,32 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
req->r_args.setattr.mask = cpu_to_le32(mask);
req->r_num_caps = 1;
req->r_stamp = attr->ia_ctime;
+ if (fill_fscrypt) {
+ err = fill_fscrypt_truncate(inode, req, attr);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * The truncate request will return -EAGAIN when the
+ * last block has been updated just before the MDS
+ * successfully gets the xlock for the FILE lock. To
+ * avoid corrupting the file contents we need to retry
+ * it.
+ */
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err == -EAGAIN && truncate_retry--) {
+ doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
+ inode, ceph_vinop(inode), err,
+ ceph_cap_string(dirtied), mask);
+ ceph_mdsc_put_request(req);
+ ceph_free_cap_flush(prealloc_cf);
+ goto retry;
+ }
}
- dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
- ceph_cap_string(dirtied), mask);
+out:
+ doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
+ ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
ceph_mdsc_put_request(req);
ceph_free_cap_flush(prealloc_cf);
@@ -2207,17 +2878,24 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
/*
* setattr
*/
-int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int err;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
- err = setattr_prepare(&init_user_ns, dentry, attr);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
+ err = setattr_prepare(idmap, dentry, attr);
if (err != 0)
return err;
@@ -2229,14 +2907,44 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
return -EDQUOT;
- err = __ceph_setattr(inode, attr);
+ err = __ceph_setattr(idmap, inode, attr, NULL);
if (err >= 0 && (attr->ia_valid & ATTR_MODE))
- err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);
+ err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
return err;
}
+int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
+{
+ int issued = ceph_caps_issued(ceph_inode(inode));
+
+ /*
+ * If any 'x' caps are issued we can just choose the auth MDS
+ * instead of a random replica MDS. Only when the Locker is in
+ * LOCK_EXEC state can the loner client get the 'x' caps, and if
+ * we send a getattr request to a replica MDS it must auth-pin
+ * and try to rdlock from the auth MDS, which then has to
+ * transition the Locker state to LOCK_SYNC and back again
+ * afterwards.
+ *
+ * That Locker state transition is costly and usually requires
+ * revoking caps from clients.
+ *
+ * For the 'Xs' caps needed by getxattr we also choose the auth
+ * MDS, because the MDS side is buggy: setxattr doesn't notify
+ * the replica MDSes when values change, so a replica may return
+ * stale values. This will be fixed in the MDS, but choosing the
+ * auth MDS still makes sense for older ceph.
+ */
+ if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
+ || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
+ return USE_AUTH_MDS;
+ else
+ return USE_ANY_MDS;
+}
+
/*
* Verify that we have a lease on the given mask. If not,
* do a getattr against an mds.
@@ -2244,23 +2952,25 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
int mask, bool force)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int mode;
int err;
if (ceph_snap(inode) == CEPH_SNAPDIR) {
- dout("do_getattr inode %p SNAPDIR\n", inode);
+ doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
+ ceph_vinop(inode));
return 0;
}
- dout("do_getattr inode %p mask %s mode 0%o\n",
- inode, ceph_cap_string(mask), inode->i_mode);
+ doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
+ ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
return 0;
- mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
+ mode = ceph_try_to_choose_auth_mds(inode, mask);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -2275,14 +2985,68 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
if (inline_version == 0) {
/* the reply is supposed to contain inline data */
err = -EINVAL;
- } else if (inline_version == CEPH_INLINE_NONE) {
+ } else if (inline_version == CEPH_INLINE_NONE ||
+ inline_version == 1) {
err = -ENODATA;
} else {
err = req->r_reply_info.targeti.inline_len;
}
}
ceph_mdsc_put_request(req);
- dout("do_getattr result=%d\n", err);
+ doutc(cl, "result=%d\n", err);
+ return err;
+}
+
+int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
+ size_t size)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ int mode = USE_AUTH_MDS;
+ int err;
+ char *xattr_value;
+ size_t xattr_value_len;
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
+ if (IS_ERR(req)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
+ req->r_path2 = kstrdup(name, GFP_NOFS);
+ if (!req->r_path2) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ ihold(inode);
+ req->r_inode = inode;
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err < 0)
+ goto put;
+
+ xattr_value = req->r_reply_info.xattr_info.xattr_value;
+ xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
+
+ doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
+
+ err = (int)xattr_value_len;
+ if (size == 0)
+ goto put;
+
+ if (xattr_value_len > size) {
+ err = -ERANGE;
+ goto put;
+ }
+
+ memcpy(value, xattr_value, xattr_value_len);
+put:
+ ceph_mdsc_put_request(req);
+out:
+ doutc(cl, "result=%d\n", err);
return err;
}
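As a side note, here is a minimal sketch (not part of the patch) of how a caller might use the new ceph_do_getvxattr() helper added above: per the size == 0 handling, a zero-sized buffer probes the value length and a second call copies the data. The wrapper below is hypothetical, not actual fs/ceph code.

static int example_read_vxattr(struct inode *inode, const char *name)
{
	char *buf;
	int len, ret;

	/* size == 0 probes the value length without copying anything */
	len = ceph_do_getvxattr(inode, name, NULL, 0);
	if (len < 0)
		return len;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* second call copies the value; -ERANGE if it no longer fits */
	ret = ceph_do_getvxattr(inode, name, buf, len);
	if (ret >= 0)
		pr_debug("vxattr %s: %.*s\n", name, ret, buf);

	kfree(buf);
	return ret < 0 ? ret : 0;
}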
@@ -2291,7 +3055,7 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
* Check inode permissions. We verify we have a valid value for
* the AUTH cap, then call the generic handler.
*/
-int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
+int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
int err;
@@ -2302,7 +3066,7 @@ int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
if (!err)
- err = generic_permission(&init_user_ns, inode, mask);
+ err = generic_permission(idmap, inode, mask);
return err;
}
@@ -2311,10 +3075,10 @@ static int statx_to_caps(u32 want, umode_t mode)
{
int mask = 0;
- if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
+ if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
mask |= CEPH_CAP_AUTH_SHARED;
- if (want & (STATX_NLINK|STATX_CTIME)) {
+ if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
/*
* The link count for directories depends on inode->i_subdirs,
* and that is only updated when Fs caps are held.
@@ -2325,11 +3089,10 @@ static int statx_to_caps(u32 want, umode_t mode)
mask |= CEPH_CAP_LINK_SHARED;
}
- if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
- STATX_BLOCKS))
+ if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
mask |= CEPH_CAP_FILE_SHARED;
- if (want & (STATX_CTIME))
+ if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
mask |= CEPH_CAP_XATTR_SHARED;
return mask;
@@ -2339,16 +3102,20 @@ static int statx_to_caps(u32 want, umode_t mode)
* Get all the attributes. If we have sufficient caps for the requested attrs,
* then we can avoid talking to the MDS at all.
*/
-int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
+ struct super_block *sb = inode->i_sb;
struct ceph_inode_info *ci = ceph_inode(inode);
u32 valid_mask = STATX_BASIC_STATS;
int err = 0;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
/* Skip the getattr altogether if we're asked not to sync */
- if (!(flags & AT_STATX_DONT_SYNC)) {
+ if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
err = ceph_do_getattr(inode,
statx_to_caps(request_mask, inode->i_mode),
flags & AT_STATX_FORCE_SYNC);
@@ -2356,7 +3123,7 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
return err;
}
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->ino = ceph_present_inode(inode);
/*
@@ -2368,17 +3135,40 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
valid_mask |= STATX_BTIME;
}
+ if (request_mask & STATX_CHANGE_COOKIE) {
+ stat->change_cookie = inode_peek_iversion_raw(inode);
+ valid_mask |= STATX_CHANGE_COOKIE;
+ }
+
if (ceph_snap(inode) == CEPH_NOSNAP)
- stat->dev = inode->i_sb->s_dev;
+ stat->dev = sb->s_dev;
else
stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
if (S_ISDIR(inode->i_mode)) {
- if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
- RBYTES))
+ if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
stat->size = ci->i_rbytes;
- else
+ } else if (ceph_snap(inode) == CEPH_SNAPDIR) {
+ struct ceph_inode_info *pci;
+ struct ceph_snap_realm *realm;
+ struct inode *parent;
+
+ parent = ceph_lookup_inode(sb, ceph_ino(inode));
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+
+ pci = ceph_inode(parent);
+ spin_lock(&pci->i_ceph_lock);
+ realm = pci->i_snap_realm;
+ if (realm)
+ stat->size = realm->num_snaps;
+ else
+ stat->size = 0;
+ spin_unlock(&pci->i_ceph_lock);
+ iput(parent);
+ } else {
stat->size = ci->i_files + ci->i_subdirs;
+ }
stat->blocks = 0;
stat->blksize = 65536;
/*
@@ -2391,6 +3181,36 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
stat->nlink = 1 + 1 + ci->i_subdirs;
}
+ stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
+ if (IS_ENCRYPTED(inode))
+ stat->attributes |= STATX_ATTR_ENCRYPTED;
+ stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
+ STATX_ATTR_ENCRYPTED);
+
stat->result_mask = request_mask & valid_mask;
return err;
}
+
+void ceph_inode_shutdown(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct rb_node *p;
+ int iputs = 0;
+ bool invalidate = false;
+
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
+ p = rb_first(&ci->i_caps);
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+
+ p = rb_next(p);
+ iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (invalidate)
+ ceph_queue_invalidate(inode);
+ while (iputs--)
+ iput(inode);
+}
diff --git a/fs/ceph/io.c b/fs/ceph/io.c
index c456509b31c3..2d10f49c93a9 100644
--- a/fs/ceph/io.c
+++ b/fs/ceph/io.c
@@ -21,14 +21,23 @@
/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
{
+ bool is_odirect;
+
lockdep_assert_held_write(&inode->i_rwsem);
- if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT) {
- spin_lock(&ci->i_ceph_lock);
- ci->i_ceph_flags &= ~CEPH_I_ODIRECT;
- spin_unlock(&ci->i_ceph_lock);
- inode_dio_wait(inode);
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ if (is_odirect) {
+ clear_bit(CEPH_I_ODIRECT_BIT, &ci->i_ceph_flags);
+ /* ensure modified bit is visible */
+ smp_mb__after_atomic();
}
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (is_odirect)
+ inode_dio_wait(inode);
}
/**
@@ -47,20 +56,35 @@ static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
*/
-void
-ceph_start_io_read(struct inode *inode)
+int ceph_start_io_read(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_odirect;
+ int err;
/* Be an optimist! */
- down_read(&inode->i_rwsem);
- if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
- return;
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ if (!is_odirect)
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
ceph_block_o_direct(ci, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
@@ -83,11 +107,12 @@ ceph_end_io_read(struct inode *inode)
* Declare that a buffered write operation is about to start, and ensure
* that we block all direct I/O.
*/
-void
-ceph_start_io_write(struct inode *inode)
+int ceph_start_io_write(struct inode *inode)
{
- down_write(&inode->i_rwsem);
- ceph_block_o_direct(ceph_inode(inode), inode);
+ int err = down_write_killable(&inode->i_rwsem);
+ if (!err)
+ ceph_block_o_direct(ceph_inode(inode), inode);
+ return err;
}
/**
@@ -106,12 +131,22 @@ ceph_end_io_write(struct inode *inode)
/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
{
+ bool is_odirect;
+
lockdep_assert_held_write(&inode->i_rwsem);
- if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)) {
- spin_lock(&ci->i_ceph_lock);
- ci->i_ceph_flags |= CEPH_I_ODIRECT;
- spin_unlock(&ci->i_ceph_lock);
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ if (!is_odirect) {
+ set_bit(CEPH_I_ODIRECT_BIT, &ci->i_ceph_flags);
+ /* ensure modified bit is visible */
+ smp_mb__after_atomic();
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (!is_odirect) {
/* FIXME: unmap_mapping_range? */
filemap_write_and_wait(inode->i_mapping);
}
@@ -133,20 +168,35 @@ static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
*/
-void
-ceph_start_io_direct(struct inode *inode)
+int ceph_start_io_direct(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_odirect;
+ int err;
/* Be an optimist! */
- down_read(&inode->i_rwsem);
- if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
- return;
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ if (is_odirect)
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
ceph_block_buffered(ci, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
diff --git a/fs/ceph/io.h b/fs/ceph/io.h
index fa594cd77348..79029825e8b8 100644
--- a/fs/ceph/io.h
+++ b/fs/ceph/io.h
@@ -2,11 +2,13 @@
#ifndef _FS_CEPH_IO_H
#define _FS_CEPH_IO_H
-void ceph_start_io_read(struct inode *inode);
+#include <linux/compiler_attributes.h>
+
+int __must_check ceph_start_io_read(struct inode *inode);
void ceph_end_io_read(struct inode *inode);
-void ceph_start_io_write(struct inode *inode);
+int __must_check ceph_start_io_write(struct inode *inode);
void ceph_end_io_write(struct inode *inode);
-void ceph_start_io_direct(struct inode *inode);
+int __must_check ceph_start_io_direct(struct inode *inode);
void ceph_end_io_direct(struct inode *inode);
#endif /* FS_CEPH_IO_H */
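Because the start-io helpers now return int and are marked __must_check, callers have to handle a failure from the killable lock acquisition (typically -EINTR) instead of assuming the lock was taken. A minimal sketch of the adjusted calling pattern follows; the function below is hypothetical, not actual fs/ceph code.

static ssize_t example_buffered_read(struct inode *inode)
{
	ssize_t ret = 0;
	int err;

	err = ceph_start_io_read(inode);	/* may fail, e.g. -EINTR */
	if (err)
		return err;

	/* ... perform the buffered read while direct I/O is excluded ... */

	ceph_end_io_read(inode);
	return ret;
}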
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 6e061bf62ad4..15cde055f3da 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -6,6 +6,7 @@
#include "mds_client.h"
#include "ioctl.h"
#include <linux/ceph/striper.h>
+#include <linux/fscrypt.h>
/*
* ioctls
@@ -64,7 +65,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
struct ceph_inode_info *ci = ceph_inode(file_inode(file));
@@ -139,7 +140,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
int err;
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
/* copy and validate */
if (copy_from_user(&l, arg, sizeof(l)))
@@ -182,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
- &ceph_sb_to_client(inode->i_sb)->client->osdc;
+ &ceph_sb_to_fs_client(inode->i_sb)->client->osdc;
struct ceph_object_locator oloc;
CEPH_DEFINE_OID_ONSTACK(oid);
u32 xlen;
@@ -243,20 +244,30 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
+ bool is_file_already_lazy = false;
+ spin_lock(&ci->i_ceph_lock);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
- spin_lock(&ci->i_ceph_lock);
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
__ceph_touch_fmode(ci, mdsc, fi->fmode);
- spin_unlock(&ci->i_ceph_lock);
- dout("ioctl_layzio: file %p marked lazy\n", file);
+ } else {
+ is_file_already_lazy = true;
+ }
+ spin_unlock(&ci->i_ceph_lock);
- ceph_check_caps(ci, 0, NULL);
+ if (is_file_already_lazy) {
+ doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
+ ceph_vinop(inode));
} else {
- dout("ioctl_layzio: file %p already lazy\n", file);
+ doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode,
+ ceph_vinop(inode));
+
+ ceph_check_caps(ci, 0);
}
+
return 0;
}
@@ -268,9 +279,98 @@ static long ceph_ioctl_syncio(struct file *file)
return 0;
}
+static int vet_mds_for_fscrypt(struct file *file)
+{
+ int i, ret = -EOPNOTSUPP;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(file_inode(file)->i_sb);
+
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *s = mdsc->sessions[i];
+
+ if (!s)
+ continue;
+ if (test_bit(CEPHFS_FEATURE_ALTERNATE_NAME, &s->s_features))
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&mdsc->mutex);
+ return ret;
+}
+
+static long ceph_set_encryption_policy(struct file *file, unsigned long arg)
+{
+ int ret, got = 0;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ /* encrypted directories can't have striped layout */
+ if (ci->i_layout.stripe_count > 1)
+ return -EINVAL;
+
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure we hold these caps so that we _know_ that the rstats check
+ * in the empty_dir check is reliable.
+ */
+ ret = ceph_get_caps(file, CEPH_CAP_FILE_SHARED, 0, -1, &got);
+ if (ret)
+ return ret;
+
+ ret = fscrypt_ioctl_set_policy(file, (const void __user *)arg);
+ if (got)
+ ceph_put_cap_refs(ci, got);
+
+ return ret;
+}
+
+static const char *ceph_ioctl_cmd_name(const unsigned int cmd)
+{
+ switch (cmd) {
+ case CEPH_IOC_GET_LAYOUT:
+ return "get_layout";
+ case CEPH_IOC_SET_LAYOUT:
+ return "set_layout";
+ case CEPH_IOC_SET_LAYOUT_POLICY:
+ return "set_layout_policy";
+ case CEPH_IOC_GET_DATALOC:
+ return "get_dataloc";
+ case CEPH_IOC_LAZYIO:
+ return "lazyio";
+ case CEPH_IOC_SYNCIO:
+ return "syncio";
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ return "set_encryption_policy";
+ case FS_IOC_GET_ENCRYPTION_POLICY:
+ return "get_encryption_policy";
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ return "get_encryption_policy_ex";
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ return "add_encryption_key";
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ return "remove_encryption_key";
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ return "remove_encryption_key_all_users";
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ return "get_encryption_key_status";
+ case FS_IOC_GET_ENCRYPTION_NONCE:
+ return "get_encryption_nonce";
+ default:
+ return "unknown";
+ }
+}
+
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
+ struct inode *inode = file_inode(file);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int ret;
+
+ doutc(fsc->client, "file %p %p %llx.%llx cmd %s arg %lu\n", file,
+ inode, ceph_vinop(inode), ceph_ioctl_cmd_name(cmd), arg);
switch (cmd) {
case CEPH_IOC_GET_LAYOUT:
return ceph_ioctl_get_layout(file, (void __user *)arg);
@@ -289,6 +389,43 @@ long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case CEPH_IOC_SYNCIO:
return ceph_ioctl_syncio(file);
+
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ return ceph_set_encryption_policy(file, arg);
+
+ case FS_IOC_GET_ENCRYPTION_POLICY:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_get_policy(file, (void __user *)arg);
+
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_get_policy_ex(file, (void __user *)arg);
+
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_add_key(file, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ return fscrypt_ioctl_remove_key(file, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ return fscrypt_ioctl_remove_key_all_users(file,
+ (void __user *)arg);
+
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ return fscrypt_ioctl_get_key_status(file, (void __user *)arg);
+
+ case FS_IOC_GET_ENCRYPTION_NONCE:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_get_nonce(file, (void __user *)arg);
}
return -ENOTTY;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index d8c31069fbf2..dd764f9c64b9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -7,6 +7,7 @@
#include "super.h"
#include "mds_client.h"
+#include <linux/filelock.h>
#include <linux/ceph/pagelist.h>
static u64 lock_secret;
@@ -32,24 +33,36 @@ void __init ceph_flock_init(void)
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
- struct ceph_file_info *fi = dst->fl_file->private_data;
- struct inode *inode = file_inode(dst->fl_file);
+ struct inode *inode = file_inode(dst->c.flc_file);
atomic_inc(&ceph_inode(inode)->i_filelock_ref);
- atomic_inc(&fi->num_locks);
+ dst->fl_u.ceph.inode = igrab(inode);
}
+/*
+ * Do not use 'fl->fl_file' in the release function; it may
+ * already have been released by another thread.
+ */
static void ceph_fl_release_lock(struct file_lock *fl)
{
- struct ceph_file_info *fi = fl->fl_file->private_data;
- struct inode *inode = file_inode(fl->fl_file);
- struct ceph_inode_info *ci = ceph_inode(inode);
- atomic_dec(&fi->num_locks);
+ struct inode *inode = fl->fl_u.ceph.inode;
+ struct ceph_inode_info *ci;
+
+ /*
+ * If inode is NULL this should be a request file_lock;
+ * there is nothing we can do.
+ */
+ if (!inode)
+ return;
+
+ ci = ceph_inode(inode);
if (atomic_dec_and_test(&ci->i_filelock_ref)) {
/* clear error when all locks are released */
spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
spin_unlock(&ci->i_ceph_lock);
}
+ fl->fl_u.ceph.inode = NULL;
+ iput(inode);
}
static const struct file_lock_operations ceph_fl_lock_ops = {
@@ -64,6 +77,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
int cmd, u8 wait, struct file_lock *fl)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int err;
u64 length = 0;
@@ -96,33 +110,34 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
else
length = fl->fl_end - fl->fl_start + 1;
- owner = secure_addr(fl->fl_owner);
+ owner = secure_addr(fl->c.flc_owner);
- dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
- "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
- (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
- wait, fl->fl_type);
+ doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, "
+ "start: %llu, length: %llu, wait: %d, type: %d\n",
+ (int)lock_type, (int)operation, owner,
+ (u64) fl->c.flc_pid,
+ fl->fl_start, length, wait, fl->c.flc_type);
req->r_args.filelock_change.rule = lock_type;
req->r_args.filelock_change.type = cmd;
req->r_args.filelock_change.owner = cpu_to_le64(owner);
- req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
+ req->r_args.filelock_change.pid = cpu_to_le64((u64) fl->c.flc_pid);
req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
req->r_args.filelock_change.length = cpu_to_le64(length);
req->r_args.filelock_change.wait = wait;
- if (wait)
- req->r_wait_for_completion = ceph_lock_wait_for_completion;
-
- err = ceph_mdsc_do_request(mdsc, inode, req);
+ err = ceph_mdsc_submit_request(mdsc, inode, req);
+ if (!err)
+ err = ceph_mdsc_wait_request(mdsc, req, wait ?
+ ceph_lock_wait_for_completion : NULL);
if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
- fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
+ fl->c.flc_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
- fl->fl_type = F_RDLCK;
+ fl->c.flc_type = F_RDLCK;
else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
- fl->fl_type = F_WRLCK;
+ fl->c.flc_type = F_WRLCK;
else
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
@@ -134,16 +149,17 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
}
ceph_mdsc_put_request(req);
- dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
- "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
- (int)operation, (u64)fl->fl_pid, fl->fl_start,
- length, wait, fl->fl_type, err);
+ doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, "
+ "length: %llu, wait: %d, type: %d, err code %d\n",
+ (int)lock_type, (int)operation, (u64) fl->c.flc_pid,
+ fl->fl_start, length, wait, fl->c.flc_type, err);
return err;
}
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *intr_req;
struct inode *inode = req->r_inode;
int err, lock_type;
@@ -161,8 +177,7 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
if (!err)
return 0;
- dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
- req->r_tid);
+ doutc(cl, "request %llu was interrupted\n", req->r_tid);
mutex_lock(&mdsc->mutex);
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
@@ -206,17 +221,20 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
if (err && err != -ERESTARTSYS)
return err;
- wait_for_completion_killable(&req->r_safe_completion);
+ err = wait_for_completion_killable(&req->r_safe_completion);
+ if (err)
+ return err;
+
return 0;
}
static int try_unlock_file(struct file *file, struct file_lock *fl)
{
int err;
- unsigned int orig_flags = fl->fl_flags;
- fl->fl_flags |= FL_EXISTS;
+ unsigned int orig_flags = fl->c.flc_flags;
+ fl->c.flc_flags |= FL_EXISTS;
err = locks_lock_file_wait(file, fl);
- fl->fl_flags = orig_flags;
+ fl->c.flc_flags = orig_flags;
if (err == -ENOENT) {
if (!(orig_flags & FL_EXISTS))
err = 0;
@@ -233,15 +251,19 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
u16 op = CEPH_MDS_OP_SETFILELOCK;
u8 wait = 0;
u8 lock_cmd;
- if (!(fl->fl_flags & FL_POSIX))
+ if (!(fl->c.flc_flags & FL_POSIX))
return -ENOLCK;
- dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
+ doutc(cl, "fl_owner: %p\n", fl->c.flc_owner);
/* set wait bit as appropriate, then encode the command as Ceph expects it */
if (IS_GETLK(cmd))
@@ -255,19 +277,19 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
}
spin_unlock(&ci->i_ceph_lock);
if (err < 0) {
- if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
+ if (op == CEPH_MDS_OP_SETFILELOCK && lock_is_unlock(fl))
posix_lock_file(file, fl, NULL);
return err;
}
- if (F_RDLCK == fl->fl_type)
+ if (lock_is_read(fl))
lock_cmd = CEPH_LOCK_SHARED;
- else if (F_WRLCK == fl->fl_type)
+ else if (lock_is_write(fl))
lock_cmd = CEPH_LOCK_EXCL;
else
lock_cmd = CEPH_LOCK_UNLOCK;
- if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) {
+ if (op == CEPH_MDS_OP_SETFILELOCK && lock_is_unlock(fl)) {
err = try_unlock_file(file, fl);
if (err <= 0)
return err;
@@ -275,8 +297,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
if (!err) {
- if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
- dout("mds locked, locking locally\n");
+ if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->c.flc_type) {
+ doutc(cl, "locking locally\n");
err = posix_lock_file(file, fl, NULL);
if (err) {
/* undo! This should only happen if
@@ -284,8 +306,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
* deadlock. */
ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on posix_lock_file, undid lock\n",
- err);
+ doutc(cl, "got %d on posix_lock_file, undid lock\n",
+ err);
}
}
}
@@ -296,14 +318,18 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
u8 wait = 0;
u8 lock_cmd;
- if (!(fl->fl_flags & FL_FLOCK))
+ if (!(fl->c.flc_flags & FL_FLOCK))
return -ENOLCK;
- dout("ceph_flock, fl_file: %p\n", fl->fl_file);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
+ doutc(cl, "fl_file: %p\n", fl->c.flc_file);
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
@@ -311,7 +337,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
}
spin_unlock(&ci->i_ceph_lock);
if (err < 0) {
- if (F_UNLCK == fl->fl_type)
+ if (lock_is_unlock(fl))
locks_lock_file_wait(file, fl);
return err;
}
@@ -319,14 +345,14 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
if (IS_SETLKW(cmd))
wait = 1;
- if (F_RDLCK == fl->fl_type)
+ if (lock_is_read(fl))
lock_cmd = CEPH_LOCK_SHARED;
- else if (F_WRLCK == fl->fl_type)
+ else if (lock_is_write(fl))
lock_cmd = CEPH_LOCK_EXCL;
else
lock_cmd = CEPH_LOCK_UNLOCK;
- if (F_UNLCK == fl->fl_type) {
+ if (lock_is_unlock(fl)) {
err = try_unlock_file(file, fl);
if (err <= 0)
return err;
@@ -334,13 +360,14 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
inode, lock_cmd, wait, fl);
- if (!err && F_UNLCK != fl->fl_type) {
+ if (!err && F_UNLCK != fl->c.flc_type) {
err = locks_lock_file_wait(file, fl);
if (err) {
ceph_lock_message(CEPH_LOCK_FLOCK,
CEPH_MDS_OP_SETFILELOCK,
inode, CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on locks_lock_file_wait, undid lock\n", err);
+ doutc(cl, "got %d on locks_lock_file_wait, undid lock\n",
+ err);
}
}
return err;
@@ -352,39 +379,43 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
*/
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct file_lock *lock;
struct file_lock_context *ctx;
*fcntl_count = 0;
*flock_count = 0;
- ctx = inode->i_flctx;
+ ctx = locks_inode_context(inode);
if (ctx) {
spin_lock(&ctx->flc_lock);
- list_for_each_entry(lock, &ctx->flc_posix, fl_list)
+ for_each_file_lock(lock, &ctx->flc_posix)
++(*fcntl_count);
- list_for_each_entry(lock, &ctx->flc_flock, fl_list)
+ for_each_file_lock(lock, &ctx->flc_flock)
++(*flock_count);
spin_unlock(&ctx->flc_lock);
}
- dout("counted %d flock locks and %d fcntl locks\n",
- *flock_count, *fcntl_count);
+ doutc(cl, "counted %d flock locks and %d fcntl locks\n",
+ *flock_count, *fcntl_count);
}
/*
* Given a pointer to a lock, convert it to a ceph filelock
*/
-static int lock_to_ceph_filelock(struct file_lock *lock,
+static int lock_to_ceph_filelock(struct inode *inode,
+ struct file_lock *lock,
struct ceph_filelock *cephlock)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
+
cephlock->start = cpu_to_le64(lock->fl_start);
cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
cephlock->client = cpu_to_le64(0);
- cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
- cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
+ cephlock->pid = cpu_to_le64((u64) lock->c.flc_pid);
+ cephlock->owner = cpu_to_le64(secure_addr(lock->c.flc_owner));
- switch (lock->fl_type) {
+ switch (lock->c.flc_type) {
case F_RDLCK:
cephlock->type = CEPH_LOCK_SHARED;
break;
@@ -395,7 +426,8 @@ static int lock_to_ceph_filelock(struct file_lock *lock,
cephlock->type = CEPH_LOCK_UNLOCK;
break;
default:
- dout("Have unknown lock type %d\n", lock->fl_type);
+ doutc(cl, "Have unknown lock type %d\n",
+ lock->c.flc_type);
err = -EINVAL;
}
@@ -412,37 +444,38 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
int num_fcntl_locks, int num_flock_locks)
{
struct file_lock *lock;
- struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock_context *ctx = locks_inode_context(inode);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
int l = 0;
- dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
- num_fcntl_locks);
+ doutc(cl, "encoding %d flock and %d fcntl locks\n", num_flock_locks,
+ num_fcntl_locks);
if (!ctx)
return 0;
spin_lock(&ctx->flc_lock);
- list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
+ for_each_file_lock(lock, &ctx->flc_posix) {
++seen_fcntl;
if (seen_fcntl > num_fcntl_locks) {
err = -ENOSPC;
goto fail;
}
- err = lock_to_ceph_filelock(lock, &flocks[l]);
+ err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
if (err)
goto fail;
++l;
}
- list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ for_each_file_lock(lock, &ctx->flc_flock) {
++seen_flock;
if (seen_flock > num_flock_locks) {
err = -ENOSPC;
goto fail;
}
- err = lock_to_ceph_filelock(lock, &flocks[l]);
+ err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
if (err)
goto fail;
++l;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d64413adc0fd..1740047aef0f 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -12,9 +12,11 @@
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>
+#include <linux/mnt_idmapping.h>
#include "super.h"
#include "mds_client.h"
+#include "crypto.h"
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
@@ -184,8 +186,54 @@ static int parse_reply_info_in(void **p, void *end,
info->rsnaps = 0;
}
+ if (struct_v >= 5) {
+ u32 alen;
+
+ ceph_decode_32_safe(p, end, alen, bad);
+
+ while (alen--) {
+ u32 len;
+
+ /* key */
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_skip_n(p, end, len, bad);
+ /* value */
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_skip_n(p, end, len, bad);
+ }
+ }
+
+ /* fscrypt flag -- ignore */
+ if (struct_v >= 6)
+ ceph_decode_skip_8(p, end, bad);
+
+ info->fscrypt_auth = NULL;
+ info->fscrypt_auth_len = 0;
+ info->fscrypt_file = NULL;
+ info->fscrypt_file_len = 0;
+ if (struct_v >= 7) {
+ ceph_decode_32_safe(p, end, info->fscrypt_auth_len, bad);
+ if (info->fscrypt_auth_len) {
+ info->fscrypt_auth = kmalloc(info->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (!info->fscrypt_auth)
+ return -ENOMEM;
+ ceph_decode_copy_safe(p, end, info->fscrypt_auth,
+ info->fscrypt_auth_len, bad);
+ }
+ ceph_decode_32_safe(p, end, info->fscrypt_file_len, bad);
+ if (info->fscrypt_file_len) {
+ info->fscrypt_file = kmalloc(info->fscrypt_file_len,
+ GFP_KERNEL);
+ if (!info->fscrypt_file)
+ return -ENOMEM;
+ ceph_decode_copy_safe(p, end, info->fscrypt_file,
+ info->fscrypt_file_len, bad);
+ }
+ }
*p = end;
} else {
+ /* legacy (unversioned) struct */
if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
ceph_decode_64_safe(p, end, info->inline_version, bad);
ceph_decode_32_safe(p, end, info->inline_len, bad);
@@ -263,27 +311,47 @@ bad:
static int parse_reply_info_lease(void **p, void *end,
struct ceph_mds_reply_lease **lease,
- u64 features)
+ u64 features, u32 *altname_len, u8 **altname)
{
+ u8 struct_v;
+ u32 struct_len;
+ void *lend;
+
if (features == (u64)-1) {
- u8 struct_v, struct_compat;
- u32 struct_len;
+ u8 struct_compat;
+
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
+
/* struct_v is expected to be >= 1. we only understand
* encoding whose struct_compat == 1. */
if (!struct_v || struct_compat != 1)
goto bad;
+
ceph_decode_32_safe(p, end, struct_len, bad);
- ceph_decode_need(p, end, struct_len, bad);
- end = *p + struct_len;
+ } else {
+ struct_len = sizeof(**lease);
+ *altname_len = 0;
+ *altname = NULL;
}
- ceph_decode_need(p, end, sizeof(**lease), bad);
+ lend = *p + struct_len;
+ ceph_decode_need(p, end, struct_len, bad);
*lease = *p;
*p += sizeof(**lease);
- if (features == (u64)-1)
- *p = end;
+
+ if (features == (u64)-1) {
+ if (struct_v >= 2) {
+ ceph_decode_32_safe(p, end, *altname_len, bad);
+ ceph_decode_need(p, end, *altname_len, bad);
+ *altname = *p;
+ *p += *altname_len;
+ } else {
+ *altname = NULL;
+ *altname_len = 0;
+ }
+ }
+ *p = lend;
return 0;
bad:
return -EIO;
@@ -313,7 +381,8 @@ static int parse_reply_info_trace(void **p, void *end,
info->dname = *p;
*p += info->dname_len;
- err = parse_reply_info_lease(p, end, &info->dlease, features);
+ err = parse_reply_info_lease(p, end, &info->dlease, features,
+ &info->altname_len, &info->altname);
if (err < 0)
goto out_bad;
}
@@ -339,9 +408,11 @@ out_bad:
* parse readdir results
*/
static int parse_reply_info_readdir(void **p, void *end,
- struct ceph_mds_reply_info_parsed *info,
- u64 features)
+ struct ceph_mds_request *req,
+ u64 features)
{
+ struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
u32 num, i = 0;
int err;
@@ -364,25 +435,94 @@ static int parse_reply_info_readdir(void **p, void *end,
BUG_ON(!info->dir_entries);
if ((unsigned long)(info->dir_entries + num) >
(unsigned long)info->dir_entries + info->dir_buf_size) {
- pr_err("dir contents are larger than expected\n");
+ pr_err_client(cl, "dir contents are larger than expected\n");
WARN_ON(1);
goto bad;
}
info->dir_nr = num;
while (num) {
+ struct inode *inode = d_inode(req->r_dentry);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
+ struct fscrypt_str tname = FSTR_INIT(NULL, 0);
+ struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+ struct ceph_fname fname;
+ u32 altname_len, _name_len;
+ u8 *altname, *_name;
+
/* dentry */
- ceph_decode_32_safe(p, end, rde->name_len, bad);
- ceph_decode_need(p, end, rde->name_len, bad);
- rde->name = *p;
- *p += rde->name_len;
- dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
+ ceph_decode_32_safe(p, end, _name_len, bad);
+ ceph_decode_need(p, end, _name_len, bad);
+ _name = *p;
+ *p += _name_len;
+ doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name);
+
+ if (info->hash_order)
+ rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+ _name, _name_len);
/* dentry lease */
- err = parse_reply_info_lease(p, end, &rde->lease, features);
+ err = parse_reply_info_lease(p, end, &rde->lease, features,
+ &altname_len, &altname);
if (err)
goto out_bad;
+
+ /*
+ * Try to decrypt the dentry names and update them
+ * in the ceph_mds_reply_dir_entry struct.
+ */
+ fname.dir = inode;
+ fname.name = _name;
+ fname.name_len = _name_len;
+ fname.ctext = altname;
+ fname.ctext_len = altname_len;
+ /*
+ * _name_len may be larger than altname_len, for example when
+ * the human-readable name length falls in the range
+ * (CEPH_NOHASH_NAME_MAX, CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE);
+ * in that case the copy in ceph_fname_to_usr would corrupt the
+ * data if there is no encryption key.
+ *
+ * Just set the no_copy flag; then, if there is no encryption
+ * key, oname.name will always be assigned to _name.
+ */
+ fname.no_copy = true;
+ if (altname_len == 0) {
+ /*
+ * Set tname to _name; this will be used to do
+ * the base64_decode in-place. That is safe
+ * because the decoded string is always shorter,
+ * at 3/4 the length of the original string.
+ */
+ tname.name = _name;
+
+ /*
+ * Set oname to _name too, and this will be
+ * used to do the dencryption in-place.
+ */
+ oname.name = _name;
+ oname.len = _name_len;
+ } else {
+ /*
+ * This will do the decryption in-place directly
+ * from the altname ciphertext.
+ */
+ oname.name = altname;
+ oname.len = altname_len;
+ }
+ rde->is_nokey = false;
+ err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
+ if (err) {
+ pr_err_client(cl, "unable to decode %.*s, got %d\n",
+ _name_len, _name, err);
+ goto out_bad;
+ }
+ rde->name = oname.name;
+ rde->name_len = oname.len;
+
/* inode */
err = parse_reply_info_in(p, end, &rde->inode, features);
if (err < 0)
@@ -401,7 +541,7 @@ done:
bad:
err = -EIO;
out_bad:
- pr_err("problem parsing dir contents %d\n", err);
+ pr_err_client(cl, "problem parsing dir contents %d\n", err);
return err;
}
@@ -432,31 +572,33 @@ bad:
static int ceph_parse_deleg_inos(void **p, void *end,
struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
u32 sets;
ceph_decode_32_safe(p, end, sets, bad);
- dout("got %u sets of delegated inodes\n", sets);
+ doutc(cl, "got %u sets of delegated inodes\n", sets);
while (sets--) {
- u64 start, len, ino;
+ u64 start, len;
ceph_decode_64_safe(p, end, start, bad);
ceph_decode_64_safe(p, end, len, bad);
/* Don't accept a delegation of system inodes */
if (start < CEPH_INO_SYSTEM_BASE) {
- pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
- start, len);
+ pr_warn_ratelimited_client(cl,
+ "ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
+ start, len);
continue;
}
while (len--) {
- int err = xa_insert(&s->s_delegated_inos, ino = start++,
+ int err = xa_insert(&s->s_delegated_inos, start++,
DELEGATED_INO_AVAILABLE,
GFP_KERNEL);
if (!err) {
- dout("added delegated inode 0x%llx\n",
- start - 1);
+ doutc(cl, "added delegated inode 0x%llx\n", start - 1);
} else if (err == -EBUSY) {
- pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
+ pr_warn_client(cl,
+ "MDS delegated inode 0x%llx more than once.\n",
start - 1);
} else {
return err;
@@ -555,21 +697,46 @@ bad:
return -EIO;
}
+static int parse_reply_info_getvxattr(void **p, void *end,
+ struct ceph_mds_reply_info_parsed *info,
+ u64 features)
+{
+ u32 value_len;
+
+ ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */
+ ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */
+ ceph_decode_skip_32(p, end, bad); /* skip payload length */
+
+ ceph_decode_32_safe(p, end, value_len, bad);
+
+ if (value_len == end - *p) {
+ info->xattr_info.xattr_value = *p;
+ info->xattr_info.xattr_value_len = value_len;
+ *p = end;
+ return value_len;
+ }
+bad:
+ return -EIO;
+}
+
/*
* parse extra results
*/
static int parse_reply_info_extra(void **p, void *end,
- struct ceph_mds_reply_info_parsed *info,
+ struct ceph_mds_request *req,
u64 features, struct ceph_mds_session *s)
{
+ struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
u32 op = le32_to_cpu(info->head->op);
if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
- return parse_reply_info_readdir(p, end, info, features);
+ return parse_reply_info_readdir(p, end, req, features);
else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features, s);
+ else if (op == CEPH_MDS_OP_GETVXATTR)
+ return parse_reply_info_getvxattr(p, end, info, features);
else
return -EIO;
}
@@ -578,9 +745,10 @@ static int parse_reply_info_extra(void **p, void *end,
* parse entire mds reply
*/
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
- struct ceph_mds_reply_info_parsed *info,
- u64 features)
+ struct ceph_mds_request *req, u64 features)
{
+ struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
void *p, *end;
u32 len;
int err;
@@ -602,7 +770,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
ceph_decode_32_safe(&p, end, len, bad);
if (len > 0) {
ceph_decode_need(&p, end, len, bad);
- err = parse_reply_info_extra(&p, p+len, info, features, s);
+ err = parse_reply_info_extra(&p, p+len, req, features, s);
if (err < 0)
goto out_bad;
}
@@ -620,17 +788,105 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
bad:
err = -EIO;
out_bad:
- pr_err("mds parse_reply err %d\n", err);
+ pr_err_client(cl, "mds parse_reply err %d\n", err);
+ ceph_msg_dump(msg);
return err;
}
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
+ int i;
+
+ kfree(info->diri.fscrypt_auth);
+ kfree(info->diri.fscrypt_file);
+ kfree(info->targeti.fscrypt_auth);
+ kfree(info->targeti.fscrypt_file);
if (!info->dir_entries)
return;
+
+ for (i = 0; i < info->dir_nr; i++) {
+ struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
+
+ kfree(rde->inode.fscrypt_auth);
+ kfree(rde->inode.fscrypt_file);
+ }
free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
+/*
+ * In the async unlink case the kclient does not wait for the first
+ * reply from the MDS; it drops all the links, unhashes the dentry and
+ * succeeds immediately.
+ *
+ * For any new create/link/rename, etc. request that reuses the same
+ * file name we must wait for the first reply of the inflight unlink
+ * request, or the MDS may fail these following requests with -EEXIST
+ * if the inflight async unlink request was delayed for some reason.
+ *
+ * The worst case is the non-async openc request: it may successfully
+ * open the file if the CDentry hasn't been unlinked yet, but the
+ * previously delayed async unlink request will later remove the
+ * CDentry. That means the just-created file may be deleted later by
+ * accident.
+ *
+ * We therefore need to wait for inflight async unlink requests to
+ * finish when creating new files/directories with the same file names.
+ */
+int ceph_wait_on_conflict_unlink(struct dentry *dentry)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct ceph_client *cl = fsc->client;
+ struct dentry *pdentry = dentry->d_parent;
+ struct dentry *udentry, *found = NULL;
+ struct ceph_dentry_info *di;
+ struct qstr dname;
+ u32 hash = dentry->d_name.hash;
+ int err;
+
+ dname.name = dentry->d_name.name;
+ dname.len = dentry->d_name.len;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
+ hnode, hash) {
+ udentry = di->dentry;
+
+ spin_lock(&udentry->d_lock);
+ if (udentry->d_name.hash != hash)
+ goto next;
+ if (unlikely(udentry->d_parent != pdentry))
+ goto next;
+ if (!hash_hashed(&di->hnode))
+ goto next;
+
+ if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
+ pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n",
+ dentry, dentry);
+
+ if (!d_same_name(udentry, pdentry, &dname))
+ goto next;
+
+ found = dget_dlock(udentry);
+ spin_unlock(&udentry->d_lock);
+ break;
+next:
+ spin_unlock(&udentry->d_lock);
+ }
+ rcu_read_unlock();
+
+ if (likely(!found))
+ return 0;
+
+ doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry,
+ found, found);
+
+ err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
+ TASK_KILLABLE);
+ dput(found);
+ return err;
+}
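A minimal sketch (not part of the patch) of how a create/link/rename path is expected to use ceph_wait_on_conflict_unlink() before submitting its own request, per the comment above; the function below is hypothetical, not actual fs/ceph code.

static int example_create_prologue(struct dentry *dentry)
{
	int err;

	/* Block (killable) until any same-named async unlink finishes. */
	err = ceph_wait_on_conflict_unlink(dentry);
	if (err)
		return err;

	/* ... now safe to build and submit the MDS create request ... */
	return 0;
}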
+
/*
* sessions
@@ -707,8 +963,12 @@ static int __verify_registered_session(struct ceph_mds_client *mdsc,
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
int mds)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *s;
+ if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
+ return ERR_PTR(-EIO);
+
if (mds >= mdsc->mdsmap->possible_max_rank)
return ERR_PTR(-EINVAL);
@@ -719,21 +979,22 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
if (mds >= mdsc->max_sessions) {
int newmax = 1 << get_count_order(mds + 1);
struct ceph_mds_session **sa;
+ size_t ptr_size = sizeof(struct ceph_mds_session *);
- dout("%s: realloc to %d\n", __func__, newmax);
- sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
+ doutc(cl, "realloc to %d\n", newmax);
+ sa = kcalloc(newmax, ptr_size, GFP_NOFS);
if (!sa)
goto fail_realloc;
if (mdsc->sessions) {
memcpy(sa, mdsc->sessions,
- mdsc->max_sessions * sizeof(void *));
+ mdsc->max_sessions * ptr_size);
kfree(mdsc->sessions);
}
mdsc->sessions = sa;
mdsc->max_sessions = newmax;
}
- dout("%s: mds%d\n", __func__, mds);
+ doutc(cl, "mds%d\n", mds);
s->s_mdsc = mdsc;
s->s_mds = mds;
s->s_state = CEPH_MDS_SESSION_NEW;
@@ -776,7 +1037,7 @@ fail_realloc:
static void __unregister_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s)
{
- dout("__unregister_session mds%d %p\n", s->s_mds, s);
+ doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s);
BUG_ON(mdsc->sessions[s->s_mds] != s);
mdsc->sessions[s->s_mds] = NULL;
ceph_con_close(&s->s_con);
@@ -829,7 +1090,7 @@ void ceph_mdsc_release_request(struct kref *kref)
struct ceph_mds_request *req = container_of(kref,
struct ceph_mds_request,
r_kref);
- ceph_mdsc_release_dir_caps_no_check(req);
+ ceph_mdsc_release_dir_caps_async(req);
destroy_reply_info(&req->r_reply_info);
if (req->r_request)
ceph_msg_put(req->r_request);
@@ -844,6 +1105,7 @@ void ceph_mdsc_release_request(struct kref *kref)
iput(req->r_parent);
}
iput(req->r_target_inode);
+ iput(req->r_new_inode);
if (req->r_dentry)
dput(req->r_dentry);
if (req->r_old_dentry)
@@ -862,8 +1124,12 @@ void ceph_mdsc_release_request(struct kref *kref)
kfree(req->r_path1);
kfree(req->r_path2);
put_cred(req->r_cred);
+ if (req->r_mnt_idmap)
+ mnt_idmap_put(req->r_mnt_idmap);
if (req->r_pagelist)
ceph_pagelist_release(req->r_pagelist);
+ kfree(req->r_fscrypt_auth);
+ kfree(req->r_altname);
put_request_session(req);
ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
WARN_ON_ONCE(!list_empty(&req->r_wait));
@@ -899,6 +1165,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
struct inode *dir)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int ret = 0;
req->r_tid = ++mdsc->last_tid;
@@ -906,18 +1173,20 @@ static void __register_request(struct ceph_mds_client *mdsc,
ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
req->r_num_caps);
if (ret < 0) {
- pr_err("__register_request %p "
- "failed to reserve caps: %d\n", req, ret);
+ pr_err_client(cl, "%p failed to reserve caps: %d\n",
+ req, ret);
/* set req->r_err to fail early from __do_request */
req->r_err = ret;
return;
}
}
- dout("__register_request %p tid %lld\n", req, req->r_tid);
+ doutc(cl, "%p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
insert_request(&mdsc->request_tree, req);
req->r_cred = get_current_cred();
+ if (!req->r_mnt_idmap)
+ req->r_mnt_idmap = &nop_mnt_idmap;
if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
mdsc->oldest_tid = req->r_tid;
@@ -936,7 +1205,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
static void __unregister_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
- dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+ doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid);
/* Never leave an unregistered request on an unsafe list! */
list_del_init(&req->r_unsafe_item);
@@ -1022,6 +1291,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
int mds = -1;
u32 hash = req->r_direct_hash;
bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
+ struct ceph_client *cl = mdsc->fsc->client;
if (random)
*random = false;
@@ -1033,8 +1303,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
if (req->r_resend_mds >= 0 &&
(__have_session(mdsc, req->r_resend_mds) ||
ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
- dout("%s using resend_mds mds%d\n", __func__,
- req->r_resend_mds);
+ doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds);
return req->r_resend_mds;
}
@@ -1051,7 +1320,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
rcu_read_lock();
inode = get_nonsnap_parent(req->r_dentry);
rcu_read_unlock();
- dout("%s using snapdir's parent %p\n", __func__, inode);
+ doutc(cl, "using snapdir's parent %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
}
} else if (req->r_dentry) {
/* ignore race with rename; old or new d_parent is okay */
@@ -1071,7 +1341,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
/* direct snapped/virtual snapdir requests
* based on parent dir inode */
inode = get_nonsnap_parent(parent);
- dout("%s using nonsnap parent %p\n", __func__, inode);
+ doutc(cl, "using nonsnap parent %p %llx.%llx\n",
+ inode, ceph_vinop(inode));
} else {
/* dentry target */
inode = d_inode(req->r_dentry);
@@ -1087,10 +1358,11 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
rcu_read_unlock();
}
- dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
- hash, mode);
if (!inode)
goto random;
+
+ doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode,
+ ceph_vinop(inode), (int)is_hash, hash, mode);
ci = ceph_inode(inode);
if (is_hash && S_ISDIR(inode->i_mode)) {
@@ -1106,9 +1378,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
get_random_bytes(&r, 1);
r %= frag.ndist;
mds = frag.dist[r];
- dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
- __func__, inode, ceph_vinop(inode),
- frag.frag, mds, (int)r, frag.ndist);
+ doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n",
+ inode, ceph_vinop(inode), frag.frag,
+ mds, (int)r, frag.ndist);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
CEPH_MDS_STATE_ACTIVE &&
!ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
@@ -1121,9 +1393,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
if (frag.mds >= 0) {
/* choose auth mds */
mds = frag.mds;
- dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
- __func__, inode, ceph_vinop(inode),
- frag.frag, mds);
+ doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n",
+ inode, ceph_vinop(inode), frag.frag, mds);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
CEPH_MDS_STATE_ACTIVE) {
if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
@@ -1147,9 +1418,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
goto random;
}
mds = cap->session->s_mds;
- dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
- inode, ceph_vinop(inode), mds,
- cap == ci->i_auth_cap ? "auth " : "", cap);
+ doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode,
+ ceph_vinop(inode), mds,
+ cap == ci->i_auth_cap ? "auth " : "", cap);
spin_unlock(&ci->i_ceph_lock);
out:
iput(inode);
@@ -1160,7 +1431,7 @@ random:
*random = true;
mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
- dout("%s chose random mds%d\n", __func__, mds);
+ doutc(cl, "chose random mds%d\n", mds);
return mds;
}
@@ -1196,14 +1467,17 @@ static int encode_supported_features(void **p, void *end)
if (count > 0) {
size_t i;
size_t size = FEATURE_BYTES(count);
+ unsigned long bit;
if (WARN_ON_ONCE(*p + 4 + size > end))
return -ERANGE;
ceph_encode_32(p, size);
memset(*p, 0, size);
- for (i = 0; i < count; i++)
- ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
+ for (i = 0; i < count; i++) {
+ bit = feature_bits[i];
+ ((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
+ }
*p += size;
} else {
if (WARN_ON_ONCE(*p + 4 > end))
@@ -1261,7 +1535,8 @@ static int encode_metric_spec(void **p, void *end)
* session message, specialization for CEPH_SESSION_REQUEST_OPEN
* to include additional client metadata fields.
*/
-static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
+static struct ceph_msg *
+create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
{
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
@@ -1270,6 +1545,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
int metadata_key_count = 0;
struct ceph_options *opt = mdsc->fsc->client->options;
struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
+ struct ceph_client *cl = mdsc->fsc->client;
size_t size, count;
void *p, *end;
int ret;
@@ -1304,27 +1580,30 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
size = METRIC_BYTES(count);
extra_bytes += 2 + 4 + 4 + size;
+ /* flags, mds auth caps and oldest_client_tid */
+ extra_bytes += 4 + 4 + 8;
+
/* Allocate the message */
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
GFP_NOFS, false);
if (!msg) {
- pr_err("ENOMEM creating session open msg\n");
+ pr_err_client(cl, "ENOMEM creating session open msg\n");
return ERR_PTR(-ENOMEM);
}
p = msg->front.iov_base;
end = p + msg->front.iov_len;
h = p;
- h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
+ h->op = cpu_to_le32(op);
h->seq = cpu_to_le64(seq);
/*
* Serialize client metadata into waiting buffer space, using
* the format that userspace expects for map<string, string>
*
- * ClientSession messages with metadata are v4
+ * ClientSession messages with metadata are v7
*/
- msg->hdr.version = cpu_to_le16(4);
+ msg->hdr.version = cpu_to_le16(7);
msg->hdr.compat_version = cpu_to_le16(1);
/* The write pointer, following the session_head structure */
@@ -1348,18 +1627,27 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
ret = encode_supported_features(&p, end);
if (ret) {
- pr_err("encode_supported_features failed!\n");
+ pr_err_client(cl, "encode_supported_features failed!\n");
ceph_msg_put(msg);
return ERR_PTR(ret);
}
ret = encode_metric_spec(&p, end);
if (ret) {
- pr_err("encode_metric_spec failed!\n");
+ pr_err_client(cl, "encode_metric_spec failed!\n");
ceph_msg_put(msg);
return ERR_PTR(ret);
}
+ /* version == 5, flags */
+ ceph_encode_32(&p, 0);
+
+ /* version == 6, mds auth caps */
+ ceph_encode_32(&p, 0);
+
+ /* version == 7, oldest_client_tid */
+ ceph_encode_64(&p, mdsc->oldest_tid);
+
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -1378,15 +1666,19 @@ static int __open_session(struct ceph_mds_client *mdsc,
int mstate;
int mds = session->s_mds;
+ if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
+ return -EIO;
+
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
- dout("open_session to mds%d (%s)\n", mds,
- ceph_mds_state_name(mstate));
+ doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds,
+ ceph_mds_state_name(mstate));
session->s_state = CEPH_MDS_SESSION_OPENING;
session->s_renew_requested = jiffies;
/* send connect message */
- msg = create_session_open_msg(mdsc, session->s_seq);
+ msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_OPEN,
+ session->s_seq);
if (IS_ERR(msg))
return PTR_ERR(msg);
ceph_con_send(&session->s_con, msg);
@@ -1424,8 +1716,9 @@ struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
struct ceph_mds_session *session;
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("open_export_target_session to mds%d\n", target);
+ doutc(cl, "to mds%d\n", target);
mutex_lock(&mdsc->mutex);
session = __open_export_target_session(mdsc, target);
@@ -1440,13 +1733,14 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_info *mi;
struct ceph_mds_session *ts;
int i, mds = session->s_mds;
+ struct ceph_client *cl = mdsc->fsc->client;
if (mds >= mdsc->mdsmap->possible_max_rank)
return;
mi = &mdsc->mdsmap->m_info[mds];
- dout("open_export_target_sessions for mds%d (%d targets)\n",
- session->s_mds, mi->num_export_targets);
+ doutc(cl, "for mds%d (%d targets)\n", session->s_mds,
+ mi->num_export_targets);
for (i = 0; i < mi->num_export_targets; i++) {
ts = __open_export_target_session(mdsc, mi->export_targets[i]);
@@ -1454,14 +1748,6 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
}
}
-void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
-{
- mutex_lock(&mdsc->mutex);
- __open_export_target_sessions(mdsc, session);
- mutex_unlock(&mdsc->mutex);
-}
-
/*
* session caps
*/
@@ -1469,11 +1755,13 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
static void detach_cap_releases(struct ceph_mds_session *session,
struct list_head *target)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
+
lockdep_assert_held(&session->s_cap_lock);
list_splice_init(&session->s_cap_releases, target);
session->s_num_cap_releases = 0;
- dout("dispose_cap_releases mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
}
static void dispose_cap_releases(struct ceph_mds_client *mdsc,
@@ -1491,16 +1779,17 @@ static void dispose_cap_releases(struct ceph_mds_client *mdsc,
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct rb_node *p;
- dout("cleanup_session_requests mds%d\n", session->s_mds);
+ doutc(cl, "mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
while (!list_empty(&session->s_unsafe)) {
req = list_first_entry(&session->s_unsafe,
struct ceph_mds_request, r_unsafe_item);
- pr_warn_ratelimited(" dropping unsafe request %llu\n",
- req->r_tid);
+ pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n",
+ req->r_tid);
if (req->r_target_inode)
mapping_set_error(req->r_target_inode->i_mapping, -EIO);
if (req->r_unsafe_dir)
@@ -1526,26 +1815,30 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
* Caller must hold session s_mutex.
*/
int ceph_iterate_session_caps(struct ceph_mds_session *session,
- int (*cb)(struct inode *, struct ceph_cap *,
- void *), void *arg)
+ int (*cb)(struct inode *, int mds, void *),
+ void *arg)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct list_head *p;
struct ceph_cap *cap;
struct inode *inode, *last_inode = NULL;
struct ceph_cap *old_cap = NULL;
int ret;
- dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
+ doutc(cl, "%p mds%d\n", session, session->s_mds);
spin_lock(&session->s_cap_lock);
p = session->s_caps.next;
while (p != &session->s_caps) {
+ int mds;
+
cap = list_entry(p, struct ceph_cap, session_caps);
- inode = igrab(&cap->ci->vfs_inode);
+ inode = igrab(&cap->ci->netfs.inode);
if (!inode) {
p = p->next;
continue;
}
session->s_cap_iterator = cap;
+ mds = cap->mds;
spin_unlock(&session->s_cap_lock);
if (last_inode) {
@@ -1557,14 +1850,13 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
old_cap = NULL;
}
- ret = cb(inode, cap, arg);
+ ret = cb(inode, mds, arg);
last_inode = inode;
spin_lock(&session->s_cap_lock);
p = p->next;
if (!cap->ci) {
- dout("iterate_session_caps finishing cap %p removal\n",
- cap);
+ doutc(cl, "finishing cap %p removal\n", cap);
BUG_ON(cap->session != session);
cap->session = NULL;
list_del_init(&cap->session_caps);
@@ -1590,129 +1882,29 @@ out:
return ret;
}
-static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_cap_snap *capsnap;
- int capsnap_release = 0;
-
- lockdep_assert_held(&ci->i_ceph_lock);
-
- dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
-
- while (!list_empty(&ci->i_cap_snaps)) {
- capsnap = list_first_entry(&ci->i_cap_snaps,
- struct ceph_cap_snap, ci_item);
- __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
- ceph_put_snap_context(capsnap->context);
- ceph_put_cap_snap(capsnap);
- capsnap_release++;
- }
- wake_up_all(&ci->i_cap_wq);
- wake_up_all(&mdsc->cap_flushing_wq);
- return capsnap_release;
-}
-
-static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
- void *arg)
+static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
{
- struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
- struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
- LIST_HEAD(to_remove);
- bool dirty_dropped = false;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
bool invalidate = false;
- int capsnap_release = 0;
+ struct ceph_cap *cap;
+ int iputs = 0;
- dout("removing cap %p, ci is %p, inode is %p\n",
- cap, ci, &ci->vfs_inode);
spin_lock(&ci->i_ceph_lock);
- __ceph_remove_cap(cap, false);
- if (!ci->i_auth_cap) {
- struct ceph_cap_flush *cf;
-
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- if (inode->i_data.nrpages > 0)
- invalidate = true;
- if (ci->i_wrbuffer_ref > 0)
- mapping_set_error(&inode->i_data, -EIO);
- }
-
- while (!list_empty(&ci->i_cap_flush_list)) {
- cf = list_first_entry(&ci->i_cap_flush_list,
- struct ceph_cap_flush, i_list);
- list_move(&cf->i_list, &to_remove);
- }
-
- spin_lock(&mdsc->cap_dirty_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (cap) {
+ doutc(cl, " removing cap %p, ci is %p, inode is %p\n",
+ cap, ci, &ci->netfs.inode);
- list_for_each_entry(cf, &to_remove, i_list)
- list_del_init(&cf->g_list);
-
- if (!list_empty(&ci->i_dirty_item)) {
- pr_warn_ratelimited(
- " dropping dirty %s state for %p %lld\n",
- ceph_cap_string(ci->i_dirty_caps),
- inode, ceph_ino(inode));
- ci->i_dirty_caps = 0;
- list_del_init(&ci->i_dirty_item);
- dirty_dropped = true;
- }
- if (!list_empty(&ci->i_flushing_item)) {
- pr_warn_ratelimited(
- " dropping dirty+flushing %s state for %p %lld\n",
- ceph_cap_string(ci->i_flushing_caps),
- inode, ceph_ino(inode));
- ci->i_flushing_caps = 0;
- list_del_init(&ci->i_flushing_item);
- mdsc->num_cap_flushing--;
- dirty_dropped = true;
- }
- spin_unlock(&mdsc->cap_dirty_lock);
-
- if (dirty_dropped) {
- mapping_set_error(inode->i_mapping, -EIO);
-
- if (ci->i_wrbuffer_ref_head == 0 &&
- ci->i_wr_ref == 0 &&
- ci->i_dirty_caps == 0 &&
- ci->i_flushing_caps == 0) {
- ceph_put_snap_context(ci->i_head_snapc);
- ci->i_head_snapc = NULL;
- }
- }
-
- if (atomic_read(&ci->i_filelock_ref) > 0) {
- /* make further file lock syscall return -EIO */
- ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
- pr_warn_ratelimited(" dropping file locks for %p %lld\n",
- inode, ceph_ino(inode));
- }
-
- if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
- list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
- ci->i_prealloc_cap_flush = NULL;
- }
-
- if (!list_empty(&ci->i_cap_snaps))
- capsnap_release = remove_capsnaps(mdsc, inode);
+ iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
}
spin_unlock(&ci->i_ceph_lock);
- while (!list_empty(&to_remove)) {
- struct ceph_cap_flush *cf;
- cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, i_list);
- list_del_init(&cf->i_list);
- if (!cf->is_capsnap)
- ceph_free_cap_flush(cf);
- }
- wake_up_all(&ci->i_cap_wq);
+ if (cap)
+ wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
- if (dirty_dropped)
- iput(inode);
- while (capsnap_release--)
+ while (iputs--)
iput(inode);
return 0;
}
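/*
 * Illustrative sketch, not part of this change: with the new callback
 * signature, ceph_iterate_session_caps() callbacks receive only the inode
 * and the mds rank; a callback that still needs the struct ceph_cap must
 * look it up itself under i_ceph_lock, as the callbacks in this file now
 * do. The name count_caps_cb is hypothetical.
 */
static int count_caps_cb(struct inode *inode, int mds, void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int *count = arg;

	spin_lock(&ci->i_ceph_lock);
	if (__get_cap_for_mds(ci, mds))
		(*count)++;
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}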
@@ -1726,7 +1918,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
struct super_block *sb = fsc->sb;
LIST_HEAD(dispose);
- dout("remove_session_caps on %p\n", session);
+ doutc(fsc->client, "on %p\n", session);
ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
wake_up_all(&fsc->mdsc->cap_flushing_wq);
@@ -1780,8 +1972,7 @@ enum {
*
* caller must hold s_mutex.
*/
-static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
- void *arg)
+static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned long ev = (unsigned long)arg;
@@ -1792,12 +1983,14 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
} else if (ev == RENEWCAPS) {
- if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
- /* mds did not re-issue stale cap */
- spin_lock(&ci->i_ceph_lock);
+ struct ceph_cap *cap;
+
+ spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ /* mds did not re-issue stale cap */
+ if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
cap->issued = cap->implemented = CEPH_CAP_PIN;
- spin_unlock(&ci->i_ceph_lock);
- }
+ spin_unlock(&ci->i_ceph_lock);
} else if (ev == FORCE_RO) {
}
wake_up_all(&ci->i_cap_wq);
@@ -1806,7 +1999,9 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
- dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
+
+ doutc(cl, "session %p mds%d\n", session, session->s_mds);
ceph_iterate_session_caps(session, wake_up_session_cb,
(void *)(unsigned long)ev);
}
@@ -1820,29 +2015,30 @@ static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
static int send_renew_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
int state;
if (time_after_eq(jiffies, session->s_cap_ttl) &&
time_after_eq(session->s_cap_ttl, session->s_renew_requested))
- pr_info("mds%d caps stale\n", session->s_mds);
+ pr_info_client(cl, "mds%d caps stale\n", session->s_mds);
session->s_renew_requested = jiffies;
/* do not try to renew caps until a recovering mds has reconnected
* with its clients. */
state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
if (state < CEPH_MDS_STATE_RECONNECT) {
- dout("send_renew_caps ignoring mds%d (%s)\n",
- session->s_mds, ceph_mds_state_name(state));
+ doutc(cl, "ignoring mds%d (%s)\n", session->s_mds,
+ ceph_mds_state_name(state));
return 0;
}
- dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
- ceph_mds_state_name(state));
- msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
+ doutc(cl, "to mds%d (%s)\n", session->s_mds,
+ ceph_mds_state_name(state));
+ msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
- if (!msg)
- return -ENOMEM;
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
ceph_con_send(&session->s_con, msg);
return 0;
}
@@ -1850,10 +2046,11 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session, u64 seq)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
- dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
- session->s_mds, ceph_session_state_name(session->s_state), seq);
+ doutc(cl, "to mds%d (%s)s seq %lld\n", session->s_mds,
+ ceph_session_state_name(session->s_state), seq);
msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
if (!msg)
return -ENOMEM;
@@ -1870,6 +2067,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
static void renewed_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session, int is_renew)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int was_stale;
int wake = 0;
@@ -1881,15 +2079,17 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
if (was_stale) {
if (time_before(jiffies, session->s_cap_ttl)) {
- pr_info("mds%d caps renewed\n", session->s_mds);
+ pr_info_client(cl, "mds%d caps renewed\n",
+ session->s_mds);
wake = 1;
} else {
- pr_info("mds%d caps still stale\n", session->s_mds);
+ pr_info_client(cl, "mds%d caps still stale\n",
+ session->s_mds);
}
}
- dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
- session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
- time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
+ doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds,
+ session->s_cap_ttl, was_stale ? "stale" : "fresh",
+ time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
spin_unlock(&session->s_cap_lock);
if (wake)
@@ -1901,11 +2101,11 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
*/
static int request_close_session(struct ceph_mds_session *session)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_msg *msg;
- dout("request_close_session mds%d state %s seq %lld\n",
- session->s_mds, ceph_session_state_name(session->s_state),
- session->s_seq);
+ doutc(cl, "mds%d state %s seq %lld\n", session->s_mds,
+ ceph_session_state_name(session->s_state), session->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
session->s_seq);
if (!msg)
@@ -1935,7 +2135,7 @@ static bool drop_negative_children(struct dentry *dentry)
goto out;
spin_lock(&dentry->d_lock);
- list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+ hlist_for_each_entry(child, &dentry->d_children, d_sib) {
if (d_really_is_positive(child)) {
all_negative = false;
break;
@@ -1959,24 +2159,33 @@ out:
* Yes, this is a bit sloppy. Our only real goal here is to respond to
* memory pressure from the MDS, though, so it needn't be perfect.
*/
-static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
+static int trim_caps_cb(struct inode *inode, int mds, void *arg)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
int *remaining = arg;
struct ceph_inode_info *ci = ceph_inode(inode);
int used, wanted, oissued, mine;
+ struct ceph_cap *cap;
if (*remaining <= 0)
return -1;
spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (!cap) {
+ spin_unlock(&ci->i_ceph_lock);
+ return 0;
+ }
mine = cap->issued | cap->implemented;
used = __ceph_caps_used(ci);
wanted = __ceph_caps_file_wanted(ci);
oissued = __ceph_caps_issued_other(ci, cap);
- dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
- inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
- ceph_cap_string(used), ceph_cap_string(wanted));
+ doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n",
+ inode, ceph_vinop(inode), cap, ceph_cap_string(mine),
+ ceph_cap_string(oissued), ceph_cap_string(used),
+ ceph_cap_string(wanted));
if (cap == ci->i_auth_cap) {
if (ci->i_dirty_caps || ci->i_flushing_caps ||
!list_empty(&ci->i_cap_snaps))
@@ -2002,7 +2211,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
if (oissued) {
/* we aren't the only cap.. just remove us */
- ceph_remove_cap(cap, true);
+ ceph_remove_cap(mdsc, cap, true);
(*remaining)--;
} else {
struct dentry *dentry;
@@ -2013,11 +2222,11 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
int count;
dput(dentry);
d_prune_aliases(inode);
- count = atomic_read(&inode->i_count);
+ count = icount_read(inode);
if (count == 1)
(*remaining)--;
- dout("trim_caps_cb %p cap %p pruned, count now %d\n",
- inode, cap, count);
+ doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
+ inode, ceph_vinop(inode), cap, count);
} else {
dput(dentry);
}
@@ -2036,26 +2245,28 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
int max_caps)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int trim_caps = session->s_nr_caps - max_caps;
- dout("trim_caps mds%d start: %d / %d, trim %d\n",
- session->s_mds, session->s_nr_caps, max_caps, trim_caps);
+ doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds,
+ session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
int remaining = trim_caps;
ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
- dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
- session->s_mds, session->s_nr_caps, max_caps,
- trim_caps - remaining);
+ doutc(cl, "mds%d done: %d / %d, trimmed %d\n",
+ session->s_mds, session->s_nr_caps, max_caps,
+ trim_caps - remaining);
}
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
return 0;
}
static int check_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int ret = 1;
spin_lock(&mdsc->cap_dirty_lock);
@@ -2064,8 +2275,8 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
list_first_entry(&mdsc->cap_flush_list,
struct ceph_cap_flush, g_list);
if (cf->tid <= want_flush_tid) {
- dout("check_caps_flush still flushing tid "
- "%llu <= %llu\n", cf->tid, want_flush_tid);
+ doutc(cl, "still flushing tid %llu <= %llu\n",
+ cf->tid, want_flush_tid);
ret = 0;
}
}
@@ -2081,12 +2292,14 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
static void wait_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
- dout("check_caps_flush want %llu\n", want_flush_tid);
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "want %llu\n", want_flush_tid);
wait_event(mdsc->cap_flushing_wq,
check_caps_flush(mdsc, want_flush_tid));
- dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
+ doutc(cl, "ok, flushed thru %llu\n", want_flush_tid);
}
/*
@@ -2095,6 +2308,7 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc,
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg = NULL;
struct ceph_mds_cap_release *head;
struct ceph_mds_cap_item *item;
@@ -2141,7 +2355,7 @@ again:
item->ino = cpu_to_le64(cap->cap_ino);
item->cap_id = cpu_to_le64(cap->cap_id);
item->migrate_seq = cpu_to_le32(cap->mseq);
- item->seq = cpu_to_le32(cap->issue_seq);
+ item->issue_seq = cpu_to_le32(cap->issue_seq);
msg->front.iov_len += sizeof(*item);
ceph_put_cap(mdsc, cap);
@@ -2153,7 +2367,7 @@ again:
msg->front.iov_len += sizeof(*cap_barrier);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+ doutc(cl, "mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
msg = NULL;
}
@@ -2173,13 +2387,13 @@ again:
msg->front.iov_len += sizeof(*cap_barrier);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+ doutc(cl, "mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
}
return;
out_err:
- pr_err("send_cap_releases mds%d, failed to allocate message\n",
- session->s_mds);
+ pr_err_client(cl, "mds%d, failed to allocate message\n",
+ session->s_mds);
spin_lock(&session->s_cap_lock);
list_splice(&tmp_list, &session->s_cap_releases);
session->s_num_cap_releases += num_cap_releases;
@@ -2199,19 +2413,20 @@ static void ceph_cap_release_work(struct work_struct *work)
ceph_put_mds_session(session);
}
-void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
ceph_get_mds_session(session);
if (queue_work(mdsc->fsc->cap_wq,
&session->s_cap_release_work)) {
- dout("cap release work queued\n");
+ doutc(cl, "cap release work queued\n");
} else {
ceph_put_mds_session(session);
- dout("failed to queue cap release work\n");
+ doutc(cl, "failed to queue cap release work\n");
}
}
@@ -2225,7 +2440,7 @@ void __ceph_queue_cap_release(struct ceph_mds_session *session,
session->s_num_cap_releases++;
if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
- ceph_flush_cap_releases(session->s_mdsc, session);
+ ceph_flush_session_cap_releases(session->s_mdsc, session);
}
static void ceph_cap_reclaim_work(struct work_struct *work)
@@ -2239,13 +2454,14 @@ static void ceph_cap_reclaim_work(struct work_struct *work)
void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
- dout("caps reclaim work queued\n");
+ doutc(cl, "caps reclaim work queued\n");
} else {
- dout("failed to queue caps release work\n");
+ doutc(cl, "failed to queue caps release work\n");
}
}
@@ -2261,6 +2477,50 @@ void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
}
}
+void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
+{
+ struct ceph_client *cl = mdsc->fsc->client;
+ if (mdsc->stopping)
+ return;
+
+ if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) {
+ doutc(cl, "caps unlink work queued\n");
+ } else {
+ doutc(cl, "failed to queue caps unlink work\n");
+ }
+}
+
+static void ceph_cap_unlink_work(struct work_struct *work)
+{
+ struct ceph_mds_client *mdsc =
+ container_of(work, struct ceph_mds_client, cap_unlink_work);
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+ spin_lock(&mdsc->cap_delay_lock);
+ while (!list_empty(&mdsc->cap_unlink_delay_list)) {
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+
+ ci = list_first_entry(&mdsc->cap_unlink_delay_list,
+ struct ceph_inode_info,
+ i_cap_delay_list);
+ list_del_init(&ci->i_cap_delay_list);
+
+ inode = igrab(&ci->netfs.inode);
+ if (inode) {
+ spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+ iput(inode);
+ spin_lock(&mdsc->cap_delay_lock);
+ }
+ }
+ spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "done\n");
+}
+
/*
* requests
*/
@@ -2273,6 +2533,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
size_t size = sizeof(struct ceph_mds_reply_dir_entry);
unsigned int num_entries;
+ u64 bytes_count;
int order;
spin_lock(&ci->i_ceph_lock);
@@ -2281,16 +2542,21 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
num_entries = max(num_entries, 1U);
num_entries = min(num_entries, opt->max_readdir);
- order = get_order(size * num_entries);
+ bytes_count = (u64)size * num_entries;
+ if (unlikely(bytes_count > ULONG_MAX))
+ bytes_count = ULONG_MAX;
+
+ order = get_order((unsigned long)bytes_count);
while (order >= 0) {
rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
- __GFP_NOWARN,
+ __GFP_NOWARN |
+ __GFP_ZERO,
order);
if (rinfo->dir_entries)
break;
order--;
}
- if (!rinfo->dir_entries)
+ if (!rinfo->dir_entries || unlikely(order < 0))
return -ENOMEM;
num_entries = (PAGE_SIZE << order) / size;
@@ -2323,6 +2589,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
INIT_LIST_HEAD(&req->r_unsafe_dir_item);
INIT_LIST_HEAD(&req->r_unsafe_target_item);
req->r_fmode = -1;
+ req->r_feature_needed = -1;
kref_init(&req->r_kref);
RB_CLEAR_NODE(&req->r_node);
INIT_LIST_HEAD(&req->r_wait);
@@ -2355,20 +2622,94 @@ static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
return mdsc->oldest_tid;
}
-/*
- * Build a dentry's path. Allocate on heap; caller must kfree. Based
- * on build_path_from_dentry in fs/cifs/dir.c.
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+{
+ struct inode *dir = req->r_parent;
+ struct dentry *dentry = req->r_dentry;
+ const struct qstr *name = req->r_dname;
+ u8 *cryptbuf = NULL;
+ u32 len = 0;
+ int ret = 0;
+
+ /* only encode if we have parent and dentry */
+ if (!dir || !dentry)
+ goto success;
+
+ /* No-op unless this is encrypted */
+ if (!IS_ENCRYPTED(dir))
+ goto success;
+
+ ret = ceph_fscrypt_prepare_readdir(dir);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* No key? Just ignore it. */
+ if (!fscrypt_has_encryption_key(dir))
+ goto success;
+
+ if (!name)
+ name = &dentry->d_name;
+
+ if (!fscrypt_fname_encrypted_size(dir, name->len, NAME_MAX, &len)) {
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ /* No need to append altname if name is short enough */
+ if (len <= CEPH_NOHASH_NAME_MAX) {
+ len = 0;
+ goto success;
+ }
+
+ cryptbuf = kmalloc(len, GFP_KERNEL);
+ if (!cryptbuf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = fscrypt_fname_encrypt(dir, name, cryptbuf, len);
+ if (ret) {
+ kfree(cryptbuf);
+ return ERR_PTR(ret);
+ }
+success:
+ *plen = len;
+ return cryptbuf;
+}
+#else
+static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+{
+ *plen = 0;
+ return NULL;
+}
+#endif
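/*
 * Illustrative usage sketch, not part of this change: the caller stores the
 * returned buffer in the request and owns it (freed with the request); an
 * ERR_PTR return aborts building the request. This mirrors how
 * create_request_message() uses the helper later in this file; the function
 * name example_attach_altname is hypothetical.
 */
static int example_attach_altname(struct ceph_mds_request *req)
{
	req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
	if (IS_ERR(req->r_altname)) {
		int err = PTR_ERR(req->r_altname);

		req->r_altname = NULL;
		return err;
	}
	return 0;
}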
+
+/**
+ * ceph_mdsc_build_path - build a path string to a given dentry
+ * @mdsc: mds client
+ * @dentry: dentry to which path should be built
+ * @path_info: output path, length, base ino+snap, and freepath ownership flag
+ * @for_wire: is this path going to be sent to the MDS?
+ *
+ * Build a string that represents the path to the dentry. This is mostly called
+ * for two different purposes:
+ *
+ * 1) we need to build a path string to send to the MDS (for_wire == true)
+ * 2) we need a path string for local presentation (e.g. debugfs)
+ * (for_wire == false)
*
- * If @stop_on_nosnap, generate path relative to the first non-snapped
- * inode.
+ * The path is built in reverse, starting with the dentry. Walk back up toward
+ * the root, building the path until the first non-snapped inode is reached
+ * (for_wire) or the root inode is reached (!for_wire).
*
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
-char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
- int stop_on_nosnap)
+char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+ struct ceph_path_info *path_info, int for_wire)
{
- struct dentry *temp;
+ struct ceph_client *cl = mdsc->fsc->client;
+ struct dentry *cur;
+ struct inode *inode;
char *path;
int pos;
unsigned seq;
@@ -2385,34 +2726,71 @@ retry:
path[pos] = '\0';
seq = read_seqbegin(&rename_lock);
- rcu_read_lock();
- temp = dentry;
+ cur = dget(dentry);
for (;;) {
- struct inode *inode;
+ struct dentry *parent;
- spin_lock(&temp->d_lock);
- inode = d_inode(temp);
+ spin_lock(&cur->d_lock);
+ inode = d_inode(cur);
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
- dout("build_path path+%d: %p SNAPDIR\n",
- pos, temp);
- } else if (stop_on_nosnap && inode && dentry != temp &&
+ doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
+ spin_unlock(&cur->d_lock);
+ parent = dget_parent(cur);
+ } else if (for_wire && inode && dentry != cur &&
ceph_snap(inode) == CEPH_NOSNAP) {
- spin_unlock(&temp->d_lock);
+ spin_unlock(&cur->d_lock);
pos++; /* get rid of any prepended '/' */
break;
+ } else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) {
+ pos -= cur->d_name.len;
+ if (pos < 0) {
+ spin_unlock(&cur->d_lock);
+ break;
+ }
+ memcpy(path + pos, cur->d_name.name, cur->d_name.len);
+ spin_unlock(&cur->d_lock);
+ parent = dget_parent(cur);
} else {
- pos -= temp->d_name.len;
+ int len, ret;
+ char buf[NAME_MAX];
+
+ /*
+ * Proactively copy name into buf, in case we need to
+ * present it as-is.
+ */
+ memcpy(buf, cur->d_name.name, cur->d_name.len);
+ len = cur->d_name.len;
+ spin_unlock(&cur->d_lock);
+ parent = dget_parent(cur);
+
+ ret = ceph_fscrypt_prepare_readdir(d_inode(parent));
+ if (ret < 0) {
+ dput(parent);
+ dput(cur);
+ return ERR_PTR(ret);
+ }
+
+ if (fscrypt_has_encryption_key(d_inode(parent))) {
+ len = ceph_encode_encrypted_dname(d_inode(parent),
+ buf, len);
+ if (len < 0) {
+ dput(parent);
+ dput(cur);
+ return ERR_PTR(len);
+ }
+ }
+ pos -= len;
if (pos < 0) {
- spin_unlock(&temp->d_lock);
+ dput(parent);
break;
}
- memcpy(path + pos, temp->d_name.name, temp->d_name.len);
+ memcpy(path + pos, buf, len);
}
- spin_unlock(&temp->d_lock);
- temp = READ_ONCE(temp->d_parent);
+ dput(cur);
+ cur = parent;
/* Are we at the root? */
- if (IS_ROOT(temp))
+ if (IS_ROOT(cur))
break;
/* Are we out of buffer? */
@@ -2421,73 +2799,93 @@ retry:
path[pos] = '/';
}
- base = ceph_ino(d_inode(temp));
- rcu_read_unlock();
+ inode = d_inode(cur);
+ base = inode ? ceph_ino(inode) : 0;
+ dput(cur);
if (read_seqretry(&rename_lock, seq))
goto retry;
if (pos < 0) {
/*
- * A rename didn't occur, but somehow we didn't end up where
- * we thought we would. Throw a warning and try again.
+ * The path is longer than PATH_MAX and this function
+ * cannot ever succeed. Creating paths that long is
+ * possible with Ceph, but Linux cannot use them.
*/
- pr_warn("build_path did not end path lookup where "
- "expected, pos is %d\n", pos);
- goto retry;
+ return ERR_PTR(-ENAMETOOLONG);
}
- *pbase = base;
- *plen = PATH_MAX - 1 - pos;
- dout("build_path on %p %d built %llx '%.*s'\n",
- dentry, d_count(dentry), base, *plen, path + pos);
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
+ path_info->vino.ino = base;
+ path_info->pathlen = PATH_MAX - 1 - pos;
+ path_info->path = path + pos;
+ path_info->freepath = true;
+
+ /* Set snap from dentry if available */
+ if (d_inode(dentry))
+ path_info->vino.snap = ceph_snap(d_inode(dentry));
+ else
+ path_info->vino.snap = CEPH_NOSNAP;
+
+ doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
+ base, PATH_MAX - 1 - pos, path + pos);
return path + pos;
}
-static int build_dentry_path(struct dentry *dentry, struct inode *dir,
- const char **ppath, int *ppathlen, u64 *pino,
- bool *pfreepath, bool parent_locked)
+static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+ struct inode *dir, struct ceph_path_info *path_info,
+ bool parent_locked)
{
char *path;
rcu_read_lock();
if (!dir)
dir = d_inode_rcu(dentry->d_parent);
- if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
- *pino = ceph_ino(dir);
+ if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
+ !IS_ENCRYPTED(dir)) {
+ path_info->vino.ino = ceph_ino(dir);
+ path_info->vino.snap = ceph_snap(dir);
rcu_read_unlock();
- *ppath = dentry->d_name.name;
- *ppathlen = dentry->d_name.len;
+ path_info->path = dentry->d_name.name;
+ path_info->pathlen = dentry->d_name.len;
+ path_info->freepath = false;
return 0;
}
rcu_read_unlock();
- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap handling.
+ */
return 0;
}
-static int build_inode_path(struct inode *inode,
- const char **ppath, int *ppathlen, u64 *pino,
- bool *pfreepath)
+static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct dentry *dentry;
char *path;
if (ceph_snap(inode) == CEPH_NOSNAP) {
- *pino = ceph_ino(inode);
- *ppathlen = 0;
+ path_info->vino.ino = ceph_ino(inode);
+ path_info->vino.snap = ceph_snap(inode);
+ path_info->pathlen = 0;
+ path_info->freepath = false;
return 0;
}
dentry = d_find_alias(inode);
- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap from dentry.
+ * Override with inode's snap since that's what this function is for.
+ */
+ path_info->vino.snap = ceph_snap(inode);
return 0;
}
@@ -2495,34 +2893,41 @@ static int build_inode_path(struct inode *inode,
* request arguments may be specified via an inode *, a dentry *, or
* an explicit ino+path.
*/
-static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
- struct inode *rdiri, const char *rpath,
- u64 rino, const char **ppath, int *pathlen,
- u64 *ino, bool *freepath, bool parent_locked)
+static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
+ struct dentry *rdentry, struct inode *rdiri,
+ const char *rpath, u64 rino,
+ struct ceph_path_info *path_info,
+ bool parent_locked)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int r = 0;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
if (rinode) {
- r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
- dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
- ceph_snap(rinode));
+ r = build_inode_path(rinode, path_info);
+ doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ ceph_snap(rinode));
} else if (rdentry) {
- r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
- freepath, parent_locked);
- dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
- *ppath);
+ r = build_dentry_path(mdsc, rdentry, rdiri, path_info, parent_locked);
+ doutc(cl, " dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino,
+ path_info->pathlen, path_info->path);
} else if (rpath || rino) {
- *ino = rino;
- *ppath = rpath;
- *pathlen = rpath ? strlen(rpath) : 0;
- dout(" path %.*s\n", *pathlen, rpath);
+ path_info->vino.ino = rino;
+ path_info->vino.snap = CEPH_NOSNAP;
+ path_info->path = rpath;
+ path_info->pathlen = rpath ? strlen(rpath) : 0;
+ path_info->freepath = false;
+
+ doutc(cl, " path %.*s\n", path_info->pathlen, rpath);
}
return r;
}
-static void encode_timestamp_and_gids(void **p,
- const struct ceph_mds_request *req)
+static void encode_mclientrequest_tail(void **p,
+ const struct ceph_mds_request *req)
{
struct ceph_timespec ts;
int i;
@@ -2530,11 +2935,54 @@ static void encode_timestamp_and_gids(void **p,
ceph_encode_timespec64(&ts, &req->r_stamp);
ceph_encode_copy(p, &ts, sizeof(ts));
- /* gid_list */
+ /* v4: gid_list */
ceph_encode_32(p, req->r_cred->group_info->ngroups);
for (i = 0; i < req->r_cred->group_info->ngroups; i++)
ceph_encode_64(p, from_kgid(&init_user_ns,
req->r_cred->group_info->gid[i]));
+
+ /* v5: altname */
+ ceph_encode_32(p, req->r_altname_len);
+ ceph_encode_copy(p, req->r_altname, req->r_altname_len);
+
+ /* v6: fscrypt_auth and fscrypt_file */
+ if (req->r_fscrypt_auth) {
+ u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth);
+
+ ceph_encode_32(p, authlen);
+ ceph_encode_copy(p, req->r_fscrypt_auth, authlen);
+ } else {
+ ceph_encode_32(p, 0);
+ }
+ if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) {
+ ceph_encode_32(p, sizeof(__le64));
+ ceph_encode_64(p, req->r_fscrypt_file);
+ } else {
+ ceph_encode_32(p, 0);
+ }
+}
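/*
 * Illustrative summary (not new protocol, just the tail encoded above):
 *
 *   ceph_timespec               r_stamp
 *   u32 ngroups + u64[ngroups]  gid list          (v4)
 *   u32 len + len bytes         alternate name    (v5)
 *   u32 len + len bytes         fscrypt_auth      (v6)
 *   u32 len [+ le64]            fscrypt_file      (v6)
 */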
+
+static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
+{
+ if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
+ return 1;
+
+ if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
+ return 2;
+
+ return CEPH_MDS_REQUEST_HEAD_VERSION;
+}
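/*
 * Illustrative helper, not part of this change: the negotiated head version
 * also decides which MClientRequest message version is advertised. This
 * mirrors the branches in create_request_message() below; the function name
 * is hypothetical.
 */
static inline u16 example_msg_version_for_head(bool legacy, u16 head_version)
{
	if (legacy)		/* peer lacks CEPH_FEATURE_FS_BTIME */
		return 3;
	if (head_version == 1)	/* no 32-bit retry/fwd support */
		return 4;
	return 6;		/* head version 2 or the full head */
}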
+
+static struct ceph_mds_request_head_legacy *
+find_legacy_request_head(void *p, u64 features)
+{
+ bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
+ struct ceph_mds_request_head *head;
+
+ if (legacy)
+ return (struct ceph_mds_request_head_legacy *)p;
+ head = (struct ceph_mds_request_head *)p;
+ return (struct ceph_mds_request_head_legacy *)&head->oldest_client_tid;
}
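/*
 * Note (illustrative; relies on the request head layout implied by the cast
 * above): the legacy head is laid out as a prefix of the new
 * struct ceph_mds_request_head starting at oldest_client_tid, so the same
 * code can fill the fields shared by both wire formats (op, caller uid/gid,
 * ino, args, ...); only the leading 'version' member and the extended fields
 * at the end are specific to the new head.
 */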
/*
@@ -2546,53 +2994,125 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
- struct ceph_mds_request_head_old *head;
- const char *path1 = NULL;
- const char *path2 = NULL;
- u64 ino1 = 0, ino2 = 0;
- int pathlen1 = 0, pathlen2 = 0;
- bool freepath1 = false, freepath2 = false;
+ struct ceph_mds_request_head_legacy *lhead;
+ struct ceph_path_info path_info1 = {0};
+ struct ceph_path_info path_info2 = {0};
+ struct dentry *old_dentry = NULL;
int len;
u16 releases;
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
-
- ret = set_request_path_attr(req->r_inode, req->r_dentry,
- req->r_parent, req->r_path1, req->r_ino1.ino,
- &path1, &pathlen1, &ino1, &freepath1,
- test_bit(CEPH_MDS_R_PARENT_LOCKED,
- &req->r_req_flags));
+ u16 request_head_version = mds_supported_head_version(session);
+ kuid_t caller_fsuid = req->r_cred->fsuid;
+ kgid_t caller_fsgid = req->r_cred->fsgid;
+ bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+
+ ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+ &path_info1, parent_locked);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
+ /*
+ * When the parent directory's i_rwsem is *not* locked, req->r_parent may
+ * have become stale (e.g. after a concurrent rename) between the time the
+ * dentry was looked up and now. If we detect that the stored r_parent
+ * does not match the inode number we just encoded for the request, switch
+ * to the correct inode so that the MDS receives a valid parent reference.
+ */
+ if (!parent_locked && req->r_parent && path_info1.vino.ino &&
+ ceph_ino(req->r_parent) != path_info1.vino.ino) {
+ struct inode *old_parent = req->r_parent;
+ struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL);
+ if (!IS_ERR(correct_dir)) {
+ WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n",
+ ceph_ino(old_parent), path_info1.vino.ino);
+ /*
+ * Transfer CEPH_CAP_PIN from the old parent to the new one.
+ * The pin was taken earlier in ceph_mdsc_submit_request().
+ */
+ ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN);
+ iput(old_parent);
+ req->r_parent = correct_dir;
+ ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ }
+ }
+
/* If r_old_dentry is set, then assume that its parent is locked */
- ret = set_request_path_attr(NULL, req->r_old_dentry,
- req->r_old_dentry_dir,
- req->r_path2, req->r_ino2.ino,
- &path2, &pathlen2, &ino2, &freepath2, true);
+ if (req->r_old_dentry &&
+ !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
+ old_dentry = req->r_old_dentry;
+ ret = set_request_path_attr(mdsc, NULL, old_dentry,
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+ &path_info2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
}
- len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
- len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
- sizeof(struct ceph_timespec);
- len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
+ req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
+ if (IS_ERR(req->r_altname)) {
+ msg = ERR_CAST(req->r_altname);
+ req->r_altname = NULL;
+ goto out_free2;
+ }
- /* calculate (max) length for cap releases */
+ /*
+ * Old cephs that don't support the 32-bit retry/fwd feature copy the
+ * raw memory directly when decoding requests, while new cephs decode
+ * the head according to its version member, so make sure the encoding
+ * stays compatible with both.
+ */
+ if (legacy)
+ len = sizeof(struct ceph_mds_request_head_legacy);
+ else if (request_head_version == 1)
+ len = offsetofend(struct ceph_mds_request_head, args);
+ else if (request_head_version == 2)
+ len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
+ else
+ len = sizeof(struct ceph_mds_request_head);
+
+ /* filepaths */
+ len += 2 * (1 + sizeof(u32) + sizeof(u64));
+ len += path_info1.pathlen + path_info2.pathlen;
+
+ /* cap releases */
len += sizeof(struct ceph_mds_request_release) *
(!!req->r_inode_drop + !!req->r_dentry_drop +
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
- len += pathlen1;
+ len += path_info1.pathlen;
if (req->r_old_dentry_drop)
- len += pathlen2;
+ len += path_info2.pathlen;
+
+ /* MClientRequest tail */
+
+ /* req->r_stamp */
+ len += sizeof(struct ceph_timespec);
+
+ /* gid list */
+ len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
+
+ /* alternate name */
+ len += sizeof(u32) + req->r_altname_len;
+
+ /* fscrypt_auth */
+ len += sizeof(u32); // fscrypt_auth
+ if (req->r_fscrypt_auth)
+ len += ceph_fscrypt_auth_len(req->r_fscrypt_auth);
+
+ /* fscrypt_file */
+ len += sizeof(u32);
+ if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags))
+ len += sizeof(__le64);
msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
if (!msg) {
@@ -2602,36 +3122,90 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.tid = cpu_to_le64(req->r_tid);
+ lhead = find_legacy_request_head(msg->front.iov_base,
+ session->s_con.peer_features);
+
+ if ((req->r_mnt_idmap != &nop_mnt_idmap) &&
+ !test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) {
+ WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op));
+
+ if (enable_unsafe_idmap) {
+ pr_warn_once_client(cl,
+ "idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
+ " is not supported by MDS. UID/GID-based restrictions may"
+ " not work properly.\n");
+
+ caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
+ VFSUIDT_INIT(req->r_cred->fsuid));
+ caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
+ VFSGIDT_INIT(req->r_cred->fsgid));
+ } else {
+ pr_err_ratelimited_client(cl,
+ "idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
+ " is not supported by MDS. Fail request with -EIO.\n");
+
+ ret = -EIO;
+ goto out_err;
+ }
+ }
+
/*
- * The old ceph_mds_request_head didn't contain a version field, and
+ * The ceph_mds_request_head_legacy didn't contain a version field, and
* one was added when we moved the message version from 3->4.
*/
if (legacy) {
msg->hdr.version = cpu_to_le16(3);
- head = msg->front.iov_base;
- p = msg->front.iov_base + sizeof(*head);
- } else {
- struct ceph_mds_request_head *new_head = msg->front.iov_base;
+ p = msg->front.iov_base + sizeof(*lhead);
+ } else if (request_head_version == 1) {
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(4);
- new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
- head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
- p = msg->front.iov_base + sizeof(*new_head);
+ nhead->version = cpu_to_le16(1);
+ p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, args);
+ } else if (request_head_version == 2) {
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
+
+ msg->hdr.version = cpu_to_le16(6);
+ nhead->version = cpu_to_le16(2);
+
+ p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd);
+ } else {
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
+ kuid_t owner_fsuid;
+ kgid_t owner_fsgid;
+
+ msg->hdr.version = cpu_to_le16(6);
+ nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
+ nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head));
+
+ if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
+ owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
+ VFSUIDT_INIT(req->r_cred->fsuid));
+ owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
+ VFSGIDT_INIT(req->r_cred->fsgid));
+ nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
+ nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
+ } else {
+ nhead->owner_uid = cpu_to_le32(-1);
+ nhead->owner_gid = cpu_to_le32(-1);
+ }
+
+ p = msg->front.iov_base + sizeof(*nhead);
}
end = msg->front.iov_base + msg->front.iov_len;
- head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
- head->op = cpu_to_le32(req->r_op);
- head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
- req->r_cred->fsuid));
- head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
- req->r_cred->fsgid));
- head->ino = cpu_to_le64(req->r_deleg_ino);
- head->args = req->r_args;
+ lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
+ lhead->op = cpu_to_le32(req->r_op);
+ lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
+ caller_fsuid));
+ lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
+ caller_fsgid));
+ lhead->ino = cpu_to_le64(req->r_deleg_ino);
+ lhead->args = req->r_args;
- ceph_encode_filepath(&p, end, ino1, path1);
- ceph_encode_filepath(&p, end, ino2, path2);
+ ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path);
+ ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path);
/* make note of release offset, in case we need to replay */
req->r_request_release_offset = p - msg->front.iov_base;
@@ -2643,15 +3217,23 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
req->r_inode ? req->r_inode : d_inode(req->r_dentry),
mds, req->r_inode_drop, req->r_inode_unless,
req->r_op == CEPH_MDS_OP_READDIR);
- if (req->r_dentry_drop)
- releases += ceph_encode_dentry_release(&p, req->r_dentry,
+ if (req->r_dentry_drop) {
+ ret = ceph_encode_dentry_release(&p, req->r_dentry,
req->r_parent, mds, req->r_dentry_drop,
req->r_dentry_unless);
- if (req->r_old_dentry_drop)
- releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
+ if (ret < 0)
+ goto out_err;
+ releases += ret;
+ }
+ if (req->r_old_dentry_drop) {
+ ret = ceph_encode_dentry_release(&p, req->r_old_dentry,
req->r_old_dentry_dir, mds,
req->r_old_dentry_drop,
req->r_old_dentry_unless);
+ if (ret < 0)
+ goto out_err;
+ releases += ret;
+ }
if (req->r_old_inode_drop)
releases += ceph_encode_inode_release(&p,
d_inode(req->r_old_dentry),
@@ -2662,9 +3244,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
p = msg->front.iov_base + req->r_request_release_offset;
}
- head->num_releases = cpu_to_le16(releases);
+ lhead->num_releases = cpu_to_le16(releases);
- encode_timestamp_and_gids(&p, req);
+ encode_mclientrequest_tail(&p, req);
if (WARN_ON_ONCE(p > end)) {
ceph_msg_put(msg);
@@ -2686,13 +3268,15 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.data_off = cpu_to_le16(0);
out_free2:
- if (freepath2)
- ceph_mdsc_free_path((char *)path2, pathlen2);
+ ceph_mdsc_free_path_info(&path_info2);
out_free1:
- if (freepath1)
- ceph_mdsc_free_path((char *)path1, pathlen1);
+ ceph_mdsc_free_path_info(&path_info1);
out:
return msg;
+out_err:
+ ceph_msg_put(msg);
+ msg = ERR_PTR(ret);
+ goto out_free2;
}
/*
@@ -2709,18 +3293,6 @@ static void complete_request(struct ceph_mds_client *mdsc,
complete_all(&req->r_completion);
}
-static struct ceph_mds_request_head_old *
-find_old_request_head(void *p, u64 features)
-{
- bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
- struct ceph_mds_request_head *new_head;
-
- if (legacy)
- return (struct ceph_mds_request_head_old *)p;
- new_head = (struct ceph_mds_request_head *)p;
- return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
-}
-
/*
* called under mdsc->mutex
*/
@@ -2730,9 +3302,30 @@ static int __prepare_send_request(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
- struct ceph_mds_request_head_old *rhead;
+ struct ceph_client *cl = mdsc->fsc->client;
+ struct ceph_mds_request_head_legacy *lhead;
+ struct ceph_mds_request_head *nhead;
struct ceph_msg *msg;
- int flags = 0;
+ int flags = 0, old_max_retry;
+ bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ &session->s_features);
+
+ /*
+ * Avoid infinite retrying after overflow. The client increases the
+ * retry count on every attempt; if the MDS is an old version with
+ * only an 8-bit retry counter, limit retries to at most 256.
+ */
+ if (req->r_attempts) {
+ old_max_retry = sizeof_field(struct ceph_mds_request_head,
+ num_retry);
+ old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
+ if ((old_version && req->r_attempts >= old_max_retry) ||
+ ((uint32_t)req->r_attempts >= U32_MAX)) {
+ pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n",
+ req->r_tid);
+ return -EMULTIHOP;
+ }
+ }
req->r_attempts++;
if (req->r_inode) {
@@ -2744,8 +3337,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
else
req->r_sent_on_mseq = -1;
}
- dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
- req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
+ doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid,
+ ceph_mds_op_name(req->r_op), req->r_attempts);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
void *p;
@@ -2757,23 +3350,27 @@ static int __prepare_send_request(struct ceph_mds_session *session,
* d_move mangles the src name.
*/
msg = req->r_request;
- rhead = find_old_request_head(msg->front.iov_base,
- session->s_con.peer_features);
+ lhead = find_legacy_request_head(msg->front.iov_base,
+ session->s_con.peer_features);
- flags = le32_to_cpu(rhead->flags);
+ flags = le32_to_cpu(lhead->flags);
flags |= CEPH_MDS_FLAG_REPLAY;
- rhead->flags = cpu_to_le32(flags);
+ lhead->flags = cpu_to_le32(flags);
if (req->r_target_inode)
- rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
+ lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
- rhead->num_retry = req->r_attempts - 1;
+ lhead->num_retry = req->r_attempts - 1;
+ if (!old_version) {
+ nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
+ nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
+ }
/* remove cap/dentry releases from message */
- rhead->num_releases = 0;
+ lhead->num_releases = 0;
p = msg->front.iov_base + req->r_request_release_offset;
- encode_timestamp_and_gids(&p, req);
+ encode_mclientrequest_tail(&p, req);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -2791,20 +3388,25 @@ static int __prepare_send_request(struct ceph_mds_session *session,
}
req->r_request = msg;
- rhead = find_old_request_head(msg->front.iov_base,
- session->s_con.peer_features);
- rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
+ lhead = find_legacy_request_head(msg->front.iov_base,
+ session->s_con.peer_features);
+ lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
flags |= CEPH_MDS_FLAG_REPLAY;
if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
flags |= CEPH_MDS_FLAG_ASYNC;
if (req->r_parent)
flags |= CEPH_MDS_FLAG_WANT_DENTRY;
- rhead->flags = cpu_to_le32(flags);
- rhead->num_fwd = req->r_num_fwd;
- rhead->num_retry = req->r_attempts - 1;
+ lhead->flags = cpu_to_le32(flags);
+ lhead->num_fwd = req->r_num_fwd;
+ lhead->num_retry = req->r_attempts - 1;
+ if (!old_version) {
+ nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
+ nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd);
+ nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
+ }
- dout(" r_parent = %p\n", req->r_parent);
+ doutc(cl, " r_parent = %p\n", req->r_parent);
return 0;
}
@@ -2832,6 +3434,7 @@ static int __send_request(struct ceph_mds_session *session,
static void __do_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *session = NULL;
int mds = -1;
int err = 0;
@@ -2843,25 +3446,30 @@ static void __do_request(struct ceph_mds_client *mdsc,
return;
}
+ if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
+ doutc(cl, "metadata corrupted\n");
+ err = -EIO;
+ goto finish;
+ }
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
- dout("do_request timed out\n");
+ doutc(cl, "timed out\n");
err = -ETIMEDOUT;
goto finish;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
- dout("do_request forced umount\n");
+ doutc(cl, "forced umount\n");
err = -EIO;
goto finish;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
if (mdsc->mdsmap_err) {
err = mdsc->mdsmap_err;
- dout("do_request mdsmap err %d\n", err);
+ doutc(cl, "mdsmap err %d\n", err);
goto finish;
}
if (mdsc->mdsmap->m_epoch == 0) {
- dout("do_request no mdsmap, waiting for map\n");
+ doutc(cl, "no mdsmap, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -2882,7 +3490,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
err = -EJUKEBOX;
goto finish;
}
- dout("do_request no mds or not active, waiting for map\n");
+ doutc(cl, "no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -2898,8 +3506,18 @@ static void __do_request(struct ceph_mds_client *mdsc,
}
req->r_session = ceph_get_mds_session(session);
- dout("do_request mds%d session %p state %s\n", mds, session,
- ceph_session_state_name(session->s_state));
+ doutc(cl, "mds%d session %p state %s\n", mds, session,
+ ceph_session_state_name(session->s_state));
+
+ /*
+ * Old ceph versions will crash the MDSs when they see unknown ops
+ */
+ if (req->r_feature_needed > 0 &&
+ !test_bit(req->r_feature_needed, &session->s_features)) {
+ err = -EOPNOTSUPP;
+ goto out_session;
+ }
+
if (session->s_state != CEPH_MDS_SESSION_OPEN &&
session->s_state != CEPH_MDS_SESSION_HUNG) {
/*
@@ -2944,13 +3562,71 @@ static void __do_request(struct ceph_mds_client *mdsc,
if (req->r_request_started == 0) /* note request start time */
req->r_request_started = jiffies;
+ /*
+ * For an async create we choose the auth MDS of the frag in the
+ * parent directory to send the request, and usually this works fine,
+ * but if the directory is migrated to another MDS before it can
+ * handle the request, the request will be forwarded.
+ *
+ * The auth cap will then have been changed.
+ */
+ if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
+ struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
+ struct ceph_inode_info *ci;
+ struct ceph_cap *cap;
+
+ /*
+ * The request may be handled very quickly and the new inode may not
+ * have been linked to the dentry yet. Wait for
+ * ceph_finish_async_create(), which in theory should not get stuck
+ * for long or fail, to finish before forwarding the request.
+ */
+ if (!d_inode(req->r_dentry)) {
+ err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
+ TASK_KILLABLE);
+ if (err) {
+ mutex_lock(&req->r_fill_mutex);
+ set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+ mutex_unlock(&req->r_fill_mutex);
+ goto out_session;
+ }
+ }
+
+ ci = ceph_inode(d_inode(req->r_dentry));
+
+ spin_lock(&ci->i_ceph_lock);
+ cap = ci->i_auth_cap;
+ if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
+ doutc(cl, "session changed for auth cap %d -> %d\n",
+ cap->session->s_mds, session->s_mds);
+
+ /* Remove the auth cap from old session */
+ spin_lock(&cap->session->s_cap_lock);
+ cap->session->s_nr_caps--;
+ list_del_init(&cap->session_caps);
+ spin_unlock(&cap->session->s_cap_lock);
+
+ /* Add the auth cap to the new session */
+ cap->mds = mds;
+ cap->session = session;
+ spin_lock(&session->s_cap_lock);
+ session->s_nr_caps++;
+ list_add_tail(&cap->session_caps, &session->s_caps);
+ spin_unlock(&session->s_cap_lock);
+
+ change_auth_cap_ses(ci, session);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ }
+
err = __send_request(session, req, false);
out_session:
ceph_put_mds_session(session);
finish:
if (err) {
- dout("__do_request early error %d\n", err);
+ doutc(cl, "early error %d\n", err);
req->r_err = err;
complete_request(mdsc, req);
__unregister_request(mdsc, req);
@@ -2964,6 +3640,7 @@ finish:
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
LIST_HEAD(tmp_list);
@@ -2973,7 +3650,8 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
req = list_entry(tmp_list.next,
struct ceph_mds_request, r_wait);
list_del_init(&req->r_wait);
- dout(" wake request %p tid %llu\n", req, req->r_tid);
+ doutc(cl, " wake request %p tid %llu\n", req,
+ req->r_tid);
__do_request(mdsc, req);
}
}
@@ -2984,10 +3662,11 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
*/
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct rb_node *p = rb_first(&mdsc->request_tree);
- dout("kick_requests mds%d\n", mds);
+ doutc(cl, "kick_requests mds%d\n", mds);
while (p) {
req = rb_entry(p, struct ceph_mds_request, r_node);
p = rb_next(p);
@@ -2997,7 +3676,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
continue; /* only new requests */
if (req->r_session &&
req->r_session->s_mds == mds) {
- dout(" kicking tid %llu\n", req->r_tid);
+ doutc(cl, " kicking tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__do_request(mdsc, req);
}
@@ -3007,6 +3686,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int err = 0;
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
@@ -3028,8 +3708,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
if (req->r_inode) {
err = ceph_wait_on_async_create(req->r_inode);
if (err) {
- dout("%s: wait for async create returned: %d\n",
- __func__, err);
+ doutc(cl, "wait for async create returned: %d\n", err);
return err;
}
}
@@ -3037,13 +3716,12 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
if (!err && req->r_old_inode) {
err = ceph_wait_on_async_create(req->r_old_inode);
if (err) {
- dout("%s: wait for async create returned: %d\n",
- __func__, err);
+ doutc(cl, "wait for async create returned: %d\n", err);
return err;
}
}
- dout("submit_request on %p for inode %p\n", req, dir);
+ doutc(cl, "submit_request on %p for inode %p\n", req, dir);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
__do_request(mdsc, req);
@@ -3052,15 +3730,17 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
return err;
}
-static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
- struct ceph_mds_request *req)
+int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req,
+ ceph_mds_request_wait_callback_t wait_func)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int err;
/* wait */
- dout("do_request waiting\n");
- if (!req->r_timeout && req->r_wait_for_completion) {
- err = req->r_wait_for_completion(mdsc, req);
+ doutc(cl, "do_request waiting\n");
+ if (wait_func) {
+ err = wait_func(mdsc, req);
} else {
long timeleft = wait_for_completion_killable_timeout(
&req->r_completion,
@@ -3072,14 +3752,14 @@ static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
else
err = timeleft; /* killed */
}
- dout("do_request waited, got %d\n", err);
+ doutc(cl, "do_request waited, got %d\n", err);
mutex_lock(&mdsc->mutex);
/* only abort if we didn't race with a real reply */
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
err = le32_to_cpu(req->r_reply_info.head->result);
} else if (err < 0) {
- dout("aborted request %lld with %d\n", req->r_tid, err);
+ doutc(cl, "aborted request %lld with %d\n", req->r_tid, err);
/*
* ensure we aren't running concurrently with
@@ -3110,15 +3790,16 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req)
{
+ struct ceph_client *cl = mdsc->fsc->client;
int err;
- dout("do_request on %p\n", req);
+ doutc(cl, "do_request on %p\n", req);
/* issue */
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err)
- err = ceph_mdsc_wait_request(mdsc, req);
- dout("do_request %p done, result %d\n", req, err);
+ err = ceph_mdsc_wait_request(mdsc, req, NULL);
+ doutc(cl, "do_request %p done, result %d\n", req, err);
return err;
}
@@ -3130,8 +3811,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
struct inode *dir = req->r_parent;
struct inode *old_dir = req->r_old_dentry_dir;
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
- dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
+ doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n",
+ dir, old_dir);
ceph_dir_clear_complete(dir);
if (old_dir)
@@ -3152,6 +3835,7 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_mds_reply_head *head = msg->front.iov_base;
struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
@@ -3159,9 +3843,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
u64 tid;
int err, result;
int mds = session->s_mds;
+ bool close_sessions = false;
if (msg->front.iov_len < sizeof(*head)) {
- pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+ pr_err_client(cl, "got corrupt (short) reply\n");
ceph_msg_dump(msg);
return;
}
@@ -3171,17 +3856,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_lock(&mdsc->mutex);
req = lookup_get_request(mdsc, tid);
if (!req) {
- dout("handle_reply on unknown tid %llu\n", tid);
+ doutc(cl, "on unknown tid %llu\n", tid);
mutex_unlock(&mdsc->mutex);
return;
}
- dout("handle_reply %p\n", req);
+ doutc(cl, "handle_reply %p\n", req);
/* correct session? */
if (req->r_session != session) {
- pr_err("mdsc_handle_reply got %llu on session mds%d"
- " not mds%d\n", tid, session->s_mds,
- req->r_session ? req->r_session->s_mds : -1);
+ pr_err_client(cl, "got %llu on session mds%d not mds%d\n",
+ tid, session->s_mds,
+ req->r_session ? req->r_session->s_mds : -1);
mutex_unlock(&mdsc->mutex);
goto out;
}
@@ -3189,49 +3874,20 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
/* dup? */
if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
(test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
- pr_warn("got a dup %s reply on %llu from mds%d\n",
- head->safe ? "safe" : "unsafe", tid, mds);
+ pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n",
+ head->safe ? "safe" : "unsafe", tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
- pr_warn("got unsafe after safe on %llu from mds%d\n",
- tid, mds);
+ pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n",
+ tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
result = le32_to_cpu(head->result);
- /*
- * Handle an ESTALE
- * if we're not talking to the authority, send to them
- * if the authority has changed while we weren't looking,
- * send to new authority
- * Otherwise we just have to return an ESTALE
- */
- if (result == -ESTALE) {
- dout("got ESTALE on request %llu\n", req->r_tid);
- req->r_resend_mds = -1;
- if (req->r_direct_mode != USE_AUTH_MDS) {
- dout("not using auth, setting for that now\n");
- req->r_direct_mode = USE_AUTH_MDS;
- __do_request(mdsc, req);
- mutex_unlock(&mdsc->mutex);
- goto out;
- } else {
- int mds = __choose_mds(mdsc, req, NULL);
- if (mds >= 0 && mds != req->r_session->s_mds) {
- dout("but auth changed, so resending\n");
- __do_request(mdsc, req);
- mutex_unlock(&mdsc->mutex);
- goto out;
- }
- }
- dout("have to return ESTALE on request %llu\n", req->r_tid);
- }
-
-
if (head->safe) {
set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
__unregister_request(mdsc, req);
@@ -3248,7 +3904,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
* response. And even if it did, there is nothing
* useful we could do with a revised return value.
*/
- dout("got safe reply %llu, mds%d\n", tid, mds);
+ doutc(cl, "got safe reply %llu, mds%d\n", tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
@@ -3258,23 +3914,36 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
}
- dout("handle_reply tid %lld result %d\n", tid, result);
- rinfo = &req->r_reply_info;
+ doutc(cl, "tid %lld result %d\n", tid, result);
if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
- err = parse_reply_info(session, msg, rinfo, (u64)-1);
+ err = parse_reply_info(session, msg, req, (u64)-1);
else
- err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
+ err = parse_reply_info(session, msg, req,
+ session->s_con.peer_features);
mutex_unlock(&mdsc->mutex);
/* Must find target inode outside of mutexes to avoid deadlocks */
+ rinfo = &req->r_reply_info;
if ((err >= 0) && rinfo->head->is_target) {
- struct inode *in;
+ struct inode *in = xchg(&req->r_new_inode, NULL);
struct ceph_vino tvino = {
.ino = le64_to_cpu(rinfo->targeti.in->ino),
.snap = le64_to_cpu(rinfo->targeti.in->snapid)
};
- in = ceph_get_inode(mdsc->fsc->sb, tvino);
+ /*
+ * If we ended up opening an existing inode, discard
+ * r_new_inode
+ */
+ if (req->r_op == CEPH_MDS_OP_CREATE &&
+ !req->r_reply_info.has_create_ino) {
+ /* This should never happen on an async create */
+ WARN_ON_ONCE(req->r_deleg_ino);
+ iput(in);
+ in = NULL;
+ }
+
+ in = ceph_get_inode(mdsc->fsc->sb, tvino, in);
if (IS_ERR(in)) {
err = PTR_ERR(in);
mutex_lock(&session->s_mutex);
@@ -3285,7 +3954,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_lock(&session->s_mutex);
if (err < 0) {
- pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
+ pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n",
+ mds, tid);
ceph_msg_dump(msg);
goto out_err;
}
@@ -3294,10 +3964,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
realm = NULL;
if (rinfo->snapblob_len) {
down_write(&mdsc->snap_rwsem);
- ceph_update_snap_trace(mdsc, rinfo->snapblob,
+ err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
rinfo->snapblob + rinfo->snapblob_len,
le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
&realm);
+ if (err) {
+ up_write(&mdsc->snap_rwsem);
+ close_sessions = true;
+ if (err == -EIO)
+ ceph_msg_dump(msg);
+ goto out_err;
+ }
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
@@ -3310,7 +3987,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
if (err == 0) {
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
req->r_op == CEPH_MDS_OP_LSSNAP))
- ceph_readdir_prepopulate(req, req->r_session);
+ err = ceph_readdir_prepopulate(req, req->r_session);
}
current->journal_info = NULL;
mutex_unlock(&req->r_fill_mutex);
@@ -3342,7 +4019,7 @@ out_err:
set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
}
} else {
- dout("reply arrived after request %lld was aborted\n", tid);
+ doutc(cl, "reply arrived after request %lld was aborted\n", tid);
}
mutex_unlock(&mdsc->mutex);
@@ -3355,6 +4032,10 @@ out_err:
req->r_end_latency, err);
out:
ceph_mdsc_put_request(req);
+
+ /* Defer closing the sessions after s_mutex lock being released */
+ if (close_sessions)
+ ceph_mdsc_close_sessions(mdsc);
return;
}
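
One detail worth noting in the handle_reply() changes above: when ceph_update_snap_trace() fails, the handler only records the fact in close_sessions and defers the actual ceph_mdsc_close_sessions() call until after s_mutex and the request reference have been dropped, presumably because the session teardown takes locks that cannot safely be nested there. A minimal userspace-style sketch of that defer-until-unlock pattern, with hypothetical names:

    /* Hedged sketch (userspace analogue, hypothetical names) of deferring a
     * heavyweight teardown until the lock protecting the fast path is dropped.
     */
    #include <pthread.h>
    #include <stdbool.h>

    struct ctx { pthread_mutex_t lock; };

    static int do_update(struct ctx *c) { (void)c; return -1; } /* pretend it failed */
    static void teardown_sessions(struct ctx *c) { (void)c; }

    static void handle_event(struct ctx *c)
    {
            bool close_sessions = false;

            pthread_mutex_lock(&c->lock);
            if (do_update(c) < 0)
                    close_sessions = true;  /* can't tear down under c->lock */
            pthread_mutex_unlock(&c->lock);

            /* safe to do the heavyweight teardown now that the lock is gone */
            if (close_sessions)
                    teardown_sessions(c);
    }
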
@@ -3367,6 +4048,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
u64 tid = le64_to_cpu(msg->hdr.tid);
u32 next_mds;
@@ -3374,6 +4056,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
int err = -EINVAL;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
+ bool aborted = false;
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
next_mds = ceph_decode_32(&p);
@@ -3382,19 +4065,33 @@ static void handle_forward(struct ceph_mds_client *mdsc,
mutex_lock(&mdsc->mutex);
req = lookup_get_request(mdsc, tid);
if (!req) {
- dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
- goto out; /* dup reply? */
+ mutex_unlock(&mdsc->mutex);
+ doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds);
+ return; /* dup reply? */
}
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
- dout("forward tid %llu aborted, unregistering\n", tid);
+ doutc(cl, "forward tid %llu aborted, unregistering\n", tid);
__unregister_request(mdsc, req);
- } else if (fwd_seq <= req->r_num_fwd) {
- dout("forward tid %llu to mds%d - old seq %d <= %d\n",
- tid, next_mds, req->r_num_fwd, fwd_seq);
+ } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
+ /*
+ * Avoid infinite retrying after overflow.
+ *
+ * The MDS will keep increasing the fwd count; if the new
+ * num_fwd is smaller than the one already saved in the
+ * request, the MDS is an old version whose 8-bit counter
+ * has overflowed.
+ */
+ mutex_lock(&req->r_fill_mutex);
+ req->r_err = -EMULTIHOP;
+ set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+ mutex_unlock(&req->r_fill_mutex);
+ aborted = true;
+ pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n",
+ tid);
} else {
/* resend. forward race not possible; mds would drop */
- dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
+ doutc(cl, "forward tid %llu to mds%d (we resend)\n", tid, next_mds);
BUG_ON(req->r_err);
BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
req->r_attempts = 0;
@@ -3403,13 +4100,17 @@ static void handle_forward(struct ceph_mds_client *mdsc,
put_request_session(req);
__do_request(mdsc, req);
}
- ceph_mdsc_put_request(req);
-out:
mutex_unlock(&mdsc->mutex);
+
+ /* kick calling process */
+ if (aborted)
+ complete_request(mdsc, req);
+ ceph_mdsc_put_request(req);
return;
bad:
- pr_err("mdsc_handle_forward decode error err=%d\n", err);
+ pr_err_client(cl, "decode error err=%d\n", err);
+ ceph_msg_dump(msg);
}
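
The reworked forwarding path above gives up instead of retrying forever once the forward counter stops making sense: the request is marked aborted with -EMULTIHOP and completed outside mdsc->mutex. The trigger is either a fwd_seq that did not advance past r_num_fwd (an old MDS whose 8-bit counter wrapped) or a fwd_seq already at the 32-bit limit. A small standalone sketch of that guard, under those assumptions:

    /* Hedged sketch of the forward-seq sanity check (userspace analogue). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true when the forward looks bogus and the request should be aborted */
    static bool forward_is_bogus(uint32_t fwd_seq, uint32_t num_fwd)
    {
            return fwd_seq <= num_fwd || fwd_seq >= UINT32_MAX;
    }

    int main(void)
    {
            printf("%d\n", forward_is_bogus(0, 255));          /* 1: 8-bit counter wrapped */
            printf("%d\n", forward_is_bogus(12, 11));          /* 0: normal forward */
            printf("%d\n", forward_is_bogus(UINT32_MAX, 100)); /* 1: 32-bit counter saturated */
            return 0;
    }
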
static int __decode_session_metadata(void **p, void *end,
@@ -3447,15 +4148,19 @@ static void handle_session(struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
int mds = session->s_mds;
int msg_version = le16_to_cpu(msg->hdr.version);
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mds_session_head *h;
- u32 op;
+ struct ceph_mds_cap_auth *cap_auths = NULL;
+ u32 op, cap_auths_num = 0;
u64 seq, features = 0;
int wake = 0;
bool blocklisted = false;
+ u32 i;
+
/* decode */
ceph_decode_need(&p, end, sizeof(*h), bad);
@@ -3467,9 +4172,14 @@ static void handle_session(struct ceph_mds_session *session,
if (msg_version >= 3) {
u32 len;
- /* version >= 2, metadata */
- if (__decode_session_metadata(&p, end, &blocklisted) < 0)
+ /* version >= 2 and < 5, decode metadata, skip otherwise
+ * as it's handled via flags.
+ */
+ if (msg_version >= 5)
+ ceph_decode_skip_map(&p, end, string, string, bad);
+ else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
goto bad;
+
/* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad);
if (len) {
@@ -3478,7 +4188,118 @@ static void handle_session(struct ceph_mds_session *session,
}
}
+ if (msg_version >= 5) {
+ u32 flags, len;
+
+ /* version >= 4 */
+ ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
+ ceph_decode_32_safe(&p, end, len, bad); /* len */
+ ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */
+
+ /* version >= 5, flags */
+ ceph_decode_32_safe(&p, end, flags, bad);
+ if (flags & CEPH_SESSION_BLOCKLISTED) {
+ pr_warn_client(cl, "mds%d session blocklisted\n",
+ session->s_mds);
+ blocklisted = true;
+ }
+ }
+
+ if (msg_version >= 6) {
+ ceph_decode_32_safe(&p, end, cap_auths_num, bad);
+ doutc(cl, "cap_auths_num %d\n", cap_auths_num);
+
+ if (cap_auths_num && op != CEPH_SESSION_OPEN) {
+ WARN_ON_ONCE(op != CEPH_SESSION_OPEN);
+ goto skip_cap_auths;
+ }
+
+ cap_auths = kcalloc(cap_auths_num,
+ sizeof(struct ceph_mds_cap_auth),
+ GFP_KERNEL);
+ if (!cap_auths) {
+ pr_err_client(cl, "No memory for cap_auths\n");
+ return;
+ }
+
+ for (i = 0; i < cap_auths_num; i++) {
+ u32 _len, j;
+
+ /* struct_v, struct_compat, and struct_len in MDSCapAuth */
+ ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
+
+ /* struct_v, struct_compat, and struct_len in MDSCapMatch */
+ ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
+ ceph_decode_64_safe(&p, end, cap_auths[i].match.uid, bad);
+ ceph_decode_32_safe(&p, end, _len, bad);
+ if (_len) {
+ cap_auths[i].match.gids = kcalloc(_len, sizeof(u32),
+ GFP_KERNEL);
+ if (!cap_auths[i].match.gids) {
+ pr_err_client(cl, "No memory for gids\n");
+ goto fail;
+ }
+
+ cap_auths[i].match.num_gids = _len;
+ for (j = 0; j < _len; j++)
+ ceph_decode_32_safe(&p, end,
+ cap_auths[i].match.gids[j],
+ bad);
+ }
+
+ ceph_decode_32_safe(&p, end, _len, bad);
+ if (_len) {
+ cap_auths[i].match.path = kcalloc(_len + 1, sizeof(char),
+ GFP_KERNEL);
+ if (!cap_auths[i].match.path) {
+ pr_err_client(cl, "No memory for path\n");
+ goto fail;
+ }
+ ceph_decode_copy(&p, cap_auths[i].match.path, _len);
+
+ /* Remove the trailing '/' */
+ while (_len && cap_auths[i].match.path[_len - 1] == '/') {
+ cap_auths[i].match.path[_len - 1] = '\0';
+ _len -= 1;
+ }
+ }
+
+ ceph_decode_32_safe(&p, end, _len, bad);
+ if (_len) {
+ cap_auths[i].match.fs_name = kcalloc(_len + 1, sizeof(char),
+ GFP_KERNEL);
+ if (!cap_auths[i].match.fs_name) {
+ pr_err_client(cl, "No memory for fs_name\n");
+ goto fail;
+ }
+ ceph_decode_copy(&p, cap_auths[i].match.fs_name, _len);
+ }
+
+ ceph_decode_8_safe(&p, end, cap_auths[i].match.root_squash, bad);
+ ceph_decode_8_safe(&p, end, cap_auths[i].readable, bad);
+ ceph_decode_8_safe(&p, end, cap_auths[i].writeable, bad);
+ doutc(cl, "uid %lld, num_gids %u, path %s, fs_name %s, root_squash %d, readable %d, writeable %d\n",
+ cap_auths[i].match.uid, cap_auths[i].match.num_gids,
+ cap_auths[i].match.path, cap_auths[i].match.fs_name,
+ cap_auths[i].match.root_squash,
+ cap_auths[i].readable, cap_auths[i].writeable);
+ }
+ }
+
+skip_cap_auths:
mutex_lock(&mdsc->mutex);
+ if (op == CEPH_SESSION_OPEN) {
+ if (mdsc->s_cap_auths) {
+ for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ kfree(mdsc->s_cap_auths[i].match.gids);
+ kfree(mdsc->s_cap_auths[i].match.path);
+ kfree(mdsc->s_cap_auths[i].match.fs_name);
+ }
+ kfree(mdsc->s_cap_auths);
+ }
+ mdsc->s_cap_auths_num = cap_auths_num;
+ mdsc->s_cap_auths = cap_auths;
+ }
if (op == CEPH_SESSION_CLOSE) {
ceph_get_mds_session(session);
__unregister_session(mdsc, session);
@@ -3489,24 +4310,41 @@ static void handle_session(struct ceph_mds_session *session,
mutex_lock(&session->s_mutex);
- dout("handle_session mds%d %s %p state %s seq %llu\n",
- mds, ceph_session_op_name(op), session,
- ceph_session_state_name(session->s_state), seq);
+ doutc(cl, "mds%d %s %p state %s seq %llu\n", mds,
+ ceph_session_op_name(op), session,
+ ceph_session_state_name(session->s_state), seq);
if (session->s_state == CEPH_MDS_SESSION_HUNG) {
session->s_state = CEPH_MDS_SESSION_OPEN;
- pr_info("mds%d came back\n", session->s_mds);
+ pr_info_client(cl, "mds%d came back\n", session->s_mds);
}
switch (op) {
case CEPH_SESSION_OPEN:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
- pr_info("mds%d reconnect success\n", session->s_mds);
- session->s_state = CEPH_MDS_SESSION_OPEN;
+ pr_info_client(cl, "mds%d reconnect success\n",
+ session->s_mds);
+
session->s_features = features;
- renewed_caps(mdsc, session, 0);
- if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
- metric_schedule_delayed(&mdsc->metric);
+ if (session->s_state == CEPH_MDS_SESSION_OPEN) {
+ pr_notice_client(cl, "mds%d is already opened\n",
+ session->s_mds);
+ } else {
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ renewed_caps(mdsc, session, 0);
+ if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
+ &session->s_features))
+ metric_schedule_delayed(&mdsc->metric);
+ }
+
+ /*
+ * The connection may have been broken and the session on
+ * the client side reinitialized, so the seq needs to be
+ * updated anyway.
+ */
+ if (!session->s_seq && seq)
+ session->s_seq = seq;
+
wake = 1;
if (mdsc->stopping)
__close_session(mdsc, session);
@@ -3519,7 +4357,8 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_CLOSE:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
- pr_info("mds%d reconnect denied\n", session->s_mds);
+ pr_info_client(cl, "mds%d reconnect denied\n",
+ session->s_mds);
session->s_state = CEPH_MDS_SESSION_CLOSED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
@@ -3528,8 +4367,8 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_STALE:
- pr_info("mds%d caps went stale, renewing\n",
- session->s_mds);
+ pr_info_client(cl, "mds%d caps went stale, renewing\n",
+ session->s_mds);
atomic_inc(&session->s_cap_gen);
session->s_cap_ttl = jiffies - 1;
send_renew_caps(mdsc, session);
@@ -3540,11 +4379,17 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_FLUSHMSG:
+ /* flush cap releases */
+ spin_lock(&session->s_cap_lock);
+ if (session->s_num_cap_releases)
+ ceph_flush_session_cap_releases(mdsc, session);
+ spin_unlock(&session->s_cap_lock);
+
send_flushmsg_ack(mdsc, session, seq);
break;
case CEPH_SESSION_FORCE_RO:
- dout("force_session_readonly %p\n", session);
+ doutc(cl, "force_session_readonly %p\n", session);
spin_lock(&session->s_cap_lock);
session->s_readonly = true;
spin_unlock(&session->s_cap_lock);
@@ -3553,7 +4398,8 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_REJECT:
WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
- pr_info("mds%d rejected session\n", session->s_mds);
+ pr_info_client(cl, "mds%d rejected session\n",
+ session->s_mds);
session->s_state = CEPH_MDS_SESSION_REJECTED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
@@ -3563,7 +4409,7 @@ static void handle_session(struct ceph_mds_session *session,
break;
default:
- pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
+ pr_err_client(cl, "bad op %d mds%d\n", op, mds);
WARN_ON(1);
}
@@ -3580,32 +4426,40 @@ static void handle_session(struct ceph_mds_session *session,
return;
bad:
- pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
- (int)msg->front.iov_len);
+ pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
+ (int)msg->front.iov_len);
ceph_msg_dump(msg);
+fail:
+ for (i = 0; i < cap_auths_num; i++) {
+ kfree(cap_auths[i].match.gids);
+ kfree(cap_auths[i].match.path);
+ kfree(cap_auths[i].match.fs_name);
+ }
+ kfree(cap_auths);
return;
}
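
The bad:/fail: tail above is arranged so that any decode or allocation failure while building the cap_auths array frees every per-entry allocation (gids, path, fs_name) and then the array itself; the same per-entry teardown is repeated when a later CEPH_SESSION_OPEN replaces mdsc->s_cap_auths and again in ceph_mdsc_stop(). Since the array and its members come from kcalloc(), freeing entries that were never reached is still safe (kfree(NULL) is a no-op). A compact userspace sketch of that pattern, with hypothetical names:

    /* Hedged sketch: free a partially-populated array on decode failure. */
    #include <stdlib.h>

    struct entry { unsigned int *gids; char *path; char *fs_name; };

    int decode_entry(struct entry *e);    /* assumed: 0 on success */

    static struct entry *decode_entries(unsigned int n)
    {
            struct entry *arr = calloc(n, sizeof(*arr));
            unsigned int i;

            if (!arr)
                    return NULL;

            for (i = 0; i < n; i++) {
                    if (decode_entry(&arr[i]))
                            goto fail;
            }
            return arr;

    fail:
            /* entries past i were never touched, but calloc() zeroed them,
             * so free() on their NULL members is still safe */
            for (i = 0; i < n; i++) {
                    free(arr[i].gids);
                    free(arr[i].path);
                    free(arr[i].fs_name);
            }
            free(arr);
            return NULL;
    }
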
void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
{
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
- dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+ doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
}
}
-void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
+void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req)
{
+ struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
- dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
- ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
- dcaps);
+ doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+ ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps);
}
}
@@ -3618,7 +4472,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req, *nreq;
struct rb_node *p;
- dout("replay_unsafe_requests mds%d\n", session->s_mds);
+ doutc(mdsc->fsc->client, "mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
@@ -3641,7 +4495,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
if (req->r_session->s_mds != session->s_mds)
continue;
- ceph_mdsc_release_dir_caps_no_check(req);
+ ceph_mdsc_release_dir_caps_async(req);
__send_request(session, req, true);
}
@@ -3760,43 +4614,46 @@ out_unlock:
/*
* Encode information about a cap for a reconnect with the MDS.
*/
-static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
- void *arg)
+static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
union {
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
} rec;
- struct ceph_inode_info *ci = cap->ci;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
- char *path;
- int pathlen, err;
- u64 pathbase;
+ struct ceph_cap *cap;
+ struct ceph_path_info path_info = {0};
+ int err;
u64 snap_follows;
- dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
- inode, ceph_vinop(inode), cap, cap->cap_id,
- ceph_cap_string(cap->issued));
-
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
- path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_err;
}
- } else {
- path = NULL;
- pathlen = 0;
- pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (!cap) {
+ spin_unlock(&ci->i_ceph_lock);
+ err = 0;
+ goto out_err;
+ }
+ doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n", inode,
+ ceph_vinop(inode), cap, cap->cap_id,
+ ceph_cap_string(cap->issued));
+
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
cap->mseq = 0; /* and migrate_seq */
@@ -3816,18 +4673,22 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v2.pathbase = cpu_to_le64(pathbase);
+ rec.v2.pathbase = cpu_to_le64(path_info.vino.ino);
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
+ struct timespec64 ts;
+
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(i_size_read(inode));
- ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
- ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
+ ts = inode_get_mtime(inode);
+ ceph_encode_timespec64(&rec.v1.mtime, &ts);
+ ts = inode_get_atime(inode);
+ ceph_encode_timespec64(&rec.v1.atime, &ts);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v1.pathbase = cpu_to_le64(pathbase);
+ rec.v1.pathbase = cpu_to_le64(path_info.vino.ino);
}
if (list_empty(&ci->i_cap_snaps)) {
@@ -3889,7 +4750,7 @@ encode_again:
sizeof(struct ceph_filelock);
rec.v2.flock_len = cpu_to_le32(struct_len);
- struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
+ struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2);
if (struct_v >= 2)
struct_len += sizeof(u64); /* snap_follows */
@@ -3913,7 +4774,7 @@ encode_again:
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, struct_len);
}
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
ceph_locks_to_pagelist(flocks, pagelist,
num_fcntl_locks, num_flock_locks);
@@ -3924,17 +4785,17 @@ out_freeflocks:
} else {
err = ceph_pagelist_reserve(pagelist,
sizeof(u64) + sizeof(u32) +
- pathlen + sizeof(rec.v1));
+ path_info.pathlen + sizeof(rec.v1));
if (err)
goto out_err;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
}
out_err:
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
if (!err)
recon_state->nr_caps++;
return err;
@@ -3945,6 +4806,7 @@ static int encode_snap_realms(struct ceph_mds_client *mdsc,
{
struct rb_node *p;
struct ceph_pagelist *pagelist = recon_state->pagelist;
+ struct ceph_client *cl = mdsc->fsc->client;
int err = 0;
if (recon_state->msg_version >= 4) {
@@ -3983,8 +4845,8 @@ static int encode_snap_realms(struct ceph_mds_client *mdsc,
ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
}
- dout(" adding snap realm %llx seq %lld parent %llx\n",
- realm->ino, realm->seq, realm->parent_ino);
+ doutc(cl, " adding snap realm %llx seq %lld parent %llx\n",
+ realm->ino, realm->seq, realm->parent_ino);
sr_rec.ino = cpu_to_le64(realm->ino);
sr_rec.seq = cpu_to_le64(realm->seq);
sr_rec.parent = cpu_to_le64(realm->parent_ino);
@@ -4013,6 +4875,7 @@ fail:
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *reply;
int mds = session->s_mds;
int err = -ENOMEM;
@@ -4021,7 +4884,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
};
LIST_HEAD(dispose);
- pr_info("mds%d reconnect start\n", mds);
+ pr_info_client(cl, "mds%d reconnect start\n", mds);
recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!recon_state.pagelist)
@@ -4037,8 +4900,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
session->s_state = CEPH_MDS_SESSION_RECONNECTING;
session->s_seq = 0;
- dout("session %p state %s\n", session,
- ceph_session_state_name(session->s_state));
+ doutc(cl, "session %p state %s\n", session,
+ ceph_session_state_name(session->s_state));
atomic_inc(&session->s_cap_gen);
@@ -4085,7 +4948,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
} else {
recon_state.msg_version = 2;
}
- /* trsaverse this session's caps */
+ /* traverse this session's caps */
err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
spin_lock(&session->s_cap_lock);
@@ -4172,7 +5035,8 @@ fail:
fail_nomsg:
ceph_pagelist_release(recon_state.pagelist);
fail_nopagelist:
- pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+ pr_err_client(cl, "error %d preparing reconnect for mds%d\n",
+ err, mds);
return;
}
@@ -4191,9 +5055,9 @@ static void check_new_map(struct ceph_mds_client *mdsc,
int oldstate, newstate;
struct ceph_mds_session *s;
unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
+ struct ceph_client *cl = mdsc->fsc->client;
- dout("check_new_map new %u old %u\n",
- newmap->m_epoch, oldmap->m_epoch);
+ doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch);
if (newmap->m_info) {
for (i = 0; i < newmap->possible_max_rank; i++) {
@@ -4209,12 +5073,12 @@ static void check_new_map(struct ceph_mds_client *mdsc,
oldstate = ceph_mdsmap_get_state(oldmap, i);
newstate = ceph_mdsmap_get_state(newmap, i);
- dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
- i, ceph_mds_state_name(oldstate),
- ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
- ceph_mds_state_name(newstate),
- ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
- ceph_session_state_name(s->s_state));
+ doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n",
+ i, ceph_mds_state_name(oldstate),
+ ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
+ ceph_mds_state_name(newstate),
+ ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
+ ceph_session_state_name(s->s_state));
if (i >= newmap->possible_max_rank) {
/* force close session for stopped mds */
@@ -4267,7 +5131,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
newstate >= CEPH_MDS_STATE_ACTIVE) {
if (oldstate != CEPH_MDS_STATE_CREATING &&
oldstate != CEPH_MDS_STATE_STARTING)
- pr_info("mds%d recovery completed\n", s->s_mds);
+ pr_info_client(cl, "mds%d recovery completed\n",
+ s->s_mds);
kick_requests(mdsc, i);
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
@@ -4311,12 +5176,13 @@ static void check_new_map(struct ceph_mds_client *mdsc,
s = __open_export_target_session(mdsc, i);
if (IS_ERR(s)) {
err = PTR_ERR(s);
- pr_err("failed to open export target session, err %d\n",
- err);
+ pr_err_client(cl,
+ "failed to open export target session, err %d\n",
+ err);
continue;
}
}
- dout("send reconnect to export target mds.%d\n", i);
+ doutc(cl, "send reconnect to export target mds.%d\n", i);
mutex_unlock(&mdsc->mutex);
send_mds_reconnect(mdsc, s);
ceph_put_mds_session(s);
@@ -4332,8 +5198,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
s->s_state == CEPH_MDS_SESSION_HUNG ||
s->s_state == CEPH_MDS_SESSION_CLOSING) {
- dout(" connecting to export targets of laggy mds%d\n",
- i);
+ doutc(cl, " connecting to export targets of laggy mds%d\n", i);
__open_export_target_sessions(mdsc, s);
}
}
@@ -4360,6 +5225,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
struct dentry *parent, *dentry;
@@ -4371,7 +5237,10 @@ static void handle_lease(struct ceph_mds_client *mdsc,
struct qstr dname;
int release = 0;
- dout("handle_lease from mds%d\n", mds);
+ doutc(cl, "from mds%d\n", mds);
+
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
/* decode */
if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
@@ -4386,22 +5255,19 @@ static void handle_lease(struct ceph_mds_client *mdsc,
/* lookup inode */
inode = ceph_find_inode(sb, vino);
- dout("handle_lease %s, ino %llx %p %.*s\n",
- ceph_lease_op_name(h->action), vino.ino, inode,
- dname.len, dname.name);
+ doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action),
+ vino.ino, inode, dname.len, dname.name);
mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
-
if (!inode) {
- dout("handle_lease no inode %llx\n", vino.ino);
+ doutc(cl, "no inode %llx\n", vino.ino);
goto release;
}
/* dentry */
parent = d_find_alias(inode);
if (!parent) {
- dout("no parent dentry on inode %p\n", inode);
+ doutc(cl, "no parent dentry on inode %p\n", inode);
WARN_ON(1);
goto release; /* hrm... */
}
@@ -4454,10 +5320,14 @@ release:
out:
mutex_unlock(&session->s_mutex);
iput(inode);
+
+ ceph_dec_mds_stopping_blocker(mdsc);
return;
bad:
- pr_err("corrupt lease message\n");
+ ceph_dec_mds_stopping_blocker(mdsc);
+
+ pr_err_client(cl, "corrupt lease message\n");
ceph_msg_dump(msg);
}
@@ -4465,13 +5335,14 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
u32 seq)
{
+ struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_msg *msg;
struct ceph_mds_lease *lease;
struct inode *dir;
int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
- dout("lease_send_msg identry %p %s to mds%d\n",
- dentry, ceph_lease_op_name(action), session->s_mds);
+ doutc(cl, "identry %p %s to mds%d\n", dentry, ceph_lease_op_name(action),
+ session->s_mds);
msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
if (!msg)
@@ -4489,12 +5360,6 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
memcpy((void *)(lease + 1) + 4,
dentry->d_name.name, dentry->d_name.len);
spin_unlock(&dentry->d_lock);
- /*
- * if this is a preemptive lease RELEASE, no need to
- * flush request stream, since the actual request will
- * soon follow.
- */
- msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
ceph_con_send(&session->s_con, msg);
}
@@ -4510,6 +5375,7 @@ static void lock_unlock_session(struct ceph_mds_session *s)
static void maybe_recover_session(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_fs_client *fsc = mdsc->fsc;
if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
@@ -4521,26 +5387,22 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
if (!READ_ONCE(fsc->blocklisted))
return;
- pr_info("auto reconnect after blocklisted\n");
+ pr_info_client(cl, "auto reconnect after blocklisted\n");
ceph_force_reconnect(fsc->sb);
}
bool check_session_state(struct ceph_mds_session *s)
{
- struct ceph_fs_client *fsc = s->s_mdsc->fsc;
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
switch (s->s_state) {
case CEPH_MDS_SESSION_OPEN:
if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
s->s_state = CEPH_MDS_SESSION_HUNG;
- pr_info("mds%d hung\n", s->s_mds);
+ pr_info_client(cl, "mds%d hung\n", s->s_mds);
}
break;
case CEPH_MDS_SESSION_CLOSING:
- /* Should never reach this when not force unmounting */
- WARN_ON_ONCE(s->s_ttl &&
- READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
- fallthrough;
case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING:
case CEPH_MDS_SESSION_CLOSED:
@@ -4557,6 +5419,8 @@ bool check_session_state(struct ceph_mds_session *s)
*/
void inc_session_sequence(struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
+
lockdep_assert_held(&s->s_mutex);
s->s_seq++;
@@ -4564,11 +5428,11 @@ void inc_session_sequence(struct ceph_mds_session *s)
if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
int ret;
- dout("resending session close request for mds%d\n", s->s_mds);
+ doutc(cl, "resending session close request for mds%d\n", s->s_mds);
ret = request_close_session(s);
if (ret < 0)
- pr_err("unable to close session to mds%d: %d\n",
- s->s_mds, ret);
+ pr_err_client(cl, "unable to close session to mds%d: %d\n",
+ s->s_mds, ret);
}
}
@@ -4597,9 +5461,9 @@ static void delayed_work(struct work_struct *work)
int renew_caps;
int i;
- dout("mdsc delayed_work\n");
+ doutc(mdsc->fsc->client, "mdsc delayed_work\n");
- if (mdsc->stopping)
+ if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
return;
mutex_lock(&mdsc->mutex);
@@ -4620,6 +5484,8 @@ static void delayed_work(struct work_struct *work)
}
mutex_unlock(&mdsc->mutex);
+ ceph_flush_session_cap_releases(mdsc, s);
+
mutex_lock(&s->s_mutex);
if (renew_caps)
send_renew_caps(mdsc, s);
@@ -4664,6 +5530,11 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
}
init_completion(&mdsc->safe_umount_waiters);
+ spin_lock_init(&mdsc->stopping_lock);
+ atomic_set(&mdsc->stopping_blockers, 0);
+ init_completion(&mdsc->stopping_waiter);
+ atomic64_set(&mdsc->dirty_folios, 0);
+ init_waitqueue_head(&mdsc->flush_end_wq);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->quotarealms_inodes = RB_ROOT;
@@ -4676,8 +5547,11 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
+#ifdef CONFIG_DEBUG_FS
INIT_LIST_HEAD(&mdsc->cap_wait_list);
+#endif
spin_lock_init(&mdsc->cap_delay_lock);
+ INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
@@ -4686,6 +5560,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
+ INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
err = ceph_metric_init(&mdsc->metric);
if (err)
goto err_mdsmap;
@@ -4723,6 +5598,7 @@ err_mdsc:
*/
static void wait_requests(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_options *opts = mdsc->fsc->client->options;
struct ceph_mds_request *req;
@@ -4730,25 +5606,25 @@ static void wait_requests(struct ceph_mds_client *mdsc)
if (__get_oldest_req(mdsc)) {
mutex_unlock(&mdsc->mutex);
- dout("wait_requests waiting for requests\n");
+ doutc(cl, "waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
ceph_timeout_jiffies(opts->mount_timeout));
/* tear down remaining requests */
mutex_lock(&mdsc->mutex);
while ((req = __get_oldest_req(mdsc))) {
- dout("wait_requests timed out on tid %llu\n",
- req->r_tid);
+ doutc(cl, "timed out on tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__unregister_request(mdsc, req);
}
}
mutex_unlock(&mdsc->mutex);
- dout("wait_requests done\n");
+ doutc(cl, "done\n");
}
void send_flush_mdlog(struct ceph_mds_session *s)
{
+ struct ceph_client *cl = s->s_mdsc->fsc->client;
struct ceph_msg *msg;
/*
@@ -4758,27 +5634,202 @@ void send_flush_mdlog(struct ceph_mds_session *s)
return;
mutex_lock(&s->s_mutex);
- dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds,
- ceph_session_state_name(s->s_state), s->s_seq);
+ doutc(cl, "request mdlog flush to mds%d (%s)s seq %lld\n",
+ s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
s->s_seq);
if (!msg) {
- pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
- s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
+ pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n",
+ s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
} else {
ceph_con_send(&s->s_con, msg);
}
mutex_unlock(&s->s_mutex);
}
+static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
+ struct ceph_mds_cap_auth *auth,
+ const struct cred *cred,
+ char *tpath)
+{
+ u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
+ u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
+ struct ceph_client *cl = mdsc->fsc->client;
+ const char *fs_name = mdsc->fsc->mount_options->mds_namespace;
+ const char *spath = mdsc->fsc->mount_options->server_path;
+ bool gid_matched = false;
+ u32 gid, tlen, len;
+ int i, j;
+
+ doutc(cl, "fsname check fs_name=%s match.fs_name=%s\n",
+ fs_name, auth->match.fs_name ? auth->match.fs_name : "");
+ if (auth->match.fs_name && strcmp(auth->match.fs_name, fs_name)) {
+ /* fsname mismatch, try next one */
+ return 0;
+ }
+
+ doutc(cl, "match.uid %lld\n", auth->match.uid);
+ if (auth->match.uid != MDS_AUTH_UID_ANY) {
+ if (auth->match.uid != caller_uid)
+ return 0;
+ if (auth->match.num_gids) {
+ for (i = 0; i < auth->match.num_gids; i++) {
+ if (caller_gid == auth->match.gids[i])
+ gid_matched = true;
+ }
+ if (!gid_matched && cred->group_info->ngroups) {
+ for (i = 0; i < cred->group_info->ngroups; i++) {
+ gid = from_kgid(&init_user_ns,
+ cred->group_info->gid[i]);
+ for (j = 0; j < auth->match.num_gids; j++) {
+ if (gid == auth->match.gids[j]) {
+ gid_matched = true;
+ break;
+ }
+ }
+ if (gid_matched)
+ break;
+ }
+ }
+ if (!gid_matched)
+ return 0;
+ }
+ }
+
+ /* path match */
+ if (auth->match.path) {
+ if (!tpath)
+ return 0;
+
+ tlen = strlen(tpath);
+ len = strlen(auth->match.path);
+ if (len) {
+ char *_tpath = tpath;
+ bool free_tpath = false;
+ int m, n;
+
+ doutc(cl, "server path %s, tpath %s, match.path %s\n",
+ spath, tpath, auth->match.path);
+ if (spath && (m = strlen(spath)) != 1) {
+ /* mount path + '/' + tpath + an extra space */
+ n = m + 1 + tlen + 1;
+ _tpath = kmalloc(n, GFP_NOFS);
+ if (!_tpath)
+ return -ENOMEM;
+ /* remove the leading '/' */
+ snprintf(_tpath, n, "%s/%s", spath + 1, tpath);
+ free_tpath = true;
+ tlen = strlen(_tpath);
+ }
+
+ /*
+ * Please note the trailing '/' for match.path has already
+ * been removed when parsing.
+ *
+ * Remove the trailing '/' for the target path.
+ */
+ while (tlen && _tpath[tlen - 1] == '/') {
+ _tpath[tlen - 1] = '\0';
+ tlen -= 1;
+ }
+ doutc(cl, "_tpath %s\n", _tpath);
+
+ /*
+ * In case first == _tpath && tlen == len:
+ * match.path=/foo --> /foo _path=/foo --> match
+ * match.path=/foo/ --> /foo _path=/foo --> match
+ *
+ * In case first == _tpath && tlen > len:
+ * match.path=/foo/ --> /foo _path=/foo/ --> match
+ * match.path=/foo --> /foo _path=/foo/ --> match
+ * match.path=/foo/ --> /foo _path=/foo/d --> match
+ * match.path=/foo --> /foo _path=/food --> mismatch
+ *
+ * All the other cases --> mismatch
+ */
+ bool path_matched = true;
+ char *first = strstr(_tpath, auth->match.path);
+ if (first != _tpath ||
+ (tlen > len && _tpath[len] != '/')) {
+ path_matched = false;
+ }
+
+ if (free_tpath)
+ kfree(_tpath);
+
+ if (!path_matched)
+ return 0;
+ }
+ }
+
+ doutc(cl, "matched\n");
+ return 1;
+}
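
The long comment above reduces the path check to: strip trailing slashes from the target path, then accept only if auth->match.path is a whole-component prefix of it. The kernel code expresses that with strstr() plus a boundary test; one way to write the same predicate as a standalone userspace sketch (hypothetical helper name) that reproduces the comment's examples:

    /* Hedged sketch of the component-wise prefix match described above. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* match must already have its trailing '/' stripped, as done when parsing */
    static bool path_prefix_match(const char *match, char *target)
    {
            size_t len = strlen(match);
            size_t tlen = strlen(target);

            while (tlen && target[tlen - 1] == '/')    /* strip trailing '/' */
                    target[--tlen] = '\0';

            if (strncmp(target, match, len))
                    return false;                      /* not a prefix at all */
            return tlen == len || target[len] == '/';  /* must end on a component */
    }

    int main(void)
    {
            char a[] = "/foo", b[] = "/foo/", c[] = "/foo/d", d[] = "/food";

            printf("%d %d %d %d\n",
                   path_prefix_match("/foo", a),   /* 1 */
                   path_prefix_match("/foo", b),   /* 1 */
                   path_prefix_match("/foo", c),   /* 1 */
                   path_prefix_match("/foo", d));  /* 0 */
            return 0;
    }
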
+
+int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
+{
+ const struct cred *cred = get_current_cred();
+ u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
+ u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
+ struct ceph_mds_cap_auth *rw_perms_s = NULL;
+ struct ceph_client *cl = mdsc->fsc->client;
+ bool root_squash_perms = true;
+ int i, err;
+
+ doutc(cl, "tpath '%s', mask %d, caller_uid %d, caller_gid %d\n",
+ tpath, mask, caller_uid, caller_gid);
+
+ for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i];
+
+ err = ceph_mds_auth_match(mdsc, s, cred, tpath);
+ if (err < 0) {
+ put_cred(cred);
+ return err;
+ } else if (err > 0) {
+ /* always follow the last auth caps' permission */
+ root_squash_perms = true;
+ rw_perms_s = NULL;
+ if ((mask & MAY_WRITE) && s->writeable &&
+ s->match.root_squash && (!caller_uid || !caller_gid))
+ root_squash_perms = false;
+
+ if (((mask & MAY_WRITE) && !s->writeable) ||
+ ((mask & MAY_READ) && !s->readable))
+ rw_perms_s = s;
+ }
+ }
+
+ put_cred(cred);
+
+ doutc(cl, "root_squash_perms %d, rw_perms_s %p\n", root_squash_perms,
+ rw_perms_s);
+ if (root_squash_perms && rw_perms_s == NULL) {
+ doutc(cl, "access allowed\n");
+ return 0;
+ }
+
+ if (!root_squash_perms) {
+ doutc(cl, "root_squash is enabled and user(%d %d) isn't allowed to write",
+ caller_uid, caller_gid);
+ }
+ if (rw_perms_s) {
+ doutc(cl, "mds auth caps readable/writeable %d/%d while request r/w %d/%d",
+ rw_perms_s->readable, rw_perms_s->writeable,
+ !!(mask & MAY_READ), !!(mask & MAY_WRITE));
+ }
+ doutc(cl, "access denied\n");
+ return -EACCES;
+}
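
ceph_mds_check_access() walks every decoded auth cap, lets the last matching entry win, and then denies the request if either the root_squash rule blocks a write from uid 0/gid 0 or the winning cap lacks the read/write bit the request needs; if no entry matches at all, the access is allowed here and enforcement is presumably left to the MDS. A condensed sketch of just that final decision, with hypothetical standalone types in place of struct ceph_mds_cap_auth:

    /* Hedged sketch of the allow/deny decision, hypothetical types. */
    #include <stdbool.h>
    #include <stddef.h>

    #define MAY_WRITE 0x2
    #define MAY_READ  0x4

    struct cap_auth { bool readable, writeable, root_squash; };

    /* m is the last matching cap, or NULL; returns 0 when allowed */
    static int check_access(const struct cap_auth *m, int mask,
                            unsigned int uid, unsigned int gid)
    {
            bool root_squash_ok = true;
            const struct cap_auth *rw_denied = NULL;

            if (m) {
                    /* root_squash refuses writes from uid 0 or gid 0 */
                    if ((mask & MAY_WRITE) && m->writeable &&
                        m->root_squash && (!uid || !gid))
                            root_squash_ok = false;

                    /* the cap must carry the bits the request needs */
                    if (((mask & MAY_WRITE) && !m->writeable) ||
                        ((mask & MAY_READ) && !m->readable))
                            rw_denied = m;
            }

            return (root_squash_ok && !rw_denied) ? 0 : -1;  /* -1 ~ -EACCES */
    }
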
+
/*
* called before mount is ro, and before dentries are torn down.
* (hmm, does this still race with new lookups?)
*/
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
- dout("pre_umount\n");
- mdsc->stopping = 1;
+ doutc(mdsc->fsc->client, "begin\n");
+ mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
@@ -4792,18 +5843,22 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
ceph_msgr_flush();
ceph_cleanup_quotarealms_inodes(mdsc);
+ doutc(mdsc->fsc->client, "done\n");
}
/*
- * wait for all write mds requests to flush.
+ * flush the mdlog and wait for all write mds requests to flush.
*/
-static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
+static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
+ u64 want_tid)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req = NULL, *nextreq;
+ struct ceph_mds_session *last_session = NULL;
struct rb_node *n;
mutex_lock(&mdsc->mutex);
- dout("wait_unsafe_requests want %lld\n", want_tid);
+ doutc(cl, "want %lld\n", want_tid);
restart:
req = __get_oldest_req(mdsc);
while (req && req->r_tid <= want_tid) {
@@ -4815,14 +5870,32 @@ restart:
nextreq = NULL;
if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
(req->r_op & CEPH_MDS_OP_WRITE)) {
+ struct ceph_mds_session *s = req->r_session;
+
+ if (!s) {
+ req = nextreq;
+ continue;
+ }
+
/* write op */
ceph_mdsc_get_request(req);
if (nextreq)
ceph_mdsc_get_request(nextreq);
+ s = ceph_get_mds_session(s);
mutex_unlock(&mdsc->mutex);
- dout("wait_unsafe_requests wait on %llu (want %llu)\n",
- req->r_tid, want_tid);
+
+ /* send flush mdlog request to MDS */
+ if (last_session != s) {
+ send_flush_mdlog(s);
+ ceph_put_mds_session(last_session);
+ last_session = s;
+ } else {
+ ceph_put_mds_session(s);
+ }
+ doutc(cl, "wait on %llu (want %llu)\n",
+ req->r_tid, want_tid);
wait_for_completion(&req->r_safe_completion);
+
mutex_lock(&mdsc->mutex);
ceph_mdsc_put_request(req);
if (!nextreq)
@@ -4837,22 +5910,25 @@ restart:
req = nextreq;
}
mutex_unlock(&mdsc->mutex);
- dout("wait_unsafe_requests done\n");
+ ceph_put_mds_session(last_session);
+ doutc(cl, "done\n");
}
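
Relative to the old wait_unsafe_requests(), the renamed helper above also asks each MDS to flush its journal before the client blocks on the unsafe replies, and it remembers last_session so that a run of consecutive write requests against the same session sends only one flush-mdlog message. A tiny standalone sketch of that dedup-by-previous-target idea, with hypothetical types:

    /* Hedged sketch: flush each distinct target only once while walking an
     * ordered list of requests (hypothetical types and helpers).
     */
    #include <stdio.h>

    struct session { int id; };
    struct request { struct session *session; struct request *next; };

    static void flush_session(struct session *s) { printf("flush mds%d\n", s->id); }
    static void wait_request(struct request *r)  { (void)r; /* wait for safe reply */ }

    static void flush_and_wait(struct request *head)
    {
            struct session *last = NULL;
            struct request *r;

            for (r = head; r; r = r->next) {
                    if (r->session != last) {
                            flush_session(r->session);  /* once per distinct session */
                            last = r->session;
                    }
                    wait_request(r);
            }
    }

    int main(void)
    {
            struct session s0 = { 0 }, s1 = { 1 };
            struct request r3 = { &s1, NULL }, r2 = { &s0, &r3 }, r1 = { &s0, &r2 };

            flush_and_wait(&r1);   /* prints: flush mds0, then flush mds1 */
            return 0;
    }
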
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
u64 want_tid, want_flush;
if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
return;
- dout("sync\n");
+ doutc(cl, "sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
mutex_unlock(&mdsc->mutex);
ceph_flush_dirty_caps(mdsc);
+ ceph_flush_cap_releases(mdsc);
spin_lock(&mdsc->cap_dirty_lock);
want_flush = mdsc->last_cap_flush_tid;
if (!list_empty(&mdsc->cap_flush_list)) {
@@ -4863,10 +5939,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
}
spin_unlock(&mdsc->cap_dirty_lock);
- dout("sync want tid %lld flush_seq %lld\n",
- want_tid, want_flush);
+ doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
- wait_unsafe_requests(mdsc, want_tid);
+ flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
wait_caps_flush(mdsc, want_flush);
}
@@ -4881,16 +5956,17 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
}
/*
- * called after sb is ro.
+ * called after sb is ro or when metadata corrupted.
*/
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
struct ceph_options *opts = mdsc->fsc->client->options;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *session;
int i;
int skipped = 0;
- dout("close_sessions\n");
+ doutc(cl, "begin\n");
/* close sessions */
mutex_lock(&mdsc->mutex);
@@ -4908,7 +5984,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
}
mutex_unlock(&mdsc->mutex);
- dout("waiting for sessions to close\n");
+ doutc(cl, "waiting for sessions to close\n");
wait_event_timeout(mdsc->session_close_wq,
done_closing_sessions(mdsc, skipped),
ceph_timeout_jiffies(opts->mount_timeout));
@@ -4931,12 +6007,13 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
mutex_unlock(&mdsc->mutex);
ceph_cleanup_snapid_map(mdsc);
- ceph_cleanup_empty_realms(mdsc);
+ ceph_cleanup_global_and_empty_realms(mdsc);
cancel_work_sync(&mdsc->cap_reclaim_work);
+ cancel_work_sync(&mdsc->cap_unlink_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
- dout("stopped\n");
+ doutc(cl, "done\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
@@ -4944,7 +6021,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
struct ceph_mds_session *session;
int mds;
- dout("force umount\n");
+ doutc(mdsc->fsc->client, "force umount\n");
mutex_lock(&mdsc->mutex);
for (mds = 0; mds < mdsc->max_sessions; mds++) {
@@ -4975,7 +6052,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
- dout("stop\n");
+ doutc(mdsc->fsc->client, "stop\n");
/*
* Make sure the delayed work stopped before releasing
* the resources.
@@ -4990,13 +6067,25 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
ceph_caps_finalize(mdsc);
+
+ if (mdsc->s_cap_auths) {
+ int i;
+
+ for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ kfree(mdsc->s_cap_auths[i].match.gids);
+ kfree(mdsc->s_cap_auths[i].match.path);
+ kfree(mdsc->s_cap_auths[i].match.fs_name);
+ }
+ kfree(mdsc->s_cap_auths);
+ }
+
ceph_pool_perm_destroy(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc = fsc->mdsc;
- dout("mdsc_destroy %p\n", mdsc);
+ doutc(fsc->client, "%p\n", mdsc);
if (!mdsc)
return;
@@ -5010,12 +6099,13 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
fsc->mdsc = NULL;
kfree(mdsc);
- dout("mdsc_destroy %p done\n", mdsc);
+ doutc(fsc->client, "%p done\n", mdsc);
}
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
struct ceph_fs_client *fsc = mdsc->fsc;
+ struct ceph_client *cl = fsc->client;
const char *mds_namespace = fsc->mount_options->mds_namespace;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
@@ -5027,7 +6117,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
ceph_decode_need(&p, end, sizeof(u32), bad);
epoch = ceph_decode_32(&p);
- dout("handle_fsmap epoch %u\n", epoch);
+ doutc(cl, "epoch %u\n", epoch);
/* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
@@ -5072,7 +6162,10 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
return;
bad:
- pr_err("error decoding fsmap\n");
+ pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n",
+ err);
+ ceph_umount_begin(mdsc->fsc->sb);
+ ceph_msg_dump(msg);
err_out:
mutex_lock(&mdsc->mutex);
mdsc->mdsmap_err = err;
@@ -5085,6 +6178,7 @@ err_out:
*/
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
u32 epoch;
u32 maplen;
void *p = msg->front.iov_base;
@@ -5099,18 +6193,17 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
return;
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
- dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
+ doutc(cl, "epoch %u len %d\n", epoch, (int)maplen);
/* do we need it? */
mutex_lock(&mdsc->mutex);
if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
- dout("handle_map epoch %u <= our %u\n",
- epoch, mdsc->mdsmap->m_epoch);
+ doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
return;
}
- newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
+ newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad_unlock;
@@ -5139,7 +6232,10 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
- pr_err("error decoding mdsmap %d\n", err);
+ pr_err_client(cl, "error decoding mdsmap %d. Shutting down mount.\n",
+ err);
+ ceph_umount_begin(mdsc->fsc->sb);
+ ceph_msg_dump(msg);
return;
}
@@ -5168,14 +6264,18 @@ static void mds_peer_reset(struct ceph_connection *con)
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
- pr_warn("mds%d closed our session\n", s->s_mds);
- send_mds_reconnect(mdsc, s);
+ pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
+ s->s_mds);
+ if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO &&
+ ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT)
+ send_mds_reconnect(mdsc, s);
}
static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
int type = le16_to_cpu(msg->hdr.type);
mutex_lock(&mdsc->mutex);
@@ -5215,8 +6315,8 @@ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
break;
default:
- pr_err("received unknown message type %d %s\n", type,
- ceph_msg_type_name(type));
+ pr_err_client(cl, "received unknown message type %d %s\n",
+ type, ceph_msg_type_name(type));
}
out:
ceph_msg_put(msg);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 97c7f7bfa55f..0428a5eaf28c 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -14,9 +14,9 @@
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
-#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>
+#include "mdsmap.h"
#include "metric.h"
#include "super.h"
@@ -29,14 +29,17 @@ enum ceph_feature_type {
CEPHFS_FEATURE_MULTI_RECONNECT,
CEPHFS_FEATURE_DELEG_INO,
CEPHFS_FEATURE_METRIC_COLLECT,
-
- CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT,
+ CEPHFS_FEATURE_ALTERNATE_NAME,
+ CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
+ CEPHFS_FEATURE_OP_GETVXATTR,
+ CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ CEPHFS_FEATURE_NEW_SNAPREALM_INFO,
+ CEPHFS_FEATURE_HAS_OWNER_UIDGID,
+ CEPHFS_FEATURE_MDS_AUTH_CAPS_CHECK,
+
+ CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_MDS_AUTH_CAPS_CHECK,
};
-/*
- * This will always have the highest feature bit value
- * as the last element of the array.
- */
#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
0, 1, 2, 3, 4, 5, 6, 7, \
CEPHFS_FEATURE_MIMIC, \
@@ -45,10 +48,13 @@ enum ceph_feature_type {
CEPHFS_FEATURE_MULTI_RECONNECT, \
CEPHFS_FEATURE_DELEG_INO, \
CEPHFS_FEATURE_METRIC_COLLECT, \
- \
- CEPHFS_FEATURE_MAX, \
+ CEPHFS_FEATURE_ALTERNATE_NAME, \
+ CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \
+ CEPHFS_FEATURE_OP_GETVXATTR, \
+ CEPHFS_FEATURE_32BITS_RETRY_FWD, \
+ CEPHFS_FEATURE_HAS_OWNER_UIDGID, \
+ CEPHFS_FEATURE_MDS_AUTH_CAPS_CHECK, \
}
-#define CEPHFS_FEATURES_CLIENT_REQUIRED {}
/*
* Some lock dependencies:
@@ -67,6 +73,24 @@ enum ceph_feature_type {
struct ceph_fs_client;
struct ceph_cap;
+#define MDS_AUTH_UID_ANY -1
+
+struct ceph_mds_cap_match {
+ s64 uid; /* default to MDS_AUTH_UID_ANY */
+ u32 num_gids;
+ u32 *gids; /* use these GIDs */
+ char *path; /* require path to be child of this
+ (may be "" or "/" for any) */
+ char *fs_name;
+ bool root_squash; /* default to false */
+};
+
+struct ceph_mds_cap_auth {
+ struct ceph_mds_cap_match match;
+ bool readable;
+ bool writeable;
+};
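
These two structs hold the client-side copy of the MDS auth caps delivered at session open (the msg_version >= 6 decoding earlier in this patch); each entry pairs a match section (uid, gids, path, fs_name, root_squash) with the readable/writeable verdict consulted by ceph_mds_check_access(). Purely as an illustration, with hypothetical values not tied to any real cluster, a cap along the lines of "allow rw fsname=cephfs path=/dir uid=1000 gids=1000" with root_squash might be populated roughly as:

    /* Hypothetical example of one decoded entry; values are made up. */
    static u32 example_gids[] = { 1000 };

    static struct ceph_mds_cap_auth example_auth = {
            .match = {
                    .uid         = 1000,
                    .num_gids    = 1,
                    .gids        = example_gids,
                    .path        = "/dir",    /* trailing '/' already stripped */
                    .fs_name     = "cephfs",
                    .root_squash = true,
            },
            .readable  = true,
            .writeable = true,
    };
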
+
/*
* parsed info about a single inode. pointers are into the encoded
* on-wire structures within the mds reply message payload.
@@ -88,18 +112,29 @@ struct ceph_mds_reply_info_in {
s32 dir_pin;
struct ceph_timespec btime;
struct ceph_timespec snap_btime;
+ u8 *fscrypt_auth;
+ u8 *fscrypt_file;
+ u32 fscrypt_auth_len;
+ u32 fscrypt_file_len;
u64 rsnaps;
u64 change_attr;
};
struct ceph_mds_reply_dir_entry {
+ bool is_nokey;
char *name;
u32 name_len;
+ u32 raw_hash;
struct ceph_mds_reply_lease *lease;
struct ceph_mds_reply_info_in inode;
loff_t offset;
};
+struct ceph_mds_reply_xattr {
+ char *xattr_value;
+ size_t xattr_value_len;
+};
+
/*
* parsed info about an mds reply, including information about
* either: 1) the target inode and/or its parent directory and dentry,
@@ -113,8 +148,11 @@ struct ceph_mds_reply_info_parsed {
struct ceph_mds_reply_info_in diri, targeti;
struct ceph_mds_reply_dirfrag *dirfrag;
char *dname;
+ u8 *altname;
u32 dname_len;
+ u32 altname_len;
struct ceph_mds_reply_lease *dlease;
+ struct ceph_mds_reply_xattr xattr_info;
/* extra */
union {
@@ -259,6 +297,9 @@ struct ceph_mds_request {
struct inode *r_parent; /* parent dir inode */
struct inode *r_target_inode; /* resulting inode */
+ struct inode *r_new_inode; /* new inode (for creates) */
+
+ const struct qstr *r_dname; /* stable name (for ->d_revalidate) */
#define CEPH_MDS_R_DIRECT_IS_HASH (1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED (2) /* call was aborted */
@@ -268,14 +309,23 @@ struct ceph_mds_request {
#define CEPH_MDS_R_DID_PREPOPULATE (6) /* prepopulated readdir */
#define CEPH_MDS_R_PARENT_LOCKED (7) /* is r_parent->i_rwsem wlocked? */
#define CEPH_MDS_R_ASYNC (8) /* async request */
+#define CEPH_MDS_R_FSCRYPT_FILE (9) /* must marshal fscrypt_file field */
unsigned long r_req_flags;
struct mutex r_fill_mutex;
union ceph_mds_request_args r_args;
+
+ struct ceph_fscrypt_auth *r_fscrypt_auth;
+ u64 r_fscrypt_file;
+
+ u8 *r_altname; /* fscrypt binary crypttext for long filenames */
+ u32 r_altname_len; /* length of r_altname */
+
int r_fmode; /* file mode, if expecting cap */
- const struct cred *r_cred;
int r_request_release_offset;
+ const struct cred *r_cred;
+ struct mnt_idmap *r_mnt_idmap;
struct timespec64 r_stamp;
/* for choosing which mds to send this request to */
@@ -296,12 +346,11 @@ struct ceph_mds_request {
struct ceph_msg *r_reply;
struct ceph_mds_reply_info_parsed r_reply_info;
int r_err;
-
+ u32 r_readdir_offset;
struct page *r_locked_page;
int r_dir_caps;
int r_num_caps;
- u32 r_readdir_offset;
unsigned long r_timeout; /* optional. jiffies, 0 is "wait forever" */
unsigned long r_started; /* start time to measure timeout against */
@@ -329,13 +378,14 @@ struct ceph_mds_request {
struct completion r_completion;
struct completion r_safe_completion;
ceph_mds_request_callback_t r_callback;
- ceph_mds_request_wait_callback_t r_wait_for_completion;
struct list_head r_unsafe_item; /* per-session unsafe list item */
long long r_dir_release_cnt;
long long r_dir_ordered_cnt;
int r_readdir_cache_idx;
+ int r_feature_needed;
+
struct ceph_cap_reservation r_caps_reservation;
};
@@ -351,8 +401,8 @@ struct ceph_snapid_map {
struct rb_node node;
struct list_head lru;
atomic_t ref;
- u64 snap;
dev_t dev;
+ u64 snap;
unsigned long last_used;
};
@@ -368,6 +418,8 @@ struct ceph_quotarealm_inode {
struct inode *inode;
};
+#ifdef CONFIG_DEBUG_FS
+
struct cap_wait {
struct list_head list;
u64 ino;
@@ -376,6 +428,14 @@ struct cap_wait {
int want;
};
+#endif
+
+enum {
+ CEPH_MDSC_STOPPING_BEGIN = 1,
+ CEPH_MDSC_STOPPING_FLUSHING = 2,
+ CEPH_MDSC_STOPPING_FLUSHED = 3,
+};
+
/*
* mds client state
*/
@@ -392,7 +452,14 @@ struct ceph_mds_client {
struct ceph_mds_session **sessions; /* NULL for mds if no session */
atomic_t num_sessions;
int max_sessions; /* len of sessions array */
- int stopping; /* true if shutting down */
+
+ spinlock_t stopping_lock; /* protect snap_empty */
+ int stopping; /* the stage of shutting down */
+ atomic_t stopping_blockers;
+ struct completion stopping_waiter;
+
+ atomic64_t dirty_folios;
+ wait_queue_head_t flush_end_wq;
atomic64_t quotarealms_count; /* # realms with quota */
/*
@@ -423,7 +490,8 @@ struct ceph_mds_client {
struct delayed_work delayed_work; /* delayed work */
unsigned long last_renew_caps; /* last time we renewed our caps */
struct list_head cap_delay_list; /* caps with delayed release */
- spinlock_t cap_delay_lock; /* protects cap_delay_list */
+ struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
+ spinlock_t cap_delay_lock; /* protects cap_delay_list and cap_unlink_delay_list */
struct list_head snap_flush_list; /* cap_snaps ready to flush */
spinlock_t snap_flush_lock;
@@ -437,6 +505,8 @@ struct ceph_mds_client {
struct work_struct cap_reclaim_work;
atomic_t cap_reclaim_pending;
+ struct work_struct cap_unlink_work;
+
/*
* Cap reservations
*
@@ -451,7 +521,9 @@ struct ceph_mds_client {
spinlock_t caps_list_lock;
struct list_head caps_list; /* unused (reserved or
unreserved) */
+#ifdef CONFIG_DEBUG_FS
struct list_head cap_wait_list;
+#endif
int caps_total_count; /* total caps allocated */
int caps_use_count; /* in use */
int caps_use_max; /* max used caps */
@@ -472,6 +544,9 @@ struct ceph_mds_client {
struct rw_semaphore pool_perm_rwsem;
struct rb_root pool_perm_tree;
+ u32 s_cap_auths_num;
+ struct ceph_mds_cap_auth *s_cap_auths;
+
char nodename[__NEW_UTS_LEN + 1];
};
@@ -489,9 +564,6 @@ extern struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s);
extern void ceph_put_mds_session(struct ceph_mds_session *s);
-extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
- struct ceph_msg *msg, int mds);
-
extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
@@ -507,11 +579,14 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req);
+int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req,
+ ceph_mds_request_wait_callback_t wait_func);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req);
extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req);
-extern void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req);
+extern void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
kref_get(&req->r_kref);
@@ -529,24 +604,38 @@ extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
struct ceph_cap *cap);
-extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session);
+extern void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+extern void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc);
extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
- int (*cb)(struct inode *,
- struct ceph_cap *, void *),
+ int (*cb)(struct inode *, int mds, void *),
void *arg);
+extern int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath,
+ int mask);
+
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
-static inline void ceph_mdsc_free_path(char *path, int len)
+/*
+ * Structure to group path-related output parameters for build_*_path functions
+ */
+struct ceph_path_info {
+ const char *path;
+ int pathlen;
+ struct ceph_vino vino;
+ bool freepath;
+};
+
+static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info)
{
- if (!IS_ERR_OR_NULL(path))
- __putname(path - (PATH_MAX - 1 - len));
+ if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path))
+ __putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen));
}
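The pointer arithmetic in ceph_mdsc_free_path_info() implies that ceph_mdsc_build_path() assembles the path leaf-to-root at the tail of a PATH_MAX buffer and returns a pointer into that buffer, so the start of the allocation is recovered as path - (PATH_MAX - 1 - pathlen). A minimal userspace sketch of that layout; the buffer handling and names here are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/*
 * Build a path at the very end of a PATH_MAX buffer, the way a
 * leaf-to-root walk would, then recover the allocation start from
 * (path, pathlen) alone, using the same arithmetic as the free helper.
 */
int main(void)
{
	char *buf = malloc(PATH_MAX);      /* stands in for __getname() */
	char *end = buf + PATH_MAX - 1;    /* terminating NUL lives here */
	const char *names[] = { "file", "dir", "mnt" };  /* leaf first */
	char *path = end;
	int pathlen, i;

	*end = '\0';
	for (i = 0; i < 3; i++) {          /* prepend each component */
		size_t len = strlen(names[i]);

		path -= len;
		memcpy(path, names[i], len);
		*--path = '/';
	}
	pathlen = end - path;

	printf("path=%s pathlen=%d\n", path, pathlen);
	/* recover the allocation start exactly as the free helper does */
	free(path - (PATH_MAX - 1 - pathlen));
	return 0;
}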
-extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
- int stop_on_nosnap);
+extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
+ struct dentry *dentry, struct ceph_path_info *path_info,
+ int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
@@ -560,8 +649,6 @@ extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
-extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session);
extern int ceph_trim_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
@@ -572,9 +659,12 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
return wait_on_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT,
- TASK_INTERRUPTIBLE);
+ TASK_KILLABLE);
}
+extern int ceph_wait_on_conflict_unlink(struct dentry *dentry);
extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
extern int ceph_restore_deleg_ino(struct ceph_mds_session *session, u64 ino);
+
+extern bool enable_unsafe_idmap;
#endif
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 61d67cbcb367..2c7b151a7c95 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -7,10 +7,11 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/ceph/mdsmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
+#include "mdsmap.h"
+#include "mds_client.h"
#include "super.h"
#define CEPH_MDS_IS_READY(i, ignore_laggy) \
@@ -29,7 +30,7 @@ static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
return -1;
/* pick */
- n = prandom_u32() % n;
+ n = get_random_u32_below(n);
for (j = 0, i = 0; i < m->possible_max_rank; i++) {
if (CEPH_MDS_IS_READY(i, ignore_laggy))
j++;
@@ -114,8 +115,10 @@ bad:
* Ignore any fields we don't care about (there are quite a few of
* them).
*/
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
+struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
+ void *end, bool msgr2)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mdsmap *m;
const void *start = *p;
int i, j, n;
@@ -233,20 +236,18 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
*p = info_end;
}
- dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
- i+1, n, global_id, mds, inc,
- ceph_pr_addr(&addr),
- ceph_mds_state_name(state),
- laggy ? "(laggy)" : "");
+ doutc(cl, "%d/%d %lld mds%d.%d %s %s%s\n", i+1, n, global_id,
+ mds, inc, ceph_pr_addr(&addr),
+ ceph_mds_state_name(state), laggy ? "(laggy)" : "");
if (mds < 0 || mds >= m->possible_max_rank) {
- pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
+ pr_warn_client(cl, "got incorrect mds(%d)\n", mds);
continue;
}
if (state <= 0) {
- dout("mdsmap_decode got incorrect state(%s)\n",
- ceph_mds_state_name(state));
+ doutc(cl, "got incorrect state(%s)\n",
+ ceph_mds_state_name(state));
continue;
}
@@ -263,10 +264,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
goto nomem;
for (j = 0; j < num_export_targets; j++) {
target = ceph_decode_32(&pexport_targets);
- if (target >= m->possible_max_rank) {
- err = -EIO;
- goto corrupt;
- }
info->export_targets[j] = target;
}
} else {
@@ -356,12 +353,22 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
__decode_and_drop_type(p, end, u8, bad_ext);
}
if (mdsmap_ev >= 8) {
- u32 name_len;
+ u32 fsname_len;
/* enabled */
ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
- ceph_decode_32_safe(p, end, name_len, bad_ext);
- ceph_decode_need(p, end, name_len, bad_ext);
- *p += name_len;
+ /* fs_name */
+ ceph_decode_32_safe(p, end, fsname_len, bad_ext);
+
+ /* validate fsname against mds_namespace */
+ if (!namespace_equals(mdsc->fsc->mount_options, *p,
+ fsname_len)) {
+ pr_warn_client(cl, "fsname %*pE doesn't match mds_namespace %s\n",
+ (int)fsname_len, (char *)*p,
+ mdsc->fsc->mount_options->mds_namespace);
+ goto bad;
+ }
+ /* skip fsname after validation */
+ ceph_decode_skip_n(p, end, fsname_len, bad);
}
/* damaged */
if (mdsmap_ev >= 9) {
@@ -374,17 +381,34 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
} else {
m->m_damaged = false;
}
+ if (mdsmap_ev >= 17) {
+ /* balancer */
+ ceph_decode_skip_string(p, end, bad_ext);
+ /* standby_count_wanted */
+ ceph_decode_skip_32(p, end, bad_ext);
+ /* old_max_mds */
+ ceph_decode_skip_32(p, end, bad_ext);
+ /* min_compat_client */
+ ceph_decode_skip_8(p, end, bad_ext);
+ /* required_client_features */
+ ceph_decode_skip_set(p, end, 64, bad_ext);
+ /* bal_rank_mask */
+ ceph_decode_skip_string(p, end, bad_ext);
+ }
+ if (mdsmap_ev >= 18) {
+ ceph_decode_64_safe(p, end, m->m_max_xattr_size, bad_ext);
+ }
bad_ext:
- dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
- !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
+ doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+ !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
*p = end;
- dout("mdsmap_decode success epoch %u\n", m->m_epoch);
+ doutc(cl, "success epoch %u\n", m->m_epoch);
return m;
nomem:
err = -ENOMEM;
goto out_err;
corrupt:
- pr_err("corrupt mdsmap\n");
+ pr_err_client(cl, "corrupt mdsmap\n");
print_hex_dump(KERN_DEBUG, "mdsmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
new file mode 100644
index 000000000000..1f2171dd01bf
--- /dev/null
+++ b/fs/ceph/mdsmap.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_MDSMAP_H
+#define _FS_CEPH_MDSMAP_H
+
+#include <linux/bug.h>
+#include <linux/ceph/types.h>
+
+struct ceph_mds_client;
+
+/*
+ * mds map - describe servers in the mds cluster.
+ *
+ * we limit fields to those the client actually cares about
+ */
+struct ceph_mds_info {
+ u64 global_id;
+ struct ceph_entity_addr addr;
+ s32 state;
+ int num_export_targets;
+ bool laggy;
+ u32 *export_targets;
+};
+
+struct ceph_mdsmap {
+ u32 m_epoch, m_client_epoch, m_last_failure;
+ u32 m_root;
+ u32 m_session_timeout; /* seconds */
+ u32 m_session_autoclose; /* seconds */
+ u64 m_max_file_size;
+ /*
+ * Maximum size of the xattrs blob.
+ * Zeroed by default to force use of the (sync) SETXATTR op.
+ */
+ u64 m_max_xattr_size;
+ u32 m_max_mds; /* expected up:active mds number */
+ u32 m_num_active_mds; /* actual up:active mds number */
+ u32 possible_max_rank; /* possible max rank index */
+ struct ceph_mds_info *m_info;
+
+ /* which object pools file data can be stored in */
+ int m_num_data_pg_pools;
+ u64 *m_data_pg_pools;
+ u64 m_cas_pg_pool;
+
+ bool m_enabled;
+ bool m_damaged;
+ int m_num_laggy;
+};
+
+static inline struct ceph_entity_addr *
+ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
+{
+ if (w >= m->possible_max_rank)
+ return NULL;
+ return &m->m_info[w].addr;
+}
+
+static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
+{
+ BUG_ON(w < 0);
+ if (w >= m->possible_max_rank)
+ return CEPH_MDS_STATE_DNE;
+ return m->m_info[w].state;
+}
+
+static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+{
+ if (w >= 0 && w < m->possible_max_rank)
+ return m->m_info[w].laggy;
+ return false;
+}
+
+extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
+ void *end, bool msgr2);
+extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
+
+#endif
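A short sketch of how the new header's inline helpers are typically consumed, e.g. only talking to a rank that is in range, active and not laggy. The helper name mds_rank_usable() is illustrative and not part of the patch; metric.c below gates on ceph_mdsmap_get_state() in exactly this way.

/* Illustrative only: consuming the inline helpers above. */
#include "mdsmap.h"

static bool mds_rank_usable(struct ceph_mdsmap *m, int mds)
{
	/* out-of-range ranks report CEPH_MDS_STATE_DNE and a NULL addr */
	if (ceph_mdsmap_get_state(m, mds) != CEPH_MDS_STATE_ACTIVE)
		return false;
	if (ceph_mdsmap_is_laggy(m, mds))
		return false;
	return ceph_mdsmap_get_addr(m, mds) != NULL;
}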
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 04d5df29bbbf..871c1090e520 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -8,6 +8,12 @@
#include "metric.h"
#include "mds_client.h"
+static void ktime_to_ceph_timespec(struct ceph_timespec *ts, ktime_t val)
+{
+ struct timespec64 t = ktime_to_timespec64(val);
+ ceph_encode_timespec64(ts, &t);
+}
+
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s)
{
@@ -25,12 +31,20 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
struct ceph_client_metric *m = &mdsc->metric;
u64 nr_caps = atomic64_read(&m->total_caps);
u32 header_len = sizeof(struct ceph_metric_header);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
- struct timespec64 ts;
s64 sum;
s32 items = 0;
s32 len;
+ /* Do not send the metrics until the MDS rank is ready */
+ mutex_lock(&mdsc->mutex);
+ if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
+ mutex_unlock(&mdsc->mutex);
+ return false;
+ }
+ mutex_unlock(&mdsc->mutex);
+
len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
+ sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
+ sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
@@ -38,8 +52,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
if (!msg) {
- pr_err("send metrics to mds%d, failed to allocate message\n",
- s->s_mds);
+ pr_err_client(cl, "to mds%d, failed to allocate message\n",
+ s->s_mds);
return false;
}
@@ -59,37 +73,40 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the read latency metric */
read = (struct ceph_metric_read_latency *)(cap + 1);
read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
- read->header.ver = 1;
+ read->header.ver = 2;
read->header.compat = 1;
read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
- sum = m->read_latency_sum;
- jiffies_to_timespec64(sum, &ts);
- read->sec = cpu_to_le32(ts.tv_sec);
- read->nsec = cpu_to_le32(ts.tv_nsec);
+ sum = m->metric[METRIC_READ].latency_sum;
+ ktime_to_ceph_timespec(&read->lat, sum);
+ ktime_to_ceph_timespec(&read->avg, m->metric[METRIC_READ].latency_avg);
+ read->sq_sum = cpu_to_le64(m->metric[METRIC_READ].latency_sq_sum);
+ read->count = cpu_to_le64(m->metric[METRIC_READ].total);
items++;
/* encode the write latency metric */
write = (struct ceph_metric_write_latency *)(read + 1);
write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
- write->header.ver = 1;
+ write->header.ver = 2;
write->header.compat = 1;
write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
- sum = m->write_latency_sum;
- jiffies_to_timespec64(sum, &ts);
- write->sec = cpu_to_le32(ts.tv_sec);
- write->nsec = cpu_to_le32(ts.tv_nsec);
+ sum = m->metric[METRIC_WRITE].latency_sum;
+ ktime_to_ceph_timespec(&write->lat, sum);
+ ktime_to_ceph_timespec(&write->avg, m->metric[METRIC_WRITE].latency_avg);
+ write->sq_sum = cpu_to_le64(m->metric[METRIC_WRITE].latency_sq_sum);
+ write->count = cpu_to_le64(m->metric[METRIC_WRITE].total);
items++;
/* encode the metadata latency metric */
meta = (struct ceph_metric_metadata_latency *)(write + 1);
meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
- meta->header.ver = 1;
+ meta->header.ver = 2;
meta->header.compat = 1;
meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
- sum = m->metadata_latency_sum;
- jiffies_to_timespec64(sum, &ts);
- meta->sec = cpu_to_le32(ts.tv_sec);
- meta->nsec = cpu_to_le32(ts.tv_nsec);
+ sum = m->metric[METRIC_METADATA].latency_sum;
+ ktime_to_ceph_timespec(&meta->lat, sum);
+ ktime_to_ceph_timespec(&meta->avg, m->metric[METRIC_METADATA].latency_avg);
+ meta->sq_sum = cpu_to_le64(m->metric[METRIC_METADATA].latency_sq_sum);
+ meta->count = cpu_to_le64(m->metric[METRIC_METADATA].total);
items++;
/* encode the dentry lease metric */
@@ -141,8 +158,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
rsize->header.ver = 1;
rsize->header.compat = 1;
rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
- rsize->total_ops = cpu_to_le64(m->total_reads);
- rsize->total_size = cpu_to_le64(m->read_size_sum);
+ rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
+ rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
items++;
/* encode the write io size metric */
@@ -151,8 +168,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
wsize->header.ver = 1;
wsize->header.compat = 1;
wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
- wsize->total_ops = cpu_to_le64(m->total_writes);
- wsize->total_size = cpu_to_le64(m->write_size_sum);
+ wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
+ wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
items++;
put_unaligned_le32(items, &head->num);
@@ -160,8 +177,6 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
msg->hdr.version = cpu_to_le16(1);
msg->hdr.compat_version = cpu_to_le16(1);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout("client%llu send metrics to mds%d\n",
- ceph_client_gid(mdsc->fsc->client), s->s_mds);
ceph_con_send(&s->s_con, msg);
return true;
@@ -202,7 +217,7 @@ static void metric_delayed_work(struct work_struct *work)
struct ceph_mds_client *mdsc =
container_of(m, struct ceph_mds_client, metric);
- if (mdsc->stopping)
+ if (mdsc->stopping || disable_send_metrics)
return;
if (!m->session || !check_session_state(m->session)) {
@@ -220,7 +235,8 @@ static void metric_delayed_work(struct work_struct *work)
int ceph_metric_init(struct ceph_client_metric *m)
{
- int ret;
+ struct ceph_metric *metric;
+ int ret, i;
if (!m)
return -EINVAL;
@@ -243,32 +259,19 @@ int ceph_metric_init(struct ceph_client_metric *m)
if (ret)
goto err_i_caps_mis;
- spin_lock_init(&m->read_metric_lock);
- m->read_latency_sq_sum = 0;
- m->read_latency_min = KTIME_MAX;
- m->read_latency_max = 0;
- m->total_reads = 0;
- m->read_latency_sum = 0;
- m->read_size_min = U64_MAX;
- m->read_size_max = 0;
- m->read_size_sum = 0;
-
- spin_lock_init(&m->write_metric_lock);
- m->write_latency_sq_sum = 0;
- m->write_latency_min = KTIME_MAX;
- m->write_latency_max = 0;
- m->total_writes = 0;
- m->write_latency_sum = 0;
- m->write_size_min = U64_MAX;
- m->write_size_max = 0;
- m->write_size_sum = 0;
-
- spin_lock_init(&m->metadata_metric_lock);
- m->metadata_latency_sq_sum = 0;
- m->metadata_latency_min = KTIME_MAX;
- m->metadata_latency_max = 0;
- m->total_metadatas = 0;
- m->metadata_latency_sum = 0;
+ for (i = 0; i < METRIC_MAX; i++) {
+ metric = &m->metric[i];
+ spin_lock_init(&metric->lock);
+ metric->size_sum = 0;
+ metric->size_min = U64_MAX;
+ metric->size_max = 0;
+ metric->total = 0;
+ metric->latency_sum = 0;
+ metric->latency_avg = 0;
+ metric->latency_sq_sum = 0;
+ metric->latency_min = KTIME_MAX;
+ metric->latency_max = 0;
+ }
atomic64_set(&m->opened_files, 0);
ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
@@ -322,25 +325,24 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
max = new; \
}
-static inline void __update_stdev(ktime_t total, ktime_t lsum,
- ktime_t *sq_sump, ktime_t lat)
+static inline void __update_mean_and_stdev(ktime_t total, ktime_t *lavg,
+ ktime_t *sq_sump, ktime_t lat)
{
- ktime_t avg, sq;
-
- if (unlikely(total == 1))
- return;
-
- /* the sq is (lat - old_avg) * (lat - new_avg) */
- avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
- sq = lat - avg;
- avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
- sq = sq * (lat - avg);
- *sq_sump += sq;
+ ktime_t avg;
+
+ if (unlikely(total == 1)) {
+ *lavg = lat;
+ } else {
+ /* the sq is (lat - old_avg) * (lat - new_avg) */
+ avg = *lavg + div64_s64(lat - *lavg, total);
+ *sq_sump += (lat - *lavg)*(lat - avg);
+ *lavg = avg;
+ }
}
-void ceph_update_read_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc)
+void ceph_update_metrics(struct ceph_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
{
ktime_t lat = ktime_sub(r_end, r_start);
ktime_t total;
@@ -348,63 +350,13 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
return;
- spin_lock(&m->read_metric_lock);
- total = ++m->total_reads;
- m->read_size_sum += size;
- m->read_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->read_size_min,
- m->read_size_max,
- size);
- METRIC_UPDATE_MIN_MAX(m->read_latency_min,
- m->read_latency_max,
- lat);
- __update_stdev(total, m->read_latency_sum,
- &m->read_latency_sq_sum, lat);
- spin_unlock(&m->read_metric_lock);
-}
-
-void ceph_update_write_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc)
-{
- ktime_t lat = ktime_sub(r_end, r_start);
- ktime_t total;
-
- if (unlikely(rc && rc != -ETIMEDOUT))
- return;
-
- spin_lock(&m->write_metric_lock);
- total = ++m->total_writes;
- m->write_size_sum += size;
- m->write_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->write_size_min,
- m->write_size_max,
- size);
- METRIC_UPDATE_MIN_MAX(m->write_latency_min,
- m->write_latency_max,
- lat);
- __update_stdev(total, m->write_latency_sum,
- &m->write_latency_sq_sum, lat);
- spin_unlock(&m->write_metric_lock);
-}
-
-void ceph_update_metadata_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- int rc)
-{
- ktime_t lat = ktime_sub(r_end, r_start);
- ktime_t total;
-
- if (unlikely(rc && rc != -ENOENT))
- return;
-
- spin_lock(&m->metadata_metric_lock);
- total = ++m->total_metadatas;
- m->metadata_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
- m->metadata_latency_max,
- lat);
- __update_stdev(total, m->metadata_latency_sum,
- &m->metadata_latency_sq_sum, lat);
- spin_unlock(&m->metadata_metric_lock);
+ spin_lock(&m->lock);
+ total = ++m->total;
+ m->size_sum += size;
+ METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
+ m->latency_sum += lat;
+ METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
+ __update_mean_and_stdev(total, &m->latency_avg, &m->latency_sq_sum,
+ lat);
+ spin_unlock(&m->lock);
}
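__update_mean_and_stdev() above is the standard online (Welford-style) update: avg_new = avg_old + (x - avg_old)/n and sq_sum += (x - avg_old) * (x - avg_new), so a standard-deviation estimate can later be derived from sq_sum (for example sqrt(sq_sum / (n - 1))) without storing individual samples. A small standalone sketch of the same recurrence in floating point, with illustrative names:

#include <stdio.h>

struct running_stat {
	long long n;
	double avg;     /* running mean */
	double sq_sum;  /* sum of (x - avg_old) * (x - avg_new) */
};

/* Same recurrence as __update_mean_and_stdev(), in floating point. */
static void stat_add(struct running_stat *s, double x)
{
	double old_avg = s->avg;

	s->n++;
	if (s->n == 1) {
		s->avg = x;
		return;
	}
	s->avg = old_avg + (x - old_avg) / s->n;
	s->sq_sum += (x - old_avg) * (x - s->avg);
}

int main(void)
{
	struct running_stat s = { 0, 0.0, 0.0 };
	double samples[] = { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };
	int i;

	for (i = 0; i < 8; i++)
		stat_add(&s, samples[i]);

	/* sample variance = sq_sum / (n - 1); population = sq_sum / n */
	printf("mean=%f variance=%f\n", s.avg, s.sq_sum / (s.n - 1));
	return 0;
}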
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
index 0133955a3c6a..0d0c44bd3332 100644
--- a/fs/ceph/metric.h
+++ b/fs/ceph/metric.h
@@ -2,7 +2,7 @@
#ifndef _FS_CEPH_MDS_METRIC_H
#define _FS_CEPH_MDS_METRIC_H
-#include <linux/types.h>
+#include <linux/ceph/types.h>
#include <linux/percpu_counter.h>
#include <linux/ktime.h>
@@ -19,27 +19,39 @@ enum ceph_metric_type {
CLIENT_METRIC_TYPE_OPENED_INODES,
CLIENT_METRIC_TYPE_READ_IO_SIZES,
CLIENT_METRIC_TYPE_WRITE_IO_SIZES,
-
- CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_WRITE_IO_SIZES,
+ CLIENT_METRIC_TYPE_AVG_READ_LATENCY,
+ CLIENT_METRIC_TYPE_STDEV_READ_LATENCY,
+ CLIENT_METRIC_TYPE_AVG_WRITE_LATENCY,
+ CLIENT_METRIC_TYPE_STDEV_WRITE_LATENCY,
+ CLIENT_METRIC_TYPE_AVG_METADATA_LATENCY,
+ CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY,
+
+ CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY,
};
/*
* This will always have the highest metric bit value
* as the last element of the array.
*/
-#define CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED { \
- CLIENT_METRIC_TYPE_CAP_INFO, \
- CLIENT_METRIC_TYPE_READ_LATENCY, \
- CLIENT_METRIC_TYPE_WRITE_LATENCY, \
- CLIENT_METRIC_TYPE_METADATA_LATENCY, \
- CLIENT_METRIC_TYPE_DENTRY_LEASE, \
- CLIENT_METRIC_TYPE_OPENED_FILES, \
- CLIENT_METRIC_TYPE_PINNED_ICAPS, \
- CLIENT_METRIC_TYPE_OPENED_INODES, \
- CLIENT_METRIC_TYPE_READ_IO_SIZES, \
- CLIENT_METRIC_TYPE_WRITE_IO_SIZES, \
- \
- CLIENT_METRIC_TYPE_MAX, \
+#define CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED { \
+ CLIENT_METRIC_TYPE_CAP_INFO, \
+ CLIENT_METRIC_TYPE_READ_LATENCY, \
+ CLIENT_METRIC_TYPE_WRITE_LATENCY, \
+ CLIENT_METRIC_TYPE_METADATA_LATENCY, \
+ CLIENT_METRIC_TYPE_DENTRY_LEASE, \
+ CLIENT_METRIC_TYPE_OPENED_FILES, \
+ CLIENT_METRIC_TYPE_PINNED_ICAPS, \
+ CLIENT_METRIC_TYPE_OPENED_INODES, \
+ CLIENT_METRIC_TYPE_READ_IO_SIZES, \
+ CLIENT_METRIC_TYPE_WRITE_IO_SIZES, \
+ CLIENT_METRIC_TYPE_AVG_READ_LATENCY, \
+ CLIENT_METRIC_TYPE_STDEV_READ_LATENCY, \
+ CLIENT_METRIC_TYPE_AVG_WRITE_LATENCY, \
+ CLIENT_METRIC_TYPE_STDEV_WRITE_LATENCY, \
+ CLIENT_METRIC_TYPE_AVG_METADATA_LATENCY, \
+ CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY, \
+ \
+ CLIENT_METRIC_TYPE_MAX, \
}
struct ceph_metric_header {
@@ -60,22 +72,28 @@ struct ceph_metric_cap {
/* metric read latency header */
struct ceph_metric_read_latency {
struct ceph_metric_header header;
- __le32 sec;
- __le32 nsec;
+ struct ceph_timespec lat;
+ struct ceph_timespec avg;
+ __le64 sq_sum;
+ __le64 count;
} __packed;
/* metric write latency header */
struct ceph_metric_write_latency {
struct ceph_metric_header header;
- __le32 sec;
- __le32 nsec;
+ struct ceph_timespec lat;
+ struct ceph_timespec avg;
+ __le64 sq_sum;
+ __le64 count;
} __packed;
/* metric metadata latency header */
struct ceph_metric_metadata_latency {
struct ceph_metric_header header;
- __le32 sec;
- __le32 nsec;
+ struct ceph_timespec lat;
+ struct ceph_timespec avg;
+ __le64 sq_sum;
+ __le64 count;
} __packed;
/* metric dentry lease header */
@@ -125,6 +143,27 @@ struct ceph_metric_head {
__le32 num; /* the number of metrics that will be sent */
} __packed;
+enum metric_type {
+ METRIC_READ,
+ METRIC_WRITE,
+ METRIC_METADATA,
+ METRIC_COPYFROM,
+ METRIC_MAX
+};
+
+struct ceph_metric {
+ spinlock_t lock;
+ u64 total;
+ u64 size_sum;
+ u64 size_min;
+ u64 size_max;
+ ktime_t latency_sum;
+ ktime_t latency_avg;
+ ktime_t latency_sq_sum;
+ ktime_t latency_min;
+ ktime_t latency_max;
+};
+
/* This is the global metrics */
struct ceph_client_metric {
atomic64_t total_dentries;
@@ -135,32 +174,7 @@ struct ceph_client_metric {
struct percpu_counter i_caps_hit;
struct percpu_counter i_caps_mis;
- spinlock_t read_metric_lock;
- u64 total_reads;
- u64 read_size_sum;
- u64 read_size_min;
- u64 read_size_max;
- ktime_t read_latency_sum;
- ktime_t read_latency_sq_sum;
- ktime_t read_latency_min;
- ktime_t read_latency_max;
-
- spinlock_t write_metric_lock;
- u64 total_writes;
- u64 write_size_sum;
- u64 write_size_min;
- u64 write_size_max;
- ktime_t write_latency_sum;
- ktime_t write_latency_sq_sum;
- ktime_t write_latency_min;
- ktime_t write_latency_max;
-
- spinlock_t metadata_metric_lock;
- u64 total_metadatas;
- ktime_t metadata_latency_sum;
- ktime_t metadata_latency_sq_sum;
- ktime_t metadata_latency_min;
- ktime_t metadata_latency_max;
+ struct ceph_metric metric[METRIC_MAX];
/* The total number of directories and files that are opened */
atomic64_t opened_files;
@@ -195,13 +209,36 @@ static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
percpu_counter_inc(&m->i_caps_mis);
}
-extern void ceph_update_read_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc);
-extern void ceph_update_write_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc);
-extern void ceph_update_metadata_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- int rc);
+extern void ceph_update_metrics(struct ceph_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc);
+
+static inline void ceph_update_read_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_READ],
+ r_start, r_end, size, rc);
+}
+static inline void ceph_update_write_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_WRITE],
+ r_start, r_end, size, rc);
+}
+static inline void ceph_update_metadata_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_METADATA],
+ r_start, r_end, 0, rc);
+}
+static inline void ceph_update_copyfrom_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_COPYFROM],
+ r_start, r_end, size, rc);
+}
#endif /* _FS_CEPH_MDS_METRIC_H */
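With the per-type ceph_metric array, adding a latency class costs only an enum entry plus a thin wrapper such as ceph_update_copyfrom_metrics() above. An illustrative call site might look like the following sketch; the request itself is a stand-in, and only the wrapper and ktime_get() are taken from the patch and the kernel API.

/* Illustrative call site (assumes metric.h is included): timing one
 * copy-from operation and feeding the result into METRIC_COPYFROM. */
static int copyfrom_and_account(struct ceph_client_metric *metric,
				unsigned int len)
{
	ktime_t start = ktime_get();
	int ret = 0;	/* stand-in for the actual OSD copy-from request */

	ceph_update_copyfrom_metrics(metric, start, ktime_get(), len, ret);
	return ret;
}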
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 620c691af40e..d90eda19bcc4 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -30,6 +30,9 @@ static inline bool ceph_has_realms_with_quotas(struct inode *inode)
/* if root is the real CephFS root, we don't have quota realms */
if (root && ceph_ino(root) == CEPH_INO_ROOT)
return false;
+ /* MDS stray dirs have no quota realms */
+ if (ceph_vino_is_reserved(ceph_inode(inode)->i_vino))
+ return false;
/* otherwise, we can't know for sure */
return true;
}
@@ -40,29 +43,28 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
{
struct super_block *sb = mdsc->fsc->sb;
struct ceph_mds_quota *h = msg->front.iov_base;
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_vino vino;
struct inode *inode;
struct ceph_inode_info *ci;
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
+
if (msg->front.iov_len < sizeof(*h)) {
- pr_err("%s corrupt message mds%d len %d\n", __func__,
- session->s_mds, (int)msg->front.iov_len);
+ pr_err_client(cl, "corrupt message mds%d len %d\n",
+ session->s_mds, (int)msg->front.iov_len);
ceph_msg_dump(msg);
- return;
+ goto out;
}
- /* increment msg sequence number */
- mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
- mutex_unlock(&session->s_mutex);
-
/* lookup inode */
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
inode = ceph_find_inode(sb, vino);
if (!inode) {
- pr_warn("Failed to find inode %llu\n", vino.ino);
- return;
+ pr_warn_client(cl, "failed to find inode %llx\n", vino.ino);
+ goto out;
}
ci = ceph_inode(inode);
@@ -75,6 +77,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
spin_unlock(&ci->i_ceph_lock);
iput(inode);
+out:
+ ceph_dec_mds_stopping_blocker(mdsc);
}
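The quota handler now brackets its work with ceph_inc_mds_stopping_blocker()/ceph_dec_mds_stopping_blocker(); together with the stopping_blockers counter and stopping_waiter completion added to struct ceph_mds_client earlier, this presumably lets the unmount path wait for in-flight message handlers to drain. The real helpers live in mds_client.c and are not shown in this excerpt; the sketch below only illustrates the assumed counter-plus-completion shape, and the sketch_* names are hypothetical.

/*
 * Sketch (not the actual implementation): refuse new work once flushing
 * has started, and let the unmount path wait for the last in-flight
 * handler via the completion.
 */
static bool sketch_inc_stopping_blocker(struct ceph_mds_client *mdsc)
{
	bool allowed;

	spin_lock(&mdsc->stopping_lock);
	allowed = mdsc->stopping < CEPH_MDSC_STOPPING_FLUSHING;
	if (allowed)
		atomic_inc(&mdsc->stopping_blockers);
	spin_unlock(&mdsc->stopping_lock);
	return allowed;
}

static void sketch_dec_stopping_blocker(struct ceph_mds_client *mdsc)
{
	/* wake the unmount path once the last in-flight handler drops out */
	if (atomic_dec_and_test(&mdsc->stopping_blockers))
		complete_all(&mdsc->stopping_waiter);
}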
static struct ceph_quotarealm_inode *
@@ -82,6 +86,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
{
struct ceph_quotarealm_inode *qri = NULL;
struct rb_node **node, *parent = NULL;
+ struct ceph_client *cl = mdsc->fsc->client;
mutex_lock(&mdsc->quotarealms_inodes_mutex);
node = &(mdsc->quotarealms_inodes.rb_node);
@@ -107,7 +112,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
rb_link_node(&qri->node, parent, node);
rb_insert_color(&qri->node, &mdsc->quotarealms_inodes);
} else
- pr_warn("Failed to alloc quotarealms_inode\n");
+ pr_warn_client(cl, "Failed to alloc quotarealms_inode\n");
}
mutex_unlock(&mdsc->quotarealms_inodes_mutex);
@@ -126,6 +131,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
struct super_block *sb,
struct ceph_snap_realm *realm)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_quotarealm_inode *qri;
struct inode *in;
@@ -158,9 +164,9 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
}
if (IS_ERR(in)) {
- dout("Can't lookup inode %llx (err: %ld)\n",
- realm->ino, PTR_ERR(in));
- qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
+ doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
+ PTR_ERR(in));
+ qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */
} else {
qri->timeout = 0;
qri->inode = in;
@@ -191,10 +197,10 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
}
/*
- * This function walks through the snaprealm for an inode and returns the
- * ceph_snap_realm for the first snaprealm that has quotas set (either max_files
- * or max_bytes). If the root is reached, return the root ceph_snap_realm
- * instead.
+ * This function walks through the snaprealm hierarchy for an inode and sets
+ * *realmp to the first snaprealm that has quotas set (max_files, max_bytes,
+ * or any, depending on the 'which_quota' argument). If the root is reached,
+ * *realmp is set to the root ceph_snap_realm instead.
*
* Note that the caller is responsible for calling ceph_put_snap_realm() on the
* returned realm.
@@ -205,24 +211,29 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
* this function will return -EAGAIN; otherwise, the snaprealms walk-through
* will be restarted.
*/
-static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
- struct inode *inode, bool retry)
+static int get_quota_realm(struct ceph_mds_client *mdsc, struct inode *inode,
+ enum quota_get_realm which_quota,
+ struct ceph_snap_realm **realmp, bool retry)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = NULL;
struct ceph_snap_realm *realm, *next;
struct inode *in;
bool has_quota;
+ if (realmp)
+ *realmp = NULL;
if (ceph_snap(inode) != CEPH_NOSNAP)
- return NULL;
+ return 0;
restart:
realm = ceph_inode(inode)->i_snap_realm;
if (realm)
ceph_get_snap_realm(mdsc, realm);
else
- pr_err_ratelimited("get_quota_realm: ino (%llx.%llx) "
- "null i_snap_realm\n", ceph_vinop(inode));
+ pr_err_ratelimited_client(cl,
+ "%p %llx.%llx null i_snap_realm\n",
+ inode, ceph_vinop(inode));
while (realm) {
bool has_inode;
@@ -240,17 +251,20 @@ restart:
break;
ceph_put_snap_realm(mdsc, realm);
if (!retry)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
goto restart;
}
ci = ceph_inode(in);
- has_quota = __ceph_has_any_quota(ci);
+ has_quota = __ceph_has_quota(ci, which_quota);
iput(in);
next = realm->parent;
- if (has_quota || !next)
- return realm;
+ if (has_quota || !next) {
+ if (realmp)
+ *realmp = realm;
+ return 0;
+ }
ceph_get_snap_realm(mdsc, next);
ceph_put_snap_realm(mdsc, realm);
@@ -259,7 +273,7 @@ restart:
if (realm)
ceph_put_snap_realm(mdsc, realm);
- return NULL;
+ return 0;
}
bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
@@ -267,6 +281,7 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old->i_sb);
struct ceph_snap_realm *old_realm, *new_realm;
bool is_same;
+ int ret;
restart:
/*
@@ -276,9 +291,9 @@ restart:
* dropped and we can then restart the whole operation.
*/
down_read(&mdsc->snap_rwsem);
- old_realm = get_quota_realm(mdsc, old, true);
- new_realm = get_quota_realm(mdsc, new, false);
- if (PTR_ERR(new_realm) == -EAGAIN) {
+ get_quota_realm(mdsc, old, QUOTA_GET_ANY, &old_realm, true);
+ ret = get_quota_realm(mdsc, new, QUOTA_GET_ANY, &new_realm, false);
+ if (ret == -EAGAIN) {
up_read(&mdsc->snap_rwsem);
if (old_realm)
ceph_put_snap_realm(mdsc, old_realm);
@@ -312,6 +327,7 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
loff_t delta)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_snap_realm *realm, *next;
struct inode *in;
@@ -327,8 +343,9 @@ restart:
if (realm)
ceph_get_snap_realm(mdsc, realm);
else
- pr_err_ratelimited("check_quota_exceeded: ino (%llx.%llx) "
- "null i_snap_realm\n", ceph_vinop(inode));
+ pr_err_ratelimited_client(cl,
+ "%p %llx.%llx null i_snap_realm\n",
+ inode, ceph_vinop(inode));
while (realm) {
bool has_inode;
@@ -378,7 +395,7 @@ restart:
break;
default:
/* Shouldn't happen */
- pr_warn("Invalid quota check op (%d)\n", op);
+ pr_warn_client(cl, "Invalid quota check op (%d)\n", op);
exceeded = true; /* Just break the loop */
}
iput(in);
@@ -480,7 +497,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
bool is_updated = false;
down_read(&mdsc->snap_rwsem);
- realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root), true);
+ get_quota_realm(mdsc, d_inode(fsc->sb->s_root), QUOTA_GET_MAX_BYTES,
+ &realm, true);
up_read(&mdsc->snap_rwsem);
if (!realm)
return false;
@@ -494,10 +512,24 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
if (ci->i_max_bytes) {
total = ci->i_max_bytes >> CEPH_BLOCK_SHIFT;
used = ci->i_rbytes >> CEPH_BLOCK_SHIFT;
+ /* For quota size less than 4MB, use 4KB block size */
+ if (!total) {
+ total = ci->i_max_bytes >> CEPH_4K_BLOCK_SHIFT;
+ used = ci->i_rbytes >> CEPH_4K_BLOCK_SHIFT;
+ buf->f_frsize = 1 << CEPH_4K_BLOCK_SHIFT;
+ }
/* It is possible for a quota to be exceeded.
* Report 'zero' in that case
*/
free = total > used ? total - used : 0;
+ /* For a quota smaller than 4KB, report
+ * total=used=4KB, free=0 when the quota is full,
+ * and total=free=4KB, used=0 otherwise */
+ if (!total) {
+ total = 1;
+ free = ci->i_max_bytes > ci->i_rbytes ? 1 : 0;
+ buf->f_frsize = 1 << CEPH_4K_BLOCK_SHIFT;
+ }
}
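Worked example for the fallback above, assuming CEPH_BLOCK_SHIFT is 22 (4 MiB blocks) and CEPH_4K_BLOCK_SHIFT is 12 (neither definition appears in this hunk): with i_max_bytes = 2 MiB and i_rbytes = 512 KiB, the 4 MiB accounting yields total = 0, so the code retries with 4 KiB blocks and reports total = 512, used = 128, free = 384, and f_frsize = 4096.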
spin_unlock(&ci->i_ceph_lock);
if (total) {
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index b41e6724c591..c65f2b202b2b 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
+#include <linux/fs.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/iversion.h>
@@ -121,18 +122,23 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
if (!realm)
return ERR_PTR(-ENOMEM);
- atomic_set(&realm->nref, 1); /* for caller */
+ /* Do not release the global dummy snaprealm until unmounting */
+ if (ino == CEPH_INO_GLOBAL_SNAPREALM)
+ atomic_set(&realm->nref, 2);
+ else
+ atomic_set(&realm->nref, 1);
realm->ino = ino;
INIT_LIST_HEAD(&realm->children);
INIT_LIST_HEAD(&realm->child_item);
INIT_LIST_HEAD(&realm->empty_item);
INIT_LIST_HEAD(&realm->dirty_item);
+ INIT_LIST_HEAD(&realm->rebuild_item);
INIT_LIST_HEAD(&realm->inodes_with_caps);
spin_lock_init(&realm->inodes_with_caps_lock);
__insert_snap_realm(&mdsc->snap_realms, realm);
mdsc->num_snap_realms++;
- dout("create_snap_realm %llx %p\n", realm->ino, realm);
+ doutc(mdsc->fsc->client, "%llx %p\n", realm->ino, realm);
return realm;
}
@@ -144,6 +150,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
u64 ino)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct rb_node *n = mdsc->snap_realms.rb_node;
struct ceph_snap_realm *r;
@@ -156,7 +163,7 @@ static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
else if (ino > r->ino)
n = n->rb_right;
else {
- dout("lookup_snap_realm %llx %p\n", r->ino, r);
+ doutc(cl, "%llx %p\n", r->ino, r);
return r;
}
}
@@ -182,9 +189,10 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
+ struct ceph_client *cl = mdsc->fsc->client;
lockdep_assert_held_write(&mdsc->snap_rwsem);
- dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
+ doutc(cl, "%p %llx\n", realm, realm->ino);
rb_erase(&realm->node, &mdsc->snap_realms);
mdsc->num_snap_realms--;
@@ -260,9 +268,14 @@ static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
spin_unlock(&mdsc->snap_empty_lock);
}
-void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
+void ceph_cleanup_global_and_empty_realms(struct ceph_mds_client *mdsc)
{
+ struct ceph_snap_realm *global_realm;
+
down_write(&mdsc->snap_rwsem);
+ global_realm = __lookup_snap_realm(mdsc, CEPH_INO_GLOBAL_SNAPREALM);
+ if (global_realm)
+ ceph_put_snap_realm(mdsc, global_realm);
__cleanup_empty_realms(mdsc);
up_write(&mdsc->snap_rwsem);
}
@@ -279,6 +292,7 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm,
u64 parentino)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snap_realm *parent;
lockdep_assert_held_write(&mdsc->snap_rwsem);
@@ -292,9 +306,8 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
if (IS_ERR(parent))
return PTR_ERR(parent);
}
- dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
- realm->ino, realm, realm->parent_ino, realm->parent,
- parentino, parent);
+ doutc(cl, "%llx %p: %llx %p -> %llx %p\n", realm->ino, realm,
+ realm->parent_ino, realm->parent, parentino, parent);
if (realm->parent) {
list_del_init(&realm->child_item);
ceph_put_snap_realm(mdsc, realm->parent);
@@ -319,9 +332,12 @@ static int cmpu64_rev(const void *a, const void *b)
/*
* build the snap context for a given realm.
*/
-static int build_snap_context(struct ceph_snap_realm *realm,
- struct list_head* dirty_realms)
+static int build_snap_context(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm,
+ struct list_head *realm_queue,
+ struct list_head *dirty_realms)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snap_realm *parent = realm->parent;
struct ceph_snap_context *snapc;
int err = 0;
@@ -334,9 +350,9 @@ static int build_snap_context(struct ceph_snap_realm *realm,
*/
if (parent) {
if (!parent->cached_context) {
- err = build_snap_context(parent, dirty_realms);
- if (err)
- goto fail;
+ /* add to the queue head */
+ list_add(&parent->rebuild_item, realm_queue);
+ return 1;
}
num += parent->cached_context->num_snaps;
}
@@ -349,11 +365,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
realm->cached_context->seq == realm->seq &&
(!parent ||
realm->cached_context->seq >= parent->cached_context->seq)) {
- dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
- " (unchanged)\n",
- realm->ino, realm, realm->cached_context,
- realm->cached_context->seq,
- (unsigned int)realm->cached_context->num_snaps);
+ doutc(cl, "%llx %p: %p seq %lld (%u snaps) (unchanged)\n",
+ realm->ino, realm, realm->cached_context,
+ realm->cached_context->seq,
+ (unsigned int)realm->cached_context->num_snaps);
return 0;
}
@@ -390,9 +405,8 @@ static int build_snap_context(struct ceph_snap_realm *realm,
sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
snapc->num_snaps = num;
- dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
- realm->ino, realm, snapc, snapc->seq,
- (unsigned int) snapc->num_snaps);
+ doutc(cl, "%llx %p: %p seq %lld (%u snaps)\n", realm->ino, realm,
+ snapc, snapc->seq, (unsigned int) snapc->num_snaps);
ceph_put_snap_context(realm->cached_context);
realm->cached_context = snapc;
@@ -409,24 +423,63 @@ fail:
ceph_put_snap_context(realm->cached_context);
realm->cached_context = NULL;
}
- pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
- realm, err);
+ pr_err_client(cl, "%llx %p fail %d\n", realm->ino, realm, err);
return err;
}
/*
* rebuild snap context for the given realm and all of its children.
*/
-static void rebuild_snap_realms(struct ceph_snap_realm *realm,
+static void rebuild_snap_realms(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm,
struct list_head *dirty_realms)
{
- struct ceph_snap_realm *child;
+ struct ceph_client *cl = mdsc->fsc->client;
+ LIST_HEAD(realm_queue);
+ int last = 0;
+ bool skip = false;
+
+ list_add_tail(&realm->rebuild_item, &realm_queue);
+
+ while (!list_empty(&realm_queue)) {
+ struct ceph_snap_realm *_realm, *child;
+
+ _realm = list_first_entry(&realm_queue,
+ struct ceph_snap_realm,
+ rebuild_item);
+
+ /*
+ * If the last build failed due to a memory
+ * issue, just empty the realm_queue and return
+ * to avoid an infinite loop.
+ */
+ if (last < 0) {
+ list_del_init(&_realm->rebuild_item);
+ continue;
+ }
+
+ last = build_snap_context(mdsc, _realm, &realm_queue,
+ dirty_realms);
+ doutc(cl, "%llx %p, %s\n", realm->ino, realm,
+ last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
+
+ /* is any child in the list ? */
+ list_for_each_entry(child, &_realm->children, child_item) {
+ if (!list_empty(&child->rebuild_item)) {
+ skip = true;
+ break;
+ }
+ }
- dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
- build_snap_context(realm, dirty_realms);
+ if (!skip) {
+ list_for_each_entry(child, &_realm->children, child_item)
+ list_add_tail(&child->rebuild_item, &realm_queue);
+ }
- list_for_each_entry(child, &realm->children, child_item)
- rebuild_snap_realms(child, dirty_realms);
+ /* last == 1 means need to build parent first */
+ if (last <= 0)
+ list_del_init(&_realm->rebuild_item);
+ }
}
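rebuild_snap_realms() now replaces recursion with an explicit work list: build_snap_context() returns 1 when the parent's snap context is not cached yet, the parent is pushed to the head of the queue, and the child stays queued until the parent has been built, so parents are always handled first without deep call stacks. A minimal, generic userspace sketch of this defer-until-dependency-ready pattern; the node layout and names are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;     /* must be built before this node */
	struct node *next;       /* simple queue link */
	bool built;
	bool queued;
};

/* Build one node; return 1 to ask the caller to build the parent first. */
static int build_one(struct node *n, struct node **head)
{
	if (n->parent && !n->parent->built) {
		if (!n->parent->queued) {
			n->parent->next = *head;   /* push parent to the head */
			*head = n->parent;
			n->parent->queued = true;
		}
		return 1;                          /* deferred */
	}
	n->built = true;
	printf("built %s\n", n->name);
	return 0;
}

int main(void)
{
	struct node root = { "root", NULL,  NULL, false, false };
	struct node mid  = { "mid",  &root, NULL, false, false };
	struct node leaf = { "leaf", &mid,  NULL, false, false };
	struct node *head = &leaf;                 /* start from the leaf */

	leaf.queued = true;
	while (head) {
		struct node *n = head;

		if (build_one(n, &head) == 0) {    /* done: pop it */
			head = n->next;
			n->queued = false;
		}
		/* deferred nodes stay queued behind the newly pushed parent */
	}
	return 0;                                  /* prints root, mid, leaf */
}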
@@ -474,23 +527,16 @@ static bool has_new_snaps(struct ceph_snap_context *o,
* Caller must hold snap_rwsem for read (i.e., the realm topology won't
* change).
*/
-static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_cap_snap **pcapsnap)
{
- struct inode *inode = &ci->vfs_inode;
- struct ceph_cap_snap *capsnap;
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_snap_context *old_snapc, *new_snapc;
+ struct ceph_cap_snap *capsnap = *pcapsnap;
struct ceph_buffer *old_blob = NULL;
int used, dirty;
- capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
- if (!capsnap) {
- pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
- return;
- }
- capsnap->cap_flush.is_capsnap = true;
- INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
- INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
-
spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
@@ -511,12 +557,14 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
as no new writes are allowed to start when pending, so any
writes in progress now were started before the previous
cap_snap. lucky us. */
- dout("queue_cap_snap %p already pending\n", inode);
+ doutc(cl, "%p %llx.%llx already pending\n", inode,
+ ceph_vinop(inode));
goto update_snapc;
}
if (ci->i_wrbuffer_ref_head == 0 &&
!(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
- dout("queue_cap_snap %p nothing dirty|writing\n", inode);
+ doutc(cl, "%p %llx.%llx nothing dirty|writing\n", inode,
+ ceph_vinop(inode));
goto update_snapc;
}
@@ -536,20 +584,17 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
} else {
if (!(used & CEPH_CAP_FILE_WR) &&
ci->i_wrbuffer_ref_head == 0) {
- dout("queue_cap_snap %p "
- "no new_snap|dirty_page|writing\n", inode);
+ doutc(cl, "%p %llx.%llx no new_snap|dirty_page|writing\n",
+ inode, ceph_vinop(inode));
goto update_snapc;
}
}
- dout("queue_cap_snap %p cap_snap %p queuing under %p %s %s\n",
- inode, capsnap, old_snapc, ceph_cap_string(dirty),
- capsnap->need_flush ? "" : "no_flush");
+ doutc(cl, "%p %llx.%llx cap_snap %p queuing under %p %s %s\n",
+ inode, ceph_vinop(inode), capsnap, old_snapc,
+ ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
ihold(inode);
- refcount_set(&capsnap->nref, 1);
- INIT_LIST_HEAD(&capsnap->ci_item);
-
capsnap->follows = old_snapc->seq;
capsnap->issued = __ceph_caps_issued(ci, NULL);
capsnap->dirty = dirty;
@@ -579,31 +624,30 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
if (used & CEPH_CAP_FILE_WR) {
- dout("queue_cap_snap %p cap_snap %p snapc %p"
- " seq %llu used WR, now pending\n", inode,
- capsnap, old_snapc, old_snapc->seq);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
+ " now pending\n", inode, ceph_vinop(inode), capsnap,
+ old_snapc, old_snapc->seq);
capsnap->writing = 1;
} else {
/* note mtime, size NOW. */
__ceph_finish_cap_snap(ci, capsnap);
}
- capsnap = NULL;
+ *pcapsnap = NULL;
old_snapc = NULL;
update_snapc:
- if (ci->i_wrbuffer_ref_head == 0 &&
- ci->i_wr_ref == 0 &&
- ci->i_dirty_caps == 0 &&
- ci->i_flushing_caps == 0) {
- ci->i_head_snapc = NULL;
- } else {
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ci->i_head_snapc = NULL;
+ } else {
ci->i_head_snapc = ceph_get_snap_context(new_snapc);
- dout(" new snapc is %p\n", new_snapc);
+ doutc(cl, " new snapc is %p\n", new_snapc);
}
spin_unlock(&ci->i_ceph_lock);
ceph_buffer_put(old_blob);
- kfree(capsnap);
ceph_put_snap_context(old_snapc);
}
@@ -618,47 +662,55 @@ update_snapc:
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
{
- struct inode *inode = &ci->vfs_inode;
+ struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct ceph_client *cl = mdsc->fsc->client;
BUG_ON(capsnap->writing);
capsnap->size = i_size_read(inode);
- capsnap->mtime = inode->i_mtime;
- capsnap->atime = inode->i_atime;
- capsnap->ctime = inode->i_ctime;
+ capsnap->mtime = inode_get_mtime(inode);
+ capsnap->atime = inode_get_atime(inode);
+ capsnap->ctime = inode_get_ctime(inode);
capsnap->btime = ci->i_btime;
capsnap->change_attr = inode_peek_iversion_raw(inode);
capsnap->time_warp_seq = ci->i_time_warp_seq;
capsnap->truncate_size = ci->i_truncate_size;
capsnap->truncate_seq = ci->i_truncate_seq;
if (capsnap->dirty_pages) {
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
- "still has %d dirty pages\n", inode, capsnap,
- capsnap->context, capsnap->context->seq,
- ceph_cap_string(capsnap->dirty), capsnap->size,
- capsnap->dirty_pages);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
+ "s=%llu still has %d dirty pages\n", inode,
+ ceph_vinop(inode), capsnap, capsnap->context,
+ capsnap->context->seq,
+ ceph_cap_string(capsnap->dirty),
+ capsnap->size, capsnap->dirty_pages);
return 0;
}
- /* Fb cap still in use, delay it */
- if (ci->i_wb_ref) {
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
- "used WRBUFFER, delaying\n", inode, capsnap,
- capsnap->context, capsnap->context->seq,
- ceph_cap_string(capsnap->dirty), capsnap->size);
- capsnap->writing = 1;
+ /*
+ * Defer flushing the capsnap if the dirty buffer has not been
+ * flushed yet, and trigger writeback of the buffer immediately.
+ */
+ if (ci->i_wrbuffer_ref) {
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
+ "s=%llu used WRBUFFER, delaying\n", inode,
+ ceph_vinop(inode), capsnap, capsnap->context,
+ capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+ capsnap->size);
+ ceph_queue_writeback(inode);
return 0;
}
ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
- inode, capsnap, capsnap->context,
- capsnap->context->seq, ceph_cap_string(capsnap->dirty),
- capsnap->size);
+ doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
+ inode, ceph_vinop(inode), capsnap, capsnap->context,
+ capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+ capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
- if (list_empty(&ci->i_snap_flush_item))
+ if (list_empty(&ci->i_snap_flush_item)) {
+ ihold(inode);
list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ }
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
@@ -667,28 +719,54 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
* Queue cap_snaps for snap writeback for this realm and its children.
* Called under snap_rwsem, so realm topology won't change.
*/
-static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *lastinode = NULL;
+ struct ceph_cap_snap *capsnap = NULL;
- dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
+ doutc(cl, "%p %llx inode\n", realm, realm->ino);
spin_lock(&realm->inodes_with_caps_lock);
list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
- struct inode *inode = igrab(&ci->vfs_inode);
+ struct inode *inode = igrab(&ci->netfs.inode);
if (!inode)
continue;
spin_unlock(&realm->inodes_with_caps_lock);
iput(lastinode);
lastinode = inode;
- ceph_queue_cap_snap(ci);
+
+ /*
+ * Allocate the capsnap memory outside of ceph_queue_cap_snap()
+ * to avoid frequent and unnecessary memory allocation/freeing
+ * in this loop.
+ */
+ if (!capsnap) {
+ capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
+ if (!capsnap) {
+ pr_err_client(cl,
+ "ENOMEM allocating ceph_cap_snap on %p\n",
+ inode);
+ return;
+ }
+ }
+ capsnap->cap_flush.is_capsnap = true;
+ refcount_set(&capsnap->nref, 1);
+ INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
+ INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
+ INIT_LIST_HEAD(&capsnap->ci_item);
+
+ ceph_queue_cap_snap(ci, &capsnap);
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
iput(lastinode);
- dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
+ if (capsnap)
+ kmem_cache_free(ceph_cap_snap_cachep, capsnap);
+ doutc(cl, "%p %llx done\n", realm, realm->ino);
}
/*
@@ -702,19 +780,25 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
void *p, void *e, bool deletion,
struct ceph_snap_realm **realm_ret)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_snap_realm *ri; /* encoded */
__le64 *snaps; /* encoded */
__le64 *prior_parent_snaps; /* encoded */
- struct ceph_snap_realm *realm = NULL;
+ struct ceph_snap_realm *realm;
struct ceph_snap_realm *first_realm = NULL;
- int invalidate = 0;
+ struct ceph_snap_realm *realm_to_rebuild = NULL;
+ struct ceph_client *client = mdsc->fsc->client;
+ int rebuild_snapcs;
int err = -ENOMEM;
+ int ret;
LIST_HEAD(dirty_realms);
lockdep_assert_held_write(&mdsc->snap_rwsem);
- dout("update_snap_trace deletion=%d\n", deletion);
+ doutc(cl, "deletion=%d\n", deletion);
more:
+ realm = NULL;
+ rebuild_snapcs = 0;
ceph_decode_need(&p, e, sizeof(*ri), bad);
ri = p;
p += sizeof(*ri);
@@ -738,11 +822,11 @@ more:
err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
if (err < 0)
goto fail;
- invalidate += err;
+ rebuild_snapcs += err;
if (le64_to_cpu(ri->seq) > realm->seq) {
- dout("update_snap_trace updating %llx %p %lld -> %lld\n",
- realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
+ doutc(cl, "updating %llx %p %lld -> %lld\n", realm->ino,
+ realm, realm->seq, le64_to_cpu(ri->seq));
/* update realm parameters, snap lists */
realm->seq = le64_to_cpu(ri->seq);
realm->created = le64_to_cpu(ri->created);
@@ -763,22 +847,30 @@ more:
if (realm->seq > mdsc->last_snap_seq)
mdsc->last_snap_seq = realm->seq;
- invalidate = 1;
+ rebuild_snapcs = 1;
} else if (!realm->cached_context) {
- dout("update_snap_trace %llx %p seq %lld new\n",
- realm->ino, realm, realm->seq);
- invalidate = 1;
+ doutc(cl, "%llx %p seq %lld new\n", realm->ino, realm,
+ realm->seq);
+ rebuild_snapcs = 1;
} else {
- dout("update_snap_trace %llx %p seq %lld unchanged\n",
- realm->ino, realm, realm->seq);
+ doutc(cl, "%llx %p seq %lld unchanged\n", realm->ino, realm,
+ realm->seq);
}
- dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
- realm, invalidate, p, e);
+ doutc(cl, "done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
+ realm, rebuild_snapcs, p, e);
+
+ /*
+ * this will always track the uppermost parent realm from which
+ * we need to rebuild the snapshot contexts _downward_ in the
+ * hierarchy.
+ */
+ if (rebuild_snapcs)
+ realm_to_rebuild = realm;
- /* invalidate when we reach the _end_ (root) of the trace */
- if (invalidate && p >= e)
- rebuild_snap_realms(realm, &dirty_realms);
+ /* rebuild_snapcs when we reach the _end_ (root) of the trace */
+ if (realm_to_rebuild && p >= e)
+ rebuild_snap_realms(mdsc, realm_to_rebuild, &dirty_realms);
if (!first_realm)
first_realm = realm;
@@ -796,7 +888,7 @@ more:
realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
dirty_item);
list_del_init(&realm->dirty_item);
- queue_realm_cap_snaps(realm);
+ queue_realm_cap_snaps(mdsc, realm);
}
if (realm_ret)
@@ -814,7 +906,29 @@ fail:
ceph_put_snap_realm(mdsc, realm);
if (first_realm)
ceph_put_snap_realm(mdsc, first_realm);
- pr_err("update_snap_trace error %d\n", err);
+ pr_err_client(cl, "error %d\n", err);
+
+ /*
+ * When receiving a corrupted snap trace we don't know what
+ * exactly has happened on the MDS side, and we shouldn't keep
+ * writing to the OSDs, which may corrupt the snapshot contents.
+ *
+ * Just try to blocklist this kclient; it must then be remounted
+ * to continue, once the corrupted metadata has been fixed on the
+ * MDS side.
+ */
+ WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
+ ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
+ if (ret)
+ pr_err_client(cl, "failed to blocklist %s: %d\n",
+ ceph_pr_addr(&client->msgr.inst.addr), ret);
+
+ WARN(1, "[client.%lld] %s %s%sdo remount to continue%s",
+ client->monc.auth->global_id, __func__,
+ ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
+ ret ? "" : " was blocklisted, ",
+ err == -EIO ? " after corrupted snaptrace is fixed" : "");
+
return err;
}
@@ -827,16 +941,17 @@ fail:
*/
static void flush_snaps(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *inode;
struct ceph_mds_session *session = NULL;
- dout("flush_snaps\n");
+ doutc(cl, "begin\n");
spin_lock(&mdsc->snap_flush_lock);
while (!list_empty(&mdsc->snap_flush_list)) {
ci = list_first_entry(&mdsc->snap_flush_list,
struct ceph_inode_info, i_snap_flush_item);
- inode = &ci->vfs_inode;
+ inode = &ci->netfs.inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
ceph_flush_snaps(ci, &session);
@@ -846,7 +961,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
spin_unlock(&mdsc->snap_flush_lock);
ceph_put_mds_session(session);
- dout("flush_snaps done\n");
+ doutc(cl, "done\n");
}
/**
@@ -862,7 +977,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
lockdep_assert_held(&ci->i_ceph_lock);
@@ -902,6 +1017,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct super_block *sb = mdsc->fsc->sb;
int mds = session->s_mds;
u64 split;
@@ -915,6 +1031,10 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
__le64 *split_inos = NULL, *split_realms = NULL;
int i;
int locked_rwsem = 0;
+ bool close_sessions = false;
+
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
/* decode */
if (msg->front.iov_len < sizeof(*h))
@@ -928,12 +1048,8 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
trace_len = le32_to_cpu(h->trace_len);
p += sizeof(*h);
- dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
- ceph_snap_op_name(op), split, trace_len);
-
- mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
- mutex_unlock(&session->s_mutex);
+ doutc(cl, "from mds%d op %s split %llx tracelen %d\n", mds,
+ ceph_snap_op_name(op), split, trace_len);
down_write(&mdsc->snap_rwsem);
locked_rwsem = 1;
@@ -964,7 +1080,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
goto out;
}
- dout("splitting snap_realm %llx %p\n", realm->ino, realm);
+ doutc(cl, "splitting snap_realm %llx %p\n", realm->ino, realm);
for (i = 0; i < num_split_inos; i++) {
struct ceph_vino vino = {
.ino = le64_to_cpu(split_inos[i]),
@@ -989,13 +1105,13 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
*/
if (ci->i_snap_realm->created >
le64_to_cpu(ri->created)) {
- dout(" leaving %p in newer realm %llx %p\n",
- inode, ci->i_snap_realm->ino,
- ci->i_snap_realm);
+ doutc(cl, " leaving %p %llx.%llx in newer realm %llx %p\n",
+ inode, ceph_vinop(inode), ci->i_snap_realm->ino,
+ ci->i_snap_realm);
goto skip_inode;
}
- dout(" will move %p to split realm %llx %p\n",
- inode, realm->ino, realm);
+ doutc(cl, " will move %p %llx.%llx to split realm %llx %p\n",
+ inode, ceph_vinop(inode), realm->ino, realm);
ceph_get_snap_realm(mdsc, realm);
ceph_change_snap_realm(inode, realm);
@@ -1017,14 +1133,31 @@ skip_inode:
continue;
adjust_snap_realm_parent(mdsc, child, realm->ino);
}
+ } else {
+ /*
+ * In the non-split case both 'num_split_inos' and
+ * 'num_split_realms' should be 0, making this a no-op.
+ * However the MDS happens to populate the 'split_realms' list
+ * in one of the UPDATE op cases by mistake.
+ *
+ * Skip both lists just in case to ensure that 'p' is
+ * positioned at the start of realm info, as expected by
+ * ceph_update_snap_trace().
+ */
+ p += sizeof(u64) * num_split_inos;
+ p += sizeof(u64) * num_split_realms;
}
/*
* update using the provided snap trace. if we are deleting a
* snap, we can avoid queueing cap_snaps.
*/
- ceph_update_snap_trace(mdsc, p, e,
- op == CEPH_SNAP_OP_DESTROY, NULL);
+ if (ceph_update_snap_trace(mdsc, p, e,
+ op == CEPH_SNAP_OP_DESTROY,
+ NULL)) {
+ close_sessions = true;
+ goto bad;
+ }
if (op == CEPH_SNAP_OP_SPLIT)
/* we took a reference when we created the realm, above */
@@ -1035,20 +1168,27 @@ skip_inode:
up_write(&mdsc->snap_rwsem);
flush_snaps(mdsc);
+ ceph_dec_mds_stopping_blocker(mdsc);
return;
bad:
- pr_err("corrupt snap message from mds%d\n", mds);
+ pr_err_client(cl, "corrupt snap message from mds%d\n", mds);
ceph_msg_dump(msg);
out:
if (locked_rwsem)
up_write(&mdsc->snap_rwsem);
+
+ ceph_dec_mds_stopping_blocker(mdsc);
+
+ if (close_sessions)
+ ceph_mdsc_close_sessions(mdsc);
return;
}
struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
u64 snap)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm, *exist;
struct rb_node **p, *parent;
int ret;
@@ -1071,7 +1211,8 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
}
spin_unlock(&mdsc->snapid_map_lock);
if (exist) {
- dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
+ doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
+ exist->dev);
return exist;
}
@@ -1115,11 +1256,12 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
if (exist) {
free_anon_bdev(sm->dev);
kfree(sm);
- dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
+ doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
+ exist->dev);
return exist;
}
- dout("create snapid map %llx -> %x\n", sm->snap, sm->dev);
+ doutc(cl, "create snapid map %llx -> %x\n", sm->snap, sm->dev);
return sm;
}
@@ -1144,6 +1286,7 @@ void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm;
unsigned long now;
LIST_HEAD(to_free);
@@ -1165,7 +1308,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
while (!list_empty(&to_free)) {
sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
list_del(&sm->lru);
- dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
+ doutc(cl, "trim snapid map %llx -> %x\n", sm->snap, sm->dev);
free_anon_bdev(sm->dev);
kfree(sm);
}
@@ -1173,6 +1316,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
{
+ struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm;
struct rb_node *p;
LIST_HEAD(to_free);
@@ -1191,8 +1335,8 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
list_del(&sm->lru);
free_anon_bdev(sm->dev);
if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
- pr_err("snapid map %llx -> %x still in use\n",
- sm->snap, sm->dev);
+ pr_err_client(cl, "snapid map %llx -> %x still in use\n",
+ sm->snap, sm->dev);
}
kfree(sm);
}
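The snapid-map helpers in the hunk above keep a refcounted mapping from a snapshot id to an anonymous block-device number. A minimal usage sketch, assuming the get/put pair behaves as shown in this diff (the example function name is hypothetical):

/*
 * Hypothetical sketch: look up the anonymous bdev number for a snapshot id
 * and drop the reference when done.  Unused entries are reclaimed separately
 * by ceph_trim_snapid_map() / ceph_cleanup_snapid_map().
 */
static void example_report_snap_dev(struct ceph_mds_client *mdsc, u64 snap)
{
	struct ceph_snapid_map *sm;

	sm = ceph_get_snapid_map(mdsc, snap);	/* takes a reference */
	if (!sm)
		return;
	pr_info("snap %llx -> dev %x\n", snap, sm->dev);
	ceph_put_snapid_map(mdsc, sm);		/* drop the reference */
}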
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
index 573bb9556fb5..e36e8948e728 100644
--- a/fs/ceph/strings.c
+++ b/fs/ceph/strings.c
@@ -60,6 +60,7 @@ const char *ceph_mds_op_name(int op)
case CEPH_MDS_OP_LOOKUPINO: return "lookupino";
case CEPH_MDS_OP_LOOKUPNAME: return "lookupname";
case CEPH_MDS_OP_GETATTR: return "getattr";
+ case CEPH_MDS_OP_GETVXATTR: return "getvxattr";
case CEPH_MDS_OP_SETXATTR: return "setxattr";
case CEPH_MDS_OP_SETATTR: return "setattr";
case CEPH_MDS_OP_RMXATTR: return "rmxattr";
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index fd8742bae847..f6bf24b5c683 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -20,6 +20,7 @@
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "crypto.h"
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
@@ -27,6 +28,8 @@
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
+#include <uapi/linux/magic.h>
+
static DEFINE_SPINLOCK(ceph_fsc_lock);
static LIST_HEAD(ceph_fsc_list);
@@ -41,28 +44,29 @@ static LIST_HEAD(ceph_fsc_list);
*/
static void ceph_put_super(struct super_block *s)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
- dout("put_super\n");
+ doutc(fsc->client, "begin\n");
+ ceph_fscrypt_free_dummy_policy(fsc);
ceph_mdsc_close_sessions(fsc->mdsc);
+ doutc(fsc->client, "done\n");
}
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(d_inode(dentry));
struct ceph_mon_client *monc = &fsc->client->monc;
struct ceph_statfs st;
- u64 fsid;
- int err;
+ int i, err;
u64 data_pool;
+ doutc(fsc->client, "begin\n");
if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
} else {
data_pool = CEPH_NOPOOL;
}
- dout("statfs\n");
err = ceph_monc_do_statfs(monc, data_pool, &st);
if (err < 0)
return err;
@@ -71,15 +75,9 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
/*
- * express utilization in terms of large blocks to avoid
+ * Express utilization in terms of large blocks to avoid
* overflow on 32-bit machines.
- *
- * NOTE: for the time being, we make bsize == frsize to humor
- * not-yet-ancient versions of glibc that are broken.
- * Someday, we will probably want to report a real block
- * size... whatever that may mean for a network file system!
*/
- buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
/*
@@ -94,36 +92,49 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
}
+ /*
+ * NOTE: for the time being, we make bsize == frsize to humor
+ * not-yet-ancient versions of glibc that are broken.
+ * Someday, we will probably want to report a real block
+ * size... whatever that may mean for a network file system!
+ */
+ buf->f_bsize = buf->f_frsize;
+
buf->f_files = le64_to_cpu(st.num_objects);
buf->f_ffree = -1;
buf->f_namelen = NAME_MAX;
/* Must convert the fsid, for consistent values across arches */
+ buf->f_fsid.val[0] = 0;
mutex_lock(&monc->mutex);
- fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
- le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
+ for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i)
+ buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]);
mutex_unlock(&monc->mutex);
- buf->f_fsid = u64_to_fsid(fsid);
+ /* fold the fs_cluster_id into the upper bits */
+ buf->f_fsid.val[1] = monc->fs_cluster_id;
+ doutc(fsc->client, "done\n");
return 0;
}
static int ceph_sync_fs(struct super_block *sb, int wait)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ struct ceph_client *cl = fsc->client;
if (!wait) {
- dout("sync_fs (non-blocking)\n");
+ doutc(cl, "(non-blocking)\n");
ceph_flush_dirty_caps(fsc->mdsc);
- dout("sync_fs (non-blocking) done\n");
+ ceph_flush_cap_releases(fsc->mdsc);
+ doutc(cl, "(non-blocking) done\n");
return 0;
}
- dout("sync_fs (blocking)\n");
+ doutc(cl, "(blocking)\n");
ceph_osdc_sync(&fsc->client->osdc);
ceph_mdsc_sync(fsc->mdsc);
- dout("sync_fs (blocking) done\n");
+ doutc(cl, "(blocking) done\n");
return 0;
}
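A quick worked example for the block accounting in the ceph_statfs() hunk above: with CEPH_BLOCK_SHIFT = 22, f_frsize (and now f_bsize, which is simply set equal to it) is 1 << 22 = 4 MiB. The monitor reports kb/kb_used/kb_avail in KiB, so shifting right by CEPH_BLOCK_SHIFT - 10 = 12 converts KiB counts into 4 MiB block counts; e.g. a hypothetical kb_avail of 8388608 KiB (8 GiB) yields f_bavail = 8388608 >> 12 = 2048 blocks. The fsid handling XORs the four 32-bit words of the 16-byte cluster fsid into f_fsid.val[0] and stores the fs_cluster_id in val[1], giving the same value on all architectures.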
@@ -145,6 +156,8 @@ enum {
Opt_mds_namespace,
Opt_recover_session,
Opt_source,
+ Opt_mon_addr,
+ Opt_test_dummy_encryption,
/* string args above */
Opt_dirstat,
Opt_rbytes,
@@ -158,6 +171,8 @@ enum {
Opt_quotadf,
Opt_copyfrom,
Opt_wsync,
+ Opt_pagecache,
+ Opt_sparseread,
};
enum ceph_recover_session_mode {
@@ -185,6 +200,7 @@ static const struct fs_parameter_spec ceph_mount_parameters[] = {
fsparam_string ("fsc", Opt_fscache), // fsc=...
fsparam_flag_no ("ino32", Opt_ino32),
fsparam_string ("mds_namespace", Opt_mds_namespace),
+ fsparam_string ("mon_addr", Opt_mon_addr),
fsparam_flag_no ("poolperm", Opt_poolperm),
fsparam_flag_no ("quotadf", Opt_quotadf),
fsparam_u32 ("rasize", Opt_rasize),
@@ -196,8 +212,12 @@ static const struct fs_parameter_spec ceph_mount_parameters[] = {
fsparam_u32 ("rsize", Opt_rsize),
fsparam_string ("snapdirname", Opt_snapdirname),
fsparam_string ("source", Opt_source),
+ fsparam_flag ("test_dummy_encryption", Opt_test_dummy_encryption),
+ fsparam_string ("test_dummy_encryption", Opt_test_dummy_encryption),
fsparam_u32 ("wsize", Opt_wsize),
fsparam_flag_no ("wsync", Opt_wsync),
+ fsparam_flag_no ("pagecache", Opt_pagecache),
+ fsparam_flag_no ("sparseread", Opt_sparseread),
{}
};
@@ -226,10 +246,87 @@ static void canonicalize_path(char *path)
path[j] = '\0';
}
+static int ceph_parse_old_source(const char *dev_name, const char *dev_name_end,
+ struct fs_context *fc)
+{
+ int r;
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+
+ if (*dev_name_end != ':')
+ return invalfc(fc, "separator ':' missing in source");
+
+ r = ceph_parse_mon_ips(dev_name, dev_name_end - dev_name,
+ pctx->copts, fc->log.log, ',');
+ if (r)
+ return r;
+
+ fsopt->new_dev_syntax = false;
+ return 0;
+}
+
+static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
+ struct fs_context *fc)
+{
+ size_t len;
+ struct ceph_fsid fsid;
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_options *opts = pctx->copts;
+ struct ceph_mount_options *fsopt = pctx->opts;
+ const char *name_start = dev_name;
+ const char *fsid_start, *fs_name_start;
+
+ if (*dev_name_end != '=') {
+ dout("separator '=' missing in source");
+ return -EINVAL;
+ }
+
+ fsid_start = strchr(dev_name, '@');
+ if (!fsid_start)
+ return invalfc(fc, "missing cluster fsid");
+ len = fsid_start - name_start;
+ kfree(opts->name);
+ opts->name = kstrndup(name_start, len, GFP_KERNEL);
+ if (!opts->name)
+ return -ENOMEM;
+ dout("using %s entity name", opts->name);
+
+ ++fsid_start; /* start of cluster fsid */
+ fs_name_start = strchr(fsid_start, '.');
+ if (!fs_name_start)
+ return invalfc(fc, "missing file system name");
+
+ if (ceph_parse_fsid(fsid_start, &fsid))
+ return invalfc(fc, "Invalid FSID");
+
+ ++fs_name_start; /* start of file system name */
+ len = dev_name_end - fs_name_start;
+
+ if (!namespace_equals(fsopt, fs_name_start, len))
+ return invalfc(fc, "Mismatching mds_namespace");
+ kfree(fsopt->mds_namespace);
+ fsopt->mds_namespace = kstrndup(fs_name_start, len, GFP_KERNEL);
+ if (!fsopt->mds_namespace)
+ return -ENOMEM;
+ dout("file system (mds namespace) '%s'\n", fsopt->mds_namespace);
+
+ fsopt->new_dev_syntax = true;
+ return 0;
+}
+
/*
- * Parse the source parameter. Distinguish the server list from the path.
+ * Parse the source parameter for the new device format. Distinguish
+ * the device spec from the path. Try the new device format first and
+ * fall back to the old format if needed.
*
- * The source will look like:
+ * New device syntax looks like:
+ * <device_spec>=/<path>
+ * where
+ * <device_spec> is name@fsid.fsname
+ * <path> is optional, but if present must begin with '/'
+ * (monitor addresses are passed via mount option)
+ *
+ * Old device syntax is:
* <server_spec>[,<server_spec>...]:[<path>]
* where
* <server_spec> is <ip>[:<port>]
@@ -242,7 +339,7 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
char *dev_name = param->string, *dev_name_end;
int ret;
- dout("%s '%s'\n", __func__, dev_name);
+ dout("'%s'\n", dev_name);
if (!dev_name || !*dev_name)
return invalfc(fc, "Empty source");
@@ -262,24 +359,44 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
dev_name_end = dev_name + strlen(dev_name);
}
- dev_name_end--; /* back up to ':' separator */
- if (dev_name_end < dev_name || *dev_name_end != ':')
- return invalfc(fc, "No path or : separator in source");
+ dev_name_end--; /* back up to separator */
+ if (dev_name_end < dev_name)
+ return invalfc(fc, "Path missing in source");
dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
if (fsopt->server_path)
dout("server path '%s'\n", fsopt->server_path);
- ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
- pctx->copts, fc->log.log);
- if (ret)
- return ret;
+ dout("trying new device syntax");
+ ret = ceph_parse_new_source(dev_name, dev_name_end, fc);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ dout("trying old device syntax");
+ ret = ceph_parse_old_source(dev_name, dev_name_end, fc);
+ if (ret)
+ return ret;
+ }
fc->source = param->string;
param->string = NULL;
return 0;
}
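For illustration, the two source formats accepted above look roughly like the following (all names, fsids and addresses here are hypothetical):

	new syntax:  admin@0df2e8d2-3333-4a7f-8c7b-59a5b7d54b1c.cephfs=/dir1
	             (monitors given separately, e.g. mon_addr=192.168.0.1:6789/192.168.0.2:6789)
	old syntax:  192.168.0.1:6789,192.168.0.2:6789:/dir1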
+static int ceph_parse_mon_addr(struct fs_parameter *param,
+ struct fs_context *fc)
+{
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+
+ kfree(fsopt->mon_addr);
+ fsopt->mon_addr = param->string;
+ param->string = NULL;
+
+ return ceph_parse_mon_ips(fsopt->mon_addr, strlen(fsopt->mon_addr),
+ pctx->copts, fc->log.log, '/');
+}
+
static int ceph_parse_mount_param(struct fs_context *fc,
struct fs_parameter *param)
{
@@ -294,17 +411,21 @@ static int ceph_parse_mount_param(struct fs_context *fc,
return ret;
token = fs_parse(fc, ceph_mount_parameters, param, &result);
- dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
+ dout("%s: fs_parse '%s' token %d\n", __func__, param->key, token);
if (token < 0)
return token;
switch (token) {
case Opt_snapdirname:
+ if (strlen(param->string) > NAME_MAX)
+ return invalfc(fc, "snapdirname too long");
kfree(fsopt->snapdir_name);
fsopt->snapdir_name = param->string;
param->string = NULL;
break;
case Opt_mds_namespace:
+ if (!namespace_equals(fsopt, param->string, strlen(param->string)))
+ return invalfc(fc, "Mismatching mds_namespace");
kfree(fsopt->mds_namespace);
fsopt->mds_namespace = param->string;
param->string = NULL;
@@ -322,6 +443,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
if (fc->source)
return invalfc(fc, "Multiple sources specified");
return ceph_parse_source(param, fc);
+ case Opt_mon_addr:
+ return ceph_parse_mon_addr(param, fc);
case Opt_wsize:
if (result.uint_32 < PAGE_SIZE ||
result.uint_32 > CEPH_MAX_WRITE_SIZE)
@@ -454,6 +577,35 @@ static int ceph_parse_mount_param(struct fs_context *fc,
else
fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
break;
+ case Opt_pagecache:
+ if (result.negated)
+ fsopt->flags |= CEPH_MOUNT_OPT_NOPAGECACHE;
+ else
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOPAGECACHE;
+ break;
+ case Opt_sparseread:
+ if (result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_SPARSEREAD;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_SPARSEREAD;
+ break;
+ case Opt_test_dummy_encryption:
+#ifdef CONFIG_FS_ENCRYPTION
+ fscrypt_free_dummy_policy(&fsopt->dummy_enc_policy);
+ ret = fscrypt_parse_test_dummy_encryption(param,
+ &fsopt->dummy_enc_policy);
+ if (ret == -EINVAL) {
+ warnfc(fc, "Value of option \"%s\" is unrecognized",
+ param->key);
+ } else if (ret == -EEXIST) {
+ warnfc(fc, "Conflicting test_dummy_encryption options");
+ ret = -EINVAL;
+ }
+#else
+ warnfc(fc,
+ "FS encryption not supported: test_dummy_encryption mount option ignored");
+#endif
+ break;
default:
BUG();
}
@@ -473,6 +625,8 @@ static void destroy_mount_options(struct ceph_mount_options *args)
kfree(args->mds_namespace);
kfree(args->server_path);
kfree(args->fscache_uniq);
+ kfree(args->mon_addr);
+ fscrypt_free_dummy_policy(&args->dummy_enc_policy);
kfree(args);
}
@@ -516,6 +670,10 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
if (ret)
return ret;
+ ret = strcmp_null(fsopt1->mon_addr, fsopt2->mon_addr);
+ if (ret)
+ return ret;
+
return ceph_compare_options(new_opt, fsc->client);
}
@@ -526,7 +684,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
*/
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(root->d_sb);
struct ceph_mount_options *fsopt = fsc->mount_options;
size_t pos;
int ret;
@@ -571,14 +729,24 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
seq_puts(m, ",copyfrom");
- if (fsopt->mds_namespace)
+ /* dump mds_namespace when old device syntax is in use */
+ if (fsopt->mds_namespace && !fsopt->new_dev_syntax)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
+ if (fsopt->mon_addr)
+ seq_printf(m, ",mon_addr=%s", fsopt->mon_addr);
+
if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
seq_show_option(m, "recover_session", "clean");
- if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
- seq_puts(m, ",nowsync");
+ if (!(fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS))
+ seq_puts(m, ",wsync");
+ if (fsopt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
+ seq_puts(m, ",nopagecache");
+ if (fsopt->flags & CEPH_MOUNT_OPT_SPARSEREAD)
+ seq_puts(m, ",sparseread");
+
+ fscrypt_show_test_dummy_encryption(m, ',', root->d_sb);
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%u", fsopt->wsize);
@@ -670,6 +838,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->have_copy_from2 = true;
atomic_long_set(&fsc->writeback_count, 0);
+ fsc->write_congested = false;
err = -ENOMEM;
/*
@@ -679,10 +848,13 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
if (!fsc->inode_wq)
goto fail_client;
- fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
+ fsc->cap_wq = alloc_workqueue("ceph-cap", WQ_PERCPU, 1);
if (!fsc->cap_wq)
goto fail_inode_wq;
+ hash_init(fsc->async_unlink_conflict);
+ spin_lock_init(&fsc->async_unlink_conflict_lock);
+
spin_lock(&ceph_fsc_lock);
list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
spin_unlock(&ceph_fsc_lock);
@@ -709,7 +881,7 @@ static void flush_fs_workqueues(struct ceph_fs_client *fsc)
static void destroy_fs_client(struct ceph_fs_client *fsc)
{
- dout("destroy_fs_client %p\n", fsc);
+ doutc(fsc->client, "%p\n", fsc);
spin_lock(&ceph_fsc_lock);
list_del(&fsc->metric_wakeup);
@@ -724,7 +896,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
ceph_destroy_client(fsc->client);
kfree(fsc);
- dout("destroy_fs_client %p done\n", fsc);
+ dout("%s: %p done\n", __func__, fsc);
}
/*
@@ -732,6 +904,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
*/
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
+struct kmem_cache *ceph_cap_snap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
@@ -742,7 +915,7 @@ mempool_t *ceph_wb_pagevec_pool;
static void ceph_inode_init_once(void *foo)
{
struct ceph_inode_info *ci = foo;
- inode_init_once(&ci->vfs_inode);
+ inode_init_once(&ci->netfs.inode);
}
static int __init init_caches(void)
@@ -752,50 +925,48 @@ static int __init init_caches(void)
ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
sizeof(struct ceph_inode_info),
__alignof__(struct ceph_inode_info),
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
- SLAB_ACCOUNT, ceph_inode_init_once);
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
+ ceph_inode_init_once);
if (!ceph_inode_cachep)
return -ENOMEM;
- ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
+ ceph_cap_cachep = KMEM_CACHE(ceph_cap, 0);
if (!ceph_cap_cachep)
goto bad_cap;
+ ceph_cap_snap_cachep = KMEM_CACHE(ceph_cap_snap, 0);
+ if (!ceph_cap_snap_cachep)
+ goto bad_cap_snap;
ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ SLAB_RECLAIM_ACCOUNT);
if (!ceph_cap_flush_cachep)
goto bad_cap_flush;
ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ SLAB_RECLAIM_ACCOUNT);
if (!ceph_dentry_cachep)
goto bad_dentry;
- ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
+ ceph_file_cachep = KMEM_CACHE(ceph_file_info, 0);
if (!ceph_file_cachep)
goto bad_file;
- ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
+ ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, 0);
if (!ceph_dir_file_cachep)
goto bad_dir_file;
- ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
+ ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, 0);
if (!ceph_mds_request_cachep)
goto bad_mds_req;
- ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
+ ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10,
+ (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
if (!ceph_wb_pagevec_pool)
goto bad_pagevec_pool;
- error = ceph_fscache_register();
- if (error)
- goto bad_fscache;
-
return 0;
-bad_fscache:
- kmem_cache_destroy(ceph_mds_request_cachep);
bad_pagevec_pool:
- mempool_destroy(ceph_wb_pagevec_pool);
+ kmem_cache_destroy(ceph_mds_request_cachep);
bad_mds_req:
kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
@@ -805,6 +976,8 @@ bad_file:
bad_dentry:
kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
+ kmem_cache_destroy(ceph_cap_snap_cachep);
+bad_cap_snap:
kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
kmem_cache_destroy(ceph_inode_cachep);
@@ -821,14 +994,13 @@ static void destroy_caches(void)
kmem_cache_destroy(ceph_inode_cachep);
kmem_cache_destroy(ceph_cap_cachep);
+ kmem_cache_destroy(ceph_cap_snap_cachep);
kmem_cache_destroy(ceph_cap_flush_cachep);
kmem_cache_destroy(ceph_dentry_cachep);
kmem_cache_destroy(ceph_file_cachep);
kmem_cache_destroy(ceph_dir_file_cachep);
kmem_cache_destroy(ceph_mds_request_cachep);
mempool_destroy(ceph_wb_pagevec_pool);
-
- ceph_fscache_unregister();
}
static void __ceph_umount_begin(struct ceph_fs_client *fsc)
@@ -842,13 +1014,12 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
* ceph_umount_begin - initiate forced umount. Tear down the
* mount, skipping steps that may hang while waiting for server(s).
*/
-static void ceph_umount_begin(struct super_block *sb)
+void ceph_umount_begin(struct super_block *sb)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ doutc(fsc->client, "starting forced umount\n");
- dout("ceph_umount_begin - starting forced umount\n");
- if (!fsc)
- return;
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
__ceph_umount_begin(fsc);
}
@@ -857,7 +1028,7 @@ static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.free_inode = ceph_free_inode,
.write_inode = ceph_write_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.evict_inode = ceph_evict_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
@@ -874,13 +1045,14 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
const char *path,
unsigned long started)
{
+ struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req = NULL;
int err;
struct dentry *root;
/* open dir */
- dout("open_root_inode opening '%s'\n", path);
+ doutc(cl, "opening '%s'\n", path);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return ERR_CAST(req);
@@ -900,13 +1072,13 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
if (err == 0) {
struct inode *inode = req->r_target_inode;
req->r_target_inode = NULL;
- dout("open_root_inode success\n");
+ doutc(cl, "success\n");
root = d_make_root(inode);
if (!root) {
root = ERR_PTR(-ENOMEM);
goto out;
}
- dout("open_root_inode success, root dentry is %p\n", root);
+ doutc(cl, "success, root dentry is %p\n", root);
} else {
root = ERR_PTR(err);
}
@@ -915,24 +1087,69 @@ out:
return root;
}
+#ifdef CONFIG_FS_ENCRYPTION
+static int ceph_apply_test_dummy_encryption(struct super_block *sb,
+ struct fs_context *fc,
+ struct ceph_mount_options *fsopt)
+{
+ struct ceph_fs_client *fsc = sb->s_fs_info;
+
+ if (!fscrypt_is_dummy_policy_set(&fsopt->dummy_enc_policy))
+ return 0;
+
+ /* No changing encryption context on remount. */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE &&
+ !fscrypt_is_dummy_policy_set(&fsc->fsc_dummy_enc_policy)) {
+ if (fscrypt_dummy_policies_equal(&fsopt->dummy_enc_policy,
+ &fsc->fsc_dummy_enc_policy))
+ return 0;
+ errorfc(fc, "Can't set test_dummy_encryption on remount");
+ return -EINVAL;
+ }
+
+ /* Also make sure fsopt doesn't contain a conflicting value. */
+ if (fscrypt_is_dummy_policy_set(&fsc->fsc_dummy_enc_policy)) {
+ if (fscrypt_dummy_policies_equal(&fsopt->dummy_enc_policy,
+ &fsc->fsc_dummy_enc_policy))
+ return 0;
+ errorfc(fc, "Conflicting test_dummy_encryption options");
+ return -EINVAL;
+ }
+
+ fsc->fsc_dummy_enc_policy = fsopt->dummy_enc_policy;
+ memset(&fsopt->dummy_enc_policy, 0, sizeof(fsopt->dummy_enc_policy));
+
+ warnfc(fc, "test_dummy_encryption mode enabled");
+ return 0;
+}
+#else
+static int ceph_apply_test_dummy_encryption(struct super_block *sb,
+ struct fs_context *fc,
+ struct ceph_mount_options *fsopt)
+{
+ return 0;
+}
+#endif
+
/*
* mount: join the ceph cluster, and open root directory.
*/
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
struct fs_context *fc)
{
+ struct ceph_client *cl = fsc->client;
int err;
unsigned long started = jiffies; /* note the start time */
struct dentry *root;
- dout("mount start %p\n", fsc);
+ doutc(cl, "mount start %p\n", fsc);
mutex_lock(&fsc->client->mount_mutex);
if (!fsc->sb->s_root) {
const char *path = fsc->mount_options->server_path ?
fsc->mount_options->server_path + 1 : "";
- err = __ceph_open_session(fsc->client, started);
+ err = __ceph_open_session(fsc->client);
if (err < 0)
goto out;
@@ -943,7 +1160,12 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
goto out;
}
- dout("mount opening path '%s'\n", path);
+ err = ceph_apply_test_dummy_encryption(fsc->sb, fc,
+ fsc->mount_options);
+ if (err)
+ goto out;
+
+ doutc(cl, "mount opening path '%s'\n", path);
ceph_fs_debugfs_init(fsc);
@@ -958,21 +1180,23 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
}
fsc->mount_state = CEPH_MOUNT_MOUNTED;
- dout("mount success\n");
+ doutc(cl, "mount success\n");
mutex_unlock(&fsc->client->mount_mutex);
return root;
out:
mutex_unlock(&fsc->client->mount_mutex);
+ ceph_fscrypt_free_dummy_policy(fsc);
return ERR_PTR(err);
}
static int ceph_set_super(struct super_block *s, struct fs_context *fc)
{
struct ceph_fs_client *fsc = s->s_fs_info;
+ struct ceph_client *cl = fsc->client;
int ret;
- dout("set_super %p\n", s);
+ doutc(cl, "%p\n", s);
s->s_maxbytes = MAX_LFS_FILESIZE;
@@ -981,12 +1205,16 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
s->s_op = &ceph_super_ops;
- s->s_d_op = &ceph_dentry_ops;
+ set_default_d_op(s, &ceph_dentry_ops);
s->s_export_op = &ceph_export_ops;
s->s_time_gran = 1;
s->s_time_min = 0;
s->s_time_max = U32_MAX;
+ s->s_flags |= SB_NODIRATIME | SB_NOATIME;
+ s->s_magic = CEPH_SUPER_MAGIC;
+
+ ceph_fscrypt_set_ops(s);
ret = set_anon_super_fc(s, fc);
if (ret != 0)
@@ -1002,31 +1230,32 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ struct ceph_client *cl = fsc->client;
- dout("ceph_compare_super %p\n", sb);
+ doutc(cl, "%p\n", sb);
if (compare_mount_options(fsopt, opt, fsc)) {
- dout("monitor(s)/mount options don't match\n");
+ doutc(cl, "monitor(s)/mount options don't match\n");
return 0;
}
if ((opt->flags & CEPH_OPT_FSID) &&
ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
- dout("fsid doesn't match\n");
+ doutc(cl, "fsid doesn't match\n");
return 0;
}
if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
- dout("flags differ\n");
+ doutc(cl, "flags differ\n");
return 0;
}
if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
- dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+ doutc(cl, "client is blocklisted (and CLEANRECOVER is not set)\n");
return 0;
}
if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
- dout("client has been forcibly unmounted\n");
+ doutc(cl, "client has been forcibly unmounted\n");
return 0;
}
@@ -1059,6 +1288,7 @@ static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
static int ceph_get_tree(struct fs_context *fc)
{
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
struct super_block *sb;
struct ceph_fs_client *fsc;
struct dentry *res;
@@ -1070,6 +1300,8 @@ static int ceph_get_tree(struct fs_context *fc)
if (!fc->source)
return invalfc(fc, "No source");
+ if (fsopt->new_dev_syntax && !fsopt->mon_addr)
+ return invalfc(fc, "No monitor address");
/* create client (which we may/may not use) */
fsc = create_fs_client(pctx->opts, pctx->copts);
@@ -1095,9 +1327,9 @@ static int ceph_get_tree(struct fs_context *fc)
goto out;
}
- if (ceph_sb_to_client(sb) != fsc) {
+ if (ceph_sb_to_fs_client(sb) != fsc) {
destroy_fs_client(fsc);
- fsc = ceph_sb_to_client(sb);
+ fsc = ceph_sb_to_fs_client(sb);
dout("get_sb got existing client %p\n", fsc);
} else {
dout("get_sb using new client %p\n", fsc);
@@ -1111,8 +1343,9 @@ static int ceph_get_tree(struct fs_context *fc)
err = PTR_ERR(res);
goto out_splat;
}
- dout("root %p inode %p ino %llx.%llx\n", res,
- d_inode(res), ceph_vinop(d_inode(res)));
+
+ doutc(fsc->client, "root %p inode %p ino %llx.%llx\n", res,
+ d_inode(res), ceph_vinop(d_inode(res)));
fc->root = fsc->sb->s_root;
return 0;
@@ -1146,16 +1379,35 @@ static void ceph_free_fc(struct fs_context *fc)
static int ceph_reconfigure_fc(struct fs_context *fc)
{
+ int err;
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
struct ceph_mount_options *fsopt = pctx->opts;
- struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);
+ struct super_block *sb = fc->root->d_sb;
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
+ if (err)
+ return err;
if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
ceph_set_mount_opt(fsc, ASYNC_DIROPS);
else
ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
- sync_filesystem(fc->root->d_sb);
+ if (fsopt->flags & CEPH_MOUNT_OPT_SPARSEREAD)
+ ceph_set_mount_opt(fsc, SPARSEREAD);
+ else
+ ceph_clear_mount_opt(fsc, SPARSEREAD);
+
+ if (strcmp_null(fsc->mount_options->mon_addr, fsopt->mon_addr)) {
+ kfree(fsc->mount_options->mon_addr);
+ fsc->mount_options->mon_addr = fsopt->mon_addr;
+ fsopt->mon_addr = NULL;
+ pr_notice_client(fsc->client,
+ "monitor addresses recorded, but not used for reconnection");
+ }
+
+ sync_filesystem(sb);
return 0;
}
@@ -1217,15 +1469,113 @@ nomem:
return -ENOMEM;
}
+/*
+ * Return true if it successfully increases the blocker counter,
+ * or false if the mdsc is in stopping and flushed state.
+ */
+static bool __inc_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ spin_lock(&mdsc->stopping_lock);
+ if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING) {
+ spin_unlock(&mdsc->stopping_lock);
+ return false;
+ }
+ atomic_inc(&mdsc->stopping_blockers);
+ spin_unlock(&mdsc->stopping_lock);
+ return true;
+}
+
+static void __dec_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ spin_lock(&mdsc->stopping_lock);
+ if (!atomic_dec_return(&mdsc->stopping_blockers) &&
+ mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING)
+ complete_all(&mdsc->stopping_waiter);
+ spin_unlock(&mdsc->stopping_lock);
+}
+
+/* For metadata IO requests */
+bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ mutex_lock(&session->s_mutex);
+ inc_session_sequence(session);
+ mutex_unlock(&session->s_mutex);
+
+ return __inc_stopping_blocker(mdsc);
+}
+
+void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ __dec_stopping_blocker(mdsc);
+}
+
+/* For data IO requests */
+bool ceph_inc_osd_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ return __inc_stopping_blocker(mdsc);
+}
+
+void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ __dec_stopping_blocker(mdsc);
+}
+
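A minimal sketch of how a data-IO path is expected to bracket its work with the blocker helpers above, so that a concurrent unmount waits for in-flight requests (the function name and request handling are hypothetical):

/* Hypothetical sketch, not part of the patch. */
static int example_submit_data_io(struct ceph_mds_client *mdsc)
{
	int ret;

	if (!ceph_inc_osd_stopping_blocker(mdsc))
		return -EIO;	/* umount already reached the flushing stage */

	ret = 0;		/* ... build, submit and wait for the OSD request ... */

	ceph_dec_osd_stopping_blocker(mdsc);
	return ret;
}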
static void ceph_kill_sb(struct super_block *s)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ bool wait;
- dout("kill_sb %p\n", s);
+ doutc(cl, "%p\n", s);
- ceph_mdsc_pre_umount(fsc->mdsc);
+ ceph_mdsc_pre_umount(mdsc);
flush_fs_workqueues(fsc);
+ /*
+ * Though kill_anon_super() will eventually trigger sync_filesystem()
+ * anyway, we still need to do it here and then bump the shutdown
+ * stage. This lets us drop any further messages from the MDSs, which
+ * would otherwise raise the inodes' i_count reference counters to no
+ * useful end.
+ *
+ * Without this, evicting the inodes may fail in kill_anon_super(),
+ * which triggers a warning when destroying the fscrypt keyring and
+ * can then lead to a further crash in the ceph module when iput()
+ * tries to evict the inodes later.
+ */
+ sync_filesystem(s);
+
+ if (atomic64_read(&mdsc->dirty_folios) > 0) {
+ wait_queue_head_t *wq = &mdsc->flush_end_wq;
+ long timeleft = wait_event_killable_timeout(*wq,
+ atomic64_read(&mdsc->dirty_folios) <= 0,
+ fsc->client->options->mount_timeout);
+ if (!timeleft) /* timed out */
+ pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
+ else if (timeleft < 0) /* killed */
+ pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
+ }
+
+ spin_lock(&mdsc->stopping_lock);
+ mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
+ wait = !!atomic_read(&mdsc->stopping_blockers);
+ spin_unlock(&mdsc->stopping_lock);
+
+ if (wait && atomic_read(&mdsc->stopping_blockers)) {
+ long timeleft = wait_for_completion_killable_timeout(
+ &mdsc->stopping_waiter,
+ fsc->client->options->mount_timeout);
+ if (!timeleft) /* timed out */
+ pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
+ else if (timeleft < 0) /* killed */
+ pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
+ }
+
+ mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
kill_anon_super(s);
fsc->client->extra_mon_dispatch = NULL;
@@ -1241,13 +1591,13 @@ static struct file_system_type ceph_fs_type = {
.name = "ceph",
.init_fs_context = ceph_init_fs_context,
.kill_sb = ceph_kill_sb,
- .fs_flags = FS_RENAME_DOES_D_MOVE,
+ .fs_flags = FS_RENAME_DOES_D_MOVE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("ceph");
int ceph_force_reconnect(struct super_block *sb)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
int err = 0;
fsc->mount_state = CEPH_MOUNT_RECOVER;
@@ -1332,6 +1682,19 @@ bool disable_send_metrics = false;
module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");
+/* for both v1 and v2 syntax */
+static bool mount_support = true;
+static const struct kernel_param_ops param_ops_mount_syntax = {
+ .get = param_get_bool,
+};
+module_param_cb(mount_syntax_v1, &param_ops_mount_syntax, &mount_support, 0444);
+module_param_cb(mount_syntax_v2, &param_ops_mount_syntax, &mount_support, 0444);
+
+bool enable_unsafe_idmap = false;
+module_param(enable_unsafe_idmap, bool, 0644);
+MODULE_PARM_DESC(enable_unsafe_idmap,
+ "Allow to use idmapped mounts with MDS without CEPHFS_FEATURE_HAS_OWNER_UIDGID");
+
module_init(init_ceph);
module_exit(exit_ceph);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 14f951cd5b61..a1f781c46b41 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -3,8 +3,9 @@
#define _FS_CEPH_SUPER_H
#include <linux/ceph/ceph_debug.h>
+#include <linux/ceph/osd_client.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
@@ -17,21 +18,18 @@
#include <linux/posix_acl.h>
#include <linux/refcount.h>
#include <linux/security.h>
-
-#include <linux/ceph/libceph.h>
-
-#ifdef CONFIG_CEPH_FSCACHE
-#define FSCACHE_USE_NEW_IO_API
+#include <linux/netfs.h>
#include <linux/fscache.h>
-#endif
+#include <linux/hashtable.h>
-/* f_type in struct statfs */
-#define CEPH_SUPER_MAGIC 0x00c36400
+#include <linux/ceph/libceph.h>
+#include "crypto.h"
/* large granularity for statfs utilization stats to facilitate
* large volume sizes on 32-bit machines. */
#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+#define CEPH_4K_BLOCK_SHIFT 12 /* 4 KB */
#define CEPH_MOUNT_OPT_CLEANRECOVER (1<<1) /* auto reonnect (clean mode) after blocklisted */
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
@@ -45,10 +43,13 @@
#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_ASYNC_DIROPS (1<<15) /* allow async directory ops */
+#define CEPH_MOUNT_OPT_NOPAGECACHE (1<<16) /* bypass pagecache altogether */
+#define CEPH_MOUNT_OPT_SPARSEREAD (1<<17) /* always do sparse reads */
#define CEPH_MOUNT_OPT_DEFAULT \
(CEPH_MOUNT_OPT_DCACHE | \
- CEPH_MOUNT_OPT_NOCOPYFROM)
+ CEPH_MOUNT_OPT_NOCOPYFROM | \
+ CEPH_MOUNT_OPT_ASYNC_DIROPS)
#define ceph_set_mount_opt(fsc, opt) \
(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
@@ -59,7 +60,7 @@
/* max size of osd read request, limited by libceph */
#define CEPH_MAX_READ_SIZE CEPH_MSG_MAX_DATA_LEN
-/* osd has a configurable limitaion of max write size.
+/* osd has a configurable limitation of max write size.
* CEPH_MSG_MAX_DATA_LEN should be small enough. */
#define CEPH_MAX_WRITE_SIZE CEPH_MSG_MAX_DATA_LEN
#define CEPH_RASIZE_DEFAULT (8192*1024) /* max readahead */
@@ -88,6 +89,8 @@ struct ceph_mount_options {
unsigned int max_readdir; /* max readdir result (entries) */
unsigned int max_readdir_bytes; /* max readdir result (bytes) */
+ bool new_dev_syntax;
+
/*
* everything above this point can be memcmp'd; everything below
* is handled in compare_mount_options()
@@ -97,8 +100,37 @@ struct ceph_mount_options {
char *mds_namespace; /* default NULL */
char *server_path; /* default NULL (means "/") */
char *fscache_uniq; /* default NULL */
+ char *mon_addr;
+ struct fscrypt_dummy_policy dummy_enc_policy;
+};
+
+/*
+ * Check if the mds namespace in ceph_mount_options matches
+ * the passed in namespace string. First time match (when
+ * ->mds_namespace is NULL) is treated specially, since
+ * ->mds_namespace needs to be initialized by the caller.
+ */
+static inline int namespace_equals(struct ceph_mount_options *fsopt,
+ const char *namespace, size_t len)
+{
+ return !(fsopt->mds_namespace &&
+ (strlen(fsopt->mds_namespace) != len ||
+ strncmp(fsopt->mds_namespace, namespace, len)));
+}
+
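A few worked calls to illustrate the matching rules of namespace_equals() above:

/*
 * Hypothetical examples:
 *   fsopt->mds_namespace == NULL      -> namespace_equals(fsopt, "cephfs", 6) == 1
 *   fsopt->mds_namespace == "cephfs"  -> namespace_equals(fsopt, "cephfs", 6) == 1
 *   fsopt->mds_namespace == "cephfs"  -> namespace_equals(fsopt, "backup", 6) == 0
 */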
+/* mount state */
+enum {
+ CEPH_MOUNT_MOUNTING,
+ CEPH_MOUNT_MOUNTED,
+ CEPH_MOUNT_UNMOUNTING,
+ CEPH_MOUNT_UNMOUNTED,
+ CEPH_MOUNT_SHUTDOWN,
+ CEPH_MOUNT_RECOVER,
+ CEPH_MOUNT_FENCE_IO,
};
+#define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
+
struct ceph_fs_client {
struct super_block *sb;
@@ -119,26 +151,32 @@ struct ceph_fs_client {
struct ceph_mds_client *mdsc;
atomic_long_t writeback_count;
+ bool write_congested;
struct workqueue_struct *inode_wq;
struct workqueue_struct *cap_wq;
+ DECLARE_HASHTABLE(async_unlink_conflict, CEPH_ASYNC_CREATE_CONFLICT_BITS);
+ spinlock_t async_unlink_conflict_lock;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dentry_lru, *debugfs_caps;
struct dentry *debugfs_congestion_kb;
struct dentry *debugfs_bdi;
struct dentry *debugfs_mdsc, *debugfs_mdsmap;
- struct dentry *debugfs_metric;
struct dentry *debugfs_status;
struct dentry *debugfs_mds_sessions;
+ struct dentry *debugfs_metrics_dir;
#endif
#ifdef CONFIG_CEPH_FSCACHE
- struct fscache_cookie *fscache;
+ struct fscache_volume *fscache;
+#endif
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_dummy_policy fsc_dummy_enc_policy;
#endif
};
-
/*
* File i/o capability. This tracks shared state with the metadata
* server that allows us to cache or writeback attributes or to read
@@ -176,9 +214,10 @@ struct ceph_cap {
struct list_head caps_item;
};
-#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
-#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
-#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
+#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
+#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_FLUSH_FORCE 8 /* force flush any caps */
struct ceph_cap_flush {
u64 tid;
@@ -228,7 +267,7 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
if (refcount_dec_and_test(&capsnap->nref)) {
if (capsnap->xattr_blob)
ceph_buffer_put(capsnap->xattr_blob);
- kfree(capsnap);
+ kmem_cache_free(ceph_cap_snap_cachep, capsnap);
}
}
@@ -279,7 +318,8 @@ struct ceph_dentry_info {
struct dentry *dentry;
struct ceph_mds_session *lease_session;
struct list_head lease_list;
- unsigned flags;
+ struct hlist_node hnode;
+ unsigned long flags;
int lease_shared_gen;
u32 lease_gen;
u32 lease_seq;
@@ -288,10 +328,14 @@ struct ceph_dentry_info {
u64 offset;
};
-#define CEPH_DENTRY_REFERENCED 1
-#define CEPH_DENTRY_LEASE_LIST 2
-#define CEPH_DENTRY_SHRINK_LIST 4
-#define CEPH_DENTRY_PRIMARY_LINK 8
+#define CEPH_DENTRY_REFERENCED (1 << 0)
+#define CEPH_DENTRY_LEASE_LIST (1 << 1)
+#define CEPH_DENTRY_SHRINK_LIST (1 << 2)
+#define CEPH_DENTRY_PRIMARY_LINK (1 << 3)
+#define CEPH_DENTRY_ASYNC_UNLINK_BIT (4)
+#define CEPH_DENTRY_ASYNC_UNLINK (1 << CEPH_DENTRY_ASYNC_UNLINK_BIT)
+#define CEPH_DENTRY_ASYNC_CREATE_BIT (5)
+#define CEPH_DENTRY_ASYNC_CREATE (1 << CEPH_DENTRY_ASYNC_CREATE_BIT)
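The new *_BIT constants pair with the change of the flags field to unsigned long (see the ceph_dentry_info hunk above) so it can be used with atomic bitops; a hedged example:

/*
 * Hypothetical example, assuming di is a struct ceph_dentry_info *:
 *
 *	if (test_and_clear_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
 *		... handle the completed async unlink ...
 */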
struct ceph_inode_xattrs_info {
/*
@@ -315,6 +359,7 @@ struct ceph_inode_xattrs_info {
* Ceph inode.
*/
struct ceph_inode_info {
+ struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct ceph_vino i_vino; /* ceph ino + snap */
spinlock_t i_ceph_lock;
@@ -395,6 +440,11 @@ struct ceph_inode_info {
u32 i_truncate_seq; /* last truncate to smaller size */
u64 i_truncate_size; /* and the size we last truncated down to */
int i_truncate_pending; /* still need to call vmtruncate */
+ /*
+ * In the non-fscrypt case this equals i_truncate_size; otherwise it
+ * equals fscrypt_file_size
+ */
+ u64 i_truncate_pagecache_size;
u64 i_max_size; /* max file size authorized by mds */
u64 i_reported_size; /* (max_)size reported to or requested of mds */
@@ -426,26 +476,42 @@ struct ceph_inode_info {
struct work_struct i_work;
unsigned long i_work_mask;
-#ifdef CONFIG_CEPH_FSCACHE
- struct fscache_cookie *fscache;
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_inode_info *i_crypt_info;
+ u32 fscrypt_auth_len;
+ u32 fscrypt_file_len;
+ u8 *fscrypt_auth;
+ u8 *fscrypt_file;
#endif
- struct inode vfs_inode; /* at end */
+};
+
+struct ceph_netfs_request_data {
+ int caps;
+
+ /*
+ * Maximum size of a file readahead request.
+ * The fadvise could update the bdi's default ra_pages.
+ */
+ unsigned int file_ra_pages;
+
+ /* Set it if fadvise disables file readahead entirely */
+ bool file_ra_disabled;
};
static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
- return container_of(inode, struct ceph_inode_info, vfs_inode);
+ return container_of(inode, struct ceph_inode_info, netfs.inode);
}
static inline struct ceph_fs_client *
-ceph_inode_to_client(const struct inode *inode)
+ceph_inode_to_fs_client(const struct inode *inode)
{
return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}
static inline struct ceph_fs_client *
-ceph_sb_to_client(const struct super_block *sb)
+ceph_sb_to_fs_client(const struct super_block *sb)
{
return (struct ceph_fs_client *)sb->s_fs_info;
}
@@ -453,7 +519,13 @@ ceph_sb_to_client(const struct super_block *sb)
static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
- return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
+ return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc;
+}
+
+static inline struct ceph_client *
+ceph_inode_to_client(const struct inode *inode)
+{
+ return (struct ceph_client *)ceph_inode_to_fs_client(inode)->client;
}
static inline struct ceph_vino
@@ -509,7 +581,7 @@ static inline u64 ceph_snap(struct inode *inode)
*/
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
- if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
+ if (unlikely(ceph_test_mount_opt(ceph_sb_to_fs_client(sb), INO32)))
return ceph_ino_to_ino32(ino);
return ino;
}
@@ -534,19 +606,23 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
*
* These come from src/mds/mdstypes.h in the ceph sources.
*/
-#define CEPH_MAX_MDS 0x100
-#define CEPH_NUM_STRAY 10
+#define CEPH_MAX_MDS 0x100
+#define CEPH_NUM_STRAY 10
#define CEPH_MDS_INO_MDSDIR_OFFSET (1 * CEPH_MAX_MDS)
+#define CEPH_MDS_INO_LOG_OFFSET (2 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
- if (vino.ino < CEPH_INO_SYSTEM_BASE &&
- vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
- WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
- return true;
- }
- return false;
+ if (vino.ino >= CEPH_INO_SYSTEM_BASE ||
+ vino.ino < CEPH_MDS_INO_MDSDIR_OFFSET)
+ return false;
+
+ /* Don't warn on mdsdirs */
+ WARN_RATELIMIT(vino.ino >= CEPH_MDS_INO_LOG_OFFSET,
+ "Attempt to access reserved inode number 0x%llx",
+ vino.ino);
+ return true;
}
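Worked out with the constants above (CEPH_MAX_MDS = 0x100, CEPH_NUM_STRAY = 10): the mdsdir offset is 0x100, the log offset is 0x200 and CEPH_INO_SYSTEM_BASE is 6 * 0x100 + 0x100 * 10 = 0x1000. Inode numbers in [0x100, 0x1000) are therefore treated as reserved, and after this change the rate-limited warning fires only for [0x200, 0x1000); per-MDS mdsdir inodes in [0x100, 0x200) are recognised as reserved silently.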
static inline struct inode *ceph_find_inode(struct super_block *sb,
@@ -577,9 +653,13 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_FLUSH_SNAPS (1 << 8) /* need flush snapss */
#define CEPH_I_ERROR_WRITE (1 << 9) /* have seen write errors */
#define CEPH_I_ERROR_FILELOCK (1 << 10) /* have seen file lock errors */
-#define CEPH_I_ODIRECT (1 << 11) /* inode in direct I/O mode */
+#define CEPH_I_ODIRECT_BIT (11) /* inode in direct I/O mode */
+#define CEPH_I_ODIRECT (1 << CEPH_I_ODIRECT_BIT)
#define CEPH_ASYNC_CREATE_BIT (12) /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE (1 << CEPH_ASYNC_CREATE_BIT)
+#define CEPH_I_SHUTDOWN (1 << 13) /* inode is no longer usable */
+#define CEPH_I_ASYNC_CHECK_CAPS (1 << 14) /* check caps immediately after async
+ creating finishes */
/*
* Masks of ceph inode work.
@@ -732,7 +812,6 @@ extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
struct ceph_cap *ocap, int mask);
-extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);
static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)
@@ -756,6 +835,8 @@ extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
extern void ceph_reservation_status(struct ceph_fs_client *client,
int *total, int *avail, int *used,
int *reserved, int *min);
+extern void change_auth_cap_ses(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session);
@@ -773,7 +854,6 @@ struct ceph_file_info {
struct list_head rw_contexts;
u32 filp_gen;
- atomic_t num_locks;
};
struct ceph_dir_file_info {
@@ -839,7 +919,7 @@ ceph_find_rw_context(struct ceph_file_info *cf)
}
struct ceph_readdir_cache_control {
- struct page *page;
+ struct folio *folio;
struct dentry **dentries;
int index;
};
@@ -876,6 +956,8 @@ struct ceph_snap_realm {
struct list_head dirty_item; /* if realm needs new context */
+ struct list_head rebuild_item; /* rebuild snap realms _downward_ in hierarchy */
+
/* the current set of snaps for this realm */
struct ceph_snap_context *cached_context;
@@ -931,7 +1013,7 @@ extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_msg *msg);
extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
-extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
+extern void ceph_cleanup_global_and_empty_realms(struct ceph_mds_client *mdsc);
extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc,
u64 snap);
@@ -939,6 +1021,7 @@ extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
struct ceph_snapid_map *sm);
extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
+void ceph_umount_begin(struct super_block *sb);
/*
@@ -955,6 +1038,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
/* inode.c */
struct ceph_mds_reply_info_in;
struct ceph_mds_reply_dirfrag;
+struct ceph_acl_sec_ctx;
extern const struct inode_operations ceph_file_iops;
@@ -962,8 +1046,14 @@ extern struct inode *ceph_alloc_inode(struct super_block *sb);
extern void ceph_evict_inode(struct inode *inode);
extern void ceph_free_inode(struct inode *inode);
+struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
+ umode_t *mode, struct ceph_acl_sec_ctx *as_ctx);
+void ceph_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as_ctx);
+
extern struct inode *ceph_get_inode(struct super_block *sb,
- struct ceph_vino vino);
+ struct ceph_vino vino,
+ struct inode *newino);
extern struct inode *ceph_get_snapdir(struct inode *parent);
extern int ceph_fill_file_size(struct inode *inode, int issued,
u32 truncate_seq, u64 truncate_size, u64 size);
@@ -981,8 +1071,6 @@ extern int ceph_fill_trace(struct super_block *sb,
extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session);
-extern int ceph_inode_holds_cap(struct inode *inode, int mask);
-
extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
extern void __ceph_do_pending_vmtruncate(struct inode *inode);
@@ -1013,28 +1101,46 @@ static inline void ceph_queue_flush_snaps(struct inode *inode)
ceph_queue_inode_work(inode, CEPH_I_WORK_FLUSH_SNAPS);
}
+extern int ceph_try_to_choose_auth_mds(struct inode *inode, int mask);
extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
int mask, bool force);
static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
{
return __ceph_do_getattr(inode, NULL, mask, force);
}
-extern int ceph_permission(struct user_namespace *mnt_userns,
+extern int ceph_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
-extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
-extern int ceph_setattr(struct user_namespace *mnt_userns,
+
+struct ceph_iattr {
+ struct ceph_fscrypt_auth *fscrypt_auth;
+};
+
+extern int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *attr, struct ceph_iattr *cia);
+extern int ceph_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr);
-extern int ceph_getattr(struct user_namespace *mnt_userns,
+extern int ceph_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
+void ceph_inode_shutdown(struct inode *inode);
+
+static inline bool ceph_inode_is_shutdown(struct inode *inode)
+{
+ unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int state = READ_ONCE(fsc->mount_state);
+
+ return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
+}
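This check ties into the new CEPH_MOUNT_FENCE_IO state added in the mount-state enum above: since it is ordered after CEPH_MOUNT_SHUTDOWN, a mount fenced by a corrupted snap trace also satisfies state >= CEPH_MOUNT_SHUTDOWN here. A hedged sketch of how a caller might use it (the function name is hypothetical):

/* Hypothetical sketch, not part of the patch. */
static int example_begin_io(struct inode *inode)
{
	if (ceph_inode_is_shutdown(inode))
		return -EIO;	/* shut down, forcibly unmounted or fenced */
	return 0;
}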
/* xattr.c */
int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
+int ceph_do_getvxattr(struct inode *inode, const char *name, void *value, size_t size);
ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
-extern const struct xattr_handler *ceph_xattr_handlers[];
+extern const struct xattr_handler * const ceph_xattr_handlers[];
struct ceph_acl_sec_ctx {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
@@ -1042,8 +1148,10 @@ struct ceph_acl_sec_ctx {
void *acl;
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
- void *sec_ctx;
- u32 sec_ctxlen;
+ struct lsm_context lsmctx;
+#endif
+#ifdef CONFIG_FS_ENCRYPTION
+ struct ceph_fscrypt_auth *fscrypt_auth;
#endif
struct ceph_pagelist *pagelist;
};
@@ -1086,8 +1194,8 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx);
#ifdef CONFIG_CEPH_FS_POSIX_ACL
struct posix_acl *ceph_get_acl(struct inode *, int, bool);
-int ceph_set_acl(struct user_namespace *mnt_userns,
- struct inode *inode, struct posix_acl *acl, int type);
+int ceph_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct posix_acl *acl, int type);
int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
struct ceph_acl_sec_ctx *as_ctx);
void ceph_init_inode_acls(struct inode *inode,
@@ -1112,10 +1220,6 @@ static inline void ceph_init_inode_acls(struct inode *inode,
struct ceph_acl_sec_ctx *as_ctx)
{
}
-static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
-{
- return 0;
-}
static inline void ceph_forget_all_cached_acls(struct inode *inode)
{
@@ -1135,7 +1239,8 @@ extern void ceph_add_cap(struct inode *inode,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
-extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+extern void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ bool queue_release);
extern void __ceph_remove_caps(struct ceph_inode_info *ci);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
@@ -1150,6 +1255,8 @@ extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
struct ceph_inode_info *ci);
+extern struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci,
+ int mds);
extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
int mds);
extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
@@ -1157,8 +1264,6 @@ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had);
-extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
- int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
extern void __ceph_remove_capsnap(struct inode *inode,
@@ -1170,10 +1275,10 @@ extern void ceph_remove_capsnap(struct inode *inode,
extern void ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession);
extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
-extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
- struct ceph_mds_session *session);
+extern void ceph_check_caps(struct ceph_inode_info *ci, int flags);
extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
+extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc);
extern int ceph_drop_caps_for_unlink(struct inode *inode);
extern int ceph_encode_inode_release(void **p, struct inode *inode,
int mds, int drop, int unless, int force);
@@ -1181,6 +1286,8 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
struct inode *dir,
int mds, int drop, int unless);
+extern int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi,
+ int need, int want, loff_t endoff, int *got);
extern int ceph_get_caps(struct file *filp, int need, int want,
loff_t endoff, int *got);
extern int ceph_try_get_caps(struct inode *inode,
@@ -1194,10 +1301,20 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
/* addr.c */
extern const struct address_space_operations ceph_aops;
-extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
-extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
+extern const struct netfs_request_ops ceph_netfs_ops;
+int ceph_mmap_prepare(struct vm_area_desc *desc);
+extern int ceph_uninline_data(struct file *file);
extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
+int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate);
+
+static inline bool ceph_has_inline_data(struct ceph_inode_info *ci)
+{
+ if (ci->i_inline_version == CEPH_INLINE_NONE ||
+ ci->i_inline_version == 1) /* initial version, no data */
+ return false;
+ return true;
+}
/* file.c */
extern const struct file_operations ceph_file_fops;
@@ -1206,6 +1323,9 @@ extern int ceph_renew_caps(struct inode *inode, int fmode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode);
+extern ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ struct iov_iter *to, int *retry_op,
+ u64 *last_objver);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
@@ -1256,9 +1376,29 @@ extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
/* quota.c */
-static inline bool __ceph_has_any_quota(struct ceph_inode_info *ci)
+
+enum quota_get_realm {
+ QUOTA_GET_MAX_FILES,
+ QUOTA_GET_MAX_BYTES,
+ QUOTA_GET_ANY
+};
+
+static inline bool __ceph_has_quota(struct ceph_inode_info *ci,
+ enum quota_get_realm which)
{
- return ci->i_max_files || ci->i_max_bytes;
+ bool has_quota = false;
+
+ switch (which) {
+ case QUOTA_GET_MAX_BYTES:
+ has_quota = !!ci->i_max_bytes;
+ break;
+ case QUOTA_GET_MAX_FILES:
+ has_quota = !!ci->i_max_files;
+ break;
+ default:
+ has_quota = !!(ci->i_max_files || ci->i_max_bytes);
+ }
+ return has_quota;
}
extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);
@@ -1267,13 +1407,26 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
u64 max_bytes, u64 max_files)
{
bool had_quota, has_quota;
- had_quota = __ceph_has_any_quota(ci);
+ had_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
ci->i_max_bytes = max_bytes;
ci->i_max_files = max_files;
- has_quota = __ceph_has_any_quota(ci);
+ has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
if (had_quota != has_quota)
- ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
+ ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota);
+}
+
+static inline int __ceph_sparse_read_ext_count(struct inode *inode, u64 len)
+{
+ int cnt = 0;
+
+ if (IS_ENCRYPTED(inode)) {
+ cnt = len >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ if (cnt > CEPH_SPARSE_EXT_ARRAY_INITIAL)
+ cnt = 0;
+ }
+
+ return cnt;
}
extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
@@ -1289,4 +1442,9 @@ extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
struct kstatfs *buf);
extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
+bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc);
+bool ceph_inc_osd_stopping_blocker(struct ceph_mds_client *mdsc);
+void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc);
#endif /* _FS_CEPH_SUPER_H */
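For context, a minimal usage sketch (not part of the patch) of the reworked quota helper declared above; the function name below is hypothetical, and the caller is assumed to already hold a valid struct ceph_inode_info, as the in-tree callers do:

static inline bool example_inode_has_byte_quota(struct ceph_inode_info *ci)
{
	/* QUOTA_GET_ANY preserves the old __ceph_has_any_quota() semantics. */
	if (!__ceph_has_quota(ci, QUOTA_GET_ANY))
		return false;

	/* The new enum lets callers test one quota dimension at a time. */
	return __ceph_has_quota(ci, QUOTA_GET_MAX_BYTES);
}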
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index fcf7dfdecf96..ad1f30bea175 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -57,7 +57,8 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_string *pool_ns;
s64 pool = ci->i_layout.pool_id;
@@ -69,7 +70,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
- dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
+ doutc(cl, "%p\n", &ci->netfs.inode);
down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
@@ -161,7 +162,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
char *val, size_t size)
{
ssize_t ret;
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
s64 pool = ci->i_layout.pool_id;
const char *pool_name;
@@ -248,8 +249,7 @@ static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
size_t size)
{
- return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
- ci->i_rctime.tv_nsec);
+ return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_rctime);
}
/* dir pin */
@@ -306,14 +306,13 @@ static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
size_t size)
{
- return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
- ci->i_snap_btime.tv_nsec);
+ return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_snap_btime);
}
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
char *val, size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
}
@@ -321,7 +320,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
char *val, size_t size)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
return ceph_fmt_xattr(val, size, "client%lld",
ceph_client_gid(fsc->client));
@@ -352,6 +351,24 @@ static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
return ret;
}
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static bool ceph_vxattrcb_fscrypt_auth_exists(struct ceph_inode_info *ci)
+{
+ return ci->fscrypt_auth_len;
+}
+
+static ssize_t ceph_vxattrcb_fscrypt_auth(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ if (size) {
+ if (size < ci->fscrypt_auth_len)
+ return -ERANGE;
+ memcpy(val, ci->fscrypt_auth, ci->fscrypt_auth_len);
+ }
+ return ci->fscrypt_auth_len;
+}
+#endif /* CONFIG_FS_ENCRYPTION */
+
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
@@ -366,6 +383,14 @@ static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
}
#define XATTR_RSTAT_FIELD(_type, _name) \
XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
+#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name) \
+ { \
+ .name = CEPH_XATTR_NAME(_type, _name), \
+ .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
+ .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
+ .exists_cb = NULL, \
+ .flags = VXATTR_FLAG_RSTAT, \
+ }
#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
{ \
.name = CEPH_XATTR_NAME2(_type, _name, _field), \
@@ -404,7 +429,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_RSTAT_FIELD(dir, rsubdirs),
XATTR_RSTAT_FIELD(dir, rsnaps),
XATTR_RSTAT_FIELD(dir, rbytes),
- XATTR_RSTAT_FIELD(dir, rctime),
+ XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime),
{
.name = "ceph.dir.pin",
.name_size = sizeof("ceph.dir.pin"),
@@ -492,6 +517,15 @@ static struct ceph_vxattr ceph_common_vxattrs[] = {
.exists_cb = NULL,
.flags = VXATTR_FLAG_READONLY,
},
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ {
+ .name = "ceph.fscrypt.auth",
+ .name_size = sizeof("ceph.fscrypt.auth"),
+ .getxattr_cb = ceph_vxattrcb_fscrypt_auth,
+ .exists_cb = ceph_vxattrcb_fscrypt_auth_exists,
+ .flags = VXATTR_FLAG_READONLY,
+ },
+#endif /* CONFIG_FS_ENCRYPTION */
{ .name = NULL, 0 } /* Required table terminator */
};
@@ -527,12 +561,16 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
return NULL;
}
+#define MAX_XATTR_VAL_PRINT_LEN 256
+
static int __set_xattr(struct ceph_inode_info *ci,
const char *name, int name_len,
const char *val, int val_len,
int flags, int update_xattr,
struct ceph_inode_xattr **newxattr)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_xattr *xattr = NULL;
@@ -589,7 +627,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr->should_free_name = update_xattr;
ci->i_xattrs.count++;
- dout("__set_xattr count=%d\n", ci->i_xattrs.count);
+ doutc(cl, "count=%d\n", ci->i_xattrs.count);
} else {
kfree(*newxattr);
*newxattr = NULL;
@@ -617,11 +655,13 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (new) {
rb_link_node(&xattr->node, parent, p);
rb_insert_color(&xattr->node, &ci->i_xattrs.index);
- dout("__set_xattr_val p=%p\n", p);
+ doutc(cl, "p=%p\n", p);
}
- dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n",
- ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val);
+ doutc(cl, "added %p %llx.%llx xattr %p %.*s=%.*s%s\n", inode,
+ ceph_vinop(inode), xattr, name_len, name, min(val_len,
+ MAX_XATTR_VAL_PRINT_LEN), val,
+ val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
return 0;
}
@@ -629,6 +669,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
const char *name)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_xattr *xattr = NULL;
@@ -647,13 +688,15 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
else if (c > 0)
p = &(*p)->rb_right;
else {
- dout("__get_xattr %s: found %.*s\n", name,
- xattr->val_len, xattr->val);
+ int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN);
+
+ doutc(cl, "%s found %.*s%s\n", name, len, xattr->val,
+ xattr->val_len > len ? "..." : "");
return xattr;
}
}
- dout("__get_xattr %s: not found\n", name);
+ doutc(cl, "%s not found\n", name);
return NULL;
}
@@ -694,19 +737,20 @@ static int __remove_xattr(struct ceph_inode_info *ci,
static char *__copy_xattr_names(struct ceph_inode_info *ci,
char *dest)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.index);
- dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
+ doutc(cl, "count=%d\n", ci->i_xattrs.count);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
memcpy(dest, xattr->name, xattr->name_len);
dest[xattr->name_len] = '\0';
- dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
- xattr->name_len, ci->i_xattrs.names_size);
+ doutc(cl, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
+ xattr->name_len, ci->i_xattrs.names_size);
dest += xattr->name_len + 1;
p = rb_next(p);
@@ -717,19 +761,19 @@ static char *__copy_xattr_names(struct ceph_inode_info *ci,
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node *p, *tmp;
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.index);
- dout("__ceph_destroy_xattrs p=%p\n", p);
+ doutc(cl, "p=%p\n", p);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
tmp = p;
p = rb_next(tmp);
- dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
- xattr->name_len, xattr->name);
+ doutc(cl, "next p=%p (%.*s)\n", p, xattr->name_len, xattr->name);
rb_erase(tmp, &ci->i_xattrs.index);
__free_xattr(xattr);
@@ -746,6 +790,7 @@ static int __build_xattrs(struct inode *inode)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
u32 namelen;
u32 numattr = 0;
void *p, *end;
@@ -757,8 +802,8 @@ static int __build_xattrs(struct inode *inode)
int err = 0;
int i;
- dout("__build_xattrs() len=%d\n",
- ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
+ doutc(cl, "len=%d\n",
+ ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
return 0; /* already built */
@@ -833,6 +878,8 @@ bad:
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
int val_size)
{
+ struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
+
/*
* 4 bytes for the length, and additional 4 bytes per each xattr name,
* 4 bytes per each value
@@ -840,9 +887,8 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
int size = 4 + ci->i_xattrs.count*(4 + 4) +
ci->i_xattrs.names_size +
ci->i_xattrs.vals_size;
- dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
- ci->i_xattrs.count, ci->i_xattrs.names_size,
- ci->i_xattrs.vals_size);
+ doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count,
+ ci->i_xattrs.names_size, ci->i_xattrs.vals_size);
if (name_size)
size += 4 + 4 + name_size + val_size;
@@ -851,19 +897,21 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
}
/*
- * If there are dirty xattrs, reencode xattrs into the prealloc_blob
+ * If there are dirty xattrs, re-encode xattrs into the prealloc_blob
* and swap into place. It returns the old i_xattrs.blob (or NULL) so
* that it can be freed by the caller as the i_ceph_lock is likely to be
* held.
*/
struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
struct ceph_buffer *old_blob = NULL;
void *dest;
- dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
+ doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (ci->i_xattrs.dirty) {
int need = __get_required_blob_size(ci, 0, 0);
@@ -921,12 +969,16 @@ static inline int __get_request_mask(struct inode *in) {
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
size_t size)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_inode_xattr *xattr;
- struct ceph_vxattr *vxattr = NULL;
+ struct ceph_vxattr *vxattr;
int req_mask;
ssize_t err;
+ if (strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
+ goto handle_non_vxattrs;
+
/* let's see if a virtual xattr was requested */
vxattr = ceph_match_vxattr(inode, name);
if (vxattr) {
@@ -945,13 +997,20 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
err = -ERANGE;
}
return err;
+ } else {
+ err = ceph_do_getvxattr(inode, name, value, size);
+ /* this would happen with a new client and old server combo */
+ if (err == -EOPNOTSUPP)
+ err = -ENODATA;
+ return err;
}
-
+handle_non_vxattrs:
req_mask = __get_request_mask(inode);
spin_lock(&ci->i_ceph_lock);
- dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
- ci->i_xattrs.version, ci->i_xattrs.index_version);
+ doutc(cl, "%p %llx.%llx name '%s' ver=%lld index_ver=%lld\n", inode,
+ ceph_vinop(inode), name, ci->i_xattrs.version,
+ ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
!((req_mask & CEPH_CAP_XATTR_SHARED) ||
@@ -960,8 +1019,9 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
/* security module gets xattr while filling trace */
if (current->journal_info) {
- pr_warn_ratelimited("sync getxattr %p "
- "during filling trace\n", inode);
+ pr_warn_ratelimited_client(cl,
+ "sync %p %llx.%llx during filling trace\n",
+ inode, ceph_vinop(inode));
return -EBUSY;
}
@@ -1003,14 +1063,16 @@ out:
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
struct inode *inode = d_inode(dentry);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
bool len_only = (size == 0);
u32 namelen;
int err;
spin_lock(&ci->i_ceph_lock);
- dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
- ci->i_xattrs.version, ci->i_xattrs.index_version);
+ doutc(cl, "%p %llx.%llx ver=%lld index_ver=%lld\n", inode,
+ ceph_vinop(inode), ci->i_xattrs.version,
+ ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
!__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
@@ -1044,7 +1106,8 @@ out:
static int ceph_sync_setxattr(struct inode *inode, const char *name,
const char *value, size_t size, int flags)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1069,7 +1132,7 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
flags |= CEPH_XATTR_REMOVE;
}
- dout("setxattr value=%.*s\n", (int)size, value);
+ doutc(cl, "name %s value size %zu\n", name, size);
/* do request */
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -1098,10 +1161,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
req->r_num_caps = 1;
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
- dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
+ doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
- dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
+ doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
out:
if (pagelist)
@@ -1112,9 +1175,10 @@ out:
int __ceph_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
+ struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_vxattr *vxattr;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
struct ceph_buffer *old_blob = NULL;
int issued;
@@ -1167,8 +1231,14 @@ int __ceph_setxattr(struct inode *inode, const char *name,
spin_lock(&ci->i_ceph_lock);
retry:
issued = __ceph_caps_issued(ci, NULL);
- if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
+ required_blob_size = __get_required_blob_size(ci, name_len, val_len);
+ if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
+ (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
+ doutc(cl, "sync version: %llu size: %d max: %llu\n",
+ ci->i_xattrs.version, required_blob_size,
+ mdsc->mdsmap->m_max_xattr_size);
goto do_sync;
+ }
if (!lock_snap_rwsem && !ci->i_head_snapc) {
lock_snap_rwsem = true;
@@ -1180,19 +1250,18 @@ retry:
}
}
- dout("setxattr %p name '%s' issued %s\n", inode, name,
- ceph_cap_string(issued));
+ doutc(cl, "%p %llx.%llx name '%s' issued %s\n", inode,
+ ceph_vinop(inode), name, ceph_cap_string(issued));
__build_xattrs(inode);
- required_blob_size = __get_required_blob_size(ci, name_len, val_len);
-
if (!ci->i_xattrs.prealloc_blob ||
required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
struct ceph_buffer *blob;
spin_unlock(&ci->i_ceph_lock);
ceph_buffer_put(old_blob); /* Shouldn't be required */
- dout(" pre-allocating new blob size=%d\n", required_blob_size);
+ doutc(cl, " pre-allocating new blob size=%d\n",
+ required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
@@ -1211,7 +1280,7 @@ retry:
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
&prealloc_cf);
ci->i_xattrs.dirty = true;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
}
spin_unlock(&ci->i_ceph_lock);
@@ -1231,8 +1300,9 @@ do_sync_unlocked:
/* security module set xattr while filling trace */
if (current->journal_info) {
- pr_warn_ratelimited("sync setxattr %p "
- "during filling trace\n", inode);
+ pr_warn_ratelimited_client(cl,
+ "sync %p %llx.%llx during filling trace\n",
+ inode, ceph_vinop(inode));
err = -EBUSY;
} else {
err = ceph_sync_setxattr(inode, name, value, size, flags);
@@ -1264,7 +1334,7 @@ static int ceph_get_xattr_handler(const struct xattr_handler *handler,
}
static int ceph_set_xattr_handler(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -1311,8 +1381,7 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
int err;
err = security_dentry_init_security(dentry, mode, &dentry->d_name,
- &name, &as_ctx->sec_ctx,
- &as_ctx->sec_ctxlen);
+ &name, &as_ctx->lsmctx);
if (err < 0) {
WARN_ON_ONCE(err != -EOPNOTSUPP);
err = 0; /* do nothing */
@@ -1337,7 +1406,7 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
*/
name_len = strlen(name);
err = ceph_pagelist_reserve(pagelist,
- 4 * 2 + name_len + as_ctx->sec_ctxlen);
+ 4 * 2 + name_len + as_ctx->lsmctx.len);
if (err)
goto out;
@@ -1360,8 +1429,9 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
ceph_pagelist_encode_32(pagelist, name_len);
ceph_pagelist_append(pagelist, name, name_len);
- ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen);
- ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen);
+ ceph_pagelist_encode_32(pagelist, as_ctx->lsmctx.len);
+ ceph_pagelist_append(pagelist, as_ctx->lsmctx.context,
+ as_ctx->lsmctx.len);
err = 0;
out:
@@ -1379,7 +1449,10 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
posix_acl_release(as_ctx->default_acl);
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
- security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen);
+ security_release_secctx(&as_ctx->lsmctx);
+#endif
+#ifdef CONFIG_FS_ENCRYPTION
+ kfree(as_ctx->fscrypt_auth);
#endif
if (as_ctx->pagelist)
ceph_pagelist_release(as_ctx->pagelist);
@@ -1389,11 +1462,7 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
* List of handlers for synthetic system.* attributes. Other
* attributes are handled directly.
*/
-const struct xattr_handler *ceph_xattr_handlers[] = {
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
+const struct xattr_handler * const ceph_xattr_handlers[] = {
&ceph_other_xattr_handler,
NULL,
};
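To make the new size check in __ceph_setxattr() concrete, a worked example with assumed numbers: an inode caching two xattrs with 20 bytes of names and 100 bytes of values needs 4 + 2 * (4 + 4) + 20 + 100 = 140 bytes in the encoded blob, per __get_required_blob_size(); setting one more xattr with an 8-byte name and a 16-byte value adds 4 + 4 + 8 + 16 = 32 bytes, for 172 in total. With this change, if that figure exceeds mdsc->mdsmap->m_max_xattr_size the xattr is no longer buffered locally under CEPH_CAP_XATTR_EXCL but is sent to the MDS through the synchronous ceph_sync_setxattr() path instead.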
diff --git a/fs/char_dev.c b/fs/char_dev.c
index ba0ded7842a7..c2ddb998f3c9 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -25,7 +25,7 @@
#include "internal.h"
-static struct kobj_map *cdev_map;
+static struct kobj_map *cdev_map __ro_after_init;
static DEFINE_MUTEX(chrdevs_lock);
@@ -150,7 +150,7 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
cd->major = major;
cd->baseminor = baseminor;
cd->minorct = minorct;
- strlcpy(cd->name, name, sizeof(cd->name));
+ strscpy(cd->name, name, sizeof(cd->name));
if (!prev) {
cd->next = curr;
@@ -350,7 +350,7 @@ static struct kobject *cdev_get(struct cdev *p)
struct module *owner = p->owner;
struct kobject *kobj;
- if (owner && !try_module_get(owner))
+ if (!try_module_get(owner))
return NULL;
kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
@@ -483,17 +483,24 @@ int cdev_add(struct cdev *p, dev_t dev, unsigned count)
p->dev = dev;
p->count = count;
- if (WARN_ON(dev == WHITEOUT_DEV))
- return -EBUSY;
+ if (WARN_ON(dev == WHITEOUT_DEV)) {
+ error = -EBUSY;
+ goto err;
+ }
error = kobj_map(cdev_map, dev, count, NULL,
exact_match, exact_lock, p);
if (error)
- return error;
+ goto err;
kobject_get(p->kobj.parent);
return 0;
+
+err:
+ kfree_const(p->kobj.name);
+ p->kobj.name = NULL;
+ return error;
}
/**
@@ -547,7 +554,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev)
}
rc = device_add(dev);
- if (rc)
+ if (rc && dev->devt)
cdev_del(cdev);
return rc;
@@ -555,8 +562,8 @@ int cdev_device_add(struct cdev *cdev, struct device *dev)
/**
* cdev_device_del() - inverse of cdev_device_add
- * @dev: the device structure
* @cdev: the cdev structure
+ * @dev: the device structure
*
* cdev_device_del() is a helper function to call cdev_del and device_del.
* It should be used whenever cdev_device_add is used.
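A minimal caller sketch (not part of the patch; the driver name and minor count are made up) showing the sequence the fixed cdev_add() error path affects — on failure the kobject name is now released inside cdev_add(), so the caller only unwinds its own allocations:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

static struct cdev my_cdev;                   /* hypothetical driver state */
static const struct file_operations my_fops;  /* assumed to be filled in elsewhere */

static int my_driver_setup(void)
{
	dev_t devt;
	int ret;

	ret = alloc_chrdev_region(&devt, 0, 1, "mydev");
	if (ret)
		return ret;

	cdev_init(&my_cdev, &my_fops);
	my_cdev.owner = THIS_MODULE;

	/* With this patch, a failing cdev_add() frees the kobject name it
	 * duplicated internally, so the only cleanup left to the caller is
	 * the chrdev region allocated above. */
	ret = cdev_add(&my_cdev, devt, 1);
	if (ret)
		unregister_chrdev_region(devt, 1);

	return ret;
}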
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
deleted file mode 100644
index 8be57aaedab6..000000000000
--- a/fs/cifs/cache.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * CIFS filesystem cache index structure definitions
- *
- * Copyright (c) 2010 Novell, Inc.
- * Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
- *
- */
-#include "fscache.h"
-#include "cifs_debug.h"
-
-/*
- * CIFS filesystem definition for FS-Cache
- */
-struct fscache_netfs cifs_fscache_netfs = {
- .name = "cifs",
- .version = 0,
-};
-
-/*
- * Register CIFS for caching with FS-Cache
- */
-int cifs_fscache_register(void)
-{
- return fscache_register_netfs(&cifs_fscache_netfs);
-}
-
-/*
- * Unregister CIFS for caching
- */
-void cifs_fscache_unregister(void)
-{
- fscache_unregister_netfs(&cifs_fscache_netfs);
-}
-
-/*
- * Server object for FS-Cache
- */
-const struct fscache_cookie_def cifs_fscache_server_index_def = {
- .name = "CIFS.server",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-static enum
-fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct cifs_fscache_super_auxdata auxdata;
- const struct cifs_tcon *tcon = cookie_netfs_data;
-
- if (datalen != sizeof(auxdata))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.resource_id = tcon->resource_id;
- auxdata.vol_create_time = tcon->vol_create_time;
- auxdata.vol_serial_number = tcon->vol_serial_number;
-
- if (memcmp(data, &auxdata, datalen) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-/*
- * Superblock object for FS-Cache
- */
-const struct fscache_cookie_def cifs_fscache_super_index_def = {
- .name = "CIFS.super",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
- .check_aux = cifs_fscache_super_check_aux,
-};
-
-static enum
-fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct cifs_fscache_inode_auxdata auxdata;
- struct cifsInodeInfo *cifsi = cookie_netfs_data;
-
- if (datalen != sizeof(auxdata))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- if (memcmp(data, &auxdata, datalen) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-const struct fscache_cookie_def cifs_fscache_inode_object_def = {
- .name = "CIFS.uniqueid",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = cifs_fscache_inode_check_aux,
-};
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
deleted file mode 100644
index 007427ba75e5..000000000000
--- a/fs/cifs/cifs_dfs_ref.c
+++ /dev/null
@@ -1,429 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains the CIFS DFS referral mounting routines used for handling
- * traversal via DFS junction point
- *
- * Copyright (c) 2007 Igor Mammedov
- * Copyright (C) International Business Machines Corp., 2008
- * Author(s): Igor Mammedov (niallain@gmail.com)
- * Steve French (sfrench@us.ibm.com)
- */
-
-#include <linux/dcache.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-#include <linux/vfs.h>
-#include <linux/fs.h>
-#include <linux/inet.h>
-#include "cifsglob.h"
-#include "cifsproto.h"
-#include "cifsfs.h"
-#include "dns_resolve.h"
-#include "cifs_debug.h"
-#include "cifs_unicode.h"
-#include "dfs_cache.h"
-#include "fs_context.h"
-
-static LIST_HEAD(cifs_dfs_automount_list);
-
-static void cifs_dfs_expire_automounts(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cifs_dfs_automount_task,
- cifs_dfs_expire_automounts);
-static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ;
-
-static void cifs_dfs_expire_automounts(struct work_struct *work)
-{
- struct list_head *list = &cifs_dfs_automount_list;
-
- mark_mounts_for_expiry(list);
- if (!list_empty(list))
- schedule_delayed_work(&cifs_dfs_automount_task,
- cifs_dfs_mountpoint_expiry_timeout);
-}
-
-void cifs_dfs_release_automount_timer(void)
-{
- BUG_ON(!list_empty(&cifs_dfs_automount_list));
- cancel_delayed_work_sync(&cifs_dfs_automount_task);
-}
-
-/**
- * cifs_build_devname - build a devicename from a UNC and optional prepath
- * @nodename: pointer to UNC string
- * @prepath: pointer to prefixpath (or NULL if there isn't one)
- *
- * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer
- * big enough to hold the final thing. Copy the UNC from the nodename, and
- * concatenate the prepath onto the end of it if there is one.
- *
- * Returns pointer to the built string, or a ERR_PTR. Caller is responsible
- * for freeing the returned string.
- */
-static char *
-cifs_build_devname(char *nodename, const char *prepath)
-{
- size_t pplen;
- size_t unclen;
- char *dev;
- char *pos;
-
- /* skip over any preceding delimiters */
- nodename += strspn(nodename, "\\");
- if (!*nodename)
- return ERR_PTR(-EINVAL);
-
- /* get length of UNC and set pos to last char */
- unclen = strlen(nodename);
- pos = nodename + unclen - 1;
-
- /* trim off any trailing delimiters */
- while (*pos == '\\') {
- --pos;
- --unclen;
- }
-
- /* allocate a buffer:
- * +2 for preceding "//"
- * +1 for delimiter between UNC and prepath
- * +1 for trailing NULL
- */
- pplen = prepath ? strlen(prepath) : 0;
- dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- pos = dev;
- /* add the initial "//" */
- *pos = '/';
- ++pos;
- *pos = '/';
- ++pos;
-
- /* copy in the UNC portion from referral */
- memcpy(pos, nodename, unclen);
- pos += unclen;
-
- /* copy the prefixpath remainder (if there is one) */
- if (pplen) {
- *pos = '/';
- ++pos;
- memcpy(pos, prepath, pplen);
- pos += pplen;
- }
-
- /* NULL terminator */
- *pos = '\0';
-
- convert_delimiter(dev, '/');
- return dev;
-}
-
-
-/**
- * cifs_compose_mount_options - creates mount options for referral
- * @sb_mountdata: parent/root DFS mount options (template)
- * @fullpath: full path in UNC format
- * @ref: optional server's referral
- * @devname: return the built cifs device name if passed pointer not NULL
- * creates mount options for submount based on template options sb_mountdata
- * and replacing unc,ip,prefixpath options with ones we've got form ref_unc.
- *
- * Returns: pointer to new mount options or ERR_PTR.
- * Caller is responsible for freeing returned value if it is not error.
- */
-char *cifs_compose_mount_options(const char *sb_mountdata,
- const char *fullpath,
- const struct dfs_info3_param *ref,
- char **devname)
-{
- int rc;
- char *name;
- char *mountdata = NULL;
- const char *prepath = NULL;
- int md_len;
- char *tkn_e;
- char *srvIP = NULL;
- char sep = ',';
- int off, noff;
-
- if (sb_mountdata == NULL)
- return ERR_PTR(-EINVAL);
-
- if (ref) {
- if (WARN_ON_ONCE(!ref->node_name || ref->path_consumed < 0))
- return ERR_PTR(-EINVAL);
-
- if (strlen(fullpath) - ref->path_consumed) {
- prepath = fullpath + ref->path_consumed;
- /* skip initial delimiter */
- if (*prepath == '/' || *prepath == '\\')
- prepath++;
- }
-
- name = cifs_build_devname(ref->node_name, prepath);
- if (IS_ERR(name)) {
- rc = PTR_ERR(name);
- name = NULL;
- goto compose_mount_options_err;
- }
- } else {
- name = cifs_build_devname((char *)fullpath, NULL);
- if (IS_ERR(name)) {
- rc = PTR_ERR(name);
- name = NULL;
- goto compose_mount_options_err;
- }
- }
-
- rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
- if (rc < 0) {
- cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
- __func__, name, rc);
- goto compose_mount_options_err;
- }
-
- /*
- * In most cases, we'll be building a shorter string than the original,
- * but we do have to assume that the address in the ip= option may be
- * much longer than the original. Add the max length of an address
- * string to the length of the original string to allow for worst case.
- */
- md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
- mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
- if (mountdata == NULL) {
- rc = -ENOMEM;
- goto compose_mount_options_err;
- }
-
- /* copy all options except of unc,ip,prefixpath */
- off = 0;
- if (strncmp(sb_mountdata, "sep=", 4) == 0) {
- sep = sb_mountdata[4];
- strncpy(mountdata, sb_mountdata, 5);
- off += 5;
- }
-
- do {
- tkn_e = strchr(sb_mountdata + off, sep);
- if (tkn_e == NULL)
- noff = strlen(sb_mountdata + off);
- else
- noff = tkn_e - (sb_mountdata + off) + 1;
-
- if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
- off += noff;
- continue;
- }
- if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
- off += noff;
- continue;
- }
- if (strncasecmp(sb_mountdata + off, "ip=", 3) == 0) {
- off += noff;
- continue;
- }
- if (strncasecmp(sb_mountdata + off, "prefixpath=", 11) == 0) {
- off += noff;
- continue;
- }
- strncat(mountdata, sb_mountdata + off, noff);
- off += noff;
- } while (tkn_e);
- strcat(mountdata, sb_mountdata + off);
- mountdata[md_len] = '\0';
-
- /* copy new IP and ref share name */
- if (mountdata[strlen(mountdata) - 1] != sep)
- strncat(mountdata, &sep, 1);
- strcat(mountdata, "ip=");
- strcat(mountdata, srvIP);
-
- if (devname)
- *devname = name;
- else
- kfree(name);
-
- /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
- /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
-
-compose_mount_options_out:
- kfree(srvIP);
- return mountdata;
-
-compose_mount_options_err:
- kfree(mountdata);
- mountdata = ERR_PTR(rc);
- kfree(name);
- goto compose_mount_options_out;
-}
-
-/**
- * cifs_dfs_do_mount - mounts specified path using DFS full path
- *
- * Always pass down @fullpath to smb3_do_mount() so we can use the root server
- * to perform failover in case we failed to connect to the first target in the
- * referral.
- *
- * @mntpt: directory entry for the path we are trying to automount
- * @cifs_sb: parent/root superblock
- * @fullpath: full path in UNC format
- */
-static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
- struct cifs_sb_info *cifs_sb,
- const char *fullpath)
-{
- struct vfsmount *mnt;
- char *mountdata;
- char *devname;
-
- devname = kstrdup(fullpath, GFP_KERNEL);
- if (!devname)
- return ERR_PTR(-ENOMEM);
-
- convert_delimiter(devname, '/');
-
- /* TODO: change to call fs_context_for_mount(), fill in context directly, call fc_mount */
-
- /* See afs_mntpt_do_automount in fs/afs/mntpt.c for an example */
-
- /* strip first '\' from fullpath */
- mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- fullpath + 1, NULL, NULL);
- if (IS_ERR(mountdata)) {
- kfree(devname);
- return (struct vfsmount *)mountdata;
- }
-
- mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
- kfree(mountdata);
- kfree(devname);
- return mnt;
-}
-
-/*
- * Create a vfsmount that we can automount
- */
-static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
-{
- struct cifs_sb_info *cifs_sb;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
- void *page;
- char *full_path, *root_path;
- unsigned int xid;
- int rc;
- struct vfsmount *mnt;
-
- cifs_dbg(FYI, "in %s\n", __func__);
- BUG_ON(IS_ROOT(mntpt));
-
- /*
- * The MSDFS spec states that paths in DFS referral requests and
- * responses must be prefixed by a single '\' character instead of
- * the double backslashes usually used in the UNC. This function
- * gives us the latter, so we must adjust the result.
- */
- mnt = ERR_PTR(-ENOMEM);
-
- cifs_sb = CIFS_SB(mntpt->d_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
- mnt = ERR_PTR(-EREMOTE);
- goto cdda_exit;
- }
-
- page = alloc_dentry_path();
- /* always use tree name prefix */
- full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
- if (IS_ERR(full_path)) {
- mnt = ERR_CAST(full_path);
- goto free_full_path;
- }
-
- convert_delimiter(full_path, '\\');
-
- cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
-
- if (!cifs_sb_master_tlink(cifs_sb)) {
- cifs_dbg(FYI, "%s: master tlink is NULL\n", __func__);
- goto free_full_path;
- }
-
- tcon = cifs_sb_master_tcon(cifs_sb);
- if (!tcon) {
- cifs_dbg(FYI, "%s: master tcon is NULL\n", __func__);
- goto free_full_path;
- }
-
- root_path = kstrdup(tcon->treeName, GFP_KERNEL);
- if (!root_path) {
- mnt = ERR_PTR(-ENOMEM);
- goto free_full_path;
- }
- cifs_dbg(FYI, "%s: root path: %s\n", __func__, root_path);
-
- ses = tcon->ses;
- xid = get_xid();
-
- /*
- * If DFS root has been expired, then unconditionally fetch it again to
- * refresh DFS referral cache.
- */
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
- root_path + 1, NULL, NULL);
- if (!rc) {
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
- cifs_remap(cifs_sb), full_path + 1,
- NULL, NULL);
- }
-
- free_xid(xid);
-
- if (rc) {
- mnt = ERR_PTR(rc);
- goto free_root_path;
- }
- /*
- * OK - we were able to get and cache a referral for @full_path.
- *
- * Now, pass it down to cifs_mount() and it will retry every available
- * node server in case of failures - no need to do it here.
- */
- mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
- cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
- full_path + 1, mnt);
-
-free_root_path:
- kfree(root_path);
-free_full_path:
- free_dentry_path(page);
-cdda_exit:
- cifs_dbg(FYI, "leaving %s\n" , __func__);
- return mnt;
-}
-
-/*
- * Attempt to automount the referral
- */
-struct vfsmount *cifs_dfs_d_automount(struct path *path)
-{
- struct vfsmount *newmnt;
-
- cifs_dbg(FYI, "in %s\n", __func__);
-
- newmnt = cifs_dfs_do_automount(path->dentry);
- if (IS_ERR(newmnt)) {
- cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__);
- return newmnt;
- }
-
- mntget(newmnt); /* prevent immediate expiration */
- mnt_set_expiry(newmnt, &cifs_dfs_automount_list);
- schedule_delayed_work(&cifs_dfs_automount_task,
- cifs_dfs_mountpoint_expiry_timeout);
- cifs_dbg(FYI, "leaving %s [ok]\n" , __func__);
- return newmnt;
-}
-
-const struct inode_operations cifs_dfs_referral_inode_operations = {
-};
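For the record, a worked example of what the removed cifs_build_devname() produced (the names are assumed, for illustration only): given a referral node name of \srv\share\ and a prepath of dir/sub, it skipped the leading and trailing backslashes, assembled //srv\share/dir/sub, and convert_delimiter() then turned the remaining backslash into '/', yielding //srv/share/dir/sub — the device name that cifs_compose_mount_options() went on to resolve and splice into the child mount options.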
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
deleted file mode 100644
index 80b3d845419f..000000000000
--- a/fs/cifs/cifs_unicode.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * cifs_unicode: Unicode kernel case support
- *
- * Function:
- * Convert a unicode character to upper or lower case using
- * compressed tables.
- *
- * Copyright (c) International Business Machines Corp., 2000,2009
- *
- * Notes:
- * These APIs are based on the C library functions. The semantics
- * should match the C functions but with expanded size operands.
- *
- * The upper/lower functions are based on a table created by mkupr.
- * This is a compressed table of upper and lower case conversion.
- */
-#ifndef _CIFS_UNICODE_H
-#define _CIFS_UNICODE_H
-
-#include <asm/byteorder.h>
-#include <linux/types.h>
-#include <linux/nls.h>
-
-#define UNIUPR_NOLOWER /* Example to not expand lower case tables */
-
-/*
- * Windows maps these to the user defined 16 bit Unicode range since they are
- * reserved symbols (along with \ and /), otherwise illegal to store
- * in filenames in NTFS
- */
-#define UNI_ASTERISK (__u16) ('*' + 0xF000)
-#define UNI_QUESTION (__u16) ('?' + 0xF000)
-#define UNI_COLON (__u16) (':' + 0xF000)
-#define UNI_GRTRTHAN (__u16) ('>' + 0xF000)
-#define UNI_LESSTHAN (__u16) ('<' + 0xF000)
-#define UNI_PIPE (__u16) ('|' + 0xF000)
-#define UNI_SLASH (__u16) ('\\' + 0xF000)
-
-/*
- * Macs use an older "SFM" mapping of the symbols above. Fortunately it does
- * not conflict (although almost does) with the mapping above.
- */
-
-#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
-#define SFM_ASTERISK ((__u16) 0xF021)
-#define SFM_QUESTION ((__u16) 0xF025)
-#define SFM_COLON ((__u16) 0xF022)
-#define SFM_GRTRTHAN ((__u16) 0xF024)
-#define SFM_LESSTHAN ((__u16) 0xF023)
-#define SFM_PIPE ((__u16) 0xF027)
-#define SFM_SLASH ((__u16) 0xF026)
-#define SFM_SPACE ((__u16) 0xF028)
-#define SFM_PERIOD ((__u16) 0xF029)
-
-/*
- * Mapping mechanism to use when one of the seven reserved characters is
- * encountered. We can only map using one of the mechanisms at a time
- * since otherwise readdir could return directory entries which we would
- * not be able to open
- *
- * NO_MAP_UNI_RSVD = do not perform any remapping of the character
- * SFM_MAP_UNI_RSVD = map reserved characters using SFM scheme (MAC compatible)
- * SFU_MAP_UNI_RSVD = map reserved characters ala SFU ("mapchars" option)
- *
- */
-#define NO_MAP_UNI_RSVD 0
-#define SFM_MAP_UNI_RSVD 1
-#define SFU_MAP_UNI_RSVD 2
-
-/* Just define what we want from uniupr.h. We don't want to define the tables
- * in each source file.
- */
-#ifndef UNICASERANGE_DEFINED
-struct UniCaseRange {
- wchar_t start;
- wchar_t end;
- signed char *table;
-};
-#endif /* UNICASERANGE_DEFINED */
-
-#ifndef UNIUPR_NOUPPER
-extern signed char CifsUniUpperTable[512];
-extern const struct UniCaseRange CifsUniUpperRange[];
-#endif /* UNIUPR_NOUPPER */
-
-#ifndef UNIUPR_NOLOWER
-extern signed char CifsUniLowerTable[512];
-extern const struct UniCaseRange CifsUniLowerRange[];
-#endif /* UNIUPR_NOLOWER */
-
-#ifdef __KERNEL__
-int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
- const struct nls_table *cp, int map_type);
-int cifs_utf16_bytes(const __le16 *from, int maxbytes,
- const struct nls_table *codepage);
-int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *);
-char *cifs_strndup_from_utf16(const char *src, const int maxlen,
- const bool is_unicode,
- const struct nls_table *codepage);
-extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
- const struct nls_table *cp, int mapChars);
-extern int cifs_remap(struct cifs_sb_info *cifs_sb);
-extern __le16 *cifs_strndup_to_utf16(const char *src, const int maxlen,
- int *utf16_len, const struct nls_table *cp,
- int remap);
-#endif
-
-wchar_t cifs_toupper(wchar_t in);
-
-/*
- * UniStrcat: Concatenate the second string to the first
- *
- * Returns:
- * Address of the first string
- */
-static inline __le16 *
-UniStrcat(__le16 *ucs1, const __le16 *ucs2)
-{
- __le16 *anchor = ucs1; /* save a pointer to start of ucs1 */
-
- while (*ucs1++) ; /* To end of first string */
- ucs1--; /* Return to the null */
- while ((*ucs1++ = *ucs2++)) ; /* copy string 2 over */
- return anchor;
-}
-
-/*
- * UniStrchr: Find a character in a string
- *
- * Returns:
- * Address of first occurrence of character in string
- * or NULL if the character is not in the string
- */
-static inline wchar_t *
-UniStrchr(const wchar_t *ucs, wchar_t uc)
-{
- while ((*ucs != uc) && *ucs)
- ucs++;
-
- if (*ucs == uc)
- return (wchar_t *) ucs;
- return NULL;
-}
-
-/*
- * UniStrcmp: Compare two strings
- *
- * Returns:
- * < 0: First string is less than second
- * = 0: Strings are equal
- * > 0: First string is greater than second
- */
-static inline int
-UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
-{
- while ((*ucs1 == *ucs2) && *ucs1) {
- ucs1++;
- ucs2++;
- }
- return (int) *ucs1 - (int) *ucs2;
-}
-
-/*
- * UniStrcpy: Copy a string
- */
-static inline wchar_t *
-UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
-{
- wchar_t *anchor = ucs1; /* save the start of result string */
-
- while ((*ucs1++ = *ucs2++)) ;
- return anchor;
-}
-
-/*
- * UniStrlen: Return the length of a string (in 16 bit Unicode chars not bytes)
- */
-static inline size_t
-UniStrlen(const wchar_t *ucs1)
-{
- int i = 0;
-
- while (*ucs1++)
- i++;
- return i;
-}
-
-/*
- * UniStrnlen: Return the length (in 16 bit Unicode chars not bytes) of a
- * string (length limited)
- */
-static inline size_t
-UniStrnlen(const wchar_t *ucs1, int maxlen)
-{
- int i = 0;
-
- while (*ucs1++) {
- i++;
- if (i >= maxlen)
- break;
- }
- return i;
-}
-
-/*
- * UniStrncat: Concatenate length limited string
- */
-static inline wchar_t *
-UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
-{
- wchar_t *anchor = ucs1; /* save pointer to string 1 */
-
- while (*ucs1++) ;
- ucs1--; /* point to null terminator of s1 */
- while (n-- && (*ucs1 = *ucs2)) { /* copy s2 after s1 */
- ucs1++;
- ucs2++;
- }
- *ucs1 = 0; /* Null terminate the result */
- return (anchor);
-}
-
-/*
- * UniStrncmp: Compare length limited string
- */
-static inline int
-UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
-{
- if (!n)
- return 0; /* Null strings are equal */
- while ((*ucs1 == *ucs2) && *ucs1 && --n) {
- ucs1++;
- ucs2++;
- }
- return (int) *ucs1 - (int) *ucs2;
-}
-
-/*
- * UniStrncmp_le: Compare length limited string - native to little-endian
- */
-static inline int
-UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
-{
- if (!n)
- return 0; /* Null strings are equal */
- while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
- ucs1++;
- ucs2++;
- }
- return (int) *ucs1 - (int) __le16_to_cpu(*ucs2);
-}
-
-/*
- * UniStrncpy: Copy length limited string with pad
- */
-static inline wchar_t *
-UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
-{
- wchar_t *anchor = ucs1;
-
- while (n-- && *ucs2) /* Copy the strings */
- *ucs1++ = *ucs2++;
-
- n++;
- while (n--) /* Pad with nulls */
- *ucs1++ = 0;
- return anchor;
-}
-
-/*
- * UniStrncpy_le: Copy length limited string with pad to little-endian
- */
-static inline wchar_t *
-UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
-{
- wchar_t *anchor = ucs1;
-
- while (n-- && *ucs2) /* Copy the strings */
- *ucs1++ = __le16_to_cpu(*ucs2++);
-
- n++;
- while (n--) /* Pad with nulls */
- *ucs1++ = 0;
- return anchor;
-}
-
-/*
- * UniStrstr: Find a string in a string
- *
- * Returns:
- * Address of first match found
- * NULL if no matching string is found
- */
-static inline wchar_t *
-UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
-{
- const wchar_t *anchor1 = ucs1;
- const wchar_t *anchor2 = ucs2;
-
- while (*ucs1) {
- if (*ucs1 == *ucs2) {
- /* Partial match found */
- ucs1++;
- ucs2++;
- } else {
- if (!*ucs2) /* Match found */
- return (wchar_t *) anchor1;
- ucs1 = ++anchor1; /* No match */
- ucs2 = anchor2;
- }
- }
-
- if (!*ucs2) /* Both end together */
- return (wchar_t *) anchor1; /* Match found */
- return NULL; /* No match */
-}
-
-#ifndef UNIUPR_NOUPPER
-/*
- * UniToupper: Convert a unicode character to upper case
- */
-static inline wchar_t
-UniToupper(register wchar_t uc)
-{
- register const struct UniCaseRange *rp;
-
- if (uc < sizeof(CifsUniUpperTable)) {
- /* Latin characters */
- return uc + CifsUniUpperTable[uc]; /* Use base tables */
- } else {
- rp = CifsUniUpperRange; /* Use range tables */
- while (rp->start) {
- if (uc < rp->start) /* Before start of range */
- return uc; /* Uppercase = input */
- if (uc <= rp->end) /* In range */
- return uc + rp->table[uc - rp->start];
- rp++; /* Try next range */
- }
- }
- return uc; /* Past last range */
-}
-
-/*
- * UniStrupr: Upper case a unicode string
- */
-static inline __le16 *
-UniStrupr(register __le16 *upin)
-{
- register __le16 *up;
-
- up = upin;
- while (*up) { /* For all characters */
- *up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
- up++;
- }
- return upin; /* Return input pointer */
-}
-#endif /* UNIUPR_NOUPPER */
-
-#ifndef UNIUPR_NOLOWER
-/*
- * UniTolower: Convert a unicode character to lower case
- */
-static inline wchar_t
-UniTolower(register wchar_t uc)
-{
- register const struct UniCaseRange *rp;
-
- if (uc < sizeof(CifsUniLowerTable)) {
- /* Latin characters */
- return uc + CifsUniLowerTable[uc]; /* Use base tables */
- } else {
- rp = CifsUniLowerRange; /* Use range tables */
- while (rp->start) {
- if (uc < rp->start) /* Before start of range */
- return uc; /* Uppercase = input */
- if (uc <= rp->end) /* In range */
- return uc + rp->table[uc - rp->start];
- rp++; /* Try next range */
- }
- }
- return uc; /* Past last range */
-}
-
-/*
- * UniStrlwr: Lower case a unicode string
- */
-static inline wchar_t *
-UniStrlwr(register wchar_t *upin)
-{
- register wchar_t *up;
-
- up = upin;
- while (*up) { /* For all characters */
- *up = UniTolower(*up);
- up++;
- }
- return upin; /* Return input pointer */
-}
-
-#endif
-
-#endif /* _CIFS_UNICODE_H */
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
deleted file mode 100644
index 7b272fcdf0d3..000000000000
--- a/fs/cifs/cifs_uniupr.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) International Business Machines Corp., 2000,2002
- *
- * uniupr.h - Unicode compressed case ranges
-*/
-
-#ifndef UNIUPR_NOUPPER
-/*
- * Latin upper case
- */
-signed char CifsUniUpperTable[512] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 030-03f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 040-04f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 050-05f */
- 0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, /* 060-06f */
- -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, 0, 0, 0, 0, 0, /* 070-07f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 080-08f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 090-09f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0a0-0af */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0b0-0bf */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0c0-0cf */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0d0-0df */
- -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, /* 0e0-0ef */
- -32, -32, -32, -32, -32, -32, -32, 0, -32, -32, -32, -32, -32, -32, -32, 121, /* 0f0-0ff */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 100-10f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 110-11f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 120-12f */
- 0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0, /* 130-13f */
- -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, /* 140-14f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 150-15f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 160-16f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0, /* 170-17f */
- 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, /* 180-18f */
- 0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, /* 190-19f */
- 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, /* 1a0-1af */
- -1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, /* 1b0-1bf */
- 0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0, /* 1c0-1cf */
- -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e0-1ef */
- 0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1, /* 1f0-1ff */
-};
-
-/* Upper case range - Greek */
-static signed char UniCaseRangeU03a0[47] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37, /* 3a0-3af */
- 0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, /* 3b0-3bf */
- -32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
- -63, -63,
-};
-
-/* Upper case range - Cyrillic */
-static signed char UniCaseRangeU0430[48] = {
- -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, /* 430-43f */
- -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, /* 440-44f */
- 0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, 0, -80, -80, /* 450-45f */
-};
-
-/* Upper case range - Extended cyrillic */
-static signed char UniCaseRangeU0490[61] = {
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 490-49f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4a0-4af */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4b0-4bf */
- 0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
-};
-
-/* Upper case range - Extended latin and greek */
-static signed char UniCaseRangeU1e00[509] = {
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e00-1e0f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e10-1e1f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e20-1e2f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e30-1e3f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e40-1e4f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e50-1e5f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e60-1e6f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e70-1e7f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e80-1e8f */
- 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1, /* 1e90-1e9f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ea0-1eaf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1eb0-1ebf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ec0-1ecf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ed0-1edf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ee0-1eef */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, /* 1ef0-1eff */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f00-1f0f */
- 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f10-1f1f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f20-1f2f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f30-1f3f */
- 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f40-1f4f */
- 0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f50-1f5f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f60-1f6f */
- 74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112, 126, 126, 0, 0, /* 1f70-1f7f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f80-1f8f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f90-1f9f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fa0-1faf */
- 8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fb0-1fbf */
- 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fc0-1fcf */
- 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fd0-1fdf */
- 8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fe0-1fef */
- 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-
-/* Upper case range - Wide latin */
-static signed char UniCaseRangeUff40[27] = {
- 0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, /* ff40-ff4f */
- -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
-};
-
-/*
- * Upper Case Range
- */
-const struct UniCaseRange CifsUniUpperRange[] = {
- {0x03a0, 0x03ce, UniCaseRangeU03a0},
- {0x0430, 0x045f, UniCaseRangeU0430},
- {0x0490, 0x04cc, UniCaseRangeU0490},
- {0x1e00, 0x1ffc, UniCaseRangeU1e00},
- {0xff40, 0xff5a, UniCaseRangeUff40},
- {0}
-};
-#endif
-
-#ifndef UNIUPR_NOLOWER
-/*
- * Latin lower case
- */
-signed char CifsUniLowerTable[512] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 030-03f */
- 0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, /* 040-04f */
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0, /* 050-05f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 060-06f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 070-07f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 080-08f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 090-09f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0a0-0af */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0b0-0bf */
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, /* 0c0-0cf */
- 32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 0, /* 0d0-0df */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0e0-0ef */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0f0-0ff */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 100-10f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 110-11f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 120-12f */
- 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, /* 130-13f */
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, /* 140-14f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 150-15f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 160-16f */
- 1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0, 0, /* 170-17f */
- 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79, 0, /* 180-18f */
- 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, /* 190-19f */
- 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, /* 1a0-1af */
- 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, /* 1b0-1bf */
- 0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1, /* 1c0-1cf */
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, /* 1d0-1df */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e0-1ef */
- 0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1f0-1ff */
-};
-
-/* Lower case range - Greek */
-static signed char UniCaseRangeL0380[44] = {
- 0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63, /* 380-38f */
- 0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, /* 390-39f */
- 32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
-};
-
-/* Lower case range - Cyrillic */
-static signed char UniCaseRangeL0400[48] = {
- 0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 0, 80, 80, /* 400-40f */
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, /* 410-41f */
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, /* 420-42f */
-};
-
-/* Lower case range - Extended cyrillic */
-static signed char UniCaseRangeL0490[60] = {
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 490-49f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 4a0-4af */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 4b0-4bf */
- 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
-};
-
-/* Lower case range - Extended latin and greek */
-static signed char UniCaseRangeL1e00[504] = {
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e00-1e0f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e10-1e1f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e20-1e2f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e30-1e3f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e40-1e4f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e50-1e5f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e60-1e6f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e70-1e7f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1e80-1e8f */
- 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, /* 1e90-1e9f */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ea0-1eaf */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1eb0-1ebf */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ec0-1ecf */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ed0-1edf */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, /* 1ee0-1eef */
- 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, /* 1ef0-1eff */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f00-1f0f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0, /* 1f10-1f1f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f20-1f2f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f30-1f3f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0, /* 1f40-1f4f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8, /* 1f50-1f5f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f60-1f6f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f70-1f7f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f80-1f8f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1f90-1f9f */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8, /* 1fa0-1faf */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0, /* 1fb0-1fbf */
- 0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0, 0, 0, /* 1fc0-1fcf */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0, /* 1fd0-1fdf */
- 0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0, 0, 0, /* 1fe0-1fef */
- 0, 0, 0, 0, 0, 0, 0, 0,
-};
-
-/* Lower case range - Wide latin */
-static signed char UniCaseRangeLff20[27] = {
- 0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, /* ff20-ff2f */
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
-};
-
-/*
- * Lower Case Range
- */
-const struct UniCaseRange CifsUniLowerRange[] = {
- {0x0380, 0x03ab, UniCaseRangeL0380},
- {0x0400, 0x042f, UniCaseRangeL0400},
- {0x0490, 0x04cb, UniCaseRangeL0490},
- {0x1e00, 0x1ff7, UniCaseRangeL1e00},
- {0xff20, 0xff3a, UniCaseRangeLff20},
- {0}
-};
-#endif
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
deleted file mode 100644
index d118282071b3..000000000000
--- a/fs/cifs/cifsencrypt.c
+++ /dev/null
@@ -1,763 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- *
- * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
- * for more detailed information
- *
- * Copyright (C) International Business Machines Corp., 2005,2013
- * Author(s): Steve French (sfrench@us.ibm.com)
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include "cifspdu.h"
-#include "cifsglob.h"
-#include "cifs_debug.h"
-#include "cifs_unicode.h"
-#include "cifsproto.h"
-#include "ntlmssp.h"
-#include <linux/ctype.h>
-#include <linux/random.h>
-#include <linux/highmem.h>
-#include <linux/fips.h>
-#include "../smbfs_common/arc4.h"
-#include <crypto/aead.h>
-
-int __cifs_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server, char *signature,
- struct shash_desc *shash)
-{
- int i;
- int rc;
- struct kvec *iov = rqst->rq_iov;
- int n_vec = rqst->rq_nvec;
- int is_smb2 = server->vals->header_preamble_size == 0;
-
- /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
- if (is_smb2) {
- if (iov[0].iov_len <= 4)
- return -EIO;
- i = 0;
- } else {
- if (n_vec < 2 || iov[0].iov_len != 4)
- return -EIO;
- i = 1; /* skip rfc1002 length */
- }
-
- for (; i < n_vec; i++) {
- if (iov[i].iov_len == 0)
- continue;
- if (iov[i].iov_base == NULL) {
- cifs_dbg(VFS, "null iovec entry\n");
- return -EIO;
- }
-
- rc = crypto_shash_update(shash,
- iov[i].iov_base, iov[i].iov_len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with payload\n",
- __func__);
- return rc;
- }
- }
-
- /* now hash over the rq_pages array */
- for (i = 0; i < rqst->rq_npages; i++) {
- void *kaddr;
- unsigned int len, offset;
-
- rqst_page_get_length(rqst, i, &len, &offset);
-
- kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
-
- rc = crypto_shash_update(shash, kaddr, len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with payload\n",
- __func__);
- kunmap(rqst->rq_pages[i]);
- return rc;
- }
-
- kunmap(rqst->rq_pages[i]);
- }
-
- rc = crypto_shash_final(shash, signature);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate hash\n", __func__);
-
- return rc;
-}
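A minimal, self-contained sketch of the crypto_shash pattern that __cifs_calc_signature() drives above and that cifs_calc_signature() below builds on: hash the session key first, then the PDU bytes, then read out the digest. The helper name sketch_md5_over() is hypothetical and only illustrative.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sketch_md5_over(const u8 *key, size_t keylen,
                           const u8 *pdu, size_t pdulen,
                           u8 *digest)  /* 16 bytes for md5 */
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int rc;

        tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;

        rc = crypto_shash_init(desc);
        if (!rc)
                rc = crypto_shash_update(desc, key, keylen);    /* session key first */
        if (!rc)
                rc = crypto_shash_update(desc, pdu, pdulen);    /* then the SMB PDU */
        if (!rc)
                rc = crypto_shash_final(desc, digest);          /* 16-byte MD5 digest */

        kfree(desc);
        crypto_free_shash(tfm);
        return rc;
}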
-
-/*
- * Calculate and return the CIFS signature based on the mac key and SMB PDU.
- * The 16 byte signature must be allocated by the caller. Note we only use the
- * 1st eight bytes and that the smb header signature field on input contains
- * the sequence number before this function is called. Also, this function
- * should be called with the server->srv_mutex held.
- */
-static int cifs_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server, char *signature)
-{
- int rc;
-
- if (!rqst->rq_iov || !signature || !server)
- return -EINVAL;
-
- rc = cifs_alloc_hash("md5", &server->secmech.md5,
- &server->secmech.sdescmd5);
- if (rc)
- return -1;
-
- rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init md5\n", __func__);
- return rc;
- }
-
- rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
- server->session_key.response, server->session_key.len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
- return rc;
- }
-
- return __cifs_calc_signature(rqst, server, signature,
- &server->secmech.sdescmd5->shash);
-}
-
-/* must be called with server->srv_mutex held */
-int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
- __u32 *pexpected_response_sequence_number)
-{
- int rc = 0;
- char smb_signature[20];
- struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
-
- if (rqst->rq_iov[0].iov_len != 4 ||
- rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
- return -EIO;
-
- if ((cifs_pdu == NULL) || (server == NULL))
- return -EINVAL;
-
- if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
- server->tcpStatus == CifsNeedNegotiate)
- return rc;
-
- if (!server->session_estab) {
- memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
- return rc;
- }
-
- cifs_pdu->Signature.Sequence.SequenceNumber =
- cpu_to_le32(server->sequence_number);
- cifs_pdu->Signature.Sequence.Reserved = 0;
-
- *pexpected_response_sequence_number = ++server->sequence_number;
- ++server->sequence_number;
-
- rc = cifs_calc_signature(rqst, server, smb_signature);
- if (rc)
- memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
- else
- memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
-
- return rc;
-}
-
-int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
- __u32 *pexpected_response_sequence)
-{
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = n_vec };
-
- return cifs_sign_rqst(&rqst, server, pexpected_response_sequence);
-}
-
-/* must be called with server->srv_mutex held */
-int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
- __u32 *pexpected_response_sequence_number)
-{
- struct kvec iov[2];
-
- iov[0].iov_base = cifs_pdu;
- iov[0].iov_len = 4;
- iov[1].iov_base = (char *)cifs_pdu + 4;
- iov[1].iov_len = be32_to_cpu(cifs_pdu->smb_buf_length);
-
- return cifs_sign_smbv(iov, 2, server,
- pexpected_response_sequence_number);
-}
-
-int cifs_verify_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server,
- __u32 expected_sequence_number)
-{
- unsigned int rc;
- char server_response_sig[8];
- char what_we_think_sig_should_be[20];
- struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
-
- if (rqst->rq_iov[0].iov_len != 4 ||
- rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
- return -EIO;
-
- if (cifs_pdu == NULL || server == NULL)
- return -EINVAL;
-
- if (!server->session_estab)
- return 0;
-
- if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
- struct smb_com_lock_req *pSMB =
- (struct smb_com_lock_req *)cifs_pdu;
- if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
- return 0;
- }
-
- /* BB what if signatures are supposed to be on for session but
- server does not send one? BB */
-
- /* Do not need to verify session setups with signature "BSRSPYL " */
- if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
- cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
- cifs_pdu->Command);
-
- /* save off the original signature so we can modify the smb and check
- its signature against what the server sent */
- memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
-
- cifs_pdu->Signature.Sequence.SequenceNumber =
- cpu_to_le32(expected_sequence_number);
- cifs_pdu->Signature.Sequence.Reserved = 0;
-
- mutex_lock(&server->srv_mutex);
- rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be);
- mutex_unlock(&server->srv_mutex);
-
- if (rc)
- return rc;
-
-/* cifs_dump_mem("what we think it should be: ",
- what_we_think_sig_should_be, 16); */
-
- if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
- return -EACCES;
- else
- return 0;
-
-}
-
-/* Build a proper attribute value/target info pairs blob.
- * Fill in netbios and dns domain name and workstation name
- * and client time (five av pairs total plus one end-of-fields indicator).
- * Allocate domain name which gets freed when session struct is deallocated.
- */
-static int
-build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
-{
- unsigned int dlen;
- unsigned int size = 2 * sizeof(struct ntlmssp2_name);
- char *defdmname = "WORKGROUP";
- unsigned char *blobptr;
- struct ntlmssp2_name *attrptr;
-
- if (!ses->domainName) {
- ses->domainName = kstrdup(defdmname, GFP_KERNEL);
- if (!ses->domainName)
- return -ENOMEM;
- }
-
- dlen = strlen(ses->domainName);
-
- /*
- * The length of this blob is twice the size of the av pair
- * structure which holds name/size
- * (one NTLMSSP_AV_NB_DOMAIN_NAME pair followed by NTLMSSP_AV_EOL), plus
- * the unicode length of the netbios domain name.
- */
- ses->auth_key.len = size + 2 * dlen;
- ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
- if (!ses->auth_key.response) {
- ses->auth_key.len = 0;
- return -ENOMEM;
- }
-
- blobptr = ses->auth_key.response;
- attrptr = (struct ntlmssp2_name *) blobptr;
-
- /*
- * As defined in MS-NTLM 3.3.2, just this av pair field
- * is sufficient as part of the temp
- */
- attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
- attrptr->length = cpu_to_le16(2 * dlen);
- blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
- cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
-
- return 0;
-}
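For reference, the blob built by build_avpair_blob() above is just a type/length-prefixed av-pair list. A hedged userspace sketch of the same layout follows; the av_pair struct and sketch_build_target_info() are hypothetical stand-ins for ntlmssp2_name, the AV ids 2 and 0 are the MS-NLMP MsvAvNbDomainName and MsvAvEOL values, and a little-endian host is assumed.

#include <stdint.h>
#include <string.h>

struct av_pair {                /* hypothetical mirror of ntlmssp2_name */
        uint16_t type;          /* av id, little endian on the wire */
        uint16_t length;        /* payload length in bytes */
};

/* Write one NetBIOS-domain av pair followed by the EOL pair; returns blob size. */
static size_t sketch_build_target_info(uint8_t *buf,
                                       const uint16_t *dom_utf16, size_t dom_chars)
{
        struct av_pair hdr = { .type = 2, .length = (uint16_t)(dom_chars * 2) };
        size_t off = 0;

        memcpy(buf + off, &hdr, sizeof(hdr));           off += sizeof(hdr);
        memcpy(buf + off, dom_utf16, dom_chars * 2);    off += dom_chars * 2;

        hdr.type = 0;                                   /* MsvAvEOL */
        hdr.length = 0;
        memcpy(buf + off, &hdr, sizeof(hdr));           off += sizeof(hdr);

        return off;     /* 2 * sizeof(av_pair) + 2 * dom_chars, as in auth_key.len */
}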
-
-/* Server has provided av pairs/target info in the type 2 challenge
- * packet and we have plucked it and stored it within the smb session.
- * We parse that blob here to find the netbios domain name to be used
- * as part of ntlmv2 authentication (in the Target String), if it was
- * not already specified on the command line.
- * If this function returns without any error but without fetching the
- * domain name, authentication may fail against some servers but not
- * against others (those that are not particular about the target
- * string, i.e. for some just the user name might suffice).
- */
-static int
-find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
-{
- unsigned int attrsize;
- unsigned int type;
- unsigned int onesize = sizeof(struct ntlmssp2_name);
- unsigned char *blobptr;
- unsigned char *blobend;
- struct ntlmssp2_name *attrptr;
-
- if (!ses->auth_key.len || !ses->auth_key.response)
- return 0;
-
- blobptr = ses->auth_key.response;
- blobend = blobptr + ses->auth_key.len;
-
- while (blobptr + onesize < blobend) {
- attrptr = (struct ntlmssp2_name *) blobptr;
- type = le16_to_cpu(attrptr->type);
- if (type == NTLMSSP_AV_EOL)
- break;
- blobptr += 2; /* advance attr type */
- attrsize = le16_to_cpu(attrptr->length);
- blobptr += 2; /* advance attr size */
- if (blobptr + attrsize > blobend)
- break;
- if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
- break;
- if (!ses->domainName) {
- ses->domainName =
- kmalloc(attrsize + 1, GFP_KERNEL);
- if (!ses->domainName)
- return -ENOMEM;
- cifs_from_utf16(ses->domainName,
- (__le16 *)blobptr, attrsize, attrsize,
- nls_cp, NO_MAP_UNI_RSVD);
- break;
- }
- }
- blobptr += attrsize; /* advance attr value */
- }
-
- return 0;
-}
-
-/* Server has provided av pairs/target info in the type 2 challenge
- * packet and we have plucked it and stored it within the smb session.
- * We parse that blob here to find the server-given timestamp to use
- * as part of ntlmv2 authentication (or the local current time as a
- * default in case of failure).
- */
-static __le64
-find_timestamp(struct cifs_ses *ses)
-{
- unsigned int attrsize;
- unsigned int type;
- unsigned int onesize = sizeof(struct ntlmssp2_name);
- unsigned char *blobptr;
- unsigned char *blobend;
- struct ntlmssp2_name *attrptr;
- struct timespec64 ts;
-
- if (!ses->auth_key.len || !ses->auth_key.response)
- return 0;
-
- blobptr = ses->auth_key.response;
- blobend = blobptr + ses->auth_key.len;
-
- while (blobptr + onesize < blobend) {
- attrptr = (struct ntlmssp2_name *) blobptr;
- type = le16_to_cpu(attrptr->type);
- if (type == NTLMSSP_AV_EOL)
- break;
- blobptr += 2; /* advance attr type */
- attrsize = le16_to_cpu(attrptr->length);
- blobptr += 2; /* advance attr size */
- if (blobptr + attrsize > blobend)
- break;
- if (type == NTLMSSP_AV_TIMESTAMP) {
- if (attrsize == sizeof(u64))
- return *((__le64 *)blobptr);
- }
- blobptr += attrsize; /* advance attr value */
- }
-
- ktime_get_real_ts64(&ts);
- return cpu_to_le64(cifs_UnixTimeToNT(ts));
-}
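The fallback at the end of find_timestamp() converts the current wall-clock time to NT time, i.e. 100-nanosecond intervals since 1601-01-01 UTC. A minimal userspace sketch of that conversion (11644473600 is the number of seconds between 1601-01-01 and 1970-01-01; sketch_unix_to_nt() is an illustrative helper, not part of the original file):

#include <stdint.h>
#include <time.h>

static uint64_t sketch_unix_to_nt(time_t t)
{
        /* seconds since 1601, expressed in 100 ns units */
        return ((uint64_t)t + 11644473600ULL) * 10000000ULL;
}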
-
-static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
- const struct nls_table *nls_cp)
-{
- int rc = 0;
- int len;
- char nt_hash[CIFS_NTHASH_SIZE];
- __le16 *user;
- wchar_t *domain;
- wchar_t *server;
-
- if (!ses->server->secmech.sdeschmacmd5) {
- cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
- return -1;
- }
-
- /* calculate md4 hash of password */
- E_md4hash(ses->password, nt_hash, nls_cp);
-
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5, nt_hash,
- CIFS_NTHASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not set NT Hash as a key\n", __func__);
- return rc;
- }
-
- rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
- return rc;
- }
-
- /* convert ses->user_name to unicode */
- len = ses->user_name ? strlen(ses->user_name) : 0;
- user = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (user == NULL) {
- rc = -ENOMEM;
- return rc;
- }
-
- if (len) {
- len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
- UniStrupr(user);
- } else {
- memset(user, '\0', 2);
- }
-
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
- (char *)user, 2 * len);
- kfree(user);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with user\n", __func__);
- return rc;
- }
-
- /* convert ses->domainName to unicode and uppercase */
- if (ses->domainName) {
- len = strlen(ses->domainName);
-
- domain = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (domain == NULL) {
- rc = -ENOMEM;
- return rc;
- }
- len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
- nls_cp);
- rc =
- crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
- (char *)domain, 2 * len);
- kfree(domain);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with domain\n",
- __func__);
- return rc;
- }
- } else {
- /* We use ses->ip_addr if no domain name available */
- len = strlen(ses->ip_addr);
-
- server = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (server == NULL) {
- rc = -ENOMEM;
- return rc;
- }
- len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len,
- nls_cp);
- rc =
- crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
- (char *)server, 2 * len);
- kfree(server);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with server\n",
- __func__);
- return rc;
- }
- }
-
- rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
- ntlmv2_hash);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
- return rc;
-}
-
-static int
-CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
-{
- int rc;
- struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *)
- (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
- unsigned int hash_len;
-
- /* The MD5 hash starts at challenge_key.key */
- hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE +
- offsetof(struct ntlmv2_resp, challenge.key[0]));
-
- if (!ses->server->secmech.sdeschmacmd5) {
- cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
- return -1;
- }
-
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5,
- ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
- __func__);
- return rc;
- }
-
- rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
- return rc;
- }
-
- if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
- memcpy(ntlmv2->challenge.key,
- ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
- else
- memcpy(ntlmv2->challenge.key,
- ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
- ntlmv2->challenge.key, hash_len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
- return rc;
- }
-
- /* Note that the MD5 digest overwrites anon.challenge_key.key */
- rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
- ntlmv2->ntlmv2_hash);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
- return rc;
-}
-
-int
-setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
-{
- int rc;
- int baselen;
- unsigned int tilen;
- struct ntlmv2_resp *ntlmv2;
- char ntlmv2_hash[16];
- unsigned char *tiblob = NULL; /* target info blob */
- __le64 rsp_timestamp;
-
- if (nls_cp == NULL) {
- cifs_dbg(VFS, "%s called with nls_cp==NULL\n", __func__);
- return -EINVAL;
- }
-
- if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
- if (!ses->domainName) {
- if (ses->domainAuto) {
- rc = find_domain_name(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "error %d finding domain name\n",
- rc);
- goto setup_ntlmv2_rsp_ret;
- }
- } else {
- ses->domainName = kstrdup("", GFP_KERNEL);
- }
- }
- } else {
- rc = build_avpair_blob(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "error %d building av pair blob\n", rc);
- goto setup_ntlmv2_rsp_ret;
- }
- }
-
- /* Must be within 5 minutes of the server (or in range +/-2h
- * in case of Mac OS X), so simply carry over server timestamp
- * (as Windows 7 does)
- */
- rsp_timestamp = find_timestamp(ses);
-
- baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
- tilen = ses->auth_key.len;
- tiblob = ses->auth_key.response;
-
- ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
- if (!ses->auth_key.response) {
- rc = -ENOMEM;
- ses->auth_key.len = 0;
- goto setup_ntlmv2_rsp_ret;
- }
- ses->auth_key.len += baselen;
-
- ntlmv2 = (struct ntlmv2_resp *)
- (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
- ntlmv2->blob_signature = cpu_to_le32(0x00000101);
- ntlmv2->reserved = 0;
- ntlmv2->time = rsp_timestamp;
-
- get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
- ntlmv2->reserved2 = 0;
-
- memcpy(ses->auth_key.response + baselen, tiblob, tilen);
-
- mutex_lock(&ses->server->srv_mutex);
-
- rc = cifs_alloc_hash("hmac(md5)",
- &ses->server->secmech.hmacmd5,
- &ses->server->secmech.sdeschmacmd5);
- if (rc) {
- goto unlock;
- }
-
- /* calculate ntlmv2_hash */
- rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
- goto unlock;
- }
-
- /* calculate first part of the client response (CR1) */
- rc = CalcNTLMv2_response(ses, ntlmv2_hash);
- if (rc) {
- cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
- goto unlock;
- }
-
- /* now calculate the session key for NTLMv2 */
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5,
- ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
- __func__);
- goto unlock;
- }
-
- rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
- goto unlock;
- }
-
- rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
- ntlmv2->ntlmv2_hash,
- CIFS_HMAC_MD5_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
- goto unlock;
- }
-
- rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
- ses->auth_key.response);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
-unlock:
- mutex_unlock(&ses->server->srv_mutex);
-setup_ntlmv2_rsp_ret:
- kfree(tiblob);
-
- return rc;
-}
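A short recap of the derivation chain implemented by calc_ntlmv2_hash(), CalcNTLMv2_response() and setup_ntlmv2_rsp() above; hmac_md5() and md4() below are hypothetical one-shot helpers standing in for the crypto_shash calls in the code.

/*
 *   nt_hash     = md4(password)                               (E_md4hash)
 *   ntlmv2_hash = hmac_md5(nt_hash,
 *                          utf16(upper(user)) + utf16(domain or server ip))
 *   ntproofstr  = hmac_md5(ntlmv2_hash, server_challenge + blob)
 *   session_key = hmac_md5(ntlmv2_hash, ntproofstr)
 */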
-
-int
-calc_seckey(struct cifs_ses *ses)
-{
- unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
- struct arc4_ctx *ctx_arc4;
-
- if (fips_enabled)
- return -ENODEV;
-
- get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
-
- ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
- if (!ctx_arc4) {
- cifs_dbg(VFS, "Could not allocate arc4 context\n");
- return -ENOMEM;
- }
-
- cifs_arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
- cifs_arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
- CIFS_CPHTXT_SIZE);
-
- /* make secondary_key/nonce as session key */
- memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE);
- /* and make len as that of session key only */
- ses->auth_key.len = CIFS_SESS_KEY_SIZE;
-
- memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE);
- kfree_sensitive(ctx_arc4);
- return 0;
-}
-
-void
-cifs_crypto_secmech_release(struct TCP_Server_Info *server)
-{
- if (server->secmech.cmacaes) {
- crypto_free_shash(server->secmech.cmacaes);
- server->secmech.cmacaes = NULL;
- }
-
- if (server->secmech.hmacsha256) {
- crypto_free_shash(server->secmech.hmacsha256);
- server->secmech.hmacsha256 = NULL;
- }
-
- if (server->secmech.md5) {
- crypto_free_shash(server->secmech.md5);
- server->secmech.md5 = NULL;
- }
-
- if (server->secmech.sha512) {
- crypto_free_shash(server->secmech.sha512);
- server->secmech.sha512 = NULL;
- }
-
- if (server->secmech.hmacmd5) {
- crypto_free_shash(server->secmech.hmacmd5);
- server->secmech.hmacmd5 = NULL;
- }
-
- if (server->secmech.ccmaesencrypt) {
- crypto_free_aead(server->secmech.ccmaesencrypt);
- server->secmech.ccmaesencrypt = NULL;
- }
-
- if (server->secmech.ccmaesdecrypt) {
- crypto_free_aead(server->secmech.ccmaesdecrypt);
- server->secmech.ccmaesdecrypt = NULL;
- }
-
- kfree(server->secmech.sdesccmacaes);
- server->secmech.sdesccmacaes = NULL;
- kfree(server->secmech.sdeschmacsha256);
- server->secmech.sdeschmacsha256 = NULL;
- kfree(server->secmech.sdeschmacmd5);
- server->secmech.sdeschmacmd5 = NULL;
- kfree(server->secmech.sdescmd5);
- server->secmech.sdescmd5 = NULL;
- kfree(server->secmech.sdescsha512);
- server->secmech.sdescsha512 = NULL;
-}
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
deleted file mode 100644
index 0458d28d71aa..000000000000
--- a/fs/cifs/dns_resolve.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- *
- * Copyright (c) 2007 Igor Mammedov
- * Author(s): Igor Mammedov (niallain@gmail.com)
- * Steve French (sfrench@us.ibm.com)
- * Wang Lei (wang840925@gmail.com)
- * David Howells (dhowells@redhat.com)
- *
- * Contains the CIFS DFS upcall routines used for hostname to
- * IP address translation.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/dns_resolver.h>
-#include "dns_resolve.h"
-#include "cifsglob.h"
-#include "cifsproto.h"
-#include "cifs_debug.h"
-
-/**
- * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
- * @unc: UNC path specifying the server (with '/' as delimiter)
- * @ip_addr: Where to return the IP address.
- * @expiry: Where to return the expiry time for the dns record.
- *
- * The IP address will be returned in string form, and the caller is
- * responsible for freeing it.
- *
- * Returns length of result on success, -ve on error.
- */
-int
-dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
-{
- struct sockaddr_storage ss;
- const char *hostname, *sep;
- char *name;
- int len, rc;
-
- if (!ip_addr || !unc)
- return -EINVAL;
-
- len = strlen(unc);
- if (len < 3) {
- cifs_dbg(FYI, "%s: unc is too short: %s\n", __func__, unc);
- return -EINVAL;
- }
-
- /* Discount leading slashes for cifs */
- len -= 2;
- hostname = unc + 2;
-
- /* Search for server name delimiter */
- sep = memchr(hostname, '/', len);
- if (sep)
- len = sep - hostname;
- else
- cifs_dbg(FYI, "%s: probably server name is whole unc: %s\n",
- __func__, unc);
-
- /* Try to interpret hostname as an IPv4 or IPv6 address */
- rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len);
- if (rc > 0)
- goto name_is_IP_address;
-
- /* Perform the upcall */
- rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
- NULL, ip_addr, expiry, false);
- if (rc < 0)
- cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
- __func__, len, len, hostname);
- else
- cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
- __func__, len, len, hostname, *ip_addr,
- expiry ? (*expiry) : 0);
- return rc;
-
-name_is_IP_address:
- name = kmalloc(len + 1, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- memcpy(name, hostname, len);
- name[len] = 0;
- cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %s\n",
- __func__, name);
- *ip_addr = name;
- return 0;
-}
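A minimal caller sketch for the helper above; the UNC string and the function name sketch_resolve_example() are assumptions for the example, and it assumes dns_resolve.h, <linux/slab.h> and <linux/printk.h> are included. As the kerneldoc notes, the returned string must be freed by the caller.

static int sketch_resolve_example(void)
{
        char *ip = NULL;
        time64_t expiry = 0;
        int rc;

        rc = dns_resolve_server_name_to_ip("//fileserver/share", &ip, &expiry);
        if (rc < 0)
                return rc;      /* upcall failed and name was not an IP literal */

        pr_debug("resolved to %s (expiry %lld)\n", ip, (long long)expiry);
        kfree(ip);              /* caller owns the returned string */
        return 0;
}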
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
deleted file mode 100644
index afc0df381246..000000000000
--- a/fs/cifs/dns_resolve.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
-/*
- * DNS Resolver upcall management for CIFS DFS
- * Handles host name to IP address resolution
- *
- * Copyright (c) International Business Machines Corp., 2008
- * Author(s): Steve French (sfrench@us.ibm.com)
- *
- */
-
-#ifndef _DNS_RESOLVE_H
-#define _DNS_RESOLVE_H
-
-#ifdef __KERNEL__
-extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
-#endif /* KERNEL */
-
-#endif /* _DNS_RESOLVE_H */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
deleted file mode 100644
index 1b855fcb179e..000000000000
--- a/fs/cifs/file.c
+++ /dev/null
@@ -1,5004 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- *
- * vfs operations that deal with files
- *
- * Copyright (C) International Business Machines Corp., 2002,2010
- * Author(s): Steve French (sfrench@us.ibm.com)
- * Jeremy Allison (jra@samba.org)
- *
- */
-#include <linux/fs.h>
-#include <linux/backing-dev.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/writeback.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/delay.h>
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <asm/div64.h>
-#include "cifsfs.h"
-#include "cifspdu.h"
-#include "cifsglob.h"
-#include "cifsproto.h"
-#include "cifs_unicode.h"
-#include "cifs_debug.h"
-#include "cifs_fs_sb.h"
-#include "fscache.h"
-#include "smbdirect.h"
-#include "fs_context.h"
-#include "cifs_ioctl.h"
-
-static inline int cifs_convert_flags(unsigned int flags)
-{
- if ((flags & O_ACCMODE) == O_RDONLY)
- return GENERIC_READ;
- else if ((flags & O_ACCMODE) == O_WRONLY)
- return GENERIC_WRITE;
- else if ((flags & O_ACCMODE) == O_RDWR) {
- /* GENERIC_ALL is too much permission to request;
- it can cause unnecessary access denied on create */
- /* return GENERIC_ALL; */
- return (GENERIC_READ | GENERIC_WRITE);
- }
-
- return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
- FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
- FILE_READ_DATA);
-}
-
-static u32 cifs_posix_convert_flags(unsigned int flags)
-{
- u32 posix_flags = 0;
-
- if ((flags & O_ACCMODE) == O_RDONLY)
- posix_flags = SMB_O_RDONLY;
- else if ((flags & O_ACCMODE) == O_WRONLY)
- posix_flags = SMB_O_WRONLY;
- else if ((flags & O_ACCMODE) == O_RDWR)
- posix_flags = SMB_O_RDWR;
-
- if (flags & O_CREAT) {
- posix_flags |= SMB_O_CREAT;
- if (flags & O_EXCL)
- posix_flags |= SMB_O_EXCL;
- } else if (flags & O_EXCL)
- cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
- current->comm, current->tgid);
-
- if (flags & O_TRUNC)
- posix_flags |= SMB_O_TRUNC;
- /* be safe and imply O_SYNC for O_DSYNC */
- if (flags & O_DSYNC)
- posix_flags |= SMB_O_SYNC;
- if (flags & O_DIRECTORY)
- posix_flags |= SMB_O_DIRECTORY;
- if (flags & O_NOFOLLOW)
- posix_flags |= SMB_O_NOFOLLOW;
- if (flags & O_DIRECT)
- posix_flags |= SMB_O_DIRECT;
-
- return posix_flags;
-}
-
-static inline int cifs_get_disposition(unsigned int flags)
-{
- if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
- return FILE_CREATE;
- else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
- return FILE_OVERWRITE_IF;
- else if ((flags & O_CREAT) == O_CREAT)
- return FILE_OPEN_IF;
- else if ((flags & O_TRUNC) == O_TRUNC)
- return FILE_OVERWRITE;
- else
- return FILE_OPEN;
-}
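Spelled out next to the helper (the same mapping is repeated in the larger comment inside cifs_nt_open() below), the disposition returned by cifs_get_disposition() for the common open(2) flag combinations is:

/*
 *   O_CREAT | O_EXCL   -> FILE_CREATE        (fail if the file already exists)
 *   O_CREAT | O_TRUNC  -> FILE_OVERWRITE_IF  (create, or truncate if it exists)
 *   O_CREAT            -> FILE_OPEN_IF       (open, or create if missing)
 *   O_TRUNC            -> FILE_OVERWRITE     (truncate an existing file)
 *   none of the above  -> FILE_OPEN          (open an existing file)
 */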
-
-int cifs_posix_open(const char *full_path, struct inode **pinode,
- struct super_block *sb, int mode, unsigned int f_flags,
- __u32 *poplock, __u16 *pnetfid, unsigned int xid)
-{
- int rc;
- FILE_UNIX_BASIC_INFO *presp_data;
- __u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifs_fattr fattr;
- struct tcon_link *tlink;
- struct cifs_tcon *tcon;
-
- cifs_dbg(FYI, "posix open %s\n", full_path);
-
- presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
- if (presp_data == NULL)
- return -ENOMEM;
-
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink)) {
- rc = PTR_ERR(tlink);
- goto posix_open_ret;
- }
-
- tcon = tlink_tcon(tlink);
- mode &= ~current_umask();
-
- posix_flags = cifs_posix_convert_flags(f_flags);
- rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
- poplock, full_path, cifs_sb->local_nls,
- cifs_remap(cifs_sb));
- cifs_put_tlink(tlink);
-
- if (rc)
- goto posix_open_ret;
-
- if (presp_data->Type == cpu_to_le32(-1))
- goto posix_open_ret; /* open ok, caller does qpathinfo */
-
- if (!pinode)
- goto posix_open_ret; /* caller does not need info */
-
- cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
-
- /* get new inode and set it up */
- if (*pinode == NULL) {
- cifs_fill_uniqueid(sb, &fattr);
- *pinode = cifs_iget(sb, &fattr);
- if (!*pinode) {
- rc = -ENOMEM;
- goto posix_open_ret;
- }
- } else {
- cifs_revalidate_mapping(*pinode);
- rc = cifs_fattr_to_inode(*pinode, &fattr);
- }
-
-posix_open_ret:
- kfree(presp_data);
- return rc;
-}
-
-static int
-cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
- struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
- struct cifs_fid *fid, unsigned int xid)
-{
- int rc;
- int desired_access;
- int disposition;
- int create_options = CREATE_NOT_DIR;
- FILE_ALL_INFO *buf;
- struct TCP_Server_Info *server = tcon->ses->server;
- struct cifs_open_parms oparms;
-
- if (!server->ops->open)
- return -ENOSYS;
-
- desired_access = cifs_convert_flags(f_flags);
-
-/*********************************************************************
- * open flag mapping table:
- *
- * POSIX Flag CIFS Disposition
- * ---------- ----------------
- * O_CREAT FILE_OPEN_IF
- * O_CREAT | O_EXCL FILE_CREATE
- * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
- * O_TRUNC FILE_OVERWRITE
- * none of the above FILE_OPEN
- *
- * Note that there is not a direct match for the disposition
- * FILE_SUPERSEDE (i.e. create whether or not the file exists);
- * O_CREAT | O_TRUNC is similar, but it truncates the existing
- * file rather than creating a new file as FILE_SUPERSEDE does
- * (which uses the attributes / metadata passed in on the open call).
- *?
- *? O_SYNC is a reasonable match to CIFS writethrough flag
- *? and the read write flags match reasonably. O_LARGEFILE
- *? is irrelevant because largefile support is always used
- *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
- * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
- *********************************************************************/
-
- disposition = cifs_get_disposition(f_flags);
-
- /* BB pass O_SYNC flag through on file attributes .. BB */
-
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
-
- oparms.tcon = tcon;
- oparms.cifs_sb = cifs_sb;
- oparms.desired_access = desired_access;
- oparms.create_options = cifs_create_options(cifs_sb, create_options);
- oparms.disposition = disposition;
- oparms.path = full_path;
- oparms.fid = fid;
- oparms.reconnect = false;
-
- rc = server->ops->open(xid, &oparms, oplock, buf);
-
- if (rc)
- goto out;
-
- /* TODO: Add support for calling posix query info but with passing in fid */
- if (tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
- xid);
- else
- rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
- xid, fid);
-
- if (rc) {
- server->ops->close(xid, tcon, fid);
- if (rc == -ESTALE)
- rc = -EOPENSTALE;
- }
-
-out:
- kfree(buf);
- return rc;
-}
-
-static bool
-cifs_has_mand_locks(struct cifsInodeInfo *cinode)
-{
- struct cifs_fid_locks *cur;
- bool has_locks = false;
-
- down_read(&cinode->lock_sem);
- list_for_each_entry(cur, &cinode->llist, llist) {
- if (!list_empty(&cur->locks)) {
- has_locks = true;
- break;
- }
- }
- up_read(&cinode->lock_sem);
- return has_locks;
-}
-
-void
-cifs_down_write(struct rw_semaphore *sem)
-{
- while (!down_write_trylock(sem))
- msleep(10);
-}
-
-static void cifsFileInfo_put_work(struct work_struct *work);
-
-struct cifsFileInfo *
-cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
- struct tcon_link *tlink, __u32 oplock)
-{
- struct dentry *dentry = file_dentry(file);
- struct inode *inode = d_inode(dentry);
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifsFileInfo *cfile;
- struct cifs_fid_locks *fdlocks;
- struct cifs_tcon *tcon = tlink_tcon(tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
-
- cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
- if (cfile == NULL)
- return cfile;
-
- fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
- if (!fdlocks) {
- kfree(cfile);
- return NULL;
- }
-
- INIT_LIST_HEAD(&fdlocks->locks);
- fdlocks->cfile = cfile;
- cfile->llist = fdlocks;
-
- cfile->count = 1;
- cfile->pid = current->tgid;
- cfile->uid = current_fsuid();
- cfile->dentry = dget(dentry);
- cfile->f_flags = file->f_flags;
- cfile->invalidHandle = false;
- cfile->deferred_close_scheduled = false;
- cfile->tlink = cifs_get_tlink(tlink);
- INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
- INIT_WORK(&cfile->put, cifsFileInfo_put_work);
- INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
- mutex_init(&cfile->fh_mutex);
- spin_lock_init(&cfile->file_info_lock);
-
- cifs_sb_active(inode->i_sb);
-
- /*
- * If the server returned a read oplock and we have mandatory brlocks,
- * set oplock level to None.
- */
- if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
- cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
- oplock = 0;
- }
-
- cifs_down_write(&cinode->lock_sem);
- list_add(&fdlocks->llist, &cinode->llist);
- up_write(&cinode->lock_sem);
-
- spin_lock(&tcon->open_file_lock);
- if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
- oplock = fid->pending_open->oplock;
- list_del(&fid->pending_open->olist);
-
- fid->purge_cache = false;
- server->ops->set_fid(cfile, fid, oplock);
-
- list_add(&cfile->tlist, &tcon->openFileList);
- atomic_inc(&tcon->num_local_opens);
-
- /* if this is a readable file instance, put it first in the list */
- spin_lock(&cinode->open_file_lock);
- if (file->f_mode & FMODE_READ)
- list_add(&cfile->flist, &cinode->openFileList);
- else
- list_add_tail(&cfile->flist, &cinode->openFileList);
- spin_unlock(&cinode->open_file_lock);
- spin_unlock(&tcon->open_file_lock);
-
- if (fid->purge_cache)
- cifs_zap_mapping(inode);
-
- file->private_data = cfile;
- return cfile;
-}
-
-struct cifsFileInfo *
-cifsFileInfo_get(struct cifsFileInfo *cifs_file)
-{
- spin_lock(&cifs_file->file_info_lock);
- cifsFileInfo_get_locked(cifs_file);
- spin_unlock(&cifs_file->file_info_lock);
- return cifs_file;
-}
-
-static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
-{
- struct inode *inode = d_inode(cifs_file->dentry);
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct cifsLockInfo *li, *tmp;
- struct super_block *sb = inode->i_sb;
-
- cifs_fscache_release_inode_cookie(inode);
-
- /*
- * Delete any outstanding lock records. We'll lose them when the file
- * is closed anyway.
- */
- cifs_down_write(&cifsi->lock_sem);
- list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
- list_del(&li->llist);
- cifs_del_lock_waiters(li);
- kfree(li);
- }
- list_del(&cifs_file->llist->llist);
- kfree(cifs_file->llist);
- up_write(&cifsi->lock_sem);
-
- cifs_put_tlink(cifs_file->tlink);
- dput(cifs_file->dentry);
- cifs_sb_deactive(sb);
- kfree(cifs_file);
-}
-
-static void cifsFileInfo_put_work(struct work_struct *work)
-{
- struct cifsFileInfo *cifs_file = container_of(work,
- struct cifsFileInfo, put);
-
- cifsFileInfo_put_final(cifs_file);
-}
-
-/**
- * cifsFileInfo_put - release a reference of file priv data
- *
- * Always potentially wait for oplock handler. See _cifsFileInfo_put().
- *
- * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
- */
-void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
-{
- _cifsFileInfo_put(cifs_file, true, true);
-}
-
-/**
- * _cifsFileInfo_put - release a reference of file priv data
- *
- * This may involve closing the filehandle @cifs_file out on the
- * server. Must be called without holding tcon->open_file_lock,
- * cinode->open_file_lock and cifs_file->file_info_lock.
- *
- * If @wait_for_oplock_handler is true and we are releasing the last
- * reference, wait for any running oplock break handler of the file
- * and cancel any pending one.
- *
- * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
- * @wait_oplock_handler: must be false if called from oplock_break_handler
- * @offload: not offloaded on close and oplock breaks
- *
- */
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
- bool wait_oplock_handler, bool offload)
-{
- struct inode *inode = d_inode(cifs_file->dentry);
- struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct super_block *sb = inode->i_sb;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifs_fid fid;
- struct cifs_pending_open open;
- bool oplock_break_cancelled;
-
- spin_lock(&tcon->open_file_lock);
- spin_lock(&cifsi->open_file_lock);
- spin_lock(&cifs_file->file_info_lock);
- if (--cifs_file->count > 0) {
- spin_unlock(&cifs_file->file_info_lock);
- spin_unlock(&cifsi->open_file_lock);
- spin_unlock(&tcon->open_file_lock);
- return;
- }
- spin_unlock(&cifs_file->file_info_lock);
-
- if (server->ops->get_lease_key)
- server->ops->get_lease_key(inode, &fid);
-
- /* store open in pending opens to make sure we don't miss lease break */
- cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
-
- /* remove it from the lists */
- list_del(&cifs_file->flist);
- list_del(&cifs_file->tlist);
- atomic_dec(&tcon->num_local_opens);
-
- if (list_empty(&cifsi->openFileList)) {
- cifs_dbg(FYI, "closing last open instance for inode %p\n",
- d_inode(cifs_file->dentry));
- /*
- * In strict cache mode we need to invalidate the mapping on the
- * last close because it may cause an error when we open this file
- * again and get at least a level II oplock.
- */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
- set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
- cifs_set_oplock_level(cifsi, 0);
- }
-
- spin_unlock(&cifsi->open_file_lock);
- spin_unlock(&tcon->open_file_lock);
-
- oplock_break_cancelled = wait_oplock_handler ?
- cancel_work_sync(&cifs_file->oplock_break) : false;
-
- if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
- struct TCP_Server_Info *server = tcon->ses->server;
- unsigned int xid;
-
- xid = get_xid();
- if (server->ops->close_getattr)
- server->ops->close_getattr(xid, tcon, cifs_file);
- else if (server->ops->close)
- server->ops->close(xid, tcon, &cifs_file->fid);
- _free_xid(xid);
- }
-
- if (oplock_break_cancelled)
- cifs_done_oplock_break(cifsi);
-
- cifs_del_pending_open(&open);
-
- if (offload)
- queue_work(fileinfo_put_wq, &cifs_file->put);
- else
- cifsFileInfo_put_final(cifs_file);
-}
-
-int cifs_open(struct inode *inode, struct file *file)
-
-{
- int rc = -EACCES;
- unsigned int xid;
- __u32 oplock;
- struct cifs_sb_info *cifs_sb;
- struct TCP_Server_Info *server;
- struct cifs_tcon *tcon;
- struct tcon_link *tlink;
- struct cifsFileInfo *cfile = NULL;
- void *page;
- const char *full_path;
- bool posix_open_ok = false;
- struct cifs_fid fid;
- struct cifs_pending_open open;
-
- xid = get_xid();
-
- cifs_sb = CIFS_SB(inode->i_sb);
- if (unlikely(cifs_forced_shutdown(cifs_sb))) {
- free_xid(xid);
- return -EIO;
- }
-
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink)) {
- free_xid(xid);
- return PTR_ERR(tlink);
- }
- tcon = tlink_tcon(tlink);
- server = tcon->ses->server;
-
- page = alloc_dentry_path();
- full_path = build_path_from_dentry(file_dentry(file), page);
- if (IS_ERR(full_path)) {
- rc = PTR_ERR(full_path);
- goto out;
- }
-
- cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
- inode, file->f_flags, full_path);
-
- if (file->f_flags & O_DIRECT &&
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- file->f_op = &cifs_file_direct_nobrl_ops;
- else
- file->f_op = &cifs_file_direct_ops;
- }
-
- /* Get the cached handle as SMB2 close is deferred */
- rc = cifs_get_readable_path(tcon, full_path, &cfile);
- if (rc == 0) {
- if (file->f_flags == cfile->f_flags) {
- file->private_data = cfile;
- spin_lock(&CIFS_I(inode)->deferred_lock);
- cifs_del_deferred_close(cfile);
- spin_unlock(&CIFS_I(inode)->deferred_lock);
- goto out;
- } else {
- _cifsFileInfo_put(cfile, true, false);
- }
- }
-
- if (server->oplocks)
- oplock = REQ_OPLOCK;
- else
- oplock = 0;
-
- if (!tcon->broken_posix_open && tcon->unix_ext &&
- cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- /* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, &inode, inode->i_sb,
- cifs_sb->ctx->file_mode /* ignored */,
- file->f_flags, &oplock, &fid.netfid, xid);
- if (rc == 0) {
- cifs_dbg(FYI, "posix open succeeded\n");
- posix_open_ok = true;
- } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
- if (tcon->ses->serverNOS)
- cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
- tcon->ses->ip_addr,
- tcon->ses->serverNOS);
- tcon->broken_posix_open = true;
- } else if ((rc != -EIO) && (rc != -EREMOTE) &&
- (rc != -EOPNOTSUPP)) /* path not found or net err */
- goto out;
- /*
- * Else fallthrough to retry open the old way on network i/o
- * or DFS errors.
- */
- }
-
- if (server->ops->get_lease_key)
- server->ops->get_lease_key(inode, &fid);
-
- cifs_add_pending_open(&fid, tlink, &open);
-
- if (!posix_open_ok) {
- if (server->ops->get_lease_key)
- server->ops->get_lease_key(inode, &fid);
-
- rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
- file->f_flags, &oplock, &fid, xid);
- if (rc) {
- cifs_del_pending_open(&open);
- goto out;
- }
- }
-
- cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
- if (cfile == NULL) {
- if (server->ops->close)
- server->ops->close(xid, tcon, &fid);
- cifs_del_pending_open(&open);
- rc = -ENOMEM;
- goto out;
- }
-
- cifs_fscache_set_inode_cookie(inode, file);
-
- if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
- /*
- * Time to set mode which we can not set earlier due to
- * problems creating new read-only files.
- */
- struct cifs_unix_set_info_args args = {
- .mode = inode->i_mode,
- .uid = INVALID_UID, /* no change */
- .gid = INVALID_GID, /* no change */
- .ctime = NO_CHANGE_64,
- .atime = NO_CHANGE_64,
- .mtime = NO_CHANGE_64,
- .device = 0,
- };
- CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
- cfile->pid);
- }
-
-out:
- free_dentry_path(page);
- free_xid(xid);
- cifs_put_tlink(tlink);
- return rc;
-}
-
-static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
-
-/*
- * Try to reacquire byte range locks that were released when session
- * to server was lost.
- */
-static int
-cifs_relock_file(struct cifsFileInfo *cfile)
-{
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- int rc = 0;
-
- down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
- if (cinode->can_cache_brlcks) {
- /* can cache locks - no need to relock */
- up_read(&cinode->lock_sem);
- return rc;
- }
-
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
- rc = cifs_push_posix_locks(cfile);
- else
- rc = tcon->ses->server->ops->push_mand_locks(cfile);
-
- up_read(&cinode->lock_sem);
- return rc;
-}
-
-static int
-cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
-{
- int rc = -EACCES;
- unsigned int xid;
- __u32 oplock;
- struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- struct cifsInodeInfo *cinode;
- struct inode *inode;
- void *page;
- const char *full_path;
- int desired_access;
- int disposition = FILE_OPEN;
- int create_options = CREATE_NOT_DIR;
- struct cifs_open_parms oparms;
-
- xid = get_xid();
- mutex_lock(&cfile->fh_mutex);
- if (!cfile->invalidHandle) {
- mutex_unlock(&cfile->fh_mutex);
- free_xid(xid);
- return 0;
- }
-
- inode = d_inode(cfile->dentry);
- cifs_sb = CIFS_SB(inode->i_sb);
- tcon = tlink_tcon(cfile->tlink);
- server = tcon->ses->server;
-
- /*
- * Cannot grab the rename sem here because various ops, including
- * those that already hold it, can end up causing writepage to be
- * called; if the server was down, that means we end up here, and we
- * can never tell whether the caller already holds the rename_sem.
- */
- page = alloc_dentry_path();
- full_path = build_path_from_dentry(cfile->dentry, page);
- if (IS_ERR(full_path)) {
- mutex_unlock(&cfile->fh_mutex);
- free_dentry_path(page);
- free_xid(xid);
- return PTR_ERR(full_path);
- }
-
- cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
- inode, cfile->f_flags, full_path);
-
- if (tcon->ses->server->oplocks)
- oplock = REQ_OPLOCK;
- else
- oplock = 0;
-
- if (tcon->unix_ext && cap_unix(tcon->ses) &&
- (CIFS_UNIX_POSIX_PATH_OPS_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- /*
- * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
- * original open. Must mask them off for a reopen.
- */
- unsigned int oflags = cfile->f_flags &
- ~(O_CREAT | O_EXCL | O_TRUNC);
-
- rc = cifs_posix_open(full_path, NULL, inode->i_sb,
- cifs_sb->ctx->file_mode /* ignored */,
- oflags, &oplock, &cfile->fid.netfid, xid);
- if (rc == 0) {
- cifs_dbg(FYI, "posix reopen succeeded\n");
- oparms.reconnect = true;
- goto reopen_success;
- }
- /*
- * fallthrough to retry the open the old way on errors; especially
- * in the reconnect path it is important to retry hard
- */
- }
-
- desired_access = cifs_convert_flags(cfile->f_flags);
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (cfile->f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (cfile->f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
-
- if (server->ops->get_lease_key)
- server->ops->get_lease_key(inode, &cfile->fid);
-
- oparms.tcon = tcon;
- oparms.cifs_sb = cifs_sb;
- oparms.desired_access = desired_access;
- oparms.create_options = cifs_create_options(cifs_sb, create_options);
- oparms.disposition = disposition;
- oparms.path = full_path;
- oparms.fid = &cfile->fid;
- oparms.reconnect = true;
-
- /*
- * Cannot refresh the inode by passing in a file_info buf to be returned
- * by ops->open and then calling get_inode_info with the returned buf,
- * since the file might have write-behind data that needs to be flushed
- * and the server's version of the file size can be stale. If we knew for
- * sure that the inode was not dirty locally, we could do this.
- */
- rc = server->ops->open(xid, &oparms, &oplock, NULL);
- if (rc == -ENOENT && oparms.reconnect == false) {
- /* durable handle timeout is expired - open the file again */
- rc = server->ops->open(xid, &oparms, &oplock, NULL);
- /* indicate that we need to relock the file */
- oparms.reconnect = true;
- }
-
- if (rc) {
- mutex_unlock(&cfile->fh_mutex);
- cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
- cifs_dbg(FYI, "oplock: %d\n", oplock);
- goto reopen_error_exit;
- }
-
-reopen_success:
- cfile->invalidHandle = false;
- mutex_unlock(&cfile->fh_mutex);
- cinode = CIFS_I(inode);
-
- if (can_flush) {
- rc = filemap_write_and_wait(inode->i_mapping);
- if (!is_interrupt_error(rc))
- mapping_set_error(inode->i_mapping, rc);
-
- if (tcon->posix_extensions)
- rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
- else if (tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, full_path,
- inode->i_sb, xid);
- else
- rc = cifs_get_inode_info(&inode, full_path, NULL,
- inode->i_sb, xid, NULL);
- }
- /*
- * Else we are writing out data to server already and could deadlock if
- * we tried to flush data, and since we do not know if we have data that
- * would invalidate the current end of file on the server we can not go
- * to the server to get the new inode info.
- */
-
- /*
- * If the server returned a read oplock and we have mandatory brlocks,
- * set oplock level to None.
- */
- if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
- cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
- oplock = 0;
- }
-
- server->ops->set_fid(cfile, &cfile->fid, oplock);
- if (oparms.reconnect)
- cifs_relock_file(cfile);
-
-reopen_error_exit:
- free_dentry_path(page);
- free_xid(xid);
- return rc;
-}
-
-void smb2_deferred_work_close(struct work_struct *work)
-{
- struct cifsFileInfo *cfile = container_of(work,
- struct cifsFileInfo, deferred.work);
-
- spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- cifs_del_deferred_close(cfile);
- cfile->deferred_close_scheduled = false;
- spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- _cifsFileInfo_put(cfile, true, false);
-}
-
-int cifs_close(struct inode *inode, struct file *file)
-{
- struct cifsFileInfo *cfile;
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifs_deferred_close *dclose;
-
- if (file->private_data != NULL) {
- cfile = file->private_data;
- file->private_data = NULL;
- dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
- if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
- cinode->lease_granted &&
- !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
- dclose) {
- if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
- inode->i_ctime = inode->i_mtime = current_time(inode);
- cifs_fscache_update_inode_cookie(inode);
- }
- spin_lock(&cinode->deferred_lock);
- cifs_add_deferred_close(cfile, dclose);
- if (cfile->deferred_close_scheduled &&
- delayed_work_pending(&cfile->deferred)) {
- /*
- * If there is no pending work, mod_delayed_work queues new work.
- * So increase the ref count to avoid use-after-free.
- */
- if (!mod_delayed_work(deferredclose_wq,
- &cfile->deferred, cifs_sb->ctx->acregmax))
- cifsFileInfo_get(cfile);
- } else {
- /* Deferred close for files */
- queue_delayed_work(deferredclose_wq,
- &cfile->deferred, cifs_sb->ctx->acregmax);
- cfile->deferred_close_scheduled = true;
- spin_unlock(&cinode->deferred_lock);
- return 0;
- }
- spin_unlock(&cinode->deferred_lock);
- _cifsFileInfo_put(cfile, true, false);
- } else {
- _cifsFileInfo_put(cfile, true, false);
- kfree(dclose);
- }
- }
-
- /* return code from the ->release op is always ignored */
- return 0;
-}
-
-void
-cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
-{
- struct cifsFileInfo *open_file;
- struct list_head *tmp;
- struct list_head *tmp1;
- struct list_head tmp_list;
-
- if (!tcon->use_persistent || !tcon->need_reopen_files)
- return;
-
- tcon->need_reopen_files = false;
-
- cifs_dbg(FYI, "Reopen persistent handles\n");
- INIT_LIST_HEAD(&tmp_list);
-
- /* list all files open on tree connection, reopen resilient handles */
- spin_lock(&tcon->open_file_lock);
- list_for_each(tmp, &tcon->openFileList) {
- open_file = list_entry(tmp, struct cifsFileInfo, tlist);
- if (!open_file->invalidHandle)
- continue;
- cifsFileInfo_get(open_file);
- list_add_tail(&open_file->rlist, &tmp_list);
- }
- spin_unlock(&tcon->open_file_lock);
-
- list_for_each_safe(tmp, tmp1, &tmp_list) {
- open_file = list_entry(tmp, struct cifsFileInfo, rlist);
- if (cifs_reopen_file(open_file, false /* do not flush */))
- tcon->need_reopen_files = true;
- list_del_init(&open_file->rlist);
- cifsFileInfo_put(open_file);
- }
-}
-
-int cifs_closedir(struct inode *inode, struct file *file)
-{
- int rc = 0;
- unsigned int xid;
- struct cifsFileInfo *cfile = file->private_data;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- char *buf;
-
- cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
-
- if (cfile == NULL)
- return rc;
-
- xid = get_xid();
- tcon = tlink_tcon(cfile->tlink);
- server = tcon->ses->server;
-
- cifs_dbg(FYI, "Freeing private data in close dir\n");
- spin_lock(&cfile->file_info_lock);
- if (server->ops->dir_needs_close(cfile)) {
- cfile->invalidHandle = true;
- spin_unlock(&cfile->file_info_lock);
- if (server->ops->close_dir)
- rc = server->ops->close_dir(xid, tcon, &cfile->fid);
- else
- rc = -ENOSYS;
- cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
- /* not much we can do if it fails anyway, ignore rc */
- rc = 0;
- } else
- spin_unlock(&cfile->file_info_lock);
-
- buf = cfile->srch_inf.ntwrk_buf_start;
- if (buf) {
- cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
- cfile->srch_inf.ntwrk_buf_start = NULL;
- if (cfile->srch_inf.smallBuf)
- cifs_small_buf_release(buf);
- else
- cifs_buf_release(buf);
- }
-
- cifs_put_tlink(cfile->tlink);
- kfree(file->private_data);
- file->private_data = NULL;
- /* BB can we lock the filestruct while this is going on? */
- free_xid(xid);
- return rc;
-}
-
-static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
-{
- struct cifsLockInfo *lock =
- kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
- if (!lock)
- return lock;
- lock->offset = offset;
- lock->length = length;
- lock->type = type;
- lock->pid = current->tgid;
- lock->flags = flags;
- INIT_LIST_HEAD(&lock->blist);
- init_waitqueue_head(&lock->block_q);
- return lock;
-}
-
-void
-cifs_del_lock_waiters(struct cifsLockInfo *lock)
-{
- struct cifsLockInfo *li, *tmp;
- list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
- list_del_init(&li->blist);
- wake_up(&li->block_q);
- }
-}
-
-#define CIFS_LOCK_OP 0
-#define CIFS_READ_OP 1
-#define CIFS_WRITE_OP 2
-
-/* @rw_check : 0 - no op, 1 - read, 2 - write */
-static bool
-cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
- __u64 length, __u8 type, __u16 flags,
- struct cifsFileInfo *cfile,
- struct cifsLockInfo **conf_lock, int rw_check)
-{
- struct cifsLockInfo *li;
- struct cifsFileInfo *cur_cfile = fdlocks->cfile;
- struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
-
- list_for_each_entry(li, &fdlocks->locks, llist) {
- if (offset + length <= li->offset ||
- offset >= li->offset + li->length)
- continue;
- if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
- server->ops->compare_fids(cfile, cur_cfile)) {
- /* shared lock prevents write op through the same fid */
- if (!(li->type & server->vals->shared_lock_type) ||
- rw_check != CIFS_WRITE_OP)
- continue;
- }
- if ((type & server->vals->shared_lock_type) &&
- ((server->ops->compare_fids(cfile, cur_cfile) &&
- current->tgid == li->pid) || type == li->type))
- continue;
- if (rw_check == CIFS_LOCK_OP &&
- (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
- server->ops->compare_fids(cfile, cur_cfile))
- continue;
- if (conf_lock)
- *conf_lock = li;
- return true;
- }
- return false;
-}
-
-bool
-cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
- __u8 type, __u16 flags,
- struct cifsLockInfo **conf_lock, int rw_check)
-{
- bool rc = false;
- struct cifs_fid_locks *cur;
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
-
- list_for_each_entry(cur, &cinode->llist, llist) {
- rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
- flags, cfile, conf_lock,
- rw_check);
- if (rc)
- break;
- }
-
- return rc;
-}
-
-/*
- * Check if there is another lock that prevents us to set the lock (mandatory
- * style). If such a lock exists, update the flock structure with its
- * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
- * or leave it the same if we can't. Returns 0 if we don't need to request to
- * the server or 1 otherwise.
- */
-static int
-cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
- __u8 type, struct file_lock *flock)
-{
- int rc = 0;
- struct cifsLockInfo *conf_lock;
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
- bool exist;
-
- down_read(&cinode->lock_sem);
-
- exist = cifs_find_lock_conflict(cfile, offset, length, type,
- flock->fl_flags, &conf_lock,
- CIFS_LOCK_OP);
- if (exist) {
- flock->fl_start = conf_lock->offset;
- flock->fl_end = conf_lock->offset + conf_lock->length - 1;
- flock->fl_pid = conf_lock->pid;
- if (conf_lock->type & server->vals->shared_lock_type)
- flock->fl_type = F_RDLCK;
- else
- flock->fl_type = F_WRLCK;
- } else if (!cinode->can_cache_brlcks)
- rc = 1;
- else
- flock->fl_type = F_UNLCK;
-
- up_read(&cinode->lock_sem);
- return rc;
-}
-
-static void
-cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
-{
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- cifs_down_write(&cinode->lock_sem);
- list_add_tail(&lock->llist, &cfile->llist->locks);
- up_write(&cinode->lock_sem);
-}
-
-/*
- * Set the byte-range lock (mandatory style). Returns:
- * 1) 0, if we set the lock and don't need to request to the server;
- * 2) 1, if no locks prevent us but we need to request to the server;
- * 3) -EACCES, if there is a lock that prevents us and wait is false.
- */
-static int
-cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
- bool wait)
-{
- struct cifsLockInfo *conf_lock;
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- bool exist;
- int rc = 0;
-
-try_again:
- exist = false;
- cifs_down_write(&cinode->lock_sem);
-
- exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
- lock->type, lock->flags, &conf_lock,
- CIFS_LOCK_OP);
- if (!exist && cinode->can_cache_brlcks) {
- list_add_tail(&lock->llist, &cfile->llist->locks);
- up_write(&cinode->lock_sem);
- return rc;
- }
-
- if (!exist)
- rc = 1;
- else if (!wait)
- rc = -EACCES;
- else {
- list_add_tail(&lock->blist, &conf_lock->blist);
- up_write(&cinode->lock_sem);
- rc = wait_event_interruptible(lock->block_q,
- (lock->blist.prev == &lock->blist) &&
- (lock->blist.next == &lock->blist));
- if (!rc)
- goto try_again;
- cifs_down_write(&cinode->lock_sem);
- list_del_init(&lock->blist);
- }
-
- up_write(&cinode->lock_sem);
- return rc;
-}
-
-/*
- * Check if there is another lock that prevents us to set the lock (posix
- * style). If such a lock exists, update the flock structure with its
- * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
- * or leave it the same if we can't. Returns 0 if we don't need to request to
- * the server or 1 otherwise.
- */
-static int
-cifs_posix_lock_test(struct file *file, struct file_lock *flock)
-{
- int rc = 0;
- struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
- unsigned char saved_type = flock->fl_type;
-
- if ((flock->fl_flags & FL_POSIX) == 0)
- return 1;
-
- down_read(&cinode->lock_sem);
- posix_test_lock(file, flock);
-
- if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
- flock->fl_type = saved_type;
- rc = 1;
- }
-
- up_read(&cinode->lock_sem);
- return rc;
-}
-
-/*
- * Set the byte-range lock (posix style). Returns:
- * 1) <0, if the error occurs while setting the lock;
- * 2) 0, if we set the lock and don't need to request to the server;
- * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
- * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
- */
-static int
-cifs_posix_lock_set(struct file *file, struct file_lock *flock)
-{
- struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
- int rc = FILE_LOCK_DEFERRED + 1;
-
- if ((flock->fl_flags & FL_POSIX) == 0)
- return rc;
-
- cifs_down_write(&cinode->lock_sem);
- if (!cinode->can_cache_brlcks) {
- up_write(&cinode->lock_sem);
- return rc;
- }
-
- rc = posix_lock_file(file, flock, NULL);
- up_write(&cinode->lock_sem);
- return rc;
-}
-
-int
-cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
-{
- unsigned int xid;
- int rc = 0, stored_rc;
- struct cifsLockInfo *li, *tmp;
- struct cifs_tcon *tcon;
- unsigned int num, max_num, max_buf;
- LOCKING_ANDX_RANGE *buf, *cur;
- static const int types[] = {
- LOCKING_ANDX_LARGE_FILES,
- LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
- };
- int i;
-
- xid = get_xid();
- tcon = tlink_tcon(cfile->tlink);
-
- /*
- * Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it before using.
- */
- max_buf = tcon->ses->server->maxBuf;
- if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
- free_xid(xid);
- return -EINVAL;
- }
-
- BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
- PAGE_SIZE);
- max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
- PAGE_SIZE);
- max_num = (max_buf - sizeof(struct smb_hdr)) /
- sizeof(LOCKING_ANDX_RANGE);
- buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
- if (!buf) {
- free_xid(xid);
- return -ENOMEM;
- }
-
- for (i = 0; i < 2; i++) {
- cur = buf;
- num = 0;
- list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
- if (li->type != types[i])
- continue;
- cur->Pid = cpu_to_le16(li->pid);
- cur->LengthLow = cpu_to_le32((u32)li->length);
- cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
- cur->OffsetLow = cpu_to_le32((u32)li->offset);
- cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
- if (++num == max_num) {
- stored_rc = cifs_lockv(xid, tcon,
- cfile->fid.netfid,
- (__u8)li->type, 0, num,
- buf);
- if (stored_rc)
- rc = stored_rc;
- cur = buf;
- num = 0;
- } else
- cur++;
- }
-
- if (num) {
- stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
- (__u8)types[i], 0, num, buf);
- if (stored_rc)
- rc = stored_rc;
- }
- }
-
- kfree(buf);
- free_xid(xid);
- return rc;
-}
-
-static __u32
-hash_lockowner(fl_owner_t owner)
-{
- return cifs_lock_secret ^ hash32_ptr((const void *)owner);
-}
-
-struct lock_to_push {
- struct list_head llist;
- __u64 offset;
- __u64 length;
- __u32 pid;
- __u16 netfid;
- __u8 type;
-};
-
-static int
-cifs_push_posix_locks(struct cifsFileInfo *cfile)
-{
- struct inode *inode = d_inode(cfile->dentry);
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct file_lock *flock;
- struct file_lock_context *flctx = inode->i_flctx;
- unsigned int count = 0, i;
- int rc = 0, xid, type;
- struct list_head locks_to_send, *el;
- struct lock_to_push *lck, *tmp;
- __u64 length;
-
- xid = get_xid();
-
- if (!flctx)
- goto out;
-
- spin_lock(&flctx->flc_lock);
- list_for_each(el, &flctx->flc_posix) {
- count++;
- }
- spin_unlock(&flctx->flc_lock);
-
- INIT_LIST_HEAD(&locks_to_send);
-
- /*
- * Allocating count locks is enough because no FL_POSIX locks can be
- * added to the list while we are holding cinode->lock_sem, which
- * protects the locking operations on this inode.
- */
- for (i = 0; i < count; i++) {
- lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
- if (!lck) {
- rc = -ENOMEM;
- goto err_out;
- }
- list_add_tail(&lck->llist, &locks_to_send);
- }
-
- el = locks_to_send.next;
- spin_lock(&flctx->flc_lock);
- list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
- if (el == &locks_to_send) {
- /*
- * The list ended. We don't have enough allocated
- * structures - something is really wrong.
- */
- cifs_dbg(VFS, "Can't push all brlocks!\n");
- break;
- }
- length = 1 + flock->fl_end - flock->fl_start;
- if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
- type = CIFS_RDLCK;
- else
- type = CIFS_WRLCK;
- lck = list_entry(el, struct lock_to_push, llist);
- lck->pid = hash_lockowner(flock->fl_owner);
- lck->netfid = cfile->fid.netfid;
- lck->length = length;
- lck->type = type;
- lck->offset = flock->fl_start;
- }
- spin_unlock(&flctx->flc_lock);
-
- list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
- int stored_rc;
-
- stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
- lck->offset, lck->length, NULL,
- lck->type, 0);
- if (stored_rc)
- rc = stored_rc;
- list_del(&lck->llist);
- kfree(lck);
- }
-
-out:
- free_xid(xid);
- return rc;
-err_out:
- list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
- list_del(&lck->llist);
- kfree(lck);
- }
- goto out;
-}
-
-static int
-cifs_push_locks(struct cifsFileInfo *cfile)
-{
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- int rc = 0;
-
- /* we are going to update can_cache_brlcks here - need write access */
- cifs_down_write(&cinode->lock_sem);
- if (!cinode->can_cache_brlcks) {
- up_write(&cinode->lock_sem);
- return rc;
- }
-
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
- rc = cifs_push_posix_locks(cfile);
- else
- rc = tcon->ses->server->ops->push_mand_locks(cfile);
-
- cinode->can_cache_brlcks = false;
- up_write(&cinode->lock_sem);
- return rc;
-}
-
-static void
-cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
- bool *wait_flag, struct TCP_Server_Info *server)
-{
- if (flock->fl_flags & FL_POSIX)
- cifs_dbg(FYI, "Posix\n");
- if (flock->fl_flags & FL_FLOCK)
- cifs_dbg(FYI, "Flock\n");
- if (flock->fl_flags & FL_SLEEP) {
- cifs_dbg(FYI, "Blocking lock\n");
- *wait_flag = true;
- }
- if (flock->fl_flags & FL_ACCESS)
- cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
- if (flock->fl_flags & FL_LEASE)
- cifs_dbg(FYI, "Lease on file - not implemented yet\n");
- if (flock->fl_flags &
- (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
- FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
- cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
-
- *type = server->vals->large_lock_type;
- if (flock->fl_type == F_WRLCK) {
- cifs_dbg(FYI, "F_WRLCK\n");
- *type |= server->vals->exclusive_lock_type;
- *lock = 1;
- } else if (flock->fl_type == F_UNLCK) {
- cifs_dbg(FYI, "F_UNLCK\n");
- *type |= server->vals->unlock_lock_type;
- *unlock = 1;
- /* Check if unlock includes more than one lock range */
- } else if (flock->fl_type == F_RDLCK) {
- cifs_dbg(FYI, "F_RDLCK\n");
- *type |= server->vals->shared_lock_type;
- *lock = 1;
- } else if (flock->fl_type == F_EXLCK) {
- cifs_dbg(FYI, "F_EXLCK\n");
- *type |= server->vals->exclusive_lock_type;
- *lock = 1;
- } else if (flock->fl_type == F_SHLCK) {
- cifs_dbg(FYI, "F_SHLCK\n");
- *type |= server->vals->shared_lock_type;
- *lock = 1;
- } else
- cifs_dbg(FYI, "Unknown type of lock\n");
-}
-
-static int
-cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
- bool wait_flag, bool posix_lck, unsigned int xid)
-{
- int rc = 0;
- __u64 length = 1 + flock->fl_end - flock->fl_start;
- struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
- __u16 netfid = cfile->fid.netfid;
-
- if (posix_lck) {
- int posix_lock_type;
-
- rc = cifs_posix_lock_test(file, flock);
- if (!rc)
- return rc;
-
- if (type & server->vals->shared_lock_type)
- posix_lock_type = CIFS_RDLCK;
- else
- posix_lock_type = CIFS_WRLCK;
- rc = CIFSSMBPosixLock(xid, tcon, netfid,
- hash_lockowner(flock->fl_owner),
- flock->fl_start, length, flock,
- posix_lock_type, wait_flag);
- return rc;
- }
-
- rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
- if (!rc)
- return rc;
-
- /* BB we could chain these into one lock request BB */
- rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
- 1, 0, false);
- if (rc == 0) {
- rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
- type, 0, 1, false);
- flock->fl_type = F_UNLCK;
- if (rc != 0)
- cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
- rc);
- return 0;
- }
-
- if (type & server->vals->shared_lock_type) {
- flock->fl_type = F_WRLCK;
- return 0;
- }
-
- type &= ~server->vals->exclusive_lock_type;
-
- rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
- type | server->vals->shared_lock_type,
- 1, 0, false);
- if (rc == 0) {
- rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
- type | server->vals->shared_lock_type, 0, 1, false);
- flock->fl_type = F_RDLCK;
- if (rc != 0)
- cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
- rc);
- } else
- flock->fl_type = F_WRLCK;
-
- return 0;
-}
-
-void
-cifs_move_llist(struct list_head *source, struct list_head *dest)
-{
- struct list_head *li, *tmp;
- list_for_each_safe(li, tmp, source)
- list_move(li, dest);
-}
-
-void
-cifs_free_llist(struct list_head *llist)
-{
- struct cifsLockInfo *li, *tmp;
- list_for_each_entry_safe(li, tmp, llist, llist) {
- cifs_del_lock_waiters(li);
- list_del(&li->llist);
- kfree(li);
- }
-}
-
-int
-cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
- unsigned int xid)
-{
- int rc = 0, stored_rc;
- static const int types[] = {
- LOCKING_ANDX_LARGE_FILES,
- LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
- };
- unsigned int i;
- unsigned int max_num, num, max_buf;
- LOCKING_ANDX_RANGE *buf, *cur;
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- struct cifsLockInfo *li, *tmp;
- __u64 length = 1 + flock->fl_end - flock->fl_start;
- struct list_head tmp_llist;
-
- INIT_LIST_HEAD(&tmp_llist);
-
- /*
- * Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it before using.
- */
- max_buf = tcon->ses->server->maxBuf;
- if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
- return -EINVAL;
-
- BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
- PAGE_SIZE);
- max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
- PAGE_SIZE);
- max_num = (max_buf - sizeof(struct smb_hdr)) /
- sizeof(LOCKING_ANDX_RANGE);
- buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- cifs_down_write(&cinode->lock_sem);
- for (i = 0; i < 2; i++) {
- cur = buf;
- num = 0;
- list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
- if (flock->fl_start > li->offset ||
- (flock->fl_start + length) <
- (li->offset + li->length))
- continue;
- if (current->tgid != li->pid)
- continue;
- if (types[i] != li->type)
- continue;
- if (cinode->can_cache_brlcks) {
- /*
- * We can cache brlock requests - simply remove
- * the lock from the file's list.
- */
- list_del(&li->llist);
- cifs_del_lock_waiters(li);
- kfree(li);
- continue;
- }
- cur->Pid = cpu_to_le16(li->pid);
- cur->LengthLow = cpu_to_le32((u32)li->length);
- cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
- cur->OffsetLow = cpu_to_le32((u32)li->offset);
- cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
- /*
- * We need to save the lock here so we can add it back to
- * the file's list if the unlock range request fails on
- * the server.
- */
- list_move(&li->llist, &tmp_llist);
- if (++num == max_num) {
- stored_rc = cifs_lockv(xid, tcon,
- cfile->fid.netfid,
- li->type, num, 0, buf);
- if (stored_rc) {
- /*
- * We failed on the unlock range
- * request - add all locks from the tmp
- * list to the head of the file's list.
- */
- cifs_move_llist(&tmp_llist,
- &cfile->llist->locks);
- rc = stored_rc;
- } else
- /*
- * The unlock range request succeeded -
- * free the tmp list.
- */
- cifs_free_llist(&tmp_llist);
- cur = buf;
- num = 0;
- } else
- cur++;
- }
- if (num) {
- stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
- types[i], num, 0, buf);
- if (stored_rc) {
- cifs_move_llist(&tmp_llist,
- &cfile->llist->locks);
- rc = stored_rc;
- } else
- cifs_free_llist(&tmp_llist);
- }
- }
-
- up_write(&cinode->lock_sem);
- kfree(buf);
- return rc;
-}
-
-static int
-cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
- bool wait_flag, bool posix_lck, int lock, int unlock,
- unsigned int xid)
-{
- int rc = 0;
- __u64 length = 1 + flock->fl_end - flock->fl_start;
- struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
- struct inode *inode = d_inode(cfile->dentry);
-
- if (posix_lck) {
- int posix_lock_type;
-
- rc = cifs_posix_lock_set(file, flock);
- if (rc <= FILE_LOCK_DEFERRED)
- return rc;
-
- if (type & server->vals->shared_lock_type)
- posix_lock_type = CIFS_RDLCK;
- else
- posix_lock_type = CIFS_WRLCK;
-
- if (unlock == 1)
- posix_lock_type = CIFS_UNLCK;
-
- rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
- hash_lockowner(flock->fl_owner),
- flock->fl_start, length,
- NULL, posix_lock_type, wait_flag);
- goto out;
- }
-
- if (lock) {
- struct cifsLockInfo *lock;
-
- lock = cifs_lock_init(flock->fl_start, length, type,
- flock->fl_flags);
- if (!lock)
- return -ENOMEM;
-
- rc = cifs_lock_add_if(cfile, lock, wait_flag);
- if (rc < 0) {
- kfree(lock);
- return rc;
- }
- if (!rc)
- goto out;
-
- /*
- * Windows 7 server can delay breaking lease from read to None
- * if we set a byte-range lock on a file - break it explicitly
- * before sending the lock to the server to be sure the next
- * read won't conflict with non-overlapping locks due to
- * page reads.
- */
- if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
- CIFS_CACHE_READ(CIFS_I(inode))) {
- cifs_zap_mapping(inode);
- cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
- inode);
- CIFS_I(inode)->oplock = 0;
- }
-
- rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
- type, 1, 0, wait_flag);
- if (rc) {
- kfree(lock);
- return rc;
- }
-
- cifs_lock_add(cfile, lock);
- } else if (unlock)
- rc = server->ops->mand_unlock_range(cfile, flock, xid);
-
-out:
- if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
- /*
- * If this is a request to remove all locks because we
- * are closing the file, it doesn't matter if the
- * unlocking failed as both cifs.ko and the SMB server
- * remove the lock on file close.
- */
- if (rc) {
- cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
- if (!(flock->fl_flags & FL_CLOSE))
- return rc;
- }
- rc = locks_lock_file_wait(file, flock);
- }
- return rc;
-}
-
-int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
-{
- int rc, xid;
- int lock = 0, unlock = 0;
- bool wait_flag = false;
- bool posix_lck = false;
- struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
- struct cifsFileInfo *cfile;
- __u32 type;
-
- rc = -EACCES;
- xid = get_xid();
-
- if (!(fl->fl_flags & FL_FLOCK))
- return -ENOLCK;
-
- cfile = (struct cifsFileInfo *)file->private_data;
- tcon = tlink_tcon(cfile->tlink);
-
- cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
- tcon->ses->server);
- cifs_sb = CIFS_FILE_SB(file);
-
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
- posix_lck = true;
-
- if (!lock && !unlock) {
- /*
- * if this is neither a lock nor an unlock request, there is
- * nothing to do since we do not know what it is
- */
- free_xid(xid);
- return -EOPNOTSUPP;
- }
-
- rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
- xid);
- free_xid(xid);
- return rc;
- }
-
-int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
-{
- int rc, xid;
- int lock = 0, unlock = 0;
- bool wait_flag = false;
- bool posix_lck = false;
- struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
- struct cifsFileInfo *cfile;
- __u32 type;
-
- rc = -EACCES;
- xid = get_xid();
-
- cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
- cmd, flock->fl_flags, flock->fl_type,
- flock->fl_start, flock->fl_end);
-
- cfile = (struct cifsFileInfo *)file->private_data;
- tcon = tlink_tcon(cfile->tlink);
-
- cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
- tcon->ses->server);
- cifs_sb = CIFS_FILE_SB(file);
- set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
-
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
- posix_lck = true;
- /*
- * BB add code here to normalize offset and length to account for
- * negative length, which we cannot accept over the wire.
- */
- if (IS_GETLK(cmd)) {
- rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
- free_xid(xid);
- return rc;
- }
-
- if (!lock && !unlock) {
- /*
- * if this is neither a lock nor an unlock request, there is
- * nothing to do since we do not know what it is
- */
- free_xid(xid);
- return -EOPNOTSUPP;
- }
-
- rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
- xid);
- free_xid(xid);
- return rc;
-}
-
-/*
- * update the file size (if needed) after a write. Should be called with
- * the inode->i_lock held
- */
-void
-cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
- unsigned int bytes_written)
-{
- loff_t end_of_write = offset + bytes_written;
-
- if (end_of_write > cifsi->server_eof)
- cifsi->server_eof = end_of_write;
-}
-
-static ssize_t
-cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
- size_t write_size, loff_t *offset)
-{
- int rc = 0;
- unsigned int bytes_written = 0;
- unsigned int total_written;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- unsigned int xid;
- struct dentry *dentry = open_file->dentry;
- struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
- struct cifs_io_parms io_parms = {0};
-
- cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
- write_size, *offset, dentry);
-
- tcon = tlink_tcon(open_file->tlink);
- server = tcon->ses->server;
-
- if (!server->ops->sync_write)
- return -ENOSYS;
-
- xid = get_xid();
-
- for (total_written = 0; write_size > total_written;
- total_written += bytes_written) {
- rc = -EAGAIN;
- while (rc == -EAGAIN) {
- struct kvec iov[2];
- unsigned int len;
-
- if (open_file->invalidHandle) {
- /* we could deadlock if we called
- filemap_fdatawait from here, so tell
- cifs_reopen_file not to flush data to
- the server now */
- rc = cifs_reopen_file(open_file, false);
- if (rc != 0)
- break;
- }
-
- len = min(server->ops->wp_retry_size(d_inode(dentry)),
- (unsigned int)write_size - total_written);
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = (char *)write_data + total_written;
- iov[1].iov_len = len;
- io_parms.pid = pid;
- io_parms.tcon = tcon;
- io_parms.offset = *offset;
- io_parms.length = len;
- rc = server->ops->sync_write(xid, &open_file->fid,
- &io_parms, &bytes_written, iov, 1);
- }
- if (rc || (bytes_written == 0)) {
- if (total_written)
- break;
- else {
- free_xid(xid);
- return rc;
- }
- } else {
- spin_lock(&d_inode(dentry)->i_lock);
- cifs_update_eof(cifsi, *offset, bytes_written);
- spin_unlock(&d_inode(dentry)->i_lock);
- *offset += bytes_written;
- }
- }
-
- cifs_stats_bytes_written(tcon, total_written);
-
- if (total_written > 0) {
- spin_lock(&d_inode(dentry)->i_lock);
- if (*offset > d_inode(dentry)->i_size) {
- i_size_write(d_inode(dentry), *offset);
- d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
- }
- spin_unlock(&d_inode(dentry)->i_lock);
- }
- mark_inode_dirty_sync(d_inode(dentry));
- free_xid(xid);
- return total_written;
-}
-
-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only)
-{
- struct cifsFileInfo *open_file = NULL;
- struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-
- /* only filter by fsuid on multiuser mounts */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
- fsuid_only = false;
-
- spin_lock(&cifs_inode->open_file_lock);
- /* we could simply get the first_list_entry since write-only entries
- are always at the end of the list, but since the first entry might
- have a close pending, we go through the whole list */
- list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
- continue;
- if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
- if ((!open_file->invalidHandle)) {
- /* found a good file */
- /* lock it so it will not be closed on us */
- cifsFileInfo_get(open_file);
- spin_unlock(&cifs_inode->open_file_lock);
- return open_file;
- } /* else might as well continue, and look for
- another, or simply have the caller reopen it
- again rather than trying to fix this handle */
- } else /* write only file */
- break; /* write only files are last so must be done */
- }
- spin_unlock(&cifs_inode->open_file_lock);
- return NULL;
-}
-
-/* Return -EBADF if no handle is found and general rc otherwise */
-int
-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
- struct cifsFileInfo **ret_file)
-{
- struct cifsFileInfo *open_file, *inv_file = NULL;
- struct cifs_sb_info *cifs_sb;
- bool any_available = false;
- int rc = -EBADF;
- unsigned int refind = 0;
- bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
- bool with_delete = flags & FIND_WR_WITH_DELETE;
- *ret_file = NULL;
-
- /*
- * Having a null inode here (because mapping->host was set to zero by
- * the VFS or MM) should not happen, but we had reports of an oops (due
- * to it being zero) during stress test cases, so we need to check for it
- */
-
- if (cifs_inode == NULL) {
- cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
- dump_stack();
- return rc;
- }
-
- cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-
- /* only filter by fsuid on multiuser mounts */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
- fsuid_only = false;
-
- spin_lock(&cifs_inode->open_file_lock);
-refind_writable:
- if (refind > MAX_REOPEN_ATT) {
- spin_unlock(&cifs_inode->open_file_lock);
- return rc;
- }
- list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (!any_available && open_file->pid != current->tgid)
- continue;
- if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
- continue;
- if (with_delete && !(open_file->fid.access & DELETE))
- continue;
- if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- if (!open_file->invalidHandle) {
- /* found a good writable file */
- cifsFileInfo_get(open_file);
- spin_unlock(&cifs_inode->open_file_lock);
- *ret_file = open_file;
- return 0;
- } else {
- if (!inv_file)
- inv_file = open_file;
- }
- }
- }
- /* couldn't find a usable FH with the same pid, try any available */
- if (!any_available) {
- any_available = true;
- goto refind_writable;
- }
-
- if (inv_file) {
- any_available = false;
- cifsFileInfo_get(inv_file);
- }
-
- spin_unlock(&cifs_inode->open_file_lock);
-
- if (inv_file) {
- rc = cifs_reopen_file(inv_file, false);
- if (!rc) {
- *ret_file = inv_file;
- return 0;
- }
-
- spin_lock(&cifs_inode->open_file_lock);
- list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
- spin_unlock(&cifs_inode->open_file_lock);
- cifsFileInfo_put(inv_file);
- ++refind;
- inv_file = NULL;
- spin_lock(&cifs_inode->open_file_lock);
- goto refind_writable;
- }
-
- return rc;
-}
-
-struct cifsFileInfo *
-find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
-{
- struct cifsFileInfo *cfile;
- int rc;
-
- rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
- if (rc)
- cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
-
- return cfile;
-}
-
-int
-cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
- int flags,
- struct cifsFileInfo **ret_file)
-{
- struct cifsFileInfo *cfile;
- void *page = alloc_dentry_path();
-
- *ret_file = NULL;
-
- spin_lock(&tcon->open_file_lock);
- list_for_each_entry(cfile, &tcon->openFileList, tlist) {
- struct cifsInodeInfo *cinode;
- const char *full_path = build_path_from_dentry(cfile->dentry, page);
- if (IS_ERR(full_path)) {
- spin_unlock(&tcon->open_file_lock);
- free_dentry_path(page);
- return PTR_ERR(full_path);
- }
- if (strcmp(full_path, name))
- continue;
-
- cinode = CIFS_I(d_inode(cfile->dentry));
- spin_unlock(&tcon->open_file_lock);
- free_dentry_path(page);
- return cifs_get_writable_file(cinode, flags, ret_file);
- }
-
- spin_unlock(&tcon->open_file_lock);
- free_dentry_path(page);
- return -ENOENT;
-}
-
-int
-cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
- struct cifsFileInfo **ret_file)
-{
- struct cifsFileInfo *cfile;
- void *page = alloc_dentry_path();
-
- *ret_file = NULL;
-
- spin_lock(&tcon->open_file_lock);
- list_for_each_entry(cfile, &tcon->openFileList, tlist) {
- struct cifsInodeInfo *cinode;
- const char *full_path = build_path_from_dentry(cfile->dentry, page);
- if (IS_ERR(full_path)) {
- spin_unlock(&tcon->open_file_lock);
- free_dentry_path(page);
- return PTR_ERR(full_path);
- }
- if (strcmp(full_path, name))
- continue;
-
- cinode = CIFS_I(d_inode(cfile->dentry));
- spin_unlock(&tcon->open_file_lock);
- free_dentry_path(page);
- *ret_file = find_readable_file(cinode, 0);
- return *ret_file ? 0 : -ENOENT;
- }
-
- spin_unlock(&tcon->open_file_lock);
- free_dentry_path(page);
- return -ENOENT;
-}
-
-static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
-{
- struct address_space *mapping = page->mapping;
- loff_t offset = (loff_t)page->index << PAGE_SHIFT;
- char *write_data;
- int rc = -EFAULT;
- int bytes_written = 0;
- struct inode *inode;
- struct cifsFileInfo *open_file;
-
- if (!mapping || !mapping->host)
- return -EFAULT;
-
- inode = page->mapping->host;
-
- offset += (loff_t)from;
- write_data = kmap(page);
- write_data += from;
-
- if ((to > PAGE_SIZE) || (from > to)) {
- kunmap(page);
- return -EIO;
- }
-
- /* racing with truncate? */
- if (offset > mapping->host->i_size) {
- kunmap(page);
- return 0; /* don't care */
- }
-
- /* check to make sure that we are not extending the file */
- if (mapping->host->i_size - offset < (loff_t)to)
- to = (unsigned)(mapping->host->i_size - offset);
-
- rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
- &open_file);
- if (!rc) {
- bytes_written = cifs_write(open_file, open_file->pid,
- write_data, to - from, &offset);
- cifsFileInfo_put(open_file);
- /* Does mm or vfs already set times? */
- inode->i_atime = inode->i_mtime = current_time(inode);
- if ((bytes_written > 0) && (offset))
- rc = 0;
- else if (bytes_written < 0)
- rc = bytes_written;
- else
- rc = -EFAULT;
- } else {
- cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
- if (!is_retryable_error(rc))
- rc = -EIO;
- }
-
- kunmap(page);
- return rc;
-}
-
-static struct cifs_writedata *
-wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
- pgoff_t end, pgoff_t *index,
- unsigned int *found_pages)
-{
- struct cifs_writedata *wdata;
-
- wdata = cifs_writedata_alloc((unsigned int)tofind,
- cifs_writev_complete);
- if (!wdata)
- return NULL;
-
- *found_pages = find_get_pages_range_tag(mapping, index, end,
- PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
- return wdata;
-}
-
-static unsigned int
-wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
- struct address_space *mapping,
- struct writeback_control *wbc,
- pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
-{
- unsigned int nr_pages = 0, i;
- struct page *page;
-
- for (i = 0; i < found_pages; i++) {
- page = wdata->pages[i];
- /*
- * At this point we hold neither the i_pages lock nor the
- * page lock: the page may be truncated or invalidated
- * (changing page->mapping to NULL), or even swizzled
- * back from swapper_space to tmpfs file mapping
- */
-
- if (nr_pages == 0)
- lock_page(page);
- else if (!trylock_page(page))
- break;
-
- if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
- break;
- }
-
- if (!wbc->range_cyclic && page->index > end) {
- *done = true;
- unlock_page(page);
- break;
- }
-
- if (*next && (page->index != *next)) {
- /* Not next consecutive page */
- unlock_page(page);
- break;
- }
-
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- unlock_page(page);
- break;
- }
-
- /*
- * This actually clears the dirty bit in the radix tree.
- * See cifs_writepage() for more commentary.
- */
- set_page_writeback(page);
- if (page_offset(page) >= i_size_read(mapping->host)) {
- *done = true;
- unlock_page(page);
- end_page_writeback(page);
- break;
- }
-
- wdata->pages[i] = page;
- *next = page->index + 1;
- ++nr_pages;
- }
-
- /* reset index to refind any pages skipped */
- if (nr_pages == 0)
- *index = wdata->pages[0]->index + 1;
-
- /* put any pages we aren't going to use */
- for (i = nr_pages; i < found_pages; i++) {
- put_page(wdata->pages[i]);
- wdata->pages[i] = NULL;
- }
-
- return nr_pages;
-}
-
-static int
-wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
- struct address_space *mapping, struct writeback_control *wbc)
-{
- int rc;
-
- wdata->sync_mode = wbc->sync_mode;
- wdata->nr_pages = nr_pages;
- wdata->offset = page_offset(wdata->pages[0]);
- wdata->pagesz = PAGE_SIZE;
- wdata->tailsz = min(i_size_read(mapping->host) -
- page_offset(wdata->pages[nr_pages - 1]),
- (loff_t)PAGE_SIZE);
- wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
- wdata->pid = wdata->cfile->pid;
-
- rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
- if (rc)
- return rc;
-
- if (wdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = wdata->server->ops->async_writev(wdata,
- cifs_writedata_release);
-
- return rc;
-}
-
-static int cifs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- struct inode *inode = mapping->host;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct TCP_Server_Info *server;
- bool done = false, scanned = false, range_whole = false;
- pgoff_t end, index;
- struct cifs_writedata *wdata;
- struct cifsFileInfo *cfile = NULL;
- int rc = 0;
- int saved_rc = 0;
- unsigned int xid;
-
- /*
- * If wsize is smaller than the page cache size, default to writing
- * one page at a time via cifs_writepage
- */
- if (cifs_sb->ctx->wsize < PAGE_SIZE)
- return generic_writepages(mapping, wbc);
-
- xid = get_xid();
- if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = true;
- scanned = true;
- }
- server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
-
-retry:
- while (!done && index <= end) {
- unsigned int i, nr_pages, found_pages, wsize;
- pgoff_t next = 0, tofind, saved_index = index;
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
- int get_file_rc = 0;
-
- if (cfile)
- cifsFileInfo_put(cfile);
-
- rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
-
- /* in case of an error store it to return later */
- if (rc)
- get_file_rc = rc;
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
- &wsize, credits);
- if (rc != 0) {
- done = true;
- break;
- }
-
- tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
-
- wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
- &found_pages);
- if (!wdata) {
- rc = -ENOMEM;
- done = true;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- if (found_pages == 0) {
- kref_put(&wdata->refcount, cifs_writedata_release);
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
- end, &index, &next, &done);
-
- /* nothing to write? */
- if (nr_pages == 0) {
- kref_put(&wdata->refcount, cifs_writedata_release);
- add_credits_and_wake_if(server, credits, 0);
- continue;
- }
-
- wdata->credits = credits_on_stack;
- wdata->cfile = cfile;
- wdata->server = server;
- cfile = NULL;
-
- if (!wdata->cfile) {
- cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
- get_file_rc);
- if (is_retryable_error(get_file_rc))
- rc = get_file_rc;
- else
- rc = -EBADF;
- } else
- rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
-
- for (i = 0; i < nr_pages; ++i)
- unlock_page(wdata->pages[i]);
-
- /* send failure -- clean up the mess */
- if (rc != 0) {
- add_credits_and_wake_if(server, &wdata->credits, 0);
- for (i = 0; i < nr_pages; ++i) {
- if (is_retryable_error(rc))
- redirty_page_for_writepage(wbc,
- wdata->pages[i]);
- else
- SetPageError(wdata->pages[i]);
- end_page_writeback(wdata->pages[i]);
- put_page(wdata->pages[i]);
- }
- if (!is_retryable_error(rc))
- mapping_set_error(mapping, rc);
- }
- kref_put(&wdata->refcount, cifs_writedata_release);
-
- if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
- index = saved_index;
- continue;
- }
-
- /* Return immediately if we received a signal during writing */
- if (is_interrupt_error(rc)) {
- done = true;
- break;
- }
-
- if (rc != 0 && saved_rc == 0)
- saved_rc = rc;
-
- wbc->nr_to_write -= nr_pages;
- if (wbc->nr_to_write <= 0)
- done = true;
-
- index = next;
- }
-
- if (!scanned && !done) {
- /*
- * We hit the last page and there is more work to be done: wrap
- * back to the start of the file
- */
- scanned = true;
- index = 0;
- goto retry;
- }
-
- if (saved_rc != 0)
- rc = saved_rc;
-
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
-
- if (cfile)
- cifsFileInfo_put(cfile);
- free_xid(xid);
- /* Indication to update ctime and mtime as close is deferred */
- set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
- return rc;
-}
-
-static int
-cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
-{
- int rc;
- unsigned int xid;
-
- xid = get_xid();
-/* BB add check for wbc flags */
- get_page(page);
- if (!PageUptodate(page))
- cifs_dbg(FYI, "ppw - page not up to date\n");
-
- /*
- * Set the "writeback" flag, and clear "dirty" in the radix tree.
- *
- * A writepage() implementation always needs to do either this,
- * or re-dirty the page with "redirty_page_for_writepage()" in
- * the case of a failure.
- *
- * Just unlocking the page will cause the radix tree tag-bits
- * to fail to update with the state of the page correctly.
- */
- set_page_writeback(page);
-retry_write:
- rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
- if (is_retryable_error(rc)) {
- if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
- goto retry_write;
- redirty_page_for_writepage(wbc, page);
- } else if (rc != 0) {
- SetPageError(page);
- mapping_set_error(page->mapping, rc);
- } else {
- SetPageUptodate(page);
- }
- end_page_writeback(page);
- put_page(page);
- free_xid(xid);
- return rc;
-}
-
-static int cifs_writepage(struct page *page, struct writeback_control *wbc)
-{
- int rc = cifs_writepage_locked(page, wbc);
- unlock_page(page);
- return rc;
-}
-
-static int cifs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- int rc;
- struct inode *inode = mapping->host;
- struct cifsFileInfo *cfile = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
- __u32 pid;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = cfile->pid;
- else
- pid = current->tgid;
-
- cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
- page, pos, copied);
-
- if (PageChecked(page)) {
- if (copied == len)
- SetPageUptodate(page);
- ClearPageChecked(page);
- } else if (!PageUptodate(page) && copied == PAGE_SIZE)
- SetPageUptodate(page);
-
- if (!PageUptodate(page)) {
- char *page_data;
- unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned int xid;
-
- xid = get_xid();
- /* this is probably better than directly calling
- partialpage_write since in this function the file handle is
- known, which we might as well leverage */
- /* BB check if anything else is missing out of ppw,
- such as updating the last write time */
- page_data = kmap(page);
- rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
- /* if (rc < 0) should we set writebehind rc? */
- kunmap(page);
-
- free_xid(xid);
- } else {
- rc = copied;
- pos += copied;
- set_page_dirty(page);
- }
-
- if (rc > 0) {
- spin_lock(&inode->i_lock);
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
- inode->i_blocks = (512 - 1 + pos) >> 9;
- }
- spin_unlock(&inode->i_lock);
- }
-
- unlock_page(page);
- put_page(page);
- /* Indication to update ctime and mtime as close is deferred */
- set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
-
- return rc;
-}
-
-int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
- int datasync)
-{
- unsigned int xid;
- int rc = 0;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- struct cifsFileInfo *smbfile = file->private_data;
- struct inode *inode = file_inode(file);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
-
- rc = file_write_and_wait_range(file, start, end);
- if (rc) {
- trace_cifs_fsync_err(inode->i_ino, rc);
- return rc;
- }
-
- xid = get_xid();
-
- cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
- file, datasync);
-
- if (!CIFS_CACHE_READ(CIFS_I(inode))) {
- rc = cifs_zap_mapping(inode);
- if (rc) {
- cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
- rc = 0; /* don't care about it in fsync */
- }
- }
-
- tcon = tlink_tcon(smbfile->tlink);
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
- server = tcon->ses->server;
- if (server->ops->flush)
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- else
- rc = -ENOSYS;
- }
-
- free_xid(xid);
- return rc;
-}
-
-int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
- unsigned int xid;
- int rc = 0;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- struct cifsFileInfo *smbfile = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
-
- rc = file_write_and_wait_range(file, start, end);
- if (rc) {
- trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
- return rc;
- }
-
- xid = get_xid();
-
- cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
- file, datasync);
-
- tcon = tlink_tcon(smbfile->tlink);
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
- server = tcon->ses->server;
- if (server->ops->flush)
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- else
- rc = -ENOSYS;
- }
-
- free_xid(xid);
- return rc;
-}
-
-/*
- * As file closes, flush all cached write data for this inode checking
- * for write behind errors.
- */
-int cifs_flush(struct file *file, fl_owner_t id)
-{
- struct inode *inode = file_inode(file);
- int rc = 0;
-
- if (file->f_mode & FMODE_WRITE)
- rc = filemap_write_and_wait(inode->i_mapping);
-
- cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
- if (rc)
- trace_cifs_flush_err(inode->i_ino, rc);
- return rc;
-}
-
-static int
-cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
-{
- int rc = 0;
- unsigned long i;
-
- for (i = 0; i < num_pages; i++) {
- pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!pages[i]) {
- /*
- * save the number of pages we have already allocated and
- * return with an -ENOMEM error
- */
- num_pages = i;
- rc = -ENOMEM;
- break;
- }
- }
-
- if (rc) {
- for (i = 0; i < num_pages; i++)
- put_page(pages[i]);
- }
- return rc;
-}
-
-static inline
-size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
-{
- size_t num_pages;
- size_t clen;
-
- clen = min_t(const size_t, len, wsize);
- num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
-
- if (cur_len)
- *cur_len = clen;
-
- return num_pages;
-}
-
-static void
-cifs_uncached_writedata_release(struct kref *refcount)
-{
- int i;
- struct cifs_writedata *wdata = container_of(refcount,
- struct cifs_writedata, refcount);
-
- kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
- for (i = 0; i < wdata->nr_pages; i++)
- put_page(wdata->pages[i]);
- cifs_writedata_release(refcount);
-}
-
-static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
-
-static void
-cifs_uncached_writev_complete(struct work_struct *work)
-{
- struct cifs_writedata *wdata = container_of(work,
- struct cifs_writedata, work);
- struct inode *inode = d_inode(wdata->cfile->dentry);
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- spin_lock(&inode->i_lock);
- cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
- if (cifsi->server_eof > inode->i_size)
- i_size_write(inode, cifsi->server_eof);
- spin_unlock(&inode->i_lock);
-
- complete(&wdata->done);
- collect_uncached_write_data(wdata->ctx);
- /* the below call can possibly free the last ref to aio ctx */
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
-}
-
-static int
-wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
- size_t *len, unsigned long *num_pages)
-{
- size_t save_len, copied, bytes, cur_len = *len;
- unsigned long i, nr_pages = *num_pages;
-
- save_len = cur_len;
- for (i = 0; i < nr_pages; i++) {
- bytes = min_t(const size_t, cur_len, PAGE_SIZE);
- copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
- cur_len -= copied;
- /*
- * If we didn't copy as much as we expected, then that
- * may mean we trod into an unmapped area. Stop copying
- * at that point. On the next pass through the big
- * loop, we'll likely end up getting a zero-length
- * write and bailing out of it.
- */
- if (copied < bytes)
- break;
- }
- cur_len = save_len - cur_len;
- *len = cur_len;
-
- /*
- * If we have no data to send, then that probably means that
- * the copy above failed altogether. That's most likely because
- * the address in the iovec was bogus. Return -EFAULT and let
- * the caller free anything we allocated and bail out.
- */
- if (!cur_len)
- return -EFAULT;
-
- /*
- * i + 1 now represents the number of pages we actually used in
- * the copy phase above.
- */
- *num_pages = i + 1;
- return 0;
-}
-
-static int
-cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
- struct cifs_aio_ctx *ctx)
-{
- unsigned int wsize;
- struct cifs_credits credits;
- int rc;
- struct TCP_Server_Info *server = wdata->server;
-
- do {
- if (wdata->cfile->invalidHandle) {
- rc = cifs_reopen_file(wdata->cfile, false);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- /*
- * Wait for credits to resend this wdata.
- * Note: we are attempting to resend the whole wdata, not in
- * segments.
- */
- do {
- rc = server->ops->wait_mtu_credits(server, wdata->bytes,
- &wsize, &credits);
- if (rc)
- goto fail;
-
- if (wsize < wdata->bytes) {
- add_credits_and_wake_if(server, &credits, 0);
- msleep(1000);
- }
- } while (wsize < wdata->bytes);
- wdata->credits = credits;
-
- rc = adjust_credits(server, &wdata->credits, wdata->bytes);
-
- if (!rc) {
- if (wdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else {
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (wdata->mr) {
- wdata->mr->need_invalidate = true;
- smbd_deregister_mr(wdata->mr);
- wdata->mr = NULL;
- }
-#endif
- rc = server->ops->async_writev(wdata,
- cifs_uncached_writedata_release);
- }
- }
-
- /* If the write was successfully sent, we are done */
- if (!rc) {
- list_add_tail(&wdata->list, wdata_list);
- return 0;
- }
-
- /* Roll back credits and retry if needed */
- add_credits_and_wake_if(server, &wdata->credits, 0);
- } while (rc == -EAGAIN);
-
-fail:
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
- return rc;
-}
-
-static int
-cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
- struct cifsFileInfo *open_file,
- struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
- struct cifs_aio_ctx *ctx)
-{
- int rc = 0;
- size_t cur_len;
- unsigned long nr_pages, num_pages, i;
- struct cifs_writedata *wdata;
- struct iov_iter saved_from = *from;
- loff_t saved_offset = offset;
- pid_t pid;
- struct TCP_Server_Info *server;
- struct page **pagevec;
- size_t start;
- unsigned int xid;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
- xid = get_xid();
-
- do {
- unsigned int wsize;
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
-
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, false);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
- &wsize, credits);
- if (rc)
- break;
-
- cur_len = min_t(const size_t, len, wsize);
-
- if (ctx->direct_io) {
- ssize_t result;
-
- result = iov_iter_get_pages_alloc(
- from, &pagevec, cur_len, &start);
- if (result < 0) {
- cifs_dbg(VFS,
- "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
- result, iov_iter_type(from),
- from->iov_offset, from->count);
- dump_stack();
-
- rc = result;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
- cur_len = (size_t)result;
- iov_iter_advance(from, cur_len);
-
- nr_pages =
- (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
-
- wdata = cifs_writedata_direct_alloc(pagevec,
- cifs_uncached_writev_complete);
- if (!wdata) {
- rc = -ENOMEM;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- wdata->page_offset = start;
- wdata->tailsz =
- nr_pages > 1 ?
- cur_len - (PAGE_SIZE - start) -
- (nr_pages - 2) * PAGE_SIZE :
- cur_len;
- } else {
- nr_pages = get_numpages(wsize, len, &cur_len);
- wdata = cifs_writedata_alloc(nr_pages,
- cifs_uncached_writev_complete);
- if (!wdata) {
- rc = -ENOMEM;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
- if (rc) {
- kvfree(wdata->pages);
- kfree(wdata);
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- num_pages = nr_pages;
- rc = wdata_fill_from_iovec(
- wdata, from, &cur_len, &num_pages);
- if (rc) {
- for (i = 0; i < nr_pages; i++)
- put_page(wdata->pages[i]);
- kvfree(wdata->pages);
- kfree(wdata);
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- /*
- * Bring nr_pages down to the number of pages we
- * actually used, and free any pages that we didn't use.
- */
- for ( ; nr_pages > num_pages; nr_pages--)
- put_page(wdata->pages[nr_pages - 1]);
-
- wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
- }
-
- wdata->sync_mode = WB_SYNC_ALL;
- wdata->nr_pages = nr_pages;
- wdata->offset = (__u64)offset;
- wdata->cfile = cifsFileInfo_get(open_file);
- wdata->server = server;
- wdata->pid = pid;
- wdata->bytes = cur_len;
- wdata->pagesz = PAGE_SIZE;
- wdata->credits = credits_on_stack;
- wdata->ctx = ctx;
- kref_get(&ctx->refcount);
-
- rc = adjust_credits(server, &wdata->credits, wdata->bytes);
-
- if (!rc) {
- if (wdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = server->ops->async_writev(wdata,
- cifs_uncached_writedata_release);
- }
-
- if (rc) {
- add_credits_and_wake_if(server, &wdata->credits, 0);
- kref_put(&wdata->refcount,
- cifs_uncached_writedata_release);
- if (rc == -EAGAIN) {
- *from = saved_from;
- iov_iter_advance(from, offset - saved_offset);
- continue;
- }
- break;
- }
-
- list_add_tail(&wdata->list, wdata_list);
- offset += cur_len;
- len -= cur_len;
- } while (len > 0);
-
- free_xid(xid);
- return rc;
-}
-
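-/*
- * Collect the replies for the async writes queued on ctx->list: wait
- * for each one in order of increasing offset, resend on -EAGAIN, then
- * report the final byte count (or error) through the iocb completion
- * or the synchronous completion.
- */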
-static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
-{
- struct cifs_writedata *wdata, *tmp;
- struct cifs_tcon *tcon;
- struct cifs_sb_info *cifs_sb;
- struct dentry *dentry = ctx->cfile->dentry;
- ssize_t rc;
-
- tcon = tlink_tcon(ctx->cfile->tlink);
- cifs_sb = CIFS_SB(dentry->d_sb);
-
- mutex_lock(&ctx->aio_mutex);
-
- if (list_empty(&ctx->list)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- rc = ctx->rc;
- /*
- * Wait for and collect replies for any successful sends in order of
- * increasing offset. Once an error is hit, then return without waiting
- * for any more replies.
- */
-restart_loop:
- list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
- if (!rc) {
- if (!try_wait_for_completion(&wdata->done)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- if (wdata->result)
- rc = wdata->result;
- else
- ctx->total_len += wdata->bytes;
-
- /* resend call if it's a retryable error */
- if (rc == -EAGAIN) {
- struct list_head tmp_list;
- struct iov_iter tmp_from = ctx->iter;
-
- INIT_LIST_HEAD(&tmp_list);
- list_del_init(&wdata->list);
-
- if (ctx->direct_io)
- rc = cifs_resend_wdata(
- wdata, &tmp_list, ctx);
- else {
- iov_iter_advance(&tmp_from,
- wdata->offset - ctx->pos);
-
- rc = cifs_write_from_iter(wdata->offset,
- wdata->bytes, &tmp_from,
- ctx->cfile, cifs_sb, &tmp_list,
- ctx);
-
- kref_put(&wdata->refcount,
- cifs_uncached_writedata_release);
- }
-
- list_splice(&tmp_list, &ctx->list);
- goto restart_loop;
- }
- }
- list_del_init(&wdata->list);
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
- }
-
- cifs_stats_bytes_written(tcon, ctx->total_len);
- set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
-
- ctx->rc = (rc == 0) ? ctx->total_len : rc;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (ctx->iocb && ctx->iocb->ki_complete)
- ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
- else
- complete(&ctx->done);
-}
-
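-/*
- * Common implementation behind cifs_user_writev() and
- * cifs_direct_writev(): set up a cifs_aio_ctx, send the data with
- * cifs_write_from_iter(), then either return -EIOCBQUEUED for async
- * kiocbs or wait for collect_uncached_write_data() to finish.
- */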
-static ssize_t __cifs_writev(
- struct kiocb *iocb, struct iov_iter *from, bool direct)
-{
- struct file *file = iocb->ki_filp;
- ssize_t total_written = 0;
- struct cifsFileInfo *cfile;
- struct cifs_tcon *tcon;
- struct cifs_sb_info *cifs_sb;
- struct cifs_aio_ctx *ctx;
- struct iov_iter saved_from = *from;
- size_t len = iov_iter_count(from);
- int rc;
-
- /*
- * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
- * In this case, fall back to the non-direct write path.
- * This could be improved by getting pages directly in ITER_KVEC.
- */
- if (direct && iov_iter_is_kvec(from)) {
- cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
- direct = false;
- }
-
- rc = generic_write_checks(iocb, from);
- if (rc <= 0)
- return rc;
-
- cifs_sb = CIFS_FILE_SB(file);
- cfile = file->private_data;
- tcon = tlink_tcon(cfile->tlink);
-
- if (!tcon->ses->server->ops->async_writev)
- return -ENOSYS;
-
- ctx = cifs_aio_ctx_alloc();
- if (!ctx)
- return -ENOMEM;
-
- ctx->cfile = cifsFileInfo_get(cfile);
-
- if (!is_sync_kiocb(iocb))
- ctx->iocb = iocb;
-
- ctx->pos = iocb->ki_pos;
-
- if (direct) {
- ctx->direct_io = true;
- ctx->iter = *from;
- ctx->len = len;
- } else {
- rc = setup_aio_ctx_iter(ctx, from, WRITE);
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
- }
-
- /* grab a lock here because the write response handlers can access ctx */
- mutex_lock(&ctx->aio_mutex);
-
- rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
- cfile, cifs_sb, &ctx->list, ctx);
-
- /*
- * If at least one write was successfully sent, then discard any rc
- * value from the later writes. If the later writes succeed, then
- * we'll end up returning whatever was written. If they fail, then
- * we'll get a new rc value from them.
- */
- if (!list_empty(&ctx->list))
- rc = 0;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
-
- if (!is_sync_kiocb(iocb)) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -EIOCBQUEUED;
- }
-
- rc = wait_for_completion_killable(&ctx->done);
- if (rc) {
- mutex_lock(&ctx->aio_mutex);
- ctx->rc = rc = -EINTR;
- total_written = ctx->total_len;
- mutex_unlock(&ctx->aio_mutex);
- } else {
- rc = ctx->rc;
- total_written = ctx->total_len;
- }
-
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
-
- if (unlikely(!total_written))
- return rc;
-
- iocb->ki_pos += total_written;
- return total_written;
-}
-
-ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- return __cifs_writev(iocb, from, true);
-}
-
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- return __cifs_writev(iocb, from, false);
-}
-
-static ssize_t
-cifs_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
- struct inode *inode = file->f_mapping->host;
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
- ssize_t rc;
-
- inode_lock(inode);
- /*
- * We need to hold the sem to be sure nobody modifies lock list
- * with a brlock that prevents writing.
- */
- down_read(&cinode->lock_sem);
-
- rc = generic_write_checks(iocb, from);
- if (rc <= 0)
- goto out;
-
- if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
- server->vals->exclusive_lock_type, 0,
- NULL, CIFS_WRITE_OP))
- rc = __generic_file_write_iter(iocb, from);
- else
- rc = -EACCES;
-out:
- up_read(&cinode->lock_sem);
- inode_unlock(inode);
-
- if (rc > 0)
- rc = generic_write_sync(iocb, rc);
- return rc;
-}
-
-ssize_t
-cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsFileInfo *cfile = (struct cifsFileInfo *)
- iocb->ki_filp->private_data;
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- ssize_t written;
-
- written = cifs_get_writer(cinode);
- if (written)
- return written;
-
- if (CIFS_CACHE_WRITE(cinode)) {
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
- && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
- written = generic_file_write_iter(iocb, from);
- goto out;
- }
- written = cifs_writev(iocb, from);
- goto out;
- }
- /*
- * For non-oplocked files in strict cache mode we need to write the data
- * to the server exactly from the pos to pos+len-1 rather than flush all
- * affected pages because it may cause an error with mandatory locks on
- * these pages but not on the region from pos to pos+len-1.
- */
- written = cifs_user_writev(iocb, from);
- if (CIFS_CACHE_READ(cinode)) {
- /*
- * We have read level caching and we have just sent a write
- * request to the server thus making data in the cache stale.
- * Zap the cache and set oplock/lease level to NONE to avoid
- * reading stale data from the cache. All subsequent read
- * operations will read new data from the server.
- */
- cifs_zap_mapping(inode);
- cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
- inode);
- cinode->oplock = 0;
- }
-out:
- cifs_put_writer(cinode);
- return written;
-}
-
-static struct cifs_readdata *
-cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
-{
- struct cifs_readdata *rdata;
-
- rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
- if (rdata != NULL) {
- rdata->pages = pages;
- kref_init(&rdata->refcount);
- INIT_LIST_HEAD(&rdata->list);
- init_completion(&rdata->done);
- INIT_WORK(&rdata->work, complete);
- }
-
- return rdata;
-}
-
-static struct cifs_readdata *
-cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
-{
- struct page **pages =
- kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
- struct cifs_readdata *ret = NULL;
-
- if (pages) {
- ret = cifs_readdata_direct_alloc(pages, complete);
- if (!ret)
- kfree(pages);
- }
-
- return ret;
-}
-
-void
-cifs_readdata_release(struct kref *refcount)
-{
- struct cifs_readdata *rdata = container_of(refcount,
- struct cifs_readdata, refcount);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (rdata->mr) {
- smbd_deregister_mr(rdata->mr);
- rdata->mr = NULL;
- }
-#endif
- if (rdata->cfile)
- cifsFileInfo_put(rdata->cfile);
-
- kvfree(rdata->pages);
- kfree(rdata);
-}
-
-static int
-cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
-{
- int rc = 0;
- struct page *page;
- unsigned int i;
-
- for (i = 0; i < nr_pages; i++) {
- page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!page) {
- rc = -ENOMEM;
- break;
- }
- rdata->pages[i] = page;
- }
-
- if (rc) {
- unsigned int nr_page_failed = i;
-
- for (i = 0; i < nr_page_failed; i++) {
- put_page(rdata->pages[i]);
- rdata->pages[i] = NULL;
- }
- }
- return rc;
-}
-
-static void
-cifs_uncached_readdata_release(struct kref *refcount)
-{
- struct cifs_readdata *rdata = container_of(refcount,
- struct cifs_readdata, refcount);
- unsigned int i;
-
- kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
- for (i = 0; i < rdata->nr_pages; i++) {
- put_page(rdata->pages[i]);
- }
- cifs_readdata_release(refcount);
-}
-
-/**
- * cifs_readdata_to_iov - copy data from pages in response to an iovec
- * @rdata: the readdata response with list of pages holding data
- * @iter: destination for our data
- *
- * This function copies data from a list of pages in a readdata response into
- * an array of iovecs. It will first calculate where the data should go
- * based on the info in the readdata and then copy the data into that spot.
- */
-static int
-cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
-{
- size_t remaining = rdata->got_bytes;
- unsigned int i;
-
- for (i = 0; i < rdata->nr_pages; i++) {
- struct page *page = rdata->pages[i];
- size_t copy = min_t(size_t, remaining, PAGE_SIZE);
- size_t written;
-
- if (unlikely(iov_iter_is_pipe(iter))) {
- void *addr = kmap_atomic(page);
-
- written = copy_to_iter(addr, copy, iter);
- kunmap_atomic(addr);
- } else
- written = copy_page_to_iter(page, 0, copy, iter);
- remaining -= written;
- if (written < copy && iov_iter_count(iter) > 0)
- break;
- }
- return remaining ? -EFAULT : 0;
-}
-
-static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
-
-static void
-cifs_uncached_readv_complete(struct work_struct *work)
-{
- struct cifs_readdata *rdata = container_of(work,
- struct cifs_readdata, work);
-
- complete(&rdata->done);
- collect_uncached_read_data(rdata->ctx);
- /* the below call can possibly free the last ref to aio ctx */
- kref_put(&rdata->refcount, cifs_uncached_readdata_release);
-}
-
-static int
-uncached_fill_pages(struct TCP_Server_Info *server,
- struct cifs_readdata *rdata, struct iov_iter *iter,
- unsigned int len)
-{
- int result = 0;
- unsigned int i;
- unsigned int nr_pages = rdata->nr_pages;
- unsigned int page_offset = rdata->page_offset;
-
- rdata->got_bytes = 0;
- rdata->tailsz = PAGE_SIZE;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = rdata->pages[i];
- size_t n;
- unsigned int segment_size = rdata->pagesz;
-
- if (i == 0)
- segment_size -= page_offset;
- else
- page_offset = 0;
-
-
- if (len <= 0) {
- /* no need to hold page hostage */
- rdata->pages[i] = NULL;
- rdata->nr_pages--;
- put_page(page);
- continue;
- }
-
- n = len;
- if (len >= segment_size)
- /* enough data to fill the page */
- n = segment_size;
- else
- rdata->tailsz = len;
- len -= n;
-
- if (iter)
- result = copy_page_from_iter(
- page, page_offset, n, iter);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- else if (rdata->mr)
- result = n;
-#endif
- else
- result = cifs_read_page_from_socket(
- server, page, page_offset, n);
- if (result < 0)
- break;
-
- rdata->got_bytes += result;
- }
-
- return rdata->got_bytes > 0 && result != -ECONNABORTED ?
- rdata->got_bytes : result;
-}
-
-static int
-cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
- struct cifs_readdata *rdata, unsigned int len)
-{
- return uncached_fill_pages(server, rdata, NULL, len);
-}
-
-static int
-cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
- struct cifs_readdata *rdata,
- struct iov_iter *iter)
-{
- return uncached_fill_pages(server, rdata, iter, iter->count);
-}
-
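-/*
- * Resend a whole rdata (direct I/O path) after a reconnect: reopen the
- * file handle if needed, wait until enough credits are available to
- * send the entire rdata in a single request, and reissue the async
- * read.
- */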
-static int cifs_resend_rdata(struct cifs_readdata *rdata,
- struct list_head *rdata_list,
- struct cifs_aio_ctx *ctx)
-{
- unsigned int rsize;
- struct cifs_credits credits;
- int rc;
- struct TCP_Server_Info *server;
-
- /* XXX: should we pick a new channel here? */
- server = rdata->server;
-
- do {
- if (rdata->cfile->invalidHandle) {
- rc = cifs_reopen_file(rdata->cfile, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- /*
- * Wait for credits to resend this rdata.
- * Note: we are attempting to resend the whole rdata as one
- * request, not in segments.
- */
- do {
- rc = server->ops->wait_mtu_credits(server, rdata->bytes,
- &rsize, &credits);
-
- if (rc)
- goto fail;
-
- if (rsize < rdata->bytes) {
- add_credits_and_wake_if(server, &credits, 0);
- msleep(1000);
- }
- } while (rsize < rdata->bytes);
- rdata->credits = credits;
-
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
- if (!rc) {
- if (rdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else {
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (rdata->mr) {
- rdata->mr->need_invalidate = true;
- smbd_deregister_mr(rdata->mr);
- rdata->mr = NULL;
- }
-#endif
- rc = server->ops->async_readv(rdata);
- }
- }
-
- /* If the read was successfully sent, we are done */
- if (!rc) {
- /* Add to aio pending list */
- list_add_tail(&rdata->list, rdata_list);
- return 0;
- }
-
- /* Roll back credits and retry if needed */
- add_credits_and_wake_if(server, &rdata->credits, 0);
- } while (rc == -EAGAIN);
-
-fail:
- kref_put(&rdata->refcount, cifs_uncached_readdata_release);
- return rc;
-}
-
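-/*
- * Split the read at @offset/@len into rsize-sized async read requests,
- * pinning user pages for direct I/O or allocating pages otherwise, and
- * queue each issued request on @rdata_list for later collection.
- */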
-static int
-cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
- struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
- struct cifs_aio_ctx *ctx)
-{
- struct cifs_readdata *rdata;
- unsigned int npages, rsize;
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
- size_t cur_len;
- int rc;
- pid_t pid;
- struct TCP_Server_Info *server;
- struct page **pagevec;
- size_t start;
- struct iov_iter direct_iov = ctx->iter;
-
- server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- if (ctx->direct_io)
- iov_iter_advance(&direct_iov, offset - ctx->pos);
-
- do {
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
- &rsize, credits);
- if (rc)
- break;
-
- cur_len = min_t(const size_t, len, rsize);
-
- if (ctx->direct_io) {
- ssize_t result;
-
- result = iov_iter_get_pages_alloc(
- &direct_iov, &pagevec,
- cur_len, &start);
- if (result < 0) {
- cifs_dbg(VFS,
- "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
- result, iov_iter_type(&direct_iov),
- direct_iov.iov_offset,
- direct_iov.count);
- dump_stack();
-
- rc = result;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
- cur_len = (size_t)result;
- iov_iter_advance(&direct_iov, cur_len);
-
- rdata = cifs_readdata_direct_alloc(
- pagevec, cifs_uncached_readv_complete);
- if (!rdata) {
- add_credits_and_wake_if(server, credits, 0);
- rc = -ENOMEM;
- break;
- }
-
- npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
- rdata->page_offset = start;
- rdata->tailsz = npages > 1 ?
- cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
- cur_len;
-
- } else {
-
- npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
- /* allocate a readdata struct */
- rdata = cifs_readdata_alloc(npages,
- cifs_uncached_readv_complete);
- if (!rdata) {
- add_credits_and_wake_if(server, credits, 0);
- rc = -ENOMEM;
- break;
- }
-
- rc = cifs_read_allocate_pages(rdata, npages);
- if (rc) {
- kvfree(rdata->pages);
- kfree(rdata);
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rdata->tailsz = PAGE_SIZE;
- }
-
- rdata->server = server;
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->nr_pages = npages;
- rdata->offset = offset;
- rdata->bytes = cur_len;
- rdata->pid = pid;
- rdata->pagesz = PAGE_SIZE;
- rdata->read_into_pages = cifs_uncached_read_into_pages;
- rdata->copy_into_pages = cifs_uncached_copy_into_pages;
- rdata->credits = credits_on_stack;
- rdata->ctx = ctx;
- kref_get(&ctx->refcount);
-
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
-
- if (!rc) {
- if (rdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = server->ops->async_readv(rdata);
- }
-
- if (rc) {
- add_credits_and_wake_if(server, &rdata->credits, 0);
- kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
- if (rc == -EAGAIN) {
- iov_iter_revert(&direct_iov, cur_len);
- continue;
- }
- break;
- }
-
- list_add_tail(&rdata->list, rdata_list);
- offset += cur_len;
- len -= cur_len;
- } while (len > 0);
-
- return rc;
-}
-
-static void
-collect_uncached_read_data(struct cifs_aio_ctx *ctx)
-{
- struct cifs_readdata *rdata, *tmp;
- struct iov_iter *to = &ctx->iter;
- struct cifs_sb_info *cifs_sb;
- int rc;
-
- cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
-
- mutex_lock(&ctx->aio_mutex);
-
- if (list_empty(&ctx->list)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- rc = ctx->rc;
- /* the loop below should proceed in the order of increasing offsets */
-again:
- list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
- if (!rc) {
- if (!try_wait_for_completion(&rdata->done)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- if (rdata->result == -EAGAIN) {
- /* resend call if it's a retryable error */
- struct list_head tmp_list;
- unsigned int got_bytes = rdata->got_bytes;
-
- list_del_init(&rdata->list);
- INIT_LIST_HEAD(&tmp_list);
-
- /*
- * Got part of the data and then a reconnect
- * happened -- fill the buffer and continue
- * reading.
- */
- if (got_bytes && got_bytes < rdata->bytes) {
- rc = 0;
- if (!ctx->direct_io)
- rc = cifs_readdata_to_iov(rdata, to);
- if (rc) {
- kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
- continue;
- }
- }
-
- if (ctx->direct_io) {
- /*
- * Re-use rdata as this is
- * direct I/O
- */
- rc = cifs_resend_rdata(
- rdata,
- &tmp_list, ctx);
- } else {
- rc = cifs_send_async_read(
- rdata->offset + got_bytes,
- rdata->bytes - got_bytes,
- rdata->cfile, cifs_sb,
- &tmp_list, ctx);
-
- kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
- }
-
- list_splice(&tmp_list, &ctx->list);
-
- goto again;
- } else if (rdata->result)
- rc = rdata->result;
- else if (!ctx->direct_io)
- rc = cifs_readdata_to_iov(rdata, to);
-
- /* if there was a short read -- discard anything left */
- if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
- rc = -ENODATA;
-
- ctx->total_len += rdata->got_bytes;
- }
- list_del_init(&rdata->list);
- kref_put(&rdata->refcount, cifs_uncached_readdata_release);
- }
-
- if (!ctx->direct_io)
- ctx->total_len = ctx->len - iov_iter_count(to);
-
- /* mask nodata case */
- if (rc == -ENODATA)
- rc = 0;
-
- ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (ctx->iocb && ctx->iocb->ki_complete)
- ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
- else
- complete(&ctx->done);
-}
-
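-/*
- * Common implementation behind cifs_user_readv() and
- * cifs_direct_readv(): set up a cifs_aio_ctx, issue the async reads
- * with cifs_send_async_read(), then either return -EIOCBQUEUED for
- * async kiocbs or wait for collect_uncached_read_data() to finish.
- */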
-static ssize_t __cifs_readv(
- struct kiocb *iocb, struct iov_iter *to, bool direct)
-{
- size_t len;
- struct file *file = iocb->ki_filp;
- struct cifs_sb_info *cifs_sb;
- struct cifsFileInfo *cfile;
- struct cifs_tcon *tcon;
- ssize_t rc, total_read = 0;
- loff_t offset = iocb->ki_pos;
- struct cifs_aio_ctx *ctx;
-
- /*
- * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
- * so fall back to the data copy read path.
- * This could be improved by getting pages directly in ITER_KVEC.
- */
- if (direct && iov_iter_is_kvec(to)) {
- cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
- direct = false;
- }
-
- len = iov_iter_count(to);
- if (!len)
- return 0;
-
- cifs_sb = CIFS_FILE_SB(file);
- cfile = file->private_data;
- tcon = tlink_tcon(cfile->tlink);
-
- if (!tcon->ses->server->ops->async_readv)
- return -ENOSYS;
-
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cifs_dbg(FYI, "attempting read on write only file instance\n");
-
- ctx = cifs_aio_ctx_alloc();
- if (!ctx)
- return -ENOMEM;
-
- ctx->cfile = cifsFileInfo_get(cfile);
-
- if (!is_sync_kiocb(iocb))
- ctx->iocb = iocb;
-
- if (iter_is_iovec(to))
- ctx->should_dirty = true;
-
- if (direct) {
- ctx->pos = offset;
- ctx->direct_io = true;
- ctx->iter = *to;
- ctx->len = len;
- } else {
- rc = setup_aio_ctx_iter(ctx, to, READ);
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
- len = ctx->len;
- }
-
- /* grab a lock here because the read response handlers can access ctx */
- mutex_lock(&ctx->aio_mutex);
-
- rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
-
- /* if at least one read request was sent successfully, then reset rc */
- if (!list_empty(&ctx->list))
- rc = 0;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
-
- if (!is_sync_kiocb(iocb)) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -EIOCBQUEUED;
- }
-
- rc = wait_for_completion_killable(&ctx->done);
- if (rc) {
- mutex_lock(&ctx->aio_mutex);
- ctx->rc = rc = -EINTR;
- total_read = ctx->total_len;
- mutex_unlock(&ctx->aio_mutex);
- } else {
- rc = ctx->rc;
- total_read = ctx->total_len;
- }
-
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
-
- if (total_read) {
- iocb->ki_pos += total_read;
- return total_read;
- }
- return rc;
-}
-
-ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
-{
- return __cifs_readv(iocb, to, true);
-}
-
-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
-{
- return __cifs_readv(iocb, to, false);
-}
-
-ssize_t
-cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsFileInfo *cfile = (struct cifsFileInfo *)
- iocb->ki_filp->private_data;
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- int rc = -EACCES;
-
- /*
- * In strict cache mode we need to read from the server all the time
- * if we don't have level II oplock because the server can delay mtime
- * change - so we can't make a decision about invalidating the inode.
- * We can also fail when reading pages if there are mandatory locks
- * on pages affected by this read but not on the region from pos to
- * pos+len-1.
- */
- if (!CIFS_CACHE_READ(cinode))
- return cifs_user_readv(iocb, to);
-
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
- return generic_file_read_iter(iocb, to);
-
- /*
- * We need to hold the sem to be sure nobody modifies lock list
- * with a brlock that prevents reading.
- */
- down_read(&cinode->lock_sem);
- if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
- tcon->ses->server->vals->shared_lock_type,
- 0, NULL, CIFS_READ_OP))
- rc = generic_file_read_iter(iocb, to);
- up_read(&cinode->lock_sem);
- return rc;
-}
-
-static ssize_t
-cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
-{
- int rc = -EACCES;
- unsigned int bytes_read = 0;
- unsigned int total_read;
- unsigned int current_read_size;
- unsigned int rsize;
- struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- unsigned int xid;
- char *cur_offset;
- struct cifsFileInfo *open_file;
- struct cifs_io_parms io_parms = {0};
- int buf_type = CIFS_NO_BUFFER;
- __u32 pid;
-
- xid = get_xid();
- cifs_sb = CIFS_FILE_SB(file);
-
- /* FIXME: set up handlers for larger reads and/or convert to async */
- rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
-
- if (file->private_data == NULL) {
- rc = -EBADF;
- free_xid(xid);
- return rc;
- }
- open_file = file->private_data;
- tcon = tlink_tcon(open_file->tlink);
- server = cifs_pick_channel(tcon->ses);
-
- if (!server->ops->sync_read) {
- free_xid(xid);
- return -ENOSYS;
- }
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cifs_dbg(FYI, "attempting read on write only file instance\n");
-
- for (total_read = 0, cur_offset = read_data; read_size > total_read;
- total_read += bytes_read, cur_offset += bytes_read) {
- do {
- current_read_size = min_t(uint, read_size - total_read,
- rsize);
- /*
- * For Windows ME and 9x we do not want to request more
- * than the server negotiated since it will refuse the
- * read then.
- */
- if (!(tcon->ses->capabilities &
- tcon->ses->server->vals->cap_large_files)) {
- current_read_size = min_t(uint,
- current_read_size, CIFSMaxBufSize);
- }
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc != 0)
- break;
- }
- io_parms.pid = pid;
- io_parms.tcon = tcon;
- io_parms.offset = *offset;
- io_parms.length = current_read_size;
- io_parms.server = server;
- rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
- &bytes_read, &cur_offset,
- &buf_type);
- } while (rc == -EAGAIN);
-
- if (rc || (bytes_read == 0)) {
- if (total_read) {
- break;
- } else {
- free_xid(xid);
- return rc;
- }
- } else {
- cifs_stats_bytes_read(tcon, total_read);
- *offset += bytes_read;
- }
- }
- free_xid(xid);
- return total_read;
-}
-
-/*
- * If the page is mmap'ed into a process' page tables, then we need to make
- * sure that it doesn't change while being written back.
- */
-static vm_fault_t
-cifs_page_mkwrite(struct vm_fault *vmf)
-{
- struct page *page = vmf->page;
- struct file *file = vmf->vma->vm_file;
- struct inode *inode = file_inode(file);
-
- cifs_fscache_wait_on_page_write(inode, page);
-
- lock_page(page);
- return VM_FAULT_LOCKED;
-}
-
-static const struct vm_operations_struct cifs_file_vm_ops = {
- .fault = filemap_fault,
- .map_pages = filemap_map_pages,
- .page_mkwrite = cifs_page_mkwrite,
-};
-
-int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int xid, rc = 0;
- struct inode *inode = file_inode(file);
-
- xid = get_xid();
-
- if (!CIFS_CACHE_READ(CIFS_I(inode)))
- rc = cifs_zap_mapping(inode);
- if (!rc)
- rc = generic_file_mmap(file, vma);
- if (!rc)
- vma->vm_ops = &cifs_file_vm_ops;
-
- free_xid(xid);
- return rc;
-}
-
-int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int rc, xid;
-
- xid = get_xid();
-
- rc = cifs_revalidate_file(file);
- if (rc)
- cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
- rc);
- if (!rc)
- rc = generic_file_mmap(file, vma);
- if (!rc)
- vma->vm_ops = &cifs_file_vm_ops;
-
- free_xid(xid);
- return rc;
-}
-
-static void
-cifs_readv_complete(struct work_struct *work)
-{
- unsigned int i, got_bytes;
- struct cifs_readdata *rdata = container_of(work,
- struct cifs_readdata, work);
-
- got_bytes = rdata->got_bytes;
- for (i = 0; i < rdata->nr_pages; i++) {
- struct page *page = rdata->pages[i];
-
- lru_cache_add(page);
-
- if (rdata->result == 0 ||
- (rdata->result == -EAGAIN && got_bytes)) {
- flush_dcache_page(page);
- SetPageUptodate(page);
- } else
- SetPageError(page);
-
- unlock_page(page);
-
- if (rdata->result == 0 ||
- (rdata->result == -EAGAIN && got_bytes))
- cifs_readpage_to_fscache(rdata->mapping->host, page);
- else
- cifs_fscache_uncache_page(rdata->mapping->host, page);
-
- got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
-
- put_page(page);
- rdata->pages[i] = NULL;
- }
- kref_put(&rdata->refcount, cifs_readdata_release);
-}
-
-static int
-readpages_fill_pages(struct TCP_Server_Info *server,
- struct cifs_readdata *rdata, struct iov_iter *iter,
- unsigned int len)
-{
- int result = 0;
- unsigned int i;
- u64 eof;
- pgoff_t eof_index;
- unsigned int nr_pages = rdata->nr_pages;
- unsigned int page_offset = rdata->page_offset;
-
- /* determine the eof that the server (probably) has */
- eof = CIFS_I(rdata->mapping->host)->server_eof;
- eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
- cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
-
- rdata->got_bytes = 0;
- rdata->tailsz = PAGE_SIZE;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = rdata->pages[i];
- unsigned int to_read = rdata->pagesz;
- size_t n;
-
- if (i == 0)
- to_read -= page_offset;
- else
- page_offset = 0;
-
- n = to_read;
-
- if (len >= to_read) {
- len -= to_read;
- } else if (len > 0) {
- /* enough for partial page, fill and zero the rest */
- zero_user(page, len + page_offset, to_read - len);
- n = rdata->tailsz = len;
- len = 0;
- } else if (page->index > eof_index) {
- /*
- * The VFS will not try to do readahead past the
- * i_size, but it's possible that we have outstanding
- * writes with gaps in the middle and the i_size hasn't
- * caught up yet. Populate those with zeroed out pages
- * to prevent the VFS from repeatedly attempting to
- * fill them until the writes are flushed.
- */
- zero_user(page, 0, PAGE_SIZE);
- lru_cache_add(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
- put_page(page);
- rdata->pages[i] = NULL;
- rdata->nr_pages--;
- continue;
- } else {
- /* no need to hold page hostage */
- lru_cache_add(page);
- unlock_page(page);
- put_page(page);
- rdata->pages[i] = NULL;
- rdata->nr_pages--;
- continue;
- }
-
- if (iter)
- result = copy_page_from_iter(
- page, page_offset, n, iter);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- else if (rdata->mr)
- result = n;
-#endif
- else
- result = cifs_read_page_from_socket(
- server, page, page_offset, n);
- if (result < 0)
- break;
-
- rdata->got_bytes += result;
- }
-
- return rdata->got_bytes > 0 && result != -ECONNABORTED ?
- rdata->got_bytes : result;
-}
-
-static int
-cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
- struct cifs_readdata *rdata, unsigned int len)
-{
- return readpages_fill_pages(server, rdata, NULL, len);
-}
-
-static int
-cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
- struct cifs_readdata *rdata,
- struct iov_iter *iter)
-{
- return readpages_fill_pages(server, rdata, iter, iter->count);
-}
-
-static int
-readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
- unsigned int rsize, struct list_head *tmplist,
- unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
-{
- struct page *page, *tpage;
- unsigned int expected_index;
- int rc;
- gfp_t gfp = readahead_gfp_mask(mapping);
-
- INIT_LIST_HEAD(tmplist);
-
- page = lru_to_page(page_list);
-
- /*
- * Lock the page and put it in the cache. Since no one else
- * should have access to this page, we're safe to simply set
- * PG_locked without checking it first.
- */
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping,
- page->index, gfp);
-
- /* give up if we can't stick it in the cache */
- if (rc) {
- __ClearPageLocked(page);
- return rc;
- }
-
- /* move first page to the tmplist */
- *offset = (loff_t)page->index << PAGE_SHIFT;
- *bytes = PAGE_SIZE;
- *nr_pages = 1;
- list_move_tail(&page->lru, tmplist);
-
- /* now try and add more pages onto the request */
- expected_index = page->index + 1;
- list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
- /* discontinuity ? */
- if (page->index != expected_index)
- break;
-
- /* would this page push the read over the rsize? */
- if (*bytes + PAGE_SIZE > rsize)
- break;
-
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
- if (rc) {
- __ClearPageLocked(page);
- break;
- }
- list_move_tail(&page->lru, tmplist);
- (*bytes) += PAGE_SIZE;
- expected_index++;
- (*nr_pages)++;
- }
- return rc;
-}
-
-static int cifs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *page_list, unsigned num_pages)
-{
- int rc;
- int err = 0;
- struct list_head tmplist;
- struct cifsFileInfo *open_file = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
- struct TCP_Server_Info *server;
- pid_t pid;
- unsigned int xid;
-
- xid = get_xid();
- /*
- * Reads as many pages as possible from fscache. Returns -ENOBUFS
- * immediately if the cookie is negative
- *
- * After this point, every page in the list might have PG_fscache set,
- * so we will need to clean that up off of every page we don't use.
- */
- rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
- &num_pages);
- if (rc == 0) {
- free_xid(xid);
- return rc;
- }
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- rc = 0;
- server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
-
- cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
- __func__, file, mapping, num_pages);
-
- /*
- * Start with the page at end of list and move it to private
- * list. Do the same with any following pages until we hit
- * the rsize limit, hit an index discontinuity, or run out of
- * pages. Issue the async read and then start the loop again
- * until the list is empty.
- *
- * Note that list order is important. The page_list is in
- * the order of declining indexes. When we put the pages in
- * the rdata->pages, then we want them in increasing order.
- */
- while (!list_empty(page_list) && !err) {
- unsigned int i, nr_pages, bytes, rsize;
- loff_t offset;
- struct page *page, *tpage;
- struct cifs_readdata *rdata;
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
-
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
- &rsize, credits);
- if (rc)
- break;
-
- /*
- * Give up immediately if rsize is too small to read an entire
- * page. The VFS will fall back to readpage. We should never
- * reach this point however since we set ra_pages to 0 when the
- * rsize is smaller than a cache page.
- */
- if (unlikely(rsize < PAGE_SIZE)) {
- add_credits_and_wake_if(server, credits, 0);
- free_xid(xid);
- return 0;
- }
-
- nr_pages = 0;
- err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
- &nr_pages, &offset, &bytes);
- if (!nr_pages) {
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
- if (!rdata) {
- /* best to give up if we're out of mem */
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- lru_cache_add(page);
- unlock_page(page);
- put_page(page);
- }
- rc = -ENOMEM;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->server = server;
- rdata->mapping = mapping;
- rdata->offset = offset;
- rdata->bytes = bytes;
- rdata->pid = pid;
- rdata->pagesz = PAGE_SIZE;
- rdata->tailsz = PAGE_SIZE;
- rdata->read_into_pages = cifs_readpages_read_into_pages;
- rdata->copy_into_pages = cifs_readpages_copy_into_pages;
- rdata->credits = credits_on_stack;
-
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- rdata->pages[rdata->nr_pages++] = page;
- }
-
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
-
- if (!rc) {
- if (rdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = server->ops->async_readv(rdata);
- }
-
- if (rc) {
- add_credits_and_wake_if(server, &rdata->credits, 0);
- for (i = 0; i < rdata->nr_pages; i++) {
- page = rdata->pages[i];
- lru_cache_add(page);
- unlock_page(page);
- put_page(page);
- }
- /* Fallback to the readpage in error/reconnect cases */
- kref_put(&rdata->refcount, cifs_readdata_release);
- break;
- }
-
- kref_put(&rdata->refcount, cifs_readdata_release);
- }
-
- /* Any pages that have been shown to fscache but didn't get added to
- * the pagecache must be uncached before they get returned to the
- * allocator.
- */
- cifs_fscache_readpages_cancel(mapping->host, page_list);
- free_xid(xid);
- return rc;
-}
-
-/*
- * cifs_readpage_worker must be called with the page pinned
- */
-static int cifs_readpage_worker(struct file *file, struct page *page,
- loff_t *poffset)
-{
- char *read_data;
- int rc;
-
- /* Is the page cached? */
- rc = cifs_readpage_from_fscache(file_inode(file), page);
- if (rc == 0)
- goto read_complete;
-
- read_data = kmap(page);
- /* for reads over a certain size could initiate async read ahead */
-
- rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
-
- if (rc < 0)
- goto io_error;
- else
- cifs_dbg(FYI, "Bytes read %d\n", rc);
-
- /* we do not want atime to be less than mtime; it broke some apps */
- file_inode(file)->i_atime = current_time(file_inode(file));
- if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
- file_inode(file)->i_atime = file_inode(file)->i_mtime;
- else
- file_inode(file)->i_atime = current_time(file_inode(file));
-
- if (PAGE_SIZE > rc)
- memset(read_data + rc, 0, PAGE_SIZE - rc);
-
- flush_dcache_page(page);
- SetPageUptodate(page);
-
- /* send this page to the cache */
- cifs_readpage_to_fscache(file_inode(file), page);
-
- rc = 0;
-
-io_error:
- kunmap(page);
- unlock_page(page);
-
-read_complete:
- return rc;
-}
-
-static int cifs_readpage(struct file *file, struct page *page)
-{
- loff_t offset = page_file_offset(page);
- int rc = -EACCES;
- unsigned int xid;
-
- xid = get_xid();
-
- if (file->private_data == NULL) {
- rc = -EBADF;
- free_xid(xid);
- return rc;
- }
-
- cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
- page, (int)offset, (int)offset);
-
- rc = cifs_readpage_worker(file, page, &offset);
-
- free_xid(xid);
- return rc;
-}
-
-static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
-{
- struct cifsFileInfo *open_file;
-
- spin_lock(&cifs_inode->open_file_lock);
- list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- spin_unlock(&cifs_inode->open_file_lock);
- return 1;
- }
- }
- spin_unlock(&cifs_inode->open_file_lock);
- return 0;
-}
-
-/*
- * We do not want to update the file size from the server for inodes
- * open for write, to avoid races with writepage extending the file.
- * In the future we could consider refreshing the inode only on
- * increases in the file size, but this is tricky to do without
- * racing with writebehind page caching in the current Linux kernel
- * design.
- */
-bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
-{
- if (!cifsInode)
- return true;
-
- if (is_inode_writable(cifsInode)) {
- /* This inode is open for write at least once */
- struct cifs_sb_info *cifs_sb;
-
- cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- /* since there is no page cache to corrupt on
- direct I/O we can change the size safely */
- return true;
- }
-
- if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
- return true;
-
- return false;
- } else
- return true;
-}
-
-static int cifs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- int oncethru = 0;
- pgoff_t index = pos >> PAGE_SHIFT;
- loff_t offset = pos & (PAGE_SIZE - 1);
- loff_t page_start = pos & PAGE_MASK;
- loff_t i_size;
- struct page *page;
- int rc = 0;
-
- cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
-
-start:
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (PageUptodate(page))
- goto out;
-
- /*
- * If we write a full page it will be up to date, no need to read from
- * the server. If the write is short, we'll end up doing a sync write
- * instead.
- */
- if (len == PAGE_SIZE)
- goto out;
-
- /*
- * optimize away the read when we have an oplock, and we're not
- * expecting to use any of the data we'd be reading in. That
- * is, when the page lies beyond the EOF, or straddles the EOF
- * and the write will cover all of the existing data.
- */
- if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
- i_size = i_size_read(mapping->host);
- if (page_start >= i_size ||
- (offset == 0 && (pos + len) >= i_size)) {
- zero_user_segments(page, 0, offset,
- offset + len,
- PAGE_SIZE);
- /*
- * PageChecked means that the parts of the page
- * to which we're not writing are considered up
- * to date. Once the data is copied to the
- * page, it can be set uptodate.
- */
- SetPageChecked(page);
- goto out;
- }
- }
-
- if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
- /*
- * might as well read a page, it is fast enough. If we get
- * an error, we don't need to return it. cifs_write_end will
- * do a sync write instead since PG_uptodate isn't set.
- */
- cifs_readpage_worker(file, page, &page_start);
- put_page(page);
- oncethru = 1;
- goto start;
- } else {
- /* we could try using another file handle if there is one -
- but how would we lock it to prevent close of that handle
- racing with this read? In any case
- this will be written out by write_end so it is fine */
- }
-out:
- *pagep = page;
- return rc;
-}
-
-static int cifs_release_page(struct page *page, gfp_t gfp)
-{
- if (PagePrivate(page))
- return 0;
-
- return cifs_fscache_release_page(page, gfp);
-}
-
-static void cifs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
-
- if (offset == 0 && length == PAGE_SIZE)
- cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
-}
-
-static int cifs_launder_page(struct page *page)
-{
- int rc = 0;
- loff_t range_start = page_offset(page);
- loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0,
- .range_start = range_start,
- .range_end = range_end,
- };
-
- cifs_dbg(FYI, "Launder page: %p\n", page);
-
- if (clear_page_dirty_for_io(page))
- rc = cifs_writepage_locked(page, &wbc);
-
- cifs_fscache_invalidate_page(page, page->mapping->host);
- return rc;
-}
-
-void cifs_oplock_break(struct work_struct *work)
-{
- struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
- oplock_break);
- struct inode *inode = d_inode(cfile->dentry);
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
- int rc = 0;
- bool purge_cache = false;
- bool is_deferred = false;
- struct cifs_deferred_close *dclose;
-
- wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
- TASK_UNINTERRUPTIBLE);
-
- server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
- cfile->oplock_epoch, &purge_cache);
-
- if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
- cifs_has_mand_locks(cinode)) {
- cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
- inode);
- cinode->oplock = 0;
- }
-
- if (inode && S_ISREG(inode->i_mode)) {
- if (CIFS_CACHE_READ(cinode))
- break_lease(inode, O_RDONLY);
- else
- break_lease(inode, O_WRONLY);
- rc = filemap_fdatawrite(inode->i_mapping);
- if (!CIFS_CACHE_READ(cinode) || purge_cache) {
- rc = filemap_fdatawait(inode->i_mapping);
- mapping_set_error(inode->i_mapping, rc);
- cifs_zap_mapping(inode);
- }
- cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
- if (CIFS_CACHE_WRITE(cinode))
- goto oplock_break_ack;
- }
-
- rc = cifs_push_locks(cfile);
- if (rc)
- cifs_dbg(VFS, "Push locks rc = %d\n", rc);
-
-oplock_break_ack:
- /*
- * When an oplock break is received and there are no active
- * file handles, only a cached one, schedule the deferred close
- * immediately so that a new open will not use the cached handle.
- */
- spin_lock(&CIFS_I(inode)->deferred_lock);
- is_deferred = cifs_is_deferred_close(cfile, &dclose);
- spin_unlock(&CIFS_I(inode)->deferred_lock);
- if (is_deferred &&
- cfile->deferred_close_scheduled &&
- delayed_work_pending(&cfile->deferred)) {
- if (cancel_delayed_work(&cfile->deferred)) {
- _cifsFileInfo_put(cfile, false, false);
- goto oplock_break_done;
- }
- }
- /*
- * Releasing a stale oplock after a recent reconnect of the SMB session
- * using a now-incorrect file handle is not a data integrity issue, but
- * do not bother sending an oplock release if the session to the server
- * is still disconnected, since the oplock was already released by the
- * server.
- */
- if (!cfile->oplock_break_cancelled) {
- rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
- cinode);
- cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
- }
-oplock_break_done:
- _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
- cifs_done_oplock_break(cinode);
-}
-
-/*
- * The presence of cifs_direct_io() in the address space ops vector
- * allows open() O_DIRECT flags which would have failed otherwise.
- *
- * In the non-cached mode (mount with cache=none), we shunt off direct
- * read and write requests,
- * so this method should never be called.
- *
- * Direct IO is not yet supported in the cached mode.
- */
-static ssize_t
-cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
-{
- /*
- * FIXME
- * Eventually need to support direct IO for non forcedirectio mounts
- */
- return -EINVAL;
-}
-
-static int cifs_swap_activate(struct swap_info_struct *sis,
- struct file *swap_file, sector_t *span)
-{
- struct cifsFileInfo *cfile = swap_file->private_data;
- struct inode *inode = swap_file->f_mapping->host;
- unsigned long blocks;
- long long isize;
-
- cifs_dbg(FYI, "swap activate\n");
-
- spin_lock(&inode->i_lock);
- blocks = inode->i_blocks;
- isize = inode->i_size;
- spin_unlock(&inode->i_lock);
- if (blocks*512 < isize) {
- pr_warn("swap activate: swapfile has holes\n");
- return -EINVAL;
- }
- *span = sis->pages;
-
- pr_warn_once("Swap support over SMB3 is experimental\n");
-
- /*
- * TODO: consider adding ACL (or documenting how) to prevent other
- * users (on this or other systems) from reading it
- */
-
-
- /* TODO: add sk_set_memalloc(inet) or similar */
-
- if (cfile)
- cfile->swapfile = true;
- /*
- * TODO: Since file already open, we can't open with DENY_ALL here
- * but we could add call to grab a byte range lock to prevent others
- * from reading or writing the file
- */
-
- return 0;
-}
-
-static void cifs_swap_deactivate(struct file *file)
-{
- struct cifsFileInfo *cfile = file->private_data;
-
- cifs_dbg(FYI, "swap deactivate\n");
-
- /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
-
- if (cfile)
- cfile->swapfile = false;
-
- /* do we need to unpin (or unlock) the file */
-}
-
-const struct address_space_operations cifs_addr_ops = {
- .readpage = cifs_readpage,
- .readpages = cifs_readpages,
- .writepage = cifs_writepage,
- .writepages = cifs_writepages,
- .write_begin = cifs_write_begin,
- .write_end = cifs_write_end,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .releasepage = cifs_release_page,
- .direct_IO = cifs_direct_io,
- .invalidatepage = cifs_invalidate_page,
- .launder_page = cifs_launder_page,
- /*
- * TODO: investigate and, if useful, add a cifs_migratePage
- * helper (under CONFIG_MIGRATION) in the future, and also
- * investigate and add an is_dirty_writeback helper if needed
- */
- .swap_activate = cifs_swap_activate,
- .swap_deactivate = cifs_swap_deactivate,
-};
-
-/*
- * cifs_readpages requires the server to support a buffer large enough to
- * contain the header plus one complete page of data. Otherwise, we need
- * to leave cifs_readpages out of the address space operations.
- */
-const struct address_space_operations cifs_addr_ops_smallbuf = {
- .readpage = cifs_readpage,
- .writepage = cifs_writepage,
- .writepages = cifs_writepages,
- .write_begin = cifs_write_begin,
- .write_end = cifs_write_end,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .releasepage = cifs_release_page,
- .invalidatepage = cifs_invalidate_page,
- .launder_page = cifs_launder_page,
-};
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
deleted file mode 100644
index 8eedd20c44ab..000000000000
--- a/fs/cifs/fscache.c
+++ /dev/null
@@ -1,361 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * CIFS filesystem cache interface
- *
- * Copyright (c) 2010 Novell, Inc.
- * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
- *
- */
-#include "fscache.h"
-#include "cifsglob.h"
-#include "cifs_debug.h"
-#include "cifs_fs_sb.h"
-#include "cifsproto.h"
-
-/*
- * Key layout of CIFS server cache index object
- */
-struct cifs_server_key {
- struct {
- uint16_t family; /* address family */
- __be16 port; /* IP port */
- } hdr;
- union {
- struct in_addr ipv4_addr;
- struct in6_addr ipv6_addr;
- };
-} __packed;
-
-/*
- * Get a cookie for a server object keyed by {IPaddress,port,family} tuple
- */
-void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
-{
- const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
- const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
- const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
- struct cifs_server_key key;
- uint16_t key_len = sizeof(key.hdr);
-
- memset(&key, 0, sizeof(key));
-
- /*
- * Should not be a problem as sin_family/sin6_family overlays
- * sa_family field
- */
- key.hdr.family = sa->sa_family;
- switch (sa->sa_family) {
- case AF_INET:
- key.hdr.port = addr->sin_port;
- key.ipv4_addr = addr->sin_addr;
- key_len += sizeof(key.ipv4_addr);
- break;
-
- case AF_INET6:
- key.hdr.port = addr6->sin6_port;
- key.ipv6_addr = addr6->sin6_addr;
- key_len += sizeof(key.ipv6_addr);
- break;
-
- default:
- cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
- server->fscache = NULL;
- return;
- }
-
- server->fscache =
- fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
- &cifs_fscache_server_index_def,
- &key, key_len,
- NULL, 0,
- server, 0, true);
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, server, server->fscache);
-}
-
-void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
-{
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, server, server->fscache);
- fscache_relinquish_cookie(server->fscache, NULL, false);
- server->fscache = NULL;
-}
-
-void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
-{
- struct TCP_Server_Info *server = tcon->ses->server;
- char *sharename;
- struct cifs_fscache_super_auxdata auxdata;
-
- sharename = extract_sharename(tcon->treeName);
- if (IS_ERR(sharename)) {
- cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
- tcon->fscache = NULL;
- return;
- }
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.resource_id = tcon->resource_id;
- auxdata.vol_create_time = tcon->vol_create_time;
- auxdata.vol_serial_number = tcon->vol_serial_number;
-
- tcon->fscache =
- fscache_acquire_cookie(server->fscache,
- &cifs_fscache_super_index_def,
- sharename, strlen(sharename),
- &auxdata, sizeof(auxdata),
- tcon, 0, true);
- kfree(sharename);
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, server->fscache, tcon->fscache);
-}
-
-void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
-{
- struct cifs_fscache_super_auxdata auxdata;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.resource_id = tcon->resource_id;
- auxdata.vol_create_time = tcon->vol_create_time;
- auxdata.vol_serial_number = tcon->vol_serial_number;
-
- cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache);
- fscache_relinquish_cookie(tcon->fscache, &auxdata, false);
- tcon->fscache = NULL;
-}
-
-static void cifs_fscache_acquire_inode_cookie(struct cifsInodeInfo *cifsi,
- struct cifs_tcon *tcon)
-{
- struct cifs_fscache_inode_auxdata auxdata;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- cifsi->fscache =
- fscache_acquire_cookie(tcon->fscache,
- &cifs_fscache_inode_object_def,
- &cifsi->uniqueid, sizeof(cifsi->uniqueid),
- &auxdata, sizeof(auxdata),
- cifsi, cifsi->vfs_inode.i_size, true);
-}
-
-static void cifs_fscache_enable_inode_cookie(struct inode *inode)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
-
- if (cifsi->fscache)
- return;
-
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE))
- return;
-
- cifs_fscache_acquire_inode_cookie(cifsi, tcon);
-
- cifs_dbg(FYI, "%s: got FH cookie (0x%p/0x%p)\n",
- __func__, tcon->fscache, cifsi->fscache);
-}
-
-void cifs_fscache_release_inode_cookie(struct inode *inode)
-{
- struct cifs_fscache_inode_auxdata auxdata;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- if (cifsi->fscache) {
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
- /* fscache_relinquish_cookie does not seem to update auxdata */
- fscache_update_cookie(cifsi->fscache, &auxdata);
- fscache_relinquish_cookie(cifsi->fscache, &auxdata, false);
- cifsi->fscache = NULL;
- }
-}
-
-void cifs_fscache_update_inode_cookie(struct inode *inode)
-{
- struct cifs_fscache_inode_auxdata auxdata;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- if (cifsi->fscache) {
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
- fscache_update_cookie(cifsi->fscache, &auxdata);
- }
-}
-
-void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
-{
- cifs_fscache_enable_inode_cookie(inode);
-}
-
-void cifs_fscache_reset_inode_cookie(struct inode *inode)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- struct fscache_cookie *old = cifsi->fscache;
-
- if (cifsi->fscache) {
- /* retire the current fscache cache and get a new one */
- fscache_relinquish_cookie(cifsi->fscache, NULL, true);
-
- cifs_fscache_acquire_inode_cookie(cifsi, tcon);
- cifs_dbg(FYI, "%s: new cookie 0x%p oldcookie 0x%p\n",
- __func__, cifsi->fscache, old);
- }
-}
-
-int cifs_fscache_release_page(struct page *page, gfp_t gfp)
-{
- if (PageFsCache(page)) {
- struct inode *inode = page->mapping->host;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, page, cifsi->fscache);
- if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
- return 0;
- }
-
- return 1;
-}
-
-static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
- int error)
-{
- cifs_dbg(FYI, "%s: (0x%p/%d)\n", __func__, page, error);
- if (!error)
- SetPageUptodate(page);
- unlock_page(page);
-}
-
-/*
- * Retrieve a page from FS-Cache
- */
-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
-{
- int ret;
-
- cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
- __func__, CIFS_I(inode)->fscache, page, inode);
- ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
- cifs_readpage_from_fscache_complete,
- NULL,
- GFP_KERNEL);
- switch (ret) {
-
- case 0: /* page found in fscache, read submitted */
- cifs_dbg(FYI, "%s: submitted\n", __func__);
- return ret;
- case -ENOBUFS: /* page won't be cached */
- case -ENODATA: /* page not in cache */
- cifs_dbg(FYI, "%s: %d\n", __func__, ret);
- return 1;
-
- default:
- cifs_dbg(VFS, "unknown error ret = %d\n", ret);
- }
- return ret;
-}
-
-/*
- * Retrieve a set of pages from FS-Cache
- */
-int __cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
-{
- int ret;
-
- cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
- __func__, CIFS_I(inode)->fscache, *nr_pages, inode);
- ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
- pages, nr_pages,
- cifs_readpage_from_fscache_complete,
- NULL,
- mapping_gfp_mask(mapping));
- switch (ret) {
- case 0: /* read submitted to the cache for all pages */
- cifs_dbg(FYI, "%s: submitted\n", __func__);
- return ret;
-
- case -ENOBUFS: /* some pages are not cached and can't be */
- case -ENODATA: /* some pages are not cached */
- cifs_dbg(FYI, "%s: no page\n", __func__);
- return 1;
-
- default:
- cifs_dbg(FYI, "unknown error ret = %d\n", ret);
- }
-
- return ret;
-}
-
-void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- int ret;
-
- WARN_ON(!cifsi->fscache);
-
- cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
- __func__, cifsi->fscache, page, inode);
- ret = fscache_write_page(cifsi->fscache, page,
- cifsi->vfs_inode.i_size, GFP_KERNEL);
- if (ret != 0)
- fscache_uncache_page(cifsi->fscache, page);
-}
-
-void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages)
-{
- cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n",
- __func__, CIFS_I(inode)->fscache, inode);
- fscache_readpages_cancel(CIFS_I(inode)->fscache, pages);
-}
-
-void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct fscache_cookie *cookie = cifsi->fscache;
-
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
- fscache_wait_on_page_write(cookie, page);
- fscache_uncache_page(cookie, page);
-}
-
-void __cifs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct fscache_cookie *cookie = cifsi->fscache;
-
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
- fscache_wait_on_page_write(cookie, page);
-}
-
-void __cifs_fscache_uncache_page(struct inode *inode, struct page *page)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct fscache_cookie *cookie = cifsi->fscache;
-
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
- fscache_uncache_page(cookie, page);
-}
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
deleted file mode 100644
index 9baa1d0f22bd..000000000000
--- a/fs/cifs/fscache.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
-/*
- * CIFS filesystem cache interface definitions
- *
- * Copyright (c) 2010 Novell, Inc.
- * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
- *
- */
-#ifndef _CIFS_FSCACHE_H
-#define _CIFS_FSCACHE_H
-
-#include <linux/fscache.h>
-
-#include "cifsglob.h"
-
-#ifdef CONFIG_CIFS_FSCACHE
-
-/*
- * Auxiliary data attached to CIFS superblock within the cache
- */
-struct cifs_fscache_super_auxdata {
- u64 resource_id; /* unique server resource id */
- __le64 vol_create_time;
- u32 vol_serial_number;
-} __packed;
-
-/*
- * Auxiliary data attached to CIFS inode within the cache
- */
-struct cifs_fscache_inode_auxdata {
- u64 last_write_time_sec;
- u64 last_change_time_sec;
- u32 last_write_time_nsec;
- u32 last_change_time_nsec;
- u64 eof;
-};
-
-/*
- * cache.c
- */
-extern struct fscache_netfs cifs_fscache_netfs;
-extern const struct fscache_cookie_def cifs_fscache_server_index_def;
-extern const struct fscache_cookie_def cifs_fscache_super_index_def;
-extern const struct fscache_cookie_def cifs_fscache_inode_object_def;
-
-extern int cifs_fscache_register(void);
-extern void cifs_fscache_unregister(void);
-
-/*
- * fscache.c
- */
-extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
-extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
-
-extern void cifs_fscache_release_inode_cookie(struct inode *);
-extern void cifs_fscache_update_inode_cookie(struct inode *inode);
-extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
-extern void cifs_fscache_reset_inode_cookie(struct inode *);
-
-extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
-extern void __cifs_fscache_wait_on_page_write(struct inode *inode, struct page *page);
-extern void __cifs_fscache_uncache_page(struct inode *inode, struct page *page);
-extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
-extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
-extern int __cifs_readpages_from_fscache(struct inode *,
- struct address_space *,
- struct list_head *,
- unsigned *);
-extern void __cifs_fscache_readpages_cancel(struct inode *, struct list_head *);
-
-extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
-
-static inline void cifs_fscache_invalidate_page(struct page *page,
- struct inode *inode)
-{
- if (PageFsCache(page))
- __cifs_fscache_invalidate_page(page, inode);
-}
-
-static inline void cifs_fscache_wait_on_page_write(struct inode *inode,
- struct page *page)
-{
- if (PageFsCache(page))
- __cifs_fscache_wait_on_page_write(inode, page);
-}
-
-static inline void cifs_fscache_uncache_page(struct inode *inode,
- struct page *page)
-{
- if (PageFsCache(page))
- __cifs_fscache_uncache_page(inode, page);
-}
-
-static inline int cifs_readpage_from_fscache(struct inode *inode,
- struct page *page)
-{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpage_from_fscache(inode, page);
-
- return -ENOBUFS;
-}
-
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
-{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpages_from_fscache(inode, mapping, pages,
- nr_pages);
- return -ENOBUFS;
-}
-
-static inline void cifs_readpage_to_fscache(struct inode *inode,
- struct page *page)
-{
- if (PageFsCache(page))
- __cifs_readpage_to_fscache(inode, page);
-}
-
-static inline void cifs_fscache_readpages_cancel(struct inode *inode,
- struct list_head *pages)
-{
- if (CIFS_I(inode)->fscache)
- return __cifs_fscache_readpages_cancel(inode, pages);
-}
-
-#else /* CONFIG_CIFS_FSCACHE */
-static inline int cifs_fscache_register(void) { return 0; }
-static inline void cifs_fscache_unregister(void) {}
-
-static inline void
-cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
-static inline void
-cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
-static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {}
-static inline void
-cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
-
-static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
-static inline void cifs_fscache_update_inode_cookie(struct inode *inode) {}
-static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
- struct file *filp) {}
-static inline void cifs_fscache_reset_inode_cookie(struct inode *inode) {}
-static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
-{
- return 1; /* May release page */
-}
-
-static inline void cifs_fscache_invalidate_page(struct page *page,
- struct inode *inode) {}
-static inline void cifs_fscache_wait_on_page_write(struct inode *inode,
- struct page *page) {}
-static inline void cifs_fscache_uncache_page(struct inode *inode,
- struct page *page) {}
-
-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
-{
- return -ENOBUFS;
-}
-
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
-{
- return -ENOBUFS;
-}
-
-static inline void cifs_readpage_to_fscache(struct inode *inode,
- struct page *page) {}
-
-static inline void cifs_fscache_readpages_cancel(struct inode *inode,
- struct list_head *pages)
-{
-}
-
-#endif /* CONFIG_CIFS_FSCACHE */
-
-#endif /* _CIFS_FSCACHE_H */
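The header removed above follows the usual kernel pattern for an optional subsystem: each cache hook has a static inline wrapper that checks for a cookie when CONFIG_CIFS_FSCACHE is enabled and collapses to a trivial stub when it is not, so callers never need #ifdefs of their own. A minimal, hedged sketch of that pattern follows (all names are invented for illustration and are not CIFS or fscache API):

/* Illustrative sketch only -- hypothetical names, not part of fs/cifs. */
#ifdef CONFIG_MY_CACHE
int __my_cache_readpage(struct inode *inode, struct page *page);

static inline int my_cache_readpage(struct inode *inode, struct page *page)
{
	/* Only call into the cache when a cookie was attached to the inode. */
	if (MY_I(inode)->cache_cookie)
		return __my_cache_readpage(inode, page);
	return -ENOBUFS;	/* no cookie: caller falls back to the server */
}
#else
static inline int my_cache_readpage(struct inode *inode, struct page *page)
{
	return -ENOBUFS;	/* caching compiled out: always report a miss */
}
#endif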
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
deleted file mode 100644
index 8297703492ee..000000000000
--- a/fs/cifs/smb2inode.c
+++ /dev/null
@@ -1,738 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- *
- * Copyright (C) International Business Machines Corp., 2002, 2011
- * Etersoft, 2012
- * Author(s): Pavel Shilovsky (pshilovsky@samba.org),
- * Steve French (sfrench@us.ibm.com)
- *
- */
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <asm/div64.h>
-#include "cifsfs.h"
-#include "cifspdu.h"
-#include "cifsglob.h"
-#include "cifsproto.h"
-#include "cifs_debug.h"
-#include "cifs_fs_sb.h"
-#include "cifs_unicode.h"
-#include "fscache.h"
-#include "smb2glob.h"
-#include "smb2pdu.h"
-#include "smb2proto.h"
-
-static void
-free_set_inf_compound(struct smb_rqst *rqst)
-{
- if (rqst[1].rq_iov)
- SMB2_set_info_free(&rqst[1]);
- if (rqst[2].rq_iov)
- SMB2_close_free(&rqst[2]);
-}
-
-
-struct cop_vars {
- struct cifs_open_parms oparms;
- struct kvec rsp_iov[3];
- struct smb_rqst rqst[3];
- struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
- struct kvec qi_iov[1];
- struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
- struct kvec close_iov[1];
- struct smb2_file_rename_info rename_info;
- struct smb2_file_link_info link_info;
-};
-
-static int
-smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- __u32 desired_access, __u32 create_disposition,
- __u32 create_options, umode_t mode, void *ptr, int command,
- struct cifsFileInfo *cfile)
-{
- struct cop_vars *vars = NULL;
- struct kvec *rsp_iov;
- struct smb_rqst *rqst;
- int rc;
- __le16 *utf16_path = NULL;
- __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
- struct cifs_fid fid;
- struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server;
- int num_rqst = 0;
- int resp_buftype[3];
- struct smb2_query_info_rsp *qi_rsp = NULL;
- int flags = 0;
- __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
- unsigned int size[2];
- void *data[2];
- int len;
-
- vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
- if (vars == NULL)
- return -ENOMEM;
- rqst = &vars->rqst[0];
- rsp_iov = &vars->rsp_iov[0];
-
- server = cifs_pick_channel(ses);
-
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
- resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
-
- /* We already have a handle so we can skip the open */
- if (cfile)
- goto after_open;
-
- /* Open */
- utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
- if (!utf16_path) {
- rc = -ENOMEM;
- goto finished;
- }
-
- vars->oparms.tcon = tcon;
- vars->oparms.desired_access = desired_access;
- vars->oparms.disposition = create_disposition;
- vars->oparms.create_options = cifs_create_options(cifs_sb, create_options);
- vars->oparms.fid = &fid;
- vars->oparms.reconnect = false;
- vars->oparms.mode = mode;
- vars->oparms.cifs_sb = cifs_sb;
-
- rqst[num_rqst].rq_iov = &vars->open_iov[0];
- rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
- rc = SMB2_open_init(tcon, server,
- &rqst[num_rqst], &oplock, &vars->oparms,
- utf16_path);
- kfree(utf16_path);
- if (rc)
- goto finished;
-
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- after_open:
- num_rqst++;
- rc = 0;
-
- /* Operation */
- switch (command) {
- case SMB2_OP_QUERY_INFO:
- rqst[num_rqst].rq_iov = &vars->qi_iov[0];
- rqst[num_rqst].rq_nvec = 1;
-
- if (cfile)
- rc = SMB2_query_info_init(tcon, server,
- &rqst[num_rqst],
- cfile->fid.persistent_fid,
- cfile->fid.volatile_fid,
- FILE_ALL_INFORMATION,
- SMB2_O_INFO_FILE, 0,
- sizeof(struct smb2_file_all_info) +
- PATH_MAX * 2, 0, NULL);
- else {
- rc = SMB2_query_info_init(tcon, server,
- &rqst[num_rqst],
- COMPOUND_FID,
- COMPOUND_FID,
- FILE_ALL_INFORMATION,
- SMB2_O_INFO_FILE, 0,
- sizeof(struct smb2_file_all_info) +
- PATH_MAX * 2, 0, NULL);
- if (!rc) {
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst]);
- }
- }
-
- if (rc)
- goto finished;
- num_rqst++;
- trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
- full_path);
- break;
- case SMB2_OP_POSIX_QUERY_INFO:
- rqst[num_rqst].rq_iov = &vars->qi_iov[0];
- rqst[num_rqst].rq_nvec = 1;
-
- if (cfile)
- rc = SMB2_query_info_init(tcon, server,
- &rqst[num_rqst],
- cfile->fid.persistent_fid,
- cfile->fid.volatile_fid,
- SMB_FIND_FILE_POSIX_INFO,
- SMB2_O_INFO_FILE, 0,
- /* TBD: fix following to allow for longer SIDs */
- sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
- (sizeof(struct cifs_sid) * 2), 0, NULL);
- else {
- rc = SMB2_query_info_init(tcon, server,
- &rqst[num_rqst],
- COMPOUND_FID,
- COMPOUND_FID,
- SMB_FIND_FILE_POSIX_INFO,
- SMB2_O_INFO_FILE, 0,
- sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
- (sizeof(struct cifs_sid) * 2), 0, NULL);
- if (!rc) {
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst]);
- }
- }
-
- if (rc)
- goto finished;
- num_rqst++;
- trace_smb3_posix_query_info_compound_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- case SMB2_OP_DELETE:
- trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- case SMB2_OP_MKDIR:
- /*
- * Directories are created through parameters in the
- * SMB2_open() call.
- */
- trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- case SMB2_OP_RMDIR:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
- rqst[num_rqst].rq_nvec = 1;
-
- size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
- data[0] = &delete_pending[0];
-
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_DISPOSITION_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- if (rc)
- goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
- trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- case SMB2_OP_SET_EOF:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
- rqst[num_rqst].rq_nvec = 1;
-
- size[0] = 8; /* sizeof __le64 */
- data[0] = ptr;
-
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_END_OF_FILE_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- if (rc)
- goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
- trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- case SMB2_OP_SET_INFO:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
- rqst[num_rqst].rq_nvec = 1;
-
-
- size[0] = sizeof(FILE_BASIC_INFO);
- data[0] = ptr;
-
- if (cfile)
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst],
- cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, current->tgid,
- FILE_BASIC_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- else {
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst],
- COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_BASIC_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- if (!rc) {
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst]);
- }
- }
-
- if (rc)
- goto finished;
- num_rqst++;
- trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
- full_path);
- break;
- case SMB2_OP_RENAME:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
- rqst[num_rqst].rq_nvec = 2;
-
- len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
-
- vars->rename_info.ReplaceIfExists = 1;
- vars->rename_info.RootDirectory = 0;
- vars->rename_info.FileNameLength = cpu_to_le32(len);
-
- size[0] = sizeof(struct smb2_file_rename_info);
- data[0] = &vars->rename_info;
-
- size[1] = len + 2 /* null */;
- data[1] = (__le16 *)ptr;
-
- if (cfile)
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst],
- cfile->fid.persistent_fid,
- cfile->fid.volatile_fid,
- current->tgid, FILE_RENAME_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- else {
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst],
- COMPOUND_FID, COMPOUND_FID,
- current->tgid, FILE_RENAME_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- if (!rc) {
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst]);
- }
- }
- if (rc)
- goto finished;
- num_rqst++;
- trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- case SMB2_OP_HARDLINK:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
- rqst[num_rqst].rq_nvec = 2;
-
- len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
-
- vars->link_info.ReplaceIfExists = 0;
- vars->link_info.RootDirectory = 0;
- vars->link_info.FileNameLength = cpu_to_le32(len);
-
- size[0] = sizeof(struct smb2_file_link_info);
- data[0] = &vars->link_info;
-
- size[1] = len + 2 /* null */;
- data[1] = (__le16 *)ptr;
-
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_LINK_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- if (rc)
- goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
- trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
- break;
- default:
- cifs_dbg(VFS, "Invalid command\n");
- rc = -EINVAL;
- }
- if (rc)
- goto finished;
-
- /* We already have a handle so we can skip the close */
- if (cfile)
- goto after_close;
- /* Close */
- flags |= CIFS_CP_CREATE_CLOSE_OP;
- rqst[num_rqst].rq_iov = &vars->close_iov[0];
- rqst[num_rqst].rq_nvec = 1;
- rc = SMB2_close_init(tcon, server,
- &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, false);
- smb2_set_related(&rqst[num_rqst]);
- if (rc)
- goto finished;
- after_close:
- num_rqst++;
-
- if (cfile) {
- cifsFileInfo_put(cfile);
- cfile = NULL;
- rc = compound_send_recv(xid, ses, server,
- flags, num_rqst - 2,
- &rqst[1], &resp_buftype[1],
- &rsp_iov[1]);
- } else
- rc = compound_send_recv(xid, ses, server,
- flags, num_rqst,
- rqst, resp_buftype,
- rsp_iov);
-
- finished:
- if (cfile)
- cifsFileInfo_put(cfile);
-
- SMB2_open_free(&rqst[0]);
- if (rc == -EREMCHG) {
- pr_warn_once("server share %s deleted\n", tcon->treeName);
- tcon->need_reconnect = true;
- }
-
- switch (command) {
- case SMB2_OP_QUERY_INFO:
- if (rc == 0) {
- qi_rsp = (struct smb2_query_info_rsp *)
- rsp_iov[1].iov_base;
- rc = smb2_validate_and_copy_iov(
- le16_to_cpu(qi_rsp->OutputBufferOffset),
- le32_to_cpu(qi_rsp->OutputBufferLength),
- &rsp_iov[1], sizeof(struct smb2_file_all_info),
- ptr);
- }
- if (rqst[1].rq_iov)
- SMB2_query_info_free(&rqst[1]);
- if (rqst[2].rq_iov)
- SMB2_close_free(&rqst[2]);
- if (rc)
- trace_smb3_query_info_compound_err(xid, ses->Suid,
- tcon->tid, rc);
- else
- trace_smb3_query_info_compound_done(xid, ses->Suid,
- tcon->tid);
- break;
- case SMB2_OP_POSIX_QUERY_INFO:
- if (rc == 0) {
- qi_rsp = (struct smb2_query_info_rsp *)
- rsp_iov[1].iov_base;
- rc = smb2_validate_and_copy_iov(
- le16_to_cpu(qi_rsp->OutputBufferOffset),
- le32_to_cpu(qi_rsp->OutputBufferLength),
- &rsp_iov[1], sizeof(struct smb311_posix_qinfo) /* add SIDs */, ptr);
- }
- if (rqst[1].rq_iov)
- SMB2_query_info_free(&rqst[1]);
- if (rqst[2].rq_iov)
- SMB2_close_free(&rqst[2]);
- if (rc)
- trace_smb3_posix_query_info_compound_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_posix_query_info_compound_done(xid, ses->Suid, tcon->tid);
- break;
- case SMB2_OP_DELETE:
- if (rc)
- trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
- if (rqst[1].rq_iov)
- SMB2_close_free(&rqst[1]);
- break;
- case SMB2_OP_MKDIR:
- if (rc)
- trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
- if (rqst[1].rq_iov)
- SMB2_close_free(&rqst[1]);
- break;
- case SMB2_OP_HARDLINK:
- if (rc)
- trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
- free_set_inf_compound(rqst);
- break;
- case SMB2_OP_RENAME:
- if (rc)
- trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
- free_set_inf_compound(rqst);
- break;
- case SMB2_OP_RMDIR:
- if (rc)
- trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
- free_set_inf_compound(rqst);
- break;
- case SMB2_OP_SET_EOF:
- if (rc)
- trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc);
- else
- trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
- free_set_inf_compound(rqst);
- break;
- case SMB2_OP_SET_INFO:
- if (rc)
- trace_smb3_set_info_compound_err(xid, ses->Suid,
- tcon->tid, rc);
- else
- trace_smb3_set_info_compound_done(xid, ses->Suid,
- tcon->tid);
- free_set_inf_compound(rqst);
- break;
- }
- free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
- free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
- free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
- kfree(vars);
- return rc;
-}
-
-void
-move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src)
-{
- memcpy(dst, src, (size_t)(&src->CurrentByteOffset) - (size_t)src);
- dst->CurrentByteOffset = src->CurrentByteOffset;
- dst->Mode = src->Mode;
- dst->AlignmentRequirement = src->AlignmentRequirement;
- dst->IndexNumber1 = 0; /* we don't use it */
-}
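move_smb2_info_to_cifs() above copies the leading portion of the SMB2 structure that is laid out identically in FILE_ALL_INFO; the pointer subtraction is simply the byte offset of CurrentByteOffset within struct smb2_file_all_info. For illustration only (not a proposed change), the same length can be expressed with offsetof():

/* Illustrative equivalent of the prefix-length computation above. */
memcpy(dst, src, offsetof(struct smb2_file_all_info, CurrentByteOffset));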
-
-int
-smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- FILE_ALL_INFO *data, bool *adjust_tz, bool *reparse)
-{
- int rc;
- struct smb2_file_all_info *smb2_data;
- __u32 create_options = 0;
- struct cifsFileInfo *cfile;
- struct cached_fid *cfid = NULL;
-
- *adjust_tz = false;
- *reparse = false;
-
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
- GFP_KERNEL);
- if (smb2_data == NULL)
- return -ENOMEM;
-
- /* If it is a root and its handle is cached then use it */
- rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
- if (!rc) {
- if (tcon->crfid.file_all_info_is_valid) {
- move_smb2_info_to_cifs(data,
- &tcon->crfid.file_all_info);
- } else {
- rc = SMB2_query_info(xid, tcon,
- cfid->fid->persistent_fid,
- cfid->fid->volatile_fid, smb2_data);
- if (!rc)
- move_smb2_info_to_cifs(data, smb2_data);
- }
- close_cached_dir(cfid);
- goto out;
- }
-
- cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
- ACL_NO_MODE, smb2_data, SMB2_OP_QUERY_INFO, cfile);
- if (rc == -EOPNOTSUPP) {
- *reparse = true;
- create_options |= OPEN_REPARSE_POINT;
-
- /* Failed on a symbolic link - query a reparse point info */
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_QUERY_INFO, NULL);
- }
- if (rc)
- goto out;
-
- move_smb2_info_to_cifs(data, smb2_data);
-out:
- kfree(smb2_data);
- return rc;
-}
-
-
-int
-smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- struct smb311_posix_qinfo *data, bool *adjust_tz, bool *reparse)
-{
- int rc;
- __u32 create_options = 0;
- struct cifsFileInfo *cfile;
- struct smb311_posix_qinfo *smb2_data;
-
- *adjust_tz = false;
- *reparse = false;
-
- /* BB TODO: Make struct larger when adding support for parsing owner SIDs */
- smb2_data = kzalloc(sizeof(struct smb311_posix_qinfo),
- GFP_KERNEL);
- if (smb2_data == NULL)
- return -ENOMEM;
-
- /*
- * BB TODO: Add support for using the cached root handle.
- * Create SMB2_query_posix_info worker function to do non-compounded query
- * when we already have an open file handle for this. For now this is fast enough
- * (always using the compounded version).
- */
-
- cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
- ACL_NO_MODE, smb2_data, SMB2_OP_POSIX_QUERY_INFO, cfile);
- if (rc == -EOPNOTSUPP) {
- /* BB TODO: When support for special files is added to Samba, re-verify this path */
- *reparse = true;
- create_options |= OPEN_REPARSE_POINT;
-
- /* Failed on a symbolic link - query a reparse point info */
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_POSIX_QUERY_INFO, NULL);
- }
- if (rc)
- goto out;
-
- /* TODO: will need to allow for the 2 SIDs when we add support for getting owner UID/GID */
- memcpy(data, smb2_data, sizeof(struct smb311_posix_qinfo));
-
-out:
- kfree(smb2_data);
- return rc;
-}
-
-int
-smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
- struct cifs_tcon *tcon, const char *name,
- struct cifs_sb_info *cifs_sb)
-{
- return smb2_compound_op(xid, tcon, cifs_sb, name,
- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
- NULL);
-}
-
-void
-smb2_mkdir_setinfo(struct inode *inode, const char *name,
- struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
- const unsigned int xid)
-{
- FILE_BASIC_INFO data;
- struct cifsInodeInfo *cifs_i;
- struct cifsFileInfo *cfile;
- u32 dosattrs;
- int tmprc;
-
- memset(&data, 0, sizeof(data));
- cifs_i = CIFS_I(inode);
- dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
- data.Attributes = cpu_to_le32(dosattrs);
- cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
- tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, ACL_NO_MODE,
- &data, SMB2_OP_SET_INFO, cfile);
- if (tmprc == 0)
- cifs_i->cifsAttrs = dosattrs;
-}
-
-int
-smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
- struct cifs_sb_info *cifs_sb)
-{
- return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_NOT_FILE, ACL_NO_MODE,
- NULL, SMB2_OP_RMDIR, NULL);
-}
-
-int
-smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
- struct cifs_sb_info *cifs_sb)
-{
- return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL);
-}
-
-static int
-smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
- const char *from_name, const char *to_name,
- struct cifs_sb_info *cifs_sb, __u32 access, int command,
- struct cifsFileInfo *cfile)
-{
- __le16 *smb2_to_name = NULL;
- int rc;
-
- smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
- if (smb2_to_name == NULL) {
- rc = -ENOMEM;
- goto smb2_rename_path;
- }
- rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
- FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
- command, cfile);
-smb2_rename_path:
- kfree(smb2_to_name);
- return rc;
-}
-
-int
-smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
- const char *from_name, const char *to_name,
- struct cifs_sb_info *cifs_sb)
-{
- struct cifsFileInfo *cfile;
-
- cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
-
- return smb2_set_path_attr(xid, tcon, from_name, to_name,
- cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
-}
-
-int
-smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- const char *from_name, const char *to_name,
- struct cifs_sb_info *cifs_sb)
-{
- return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
- FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK,
- NULL);
-}
-
-int
-smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
- const char *full_path, __u64 size,
- struct cifs_sb_info *cifs_sb, bool set_alloc)
-{
- __le64 eof = cpu_to_le64(size);
-
- return smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
- &eof, SMB2_OP_SET_EOF, NULL);
-}
-
-int
-smb2_set_file_info(struct inode *inode, const char *full_path,
- FILE_BASIC_INFO *buf, const unsigned int xid)
-{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct tcon_link *tlink;
- int rc;
-
- if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
- (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
- (buf->Attributes == 0))
- return 0; /* would be a no op, no sense sending this */
-
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink))
- return PTR_ERR(tlink);
-
- rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
- FILE_WRITE_ATTRIBUTES, FILE_OPEN,
- 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, NULL);
- cifs_put_tlink(tlink);
- return rc;
-}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
deleted file mode 100644
index 33cfd0a1adf1..000000000000
--- a/fs/cifs/smb2pdu.h
+++ /dev/null
@@ -1,1007 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
-/*
- *
- * Copyright (c) International Business Machines Corp., 2009, 2013
- * Etersoft, 2012
- * Author(s): Steve French (sfrench@us.ibm.com)
- * Pavel Shilovsky (pshilovsky@samba.org) 2012
- *
- */
-
-#ifndef _SMB2PDU_H
-#define _SMB2PDU_H
-
-#include <net/sock.h>
-#include "cifsacl.h"
-
-/* 52 transform hdr + 64 hdr + 88 create rsp */
-#define SMB2_TRANSFORM_HEADER_SIZE 52
-#define MAX_SMB2_HDR_SIZE 204
-
-/* The total header size for SMB2 read and write */
-#define SMB2_READWRITE_PDU_HEADER_SIZE (48 + sizeof(struct smb2_hdr))
-
-/* See MS-SMB2 2.2.43 */
-struct smb2_rdma_transform {
- __le16 RdmaDescriptorOffset;
- __le16 RdmaDescriptorLength;
- __le32 Channel; /* for values see channel description in smb2 read above */
- __le16 TransformCount;
- __le16 Reserved1;
- __le32 Reserved2;
-} __packed;
-
-/* TransformType */
-#define SMB2_RDMA_TRANSFORM_TYPE_ENCRYPTION 0x0001
-#define SMB2_RDMA_TRANSFORM_TYPE_SIGNING 0x0002
-
-struct smb2_rdma_crypto_transform {
- __le16 TransformType;
- __le16 SignatureLength;
- __le16 NonceLength;
- __u16 Reserved;
- __u8 Signature[]; /* variable length */
- /* u8 Nonce[] */
- /* followed by padding */
-} __packed;
-
-/*
- * Definitions for SMB2 Protocol Data Units (network frames)
- *
- * See MS-SMB2.PDF specification for protocol details.
- * The Naming convention is the lower case version of the SMB2
- * command code name for the struct. Note that structures must be packed.
- *
- */
-
-#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
-
-#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9)
-
-struct smb2_err_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __le16 Reserved; /* MBZ */
- __le32 ByteCount; /* even if zero, at least one byte follows */
- __u8 ErrorData[1]; /* variable length */
-} __packed;
-
-#define SYMLINK_ERROR_TAG 0x4c4d5953
-
-struct smb2_symlink_err_rsp {
- __le32 SymLinkLength;
- __le32 SymLinkErrorTag;
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __le16 UnparsedPathLength;
- __le16 SubstituteNameOffset;
- __le16 SubstituteNameLength;
- __le16 PrintNameOffset;
- __le16 PrintNameLength;
- __le32 Flags;
- __u8 PathBuffer[];
-} __packed;
-
-/* SMB 3.1.1 and later dialects. See MS-SMB2 section 2.2.2.1 */
-struct smb2_error_context_rsp {
- __le32 ErrorDataLength;
- __le32 ErrorId;
- __u8 ErrorContextData; /* ErrorDataLength long array */
-} __packed;
-
-/* ErrorId values */
-#define SMB2_ERROR_ID_DEFAULT 0x00000000
-#define SMB2_ERROR_ID_SHARE_REDIRECT cpu_to_le32(0x72645253) /* "rdRS" */
-
-/* Defines for Type field below (see MS-SMB2 2.2.2.2.2.1) */
-#define MOVE_DST_IPADDR_V4 cpu_to_le32(0x00000001)
-#define MOVE_DST_IPADDR_V6 cpu_to_le32(0x00000002)
-
-struct move_dst_ipaddr {
- __le32 Type;
- __u32 Reserved;
- __u8 address[16]; /* IPv4 followed by 12 bytes rsvd or IPv6 address */
-} __packed;
-
-struct share_redirect_error_context_rsp {
- __le32 StructureSize;
- __le32 NotificationType;
- __le32 ResourceNameOffset;
- __le32 ResourceNameLength;
- __le16 Reserved;
- __le16 TargetType;
- __le32 IPAddrCount;
- struct move_dst_ipaddr IpAddrMoveList[];
- /* __u8 ResourceName[] */ /* Name of share as counted Unicode string */
-} __packed;
-
-/*
- * Maximum number of iovs we need for an open/create request.
- * [0] : struct smb2_create_req
- * [1] : path
- * [2] : lease context
- * [3] : durable context
- * [4] : posix context
- * [5] : time warp context
- * [6] : query id context
- * [7] : compound padding
- */
-#define SMB2_CREATE_IOV_SIZE 8
-
-/*
- * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
- * 88 (fixed part of create response) + 520 (path) + 208 (contexts) +
- * 2 bytes of padding.
- */
-#define MAX_SMB2_CREATE_RESPONSE_SIZE 880
-
-#define SMB2_LEASE_READ_CACHING_HE 0x01
-#define SMB2_LEASE_HANDLE_CACHING_HE 0x02
-#define SMB2_LEASE_WRITE_CACHING_HE 0x04
-
-#define SMB2_LEASE_NONE cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS cpu_to_le32(0x00000002)
-#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET cpu_to_le32(0x00000004)
-
-#define SMB2_LEASE_KEY_SIZE 16
-
-struct lease_context {
- u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
- u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
- __le64 ParentLeaseKeyLow;
- __le64 ParentLeaseKeyHigh;
- __le16 Epoch;
- __le16 Reserved;
-} __packed;
-
-struct create_lease {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context_v2 lcontext;
- __u8 Pad[4];
-} __packed;
-
-struct create_durable {
- struct create_context ccontext;
- __u8 Name[8];
- union {
- __u8 Reserved[16];
- struct {
- __u64 PersistentFileId;
- __u64 VolatileFileId;
- } Fid;
- } Data;
-} __packed;
-
-struct create_posix {
- struct create_context ccontext;
- __u8 Name[16];
- __le32 Mode;
- __u32 Reserved;
-} __packed;
-
-/* See MS-SMB2 2.2.13.2.11 */
-/* Flags */
-#define SMB2_DHANDLE_FLAG_PERSISTENT 0x00000002
-struct durable_context_v2 {
- __le32 Timeout;
- __le32 Flags;
- __u64 Reserved;
- __u8 CreateGuid[16];
-} __packed;
-
-struct create_durable_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- struct durable_context_v2 dcontext;
-} __packed;
-
-/* See MS-SMB2 2.2.13.2.12 */
-struct durable_reconnect_context_v2 {
- struct {
- __u64 PersistentFileId;
- __u64 VolatileFileId;
- } Fid;
- __u8 CreateGuid[16];
- __le32 Flags; /* see above DHANDLE_FLAG_PERSISTENT */
-} __packed;
-
-/* See MS-SMB2 2.2.14.2.9 */
-struct create_on_disk_id {
- struct create_context ccontext;
- __u8 Name[8];
- __le64 DiskFileId;
- __le64 VolumeId;
- __u32 Reserved[4];
-} __packed;
-
-/* See MS-SMB2 2.2.14.2.12 */
-struct durable_reconnect_context_v2_rsp {
- __le32 Timeout;
- __le32 Flags; /* see above DHANDLE_FLAG_PERSISTENT */
-} __packed;
-
-struct create_durable_handle_reconnect_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- struct durable_reconnect_context_v2 dcontext;
- __u8 Pad[4];
-} __packed;
-
-/* See MS-SMB2 2.2.13.2.5 */
-struct crt_twarp_ctxt {
- struct create_context ccontext;
- __u8 Name[8];
- __le64 Timestamp;
-
-} __packed;
-
-/* See MS-SMB2 2.2.13.2.9 */
-struct crt_query_id_ctxt {
- struct create_context ccontext;
- __u8 Name[8];
-} __packed;
-
-struct crt_sd_ctxt {
- struct create_context ccontext;
- __u8 Name[8];
- struct smb3_sd sd;
-} __packed;
-
-
-#define COPY_CHUNK_RES_KEY_SIZE 24
-struct resume_key_req {
- char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
- __le32 ContextLength; /* MBZ */
- char Context[]; /* ignored, Windows sets to 4 bytes of zero */
-} __packed;
-
-/* this goes in the ioctl buffer when doing a copychunk request */
-struct copychunk_ioctl {
- char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
- __le32 ChunkCount; /* we are only sending 1 */
- __le32 Reserved;
- /* array will only be one chunk long for us */
- __le64 SourceOffset;
- __le64 TargetOffset;
- __le32 Length; /* how many bytes to copy */
- __u32 Reserved2;
-} __packed;
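struct copychunk_ioctl above is the payload placed in the ioctl buffer for a copychunk (server-side copy) request; as the comments note, only a single chunk is ever sent at a time. A hedged sketch of filling it (resume_key, src_off, dst_off and len stand in for values obtained elsewhere; this is illustration, not the driver's actual helper):

/* Illustrative only: build a single-chunk copychunk request. */
struct copychunk_ioctl req = {};	/* zeroes the Reserved fields */

memcpy(req.SourceKey, resume_key, COPY_CHUNK_RES_KEY_SIZE);
req.ChunkCount = cpu_to_le32(1);	/* we only ever send one chunk */
req.SourceOffset = cpu_to_le64(src_off);
req.TargetOffset = cpu_to_le64(dst_off);
req.Length = cpu_to_le32(len);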
-
-/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
-struct file_zero_data_information {
- __le64 FileOffset;
- __le64 BeyondFinalZero;
-} __packed;
-
-struct copychunk_ioctl_rsp {
- __le32 ChunksWritten;
- __le32 ChunkBytesWritten;
- __le32 TotalBytesWritten;
-} __packed;
-
-/* See MS-FSCC 2.3.29 and 2.3.30 */
-struct get_retrieval_pointer_count_req {
- __le64 StartingVcn; /* virtual cluster number (signed) */
-} __packed;
-
-struct get_retrieval_pointer_count_rsp {
- __le32 ExtentCount;
-} __packed;
-
-/*
- * See MS-FSCC 2.3.33 and 2.3.34
- * request is the same as get_retrieval_point_count_req struct above
- */
-struct smb3_extents {
- __le64 NextVcn;
- __le64 Lcn; /* logical cluster number */
-} __packed;
-
-struct get_retrieval_pointers_refcount_rsp {
- __le32 ExtentCount;
- __u32 Reserved;
- __le64 StartingVcn;
- struct smb3_extents extents[];
-} __packed;
-
-struct fsctl_set_integrity_information_req {
- __le16 ChecksumAlgorithm;
- __le16 Reserved;
- __le32 Flags;
-} __packed;
-
-struct fsctl_get_integrity_information_rsp {
- __le16 ChecksumAlgorithm;
- __le16 Reserved;
- __le32 Flags;
- __le32 ChecksumChunkSizeInBytes;
- __le32 ClusterSizeInBytes;
-} __packed;
-
-struct file_allocated_range_buffer {
- __le64 file_offset;
- __le64 length;
-} __packed;
-
-/* Integrity ChecksumAlgorithm choices for above */
-#define CHECKSUM_TYPE_NONE 0x0000
-#define CHECKSUM_TYPE_CRC64 0x0002
-#define CHECKSUM_TYPE_UNCHANGED 0xFFFF /* set only */
-
-/* Integrity flags for above */
-#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001
-
-/* Reparse structures - see MS-FSCC 2.1.2 */
-
-/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
-
-struct reparse_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __u8 DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_guid_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __u8 ReparseGuid[16];
- __u8 DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_mount_point_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __le16 SubstituteNameOffset;
- __le16 SubstituteNameLength;
- __le16 PrintNameOffset;
- __le16 PrintNameLength;
- __u8 PathBuffer[]; /* Variable Length */
-} __packed;
-
-#define SYMLINK_FLAG_RELATIVE 0x00000001
-
-struct reparse_symlink_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __le16 SubstituteNameOffset;
- __le16 SubstituteNameLength;
- __le16 PrintNameOffset;
- __le16 PrintNameLength;
- __le32 Flags;
- __u8 PathBuffer[]; /* Variable Length */
-} __packed;
-
-/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
-
-
-/* See MS-DFSC 2.2.2 */
-struct fsctl_get_dfs_referral_req {
- __le16 MaxReferralLevel;
- __u8 RequestFileName[];
-} __packed;
-
-/* DFS response is struct get_dfs_refer_rsp */
-
-/* See MS-SMB2 2.2.31.3 */
-struct network_resiliency_req {
- __le32 Timeout;
- __le32 Reserved;
-} __packed;
-/* There is no buffer for the response ie no struct network_resiliency_rsp */
-
-
-struct validate_negotiate_info_req {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 DialectCount;
- __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
-} __packed;
-
-struct validate_negotiate_info_rsp {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
-#define RSS_CAPABLE cpu_to_le32(0x00000001)
-#define RDMA_CAPABLE cpu_to_le32(0x00000002)
-
-#define INTERNETWORK cpu_to_le16(0x0002)
-#define INTERNETWORKV6 cpu_to_le16(0x0017)
-
-struct network_interface_info_ioctl_rsp {
- __le32 Next; /* next interface. zero if this is last one */
- __le32 IfIndex;
- __le32 Capability; /* RSS or RDMA Capable */
- __le32 Reserved;
- __le64 LinkSpeed;
- __le16 Family;
- __u8 Buffer[126];
-} __packed;
-
-struct iface_info_ipv4 {
- __be16 Port;
- __be32 IPv4Address;
- __be64 Reserved;
-} __packed;
-
-struct iface_info_ipv6 {
- __be16 Port;
- __be32 FlowInfo;
- __u8 IPv6Address[16];
- __be32 ScopeId;
-} __packed;
-
-#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
-
-struct compress_ioctl {
- __le16 CompressionState; /* See cifspdu.h for possible flag values */
-} __packed;
-
-struct duplicate_extents_to_file {
- __u64 PersistentFileHandle; /* source file handle, opaque endianness */
- __u64 VolatileFileHandle;
- __le64 SourceFileOffset;
- __le64 TargetFileOffset;
- __le64 ByteCount; /* Bytes to be copied */
-} __packed;
-
-/*
- * Maximum number of iovs we need for an ioctl request.
- * [0] : struct smb2_ioctl_req
- * [1] : in_data
- */
-#define SMB2_IOCTL_IOV_SIZE 2
-
-struct smb2_ioctl_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u16 Reserved;
- __le32 CtlCode;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __le32 InputOffset;
- __le32 InputCount;
- __le32 MaxInputResponse;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 MaxOutputResponse;
- __le32 Flags;
- __u32 Reserved2;
- __u8 Buffer[];
-} __packed;
-
-struct smb2_ioctl_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u16 Reserved;
- __le32 CtlCode;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __le32 InputOffset;
- __le32 InputCount;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 Flags;
- __u32 Reserved2;
- /* char * buffer[] */
-} __packed;
-
-#define SMB2_LOCKFLAG_SHARED_LOCK 0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK 0x0002
-#define SMB2_LOCKFLAG_UNLOCK 0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-
-struct smb2_lock_element {
- __le64 Offset;
- __le64 Length;
- __le32 Flags;
- __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 48 */
- __le16 LockCount;
- /*
- * The least significant four bits are the index, the other 28 bits are
- * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
- */
- __le32 LockSequenceNumber;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- /* Followed by at least one */
- struct smb2_lock_element locks[1];
-} __packed;
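The LockSequenceNumber comment in struct smb2_lock_req above describes a packed 32-bit field: the low four bits carry the index and the remaining 28 bits carry the lock sequence number. A hedged sketch of pack/unpack helpers that simply mirror that description (the names are invented here and are not kernel API):

/* Illustrative helpers only; they follow the comment in smb2_lock_req. */
static inline __le32 lock_seq_pack(u32 index, u32 sequence)
{
	return cpu_to_le32((index & 0xF) | (sequence << 4));
}

static inline void lock_seq_unpack(__le32 v, u32 *index, u32 *sequence)
{
	u32 raw = le32_to_cpu(v);

	*index = raw & 0xF;
	*sequence = raw >> 4;
}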
-
-struct smb2_lock_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS 0x01
-#define SMB2_RETURN_SINGLE_ENTRY 0x02
-#define SMB2_INDEX_SPECIFIED 0x04
-#define SMB2_REOPEN 0x10
-
-#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
-
-/*
- * Valid FileInformation classes.
- *
- * Note that these are a subset of the (file) QUERY_INFO levels defined
- * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
- * we do not redefine them here)
- *
- * FileDirectoryInformation 0x01
- * FileFullDirectoryInformation 0x02
- * FileIdFullDirectoryInformation 0x26
- * FileBothDirectoryInformation 0x03
- * FileIdBothDirectoryInformation 0x25
- * FileNamesInformation 0x0C
- * FileIdExtdDirectoryInformation 0x3C
- */
-
-struct smb2_query_directory_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 FileInformationClass;
- __u8 Flags;
- __le32 FileIndex;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __le16 FileNameOffset;
- __le16 FileNameLength;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE 0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY 0x03
-#define SMB2_O_INFO_QUOTA 0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO 0x00000001
-#define GROUP_SECINFO 0x00000002
-#define DACL_SECINFO 0x00000004
-#define SACL_SECINFO 0x00000008
-#define LABEL_SECINFO 0x00000010
-#define ATTRIBUTE_SECINFO 0x00000020
-#define SCOPE_SECINFO 0x00000040
-#define BACKUP_SECINFO 0x00010000
-#define UNPROTECTED_SACL_SECINFO 0x10000000
-#define UNPROTECTED_DACL_SECINFO 0x20000000
-#define PROTECTED_SACL_SECINFO 0x40000000
-#define PROTECTED_DACL_SECINFO 0x80000000
-
-/* Flags used for FileFullEAinfo */
-#define SL_RESTART_SCAN 0x00000001
-#define SL_RETURN_SINGLE_ENTRY 0x00000002
-#define SL_INDEX_SPECIFIED 0x00000004
-
-struct smb2_query_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 41 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 OutputBufferLength;
- __le16 InputBufferOffset;
- __u16 Reserved;
- __le32 InputBufferLength;
- __le32 AdditionalInformation;
- __le32 Flags;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/*
- * Maximum number of iovs we need for a set-info request.
- * The largest one is rename/hardlink
- * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
- * [1] : path
- * [2] : compound padding
- */
-#define SMB2_SET_INFO_IOV_SIZE 3
-
-struct smb2_set_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 BufferLength;
- __le16 BufferOffset;
- __u16 Reserved;
- __le32 AdditionalInformation;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 2 */
-} __packed;
-
-struct smb2_oplock_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 Reserved2;
- __u64 PersistentFid;
- __u64 VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 44 */
- __le16 Epoch;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 CurrentLeaseState;
- __le32 NewLeaseState;
- __le32 BreakReason;
- __le32 AccessMaskHint;
- __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 Reserved;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 LeaseState;
- __le64 LeaseDuration;
-} __packed;
-
-/*
- * PDU infolevel structure definitions
- * BB consider moving to a different header
- */
-
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION 1 /* Query */
-#define FS_LABEL_INFORMATION 2 /* Local only */
-#define FS_SIZE_INFORMATION 3 /* Query */
-#define FS_DEVICE_INFORMATION 4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
-#define FS_CONTROL_INFORMATION 6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION 7 /* Query */
-#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION 9 /* Local only */
-#define FS_VOLUME_FLAGS_INFORMATION 10 /* Local only */
-#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
- __le64 TotalAllocationUnits;
- __le64 CallerAvailableAllocationUnits;
- __le64 ActualAvailableAllocationUnits;
- __le32 SectorsPerAllocationUnit;
- __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
- __le32 LogicalBytesPerSector;
- __le32 PhysicalBytesPerSectorForAtomicity;
- __le32 PhysicalBytesPerSectorForPerf;
- __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
- __le32 Flags;
- __le32 ByteOffsetForSectorAlignment;
- __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* volume info struct - see MS-FSCC 2.5.9 */
-#define MAX_VOL_LABEL_LEN 32
-struct smb3_fs_vol_info {
- __le64 VolumeCreationTime;
- __u32 VolumeSerialNumber;
- __le32 VolumeLabelLength; /* includes trailing null */
- __u8 SupportsObjects; /* True if the filesystem supports objects (e.g. NTFS) */
- __u8 Reserved;
- __u8 VolumeLabel[]; /* variable len */
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION 1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION 4
-#define FILE_STANDARD_INFORMATION 5
-#define FILE_INTERNAL_INFORMATION 6
-#define FILE_EA_INFORMATION 7
-#define FILE_ACCESS_INFORMATION 8
-#define FILE_NAME_INFORMATION 9
-#define FILE_RENAME_INFORMATION 10
-#define FILE_LINK_INFORMATION 11
-#define FILE_NAMES_INFORMATION 12
-#define FILE_DISPOSITION_INFORMATION 13
-#define FILE_POSITION_INFORMATION 14
-#define FILE_FULL_EA_INFORMATION 15
-#define FILE_MODE_INFORMATION 16
-#define FILE_ALIGNMENT_INFORMATION 17
-#define FILE_ALL_INFORMATION 18
-#define FILE_ALLOCATION_INFORMATION 19
-#define FILE_END_OF_FILE_INFORMATION 20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION 22
-#define FILE_PIPE_INFORMATION 23
-#define FILE_PIPE_LOCAL_INFORMATION 24
-#define FILE_PIPE_REMOTE_INFORMATION 25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION 27
-#define FILE_COMPRESSION_INFORMATION 28
-#define FILE_OBJECT_ID_INFORMATION 29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION 31
-#define FILE_QUOTA_INFORMATION 32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION 34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION 36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION 40
-#define FILE_SFIO_RESERVE_INFORMATION 44
-#define FILE_SFIO_VOLUME_INFORMATION 45
-#define FILE_HARD_LINK_INFORMATION 46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-#define FILE_ID_INFORMATION 59
-#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60
-
-struct smb2_file_internal_info {
- __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
- __u8 ReplaceIfExists; /* 1 = replace existing target with new */
- /* 0 = fail if target already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (the spec requires it, reason unclear) */
- __le32 FileNameLength;
- char FileName[]; /* New name to be assigned */
- /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
- __u8 ReplaceIfExists; /* 1 = replace existing link with new */
- /* 0 = fail if link already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (the spec requires it, reason unclear) */
- __le32 FileNameLength;
- char FileName[]; /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
-struct smb2_file_full_ea_info { /* encoding of response for level 15 */
- __le32 next_entry_offset;
- __u8 flags;
- __u8 ea_name_length;
- __le16 ea_value_length;
- char ea_data[]; /* \0 terminated name plus value */
-} __packed; /* level 15 Set */
-
-/*
- * This level 18 structure, although it shares its name, is different from the
- * cifs level 0x107 structure: level 0x107 has an extra u64 between AccessFlags
- * and CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
- __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 Attributes;
- __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile; /* size ie offset to first free byte in file */
- __le32 NumberOfLinks; /* hard links */
- __u8 DeletePending;
- __u8 Directory;
- __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
- __le64 IndexNumber;
- __le32 EASize;
- __le32 AccessFlags;
- __le64 CurrentByteOffset;
- __le32 Mode;
- __le32 AlignmentRequirement;
- __le32 FileNameLength;
- char FileName[1];
-} __packed; /* level 18 Query */
-
-struct smb2_file_eof_info { /* encoding of request for level 20 */
- __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
-struct smb2_file_reparse_point_info {
- __le64 IndexNumber;
- __le32 Tag;
-} __packed;
-
-struct smb2_file_network_open_info {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize;
- __le64 EndOfFile;
- __le32 Attributes;
- __le32 Reserved;
-} __packed; /* level 34 Query; similar info also returned in close rsp and open rsp */
-
-/* See MS-FSCC 2.4.21 */
-struct smb2_file_id_information {
- __le64 VolumeSerialNumber;
- __u64 PersistentFileId; /* opaque endianness */
- __u64 VolatileFileId; /* opaque endianness */
-} __packed; /* level 59 */
-
-/* See MS-FSCC 2.4.18 */
-struct smb2_file_id_extd_directory_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 FileAttributes;
- __le32 FileNameLength;
- __le32 EaSize; /* EA size */
- __le32 ReparsePointTag; /* valid if FILE_ATTR_REPARSE_POINT set in FileAttributes */
- __le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit */
- char FileName[1];
-} __packed; /* level 60 */
-
-extern char smb2_padding[7];
-
-/* equivalent of the contents of SMB3.1.1 POSIX open context response */
-struct create_posix_rsp {
- u32 nlink;
- u32 reparse_tag;
- u32 mode;
- struct cifs_sid owner; /* var-sized on the wire */
- struct cifs_sid group; /* var-sized on the wire */
-} __packed;
-
-/*
- * SMB2-only POSIX info level for query dir
- *
- * See posix_info_sid_size(), posix_info_extra_size() and
- * posix_info_parse() to help with the handling of this struct.
- */
-struct smb2_posix_info {
- __le32 NextEntryOffset;
- __u32 Ignored;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 DosAttributes;
- __le64 Inode;
- __le32 DeviceId;
- __le32 Zero;
- /* beginning of POSIX Create Context Response */
- __le32 HardLinks;
- __le32 ReparseTag;
- __le32 Mode;
- /*
- * var sized owner SID
- * var sized group SID
- * le32 filenamelength
- * u8 filename[]
- */
-} __packed;
-
-/* Level 100 query info */
-struct smb311_posix_qinfo {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 DosAttributes;
- __le64 Inode;
- __le32 DeviceId;
- __le32 Zero;
- /* beginning of POSIX Create Context Response */
- __le32 HardLinks;
- __le32 ReparseTag;
- __le32 Mode;
- u8 Sids[];
- /*
- * var sized owner SID
- * var sized group SID
- * le32 filenamelength
- * u8 filename[]
- */
-} __packed;
-
-/*
- * Parsed version of the above struct. Allows direct access to the
- * variable length fields
- */
-struct smb2_posix_info_parsed {
- const struct smb2_posix_info *base;
- size_t size;
- struct cifs_sid owner;
- struct cifs_sid group;
- int name_len;
- const u8 *name;
-};
-
-#endif /* _SMB2PDU_H */
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
deleted file mode 100644
index 31ef64eb7fbb..000000000000
--- a/fs/cifs/smbdirect.c
+++ /dev/null
@@ -1,2524 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2017, Microsoft Corporation.
- *
- * Author(s): Long Li <longli@microsoft.com>
- */
-#include <linux/module.h>
-#include <linux/highmem.h>
-#include "smbdirect.h"
-#include "cifs_debug.h"
-#include "cifsproto.h"
-#include "smb2proto.h"
-
-static struct smbd_response *get_empty_queue_buffer(
- struct smbd_connection *info);
-static struct smbd_response *get_receive_buffer(
- struct smbd_connection *info);
-static void put_receive_buffer(
- struct smbd_connection *info,
- struct smbd_response *response);
-static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
-static void destroy_receive_buffers(struct smbd_connection *info);
-
-static void put_empty_packet(
- struct smbd_connection *info, struct smbd_response *response);
-static void enqueue_reassembly(
- struct smbd_connection *info,
- struct smbd_response *response, int data_length);
-static struct smbd_response *_get_first_reassembly(
- struct smbd_connection *info);
-
-static int smbd_post_recv(
- struct smbd_connection *info,
- struct smbd_response *response);
-
-static int smbd_post_send_empty(struct smbd_connection *info);
-static int smbd_post_send_data(
- struct smbd_connection *info,
- struct kvec *iov, int n_vec, int remaining_data_length);
-static int smbd_post_send_page(struct smbd_connection *info,
- struct page *page, unsigned long offset,
- size_t size, int remaining_data_length);
-
-static void destroy_mr_list(struct smbd_connection *info);
-static int allocate_mr_list(struct smbd_connection *info);
-
-/* SMBD version number */
-#define SMBD_V1 0x0100
-
-/* Port numbers for SMBD transport */
-#define SMB_PORT 445
-#define SMBD_PORT 5445
-
-/* Address lookup and resolve timeout in ms */
-#define RDMA_RESOLVE_TIMEOUT 5000
-
-/* SMBD negotiation timeout in seconds */
-#define SMBD_NEGOTIATE_TIMEOUT 120
-
-/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
-#define SMBD_MIN_RECEIVE_SIZE 128
-#define SMBD_MIN_FRAGMENTED_SIZE 131072
-
-/*
- * Default maximum number of RDMA read/write outstanding on this connection
- * This value may be decreased during QP creation, based on hardware limits
- */
-#define SMBD_CM_RESPONDER_RESOURCES 32
-
-/* Maximum number of retries on data transfer operations */
-#define SMBD_CM_RETRY 6
-/* No need to retry on Receiver Not Ready since SMBD manages credits */
-#define SMBD_CM_RNR_RETRY 0
-
-/*
- * User configurable initial values per SMBD transport connection
- * as defined in [MS-SMBD] 3.1.1.1
- * These may change after an SMBD negotiation
- */
-/* The local peer's maximum number of credits to grant to the peer */
-int smbd_receive_credit_max = 255;
-
-/* The remote peer's credit request of local peer */
-int smbd_send_credit_target = 255;
-
-/* The maximum single-message size that can be sent to the remote peer */
-int smbd_max_send_size = 1364;
-
-/* The maximum fragmented upper-layer payload receive size supported */
-int smbd_max_fragmented_recv_size = 1024 * 1024;
-
-/* The maximum single-message size which can be received */
-int smbd_max_receive_size = 8192;
-
-/* The idle timeout, in seconds, before a keepalive message is sent */
-int smbd_keep_alive_interval = 120;
-
-/*
- * User configurable initial values for RDMA transport
- * The actual values used may be lower and are limited to hardware capabilities
- */
-/* Default maximum number of SGEs in a RDMA write/read */
-int smbd_max_frmr_depth = 2048;
-
-/* If the payload is smaller than this many bytes, use RDMA send/recv instead of read/write */
-int rdma_readwrite_threshold = 4096;
-
-/* Transport logging functions
- * Logging is defined as classes. They can be OR'ed together to select the
- * classes actually logged via the module parameter smbd_logging_class,
- * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
- * log_rdma_event() messages
- */
-#define LOG_OUTGOING 0x1
-#define LOG_INCOMING 0x2
-#define LOG_READ 0x4
-#define LOG_WRITE 0x8
-#define LOG_RDMA_SEND 0x10
-#define LOG_RDMA_RECV 0x20
-#define LOG_KEEP_ALIVE 0x40
-#define LOG_RDMA_EVENT 0x80
-#define LOG_RDMA_MR 0x100
-static unsigned int smbd_logging_class;
-module_param(smbd_logging_class, uint, 0644);
-MODULE_PARM_DESC(smbd_logging_class,
- "Logging class for SMBD transport 0x0 to 0x100");
-
-#define ERR 0x0
-#define INFO 0x1
-static unsigned int smbd_logging_level = ERR;
-module_param(smbd_logging_level, uint, 0644);
-MODULE_PARM_DESC(smbd_logging_level,
- "Logging level for SMBD transport, 0 (default): error, 1: info");
-
-#define log_rdma(level, class, fmt, args...) \
-do { \
- if (level <= smbd_logging_level || class & smbd_logging_class) \
- cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
-} while (0)
-
-#define log_outgoing(level, fmt, args...) \
- log_rdma(level, LOG_OUTGOING, fmt, ##args)
-#define log_incoming(level, fmt, args...) \
- log_rdma(level, LOG_INCOMING, fmt, ##args)
-#define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args)
-#define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args)
-#define log_rdma_send(level, fmt, args...) \
- log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
-#define log_rdma_recv(level, fmt, args...) \
- log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
-#define log_keep_alive(level, fmt, args...) \
- log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
-#define log_rdma_event(level, fmt, args...) \
- log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
-#define log_rdma_mr(level, fmt, args...) \
- log_rdma(level, LOG_RDMA_MR, fmt, ##args)
-
-static void smbd_disconnect_rdma_work(struct work_struct *work)
-{
- struct smbd_connection *info =
- container_of(work, struct smbd_connection, disconnect_work);
-
- if (info->transport_status == SMBD_CONNECTED) {
- info->transport_status = SMBD_DISCONNECTING;
- rdma_disconnect(info->id);
- }
-}
-
-static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
-{
- queue_work(info->workqueue, &info->disconnect_work);
-}
-
-/* Upcall from RDMA CM */
-static int smbd_conn_upcall(
- struct rdma_cm_id *id, struct rdma_cm_event *event)
-{
- struct smbd_connection *info = id->context;
-
- log_rdma_event(INFO, "event=%d status=%d\n",
- event->event, event->status);
-
- switch (event->event) {
- case RDMA_CM_EVENT_ADDR_RESOLVED:
- case RDMA_CM_EVENT_ROUTE_RESOLVED:
- info->ri_rc = 0;
- complete(&info->ri_done);
- break;
-
- case RDMA_CM_EVENT_ADDR_ERROR:
- info->ri_rc = -EHOSTUNREACH;
- complete(&info->ri_done);
- break;
-
- case RDMA_CM_EVENT_ROUTE_ERROR:
- info->ri_rc = -ENETUNREACH;
- complete(&info->ri_done);
- break;
-
- case RDMA_CM_EVENT_ESTABLISHED:
- log_rdma_event(INFO, "connected event=%d\n", event->event);
- info->transport_status = SMBD_CONNECTED;
- wake_up_interruptible(&info->conn_wait);
- break;
-
- case RDMA_CM_EVENT_CONNECT_ERROR:
- case RDMA_CM_EVENT_UNREACHABLE:
- case RDMA_CM_EVENT_REJECTED:
- log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
- info->transport_status = SMBD_DISCONNECTED;
- wake_up_interruptible(&info->conn_wait);
- break;
-
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- case RDMA_CM_EVENT_DISCONNECTED:
- /* This happens when we fail the negotiation */
- if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
- info->transport_status = SMBD_DISCONNECTED;
- wake_up(&info->conn_wait);
- break;
- }
-
- info->transport_status = SMBD_DISCONNECTED;
- wake_up_interruptible(&info->disconn_wait);
- wake_up_interruptible(&info->wait_reassembly_queue);
- wake_up_interruptible_all(&info->wait_send_queue);
- break;
-
- default:
- break;
- }
-
- return 0;
-}
-
-/* Upcall from RDMA QP */
-static void
-smbd_qp_async_error_upcall(struct ib_event *event, void *context)
-{
- struct smbd_connection *info = context;
-
- log_rdma_event(ERR, "%s on device %s info %p\n",
- ib_event_msg(event->event), event->device->name, info);
-
- switch (event->event) {
- case IB_EVENT_CQ_ERR:
- case IB_EVENT_QP_FATAL:
- smbd_disconnect_rdma_connection(info);
- break;
-
- default:
- break;
- }
-}
-
-static inline void *smbd_request_payload(struct smbd_request *request)
-{
- return (void *)request->packet;
-}
-
-static inline void *smbd_response_payload(struct smbd_response *response)
-{
- return (void *)response->packet;
-}
-
-/* Called when an RDMA send is done */
-static void send_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- int i;
- struct smbd_request *request =
- container_of(wc->wr_cqe, struct smbd_request, cqe);
-
- log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
- request, wc->status);
-
- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
- log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
- wc->status, wc->opcode);
- smbd_disconnect_rdma_connection(request->info);
- }
-
- for (i = 0; i < request->num_sge; i++)
- ib_dma_unmap_single(request->info->id->device,
- request->sge[i].addr,
- request->sge[i].length,
- DMA_TO_DEVICE);
-
- if (atomic_dec_and_test(&request->info->send_pending))
- wake_up(&request->info->wait_send_pending);
-
- wake_up(&request->info->wait_post_send);
-
- mempool_free(request, request->info->request_mempool);
-}
-
-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
-{
- log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
- resp->min_version, resp->max_version,
- resp->negotiated_version, resp->credits_requested,
- resp->credits_granted, resp->status,
- resp->max_readwrite_size, resp->preferred_send_size,
- resp->max_receive_size, resp->max_fragmented_size);
-}
-
-/*
- * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
- * response, packet_length: the negotiation response message
- * return value: true if negotiation succeeded, false if it failed
- */
-static bool process_negotiation_response(
- struct smbd_response *response, int packet_length)
-{
- struct smbd_connection *info = response->info;
- struct smbd_negotiate_resp *packet = smbd_response_payload(response);
-
- if (packet_length < sizeof(struct smbd_negotiate_resp)) {
- log_rdma_event(ERR,
- "error: packet_length=%d\n", packet_length);
- return false;
- }
-
- if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
- log_rdma_event(ERR, "error: negotiated_version=%x\n",
- le16_to_cpu(packet->negotiated_version));
- return false;
- }
- info->protocol = le16_to_cpu(packet->negotiated_version);
-
- if (packet->credits_requested == 0) {
- log_rdma_event(ERR, "error: credits_requested==0\n");
- return false;
- }
- info->receive_credit_target = le16_to_cpu(packet->credits_requested);
-
- if (packet->credits_granted == 0) {
- log_rdma_event(ERR, "error: credits_granted==0\n");
- return false;
- }
- atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
-
- atomic_set(&info->receive_credits, 0);
-
- if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
- log_rdma_event(ERR, "error: preferred_send_size=%d\n",
- le32_to_cpu(packet->preferred_send_size));
- return false;
- }
- info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
-
- if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
- log_rdma_event(ERR, "error: max_receive_size=%d\n",
- le32_to_cpu(packet->max_receive_size));
- return false;
- }
- info->max_send_size = min_t(int, info->max_send_size,
- le32_to_cpu(packet->max_receive_size));
-
- if (le32_to_cpu(packet->max_fragmented_size) <
- SMBD_MIN_FRAGMENTED_SIZE) {
- log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
- le32_to_cpu(packet->max_fragmented_size));
- return false;
- }
- info->max_fragmented_send_size =
- le32_to_cpu(packet->max_fragmented_size);
- info->rdma_readwrite_threshold =
- rdma_readwrite_threshold > info->max_fragmented_send_size ?
- info->max_fragmented_send_size :
- rdma_readwrite_threshold;
-
-
- info->max_readwrite_size = min_t(u32,
- le32_to_cpu(packet->max_readwrite_size),
- info->max_frmr_depth * PAGE_SIZE);
- info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
-
- return true;
-}
-
-static void smbd_post_send_credits(struct work_struct *work)
-{
- int ret = 0;
- int use_receive_queue = 1;
- int rc;
- struct smbd_response *response;
- struct smbd_connection *info =
- container_of(work, struct smbd_connection,
- post_send_credits_work);
-
- if (info->transport_status != SMBD_CONNECTED) {
- wake_up(&info->wait_receive_queues);
- return;
- }
-
- if (info->receive_credit_target >
- atomic_read(&info->receive_credits)) {
- while (true) {
- if (use_receive_queue)
- response = get_receive_buffer(info);
- else
- response = get_empty_queue_buffer(info);
- if (!response) {
- /* now switch to the empty packet queue */
- if (use_receive_queue) {
- use_receive_queue = 0;
- continue;
- } else
- break;
- }
-
- response->type = SMBD_TRANSFER_DATA;
- response->first_segment = false;
- rc = smbd_post_recv(info, response);
- if (rc) {
- log_rdma_recv(ERR,
- "post_recv failed rc=%d\n", rc);
- put_receive_buffer(info, response);
- break;
- }
-
- ret++;
- }
- }
-
- spin_lock(&info->lock_new_credits_offered);
- info->new_credits_offered += ret;
- spin_unlock(&info->lock_new_credits_offered);
-
- /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
- info->send_immediate = true;
- if (atomic_read(&info->receive_credits) <
- info->receive_credit_target - 1) {
- if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
- info->send_immediate) {
- log_keep_alive(INFO, "send an empty message\n");
- smbd_post_send_empty(info);
- }
- }
-}
-
-/* Called from softirq, when recv is done */
-static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct smbd_data_transfer *data_transfer;
- struct smbd_response *response =
- container_of(wc->wr_cqe, struct smbd_response, cqe);
- struct smbd_connection *info = response->info;
- int data_length = 0;
-
- log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
- response, response->type, wc->status, wc->opcode,
- wc->byte_len, wc->pkey_index);
-
- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
- log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
- wc->status, wc->opcode);
- smbd_disconnect_rdma_connection(info);
- goto error;
- }
-
- ib_dma_sync_single_for_cpu(
- wc->qp->device,
- response->sge.addr,
- response->sge.length,
- DMA_FROM_DEVICE);
-
- switch (response->type) {
- /* SMBD negotiation response */
- case SMBD_NEGOTIATE_RESP:
- dump_smbd_negotiate_resp(smbd_response_payload(response));
- info->full_packet_received = true;
- info->negotiate_done =
- process_negotiation_response(response, wc->byte_len);
- complete(&info->negotiate_completion);
- break;
-
- /* SMBD data transfer packet */
- case SMBD_TRANSFER_DATA:
- data_transfer = smbd_response_payload(response);
- data_length = le32_to_cpu(data_transfer->data_length);
-
- /*
- * If this is a packet with a data payload, place the data in the
- * reassembly queue and wake up the reading thread
- */
- if (data_length) {
- if (info->full_packet_received)
- response->first_segment = true;
-
- if (le32_to_cpu(data_transfer->remaining_data_length))
- info->full_packet_received = false;
- else
- info->full_packet_received = true;
-
- enqueue_reassembly(
- info,
- response,
- data_length);
- } else
- put_empty_packet(info, response);
-
- if (data_length)
- wake_up_interruptible(&info->wait_reassembly_queue);
-
- atomic_dec(&info->receive_credits);
- info->receive_credit_target =
- le16_to_cpu(data_transfer->credits_requested);
- if (le16_to_cpu(data_transfer->credits_granted)) {
- atomic_add(le16_to_cpu(data_transfer->credits_granted),
- &info->send_credits);
- /*
- * We have new send credits granted from remote peer
- * If any sender is waiting for credits, unblock it
- */
- wake_up_interruptible(&info->wait_send_queue);
- }
-
- log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
- le16_to_cpu(data_transfer->flags),
- le32_to_cpu(data_transfer->data_offset),
- le32_to_cpu(data_transfer->data_length),
- le32_to_cpu(data_transfer->remaining_data_length));
-
- /* Send a KEEP_ALIVE response right away if requested */
- info->keep_alive_requested = KEEP_ALIVE_NONE;
- if (le16_to_cpu(data_transfer->flags) &
- SMB_DIRECT_RESPONSE_REQUESTED) {
- info->keep_alive_requested = KEEP_ALIVE_PENDING;
- }
-
- return;
-
- default:
- log_rdma_recv(ERR,
- "unexpected response type=%d\n", response->type);
- }
-
-error:
- put_receive_buffer(info, response);
-}
-
-static struct rdma_cm_id *smbd_create_id(
- struct smbd_connection *info,
- struct sockaddr *dstaddr, int port)
-{
- struct rdma_cm_id *id;
- int rc;
- __be16 *sport;
-
- id = rdma_create_id(&init_net, smbd_conn_upcall, info,
- RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(id)) {
- rc = PTR_ERR(id);
- log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
- return id;
- }
-
- if (dstaddr->sa_family == AF_INET6)
- sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
- else
- sport = &((struct sockaddr_in *)dstaddr)->sin_port;
-
- *sport = htons(port);
-
- init_completion(&info->ri_done);
- info->ri_rc = -ETIMEDOUT;
-
- rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
- RDMA_RESOLVE_TIMEOUT);
- if (rc) {
- log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
- goto out;
- }
- rc = wait_for_completion_interruptible_timeout(
- &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
- /* e.g. if interrupted returns -ERESTARTSYS */
- if (rc < 0) {
- log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
- goto out;
- }
- rc = info->ri_rc;
- if (rc) {
- log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
- goto out;
- }
-
- info->ri_rc = -ETIMEDOUT;
- rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
- if (rc) {
- log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
- goto out;
- }
- rc = wait_for_completion_interruptible_timeout(
- &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
- /* e.g. if interrupted returns -ERESTARTSYS */
- if (rc < 0) {
- log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
- goto out;
- }
- rc = info->ri_rc;
- if (rc) {
- log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
- goto out;
- }
-
- return id;
-
-out:
- rdma_destroy_id(id);
- return ERR_PTR(rc);
-}
-
-/*
- * Test if FRWR (Fast Registration Work Requests) is supported on the device
- * This implementation requires FRWR for RDMA read/write
- * return value: true if it is supported
- */
-static bool frwr_is_supported(struct ib_device_attr *attrs)
-{
- if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
- return false;
- if (attrs->max_fast_reg_page_list_len == 0)
- return false;
- return true;
-}
-
-static int smbd_ia_open(
- struct smbd_connection *info,
- struct sockaddr *dstaddr, int port)
-{
- int rc;
-
- info->id = smbd_create_id(info, dstaddr, port);
- if (IS_ERR(info->id)) {
- rc = PTR_ERR(info->id);
- goto out1;
- }
-
- if (!frwr_is_supported(&info->id->device->attrs)) {
- log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
- log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
- info->id->device->attrs.device_cap_flags,
- info->id->device->attrs.max_fast_reg_page_list_len);
- rc = -EPROTONOSUPPORT;
- goto out2;
- }
- info->max_frmr_depth = min_t(int,
- smbd_max_frmr_depth,
- info->id->device->attrs.max_fast_reg_page_list_len);
- info->mr_type = IB_MR_TYPE_MEM_REG;
- if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
- info->mr_type = IB_MR_TYPE_SG_GAPS;
-
- info->pd = ib_alloc_pd(info->id->device, 0);
- if (IS_ERR(info->pd)) {
- rc = PTR_ERR(info->pd);
- log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
- goto out2;
- }
-
- return 0;
-
-out2:
- rdma_destroy_id(info->id);
- info->id = NULL;
-
-out1:
- return rc;
-}
-
-/*
- * Send a negotiation request message to the peer
- * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
- * After negotiation, the transport is connected and ready for
- * carrying upper layer SMB payload
- */
-static int smbd_post_send_negotiate_req(struct smbd_connection *info)
-{
- struct ib_send_wr send_wr;
- int rc = -ENOMEM;
- struct smbd_request *request;
- struct smbd_negotiate_req *packet;
-
- request = mempool_alloc(info->request_mempool, GFP_KERNEL);
- if (!request)
- return rc;
-
- request->info = info;
-
- packet = smbd_request_payload(request);
- packet->min_version = cpu_to_le16(SMBD_V1);
- packet->max_version = cpu_to_le16(SMBD_V1);
- packet->reserved = 0;
- packet->credits_requested = cpu_to_le16(info->send_credit_target);
- packet->preferred_send_size = cpu_to_le32(info->max_send_size);
- packet->max_receive_size = cpu_to_le32(info->max_receive_size);
- packet->max_fragmented_size =
- cpu_to_le32(info->max_fragmented_recv_size);
-
- request->num_sge = 1;
- request->sge[0].addr = ib_dma_map_single(
- info->id->device, (void *)packet,
- sizeof(*packet), DMA_TO_DEVICE);
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
- rc = -EIO;
- goto dma_mapping_failed;
- }
-
- request->sge[0].length = sizeof(*packet);
- request->sge[0].lkey = info->pd->local_dma_lkey;
-
- ib_dma_sync_single_for_device(
- info->id->device, request->sge[0].addr,
- request->sge[0].length, DMA_TO_DEVICE);
-
- request->cqe.done = send_done;
-
- send_wr.next = NULL;
- send_wr.wr_cqe = &request->cqe;
- send_wr.sg_list = request->sge;
- send_wr.num_sge = request->num_sge;
- send_wr.opcode = IB_WR_SEND;
- send_wr.send_flags = IB_SEND_SIGNALED;
-
- log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
- request->sge[0].addr,
- request->sge[0].length, request->sge[0].lkey);
-
- atomic_inc(&info->send_pending);
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
- if (!rc)
- return 0;
-
- /* if we reach here, post send failed */
- log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
- atomic_dec(&info->send_pending);
- ib_dma_unmap_single(info->id->device, request->sge[0].addr,
- request->sge[0].length, DMA_TO_DEVICE);
-
- smbd_disconnect_rdma_connection(info);
-
-dma_mapping_failed:
- mempool_free(request, info->request_mempool);
- return rc;
-}
-
-/*
- * Extend the credits to remote peer
- * This implements [MS-SMBD] 3.1.5.9
- * The idea is that we should extend credits to remote peer as quickly as
- * it's allowed, to maintain data flow. We allocate as many receive
- * buffers as possible, and extend the receive credits to the remote peer
- * return value: the new credits being granted.
- */
-static int manage_credits_prior_sending(struct smbd_connection *info)
-{
- int new_credits;
-
- spin_lock(&info->lock_new_credits_offered);
- new_credits = info->new_credits_offered;
- info->new_credits_offered = 0;
- spin_unlock(&info->lock_new_credits_offered);
-
- return new_credits;
-}
-
-/*
- * Check if we need to send a KEEP_ALIVE message
- * The idle connection timer triggers a KEEP_ALIVE message when it expires
- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
- * back a response.
- * return value:
- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
- * 0: otherwise
- */
-static int manage_keep_alive_before_sending(struct smbd_connection *info)
-{
- if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
- info->keep_alive_requested = KEEP_ALIVE_SENT;
- return 1;
- }
- return 0;
-}
-
-/* Post the send request */
-static int smbd_post_send(struct smbd_connection *info,
- struct smbd_request *request)
-{
- struct ib_send_wr send_wr;
- int rc, i;
-
- for (i = 0; i < request->num_sge; i++) {
- log_rdma_send(INFO,
- "rdma_request sge[%d] addr=%llu length=%u\n",
- i, request->sge[i].addr, request->sge[i].length);
- ib_dma_sync_single_for_device(
- info->id->device,
- request->sge[i].addr,
- request->sge[i].length,
- DMA_TO_DEVICE);
- }
-
- request->cqe.done = send_done;
-
- send_wr.next = NULL;
- send_wr.wr_cqe = &request->cqe;
- send_wr.sg_list = request->sge;
- send_wr.num_sge = request->num_sge;
- send_wr.opcode = IB_WR_SEND;
- send_wr.send_flags = IB_SEND_SIGNALED;
-
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
- if (rc) {
- log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
- smbd_disconnect_rdma_connection(info);
- rc = -EAGAIN;
- } else
- /* Reset timer for idle connection after packet is sent */
- mod_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
-
- return rc;
-}
-
-static int smbd_post_send_sgl(struct smbd_connection *info,
- struct scatterlist *sgl, int data_length, int remaining_data_length)
-{
- int num_sgs;
- int i, rc;
- int header_length;
- struct smbd_request *request;
- struct smbd_data_transfer *packet;
- int new_credits;
- struct scatterlist *sg;
-
-wait_credit:
- /* Wait for send credits. An SMBD packet needs one credit */
- rc = wait_event_interruptible(info->wait_send_queue,
- atomic_read(&info->send_credits) > 0 ||
- info->transport_status != SMBD_CONNECTED);
- if (rc)
- goto err_wait_credit;
-
- if (info->transport_status != SMBD_CONNECTED) {
- log_outgoing(ERR, "disconnected not sending on wait_credit\n");
- rc = -EAGAIN;
- goto err_wait_credit;
- }
- if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
- atomic_inc(&info->send_credits);
- goto wait_credit;
- }
-
-wait_send_queue:
- wait_event(info->wait_post_send,
- atomic_read(&info->send_pending) < info->send_credit_target ||
- info->transport_status != SMBD_CONNECTED);
-
- if (info->transport_status != SMBD_CONNECTED) {
- log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
- rc = -EAGAIN;
- goto err_wait_send_queue;
- }
-
- if (unlikely(atomic_inc_return(&info->send_pending) >
- info->send_credit_target)) {
- atomic_dec(&info->send_pending);
- goto wait_send_queue;
- }
-
- request = mempool_alloc(info->request_mempool, GFP_KERNEL);
- if (!request) {
- rc = -ENOMEM;
- goto err_alloc;
- }
-
- request->info = info;
-
- /* Fill in the packet header */
- packet = smbd_request_payload(request);
- packet->credits_requested = cpu_to_le16(info->send_credit_target);
-
- new_credits = manage_credits_prior_sending(info);
- atomic_add(new_credits, &info->receive_credits);
- packet->credits_granted = cpu_to_le16(new_credits);
-
- info->send_immediate = false;
-
- packet->flags = 0;
- if (manage_keep_alive_before_sending(info))
- packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
-
- packet->reserved = 0;
- if (!data_length)
- packet->data_offset = 0;
- else
- packet->data_offset = cpu_to_le32(24);
- packet->data_length = cpu_to_le32(data_length);
- packet->remaining_data_length = cpu_to_le32(remaining_data_length);
- packet->padding = 0;
-
- log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
- le16_to_cpu(packet->credits_requested),
- le16_to_cpu(packet->credits_granted),
- le32_to_cpu(packet->data_offset),
- le32_to_cpu(packet->data_length),
- le32_to_cpu(packet->remaining_data_length));
-
- /* Map the packet to DMA */
- header_length = sizeof(struct smbd_data_transfer);
- /* If this is a packet without payload, don't send padding */
- if (!data_length)
- header_length = offsetof(struct smbd_data_transfer, padding);
-
- request->num_sge = 1;
- request->sge[0].addr = ib_dma_map_single(info->id->device,
- (void *)packet,
- header_length,
- DMA_TO_DEVICE);
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
- rc = -EIO;
- request->sge[0].addr = 0;
- goto err_dma;
- }
-
- request->sge[0].length = header_length;
- request->sge[0].lkey = info->pd->local_dma_lkey;
-
- /* Fill in the packet data payload */
- num_sgs = sgl ? sg_nents(sgl) : 0;
- for_each_sg(sgl, sg, num_sgs, i) {
- request->sge[i+1].addr =
- ib_dma_map_page(info->id->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
- if (ib_dma_mapping_error(
- info->id->device, request->sge[i+1].addr)) {
- rc = -EIO;
- request->sge[i+1].addr = 0;
- goto err_dma;
- }
- request->sge[i+1].length = sg->length;
- request->sge[i+1].lkey = info->pd->local_dma_lkey;
- request->num_sge++;
- }
-
- rc = smbd_post_send(info, request);
- if (!rc)
- return 0;
-
-err_dma:
- for (i = 0; i < request->num_sge; i++)
- if (request->sge[i].addr)
- ib_dma_unmap_single(info->id->device,
- request->sge[i].addr,
- request->sge[i].length,
- DMA_TO_DEVICE);
- mempool_free(request, info->request_mempool);
-
- /* roll back receive credits and credits to be offered */
- spin_lock(&info->lock_new_credits_offered);
- info->new_credits_offered += new_credits;
- spin_unlock(&info->lock_new_credits_offered);
- atomic_sub(new_credits, &info->receive_credits);
-
-err_alloc:
- if (atomic_dec_and_test(&info->send_pending))
- wake_up(&info->wait_send_pending);
-
-err_wait_send_queue:
- /* roll back send credits and pending */
- atomic_inc(&info->send_credits);
-
-err_wait_credit:
- return rc;
-}
-
-/*
- * Send a page
- * page: the page to send
- * offset: offset in the page to send
- * size: length in the page to send
- * remaining_data_length: remaining data to send in this payload
- */
-static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
- unsigned long offset, size_t size, int remaining_data_length)
-{
- struct scatterlist sgl;
-
- sg_init_table(&sgl, 1);
- sg_set_page(&sgl, page, size, offset);
-
- return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
-}
-
-/*
- * Send an empty message
- * An empty message is used to extend credits to the peer and keep the
- * connection alive while there is no upper layer payload to send at the time
- */
-static int smbd_post_send_empty(struct smbd_connection *info)
-{
- info->count_send_empty++;
- return smbd_post_send_sgl(info, NULL, 0, 0);
-}
-
-/*
- * Send a data buffer
- * iov: the iov array describing the data buffers
- * n_vec: the number of entries in the iov array
- * remaining_data_length: remaining data to send following this packet
- * in a segmented SMBD payload
- */
-static int smbd_post_send_data(
- struct smbd_connection *info, struct kvec *iov, int n_vec,
- int remaining_data_length)
-{
- int i;
- u32 data_length = 0;
- struct scatterlist sgl[SMBDIRECT_MAX_SGE];
-
- if (n_vec > SMBDIRECT_MAX_SGE) {
- cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
- return -EINVAL;
- }
-
- sg_init_table(sgl, n_vec);
- for (i = 0; i < n_vec; i++) {
- data_length += iov[i].iov_len;
- sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
- }
-
- return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
-}
-
-/*
- * Post a receive request to the transport
- * The remote peer can only send data when a receive request is posted
- * The interaction is controlled by the send/receive credit system
- */
-static int smbd_post_recv(
- struct smbd_connection *info, struct smbd_response *response)
-{
- struct ib_recv_wr recv_wr;
- int rc = -EIO;
-
- response->sge.addr = ib_dma_map_single(
- info->id->device, response->packet,
- info->max_receive_size, DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(info->id->device, response->sge.addr))
- return rc;
-
- response->sge.length = info->max_receive_size;
- response->sge.lkey = info->pd->local_dma_lkey;
-
- response->cqe.done = recv_done;
-
- recv_wr.wr_cqe = &response->cqe;
- recv_wr.next = NULL;
- recv_wr.sg_list = &response->sge;
- recv_wr.num_sge = 1;
-
- rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
- if (rc) {
- ib_dma_unmap_single(info->id->device, response->sge.addr,
- response->sge.length, DMA_FROM_DEVICE);
- smbd_disconnect_rdma_connection(info);
- log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
- }
-
- return rc;
-}
-
-/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
-static int smbd_negotiate(struct smbd_connection *info)
-{
- int rc;
- struct smbd_response *response = get_receive_buffer(info);
-
- response->type = SMBD_NEGOTIATE_RESP;
- rc = smbd_post_recv(info, response);
- log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x iov.lkey=%x\n",
- rc, response->sge.addr,
- response->sge.length, response->sge.lkey);
- if (rc)
- return rc;
-
- init_completion(&info->negotiate_completion);
- info->negotiate_done = false;
- rc = smbd_post_send_negotiate_req(info);
- if (rc)
- return rc;
-
- rc = wait_for_completion_interruptible_timeout(
- &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
- log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
-
- if (info->negotiate_done)
- return 0;
-
- if (rc == 0)
- rc = -ETIMEDOUT;
- else if (rc == -ERESTARTSYS)
- rc = -EINTR;
- else
- rc = -ENOTCONN;
-
- return rc;
-}
-
-static void put_empty_packet(
- struct smbd_connection *info, struct smbd_response *response)
-{
- spin_lock(&info->empty_packet_queue_lock);
- list_add_tail(&response->list, &info->empty_packet_queue);
- info->count_empty_packet_queue++;
- spin_unlock(&info->empty_packet_queue_lock);
-
- queue_work(info->workqueue, &info->post_send_credits_work);
-}
-
-/*
- * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
- * This is a queue for reassembling upper layer payload and presenting it to
- * the upper layer. All incoming payloads go to the reassembly queue,
- * regardless of whether reassembly is required. The upper layer code reads
- * from the queue for all incoming payloads.
- * Put a received packet to the reassembly queue
- * response: the packet received
- * data_length: the size of payload in this packet
- */
-static void enqueue_reassembly(
- struct smbd_connection *info,
- struct smbd_response *response,
- int data_length)
-{
- spin_lock(&info->reassembly_queue_lock);
- list_add_tail(&response->list, &info->reassembly_queue);
- info->reassembly_queue_length++;
- /*
- * Make sure reassembly_data_length is updated after list and
- * reassembly_queue_length are updated. On the dequeue side
- * reassembly_data_length is checked without a lock to determine
- * if reassembly_queue_length and list are up to date
- */
- virt_wmb();
- info->reassembly_data_length += data_length;
- spin_unlock(&info->reassembly_queue_lock);
- info->count_reassembly_queue++;
- info->count_enqueue_reassembly_queue++;
-}
-
-/*
- * Get the first entry at the front of reassembly queue
- * Caller is responsible for locking
- * return value: the first entry if any, NULL if queue is empty
- */
-static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
-{
- struct smbd_response *ret = NULL;
-
- if (!list_empty(&info->reassembly_queue)) {
- ret = list_first_entry(
- &info->reassembly_queue,
- struct smbd_response, list);
- }
- return ret;
-}
-
-static struct smbd_response *get_empty_queue_buffer(
- struct smbd_connection *info)
-{
- struct smbd_response *ret = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
- if (!list_empty(&info->empty_packet_queue)) {
- ret = list_first_entry(
- &info->empty_packet_queue,
- struct smbd_response, list);
- list_del(&ret->list);
- info->count_empty_packet_queue--;
- }
- spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
-
- return ret;
-}
-
-/*
- * Get a receive buffer
- * For each remote send, we need to post a receive. The receive buffers are
- * pre-allocated.
- * return value: the receive buffer, NULL if none is available
- */
-static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
-{
- struct smbd_response *ret = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&info->receive_queue_lock, flags);
- if (!list_empty(&info->receive_queue)) {
- ret = list_first_entry(
- &info->receive_queue,
- struct smbd_response, list);
- list_del(&ret->list);
- info->count_receive_queue--;
- info->count_get_receive_buffer++;
- }
- spin_unlock_irqrestore(&info->receive_queue_lock, flags);
-
- return ret;
-}
-
-/*
- * Return a receive buffer
- * When a receive buffer is returned, we can post a new receive and extend
- * more receive credits to the remote peer. This is done immediately after a
- * receive buffer is returned.
- */
-static void put_receive_buffer(
- struct smbd_connection *info, struct smbd_response *response)
-{
- unsigned long flags;
-
- ib_dma_unmap_single(info->id->device, response->sge.addr,
- response->sge.length, DMA_FROM_DEVICE);
-
- spin_lock_irqsave(&info->receive_queue_lock, flags);
- list_add_tail(&response->list, &info->receive_queue);
- info->count_receive_queue++;
- info->count_put_receive_buffer++;
- spin_unlock_irqrestore(&info->receive_queue_lock, flags);
-
- queue_work(info->workqueue, &info->post_send_credits_work);
-}
-
-/* Preallocate all receive buffers on transport establishment */
-static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
-{
- int i;
- struct smbd_response *response;
-
- INIT_LIST_HEAD(&info->reassembly_queue);
- spin_lock_init(&info->reassembly_queue_lock);
- info->reassembly_data_length = 0;
- info->reassembly_queue_length = 0;
-
- INIT_LIST_HEAD(&info->receive_queue);
- spin_lock_init(&info->receive_queue_lock);
- info->count_receive_queue = 0;
-
- INIT_LIST_HEAD(&info->empty_packet_queue);
- spin_lock_init(&info->empty_packet_queue_lock);
- info->count_empty_packet_queue = 0;
-
- init_waitqueue_head(&info->wait_receive_queues);
-
- for (i = 0; i < num_buf; i++) {
- response = mempool_alloc(info->response_mempool, GFP_KERNEL);
- if (!response)
- goto allocate_failed;
-
- response->info = info;
- list_add_tail(&response->list, &info->receive_queue);
- info->count_receive_queue++;
- }
-
- return 0;
-
-allocate_failed:
- while (!list_empty(&info->receive_queue)) {
- response = list_first_entry(
- &info->receive_queue,
- struct smbd_response, list);
- list_del(&response->list);
- info->count_receive_queue--;
-
- mempool_free(response, info->response_mempool);
- }
- return -ENOMEM;
-}
-
-static void destroy_receive_buffers(struct smbd_connection *info)
-{
- struct smbd_response *response;
-
- while ((response = get_receive_buffer(info)))
- mempool_free(response, info->response_mempool);
-
- while ((response = get_empty_queue_buffer(info)))
- mempool_free(response, info->response_mempool);
-}
-
-/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
-static void idle_connection_timer(struct work_struct *work)
-{
- struct smbd_connection *info = container_of(
- work, struct smbd_connection,
- idle_timer_work.work);
-
- if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
- log_keep_alive(ERR,
- "error status info->keep_alive_requested=%d\n",
- info->keep_alive_requested);
- smbd_disconnect_rdma_connection(info);
- return;
- }
-
- log_keep_alive(INFO, "about to send an empty idle message\n");
- smbd_post_send_empty(info);
-
- /* Setup the next idle timeout work */
- queue_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
-}
-
-/*
- * Destroy the transport and related RDMA and memory resources
- * Need to go through all the pending counters and make sure no one is using
- * the transport while it is destroyed
- */
-void smbd_destroy(struct TCP_Server_Info *server)
-{
- struct smbd_connection *info = server->smbd_conn;
- struct smbd_response *response;
- unsigned long flags;
-
- if (!info) {
- log_rdma_event(INFO, "rdma session already destroyed\n");
- return;
- }
-
- log_rdma_event(INFO, "destroying rdma session\n");
- if (info->transport_status != SMBD_DISCONNECTED) {
- rdma_disconnect(server->smbd_conn->id);
- log_rdma_event(INFO, "wait for transport being disconnected\n");
- wait_event_interruptible(
- info->disconn_wait,
- info->transport_status == SMBD_DISCONNECTED);
- }
-
- log_rdma_event(INFO, "destroying qp\n");
- ib_drain_qp(info->id->qp);
- rdma_destroy_qp(info->id);
-
- log_rdma_event(INFO, "cancelling idle timer\n");
- cancel_delayed_work_sync(&info->idle_timer_work);
-
- log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
- wait_event(info->wait_send_pending,
- atomic_read(&info->send_pending) == 0);
-
- /* It's not possible for the upper layer to get to reassembly */
- log_rdma_event(INFO, "drain the reassembly queue\n");
- do {
- spin_lock_irqsave(&info->reassembly_queue_lock, flags);
- response = _get_first_reassembly(info);
- if (response) {
- list_del(&response->list);
- spin_unlock_irqrestore(
- &info->reassembly_queue_lock, flags);
- put_receive_buffer(info, response);
- } else
- spin_unlock_irqrestore(
- &info->reassembly_queue_lock, flags);
- } while (response);
- info->reassembly_data_length = 0;
-
- log_rdma_event(INFO, "free receive buffers\n");
- wait_event(info->wait_receive_queues,
- info->count_receive_queue + info->count_empty_packet_queue
- == info->receive_credit_max);
- destroy_receive_buffers(info);
-
- /*
- * For performance reasons, memory registration and deregistration
- * are not locked by srv_mutex. It is possible some processes are
- * blocked on transport srv_mutex while holding memory registration.
- * Release the transport srv_mutex to allow them to hit the failure
- * path when sending data, and then release memory registrations.
- */
- log_rdma_event(INFO, "freeing mr list\n");
- wake_up_interruptible_all(&info->wait_mr);
- while (atomic_read(&info->mr_used_count)) {
- mutex_unlock(&server->srv_mutex);
- msleep(1000);
- mutex_lock(&server->srv_mutex);
- }
- destroy_mr_list(info);
-
- ib_free_cq(info->send_cq);
- ib_free_cq(info->recv_cq);
- ib_dealloc_pd(info->pd);
- rdma_destroy_id(info->id);
-
- /* free mempools */
- mempool_destroy(info->request_mempool);
- kmem_cache_destroy(info->request_cache);
-
- mempool_destroy(info->response_mempool);
- kmem_cache_destroy(info->response_cache);
-
- info->transport_status = SMBD_DESTROYED;
-
- destroy_workqueue(info->workqueue);
- log_rdma_event(INFO, "rdma session destroyed\n");
- kfree(info);
-}
-
-/*
- * Reconnect this SMBD connection, called from upper layer
- * return value: 0 on success, or actual error code
- */
-int smbd_reconnect(struct TCP_Server_Info *server)
-{
- log_rdma_event(INFO, "reconnecting rdma session\n");
-
- if (!server->smbd_conn) {
- log_rdma_event(INFO, "rdma session already destroyed\n");
- goto create_conn;
- }
-
- /*
- * This is possible if transport is disconnected and we haven't received
- * notification from RDMA, but upper layer has detected timeout
- */
- if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
- log_rdma_event(INFO, "disconnecting transport\n");
- smbd_destroy(server);
- }
-
-create_conn:
- log_rdma_event(INFO, "creating rdma session\n");
- server->smbd_conn = smbd_get_connection(
- server, (struct sockaddr *) &server->dstaddr);
-
- if (server->smbd_conn)
- cifs_dbg(VFS, "RDMA transport re-established\n");
-
- return server->smbd_conn ? 0 : -ENOENT;
-}
-
-static void destroy_caches_and_workqueue(struct smbd_connection *info)
-{
- destroy_receive_buffers(info);
- destroy_workqueue(info->workqueue);
- mempool_destroy(info->response_mempool);
- kmem_cache_destroy(info->response_cache);
- mempool_destroy(info->request_mempool);
- kmem_cache_destroy(info->request_cache);
-}
-
-#define MAX_NAME_LEN 80
-static int allocate_caches_and_workqueue(struct smbd_connection *info)
-{
- char name[MAX_NAME_LEN];
- int rc;
-
- scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
- info->request_cache =
- kmem_cache_create(
- name,
- sizeof(struct smbd_request) +
- sizeof(struct smbd_data_transfer),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!info->request_cache)
- return -ENOMEM;
-
- info->request_mempool =
- mempool_create(info->send_credit_target, mempool_alloc_slab,
- mempool_free_slab, info->request_cache);
- if (!info->request_mempool)
- goto out1;
-
- scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
- info->response_cache =
- kmem_cache_create(
- name,
- sizeof(struct smbd_response) +
- info->max_receive_size,
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!info->response_cache)
- goto out2;
-
- info->response_mempool =
- mempool_create(info->receive_credit_max, mempool_alloc_slab,
- mempool_free_slab, info->response_cache);
- if (!info->response_mempool)
- goto out3;
-
- scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
- info->workqueue = create_workqueue(name);
- if (!info->workqueue)
- goto out4;
-
- rc = allocate_receive_buffers(info, info->receive_credit_max);
- if (rc) {
- log_rdma_event(ERR, "failed to allocate receive buffers\n");
- goto out5;
- }
-
- return 0;
-
-out5:
- destroy_workqueue(info->workqueue);
-out4:
- mempool_destroy(info->response_mempool);
-out3:
- kmem_cache_destroy(info->response_cache);
-out2:
- mempool_destroy(info->request_mempool);
-out1:
- kmem_cache_destroy(info->request_cache);
- return -ENOMEM;
-}
-
-/* Create an SMBD connection, called by the upper layer */
-static struct smbd_connection *_smbd_get_connection(
- struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
-{
- int rc;
- struct smbd_connection *info;
- struct rdma_conn_param conn_param;
- struct ib_qp_init_attr qp_attr;
- struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
- struct ib_port_immutable port_immutable;
- u32 ird_ord_hdr[2];
-
- info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
- if (!info)
- return NULL;
-
- info->transport_status = SMBD_CONNECTING;
- rc = smbd_ia_open(info, dstaddr, port);
- if (rc) {
- log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
- goto create_id_failed;
- }
-
- if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
- smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
- log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
- smbd_send_credit_target,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
- goto config_failed;
- }
-
- if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
- smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
- log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
- smbd_receive_credit_max,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
- goto config_failed;
- }
-
- info->receive_credit_max = smbd_receive_credit_max;
- info->send_credit_target = smbd_send_credit_target;
- info->max_send_size = smbd_max_send_size;
- info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
- info->max_receive_size = smbd_max_receive_size;
- info->keep_alive_interval = smbd_keep_alive_interval;
-
- if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
- log_rdma_event(ERR,
- "warning: device max_send_sge = %d too small\n",
- info->id->device->attrs.max_send_sge);
- log_rdma_event(ERR, "Queue Pair creation may fail\n");
- }
- if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
- log_rdma_event(ERR,
- "warning: device max_recv_sge = %d too small\n",
- info->id->device->attrs.max_recv_sge);
- log_rdma_event(ERR, "Queue Pair creation may fail\n");
- }
-
- info->send_cq = NULL;
- info->recv_cq = NULL;
- info->send_cq =
- ib_alloc_cq_any(info->id->device, info,
- info->send_credit_target, IB_POLL_SOFTIRQ);
- if (IS_ERR(info->send_cq)) {
- info->send_cq = NULL;
- goto alloc_cq_failed;
- }
-
- info->recv_cq =
- ib_alloc_cq_any(info->id->device, info,
- info->receive_credit_max, IB_POLL_SOFTIRQ);
- if (IS_ERR(info->recv_cq)) {
- info->recv_cq = NULL;
- goto alloc_cq_failed;
- }
-
- memset(&qp_attr, 0, sizeof(qp_attr));
- qp_attr.event_handler = smbd_qp_async_error_upcall;
- qp_attr.qp_context = info;
- qp_attr.cap.max_send_wr = info->send_credit_target;
- qp_attr.cap.max_recv_wr = info->receive_credit_max;
- qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
- qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
- qp_attr.cap.max_inline_data = 0;
- qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- qp_attr.qp_type = IB_QPT_RC;
- qp_attr.send_cq = info->send_cq;
- qp_attr.recv_cq = info->recv_cq;
- qp_attr.port_num = ~0;
-
- rc = rdma_create_qp(info->id, info->pd, &qp_attr);
- if (rc) {
- log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
- goto create_qp_failed;
- }
-
- memset(&conn_param, 0, sizeof(conn_param));
- conn_param.initiator_depth = 0;
-
- conn_param.responder_resources =
- info->id->device->attrs.max_qp_rd_atom
- < SMBD_CM_RESPONDER_RESOURCES ?
- info->id->device->attrs.max_qp_rd_atom :
- SMBD_CM_RESPONDER_RESOURCES;
- info->responder_resources = conn_param.responder_resources;
- log_rdma_mr(INFO, "responder_resources=%d\n",
- info->responder_resources);
-
- /* Need to send IRD/ORD in private data for iWARP */
- info->id->device->ops.get_port_immutable(
- info->id->device, info->id->port_num, &port_immutable);
- if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
- ird_ord_hdr[0] = info->responder_resources;
- ird_ord_hdr[1] = 1;
- conn_param.private_data = ird_ord_hdr;
- conn_param.private_data_len = sizeof(ird_ord_hdr);
- } else {
- conn_param.private_data = NULL;
- conn_param.private_data_len = 0;
- }
-
- conn_param.retry_count = SMBD_CM_RETRY;
- conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
- conn_param.flow_control = 0;
-
- log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
- &addr_in->sin_addr, port);
-
- init_waitqueue_head(&info->conn_wait);
- init_waitqueue_head(&info->disconn_wait);
- init_waitqueue_head(&info->wait_reassembly_queue);
- rc = rdma_connect(info->id, &conn_param);
- if (rc) {
- log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
- goto rdma_connect_failed;
- }
-
- wait_event_interruptible(
- info->conn_wait, info->transport_status != SMBD_CONNECTING);
-
- if (info->transport_status != SMBD_CONNECTED) {
- log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
- goto rdma_connect_failed;
- }
-
- log_rdma_event(INFO, "rdma_connect connected\n");
-
- rc = allocate_caches_and_workqueue(info);
- if (rc) {
- log_rdma_event(ERR, "cache allocation failed\n");
- goto allocate_cache_failed;
- }
-
- init_waitqueue_head(&info->wait_send_queue);
- INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
- queue_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
-
- init_waitqueue_head(&info->wait_send_pending);
- atomic_set(&info->send_pending, 0);
-
- init_waitqueue_head(&info->wait_post_send);
-
- INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
- INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
- info->new_credits_offered = 0;
- spin_lock_init(&info->lock_new_credits_offered);
-
- rc = smbd_negotiate(info);
- if (rc) {
- log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
- goto negotiation_failed;
- }
-
- rc = allocate_mr_list(info);
- if (rc) {
- log_rdma_mr(ERR, "memory registration allocation failed\n");
- goto allocate_mr_failed;
- }
-
- return info;
-
-allocate_mr_failed:
- /* At this point, we need to do a full transport shutdown */
- smbd_destroy(server);
- return NULL;
-
-negotiation_failed:
- cancel_delayed_work_sync(&info->idle_timer_work);
- destroy_caches_and_workqueue(info);
- info->transport_status = SMBD_NEGOTIATE_FAILED;
- init_waitqueue_head(&info->conn_wait);
- rdma_disconnect(info->id);
- wait_event(info->conn_wait,
- info->transport_status == SMBD_DISCONNECTED);
-
-allocate_cache_failed:
-rdma_connect_failed:
- rdma_destroy_qp(info->id);
-
-create_qp_failed:
-alloc_cq_failed:
- if (info->send_cq)
- ib_free_cq(info->send_cq);
- if (info->recv_cq)
- ib_free_cq(info->recv_cq);
-
-config_failed:
- ib_dealloc_pd(info->pd);
- rdma_destroy_id(info->id);
-
-create_id_failed:
- kfree(info);
- return NULL;
-}
-
-struct smbd_connection *smbd_get_connection(
- struct TCP_Server_Info *server, struct sockaddr *dstaddr)
-{
- struct smbd_connection *ret;
- int port = SMBD_PORT;
-
-try_again:
- ret = _smbd_get_connection(server, dstaddr, port);
-
- /* Try SMB_PORT if SMBD_PORT doesn't work */
- if (!ret && port == SMBD_PORT) {
- port = SMB_PORT;
- goto try_again;
- }
- return ret;
-}
-
-/*
- * Receive data from receive reassembly queue
- * All the incoming data packets are placed in reassembly queue
- * buf: the buffer to read data into
- * size: the length of data to read
- * return value: actual data read
- * Note: this implementation copies the data from the reassembly queue to
- * receive buffers used by the upper layer. This is not the optimal code path.
- * A better way to do it is to not have the upper layer allocate its receive
- * buffers but rather borrow the buffer from the reassembly queue, and return
- * it after the data is consumed. But this would require more changes to upper
- * layer code, and would also need to consider packet boundaries while they are
- * still being reassembled.
- */
-static int smbd_recv_buf(struct smbd_connection *info, char *buf,
- unsigned int size)
-{
- struct smbd_response *response;
- struct smbd_data_transfer *data_transfer;
- int to_copy, to_read, data_read, offset;
- u32 data_length, remaining_data_length, data_offset;
- int rc;
-
-again:
- /*
- * No need to hold the reassembly queue lock all the time as we are
- * the only one reading from the front of the queue. The transport
- * may add more entries to the back of the queue at the same time
- */
- log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
- info->reassembly_data_length);
- if (info->reassembly_data_length >= size) {
- int queue_length;
- int queue_removed = 0;
-
- /*
- * Need to make sure reassembly_data_length is read before
- * reading reassembly_queue_length and calling
- * _get_first_reassembly. This call is lock free
- * as we never read the end of the queue, which is being
- * updated in SOFTIRQ as more data is received
- */
- virt_rmb();
- queue_length = info->reassembly_queue_length;
- data_read = 0;
- to_read = size;
- offset = info->first_entry_offset;
- while (data_read < size) {
- response = _get_first_reassembly(info);
- data_transfer = smbd_response_payload(response);
- data_length = le32_to_cpu(data_transfer->data_length);
- remaining_data_length =
- le32_to_cpu(
- data_transfer->remaining_data_length);
- data_offset = le32_to_cpu(data_transfer->data_offset);
-
- /*
- * The upper layer expects RFC1002 length at the
- * beginning of the payload. Return it to indicate
- * the total length of the packet. This minimizes the
- * change to upper layer packet processing logic. This
- * will eventually be removed when an intermediate
- * transport layer is added
- */
- if (response->first_segment && size == 4) {
- unsigned int rfc1002_len =
- data_length + remaining_data_length;
- *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
- data_read = 4;
- response->first_segment = false;
- log_read(INFO, "returning rfc1002 length %d\n",
- rfc1002_len);
- goto read_rfc1002_done;
- }
-
- to_copy = min_t(int, data_length - offset, to_read);
- memcpy(
- buf + data_read,
- (char *)data_transfer + data_offset + offset,
- to_copy);
-
- /* move on to the next buffer? */
- if (to_copy == data_length - offset) {
- queue_length--;
- /*
- * No need to lock if we are not at the
- * end of the queue
- */
- if (queue_length)
- list_del(&response->list);
- else {
- spin_lock_irq(
- &info->reassembly_queue_lock);
- list_del(&response->list);
- spin_unlock_irq(
- &info->reassembly_queue_lock);
- }
- queue_removed++;
- info->count_reassembly_queue--;
- info->count_dequeue_reassembly_queue++;
- put_receive_buffer(info, response);
- offset = 0;
- log_read(INFO, "put_receive_buffer offset=0\n");
- } else
- offset += to_copy;
-
- to_read -= to_copy;
- data_read += to_copy;
-
- log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
- to_copy, data_length - offset,
- to_read, data_read, offset);
- }
-
- spin_lock_irq(&info->reassembly_queue_lock);
- info->reassembly_data_length -= data_read;
- info->reassembly_queue_length -= queue_removed;
- spin_unlock_irq(&info->reassembly_queue_lock);
-
- info->first_entry_offset = offset;
- log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
- data_read, info->reassembly_data_length,
- info->first_entry_offset);
-read_rfc1002_done:
- return data_read;
- }
-
- log_read(INFO, "wait_event on more data\n");
- rc = wait_event_interruptible(
- info->wait_reassembly_queue,
- info->reassembly_data_length >= size ||
- info->transport_status != SMBD_CONNECTED);
- /* Don't return any data if interrupted */
- if (rc)
- return rc;
-
- if (info->transport_status != SMBD_CONNECTED) {
- log_read(ERR, "disconnected\n");
- return -ECONNABORTED;
- }
-
- goto again;
-}
-
-/*
- * Receive a page from receive reassembly queue
- * page: the page to read data into
- * to_read: the length of data to read
- * return value: actual data read
- */
-static int smbd_recv_page(struct smbd_connection *info,
- struct page *page, unsigned int page_offset,
- unsigned int to_read)
-{
- int ret;
- char *to_address;
- void *page_address;
-
- /* make sure we have the page ready for read */
- ret = wait_event_interruptible(
- info->wait_reassembly_queue,
- info->reassembly_data_length >= to_read ||
- info->transport_status != SMBD_CONNECTED);
- if (ret)
- return ret;
-
- /* now we can read from reassembly queue and not sleep */
- page_address = kmap_atomic(page);
- to_address = (char *) page_address + page_offset;
-
- log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
- page, to_address, to_read);
-
- ret = smbd_recv_buf(info, to_address, to_read);
- kunmap_atomic(page_address);
-
- return ret;
-}
-
-/*
- * Receive data from transport
- * msg: a msghdr pointing to the buffer; can be ITER_KVEC or ITER_BVEC
- * return: total bytes read, or 0. SMB Direct will not do partial read.
- */
-int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
-{
- char *buf;
- struct page *page;
- unsigned int to_read, page_offset;
- int rc;
-
- if (iov_iter_rw(&msg->msg_iter) == WRITE) {
- /* It's a bug in the upper layer to get here */
- cifs_dbg(VFS, "Invalid msg iter dir %u\n",
- iov_iter_rw(&msg->msg_iter));
- rc = -EINVAL;
- goto out;
- }
-
- switch (iov_iter_type(&msg->msg_iter)) {
- case ITER_KVEC:
- buf = msg->msg_iter.kvec->iov_base;
- to_read = msg->msg_iter.kvec->iov_len;
- rc = smbd_recv_buf(info, buf, to_read);
- break;
-
- case ITER_BVEC:
- page = msg->msg_iter.bvec->bv_page;
- page_offset = msg->msg_iter.bvec->bv_offset;
- to_read = msg->msg_iter.bvec->bv_len;
- rc = smbd_recv_page(info, page, page_offset, to_read);
- break;
-
- default:
- /* It's a bug in the upper layer to get here */
- cifs_dbg(VFS, "Invalid msg type %d\n",
- iov_iter_type(&msg->msg_iter));
- rc = -EINVAL;
- }
-
-out:
- /* SMBDirect will read it all or nothing */
- if (rc > 0)
- msg->msg_iter.count = 0;
- return rc;
-}
-
-/*
- * Send data to transport
- * Each rqst is transported as an SMBDirect payload
- * rqst: the data to write
- * return value: 0 on successful write, otherwise an error code
- */
-int smbd_send(struct TCP_Server_Info *server,
- int num_rqst, struct smb_rqst *rqst_array)
-{
- struct smbd_connection *info = server->smbd_conn;
- struct kvec vec;
- int nvecs;
- int size;
- unsigned int buflen, remaining_data_length;
- int start, i, j;
- int max_iov_size =
- info->max_send_size - sizeof(struct smbd_data_transfer);
- struct kvec *iov;
- int rc;
- struct smb_rqst *rqst;
- int rqst_idx;
-
- if (info->transport_status != SMBD_CONNECTED) {
- rc = -EAGAIN;
- goto done;
- }
-
- /*
- * Add in the page array if there is one. The caller needs to set
- * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
- * ends at a page boundary
- */
- remaining_data_length = 0;
- for (i = 0; i < num_rqst; i++)
- remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
-
- if (remaining_data_length > info->max_fragmented_send_size) {
- log_write(ERR, "payload size %d > max size %d\n",
- remaining_data_length, info->max_fragmented_send_size);
- rc = -EINVAL;
- goto done;
- }
-
- log_write(INFO, "num_rqst=%d total length=%u\n",
- num_rqst, remaining_data_length);
-
- rqst_idx = 0;
-next_rqst:
- rqst = &rqst_array[rqst_idx];
- iov = rqst->rq_iov;
-
- cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
- rqst_idx, smb_rqst_len(server, rqst));
- for (i = 0; i < rqst->rq_nvec; i++)
- dump_smb(iov[i].iov_base, iov[i].iov_len);
-
-
- log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
- rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
- rqst->rq_tailsz, smb_rqst_len(server, rqst));
-
- start = i = 0;
- buflen = 0;
- while (true) {
- buflen += iov[i].iov_len;
- if (buflen > max_iov_size) {
- if (i > start) {
- remaining_data_length -=
- (buflen-iov[i].iov_len);
- log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
- start, i, i - start,
- remaining_data_length);
- rc = smbd_post_send_data(
- info, &iov[start], i-start,
- remaining_data_length);
- if (rc)
- goto done;
- } else {
- /* iov[start] is too big, break it */
- nvecs = (buflen+max_iov_size-1)/max_iov_size;
- log_write(INFO, "iov[%d] iov_base=%p buflen=%d break to %d vectors\n",
- start, iov[start].iov_base,
- buflen, nvecs);
- for (j = 0; j < nvecs; j++) {
- vec.iov_base =
- (char *)iov[start].iov_base +
- j*max_iov_size;
- vec.iov_len = max_iov_size;
- if (j == nvecs-1)
- vec.iov_len =
- buflen -
- max_iov_size*(nvecs-1);
- remaining_data_length -= vec.iov_len;
- log_write(INFO,
- "sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
- j, vec.iov_base, vec.iov_len,
- remaining_data_length);
- rc = smbd_post_send_data(
- info, &vec, 1,
- remaining_data_length);
- if (rc)
- goto done;
- }
- i++;
- if (i == rqst->rq_nvec)
- break;
- }
- start = i;
- buflen = 0;
- } else {
- i++;
- if (i == rqst->rq_nvec) {
- /* send out all remaining vecs */
- remaining_data_length -= buflen;
- log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
- start, i, i - start,
- remaining_data_length);
- rc = smbd_post_send_data(info, &iov[start],
- i-start, remaining_data_length);
- if (rc)
- goto done;
- break;
- }
- }
- log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
- }
-
- /* now sending pages if there are any */
- for (i = 0; i < rqst->rq_npages; i++) {
- unsigned int offset;
-
- rqst_page_get_length(rqst, i, &buflen, &offset);
- nvecs = (buflen + max_iov_size - 1) / max_iov_size;
- log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
- buflen, nvecs);
- for (j = 0; j < nvecs; j++) {
- size = max_iov_size;
- if (j == nvecs-1)
- size = buflen - j*max_iov_size;
- remaining_data_length -= size;
- log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
- i, j * max_iov_size + offset, size,
- remaining_data_length);
- rc = smbd_post_send_page(
- info, rqst->rq_pages[i],
- j*max_iov_size + offset,
- size, remaining_data_length);
- if (rc)
- goto done;
- }
- }
-
- rqst_idx++;
- if (rqst_idx < num_rqst)
- goto next_rqst;
-
-done:
- /*
- * As an optimization, we don't wait for individual I/O to finish
- * before sending the next one.
- * Send them all and wait for the pending send count to get to 0,
- * which means all the I/Os have been sent and we are good to return.
- */
-
- wait_event(info->wait_send_pending,
- atomic_read(&info->send_pending) == 0);
-
- return rc;
-}
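The send path above caps each SMBDirect send at max_iov_size payload bytes: an iov that exceeds the cap is broken into nvecs = (buflen + max_iov_size - 1) / max_iov_size pieces, only the last of which is shorter, and remaining_data_length is decremented per piece so the peer always knows how much payload is still to come. A standalone sketch of just that arithmetic (names are assumptions carried over from the function above):

```c
#include <stdio.h>

/* Sketch only: split one oversized buffer the way smbd_send breaks
 * iov[start] into max_iov_size-sized sends. */
static void split_send(unsigned int buflen, unsigned int max_iov_size,
		       unsigned int *remaining_data_length)
{
	unsigned int nvecs = (buflen + max_iov_size - 1) / max_iov_size;
	unsigned int j;

	for (j = 0; j < nvecs; j++) {
		unsigned int len = (j == nvecs - 1) ?
			buflen - max_iov_size * (nvecs - 1) : max_iov_size;

		*remaining_data_length -= len;
		printf("send chunk %u: len=%u remaining=%u\n",
		       j, len, *remaining_data_length);
	}
}

int main(void)
{
	unsigned int remaining = 10000;

	split_send(10000, 4096, &remaining);	/* 3 chunks: 4096, 4096, 1808 */
	return 0;
}
```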
-
-static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct smbd_mr *mr;
- struct ib_cqe *cqe;
-
- if (wc->status) {
- log_rdma_mr(ERR, "status=%d\n", wc->status);
- cqe = wc->wr_cqe;
- mr = container_of(cqe, struct smbd_mr, cqe);
- smbd_disconnect_rdma_connection(mr->conn);
- }
-}
-
-/*
- * The work queue function that recovers MRs
- * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
- * again. Both calls are slow, so finish them in a workqueue. This will not
- * block the I/O path.
- * There is only one workqueue recovering MRs, so there is no need to lock, as
- * the I/O requests calling smbd_register_mr will never update the links in
- * the mr_list.
- */
-static void smbd_mr_recovery_work(struct work_struct *work)
-{
- struct smbd_connection *info =
- container_of(work, struct smbd_connection, mr_recovery_work);
- struct smbd_mr *smbdirect_mr;
- int rc;
-
- list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
- if (smbdirect_mr->state == MR_ERROR) {
-
- /* recover this MR entry */
- rc = ib_dereg_mr(smbdirect_mr->mr);
- if (rc) {
- log_rdma_mr(ERR,
- "ib_dereg_mr failed rc=%x\n",
- rc);
- smbd_disconnect_rdma_connection(info);
- continue;
- }
-
- smbdirect_mr->mr = ib_alloc_mr(
- info->pd, info->mr_type,
- info->max_frmr_depth);
- if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
- info->mr_type,
- info->max_frmr_depth);
- smbd_disconnect_rdma_connection(info);
- continue;
- }
- } else
- /* This MR is being used, don't recover it */
- continue;
-
- smbdirect_mr->state = MR_READY;
-
- /* smbdirect_mr->state is updated by this function
- * and is read and updated by I/O issuing CPUs trying
- * to get an MR; the call to atomic_inc_return
- * implies a memory barrier and guarantees this
- * value is updated before waking up any calls to
- * get_mr() from the I/O issuing CPUs
- */
- if (atomic_inc_return(&info->mr_ready_count) == 1)
- wake_up_interruptible(&info->wait_mr);
- }
-}
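The recovery worker leans on atomic_inc_return(): incrementing mr_ready_count returns the new value with full ordering, and the waiter is only woken on the 0 -> 1 transition, since a non-zero count means get_mr() will find a ready MR on its own. A small C11 sketch of that wake-only-on-first pattern (the names and the trivial "wake" printout are assumptions, not the kernel primitives):

```c
#include <stdatomic.h>
#include <stdio.h>

/* Sketch only: wake waiters only when the ready count goes 0 -> 1,
 * mirroring the atomic_inc_return() == 1 test in smbd_mr_recovery_work. */
static atomic_int mr_ready_count;

static void wake_waiters(void)
{
	puts("wake_up_interruptible(&info->wait_mr)  /* stand-in */");
}

static void mark_mr_ready(void)
{
	/* fetch_add returns the old value; +1 gives the "new value" the
	 * kernel helper would have returned */
	if (atomic_fetch_add(&mr_ready_count, 1) + 1 == 1)
		wake_waiters();
}

int main(void)
{
	mark_mr_ready();	/* 0 -> 1: wakes */
	mark_mr_ready();	/* 1 -> 2: silent */
	printf("ready=%d\n", atomic_load(&mr_ready_count));
	return 0;
}
```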
-
-static void destroy_mr_list(struct smbd_connection *info)
-{
- struct smbd_mr *mr, *tmp;
-
- cancel_work_sync(&info->mr_recovery_work);
- list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
- if (mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(info->id->device, mr->sgl,
- mr->sgl_count, mr->dir);
- ib_dereg_mr(mr->mr);
- kfree(mr->sgl);
- kfree(mr);
- }
-}
-
-/*
- * Allocate MRs used for RDMA read/write
- * The number of MRs will not exceed hardware capability in responder_resources
- * All MRs are kept in mr_list. The MR can be recovered after it's used
- * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
- * as MRs are used and recovered for I/O, but the list links will not change
- */
-static int allocate_mr_list(struct smbd_connection *info)
-{
- int i;
- struct smbd_mr *smbdirect_mr, *tmp;
-
- INIT_LIST_HEAD(&info->mr_list);
- init_waitqueue_head(&info->wait_mr);
- spin_lock_init(&info->mr_list_lock);
- atomic_set(&info->mr_ready_count, 0);
- atomic_set(&info->mr_used_count, 0);
- init_waitqueue_head(&info->wait_for_mr_cleanup);
- /* Allocate more MRs (2x) than hardware responder_resources */
- for (i = 0; i < info->responder_resources * 2; i++) {
- smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
- if (!smbdirect_mr)
- goto out;
- smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
- info->max_frmr_depth);
- if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
- info->mr_type, info->max_frmr_depth);
- goto out;
- }
- smbdirect_mr->sgl = kcalloc(
- info->max_frmr_depth,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!smbdirect_mr->sgl) {
- log_rdma_mr(ERR, "failed to allocate sgl\n");
- ib_dereg_mr(smbdirect_mr->mr);
- goto out;
- }
- smbdirect_mr->state = MR_READY;
- smbdirect_mr->conn = info;
-
- list_add_tail(&smbdirect_mr->list, &info->mr_list);
- atomic_inc(&info->mr_ready_count);
- }
- INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
- return 0;
-
-out:
- kfree(smbdirect_mr);
-
- list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
- ib_dereg_mr(smbdirect_mr->mr);
- kfree(smbdirect_mr->sgl);
- kfree(smbdirect_mr);
- }
- return -ENOMEM;
-}
-
-/*
- * Get an MR from mr_list. This function waits until there is at least one
- * MR available in the list. It may access the list while the
- * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
- * as they never modify the same places. However, there may be several CPUs
- * issuing I/O trying to get an MR at the same time; mr_list_lock is used to
- * protect against this situation.
- */
-static struct smbd_mr *get_mr(struct smbd_connection *info)
-{
- struct smbd_mr *ret;
- int rc;
-again:
- rc = wait_event_interruptible(info->wait_mr,
- atomic_read(&info->mr_ready_count) ||
- info->transport_status != SMBD_CONNECTED);
- if (rc) {
- log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
- return NULL;
- }
-
- if (info->transport_status != SMBD_CONNECTED) {
- log_rdma_mr(ERR, "info->transport_status=%x\n",
- info->transport_status);
- return NULL;
- }
-
- spin_lock(&info->mr_list_lock);
- list_for_each_entry(ret, &info->mr_list, list) {
- if (ret->state == MR_READY) {
- ret->state = MR_REGISTERED;
- spin_unlock(&info->mr_list_lock);
- atomic_dec(&info->mr_ready_count);
- atomic_inc(&info->mr_used_count);
- return ret;
- }
- }
-
- spin_unlock(&info->mr_list_lock);
- /*
- * It is possible that we could fail to get an MR because other processes may
- * try to acquire an MR at the same time. If this is the case, retry it.
- */
- goto again;
-}
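get_mr() can come up empty even after being woken, because several CPUs may race for the same ready MR; it therefore claims an entry under mr_list_lock and simply retries when nothing was found. A pthread-based sketch of that claim-or-retry shape (the fixed array and names are assumptions standing in for the kernel's list and spinlock):

```c
#include <pthread.h>
#include <stdio.h>

enum mr_state { MR_READY, MR_REGISTERED };

/* Sketch only: fixed array instead of the kernel's linked list. */
#define NMR 4
static enum mr_state mrs[NMR];
static pthread_mutex_t mr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Claim one READY entry; return its index or -1 if none (caller retries). */
static int try_get_mr(void)
{
	int i, ret = -1;

	pthread_mutex_lock(&mr_lock);
	for (i = 0; i < NMR; i++) {
		if (mrs[i] == MR_READY) {
			mrs[i] = MR_REGISTERED;	/* claimed under the lock */
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&mr_lock);
	return ret;
}

int main(void)
{
	int i;

	for (i = 0; i < NMR; i++)
		mrs[i] = MR_REGISTERED;		/* all in use */
	mrs[2] = MR_READY;
	printf("first claim: %d\n", try_get_mr());	/* 2 */
	printf("second claim: %d\n", try_get_mr());	/* -1: would retry */
	return 0;
}
```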
-
-/*
- * Register memory for RDMA read/write
- * pages[]: the list of pages to register memory with
- * num_pages: the number of pages to register
- * tailsz: if non-zero, the bytes to register in the last page
- * writing: true if this is a RDMA write (SMB read), false for RDMA read
- * need_invalidate: true if this MR needs to be locally invalidated after I/O
- * return value: the MR registered, NULL if failed.
- */
-struct smbd_mr *smbd_register_mr(
- struct smbd_connection *info, struct page *pages[], int num_pages,
- int offset, int tailsz, bool writing, bool need_invalidate)
-{
- struct smbd_mr *smbdirect_mr;
- int rc, i;
- enum dma_data_direction dir;
- struct ib_reg_wr *reg_wr;
-
- if (num_pages > info->max_frmr_depth) {
- log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
- num_pages, info->max_frmr_depth);
- return NULL;
- }
-
- smbdirect_mr = get_mr(info);
- if (!smbdirect_mr) {
- log_rdma_mr(ERR, "get_mr returning NULL\n");
- return NULL;
- }
- smbdirect_mr->need_invalidate = need_invalidate;
- smbdirect_mr->sgl_count = num_pages;
- sg_init_table(smbdirect_mr->sgl, num_pages);
-
- log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
- num_pages, offset, tailsz);
-
- if (num_pages == 1) {
- sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
- goto skip_multiple_pages;
- }
-
- /* We have at least two pages to register */
- sg_set_page(
- &smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
- i = 1;
- while (i < num_pages - 1) {
- sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
- i++;
- }
- sg_set_page(&smbdirect_mr->sgl[i], pages[i],
- tailsz ? tailsz : PAGE_SIZE, 0);
-
-skip_multiple_pages:
- dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- smbdirect_mr->dir = dir;
- rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
- if (!rc) {
- log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
- num_pages, dir, rc);
- goto dma_map_error;
- }
-
- rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
- NULL, PAGE_SIZE);
- if (rc != num_pages) {
- log_rdma_mr(ERR,
- "ib_map_mr_sg failed rc = %d num_pages = %x\n",
- rc, num_pages);
- goto map_mr_error;
- }
-
- ib_update_fast_reg_key(smbdirect_mr->mr,
- ib_inc_rkey(smbdirect_mr->mr->rkey));
- reg_wr = &smbdirect_mr->wr;
- reg_wr->wr.opcode = IB_WR_REG_MR;
- smbdirect_mr->cqe.done = register_mr_done;
- reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
- reg_wr->wr.num_sge = 0;
- reg_wr->wr.send_flags = IB_SEND_SIGNALED;
- reg_wr->mr = smbdirect_mr->mr;
- reg_wr->key = smbdirect_mr->mr->rkey;
- reg_wr->access = writing ?
- IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
- IB_ACCESS_REMOTE_READ;
-
- /*
- * There is no need to wait for completion of ib_post_send
- * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
- * on the next ib_post_send when we actually send I/O to the remote peer
- */
- rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
- if (!rc)
- return smbdirect_mr;
-
- log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
- rc, reg_wr->key);
-
- /* If all failed, attempt to recover this MR by setting it to MR_ERROR */
-map_mr_error:
- ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
- smbdirect_mr->sgl_count, smbdirect_mr->dir);
-
-dma_map_error:
- smbdirect_mr->state = MR_ERROR;
- if (atomic_dec_and_test(&info->mr_used_count))
- wake_up(&info->wait_for_mr_cleanup);
-
- smbd_disconnect_rdma_connection(info);
-
- return NULL;
-}
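Building the scatterlist above follows a fixed pattern: the first page contributes PAGE_SIZE - offset bytes, interior pages a full PAGE_SIZE, and the last page tailsz bytes (or a full page when tailsz is 0); a single-page registration just contributes tailsz at offset. A standalone sketch of that length computation (the PAGE_SIZE value and helper name are assumptions):

```c
#include <stdio.h>

#define PG 4096u	/* stand-in for PAGE_SIZE */

/* Sketch only: compute per-page SGE lengths the way smbd_register_mr
 * fills smbdirect_mr->sgl. Returns total registered bytes. */
static unsigned int sg_lengths(unsigned int num_pages, unsigned int offset,
			       unsigned int tailsz, unsigned int *len)
{
	unsigned int i, total = 0;

	if (num_pages == 1) {
		len[0] = tailsz;
		return tailsz;
	}

	len[0] = PG - offset;				/* first page */
	for (i = 1; i < num_pages - 1; i++)
		len[i] = PG;				/* middle pages */
	len[num_pages - 1] = tailsz ? tailsz : PG;	/* last page */

	for (i = 0; i < num_pages; i++)
		total += len[i];
	return total;
}

int main(void)
{
	unsigned int len[3];
	unsigned int total = sg_lengths(3, 100, 500, len);

	printf("lengths: %u %u %u total=%u\n", len[0], len[1], len[2], total);
	return 0;
}
```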
-
-static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct smbd_mr *smbdirect_mr;
- struct ib_cqe *cqe;
-
- cqe = wc->wr_cqe;
- smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
- smbdirect_mr->state = MR_INVALIDATED;
- if (wc->status != IB_WC_SUCCESS) {
- log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
- smbdirect_mr->state = MR_ERROR;
- }
- complete(&smbdirect_mr->invalidate_done);
-}
-
-/*
- * Deregister an MR after I/O is done
- * This function may wait if remote invalidation is not used
- * and we have to locally invalidate the buffer to prevent data from being
- * modified by the remote peer after the upper layer consumes it
- */
-int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
-{
- struct ib_send_wr *wr;
- struct smbd_connection *info = smbdirect_mr->conn;
- int rc = 0;
-
- if (smbdirect_mr->need_invalidate) {
- /* Need to finish local invalidation before returning */
- wr = &smbdirect_mr->inv_wr;
- wr->opcode = IB_WR_LOCAL_INV;
- smbdirect_mr->cqe.done = local_inv_done;
- wr->wr_cqe = &smbdirect_mr->cqe;
- wr->num_sge = 0;
- wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
- wr->send_flags = IB_SEND_SIGNALED;
-
- init_completion(&smbdirect_mr->invalidate_done);
- rc = ib_post_send(info->id->qp, wr, NULL);
- if (rc) {
- log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
- smbd_disconnect_rdma_connection(info);
- goto done;
- }
- wait_for_completion(&smbdirect_mr->invalidate_done);
- smbdirect_mr->need_invalidate = false;
- } else
- /*
- * For remote invalidation, just set it to MR_INVALIDATED
- * and defer to mr_recovery_work to recover the MR for next use
- */
- smbdirect_mr->state = MR_INVALIDATED;
-
- if (smbdirect_mr->state == MR_INVALIDATED) {
- ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgl,
- smbdirect_mr->sgl_count,
- smbdirect_mr->dir);
- smbdirect_mr->state = MR_READY;
- if (atomic_inc_return(&info->mr_ready_count) == 1)
- wake_up_interruptible(&info->wait_mr);
- } else
- /*
- * Schedule the work to do MR recovery for future I/Os. MR
- * recovery is slow and we don't want it to block the current I/O
- */
- queue_work(info->workqueue, &info->mr_recovery_work);
-
-done:
- if (atomic_dec_and_test(&info->mr_used_count))
- wake_up(&info->wait_for_mr_cleanup);
-
- return rc;
-}
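smbd_deregister_mr() takes one of two paths: when the server did not remotely invalidate, it posts an IB_WR_LOCAL_INV and waits for its completion; once the MR is known to be invalidated it is unmapped and put straight back to MR_READY, otherwise recovery is deferred to the workqueue. A compact sketch of that decision flow (the boolean flags and printouts are assumptions standing in for the RDMA verbs):

```c
#include <stdbool.h>
#include <stdio.h>

enum mr_state { MR_READY, MR_REGISTERED, MR_INVALIDATED, MR_ERROR };

/* Sketch only: the control flow of smbd_deregister_mr, with the RDMA
 * verbs replaced by printouts. */
static void put_mr(enum mr_state *state, bool need_invalidate,
		   bool local_inv_ok)
{
	if (need_invalidate) {
		puts("post IB_WR_LOCAL_INV and wait for completion");
		*state = local_inv_ok ? MR_INVALIDATED : MR_ERROR;
	} else {
		/* remote invalidation already happened */
		*state = MR_INVALIDATED;
	}

	if (*state == MR_INVALIDATED) {
		puts("dma-unmap sgl, mark MR_READY, wake get_mr() waiters");
		*state = MR_READY;
	} else {
		puts("queue mr_recovery_work for slow recovery");
	}
}

int main(void)
{
	enum mr_state s = MR_REGISTERED;

	put_mr(&s, true, true);		/* local invalidation path */
	put_mr(&s, false, true);	/* remote invalidation path */
	return 0;
}
```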
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
deleted file mode 100644
index a87fca82a796..000000000000
--- a/fs/cifs/smbdirect.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2017, Microsoft Corporation.
- *
- * Author(s): Long Li <longli@microsoft.com>
- */
-#ifndef _SMBDIRECT_H
-#define _SMBDIRECT_H
-
-#ifdef CONFIG_CIFS_SMB_DIRECT
-#define cifs_rdma_enabled(server) ((server)->rdma)
-
-#include "cifsglob.h"
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_cm.h>
-#include <linux/mempool.h>
-
-extern int rdma_readwrite_threshold;
-extern int smbd_max_frmr_depth;
-extern int smbd_keep_alive_interval;
-extern int smbd_max_receive_size;
-extern int smbd_max_fragmented_recv_size;
-extern int smbd_max_send_size;
-extern int smbd_send_credit_target;
-extern int smbd_receive_credit_max;
-
-enum keep_alive_status {
- KEEP_ALIVE_NONE,
- KEEP_ALIVE_PENDING,
- KEEP_ALIVE_SENT,
-};
-
-enum smbd_connection_status {
- SMBD_CREATED,
- SMBD_CONNECTING,
- SMBD_CONNECTED,
- SMBD_NEGOTIATE_FAILED,
- SMBD_DISCONNECTING,
- SMBD_DISCONNECTED,
- SMBD_DESTROYED
-};
-
-/*
- * The context for the SMBDirect transport
- * Everything related to the transport is here. It has several logical parts
- * 1. RDMA related structures
- * 2. SMBDirect connection parameters
- * 3. Memory registrations
- * 4. Receive and reassembly queues for data receive path
- * 5. mempools for allocating packets
- */
-struct smbd_connection {
- enum smbd_connection_status transport_status;
-
- /* RDMA related */
- struct rdma_cm_id *id;
- struct ib_qp_init_attr qp_attr;
- struct ib_pd *pd;
- struct ib_cq *send_cq, *recv_cq;
- struct ib_device_attr dev_attr;
- int ri_rc;
- struct completion ri_done;
- wait_queue_head_t conn_wait;
- wait_queue_head_t disconn_wait;
-
- struct completion negotiate_completion;
- bool negotiate_done;
-
- struct work_struct disconnect_work;
- struct work_struct post_send_credits_work;
-
- spinlock_t lock_new_credits_offered;
- int new_credits_offered;
-
- /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
- int receive_credit_max;
- int send_credit_target;
- int max_send_size;
- int max_fragmented_recv_size;
- int max_fragmented_send_size;
- int max_receive_size;
- int keep_alive_interval;
- int max_readwrite_size;
- enum keep_alive_status keep_alive_requested;
- int protocol;
- atomic_t send_credits;
- atomic_t receive_credits;
- int receive_credit_target;
- int fragment_reassembly_remaining;
-
- /* Memory registrations */
- /* Maximum number of RDMA read/write outstanding on this connection */
- int responder_resources;
- /* Maximum number of SGEs in a RDMA write/read */
- int max_frmr_depth;
- /*
- * If payload is less than or equal to the threshold,
- * use RDMA send/recv to send upper layer I/O.
- * If payload is more than the threshold,
- * use RDMA read/write through memory registration for I/O.
- */
- int rdma_readwrite_threshold;
- enum ib_mr_type mr_type;
- struct list_head mr_list;
- spinlock_t mr_list_lock;
- /* The number of available MRs ready for memory registration */
- atomic_t mr_ready_count;
- atomic_t mr_used_count;
- wait_queue_head_t wait_mr;
- struct work_struct mr_recovery_work;
- /* Used by transport to wait until all MRs are returned */
- wait_queue_head_t wait_for_mr_cleanup;
-
- /* Activity accounting */
- atomic_t send_pending;
- wait_queue_head_t wait_send_pending;
- wait_queue_head_t wait_post_send;
-
- /* Receive queue */
- struct list_head receive_queue;
- int count_receive_queue;
- spinlock_t receive_queue_lock;
-
- struct list_head empty_packet_queue;
- int count_empty_packet_queue;
- spinlock_t empty_packet_queue_lock;
-
- wait_queue_head_t wait_receive_queues;
-
- /* Reassembly queue */
- struct list_head reassembly_queue;
- spinlock_t reassembly_queue_lock;
- wait_queue_head_t wait_reassembly_queue;
-
- /* total data length of reassembly queue */
- int reassembly_data_length;
- int reassembly_queue_length;
- /* the offset to first buffer in reassembly queue */
- int first_entry_offset;
-
- bool send_immediate;
-
- wait_queue_head_t wait_send_queue;
-
- /*
- * Indicate if we have received a full packet on the connection
- * This is used to identify the first SMBD packet of an assembled
- * payload (SMB packet) in the reassembly queue so we can return an
- * RFC1002 length to the upper layer to indicate the length of the SMB
- * packet received
- */
- bool full_packet_received;
-
- struct workqueue_struct *workqueue;
- struct delayed_work idle_timer_work;
-
- /* Memory pool for preallocating buffers */
- /* request pool for RDMA send */
- struct kmem_cache *request_cache;
- mempool_t *request_mempool;
-
- /* response pool for RDMA receive */
- struct kmem_cache *response_cache;
- mempool_t *response_mempool;
-
- /* for debug purposes */
- unsigned int count_get_receive_buffer;
- unsigned int count_put_receive_buffer;
- unsigned int count_reassembly_queue;
- unsigned int count_enqueue_reassembly_queue;
- unsigned int count_dequeue_reassembly_queue;
- unsigned int count_send_empty;
-};
-
-enum smbd_message_type {
- SMBD_NEGOTIATE_RESP,
- SMBD_TRANSFER_DATA,
-};
-
-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
-
-/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
-struct smbd_negotiate_req {
- __le16 min_version;
- __le16 max_version;
- __le16 reserved;
- __le16 credits_requested;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
-struct smbd_negotiate_resp {
- __le16 min_version;
- __le16 max_version;
- __le16 negotiated_version;
- __le16 reserved;
- __le16 credits_requested;
- __le16 credits_granted;
- __le32 status;
- __le32 max_readwrite_size;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
-struct smbd_data_transfer {
- __le16 credits_requested;
- __le16 credits_granted;
- __le16 flags;
- __le16 reserved;
- __le32 remaining_data_length;
- __le32 data_offset;
- __le32 data_length;
- __le32 padding;
- __u8 buffer[];
-} __packed;
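Every SMBDirect send carries an smbd_data_transfer header in little-endian layout: data_offset/data_length locate the payload inside the packet, and remaining_data_length tells the peer how many payload bytes are still to come in later fragments (0 on the last one). A userspace sketch that populates such a header (the helper name and values are assumptions; it mirrors the struct above, not the driver's send path, and a real encoder would store the fields little-endian):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch only: same field layout as struct smbd_data_transfer, using
 * fixed-width host types instead of __le16/__le32. */
struct data_transfer_hdr {
	uint16_t credits_requested;
	uint16_t credits_granted;
	uint16_t flags;
	uint16_t reserved;
	uint32_t remaining_data_length;
	uint32_t data_offset;
	uint32_t data_length;
	uint32_t padding;
} __attribute__((packed));

static void fill_hdr(struct data_transfer_hdr *h, uint32_t payload_len,
		     uint32_t still_to_come)
{
	memset(h, 0, sizeof(*h));
	h->credits_requested = 255;		/* illustrative value */
	h->data_offset = sizeof(*h);		/* payload follows the header */
	h->data_length = payload_len;
	h->remaining_data_length = still_to_come; /* 0 on the last fragment */
}

int main(void)
{
	struct data_transfer_hdr h;

	fill_hdr(&h, 4096, 8192);
	printf("hdr=%zu bytes, data at %u, len %u, remaining %u\n",
	       sizeof(h), h.data_offset, h.data_length,
	       h.remaining_data_length);
	return 0;
}
```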
-
-/* The packet fields for a registered RDMA buffer */
-struct smbd_buffer_descriptor_v1 {
- __le64 offset;
- __le32 token;
- __le32 length;
-} __packed;
-
-/* Default maximum number of SGEs in a RDMA send/recv */
-#define SMBDIRECT_MAX_SGE 16
-/* The context for a SMBD request */
-struct smbd_request {
- struct smbd_connection *info;
- struct ib_cqe cqe;
-
- /* the SGE entries for this packet */
- struct ib_sge sge[SMBDIRECT_MAX_SGE];
- int num_sge;
-
- /* SMBD packet header follows this structure */
- u8 packet[];
-};
-
-/* The context for a SMBD response */
-struct smbd_response {
- struct smbd_connection *info;
- struct ib_cqe cqe;
- struct ib_sge sge;
-
- enum smbd_message_type type;
-
- /* Link to receive queue or reassembly queue */
- struct list_head list;
-
- /* Indicate if this is the 1st packet of a payload */
- bool first_segment;
-
- /* SMBD packet header and payload follows this structure */
- u8 packet[];
-};
-
-/* Create a SMBDirect session */
-struct smbd_connection *smbd_get_connection(
- struct TCP_Server_Info *server, struct sockaddr *dstaddr);
-
-/* Reconnect SMBDirect session */
-int smbd_reconnect(struct TCP_Server_Info *server);
-/* Destroy SMBDirect session */
-void smbd_destroy(struct TCP_Server_Info *server);
-
-/* Interface for carrying upper layer I/O through send/recv */
-int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct TCP_Server_Info *server,
- int num_rqst, struct smb_rqst *rqst);
-
-enum mr_state {
- MR_READY,
- MR_REGISTERED,
- MR_INVALIDATED,
- MR_ERROR
-};
-
-struct smbd_mr {
- struct smbd_connection *conn;
- struct list_head list;
- enum mr_state state;
- struct ib_mr *mr;
- struct scatterlist *sgl;
- int sgl_count;
- enum dma_data_direction dir;
- union {
- struct ib_reg_wr wr;
- struct ib_send_wr inv_wr;
- };
- struct ib_cqe cqe;
- bool need_invalidate;
- struct completion invalidate_done;
-};
-
-/* Interfaces to register and deregister MR for RDMA read/write */
-struct smbd_mr *smbd_register_mr(
- struct smbd_connection *info, struct page *pages[], int num_pages,
- int offset, int tailsz, bool writing, bool need_invalidate);
-int smbd_deregister_mr(struct smbd_mr *mr);
-
-#else
-#define cifs_rdma_enabled(server) 0
-struct smbd_connection {};
-static inline void *smbd_get_connection(
- struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
-static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
-static inline void smbd_destroy(struct TCP_Server_Info *server) {}
-static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
-#endif
-
-#endif
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
deleted file mode 100644
index 6cecf302dcfd..000000000000
--- a/fs/cifs/trace.h
+++ /dev/null
@@ -1,1016 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2018, Microsoft Corporation.
- *
- * Author(s): Steve French <stfrench@microsoft.com>
- */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM cifs
-
-#if !defined(_CIFS_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _CIFS_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/net.h>
-#include <linux/inet.h>
-
-/*
- * Please use this 3-part article as a reference for writing new tracepoints:
- * https://lwn.net/Articles/379903/
- */
-
-/* For logging errors in read or write */
-DECLARE_EVENT_CLASS(smb3_rw_err_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid,
- __u64 offset,
- __u32 len,
- int rc),
- TP_ARGS(xid, fid, tid, sesid, offset, len, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u64, offset)
- __field(__u32, len)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->offset = offset;
- __entry->len = len;
- __entry->rc = rc;
- ),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
- __entry->offset, __entry->len, __entry->rc)
-)
-
-#define DEFINE_SMB3_RW_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid, \
- __u64 offset, \
- __u32 len, \
- int rc), \
- TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
-
-DEFINE_SMB3_RW_ERR_EVENT(write_err);
-DEFINE_SMB3_RW_ERR_EVENT(read_err);
-DEFINE_SMB3_RW_ERR_EVENT(query_dir_err);
-DEFINE_SMB3_RW_ERR_EVENT(zero_err);
-DEFINE_SMB3_RW_ERR_EVENT(falloc_err);
-
-
-/* For logging successful read or write */
-DECLARE_EVENT_CLASS(smb3_rw_done_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid,
- __u64 offset,
- __u32 len),
- TP_ARGS(xid, fid, tid, sesid, offset, len),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u64, offset)
- __field(__u32, len)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->offset = offset;
- __entry->len = len;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
- __entry->offset, __entry->len)
-)
-
-#define DEFINE_SMB3_RW_DONE_EVENT(name) \
-DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid, \
- __u64 offset, \
- __u32 len), \
- TP_ARGS(xid, fid, tid, sesid, offset, len))
-
-DEFINE_SMB3_RW_DONE_EVENT(write_enter);
-DEFINE_SMB3_RW_DONE_EVENT(read_enter);
-DEFINE_SMB3_RW_DONE_EVENT(query_dir_enter);
-DEFINE_SMB3_RW_DONE_EVENT(zero_enter);
-DEFINE_SMB3_RW_DONE_EVENT(falloc_enter);
-DEFINE_SMB3_RW_DONE_EVENT(write_done);
-DEFINE_SMB3_RW_DONE_EVENT(read_done);
-DEFINE_SMB3_RW_DONE_EVENT(query_dir_done);
-DEFINE_SMB3_RW_DONE_EVENT(zero_done);
-DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
-
-/*
- * For handle based calls other than read and write, and get/set info
- */
-DECLARE_EVENT_CLASS(smb3_fd_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid),
- TP_ARGS(xid, fid, tid, sesid),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- ),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid)
-)
-
-#define DEFINE_SMB3_FD_EVENT(name) \
-DEFINE_EVENT(smb3_fd_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid), \
- TP_ARGS(xid, fid, tid, sesid))
-
-DEFINE_SMB3_FD_EVENT(flush_enter);
-DEFINE_SMB3_FD_EVENT(flush_done);
-DEFINE_SMB3_FD_EVENT(close_enter);
-DEFINE_SMB3_FD_EVENT(close_done);
-
-DECLARE_EVENT_CLASS(smb3_fd_err_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid,
- int rc),
- TP_ARGS(xid, fid, tid, sesid, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->rc = rc;
- ),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
- __entry->rc)
-)
-
-#define DEFINE_SMB3_FD_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_fd_err_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid, \
- int rc), \
- TP_ARGS(xid, fid, tid, sesid, rc))
-
-DEFINE_SMB3_FD_ERR_EVENT(flush_err);
-DEFINE_SMB3_FD_ERR_EVENT(lock_err);
-DEFINE_SMB3_FD_ERR_EVENT(close_err);
-
-/*
- * For handle based query/set info calls
- */
-DECLARE_EVENT_CLASS(smb3_inf_enter_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid,
- __u8 infclass,
- __u32 type),
- TP_ARGS(xid, fid, tid, sesid, infclass, type),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u8, infclass)
- __field(__u32, type)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->infclass = infclass;
- __entry->type = type;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
- __entry->infclass, __entry->type)
-)
-
-#define DEFINE_SMB3_INF_ENTER_EVENT(name) \
-DEFINE_EVENT(smb3_inf_enter_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid, \
- __u8 infclass, \
- __u32 type), \
- TP_ARGS(xid, fid, tid, sesid, infclass, type))
-
-DEFINE_SMB3_INF_ENTER_EVENT(query_info_enter);
-DEFINE_SMB3_INF_ENTER_EVENT(query_info_done);
-DEFINE_SMB3_INF_ENTER_EVENT(notify_enter);
-DEFINE_SMB3_INF_ENTER_EVENT(notify_done);
-
-DECLARE_EVENT_CLASS(smb3_inf_err_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid,
- __u8 infclass,
- __u32 type,
- int rc),
- TP_ARGS(xid, fid, tid, sesid, infclass, type, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u8, infclass)
- __field(__u32, type)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->infclass = infclass;
- __entry->type = type;
- __entry->rc = rc;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x rc=%d",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
- __entry->infclass, __entry->type, __entry->rc)
-)
-
-#define DEFINE_SMB3_INF_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_inf_err_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid, \
- __u8 infclass, \
- __u32 type, \
- int rc), \
- TP_ARGS(xid, fid, tid, sesid, infclass, type, rc))
-
-DEFINE_SMB3_INF_ERR_EVENT(query_info_err);
-DEFINE_SMB3_INF_ERR_EVENT(set_info_err);
-DEFINE_SMB3_INF_ERR_EVENT(notify_err);
-DEFINE_SMB3_INF_ERR_EVENT(fsctl_err);
-
-DECLARE_EVENT_CLASS(smb3_inf_compound_enter_class,
- TP_PROTO(unsigned int xid,
- __u32 tid,
- __u64 sesid,
- const char *full_path),
- TP_ARGS(xid, tid, sesid, full_path),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __string(path, full_path)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __assign_str(path, full_path);
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s",
- __entry->xid, __entry->sesid, __entry->tid,
- __get_str(path))
-)
-
-#define DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(name) \
-DEFINE_EVENT(smb3_inf_compound_enter_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u32 tid, \
- __u64 sesid, \
- const char *full_path), \
- TP_ARGS(xid, tid, sesid, full_path))
-
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_info_compound_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(posix_query_info_compound_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(hardlink_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
-
-
-DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
- TP_PROTO(unsigned int xid,
- __u32 tid,
- __u64 sesid),
- TP_ARGS(xid, tid, sesid),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u32, tid)
- __field(__u64, sesid)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x",
- __entry->xid, __entry->sesid, __entry->tid)
-)
-
-#define DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(name) \
-DEFINE_EVENT(smb3_inf_compound_done_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u32 tid, \
- __u64 sesid), \
- TP_ARGS(xid, tid, sesid))
-
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_info_compound_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(posix_query_info_compound_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(hardlink_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
-
-
-DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
- TP_PROTO(unsigned int xid,
- __u32 tid,
- __u64 sesid,
- int rc),
- TP_ARGS(xid, tid, sesid, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->rc = rc;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x rc=%d",
- __entry->xid, __entry->sesid, __entry->tid,
- __entry->rc)
-)
-
-#define DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_inf_compound_err_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u32 tid, \
- __u64 sesid, \
- int rc), \
- TP_ARGS(xid, tid, sesid, rc))
-
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_info_compound_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(posix_query_info_compound_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(hardlink_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
-
-/*
- * For logging SMB3 Status code and Command for responses which return errors
- */
-DECLARE_EVENT_CLASS(smb3_cmd_err_class,
- TP_PROTO(__u32 tid,
- __u64 sesid,
- __u16 cmd,
- __u64 mid,
- __u32 status,
- int rc),
- TP_ARGS(tid, sesid, cmd, mid, status, rc),
- TP_STRUCT__entry(
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u16, cmd)
- __field(__u64, mid)
- __field(__u32, status)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->cmd = cmd;
- __entry->mid = mid;
- __entry->status = status;
- __entry->rc = rc;
- ),
- TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
- __entry->sesid, __entry->tid, __entry->cmd, __entry->mid,
- __entry->status, __entry->rc)
-)
-
-#define DEFINE_SMB3_CMD_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_cmd_err_class, smb3_##name, \
- TP_PROTO(__u32 tid, \
- __u64 sesid, \
- __u16 cmd, \
- __u64 mid, \
- __u32 status, \
- int rc), \
- TP_ARGS(tid, sesid, cmd, mid, status, rc))
-
-DEFINE_SMB3_CMD_ERR_EVENT(cmd_err);
-
-DECLARE_EVENT_CLASS(smb3_cmd_done_class,
- TP_PROTO(__u32 tid,
- __u64 sesid,
- __u16 cmd,
- __u64 mid),
- TP_ARGS(tid, sesid, cmd, mid),
- TP_STRUCT__entry(
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u16, cmd)
- __field(__u64, mid)
- ),
- TP_fast_assign(
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->cmd = cmd;
- __entry->mid = mid;
- ),
- TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu",
- __entry->sesid, __entry->tid,
- __entry->cmd, __entry->mid)
-)
-
-#define DEFINE_SMB3_CMD_DONE_EVENT(name) \
-DEFINE_EVENT(smb3_cmd_done_class, smb3_##name, \
- TP_PROTO(__u32 tid, \
- __u64 sesid, \
- __u16 cmd, \
- __u64 mid), \
- TP_ARGS(tid, sesid, cmd, mid))
-
-DEFINE_SMB3_CMD_DONE_EVENT(cmd_enter);
-DEFINE_SMB3_CMD_DONE_EVENT(cmd_done);
-DEFINE_SMB3_CMD_DONE_EVENT(ses_expired);
-
-DECLARE_EVENT_CLASS(smb3_mid_class,
- TP_PROTO(__u16 cmd,
- __u64 mid,
- __u32 pid,
- unsigned long when_sent,
- unsigned long when_received),
- TP_ARGS(cmd, mid, pid, when_sent, when_received),
- TP_STRUCT__entry(
- __field(__u16, cmd)
- __field(__u64, mid)
- __field(__u32, pid)
- __field(unsigned long, when_sent)
- __field(unsigned long, when_received)
- ),
- TP_fast_assign(
- __entry->cmd = cmd;
- __entry->mid = mid;
- __entry->pid = pid;
- __entry->when_sent = when_sent;
- __entry->when_received = when_received;
- ),
- TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
- __entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
- __entry->when_received)
-)
-
-#define DEFINE_SMB3_MID_EVENT(name) \
-DEFINE_EVENT(smb3_mid_class, smb3_##name, \
- TP_PROTO(__u16 cmd, \
- __u64 mid, \
- __u32 pid, \
- unsigned long when_sent, \
- unsigned long when_received), \
- TP_ARGS(cmd, mid, pid, when_sent, when_received))
-
-DEFINE_SMB3_MID_EVENT(slow_rsp);
-
-DECLARE_EVENT_CLASS(smb3_exit_err_class,
- TP_PROTO(unsigned int xid,
- const char *func_name,
- int rc),
- TP_ARGS(xid, func_name, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __string(func_name, func_name)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __assign_str(func_name, func_name);
- __entry->rc = rc;
- ),
- TP_printk("\t%s: xid=%u rc=%d",
- __get_str(func_name), __entry->xid, __entry->rc)
-)
-
-#define DEFINE_SMB3_EXIT_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_exit_err_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- const char *func_name, \
- int rc), \
- TP_ARGS(xid, func_name, rc))
-
-DEFINE_SMB3_EXIT_ERR_EVENT(exit_err);
-
-
-DECLARE_EVENT_CLASS(smb3_sync_err_class,
- TP_PROTO(unsigned long ino,
- int rc),
- TP_ARGS(ino, rc),
- TP_STRUCT__entry(
- __field(unsigned long, ino)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->ino = ino;
- __entry->rc = rc;
- ),
- TP_printk("\tino=%lu rc=%d",
- __entry->ino, __entry->rc)
-)
-
-#define DEFINE_SMB3_SYNC_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_sync_err_class, cifs_##name, \
- TP_PROTO(unsigned long ino, \
- int rc), \
- TP_ARGS(ino, rc))
-
-DEFINE_SMB3_SYNC_ERR_EVENT(fsync_err);
-DEFINE_SMB3_SYNC_ERR_EVENT(flush_err);
-
-
-DECLARE_EVENT_CLASS(smb3_enter_exit_class,
- TP_PROTO(unsigned int xid,
- const char *func_name),
- TP_ARGS(xid, func_name),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __string(func_name, func_name)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __assign_str(func_name, func_name);
- ),
- TP_printk("\t%s: xid=%u",
- __get_str(func_name), __entry->xid)
-)
-
-#define DEFINE_SMB3_ENTER_EXIT_EVENT(name) \
-DEFINE_EVENT(smb3_enter_exit_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- const char *func_name), \
- TP_ARGS(xid, func_name))
-
-DEFINE_SMB3_ENTER_EXIT_EVENT(enter);
-DEFINE_SMB3_ENTER_EXIT_EVENT(exit_done);
-
-/*
- * For SMB2/SMB3 tree connect
- */
-
-DECLARE_EVENT_CLASS(smb3_tcon_class,
- TP_PROTO(unsigned int xid,
- __u32 tid,
- __u64 sesid,
- const char *unc_name,
- int rc),
- TP_ARGS(xid, tid, sesid, unc_name, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __string(name, unc_name)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __assign_str(name, unc_name);
- __entry->rc = rc;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
- __entry->xid, __entry->sesid, __entry->tid,
- __get_str(name), __entry->rc)
-)
-
-#define DEFINE_SMB3_TCON_EVENT(name) \
-DEFINE_EVENT(smb3_tcon_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u32 tid, \
- __u64 sesid, \
- const char *unc_name, \
- int rc), \
- TP_ARGS(xid, tid, sesid, unc_name, rc))
-
-DEFINE_SMB3_TCON_EVENT(tcon);
-
-
-/*
- * For smb2/smb3 open (including create and mkdir) calls
- */
-
-DECLARE_EVENT_CLASS(smb3_open_enter_class,
- TP_PROTO(unsigned int xid,
- __u32 tid,
- __u64 sesid,
- int create_options,
- int desired_access),
- TP_ARGS(xid, tid, sesid, create_options, desired_access),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(int, create_options)
- __field(int, desired_access)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->create_options = create_options;
- __entry->desired_access = desired_access;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x",
- __entry->xid, __entry->sesid, __entry->tid,
- __entry->create_options, __entry->desired_access)
-)
-
-#define DEFINE_SMB3_OPEN_ENTER_EVENT(name) \
-DEFINE_EVENT(smb3_open_enter_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u32 tid, \
- __u64 sesid, \
- int create_options, \
- int desired_access), \
- TP_ARGS(xid, tid, sesid, create_options, desired_access))
-
-DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
-DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
-
-DECLARE_EVENT_CLASS(smb3_open_err_class,
- TP_PROTO(unsigned int xid,
- __u32 tid,
- __u64 sesid,
- int create_options,
- int desired_access,
- int rc),
- TP_ARGS(xid, tid, sesid, create_options, desired_access, rc),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(int, create_options)
- __field(int, desired_access)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->create_options = create_options;
- __entry->desired_access = desired_access;
- __entry->rc = rc;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x rc=%d",
- __entry->xid, __entry->sesid, __entry->tid,
- __entry->create_options, __entry->desired_access, __entry->rc)
-)
-
-#define DEFINE_SMB3_OPEN_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_open_err_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u32 tid, \
- __u64 sesid, \
- int create_options, \
- int desired_access, \
- int rc), \
- TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
-
-DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
-
-DECLARE_EVENT_CLASS(smb3_open_done_class,
- TP_PROTO(unsigned int xid,
- __u64 fid,
- __u32 tid,
- __u64 sesid,
- int create_options,
- int desired_access),
- TP_ARGS(xid, fid, tid, sesid, create_options, desired_access),
- TP_STRUCT__entry(
- __field(unsigned int, xid)
- __field(__u64, fid)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(int, create_options)
- __field(int, desired_access)
- ),
- TP_fast_assign(
- __entry->xid = xid;
- __entry->fid = fid;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->create_options = create_options;
- __entry->desired_access = desired_access;
- ),
- TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx cr_opts=0x%x des_access=0x%x",
- __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
- __entry->create_options, __entry->desired_access)
-)
-
-#define DEFINE_SMB3_OPEN_DONE_EVENT(name) \
-DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
- TP_PROTO(unsigned int xid, \
- __u64 fid, \
- __u32 tid, \
- __u64 sesid, \
- int create_options, \
- int desired_access), \
- TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
-
-DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
-DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
-
-
-DECLARE_EVENT_CLASS(smb3_lease_done_class,
- TP_PROTO(__u32 lease_state,
- __u32 tid,
- __u64 sesid,
- __u64 lease_key_low,
- __u64 lease_key_high),
- TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high),
- TP_STRUCT__entry(
- __field(__u32, lease_state)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u64, lease_key_low)
- __field(__u64, lease_key_high)
- ),
- TP_fast_assign(
- __entry->lease_state = lease_state;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->lease_key_low = lease_key_low;
- __entry->lease_key_high = lease_key_high;
- ),
- TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x",
- __entry->sesid, __entry->tid, __entry->lease_key_high,
- __entry->lease_key_low, __entry->lease_state)
-)
-
-#define DEFINE_SMB3_LEASE_DONE_EVENT(name) \
-DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
- TP_PROTO(__u32 lease_state, \
- __u32 tid, \
- __u64 sesid, \
- __u64 lease_key_low, \
- __u64 lease_key_high), \
- TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
-
-DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
-
-DECLARE_EVENT_CLASS(smb3_lease_err_class,
- TP_PROTO(__u32 lease_state,
- __u32 tid,
- __u64 sesid,
- __u64 lease_key_low,
- __u64 lease_key_high,
- int rc),
- TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc),
- TP_STRUCT__entry(
- __field(__u32, lease_state)
- __field(__u32, tid)
- __field(__u64, sesid)
- __field(__u64, lease_key_low)
- __field(__u64, lease_key_high)
- __field(int, rc)
- ),
- TP_fast_assign(
- __entry->lease_state = lease_state;
- __entry->tid = tid;
- __entry->sesid = sesid;
- __entry->lease_key_low = lease_key_low;
- __entry->lease_key_high = lease_key_high;
- __entry->rc = rc;
- ),
- TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x rc=%d",
- __entry->sesid, __entry->tid, __entry->lease_key_high,
- __entry->lease_key_low, __entry->lease_state, __entry->rc)
-)
-
-#define DEFINE_SMB3_LEASE_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_lease_err_class, smb3_##name, \
- TP_PROTO(__u32 lease_state, \
- __u32 tid, \
- __u64 sesid, \
- __u64 lease_key_low, \
- __u64 lease_key_high, \
- int rc), \
- TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
-
-DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
-
-DECLARE_EVENT_CLASS(smb3_connect_class,
- TP_PROTO(char *hostname,
- __u64 conn_id,
- const struct __kernel_sockaddr_storage *dst_addr),
- TP_ARGS(hostname, conn_id, dst_addr),
- TP_STRUCT__entry(
- __string(hostname, hostname)
- __field(__u64, conn_id)
- __array(__u8, dst_addr, sizeof(struct sockaddr_storage))
- ),
- TP_fast_assign(
- struct sockaddr_storage *pss = NULL;
-
- __entry->conn_id = conn_id;
- pss = (struct sockaddr_storage *)__entry->dst_addr;
- *pss = *dst_addr;
- __assign_str(hostname, hostname);
- ),
- TP_printk("conn_id=0x%llx server=%s addr=%pISpsfc",
- __entry->conn_id,
- __get_str(hostname),
- __entry->dst_addr)
-)
-
-#define DEFINE_SMB3_CONNECT_EVENT(name) \
-DEFINE_EVENT(smb3_connect_class, smb3_##name, \
- TP_PROTO(char *hostname, \
- __u64 conn_id, \
- const struct __kernel_sockaddr_storage *addr), \
- TP_ARGS(hostname, conn_id, addr))
-
-DEFINE_SMB3_CONNECT_EVENT(connect_done);
-
-DECLARE_EVENT_CLASS(smb3_connect_err_class,
- TP_PROTO(char *hostname, __u64 conn_id,
- const struct __kernel_sockaddr_storage *dst_addr, int rc),
- TP_ARGS(hostname, conn_id, dst_addr, rc),
- TP_STRUCT__entry(
- __string(hostname, hostname)
- __field(__u64, conn_id)
- __array(__u8, dst_addr, sizeof(struct sockaddr_storage))
- __field(int, rc)
- ),
- TP_fast_assign(
- struct sockaddr_storage *pss = NULL;
-
- __entry->conn_id = conn_id;
- __entry->rc = rc;
- pss = (struct sockaddr_storage *)__entry->dst_addr;
- *pss = *dst_addr;
- __assign_str(hostname, hostname);
- ),
- TP_printk("rc=%d conn_id=0x%llx server=%s addr=%pISpsfc",
- __entry->rc,
- __entry->conn_id,
- __get_str(hostname),
- __entry->dst_addr)
-)
-
-#define DEFINE_SMB3_CONNECT_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_connect_err_class, smb3_##name, \
- TP_PROTO(char *hostname, \
- __u64 conn_id, \
- const struct __kernel_sockaddr_storage *addr, \
- int rc), \
- TP_ARGS(hostname, conn_id, addr, rc))
-
-DEFINE_SMB3_CONNECT_ERR_EVENT(connect_err);
-
-DECLARE_EVENT_CLASS(smb3_reconnect_class,
- TP_PROTO(__u64 currmid,
- __u64 conn_id,
- char *hostname),
- TP_ARGS(currmid, conn_id, hostname),
- TP_STRUCT__entry(
- __field(__u64, currmid)
- __field(__u64, conn_id)
- __string(hostname, hostname)
- ),
- TP_fast_assign(
- __entry->currmid = currmid;
- __entry->conn_id = conn_id;
- __assign_str(hostname, hostname);
- ),
- TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
- __entry->conn_id,
- __get_str(hostname),
- __entry->currmid)
-)
-
-#define DEFINE_SMB3_RECONNECT_EVENT(name) \
-DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
- TP_PROTO(__u64 currmid, \
- __u64 conn_id, \
- char *hostname), \
- TP_ARGS(currmid, conn_id, hostname))
-
-DEFINE_SMB3_RECONNECT_EVENT(reconnect);
-DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
-
-DECLARE_EVENT_CLASS(smb3_credit_class,
- TP_PROTO(__u64 currmid,
- __u64 conn_id,
- char *hostname,
- int credits,
- int credits_to_add,
- int in_flight),
- TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight),
- TP_STRUCT__entry(
- __field(__u64, currmid)
- __field(__u64, conn_id)
- __string(hostname, hostname)
- __field(int, credits)
- __field(int, credits_to_add)
- __field(int, in_flight)
- ),
- TP_fast_assign(
- __entry->currmid = currmid;
- __entry->conn_id = conn_id;
- __assign_str(hostname, hostname);
- __entry->credits = credits;
- __entry->credits_to_add = credits_to_add;
- __entry->in_flight = in_flight;
- ),
- TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
- "credits=%d credit_change=%d in_flight=%d",
- __entry->conn_id,
- __get_str(hostname),
- __entry->currmid,
- __entry->credits,
- __entry->credits_to_add,
- __entry->in_flight)
-)
-
-#define DEFINE_SMB3_CREDIT_EVENT(name) \
-DEFINE_EVENT(smb3_credit_class, smb3_##name, \
- TP_PROTO(__u64 currmid, \
- __u64 conn_id, \
- char *hostname, \
- int credits, \
- int credits_to_add, \
- int in_flight), \
- TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight))
-
-DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
-DEFINE_SMB3_CREDIT_EVENT(reconnect_detected);
-DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
-DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
-DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
-DEFINE_SMB3_CREDIT_EVENT(add_credits);
-DEFINE_SMB3_CREDIT_EVENT(set_credits);
-
-#endif /* _CIFS_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 3b8c4513118f..970f0022ec52 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -93,13 +93,13 @@ static void coda_flag_children(struct dentry *parent, int flag)
struct dentry *de;
spin_lock(&parent->d_lock);
- list_for_each_entry(de, &parent->d_subdirs, d_child) {
+ hlist_for_each_entry(de, &parent->d_children, d_sib) {
+ struct inode *inode = d_inode_rcu(de);
/* don't know what to do with negative dentries */
- if (d_inode(de) )
- coda_flag_inode(d_inode(de), flag);
+ if (inode)
+ coda_flag_inode(inode, flag);
}
spin_unlock(&parent->d_lock);
- return;
}
void coda_flag_inode_children(struct inode *inode, int flag)
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 62a3d2565c26..70bb0579b40c 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -70,7 +70,7 @@ retry:
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
cii = ITOC(inode);
/* we still need to set i_ino for things like stat(2) */
inode->i_ino = hash;
@@ -148,7 +148,7 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
/* we should never see newly created inodes because we intentionally
* fail in the initialization callback */
- BUG_ON(inode->i_state & I_NEW);
+ BUG_ON(inode_state_read_once(inode) & I_NEW);
return inode;
}
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index 903ca8fa4b9b..1d2dac95f86a 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -123,11 +123,14 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
if (attr->va_size != -1)
inode->i_blocks = (attr->va_size + 511) >> 9;
if (attr->va_atime.tv_sec != -1)
- inode->i_atime = coda_to_timespec64(attr->va_atime);
+ inode_set_atime_to_ts(inode,
+ coda_to_timespec64(attr->va_atime));
if (attr->va_mtime.tv_sec != -1)
- inode->i_mtime = coda_to_timespec64(attr->va_mtime);
+ inode_set_mtime_to_ts(inode,
+ coda_to_timespec64(attr->va_mtime));
if (attr->va_ctime.tv_sec != -1)
- inode->i_ctime = coda_to_timespec64(attr->va_ctime);
+ inode_set_ctime_to_ts(inode,
+ coda_to_timespec64(attr->va_ctime));
}
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h
index 9be281bbcc06..dd6277d87afb 100644
--- a/fs/coda/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -46,12 +46,12 @@ extern const struct file_operations coda_ioctl_operations;
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
int coda_release(struct inode *i, struct file *f);
-int coda_permission(struct user_namespace *mnt_userns, struct inode *inode,
+int coda_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask);
int coda_revalidate_inode(struct inode *);
-int coda_getattr(struct user_namespace *, const struct path *, struct kstat *,
+int coda_getattr(struct mnt_idmap *, const struct path *, struct kstat *,
u32, unsigned int);
-int coda_setattr(struct user_namespace *, struct dentry *, struct iattr *);
+int coda_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
/* this file: helpers */
char *coda_f2s(struct CodaFid *f);
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 328d7a684b63..ca9990017265 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -73,7 +73,7 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsig
}
-int coda_permission(struct user_namespace *mnt_userns, struct inode *inode,
+int coda_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
int error;
@@ -111,7 +111,7 @@ static inline void coda_dir_update_mtime(struct inode *dir)
/* optimistically we can also act as if our nose bleeds. The
* granularity of the mtime is coarse anyways so we might actually be
* right most of the time. Note: we only do this for directories. */
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
#endif
}
@@ -133,7 +133,7 @@ static inline void coda_dir_drop_nlink(struct inode *dir)
}
/* creation routines: create, mknod, mkdir, link, symlink */
-static int coda_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int coda_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *de, umode_t mode, bool excl)
{
int error;
@@ -166,8 +166,8 @@ err_out:
return error;
}
-static int coda_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *de, umode_t mode)
+static struct dentry *coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *de, umode_t mode)
{
struct inode *inode;
struct coda_vattr attrs;
@@ -177,14 +177,14 @@ static int coda_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
struct CodaFid newfid;
if (is_root_inode(dir) && coda_iscontrol(name, len))
- return -EPERM;
+ return ERR_PTR(-EPERM);
attrs.va_mode = mode;
- error = venus_mkdir(dir->i_sb, coda_i2f(dir),
+ error = venus_mkdir(dir->i_sb, coda_i2f(dir),
name, len, &newfid, &attrs);
if (error)
goto err_out;
-
+
inode = coda_iget(dir->i_sb, &newfid, &attrs);
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
@@ -195,10 +195,10 @@ static int coda_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
coda_dir_inc_nlink(dir);
coda_dir_update_mtime(dir);
d_instantiate(de, inode);
- return 0;
+ return NULL;
err_out:
d_drop(de);
- return error;
+ return ERR_PTR(error);
}
/* try to make de an entry in dir_inode linked to source_de */
@@ -228,7 +228,7 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
}
-static int coda_symlink(struct user_namespace *mnt_userns,
+static int coda_symlink(struct mnt_idmap *idmap,
struct inode *dir_inode, struct dentry *de,
const char *symname)
{
@@ -295,7 +295,7 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
}
/* rename */
-static int coda_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int coda_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -429,30 +429,16 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
cfi = coda_ftoc(coda_file);
host_file = cfi->cfi_container;
- if (host_file->f_op->iterate || host_file->f_op->iterate_shared) {
- struct inode *host_inode = file_inode(host_file);
- ret = -ENOENT;
- if (!IS_DEADDIR(host_inode)) {
- if (host_file->f_op->iterate_shared) {
- inode_lock_shared(host_inode);
- ret = host_file->f_op->iterate_shared(host_file, ctx);
- file_accessed(host_file);
- inode_unlock_shared(host_inode);
- } else {
- inode_lock(host_inode);
- ret = host_file->f_op->iterate(host_file, ctx);
- file_accessed(host_file);
- inode_unlock(host_inode);
- }
- }
+ ret = iterate_dir(host_file, ctx);
+ if (ret != -ENOTDIR)
return ret;
- }
/* Venus: we must read Venus dirents from a file */
return coda_venus_readdir(coda_file, ctx);
}
/* called when a cache lookup succeeds */
-static int coda_dentry_revalidate(struct dentry *de, unsigned int flags)
+static int coda_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *de, unsigned int flags)
{
struct inode *inode;
struct coda_inode_info *cii;
@@ -585,10 +571,11 @@ const struct inode_operations coda_dir_inode_operations = {
.setattr = coda_setattr,
};
+WRAP_DIR_ITER(coda_readdir) // FIXME!
const struct file_operations coda_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = coda_readdir,
+ .iterate_shared = shared_coda_readdir,
.open = coda_open,
.release = coda_release,
.fsync = coda_fsync,
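
coda_mkdir() above is converted to the newer ->mkdir() prototype that returns a struct dentry * instead of an int: errors come back as ERR_PTR(), and NULL signals success with the passed-in dentry instantiated (some implementations instead return the dentry they actually used). A hedged sketch of that shape for a hypothetical filesystem; example_backend_mkdir() and example_iget() are placeholders:

	static struct dentry *example_mkdir(struct mnt_idmap *idmap, struct inode *dir,
					    struct dentry *dentry, umode_t mode)
	{
		struct inode *inode;
		int err;

		err = example_backend_mkdir(dir, dentry->d_name.name, mode);
		if (err)
			return ERR_PTR(err);

		inode = example_iget(dir->i_sb, dentry);
		if (IS_ERR(inode))
			return ERR_CAST(inode);

		d_instantiate(dentry, inode);
		return NULL;	/* success: the dentry passed in was used */
	}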
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 29dd87be2fb8..a390b5d21196 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -14,6 +14,7 @@
#include <linux/time.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/cred.h>
#include <linux/errno.h>
@@ -22,6 +23,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
+#include <linux/splice.h>
#include <linux/coda.h>
#include "coda_psdev.h"
@@ -77,14 +79,12 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
if (ret)
goto finish_write;
- file_start_write(host_file);
inode_lock(coda_inode);
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
- coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
+ inode_set_mtime_to_ts(coda_inode, inode_set_ctime_current(coda_inode));
inode_unlock(coda_inode);
- file_end_write(host_file);
finish_write:
venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
@@ -93,6 +93,32 @@ finish_write:
return ret;
}
+static ssize_t
+coda_file_splice_read(struct file *coda_file, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct inode *coda_inode = file_inode(coda_file);
+ struct coda_file_info *cfi = coda_ftoc(coda_file);
+ struct file *in = cfi->cfi_container;
+ loff_t ki_pos = *ppos;
+ ssize_t ret;
+
+ ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
+ &cfi->cfi_access_intent,
+ len, ki_pos, CODA_ACCESS_TYPE_READ);
+ if (ret)
+ goto finish_read;
+
+ ret = vfs_splice_read(in, ppos, pipe, len, flags);
+
+finish_read:
+ venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
+ &cfi->cfi_access_intent,
+ len, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
+ return ret;
+}
+
static void
coda_vm_open(struct vm_area_struct *vma)
{
@@ -134,7 +160,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
size_t count;
int ret;
- if (!host_file->f_op->mmap)
+ if (!can_mmap_file(host_file))
return -ENODEV;
if (WARN_ON(coda_file != vma->vm_file))
@@ -173,10 +199,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
spin_unlock(&cii->c_lock);
vma->vm_file = get_file(host_file);
- ret = call_mmap(vma->vm_file, vma);
+ ret = vfs_mmap(vma->vm_file, vma);
if (ret) {
- /* if call_mmap fails, our caller will put host_file so we
+ /* if vfs_mmap fails, our caller will put host_file so we
* should drop the reference to the coda_file that we got.
*/
fput(coda_file);
@@ -301,5 +327,5 @@ const struct file_operations coda_file_operations = {
.open = coda_open,
.release = coda_release,
.fsync = coda_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = coda_file_splice_read,
};
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index d9f1bd7153df..08450d006016 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -24,6 +24,8 @@
#include <linux/pid_namespace.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/vmalloc.h>
#include <linux/coda.h>
@@ -43,7 +45,7 @@ static struct kmem_cache * coda_inode_cachep;
static struct inode *coda_alloc_inode(struct super_block *sb)
{
struct coda_inode_info *ei;
- ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, coda_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -70,8 +72,8 @@ int __init coda_init_inodecache(void)
{
coda_inode_cachep = kmem_cache_create("coda_inode_cache",
sizeof(struct coda_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
- SLAB_ACCOUNT, init_once);
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
+ init_once);
if (coda_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -87,10 +89,10 @@ void coda_destroy_inodecache(void)
kmem_cache_destroy(coda_inode_cachep);
}
-static int coda_remount(struct super_block *sb, int *flags, char *data)
+static int coda_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_NOATIME;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_NOATIME;
return 0;
}
@@ -102,78 +104,119 @@ static const struct super_operations coda_super_operations =
.evict_inode = coda_evict_inode,
.put_super = coda_put_super,
.statfs = coda_statfs,
- .remount_fs = coda_remount,
};
-static int get_device_index(struct coda_mount_data *data)
+struct coda_fs_context {
+ int idx;
+};
+
+enum {
+ Opt_fd,
+};
+
+static const struct fs_parameter_spec coda_param_specs[] = {
+ fsparam_fd ("fd", Opt_fd),
+ {}
+};
+
+static int coda_set_idx(struct fs_context *fc, struct file *file)
{
- struct fd f;
+ struct coda_fs_context *ctx = fc->fs_private;
struct inode *inode;
int idx;
- if (data == NULL) {
- pr_warn("%s: Bad mount data\n", __func__);
- return -1;
+ inode = file_inode(file);
+ if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) {
+ return invalf(fc, "coda: Not coda psdev");
}
+ idx = iminor(inode);
+ if (idx < 0 || idx >= MAX_CODADEVS)
+ return invalf(fc, "coda: Bad minor number");
+ ctx->idx = idx;
+ return 0;
+}
- if (data->version != CODA_MOUNT_VERSION) {
- pr_warn("%s: Bad mount version\n", __func__);
- return -1;
+static int coda_parse_fd(struct fs_context *fc, struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ struct file *file;
+ int err;
+
+ if (param->type == fs_value_is_file) {
+ file = param->file;
+ param->file = NULL;
+ } else {
+ file = fget(result->uint_32);
}
+ if (!file)
+ return -EBADF;
- f = fdget(data->fd);
- if (!f.file)
- goto Ebadf;
- inode = file_inode(f.file);
- if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) {
- fdput(f);
- goto Ebadf;
- }
+ err = coda_set_idx(fc, file);
+ fput(file);
+ return err;
+}
- idx = iminor(inode);
- fdput(f);
+static int coda_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, coda_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
- if (idx < 0 || idx >= MAX_CODADEVS) {
- pr_warn("%s: Bad minor number\n", __func__);
- return -1;
+ switch (opt) {
+ case Opt_fd:
+ return coda_parse_fd(fc, param, &result);
}
- return idx;
-Ebadf:
- pr_warn("%s: Bad file\n", __func__);
- return -1;
+ return 0;
}
-static int coda_fill_super(struct super_block *sb, void *data, int silent)
+/*
+ * Parse coda's binary mount data format. Errors are ignored and we fall back
+ * to index 0, for backward compatibility.
+ */
+static int coda_parse_monolithic(struct fs_context *fc, void *_data)
{
+ struct file *file;
+ struct coda_mount_data *data = _data;
+
+ if (!data)
+ return invalf(fc, "coda: Bad mount data");
+
+ if (data->version != CODA_MOUNT_VERSION)
+ return invalf(fc, "coda: Bad mount version");
+
+ file = fget(data->fd);
+ if (file) {
+ coda_set_idx(fc, file);
+ fput(file);
+ }
+ return 0;
+}
+
+static int coda_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct coda_fs_context *ctx = fc->fs_private;
struct inode *root = NULL;
struct venus_comm *vc;
struct CodaFid fid;
int error;
- int idx;
-
- if (task_active_pid_ns(current) != &init_pid_ns)
- return -EINVAL;
-
- idx = get_device_index((struct coda_mount_data *) data);
- /* Ignore errors in data, for backward compatibility */
- if(idx == -1)
- idx = 0;
-
- pr_info("%s: device index: %i\n", __func__, idx);
+ infof(fc, "coda: device index: %i\n", ctx->idx);
- vc = &coda_comms[idx];
+ vc = &coda_comms[ctx->idx];
mutex_lock(&vc->vc_mutex);
if (!vc->vc_inuse) {
- pr_warn("%s: No pseudo device\n", __func__);
+ errorf(fc, "coda: No pseudo device");
error = -EINVAL;
goto unlock_out;
}
if (vc->vc_sb) {
- pr_warn("%s: Device already mounted\n", __func__);
+ errorf(fc, "coda: Device already mounted");
error = -EBUSY;
goto unlock_out;
}
@@ -187,7 +230,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = 12;
sb->s_magic = CODA_SUPER_MAGIC;
sb->s_op = &coda_super_operations;
- sb->s_d_op = &coda_dentry_operations;
+ set_default_d_op(sb, &coda_dentry_operations);
sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
@@ -251,16 +294,17 @@ static void coda_evict_inode(struct inode *inode)
coda_cache_clear_inode(inode);
}
-int coda_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int coda_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
int err = coda_revalidate_inode(d_inode(path->dentry));
if (!err)
- generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask,
+ d_inode(path->dentry), stat);
return err;
}
-int coda_setattr(struct user_namespace *mnt_userns, struct dentry *de,
+int coda_setattr(struct mnt_idmap *idmap, struct dentry *de,
struct iattr *iattr)
{
struct inode *inode = d_inode(de);
@@ -269,7 +313,7 @@ int coda_setattr(struct user_namespace *mnt_userns, struct dentry *de,
memset(&vattr, 0, sizeof(vattr));
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
coda_iattr_to_vattr(iattr, &vattr);
vattr.va_type = C_VNON; /* cannot set type */
@@ -312,18 +356,45 @@ static int coda_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-/* init_coda: used by filesystems.c to register coda */
+static int coda_get_tree(struct fs_context *fc)
+{
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ return get_tree_nodev(fc, coda_fill_super);
+}
+
+static void coda_free_fc(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations coda_context_ops = {
+ .free = coda_free_fc,
+ .parse_param = coda_parse_param,
+ .parse_monolithic = coda_parse_monolithic,
+ .get_tree = coda_get_tree,
+ .reconfigure = coda_reconfigure,
+};
-static struct dentry *coda_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int coda_init_fs_context(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, coda_fill_super);
+ struct coda_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct coda_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ fc->fs_private = ctx;
+ fc->ops = &coda_context_ops;
+ return 0;
}
struct file_system_type coda_fs_type = {
.owner = THIS_MODULE,
.name = "coda",
- .mount = coda_mount,
+ .init_fs_context = coda_init_fs_context,
+ .parameters = coda_param_specs,
.kill_sb = kill_anon_super,
.fs_flags = FS_BINARY_MOUNTDATA,
};
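
The inode.c changes above move coda from the legacy mount_nodev()/remount_fs path to the fs_context API: mount options are described by fs_parameter_spec entries, parsed in ->parse_param(), and the superblock is built from ->get_tree(). A stripped-down skeleton of the same conversion for a hypothetical nodev filesystem; every example_* identifier is a placeholder, not part of the patch:

	#include <linux/fs.h>
	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>
	#include <linux/slab.h>

	struct example_fs_context {
		int opt_val;			/* parsed mount option */
	};

	enum { Opt_val };

	static const struct fs_parameter_spec example_param_specs[] = {
		fsparam_u32("val", Opt_val),
		{}
	};

	static int example_parse_param(struct fs_context *fc, struct fs_parameter *param)
	{
		struct example_fs_context *ctx = fc->fs_private;
		struct fs_parse_result result;
		int opt;

		opt = fs_parse(fc, example_param_specs, param, &result);
		if (opt < 0)
			return opt;
		if (opt == Opt_val)
			ctx->opt_val = result.uint_32;
		return 0;
	}

	static int example_fill_super(struct super_block *sb, struct fs_context *fc)
	{
		/* set s_op, s_magic, allocate the root inode, etc. */
		return 0;
	}

	static int example_get_tree(struct fs_context *fc)
	{
		return get_tree_nodev(fc, example_fill_super);
	}

	static void example_free_fc(struct fs_context *fc)
	{
		kfree(fc->fs_private);
	}

	static const struct fs_context_operations example_context_ops = {
		.free		= example_free_fc,
		.parse_param	= example_parse_param,
		.get_tree	= example_get_tree,
	};

	static int example_init_fs_context(struct fs_context *fc)
	{
		struct example_fs_context *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		fc->fs_private = ctx;
		fc->ops = &example_context_ops;
		return 0;
	}

The file_system_type then sets .init_fs_context and .parameters instead of .mount, exactly as the coda_fs_type hunk above does.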
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index cb9fd59a688c..36e35c15561a 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -24,7 +24,7 @@
#include "coda_linux.h"
/* pioctl ops */
-static int coda_ioctl_permission(struct user_namespace *mnt_userns,
+static int coda_ioctl_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
static long coda_pioctl(struct file *filp, unsigned int cmd,
unsigned long user_data);
@@ -41,7 +41,7 @@ const struct file_operations coda_ioctl_operations = {
};
/* the coda pioctl inode ops */
-static int coda_ioctl_permission(struct user_namespace *mnt_userns,
+static int coda_ioctl_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
return (mask & MAY_EXEC) ? -EACCES : 0;
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index b39580ad4ce5..3c3148588491 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -361,7 +361,7 @@ static int __init init_coda_psdev(void)
__func__, CODA_PSDEV_MAJOR);
return -EIO;
}
- coda_psdev_class = class_create(THIS_MODULE, "coda");
+ coda_psdev_class = class_create("coda");
if (IS_ERR(coda_psdev_class)) {
err = PTR_ERR(coda_psdev_class);
goto out_chrdev;
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index 8907d0508198..40f84d014524 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -20,29 +20,21 @@
#include "coda_psdev.h"
#include "coda_linux.h"
-static int coda_symlink_filler(struct file *file, struct page *page)
+static int coda_symlink_filler(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
int error;
struct coda_inode_info *cii;
unsigned int len = PAGE_SIZE;
- char *p = page_address(page);
+ char *p = folio_address(folio);
cii = ITOC(inode);
error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
- if (error)
- goto fail;
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
-
-fail:
- SetPageError(page);
- unlock_page(page);
+ folio_end_read(folio, error == 0);
return error;
}
const struct address_space_operations coda_symlink_aops = {
- .readpage = coda_symlink_filler,
+ .read_folio = coda_symlink_filler,
};
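
The symlink.c hunk is the ->readpage to ->read_folio conversion; folio_end_read() replaces the SetPageUptodate()/SetPageError()/unlock_page() sequence by marking the folio uptodate when its second argument is true and unlocking it in either case. A small sketch of the converted shape; example_read_backend() is a placeholder for the real data fetch:

	static int example_read_folio(struct file *file, struct folio *folio)
	{
		struct inode *inode = folio->mapping->host;
		void *buf = folio_address(folio);
		int err;

		err = example_read_backend(inode, buf, folio_size(folio));

		/* marks uptodate on success and unlocks the folio either way */
		folio_end_read(folio, err == 0);
		return err;
	}

	const struct address_space_operations example_aops = {
		.read_folio	= example_read_folio,
	};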
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index fda3b702b1c5..0df46f09b6cc 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -14,7 +14,7 @@
static struct ctl_table_header *fs_table_header;
-static struct ctl_table coda_table[] = {
+static const struct ctl_table coda_table[] = {
{
.procname = "timeout",
.data = &coda_timeout,
@@ -36,22 +36,12 @@ static struct ctl_table coda_table[] = {
.mode = 0600,
.proc_handler = proc_dointvec
},
- {}
-};
-
-static struct ctl_table fs_table[] = {
- {
- .procname = "coda",
- .mode = 0555,
- .child = coda_table
- },
- {}
};
void coda_sysctl_init(void)
{
if ( !fs_table_header )
- fs_table_header = register_sysctl_table(fs_table);
+ fs_table_header = register_sysctl("coda", coda_table);
}
void coda_sysctl_clean(void)
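
The sysctl.c hunk drops the intermediate fs_table: the .child linkage is gone, register_sysctl() registers the leaf table directly at the requested path, and (in recent kernels) the table is sized with ARRAY_SIZE(), so the empty sentinel entry is no longer needed. A minimal sketch with placeholder example_* names:

	#include <linux/sysctl.h>

	static int example_timeout = 30;

	static const struct ctl_table example_table[] = {
		{
			.procname	= "timeout",
			.data		= &example_timeout,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
	};

	static struct ctl_table_header *example_header;

	static void example_sysctl_init(void)
	{
		if (!example_header)
			example_header = register_sysctl("example", example_table);
	}

	static void example_sysctl_exit(void)
	{
		if (example_header) {
			unregister_sysctl_table(example_header);
			example_header = NULL;
		}
	}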
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 59f6cfd06f96..cd6a3721f6f6 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -791,7 +791,7 @@ static int coda_upcall(struct venus_comm *vcp,
sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
if (!sig_req) goto exit;
- sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL);
+ sig_inputArgs = kvzalloc(sizeof(*sig_inputArgs), GFP_KERNEL);
if (!sig_inputArgs) {
kfree(sig_req);
goto exit;
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 95e72d271b95..d5ef5469e4e6 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -80,6 +80,16 @@
#define ELF_HWCAP2 COMPAT_ELF_HWCAP2
#endif
+#ifdef COMPAT_ELF_HWCAP3
+#undef ELF_HWCAP3
+#define ELF_HWCAP3 COMPAT_ELF_HWCAP3
+#endif
+
+#ifdef COMPAT_ELF_HWCAP4
+#undef ELF_HWCAP4
+#define ELF_HWCAP4 COMPAT_ELF_HWCAP4
+#endif
+
#ifdef COMPAT_ARCH_DLINFO
#undef ARCH_DLINFO
#define ARCH_DLINFO COMPAT_ARCH_DLINFO
@@ -135,6 +145,8 @@
#define elf_format compat_elf_format
#define init_elf_binfmt init_compat_elf_binfmt
#define exit_elf_binfmt exit_compat_elf_binfmt
+#define binfmt_elf_test_cases compat_binfmt_elf_test_cases
+#define binfmt_elf_test_suite compat_binfmt_elf_test_suite
/*
* We share all the actual code with the native (64-bit) version.
diff --git a/fs/configfs/Kconfig b/fs/configfs/Kconfig
index 272b64456999..1fcd761fe7be 100644
--- a/fs/configfs/Kconfig
+++ b/fs/configfs/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config CONFIGFS_FS
tristate "Userspace-driven configuration filesystem"
- select SYSFS
help
configfs is a RAM-based filesystem that provides the converse
of sysfs's functionality. Where sysfs is a filesystem-based
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index c0395363eab9..0b969d0eb8ff 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -55,6 +55,8 @@ struct configfs_dirent {
#define CONFIGFS_USET_IN_MKDIR 0x0200
#define CONFIGFS_USET_CREATING 0x0400
#define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR | CONFIGFS_ITEM_BIN_ATTR)
+#define CONFIGFS_PINNED \
+ (CONFIGFS_ROOT | CONFIGFS_DIR | CONFIGFS_ITEM_LINK)
extern struct mutex configfs_symlink_mutex;
extern spinlock_t configfs_dirent_lock;
@@ -73,11 +75,9 @@ extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);
-extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
-
extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
extern void configfs_drop_dentry(struct configfs_dirent *sd, struct dentry *parent);
-extern int configfs_setattr(struct user_namespace *mnt_userns,
+extern int configfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr);
extern struct dentry *configfs_pin_fs(void);
@@ -91,7 +91,7 @@ extern const struct inode_operations configfs_root_inode_operations;
extern const struct inode_operations configfs_symlink_inode_operations;
extern const struct dentry_operations configfs_dentry_ops;
-extern int configfs_symlink(struct user_namespace *mnt_userns,
+extern int configfs_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname);
extern int configfs_unlink(struct inode *dir, struct dentry *dentry);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 1466b5d01cbb..ba95f636a5ab 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -34,6 +34,14 @@
*/
DEFINE_SPINLOCK(configfs_dirent_lock);
+/*
+ * All of link_obj/unlink_obj/link_group/unlink_group require that
+ * subsys->su_mutex is held.
+ * But parent configfs_subsystem is NULL when config_item is root.
+ * Use this mutex when config_item is root.
+ */
+static DEFINE_MUTEX(configfs_subsystem_mutex);
+
static void configfs_d_iput(struct dentry * dentry,
struct inode * inode)
{
@@ -59,7 +67,6 @@ static void configfs_d_iput(struct dentry * dentry,
const struct dentry_operations configfs_dentry_ops = {
.d_iput = configfs_d_iput,
- .d_delete = always_delete_dentry,
};
#ifdef CONFIG_LOCKDEP
@@ -199,7 +206,17 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
return ERR_PTR(-ENOENT);
}
sd->s_frag = get_fragment(frag);
- list_add(&sd->s_sibling, &parent_sd->s_children);
+
+ /*
+ * configfs_lookup scans only for unpinned items. s_children is
+ * partitioned so that configfs_lookup can bail out early.
+ * CONFIGFS_PINNED and CONFIGFS_NOT_PINNED are not symmetrical. readdir
+ * cursors still need to be inserted at the front of the list.
+ */
+ if (sd->s_type & CONFIGFS_PINNED)
+ list_add_tail(&sd->s_sibling, &parent_sd->s_children);
+ else
+ list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
return sd;
@@ -212,10 +229,11 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
*
* called with parent inode's i_mutex held
*/
-static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
- const unsigned char *new)
+static int configfs_dirent_exists(struct dentry *dentry)
{
- struct configfs_dirent * sd;
+ struct configfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
+ const unsigned char *new = dentry->d_name.name;
+ struct configfs_dirent *sd;
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (sd->s_element) {
@@ -281,10 +299,6 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
BUG_ON(!item);
- error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
- if (unlikely(error))
- return error;
-
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
CONFIGFS_DIR | CONFIGFS_USET_CREATING,
frag);
@@ -308,6 +322,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
return 0;
out_remove:
+ configfs_put(dentry->d_fsdata);
configfs_remove_dirent(dentry);
return PTR_ERR(inode);
}
@@ -374,6 +389,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
return 0;
out_remove:
+ configfs_put(dentry->d_fsdata);
configfs_remove_dirent(dentry);
return PTR_ERR(inode);
}
@@ -384,8 +400,14 @@ static void remove_dir(struct dentry * d)
configfs_remove_dirent(d);
- if (d_really_is_positive(d))
- simple_rmdir(d_inode(parent),d);
+ if (d_really_is_positive(d)) {
+ if (likely(simple_empty(d))) {
+ __simple_rmdir(d_inode(parent),d);
+ dput(d);
+ } else {
+ pr_warn("remove_dir (%pd): attributes remain", d);
+ }
+ }
pr_debug(" o %pd removing done (%d)\n", d, d_count(d));
@@ -441,6 +463,18 @@ static struct dentry * configfs_lookup(struct inode *dir,
spin_lock(&configfs_dirent_lock);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+
+ /*
+ * s_children is partitioned, see configfs_new_dirent. The first
+ * pinned item indicates we can stop scanning.
+ */
+ if (sd->s_type & CONFIGFS_PINNED)
+ break;
+
+ /*
+ * Note: CONFIGFS_PINNED and CONFIGFS_NOT_PINNED are asymmetric.
+ * There may be a readdir cursor in this list.
+ */
if ((sd->s_type & CONFIGFS_NOT_PINNED) &&
!strcmp(configfs_get_name(sd), dentry->d_name.name)) {
struct configfs_attribute *attr = sd->s_element;
@@ -570,6 +604,7 @@ static void detach_attrs(struct config_item * item)
static int populate_attrs(struct config_item *item)
{
const struct config_item_type *t = item->ci_type;
+ const struct configfs_group_operations *ops;
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
int error = 0;
@@ -577,14 +612,23 @@ static int populate_attrs(struct config_item *item)
if (!t)
return -EINVAL;
+
+ ops = t->ct_group_ops;
+
if (t->ct_attrs) {
for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
+ if (ops && ops->is_visible && !ops->is_visible(item, attr, i))
+ continue;
+
if ((error = configfs_create_file(item, attr)))
break;
}
}
- if (t->ct_bin_attrs) {
+ if (!error && t->ct_bin_attrs) {
for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
+ if (ops && ops->is_bin_visible && !ops->is_bin_visible(item, bin_attr, i))
+ continue;
+
error = configfs_create_bin_file(item, bin_attr);
if (error)
break;
@@ -931,7 +975,7 @@ static void configfs_dump_one(struct configfs_dirent *sd, int level)
{
pr_info("%*s\"%s\":\n", level, " ", configfs_get_name(sd));
-#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type);
+#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type)
type_print(CONFIGFS_ROOT);
type_print(CONFIGFS_DIR);
type_print(CONFIGFS_ITEM_ATTR);
@@ -1241,8 +1285,8 @@ out_root_unlock:
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);
-static int configfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int ret = 0;
int module_got = 0;
@@ -1422,7 +1466,7 @@ out_put:
put_fragment(frag);
out:
- return ret;
+ return ERR_PTR(ret);
}
static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -1563,10 +1607,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
- if (IS_ERR(file->private_data))
- err = PTR_ERR(file->private_data);
- else
- err = 0;
+ err = PTR_ERR_OR_ZERO(file->private_data);
}
inode_unlock(d_inode(dentry));
@@ -1589,12 +1630,6 @@ static int configfs_dir_close(struct inode *inode, struct file *file)
return 0;
}
-/* Relationship between s_mode and the DT_xxx types */
-static inline unsigned char dt_type(struct configfs_dirent *sd)
-{
- return (sd->s_mode >> 12) & 15;
-}
-
static int configfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
@@ -1644,7 +1679,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
name = configfs_get_name(next);
len = strlen(name);
- if (!dir_emit(ctx, name, len, ino, dt_type(next)))
+ if (!dir_emit(ctx, name, len, ino,
+ fs_umode_to_dtype(next->s_mode)))
return 0;
spin_lock(&configfs_dirent_lock);
@@ -1780,8 +1816,8 @@ void configfs_unregister_group(struct config_group *group)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
+ d_drop(dentry);
fsnotify_rmdir(d_inode(parent), dentry);
- d_delete(dentry);
inode_unlock(d_inode(parent));
dput(dentry);
@@ -1859,7 +1895,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
sd = root->d_fsdata;
+ mutex_lock(&configfs_subsystem_mutex);
link_group(to_config_group(sd->s_element), group);
+ mutex_unlock(&configfs_subsystem_mutex);
inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
@@ -1868,8 +1906,11 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
if (dentry) {
d_add(dentry, NULL);
- err = configfs_attach_group(sd->s_element, &group->cg_item,
- dentry, frag);
+ err = configfs_dirent_exists(dentry);
+ if (!err)
+ err = configfs_attach_group(sd->s_element,
+ &group->cg_item,
+ dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
@@ -1884,7 +1925,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
inode_unlock(d_inode(root));
if (err) {
+ mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
+ mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
put_fragment(frag);
@@ -1922,16 +1965,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
- fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(dentry));
- d_delete(dentry);
+ d_drop(dentry);
+ fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(root));
dput(dentry);
+ mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
+ mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
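
populate_attrs() above now consults optional is_visible()/is_bin_visible() group operations before creating each attribute file, so a config_item_type can hide individual attributes at runtime. A hedged sketch of how a group might use this, assuming (as the populate_attrs() check above implies) that the callback returns nonzero for attributes that should be created; all example_* names and the callback prototype are assumptions, not part of the patch:

	#include <linux/configfs.h>

	static bool example_debug_enabled;

	static struct configfs_attribute example_attr_foo = {
		.ca_name = "foo", .ca_mode = 0644, .ca_owner = THIS_MODULE,
	};
	static struct configfs_attribute example_attr_debug = {
		.ca_name = "debug", .ca_mode = 0644, .ca_owner = THIS_MODULE,
	};

	static struct configfs_attribute *example_attrs[] = {
		&example_attr_foo,
		&example_attr_debug,
		NULL,
	};

	/* assumed prototype; hide "debug" unless explicitly enabled */
	static bool example_is_visible(struct config_item *item,
				       struct configfs_attribute *attr, int n)
	{
		if (attr == &example_attr_debug)
			return example_debug_enabled;
		return true;
	}

	static struct configfs_group_operations example_group_ops = {
		.is_visible	= example_is_visible,
	};

	static const struct config_item_type example_type = {
		.ct_owner	= THIS_MODULE,
		.ct_attrs	= example_attrs,
		.ct_group_ops	= &example_group_ops,
	};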
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 0ad32150611e..affe4742bbb5 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -30,7 +30,7 @@ struct configfs_buffer {
size_t count;
loff_t pos;
char * page;
- struct configfs_item_operations * ops;
+ const struct configfs_item_operations *ops;
struct mutex mutex;
int needs_read_fill;
bool read_in_progress;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index b601610e9907..bcda3372e141 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -32,7 +32,7 @@ static const struct inode_operations configfs_inode_operations ={
.setattr = configfs_setattr,
};
-int configfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int configfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode * inode = d_inode(dentry);
@@ -60,7 +60,7 @@ int configfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
/* attributes were changed atleast once in past */
- error = simple_setattr(mnt_userns, dentry, iattr);
+ error = simple_setattr(idmap, dentry, iattr);
if (error)
return error;
@@ -88,8 +88,7 @@ int configfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime =
- inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
}
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
@@ -97,9 +96,9 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_mode = iattr->ia_mode;
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
- inode->i_atime = iattr->ia_atime;
- inode->i_mtime = iattr->ia_mtime;
- inode->i_ctime = iattr->ia_ctime;
+ inode_set_atime_to_ts(inode, iattr->ia_atime);
+ inode_set_mtime_to_ts(inode, iattr->ia_mtime);
+ inode_set_ctime_to_ts(inode, iattr->ia_ctime);
}
struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
@@ -172,7 +171,7 @@ struct inode *configfs_create(struct dentry *dentry, umode_t mode)
return ERR_PTR(-ENOMEM);
p_inode = d_inode(dentry->d_parent);
- p_inode->i_mtime = p_inode->i_ctime = current_time(p_inode);
+ inode_set_mtime_to_ts(p_inode, inode_set_ctime_current(p_inode));
configfs_set_inode_lock_class(sd, inode);
return inode;
}
@@ -212,33 +211,9 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- simple_unlink(d_inode(parent), dentry);
+ __simple_unlink(d_inode(parent), dentry);
+ dput(dentry);
} else
spin_unlock(&dentry->d_lock);
}
}
-
-void configfs_hash_and_remove(struct dentry * dir, const char * name)
-{
- struct configfs_dirent * sd;
- struct configfs_dirent * parent_sd = dir->d_fsdata;
-
- if (d_really_is_negative(dir))
- /* no inode means this hasn't been made visible yet */
- return;
-
- inode_lock(d_inode(dir));
- list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
- if (!sd->s_element)
- continue;
- if (!strcmp(configfs_get_name(sd), name)) {
- spin_lock(&configfs_dirent_lock);
- list_del_init(&sd->s_sibling);
- spin_unlock(&configfs_dirent_lock);
- configfs_drop_dentry(sd, dir);
- configfs_put(sd);
- break;
- }
- }
- inode_unlock(d_inode(dir));
-}
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 254170a82aa3..c378b5cbf87d 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -66,7 +66,7 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...)
name = kvasprintf(GFP_KERNEL, fmt, args);
va_end(args);
if (!name)
- return -EFAULT;
+ return -ENOMEM;
}
/* Free the old name, if necessary. */
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index c2d820063ec4..4929f3431189 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -36,7 +36,7 @@ static void configfs_free_inode(struct inode *inode)
static const struct super_operations configfs_ops = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.free_inode = configfs_free_inode,
};
@@ -92,7 +92,8 @@ static int configfs_fill_super(struct super_block *sb, struct fs_context *fc)
configfs_root_group.cg_item.ci_dentry = root;
root->d_fsdata = &configfs_root;
sb->s_root = root;
- sb->s_d_op = &configfs_dentry_ops; /* the rest get that */
+ set_default_d_op(sb, &configfs_dentry_ops); /* the rest get that */
+ sb->s_d_flags |= DCACHE_DONTCACHE;
return 0;
}
@@ -115,7 +116,7 @@ static struct file_system_type configfs_fs_type = {
.owner = THIS_MODULE,
.name = "configfs",
.init_fs_context = configfs_init_fs_context,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("configfs");
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 0623c3edcfb9..f3f79c67add5 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -114,34 +114,28 @@ static int create_link(struct config_item *parent_item,
}
-static int get_target(const char *symname, struct path *path,
- struct config_item **target, struct super_block *sb)
+static int get_target(const char *symname, struct config_item **target,
+ struct super_block *sb)
{
+ struct path path __free(path_put) = {};
int ret;
- ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, path);
- if (!ret) {
- if (path->dentry->d_sb == sb) {
- *target = configfs_get_config_item(path->dentry);
- if (!*target) {
- ret = -ENOENT;
- path_put(path);
- }
- } else {
- ret = -EPERM;
- path_put(path);
- }
- }
-
- return ret;
+ ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &path);
+ if (ret)
+ return ret;
+ if (path.dentry->d_sb != sb)
+ return -EPERM;
+ *target = configfs_get_config_item(path.dentry);
+ if (!*target)
+ return -ENOENT;
+ return 0;
}
-int configfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+int configfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
int ret;
- struct path path;
struct configfs_dirent *sd;
struct config_item *parent_item;
struct config_item *target_item = NULL;
@@ -188,7 +182,7 @@ int configfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
* AV, a thoroughly annoyed bastard.
*/
inode_unlock(dir);
- ret = get_target(symname, &path, &target_item, dentry->d_sb);
+ ret = get_target(symname, &target_item, dentry->d_sb);
inode_lock(dir);
if (ret)
goto out_put;
@@ -196,7 +190,7 @@ int configfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (dentry->d_inode || d_unhashed(dentry))
ret = -EEXIST;
else
- ret = inode_permission(&init_user_ns, dir,
+ ret = inode_permission(&nop_mnt_idmap, dir,
MAY_WRITE | MAY_EXEC);
if (!ret)
ret = type->ct_item_ops->allow_link(parent_item, target_item);
@@ -210,7 +204,6 @@ int configfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
}
config_item_put(target_item);
- path_put(&path);
out_put:
config_item_put(parent_item);
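
get_target() above is rewritten around scope-based cleanup: declaring the struct path with __free(path_put) makes the put run automatically when the variable leaves scope, so the early returns no longer need explicit path_put() calls. A short sketch of the pattern; example_check_same_sb() is a placeholder, and the auto path_put() on a still-zeroed path is a no-op since dput(NULL)/mntput(NULL) are safe:

	#include <linux/cleanup.h>
	#include <linux/namei.h>
	#include <linux/path.h>

	static int example_check_same_sb(const char *name, struct super_block *sb)
	{
		struct path path __free(path_put) = {};
		int ret;

		ret = kern_path(name, LOOKUP_FOLLOW, &path);
		if (ret)
			return ret;		/* path still zeroed; cleanup is a no-op */
		if (path.dentry->d_sb != sb)
			return -EPERM;		/* path_put() runs automatically here */
		return 0;
	}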
diff --git a/fs/coredump.c b/fs/coredump.c
index a6b3c196cdef..8feb9c1cf83d 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
+#include <linux/sort.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
@@ -31,7 +32,6 @@
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
-#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
@@ -41,6 +41,17 @@
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
+#include <linux/sysctl.h>
+#include <linux/elf.h>
+#include <linux/pidfs.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <net/af_unix.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <uapi/linux/pidfd.h>
+#include <uapi/linux/un.h>
+#include <uapi/linux/coredump.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -52,21 +63,49 @@
#include <trace/events/sched.h>
-int core_uses_pid;
-unsigned int core_pipe_limit;
-char core_pattern[CORENAME_MAX_SIZE] = "core";
+static bool dump_vma_snapshot(struct coredump_params *cprm);
+static void free_vma_snapshot(struct coredump_params *cprm);
+
+#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
+/* Define a reasonable max cap */
+#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)
+/*
+ * File descriptor number for the pidfd for the thread-group leader of
+ * the coredumping task installed into the usermode helper's file
+ * descriptor table.
+ */
+#define COREDUMP_PIDFD_NUMBER 3
+
+static int core_uses_pid;
+static unsigned int core_pipe_limit;
+static unsigned int core_sort_vma;
+static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
+unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
+static atomic_t core_pipe_count = ATOMIC_INIT(0);
+
+enum coredump_type_t {
+ COREDUMP_FILE = 1,
+ COREDUMP_PIPE = 2,
+ COREDUMP_SOCK = 3,
+ COREDUMP_SOCK_REQ = 4,
+};
struct core_name {
char *corename;
int used, size;
+ unsigned int core_pipe_limit;
+ bool core_dumped;
+ enum coredump_type_t core_type;
+ u64 mask;
};
-/* The maximal length of core_pattern is also specified in sysctl.c */
-
static int expand_corename(struct core_name *cn, int size)
{
- char *corename = krealloc(cn->corename, size, GFP_KERNEL);
+ char *corename;
+
+ size = kmalloc_size_roundup(size);
+ corename = krealloc(cn->corename, size, GFP_KERNEL);
if (!corename)
return -ENOMEM;
@@ -74,7 +113,7 @@ static int expand_corename(struct core_name *cn, int size)
if (size > core_name_size) /* racy but harmless */
core_name_size = size;
- cn->size = ksize(corename);
+ cn->size = size;
cn->corename = corename;
return 0;
}
@@ -189,35 +228,104 @@ put_exe_file:
return ret;
}
-/* format_corename will inspect the pattern parameter, and output a
- * name into corename, which must have space for at least
- * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
+/*
+ * coredump_parse will inspect the pattern parameter, and output a name
+ * into corename, which must have space for at least CORENAME_MAX_SIZE
+ * bytes plus one byte for the zero terminator.
*/
-static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+static bool coredump_parse(struct core_name *cn, struct coredump_params *cprm,
size_t **argv, int *argc)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
- int ispipe = (*pat_ptr == '|');
bool was_space = false;
int pid_in_pattern = 0;
int err = 0;
+ cn->mask = COREDUMP_KERNEL;
+ if (core_pipe_limit)
+ cn->mask |= COREDUMP_WAIT;
cn->used = 0;
cn->corename = NULL;
+ cn->core_pipe_limit = 0;
+ cn->core_dumped = false;
+ if (*pat_ptr == '|')
+ cn->core_type = COREDUMP_PIPE;
+ else if (*pat_ptr == '@')
+ cn->core_type = COREDUMP_SOCK;
+ else
+ cn->core_type = COREDUMP_FILE;
if (expand_corename(cn, core_name_size))
- return -ENOMEM;
+ return false;
cn->corename[0] = '\0';
- if (ispipe) {
+ switch (cn->core_type) {
+ case COREDUMP_PIPE: {
int argvs = sizeof(core_pattern) / 2;
(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
if (!(*argv))
- return -ENOMEM;
+ return false;
(*argv)[(*argc)++] = 0;
++pat_ptr;
if (!(*pat_ptr))
- return -ENOMEM;
+ return false;
+ break;
+ }
+ case COREDUMP_SOCK: {
+ /* skip the @ */
+ pat_ptr++;
+ if (!(*pat_ptr))
+ return false;
+ if (*pat_ptr == '@') {
+ pat_ptr++;
+ if (!(*pat_ptr))
+ return false;
+
+ cn->core_type = COREDUMP_SOCK_REQ;
+ }
+
+ err = cn_printf(cn, "%s", pat_ptr);
+ if (err)
+ return false;
+
+ /* Require absolute paths. */
+ if (cn->corename[0] != '/')
+ return false;
+
+ /*
+ * Ensure we can use spaces to indicate additional
+ * parameters in the future.
+ */
+ if (strchr(cn->corename, ' ')) {
+ coredump_report_failure("Coredump socket may not %s contain spaces", cn->corename);
+ return false;
+ }
+
+ /* Must not contain ".." in the path. */
+ if (name_contains_dotdot(cn->corename)) {
+ coredump_report_failure("Coredump socket may not %s contain '..' spaces", cn->corename);
+ return false;
+ }
+
+ if (strlen(cn->corename) >= UNIX_PATH_MAX) {
+ coredump_report_failure("Coredump socket path %s too long", cn->corename);
+ return false;
+ }
+
+ /*
+ * Currently no need to parse any other options.
+ * Relevant information can be retrieved by the receiver from
+ * the peer pidfd (via SO_PEERPIDFD) or from /proc/<pid>, using
+ * SO_PEERPIDFD to guard against pid recycling when opening
+ * /proc/<pid>.
+ */
+ return true;
+ }
+ case COREDUMP_FILE:
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return false;
}
/* Repeat as long as we have more pattern to process and more output
@@ -227,7 +335,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
* Split on spaces before doing template expansion so that
* %e and %E don't get split if they have spaces in them
*/
- if (ispipe) {
+ if (cn->core_type == COREDUMP_PIPE) {
if (isspace(*pat_ptr)) {
if (cn->used != 0)
was_space = true;
@@ -237,7 +345,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
was_space = false;
err = cn_printf(cn, "%c", '\0');
if (err)
- return err;
+ return false;
(*argv)[(*argc)++] = cn->used;
}
}
@@ -323,6 +431,31 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
err = cn_printf(cn, "%lu",
rlimit(RLIMIT_CORE));
break;
+ /* CPU the task ran on */
+ case 'C':
+ err = cn_printf(cn, "%d", cprm->cpu);
+ break;
+ /* pidfd number */
+ case 'F': {
+ /*
+ * Installing a pidfd only makes sense if
+ * we actually spawn a usermode helper.
+ */
+ if (cn->core_type != COREDUMP_PIPE)
+ break;
+
+ /*
+ * Note that we'll install a pidfd for the
+ * thread-group leader. We know that task
+ * linkage hasn't been removed yet and even if
+ * this @current isn't the actual thread-group
+ * leader we know that the thread-group leader
+ * cannot be reaped until @current has exited.
+ */
+ cprm->pid = task_tgid(current);
+ err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER);
+ break;
+ }
default:
break;
}
@@ -330,7 +463,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
}
if (err)
- return err;
+ return false;
}
out:
@@ -339,25 +472,22 @@ out:
* If core_pattern does not include a %p (as is the default)
* and core_uses_pid is set, then .%pid will be appended to
* the filename. Do not do this for piped commands. */
- if (!ispipe && !pid_in_pattern && core_uses_pid) {
- err = cn_printf(cn, ".%d", task_tgid_vnr(current));
- if (err)
- return err;
- }
- return ispipe;
+ if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid)
+ return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0;
+
+ return true;
}
-static int zap_process(struct task_struct *start, int exit_code, int flags)
+static int zap_process(struct signal_struct *signal, int exit_code)
{
struct task_struct *t;
int nr = 0;
- /* ignore all signals except SIGKILL, see prepare_signal() */
- start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
- start->signal->group_exit_code = exit_code;
- start->signal->group_stop_count = 0;
+ signal->flags = SIGNAL_GROUP_EXIT;
+ signal->group_exit_code = exit_code;
+ signal->group_stop_count = 0;
- for_each_thread(start, t) {
+ __for_each_thread(signal, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
sigaddset(&t->pending.signal, SIGKILL);
@@ -372,13 +502,14 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
static int zap_threads(struct task_struct *tsk,
struct core_state *core_state, int exit_code)
{
+ struct signal_struct *signal = tsk->signal;
int nr = -EAGAIN;
spin_lock_irq(&tsk->sighand->siglock);
- if (!signal_group_exit(tsk->signal)) {
- tsk->signal->core_state = core_state;
- tsk->signal->group_exit_task = tsk;
- nr = zap_process(tsk, exit_code, 0);
+ if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
+ /* Allow SIGKILL, see prepare_signal() */
+ signal->core_state = core_state;
+ nr = zap_process(signal, exit_code);
clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
tsk->flags |= PF_DUMPCORE;
atomic_set(&core_state->nr_threads, nr);
@@ -400,9 +531,8 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
if (core_waiters > 0) {
struct core_thread *ptr;
- freezer_do_not_count();
- wait_for_completion(&core_state->startup);
- freezer_count();
+ wait_for_completion_state(&core_state->startup,
+ TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
/*
* Wait for all the threads to become inactive, so that
* all the thread context (extended register state, like
@@ -410,7 +540,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
*/
ptr = core_state->dumper.next;
while (ptr != NULL) {
- wait_task_inactive(ptr->task, 0);
+ wait_task_inactive(ptr->task, TASK_ANY);
ptr = ptr->next;
}
}
@@ -426,8 +556,6 @@ static void coredump_finish(bool core_dumped)
spin_lock_irq(&current->sighand->siglock);
if (core_dumped && !__fatal_signal_pending(current))
current->signal->group_exit_code |= 0x80;
- current->signal->group_exit_task = NULL;
- current->signal->flags = SIGNAL_GROUP_EXIT;
next = current->signal->core_state->dumper.next;
current->signal->core_state = NULL;
spin_unlock_irq(&current->sighand->siglock);
@@ -480,7 +608,7 @@ static void wait_for_dump_helpers(struct file *file)
}
/*
- * umh_pipe_setup
+ * umh_coredump_setup
* helper function to customize the process used
* to collect the core in userspace. Specifically
* it sets up a pipe and installs it as fd 0 (stdin)
@@ -490,11 +618,34 @@ static void wait_for_dump_helpers(struct file *file)
* is a special value that we use to trap recursive
* core dumps
*/
-static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
+static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
{
struct file *files[2];
struct coredump_params *cp = (struct coredump_params *)info->data;
- int err = create_pipe_files(files, 0);
+ int err;
+
+ if (cp->pid) {
+ struct file *pidfs_file __free(fput) = NULL;
+
+ pidfs_file = pidfs_alloc_file(cp->pid, 0);
+ if (IS_ERR(pidfs_file))
+ return PTR_ERR(pidfs_file);
+
+ pidfs_coredump(cp);
+
+ /*
+ * Usermode helpers are children of either
+ * system_dfl_wq or of kthreadd. So we know that
+ * we're starting off with a clean file descriptor
+ * table. So we should always be able to use
+ * COREDUMP_PIDFD_NUMBER as our file descriptor value.
+ */
+ err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0);
+ if (err < 0)
+ return err;
+ }
+
+ err = create_pipe_files(files, 0);
if (err)
return err;
@@ -502,282 +653,552 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
err = replace_fd(0, files[0], 0);
fput(files[0]);
+ if (err < 0)
+ return err;
+
/* and disallow core files too */
current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
- return err;
+ return 0;
}
-void do_coredump(const kernel_siginfo_t *siginfo)
+#ifdef CONFIG_UNIX
+static bool coredump_sock_connect(struct core_name *cn, struct coredump_params *cprm)
{
- struct core_state core_state;
- struct core_name cn;
- struct mm_struct *mm = current->mm;
- struct linux_binfmt * binfmt;
- const struct cred *old_cred;
- struct cred *cred;
- int retval = 0;
- int ispipe;
- size_t *argv = NULL;
- int argc = 0;
- /* require nonrelative corefile path and be extra careful */
- bool need_suid_safe = false;
- bool core_dumped = false;
- static atomic_t core_dump_count = ATOMIC_INIT(0);
- struct coredump_params cprm = {
- .siginfo = siginfo,
- .regs = signal_pt_regs(),
- .limit = rlimit(RLIMIT_CORE),
- /*
- * We must use the same mm->flags while dumping core to avoid
- * inconsistency of bit flags, since this flag is not protected
- * by any locks.
- */
- .mm_flags = mm->flags,
+ struct file *file __free(fput) = NULL;
+ struct sockaddr_un addr = {
+ .sun_family = AF_UNIX,
};
+ ssize_t addr_len;
+ int retval;
+ struct socket *socket;
- audit_core_dumps(siginfo->si_signo);
+ addr_len = strscpy(addr.sun_path, cn->corename);
+ if (addr_len < 0)
+ return false;
+ addr_len += offsetof(struct sockaddr_un, sun_path) + 1;
- binfmt = mm->binfmt;
- if (!binfmt || !binfmt->core_dump)
- goto fail;
- if (!__get_dumpable(cprm.mm_flags))
- goto fail;
+ /*
+ * It is possible that the userspace process that is supposed to
+ * handle the coredump, and is listening on the AF_UNIX socket,
+ * itself coredumps. Userspace should just mark itself non-dumpable.
+ */
+
+ retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket);
+ if (retval < 0)
+ return false;
+
+ file = sock_alloc_file(socket, 0, NULL);
+ if (IS_ERR(file))
+ return false;
- cred = prepare_creds();
- if (!cred)
- goto fail;
/*
- * We cannot trust fsuid as being the "true" uid of the process
- * nor do we know its entire history. We only know it was tainted
- * so we dump it as root in mode 2, and only into a controlled
- * environment (pipe handler or fully qualified path).
+ * Set the thread-group leader pid which is used for the peer
+ * credentials during connect() below. Then immediately register
+ * it in pidfs...
+ */
+ cprm->pid = task_tgid(current);
+ retval = pidfs_register_pid(cprm->pid);
+ if (retval)
+ return false;
+
+ /*
+ * ... and set the coredump information so userspace has it
+ * available after connect()...
*/
- if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
- /* Setuid core dump mode */
- cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
- need_suid_safe = true;
+ pidfs_coredump(cprm);
+
+ retval = kernel_connect(socket, (struct sockaddr_unsized *)(&addr), addr_len,
+ O_NONBLOCK | SOCK_COREDUMP);
+
+ if (retval) {
+ if (retval == -EAGAIN)
+ coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path);
+ else
+ coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval);
+ return false;
}
- retval = coredump_wait(siginfo->si_signo, &core_state);
- if (retval < 0)
- goto fail_creds;
+ /* ... and validate that @sk_peer_pid matches @cprm.pid. */
+ if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm->pid))
+ return false;
- old_cred = override_creds(cred);
+ cprm->limit = RLIM_INFINITY;
+ cprm->file = no_free_ptr(file);
- ispipe = format_corename(&cn, &cprm, &argv, &argc);
+ return true;
+}
- if (ispipe) {
- int argi;
- int dump_count;
- char **helper_argv;
- struct subprocess_info *sub_info;
+static inline bool coredump_sock_recv(struct file *file, struct coredump_ack *ack, size_t size, int flags)
+{
+ struct msghdr msg = {};
+ struct kvec iov = { .iov_base = ack, .iov_len = size };
+ ssize_t ret;
- if (ispipe < 0) {
- printk(KERN_WARNING "format_corename failed\n");
- printk(KERN_WARNING "Aborting core\n");
- goto fail_unlock;
- }
+ memset(ack, 0, size);
+ ret = kernel_recvmsg(sock_from_file(file), &msg, &iov, 1, size, flags);
+ return ret == size;
+}
- if (cprm.limit == 1) {
- /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
- *
- * Normally core limits are irrelevant to pipes, since
- * we're not writing to the file system, but we use
- * cprm.limit of 1 here as a special value, this is a
- * consistent way to catch recursive crashes.
- * We can still crash if the core_pattern binary sets
- * RLIM_CORE = !1, but it runs as root, and can do
- * lots of stupid things.
- *
- * Note that we use task_tgid_vnr here to grab the pid
- * of the process group leader. That way we get the
- * right pid if a thread in a multi-threaded
- * core_pattern process dies.
- */
- printk(KERN_WARNING
- "Process %d(%s) has RLIMIT_CORE set to 1\n",
- task_tgid_vnr(current), current->comm);
- printk(KERN_WARNING "Aborting core\n");
- goto fail_unlock;
- }
- cprm.limit = RLIM_INFINITY;
-
- dump_count = atomic_inc_return(&core_dump_count);
- if (core_pipe_limit && (core_pipe_limit < dump_count)) {
- printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
- task_tgid_vnr(current), current->comm);
- printk(KERN_WARNING "Skipping core dump\n");
- goto fail_dropcount;
- }
+static inline bool coredump_sock_send(struct file *file, struct coredump_req *req)
+{
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
+ struct kvec iov = { .iov_base = req, .iov_len = sizeof(*req) };
+ ssize_t ret;
- helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
- GFP_KERNEL);
- if (!helper_argv) {
- printk(KERN_WARNING "%s failed to allocate memory\n",
- __func__);
- goto fail_dropcount;
- }
- for (argi = 0; argi < argc; argi++)
- helper_argv[argi] = cn.corename + argv[argi];
- helper_argv[argi] = NULL;
-
- retval = -ENOMEM;
- sub_info = call_usermodehelper_setup(helper_argv[0],
- helper_argv, NULL, GFP_KERNEL,
- umh_pipe_setup, NULL, &cprm);
- if (sub_info)
- retval = call_usermodehelper_exec(sub_info,
- UMH_WAIT_EXEC);
-
- kfree(helper_argv);
- if (retval) {
- printk(KERN_INFO "Core dump to |%s pipe failed\n",
- cn.corename);
- goto close_fail;
- }
- } else {
- struct user_namespace *mnt_userns;
- struct inode *inode;
- int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
- O_LARGEFILE | O_EXCL;
-
- if (cprm.limit < binfmt->min_coredump)
- goto fail_unlock;
-
- if (need_suid_safe && cn.corename[0] != '/') {
- printk(KERN_WARNING "Pid %d(%s) can only dump core "\
- "to fully qualified path!\n",
- task_tgid_vnr(current), current->comm);
- printk(KERN_WARNING "Skipping core dump\n");
- goto fail_unlock;
- }
+ ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(*req));
+ return ret == sizeof(*req);
+}
- /*
- * Unlink the file if it exists unless this is a SUID
- * binary - in that case, we're running around with root
- * privs and don't want to unlink another user's coredump.
- */
- if (!need_suid_safe) {
- /*
- * If it doesn't exist, that's fine. If there's some
- * other problem, we'll catch it at the filp_open().
- */
- do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
- }
+static_assert(sizeof(enum coredump_mark) == sizeof(__u32));
+
+static inline bool coredump_sock_mark(struct file *file, enum coredump_mark mark)
+{
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
+ struct kvec iov = { .iov_base = &mark, .iov_len = sizeof(mark) };
+ ssize_t ret;
+
+ ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(mark));
+ return ret == sizeof(mark);
+}
+
+static inline void coredump_sock_wait(struct file *file)
+{
+ ssize_t n;
+
+ /*
+ * We use a simple read to wait for the coredump processing to
+ * finish. Either the socket is closed or we get sent unexpected
+ * data. In both cases, we're done.
+ */
+ n = __kernel_read(file, &(char){ 0 }, 1, NULL);
+ if (n > 0)
+ coredump_report_failure("Coredump socket had unexpected data");
+ else if (n < 0)
+ coredump_report_failure("Coredump socket failed");
+}
+
+static inline void coredump_sock_shutdown(struct file *file)
+{
+ struct socket *socket;
+
+ socket = sock_from_file(file);
+ if (!socket)
+ return;
+
+ /* Let userspace know we're done processing the coredump. */
+ kernel_sock_shutdown(socket, SHUT_WR);
+}
+
+static bool coredump_sock_request(struct core_name *cn, struct coredump_params *cprm)
+{
+ struct coredump_req req = {
+ .size = sizeof(struct coredump_req),
+ .mask = COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT,
+ .size_ack = sizeof(struct coredump_ack),
+ };
+ struct coredump_ack ack = {};
+ ssize_t usize;
+
+ if (cn->core_type != COREDUMP_SOCK_REQ)
+ return true;
+
+ /* Let userspace know what we support. */
+ if (!coredump_sock_send(cprm->file, &req))
+ return false;
+ /* Peek the size of the coredump_ack. */
+ if (!coredump_sock_recv(cprm->file, &ack, sizeof(ack.size),
+ MSG_PEEK | MSG_WAITALL))
+ return false;
+
+ /* Refuse unknown coredump_ack sizes. */
+ usize = ack.size;
+ if (usize < COREDUMP_ACK_SIZE_VER0) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_MINSIZE);
+ return false;
+ }
+
+ if (usize > sizeof(ack)) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_MAXSIZE);
+ return false;
+ }
+
+ /* Now retrieve the coredump_ack. */
+ if (!coredump_sock_recv(cprm->file, &ack, usize, MSG_WAITALL))
+ return false;
+ if (ack.size != usize)
+ return false;
+
+ /* Refuse unknown coredump_ack flags. */
+ if (ack.mask & ~req.mask) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
+ return false;
+ }
+
+ /* Refuse mutually exclusive options. */
+ if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL |
+ COREDUMP_REJECT)) != 1) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_CONFLICTING);
+ return false;
+ }
+
+ if (ack.spare) {
+ coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
+ return false;
+ }
+
+ cn->mask = ack.mask;
+ return coredump_sock_mark(cprm->file, COREDUMP_MARK_REQACK);
+}
+
+static bool coredump_socket(struct core_name *cn, struct coredump_params *cprm)
+{
+ if (!coredump_sock_connect(cn, cprm))
+ return false;
+
+ return coredump_sock_request(cn, cprm);
+}
+#else
+static inline void coredump_sock_wait(struct file *file) { }
+static inline void coredump_sock_shutdown(struct file *file) { }
+static inline bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) { return false; }
+#endif
+
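
For "@@"-style socket patterns, coredump_sock_request() above implements a small handshake: the kernel sends a struct coredump_req, expects a struct coredump_ack back, and confirms it with a COREDUMP_MARK_REQACK mark. A rough userspace-side sketch of a handler, assuming the uapi definitions from <linux/coredump.h>; short-read handling, req.size negotiation, and actually storing the core are elided, and the example_* name is a placeholder:

	/* Userspace sketch: handle one "@@" coredump connection. */
	#include <linux/coredump.h>	/* coredump_req/ack, COREDUMP_* (uapi) */
	#include <linux/types.h>
	#include <sys/socket.h>
	#include <unistd.h>

	static void example_handle_one(int listen_fd)
	{
		struct coredump_req req;
		struct coredump_ack ack = {
			.size = sizeof(ack),
			.mask = COREDUMP_KERNEL,	/* ask the kernel to write the core */
		};
		__u32 mark;
		char buf[4096];
		int fd = accept(listen_fd, NULL, NULL);

		if (fd < 0)
			return;
		/* kernel -> us: what it supports (a real handler should honour req.size) */
		if (read(fd, &req, sizeof(req)) != sizeof(req))
			goto out;
		/* us -> kernel: the chosen mode */
		if (write(fd, &ack, sizeof(ack)) != sizeof(ack))
			goto out;
		/* kernel -> us: COREDUMP_MARK_REQACK on success */
		if (read(fd, &mark, sizeof(mark)) != sizeof(mark) ||
		    mark != COREDUMP_MARK_REQACK)
			goto out;
		/* assuming COREDUMP_KERNEL was acked, the core follows until EOF */
		while (read(fd, buf, sizeof(buf)) > 0)
			;	/* a real handler would write this somewhere */
	out:
		close(fd);
	}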
+/* cprm->mm_flags contains a stable snapshot of dumpability flags. */
+static inline bool coredump_force_suid_safe(const struct coredump_params *cprm)
+{
+ /* Require nonrelative corefile path and be extra careful. */
+ return __get_dumpable(cprm->mm_flags) == SUID_DUMP_ROOT;
+}
+
+static bool coredump_file(struct core_name *cn, struct coredump_params *cprm,
+ const struct linux_binfmt *binfmt)
+{
+ struct mnt_idmap *idmap;
+ struct inode *inode;
+ struct file *file __free(fput) = NULL;
+ int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | O_LARGEFILE | O_EXCL;
+
+ if (cprm->limit < binfmt->min_coredump)
+ return false;
+
+ if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') {
+ coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump");
+ return false;
+ }
+
+ /*
+ * Unlink the file if it exists unless this is a SUID
+ * binary - in that case, we're running around with root
+ * privs and don't want to unlink another user's coredump.
+ */
+ if (!coredump_force_suid_safe(cprm)) {
/*
- * There is a race between unlinking and creating the
- * file, but if that causes an EEXIST here, that's
- * fine - another process raced with us while creating
- * the corefile, and the other process won. To userspace,
- * what matters is that at least one of the two processes
- * writes its coredump successfully, not which one.
+ * If it doesn't exist, that's fine. If there's some
+ * other problem, we'll catch it at the filp_open().
*/
- if (need_suid_safe) {
- /*
- * Using user namespaces, normal user tasks can change
- * their current->fs->root to point to arbitrary
- * directories. Since the intention of the "only dump
- * with a fully qualified path" rule is to control where
- * coredumps may be placed using root privileges,
- * current->fs->root must not be used. Instead, use the
- * root directory of init_task.
- */
- struct path root;
-
- task_lock(&init_task);
- get_fs_root(init_task.fs, &root);
- task_unlock(&init_task);
- cprm.file = file_open_root(&root, cn.corename,
- open_flags, 0600);
- path_put(&root);
- } else {
- cprm.file = filp_open(cn.corename, open_flags, 0600);
- }
- if (IS_ERR(cprm.file))
- goto fail_unlock;
-
- inode = file_inode(cprm.file);
- if (inode->i_nlink > 1)
- goto close_fail;
- if (d_unhashed(cprm.file->f_path.dentry))
- goto close_fail;
+ do_unlinkat(AT_FDCWD, getname_kernel(cn->corename));
+ }
+
+ /*
+ * There is a race between unlinking and creating the
+ * file, but if that causes an EEXIST here, that's
+ * fine - another process raced with us while creating
+ * the corefile, and the other process won. To userspace,
+ * what matters is that at least one of the two processes
+ * writes its coredump successfully, not which one.
+ */
+ if (coredump_force_suid_safe(cprm)) {
/*
- * AK: actually i see no reason to not allow this for named
- * pipes etc, but keep the previous behaviour for now.
+ * Using user namespaces, normal user tasks can change
+ * their current->fs->root to point to arbitrary
+ * directories. Since the intention of the "only dump
+ * with a fully qualified path" rule is to control where
+ * coredumps may be placed using root privileges,
+ * current->fs->root must not be used. Instead, use the
+ * root directory of init_task.
*/
- if (!S_ISREG(inode->i_mode))
- goto close_fail;
- /*
- * Don't dump core if the filesystem changed owner or mode
- * of the file during file creation. This is an issue when
- * a process dumps core while its cwd is e.g. on a vfat
- * filesystem.
+ struct path root;
+
+ task_lock(&init_task);
+ get_fs_root(init_task.fs, &root);
+ task_unlock(&init_task);
+ file = file_open_root(&root, cn->corename, open_flags, 0600);
+ path_put(&root);
+ } else {
+ file = filp_open(cn->corename, open_flags, 0600);
+ }
+ if (IS_ERR(file))
+ return false;
+
+ inode = file_inode(file);
+ if (inode->i_nlink > 1)
+ return false;
+ if (d_unhashed(file->f_path.dentry))
+ return false;
+ /*
+ * AK: actually i see no reason to not allow this for named
+ * pipes etc, but keep the previous behaviour for now.
+ */
+ if (!S_ISREG(inode->i_mode))
+ return false;
+ /*
+ * Don't dump core if the filesystem changed owner or mode
+ * of the file during file creation. This is an issue when
+ * a process dumps core while its cwd is e.g. on a vfat
+ * filesystem.
+ */
+ idmap = file_mnt_idmap(file);
+ if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) {
+ coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename);
+ return false;
+ }
+ if ((inode->i_mode & 0677) != 0600) {
+ coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename);
+ return false;
+ }
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return false;
+ if (do_truncate(idmap, file->f_path.dentry, 0, 0, file))
+ return false;
+
+ cprm->file = no_free_ptr(file);
+ return true;
+}
+
+static bool coredump_pipe(struct core_name *cn, struct coredump_params *cprm,
+ size_t *argv, int argc)
+{
+ int argi;
+ char **helper_argv __free(kfree) = NULL;
+ struct subprocess_info *sub_info;
+
+ if (cprm->limit == 1) {
+ /* See umh_coredump_setup() which sets RLIMIT_CORE = 1.
+ *
+ * Normally core limits are irrelevant to pipes, since
+ * we're not writing to the file system, but we use
+ * cprm.limit of 1 here as a special value; this is a
+ * consistent way to catch recursive crashes.
+ * We can still crash if the core_pattern binary sets
+ * RLIM_CORE = !1, but it runs as root, and can do
+ * lots of stupid things.
+ *
+ * Note that we use task_tgid_vnr here to grab the pid
+ * of the process group leader. That way we get the
+ * right pid if a thread in a multi-threaded
+ * core_pattern process dies.
*/
- mnt_userns = file_mnt_user_ns(cprm.file);
- if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
- current_fsuid())) {
- pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
- cn.corename);
- goto close_fail;
- }
- if ((inode->i_mode & 0677) != 0600) {
- pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
- cn.corename);
- goto close_fail;
- }
- if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
- goto close_fail;
- if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
- 0, 0, cprm.file))
- goto close_fail;
+ coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
+ return false;
+ }
+ cprm->limit = RLIM_INFINITY;
+
+ cn->core_pipe_limit = atomic_inc_return(&core_pipe_count);
+ if (core_pipe_limit && (core_pipe_limit < cn->core_pipe_limit)) {
+ coredump_report_failure("over core_pipe_limit, skipping core dump");
+ return false;
+ }
+
+ helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), GFP_KERNEL);
+ if (!helper_argv) {
+ coredump_report_failure("%s failed to allocate memory", __func__);
+ return false;
+ }
+ for (argi = 0; argi < argc; argi++)
+ helper_argv[argi] = cn->corename + argv[argi];
+ helper_argv[argi] = NULL;
+
+ sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL,
+ GFP_KERNEL, umh_coredump_setup,
+ NULL, cprm);
+ if (!sub_info)
+ return false;
+
+ if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) {
+ coredump_report_failure("|%s pipe failed", cn->corename);
+ return false;
}
+ /*
+ * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
+ * have this set to NULL.
+ */
+ if (!cprm->file) {
+ coredump_report_failure("Core dump to |%s disabled", cn->corename);
+ return false;
+ }
+
+ return true;
+}
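
The helper_argv assembled above is the "|" core_pattern split at spaces, with the %-specifiers already expanded by coredump_parse(). As a hedged illustration, the snippet below installs such a pattern by writing the procfs file that the coredump_sysctls table further down exposes; the handler path is hypothetical, and only %p (PID) and %s (signal number) are used:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical handler; %p and %s are expanded by the kernel. */
	static const char pattern[] = "|/usr/local/sbin/core-catcher %p %s";
	int fd = open("/proc/sys/kernel/core_pattern", O_WRONLY);

	if (fd < 0) {
		perror("core_pattern");
		return 1;
	}
	if (write(fd, pattern, strlen(pattern)) != (ssize_t)strlen(pattern)) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

With that pattern in place, coredump_pipe() invokes the helper with helper_argv[0] == "/usr/local/sbin/core-catcher" and the expanded PID and signal as helper_argv[1] and helper_argv[2], while the dump itself arrives on the helper's stdin.
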
+
+static bool coredump_write(struct core_name *cn,
+ struct coredump_params *cprm,
+ const struct linux_binfmt *binfmt)
+{
+
+ if (dump_interrupted())
+ return true;
+
+ if (!dump_vma_snapshot(cprm))
+ return false;
+
+ file_start_write(cprm->file);
+ cn->core_dumped = binfmt->core_dump(cprm);
+ /*
+ * Ensures that file size is big enough to contain the current
+ * file position. This prevents gdb from complaining about
+ * a truncated file if the last "write" to the file was
+ * dump_skip.
+ */
+ if (cprm->to_skip) {
+ cprm->to_skip--;
+ dump_emit(cprm, "", 1);
+ }
+ file_end_write(cprm->file);
+ free_vma_snapshot(cprm);
+ return true;
+}
+
+static void coredump_cleanup(struct core_name *cn, struct coredump_params *cprm)
+{
+ if (cprm->file)
+ filp_close(cprm->file, NULL);
+ if (cn->core_pipe_limit) {
+ VFS_WARN_ON_ONCE(cn->core_type != COREDUMP_PIPE);
+ atomic_dec(&core_pipe_count);
+ }
+ kfree(cn->corename);
+ coredump_finish(cn->core_dumped);
+}
+
+static inline bool coredump_skip(const struct coredump_params *cprm,
+ const struct linux_binfmt *binfmt)
+{
+ if (!binfmt)
+ return true;
+ if (!binfmt->core_dump)
+ return true;
+ if (!__get_dumpable(cprm->mm_flags))
+ return true;
+ return false;
+}
+
+static void do_coredump(struct core_name *cn, struct coredump_params *cprm,
+ size_t **argv, int *argc, const struct linux_binfmt *binfmt)
+{
+ if (!coredump_parse(cn, cprm, argv, argc)) {
+ coredump_report_failure("format_corename failed, aborting core");
+ return;
+ }
+
+ switch (cn->core_type) {
+ case COREDUMP_FILE:
+ if (!coredump_file(cn, cprm, binfmt))
+ return;
+ break;
+ case COREDUMP_PIPE:
+ if (!coredump_pipe(cn, cprm, *argv, *argc))
+ return;
+ break;
+ case COREDUMP_SOCK_REQ:
+ fallthrough;
+ case COREDUMP_SOCK:
+ if (!coredump_socket(cn, cprm))
+ return;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return;
+ }
+
+ /* Don't even generate the coredump. */
+ if (cn->mask & COREDUMP_REJECT)
+ return;
+
/* get us an unshared descriptor table; almost always a no-op */
/* The cell spufs coredump code reads the file descriptor tables */
- retval = unshare_files();
- if (retval)
- goto close_fail;
- if (!dump_interrupted()) {
- /*
- * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
- * have this set to NULL.
- */
- if (!cprm.file) {
- pr_info("Core dump to |%s disabled\n", cn.corename);
- goto close_fail;
+ if (unshare_files())
+ return;
+
+ if ((cn->mask & COREDUMP_KERNEL) && !coredump_write(cn, cprm, binfmt))
+ return;
+
+ coredump_sock_shutdown(cprm->file);
+
+ /* Let the parent know that a coredump was generated. */
+ if (cn->mask & COREDUMP_USERSPACE)
+ cn->core_dumped = true;
+
+ /*
+ * When core_pipe_limit is set we wait for the coredump server
+ * or usermodehelper to finish before exiting so it can, e.g.,
+ * inspect /proc/<pid>.
+ */
+ if (cn->mask & COREDUMP_WAIT) {
+ switch (cn->core_type) {
+ case COREDUMP_PIPE:
+ wait_for_dump_helpers(cprm->file);
+ break;
+ case COREDUMP_SOCK_REQ:
+ fallthrough;
+ case COREDUMP_SOCK:
+ coredump_sock_wait(cprm->file);
+ break;
+ default:
+ break;
}
- file_start_write(cprm.file);
- core_dumped = binfmt->core_dump(&cprm);
+ }
+}
+
+void vfs_coredump(const kernel_siginfo_t *siginfo)
+{
+ size_t *argv __free(kfree) = NULL;
+ struct core_state core_state;
+ struct core_name cn;
+ const struct mm_struct *mm = current->mm;
+ const struct linux_binfmt *binfmt = mm->binfmt;
+ int argc = 0;
+ struct coredump_params cprm = {
+ .siginfo = siginfo,
+ .limit = rlimit(RLIMIT_CORE),
/*
- * Ensures that file size is big enough to contain the current
- * file postion. This prevents gdb from complaining about
- * a truncated file if the last "write" to the file was
- * dump_skip.
+ * We must use the same mm->flags while dumping core to avoid
+ * inconsistency of bit flags, since this flag is not protected
+ * by any locks.
+ *
+ * Note that we only care about MMF_DUMP* flags.
*/
- if (cprm.to_skip) {
- cprm.to_skip--;
- dump_emit(&cprm, "", 1);
- }
- file_end_write(cprm.file);
- }
- if (ispipe && core_pipe_limit)
- wait_for_dump_helpers(cprm.file);
-close_fail:
- if (cprm.file)
- filp_close(cprm.file, NULL);
-fail_dropcount:
- if (ispipe)
- atomic_dec(&core_dump_count);
-fail_unlock:
- kfree(argv);
- kfree(cn.corename);
- coredump_finish(core_dumped);
- revert_creds(old_cred);
-fail_creds:
- put_cred(cred);
-fail:
+ .mm_flags = __mm_flags_get_dumpable(mm),
+ .vma_meta = NULL,
+ .cpu = raw_smp_processor_id(),
+ };
+
+ audit_core_dumps(siginfo->si_signo);
+
+ if (coredump_skip(&cprm, binfmt))
+ return;
+
+ CLASS(prepare_creds, cred)();
+ if (!cred)
+ return;
+ /*
+ * We cannot trust fsuid as being the "true" uid of the process
+ * nor do we know its entire history. We only know it was tainted
+ * so we dump it as root in mode 2, and only into a controlled
+ * environment (pipe handler or fully qualified path).
+ */
+ if (coredump_force_suid_safe(&cprm))
+ cred->fsuid = GLOBAL_ROOT_UID;
+
+ if (coredump_wait(siginfo->si_signo, &core_state) < 0)
+ return;
+
+ scoped_with_creds(cred)
+ do_coredump(&cn, &cprm, &argv, &argc, binfmt);
+ coredump_cleanup(&cn, &cprm);
return;
}
@@ -791,10 +1212,9 @@ static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
struct file *file = cprm->file;
loff_t pos = file->f_pos;
ssize_t n;
+
if (cprm->written + nr > cprm->limit)
return 0;
-
-
if (dump_interrupted())
return 0;
n = __kernel_write(file, addr, nr, &pos);
@@ -811,20 +1231,21 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
static char zeroes[PAGE_SIZE];
struct file *file = cprm->file;
- if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
- if (dump_interrupted() ||
- file->f_op->llseek(file, nr, SEEK_CUR) < 0)
+
+ if (file->f_mode & FMODE_LSEEK) {
+ if (dump_interrupted() || vfs_llseek(file, nr, SEEK_CUR) < 0)
return 0;
cprm->pos += nr;
return 1;
- } else {
- while (nr > PAGE_SIZE) {
- if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
- return 0;
- nr -= PAGE_SIZE;
- }
- return __dump_emit(cprm, zeroes, nr);
}
+
+ while (nr > PAGE_SIZE) {
+ if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
+ return 0;
+ nr -= PAGE_SIZE;
+ }
+
+ return __dump_emit(cprm, zeroes, nr);
}
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
@@ -851,14 +1272,89 @@ void dump_skip(struct coredump_params *cprm, size_t nr)
EXPORT_SYMBOL(dump_skip);
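
dump_emit(), dump_skip() and dump_align() (below) are the interface a binfmt's ->core_dump() callback uses against cprm->file; __dump_skip() above turns skips into either a vfs_llseek() hole or explicit zero pages depending on FMODE_LSEEK. A hedged sketch of a toy dumper using that interface follows; the "TOYCORE" format, its layout, and the decision to pad undumped VMA tails are purely illustrative:

/*
 * Illustrative only: emit a small header, align the data area, then copy
 * the snapshotted VMAs. Not a real binfmt; error handling is minimal.
 */
static int toy_core_dump(struct coredump_params *cprm)
{
	static const char magic[8] = "TOYCORE";
	int i;

	if (!dump_emit(cprm, magic, sizeof(magic)))
		return 0;

	/* Pad the stream so the memory image starts page-aligned. */
	if (!dump_align(cprm, PAGE_SIZE))
		return 0;

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = &cprm->vma_meta[i];

		if (m->dump_size &&
		    !dump_user_range(cprm, m->start, m->dump_size))
			return 0;
		/* Leave a hole for the part of the VMA we chose not to dump. */
		dump_skip(cprm, (m->end - m->start) - m->dump_size);
	}
	return 1;
}
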
#ifdef CONFIG_ELF_CORE
+static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+{
+ struct bio_vec bvec;
+ struct iov_iter iter;
+ struct file *file = cprm->file;
+ loff_t pos;
+ ssize_t n;
+
+ if (!page)
+ return 0;
+
+ if (cprm->to_skip) {
+ if (!__dump_skip(cprm, cprm->to_skip))
+ return 0;
+ cprm->to_skip = 0;
+ }
+ if (cprm->written + PAGE_SIZE > cprm->limit)
+ return 0;
+ if (dump_interrupted())
+ return 0;
+ pos = file->f_pos;
+ bvec_set_page(&bvec, page, PAGE_SIZE, 0);
+ iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
+ n = __kernel_write_iter(cprm->file, &iter, &pos);
+ if (n != PAGE_SIZE)
+ return 0;
+ file->f_pos = pos;
+ cprm->written += PAGE_SIZE;
+ cprm->pos += PAGE_SIZE;
+
+ return 1;
+}
+
+/*
+ * If we might get machine checks from kernel accesses during the
+ * core dump, let's get those errors early rather than during the
+ * IO. This is not performance-critical enough to warrant having
+ * all the machine check logic in the iovec paths.
+ */
+#ifdef copy_mc_to_kernel
+
+#define dump_page_alloc() alloc_page(GFP_KERNEL)
+#define dump_page_free(x) __free_page(x)
+static struct page *dump_page_copy(struct page *src, struct page *dst)
+{
+ void *buf = kmap_local_page(src);
+ size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
+ kunmap_local(buf);
+ return left ? NULL : dst;
+}
+
+#else
+
+/* We just want to return non-NULL; it's never used. */
+#define dump_page_alloc() ERR_PTR(-EINVAL)
+#define dump_page_free(x) ((void)(x))
+static inline struct page *dump_page_copy(struct page *src, struct page *dst)
+{
+ return src;
+}
+#endif
+
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len)
{
unsigned long addr;
+ struct page *dump_page;
+ int locked, ret;
+
+ dump_page = dump_page_alloc();
+ if (!dump_page)
+ return 0;
+ ret = 0;
+ locked = 0;
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page;
- int stop;
+
+ if (!locked) {
+ if (mmap_read_lock_killable(current->mm))
+ goto out;
+ locked = 1;
+ }
/*
* To avoid having to allocate page tables for virtual address
@@ -867,20 +1363,38 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
* NULL when encountering an empty page table entry that would
* otherwise have been filled with the zero page.
*/
- page = get_dump_page(addr);
+ page = get_dump_page(addr, &locked);
if (page) {
- void *kaddr = kmap_local_page(page);
-
- stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
- kunmap_local(kaddr);
+ if (locked) {
+ mmap_read_unlock(current->mm);
+ locked = 0;
+ }
+ int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
put_page(page);
if (stop)
- return 0;
+ goto out;
} else {
dump_skip(cprm, PAGE_SIZE);
}
+
+ if (dump_interrupted())
+ goto out;
+
+ if (!need_resched())
+ continue;
+ if (locked) {
+ mmap_read_unlock(current->mm);
+ locked = 0;
+ }
+ cond_resched();
}
- return 1;
+ ret = 1;
+out:
+ if (locked)
+ mmap_read_unlock(current->mm);
+
+ dump_page_free(dump_page);
+ return ret;
}
#endif
@@ -895,6 +1409,151 @@ int dump_align(struct coredump_params *cprm, int align)
}
EXPORT_SYMBOL(dump_align);
+#ifdef CONFIG_SYSCTL
+
+void validate_coredump_safety(void)
+{
+ if (suid_dumpable == SUID_DUMP_ROOT &&
+ core_pattern[0] != '/' && core_pattern[0] != '|' && core_pattern[0] != '@') {
+
+ coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: "
+ "pipe handler or fully qualified core dump path required. "
+ "Set kernel.core_pattern before fs.suid_dumpable.");
+ }
+}
+
+static inline bool check_coredump_socket(void)
+{
+ const char *p;
+
+ if (core_pattern[0] != '@')
+ return true;
+
+ /*
+ * Coredump socket must be located in the initial mount
+ * namespace. Don't give the impression that anything else is
+ * supported right now.
+ */
+ if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns)
+ return false;
+
+ /* Must be an absolute path... */
+ if (core_pattern[1] != '/') {
+ /* ... or the socket request protocol... */
+ if (core_pattern[1] != '@')
+ return false;
+ /* ... and if so must be an absolute path. */
+ if (core_pattern[2] != '/')
+ return false;
+ p = &core_pattern[2];
+ } else {
+ p = &core_pattern[1];
+ }
+
+ /* The path obviously cannot exceed UNIX_PATH_MAX. */
+ if (strlen(p) >= UNIX_PATH_MAX)
+ return false;
+
+ /* Must not contain ".." in the path. */
+ if (name_contains_dotdot(core_pattern))
+ return false;
+
+ return true;
+}
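
check_coredump_socket() accepts two socket forms: "@/abs/path" (plain COREDUMP_SOCK, where the kernel simply streams the dump) and "@@/abs/path" (COREDUMP_SOCK_REQ, the request/ack protocol shown earlier), both restricted to the initial mount namespace, to paths shorter than UNIX_PATH_MAX, and to paths without "..". A minimal listener sketch for the server side follows; it assumes a SOCK_STREAM AF_UNIX socket, as the stream reads and writes in the socket helpers above imply, and the path is only an example:

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/*
 * Bind the listener that an "@@/run/coredump.socket" pattern (or
 * "@/run/coredump.socket" for the plain stream form) points at. The
 * path is illustrative; it must live in the initial mount namespace.
 */
static int coredump_listener(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	strncpy(addr.sun_path, "/run/coredump.socket",
		sizeof(addr.sun_path) - 1);
	unlink(addr.sun_path);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* accept(2) each crash connection and hand it off */
}
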
+
+static int proc_dostring_coredump(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int error;
+ ssize_t retval;
+ char old_core_pattern[CORENAME_MAX_SIZE];
+
+ if (!write)
+ return proc_dostring(table, write, buffer, lenp, ppos);
+
+ retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);
+
+ error = proc_dostring(table, write, buffer, lenp, ppos);
+ if (error)
+ return error;
+
+ if (!check_coredump_socket()) {
+ strscpy(core_pattern, old_core_pattern, retval + 1);
+ return -EINVAL;
+ }
+
+ validate_coredump_safety();
+ return error;
+}
+
+static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
+static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
+static char core_modes[] = {
+ "file\npipe"
+#ifdef CONFIG_UNIX
+ "\nsocket"
+#endif
+};
+
+static const struct ctl_table coredump_sysctls[] = {
+ {
+ .procname = "core_uses_pid",
+ .data = &core_uses_pid,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "core_pattern",
+ .data = core_pattern,
+ .maxlen = CORENAME_MAX_SIZE,
+ .mode = 0644,
+ .proc_handler = proc_dostring_coredump,
+ },
+ {
+ .procname = "core_pipe_limit",
+ .data = &core_pipe_limit,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "core_file_note_size_limit",
+ .data = &core_file_note_size_limit,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = (unsigned int *)&core_file_note_size_min,
+ .extra2 = (unsigned int *)&core_file_note_size_max,
+ },
+ {
+ .procname = "core_sort_vma",
+ .data = &core_sort_vma,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "core_modes",
+ .data = core_modes,
+ .maxlen = sizeof(core_modes) - 1,
+ .mode = 0444,
+ .proc_handler = proc_dostring,
+ },
+};
+
+static int __init init_fs_coredump_sysctls(void)
+{
+ register_sysctl_init("kernel", coredump_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_coredump_sysctls);
+#endif /* CONFIG_SYSCTL */
+
/*
* The purpose of always_dump_vma() is to make sure that special kernel mappings
* that are useful for post-mortem analysis are included in every core dump.
@@ -926,6 +1585,8 @@ static bool always_dump_vma(struct vm_area_struct *vma)
return false;
}
+#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1
+
/*
* Decide how much of @vma's contents should be included in a core dump.
*/
@@ -985,9 +1646,20 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
* dump the first page to aid in determining what was mapped here.
*/
if (FILTER(ELF_HEADERS) &&
- vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ) &&
- (READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
- return PAGE_SIZE;
+ vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
+ if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
+ return PAGE_SIZE;
+
+ /*
+ * ELF libraries aren't always executable.
+ * We'll want to check whether the mapping starts with the ELF
+ * magic, but not now - we're holding the mmap lock,
+ * so copy_from_user() doesn't work here.
+ * Use a placeholder instead, and fix it up later in
+ * dump_vma_snapshot().
+ */
+ return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
+ }
#undef FILTER
@@ -997,45 +1669,59 @@ whole:
return vma->vm_end - vma->vm_start;
}
-static struct vm_area_struct *first_vma(struct task_struct *tsk,
- struct vm_area_struct *gate_vma)
-{
- struct vm_area_struct *ret = tsk->mm->mmap;
-
- if (ret)
- return ret;
- return gate_vma;
-}
-
/*
* Helper function for iterating across a vma list. It ensures that the caller
* will visit `gate_vma' prior to terminating the search.
*/
-static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
+static struct vm_area_struct *coredump_next_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
struct vm_area_struct *gate_vma)
{
- struct vm_area_struct *ret;
-
- ret = this_vma->vm_next;
- if (ret)
- return ret;
- if (this_vma == gate_vma)
+ if (gate_vma && (vma == gate_vma))
return NULL;
+
+ vma = vma_next(vmi);
+ if (vma)
+ return vma;
return gate_vma;
}
+static void free_vma_snapshot(struct coredump_params *cprm)
+{
+ if (cprm->vma_meta) {
+ int i;
+ for (i = 0; i < cprm->vma_count; i++) {
+ struct file *file = cprm->vma_meta[i].file;
+ if (file)
+ fput(file);
+ }
+ kvfree(cprm->vma_meta);
+ cprm->vma_meta = NULL;
+ }
+}
+
+static int cmp_vma_size(const void *vma_meta_lhs_ptr, const void *vma_meta_rhs_ptr)
+{
+ const struct core_vma_metadata *vma_meta_lhs = vma_meta_lhs_ptr;
+ const struct core_vma_metadata *vma_meta_rhs = vma_meta_rhs_ptr;
+
+ if (vma_meta_lhs->dump_size < vma_meta_rhs->dump_size)
+ return -1;
+ if (vma_meta_lhs->dump_size > vma_meta_rhs->dump_size)
+ return 1;
+ return 0;
+}
+
/*
* Under the mmap_lock, take a snapshot of relevant information about the task's
* VMAs.
*/
-int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
- struct core_vma_metadata **vma_meta,
- size_t *vma_data_size_ptr)
+static bool dump_vma_snapshot(struct coredump_params *cprm)
{
- struct vm_area_struct *vma, *gate_vma;
+ struct vm_area_struct *gate_vma, *vma = NULL;
struct mm_struct *mm = current->mm;
- int i;
- size_t vma_data_size = 0;
+ VMA_ITERATOR(vmi, mm, 0);
+ int i = 0;
/*
* Once the stack expansion code is fixed to not change VMA bounds
@@ -1043,36 +1729,54 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
* mmap_lock in read mode.
*/
if (mmap_write_lock_killable(mm))
- return -EINTR;
+ return false;
+ cprm->vma_data_size = 0;
gate_vma = get_gate_vma(mm);
- *vma_count = mm->map_count + (gate_vma ? 1 : 0);
+ cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);
- *vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL);
- if (!*vma_meta) {
+ cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
+ if (!cprm->vma_meta) {
mmap_write_unlock(mm);
- return -ENOMEM;
+ return false;
}
- for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
- vma = next_vma(vma, gate_vma), i++) {
- struct core_vma_metadata *m = (*vma_meta) + i;
+ while ((vma = coredump_next_vma(&vmi, vma, gate_vma)) != NULL) {
+ struct core_vma_metadata *m = cprm->vma_meta + i;
m->start = vma->vm_start;
m->end = vma->vm_end;
m->flags = vma->vm_flags;
m->dump_size = vma_dump_size(vma, cprm->mm_flags);
-
- vma_data_size += m->dump_size;
+ m->pgoff = vma->vm_pgoff;
+ m->file = vma->vm_file;
+ if (m->file)
+ get_file(m->file);
+ i++;
}
mmap_write_unlock(mm);
- if (WARN_ON(i != *vma_count)) {
- kvfree(*vma_meta);
- return -EFAULT;
+ for (i = 0; i < cprm->vma_count; i++) {
+ struct core_vma_metadata *m = cprm->vma_meta + i;
+
+ if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
+ char elfmag[SELFMAG];
+
+ if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
+ memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
+ m->dump_size = 0;
+ } else {
+ m->dump_size = PAGE_SIZE;
+ }
+ }
+
+ cprm->vma_data_size += m->dump_size;
}
- *vma_data_size_ptr = vma_data_size;
- return 0;
+ if (core_sort_vma)
+ sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
+ cmp_vma_size, NULL);
+
+ return true;
}
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig
index d98cef0dbb6b..4612c9bbf102 100644
--- a/fs/cramfs/Kconfig
+++ b/fs/cramfs/Kconfig
@@ -38,7 +38,7 @@ config CRAMFS_MTD
default y if !CRAMFS_BLOCKDEV
help
This option allows the CramFs driver to load data directly from
- a linear adressed memory range (usually non volatile memory
+ a linear addressed memory range (usually non-volatile memory
like flash) instead of going through the block device layer.
This saves some memory since no intermediate buffering is
necessary.
diff --git a/fs/cramfs/README b/fs/cramfs/README
index d71b27e0ff15..778df5c4d70b 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -115,7 +115,7 @@ Block Size
(Block size in cramfs refers to the size of input data that is
compressed at a time. It's intended to be somewhere around
-PAGE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_read_folio's convenience.)
The superblock ought to indicate the block size that the fs was
written for, since comments in <linux/pagemap.h> indicate that
@@ -161,7 +161,7 @@ size. The options are:
PAGE_SIZE.
It's easy enough to change the kernel to use a smaller value than
-PAGE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_read_folio read multiple blocks.
The cost of option 1 is that kernels with a larger PAGE_SIZE
value don't get as good compression as they can.
@@ -173,9 +173,9 @@ they don't mind their cramfs being inaccessible to kernels with
smaller PAGE_SIZE values.
Option 3 is easy to implement if we don't mind being CPU-inefficient:
-e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
+e.g. get read_folio to decompress to a buffer of size MAX_BLKSIZE (which
must be no larger than 32KB) and discard what it doesn't need.
-Getting readpage to read into all the covered pages is harder.
+Getting read_folio to read into all the covered pages is harder.
The main advantage of option 3 over 1, 2, is better compression. The
cost is greater complexity. Probably not worth it, but I hope someone
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 666aa380011e..e54ebe402df7 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
-#include <linux/pfn_t.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -96,7 +95,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
inode = iget_locked(sb, cramino(cramfs_inode, offset));
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
switch (cramfs_inode->mode & S_IFMT) {
@@ -117,9 +116,18 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
inode_nohighmem(inode);
inode->i_data.a_ops = &cramfs_aops;
break;
- default:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
init_special_inode(inode, cramfs_inode->mode,
old_decode_dev(cramfs_inode->size));
+ break;
+ default:
+ printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
inode->i_mode = cramfs_inode->mode;
@@ -133,7 +141,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
}
/* Struct copy intentional */
- inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime)));
/* inode->i_nlink is left 1 - arguably wrong for directories,
but it's the best we can do without reading the directory
contents. 1 yields the right result in GNU find, even
@@ -182,7 +191,8 @@ static int next_buffer;
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
unsigned int len)
{
- struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+ struct address_space *mapping = sb->s_bdev->bd_mapping;
+ struct file_ra_state ra = {};
struct page *pages[BLKS_PER_BUF];
unsigned i, blocknr, buffer;
unsigned long devsize;
@@ -212,6 +222,9 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;
/* Ok, read in BLKS_PER_BUF pages completely first. */
+ file_ra_state_init(&ra, mapping);
+ page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);
+
for (i = 0; i < BLKS_PER_BUF; i++) {
struct page *page = NULL;
@@ -224,19 +237,6 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
pages[i] = page;
}
- for (i = 0; i < BLKS_PER_BUF; i++) {
- struct page *page = pages[i];
-
- if (page) {
- wait_on_page_locked(page);
- if (!PageUptodate(page)) {
- /* asynchronous error */
- put_page(page);
- pages[i] = NULL;
- }
- }
- }
-
buffer = next_buffer;
next_buffer = NEXT_BUFFER(buffer);
buffer_blocknr[buffer] = blocknr;
@@ -247,8 +247,7 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
struct page *page = pages[i];
if (page) {
- memcpy(data, kmap(page), PAGE_SIZE);
- kunmap(page);
+ memcpy_from_page(data, page, 0, PAGE_SIZE);
put_page(page);
} else
memset(data, 0, PAGE_SIZE);
@@ -414,15 +413,15 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
/*
* Let's create a mixed map if we can't map it all.
* The normal paging machinery will take care of the
- * unpopulated ptes via cramfs_readpage().
+ * unpopulated ptes via cramfs_read_folio().
*/
int i;
- vma->vm_flags |= VM_MIXEDMAP;
+ vm_flags_set(vma, VM_MIXEDMAP);
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
- pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
- vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
+ vmf = vmf_insert_mixed(vma, vma->vm_start + off,
+ PHYS_PFN(address + off));
if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
}
@@ -446,7 +445,7 @@ bailout:
static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
- return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
+ return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
}
static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
@@ -483,7 +482,7 @@ static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
static const struct file_operations cramfs_physmem_fops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.mmap = cramfs_physmem_mmap,
#ifndef CONFIG_MMU
.get_unmapped_area = cramfs_physmem_get_unmapped_area,
@@ -495,12 +494,16 @@ static void cramfs_kill_sb(struct super_block *sb)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
+ generic_shutdown_super(sb);
+
if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
if (sbi && sbi->mtd_point_size)
mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
- kill_mtd_super(sb);
+ put_mtd_device(sb->s_mtd);
+ sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
- kill_block_super(sb);
+ sync_blockdev(sb->s_bdev);
+ bdev_fput(sb->s_bdev_file);
}
kfree(sbi);
}
@@ -814,20 +817,21 @@ out:
return d_splice_alias(inode, dentry);
}
-static int cramfs_readpage(struct file *file, struct page *page)
+static int cramfs_read_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
u32 maxblock;
int bytes_filled;
void *pgdata;
+ bool success = false;
maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
bytes_filled = 0;
- pgdata = kmap(page);
+ pgdata = kmap_local_folio(folio, 0);
- if (page->index < maxblock) {
+ if (folio->index < maxblock) {
struct super_block *sb = inode->i_sb;
- u32 blkptr_offset = OFFSET(inode) + page->index * 4;
+ u32 blkptr_offset = OFFSET(inode) + folio->index * 4;
u32 block_ptr, block_start, block_len;
bool uncompressed, direct;
@@ -848,7 +852,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
if (uncompressed) {
block_len = PAGE_SIZE;
/* if last block: cap to file length */
- if (page->index == maxblock - 1)
+ if (folio->index == maxblock - 1)
block_len =
offset_in_page(inode->i_size);
} else {
@@ -865,7 +869,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
* from the previous block's pointer.
*/
block_start = OFFSET(inode) + maxblock * 4;
- if (page->index)
+ if (folio->index)
block_start = *(u32 *)
cramfs_read(sb, blkptr_offset - 4, 4);
/* Beware... previous ptr might be a direct ptr */
@@ -910,22 +914,17 @@ static int cramfs_readpage(struct file *file, struct page *page)
}
memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
- flush_dcache_page(page);
- kunmap(page);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ flush_dcache_folio(folio);
+ success = true;
err:
- kunmap(page);
- ClearPageUptodate(page);
- SetPageError(page);
- unlock_page(page);
+ kunmap_local(pgdata);
+ folio_end_read(folio, success);
return 0;
}
static const struct address_space_operations cramfs_aops = {
- .readpage = cramfs_readpage
+ .read_folio = cramfs_read_folio
};
/*
@@ -1007,4 +1006,5 @@ static void __exit exit_cramfs_fs(void)
module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
+MODULE_DESCRIPTION("Compressed ROM file system support");
MODULE_LICENSE("GPL");
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 2d0c8922f635..464b54610fd3 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -2,16 +2,16 @@
config FS_ENCRYPTION
bool "FS Encryption (Per-file encryption)"
select CRYPTO
- select CRYPTO_HASH
select CRYPTO_SKCIPHER
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select KEYS
help
Enable encryption of files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache. Currently Ext4,
- F2FS and UBIFS make use of this feature.
+ F2FS, UBIFS, and CephFS make use of this feature.
# Filesystems supporting encryption must select this if FS_ENCRYPTION. This
# allows the algorithms to be built as modules when all the filesystems are,
@@ -24,20 +24,14 @@ config FS_ENCRYPTION
#
# Also note that this option only pulls in the generic implementations of the
# algorithms, not any per-architecture optimized implementations. It is
-# strongly recommended to enable optimized implementations too. It is safe to
-# disable these generic implementations if corresponding optimized
-# implementations will always be available too; for this reason, these are soft
-# dependencies ('imply' rather than 'select'). Only disable these generic
-# implementations if you're sure they will never be needed, though.
+# strongly recommended to enable optimized implementations too.
config FS_ENCRYPTION_ALGS
tristate
- imply CRYPTO_AES
- imply CRYPTO_CBC
- imply CRYPTO_CTS
- imply CRYPTO_ECB
- imply CRYPTO_HMAC
- imply CRYPTO_SHA512
- imply CRYPTO_XTS
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTS
+ select CRYPTO_ECB
+ select CRYPTO_XTS
config FS_ENCRYPTION_INLINE_CRYPT
bool "Enable fscrypt to use inline crypto"
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index bfc2a5b74ed3..5f5599020e94 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -7,10 +7,12 @@
* Copyright (C) 2015, Motorola Mobility
*/
-#include <linux/pagemap.h>
-#include <linux/module.h>
#include <linux/bio.h>
+#include <linux/export.h>
+#include <linux/module.h>
#include <linux/namei.h>
+#include <linux/pagemap.h>
+
#include "fscrypt_private.h"
/**
@@ -25,21 +27,23 @@
* then this function isn't applicable. This function may sleep, so it must be
* called from a workqueue rather than from the bio's bi_end_io callback.
*
- * This function sets PG_error on any pages that contain any blocks that failed
- * to be decrypted. The filesystem must not mark such pages uptodate.
+ * Return: %true on success; %false on failure. On failure, bio->bi_status is
+ * also set to an error status.
*/
-void fscrypt_decrypt_bio(struct bio *bio)
+bool fscrypt_decrypt_bio(struct bio *bio)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
- bv->bv_offset);
- if (ret)
- SetPageError(page);
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio) {
+ int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
+ fi.offset);
+
+ if (err) {
+ bio->bi_status = errno_to_blk_status(err);
+ return false;
+ }
}
+ return true;
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
@@ -54,7 +58,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
int num_pages = 0;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+ bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
+ GFP_NOFS);
while (len) {
unsigned int blocks_this_page = min(len, blocks_per_page);
@@ -62,13 +67,11 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
if (num_pages == 0) {
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
- bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector =
pblk << (blockbits - SECTOR_SHIFT);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}
ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
- if (WARN_ON(ret != bytes_this_page)) {
+ if (WARN_ON_ONCE(ret != bytes_this_page)) {
err = -EIO;
goto out;
}
@@ -81,7 +84,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
err = submit_bio_wait(bio);
if (err)
goto out;
- bio_reset(bio);
+ bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
num_pages = 0;
}
}
@@ -110,10 +113,14 @@ out:
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
- const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
- const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
+ const unsigned int du_per_page_bits = PAGE_SHIFT - du_bits;
+ const unsigned int du_per_page = 1U << du_per_page_bits;
+ u64 du_index = (u64)lblk << (inode->i_blkbits - du_bits);
+ u64 du_remaining = (u64)len << (inode->i_blkbits - du_bits);
+ sector_t sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);
struct page *pages[16]; /* write up to 16 pages at a time */
unsigned int nr_pages;
unsigned int i;
@@ -129,8 +136,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
len);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
- nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
- (len + blocks_per_page - 1) >> blocks_per_page_bits);
+ nr_pages = min_t(u64, ARRAY_SIZE(pages),
+ (du_remaining + du_per_page - 1) >> du_per_page_bits);
/*
* We need at least one page for ciphertext. Allocate the first one
@@ -141,49 +148,47 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
*/
for (i = 0; i < nr_pages; i++) {
pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
- GFP_NOWAIT | __GFP_NOWARN);
+ GFP_NOWAIT);
if (!pages[i])
break;
}
nr_pages = i;
- if (WARN_ON(nr_pages <= 0))
+ if (WARN_ON_ONCE(nr_pages <= 0))
return -EINVAL;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, nr_pages);
+ bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
do {
- bio_set_dev(bio, inode->i_sb->s_bdev);
- bio->bi_iter.bi_sector = pblk << (blockbits - 9);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_iter.bi_sector = sector;
i = 0;
offset = 0;
do {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), pages[i],
- blocksize, offset, GFP_NOFS);
+ err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, du_index,
+ ZERO_PAGE(0), pages[i],
+ du_size, offset);
if (err)
goto out;
- lblk++;
- pblk++;
- len--;
- offset += blocksize;
- if (offset == PAGE_SIZE || len == 0) {
+ du_index++;
+ sector += 1U << (du_bits - SECTOR_SHIFT);
+ du_remaining--;
+ offset += du_size;
+ if (offset == PAGE_SIZE || du_remaining == 0) {
ret = bio_add_page(bio, pages[i++], offset, 0);
- if (WARN_ON(ret != offset)) {
+ if (WARN_ON_ONCE(ret != offset)) {
err = -EIO;
goto out;
}
offset = 0;
}
- } while (i != nr_pages && len != 0);
+ } while (i != nr_pages && du_remaining != 0);
err = submit_bio_wait(bio);
if (err)
goto out;
- bio_reset(bio);
- } while (len != 0);
+ bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
+ } while (du_remaining != 0);
err = 0;
out:
bio_put(bio);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 4ef3f714046a..07f9cbfe3ea4 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -20,12 +20,14 @@
* Special Publication 800-38E and IEEE P1619/D16.
*/
-#include <linux/pagemap.h>
+#include <crypto/skcipher.h>
+#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/pagemap.h>
#include <linux/ratelimit.h>
-#include <crypto/skcipher.h>
+#include <linux/scatterlist.h>
+
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
@@ -39,7 +41,7 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-struct kmem_cache *fscrypt_info_cachep;
+struct kmem_cache *fscrypt_inode_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
@@ -49,6 +51,13 @@ EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
+ if (WARN_ON_ONCE(!fscrypt_bounce_page_pool)) {
+ /*
+ * Oops, the filesystem called a function that uses the bounce
+ * page pool, but it didn't set needs_bounce_pages.
+ */
+ return NULL;
+ }
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
@@ -69,88 +78,87 @@ void fscrypt_free_bounce_page(struct page *bounce_page)
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
-void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
- const struct fscrypt_info *ci)
+/*
+ * Generate the IV for the given data unit index within the given file.
+ * For filenames encryption, index == 0.
+ *
+ * Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks()
+ * needs to know about any IV generation methods where the low bits of IV don't
+ * simply contain the data unit index (e.g., IV_INO_LBLK_32).
+ */
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
+ const struct fscrypt_inode_info *ci)
{
u8 flags = fscrypt_policy_flags(&ci->ci_policy);
memset(iv, 0, ci->ci_mode->ivsize);
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
- WARN_ON_ONCE(lblk_num > U32_MAX);
+ WARN_ON_ONCE(index > U32_MAX);
WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
- lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ index |= (u64)ci->ci_inode->i_ino << 32;
} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
- WARN_ON_ONCE(lblk_num > U32_MAX);
- lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
+ WARN_ON_ONCE(index > U32_MAX);
+ index = (u32)(ci->ci_hashed_ino + index);
} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
}
- iv->lblk_num = cpu_to_le64(lblk_num);
+ iv->index = cpu_to_le64(index);
}
-/* Encrypt or decrypt a single filesystem block of file contents */
-int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags)
+/* Encrypt or decrypt a single "data unit" of file contents. */
+int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
+ fscrypt_direction_t rw, u64 index,
+ struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs)
{
+ struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
- struct fscrypt_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
- int res = 0;
+ int err;
if (WARN_ON_ONCE(len <= 0))
return -EINVAL;
- if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
+ if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
return -EINVAL;
- fscrypt_generate_iv(&iv, lblk_num, ci);
-
- req = skcipher_request_alloc(tfm, gfp_flags);
- if (!req)
- return -ENOMEM;
+ fscrypt_generate_iv(&iv, index, ci);
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
-
+ NULL, NULL);
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, len, offs);
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
- res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
+ err = crypto_skcipher_decrypt(req);
else
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
- skcipher_request_free(req);
- if (res) {
- fscrypt_err(inode, "%scryption failed for block %llu: %d",
- (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
- return res;
- }
- return 0;
+ err = crypto_skcipher_encrypt(req);
+ if (err)
+ fscrypt_err(ci->ci_inode,
+ "%scryption failed for data unit %llu: %d",
+ (rw == FS_DECRYPT ? "De" : "En"), index, err);
+ return err;
}
/**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
- * pagecache page
- * @page: The locked pagecache page containing the block(s) to encrypt
- * @len: Total size of the block(s) to encrypt. Must be a nonzero
- * multiple of the filesystem's block size.
- * @offs: Byte offset within @page of the first block to encrypt. Must be
- * a multiple of the filesystem's block size.
- * @gfp_flags: Memory allocation flags. See details below.
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache folio
+ * @folio: the locked pagecache folio containing the data to encrypt
+ * @len: size of the data to encrypt, in bytes
+ * @offs: offset within @folio of the data to encrypt, in bytes
+ * @gfp_flags: memory allocation flags; see details below
+ *
+ * This allocates a new bounce page and encrypts the given data into it. The
+ * length and offset of the data must be aligned to the file's crypto data unit
+ * size. Alignment to the filesystem block size fulfills this requirement, as
+ * the filesystem block size is always a multiple of the data unit size.
*
- * A new bounce page is allocated, and the specified block(s) are encrypted into
- * it. In the bounce page, the ciphertext block(s) will be located at the same
- * offsets at which the plaintext block(s) were located in the source page; any
- * other parts of the bounce page will be left uninitialized. However, normally
- * blocksize == PAGE_SIZE and the whole page is encrypted at once.
+ * In the bounce page, the ciphertext data will be located at the same offset at
+ * which the plaintext data was located in the source page. Any other parts of
+ * the bounce page will be left uninitialized.
*
* This is for use by the filesystem's ->writepages() method.
*
@@ -161,42 +169,41 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
*
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags)
-
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
{
- const struct inode *inode = page->mapping->host;
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
+ const struct inode *inode = folio->mapping->host;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
struct page *ciphertext_page;
- u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
+ u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
+ (offs >> du_bits);
unsigned int i;
int err;
- if (WARN_ON_ONCE(!PageLocked(page)))
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+ if (WARN_ON_ONCE(!folio_test_locked(folio)))
return ERR_PTR(-EINVAL);
- if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+ if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return ERR_PTR(-EINVAL);
ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
if (!ciphertext_page)
return ERR_PTR(-ENOMEM);
- for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
- page, ciphertext_page,
- blocksize, i, gfp_flags);
+ for (i = offs; i < offs + len; i += du_size, index++) {
+ err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
+ &folio->page, ciphertext_page,
+ du_size, i);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
}
}
SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)page);
+ set_page_private(ciphertext_page, (unsigned long)folio);
return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
@@ -205,65 +212,69 @@ EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
* fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
* @inode: The inode to which this block belongs
* @page: The page containing the block to encrypt
- * @len: Size of block to encrypt. Doesn't need to be a multiple of the
- * fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
+ * @len: Size of block to encrypt. This must be a multiple of
+ * FSCRYPT_CONTENTS_ALIGNMENT.
* @offs: Byte offset within @page at which the block to encrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
- * @gfp_flags: Memory allocation flags
*
* Encrypt a possibly-compressed filesystem block that is located in an
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
+ * This is not compatible with fscrypt_operations::supports_subblock_data_units.
+ *
* Return: 0 on success; -errno on failure
*/
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
+ u64 lblk_num)
{
- return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
- len, offs, gfp_flags);
+ if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
+ return -EOPNOTSUPP;
+ return fscrypt_crypt_data_unit(fscrypt_get_inode_info_raw(inode),
+ FS_ENCRYPT, lblk_num, page, page, len,
+ offs);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
/**
- * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
- * pagecache page
- * @page: The locked pagecache page containing the block(s) to decrypt
- * @len: Total size of the block(s) to decrypt. Must be a nonzero
- * multiple of the filesystem's block size.
- * @offs: Byte offset within @page of the first block to decrypt. Must be
- * a multiple of the filesystem's block size.
+ * fscrypt_decrypt_pagecache_blocks() - Decrypt data from a pagecache folio
+ * @folio: the pagecache folio containing the data to decrypt
+ * @len: size of the data to decrypt, in bytes
+ * @offs: offset within @folio of the data to decrypt, in bytes
*
- * The specified block(s) are decrypted in-place within the pagecache page,
- * which must still be locked and not uptodate. Normally, blocksize ==
- * PAGE_SIZE and the whole page is decrypted at once.
- *
- * This is for use by the filesystem's ->readpages() method.
+ * Decrypt data that has just been read from an encrypted file. The data must
+ * be located in a pagecache folio that is still locked and not yet uptodate.
+ * The length and offset of the data must be aligned to the file's crypto data
+ * unit size. Alignment to the filesystem block size fulfills this requirement,
+ * as the filesystem block size is always a multiple of the data unit size.
*
* Return: 0 on success; -errno on failure
*/
-int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
- unsigned int offs)
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+ size_t offs)
{
- const struct inode *inode = page->mapping->host;
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
- u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
- unsigned int i;
+ const struct inode *inode = folio->mapping->host;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
+ u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
+ (offs >> du_bits);
+ size_t i;
int err;
- if (WARN_ON_ONCE(!PageLocked(page)))
+ if (WARN_ON_ONCE(!folio_test_locked(folio)))
return -EINVAL;
- if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+ if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return -EINVAL;
- for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
- err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
- page, blocksize, i, GFP_NOFS);
+ for (i = offs; i < offs + len; i += du_size, index++) {
+ struct page *page = folio_page(folio, i >> PAGE_SHIFT);
+
+ err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
+ page, du_size, i & ~PAGE_MASK);
if (err)
return err;
}
@@ -275,8 +286,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
* fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
* @inode: The inode to which this block belongs
* @page: The page containing the block to decrypt
- * @len: Size of block to decrypt. Doesn't need to be a multiple of the
- * fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
+ * @len: Size of block to decrypt. This must be a multiple of
+ * FSCRYPT_CONTENTS_ALIGNMENT.
* @offs: Byte offset within @page at which the block to decrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
@@ -285,32 +296,42 @@ EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
+ * This is not compatible with fscrypt_operations::supports_subblock_data_units.
+ *
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
- return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
- len, offs, GFP_NOFS);
+ if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
+ return -EOPNOTSUPP;
+ return fscrypt_crypt_data_unit(fscrypt_get_inode_info_raw(inode),
+ FS_DECRYPT, lblk_num, page, page, len,
+ offs);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
- * @cop_flags: fscrypt operations flags
+ * @sb: the filesystem superblock
*
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
* Return: 0 on success; -errno on failure
*/
-int fscrypt_initialize(unsigned int cop_flags)
+int fscrypt_initialize(struct super_block *sb)
{
int err = 0;
+ mempool_t *pool;
+
+ /* pairs with smp_store_release() below */
+ if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
+ return 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
- if (cop_flags & FS_CFLG_OWN_PAGES)
+ if (!sb->s_cop->needs_bounce_pages)
return 0;
mutex_lock(&fscrypt_init_mutex);
@@ -318,11 +339,11 @@ int fscrypt_initialize(unsigned int cop_flags)
goto out_unlock;
err = -ENOMEM;
- fscrypt_bounce_page_pool =
- mempool_create_page_pool(num_prealloc_crypto_pages, 0);
- if (!fscrypt_bounce_page_pool)
+ pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+ if (!pool)
goto out_unlock;
-
+ /* pairs with smp_load_acquire() above */
+ smp_store_release(&fscrypt_bounce_page_pool, pool);
err = 0;
out_unlock:
mutex_unlock(&fscrypt_init_mutex);
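/*
 * The same lockless-fastpath initialization pattern in isolation (sketch;
 * my_page_pool, my_init_mutex, and my_pool_init are hypothetical names):
 * readers use smp_load_acquire() so that, once they observe the pointer,
 * they also observe the fully constructed pool published by
 * smp_store_release().
 */
#include <linux/mempool.h>
#include <linux/mutex.h>

static mempool_t *my_page_pool;
static DEFINE_MUTEX(my_init_mutex);

static int my_pool_init(void)
{
	mempool_t *pool;

	/* Fast path without the mutex; pairs with smp_store_release() below */
	if (likely(smp_load_acquire(&my_page_pool)))
		return 0;

	mutex_lock(&my_init_mutex);
	if (my_page_pool)
		goto out_unlock;	/* lost the race; already created */
	pool = mempool_create_page_pool(32, 0);
	if (!pool) {
		mutex_unlock(&my_init_mutex);
		return -ENOMEM;
	}
	/* Publish only after the pool is fully constructed */
	smp_store_release(&my_page_pool, pool);
out_unlock:
	mutex_unlock(&my_init_mutex);
	return 0;
}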
@@ -376,18 +397,19 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_info_cachep)
+ fscrypt_inode_info_cachep = KMEM_CACHE(fscrypt_inode_info,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_inode_info_cachep)
goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
- goto fail_free_info;
+ goto fail_free_inode_info;
return 0;
-fail_free_info:
- kmem_cache_destroy(fscrypt_info_cachep);
+fail_free_inode_info:
+ kmem_cache_destroy(fscrypt_inode_info_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index a9be4bc74a94..a9a4432d12ba 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -11,14 +11,23 @@
* This has not yet undergone a rigorous security audit.
*/
-#include <linux/namei.h>
-#include <linux/scatterlist.h>
-#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
+#include <linux/export.h>
+#include <linux/namei.h>
+#include <linux/scatterlist.h>
+#include <linux/base64.h>
+
#include "fscrypt_private.h"
/*
+ * The minimum message length (input and output length), in bytes, for all
+ * filenames encryption modes. Filenames shorter than this will be zero-padded
+ * before being encrypted.
+ */
+#define FSCRYPT_FNAME_MIN_MSG_LEN 16
+
+/*
* struct fscrypt_nokey_name - identifier for directory entry when key is absent
*
* When userspace lists an encrypted directory without access to the key, the
@@ -63,23 +72,18 @@ struct fscrypt_nokey_name {
/* Encoded size of max-size no-key name */
#define FSCRYPT_NOKEY_NAME_MAX_ENCODED \
- FSCRYPT_BASE64URL_CHARS(FSCRYPT_NOKEY_NAME_MAX)
+ BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX)
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
+ return is_dot_dotdot(str->name, str->len);
}
/**
* fscrypt_fname_encrypt() - encrypt a filename
* @inode: inode of the parent directory (for regular filenames)
- * or of the symlink (for symlink targets)
+ * or of the symlink (for symlink targets). Key must already be
+ * set up.
* @iname: the filename to encrypt
* @out: (output) the encrypted filename
* @olen: size of the encrypted filename. It must be at least @iname->len.
@@ -90,46 +94,35 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen)
{
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- const struct fscrypt_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
struct scatterlist sg;
- int res;
+ int err;
/*
* Copy the filename to the output buffer for encrypting in-place and
* pad it with the needed number of NUL bytes.
*/
- if (WARN_ON(olen < iname->len))
+ if (WARN_ON_ONCE(olen < iname->len))
return -ENOBUFS;
memcpy(out, iname->name, iname->len);
memset(out + iname->len, 0, olen - iname->len);
- /* Initialize the IV */
fscrypt_generate_iv(&iv, 0, ci);
- /* Set up the encryption request */
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req)
- return -ENOMEM;
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
sg_init_one(&sg, out, olen);
skcipher_request_set_crypt(req, &sg, &sg, olen, &iv);
-
- /* Do the encryption */
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
- skcipher_request_free(req);
- if (res < 0) {
- fscrypt_err(inode, "Filename encryption failed: %d", res);
- return res;
- }
-
- return 0;
+ err = crypto_skcipher_encrypt(req);
+ if (err)
+ fscrypt_err(inode, "Filename encryption failed: %d", err);
+ return err;
}
+EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt);
/**
* fname_decrypt() - decrypt a filename
@@ -145,121 +138,34 @@ static int fname_decrypt(const struct inode *inode,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- struct scatterlist src_sg, dst_sg;
- const struct fscrypt_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+ struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
- int res;
-
- /* Allocate request */
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req)
- return -ENOMEM;
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
+ struct scatterlist src_sg, dst_sg;
+ int err;
- /* Initialize IV */
fscrypt_generate_iv(&iv, 0, ci);
- /* Create decryption request */
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
sg_init_one(&src_sg, iname->name, iname->len);
sg_init_one(&dst_sg, oname->name, oname->len);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
- res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
- skcipher_request_free(req);
- if (res < 0) {
- fscrypt_err(inode, "Filename decryption failed: %d", res);
- return res;
+ err = crypto_skcipher_decrypt(req);
+ if (err) {
+ fscrypt_err(inode, "Filename decryption failed: %d", err);
+ return err;
}
oname->len = strnlen(oname->name, iname->len);
return 0;
}
-static const char base64url_table[65] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
-#define FSCRYPT_BASE64URL_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
-
-/**
- * fscrypt_base64url_encode() - base64url-encode some binary data
- * @src: the binary data to encode
- * @srclen: the length of @src in bytes
- * @dst: (output) the base64url-encoded string. Not NUL-terminated.
- *
- * Encodes data using base64url encoding, i.e. the "Base 64 Encoding with URL
- * and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't used,
- * as it's unneeded and not required by the RFC. base64url is used instead of
- * base64 to avoid the '/' character, which isn't allowed in filenames.
- *
- * Return: the length of the resulting base64url-encoded string in bytes.
- * This will be equal to FSCRYPT_BASE64URL_CHARS(srclen).
- */
-static int fscrypt_base64url_encode(const u8 *src, int srclen, char *dst)
-{
- u32 ac = 0;
- int bits = 0;
- int i;
- char *cp = dst;
-
- for (i = 0; i < srclen; i++) {
- ac = (ac << 8) | src[i];
- bits += 8;
- do {
- bits -= 6;
- *cp++ = base64url_table[(ac >> bits) & 0x3f];
- } while (bits >= 6);
- }
- if (bits)
- *cp++ = base64url_table[(ac << (6 - bits)) & 0x3f];
- return cp - dst;
-}
-
-/**
- * fscrypt_base64url_decode() - base64url-decode a string
- * @src: the string to decode. Doesn't need to be NUL-terminated.
- * @srclen: the length of @src in bytes
- * @dst: (output) the decoded binary data
- *
- * Decodes a string using base64url encoding, i.e. the "Base 64 Encoding with
- * URL and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't
- * accepted, nor are non-encoding characters such as whitespace.
- *
- * This implementation hasn't been optimized for performance.
- *
- * Return: the length of the resulting decoded binary data in bytes,
- * or -1 if the string isn't a valid base64url string.
- */
-static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst)
-{
- u32 ac = 0;
- int bits = 0;
- int i;
- u8 *bp = dst;
-
- for (i = 0; i < srclen; i++) {
- const char *p = strchr(base64url_table, src[i]);
-
- if (p == NULL || src[i] == 0)
- return -1;
- ac = (ac << 6) | (p - base64url_table);
- bits += 6;
- if (bits >= 8) {
- bits -= 8;
- *bp++ = (u8)(ac >> bits);
- }
- }
- if (ac & ((1 << bits) - 1))
- return -1;
- return bp - dst;
-}
-
-bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
- u32 orig_len, u32 max_len,
- u32 *encrypted_len_ret)
+bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
+ u32 orig_len, u32 max_len,
+ u32 *encrypted_len_ret)
{
int padding = 4 << (fscrypt_policy_flags(policy) &
FSCRYPT_POLICY_FLAGS_PAD_MASK);
@@ -267,13 +173,37 @@ bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
if (orig_len > max_len)
return false;
- encrypted_len = max(orig_len, (u32)FS_CRYPTO_BLOCK_SIZE);
+ encrypted_len = max_t(u32, orig_len, FSCRYPT_FNAME_MIN_MSG_LEN);
encrypted_len = round_up(encrypted_len, padding);
*encrypted_len_ret = min(encrypted_len, max_len);
return true;
}
/**
+ * fscrypt_fname_encrypted_size() - calculate length of encrypted filename
+ * @inode: parent inode of dentry name being encrypted. Key must
+ * already be set up.
+ * @orig_len: length of the original filename
+ * @max_len: maximum length to return
+ * @encrypted_len_ret: where calculated length should be returned (on success)
+ *
+ * Filenames that are shorter than the maximum length may have their lengths
+ * increased slightly by encryption, due to padding that is applied.
+ *
+ * Return: false if orig_len is greater than max_len. Otherwise, returns true
+ * and fills in encrypted_len_ret with the encrypted length (at most max_len).
+ */
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret)
+{
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+
+ return __fscrypt_fname_encrypted_size(&ci->ci_policy, orig_len, max_len,
+ encrypted_len_ret);
+}
+EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size);
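/*
 * Worked example of the calculation above (numbers assumed): with the
 * FSCRYPT_POLICY_FLAGS_PAD_32 flag the padding is 4 << 3 = 32 bytes, so a
 * 5-byte name is first raised to FSCRYPT_FNAME_MIN_MSG_LEN (16) and then
 * rounded up to 32, while a 40-byte name becomes 64; the result is always
 * capped at max_len.
 */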
+
+/**
* fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames
* @max_encrypted_len: maximum length of encrypted filenames the buffer will be
* used to present
@@ -350,7 +280,7 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
return 0;
}
- if (iname->len < FS_CRYPTO_BLOCK_SIZE)
+ if (iname->len < FSCRYPT_FNAME_MIN_MSG_LEN)
return -EUCLEAN;
if (fscrypt_has_encryption_key(inode))
@@ -380,8 +310,8 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
nokey_name.sha256);
size = FSCRYPT_NOKEY_NAME_MAX;
}
- oname->len = fscrypt_base64url_encode((const u8 *)&nokey_name, size,
- oname->name);
+ oname->len = base64_encode((const u8 *)&nokey_name, size,
+ oname->name, false, BASE64_URLSAFE);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
@@ -428,8 +358,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
return ret;
if (fscrypt_has_encryption_key(dir)) {
- if (!fscrypt_fname_encrypted_size(&dir->i_crypt_info->ci_policy,
- iname->len, NAME_MAX,
+ if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX,
&fname->crypto_buf.len))
return -ENAMETOOLONG;
fname->crypto_buf.name = kmalloc(fname->crypto_buf.len,
@@ -461,8 +390,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
- ret = fscrypt_base64url_decode(iname->name, iname->len,
- fname->crypto_buf.name);
+ ret = base64_decode(iname->name, iname->len,
+ fname->crypto_buf.name, false, BASE64_URLSAFE);
if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) ||
(ret > offsetof(struct fscrypt_nokey_name, sha256) &&
ret != FSCRYPT_NOKEY_NAME_MAX)) {
@@ -537,9 +466,9 @@ EXPORT_SYMBOL_GPL(fscrypt_match_name);
*/
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
{
- const struct fscrypt_info *ci = dir->i_crypt_info;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(dir);
- WARN_ON(!ci->ci_dirhash_key_initialized);
+ WARN_ON_ONCE(!ci->ci_dirhash_key_initialized);
return siphash(name->name, name->len, &ci->ci_dirhash_key);
}
@@ -549,11 +478,10 @@ EXPORT_SYMBOL_GPL(fscrypt_fname_siphash);
* Validate dentries in encrypted directories to make sure we aren't potentially
* caching stale dentries after a key has been added.
*/
-int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct dentry *dir;
int err;
- int valid;
/*
* Plaintext names are always valid, since fscrypt doesn't support
@@ -566,30 +494,21 @@ int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
/*
* No-key name; valid if the directory's key is still unavailable.
*
- * Although fscrypt forbids rename() on no-key names, we still must use
- * dget_parent() here rather than use ->d_parent directly. That's
- * because a corrupted fs image may contain directory hard links, which
- * the VFS handles by moving the directory's dentry tree in the dcache
- * each time ->lookup() finds the directory and it already has a dentry
- * elsewhere. Thus ->d_parent can be changing, and we must safely grab
- * a reference to some ->d_parent to prevent it from being freed.
+ * Note that in RCU-walk mode we have to bail out here, because
+ * fscrypt_get_encryption_info() may block.
*/
if (flags & LOOKUP_RCU)
return -ECHILD;
- dir = dget_parent(dentry);
/*
* Pass allow_unsupported=true, so that files with an unsupported
* encryption policy can be deleted.
*/
- err = fscrypt_get_encryption_info(d_inode(dir), true);
- valid = !fscrypt_has_encryption_key(d_inode(dir));
- dput(dir);
-
+ err = fscrypt_get_encryption_info(dir, true);
if (err < 0)
return err;
- return valid;
+ return !fscrypt_has_encryption_key(dir);
}
EXPORT_SYMBOL_GPL(fscrypt_d_revalidate);
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 5b0a9e6478b5..4e8e82a9ccf9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -11,9 +11,10 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
+#include <crypto/sha2.h>
#include <linux/fscrypt.h>
+#include <linux/minmax.h>
#include <linux/siphash.h>
-#include <crypto/hash.h>
#include <linux/blk-crypto.h>
#define CONST_STRLEN(str) (sizeof(str) - 1)
@@ -27,11 +28,46 @@
*/
#define FSCRYPT_MIN_KEY_SIZE 16
+/* Maximum size of a raw fscrypt master key */
+#define FSCRYPT_MAX_RAW_KEY_SIZE 64
+
+/* Maximum size of a hardware-wrapped fscrypt master key */
+#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE
+
+/* Maximum size of an fscrypt master key across both key types */
+#define FSCRYPT_MAX_ANY_KEY_SIZE \
+ MAX(FSCRYPT_MAX_RAW_KEY_SIZE, FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE)
+
+/*
+ * FSCRYPT_MAX_KEY_SIZE is defined in the UAPI header, but the addition of
+ * hardware-wrapped keys has made it misleading as it's only for raw keys.
+ * Don't use it in kernel code; use one of the above constants instead.
+ */
+#undef FSCRYPT_MAX_KEY_SIZE
+
+/*
+ * This mask is passed as the third argument to the crypto_alloc_*() functions
+ * to prevent fscrypt from using the Crypto API drivers for non-inline crypto
+ * engines. Those drivers have been problematic for fscrypt. fscrypt users
+ * have reported hangs and even incorrect en/decryption with these drivers.
+ * Since going to the driver, off CPU, and back again is really slow, such
+ * drivers can be over 50 times slower than the CPU-based code for fscrypt's
+ * workload. Even on platforms that lack AES instructions on the CPU, using the
+ * offloads has been shown to be slower, even staying with AES. (Of course,
+ * Adiantum is faster still, and is the recommended option on such platforms...)
+ *
+ * Note that fscrypt also supports inline crypto engines. Those don't use the
+ * Crypto API and work much better than the old-style (non-inline) engines.
+ */
+#define FSCRYPT_CRYPTOAPI_MASK \
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | \
+ CRYPTO_ALG_KERN_DRIVER_ONLY)
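/*
 * Sketch of the intended use of the mask (hypothetical example, not from
 * this patch): passing it as the third argument of a Crypto API allocation
 * restricts the search to synchronous, non-allocating, CPU-based
 * implementations. "xts(aes)" is just an example algorithm name.
 */
#include <crypto/skcipher.h>

static struct crypto_sync_skcipher *example_alloc_contents_tfm(void)
{
	return crypto_alloc_sync_skcipher("xts(aes)", 0,
					  FSCRYPT_CRYPTOAPI_MASK);
}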
+
#define FSCRYPT_CONTEXT_V1 1
#define FSCRYPT_CONTEXT_V2 2
/* Keep this in sync with include/uapi/linux/fscrypt.h */
-#define FSCRYPT_MODE_MAX FSCRYPT_MODE_ADIANTUM
+#define FSCRYPT_MODE_MAX FSCRYPT_MODE_AES_256_HCTR2
struct fscrypt_context_v1 {
u8 version; /* FSCRYPT_CONTEXT_V1 */
@@ -47,7 +83,8 @@ struct fscrypt_context_v2 {
u8 contents_encryption_mode;
u8 filenames_encryption_mode;
u8 flags;
- u8 __reserved[4];
+ u8 log2_data_unit_size;
+ u8 __reserved[3];
u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
};
@@ -101,7 +138,7 @@ static inline const u8 *fscrypt_context_nonce(const union fscrypt_context *ctx)
case FSCRYPT_CONTEXT_V2:
return ctx->v2.nonce;
}
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return NULL;
}
@@ -165,13 +202,33 @@ fscrypt_policy_flags(const union fscrypt_policy *policy)
BUG();
}
+static inline int
+fscrypt_policy_v2_du_bits(const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ return policy->log2_data_unit_size ?: inode->i_blkbits;
+}
+
+static inline int
+fscrypt_policy_du_bits(const union fscrypt_policy *policy,
+ const struct inode *inode)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return inode->i_blkbits;
+ case FSCRYPT_POLICY_V2:
+ return fscrypt_policy_v2_du_bits(&policy->v2, inode);
+ }
+ BUG();
+}
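/*
 * Worked example for the helpers above (numbers assumed): on a filesystem
 * with 4096-byte blocks (i_blkbits == 12), a v2 policy with
 * log2_data_unit_size == 9 gives 512-byte data units, i.e. 2^(12-9) = 8
 * data units per block; log2_data_unit_size == 0 falls back to i_blkbits,
 * giving one data unit per block as before.
 */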
+
/*
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
*/
struct fscrypt_symlink_data {
__le16 len;
- char encrypted_path[1];
+ char encrypted_path[];
} __packed;
/**
@@ -182,35 +239,51 @@ struct fscrypt_symlink_data {
* Normally only one of the fields will be non-NULL.
*/
struct fscrypt_prepared_key {
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
- struct fscrypt_blk_crypto_key *blk_key;
+ struct blk_crypto_key *blk_key;
#endif
};
/*
- * fscrypt_info - the "encryption key" for an inode
+ * fscrypt_inode_info - the "encryption key" for an inode
*
* When an encrypted file's key is made available, an instance of this struct is
- * allocated and stored in ->i_crypt_info. Once created, it remains until the
- * inode is evicted.
+ * allocated and a pointer to it is stored in the file's in-memory inode. Once
+ * created, it remains until the inode is evicted.
*/
-struct fscrypt_info {
+struct fscrypt_inode_info {
/* The key in a form prepared for actual encryption/decryption */
struct fscrypt_prepared_key ci_enc_key;
- /* True if ci_enc_key should be freed when this fscrypt_info is freed */
- bool ci_owns_key;
+ /* True if ci_enc_key should be freed when this struct is freed */
+ u8 ci_owns_key : 1;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
/*
* True if this inode will use inline encryption (blk-crypto) instead of
* the traditional filesystem-layer encryption.
*/
- bool ci_inlinecrypt;
+ u8 ci_inlinecrypt : 1;
#endif
+ /* True if ci_dirhash_key is initialized */
+ u8 ci_dirhash_key_initialized : 1;
+
+ /*
+ * log2 of the data unit size (granularity of contents encryption) of
+ * this file. This is computable from ci_policy and ci_inode but is
+ * cached here for efficiency. Only used for regular files.
+ */
+ u8 ci_data_unit_bits;
+
+ /* Cached value: log2 of number of data units per FS block */
+ u8 ci_data_units_per_block_bits;
+
+ /* Hashed inode number. Only set for IV_INO_LBLK_32 */
+ u32 ci_hashed_ino;
+
/*
* Encryption mode used for this inode. It corresponds to either the
* contents or filenames encryption mode, depending on the inode type.
@@ -225,7 +298,7 @@ struct fscrypt_info {
* will be NULL if the master key was found in a process-subscribed
* keyring rather than in the filesystem-level keyring.
*/
- struct key *ci_master_key;
+ struct fscrypt_master_key *ci_master_key;
/*
* Link in list of inodes that were unlocked with the master key.
@@ -245,16 +318,12 @@ struct fscrypt_info {
* the plaintext filenames -- currently just casefolded directories.
*/
siphash_key_t ci_dirhash_key;
- bool ci_dirhash_key_initialized;
/* The encryption policy used by this inode */
union fscrypt_policy ci_policy;
/* This inode's nonce, copied from the fscrypt_context */
u8 ci_nonce[FSCRYPT_FILE_NONCE_SIZE];
-
- /* Hashed inode number. Only set for IV_INO_LBLK_32 */
- u32 ci_hashed_ino;
};
typedef enum {
@@ -263,12 +332,12 @@ typedef enum {
} fscrypt_direction_t;
/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
-int fscrypt_initialize(unsigned int cop_flags);
-int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags);
+extern struct kmem_cache *fscrypt_inode_info_cachep;
+int fscrypt_initialize(struct super_block *sb);
+int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
+ fscrypt_direction_t rw, u64 index,
+ struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
void __printf(3, 4) __cold
@@ -283,8 +352,8 @@ fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...);
union fscrypt_iv {
struct {
- /* logical block number within the file */
- __le64 lblk_num;
+ /* zero-based index of data unit within the file */
+ __le64 index;
/* per-file nonce; only set in DIRECT_KEY mode */
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
@@ -293,24 +362,27 @@ union fscrypt_iv {
__le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)];
};
-void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
- const struct fscrypt_info *ci);
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
+ const struct fscrypt_inode_info *ci);
+
+/*
+ * Return the number of bits used by the maximum file data unit index that is
+ * possible on the given filesystem, using the given log2 data unit size.
+ */
+static inline int
+fscrypt_max_file_dun_bits(const struct super_block *sb, int du_bits)
+{
+ return fls64(sb->s_maxbytes - 1) - du_bits;
+}
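/*
 * Worked example (values assumed): for a filesystem with
 * sb->s_maxbytes == 1ULL << 44 (16 TiB files) and 4096-byte data units
 * (du_bits == 12), fls64(2^44 - 1) == 44, so the largest possible data
 * unit index needs 44 - 12 = 32 bits.
 */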
/* fname.c */
-int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
- u8 *out, unsigned int olen);
-bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
- u32 orig_len, u32 max_len,
- u32 *encrypted_len_ret);
+bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
+ u32 orig_len, u32 max_len,
+ u32 *encrypted_len_ret);
/* hkdf.c */
-
-struct fscrypt_hkdf {
- struct crypto_shash *hmac_tfm;
-};
-
-int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
- unsigned int master_key_size);
+void fscrypt_init_hkdf(struct hmac_sha512_key *hkdf, const u8 *master_key,
+ unsigned int master_key_size);
/*
* The list of contexts in which fscrypt uses HKDF. These values are used as
@@ -319,35 +391,42 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
* outputs are unique and cryptographically isolated, i.e. knowledge of one
* output doesn't reveal another.
*/
-#define HKDF_CONTEXT_KEY_IDENTIFIER 1 /* info=<empty> */
+#define HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY 1 /* info=<empty> */
#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 /* info=file_nonce */
#define HKDF_CONTEXT_DIRECT_KEY 3 /* info=mode_num */
#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 /* info=mode_num||fs_uuid */
#define HKDF_CONTEXT_DIRHASH_KEY 5 /* info=file_nonce */
#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 /* info=mode_num||fs_uuid */
#define HKDF_CONTEXT_INODE_HASH_KEY 7 /* info=<empty> */
+#define HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY \
+ 8 /* info=<empty> */
-int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
- const u8 *info, unsigned int infolen,
- u8 *okm, unsigned int okmlen);
-
-void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
+void fscrypt_hkdf_expand(const struct hmac_sha512_key *hkdf, u8 context,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen);
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci);
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
+ bool is_hw_wrapped_key);
static inline bool
-fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
+fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
{
return ci->ci_inlinecrypt;
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key,
- const struct fscrypt_info *ci);
+ const u8 *key_bytes, size_t key_size,
+ bool is_hw_wrapped,
+ const struct fscrypt_inode_info *ci);
+
+void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
+ struct fscrypt_prepared_key *prep_key);
-void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key);
+int fscrypt_derive_sw_secret(struct super_block *sb,
+ const u8 *wrapped_key, size_t wrapped_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
/*
* Check whether the crypto transform or blk-crypto key has been allocated in
@@ -355,7 +434,7 @@ void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key);
*/
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
/*
* The two smp_load_acquire()'s here pair with the smp_store_release()'s
@@ -372,34 +451,46 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
-static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
+ bool is_hw_wrapped_key)
{
return 0;
}
static inline bool
-fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
+fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
{
return false;
}
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key,
- const struct fscrypt_info *ci)
+ const u8 *key_bytes, size_t key_size,
+ bool is_hw_wrapped,
+ const struct fscrypt_inode_info *ci)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return -EOPNOTSUPP;
}
static inline void
-fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
+fscrypt_destroy_inline_crypt_key(struct super_block *sb,
+ struct fscrypt_prepared_key *prep_key)
{
}
+static inline int
+fscrypt_derive_sw_secret(struct super_block *sb,
+ const u8 *wrapped_key, size_t wrapped_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ fscrypt_warn(NULL, "kernel doesn't support hardware-wrapped keys");
+ return -EOPNOTSUPP;
+}
+
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
return smp_load_acquire(&prep_key->tfm) != NULL;
}
@@ -413,20 +504,38 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
struct fscrypt_master_key_secret {
/*
- * For v2 policy keys: HKDF context keyed by this master key.
- * For v1 policy keys: not set (hkdf.hmac_tfm == NULL).
+ * The KDF with which subkeys of this key can be derived.
+ *
+ * For v1 policy keys, this isn't applicable and won't be set.
+ * Otherwise, this KDF will be keyed by this master key if
+ * ->is_hw_wrapped=false, or by the "software secret" that hardware
+ * derived from this master key if ->is_hw_wrapped=true.
+ */
+ struct hmac_sha512_key hkdf;
+
+ /*
+ * True if this key is a hardware-wrapped key; false if this key is a
+ * raw key (i.e. a "software key"). For v1 policy keys this will always
+ * be false, as v1 policy support is a legacy feature which doesn't
+ * support newer functionality such as hardware-wrapped keys.
*/
- struct fscrypt_hkdf hkdf;
+ bool is_hw_wrapped;
/*
- * Size of the raw key in bytes. This remains set even if ->raw was
+ * Size of the key in bytes. This remains set even if ->bytes was
* zeroized due to no longer being needed. I.e. we still remember the
* size of the key even if we don't need to remember the key itself.
*/
u32 size;
- /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */
- u8 raw[FSCRYPT_MAX_KEY_SIZE];
+ /*
+ * The bytes of the key, when still needed. This can be either a raw
+ * key or a hardware-wrapped key, as indicated by ->is_hw_wrapped. In
+ * the case of a raw, v2 policy key, there is no need to remember the
+ * actual key separately from ->hkdf so this field will be zeroized as
+ * soon as ->hkdf is initialized.
+ */
+ u8 bytes[FSCRYPT_MAX_ANY_KEY_SIZE];
} __randomize_layout;
@@ -434,20 +543,64 @@ struct fscrypt_master_key_secret {
* fscrypt_master_key - an in-use master key
*
* This represents a master encryption key which has been added to the
- * filesystem and can be used to "unlock" the encrypted files which were
- * encrypted with it.
+ * filesystem. There are three high-level states that a key can be in:
+ *
+ * FSCRYPT_KEY_STATUS_PRESENT
+ * Key is fully usable; it can be used to unlock inodes that are encrypted
+ * with it (this includes being able to create new inodes). ->mk_present
+ * indicates whether the key is in this state. ->mk_secret exists, the key
+ * is in the keyring, and ->mk_active_refs > 0 due to ->mk_present.
+ *
+ * FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED
+ * Removal of this key has been initiated, but some inodes that were
+ * unlocked with it are still in-use. Like ABSENT, ->mk_secret is wiped,
+ * and the key can no longer be used to unlock inodes. Unlike ABSENT, the
+ * key is still in the keyring; ->mk_decrypted_inodes is nonempty; and
+ * ->mk_active_refs > 0, being equal to the size of ->mk_decrypted_inodes.
+ *
+ * This state transitions to ABSENT if ->mk_decrypted_inodes becomes empty,
+ * or to PRESENT if FS_IOC_ADD_ENCRYPTION_KEY is called again for this key.
+ *
+ * FSCRYPT_KEY_STATUS_ABSENT
+ * Key is fully removed. The key is no longer in the keyring,
+ * ->mk_decrypted_inodes is empty, ->mk_active_refs == 0, ->mk_secret is
+ * wiped, and the key can no longer be used to unlock inodes.
*/
struct fscrypt_master_key {
/*
- * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is
- * executed, this is wiped and no new inodes can be unlocked with this
- * key; however, there may still be inodes in ->mk_decrypted_inodes
- * which could not be evicted. As long as some inodes still remain,
- * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or
- * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again.
+ * Link in ->s_master_keys->key_hashtable.
+ * Only valid if ->mk_active_refs > 0.
+ */
+ struct hlist_node mk_node;
+
+ /* Semaphore that protects ->mk_secret, ->mk_users, and ->mk_present */
+ struct rw_semaphore mk_sem;
+
+ /*
+ * Active and structural reference counts. An active ref guarantees
+ * that the struct continues to exist, continues to be in the keyring
+ * ->s_master_keys, and that any embedded subkeys (e.g.
+ * ->mk_direct_keys) that have been prepared continue to exist.
+ * A structural ref only guarantees that the struct continues to exist.
+ *
+ * There is one active ref associated with ->mk_present being true, and
+ * one active ref for each inode in ->mk_decrypted_inodes.
*
- * Locking: protected by this master key's key->sem.
+ * There is one structural ref associated with the active refcount being
+ * nonzero. Finding a key in the keyring also takes a structural ref,
+ * which is then held temporarily while the key is operated on.
+ */
+ refcount_t mk_active_refs;
+ refcount_t mk_struct_refs;
+
+ struct rcu_head mk_rcu_head;
+
+ /*
+ * The secret key material. Wiped as soon as it is no longer needed;
+ * for details, see the fscrypt_master_key struct comment.
+ *
+ * Locking: protected by ->mk_sem.
*/
struct fscrypt_master_key_secret mk_secret;
@@ -468,23 +621,13 @@ struct fscrypt_master_key {
*
* This is NULL for v1 policy keys; those can only be added by root.
*
- * Locking: in addition to this keyring's own semaphore, this is
- * protected by this master key's key->sem, so we can do atomic
- * search+insert. It can also be searched without taking any locks, but
- * in that case the returned key may have already been removed.
+ * Locking: protected by ->mk_sem. (We don't just rely on the keyrings
+ * subsystem semaphore ->mk_users->sem, as we need support for atomic
+ * search+insert along with proper synchronization with other fields.)
*/
struct key *mk_users;
/*
- * Length of ->mk_decrypted_inodes, plus one if mk_secret is present.
- * Once this goes to 0, the master key is removed from ->s_master_keys.
- * The 'struct fscrypt_master_key' will continue to live as long as the
- * 'struct key' whose payload it is, but we won't let this reference
- * count rise again.
- */
- refcount_t mk_refcount;
-
- /*
* List of inodes that were unlocked using this key. This allows the
* inodes to be evicted efficiently if the key is removed.
*/
@@ -503,20 +646,17 @@ struct fscrypt_master_key {
siphash_key_t mk_ino_hash_key;
bool mk_ino_hash_key_initialized;
-} __randomize_layout;
-
-static inline bool
-is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
-{
/*
- * The READ_ONCE() is only necessary for fscrypt_drop_inode() and
- * fscrypt_key_describe(). These run in atomic context, so they can't
- * take the key semaphore and thus 'secret' can change concurrently
- * which would be a data race. But they only need to know whether the
- * secret *was* present at the time of check, so READ_ONCE() suffices.
+ * Whether this key is in the "present" state, i.e. fully usable. For
+ * details, see the fscrypt_master_key struct comment.
+ *
+ * Locking: protected by ->mk_sem, but can be read locklessly using
+ * READ_ONCE(). Writers must use WRITE_ONCE() when concurrent readers
+ * are possible.
*/
- return READ_ONCE(secret->size) != 0;
-}
+ bool mk_present;
+
+} __randomize_layout;
static inline const char *master_key_spec_type(
const struct fscrypt_key_specifier *spec)
@@ -541,10 +681,18 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec)
return 0;
}
-struct key *
+void fscrypt_put_master_key(struct fscrypt_master_key *mk);
+
+void fscrypt_put_master_key_activeref(struct super_block *sb,
+ struct fscrypt_master_key *mk);
+
+struct fscrypt_master_key *
fscrypt_find_master_key(struct super_block *sb,
const struct fscrypt_key_specifier *mk_spec);
+void fscrypt_get_test_dummy_key_identifier(
+ u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]);
+
int fscrypt_add_test_dummy_key(struct super_block *sb,
struct fscrypt_key_specifier *key_spec);
@@ -561,23 +709,27 @@ struct fscrypt_mode {
int keysize; /* key size in bytes */
int security_strength; /* security strength in bytes */
int ivsize; /* IV size in bytes */
- int logged_impl_name;
+ int logged_cryptoapi_impl;
+ int logged_blk_crypto_native;
+ int logged_blk_crypto_fallback;
enum blk_crypto_mode_num blk_crypto_mode;
};
extern struct fscrypt_mode fscrypt_modes[];
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, const struct fscrypt_info *ci);
+ const u8 *raw_key, const struct fscrypt_inode_info *ci);
-void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key);
+void fscrypt_destroy_prepared_key(struct super_block *sb,
+ struct fscrypt_prepared_key *prep_key);
-int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key);
+int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_key);
-int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
- const struct fscrypt_master_key *mk);
+void fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
+ const struct fscrypt_master_key *mk);
-void fscrypt_hash_inode_number(struct fscrypt_info *ci,
+void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk);
int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported);
@@ -612,15 +764,19 @@ static inline int fscrypt_require_key(struct inode *inode)
void fscrypt_put_direct_key(struct fscrypt_direct_key *dk);
-int fscrypt_setup_v1_file_key(struct fscrypt_info *ci,
+int fscrypt_setup_v1_file_key(struct fscrypt_inode_info *ci,
const u8 *raw_master_key);
-int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci);
+int fscrypt_setup_v1_file_key_via_subscribed_keyrings(
+ struct fscrypt_inode_info *ci);
/* policy.c */
bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
const union fscrypt_policy *policy2);
+int fscrypt_policy_to_key_spec(const union fscrypt_policy *policy,
+ struct fscrypt_key_specifier *key_spec);
+const union fscrypt_policy *fscrypt_get_dummy_policy(struct super_block *sb);
bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
const struct inode *inode);
int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
index 7607d18b35fc..706f56d0076e 100644
--- a/fs/crypto/hkdf.c
+++ b/fs/crypto/hkdf.c
@@ -4,14 +4,13 @@
* Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
* "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
*
- * This is used to derive keys from the fscrypt master keys.
+ * This is used to derive keys from the fscrypt master keys (or from the
+ * "software secrets" which hardware derives from the fscrypt master keys, in
+ * the case that the fscrypt master keys are hardware-wrapped keys).
*
* Copyright 2019 Google LLC
*/
-#include <crypto/hash.h>
-#include <crypto/sha2.h>
-
#include "fscrypt_private.h"
/*
@@ -25,7 +24,6 @@
* HKDF-SHA512 being much faster than HKDF-SHA256, as the longer digest size of
* SHA-512 causes HKDF-Expand to only need to do one iteration rather than two.
*/
-#define HKDF_HMAC_ALG "hmac(sha512)"
#define HKDF_HASHLEN SHA512_DIGEST_SIZE
/*
@@ -44,67 +42,25 @@
* there's no way to persist a random salt per master key from kernel mode.
*/
-/* HKDF-Extract (RFC 5869 section 2.2), unsalted */
-static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
- unsigned int ikmlen, u8 prk[HKDF_HASHLEN])
-{
- static const u8 default_salt[HKDF_HASHLEN];
- int err;
-
- err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN);
- if (err)
- return err;
-
- return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
-}
-
/*
- * Compute HKDF-Extract using the given master key as the input keying material,
- * and prepare an HMAC transform object keyed by the resulting pseudorandom key.
- *
- * Afterwards, the keyed HMAC transform object can be used for HKDF-Expand many
- * times without having to recompute HKDF-Extract each time.
+ * Compute HKDF-Extract using 'master_key' as the input keying material, and
+ * prepare the resulting HMAC key in 'hkdf'. Afterwards, 'hkdf' can be used for
+ * HKDF-Expand many times without having to recompute HKDF-Extract each time.
*/
-int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
- unsigned int master_key_size)
+void fscrypt_init_hkdf(struct hmac_sha512_key *hkdf, const u8 *master_key,
+ unsigned int master_key_size)
{
- struct crypto_shash *hmac_tfm;
+ static const u8 default_salt[HKDF_HASHLEN];
u8 prk[HKDF_HASHLEN];
- int err;
-
- hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
- if (IS_ERR(hmac_tfm)) {
- fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
- PTR_ERR(hmac_tfm));
- return PTR_ERR(hmac_tfm);
- }
-
- if (WARN_ON(crypto_shash_digestsize(hmac_tfm) != sizeof(prk))) {
- err = -EINVAL;
- goto err_free_tfm;
- }
-
- err = hkdf_extract(hmac_tfm, master_key, master_key_size, prk);
- if (err)
- goto err_free_tfm;
- err = crypto_shash_setkey(hmac_tfm, prk, sizeof(prk));
- if (err)
- goto err_free_tfm;
-
- hkdf->hmac_tfm = hmac_tfm;
- goto out;
-
-err_free_tfm:
- crypto_free_shash(hmac_tfm);
-out:
+ hmac_sha512_usingrawkey(default_salt, sizeof(default_salt),
+ master_key, master_key_size, prk);
+ hmac_sha512_preparekey(hkdf, prk, sizeof(prk));
memzero_explicit(prk, sizeof(prk));
- return err;
}
/*
- * HKDF-Expand (RFC 5869 section 2.3). This expands the pseudorandom key, which
- * was already keyed into 'hkdf->hmac_tfm' by fscrypt_init_hkdf(), into 'okmlen'
+ * HKDF-Expand (RFC 5869 section 2.3). Expand the HMAC key 'hkdf' into 'okmlen'
* bytes of output keying material parameterized by the application-specific
* 'info' of length 'infolen' bytes, prefixed by "fscrypt\0" and the 'context'
* byte. This is thread-safe and may be called by multiple threads in parallel.
@@ -113,70 +69,32 @@ out:
* adds to its application-specific info strings to guarantee that it doesn't
* accidentally repeat an info string when using HKDF for different purposes.)
*/
-int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
- const u8 *info, unsigned int infolen,
- u8 *okm, unsigned int okmlen)
+void fscrypt_hkdf_expand(const struct hmac_sha512_key *hkdf, u8 context,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
{
- SHASH_DESC_ON_STACK(desc, hkdf->hmac_tfm);
- u8 prefix[9];
- unsigned int i;
- int err;
- const u8 *prev = NULL;
+ struct hmac_sha512_ctx ctx;
u8 counter = 1;
u8 tmp[HKDF_HASHLEN];
- if (WARN_ON(okmlen > 255 * HKDF_HASHLEN))
- return -EINVAL;
-
- desc->tfm = hkdf->hmac_tfm;
-
- memcpy(prefix, "fscrypt\0", 8);
- prefix[8] = context;
-
- for (i = 0; i < okmlen; i += HKDF_HASHLEN) {
-
- err = crypto_shash_init(desc);
- if (err)
- goto out;
-
- if (prev) {
- err = crypto_shash_update(desc, prev, HKDF_HASHLEN);
- if (err)
- goto out;
- }
-
- err = crypto_shash_update(desc, prefix, sizeof(prefix));
- if (err)
- goto out;
-
- err = crypto_shash_update(desc, info, infolen);
- if (err)
- goto out;
-
- BUILD_BUG_ON(sizeof(counter) != 1);
+ WARN_ON_ONCE(okmlen > 255 * HKDF_HASHLEN);
+
+ for (unsigned int i = 0; i < okmlen; i += HKDF_HASHLEN) {
+ hmac_sha512_init(&ctx, hkdf);
+ if (i != 0)
+ hmac_sha512_update(&ctx, &okm[i - HKDF_HASHLEN],
+ HKDF_HASHLEN);
+ hmac_sha512_update(&ctx, "fscrypt\0", 8);
+ hmac_sha512_update(&ctx, &context, 1);
+ hmac_sha512_update(&ctx, info, infolen);
+ hmac_sha512_update(&ctx, &counter, 1);
if (okmlen - i < HKDF_HASHLEN) {
- err = crypto_shash_finup(desc, &counter, 1, tmp);
- if (err)
- goto out;
+ hmac_sha512_final(&ctx, tmp);
memcpy(&okm[i], tmp, okmlen - i);
memzero_explicit(tmp, sizeof(tmp));
} else {
- err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
- if (err)
- goto out;
+ hmac_sha512_final(&ctx, &okm[i]);
}
counter++;
- prev = &okm[i];
}
- err = 0;
-out:
- if (unlikely(err))
- memzero_explicit(okm, okmlen); /* so caller doesn't need to */
- shash_desc_zero(desc);
- return err;
-}
-
-void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf)
-{
- crypto_free_shash(hkdf->hmac_tfm);
}
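/*
 * Sketch of a typical caller of the new void-returning HKDF-Expand
 * (hypothetical wrapper, not from this patch): deriving a per-file
 * encryption key from the prepared HKDF key, using the file's nonce as
 * the application-specific info.
 */
#include "fscrypt_private.h"

static void example_derive_per_file_key(const struct hmac_sha512_key *hkdf,
					const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
					u8 *file_key, unsigned int keysize)
{
	fscrypt_hkdf_expand(hkdf, HKDF_CONTEXT_PER_FILE_ENC_KEY,
			    nonce, FSCRYPT_FILE_NONCE_SIZE,
			    file_key, keysize);
}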
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index af74599ae1cf..b97de0d1430f 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -5,7 +5,7 @@
* Encryption hooks for higher-level filesystem operations.
*/
-#include <linux/key.h>
+#include <linux/export.h>
#include "fscrypt_private.h"
@@ -32,21 +32,41 @@
int fscrypt_file_open(struct inode *inode, struct file *filp)
{
int err;
- struct dentry *dir;
+ struct dentry *dentry, *dentry_parent;
+ struct inode *inode_parent;
err = fscrypt_require_key(inode);
if (err)
return err;
- dir = dget_parent(file_dentry(filp));
- if (IS_ENCRYPTED(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ dentry = file_dentry(filp);
+
+ /*
+ * Getting a reference to the parent dentry is needed for the actual
+ * encryption policy comparison, but it's expensive on multi-core
+ * systems. Since this function runs on unencrypted files too, start
+ * with a lightweight RCU-mode check for the parent directory being
+ * unencrypted (in which case it's fine for the child to be either
+ * unencrypted, or encrypted with any policy). Only continue on to the
+ * full policy check if the parent directory is actually encrypted.
+ */
+ rcu_read_lock();
+ dentry_parent = READ_ONCE(dentry->d_parent);
+ inode_parent = d_inode_rcu(dentry_parent);
+ if (inode_parent != NULL && !IS_ENCRYPTED(inode_parent)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ dentry_parent = dget_parent(dentry);
+ if (!fscrypt_has_permitted_context(d_inode(dentry_parent), inode)) {
fscrypt_warn(inode,
"Inconsistent encryption context (parent directory: %lu)",
- d_inode(dir)->i_ino);
+ d_inode(dentry_parent)->i_ino);
err = -EPERM;
}
- dput(dir);
+ dput(dentry_parent);
return err;
}
EXPORT_SYMBOL_GPL(fscrypt_file_open);
@@ -104,15 +124,40 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
if (err && err != -ENOENT)
return err;
- if (fname->is_nokey_name) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_NOKEY_NAME;
- spin_unlock(&dentry->d_lock);
- }
+ fscrypt_prepare_dentry(dentry, fname->is_nokey_name);
+
return err;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
+/**
+ * fscrypt_prepare_lookup_partial() - prepare lookup without filename setup
+ * @dir: the encrypted directory being searched
+ * @dentry: the dentry being looked up in @dir
+ *
+ * This function should be used by the ->lookup and ->atomic_open methods of
+ * filesystems that handle filename encryption and no-key name encoding
+ * themselves and thus can't use fscrypt_prepare_lookup(). Like
+ * fscrypt_prepare_lookup(), this will try to set up the directory's encryption
+ * key and will set DCACHE_NOKEY_NAME on the dentry if the key is unavailable.
+ * However, this function doesn't set up a struct fscrypt_name for the filename.
+ *
+ * Return: 0 on success; -errno on error. Note that the encryption key being
+ * unavailable is not considered an error. It is also not an error if
+ * the encryption policy is unsupported by this kernel; that is treated
+ * like the key being unavailable, so that files can still be deleted.
+ */
+int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry)
+{
+ int err = fscrypt_get_encryption_info(dir, true);
+ bool is_nokey_name = (!err && !fscrypt_has_encryption_key(dir));
+
+ fscrypt_prepare_dentry(dentry, is_nokey_name);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fscrypt_prepare_lookup_partial);
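/*
 * Sketch of a hypothetical ->lookup in a filesystem that handles filename
 * encryption itself (myfs_lookup() is a made-up name): only the key setup
 * and DCACHE_NOKEY_NAME marking are delegated to the new partial helper.
 */
#include <linux/fs.h>
#include <linux/fscrypt.h>

static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	int err = fscrypt_prepare_lookup_partial(dir, dentry);

	if (err)
		return ERR_PTR(err);

	/* ... filesystem-specific (possibly no-key) name handling here ... */
	return NULL;
}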
+
int __fscrypt_prepare_readdir(struct inode *dir)
{
return fscrypt_get_encryption_info(dir, true);
@@ -141,8 +186,7 @@ EXPORT_SYMBOL_GPL(__fscrypt_prepare_setattr);
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags)
{
- struct fscrypt_info *ci;
- struct key *key;
+ struct fscrypt_inode_info *ci;
struct fscrypt_master_key *mk;
int err;
@@ -155,17 +199,16 @@ int fscrypt_prepare_setflags(struct inode *inode,
err = fscrypt_require_key(inode);
if (err)
return err;
- ci = inode->i_crypt_info;
+ ci = fscrypt_get_inode_info_raw(inode);
if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
return -EINVAL;
- key = ci->ci_master_key;
- mk = key->payload.data[0];
- down_read(&key->sem);
- if (is_master_key_secret_present(&mk->mk_secret))
- err = fscrypt_derive_dirhash_key(ci, mk);
+ mk = ci->ci_master_key;
+ down_read(&mk->mk_sem);
+ if (mk->mk_present)
+ fscrypt_derive_dirhash_key(ci, mk);
else
err = -ENOKEY;
- up_read(&key->sem);
+ up_read(&mk->mk_sem);
return err;
}
return 0;
@@ -228,11 +271,11 @@ int fscrypt_prepare_symlink(struct inode *dir, const char *target,
* counting it (even though it is meaningless for ciphertext) is simpler
* for now since filesystems will assume it is there and subtract it.
*/
- if (!fscrypt_fname_encrypted_size(policy, len,
- max_len - sizeof(struct fscrypt_symlink_data),
- &disk_link->len))
+ if (!__fscrypt_fname_encrypted_size(policy, len,
+ max_len - sizeof(struct fscrypt_symlink_data) - 1,
+ &disk_link->len))
return -ENAMETOOLONG;
- disk_link->len += sizeof(struct fscrypt_symlink_data);
+ disk_link->len += sizeof(struct fscrypt_symlink_data) + 1;
disk_link->name = NULL;
return 0;
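/*
 * Worked accounting for the "- 1"/"+ 1" above: with the flexible array,
 * sizeof(struct fscrypt_symlink_data) is 2 (just the __le16 length prefix)
 * instead of 3, so reserving one extra byte for the NUL terminator keeps
 * the on-disk total unchanged: 2 bytes of length + ciphertext + 1 NUL.
 */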
@@ -263,7 +306,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
if (!sd)
return -ENOMEM;
}
- ciphertext_len = disk_link->len - sizeof(*sd);
+ ciphertext_len = disk_link->len - sizeof(*sd) - 1;
sd->len = cpu_to_le16(ciphertext_len);
err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path,
@@ -319,7 +362,7 @@ const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
int err;
/* This is for encrypted symlinks only */
- if (WARN_ON(!IS_ENCRYPTED(inode)))
+ if (WARN_ON_ONCE(!IS_ENCRYPTED(inode)))
return ERR_PTR(-EINVAL);
/* If the decrypted target is already cached, just return it. */
@@ -341,7 +384,7 @@ const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
* the ciphertext length, even though this is redundant with i_size.
*/
- if (max_size < sizeof(*sd))
+ if (max_size < sizeof(*sd) + 1)
return ERR_PTR(-EUCLEAN);
sd = caddr;
cstr.name = (unsigned char *)sd->encrypted_path;
@@ -350,7 +393,7 @@ const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
if (cstr.len == 0)
return ERR_PTR(-EUCLEAN);
- if (cstr.len + sizeof(*sd) - 1 > max_size)
+ if (cstr.len + sizeof(*sd) > max_size)
return ERR_PTR(-EUCLEAN);
err = fscrypt_fname_alloc_buffer(cstr.len, &pstr);
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index c57bebfa48fe..ed6e926226b5 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -15,38 +15,36 @@
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
+#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
+#include <linux/uio.h>
#include "fscrypt_private.h"
-struct fscrypt_blk_crypto_key {
- struct blk_crypto_key base;
- int num_devs;
- struct request_queue *devs[];
-};
-
-static int fscrypt_get_num_devices(struct super_block *sb)
+static struct block_device **fscrypt_get_devices(struct super_block *sb,
+ unsigned int *num_devs)
{
- if (sb->s_cop->get_num_devices)
- return sb->s_cop->get_num_devices(sb);
- return 1;
-}
+ struct block_device **devs;
-static void fscrypt_get_devices(struct super_block *sb, int num_devs,
- struct request_queue **devs)
-{
- if (num_devs == 1)
- devs[0] = bdev_get_queue(sb->s_bdev);
- else
- sb->s_cop->get_devices(sb, devs);
+ if (sb->s_cop->get_devices) {
+ devs = sb->s_cop->get_devices(sb, num_devs);
+ if (devs)
+ return devs;
+ }
+ devs = kmalloc(sizeof(*devs), GFP_KERNEL);
+ if (!devs)
+ return ERR_PTR(-ENOMEM);
+ devs[0] = sb->s_bdev;
+ *num_devs = 1;
+ return devs;
}
-static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
+static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_inode_info *ci)
{
- struct super_block *sb = ci->ci_inode->i_sb;
+ const struct super_block *sb = ci->ci_inode->i_sb;
unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
- int ino_bits = 64, lblk_bits = 64;
+ int dun_bits;
if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return offsetofend(union fscrypt_iv, nonce);
@@ -57,21 +55,50 @@ static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
return sizeof(__le32);
- /* Default case: IVs are just the file logical block number */
- if (sb->s_cop->get_ino_and_lblk_bits)
- sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
- return DIV_ROUND_UP(lblk_bits, 8);
+ /* Default case: IVs are just the file data unit index */
+ dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
+ return DIV_ROUND_UP(dun_bits, 8);
+}
+
+/*
+ * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback
+ * for an encryption mode for the first time. This is the blk-crypto
+ * counterpart to the message logged when starting to use the crypto API for the
+ * first time. A limitation is that these messages don't convey which specific
+ * filesystems or files are using each implementation. However, *usually*
+ * systems use just one implementation per mode, which makes these messages
+ * helpful for debugging problems where the "wrong" implementation is used.
+ */
+static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
+ struct block_device **devs,
+ unsigned int num_devs,
+ const struct blk_crypto_config *cfg)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_devs; i++) {
+ if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
+ blk_crypto_config_supported_natively(devs[i], cfg)) {
+ if (!xchg(&mode->logged_blk_crypto_native, 1))
+ pr_info("fscrypt: %s using blk-crypto (native)\n",
+ mode->friendly_name);
+ } else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
+ pr_info("fscrypt: %s using blk-crypto-fallback\n",
+ mode->friendly_name);
+ }
+ }
}
/* Enable inline encryption for this file if supported. */
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
+ bool is_hw_wrapped_key)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
struct blk_crypto_config crypto_cfg;
- int num_devs;
- struct request_queue **devs;
- int i;
+ struct block_device **devs;
+ unsigned int num_devs;
+ unsigned int i;
/* The file must need contents encryption, not filenames encryption */
if (!S_ISREG(inode->i_mode))
@@ -99,23 +126,26 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
return 0;
/*
- * On all the filesystem's devices, blk-crypto must support the crypto
- * configuration that the file would use.
+ * On all the filesystem's block devices, blk-crypto must support the
+ * crypto configuration that the file would use.
*/
crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
- crypto_cfg.data_unit_size = sb->s_blocksize;
+ crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
- num_devs = fscrypt_get_num_devices(sb);
- devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
- if (!devs)
- return -ENOMEM;
- fscrypt_get_devices(sb, num_devs, devs);
+ crypto_cfg.key_type = is_hw_wrapped_key ?
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;
+
+ devs = fscrypt_get_devices(sb, &num_devs);
+ if (IS_ERR(devs))
+ return PTR_ERR(devs);
for (i = 0; i < num_devs; i++) {
if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
goto out_free_devs;
}
+ fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);
+
ci->ci_inlinecrypt = true;
out_free_devs:
kfree(devs);
@@ -124,55 +154,50 @@ out_free_devs:
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key,
- const struct fscrypt_info *ci)
+ const u8 *key_bytes, size_t key_size,
+ bool is_hw_wrapped,
+ const struct fscrypt_inode_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
- int num_devs = fscrypt_get_num_devices(sb);
- int queue_refs = 0;
- struct fscrypt_blk_crypto_key *blk_key;
+ enum blk_crypto_key_type key_type = is_hw_wrapped ?
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;
+ struct blk_crypto_key *blk_key;
+ struct block_device **devs;
+ unsigned int num_devs;
+ unsigned int i;
int err;
- int i;
- blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
+ blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL);
if (!blk_key)
return -ENOMEM;
- blk_key->num_devs = num_devs;
- fscrypt_get_devices(sb, num_devs, blk_key->devs);
-
- err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
- fscrypt_get_dun_bytes(ci), sb->s_blocksize);
+ err = blk_crypto_init_key(blk_key, key_bytes, key_size, key_type,
+ crypto_mode, fscrypt_get_dun_bytes(ci),
+ 1U << ci->ci_data_unit_bits);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
goto fail;
}
- /*
- * We have to start using blk-crypto on all the filesystem's devices.
- * We also have to save all the request_queue's for later so that the
- * key can be evicted from them. This is needed because some keys
- * aren't destroyed until after the filesystem was already unmounted
- * (namely, the per-mode keys in struct fscrypt_master_key).
- */
+ /* Start using blk-crypto on all the filesystem's block devices. */
+ devs = fscrypt_get_devices(sb, &num_devs);
+ if (IS_ERR(devs)) {
+ err = PTR_ERR(devs);
+ goto fail;
+ }
for (i = 0; i < num_devs; i++) {
- if (!blk_get_queue(blk_key->devs[i])) {
- fscrypt_err(inode, "couldn't get request_queue");
- err = -EAGAIN;
- goto fail;
- }
- queue_refs++;
-
- err = blk_crypto_start_using_key(&blk_key->base,
- blk_key->devs[i]);
- if (err) {
- fscrypt_err(inode,
- "error %d starting to use blk-crypto", err);
- goto fail;
- }
+ err = blk_crypto_start_using_key(devs[i], blk_key);
+ if (err)
+ break;
+ }
+ kfree(devs);
+ if (err) {
+ fscrypt_err(inode, "error %d starting to use blk-crypto", err);
+ goto fail;
}
+
/*
* Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
* I.e., here we publish ->blk_key with a RELEASE barrier so that
@@ -183,39 +208,74 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
return 0;
fail:
- for (i = 0; i < queue_refs; i++)
- blk_put_queue(blk_key->devs[i]);
kfree_sensitive(blk_key);
return err;
}
-void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
+void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
+ struct fscrypt_prepared_key *prep_key)
{
- struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
- int i;
+ struct blk_crypto_key *blk_key = prep_key->blk_key;
+ struct block_device **devs;
+ unsigned int num_devs;
+ unsigned int i;
- if (blk_key) {
- for (i = 0; i < blk_key->num_devs; i++) {
- blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
- blk_put_queue(blk_key->devs[i]);
- }
- kfree_sensitive(blk_key);
+ if (!blk_key)
+ return;
+
+ /* Evict the key from all the filesystem's block devices. */
+ devs = fscrypt_get_devices(sb, &num_devs);
+ if (!IS_ERR(devs)) {
+ for (i = 0; i < num_devs; i++)
+ blk_crypto_evict_key(devs[i], blk_key);
+ kfree(devs);
+ }
+ kfree_sensitive(blk_key);
+}
+
+/*
+ * Ask the inline encryption hardware to derive the software secret from a
+ * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't
+ * supported on this filesystem or hardware.
+ */
+int fscrypt_derive_sw_secret(struct super_block *sb,
+ const u8 *wrapped_key, size_t wrapped_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ int err;
+
+ /* The filesystem must be mounted with -o inlinecrypt. */
+ if (!(sb->s_flags & SB_INLINECRYPT)) {
+ fscrypt_warn(NULL,
+ "%s: filesystem not mounted with inlinecrypt\n",
+ sb->s_id);
+ return -EOPNOTSUPP;
}
+
+ err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
+ wrapped_key_size, sw_secret);
+ if (err == -EOPNOTSUPP)
+ fscrypt_warn(NULL,
+ "%s: block device doesn't support hardware-wrapped keys\n",
+ sb->s_id);
+ return err;
}
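To show how the derived secret is consumed — a hedged sketch that simply mirrors the add_master_key() hunk later in this patch, not additional code being introduced here:

	u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];

	err = fscrypt_derive_sw_secret(sb, secret->bytes, secret->size,
				       sw_secret);
	if (err)
		return err;
	/* The wrapped blob never becomes the KDF key; only sw_secret does. */
	fscrypt_init_hkdf(&secret->hkdf, sw_secret, sizeof(sw_secret));
	memzero_explicit(sw_secret, sizeof(sw_secret));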
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
- return inode->i_crypt_info->ci_inlinecrypt;
+ return fscrypt_get_inode_info_raw(inode)->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
-static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
+static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
+ u64 lblk_num,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
+ u64 index = lblk_num << ci->ci_data_units_per_block_bits;
union fscrypt_iv iv;
int i;
- fscrypt_generate_iv(&iv, lblk_num, ci);
+ fscrypt_generate_iv(&iv, index, ci);
BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
@@ -242,15 +302,15 @@ static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (!fscrypt_inode_uses_inline_crypto(inode))
return;
- ci = inode->i_crypt_info;
+ ci = fscrypt_get_inode_info_raw(inode);
fscrypt_generate_dun(ci, first_lblk, dun);
- bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
+ bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
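A minimal usage sketch (the bdev, nr_vecs, and first_lblk names are assumed, not taken from this patch): a filesystem's write path attaches the encryption context right after allocating the bio and before adding pages for logically contiguous blocks:

	bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, GFP_NOFS);
	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
	/* now add pages; the DUN advances implicitly with the bio's size */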
@@ -259,7 +319,7 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
const struct inode **inode_ret,
u64 *lblk_num_ret)
{
- struct page *page = bh->b_page;
+ struct folio *folio = bh->b_folio;
const struct address_space *mapping;
const struct inode *inode;
@@ -267,14 +327,13 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
* The ext4 journal (jbd2) can submit a buffer_head it directly created
* for a non-pagecache page. fscrypt doesn't care about these.
*/
- mapping = page_mapping(page);
+ mapping = folio_mapping(folio);
if (!mapping)
return false;
inode = mapping->host;
*inode_ret = inode;
- *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
- (bh_offset(bh) >> inode->i_blkbits);
+ *lblk_num_ret = (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits;
return true;
}
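For example (arithmetic only, no new code): with 4096-byte blocks (i_blkbits == 12), a buffer_head at offset 4096 within a folio that starts at file position 1048576 yields *lblk_num_ret = (1048576 + 4096) >> 12 = 257 — the same value the old page-index-based formula computed, but expressed directly in terms of the folio's position in the file.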
@@ -315,28 +374,34 @@ EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
*
* fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
*
+ * This function isn't required in cases where crypto-mergeability is ensured in
+ * another way, such as I/O targeting only a single file (and thus a single key)
+ * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
+ *
* Return: true iff the I/O is mergeable
*/
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
u64 next_lblk)
{
const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
+ const struct fscrypt_inode_info *ci;
u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
return false;
if (!bc)
return true;
+ ci = fscrypt_get_inode_info_raw(inode);
/*
* Comparing the key pointers is good enough, as all I/O for each key
* uses the same pointer. I.e., there's currently no need to support
* merging requests where the keys are the same but the pointers differ.
*/
- if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
+ if (bc->bc_key != ci->ci_enc_key.blk_key)
return false;
- fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
+ fscrypt_generate_dun(ci, next_lblk, next_dun);
return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
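A hedged sketch of the typical caller pattern (variable names are illustrative): a writeback loop finishes the current bio whenever the next block cannot be merged cryptographically:

	if (bio && !fscrypt_mergeable_bio(bio, inode, next_lblk)) {
		submit_bio(bio);
		bio = NULL;	/* allocate a fresh bio for next_lblk */
	}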
@@ -363,3 +428,90 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio,
return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
+
+/**
+ * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an
+ * inode, as far as encryption is concerned
+ * @inode: the inode in question
+ *
+ * Return: %true if there are no encryption constraints that prevent DIO from
+ * being supported; %false if DIO is unsupported. (Note that in the
+ * %true case, the filesystem might have other, non-encryption-related
+ * constraints that prevent DIO from actually being supported. Also, on
+ * encrypted files the filesystem is still responsible for only allowing
+ * DIO when requests are filesystem-block-aligned.)
+ */
+bool fscrypt_dio_supported(struct inode *inode)
+{
+ int err;
+
+ /* If the file is unencrypted, no veto from us. */
+ if (!fscrypt_needs_contents_encryption(inode))
+ return true;
+
+ /*
+ * We only support DIO with inline crypto, not fs-layer crypto.
+ *
+ * To determine whether the inode is using inline crypto, we have to set
+ * up the key if it wasn't already done. This is because in the current
+ * design of fscrypt, the decision of whether to use inline crypto or
+ * not isn't made until the inode's encryption key is being set up. In
+ * the DIO read/write case, the key will always be set up already, since
+ * the file will be open. But in the case of statx(), the key might not
+ * be set up yet, as the file might not have been opened yet.
+ */
+ err = fscrypt_require_key(inode);
+ if (err) {
+ /*
+ * Key unavailable or couldn't be set up. This edge case isn't
+ * worth worrying about; just report that DIO is unsupported.
+ */
+ return false;
+ }
+ return fscrypt_inode_uses_inline_crypto(inode);
+}
+EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
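A hedged example of how a filesystem might act on this (the exact fallback behaviour is the filesystem's choice, not something fscrypt dictates):

	/* e.g. early in a ->write_iter() or ->read_iter() handler: */
	if ((iocb->ki_flags & IOCB_DIRECT) && !fscrypt_dio_supported(inode))
		return -EOPNOTSUPP;	/* or silently fall back to buffered I/O */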
+
+/**
+ * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
+ * @inode: the file on which I/O is being done
+ * @lblk: the block at which the I/O is being started from
+ * @nr_blocks: the number of blocks we want to submit starting at @lblk
+ *
+ * Determine the limit to the number of blocks that can be submitted in a bio
+ * targeting @lblk without causing a data unit number (DUN) discontiguity.
+ *
+ * This is normally just @nr_blocks, as normally the DUNs just increment along
+ * with the logical blocks. (Or the file is not encrypted.)
+ *
+ * In rare cases, fscrypt can be using an IV generation method that allows the
+ * DUN to wrap around within logically contiguous blocks, and that wraparound
+ * will occur. If this happens, a value less than @nr_blocks will be returned
+ * so that the wraparound doesn't occur in the middle of a bio, which would
+ * cause encryption/decryption to produce wrong results.
+ *
+ * Return: the actual number of blocks that can be submitted
+ */
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
+{
+ const struct fscrypt_inode_info *ci;
+ u32 dun;
+
+ if (!fscrypt_inode_uses_inline_crypto(inode))
+ return nr_blocks;
+
+ if (nr_blocks <= 1)
+ return nr_blocks;
+
+ ci = fscrypt_get_inode_info_raw(inode);
+ if (!(fscrypt_policy_flags(&ci->ci_policy) &
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
+ return nr_blocks;
+
+ /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
+
+ dun = ci->ci_hashed_ino + lblk;
+
+ return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
+}
+EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
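Worked example (illustrative numbers): with IV_INO_LBLK_32, if ci_hashed_ino is 0xFFFFFFFC and lblk is 1, the 32-bit DUN for that block is 0xFFFFFFFD, so only (u64)U32_MAX + 1 - 0xFFFFFFFD = 3 more blocks fit before the DUN wraps to 0. A request for 16 blocks starting there is clamped to 3, and the caller issues the rest in a separate bio.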
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 0b3ffbb4faf4..5e939ea3ac28 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -19,15 +19,29 @@
*/
#include <crypto/skcipher.h>
+#include <linux/export.h>
#include <linux/key-type.h>
+#include <linux/once.h>
#include <linux/random.h>
#include <linux/seq_file.h>
+#include <linux/unaligned.h>
#include "fscrypt_private.h"
+/* The master encryption keys for a filesystem (->s_master_keys) */
+struct fscrypt_keyring {
+ /*
+ * Lock that protects ->key_hashtable. It does *not* protect the
+ * fscrypt_master_key structs themselves.
+ */
+ spinlock_t lock;
+
+ /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */
+ struct hlist_head key_hashtable[128];
+};
+
static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret)
{
- fscrypt_destroy_hkdf(&secret->hkdf);
memzero_explicit(secret, sizeof(*secret));
}
@@ -38,76 +52,108 @@ static void move_master_key_secret(struct fscrypt_master_key_secret *dst,
memzero_explicit(src, sizeof(*src));
}
-static void free_master_key(struct fscrypt_master_key *mk)
+static void fscrypt_free_master_key(struct rcu_head *head)
{
- size_t i;
-
- wipe_master_key_secret(&mk->mk_secret);
-
- for (i = 0; i <= FSCRYPT_MODE_MAX; i++) {
- fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]);
- fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]);
- fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]);
- }
-
- key_put(mk->mk_users);
+ struct fscrypt_master_key *mk =
+ container_of(head, struct fscrypt_master_key, mk_rcu_head);
+ /*
+ * The master key secret and any embedded subkeys should have already
+ * been wiped when the last active reference to the fscrypt_master_key
+ * struct was dropped; doing it here would be unnecessarily late.
+ * Nevertheless, use kfree_sensitive() in case anything was missed.
+ */
kfree_sensitive(mk);
}
-static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
+void fscrypt_put_master_key(struct fscrypt_master_key *mk)
{
- if (spec->__reserved)
- return false;
- return master_key_spec_len(spec) != 0;
+ if (!refcount_dec_and_test(&mk->mk_struct_refs))
+ return;
+ /*
+ * No structural references left, so free ->mk_users, and also free the
+ * fscrypt_master_key struct itself after an RCU grace period ensures
+ * that concurrent keyring lookups can no longer find it.
+ */
+ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 0);
+ if (mk->mk_users) {
+ /* Clear the keyring so the quota gets released right away. */
+ keyring_clear(mk->mk_users);
+ key_put(mk->mk_users);
+ mk->mk_users = NULL;
+ }
+ call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key);
}
-static int fscrypt_key_instantiate(struct key *key,
- struct key_preparsed_payload *prep)
+void fscrypt_put_master_key_activeref(struct super_block *sb,
+ struct fscrypt_master_key *mk)
{
- key->payload.data[0] = (struct fscrypt_master_key *)prep->data;
- return 0;
-}
+ size_t i;
-static void fscrypt_key_destroy(struct key *key)
-{
- free_master_key(key->payload.data[0]);
-}
+ if (!refcount_dec_and_test(&mk->mk_active_refs))
+ return;
+ /*
+ * No active references left, so complete the full removal of this
+ * fscrypt_master_key struct by removing it from the keyring and
+ * destroying any subkeys embedded in it.
+ */
-static void fscrypt_key_describe(const struct key *key, struct seq_file *m)
-{
- seq_puts(m, key->description);
+ if (WARN_ON_ONCE(!sb->s_master_keys))
+ return;
+ spin_lock(&sb->s_master_keys->lock);
+ hlist_del_rcu(&mk->mk_node);
+ spin_unlock(&sb->s_master_keys->lock);
- if (key_is_positive(key)) {
- const struct fscrypt_master_key *mk = key->payload.data[0];
+ /*
+ * ->mk_active_refs == 0 implies that ->mk_present is false and
+ * ->mk_decrypted_inodes is empty.
+ */
+ WARN_ON_ONCE(mk->mk_present);
+ WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes));
- if (!is_master_key_secret_present(&mk->mk_secret))
- seq_puts(m, ": secret removed");
+ for (i = 0; i <= FSCRYPT_MODE_MAX; i++) {
+ fscrypt_destroy_prepared_key(
+ sb, &mk->mk_direct_keys[i]);
+ fscrypt_destroy_prepared_key(
+ sb, &mk->mk_iv_ino_lblk_64_keys[i]);
+ fscrypt_destroy_prepared_key(
+ sb, &mk->mk_iv_ino_lblk_32_keys[i]);
}
+ memzero_explicit(&mk->mk_ino_hash_key,
+ sizeof(mk->mk_ino_hash_key));
+ mk->mk_ino_hash_key_initialized = false;
+
+ /* Drop the structural ref associated with the active refs. */
+ fscrypt_put_master_key(mk);
}
/*
- * Type of key in ->s_master_keys. Each key of this type represents a master
- * key which has been added to the filesystem. Its payload is a
- * 'struct fscrypt_master_key'. The "." prefix in the key type name prevents
- * users from adding keys of this type via the keyrings syscalls rather than via
- * the intended method of FS_IOC_ADD_ENCRYPTION_KEY.
+ * This transitions the key state from present to incompletely removed, and then
+ * potentially to absent (depending on whether inodes remain).
*/
-static struct key_type key_type_fscrypt = {
- .name = "._fscrypt",
- .instantiate = fscrypt_key_instantiate,
- .destroy = fscrypt_key_destroy,
- .describe = fscrypt_key_describe,
-};
+static void fscrypt_initiate_key_removal(struct super_block *sb,
+ struct fscrypt_master_key *mk)
+{
+ WRITE_ONCE(mk->mk_present, false);
+ wipe_master_key_secret(&mk->mk_secret);
+ fscrypt_put_master_key_activeref(sb, mk);
+}
+
+static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
+{
+ if (spec->__reserved)
+ return false;
+ return master_key_spec_len(spec) != 0;
+}
static int fscrypt_user_key_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
/*
- * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for
- * each key, regardless of the exact key size. The amount of memory
+ * We just charge FSCRYPT_MAX_RAW_KEY_SIZE bytes to the user's key quota
+ * for each key, regardless of the exact key size. The amount of memory
* actually used is greater than the size of the raw key anyway.
*/
- return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE);
+ return key_payload_reserve(key, FSCRYPT_MAX_RAW_KEY_SIZE);
}
static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m)
@@ -131,32 +177,6 @@ static struct key_type key_type_fscrypt_user = {
.describe = fscrypt_user_key_describe,
};
-/* Search ->s_master_keys or ->mk_users */
-static struct key *search_fscrypt_keyring(struct key *keyring,
- struct key_type *type,
- const char *description)
-{
- /*
- * We need to mark the keyring reference as "possessed" so that we
- * acquire permission to search it, via the KEY_POS_SEARCH permission.
- */
- key_ref_t keyref = make_key_ref(keyring, true /* possessed */);
-
- keyref = keyring_search(keyref, type, description, false);
- if (IS_ERR(keyref)) {
- if (PTR_ERR(keyref) == -EAGAIN || /* not found */
- PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
- keyref = ERR_PTR(-ENOKEY);
- return ERR_CAST(keyref);
- }
- return key_ref_to_ptr(keyref);
-}
-
-#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \
- (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id))
-
-#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1)
-
#define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \
(CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \
CONST_STRLEN("-users") + 1)
@@ -164,21 +184,6 @@ static struct key *search_fscrypt_keyring(struct key *keyring,
#define FSCRYPT_MK_USER_DESCRIPTION_SIZE \
(2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1)
-static void format_fs_keyring_description(
- char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE],
- const struct super_block *sb)
-{
- sprintf(description, "fscrypt-%s", sb->s_id);
-}
-
-static void format_mk_description(
- char description[FSCRYPT_MK_DESCRIPTION_SIZE],
- const struct fscrypt_key_specifier *mk_spec)
-{
- sprintf(description, "%*phN",
- master_key_spec_len(mk_spec), (u8 *)&mk_spec->u);
-}
-
static void format_mk_users_keyring_description(
char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE],
const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
@@ -199,20 +204,15 @@ static void format_mk_user_description(
/* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */
static int allocate_filesystem_keyring(struct super_block *sb)
{
- char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE];
- struct key *keyring;
+ struct fscrypt_keyring *keyring;
if (sb->s_master_keys)
return 0;
- format_fs_keyring_description(description, sb);
- keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
- current_cred(), KEY_POS_SEARCH |
- KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW,
- KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
- if (IS_ERR(keyring))
- return PTR_ERR(keyring);
-
+ keyring = kzalloc(sizeof(*keyring), GFP_KERNEL);
+ if (!keyring)
+ return -ENOMEM;
+ spin_lock_init(&keyring->lock);
/*
* Pairs with the smp_load_acquire() in fscrypt_find_master_key().
* I.e., here we publish ->s_master_keys with a RELEASE barrier so that
@@ -222,21 +222,76 @@ static int allocate_filesystem_keyring(struct super_block *sb)
return 0;
}
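The publication pattern that the paired comments describe looks like this (a sketch; the store itself sits in the part of allocate_filesystem_keyring() elided by this hunk):

	/* writer, at the end of allocate_filesystem_keyring(): */
	smp_store_release(&sb->s_master_keys, keyring);

	/* reader, in fscrypt_find_master_key(): */
	keyring = smp_load_acquire(&sb->s_master_keys);
	if (keyring == NULL)
		return NULL;	/* no keyring yet, so no keys yet */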
-void fscrypt_sb_free(struct super_block *sb)
+/*
+ * Release all encryption keys that have been added to the filesystem, along
+ * with the keyring that contains them.
+ *
+ * This is called at unmount time, after all potentially-encrypted inodes have
+ * been evicted. The filesystem's underlying block device(s) are still
+ * available at this time; this is important because after user file accesses
+ * have been allowed, this function may need to evict keys from the keyslots of
+ * an inline crypto engine, which requires the block device(s).
+ */
+void fscrypt_destroy_keyring(struct super_block *sb)
{
- key_put(sb->s_master_keys);
+ struct fscrypt_keyring *keyring = sb->s_master_keys;
+ size_t i;
+
+ if (!keyring)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) {
+ struct hlist_head *bucket = &keyring->key_hashtable[i];
+ struct fscrypt_master_key *mk;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
+ /*
+ * Since all potentially-encrypted inodes were already
+ * evicted, every key remaining in the keyring should
+ * have an empty inode list, and should only still be in
+ * the keyring due to the single active ref associated
+ * with ->mk_present. There should be no structural
+ * refs beyond the one associated with the active ref.
+ */
+ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1);
+ WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1);
+ WARN_ON_ONCE(!mk->mk_present);
+ fscrypt_initiate_key_removal(sb, mk);
+ }
+ }
+ kfree_sensitive(keyring);
sb->s_master_keys = NULL;
}
+static struct hlist_head *
+fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring,
+ const struct fscrypt_key_specifier *mk_spec)
+{
+ /*
+ * Since key specifiers should be "random" values, it is sufficient to
+ * use a trivial hash function that just takes the first several bits of
+ * the key specifier.
+ */
+ unsigned long i = get_unaligned((unsigned long *)&mk_spec->u);
+
+ return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)];
+}
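Concretely (an illustrative case): key_hashtable has 128 buckets, and since 128 is a power of two, i % 128 keeps only the low 7 bits of i. On a little-endian machine those come from the first byte of the key specifier, so an identifier beginning with byte 0xd3 lands in bucket 0xd3 & 0x7f = 0x53 (83).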
+
/*
- * Find the specified master key in ->s_master_keys.
- * Returns ERR_PTR(-ENOKEY) if not found.
+ * Find the specified master key struct in ->s_master_keys and take a structural
+ * ref to it. The structural ref guarantees that the key struct continues to
+ * exist, but it does *not* guarantee that ->s_master_keys continues to contain
+ * the key struct. The structural ref needs to be dropped by
+ * fscrypt_put_master_key(). Returns NULL if the key struct is not found.
*/
-struct key *fscrypt_find_master_key(struct super_block *sb,
- const struct fscrypt_key_specifier *mk_spec)
+struct fscrypt_master_key *
+fscrypt_find_master_key(struct super_block *sb,
+ const struct fscrypt_key_specifier *mk_spec)
{
- struct key *keyring;
- char description[FSCRYPT_MK_DESCRIPTION_SIZE];
+ struct fscrypt_keyring *keyring;
+ struct hlist_head *bucket;
+ struct fscrypt_master_key *mk;
/*
* Pairs with the smp_store_release() in allocate_filesystem_keyring().
@@ -246,10 +301,38 @@ struct key *fscrypt_find_master_key(struct super_block *sb,
*/
keyring = smp_load_acquire(&sb->s_master_keys);
if (keyring == NULL)
- return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */
-
- format_mk_description(description, mk_spec);
- return search_fscrypt_keyring(keyring, &key_type_fscrypt, description);
+ return NULL; /* No keyring yet, so no keys yet. */
+
+ bucket = fscrypt_mk_hash_bucket(keyring, mk_spec);
+ rcu_read_lock();
+ switch (mk_spec->type) {
+ case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
+ hlist_for_each_entry_rcu(mk, bucket, mk_node) {
+ if (mk->mk_spec.type ==
+ FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
+ memcmp(mk->mk_spec.u.descriptor,
+ mk_spec->u.descriptor,
+ FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 &&
+ refcount_inc_not_zero(&mk->mk_struct_refs))
+ goto out;
+ }
+ break;
+ case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
+ hlist_for_each_entry_rcu(mk, bucket, mk_node) {
+ if (mk->mk_spec.type ==
+ FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER &&
+ memcmp(mk->mk_spec.u.identifier,
+ mk_spec->u.identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 &&
+ refcount_inc_not_zero(&mk->mk_struct_refs))
+ goto out;
+ }
+ break;
+ }
+ mk = NULL;
+out:
+ rcu_read_unlock();
+ return mk;
}
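A sketch of the lookup discipline this implies — the same pattern the ioctl handlers later in this patch follow:

	mk = fscrypt_find_master_key(sb, &mk_spec);
	if (!mk)
		return -ENOKEY;
	down_read(&mk->mk_sem);
	/* mk->mk_present, mk->mk_users, etc. may be examined safely here */
	up_read(&mk->mk_sem);
	fscrypt_put_master_key(mk);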
static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
@@ -277,17 +360,30 @@ static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
static struct key *find_master_key_user(struct fscrypt_master_key *mk)
{
char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE];
+ key_ref_t keyref;
format_mk_user_description(description, mk->mk_spec.u.identifier);
- return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user,
- description);
+
+ /*
+ * We need to mark the keyring reference as "possessed" so that we
+ * acquire permission to search it, via the KEY_POS_SEARCH permission.
+ */
+ keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/),
+ &key_type_fscrypt_user, description, false);
+ if (IS_ERR(keyref)) {
+ if (PTR_ERR(keyref) == -EAGAIN || /* not found */
+ PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
+ keyref = ERR_PTR(-ENOKEY);
+ return ERR_CAST(keyref);
+ }
+ return key_ref_to_ptr(keyref);
}
/*
* Give the current user a "key" in ->mk_users. This charges the user's quota
* and marks the master key as added by the current user, so that it cannot be
- * removed by another user with the key. Either the master key's key->sem must
- * be held for write, or the master key must be still undergoing initialization.
+ * removed by another user with the key. Either ->mk_sem must be held for
+ * write, or the master key must be still undergoing initialization.
*/
static int add_master_key_user(struct fscrypt_master_key *mk)
{
@@ -309,7 +405,7 @@ static int add_master_key_user(struct fscrypt_master_key *mk)
/*
* Remove the current user's "key" from ->mk_users.
- * The master key's key->sem must be held for write.
+ * ->mk_sem must be held for write.
*
* Returns 0 if removed, -ENOKEY if not found, or another -errno code.
*/
@@ -327,63 +423,49 @@ static int remove_master_key_user(struct fscrypt_master_key *mk)
}
/*
- * Allocate a new fscrypt_master_key which contains the given secret, set it as
- * the payload of a new 'struct key' of type fscrypt, and link the 'struct key'
- * into the given keyring. Synchronized by fscrypt_add_key_mutex.
+ * Allocate a new fscrypt_master_key, transfer the given secret over to it, and
+ * insert it into sb->s_master_keys.
*/
-static int add_new_master_key(struct fscrypt_master_key_secret *secret,
- const struct fscrypt_key_specifier *mk_spec,
- struct key *keyring)
+static int add_new_master_key(struct super_block *sb,
+ struct fscrypt_master_key_secret *secret,
+ const struct fscrypt_key_specifier *mk_spec)
{
+ struct fscrypt_keyring *keyring = sb->s_master_keys;
struct fscrypt_master_key *mk;
- char description[FSCRYPT_MK_DESCRIPTION_SIZE];
- struct key *key;
int err;
mk = kzalloc(sizeof(*mk), GFP_KERNEL);
if (!mk)
return -ENOMEM;
+ init_rwsem(&mk->mk_sem);
+ refcount_set(&mk->mk_struct_refs, 1);
mk->mk_spec = *mk_spec;
- move_master_key_secret(&mk->mk_secret, secret);
-
- refcount_set(&mk->mk_refcount, 1); /* secret is present */
INIT_LIST_HEAD(&mk->mk_decrypted_inodes);
spin_lock_init(&mk->mk_decrypted_inodes_lock);
if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
err = allocate_master_key_users_keyring(mk);
if (err)
- goto out_free_mk;
+ goto out_put;
err = add_master_key_user(mk);
if (err)
- goto out_free_mk;
+ goto out_put;
}
- /*
- * Note that we don't charge this key to anyone's quota, since when
- * ->mk_users is in use those keys are charged instead, and otherwise
- * (when ->mk_users isn't in use) only root can add these keys.
- */
- format_mk_description(description, mk_spec);
- key = key_alloc(&key_type_fscrypt, description,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
- KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW,
- KEY_ALLOC_NOT_IN_QUOTA, NULL);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto out_free_mk;
- }
- err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL);
- key_put(key);
- if (err)
- goto out_free_mk;
+ move_master_key_secret(&mk->mk_secret, secret);
+ mk->mk_present = true;
+ refcount_set(&mk->mk_active_refs, 1); /* ->mk_present is true */
+ spin_lock(&keyring->lock);
+ hlist_add_head_rcu(&mk->mk_node,
+ fscrypt_mk_hash_bucket(keyring, mk_spec));
+ spin_unlock(&keyring->lock);
return 0;
-out_free_mk:
- free_master_key(mk);
+out_put:
+ fscrypt_put_master_key(mk);
return err;
}
@@ -392,42 +474,41 @@ out_free_mk:
static int add_existing_master_key(struct fscrypt_master_key *mk,
struct fscrypt_master_key_secret *secret)
{
- struct key *mk_user;
- bool rekey;
int err;
/*
* If the current user is already in ->mk_users, then there's nothing to
- * do. (Not applicable for v1 policy keys, which have NULL ->mk_users.)
+ * do. Otherwise, we need to add the user to ->mk_users. (Neither is
+ * applicable for v1 policy keys, which have NULL ->mk_users.)
*/
if (mk->mk_users) {
- mk_user = find_master_key_user(mk);
+ struct key *mk_user = find_master_key_user(mk);
+
if (mk_user != ERR_PTR(-ENOKEY)) {
if (IS_ERR(mk_user))
return PTR_ERR(mk_user);
key_put(mk_user);
return 0;
}
- }
-
- /* If we'll be re-adding ->mk_secret, try to take the reference. */
- rekey = !is_master_key_secret_present(&mk->mk_secret);
- if (rekey && !refcount_inc_not_zero(&mk->mk_refcount))
- return KEY_DEAD;
-
- /* Add the current user to ->mk_users, if applicable. */
- if (mk->mk_users) {
err = add_master_key_user(mk);
- if (err) {
- if (rekey && refcount_dec_and_test(&mk->mk_refcount))
- return KEY_DEAD;
+ if (err)
return err;
- }
}
- /* Re-add the secret if needed. */
- if (rekey)
+ /* If the key is incompletely removed, make it present again. */
+ if (!mk->mk_present) {
+ if (!refcount_inc_not_zero(&mk->mk_active_refs)) {
+ /*
+ * Raced with the last active ref being dropped, so the
+ * key has become, or is about to become, "absent".
+ * Therefore, we need to allocate a new key struct.
+ */
+ return KEY_DEAD;
+ }
move_master_key_secret(&mk->mk_secret, secret);
+ WRITE_ONCE(mk->mk_present, true);
+ }
+
return 0;
}
@@ -436,38 +517,36 @@ static int do_add_master_key(struct super_block *sb,
const struct fscrypt_key_specifier *mk_spec)
{
static DEFINE_MUTEX(fscrypt_add_key_mutex);
- struct key *key;
+ struct fscrypt_master_key *mk;
int err;
mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */
-retry:
- key = fscrypt_find_master_key(sb, mk_spec);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- if (err != -ENOKEY)
- goto out_unlock;
+
+ mk = fscrypt_find_master_key(sb, mk_spec);
+ if (!mk) {
/* Didn't find the key in ->s_master_keys. Add it. */
err = allocate_filesystem_keyring(sb);
- if (err)
- goto out_unlock;
- err = add_new_master_key(secret, mk_spec, sb->s_master_keys);
+ if (!err)
+ err = add_new_master_key(sb, secret, mk_spec);
} else {
/*
- * Found the key in ->s_master_keys. Re-add the secret if
- * needed, and add the user to ->mk_users if needed.
+ * Found the key in ->s_master_keys. Add the user to ->mk_users
+ * if needed, and make the key "present" again if possible.
*/
- down_write(&key->sem);
- err = add_existing_master_key(key->payload.data[0], secret);
- up_write(&key->sem);
+ down_write(&mk->mk_sem);
+ err = add_existing_master_key(mk, secret);
+ up_write(&mk->mk_sem);
if (err == KEY_DEAD) {
- /* Key being removed or needs to be removed */
- key_invalidate(key);
- key_put(key);
- goto retry;
+ /*
+ * We found a key struct, but it's already been fully
+ * removed. Ignore the old struct and add a new one.
+ * fscrypt_add_key_mutex means we don't need to worry
+ * about concurrent adds.
+ */
+ err = add_new_master_key(sb, secret, mk_spec);
}
- key_put(key);
+ fscrypt_put_master_key(mk);
}
-out_unlock:
mutex_unlock(&fscrypt_add_key_mutex);
return err;
}
@@ -479,41 +558,79 @@ static int add_master_key(struct super_block *sb,
int err;
if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
- err = fscrypt_init_hkdf(&secret->hkdf, secret->raw,
- secret->size);
- if (err)
- return err;
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
+ u8 *kdf_key = secret->bytes;
+ unsigned int kdf_key_size = secret->size;
+ u8 keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY;
/*
- * Now that the HKDF context is initialized, the raw key is no
- * longer needed.
+ * For raw keys, the fscrypt master key is used directly as the
+ * fscrypt KDF key. For hardware-wrapped keys, we have to pass
+ * the master key to the hardware to derive the KDF key, which
+ * is then only used to derive non-file-contents subkeys.
*/
- memzero_explicit(secret->raw, secret->size);
+ if (secret->is_hw_wrapped) {
+ err = fscrypt_derive_sw_secret(sb, secret->bytes,
+ secret->size, sw_secret);
+ if (err)
+ return err;
+ kdf_key = sw_secret;
+ kdf_key_size = sizeof(sw_secret);
+ /*
+ * To avoid weird behavior if someone manages to
+ * determine sw_secret and add it as a raw key, ensure
+ * that hardware-wrapped keys and raw keys will have
+ * different key identifiers by deriving their key
+ * identifiers using different KDF contexts.
+ */
+ keyid_kdf_ctx =
+ HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY;
+ }
+ fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size);
+ /*
+ * Now that the KDF context is initialized, the raw KDF key is
+ * no longer needed.
+ */
+ memzero_explicit(kdf_key, kdf_key_size);
/* Calculate the key identifier */
- err = fscrypt_hkdf_expand(&secret->hkdf,
- HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0,
- key_spec->u.identifier,
- FSCRYPT_KEY_IDENTIFIER_SIZE);
- if (err)
- return err;
+ fscrypt_hkdf_expand(&secret->hkdf, keyid_kdf_ctx, NULL, 0,
+ key_spec->u.identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE);
}
return do_add_master_key(sb, secret, key_spec);
}
+/*
+ * Validate the size of an fscrypt master key being added. Note that this is
+ * just an initial check, as we don't know which ciphers will be used yet.
+ * There is a stricter size check later when the key is actually used by a file.
+ */
+static inline bool fscrypt_valid_key_size(size_t size, u32 add_key_flags)
+{
+ u32 max_size = (add_key_flags & FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ?
+ FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE :
+ FSCRYPT_MAX_RAW_KEY_SIZE;
+
+ return size >= FSCRYPT_MIN_KEY_SIZE && size <= max_size;
+}
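For example, a 64-byte key (the FSCRYPT_MAX_RAW_KEY_SIZE limit) passes this check with or without FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED, while an 80-byte blob is rejected unless the flag is set, since only hardware-wrapped blobs may exceed the raw-key limit (up to FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE).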
+
static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep)
{
const struct fscrypt_provisioning_key_payload *payload = prep->data;
- if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
- prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE)
+ if (prep->datalen < sizeof(*payload))
+ return -EINVAL;
+
+ if (!fscrypt_valid_key_size(prep->datalen - sizeof(*payload),
+ payload->flags))
return -EINVAL;
if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
return -EINVAL;
- if (payload->__reserved)
+ if (payload->flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
return -EINVAL;
prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL);
@@ -557,21 +674,21 @@ static struct key_type key_type_fscrypt_provisioning = {
};
/*
- * Retrieve the raw key from the Linux keyring key specified by 'key_id', and
- * store it into 'secret'.
+ * Retrieve the key from the Linux keyring key specified by 'key_id', and store
+ * it into 'secret'.
*
- * The key must be of type "fscrypt-provisioning" and must have the field
- * fscrypt_provisioning_key_payload::type set to 'type', indicating that it's
- * only usable with fscrypt with the particular KDF version identified by
- * 'type'. We don't use the "logon" key type because there's no way to
- * completely restrict the use of such keys; they can be used by any kernel API
- * that accepts "logon" keys and doesn't require a specific service prefix.
+ * The key must be of type "fscrypt-provisioning" and must have the 'type' and
+ * 'flags' field of the payload set to the given values, indicating that the key
+ * is intended for use for the specified purpose. We don't use the "logon" key
+ * type because there's no way to completely restrict the use of such keys; they
+ * can be used by any kernel API that accepts "logon" keys and doesn't require a
+ * specific service prefix.
*
* The ability to specify the key via Linux keyring key is intended for cases
* where userspace needs to re-add keys after the filesystem is unmounted and
- * re-mounted. Most users should just provide the raw key directly instead.
+ * re-mounted. Most users should just provide the key directly instead.
*/
-static int get_keyring_key(u32 key_id, u32 type,
+static int get_keyring_key(u32 key_id, u32 type, u32 flags,
struct fscrypt_master_key_secret *secret)
{
key_ref_t ref;
@@ -588,12 +705,16 @@ static int get_keyring_key(u32 key_id, u32 type,
goto bad_key;
payload = key->payload.data[0];
- /* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */
- if (payload->type != type)
+ /*
+ * Don't allow fscrypt v1 keys to be used as v2 keys and vice versa.
+ * Similarly, don't allow hardware-wrapped keys to be used as
+ * non-hardware-wrapped keys and vice versa.
+ */
+ if (payload->type != type || payload->flags != flags)
goto bad_key;
secret->size = key->datalen - sizeof(*payload);
- memcpy(secret->raw, payload->raw, secret->size);
+ memcpy(secret->bytes, payload->raw, secret->size);
err = 0;
goto out_put;
@@ -655,19 +776,28 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
return -EACCES;
memset(&secret, 0, sizeof(secret));
+
+ if (arg.flags) {
+ if (arg.flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
+ return -EINVAL;
+ if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
+ return -EINVAL;
+ secret.is_hw_wrapped = true;
+ }
+
if (arg.key_id) {
if (arg.raw_size != 0)
return -EINVAL;
- err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
+ err = get_keyring_key(arg.key_id, arg.key_spec.type, arg.flags,
+ &secret);
if (err)
goto out_wipe_secret;
} else {
- if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
- arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
+ if (!fscrypt_valid_key_size(arg.raw_size, arg.flags))
return -EINVAL;
secret.size = arg.raw_size;
err = -EFAULT;
- if (copy_from_user(secret.raw, uarg->raw, secret.size))
+ if (copy_from_user(secret.bytes, uarg->raw, secret.size))
goto out_wipe_secret;
}
@@ -688,24 +818,50 @@ out_wipe_secret:
}
EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key);
-/*
- * Add the key for '-o test_dummy_encryption' to the filesystem keyring.
+static void
+fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret)
+{
+ static u8 test_key[FSCRYPT_MAX_RAW_KEY_SIZE];
+
+ get_random_once(test_key, sizeof(test_key));
+
+ memset(secret, 0, sizeof(*secret));
+ secret->size = sizeof(test_key);
+ memcpy(secret->bytes, test_key, sizeof(test_key));
+}
+
+void fscrypt_get_test_dummy_key_identifier(
+ u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
+{
+ struct fscrypt_master_key_secret secret;
+
+ fscrypt_get_test_dummy_secret(&secret);
+ fscrypt_init_hkdf(&secret.hkdf, secret.bytes, secret.size);
+ fscrypt_hkdf_expand(&secret.hkdf,
+ HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY, NULL, 0,
+ key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
+ wipe_master_key_secret(&secret);
+}
+
+/**
+ * fscrypt_add_test_dummy_key() - add the test dummy encryption key
+ * @sb: the filesystem instance to add the key to
+ * @key_spec: the key specifier of the test dummy encryption key
+ *
+ * Add the key for the test_dummy_encryption mount option to the filesystem. To
+ * prevent misuse of this mount option, a per-boot random key is used instead of
+ * a hardcoded one. This makes it so that any encrypted files created using
+ * this option won't be accessible after a reboot.
*
- * Use a per-boot random key to prevent people from misusing this option.
+ * Return: 0 on success, -errno on failure
*/
int fscrypt_add_test_dummy_key(struct super_block *sb,
struct fscrypt_key_specifier *key_spec)
{
- static u8 test_key[FSCRYPT_MAX_KEY_SIZE];
struct fscrypt_master_key_secret secret;
int err;
- get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE);
-
- memset(&secret, 0, sizeof(secret));
- secret.size = FSCRYPT_MAX_KEY_SIZE;
- memcpy(secret.raw, test_key, FSCRYPT_MAX_KEY_SIZE);
-
+ fscrypt_get_test_dummy_secret(&secret);
err = add_master_key(sb, &secret, key_spec);
wipe_master_key_secret(&secret);
return err;
@@ -731,19 +887,19 @@ int fscrypt_verify_key_added(struct super_block *sb,
const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
struct fscrypt_key_specifier mk_spec;
- struct key *key, *mk_user;
struct fscrypt_master_key *mk;
+ struct key *mk_user;
int err;
mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
- key = fscrypt_find_master_key(sb, &mk_spec);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
+ mk = fscrypt_find_master_key(sb, &mk_spec);
+ if (!mk) {
+ err = -ENOKEY;
goto out;
}
- mk = key->payload.data[0];
+ down_read(&mk->mk_sem);
mk_user = find_master_key_user(mk);
if (IS_ERR(mk_user)) {
err = PTR_ERR(mk_user);
@@ -751,7 +907,8 @@ int fscrypt_verify_key_added(struct super_block *sb,
key_put(mk_user);
err = 0;
}
- key_put(key);
+ up_read(&mk->mk_sem);
+ fscrypt_put_master_key(mk);
out:
if (err == -ENOKEY && capable(CAP_FOWNER))
err = 0;
@@ -779,7 +936,7 @@ static void shrink_dcache_inode(struct inode *inode)
static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
{
- struct fscrypt_info *ci;
+ struct fscrypt_inode_info *ci;
struct inode *inode;
struct inode *toput_inode = NULL;
@@ -788,7 +945,7 @@ static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) {
inode = ci->ci_inode;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}
@@ -829,7 +986,7 @@ static int check_for_busy_inodes(struct super_block *sb,
/* select an example file to show for debugging purposes */
struct inode *inode =
list_first_entry(&mk->mk_decrypted_inodes,
- struct fscrypt_info,
+ struct fscrypt_inode_info,
ci_master_key_link)->ci_inode;
ino = inode->i_ino;
}
@@ -895,15 +1052,14 @@ static int try_to_lock_encrypted_files(struct super_block *sb,
* FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the
* key itself.
*
- * To "remove the key itself", first we wipe the actual master key secret, so
- * that no more inodes can be unlocked with it. Then we try to evict all cached
- * inodes that had been unlocked with the key.
+ * To "remove the key itself", first we transition the key to the "incompletely
+ * removed" state, so that no more inodes can be unlocked with it. Then we try
+ * to evict all cached inodes that had been unlocked with the key.
*
* If all inodes were evicted, then we unlink the fscrypt_master_key from the
* keyring. Otherwise it remains in the keyring in the "incompletely removed"
- * state (without the actual secret key) where it tracks the list of remaining
- * inodes. Userspace can execute the ioctl again later to retry eviction, or
- * alternatively can re-add the secret key again.
+ * state where it tracks the list of remaining inodes. Userspace can execute
+ * the ioctl again later to retry eviction, or alternatively can re-add the key.
*
* For more details, see the "Removing keys" section of
* Documentation/filesystems/fscrypt.rst.
@@ -913,11 +1069,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
struct super_block *sb = file_inode(filp)->i_sb;
struct fscrypt_remove_key_arg __user *uarg = _uarg;
struct fscrypt_remove_key_arg arg;
- struct key *key;
struct fscrypt_master_key *mk;
u32 status_flags = 0;
int err;
- bool dead;
+ bool inodes_remain;
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
@@ -937,12 +1092,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
return -EACCES;
/* Find the key being removed. */
- key = fscrypt_find_master_key(sb, &arg.key_spec);
- if (IS_ERR(key))
- return PTR_ERR(key);
- mk = key->payload.data[0];
-
- down_write(&key->sem);
+ mk = fscrypt_find_master_key(sb, &arg.key_spec);
+ if (!mk)
+ return -ENOKEY;
+ down_write(&mk->mk_sem);
/* If relevant, remove current user's (or all users) claim to the key */
if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) {
@@ -951,7 +1104,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
else
err = remove_master_key_user(mk);
if (err) {
- up_write(&key->sem);
+ up_write(&mk->mk_sem);
goto out_put_key;
}
if (mk->mk_users->keys.nr_leaves_on_tree != 0) {
@@ -963,26 +1116,21 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
status_flags |=
FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS;
err = 0;
- up_write(&key->sem);
+ up_write(&mk->mk_sem);
goto out_put_key;
}
}
- /* No user claims remaining. Go ahead and wipe the secret. */
- dead = false;
- if (is_master_key_secret_present(&mk->mk_secret)) {
- wipe_master_key_secret(&mk->mk_secret);
- dead = refcount_dec_and_test(&mk->mk_refcount);
- }
- up_write(&key->sem);
- if (dead) {
- /*
- * No inodes reference the key, and we wiped the secret, so the
- * key object is free to be removed from the keyring.
- */
- key_invalidate(key);
+ /* No user claims remaining. Initiate removal of the key. */
+ err = -ENOKEY;
+ if (mk->mk_present) {
+ fscrypt_initiate_key_removal(sb, mk);
err = 0;
- } else {
+ }
+ inodes_remain = refcount_read(&mk->mk_active_refs) > 0;
+ up_write(&mk->mk_sem);
+
+ if (inodes_remain) {
/* Some inodes still reference this key; try to evict them. */
err = try_to_lock_encrypted_files(sb, mk);
if (err == -EBUSY) {
@@ -993,12 +1141,12 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
}
/*
* We return 0 if we successfully did something: removed a claim to the
- * key, wiped the secret, or tried locking the files again. Users need
- * to check the informational status flags if they care whether the key
- * has been fully removed including all files locked.
+ * key, initiated removal of the key, or tried locking the files again.
+ * Users need to check the informational status flags if they care
+ * whether the key has been fully removed including all files locked.
*/
out_put_key:
- key_put(key);
+ fscrypt_put_master_key(mk);
if (err == 0)
err = put_user(status_flags, &uarg->removal_status_flags);
return err;
@@ -1022,12 +1170,11 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users);
* Retrieve the status of an fscrypt master encryption key.
*
* We set ->status to indicate whether the key is absent, present, or
- * incompletely removed. "Incompletely removed" means that the master key
- * secret has been removed, but some files which had been unlocked with it are
- * still in use. This field allows applications to easily determine the state
- * of an encrypted directory without using a hack such as trying to open a
- * regular file in it (which can confuse the "incompletely removed" state with
- * absent or present).
+ * incompletely removed. (For an explanation of what these statuses mean and
+ * how they are represented internally, see struct fscrypt_master_key.) This
+ * field allows applications to easily determine the status of an encrypted
+ * directory without using a hack such as trying to open a regular file in it
+ * (which can confuse the "incompletely removed" status with absent or present).
*
* In addition, for v2 policy keys we allow applications to determine, via
* ->status_flags and ->user_count, whether the key has been added by the
@@ -1045,7 +1192,6 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
{
struct super_block *sb = file_inode(filp)->i_sb;
struct fscrypt_get_key_status_arg arg;
- struct key *key;
struct fscrypt_master_key *mk;
int err;
@@ -1062,19 +1208,18 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
arg.user_count = 0;
memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved));
- key = fscrypt_find_master_key(sb, &arg.key_spec);
- if (IS_ERR(key)) {
- if (key != ERR_PTR(-ENOKEY))
- return PTR_ERR(key);
+ mk = fscrypt_find_master_key(sb, &arg.key_spec);
+ if (!mk) {
arg.status = FSCRYPT_KEY_STATUS_ABSENT;
err = 0;
goto out;
}
- mk = key->payload.data[0];
- down_read(&key->sem);
+ down_read(&mk->mk_sem);
- if (!is_master_key_secret_present(&mk->mk_secret)) {
- arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED;
+ if (!mk->mk_present) {
+ arg.status = refcount_read(&mk->mk_active_refs) > 0 ?
+ FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED :
+ FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */;
err = 0;
goto out_release_key;
}
@@ -1096,8 +1241,8 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
}
err = 0;
out_release_key:
- up_read(&key->sem);
- key_put(key);
+ up_read(&mk->mk_sem);
+ fscrypt_put_master_key(mk);
out:
if (!err && copy_to_user(uarg, &arg, sizeof(arg)))
err = -EFAULT;
@@ -1109,13 +1254,9 @@ int __init fscrypt_init_keyring(void)
{
int err;
- err = register_key_type(&key_type_fscrypt);
- if (err)
- return err;
-
err = register_key_type(&key_type_fscrypt_user);
if (err)
- goto err_unregister_fscrypt;
+ return err;
err = register_key_type(&key_type_fscrypt_provisioning);
if (err)
@@ -1125,7 +1266,5 @@ int __init fscrypt_init_keyring(void)
err_unregister_fscrypt_user:
unregister_key_type(&key_type_fscrypt_user);
-err_unregister_fscrypt:
- unregister_key_type(&key_type_fscrypt);
return err;
}
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index eede186b04ce..40fa05688d3a 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -9,7 +9,7 @@
*/
#include <crypto/skcipher.h>
-#include <linux/key.h>
+#include <linux/export.h>
#include <linux/random.h>
#include "fscrypt_private.h"
@@ -24,7 +24,7 @@ struct fscrypt_mode fscrypt_modes[] = {
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
},
[FSCRYPT_MODE_AES_256_CTS] = {
- .friendly_name = "AES-256-CTS-CBC",
+ .friendly_name = "AES-256-CBC-CTS",
.cipher_str = "cts(cbc(aes))",
.keysize = 32,
.security_strength = 32,
@@ -39,12 +39,27 @@ struct fscrypt_mode fscrypt_modes[] = {
.blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
},
[FSCRYPT_MODE_AES_128_CTS] = {
- .friendly_name = "AES-128-CTS-CBC",
+ .friendly_name = "AES-128-CBC-CTS",
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
.security_strength = 16,
.ivsize = 16,
},
+ [FSCRYPT_MODE_SM4_XTS] = {
+ .friendly_name = "SM4-XTS",
+ .cipher_str = "xts(sm4)",
+ .keysize = 32,
+ .security_strength = 16,
+ .ivsize = 16,
+ .blk_crypto_mode = BLK_ENCRYPTION_MODE_SM4_XTS,
+ },
+ [FSCRYPT_MODE_SM4_CTS] = {
+ .friendly_name = "SM4-CBC-CTS",
+ .cipher_str = "cts(cbc(sm4))",
+ .keysize = 16,
+ .security_strength = 16,
+ .ivsize = 16,
+ },
[FSCRYPT_MODE_ADIANTUM] = {
.friendly_name = "Adiantum",
.cipher_str = "adiantum(xchacha12,aes)",
@@ -53,6 +68,13 @@ struct fscrypt_mode fscrypt_modes[] = {
.ivsize = 32,
.blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM,
},
+ [FSCRYPT_MODE_AES_256_HCTR2] = {
+ .friendly_name = "AES-256-HCTR2",
+ .cipher_str = "hctr2(aes)",
+ .keysize = 32,
+ .security_strength = 32,
+ .ivsize = 32,
+ },
};
static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex);
@@ -75,14 +97,15 @@ select_encryption_mode(const union fscrypt_policy *policy,
}
/* Create a symmetric cipher object for the given encryption mode and key */
-static struct crypto_skcipher *
+static struct crypto_sync_skcipher *
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
const struct inode *inode)
{
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
int err;
- tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
+ tfm = crypto_alloc_sync_skcipher(mode->cipher_str, 0,
+ FSCRYPT_CRYPTOAPI_MASK);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
fscrypt_warn(inode,
@@ -94,7 +117,7 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
mode->cipher_str, PTR_ERR(tfm));
return tfm;
}
- if (!xchg(&mode->logged_impl_name, 1)) {
+ if (!xchg(&mode->logged_cryptoapi_impl, 1)) {
/*
* fscrypt performance can vary greatly depending on which
* crypto algorithm implementation is used. Help people debug
@@ -102,21 +125,22 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
* first time a mode is used.
*/
pr_info("fscrypt: %s using implementation \"%s\"\n",
- mode->friendly_name, crypto_skcipher_driver_name(tfm));
+ mode->friendly_name,
+ crypto_skcipher_driver_name(&tfm->base));
}
- if (WARN_ON(crypto_skcipher_ivsize(tfm) != mode->ivsize)) {
+ if (WARN_ON_ONCE(crypto_sync_skcipher_ivsize(tfm) != mode->ivsize)) {
err = -EINVAL;
goto err_free_tfm;
}
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
+ crypto_sync_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ err = crypto_sync_skcipher_setkey(tfm, raw_key, mode->keysize);
if (err)
goto err_free_tfm;
return tfm;
err_free_tfm:
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
return ERR_PTR(err);
}
@@ -127,12 +151,14 @@ err_free_tfm:
* and IV generation method (@ci->ci_policy.flags).
*/
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, const struct fscrypt_info *ci)
+ const u8 *raw_key, const struct fscrypt_inode_info *ci)
{
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
if (fscrypt_using_inline_encryption(ci))
- return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci);
+ return fscrypt_prepare_inline_crypt_key(prep_key, raw_key,
+ ci->ci_mode->keysize,
+ false, ci);
tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
if (IS_ERR(tfm))
@@ -148,20 +174,23 @@ int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
}
/* Destroy a crypto transform object and/or blk-crypto key. */
-void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
+void fscrypt_destroy_prepared_key(struct super_block *sb,
+ struct fscrypt_prepared_key *prep_key)
{
- crypto_free_skcipher(prep_key->tfm);
- fscrypt_destroy_inline_crypt_key(prep_key);
+ crypto_free_sync_skcipher(prep_key->tfm);
+ fscrypt_destroy_inline_crypt_key(sb, prep_key);
+ memzero_explicit(prep_key, sizeof(*prep_key));
}
/* Given a per-file encryption key, set up the file's crypto transform object */
-int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
+int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_key)
{
ci->ci_owns_key = true;
return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci);
}
-static int setup_per_mode_enc_key(struct fscrypt_info *ci,
+static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
@@ -171,14 +200,29 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci,
struct fscrypt_mode *mode = ci->ci_mode;
const u8 mode_num = mode - fscrypt_modes;
struct fscrypt_prepared_key *prep_key;
- u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+ u8 mode_key[FSCRYPT_MAX_RAW_KEY_SIZE];
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
unsigned int hkdf_infolen = 0;
+ bool use_hw_wrapped_key = false;
int err;
- if (WARN_ON(mode_num > FSCRYPT_MODE_MAX))
+ if (WARN_ON_ONCE(mode_num > FSCRYPT_MODE_MAX))
return -EINVAL;
+ if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) {
+ /* Using a hardware-wrapped key for file contents encryption */
+ if (!fscrypt_using_inline_encryption(ci)) {
+ if (sb->s_flags & SB_INLINECRYPT)
+ fscrypt_warn(ci->ci_inode,
+ "Hardware-wrapped key required, but no suitable inline encryption capabilities are available");
+ else
+ fscrypt_warn(ci->ci_inode,
+ "Hardware-wrapped keys require inline encryption (-o inlinecrypt)");
+ return -EINVAL;
+ }
+ use_hw_wrapped_key = true;
+ }
+
prep_key = &keys[mode_num];
if (fscrypt_is_key_prepared(prep_key, ci)) {
ci->ci_enc_key = *prep_key;
@@ -190,6 +234,16 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci,
if (fscrypt_is_key_prepared(prep_key, ci))
goto done_unlock;
+ if (use_hw_wrapped_key) {
+ err = fscrypt_prepare_inline_crypt_key(prep_key,
+ mk->mk_secret.bytes,
+ mk->mk_secret.size, true,
+ ci);
+ if (err)
+ goto out_unlock;
+ goto done_unlock;
+ }
+
BUILD_BUG_ON(sizeof(mode_num) != 1);
BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
BUILD_BUG_ON(sizeof(hkdf_info) != 17);
@@ -199,11 +253,8 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci,
sizeof(sb->s_uuid));
hkdf_infolen += sizeof(sb->s_uuid);
}
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- hkdf_context, hkdf_info, hkdf_infolen,
- mode_key, mode->keysize);
- if (err)
- goto out_unlock;
+ fscrypt_hkdf_expand(&mk->mk_secret.hkdf, hkdf_context, hkdf_info,
+ hkdf_infolen, mode_key, mode->keysize);
err = fscrypt_prepare_key(prep_key, mode_key, ci);
memzero_explicit(mode_key, mode->keysize);
if (err)
@@ -224,49 +275,38 @@ out_unlock:
* as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an
* endianness swap in order to get the same results as on little endian CPUs.
*/
-static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
- u8 context, const u8 *info,
- unsigned int infolen, siphash_key_t *key)
+static void fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
+ u8 context, const u8 *info,
+ unsigned int infolen, siphash_key_t *key)
{
- int err;
-
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
- (u8 *)key, sizeof(*key));
- if (err)
- return err;
-
+ fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
+ (u8 *)key, sizeof(*key));
BUILD_BUG_ON(sizeof(*key) != 16);
BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
le64_to_cpus(&key->key[0]);
le64_to_cpus(&key->key[1]);
- return 0;
}
-int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
- const struct fscrypt_master_key *mk)
+void fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
+ const struct fscrypt_master_key *mk)
{
- int err;
-
- err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
- &ci->ci_dirhash_key);
- if (err)
- return err;
+ fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
+ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
+ &ci->ci_dirhash_key);
ci->ci_dirhash_key_initialized = true;
- return 0;
}
-void fscrypt_hash_inode_number(struct fscrypt_info *ci,
+void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk)
{
- WARN_ON(ci->ci_inode->i_ino == 0);
- WARN_ON(!mk->mk_ino_hash_key_initialized);
+ WARN_ON_ONCE(ci->ci_inode->i_ino == 0);
+ WARN_ON_ONCE(!mk->mk_ino_hash_key_initialized);
ci->ci_hashed_ino = (u32)siphash_1u64(ci->ci_inode->i_ino,
&mk->mk_ino_hash_key);
}
-static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
+static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk)
{
int err;
@@ -284,17 +324,12 @@ static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
if (mk->mk_ino_hash_key_initialized)
goto unlock;
- err = fscrypt_derive_siphash_key(mk,
- HKDF_CONTEXT_INODE_HASH_KEY,
- NULL, 0, &mk->mk_ino_hash_key);
- if (err)
- goto unlock;
+ fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_INODE_HASH_KEY,
+ NULL, 0, &mk->mk_ino_hash_key);
/* pairs with smp_load_acquire() above */
smp_store_release(&mk->mk_ino_hash_key_initialized, true);
unlock:
mutex_unlock(&fscrypt_mode_key_setup_mutex);
- if (err)
- return err;
}
/*
@@ -306,12 +341,20 @@ unlock:
return 0;
}
-static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
+static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk,
bool need_dirhash_key)
{
int err;
+ if (mk->mk_secret.is_hw_wrapped &&
+ !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) {
+ fscrypt_warn(ci->ci_inode,
+ "Hardware-wrapped keys are only supported with IV_INO_LBLK policies");
+ return -EINVAL;
+ }
+
if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
/*
* DIRECT_KEY: instead of deriving per-file encryption keys, the
@@ -338,15 +381,12 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk);
} else {
- u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
-
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_PER_FILE_ENC_KEY,
- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
- derived_key, ci->ci_mode->keysize);
- if (err)
- return err;
+ u8 derived_key[FSCRYPT_MAX_RAW_KEY_SIZE];
+ fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+ HKDF_CONTEXT_PER_FILE_ENC_KEY,
+ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
+ derived_key, ci->ci_mode->keysize);
err = fscrypt_set_per_file_enc_key(ci, derived_key);
memzero_explicit(derived_key, ci->ci_mode->keysize);
}
@@ -354,11 +394,8 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
return err;
/* Derive a secret dirhash key for directories that need it. */
- if (need_dirhash_key) {
- err = fscrypt_derive_dirhash_key(ci, mk);
- if (err)
- return err;
- }
+ if (need_dirhash_key)
+ fscrypt_derive_dirhash_key(ci, mk);
return 0;
}
@@ -381,7 +418,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
* still allow 512-bit master keys if the user chooses to use them, though.)
*/
static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
unsigned int min_keysize;
@@ -405,49 +442,52 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
/*
* Find the master key, then set up the inode's actual encryption key.
*
- * If the master key is found in the filesystem-level keyring, then the
- * corresponding 'struct key' is returned in *master_key_ret with its semaphore
- * read-locked. This is needed to ensure that only one task links the
- * fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race to create
- * an fscrypt_info for the same inode), and to synchronize the master key being
- * removed with a new inode starting to use it.
+ * If the master key is found in the filesystem-level keyring, then it is
+ * returned in *mk_ret with its semaphore read-locked. This is needed to ensure
+ * that only one task links the fscrypt_inode_info into ->mk_decrypted_inodes
+ * (as multiple tasks may race to create an fscrypt_inode_info for the same
+ * inode), and to synchronize the master key being removed with a new inode
+ * starting to use it.
*/
-static int setup_file_encryption_key(struct fscrypt_info *ci,
+static int setup_file_encryption_key(struct fscrypt_inode_info *ci,
bool need_dirhash_key,
- struct key **master_key_ret)
+ struct fscrypt_master_key **mk_ret)
{
- struct key *key;
- struct fscrypt_master_key *mk = NULL;
+ struct super_block *sb = ci->ci_inode->i_sb;
struct fscrypt_key_specifier mk_spec;
+ struct fscrypt_master_key *mk;
int err;
- err = fscrypt_select_encryption_impl(ci);
+ err = fscrypt_policy_to_key_spec(&ci->ci_policy, &mk_spec);
if (err)
return err;
- switch (ci->ci_policy.version) {
- case FSCRYPT_POLICY_V1:
- mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
- memcpy(mk_spec.u.descriptor,
- ci->ci_policy.v1.master_key_descriptor,
- FSCRYPT_KEY_DESCRIPTOR_SIZE);
- break;
- case FSCRYPT_POLICY_V2:
- mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
- memcpy(mk_spec.u.identifier,
- ci->ci_policy.v2.master_key_identifier,
- FSCRYPT_KEY_IDENTIFIER_SIZE);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
+ mk = fscrypt_find_master_key(sb, &mk_spec);
+ if (unlikely(!mk)) {
+ const union fscrypt_policy *dummy_policy =
+ fscrypt_get_dummy_policy(sb);
+
+ /*
+ * Add the test_dummy_encryption key on-demand. In principle,
+ * it should be added at mount time. Do it here instead so that
+ * the individual filesystems don't need to worry about adding
+ * this key at mount time and cleaning up on mount failure.
+ */
+ if (dummy_policy &&
+ fscrypt_policies_equal(dummy_policy, &ci->ci_policy)) {
+ err = fscrypt_add_test_dummy_key(sb, &mk_spec);
+ if (err)
+ return err;
+ mk = fscrypt_find_master_key(sb, &mk_spec);
+ }
}
+ if (unlikely(!mk)) {
+ if (ci->ci_policy.version != FSCRYPT_POLICY_V1)
+ return -ENOKEY;
- key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec);
- if (IS_ERR(key)) {
- if (key != ERR_PTR(-ENOKEY) ||
- ci->ci_policy.version != FSCRYPT_POLICY_V1)
- return PTR_ERR(key);
+ err = fscrypt_select_encryption_impl(ci, false);
+ if (err)
+ return err;
/*
* As a legacy fallback for v1 policies, search for the key in
@@ -457,12 +497,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
*/
return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci);
}
+ down_read(&mk->mk_sem);
- mk = key->payload.data[0];
- down_read(&key->sem);
-
- /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
- if (!is_master_key_secret_present(&mk->mk_secret)) {
+ if (!mk->mk_present) {
+ /* FS_IOC_REMOVE_ENCRYPTION_KEY has been executed on this key */
err = -ENOKEY;
goto out_release_key;
}
@@ -472,33 +510,45 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
goto out_release_key;
}
+ err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped);
+ if (err)
+ goto out_release_key;
+
switch (ci->ci_policy.version) {
case FSCRYPT_POLICY_V1:
- err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
+ if (WARN_ON_ONCE(mk->mk_secret.is_hw_wrapped)) {
+ /*
+ * This should never happen, as adding a v1 policy key
+ * that is hardware-wrapped isn't allowed.
+ */
+ err = -EINVAL;
+ goto out_release_key;
+ }
+ err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.bytes);
break;
case FSCRYPT_POLICY_V2:
err = fscrypt_setup_v2_file_key(ci, mk, need_dirhash_key);
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
err = -EINVAL;
break;
}
if (err)
goto out_release_key;
- *master_key_ret = key;
+ *mk_ret = mk;
return 0;
out_release_key:
- up_read(&key->sem);
- key_put(key);
+ up_read(&mk->mk_sem);
+ fscrypt_put_master_key(mk);
return err;
}
-static void put_crypt_info(struct fscrypt_info *ci)
+static void put_crypt_info(struct fscrypt_inode_info *ci)
{
- struct key *key;
+ struct fscrypt_master_key *mk;
if (!ci)
return;
@@ -506,29 +556,24 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (ci->ci_direct_key)
fscrypt_put_direct_key(ci->ci_direct_key);
else if (ci->ci_owns_key)
- fscrypt_destroy_prepared_key(&ci->ci_enc_key);
-
- key = ci->ci_master_key;
- if (key) {
- struct fscrypt_master_key *mk = key->payload.data[0];
+ fscrypt_destroy_prepared_key(ci->ci_inode->i_sb,
+ &ci->ci_enc_key);
+ mk = ci->ci_master_key;
+ if (mk) {
/*
* Remove this inode from the list of inodes that were unlocked
- * with the master key.
- *
- * In addition, if we're removing the last inode from a key that
- * already had its secret removed, invalidate the key so that it
- * gets removed from ->s_master_keys.
+ * with the master key. In addition, if we're removing the last
+ * inode from an incompletely removed key, then complete the
+ * full removal of the key.
*/
spin_lock(&mk->mk_decrypted_inodes_lock);
list_del(&ci->ci_master_key_link);
spin_unlock(&mk->mk_decrypted_inodes_lock);
- if (refcount_dec_and_test(&mk->mk_refcount))
- key_invalidate(key);
- key_put(key);
+ fscrypt_put_master_key_activeref(ci->ci_inode->i_sb, mk);
}
memzero_explicit(ci, sizeof(*ci));
- kmem_cache_free(fscrypt_info_cachep, ci);
+ kmem_cache_free(fscrypt_inode_info_cachep, ci);
}
static int
@@ -537,16 +582,16 @@ fscrypt_setup_encryption_info(struct inode *inode,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
bool need_dirhash_key)
{
- struct fscrypt_info *crypt_info;
+ struct fscrypt_inode_info *crypt_info;
struct fscrypt_mode *mode;
- struct key *master_key = NULL;
+ struct fscrypt_master_key *mk = NULL;
int res;
- res = fscrypt_initialize(inode->i_sb->s_cop->flags);
+ res = fscrypt_initialize(inode->i_sb);
if (res)
return res;
- crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_KERNEL);
+ crypt_info = kmem_cache_zalloc(fscrypt_inode_info_cachep, GFP_KERNEL);
if (!crypt_info)
return -ENOMEM;
@@ -559,31 +604,33 @@ fscrypt_setup_encryption_info(struct inode *inode,
res = PTR_ERR(mode);
goto out;
}
- WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
+ WARN_ON_ONCE(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
crypt_info->ci_mode = mode;
- res = setup_file_encryption_key(crypt_info, need_dirhash_key,
- &master_key);
+ crypt_info->ci_data_unit_bits =
+ fscrypt_policy_du_bits(&crypt_info->ci_policy, inode);
+ crypt_info->ci_data_units_per_block_bits =
+ inode->i_blkbits - crypt_info->ci_data_unit_bits;
+
+ res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
if (res)
goto out;
/*
- * For existing inodes, multiple tasks may race to set ->i_crypt_info.
- * So use cmpxchg_release(). This pairs with the smp_load_acquire() in
- * fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a
- * RELEASE barrier so that other tasks can ACQUIRE it.
+ * For existing inodes, multiple tasks may race to set the inode's
+ * fscrypt info pointer. So use cmpxchg_release(). This pairs with the
+ * smp_load_acquire() in fscrypt_get_inode_info(). I.e., publish the
+ * pointer with a RELEASE barrier so that other tasks can ACQUIRE it.
*/
- if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) {
+ if (cmpxchg_release(fscrypt_inode_info_addr(inode), NULL, crypt_info) ==
+ NULL) {
/*
- * We won the race and set ->i_crypt_info to our crypt_info.
- * Now link it into the master key's inode list.
+ * We won the race and set the inode's fscrypt info to our
+ * crypt_info. Now link it into the master key's inode list.
*/
- if (master_key) {
- struct fscrypt_master_key *mk =
- master_key->payload.data[0];
-
- refcount_inc(&mk->mk_refcount);
- crypt_info->ci_master_key = key_get(master_key);
+ if (mk) {
+ crypt_info->ci_master_key = mk;
+ refcount_inc(&mk->mk_active_refs);
spin_lock(&mk->mk_decrypted_inodes_lock);
list_add(&crypt_info->ci_master_key_link,
&mk->mk_decrypted_inodes);
@@ -593,9 +640,9 @@ fscrypt_setup_encryption_info(struct inode *inode,
}
res = 0;
out:
- if (master_key) {
- up_read(&master_key->sem);
- key_put(master_key);
+ if (mk) {
+ up_read(&mk->mk_sem);
+ fscrypt_put_master_key(mk);
}
put_crypt_info(crypt_info);
return res;
@@ -610,13 +657,13 @@ out:
* %false unless the operation being performed is needed in
* order for files (or directories) to be deleted.
*
- * Set up ->i_crypt_info, if it hasn't already been done.
+ * Set up the inode's encryption key, if it hasn't already been done.
*
- * Note: unless ->i_crypt_info is already set, this isn't %GFP_NOFS-safe. So
+ * Note: unless the key setup was already done, this isn't %GFP_NOFS-safe. So
* generally this shouldn't be called from within a filesystem transaction.
*
- * Return: 0 if ->i_crypt_info was set or was already set, *or* if the
- * encryption key is unavailable. (Use fscrypt_has_encryption_key() to
+ * Return: 0 if the key is now set up, *or* if it couldn't be set up because the
+ * needed master key is absent. (Use fscrypt_has_encryption_key() to
* distinguish these cases.) Also can return another -errno code.
*/
int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
@@ -666,13 +713,13 @@ int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
/**
* fscrypt_prepare_new_inode() - prepare to create a new inode in a directory
* @dir: a possibly-encrypted directory
- * @inode: the new inode. ->i_mode must be set already.
+ * @inode: the new inode. ->i_mode and ->i_blkbits must be set already.
* ->i_ino doesn't need to be set yet.
* @encrypt_ret: (output) set to %true if the new inode will be encrypted
*
- * If the directory is encrypted, set up its ->i_crypt_info in preparation for
+ * If the directory is encrypted, set up its encryption key in preparation for
* encrypting the name of the new file. Also, if the new inode will be
- * encrypted, set up its ->i_crypt_info and set *encrypt_ret=true.
+ * encrypted, set up its encryption key too and set *encrypt_ret=true.
*
* This isn't %GFP_NOFS-safe, and therefore it should be called before starting
* any filesystem transaction to create the inode. For this reason, ->i_ino
@@ -681,8 +728,8 @@ int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported)
* This doesn't persist the new inode's encryption context. That still needs to
* be done later by calling fscrypt_set_context().
*
- * Return: 0 on success, -ENOKEY if the encryption key is missing, or another
- * -errno code
+ * Return: 0 on success, -ENOKEY if a key needs to be set up for @dir or @inode
+ * but the needed master key is absent, or another -errno code
*/
int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
bool *encrypt_ret)
@@ -696,6 +743,9 @@ int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
if (IS_ERR(policy))
return PTR_ERR(policy);
+ if (WARN_ON_ONCE(inode->i_blkbits == 0))
+ return -EINVAL;
+
if (WARN_ON_ONCE(inode->i_mode == 0))
return -EINVAL;
@@ -721,13 +771,21 @@ EXPORT_SYMBOL_GPL(fscrypt_prepare_new_inode);
* fscrypt_put_encryption_info() - free most of an inode's fscrypt data
* @inode: an inode being evicted
*
- * Free the inode's fscrypt_info. Filesystems must call this when the inode is
- * being evicted. An RCU grace period need not have elapsed yet.
+ * Free the inode's fscrypt_inode_info. Filesystems must call this when the
+ * inode is being evicted. An RCU grace period need not have elapsed yet.
*/
void fscrypt_put_encryption_info(struct inode *inode)
{
- put_crypt_info(inode->i_crypt_info);
- inode->i_crypt_info = NULL;
+ /*
+ * Ideally we'd start with a lightweight IS_ENCRYPTED() check here
+ * before proceeding to retrieve and check the pointer. However, during
+ * inode creation, the fscrypt_inode_info is set before S_ENCRYPTED. If
+ * an error occurs, it needs to be cleaned up regardless.
+ */
+ struct fscrypt_inode_info **ci_addr = fscrypt_inode_info_addr(inode);
+
+ put_crypt_info(*ci_addr);
+ *ci_addr = NULL;
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);
@@ -759,8 +817,7 @@ EXPORT_SYMBOL(fscrypt_free_inode);
*/
int fscrypt_drop_inode(struct inode *inode)
{
- const struct fscrypt_info *ci = fscrypt_get_info(inode);
- const struct fscrypt_master_key *mk;
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info(inode);
/*
* If ci is NULL, then the inode doesn't have an encryption key set up
@@ -770,7 +827,6 @@ int fscrypt_drop_inode(struct inode *inode)
*/
if (!ci || !ci->ci_master_key)
return 0;
- mk = ci->ci_master_key->payload.data[0];
/*
* With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
@@ -778,17 +834,18 @@ int fscrypt_drop_inode(struct inode *inode)
* userspace is still using the files, inodes can be dirtied between
* then and now. We mustn't lose any writes, so skip dirty inodes here.
*/
- if (inode->i_state & I_DIRTY_ALL)
+ if (inode_state_read(inode) & I_DIRTY_ALL)
return 0;
/*
- * Note: since we aren't holding the key semaphore, the result here can
+ * We can't take ->mk_sem here, since this runs in atomic context.
+ * Therefore, ->mk_present can change concurrently, and our result may
* immediately become outdated. But there's no correctness problem with
* unnecessarily evicting. Nor is there a correctness problem with not
* evicting while iput() is racing with the key being removed, since
* then the thread removing the key will either evict the inode itself
* or will correctly detect that it wasn't evicted due to the race.
*/
- return !is_master_key_secret_present(&mk->mk_secret);
+ return !READ_ONCE(ci->ci_master_key->mk_present);
}
EXPORT_SYMBOL_GPL(fscrypt_drop_inode);
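/*
 * Sketch of how a filesystem's inode-creation path is expected to use the
 * interface documented above. The myfs_*() helpers are hypothetical stand-ins
 * for filesystem-specific code; only the fscrypt_*() calls follow the
 * documented API.
 */
#include <linux/fs.h>
#include <linux/fscrypt.h>

static int myfs_create_inode(struct inode *dir, struct inode *inode)
{
	bool encrypt;
	int err;

	/* Not %GFP_NOFS-safe, so do this before starting the transaction. */
	err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
	if (err)
		return err;

	err = myfs_start_transaction(dir->i_sb);
	if (err)
		return err;

	/* ... allocate ->i_ino and link the inode into the directory ... */

	if (encrypt) {
		/* Persist the context prepared above; ->i_ino is now valid. */
		err = fscrypt_set_context(inode, NULL);
		if (err) {
			myfs_abort_transaction(dir->i_sb);
			return err;
		}
	}
	return myfs_commit_transaction(dir->i_sb);
}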
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 2762c5350432..c4d05168522b 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -20,8 +20,8 @@
* managed alongside the master keys in the filesystem-level keyring)
*/
-#include <crypto/algapi.h>
#include <crypto/skcipher.h>
+#include <crypto/utils.h>
#include <keys/user-type.h>
#include <linux/hashtable.h>
#include <linux/scatterlist.h>
@@ -48,39 +48,30 @@ static int derive_key_aes(const u8 *master_key,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
u8 *derived_key, unsigned int derived_keysize)
{
- int res = 0;
- struct skcipher_request *req = NULL;
- DECLARE_CRYPTO_WAIT(wait);
- struct scatterlist src_sg, dst_sg;
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-
- if (IS_ERR(tfm)) {
- res = PTR_ERR(tfm);
- tfm = NULL;
- goto out;
- }
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- res = -ENOMEM;
- goto out;
- }
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &wait);
- res = crypto_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE);
- if (res < 0)
- goto out;
+ struct crypto_sync_skcipher *tfm;
+ int err;
- sg_init_one(&src_sg, master_key, derived_keysize);
- sg_init_one(&dst_sg, derived_key, derived_keysize);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize,
- NULL);
- res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return res;
+ tfm = crypto_alloc_sync_skcipher("ecb(aes)", 0, FSCRYPT_CRYPTOAPI_MASK);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ err = crypto_sync_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE);
+ if (err == 0) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct scatterlist src_sg, dst_sg;
+
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ sg_init_one(&src_sg, master_key, derived_keysize);
+ sg_init_one(&dst_sg, derived_key, derived_keysize);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg,
+ derived_keysize, NULL);
+ err = crypto_skcipher_encrypt(req);
+ }
+ crypto_free_sync_skcipher(tfm);
+ return err;
}
/*
@@ -118,7 +109,7 @@ find_and_lock_process_key(const char *prefix,
payload = (const struct fscrypt_key *)ukp->data;
if (ukp->datalen != sizeof(struct fscrypt_key) ||
- payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) {
+ payload->size < 1 || payload->size > sizeof(payload->raw)) {
fscrypt_warn(NULL,
"key with description '%s' has invalid payload",
key->description);
@@ -143,18 +134,19 @@ invalid:
/* Master key referenced by DIRECT_KEY policy */
struct fscrypt_direct_key {
+ struct super_block *dk_sb;
struct hlist_node dk_node;
refcount_t dk_refcount;
const struct fscrypt_mode *dk_mode;
struct fscrypt_prepared_key dk_key;
u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
- u8 dk_raw[FSCRYPT_MAX_KEY_SIZE];
+ u8 dk_raw[FSCRYPT_MAX_RAW_KEY_SIZE];
};
static void free_direct_key(struct fscrypt_direct_key *dk)
{
if (dk) {
- fscrypt_destroy_prepared_key(&dk->dk_key);
+ fscrypt_destroy_prepared_key(dk->dk_sb, &dk->dk_key);
kfree_sensitive(dk);
}
}
@@ -177,7 +169,8 @@ void fscrypt_put_direct_key(struct fscrypt_direct_key *dk)
*/
static struct fscrypt_direct_key *
find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
- const u8 *raw_key, const struct fscrypt_info *ci)
+ const u8 *raw_key,
+ const struct fscrypt_inode_info *ci)
{
unsigned long hash_key;
struct fscrypt_direct_key *dk;
@@ -217,7 +210,7 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
/* Prepare to encrypt directly using the master key in the given mode */
static struct fscrypt_direct_key *
-fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
+fscrypt_get_direct_key(const struct fscrypt_inode_info *ci, const u8 *raw_key)
{
struct fscrypt_direct_key *dk;
int err;
@@ -231,6 +224,7 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
dk = kzalloc(sizeof(*dk), GFP_KERNEL);
if (!dk)
return ERR_PTR(-ENOMEM);
+ dk->dk_sb = ci->ci_inode->i_sb;
refcount_set(&dk->dk_refcount, 1);
dk->dk_mode = ci->ci_mode;
err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci);
@@ -248,7 +242,7 @@ err_free_dk:
}
/* v1 policy, DIRECT_KEY: use the master key directly */
-static int setup_v1_file_key_direct(struct fscrypt_info *ci,
+static int setup_v1_file_key_direct(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
struct fscrypt_direct_key *dk;
@@ -262,7 +256,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
}
/* v1 policy, !DIRECT_KEY: derive the file's encryption key */
-static int setup_v1_file_key_derived(struct fscrypt_info *ci,
+static int setup_v1_file_key_derived(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
u8 *derived_key;
@@ -287,7 +281,8 @@ out:
return err;
}
-int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
+int fscrypt_setup_v1_file_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_master_key)
{
if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return setup_v1_file_key_direct(ci, raw_master_key);
@@ -295,8 +290,10 @@ int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
return setup_v1_file_key_derived(ci, raw_master_key);
}
-int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
+int
+fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_inode_info *ci)
{
+ const struct super_block *sb = ci->ci_inode->i_sb;
struct key *key;
const struct fscrypt_key *payload;
int err;
@@ -304,8 +301,8 @@ int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX,
ci->ci_policy.v1.master_key_descriptor,
ci->ci_mode->keysize, &payload);
- if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) {
- key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix,
+ if (key == ERR_PTR(-ENOKEY) && sb->s_cop->legacy_key_prefix) {
+ key = find_and_lock_process_key(sb->s_cop->legacy_key_prefix,
ci->ci_policy.v1.master_key_descriptor,
ci->ci_mode->keysize, &payload);
}
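/*
 * Sketch (userspace) of how a legacy v1 key ends up in one of the process
 * keyrings searched by find_and_lock_process_key() above: a "logon" key whose
 * description is the "fscrypt:" prefix plus the policy's key descriptor, with
 * a struct fscrypt_key payload. add_v1_session_key() is an illustrative name.
 */
#include <keyutils.h>
#include <linux/fscrypt.h>
#include <stdio.h>
#include <string.h>

static key_serial_t add_v1_session_key(const char *descriptor_hex,
				       const void *key_bytes, __u32 size)
{
	struct fscrypt_key payload = { .size = size };
	char desc[64];

	/* Caller must ensure size <= FSCRYPT_MAX_KEY_SIZE. */
	memcpy(payload.raw, key_bytes, size);
	snprintf(desc, sizeof(desc), "fscrypt:%s", descriptor_hex);
	return add_key("logon", desc, &payload, sizeof(payload),
		       KEY_SPEC_SESSION_KEYRING);
}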
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index ed3d623724cd..bbb2f5ced988 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -10,10 +10,13 @@
* Modified by Eric Biggers, 2019 for v2 policy support.
*/
+#include <linux/export.h>
+#include <linux/fs_context.h>
+#include <linux/mount.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
-#include <linux/mount.h>
+
#include "fscrypt_private.h"
/**
@@ -32,15 +35,41 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
-static const union fscrypt_policy *
-fscrypt_get_dummy_policy(struct super_block *sb)
+int fscrypt_policy_to_key_spec(const union fscrypt_policy *policy,
+ struct fscrypt_key_specifier *key_spec)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ key_spec->type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
+ memcpy(key_spec->u.descriptor, policy->v1.master_key_descriptor,
+ FSCRYPT_KEY_DESCRIPTOR_SIZE);
+ return 0;
+ case FSCRYPT_POLICY_V2:
+ key_spec->type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+ memcpy(key_spec->u.identifier, policy->v2.master_key_identifier,
+ FSCRYPT_KEY_IDENTIFIER_SIZE);
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+}
+
+const union fscrypt_policy *fscrypt_get_dummy_policy(struct super_block *sb)
{
if (!sb->s_cop->get_dummy_policy)
return NULL;
return sb->s_cop->get_dummy_policy(sb);
}
-static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode)
+/*
+ * Return %true if the given combination of encryption modes is supported for v1
+ * (and later) encryption policies.
+ *
+ * Do *not* add anything new here, since v1 encryption policies are deprecated.
+ * New combinations of modes should go in fscrypt_valid_enc_modes_v2() only.
+ */
+static bool fscrypt_valid_enc_modes_v1(u32 contents_mode, u32 filenames_mode)
{
if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
filenames_mode == FSCRYPT_MODE_AES_256_CTS)
@@ -57,6 +86,19 @@ static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode)
return false;
}
+static bool fscrypt_valid_enc_modes_v2(u32 contents_mode, u32 filenames_mode)
+{
+ if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
+ filenames_mode == FSCRYPT_MODE_AES_256_HCTR2)
+ return true;
+
+ if (contents_mode == FSCRYPT_MODE_SM4_XTS &&
+ filenames_mode == FSCRYPT_MODE_SM4_CTS)
+ return true;
+
+ return fscrypt_valid_enc_modes_v1(contents_mode, filenames_mode);
+}
+
static bool supported_direct_key_modes(const struct inode *inode,
u32 contents_mode, u32 filenames_mode)
{
@@ -78,12 +120,11 @@ static bool supported_direct_key_modes(const struct inode *inode,
}
static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
- const struct inode *inode,
- const char *type,
- int max_ino_bits, int max_lblk_bits)
+ const struct inode *inode)
{
+ const char *type = (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
+ ? "IV_INO_LBLK_64" : "IV_INO_LBLK_32";
struct super_block *sb = inode->i_sb;
- int ino_bits = 64, lblk_bits = 64;
/*
* IV_INO_LBLK_* exist only because of hardware limitations, and
@@ -110,17 +151,29 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
type, sb->s_id);
return false;
}
- if (sb->s_cop->get_ino_and_lblk_bits)
- sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
- if (ino_bits > max_ino_bits) {
+
+ /*
+ * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that inode numbers fit
+ * in 32 bits. In principle, IV_INO_LBLK_32 could support longer inode
+ * numbers because it hashes the inode number; however, currently the
+ * inode number is taken from inode::i_ino, which is 'unsigned long'.
+ * So for now the implementation limit is 32 bits.
+ */
+ if (!sb->s_cop->has_32bit_inodes) {
fscrypt_warn(inode,
"Can't use %s policy on filesystem '%s' because its inode numbers are too long",
type, sb->s_id);
return false;
}
- if (lblk_bits > max_lblk_bits) {
+
+ /*
+ * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that file data unit
+ * indices fit in 32 bits.
+ */
+ if (fscrypt_max_file_dun_bits(sb,
+ fscrypt_policy_v2_du_bits(policy, inode)) > 32) {
fscrypt_warn(inode,
- "Can't use %s policy on filesystem '%s' because its block numbers are too long",
+ "Can't use %s policy on filesystem '%s' because its maximum file size is too large",
type, sb->s_id);
return false;
}
@@ -130,7 +183,7 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy,
const struct inode *inode)
{
- if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ if (!fscrypt_valid_enc_modes_v1(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
@@ -166,7 +219,7 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
{
int count = 0;
- if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ if (!fscrypt_valid_enc_modes_v2(policy->contents_encryption_mode,
policy->filenames_encryption_mode)) {
fscrypt_warn(inode,
"Unsupported encryption modes (contents %d, filenames %d)",
@@ -193,25 +246,39 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
return false;
}
+ if (policy->log2_data_unit_size) {
+ if (!inode->i_sb->s_cop->supports_subblock_data_units) {
+ fscrypt_warn(inode,
+ "Filesystem does not support configuring crypto data unit size");
+ return false;
+ }
+ if (policy->log2_data_unit_size > inode->i_blkbits ||
+ policy->log2_data_unit_size < SECTOR_SHIFT /* 9 */) {
+ fscrypt_warn(inode,
+ "Unsupported log2_data_unit_size in encryption policy: %d",
+ policy->log2_data_unit_size);
+ return false;
+ }
+ if (policy->log2_data_unit_size != inode->i_blkbits &&
+ (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
+ /*
+ * Not safe to enable yet, as we need to ensure that DUN
+ * wraparound can only occur on a FS block boundary.
+ */
+ fscrypt_warn(inode,
+ "Sub-block data units not yet supported with IV_INO_LBLK_32");
+ return false;
+ }
+ }
+
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
- !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64",
- 32, 32))
- return false;
-
- /*
- * IV_INO_LBLK_32 hashes the inode number, so in principle it can
- * support any ino_bits. However, currently the inode number is gotten
- * from inode::i_ino which is 'unsigned long'. So for now the
- * implementation limit is 32 bits.
- */
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
- !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
- 32, 32))
+ if ((policy->flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) &&
+ !supported_iv_ino_lblk_policy(policy, inode))
return false;
if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
@@ -290,6 +357,7 @@ static int fscrypt_new_context(union fscrypt_context *ctx_u,
ctx->filenames_encryption_mode =
policy->filenames_encryption_mode;
ctx->flags = policy->flags;
+ ctx->log2_data_unit_size = policy->log2_data_unit_size;
memcpy(ctx->master_key_identifier,
policy->master_key_identifier,
sizeof(ctx->master_key_identifier));
@@ -350,6 +418,7 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
policy->filenames_encryption_mode =
ctx->filenames_encryption_mode;
policy->flags = ctx->flags;
+ policy->log2_data_unit_size = ctx->log2_data_unit_size;
memcpy(policy->__reserved, ctx->__reserved,
sizeof(policy->__reserved));
memcpy(policy->master_key_identifier,
@@ -365,11 +434,11 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
/* Retrieve an inode's encryption policy */
static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
union fscrypt_context ctx;
int ret;
- ci = fscrypt_get_info(inode);
+ ci = fscrypt_get_inode_info(inode);
if (ci) {
/* key available, use the cached policy */
*policy = ci->ci_policy;
@@ -423,7 +492,7 @@ static int set_encryption_policy(struct inode *inode,
current->comm, current->pid);
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return -EINVAL;
}
@@ -465,7 +534,7 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
return -EFAULT;
policy.version = version;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
@@ -607,7 +676,7 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
/*
* Both parent and child are encrypted, so verify they use the same
- * encryption policy. Compare the fscrypt_info structs if the keys are
+ * encryption policy. Compare the cached policies if the keys are
* available, otherwise retrieve and compare the fscrypt_contexts.
*
* Note that the fscrypt_context retrieval will be required frequently
@@ -658,13 +727,39 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
err = fscrypt_require_key(dir);
if (err)
return ERR_PTR(err);
- return &dir->i_crypt_info->ci_policy;
+ return &fscrypt_get_inode_info_raw(dir)->ci_policy;
}
return fscrypt_get_dummy_policy(dir->i_sb);
}
/**
+ * fscrypt_context_for_new_inode() - create an encryption context for a new inode
+ * @ctx: where context should be written
+ * @inode: inode from which to fetch policy and nonce
+ *
+ * Given an in-core "prepared" (via fscrypt_prepare_new_inode) inode,
+ * generate a new context and write it to ctx. ctx _must_ be at least
+ * FSCRYPT_SET_CONTEXT_MAX_SIZE bytes.
+ *
+ * Return: size of the resulting context or a negative error code.
+ */
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode)
+{
+ struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
+
+ BUILD_BUG_ON(sizeof(union fscrypt_context) !=
+ FSCRYPT_SET_CONTEXT_MAX_SIZE);
+
+ /* fscrypt_prepare_new_inode() should have set up the key already. */
+ if (WARN_ON_ONCE(!ci))
+ return -ENOKEY;
+
+ return fscrypt_new_context(ctx, &ci->ci_policy, ci->ci_nonce);
+}
+EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode);
+
+/**
* fscrypt_set_context() - Set the fscrypt context of a new inode
* @inode: a new inode
* @fs_data: private data given by FS and passed to ->set_context()
@@ -676,101 +771,65 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
*/
int fscrypt_set_context(struct inode *inode, void *fs_data)
{
- struct fscrypt_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci;
union fscrypt_context ctx;
int ctxsize;
- /* fscrypt_prepare_new_inode() should have set up the key already. */
- if (WARN_ON_ONCE(!ci))
- return -ENOKEY;
-
- BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE);
- ctxsize = fscrypt_new_context(&ctx, &ci->ci_policy, ci->ci_nonce);
+ ctxsize = fscrypt_context_for_new_inode(&ctx, inode);
+ if (ctxsize < 0)
+ return ctxsize;
/*
* This may be the first time the inode number is available, so do any
* delayed key setup that requires the inode number.
*/
+ ci = fscrypt_get_inode_info_raw(inode);
if (ci->ci_policy.version == FSCRYPT_POLICY_V2 &&
- (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
- const struct fscrypt_master_key *mk =
- ci->ci_master_key->payload.data[0];
-
- fscrypt_hash_inode_number(ci, mk);
- }
+ (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
+ fscrypt_hash_inode_number(ci, ci->ci_master_key);
return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data);
}
EXPORT_SYMBOL_GPL(fscrypt_set_context);
/**
- * fscrypt_set_test_dummy_encryption() - handle '-o test_dummy_encryption'
- * @sb: the filesystem on which test_dummy_encryption is being specified
- * @arg: the argument to the test_dummy_encryption option. May be NULL.
- * @dummy_policy: the filesystem's current dummy policy (input/output, see
- * below)
- *
- * Handle the test_dummy_encryption mount option by creating a dummy encryption
- * policy, saving it in @dummy_policy, and adding the corresponding dummy
- * encryption key to the filesystem. If the @dummy_policy is already set, then
- * instead validate that it matches @arg. Don't support changing it via
- * remount, as that is difficult to do safely.
+ * fscrypt_parse_test_dummy_encryption() - parse the test_dummy_encryption mount option
+ * @param: the mount option
+ * @dummy_policy: (input/output) the place to write the dummy policy that will
+ * result from parsing the option. Zero-initialize this. If a policy is
+ * already set here (due to test_dummy_encryption being given multiple
+ * times), then this function will verify that the policies are the same.
*
- * Return: 0 on success (dummy policy set, or the same policy is already set);
- * -EEXIST if a different dummy policy is already set;
- * or another -errno value.
+ * Return: 0 on success; -EINVAL if the argument is invalid; -EEXIST if the
+ * argument conflicts with one already specified; or -ENOMEM.
*/
-int fscrypt_set_test_dummy_encryption(struct super_block *sb, const char *arg,
- struct fscrypt_dummy_policy *dummy_policy)
+int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy)
{
- struct fscrypt_key_specifier key_spec = { 0 };
- int version;
- union fscrypt_policy *policy = NULL;
+ const char *arg = "v2";
+ union fscrypt_policy *policy;
int err;
- if (!arg)
- arg = "v2";
-
- if (!strcmp(arg, "v1")) {
- version = FSCRYPT_POLICY_V1;
- key_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
- memset(key_spec.u.descriptor, 0x42,
- FSCRYPT_KEY_DESCRIPTOR_SIZE);
- } else if (!strcmp(arg, "v2")) {
- version = FSCRYPT_POLICY_V2;
- key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
- /* key_spec.u.identifier gets filled in when adding the key */
- } else {
- err = -EINVAL;
- goto out;
- }
+ if (param->type == fs_value_is_string && *param->string)
+ arg = param->string;
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
- if (!policy) {
- err = -ENOMEM;
- goto out;
- }
-
- err = fscrypt_add_test_dummy_key(sb, &key_spec);
- if (err)
- goto out;
+ if (!policy)
+ return -ENOMEM;
- policy->version = version;
- switch (policy->version) {
- case FSCRYPT_POLICY_V1:
+ if (!strcmp(arg, "v1")) {
+ policy->version = FSCRYPT_POLICY_V1;
policy->v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
policy->v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
- memcpy(policy->v1.master_key_descriptor, key_spec.u.descriptor,
+ memset(policy->v1.master_key_descriptor, 0x42,
FSCRYPT_KEY_DESCRIPTOR_SIZE);
- break;
- case FSCRYPT_POLICY_V2:
+ } else if (!strcmp(arg, "v2")) {
+ policy->version = FSCRYPT_POLICY_V2;
policy->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
policy->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
- memcpy(policy->v2.master_key_identifier, key_spec.u.identifier,
- FSCRYPT_KEY_IDENTIFIER_SIZE);
- break;
- default:
- WARN_ON(1);
+ fscrypt_get_test_dummy_key_identifier(
+ policy->v2.master_key_identifier);
+ } else {
err = -EINVAL;
goto out;
}
@@ -789,7 +848,25 @@ out:
kfree(policy);
return err;
}
-EXPORT_SYMBOL_GPL(fscrypt_set_test_dummy_encryption);
+EXPORT_SYMBOL_GPL(fscrypt_parse_test_dummy_encryption);
+
+/**
+ * fscrypt_dummy_policies_equal() - check whether two dummy policies are equal
+ * @p1: the first test dummy policy (may be unset)
+ * @p2: the second test dummy policy (may be unset)
+ *
+ * Return: %true if the dummy policies are both set and equal, or both unset.
+ */
+bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2)
+{
+ if (!p1->policy && !p2->policy)
+ return true;
+ if (!p1->policy || !p2->policy)
+ return false;
+ return fscrypt_policies_equal(p1->policy, p2->policy);
+}
+EXPORT_SYMBOL_GPL(fscrypt_dummy_policies_equal);
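/*
 * Sketch of a filesystem's fs_context hook using the helpers added above.
 * struct myfs_fs_context and myfs_parse_test_dummy_encryption() are
 * hypothetical; the error handling mirrors the documented return values.
 * On remount, fscrypt_dummy_policies_equal() can be used to reject attempts
 * to change the option, since changing it via remount is not supported.
 */
struct myfs_fs_context {
	struct fscrypt_dummy_policy dummy_enc_policy;
	/* ... other parsed mount options ... */
};

static int myfs_parse_test_dummy_encryption(struct fs_context *fc,
					    struct fs_parameter *param)
{
	struct myfs_fs_context *ctx = fc->fs_private;
	int err;

	err = fscrypt_parse_test_dummy_encryption(param,
						  &ctx->dummy_enc_policy);
	if (err == -EINVAL)
		errorfc(fc, "Value of option \"%s\" is unrecognized",
			param->key);
	else if (err == -EEXIST)
		errorfc(fc, "Conflicting test_dummy_encryption options");
	return err;
}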
/**
* fscrypt_show_test_dummy_encryption() - show '-o test_dummy_encryption'
diff --git a/fs/d_path.c b/fs/d_path.c
index e4e0ebad1f15..bb365511066b 100644
--- a/fs/d_path.c
+++ b/fs/d_path.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include "mount.h"
+#include "internal.h"
struct prepend_buffer {
char *buf;
@@ -34,7 +35,7 @@ static bool prepend_char(struct prepend_buffer *p, unsigned char c)
}
/*
- * The source of the prepend data can be an optimistoc load
+ * The source of the prepend data can be an optimistic load
* of a dentry name and length. And because we don't hold any
* locks, the length and the pointer to the name may not be
* in sync if a concurrent rename happens, and the kernel
@@ -240,9 +241,9 @@ static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
*root = fs->root;
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
}
/**
@@ -297,8 +298,7 @@ EXPORT_SYMBOL(d_path);
/*
* Helper function for dentry_operations.d_dname() members
*/
-char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
- const char *fmt, ...)
+char *dynamic_dname(char *buffer, int buflen, const char *fmt, ...)
{
va_list args;
char temp[64];
@@ -385,10 +385,10 @@ static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
*root = fs->root;
*pwd = fs->pwd;
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
}
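/*
 * Sketch of a dentry_operations.d_dname() implementation against the new
 * dynamic_dname() prototype (no dentry argument). The "myfs" naming is
 * illustrative; the pattern mirrors in-tree users such as pipefs.
 */
static char *myfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "myfs:[%lu]",
			     d_inode(dentry)->i_ino);
}

static const struct dentry_operations myfs_dentry_ops = {
	.d_dname	= myfs_dname,
};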
/*
diff --git a/fs/dax.c b/fs/dax.c
index 4e3e5a283a91..289e6254aa30 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -11,7 +11,6 @@
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
@@ -21,26 +20,15 @@
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
-#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
-#include <asm/pgalloc.h>
+#include <linux/rmap.h>
+#include <linux/pgalloc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
-static inline unsigned int pe_order(enum page_entry_size pe_size)
-{
- if (pe_size == PE_SIZE_PTE)
- return PAGE_SHIFT - PAGE_SHIFT;
- if (pe_size == PE_SIZE_PMD)
- return PMD_SHIFT - PAGE_SHIFT;
- if (pe_size == PE_SIZE_PUD)
- return PUD_SHIFT - PAGE_SHIFT;
- return ~0;
-}
-
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
@@ -49,9 +37,6 @@ static inline unsigned int pe_order(enum page_entry_size pe_size)
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
-/* The order of a PMD entry */
-#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
-
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
static int __init init_dax_wait_table(void)
@@ -85,9 +70,14 @@ static unsigned long dax_to_pfn(void *entry)
return xa_to_value(entry) >> DAX_SHIFT;
}
-static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+static struct folio *dax_to_folio(void *entry)
+{
+ return page_folio(pfn_to_page(dax_to_pfn(entry)));
+}
+
+static void *dax_make_entry(unsigned long pfn, unsigned long flags)
{
- return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+ return xa_mk_value(flags | (pfn << DAX_SHIFT));
}
static bool dax_is_locked(void *entry)
@@ -220,7 +210,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry,
*
* Must be called with the i_pages lock held.
*/
-static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
{
void *entry;
struct wait_exceptional_entry_queue ewait;
@@ -250,6 +240,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
}
/*
+ * Wait for the given entry to become unlocked. Caller must hold the i_pages
+ * lock and call either put_unlocked_entry() if it did not lock the entry or
+ * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
+ */
+static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
+{
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq;
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+
+ while (unlikely(dax_is_locked(entry))) {
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+ prepare_to_wait_exclusive(wq, &ewait.wait,
+ TASK_UNINTERRUPTIBLE);
+ xas_reset(xas);
+ xas_unlock_irq(xas);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+ xas_lock_irq(xas);
+ entry = xas_load(xas);
+ }
+
+ if (xa_is_internal(entry))
+ return NULL;
+
+ return entry;
+}
+
+/*
* The only thing keeping the address space around is the i_pages lock
* (it's cycled in clear_inode() after removing the entries from i_pages)
* After we call xas_unlock_irq(), we cannot touch xas->xa.
@@ -264,7 +285,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
/*
- * Unlike get_unlocked_entry() there is no guarantee that this
+ * Unlike get_next_unlocked_entry() there is no guarantee that this
* path ever successfully retrieves an unlocked entry before an
* inode dies. Perform a non-exclusive wait in case this path
* never successfully performs its own wake up.
@@ -321,91 +342,170 @@ static unsigned long dax_entry_size(void *entry)
return PAGE_SIZE;
}
-static unsigned long dax_end_pfn(void *entry)
+/*
+ * A DAX folio is considered shared if it has no mapping set and ->share (which
+ * shares the ->index field) is non-zero. Note that this may return false even
+ * if the page is shared between multiple files but has not yet actually been
+ * mapped into multiple address spaces.
+ */
+static inline bool dax_folio_is_shared(struct folio *folio)
{
- return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+ return !folio->mapping && folio->share;
}
/*
- * Iterate through all mapped pfns represented by an entry, i.e. skip
- * 'empty' and 'zero' entries.
+ * When it is called by dax_insert_entry(), the shared flag will indicate
+ * whether this entry is shared by multiple files. If the page has not
+ * previously been associated with any mappings, the ->mapping and ->index
+ * fields will be set. If it has already been associated with a mapping,
+ * the mapping will be cleared and the share count set. It's then up to
+ * reverse-map users like memory_failure() to call back into the filesystem
+ * to recover ->mapping and ->index information, for example by implementing
+ * dax_holder_operations.
*/
-#define for_each_mapped_pfn(entry, pfn) \
- for (pfn = dax_to_pfn(entry); \
- pfn < dax_end_pfn(entry); pfn++)
+static void dax_folio_make_shared(struct folio *folio)
+{
+ /*
+ * folio is not currently shared so mark it as shared by clearing
+ * folio->mapping.
+ */
+ folio->mapping = NULL;
+
+ /*
+ * folio has previously been mapped into one address space so set the
+ * share count.
+ */
+ folio->share = 1;
+}
+
+static inline unsigned long dax_folio_put(struct folio *folio)
+{
+ unsigned long ref;
+ int order, i;
+
+ if (!dax_folio_is_shared(folio))
+ ref = 0;
+ else
+ ref = --folio->share;
+
+ if (ref)
+ return ref;
+
+ folio->mapping = NULL;
+ order = folio_order(folio);
+ if (!order)
+ return 0;
+ folio_reset_order(folio);
+
+ for (i = 0; i < (1UL << order); i++) {
+ struct dev_pagemap *pgmap = page_pgmap(&folio->page);
+ struct page *page = folio_page(folio, i);
+ struct folio *new_folio = (struct folio *)page;
+
+ ClearPageHead(page);
+ clear_compound_head(page);
+
+ new_folio->mapping = NULL;
+ /*
+ * Reset pgmap which was over-written by
+ * prep_compound_page().
+ */
+ new_folio->pgmap = pgmap;
+ new_folio->share = 0;
+ WARN_ON_ONCE(folio_ref_count(new_folio));
+ }
+
+ return ref;
+}
+
+static void dax_folio_init(void *entry)
+{
+ struct folio *folio = dax_to_folio(entry);
+ int order = dax_entry_order(entry);
+
+ /*
+ * The folio should have been split back to order-0 pages in
+ * dax_folio_put() when it was removed from its final
+ * mapping.
+ */
+ WARN_ON_ONCE(folio_order(folio));
+
+ if (order > 0) {
+ prep_compound_page(&folio->page, order);
+ if (order > 1)
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ WARN_ON_ONCE(folio_ref_count(folio));
+ }
+}
-/*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
- */
static void dax_associate_entry(void *entry, struct address_space *mapping,
- struct vm_area_struct *vma, unsigned long address)
+ struct vm_area_struct *vma,
+ unsigned long address, bool shared)
{
- unsigned long size = dax_entry_size(entry), pfn, index;
- int i = 0;
+ unsigned long size = dax_entry_size(entry), index;
+ struct folio *folio = dax_to_folio(entry);
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
return;
index = linear_page_index(vma, address & ~(size - 1));
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
+ if (folio->mapping)
+ dax_folio_make_shared(folio);
- WARN_ON_ONCE(page->mapping);
- page->mapping = mapping;
- page->index = index + i++;
+ WARN_ON_ONCE(!folio->share);
+ WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));
+ folio->share++;
+ } else {
+ WARN_ON_ONCE(folio->mapping);
+ dax_folio_init(entry);
+ folio = dax_to_folio(entry);
+ folio->mapping = mapping;
+ folio->index = index;
}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
- bool trunc)
+ bool trunc)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
return;
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
-
- WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
- WARN_ON_ONCE(page->mapping && page->mapping != mapping);
- page->mapping = NULL;
- page->index = 0;
- }
+ dax_folio_put(folio);
}
static struct page *dax_busy_page(void *entry)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return NULL;
- if (page_ref_count(page) > 1)
- return page;
- }
- return NULL;
+ if (folio_ref_count(folio) - folio_mapcount(folio))
+ return &folio->page;
+ else
+ return NULL;
}
-/*
- * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
- * @page: The page whose entry we want to lock
+/**
+ * dax_lock_folio - Lock the DAX entry corresponding to a folio
+ * @folio: The folio whose entry we want to lock
*
* Context: Process context.
- * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
* not be locked.
*/
-dax_entry_t dax_lock_page(struct page *page)
+dax_entry_t dax_lock_folio(struct folio *folio)
{
XA_STATE(xas, NULL, 0);
void *entry;
- /* Ensure page->mapping isn't freed while we look at it */
+ /* Ensure folio->mapping isn't freed while we look at it */
rcu_read_lock();
for (;;) {
- struct address_space *mapping = READ_ONCE(page->mapping);
+ struct address_space *mapping = READ_ONCE(folio->mapping);
entry = NULL;
if (!mapping || !dax_mapping(mapping))
@@ -424,11 +524,11 @@ dax_entry_t dax_lock_page(struct page *page)
xas.xa = &mapping->i_pages;
xas_lock_irq(&xas);
- if (mapping != page->mapping) {
+ if (mapping != folio->mapping) {
xas_unlock_irq(&xas);
continue;
}
- xas_set(&xas, page->index);
+ xas_set(&xas, folio->index);
entry = xas_load(&xas);
if (dax_is_locked(entry)) {
rcu_read_unlock();
@@ -444,10 +544,10 @@ dax_entry_t dax_lock_page(struct page *page)
return (dax_entry_t)entry;
}
-void dax_unlock_page(struct page *page, dax_entry_t cookie)
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
- struct address_space *mapping = page->mapping;
- XA_STATE(xas, &mapping->i_pages, page->index);
+ struct address_space *mapping = folio->mapping;
+ XA_STATE(xas, &mapping->i_pages, folio->index);
if (S_ISCHR(mapping->host->i_mode))
return;
@@ -456,6 +556,69 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
}
/*
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
+ * @mapping: the file's mapping whose entry we want to lock
+ * @index: the offset within this file
+ * @page: output the dax page corresponding to this dax entry
+ *
+ * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
+ * could not be locked.
+ */
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
+ struct page **page)
+{
+ XA_STATE(xas, NULL, 0);
+ void *entry;
+
+ rcu_read_lock();
+ for (;;) {
+ entry = NULL;
+ if (!dax_mapping(mapping))
+ break;
+
+ xas.xa = &mapping->i_pages;
+ xas_lock_irq(&xas);
+ xas_set(&xas, index);
+ entry = xas_load(&xas);
+ if (dax_is_locked(entry)) {
+ rcu_read_unlock();
+ wait_entry_unlocked(&xas, entry);
+ rcu_read_lock();
+ continue;
+ }
+ if (!entry ||
+ dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+ /*
+ * Because we look the entry up by the file's mapping
+ * and index, it may not have been inserted yet, or it
+ * may be a zero/empty entry. We don't treat this as an
+ * error case. So, return a special value and do not
+ * output @page.
+ */
+ entry = (void *)~0UL;
+ } else {
+ *page = pfn_to_page(dax_to_pfn(entry));
+ dax_lock_entry(&xas, entry);
+ }
+ xas_unlock_irq(&xas);
+ break;
+ }
+ rcu_read_unlock();
+ return (dax_entry_t)entry;
+}
+
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
+ dax_entry_t cookie)
+{
+ XA_STATE(xas, &mapping->i_pages, index);
+
+ if (cookie == ~0UL)
+ return;
+
+ dax_unlock_entry(&xas, (void *)cookie);
+}
+
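/*
 * Sketch of how a holder would pair the new lookup with the unlock; the
 * caller and error policy here are illustrative. The ~0UL "entry not present"
 * cookie is accepted by dax_unlock_mapping_entry() itself, so the caller only
 * needs to check for a 0 cookie.
 */
static int myfs_poison_dax_entry(struct address_space *mapping, pgoff_t index)
{
	struct page *page = NULL;
	dax_entry_t cookie;

	cookie = dax_lock_mapping_entry(mapping, index, &page);
	if (!cookie)
		return -EBUSY;	/* the entry could not be locked */

	if (page) {
		/* ... act on the DAX page while its entry is locked ... */
	}

	dax_unlock_mapping_entry(mapping, index, cookie);
	return 0;
}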
+/*
* Find page cache entry at given index. If it is a DAX entry, return it
* with the entry locked. If the page cache doesn't contain an entry at
* that index, add a locked empty entry.
@@ -494,7 +657,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
retry:
pmd_downgrade = false;
xas_lock_irq(xas);
- entry = get_unlocked_entry(xas, order);
+ entry = get_next_unlocked_entry(xas, order);
if (entry) {
if (dax_is_conflict(entry))
@@ -549,7 +712,7 @@ retry:
if (order > 0)
flags |= DAX_PMD;
- entry = dax_make_entry(pfn_to_pfn_t(0), flags);
+ entry = dax_make_entry(0, flags);
dax_lock_entry(xas, entry);
if (xas_error(xas))
goto out_unlock;
@@ -598,13 +761,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
pgoff_t end_idx;
XA_STATE(xas, &mapping->i_pages, start_idx);
- /*
- * In the 'limited' case get_user_pages() for dax is disabled.
- */
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
- return NULL;
-
- if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+ if (!dax_mapping(mapping))
return NULL;
/* If end == LLONG_MAX, all pages from start to till end of file */
@@ -630,8 +787,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
xas_for_each(&xas, entry, end_idx) {
if (WARN_ON_ONCE(!xa_is_value(entry)))
continue;
- if (unlikely(dax_is_locked(entry)))
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
if (entry)
page = dax_busy_page(entry);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -657,14 +813,14 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
static int __dax_invalidate_entry(struct address_space *mapping,
- pgoff_t index, bool trunc)
+ pgoff_t index, bool trunc)
{
XA_STATE(xas, &mapping->i_pages, index);
int ret = 0;
void *entry;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, 0);
+ entry = get_next_unlocked_entry(&xas, 0);
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
goto out;
if (!trunc &&
@@ -681,6 +837,35 @@ out:
return ret;
}
+static int __dax_clear_dirty_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end)
+{
+ XA_STATE(xas, &mapping->i_pages, start);
+ unsigned int scanned = 0;
+ void *entry;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end) {
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
+ xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
+ xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
+ put_unlocked_entry(&xas, entry, WAKE_NEXT);
+
+ if (++scanned % XA_CHECK_SCHED)
+ continue;
+
+ xas_pause(&xas);
+ xas_unlock_irq(&xas);
+ cond_resched();
+ xas_lock_irq(&xas);
+ }
+ xas_unlock_irq(&xas);
+
+ return 0;
+}
+
/*
* Delete DAX entry at @index from @mapping. Wait for it
* to be unlocked before deleting it.
@@ -700,6 +885,107 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
return ret;
}
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ void *entry;
+ pgoff_t start_idx = start >> PAGE_SHIFT;
+ pgoff_t end_idx;
+ XA_STATE(xas, &mapping->i_pages, start_idx);
+
+ /* If end == LLONG_MAX, all pages from start to till end of file */
+ if (end == LLONG_MAX)
+ end_idx = ULONG_MAX;
+ else
+ end_idx = end >> PAGE_SHIFT;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end_idx) {
+ if (!xa_is_value(entry))
+ continue;
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
+ dax_disassociate_entry(entry, mapping, true);
+ xas_store(&xas, NULL);
+ mapping->nrpages -= 1UL << dax_entry_order(entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
+ }
+ xas_unlock_irq(&xas);
+}
+EXPORT_SYMBOL_GPL(dax_delete_mapping_range);
+
+static int wait_page_idle(struct page *page,
+ void (cb)(struct inode *),
+ struct inode *inode)
+{
+ return ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+}
+
+static void wait_page_idle_uninterruptible(struct page *page,
+ struct inode *inode)
+{
+ ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_UNINTERRUPTIBLE, 0, 0, schedule());
+}
+
+/*
+ * Unmaps the inode and waits for any DMA to complete prior to deleting the
+ * DAX mapping entries for the range.
+ *
+ * For NOWAIT behavior, pass @cb as NULL to return early on the first
+ * busy page found.
+ */
+int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
+ void (cb)(struct inode *))
+{
+ struct page *page;
+ int error = 0;
+
+ if (!dax_mapping(inode->i_mapping))
+ return 0;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, start, end);
+ if (!page)
+ break;
+ if (!cb) {
+ error = -ERESTARTSYS;
+ break;
+ }
+
+ error = wait_page_idle(page, cb, inode);
+ } while (error == 0);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, start, end);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dax_break_layout);
+
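[Note] Illustrative sketch, not part of the patch: one way a filesystem might drive dax_break_layout(). example_wait_dax_page() and example_break_layouts() are hypothetical; a real wait callback typically drops and re-takes filesystem locks around the sleep so the page pinner can make progress.

	static void example_wait_dax_page(struct inode *inode)
	{
		/* unlock whatever the page pinner may need, sleep, re-lock */
		schedule();
	}

	static int example_break_layouts(struct inode *inode, loff_t start,
					 loff_t end, bool nowait)
	{
		if (nowait)
			/* NULL callback: -ERESTARTSYS on the first busy page */
			return dax_break_layout(inode, start, end, NULL);

		return dax_break_layout(inode, start, end, example_wait_dax_page);
	}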
+void dax_break_layout_final(struct inode *inode)
+{
+ struct page *page;
+
+ if (!dax_mapping(inode->i_mapping))
+ return;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, 0,
+ LLONG_MAX);
+ if (!page)
+ break;
+
+ wait_page_idle_uninterruptible(page, inode);
+ } while (true);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX);
+}
+EXPORT_SYMBOL_GPL(dax_break_layout_final);
+
/*
* Invalidate DAX entry if it is clean.
*/
@@ -709,48 +995,64 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
return __dax_invalidate_entry(mapping, index, false);
}
-static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
- sector_t sector, struct page *to, unsigned long vaddr)
+static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
+{
+ return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
+}
+
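[Note] Illustrative sketch, not part of the patch: the new helper maps a file position to a page offset within the dax_device using the extent's device address and file offset. The same arithmetic, stand-alone and with made-up numbers:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

	int main(void)
	{
		unsigned long iomap_addr = 16UL << 20;	/* device offset of extent */
		unsigned long iomap_off  = 1UL << 20;	/* file offset of extent */
		unsigned long pos = (1UL << 20) + 0x3456;	/* fault position */

		unsigned long pgoff = (iomap_addr + (pos & PAGE_MASK) - iomap_off)
					>> PAGE_SHIFT;		/* PHYS_PFN() */

		printf("pgoff into the dax_device: %lu\n", pgoff);	/* 4099 */
		return 0;
	}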
+static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
+ pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
void *vto, *kaddr;
- pgoff_t pgoff;
long rc;
int id;
- rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
-
id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+ rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
+ &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
}
- vto = kmap_atomic(to);
- copy_user_page(vto, (void __force *)kaddr, vaddr, to);
+ vto = kmap_atomic(vmf->cow_page);
+ copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
kunmap_atomic(vto);
dax_read_unlock(id);
return 0;
}
/*
+ * MAP_SYNC on a dax mapping guarantees dirty metadata is
+ * flushed on write-faults (non-cow), but not read-faults.
+ */
+static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
+ struct vm_area_struct *vma)
+{
+ return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
+ (iter->iomap.flags & IOMAP_F_DIRTY);
+}
+
+/*
* By this point grab_mapping_entry() has ensured that we have a locked entry
* of the appropriate size so we don't have to worry about downgrading PMDs to
* PTEs. If we happen to be trying to insert a PTE and there is a PMD
* already in the tree, we will skip the insertion and just dirty the PMD as
* appropriate.
*/
-static void *dax_insert_entry(struct xa_state *xas,
- struct address_space *mapping, struct vm_fault *vmf,
- void *entry, pfn_t pfn, unsigned long flags, bool dirty)
+static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
+ const struct iomap_iter *iter, void *entry, unsigned long pfn,
+ unsigned long flags)
{
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
void *new_entry = dax_make_entry(pfn, flags);
+ bool write = iter->flags & IOMAP_WRITE;
+ bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
+ bool shared = iter->iomap.flags & IOMAP_F_SHARED;
if (dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+ if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
unsigned long index = xas->xa_index;
/* we are replacing a zero page with block mapping */
if (dax_is_pmd_entry(entry))
@@ -762,11 +1064,13 @@ static void *dax_insert_entry(struct xa_state *xas,
xas_reset(xas);
xas_lock_irq(xas);
- if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+ if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
void *old;
dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+ dax_associate_entry(new_entry, mapping, vmf->vma,
+ vmf->address, shared);
+
/*
* Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
@@ -786,99 +1090,19 @@ static void *dax_insert_entry(struct xa_state *xas,
if (dirty)
xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
+ if (write && shared)
+ xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
+
xas_unlock_irq(xas);
return entry;
}
-static inline
-unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
-{
- unsigned long address;
-
- address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
- VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
- return address;
-}
-
-/* Walk all mappings of a given index of a file and writeprotect them */
-static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
- unsigned long pfn)
-{
- struct vm_area_struct *vma;
- pte_t pte, *ptep = NULL;
- pmd_t *pmdp = NULL;
- spinlock_t *ptl;
-
- i_mmap_lock_read(mapping);
- vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
- struct mmu_notifier_range range;
- unsigned long address;
-
- cond_resched();
-
- if (!(vma->vm_flags & VM_SHARED))
- continue;
-
- address = pgoff_address(index, vma);
-
- /*
- * follow_invalidate_pte() will use the range to call
- * mmu_notifier_invalidate_range_start() on our behalf before
- * taking any lock.
- */
- if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
- &pmdp, &ptl))
- continue;
-
- /*
- * No need to call mmu_notifier_invalidate_range() as we are
- * downgrading page table protection not changing it to point
- * to a new page.
- *
- * See Documentation/vm/mmu_notifier.rst
- */
- if (pmdp) {
-#ifdef CONFIG_FS_DAX_PMD
- pmd_t pmd;
-
- if (pfn != pmd_pfn(*pmdp))
- goto unlock_pmd;
- if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
- goto unlock_pmd;
-
- flush_cache_page(vma, address, pfn);
- pmd = pmdp_invalidate(vma, address, pmdp);
- pmd = pmd_wrprotect(pmd);
- pmd = pmd_mkclean(pmd);
- set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-unlock_pmd:
-#endif
- spin_unlock(ptl);
- } else {
- if (pfn != pte_pfn(*ptep))
- goto unlock_pte;
- if (!pte_dirty(*ptep) && !pte_write(*ptep))
- goto unlock_pte;
-
- flush_cache_page(vma, address, pfn);
- pte = ptep_clear_flush(vma, address, ptep);
- pte = pte_wrprotect(pte);
- pte = pte_mkclean(pte);
- set_pte_at(vma->vm_mm, address, ptep, pte);
-unlock_pte:
- pte_unmap_unlock(ptep, ptl);
- }
-
- mmu_notifier_invalidate_range_end(&range);
- }
- i_mmap_unlock_read(mapping);
-}
-
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
struct address_space *mapping, void *entry)
{
- unsigned long pfn, index, count;
+ unsigned long pfn, index, count, end;
long ret = 0;
+ struct vm_area_struct *vma;
/*
* A page got tagged dirty in DAX mapping? Something is seriously
@@ -890,7 +1114,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
if (unlikely(dax_is_locked(entry))) {
void *old_entry = entry;
- entry = get_unlocked_entry(xas, 0);
+ entry = get_next_unlocked_entry(xas, 0);
/* Entry got punched out / reallocated? */
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -936,8 +1160,16 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
pfn = dax_to_pfn(entry);
count = 1UL << dax_entry_order(entry);
index = xas->xa_index & ~(count - 1);
+ end = index + count - 1;
+
+ /* Walk all mappings of a given index of a file and writeprotect them */
+ i_mmap_lock_read(mapping);
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
+ pfn_mkclean_range(pfn, count, index, vma);
+ cond_resched();
+ }
+ i_mmap_unlock_read(mapping);
- dax_entry_mkclean(mapping, index, pfn);
dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
/*
* After we have flushed the cache, we can clear the dirty tag. There
@@ -1005,43 +1237,117 @@ int dax_writeback_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
-static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
+static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
+ size_t size, void **kaddr, unsigned long *pfnp)
{
- return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
-}
-
-static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
- pfn_t *pfnp)
-{
- const sector_t sector = dax_iomap_sector(iomap, pos);
- pgoff_t pgoff;
- int id, rc;
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+ int id, rc = 0;
long length;
- rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
- if (rc)
- return rc;
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
- NULL, pfnp);
+ DAX_ACCESS, kaddr, pfnp);
if (length < 0) {
rc = length;
goto out;
}
+ if (!pfnp)
+ goto out_check_addr;
rc = -EINVAL;
if (PFN_PHYS(length) < size)
goto out;
- if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
- goto out;
- /* For larger pages we need devmap */
- if (length > 1 && !pfn_t_devmap(*pfnp))
+ if (*pfnp & (PHYS_PFN(size)-1))
goto out;
+
rc = 0;
+
+out_check_addr:
+ if (!kaddr)
+ goto out;
+ if (!*kaddr)
+ rc = -EFAULT;
out:
dax_read_unlock(id);
return rc;
}
+/**
+ * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
+ * by copying the data before and after the range to be written.
+ * @pos: address to copy from.
+ * @length: size of the copy operation.
+ * @align_size: alignment of the copy, either PMD_SIZE or PAGE_SIZE
+ * @srcmap: iomap srcmap
+ * @daddr: destination address to copy to.
+ *
+ * This can be called from two places. Either during a DAX write fault (page
+ * aligned), to copy @length bytes of data to @daddr; or during a normal DAX
+ * write operation, where dax_iomap_iter() calls this to copy the unaligned
+ * head or tail of the range. In the latter case the copy of the aligned
+ * ranges is handled by dax_iomap_iter() itself.
+ * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
+ * area to make sure no old data remains.
+ */
+static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
+ const struct iomap *srcmap, void *daddr)
+{
+ loff_t head_off = pos & (align_size - 1);
+ size_t size = ALIGN(head_off + length, align_size);
+ loff_t end = pos + length;
+ loff_t pg_end = round_up(end, align_size);
+ /* copy_all is usually true in the page fault case */
+ bool copy_all = head_off == 0 && end == pg_end;
+ /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
+ bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
+ srcmap->type == IOMAP_UNWRITTEN;
+ void *saddr = NULL;
+ int ret = 0;
+
+ if (!zero_edge) {
+ ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
+ if (ret)
+ return dax_mem2blk_err(ret);
+ }
+
+ if (copy_all) {
+ if (zero_edge)
+ memset(daddr, 0, size);
+ else
+ ret = copy_mc_to_kernel(daddr, saddr, length);
+ goto out;
+ }
+
+ /* Copy the head part of the range */
+ if (head_off) {
+ if (zero_edge)
+ memset(daddr, 0, head_off);
+ else {
+ ret = copy_mc_to_kernel(daddr, saddr, head_off);
+ if (ret)
+ return -EIO;
+ }
+ }
+
+ /* Copy the tail part of the range */
+ if (end < pg_end) {
+ loff_t tail_off = head_off + length;
+ loff_t tail_len = pg_end - end;
+
+ if (zero_edge)
+ memset(daddr + tail_off, 0, tail_len);
+ else {
+ ret = copy_mc_to_kernel(daddr + tail_off,
+ saddr + tail_off, tail_len);
+ if (ret)
+ return -EIO;
+ }
+ }
+out:
+ if (zero_edge)
+ dax_flush(srcmap->dax_dev, daddr, size);
+ return ret ? -EIO : 0;
+}
+
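[Note] Illustrative sketch, not part of the patch: the head/tail split performed by dax_iomap_copy_around(), reduced to its byte arithmetic for a hypothetical 100-byte write at file offset 4000 with 4 KiB alignment.

	#include <stdio.h>

	int main(void)
	{
		unsigned long align = 4096, pos = 4000, length = 100;
		unsigned long head_off = pos & (align - 1);		  /* 4000 */
		unsigned long end = pos + length;			  /* 4100 */
		unsigned long pg_end = (end + align - 1) & ~(align - 1); /* 8192 */
		unsigned long size = ((head_off + length) + align - 1) & ~(align - 1);

		printf("copy head: %lu, write: %lu, copy tail: %lu bytes\n",
		       head_off, length, pg_end - end);
		printf("total destination span: %lu bytes\n", size);	  /* 8192 */
		return 0;
	}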
/*
* The user has performed a load from a hole in the file. Allocating a new
* page in the file would cause excessive storage usage for workloads with
@@ -1049,143 +1355,258 @@ out:
* If this page is ever written to we will re-fault and change the mapping to
* point to real DAX storage instead.
*/
-static vm_fault_t dax_load_hole(struct xa_state *xas,
- struct address_space *mapping, void **entry,
- struct vm_fault *vmf)
+static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+ const struct iomap_iter *iter, void **entry)
{
- struct inode *inode = mapping->host;
+ struct inode *inode = iter->inode;
unsigned long vaddr = vmf->address;
- pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+ unsigned long pfn = my_zero_pfn(vaddr);
vm_fault_t ret;
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
- DAX_ZERO_PAGE, false);
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
- ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
- const struct iomap *iomap, void **entry)
+ const struct iomap_iter *iter, void **entry)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- unsigned long pmd_addr = vmf->address & PMD_MASK;
- struct vm_area_struct *vma = vmf->vma;
struct inode *inode = mapping->host;
- pgtable_t pgtable = NULL;
- struct page *zero_page;
- spinlock_t *ptl;
- pmd_t pmd_entry;
- pfn_t pfn;
-
- zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
-
- if (unlikely(!zero_page))
- goto fallback;
-
- pfn = page_to_pfn_t(zero_page);
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
- DAX_PMD | DAX_ZERO_PAGE, false);
+ struct folio *zero_folio;
+ vm_fault_t ret;
- if (arch_needs_pgtable_deposit()) {
- pgtable = pte_alloc_one(vma->vm_mm);
- if (!pgtable)
- return VM_FAULT_OOM;
- }
+ zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
- ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
- if (!pmd_none(*(vmf->pmd))) {
- spin_unlock(ptl);
- goto fallback;
+ if (unlikely(!zero_folio)) {
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
+ return VM_FAULT_FALLBACK;
}
- if (pgtable) {
- pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
- mm_inc_nr_ptes(vma->vm_mm);
- }
- pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
- pmd_entry = pmd_mkhuge(pmd_entry);
- set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
- spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
- return VM_FAULT_NOPAGE;
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, folio_pfn(zero_folio),
+ DAX_PMD | DAX_ZERO_PAGE);
-fallback:
- if (pgtable)
- pte_free(vma->vm_mm, pgtable);
- trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
- return VM_FAULT_FALLBACK;
+ ret = vmf_insert_folio_pmd(vmf, zero_folio, false);
+ if (ret == VM_FAULT_NOPAGE)
+ trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
+ return ret;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
- const struct iomap *iomap, void **entry)
+ const struct iomap_iter *iter, void **entry)
{
return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
+static int dax_unshare_iter(struct iomap_iter *iter)
{
- sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
- pgoff_t pgoff;
- long rc, id;
- void *kaddr;
- bool page_aligned = false;
- unsigned offset = offset_in_page(pos);
- unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+ struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t copy_pos = iter->pos;
+ u64 copy_len = iomap_length(iter);
+ u32 mod;
+ int id = 0;
+ s64 ret;
+ void *daddr = NULL, *saddr = NULL;
+
+ if (!iomap_want_unshare_iter(iter))
+ return iomap_iter_advance_full(iter);
- if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
- (size == PAGE_SIZE))
- page_aligned = true;
+ /*
+ * Extend the file range to be aligned to fsblock/pagesize, because
+ * we need to copy entire blocks, not just the byte range specified.
+ * Invalidate the mapping because we're about to CoW.
+ */
+ mod = offset_in_page(copy_pos);
+ if (mod) {
+ copy_len += mod;
+ copy_pos -= mod;
+ }
- rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
+ mod = offset_in_page(copy_pos + copy_len);
+ if (mod)
+ copy_len += PAGE_SIZE - mod;
+
+ invalidate_inode_pages2_range(iter->inode->i_mapping,
+ copy_pos >> PAGE_SHIFT,
+ (copy_pos + copy_len - 1) >> PAGE_SHIFT);
id = dax_read_lock();
+ ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
+ if (ret < 0)
+ goto out_unlock;
- if (page_aligned)
- rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+ if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0)
+ ret = -EIO;
+
+out_unlock:
+ dax_read_unlock(id);
+ if (ret < 0)
+ return dax_mem2blk_err(ret);
+ return iomap_iter_advance_full(iter);
+}
+
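[Note] Illustrative sketch, not part of the patch: the range-widening step above, which extends the byte range to whole pages before copying old blocks to the new ones, shown stand-alone with made-up offsets.

	#include <stdio.h>

	int main(void)
	{
		unsigned long page = 4096;
		unsigned long copy_pos = 5000, copy_len = 300, mod;

		mod = copy_pos & (page - 1);	/* offset_in_page(copy_pos) */
		if (mod) {
			copy_len += mod;
			copy_pos -= mod;
		}
		mod = (copy_pos + copy_len) & (page - 1);
		if (mod)
			copy_len += page - mod;

		printf("copy [%lu, %lu)\n", copy_pos, copy_pos + copy_len); /* [4096, 8192) */
		return 0;
	}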
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops)
+{
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+ .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
+ };
+ loff_t size = i_size_read(inode);
+ int ret;
+
+ if (pos < 0 || pos >= size)
+ return 0;
+
+ iter.len = min(len, size - pos);
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.status = dax_unshare_iter(&iter);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dax_file_unshare);
+
+static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
+{
+ const struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ unsigned offset = offset_in_page(pos);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+ void *kaddr;
+ long ret;
+
+ ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
+ NULL);
+ if (ret < 0)
+ return dax_mem2blk_err(ret);
+
+ memset(kaddr + offset, 0, size);
+ if (iomap->flags & IOMAP_F_SHARED)
+ ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
+ kaddr);
else
- rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
- if (rc < 0) {
+ dax_flush(iomap->dax_dev, kaddr + offset, size);
+ return ret;
+}
+
+static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
+{
+ const struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ u64 length = iomap_length(iter);
+ int ret;
+
+ /* already zeroed? we're done. */
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+ return iomap_iter_advance(iter, length);
+
+ /*
+ * invalidate the pages whose sharing state is to be changed
+ * because of CoW.
+ */
+ if (iomap->flags & IOMAP_F_SHARED)
+ invalidate_inode_pages2_range(iter->inode->i_mapping,
+ iter->pos >> PAGE_SHIFT,
+ (iter->pos + length - 1) >> PAGE_SHIFT);
+
+ do {
+ loff_t pos = iter->pos;
+ unsigned offset = offset_in_page(pos);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+ int id;
+
+ length = min_t(u64, PAGE_SIZE - offset, length);
+
+ id = dax_read_lock();
+ if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE)
+ ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+ else
+ ret = dax_memzero(iter, pos, length);
dax_read_unlock(id);
- return rc;
- }
- if (!page_aligned) {
- memset(kaddr + offset, 0, size);
- dax_flush(iomap->dax_dev, kaddr + offset, size);
- }
- dax_read_unlock(id);
- return size;
+ if (ret < 0)
+ return ret;
+
+ ret = iomap_iter_advance(iter, length);
+ if (ret)
+ return ret;
+ } while ((length = iomap_length(iter)) > 0);
+
+ if (did_zero)
+ *did_zero = true;
+ return ret;
}
-static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
- struct iov_iter *iter)
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops)
+{
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+ .len = len,
+ .flags = IOMAP_DAX | IOMAP_ZERO,
+ };
+ int ret;
+
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.status = dax_zero_iter(&iter, did_zero);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dax_zero_range);
+
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops)
+{
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int off = pos & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!off)
+ return 0;
+ return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
+}
+EXPORT_SYMBOL_GPL(dax_truncate_page);
+
+static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter)
{
const struct iomap *iomap = &iomi->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iomi);
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
- struct block_device *bdev = iomap->bdev;
struct dax_device *dax_dev = iomap->dax_dev;
loff_t end = pos + length, done = 0;
+ bool write = iov_iter_rw(iter) == WRITE;
+ bool cow = write && iomap->flags & IOMAP_F_SHARED;
ssize_t ret = 0;
size_t xfer;
int id;
- if (iov_iter_rw(iter) == READ) {
+ if (!write) {
end = min(end, i_size_read(iomi->inode));
if (pos >= end)
return 0;
- if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
- return iov_iter_zero(min(length, end - pos), iter);
+ if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) {
+ done = iov_iter_zero(min(length, end - pos), iter);
+ return iomap_iter_advance(iomi, done);
+ }
}
- if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+ /*
+ * In DAX mode, enforce either pure overwrites of written extents, or
+ * writes to unwritten extents as part of a copy-on-write operation.
+ */
+ if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
+ !(iomap->flags & IOMAP_F_SHARED)))
return -EIO;
/*
@@ -1193,19 +1614,29 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
* into page tables. We have to tear down these mappings so that data
* written by write(2) is visible in mmap.
*/
- if (iomap->flags & IOMAP_F_NEW) {
+ if (iomap->flags & IOMAP_F_NEW || cow) {
+ /*
+ * The filesystem allows CoW on non-shared extents. The source
+ * extents may have been mmapped and marked dirty before. To be
+ * able to invalidate their DAX entries, clear the dirty mark in
+ * advance.
+ */
+ if (cow)
+ __dax_clear_dirty_range(iomi->inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ (end - 1) >> PAGE_SHIFT);
invalidate_inode_pages2_range(iomi->inode->i_mapping,
pos >> PAGE_SHIFT,
(end - 1) >> PAGE_SHIFT);
}
id = dax_read_lock();
- while (pos < end) {
+ while ((pos = iomi->pos) < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
const size_t size = ALIGN(length + offset, PAGE_SIZE);
- const sector_t sector = dax_iomap_sector(iomap, pos);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
ssize_t map_len;
- pgoff_t pgoff;
+ bool recovery = false;
void *kaddr;
if (fatal_signal_pending(current)) {
@@ -1213,47 +1644,53 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
break;
}
- ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
- if (ret)
- break;
-
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
- &kaddr, NULL);
+ DAX_ACCESS, &kaddr, NULL);
+ if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
+ map_len = dax_direct_access(dax_dev, pgoff,
+ PHYS_PFN(size), DAX_RECOVERY_WRITE,
+ &kaddr, NULL);
+ if (map_len > 0)
+ recovery = true;
+ }
if (map_len < 0) {
- ret = map_len;
+ ret = dax_mem2blk_err(map_len);
break;
}
+ if (cow) {
+ ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
+ srcmap, kaddr);
+ if (ret)
+ break;
+ }
+
map_len = PFN_PHYS(map_len);
kaddr += offset;
map_len -= offset;
if (map_len > end - pos)
map_len = end - pos;
- /*
- * The userspace address for the memory copy has already been
- * validated via access_ok() in either vfs_read() or
- * vfs_write(), depending on which operation we are doing.
- */
- if (iov_iter_rw(iter) == WRITE)
+ if (recovery)
+ xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
+ map_len, iter);
+ else if (write)
xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
else
xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
map_len, iter);
- pos += xfer;
- length -= xfer;
- done += xfer;
-
- if (xfer == 0)
+ ret = iomap_iter_advance(iomi, xfer);
+ if (!ret && xfer == 0)
ret = -EFAULT;
if (xfer < map_len)
break;
+ length = iomap_length(iomi);
}
dax_read_unlock(id);
- return done ? done : ret;
+ return ret;
}
/**
@@ -1274,14 +1711,21 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
.inode = iocb->ki_filp->f_mapping->host,
.pos = iocb->ki_pos,
.len = iov_iter_count(iter),
+ .flags = IOMAP_DAX,
};
loff_t done = 0;
int ret;
+ if (WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC))
+ return -EIO;
+
+ if (!iomi.len)
+ return 0;
+
if (iov_iter_rw(iter) == WRITE) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;
- } else {
+ } else if (!sb_rdonly(iomi.inode->i_sb)) {
lockdep_assert_held(&iomi.inode->i_rwsem);
}
@@ -1289,7 +1733,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_NOWAIT;
while ((ret = iomap_iter(&iomi, ops)) > 0)
- iomi.processed = dax_iomap_iter(&iomi, iter);
+ iomi.status = dax_iomap_iter(&iomi, iter);
done = iomi.pos - iocb->ki_pos;
iocb->ki_pos = iomi.pos;
@@ -1305,23 +1749,13 @@ static vm_fault_t dax_fault_return(int error)
}
/*
- * MAP_SYNC on a dax mapping guarantees dirty metadata is
- * flushed on write-faults (non-cow), but not read-faults.
- */
-static bool dax_fault_is_synchronous(unsigned long flags,
- struct vm_area_struct *vma, const struct iomap *iomap)
-{
- return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
- && (iomap->flags & IOMAP_F_DIRTY);
-}
-
-/*
* When handling a synchronous page fault and the inode need a fsync, we can
* insert the PTE/PMD into page tables only after that fsync happened. Skip
* insertion for now and return the pfn so that caller can insert it after the
* fsync is done.
*/
-static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+static vm_fault_t dax_fault_synchronous_pfnp(unsigned long *pfnp,
+ unsigned long pfn)
{
if (WARN_ON_ONCE(!pfnp))
return VM_FAULT_SIGBUS;
@@ -1332,19 +1766,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
const struct iomap_iter *iter)
{
- sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
- unsigned long vaddr = vmf->address;
vm_fault_t ret;
int error = 0;
switch (iter->iomap.type) {
case IOMAP_HOLE:
case IOMAP_UNWRITTEN:
- clear_user_highpage(vmf->cow_page, vaddr);
+ clear_user_highpage(vmf->cow_page, vmf->address);
break;
case IOMAP_MAPPED:
- error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
- sector, vmf->cow_page, vaddr);
+ error = copy_cow_page_dax(vmf, iter);
break;
default:
WARN_ON_ONCE(1);
@@ -1372,18 +1803,19 @@ static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
* @pmd: distinguish whether it is a pmd fault
*/
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
- const struct iomap_iter *iter, pfn_t *pfnp,
+ const struct iomap_iter *iter, unsigned long *pfnp,
struct xa_state *xas, void **entry, bool pmd)
{
- struct address_space *mapping = vmf->vma->vm_file->f_mapping;
const struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
- bool write = vmf->flags & FAULT_FLAG_WRITE;
- bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
+ bool write = iter->flags & IOMAP_WRITE;
unsigned long entry_flags = pmd ? DAX_PMD : 0;
- int err = 0;
- pfn_t pfn;
+ struct folio *folio;
+ int ret, err = 0;
+ unsigned long pfn;
+ void *kaddr;
if (!pmd && vmf->cow_page)
return dax_fault_cow_page(vmf, iter);
@@ -1392,36 +1824,42 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
if (!write &&
(iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
if (!pmd)
- return dax_load_hole(xas, mapping, entry, vmf);
- return dax_pmd_load_hole(xas, vmf, iomap, entry);
+ return dax_load_hole(xas, vmf, iter, entry);
+ return dax_pmd_load_hole(xas, vmf, iter, entry);
}
- if (iomap->type != IOMAP_MAPPED) {
+ if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
WARN_ON_ONCE(1);
return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
}
- err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
+ err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
if (err)
return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
- write && !sync);
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
+
+ if (write && iomap->flags & IOMAP_F_SHARED) {
+ err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
+ if (err)
+ return dax_fault_return(err);
+ }
- if (sync)
+ folio = dax_to_folio(*entry);
+ if (dax_fault_is_synchronous(iter, vmf->vma))
return dax_fault_synchronous_pfnp(pfnp, pfn);
- /* insert PMD pfn */
+ folio_ref_inc(folio);
if (pmd)
- return vmf_insert_pfn_pmd(vmf, pfn, write);
+ ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
+ else
+ ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
+ folio_put(folio);
- /* insert PTE pfn */
- if (write)
- return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return ret;
}
-static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp,
int *iomap_errp, const struct iomap_ops *ops)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1430,7 +1868,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
.inode = mapping->host,
.pos = (loff_t)vmf->pgoff << PAGE_SHIFT,
.len = PAGE_SIZE,
- .flags = IOMAP_FAULT,
+ .flags = IOMAP_DAX | IOMAP_FAULT,
};
vm_fault_t ret = 0;
void *entry;
@@ -1462,14 +1900,14 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
* the PTE we need to set up. If so just return and the fault will be
* retried.
*/
- if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+ if (pmd_trans_huge(*vmf->pmd)) {
ret = VM_FAULT_NOPAGE;
goto unlock_entry;
}
while ((error = iomap_iter(&iter, ops)) > 0) {
if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
- iter.processed = -EIO; /* fs corruption? */
+ iter.status = -EIO; /* fs corruption? */
continue;
}
@@ -1482,7 +1920,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
}
if (!(ret & VM_FAULT_ERROR))
- iter.processed = PAGE_SIZE;
+ iter.status = iomap_iter_advance(&iter, PAGE_SIZE);
}
if (iomap_errp)
@@ -1531,7 +1969,7 @@ static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
return false;
}
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
const struct iomap_ops *ops)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1539,12 +1977,11 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct iomap_iter iter = {
.inode = mapping->host,
.len = PMD_SIZE,
- .flags = IOMAP_FAULT,
+ .flags = IOMAP_DAX | IOMAP_FAULT,
};
vm_fault_t ret = VM_FAULT_FALLBACK;
pgoff_t max_pgoff;
void *entry;
- int error;
if (vmf->flags & FAULT_FLAG_WRITE)
iter.flags |= IOMAP_WRITE;
@@ -1584,20 +2021,19 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* the PMD we need to set up. If so just return and the fault will be
* retried.
*/
- if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
- !pmd_devmap(*vmf->pmd)) {
+ if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) {
ret = 0;
goto unlock_entry;
}
iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
- while ((error = iomap_iter(&iter, ops)) > 0) {
+ while (iomap_iter(&iter, ops) > 0) {
if (iomap_length(&iter) < PMD_SIZE)
continue; /* actually breaks out of the loop */
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
if (ret != VM_FAULT_FALLBACK)
- iter.processed = PMD_SIZE;
+ iter.status = iomap_iter_advance(&iter, PMD_SIZE);
}
unlock_entry:
@@ -1612,7 +2048,7 @@ out:
return ret;
}
#else
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
const struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
@@ -1622,7 +2058,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
/**
* dax_iomap_fault - handle a page fault on a DAX file
* @vmf: The description of the fault
- * @pe_size: Size of the page to fault in
+ * @order: Order of the page to fault in
* @pfnp: PFN to insert for synchronous faults if fsync is required
* @iomap_errp: Storage for detailed error code in case of error
* @ops: Iomap ops passed from the file system
@@ -1632,17 +2068,16 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* has done all the necessary locking for page fault to proceed
* successfully.
*/
-vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
+ unsigned long *pfnp, int *iomap_errp,
+ const struct iomap_ops *ops)
{
- switch (pe_size) {
- case PE_SIZE_PTE:
+ if (order == 0)
return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
- case PE_SIZE_PMD:
+ else if (order == PMD_ORDER)
return dax_iomap_pmd_fault(vmf, pfnp, ops);
- default:
+ else
return VM_FAULT_FALLBACK;
- }
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
@@ -1655,16 +2090,17 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
* This function inserts a writeable PTE or PMD entry into the page tables
* for an mmaped DAX file. It also marks the page cache entry as dirty.
*/
-static vm_fault_t
-dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+ unsigned long pfn, unsigned int order)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
+ struct folio *folio;
void *entry;
vm_fault_t ret;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, order);
+ entry = get_next_unlocked_entry(&xas, order);
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
@@ -1677,14 +2113,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas);
+ folio = pfn_folio(pfn);
+ folio_ref_inc(folio);
if (order == 0)
- ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
#ifdef CONFIG_FS_DAX_PMD
else if (order == PMD_ORDER)
- ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
+ ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
#endif
else
ret = VM_FAULT_FALLBACK;
+ folio_put(folio);
dax_unlock_entry(&xas, entry);
trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
return ret;
@@ -1693,19 +2132,18 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
/**
* dax_finish_sync_fault - finish synchronous page fault
* @vmf: The description of the fault
- * @pe_size: Size of entry to be inserted
+ * @order: Order of entry to be inserted
* @pfn: PFN to insert
*
* This function ensures that the file range touched by the page fault is
* stored persistently on the media and handles inserting of appropriate page
* table entry.
*/
-vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, pfn_t pfn)
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
+ unsigned long pfn)
{
int err;
loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
- unsigned int order = pe_order(pe_size);
size_t len = PAGE_SIZE << order;
err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
@@ -1714,3 +2152,90 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
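[Note] Illustrative sketch, not part of the patch: the usual MAP_SYNC fault flow in a filesystem's fault handler with the new order/pfn signatures. example_dax_fault() and example_iomap_ops are hypothetical.

	static vm_fault_t example_dax_fault(struct vm_fault *vmf, unsigned int order)
	{
		unsigned long pfn;
		vm_fault_t ret;

		ret = dax_iomap_fault(vmf, order, &pfn, NULL, &example_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			/* flushes the touched range, then inserts the PTE/PMD */
			ret = dax_finish_sync_fault(vmf, order, pfn);
		return ret;
	}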
+
+static int dax_range_compare_iter(struct iomap_iter *it_src,
+ struct iomap_iter *it_dest, u64 len, bool *same)
+{
+ const struct iomap *smap = &it_src->iomap;
+ const struct iomap *dmap = &it_dest->iomap;
+ loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
+ void *saddr, *daddr;
+ int id, ret;
+
+ len = min(len, min(smap->length, dmap->length));
+
+ if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
+ *same = true;
+ goto advance;
+ }
+
+ if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
+ *same = false;
+ return 0;
+ }
+
+ id = dax_read_lock();
+ ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
+ &saddr, NULL);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
+ &daddr, NULL);
+ if (ret < 0)
+ goto out_unlock;
+
+ *same = !memcmp(saddr, daddr, len);
+ if (!*same)
+ len = 0;
+ dax_read_unlock(id);
+
+advance:
+ ret = iomap_iter_advance(it_src, len);
+ if (!ret)
+ ret = iomap_iter_advance(it_dest, len);
+ return ret;
+
+out_unlock:
+ dax_read_unlock(id);
+ return -EIO;
+}
+
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dst, loff_t dstoff, loff_t len, bool *same,
+ const struct iomap_ops *ops)
+{
+ struct iomap_iter src_iter = {
+ .inode = src,
+ .pos = srcoff,
+ .len = len,
+ .flags = IOMAP_DAX,
+ };
+ struct iomap_iter dst_iter = {
+ .inode = dst,
+ .pos = dstoff,
+ .len = len,
+ .flags = IOMAP_DAX,
+ };
+ int ret, status;
+
+ while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
+ (ret = iomap_iter(&dst_iter, ops)) > 0) {
+ status = dax_range_compare_iter(&src_iter, &dst_iter,
+ min(src_iter.len, dst_iter.len), same);
+ if (status < 0)
+ return ret;
+ src_iter.status = dst_iter.status = status;
+ }
+ return ret;
+}
+
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops)
+{
+ return __generic_remap_file_range_prep(file_in, pos_in, file_out,
+ pos_out, len, remap_flags, ops);
+}
+EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
diff --git a/fs/dcache.c b/fs/dcache.c
index cf871a81f4fd..dc2fff4811d1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -35,6 +35,8 @@
#include "internal.h"
#include "mount.h"
+#include <asm/runtime-const.h>
+
/*
* Usage:
* dcache->d_inode->i_lock protects:
@@ -51,8 +53,8 @@
* - d_lru
* - d_count
* - d_unhashed()
- * - d_parent and d_subdirs
- * - childrens' d_child and d_parent
+ * - d_parent and d_children
+ * - children's d_sib and d_parent
* - d_u.d_alias, d_inode
*
* Ordering:
@@ -71,14 +73,21 @@
* If no ancestor relationship:
* arbitrary, since it's serialized on rename_lock
*/
-int sysctl_vfs_cache_pressure __read_mostly = 100;
-EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
+static int sysctl_vfs_cache_pressure __read_mostly = 100;
+static int sysctl_vfs_cache_pressure_denom __read_mostly = 100;
+
+unsigned long vfs_pressure_ratio(unsigned long val)
+{
+ return mult_frac(val, sysctl_vfs_cache_pressure, sysctl_vfs_cache_pressure_denom);
+}
+EXPORT_SYMBOL_GPL(vfs_pressure_ratio);
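[Note] Illustrative sketch, not part of the patch: what the helper computes once the denominator is tunable, with mult_frac() expanded by hand to show why it avoids intermediate overflow.

	#include <stdio.h>

	static unsigned long pressure_ratio(unsigned long val,
					    unsigned long pressure,
					    unsigned long denom)
	{
		unsigned long q = val / denom, r = val % denom;

		return q * pressure + r * pressure / denom;	/* mult_frac() */
	}

	int main(void)
	{
		/* default 100/100 leaves the shrinker count untouched */
		printf("%lu\n", pressure_ratio(1000, 100, 100));	/* 1000 */
		/* raising the denominator to 200 halves the reported pressure */
		printf("%lu\n", pressure_ratio(1000, 100, 200));	/* 500 */
		return 0;
	}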
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(rename_lock);
-static struct kmem_cache *dentry_cache __read_mostly;
+static struct kmem_cache *__dentry_cache __ro_after_init;
+#define dentry_cache runtime_const_ptr(__dentry_cache)
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
@@ -94,15 +103,21 @@ EXPORT_SYMBOL(dotdot_name);
*
* This hash-function tries to avoid losing too many bits of hash
* information, yet avoid using a prime hash-size or similar.
+ *
+ * Marking the variables "used" ensures that the compiler doesn't
+ * optimize them away completely on architectures with runtime
+ * constant infrastructure, so that debuggers can still see their
+ * values. But updating these values has no effect on those arches.
*/
-static unsigned int d_hash_shift __read_mostly;
+static unsigned int d_hash_shift __ro_after_init __used;
-static struct hlist_bl_head *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
-static inline struct hlist_bl_head *d_hash(unsigned int hash)
+static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
{
- return dentry_hashtable + (hash >> d_hash_shift);
+ return runtime_const_ptr(dentry_hashtable) +
+ runtime_const_shift_right_32(hashlen, d_hash_shift);
}
#define IN_LOOKUP_SHIFT 10
@@ -115,17 +130,25 @@ static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
-
-/* Statistics gathering. */
-struct dentry_stat_t dentry_stat = {
- .age_limit = 45,
+struct dentry_stat_t {
+ long nr_dentry;
+ long nr_unused;
+ long age_limit; /* age in seconds */
+ long want_pages; /* pages requested by system */
+ long nr_negative; /* # of unused negative dentries */
+ long dummy; /* Reserved for future use */
};
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
+static int dentry_negative_policy;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+/* Statistics gathering. */
+static struct dentry_stat_t dentry_stat = {
+ .age_limit = 45,
+};
/*
* Here we resort to our own counters instead of using generic per-cpu counters
@@ -167,14 +190,60 @@ static long get_nr_dentry_negative(void)
return sum < 0 ? 0 : sum;
}
-int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
dentry_stat.nr_dentry = get_nr_dentry();
dentry_stat.nr_unused = get_nr_dentry_unused();
dentry_stat.nr_negative = get_nr_dentry_negative();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
+
+static const struct ctl_table fs_dcache_sysctls[] = {
+ {
+ .procname = "dentry-state",
+ .data = &dentry_stat,
+ .maxlen = 6*sizeof(long),
+ .mode = 0444,
+ .proc_handler = proc_nr_dentry,
+ },
+ {
+ .procname = "dentry-negative",
+ .data = &dentry_negative_policy,
+ .maxlen = sizeof(dentry_negative_policy),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+};
+
+static const struct ctl_table vm_dcache_sysctls[] = {
+ {
+ .procname = "vfs_cache_pressure",
+ .data = &sysctl_vfs_cache_pressure,
+ .maxlen = sizeof(sysctl_vfs_cache_pressure),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "vfs_cache_pressure_denom",
+ .data = &sysctl_vfs_cache_pressure_denom,
+ .maxlen = sizeof(sysctl_vfs_cache_pressure_denom),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE_HUNDRED,
+ },
+};
+
+static int __init init_fs_dcache_sysctls(void)
+{
+ register_sysctl_init("vm", vm_dcache_sysctls);
+ register_sysctl_init("fs", fs_dcache_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_dcache_sysctls);
#endif
/*
@@ -253,12 +322,16 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
return dentry_string_cmp(cs, ct, tcount);
}
+/*
+ * long names are allocated separately from dentry and never modified.
+ * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
+ * for the reason why ->count and ->head can't be combined into a union.
+ * dentry_string_cmp() relies upon ->name[] being word-aligned.
+ */
struct external_name {
- union {
- atomic_t count;
- struct rcu_head head;
- } u;
- unsigned char name[];
+ atomic_t count;
+ struct rcu_head head;
+ unsigned char name[] __aligned(sizeof(unsigned long));
};
static inline struct external_name *external_name(struct dentry *dentry)
@@ -282,31 +355,45 @@ static void __d_free_external(struct rcu_head *head)
static inline int dname_external(const struct dentry *dentry)
{
- return dentry->d_name.name != dentry->d_iname;
+ return dentry->d_name.name != dentry->d_shortname.string;
}
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
- name->name = dentry->d_name;
- if (unlikely(dname_external(dentry))) {
- atomic_inc(&external_name(dentry)->u.count);
+ unsigned seq;
+ const unsigned char *s;
+
+ rcu_read_lock();
+retry:
+ seq = read_seqcount_begin(&dentry->d_seq);
+ s = READ_ONCE(dentry->d_name.name);
+ name->name.hash_len = dentry->d_name.hash_len;
+ name->name.name = name->inline_name.string;
+ if (likely(s == dentry->d_shortname.string)) {
+ name->inline_name = dentry->d_shortname;
} else {
- memcpy(name->inline_name, dentry->d_iname,
- dentry->d_name.len + 1);
- name->name.name = name->inline_name;
+ struct external_name *p;
+ p = container_of(s, struct external_name, name[0]);
+ // get a valid reference
+ if (unlikely(!atomic_inc_not_zero(&p->count)))
+ goto retry;
+ name->name.name = s;
}
- spin_unlock(&dentry->d_lock);
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ release_dentry_name_snapshot(name);
+ goto retry;
+ }
+ rcu_read_unlock();
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
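[Note] Illustrative sketch, not part of the patch: the lockless snapshot above relies on "take a reference only if the count is still non-zero"; if that fails, the external name is already being freed and the whole read is retried. A stand-alone version of that primitive using C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool inc_not_zero(atomic_int *count)
	{
		int old = atomic_load(count);

		while (old != 0) {
			/* on failure, old is refreshed and the test repeats */
			if (atomic_compare_exchange_weak(count, &old, old + 1))
				return true;	/* reference taken */
		}
		return false;			/* already zero: do not resurrect */
	}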
void release_dentry_name_snapshot(struct name_snapshot *name)
{
- if (unlikely(name->name.name != name->inline_name)) {
+ if (unlikely(name->name.name != name->inline_name.string)) {
struct external_name *p;
p = container_of(name->name.name, struct external_name, name[0]);
- if (unlikely(atomic_dec_and_test(&p->u.count)))
- kfree_rcu(p, u.head);
+ if (unlikely(atomic_dec_and_test(&p->count)))
+ kfree_rcu(p, head);
}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
@@ -319,7 +406,7 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
dentry->d_inode = inode;
flags = READ_ONCE(dentry->d_flags);
- flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ flags &= ~DCACHE_ENTRY_TYPE;
flags |= type_flags;
smp_store_release(&dentry->d_flags, flags);
}
@@ -328,10 +415,14 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
unsigned flags = READ_ONCE(dentry->d_flags);
- flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ flags &= ~DCACHE_ENTRY_TYPE;
WRITE_ONCE(dentry->d_flags, flags);
dentry->d_inode = NULL;
- if (dentry->d_flags & DCACHE_LRU_LIST)
+ /*
+ * The negative counter only tracks dentries on the LRU. Don't inc if
+ * d_lru is on another list.
+ */
+ if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_inc(nr_dentry_negative);
}
@@ -340,7 +431,7 @@ static void dentry_free(struct dentry *dentry)
WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
if (unlikely(dname_external(dentry))) {
struct external_name *p = external_name(dentry);
- if (likely(atomic_dec_and_test(&p->u.count))) {
+ if (likely(atomic_dec_and_test(&p->count))) {
call_rcu(&dentry->d_u.d_rcu, __d_free_external);
return;
}
@@ -403,7 +494,8 @@ static void d_lru_add(struct dentry *dentry)
this_cpu_inc(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_inc(nr_dentry_negative);
- WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+ WARN_ON_ONCE(!list_lru_add_obj(
+ &dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
static void d_lru_del(struct dentry *dentry)
@@ -413,7 +505,8 @@ static void d_lru_del(struct dentry *dentry)
this_cpu_dec(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_dec(nr_dentry_negative);
- WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+ WARN_ON_ONCE(!list_lru_del_obj(
+ &dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
static void d_shrink_del(struct dentry *dentry)
@@ -512,7 +605,7 @@ void d_drop(struct dentry *dentry)
}
EXPORT_SYMBOL(d_drop);
-static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
+static inline void dentry_unlist(struct dentry *dentry)
{
struct dentry *next;
/*
@@ -520,12 +613,12 @@ static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
* attached to the dentry tree
*/
dentry->d_flags |= DCACHE_DENTRY_KILLED;
- if (unlikely(list_empty(&dentry->d_child)))
+ if (unlikely(hlist_unhashed(&dentry->d_sib)))
return;
- __list_del_entry(&dentry->d_child);
+ __hlist_del(&dentry->d_sib);
/*
* Cursors can move around the list of children. While we'd been
- * a normal list member, it didn't matter - ->d_child.next would've
+ * a normal list member, it didn't matter - ->d_sib.next would've
* been updated. However, from now on it won't be and for the
* things like d_walk() it might end up with a nasty surprise.
* Normally d_walk() doesn't care about cursors moving around -
@@ -533,29 +626,27 @@ static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
* of its own, we get through it without ever unlocking the parent.
* There is one exception, though - if we ascend from a child that
* gets killed as soon as we unlock it, the next sibling is found
- * using the value left in its ->d_child.next. And if _that_
+ * using the value left in its ->d_sib.next. And if _that_
* pointed to a cursor, and cursor got moved (e.g. by lseek())
* before d_walk() regains parent->d_lock, we'll end up skipping
* everything the cursor had been moved past.
*
- * Solution: make sure that the pointer left behind in ->d_child.next
+ * Solution: make sure that the pointer left behind in ->d_sib.next
* points to something that won't be moving around. I.e. skip the
* cursors.
*/
- while (dentry->d_child.next != &parent->d_subdirs) {
- next = list_entry(dentry->d_child.next, struct dentry, d_child);
+ while (dentry->d_sib.next) {
+ next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
break;
- dentry->d_child.next = next->d_child.next;
+ dentry->d_sib.next = next->d_sib.next;
}
}
-static void __dentry_kill(struct dentry *dentry)
+static struct dentry *__dentry_kill(struct dentry *dentry)
{
struct dentry *parent = NULL;
bool can_free = true;
- if (!IS_ROOT(dentry))
- parent = dentry->d_parent;
/*
* The dentry is now unrecoverably dead to the world.
@@ -575,9 +666,6 @@ static void __dentry_kill(struct dentry *dentry)
}
/* if it was on the hash then remove it */
__d_drop(dentry);
- dentry_unlist(dentry, parent);
- if (parent)
- spin_unlock(&parent->d_lock);
if (dentry->d_inode)
dentry_unlink_inode(dentry);
else
@@ -586,80 +674,114 @@ static void __dentry_kill(struct dentry *dentry)
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
- spin_lock(&dentry->d_lock);
- if (dentry->d_flags & DCACHE_SHRINK_LIST) {
- dentry->d_flags |= DCACHE_MAY_FREE;
- can_free = false;
+ cond_resched();
+ /* now that it's negative, ->d_parent is stable */
+ if (!IS_ROOT(dentry)) {
+ parent = dentry->d_parent;
+ spin_lock(&parent->d_lock);
}
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ dentry_unlist(dentry);
+ if (dentry->d_flags & DCACHE_SHRINK_LIST)
+ can_free = false;
spin_unlock(&dentry->d_lock);
if (likely(can_free))
dentry_free(dentry);
- cond_resched();
-}
-
-static struct dentry *__lock_parent(struct dentry *dentry)
-{
- struct dentry *parent;
- rcu_read_lock();
- spin_unlock(&dentry->d_lock);
-again:
- parent = READ_ONCE(dentry->d_parent);
- spin_lock(&parent->d_lock);
- /*
- * We can't blindly lock dentry until we are sure
- * that we won't violate the locking order.
- * Any changes of dentry->d_parent must have
- * been done with parent->d_lock held, so
- * spin_lock() above is enough of a barrier
- * for checking if it's still our child.
- */
- if (unlikely(parent != dentry->d_parent)) {
+ if (parent && --parent->d_lockref.count) {
spin_unlock(&parent->d_lock);
- goto again;
+ return NULL;
}
- rcu_read_unlock();
- if (parent != dentry)
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- else
- parent = NULL;
return parent;
}
-static inline struct dentry *lock_parent(struct dentry *dentry)
+/*
+ * Lock a dentry for feeding it to __dentry_kill().
+ * Called under rcu_read_lock() and dentry->d_lock; the former
+ * guarantees that nothing we access will be freed under us.
+ * Note that dentry is *not* protected from concurrent dentry_kill(),
+ * d_delete(), etc.
+ *
+ * Return false if dentry is busy. Otherwise, return true and have
+ * that dentry's inode locked.
+ */
+
+static bool lock_for_kill(struct dentry *dentry)
{
- struct dentry *parent = dentry->d_parent;
- if (IS_ROOT(dentry))
- return NULL;
- if (likely(spin_trylock(&parent->d_lock)))
- return parent;
- return __lock_parent(dentry);
+ struct inode *inode = dentry->d_inode;
+
+ if (unlikely(dentry->d_lockref.count))
+ return false;
+
+ if (!inode || likely(spin_trylock(&inode->i_lock)))
+ return true;
+
+ do {
+ spin_unlock(&dentry->d_lock);
+ spin_lock(&inode->i_lock);
+ spin_lock(&dentry->d_lock);
+ if (likely(inode == dentry->d_inode))
+ break;
+ spin_unlock(&inode->i_lock);
+ inode = dentry->d_inode;
+ } while (inode);
+ if (likely(!dentry->d_lockref.count))
+ return true;
+ if (inode)
+ spin_unlock(&inode->i_lock);
+ return false;
}
-static inline bool retain_dentry(struct dentry *dentry)
+/*
+ * Decide if dentry is worth retaining. Usually this is called with dentry
+ * locked; if not locked, we are more limited and might not be able to tell
+ * without a lock. False in this case means "punt to locked path and recheck".
+ *
+ * In case we aren't locked, these predicates are not "stable". However, it is
+ * sufficient that at some point after we dropped the reference the dentry was
+ * hashed and the flags had the proper value. Other dentry users may have
+ * re-gotten a reference to the dentry and change that, but our work is done -
+ * we can leave the dentry around with a zero refcount.
+ */
+static inline bool retain_dentry(struct dentry *dentry, bool locked)
{
- WARN_ON(d_in_lookup(dentry));
+ unsigned int d_flags;
+
+ smp_rmb();
+ d_flags = READ_ONCE(dentry->d_flags);
- /* Unreachable? Get rid of it */
+ // Unreachable? Nobody would be able to look it up, no point retaining
if (unlikely(d_unhashed(dentry)))
return false;
- if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+ // Same if it's disconnected
+ if (unlikely(d_flags & DCACHE_DISCONNECTED))
return false;
- if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
- if (dentry->d_op->d_delete(dentry))
+ // ->d_delete() might tell us not to bother, but that requires
+ // ->d_lock; can't decide without it
+ if (unlikely(d_flags & DCACHE_OP_DELETE)) {
+ if (!locked || dentry->d_op->d_delete(dentry))
return false;
}
- if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
+ // Explicitly told not to bother
+ if (unlikely(d_flags & DCACHE_DONTCACHE))
return false;
- /* retain; LRU fodder */
- dentry->d_lockref.count--;
- if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
+ // At this point it looks like we ought to keep it. We also might
+ // need to do something - put it on LRU if it wasn't there already
+ // and mark it referenced if it was on LRU, but not marked yet.
+ // Unfortunately, both actions require ->d_lock, so in lockless
+ // case we'd have to punt rather than doing those.
+ if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
+ if (!locked)
+ return false;
d_lru_add(dentry);
- else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
+ } else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
+ if (!locked)
+ return false;
dentry->d_flags |= DCACHE_REFERENCED;
+ }
return true;
}
@@ -673,66 +795,17 @@ void d_mark_dontcache(struct inode *inode)
de->d_flags |= DCACHE_DONTCACHE;
spin_unlock(&de->d_lock);
}
- inode->i_state |= I_DONTCACHE;
+ inode_state_set(inode, I_DONTCACHE);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
/*
- * Finish off a dentry we've decided to kill.
- * dentry->d_lock must be held, returns with it unlocked.
- * Returns dentry requiring refcount drop, or NULL if we're done.
- */
-static struct dentry *dentry_kill(struct dentry *dentry)
- __releases(dentry->d_lock)
-{
- struct inode *inode = dentry->d_inode;
- struct dentry *parent = NULL;
-
- if (inode && unlikely(!spin_trylock(&inode->i_lock)))
- goto slow_positive;
-
- if (!IS_ROOT(dentry)) {
- parent = dentry->d_parent;
- if (unlikely(!spin_trylock(&parent->d_lock))) {
- parent = __lock_parent(dentry);
- if (likely(inode || !dentry->d_inode))
- goto got_locks;
- /* negative that became positive */
- if (parent)
- spin_unlock(&parent->d_lock);
- inode = dentry->d_inode;
- goto slow_positive;
- }
- }
- __dentry_kill(dentry);
- return parent;
-
-slow_positive:
- spin_unlock(&dentry->d_lock);
- spin_lock(&inode->i_lock);
- spin_lock(&dentry->d_lock);
- parent = lock_parent(dentry);
-got_locks:
- if (unlikely(dentry->d_lockref.count != 1)) {
- dentry->d_lockref.count--;
- } else if (likely(!retain_dentry(dentry))) {
- __dentry_kill(dentry);
- return parent;
- }
- /* we are keeping it, after all */
- if (inode)
- spin_unlock(&inode->i_lock);
- if (parent)
- spin_unlock(&parent->d_lock);
- spin_unlock(&dentry->d_lock);
- return NULL;
-}
-
-/*
* Try to do a lockless dput(), and return whether that was successful.
*
* If unsuccessful, we return false, having already taken the dentry lock.
+ * In that case refcount is guaranteed to be zero and we have already
+ * decided that it's not worth keeping around.
*
* The caller needs to hold the RCU read lock, so that the dentry is
* guaranteed to stay around even if the refcount goes down to zero!
@@ -740,18 +813,9 @@ got_locks:
static inline bool fast_dput(struct dentry *dentry)
{
int ret;
- unsigned int d_flags;
/*
- * If we have a d_op->d_delete() operation, we sould not
- * let the dentry count go to zero, so use "put_or_lock".
- */
- if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
- return lockref_put_or_lock(&dentry->d_lockref);
-
- /*
- * .. otherwise, we can try to just decrement the
- * lockref optimistically.
+ * try to decrement the lockref optimistically.
*/
ret = lockref_put_return(&dentry->d_lockref);
@@ -762,12 +826,12 @@ static inline bool fast_dput(struct dentry *dentry)
*/
if (unlikely(ret < 0)) {
spin_lock(&dentry->d_lock);
- if (dentry->d_lockref.count > 1) {
- dentry->d_lockref.count--;
+ if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
spin_unlock(&dentry->d_lock);
return true;
}
- return false;
+ dentry->d_lockref.count--;
+ goto locked;
}
/*
@@ -777,45 +841,18 @@ static inline bool fast_dput(struct dentry *dentry)
return true;
/*
- * Careful, careful. The reference count went down
- * to zero, but we don't hold the dentry lock, so
- * somebody else could get it again, and do another
- * dput(), and we need to not race with that.
- *
- * However, there is a very special and common case
- * where we don't care, because there is nothing to
- * do: the dentry is still hashed, it does not have
- * a 'delete' op, and it's referenced and already on
- * the LRU list.
- *
- * NOTE! Since we aren't locked, these values are
- * not "stable". However, it is sufficient that at
- * some point after we dropped the reference the
- * dentry was hashed and the flags had the proper
- * value. Other dentry users may have re-gotten
- * a reference to the dentry and change that, but
- * our work is done - we can leave the dentry
- * around with a zero refcount.
- *
- * Nevertheless, there are two cases that we should kill
- * the dentry anyway.
- * 1. free disconnected dentries as soon as their refcount
- * reached zero.
- * 2. free dentries if they should not be cached.
+ * Can we decide that decrement of refcount is all we needed without
+ * taking the lock? There's a very common case when it's all we need -
+ * dentry looks like it ought to be retained and there's nothing else
+ * to do.
*/
- smp_rmb();
- d_flags = READ_ONCE(dentry->d_flags);
- d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST |
- DCACHE_DISCONNECTED | DCACHE_DONTCACHE;
-
- /* Nothing to do? Dropping the reference was all we needed? */
- if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+ if (retain_dentry(dentry, false))
return true;
/*
- * Not the fast normal case? Get the lock. We've already decremented
- * the refcount, but we'll need to re-check the situation after
- * getting the lock.
+ * Either not worth retaining or we can't tell without the lock.
+ * Get the lock, then. We've already decremented the refcount to 0,
+ * but we'll need to re-check the situation after getting the lock.
*/
spin_lock(&dentry->d_lock);
@@ -825,20 +862,32 @@ static inline bool fast_dput(struct dentry *dentry)
* else could have killed it and marked it dead. Either way, we
* don't need to do anything else.
*/
- if (dentry->d_lockref.count) {
+locked:
+ if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
spin_unlock(&dentry->d_lock);
return true;
}
-
- /*
- * Re-get the reference we optimistically dropped. We hold the
- * lock, and we just tested that it was zero, so we can just
- * set it to 1.
- */
- dentry->d_lockref.count = 1;
return false;
}
+static void finish_dput(struct dentry *dentry)
+ __releases(dentry->d_lock)
+ __releases(RCU)
+{
+ while (lock_for_kill(dentry)) {
+ rcu_read_unlock();
+ dentry = __dentry_kill(dentry);
+ if (!dentry)
+ return;
+ if (retain_dentry(dentry, true)) {
+ spin_unlock(&dentry->d_lock);
+ return;
+ }
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+ spin_unlock(&dentry->d_lock);
+}
/*
* This is dput
@@ -868,39 +917,36 @@ static inline bool fast_dput(struct dentry *dentry)
*/
void dput(struct dentry *dentry)
{
- while (dentry) {
- might_sleep();
-
- rcu_read_lock();
- if (likely(fast_dput(dentry))) {
- rcu_read_unlock();
- return;
- }
-
- /* Slow case: now with the dentry lock held */
+ if (!dentry)
+ return;
+ might_sleep();
+ rcu_read_lock();
+ if (likely(fast_dput(dentry))) {
rcu_read_unlock();
-
- if (likely(retain_dentry(dentry))) {
- spin_unlock(&dentry->d_lock);
- return;
- }
-
- dentry = dentry_kill(dentry);
+ return;
}
+ finish_dput(dentry);
}
EXPORT_SYMBOL(dput);
-static void __dput_to_list(struct dentry *dentry, struct list_head *list)
+void d_make_discardable(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ WARN_ON(!(dentry->d_flags & DCACHE_PERSISTENT));
+ dentry->d_flags &= ~DCACHE_PERSISTENT;
+ dentry->d_lockref.count--;
+ rcu_read_lock();
+ finish_dput(dentry);
+}
+EXPORT_SYMBOL(d_make_discardable);
+
+static void to_shrink_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
- if (dentry->d_flags & DCACHE_SHRINK_LIST) {
- /* let the owner of the list it's on deal with it */
- --dentry->d_lockref.count;
- } else {
+ if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
if (dentry->d_flags & DCACHE_LRU_LIST)
d_lru_del(dentry);
- if (!--dentry->d_lockref.count)
- d_shrink_add(dentry, list);
+ d_shrink_add(dentry, list);
}
}
@@ -912,22 +958,10 @@ void dput_to_list(struct dentry *dentry, struct list_head *list)
return;
}
rcu_read_unlock();
- if (!retain_dentry(dentry))
- __dput_to_list(dentry, list);
+ to_shrink_list(dentry, list);
spin_unlock(&dentry->d_lock);
}
-/* This must be called with d_lock held */
-static inline void __dget_dlock(struct dentry *dentry)
-{
- dentry->d_lockref.count++;
-}
-
-static inline void __dget(struct dentry *dentry)
-{
- lockref_get(&dentry->d_lockref);
-}
-
struct dentry *dget_parent(struct dentry *dentry)
{
int gotref;
@@ -977,7 +1011,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
if (hlist_empty(&inode->i_dentry))
return NULL;
alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
- __dget(alias);
+ lockref_get(&alias->d_lockref);
return alias;
}
@@ -1009,7 +1043,7 @@ static struct dentry *__d_find_alias(struct inode *inode)
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
spin_lock(&alias->d_lock);
if (!d_unhashed(alias)) {
- __dget_dlock(alias);
+ dget_dlock(alias);
spin_unlock(&alias->d_lock);
return alias;
}
@@ -1057,7 +1091,7 @@ struct dentry *d_find_alias_rcu(struct inode *inode)
spin_lock(&inode->i_lock);
// ->i_dentry and ->i_rcu are colocated, but the latter won't be
// used without having I_FREEING set, which means no aliases left
- if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
+ if (likely(!(inode_state_read(inode) & I_FREEING) && !hlist_empty(l))) {
if (S_ISDIR(inode->i_mode)) {
de = hlist_entry(l->first, struct dentry, d_u.d_alias);
} else {
@@ -1070,115 +1104,70 @@ struct dentry *d_find_alias_rcu(struct inode *inode)
return de;
}
+void d_dispose_if_unused(struct dentry *dentry, struct list_head *dispose)
+{
+ spin_lock(&dentry->d_lock);
+ if (!dentry->d_lockref.count)
+ to_shrink_list(dentry, dispose);
+ spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_dispose_if_unused);
+
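A minimal sketch (not part of the patch) of the calling convention for the new export: batch the unused aliases of an inode onto a private dispose list under ->i_lock, then reap them with shrink_dentry_list() outside the lock. It assumes the caller already holds a reference on the inode, just as d_prune_aliases() below does.

static void example_prune_unused_aliases(struct inode *inode)
{
	LIST_HEAD(dispose);
	struct dentry *alias;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias)
		d_dispose_if_unused(alias, &dispose);	/* takes and drops alias->d_lock */
	spin_unlock(&inode->i_lock);

	shrink_dentry_list(&dispose);	/* actual killing happens outside ->i_lock */
}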
/*
* Try to kill dentries associated with this inode.
* WARNING: you must own a reference to inode.
*/
void d_prune_aliases(struct inode *inode)
{
+ LIST_HEAD(dispose);
struct dentry *dentry;
-restart:
+
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (!dentry->d_lockref.count) {
- struct dentry *parent = lock_parent(dentry);
- if (likely(!dentry->d_lockref.count)) {
- __dentry_kill(dentry);
- dput(parent);
- goto restart;
- }
- if (parent)
- spin_unlock(&parent->d_lock);
- }
- spin_unlock(&dentry->d_lock);
- }
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias)
+ d_dispose_if_unused(dentry, &dispose);
spin_unlock(&inode->i_lock);
+ shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(d_prune_aliases);
-/*
- * Lock a dentry from shrink list.
- * Called under rcu_read_lock() and dentry->d_lock; the former
- * guarantees that nothing we access will be freed under us.
- * Note that dentry is *not* protected from concurrent dentry_kill(),
- * d_delete(), etc.
- *
- * Return false if dentry has been disrupted or grabbed, leaving
- * the caller to kick it off-list. Otherwise, return true and have
- * that dentry's inode and parent both locked.
- */
-static bool shrink_lock_dentry(struct dentry *dentry)
+static inline void shrink_kill(struct dentry *victim)
{
- struct inode *inode;
- struct dentry *parent;
-
- if (dentry->d_lockref.count)
- return false;
-
- inode = dentry->d_inode;
- if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
- spin_unlock(&dentry->d_lock);
- spin_lock(&inode->i_lock);
- spin_lock(&dentry->d_lock);
- if (unlikely(dentry->d_lockref.count))
- goto out;
- /* changed inode means that somebody had grabbed it */
- if (unlikely(inode != dentry->d_inode))
- goto out;
- }
-
- parent = dentry->d_parent;
- if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
- return true;
-
- spin_unlock(&dentry->d_lock);
- spin_lock(&parent->d_lock);
- if (unlikely(parent != dentry->d_parent)) {
- spin_unlock(&parent->d_lock);
- spin_lock(&dentry->d_lock);
- goto out;
- }
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- if (likely(!dentry->d_lockref.count))
- return true;
- spin_unlock(&parent->d_lock);
-out:
- if (inode)
- spin_unlock(&inode->i_lock);
- return false;
+ do {
+ rcu_read_unlock();
+ victim = __dentry_kill(victim);
+ rcu_read_lock();
+ } while (victim && lock_for_kill(victim));
+ rcu_read_unlock();
+ if (victim)
+ spin_unlock(&victim->d_lock);
}
void shrink_dentry_list(struct list_head *list)
{
while (!list_empty(list)) {
- struct dentry *dentry, *parent;
+ struct dentry *dentry;
dentry = list_entry(list->prev, struct dentry, d_lru);
spin_lock(&dentry->d_lock);
rcu_read_lock();
- if (!shrink_lock_dentry(dentry)) {
- bool can_free = false;
+ if (!lock_for_kill(dentry)) {
+ bool can_free;
rcu_read_unlock();
d_shrink_del(dentry);
- if (dentry->d_lockref.count < 0)
- can_free = dentry->d_flags & DCACHE_MAY_FREE;
+ can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
spin_unlock(&dentry->d_lock);
if (can_free)
dentry_free(dentry);
continue;
}
- rcu_read_unlock();
d_shrink_del(dentry);
- parent = dentry->d_parent;
- if (parent != dentry)
- __dput_to_list(parent, list);
- __dentry_kill(dentry);
+ shrink_kill(dentry);
}
}
+EXPORT_SYMBOL(shrink_dentry_list);
static enum lru_status dentry_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -1215,7 +1204,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
*
* This is guaranteed by the fact that all LRU management
* functions are intermediated by the LRU API calls like
- * list_lru_add and list_lru_del. List movement in this file
+ * list_lru_add_obj and list_lru_del_obj. List movement in this file
* only ever occur through this functions or through callbacks
* like this one, that are called from the LRU API.
*
@@ -1259,7 +1248,7 @@ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -1323,8 +1312,7 @@ enum d_walk_ret {
static void d_walk(struct dentry *parent, void *data,
enum d_walk_ret (*enter)(void *, struct dentry *))
{
- struct dentry *this_parent;
- struct list_head *next;
+ struct dentry *this_parent, *dentry;
unsigned seq = 0;
enum d_walk_ret ret;
bool retry = true;
@@ -1346,13 +1334,9 @@ again:
break;
}
repeat:
- next = this_parent->d_subdirs.next;
+ dentry = d_first_child(this_parent);
resume:
- while (next != &this_parent->d_subdirs) {
- struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
- next = tmp->next;
-
+ hlist_for_each_entry_from(dentry, d_sib) {
if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
continue;
@@ -1373,7 +1357,7 @@ resume:
continue;
}
- if (!list_empty(&dentry->d_subdirs)) {
+ if (!hlist_empty(&dentry->d_children)) {
spin_unlock(&this_parent->d_lock);
spin_release(&dentry->d_lock.dep_map, _RET_IP_);
this_parent = dentry;
@@ -1388,24 +1372,23 @@ resume:
rcu_read_lock();
ascend:
if (this_parent != parent) {
- struct dentry *child = this_parent;
- this_parent = child->d_parent;
+ dentry = this_parent;
+ this_parent = dentry->d_parent;
- spin_unlock(&child->d_lock);
+ spin_unlock(&dentry->d_lock);
spin_lock(&this_parent->d_lock);
/* might go back up the wrong parent if we have had a rename. */
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
/* go into the first sibling still alive */
- do {
- next = child->d_child.next;
- if (next == &this_parent->d_subdirs)
- goto ascend;
- child = list_entry(next, struct dentry, d_child);
- } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
- rcu_read_unlock();
- goto resume;
+ hlist_for_each_entry_continue(dentry, d_sib) {
+ if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
+ rcu_read_unlock();
+ goto resume;
+ }
+ }
+ goto ascend;
}
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
@@ -1431,6 +1414,7 @@ struct check_mount {
unsigned int mounted;
};
+/* locks: mount_locked_reader && dentry->d_lock */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
struct check_mount *info = data;
@@ -1457,9 +1441,8 @@ int path_has_submounts(const struct path *parent)
{
struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
- read_seqlock_excl(&mount_lock);
+ guard(mount_locked_reader)();
d_walk(parent->dentry, &data, path_check_mount);
- read_sequnlock_excl(&mount_lock);
return data.mounted;
}
@@ -1477,7 +1460,7 @@ int d_set_mounted(struct dentry *dentry)
{
struct dentry *p;
int ret = -ENOENT;
- write_seqlock(&rename_lock);
+ read_seqlock_excl(&rename_lock);
for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
/* Need exclusion wrt. d_invalidate() */
spin_lock(&p->d_lock);
@@ -1497,7 +1480,7 @@ int d_set_mounted(struct dentry *dentry)
}
spin_unlock(&dentry->d_lock);
out:
- write_sequnlock(&rename_lock);
+ read_sequnlock_excl(&rename_lock);
return ret;
}
@@ -1505,7 +1488,7 @@ out:
* Search the dentry child list of the specified parent,
* and move any unused dentries to the end of the unused
* list for prune_dcache(). We descend to the next level
- * whenever the d_subdirs list is non-empty and continue
+ * whenever the d_children list is non-empty and continue
* searching.
*
* It returns zero iff there are no unused children,
@@ -1535,13 +1518,11 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
if (dentry->d_flags & DCACHE_SHRINK_LIST) {
data->found++;
- } else {
- if (dentry->d_flags & DCACHE_LRU_LIST)
- d_lru_del(dentry);
- if (!dentry->d_lockref.count) {
- d_shrink_add(dentry, &data->dispose);
- data->found++;
- }
+ } else if (!dentry->d_lockref.count) {
+ to_shrink_list(dentry, &data->dispose);
+ data->found++;
+ } else if (dentry->d_lockref.count < 0) {
+ data->found++;
}
/*
* We can return to the caller if we have found some (this
@@ -1554,6 +1535,15 @@ out:
return ret;
}
+static enum d_walk_ret select_collect_umount(void *_data, struct dentry *dentry)
+{
+ if (dentry->d_flags & DCACHE_PERSISTENT) {
+ dentry->d_flags &= ~DCACHE_PERSISTENT;
+ dentry->d_lockref.count--;
+ }
+ return select_collect(_data, dentry);
+}
+
static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
struct select_data *data = _data;
@@ -1562,17 +1552,13 @@ static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
if (data->start == dentry)
goto out;
- if (dentry->d_flags & DCACHE_SHRINK_LIST) {
- if (!dentry->d_lockref.count) {
+ if (!dentry->d_lockref.count) {
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
rcu_read_lock();
data->victim = dentry;
return D_WALK_QUIT;
}
- } else {
- if (dentry->d_flags & DCACHE_LRU_LIST)
- d_lru_del(dentry);
- if (!dentry->d_lockref.count)
- d_shrink_add(dentry, &data->dispose);
+ to_shrink_list(dentry, &data->dispose);
}
/*
* We can return to the caller if we have found some (this
@@ -1586,18 +1572,20 @@ out:
}
/**
- * shrink_dcache_parent - prune dcache
+ * shrink_dcache_tree - prune dcache
* @parent: parent of entries to prune
+ * @for_umount: true if we want to unpin the persistent ones
*
* Prune the dcache to remove unused children of the parent dentry.
*/
-void shrink_dcache_parent(struct dentry *parent)
+static void shrink_dcache_tree(struct dentry *parent, bool for_umount)
{
for (;;) {
struct select_data data = {.start = parent};
INIT_LIST_HEAD(&data.dispose);
- d_walk(parent, &data, select_collect);
+ d_walk(parent, &data,
+ for_umount ? select_collect_umount : select_collect);
if (!list_empty(&data.dispose)) {
shrink_dentry_list(&data.dispose);
@@ -1610,36 +1598,36 @@ void shrink_dcache_parent(struct dentry *parent)
data.victim = NULL;
d_walk(parent, &data, select_collect2);
if (data.victim) {
- struct dentry *parent;
spin_lock(&data.victim->d_lock);
- if (!shrink_lock_dentry(data.victim)) {
+ if (!lock_for_kill(data.victim)) {
spin_unlock(&data.victim->d_lock);
rcu_read_unlock();
} else {
- rcu_read_unlock();
- parent = data.victim->d_parent;
- if (parent != data.victim)
- __dput_to_list(parent, &data.dispose);
- __dentry_kill(data.victim);
+ shrink_kill(data.victim);
}
}
if (!list_empty(&data.dispose))
shrink_dentry_list(&data.dispose);
}
}
+
+void shrink_dcache_parent(struct dentry *parent)
+{
+ shrink_dcache_tree(parent, false);
+}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
/* it has busy descendents; complain about those instead */
- if (!list_empty(&dentry->d_subdirs))
+ if (!hlist_empty(&dentry->d_children))
return D_WALK_CONTINUE;
/* root with refcount 1 is fine */
if (dentry == _data && dentry->d_lockref.count == 1)
return D_WALK_CONTINUE;
- printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+ WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
" still in use (%d) [unmount of %s %s]\n",
dentry,
dentry->d_inode ?
@@ -1648,13 +1636,12 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
dentry->d_lockref.count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
- WARN_ON(1);
return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
- shrink_dcache_parent(dentry);
+ shrink_dcache_tree(dentry, true);
d_walk(dentry, dentry, umount_check);
d_drop(dentry);
dput(dentry);
@@ -1667,7 +1654,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
{
struct dentry *dentry;
- WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
+ rwsem_assert_held_write(&sb->s_umount);
dentry = sb->s_root;
sb->s_root = NULL;
@@ -1683,8 +1670,7 @@ static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
struct dentry **victim = _data;
if (d_mountpoint(dentry)) {
- __dget_dlock(dentry);
- *victim = dentry;
+ *victim = dget_dlock(dentry);
return D_WALK_QUIT;
}
return D_WALK_CONTINUE;
@@ -1741,7 +1727,8 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
char *dname;
int err;
- dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
+ dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
+ GFP_KERNEL);
if (!dentry)
return NULL;
@@ -1751,10 +1738,10 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
* will still always have a NUL at the end, even if we might
* be overwriting an internal NUL character
*/
- dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
+ dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0;
if (unlikely(!name)) {
name = &slash_name;
- dname = dentry->d_iname;
+ dname = dentry->d_shortname.string;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
struct external_name *p = kmalloc(size + name->len,
@@ -1764,35 +1751,34 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
- atomic_set(&p->u.count, 1);
+ atomic_set(&p->count, 1);
dname = p->name;
} else {
- dname = dentry->d_iname;
+ dname = dentry->d_shortname.string;
}
- dentry->d_name.len = name->len;
- dentry->d_name.hash = name->hash;
+ dentry->__d_name.len = name->len;
+ dentry->__d_name.hash = name->hash;
memcpy(dname, name->name, name->len);
dname[name->len] = 0;
/* Make sure we always see the terminating NUL character */
- smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
+ smp_store_release(&dentry->__d_name.name, dname); /* ^^^ */
- dentry->d_lockref.count = 1;
dentry->d_flags = 0;
- spin_lock_init(&dentry->d_lock);
+ lockref_init(&dentry->d_lockref);
seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
dentry->d_inode = NULL;
dentry->d_parent = dentry;
dentry->d_sb = sb;
- dentry->d_op = NULL;
+ dentry->d_op = sb->__s_d_op;
+ dentry->d_flags = sb->s_d_flags;
dentry->d_fsdata = NULL;
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
- INIT_LIST_HEAD(&dentry->d_subdirs);
+ INIT_HLIST_HEAD(&dentry->d_children);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
- INIT_LIST_HEAD(&dentry->d_child);
- d_set_d_op(dentry, dentry->d_sb->s_d_op);
+ INIT_HLIST_NODE(&dentry->d_sib);
if (dentry->d_op && dentry->d_op->d_init) {
err = dentry->d_op->d_init(dentry);
@@ -1828,9 +1814,8 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
* don't need child lock because it is not subject
* to concurrency here
*/
- __dget_dlock(parent);
- dentry->d_parent = parent;
- list_add(&dentry->d_child, &parent->d_subdirs);
+ dentry->d_parent = dget_dlock(parent);
+ hlist_add_head(&dentry->d_sib, &parent->d_children);
spin_unlock(&parent->d_lock);
return dentry;
@@ -1870,9 +1855,16 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
*/
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
+ static const struct dentry_operations anon_ops = {
+ .d_dname = simple_dname
+ };
struct dentry *dentry = __d_alloc(sb, name);
- if (likely(dentry))
+ if (likely(dentry)) {
dentry->d_flags |= DCACHE_NORCU;
+ /* d_op_flags(&anon_ops) is 0 */
+ if (!dentry->d_op)
+ dentry->d_op = &anon_ops;
+ }
return dentry;
}
@@ -1886,51 +1878,50 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
}
EXPORT_SYMBOL(d_alloc_name);
-void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+#define DCACHE_OP_FLAGS \
+ (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | \
+ DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | DCACHE_OP_PRUNE | \
+ DCACHE_OP_REAL)
+
+static unsigned int d_op_flags(const struct dentry_operations *op)
+{
+ unsigned int flags = 0;
+ if (op) {
+ if (op->d_hash)
+ flags |= DCACHE_OP_HASH;
+ if (op->d_compare)
+ flags |= DCACHE_OP_COMPARE;
+ if (op->d_revalidate)
+ flags |= DCACHE_OP_REVALIDATE;
+ if (op->d_weak_revalidate)
+ flags |= DCACHE_OP_WEAK_REVALIDATE;
+ if (op->d_delete)
+ flags |= DCACHE_OP_DELETE;
+ if (op->d_prune)
+ flags |= DCACHE_OP_PRUNE;
+ if (op->d_real)
+ flags |= DCACHE_OP_REAL;
+ }
+ return flags;
+}
+
+static void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
+ unsigned int flags = d_op_flags(op);
WARN_ON_ONCE(dentry->d_op);
- WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
- DCACHE_OP_COMPARE |
- DCACHE_OP_REVALIDATE |
- DCACHE_OP_WEAK_REVALIDATE |
- DCACHE_OP_DELETE |
- DCACHE_OP_REAL));
+ WARN_ON_ONCE(dentry->d_flags & DCACHE_OP_FLAGS);
dentry->d_op = op;
- if (!op)
- return;
- if (op->d_hash)
- dentry->d_flags |= DCACHE_OP_HASH;
- if (op->d_compare)
- dentry->d_flags |= DCACHE_OP_COMPARE;
- if (op->d_revalidate)
- dentry->d_flags |= DCACHE_OP_REVALIDATE;
- if (op->d_weak_revalidate)
- dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
- if (op->d_delete)
- dentry->d_flags |= DCACHE_OP_DELETE;
- if (op->d_prune)
- dentry->d_flags |= DCACHE_OP_PRUNE;
- if (op->d_real)
- dentry->d_flags |= DCACHE_OP_REAL;
-
+ if (flags)
+ dentry->d_flags |= flags;
}
-EXPORT_SYMBOL(d_set_d_op);
-
-/*
- * d_set_fallthru - Mark a dentry as falling through to a lower layer
- * @dentry - The dentry to mark
- *
- * Mark a dentry as falling through to the lower layer (as set with
- * d_pin_lower()). This flag may be recorded on the medium.
- */
-void d_set_fallthru(struct dentry *dentry)
+void set_default_d_op(struct super_block *s, const struct dentry_operations *ops)
{
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_FALLTHRU;
- spin_unlock(&dentry->d_lock);
+ unsigned int flags = d_op_flags(ops);
+ s->__s_d_op = ops;
+ s->s_d_flags = (s->s_d_flags & ~DCACHE_OP_FLAGS) | flags;
}
-EXPORT_SYMBOL(d_set_fallthru);
+EXPORT_SYMBOL(set_default_d_op);
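As a hedged illustration of the new per-superblock hook (names below are made up for the example), a filesystem sets its default dentry_operations once at mount time; __d_alloc() then copies sb->__s_d_op and the precomputed DCACHE_OP_* bits into every new dentry, so per-dentry d_set_d_op() calls are no longer needed:

static const struct dentry_operations example_dentry_ops = {
	.d_delete = always_delete_dentry,	/* from libfs */
};

static int example_fill_super(struct super_block *sb, struct fs_context *fc)
{
	set_default_d_op(sb, &example_dentry_ops);
	/* ... the rest of the usual fill_super work ... */
	return 0;
}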
static unsigned d_flags_for_inode(struct inode *inode)
{
@@ -1972,18 +1963,18 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
unsigned add_flags = d_flags_for_inode(inode);
WARN_ON(d_in_lookup(dentry));
- spin_lock(&dentry->d_lock);
/*
- * Decrement negative dentry count if it was in the LRU list.
+ * The negative counter only tracks dentries on the LRU. Don't dec if
+ * d_lru is on another list.
*/
- if (dentry->d_flags & DCACHE_LRU_LIST)
+ if ((dentry->d_flags &
+ (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_dec(nr_dentry_negative);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
fsnotify_update_flags(dentry);
- spin_unlock(&dentry->d_lock);
}
/**
@@ -2007,7 +1998,9 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
if (inode) {
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
+ spin_lock(&entry->d_lock);
__d_instantiate(entry, inode);
+ spin_unlock(&entry->d_lock);
spin_unlock(&inode->i_lock);
}
}
@@ -2026,11 +2019,12 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
lockdep_annotate_inode_mutex_key(inode);
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
+ spin_lock(&entry->d_lock);
__d_instantiate(entry, inode);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW & ~I_CREATING;
- smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&entry->d_lock);
+ WARN_ON(!(inode_state_read(inode) & I_NEW));
+ inode_state_clear(inode, I_NEW | I_CREATING);
+ inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
@@ -2050,75 +2044,55 @@ struct dentry *d_make_root(struct inode *root_inode)
}
EXPORT_SYMBOL(d_make_root);
-static struct dentry *__d_instantiate_anon(struct dentry *dentry,
- struct inode *inode,
- bool disconnected)
-{
- struct dentry *res;
- unsigned add_flags;
-
- security_d_instantiate(dentry, inode);
- spin_lock(&inode->i_lock);
- res = __d_find_any_alias(inode);
- if (res) {
- spin_unlock(&inode->i_lock);
- dput(dentry);
- goto out_iput;
- }
-
- /* attach a disconnected dentry */
- add_flags = d_flags_for_inode(inode);
-
- if (disconnected)
- add_flags |= DCACHE_DISCONNECTED;
-
- spin_lock(&dentry->d_lock);
- __d_set_inode_and_type(dentry, inode, add_flags);
- hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
- if (!disconnected) {
- hlist_bl_lock(&dentry->d_sb->s_roots);
- hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
- hlist_bl_unlock(&dentry->d_sb->s_roots);
- }
- spin_unlock(&dentry->d_lock);
- spin_unlock(&inode->i_lock);
-
- return dentry;
-
- out_iput:
- iput(inode);
- return res;
-}
-
-struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
-{
- return __d_instantiate_anon(dentry, inode, true);
-}
-EXPORT_SYMBOL(d_instantiate_anon);
-
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
- struct dentry *tmp;
- struct dentry *res;
+ struct super_block *sb;
+ struct dentry *new, *res;
if (!inode)
return ERR_PTR(-ESTALE);
if (IS_ERR(inode))
return ERR_CAST(inode);
- res = d_find_any_alias(inode);
+ sb = inode->i_sb;
+
+ res = d_find_any_alias(inode); /* existing alias? */
if (res)
- goto out_iput;
+ goto out;
- tmp = d_alloc_anon(inode->i_sb);
- if (!tmp) {
+ new = d_alloc_anon(sb);
+ if (!new) {
res = ERR_PTR(-ENOMEM);
- goto out_iput;
+ goto out;
}
- return __d_instantiate_anon(tmp, inode, disconnected);
+ security_d_instantiate(new, inode);
+ spin_lock(&inode->i_lock);
+ res = __d_find_any_alias(inode); /* recheck under lock */
+ if (likely(!res)) { /* still no alias, attach a disconnected dentry */
+ unsigned add_flags = d_flags_for_inode(inode);
+
+ if (disconnected)
+ add_flags |= DCACHE_DISCONNECTED;
-out_iput:
+ spin_lock(&new->d_lock);
+ __d_set_inode_and_type(new, inode, add_flags);
+ hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
+ if (!disconnected) {
+ hlist_bl_lock(&sb->s_roots);
+ hlist_bl_add_head(&new->d_hash, &sb->s_roots);
+ hlist_bl_unlock(&sb->s_roots);
+ }
+ spin_unlock(&new->d_lock);
+ spin_unlock(&inode->i_lock);
+ inode = NULL; /* consumed by new->d_inode */
+ res = new;
+ } else {
+ spin_unlock(&inode->i_lock);
+ dput(new);
+ }
+
+ out:
iput(inode);
return res;
}
@@ -2170,8 +2144,8 @@ EXPORT_SYMBOL(d_obtain_root);
/**
* d_add_ci - lookup or allocate new dentry with case-exact name
- * @inode: the inode case-insensitive lookup has found
* @dentry: the negative dentry that was passed to the parent's lookup func
+ * @inode: the inode case-insensitive lookup has found
* @name: the case-exact name to be associated with the returned dentry
*
* This is to avoid filling the dcache with case-insensitive names to the
@@ -2214,6 +2188,7 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
res = d_splice_alias(inode, found);
if (res) {
+ d_lookup_done(found);
dput(found);
return res;
}
@@ -2221,10 +2196,16 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
EXPORT_SYMBOL(d_add_ci);
-
-static inline bool d_same_name(const struct dentry *dentry,
- const struct dentry *parent,
- const struct qstr *name)
+/**
+ * d_same_name - compare dentry name with case-exact name
+ * @dentry: the dentry whose name is to be checked
+ * @parent: parent dentry whose comparison rules apply
+ * @name: the case-exact name to compare against
+ *
+ * Return: true if the names match, false otherwise
+ */
+bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name)
{
if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
if (dentry->d_name.len != name->len)
@@ -2235,6 +2216,49 @@ static inline bool d_same_name(const struct dentry *dentry,
dentry->d_name.len, dentry->d_name.name,
name) == 0;
}
+EXPORT_SYMBOL_GPL(d_same_name);
+
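A hypothetical consumer of the newly exported helper: scanning a parent's children (the d_children/d_sib layout introduced by this series) for a match, honouring the parent's ->d_compare() rules. The caller is assumed to hold parent->d_lock.

static struct dentry *example_find_child_locked(struct dentry *parent,
						const struct qstr *name)
{
	struct dentry *child;

	hlist_for_each_entry(child, &parent->d_children, d_sib)
		if (d_same_name(child, parent, name))
			return child;
	return NULL;
}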
+/*
+ * This is __d_lookup_rcu() when the parent dentry has
+ * DCACHE_OP_COMPARE, which makes things much nastier.
+ */
+static noinline struct dentry *__d_lookup_rcu_op_compare(
+ const struct dentry *parent,
+ const struct qstr *name,
+ unsigned *seqp)
+{
+ u64 hashlen = name->hash_len;
+ struct hlist_bl_head *b = d_hash(hashlen);
+ struct hlist_bl_node *node;
+ struct dentry *dentry;
+
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
+ int tlen;
+ const char *tname;
+ unsigned seq;
+
+seqretry:
+ seq = raw_seqcount_begin(&dentry->d_seq);
+ if (dentry->d_parent != parent)
+ continue;
+ if (d_unhashed(dentry))
+ continue;
+ if (dentry->d_name.hash != hashlen_hash(hashlen))
+ continue;
+ tlen = dentry->d_name.len;
+ tname = dentry->d_name.name;
+ /* we want a consistent (name,len) pair */
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ cpu_relax();
+ goto seqretry;
+ }
+ if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
+ continue;
+ *seqp = seq;
+ return dentry;
+ }
+ return NULL;
+}
/**
* __d_lookup_rcu - search for a dentry (racy, store-free)
@@ -2254,9 +2278,6 @@ static inline bool d_same_name(const struct dentry *dentry,
* without taking d_lock and checking d_seq sequence count against @seq
* returned here.
*
- * A refcount may be taken on the found dentry with the d_rcu_to_refcount
- * function.
- *
* Alternatively, __d_lookup_rcu may be called again to look up the child of
* the returned dentry, so long as its parent's seqlock is checked after the
* child is looked up. Thus, an interlocking stepping of sequence lock checks
@@ -2271,7 +2292,7 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
{
u64 hashlen = name->hash_len;
const unsigned char *str = name->name;
- struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
+ struct hlist_bl_head *b = d_hash(hashlen);
struct hlist_bl_node *node;
struct dentry *dentry;
@@ -2282,6 +2303,9 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
* Keep the two functions in sync.
*/
+ if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
+ return __d_lookup_rcu_op_compare(parent, name, seqp);
+
/*
* The hash list is protected using RCU.
*
@@ -2298,7 +2322,6 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
unsigned seq;
-seqretry:
/*
* The dentry sequence count protects us from concurrent
* renames, and thus protects parent and name fields.
@@ -2319,30 +2342,21 @@ seqretry:
seq = raw_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
continue;
- if (d_unhashed(dentry))
+ if (dentry->d_name.hash_len != hashlen)
+ continue;
+ if (unlikely(dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0))
+ continue;
+ /*
+ * Check for the dentry being unhashed.
+ *
+ * As tempting as it is, we *can't* skip it because of a race window
+ * between us finding the dentry before it gets unhashed and loading
+ * the sequence counter after unhashing is finished.
+ *
+ * The best we can do is mark the check as unlikely().
+ */
+ if (unlikely(d_unhashed(dentry)))
continue;
-
- if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
- int tlen;
- const char *tname;
- if (dentry->d_name.hash != hashlen_hash(hashlen))
- continue;
- tlen = dentry->d_name.len;
- tname = dentry->d_name.name;
- /* we want a consistent (name,len) pair */
- if (read_seqcount_retry(&dentry->d_seq, seq)) {
- cpu_relax();
- goto seqretry;
- }
- if (parent->d_op->d_compare(dentry,
- tlen, tname, name) != 0)
- continue;
- } else {
- if (dentry->d_name.hash_len != hashlen)
- continue;
- if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
- continue;
- }
*seqp = seq;
return dentry;
}
@@ -2468,7 +2482,6 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
}
return d_lookup(dir, name);
}
-EXPORT_SYMBOL(d_hash_and_lookup);
/*
* When a file is deleted, we have two options:
@@ -2501,6 +2514,8 @@ void d_delete(struct dentry * dentry)
* Are we the only user?
*/
if (dentry->d_lockref.count == 1) {
+ if (dentry_negative_policy)
+ __d_drop(dentry);
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_unlink_inode(dentry);
} else {
@@ -2537,18 +2552,22 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
-
+ preempt_disable_nested();
for (;;) {
- unsigned n = dir->i_dir_seq;
- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
+ unsigned n = READ_ONCE(dir->i_dir_seq);
+ if (!(n & 1) && try_cmpxchg(&dir->i_dir_seq, &n, n + 1))
return n;
cpu_relax();
}
}
-static inline void end_dir_add(struct inode *dir, unsigned n)
+static inline void end_dir_add(struct inode *dir, unsigned int n,
+ wait_queue_head_t *d_wait)
{
smp_store_release(&dir->i_dir_seq, n + 2);
+ preempt_enable_nested();
+ if (wq_has_sleeper(d_wait))
+ wake_up_all(d_wait);
}
static void d_wait_lookup(struct dentry *dentry)
@@ -2572,13 +2591,21 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
struct hlist_bl_node *node;
- struct dentry *new = d_alloc(parent, name);
+ struct dentry *new = __d_alloc(parent->d_sb, name);
struct dentry *dentry;
unsigned seq, r_seq, d_seq;
if (unlikely(!new))
return ERR_PTR(-ENOMEM);
+ new->d_flags |= DCACHE_PAR_LOOKUP;
+ spin_lock(&parent->d_lock);
+ new->d_parent = dget_dlock(parent);
+ hlist_add_head(&new->d_sib, &parent->d_children);
+ if (parent->d_flags & DCACHE_DISCONNECTED)
+ new->d_flags |= DCACHE_DISCONNECTED;
+ spin_unlock(&parent->d_lock);
+
retry:
rcu_read_lock();
seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
@@ -2662,10 +2689,8 @@ retry:
return dentry;
}
rcu_read_unlock();
- /* we can't take ->d_lock here; it's OK, though. */
- new->d_flags |= DCACHE_PAR_LOOKUP;
new->d_wait = wq;
- hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
+ hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
hlist_bl_unlock(b);
return new;
mismatch:
@@ -2675,33 +2700,54 @@ mismatch:
}
EXPORT_SYMBOL(d_alloc_parallel);
-void __d_lookup_done(struct dentry *dentry)
+/*
+ * - Unhash the dentry
+ * - Retrieve and clear the waitqueue head in dentry
+ * - Return the waitqueue head
+ */
+static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
{
- struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
- dentry->d_name.hash);
+ wait_queue_head_t *d_wait;
+ struct hlist_bl_head *b;
+
+ lockdep_assert_held(&dentry->d_lock);
+
+ b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
- wake_up_all(dentry->d_wait);
+ d_wait = dentry->d_wait;
dentry->d_wait = NULL;
hlist_bl_unlock(b);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
INIT_LIST_HEAD(&dentry->d_lru);
+ return d_wait;
}
-EXPORT_SYMBOL(__d_lookup_done);
+
+void __d_lookup_unhash_wake(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ wake_up_all(__d_lookup_unhash(dentry));
+ spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(__d_lookup_unhash_wake);
/* inode->i_lock held if inode is non-NULL */
-static inline void __d_add(struct dentry *dentry, struct inode *inode)
+static inline void __d_add(struct dentry *dentry, struct inode *inode,
+ const struct dentry_operations *ops)
{
+ wait_queue_head_t *d_wait;
struct inode *dir = NULL;
unsigned n;
spin_lock(&dentry->d_lock);
if (unlikely(d_in_lookup(dentry))) {
dir = dentry->d_parent->d_inode;
n = start_dir_add(dir);
- __d_lookup_done(dentry);
+ d_wait = __d_lookup_unhash(dentry);
}
+ if (unlikely(ops))
+ d_set_d_op(dentry, ops);
if (inode) {
unsigned add_flags = d_flags_for_inode(inode);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2712,7 +2758,7 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
}
__d_rehash(dentry);
if (dir)
- end_dir_add(dir, n);
+ end_dir_add(dir, n, d_wait);
spin_unlock(&dentry->d_lock);
if (inode)
spin_unlock(&inode->i_lock);
@@ -2733,55 +2779,27 @@ void d_add(struct dentry *entry, struct inode *inode)
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
}
- __d_add(entry, inode);
+ __d_add(entry, inode, NULL);
}
EXPORT_SYMBOL(d_add);
-/**
- * d_exact_alias - find and hash an exact unhashed alias
- * @entry: dentry to add
- * @inode: The inode to go with this dentry
- *
- * If an unhashed dentry with the same name/parent and desired
- * inode already exists, hash and return it. Otherwise, return
- * NULL.
- *
- * Parent directory should be locked.
- */
-struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
+struct dentry *d_make_persistent(struct dentry *dentry, struct inode *inode)
{
- struct dentry *alias;
- unsigned int hash = entry->d_name.hash;
-
+ WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
+ WARN_ON(!inode);
+ security_d_instantiate(dentry, inode);
spin_lock(&inode->i_lock);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- /*
- * Don't need alias->d_lock here, because aliases with
- * d_parent == entry->d_parent are not subject to name or
- * parent changes, because the parent inode i_mutex is held.
- */
- if (alias->d_name.hash != hash)
- continue;
- if (alias->d_parent != entry->d_parent)
- continue;
- if (!d_same_name(alias, entry->d_parent, &entry->d_name))
- continue;
- spin_lock(&alias->d_lock);
- if (!d_unhashed(alias)) {
- spin_unlock(&alias->d_lock);
- alias = NULL;
- } else {
- __dget_dlock(alias);
- __d_rehash(alias);
- spin_unlock(&alias->d_lock);
- }
- spin_unlock(&inode->i_lock);
- return alias;
- }
+ spin_lock(&dentry->d_lock);
+ __d_instantiate(dentry, inode);
+ dentry->d_flags |= DCACHE_PERSISTENT;
+ dget_dlock(dentry);
+ if (d_unhashed(dentry))
+ __d_rehash(dentry);
+ spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
- return NULL;
+ return dentry;
}
-EXPORT_SYMBOL(d_exact_alias);
+EXPORT_SYMBOL(d_make_persistent);
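The pinning API is specific to this patch; as a rough, hedged sketch of the intended lifecycle (helper names invented for the example), a filesystem pins a dentry when it instantiates it and later drops the pin, at which point ordinary refcounting and shrinking take over:

static struct dentry *example_instantiate_pinned(struct dentry *dentry,
						 struct inode *inode)
{
	/* instantiates, hashes if needed, sets DCACHE_PERSISTENT and pins */
	return d_make_persistent(dentry, inode);
}

static void example_unpin(struct dentry *dentry)
{
	/* clears DCACHE_PERSISTENT and drops the pinning reference */
	d_make_discardable(dentry);
}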
static void swap_names(struct dentry *dentry, struct dentry *target)
{
@@ -2790,16 +2808,15 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
/*
* Both external: swap the pointers
*/
- swap(target->d_name.name, dentry->d_name.name);
+ swap(target->__d_name.name, dentry->__d_name.name);
} else {
/*
* dentry:internal, target:external. Steal target's
* storage and make target internal.
*/
- memcpy(target->d_iname, dentry->d_name.name,
- dentry->d_name.len + 1);
- dentry->d_name.name = target->d_name.name;
- target->d_name.name = target->d_iname;
+ dentry->__d_name.name = target->__d_name.name;
+ target->d_shortname = dentry->d_shortname;
+ target->__d_name.name = target->d_shortname.string;
}
} else {
if (unlikely(dname_external(dentry))) {
@@ -2807,23 +2824,19 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
* dentry:external, target:internal. Give dentry's
* storage to target and make dentry internal
*/
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
- target->d_name.name = dentry->d_name.name;
- dentry->d_name.name = dentry->d_iname;
+ target->__d_name.name = dentry->__d_name.name;
+ dentry->d_shortname = target->d_shortname;
+ dentry->__d_name.name = dentry->d_shortname.string;
} else {
/*
* Both are internal.
*/
- unsigned int i;
- BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
- for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
- swap(((long *) &dentry->d_iname)[i],
- ((long *) &target->d_iname)[i]);
- }
+ for (int i = 0; i < DNAME_INLINE_WORDS; i++)
+ swap(dentry->d_shortname.words[i],
+ target->d_shortname.words[i]);
}
}
- swap(dentry->d_name.hash_len, target->d_name.hash_len);
+ swap(dentry->__d_name.hash_len, target->__d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
@@ -2832,16 +2845,15 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
if (unlikely(dname_external(dentry)))
old_name = external_name(dentry);
if (unlikely(dname_external(target))) {
- atomic_inc(&external_name(target)->u.count);
- dentry->d_name = target->d_name;
+ atomic_inc(&external_name(target)->count);
+ dentry->__d_name = target->__d_name;
} else {
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
- dentry->d_name.name = dentry->d_iname;
- dentry->d_name.hash_len = target->d_name.hash_len;
+ dentry->d_shortname = target->d_shortname;
+ dentry->__d_name.name = dentry->d_shortname.string;
+ dentry->__d_name.hash_len = target->__d_name.hash_len;
}
- if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
- kfree_rcu(old_name, u.head);
+ if (old_name && likely(atomic_dec_and_test(&old_name->count)))
+ kfree_rcu(old_name, head);
}
/*
@@ -2850,15 +2862,16 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
* @target: new dentry
* @exchange: exchange the two dentries
*
- * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way. Caller must hold
- * rename_lock, the i_mutex of the source and target directories,
- * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
+ * Update the dcache to reflect the move of a file name. Negative dcache
+ * entries should not be moved in this way. Caller must hold rename_lock, the
+ * i_rwsem of the source and target directories (exclusively), and
+ * the sb->s_vfs_rename_mutex if they differ. See lock_rename().
*/
static void __d_move(struct dentry *dentry, struct dentry *target,
bool exchange)
{
struct dentry *old_parent, *p;
+ wait_queue_head_t *d_wait;
struct inode *dir = NULL;
unsigned n;
@@ -2889,7 +2902,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
if (unlikely(d_in_lookup(target))) {
dir = target->d_parent->d_inode;
n = start_dir_add(dir);
- __d_lookup_done(target);
+ d_wait = __d_lookup_unhash(target);
}
write_seqcount_begin(&dentry->d_seq);
@@ -2912,11 +2925,15 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
} else {
target->d_parent = old_parent;
swap_names(dentry, target);
- list_move(&target->d_child, &target->d_parent->d_subdirs);
+ if (!hlist_unhashed(&target->d_sib))
+ __hlist_del(&target->d_sib);
+ hlist_add_head(&target->d_sib, &target->d_parent->d_children);
__d_rehash(target);
fsnotify_update_flags(target);
}
- list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
+ if (!hlist_unhashed(&dentry->d_sib))
+ __hlist_del(&dentry->d_sib);
+ hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
__d_rehash(dentry);
fsnotify_update_flags(dentry);
fscrypt_handle_d_move(dentry);
@@ -2925,7 +2942,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
write_seqcount_end(&dentry->d_seq);
if (dir)
- end_dir_add(dir, n);
+ end_dir_add(dir, n, d_wait);
if (dentry->d_parent != old_parent)
spin_unlock(&dentry->d_parent->d_lock);
@@ -2970,6 +2987,7 @@ void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
write_sequnlock(&rename_lock);
}
+EXPORT_SYMBOL(d_exchange);
/**
* d_ancestor - search for an ancestor
@@ -2994,13 +3012,12 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
* This helper attempts to cope with remotely renamed directories
*
* It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex, and rename_lock
+ * dentry->d_parent->d_inode->i_rwsem, and rename_lock
*
* Note: If ever the locking in lock_rename() changes, then please
* remember to update this too...
*/
-static int __d_unalias(struct inode *inode,
- struct dentry *dentry, struct dentry *alias)
+static int __d_unalias(struct dentry *dentry, struct dentry *alias)
{
struct mutex *m1 = NULL;
struct rw_semaphore *m2 = NULL;
@@ -3018,7 +3035,12 @@ static int __d_unalias(struct inode *inode,
goto out_err;
m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
+ if (alias->d_op && alias->d_op->d_unalias_trylock &&
+ !alias->d_op->d_unalias_trylock(alias))
+ goto out_err;
__d_move(alias, dentry, false);
+ if (alias->d_op && alias->d_op->d_unalias_unlock)
+ alias->d_op->d_unalias_unlock(alias);
ret = 0;
out_err:
if (m2)
@@ -3028,30 +3050,8 @@ out_err:
return ret;
}
-/**
- * d_splice_alias - splice a disconnected dentry into the tree if one exists
- * @inode: the inode which may have a disconnected dentry
- * @dentry: a negative dentry which we want to point to the inode.
- *
- * If inode is a directory and has an IS_ROOT alias, then d_move that in
- * place of the given dentry and return it, else simply d_add the inode
- * to the dentry and return NULL.
- *
- * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
- * we should error out: directories can't have multiple aliases.
- *
- * This is needed in the lookup routine of any filesystem that is exportable
- * (via knfsd) so that we can build dcache paths to directories effectively.
- *
- * If a dentry was found and moved, then it is returned. Otherwise NULL
- * is returned. This matches the expected return value of ->lookup.
- *
- * Cluster filesystems may call this function with a negative, hashed dentry.
- * In that case, we know that the inode will be a regular file, and also this
- * will only occur during atomic_open. So we need to check for the dentry
- * being already hashed only in the final case.
- */
-struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+struct dentry *d_splice_alias_ops(struct inode *inode, struct dentry *dentry,
+ const struct dentry_operations *ops)
{
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -3081,7 +3081,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
inode->i_sb->s_id);
} else if (!IS_ROOT(new)) {
struct dentry *old_parent = dget(new->d_parent);
- int err = __d_unalias(inode, dentry, new);
+ int err = __d_unalias(dentry, new);
write_sequnlock(&rename_lock);
if (err) {
dput(new);
@@ -3097,9 +3097,37 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
}
}
out:
- __d_add(dentry, inode);
+ __d_add(dentry, inode, ops);
return NULL;
}
+
+/**
+ * d_splice_alias - splice a disconnected dentry into the tree if one exists
+ * @inode: the inode which may have a disconnected dentry
+ * @dentry: a negative dentry which we want to point to the inode.
+ *
+ * If inode is a directory and has an IS_ROOT alias, then d_move that in
+ * place of the given dentry and return it, else simply d_add the inode
+ * to the dentry and return NULL.
+ *
+ * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
+ * we should error out: directories can't have multiple aliases.
+ *
+ * This is needed in the lookup routine of any filesystem that is exportable
+ * (via knfsd) so that we can build dcache paths to directories effectively.
+ *
+ * If a dentry was found and moved, then it is returned. Otherwise NULL
+ * is returned. This matches the expected return value of ->lookup.
+ *
+ * Cluster filesystems may call this function with a negative, hashed dentry.
+ * In that case, we know that the inode will be a regular file, and also this
+ * will only occur during atomic_open. So we need to check for the dentry
+ * being already hashed only in the final case.
+ */
+struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+{
+ return d_splice_alias_ops(inode, dentry, NULL);
+}
EXPORT_SYMBOL(d_splice_alias);
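For orientation, the usual ->lookup() pattern this helper exists for (unchanged by the refactoring above); example_find_entry()/example_iget() are stand-ins for the filesystem's own directory search and inode lookup, not real APIs:

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = NULL;
	ino_t ino = example_find_entry(dir, &dentry->d_name);	/* hypothetical */

	if (ino) {
		inode = example_iget(dir->i_sb, ino);		/* hypothetical */
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	/* attaches the inode (or keeps the dentry negative) and reuses IS_ROOT aliases */
	return d_splice_alias(inode, dentry);
}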
/*
@@ -3120,69 +3148,82 @@ EXPORT_SYMBOL(d_splice_alias);
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
- bool result;
+ bool subdir;
unsigned seq;
if (new_dentry == old_dentry)
return true;
- do {
- /* for restarting inner loop in case of seq retry */
- seq = read_seqbegin(&rename_lock);
- /*
- * Need rcu_readlock to protect against the d_parent trashing
- * due to d_move
- */
- rcu_read_lock();
- if (d_ancestor(old_dentry, new_dentry))
- result = true;
- else
- result = false;
- rcu_read_unlock();
- } while (read_seqretry(&rename_lock, seq));
-
- return result;
-}
-EXPORT_SYMBOL(is_subdir);
-
-static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
-{
- struct dentry *root = data;
- if (dentry != root) {
- if (d_unhashed(dentry) || !dentry->d_inode)
- return D_WALK_SKIP;
-
- if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
- dentry->d_flags |= DCACHE_GENOCIDE;
- dentry->d_lockref.count--;
- }
+ /* Access d_parent under rcu as d_move() may change it. */
+ rcu_read_lock();
+ seq = read_seqbegin(&rename_lock);
+ subdir = d_ancestor(old_dentry, new_dentry);
+ /* Try lockless once... */
+ if (read_seqretry(&rename_lock, seq)) {
+ /* ...else acquire lock for progress even on deep chains. */
+ read_seqlock_excl(&rename_lock);
+ subdir = d_ancestor(old_dentry, new_dentry);
+ read_sequnlock_excl(&rename_lock);
}
- return D_WALK_CONTINUE;
+ rcu_read_unlock();
+ return subdir;
}
+EXPORT_SYMBOL(is_subdir);
-void d_genocide(struct dentry *parent)
+void d_mark_tmpfile(struct file *file, struct inode *inode)
{
- d_walk(parent, parent, d_genocide_kill);
-}
-
-EXPORT_SYMBOL(d_genocide);
+ struct dentry *dentry = file->f_path.dentry;
-void d_tmpfile(struct dentry *dentry, struct inode *inode)
-{
- inode_dec_link_count(inode);
- BUG_ON(dentry->d_name.name != dentry->d_iname ||
+ BUG_ON(dname_external(dentry) ||
!hlist_unhashed(&dentry->d_u.d_alias) ||
!d_unlinked(dentry));
spin_lock(&dentry->d_parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
+ dentry->__d_name.len = sprintf(dentry->d_shortname.string, "#%llu",
(unsigned long long)inode->i_ino);
spin_unlock(&dentry->d_lock);
spin_unlock(&dentry->d_parent->d_lock);
+}
+EXPORT_SYMBOL(d_mark_tmpfile);
+
+void d_tmpfile(struct file *file, struct inode *inode)
+{
+ struct dentry *dentry = file->f_path.dentry;
+
+ inode_dec_link_count(inode);
+ d_mark_tmpfile(file, inode);
d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
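A hedged sketch of a ->tmpfile() implementation on top of the file-based helpers above; example_new_inode() stands in for the filesystem's inode allocation and is not a real API:

static int example_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			   struct file *file, umode_t mode)
{
	struct inode *inode = example_new_inode(dir->i_sb, dir, mode);	/* hypothetical */

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	set_nlink(inode, 1);		/* d_tmpfile() drops it back to 0 */
	d_tmpfile(file, inode);		/* names the dentry "#<ino>" and instantiates it */
	return finish_open_simple(file, 0);
}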
+/*
+ * Obtain inode number of the parent dentry.
+ */
+ino_t d_parent_ino(struct dentry *dentry)
+{
+ struct dentry *parent;
+ struct inode *iparent;
+ unsigned seq;
+ ino_t ret;
+
+ scoped_guard(rcu) {
+ seq = raw_seqcount_begin(&dentry->d_seq);
+ parent = READ_ONCE(dentry->d_parent);
+ iparent = d_inode_rcu(parent);
+ if (likely(iparent)) {
+ ret = iparent->i_ino;
+ if (!read_seqcount_retry(&dentry->d_seq, seq))
+ return ret;
+ }
+ }
+
+ spin_lock(&dentry->d_lock);
+ ret = dentry->d_parent->d_inode->i_ino;
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
+EXPORT_SYMBOL(d_parent_ino);
+
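A small, hypothetical consumer of d_parent_ino(): recording the parent directory's inode number for an event without taking rename_lock (the record structure is invented for the example):

struct example_event {
	ino_t dir_ino;
	ino_t ino;
};

static void example_fill_event(struct example_event *ev, struct dentry *dentry)
{
	ev->ino = d_inode(dentry) ? d_inode(dentry)->i_ino : 0;
	ev->dir_ino = d_parent_ino(dentry);	/* lockless fast path, ->d_lock fallback */
}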
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
@@ -3212,6 +3253,9 @@ static void __init dcache_init_early(void)
0,
0);
d_hash_shift = 32 - d_hash_shift;
+
+ runtime_const_init(shift, d_hash_shift);
+ runtime_const_init(ptr, dentry_hashtable);
}
static void __init dcache_init(void)
@@ -3221,9 +3265,10 @@ static void __init dcache_init(void)
* but it is probably not worth it because of the cache nature
* of the dcache.
*/
- dentry_cache = KMEM_CACHE_USERCOPY(dentry,
- SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
- d_iname);
+ __dentry_cache = KMEM_CACHE_USERCOPY(dentry,
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
+ d_shortname.string);
+ runtime_const_init(ptr, __dentry_cache);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
@@ -3240,10 +3285,13 @@ static void __init dcache_init(void)
0,
0);
d_hash_shift = 32 - d_hash_shift;
+
+ runtime_const_init(shift, d_hash_shift);
+ runtime_const_init(ptr, dentry_hashtable);
}
/* SLAB cache for __getname() consumers */
-struct kmem_cache *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __ro_after_init;
EXPORT_SYMBOL(names_cachep);
void __init vfs_caches_init_early(void)
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 7d162b0efbf0..3ec3324c2060 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -47,58 +47,81 @@ const struct file_operations debugfs_noop_file_operations = {
#define F_DENTRY(filp) ((filp)->f_path.dentry)
-const struct file_operations *debugfs_real_fops(const struct file *filp)
+void *debugfs_get_aux(const struct file *file)
{
- struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
-
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
- /*
- * Urgh, we've been called w/o a protecting
- * debugfs_file_get().
- */
- WARN_ON(1);
- return NULL;
- }
-
- return fsd->real_fops;
+ return DEBUGFS_I(file_inode(file))->aux;
}
-EXPORT_SYMBOL_GPL(debugfs_real_fops);
+EXPORT_SYMBOL_GPL(debugfs_get_aux);
-/**
- * debugfs_file_get - mark the beginning of file data access
- * @dentry: the dentry object whose data is being accessed.
- *
- * Up to a matching call to debugfs_file_put(), any successive call
- * into the file removing functions debugfs_remove() and
- * debugfs_remove_recursive() will block. Since associated private
- * file data may only get freed after a successful return of any of
- * the removal functions, you may safely access it after a successful
- * call to debugfs_file_get() without worrying about lifetime issues.
- *
- * If -%EIO is returned, the file has already been removed and thus,
- * it is not safe to access any of its data. If, on the other hand,
- * it is allowed to access the file data, zero is returned.
- */
-int debugfs_file_get(struct dentry *dentry)
+enum dbgfs_get_mode {
+ DBGFS_GET_ALREADY,
+ DBGFS_GET_REGULAR,
+ DBGFS_GET_SHORT,
+};
+
+static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode)
{
struct debugfs_fsdata *fsd;
void *d_fsd;
+ /*
+ * This could only happen if some debugfs user erroneously calls
+ * debugfs_file_get() on a dentry that isn't even a file; let
+ * them know about it.
+ */
+ if (WARN_ON(!d_is_reg(dentry)))
+ return -EINVAL;
+
d_fsd = READ_ONCE(dentry->d_fsdata);
- if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ if (d_fsd) {
fsd = d_fsd;
} else {
+ struct inode *inode = dentry->d_inode;
+ unsigned int methods = 0;
+
+ if (WARN_ON(mode == DBGFS_GET_ALREADY))
+ return -EINVAL;
+
fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
if (!fsd)
return -ENOMEM;
- fsd->real_fops = (void *)((unsigned long)d_fsd &
- ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ if (mode == DBGFS_GET_SHORT) {
+ const struct debugfs_short_fops *ops;
+ ops = fsd->short_fops = DEBUGFS_I(inode)->short_fops;
+ if (ops->llseek)
+ methods |= HAS_LSEEK;
+ if (ops->read)
+ methods |= HAS_READ;
+ if (ops->write)
+ methods |= HAS_WRITE;
+ fsd->real_fops = NULL;
+ } else {
+ const struct file_operations *ops;
+ ops = fsd->real_fops = DEBUGFS_I(inode)->real_fops;
+ if (ops->llseek)
+ methods |= HAS_LSEEK;
+ if (ops->read)
+ methods |= HAS_READ;
+ if (ops->write)
+ methods |= HAS_WRITE;
+ if (ops->unlocked_ioctl)
+ methods |= HAS_IOCTL;
+ if (ops->poll)
+ methods |= HAS_POLL;
+ fsd->short_fops = NULL;
+ }
+ fsd->methods = methods;
refcount_set(&fsd->active_users, 1);
init_completion(&fsd->active_users_drained);
- if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+ INIT_LIST_HEAD(&fsd->cancellations);
+ mutex_init(&fsd->cancellations_mtx);
+
+ d_fsd = cmpxchg(&dentry->d_fsdata, NULL, fsd);
+ if (d_fsd) {
+ mutex_destroy(&fsd->cancellations_mtx);
kfree(fsd);
- fsd = READ_ONCE(dentry->d_fsdata);
+ fsd = d_fsd;
}
}
@@ -118,6 +141,26 @@ int debugfs_file_get(struct dentry *dentry)
return 0;
}
+
+/**
+ * debugfs_file_get - mark the beginning of file data access
+ * @dentry: the dentry object whose data is being accessed.
+ *
+ * Up to a matching call to debugfs_file_put(), any successive call
+ * into the file removing functions debugfs_remove() and
+ * debugfs_remove_recursive() will block. Since associated private
+ * file data may only get freed after a successful return of any of
+ * the removal functions, you may safely access it after a successful
+ * call to debugfs_file_get() without worrying about lifetime issues.
+ *
+ * If -%EIO is returned, the file has already been removed and thus,
+ * it is not safe to access any of its data. If, on the other hand,
+ * it is allowed to access the file data, zero is returned.
+ */
+int debugfs_file_get(struct dentry *dentry)
+{
+ return __debugfs_file_get(dentry, DBGFS_GET_ALREADY);
+}
EXPORT_SYMBOL_GPL(debugfs_file_get);
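/*
 * A minimal sketch of the intended pairing, assuming a hypothetical handler
 * that dereferences data which debugfs_remove() may free: everything between
 * a successful debugfs_file_get() and the matching debugfs_file_put() is
 * protected against the removal completing underneath it.
 */
static ssize_t example_protected_read(struct file *file, char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct dentry *dentry = F_DENTRY(file);
	const char *msg;
	ssize_t ret;

	ret = debugfs_file_get(dentry);
	if (unlikely(ret))
		return ret;	/* -EIO: the file has already been removed */

	msg = file_inode(file)->i_private;
	ret = simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));

	debugfs_file_put(dentry);
	return ret;
}
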
/**
@@ -138,6 +181,84 @@ void debugfs_file_put(struct dentry *dentry)
}
EXPORT_SYMBOL_GPL(debugfs_file_put);
+/**
+ * debugfs_enter_cancellation - enter a debugfs cancellation
+ * @file: the file being accessed
+ * @cancellation: the cancellation object, the cancel callback
+ * inside of it must be initialized
+ *
+ * When a debugfs file is removed it needs to wait for all active
+ * operations to complete. However, the operation itself may need
+ * to wait for hardware or completion of some asynchronous process
+ * or similar. As such, it may need to be cancelled to avoid long
+ * waits or even deadlocks.
+ *
+ * This function can be used inside a debugfs handler that may
+ * need to be cancelled. As soon as this function is called, the
+ * cancellation's 'cancel' callback may be called, at which point
+ * the caller should proceed to call debugfs_leave_cancellation()
+ * and leave the debugfs handler function as soon as possible.
+ * Note that the 'cancel' callback is only ever called in the
+ * context of some kind of debugfs_remove().
+ *
+ * This function must be paired with debugfs_leave_cancellation().
+ */
+void debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation)
+{
+ struct debugfs_fsdata *fsd;
+ struct dentry *dentry = F_DENTRY(file);
+
+ INIT_LIST_HEAD(&cancellation->list);
+
+ if (WARN_ON(!d_is_reg(dentry)))
+ return;
+
+ if (WARN_ON(!cancellation->cancel))
+ return;
+
+ fsd = READ_ONCE(dentry->d_fsdata);
+ if (WARN_ON(!fsd))
+ return;
+
+ mutex_lock(&fsd->cancellations_mtx);
+ list_add(&cancellation->list, &fsd->cancellations);
+ mutex_unlock(&fsd->cancellations_mtx);
+
+ /* if we're already removing, wake it up to cancel */
+ if (d_unlinked(dentry))
+ complete(&fsd->active_users_drained);
+}
+EXPORT_SYMBOL_GPL(debugfs_enter_cancellation);
+
+/**
+ * debugfs_leave_cancellation - leave cancellation section
+ * @file: the file being accessed
+ * @cancellation: the cancellation previously registered with
+ * debugfs_enter_cancellation()
+ *
+ * See the documentation of debugfs_enter_cancellation().
+ */
+void debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation)
+{
+ struct debugfs_fsdata *fsd;
+ struct dentry *dentry = F_DENTRY(file);
+
+ if (WARN_ON(!d_is_reg(dentry)))
+ return;
+
+ fsd = READ_ONCE(dentry->d_fsdata);
+ if (WARN_ON(!fsd))
+ return;
+
+ mutex_lock(&fsd->cancellations_mtx);
+ if (!list_empty(&cancellation->list))
+ list_del(&cancellation->list);
+ mutex_unlock(&fsd->cancellations_mtx);
+}
+EXPORT_SYMBOL_GPL(debugfs_leave_cancellation);
+
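/*
 * A minimal usage sketch, assuming a hypothetical handler that blocks on a
 * completion normally signalled by asynchronous work (firmware, another
 * thread, ...). Since the cancel callback only ever runs from a
 * debugfs_remove() variant, completing the same completion there lets the
 * handler return promptly instead of stalling the removal.
 */
static void example_cancel(struct dentry *dentry, void *data)
{
	complete(data);
}

static ssize_t example_wait_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct debugfs_cancellation cancellation = {
		.cancel = example_cancel,
		.cancel_data = &done,
	};

	/* hypothetical: kick off asynchronous work that will complete &done */

	debugfs_enter_cancellation(file, &cancellation);
	wait_for_completion(&done);
	debugfs_leave_cancellation(file, &cancellation);

	return 0;
}
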
/*
* Only permit access to world-readable files when the kernel is locked down.
* We also need to exclude any file that has ways to write or alter it as root
@@ -147,11 +268,12 @@ static int debugfs_locked_down(struct inode *inode,
struct file *filp,
const struct file_operations *real_fops)
{
- if ((inode->i_mode & 07777) == 0444 &&
+ if ((inode->i_mode & 07777 & ~0444) == 0 &&
!(filp->f_mode & FMODE_WRITE) &&
- !real_fops->unlocked_ioctl &&
- !real_fops->compat_ioctl &&
- !real_fops->mmap)
+ (!real_fops ||
+ (!real_fops->unlocked_ioctl &&
+ !real_fops->compat_ioctl &&
+ !real_fops->mmap)))
return 0;
if (security_locked_down(LOCKDOWN_DEBUGFS))
@@ -163,15 +285,13 @@ static int debugfs_locked_down(struct inode *inode,
static int open_proxy_open(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = NULL;
+ const struct file_operations *real_fops = DEBUGFS_I(inode)->real_fops;
int r;
- r = debugfs_file_get(dentry);
+ r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR);
if (r)
return r == -EIO ? -ENOENT : r;
- real_fops = debugfs_real_fops(filp);
-
r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
@@ -208,61 +328,93 @@ const struct file_operations debugfs_open_proxy_file_operations = {
#define PROTO(args...) args
#define ARGS(args...) args
-#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \
+#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \
static ret_type full_proxy_ ## name(proto) \
{ \
- struct dentry *dentry = F_DENTRY(filp); \
- const struct file_operations *real_fops; \
+ struct dentry *dentry = F_DENTRY(filp); \
+ struct debugfs_fsdata *fsd = dentry->d_fsdata; \
ret_type r; \
\
+ if (!(fsd->methods & bit)) \
+ return ret; \
r = debugfs_file_get(dentry); \
if (unlikely(r)) \
return r; \
- real_fops = debugfs_real_fops(filp); \
- r = real_fops->name(args); \
+ r = fsd->real_fops->name(args); \
debugfs_file_put(dentry); \
return r; \
}
+#define SHORT_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \
+static ret_type short_proxy_ ## name(proto) \
+{ \
+ struct dentry *dentry = F_DENTRY(filp); \
+ struct debugfs_fsdata *fsd = dentry->d_fsdata; \
+ ret_type r; \
+ \
+ if (!(fsd->methods & bit)) \
+ return ret; \
+ r = debugfs_file_get(dentry); \
+ if (unlikely(r)) \
+ return r; \
+ r = fsd->short_fops->name(args); \
+ debugfs_file_put(dentry); \
+ return r; \
+}
+
+SHORT_PROXY_FUNC(llseek, loff_t, filp,
+ PROTO(struct file *filp, loff_t offset, int whence),
+ ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE);
+
FULL_PROXY_FUNC(llseek, loff_t, filp,
PROTO(struct file *filp, loff_t offset, int whence),
- ARGS(filp, offset, whence));
+ ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE);
+
+SHORT_PROXY_FUNC(read, ssize_t, filp,
+ PROTO(struct file *filp, char __user *buf, size_t size,
+ loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL);
FULL_PROXY_FUNC(read, ssize_t, filp,
PROTO(struct file *filp, char __user *buf, size_t size,
loff_t *ppos),
- ARGS(filp, buf, size, ppos));
+ ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL);
+
+SHORT_PROXY_FUNC(write, ssize_t, filp,
+ PROTO(struct file *filp, const char __user *buf,
+ size_t size, loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL);
FULL_PROXY_FUNC(write, ssize_t, filp,
- PROTO(struct file *filp, const char __user *buf, size_t size,
- loff_t *ppos),
- ARGS(filp, buf, size, ppos));
+ PROTO(struct file *filp, const char __user *buf,
+ size_t size, loff_t *ppos),
+ ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL);
FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
PROTO(struct file *filp, unsigned int cmd, unsigned long arg),
- ARGS(filp, cmd, arg));
+ ARGS(filp, cmd, arg), HAS_IOCTL, -ENOTTY);
static __poll_t full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct dentry *dentry = F_DENTRY(filp);
+ struct debugfs_fsdata *fsd = dentry->d_fsdata;
__poll_t r = 0;
- const struct file_operations *real_fops;
+ if (!(fsd->methods & HAS_POLL))
+ return DEFAULT_POLLMASK;
if (debugfs_file_get(dentry))
return EPOLLHUP;
- real_fops = debugfs_real_fops(filp);
- r = real_fops->poll(filp, wait);
+ r = fsd->real_fops->poll(filp, wait);
debugfs_file_put(dentry);
return r;
}
-static int full_proxy_release(struct inode *inode, struct file *filp)
+static int full_proxy_release(struct inode *inode, struct file *file)
{
- const struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = debugfs_real_fops(filp);
- const struct file_operations *proxy_fops = filp->f_op;
+ struct debugfs_fsdata *fsd = F_DENTRY(file)->d_fsdata;
+ const struct file_operations *real_fops = fsd->real_fops;
int r = 0;
/*
@@ -272,43 +424,25 @@ static int full_proxy_release(struct inode *inode, struct file *filp)
* ->i_private is still being meaningful here.
*/
if (real_fops->release)
- r = real_fops->release(inode, filp);
+ r = real_fops->release(inode, file);
- replace_fops(filp, d_inode(dentry)->i_fop);
- kfree(proxy_fops);
fops_put(real_fops);
return r;
}
-static void __full_proxy_fops_init(struct file_operations *proxy_fops,
- const struct file_operations *real_fops)
-{
- proxy_fops->release = full_proxy_release;
- if (real_fops->llseek)
- proxy_fops->llseek = full_proxy_llseek;
- if (real_fops->read)
- proxy_fops->read = full_proxy_read;
- if (real_fops->write)
- proxy_fops->write = full_proxy_write;
- if (real_fops->poll)
- proxy_fops->poll = full_proxy_poll;
- if (real_fops->unlocked_ioctl)
- proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl;
-}
-
-static int full_proxy_open(struct inode *inode, struct file *filp)
+static int full_proxy_open_regular(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = NULL;
- struct file_operations *proxy_fops = NULL;
+ const struct file_operations *real_fops;
+ struct debugfs_fsdata *fsd;
int r;
- r = debugfs_file_get(dentry);
+ r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR);
if (r)
return r == -EIO ? -ENOENT : r;
- real_fops = debugfs_real_fops(filp);
-
+ fsd = dentry->d_fsdata;
+ real_fops = fsd->real_fops;
r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
@@ -329,38 +463,52 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
goto out;
}
- proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL);
- if (!proxy_fops) {
- r = -ENOMEM;
- goto free_proxy;
- }
- __full_proxy_fops_init(proxy_fops, real_fops);
- replace_fops(filp, proxy_fops);
-
if (real_fops->open) {
r = real_fops->open(inode, filp);
if (r) {
- replace_fops(filp, d_inode(dentry)->i_fop);
- goto free_proxy;
- } else if (filp->f_op != proxy_fops) {
+ fops_put(real_fops);
+ } else if (filp->f_op != &debugfs_full_proxy_file_operations) {
/* No protection against file removal anymore. */
WARN(1, "debugfs file owner replaced proxy fops: %pd",
dentry);
- goto free_proxy;
+ fops_put(real_fops);
}
}
-
- goto out;
-free_proxy:
- kfree(proxy_fops);
- fops_put(real_fops);
out:
debugfs_file_put(dentry);
return r;
}
const struct file_operations debugfs_full_proxy_file_operations = {
- .open = full_proxy_open,
+ .open = full_proxy_open_regular,
+ .release = full_proxy_release,
+ .llseek = full_proxy_llseek,
+ .read = full_proxy_read,
+ .write = full_proxy_write,
+ .poll = full_proxy_poll,
+ .unlocked_ioctl = full_proxy_unlocked_ioctl
+};
+
+static int full_proxy_open_short(struct inode *inode, struct file *filp)
+{
+ struct dentry *dentry = F_DENTRY(filp);
+ int r;
+
+ r = __debugfs_file_get(dentry, DBGFS_GET_SHORT);
+ if (r)
+ return r == -EIO ? -ENOENT : r;
+ r = debugfs_locked_down(inode, filp, NULL);
+ if (!r)
+ r = simple_open(inode, filp);
+ debugfs_file_put(dentry);
+ return r;
+}
+
+const struct file_operations debugfs_full_short_proxy_file_operations = {
+ .open = full_proxy_open_short,
+ .llseek = short_proxy_llseek,
+ .read = short_proxy_read,
+ .write = short_proxy_write,
};
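/*
 * A minimal sketch of the consumer side, assuming a driver that only needs
 * read and llseek: struct debugfs_short_fops has no open/release, and the
 * short proxy above supplies the removal protection. The file would be
 * created via debugfs_create_file_short(), added later in this patch; names
 * here are hypothetical.
 */
static ssize_t example_short_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, "hi\n", 3);
}

static const struct debugfs_short_fops example_short_fops = {
	.read = example_short_read,
	.llseek = default_llseek,
};
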
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
@@ -378,8 +526,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
}
EXPORT_SYMBOL_GPL(debugfs_attr_read);
-ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos, bool is_signed)
{
struct dentry *dentry = F_DENTRY(file);
ssize_t ret;
@@ -387,12 +535,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
ret = debugfs_file_get(dentry);
if (unlikely(ret))
return ret;
- ret = simple_attr_write(file, buf, len, ppos);
+ if (is_signed)
+ ret = simple_attr_write_signed(file, buf, len, ppos);
+ else
+ ret = simple_attr_write(file, buf, len, ppos);
debugfs_file_put(dentry);
return ret;
}
+
+ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
+}
EXPORT_SYMBOL_GPL(debugfs_attr_write);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
+}
+EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);
+
static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *value,
const struct file_operations *fops,
@@ -738,11 +902,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
*val = atomic_read((atomic_t *)data);
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
debugfs_atomic_t_set, "%lld\n");
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
"%lld\n");
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
"%lld\n");
/**
@@ -883,12 +1047,57 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
return ret;
}
+EXPORT_SYMBOL_GPL(debugfs_create_str);
static ssize_t debugfs_write_file_str(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- /* This is really only for read-only strings */
- return -EINVAL;
+ struct dentry *dentry = F_DENTRY(file);
+ char *old, *new = NULL;
+ int pos = *ppos;
+ int r;
+
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
+ return r;
+
+ old = *(char **)file->private_data;
+
+ /* only allow strict concatenation */
+ r = -EINVAL;
+ if (pos && pos != strlen(old))
+ goto error;
+
+ r = -E2BIG;
+ if (pos + count + 1 > PAGE_SIZE)
+ goto error;
+
+ r = -ENOMEM;
+ new = kmalloc(pos + count + 1, GFP_KERNEL);
+ if (!new)
+ goto error;
+
+ if (pos)
+ memcpy(new, old, pos);
+
+ r = -EFAULT;
+ if (copy_from_user(new + pos, user_buf, count))
+ goto error;
+
+ new[pos + count] = '\0';
+ strim(new);
+
+ rcu_assign_pointer(*(char __rcu **)file->private_data, new);
+ synchronize_rcu();
+ kfree(old);
+
+ debugfs_file_put(dentry);
+ return count;
+
+error:
+ kfree(new);
+ debugfs_file_put(dentry);
+ return r;
}
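/*
 * Note on the ordering above: the new string is published with
 * rcu_assign_pointer() and the old one is freed only after
 * synchronize_rcu(), presumably so that a concurrent reader which already
 * fetched the old pointer has a full grace period to finish with it before
 * the kfree().
 */
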
static const struct file_operations fops_str = {
@@ -923,15 +1132,6 @@ static const struct file_operations fops_str_wo = {
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
- * be returned.
*/
void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent, char **value)
@@ -956,17 +1156,35 @@ static ssize_t read_file_blob(struct file *file, char __user *user_buf,
return r;
}
+static ssize_t write_file_blob(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ struct dentry *dentry = F_DENTRY(file);
+ ssize_t r;
+
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
+ return r;
+ r = simple_write_to_buffer(blob->data, blob->size, ppos, user_buf,
+ count);
+
+ debugfs_file_put(dentry);
+ return r;
+}
+
static const struct file_operations fops_blob = {
.read = read_file_blob,
+ .write = write_file_blob,
.open = simple_open,
.llseek = default_llseek,
};
/**
- * debugfs_create_blob - create a debugfs file that is used to read a binary blob
+ * debugfs_create_blob - create a debugfs file that is used to read and write
+ * a binary blob
* @name: a pointer to a string containing the name of the file to create.
- * @mode: the read permission that the file should have (other permissions are
- * masked out)
+ * @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
@@ -975,7 +1193,7 @@ static const struct file_operations fops_blob = {
*
* This function creates a file in debugfs with the given name that exports
* @blob->data as a binary blob. If the @mode variable is so set it can be
- * read from. Writing is not supported.
+ * read from and written to.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
@@ -990,7 +1208,7 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
- return debugfs_create_file_unsafe(name, mode & 0444, parent, blob, &fops_blob);
+ return debugfs_create_file_unsafe(name, mode & 0644, parent, blob, &fops_blob);
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
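/*
 * A minimal usage sketch, assuming a driver-owned buffer that should now be
 * writable from userspace as well (0600 survives the 0644 mask above); names
 * are hypothetical.
 */
static u8 example_blob_buf[64];
static struct debugfs_blob_wrapper example_blob = {
	.data = example_blob_buf,
	.size = sizeof(example_blob_buf),
};

/* in an init path: debugfs_create_blob("example_blob", 0600, parent, &example_blob); */
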
@@ -1056,7 +1274,6 @@ static const struct file_operations u32_array_fops = {
.open = u32_array_open,
.release = u32_array_release,
.read = u32_array_read,
- .llseek = no_llseek,
};
/**
@@ -1121,7 +1338,7 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
}
EXPORT_SYMBOL_GPL(debugfs_print_regs32);
-static int debugfs_show_regset32(struct seq_file *s, void *data)
+static int debugfs_regset32_show(struct seq_file *s, void *data)
{
struct debugfs_regset32 *regset = s->private;
@@ -1136,17 +1353,7 @@ static int debugfs_show_regset32(struct seq_file *s, void *data)
return 0;
}
-static int debugfs_open_regset32(struct inode *inode, struct file *file)
-{
- return single_open(file, debugfs_show_regset32, inode->i_private);
-}
-
-static const struct file_operations fops_regset32 = {
- .open = debugfs_open_regset32,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(debugfs_regset32);
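/*
 * Roughly what DEFINE_SHOW_ATTRIBUTE(debugfs_regset32) expands to (trimmed
 * sketch of the generic helper from <linux/seq_file.h>), replacing the
 * hand-rolled open helper and fops removed above:
 *
 *	static int debugfs_regset32_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, debugfs_regset32_show, inode->i_private);
 *	}
 *	static const struct file_operations debugfs_regset32_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = debugfs_regset32_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 */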
/**
* debugfs_create_regset32 - create a debugfs file that returns register values
@@ -1167,7 +1374,7 @@ void debugfs_create_regset32(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_regset32 *regset)
{
- debugfs_create_file(name, mode, parent, regset, &fops_regset32);
+ debugfs_create_file(name, mode, parent, regset, &debugfs_regset32_fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_regset32);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 2f117c57160d..4b263c328ed2 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -14,7 +14,8 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/kobject.h>
@@ -23,7 +24,6 @@
#include <linux/fsnotify.h>
#include <linux/string.h>
#include <linux/seq_file.h>
-#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/security.h>
@@ -35,14 +35,14 @@
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
-static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
+static bool debugfs_enabled __ro_after_init = IS_ENABLED(CONFIG_DEBUG_FS_ALLOW_ALL);
/*
* Don't allow access attributes to be changed whilst the kernel is locked down
* so that we can use the file mode as part of a heuristic to determine whether
* to lock down individual files.
*/
-static int debugfs_setattr(struct user_namespace *mnt_userns,
+static int debugfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *ia)
{
int ret;
@@ -52,7 +52,7 @@ static int debugfs_setattr(struct user_namespace *mnt_userns,
if (ret)
return ret;
}
- return simple_setattr(&init_user_ns, dentry, ia);
+ return simple_setattr(&nop_mnt_idmap, dentry, ia);
}
static const struct inode_operations debugfs_file_inode_operations = {
@@ -72,216 +72,261 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime =
- inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
-struct debugfs_mount_opts {
+struct debugfs_fs_info {
kuid_t uid;
kgid_t gid;
umode_t mode;
+ /* Opt_* bitfield. */
+ unsigned int opts;
};
enum {
Opt_uid,
Opt_gid,
Opt_mode,
- Opt_err
+ Opt_source,
};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec debugfs_param_specs[] = {
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_string ("source", Opt_source),
+ {}
};
-struct debugfs_fs_info {
- struct debugfs_mount_opts mount_opts;
-};
-
-static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
+static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- int option;
- int token;
- kuid_t uid;
- kgid_t gid;
- char *p;
-
- opts->mode = DEBUGFS_DEFAULT_MODE;
-
- while ((p = strsep(&data, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- opts->uid = uid;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- opts->gid = gid;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
+ struct debugfs_fs_info *opts = fc->s_fs_info;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, debugfs_param_specs, param, &result);
+ if (opt < 0) {
/*
- * We might like to report bad mount options here;
- * but traditionally debugfs has ignored all mount options
- */
- }
+ * We might like to report bad mount options here; but
+ * traditionally debugfs has ignored all mount options
+ */
+ if (opt == -ENOPARAM)
+ return 0;
+
+ return opt;
}
+ switch (opt) {
+ case Opt_uid:
+ opts->uid = result.uid;
+ break;
+ case Opt_gid:
+ opts->gid = result.gid;
+ break;
+ case Opt_mode:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_source:
+ if (fc->source)
+ return invalfc(fc, "Multiple sources specified");
+ fc->source = param->string;
+ param->string = NULL;
+ break;
+ /*
+ * We might like to report bad mount options here;
+ * but traditionally debugfs has ignored all mount options
+ */
+ }
+
+ opts->opts |= BIT(opt);
+
return 0;
}
-static int debugfs_apply_options(struct super_block *sb)
+static void _debugfs_apply_options(struct super_block *sb, bool remount)
{
struct debugfs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
- struct debugfs_mount_opts *opts = &fsi->mount_opts;
- inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= opts->mode;
+ /*
+ * On remount, only reset mode/uid/gid if they were provided as mount
+ * options.
+ */
- inode->i_uid = opts->uid;
- inode->i_gid = opts->gid;
+ if (!remount || fsi->opts & BIT(Opt_mode)) {
+ inode->i_mode &= ~S_IALLUGO;
+ inode->i_mode |= fsi->mode;
+ }
- return 0;
+ if (!remount || fsi->opts & BIT(Opt_uid))
+ inode->i_uid = fsi->uid;
+
+ if (!remount || fsi->opts & BIT(Opt_gid))
+ inode->i_gid = fsi->gid;
}
-static int debugfs_remount(struct super_block *sb, int *flags, char *data)
+static void debugfs_apply_options(struct super_block *sb)
{
- int err;
- struct debugfs_fs_info *fsi = sb->s_fs_info;
+ _debugfs_apply_options(sb, false);
+}
+
+static void debugfs_apply_options_remount(struct super_block *sb)
+{
+ _debugfs_apply_options(sb, true);
+}
+
+static int debugfs_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct debugfs_fs_info *sb_opts = sb->s_fs_info;
+ struct debugfs_fs_info *new_opts = fc->s_fs_info;
+
+ if (!new_opts)
+ return 0;
sync_filesystem(sb);
- err = debugfs_parse_options(data, &fsi->mount_opts);
- if (err)
- goto fail;
- debugfs_apply_options(sb);
+ /* structure copy of new mount options to sb */
+ *sb_opts = *new_opts;
+ debugfs_apply_options_remount(sb);
-fail:
- return err;
+ return 0;
}
static int debugfs_show_options(struct seq_file *m, struct dentry *root)
{
struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
- struct debugfs_mount_opts *opts = &fsi->mount_opts;
- if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ if (!uid_eq(fsi->uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
- from_kuid_munged(&init_user_ns, opts->uid));
- if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ from_kuid_munged(&init_user_ns, fsi->uid));
+ if (!gid_eq(fsi->gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
- from_kgid_munged(&init_user_ns, opts->gid));
- if (opts->mode != DEBUGFS_DEFAULT_MODE)
- seq_printf(m, ",mode=%o", opts->mode);
+ from_kgid_munged(&init_user_ns, fsi->gid));
+ if (fsi->mode != DEBUGFS_DEFAULT_MODE)
+ seq_printf(m, ",mode=%o", fsi->mode);
return 0;
}
+static struct kmem_cache *debugfs_inode_cachep __ro_after_init;
+
+static void init_once(void *foo)
+{
+ struct debugfs_inode_info *info = foo;
+ inode_init_once(&info->vfs_inode);
+}
+
+static struct inode *debugfs_alloc_inode(struct super_block *sb)
+{
+ struct debugfs_inode_info *info;
+ info = alloc_inode_sb(sb, debugfs_inode_cachep, GFP_KERNEL);
+ if (!info)
+ return NULL;
+ return &info->vfs_inode;
+}
+
static void debugfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
- free_inode_nonrcu(inode);
+ kmem_cache_free(debugfs_inode_cachep, DEBUGFS_I(inode));
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
- .remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
+ .alloc_inode = debugfs_alloc_inode,
.free_inode = debugfs_free_inode,
};
static void debugfs_release_dentry(struct dentry *dentry)
{
- void *fsd = dentry->d_fsdata;
+ struct debugfs_fsdata *fsd = dentry->d_fsdata;
- if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
- kfree(dentry->d_fsdata);
+ if (fsd) {
+ WARN_ON(!list_empty(&fsd->cancellations));
+ mutex_destroy(&fsd->cancellations_mtx);
+ }
+ kfree(fsd);
}
static struct vfsmount *debugfs_automount(struct path *path)
{
- debugfs_automount_t f;
- f = (debugfs_automount_t)path->dentry->d_fsdata;
- return f(path->dentry, d_inode(path->dentry)->i_private);
+ struct inode *inode = path->dentry->d_inode;
+
+ return DEBUGFS_I(inode)->automount(path->dentry, inode->i_private);
}
static const struct dentry_operations debugfs_dops = {
- .d_delete = always_delete_dentry,
.d_release = debugfs_release_dentry,
.d_automount = debugfs_automount,
};
-static int debug_fill_super(struct super_block *sb, void *data, int silent)
+static int debugfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr debug_files[] = {{""}};
- struct debugfs_fs_info *fsi;
int err;
- fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
- sb->s_fs_info = fsi;
- if (!fsi) {
- err = -ENOMEM;
- goto fail;
- }
-
- err = debugfs_parse_options(data, &fsi->mount_opts);
- if (err)
- goto fail;
-
- err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
+ err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
if (err)
- goto fail;
+ return err;
sb->s_op = &debugfs_super_operations;
- sb->s_d_op = &debugfs_dops;
+ set_default_d_op(sb, &debugfs_dops);
+ sb->s_d_flags |= DCACHE_DONTCACHE;
debugfs_apply_options(sb);
return 0;
+}
-fail:
- kfree(fsi);
- sb->s_fs_info = NULL;
- return err;
+static int debugfs_get_tree(struct fs_context *fc)
+{
+ int err;
+
+ err = get_tree_single(fc, debugfs_fill_super);
+ if (err)
+ return err;
+
+ return debugfs_reconfigure(fc);
}
-static struct dentry *debug_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static void debugfs_free_fc(struct fs_context *fc)
{
- if (!(debugfs_allow & DEBUGFS_ALLOW_API))
- return ERR_PTR(-EPERM);
+ kfree(fc->s_fs_info);
+}
- return mount_single(fs_type, flags, data, debug_fill_super);
+static const struct fs_context_operations debugfs_context_ops = {
+ .free = debugfs_free_fc,
+ .parse_param = debugfs_parse_param,
+ .get_tree = debugfs_get_tree,
+ .reconfigure = debugfs_reconfigure,
+};
+
+static int debugfs_init_fs_context(struct fs_context *fc)
+{
+ struct debugfs_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fsi->mode = DEBUGFS_DEFAULT_MODE;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &debugfs_context_ops;
+ return 0;
}
static struct file_system_type debug_fs_type = {
.owner = THIS_MODULE,
.name = "debugfs",
- .mount = debug_mount,
- .kill_sb = kill_litter_super,
+ .init_fs_context = debugfs_init_fs_context,
+ .parameters = debugfs_param_specs,
+ .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("debugfs");
@@ -307,19 +352,20 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;
- dentry = lookup_positive_unlocked(name, parent, strlen(name));
+ dentry = lookup_noperm_positive_unlocked(&QSTR(name), parent);
if (IS_ERR(dentry))
return NULL;
return dentry;
}
EXPORT_SYMBOL_GPL(debugfs_lookup);
-static struct dentry *start_creating(const char *name, struct dentry *parent)
+static struct dentry *debugfs_start_creating(const char *name,
+ struct dentry *parent)
{
struct dentry *dentry;
int error;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API))
+ if (!debugfs_enabled)
return ERR_PTR(-EPERM);
if (!debugfs_initialized())
@@ -345,48 +391,33 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;
- inode_lock(d_inode(parent));
- if (unlikely(IS_DEADDIR(d_inode(parent))))
- dentry = ERR_PTR(-ENOENT);
- else
- dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
- if (d_is_dir(dentry))
- pr_err("Directory '%s' with parent '%s' already present!\n",
- name, parent->d_name.name);
- else
- pr_err("File '%s' in directory '%s' already present!\n",
- name, parent->d_name.name);
- dput(dentry);
- dentry = ERR_PTR(-EEXIST);
- }
-
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry)) {
- inode_unlock(d_inode(parent));
+ if (dentry == ERR_PTR(-EEXIST))
+ pr_err("'%s' already exists in '%pd'\n", name, parent);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
-
return dentry;
}
-static struct dentry *failed_creating(struct dentry *dentry)
+static struct dentry *debugfs_failed_creating(struct dentry *dentry)
{
- inode_unlock(d_inode(dentry->d_parent));
- dput(dentry);
+ simple_done_creating(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
return ERR_PTR(-ENOMEM);
}
-static struct dentry *end_creating(struct dentry *dentry)
+static struct dentry *debugfs_end_creating(struct dentry *dentry)
{
- inode_unlock(d_inode(dentry->d_parent));
- return dentry;
+ simple_done_creating(dentry);
+ return dentry; // borrowed
}
static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct file_operations *proxy_fops,
- const struct file_operations *real_fops)
+ const void *real_fops)
{
struct dentry *dentry;
struct inode *inode;
@@ -394,74 +425,54 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
- dentry = start_creating(name, parent);
+ dentry = debugfs_start_creating(name, parent);
if (IS_ERR(dentry))
return dentry;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
- failed_creating(dentry);
- return ERR_PTR(-EPERM);
- }
-
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create file '%s'\n",
name);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
inode->i_mode = mode;
inode->i_private = data;
inode->i_op = &debugfs_file_inode_operations;
+ if (!real_fops)
+ proxy_fops = &debugfs_noop_file_operations;
inode->i_fop = proxy_fops;
- dentry->d_fsdata = (void *)((unsigned long)real_fops |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ DEBUGFS_I(inode)->raw = real_fops;
+ DEBUGFS_I(inode)->aux = (void *)aux;
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return debugfs_end_creating(dentry);
}
-/**
- * debugfs_create_file - create a file in the debugfs filesystem
- * @name: a pointer to a string containing the name of the file to create.
- * @mode: the permission that the file should have.
- * @parent: a pointer to the parent dentry for this file. This should be a
- * directory dentry if set. If this parameter is NULL, then the
- * file will be created in the root of the debugfs filesystem.
- * @data: a pointer to something that the caller will want to get to later
- * on. The inode.i_private pointer will point to this value on
- * the open() call.
- * @fops: a pointer to a struct file_operations that should be used for
- * this file.
- *
- * This is the basic "create a file" function for debugfs. It allows for a
- * wide range of flexibility in creating a file, or a directory (if you want
- * to create a directory, the debugfs_create_dir() function is
- * recommended to be used instead.)
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value -%ENODEV will be
- * returned.
- */
-struct dentry *debugfs_create_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops)
+struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct file_operations *fops)
{
+ return __debugfs_create_file(name, mode, parent, data, aux,
+ &debugfs_full_proxy_file_operations,
+ fops);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_file_full);
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_full_proxy_file_operations :
- &debugfs_noop_file_operations,
+struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct debugfs_short_fops *fops)
+{
+ return __debugfs_create_file(name, mode, parent, data, aux,
+ &debugfs_full_short_proxy_file_operations,
fops);
}
-EXPORT_SYMBOL_GPL(debugfs_create_file);
+EXPORT_SYMBOL_GPL(debugfs_create_file_short);
/**
* debugfs_create_file_unsafe - create a file in the debugfs filesystem
@@ -495,9 +506,8 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
const struct file_operations *fops)
{
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_open_proxy_file_operations :
- &debugfs_noop_file_operations,
+ return __debugfs_create_file(name, mode, parent, data, NULL,
+ &debugfs_open_proxy_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
@@ -551,25 +561,25 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
+ *
+ * NOTE: it's expected that most callers should _ignore_ the errors returned
+ * by this function. Other debugfs functions handle the fact that the "dentry"
+ * passed to them could be an error and they don't crash in that case.
+ * Drivers should generally work fine even if debugfs fails to init anyway.
*/
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
{
- struct dentry *dentry = start_creating(name, parent);
+ struct dentry *dentry = debugfs_start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
- failed_creating(dentry);
- return ERR_PTR(-EPERM);
- }
-
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create directory '%s'\n",
name);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
@@ -578,10 +588,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return debugfs_end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
@@ -601,34 +611,29 @@ struct dentry *debugfs_create_automount(const char *name,
debugfs_automount_t f,
void *data)
{
- struct dentry *dentry = start_creating(name, parent);
+ struct dentry *dentry = debugfs_start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
- failed_creating(dentry);
- return ERR_PTR(-EPERM);
- }
-
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create automount '%s'\n",
name);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
- dentry->d_fsdata = (void *)f;
+ DEBUGFS_I(inode)->automount = f;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return debugfs_end_creating(dentry);
}
EXPORT_SYMBOL(debugfs_create_automount);
@@ -664,7 +669,7 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
if (!link)
return ERR_PTR(-ENOMEM);
- dentry = start_creating(name, parent);
+ dentry = debugfs_start_creating(name, parent);
if (IS_ERR(dentry)) {
kfree(link);
return dentry;
@@ -675,13 +680,13 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
pr_err("out of free dentries, can not create symlink '%s'\n",
name);
kfree(link);
- return failed_creating(dentry);
+ return debugfs_failed_creating(dentry);
}
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_op = &debugfs_symlink_inode_operations;
inode->i_link = link;
- d_instantiate(dentry, inode);
- return end_creating(dentry);
+ d_make_persistent(dentry, inode);
+ return debugfs_end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_symlink);
@@ -697,10 +702,54 @@ static void __debugfs_file_removed(struct dentry *dentry)
*/
smp_mb();
fsd = READ_ONCE(dentry->d_fsdata);
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+ if (!fsd)
+ return;
+
+ /* if this was the last reference, we're done */
+ if (refcount_dec_and_test(&fsd->active_users))
return;
- if (!refcount_dec_and_test(&fsd->active_users))
+
+ /*
+ * If there's still a reference, the code that obtained it can
+ * be in different states:
+ * - The common case of not using cancellations, or already
+ * after debugfs_leave_cancellation(), where we just need
+ * to wait for debugfs_file_put() which signals the completion;
+ * - inside a cancellation section, i.e. between
+ * debugfs_enter_cancellation() and debugfs_leave_cancellation(),
+ * in which case we need to trigger the ->cancel() function,
+ * and then wait for debugfs_file_put() just like in the
+ * previous case;
+ * - before debugfs_enter_cancellation() (but obviously after
+ * debugfs_file_get()), in which case we may not see the
+ * cancellation in the list on the first round of the loop,
+ * but debugfs_enter_cancellation() signals the completion
+ * after adding it, so this code gets woken up to call the
+ * ->cancel() function.
+ */
+ while (refcount_read(&fsd->active_users)) {
+ struct debugfs_cancellation *c;
+
+ /*
+ * Lock the cancellations. Note that the cancellations
+ * structs are meant to be on the stack, so we need to
+ * ensure we either use them here or don't touch them,
+ * and debugfs_leave_cancellation() will wait for this
+ * to be finished processing before exiting one. It may
+ * of course win and remove the cancellation, but then
+ * chances are we never even got into this bit; we only
+ * do so if the refcount isn't zero already.
+ */
+ mutex_lock(&fsd->cancellations_mtx);
+ while ((c = list_first_entry_or_null(&fsd->cancellations,
+ typeof(*c), list))) {
+ list_del_init(&c->list);
+ c->cancel(dentry, c->cancel_data);
+ }
+ mutex_unlock(&fsd->cancellations_mtx);
+
wait_for_completion(&fsd->active_users_drained);
+ }
}
static void remove_one(struct dentry *victim)
@@ -735,76 +784,88 @@ void debugfs_remove(struct dentry *dentry)
EXPORT_SYMBOL_GPL(debugfs_remove);
/**
- * debugfs_rename - rename a file/directory in the debugfs filesystem
- * @old_dir: a pointer to the parent dentry for the renamed object. This
- * should be a directory dentry.
- * @old_dentry: dentry of an object to be renamed.
- * @new_dir: a pointer to the parent dentry where the object should be
- * moved. This should be a directory dentry.
- * @new_name: a pointer to a string containing the target name.
+ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
+ * @name: a pointer to a string containing the name of the item to look up.
+ * @parent: a pointer to the parent dentry of the item.
+ *
+ * This is the equivalent of doing something like
+ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
+ * handled for the directory being looked up.
+ */
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+
+ dentry = debugfs_lookup(name, parent);
+ if (!dentry)
+ return;
+
+ debugfs_remove(dentry);
+ dput(dentry);
+}
+EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
+
+/**
+ * debugfs_change_name - rename a file/directory in the debugfs filesystem
+ * @dentry: dentry of an object to be renamed.
+ * @fmt: format for new name
*
* This function renames a file/directory in debugfs. The target must not
* exist for rename to succeed.
*
- * This function will return a pointer to old_dentry (which is updated to
- * reflect renaming) if it succeeds. If an error occurs, %NULL will be
- * returned.
+ * This function will return 0 on success and -E... on failure.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name)
+int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, ...)
{
- int error;
- struct dentry *dentry = NULL, *trap;
+ int error = 0;
+ const char *new_name;
struct name_snapshot old_name;
+ struct dentry *target;
+ struct renamedata rd = {};
+ struct inode *dir;
+ va_list ap;
- if (IS_ERR(old_dir))
- return old_dir;
- if (IS_ERR(new_dir))
- return new_dir;
- if (IS_ERR_OR_NULL(old_dentry))
- return old_dentry;
-
- trap = lock_rename(new_dir, old_dir);
- /* Source or destination directories don't exist? */
- if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
- goto exit;
- /* Source does not exist, cyclic rename, or mountpoint? */
- if (d_really_is_negative(old_dentry) || old_dentry == trap ||
- d_mountpoint(old_dentry))
- goto exit;
- dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
- /* Lookup failed, cyclic rename or target exists? */
- if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
- goto exit;
-
- take_dentry_name_snapshot(&old_name, old_dentry);
-
- error = simple_rename(&init_user_ns, d_inode(old_dir), old_dentry,
- d_inode(new_dir), dentry, 0);
+ if (IS_ERR_OR_NULL(dentry))
+ return 0;
+
+ va_start(ap, fmt);
+ new_name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!new_name)
+ return -ENOMEM;
+
+ rd.old_parent = dget_parent(dentry);
+ rd.new_parent = rd.old_parent;
+ rd.flags = RENAME_NOREPLACE;
+ target = lookup_noperm_unlocked(&QSTR(new_name), rd.new_parent);
+ if (IS_ERR(target))
+ return PTR_ERR(target);
+
+ error = start_renaming_two_dentries(&rd, dentry, target);
if (error) {
- release_dentry_name_snapshot(&old_name);
- goto exit;
+ if (error == -EEXIST && target == dentry)
+ /* it isn't an error to rename a thing to itself */
+ error = 0;
+ goto out;
}
- d_move(old_dentry, dentry);
- fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name,
- d_is_dir(old_dentry),
- NULL, old_dentry);
+
+ dir = d_inode(rd.old_parent);
+ take_dentry_name_snapshot(&old_name, dentry);
+ simple_rename_timestamp(dir, dentry, dir, rd.new_dentry);
+ d_move(dentry, rd.new_dentry);
+ fsnotify_move(dir, dir, &old_name.name, d_is_dir(dentry), NULL, dentry);
release_dentry_name_snapshot(&old_name);
- unlock_rename(new_dir, old_dir);
- dput(dentry);
- return old_dentry;
-exit:
- if (dentry && !IS_ERR(dentry))
- dput(dentry);
- unlock_rename(new_dir, old_dir);
- if (IS_ERR(dentry))
- return dentry;
- return ERR_PTR(-EINVAL);
+ end_renaming(&rd);
+out:
+ dput(rd.old_parent);
+ dput(target);
+ kfree_const(new_name);
+ return error;
}
-EXPORT_SYMBOL_GPL(debugfs_rename);
+EXPORT_SYMBOL_GPL(debugfs_change_name);
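/*
 * A minimal usage sketch, assuming a hypothetical driver that keeps the
 * dentry returned by debugfs_create_dir() and wants to follow a device
 * rename; unlike the old debugfs_rename(), the result is a plain 0/-E...
 * value and renaming an entry to its current name is not an error.
 */
static int example_follow_rename(struct dentry *dir, const char *newname)
{
	return debugfs_change_name(dir, "%s", newname);
}
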
/**
* debugfs_initialized - Tells whether debugfs has been registered
@@ -819,33 +880,47 @@ static int __init debugfs_kernel(char *str)
{
if (str) {
if (!strcmp(str, "on"))
- debugfs_allow = DEBUGFS_ALLOW_API | DEBUGFS_ALLOW_MOUNT;
- else if (!strcmp(str, "no-mount"))
- debugfs_allow = DEBUGFS_ALLOW_API;
+ debugfs_enabled = true;
else if (!strcmp(str, "off"))
- debugfs_allow = 0;
+ debugfs_enabled = false;
+ else if (!strcmp(str, "no-mount")) {
+ pr_notice("debugfs=no-mount is a deprecated alias "
+ "for debugfs=off\n");
+ debugfs_enabled = false;
+ }
}
return 0;
}
early_param("debugfs", debugfs_kernel);
+
static int __init debugfs_init(void)
{
int retval;
- if (!(debugfs_allow & DEBUGFS_ALLOW_MOUNT))
+ if (!debugfs_enabled)
return -EPERM;
retval = sysfs_create_mount_point(kernel_kobj, "debug");
if (retval)
return retval;
- retval = register_filesystem(&debug_fs_type);
- if (retval)
+ debugfs_inode_cachep = kmem_cache_create("debugfs_inode_cache",
+ sizeof(struct debugfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
+ init_once);
+ if (debugfs_inode_cachep == NULL) {
sysfs_remove_mount_point(kernel_kobj, "debug");
- else
- debugfs_registered = true;
+ return -ENOMEM;
+ }
- return retval;
+ retval = register_filesystem(&debug_fs_type);
+ if (retval) { // Really not going to happen
+ sysfs_remove_mount_point(kernel_kobj, "debug");
+ kmem_cache_destroy(debugfs_inode_cachep);
+ return retval;
+ }
+ debugfs_registered = true;
+ return 0;
}
core_initcall(debugfs_init);
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index 92af8ae31313..c95699b27a56 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -7,40 +7,52 @@
#ifndef _DEBUGFS_INTERNAL_H_
#define _DEBUGFS_INTERNAL_H_
+#include <linux/list.h>
struct file_operations;
+struct debugfs_inode_info {
+ struct inode vfs_inode;
+ union {
+ const void *raw;
+ const struct file_operations *real_fops;
+ const struct debugfs_short_fops *short_fops;
+ debugfs_automount_t automount;
+ };
+ void *aux;
+};
+
+static inline struct debugfs_inode_info *DEBUGFS_I(struct inode *inode)
+{
+ return container_of(inode, struct debugfs_inode_info, vfs_inode);
+}
+
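/*
 * Which union member above is valid follows from how the inode was created:
 * ->automount for automount directories, ->real_fops for files created with
 * a struct file_operations (open or full proxy), ->short_fops for
 * debugfs_short_fops files; ->raw only stashes whichever pointer was passed
 * at creation time.
 */
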
/* declared over in file.c */
extern const struct file_operations debugfs_noop_file_operations;
extern const struct file_operations debugfs_open_proxy_file_operations;
extern const struct file_operations debugfs_full_proxy_file_operations;
+extern const struct file_operations debugfs_full_short_proxy_file_operations;
struct debugfs_fsdata {
const struct file_operations *real_fops;
- refcount_t active_users;
- struct completion active_users_drained;
+ const struct debugfs_short_fops *short_fops;
+ struct {
+ refcount_t active_users;
+ struct completion active_users_drained;
+
+ /* protect cancellations */
+ struct mutex cancellations_mtx;
+ struct list_head cancellations;
+ unsigned int methods;
+ };
};
-/*
- * A dentry's ->d_fsdata either points to the real fops or to a
- * dynamically allocated debugfs_fsdata instance.
- * In order to distinguish between these two cases, a real fops
- * pointer gets its lowest bit set.
- */
-#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
-
-/* Access BITS */
-#define DEBUGFS_ALLOW_API BIT(0)
-#define DEBUGFS_ALLOW_MOUNT BIT(1)
-
-#ifdef CONFIG_DEBUG_FS_ALLOW_ALL
-#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_MOUNT | DEBUGFS_ALLOW_API)
-#endif
-#ifdef CONFIG_DEBUG_FS_DISALLOW_MOUNT
-#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_API)
-#endif
-#ifdef CONFIG_DEBUG_FS_ALLOW_NONE
-#define DEFAULT_DEBUGFS_ALLOW_BITS (0)
-#endif
+enum {
+ HAS_READ = 1,
+ HAS_WRITE = 2,
+ HAS_LSEEK = 4,
+ HAS_POLL = 8,
+ HAS_IOCTL = 16
+};
#endif /* _DEBUGFS_INTERNAL_H_ */
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 42e5a766d33c..9f3de528c358 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
@@ -21,7 +23,6 @@
#include <linux/magic.h>
#include <linux/idr.h>
#include <linux/devpts_fs.h>
-#include <linux/parser.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
@@ -45,7 +46,7 @@ static int pty_limit_min;
static int pty_limit_max = INT_MAX;
static atomic_t pty_count = ATOMIC_INIT(0);
-static struct ctl_table pty_table[] = {
+static const struct ctl_table pty_table[] = {
{
.procname = "max",
.maxlen = sizeof(int),
@@ -69,25 +70,6 @@ static struct ctl_table pty_table[] = {
.data = &pty_count,
.proc_handler = proc_dointvec,
},
- {}
-};
-
-static struct ctl_table pty_kern_table[] = {
- {
- .procname = "pty",
- .mode = 0555,
- .child = pty_table,
- },
- {}
-};
-
-static struct ctl_table pty_root_table[] = {
- {
- .procname = "kernel",
- .mode = 0555,
- .child = pty_kern_table,
- },
- {}
};
struct pts_mount_opts {
@@ -106,21 +88,21 @@ enum {
Opt_err
};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_ptmxmode, "ptmxmode=%o"},
- {Opt_newinstance, "newinstance"},
- {Opt_max, "max=%d"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec devpts_param_specs[] = {
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_s32 ("max", Opt_max),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_flag ("newinstance", Opt_newinstance),
+ fsparam_u32oct ("ptmxmode", Opt_ptmxmode),
+ fsparam_uid ("uid", Opt_uid),
+ {}
};
struct pts_fs_info {
struct ida allocated_ptys;
struct pts_mount_opts mount_opts;
struct super_block *sb;
- struct dentry *ptmx_dentry;
+ struct inode *ptmx_inode; // borrowed
};
static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
@@ -233,96 +215,50 @@ void devpts_release(struct pts_fs_info *fsi)
deactivate_super(fsi->sb);
}
-#define PARSE_MOUNT 0
-#define PARSE_REMOUNT 1
-
/*
- * parse_mount_options():
- * Set @opts to mount options specified in @data. If an option is not
- * specified in @data, set it to its default value.
- *
- * Note: @data may be NULL (in which case all options are set to default).
+ * devpts_parse_param - Parse mount parameters
*/
-static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
+static int devpts_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- kuid_t uid;
- kgid_t gid;
-
- opts->setuid = 0;
- opts->setgid = 0;
- opts->uid = GLOBAL_ROOT_UID;
- opts->gid = GLOBAL_ROOT_GID;
- opts->mode = DEVPTS_DEFAULT_MODE;
- opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
- opts->max = NR_UNIX98_PTY_MAX;
-
- /* Only allow instances mounted from the initial mount
- * namespace to tap the reserve pool of ptys.
- */
- if (op == PARSE_MOUNT)
- opts->reserve =
- (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
-
- while ((p = strsep(&data, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- int option;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- opts->uid = uid;
- opts->setuid = 1;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- opts->gid = gid;
- opts->setgid = 1;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
- case Opt_ptmxmode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->ptmxmode = option & S_IALLUGO;
- break;
- case Opt_newinstance:
- break;
- case Opt_max:
- if (match_int(&args[0], &option) ||
- option < 0 || option > NR_UNIX98_PTY_MAX)
- return -EINVAL;
- opts->max = option;
- break;
- default:
- pr_err("called with bogus options\n");
- return -EINVAL;
- }
+ struct pts_fs_info *fsi = fc->s_fs_info;
+ struct pts_mount_opts *opts = &fsi->mount_opts;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, devpts_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->uid = result.uid;
+ opts->setuid = 1;
+ break;
+ case Opt_gid:
+ opts->gid = result.gid;
+ opts->setgid = 1;
+ break;
+ case Opt_mode:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_ptmxmode:
+ opts->ptmxmode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_newinstance:
+ break;
+ case Opt_max:
+ if (result.uint_32 > NR_UNIX98_PTY_MAX)
+ return invalf(fc, "max out of range");
+ opts->max = result.uint_32;
+ break;
}
return 0;
}
-static int mknod_ptmx(struct super_block *sb)
+static int mknod_ptmx(struct super_block *sb, struct fs_context *fc)
{
int mode;
- int rc = -ENOMEM;
struct dentry *dentry;
struct inode *inode;
struct dentry *root = sb->s_root;
@@ -331,18 +267,10 @@ static int mknod_ptmx(struct super_block *sb)
kuid_t ptmx_uid = current_fsuid();
kgid_t ptmx_gid = current_fsgid();
- inode_lock(d_inode(root));
-
- /* If we have already created ptmx node, return */
- if (fsi->ptmx_dentry) {
- rc = 0;
- goto out;
- }
-
- dentry = d_alloc_name(root, "ptmx");
- if (!dentry) {
+ dentry = simple_start_creating(root, "ptmx");
+ if (IS_ERR(dentry)) {
pr_err("Unable to alloc dentry for ptmx node\n");
- goto out;
+ return PTR_ERR(dentry);
}
/*
@@ -350,44 +278,49 @@ static int mknod_ptmx(struct super_block *sb)
*/
inode = new_inode(sb);
if (!inode) {
+ simple_done_creating(dentry);
pr_err("Unable to alloc inode for ptmx node\n");
- dput(dentry);
- goto out;
+ return -ENOMEM;
}
inode->i_ino = 2;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
inode->i_uid = ptmx_uid;
inode->i_gid = ptmx_gid;
+ fsi->ptmx_inode = inode;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
- fsi->ptmx_dentry = dentry;
- rc = 0;
-out:
- inode_unlock(d_inode(root));
- return rc;
+ simple_done_creating(dentry);
+
+ return 0;
}
static void update_ptmx_mode(struct pts_fs_info *fsi)
{
- struct inode *inode;
- if (fsi->ptmx_dentry) {
- inode = d_inode(fsi->ptmx_dentry);
- inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
- }
+ fsi->ptmx_inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
}
-static int devpts_remount(struct super_block *sb, int *flags, char *data)
+static int devpts_reconfigure(struct fs_context *fc)
{
- int err;
- struct pts_fs_info *fsi = DEVPTS_SB(sb);
- struct pts_mount_opts *opts = &fsi->mount_opts;
+ struct pts_fs_info *fsi = DEVPTS_SB(fc->root->d_sb);
+ struct pts_fs_info *new = fc->s_fs_info;
- err = parse_mount_options(data, PARSE_REMOUNT, opts);
+ /* Apply the revised options. We don't want to change ->reserve.
+ * Ideally, we'd update each option conditionally on it having been
+ * explicitly changed, but the default is to reset everything so that
+ * would break UAPI...
+ */
+ fsi->mount_opts.setuid = new->mount_opts.setuid;
+ fsi->mount_opts.setgid = new->mount_opts.setgid;
+ fsi->mount_opts.uid = new->mount_opts.uid;
+ fsi->mount_opts.gid = new->mount_opts.gid;
+ fsi->mount_opts.mode = new->mount_opts.mode;
+ fsi->mount_opts.ptmxmode = new->mount_opts.ptmxmode;
+ fsi->mount_opts.max = new->mount_opts.max;
/*
* parse_mount_options() restores options to default values
@@ -397,7 +330,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
*/
update_ptmx_mode(fsi);
- return err;
+ return 0;
}
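
As a hedged illustration (not part of the patch), a reconfigure request reaching devpts_reconfigure() above can be driven from userspace through the new mount API roughly as follows; fspick() and fsconfig() have no glibc wrappers, so raw syscalls are used:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

/* Hypothetical example: tighten the slave node mode on a live mount. */
static int remount_devpts_example(void)
{
	int fd = syscall(SYS_fspick, AT_FDCWD, "/dev/pts", FSPICK_CLOEXEC);

	if (fd < 0)
		return -1;
	/* each string option is fed to devpts_parse_param() */
	syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "mode", "0600", 0);
	/* triggers ->reconfigure(), i.e. devpts_reconfigure() */
	syscall(SYS_fsconfig, fd, FSCONFIG_CMD_RECONFIGURE, NULL, NULL, 0);
	close(fd);
	return 0;
}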
static int devpts_show_options(struct seq_file *seq, struct dentry *root)
@@ -421,55 +354,28 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
static const struct super_operations devpts_sops = {
.statfs = simple_statfs,
- .remount_fs = devpts_remount,
.show_options = devpts_show_options,
};
-static void *new_pts_fs_info(struct super_block *sb)
-{
- struct pts_fs_info *fsi;
-
- fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
- if (!fsi)
- return NULL;
-
- ida_init(&fsi->allocated_ptys);
- fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
- fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
- fsi->sb = sb;
-
- return fsi;
-}
-
-static int
-devpts_fill_super(struct super_block *s, void *data, int silent)
+static int devpts_fill_super(struct super_block *s, struct fs_context *fc)
{
+ struct pts_fs_info *fsi = DEVPTS_SB(s);
struct inode *inode;
- int error;
s->s_iflags &= ~SB_I_NODEV;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = DEVPTS_SUPER_MAGIC;
s->s_op = &devpts_sops;
- s->s_d_op = &simple_dentry_operations;
+ s->s_d_flags = DCACHE_DONTCACHE;
s->s_time_gran = 1;
+ fsi->sb = s;
- error = -ENOMEM;
- s->s_fs_info = new_pts_fs_info(s);
- if (!s->s_fs_info)
- goto fail;
-
- error = parse_mount_options(data, PARSE_MOUNT, &DEVPTS_SB(s)->mount_opts);
- if (error)
- goto fail;
-
- error = -ENOMEM;
inode = new_inode(s);
if (!inode)
- goto fail;
+ return -ENOMEM;
inode->i_ino = 1;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -478,31 +384,60 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_root = d_make_root(inode);
if (!s->s_root) {
pr_err("get root dentry failed\n");
- goto fail;
+ return -ENOMEM;
}
- error = mknod_ptmx(s);
- if (error)
- goto fail_dput;
-
- return 0;
-fail_dput:
- dput(s->s_root);
- s->s_root = NULL;
-fail:
- return error;
+ return mknod_ptmx(s, fc);
}
/*
- * devpts_mount()
+ * devpts_get_tree()
*
* Mount a new (private) instance of devpts. PTYs created in this
* instance are independent of the PTYs in other devpts instances.
*/
-static struct dentry *devpts_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int devpts_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, devpts_fill_super);
+ return get_tree_nodev(fc, devpts_fill_super);
+}
+
+static void devpts_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations devpts_context_ops = {
+ .free = devpts_free_fc,
+ .parse_param = devpts_parse_param,
+ .get_tree = devpts_get_tree,
+ .reconfigure = devpts_reconfigure,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int devpts_init_fs_context(struct fs_context *fc)
+{
+ struct pts_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ ida_init(&fsi->allocated_ptys);
+ fsi->mount_opts.uid = GLOBAL_ROOT_UID;
+ fsi->mount_opts.gid = GLOBAL_ROOT_GID;
+ fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
+ fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+ fsi->mount_opts.max = NR_UNIX98_PTY_MAX;
+
+ if (fc->purpose == FS_CONTEXT_FOR_MOUNT &&
+ current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns)
+ fsi->mount_opts.reserve = true;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &devpts_context_ops;
+ return 0;
}
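
For a fresh mount, fsopen() creates the fs_context with FS_CONTEXT_FOR_MOUNT, so the reserve-pool check above applies at context creation time rather than at option parsing time. A hypothetical userspace sequence (raw syscalls, names in comments map to the ops installed above) might look like:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

/* Hypothetical example of creating a private devpts instance. */
static int mount_devpts_example(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "devpts", FSOPEN_CLOEXEC);	/* -> devpts_init_fs_context() */
	if (fsfd < 0)
		return -1;
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "ptmxmode", "0666", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);	/* -> devpts_get_tree() */
	mntfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
	close(fsfd);
	return mntfd;	/* attach somewhere with move_mount(2) */
}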
static void devpts_kill_sb(struct super_block *sb)
@@ -512,12 +447,13 @@ static void devpts_kill_sb(struct super_block *sb)
if (fsi)
ida_destroy(&fsi->allocated_ptys);
kfree(fsi);
- kill_litter_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type devpts_fs_type = {
.name = "devpts",
- .mount = devpts_mount,
+ .init_fs_context = devpts_init_fs_context,
+ .parameters = devpts_param_specs,
.kill_sb = devpts_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
@@ -552,12 +488,12 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx)
/**
* devpts_pty_new -- create a new inode in /dev/pts/
- * @ptmx_inode: inode of the master
- * @device: major+minor of the node to be created
+ * @fsi: Filesystem info for this instance.
* @index: used as a name of the node
* @priv: what's given back by devpts_get_priv
*
- * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
+ * The dentry for the created inode is returned.
+ * Remove it from /dev/pts/ with devpts_pty_kill().
*/
struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
{
@@ -578,27 +514,26 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
inode->i_ino = index + 3;
inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
sprintf(s, "%d", index);
dentry = d_alloc_name(root, s);
- if (dentry) {
- dentry->d_fsdata = priv;
- d_add(dentry, inode);
- fsnotify_create(d_inode(root), dentry);
- } else {
+ if (!dentry) {
iput(inode);
- dentry = ERR_PTR(-ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
-
- return dentry;
+ dentry->d_fsdata = priv;
+ d_make_persistent(dentry, inode);
+ fsnotify_create(d_inode(root), dentry);
+ dput(dentry);
+ return dentry; // borrowed
}
/**
* devpts_get_priv -- get private data for a slave
- * @pts_inode: inode of the slave
+ * @dentry: dentry of the slave
*
* Returns whatever was passed as priv in devpts_pty_new for a given inode.
*/
@@ -611,7 +546,7 @@ void *devpts_get_priv(struct dentry *dentry)
/**
 * devpts_pty_kill -- remove inode from /dev/pts/
- * @inode: inode of the slave to be removed
+ * @dentry: dentry of the slave to be removed
*
* This is an inverse operation of devpts_pty_new.
*/
@@ -621,16 +556,16 @@ void devpts_pty_kill(struct dentry *dentry)
dentry->d_fsdata = NULL;
drop_nlink(dentry->d_inode);
- fsnotify_unlink(d_inode(dentry->d_parent), dentry);
d_drop(dentry);
- dput(dentry); /* d_alloc_name() in devpts_pty_new() */
+ fsnotify_unlink(d_inode(dentry->d_parent), dentry);
+ d_make_discardable(dentry);
}
static int __init init_devpts_fs(void)
{
int err = register_filesystem(&devpts_fs_type);
if (!err) {
- register_sysctl_table(pty_root_table);
+ register_sysctl("kernel/pty", pty_table);
}
return err;
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 654443558047..2267f5ae7f77 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,13 +37,12 @@
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
-#include <linux/prefetch.h>
#include "internal.h"
/*
- * How many user pages to map in one call to get_user_pages(). This determines
- * the size of a structure in the slab cache
+ * How many user pages to map in one call to iov_iter_extract_pages(). This
+ * determines the size of a structure in the slab cache
*/
#define DIO_PAGES 64
@@ -86,7 +85,6 @@ struct dio_submit {
sector_t final_block_in_request;/* doesn't change */
int boundary; /* prev block is at a boundary */
get_block_t *get_block; /* block mapping function */
- dio_submit_t *submit_io; /* IO submition function */
loff_t logical_offset_in_bio; /* current first logical block in bio */
sector_t final_block_in_bio; /* current final block in bio + 1 */
@@ -117,18 +115,18 @@ struct dio_submit {
/* dio_state communicated between submission path and end_io */
struct dio {
int flags; /* doesn't change */
- int op;
- int op_flags;
+ blk_opf_t opf; /* request operation type and flags */
struct gendisk *bio_disk;
struct inode *inode;
loff_t i_size; /* i_size when submitted */
dio_iodone_t *end_io; /* IO completion function */
+ bool is_pinned; /* T if we have pins on the pages */
void *private; /* copy from map_bh.b_private */
/* BIO completion state */
spinlock_t bio_lock; /* protects BIO fields below */
- int page_errors; /* errno from get_user_pages() */
+ int page_errors; /* err from iov_iter_extract_pages() */
int is_async; /* is IO async ? */
bool defer_completion; /* defer AIO completion to workqueue? */
bool should_dirty; /* if pages should be dirtied */
@@ -152,7 +150,7 @@ struct dio {
};
} ____cacheline_aligned_in_smp;
-static struct kmem_cache *dio_cache __read_mostly;
+static struct kmem_cache *dio_cache __ro_after_init;
/*
* How many pages are in the queue?
@@ -167,13 +165,14 @@ static inline unsigned dio_pages_present(struct dio_submit *sdio)
*/
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
+ struct page **pages = dio->pages;
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
ssize_t ret;
- ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
- &sdio->from);
+ ret = iov_iter_extract_pages(sdio->iter, &pages, LONG_MAX,
+ DIO_PAGES, 0, &sdio->from);
- if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
- struct page *page = ZERO_PAGE(0);
+ if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
/*
* A memory fault, but the filesystem has some outstanding
* mapped blocks. We need to use those blocks up to avoid
@@ -181,8 +180,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
*/
if (dio->page_errors == 0)
dio->page_errors = ret;
- get_page(page);
- dio->pages[0] = page;
+ dio->pages[0] = ZERO_PAGE(0);
sdio->head = 0;
sdio->tail = 1;
sdio->from = 0;
@@ -191,7 +189,6 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
}
if (ret >= 0) {
- iov_iter_advance(sdio->iter, ret);
ret += sdio->from;
sdio->head = 0;
sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -203,9 +200,9 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
/*
* Get another userspace page. Returns an ERR_PTR on error. Pages are
- * buffered inside the dio so that we can call get_user_pages() against a
- * decent number of pages, less frequently. To provide nicer use of the
- * L1 cache.
+ * buffered inside the dio so that we can call iov_iter_extract_pages()
+ * against a decent number of pages, less frequently. To provide nicer use of
+ * the L1 cache.
*/
static inline struct page *dio_get_page(struct dio *dio,
struct dio_submit *sdio)
@@ -221,6 +218,18 @@ static inline struct page *dio_get_page(struct dio *dio,
return dio->pages[sdio->head];
}
+static void dio_pin_page(struct dio *dio, struct page *page)
+{
+ if (dio->is_pinned)
+ folio_add_pin(page_folio(page));
+}
+
+static void dio_unpin_page(struct dio *dio, struct page *page)
+{
+ if (dio->is_pinned)
+ unpin_user_page(page);
+}
+
/*
* dio_complete() - called when all DIO BIO I/O has been completed
*
@@ -234,6 +243,7 @@ static inline struct page *dio_get_page(struct dio *dio,
*/
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
loff_t offset = dio->iocb->ki_pos;
ssize_t transferred = 0;
int err;
@@ -251,7 +261,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
transferred = dio->result;
/* Check for short read case */
- if ((dio->op == REQ_OP_READ) &&
+ if (dio_op == REQ_OP_READ &&
((offset + transferred) > dio->i_size))
transferred = dio->i_size - offset;
/* ignore EFAULT if some IO has been done */
@@ -286,14 +296,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
* zeros from unwritten extents.
*/
if (flags & DIO_COMPLETE_INVALIDATE &&
- ret > 0 && dio->op == REQ_OP_WRITE &&
- dio->inode->i_mapping->nrpages) {
- err = invalidate_inode_pages2_range(dio->inode->i_mapping,
- offset >> PAGE_SHIFT,
- (offset + ret - 1) >> PAGE_SHIFT);
- if (err)
- dio_warn_stale_pagecache(dio->iocb->ki_filp);
- }
+ ret > 0 && dio_op == REQ_OP_WRITE)
+ kiocb_invalidate_post_direct_write(dio->iocb, ret);
inode_dio_end(dio->inode);
@@ -305,7 +309,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
*/
dio->iocb->ki_pos += transferred;
- if (ret > 0 && dio->op == REQ_OP_WRITE)
+ if (ret > 0 && dio_op == REQ_OP_WRITE)
ret = generic_write_sync(dio->iocb, ret);
dio->iocb->ki_complete(dio->iocb, ret);
}
@@ -329,6 +333,7 @@ static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
static void dio_bio_end_aio(struct bio *bio)
{
struct dio *dio = bio->bi_private;
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
unsigned long remaining;
unsigned long flags;
bool defer_completion = false;
@@ -353,7 +358,7 @@ static void dio_bio_end_aio(struct bio *bio)
*/
if (dio->result)
defer_completion = dio->defer_completion ||
- (dio->op == REQ_OP_WRITE &&
+ (dio_op == REQ_OP_WRITE &&
dio->inode->i_mapping->nrpages);
if (defer_completion) {
INIT_WORK(&dio->complete_work, dio_aio_complete_work);
@@ -396,17 +401,15 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
* bio_alloc() is guaranteed to return a bio when allowed to sleep and
* we request a valid number of vectors.
*/
- bio = bio_alloc(GFP_KERNEL, nr_vecs);
-
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
bio->bi_iter.bi_sector = first_sector;
- bio_set_op_attrs(bio, dio->op, dio->op_flags);
if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio;
else
bio->bi_end_io = dio_bio_end_io;
-
- bio->bi_write_hint = dio->iocb->ki_hint;
+ if (dio->is_pinned)
+ bio_set_flag(bio, BIO_PAGE_PINNED);
+ bio->bi_write_hint = file_inode(dio->iocb->ki_filp)->i_write_hint;
sdio->bio = bio;
sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
@@ -421,26 +424,22 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
*/
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
struct bio *bio = sdio->bio;
unsigned long flags;
bio->bi_private = dio;
- /* don't account direct I/O as memory stall */
- bio_clear_flag(bio, BIO_WORKINGSET);
spin_lock_irqsave(&dio->bio_lock, flags);
dio->refcount++;
spin_unlock_irqrestore(&dio->bio_lock, flags);
- if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
+ if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
dio->bio_disk = bio->bi_bdev->bd_disk;
- if (sdio->submit_io)
- sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
- else
- submit_bio(bio);
+ submit_bio(bio);
sdio->bio = NULL;
sdio->boundary = 0;
@@ -452,8 +451,10 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
*/
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
- while (sdio->head < sdio->tail)
- put_page(dio->pages[sdio->head++]);
+ if (dio->is_pinned)
+ unpin_user_pages(dio->pages + sdio->head,
+ sdio->tail - sdio->head);
+ sdio->head = sdio->tail;
}
/*
@@ -498,7 +499,8 @@ static struct bio *dio_await_one(struct dio *dio)
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
blk_status_t err = bio->bi_status;
- bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty;
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
+ bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;
if (err) {
if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
@@ -563,30 +565,6 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
return ret;
}
-/*
- * Create workqueue for deferred direct IO completions. We allocate the
- * workqueue when it's first needed. This avoids creating workqueue for
- * filesystems that don't need it and also allows us to create the workqueue
- * late enough so the we can include s_id in the name of the workqueue.
- */
-int sb_init_dio_done_wq(struct super_block *sb)
-{
- struct workqueue_struct *old;
- struct workqueue_struct *wq = alloc_workqueue("dio/%s",
- WQ_MEM_RECLAIM, 0,
- sb->s_id);
- if (!wq)
- return -ENOMEM;
- /*
- * This has to be atomic as more DIOs can race to create the workqueue
- */
- old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
- /* Someone created workqueue before us? Free ours... */
- if (old)
- destroy_workqueue(wq);
- return 0;
-}
-
static int dio_set_defer_completion(struct dio *dio)
{
struct super_block *sb = dio->inode->i_sb;
@@ -625,6 +603,7 @@ static int dio_set_defer_completion(struct dio *dio)
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
struct buffer_head *map_bh)
{
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
int ret;
sector_t fs_startblk; /* Into file, in filesystem-sized blocks */
sector_t fs_endblk; /* Into file, in filesystem-sized blocks */
@@ -659,7 +638,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
* which may decide to handle it or also return an unmapped
* buffer head.
*/
- create = dio->op == REQ_OP_WRITE;
+ create = dio_op == REQ_OP_WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
i_size = i_size_read(dio->inode);
if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
@@ -706,7 +685,7 @@ out:
*
* Return zero on success. Non-zero means the caller needs to start a new BIO.
*/
-static inline int dio_bio_add_page(struct dio_submit *sdio)
+static inline int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio)
{
int ret;
@@ -718,7 +697,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio)
*/
if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
sdio->pages_in_io--;
- get_page(sdio->cur_page);
+ dio_pin_page(dio, sdio->cur_page);
sdio->final_block_in_bio = sdio->cur_page_block +
(sdio->cur_page_len >> sdio->blkbits);
ret = 0;
@@ -773,11 +752,11 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
goto out;
}
- if (dio_bio_add_page(sdio) != 0) {
+ if (dio_bio_add_page(dio, sdio) != 0) {
dio_bio_submit(dio, sdio);
ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
if (ret == 0) {
- ret = dio_bio_add_page(sdio);
+ ret = dio_bio_add_page(dio, sdio);
BUG_ON(ret != 0);
}
}
@@ -807,10 +786,11 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
unsigned offset, unsigned len, sector_t blocknr,
struct buffer_head *map_bh)
{
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
int ret = 0;
int boundary = sdio->boundary; /* dio_send_cur_page may clear it */
- if (dio->op == REQ_OP_WRITE) {
+ if (dio_op == REQ_OP_WRITE) {
/*
* Read accounting is performed in submit_bio()
*/
@@ -833,13 +813,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
*/
if (sdio->cur_page) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- put_page(sdio->cur_page);
+ dio_unpin_page(dio, sdio->cur_page);
sdio->cur_page = NULL;
if (ret)
return ret;
}
- get_page(page); /* It is in dio */
+ dio_pin_page(dio, page); /* It is in dio */
sdio->cur_page = page;
sdio->cur_page_offset = offset;
sdio->cur_page_len = len;
@@ -854,7 +834,7 @@ out:
ret = dio_send_cur_page(dio, sdio, map_bh);
if (sdio->bio)
dio_bio_submit(dio, sdio);
- put_page(sdio->cur_page);
+ dio_unpin_page(dio, sdio->cur_page);
sdio->cur_page = NULL;
}
return ret;
@@ -923,6 +903,7 @@ static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
struct buffer_head *map_bh)
{
+ const enum req_op dio_op = dio->opf & REQ_OP_MASK;
const unsigned blkbits = sdio->blkbits;
const unsigned i_blkbits = blkbits + sdio->blkfactor;
int ret = 0;
@@ -954,7 +935,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
ret = get_more_blocks(dio, sdio, map_bh);
if (ret) {
- put_page(page);
+ dio_unpin_page(dio, page);
goto out;
}
if (!buffer_mapped(map_bh))
@@ -998,8 +979,8 @@ do_holes:
loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */
- if (dio->op == REQ_OP_WRITE) {
- put_page(page);
+ if (dio_op == REQ_OP_WRITE) {
+ dio_unpin_page(dio, page);
return -ENOTBLK;
}
@@ -1012,10 +993,10 @@ do_holes:
if (sdio->block_in_file >=
i_size_aligned >> blkbits) {
/* We hit eof */
- put_page(page);
+ dio_unpin_page(dio, page);
goto out;
}
- zero_user(page, from, 1 << blkbits);
+ memzero_page(page, from, 1 << blkbits);
sdio->block_in_file++;
from += 1 << blkbits;
dio->result += 1 << blkbits;
@@ -1052,7 +1033,7 @@ do_holes:
sdio->next_block_for_io,
map_bh);
if (ret) {
- put_page(page);
+ dio_unpin_page(dio, page);
goto out;
}
sdio->next_block_for_io += this_chunk_blocks;
@@ -1067,8 +1048,8 @@ next_block:
break;
}
- /* Drop the ref which was taken in get_user_pages() */
- put_page(page);
+ /* Drop the pin which was taken in iov_iter_extract_pages() */
+ dio_unpin_page(dio, page);
}
out:
return ret;
@@ -1102,8 +1083,8 @@ static inline int drop_refcount(struct dio *dio)
* The locking rules are governed by the flags parameter:
* - if the flags value contains DIO_LOCKING we use a fancy locking
* scheme for dumb filesystems.
- * For writes this function is called under i_mutex and returns with
- * i_mutex held, for reads, i_mutex is not held on entry, but it is
+ * For writes this function is called under i_rwsem and returns with
+ * i_rwsem held, for reads, i_rwsem is not held on entry, but it is
* taken and dropped again before returning.
* - if the flags value does NOT contain DIO_LOCKING we don't use any
* internal locking but rather rely on the filesystem to synchronize
@@ -1113,7 +1094,7 @@ static inline int drop_refcount(struct dio *dio)
* counter before starting direct I/O, and decrement it once we are done.
* Truncate can wait for it to reach zero to provide exclusion. It is
 * expected that filesystems provide exclusion between new direct I/O
- * and truncates. For DIO_LOCKING filesystems this is done by i_mutex,
+ * and truncates. For DIO_LOCKING filesystems this is done by i_rwsem,
* but other filesystems need to take care of this on their own.
*
* NOTE: if you pass "sdio" to anything by pointer make sure that function
@@ -1121,11 +1102,10 @@ static inline int drop_refcount(struct dio *dio)
* individual fields and will generate much worse code. This is important
* for the whole file.
*/
-static inline ssize_t
-do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ get_block_t get_block, dio_iodone_t end_io,
+ int flags)
{
unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
@@ -1135,16 +1115,11 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
loff_t offset = iocb->ki_pos;
const loff_t end = offset + count;
struct dio *dio;
- struct dio_submit sdio = { 0, };
+ struct dio_submit sdio = { NULL, };
struct buffer_head map_bh = { 0, };
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
- /*
- * Avoid references to bdev if not absolutely needed to give
- * the early prefetch in the caller enough time.
- */
-
/* watch out for a 0 len io from a tricksy fs */
if (iov_iter_rw(iter) == READ && !count)
return 0;
@@ -1164,6 +1139,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
/* will be released by direct_io_worker */
inode_lock(inode);
}
+ dio->is_pinned = iov_iter_extract_will_pin(iter);
/* Once we sampled i_size check for reads beyond EOF */
dio->i_size = i_size_read(inode);
@@ -1203,12 +1179,11 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
dio->inode = inode;
if (iov_iter_rw(iter) == WRITE) {
- dio->op = REQ_OP_WRITE;
- dio->op_flags = REQ_SYNC | REQ_IDLE;
+ dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (iocb->ki_flags & IOCB_NOWAIT)
- dio->op_flags |= REQ_NOWAIT;
+ dio->opf |= REQ_NOWAIT;
} else {
- dio->op = REQ_OP_READ;
+ dio->opf = REQ_OP_READ;
}
/*
@@ -1217,7 +1192,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
if (dio->is_async && iov_iter_rw(iter) == WRITE) {
retval = 0;
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb))
retval = dio_set_defer_completion(dio);
else if (!dio->inode->i_sb->s_dio_done_wq) {
/*
@@ -1236,14 +1211,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
inode_dio_begin(inode);
- retval = 0;
sdio.blkbits = blkbits;
sdio.blkfactor = i_blkbits - blkbits;
sdio.block_in_file = offset >> blkbits;
sdio.get_block = get_block;
dio->end_io = end_io;
- sdio.submit_io = submit_io;
sdio.final_block_in_bio = -1;
sdio.next_block_for_io = -1;
@@ -1252,7 +1225,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
- dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
+ dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
sdio.iter = iter;
sdio.final_block_in_request = end >> blkbits;
@@ -1290,7 +1263,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
if (retval == 0)
retval = ret2;
- put_page(sdio.cur_page);
+ dio_unpin_page(dio, sdio.cur_page);
sdio.cur_page = NULL;
}
if (sdio.bio)
@@ -1306,7 +1279,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
/*
* All block lookups have been performed. For READ requests
- * we can let i_mutex go now that its achieved its purpose
+ * we can let i_rwsem go now that it's achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
@@ -1340,29 +1313,6 @@ fail_dio:
kmem_cache_free(dio_cache, dio);
return retval;
}
-
-ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter,
- get_block_t get_block,
- dio_iodone_t end_io, dio_submit_t submit_io,
- int flags)
-{
- /*
- * The block device state is needed in the end to finally
- * submit everything. Since it's likely to be cache cold
- * prefetch it here as first thing to hide some of the
- * latency.
- *
- * Attempt to prefetch the pieces we likely need later.
- */
- prefetch(&bdev->bd_disk->part_tbl);
- prefetch(bdev->bd_disk->queue);
- prefetch((char *)bdev->bd_disk->queue + SMP_CACHE_BYTES);
-
- return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
- end_io, submit_io, flags);
-}
-
EXPORT_SYMBOL(__blockdev_direct_IO);
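
With the submit_io hook gone, __blockdev_direct_IO() is the exported entry point; filesystems usually reach it through the blockdev_direct_IO() inline wrapper in <linux/fs.h>, which (modulo exact formatting) reduces to something like the sketch below. The examplefs_* names are illustrative only:

static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
					 struct inode *inode,
					 struct iov_iter *iter,
					 get_block_t get_block)
{
	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
			get_block, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}

/* ...so a simple filesystem's ->direct_IO can be just: */
static ssize_t examplefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return blockdev_direct_IO(iocb, inode, iter, examplefs_get_block);
}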
static __init int dio_init(void)
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index ee92634196a8..b46165df5a91 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -3,8 +3,6 @@ menuconfig DLM
tristate "Distributed Lock Manager (DLM)"
depends on INET
depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
- select IP_SCTP
- select SRCU
help
A general purpose distributed lock manager for kernel or userspace
applications.
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index 3545fdafc6fb..5a471af1d1fe 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -9,7 +9,6 @@ dlm-y := ast.o \
member.o \
memory.o \
midcomms.o \
- netlink.o \
lowcomms.o \
plock.o \
rcom.o \
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 283c7b94edda..0fe8d80ce5e8 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -9,56 +9,73 @@
*******************************************************************************
******************************************************************************/
+#include <trace/events/dlm.h>
+
#include "dlm_internal.h"
+#include "lvb_table.h"
+#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"
-static uint64_t dlm_cb_seq;
-static DEFINE_SPINLOCK(dlm_cb_seq_spin);
-
-static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
+static void dlm_run_callback(uint32_t ls_id, uint32_t lkb_id, int8_t mode,
+ uint32_t flags, uint8_t sb_flags, int sb_status,
+ struct dlm_lksb *lksb,
+ void (*astfn)(void *astparam),
+ void (*bastfn)(void *astparam, int mode),
+ void *astparam, const char *res_name,
+ size_t res_length)
{
- int i;
-
- log_print("last_bast %x %llu flags %x mode %d sb %d %x",
- lkb->lkb_id,
- (unsigned long long)lkb->lkb_last_bast.seq,
- lkb->lkb_last_bast.flags,
- lkb->lkb_last_bast.mode,
- lkb->lkb_last_bast.sb_status,
- lkb->lkb_last_bast.sb_flags);
-
- log_print("last_cast %x %llu flags %x mode %d sb %d %x",
- lkb->lkb_id,
- (unsigned long long)lkb->lkb_last_cast.seq,
- lkb->lkb_last_cast.flags,
- lkb->lkb_last_cast.mode,
- lkb->lkb_last_cast.sb_status,
- lkb->lkb_last_cast.sb_flags);
-
- for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
- log_print("cb %x %llu flags %x mode %d sb %d %x",
- lkb->lkb_id,
- (unsigned long long)lkb->lkb_callbacks[i].seq,
- lkb->lkb_callbacks[i].flags,
- lkb->lkb_callbacks[i].mode,
- lkb->lkb_callbacks[i].sb_status,
- lkb->lkb_callbacks[i].sb_flags);
+ if (flags & DLM_CB_BAST) {
+ trace_dlm_bast(ls_id, lkb_id, mode, res_name, res_length);
+ bastfn(astparam, mode);
+ } else if (flags & DLM_CB_CAST) {
+ trace_dlm_ast(ls_id, lkb_id, sb_flags, sb_status, res_name,
+ res_length);
+ lksb->sb_status = sb_status;
+ lksb->sb_flags = sb_flags;
+ astfn(astparam);
}
}
-int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags, uint64_t seq)
+static void dlm_do_callback(struct dlm_callback *cb)
+{
+ dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,
+ cb->sb_flags, cb->sb_status, cb->lkb_lksb,
+ cb->astfn, cb->bastfn, cb->astparam,
+ cb->res_name, cb->res_length);
+ dlm_free_cb(cb);
+}
+
+static void dlm_callback_work(struct work_struct *work)
+{
+ struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
+
+ dlm_do_callback(cb);
+}
+
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags, int *copy_lvb)
{
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- uint64_t prev_seq;
+ struct dlm_rsb *rsb = lkb->lkb_resource;
+ struct dlm_ls *ls = rsb->res_ls;
int prev_mode;
- int i, rv;
- for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
- if (lkb->lkb_callbacks[i].seq)
- continue;
+ if (copy_lvb)
+ *copy_lvb = 0;
+
+ if (flags & DLM_CB_BAST) {
+ /* if cb is a bast, it should be skipped if the blocking mode is
+ * compatible with the last granted mode
+ */
+ if (lkb->lkb_last_cast_cb_mode != -1) {
+ if (dlm_modes_compat(mode, lkb->lkb_last_cast_cb_mode)) {
+ log_debug(ls, "skip %x bast mode %d for cast mode %d",
+ lkb->lkb_id, mode,
+ lkb->lkb_last_cast_cb_mode);
+ return true;
+ }
+ }
/*
* Suppress some redundant basts here, do more on removal.
@@ -66,209 +83,130 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
* is a bast for the same mode or a more restrictive mode.
 * (the additional > PR check is needed for PR/CW inversion)
*/
-
- if ((i > 0) && (flags & DLM_CB_BAST) &&
- (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {
-
- prev_seq = lkb->lkb_callbacks[i-1].seq;
- prev_mode = lkb->lkb_callbacks[i-1].mode;
+ if (lkb->lkb_last_cb_mode != -1 &&
+ lkb->lkb_last_cb_flags & DLM_CB_BAST) {
+ prev_mode = lkb->lkb_last_cb_mode;
if ((prev_mode == mode) ||
(prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
+ log_debug(ls, "skip %x add bast mode %d for bast mode %d",
+ lkb->lkb_id, mode, prev_mode);
+ return true;
+ }
+ }
- log_debug(ls, "skip %x add bast %llu mode %d "
- "for bast %llu mode %d",
- lkb->lkb_id,
- (unsigned long long)seq,
- mode,
- (unsigned long long)prev_seq,
- prev_mode);
- rv = 0;
- goto out;
+ lkb->lkb_last_bast_time = ktime_get();
+ lkb->lkb_last_bast_cb_mode = mode;
+ } else if (flags & DLM_CB_CAST) {
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+ prev_mode = lkb->lkb_last_cast_cb_mode;
+
+ if (!status && lkb->lkb_lksb->sb_lvbptr &&
+ dlm_lvb_operations[prev_mode + 1][mode + 1]) {
+ if (copy_lvb)
+ *copy_lvb = 1;
}
}
- lkb->lkb_callbacks[i].seq = seq;
- lkb->lkb_callbacks[i].flags = flags;
- lkb->lkb_callbacks[i].mode = mode;
- lkb->lkb_callbacks[i].sb_status = status;
- lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
- rv = 0;
- break;
+ lkb->lkb_last_cast_cb_mode = mode;
+ lkb->lkb_last_cast_time = ktime_get();
}
- if (i == DLM_CALLBACKS_SIZE) {
- log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
- lkb->lkb_id, (unsigned long long)seq,
- flags, mode, status, sbflags);
- dlm_dump_lkb_callbacks(lkb);
- rv = -1;
- goto out;
- }
- out:
- return rv;
+ lkb->lkb_last_cb_mode = mode;
+ lkb->lkb_last_cb_flags = flags;
+
+ return false;
}
-int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_callback *cb, int *resid)
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb)
{
- int i, rv;
-
- *resid = 0;
+ struct dlm_rsb *rsb = lkb->lkb_resource;
+ struct dlm_ls *ls = rsb->res_ls;
- if (!lkb->lkb_callbacks[0].seq) {
- rv = -ENOENT;
- goto out;
- }
+ *cb = dlm_allocate_cb();
+ if (WARN_ON_ONCE(!*cb))
+ return -ENOMEM;
- /* oldest undelivered cb is callbacks[0] */
+ /* for tracing */
+ (*cb)->lkb_id = lkb->lkb_id;
+ (*cb)->ls_id = ls->ls_global_id;
+ memcpy((*cb)->res_name, rsb->res_name, rsb->res_length);
+ (*cb)->res_length = rsb->res_length;
- memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
- memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));
+ (*cb)->flags = flags;
+ (*cb)->mode = mode;
+ (*cb)->sb_status = status;
+ (*cb)->sb_flags = (sbflags & 0x000000FF);
+ (*cb)->lkb_lksb = lkb->lkb_lksb;
- /* shift others down */
+ return 0;
+}
- for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
- if (!lkb->lkb_callbacks[i].seq)
- break;
- memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
- sizeof(struct dlm_callback));
- memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
- (*resid)++;
- }
+static int dlm_get_queue_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb)
+{
+ int rv;
- /* if cb is a bast, it should be skipped if the blocking mode is
- compatible with the last granted mode */
-
- if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
- if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
- cb->flags |= DLM_CB_SKIP;
-
- log_debug(ls, "skip %x bast %llu mode %d "
- "for cast %llu mode %d",
- lkb->lkb_id,
- (unsigned long long)cb->seq,
- cb->mode,
- (unsigned long long)lkb->lkb_last_cast.seq,
- lkb->lkb_last_cast.mode);
- rv = 0;
- goto out;
- }
- }
+ rv = dlm_get_cb(lkb, flags, mode, status, sbflags, cb);
+ if (rv)
+ return rv;
- if (cb->flags & DLM_CB_CAST) {
- memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
- lkb->lkb_last_cast_time = ktime_get();
- }
+ (*cb)->astfn = lkb->lkb_astfn;
+ (*cb)->bastfn = lkb->lkb_bastfn;
+ (*cb)->astparam = lkb->lkb_astparam;
+ INIT_WORK(&(*cb)->work, dlm_callback_work);
- if (cb->flags & DLM_CB_BAST) {
- memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
- lkb->lkb_last_bast_time = ktime_get();
- }
- rv = 0;
- out:
- return rv;
+ return 0;
}
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
uint32_t sbflags)
{
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- uint64_t new_seq, prev_seq;
+ struct dlm_rsb *rsb = lkb->lkb_resource;
+ struct dlm_ls *ls = rsb->res_ls;
+ struct dlm_callback *cb;
int rv;
- spin_lock(&dlm_cb_seq_spin);
- new_seq = ++dlm_cb_seq;
- if (!dlm_cb_seq)
- new_seq = ++dlm_cb_seq;
- spin_unlock(&dlm_cb_seq_spin);
-
- if (lkb->lkb_flags & DLM_IFL_USER) {
- dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+ dlm_user_add_ast(lkb, flags, mode, status, sbflags);
return;
}
- mutex_lock(&lkb->lkb_cb_mutex);
- prev_seq = lkb->lkb_callbacks[0].seq;
-
- rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
- if (rv < 0)
- goto out;
-
- if (!prev_seq) {
- kref_get(&lkb->lkb_ref);
+ if (dlm_may_skip_callback(lkb, flags, mode, status, sbflags, NULL))
+ return;
- if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
- mutex_lock(&ls->ls_cb_mutex);
- list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
- mutex_unlock(&ls->ls_cb_mutex);
+ spin_lock_bh(&ls->ls_cb_lock);
+ if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
+ rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+ if (!rv)
+ list_add(&cb->list, &ls->ls_cb_delay);
+ } else {
+ if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags)) {
+ dlm_run_callback(ls->ls_global_id, lkb->lkb_id, mode, flags,
+ sbflags, status, lkb->lkb_lksb,
+ lkb->lkb_astfn, lkb->lkb_bastfn,
+ lkb->lkb_astparam, rsb->res_name,
+ rsb->res_length);
} else {
- queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+ rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+ if (!rv)
+ queue_work(ls->ls_callback_wq, &cb->work);
}
}
- out:
- mutex_unlock(&lkb->lkb_cb_mutex);
-}
-
-void dlm_callback_work(struct work_struct *work)
-{
- struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- void (*castfn) (void *astparam);
- void (*bastfn) (void *astparam, int mode);
- struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
- int i, rv, resid;
-
- memset(&callbacks, 0, sizeof(callbacks));
-
- mutex_lock(&lkb->lkb_cb_mutex);
- if (!lkb->lkb_callbacks[0].seq) {
- /* no callback work exists, shouldn't happen */
- log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
- dlm_print_lkb(lkb);
- dlm_dump_lkb_callbacks(lkb);
- }
-
- for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
- rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
- if (rv < 0)
- break;
- }
-
- if (resid) {
- /* cbs remain, loop should have removed all, shouldn't happen */
- log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
- resid);
- dlm_print_lkb(lkb);
- dlm_dump_lkb_callbacks(lkb);
- }
- mutex_unlock(&lkb->lkb_cb_mutex);
-
- castfn = lkb->lkb_astfn;
- bastfn = lkb->lkb_bastfn;
-
- for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
- if (!callbacks[i].seq)
- break;
- if (callbacks[i].flags & DLM_CB_SKIP) {
- continue;
- } else if (callbacks[i].flags & DLM_CB_BAST) {
- bastfn(lkb->lkb_astparam, callbacks[i].mode);
- } else if (callbacks[i].flags & DLM_CB_CAST) {
- lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
- lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
- castfn(lkb->lkb_astparam);
- }
- }
-
- /* undo kref_get from dlm_add_callback, may cause lkb to be freed */
- dlm_put_lkb(lkb);
+ spin_unlock_bh(&ls->ls_cb_lock);
}
int dlm_callback_start(struct dlm_ls *ls)
{
- ls->ls_callback_wq = alloc_workqueue("dlm_callback",
- WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+ if (!test_bit(LSFL_FS, &ls->ls_flags) ||
+ test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+ return 0;
+
+ ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!ls->ls_callback_wq) {
log_print("can't start dlm_callback workqueue");
return -ENOMEM;
@@ -284,7 +222,12 @@ void dlm_callback_stop(struct dlm_ls *ls)
void dlm_callback_suspend(struct dlm_ls *ls)
{
+ if (!test_bit(LSFL_FS, &ls->ls_flags))
+ return;
+
+ spin_lock_bh(&ls->ls_cb_lock);
set_bit(LSFL_CB_DELAY, &ls->ls_flags);
+ spin_unlock_bh(&ls->ls_cb_lock);
if (ls->ls_callback_wq)
flush_workqueue(ls->ls_callback_wq);
@@ -294,31 +237,39 @@ void dlm_callback_suspend(struct dlm_ls *ls)
void dlm_callback_resume(struct dlm_ls *ls)
{
- struct dlm_lkb *lkb, *safe;
- int count = 0;
+ struct dlm_callback *cb, *safe;
+ int count = 0, sum = 0;
+ bool empty;
- clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
-
- if (!ls->ls_callback_wq)
+ if (!test_bit(LSFL_FS, &ls->ls_flags))
return;
more:
- mutex_lock(&ls->ls_cb_mutex);
- list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
- list_del_init(&lkb->lkb_cb_list);
- queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+ spin_lock_bh(&ls->ls_cb_lock);
+ list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
+ list_del(&cb->list);
+ if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+ dlm_do_callback(cb);
+ else
+ queue_work(ls->ls_callback_wq, &cb->work);
+
count++;
if (count == MAX_CB_QUEUE)
break;
}
- mutex_unlock(&ls->ls_cb_mutex);
+ empty = list_empty(&ls->ls_cb_delay);
+ if (empty)
+ clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
+ spin_unlock_bh(&ls->ls_cb_lock);
- if (count)
- log_rinfo(ls, "dlm_callback_resume %d", count);
- if (count == MAX_CB_QUEUE) {
+ sum += count;
+ if (!empty) {
count = 0;
cond_resched();
goto more;
}
+
+ if (sum)
+ log_rinfo(ls, "%s %d", __func__, sum);
}
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index 181ad7d20c4d..e2b86845d331 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -11,15 +11,14 @@
#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__
-void dlm_del_ast(struct dlm_lkb *lkb);
-int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags, uint64_t seq);
-int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_callback *cb, int *resid);
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags, int *copy_lvb);
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb);
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
uint32_t sbflags);
-void dlm_callback_work(struct work_struct *work);
int dlm_callback_start(struct dlm_ls *ls);
void dlm_callback_stop(struct dlm_ls *ls);
void dlm_callback_suspend(struct dlm_ls *ls);
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 42eee2783756..a0d75b5c83c6 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -24,9 +24,10 @@
#include "lowcomms.h"
/*
- * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid (refers to <node>)
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
- * /config/dlm/<cluster>/comms/<comm>/nodeid
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/release_recover
+ * /config/dlm/<cluster>/comms/<comm>/nodeid (refers to <comm>)
* /config/dlm/<cluster>/comms/<comm>/local
* /config/dlm/<cluster>/comms/<comm>/addr (write only)
* /config/dlm/<cluster>/comms/<comm>/addr_list (read only)
@@ -63,24 +64,16 @@ static void release_node(struct config_item *);
static struct configfs_attribute *comm_attrs[];
static struct configfs_attribute *node_attrs[];
+const struct rhashtable_params dlm_rhash_rsb_params = {
+ .nelem_hint = 3, /* start small */
+ .key_len = DLM_RESNAME_MAXLEN,
+ .key_offset = offsetof(struct dlm_rsb, res_name),
+ .head_offset = offsetof(struct dlm_rsb, res_node),
+ .automatic_shrinking = true,
+};
+
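Because dlm_rhash_rsb_params uses a fixed-length key (key_len = DLM_RESNAME_MAXLEN), a lookup pads the resource name out to the full key size first. The following is a hedged sketch of such a lookup; the helper name is illustrative, not taken from the patch:

static struct dlm_rsb *example_search_rsb(struct rhashtable *rhash,
					  const void *name, int len)
{
	char key[DLM_RESNAME_MAXLEN] = {};

	memcpy(key, name, len);
	return rhashtable_lookup_fast(rhash, &key, dlm_rhash_rsb_params);
}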
struct dlm_cluster {
struct config_group group;
- unsigned int cl_tcp_port;
- unsigned int cl_buffer_size;
- unsigned int cl_rsbtbl_size;
- unsigned int cl_recover_timer;
- unsigned int cl_toss_secs;
- unsigned int cl_scan_secs;
- unsigned int cl_log_debug;
- unsigned int cl_log_info;
- unsigned int cl_protocol;
- unsigned int cl_mark;
- unsigned int cl_timewarn_cs;
- unsigned int cl_waitwarn_us;
- unsigned int cl_new_rsb_count;
- unsigned int cl_recover_callbacks;
- char cl_cluster_name[DLM_LOCKSPACE_LEN];
-
struct dlm_spaces *sps;
struct dlm_comms *cms;
};
@@ -102,8 +95,6 @@ enum {
CLUSTER_ATTR_LOG_INFO,
CLUSTER_ATTR_PROTOCOL,
CLUSTER_ATTR_MARK,
- CLUSTER_ATTR_TIMEWARN_CS,
- CLUSTER_ATTR_WAITWARN_US,
CLUSTER_ATTR_NEW_RSB_COUNT,
CLUSTER_ATTR_RECOVER_CALLBACKS,
CLUSTER_ATTR_CLUSTER_NAME,
@@ -111,25 +102,60 @@ enum {
static ssize_t cluster_cluster_name_show(struct config_item *item, char *buf)
{
- struct dlm_cluster *cl = config_item_to_cluster(item);
- return sprintf(buf, "%s\n", cl->cl_cluster_name);
+ return sprintf(buf, "%s\n", dlm_config.ci_cluster_name);
}
static ssize_t cluster_cluster_name_store(struct config_item *item,
const char *buf, size_t len)
{
- struct dlm_cluster *cl = config_item_to_cluster(item);
-
- strlcpy(dlm_config.ci_cluster_name, buf,
- sizeof(dlm_config.ci_cluster_name));
- strlcpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name));
+ strscpy(dlm_config.ci_cluster_name, buf,
+ sizeof(dlm_config.ci_cluster_name));
return len;
}
CONFIGFS_ATTR(cluster_, cluster_name);
-static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
- int *info_field, int (*check_cb)(unsigned int x),
+static ssize_t cluster_tcp_port_show(struct config_item *item, char *buf)
+{
+ return sprintf(buf, "%u\n", be16_to_cpu(dlm_config.ci_tcp_port));
+}
+
+static int dlm_check_zero_and_dlm_running(unsigned int x)
+{
+ if (!x)
+ return -EINVAL;
+
+ if (dlm_lowcomms_is_running())
+ return -EBUSY;
+
+ return 0;
+}
+
+static ssize_t cluster_tcp_port_store(struct config_item *item,
+ const char *buf, size_t len)
+{
+ int rc;
+ u16 x;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ rc = kstrtou16(buf, 0, &x);
+ if (rc)
+ return rc;
+
+ rc = dlm_check_zero_and_dlm_running(x);
+ if (rc)
+ return rc;
+
+ dlm_config.ci_tcp_port = cpu_to_be16(x);
+ return len;
+}
+
+CONFIGFS_ATTR(cluster_, tcp_port);
+
+static ssize_t cluster_set(unsigned int *info_field,
+ int (*check_cb)(unsigned int x),
const char *buf, size_t len)
{
unsigned int x;
@@ -147,7 +173,6 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
return rc;
}
- *cl_field = x;
*info_field = x;
return len;
@@ -157,14 +182,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
static ssize_t cluster_##name##_store(struct config_item *item, \
const char *buf, size_t len) \
{ \
- struct dlm_cluster *cl = config_item_to_cluster(item); \
- return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
- check_cb, buf, len); \
+ return cluster_set(&dlm_config.ci_##name, check_cb, buf, len); \
} \
static ssize_t cluster_##name##_show(struct config_item *item, char *buf) \
{ \
- struct dlm_cluster *cl = config_item_to_cluster(item); \
- return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
+ return snprintf(buf, PAGE_SIZE, "%u\n", dlm_config.ci_##name); \
} \
CONFIGFS_ATTR(cluster_, name);
@@ -176,23 +198,15 @@ static int dlm_check_protocol_and_dlm_running(unsigned int x)
break;
case 1:
/* SCTP */
+ if (!IS_ENABLED(CONFIG_IP_SCTP))
+ return -EOPNOTSUPP;
+
break;
default:
return -EINVAL;
}
- if (dlm_allow_conn)
- return -EBUSY;
-
- return 0;
-}
-
-static int dlm_check_zero_and_dlm_running(unsigned int x)
-{
- if (!x)
- return -EINVAL;
-
- if (dlm_allow_conn)
+ if (dlm_lowcomms_is_running())
return -EBUSY;
return 0;
@@ -214,7 +228,6 @@ static int dlm_check_buffer_size(unsigned int x)
return 0;
}
-CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
CLUSTER_ATTR(recover_timer, dlm_check_zero);
@@ -224,8 +237,6 @@ CLUSTER_ATTR(log_debug, NULL);
CLUSTER_ATTR(log_info, NULL);
CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
CLUSTER_ATTR(mark, NULL);
-CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
-CLUSTER_ATTR(waitwarn_us, NULL);
CLUSTER_ATTR(new_rsb_count, NULL);
CLUSTER_ATTR(recover_callbacks, NULL);
@@ -240,8 +251,6 @@ static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_LOG_INFO] = &cluster_attr_log_info,
[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol,
[CLUSTER_ATTR_MARK] = &cluster_attr_mark,
- [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs,
- [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us,
[CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count,
[CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks,
[CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name,
@@ -259,6 +268,7 @@ enum {
enum {
NODE_ATTR_NODEID = 0,
NODE_ATTR_WEIGHT,
+ NODE_ATTR_RELEASE_RECOVER,
};
struct dlm_clusters {
@@ -272,6 +282,8 @@ struct dlm_spaces {
struct dlm_space {
struct config_group group;
struct list_head members;
+ struct list_head members_gone;
+ int members_gone_count;
struct mutex members_lock;
int members_count;
struct dlm_nodes *nds;
@@ -302,6 +314,14 @@ struct dlm_node {
int weight;
int new;
int comm_seq; /* copy of cm->seq when nd->nodeid is set */
+ unsigned int release_recover;
+};
+
+struct dlm_member_gone {
+ int nodeid;
+ unsigned int release_recover;
+
+ struct list_head list; /* space->members_gone */
};
static struct configfs_group_operations clusters_ops = {
@@ -423,22 +443,6 @@ static struct config_group *make_cluster(struct config_group *g,
configfs_add_default_group(&sps->ss_group, &cl->group);
configfs_add_default_group(&cms->cs_group, &cl->group);
- cl->cl_tcp_port = dlm_config.ci_tcp_port;
- cl->cl_buffer_size = dlm_config.ci_buffer_size;
- cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
- cl->cl_recover_timer = dlm_config.ci_recover_timer;
- cl->cl_toss_secs = dlm_config.ci_toss_secs;
- cl->cl_scan_secs = dlm_config.ci_scan_secs;
- cl->cl_log_debug = dlm_config.ci_log_debug;
- cl->cl_log_info = dlm_config.ci_log_info;
- cl->cl_protocol = dlm_config.ci_protocol;
- cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
- cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
- cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
- cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
- memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
- DLM_LOCKSPACE_LEN);
-
space_list = &sps->ss_group;
comm_list = &cms->cs_group;
return &cl->group;
@@ -488,6 +492,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
configfs_add_default_group(&nds->ns_group, &sp->group);
INIT_LIST_HEAD(&sp->members);
+ INIT_LIST_HEAD(&sp->members_gone);
mutex_init(&sp->members_lock);
sp->members_count = 0;
sp->nds = nds;
@@ -519,6 +524,12 @@ static void release_space(struct config_item *i)
static struct config_item *make_comm(struct config_group *g, const char *name)
{
struct dlm_comm *cm;
+ unsigned int nodeid;
+ int rv;
+
+ rv = kstrtouint(name, 0, &nodeid);
+ if (rv)
+ return ERR_PTR(rv);
cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
if (!cm)
@@ -530,7 +541,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
if (!cm->seq)
cm->seq = dlm_comm_count++;
- cm->nodeid = -1;
+ cm->nodeid = nodeid;
cm->local = 0;
cm->addr_count = 0;
cm->mark = 0;
@@ -557,16 +568,25 @@ static void release_comm(struct config_item *i)
static struct config_item *make_node(struct config_group *g, const char *name)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
+ unsigned int nodeid;
struct dlm_node *nd;
+ uint32_t seq = 0;
+ int rv;
+
+ rv = kstrtouint(name, 0, &nodeid);
+ if (rv)
+ return ERR_PTR(rv);
nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
if (!nd)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&nd->item, name, &node_type);
- nd->nodeid = -1;
+ nd->nodeid = nodeid;
nd->weight = 1; /* default weight of 1 if none is set */
nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */
+ dlm_comm_seq(nodeid, &seq, true);
+ nd->comm_seq = seq;
mutex_lock(&sp->members_lock);
list_add(&nd->list, &sp->members);
@@ -580,10 +600,20 @@ static void drop_node(struct config_group *g, struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
struct dlm_node *nd = config_item_to_node(i);
+ struct dlm_member_gone *mb_gone;
+
+ mb_gone = kzalloc(sizeof(*mb_gone), GFP_KERNEL);
+ if (!mb_gone)
+ return;
mutex_lock(&sp->members_lock);
list_del(&nd->list);
sp->members_count--;
+
+ mb_gone->nodeid = nd->nodeid;
+ mb_gone->release_recover = nd->release_recover;
+ list_add(&mb_gone->list, &sp->members_gone);
+ sp->members_gone_count++;
mutex_unlock(&sp->members_lock);
config_item_put(i);
@@ -624,16 +654,19 @@ void dlm_config_exit(void)
static ssize_t comm_nodeid_show(struct config_item *item, char *buf)
{
- return sprintf(buf, "%d\n", config_item_to_comm(item)->nodeid);
+ unsigned int nodeid;
+ int rv;
+
+ rv = kstrtouint(config_item_name(item), 0, &nodeid);
+ if (WARN_ON(rv))
+ return rv;
+
+ return sprintf(buf, "%u\n", nodeid);
}
static ssize_t comm_nodeid_store(struct config_item *item, const char *buf,
size_t len)
{
- int rc = kstrtoint(buf, 0, &config_item_to_comm(item)->nodeid);
-
- if (rc)
- return rc;
return len;
}
@@ -674,7 +707,7 @@ static ssize_t comm_addr_store(struct config_item *item, const char *buf,
memcpy(addr, buf, len);
- rv = dlm_lowcomms_addr(cm->nodeid, addr, len);
+ rv = dlm_midcomms_addr(cm->nodeid, addr);
if (rv) {
kfree(addr);
return rv;
@@ -774,20 +807,19 @@ static struct configfs_attribute *comm_attrs[] = {
static ssize_t node_nodeid_show(struct config_item *item, char *buf)
{
- return sprintf(buf, "%d\n", config_item_to_node(item)->nodeid);
+ unsigned int nodeid;
+ int rv;
+
+ rv = kstrtouint(config_item_name(item), 0, &nodeid);
+ if (WARN_ON(rv))
+ return rv;
+
+ return sprintf(buf, "%u\n", nodeid);
}
static ssize_t node_nodeid_store(struct config_item *item, const char *buf,
size_t len)
{
- struct dlm_node *nd = config_item_to_node(item);
- uint32_t seq = 0;
- int rc = kstrtoint(buf, 0, &nd->nodeid);
-
- if (rc)
- return rc;
- dlm_comm_seq(nd->nodeid, &seq);
- nd->comm_seq = seq;
return len;
}
@@ -806,12 +838,34 @@ static ssize_t node_weight_store(struct config_item *item, const char *buf,
return len;
}
+static ssize_t node_release_recover_show(struct config_item *item, char *buf)
+{
+ struct dlm_node *n = config_item_to_node(item);
+
+ return sprintf(buf, "%u\n", n->release_recover);
+}
+
+static ssize_t node_release_recover_store(struct config_item *item,
+ const char *buf, size_t len)
+{
+ struct dlm_node *n = config_item_to_node(item);
+ int rc;
+
+ rc = kstrtouint(buf, 0, &n->release_recover);
+ if (rc)
+ return rc;
+
+ return len;
+}
+
CONFIGFS_ATTR(node_, nodeid);
CONFIGFS_ATTR(node_, weight);
+CONFIGFS_ATTR(node_, release_recover);
static struct configfs_attribute *node_attrs[] = {
[NODE_ATTR_NODEID] = &node_attr_nodeid,
[NODE_ATTR_WEIGHT] = &node_attr_weight,
+ [NODE_ATTR_RELEASE_RECOVER] = &node_attr_release_recover,
NULL,
};
@@ -847,7 +901,7 @@ static struct dlm_comm *get_comm(int nodeid)
if (!comm_list)
return NULL;
- mutex_lock(&clusters_root.subsys.su_mutex);
+ WARN_ON_ONCE(!mutex_is_locked(&clusters_root.subsys.su_mutex));
list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
cm = config_item_to_comm(i);
@@ -858,7 +912,6 @@ static struct dlm_comm *get_comm(int nodeid)
config_item_get(i);
break;
}
- mutex_unlock(&clusters_root.subsys.su_mutex);
if (!found)
cm = NULL;
@@ -874,9 +927,10 @@ static void put_comm(struct dlm_comm *cm)
int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
int *count_out)
{
+ struct dlm_member_gone *mb_gone, *mb_safe;
+ struct dlm_config_node *nodes, *node;
struct dlm_space *sp;
struct dlm_node *nd;
- struct dlm_config_node *nodes, *node;
int rv, count;
sp = get_space(lsname);
@@ -890,7 +944,7 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
goto out;
}
- count = sp->members_count;
+ count = sp->members_count + sp->members_gone_count;
nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
if (!nodes) {
@@ -909,6 +963,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
nd->new = 0;
}
+ /* we delay removing nodes until here because configfs does not
+ * support additional attributes for rmdir().
+ */
+ list_for_each_entry_safe(mb_gone, mb_safe, &sp->members_gone, list) {
+ node->nodeid = mb_gone->nodeid;
+ node->release_recover = mb_gone->release_recover;
+ node->gone = true;
+ node++;
+
+ list_del(&mb_gone->list);
+ sp->members_gone_count--;
+ kfree(mb_gone);
+ }
+
*count_out = count;
*nodes_out = nodes;
rv = 0;
@@ -918,11 +986,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
return rv;
}
-int dlm_comm_seq(int nodeid, uint32_t *seq)
+int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked)
{
- struct dlm_comm *cm = get_comm(nodeid);
+ struct dlm_comm *cm;
+
+ if (locked) {
+ cm = get_comm(nodeid);
+ } else {
+ mutex_lock(&clusters_root.subsys.su_mutex);
+ cm = get_comm(nodeid);
+ mutex_unlock(&clusters_root.subsys.su_mutex);
+ }
if (!cm)
- return -EEXIST;
+ return -ENOENT;
+
*seq = cm->seq;
put_comm(cm);
return 0;
@@ -930,7 +1007,7 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
int dlm_our_nodeid(void)
{
- return local_comm ? local_comm->nodeid : 0;
+ return local_comm->nodeid;
}
/* num 0 is first addr, num 1 is second addr */
@@ -954,14 +1031,12 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_LOG_INFO 1
#define DEFAULT_PROTOCOL DLM_PROTO_TCP
#define DEFAULT_MARK 0
-#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
-#define DEFAULT_WAITWARN_US 0
#define DEFAULT_NEW_RSB_COUNT 128
#define DEFAULT_RECOVER_CALLBACKS 0
#define DEFAULT_CLUSTER_NAME ""
struct dlm_config_info dlm_config = {
- .ci_tcp_port = DEFAULT_TCP_PORT,
+ .ci_tcp_port = cpu_to_be16(DEFAULT_TCP_PORT),
.ci_buffer_size = DLM_MAX_SOCKET_BUFSIZE,
.ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
.ci_recover_timer = DEFAULT_RECOVER_TIMER,
@@ -971,8 +1046,6 @@ struct dlm_config_info dlm_config = {
.ci_log_info = DEFAULT_LOG_INFO,
.ci_protocol = DEFAULT_PROTOCOL,
.ci_mark = DEFAULT_MARK,
- .ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
- .ci_waitwarn_us = DEFAULT_WAITWARN_US,
.ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
.ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
.ci_cluster_name = DEFAULT_CLUSTER_NAME
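
The config.c changes above defer the actual removal of departed members to the next dlm_config_nodes() call, since configfs rmdir() cannot carry extra attributes such as release_recover. The following is an illustrative sketch of that flush-on-next-enumeration pattern; it is not part of the patch, and the example_* names are invented.

#include <linux/list.h>
#include <linux/slab.h>
#include "config.h"	/* for struct dlm_config_node (as modified above) */

struct example_gone {
	struct list_head list;
	int nodeid;
	unsigned int release_recover;
};

/* copy each pending removal into the caller's node array, then free it;
 * this mirrors the members_gone handling in dlm_config_nodes() above
 */
static struct dlm_config_node *example_flush_gone(struct list_head *gone_list,
						  unsigned int *gone_count,
						  struct dlm_config_node *node)
{
	struct example_gone *g, *safe;

	list_for_each_entry_safe(g, safe, gone_list, list) {
		node->nodeid = g->nodeid;
		node->release_recover = g->release_recover;
		node->gone = true;
		node++;

		list_del(&g->list);
		(*gone_count)--;
		kfree(g);
	}

	return node;
}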
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index df92b0a07fc6..4ebd45f75276 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -17,30 +17,32 @@
struct dlm_config_node {
int nodeid;
int weight;
+ bool gone;
int new;
uint32_t comm_seq;
+ unsigned int release_recover;
};
-#define DLM_MAX_ADDR_COUNT 3
+extern const struct rhashtable_params dlm_rhash_rsb_params;
+
+#define DLM_MAX_ADDR_COUNT 8
#define DLM_PROTO_TCP 0
#define DLM_PROTO_SCTP 1
struct dlm_config_info {
- int ci_tcp_port;
- int ci_buffer_size;
- int ci_rsbtbl_size;
- int ci_recover_timer;
- int ci_toss_secs;
- int ci_scan_secs;
- int ci_log_debug;
- int ci_log_info;
- int ci_protocol;
- int ci_mark;
- int ci_timewarn_cs;
- int ci_waitwarn_us;
- int ci_new_rsb_count;
- int ci_recover_callbacks;
+ __be16 ci_tcp_port;
+ unsigned int ci_buffer_size;
+ unsigned int ci_rsbtbl_size;
+ unsigned int ci_recover_timer;
+ unsigned int ci_toss_secs;
+ unsigned int ci_scan_secs;
+ unsigned int ci_log_debug;
+ unsigned int ci_log_info;
+ unsigned int ci_protocol;
+ unsigned int ci_mark;
+ unsigned int ci_new_rsb_count;
+ unsigned int ci_recover_callbacks;
char ci_cluster_name[DLM_LOCKSPACE_LEN];
};
@@ -50,7 +52,7 @@ int dlm_config_init(void);
void dlm_config_exit(void);
int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
int *count_out);
-int dlm_comm_seq(int nodeid, uint32_t *seq);
+int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked);
int dlm_our_nodeid(void);
int dlm_our_addr(struct sockaddr_storage *addr, int num);
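
dlm_comm_seq() now takes a locked flag because get_comm() no longer acquires clusters_root.subsys.su_mutex itself; it only asserts that the caller holds it. A minimal sketch of that caller-may-already-hold-the-lock pattern follows; it is illustrative only, and the example_* names are invented.

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_mutex);
static LIST_HEAD(example_items);

struct example_item {
	struct list_head list;
	int nodeid;
	u32 seq;
};

/* must be called with example_mutex held */
static struct example_item *example_lookup(int nodeid)
{
	struct example_item *it;

	WARN_ON_ONCE(!mutex_is_locked(&example_mutex));
	list_for_each_entry(it, &example_items, list)
		if (it->nodeid == nodeid)
			return it;
	return NULL;
}

static int example_get_seq(int nodeid, u32 *seq, bool locked)
{
	struct example_item *it;
	int rv = -ENOENT;

	if (!locked)
		mutex_lock(&example_mutex);

	it = example_lookup(nodeid);
	if (it) {
		*seq = it->seq;
		rv = 0;
	}

	if (!locked)
		mutex_unlock(&example_mutex);
	return rv;
}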
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 47e9d57e4cae..700a0cbb2f14 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -18,6 +18,7 @@
#include "dlm_internal.h"
#include "midcomms.h"
#include "lock.h"
+#include "ast.h"
#define DLM_DEBUG_BUF_LEN 4096
static char debug_buf[DLM_DEBUG_BUF_LEN];
@@ -170,7 +171,7 @@ static void print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb,
u64 xid = 0;
u64 us;
- if (lkb->lkb_flags & DLM_IFL_USER) {
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
if (lkb->lkb_ua)
xid = lkb->lkb_ua->xid;
}
@@ -188,7 +189,7 @@ static void print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb,
lkb->lkb_ownpid,
(unsigned long long)xid,
lkb->lkb_exflags,
- lkb->lkb_flags,
+ dlm_iflags_val(lkb),
lkb->lkb_status,
lkb->lkb_grmode,
lkb->lkb_rqmode,
@@ -230,7 +231,7 @@ static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
{
u64 xid = 0;
- if (lkb->lkb_flags & DLM_IFL_USER) {
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
if (lkb->lkb_ua)
xid = lkb->lkb_ua->xid;
}
@@ -242,11 +243,11 @@ static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
lkb->lkb_ownpid,
(unsigned long long)xid,
lkb->lkb_exflags,
- lkb->lkb_flags,
+ dlm_iflags_val(lkb),
lkb->lkb_status,
lkb->lkb_grmode,
lkb->lkb_rqmode,
- lkb->lkb_last_bast.mode,
+ lkb->lkb_last_bast_cb_mode,
rsb_lookup,
lkb->lkb_wait_type,
lkb->lkb_lvbseq,
@@ -365,12 +366,10 @@ static void print_format4(struct dlm_rsb *r, struct seq_file *s)
unlock_rsb(r);
}
-struct rsbtbl_iter {
- struct dlm_rsb *rsb;
- unsigned bucket;
- int format;
- int header;
-};
+static const struct seq_operations format1_seq_ops;
+static const struct seq_operations format2_seq_ops;
+static const struct seq_operations format3_seq_ops;
+static const struct seq_operations format4_seq_ops;
/*
* If the buffer is full, seq_printf can be called again, but it
@@ -381,197 +380,61 @@ struct rsbtbl_iter {
static int table_seq_show(struct seq_file *seq, void *iter_ptr)
{
- struct rsbtbl_iter *ri = iter_ptr;
-
- switch (ri->format) {
- case 1:
- print_format1(ri->rsb, seq);
- break;
- case 2:
- if (ri->header) {
- seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n");
- ri->header = 0;
- }
- print_format2(ri->rsb, seq);
- break;
- case 3:
- if (ri->header) {
- seq_puts(seq, "version rsb 1.1 lvb 1.1 lkb 1.1\n");
- ri->header = 0;
- }
- print_format3(ri->rsb, seq);
- break;
- case 4:
- if (ri->header) {
- seq_puts(seq, "version 4 rsb 2\n");
- ri->header = 0;
- }
- print_format4(ri->rsb, seq);
- break;
- }
+ struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_slow_list);
+
+ if (seq->op == &format1_seq_ops)
+ print_format1(rsb, seq);
+ else if (seq->op == &format2_seq_ops)
+ print_format2(rsb, seq);
+ else if (seq->op == &format3_seq_ops)
+ print_format3(rsb, seq);
+ else if (seq->op == &format4_seq_ops)
+ print_format4(rsb, seq);
return 0;
}
-static const struct seq_operations format1_seq_ops;
-static const struct seq_operations format2_seq_ops;
-static const struct seq_operations format3_seq_ops;
-static const struct seq_operations format4_seq_ops;
-
static void *table_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct rb_root *tree;
- struct rb_node *node;
struct dlm_ls *ls = seq->private;
- struct rsbtbl_iter *ri;
- struct dlm_rsb *r;
- loff_t n = *pos;
- unsigned bucket, entry;
- int toss = (seq->op == &format4_seq_ops);
-
- bucket = n >> 32;
- entry = n & ((1LL << 32) - 1);
-
- if (bucket >= ls->ls_rsbtbl_size)
- return NULL;
-
- ri = kzalloc(sizeof(*ri), GFP_NOFS);
- if (!ri)
- return NULL;
- if (n == 0)
- ri->header = 1;
- if (seq->op == &format1_seq_ops)
- ri->format = 1;
- if (seq->op == &format2_seq_ops)
- ri->format = 2;
- if (seq->op == &format3_seq_ops)
- ri->format = 3;
- if (seq->op == &format4_seq_ops)
- ri->format = 4;
-
- tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(tree)) {
- for (node = rb_first(tree); node; node = rb_next(node)) {
- r = rb_entry(node, struct dlm_rsb, res_hashnode);
- if (!entry--) {
- dlm_hold_rsb(r);
- ri->rsb = r;
- ri->bucket = bucket;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- return ri;
- }
- }
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
-
- /*
- * move to the first rsb in the next non-empty bucket
- */
+ struct list_head *list;
- /* zero the entry */
- n &= ~((1LL << 32) - 1);
+ if (!*pos) {
+ if (seq->op == &format2_seq_ops)
+ seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n");
+ else if (seq->op == &format3_seq_ops)
+ seq_puts(seq, "rsb ptr nodeid first_lkid flags !root_list_empty !recover_list_empty recover_locks_count len\n");
+ else if (seq->op == &format4_seq_ops)
+ seq_puts(seq, "rsb ptr nodeid master_nodeid dir_nodeid our_nodeid toss_time flags len str|hex name\n");
+ }
- while (1) {
- bucket++;
- n += 1LL << 32;
+ if (seq->op == &format4_seq_ops)
+ list = &ls->ls_slow_inactive;
+ else
+ list = &ls->ls_slow_active;
- if (bucket >= ls->ls_rsbtbl_size) {
- kfree(ri);
- return NULL;
- }
- tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(tree)) {
- node = rb_first(tree);
- r = rb_entry(node, struct dlm_rsb, res_hashnode);
- dlm_hold_rsb(r);
- ri->rsb = r;
- ri->bucket = bucket;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- *pos = n;
- return ri;
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- }
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ return seq_list_start(list, *pos);
}
static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
{
struct dlm_ls *ls = seq->private;
- struct rsbtbl_iter *ri = iter_ptr;
- struct rb_root *tree;
- struct rb_node *next;
- struct dlm_rsb *r, *rp;
- loff_t n = *pos;
- unsigned bucket;
- int toss = (seq->op == &format4_seq_ops);
-
- bucket = n >> 32;
-
- /*
- * move to the next rsb in the same bucket
- */
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- rp = ri->rsb;
- next = rb_next(&rp->res_hashnode);
-
- if (next) {
- r = rb_entry(next, struct dlm_rsb, res_hashnode);
- dlm_hold_rsb(r);
- ri->rsb = r;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- dlm_put_rsb(rp);
- ++*pos;
- return ri;
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- dlm_put_rsb(rp);
-
- /*
- * move to the first rsb in the next non-empty bucket
- */
-
- /* zero the entry */
- n &= ~((1LL << 32) - 1);
+ struct list_head *list;
- while (1) {
- bucket++;
- n += 1LL << 32;
+ if (seq->op == &format4_seq_ops)
+ list = &ls->ls_slow_inactive;
+ else
+ list = &ls->ls_slow_active;
- if (bucket >= ls->ls_rsbtbl_size) {
- kfree(ri);
- ++*pos;
- return NULL;
- }
- tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(tree)) {
- next = rb_first(tree);
- r = rb_entry(next, struct dlm_rsb, res_hashnode);
- dlm_hold_rsb(r);
- ri->rsb = r;
- ri->bucket = bucket;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- *pos = n;
- return ri;
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- }
+ return seq_list_next(iter_ptr, list, pos);
}
static void table_seq_stop(struct seq_file *seq, void *iter_ptr)
{
- struct rsbtbl_iter *ri = iter_ptr;
+ struct dlm_ls *ls = seq->private;
- if (ri) {
- dlm_put_rsb(ri->rsb);
- kfree(ri);
- }
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
static const struct seq_operations format1_seq_ops = {
@@ -635,6 +498,35 @@ static int table_open2(struct inode *inode, struct file *file)
return 0;
}
+static ssize_t table_write2(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ int n, len, lkb_nodeid, lkb_status, error;
+ char name[DLM_RESNAME_MAXLEN + 1] = {};
+ struct dlm_ls *ls = seq->private;
+ unsigned int lkb_flags;
+ char buf[256] = {};
+ uint32_t lkb_id;
+
+ if (copy_from_user(buf, user_buf,
+ min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ n = sscanf(buf, "%x %" __stringify(DLM_RESNAME_MAXLEN) "s %x %d %d",
+ &lkb_id, name, &lkb_flags, &lkb_nodeid, &lkb_status);
+ if (n != 5)
+ return -EINVAL;
+
+ len = strnlen(name, DLM_RESNAME_MAXLEN);
+ error = dlm_debug_add_lkb(ls, lkb_id, name, len, lkb_flags,
+ lkb_nodeid, lkb_status);
+ if (error)
+ return error;
+
+ return count;
+}
+
static int table_open3(struct inode *inode, struct file *file)
{
struct seq_file *seq;
@@ -675,6 +567,7 @@ static const struct file_operations format2_fops = {
.owner = THIS_MODULE,
.open = table_open2,
.read = seq_read,
+ .write = table_write2,
.llseek = seq_lseek,
.release = seq_release
};
@@ -706,7 +599,13 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv;
mutex_lock(&debug_buf_lock);
- mutex_lock(&ls->ls_waiters_mutex);
+ ret = dlm_lock_recovery_try(ls);
+ if (!ret) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+ spin_lock_bh(&ls->ls_waiters_lock);
memset(debug_buf, 0, sizeof(debug_buf));
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
@@ -717,17 +616,49 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
break;
pos += ret;
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
+ dlm_unlock_recovery(ls);
rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
+out:
mutex_unlock(&debug_buf_lock);
return rv;
}
+static ssize_t waiters_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dlm_ls *ls = file->private_data;
+ int mstype, to_nodeid;
+ char buf[128] = {};
+ uint32_t lkb_id;
+ int n, error;
+
+ if (copy_from_user(buf, user_buf,
+ min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ n = sscanf(buf, "%x %d %d", &lkb_id, &mstype, &to_nodeid);
+ if (n != 3)
+ return -EINVAL;
+
+ error = dlm_lock_recovery_try(ls);
+ if (!error)
+ return -EAGAIN;
+
+ error = dlm_debug_add_lkb_to_waiters(ls, lkb_id, mstype, to_nodeid);
+ dlm_unlock_recovery(ls);
+ if (error)
+ return error;
+
+ return count;
+}
+
static const struct file_operations waiters_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = waiters_read,
+ .write = waiters_write,
.llseek = default_llseek,
};
@@ -738,6 +669,7 @@ void dlm_delete_debug_file(struct dlm_ls *ls)
debugfs_remove(ls->ls_debug_locks_dentry);
debugfs_remove(ls->ls_debug_all_dentry);
debugfs_remove(ls->ls_debug_toss_dentry);
+ debugfs_remove(ls->ls_debug_queued_asts_dentry);
}
static int dlm_state_show(struct seq_file *file, void *offset)
@@ -768,6 +700,41 @@ static int dlm_version_show(struct seq_file *file, void *offset)
}
DEFINE_SHOW_ATTRIBUTE(dlm_version);
+static ssize_t dlm_rawmsg_write(struct file *fp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ void *buf;
+ int ret;
+
+ if (count > PAGE_SIZE || count < sizeof(struct dlm_header))
+ return -EINVAL;
+
+ buf = kmalloc(PAGE_SIZE, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = dlm_midcomms_rawmsg_send(fp->private_data, buf, count);
+ if (ret)
+ goto out;
+
+ kfree(buf);
+ return count;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations dlm_rawmsg_fops = {
+ .open = simple_open,
+ .write = dlm_rawmsg_write,
+};
+
void *dlm_create_debug_comms_file(int nodeid, void *data)
{
struct dentry *d_node;
@@ -782,6 +749,7 @@ void *dlm_create_debug_comms_file(int nodeid, void *data)
debugfs_create_file("send_queue_count", 0444, d_node, data,
&dlm_send_queue_cnt_fops);
debugfs_create_file("version", 0444, d_node, data, &dlm_version_fops);
+ debugfs_create_file("rawmsg", 0200, d_node, data, &dlm_rawmsg_fops);
return d_node;
}
@@ -793,7 +761,8 @@ void dlm_delete_debug_comms_file(void *ctx)
void dlm_create_debug_file(struct dlm_ls *ls)
{
- char name[DLM_LOCKSPACE_LEN + 8];
+ /* Reserve enough space for the longest file name */
+ char name[DLM_LOCKSPACE_LEN + sizeof("_queued_asts")];
/* format 1 */
@@ -805,19 +774,17 @@ void dlm_create_debug_file(struct dlm_ls *ls)
/* format 2 */
- memset(name, 0, sizeof(name));
- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name);
+ snprintf(name, sizeof(name), "%s_locks", ls->ls_name);
ls->ls_debug_locks_dentry = debugfs_create_file(name,
- S_IFREG | S_IRUGO,
+ 0644,
dlm_root,
ls,
&format2_fops);
/* format 3 */
- memset(name, 0, sizeof(name));
- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name);
+ snprintf(name, sizeof(name), "%s_all", ls->ls_name);
ls->ls_debug_all_dentry = debugfs_create_file(name,
S_IFREG | S_IRUGO,
@@ -827,8 +794,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
/* format 4 */
- memset(name, 0, sizeof(name));
- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name);
+ snprintf(name, sizeof(name), "%s_toss", ls->ls_name);
ls->ls_debug_toss_dentry = debugfs_create_file(name,
S_IFREG | S_IRUGO,
@@ -836,11 +802,10 @@ void dlm_create_debug_file(struct dlm_ls *ls)
ls,
&format4_fops);
- memset(name, 0, sizeof(name));
- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name);
+ snprintf(name, sizeof(name), "%s_waiters", ls->ls_name);
ls->ls_debug_waiters_dentry = debugfs_create_file(name,
- S_IFREG | S_IRUGO,
+ 0644,
dlm_root,
ls,
&waiters_fops);
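
The debugfs rework above drops the per-bucket rbtree iterator and walks the new ls_slow_active/ls_slow_inactive lists with the generic seq_list helpers, holding ls_rsbtbl_lock from ->start() to ->stop(). Below is a generic sketch of that seq_file pattern; it is illustrative only, with invented example_* names.

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct example_table {
	rwlock_t lock;
	struct list_head items;
};

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct example_table *t = seq->private;

	/* the lock taken here is released in ->stop() */
	read_lock_bh(&t->lock);
	return seq_list_start(&t->items, *pos);
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct example_table *t = seq->private;

	return seq_list_next(v, &t->items, pos);
}

static void example_seq_stop(struct seq_file *seq, void *v)
{
	struct example_table *t = seq->private;

	read_unlock_bh(&t->lock);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	/* v is the list_head embedded in the containing object,
	 * like res_slow_list in the patch above
	 */
	seq_printf(seq, "entry %p\n", v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};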
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 45ebbe602bbf..b1ab0adbd9d0 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -47,18 +47,16 @@ int dlm_dir_nodeid(struct dlm_rsb *r)
return r->res_dir_nodeid;
}
-void dlm_recover_dir_nodeid(struct dlm_ls *ls)
+void dlm_recover_dir_nodeid(struct dlm_ls *ls, const struct list_head *root_list)
{
struct dlm_rsb *r;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash);
}
- up_read(&ls->ls_root_sem);
}
-int dlm_recover_directory(struct dlm_ls *ls)
+int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq)
{
struct dlm_member *memb;
char *b, *last_name = NULL;
@@ -84,14 +82,13 @@ int dlm_recover_directory(struct dlm_ls *ls)
for (;;) {
int left;
- error = dlm_recovery_stopped(ls);
- if (error) {
+ if (dlm_recovery_stopped(ls)) {
error = -EINTR;
goto out_free;
}
error = dlm_rcom_names(ls, memb->nodeid,
- last_name, last_len);
+ last_name, last_len, seq);
if (error)
goto out_free;
@@ -102,7 +99,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
*/
b = ls->ls_recover_buf->rc_buf;
- left = ls->ls_recover_buf->rc_header.h_length;
+ left = le16_to_cpu(ls->ls_recover_buf->rc_header.h_length);
left -= sizeof(struct dlm_rcom);
for (;;) {
@@ -197,70 +194,156 @@ int dlm_recover_directory(struct dlm_ls *ls)
return error;
}
-static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len)
+static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name,
+ int len)
{
struct dlm_rsb *r;
- uint32_t hash, bucket;
int rv;
- hash = jhash(name, len, 0);
- bucket = hash & (ls->ls_rsbtbl_size - 1);
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);
- if (rv)
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
- name, len, &r);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
-
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
if (!rv)
return r;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, &ls->ls_masters_list, res_masters_list) {
if (len == r->res_length && !memcmp(name, r->res_name, len)) {
- up_read(&ls->ls_root_sem);
log_debug(ls, "find_rsb_root revert to root_list %s",
r->res_name);
return r;
}
}
- up_read(&ls->ls_root_sem);
return NULL;
}
+struct dlm_dir_dump {
+ /* values recorded when the dump starts; later chunks must
+ * match them to confirm the whole dump happened within one
+ * recovery sequence. Sanity check only.
+ */
+ uint64_t seq_init;
+ uint64_t nodeid_init;
+ /* where the previous chunk stopped; compared against the
+ * next lookup as a sanity check.
+ */
+ struct list_head *last;
+
+ unsigned int sent_res; /* for log info */
+ unsigned int sent_msg; /* for log info */
+
+ struct list_head list;
+};
+
+static void drop_dir_ctx(struct dlm_ls *ls, int nodeid)
+{
+ struct dlm_dir_dump *dd, *safe;
+
+ write_lock_bh(&ls->ls_dir_dump_lock);
+ list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) {
+ if (dd->nodeid_init == nodeid) {
+ log_error(ls, "drop dump seq %llu",
+ (unsigned long long)dd->seq_init);
+ list_del(&dd->list);
+ kfree(dd);
+ }
+ }
+ write_unlock_bh(&ls->ls_dir_dump_lock);
+}
+
+static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid)
+{
+ struct dlm_dir_dump *iter, *dd = NULL;
+
+ read_lock_bh(&ls->ls_dir_dump_lock);
+ list_for_each_entry(iter, &ls->ls_dir_dump_list, list) {
+ if (iter->nodeid_init == nodeid) {
+ dd = iter;
+ break;
+ }
+ }
+ read_unlock_bh(&ls->ls_dir_dump_lock);
+
+ return dd;
+}
+
+static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid)
+{
+ struct dlm_dir_dump *dd;
+
+ dd = lookup_dir_dump(ls, nodeid);
+ if (dd) {
+ log_error(ls, "found ongoing dir dump for node %d, will drop it",
+ nodeid);
+ drop_dir_ctx(ls, nodeid);
+ }
+
+ dd = kzalloc(sizeof(*dd), GFP_ATOMIC);
+ if (!dd)
+ return NULL;
+
+ dd->seq_init = ls->ls_recover_seq;
+ dd->nodeid_init = nodeid;
+
+ write_lock_bh(&ls->ls_dir_dump_lock);
+ list_add(&dd->list, &ls->ls_dir_dump_list);
+ write_unlock_bh(&ls->ls_dir_dump_lock);
+
+ return dd;
+}
+
/* Find the rsb where we left off (or start again), then send rsb names
for rsb's we're master of and whose directory node matches the requesting
node. inbuf is the rsb name last sent, inlen is the name's length */
-void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
+void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
char *outbuf, int outlen, int nodeid)
{
struct list_head *list;
struct dlm_rsb *r;
int offset = 0, dir_nodeid;
+ struct dlm_dir_dump *dd;
__be16 be_namelen;
- down_read(&ls->ls_root_sem);
+ read_lock_bh(&ls->ls_masters_lock);
if (inlen > 1) {
+ dd = lookup_dir_dump(ls, nodeid);
+ if (!dd) {
+ log_error(ls, "failed to lookup dir dump context nodeid: %d",
+ nodeid);
+ goto out;
+ }
+
+ /* next chunk in dump */
r = find_rsb_root(ls, inbuf, inlen);
if (!r) {
- inbuf[inlen - 1] = '\0';
- log_error(ls, "copy_master_names from %d start %d %s",
- nodeid, inlen, inbuf);
+ log_error(ls, "copy_master_names from %d start %d %.*s",
+ nodeid, inlen, inlen, inbuf);
+ goto out;
+ }
+ list = r->res_masters_list.next;
+
+ /* sanity checks */
+ if (dd->last != &r->res_masters_list ||
+ dd->seq_init != ls->ls_recover_seq) {
+ log_error(ls, "failed dir dump sanity check seq_init: %llu seq: %llu",
+ (unsigned long long)dd->seq_init,
+ (unsigned long long)ls->ls_recover_seq);
goto out;
}
- list = r->res_root_list.next;
} else {
- list = ls->ls_root_list.next;
- }
+ dd = init_dir_dump(ls, nodeid);
+ if (!dd) {
+ log_error(ls, "failed to allocate dir dump context");
+ goto out;
+ }
- for (offset = 0; list != &ls->ls_root_list; list = list->next) {
- r = list_entry(list, struct dlm_rsb, res_root_list);
- if (r->res_nodeid)
- continue;
+ /* start dump */
+ list = ls->ls_masters_list.next;
+ dd->last = list;
+ }
+ for (offset = 0; list != &ls->ls_masters_list; list = list->next) {
+ r = list_entry(list, struct dlm_rsb, res_masters_list);
dir_nodeid = dlm_dir_nodeid(r);
if (dir_nodeid != nodeid)
continue;
@@ -278,7 +361,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
be_namelen = cpu_to_be16(0);
memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
offset += sizeof(__be16);
- ls->ls_recover_dir_sent_msg++;
+ dd->sent_msg++;
goto out;
}
@@ -287,7 +370,8 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
offset += sizeof(__be16);
memcpy(outbuf + offset, r->res_name, r->res_length);
offset += r->res_length;
- ls->ls_recover_dir_sent_res++;
+ dd->sent_res++;
+ dd->last = list;
}
/*
@@ -295,14 +379,22 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
* terminating record.
*/
- if ((list == &ls->ls_root_list) &&
+ if ((list == &ls->ls_masters_list) &&
(offset + sizeof(uint16_t) <= outlen)) {
+ /* end dump */
be_namelen = cpu_to_be16(0xFFFF);
memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
offset += sizeof(__be16);
- ls->ls_recover_dir_sent_msg++;
+ dd->sent_msg++;
+ log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages",
+ nodeid, dd->sent_res, dd->sent_msg);
+
+ write_lock_bh(&ls->ls_dir_dump_lock);
+ list_del_init(&dd->list);
+ write_unlock_bh(&ls->ls_dir_dump_lock);
+ kfree(dd);
}
out:
- up_read(&ls->ls_root_sem);
+ read_unlock_bh(&ls->ls_masters_lock);
}
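
dlm_copy_master_names() above keeps one dlm_dir_dump cursor per requesting node so a directory dump can be resumed across messages and validated against the recovery sequence. Below is an illustrative sketch of the lookup-or-restart part of such a per-peer cursor; it is not the patch's code, and the example_* names are invented.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_cursor {
	struct list_head list;		/* on the per-lockspace cursor list */
	struct list_head *last;		/* where the previous chunk stopped */
	u64 seq_init;			/* recovery sequence at dump start */
	int nodeid;			/* requesting node */
};

static LIST_HEAD(example_cursors);
static DEFINE_SPINLOCK(example_cursors_lock);

static struct example_cursor *example_cursor_lookup(int nodeid)
{
	struct example_cursor *c;

	list_for_each_entry(c, &example_cursors, list)
		if (c->nodeid == nodeid)
			return c;
	return NULL;
}

/* start a new dump for @nodeid, dropping any stale cursor first */
static struct example_cursor *example_cursor_start(int nodeid, u64 seq)
{
	struct example_cursor *c;

	spin_lock(&example_cursors_lock);
	c = example_cursor_lookup(nodeid);
	if (c)
		list_del(&c->list);
	spin_unlock(&example_cursors_lock);
	kfree(c);

	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return NULL;

	c->nodeid = nodeid;
	c->seq_init = seq;

	spin_lock(&example_cursors_lock);
	list_add(&c->list, &example_cursors);
	spin_unlock(&example_cursors_lock);
	return c;
}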
diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h
index 03844d086be2..5b2a7ee3762d 100644
--- a/fs/dlm/dir.h
+++ b/fs/dlm/dir.h
@@ -14,10 +14,11 @@
int dlm_dir_nodeid(struct dlm_rsb *rsb);
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
-void dlm_recover_dir_nodeid(struct dlm_ls *ls);
-int dlm_recover_directory(struct dlm_ls *ls);
-void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
- char *outbuf, int outlen, int nodeid);
+void dlm_recover_dir_nodeid(struct dlm_ls *ls,
+ const struct list_head *root_list);
+int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq);
+void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
+ char *outbuf, int outlen, int nodeid);
#endif /* __DIR_DOT_H__ */
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 5f57538b5d45..d534a4bc162b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -16,6 +16,7 @@
* This is the main header file to be included in each DLM source file.
*/
+#include <uapi/linux/dlm_device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -33,20 +34,15 @@
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/miscdevice.h>
+#include <linux/rhashtable.h>
#include <linux/mutex.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include "config.h"
-/* Size of the temp buffer midcomms allocates on the stack.
- We try to make this large enough so most messages fit.
- FIXME: should sctp make this unnecessary? */
-
-#define DLM_INBUF_LEN 148
-
struct dlm_ls;
struct dlm_lkb;
struct dlm_rsb;
@@ -104,17 +100,6 @@ do { \
} \
}
-
-#define DLM_RTF_SHRINK 0x00000001
-
-struct dlm_rsbtable {
- struct rb_root keep;
- struct rb_root toss;
- spinlock_t lock;
- uint32_t flags;
-};
-
-
/*
* Lockspace member (per node in a ls)
*/
@@ -151,7 +136,6 @@ struct dlm_args {
void (*bastfn) (void *astparam, int mode);
int mode;
struct dlm_lksb *lksb;
- unsigned long timeout;
};
@@ -201,33 +185,69 @@ struct dlm_args {
#define DLM_LKSTS_GRANTED 2
#define DLM_LKSTS_CONVERT 3
-/* lkb_flags */
+/* lkb_iflags */
+
+#define DLM_IFL_MSTCPY_BIT 16
+#define __DLM_IFL_MIN_BIT DLM_IFL_MSTCPY_BIT
+#define DLM_IFL_RESEND_BIT 17
+#define DLM_IFL_DEAD_BIT 18
+#define DLM_IFL_OVERLAP_UNLOCK_BIT 19
+#define DLM_IFL_OVERLAP_CANCEL_BIT 20
+#define DLM_IFL_ENDOFLIFE_BIT 21
+#define DLM_IFL_DEADLOCK_CANCEL_BIT 24
+#define __DLM_IFL_MAX_BIT DLM_IFL_DEADLOCK_CANCEL_BIT
-#define DLM_IFL_MSTCPY 0x00010000
-#define DLM_IFL_RESEND 0x00020000
-#define DLM_IFL_DEAD 0x00040000
-#define DLM_IFL_OVERLAP_UNLOCK 0x00080000
-#define DLM_IFL_OVERLAP_CANCEL 0x00100000
-#define DLM_IFL_ENDOFLIFE 0x00200000
-#define DLM_IFL_WATCH_TIMEWARN 0x00400000
-#define DLM_IFL_TIMEOUT_CANCEL 0x00800000
-#define DLM_IFL_DEADLOCK_CANCEL 0x01000000
-#define DLM_IFL_STUB_MS 0x02000000 /* magic number for m_flags */
-#define DLM_IFL_USER 0x00000001
-#define DLM_IFL_ORPHAN 0x00000002
+/* lkb_dflags */
-#define DLM_CALLBACKS_SIZE 6
+#define DLM_DFL_USER_BIT 0
+#define __DLM_DFL_MIN_BIT DLM_DFL_USER_BIT
+#define DLM_DFL_ORPHAN_BIT 1
+#define __DLM_DFL_MAX_BIT DLM_DFL_ORPHAN_BIT
#define DLM_CB_CAST 0x00000001
#define DLM_CB_BAST 0x00000002
-#define DLM_CB_SKIP 0x00000004
+
+/* much of this is just saving user space pointers associated with the
+ * lock that we pass back to the user lib with an ast
+ */
+
+struct dlm_user_args {
+ struct dlm_user_proc *proc; /* each process that opens the lockspace
+ * device has private data
+ * (dlm_user_proc) on the struct file,
+ * the process's locks point back to it
+ */
+ struct dlm_lksb lksb;
+ struct dlm_lksb __user *user_lksb;
+ void __user *castparam;
+ void __user *castaddr;
+ void __user *bastparam;
+ void __user *bastaddr;
+ uint64_t xid;
+};
struct dlm_callback {
- uint64_t seq;
uint32_t flags; /* DLM_CBF_ */
int sb_status; /* copy to lksb status */
uint8_t sb_flags; /* copy to lksb flags */
int8_t mode; /* rq mode of bast, gr mode of cast */
+ bool copy_lvb;
+ struct dlm_lksb *lkb_lksb;
+ unsigned char lvbptr[DLM_USER_LVB_LEN];
+
+ union {
+ void *astparam; /* caller's ast arg */
+ struct dlm_user_args ua;
+ };
+ struct work_struct work;
+ void (*bastfn)(void *astparam, int mode);
+ void (*astfn)(void *astparam);
+ char res_name[DLM_RESNAME_MAXLEN];
+ size_t res_length;
+ uint32_t ls_id;
+ uint32_t lkb_id;
+
+ struct list_head list;
};
struct dlm_lkb {
@@ -238,8 +258,9 @@ struct dlm_lkb {
uint32_t lkb_id; /* our lock ID */
uint32_t lkb_remid; /* lock ID on remote partner */
uint32_t lkb_exflags; /* external flags from caller */
- uint32_t lkb_sbflags; /* lksb flags */
- uint32_t lkb_flags; /* internal flags */
+ unsigned long lkb_sbflags; /* lksb flags */
+ unsigned long lkb_dflags; /* distributed flags */
+ unsigned long lkb_iflags; /* internal flags */
uint32_t lkb_lvbseq; /* lvb sequence number */
int8_t lkb_status; /* granted, waiting, convert */
@@ -255,17 +276,12 @@ struct dlm_lkb {
struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */
struct list_head lkb_wait_reply; /* waiting for remote reply */
struct list_head lkb_ownqueue; /* list of locks for a process */
- struct list_head lkb_time_list;
ktime_t lkb_timestamp;
- ktime_t lkb_wait_time;
- unsigned long lkb_timeout_cs;
-
- struct mutex lkb_cb_mutex;
- struct work_struct lkb_cb_work;
- struct list_head lkb_cb_list; /* for ls_cb_delay or proc->asts */
- struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE];
- struct dlm_callback lkb_last_cast;
- struct dlm_callback lkb_last_bast;
+
+ int8_t lkb_last_cast_cb_mode;
+ int8_t lkb_last_bast_cb_mode;
+ int8_t lkb_last_cb_mode;
+ uint8_t lkb_last_cb_flags;
ktime_t lkb_last_cast_time; /* for debugging */
ktime_t lkb_last_bast_time; /* for debugging */
@@ -279,6 +295,7 @@ struct dlm_lkb {
void *lkb_astparam; /* caller's ast arg */
struct dlm_user_args *lkb_ua;
};
+ struct rcu_head rcu;
};
/*
@@ -294,30 +311,30 @@ struct dlm_lkb {
struct dlm_rsb {
struct dlm_ls *res_ls; /* the lockspace */
struct kref res_ref;
- struct mutex res_mutex;
+ spinlock_t res_lock;
unsigned long res_flags;
int res_length; /* length of rsb name */
int res_nodeid;
int res_master_nodeid;
int res_dir_nodeid;
- int res_id; /* for ls_recover_idr */
+ unsigned long res_id; /* for ls_recover_xa */
uint32_t res_lvbseq;
uint32_t res_hash;
- uint32_t res_bucket; /* rsbtbl */
unsigned long res_toss_time;
uint32_t res_first_lkid;
struct list_head res_lookup; /* lkbs waiting on first */
- union {
- struct list_head res_hashchain;
- struct rb_node res_hashnode; /* rsbtbl */
- };
+ struct rhash_head res_node; /* rsbtbl */
struct list_head res_grantqueue;
struct list_head res_convertqueue;
struct list_head res_waitqueue;
+ struct list_head res_slow_list; /* ls_slow_* */
+ struct list_head res_scan_list;
struct list_head res_root_list; /* used for recovery */
+ struct list_head res_masters_list; /* used for recovery */
struct list_head res_recover_list; /* used for recovery */
int res_recover_locks_count;
+ struct rcu_head rcu;
char *res_lvbptr;
char res_name[DLM_RESNAME_MAXLEN+1];
@@ -350,6 +367,8 @@ enum rsb_flags {
RSB_RECOVER_CONVERT,
RSB_RECOVER_GRANT,
RSB_RECOVER_LVB_INVAL,
+ RSB_INACTIVE,
+ RSB_HASHED, /* set while rsb is on ls_rsbtbl */
};
static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
@@ -385,15 +404,15 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
#define DLM_FIN 5
struct dlm_header {
- uint32_t h_version;
+ __le32 h_version;
union {
/* for DLM_MSG and DLM_RCOM */
- uint32_t h_lockspace;
+ __le32 h_lockspace;
/* for DLM_ACK and DLM_OPTS */
- uint32_t h_seq;
+ __le32 h_seq;
} u;
- uint32_t h_nodeid; /* nodeid of sender */
- uint16_t h_length;
+ __le32 h_nodeid; /* nodeid of sender */
+ __le16 h_length;
uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */
uint8_t h_pad;
};
@@ -415,24 +434,24 @@ struct dlm_header {
struct dlm_message {
struct dlm_header m_header;
- uint32_t m_type; /* DLM_MSG_ */
- uint32_t m_nodeid;
- uint32_t m_pid;
- uint32_t m_lkid; /* lkid on sender */
- uint32_t m_remid; /* lkid on receiver */
- uint32_t m_parent_lkid;
- uint32_t m_parent_remid;
- uint32_t m_exflags;
- uint32_t m_sbflags;
- uint32_t m_flags;
- uint32_t m_lvbseq;
- uint32_t m_hash;
- int m_status;
- int m_grmode;
- int m_rqmode;
- int m_bastmode;
- int m_asts;
- int m_result; /* 0 or -EXXX */
+ __le32 m_type; /* DLM_MSG_ */
+ __le32 m_nodeid;
+ __le32 m_pid;
+ __le32 m_lkid; /* lkid on sender */
+ __le32 m_remid; /* lkid on receiver */
+ __le32 m_parent_lkid;
+ __le32 m_parent_remid;
+ __le32 m_exflags;
+ __le32 m_sbflags;
+ __le32 m_flags;
+ __le32 m_lvbseq;
+ __le32 m_hash;
+ __le32 m_status;
+ __le32 m_grmode;
+ __le32 m_rqmode;
+ __le32 m_bastmode;
+ __le32 m_asts;
+ __le32 m_result; /* 0 or -EXXX */
char m_extra[]; /* name or lvb */
};
@@ -457,18 +476,18 @@ struct dlm_message {
struct dlm_rcom {
struct dlm_header rc_header;
- uint32_t rc_type; /* DLM_RCOM_ */
- int rc_result; /* multi-purpose */
- uint64_t rc_id; /* match reply with request */
- uint64_t rc_seq; /* sender's ls_recover_seq */
- uint64_t rc_seq_reply; /* remote ls_recover_seq */
+ __le32 rc_type; /* DLM_RCOM_ */
+ __le32 rc_result; /* multi-purpose */
+ __le64 rc_id; /* match reply with request */
+ __le64 rc_seq; /* sender's ls_recover_seq */
+ __le64 rc_seq_reply; /* remote ls_recover_seq */
char rc_buf[];
};
struct dlm_opt_header {
- uint16_t t_type;
- uint16_t t_length;
- uint32_t t_pad;
+ __le16 t_type;
+ __le16 t_length;
+ __le32 t_pad;
/* need to be 8 byte aligned */
char t_value[];
};
@@ -478,8 +497,8 @@ struct dlm_opts {
struct dlm_header o_header;
uint8_t o_nextcmd;
uint8_t o_pad;
- uint16_t o_optlen;
- uint32_t o_pad2;
+ __le16 o_optlen;
+ __le32 o_pad2;
char o_opts[];
};
@@ -540,52 +559,39 @@ struct rcom_lock {
char rl_lvb[];
};
-/*
- * The max number of resources per rsbtbl bucket that shrink will attempt
- * to remove in each iteration.
- */
-
-#define DLM_REMOVE_NAMES_MAX 8
-
struct dlm_ls {
struct list_head ls_list; /* list of lockspaces */
- dlm_lockspace_t *ls_local_handle;
uint32_t ls_global_id; /* global unique lockspace ID */
uint32_t ls_generation;
uint32_t ls_exflags;
int ls_lvblen;
- int ls_count; /* refcount of processes in
+ atomic_t ls_count; /* refcount of processes in
the dlm using this ls */
+ wait_queue_head_t ls_count_wait;
int ls_create_count; /* create/release refcount */
unsigned long ls_flags; /* LSFL_ */
- unsigned long ls_scan_time;
struct kobject ls_kobj;
- struct idr ls_lkbidr;
- spinlock_t ls_lkbidr_spin;
+ struct xarray ls_lkbxa;
+ rwlock_t ls_lkbxa_lock;
- struct dlm_rsbtable *ls_rsbtbl;
- uint32_t ls_rsbtbl_size;
+ /* an rsb is on rsbtbl for primary locking functions,
+ and on a slow list for recovery/dump iteration */
+ struct rhashtable ls_rsbtbl;
+ rwlock_t ls_rsbtbl_lock; /* for ls_rsbtbl and ls_slow */
+ struct list_head ls_slow_inactive; /* to iterate rsbtbl */
+ struct list_head ls_slow_active; /* to iterate rsbtbl */
- struct mutex ls_waiters_mutex;
+ struct timer_list ls_scan_timer; /* based on first scan_list rsb toss_time */
+ struct list_head ls_scan_list; /* rsbs ordered by res_toss_time */
+ spinlock_t ls_scan_lock;
+
+ spinlock_t ls_waiters_lock;
struct list_head ls_waiters; /* lkbs needing a reply */
- struct mutex ls_orphans_mutex;
+ spinlock_t ls_orphans_lock;
struct list_head ls_orphans;
- struct mutex ls_timeout_mutex;
- struct list_head ls_timeout;
-
- spinlock_t ls_new_rsb_spin;
- int ls_new_rsb_count;
- struct list_head ls_new_rsb; /* new rsb structs */
-
- spinlock_t ls_remove_spin;
- char ls_remove_name[DLM_RESNAME_MAXLEN+1];
- char *ls_remove_names[DLM_REMOVE_NAMES_MAX];
- int ls_remove_len;
- int ls_remove_lens[DLM_REMOVE_NAMES_MAX];
-
struct list_head ls_nodes; /* current nodes in ls */
struct list_head ls_nodes_gone; /* dead node list, recovery */
int ls_num_nodes; /* number of nodes in ls */
@@ -598,20 +604,21 @@ struct dlm_ls {
int ls_slots_size;
struct dlm_slot *ls_slots;
- struct dlm_rsb ls_stub_rsb; /* for returning errors */
- struct dlm_lkb ls_stub_lkb; /* for returning errors */
- struct dlm_message ls_stub_ms; /* for faking a reply */
+ struct dlm_rsb ls_local_rsb; /* for returning errors */
+ struct dlm_lkb ls_local_lkb; /* for returning errors */
+ struct dlm_message ls_local_ms; /* for faking a reply */
struct dentry *ls_debug_rsb_dentry; /* debugfs */
struct dentry *ls_debug_waiters_dentry; /* debugfs */
struct dentry *ls_debug_locks_dentry; /* debugfs */
struct dentry *ls_debug_all_dentry; /* debugfs */
struct dentry *ls_debug_toss_dentry; /* debugfs */
+ struct dentry *ls_debug_queued_asts_dentry; /* debugfs */
wait_queue_head_t ls_uevent_wait; /* user part of join/leave */
int ls_uevent_result;
- struct completion ls_members_done;
- int ls_members_result;
+ struct completion ls_recovery_done;
+ int ls_recovery_result;
struct miscdevice ls_device;
@@ -619,9 +626,8 @@ struct dlm_ls {
/* recovery related */
- struct mutex ls_cb_mutex;
+ spinlock_t ls_cb_lock;
struct list_head ls_cb_delay; /* save for queue_work later */
- struct timer_list ls_timer;
struct task_struct *ls_recoverd_task;
struct mutex ls_recoverd_active;
spinlock_t ls_recover_lock;
@@ -630,33 +636,35 @@ struct dlm_ls {
uint64_t ls_recover_seq;
struct dlm_recover *ls_recover_args;
struct rw_semaphore ls_in_recovery; /* block local requests */
- struct rw_semaphore ls_recv_active; /* block dlm_recv */
+ rwlock_t ls_recv_active; /* block dlm_recv */
struct list_head ls_requestqueue;/* queue remote requests */
- struct mutex ls_requestqueue_mutex;
+ rwlock_t ls_requestqueue_lock;
struct dlm_rcom *ls_recover_buf;
int ls_recover_nodeid; /* for debugging */
- unsigned int ls_recover_dir_sent_res; /* for log info */
- unsigned int ls_recover_dir_sent_msg; /* for log info */
unsigned int ls_recover_locks_in; /* for log info */
uint64_t ls_rcom_seq;
spinlock_t ls_rcom_spin;
struct list_head ls_recover_list;
spinlock_t ls_recover_list_lock;
int ls_recover_list_count;
- struct idr ls_recover_idr;
- spinlock_t ls_recover_idr_lock;
+ struct xarray ls_recover_xa;
+ spinlock_t ls_recover_xa_lock;
wait_queue_head_t ls_wait_general;
wait_queue_head_t ls_recover_lock_wait;
- struct mutex ls_clear_proc_locks;
+ spinlock_t ls_clear_proc_locks;
- struct list_head ls_root_list; /* root resources */
- struct rw_semaphore ls_root_sem; /* protect root_list */
+ struct list_head ls_masters_list; /* root resources */
+ rwlock_t ls_masters_lock; /* protect root_list */
+ struct list_head ls_dir_dump_list; /* root resources */
+ rwlock_t ls_dir_dump_lock; /* protect root_list */
const struct dlm_lockspace_ops *ls_ops;
void *ls_ops_arg;
+ struct work_struct ls_free_work;
+
int ls_namelen;
- char ls_name[1];
+ char ls_name[DLM_LOCKSPACE_LEN + 1];
};
/*
@@ -690,26 +698,11 @@ struct dlm_ls {
#define LSFL_RCOM_READY 5
#define LSFL_RCOM_WAIT 6
#define LSFL_UEVENT_WAIT 7
-#define LSFL_TIMEWARN 8
#define LSFL_CB_DELAY 9
#define LSFL_NODIR 10
-
-/* much of this is just saving user space pointers associated with the
- lock that we pass back to the user lib with an ast */
-
-struct dlm_user_args {
- struct dlm_user_proc *proc; /* each process that opens the lockspace
- device has private data
- (dlm_user_proc) on the struct file,
- the process's locks point back to it*/
- struct dlm_lksb lksb;
- struct dlm_lksb __user *user_lksb;
- void __user *castparam;
- void __user *castaddr;
- void __user *bastparam;
- void __user *bastaddr;
- uint64_t xid;
-};
+#define LSFL_RECV_MSG_BLOCKED 11
+#define LSFL_FS 12
+#define LSFL_SOFTIRQ 13
#define DLM_PROC_FLAGS_CLOSING 1
#define DLM_PROC_FLAGS_COMPAT 2
@@ -743,9 +736,78 @@ static inline int dlm_no_directory(struct dlm_ls *ls)
return test_bit(LSFL_NODIR, &ls->ls_flags);
}
-int dlm_netlink_init(void);
-void dlm_netlink_exit(void);
-void dlm_timeout_warn(struct dlm_lkb *lkb);
+/* takes a snapshot from dlm atomic flags */
+static inline uint32_t dlm_flags_val(const unsigned long *addr,
+ uint32_t min, uint32_t max)
+{
+ uint32_t bit = min, val = 0;
+
+ for_each_set_bit_from(bit, addr, max + 1) {
+ val |= BIT(bit);
+ }
+
+ return val;
+}
+
+static inline uint32_t dlm_iflags_val(const struct dlm_lkb *lkb)
+{
+ return dlm_flags_val(&lkb->lkb_iflags, __DLM_IFL_MIN_BIT,
+ __DLM_IFL_MAX_BIT);
+}
+
+static inline uint32_t dlm_dflags_val(const struct dlm_lkb *lkb)
+{
+ return dlm_flags_val(&lkb->lkb_dflags, __DLM_DFL_MIN_BIT,
+ __DLM_DFL_MAX_BIT);
+}
+
+/* coming from the UAPI header
+ *
+ * TODO:
+ * Move these to the UAPI header, let the existing values refer to them,
+ * and use BIT()
+ */
+#define DLM_SBF_DEMOTED_BIT 0
+#define __DLM_SBF_MIN_BIT DLM_SBF_DEMOTED_BIT
+#define DLM_SBF_VALNOTVALID_BIT 1
+#define DLM_SBF_ALTMODE_BIT 2
+#define __DLM_SBF_MAX_BIT DLM_SBF_ALTMODE_BIT
+
+static inline uint32_t dlm_sbflags_val(const struct dlm_lkb *lkb)
+{
+ /* be sure the next person updates this */
+ BUILD_BUG_ON(BIT(__DLM_SBF_MAX_BIT) != DLM_SBF_ALTMODE);
+
+ return dlm_flags_val(&lkb->lkb_sbflags, __DLM_SBF_MIN_BIT,
+ __DLM_SBF_MAX_BIT);
+}
+
+static inline void dlm_set_flags_val(unsigned long *addr, uint32_t val,
+ uint32_t min, uint32_t max)
+{
+ uint32_t bit;
+
+ for (bit = min; bit < (max + 1); bit++) {
+ if (val & BIT(bit))
+ set_bit(bit, addr);
+ else
+ clear_bit(bit, addr);
+ }
+}
+
+static inline void dlm_set_dflags_val(struct dlm_lkb *lkb, uint32_t val)
+{
+ dlm_set_flags_val(&lkb->lkb_dflags, val, __DLM_DFL_MIN_BIT,
+ __DLM_DFL_MAX_BIT);
+}
+
+static inline void dlm_set_sbflags_val(struct dlm_lkb *lkb, uint32_t val)
+{
+ dlm_set_flags_val(&lkb->lkb_sbflags, val, __DLM_SBF_MIN_BIT,
+ __DLM_SBF_MAX_BIT);
+}
+
+extern struct workqueue_struct *dlm_wq;
+
int dlm_plock_init(void);
void dlm_plock_exit(void);
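
The new dlm_*flags_val()/dlm_set_*flags_val() helpers above exist because the lkb now tracks flags as atomic bits while the wire format (struct dlm_message) still carries 32-bit little-endian flag words. A hedged sketch of how a send/receive path would convert between the two follows; the example_* function names are placeholders, not the patch's actual message code.

#include "dlm_internal.h"	/* dlm_lkb, dlm_message, flag helpers above */

/* build the wire flag words from the lkb's atomic bitfields */
static void example_fill_message_flags(const struct dlm_lkb *lkb,
				       struct dlm_message *ms)
{
	ms->m_sbflags = cpu_to_le32(dlm_sbflags_val(lkb));
	ms->m_flags = cpu_to_le32(dlm_dflags_val(lkb));
}

/* set or clear each tracked bit from the received 32-bit values */
static void example_parse_message_flags(struct dlm_lkb *lkb,
					const struct dlm_message *ms)
{
	dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags));
	dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags));
}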
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index c502c065d007..be938fdf17d9 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -53,6 +53,8 @@
R: do_xxxx()
L: receive_xxxx_reply() <- R: send_xxxx_reply()
*/
+#include <trace/events/dlm.h>
+
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
@@ -84,11 +86,10 @@ static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
- struct dlm_message *ms);
-static int receive_extralen(struct dlm_message *ms);
+ const struct dlm_message *ms, bool local);
+static int receive_extralen(const struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
-static void del_timeout(struct dlm_lkb *lkb);
-static void toss_rsb(struct kref *kref);
+static void deactivate_rsb(struct kref *kref);
/*
* Lock compatibilty matrix - thanks Steve
@@ -162,7 +163,7 @@ void dlm_print_lkb(struct dlm_lkb *lkb)
printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
"sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
- lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
+ dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_rqmode,
lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
(unsigned long long)lkb->lkb_recover_seq);
}
@@ -200,7 +201,7 @@ void dlm_dump_rsb(struct dlm_rsb *r)
/* Threads cannot use the lockspace while it's being recovered */
-static inline void dlm_lock_recovery(struct dlm_ls *ls)
+void dlm_lock_recovery(struct dlm_ls *ls)
{
down_read(&ls->ls_in_recovery);
}
@@ -227,12 +228,12 @@ static inline int force_blocking_asts(struct dlm_lkb *lkb)
static inline int is_demoted(struct dlm_lkb *lkb)
{
- return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
+ return test_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags);
}
static inline int is_altmode(struct dlm_lkb *lkb)
{
- return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
+ return test_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags);
}
static inline int is_granted(struct dlm_lkb *lkb)
@@ -248,12 +249,13 @@ static inline int is_remote(struct dlm_rsb *r)
static inline int is_process_copy(struct dlm_lkb *lkb)
{
- return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
+ return lkb->lkb_nodeid &&
+ !test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
}
static inline int is_master_copy(struct dlm_lkb *lkb)
{
- return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
+ return test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
}
static inline int middle_conversion(struct dlm_lkb *lkb)
@@ -271,18 +273,18 @@ static inline int down_conversion(struct dlm_lkb *lkb)
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
- return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
+ return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
}
static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
- return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
+ return test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
}
static inline int is_overlap(struct dlm_lkb *lkb)
{
- return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
- DLM_IFL_OVERLAP_CANCEL));
+ return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags) ||
+ test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
}
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
@@ -290,23 +292,13 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
if (is_master_copy(lkb))
return;
- del_timeout(lkb);
-
DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
- /* if the operation was a cancel, then return -DLM_ECANCEL, if a
- timeout caused the cancel then return -ETIMEDOUT */
- if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
- lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
- rv = -ETIMEDOUT;
- }
-
- if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
- lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
+ if (rv == -DLM_ECANCEL &&
+ test_and_clear_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags))
rv = -EDEADLK;
- }
- dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
+ dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, dlm_sbflags_val(lkb));
}
static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -328,11 +320,18 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
* Basic operations on rsb's and lkb's
*/
+static inline unsigned long rsb_toss_jiffies(void)
+{
+ return jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ);
+}
+
/* This is only called to add a reference when the code already holds
a valid reference to the rsb, so there's no need for locking. */
static inline void hold_rsb(struct dlm_rsb *r)
{
+ /* inactive rsbs are not ref counted */
+ WARN_ON(rsb_flag(r, RSB_INACTIVE));
kref_get(&r->res_ref);
}
@@ -341,17 +340,45 @@ void dlm_hold_rsb(struct dlm_rsb *r)
hold_rsb(r);
}
-/* When all references to the rsb are gone it's transferred to
- the tossed list for later disposal. */
+/* TODO move this to lib/refcount.c */
+static __must_check bool
+dlm_refcount_dec_and_write_lock_bh(refcount_t *r, rwlock_t *lock)
+__cond_acquires(lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ write_lock_bh(lock);
+ if (!refcount_dec_and_test(r)) {
+ write_unlock_bh(lock);
+ return false;
+ }
+
+ return true;
+}
+
+/* TODO move this to include/linux/kref.h */
+static inline int dlm_kref_put_write_lock_bh(struct kref *kref,
+ void (*release)(struct kref *kref),
+ rwlock_t *lock)
+{
+ if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) {
+ release(kref);
+ return 1;
+ }
+
+ return 0;
+}
static void put_rsb(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- uint32_t bucket = r->res_bucket;
+ int rv;
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- kref_put(&r->res_ref, toss_rsb);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ rv = dlm_kref_put_write_lock_bh(&r->res_ref, deactivate_rsb,
+ &ls->ls_rsbtbl_lock);
+ if (rv)
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
}
void dlm_put_rsb(struct dlm_rsb *r)
@@ -359,141 +386,265 @@ void dlm_put_rsb(struct dlm_rsb *r)
put_rsb(r);
}
-static int pre_rsb_struct(struct dlm_ls *ls)
+/* connected with timer_delete_sync() in dlm_ls_stop() to stop
+ * new timers when recovery is triggered and don't run them
+ * again until a resume_scan_timer() tries it again.
+ */
+static void enable_scan_timer(struct dlm_ls *ls, unsigned long jiffies)
{
- struct dlm_rsb *r1, *r2;
- int count = 0;
+ if (!dlm_locking_stopped(ls))
+ mod_timer(&ls->ls_scan_timer, jiffies);
+}
- spin_lock(&ls->ls_new_rsb_spin);
- if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
- spin_unlock(&ls->ls_new_rsb_spin);
- return 0;
- }
- spin_unlock(&ls->ls_new_rsb_spin);
+/* This function tries to resume the timer callback if an rsb
+ * is on the scan list and no timer is pending. The first entry
+ * might currently be running as the timer callback, but we don't
+ * care if the timer gets queued up again and then does nothing.
+ * Should be a rare case.
+ */
+void resume_scan_timer(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r;
- r1 = dlm_allocate_rsb(ls);
- r2 = dlm_allocate_rsb(ls);
+ spin_lock_bh(&ls->ls_scan_lock);
+ r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ if (r && !timer_pending(&ls->ls_scan_timer))
+ enable_scan_timer(ls, r->res_toss_time);
+ spin_unlock_bh(&ls->ls_scan_lock);
+}
+
+/* ls_rsbtbl_lock must be held */
+
+static void del_scan(struct dlm_ls *ls, struct dlm_rsb *r)
+{
+ struct dlm_rsb *first;
- spin_lock(&ls->ls_new_rsb_spin);
- if (r1) {
- list_add(&r1->res_hashchain, &ls->ls_new_rsb);
- ls->ls_new_rsb_count++;
+ /* active rsbs should never be on the scan list */
+ WARN_ON(!rsb_flag(r, RSB_INACTIVE));
+
+ spin_lock_bh(&ls->ls_scan_lock);
+ r->res_toss_time = 0;
+
+ /* if the rsb is not queued do nothing */
+ if (list_empty(&r->res_scan_list))
+ goto out;
+
+ /* get the first element before delete */
+ first = list_first_entry(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ list_del_init(&r->res_scan_list);
+ /* check if the first element was the rsb we deleted */
+ if (first == r) {
+ /* try to get the new first element. If the list is
+ * empty now, try to delete the timer; if we are too
+ * late we don't care.
+ *
+ * if the list isn't empty and a new first element is
+ * in place, set the new timer expire time.
+ */
+ first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ if (!first)
+ timer_delete(&ls->ls_scan_timer);
+ else
+ enable_scan_timer(ls, first->res_toss_time);
}
- if (r2) {
- list_add(&r2->res_hashchain, &ls->ls_new_rsb);
- ls->ls_new_rsb_count++;
+
+out:
+ spin_unlock_bh(&ls->ls_scan_lock);
+}
+
+static void add_scan(struct dlm_ls *ls, struct dlm_rsb *r)
+{
+ int our_nodeid = dlm_our_nodeid();
+ struct dlm_rsb *first;
+
+ /* A dir record for a remote master rsb should never be on the scan list. */
+ WARN_ON(!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid));
+
+ /* An active rsb should never be on the scan list. */
+ WARN_ON(!rsb_flag(r, RSB_INACTIVE));
+
+ /* An rsb should not already be on the scan list. */
+ WARN_ON(!list_empty(&r->res_scan_list));
+
+ spin_lock_bh(&ls->ls_scan_lock);
+ /* set the new rsb absolute expire time in the rsb */
+ r->res_toss_time = rsb_toss_jiffies();
+ if (list_empty(&ls->ls_scan_list)) {
+ /* if the queue is empty, add the element; its expire
+ * time becomes our next expiration
+ */
+ list_add_tail(&r->res_scan_list, &ls->ls_scan_list);
+ enable_scan_timer(ls, r->res_toss_time);
+ } else {
+ /* get the (possibly new) first element, then add this
+ * rsb, whose expire time is the latest, to the end of
+ * the queue. If the list was empty before, this rsb's
+ * expire time is our next expiration; otherwise the
+ * first element's expire time is.
+ */
+ first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ list_add_tail(&r->res_scan_list, &ls->ls_scan_list);
+ if (!first)
+ enable_scan_timer(ls, r->res_toss_time);
+ else
+ enable_scan_timer(ls, first->res_toss_time);
}
- count = ls->ls_new_rsb_count;
- spin_unlock(&ls->ls_new_rsb_spin);
+ spin_unlock_bh(&ls->ls_scan_lock);
+}
- if (!count)
- return -ENOMEM;
- return 0;
+/* If we hit contention, retry the trylock after 250 ms.
+ * If any other mod_timer in between makes the timer expire
+ * earlier we don't care; this is only for the unlikely case
+ * that nothing happened in this time.
+ */
+#define DLM_TOSS_TIMER_RETRY (jiffies + msecs_to_jiffies(250))
+
+/* Called by lockspace scan_timer to free unused rsb's. */
+
+void dlm_rsb_scan(struct timer_list *timer)
+{
+ struct dlm_ls *ls = timer_container_of(ls, timer, ls_scan_timer);
+ int our_nodeid = dlm_our_nodeid();
+ struct dlm_rsb *r;
+ int rv;
+
+ while (1) {
+ /* interruption point to leave the iteration when
+ * recovery waits for timer_delete_sync(); recovery
+ * will take care of deleting everything on the scan list.
+ */
+ if (dlm_locking_stopped(ls))
+ break;
+
+ rv = spin_trylock(&ls->ls_scan_lock);
+ if (!rv) {
+ /* rearm again try timer */
+ enable_scan_timer(ls, DLM_TOSS_TIMER_RETRY);
+ break;
+ }
+
+ r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ if (!r) {
+ /* the next add_scan will enable the timer again */
+ spin_unlock(&ls->ls_scan_lock);
+ break;
+ }
+
+ /*
+ * If the first rsb is not yet expired, then stop because the
+ * list is sorted with nearest expiration first.
+ */
+ if (time_before(jiffies, r->res_toss_time)) {
+ /* rearm with the next rsb to expire in the future */
+ enable_scan_timer(ls, r->res_toss_time);
+ spin_unlock(&ls->ls_scan_lock);
+ break;
+ }
+
+ /* in find_rsb_dir/nodir this lock is taken in the reverse
+ * order; however, this is only a trylock, so if we hit
+ * possible contention we simply try again.
+ */
+ rv = write_trylock(&ls->ls_rsbtbl_lock);
+ if (!rv) {
+ spin_unlock(&ls->ls_scan_lock);
+ /* rearm the retry timer */
+ enable_scan_timer(ls, DLM_TOSS_TIMER_RETRY);
+ break;
+ }
+
+ list_del(&r->res_slow_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
+ rsb_clear_flag(r, RSB_HASHED);
+
+ /* ls_rsbtbl_lock is not needed when calling send_remove() */
+ write_unlock(&ls->ls_rsbtbl_lock);
+
+ list_del_init(&r->res_scan_list);
+ spin_unlock(&ls->ls_scan_lock);
+
+ /* An rsb that is a dir record for a remote master rsb
+ * cannot be removed, and should not have a timer enabled.
+ */
+ WARN_ON(!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid));
+
+ /* We're the master of this rsb but we're not
+ * the directory record, so we need to tell the
+ * dir node to remove the dir record
+ */
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid == our_nodeid) &&
+ (dlm_dir_nodeid(r) != our_nodeid))
+ send_remove(r);
+
+ free_inactive_rsb(r);
+ }
}
/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
unlock any spinlocks, go back and call pre_rsb_struct again.
Otherwise, take an rsb off the list and return it. */
-static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
+static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
struct dlm_rsb **r_ret)
{
struct dlm_rsb *r;
- int count;
-
- spin_lock(&ls->ls_new_rsb_spin);
- if (list_empty(&ls->ls_new_rsb)) {
- count = ls->ls_new_rsb_count;
- spin_unlock(&ls->ls_new_rsb_spin);
- log_debug(ls, "find_rsb retry %d %d %s",
- count, dlm_config.ci_new_rsb_count, name);
- return -EAGAIN;
- }
- r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
- list_del(&r->res_hashchain);
- /* Convert the empty list_head to a NULL rb_node for tree usage: */
- memset(&r->res_hashnode, 0, sizeof(struct rb_node));
- ls->ls_new_rsb_count--;
- spin_unlock(&ls->ls_new_rsb_spin);
+ r = dlm_allocate_rsb();
+ if (!r)
+ return -ENOMEM;
r->res_ls = ls;
r->res_length = len;
memcpy(r->res_name, name, len);
- mutex_init(&r->res_mutex);
+ spin_lock_init(&r->res_lock);
INIT_LIST_HEAD(&r->res_lookup);
INIT_LIST_HEAD(&r->res_grantqueue);
INIT_LIST_HEAD(&r->res_convertqueue);
INIT_LIST_HEAD(&r->res_waitqueue);
INIT_LIST_HEAD(&r->res_root_list);
+ INIT_LIST_HEAD(&r->res_scan_list);
INIT_LIST_HEAD(&r->res_recover_list);
+ INIT_LIST_HEAD(&r->res_masters_list);
*r_ret = r;
return 0;
}
-static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
+int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len,
+ struct dlm_rsb **r_ret)
{
- char maxname[DLM_RESNAME_MAXLEN];
+ char key[DLM_RESNAME_MAXLEN] = {};
- memset(maxname, 0, DLM_RESNAME_MAXLEN);
- memcpy(maxname, name, nlen);
- return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
-}
+ memcpy(key, name, len);
+ *r_ret = rhashtable_lookup_fast(rhash, &key, dlm_rhash_rsb_params);
+ if (*r_ret)
+ return 0;
-int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
- struct dlm_rsb **r_ret)
-{
- struct rb_node *node = tree->rb_node;
- struct dlm_rsb *r;
- int rc;
-
- while (node) {
- r = rb_entry(node, struct dlm_rsb, res_hashnode);
- rc = rsb_cmp(r, name, len);
- if (rc < 0)
- node = node->rb_left;
- else if (rc > 0)
- node = node->rb_right;
- else
- goto found;
- }
- *r_ret = NULL;
return -EBADR;
-
- found:
- *r_ret = r;
- return 0;
}
-static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
+static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash)
{
- struct rb_node **newn = &tree->rb_node;
- struct rb_node *parent = NULL;
- int rc;
-
- while (*newn) {
- struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
- res_hashnode);
+ int rv;
- parent = *newn;
- rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
- if (rc < 0)
- newn = &parent->rb_left;
- else if (rc > 0)
- newn = &parent->rb_right;
- else {
- log_print("rsb_insert match");
- dlm_dump_rsb(rsb);
- dlm_dump_rsb(cur);
- return -EEXIST;
- }
- }
+ rv = rhashtable_insert_fast(rhash, &rsb->res_node,
+ dlm_rhash_rsb_params);
+ if (!rv)
+ rsb_set_flag(rsb, RSB_HASHED);
- rb_link_node(&rsb->res_hashnode, parent, newn);
- rb_insert_color(&rsb->res_hashnode, tree);
- return 0;
+ return rv;
}
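Both dlm_search_rsb_tree() and rsb_insert() rely on dlm_rhash_rsb_params, whose definition is not part of this hunk. Given that lookups key on a zero-padded DLM_RESNAME_MAXLEN buffer, a plausible shape for that parameter block is sketched below; the exact field values are assumptions for illustration:

/* illustrative sketch only -- the real dlm_rhash_rsb_params is defined
 * elsewhere in fs/dlm and may differ
 */
static const struct rhashtable_params example_rsb_rht_params = {
	.nelem_hint	= 3,
	.key_len	= DLM_RESNAME_MAXLEN,
	.key_offset	= offsetof(struct dlm_rsb, res_name),
	.head_offset	= offsetof(struct dlm_rsb, res_node),
	.automatic_shrinking = true,
};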
/*
@@ -523,7 +674,7 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
* So, if the given rsb is on the toss list, it is moved to the keep list
* before being returned.
*
- * toss_rsb() happens when all local usage of the rsb is done, i.e. no
+ * deactivate_rsb() happens when all local usage of the rsb is done, i.e. no
* more refcounts exist, so the rsb is moved from the keep list to the
* toss list.
*
@@ -540,9 +691,8 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
* while that rsb has a potentially stale master.)
*/
-static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
- uint32_t hash, uint32_t b,
- int dir_nodeid, int from_nodeid,
+static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
+ uint32_t hash, int dir_nodeid, int from_nodeid,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r = NULL;
@@ -572,9 +722,9 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
*
* If someone sends us a request, we are the dir node, and we do
* not find the rsb anywhere, then recreate it. This happens if
- * someone sends us a request after we have removed/freed an rsb
- * from our toss list. (They sent a request instead of lookup
- * because they are using an rsb from their toss list.)
+ * someone sends us a request after we have removed/freed an rsb.
+ * (They sent a request instead of lookup because they are using
+ * an rsb taken from their scan list.)
*/
if (from_local || from_dir ||
@@ -583,52 +733,83 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
}
retry:
- if (create) {
- error = pre_rsb_struct(ls);
- if (error < 0)
- goto out;
- }
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
-
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (error)
- goto do_toss;
+ goto do_new;
+
+ /* check if the rsb is active under read lock - likely path */
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ if (!rsb_flag(r, RSB_HASHED)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ error = -EBADR;
+ goto do_new;
+ }
/*
* rsb is active, so we can't check master_nodeid without lock_rsb.
*/
+ if (rsb_flag(r, RSB_INACTIVE)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_inactive;
+ }
+
kref_get(&r->res_ref);
- error = 0;
- goto out_unlock;
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto out;
- do_toss:
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
+ do_inactive:
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+
+ /*
+ * The expectation here is that the rsb will have HASHED and
+ * INACTIVE flags set, and that the rsb can be moved from
+ * inactive back to active again. However, between releasing
+ * the read lock and acquiring the write lock, this rsb could
+ * have been removed from rsbtbl, and had HASHED cleared, to
+ * be freed. To deal with this case, we would normally need
+ * to repeat dlm_search_rsb_tree while holding the write lock,
+ * but rcu allows us to simply check the HASHED flag, because
+ * the rcu read lock means the rsb will not be freed yet.
+ * If the HASHED flag is not set, then the rsb is being freed,
+ * so we add a new rsb struct. If the HASHED flag is set,
+ * and INACTIVE is not set, it means another thread has
+ * made the rsb active, as we're expecting to do here, and
+ * we just repeat the lookup (this will be very unlikely).
+ */
+ if (rsb_flag(r, RSB_HASHED)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto retry;
+ }
+ } else {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ error = -EBADR;
goto do_new;
+ }
/*
* rsb found inactive (master_nodeid may be out of date unless
* we are the dir_nodeid or were the master) No other thread
- * is using this rsb because it's on the toss list, so we can
+ * is using this rsb because it's inactive, so we can
* look at or update res_master_nodeid without lock_rsb.
*/
if ((r->res_master_nodeid != our_nodeid) && from_other) {
/* our rsb was not master, and another node (not the dir node)
has sent us a request */
- log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
+ log_debug(ls, "find_rsb inactive from_other %d master %d dir %d %s",
from_nodeid, r->res_master_nodeid, dir_nodeid,
r->res_name);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
error = -ENOTBLK;
- goto out_unlock;
+ goto out;
}
if ((r->res_master_nodeid != our_nodeid) && from_dir) {
/* don't think this should ever happen */
- log_error(ls, "find_rsb toss from_dir %d master %d",
+ log_error(ls, "find_rsb inactive from_dir %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
/* fix it and go on */
@@ -645,9 +826,18 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
r->res_first_lkid = 0;
}
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- goto out_unlock;
+ /* we always deactivate the scan timer for the rsb when
+ * we move it out of the inactive state, as the rsb state
+ * can change and scan timers are only for inactive
+ * rsbs.
+ */
+ del_scan(ls, r);
+ list_move(&r->res_slow_list, &ls->ls_slow_active);
+ rsb_clear_flag(r, RSB_INACTIVE);
+ kref_init(&r->res_ref); /* ref is now used in active state */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ goto out;
do_new:
@@ -656,18 +846,13 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
*/
if (error == -EBADR && !create)
- goto out_unlock;
+ goto out;
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- goto retry;
- }
- if (error)
- goto out_unlock;
+ if (WARN_ON_ONCE(error))
+ goto out;
r->res_hash = hash;
- r->res_bucket = b;
r->res_dir_nodeid = dir_nodeid;
kref_init(&r->res_ref);
@@ -687,7 +872,7 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
dlm_free_rsb(r);
r = NULL;
error = -ENOTBLK;
- goto out_unlock;
+ goto out;
}
if (from_other) {
@@ -707,9 +892,20 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
}
out_add:
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- out_unlock:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ error = rsb_insert(r, &ls->ls_rsbtbl);
+ if (error == -EEXIST) {
+ /* somebody else was faster and the rsb exists
+ * now, so we do a whole relookup
+ */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ dlm_free_rsb(r);
+ goto retry;
+ } else if (!error) {
+ list_add(&r->res_slow_list, &ls->ls_slow_active);
+ }
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
out:
*r_ret = r;
return error;
@@ -719,9 +915,8 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
dlm_recover_locks) before we've made ourself master (in
dlm_recover_masters). */
-static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
- uint32_t hash, uint32_t b,
- int dir_nodeid, int from_nodeid,
+static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
+ uint32_t hash, int dir_nodeid, int from_nodeid,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r = NULL;
@@ -730,59 +925,82 @@ static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
int error;
retry:
- error = pre_rsb_struct(ls);
- if (error < 0)
- goto out;
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (error)
+ goto do_new;
- spin_lock(&ls->ls_rsbtbl[b].lock);
+ /* check if the rsb is in active state under read lock - likely path */
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ if (!rsb_flag(r, RSB_HASHED)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_new;
+ }
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (error)
- goto do_toss;
+ if (rsb_flag(r, RSB_INACTIVE)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_inactive;
+ }
/*
* rsb is active, so we can't check master_nodeid without lock_rsb.
*/
kref_get(&r->res_ref);
- goto out_unlock;
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto out;
- do_toss:
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
+
+ do_inactive:
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+
+ /* See comment in find_rsb_dir. */
+ if (rsb_flag(r, RSB_HASHED)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto retry;
+ }
+ } else {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
goto do_new;
+ }
+
/*
* rsb found inactive. No other thread is using this rsb because
- * it's on the toss list, so we can look at or update
- * res_master_nodeid without lock_rsb.
+ * it's inactive, so we can look at or update res_master_nodeid
+ * without lock_rsb.
*/
if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
/* our rsb is not master, and another node has sent us a
request; this should never happen */
- log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
+ log_error(ls, "find_rsb inactive from_nodeid %d master %d dir %d",
from_nodeid, r->res_master_nodeid, dir_nodeid);
dlm_print_rsb(r);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
error = -ENOTBLK;
- goto out_unlock;
+ goto out;
}
if (!recover && (r->res_master_nodeid != our_nodeid) &&
(dir_nodeid == our_nodeid)) {
/* our rsb is not master, and we are dir; may as well fix it;
this should never happen */
- log_error(ls, "find_rsb toss our %d master %d dir %d",
+ log_error(ls, "find_rsb inactive our %d master %d dir %d",
our_nodeid, r->res_master_nodeid, dir_nodeid);
dlm_print_rsb(r);
r->res_master_nodeid = our_nodeid;
r->res_nodeid = 0;
}
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- goto out_unlock;
+ del_scan(ls, r);
+ list_move(&r->res_slow_list, &ls->ls_slow_active);
+ rsb_clear_flag(r, RSB_INACTIVE);
+ kref_init(&r->res_ref);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ goto out;
do_new:
@@ -791,48 +1009,98 @@ static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
*/
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- goto retry;
- }
- if (error)
- goto out_unlock;
+ if (WARN_ON_ONCE(error))
+ goto out;
r->res_hash = hash;
- r->res_bucket = b;
r->res_dir_nodeid = dir_nodeid;
r->res_master_nodeid = dir_nodeid;
r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
kref_init(&r->res_ref);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- out_unlock:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ error = rsb_insert(r, &ls->ls_rsbtbl);
+ if (error == -EEXIST) {
+ /* somebody else was faster and the rsb exists
+ * now, so we do a whole relookup
+ */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ dlm_free_rsb(r);
+ goto retry;
+ } else if (!error) {
+ list_add(&r->res_slow_list, &ls->ls_slow_active);
+ }
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
out:
*r_ret = r;
return error;
}
-static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
- unsigned int flags, struct dlm_rsb **r_ret)
+/*
+ * rsb rcu usage
+ *
+ * While rcu read lock is held, the rsb cannot be freed,
+ * which allows a lookup optimization.
+ *
+ * Two threads are accessing the same rsb concurrently,
+ * the first (A) is trying to use the rsb, the second (B)
+ * is trying to free the rsb.
+ *
+ * thread A thread B
+ * (trying to use rsb) (trying to free rsb)
+ *
+ * A1. rcu read lock
+ * A2. rsbtbl read lock
+ * A3. look up rsb in rsbtbl
+ * A4. rsbtbl read unlock
+ * B1. rsbtbl write lock
+ * B2. look up rsb in rsbtbl
+ * B3. remove rsb from rsbtbl
+ * B4. clear rsb HASHED flag
+ * B5. rsbtbl write unlock
+ * B6. begin freeing rsb using rcu...
+ *
+ * (rsb is inactive, so try to make it active again)
+ * A5. read rsb HASHED flag (safe because rsb is not freed yet)
+ * A6. the rsb HASHED flag is not set, which means the rsb
+ * is being removed from rsbtbl and freed, so don't use it.
+ * A7. rcu read unlock
+ *
+ * B7. ...finish freeing rsb using rcu
+ * A8. create a new rsb
+ *
+ * Without the rcu optimization, steps A5-8 would need to do
+ * an extra rsbtbl lookup:
+ * A5. rsbtbl write lock
+ * A6. look up rsb in rsbtbl, not found
+ * A7. rsbtbl write unlock
+ * A8. create a new rsb
+ */
+
+static int find_rsb(struct dlm_ls *ls, const void *name, int len,
+ int from_nodeid, unsigned int flags,
+ struct dlm_rsb **r_ret)
{
- uint32_t hash, b;
int dir_nodeid;
+ uint32_t hash;
+ int rv;
if (len > DLM_RESNAME_MAXLEN)
return -EINVAL;
hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
dir_nodeid = dlm_hash2nodeid(ls, hash);
+ rcu_read_lock();
if (dlm_no_directory(ls))
- return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
+ rv = find_rsb_nodir(ls, name, len, hash, dir_nodeid,
from_nodeid, flags, r_ret);
else
- return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
- from_nodeid, flags, r_ret);
+ rv = find_rsb_dir(ls, name, len, hash, dir_nodeid,
+ from_nodeid, flags, r_ret);
+ rcu_read_unlock();
+ return rv;
}
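The thread A/B walk-through above reduces to one idiom: look the rsb up once under rcu_read_lock(), and when the write lock is eventually taken, re-validate the HASHED flag instead of repeating the hash lookup. A stripped-down sketch of that idiom follows (illustrative only, not part of the patch; the INACTIVE retry path, del_scan() and list handling are omitted):

/* minimal sketch of the rcu re-validation idiom used in find_rsb_dir/nodir */
static struct dlm_rsb *lookup_and_reactivate(struct dlm_ls *ls,
					     const void *name, int len)
{
	struct dlm_rsb *r;

	rcu_read_lock();
	if (dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r)) {
		rcu_read_unlock();
		return NULL;		/* caller allocates a new rsb */
	}

	write_lock_bh(&ls->ls_rsbtbl_lock);
	if (!rsb_flag(r, RSB_HASHED)) {
		/* removed from rsbtbl and queued for freeing; the rcu read
		 * lock keeps the memory valid for this check, but the rsb
		 * must not be reused
		 */
		write_unlock_bh(&ls->ls_rsbtbl_lock);
		rcu_read_unlock();
		return NULL;
	}
	rsb_clear_flag(r, RSB_INACTIVE);
	kref_init(&r->res_ref);		/* ref is now used in active state */
	write_unlock_bh(&ls->ls_rsbtbl_lock);
	rcu_read_unlock();
	return r;
}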
/* we have received a request and found that res_master_nodeid != our_nodeid,
@@ -878,6 +1146,88 @@ static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
}
}
+static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid,
+ int from_nodeid, bool is_inactive, unsigned int flags,
+ int *r_nodeid, int *result)
+{
+ int fix_master = (flags & DLM_LU_RECOVER_MASTER);
+ int from_master = (flags & DLM_LU_RECOVER_DIR);
+
+ if (r->res_dir_nodeid != our_nodeid) {
+ /* should not happen, but may as well fix it and carry on */
+ log_error(ls, "%s res_dir %d our %d %s", __func__,
+ r->res_dir_nodeid, our_nodeid, r->res_name);
+ r->res_dir_nodeid = our_nodeid;
+ }
+
+ if (fix_master && r->res_master_nodeid && dlm_is_removed(ls, r->res_master_nodeid)) {
+ /* Recovery uses this function to set a new master when
+ * the previous master failed. Setting NEW_MASTER will
+ * force dlm_recover_masters to call recover_master on this
+ * rsb even though the res_nodeid is no longer removed.
+ */
+
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ rsb_set_flag(r, RSB_NEW_MASTER);
+
+ if (is_inactive) {
+ /* I don't think we should ever find it inactive. */
+ log_error(ls, "%s fix_master inactive", __func__);
+ dlm_dump_rsb(r);
+ }
+ }
+
+ if (from_master && (r->res_master_nodeid != from_nodeid)) {
+ /* this will happen if from_nodeid became master during
+ * a previous recovery cycle, and we aborted the previous
+ * cycle before recovering this master value
+ */
+
+ log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s",
+ __func__, from_nodeid, r->res_master_nodeid,
+ r->res_nodeid, r->res_first_lkid, r->res_name);
+
+ if (r->res_master_nodeid == our_nodeid) {
+ log_error(ls, "from_master %d our_master", from_nodeid);
+ dlm_dump_rsb(r);
+ goto ret_assign;
+ }
+
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ rsb_set_flag(r, RSB_NEW_MASTER);
+ }
+
+ if (!r->res_master_nodeid) {
+ /* this will happen if recovery happens while we're looking
+ * up the master for this rsb
+ */
+
+ log_debug(ls, "%s master 0 to %d first %x %s", __func__,
+ from_nodeid, r->res_first_lkid, r->res_name);
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ }
+
+ if (!from_master && !fix_master &&
+ (r->res_master_nodeid == from_nodeid)) {
+ /* this can happen when the master sends remove, the dir node
+ * finds the rsb on the active list and ignores the remove,
+ * and the former master sends a lookup
+ */
+
+ log_limit(ls, "%s from master %d flags %x first %x %s",
+ __func__, from_nodeid, flags, r->res_first_lkid,
+ r->res_name);
+ }
+
+ ret_assign:
+ *r_nodeid = r->res_master_nodeid;
+ if (result)
+ *result = DLM_LU_MATCH;
+}
+
/*
* We're the dir node for this res and another node wants to know the
* master nodeid. During normal operation (non recovery) this is only
@@ -907,15 +1257,13 @@ static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
* . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
*/
-int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
- unsigned int flags, int *r_nodeid, int *result)
+static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
+ int len, unsigned int flags, int *r_nodeid, int *result)
{
struct dlm_rsb *r = NULL;
- uint32_t hash, b;
- int from_master = (flags & DLM_LU_RECOVER_DIR);
- int fix_master = (flags & DLM_LU_RECOVER_MASTER);
+ uint32_t hash;
int our_nodeid = dlm_our_nodeid();
- int dir_nodeid, error, toss_list = 0;
+ int dir_nodeid, error;
if (len > DLM_RESNAME_MAXLEN)
return -EINVAL;
@@ -927,8 +1275,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
}
hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
dir_nodeid = dlm_hash2nodeid(ls, hash);
if (dir_nodeid != our_nodeid) {
log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
@@ -939,226 +1285,199 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
}
retry:
- error = pre_rsb_struct(ls);
- if (error < 0)
- return error;
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (!error) {
- /* because the rsb is active, we need to lock_rsb before
- checking/changing re_master_nodeid */
-
- hold_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- lock_rsb(r);
- goto found;
- }
-
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (error)
goto not_found;
- /* because the rsb is inactive (on toss list), it's not refcounted
- and lock_rsb is not used, but is protected by the rsbtbl lock */
+ /* check if the rsb is active under read lock - likely path */
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ if (!rsb_flag(r, RSB_HASHED)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto not_found;
+ }
- toss_list = 1;
- found:
- if (r->res_dir_nodeid != our_nodeid) {
- /* should not happen, but may as well fix it and carry on */
- log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
- r->res_dir_nodeid, our_nodeid, r->res_name);
- r->res_dir_nodeid = our_nodeid;
+ if (rsb_flag(r, RSB_INACTIVE)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_inactive;
}
- if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
- /* Recovery uses this function to set a new master when
- the previous master failed. Setting NEW_MASTER will
- force dlm_recover_masters to call recover_master on this
- rsb even though the res_nodeid is no longer removed. */
+ /* because the rsb is active, we need to lock_rsb before
+ * checking/changing res_master_nodeid
+ */
- r->res_master_nodeid = from_nodeid;
- r->res_nodeid = from_nodeid;
- rsb_set_flag(r, RSB_NEW_MASTER);
+ hold_rsb(r);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ lock_rsb(r);
- if (toss_list) {
- /* I don't think we should ever find it on toss list. */
- log_error(ls, "dlm_master_lookup fix_master on toss");
- dlm_dump_rsb(r);
- }
- }
+ __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
+ flags, r_nodeid, result);
- if (from_master && (r->res_master_nodeid != from_nodeid)) {
- /* this will happen if from_nodeid became master during
- a previous recovery cycle, and we aborted the previous
- cycle before recovering this master value */
+ /* the rsb was active */
+ unlock_rsb(r);
+ put_rsb(r);
- log_limit(ls, "dlm_master_lookup from_master %d "
- "master_nodeid %d res_nodeid %d first %x %s",
- from_nodeid, r->res_master_nodeid, r->res_nodeid,
- r->res_first_lkid, r->res_name);
+ return 0;
- if (r->res_master_nodeid == our_nodeid) {
- log_error(ls, "from_master %d our_master", from_nodeid);
- dlm_dump_rsb(r);
- goto out_found;
- }
+ do_inactive:
+ /* unlikely path - check if still part of ls_rsbtbl */
+ write_lock_bh(&ls->ls_rsbtbl_lock);
- r->res_master_nodeid = from_nodeid;
- r->res_nodeid = from_nodeid;
- rsb_set_flag(r, RSB_NEW_MASTER);
+ /* see comment in find_rsb_dir */
+ if (rsb_flag(r, RSB_HASHED)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ /* something has changed, very unlikely but
+ * try again
+ */
+ goto retry;
+ }
+ } else {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto not_found;
}
- if (!r->res_master_nodeid) {
- /* this will happen if recovery happens while we're looking
- up the master for this rsb */
+ /* because the rsb is inactive, it's not refcounted and lock_rsb
+ is not used, but is protected by the rsbtbl lock */
- log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
- from_nodeid, r->res_first_lkid, r->res_name);
- r->res_master_nodeid = from_nodeid;
- r->res_nodeid = from_nodeid;
- }
+ __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
+ r_nodeid, result);
- if (!from_master && !fix_master &&
- (r->res_master_nodeid == from_nodeid)) {
- /* this can happen when the master sends remove, the dir node
- finds the rsb on the keep list and ignores the remove,
- and the former master sends a lookup */
+ /* A dir record rsb should never be on the scan list,
+ * except when we are both the dir and master node.
+ * This function should only be called by the dir
+ * node.
+ */
+ WARN_ON(!list_empty(&r->res_scan_list) &&
+ r->res_master_nodeid != our_nodeid);
- log_limit(ls, "dlm_master_lookup from master %d flags %x "
- "first %x %s", from_nodeid, flags,
- r->res_first_lkid, r->res_name);
- }
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
- out_found:
- *r_nodeid = r->res_master_nodeid;
- if (result)
- *result = DLM_LU_MATCH;
-
- if (toss_list) {
- r->res_toss_time = jiffies;
- /* the rsb was inactive (on toss list) */
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- } else {
- /* the rsb was active */
- unlock_rsb(r);
- put_rsb(r);
- }
return 0;
not_found:
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- goto retry;
- }
- if (error)
- goto out_unlock;
+ if (WARN_ON_ONCE(error))
+ goto out;
r->res_hash = hash;
- r->res_bucket = b;
r->res_dir_nodeid = our_nodeid;
r->res_master_nodeid = from_nodeid;
r->res_nodeid = from_nodeid;
- kref_init(&r->res_ref);
- r->res_toss_time = jiffies;
-
- error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
- if (error) {
+ rsb_set_flag(r, RSB_INACTIVE);
+
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ error = rsb_insert(r, &ls->ls_rsbtbl);
+ if (error == -EEXIST) {
+ /* somebody else was faster and the rsb exists
+ * now, so we do a whole relookup
+ */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ dlm_free_rsb(r);
+ goto retry;
+ } else if (error) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
/* should never happen */
dlm_free_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
goto retry;
}
+ list_add(&r->res_slow_list, &ls->ls_slow_inactive);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
if (result)
*result = DLM_LU_ADD;
*r_nodeid = from_nodeid;
- error = 0;
- out_unlock:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ out:
return error;
}
+int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
+ int len, unsigned int flags, int *r_nodeid, int *result)
+{
+ int rv;
+ rcu_read_lock();
+ rv = _dlm_master_lookup(ls, from_nodeid, name, len, flags, r_nodeid, result);
+ rcu_read_unlock();
+ return rv;
+}
+
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
- struct rb_node *n;
struct dlm_rsb *r;
- int i;
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- spin_lock(&ls->ls_rsbtbl[i].lock);
- for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- if (r->res_hash == hash)
- dlm_dump_rsb(r);
- }
- spin_unlock(&ls->ls_rsbtbl[i].lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
+ if (r->res_hash == hash)
+ dlm_dump_rsb(r);
}
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
-void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
+void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
{
struct dlm_rsb *r = NULL;
- uint32_t hash, b;
int error;
- hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ rcu_read_lock();
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (!error)
- goto out_dump;
-
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
goto out;
- out_dump:
+
dlm_dump_rsb(r);
out:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ rcu_read_unlock();
}
-static void toss_rsb(struct kref *kref)
+static void deactivate_rsb(struct kref *kref)
{
struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
struct dlm_ls *ls = r->res_ls;
+ int our_nodeid = dlm_our_nodeid();
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
- kref_init(&r->res_ref);
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
- rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
- r->res_toss_time = jiffies;
- ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
+ rsb_set_flag(r, RSB_INACTIVE);
+ list_move(&r->res_slow_list, &ls->ls_slow_inactive);
+
+ /*
+ * When the rsb becomes unused, there are two possibilities:
+ * 1. Leave the inactive rsb in place (don't remove it).
+ * 2. Add it to the scan list to be removed.
+ *
+ * 1 is done when the rsb is acting as the dir record
+ * for a remotely mastered rsb. The rsb must be left
+ * in place as an inactive rsb to act as the dir record.
+ *
+ * 2 is done when a) the rsb is not the master and not the
+ * dir record, b) when the rsb is both the master and the
+ * dir record, c) when the rsb is master but not dir record.
+ *
+ * (If no directory is used, the rsb can always be removed.)
+ */
+ if (dlm_no_directory(ls) ||
+ (r->res_master_nodeid == our_nodeid ||
+ dlm_dir_nodeid(r) != our_nodeid))
+ add_scan(ls, r);
+
if (r->res_lvbptr) {
dlm_free_lvb(r->res_lvbptr);
r->res_lvbptr = NULL;
}
}
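Working through the condition above when a directory is in use: the only combination that skips add_scan() is res_master_nodeid != our_nodeid while dlm_dir_nodeid(r) == our_nodeid, i.e. we hold the dir record for an rsb mastered on another node (possibility 1). Combinations a), b) and c) all satisfy the condition and get a scan timer, and with dlm_no_directory(ls) the rsb is always queued for removal.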
-/* See comment for unhold_lkb */
-
-static void unhold_rsb(struct dlm_rsb *r)
+void free_inactive_rsb(struct dlm_rsb *r)
{
- int rv;
- rv = kref_put(&r->res_ref, toss_rsb);
- DLM_ASSERT(!rv, dlm_dump_rsb(r););
-}
-
-static void kill_rsb(struct kref *kref)
-{
- struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
-
- /* All work is done after the return from kref_put() so we
- can release the write_lock before the remove and free. */
+ WARN_ON_ONCE(!rsb_flag(r, RSB_INACTIVE));
DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
+ DLM_ASSERT(list_empty(&r->res_scan_list), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
+ DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r););
+
+ dlm_free_rsb(r);
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
@@ -1178,35 +1497,35 @@ static void detach_lkb(struct dlm_lkb *lkb)
}
}
-static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
+ unsigned long start, unsigned long end)
{
+ struct xa_limit limit;
struct dlm_lkb *lkb;
int rv;
- lkb = dlm_allocate_lkb(ls);
+ limit.max = end;
+ limit.min = start;
+
+ lkb = dlm_allocate_lkb();
if (!lkb)
return -ENOMEM;
+ lkb->lkb_last_bast_cb_mode = DLM_LOCK_IV;
+ lkb->lkb_last_cast_cb_mode = DLM_LOCK_IV;
+ lkb->lkb_last_cb_mode = DLM_LOCK_IV;
lkb->lkb_nodeid = -1;
lkb->lkb_grmode = DLM_LOCK_IV;
kref_init(&lkb->lkb_ref);
INIT_LIST_HEAD(&lkb->lkb_ownqueue);
INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
- INIT_LIST_HEAD(&lkb->lkb_time_list);
- INIT_LIST_HEAD(&lkb->lkb_cb_list);
- mutex_init(&lkb->lkb_cb_mutex);
- INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
-
- idr_preload(GFP_NOFS);
- spin_lock(&ls->ls_lkbidr_spin);
- rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
- if (rv >= 0)
- lkb->lkb_id = rv;
- spin_unlock(&ls->ls_lkbidr_spin);
- idr_preload_end();
+
+ write_lock_bh(&ls->ls_lkbxa_lock);
+ rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb, limit, GFP_ATOMIC);
+ write_unlock_bh(&ls->ls_lkbxa_lock);
if (rv < 0) {
- log_error(ls, "create_lkb idr error %d", rv);
+ log_error(ls, "create_lkb xa error %d", rv);
dlm_free_lkb(lkb);
return rv;
}
@@ -1215,15 +1534,30 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
return 0;
}
+static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+{
+ return _create_lkb(ls, lkb_ret, 1, ULONG_MAX);
+}
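_create_lkb() fills struct xa_limit by hand so that callers can pass arbitrary ranges; for the fixed range used by create_lkb() the xarray API's XA_LIMIT() macro expresses the same thing inline (struct xa_limit carries 32-bit bounds, so ULONG_MAX and UINT_MAX coincide here). A hedged sketch of an equivalent allocation, with a hypothetical helper name:

/* hypothetical helper, not in the patch: same effective range as create_lkb() */
static int example_alloc_lkb_id(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	int rv;

	write_lock_bh(&ls->ls_lkbxa_lock);
	rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb,
		      XA_LIMIT(1, UINT_MAX), GFP_ATOMIC);
	write_unlock_bh(&ls->ls_lkbxa_lock);
	return rv;
}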
+
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
- spin_lock(&ls->ls_lkbidr_spin);
- lkb = idr_find(&ls->ls_lkbidr, lkid);
- if (lkb)
- kref_get(&lkb->lkb_ref);
- spin_unlock(&ls->ls_lkbidr_spin);
+ rcu_read_lock();
+ lkb = xa_load(&ls->ls_lkbxa, lkid);
+ if (lkb) {
+ /* check if lkb is still part of lkbxa under lkbxa_lock as
+ * the lkb_ref is tied to the lkbxa data structure, see
+ * __put_lkb().
+ */
+ read_lock_bh(&ls->ls_lkbxa_lock);
+ if (kref_read(&lkb->lkb_ref))
+ kref_get(&lkb->lkb_ref);
+ else
+ lkb = NULL;
+ read_unlock_bh(&ls->ls_lkbxa_lock);
+ }
+ rcu_read_unlock();
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
@@ -1245,11 +1579,13 @@ static void kill_lkb(struct kref *kref)
static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
uint32_t lkid = lkb->lkb_id;
+ int rv;
- spin_lock(&ls->ls_lkbidr_spin);
- if (kref_put(&lkb->lkb_ref, kill_lkb)) {
- idr_remove(&ls->ls_lkbidr, lkid);
- spin_unlock(&ls->ls_lkbidr_spin);
+ rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb,
+ &ls->ls_lkbxa_lock);
+ if (rv) {
+ xa_erase(&ls->ls_lkbxa, lkid);
+ write_unlock_bh(&ls->ls_lkbxa_lock);
detach_lkb(lkb);
@@ -1257,11 +1593,9 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
if (lkb->lkb_lvbptr && is_master_copy(lkb))
dlm_free_lvb(lkb->lkb_lvbptr);
dlm_free_lkb(lkb);
- return 1;
- } else {
- spin_unlock(&ls->ls_lkbidr_spin);
- return 0;
}
+
+ return rv;
}
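dlm_kref_put_write_lock_bh() is not shown in this hunk. By analogy with the generic kref_put_lock()/kref_put_mutex() helpers it presumably takes the rwlock only when the count is about to reach zero and invokes the release function with the lock still held, so __put_lkb() can erase the id from ls_lkbxa before unlocking. A sketch under that assumption:

/* assumed shape of the helper used by __put_lkb(); the real definition
 * lives elsewhere in fs/dlm and may differ in detail
 */
static inline int dlm_kref_put_write_lock_bh(struct kref *kref,
					     void (*release)(struct kref *kref),
					     rwlock_t *lock)
{
	if (refcount_dec_not_one(&kref->refcount))
		return 0;	/* common case: not the last reference */

	write_lock_bh(lock);
	if (!refcount_dec_and_test(&kref->refcount)) {
		write_unlock_bh(lock);
		return 0;
	}

	release(kref);	/* caller unlocks after removing the object */
	return 1;
}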
int dlm_put_lkb(struct dlm_lkb *lkb)
@@ -1283,6 +1617,13 @@ static inline void hold_lkb(struct dlm_lkb *lkb)
kref_get(&lkb->lkb_ref);
}
+static void unhold_lkb_assert(struct kref *kref)
+{
+ struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
+
+ DLM_ASSERT(false, dlm_print_lkb(lkb););
+}
+
/* This is called when we need to remove a reference and are certain
it's not the last ref. e.g. del_lkb is always called between a
find_lkb/put_lkb and is always the inverse of a previous add_lkb.
@@ -1290,21 +1631,23 @@ static inline void hold_lkb(struct dlm_lkb *lkb)
static inline void unhold_lkb(struct dlm_lkb *lkb)
{
- int rv;
- rv = kref_put(&lkb->lkb_ref, kill_lkb);
- DLM_ASSERT(!rv, dlm_print_lkb(lkb););
+ kref_put(&lkb->lkb_ref, unhold_lkb_assert);
}
static void lkb_add_ordered(struct list_head *new, struct list_head *head,
int mode)
{
- struct dlm_lkb *lkb = NULL;
+ struct dlm_lkb *lkb = NULL, *iter;
- list_for_each_entry(lkb, head, lkb_statequeue)
- if (lkb->lkb_rqmode < mode)
+ list_for_each_entry(iter, head, lkb_statequeue)
+ if (iter->lkb_rqmode < mode) {
+ lkb = iter;
+ list_add_tail(new, &iter->lkb_statequeue);
break;
+ }
- __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
+ if (!lkb)
+ list_add_tail(new, head);
}
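The rewritten loop keeps the queue sorted by decreasing request mode and no longer treats the bare list head as a struct dlm_lkb when no entry has a lower mode. For example, inserting a PW (mode 4) request into a queue holding EX, PR, NL lands between EX and PR, while inserting an NL (mode 0) request falls through the loop and is appended at the tail.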
/* add/remove lkb to rsb's grant/convert/wait queue */
@@ -1352,10 +1695,8 @@ static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
- hold_lkb(lkb);
del_lkb(r, lkb);
add_lkb(r, lkb, sts);
- unhold_lkb(lkb);
}
static int msg_reply_type(int mstype)
@@ -1375,101 +1716,28 @@ static int msg_reply_type(int mstype)
return -1;
}
-static int nodeid_warned(int nodeid, int num_nodes, int *warned)
-{
- int i;
-
- for (i = 0; i < num_nodes; i++) {
- if (!warned[i]) {
- warned[i] = nodeid;
- return 0;
- }
- if (warned[i] == nodeid)
- return 1;
- }
- return 0;
-}
-
-void dlm_scan_waiters(struct dlm_ls *ls)
-{
- struct dlm_lkb *lkb;
- s64 us;
- s64 debug_maxus = 0;
- u32 debug_scanned = 0;
- u32 debug_expired = 0;
- int num_nodes = 0;
- int *warned = NULL;
-
- if (!dlm_config.ci_waitwarn_us)
- return;
-
- mutex_lock(&ls->ls_waiters_mutex);
-
- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
- if (!lkb->lkb_wait_time)
- continue;
-
- debug_scanned++;
-
- us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
-
- if (us < dlm_config.ci_waitwarn_us)
- continue;
-
- lkb->lkb_wait_time = 0;
-
- debug_expired++;
- if (us > debug_maxus)
- debug_maxus = us;
-
- if (!num_nodes) {
- num_nodes = ls->ls_num_nodes;
- warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
- }
- if (!warned)
- continue;
- if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
- continue;
-
- log_error(ls, "waitwarn %x %lld %d us check connection to "
- "node %d", lkb->lkb_id, (long long)us,
- dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
- }
- mutex_unlock(&ls->ls_waiters_mutex);
- kfree(warned);
-
- if (debug_expired)
- log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
- debug_scanned, debug_expired,
- dlm_config.ci_waitwarn_us, (long long)debug_maxus);
-}
-
/* add/remove lkb from global waiters list of lkb's waiting for
a reply from a remote node */
-static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
+static void add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- int error = 0;
-
- mutex_lock(&ls->ls_waiters_mutex);
-
- if (is_overlap_unlock(lkb) ||
- (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
- error = -EINVAL;
- goto out;
- }
+ spin_lock_bh(&ls->ls_waiters_lock);
if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
switch (mstype) {
case DLM_MSG_UNLOCK:
- lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
+ set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
break;
case DLM_MSG_CANCEL:
- lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
+ set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
break;
default:
- error = -EBUSY;
+ /* should never happen as validate_lock_args() checks
+ * on lkb_wait_type and validate_unlock_args() only
+ * creates UNLOCK or CANCEL messages.
+ */
+ WARN_ON_ONCE(1);
goto out;
}
lkb->lkb_wait_count++;
@@ -1477,7 +1745,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
lkb->lkb_id, lkb->lkb_wait_type, mstype,
- lkb->lkb_wait_count, lkb->lkb_flags);
+ lkb->lkb_wait_count, dlm_iflags_val(lkb));
goto out;
}
@@ -1487,17 +1755,11 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
lkb->lkb_wait_count++;
lkb->lkb_wait_type = mstype;
- lkb->lkb_wait_time = ktime_get();
lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
hold_lkb(lkb);
list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
out:
- if (error)
- log_error(ls, "addwait error %x %d flags %x %d %d %s",
- lkb->lkb_id, error, lkb->lkb_flags, mstype,
- lkb->lkb_wait_type, lkb->lkb_resource->res_name);
- mutex_unlock(&ls->ls_waiters_mutex);
- return error;
+ spin_unlock_bh(&ls->ls_waiters_lock);
}
/* We clear the RESEND flag because we might be taking an lkb off the waiters
@@ -1506,21 +1768,21 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
set RESEND and dlm_recover_waiters_post() */
static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int overlap_done = 0;
- if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
+ if (mstype == DLM_MSG_UNLOCK_REPLY &&
+ test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) {
log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
overlap_done = 1;
goto out_del;
}
- if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
+ if (mstype == DLM_MSG_CANCEL_REPLY &&
+ test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) {
log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
overlap_done = 1;
goto out_del;
}
@@ -1544,13 +1806,13 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
lingering state of the cancel and fail with -EBUSY. */
if ((mstype == DLM_MSG_CONVERT_REPLY) &&
- (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
- is_overlap_cancel(lkb) && ms && !ms->m_result) {
+ (lkb->lkb_wait_type == DLM_MSG_CONVERT) && ms && !ms->m_result &&
+ test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) {
log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
lkb->lkb_id);
lkb->lkb_wait_type = 0;
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
lkb->lkb_wait_count--;
+ unhold_lkb(lkb);
goto out_del;
}
@@ -1563,8 +1825,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
}
log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
- lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
- mstype, lkb->lkb_flags);
+ lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0,
+ lkb->lkb_remid, mstype, dlm_iflags_val(lkb));
return -1;
out_del:
@@ -1577,12 +1839,13 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
log_error(ls, "remwait error %x reply %d wait_type %d overlap",
lkb->lkb_id, mstype, lkb->lkb_wait_type);
lkb->lkb_wait_count--;
+ unhold_lkb(lkb);
lkb->lkb_wait_type = 0;
}
DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
- lkb->lkb_flags &= ~DLM_IFL_RESEND;
+ clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
lkb->lkb_wait_count--;
if (!lkb->lkb_wait_count)
list_del_init(&lkb->lkb_wait_reply);
@@ -1595,349 +1858,34 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock_bh(&ls->ls_waiters_lock);
error = _remove_from_waiters(lkb, mstype, NULL);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
return error;
}
-/* Handles situations where we might be processing a "fake" or "stub" reply in
- which we can't try to take waiters_mutex again. */
+/* Handles situations where we might be processing a "fake" or "local" reply in
+ * the recovery context, which stops any locking activity. Only debugfs might
+ * change the lockspace waiters, but it will hold the recovery lock to ensure
+ * remove_from_waiters_ms() in the local case is the only user manipulating the
+ * lockspace waiters in recovery context.
+ */
-static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
+static int remove_from_waiters_ms(struct dlm_lkb *lkb,
+ const struct dlm_message *ms, bool local)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
- if (ms->m_flags != DLM_IFL_STUB_MS)
- mutex_lock(&ls->ls_waiters_mutex);
- error = _remove_from_waiters(lkb, ms->m_type, ms);
- if (ms->m_flags != DLM_IFL_STUB_MS)
- mutex_unlock(&ls->ls_waiters_mutex);
- return error;
-}
-
-/* If there's an rsb for the same resource being removed, ensure
- that the remove message is sent before the new lookup message.
- It should be rare to need a delay here, but if not, then it may
- be worthwhile to add a proper wait mechanism rather than a delay. */
-
-static void wait_pending_remove(struct dlm_rsb *r)
-{
- struct dlm_ls *ls = r->res_ls;
- restart:
- spin_lock(&ls->ls_remove_spin);
- if (ls->ls_remove_len &&
- !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
- log_debug(ls, "delay lookup for remove dir %d %s",
- r->res_dir_nodeid, r->res_name);
- spin_unlock(&ls->ls_remove_spin);
- msleep(1);
- goto restart;
- }
- spin_unlock(&ls->ls_remove_spin);
-}
-
-/*
- * ls_remove_spin protects ls_remove_name and ls_remove_len which are
- * read by other threads in wait_pending_remove. ls_remove_names
- * and ls_remove_lens are only used by the scan thread, so they do
- * not need protection.
- */
-
-static void shrink_bucket(struct dlm_ls *ls, int b)
-{
- struct rb_node *n, *next;
- struct dlm_rsb *r;
- char *name;
- int our_nodeid = dlm_our_nodeid();
- int remote_count = 0;
- int need_shrink = 0;
- int i, len, rv;
-
- memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
-
- if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- return;
- }
-
- for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
- next = rb_next(n);
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
-
- /* If we're the directory record for this rsb, and
- we're not the master of it, then we need to wait
- for the master node to send us a dir remove for
- before removing the dir record. */
-
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid != our_nodeid) &&
- (dlm_dir_nodeid(r) == our_nodeid)) {
- continue;
- }
-
- need_shrink = 1;
-
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ)) {
- continue;
- }
-
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid == our_nodeid) &&
- (dlm_dir_nodeid(r) != our_nodeid)) {
-
- /* We're the master of this rsb but we're not
- the directory record, so we need to tell the
- dir node to remove the dir record. */
-
- ls->ls_remove_lens[remote_count] = r->res_length;
- memcpy(ls->ls_remove_names[remote_count], r->res_name,
- DLM_RESNAME_MAXLEN);
- remote_count++;
-
- if (remote_count >= DLM_REMOVE_NAMES_MAX)
- break;
- continue;
- }
-
- if (!kref_put(&r->res_ref, kill_rsb)) {
- log_error(ls, "tossed rsb in use %s", r->res_name);
- continue;
- }
-
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- dlm_free_rsb(r);
- }
-
- if (need_shrink)
- ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
+ if (!local)
+ spin_lock_bh(&ls->ls_waiters_lock);
else
- ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
- spin_unlock(&ls->ls_rsbtbl[b].lock);
-
- /*
- * While searching for rsb's to free, we found some that require
- * remote removal. We leave them in place and find them again here
- * so there is a very small gap between removing them from the toss
- * list and sending the removal. Keeping this gap small is
- * important to keep us (the master node) from being out of sync
- * with the remote dir node for very long.
- *
- * From the time the rsb is removed from toss until just after
- * send_remove, the rsb name is saved in ls_remove_name. A new
- * lookup checks this to ensure that a new lookup message for the
- * same resource name is not sent just before the remove message.
- */
-
- for (i = 0; i < remote_count; i++) {
- name = ls->ls_remove_names[i];
- len = ls->ls_remove_lens[i];
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (rv) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_debug(ls, "remove_name not toss %s", name);
- continue;
- }
-
- if (r->res_master_nodeid != our_nodeid) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_debug(ls, "remove_name master %d dir %d our %d %s",
- r->res_master_nodeid, r->res_dir_nodeid,
- our_nodeid, name);
- continue;
- }
-
- if (r->res_dir_nodeid == our_nodeid) {
- /* should never happen */
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "remove_name dir %d master %d our %d %s",
- r->res_dir_nodeid, r->res_master_nodeid,
- our_nodeid, name);
- continue;
- }
-
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ)) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_debug(ls, "remove_name toss_time %lu now %lu %s",
- r->res_toss_time, jiffies, name);
- continue;
- }
-
- if (!kref_put(&r->res_ref, kill_rsb)) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "remove_name in use %s", name);
- continue;
- }
-
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
-
- /* block lookup of same name until we've sent remove */
- spin_lock(&ls->ls_remove_spin);
- ls->ls_remove_len = len;
- memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
- spin_unlock(&ls->ls_remove_spin);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
-
- send_remove(r);
-
- /* allow lookup of name again */
- spin_lock(&ls->ls_remove_spin);
- ls->ls_remove_len = 0;
- memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
- spin_unlock(&ls->ls_remove_spin);
-
- dlm_free_rsb(r);
- }
-}
-
-void dlm_scan_rsbs(struct dlm_ls *ls)
-{
- int i;
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- shrink_bucket(ls, i);
- if (dlm_locking_stopped(ls))
- break;
- cond_resched();
- }
-}
-
-static void add_timeout(struct dlm_lkb *lkb)
-{
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
-
- if (is_master_copy(lkb))
- return;
-
- if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
- !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
- lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
- goto add_it;
- }
- if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
- goto add_it;
- return;
-
- add_it:
- DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
- mutex_lock(&ls->ls_timeout_mutex);
- hold_lkb(lkb);
- list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
- mutex_unlock(&ls->ls_timeout_mutex);
-}
-
-static void del_timeout(struct dlm_lkb *lkb)
-{
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
-
- mutex_lock(&ls->ls_timeout_mutex);
- if (!list_empty(&lkb->lkb_time_list)) {
- list_del_init(&lkb->lkb_time_list);
- unhold_lkb(lkb);
- }
- mutex_unlock(&ls->ls_timeout_mutex);
-}
-
-/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
- lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
- and then lock rsb because of lock ordering in add_timeout. We may need
- to specify some special timeout-related bits in the lkb that are just to
- be accessed under the timeout_mutex. */
-
-void dlm_scan_timeout(struct dlm_ls *ls)
-{
- struct dlm_rsb *r;
- struct dlm_lkb *lkb;
- int do_cancel, do_warn;
- s64 wait_us;
-
- for (;;) {
- if (dlm_locking_stopped(ls))
- break;
-
- do_cancel = 0;
- do_warn = 0;
- mutex_lock(&ls->ls_timeout_mutex);
- list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
-
- wait_us = ktime_to_us(ktime_sub(ktime_get(),
- lkb->lkb_timestamp));
-
- if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
- wait_us >= (lkb->lkb_timeout_cs * 10000))
- do_cancel = 1;
-
- if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
- wait_us >= dlm_config.ci_timewarn_cs * 10000)
- do_warn = 1;
-
- if (!do_cancel && !do_warn)
- continue;
- hold_lkb(lkb);
- break;
- }
- mutex_unlock(&ls->ls_timeout_mutex);
-
- if (!do_cancel && !do_warn)
- break;
-
- r = lkb->lkb_resource;
- hold_rsb(r);
- lock_rsb(r);
-
- if (do_warn) {
- /* clear flag so we only warn once */
- lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
- if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
- del_timeout(lkb);
- dlm_timeout_warn(lkb);
- }
-
- if (do_cancel) {
- log_debug(ls, "timeout cancel %x node %d %s",
- lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
- lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
- lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
- del_timeout(lkb);
- _cancel_lock(r, lkb);
- }
-
- unlock_rsb(r);
- unhold_rsb(r);
- dlm_put_lkb(lkb);
- }
-}
-
-/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
- dlm_recoverd before checking/setting ls_recover_begin. */
-
-void dlm_adjust_timeouts(struct dlm_ls *ls)
-{
- struct dlm_lkb *lkb;
- u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
-
- ls->ls_recover_begin = 0;
- mutex_lock(&ls->ls_timeout_mutex);
- list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
- lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
- mutex_unlock(&ls->ls_timeout_mutex);
-
- if (!dlm_config.ci_waitwarn_us)
- return;
-
- mutex_lock(&ls->ls_waiters_mutex);
- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
- if (ktime_to_us(lkb->lkb_wait_time))
- lkb->lkb_wait_time = ktime_get();
- }
- mutex_unlock(&ls->ls_waiters_mutex);
+ WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) ||
+ !dlm_locking_stopped(ls));
+ error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
+ if (!local)
+ spin_unlock_bh(&ls->ls_waiters_lock);
+ return error;
}
/* lkb is master or local copy */
@@ -1990,7 +1938,7 @@ static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
}
if (rsb_flag(r, RSB_VALNOTVALID))
- lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
+ set_bit(DLM_SBF_VALNOTVALID_BIT, &lkb->lkb_sbflags);
}
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -2023,7 +1971,7 @@ static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
/* lkb is process copy (pc) */
static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
int b;
@@ -2039,7 +1987,7 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
if (len > r->res_ls->ls_lvblen)
len = r->res_ls->ls_lvblen;
memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
- lkb->lkb_lvbseq = ms->m_lvbseq;
+ lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
}
}
@@ -2130,7 +2078,7 @@ static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
}
static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
set_lvb_lock_pc(r, lkb, ms);
_grant_lock(r, lkb);
@@ -2168,12 +2116,12 @@ static void munge_demoted(struct dlm_lkb *lkb)
lkb->lkb_grmode = DLM_LOCK_NL;
}
-static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
+static void munge_altmode(struct dlm_lkb *lkb, const struct dlm_message *ms)
{
- if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
- ms->m_type != DLM_MSG_GRANT) {
+ if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) &&
+ ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) {
log_print("munge_altmode %x invalid reply type %d",
- lkb->lkb_id, ms->m_type);
+ lkb->lkb_id, le32_to_cpu(ms->m_type));
return;
}
@@ -2462,7 +2410,7 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
conversion_deadlock_detect(r, lkb)) {
if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
lkb->lkb_grmode = DLM_LOCK_NL;
- lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
+ set_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags);
} else if (err) {
*err = -EDEADLK;
} else {
@@ -2489,7 +2437,7 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
lkb->lkb_rqmode = alt;
rv = _can_be_granted(r, lkb, now, 0);
if (rv)
- lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
+ set_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags);
else
lkb->lkb_rqmode = rqmode;
}
@@ -2746,8 +2694,6 @@ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
return 0;
}
- wait_pending_remove(r);
-
r->res_first_lkid = lkb->lkb_id;
send_lookup(r, lkb);
return 1;
@@ -2760,7 +2706,6 @@ static void process_lookup_list(struct dlm_rsb *r)
list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
list_del_init(&lkb->lkb_rsb_lookup);
_request_lock(r, lkb);
- schedule();
}
}
@@ -2804,10 +2749,9 @@ static void confirm_master(struct dlm_rsb *r, int error)
}
static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
- int namelen, unsigned long timeout_cs,
- void (*ast) (void *astparam),
+ int namelen, void (*ast)(void *astparam),
void *astparam,
- void (*bast) (void *astparam, int mode),
+ void (*bast)(void *astparam, int mode),
struct dlm_args *args)
{
int rv = -EINVAL;
@@ -2861,7 +2805,6 @@ static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
args->astfn = ast;
args->astparam = astparam;
args->bastfn = bast;
- args->timeout = timeout_cs;
args->mode = mode;
args->lksb = lksb;
rv = 0;
@@ -2886,29 +2829,30 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
- int rv = -EINVAL;
+ int rv = -EBUSY;
if (args->flags & DLM_LKF_CONVERT) {
- if (lkb->lkb_flags & DLM_IFL_MSTCPY)
+ if (lkb->lkb_status != DLM_LKSTS_GRANTED)
goto out;
- if (args->flags & DLM_LKF_QUECVT &&
- !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
+ /* lock not allowed if there's any op in progress */
+ if (lkb->lkb_wait_type || lkb->lkb_wait_count)
goto out;
- rv = -EBUSY;
- if (lkb->lkb_status != DLM_LKSTS_GRANTED)
+ if (is_overlap(lkb))
goto out;
- if (lkb->lkb_wait_type)
+ rv = -EINVAL;
+ if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
goto out;
- if (is_overlap(lkb))
+ if (args->flags & DLM_LKF_QUECVT &&
+ !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
goto out;
}
lkb->lkb_exflags = args->flags;
- lkb->lkb_sbflags = 0;
+ dlm_set_sbflags_val(lkb, 0);
lkb->lkb_astfn = args->astfn;
lkb->lkb_astparam = args->astparam;
lkb->lkb_bastfn = args->bastfn;
@@ -2916,14 +2860,25 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
lkb->lkb_lksb = args->lksb;
lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
lkb->lkb_ownpid = (int) current->pid;
- lkb->lkb_timeout_cs = args->timeout;
rv = 0;
out:
- if (rv)
- log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
- rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
- lkb->lkb_status, lkb->lkb_wait_type,
- lkb->lkb_resource->res_name);
+ switch (rv) {
+ case 0:
+ break;
+ case -EINVAL:
+ /* annoy the user because dlm usage is wrong */
+ WARN_ON(1);
+ log_error(ls, "%s %d %x %x %x %d %d", __func__,
+ rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags,
+ lkb->lkb_status, lkb->lkb_wait_type);
+ break;
+ default:
+ log_debug(ls, "%s %d %x %x %x %d %d", __func__,
+ rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags,
+ lkb->lkb_status, lkb->lkb_wait_type);
+ break;
+ }
+
return rv;
}
@@ -2937,23 +2892,12 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- int rv = -EINVAL;
+ int rv = -EBUSY;
- if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
- log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
- dlm_print_lkb(lkb);
- goto out;
- }
-
- /* an lkb may still exist even though the lock is EOL'ed due to a
- cancel, unlock or failed noqueue request; an app can't use these
- locks; return same error as if the lkid had not been found at all */
-
- if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
- log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
- rv = -ENOENT;
+ /* normal unlock not allowed if there's any op in progress */
+ if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) &&
+ (lkb->lkb_wait_type || lkb->lkb_wait_count))
goto out;
- }
/* an lkb may be waiting for an rsb lookup to complete where the
lookup was initiated by another lock */
@@ -2968,24 +2912,41 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
unhold_lkb(lkb); /* undoes create_lkb() */
}
/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
- rv = -EBUSY;
goto out;
}
+ rv = -EINVAL;
+ if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) {
+ log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
+ dlm_print_lkb(lkb);
+ goto out;
+ }
+
+ /* an lkb may still exist even though the lock is EOL'ed due to a
+ * cancel, unlock or failed noqueue request; an app can't use these
+ * locks; return same error as if the lkid had not been found at all
+ */
+
+ if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
+ log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
+ rv = -ENOENT;
+ goto out;
+ }
+
+ if (is_overlap_unlock(lkb))
+ goto out;
+
/* cancel not allowed with another cancel/unlock in progress */
if (args->flags & DLM_LKF_CANCEL) {
if (lkb->lkb_exflags & DLM_LKF_CANCEL)
goto out;
- if (is_overlap(lkb))
+ if (is_overlap_cancel(lkb))
goto out;
- /* don't let scand try to do a cancel */
- del_timeout(lkb);
-
- if (lkb->lkb_flags & DLM_IFL_RESEND) {
- lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
+ if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) {
+ set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
rv = -EBUSY;
goto out;
}
@@ -3000,7 +2961,7 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
switch (lkb->lkb_wait_type) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
- lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
+ set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
rv = -EBUSY;
goto out;
case DLM_MSG_UNLOCK:
@@ -3019,14 +2980,8 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
goto out;
- if (is_overlap_unlock(lkb))
- goto out;
-
- /* don't let scand try to do a cancel */
- del_timeout(lkb);
-
- if (lkb->lkb_flags & DLM_IFL_RESEND) {
- lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
+ if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) {
+ set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
rv = -EBUSY;
goto out;
}
@@ -3034,33 +2989,41 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
switch (lkb->lkb_wait_type) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
- lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
+ set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
rv = -EBUSY;
goto out;
case DLM_MSG_UNLOCK:
goto out;
}
/* add_to_waiters() will set OVERLAP_UNLOCK */
- goto out_ok;
}
- /* normal unlock not allowed if there's any op in progress */
- rv = -EBUSY;
- if (lkb->lkb_wait_type || lkb->lkb_wait_count)
- goto out;
-
out_ok:
/* an overlapping op shouldn't blow away exflags from other op */
lkb->lkb_exflags |= args->flags;
- lkb->lkb_sbflags = 0;
+ dlm_set_sbflags_val(lkb, 0);
lkb->lkb_astparam = args->astparam;
rv = 0;
out:
- if (rv)
- log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
- lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
+ switch (rv) {
+ case 0:
+ break;
+ case -EINVAL:
+ /* annoy the user because dlm usage is wrong */
+ WARN_ON(1);
+ log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
+ lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags,
args->flags, lkb->lkb_wait_type,
lkb->lkb_resource->res_name);
+ break;
+ default:
+ log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
+ lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags,
+ args->flags, lkb->lkb_wait_type,
+ lkb->lkb_resource->res_name);
+ break;
+ }
+
return rv;
}
@@ -3084,7 +3047,6 @@ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (can_be_queued(lkb)) {
error = -EINPROGRESS;
add_lkb(r, lkb, DLM_LKSTS_WAITING);
- add_timeout(lkb);
goto out;
}
@@ -3153,7 +3115,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
error = -EINPROGRESS;
del_lkb(r, lkb);
add_lkb(r, lkb, DLM_LKSTS_CONVERT);
- add_timeout(lkb);
goto out;
}
@@ -3311,8 +3272,9 @@ static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
* request_lock(), convert_lock(), unlock_lock(), cancel_lock()
*/
-static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
- int len, struct dlm_args *args)
+static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ const void *name, int len,
+ struct dlm_args *args)
{
struct dlm_rsb *r;
int error;
@@ -3411,7 +3373,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*ast) (void *astarg),
@@ -3437,8 +3399,10 @@ int dlm_lock(dlm_lockspace_t *lockspace,
if (error)
goto out;
- error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
- astarg, bast, &args);
+ trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
+
+ error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast,
+ &args);
if (error)
goto out_put;
@@ -3450,6 +3414,8 @@ int dlm_lock(dlm_lockspace_t *lockspace,
if (error == -EINPROGRESS)
error = 0;
out_put:
+ trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
+
if (convert || error)
__put_lkb(ls, lkb);
if (error == -EAGAIN || error == -EDEADLK)
@@ -3481,6 +3447,8 @@ int dlm_unlock(dlm_lockspace_t *lockspace,
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
error = set_unlock_args(flags, astarg, &args);
if (error)
goto out_put;
@@ -3495,6 +3463,8 @@ int dlm_unlock(dlm_lockspace_t *lockspace,
if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
+
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
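/*
 * A minimal sketch (not taken from these hunks) of how an in-kernel user
 * might call the exported dlm_lock()/dlm_unlock() entry points whose
 * prototypes are touched above (name is now const void *). struct my_lock,
 * my_ast(), grab_ex_lock() and drop_lock() are hypothetical helper names,
 * and error handling is trimmed.
 */
#include <linux/dlm.h>
#include <linux/string.h>
#include <linux/completion.h>

struct my_lock {
	struct dlm_lksb lksb;
	struct completion done;		/* hypothetical helper state */
};

static void my_ast(void *astarg)
{
	struct my_lock *ml = astarg;

	/* lksb.sb_status is 0 on grant, -EAGAIN for a failed NOQUEUE, etc. */
	complete(&ml->done);
}

static int grab_ex_lock(dlm_lockspace_t *ls, struct my_lock *ml,
			const char *name)
{
	int error;

	init_completion(&ml->done);
	error = dlm_lock(ls, DLM_LOCK_EX, &ml->lksb, DLM_LKF_NOQUEUE,
			 name, strlen(name), 0, my_ast, ml, NULL);
	if (error)
		return error;
	wait_for_completion(&ml->done);
	return ml->lksb.sb_status;
}

static int drop_lock(dlm_lockspace_t *ls, struct my_lock *ml)
{
	int error;

	reinit_completion(&ml->done);
	error = dlm_unlock(ls, ml->lksb.sb_lkid, 0, &ml->lksb, ml);
	if (error)
		return error;
	wait_for_completion(&ml->done);	/* ast fires with sb_status == -DLM_EUNLOCK */
	return 0;
}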
@@ -3537,19 +3507,19 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
pass into midcomms_commit and a message buffer (mb) that we
write our data into */
- mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb);
if (!mh)
return -ENOBUFS;
ms = (struct dlm_message *) mb;
- ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- ms->m_header.u.h_lockspace = ls->ls_global_id;
- ms->m_header.h_nodeid = dlm_our_nodeid();
- ms->m_header.h_length = mb_len;
+ ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
+ ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ ms->m_header.h_length = cpu_to_le16(mb_len);
ms->m_header.h_cmd = DLM_MSG;
- ms->m_type = mstype;
+ ms->m_type = cpu_to_le32(mstype);
*mh_ret = mh;
*ms_ret = ms;
@@ -3574,7 +3544,7 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
case DLM_MSG_REQUEST_REPLY:
case DLM_MSG_CONVERT_REPLY:
case DLM_MSG_GRANT:
- if (lkb && lkb->lkb_lvbptr)
+ if (lkb && lkb->lkb_lvbptr && (lkb->lkb_exflags & DLM_LKF_VALBLK))
mb_len += r->res_ls->ls_lvblen;
break;
}
@@ -3586,51 +3556,51 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
/* further lowcomms enhancements or alternate implementations may make
the return value from this function useful at some point */
-static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
+static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms,
+ const void *name, int namelen)
{
- dlm_message_out(ms);
- dlm_midcomms_commit_mhandle(mh);
+ dlm_midcomms_commit_mhandle(mh, name, namelen);
return 0;
}
static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
- ms->m_nodeid = lkb->lkb_nodeid;
- ms->m_pid = lkb->lkb_ownpid;
- ms->m_lkid = lkb->lkb_id;
- ms->m_remid = lkb->lkb_remid;
- ms->m_exflags = lkb->lkb_exflags;
- ms->m_sbflags = lkb->lkb_sbflags;
- ms->m_flags = lkb->lkb_flags;
- ms->m_lvbseq = lkb->lkb_lvbseq;
- ms->m_status = lkb->lkb_status;
- ms->m_grmode = lkb->lkb_grmode;
- ms->m_rqmode = lkb->lkb_rqmode;
- ms->m_hash = r->res_hash;
+ ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ ms->m_pid = cpu_to_le32(lkb->lkb_ownpid);
+ ms->m_lkid = cpu_to_le32(lkb->lkb_id);
+ ms->m_remid = cpu_to_le32(lkb->lkb_remid);
+ ms->m_exflags = cpu_to_le32(lkb->lkb_exflags);
+ ms->m_sbflags = cpu_to_le32(dlm_sbflags_val(lkb));
+ ms->m_flags = cpu_to_le32(dlm_dflags_val(lkb));
+ ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
+ ms->m_status = cpu_to_le32(lkb->lkb_status);
+ ms->m_grmode = cpu_to_le32(lkb->lkb_grmode);
+ ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode);
+ ms->m_hash = cpu_to_le32(r->res_hash);
/* m_result and m_bastmode are set from function args,
not from lkb fields */
if (lkb->lkb_bastfn)
- ms->m_asts |= DLM_CB_BAST;
+ ms->m_asts |= cpu_to_le32(DLM_CB_BAST);
if (lkb->lkb_astfn)
- ms->m_asts |= DLM_CB_CAST;
+ ms->m_asts |= cpu_to_le32(DLM_CB_CAST);
/* compare with switch in create_message; send_remove() doesn't
use send_args() */
switch (ms->m_type) {
- case DLM_MSG_REQUEST:
- case DLM_MSG_LOOKUP:
+ case cpu_to_le32(DLM_MSG_REQUEST):
+ case cpu_to_le32(DLM_MSG_LOOKUP):
memcpy(ms->m_extra, r->res_name, r->res_length);
break;
- case DLM_MSG_CONVERT:
- case DLM_MSG_UNLOCK:
- case DLM_MSG_REQUEST_REPLY:
- case DLM_MSG_CONVERT_REPLY:
- case DLM_MSG_GRANT:
- if (!lkb->lkb_lvbptr)
+ case cpu_to_le32(DLM_MSG_CONVERT):
+ case cpu_to_le32(DLM_MSG_UNLOCK):
+ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
+ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
+ case cpu_to_le32(DLM_MSG_GRANT):
+ if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
break;
memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
break;
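/*
 * A note on the switch idiom above (and in validate_message() and
 * _receive_message() further down): ms->m_type stays __le32 end to end,
 * and the case labels are written as cpu_to_le32(CONSTANT) so the byte
 * swap on the constant is folded at compile time and sparse endian
 * checking stays clean. A minimal sketch of the pattern; the helper name
 * msg_carries_name() is hypothetical, not part of this patch:
 */
static bool msg_carries_name(const struct dlm_message *ms)
{
	/* case labels stay in wire byte order; no runtime swap needed */
	switch (ms->m_type) {
	case cpu_to_le32(DLM_MSG_REQUEST):
	case cpu_to_le32(DLM_MSG_LOOKUP):
		return true;
	default:
		return false;
	}
}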
@@ -3645,17 +3615,14 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
to_nodeid = r->res_nodeid;
- error = add_to_waiters(lkb, mstype, to_nodeid);
- if (error)
- return error;
-
+ add_to_waiters(lkb, mstype, to_nodeid);
error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
if (error)
goto fail;
send_args(r, lkb, ms);
- error = send_message(mh, ms);
+ error = send_message(mh, ms, r->res_name, r->res_length);
if (error)
goto fail;
return 0;
@@ -3679,10 +3646,9 @@ static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
/* down conversions go without a reply from the master */
if (!error && down_conversion(lkb)) {
remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
- r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
- r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
- r->res_ls->ls_stub_ms.m_result = 0;
- __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
+ r->res_ls->ls_local_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
+ r->res_ls->ls_local_ms.m_result = 0;
+ __receive_convert_reply(r, lkb, &r->res_ls->ls_local_ms, true);
}
return error;
@@ -3718,7 +3684,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
ms->m_result = 0;
- error = send_message(mh, ms);
+ error = send_message(mh, ms, r->res_name, r->res_length);
out:
return error;
}
@@ -3737,9 +3703,9 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
send_args(r, lkb, ms);
- ms->m_bastmode = mode;
+ ms->m_bastmode = cpu_to_le32(mode);
- error = send_message(mh, ms);
+ error = send_message(mh, ms, r->res_name, r->res_length);
out:
return error;
}
@@ -3752,17 +3718,14 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
to_nodeid = dlm_dir_nodeid(r);
- error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
- if (error)
- return error;
-
+ add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
if (error)
goto fail;
send_args(r, lkb, ms);
- error = send_message(mh, ms);
+ error = send_message(mh, ms, r->res_name, r->res_length);
if (error)
goto fail;
return 0;
@@ -3785,9 +3748,9 @@ static int send_remove(struct dlm_rsb *r)
goto out;
memcpy(ms->m_extra, r->res_name, r->res_length);
- ms->m_hash = r->res_hash;
+ ms->m_hash = cpu_to_le32(r->res_hash);
- error = send_message(mh, ms);
+ error = send_message(mh, ms, r->res_name, r->res_length);
out:
return error;
}
@@ -3807,9 +3770,9 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
send_args(r, lkb, ms);
- ms->m_result = rv;
+ ms->m_result = cpu_to_le32(to_dlm_errno(rv));
- error = send_message(mh, ms);
+ error = send_message(mh, ms, r->res_name, r->res_length);
out:
return error;
}
@@ -3834,23 +3797,24 @@ static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
}
-static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
- int ret_nodeid, int rv)
+static int send_lookup_reply(struct dlm_ls *ls,
+ const struct dlm_message *ms_in, int ret_nodeid,
+ int rv)
{
- struct dlm_rsb *r = &ls->ls_stub_rsb;
+ struct dlm_rsb *r = &ls->ls_local_rsb;
struct dlm_message *ms;
struct dlm_mhandle *mh;
- int error, nodeid = ms_in->m_header.h_nodeid;
+ int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);
error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
if (error)
goto out;
ms->m_lkid = ms_in->m_lkid;
- ms->m_result = rv;
- ms->m_nodeid = ret_nodeid;
+ ms->m_result = cpu_to_le32(to_dlm_errno(rv));
+ ms->m_nodeid = cpu_to_le32(ret_nodeid);
- error = send_message(mh, ms);
+ error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in));
out:
return error;
}
@@ -3859,31 +3823,32 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
of message, unlike the send side where we can safely send everything about
the lkb for any type of message */
-static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
+static void receive_flags(struct dlm_lkb *lkb, const struct dlm_message *ms)
{
- lkb->lkb_exflags = ms->m_exflags;
- lkb->lkb_sbflags = ms->m_sbflags;
- lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
- (ms->m_flags & 0x0000FFFF);
+ lkb->lkb_exflags = le32_to_cpu(ms->m_exflags);
+ dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags));
+ dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags));
}
-static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+static void receive_flags_reply(struct dlm_lkb *lkb,
+ const struct dlm_message *ms,
+ bool local)
{
- if (ms->m_flags == DLM_IFL_STUB_MS)
+ if (local)
return;
- lkb->lkb_sbflags = ms->m_sbflags;
- lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
- (ms->m_flags & 0x0000FFFF);
+ dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags));
+ dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags));
}
-static int receive_extralen(struct dlm_message *ms)
+static int receive_extralen(const struct dlm_message *ms)
{
- return (ms->m_header.h_length - sizeof(struct dlm_message));
+ return (le16_to_cpu(ms->m_header.h_length) -
+ sizeof(struct dlm_message));
}
static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
int len;
@@ -3911,16 +3876,16 @@ static void fake_astfn(void *astparam)
}
static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
- lkb->lkb_nodeid = ms->m_header.h_nodeid;
- lkb->lkb_ownpid = ms->m_pid;
- lkb->lkb_remid = ms->m_lkid;
+ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ lkb->lkb_ownpid = le32_to_cpu(ms->m_pid);
+ lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
lkb->lkb_grmode = DLM_LOCK_IV;
- lkb->lkb_rqmode = ms->m_rqmode;
+ lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
- lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
- lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
+ lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL;
+ lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL;
if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
/* lkb was just created so there won't be an lvb yet */
@@ -3933,7 +3898,7 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
}
static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
if (lkb->lkb_status != DLM_LKSTS_GRANTED)
return -EBUSY;
@@ -3941,56 +3906,65 @@ static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
if (receive_lvb(ls, lkb, ms))
return -ENOMEM;
- lkb->lkb_rqmode = ms->m_rqmode;
- lkb->lkb_lvbseq = ms->m_lvbseq;
+ lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
+ lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
return 0;
}
static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms)
{
if (receive_lvb(ls, lkb, ms))
return -ENOMEM;
return 0;
}
-/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
+/* We fill in the local-lkb fields with the info that send_xxxx_reply()
uses to send a reply and that the remote end uses to process the reply. */
-static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
+static void setup_local_lkb(struct dlm_ls *ls, const struct dlm_message *ms)
{
- struct dlm_lkb *lkb = &ls->ls_stub_lkb;
- lkb->lkb_nodeid = ms->m_header.h_nodeid;
- lkb->lkb_remid = ms->m_lkid;
+ struct dlm_lkb *lkb = &ls->ls_local_lkb;
+ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
}
/* This is called after the rsb is locked so that we can safely inspect
fields in the lkb. */
-static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
+static int validate_message(struct dlm_lkb *lkb, const struct dlm_message *ms)
{
- int from = ms->m_header.h_nodeid;
+ int from = le32_to_cpu(ms->m_header.h_nodeid);
int error = 0;
+ /* currently mixing of user/kernel locks is not supported */
+ if (ms->m_flags & cpu_to_le32(BIT(DLM_DFL_USER_BIT)) &&
+ !test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+ log_error(lkb->lkb_resource->res_ls,
+ "got user dlm message for a kernel lock");
+ error = -EINVAL;
+ goto out;
+ }
+
switch (ms->m_type) {
- case DLM_MSG_CONVERT:
- case DLM_MSG_UNLOCK:
- case DLM_MSG_CANCEL:
+ case cpu_to_le32(DLM_MSG_CONVERT):
+ case cpu_to_le32(DLM_MSG_UNLOCK):
+ case cpu_to_le32(DLM_MSG_CANCEL):
if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
error = -EINVAL;
break;
- case DLM_MSG_CONVERT_REPLY:
- case DLM_MSG_UNLOCK_REPLY:
- case DLM_MSG_CANCEL_REPLY:
- case DLM_MSG_GRANT:
- case DLM_MSG_BAST:
+ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
+ case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
+ case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
+ case cpu_to_le32(DLM_MSG_GRANT):
+ case cpu_to_le32(DLM_MSG_BAST):
if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
error = -EINVAL;
break;
- case DLM_MSG_REQUEST_REPLY:
+ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
if (!is_process_copy(lkb))
error = -EINVAL;
else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
@@ -4001,87 +3975,31 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
error = -EINVAL;
}
+out:
if (error)
log_error(lkb->lkb_resource->res_ls,
"ignore invalid message %d from %d %x %x %x %d",
- ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
- lkb->lkb_flags, lkb->lkb_nodeid);
+ le32_to_cpu(ms->m_type), from, lkb->lkb_id,
+ lkb->lkb_remid, dlm_iflags_val(lkb),
+ lkb->lkb_nodeid);
return error;
}
-static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
-{
- char name[DLM_RESNAME_MAXLEN + 1];
- struct dlm_message *ms;
- struct dlm_mhandle *mh;
- struct dlm_rsb *r;
- uint32_t hash, b;
- int rv, dir_nodeid;
-
- memset(name, 0, sizeof(name));
- memcpy(name, ms_name, len);
-
- hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
- dir_nodeid = dlm_hash2nodeid(ls, hash);
-
- log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (!rv) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "repeat_remove on keep %s", name);
- return;
- }
-
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (!rv) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "repeat_remove on toss %s", name);
- return;
- }
-
- /* use ls->remove_name2 to avoid conflict with shrink? */
-
- spin_lock(&ls->ls_remove_spin);
- ls->ls_remove_len = len;
- memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
- spin_unlock(&ls->ls_remove_spin);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
-
- rv = _create_message(ls, sizeof(struct dlm_message) + len,
- dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
- if (rv)
- return;
-
- memcpy(ms->m_extra, name, len);
- ms->m_hash = hash;
-
- send_message(mh, ms);
-
- spin_lock(&ls->ls_remove_spin);
- ls->ls_remove_len = 0;
- memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
- spin_unlock(&ls->ls_remove_spin);
-}
-
-static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_request(struct dlm_ls *ls, const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int from_nodeid;
int error, namelen = 0;
- from_nodeid = ms->m_header.h_nodeid;
+ from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
error = create_lkb(ls, &lkb);
if (error)
goto fail;
receive_flags(lkb, ms);
- lkb->lkb_flags |= DLM_IFL_MSTCPY;
+ set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
error = receive_request_args(ls, lkb, ms);
if (error) {
__put_lkb(ls, lkb);
@@ -4136,45 +4054,32 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
ENOTBLK request failures when the lookup reply designating us
as master is delayed. */
- /* We could repeatedly return -EBADR here if our send_remove() is
- delayed in being sent/arriving/being processed on the dir node.
- Another node would repeatedly lookup up the master, and the dir
- node would continue returning our nodeid until our send_remove
- took effect.
-
- We send another remove message in case our previous send_remove
- was lost/ignored/missed somehow. */
-
if (error != -ENOTBLK) {
log_limit(ls, "receive_request %x from %d %d",
- ms->m_lkid, from_nodeid, error);
+ le32_to_cpu(ms->m_lkid), from_nodeid, error);
}
- if (namelen && error == -EBADR) {
- send_repeat_remove(ls, ms->m_extra, namelen);
- msleep(1000);
- }
-
- setup_stub_lkb(ls, ms);
- send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+ setup_local_lkb(ls, ms);
+ send_request_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
return error;
}
-static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_convert(struct dlm_ls *ls, const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, reply = 1;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
goto fail;
- if (lkb->lkb_remid != ms->m_lkid) {
+ if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
log_error(ls, "receive_convert %x remid %x recover_seq %llu "
"remote %d %x", lkb->lkb_id, lkb->lkb_remid,
(unsigned long long)lkb->lkb_recover_seq,
- ms->m_header.h_nodeid, ms->m_lkid);
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid));
error = -ENOENT;
dlm_put_lkb(lkb);
goto fail;
@@ -4210,25 +4115,26 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
return 0;
fail:
- setup_stub_lkb(ls, ms);
- send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+ setup_local_lkb(ls, ms);
+ send_convert_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
return error;
}
-static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_unlock(struct dlm_ls *ls, const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
goto fail;
- if (lkb->lkb_remid != ms->m_lkid) {
+ if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
log_error(ls, "receive_unlock %x remid %x remote %d %x",
lkb->lkb_id, lkb->lkb_remid,
- ms->m_header.h_nodeid, ms->m_lkid);
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid));
error = -ENOENT;
dlm_put_lkb(lkb);
goto fail;
@@ -4261,18 +4167,18 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
return 0;
fail:
- setup_stub_lkb(ls, ms);
- send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+ setup_local_lkb(ls, ms);
+ send_unlock_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
return error;
}
-static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_cancel(struct dlm_ls *ls, const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
goto fail;
@@ -4297,18 +4203,18 @@ static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
return 0;
fail:
- setup_stub_lkb(ls, ms);
- send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+ setup_local_lkb(ls, ms);
+ send_cancel_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error);
return error;
}
-static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_grant(struct dlm_ls *ls, const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
return error;
@@ -4321,7 +4227,7 @@ static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
if (error)
goto out;
- receive_flags_reply(lkb, ms);
+ receive_flags_reply(lkb, ms, false);
if (is_altmode(lkb))
munge_altmode(lkb, ms);
grant_lock_pc(r, lkb, ms);
@@ -4333,13 +4239,13 @@ static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
return 0;
}
-static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_bast(struct dlm_ls *ls, const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
return error;
@@ -4352,8 +4258,8 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
if (error)
goto out;
- queue_bast(r, lkb, ms->m_bastmode);
- lkb->lkb_highbast = ms->m_bastmode;
+ queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode));
+ lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode);
out:
unlock_rsb(r);
put_rsb(r);
@@ -4361,11 +4267,11 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
return 0;
}
-static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
+static void receive_lookup(struct dlm_ls *ls, const struct dlm_message *ms)
{
int len, error, ret_nodeid, from_nodeid, our_nodeid;
- from_nodeid = ms->m_header.h_nodeid;
+ from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
our_nodeid = dlm_our_nodeid();
len = receive_extralen(ms);
@@ -4381,14 +4287,13 @@ static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
send_lookup_reply(ls, ms, ret_nodeid, error);
}
-static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
+static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
{
char name[DLM_RESNAME_MAXLEN+1];
struct dlm_rsb *r;
- uint32_t hash, b;
int rv, len, dir_nodeid, from_nodeid;
- from_nodeid = ms->m_header.h_nodeid;
+ from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
len = receive_extralen(ms);
@@ -4398,90 +4303,99 @@ static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
return;
}
- dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
+ dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash));
if (dir_nodeid != dlm_our_nodeid()) {
log_error(ls, "receive_remove from %d bad nodeid %d",
from_nodeid, dir_nodeid);
return;
}
- /* Look for name on rsbtbl.toss, if it's there, kill it.
- If it's on rsbtbl.keep, it's being used, and we should ignore this
- message. This is an expected race between the dir node sending a
- request to the master node at the same time as the master node sends
- a remove to the dir node. The resolution to that race is for the
- dir node to ignore the remove message, and the master node to
- recreate the master rsb when it gets a request from the dir node for
- an rsb it doesn't have. */
+ /*
+ * Look for inactive rsb, if it's there, free it.
+ * If the rsb is active, it's being used, and we should ignore this
+ * message. This is an expected race between the dir node sending a
+ * request to the master node at the same time as the master node sends
+ * a remove to the dir node. The resolution to that race is for the
+ * dir node to ignore the remove message, and the master node to
+ * recreate the master rsb when it gets a request from the dir node for
+ * an rsb it doesn't have.
+ */
memset(name, 0, sizeof(name));
memcpy(name, ms->m_extra, len);
- hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
+ rcu_read_lock();
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (rv) {
+ rcu_read_unlock();
+ /* should not happen */
+ log_error(ls, "%s from %d not found %s", __func__,
+ from_nodeid, name);
+ return;
+ }
- spin_lock(&ls->ls_rsbtbl[b].lock);
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ if (!rsb_flag(r, RSB_HASHED)) {
+ rcu_read_unlock();
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ /* should not happen */
+ log_error(ls, "%s from %d got removed during removal %s",
+ __func__, from_nodeid, name);
+ return;
+ }
+ /* at this stage the rsb can only be freed here */
+ rcu_read_unlock();
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (rv) {
- /* verify the rsb is on keep list per comment above */
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (rv) {
- /* should not happen */
- log_error(ls, "receive_remove from %d not found %s",
- from_nodeid, name);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- return;
- }
+ if (!rsb_flag(r, RSB_INACTIVE)) {
if (r->res_master_nodeid != from_nodeid) {
/* should not happen */
- log_error(ls, "receive_remove keep from %d master %d",
+ log_error(ls, "receive_remove on active rsb from %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
+ /* Ignore the remove message, see race comment above. */
+
log_debug(ls, "receive_remove from %d master %d first %x %s",
from_nodeid, r->res_master_nodeid, r->res_first_lkid,
name);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
if (r->res_master_nodeid != from_nodeid) {
- log_error(ls, "receive_remove toss from %d master %d",
+ log_error(ls, "receive_remove inactive from %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
- if (kref_put(&r->res_ref, kill_rsb)) {
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- dlm_free_rsb(r);
- } else {
- log_error(ls, "receive_remove from %d rsb ref error",
- from_nodeid);
- dlm_print_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- }
+ list_del(&r->res_slow_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
+ rsb_clear_flag(r, RSB_HASHED);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ free_inactive_rsb(r);
}
-static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
+static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms)
{
- do_purge(ls, ms->m_nodeid, ms->m_pid);
+ do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid));
}
-static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_request_reply(struct dlm_ls *ls,
+ const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, mstype, result;
- int from_nodeid = ms->m_header.h_nodeid;
+ int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
return error;
@@ -4497,7 +4411,8 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
if (error) {
log_error(ls, "receive_request_reply %x remote %d %x result %d",
- lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
+ lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid),
+ from_dlm_errno(le32_to_cpu(ms->m_result)));
dlm_dump_rsb(r);
goto out;
}
@@ -4511,7 +4426,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
}
/* this is the value returned from do_request() on the master */
- result = ms->m_result;
+ result = from_dlm_errno(le32_to_cpu(ms->m_result));
switch (result) {
case -EAGAIN:
@@ -4524,13 +4439,12 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
case -EINPROGRESS:
case 0:
/* request was queued or granted on remote master */
- receive_flags_reply(lkb, ms);
- lkb->lkb_remid = ms->m_lkid;
+ receive_flags_reply(lkb, ms, false);
+ lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
if (is_altmode(lkb))
munge_altmode(lkb, ms);
if (result) {
add_lkb(r, lkb, DLM_LKSTS_WAITING);
- add_timeout(lkb);
} else {
grant_lock_pc(r, lkb, ms);
queue_cast(r, lkb, 0);
@@ -4572,20 +4486,21 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
lkb->lkb_id, result);
}
- if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
+ if ((result == 0 || result == -EINPROGRESS) &&
+ test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) {
log_debug(ls, "receive_request_reply %x result %d unlock",
lkb->lkb_id, result);
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
+ clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
send_unlock(r, lkb);
- } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
+ } else if ((result == -EINPROGRESS) &&
+ test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT,
+ &lkb->lkb_iflags)) {
log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
+ clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
send_cancel(r, lkb);
} else {
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
+ clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags);
+ clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags);
}
out:
unlock_rsb(r);
@@ -4595,34 +4510,33 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
}
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
- struct dlm_message *ms)
+ const struct dlm_message *ms, bool local)
{
/* this is the value returned from do_convert() on the master */
- switch (ms->m_result) {
+ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
case -EAGAIN:
/* convert would block (be queued) on remote master */
queue_cast(r, lkb, -EAGAIN);
break;
case -EDEADLK:
- receive_flags_reply(lkb, ms);
+ receive_flags_reply(lkb, ms, local);
revert_lock_pc(r, lkb);
queue_cast(r, lkb, -EDEADLK);
break;
case -EINPROGRESS:
/* convert was queued on remote master */
- receive_flags_reply(lkb, ms);
+ receive_flags_reply(lkb, ms, local);
if (is_demoted(lkb))
munge_demoted(lkb);
del_lkb(r, lkb);
add_lkb(r, lkb, DLM_LKSTS_CONVERT);
- add_timeout(lkb);
break;
case 0:
/* convert was granted on remote master */
- receive_flags_reply(lkb, ms);
+ receive_flags_reply(lkb, ms, local);
if (is_demoted(lkb))
munge_demoted(lkb);
grant_lock_pc(r, lkb, ms);
@@ -4631,14 +4545,16 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
default:
log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
- lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
- ms->m_result);
+ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid),
+ from_dlm_errno(le32_to_cpu(ms->m_result)));
dlm_print_rsb(r);
dlm_print_lkb(lkb);
}
}
-static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+static void _receive_convert_reply(struct dlm_lkb *lkb,
+ const struct dlm_message *ms, bool local)
{
struct dlm_rsb *r = lkb->lkb_resource;
int error;
@@ -4650,32 +4566,33 @@ static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
if (error)
goto out;
- /* stub reply can happen with waiters_mutex held */
- error = remove_from_waiters_ms(lkb, ms);
+ error = remove_from_waiters_ms(lkb, ms, local);
if (error)
goto out;
- __receive_convert_reply(r, lkb, ms);
+ __receive_convert_reply(r, lkb, ms, local);
out:
unlock_rsb(r);
put_rsb(r);
}
-static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_convert_reply(struct dlm_ls *ls,
+ const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
return error;
- _receive_convert_reply(lkb, ms);
+ _receive_convert_reply(lkb, ms, false);
dlm_put_lkb(lkb);
return 0;
}
-static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+static void _receive_unlock_reply(struct dlm_lkb *lkb,
+ const struct dlm_message *ms, bool local)
{
struct dlm_rsb *r = lkb->lkb_resource;
int error;
@@ -4687,16 +4604,15 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
if (error)
goto out;
- /* stub reply can happen with waiters_mutex held */
- error = remove_from_waiters_ms(lkb, ms);
+ error = remove_from_waiters_ms(lkb, ms, local);
if (error)
goto out;
/* this is the value returned from do_unlock() on the master */
- switch (ms->m_result) {
+ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
case -DLM_EUNLOCK:
- receive_flags_reply(lkb, ms);
+ receive_flags_reply(lkb, ms, local);
remove_lock_pc(r, lkb);
queue_cast(r, lkb, -DLM_EUNLOCK);
break;
@@ -4704,28 +4620,30 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
break;
default:
log_error(r->res_ls, "receive_unlock_reply %x error %d",
- lkb->lkb_id, ms->m_result);
+ lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result)));
}
out:
unlock_rsb(r);
put_rsb(r);
}
-static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_unlock_reply(struct dlm_ls *ls,
+ const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
return error;
- _receive_unlock_reply(lkb, ms);
+ _receive_unlock_reply(lkb, ms, false);
dlm_put_lkb(lkb);
return 0;
}
-static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+static void _receive_cancel_reply(struct dlm_lkb *lkb,
+ const struct dlm_message *ms, bool local)
{
struct dlm_rsb *r = lkb->lkb_resource;
int error;
@@ -4737,16 +4655,15 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
if (error)
goto out;
- /* stub reply can happen with waiters_mutex held */
- error = remove_from_waiters_ms(lkb, ms);
+ error = remove_from_waiters_ms(lkb, ms, local);
if (error)
goto out;
/* this is the value returned from do_cancel() on the master */
- switch (ms->m_result) {
+ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
case -DLM_ECANCEL:
- receive_flags_reply(lkb, ms);
+ receive_flags_reply(lkb, ms, local);
revert_lock_pc(r, lkb);
queue_cast(r, lkb, -DLM_ECANCEL);
break;
@@ -4754,37 +4671,41 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
break;
default:
log_error(r->res_ls, "receive_cancel_reply %x error %d",
- lkb->lkb_id, ms->m_result);
+ lkb->lkb_id,
+ from_dlm_errno(le32_to_cpu(ms->m_result)));
}
out:
unlock_rsb(r);
put_rsb(r);
}
-static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
+static int receive_cancel_reply(struct dlm_ls *ls,
+ const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
int error;
- error = find_lkb(ls, ms->m_remid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
if (error)
return error;
- _receive_cancel_reply(lkb, ms);
+ _receive_cancel_reply(lkb, ms, false);
dlm_put_lkb(lkb);
return 0;
}
-static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+static void receive_lookup_reply(struct dlm_ls *ls,
+ const struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, ret_nodeid;
int do_lookup_list = 0;
- error = find_lkb(ls, ms->m_lkid, &lkb);
+ error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb);
if (error) {
- log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
+ log_error(ls, "%s no lkid %x", __func__,
+ le32_to_cpu(ms->m_lkid));
return;
}
@@ -4799,7 +4720,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
if (error)
goto out;
- ret_nodeid = ms->m_nodeid;
+ ret_nodeid = le32_to_cpu(ms->m_nodeid);
/* We sometimes receive a request from the dir node for this
rsb before we've received the dir node's lookup_reply for it.
@@ -4811,8 +4732,8 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
/* This should never happen */
log_error(ls, "receive_lookup_reply %x from %d ret %d "
"master %d dir %d our %d first %x %s",
- lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
- r->res_master_nodeid, r->res_dir_nodeid,
+ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
+ ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
dlm_our_nodeid(), r->res_first_lkid, r->res_name);
}
@@ -4824,7 +4745,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
} else if (ret_nodeid == -1) {
/* the remote node doesn't believe it's the dir node */
log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
- lkb->lkb_id, ms->m_header.h_nodeid);
+ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid));
r->res_master_nodeid = 0;
r->res_nodeid = -1;
lkb->lkb_nodeid = -1;
@@ -4836,7 +4757,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
if (is_overlap(lkb)) {
log_debug(ls, "receive_lookup_reply %x unlock %x",
- lkb->lkb_id, lkb->lkb_flags);
+ lkb->lkb_id, dlm_iflags_val(lkb));
queue_cast_overlap(r, lkb);
unhold_lkb(lkb); /* undoes create_lkb() */
goto out_list;
@@ -4853,15 +4774,17 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
dlm_put_lkb(lkb);
}
-static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+static void _receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
uint32_t saved_seq)
{
int error = 0, noent = 0;
- if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
+ if (WARN_ON_ONCE(!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid)))) {
log_limit(ls, "receive %d from non-member %d %x %x %d",
- ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
- ms->m_remid, ms->m_result);
+ le32_to_cpu(ms->m_type),
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
+ from_dlm_errno(le32_to_cpu(ms->m_result)));
return;
}
@@ -4869,77 +4792,78 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
/* messages sent to a master node */
- case DLM_MSG_REQUEST:
+ case cpu_to_le32(DLM_MSG_REQUEST):
error = receive_request(ls, ms);
break;
- case DLM_MSG_CONVERT:
+ case cpu_to_le32(DLM_MSG_CONVERT):
error = receive_convert(ls, ms);
break;
- case DLM_MSG_UNLOCK:
+ case cpu_to_le32(DLM_MSG_UNLOCK):
error = receive_unlock(ls, ms);
break;
- case DLM_MSG_CANCEL:
+ case cpu_to_le32(DLM_MSG_CANCEL):
noent = 1;
error = receive_cancel(ls, ms);
break;
/* messages sent from a master node (replies to above) */
- case DLM_MSG_REQUEST_REPLY:
+ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
error = receive_request_reply(ls, ms);
break;
- case DLM_MSG_CONVERT_REPLY:
+ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
error = receive_convert_reply(ls, ms);
break;
- case DLM_MSG_UNLOCK_REPLY:
+ case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
error = receive_unlock_reply(ls, ms);
break;
- case DLM_MSG_CANCEL_REPLY:
+ case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
error = receive_cancel_reply(ls, ms);
break;
/* messages sent from a master node (only two types of async msg) */
- case DLM_MSG_GRANT:
+ case cpu_to_le32(DLM_MSG_GRANT):
noent = 1;
error = receive_grant(ls, ms);
break;
- case DLM_MSG_BAST:
+ case cpu_to_le32(DLM_MSG_BAST):
noent = 1;
error = receive_bast(ls, ms);
break;
/* messages sent to a dir node */
- case DLM_MSG_LOOKUP:
+ case cpu_to_le32(DLM_MSG_LOOKUP):
receive_lookup(ls, ms);
break;
- case DLM_MSG_REMOVE:
+ case cpu_to_le32(DLM_MSG_REMOVE):
receive_remove(ls, ms);
break;
/* messages sent from a dir node (remove has no reply) */
- case DLM_MSG_LOOKUP_REPLY:
+ case cpu_to_le32(DLM_MSG_LOOKUP_REPLY):
receive_lookup_reply(ls, ms);
break;
/* other messages */
- case DLM_MSG_PURGE:
+ case cpu_to_le32(DLM_MSG_PURGE):
receive_purge(ls, ms);
break;
default:
- log_error(ls, "unknown message type %d", ms->m_type);
+ log_error(ls, "unknown message type %d",
+ le32_to_cpu(ms->m_type));
}
/*
@@ -4955,22 +4879,26 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
if (error == -ENOENT && noent) {
log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
- ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
- ms->m_lkid, saved_seq);
+ le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid), saved_seq);
} else if (error == -ENOENT) {
log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
- ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
- ms->m_lkid, saved_seq);
+ le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid), saved_seq);
- if (ms->m_type == DLM_MSG_CONVERT)
- dlm_dump_rsb_hash(ls, ms->m_hash);
+ if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT))
+ dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash));
}
if (error == -EINVAL) {
log_error(ls, "receive %d inval from %d lkid %x remid %x "
"saved_seq %u",
- ms->m_type, ms->m_header.h_nodeid,
- ms->m_lkid, ms->m_remid, saved_seq);
+ le32_to_cpu(ms->m_type),
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
+ saved_seq);
}
}
@@ -4982,30 +4910,42 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
requestqueue, to processing all the saved messages, to processing new
messages as they arrive. */
-static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
int nodeid)
{
- if (dlm_locking_stopped(ls)) {
+try_again:
+ read_lock_bh(&ls->ls_requestqueue_lock);
+ if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
/* If we were a member of this lockspace, left, and rejoined,
other nodes may still be sending us messages from the
lockspace generation before we left. */
- if (!ls->ls_generation) {
+ if (WARN_ON_ONCE(!ls->ls_generation)) {
+ read_unlock_bh(&ls->ls_requestqueue_lock);
log_limit(ls, "receive %d from %d ignore old gen",
- ms->m_type, nodeid);
+ le32_to_cpu(ms->m_type), nodeid);
return;
}
+ read_unlock_bh(&ls->ls_requestqueue_lock);
+ write_lock_bh(&ls->ls_requestqueue_lock);
+ /* recheck because we hold the write lock now */
+ if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
+ write_unlock_bh(&ls->ls_requestqueue_lock);
+ goto try_again;
+ }
+
dlm_add_requestqueue(ls, nodeid, ms);
+ write_unlock_bh(&ls->ls_requestqueue_lock);
} else {
- dlm_wait_requestqueue(ls);
_receive_message(ls, ms, 0);
+ read_unlock_bh(&ls->ls_requestqueue_lock);
}
}
/* This is called by dlm_recoverd to process messages that were saved on
the requestqueue. */
-void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
+void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms,
uint32_t saved_seq)
{
_receive_message(ls, ms, saved_seq);
@@ -5016,38 +4956,38 @@ void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
standard locking activity) or an RCOM (recovery message sent as part of
lockspace recovery). */
-void dlm_receive_buffer(union dlm_packet *p, int nodeid)
+void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
{
- struct dlm_header *hd = &p->header;
+ const struct dlm_header *hd = &p->header;
struct dlm_ls *ls;
int type = 0;
switch (hd->h_cmd) {
case DLM_MSG:
- dlm_message_in(&p->message);
- type = p->message.m_type;
+ type = le32_to_cpu(p->message.m_type);
break;
case DLM_RCOM:
- dlm_rcom_in(&p->rcom);
- type = p->rcom.rc_type;
+ type = le32_to_cpu(p->rcom.rc_type);
break;
default:
log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
return;
}
- if (hd->h_nodeid != nodeid) {
+ if (le32_to_cpu(hd->h_nodeid) != nodeid) {
log_print("invalid h_nodeid %d from %d lockspace %x",
- hd->h_nodeid, nodeid, hd->u.h_lockspace);
+ le32_to_cpu(hd->h_nodeid), nodeid,
+ le32_to_cpu(hd->u.h_lockspace));
return;
}
- ls = dlm_find_lockspace_global(hd->u.h_lockspace);
+ ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace));
if (!ls) {
if (dlm_config.ci_log_debug) {
printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
"%u from %d cmd %d type %d\n",
- hd->u.h_lockspace, nodeid, hd->h_cmd, type);
+ le32_to_cpu(hd->u.h_lockspace), nodeid,
+ hd->h_cmd, type);
}
if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
@@ -5058,35 +4998,40 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
/* this rwlock allows dlm_ls_stop() to wait for all dlm_recv threads to
be inactive (in this ls) before transitioning to recovery mode */
- down_read(&ls->ls_recv_active);
+ read_lock_bh(&ls->ls_recv_active);
if (hd->h_cmd == DLM_MSG)
dlm_receive_message(ls, &p->message, nodeid);
- else
+ else if (hd->h_cmd == DLM_RCOM)
dlm_receive_rcom(ls, &p->rcom, nodeid);
- up_read(&ls->ls_recv_active);
+ else
+ log_error(ls, "invalid h_cmd %d from %d lockspace %x",
+ hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
+ read_unlock_bh(&ls->ls_recv_active);
dlm_put_lockspace(ls);
}
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_message *ms_stub)
+ struct dlm_message *ms_local)
{
if (middle_conversion(lkb)) {
+ log_rinfo(ls, "%s %x middle convert in progress", __func__,
+ lkb->lkb_id);
+
+ /* We sent this lock to the new master. The new master will
+ * tell us when it's granted. We no longer need a reply, so
+ * use a fake reply to put the lkb into the right state.
+ */
hold_lkb(lkb);
- memset(ms_stub, 0, sizeof(struct dlm_message));
- ms_stub->m_flags = DLM_IFL_STUB_MS;
- ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
- ms_stub->m_result = -EINPROGRESS;
- ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
- _receive_convert_reply(lkb, ms_stub);
-
- /* Same special case as in receive_rcom_lock_args() */
- lkb->lkb_grmode = DLM_LOCK_IV;
- rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
+ memset(ms_local, 0, sizeof(struct dlm_message));
+ ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
+ ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS));
+ ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_convert_reply(lkb, ms_local, true);
unhold_lkb(lkb);
} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
- lkb->lkb_flags |= DLM_IFL_RESEND;
+ set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
}
/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
@@ -5117,16 +5062,14 @@ static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
struct dlm_lkb *lkb, *safe;
- struct dlm_message *ms_stub;
- int wait_type, stub_unlock_result, stub_cancel_result;
+ struct dlm_message *ms_local;
+ int wait_type, local_unlock_result, local_cancel_result;
int dir_nodeid;
- ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
- if (!ms_stub)
+ ms_local = kmalloc(sizeof(*ms_local), GFP_KERNEL);
+ if (!ms_local)
return;
- mutex_lock(&ls->ls_waiters_mutex);
-
list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
@@ -5150,7 +5093,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
resent after recovery is done */
if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
- lkb->lkb_flags |= DLM_IFL_RESEND;
+ set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
continue;
}
@@ -5158,8 +5101,8 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
continue;
wait_type = lkb->lkb_wait_type;
- stub_unlock_result = -DLM_EUNLOCK;
- stub_cancel_result = -DLM_ECANCEL;
+ local_unlock_result = -DLM_EUNLOCK;
+ local_cancel_result = -DLM_ECANCEL;
/* Main reply may have been received leaving a zero wait_type,
but a reply for the overlapping op may not have been
@@ -5170,48 +5113,46 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
if (is_overlap_cancel(lkb)) {
wait_type = DLM_MSG_CANCEL;
if (lkb->lkb_grmode == DLM_LOCK_IV)
- stub_cancel_result = 0;
+ local_cancel_result = 0;
}
if (is_overlap_unlock(lkb)) {
wait_type = DLM_MSG_UNLOCK;
if (lkb->lkb_grmode == DLM_LOCK_IV)
- stub_unlock_result = -ENOENT;
+ local_unlock_result = -ENOENT;
}
log_debug(ls, "rwpre overlap %x %x %d %d %d",
- lkb->lkb_id, lkb->lkb_flags, wait_type,
- stub_cancel_result, stub_unlock_result);
+ lkb->lkb_id, dlm_iflags_val(lkb), wait_type,
+ local_cancel_result, local_unlock_result);
}
switch (wait_type) {
case DLM_MSG_REQUEST:
- lkb->lkb_flags |= DLM_IFL_RESEND;
+ set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
break;
case DLM_MSG_CONVERT:
- recover_convert_waiter(ls, lkb, ms_stub);
+ recover_convert_waiter(ls, lkb, ms_local);
break;
case DLM_MSG_UNLOCK:
hold_lkb(lkb);
- memset(ms_stub, 0, sizeof(struct dlm_message));
- ms_stub->m_flags = DLM_IFL_STUB_MS;
- ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
- ms_stub->m_result = stub_unlock_result;
- ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
- _receive_unlock_reply(lkb, ms_stub);
+ memset(ms_local, 0, sizeof(struct dlm_message));
+ ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
+ ms_local->m_result = cpu_to_le32(to_dlm_errno(local_unlock_result));
+ ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_unlock_reply(lkb, ms_local, true);
dlm_put_lkb(lkb);
break;
case DLM_MSG_CANCEL:
hold_lkb(lkb);
- memset(ms_stub, 0, sizeof(struct dlm_message));
- ms_stub->m_flags = DLM_IFL_STUB_MS;
- ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
- ms_stub->m_result = stub_cancel_result;
- ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
- _receive_cancel_reply(lkb, ms_stub);
+ memset(ms_local, 0, sizeof(struct dlm_message));
+ ms_local->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY);
+ ms_local->m_result = cpu_to_le32(to_dlm_errno(local_cancel_result));
+ ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_cancel_reply(lkb, ms_local, true);
dlm_put_lkb(lkb);
break;
@@ -5221,45 +5162,52 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
}
schedule();
}
- mutex_unlock(&ls->ls_waiters_mutex);
- kfree(ms_stub);
+ kfree(ms_local);
}
static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
- struct dlm_lkb *lkb;
- int found = 0;
+ struct dlm_lkb *lkb = NULL, *iter;
- mutex_lock(&ls->ls_waiters_mutex);
- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
- if (lkb->lkb_flags & DLM_IFL_RESEND) {
- hold_lkb(lkb);
- found = 1;
+ spin_lock_bh(&ls->ls_waiters_lock);
+ list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
+ if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
+ hold_lkb(iter);
+ lkb = iter;
break;
}
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
- if (!found)
- lkb = NULL;
return lkb;
}
-/* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
- master or dir-node for r. Processing the lkb may result in it being placed
- back on waiters. */
-
-/* We do this after normal locking has been enabled and any saved messages
- (in requestqueue) have been processed. We should be confident that at
- this point we won't get or process a reply to any of these waiting
- operations. But, new ops may be coming in on the rsbs/locks here from
- userspace or remotely. */
-
-/* there may have been an overlap unlock/cancel prior to recovery or after
- recovery. if before, the lkb may still have a pos wait_count; if after, the
- overlap flag would just have been set and nothing new sent. we can be
- confident here than any replies to either the initial op or overlap ops
- prior to recovery have been received. */
+/*
+ * Forced state reset for locks that were in the middle of remote operations
+ * when recovery happened (i.e. lkbs that were on the waiters list, waiting
+ * for a reply from a remote operation.) The lkbs remaining on the waiters
+ * list need to be reevaluated; some may need resending to a different node
+ * than previously, and some may now need local handling rather than remote.
+ *
+ * First, the lkb state for the voided remote operation is forcibly reset,
+ * equivalent to what remove_from_waiters() would normally do:
+ * . lkb removed from ls_waiters list
+ * . lkb wait_type cleared
+ * . lkb waiters_count cleared
+ * . lkb ref count decremented for each waiters_count (almost always 1,
+ * but possibly 2 in case of cancel/unlock overlapping, which means
+ * two remote replies were being expected for the lkb.)
+ *
+ * Second, the lkb is reprocessed like an original operation would be,
+ * by passing it to _request_lock or _convert_lock, which will either
+ * process the lkb operation locally, or send it to a remote node again
+ * and put the lkb back onto the waiters list.
+ *
+ * When reprocessing the lkb, we may find that it's flagged for an overlapping
+ * force-unlock or cancel, either from before recovery began, or after recovery
+ * finished. If this is the case, the unlock/cancel is done directly, and the
+ * original operation is not initiated again (no _request_lock/_convert_lock.)
+ */
int dlm_recover_waiters_post(struct dlm_ls *ls)
{
@@ -5274,6 +5222,11 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
break;
}
+ /*
+ * Find an lkb from the waiters list that's been affected by
+ * recovery node changes, and needs to be reprocessed. Does
+ * hold_lkb(), adding a refcount.
+ */
lkb = find_resend_waiter(ls);
if (!lkb)
break;
@@ -5282,9 +5235,16 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
hold_rsb(r);
lock_rsb(r);
+ /*
+ * If the lkb has been flagged for a force unlock or cancel,
+ * then the reprocessing below will be replaced by just doing
+ * the unlock/cancel directly.
+ */
mstype = lkb->lkb_wait_type;
- oc = is_overlap_cancel(lkb);
- ou = is_overlap_unlock(lkb);
+ oc = test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT,
+ &lkb->lkb_iflags);
+ ou = test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT,
+ &lkb->lkb_iflags);
err = 0;
log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
@@ -5293,19 +5253,39 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
dlm_dir_nodeid(r), oc, ou);
- /* At this point we assume that we won't get a reply to any
- previous op or overlap op on this lock. First, do a big
- remove_from_waiters() for all previous ops. */
+ /*
+ * No reply to the pre-recovery operation will now be received,
+ * so a forced equivalent of remove_from_waiters() is needed to
+ * reset the waiters state that was in place before recovery.
+ */
- lkb->lkb_flags &= ~DLM_IFL_RESEND;
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
- lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
+ clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags);
+
+ /* Forcibly clear wait_type */
lkb->lkb_wait_type = 0;
- lkb->lkb_wait_count = 0;
- mutex_lock(&ls->ls_waiters_mutex);
+
+ /*
+ * Forcibly reset wait_count and associated refcount. The
+ * wait_count will almost always be 1, but in case of an
+ * overlapping unlock/cancel it could be 2: see where
+ * add_to_waiters() finds the lkb is already on the waiters
+ * list and does lkb_wait_count++; hold_lkb().
+ */
+ while (lkb->lkb_wait_count) {
+ lkb->lkb_wait_count--;
+ unhold_lkb(lkb);
+ }
+
+ /* Forcibly remove from waiters list */
+ spin_lock_bh(&ls->ls_waiters_lock);
list_del_init(&lkb->lkb_wait_reply);
- mutex_unlock(&ls->ls_waiters_mutex);
- unhold_lkb(lkb); /* for waiters list */
+ spin_unlock_bh(&ls->ls_waiters_lock);
+
+ /*
+ * The lkb is now clear of all prior waiters state and can be
+ * processed locally, or sent to remote node again, or directly
+ * cancelled/unlocked.
+ */
if (oc || ou) {
/* do an unlock or cancel instead of resending */
@@ -5332,7 +5312,7 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
_request_lock(r, lkb);
- if (is_master(r))
+ if (r->res_nodeid != -1 && is_master(r))
confirm_master(r, 0);
break;
case DLM_MSG_CONVERT:
@@ -5424,7 +5404,7 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
/* Get rid of locks held by nodes that are gone. */
-void dlm_recover_purge(struct dlm_ls *ls)
+void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list)
{
struct dlm_rsb *r;
struct dlm_member *memb;
@@ -5443,11 +5423,9 @@ void dlm_recover_purge(struct dlm_ls *ls)
if (!nodes_count)
return;
- down_write(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
- hold_rsb(r);
+ list_for_each_entry(r, root_list, res_root_list) {
lock_rsb(r);
- if (is_master(r)) {
+ if (r->res_nodeid != -1 && is_master(r)) {
purge_dead_list(ls, r, &r->res_grantqueue,
nodeid_gone, &lkb_count);
purge_dead_list(ls, r, &r->res_convertqueue,
@@ -5456,25 +5434,21 @@ void dlm_recover_purge(struct dlm_ls *ls)
nodeid_gone, &lkb_count);
}
unlock_rsb(r);
- unhold_rsb(r);
+
cond_resched();
}
- up_write(&ls->ls_root_sem);
if (lkb_count)
log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
lkb_count, nodes_count);
}
-static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
+static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls)
{
- struct rb_node *n;
struct dlm_rsb *r;
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
-
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
if (!rsb_flag(r, RSB_RECOVER_GRANT))
continue;
if (!is_master(r)) {
@@ -5482,10 +5456,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
continue;
}
hold_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
return r;
}
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
return NULL;
}
@@ -5509,19 +5483,15 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
void dlm_recover_grant(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- int bucket = 0;
unsigned int count = 0;
unsigned int rsb_count = 0;
unsigned int lkb_count = 0;
while (1) {
- r = find_grant_rsb(ls, bucket);
- if (!r) {
- if (bucket == ls->ls_rsbtbl_size - 1)
- break;
- bucket++;
- continue;
- }
+ r = find_grant_rsb(ls);
+ if (!r)
+ break;
+
rsb_count++;
count = 0;
lock_rsb(r);
@@ -5571,16 +5541,16 @@ static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
/* needs at least dlm_rcom + rcom_lock */
static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
- struct dlm_rsb *r, struct dlm_rcom *rc)
+ struct dlm_rsb *r, const struct dlm_rcom *rc)
{
struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
- lkb->lkb_nodeid = rc->rc_header.h_nodeid;
+ lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
- lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
- lkb->lkb_flags |= DLM_IFL_MSTCPY;
+ dlm_set_dflags_val(lkb, le32_to_cpu(rl->rl_flags));
+ set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags);
lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
lkb->lkb_rqmode = rl->rl_rqmode;
lkb->lkb_grmode = rl->rl_grmode;
@@ -5590,8 +5560,8 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
- int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
- sizeof(struct rcom_lock);
+ int lvblen = le16_to_cpu(rc->rc_header.h_length) -
+ sizeof(struct dlm_rcom) - sizeof(struct rcom_lock);
if (lvblen > ls->ls_lvblen)
return -EINVAL;
lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
@@ -5604,10 +5574,11 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
The real granted mode of these converting locks cannot be determined
until all locks have been rebuilt on the rsb (recover_conversion) */
- if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
- middle_conversion(lkb)) {
- rl->rl_status = DLM_LKSTS_CONVERT;
- lkb->lkb_grmode = DLM_LOCK_IV;
+ if (rl->rl_status == DLM_LKSTS_CONVERT && middle_conversion(lkb)) {
+ /* We may need to adjust grmode depending on other granted locks. */
+ log_rinfo(ls, "%s %x middle convert gr %d rq %d remote %d %x",
+ __func__, lkb->lkb_id, lkb->lkb_grmode,
+ lkb->lkb_rqmode, lkb->lkb_nodeid, lkb->lkb_remid);
rsb_set_flag(r, RSB_RECOVER_CONVERT);
}
@@ -5621,15 +5592,19 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
back the rcom_lock struct we got but with the remid field filled in. */
/* needs at least dlm_rcom + rcom_lock */
-int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+int dlm_recover_master_copy(struct dlm_ls *ls, const struct dlm_rcom *rc,
+ __le32 *rl_remid, __le32 *rl_result)
{
struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
struct dlm_rsb *r;
struct dlm_lkb *lkb;
uint32_t remid = 0;
- int from_nodeid = rc->rc_header.h_nodeid;
+ int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
int error;
+ /* initialize rl_remid from the rcom lock's rl_remid */
+ *rl_remid = rl->rl_remid;
+
if (rl->rl_parent_lkid) {
error = -EOPNOTSUPP;
goto out;
@@ -5677,7 +5652,6 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
attach_lkb(r, lkb);
add_lkb(r, lkb, rl->rl_status);
- error = 0;
ls->ls_recover_locks_in++;
if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
@@ -5686,7 +5660,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
out_remid:
/* this is the new value returned to the lock holder for
saving in its process-copy lkb */
- rl->rl_remid = cpu_to_le32(lkb->lkb_id);
+ *rl_remid = cpu_to_le32(lkb->lkb_id);
lkb->lkb_recover_seq = ls->ls_recover_seq;
@@ -5697,12 +5671,13 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
if (error && error != -EEXIST)
log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
from_nodeid, remid, error);
- rl->rl_result = cpu_to_le32(error);
+ *rl_result = cpu_to_le32(error);
return error;
}
/* needs at least dlm_rcom + rcom_lock */
-int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+int dlm_recover_process_copy(struct dlm_ls *ls, const struct dlm_rcom *rc,
+ uint64_t seq)
{
struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
struct dlm_rsb *r;
@@ -5717,7 +5692,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
error = find_lkb(ls, lkid, &lkb);
if (error) {
log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
- lkid, rc->rc_header.h_nodeid, remid, result);
+ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
+ result);
return error;
}
@@ -5727,7 +5703,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
if (!is_process_copy(lkb)) {
log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
- lkid, rc->rc_header.h_nodeid, remid, result);
+ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
+ result);
dlm_dump_rsb(r);
unlock_rsb(r);
put_rsb(r);
@@ -5742,9 +5719,10 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
a barrier between recover_masters and recover_locks. */
log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
- lkid, rc->rc_header.h_nodeid, remid, result);
+ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
+ result);
- dlm_send_rcom_lock(r, lkb);
+ dlm_send_rcom_lock(r, lkb, seq);
goto out;
case -EEXIST:
case 0:
@@ -5752,7 +5730,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
break;
default:
log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
- lkid, rc->rc_header.h_nodeid, remid, result);
+ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
+ result);
}
/* an ack for dlm_recover_locks() which waits for replies from
@@ -5767,11 +5746,11 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
}
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
- int mode, uint32_t flags, void *name, unsigned int namelen,
- unsigned long timeout_cs)
+ int mode, uint32_t flags, void *name, unsigned int namelen)
{
struct dlm_lkb *lkb;
struct dlm_args args;
+ bool do_put = true;
int error;
dlm_lock_recovery(ls);
@@ -5782,29 +5761,29 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
goto out;
}
+ trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
+
if (flags & DLM_LKF_VALBLK) {
ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
kfree(ua);
- __put_lkb(ls, lkb);
error = -ENOMEM;
- goto out;
+ goto out_put;
}
}
- error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
- fake_astfn, ua, fake_bastfn, &args);
+ error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua,
+ fake_bastfn, &args);
if (error) {
kfree(ua->lksb.sb_lvbptr);
ua->lksb.sb_lvbptr = NULL;
kfree(ua);
- __put_lkb(ls, lkb);
- goto out;
+ goto out_put;
}
/* After ua is attached to lkb it will be freed by dlm_free_lkb().
- When DLM_IFL_USER is set, the dlm knows that this is a userspace
+ When DLM_DFL_USER_BIT is set, the dlm knows that this is a userspace
lock and that lkb_astparam is the dlm_user_args structure. */
- lkb->lkb_flags |= DLM_IFL_USER;
+ set_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags);
error = request_lock(ls, lkb, name, namelen, &args);
switch (error) {
@@ -5817,23 +5796,26 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
error = 0;
fallthrough;
default:
- __put_lkb(ls, lkb);
- goto out;
+ goto out_put;
}
/* add this new lkb to the per-process list of locks */
- spin_lock(&ua->proc->locks_spin);
+ spin_lock_bh(&ua->proc->locks_spin);
hold_lkb(lkb);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
- spin_unlock(&ua->proc->locks_spin);
+ spin_unlock_bh(&ua->proc->locks_spin);
+ do_put = false;
+ out_put:
+ trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
+ if (do_put)
+ __put_lkb(ls, lkb);
out:
dlm_unlock_recovery(ls);
return error;
}
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
- int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
- unsigned long timeout_cs)
+ int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
{
struct dlm_lkb *lkb;
struct dlm_args args;
@@ -5846,6 +5828,8 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out;
+ trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);
+
/* user can change the params on its lock when it converts it, or
add an lvb that didn't exist before */
@@ -5868,8 +5852,8 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
ua->bastaddr = ua_tmp->bastaddr;
ua->user_lksb = ua_tmp->user_lksb;
- error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
- fake_astfn, ua, fake_bastfn, &args);
+ error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua,
+ fake_bastfn, &args);
if (error)
goto out_put;
@@ -5878,6 +5862,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
error = 0;
out_put:
+ trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -5893,39 +5878,38 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int mode, uint32_t flags, void *name, unsigned int namelen,
- unsigned long timeout_cs, uint32_t *lkid)
+ uint32_t *lkid)
{
- struct dlm_lkb *lkb;
+ struct dlm_lkb *lkb = NULL, *iter;
struct dlm_user_args *ua;
int found_other_mode = 0;
- int found = 0;
int rv = 0;
- mutex_lock(&ls->ls_orphans_mutex);
- list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
- if (lkb->lkb_resource->res_length != namelen)
+ spin_lock_bh(&ls->ls_orphans_lock);
+ list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
+ if (iter->lkb_resource->res_length != namelen)
continue;
- if (memcmp(lkb->lkb_resource->res_name, name, namelen))
+ if (memcmp(iter->lkb_resource->res_name, name, namelen))
continue;
- if (lkb->lkb_grmode != mode) {
+ if (iter->lkb_grmode != mode) {
found_other_mode = 1;
continue;
}
- found = 1;
- list_del_init(&lkb->lkb_ownqueue);
- lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
- *lkid = lkb->lkb_id;
+ lkb = iter;
+ list_del_init(&iter->lkb_ownqueue);
+ clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags);
+ *lkid = iter->lkb_id;
break;
}
- mutex_unlock(&ls->ls_orphans_mutex);
+ spin_unlock_bh(&ls->ls_orphans_lock);
- if (!found && found_other_mode) {
+ if (!lkb && found_other_mode) {
rv = -EAGAIN;
goto out;
}
- if (!found) {
+ if (!lkb) {
rv = -ENOENT;
goto out;
}
@@ -5949,9 +5933,9 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
* for the proc locks list.
*/
- spin_lock(&ua->proc->locks_spin);
+ spin_lock_bh(&ua->proc->locks_spin);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
- spin_unlock(&ua->proc->locks_spin);
+ spin_unlock_bh(&ua->proc->locks_spin);
out:
kfree(ua_tmp);
return rv;
@@ -5971,6 +5955,8 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
if (lvb_in && ua->lksb.sb_lvbptr)
@@ -5993,12 +5979,13 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out_put;
- spin_lock(&ua->proc->locks_spin);
+ spin_lock_bh(&ua->proc->locks_spin);
/* dlm_user_add_cb() may have already taken lkb off the proc list */
if (!list_empty(&lkb->lkb_ownqueue))
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
- spin_unlock(&ua->proc->locks_spin);
+ spin_unlock_bh(&ua->proc->locks_spin);
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -6020,6 +6007,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
if (ua_tmp->castparam)
ua->castparam = ua_tmp->castparam;
@@ -6037,6 +6026,7 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error == -EBUSY)
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -6058,6 +6048,8 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
if (error)
goto out;
+ trace_dlm_unlock_start(ls, lkb, flags);
+
ua = lkb->lkb_ua;
error = set_unlock_args(flags, ua, &args);
@@ -6073,7 +6065,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
error = validate_unlock_args(lkb, &args);
if (error)
goto out_r;
- lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
+ set_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags);
error = _cancel_lock(r, lkb);
out_r:
@@ -6086,6 +6078,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
if (error == -EBUSY)
error = 0;
out_put:
+ trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
@@ -6101,9 +6094,9 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
int error;
hold_lkb(lkb); /* reference for the ls_orphans list */
- mutex_lock(&ls->ls_orphans_mutex);
+ spin_lock_bh(&ls->ls_orphans_lock);
list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
- mutex_unlock(&ls->ls_orphans_mutex);
+ spin_unlock_bh(&ls->ls_orphans_lock);
set_unlock_args(0, lkb->lkb_ua, &args);
@@ -6141,7 +6134,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
{
struct dlm_lkb *lkb = NULL;
- mutex_lock(&ls->ls_clear_proc_locks);
+ spin_lock_bh(&ls->ls_clear_proc_locks);
if (list_empty(&proc->locks))
goto out;
@@ -6149,11 +6142,11 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
list_del_init(&lkb->lkb_ownqueue);
if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
- lkb->lkb_flags |= DLM_IFL_ORPHAN;
+ set_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags);
else
- lkb->lkb_flags |= DLM_IFL_DEAD;
+ set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
out:
- mutex_unlock(&ls->ls_clear_proc_locks);
+ spin_unlock_bh(&ls->ls_clear_proc_locks);
return lkb;
}
@@ -6169,6 +6162,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
+ struct dlm_callback *cb, *cb_safe;
struct dlm_lkb *lkb, *safe;
dlm_lock_recovery(ls);
@@ -6177,7 +6171,6 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
lkb = del_proc_lock(ls, proc);
if (!lkb)
break;
- del_timeout(lkb);
if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
orphan_proc_lock(ls, lkb);
else
@@ -6190,64 +6183,61 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb);
}
- mutex_lock(&ls->ls_clear_proc_locks);
+ spin_lock_bh(&ls->ls_clear_proc_locks);
/* in-progress unlocks */
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
list_del_init(&lkb->lkb_ownqueue);
- lkb->lkb_flags |= DLM_IFL_DEAD;
+ set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
dlm_put_lkb(lkb);
}
- list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
- memset(&lkb->lkb_callbacks, 0,
- sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
- list_del_init(&lkb->lkb_cb_list);
- dlm_put_lkb(lkb);
+ list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
+ list_del(&cb->list);
+ dlm_free_cb(cb);
}
- mutex_unlock(&ls->ls_clear_proc_locks);
+ spin_unlock_bh(&ls->ls_clear_proc_locks);
dlm_unlock_recovery(ls);
}
static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
+ struct dlm_callback *cb, *cb_safe;
struct dlm_lkb *lkb, *safe;
while (1) {
lkb = NULL;
- spin_lock(&proc->locks_spin);
+ spin_lock_bh(&proc->locks_spin);
if (!list_empty(&proc->locks)) {
lkb = list_entry(proc->locks.next, struct dlm_lkb,
lkb_ownqueue);
list_del_init(&lkb->lkb_ownqueue);
}
- spin_unlock(&proc->locks_spin);
+ spin_unlock_bh(&proc->locks_spin);
if (!lkb)
break;
- lkb->lkb_flags |= DLM_IFL_DEAD;
+ set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
unlock_proc_lock(ls, lkb);
dlm_put_lkb(lkb); /* ref from proc->locks list */
}
- spin_lock(&proc->locks_spin);
+ spin_lock_bh(&proc->locks_spin);
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
list_del_init(&lkb->lkb_ownqueue);
- lkb->lkb_flags |= DLM_IFL_DEAD;
+ set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
dlm_put_lkb(lkb);
}
- spin_unlock(&proc->locks_spin);
+ spin_unlock_bh(&proc->locks_spin);
- spin_lock(&proc->asts_spin);
- list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
- memset(&lkb->lkb_callbacks, 0,
- sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
- list_del_init(&lkb->lkb_cb_list);
- dlm_put_lkb(lkb);
+ spin_lock_bh(&proc->asts_spin);
+ list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
+ list_del(&cb->list);
+ dlm_free_cb(cb);
}
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
}
/* pid of 0 means purge all orphans */
@@ -6256,7 +6246,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
struct dlm_lkb *lkb, *safe;
- mutex_lock(&ls->ls_orphans_mutex);
+ spin_lock_bh(&ls->ls_orphans_lock);
list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
if (pid && lkb->lkb_ownpid != pid)
continue;
@@ -6264,7 +6254,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb);
}
- mutex_unlock(&ls->ls_orphans_mutex);
+ spin_unlock_bh(&ls->ls_orphans_lock);
}
static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
@@ -6277,10 +6267,10 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
DLM_MSG_PURGE, &ms, &mh);
if (error)
return error;
- ms->m_nodeid = nodeid;
- ms->m_pid = pid;
+ ms->m_nodeid = cpu_to_le32(nodeid);
+ ms->m_pid = cpu_to_le32(pid);
- return send_message(mh, ms);
+ return send_message(mh, ms, NULL, 0);
}
int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
@@ -6301,3 +6291,64 @@ int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
return error;
}
+/* debug functionality */
+int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
+ int lkb_nodeid, unsigned int lkb_dflags, int lkb_status)
+{
+ struct dlm_lksb *lksb;
+ struct dlm_lkb *lkb;
+ struct dlm_rsb *r;
+ int error;
+
+ /* we currently can't set a valid user lock */
+ if (lkb_dflags & BIT(DLM_DFL_USER_BIT))
+ return -EOPNOTSUPP;
+
+ lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
+ if (!lksb)
+ return -ENOMEM;
+
+ error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1);
+ if (error) {
+ kfree(lksb);
+ return error;
+ }
+
+ dlm_set_dflags_val(lkb, lkb_dflags);
+ lkb->lkb_nodeid = lkb_nodeid;
+ lkb->lkb_lksb = lksb;
+ /* user specific pointer; just don't leave it NULL for kernel locks */
+ if (~lkb_dflags & BIT(DLM_DFL_USER_BIT))
+ lkb->lkb_astparam = (void *)0xDEADBEEF;
+
+ error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
+ if (error) {
+ kfree(lksb);
+ __put_lkb(ls, lkb);
+ return error;
+ }
+
+ lock_rsb(r);
+ attach_lkb(r, lkb);
+ add_lkb(r, lkb, lkb_status);
+ unlock_rsb(r);
+ put_rsb(r);
+
+ return 0;
+}
+
+int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
+ int mstype, int to_nodeid)
+{
+ struct dlm_lkb *lkb;
+ int error;
+
+ error = find_lkb(ls, lkb_id, &lkb);
+ if (error)
+ return error;
+
+ add_to_waiters(lkb, mstype, to_nodeid);
+ dlm_put_lkb(lkb);
+ return 0;
+}
+
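[editor's note: illustrative only, not part of the patch] The two debug helpers added above are meant to be driven from test/debug code. As a minimal sketch of how a caller could exercise them, assuming the usual fs/dlm headers and status/message constants (DLM_LKSTS_GRANTED, DLM_MSG_REQUEST); the function name, resource name, lock id and node id below are invented for illustration:

static int dlm_debug_inject_example(struct dlm_ls *ls)
{
	char name[] = "test_resource";	/* made-up resource name */
	uint32_t lkb_id = 0x10001;	/* arbitrary test lock id */
	int error;

	/* create a kernel (non-user) lkb on the granted queue of the rsb */
	error = dlm_debug_add_lkb(ls, lkb_id, name, sizeof(name) - 1,
				  0 /* local node */, 0 /* no dflags */,
				  DLM_LKSTS_GRANTED);
	if (error)
		return error;

	/* pretend a DLM_MSG_REQUEST to node 2 is outstanding for it */
	return dlm_debug_add_lkb_to_waiters(ls, lkb_id, DLM_MSG_REQUEST, 2);
}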
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 456c6ec3ef6f..b23d7b854ed4 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -12,44 +12,45 @@
#define __LOCK_DOT_H__
void dlm_dump_rsb(struct dlm_rsb *r);
-void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len);
+void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len);
void dlm_print_lkb(struct dlm_lkb *lkb);
-void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
+void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms,
uint32_t saved_seq);
-void dlm_receive_buffer(union dlm_packet *p, int nodeid);
+void dlm_receive_buffer(const union dlm_packet *p, int nodeid);
int dlm_modes_compat(int mode1, int mode2);
+void free_inactive_rsb(struct dlm_rsb *r);
void dlm_put_rsb(struct dlm_rsb *r);
void dlm_hold_rsb(struct dlm_rsb *r);
int dlm_put_lkb(struct dlm_lkb *lkb);
-void dlm_scan_rsbs(struct dlm_ls *ls);
int dlm_lock_recovery_try(struct dlm_ls *ls);
+void dlm_lock_recovery(struct dlm_ls *ls);
void dlm_unlock_recovery(struct dlm_ls *ls);
-void dlm_scan_waiters(struct dlm_ls *ls);
-void dlm_scan_timeout(struct dlm_ls *ls);
-void dlm_adjust_timeouts(struct dlm_ls *ls);
-int dlm_master_lookup(struct dlm_ls *ls, int nodeid, char *name, int len,
- unsigned int flags, int *r_nodeid, int *result);
+void dlm_rsb_scan(struct timer_list *timer);
+void resume_scan_timer(struct dlm_ls *ls);
-int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
+int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
+ int len, unsigned int flags, int *r_nodeid, int *result);
+
+int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len,
struct dlm_rsb **r_ret);
-void dlm_recover_purge(struct dlm_ls *ls);
+void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list);
void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
void dlm_recover_grant(struct dlm_ls *ls);
int dlm_recover_waiters_post(struct dlm_ls *ls);
void dlm_recover_waiters_pre(struct dlm_ls *ls);
-int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
-int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
+int dlm_recover_master_copy(struct dlm_ls *ls, const struct dlm_rcom *rc,
+ __le32 *rl_remid, __le32 *rl_result);
+int dlm_recover_process_copy(struct dlm_ls *ls, const struct dlm_rcom *rc,
+ uint64_t seq);
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
- uint32_t flags, void *name, unsigned int namelen,
- unsigned long timeout_cs);
+ uint32_t flags, void *name, unsigned int namelen);
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
- int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
- unsigned long timeout_cs);
+ int mode, uint32_t flags, uint32_t lkid, char *lvb_in);
int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int mode, uint32_t flags, void *name, unsigned int namelen,
- unsigned long timeout_cs, uint32_t *lkid);
+ uint32_t *lkid);
int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
uint32_t flags, uint32_t lkid, char *lvb_in);
int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
@@ -58,20 +59,26 @@ int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
int nodeid, int pid);
int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid);
void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc);
+int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
+ int lkb_nodeid, unsigned int lkb_flags, int lkb_status);
+int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
+ int mstype, int to_nodeid);
static inline int is_master(struct dlm_rsb *r)
{
+ WARN_ON_ONCE(r->res_nodeid == -1);
+
return !r->res_nodeid;
}
static inline void lock_rsb(struct dlm_rsb *r)
{
- mutex_lock(&r->res_mutex);
+ spin_lock_bh(&r->res_lock);
}
static inline void unlock_rsb(struct dlm_rsb *r)
{
- mutex_unlock(&r->res_mutex);
+ spin_unlock_bh(&r->res_lock);
}
#endif
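[editor's note: illustrative only, not part of the patch] is_master() now warns if res_nodeid is still -1 (master not yet known), so callers that can see such an rsb must check for that case explicitly, as the lock.c hunks above do in dlm_recover_waiters_post() and dlm_recover_purge(). A minimal sketch of the resulting calling pattern (the function name here is hypothetical):

static void example_master_only_work(struct dlm_rsb *r)
{
	lock_rsb(r);		/* now spin_lock_bh(&r->res_lock), not a mutex */
	if (r->res_nodeid != -1 && is_master(r)) {
		/* ... work that is only valid on the master node ... */
	}
	unlock_rsb(r);
}

Since lock_rsb() is now a bh-disabling spinlock rather than a mutex, nothing that can sleep may be done while the rsb is held.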
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 10eddfa6c3d7..ddaa76558706 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -17,7 +17,6 @@
#include "recoverd.h"
#include "dir.h"
#include "midcomms.h"
-#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
@@ -30,8 +29,6 @@ static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
-static struct task_struct * scand_task;
-
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
@@ -41,7 +38,7 @@ static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
if (rc)
return rc;
- ls = dlm_find_lockspace_local(ls->ls_local_handle);
+ ls = dlm_find_lockspace_local(ls);
if (!ls)
return -EINVAL;
@@ -177,12 +174,6 @@ static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
return a->store ? a->store(ls, buf, len) : len;
}
-static void lockspace_kobj_release(struct kobject *k)
-{
- struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
- kfree(ls);
-}
-
static const struct sysfs_ops dlm_attr_ops = {
.show = dlm_attr_show,
.store = dlm_attr_store,
@@ -191,17 +182,21 @@ static const struct sysfs_ops dlm_attr_ops = {
static struct kobj_type dlm_ktype = {
.default_groups = dlm_groups,
.sysfs_ops = &dlm_attr_ops,
- .release = lockspace_kobj_release,
};
static struct kset *dlm_kset;
-static int do_uevent(struct dlm_ls *ls, int in)
+static int do_uevent(struct dlm_ls *ls, int in, unsigned int release_recover)
{
- if (in)
+ char message[512] = {};
+ char *envp[] = { message, NULL };
+
+ if (in) {
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
- else
- kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
+ } else {
+ snprintf(message, 511, "RELEASE_RECOVER=%u", release_recover);
+ kobject_uevent_env(&ls->ls_kobj, KOBJ_OFFLINE, envp);
+ }
log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");
@@ -216,10 +211,9 @@ static int do_uevent(struct dlm_ls *ls, int in)
return ls->ls_uevent_result;
}
-static int dlm_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int dlm_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
- struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
+ const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
return 0;
@@ -249,95 +243,29 @@ void dlm_lockspace_exit(void)
kset_unregister(dlm_kset);
}
-static struct dlm_ls *find_ls_to_scan(void)
-{
- struct dlm_ls *ls;
-
- spin_lock(&lslist_lock);
- list_for_each_entry(ls, &lslist, ls_list) {
- if (time_after_eq(jiffies, ls->ls_scan_time +
- dlm_config.ci_scan_secs * HZ)) {
- spin_unlock(&lslist_lock);
- return ls;
- }
- }
- spin_unlock(&lslist_lock);
- return NULL;
-}
-
-static int dlm_scand(void *data)
-{
- struct dlm_ls *ls;
-
- while (!kthread_should_stop()) {
- ls = find_ls_to_scan();
- if (ls) {
- if (dlm_lock_recovery_try(ls)) {
- ls->ls_scan_time = jiffies;
- dlm_scan_rsbs(ls);
- dlm_scan_timeout(ls);
- dlm_scan_waiters(ls);
- dlm_unlock_recovery(ls);
- } else {
- ls->ls_scan_time += HZ;
- }
- continue;
- }
- schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
- }
- return 0;
-}
-
-static int dlm_scand_start(void)
-{
- struct task_struct *p;
- int error = 0;
-
- p = kthread_run(dlm_scand, NULL, "dlm_scand");
- if (IS_ERR(p))
- error = PTR_ERR(p);
- else
- scand_task = p;
- return error;
-}
-
-static void dlm_scand_stop(void)
-{
- kthread_stop(scand_task);
-}
-
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
struct dlm_ls *ls;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_global_id == id) {
- ls->ls_count++;
+ atomic_inc(&ls->ls_count);
goto out;
}
}
ls = NULL;
out:
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
return ls;
}
struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
- struct dlm_ls *ls;
+ struct dlm_ls *ls = lockspace;
- spin_lock(&lslist_lock);
- list_for_each_entry(ls, &lslist, ls_list) {
- if (ls->ls_local_handle == lockspace) {
- ls->ls_count++;
- goto out;
- }
- }
- ls = NULL;
- out:
- spin_unlock(&lslist_lock);
+ atomic_inc(&ls->ls_count);
return ls;
}
@@ -345,64 +273,89 @@ struct dlm_ls *dlm_find_lockspace_device(int minor)
{
struct dlm_ls *ls;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_device.minor == minor) {
- ls->ls_count++;
+ atomic_inc(&ls->ls_count);
goto out;
}
}
ls = NULL;
out:
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
return ls;
}
void dlm_put_lockspace(struct dlm_ls *ls)
{
- spin_lock(&lslist_lock);
- ls->ls_count--;
- spin_unlock(&lslist_lock);
+ if (atomic_dec_and_test(&ls->ls_count))
+ wake_up(&ls->ls_count_wait);
}
static void remove_lockspace(struct dlm_ls *ls)
{
- for (;;) {
- spin_lock(&lslist_lock);
- if (ls->ls_count == 0) {
- WARN_ON(ls->ls_create_count != 0);
- list_del(&ls->ls_list);
- spin_unlock(&lslist_lock);
- return;
- }
- spin_unlock(&lslist_lock);
- ssleep(1);
+retry:
+ wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);
+
+ spin_lock_bh(&lslist_lock);
+ if (atomic_read(&ls->ls_count) != 0) {
+ spin_unlock_bh(&lslist_lock);
+ goto retry;
}
+
+ WARN_ON(ls->ls_create_count != 0);
+ list_del(&ls->ls_list);
+ spin_unlock_bh(&lslist_lock);
}
static int threads_start(void)
{
int error;
- error = dlm_scand_start();
- if (error) {
- log_print("cannot start dlm_scand thread %d", error);
- goto fail;
- }
-
/* Thread for sending/receiving messages for all lockspace's */
error = dlm_midcomms_start();
- if (error) {
- log_print("cannot start dlm lowcomms %d", error);
- goto scand_fail;
- }
+ if (error)
+ log_print("cannot start dlm midcomms %d", error);
+
+ return error;
+}
+
+static int lkb_idr_free(struct dlm_lkb *lkb)
+{
+ if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
+ dlm_free_lvb(lkb->lkb_lvbptr);
+ dlm_free_lkb(lkb);
return 0;
+}
- scand_fail:
- dlm_scand_stop();
- fail:
- return error;
+static void rhash_free_rsb(void *ptr, void *arg)
+{
+ struct dlm_rsb *rsb = ptr;
+
+ dlm_free_rsb(rsb);
+}
+
+static void free_lockspace(struct work_struct *work)
+{
+ struct dlm_ls *ls = container_of(work, struct dlm_ls, ls_free_work);
+ struct dlm_lkb *lkb;
+ unsigned long id;
+
+ /*
+ * Free all lkb's in xa
+ */
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ lkb_idr_free(lkb);
+ }
+ xa_destroy(&ls->ls_lkbxa);
+
+ /*
+ * Free all rsb's on rsbtbl
+ */
+ rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
+
+ kfree(ls);
}
static int new_lockspace(const char *name, const char *cluster,
@@ -411,14 +364,13 @@ static int new_lockspace(const char *name, const char *cluster,
int *ops_result, dlm_lockspace_t **lockspace)
{
struct dlm_ls *ls;
- int i, size, error;
- int do_unreg = 0;
int namelen = strlen(name);
+ int error;
if (namelen > DLM_LOCKSPACE_LEN || namelen == 0)
return -EINVAL;
- if (!lvblen || (lvblen % 8))
+ if (lvblen % 8)
return -EINVAL;
if (!try_module_get(THIS_MODULE))
@@ -452,7 +404,7 @@ static int new_lockspace(const char *name, const char *cluster,
error = 0;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
WARN_ON(ls->ls_create_count <= 0);
if (ls->ls_namelen != namelen)
@@ -468,69 +420,52 @@ static int new_lockspace(const char *name, const char *cluster,
error = 1;
break;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (error)
goto out;
error = -ENOMEM;
- ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
+ ls = kzalloc(sizeof(*ls), GFP_NOFS);
if (!ls)
goto out;
memcpy(ls->ls_name, name, namelen);
ls->ls_namelen = namelen;
ls->ls_lvblen = lvblen;
- ls->ls_count = 0;
+ atomic_set(&ls->ls_count, 0);
+ init_waitqueue_head(&ls->ls_count_wait);
ls->ls_flags = 0;
- ls->ls_scan_time = jiffies;
if (ops && dlm_config.ci_recover_callbacks) {
ls->ls_ops = ops;
ls->ls_ops_arg = ops_arg;
}
- if (flags & DLM_LSFL_TIMEWARN)
- set_bit(LSFL_TIMEWARN, &ls->ls_flags);
+ if (flags & DLM_LSFL_SOFTIRQ)
+ set_bit(LSFL_SOFTIRQ, &ls->ls_flags);
/* ls_exflags are forced to match among nodes, and we don't
- need to require all nodes to have some flags set */
- ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
- DLM_LSFL_NEWEXCL));
+ * need to require all nodes to have some flags set
+ */
+ ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
+ DLM_LSFL_SOFTIRQ));
- size = READ_ONCE(dlm_config.ci_rsbtbl_size);
- ls->ls_rsbtbl_size = size;
+ INIT_LIST_HEAD(&ls->ls_slow_inactive);
+ INIT_LIST_HEAD(&ls->ls_slow_active);
+ rwlock_init(&ls->ls_rsbtbl_lock);
- ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
- if (!ls->ls_rsbtbl)
+ error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
+ if (error)
goto out_lsfree;
- for (i = 0; i < size; i++) {
- ls->ls_rsbtbl[i].keep.rb_node = NULL;
- ls->ls_rsbtbl[i].toss.rb_node = NULL;
- spin_lock_init(&ls->ls_rsbtbl[i].lock);
- }
- spin_lock_init(&ls->ls_remove_spin);
-
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
- ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
- GFP_KERNEL);
- if (!ls->ls_remove_names[i])
- goto out_rsbtbl;
- }
-
- idr_init(&ls->ls_lkbidr);
- spin_lock_init(&ls->ls_lkbidr_spin);
+ xa_init_flags(&ls->ls_lkbxa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+ rwlock_init(&ls->ls_lkbxa_lock);
INIT_LIST_HEAD(&ls->ls_waiters);
- mutex_init(&ls->ls_waiters_mutex);
+ spin_lock_init(&ls->ls_waiters_lock);
INIT_LIST_HEAD(&ls->ls_orphans);
- mutex_init(&ls->ls_orphans_mutex);
- INIT_LIST_HEAD(&ls->ls_timeout);
- mutex_init(&ls->ls_timeout_mutex);
-
- INIT_LIST_HEAD(&ls->ls_new_rsb);
- spin_lock_init(&ls->ls_new_rsb_spin);
+ spin_lock_init(&ls->ls_orphans_lock);
INIT_LIST_HEAD(&ls->ls_nodes);
INIT_LIST_HEAD(&ls->ls_nodes_gone);
@@ -539,33 +474,35 @@ static int new_lockspace(const char *name, const char *cluster,
ls->ls_total_weight = 0;
ls->ls_node_array = NULL;
- memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
- ls->ls_stub_rsb.res_ls = ls;
+ memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb));
+ ls->ls_local_rsb.res_ls = ls;
ls->ls_debug_rsb_dentry = NULL;
ls->ls_debug_waiters_dentry = NULL;
init_waitqueue_head(&ls->ls_uevent_wait);
ls->ls_uevent_result = 0;
- init_completion(&ls->ls_members_done);
- ls->ls_members_result = -1;
+ init_completion(&ls->ls_recovery_done);
+ ls->ls_recovery_result = -1;
- mutex_init(&ls->ls_cb_mutex);
+ spin_lock_init(&ls->ls_cb_lock);
INIT_LIST_HEAD(&ls->ls_cb_delay);
+ INIT_WORK(&ls->ls_free_work, free_lockspace);
+
ls->ls_recoverd_task = NULL;
mutex_init(&ls->ls_recoverd_active);
spin_lock_init(&ls->ls_recover_lock);
spin_lock_init(&ls->ls_rcom_spin);
get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
ls->ls_recover_status = 0;
- ls->ls_recover_seq = 0;
+ ls->ls_recover_seq = get_random_u64();
ls->ls_recover_args = NULL;
init_rwsem(&ls->ls_in_recovery);
- init_rwsem(&ls->ls_recv_active);
+ rwlock_init(&ls->ls_recv_active);
INIT_LIST_HEAD(&ls->ls_requestqueue);
- mutex_init(&ls->ls_requestqueue_mutex);
- mutex_init(&ls->ls_clear_proc_locks);
+ rwlock_init(&ls->ls_requestqueue_lock);
+ spin_lock_init(&ls->ls_clear_proc_locks);
/* Due backwards compatibility with 3.1 we need to use maximum
* possible dlm message size to be sure the message will fit and
@@ -573,8 +510,10 @@ static int new_lockspace(const char *name, const char *cluster,
* might send less.
*/
ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
- if (!ls->ls_recover_buf)
- goto out_lkbidr;
+ if (!ls->ls_recover_buf) {
+ error = -ENOMEM;
+ goto out_lkbxa;
+ }
ls->ls_slot = 0;
ls->ls_num_slots = 0;
@@ -583,25 +522,31 @@ static int new_lockspace(const char *name, const char *cluster,
INIT_LIST_HEAD(&ls->ls_recover_list);
spin_lock_init(&ls->ls_recover_list_lock);
- idr_init(&ls->ls_recover_idr);
- spin_lock_init(&ls->ls_recover_idr_lock);
+ xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+ spin_lock_init(&ls->ls_recover_xa_lock);
ls->ls_recover_list_count = 0;
- ls->ls_local_handle = ls;
init_waitqueue_head(&ls->ls_wait_general);
- INIT_LIST_HEAD(&ls->ls_root_list);
- init_rwsem(&ls->ls_root_sem);
+ INIT_LIST_HEAD(&ls->ls_masters_list);
+ rwlock_init(&ls->ls_masters_lock);
+ INIT_LIST_HEAD(&ls->ls_dir_dump_list);
+ rwlock_init(&ls->ls_dir_dump_lock);
- spin_lock(&lslist_lock);
+ INIT_LIST_HEAD(&ls->ls_scan_list);
+ spin_lock_init(&ls->ls_scan_lock);
+ timer_setup(&ls->ls_scan_timer, dlm_rsb_scan, TIMER_DEFERRABLE);
+
+ spin_lock_bh(&lslist_lock);
ls->ls_create_count = 1;
list_add(&ls->ls_list, &lslist);
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
- if (flags & DLM_LSFL_FS) {
- error = dlm_callback_start(ls);
- if (error) {
- log_error(ls, "can't start dlm_callback %d", error);
- goto out_delist;
- }
+ if (flags & DLM_LSFL_FS)
+ set_bit(LSFL_FS, &ls->ls_flags);
+
+ error = dlm_callback_start(ls);
+ if (error) {
+ log_error(ls, "can't start dlm_callback %d", error);
+ goto out_delist;
}
init_waitqueue_head(&ls->ls_recover_lock_wait);
@@ -622,9 +567,6 @@ static int new_lockspace(const char *name, const char *cluster,
wait_event(ls->ls_recover_lock_wait,
test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
- /* let kobject handle freeing of ls if there's an error */
- do_unreg = 1;
-
ls->ls_kobj.kset = dlm_kset;
error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
"%s", ls->ls_name);
@@ -638,12 +580,13 @@ static int new_lockspace(const char *name, const char *cluster,
current lockspace members are (via configfs) and then tells the
lockspace to start running (via sysfs) in dlm_ls_start(). */
- error = do_uevent(ls, 1);
- if (error)
+ error = do_uevent(ls, 1, 0);
+ if (error < 0)
goto out_recoverd;
- wait_for_completion(&ls->ls_members_done);
- error = ls->ls_members_result;
+ /* wait until recovery is successful or failed */
+ wait_for_completion(&ls->ls_recovery_done);
+ error = ls->ls_recovery_result;
if (error)
goto out_members;
@@ -654,7 +597,7 @@ static int new_lockspace(const char *name, const char *cluster,
return 0;
out_members:
- do_uevent(ls, 0);
+ do_uevent(ls, 0, 0);
dlm_clear_members(ls);
kfree(ls->ls_node_array);
out_recoverd:
@@ -662,31 +605,27 @@ static int new_lockspace(const char *name, const char *cluster,
out_callback:
dlm_callback_stop(ls);
out_delist:
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_del(&ls->ls_list);
- spin_unlock(&lslist_lock);
- idr_destroy(&ls->ls_recover_idr);
+ spin_unlock_bh(&lslist_lock);
+ xa_destroy(&ls->ls_recover_xa);
kfree(ls->ls_recover_buf);
- out_lkbidr:
- idr_destroy(&ls->ls_lkbidr);
- out_rsbtbl:
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
- kfree(ls->ls_remove_names[i]);
- vfree(ls->ls_rsbtbl);
+ out_lkbxa:
+ xa_destroy(&ls->ls_lkbxa);
+ rhashtable_destroy(&ls->ls_rsbtbl);
out_lsfree:
- if (do_unreg)
- kobject_put(&ls->ls_kobj);
- else
- kfree(ls);
+ kobject_put(&ls->ls_kobj);
+ kfree(ls);
out:
module_put(THIS_MODULE);
return error;
}
-int dlm_new_lockspace(const char *name, const char *cluster,
- uint32_t flags, int lvblen,
- const struct dlm_lockspace_ops *ops, void *ops_arg,
- int *ops_result, dlm_lockspace_t **lockspace)
+static int __dlm_new_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops,
+ void *ops_arg, int *ops_result,
+ dlm_lockspace_t **lockspace)
{
int error = 0;
@@ -703,67 +642,75 @@ int dlm_new_lockspace(const char *name, const char *cluster,
if (error > 0)
error = 0;
if (!ls_count) {
- dlm_scand_stop();
dlm_midcomms_shutdown();
- dlm_lowcomms_stop();
+ dlm_midcomms_stop();
}
out:
mutex_unlock(&ls_lock);
return error;
}
-static int lkb_idr_is_local(int id, void *p, void *data)
-{
- struct dlm_lkb *lkb = p;
-
- return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
-}
-
-static int lkb_idr_is_any(int id, void *p, void *data)
+int dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags,
+ int lvblen, const struct dlm_lockspace_ops *ops,
+ void *ops_arg, int *ops_result,
+ dlm_lockspace_t **lockspace)
{
- return 1;
+ return __dlm_new_lockspace(name, cluster, flags | DLM_LSFL_FS, lvblen,
+ ops, ops_arg, ops_result, lockspace);
}
-static int lkb_idr_free(int id, void *p, void *data)
+int dlm_new_user_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops,
+ void *ops_arg, int *ops_result,
+ dlm_lockspace_t **lockspace)
{
- struct dlm_lkb *lkb = p;
-
- if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
- dlm_free_lvb(lkb->lkb_lvbptr);
+ if (flags & DLM_LSFL_SOFTIRQ)
+ return -EINVAL;
- dlm_free_lkb(lkb);
- return 0;
+ return __dlm_new_lockspace(name, cluster, flags, lvblen, ops,
+ ops_arg, ops_result, lockspace);
}
-/* NOTE: We check the lkbidr here rather than the resource table.
+/* NOTE: We check the lkbxa here rather than the resource table.
This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
-static int lockspace_busy(struct dlm_ls *ls, int force)
+static int lockspace_busy(struct dlm_ls *ls, unsigned int release_option)
{
- int rv;
+ struct dlm_lkb *lkb;
+ unsigned long id;
+ int rv = 0;
- spin_lock(&ls->ls_lkbidr_spin);
- if (force == 0) {
- rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
- } else if (force == 1) {
- rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
+ read_lock_bh(&ls->ls_lkbxa_lock);
+ if (release_option == DLM_RELEASE_NO_LOCKS) {
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ rv = 1;
+ break;
+ }
+ } else if (release_option == DLM_RELEASE_UNUSED) {
+ /* TODO: handle this UNUSED option as NO_LOCKS in a later patch */
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ if (lkb->lkb_nodeid == 0 &&
+ lkb->lkb_grmode != DLM_LOCK_IV) {
+ rv = 1;
+ break;
+ }
+ }
} else {
rv = 0;
}
- spin_unlock(&ls->ls_lkbidr_spin);
+ read_unlock_bh(&ls->ls_lkbxa_lock);
return rv;
}
-static int release_lockspace(struct dlm_ls *ls, int force)
+static int release_lockspace(struct dlm_ls *ls, unsigned int release_option)
{
- struct dlm_rsb *rsb;
- struct rb_node *n;
- int i, busy, rv;
+ int busy, rv;
- busy = lockspace_busy(ls, force);
+ busy = lockspace_busy(ls, release_option);
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
if (ls->ls_create_count == 1) {
if (busy) {
rv = -EBUSY;
@@ -777,22 +724,31 @@ static int release_lockspace(struct dlm_ls *ls, int force)
} else {
rv = -EINVAL;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (rv) {
log_debug(ls, "release_lockspace no remove %d", rv);
return rv;
}
+ if (ls_count == 1)
+ dlm_midcomms_version_wait();
+
dlm_device_deregister(ls);
- if (force < 3 && dlm_user_daemon_available())
- do_uevent(ls, 0);
+ if (release_option != DLM_RELEASE_NO_EVENT &&
+ dlm_user_daemon_available())
+ do_uevent(ls, 0, (release_option == DLM_RELEASE_RECOVER));
dlm_recoverd_stop(ls);
+ /* clear the LSFL_RUNNING flag to speed up
+ * timer_shutdown_sync(); we don't care anymore
+ */
+ clear_bit(LSFL_RUNNING, &ls->ls_flags);
+ timer_shutdown_sync(&ls->ls_scan_timer);
+
if (ls_count == 1) {
- dlm_scand_stop();
dlm_clear_members(ls);
dlm_midcomms_shutdown();
}
@@ -803,45 +759,10 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_delete_debug_file(ls);
- idr_destroy(&ls->ls_recover_idr);
- kfree(ls->ls_recover_buf);
-
- /*
- * Free all lkb's in idr
- */
-
- idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
- idr_destroy(&ls->ls_lkbidr);
-
- /*
- * Free all rsb's on rsbtbl[] lists
- */
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
- rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
- rb_erase(n, &ls->ls_rsbtbl[i].keep);
- dlm_free_rsb(rsb);
- }
-
- while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
- rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
- rb_erase(n, &ls->ls_rsbtbl[i].toss);
- dlm_free_rsb(rsb);
- }
- }
-
- vfree(ls->ls_rsbtbl);
-
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
- kfree(ls->ls_remove_names[i]);
+ kobject_put(&ls->ls_kobj);
- while (!list_empty(&ls->ls_new_rsb)) {
- rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
- res_hashchain);
- list_del(&rsb->res_hashchain);
- dlm_free_rsb(rsb);
- }
+ xa_destroy(&ls->ls_recover_xa);
+ kfree(ls->ls_recover_buf);
/*
* Free structures on any other lists
@@ -852,10 +773,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_clear_members(ls);
dlm_clear_members_gone(ls);
kfree(ls->ls_node_array);
- log_rinfo(ls, "release_lockspace final free");
- kobject_put(&ls->ls_kobj);
- /* The ls structure will be freed when the kobject is done with */
+ log_rinfo(ls, "%s final free", __func__);
+
+ /* delayed free of data structures; see free_lockspace() */
+ queue_work(dlm_wq, &ls->ls_free_work);
module_put(THIS_MODULE);
return 0;
}
@@ -867,29 +789,28 @@ static int release_lockspace(struct dlm_ls *ls, int force)
* lockspace must continue to function as usual, participating in recoveries,
* until this returns.
*
- * Force has 4 possible values:
- * 0 - don't destroy locksapce if it has any LKBs
- * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
- * 2 - destroy lockspace regardless of LKBs
- * 3 - destroy lockspace as part of a forced shutdown
+ * See DLM_RELEASE defines for release_option values and their meaning.
*/
-int dlm_release_lockspace(void *lockspace, int force)
+int dlm_release_lockspace(void *lockspace, unsigned int release_option)
{
struct dlm_ls *ls;
int error;
+ if (release_option > __DLM_RELEASE_MAX)
+ return -EINVAL;
+
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
dlm_put_lockspace(ls);
mutex_lock(&ls_lock);
- error = release_lockspace(ls, force);
+ error = release_lockspace(ls, release_option);
if (!error)
ls_count--;
if (!ls_count)
- dlm_lowcomms_stop();
+ dlm_midcomms_stop();
mutex_unlock(&ls_lock);
return error;
@@ -902,20 +823,19 @@ void dlm_stop_lockspaces(void)
restart:
count = 0;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
count++;
continue;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
log_error(ls, "no userland control daemon, stopping lockspace");
dlm_ls_stop(ls);
goto restart;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (count)
log_print("dlm user daemon left %d lockspaces", count);
}
-
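[editor's note: illustrative only, not part of the patch] The lockspace reference count is now an atomic ls_count paired with the ls_count_wait waitqueue instead of an int protected by lslist_lock, so remove_lockspace() can wait_event() for the count to drop instead of polling with ssleep(). A minimal sketch of the hold/put pattern this implies (the wrapper function is hypothetical):

static int example_with_lockspace(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	/* takes a reference: atomic_inc(&ls->ls_count) */
	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	/*
	 * ... use ls; remove_lockspace() blocks in wait_event() on
	 * ls_count_wait until every such reference has been dropped ...
	 */

	/* drops the reference; wakes ls_count_wait when it reaches zero */
	dlm_put_lockspace(ls);
	return 0;
}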
diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
index a78d853b9342..47ebd4411926 100644
--- a/fs/dlm/lockspace.h
+++ b/fs/dlm/lockspace.h
@@ -12,6 +12,14 @@
#ifndef __LOCKSPACE_DOT_H__
#define __LOCKSPACE_DOT_H__
+/* DLM_LSFL_FS
+ * The lockspace user is in the kernel (i.e. filesystem). Enables
+ * direct bast/cast callbacks.
+ *
+ * internal lockspace flag - will be removed in future
+ */
+#define DLM_LSFL_FS 0x00000004
+
int dlm_lockspace_init(void);
void dlm_lockspace_exit(void);
struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
@@ -19,6 +27,11 @@ struct dlm_ls *dlm_find_lockspace_local(void *id);
struct dlm_ls *dlm_find_lockspace_device(int minor);
void dlm_put_lockspace(struct dlm_ls *ls);
void dlm_stop_lockspaces(void);
+int dlm_new_user_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops,
+ void *ops_arg, int *ops_result,
+ dlm_lockspace_t **lockspace);
#endif /* __LOCKSPACE_DOT_H__ */
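[editor's note: illustrative only, not part of the patch] With the split above, kernel (filesystem) users keep calling dlm_new_lockspace(), which now implies DLM_LSFL_FS, while the userspace device code is expected to use dlm_new_user_lockspace(), which rejects DLM_LSFL_SOFTIRQ. A minimal sketch of a kernel caller under assumed values (lockspace name, cluster name and lvblen are made up; ops are omitted):

static dlm_lockspace_t *example_ls;

static int example_create_and_release(void)
{
	int error;

	/* lvblen must be a multiple of 8; flags 0 gets DLM_LSFL_FS added */
	error = dlm_new_lockspace("example", "mycluster", 0, 64,
				  NULL, NULL, NULL, &example_ls);
	if (error)
		return error;

	/* ... dlm_lock()/dlm_unlock() against example_ls ... */

	/* fails with -EBUSY if any lkbs still exist in the lockspace */
	return dlm_release_lockspace(example_ls, DLM_RELEASE_NO_LOCKS);
}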
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 8f715c620e1f..b3958008ba3f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -53,49 +53,63 @@
#include <net/sctp/sctp.h>
#include <net/ipv6.h>
+#include <trace/events/dlm.h>
+#include <trace/events/sock.h>
+
#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
+#include "memory.h"
#include "config.h"
+#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000)
+#define DLM_MAX_PROCESS_BUFFERS 24
#define NEEDED_RMEM (4*1024*1024)
-/* Number of messages to send before rescheduling */
-#define MAX_SEND_MSG_COUNT 25
-#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
-
struct connection {
struct socket *sock; /* NULL if not connected */
uint32_t nodeid; /* So we know who we are in the list */
- struct mutex sock_mutex;
+ /* this semaphore is used to allow parallel recv/send in read
+ * lock mode. When we release a sock we need to hold the write lock.
+ *
+ * However, this locking scheme is not nice. When we remove the
+ * othercon handling we can look into another mechanism to synchronize
+ * io handling and call sock_release() at the right time.
+ */
+ struct rw_semaphore sock_lock;
unsigned long flags;
-#define CF_READ_PENDING 1
-#define CF_WRITE_PENDING 2
-#define CF_INIT_PENDING 4
+#define CF_APP_LIMITED 0
+#define CF_RECV_PENDING 1
+#define CF_SEND_PENDING 2
+#define CF_RECV_INTR 3
+#define CF_IO_STOP 4
#define CF_IS_OTHERCON 5
-#define CF_CLOSE 6
-#define CF_APP_LIMITED 7
-#define CF_CLOSING 8
-#define CF_SHUTDOWN 9
-#define CF_CONNECTED 10
-#define CF_RECONNECT 11
-#define CF_DELAY_CONNECT 12
-#define CF_EOF 13
struct list_head writequeue; /* List of outgoing writequeue_entries */
spinlock_t writequeue_lock;
- atomic_t writequeue_cnt;
- struct mutex wq_alloc;
int retries;
-#define MAX_CONNECT_RETRIES 3
struct hlist_node list;
+ /* due to connect()/accept() races we can currently end up with this
+ * crossed-over second connection attempt for one node.
+ *
+ * There is a way to avoid the race by introducing a connect rule,
+ * e.g. only the side with our_nodeid > nodeid_to_connect is allowed
+ * to connect. The other side can still connect, but that is then only
+ * taken to mean that it wants a reconnect.
+ *
+ * However, changing to this behaviour would break backwards
+ * compatibility. In a DLM protocol major version upgrade we should
+ * remove this!
+ */
struct connection *othercon;
- struct connection *sendcon;
- struct work_struct rwork; /* Receive workqueue */
- struct work_struct swork; /* Send workqueue */
- wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
- unsigned char *rx_buf;
- int rx_buflen;
+ struct work_struct rwork; /* receive worker */
+ struct work_struct swork; /* send worker */
+ wait_queue_head_t shutdown_wait;
+ unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE];
int rx_leftover;
+ int mark;
+ int addr_count;
+ int curr_addr_index;
+ struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT];
+ spinlock_t addrs_lock;
struct rcu_head rcu;
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)
@@ -134,31 +148,25 @@ struct dlm_msg {
struct kref ref;
};
-struct dlm_node_addr {
- struct list_head list;
+struct processqueue_entry {
+ unsigned char *buf;
int nodeid;
- int mark;
- int addr_count;
- int curr_addr_index;
- struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
+ int buflen;
+
+ struct list_head list;
};
struct dlm_proto_ops {
bool try_new_addr;
const char *name;
int proto;
+ int how;
- int (*connect)(struct connection *con, struct socket *sock,
- struct sockaddr *addr, int addr_len);
void (*sockopts)(struct socket *sock);
int (*bind)(struct socket *sock);
int (*listen_validate)(void);
void (*listen_sockopts)(struct socket *sock);
int (*listen_bind)(struct socket *sock);
- /* What to do to shutdown */
- void (*shutdown_action)(struct connection *con);
- /* What to do to eof check */
- bool (*eof_condition)(struct connection *con);
};
static struct listen_sock_callbacks {
@@ -168,17 +176,13 @@ static struct listen_sock_callbacks {
void (*sk_write_space)(struct sock *);
} listen_sock;
-static LIST_HEAD(dlm_node_addrs);
-static DEFINE_SPINLOCK(dlm_node_addrs_spin);
-
static struct listen_connection listen_con;
-static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
+static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
-int dlm_allow_conn;
/* Work queues */
-static struct workqueue_struct *recv_workqueue;
-static struct workqueue_struct *send_workqueue;
+static struct workqueue_struct *io_workqueue;
+static struct workqueue_struct *process_workqueue;
static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(connections_lock);
@@ -186,20 +190,78 @@ DEFINE_STATIC_SRCU(connections_srcu);
static const struct dlm_proto_ops *dlm_proto_ops;
+#define DLM_IO_SUCCESS 0
+#define DLM_IO_END 1
+#define DLM_IO_EOF 2
+#define DLM_IO_RESCHED 3
+#define DLM_IO_FLUSH 4
+
static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
+static void process_dlm_messages(struct work_struct *work);
+
+static DECLARE_WORK(process_work, process_dlm_messages);
+static DEFINE_SPINLOCK(processqueue_lock);
+static bool process_dlm_messages_pending;
+static DECLARE_WAIT_QUEUE_HEAD(processqueue_wq);
+static atomic_t processqueue_count;
+static LIST_HEAD(processqueue);
+
+bool dlm_lowcomms_is_running(void)
+{
+ return !!listen_con.sock;
+}
+
+static void lowcomms_queue_swork(struct connection *con)
+{
+ assert_spin_locked(&con->writequeue_lock);
+
+ if (!test_bit(CF_IO_STOP, &con->flags) &&
+ !test_bit(CF_APP_LIMITED, &con->flags) &&
+ !test_and_set_bit(CF_SEND_PENDING, &con->flags))
+ queue_work(io_workqueue, &con->swork);
+}
+
+static void lowcomms_queue_rwork(struct connection *con)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk));
+#endif
+
+ if (!test_bit(CF_IO_STOP, &con->flags) &&
+ !test_and_set_bit(CF_RECV_PENDING, &con->flags))
+ queue_work(io_workqueue, &con->rwork);
+}
+
+static void writequeue_entry_ctor(void *data)
+{
+ struct writequeue_entry *entry = data;
+
+ INIT_LIST_HEAD(&entry->msgs);
+}
+
+struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
+{
+ return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry),
+ 0, 0, writequeue_entry_ctor);
+}
+
+struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
+{
+ return KMEM_CACHE(dlm_msg, 0);
+}
/* need to held writequeue_lock */
static struct writequeue_entry *con_next_wq(struct connection *con)
{
struct writequeue_entry *e;
- if (list_empty(&con->writequeue))
- return NULL;
-
- e = list_first_entry(&con->writequeue, struct writequeue_entry,
- list);
- if (e->len == 0)
+ e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry,
+ list);
+ /* if len is zero there is nothing to send; if there are users still
+ * filling buffers we wait until they are done so we can send more.
+ */
+ if (!e || e->users || e->len == 0)
return NULL;
return e;
@@ -217,28 +279,16 @@ static struct connection *__find_con(int nodeid, int r)
return NULL;
}
-static bool tcp_eof_condition(struct connection *con)
+static void dlm_con_init(struct connection *con, int nodeid)
{
- return atomic_read(&con->writequeue_cnt);
-}
-
-static int dlm_con_init(struct connection *con, int nodeid)
-{
- con->rx_buflen = dlm_config.ci_buffer_size;
- con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
- if (!con->rx_buf)
- return -ENOMEM;
-
con->nodeid = nodeid;
- mutex_init(&con->sock_mutex);
+ init_rwsem(&con->sock_lock);
INIT_LIST_HEAD(&con->writequeue);
spin_lock_init(&con->writequeue_lock);
- atomic_set(&con->writequeue_cnt, 0);
INIT_WORK(&con->swork, process_send_sockets);
INIT_WORK(&con->rwork, process_recv_sockets);
+ spin_lock_init(&con->addrs_lock);
init_waitqueue_head(&con->shutdown_wait);
-
- return 0;
}
/*
@@ -248,7 +298,7 @@ static int dlm_con_init(struct connection *con, int nodeid)
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
{
struct connection *con, *tmp;
- int r, ret;
+ int r;
r = nodeid_hash(nodeid);
con = __find_con(nodeid, r);
@@ -259,13 +309,7 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
if (!con)
return NULL;
- ret = dlm_con_init(con, nodeid);
- if (ret) {
- kfree(con);
- return NULL;
- }
-
- mutex_init(&con->wq_alloc);
+ dlm_con_init(con, nodeid);
spin_lock(&connections_lock);
/* Because multiple workqueues/threads call this function it can
@@ -277,7 +321,6 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
tmp = __find_con(nodeid, r);
if (tmp) {
spin_unlock(&connections_lock);
- kfree(con->rx_buf);
kfree(con);
return tmp;
}
@@ -288,29 +331,6 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
return con;
}
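+
+/* Connection lookup is protected by connections_srcu; callers hold
+ * srcu_read_lock(&connections_srcu) around nodeid2con(). The
+ * lookup-only paths pass a zero gfp value and expect the connection to
+ * already exist, hence the WARN_ON_ONCE(!con) checks in the callers.
+ */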
-/* Loop round all connections */
-static void foreach_conn(void (*conn_func)(struct connection *c))
-{
- int i;
- struct connection *con;
-
- for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(con, &connection_hash[i], list)
- conn_func(con);
- }
-}
-
-static struct dlm_node_addr *find_node_addr(int nodeid)
-{
- struct dlm_node_addr *na;
-
- list_for_each_entry(na, &dlm_node_addrs, list) {
- if (na->nodeid == nodeid)
- return na;
- }
- return NULL;
-}
-
static int addr_compare(const struct sockaddr_storage *x,
const struct sockaddr_storage *y)
{
@@ -344,40 +364,47 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
unsigned int *mark)
{
struct sockaddr_storage sas;
- struct dlm_node_addr *na;
+ struct connection *con;
+ int idx;
if (!dlm_local_count)
return -1;
- spin_lock(&dlm_node_addrs_spin);
- na = find_node_addr(nodeid);
- if (na && na->addr_count) {
- memcpy(&sas, na->addr[na->curr_addr_index],
- sizeof(struct sockaddr_storage));
+ idx = srcu_read_lock(&connections_srcu);
+ con = nodeid2con(nodeid, 0);
+ if (!con) {
+ srcu_read_unlock(&connections_srcu, idx);
+ return -ENOENT;
+ }
- if (try_new_addr) {
- na->curr_addr_index++;
- if (na->curr_addr_index == na->addr_count)
- na->curr_addr_index = 0;
- }
+ spin_lock(&con->addrs_lock);
+ if (!con->addr_count) {
+ spin_unlock(&con->addrs_lock);
+ srcu_read_unlock(&connections_srcu, idx);
+ return -ENOENT;
}
- spin_unlock(&dlm_node_addrs_spin);
- if (!na)
- return -EEXIST;
+ memcpy(&sas, &con->addr[con->curr_addr_index],
+ sizeof(struct sockaddr_storage));
- if (!na->addr_count)
- return -ENOENT;
+ if (try_new_addr) {
+ con->curr_addr_index++;
+ if (con->curr_addr_index == con->addr_count)
+ con->curr_addr_index = 0;
+ }
- *mark = na->mark;
+ *mark = con->mark;
+ spin_unlock(&con->addrs_lock);
if (sas_out)
memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
- if (!sa_out)
+ if (!sa_out) {
+ srcu_read_unlock(&connections_srcu, idx);
return 0;
+ }
- if (dlm_local_addr[0]->ss_family == AF_INET) {
+ if (dlm_local_addr[0].ss_family == AF_INET) {
struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
@@ -387,168 +414,135 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
ret6->sin6_addr = in6->sin6_addr;
}
+ srcu_read_unlock(&connections_srcu, idx);
return 0;
}
static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
unsigned int *mark)
{
- struct dlm_node_addr *na;
- int rv = -EEXIST;
- int addr_i;
-
- spin_lock(&dlm_node_addrs_spin);
- list_for_each_entry(na, &dlm_node_addrs, list) {
- if (!na->addr_count)
- continue;
-
- for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
- if (addr_compare(na->addr[addr_i], addr)) {
- *nodeid = na->nodeid;
- *mark = na->mark;
- rv = 0;
- goto unlock;
+ struct connection *con;
+ int i, idx, addr_i;
+
+ idx = srcu_read_lock(&connections_srcu);
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
+ WARN_ON_ONCE(!con->addr_count);
+
+ spin_lock(&con->addrs_lock);
+ for (addr_i = 0; addr_i < con->addr_count; addr_i++) {
+ if (addr_compare(&con->addr[addr_i], addr)) {
+ *nodeid = con->nodeid;
+ *mark = con->mark;
+ spin_unlock(&con->addrs_lock);
+ srcu_read_unlock(&connections_srcu, idx);
+ return 0;
+ }
}
+ spin_unlock(&con->addrs_lock);
}
}
-unlock:
- spin_unlock(&dlm_node_addrs_spin);
- return rv;
+ srcu_read_unlock(&connections_srcu, idx);
+
+ return -ENOENT;
}
-/* caller need to held dlm_node_addrs_spin lock */
-static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
- const struct sockaddr_storage *addr)
+static bool dlm_lowcomms_con_has_addr(const struct connection *con,
+ const struct sockaddr_storage *addr)
{
int i;
- for (i = 0; i < na->addr_count; i++) {
- if (addr_compare(na->addr[i], addr))
+ for (i = 0; i < con->addr_count; i++) {
+ if (addr_compare(&con->addr[i], addr))
return true;
}
return false;
}
-int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr)
{
- struct sockaddr_storage *new_addr;
- struct dlm_node_addr *new_node, *na;
+ struct connection *con;
bool ret;
+ int idx;
- new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
- if (!new_node)
- return -ENOMEM;
-
- new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
- if (!new_addr) {
- kfree(new_node);
+ idx = srcu_read_lock(&connections_srcu);
+ con = nodeid2con(nodeid, GFP_NOFS);
+ if (!con) {
+ srcu_read_unlock(&connections_srcu, idx);
return -ENOMEM;
}
- memcpy(new_addr, addr, len);
-
- spin_lock(&dlm_node_addrs_spin);
- na = find_node_addr(nodeid);
- if (!na) {
- new_node->nodeid = nodeid;
- new_node->addr[0] = new_addr;
- new_node->addr_count = 1;
- new_node->mark = dlm_config.ci_mark;
- list_add(&new_node->list, &dlm_node_addrs);
- spin_unlock(&dlm_node_addrs_spin);
+ spin_lock(&con->addrs_lock);
+ if (!con->addr_count) {
+ memcpy(&con->addr[0], addr, sizeof(*addr));
+ con->addr_count = 1;
+ con->mark = dlm_config.ci_mark;
+ spin_unlock(&con->addrs_lock);
+ srcu_read_unlock(&connections_srcu, idx);
return 0;
}
- ret = dlm_lowcomms_na_has_addr(na, addr);
+ ret = dlm_lowcomms_con_has_addr(con, addr);
if (ret) {
- spin_unlock(&dlm_node_addrs_spin);
- kfree(new_addr);
- kfree(new_node);
+ spin_unlock(&con->addrs_lock);
+ srcu_read_unlock(&connections_srcu, idx);
return -EEXIST;
}
- if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
- spin_unlock(&dlm_node_addrs_spin);
- kfree(new_addr);
- kfree(new_node);
+ if (con->addr_count >= DLM_MAX_ADDR_COUNT) {
+ spin_unlock(&con->addrs_lock);
+ srcu_read_unlock(&connections_srcu, idx);
return -ENOSPC;
}
- na->addr[na->addr_count++] = new_addr;
- spin_unlock(&dlm_node_addrs_spin);
- kfree(new_node);
+ memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr));
+ srcu_read_unlock(&connections_srcu, idx);
+ spin_unlock(&con->addrs_lock);
return 0;
}
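+
+/* With this change the per-node addresses live in the connection
+ * itself (con->addr[], con->addr_count, con->mark) under
+ * con->addrs_lock, replacing the old global dlm_node_addrs list and
+ * its spinlock.
+ */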
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
- struct connection *con;
+ struct connection *con = sock2con(sk);
- read_lock_bh(&sk->sk_callback_lock);
- con = sock2con(sk);
- if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
- queue_work(recv_workqueue, &con->rwork);
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void lowcomms_listen_data_ready(struct sock *sk)
-{
- if (!dlm_allow_conn)
- return;
+ trace_sk_data_ready(sk);
- queue_work(recv_workqueue, &listen_con.rwork);
+ set_bit(CF_RECV_INTR, &con->flags);
+ lowcomms_queue_rwork(con);
}
static void lowcomms_write_space(struct sock *sk)
{
- struct connection *con;
-
- read_lock_bh(&sk->sk_callback_lock);
- con = sock2con(sk);
- if (!con)
- goto out;
-
- if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
- log_print("successful connected to node %d", con->nodeid);
- queue_work(send_workqueue, &con->swork);
- goto out;
- }
+ struct connection *con = sock2con(sk);
clear_bit(SOCK_NOSPACE, &con->sock->flags);
+ spin_lock_bh(&con->writequeue_lock);
if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
con->sock->sk->sk_write_pending--;
clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
}
- queue_work(send_workqueue, &con->swork);
-out:
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
-static inline void lowcomms_connect_sock(struct connection *con)
-{
- if (test_bit(CF_CLOSE, &con->flags))
- return;
- queue_work(send_workqueue, &con->swork);
- cond_resched();
+ lowcomms_queue_swork(con);
+ spin_unlock_bh(&con->writequeue_lock);
}
static void lowcomms_state_change(struct sock *sk)
{
/* SCTP layer is not calling sk_data_ready when the connection
- * is done, so we catch the signal through here. Also, it
- * doesn't switch socket state when entering shutdown, so we
- * skip the write in that case.
+ * is done, so we catch the signal through here.
*/
- if (sk->sk_shutdown) {
- if (sk->sk_shutdown == RCV_SHUTDOWN)
- lowcomms_data_ready(sk);
- } else if (sk->sk_state == TCP_ESTABLISHED) {
- lowcomms_write_space(sk);
- }
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ lowcomms_data_ready(sk);
+}
+
+static void lowcomms_listen_data_ready(struct sock *sk)
+{
+ trace_sk_data_ready(sk);
+
+ queue_work(io_workqueue, &listen_con.rwork);
}
int dlm_lowcomms_connect_node(int nodeid)
@@ -556,138 +550,95 @@ int dlm_lowcomms_connect_node(int nodeid)
struct connection *con;
int idx;
- if (nodeid == dlm_our_nodeid())
- return 0;
-
idx = srcu_read_lock(&connections_srcu);
- con = nodeid2con(nodeid, GFP_NOFS);
- if (!con) {
+ con = nodeid2con(nodeid, 0);
+ if (WARN_ON_ONCE(!con)) {
srcu_read_unlock(&connections_srcu, idx);
- return -ENOMEM;
+ return -ENOENT;
}
- lowcomms_connect_sock(con);
+ down_read(&con->sock_lock);
+ if (!con->sock) {
+ spin_lock_bh(&con->writequeue_lock);
+ lowcomms_queue_swork(con);
+ spin_unlock_bh(&con->writequeue_lock);
+ }
+ up_read(&con->sock_lock);
srcu_read_unlock(&connections_srcu, idx);
+ cond_resched();
return 0;
}
int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
{
- struct dlm_node_addr *na;
+ struct connection *con;
+ int idx;
- spin_lock(&dlm_node_addrs_spin);
- na = find_node_addr(nodeid);
- if (!na) {
- spin_unlock(&dlm_node_addrs_spin);
+ idx = srcu_read_lock(&connections_srcu);
+ con = nodeid2con(nodeid, 0);
+ if (!con) {
+ srcu_read_unlock(&connections_srcu, idx);
return -ENOENT;
}
- na->mark = mark;
- spin_unlock(&dlm_node_addrs_spin);
-
+ spin_lock(&con->addrs_lock);
+ con->mark = mark;
+ spin_unlock(&con->addrs_lock);
+ srcu_read_unlock(&connections_srcu, idx);
return 0;
}
static void lowcomms_error_report(struct sock *sk)
{
- struct connection *con;
- struct sockaddr_storage saddr;
- void (*orig_report)(struct sock *) = NULL;
-
- read_lock_bh(&sk->sk_callback_lock);
- con = sock2con(sk);
- if (con == NULL)
- goto out;
+ struct connection *con = sock2con(sk);
+ struct inet_sock *inet;
- orig_report = listen_sock.sk_error_report;
- if (kernel_getpeername(sk->sk_socket, (struct sockaddr *)&saddr) < 0) {
+ inet = inet_sk(sk);
+ switch (sk->sk_family) {
+ case AF_INET:
printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
- "sending to node %d, port %d, "
+ "sending to node %d at %pI4, dport %d, "
"sk_err=%d/%d\n", dlm_our_nodeid(),
- con->nodeid, dlm_config.ci_tcp_port,
- sk->sk_err, sk->sk_err_soft);
- } else if (saddr.ss_family == AF_INET) {
- struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
-
- printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
- "sending to node %d at %pI4, port %d, "
- "sk_err=%d/%d\n", dlm_our_nodeid(),
- con->nodeid, &sin4->sin_addr.s_addr,
- dlm_config.ci_tcp_port, sk->sk_err,
- sk->sk_err_soft);
- } else {
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
-
+ con->nodeid, &inet->inet_daddr,
+ ntohs(inet->inet_dport), sk->sk_err,
+ READ_ONCE(sk->sk_err_soft));
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
- "sending to node %d at %u.%u.%u.%u, "
- "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
- con->nodeid, sin6->sin6_addr.s6_addr32[0],
- sin6->sin6_addr.s6_addr32[1],
- sin6->sin6_addr.s6_addr32[2],
- sin6->sin6_addr.s6_addr32[3],
- dlm_config.ci_tcp_port, sk->sk_err,
- sk->sk_err_soft);
- }
-
- /* below sendcon only handling */
- if (test_bit(CF_IS_OTHERCON, &con->flags))
- con = con->sendcon;
-
- switch (sk->sk_err) {
- case ECONNREFUSED:
- set_bit(CF_DELAY_CONNECT, &con->flags);
+ "sending to node %d at %pI6c, "
+ "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
+ con->nodeid, &sk->sk_v6_daddr,
+ ntohs(inet->inet_dport), sk->sk_err,
+ READ_ONCE(sk->sk_err_soft));
break;
+#endif
default:
+ printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+ "invalid socket family %d set, "
+ "sk_err=%d/%d\n", dlm_our_nodeid(),
+ sk->sk_family, sk->sk_err,
+ READ_ONCE(sk->sk_err_soft));
break;
}
- if (!test_and_set_bit(CF_RECONNECT, &con->flags))
- queue_work(send_workqueue, &con->swork);
+ dlm_midcomms_unack_msg_resend(con->nodeid);
-out:
- read_unlock_bh(&sk->sk_callback_lock);
- if (orig_report)
- orig_report(sk);
-}
-
-/* Note: sk_callback_lock must be locked before calling this function. */
-static void save_listen_callbacks(struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- listen_sock.sk_data_ready = sk->sk_data_ready;
- listen_sock.sk_state_change = sk->sk_state_change;
- listen_sock.sk_write_space = sk->sk_write_space;
- listen_sock.sk_error_report = sk->sk_error_report;
+ listen_sock.sk_error_report(sk);
}
-static void restore_callbacks(struct socket *sock)
+static void restore_callbacks(struct sock *sk)
{
- struct sock *sk = sock->sk;
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_sock_is_held(sk));
+#endif
- write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
sk->sk_data_ready = listen_sock.sk_data_ready;
sk->sk_state_change = listen_sock.sk_state_change;
sk->sk_write_space = listen_sock.sk_write_space;
sk->sk_error_report = listen_sock.sk_error_report;
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void add_listen_sock(struct socket *sock, struct listen_connection *con)
-{
- struct sock *sk = sock->sk;
-
- write_lock_bh(&sk->sk_callback_lock);
- save_listen_callbacks(sock);
- con->sock = sock;
-
- sk->sk_user_data = con;
- sk->sk_allocation = GFP_NOFS;
- /* Install a data_ready callback */
- sk->sk_data_ready = lowcomms_listen_data_ready;
- write_unlock_bh(&sk->sk_callback_lock);
}
/* Make a socket active */
@@ -695,33 +646,34 @@ static void add_sock(struct socket *sock, struct connection *con)
{
struct sock *sk = sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
+ lock_sock(sk);
con->sock = sock;
sk->sk_user_data = con;
- /* Install a data_ready callback */
sk->sk_data_ready = lowcomms_data_ready;
sk->sk_write_space = lowcomms_write_space;
- sk->sk_state_change = lowcomms_state_change;
+ if (dlm_config.ci_protocol == DLM_PROTO_SCTP)
+ sk->sk_state_change = lowcomms_state_change;
sk->sk_allocation = GFP_NOFS;
+ sk->sk_use_task_frag = false;
sk->sk_error_report = lowcomms_error_report;
- write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
}
/* Add the port number to an IPv6 or 4 sockaddr and return the address
length */
-static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
+static void make_sockaddr(struct sockaddr_storage *saddr, __be16 port,
int *addr_len)
{
- saddr->ss_family = dlm_local_addr[0]->ss_family;
+ saddr->ss_family = dlm_local_addr[0].ss_family;
if (saddr->ss_family == AF_INET) {
struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
- in4_addr->sin_port = cpu_to_be16(port);
+ in4_addr->sin_port = port;
*addr_len = sizeof(struct sockaddr_in);
memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
} else {
struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
- in6_addr->sin6_port = cpu_to_be16(port);
+ in6_addr->sin6_port = port;
*addr_len = sizeof(struct sockaddr_in6);
}
memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
@@ -733,7 +685,7 @@ static void dlm_page_release(struct kref *kref)
ref);
__free_page(e->page);
- kfree(e);
+ dlm_free_writequeue(e);
}
static void dlm_msg_release(struct kref *kref)
@@ -741,7 +693,7 @@ static void dlm_msg_release(struct kref *kref)
struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);
kref_put(&msg->entry->ref, dlm_page_release);
- kfree(msg);
+ dlm_free_msg(msg);
}
static void free_entry(struct writequeue_entry *e)
@@ -759,43 +711,63 @@ static void free_entry(struct writequeue_entry *e)
}
list_del(&e->list);
- atomic_dec(&e->con->writequeue_cnt);
kref_put(&e->ref, dlm_page_release);
}
static void dlm_close_sock(struct socket **sock)
{
- if (*sock) {
- restore_callbacks(*sock);
- sock_release(*sock);
- *sock = NULL;
+ lock_sock((*sock)->sk);
+ restore_callbacks((*sock)->sk);
+ release_sock((*sock)->sk);
+
+ sock_release(*sock);
+ *sock = NULL;
+}
+
+static void allow_connection_io(struct connection *con)
+{
+ if (con->othercon)
+ clear_bit(CF_IO_STOP, &con->othercon->flags);
+ clear_bit(CF_IO_STOP, &con->flags);
+}
+
+static void stop_connection_io(struct connection *con)
+{
+ if (con->othercon)
+ stop_connection_io(con->othercon);
+
+ spin_lock_bh(&con->writequeue_lock);
+ set_bit(CF_IO_STOP, &con->flags);
+ spin_unlock_bh(&con->writequeue_lock);
+
+ down_write(&con->sock_lock);
+ if (con->sock) {
+ lock_sock(con->sock->sk);
+ restore_callbacks(con->sock->sk);
+ release_sock(con->sock->sk);
}
+ up_write(&con->sock_lock);
+
+ cancel_work_sync(&con->swork);
+ cancel_work_sync(&con->rwork);
}
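+
+/* stop_connection_io() ordering: CF_IO_STOP is set under
+ * writequeue_lock so no new work gets queued, the socket callbacks are
+ * restored under the socket lock so the network layer stops notifying
+ * us, and finally any already queued rwork/swork is cancelled
+ * synchronously.
+ */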
/* Close a remote connection and tidy up */
-static void close_connection(struct connection *con, bool and_other,
- bool tx, bool rx)
+static void close_connection(struct connection *con, bool and_other)
{
- bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
struct writequeue_entry *e;
- if (tx && !closing && cancel_work_sync(&con->swork)) {
- log_print("canceled swork for node %d", con->nodeid);
- clear_bit(CF_WRITE_PENDING, &con->flags);
- }
- if (rx && !closing && cancel_work_sync(&con->rwork)) {
- log_print("canceled rwork for node %d", con->nodeid);
- clear_bit(CF_READ_PENDING, &con->flags);
+ if (con->othercon && and_other)
+ close_connection(con->othercon, false);
+
+ down_write(&con->sock_lock);
+ if (!con->sock) {
+ up_write(&con->sock_lock);
+ return;
}
- mutex_lock(&con->sock_mutex);
dlm_close_sock(&con->sock);
- if (con->othercon && and_other) {
- /* Will only re-enter once. */
- close_connection(con->othercon, false, tx, rx);
- }
-
/* if we sent a writequeue entry only half way, we drop the
* whole entry, because after a reconnect we must not resume in the
* middle of a msg which would confuse the other end.
@@ -807,49 +779,46 @@ static void close_connection(struct connection *con, bool and_other,
* our policy is to start from a clean state after a disconnect; we don't
* know what was sent/received on the transport layer in this case.
*/
- spin_lock(&con->writequeue_lock);
+ spin_lock_bh(&con->writequeue_lock);
if (!list_empty(&con->writequeue)) {
e = list_first_entry(&con->writequeue, struct writequeue_entry,
list);
if (e->dirty)
free_entry(e);
}
- spin_unlock(&con->writequeue_lock);
+ spin_unlock_bh(&con->writequeue_lock);
con->rx_leftover = 0;
con->retries = 0;
clear_bit(CF_APP_LIMITED, &con->flags);
- clear_bit(CF_CONNECTED, &con->flags);
- clear_bit(CF_DELAY_CONNECT, &con->flags);
- clear_bit(CF_RECONNECT, &con->flags);
- clear_bit(CF_EOF, &con->flags);
- mutex_unlock(&con->sock_mutex);
- clear_bit(CF_CLOSING, &con->flags);
+ clear_bit(CF_RECV_PENDING, &con->flags);
+ clear_bit(CF_SEND_PENDING, &con->flags);
+ up_write(&con->sock_lock);
}
-static void shutdown_connection(struct connection *con)
+static void shutdown_connection(struct connection *con, bool and_other)
{
int ret;
- flush_work(&con->swork);
+ if (con->othercon && and_other)
+ shutdown_connection(con->othercon, false);
- mutex_lock(&con->sock_mutex);
+ flush_workqueue(io_workqueue);
+ down_read(&con->sock_lock);
/* nothing to shutdown */
if (!con->sock) {
- mutex_unlock(&con->sock_mutex);
+ up_read(&con->sock_lock);
return;
}
- set_bit(CF_SHUTDOWN, &con->flags);
- ret = kernel_sock_shutdown(con->sock, SHUT_WR);
- mutex_unlock(&con->sock_mutex);
+ ret = kernel_sock_shutdown(con->sock, dlm_proto_ops->how);
+ up_read(&con->sock_lock);
if (ret) {
log_print("Connection %p failed to shutdown: %d will force close",
con, ret);
goto force_close;
} else {
- ret = wait_event_timeout(con->shutdown_wait,
- !test_bit(CF_SHUTDOWN, &con->flags),
+ ret = wait_event_timeout(con->shutdown_wait, !con->sock,
DLM_SHUTDOWN_WAIT_TIMEOUT);
if (ret == 0) {
log_print("Connection %p shutdown timed out, will force close",
@@ -861,145 +830,167 @@ static void shutdown_connection(struct connection *con)
return;
force_close:
- clear_bit(CF_SHUTDOWN, &con->flags);
- close_connection(con, false, true, true);
+ close_connection(con, false);
}
-static void dlm_tcp_shutdown(struct connection *con)
+static struct processqueue_entry *new_processqueue_entry(int nodeid,
+ int buflen)
{
- if (con->othercon)
- shutdown_connection(con->othercon);
- shutdown_connection(con);
+ struct processqueue_entry *pentry;
+
+ pentry = kmalloc(sizeof(*pentry), GFP_NOFS);
+ if (!pentry)
+ return NULL;
+
+ pentry->buf = kmalloc(buflen, GFP_NOFS);
+ if (!pentry->buf) {
+ kfree(pentry);
+ return NULL;
+ }
+
+ pentry->nodeid = nodeid;
+ return pentry;
}
-static int con_realloc_receive_buf(struct connection *con, int newlen)
+static void free_processqueue_entry(struct processqueue_entry *pentry)
{
- unsigned char *newbuf;
+ kfree(pentry->buf);
+ kfree(pentry);
+}
- newbuf = kmalloc(newlen, GFP_NOFS);
- if (!newbuf)
- return -ENOMEM;
+static void process_dlm_messages(struct work_struct *work)
+{
+ struct processqueue_entry *pentry;
+
+ spin_lock_bh(&processqueue_lock);
+ pentry = list_first_entry_or_null(&processqueue,
+ struct processqueue_entry, list);
+ if (WARN_ON_ONCE(!pentry)) {
+ process_dlm_messages_pending = false;
+ spin_unlock_bh(&processqueue_lock);
+ return;
+ }
- /* copy any leftover from last receive */
- if (con->rx_leftover)
- memmove(newbuf, con->rx_buf, con->rx_leftover);
+ list_del(&pentry->list);
+ if (atomic_dec_and_test(&processqueue_count))
+ wake_up(&processqueue_wq);
+ spin_unlock_bh(&processqueue_lock);
- /* swap to new buffer space */
- kfree(con->rx_buf);
- con->rx_buflen = newlen;
- con->rx_buf = newbuf;
+ for (;;) {
+ dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
+ pentry->buflen);
+ free_processqueue_entry(pentry);
+
+ spin_lock_bh(&processqueue_lock);
+ pentry = list_first_entry_or_null(&processqueue,
+ struct processqueue_entry, list);
+ if (!pentry) {
+ process_dlm_messages_pending = false;
+ spin_unlock_bh(&processqueue_lock);
+ break;
+ }
- return 0;
+ list_del(&pentry->list);
+ if (atomic_dec_and_test(&processqueue_count))
+ wake_up(&processqueue_wq);
+ spin_unlock_bh(&processqueue_lock);
+ }
}
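+
+/* process_dlm_messages() is the single consumer of the global
+ * processqueue filled by receive_from_sock(); the
+ * process_dlm_messages_pending flag ensures only one work item is
+ * queued at a time, and processqueue_count/processqueue_wq provide the
+ * backpressure used by the DLM_IO_FLUSH handling in
+ * process_recv_sockets().
+ */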
/* Data received from remote end */
-static int receive_from_sock(struct connection *con)
+static int receive_from_sock(struct connection *con, int buflen)
{
+ struct processqueue_entry *pentry;
+ int ret, buflen_real;
struct msghdr msg;
struct kvec iov;
- int ret, buflen;
- mutex_lock(&con->sock_mutex);
+ pentry = new_processqueue_entry(con->nodeid, buflen);
+ if (!pentry)
+ return DLM_IO_RESCHED;
- if (con->sock == NULL) {
- ret = -EAGAIN;
- goto out_close;
- }
-
- /* realloc if we get new buffer size to read out */
- buflen = dlm_config.ci_buffer_size;
- if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
- ret = con_realloc_receive_buf(con, buflen);
- if (ret < 0)
- goto out_resched;
- }
+ memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover);
- for (;;) {
- /* calculate new buffer parameter regarding last receive and
- * possible leftover bytes
- */
- iov.iov_base = con->rx_buf + con->rx_leftover;
- iov.iov_len = con->rx_buflen - con->rx_leftover;
-
- memset(&msg, 0, sizeof(msg));
- msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
- ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
- msg.msg_flags);
- if (ret == -EAGAIN)
- break;
- else if (ret <= 0)
- goto out_close;
-
- /* new buflen according readed bytes and leftover from last receive */
- buflen = ret + con->rx_leftover;
- ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
- if (ret < 0)
- goto out_close;
-
- /* calculate leftover bytes from process and put it into begin of
- * the receive buffer, so next receive we have the full message
- * at the start address of the receive buffer.
- */
- con->rx_leftover = buflen - ret;
- if (con->rx_leftover) {
- memmove(con->rx_buf, con->rx_buf + ret,
- con->rx_leftover);
+ /* calculate new buffer parameters regarding the last receive and
+ * possible leftover bytes
+ */
+ iov.iov_base = pentry->buf + con->rx_leftover;
+ iov.iov_len = buflen - con->rx_leftover;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+ clear_bit(CF_RECV_INTR, &con->flags);
+again:
+ ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
+ msg.msg_flags);
+ trace_dlm_recv(con->nodeid, ret);
+ if (ret == -EAGAIN) {
+ lock_sock(con->sock->sk);
+ if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) {
+ release_sock(con->sock->sk);
+ goto again;
}
+
+ clear_bit(CF_RECV_PENDING, &con->flags);
+ release_sock(con->sock->sk);
+ free_processqueue_entry(pentry);
+ return DLM_IO_END;
+ } else if (ret == 0) {
+ /* close will clear CF_RECV_PENDING */
+ free_processqueue_entry(pentry);
+ return DLM_IO_EOF;
+ } else if (ret < 0) {
+ free_processqueue_entry(pentry);
+ return ret;
}
- dlm_midcomms_receive_done(con->nodeid);
- mutex_unlock(&con->sock_mutex);
- return 0;
+ /* new buflen according to the bytes read and the leftover from the last receive */
+ buflen_real = ret + con->rx_leftover;
+ ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf,
+ buflen_real);
+ if (ret < 0) {
+ free_processqueue_entry(pentry);
+ return ret;
+ }
-out_resched:
- if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
- queue_work(recv_workqueue, &con->rwork);
- mutex_unlock(&con->sock_mutex);
- return -EAGAIN;
-
-out_close:
- if (ret == 0) {
- log_print("connection %p got EOF from %d",
- con, con->nodeid);
-
- if (dlm_proto_ops->eof_condition &&
- dlm_proto_ops->eof_condition(con)) {
- set_bit(CF_EOF, &con->flags);
- mutex_unlock(&con->sock_mutex);
- } else {
- mutex_unlock(&con->sock_mutex);
- close_connection(con, false, true, false);
+ pentry->buflen = ret;
- /* handling for tcp shutdown */
- clear_bit(CF_SHUTDOWN, &con->flags);
- wake_up(&con->shutdown_wait);
- }
+ /* calculate the leftover bytes from processing and put them at the
+ * beginning of the receive buffer, so on the next receive we have the
+ * full message at the start address of the receive buffer.
+ */
+ con->rx_leftover = buflen_real - ret;
+ memmove(con->rx_leftover_buf, pentry->buf + ret,
+ con->rx_leftover);
- /* signal to breaking receive worker */
- ret = -1;
- } else {
- mutex_unlock(&con->sock_mutex);
+ spin_lock_bh(&processqueue_lock);
+ ret = atomic_inc_return(&processqueue_count);
+ list_add_tail(&pentry->list, &processqueue);
+ if (!process_dlm_messages_pending) {
+ process_dlm_messages_pending = true;
+ queue_work(process_workqueue, &process_work);
}
- return ret;
+ spin_unlock_bh(&processqueue_lock);
+
+ if (ret > DLM_MAX_PROCESS_BUFFERS)
+ return DLM_IO_FLUSH;
+
+ return DLM_IO_SUCCESS;
}
/* Listening socket is busy, accept a connection */
-static int accept_from_sock(struct listen_connection *con)
+static int accept_from_sock(void)
{
- int result;
struct sockaddr_storage peeraddr;
- struct socket *newsock;
- int len, idx;
- int nodeid;
+ int len, idx, result, nodeid;
struct connection *newcon;
- struct connection *addcon;
+ struct socket *newsock;
unsigned int mark;
- if (!con->sock)
- return -ENOTCONN;
-
- result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
- if (result < 0)
+ result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK);
+ if (result == -EAGAIN)
+ return DLM_IO_END;
+ else if (result < 0)
goto accept_err;
/* Get the connected socket's peer */
@@ -1013,10 +1004,28 @@ static int accept_from_sock(struct listen_connection *con)
/* Get the new node's NODEID */
make_sockaddr(&peeraddr, 0, &len);
if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
- unsigned char *b=(unsigned char *)&peeraddr;
- log_print("connect from non cluster node");
- print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
- b, sizeof(struct sockaddr_storage));
+ switch (peeraddr.ss_family) {
+ case AF_INET: {
+ struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr;
+
+ log_print("connect from non cluster IPv4 node %pI4",
+ &sin->sin_addr);
+ break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr;
+
+ log_print("connect from non cluster IPv6 node %pI6c",
+ &sin6->sin6_addr);
+ break;
+ }
+#endif
+ default:
+ log_print("invalid family from non cluster node");
+ break;
+ }
+
sock_release(newsock);
return -1;
}
@@ -1029,16 +1038,16 @@ static int accept_from_sock(struct listen_connection *con)
* In this case we store the incoming one in "othercon"
*/
idx = srcu_read_lock(&connections_srcu);
- newcon = nodeid2con(nodeid, GFP_NOFS);
- if (!newcon) {
+ newcon = nodeid2con(nodeid, 0);
+ if (WARN_ON_ONCE(!newcon)) {
srcu_read_unlock(&connections_srcu, idx);
- result = -ENOMEM;
+ result = -ENOENT;
goto accept_err;
}
sock_set_mark(newsock->sk, mark);
- mutex_lock(&newcon->sock_mutex);
+ down_write(&newcon->sock_lock);
if (newcon->sock) {
struct connection *othercon = newcon->othercon;
@@ -1046,63 +1055,50 @@ static int accept_from_sock(struct listen_connection *con)
othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
if (!othercon) {
log_print("failed to allocate incoming socket");
- mutex_unlock(&newcon->sock_mutex);
+ up_write(&newcon->sock_lock);
srcu_read_unlock(&connections_srcu, idx);
result = -ENOMEM;
goto accept_err;
}
- result = dlm_con_init(othercon, nodeid);
- if (result < 0) {
- kfree(othercon);
- mutex_unlock(&newcon->sock_mutex);
- srcu_read_unlock(&connections_srcu, idx);
- goto accept_err;
- }
-
- lockdep_set_subclass(&othercon->sock_mutex, 1);
- set_bit(CF_IS_OTHERCON, &othercon->flags);
+ dlm_con_init(othercon, nodeid);
+ lockdep_set_subclass(&othercon->sock_lock, 1);
newcon->othercon = othercon;
- othercon->sendcon = newcon;
+ set_bit(CF_IS_OTHERCON, &othercon->flags);
} else {
/* close other sock con if we have something new */
- close_connection(othercon, false, true, false);
+ close_connection(othercon, false);
}
- mutex_lock(&othercon->sock_mutex);
+ down_write(&othercon->sock_lock);
add_sock(newsock, othercon);
- addcon = othercon;
- mutex_unlock(&othercon->sock_mutex);
+
+ /* check if we received something while adding */
+ lock_sock(othercon->sock->sk);
+ lowcomms_queue_rwork(othercon);
+ release_sock(othercon->sock->sk);
+ up_write(&othercon->sock_lock);
}
else {
/* accept copies the sk after we've saved the callbacks, so we
don't want to save them a second time or comm errors will
result in calling sk_error_report recursively. */
add_sock(newsock, newcon);
- addcon = newcon;
- }
-
- set_bit(CF_CONNECTED, &addcon->flags);
- mutex_unlock(&newcon->sock_mutex);
-
- /*
- * Add it to the active queue in case we got data
- * between processing the accept adding the socket
- * to the read_sockets list
- */
- if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
- queue_work(recv_workqueue, &addcon->rwork);
+ /* check if we received something while adding */
+ lock_sock(newcon->sock->sk);
+ lowcomms_queue_rwork(newcon);
+ release_sock(newcon->sock->sk);
+ }
+ up_write(&newcon->sock_lock);
srcu_read_unlock(&connections_srcu, idx);
- return 0;
+ return DLM_IO_SUCCESS;
accept_err:
if (newsock)
sock_release(newsock);
- if (result != -EAGAIN)
- log_print("error accepting connection from node: %d", result);
return result;
}
@@ -1127,14 +1123,14 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
/*
* sctp_bind_addrs - bind a SCTP socket to all our addresses
*/
-static int sctp_bind_addrs(struct socket *sock, uint16_t port)
+static int sctp_bind_addrs(struct socket *sock, __be16 port)
{
struct sockaddr_storage localaddr;
- struct sockaddr *addr = (struct sockaddr *)&localaddr;
+ struct sockaddr_unsized *addr = (struct sockaddr_unsized *)&localaddr;
int i, addr_len, result = 0;
for (i = 0; i < dlm_local_count; i++) {
- memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
+ memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr));
make_sockaddr(&localaddr, port, &addr_len);
if (!i)
@@ -1154,7 +1150,7 @@ static int sctp_bind_addrs(struct socket *sock, uint16_t port)
/* Get local addresses */
static void init_local(void)
{
- struct sockaddr_storage sas, *addr;
+ struct sockaddr_storage sas;
int i;
dlm_local_count = 0;
@@ -1162,52 +1158,41 @@ static void init_local(void)
if (dlm_our_addr(&sas, i))
break;
- addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
- if (!addr)
- break;
- dlm_local_addr[dlm_local_count++] = addr;
+ memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas));
}
}
-static void deinit_local(void)
-{
- int i;
-
- for (i = 0; i < dlm_local_count; i++)
- kfree(dlm_local_addr[i]);
-}
-
-static struct writequeue_entry *new_writequeue_entry(struct connection *con,
- gfp_t allocation)
+static struct writequeue_entry *new_writequeue_entry(struct connection *con)
{
struct writequeue_entry *entry;
- entry = kzalloc(sizeof(*entry), allocation);
+ entry = dlm_allocate_writequeue();
if (!entry)
return NULL;
- entry->page = alloc_page(allocation | __GFP_ZERO);
+ entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
if (!entry->page) {
- kfree(entry);
+ dlm_free_writequeue(entry);
return NULL;
}
+ entry->offset = 0;
+ entry->len = 0;
+ entry->end = 0;
+ entry->dirty = false;
entry->con = con;
entry->users = 1;
kref_init(&entry->ref);
- INIT_LIST_HEAD(&entry->msgs);
-
return entry;
}
static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
- gfp_t allocation, char **ppc,
- void (*cb)(struct dlm_mhandle *mh),
- struct dlm_mhandle *mh)
+ char **ppc, void (*cb)(void *data),
+ void *data)
{
struct writequeue_entry *e;
- spin_lock(&con->writequeue_lock);
+ spin_lock_bh(&con->writequeue_lock);
if (!list_empty(&con->writequeue)) {
e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
if (DLM_WQ_REMAIN_BYTES(e) >= len) {
@@ -1215,74 +1200,52 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
*ppc = page_address(e->page) + e->end;
if (cb)
- cb(mh);
+ cb(data);
e->end += len;
e->users++;
- spin_unlock(&con->writequeue_lock);
-
- return e;
+ goto out;
}
}
- spin_unlock(&con->writequeue_lock);
- e = new_writequeue_entry(con, allocation);
+ e = new_writequeue_entry(con);
if (!e)
- return NULL;
+ goto out;
kref_get(&e->ref);
*ppc = page_address(e->page);
e->end += len;
- atomic_inc(&con->writequeue_cnt);
-
- spin_lock(&con->writequeue_lock);
if (cb)
- cb(mh);
+ cb(data);
list_add_tail(&e->list, &con->writequeue);
- spin_unlock(&con->writequeue_lock);
+out:
+ spin_unlock_bh(&con->writequeue_lock);
return e;
};
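+/* new_wq_entry() either appends to the last entry on the writequeue if
+ * enough room is left (DLM_WQ_REMAIN_BYTES) or allocates a fresh
+ * page-backed entry; the page allocation uses GFP_ATOMIC because it
+ * now happens under the bh-disabled writequeue_lock.
+ */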
static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
- gfp_t allocation, char **ppc,
- void (*cb)(struct dlm_mhandle *mh),
- struct dlm_mhandle *mh)
+ char **ppc, void (*cb)(void *data),
+ void *data)
{
struct writequeue_entry *e;
struct dlm_msg *msg;
- bool sleepable;
- msg = kzalloc(sizeof(*msg), allocation);
+ msg = dlm_allocate_msg();
if (!msg)
return NULL;
- /* this mutex is being used as a wait to avoid multiple "fast"
- * new writequeue page list entry allocs in new_wq_entry in
- * normal operation which is sleepable context. Without it
- * we could end in multiple writequeue entries with one
- * dlm message because multiple callers were waiting at
- * the writequeue_lock in new_wq_entry().
- */
- sleepable = gfpflags_normal_context(allocation);
- if (sleepable)
- mutex_lock(&con->wq_alloc);
-
kref_init(&msg->ref);
- e = new_wq_entry(con, len, allocation, ppc, cb, mh);
+ e = new_wq_entry(con, len, ppc, cb, data);
if (!e) {
- if (sleepable)
- mutex_unlock(&con->wq_alloc);
-
- kfree(msg);
+ dlm_free_msg(msg);
return NULL;
}
- if (sleepable)
- mutex_unlock(&con->wq_alloc);
-
+ msg->retransmit = false;
+ msg->orig_msg = NULL;
msg->ppc = *ppc;
msg->len = len;
msg->entry = e;
@@ -1290,9 +1253,12 @@ static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
return msg;
}
-struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
- char **ppc, void (*cb)(struct dlm_mhandle *mh),
- struct dlm_mhandle *mh)
+/* avoid a false positive for nodes_srcu; the unlock happens in
+ * dlm_lowcomms_commit_msg(), which must be called on success
+ */
+#ifndef __CHECKER__
+struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc,
+ void (*cb)(void *data), void *data)
{
struct connection *con;
struct dlm_msg *msg;
@@ -1302,27 +1268,30 @@ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
len < sizeof(struct dlm_header)) {
BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
log_print("failed to allocate a buffer of size %d", len);
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return NULL;
}
idx = srcu_read_lock(&connections_srcu);
- con = nodeid2con(nodeid, allocation);
- if (!con) {
+ con = nodeid2con(nodeid, 0);
+ if (WARN_ON_ONCE(!con)) {
srcu_read_unlock(&connections_srcu, idx);
return NULL;
}
- msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, mh);
+ msg = dlm_lowcomms_new_msg_con(con, len, ppc, cb, data);
if (!msg) {
srcu_read_unlock(&connections_srcu, idx);
return NULL;
}
+ /* for dlm_lowcomms_commit_msg() */
+ kref_get(&msg->ref);
/* we assume that on success commit must be called */
msg->idx = idx;
return msg;
}
+#endif
static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
@@ -1330,7 +1299,7 @@ static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
struct connection *con = e->con;
int users;
- spin_lock(&con->writequeue_lock);
+ spin_lock_bh(&con->writequeue_lock);
kref_get(&msg->ref);
list_add(&msg->list, &e->msgs);
@@ -1339,28 +1308,33 @@ static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
goto out;
e->len = DLM_WQ_LENGTH_BYTES(e);
- spin_unlock(&con->writequeue_lock);
- queue_work(send_workqueue, &con->swork);
- return;
+ lowcomms_queue_swork(con);
out:
- spin_unlock(&con->writequeue_lock);
+ spin_unlock_bh(&con->writequeue_lock);
return;
}
+/* avoid a false positive for nodes_srcu; the lock was taken in
+ * dlm_lowcomms_new_msg()
+ */
+#ifndef __CHECKER__
void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
_dlm_lowcomms_commit_msg(msg);
srcu_read_unlock(&connections_srcu, msg->idx);
+ /* because dlm_lowcomms_new_msg() */
+ kref_put(&msg->ref, dlm_msg_release);
}
+#endif
void dlm_lowcomms_put_msg(struct dlm_msg *msg)
{
kref_put(&msg->ref, dlm_msg_release);
}
-/* does not held connections_srcu, usage workqueue only */
+/* does not hold connections_srcu; used by lowcomms_error_report only */
int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
{
struct dlm_msg *msg_resend;
@@ -1369,8 +1343,8 @@ int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
if (msg->retransmit)
return 1;
- msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
- GFP_ATOMIC, &ppc, NULL, NULL);
+ msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, &ppc,
+ NULL, NULL);
if (!msg_resend)
return -ENOMEM;
@@ -1386,90 +1360,83 @@ int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
}
/* Send a message */
-static void send_to_sock(struct connection *con)
+static int send_to_sock(struct connection *con)
{
- const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
struct writequeue_entry *e;
+ struct bio_vec bvec;
+ struct msghdr msg = {
+ .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
+ };
int len, offset, ret;
- int count = 0;
- mutex_lock(&con->sock_mutex);
- if (con->sock == NULL)
- goto out_connect;
-
- spin_lock(&con->writequeue_lock);
- for (;;) {
- e = con_next_wq(con);
- if (!e)
- break;
-
- e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
- len = e->len;
- offset = e->offset;
- BUG_ON(len == 0 && e->users == 0);
- spin_unlock(&con->writequeue_lock);
-
- ret = kernel_sendpage(con->sock, e->page, offset, len,
- msg_flags);
- if (ret == -EAGAIN || ret == 0) {
- if (ret == -EAGAIN &&
- test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
- !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
- /* Notify TCP that we're limited by the
- * application window size.
- */
- set_bit(SOCK_NOSPACE, &con->sock->flags);
- con->sock->sk->sk_write_pending++;
- }
- cond_resched();
- goto out;
- } else if (ret < 0)
- goto out;
-
- /* Don't starve people filling buffers */
- if (++count >= MAX_SEND_MSG_COUNT) {
- cond_resched();
- count = 0;
+ spin_lock_bh(&con->writequeue_lock);
+ e = con_next_wq(con);
+ if (!e) {
+ clear_bit(CF_SEND_PENDING, &con->flags);
+ spin_unlock_bh(&con->writequeue_lock);
+ return DLM_IO_END;
+ }
+
+ len = e->len;
+ offset = e->offset;
+ WARN_ON_ONCE(len == 0 && e->users == 0);
+ spin_unlock_bh(&con->writequeue_lock);
+
+ bvec_set_page(&bvec, e->page, len, offset);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+ ret = sock_sendmsg(con->sock, &msg);
+ trace_dlm_send(con->nodeid, ret);
+ if (ret == -EAGAIN || ret == 0) {
+ lock_sock(con->sock->sk);
+ spin_lock_bh(&con->writequeue_lock);
+ if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
+ !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
+ /* Notify TCP that we're limited by the
+ * application window size.
+ */
+ set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags);
+ con->sock->sk->sk_write_pending++;
+
+ clear_bit(CF_SEND_PENDING, &con->flags);
+ spin_unlock_bh(&con->writequeue_lock);
+ release_sock(con->sock->sk);
+
+ /* wait for write_space() event */
+ return DLM_IO_END;
}
+ spin_unlock_bh(&con->writequeue_lock);
+ release_sock(con->sock->sk);
- spin_lock(&con->writequeue_lock);
- writequeue_entry_complete(e, ret);
- }
- spin_unlock(&con->writequeue_lock);
-
- /* close if we got EOF */
- if (test_and_clear_bit(CF_EOF, &con->flags)) {
- mutex_unlock(&con->sock_mutex);
- close_connection(con, false, false, true);
-
- /* handling for tcp shutdown */
- clear_bit(CF_SHUTDOWN, &con->flags);
- wake_up(&con->shutdown_wait);
- } else {
- mutex_unlock(&con->sock_mutex);
+ return DLM_IO_RESCHED;
+ } else if (ret < 0) {
+ return ret;
}
- return;
-
-out:
- mutex_unlock(&con->sock_mutex);
- return;
+ spin_lock_bh(&con->writequeue_lock);
+ writequeue_entry_complete(e, ret);
+ spin_unlock_bh(&con->writequeue_lock);
-out_connect:
- mutex_unlock(&con->sock_mutex);
- queue_work(send_workqueue, &con->swork);
- cond_resched();
+ return DLM_IO_SUCCESS;
}
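+
+/* send_to_sock() now pushes one writequeue entry per call via
+ * sock_sendmsg() with MSG_SPLICE_PAGES (splicing the writequeue page)
+ * instead of the removed kernel_sendpage() loop. On -EAGAIN with
+ * SOCKWQ_ASYNC_NOSPACE set we mark CF_APP_LIMITED and stop; the
+ * matching lowcomms_write_space() callback re-queues the send work
+ * once the socket has room again.
+ */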
static void clean_one_writequeue(struct connection *con)
{
struct writequeue_entry *e, *safe;
- spin_lock(&con->writequeue_lock);
+ spin_lock_bh(&con->writequeue_lock);
list_for_each_entry_safe(e, safe, &con->writequeue, list) {
free_entry(e);
}
- spin_unlock(&con->writequeue_lock);
+ spin_unlock_bh(&con->writequeue_lock);
+}
+
+static void connection_release(struct rcu_head *rcu)
+{
+ struct connection *con = container_of(rcu, struct connection, rcu);
+
+ WARN_ON_ONCE(!list_empty(&con->writequeue));
+ WARN_ON_ONCE(con->sock);
+ kfree(con);
}
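+
+/* connection_release() runs after an SRCU grace period (see the
+ * call_srcu() users below), so readers still inside
+ * srcu_read_lock(&connections_srcu) never see a freed connection.
+ */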
/* Called from recovery when it knows that a node has
@@ -1477,286 +1444,309 @@ static void clean_one_writequeue(struct connection *con)
int dlm_lowcomms_close(int nodeid)
{
struct connection *con;
- struct dlm_node_addr *na;
int idx;
log_print("closing connection to node %d", nodeid);
+
idx = srcu_read_lock(&connections_srcu);
con = nodeid2con(nodeid, 0);
- if (con) {
- set_bit(CF_CLOSE, &con->flags);
- close_connection(con, true, true, true);
- clean_one_writequeue(con);
- if (con->othercon)
- clean_one_writequeue(con->othercon);
+ if (WARN_ON_ONCE(!con)) {
+ srcu_read_unlock(&connections_srcu, idx);
+ return -ENOENT;
}
- srcu_read_unlock(&connections_srcu, idx);
- spin_lock(&dlm_node_addrs_spin);
- na = find_node_addr(nodeid);
- if (na) {
- list_del(&na->list);
- while (na->addr_count--)
- kfree(na->addr[na->addr_count]);
- kfree(na);
+ stop_connection_io(con);
+ log_print("io handling for node: %d stopped", nodeid);
+ close_connection(con, true);
+
+ spin_lock(&connections_lock);
+ hlist_del_rcu(&con->list);
+ spin_unlock(&connections_lock);
+
+ clean_one_writequeue(con);
+ call_srcu(&connections_srcu, &con->rcu, connection_release);
+ if (con->othercon) {
+ clean_one_writequeue(con->othercon);
+ call_srcu(&connections_srcu, &con->othercon->rcu, connection_release);
}
- spin_unlock(&dlm_node_addrs_spin);
+ srcu_read_unlock(&connections_srcu, idx);
+
+ /* for debugging we print when we are done, to compare with other
+ * messages in between. This function needs to be correctly synchronized
+ * with io handling
+ */
+ log_print("closing connection to node %d done", nodeid);
return 0;
}
-/* Receive workqueue function */
+/* Receive worker function */
static void process_recv_sockets(struct work_struct *work)
{
struct connection *con = container_of(work, struct connection, rwork);
+ int ret, buflen;
+
+ down_read(&con->sock_lock);
+ if (!con->sock) {
+ up_read(&con->sock_lock);
+ return;
+ }
+
+ buflen = READ_ONCE(dlm_config.ci_buffer_size);
+ do {
+ ret = receive_from_sock(con, buflen);
+ } while (ret == DLM_IO_SUCCESS);
+ up_read(&con->sock_lock);
+
+ switch (ret) {
+ case DLM_IO_END:
+ /* CF_RECV_PENDING cleared */
+ break;
+ case DLM_IO_EOF:
+ close_connection(con, false);
+ wake_up(&con->shutdown_wait);
+ /* CF_RECV_PENDING cleared */
+ break;
+ case DLM_IO_FLUSH:
+ /* we can't flush the process_workqueue here because a
+ * WQ_MEM_RECLAIM workqueue can deadlock when flushing a non
+ * WQ_MEM_RECLAIM workqueue such as process_workqueue. Instead
+ * we have a waitqueue to wait until all messages are
+ * processed.
+ *
+ * This handling is only necessary to back off the sender and
+ * not queue all messages from the socket layer into the DLM
+ * processqueue. When DLM is capable of parsing multiple messages
+ * on e.g. a per-socket basis this handling might be
+ * removed. Especially in a message burst we are too slow to
+ * process messages and the queue will fill up memory.
+ */
+ wait_event(processqueue_wq, !atomic_read(&processqueue_count));
+ fallthrough;
+ case DLM_IO_RESCHED:
+ cond_resched();
+ queue_work(io_workqueue, &con->rwork);
+ /* CF_RECV_PENDING not cleared */
+ break;
+ default:
+ if (ret < 0) {
+ if (test_bit(CF_IS_OTHERCON, &con->flags)) {
+ close_connection(con, false);
+ } else {
+ spin_lock_bh(&con->writequeue_lock);
+ lowcomms_queue_swork(con);
+ spin_unlock_bh(&con->writequeue_lock);
+ }
+
+ /* CF_RECV_PENDING is cleared for othercon;
+ * we trigger the send queue if not already done
+ * and process_send_sockets will handle it
+ */
+ break;
+ }
- clear_bit(CF_READ_PENDING, &con->flags);
- receive_from_sock(con);
+ WARN_ON_ONCE(1);
+ break;
+ }
}
static void process_listen_recv_socket(struct work_struct *work)
{
- accept_from_sock(&listen_con);
+ int ret;
+
+ if (WARN_ON_ONCE(!listen_con.sock))
+ return;
+
+ do {
+ ret = accept_from_sock();
+ } while (ret == DLM_IO_SUCCESS);
+
+ if (ret < 0)
+ log_print("critical error accepting connection: %d", ret);
}
-static void dlm_connect(struct connection *con)
+static int dlm_connect(struct connection *con)
{
struct sockaddr_storage addr;
int result, addr_len;
struct socket *sock;
unsigned int mark;
- /* Some odd races can cause double-connects, ignore them */
- if (con->retries++ > MAX_CONNECT_RETRIES)
- return;
-
- if (con->sock) {
- log_print("node %d already connected.", con->nodeid);
- return;
- }
-
memset(&addr, 0, sizeof(addr));
result = nodeid_to_addr(con->nodeid, &addr, NULL,
dlm_proto_ops->try_new_addr, &mark);
if (result < 0) {
log_print("no address for nodeid %d", con->nodeid);
- return;
+ return result;
}
/* Create a socket to communicate with */
- result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+ result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
SOCK_STREAM, dlm_proto_ops->proto, &sock);
if (result < 0)
- goto socket_err;
+ return result;
sock_set_mark(sock->sk, mark);
dlm_proto_ops->sockopts(sock);
- add_sock(sock, con);
-
result = dlm_proto_ops->bind(sock);
- if (result < 0)
- goto add_sock_err;
+ if (result < 0) {
+ sock_release(sock);
+ return result;
+ }
+
+ add_sock(sock, con);
log_print_ratelimited("connecting to %d", con->nodeid);
make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
- result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
- addr_len);
- if (result < 0)
- goto add_sock_err;
-
- return;
-
-add_sock_err:
- dlm_close_sock(&con->sock);
+ result = kernel_connect(sock, (struct sockaddr_unsized *)&addr, addr_len, 0);
+ switch (result) {
+ case -EINPROGRESS:
+ /* not an error */
+ fallthrough;
+ case 0:
+ break;
+ default:
+ if (result < 0)
+ dlm_close_sock(&con->sock);
-socket_err:
- /*
- * Some errors are fatal and this list might need adjusting. For other
- * errors we try again until the max number of retries is reached.
- */
- if (result != -EHOSTUNREACH &&
- result != -ENETUNREACH &&
- result != -ENETDOWN &&
- result != -EINVAL &&
- result != -EPROTONOSUPPORT) {
- log_print("connect %d try %d error %d", con->nodeid,
- con->retries, result);
- msleep(1000);
- lowcomms_connect_sock(con);
+ break;
}
+
+ return result;
}
-/* Send workqueue function */
+/* Send worker function */
static void process_send_sockets(struct work_struct *work)
{
struct connection *con = container_of(work, struct connection, swork);
+ int ret;
- WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags));
-
- clear_bit(CF_WRITE_PENDING, &con->flags);
+ WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags));
- if (test_and_clear_bit(CF_RECONNECT, &con->flags)) {
- close_connection(con, false, false, true);
- dlm_midcomms_unack_msg_resend(con->nodeid);
+ down_read(&con->sock_lock);
+ if (!con->sock) {
+ up_read(&con->sock_lock);
+ down_write(&con->sock_lock);
+ if (!con->sock) {
+ ret = dlm_connect(con);
+ switch (ret) {
+ case 0:
+ break;
+ default:
+ /* CF_SEND_PENDING not cleared */
+ up_write(&con->sock_lock);
+ log_print("connect to node %d try %d error %d",
+ con->nodeid, con->retries++, ret);
+ msleep(1000);
+ /* For now we try forever to reconnect. In the
+ * future we should send an event to the cluster
+ * manager to fence itself after a certain number
+ * of retries.
+ */
+ queue_work(io_workqueue, &con->swork);
+ return;
+ }
+ }
+ downgrade_write(&con->sock_lock);
}
- if (con->sock == NULL) {
- if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
- msleep(1000);
+ do {
+ ret = send_to_sock(con);
+ } while (ret == DLM_IO_SUCCESS);
+ up_read(&con->sock_lock);
- mutex_lock(&con->sock_mutex);
- dlm_connect(con);
- mutex_unlock(&con->sock_mutex);
- }
+ switch (ret) {
+ case DLM_IO_END:
+ /* CF_SEND_PENDING cleared */
+ break;
+ case DLM_IO_RESCHED:
+ /* CF_SEND_PENDING not cleared */
+ cond_resched();
+ queue_work(io_workqueue, &con->swork);
+ break;
+ default:
+ if (ret < 0) {
+ close_connection(con, false);
+
+ /* CF_SEND_PENDING cleared */
+ spin_lock_bh(&con->writequeue_lock);
+ lowcomms_queue_swork(con);
+ spin_unlock_bh(&con->writequeue_lock);
+ break;
+ }
- if (!list_empty(&con->writequeue))
- send_to_sock(con);
+ WARN_ON_ONCE(1);
+ break;
+ }
}
static void work_stop(void)
{
- if (recv_workqueue) {
- destroy_workqueue(recv_workqueue);
- recv_workqueue = NULL;
+ if (io_workqueue) {
+ destroy_workqueue(io_workqueue);
+ io_workqueue = NULL;
}
- if (send_workqueue) {
- destroy_workqueue(send_workqueue);
- send_workqueue = NULL;
+ if (process_workqueue) {
+ destroy_workqueue(process_workqueue);
+ process_workqueue = NULL;
}
}
static int work_start(void)
{
- recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM);
- if (!recv_workqueue) {
- log_print("can't start dlm_recv");
+ io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 0);
+ if (!io_workqueue) {
+ log_print("can't start dlm_io");
return -ENOMEM;
}
- send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM);
- if (!send_workqueue) {
- log_print("can't start dlm_send");
- destroy_workqueue(recv_workqueue);
- recv_workqueue = NULL;
+ process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH | WQ_PERCPU, 0);
+ if (!process_workqueue) {
+ log_print("can't start dlm_process");
+ destroy_workqueue(io_workqueue);
+ io_workqueue = NULL;
return -ENOMEM;
}
return 0;
}
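+
+/* Two workqueues replace the old dlm_recv/dlm_send pair: io_workqueue
+ * (WQ_MEM_RECLAIM) runs the per-connection rwork/swork socket I/O,
+ * while process_workqueue runs process_dlm_messages() and is
+ * intentionally not WQ_MEM_RECLAIM - see the DLM_IO_FLUSH comment in
+ * process_recv_sockets().
+ */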
-static void shutdown_conn(struct connection *con)
-{
- if (dlm_proto_ops->shutdown_action)
- dlm_proto_ops->shutdown_action(con);
-}
-
void dlm_lowcomms_shutdown(void)
{
- int idx;
-
- /* Set all the flags to prevent any
- * socket activity.
- */
- dlm_allow_conn = 0;
+ struct connection *con;
+ int i, idx;
- if (recv_workqueue)
- flush_workqueue(recv_workqueue);
- if (send_workqueue)
- flush_workqueue(send_workqueue);
+ /* stop lowcomms_listen_data_ready calls */
+ lock_sock(listen_con.sock->sk);
+ listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready;
+ release_sock(listen_con.sock->sk);
+ cancel_work_sync(&listen_con.rwork);
dlm_close_sock(&listen_con.sock);
idx = srcu_read_lock(&connections_srcu);
- foreach_conn(shutdown_conn);
- srcu_read_unlock(&connections_srcu, idx);
-}
-
-static void _stop_conn(struct connection *con, bool and_other)
-{
- mutex_lock(&con->sock_mutex);
- set_bit(CF_CLOSE, &con->flags);
- set_bit(CF_READ_PENDING, &con->flags);
- set_bit(CF_WRITE_PENDING, &con->flags);
- if (con->sock && con->sock->sk) {
- write_lock_bh(&con->sock->sk->sk_callback_lock);
- con->sock->sk->sk_user_data = NULL;
- write_unlock_bh(&con->sock->sk->sk_callback_lock);
- }
- if (con->othercon && and_other)
- _stop_conn(con->othercon, false);
- mutex_unlock(&con->sock_mutex);
-}
-
-static void stop_conn(struct connection *con)
-{
- _stop_conn(con, true);
-}
-
-static void connection_release(struct rcu_head *rcu)
-{
- struct connection *con = container_of(rcu, struct connection, rcu);
-
- kfree(con->rx_buf);
- kfree(con);
-}
-
-static void free_conn(struct connection *con)
-{
- close_connection(con, true, true, true);
- spin_lock(&connections_lock);
- hlist_del_rcu(&con->list);
- spin_unlock(&connections_lock);
- if (con->othercon) {
- clean_one_writequeue(con->othercon);
- call_srcu(&connections_srcu, &con->othercon->rcu,
- connection_release);
- }
- clean_one_writequeue(con);
- call_srcu(&connections_srcu, &con->rcu, connection_release);
-}
-
-static void work_flush(void)
-{
- int ok;
- int i;
- struct connection *con;
-
- do {
- ok = 1;
- foreach_conn(stop_conn);
- if (recv_workqueue)
- flush_workqueue(recv_workqueue);
- if (send_workqueue)
- flush_workqueue(send_workqueue);
- for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
- hlist_for_each_entry_rcu(con, &connection_hash[i],
- list) {
- ok &= test_bit(CF_READ_PENDING, &con->flags);
- ok &= test_bit(CF_WRITE_PENDING, &con->flags);
- if (con->othercon) {
- ok &= test_bit(CF_READ_PENDING,
- &con->othercon->flags);
- ok &= test_bit(CF_WRITE_PENDING,
- &con->othercon->flags);
- }
- }
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
+ shutdown_connection(con, true);
+ stop_connection_io(con);
+ flush_workqueue(process_workqueue);
+ close_connection(con, true);
+
+ clean_one_writequeue(con);
+ if (con->othercon)
+ clean_one_writequeue(con->othercon);
+ allow_connection_io(con);
}
- } while (!ok);
+ }
+ srcu_read_unlock(&connections_srcu, idx);
}
void dlm_lowcomms_stop(void)
{
- int idx;
-
- idx = srcu_read_lock(&connections_srcu);
- work_flush();
- foreach_conn(free_conn);
- srcu_read_unlock(&connections_srcu, idx);
work_stop();
- deinit_local();
-
dlm_proto_ops = NULL;
}
@@ -1772,11 +1762,11 @@ static int dlm_listen_for_all(void)
if (result < 0)
return result;
- result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+ result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
SOCK_STREAM, dlm_proto_ops->proto, &sock);
if (result < 0) {
- log_print("Can't create comms socket, check SCTP is loaded");
- goto out;
+ log_print("Can't create comms socket: %d", result);
+ return result;
}
sock_set_mark(sock->sk, dlm_config.ci_mark);
@@ -1786,14 +1776,23 @@ static int dlm_listen_for_all(void)
if (result < 0)
goto out;
- save_listen_callbacks(sock);
- add_listen_sock(sock, &listen_con);
+ lock_sock(sock->sk);
+ listen_sock.sk_data_ready = sock->sk->sk_data_ready;
+ listen_sock.sk_write_space = sock->sk->sk_write_space;
+ listen_sock.sk_error_report = sock->sk->sk_error_report;
+ listen_sock.sk_state_change = sock->sk->sk_state_change;
- INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
- result = sock->ops->listen(sock, 5);
+ listen_con.sock = sock;
+
+ sock->sk->sk_allocation = GFP_NOFS;
+ sock->sk->sk_use_task_frag = false;
+ sock->sk->sk_data_ready = lowcomms_listen_data_ready;
+ release_sock(sock->sk);
+
+ result = sock->ops->listen(sock, 128);
if (result < 0) {
dlm_close_sock(&listen_con.sock);
- goto out;
+ return result;
}
return 0;
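
The listen path above now snapshots the socket's original sk_* callbacks under lock_sock() before pointing sk_data_ready at lowcomms_listen_data_ready, and the shutdown code at the top of this section restores the saved pointer. A minimal userspace sketch of that save/override/restore idea follows; it is not the kernel code, and every name in it (fake_sock, hook_callbacks, ...) is hypothetical.

#include <pthread.h>
#include <stdio.h>

/* hypothetical stand-ins for the socket and its callback slot */
struct fake_sock {
	pthread_mutex_t lock;
	void (*data_ready)(struct fake_sock *sk);
};

static void (*saved_data_ready)(struct fake_sock *sk);

static void default_data_ready(struct fake_sock *sk) { (void)sk; puts("default"); }
static void listen_data_ready(struct fake_sock *sk) { (void)sk; puts("listen hook"); }

static struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, default_data_ready };

/* install our hook while remembering the original, as done above
 * under lock_sock()/release_sock() */
static void hook_callbacks(struct fake_sock *s)
{
	pthread_mutex_lock(&s->lock);
	saved_data_ready = s->data_ready;
	s->data_ready = listen_data_ready;
	pthread_mutex_unlock(&s->lock);
}

/* restore the original on shutdown, mirroring the close path */
static void unhook_callbacks(struct fake_sock *s)
{
	pthread_mutex_lock(&s->lock);
	s->data_ready = saved_data_ready;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	hook_callbacks(&sk);
	sk.data_ready(&sk);	/* prints "listen hook" */
	unhook_callbacks(&sk);
	sk.data_ready(&sk);	/* prints "default" */
	return 0;
}
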
@@ -1811,11 +1810,11 @@ static int dlm_tcp_bind(struct socket *sock)
/* Bind to our cluster-known address connecting to avoid
* routing problems.
*/
- memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
+ memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
make_sockaddr(&src_addr, 0, &addr_len);
- result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
- addr_len);
+ result = kernel_bind(sock, (struct sockaddr_unsized *)&src_addr,
+ addr_len);
if (result < 0) {
/* This *may* not indicate a critical error */
log_print("could not bind for connect: %d", result);
@@ -1824,28 +1823,12 @@ static int dlm_tcp_bind(struct socket *sock)
return 0;
}
-static int dlm_tcp_connect(struct connection *con, struct socket *sock,
- struct sockaddr *addr, int addr_len)
-{
- int ret;
-
- ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
- switch (ret) {
- case -EINPROGRESS:
- fallthrough;
- case 0:
- return 0;
- }
-
- return ret;
-}
-
static int dlm_tcp_listen_validate(void)
{
/* We don't support multi-homed hosts */
if (dlm_local_count > 1) {
- log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
- return -EINVAL;
+		log_print("Detected multi-homed host, but only the first IP address will be used.");
+		log_print("Try SCTP if you want to enable multi-link.");
}
return 0;
@@ -1868,22 +1851,20 @@ static int dlm_tcp_listen_bind(struct socket *sock)
int addr_len;
/* Bind to our port */
- make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
- return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
- addr_len);
+ make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+ return kernel_bind(sock, (struct sockaddr_unsized *)&dlm_local_addr[0],
+ addr_len);
}
static const struct dlm_proto_ops dlm_tcp_ops = {
.name = "TCP",
.proto = IPPROTO_TCP,
- .connect = dlm_tcp_connect,
+ .how = SHUT_WR,
.sockopts = dlm_tcp_sockopts,
.bind = dlm_tcp_bind,
.listen_validate = dlm_tcp_listen_validate,
.listen_sockopts = dlm_tcp_listen_sockopts,
.listen_bind = dlm_tcp_listen_bind,
- .shutdown_action = dlm_tcp_shutdown,
- .eof_condition = tcp_eof_condition,
};
static int dlm_sctp_bind(struct socket *sock)
@@ -1891,28 +1872,6 @@ static int dlm_sctp_bind(struct socket *sock)
return sctp_bind_addrs(sock, 0);
}
-static int dlm_sctp_connect(struct connection *con, struct socket *sock,
- struct sockaddr *addr, int addr_len)
-{
- int ret;
-
- /*
- * Make sock->ops->connect() function return in specified time,
- * since O_NONBLOCK argument in connect() function does not work here,
- * then, we should restore the default value of this attribute.
- */
- sock_set_sndtimeo(sock->sk, 5);
- ret = sock->ops->connect(sock, addr, addr_len, 0);
- sock_set_sndtimeo(sock->sk, 0);
- if (ret < 0)
- return ret;
-
- if (!test_and_set_bit(CF_CONNECTED, &con->flags))
- log_print("successful connected to node %d", con->nodeid);
-
- return 0;
-}
-
static int dlm_sctp_listen_validate(void)
{
if (!IS_ENABLED(CONFIG_IP_SCTP)) {
@@ -1939,8 +1898,8 @@ static void dlm_sctp_sockopts(struct socket *sock)
static const struct dlm_proto_ops dlm_sctp_ops = {
.name = "SCTP",
.proto = IPPROTO_SCTP,
+ .how = SHUT_RDWR,
.try_new_addr = true,
- .connect = dlm_sctp_connect,
.sockopts = dlm_sctp_sockopts,
.bind = dlm_sctp_bind,
.listen_validate = dlm_sctp_listen_validate,
@@ -1950,11 +1909,7 @@ static const struct dlm_proto_ops dlm_sctp_ops = {
int dlm_lowcomms_start(void)
{
- int error = -EINVAL;
- int i;
-
- for (i = 0; i < CONN_HASH_SIZE; i++)
- INIT_HLIST_HEAD(&connection_hash[i]);
+ int error;
init_local();
if (!dlm_local_count) {
@@ -1963,13 +1918,9 @@ int dlm_lowcomms_start(void)
goto fail;
}
- INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
-
error = work_start();
if (error)
- goto fail_local;
-
- dlm_allow_conn = 1;
+ goto fail;
/* Start listening */
switch (dlm_config.ci_protocol) {
@@ -1995,25 +1946,38 @@ int dlm_lowcomms_start(void)
fail_listen:
dlm_proto_ops = NULL;
fail_proto_ops:
- dlm_allow_conn = 0;
- dlm_close_sock(&listen_con.sock);
work_stop();
-fail_local:
- deinit_local();
fail:
return error;
}
+void dlm_lowcomms_init(void)
+{
+ int i;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&connection_hash[i]);
+
+ INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
+}
+
void dlm_lowcomms_exit(void)
{
- struct dlm_node_addr *na, *safe;
+ struct connection *con;
+ int i, idx;
- spin_lock(&dlm_node_addrs_spin);
- list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
- list_del(&na->list);
- while (na->addr_count--)
- kfree(na->addr[na->addr_count]);
- kfree(na);
+ idx = srcu_read_lock(&connections_srcu);
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
+ spin_lock(&connections_lock);
+ hlist_del_rcu(&con->list);
+ spin_unlock(&connections_lock);
+
+ if (con->othercon)
+ call_srcu(&connections_srcu, &con->othercon->rcu,
+ connection_release);
+ call_srcu(&connections_srcu, &con->rcu, connection_release);
+ }
}
- spin_unlock(&dlm_node_addrs_spin);
+ srcu_read_unlock(&connections_srcu, idx);
}
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 4ccae07cf005..fd0df604eb93 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -29,24 +29,27 @@ static inline int nodeid_hash(int nodeid)
return nodeid & (CONN_HASH_SIZE-1);
}
-/* switch to check if dlm is running */
-extern int dlm_allow_conn;
+/* check if dlm is running */
+bool dlm_lowcomms_is_running(void);
int dlm_lowcomms_start(void);
void dlm_lowcomms_shutdown(void);
+void dlm_lowcomms_shutdown_node(int nodeid, bool force);
void dlm_lowcomms_stop(void);
+void dlm_lowcomms_init(void);
void dlm_lowcomms_exit(void);
int dlm_lowcomms_close(int nodeid);
-struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
- char **ppc, void (*cb)(struct dlm_mhandle *mh),
- struct dlm_mhandle *mh);
+struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc,
+ void (*cb)(void *data), void *data);
void dlm_lowcomms_commit_msg(struct dlm_msg *msg);
void dlm_lowcomms_put_msg(struct dlm_msg *msg);
int dlm_lowcomms_resend_msg(struct dlm_msg *msg);
int dlm_lowcomms_connect_node(int nodeid);
int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
-int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr);
void dlm_midcomms_receive_done(int nodeid);
+struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void);
+struct kmem_cache *dlm_lowcomms_msg_cache_create(void);
#endif /* __LOWCOMMS_DOT_H__ */
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index afc66a1346d3..a44d16da7187 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -17,7 +17,12 @@
#include "user.h"
#include "memory.h"
#include "config.h"
-#include "lowcomms.h"
+#include "midcomms.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/dlm.h>
+
+struct workqueue_struct *dlm_wq;
static int __init init_dlm(void)
{
@@ -27,6 +32,8 @@ static int __init init_dlm(void)
if (error)
goto out;
+ dlm_midcomms_init();
+
error = dlm_lockspace_init();
if (error)
goto out_mem;
@@ -41,20 +48,22 @@ static int __init init_dlm(void)
if (error)
goto out_debug;
- error = dlm_netlink_init();
+ error = dlm_plock_init();
if (error)
goto out_user;
- error = dlm_plock_init();
- if (error)
- goto out_netlink;
+ dlm_wq = alloc_workqueue("dlm_wq", WQ_PERCPU, 0);
+ if (!dlm_wq) {
+ error = -ENOMEM;
+ goto out_plock;
+ }
printk("DLM installed\n");
return 0;
- out_netlink:
- dlm_netlink_exit();
+ out_plock:
+ dlm_plock_exit();
out_user:
dlm_user_exit();
out_debug:
@@ -63,6 +72,7 @@ static int __init init_dlm(void)
out_lockspace:
dlm_lockspace_exit();
out_mem:
+ dlm_midcomms_exit();
dlm_memory_exit();
out:
return error;
@@ -70,14 +80,15 @@ static int __init init_dlm(void)
static void __exit exit_dlm(void)
{
+	/* make sure all pending work, e.g. freeing, is done */
+ destroy_workqueue(dlm_wq);
dlm_plock_exit();
- dlm_netlink_exit();
dlm_user_exit();
dlm_config_exit();
- dlm_memory_exit();
dlm_lockspace_exit();
- dlm_lowcomms_exit();
+ dlm_midcomms_exit();
dlm_unregister_debugfs();
+ dlm_memory_exit();
}
module_init(init_dlm);
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 731d489aa323..c0f557a80a75 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -18,9 +18,9 @@
#include "midcomms.h"
#include "lowcomms.h"
-int dlm_slots_version(struct dlm_header *h)
+int dlm_slots_version(const struct dlm_header *h)
{
- if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS)
+ if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS)
return 0;
return 1;
}
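
dlm_slots_version() (and the slot copy-in loop below) now keeps the on-wire fields little-endian in memory and converts them at the point of use with le32_to_cpu()/le16_to_cpu(). For illustration, a portable userspace equivalent of such a conversion, assuming a hypothetical read_le32() helper, is:

#include <stdint.h>
#include <stdio.h>

/* portable equivalent of le32_to_cpu(): assemble the value from bytes
 * so the result is independent of host endianness */
static uint32_t read_le32(const unsigned char *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const unsigned char wire[4] = { 0x02, 0x00, 0x03, 0x00 };

	printf("0x%08x\n", read_le32(wire));	/* prints 0x00030002 */
	return 0;
}
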
@@ -120,18 +120,13 @@ int dlm_slots_copy_in(struct dlm_ls *ls)
ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
- for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
- ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid);
- ro->ro_slot = le16_to_cpu(ro->ro_slot);
- }
-
log_slots(ls, gen, num_slots, ro0, NULL, 0);
list_for_each_entry(memb, &ls->ls_nodes, list) {
for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
- if (ro->ro_nodeid != memb->nodeid)
+ if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)
continue;
- memb->slot = ro->ro_slot;
+ memb->slot = le16_to_cpu(ro->ro_slot);
memb->slot_prev = memb->slot;
break;
}
@@ -312,6 +307,21 @@ static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
}
}
+static int add_remote_member(int nodeid)
+{
+ int error;
+
+ if (nodeid == dlm_our_nodeid())
+ return 0;
+
+ error = dlm_lowcomms_connect_node(nodeid);
+ if (error < 0)
+ return error;
+
+ dlm_midcomms_add_member(nodeid);
+ return 0;
+}
+
static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
{
struct dlm_member *memb;
@@ -321,16 +331,16 @@ static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
if (!memb)
return -ENOMEM;
- error = dlm_lowcomms_connect_node(node->nodeid);
+ memb->nodeid = node->nodeid;
+ memb->weight = node->weight;
+ memb->comm_seq = node->comm_seq;
+
+ error = add_remote_member(node->nodeid);
if (error < 0) {
kfree(memb);
return error;
}
- memb->nodeid = node->nodeid;
- memb->weight = node->weight;
- memb->comm_seq = node->comm_seq;
- dlm_midcomms_add_member(node->nodeid);
add_ordered_member(ls, memb);
ls->ls_num_nodes++;
return 0;
@@ -356,6 +366,8 @@ int dlm_is_member(struct dlm_ls *ls, int nodeid)
int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
+ WARN_ON_ONCE(!nodeid || nodeid == -1);
+
if (find_memb(&ls->ls_nodes_gone, nodeid))
return 1;
return 0;
@@ -375,14 +387,17 @@ static void clear_memb_list(struct list_head *head,
}
}
-static void clear_members_cb(int nodeid)
+static void remove_remote_member(int nodeid)
{
+ if (nodeid == dlm_our_nodeid())
+ return;
+
dlm_midcomms_remove_member(nodeid);
}
void dlm_clear_members(struct dlm_ls *ls)
{
- clear_memb_list(&ls->ls_nodes, clear_members_cb);
+ clear_memb_list(&ls->ls_nodes, remove_remote_member);
ls->ls_num_nodes = 0;
}
@@ -436,18 +451,17 @@ static void make_member_array(struct dlm_ls *ls)
/* send a status request to all members just to establish comms connections */
-static int ping_members(struct dlm_ls *ls)
+static int ping_members(struct dlm_ls *ls, uint64_t seq)
{
struct dlm_member *memb;
int error = 0;
list_for_each_entry(memb, &ls->ls_nodes, list) {
- error = dlm_recovery_stopped(ls);
- if (error) {
+ if (dlm_recovery_stopped(ls)) {
error = -EINTR;
break;
}
- error = dlm_rcom_status(ls, memb->nodeid, 0);
+ error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
if (error)
break;
}
@@ -464,7 +478,8 @@ static void dlm_lsop_recover_prep(struct dlm_ls *ls)
ls->ls_ops->recover_prep(ls->ls_ops_arg);
}
-static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
+static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb,
+ unsigned int release_recover)
{
struct dlm_slot slot;
uint32_t seq;
@@ -479,9 +494,9 @@ static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
we consider the node to have failed (versus
being removed due to dlm_release_lockspace) */
- error = dlm_comm_seq(memb->nodeid, &seq);
+ error = dlm_comm_seq(memb->nodeid, &seq, false);
- if (!error && seq == memb->comm_seq)
+ if (!release_recover && !error && seq == memb->comm_seq)
return;
slot.nodeid = memb->nodeid;
@@ -538,9 +553,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
struct dlm_member *memb, *safe;
struct dlm_config_node *node;
int i, error, neg = 0, low = -1;
+ unsigned int release_recover;
/* previously removed members that we've not finished removing need to
- count as a negative change so the "neg" recovery steps will happen */
+ * count as a negative change so the "neg" recovery steps will happen
+ *
+	 * This function must report all member changes to the lsops or
+	 * midcomms layer and must never return before doing so.
+ */
list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
log_rinfo(ls, "prev removed member %d", memb->nodeid);
@@ -551,11 +571,21 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
node = find_config_node(rv, memb->nodeid);
- if (node && !node->new)
+ if (!node) {
+ log_error(ls, "remove member %d invalid",
+ memb->nodeid);
+ return -EFAULT;
+ }
+
+ if (!node->new && !node->gone)
continue;
- if (!node) {
- log_rinfo(ls, "remove member %d", memb->nodeid);
+ release_recover = 0;
+
+ if (node->gone) {
+ release_recover = node->release_recover;
+ log_rinfo(ls, "remove member %d%s", memb->nodeid,
+ release_recover ? " (release_recover)" : "");
} else {
/* removed and re-added */
log_rinfo(ls, "remove member %d comm_seq %u %u",
@@ -564,18 +594,24 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
neg++;
list_move(&memb->list, &ls->ls_nodes_gone);
- dlm_midcomms_remove_member(memb->nodeid);
+ remove_remote_member(memb->nodeid);
ls->ls_num_nodes--;
- dlm_lsop_recover_slot(ls, memb);
+ dlm_lsop_recover_slot(ls, memb, release_recover);
}
/* add new members to ls_nodes */
for (i = 0; i < rv->nodes_count; i++) {
node = &rv->nodes[i];
+ if (node->gone)
+ continue;
+
if (dlm_is_member(ls, node->nodeid))
continue;
- dlm_add_member(ls, node);
+ error = dlm_add_member(ls, node);
+ if (error)
+ return error;
+
log_rinfo(ls, "add member %d", node->nodeid);
}
@@ -588,20 +624,7 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
make_member_array(ls);
*neg_out = neg;
- error = ping_members(ls);
- /* error -EINTR means that a new recovery action is triggered.
- * We ignore this recovery action and let run the new one which might
- * have new member configuration.
- */
- if (error == -EINTR)
- error = 0;
-
- /* new_lockspace() may be waiting to know if the config
- * is good or bad
- */
- ls->ls_members_result = error;
- complete(&ls->ls_members_done);
-
+ error = ping_members(ls, rv->seq);
log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
return error;
}
@@ -624,7 +647,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
* message to the requestqueue without races.
*/
- down_write(&ls->ls_recv_active);
+ write_lock_bh(&ls->ls_recv_active);
/*
* Abort any recovery that's in progress (see RECOVER_STOP,
@@ -632,18 +655,25 @@ int dlm_ls_stop(struct dlm_ls *ls)
* dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
*/
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
+ if (new)
+ timer_delete_sync(&ls->ls_scan_timer);
ls->ls_recover_seq++;
- spin_unlock(&ls->ls_recover_lock);
+
+ /* activate requestqueue and stop processing */
+ write_lock_bh(&ls->ls_requestqueue_lock);
+ set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
+ write_unlock_bh(&ls->ls_requestqueue_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
/*
* Let dlm_recv run again, now any normal messages will be saved on the
* requestqueue for later.
*/
- up_write(&ls->ls_recv_active);
+ write_unlock_bh(&ls->ls_recv_active);
/*
* This in_recovery lock does two things:
@@ -668,20 +698,29 @@ int dlm_ls_stop(struct dlm_ls *ls)
dlm_recoverd_suspend(ls);
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
kfree(ls->ls_slots);
ls->ls_slots = NULL;
ls->ls_num_slots = 0;
ls->ls_slots_size = 0;
ls->ls_recover_status = 0;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
dlm_recoverd_resume(ls);
if (!ls->ls_recover_begin)
ls->ls_recover_begin = jiffies;
- dlm_lsop_recover_prep(ls);
+ /* call recover_prep ops only once and not multiple times
+ * for each possible dlm_ls_stop() when recovery is already
+ * stopped.
+ *
+	 * If we were able to clear the LSFL_RUNNING bit while it was
+	 * set, we know this is the first dlm_ls_stop() call.
+ */
+ if (new)
+ dlm_lsop_recover_prep(ls);
+
return 0;
}
@@ -699,12 +738,12 @@ int dlm_ls_start(struct dlm_ls *ls)
if (error < 0)
goto fail_rv;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
/* the lockspace needs to be stopped before it can be started */
if (!dlm_locking_stopped(ls)) {
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
log_error(ls, "start ignored: lockspace running");
error = -EINVAL;
goto fail;
@@ -715,7 +754,7 @@ int dlm_ls_start(struct dlm_ls *ls)
rv->seq = ++ls->ls_recover_seq;
rv_old = ls->ls_recover_args;
ls->ls_recover_args = rv;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
if (rv_old) {
log_error(ls, "unused recovery %llx %d",
diff --git a/fs/dlm/member.h b/fs/dlm/member.h
index 433b2fac9f4a..f61cfde46314 100644
--- a/fs/dlm/member.h
+++ b/fs/dlm/member.h
@@ -18,7 +18,7 @@ void dlm_clear_members_gone(struct dlm_ls *ls);
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
int dlm_is_removed(struct dlm_ls *ls, int nodeid);
int dlm_is_member(struct dlm_ls *ls, int nodeid);
-int dlm_slots_version(struct dlm_header *h);
+int dlm_slots_version(const struct dlm_header *h);
void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
struct dlm_member *memb);
void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc);
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 5918f4d39586..5c35cc67aca4 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -10,42 +10,81 @@
******************************************************************************/
#include "dlm_internal.h"
+#include "midcomms.h"
+#include "lowcomms.h"
#include "config.h"
#include "memory.h"
+#include "ast.h"
+static struct kmem_cache *writequeue_cache;
+static struct kmem_cache *mhandle_cache;
+static struct kmem_cache *msg_cache;
static struct kmem_cache *lkb_cache;
static struct kmem_cache *rsb_cache;
+static struct kmem_cache *cb_cache;
int __init dlm_memory_init(void)
{
+ writequeue_cache = dlm_lowcomms_writequeue_cache_create();
+ if (!writequeue_cache)
+ goto out;
+
+ mhandle_cache = dlm_midcomms_cache_create();
+ if (!mhandle_cache)
+ goto mhandle;
+
lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
__alignof__(struct dlm_lkb), 0, NULL);
if (!lkb_cache)
- return -ENOMEM;
+ goto lkb;
+
+ msg_cache = dlm_lowcomms_msg_cache_create();
+ if (!msg_cache)
+ goto msg;
rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb),
__alignof__(struct dlm_rsb), 0, NULL);
- if (!rsb_cache) {
- kmem_cache_destroy(lkb_cache);
- return -ENOMEM;
- }
+ if (!rsb_cache)
+ goto rsb;
+
+ cb_cache = kmem_cache_create("dlm_cb", sizeof(struct dlm_callback),
+ __alignof__(struct dlm_callback), 0,
+ NULL);
+ if (!cb_cache)
+ goto cb;
return 0;
+
+cb:
+ kmem_cache_destroy(rsb_cache);
+rsb:
+ kmem_cache_destroy(msg_cache);
+msg:
+ kmem_cache_destroy(lkb_cache);
+lkb:
+ kmem_cache_destroy(mhandle_cache);
+mhandle:
+ kmem_cache_destroy(writequeue_cache);
+out:
+ return -ENOMEM;
}
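
The reworked dlm_memory_init() unwinds partially completed setup through a chain of goto labels, so each failure frees exactly the caches created before it, in reverse order. Below is a small sketch of that unwind idiom under the assumption that plain malloc()/free() stand in for kmem_cache_create()/kmem_cache_destroy(); struct caches and caches_init are invented names for illustration.

#include <stdlib.h>

struct caches { void *a, *b, *c; };

/* allocate three resources; on any failure free what already exists,
 * in reverse order, just like the goto chain in dlm_memory_init() */
static int caches_init(struct caches *cs)
{
	cs->a = malloc(32);
	if (!cs->a)
		goto out;

	cs->b = malloc(64);
	if (!cs->b)
		goto free_a;

	cs->c = malloc(128);
	if (!cs->c)
		goto free_b;

	return 0;

free_b:
	free(cs->b);
free_a:
	free(cs->a);
out:
	return -1;
}

int main(void)
{
	struct caches cs;

	if (caches_init(&cs))
		return 1;

	free(cs.c);
	free(cs.b);
	free(cs.a);
	return 0;
}
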
void dlm_memory_exit(void)
{
+ rcu_barrier();
+
+ kmem_cache_destroy(writequeue_cache);
+ kmem_cache_destroy(mhandle_cache);
+ kmem_cache_destroy(msg_cache);
kmem_cache_destroy(lkb_cache);
kmem_cache_destroy(rsb_cache);
+ kmem_cache_destroy(cb_cache);
}
char *dlm_allocate_lvb(struct dlm_ls *ls)
{
- char *p;
-
- p = kzalloc(ls->ls_lvblen, GFP_NOFS);
- return p;
+ return kzalloc(ls->ls_lvblen, GFP_ATOMIC);
}
void dlm_free_lvb(char *p)
@@ -53,32 +92,34 @@ void dlm_free_lvb(char *p)
kfree(p);
}
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
+struct dlm_rsb *dlm_allocate_rsb(void)
{
- struct dlm_rsb *r;
-
- r = kmem_cache_zalloc(rsb_cache, GFP_NOFS);
- return r;
+ return kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
}
-void dlm_free_rsb(struct dlm_rsb *r)
+static void __free_rsb_rcu(struct rcu_head *rcu)
{
+ struct dlm_rsb *r = container_of(rcu, struct dlm_rsb, rcu);
if (r->res_lvbptr)
dlm_free_lvb(r->res_lvbptr);
kmem_cache_free(rsb_cache, r);
}
-struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
+void dlm_free_rsb(struct dlm_rsb *r)
{
- struct dlm_lkb *lkb;
+ call_rcu(&r->rcu, __free_rsb_rcu);
+}
- lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS);
- return lkb;
+struct dlm_lkb *dlm_allocate_lkb(void)
+{
+ return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
}
-void dlm_free_lkb(struct dlm_lkb *lkb)
+static void __free_lkb_rcu(struct rcu_head *rcu)
{
- if (lkb->lkb_flags & DLM_IFL_USER) {
+ struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu);
+
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
struct dlm_user_args *ua;
ua = lkb->lkb_ua;
if (ua) {
@@ -86,6 +127,51 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
kfree(ua);
}
}
+
kmem_cache_free(lkb_cache, lkb);
}
+void dlm_free_lkb(struct dlm_lkb *lkb)
+{
+ call_rcu(&lkb->rcu, __free_lkb_rcu);
+}
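
dlm_free_rsb() and dlm_free_lkb() now defer the real kfree() to an RCU callback via call_rcu(), and dlm_memory_exit() gains rcu_barrier() so every queued callback has run before the caches go away. The following is a rough userspace model of that deferred-free flow, not real RCU: toy_call_rcu, toy_rcu_barrier and struct toy_lkb are hypothetical stand-ins that only mimic the enqueue-then-drain behaviour.

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

/* toy stand-ins for struct rcu_head, call_rcu() and rcu_barrier() */
struct toy_rcu_head {
	struct toy_rcu_head *next;
	void (*func)(struct toy_rcu_head *head);
};

static struct toy_rcu_head *pending;

static void toy_call_rcu(struct toy_rcu_head *head,
			 void (*func)(struct toy_rcu_head *head))
{
	head->func = func;
	head->next = pending;
	pending = head;		/* defer, do not free immediately */
}

static void toy_rcu_barrier(void)
{
	while (pending) {	/* run every deferred callback */
		struct toy_rcu_head *head = pending;

		pending = head->next;
		head->func(head);
	}
}

struct toy_lkb {
	int id;
	struct toy_rcu_head rcu;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void toy_free_lkb_rcu(struct toy_rcu_head *head)
{
	struct toy_lkb *lkb = container_of(head, struct toy_lkb, rcu);

	printf("freeing lkb %d\n", lkb->id);
	free(lkb);
}

int main(void)
{
	struct toy_lkb *lkb = malloc(sizeof(*lkb));

	lkb->id = 1;
	toy_call_rcu(&lkb->rcu, toy_free_lkb_rcu);	/* like dlm_free_lkb() */
	toy_rcu_barrier();				/* like dlm_memory_exit() */
	return 0;
}
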
+
+struct dlm_mhandle *dlm_allocate_mhandle(void)
+{
+ return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC);
+}
+
+void dlm_free_mhandle(struct dlm_mhandle *mhandle)
+{
+ kmem_cache_free(mhandle_cache, mhandle);
+}
+
+struct writequeue_entry *dlm_allocate_writequeue(void)
+{
+ return kmem_cache_alloc(writequeue_cache, GFP_ATOMIC);
+}
+
+void dlm_free_writequeue(struct writequeue_entry *writequeue)
+{
+ kmem_cache_free(writequeue_cache, writequeue);
+}
+
+struct dlm_msg *dlm_allocate_msg(void)
+{
+ return kmem_cache_alloc(msg_cache, GFP_ATOMIC);
+}
+
+void dlm_free_msg(struct dlm_msg *msg)
+{
+ kmem_cache_free(msg_cache, msg);
+}
+
+struct dlm_callback *dlm_allocate_cb(void)
+{
+ return kmem_cache_alloc(cb_cache, GFP_ATOMIC);
+}
+
+void dlm_free_cb(struct dlm_callback *cb)
+{
+ kmem_cache_free(cb_cache, cb);
+}
diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
index 4f218ea4b187..551b6b788489 100644
--- a/fs/dlm/memory.h
+++ b/fs/dlm/memory.h
@@ -14,12 +14,20 @@
int dlm_memory_init(void);
void dlm_memory_exit(void);
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls);
+struct dlm_rsb *dlm_allocate_rsb(void);
void dlm_free_rsb(struct dlm_rsb *r);
-struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls);
+struct dlm_lkb *dlm_allocate_lkb(void);
void dlm_free_lkb(struct dlm_lkb *l);
char *dlm_allocate_lvb(struct dlm_ls *ls);
void dlm_free_lvb(char *l);
+struct dlm_mhandle *dlm_allocate_mhandle(void);
+void dlm_free_mhandle(struct dlm_mhandle *mhandle);
+struct writequeue_entry *dlm_allocate_writequeue(void);
+void dlm_free_writequeue(struct writequeue_entry *writequeue);
+struct dlm_msg *dlm_allocate_msg(void);
+void dlm_free_msg(struct dlm_msg *msg);
+struct dlm_callback *dlm_allocate_cb(void);
+void dlm_free_cb(struct dlm_callback *cb);
#endif /* __MEMORY_DOT_H__ */
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index 7ae39ec8d9b0..2c101bbe261a 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -132,26 +132,30 @@
*/
#define DLM_DEBUG_FENCE_TERMINATION 0
+#include <trace/events/dlm.h>
#include <net/tcp.h>
#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
+#include "memory.h"
#include "lock.h"
#include "util.h"
#include "midcomms.h"
/* init value for sequence numbers for testing purpose only e.g. overflows */
#define DLM_SEQ_INIT 0
-/* 3 minutes wait to sync ending of dlm */
-#define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(3 * 60 * 1000)
+/* 5 seconds wait to sync ending of dlm */
+#define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(5000)
#define DLM_VERSION_NOT_SET 0
+#define DLM_SEND_ACK_BACK_MSG_THRESHOLD 32
+#define DLM_RECV_ACK_BACK_MSG_THRESHOLD (DLM_SEND_ACK_BACK_MSG_THRESHOLD * 8)
struct midcomms_node {
int nodeid;
uint32_t version;
- uint32_t seq_send;
- uint32_t seq_next;
+ atomic_t seq_send;
+ atomic_t seq_next;
/* These queues are unbound because we cannot drop any message in dlm.
* We could send a fence signal for a specific node to the cluster
* manager if queues hits some maximum value, however this handling
@@ -163,7 +167,7 @@ struct midcomms_node {
#define DLM_NODE_FLAG_CLOSE 1
#define DLM_NODE_FLAG_STOP_TX 2
#define DLM_NODE_FLAG_STOP_RX 3
-#define DLM_NODE_ULP_DELIVERED 4
+ atomic_t ulp_delivered;
unsigned long flags;
wait_queue_head_t shutdown_wait;
@@ -192,7 +196,7 @@ struct midcomms_node {
};
struct dlm_mhandle {
- const struct dlm_header *inner_hd;
+ const union dlm_packet *inner_p;
struct midcomms_node *node;
struct dlm_opts *opts;
struct dlm_msg *msg;
@@ -220,6 +224,11 @@ DEFINE_STATIC_SRCU(nodes_srcu);
*/
static DEFINE_MUTEX(close_lock);
+struct kmem_cache *dlm_midcomms_cache_create(void)
+{
+ return KMEM_CACHE(dlm_mhandle, 0);
+}
+
static inline const char *dlm_state_str(int state)
{
switch (state) {
@@ -279,7 +288,7 @@ static void dlm_mhandle_release(struct rcu_head *rcu)
struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu);
dlm_lowcomms_put_msg(mh->msg);
- kfree(mh);
+ dlm_free_mhandle(mh);
}
static void dlm_mhandle_delete(struct midcomms_node *node,
@@ -297,11 +306,11 @@ static void dlm_send_queue_flush(struct midcomms_node *node)
pr_debug("flush midcomms send queue of node %d\n", node->nodeid);
rcu_read_lock();
- spin_lock(&node->send_queue_lock);
+ spin_lock_bh(&node->send_queue_lock);
list_for_each_entry_rcu(mh, &node->send_queue, list) {
dlm_mhandle_delete(node, mh);
}
- spin_unlock(&node->send_queue_lock);
+ spin_unlock_bh(&node->send_queue_lock);
rcu_read_unlock();
}
@@ -309,8 +318,9 @@ static void midcomms_node_reset(struct midcomms_node *node)
{
pr_debug("reset node %d\n", node->nodeid);
- node->seq_next = DLM_SEQ_INIT;
- node->seq_send = DLM_SEQ_INIT;
+ atomic_set(&node->seq_next, DLM_SEQ_INIT);
+ atomic_set(&node->seq_send, DLM_SEQ_INIT);
+ atomic_set(&node->ulp_delivered, 0);
node->version = DLM_VERSION_NOT_SET;
node->flags = 0;
@@ -319,18 +329,31 @@ static void midcomms_node_reset(struct midcomms_node *node)
wake_up(&node->shutdown_wait);
}
-static struct midcomms_node *nodeid2node(int nodeid, gfp_t alloc)
+static struct midcomms_node *nodeid2node(int nodeid)
{
- struct midcomms_node *node, *tmp;
- int r = nodeid_hash(nodeid);
+ return __find_node(nodeid, nodeid_hash(nodeid));
+}
+
+int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr)
+{
+ int ret, idx, r = nodeid_hash(nodeid);
+ struct midcomms_node *node;
+ ret = dlm_lowcomms_addr(nodeid, addr);
+ if (ret)
+ return ret;
+
+ idx = srcu_read_lock(&nodes_srcu);
node = __find_node(nodeid, r);
- if (node || !alloc)
- return node;
+ if (node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return 0;
+ }
+ srcu_read_unlock(&nodes_srcu, idx);
- node = kmalloc(sizeof(*node), alloc);
+ node = kmalloc(sizeof(*node), GFP_NOFS);
if (!node)
- return NULL;
+ return -ENOMEM;
node->nodeid = nodeid;
spin_lock_init(&node->state_lock);
@@ -341,22 +364,12 @@ static struct midcomms_node *nodeid2node(int nodeid, gfp_t alloc)
node->users = 0;
midcomms_node_reset(node);
- spin_lock(&nodes_lock);
- /* check again if there was somebody else
- * earlier here to add the node
- */
- tmp = __find_node(nodeid, r);
- if (tmp) {
- spin_unlock(&nodes_lock);
- kfree(node);
- return tmp;
- }
-
+ spin_lock_bh(&nodes_lock);
hlist_add_head_rcu(&node->hlist, &node_hash[r]);
- spin_unlock(&nodes_lock);
+ spin_unlock_bh(&nodes_lock);
node->debugfs = dlm_create_debug_comms_file(nodeid, node);
- return node;
+ return 0;
}
static int dlm_send_ack(int nodeid, uint32_t seq)
@@ -366,26 +379,46 @@ static int dlm_send_ack(int nodeid, uint32_t seq)
struct dlm_msg *msg;
char *ppc;
- msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_NOFS, &ppc,
- NULL, NULL);
+ msg = dlm_lowcomms_new_msg(nodeid, mb_len, &ppc, NULL, NULL);
if (!msg)
return -ENOMEM;
m_header = (struct dlm_header *)ppc;
- m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- m_header->h_nodeid = dlm_our_nodeid();
- m_header->h_length = mb_len;
+ m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ m_header->h_length = cpu_to_le16(mb_len);
m_header->h_cmd = DLM_ACK;
- m_header->u.h_seq = seq;
+ m_header->u.h_seq = cpu_to_le32(seq);
- header_out(m_header);
dlm_lowcomms_commit_msg(msg);
dlm_lowcomms_put_msg(msg);
return 0;
}
+static void dlm_send_ack_threshold(struct midcomms_node *node,
+ uint32_t threshold)
+{
+ uint32_t oval, nval;
+ bool send_ack;
+
+	/* let only one user, the one that triggered the threshold, send the ack back */
+ do {
+ oval = atomic_read(&node->ulp_delivered);
+ send_ack = (oval > threshold);
+ /* abort if threshold is not reached */
+ if (!send_ack)
+ break;
+
+ nval = 0;
+ /* try to reset ulp_delivered counter */
+ } while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval);
+
+ if (send_ack)
+ dlm_send_ack(node->nodeid, atomic_read(&node->seq_next));
+}
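
dlm_send_ack_threshold() relies on an atomic_cmpxchg() loop so that, when several contexts race on ulp_delivered, only the one that actually swaps the counter back to zero sends the ack. A compilable C11 sketch of the same read/test/compare-exchange/retry pattern is shown below; take_ack_slot and the threshold value are illustrative, not part of the patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint ulp_delivered;

/* return true for exactly one caller once the counter passes the
 * threshold; that caller also resets the counter to zero */
static bool take_ack_slot(unsigned int threshold)
{
	unsigned int oval = atomic_load(&ulp_delivered);

	do {
		if (oval <= threshold)
			return false;	/* threshold not reached, no ack */
	} while (!atomic_compare_exchange_weak(&ulp_delivered, &oval, 0));

	return true;
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		atomic_fetch_add(&ulp_delivered, 1);

	printf("send ack: %d\n", take_ack_slot(32));	/* 1: counter reset */
	printf("send ack: %d\n", take_ack_slot(32));	/* 0: already reset */
	return 0;
}
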
+
static int dlm_send_fin(struct midcomms_node *node,
void (*ack_rcv)(struct midcomms_node *node))
{
@@ -394,24 +427,22 @@ static int dlm_send_fin(struct midcomms_node *node,
struct dlm_mhandle *mh;
char *ppc;
- mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_NOFS, &ppc);
+ mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc);
if (!mh)
return -ENOMEM;
+ set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
mh->ack_rcv = ack_rcv;
m_header = (struct dlm_header *)ppc;
- m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- m_header->h_nodeid = dlm_our_nodeid();
- m_header->h_length = mb_len;
+ m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ m_header->h_length = cpu_to_le16(mb_len);
m_header->h_cmd = DLM_FIN;
- header_out(m_header);
-
pr_debug("sending fin msg to node %d\n", node->nodeid);
- dlm_midcomms_commit_mhandle(mh);
- set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
+ dlm_midcomms_commit_mhandle(mh, NULL, 0);
return 0;
}
@@ -431,7 +462,7 @@ static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
}
}
- spin_lock(&node->send_queue_lock);
+ spin_lock_bh(&node->send_queue_lock);
list_for_each_entry_rcu(mh, &node->send_queue, list) {
if (before(mh->seq, seq)) {
dlm_mhandle_delete(node, mh);
@@ -440,13 +471,13 @@ static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
break;
}
}
- spin_unlock(&node->send_queue_lock);
+ spin_unlock_bh(&node->send_queue_lock);
rcu_read_unlock();
}
static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
{
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive passive fin ack from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -460,36 +491,57 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
wake_up(&node->shutdown_wait);
break;
default:
- spin_unlock(&node->state_lock);
- log_print("%s: unexpected state: %d\n",
+ spin_unlock_bh(&node->state_lock);
+ log_print("%s: unexpected state: %d",
__func__, node->state);
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return;
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
+}
+
+static void dlm_receive_buffer_3_2_trace(uint32_t seq,
+ const union dlm_packet *p)
+{
+ switch (p->header.h_cmd) {
+ case DLM_MSG:
+ trace_dlm_recv_message(dlm_our_nodeid(), seq, &p->message);
+ break;
+ case DLM_RCOM:
+ trace_dlm_recv_rcom(dlm_our_nodeid(), seq, &p->rcom);
+ break;
+ default:
+ break;
+ }
}
-static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
struct midcomms_node *node,
uint32_t seq)
{
- if (seq == node->seq_next) {
- node->seq_next++;
+ bool is_expected_seq;
+ uint32_t oval, nval;
+
+ do {
+ oval = atomic_read(&node->seq_next);
+ is_expected_seq = (oval == seq);
+ if (!is_expected_seq)
+ break;
+
+ nval = oval + 1;
+ } while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval);
+ if (is_expected_seq) {
switch (p->header.h_cmd) {
case DLM_FIN:
- /* send ack before fin */
- dlm_send_ack(node->nodeid, node->seq_next);
-
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive fin msg from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
switch (node->state) {
case DLM_ESTABLISHED:
- node->state = DLM_CLOSE_WAIT;
- pr_debug("switch node %d to state %s\n",
- node->nodeid, dlm_state_str(node->state));
+ dlm_send_ack(node->nodeid, nval);
+
/* passive shutdown DLM_LAST_ACK case 1
* additional we check if the node is used by
* cluster manager events at all.
@@ -498,173 +550,62 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
node->state = DLM_LAST_ACK;
pr_debug("switch node %d to state %s case 1\n",
node->nodeid, dlm_state_str(node->state));
- spin_unlock(&node->state_lock);
- goto send_fin;
+ set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ } else {
+ node->state = DLM_CLOSE_WAIT;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
}
break;
case DLM_FIN_WAIT1:
+ dlm_send_ack(node->nodeid, nval);
node->state = DLM_CLOSING;
+ set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
pr_debug("switch node %d to state %s\n",
node->nodeid, dlm_state_str(node->state));
break;
case DLM_FIN_WAIT2:
+ dlm_send_ack(node->nodeid, nval);
midcomms_node_reset(node);
pr_debug("switch node %d to state %s\n",
node->nodeid, dlm_state_str(node->state));
- wake_up(&node->shutdown_wait);
break;
case DLM_LAST_ACK:
/* probably remove_member caught it, do nothing */
break;
default:
- spin_unlock(&node->state_lock);
- log_print("%s: unexpected state: %d\n",
+ spin_unlock_bh(&node->state_lock);
+ log_print("%s: unexpected state: %d",
__func__, node->state);
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return;
}
- spin_unlock(&node->state_lock);
-
- set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ spin_unlock_bh(&node->state_lock);
break;
default:
- WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+ dlm_receive_buffer_3_2_trace(seq, p);
dlm_receive_buffer(p, node->nodeid);
- set_bit(DLM_NODE_ULP_DELIVERED, &node->flags);
+ atomic_inc(&node->ulp_delivered);
+ /* unlikely case to send ack back when we don't transmit */
+ dlm_send_ack_threshold(node, DLM_RECV_ACK_BACK_MSG_THRESHOLD);
break;
}
} else {
/* retry to ack message which we already have by sending back
* current node->seq_next number as ack.
*/
- if (seq < node->seq_next)
- dlm_send_ack(node->nodeid, node->seq_next);
+ if (seq < oval)
+ dlm_send_ack(node->nodeid, oval);
log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
- seq, node->seq_next, node->nodeid);
- }
-
- return;
-
-send_fin:
- set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
- dlm_send_fin(node, dlm_pas_fin_ack_rcv);
-}
-
-static struct midcomms_node *
-dlm_midcomms_recv_node_lookup(int nodeid, const union dlm_packet *p,
- uint16_t msglen, int (*cb)(struct midcomms_node *node))
-{
- struct midcomms_node *node = NULL;
- gfp_t allocation = 0;
- int ret;
-
- switch (p->header.h_cmd) {
- case DLM_RCOM:
- if (msglen < sizeof(struct dlm_rcom)) {
- log_print("rcom msg too small: %u, will skip this message from node %d",
- msglen, nodeid);
- return NULL;
- }
-
- switch (le32_to_cpu(p->rcom.rc_type)) {
- case DLM_RCOM_NAMES:
- fallthrough;
- case DLM_RCOM_NAMES_REPLY:
- fallthrough;
- case DLM_RCOM_STATUS:
- fallthrough;
- case DLM_RCOM_STATUS_REPLY:
- node = nodeid2node(nodeid, 0);
- if (node) {
- spin_lock(&node->state_lock);
- if (node->state != DLM_ESTABLISHED)
- pr_debug("receive begin RCOM msg from node %d with state %s\n",
- node->nodeid, dlm_state_str(node->state));
-
- switch (node->state) {
- case DLM_CLOSED:
- node->state = DLM_ESTABLISHED;
- pr_debug("switch node %d to state %s\n",
- node->nodeid, dlm_state_str(node->state));
- break;
- case DLM_ESTABLISHED:
- break;
- default:
- /* some invalid state passive shutdown
- * was failed, we try to reset and
- * hope it will go on.
- */
- log_print("reset node %d because shutdown stuck",
- node->nodeid);
-
- midcomms_node_reset(node);
- node->state = DLM_ESTABLISHED;
- break;
- }
- spin_unlock(&node->state_lock);
- }
-
- allocation = GFP_NOFS;
- break;
- default:
- break;
- }
-
- break;
- default:
- break;
- }
-
- node = nodeid2node(nodeid, allocation);
- if (!node) {
- switch (p->header.h_cmd) {
- case DLM_OPTS:
- if (msglen < sizeof(struct dlm_opts)) {
- log_print("opts msg too small: %u, will skip this message from node %d",
- msglen, nodeid);
- return NULL;
- }
-
- log_print_ratelimited("received dlm opts message nextcmd %d from node %d in an invalid sequence",
- p->opts.o_nextcmd, nodeid);
- break;
- default:
- log_print_ratelimited("received dlm message cmd %d from node %d in an invalid sequence",
- p->header.h_cmd, nodeid);
- break;
- }
-
- return NULL;
+ seq, oval, node->nodeid);
}
-
- ret = cb(node);
- if (ret < 0)
- return NULL;
-
- return node;
}
-static int dlm_midcomms_version_check_3_2(struct midcomms_node *node)
-{
- switch (node->version) {
- case DLM_VERSION_NOT_SET:
- node->version = DLM_VERSION_3_2;
- log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
- node->nodeid);
- break;
- case DLM_VERSION_3_2:
- break;
- default:
- log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
- DLM_VERSION_3_2, node->nodeid, node->version);
- return -1;
- }
-
- return 0;
-}
-
-static int dlm_opts_check_msglen(union dlm_packet *p, uint16_t msglen, int nodeid)
+static int dlm_opts_check_msglen(const union dlm_packet *p, uint16_t msglen,
+ int nodeid)
{
int len = msglen;
@@ -713,7 +654,7 @@ static int dlm_opts_check_msglen(union dlm_packet *p, uint16_t msglen, int nodei
return 0;
}
-static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
+static void dlm_midcomms_receive_buffer_3_2(const union dlm_packet *p, int nodeid)
{
uint16_t msglen = le16_to_cpu(p->header.h_length);
struct midcomms_node *node;
@@ -721,11 +662,38 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
int ret, idx;
idx = srcu_read_lock(&nodes_srcu);
- node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
- dlm_midcomms_version_check_3_2);
- if (!node)
+ node = nodeid2node(nodeid);
+ if (WARN_ON_ONCE(!node))
goto out;
+ switch (node->version) {
+ case DLM_VERSION_NOT_SET:
+ node->version = DLM_VERSION_3_2;
+ wake_up(&node->shutdown_wait);
+ log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
+ node->nodeid);
+
+ spin_lock(&node->state_lock);
+ switch (node->state) {
+ case DLM_CLOSED:
+ node->state = DLM_ESTABLISHED;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&node->state_lock);
+
+ break;
+ case DLM_VERSION_3_2:
+ break;
+ default:
+ log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
+ DLM_VERSION_3_2, node->nodeid, node->version);
+ goto out;
+ }
+
switch (p->header.h_cmd) {
case DLM_RCOM:
/* these rcom message we use to determine version.
@@ -734,14 +702,14 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
*
* length already checked.
*/
- switch (le32_to_cpu(p->rcom.rc_type)) {
- case DLM_RCOM_NAMES:
+ switch (p->rcom.rc_type) {
+ case cpu_to_le32(DLM_RCOM_NAMES):
fallthrough;
- case DLM_RCOM_NAMES_REPLY:
+ case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
fallthrough;
- case DLM_RCOM_STATUS:
+ case cpu_to_le32(DLM_RCOM_STATUS):
fallthrough;
- case DLM_RCOM_STATUS_REPLY:
+ case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
break;
default:
log_print("unsupported rcom type received: %u, will skip this message from node %d",
@@ -749,7 +717,7 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
goto out;
}
- WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
dlm_receive_buffer(p, nodeid);
break;
case DLM_OPTS:
@@ -814,11 +782,23 @@ out:
srcu_read_unlock(&nodes_srcu, idx);
}
-static int dlm_midcomms_version_check_3_1(struct midcomms_node *node)
+static void dlm_midcomms_receive_buffer_3_1(const union dlm_packet *p, int nodeid)
{
+ uint16_t msglen = le16_to_cpu(p->header.h_length);
+ struct midcomms_node *node;
+ int idx;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid);
+ if (WARN_ON_ONCE(!node)) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
switch (node->version) {
case DLM_VERSION_NOT_SET:
node->version = DLM_VERSION_3_1;
+ wake_up(&node->shutdown_wait);
log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1,
node->nodeid);
break;
@@ -827,22 +807,6 @@ static int dlm_midcomms_version_check_3_1(struct midcomms_node *node)
default:
log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
DLM_VERSION_3_1, node->nodeid, node->version);
- return -1;
- }
-
- return 0;
-}
-
-static void dlm_midcomms_receive_buffer_3_1(union dlm_packet *p, int nodeid)
-{
- uint16_t msglen = le16_to_cpu(p->header.h_length);
- struct midcomms_node *node;
- int idx;
-
- idx = srcu_read_lock(&nodes_srcu);
- node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
- dlm_midcomms_version_check_3_1);
- if (!node) {
srcu_read_unlock(&nodes_srcu, idx);
return;
}
@@ -869,12 +833,7 @@ static void dlm_midcomms_receive_buffer_3_1(union dlm_packet *p, int nodeid)
dlm_receive_buffer(p, nodeid);
}
-/*
- * Called from the low-level comms layer to process a buffer of
- * commands.
- */
-
-int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
+int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len)
{
const unsigned char *ptr = buf;
const struct dlm_header *hd;
@@ -909,19 +868,6 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
if (msglen > len)
break;
- switch (le32_to_cpu(hd->h_version)) {
- case DLM_VERSION_3_1:
- dlm_midcomms_receive_buffer_3_1((union dlm_packet *)ptr, nodeid);
- break;
- case DLM_VERSION_3_2:
- dlm_midcomms_receive_buffer_3_2((union dlm_packet *)ptr, nodeid);
- break;
- default:
- log_print("received invalid version header: %u from node %d, will skip this message",
- le32_to_cpu(hd->h_version), nodeid);
- break;
- }
-
ret += msglen;
len -= msglen;
ptr += msglen;
@@ -930,47 +876,43 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
return ret;
}
-void dlm_midcomms_receive_done(int nodeid)
+/*
+ * Called from the low-level comms layer to process a buffer of
+ * commands.
+ */
+int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
{
- struct midcomms_node *node;
- int idx;
+ const unsigned char *ptr = buf;
+ const struct dlm_header *hd;
+ uint16_t msglen;
+ int ret = 0;
- idx = srcu_read_lock(&nodes_srcu);
- node = nodeid2node(nodeid, 0);
- if (!node) {
- srcu_read_unlock(&nodes_srcu, idx);
- return;
- }
+ while (len >= sizeof(struct dlm_header)) {
+ hd = (struct dlm_header *)ptr;
- /* old protocol, we do nothing */
- switch (node->version) {
- case DLM_VERSION_3_2:
- break;
- default:
- srcu_read_unlock(&nodes_srcu, idx);
- return;
- }
+ msglen = le16_to_cpu(hd->h_length);
+ if (msglen > len)
+ break;
- /* do nothing if we didn't delivered stateful to ulp */
- if (!test_and_clear_bit(DLM_NODE_ULP_DELIVERED,
- &node->flags)) {
- srcu_read_unlock(&nodes_srcu, idx);
- return;
+ switch (hd->h_version) {
+ case cpu_to_le32(DLM_VERSION_3_1):
+ dlm_midcomms_receive_buffer_3_1((const union dlm_packet *)ptr, nodeid);
+ break;
+ case cpu_to_le32(DLM_VERSION_3_2):
+ dlm_midcomms_receive_buffer_3_2((const union dlm_packet *)ptr, nodeid);
+ break;
+ default:
+ log_print("received invalid version header: %u from node %d, will skip this message",
+ le32_to_cpu(hd->h_version), nodeid);
+ break;
+ }
+
+ ret += msglen;
+ len -= msglen;
+ ptr += msglen;
}
- spin_lock(&node->state_lock);
- /* we only ack if state is ESTABLISHED */
- switch (node->state) {
- case DLM_ESTABLISHED:
- spin_unlock(&node->state_lock);
- dlm_send_ack(node->nodeid, node->seq_next);
- break;
- default:
- spin_unlock(&node->state_lock);
- /* do nothing FIN has it's own ack send */
- break;
- };
- srcu_read_unlock(&nodes_srcu, idx);
+ return ret;
}
void dlm_midcomms_unack_msg_resend(int nodeid)
@@ -980,8 +922,8 @@ void dlm_midcomms_unack_msg_resend(int nodeid)
int idx, ret;
idx = srcu_read_lock(&nodes_srcu);
- node = nodeid2node(nodeid, 0);
- if (!node) {
+ node = nodeid2node(nodeid);
+ if (WARN_ON_ONCE(!node)) {
srcu_read_unlock(&nodes_srcu, idx);
return;
}
@@ -1013,32 +955,33 @@ static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
uint32_t seq)
{
opts->o_header.h_cmd = DLM_OPTS;
- opts->o_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- opts->o_header.h_nodeid = dlm_our_nodeid();
- opts->o_header.h_length = DLM_MIDCOMMS_OPT_LEN + inner_len;
- opts->o_header.u.h_seq = seq;
- header_out(&opts->o_header);
+ opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
+ opts->o_header.u.h_seq = cpu_to_le32(seq);
}
-static void midcomms_new_msg_cb(struct dlm_mhandle *mh)
+static void midcomms_new_msg_cb(void *data)
{
+ struct dlm_mhandle *mh = data;
+
atomic_inc(&mh->node->send_queue_cnt);
- spin_lock(&mh->node->send_queue_lock);
+ spin_lock_bh(&mh->node->send_queue_lock);
list_add_tail_rcu(&mh->list, &mh->node->send_queue);
- spin_unlock(&mh->node->send_queue_lock);
+ spin_unlock_bh(&mh->node->send_queue_lock);
- mh->seq = mh->node->seq_send++;
+ mh->seq = atomic_fetch_inc(&mh->node->seq_send);
}
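
With seq_send now an atomic_t, each caller of midcomms_new_msg_cb() draws a unique sequence number via atomic_fetch_inc() instead of the previous unlocked post-increment. A tiny C11 equivalent, with the hypothetical helper next_seq(), looks like this:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq_send;

/* each caller gets the current value and bumps the counter atomically,
 * like atomic_fetch_inc(&node->seq_send) in midcomms_new_msg_cb() */
static unsigned int next_seq(void)
{
	return atomic_fetch_add(&seq_send, 1);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("%u\n", next_seq());	/* 0, 1, 2 */
	return 0;
}
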
static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
- int len, gfp_t allocation, char **ppc)
+ int len, char **ppc)
{
struct dlm_opts *opts;
struct dlm_msg *msg;
msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
- allocation, ppc, midcomms_new_msg_cb, mh);
+ ppc, midcomms_new_msg_cb, mh);
if (!msg)
return NULL;
@@ -1049,12 +992,15 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node
dlm_fill_opts_header(opts, len, mh->seq);
*ppc += sizeof(*opts);
- mh->inner_hd = (const struct dlm_header *)*ppc;
+ mh->inner_p = (const union dlm_packet *)*ppc;
return msg;
}
-struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
- gfp_t allocation, char **ppc)
+/* avoid false positive for nodes_srcu, the unlock happens in
+ * dlm_midcomms_commit_mhandle, which must be called on success
+ */
+#ifndef __CHECKER__
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc)
{
struct midcomms_node *node;
struct dlm_mhandle *mh;
@@ -1062,44 +1008,44 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
int idx;
idx = srcu_read_lock(&nodes_srcu);
- node = nodeid2node(nodeid, 0);
- if (!node) {
- WARN_ON_ONCE(1);
+ node = nodeid2node(nodeid);
+ if (WARN_ON_ONCE(!node))
goto err;
- }
/* this is a bug, however we going on and hope it will be resolved */
- WARN_ON(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
+ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
- mh = kzalloc(sizeof(*mh), GFP_NOFS);
+ mh = dlm_allocate_mhandle();
if (!mh)
goto err;
+ mh->committed = false;
+ mh->ack_rcv = NULL;
mh->idx = idx;
mh->node = node;
switch (node->version) {
case DLM_VERSION_3_1:
- msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc,
- NULL, NULL);
+ msg = dlm_lowcomms_new_msg(nodeid, len, ppc, NULL, NULL);
if (!msg) {
- kfree(mh);
+ dlm_free_mhandle(mh);
goto err;
}
break;
case DLM_VERSION_3_2:
- msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
- ppc);
+ /* send ack back if necessary */
+ dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
+
+ msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, ppc);
if (!msg) {
- kfree(mh);
+ dlm_free_mhandle(mh);
goto err;
}
-
break;
default:
- kfree(mh);
- WARN_ON(1);
+ dlm_free_mhandle(mh);
+ WARN_ON_ONCE(1);
goto err;
}
@@ -1116,17 +1062,45 @@ err:
srcu_read_unlock(&nodes_srcu, idx);
return NULL;
}
+#endif
+
+static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
+ const void *name, int namelen)
+{
+ switch (mh->inner_p->header.h_cmd) {
+ case DLM_MSG:
+ trace_dlm_send_message(mh->node->nodeid, mh->seq,
+ &mh->inner_p->message,
+ name, namelen);
+ break;
+ case DLM_RCOM:
+ trace_dlm_send_rcom(mh->node->nodeid, mh->seq,
+ &mh->inner_p->rcom);
+ break;
+ default:
+ /* nothing to trace */
+ break;
+ }
+}
-static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
+static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
+ const void *name, int namelen)
{
/* nexthdr chain for fast lookup */
- mh->opts->o_nextcmd = mh->inner_hd->h_cmd;
+ mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
mh->committed = true;
+ dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
dlm_lowcomms_commit_msg(mh->msg);
}
-void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
+/* avoid false positive for nodes_srcu, the lock was taken in
+ * dlm_midcomms_get_mhandle
+ */
+#ifndef __CHECKER__
+void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
+ const void *name, int namelen)
{
+
switch (mh->node->version) {
case DLM_VERSION_3_1:
srcu_read_unlock(&nodes_srcu, mh->idx);
@@ -1134,32 +1108,81 @@ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
dlm_lowcomms_commit_msg(mh->msg);
dlm_lowcomms_put_msg(mh->msg);
/* mh is not part of rcu list in this case */
- kfree(mh);
+ dlm_free_mhandle(mh);
break;
case DLM_VERSION_3_2:
- dlm_midcomms_commit_msg_3_2(mh);
+		/* hold the rcu read lock here because while sending the
+		 * dlm message out we could receive an ack back that
+		 * releases the mhandle, resulting in a use after free.
+		 */
+ rcu_read_lock();
+ dlm_midcomms_commit_msg_3_2(mh, name, namelen);
srcu_read_unlock(&nodes_srcu, mh->idx);
+ rcu_read_unlock();
break;
default:
srcu_read_unlock(&nodes_srcu, mh->idx);
- WARN_ON(1);
+ WARN_ON_ONCE(1);
break;
}
}
+#endif
int dlm_midcomms_start(void)
{
+ return dlm_lowcomms_start();
+}
+
+void dlm_midcomms_stop(void)
+{
+ dlm_lowcomms_stop();
+}
+
+void dlm_midcomms_init(void)
+{
int i;
for (i = 0; i < CONN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&node_hash[i]);
- return dlm_lowcomms_start();
+ dlm_lowcomms_init();
+}
+
+static void midcomms_node_release(struct rcu_head *rcu)
+{
+ struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
+
+ WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
+ dlm_send_queue_flush(node);
+ kfree(node);
+}
+
+void dlm_midcomms_exit(void)
+{
+ struct midcomms_node *node;
+ int i, idx;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ dlm_delete_debug_comms_file(node->debugfs);
+
+ spin_lock(&nodes_lock);
+ hlist_del_rcu(&node->hlist);
+ spin_unlock(&nodes_lock);
+
+ call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
+ }
+ }
+ srcu_read_unlock(&nodes_srcu, idx);
+
+ dlm_lowcomms_exit();
}
static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
{
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive active fin ack from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -1173,20 +1196,19 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
midcomms_node_reset(node);
pr_debug("switch node %d to state %s\n",
node->nodeid, dlm_state_str(node->state));
- wake_up(&node->shutdown_wait);
break;
case DLM_CLOSED:
/* not valid but somehow we got what we want */
wake_up(&node->shutdown_wait);
break;
default:
- spin_unlock(&node->state_lock);
- log_print("%s: unexpected state: %d\n",
+ spin_unlock_bh(&node->state_lock);
+ log_print("%s: unexpected state: %d",
__func__, node->state);
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return;
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
}
void dlm_midcomms_add_member(int nodeid)
@@ -1194,17 +1216,14 @@ void dlm_midcomms_add_member(int nodeid)
struct midcomms_node *node;
int idx;
- if (nodeid == dlm_our_nodeid())
- return;
-
idx = srcu_read_lock(&nodes_srcu);
- node = nodeid2node(nodeid, GFP_NOFS);
- if (!node) {
+ node = nodeid2node(nodeid);
+ if (WARN_ON_ONCE(!node)) {
srcu_read_unlock(&nodes_srcu, idx);
return;
}
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
if (!node->users) {
pr_debug("receive add member from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -1231,8 +1250,8 @@ void dlm_midcomms_add_member(int nodeid)
}
node->users++;
- pr_debug("users inc count %d\n", node->users);
- spin_unlock(&node->state_lock);
+ pr_debug("node %d users inc count %d\n", nodeid, node->users);
+ spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx);
}
@@ -1242,19 +1261,27 @@ void dlm_midcomms_remove_member(int nodeid)
struct midcomms_node *node;
int idx;
- if (nodeid == dlm_our_nodeid())
- return;
-
idx = srcu_read_lock(&nodes_srcu);
- node = nodeid2node(nodeid, 0);
+ node = nodeid2node(nodeid);
+ /* in case of dlm_midcomms_close() removes node */
if (!node) {
srcu_read_unlock(&nodes_srcu, idx);
return;
}
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
+	/* handle the case where dlm_midcomms_addr() created the node but
+	 * it was never added as a member because dlm_midcomms_close()
+	 * removed it
+	 */
+ if (!node->users) {
+ spin_unlock_bh(&node->state_lock);
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
node->users--;
- pr_debug("users dec count %d\n", node->users);
+ pr_debug("node %d users dec count %d\n", nodeid, node->users);
/* hitting users count to zero means the
* other side is running dlm_midcomms_stop()
@@ -1269,11 +1296,11 @@ void dlm_midcomms_remove_member(int nodeid)
case DLM_CLOSE_WAIT:
/* passive shutdown DLM_LAST_ACK case 2 */
node->state = DLM_LAST_ACK;
- spin_unlock(&node->state_lock);
-
pr_debug("switch node %d to state %s case 2\n",
node->nodeid, dlm_state_str(node->state));
- goto send_fin;
+ set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ break;
case DLM_LAST_ACK:
/* probably receive fin caught it, do nothing */
break;
@@ -1281,28 +1308,35 @@ void dlm_midcomms_remove_member(int nodeid)
/* already gone, do nothing */
break;
default:
- log_print("%s: unexpected state: %d\n",
+ log_print("%s: unexpected state: %d",
__func__, node->state);
break;
}
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx);
- return;
-
-send_fin:
- set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
- dlm_send_fin(node, dlm_pas_fin_ack_rcv);
- srcu_read_unlock(&nodes_srcu, idx);
}
-static void midcomms_node_release(struct rcu_head *rcu)
+void dlm_midcomms_version_wait(void)
{
- struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
+ struct midcomms_node *node;
+ int i, idx, ret;
- WARN_ON(atomic_read(&node->send_queue_cnt));
- kfree(node);
+ idx = srcu_read_lock(&nodes_srcu);
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ ret = wait_event_timeout(node->shutdown_wait,
+ node->version != DLM_VERSION_NOT_SET ||
+ node->state == DLM_CLOSED ||
+ test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
+ DLM_SHUTDOWN_TIMEOUT);
+ if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags))
+ pr_debug("version wait timed out for node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ }
+ }
+ srcu_read_unlock(&nodes_srcu, idx);
}
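
dlm_midcomms_version_wait() bounds its wait on each node with wait_event_timeout(): it returns as soon as the peer's version is known, the node is closed, or the timeout expires. The sketch below models only the bounded-wait behaviour in userspace with a crude polling loop; wait_for_version and version_set are invented names, and the kernel primitive sleeps on a waitqueue rather than polling.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* hypothetical stand-in for node->version being filled in by the peer */
static volatile bool version_set;

/* crude model of a bounded wait: poll the condition until it becomes
 * true or the timeout (in seconds) expires; returns true on success */
static bool wait_for_version(int timeout_sec)
{
	struct timespec poll = { 0, 10 * 1000 * 1000 };	/* 10 ms */
	time_t deadline = time(NULL) + timeout_sec;

	while (!version_set) {
		if (time(NULL) >= deadline)
			return false;
		nanosleep(&poll, NULL);
	}
	return true;
}

int main(void)
{
	if (!wait_for_version(1))
		puts("version wait timed out");	/* like the pr_debug() above */
	return 0;
}
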
static void midcomms_shutdown(struct midcomms_node *node)
@@ -1317,7 +1351,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
return;
}
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive active shutdown for node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
switch (node->state) {
@@ -1325,40 +1359,33 @@ static void midcomms_shutdown(struct midcomms_node *node)
node->state = DLM_FIN_WAIT1;
pr_debug("switch node %d to state %s case 2\n",
node->nodeid, dlm_state_str(node->state));
+ dlm_send_fin(node, dlm_act_fin_ack_rcv);
break;
case DLM_CLOSED:
/* we have what we want */
- spin_unlock(&node->state_lock);
- return;
+ break;
default:
/* busy to enter DLM_FIN_WAIT1, wait until passive
* done in shutdown_wait to enter DLM_CLOSED.
*/
break;
}
- spin_unlock(&node->state_lock);
-
- if (node->state == DLM_FIN_WAIT1) {
- dlm_send_fin(node, dlm_act_fin_ack_rcv);
+ spin_unlock_bh(&node->state_lock);
- if (DLM_DEBUG_FENCE_TERMINATION)
- msleep(5000);
- }
+ if (DLM_DEBUG_FENCE_TERMINATION)
+ msleep(5000);
/* wait for other side dlm + fin */
ret = wait_event_timeout(node->shutdown_wait,
node->state == DLM_CLOSED ||
test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
DLM_SHUTDOWN_TIMEOUT);
- if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags)) {
+ if (!ret)
pr_debug("active shutdown timed out for node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
- midcomms_node_reset(node);
- return;
- }
-
- pr_debug("active shutdown done for node %d with state %s\n",
- node->nodeid, dlm_state_str(node->state));
+ else
+ pr_debug("active shutdown done for node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
}
void dlm_midcomms_shutdown(void)
@@ -1371,20 +1398,18 @@ void dlm_midcomms_shutdown(void)
for (i = 0; i < CONN_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
midcomms_shutdown(node);
+ }
+ }
- dlm_delete_debug_comms_file(node->debugfs);
-
- spin_lock(&nodes_lock);
- hlist_del_rcu(&node->hlist);
- spin_unlock(&nodes_lock);
+ dlm_lowcomms_shutdown();
- call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ midcomms_node_reset(node);
}
}
srcu_read_unlock(&nodes_srcu, idx);
mutex_unlock(&close_lock);
-
- dlm_lowcomms_shutdown();
}
int dlm_midcomms_close(int nodeid)
@@ -1392,12 +1417,9 @@ int dlm_midcomms_close(int nodeid)
struct midcomms_node *node;
int idx, ret;
- if (nodeid == dlm_our_nodeid())
- return 0;
-
idx = srcu_read_lock(&nodes_srcu);
/* Abort pending close/remove operation */
- node = nodeid2node(nodeid, 0);
+ node = nodeid2node(nodeid);
if (node) {
/* let shutdown waiters leave */
set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
@@ -1407,21 +1429,81 @@ int dlm_midcomms_close(int nodeid)
synchronize_srcu(&nodes_srcu);
- idx = srcu_read_lock(&nodes_srcu);
mutex_lock(&close_lock);
- node = nodeid2node(nodeid, 0);
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid);
if (!node) {
- mutex_unlock(&close_lock);
srcu_read_unlock(&nodes_srcu, idx);
+ mutex_unlock(&close_lock);
return dlm_lowcomms_close(nodeid);
}
ret = dlm_lowcomms_close(nodeid);
- spin_lock(&node->state_lock);
- midcomms_node_reset(node);
- spin_unlock(&node->state_lock);
+ dlm_delete_debug_comms_file(node->debugfs);
+
+ spin_lock_bh(&nodes_lock);
+ hlist_del_rcu(&node->hlist);
+ spin_unlock_bh(&nodes_lock);
srcu_read_unlock(&nodes_srcu, idx);
+
+ /* wait until all readers have left before flushing the send queue */
+ synchronize_srcu(&nodes_srcu);
+
+ /* drop all pending dlm messages; this is fine because
+ * this function gets called when the node is fenced
+ */
+ dlm_send_queue_flush(node);
+
+ call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
mutex_unlock(&close_lock);
return ret;
}
+
+/* debug functionality to send raw dlm msg from user space */
+struct dlm_rawmsg_data {
+ struct midcomms_node *node;
+ void *buf;
+};
+
+static void midcomms_new_rawmsg_cb(void *data)
+{
+ struct dlm_rawmsg_data *rd = data;
+ struct dlm_header *h = rd->buf;
+
+ switch (h->h_version) {
+ case cpu_to_le32(DLM_VERSION_3_1):
+ break;
+ default:
+ switch (h->h_cmd) {
+ case DLM_OPTS:
+ if (!h->u.h_seq)
+ h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send));
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+}
+
+int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
+ int buflen)
+{
+ struct dlm_rawmsg_data rd;
+ struct dlm_msg *msg;
+ char *msgbuf;
+
+ rd.node = node;
+ rd.buf = buf;
+
+ msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf,
+ midcomms_new_rawmsg_cb, &rd);
+ if (!msg)
+ return -ENOMEM;
+
+ memcpy(msgbuf, buf, buflen);
+ dlm_lowcomms_commit_msg(msg);
+ return 0;
+}
+
diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
index 579abc6929be..7fad1d170bba 100644
--- a/fs/dlm/midcomms.h
+++ b/fs/dlm/midcomms.h
@@ -14,12 +14,18 @@
struct midcomms_node;
+int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len);
int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
-struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
- gfp_t allocation, char **ppc);
-void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh);
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc);
+void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name,
+ int namelen);
+int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr);
+void dlm_midcomms_version_wait(void);
int dlm_midcomms_close(int nodeid);
int dlm_midcomms_start(void);
+void dlm_midcomms_stop(void);
+void dlm_midcomms_init(void);
+void dlm_midcomms_exit(void);
void dlm_midcomms_shutdown(void);
void dlm_midcomms_add_member(int nodeid);
void dlm_midcomms_remove_member(int nodeid);
@@ -28,6 +34,9 @@ const char *dlm_midcomms_state(struct midcomms_node *node);
unsigned long dlm_midcomms_flags(struct midcomms_node *node);
int dlm_midcomms_send_queue_cnt(struct midcomms_node *node);
uint32_t dlm_midcomms_version(struct midcomms_node *node);
+int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
+ int buflen);
+struct kmem_cache *dlm_midcomms_cache_create(void);
#endif /* __MIDCOMMS_DOT_H__ */
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
deleted file mode 100644
index 67f68d48d60c..000000000000
--- a/fs/dlm/netlink.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
- */
-
-#include <net/genetlink.h>
-#include <linux/dlm.h>
-#include <linux/dlm_netlink.h>
-#include <linux/gfp.h>
-
-#include "dlm_internal.h"
-
-static uint32_t dlm_nl_seqnum;
-static uint32_t listener_nlportid;
-
-static struct genl_family family;
-
-static int prepare_data(u8 cmd, struct sk_buff **skbp, size_t size)
-{
- struct sk_buff *skb;
- void *data;
-
- skb = genlmsg_new(size, GFP_NOFS);
- if (!skb)
- return -ENOMEM;
-
- /* add the message headers */
- data = genlmsg_put(skb, 0, dlm_nl_seqnum++, &family, 0, cmd);
- if (!data) {
- nlmsg_free(skb);
- return -EINVAL;
- }
-
- *skbp = skb;
- return 0;
-}
-
-static struct dlm_lock_data *mk_data(struct sk_buff *skb)
-{
- struct nlattr *ret;
-
- ret = nla_reserve(skb, DLM_TYPE_LOCK, sizeof(struct dlm_lock_data));
- if (!ret)
- return NULL;
- return nla_data(ret);
-}
-
-static int send_data(struct sk_buff *skb)
-{
- struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
- void *data = genlmsg_data(genlhdr);
-
- genlmsg_end(skb, data);
-
- return genlmsg_unicast(&init_net, skb, listener_nlportid);
-}
-
-static int user_cmd(struct sk_buff *skb, struct genl_info *info)
-{
- listener_nlportid = info->snd_portid;
- printk("user_cmd nlpid %u\n", listener_nlportid);
- return 0;
-}
-
-static const struct genl_small_ops dlm_nl_ops[] = {
- {
- .cmd = DLM_CMD_HELLO,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = user_cmd,
- },
-};
-
-static struct genl_family family __ro_after_init = {
- .name = DLM_GENL_NAME,
- .version = DLM_GENL_VERSION,
- .small_ops = dlm_nl_ops,
- .n_small_ops = ARRAY_SIZE(dlm_nl_ops),
- .module = THIS_MODULE,
-};
-
-int __init dlm_netlink_init(void)
-{
- return genl_register_family(&family);
-}
-
-void dlm_netlink_exit(void)
-{
- genl_unregister_family(&family);
-}
-
-static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb)
-{
- struct dlm_rsb *r = lkb->lkb_resource;
-
- memset(data, 0, sizeof(struct dlm_lock_data));
-
- data->version = DLM_LOCK_DATA_VERSION;
- data->nodeid = lkb->lkb_nodeid;
- data->ownpid = lkb->lkb_ownpid;
- data->id = lkb->lkb_id;
- data->remid = lkb->lkb_remid;
- data->status = lkb->lkb_status;
- data->grmode = lkb->lkb_grmode;
- data->rqmode = lkb->lkb_rqmode;
- if (lkb->lkb_ua)
- data->xid = lkb->lkb_ua->xid;
- if (r) {
- data->lockspace_id = r->res_ls->ls_global_id;
- data->resource_namelen = r->res_length;
- memcpy(data->resource_name, r->res_name, r->res_length);
- }
-}
-
-void dlm_timeout_warn(struct dlm_lkb *lkb)
-{
- struct sk_buff *send_skb;
- struct dlm_lock_data *data;
- size_t size;
- int rv;
-
- size = nla_total_size(sizeof(struct dlm_lock_data)) +
- nla_total_size(0); /* why this? */
-
- rv = prepare_data(DLM_CMD_TIMEOUT, &send_skb, size);
- if (rv < 0)
- return;
-
- data = mk_data(send_skb);
- if (!data) {
- nlmsg_free(send_skb);
- return;
- }
-
- fill_data(data, lkb);
-
- send_data(send_skb);
-}
-
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index c38b2b8ffd1d..9ca83ef70ed1 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -4,35 +4,38 @@
*/
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/slab.h>
+#include <trace/events/dlm.h>
+
#include "dlm_internal.h"
#include "lockspace.h"
-static spinlock_t ops_lock;
-static struct list_head send_list;
-static struct list_head recv_list;
-static wait_queue_head_t send_wq;
-static wait_queue_head_t recv_wq;
-
-struct plock_op {
- struct list_head list;
- int done;
- struct dlm_plock_info info;
-};
+static DEFINE_SPINLOCK(ops_lock);
+static LIST_HEAD(send_list);
+static LIST_HEAD(recv_list);
+static DECLARE_WAIT_QUEUE_HEAD(send_wq);
+static DECLARE_WAIT_QUEUE_HEAD(recv_wq);
-struct plock_xop {
- struct plock_op xop;
- int (*callback)(struct file_lock *fl, int result);
+struct plock_async_data {
void *fl;
void *file;
struct file_lock flc;
+ int (*callback)(struct file_lock *fl, int result);
};
+struct plock_op {
+ struct list_head list;
+ int done;
+ struct dlm_plock_info info;
+ /* if set, indicates async handling */
+ struct plock_async_data *data;
+};
static inline void set_version(struct dlm_plock_info *info)
{
@@ -41,6 +44,27 @@ static inline void set_version(struct dlm_plock_info *info)
info->version[2] = DLM_PLOCK_VERSION_PATCH;
}
+static struct plock_op *plock_lookup_waiter(const struct dlm_plock_info *info)
+{
+ struct plock_op *op = NULL, *iter;
+
+ list_for_each_entry(iter, &recv_list, list) {
+ if (iter->info.fsid == info->fsid &&
+ iter->info.number == info->number &&
+ iter->info.owner == info->owner &&
+ iter->info.pid == info->pid &&
+ iter->info.start == info->start &&
+ iter->info.end == info->end &&
+ iter->info.ex == info->ex &&
+ iter->info.wait) {
+ op = iter;
+ break;
+ }
+ }
+
+ return op;
+}
+
static int check_version(struct dlm_plock_info *info)
{
if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
@@ -58,113 +82,142 @@ static int check_version(struct dlm_plock_info *info)
return 0;
}
+static void dlm_release_plock_op(struct plock_op *op)
+{
+ kfree(op->data);
+ kfree(op);
+}
+
static void send_op(struct plock_op *op)
{
set_version(&op->info);
- INIT_LIST_HEAD(&op->list);
spin_lock(&ops_lock);
list_add_tail(&op->list, &send_list);
spin_unlock(&ops_lock);
wake_up(&send_wq);
}
-/* If a process was killed while waiting for the only plock on a file,
- locks_remove_posix will not see any lock on the file so it won't
- send an unlock-close to us to pass on to userspace to clean up the
- abandoned waiter. So, we have to insert the unlock-close when the
- lock call is interrupted. */
-
-static void do_unlock_close(struct dlm_ls *ls, u64 number,
- struct file *file, struct file_lock *fl)
+static int do_lock_cancel(const struct dlm_plock_info *orig_info)
{
struct plock_op *op;
+ int rv;
op = kzalloc(sizeof(*op), GFP_NOFS);
if (!op)
- return;
+ return -ENOMEM;
+
+ op->info = *orig_info;
+ op->info.optype = DLM_PLOCK_OP_CANCEL;
+ op->info.wait = 0;
- op->info.optype = DLM_PLOCK_OP_UNLOCK;
- op->info.pid = fl->fl_pid;
- op->info.fsid = ls->ls_global_id;
- op->info.number = number;
- op->info.start = 0;
- op->info.end = OFFSET_MAX;
- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
- op->info.owner = (__u64) fl->fl_pid;
- else
- op->info.owner = (__u64)(long) fl->fl_owner;
-
- op->info.flags |= DLM_PLOCK_FL_CLOSE;
send_op(op);
+ wait_event(recv_wq, (op->done != 0));
+
+ rv = op->info.rv;
+
+ dlm_release_plock_op(op);
+ return rv;
}
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl)
{
+ struct plock_async_data *op_data;
struct dlm_ls *ls;
struct plock_op *op;
- struct plock_xop *xop;
int rv;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
- xop = kzalloc(sizeof(*xop), GFP_NOFS);
- if (!xop) {
+ op = kzalloc(sizeof(*op), GFP_NOFS);
+ if (!op) {
rv = -ENOMEM;
goto out;
}
- op = &xop->xop;
op->info.optype = DLM_PLOCK_OP_LOCK;
- op->info.pid = fl->fl_pid;
- op->info.ex = (fl->fl_type == F_WRLCK);
- op->info.wait = IS_SETLKW(cmd);
+ op->info.pid = fl->c.flc_pid;
+ op->info.ex = lock_is_write(fl);
+ op->info.wait = !!(fl->c.flc_flags & FL_SLEEP);
op->info.fsid = ls->ls_global_id;
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
+ op->info.owner = (__u64)(long) fl->c.flc_owner;
+ /* async handling */
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
- /* fl_owner is lockd which doesn't distinguish
- processes on the nfs client */
- op->info.owner = (__u64) fl->fl_pid;
- xop->callback = fl->fl_lmops->lm_grant;
- locks_init_lock(&xop->flc);
- locks_copy_lock(&xop->flc, fl);
- xop->fl = fl;
- xop->file = file;
- } else {
- op->info.owner = (__u64)(long) fl->fl_owner;
- xop->callback = NULL;
+ op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
+ if (!op_data) {
+ dlm_release_plock_op(op);
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ op_data->callback = fl->fl_lmops->lm_grant;
+ locks_init_lock(&op_data->flc);
+ locks_copy_lock(&op_data->flc, fl);
+ op_data->fl = fl;
+ op_data->file = file;
+
+ op->data = op_data;
+
+ send_op(op);
+ rv = FILE_LOCK_DEFERRED;
+ goto out;
}
send_op(op);
- if (xop->callback == NULL) {
+ if (op->info.wait) {
rv = wait_event_interruptible(recv_wq, (op->done != 0));
if (rv == -ERESTARTSYS) {
- log_debug(ls, "dlm_posix_lock: wait killed %llx",
- (unsigned long long)number);
spin_lock(&ops_lock);
- list_del(&op->list);
+ /* recheck under ops_lock whether done != 0; if so, this
+ * interrupt case should be ignored
+ */
+ if (op->done != 0) {
+ spin_unlock(&ops_lock);
+ goto do_lock_wait;
+ }
spin_unlock(&ops_lock);
- kfree(xop);
- do_unlock_close(ls, number, file, fl);
+
+ rv = do_lock_cancel(&op->info);
+ switch (rv) {
+ case 0:
+ /* the waiter was deleted in user space, so the answer will
+ * never come; remove the original request. The original
+ * request must be on recv_list because the answer of
+ * do_lock_cancel() synchronized it.
+ */
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+ rv = -EINTR;
+ break;
+ case -ENOENT:
+ /* cancellation wasn't successful but op should be done */
+ fallthrough;
+ default:
+ /* internal error doing cancel; we need to wait */
+ goto wait;
+ }
+
+ log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+ __func__, ls->ls_global_id,
+ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
goto out;
}
} else {
- rv = FILE_LOCK_DEFERRED;
- goto out;
+wait:
+ wait_event(recv_wq, (op->done != 0));
}
- spin_lock(&ops_lock);
- if (!list_empty(&op->list)) {
- log_error(ls, "dlm_posix_lock: op on list %llx",
- (unsigned long long)number);
- list_del(&op->list);
- }
- spin_unlock(&ops_lock);
+do_lock_wait:
+
+ WARN_ON(!list_empty(&op->list));
rv = op->info.rv;
@@ -174,7 +227,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
(unsigned long long)number);
}
- kfree(xop);
+ dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
return rv;
@@ -184,26 +237,20 @@ EXPORT_SYMBOL_GPL(dlm_posix_lock);
/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
+ struct plock_async_data *op_data = op->data;
struct file *file;
struct file_lock *fl;
struct file_lock *flc;
int (*notify)(struct file_lock *fl, int result) = NULL;
- struct plock_xop *xop = (struct plock_xop *)op;
int rv = 0;
- spin_lock(&ops_lock);
- if (!list_empty(&op->list)) {
- log_print("dlm_plock_callback: op on list %llx",
- (unsigned long long)op->info.number);
- list_del(&op->list);
- }
- spin_unlock(&ops_lock);
+ WARN_ON(!list_empty(&op->list));
/* check if the following 2 are still valid or make a copy */
- file = xop->file;
- flc = &xop->flc;
- fl = xop->fl;
- notify = xop->callback;
+ file = op_data->file;
+ flc = &op_data->flc;
+ fl = op_data->fl;
+ notify = op_data->callback;
if (op->info.rv) {
notify(fl, op->info.rv);
@@ -211,7 +258,7 @@ static int dlm_plock_callback(struct plock_op *op)
}
/* got fs lock; bookkeep locally as well: */
- flc->fl_flags &= ~FL_SLEEP;
+ flc->c.flc_flags &= ~FL_SLEEP;
if (posix_lock_file(file, flc, NULL)) {
/*
* This can only happen in the case of kmalloc() failure.
@@ -228,13 +275,13 @@ static int dlm_plock_callback(struct plock_op *op)
rv = notify(fl, 0);
if (rv) {
/* XXX: We need to cancel the fs lock here: */
- log_print("dlm_plock_callback: lock granted after lock request "
- "failed; dangling lock!\n");
+ log_print("%s: lock granted after lock request failed; dangling lock!",
+ __func__);
goto out;
}
out:
- kfree(xop);
+ dlm_release_plock_op(op);
return rv;
}
@@ -244,7 +291,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct dlm_ls *ls;
struct plock_op *op;
int rv;
- unsigned char fl_flags = fl->fl_flags;
+ unsigned char saved_flags = fl->c.flc_flags;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
@@ -257,7 +304,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
}
/* cause the vfs unlock to return ENOENT if lock is not found */
- fl->fl_flags |= FL_EXISTS;
+ fl->c.flc_flags |= FL_EXISTS;
rv = locks_lock_file_wait(file, fl);
if (rv == -ENOENT) {
@@ -270,17 +317,14 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
}
op->info.optype = DLM_PLOCK_OP_UNLOCK;
- op->info.pid = fl->fl_pid;
+ op->info.pid = fl->c.flc_pid;
op->info.fsid = ls->ls_global_id;
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
- op->info.owner = (__u64) fl->fl_pid;
- else
- op->info.owner = (__u64)(long) fl->fl_owner;
+ op->info.owner = (__u64)(long) fl->c.flc_owner;
- if (fl->fl_flags & FL_CLOSE) {
+ if (fl->c.flc_flags & FL_CLOSE) {
op->info.flags |= DLM_PLOCK_FL_CLOSE;
send_op(op);
rv = 0;
@@ -290,13 +334,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
send_op(op);
wait_event(recv_wq, (op->done != 0));
- spin_lock(&ops_lock);
- if (!list_empty(&op->list)) {
- log_error(ls, "dlm_posix_unlock: op on list %llx",
- (unsigned long long)number);
- list_del(&op->list);
- }
- spin_unlock(&ops_lock);
+ WARN_ON(!list_empty(&op->list));
rv = op->info.rv;
@@ -304,14 +342,83 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
rv = 0;
out_free:
- kfree(op);
+ dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
- fl->fl_flags = fl_flags;
+ fl->c.flc_flags = saved_flags;
return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_unlock);
+/*
+ * NOTE: This implementation can only handle async lock requests as nfs
+ * issues them. It cannot handle cancellation of a pending lock request
+ * sitting in wait_event(), but for now nfs is the only local kernel
+ * user.
+ */
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl)
+{
+ struct dlm_plock_info info;
+ struct plock_op *op;
+ struct dlm_ls *ls;
+ int rv;
+
+ /* this only works for async requests for now; nfs is the only
+ * kernel user right now.
+ */
+ if (WARN_ON_ONCE(!fl->fl_lmops || !fl->fl_lmops->lm_grant))
+ return -EOPNOTSUPP;
+
+ ls = dlm_find_lockspace_local(lockspace);
+ if (!ls)
+ return -EINVAL;
+
+ memset(&info, 0, sizeof(info));
+ info.pid = fl->c.flc_pid;
+ info.ex = lock_is_write(fl);
+ info.fsid = ls->ls_global_id;
+ dlm_put_lockspace(ls);
+ info.number = number;
+ info.start = fl->fl_start;
+ info.end = fl->fl_end;
+ info.owner = (__u64)(long) fl->c.flc_owner;
+
+ rv = do_lock_cancel(&info);
+ switch (rv) {
+ case 0:
+ spin_lock(&ops_lock);
+ /* lock request to cancel must be on recv_list because
+ * do_lock_cancel() synchronizes it.
+ */
+ op = plock_lookup_waiter(&info);
+ if (WARN_ON_ONCE(!op)) {
+ spin_unlock(&ops_lock);
+ rv = -ENOLCK;
+ break;
+ }
+
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+ WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+ op->data->callback(op->data->fl, -EINTR);
+ dlm_release_plock_op(op);
+ rv = -EINTR;
+ break;
+ case -ENOENT:
+ /* if the cancel wasn't successful we probably were too late
+ * or it was a non-blocking lock request, so just unlock it.
+ */
+ rv = dlm_posix_unlock(lockspace, number, file, fl);
+ break;
+ default:
+ break;
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(dlm_posix_cancel);
+
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl)
{
@@ -330,47 +437,40 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
}
op->info.optype = DLM_PLOCK_OP_GET;
- op->info.pid = fl->fl_pid;
- op->info.ex = (fl->fl_type == F_WRLCK);
+ op->info.pid = fl->c.flc_pid;
+ op->info.ex = lock_is_write(fl);
op->info.fsid = ls->ls_global_id;
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
- op->info.owner = (__u64) fl->fl_pid;
- else
- op->info.owner = (__u64)(long) fl->fl_owner;
+ op->info.owner = (__u64)(long) fl->c.flc_owner;
send_op(op);
wait_event(recv_wq, (op->done != 0));
- spin_lock(&ops_lock);
- if (!list_empty(&op->list)) {
- log_error(ls, "dlm_posix_get: op on list %llx",
- (unsigned long long)number);
- list_del(&op->list);
- }
- spin_unlock(&ops_lock);
+ WARN_ON(!list_empty(&op->list));
/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
-ENOENT if there are no locks on the file */
rv = op->info.rv;
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
if (rv == -ENOENT)
rv = 0;
else if (rv > 0) {
locks_init_lock(fl);
- fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
- fl->fl_flags = FL_POSIX;
- fl->fl_pid = -op->info.pid;
+ fl->c.flc_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
+ fl->c.flc_flags = FL_POSIX;
+ fl->c.flc_pid = op->info.pid;
+ if (op->info.nodeid != dlm_our_nodeid())
+ fl->c.flc_pid = -fl->c.flc_pid;
fl->fl_start = op->info.start;
fl->fl_end = op->info.end;
rv = 0;
}
- kfree(op);
+ dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
return rv;
@@ -389,11 +489,11 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
spin_lock(&ops_lock);
if (!list_empty(&send_list)) {
- op = list_entry(send_list.next, struct plock_op, list);
+ op = list_first_entry(&send_list, struct plock_op, list);
if (op->info.flags & DLM_PLOCK_FL_CLOSE)
list_del(&op->list);
else
- list_move(&op->list, &recv_list);
+ list_move_tail(&op->list, &recv_list);
memcpy(&info, &op->info, sizeof(info));
}
spin_unlock(&ops_lock);
@@ -401,12 +501,14 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
if (!op)
return -EAGAIN;
+ trace_dlm_plock_read(&info);
+
/* there is no need to get a reply from userspace for unlocks
that were generated by the vfs cleaning up for a close
(the process did not make an unlock call). */
if (op->info.flags & DLM_PLOCK_FL_CLOSE)
- kfree(op);
+ dlm_release_plock_op(op);
if (copy_to_user(u, &info, sizeof(info)))
return -EFAULT;
@@ -418,9 +520,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
loff_t *ppos)
{
+ struct plock_op *op = NULL, *iter;
struct dlm_plock_info info;
- struct plock_op *op;
- int found = 0, do_callback = 0;
+ int do_callback = 0;
if (count != sizeof(info))
return -EINVAL;
@@ -428,35 +530,56 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
if (copy_from_user(&info, u, sizeof(info)))
return -EFAULT;
+ trace_dlm_plock_write(&info);
+
if (check_version(&info))
return -EINVAL;
+ /*
+ * The results for waiting ops (SETLKW) can be returned in any
+ * order, so match all fields to find the op. The results for
+ * non-waiting ops are returned in the order that they were sent
+ * to userspace, so match the result with the first non-waiting op.
+ */
spin_lock(&ops_lock);
- list_for_each_entry(op, &recv_list, list) {
- if (op->info.fsid == info.fsid &&
- op->info.number == info.number &&
- op->info.owner == info.owner) {
- struct plock_xop *xop = (struct plock_xop *)op;
- list_del_init(&op->list);
- memcpy(&op->info, &info, sizeof(info));
- if (xop->callback)
- do_callback = 1;
- else
- op->done = 1;
- found = 1;
- break;
+ if (info.wait) {
+ op = plock_lookup_waiter(&info);
+ } else {
+ list_for_each_entry(iter, &recv_list, list) {
+ if (!iter->info.wait &&
+ iter->info.fsid == info.fsid) {
+ op = iter;
+ break;
+ }
}
}
+
+ if (op) {
+ /* Sanity check that op and info match. */
+ if (info.wait)
+ WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+ else
+ WARN_ON(op->info.number != info.number ||
+ op->info.owner != info.owner ||
+ op->info.optype != info.optype);
+
+ list_del_init(&op->list);
+ memcpy(&op->info, &info, sizeof(info));
+ if (op->data)
+ do_callback = 1;
+ else
+ op->done = 1;
+ }
spin_unlock(&ops_lock);
- if (found) {
+ if (op) {
if (do_callback)
dlm_plock_callback(op);
else
wake_up(&recv_wq);
} else
- log_print("dev_write no op %x %llx", info.fsid,
- (unsigned long long)info.number);
+ pr_debug("%s: no op %x %llx", __func__,
+ info.fsid, (unsigned long long)info.number);
return count;
}
@@ -492,12 +615,6 @@ int dlm_plock_init(void)
{
int rv;
- spin_lock_init(&ops_lock);
- INIT_LIST_HEAD(&send_list);
- INIT_LIST_HEAD(&recv_list);
- init_waitqueue_head(&send_wq);
- init_waitqueue_head(&recv_wq);
-
rv = misc_register(&plock_dev_misc);
if (rv)
log_print("dlm_plock_init: misc_register failed %d", rv);
@@ -507,5 +624,7 @@ int dlm_plock_init(void)
void dlm_plock_exit(void)
{
misc_deregister(&plock_dev_misc);
+ WARN_ON(!list_empty(&send_list));
+ WARN_ON(!list_empty(&recv_list));
}
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 6cba86470278..be1a71a6303a 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -28,76 +28,72 @@ static int rcom_response(struct dlm_ls *ls)
}
static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
- struct dlm_rcom **rc_ret, char *mb, int mb_len)
+ struct dlm_rcom **rc_ret, char *mb, int mb_len,
+ uint64_t seq)
{
struct dlm_rcom *rc;
rc = (struct dlm_rcom *) mb;
- rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- rc->rc_header.u.h_lockspace = ls->ls_global_id;
- rc->rc_header.h_nodeid = dlm_our_nodeid();
- rc->rc_header.h_length = mb_len;
+ rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ rc->rc_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
+ rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ rc->rc_header.h_length = cpu_to_le16(mb_len);
rc->rc_header.h_cmd = DLM_RCOM;
- rc->rc_type = type;
-
- spin_lock(&ls->ls_recover_lock);
- rc->rc_seq = ls->ls_recover_seq;
- spin_unlock(&ls->ls_recover_lock);
+ rc->rc_type = cpu_to_le32(type);
+ rc->rc_seq = cpu_to_le64(seq);
*rc_ret = rc;
}
static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
- struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
+ struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret,
+ uint64_t seq)
{
int mb_len = sizeof(struct dlm_rcom) + len;
struct dlm_mhandle *mh;
char *mb;
- mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb);
if (!mh) {
log_print("%s to %d type %d len %d ENOBUFS",
__func__, to_nodeid, type, len);
return -ENOBUFS;
}
- _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len);
+ _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len, seq);
*mh_ret = mh;
return 0;
}
static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
int len, struct dlm_rcom **rc_ret,
- struct dlm_msg **msg_ret)
+ struct dlm_msg **msg_ret, uint64_t seq)
{
int mb_len = sizeof(struct dlm_rcom) + len;
struct dlm_msg *msg;
char *mb;
- msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, GFP_NOFS, &mb,
- NULL, NULL);
+ msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, &mb, NULL, NULL);
if (!msg) {
log_print("create_rcom to %d type %d len %d ENOBUFS",
to_nodeid, type, len);
return -ENOBUFS;
}
- _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len);
+ _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len, seq);
*msg_ret = msg;
return 0;
}
static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc)
{
- dlm_rcom_out(rc);
- dlm_midcomms_commit_mhandle(mh);
+ dlm_midcomms_commit_mhandle(mh, NULL, 0);
}
static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc)
{
- dlm_rcom_out(rc);
dlm_lowcomms_commit_msg(msg);
dlm_lowcomms_put_msg(msg);
}
@@ -127,10 +123,10 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
- if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
+ if ((le32_to_cpu(rc->rc_header.h_version) & 0xFFFF0000) != DLM_HEADER_MAJOR) {
log_error(ls, "version mismatch: %x nodeid %d: %x",
DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid,
- rc->rc_header.h_version);
+ le32_to_cpu(rc->rc_header.h_version));
return -EPROTO;
}
@@ -145,20 +141,20 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
return 0;
}
-static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
+static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq)
{
- spin_lock(&ls->ls_rcom_spin);
- *new_seq = ++ls->ls_rcom_seq;
+ spin_lock_bh(&ls->ls_rcom_spin);
+ *new_seq = cpu_to_le64(++ls->ls_rcom_seq);
set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
- spin_unlock(&ls->ls_rcom_spin);
+ spin_unlock_bh(&ls->ls_rcom_spin);
}
static void disallow_sync_reply(struct dlm_ls *ls)
{
- spin_lock(&ls->ls_rcom_spin);
+ spin_lock_bh(&ls->ls_rcom_spin);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
- spin_unlock(&ls->ls_rcom_spin);
+ spin_unlock_bh(&ls->ls_rcom_spin);
}
/*
@@ -172,7 +168,8 @@ static void disallow_sync_reply(struct dlm_ls *ls)
* node's rcom_config.
*/
-int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
+int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags,
+ uint64_t seq)
{
struct dlm_rcom *rc;
struct dlm_msg *msg;
@@ -182,13 +179,14 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
if (nodeid == dlm_our_nodeid()) {
rc = ls->ls_recover_buf;
- rc->rc_result = dlm_recover_status(ls);
+ rc->rc_result = cpu_to_le32(dlm_recover_status(ls));
goto out;
}
retry:
error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS,
- sizeof(struct rcom_status), &rc, &msg);
+ sizeof(struct rcom_status), &rc, &msg,
+ seq);
if (error)
goto out;
@@ -208,7 +206,7 @@ retry:
rc = ls->ls_recover_buf;
- if (rc->rc_result == -ESRCH) {
+ if (rc->rc_result == cpu_to_le32(-ESRCH)) {
/* we pretend the remote lockspace exists with 0 status */
log_debug(ls, "remote node %d not ready", nodeid);
rc->rc_result = 0;
@@ -222,12 +220,14 @@ retry:
return error;
}
-static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+static void receive_rcom_status(struct dlm_ls *ls,
+ const struct dlm_rcom *rc_in,
+ uint64_t seq)
{
struct dlm_rcom *rc;
struct rcom_status *rs;
uint32_t status;
- int nodeid = rc_in->rc_header.h_nodeid;
+ int nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
int len = sizeof(struct rcom_config);
struct dlm_msg *msg;
int num_slots = 0;
@@ -245,30 +245,30 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
goto do_create;
}
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status;
num_slots = ls->ls_num_slots;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
len += num_slots * sizeof(struct rcom_slot);
do_create:
error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS_REPLY,
- len, &rc, &msg);
+ len, &rc, &msg, seq);
if (error)
return;
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
- rc->rc_result = status;
+ rc->rc_result = cpu_to_le32(status);
set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots);
if (!num_slots)
goto do_send;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_num_slots != num_slots) {
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
log_debug(ls, "receive_rcom_status num_slots %d to %d",
num_slots, ls->ls_num_slots);
rc->rc_result = 0;
@@ -277,42 +277,45 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
}
dlm_slots_copy_out(ls, rc);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
do_send:
send_rcom_stateless(msg, rc);
}
-static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in)
{
- spin_lock(&ls->ls_rcom_spin);
+ spin_lock_bh(&ls->ls_rcom_spin);
if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
- rc_in->rc_id != ls->ls_rcom_seq) {
+ le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) {
log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
- rc_in->rc_type, rc_in->rc_header.h_nodeid,
- (unsigned long long)rc_in->rc_id,
+ le32_to_cpu(rc_in->rc_type),
+ le32_to_cpu(rc_in->rc_header.h_nodeid),
+ (unsigned long long)le64_to_cpu(rc_in->rc_id),
(unsigned long long)ls->ls_rcom_seq);
goto out;
}
- memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
+ memcpy(ls->ls_recover_buf, rc_in,
+ le16_to_cpu(rc_in->rc_header.h_length));
set_bit(LSFL_RCOM_READY, &ls->ls_flags);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
wake_up(&ls->ls_wait_general);
out:
- spin_unlock(&ls->ls_rcom_spin);
+ spin_unlock_bh(&ls->ls_rcom_spin);
}
-int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
+int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,
+ int last_len, uint64_t seq)
{
+ struct dlm_mhandle *mh;
struct dlm_rcom *rc;
- struct dlm_msg *msg;
int error = 0;
ls->ls_recover_nodeid = nodeid;
retry:
- error = create_rcom_stateless(ls, nodeid, DLM_RCOM_NAMES, last_len,
- &rc, &msg);
+ error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len,
+ &rc, &mh, seq);
if (error)
goto out;
memcpy(rc->rc_buf, last_name, last_len);
@@ -320,7 +323,7 @@ retry:
allow_sync_reply(ls, &rc->rc_id);
memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE);
- send_rcom_stateless(msg, rc);
+ send_rcom(mh, rc);
error = dlm_wait_function(ls, &rcom_response);
disallow_sync_reply(ls);
@@ -330,18 +333,20 @@ retry:
return error;
}
-static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+static void receive_rcom_names(struct dlm_ls *ls, const struct dlm_rcom *rc_in,
+ uint64_t seq)
{
+ struct dlm_mhandle *mh;
struct dlm_rcom *rc;
int error, inlen, outlen, nodeid;
- struct dlm_msg *msg;
- nodeid = rc_in->rc_header.h_nodeid;
- inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
+ nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
+ inlen = le16_to_cpu(rc_in->rc_header.h_length) -
+ sizeof(struct dlm_rcom);
outlen = DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom);
- error = create_rcom_stateless(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen,
- &rc, &msg);
+ error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen,
+ &rc, &mh, seq);
if (error)
return;
rc->rc_id = rc_in->rc_id;
@@ -349,10 +354,10 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
nodeid);
- send_rcom_stateless(msg, rc);
+ send_rcom(mh, rc);
}
-int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
+int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid, uint64_t seq)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
@@ -360,32 +365,35 @@ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
int error;
error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length,
- &rc, &mh);
+ &rc, &mh, seq);
if (error)
goto out;
memcpy(rc->rc_buf, r->res_name, r->res_length);
- rc->rc_id = (unsigned long) r->res_id;
+ rc->rc_id = cpu_to_le64(r->res_id);
send_rcom(mh, rc);
out:
return error;
}
-static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+static void receive_rcom_lookup(struct dlm_ls *ls,
+ const struct dlm_rcom *rc_in, uint64_t seq)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
- int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
- int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
+ int error, ret_nodeid, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
+ int len = le16_to_cpu(rc_in->rc_header.h_length) -
+ sizeof(struct dlm_rcom);
/* Old code would send this special id to trigger a debug dump. */
- if (rc_in->rc_id == 0xFFFFFFFF) {
+ if (rc_in->rc_id == cpu_to_le64(0xFFFFFFFF)) {
log_error(ls, "receive_rcom_lookup dump from %d", nodeid);
dlm_dump_rsb_name(ls, rc_in->rc_buf, len);
return;
}
- error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
+ error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh,
+ seq);
if (error)
return;
@@ -393,14 +401,15 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL);
if (error)
ret_nodeid = error;
- rc->rc_result = ret_nodeid;
+ rc->rc_result = cpu_to_le32(ret_nodeid);
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
send_rcom(mh, rc);
}
-static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+static void receive_rcom_lookup_reply(struct dlm_ls *ls,
+ const struct dlm_rcom *rc_in)
{
dlm_recover_master_reply(ls, rc_in);
}
@@ -413,7 +422,7 @@ static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid);
rl->rl_lkid = cpu_to_le32(lkb->lkb_id);
rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags);
- rl->rl_flags = cpu_to_le32(lkb->lkb_flags);
+ rl->rl_flags = cpu_to_le32(dlm_dflags_val(lkb));
rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
rl->rl_rqmode = lkb->lkb_rqmode;
rl->rl_grmode = lkb->lkb_grmode;
@@ -435,7 +444,7 @@ static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
}
-int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, uint64_t seq)
{
struct dlm_ls *ls = r->res_ls;
struct dlm_rcom *rc;
@@ -446,13 +455,14 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (lkb->lkb_lvbptr)
len += ls->ls_lvblen;
- error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh);
+ error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh,
+ seq);
if (error)
goto out;
rl = (struct rcom_lock *) rc->rc_buf;
pack_rcom_lock(r, lkb, rl);
- rc->rc_id = (unsigned long) r;
+ rc->rc_id = cpu_to_le64((uintptr_t)r);
send_rcom(mh, rc);
out:
@@ -460,23 +470,28 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
}
/* needs at least dlm_rcom + rcom_lock */
-static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+static void receive_rcom_lock(struct dlm_ls *ls, const struct dlm_rcom *rc_in,
+ uint64_t seq)
{
+ __le32 rl_remid, rl_result;
+ struct rcom_lock *rl;
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
- int error, nodeid = rc_in->rc_header.h_nodeid;
+ int error, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
- dlm_recover_master_copy(ls, rc_in);
+ dlm_recover_master_copy(ls, rc_in, &rl_remid, &rl_result);
error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY,
- sizeof(struct rcom_lock), &rc, &mh);
+ sizeof(struct rcom_lock), &rc, &mh, seq);
if (error)
return;
- /* We send back the same rcom_lock struct we received, but
- dlm_recover_master_copy() has filled in rl_remid and rl_result */
-
memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
+ rl = (struct rcom_lock *)rc->rc_buf;
+ /* set rl_remid and rl_result from dlm_recover_master_copy() */
+ rl->rl_remid = rl_remid;
+ rl->rl_result = rl_result;
+
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
@@ -486,7 +501,7 @@ static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
/* If the lockspace doesn't exist then still send a status message
back; it's possible that it just doesn't have its global_id yet. */
-int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
+int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct rcom_config *rf;
@@ -494,28 +509,27 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
char *mb;
int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
- mh = dlm_midcomms_get_mhandle(nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(nodeid, mb_len, &mb);
if (!mh)
return -ENOBUFS;
rc = (struct dlm_rcom *) mb;
- rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
rc->rc_header.u.h_lockspace = rc_in->rc_header.u.h_lockspace;
- rc->rc_header.h_nodeid = dlm_our_nodeid();
- rc->rc_header.h_length = mb_len;
+ rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ rc->rc_header.h_length = cpu_to_le16(mb_len);
rc->rc_header.h_cmd = DLM_RCOM;
- rc->rc_type = DLM_RCOM_STATUS_REPLY;
+ rc->rc_type = cpu_to_le32(DLM_RCOM_STATUS_REPLY);
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
- rc->rc_result = -ESRCH;
+ rc->rc_result = cpu_to_le32(-ESRCH);
rf = (struct rcom_config *) rc->rc_buf;
rf->rf_lvblen = cpu_to_le32(~0U);
- dlm_rcom_out(rc);
- dlm_midcomms_commit_mhandle(mh);
+ dlm_midcomms_commit_mhandle(mh, NULL, 0);
return 0;
}
@@ -565,7 +579,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
/* Called by dlm_recv; corresponds to dlm_receive_message() but special
recovery-only comms are sent through here. */
-void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid)
{
int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock);
int stop, reply = 0, names = 0, lookup = 0, lock = 0;
@@ -573,42 +587,42 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
uint64_t seq;
switch (rc->rc_type) {
- case DLM_RCOM_STATUS_REPLY:
+ case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
reply = 1;
break;
- case DLM_RCOM_NAMES:
+ case cpu_to_le32(DLM_RCOM_NAMES):
names = 1;
break;
- case DLM_RCOM_NAMES_REPLY:
+ case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
names = 1;
reply = 1;
break;
- case DLM_RCOM_LOOKUP:
+ case cpu_to_le32(DLM_RCOM_LOOKUP):
lookup = 1;
break;
- case DLM_RCOM_LOOKUP_REPLY:
+ case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY):
lookup = 1;
reply = 1;
break;
- case DLM_RCOM_LOCK:
+ case cpu_to_le32(DLM_RCOM_LOCK):
lock = 1;
break;
- case DLM_RCOM_LOCK_REPLY:
+ case cpu_to_le32(DLM_RCOM_LOCK_REPLY):
lock = 1;
reply = 1;
break;
}
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status;
- stop = test_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
+ stop = dlm_recovery_stopped(ls);
seq = ls->ls_recover_seq;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
- if (stop && (rc->rc_type != DLM_RCOM_STATUS))
+ if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS)))
goto ignore;
- if (reply && (rc->rc_seq_reply != seq))
+ if (reply && (le64_to_cpu(rc->rc_seq_reply) != seq))
goto ignore;
if (!(status & DLM_RS_NODES) && (names || lookup || lock))
@@ -618,59 +632,60 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
goto ignore;
switch (rc->rc_type) {
- case DLM_RCOM_STATUS:
- receive_rcom_status(ls, rc);
+ case cpu_to_le32(DLM_RCOM_STATUS):
+ receive_rcom_status(ls, rc, seq);
break;
- case DLM_RCOM_NAMES:
- receive_rcom_names(ls, rc);
+ case cpu_to_le32(DLM_RCOM_NAMES):
+ receive_rcom_names(ls, rc, seq);
break;
- case DLM_RCOM_LOOKUP:
- receive_rcom_lookup(ls, rc);
+ case cpu_to_le32(DLM_RCOM_LOOKUP):
+ receive_rcom_lookup(ls, rc, seq);
break;
- case DLM_RCOM_LOCK:
- if (rc->rc_header.h_length < lock_size)
+ case cpu_to_le32(DLM_RCOM_LOCK):
+ if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
goto Eshort;
- receive_rcom_lock(ls, rc);
+ receive_rcom_lock(ls, rc, seq);
break;
- case DLM_RCOM_STATUS_REPLY:
+ case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
receive_sync_reply(ls, rc);
break;
- case DLM_RCOM_NAMES_REPLY:
+ case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
receive_sync_reply(ls, rc);
break;
- case DLM_RCOM_LOOKUP_REPLY:
+ case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY):
receive_rcom_lookup_reply(ls, rc);
break;
- case DLM_RCOM_LOCK_REPLY:
- if (rc->rc_header.h_length < lock_size)
+ case cpu_to_le32(DLM_RCOM_LOCK_REPLY):
+ if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
goto Eshort;
- dlm_recover_process_copy(ls, rc);
+ dlm_recover_process_copy(ls, rc, seq);
break;
default:
- log_error(ls, "receive_rcom bad type %d", rc->rc_type);
+ log_error(ls, "receive_rcom bad type %d",
+ le32_to_cpu(rc->rc_type));
}
return;
ignore:
log_limit(ls, "dlm_receive_rcom ignore msg %d "
"from %d %llu %llu recover seq %llu sts %x gen %u",
- rc->rc_type,
+ le32_to_cpu(rc->rc_type),
nodeid,
- (unsigned long long)rc->rc_seq,
- (unsigned long long)rc->rc_seq_reply,
+ (unsigned long long)le64_to_cpu(rc->rc_seq),
+ (unsigned long long)le64_to_cpu(rc->rc_seq_reply),
(unsigned long long)seq,
status, ls->ls_generation);
return;
Eshort:
log_error(ls, "recovery message %d from %d is too short",
- rc->rc_type, nodeid);
+ le32_to_cpu(rc->rc_type), nodeid);
}
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
index 454d3c4814ab..765926ae0020 100644
--- a/fs/dlm/rcom.h
+++ b/fs/dlm/rcom.h
@@ -12,12 +12,15 @@
#ifndef __RCOM_DOT_H__
#define __RCOM_DOT_H__
-int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags);
-int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
-int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
-int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
-void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid);
-int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in);
+int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags,
+ uint64_t seq);
+int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,
+ int last_len, uint64_t seq);
+int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid, uint64_t seq);
+int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, uint64_t seq);
+void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc,
+ int nodeid);
+int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in);
#endif
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 8928e99dfd47..3ac020fb8139 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -74,9 +74,9 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
uint32_t dlm_recover_status(struct dlm_ls *ls)
{
uint32_t status;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
return status;
}
@@ -87,13 +87,13 @@ static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
_set_recover_status(ls, status);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
}
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
- int save_slots)
+ int save_slots, uint64_t seq)
{
struct dlm_rcom *rc = ls->ls_recover_buf;
struct dlm_member *memb;
@@ -107,14 +107,14 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
goto out;
}
- error = dlm_rcom_status(ls, memb->nodeid, 0);
+ error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
if (error)
goto out;
if (save_slots)
dlm_slot_save(ls, rc, memb);
- if (rc->rc_result & wait_status)
+ if (le32_to_cpu(rc->rc_result) & wait_status)
break;
if (delay < 1000)
delay += 20;
@@ -126,7 +126,7 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
}
static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
- uint32_t status_flags)
+ uint32_t status_flags, uint64_t seq)
{
struct dlm_rcom *rc = ls->ls_recover_buf;
int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
@@ -137,11 +137,11 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
goto out;
}
- error = dlm_rcom_status(ls, nodeid, status_flags);
+ error = dlm_rcom_status(ls, nodeid, status_flags, seq);
if (error)
break;
- if (rc->rc_result & wait_status)
+ if (le32_to_cpu(rc->rc_result) & wait_status)
break;
if (delay < 1000)
delay += 20;
@@ -151,22 +151,22 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
return error;
}
-static int wait_status(struct dlm_ls *ls, uint32_t status)
+static int wait_status(struct dlm_ls *ls, uint32_t status, uint64_t seq)
{
uint32_t status_all = status << 1;
int error;
if (ls->ls_low_nodeid == dlm_our_nodeid()) {
- error = wait_status_all(ls, status, 0);
+ error = wait_status_all(ls, status, 0, seq);
if (!error)
dlm_set_recover_status(ls, status_all);
} else
- error = wait_status_low(ls, status_all, 0);
+ error = wait_status_low(ls, status_all, 0, seq);
return error;
}
-int dlm_recover_members_wait(struct dlm_ls *ls)
+int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq)
{
struct dlm_member *memb;
struct dlm_slot *slots;
@@ -180,7 +180,7 @@ int dlm_recover_members_wait(struct dlm_ls *ls)
}
if (ls->ls_low_nodeid == dlm_our_nodeid()) {
- error = wait_status_all(ls, DLM_RS_NODES, 1);
+ error = wait_status_all(ls, DLM_RS_NODES, 1, seq);
if (error)
goto out;
@@ -188,18 +188,19 @@ int dlm_recover_members_wait(struct dlm_ls *ls)
rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
if (!rv) {
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
_set_recover_status(ls, DLM_RS_NODES_ALL);
ls->ls_num_slots = num_slots;
ls->ls_slots_size = slots_size;
ls->ls_slots = slots;
ls->ls_generation = gen;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
} else {
dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
}
} else {
- error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
+ error = wait_status_low(ls, DLM_RS_NODES_ALL,
+ DLM_RSF_NEED_SLOTS, seq);
if (error)
goto out;
@@ -209,19 +210,19 @@ int dlm_recover_members_wait(struct dlm_ls *ls)
return error;
}
-int dlm_recover_directory_wait(struct dlm_ls *ls)
+int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq)
{
- return wait_status(ls, DLM_RS_DIR);
+ return wait_status(ls, DLM_RS_DIR, seq);
}
-int dlm_recover_locks_wait(struct dlm_ls *ls)
+int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq)
{
- return wait_status(ls, DLM_RS_LOCKS);
+ return wait_status(ls, DLM_RS_LOCKS, seq);
}
-int dlm_recover_done_wait(struct dlm_ls *ls)
+int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq)
{
- return wait_status(ls, DLM_RS_DONE);
+ return wait_status(ls, DLM_RS_DONE, seq);
}
/*
@@ -240,9 +241,9 @@ static int recover_list_empty(struct dlm_ls *ls)
{
int empty;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
empty = list_empty(&ls->ls_recover_list);
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
return empty;
}
@@ -251,23 +252,23 @@ static void recover_list_add(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
if (list_empty(&r->res_recover_list)) {
list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
ls->ls_recover_list_count++;
dlm_hold_rsb(r);
}
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
}
static void recover_list_del(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
list_del_init(&r->res_recover_list);
ls->ls_recover_list_count--;
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
dlm_put_rsb(r);
}
@@ -276,7 +277,7 @@ static void recover_list_clear(struct dlm_ls *ls)
{
struct dlm_rsb *r, *s;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
list_del_init(&r->res_recover_list);
r->res_recover_locks_count = 0;
@@ -289,78 +290,81 @@ static void recover_list_clear(struct dlm_ls *ls)
ls->ls_recover_list_count);
ls->ls_recover_list_count = 0;
}
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
}
-static int recover_idr_empty(struct dlm_ls *ls)
+static int recover_xa_empty(struct dlm_ls *ls)
{
int empty = 1;
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
if (ls->ls_recover_list_count)
empty = 0;
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
return empty;
}
-static int recover_idr_add(struct dlm_rsb *r)
+static int recover_xa_add(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
+ struct xa_limit limit = {
+ .min = 1,
+ .max = UINT_MAX,
+ };
+ uint32_t id;
int rv;
- idr_preload(GFP_NOFS);
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
if (r->res_id) {
rv = -1;
goto out_unlock;
}
- rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+ rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC);
if (rv < 0)
goto out_unlock;
- r->res_id = rv;
+ r->res_id = id;
ls->ls_recover_list_count++;
dlm_hold_rsb(r);
rv = 0;
out_unlock:
- spin_unlock(&ls->ls_recover_idr_lock);
- idr_preload_end();
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
return rv;
}
-static void recover_idr_del(struct dlm_rsb *r)
+static void recover_xa_del(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock(&ls->ls_recover_idr_lock);
- idr_remove(&ls->ls_recover_idr, r->res_id);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
+ xa_erase_bh(&ls->ls_recover_xa, r->res_id);
r->res_id = 0;
ls->ls_recover_list_count--;
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
dlm_put_rsb(r);
}
-static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
+static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id)
{
struct dlm_rsb *r;
- spin_lock(&ls->ls_recover_idr_lock);
- r = idr_find(&ls->ls_recover_idr, (int)id);
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
+ r = xa_load(&ls->ls_recover_xa, (int)id);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
return r;
}
-static void recover_idr_clear(struct dlm_ls *ls)
+static void recover_xa_clear(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- int id;
+ unsigned long id;
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
- idr_for_each_entry(&ls->ls_recover_idr, r, id) {
- idr_remove(&ls->ls_recover_idr, id);
+ xa_for_each(&ls->ls_recover_xa, id, r) {
+ xa_erase_bh(&ls->ls_recover_xa, id);
r->res_id = 0;
r->res_recover_locks_count = 0;
ls->ls_recover_list_count--;
@@ -373,7 +377,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
ls->ls_recover_list_count);
ls->ls_recover_list_count = 0;
}
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
}
@@ -403,7 +407,7 @@ static void set_lock_master(struct list_head *queue, int nodeid)
struct dlm_lkb *lkb;
list_for_each_entry(lkb, queue, lkb_statequeue) {
- if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
+ if (!test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) {
lkb->lkb_nodeid = nodeid;
lkb->lkb_remid = 0;
}
@@ -441,17 +445,18 @@ static void set_new_master(struct dlm_rsb *r)
* equals our_nodeid below).
*/
-static int recover_master(struct dlm_rsb *r, unsigned int *count)
+static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
{
struct dlm_ls *ls = r->res_ls;
int our_nodeid, dir_nodeid;
int is_removed = 0;
int error;
- if (is_master(r))
+ if (r->res_nodeid != -1 && is_master(r))
return 0;
- is_removed = dlm_is_removed(ls, r->res_nodeid);
+ if (r->res_nodeid != -1)
+ is_removed = dlm_is_removed(ls, r->res_nodeid);
if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
return 0;
@@ -471,8 +476,8 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count)
set_new_master(r);
error = 0;
} else {
- recover_idr_add(r);
- error = dlm_send_rcom_lookup(r, dir_nodeid);
+ recover_xa_add(r);
+ error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
}
(*count)++;
@@ -520,7 +525,8 @@ static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
* the correct dir node.
*/
-int dlm_recover_masters(struct dlm_ls *ls)
+int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list)
{
struct dlm_rsb *r;
unsigned int total = 0;
@@ -530,10 +536,8 @@ int dlm_recover_masters(struct dlm_ls *ls)
log_rinfo(ls, "dlm_recover_masters");
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
if (dlm_recovery_stopped(ls)) {
- up_read(&ls->ls_root_sem);
error = -EINTR;
goto out;
}
@@ -542,40 +546,37 @@ int dlm_recover_masters(struct dlm_ls *ls)
if (nodir)
error = recover_master_static(r, &count);
else
- error = recover_master(r, &count);
+ error = recover_master(r, &count, seq);
unlock_rsb(r);
cond_resched();
total++;
- if (error) {
- up_read(&ls->ls_root_sem);
+ if (error)
goto out;
- }
}
- up_read(&ls->ls_root_sem);
log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
- error = dlm_wait_function(ls, &recover_idr_empty);
+ error = dlm_wait_function(ls, &recover_xa_empty);
out:
if (error)
- recover_idr_clear(ls);
+ recover_xa_clear(ls);
return error;
}
-int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
+int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
{
struct dlm_rsb *r;
int ret_nodeid, new_master;
- r = recover_idr_find(ls, rc->rc_id);
+ r = recover_xa_find(ls, le64_to_cpu(rc->rc_id));
if (!r) {
log_error(ls, "dlm_recover_master_reply no id %llx",
- (unsigned long long)rc->rc_id);
+ (unsigned long long)le64_to_cpu(rc->rc_id));
goto out;
}
- ret_nodeid = rc->rc_result;
+ ret_nodeid = le32_to_cpu(rc->rc_result);
if (ret_nodeid == dlm_our_nodeid())
new_master = 0;
@@ -587,9 +588,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
r->res_nodeid = new_master;
set_new_master(r);
unlock_rsb(r);
- recover_idr_del(r);
+ recover_xa_del(r);
- if (recover_idr_empty(ls))
+ if (recover_xa_empty(ls))
wake_up(&ls->ls_wait_general);
out:
return 0;
@@ -614,13 +615,14 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
* an equal number of replies then recovery for the rsb is done
*/
-static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
+static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head,
+ uint64_t seq)
{
struct dlm_lkb *lkb;
int error = 0;
list_for_each_entry(lkb, head, lkb_statequeue) {
- error = dlm_send_rcom_lock(r, lkb);
+ error = dlm_send_rcom_lock(r, lkb, seq);
if (error)
break;
r->res_recover_locks_count++;
@@ -629,7 +631,7 @@ static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
return error;
}
-static int recover_locks(struct dlm_rsb *r)
+static int recover_locks(struct dlm_rsb *r, uint64_t seq)
{
int error = 0;
@@ -637,13 +639,13 @@ static int recover_locks(struct dlm_rsb *r)
DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););
- error = recover_locks_queue(r, &r->res_grantqueue);
+ error = recover_locks_queue(r, &r->res_grantqueue, seq);
if (error)
goto out;
- error = recover_locks_queue(r, &r->res_convertqueue);
+ error = recover_locks_queue(r, &r->res_convertqueue, seq);
if (error)
goto out;
- error = recover_locks_queue(r, &r->res_waitqueue);
+ error = recover_locks_queue(r, &r->res_waitqueue, seq);
if (error)
goto out;
@@ -656,14 +658,14 @@ static int recover_locks(struct dlm_rsb *r)
return error;
}
-int dlm_recover_locks(struct dlm_ls *ls)
+int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list)
{
struct dlm_rsb *r;
int error, count = 0;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
- if (is_master(r)) {
+ list_for_each_entry(r, root_list, res_root_list) {
+ if (r->res_nodeid != -1 && is_master(r)) {
rsb_clear_flag(r, RSB_NEW_MASTER);
continue;
}
@@ -673,19 +675,15 @@ int dlm_recover_locks(struct dlm_ls *ls)
if (dlm_recovery_stopped(ls)) {
error = -EINTR;
- up_read(&ls->ls_root_sem);
goto out;
}
- error = recover_locks(r);
- if (error) {
- up_read(&ls->ls_root_sem);
+ error = recover_locks(r, seq);
+ if (error)
goto out;
- }
count += r->res_recover_locks_count;
}
- up_read(&ls->ls_root_sem);
log_rinfo(ls, "dlm_recover_locks %d out", count);
@@ -732,10 +730,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
static void recover_lvb(struct dlm_rsb *r)
{
- struct dlm_lkb *lkb, *high_lkb = NULL;
+ struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
uint32_t high_seq = 0;
int lock_lvb_exists = 0;
- int big_lock_exists = 0;
int lvblen = r->res_ls->ls_lvblen;
if (!rsb_flag(r, RSB_NEW_MASTER2) &&
@@ -751,37 +748,37 @@ static void recover_lvb(struct dlm_rsb *r)
/* we are the new master, so figure out if VALNOTVALID should
be set, and set the rsb lvb from the best lkb available. */
- list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+ list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
+ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
- if (lkb->lkb_grmode > DLM_LOCK_CR) {
- big_lock_exists = 1;
+ if (iter->lkb_grmode > DLM_LOCK_CR) {
+ big_lkb = iter;
goto setflag;
}
- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
- high_lkb = lkb;
- high_seq = lkb->lkb_lvbseq;
+ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
+ high_lkb = iter;
+ high_seq = iter->lkb_lvbseq;
}
}
- list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+ list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
+ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
- if (lkb->lkb_grmode > DLM_LOCK_CR) {
- big_lock_exists = 1;
+ if (iter->lkb_grmode > DLM_LOCK_CR) {
+ big_lkb = iter;
goto setflag;
}
- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
- high_lkb = lkb;
- high_seq = lkb->lkb_lvbseq;
+ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
+ high_lkb = iter;
+ high_seq = iter->lkb_lvbseq;
}
}
@@ -790,7 +787,7 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
/* lvb is invalidated if only NL/CR locks remain */
- if (!big_lock_exists)
+ if (!big_lkb)
rsb_set_flag(r, RSB_VALNOTVALID);
if (!r->res_lvbptr) {
@@ -799,9 +796,9 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
}
- if (big_lock_exists) {
- r->res_lvbseq = lkb->lkb_lvbseq;
- memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
+ if (big_lkb) {
+ r->res_lvbseq = big_lkb->lkb_lvbseq;
+ memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
} else if (high_lkb) {
r->res_lvbseq = high_lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
@@ -814,33 +811,42 @@ static void recover_lvb(struct dlm_rsb *r)
}
/* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks
- converting PR->CW or CW->PR need to have their lkb_grmode set. */
+ * converting PR->CW or CW->PR may need to have their lkb_grmode changed.
+ */
static void recover_conversion(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
+ uint32_t other_lkid = 0;
+ int other_grmode = -1;
struct dlm_lkb *lkb;
- int grmode = -1;
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
if (lkb->lkb_grmode == DLM_LOCK_PR ||
lkb->lkb_grmode == DLM_LOCK_CW) {
- grmode = lkb->lkb_grmode;
+ other_grmode = lkb->lkb_grmode;
+ other_lkid = lkb->lkb_id;
break;
}
}
+ if (other_grmode == -1)
+ return;
+
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
- if (lkb->lkb_grmode != DLM_LOCK_IV)
- continue;
- if (grmode == -1) {
- log_debug(ls, "recover_conversion %x set gr to rq %d",
- lkb->lkb_id, lkb->lkb_rqmode);
- lkb->lkb_grmode = lkb->lkb_rqmode;
- } else {
- log_debug(ls, "recover_conversion %x set gr %d",
- lkb->lkb_id, grmode);
- lkb->lkb_grmode = grmode;
+ /* Lock recovery created incompatible granted modes, so
+ * change the granted mode of the converting lock to
+ * NL. The rqmode of the converting lock should be CW,
+ * which means the converting lock should be granted at
+ * the end of recovery.
+ */
+ if (((lkb->lkb_grmode == DLM_LOCK_PR) && (other_grmode == DLM_LOCK_CW)) ||
+ ((lkb->lkb_grmode == DLM_LOCK_CW) && (other_grmode == DLM_LOCK_PR))) {
+ log_rinfo(ls, "%s %x gr %d rq %d, remote %d %x, other_lkid %u, other gr %d, set gr=NL",
+ __func__, lkb->lkb_id, lkb->lkb_grmode,
+ lkb->lkb_rqmode, lkb->lkb_nodeid,
+ lkb->lkb_remid, other_lkid, other_grmode);
+ lkb->lkb_grmode = DLM_LOCK_NL;
}
}
}
@@ -855,15 +861,14 @@ static void recover_grant(struct dlm_rsb *r)
rsb_set_flag(r, RSB_RECOVER_GRANT);
}
-void dlm_recover_rsbs(struct dlm_ls *ls)
+void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
{
struct dlm_rsb *r;
unsigned int count = 0;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
lock_rsb(r);
- if (is_master(r)) {
+ if (r->res_nodeid != -1 && is_master(r)) {
if (rsb_flag(r, RSB_RECOVER_CONVERT))
recover_conversion(r);
@@ -882,76 +887,31 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
rsb_clear_flag(r, RSB_NEW_MASTER2);
unlock_rsb(r);
}
- up_read(&ls->ls_root_sem);
if (count)
log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}
-/* Create a single list of all root rsb's to be used during recovery */
-
-int dlm_create_root_list(struct dlm_ls *ls)
-{
- struct rb_node *n;
- struct dlm_rsb *r;
- int i, error = 0;
-
- down_write(&ls->ls_root_sem);
- if (!list_empty(&ls->ls_root_list)) {
- log_error(ls, "root list not empty");
- error = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- spin_lock(&ls->ls_rsbtbl[i].lock);
- for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- list_add(&r->res_root_list, &ls->ls_root_list);
- dlm_hold_rsb(r);
- }
-
- if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
- log_error(ls, "dlm_create_root_list toss not empty");
- spin_unlock(&ls->ls_rsbtbl[i].lock);
- }
- out:
- up_write(&ls->ls_root_sem);
- return error;
-}
-
-void dlm_release_root_list(struct dlm_ls *ls)
+void dlm_clear_inactive(struct dlm_ls *ls)
{
struct dlm_rsb *r, *safe;
+ unsigned int count = 0;
- down_write(&ls->ls_root_sem);
- list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
- list_del_init(&r->res_root_list);
- dlm_put_rsb(r);
- }
- up_write(&ls->ls_root_sem);
-}
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry_safe(r, safe, &ls->ls_slow_inactive, res_slow_list) {
+ list_del(&r->res_slow_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
-void dlm_clear_toss(struct dlm_ls *ls)
-{
- struct rb_node *n, *next;
- struct dlm_rsb *r;
- unsigned int count = 0;
- int i;
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- spin_lock(&ls->ls_rsbtbl[i].lock);
- for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
- next = rb_next(n);
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- rb_erase(n, &ls->ls_rsbtbl[i].toss);
- dlm_free_rsb(r);
- count++;
- }
- spin_unlock(&ls->ls_rsbtbl[i].lock);
+ if (!list_empty(&r->res_scan_list))
+ list_del_init(&r->res_scan_list);
+
+ free_inactive_rsb(r);
+ count++;
}
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
if (count)
- log_rinfo(ls, "dlm_clear_toss %u done", count);
+ log_rinfo(ls, "dlm_clear_inactive %u done", count);
}
diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h
index 235e0d25cd48..ec69896462fb 100644
--- a/fs/dlm/recover.h
+++ b/fs/dlm/recover.h
@@ -15,18 +15,18 @@
int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls));
uint32_t dlm_recover_status(struct dlm_ls *ls);
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status);
-int dlm_recover_members_wait(struct dlm_ls *ls);
-int dlm_recover_directory_wait(struct dlm_ls *ls);
-int dlm_recover_locks_wait(struct dlm_ls *ls);
-int dlm_recover_done_wait(struct dlm_ls *ls);
-int dlm_recover_masters(struct dlm_ls *ls);
-int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc);
-int dlm_recover_locks(struct dlm_ls *ls);
+int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq);
+int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq);
+int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq);
+int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq);
+int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list);
+int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc);
+int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list);
void dlm_recovered_lock(struct dlm_rsb *r);
-int dlm_create_root_list(struct dlm_ls *ls);
-void dlm_release_root_list(struct dlm_ls *ls);
-void dlm_clear_toss(struct dlm_ls *ls);
-void dlm_recover_rsbs(struct dlm_ls *ls);
+void dlm_clear_inactive(struct dlm_ls *ls);
+void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list);
#endif /* __RECOVER_DOT_H__ */
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 97d052cea5a9..12272a8f6d75 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -20,6 +20,67 @@
#include "requestqueue.h"
#include "recoverd.h"
+static int dlm_create_masters_list(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r;
+ int error = 0;
+
+ write_lock_bh(&ls->ls_masters_lock);
+ if (!list_empty(&ls->ls_masters_list)) {
+ log_error(ls, "root list not empty");
+ error = -EINVAL;
+ goto out;
+ }
+
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
+ if (r->res_nodeid)
+ continue;
+
+ list_add(&r->res_masters_list, &ls->ls_masters_list);
+ dlm_hold_rsb(r);
+ }
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ out:
+ write_unlock_bh(&ls->ls_masters_lock);
+ return error;
+}
+
+static void dlm_release_masters_list(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r, *safe;
+
+ write_lock_bh(&ls->ls_masters_lock);
+ list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) {
+ list_del_init(&r->res_masters_list);
+ dlm_put_rsb(r);
+ }
+ write_unlock_bh(&ls->ls_masters_lock);
+}
+
+static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
+{
+ struct dlm_rsb *r;
+
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
+ list_add(&r->res_root_list, root_list);
+ dlm_hold_rsb(r);
+ }
+
+ WARN_ON_ONCE(!list_empty(&ls->ls_slow_inactive));
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+}
+
+static void dlm_release_root_list(struct list_head *root_list)
+{
+ struct dlm_rsb *r, *safe;
+
+ list_for_each_entry_safe(r, safe, root_list, res_root_list) {
+ list_del_init(&r->res_root_list);
+ dlm_put_rsb(r);
+ }
+}
/* If the start for which we're re-enabling locking (seq) has been superseded
by a newer stop (ls_recover_seq), we need to leave locking disabled.
@@ -32,24 +93,35 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
int error = -EINTR;
- down_write(&ls->ls_recv_active);
+ write_lock_bh(&ls->ls_recv_active);
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
+ /* Schedule the next timer if recovery put something on the inactive list.
+ *
+ * The rsbs that were queued to the inactive list while recovery was
+ * running have not been scanned yet (scanning is gated on LSFL_RUNNING),
+ * and nothing else in recovery has started either because ls_in_recovery
+ * is still held. So we should not run into the case that
+ * resume_scan_timer() queues a timer that turns out to be a no-op.
+ */
+ resume_scan_timer(ls);
/* unblocks processes waiting to enter the dlm */
up_write(&ls->ls_in_recovery);
clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
error = 0;
}
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
- up_write(&ls->ls_recv_active);
+ write_unlock_bh(&ls->ls_recv_active);
return error;
}
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
+ LIST_HEAD(root_list);
unsigned long start;
int error, neg = 0;
@@ -59,37 +131,57 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_callback_suspend(ls);
- dlm_clear_toss(ls);
+ dlm_clear_inactive(ls);
/*
* This list of root rsb's will be the basis of most of the recovery
* routines.
*/
- dlm_create_root_list(ls);
+ dlm_create_root_list(ls, &root_list);
/*
* Add or remove nodes from the lockspace's ls_nodes list.
+ *
+ * Because we must report all membership changes to the lsops or
+ * midcomms layer, ls_recover() must not be aborted until this is
+ * done.
*/
error = dlm_recover_members(ls, rv, &neg);
if (error) {
log_rinfo(ls, "dlm_recover_members error %d", error);
- goto fail;
+ goto fail_root_list;
}
- dlm_recover_dir_nodeid(ls);
+ dlm_recover_dir_nodeid(ls, &root_list);
+
+ /* Create a snapshot of all active rsbs that we are the master of.
+ * During the barrier between dlm_recover_members_wait() and
+ * dlm_recover_directory(), other nodes can request the directory
+ * dlm_rsbs they need (r->res_dir_nodeid == nodeid) via rcom
+ * communication, handled by dlm_copy_master_names().
+ *
+ * TODO: we should keep a per-lockspace list of the rsbs that we are
+ * the master of. Instead of building this list during recovery, we
+ * would track those rsbs during normal lock handling, and recovery
+ * could use the list when necessary.
+ */
+ error = dlm_create_masters_list(ls);
+ if (error) {
+ log_rinfo(ls, "dlm_create_masters_list error %d", error);
+ goto fail_root_list;
+ }
- ls->ls_recover_dir_sent_res = 0;
- ls->ls_recover_dir_sent_msg = 0;
ls->ls_recover_locks_in = 0;
dlm_set_recover_status(ls, DLM_RS_NODES);
- error = dlm_recover_members_wait(ls);
+ error = dlm_recover_members_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_members_wait error %d", error);
- goto fail;
+ dlm_release_masters_list(ls);
+ goto fail_root_list;
}
start = jiffies;
@@ -99,22 +191,23 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
* nodes their master rsb names that hash to us.
*/
- error = dlm_recover_directory(ls);
+ error = dlm_recover_directory(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_directory error %d", error);
- goto fail;
+ dlm_release_masters_list(ls);
+ goto fail_root_list;
}
dlm_set_recover_status(ls, DLM_RS_DIR);
- error = dlm_recover_directory_wait(ls);
+ error = dlm_recover_directory_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
- goto fail;
+ dlm_release_masters_list(ls);
+ goto fail_root_list;
}
- log_rinfo(ls, "dlm_recover_directory %u out %u messages",
- ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);
+ dlm_release_masters_list(ls);
/*
* We may have outstanding operations that are waiting for a reply from
@@ -124,10 +217,9 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_recover_waiters_pre(ls);
- error = dlm_recovery_stopped(ls);
- if (error) {
+ if (dlm_recovery_stopped(ls)) {
error = -EINTR;
- goto fail;
+ goto fail_root_list;
}
if (neg || dlm_no_directory(ls)) {
@@ -135,35 +227,35 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
* Clear lkb's for departed nodes.
*/
- dlm_recover_purge(ls);
+ dlm_recover_purge(ls, &root_list);
/*
* Get new master nodeid's for rsb's that were mastered on
* departed nodes.
*/
- error = dlm_recover_masters(ls);
+ error = dlm_recover_masters(ls, rv->seq, &root_list);
if (error) {
log_rinfo(ls, "dlm_recover_masters error %d", error);
- goto fail;
+ goto fail_root_list;
}
/*
* Send our locks on remastered rsb's to the new masters.
*/
- error = dlm_recover_locks(ls);
+ error = dlm_recover_locks(ls, rv->seq, &root_list);
if (error) {
log_rinfo(ls, "dlm_recover_locks error %d", error);
- goto fail;
+ goto fail_root_list;
}
dlm_set_recover_status(ls, DLM_RS_LOCKS);
- error = dlm_recover_locks_wait(ls);
+ error = dlm_recover_locks_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
- goto fail;
+ goto fail_root_list;
}
log_rinfo(ls, "dlm_recover_locks %u in",
@@ -175,7 +267,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
* settings.
*/
- dlm_recover_rsbs(ls);
+ dlm_recover_rsbs(ls, &root_list);
} else {
/*
* Other lockspace members may be going through the "neg" steps
@@ -184,14 +276,14 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
*/
dlm_set_recover_status(ls, DLM_RS_LOCKS);
- error = dlm_recover_locks_wait(ls);
+ error = dlm_recover_locks_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
- goto fail;
+ goto fail_root_list;
}
}
- dlm_release_root_list(ls);
+ dlm_release_root_list(&root_list);
/*
* Purge directory-related requests that are saved in requestqueue.
@@ -203,7 +295,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_set_recover_status(ls, DLM_RS_DONE);
- error = dlm_recover_done_wait(ls);
+ error = dlm_recover_done_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_done_wait error %d", error);
goto fail;
@@ -211,8 +303,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_clear_members_gone(ls);
- dlm_adjust_timeouts(ls);
-
dlm_callback_resume(ls);
error = enable_locking(ls, rv->seq);
@@ -240,14 +330,13 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
jiffies_to_msecs(jiffies - start));
mutex_unlock(&ls->ls_recoverd_active);
- dlm_lsop_recover_done(ls);
return 0;
+ fail_root_list:
+ dlm_release_root_list(&root_list);
fail:
- dlm_release_root_list(ls);
- log_rinfo(ls, "dlm_recover %llu error %d",
- (unsigned long long)rv->seq, error);
mutex_unlock(&ls->ls_recoverd_active);
+
return error;
}
@@ -258,16 +347,41 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
static void do_ls_recovery(struct dlm_ls *ls)
{
struct dlm_recover *rv = NULL;
+ int error;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
rv = ls->ls_recover_args;
ls->ls_recover_args = NULL;
if (rv && ls->ls_recover_seq == rv->seq)
clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
if (rv) {
- ls_recover(ls, rv);
+ error = ls_recover(ls, rv);
+ switch (error) {
+ case 0:
+ ls->ls_recovery_result = 0;
+ complete(&ls->ls_recovery_done);
+
+ dlm_lsop_recover_done(ls);
+ break;
+ case -EINTR:
+ /* If recovery was interrupted (-EINTR), wait for the next
+ * ls_recover() iteration, which will hopefully succeed.
+ */
+ log_rinfo(ls, "%s %llu interrupted and should be queued to run again",
+ __func__, (unsigned long long)rv->seq);
+ break;
+ default:
+ log_rinfo(ls, "%s %llu error %d", __func__,
+ (unsigned long long)rv->seq, error);
+
+ /* let new_lockspace() know about the critical error */
+ ls->ls_recovery_result = error;
+ complete(&ls->ls_recovery_done);
+ break;
+ }
+
kfree(rv->nodes);
kfree(rv);
}
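
With the switch statement above, do_ls_recovery() now reports the first recovery result through ls_recovery_done/ls_recovery_result rather than returning it. The waiting side lives in the lockspace creation path (not shown in this hunk); a rough sketch of the producer/consumer pairing, with illustrative names:

	#include <linux/completion.h>

	struct example_ls {
		struct completion recovery_done;	/* init_completion() at lockspace setup */
		int recovery_result;
	};

	/* recovery thread: publish the outcome and wake the waiter */
	static void example_recovery_finished(struct example_ls *ls, int error)
	{
		ls->recovery_result = error;
		complete(&ls->recovery_done);
	}

	/* lockspace creation: block until the first recovery reports back */
	static int example_wait_for_recovery(struct example_ls *ls)
	{
		wait_for_completion(&ls->recovery_done);
		return ls->recovery_result;
	}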
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index e89e0ff8bfa3..719a5243a069 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -14,6 +14,7 @@
#include "dir.h"
#include "config.h"
#include "requestqueue.h"
+#include "util.h"
struct rq_entry {
struct list_head list;
@@ -29,12 +30,14 @@ struct rq_entry {
* lockspace is enabled on some while still suspended on others.
*/
-void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid,
+ const struct dlm_message *ms)
{
struct rq_entry *e;
- int length = ms->m_header.h_length - sizeof(struct dlm_message);
+ int length = le16_to_cpu(ms->m_header.h_length) -
+ sizeof(struct dlm_message);
- e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
+ e = kmalloc(sizeof(struct rq_entry) + length, GFP_ATOMIC);
if (!e) {
log_print("dlm_add_requestqueue: out of memory len %d", length);
return;
@@ -42,11 +45,10 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
e->nodeid = nodeid;
- memcpy(&e->request, ms, ms->m_header.h_length);
+ memcpy(&e->request, ms, sizeof(*ms));
+ memcpy(&e->request.m_extra, ms->m_extra, length);
- mutex_lock(&ls->ls_requestqueue_mutex);
list_add_tail(&e->list, &ls->ls_requestqueue);
- mutex_unlock(&ls->ls_requestqueue_mutex);
}
/*
@@ -66,71 +68,49 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
struct dlm_message *ms;
int error = 0;
- mutex_lock(&ls->ls_requestqueue_mutex);
-
+ write_lock_bh(&ls->ls_requestqueue_lock);
for (;;) {
if (list_empty(&ls->ls_requestqueue)) {
- mutex_unlock(&ls->ls_requestqueue_mutex);
+ clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
error = 0;
break;
}
- e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
- mutex_unlock(&ls->ls_requestqueue_mutex);
+ e = list_first_entry(&ls->ls_requestqueue, struct rq_entry, list);
ms = &e->request;
log_limit(ls, "dlm_process_requestqueue msg %d from %d "
"lkid %x remid %x result %d seq %u",
- ms->m_type, ms->m_header.h_nodeid,
- ms->m_lkid, ms->m_remid, ms->m_result,
+ le32_to_cpu(ms->m_type),
+ le32_to_cpu(ms->m_header.h_nodeid),
+ le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
+ from_dlm_errno(le32_to_cpu(ms->m_result)),
e->recover_seq);
dlm_receive_message_saved(ls, &e->request, e->recover_seq);
-
- mutex_lock(&ls->ls_requestqueue_mutex);
list_del(&e->list);
kfree(e);
if (dlm_locking_stopped(ls)) {
log_debug(ls, "process_requestqueue abort running");
- mutex_unlock(&ls->ls_requestqueue_mutex);
error = -EINTR;
break;
}
+ write_unlock_bh(&ls->ls_requestqueue_lock);
schedule();
+ write_lock_bh(&ls->ls_requestqueue_lock);
}
+ write_unlock_bh(&ls->ls_requestqueue_lock);
return error;
}
-/*
- * After recovery is done, locking is resumed and dlm_recoverd takes all the
- * saved requests and processes them as they would have been by dlm_recv. At
- * the same time, dlm_recv will start receiving new requests from remote nodes.
- * We want to delay dlm_recv processing new requests until dlm_recoverd has
- * finished processing the old saved requests. We don't check for locking
- * stopped here because dlm_ls_stop won't stop locking until it's suspended us
- * (dlm_recv).
- */
-
-void dlm_wait_requestqueue(struct dlm_ls *ls)
-{
- for (;;) {
- mutex_lock(&ls->ls_requestqueue_mutex);
- if (list_empty(&ls->ls_requestqueue))
- break;
- mutex_unlock(&ls->ls_requestqueue_mutex);
- schedule();
- }
- mutex_unlock(&ls->ls_requestqueue_mutex);
-}
-
static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
- uint32_t type = ms->m_type;
+ __le32 type = ms->m_type;
/* the ls is being cleaned up and freed by release_lockspace */
- if (!ls->ls_count)
+ if (!atomic_read(&ls->ls_count))
return 1;
if (dlm_is_removed(ls, nodeid))
@@ -139,9 +119,9 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
/* directory operations are always purged because the directory is
always rebuilt during recovery and the lookups resent */
- if (type == DLM_MSG_REMOVE ||
- type == DLM_MSG_LOOKUP ||
- type == DLM_MSG_LOOKUP_REPLY)
+ if (type == cpu_to_le32(DLM_MSG_REMOVE) ||
+ type == cpu_to_le32(DLM_MSG_LOOKUP) ||
+ type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY))
return 1;
if (!dlm_no_directory(ls))
@@ -155,7 +135,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls)
struct dlm_message *ms;
struct rq_entry *e, *safe;
- mutex_lock(&ls->ls_requestqueue_mutex);
+ write_lock_bh(&ls->ls_requestqueue_lock);
list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
ms = &e->request;
@@ -164,6 +144,6 @@ void dlm_purge_requestqueue(struct dlm_ls *ls)
kfree(e);
}
}
- mutex_unlock(&ls->ls_requestqueue_mutex);
+ write_unlock_bh(&ls->ls_requestqueue_lock);
}
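
The purge_request() change above follows the on-wire endianness convention used throughout this series: message fields keep their __le32 type in memory and are compared against cpu_to_le32() constants, which the compiler folds at build time, instead of byte-swapping each field on receive. A small sketch of the pattern with an illustrative message type value:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	enum { EXAMPLE_MSG_LOOKUP = 5 };	/* illustrative value only */

	struct example_wire_msg {
		__le32 m_type;			/* stored in little-endian wire format */
	};

	static bool example_msg_is_lookup(const struct example_wire_msg *ms)
	{
		/* the constant is converted once at compile time; no runtime swap */
		return ms->m_type == cpu_to_le32(EXAMPLE_MSG_LOOKUP);
	}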
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index 4e403469a845..42bfe23ceabe 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -11,7 +11,8 @@
#ifndef __REQUESTQUEUE_DOT_H__
#define __REQUESTQUEUE_DOT_H__
-void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms);
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid,
+ const struct dlm_message *ms);
int dlm_process_requestqueue(struct dlm_ls *ls);
void dlm_wait_requestqueue(struct dlm_ls *ls);
void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index e5cefa90b1ce..51daf4acbe31 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/sched/signal.h>
+#include <trace/events/dlm.h>
+
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
@@ -23,6 +25,7 @@
#include "user.h"
#include "ast.h"
#include "config.h"
+#include "memory.h"
static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
@@ -108,11 +111,11 @@ static void compat_input(struct dlm_write_request *kb,
kb->i.lock.parent = kb32->i.lock.parent;
kb->i.lock.xid = kb32->i.lock.xid;
kb->i.lock.timeout = kb32->i.lock.timeout;
- kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
- kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
- kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
- kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
- kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
+ kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
+ kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
+ kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
+ kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
+ kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
}
@@ -127,9 +130,9 @@ static void compat_output(struct dlm_lock_result *res,
res32->version[1] = res->version[1];
res32->version[2] = res->version[2];
- res32->user_astaddr = (__u32)(long)res->user_astaddr;
- res32->user_astparam = (__u32)(long)res->user_astparam;
- res32->user_lksb = (__u32)(long)res->user_lksb;
+ res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
+ res32->user_astparam = (__u32)(__force long)res->user_astparam;
+ res32->user_lksb = (__u32)(__force long)res->user_lksb;
res32->bast_mode = res->bast_mode;
res32->lvb_offset = res->lvb_offset;
@@ -173,18 +176,20 @@ static int lkb_is_endoflife(int mode, int status)
being removed and then remove that lkb from the orphans list and free it */
void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags, uint64_t seq)
+ int status, uint32_t sbflags)
{
struct dlm_ls *ls;
struct dlm_user_args *ua;
struct dlm_user_proc *proc;
- int rv;
+ struct dlm_callback *cb;
+ int rv, copy_lvb;
- if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
+ if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
+ test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
return;
ls = lkb->lkb_resource->res_ls;
- mutex_lock(&ls->ls_clear_proc_locks);
+ spin_lock_bh(&ls->ls_clear_proc_locks);
/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
@@ -192,7 +197,8 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
for cases where a completion ast is received for an operation that
began before clear_proc_locks did its cancel/unlock. */
- if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
+ if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
+ test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
goto out;
DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
@@ -203,34 +209,40 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
goto out;
if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
- lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
-
- spin_lock(&proc->asts_spin);
-
- rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
- if (rv < 0) {
- spin_unlock(&proc->asts_spin);
- goto out;
- }
-
- if (list_empty(&lkb->lkb_cb_list)) {
- kref_get(&lkb->lkb_ref);
- list_add_tail(&lkb->lkb_cb_list, &proc->asts);
- wake_up_interruptible(&proc->wait);
+ set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
+
+ spin_lock_bh(&proc->asts_spin);
+
+ if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags,
+ &copy_lvb)) {
+ rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb);
+ if (!rv) {
+ cb->copy_lvb = copy_lvb;
+ cb->ua = *ua;
+ cb->lkb_lksb = &cb->ua.lksb;
+ if (copy_lvb) {
+ memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
+ DLM_USER_LVB_LEN);
+ cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
+ }
+
+ list_add_tail(&cb->list, &proc->asts);
+ wake_up_interruptible(&proc->wait);
+ }
}
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
- if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
+ if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
/* N.B. spin_lock locks_spin, not asts_spin */
- spin_lock(&proc->locks_spin);
+ spin_lock_bh(&proc->locks_spin);
if (!list_empty(&lkb->lkb_ownqueue)) {
list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb);
}
- spin_unlock(&proc->locks_spin);
+ spin_unlock_bh(&proc->locks_spin);
}
out:
- mutex_unlock(&ls->ls_clear_proc_locks);
+ spin_unlock_bh(&ls->ls_clear_proc_locks);
}
static int device_user_lock(struct dlm_user_proc *proc,
@@ -263,22 +275,19 @@ static int device_user_lock(struct dlm_user_proc *proc,
if (params->flags & DLM_LKF_CONVERT) {
error = dlm_user_convert(ls, ua,
- params->mode, params->flags,
- params->lkid, params->lvb,
- (unsigned long) params->timeout);
+ params->mode, params->flags,
+ params->lkid, params->lvb);
} else if (params->flags & DLM_LKF_ORPHAN) {
error = dlm_user_adopt_orphan(ls, ua,
params->mode, params->flags,
params->name, params->namelen,
- (unsigned long) params->timeout,
&lkid);
if (!error)
error = lkid;
} else {
error = dlm_user_request(ls, ua,
params->mode, params->flags,
- params->name, params->namelen,
- (unsigned long) params->timeout);
+ params->name, params->namelen);
if (!error)
error = ua->lksb.sb_lkid;
}
@@ -402,9 +411,9 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
- DLM_USER_LVB_LEN, NULL, NULL, NULL,
- &lockspace);
+ error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
+ params->flags, DLM_USER_LVB_LEN, NULL,
+ NULL, NULL, &lockspace);
if (error)
return error;
@@ -416,7 +425,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
dlm_put_lockspace(ls);
if (error)
- dlm_release_lockspace(lockspace, 0);
+ dlm_release_lockspace(lockspace, DLM_RELEASE_NO_LOCKS);
else
error = ls->ls_device.minor;
@@ -427,7 +436,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
{
dlm_lockspace_t *lockspace;
struct dlm_ls *ls;
- int error, force = 0;
+ int error, force = DLM_RELEASE_NO_LOCKS;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -437,9 +446,9 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
return -ENOENT;
if (params->flags & DLM_USER_LSFLG_FORCEFREE)
- force = 2;
+ force = DLM_RELEASE_NORMAL;
- lockspace = ls->ls_local_handle;
+ lockspace = ls;
dlm_put_lockspace(ls);
/* The final dlm_release_lockspace waits for references to go to
@@ -642,7 +651,7 @@ static int device_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- proc->lockspace = ls->ls_local_handle;
+ proc->lockspace = ls;
INIT_LIST_HEAD(&proc->asts);
INIT_LIST_HEAD(&proc->locks);
INIT_LIST_HEAD(&proc->unlocking);
@@ -777,11 +786,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct dlm_user_proc *proc = file->private_data;
- struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
- struct dlm_callback cb;
- int rv, resid, copy_lvb = 0;
- int old_mode, new_mode;
+ struct dlm_callback *cb;
+ int rv, ret;
if (count == sizeof(struct dlm_device_version)) {
rv = copy_version_to_user(buf, count);
@@ -800,16 +807,14 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
#endif
return -EINVAL;
- try_another:
-
/* do we really need this? can a read happen after a close? */
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
return -EINVAL;
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
if (list_empty(&proc->asts)) {
if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return -EAGAIN;
}
@@ -818,16 +823,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
repeat:
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&proc->asts) && !signal_pending(current)) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
schedule();
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
goto repeat;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&proc->wait, &wait);
if (signal_pending(current)) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return -ERESTARTSYS;
}
}
@@ -836,53 +841,25 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
without removing lkb_cb_list; so empty lkb_cb_list is always
consistent with empty lkb_callbacks */
- lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
-
- /* rem_lkb_callback sets a new lkb_last_cast */
- old_mode = lkb->lkb_last_cast.mode;
-
- rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
- if (rv < 0) {
- /* this shouldn't happen; lkb should have been removed from
- list when resid was zero */
- log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
- list_del_init(&lkb->lkb_cb_list);
- spin_unlock(&proc->asts_spin);
- /* removes ref for proc->asts, may cause lkb to be freed */
- dlm_put_lkb(lkb);
- goto try_another;
+ cb = list_first_entry(&proc->asts, struct dlm_callback, list);
+ list_del(&cb->list);
+ spin_unlock_bh(&proc->asts_spin);
+
+ if (cb->flags & DLM_CB_BAST) {
+ trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
+ cb->res_length);
+ } else if (cb->flags & DLM_CB_CAST) {
+ cb->lkb_lksb->sb_status = cb->sb_status;
+ cb->lkb_lksb->sb_flags = cb->sb_flags;
+ trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
+ cb->sb_flags, cb->res_name, cb->res_length);
}
- if (!resid)
- list_del_init(&lkb->lkb_cb_list);
- spin_unlock(&proc->asts_spin);
-
- if (cb.flags & DLM_CB_SKIP) {
- /* removes ref for proc->asts, may cause lkb to be freed */
- if (!resid)
- dlm_put_lkb(lkb);
- goto try_another;
- }
-
- if (cb.flags & DLM_CB_CAST) {
- new_mode = cb.mode;
-
- if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
- dlm_lvb_operations[old_mode + 1][new_mode + 1])
- copy_lvb = 1;
-
- lkb->lkb_lksb->sb_status = cb.sb_status;
- lkb->lkb_lksb->sb_flags = cb.sb_flags;
- }
-
- rv = copy_result_to_user(lkb->lkb_ua,
- test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- cb.flags, cb.mode, copy_lvb, buf, count);
-
- /* removes ref for proc->asts, may cause lkb to be freed */
- if (!resid)
- dlm_put_lkb(lkb);
- return rv;
+ ret = copy_result_to_user(&cb->ua,
+ test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
+ cb->flags, cb->mode, cb->copy_lvb, buf, count);
+ dlm_free_cb(cb);
+ return ret;
}
static __poll_t device_poll(struct file *file, poll_table *wait)
@@ -891,12 +868,12 @@ static __poll_t device_poll(struct file *file, poll_table *wait)
poll_wait(file, &proc->wait, wait);
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
if (!list_empty(&proc->asts)) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return EPOLLIN | EPOLLRDNORM;
}
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return 0;
}
diff --git a/fs/dlm/user.h b/fs/dlm/user.h
index 6b9bce6b96e0..2caf8e6e24d5 100644
--- a/fs/dlm/user.h
+++ b/fs/dlm/user.h
@@ -6,8 +6,9 @@
#ifndef __USER_DOT_H__
#define __USER_DOT_H__
+void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb);
void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags, uint64_t seq);
+ int status, uint32_t sbflags);
int dlm_user_init(void);
void dlm_user_exit(void);
int dlm_device_deregister(struct dlm_ls *ls);
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
index 58acbcc2081a..f2bc401f312f 100644
--- a/fs/dlm/util.c
+++ b/fs/dlm/util.c
@@ -20,28 +20,10 @@
#define DLM_ERRNO_ETIMEDOUT 110
#define DLM_ERRNO_EINPROGRESS 115
-void header_out(struct dlm_header *hd)
-{
- hd->h_version = cpu_to_le32(hd->h_version);
- /* does it for others u32 in union as well */
- hd->u.h_lockspace = cpu_to_le32(hd->u.h_lockspace);
- hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
- hd->h_length = cpu_to_le16(hd->h_length);
-}
-
-void header_in(struct dlm_header *hd)
-{
- hd->h_version = le32_to_cpu(hd->h_version);
- /* does it for others u32 in union as well */
- hd->u.h_lockspace = le32_to_cpu(hd->u.h_lockspace);
- hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
- hd->h_length = le16_to_cpu(hd->h_length);
-}
-
/* higher errno values are inconsistent across architectures, so select
one set of values for on the wire */
-static int to_dlm_errno(int err)
+int to_dlm_errno(int err)
{
switch (err) {
case -EDEADLK:
@@ -62,7 +44,7 @@ static int to_dlm_errno(int err)
return err;
}
-static int from_dlm_errno(int err)
+int from_dlm_errno(int err)
{
switch (err) {
case -DLM_ERRNO_EDEADLK:
@@ -82,73 +64,3 @@ static int from_dlm_errno(int err)
}
return err;
}
-
-void dlm_message_out(struct dlm_message *ms)
-{
- header_out(&ms->m_header);
-
- ms->m_type = cpu_to_le32(ms->m_type);
- ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
- ms->m_pid = cpu_to_le32(ms->m_pid);
- ms->m_lkid = cpu_to_le32(ms->m_lkid);
- ms->m_remid = cpu_to_le32(ms->m_remid);
- ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid);
- ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid);
- ms->m_exflags = cpu_to_le32(ms->m_exflags);
- ms->m_sbflags = cpu_to_le32(ms->m_sbflags);
- ms->m_flags = cpu_to_le32(ms->m_flags);
- ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq);
- ms->m_hash = cpu_to_le32(ms->m_hash);
- ms->m_status = cpu_to_le32(ms->m_status);
- ms->m_grmode = cpu_to_le32(ms->m_grmode);
- ms->m_rqmode = cpu_to_le32(ms->m_rqmode);
- ms->m_bastmode = cpu_to_le32(ms->m_bastmode);
- ms->m_asts = cpu_to_le32(ms->m_asts);
- ms->m_result = cpu_to_le32(to_dlm_errno(ms->m_result));
-}
-
-void dlm_message_in(struct dlm_message *ms)
-{
- header_in(&ms->m_header);
-
- ms->m_type = le32_to_cpu(ms->m_type);
- ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
- ms->m_pid = le32_to_cpu(ms->m_pid);
- ms->m_lkid = le32_to_cpu(ms->m_lkid);
- ms->m_remid = le32_to_cpu(ms->m_remid);
- ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
- ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
- ms->m_exflags = le32_to_cpu(ms->m_exflags);
- ms->m_sbflags = le32_to_cpu(ms->m_sbflags);
- ms->m_flags = le32_to_cpu(ms->m_flags);
- ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
- ms->m_hash = le32_to_cpu(ms->m_hash);
- ms->m_status = le32_to_cpu(ms->m_status);
- ms->m_grmode = le32_to_cpu(ms->m_grmode);
- ms->m_rqmode = le32_to_cpu(ms->m_rqmode);
- ms->m_bastmode = le32_to_cpu(ms->m_bastmode);
- ms->m_asts = le32_to_cpu(ms->m_asts);
- ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result));
-}
-
-void dlm_rcom_out(struct dlm_rcom *rc)
-{
- header_out(&rc->rc_header);
-
- rc->rc_type = cpu_to_le32(rc->rc_type);
- rc->rc_result = cpu_to_le32(rc->rc_result);
- rc->rc_id = cpu_to_le64(rc->rc_id);
- rc->rc_seq = cpu_to_le64(rc->rc_seq);
- rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply);
-}
-
-void dlm_rcom_in(struct dlm_rcom *rc)
-{
- header_in(&rc->rc_header);
-
- rc->rc_type = le32_to_cpu(rc->rc_type);
- rc->rc_result = le32_to_cpu(rc->rc_result);
- rc->rc_id = le64_to_cpu(rc->rc_id);
- rc->rc_seq = le64_to_cpu(rc->rc_seq);
- rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
-}
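
With the per-struct dlm_message_out()/dlm_rcom_out() swappers removed, util.c now exports to_dlm_errno()/from_dlm_errno() and the callers combine them with the endianness helpers at the point a field is written or read (as in the requestqueue.c hunk earlier). A sketch of that usage with an illustrative struct:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	int to_dlm_errno(int err);
	int from_dlm_errno(int err);

	struct example_msg {
		__le32 m_result;	/* on-wire, little-endian */
	};

	/* sender: local errno -> architecture-independent wire errno -> __le32 */
	static void example_result_out(struct example_msg *ms, int error)
	{
		ms->m_result = cpu_to_le32(to_dlm_errno(error));
	}

	/* receiver: __le32 -> wire errno -> local errno */
	static int example_result_in(const struct example_msg *ms)
	{
		return from_dlm_errno(le32_to_cpu(ms->m_result));
	}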
diff --git a/fs/dlm/util.h b/fs/dlm/util.h
index d46f23c7a6a0..b6a4b8adca8d 100644
--- a/fs/dlm/util.h
+++ b/fs/dlm/util.h
@@ -11,12 +11,8 @@
#ifndef __UTIL_DOT_H__
#define __UTIL_DOT_H__
-void dlm_message_out(struct dlm_message *ms);
-void dlm_message_in(struct dlm_message *ms);
-void dlm_rcom_out(struct dlm_rcom *rc);
-void dlm_rcom_in(struct dlm_rcom *rc);
-void header_out(struct dlm_header *hd);
-void header_in(struct dlm_header *hd);
+int to_dlm_errno(int err);
+int from_dlm_errno(int err);
#endif
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index e619c31b6bd9..49f56a598ecb 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -10,10 +10,11 @@
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
+#include <linux/swap.h>
#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */
-int sysctl_drop_caches;
+static int sysctl_drop_caches;
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
@@ -27,7 +28,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
* inodes without pages but we deliberately won't in case
* we need to reschedule to avoid softlockups.
*/
- if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
(mapping_empty(inode->i_mapping) && !need_resched())) {
spin_unlock(&inode->i_lock);
continue;
@@ -47,7 +48,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
iput(toput_inode);
}
-int drop_caches_sysctl_handler(struct ctl_table *table, int write,
+static int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int ret;
@@ -59,6 +60,7 @@ int drop_caches_sysctl_handler(struct ctl_table *table, int write,
static int stfu;
if (sysctl_drop_caches & 1) {
+ lru_add_drain_all();
iterate_supers(drop_pagecache_sb, NULL);
count_vm_event(DROP_PAGECACHE);
}
@@ -75,3 +77,22 @@ int drop_caches_sysctl_handler(struct ctl_table *table, int write,
}
return 0;
}
+
+static const struct ctl_table drop_caches_table[] = {
+ {
+ .procname = "drop_caches",
+ .data = &sysctl_drop_caches,
+ .maxlen = sizeof(int),
+ .mode = 0200,
+ .proc_handler = drop_caches_sysctl_handler,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_FOUR,
+ },
+};
+
+static int __init init_vm_drop_caches_sysctls(void)
+{
+ register_sysctl_init("vm", drop_caches_table);
+ return 0;
+}
+fs_initcall(init_vm_drop_caches_sysctls);
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig
index 1bdeaa6d5790..c2f4fb41b4e6 100644
--- a/fs/ecryptfs/Kconfig
+++ b/fs/ecryptfs/Kconfig
@@ -4,7 +4,7 @@ config ECRYPT_FS
depends on KEYS && CRYPTO && (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
select CRYPTO_ECB
select CRYPTO_CBC
- select CRYPTO_MD5
+ select CRYPTO_LIB_MD5
help
Encrypted filesystem that operates on the VFS layer. See
<file:Documentation/filesystems/ecryptfs.rst> to learn more about
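
The Kconfig change above moves eCryptfs from the crypto_shash MD5 transform to the MD5 library interface; the crypto.c hunks below then call md5() directly instead of ecryptfs_calculate_md5(). A hedged sketch of that one-shot call, assuming the md5(data, len, out) helper is declared in <crypto/md5.h> as the calls in this patch suggest:

	#include <crypto/md5.h>		/* assumed home of MD5_DIGEST_SIZE and md5() */
	#include <linux/types.h>
	#include <linux/string.h>

	static void example_derive_iv(u8 *iv, size_t iv_bytes,
				      const u8 *src, size_t src_len)
	{
		u8 dst[MD5_DIGEST_SIZE];

		/* synchronous, non-failing one-shot digest as used in the patch */
		md5(src, src_len, dst);
		memcpy(iv, dst, iv_bytes);
	}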
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index e3f5d7f3c8a0..260f8a4938b0 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -9,7 +9,6 @@
* Michael C. Thompson <mcthomps@us.ibm.com>
*/
-#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -21,7 +20,7 @@
#include <linux/file.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/xattr.h>
#include "ecryptfs_kernel.h"
@@ -48,32 +47,6 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
}
}
-/**
- * ecryptfs_calculate_md5 - calculates the md5 of @src
- * @dst: Pointer to 16 bytes of allocated memory
- * @crypt_stat: Pointer to crypt_stat struct for the current inode
- * @src: Data to be md5'd
- * @len: Length of @src
- *
- * Uses the allocated crypto context that crypt_stat references to
- * generate the MD5 sum of the contents of src.
- */
-static int ecryptfs_calculate_md5(char *dst,
- struct ecryptfs_crypt_stat *crypt_stat,
- char *src, int len)
-{
- int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst);
-
- if (rc) {
- printk(KERN_ERR
- "%s: Error computing crypto hash; rc = [%d]\n",
- __func__, rc);
- goto out;
- }
-out:
- return rc;
-}
-
static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name,
char *cipher_name,
char *chaining_modifier)
@@ -104,13 +77,10 @@ out:
*
* Generate the initialization vector from the given root IV and page
* offset.
- *
- * Returns zero on success; non-zero on error.
*/
-int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
- loff_t offset)
+void ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
+ loff_t offset)
{
- int rc = 0;
char dst[MD5_DIGEST_SIZE];
char src[ECRYPTFS_MAX_IV_BYTES + 16];
@@ -129,20 +99,12 @@ int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
ecryptfs_printk(KERN_DEBUG, "source:\n");
ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16));
}
- rc = ecryptfs_calculate_md5(dst, crypt_stat, src,
- (crypt_stat->iv_bytes + 16));
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
- "MD5 while generating IV for a page\n");
- goto out;
- }
+ md5(src, crypt_stat->iv_bytes + 16, dst);
memcpy(iv, dst, crypt_stat->iv_bytes);
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "derived iv:\n");
ecryptfs_dump_hex(iv, crypt_stat->iv_bytes);
}
-out:
- return rc;
}
/**
@@ -151,29 +113,14 @@ out:
*
* Initialize the crypt_stat structure.
*/
-int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
+void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
{
- struct crypto_shash *tfm;
- int rc;
-
- tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
- if (IS_ERR(tfm)) {
- rc = PTR_ERR(tfm);
- ecryptfs_printk(KERN_ERR, "Error attempting to "
- "allocate crypto context; rc = [%d]\n",
- rc);
- return rc;
- }
-
memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
INIT_LIST_HEAD(&crypt_stat->keysig_list);
mutex_init(&crypt_stat->keysig_list_mutex);
mutex_init(&crypt_stat->cs_mutex);
mutex_init(&crypt_stat->cs_tfm_mutex);
- crypt_stat->hash_tfm = tfm;
crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED;
-
- return 0;
}
/**
@@ -187,7 +134,6 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
crypto_free_skcipher(crypt_stat->tfm);
- crypto_free_shash(crypt_stat->hash_tfm);
list_for_each_entry_safe(key_sig, key_sig_tmp,
&crypt_stat->keysig_list, crypt_stat_list) {
list_del(&key_sig->crypt_stat_list);
@@ -260,22 +206,6 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
return i;
}
-struct extent_crypt_result {
- struct completion completion;
- int rc;
-};
-
-static void extent_crypt_complete(struct crypto_async_request *req, int rc)
-{
- struct extent_crypt_result *ecr = req->data;
-
- if (rc == -EINPROGRESS)
- return;
-
- ecr->rc = rc;
- complete(&ecr->completion);
-}
-
/**
* crypt_scatterlist
* @crypt_stat: Pointer to the crypt_stat struct to initialize.
@@ -293,7 +223,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *iv, int op)
{
struct skcipher_request *req = NULL;
- struct extent_crypt_result ecr;
+ DECLARE_CRYPTO_WAIT(ecr);
int rc = 0;
if (unlikely(ecryptfs_verbosity > 0)) {
@@ -303,8 +233,6 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
crypt_stat->key_size);
}
- init_completion(&ecr.completion);
-
mutex_lock(&crypt_stat->cs_tfm_mutex);
req = skcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
if (!req) {
@@ -315,7 +243,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- extent_crypt_complete, &ecr);
+ crypto_req_done, &ecr);
/* Consider doing this once, when the file is opened */
if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
rc = crypto_skcipher_setkey(crypt_stat->tfm, crypt_stat->key,
@@ -334,13 +262,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
skcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
rc = op == ENCRYPT ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
- if (rc == -EINPROGRESS || rc == -EBUSY) {
- struct extent_crypt_result *ecr = req->base.data;
-
- wait_for_completion(&ecr->completion);
- rc = ecr->rc;
- reinit_completion(&ecr->completion);
- }
+ rc = crypto_wait_req(rc, &ecr);
out:
skcipher_request_free(req);
return rc;
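
The hunk above drops eCryptfs' hand-rolled extent_crypt_result completion in favour of the generic crypto wait helpers. A minimal sketch of that synchronous-wait pattern around an asynchronous skcipher request (illustrative function, not the patch's crypt_scatterlist()):

	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/gfp.h>

	static int example_encrypt(struct crypto_skcipher *tfm,
				   struct scatterlist *src, struct scatterlist *dst,
				   unsigned int len, u8 *iv)
	{
		DECLARE_CRYPTO_WAIT(wait);	/* completion plus result holder */
		struct skcipher_request *req;
		int rc;

		req = skcipher_request_alloc(tfm, GFP_NOFS);
		if (!req)
			return -ENOMEM;

		/* crypto_req_done() completes the wait and records the final status */
		skcipher_request_set_callback(req,
				CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst, len, iv);

		/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the
		 * real completion status */
		rc = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
		return rc;
	}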
@@ -352,10 +274,10 @@ out:
* Convert an eCryptfs page index into a lower byte offset
*/
static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
- struct page *page)
+ struct folio *folio)
{
return ecryptfs_lower_header_size(crypt_stat) +
- ((loff_t)page->index << PAGE_SHIFT);
+ (loff_t)folio->index * PAGE_SIZE;
}
/**
@@ -364,6 +286,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
* encryption operation
* @dst_page: The page to write the result into
* @src_page: The page to read from
+ * @page_index: The offset in the file (in units of PAGE_SIZE)
* @extent_offset: Page extent offset for use in generating IV
* @op: ENCRYPT or DECRYPT to indicate the desired operation
*
@@ -374,9 +297,9 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
struct page *dst_page,
struct page *src_page,
+ pgoff_t page_index,
unsigned long extent_offset, int op)
{
- pgoff_t page_index = op == ENCRYPT ? src_page->index : dst_page->index;
loff_t extent_base;
char extent_iv[ECRYPTFS_MAX_IV_BYTES];
struct scatterlist src_sg, dst_sg;
@@ -384,14 +307,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
int rc;
extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
- rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
- (extent_base + extent_offset));
- if (rc) {
- ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
- "extent [0x%.16llx]; rc = [%d]\n",
- (unsigned long long)(extent_base + extent_offset), rc);
- goto out;
- }
+ ecryptfs_derive_iv(extent_iv, crypt_stat, extent_base + extent_offset);
sg_init_table(&src_sg, 1);
sg_init_table(&dst_sg, 1);
@@ -416,7 +332,7 @@ out:
/**
* ecryptfs_encrypt_page
- * @page: Page mapped from the eCryptfs inode for the file; contains
+ * @folio: Folio mapped from the eCryptfs inode for the file; contains
* decrypted content that needs to be encrypted (to a temporary
* page; not in place) and written out to the lower file
*
@@ -430,7 +346,7 @@ out:
*
* Returns zero on success; negative on error
*/
-int ecryptfs_encrypt_page(struct page *page)
+int ecryptfs_encrypt_page(struct folio *folio)
{
struct inode *ecryptfs_inode;
struct ecryptfs_crypt_stat *crypt_stat;
@@ -440,7 +356,7 @@ int ecryptfs_encrypt_page(struct page *page)
loff_t lower_offset;
int rc = 0;
- ecryptfs_inode = page->mapping->host;
+ ecryptfs_inode = folio->mapping->host;
crypt_stat =
&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
@@ -455,8 +371,9 @@ int ecryptfs_encrypt_page(struct page *page)
for (extent_offset = 0;
extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
extent_offset++) {
- rc = crypt_extent(crypt_stat, enc_extent_page, page,
- extent_offset, ENCRYPT);
+ rc = crypt_extent(crypt_stat, enc_extent_page,
+ folio_page(folio, 0), folio->index,
+ extent_offset, ENCRYPT);
if (rc) {
printk(KERN_ERR "%s: Error encrypting extent; "
"rc = [%d]\n", __func__, rc);
@@ -464,11 +381,11 @@ int ecryptfs_encrypt_page(struct page *page)
}
}
- lower_offset = lower_offset_for_page(crypt_stat, page);
- enc_extent_virt = kmap(enc_extent_page);
+ lower_offset = lower_offset_for_page(crypt_stat, folio);
+ enc_extent_virt = kmap_local_page(enc_extent_page);
rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
PAGE_SIZE);
- kunmap(enc_extent_page);
+ kunmap_local(enc_extent_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to write lower page; rc = [%d]\n",
@@ -485,7 +402,7 @@ out:
/**
* ecryptfs_decrypt_page
- * @page: Page mapped from the eCryptfs inode for the file; data read
+ * @folio: Folio mapped from the eCryptfs inode for the file; data read
* and decrypted from the lower file will be written into this
* page
*
@@ -499,7 +416,7 @@ out:
*
* Returns zero on success; negative on error
*/
-int ecryptfs_decrypt_page(struct page *page)
+int ecryptfs_decrypt_page(struct folio *folio)
{
struct inode *ecryptfs_inode;
struct ecryptfs_crypt_stat *crypt_stat;
@@ -508,16 +425,16 @@ int ecryptfs_decrypt_page(struct page *page)
loff_t lower_offset;
int rc = 0;
- ecryptfs_inode = page->mapping->host;
+ ecryptfs_inode = folio->mapping->host;
crypt_stat =
&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
- lower_offset = lower_offset_for_page(crypt_stat, page);
- page_virt = kmap(page);
+ lower_offset = lower_offset_for_page(crypt_stat, folio);
+ page_virt = kmap_local_folio(folio, 0);
rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
ecryptfs_inode);
- kunmap(page);
+ kunmap_local(page_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to read lower page; rc = [%d]\n",
@@ -528,8 +445,9 @@ int ecryptfs_decrypt_page(struct page *page)
for (extent_offset = 0;
extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
extent_offset++) {
- rc = crypt_extent(crypt_stat, page, page,
- extent_offset, DECRYPT);
+ struct page *page = folio_page(folio, 0);
+ rc = crypt_extent(crypt_stat, page, page, folio->index,
+ extent_offset, DECRYPT);
if (rc) {
printk(KERN_ERR "%s: Error decrypting extent; "
"rc = [%d]\n", __func__, rc);
@@ -630,31 +548,20 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
*/
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat)
{
- int rc = 0;
char dst[MD5_DIGEST_SIZE];
BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE);
BUG_ON(crypt_stat->iv_bytes <= 0);
if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
- rc = -EINVAL;
ecryptfs_printk(KERN_WARNING, "Session key not valid; "
"cannot generate root IV\n");
- goto out;
- }
- rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key,
- crypt_stat->key_size);
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
- "MD5 while generating root IV\n");
- goto out;
- }
- memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
-out:
- if (rc) {
memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes);
crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING;
+ return -EINVAL;
}
- return rc;
+ md5(crypt_stat->key, crypt_stat->key_size, dst);
+ memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
+ return 0;
}
static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
@@ -1105,7 +1012,7 @@ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
}
inode_lock(lower_inode);
- rc = __vfs_setxattr(&init_user_ns, lower_dentry, lower_inode,
+ rc = __vfs_setxattr(&nop_mnt_idmap, lower_dentry, lower_inode,
ECRYPTFS_XATTR_NAME, page_virt, size, 0);
if (!rc && ecryptfs_inode)
fsstack_copy_attr_all(ecryptfs_inode, lower_inode);
@@ -1630,9 +1537,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
goto out;
}
mutex_init(&tmp_tfm->key_tfm_mutex);
- strncpy(tmp_tfm->cipher_name, cipher_name,
- ECRYPTFS_MAX_CIPHER_NAME_SIZE);
- tmp_tfm->cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
+ strscpy(tmp_tfm->cipher_name, cipher_name);
tmp_tfm->key_size = key_size;
rc = ecryptfs_process_key_cipher(&tmp_tfm->key_tfm,
tmp_tfm->cipher_name,
@@ -1973,16 +1878,6 @@ out:
return rc;
}
-static bool is_dot_dotdot(const char *name, size_t name_size)
-{
- if (name_size == 1 && name[0] == '.')
- return true;
- else if (name_size == 2 && name[0] == '.' && name[1] == '.')
- return true;
-
- return false;
-}
-
/**
* ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
* @plaintext_name: The plaintext name
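
The crypto.c hunks above convert ecryptfs_encrypt_page()/ecryptfs_decrypt_page() from struct page to struct folio and replace kmap()/kunmap() with the local-mapping API. Below is a minimal illustrative sketch (not part of the patch) of that mapping pattern on an order-0 folio; fill_folio_from_lower() and its read_lower callback are hypothetical stand-ins for the eCryptfs helpers.

/*
 * Illustrative sketch only, not part of the patch: read PAGE_SIZE bytes of
 * lower-file data through a single-page (order-0) folio with the
 * kmap_local API, mirroring the pattern in ecryptfs_decrypt_page() above.
 * The read_lower callback is a hypothetical stand-in for
 * ecryptfs_read_lower().
 */
#include <linux/highmem.h>
#include <linux/pagemap.h>

static int fill_folio_from_lower(struct folio *folio, loff_t lower_offset,
				 int (*read_lower)(char *buf, loff_t off,
						   size_t len))
{
	char *virt;
	int rc;

	/*
	 * kmap_local mappings are CPU-local and cheap; they must be undone
	 * with the address that was returned, while still on this CPU.
	 */
	virt = kmap_local_folio(folio, 0);
	rc = read_lower(virt, lower_offset, PAGE_SIZE);
	kunmap_local(virt);

	return rc < 0 ? rc : 0;
}
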
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index acaa0825e9bb..6648a924e31a 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -17,7 +17,9 @@
/**
* ecryptfs_d_revalidate - revalidate an ecryptfs dentry
- * @dentry: The ecryptfs dentry
+ * @dir: inode of expected parent
+ * @name: expected name
+ * @dentry: dentry to revalidate
* @flags: lookup flags
*
* Called when the VFS needs to revalidate a dentry. This
@@ -28,7 +30,8 @@
* Returns 1 if valid, 0 otherwise.
*
*/
-static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int ecryptfs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc = 1;
@@ -36,8 +39,15 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
- rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
+ if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) {
+ struct inode *lower_dir = ecryptfs_inode_to_lower(dir);
+ struct name_snapshot n;
+
+ take_dentry_name_snapshot(&n, lower_dentry);
+ rc = lower_dentry->d_op->d_revalidate(lower_dir, &n.name,
+ lower_dentry, flags);
+ release_dentry_name_snapshot(&n);
+ }
if (d_really_is_positive(dentry)) {
struct inode *inode = d_inode(dentry);
@@ -49,14 +59,6 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
return rc;
}
-struct kmem_cache *ecryptfs_dentry_info_cache;
-
-static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
-{
- kmem_cache_free(ecryptfs_dentry_info_cache,
- container_of(head, struct ecryptfs_dentry_info, rcu));
-}
-
/**
* ecryptfs_d_release
* @dentry: The ecryptfs dentry
@@ -65,11 +67,7 @@ static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
*/
static void ecryptfs_d_release(struct dentry *dentry)
{
- struct ecryptfs_dentry_info *p = dentry->d_fsdata;
- if (p) {
- path_put(&p->lower_path);
- call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
- }
+ dput(dentry->d_fsdata);
}
const struct dentry_operations ecryptfs_dops = {
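
With struct ecryptfs_dentry_info removed, d_fsdata now carries a counted reference to the lower dentry directly, so ->d_release() collapses to a plain dput(). A minimal sketch (not from the patch) of the lifetime pairing this relies on; the example_* names are hypothetical stand-ins for ecryptfs_set_dentry_lower()/ecryptfs_d_release().

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <linux/dcache.h>

static void example_set_lower(struct dentry *dentry, struct dentry *lower)
{
	/* lookup already holds a reference to 'lower'; hand it to d_fsdata */
	dentry->d_fsdata = lower;
}

static void example_release(struct dentry *dentry)
{
	/* dput(NULL) is a no-op, so dentries with no lower peer are fine */
	dput(dentry->d_fsdata);
}
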
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 5f2b49e13731..62a2ea7f59ed 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -14,6 +14,7 @@
#ifndef ECRYPTFS_KERNEL_H
#define ECRYPTFS_KERNEL_H
+#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
@@ -137,8 +138,6 @@ ecryptfs_get_key_payload_data(struct key *key)
+ MAGIC_ECRYPTFS_MARKER_SIZE_BYTES)
#define ECRYPTFS_DEFAULT_CIPHER "aes"
#define ECRYPTFS_DEFAULT_KEY_BYTES 16
-#define ECRYPTFS_DEFAULT_HASH "md5"
-#define ECRYPTFS_TAG_70_DIGEST ECRYPTFS_DEFAULT_HASH
#define ECRYPTFS_TAG_1_PACKET_TYPE 0x01
#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C
#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED
@@ -163,8 +162,6 @@ ecryptfs_get_key_payload_data(struct key *key)
* ECRYPTFS_MAX_IV_BYTES */
#define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16
#define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */
-#define MD5_DIGEST_SIZE 16
-#define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE
#define ECRYPTFS_TAG_70_MIN_METADATA_SIZE (1 + ECRYPTFS_MIN_PKT_LEN_SIZE \
+ ECRYPTFS_SIG_SIZE + 1 + 1)
#define ECRYPTFS_TAG_70_MAX_METADATA_SIZE (1 + ECRYPTFS_MAX_PKT_LEN_SIZE \
@@ -237,8 +234,6 @@ struct ecryptfs_crypt_stat {
unsigned int extent_mask;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct crypto_skcipher *tfm;
- struct crypto_shash *hash_tfm; /* Crypto context for generating
- * the initialization vectors */
unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
@@ -258,13 +253,6 @@ struct ecryptfs_inode_info {
struct ecryptfs_crypt_stat crypt_stat;
};
-/* dentry private data. Each dentry must keep track of a lower
- * vfsmount too. */
-struct ecryptfs_dentry_info {
- struct path lower_path;
- struct rcu_head rcu;
-};
-
/**
* ecryptfs_global_auth_tok - A key used to encrypt all new files under the mountpoint
* @flags: Status flags
@@ -348,6 +336,7 @@ struct ecryptfs_mount_crypt_stat {
/* superblock private data. */
struct ecryptfs_sb_info {
struct super_block *wsi_sb;
+ struct vfsmount *lower_mnt;
struct ecryptfs_mount_crypt_stat mount_crypt_stat;
};
@@ -494,22 +483,25 @@ ecryptfs_set_superblock_lower(struct super_block *sb,
}
static inline void
-ecryptfs_set_dentry_private(struct dentry *dentry,
- struct ecryptfs_dentry_info *dentry_info)
+ecryptfs_set_dentry_lower(struct dentry *dentry,
+ struct dentry *lower_dentry)
{
- dentry->d_fsdata = dentry_info;
+ dentry->d_fsdata = lower_dentry;
}
static inline struct dentry *
ecryptfs_dentry_to_lower(struct dentry *dentry)
{
- return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
+ return dentry->d_fsdata;
}
-static inline struct path *
-ecryptfs_dentry_to_lower_path(struct dentry *dentry)
+static inline struct path
+ecryptfs_lower_path(struct dentry *dentry)
{
- return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
+ return (struct path){
+ .mnt = ecryptfs_superblock_to_private(dentry->d_sb)->lower_mnt,
+ .dentry = ecryptfs_dentry_to_lower(dentry)
+ };
}
#define ecryptfs_printk(type, fmt, arg...) \
@@ -532,7 +524,6 @@ extern unsigned int ecryptfs_number_of_users;
extern struct kmem_cache *ecryptfs_auth_tok_list_item_cache;
extern struct kmem_cache *ecryptfs_file_info_cache;
-extern struct kmem_cache *ecryptfs_dentry_info_cache;
extern struct kmem_cache *ecryptfs_inode_info_cache;
extern struct kmem_cache *ecryptfs_sb_info_cache;
extern struct kmem_cache *ecryptfs_header_cache;
@@ -557,20 +548,19 @@ int ecryptfs_encrypt_and_encode_filename(
size_t *encoded_name_size,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
const char *name, size_t name_size);
-struct dentry *ecryptfs_lower_dentry(struct dentry *this_dentry);
void ecryptfs_dump_hex(char *data, int bytes);
int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
int sg_size);
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_rotate_iv(unsigned char *iv);
-int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
+void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_destroy_mount_crypt_stat(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
-int ecryptfs_encrypt_page(struct page *page);
-int ecryptfs_decrypt_page(struct page *page);
+int ecryptfs_encrypt_page(struct folio *folio);
+int ecryptfs_decrypt_page(struct folio *folio);
int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
struct inode *ecryptfs_inode);
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
@@ -653,16 +643,15 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size);
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
- struct page *page_for_lower,
+ struct folio *folio_for_lower,
size_t offset_in_page, size_t size);
int ecryptfs_write(struct inode *inode, char *data, loff_t offset, size_t size);
int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
struct inode *ecryptfs_inode);
-int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
+int ecryptfs_read_lower_page_segment(struct folio *folio_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode);
-struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index);
int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
size_t *length_size);
int ecryptfs_write_packet_length(char *dest, size_t size,
@@ -699,9 +688,9 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
char *data, size_t max_packet_size);
int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
-int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
- loff_t offset);
+void ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
+ loff_t offset);
-extern const struct xattr_handler *ecryptfs_xattr_handlers[];
+extern const struct xattr_handler * const ecryptfs_xattr_handlers[];
#endif /* #ifndef ECRYPTFS_KERNEL_H */
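
ecryptfs_lower_path() now builds a struct path by value from the superblock's lower_mnt and the lower dentry stored in d_fsdata, instead of pointing into per-dentry private data. The returned path borrows references that the superblock and d_fsdata already hold, so callers keep it on the stack and never path_put() it, as the file.c and main.c hunks below do. A hedged sketch of a caller (not from the patch):

/*
 * Illustrative sketch only, not part of the patch: using the by-value lower
 * path. No extra references are taken, so there is no path_put() here.
 */
#include <linux/fs.h>
#include "ecryptfs_kernel.h"

static void example_touch_lower_atime(struct dentry *ecryptfs_dentry)
{
	struct path lower = ecryptfs_lower_path(ecryptfs_dentry);

	touch_atime(&lower);	/* borrows sb->lower_mnt and d_fsdata refs */
}
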
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 18d5b91cb573..7929411837cf 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -33,13 +33,36 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
struct iov_iter *to)
{
ssize_t rc;
- struct path *path;
struct file *file = iocb->ki_filp;
rc = generic_file_read_iter(iocb, to);
if (rc >= 0) {
- path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
- touch_atime(path);
+ struct path path = ecryptfs_lower_path(file->f_path.dentry);
+ touch_atime(&path);
+ }
+ return rc;
+}
+
+/*
+ * ecryptfs_splice_read_update_atime
+ *
+ * filemap_splice_read updates the atime of the upper layer inode, but it
+ * doesn't give us a chance to update the atime of the lower layer inode.
+ * This function is a wrapper around filemap_splice_read that touches the
+ * atime of the lower inode whenever the splice read completes without
+ * error. It is used only for splice reads of regular files; directory
+ * reads go through ecryptfs_readdir() instead.
+ */
+static ssize_t ecryptfs_splice_read_update_atime(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ ssize_t rc;
+
+ rc = filemap_splice_read(in, ppos, pipe, len, flags);
+ if (rc >= 0) {
+ struct path path = ecryptfs_lower_path(in->f_path.dentry);
+ touch_atime(&path);
}
return rc;
}
@@ -53,7 +76,7 @@ struct ecryptfs_getdents_callback {
};
/* Inspired by generic filldir in fs/readdir.c */
-static int
+static bool
ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
int lower_namelen, loff_t offset, u64 ino, unsigned int d_type)
{
@@ -61,18 +84,19 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
container_of(ctx, struct ecryptfs_getdents_callback, ctx);
size_t name_size;
char *name;
- int rc;
+ int err;
+ bool res;
buf->filldir_called++;
- rc = ecryptfs_decode_and_decrypt_filename(&name, &name_size,
- buf->sb, lower_name,
- lower_namelen);
- if (rc) {
- if (rc != -EINVAL) {
+ err = ecryptfs_decode_and_decrypt_filename(&name, &name_size,
+ buf->sb, lower_name,
+ lower_namelen);
+ if (err) {
+ if (err != -EINVAL) {
ecryptfs_printk(KERN_DEBUG,
"%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n",
- __func__, lower_name, rc);
- return rc;
+ __func__, lower_name, err);
+ return false;
}
/* Mask -EINVAL errors as these are most likely due to a plaintext
@@ -81,16 +105,15 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
* the "lost+found" dentry in the root directory of an Ext4
* filesystem.
*/
- return 0;
+ return true;
}
buf->caller->pos = buf->ctx.pos;
- rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
+ res = dir_emit(buf->caller, name, name_size, ino, d_type);
kfree(name);
- if (!rc)
+ if (res)
buf->entries_written++;
-
- return rc;
+ return res;
}
/**
@@ -111,14 +134,8 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
lower_file = ecryptfs_file_to_lower(file);
rc = iterate_dir(lower_file, &buf.ctx);
ctx->pos = buf.ctx.pos;
- if (rc < 0)
- goto out;
- if (buf.filldir_called && !buf.entries_written)
- goto out;
- if (rc >= 0)
- fsstack_copy_attr_atime(inode,
- file_inode(lower_file));
-out:
+ if (rc >= 0 && (buf.entries_written || !buf.filldir_called))
+ fsstack_copy_attr_atime(inode, file_inode(lower_file));
return rc;
}
@@ -174,7 +191,7 @@ static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
* natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
* allows recursive mounting, this will need to be extended.
*/
- if (!lower_file->f_op->mmap)
+ if (!can_mmap_file(lower_file))
return -ENODEV;
return generic_file_mmap(file, vma);
}
@@ -264,6 +281,7 @@ static int ecryptfs_dir_open(struct inode *inode, struct file *file)
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
struct file *lower_file;
+ struct path path;
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
@@ -273,8 +291,8 @@ static int ecryptfs_dir_open(struct inode *inode, struct file *file)
"Error attempting to allocate memory\n");
return -ENOMEM;
}
- lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
- file->f_flags, current_cred());
+ path = ecryptfs_lower_path(ecryptfs_dentry);
+ lower_file = dentry_open(&path, file->f_flags, current_cred());
if (IS_ERR(lower_file)) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
@@ -420,5 +438,5 @@ const struct file_operations ecryptfs_main_fops = {
.release = ecryptfs_release,
.fsync = ecryptfs_fsync,
.fasync = ecryptfs_fasync,
- .splice_read = generic_file_splice_read,
+ .splice_read = ecryptfs_splice_read_update_atime,
};
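
ecryptfs_filldir() is converted to the current dir_context actor convention: the callback returns bool, true meaning "keep iterating" and false meaning "stop", with per-entry errors no longer propagated through the return value. A minimal sketch (not from the patch) of an actor in that style; struct example_buf is hypothetical.

/*
 * Illustrative sketch only, not part of the patch: a dir_context actor
 * using the bool return convention.
 */
#include <linux/fs.h>

struct example_buf {
	struct dir_context ctx;
	struct dir_context *caller;
};

static bool example_filldir(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct example_buf *buf = container_of(ctx, struct example_buf, ctx);

	buf->caller->pos = buf->ctx.pos;
	/*
	 * dir_emit() returns false when the caller's buffer is full;
	 * returning false here makes iterate_dir() stop cleanly.
	 */
	return dir_emit(buf->caller, name, namelen, ino, d_type);
}
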
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 16d50dface59..3978248247dc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -18,22 +18,32 @@
#include <linux/fs_stack.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
#include <linux/fileattr.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ecryptfs_kernel.h"
-static int lock_parent(struct dentry *dentry,
- struct dentry **lower_dentry,
- struct inode **lower_dir)
+static struct dentry *ecryptfs_start_creating_dentry(struct dentry *dentry)
{
- struct dentry *lower_dir_dentry;
+ struct dentry *parent = dget_parent(dentry);
+ struct dentry *ret;
+
+ ret = start_creating_dentry(ecryptfs_dentry_to_lower(parent),
+ ecryptfs_dentry_to_lower(dentry));
+ dput(parent);
+ return ret;
+}
- lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
- *lower_dir = d_inode(lower_dir_dentry);
- *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+static struct dentry *ecryptfs_start_removing_dentry(struct dentry *dentry)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct dentry *ret;
- inode_lock_nested(*lower_dir, I_MUTEX_PARENT);
- return (*lower_dentry)->d_parent == lower_dir_dentry ? 0 : -EINVAL;
+ ret = start_removing_dentry(ecryptfs_dentry_to_lower(parent),
+ ecryptfs_dentry_to_lower(dentry));
+ dput(parent);
+ return ret;
}
static int ecryptfs_inode_test(struct inode *inode, void *lower_inode)
@@ -76,6 +86,14 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
return ERR_PTR(-EXDEV);
+
+ /* Reject dealing with casefold directories. */
+ if (IS_CASEFOLDED(lower_inode)) {
+ pr_err_ratelimited("%s: Can't handle casefolded directory.\n",
+ __func__);
+ return ERR_PTR(-EREMOTE);
+ }
+
if (!igrab(lower_inode))
return ERR_PTR(-ESTALE);
inode = iget5_locked(sb, (unsigned long)lower_inode,
@@ -85,7 +103,7 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
iput(lower_inode);
return ERR_PTR(-EACCES);
}
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
iput(lower_inode);
return inode;
@@ -96,7 +114,7 @@ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
{
struct inode *inode = __ecryptfs_get_inode(lower_inode, sb);
- if (!IS_ERR(inode) && (inode->i_state & I_NEW))
+ if (!IS_ERR(inode) && (inode_state_read_once(inode) & I_NEW))
unlock_new_inode(inode);
return inode;
@@ -131,25 +149,21 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *lower_dir;
int rc;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- dget(lower_dentry); // don't even try to make the lower negative
- if (!rc) {
- if (d_unhashed(lower_dentry))
- rc = -EINVAL;
- else
- rc = vfs_unlink(&init_user_ns, lower_dir, lower_dentry,
- NULL);
- }
+ lower_dentry = ecryptfs_start_removing_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+
+ lower_dir = lower_dentry->d_parent->d_inode;
+ rc = vfs_unlink(&nop_mnt_idmap, lower_dir, lower_dentry, NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
}
fsstack_copy_attr_times(dir, lower_dir);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
out_unlock:
- dput(lower_dentry);
- inode_unlock(lower_dir);
+ end_removing(lower_dentry);
if (!rc)
d_drop(dentry);
return rc;
@@ -176,10 +190,11 @@ ecryptfs_do_create(struct inode *directory_inode,
struct inode *lower_dir;
struct inode *inode;
- rc = lock_parent(ecryptfs_dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_create(&init_user_ns, lower_dir,
- lower_dentry, mode, true);
+ lower_dentry = ecryptfs_start_creating_dentry(ecryptfs_dentry);
+ if (IS_ERR(lower_dentry))
+ return ERR_CAST(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+ rc = vfs_create(&nop_mnt_idmap, lower_dentry, mode, NULL);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
@@ -189,13 +204,13 @@ ecryptfs_do_create(struct inode *directory_inode,
inode = __ecryptfs_get_inode(d_inode(lower_dentry),
directory_inode->i_sb);
if (IS_ERR(inode)) {
- vfs_unlink(&init_user_ns, lower_dir, lower_dentry, NULL);
+ vfs_unlink(&nop_mnt_idmap, lower_dir, lower_dentry, NULL);
goto out_lock;
}
fsstack_copy_attr_times(directory_inode, lower_dir);
fsstack_copy_inode_size(directory_inode, lower_dir);
out_lock:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
return inode;
}
@@ -251,7 +266,7 @@ out:
* Returns zero on success; non-zero on error condition
*/
static int
-ecryptfs_create(struct user_namespace *mnt_userns,
+ecryptfs_create(struct mnt_idmap *idmap,
struct inode *directory_inode, struct dentry *ecryptfs_dentry,
umode_t mode, bool excl)
{
@@ -317,24 +332,15 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
- struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct dentry *lower_parent = ecryptfs_dentry_to_lower(dentry->d_parent);
struct inode *inode, *lower_inode;
- struct ecryptfs_dentry_info *dentry_info;
int rc = 0;
- dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
- if (!dentry_info) {
- dput(lower_dentry);
- return ERR_PTR(-ENOMEM);
- }
-
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
- d_inode(path->dentry));
+ d_inode(lower_parent));
BUG_ON(!d_count(lower_dentry));
- ecryptfs_set_dentry_private(dentry, dentry_info);
- dentry_info->lower_path.mnt = mntget(path->mnt);
- dentry_info->lower_path.dentry = lower_dentry;
+ ecryptfs_set_dentry_lower(dentry, lower_dentry);
/*
* negative dentry can go positive under us here - its parent is not
@@ -363,7 +369,7 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
}
}
- if (inode->i_state & I_NEW)
+ if (inode_state_read_once(inode) & I_NEW)
unlock_new_inode(inode);
return d_splice_alias(inode, dentry);
}
@@ -384,8 +390,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
char *encrypted_and_encoded_name = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct dentry *lower_dir_dentry, *lower_dentry;
- const char *name = ecryptfs_dentry->d_name.name;
- size_t len = ecryptfs_dentry->d_name.len;
+ struct qstr qname = QSTR_INIT(ecryptfs_dentry->d_name.name,
+ ecryptfs_dentry->d_name.len);
struct dentry *res;
int rc = 0;
@@ -394,23 +400,25 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
+ size_t len = qname.len;
rc = ecryptfs_encrypt_and_encode_filename(
&encrypted_and_encoded_name, &len,
- mount_crypt_stat, name, len);
+ mount_crypt_stat, qname.name, len);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt and encode "
"filename; rc = [%d]\n", __func__, rc);
return ERR_PTR(rc);
}
- name = encrypted_and_encoded_name;
+ qname.name = encrypted_and_encoded_name;
+ qname.len = len;
}
- lower_dentry = lookup_one_len_unlocked(name, lower_dir_dentry, len);
+ lower_dentry = lookup_noperm_unlocked(&qname, lower_dir_dentry);
if (IS_ERR(lower_dentry)) {
- ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_noperm() returned "
"[%ld] on lower_dentry = [%s]\n", __func__,
PTR_ERR(lower_dentry),
- name);
+ qname.name);
res = ERR_CAST(lower_dentry);
} else {
res = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry);
@@ -430,10 +438,12 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
file_size_save = i_size_read(d_inode(old_dentry));
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
- rc = lock_parent(new_dentry, &lower_new_dentry, &lower_dir);
- if (!rc)
- rc = vfs_link(lower_old_dentry, &init_user_ns, lower_dir,
- lower_new_dentry, NULL);
+ lower_new_dentry = ecryptfs_start_creating_dentry(new_dentry);
+ if (IS_ERR(lower_new_dentry))
+ return PTR_ERR(lower_new_dentry);
+ lower_dir = lower_new_dentry->d_parent->d_inode;
+ rc = vfs_link(lower_old_dentry, &nop_mnt_idmap, lower_dir,
+ lower_new_dentry, NULL);
if (rc || d_really_is_negative(lower_new_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
@@ -445,7 +455,7 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
ecryptfs_inode_to_lower(d_inode(old_dentry))->i_nlink);
i_size_write(d_inode(new_dentry), file_size_save);
out_lock:
- inode_unlock(lower_dir);
+ end_creating(lower_new_dentry);
return rc;
}
@@ -454,7 +464,7 @@ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
return ecryptfs_do_unlink(dir, dentry, d_inode(dentry));
}
-static int ecryptfs_symlink(struct user_namespace *mnt_userns,
+static int ecryptfs_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname)
{
@@ -465,9 +475,11 @@ static int ecryptfs_symlink(struct user_namespace *mnt_userns,
size_t encoded_symlen;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (rc)
- goto out_lock;
+ lower_dentry = ecryptfs_start_creating_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+
mount_crypt_stat = &ecryptfs_superblock_to_private(
dir->i_sb)->mount_crypt_stat;
rc = ecryptfs_encrypt_and_encode_filename(&encoded_symname,
@@ -476,8 +488,8 @@ static int ecryptfs_symlink(struct user_namespace *mnt_userns,
strlen(symname));
if (rc)
goto out_lock;
- rc = vfs_symlink(&init_user_ns, lower_dir, lower_dentry,
- encoded_symname);
+ rc = vfs_symlink(&nop_mnt_idmap, lower_dir, lower_dentry,
+ encoded_symname, NULL);
kfree(encoded_symname);
if (rc || d_really_is_negative(lower_dentry))
goto out_lock;
@@ -487,24 +499,32 @@ static int ecryptfs_symlink(struct user_namespace *mnt_userns,
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out_lock:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
-static int ecryptfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
+ struct dentry *lower_dir_dentry;
struct inode *lower_dir;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_mkdir(&init_user_ns, lower_dir,
- lower_dentry, mode);
- if (rc || d_really_is_negative(lower_dentry))
+ lower_dentry = ecryptfs_start_creating_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return lower_dentry;
+ lower_dir_dentry = dget(lower_dentry->d_parent);
+ lower_dir = lower_dir_dentry->d_inode;
+ lower_dentry = vfs_mkdir(&nop_mnt_idmap, lower_dir,
+ lower_dentry, mode, NULL);
+ rc = PTR_ERR(lower_dentry);
+ if (IS_ERR(lower_dentry))
+ goto out;
+ rc = 0;
+ if (d_unhashed(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
@@ -513,10 +533,10 @@ static int ecryptfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
fsstack_copy_inode_size(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
out:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
- return rc;
+ return ERR_PTR(rc);
}
static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -525,38 +545,37 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
struct inode *lower_dir;
int rc;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- dget(lower_dentry); // don't even try to make the lower negative
- if (!rc) {
- if (d_unhashed(lower_dentry))
- rc = -EINVAL;
- else
- rc = vfs_rmdir(&init_user_ns, lower_dir, lower_dentry);
- }
+ lower_dentry = ecryptfs_start_removing_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+
+ rc = vfs_rmdir(&nop_mnt_idmap, lower_dir, lower_dentry, NULL);
if (!rc) {
clear_nlink(d_inode(dentry));
fsstack_copy_attr_times(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
}
- dput(lower_dentry);
- inode_unlock(lower_dir);
+ end_removing(lower_dentry);
if (!rc)
d_drop(dentry);
return rc;
}
static int
-ecryptfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_dir;
- rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_mknod(&init_user_ns, lower_dir,
- lower_dentry, mode, dev);
+ lower_dentry = ecryptfs_start_creating_dentry(dentry);
+ if (IS_ERR(lower_dentry))
+ return PTR_ERR(lower_dentry);
+ lower_dir = lower_dentry->d_parent->d_inode;
+
+ rc = vfs_mknod(&nop_mnt_idmap, lower_dir, lower_dentry, mode, dev, NULL);
if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
@@ -565,14 +584,14 @@ ecryptfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out:
- inode_unlock(lower_dir);
+ end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
static int
-ecryptfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ecryptfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -581,7 +600,6 @@ ecryptfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
- struct dentry *trap;
struct inode *target_inode;
struct renamedata rd = {};
@@ -596,30 +614,13 @@ ecryptfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
target_inode = d_inode(new_dentry);
- trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- dget(lower_new_dentry);
- rc = -EINVAL;
- if (lower_old_dentry->d_parent != lower_old_dir_dentry)
- goto out_lock;
- if (lower_new_dentry->d_parent != lower_new_dir_dentry)
- goto out_lock;
- if (d_unhashed(lower_old_dentry) || d_unhashed(lower_new_dentry))
- goto out_lock;
- /* source should not be ancestor of target */
- if (trap == lower_old_dentry)
- goto out_lock;
- /* target should not be ancestor of source */
- if (trap == lower_new_dentry) {
- rc = -ENOTEMPTY;
- goto out_lock;
- }
+ rd.mnt_idmap = &nop_mnt_idmap;
+ rd.old_parent = lower_old_dir_dentry;
+ rd.new_parent = lower_new_dir_dentry;
+ rc = start_renaming_two_dentries(&rd, lower_old_dentry, lower_new_dentry);
+ if (rc)
+ return rc;
- rd.old_mnt_userns = &init_user_ns;
- rd.old_dir = d_inode(lower_old_dir_dentry);
- rd.old_dentry = lower_old_dentry;
- rd.new_mnt_userns = &init_user_ns;
- rd.new_dir = d_inode(lower_new_dir_dentry);
- rd.new_dentry = lower_new_dentry;
rc = vfs_rename(&rd);
if (rc)
goto out_lock;
@@ -630,8 +631,7 @@ ecryptfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
- dput(lower_new_dentry);
- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ end_renaming(&rd);
return rc;
}
@@ -854,7 +854,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
inode_lock(d_inode(lower_dentry));
- rc = notify_change(&init_user_ns, lower_dentry,
+ rc = notify_change(&nop_mnt_idmap, lower_dentry,
&lower_ia, NULL);
inode_unlock(d_inode(lower_dentry));
}
@@ -862,16 +862,16 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
}
static int
-ecryptfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ecryptfs_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
- return inode_permission(&init_user_ns,
+ return inode_permission(&nop_mnt_idmap,
ecryptfs_inode_to_lower(inode), mask);
}
/**
* ecryptfs_setattr
- * @mnt_userns: user namespace of the target mount
+ * @idmap: idmap of the target mount
* @dentry: dentry handle to the inode to modify
* @ia: Structure with flags of what to change and values
*
@@ -882,7 +882,7 @@ ecryptfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
* All other metadata changes will be passed right to the lower filesystem,
* and we will just update our inode to look like the lower.
*/
-static int ecryptfs_setattr(struct user_namespace *mnt_userns,
+static int ecryptfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
@@ -893,11 +893,8 @@ static int ecryptfs_setattr(struct user_namespace *mnt_userns,
struct ecryptfs_crypt_stat *crypt_stat;
crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
- if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) {
- rc = ecryptfs_init_crypt_stat(crypt_stat);
- if (rc)
- return rc;
- }
+ if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
+ ecryptfs_init_crypt_stat(crypt_stat);
inode = d_inode(dentry);
lower_inode = ecryptfs_inode_to_lower(inode);
lower_dentry = ecryptfs_dentry_to_lower(dentry);
@@ -937,7 +934,7 @@ static int ecryptfs_setattr(struct user_namespace *mnt_userns,
}
mutex_unlock(&crypt_stat->cs_mutex);
- rc = setattr_prepare(&init_user_ns, dentry, ia);
+ rc = setattr_prepare(&nop_mnt_idmap, dentry, ia);
if (rc)
goto out;
if (ia->ia_valid & ATTR_SIZE) {
@@ -963,14 +960,14 @@ static int ecryptfs_setattr(struct user_namespace *mnt_userns,
lower_ia.ia_valid &= ~ATTR_MODE;
inode_lock(d_inode(lower_dentry));
- rc = notify_change(&init_user_ns, lower_dentry, &lower_ia, NULL);
+ rc = notify_change(&nop_mnt_idmap, lower_dentry, &lower_ia, NULL);
inode_unlock(d_inode(lower_dentry));
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
-static int ecryptfs_getattr_link(struct user_namespace *mnt_userns,
+static int ecryptfs_getattr_link(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
@@ -980,7 +977,7 @@ static int ecryptfs_getattr_link(struct user_namespace *mnt_userns,
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
- generic_fillattr(&init_user_ns, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
char *target;
size_t targetsiz;
@@ -996,20 +993,21 @@ static int ecryptfs_getattr_link(struct user_namespace *mnt_userns,
return rc;
}
-static int ecryptfs_getattr(struct user_namespace *mnt_userns,
+static int ecryptfs_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct kstat lower_stat;
+ struct path lower_path = ecryptfs_lower_path(dentry);
int rc;
- rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat,
- request_mask, flags);
+ rc = vfs_getattr_nosec(&lower_path, &lower_stat, request_mask, flags);
if (!rc) {
fsstack_copy_attr_all(d_inode(dentry),
ecryptfs_inode_to_lower(d_inode(dentry)));
- generic_fillattr(&init_user_ns, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask,
+ d_inode(dentry), stat);
stat->blocks = lower_stat.blocks;
}
return rc;
@@ -1031,7 +1029,7 @@ ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
goto out;
}
inode_lock(lower_inode);
- rc = __vfs_setxattr_locked(&init_user_ns, lower_dentry, name, value, size, flags, NULL);
+ rc = __vfs_setxattr_locked(&nop_mnt_idmap, lower_dentry, name, value, size, flags, NULL);
inode_unlock(lower_inode);
if (!rc && inode)
fsstack_copy_attr_all(inode, lower_inode);
@@ -1097,29 +1095,51 @@ static int ecryptfs_removexattr(struct dentry *dentry, struct inode *inode,
goto out;
}
inode_lock(lower_inode);
- rc = __vfs_removexattr(&init_user_ns, lower_dentry, name);
+ rc = __vfs_removexattr(&nop_mnt_idmap, lower_dentry, name);
inode_unlock(lower_inode);
out:
return rc;
}
-static int ecryptfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+static int ecryptfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
return vfs_fileattr_get(ecryptfs_dentry_to_lower(dentry), fa);
}
-static int ecryptfs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+static int ecryptfs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc;
- rc = vfs_fileattr_set(&init_user_ns, lower_dentry, fa);
+ rc = vfs_fileattr_set(&nop_mnt_idmap, lower_dentry, fa);
fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry));
return rc;
}
+static struct posix_acl *ecryptfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type)
+{
+ return vfs_get_acl(idmap, ecryptfs_dentry_to_lower(dentry),
+ posix_acl_xattr_name(type));
+}
+
+static int ecryptfs_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct posix_acl *acl,
+ int type)
+{
+ int rc;
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ struct inode *lower_inode = d_inode(lower_dentry);
+
+ rc = vfs_set_acl(&nop_mnt_idmap, lower_dentry,
+ posix_acl_xattr_name(type), acl);
+ if (!rc)
+ fsstack_copy_attr_all(d_inode(dentry), lower_inode);
+ return rc;
+}
+
const struct inode_operations ecryptfs_symlink_iops = {
.get_link = ecryptfs_get_link,
.permission = ecryptfs_permission,
@@ -1143,6 +1163,8 @@ const struct inode_operations ecryptfs_dir_iops = {
.listxattr = ecryptfs_listxattr,
.fileattr_get = ecryptfs_fileattr_get,
.fileattr_set = ecryptfs_fileattr_set,
+ .get_acl = ecryptfs_get_acl,
+ .set_acl = ecryptfs_set_acl,
};
const struct inode_operations ecryptfs_main_iops = {
@@ -1152,6 +1174,8 @@ const struct inode_operations ecryptfs_main_iops = {
.listxattr = ecryptfs_listxattr,
.fileattr_get = ecryptfs_fileattr_get,
.fileattr_set = ecryptfs_fileattr_set,
+ .get_acl = ecryptfs_get_acl,
+ .set_acl = ecryptfs_set_acl,
};
static int ecryptfs_xattr_get(const struct xattr_handler *handler,
@@ -1162,7 +1186,7 @@ static int ecryptfs_xattr_get(const struct xattr_handler *handler,
}
static int ecryptfs_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
@@ -1181,7 +1205,7 @@ static const struct xattr_handler ecryptfs_xattr_handler = {
.set = ecryptfs_xattr_set,
};
-const struct xattr_handler *ecryptfs_xattr_handlers[] = {
+const struct xattr_handler * const ecryptfs_xattr_handlers[] = {
&ecryptfs_xattr_handler,
NULL
};
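
The directory-modifying operations above drop the open-coded lock_parent() in favour of start_creating_dentry()/start_removing_dentry(), which lock the lower parent and validate the lower dentry, paired with end_creating()/end_removing() to undo both. A hedged sketch of the resulting shape; the helpers are assumed to exist exactly as this patch uses them (they are not long-standing VFS API), and example_modify_lower() is hypothetical.

/*
 * Illustrative sketch only, not part of the patch: the lock/validate/unlock
 * shape the converted inode operations follow.
 */
#include <linux/dcache.h>
#include <linux/err.h>

static int example_modify_lower(struct dentry *lower_parent,
				struct dentry *lower_dentry,
				int (*op)(struct dentry *locked_lower))
{
	struct dentry *locked;
	int rc;

	locked = start_creating_dentry(lower_parent, lower_dentry);
	if (IS_ERR(locked))
		return PTR_ERR(locked);

	rc = op(locked);	/* e.g. a vfs_create()/vfs_link() call */

	end_creating(locked);	/* unlocks the lower parent, drops the ref */
	return rc;
}
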
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3fe41964c0d8..bbf8603242fa 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -11,7 +11,6 @@
* Trevor S. Highland <trevor.highland@gmail.com>
*/
-#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/string.h>
#include <linux/pagemap.h>
@@ -300,9 +299,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
* | Key Identifier Size | 1 or 2 bytes |
* | Key Identifier | arbitrary |
* | File Encryption Key Size | 1 or 2 bytes |
+ * | Cipher Code | 1 byte |
* | File Encryption Key | arbitrary |
+ * | Checksum | 2 bytes |
*/
- data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
*packet = kmalloc(data_len, GFP_KERNEL);
message = *packet;
if (!message) {
@@ -599,10 +600,7 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
struct crypto_skcipher *skcipher_tfm;
struct skcipher_request *skcipher_req;
char iv[ECRYPTFS_MAX_IV_BYTES];
- char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
- char tmp_hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
- struct crypto_shash *hash_tfm;
- struct shash_desc *hash_desc;
+ char hash[MD5_DIGEST_SIZE];
};
/*
@@ -739,51 +737,15 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
"password tokens\n", __func__);
goto out_free_unlock;
}
- s->hash_tfm = crypto_alloc_shash(ECRYPTFS_TAG_70_DIGEST, 0, 0);
- if (IS_ERR(s->hash_tfm)) {
- rc = PTR_ERR(s->hash_tfm);
- printk(KERN_ERR "%s: Error attempting to "
- "allocate hash crypto context; rc = [%d]\n",
- __func__, rc);
- goto out_free_unlock;
- }
-
- s->hash_desc = kmalloc(sizeof(*s->hash_desc) +
- crypto_shash_descsize(s->hash_tfm), GFP_KERNEL);
- if (!s->hash_desc) {
- rc = -ENOMEM;
- goto out_release_free_unlock;
- }
- s->hash_desc->tfm = s->hash_tfm;
-
- rc = crypto_shash_digest(s->hash_desc,
- (u8 *)s->auth_tok->token.password.session_key_encryption_key,
- s->auth_tok->token.password.session_key_encryption_key_bytes,
- s->hash);
- if (rc) {
- printk(KERN_ERR
- "%s: Error computing crypto hash; rc = [%d]\n",
- __func__, rc);
- goto out_release_free_unlock;
- }
+ md5(s->auth_tok->token.password.session_key_encryption_key,
+ s->auth_tok->token.password.session_key_encryption_key_bytes,
+ s->hash);
for (s->j = 0; s->j < (s->num_rand_bytes - 1); s->j++) {
s->block_aligned_filename[s->j] =
- s->hash[(s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)];
- if ((s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)
- == (ECRYPTFS_TAG_70_DIGEST_SIZE - 1)) {
- rc = crypto_shash_digest(s->hash_desc, (u8 *)s->hash,
- ECRYPTFS_TAG_70_DIGEST_SIZE,
- s->tmp_hash);
- if (rc) {
- printk(KERN_ERR
- "%s: Error computing crypto hash; "
- "rc = [%d]\n", __func__, rc);
- goto out_release_free_unlock;
- }
- memcpy(s->hash, s->tmp_hash,
- ECRYPTFS_TAG_70_DIGEST_SIZE);
- }
+ s->hash[s->j % MD5_DIGEST_SIZE];
+ if ((s->j % MD5_DIGEST_SIZE) == (MD5_DIGEST_SIZE - 1))
+ md5(s->hash, MD5_DIGEST_SIZE, s->hash);
if (s->block_aligned_filename[s->j] == '\0')
s->block_aligned_filename[s->j] = ECRYPTFS_NON_NULL;
}
@@ -796,7 +758,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
"convert filename memory to scatterlist; rc = [%d]. "
"block_aligned_filename_size = [%zd]\n", __func__, rc,
s->block_aligned_filename_size);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
s->dst_sg, 2);
@@ -805,7 +767,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
"convert encrypted filename memory to scatterlist; "
"rc = [%d]. block_aligned_filename_size = [%zd]\n",
__func__, rc, s->block_aligned_filename_size);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
/* The characters in the first block effectively do the job
* of the IV here, so we just use 0's for the IV. Note the
@@ -823,7 +785,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
rc,
s->auth_tok->token.password.session_key_encryption_key,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
s->block_aligned_filename_size, s->iv);
@@ -831,13 +793,11 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt filename; "
"rc = [%d]\n", __func__, rc);
- goto out_release_free_unlock;
+ goto out_free_unlock;
}
s->i += s->block_aligned_filename_size;
(*packet_size) = s->i;
(*remaining_bytes) -= (*packet_size);
-out_release_free_unlock:
- crypto_free_shash(s->hash_tfm);
out_free_unlock:
kfree_sensitive(s->block_aligned_filename);
out_unlock:
@@ -848,7 +808,6 @@ out:
key_put(auth_tok_key);
}
skcipher_request_free(s->skcipher_req);
- kfree_sensitive(s->hash_desc);
kfree(s);
return rc;
}
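
The Tag 70 packet code drops the crypto_shash plumbing in favour of the one-shot md5() library helper; its (data, length, 16-byte digest) signature is taken from the calls in the hunks above and from <crypto/md5.h>. The padding loop still expands the session key into non-NUL bytes by iterated MD5. A hedged sketch of that expansion in isolation; example_md5_pad() and NON_NUL are hypothetical stand-ins.

/*
 * Illustrative sketch only, not part of the patch: expand a secret into
 * 'len' non-NUL padding bytes by iterated MD5, as the Tag 70 code above
 * does. Assumes the one-shot md5() helper and MD5_DIGEST_SIZE from
 * <crypto/md5.h>; NON_NUL substitutes for ECRYPTFS_NON_NULL.
 */
#include <crypto/md5.h>
#include <linux/string.h>
#include <linux/types.h>

#define NON_NUL 0x42

static void example_md5_pad(const u8 *secret, size_t secret_len,
			    u8 *out, size_t len)
{
	u8 hash[MD5_DIGEST_SIZE];
	size_t i;

	md5(secret, secret_len, hash);
	for (i = 0; i < len; i++) {
		out[i] = hash[i % MD5_DIGEST_SIZE];
		if ((i % MD5_DIGEST_SIZE) == (MD5_DIGEST_SIZE - 1))
			md5(hash, MD5_DIGEST_SIZE, hash);	/* chain */
		if (out[i] == '\0')
			out[i] = NON_NUL;
	}
	memzero_explicit(hash, sizeof(hash));
}
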
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index d66bbd2df191..c12dc680f8fe 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -12,14 +12,16 @@
#include <linux/dcache.h>
#include <linux/file.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/skbuff.h>
-#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/key.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/fs_stack.h>
+#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include "ecryptfs_kernel.h"
@@ -105,15 +107,14 @@ static int ecryptfs_init_lower_file(struct dentry *dentry,
struct file **lower_file)
{
const struct cred *cred = current_cred();
- struct path *path = ecryptfs_dentry_to_lower_path(dentry);
+ struct path path = ecryptfs_lower_path(dentry);
int rc;
- rc = ecryptfs_privileged_open(lower_file, path->dentry, path->mnt,
- cred);
+ rc = ecryptfs_privileged_open(lower_file, path.dentry, path.mnt, cred);
if (rc) {
printk(KERN_ERR "Error opening lower file "
"for lower_dentry [0x%p] and lower_mnt [0x%p]; "
- "rc = [%d]\n", path->dentry, path->mnt, rc);
+ "rc = [%d]\n", path.dentry, path.mnt, rc);
(*lower_file) = NULL;
}
return rc;
@@ -153,32 +154,30 @@ void ecryptfs_put_lower_file(struct inode *inode)
}
}
-enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
- ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
- ecryptfs_opt_ecryptfs_key_bytes,
- ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata,
- ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig,
- ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes,
- ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only,
- ecryptfs_opt_check_dev_ruid,
- ecryptfs_opt_err };
-
-static const match_table_t tokens = {
- {ecryptfs_opt_sig, "sig=%s"},
- {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
- {ecryptfs_opt_cipher, "cipher=%s"},
- {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
- {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
- {ecryptfs_opt_passthrough, "ecryptfs_passthrough"},
- {ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"},
- {ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"},
- {ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"},
- {ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"},
- {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"},
- {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"},
- {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"},
- {ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"},
- {ecryptfs_opt_err, NULL}
+enum {
+ Opt_sig, Opt_ecryptfs_sig, Opt_cipher, Opt_ecryptfs_cipher,
+ Opt_ecryptfs_key_bytes, Opt_passthrough, Opt_xattr_metadata,
+ Opt_encrypted_view, Opt_fnek_sig, Opt_fn_cipher,
+ Opt_fn_cipher_key_bytes, Opt_unlink_sigs, Opt_mount_auth_tok_only,
+ Opt_check_dev_ruid
+};
+
+static const struct fs_parameter_spec ecryptfs_fs_param_spec[] = {
+ fsparam_string ("sig", Opt_sig),
+ fsparam_string ("ecryptfs_sig", Opt_ecryptfs_sig),
+ fsparam_string ("cipher", Opt_cipher),
+ fsparam_string ("ecryptfs_cipher", Opt_ecryptfs_cipher),
+ fsparam_u32 ("ecryptfs_key_bytes", Opt_ecryptfs_key_bytes),
+ fsparam_flag ("ecryptfs_passthrough", Opt_passthrough),
+ fsparam_flag ("ecryptfs_xattr_metadata", Opt_xattr_metadata),
+ fsparam_flag ("ecryptfs_encrypted_view", Opt_encrypted_view),
+ fsparam_string ("ecryptfs_fnek_sig", Opt_fnek_sig),
+ fsparam_string ("ecryptfs_fn_cipher", Opt_fn_cipher),
+ fsparam_u32 ("ecryptfs_fn_key_bytes", Opt_fn_cipher_key_bytes),
+ fsparam_flag ("ecryptfs_unlink_sigs", Opt_unlink_sigs),
+ fsparam_flag ("ecryptfs_mount_auth_tok_only", Opt_mount_auth_tok_only),
+ fsparam_flag ("ecryptfs_check_dev_ruid", Opt_check_dev_ruid),
+ {}
};
static int ecryptfs_init_global_auth_toks(
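
The match_table_t/strsep() parser is replaced by the new mount API: options are declared once in an fs_parameter_spec table and each parameter is classified by fs_parse() inside ->parse_param(), as ecryptfs_parse_param() does below. A minimal sketch (not from the patch) of that flow with a two-option table; the option names and example_ctx are hypothetical.

/*
 * Illustrative sketch only, not part of the patch: the fs_parse() flow.
 */
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_example_flag, Opt_example_size };

static const struct fs_parameter_spec example_param_spec[] = {
	fsparam_flag("example_flag", Opt_example_flag),
	fsparam_u32 ("example_size", Opt_example_size),
	{}
};

struct example_ctx {
	bool flag;
	u32 size;
};

static int example_parse_param(struct fs_context *fc,
			       struct fs_parameter *param)
{
	struct example_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, example_param_spec, param, &result);
	if (opt < 0)
		return opt;		/* unknown or malformed option */

	switch (opt) {
	case Opt_example_flag:
		ctx->flag = true;
		break;
	case Opt_example_size:
		ctx->size = result.uint_32;	/* already validated as u32 */
		break;
	}
	return 0;
}
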
@@ -219,19 +218,20 @@ static void ecryptfs_init_mount_crypt_stat(
mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED;
}
+struct ecryptfs_fs_context {
+ /* Mount option status trackers */
+ bool check_ruid;
+ bool sig_set;
+ bool cipher_name_set;
+ bool cipher_key_bytes_set;
+ bool fn_cipher_name_set;
+ bool fn_cipher_key_bytes_set;
+};
+
/**
- * ecryptfs_parse_options
- * @sbi: The ecryptfs super block
- * @options: The options passed to the kernel
- * @check_ruid: set to 1 if device uid should be checked against the ruid
- *
- * Parse mount options:
- * debug=N - ecryptfs_verbosity level for debug output
- * sig=XXX - description(signature) of the key to use
- *
- * Returns the dentry object of the lower-level (lower/interposed)
- * directory; We want to mount our stackable file system on top of
- * that lower directory.
+ * ecryptfs_parse_param
+ * @fc: The ecryptfs filesystem context
+ * @param: The mount parameter to parse
*
* The signature of the key to use must be the description of a key
* already in the keyring. Mounting will fail if the key can not be
@@ -239,157 +239,118 @@ static void ecryptfs_init_mount_crypt_stat(
*
* Returns zero on success; non-zero on error
*/
-static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
- uid_t *check_ruid)
+static int ecryptfs_parse_param(
+ struct fs_context *fc,
+ struct fs_parameter *param)
{
- char *p;
- int rc = 0;
- int sig_set = 0;
- int cipher_name_set = 0;
- int fn_cipher_name_set = 0;
- int cipher_key_bytes;
- int cipher_key_bytes_set = 0;
- int fn_cipher_key_bytes;
- int fn_cipher_key_bytes_set = 0;
+ int rc;
+ int opt;
+ struct fs_parse_result result;
+ struct ecryptfs_fs_context *ctx = fc->fs_private;
+ struct ecryptfs_sb_info *sbi = fc->s_fs_info;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&sbi->mount_crypt_stat;
- substring_t args[MAX_OPT_ARGS];
- int token;
- char *sig_src;
- char *cipher_name_dst;
- char *cipher_name_src;
- char *fn_cipher_name_dst;
- char *fn_cipher_name_src;
- char *fnek_dst;
- char *fnek_src;
- char *cipher_key_bytes_src;
- char *fn_cipher_key_bytes_src;
- u8 cipher_code;
- *check_ruid = 0;
+ opt = fs_parse(fc, ecryptfs_fs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
- if (!options) {
- rc = -EINVAL;
- goto out;
- }
- ecryptfs_init_mount_crypt_stat(mount_crypt_stat);
- while ((p = strsep(&options, ",")) != NULL) {
- if (!*p)
- continue;
- token = match_token(p, tokens, args);
- switch (token) {
- case ecryptfs_opt_sig:
- case ecryptfs_opt_ecryptfs_sig:
- sig_src = args[0].from;
- rc = ecryptfs_add_global_auth_tok(mount_crypt_stat,
- sig_src, 0);
- if (rc) {
- printk(KERN_ERR "Error attempting to register "
- "global sig; rc = [%d]\n", rc);
- goto out;
- }
- sig_set = 1;
- break;
- case ecryptfs_opt_cipher:
- case ecryptfs_opt_ecryptfs_cipher:
- cipher_name_src = args[0].from;
- cipher_name_dst =
- mount_crypt_stat->
- global_default_cipher_name;
- strncpy(cipher_name_dst, cipher_name_src,
- ECRYPTFS_MAX_CIPHER_NAME_SIZE);
- cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
- cipher_name_set = 1;
- break;
- case ecryptfs_opt_ecryptfs_key_bytes:
- cipher_key_bytes_src = args[0].from;
- cipher_key_bytes =
- (int)simple_strtol(cipher_key_bytes_src,
- &cipher_key_bytes_src, 0);
- mount_crypt_stat->global_default_cipher_key_size =
- cipher_key_bytes;
- cipher_key_bytes_set = 1;
- break;
- case ecryptfs_opt_passthrough:
- mount_crypt_stat->flags |=
- ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED;
- break;
- case ecryptfs_opt_xattr_metadata:
- mount_crypt_stat->flags |=
- ECRYPTFS_XATTR_METADATA_ENABLED;
- break;
- case ecryptfs_opt_encrypted_view:
- mount_crypt_stat->flags |=
- ECRYPTFS_XATTR_METADATA_ENABLED;
- mount_crypt_stat->flags |=
- ECRYPTFS_ENCRYPTED_VIEW_ENABLED;
- break;
- case ecryptfs_opt_fnek_sig:
- fnek_src = args[0].from;
- fnek_dst =
- mount_crypt_stat->global_default_fnek_sig;
- strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX);
- mount_crypt_stat->global_default_fnek_sig[
- ECRYPTFS_SIG_SIZE_HEX] = '\0';
- rc = ecryptfs_add_global_auth_tok(
- mount_crypt_stat,
- mount_crypt_stat->global_default_fnek_sig,
- ECRYPTFS_AUTH_TOK_FNEK);
- if (rc) {
- printk(KERN_ERR "Error attempting to register "
- "global fnek sig [%s]; rc = [%d]\n",
- mount_crypt_stat->global_default_fnek_sig,
- rc);
- goto out;
- }
- mount_crypt_stat->flags |=
- (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES
- | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK);
- break;
- case ecryptfs_opt_fn_cipher:
- fn_cipher_name_src = args[0].from;
- fn_cipher_name_dst =
- mount_crypt_stat->global_default_fn_cipher_name;
- strncpy(fn_cipher_name_dst, fn_cipher_name_src,
- ECRYPTFS_MAX_CIPHER_NAME_SIZE);
- mount_crypt_stat->global_default_fn_cipher_name[
- ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
- fn_cipher_name_set = 1;
- break;
- case ecryptfs_opt_fn_cipher_key_bytes:
- fn_cipher_key_bytes_src = args[0].from;
- fn_cipher_key_bytes =
- (int)simple_strtol(fn_cipher_key_bytes_src,
- &fn_cipher_key_bytes_src, 0);
- mount_crypt_stat->global_default_fn_cipher_key_bytes =
- fn_cipher_key_bytes;
- fn_cipher_key_bytes_set = 1;
- break;
- case ecryptfs_opt_unlink_sigs:
- mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS;
- break;
- case ecryptfs_opt_mount_auth_tok_only:
- mount_crypt_stat->flags |=
- ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY;
- break;
- case ecryptfs_opt_check_dev_ruid:
- *check_ruid = 1;
- break;
- case ecryptfs_opt_err:
- default:
- printk(KERN_WARNING
- "%s: eCryptfs: unrecognized option [%s]\n",
- __func__, p);
+ switch (opt) {
+ case Opt_sig:
+ case Opt_ecryptfs_sig:
+ rc = ecryptfs_add_global_auth_tok(mount_crypt_stat,
+ param->string, 0);
+ if (rc) {
+ printk(KERN_ERR "Error attempting to register "
+ "global sig; rc = [%d]\n", rc);
+ return rc;
}
+ ctx->sig_set = 1;
+ break;
+ case Opt_cipher:
+ case Opt_ecryptfs_cipher:
+ strscpy(mount_crypt_stat->global_default_cipher_name,
+ param->string);
+ ctx->cipher_name_set = 1;
+ break;
+ case Opt_ecryptfs_key_bytes:
+ mount_crypt_stat->global_default_cipher_key_size =
+ result.uint_32;
+ ctx->cipher_key_bytes_set = 1;
+ break;
+ case Opt_passthrough:
+ mount_crypt_stat->flags |=
+ ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED;
+ break;
+ case Opt_xattr_metadata:
+ mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED;
+ break;
+ case Opt_encrypted_view:
+ mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED;
+ mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED;
+ break;
+ case Opt_fnek_sig:
+ strscpy(mount_crypt_stat->global_default_fnek_sig,
+ param->string);
+ rc = ecryptfs_add_global_auth_tok(
+ mount_crypt_stat,
+ mount_crypt_stat->global_default_fnek_sig,
+ ECRYPTFS_AUTH_TOK_FNEK);
+ if (rc) {
+ printk(KERN_ERR "Error attempting to register "
+ "global fnek sig [%s]; rc = [%d]\n",
+ mount_crypt_stat->global_default_fnek_sig, rc);
+ return rc;
+ }
+ mount_crypt_stat->flags |=
+ (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES
+ | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK);
+ break;
+ case Opt_fn_cipher:
+ strscpy(mount_crypt_stat->global_default_fn_cipher_name,
+ param->string);
+ ctx->fn_cipher_name_set = 1;
+ break;
+ case Opt_fn_cipher_key_bytes:
+ mount_crypt_stat->global_default_fn_cipher_key_bytes =
+ result.uint_32;
+ ctx->fn_cipher_key_bytes_set = 1;
+ break;
+ case Opt_unlink_sigs:
+ mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS;
+ break;
+ case Opt_mount_auth_tok_only:
+ mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY;
+ break;
+ case Opt_check_dev_ruid:
+ ctx->check_ruid = 1;
+ break;
+ default:
+ return -EINVAL;
}
- if (!sig_set) {
+
+ return 0;
+}
+
+static int ecryptfs_validate_options(struct fs_context *fc)
+{
+ int rc = 0;
+ u8 cipher_code;
+ struct ecryptfs_fs_context *ctx = fc->fs_private;
+ struct ecryptfs_sb_info *sbi = fc->s_fs_info;
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+
+ mount_crypt_stat = &sbi->mount_crypt_stat;
+
+ if (!ctx->sig_set) {
rc = -EINVAL;
ecryptfs_printk(KERN_ERR, "You must supply at least one valid "
"auth tok signature as a mount "
"parameter; see the eCryptfs README\n");
goto out;
}
- if (!cipher_name_set) {
+ if (!ctx->cipher_name_set) {
int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE);
@@ -397,13 +358,13 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
ECRYPTFS_DEFAULT_CIPHER);
}
if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
- && !fn_cipher_name_set)
+ && !ctx->fn_cipher_name_set)
strcpy(mount_crypt_stat->global_default_fn_cipher_name,
mount_crypt_stat->global_default_cipher_name);
- if (!cipher_key_bytes_set)
+ if (!ctx->cipher_key_bytes_set)
mount_crypt_stat->global_default_cipher_key_size = 0;
if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
- && !fn_cipher_key_bytes_set)
+ && !ctx->fn_cipher_key_bytes_set)
mount_crypt_stat->global_default_fn_cipher_key_bytes =
mount_crypt_stat->global_default_cipher_key_size;
@@ -467,45 +428,40 @@ struct kmem_cache *ecryptfs_sb_info_cache;
static struct file_system_type ecryptfs_fs_type;
/*
- * ecryptfs_mount
- * @fs_type: The filesystem type that the superblock should belong to
- * @flags: The flags associated with the mount
- * @dev_name: The path to mount over
- * @raw_data: The options passed into the kernel
+ * ecryptfs_get_tree
+ * @fc: The filesystem context
*/
-static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
+static int ecryptfs_get_tree(struct fs_context *fc)
{
struct super_block *s;
- struct ecryptfs_sb_info *sbi;
+ struct ecryptfs_fs_context *ctx = fc->fs_private;
+ struct ecryptfs_sb_info *sbi = fc->s_fs_info;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
- struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
struct path path;
- uid_t check_ruid;
int rc;
- sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
- if (!sbi) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (!dev_name) {
+ if (!fc->source) {
rc = -EINVAL;
err = "Device name cannot be null";
goto out;
}
- rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
+ mount_crypt_stat = &sbi->mount_crypt_stat;
+ rc = ecryptfs_validate_options(fc);
if (rc) {
- err = "Error parsing options";
+ err = "Error validating options";
goto out;
}
- mount_crypt_stat = &sbi->mount_crypt_stat;
- s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+ if (fips_enabled) {
+ rc = -EINVAL;
+ err = "eCryptfs support is disabled due to FIPS";
+ goto out;
+ }
+
+ s = sget_fc(fc, NULL, set_anon_super_fc);
if (IS_ERR(s)) {
rc = PTR_ERR(s);
goto out;
@@ -521,10 +477,10 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
sbi = NULL;
s->s_op = &ecryptfs_sops;
s->s_xattr = ecryptfs_xattr_handlers;
- s->s_d_op = &ecryptfs_dops;
+ set_default_d_op(s, &ecryptfs_dops);
err = "Reading sb failed";
- rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+ rc = kern_path(fc->source, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (rc) {
ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
goto out1;
@@ -537,13 +493,14 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out_free;
}
- if (mnt_user_ns(path.mnt) != &init_user_ns) {
+ if (is_idmapped_mnt(path.mnt)) {
rc = -EINVAL;
printk(KERN_ERR "Mounting on idmapped mounts currently disallowed\n");
goto out_free;
}
- if (check_ruid && !uid_eq(d_inode(path.dentry)->i_uid, current_uid())) {
+ if (ctx->check_ruid &&
+ !uid_eq(d_inode(path.dentry)->i_uid, current_uid())) {
rc = -EPERM;
printk(KERN_ERR "Mount of device (uid: %d) not owned by "
"requested user (uid: %d)\n",
@@ -558,7 +515,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
* Set the POSIX ACL flag based on whether they're enabled in the lower
* mount.
*/
- s->s_flags = flags & ~SB_POSIXACL;
+ s->s_flags = fc->sb_flags & ~SB_POSIXACL;
s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
/**
@@ -591,29 +548,23 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out_free;
}
- rc = -ENOMEM;
- root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
- if (!root_info)
- goto out_free;
-
- /* ->kill_sb() will take care of root_info */
- ecryptfs_set_dentry_private(s->s_root, root_info);
- root_info->lower_path = path;
+ ecryptfs_set_dentry_lower(s->s_root, path.dentry);
+ ecryptfs_superblock_to_private(s)->lower_mnt = path.mnt;
s->s_flags |= SB_ACTIVE;
- return dget(s->s_root);
+ fc->root = dget(s->s_root);
+ return 0;
out_free:
path_put(&path);
out1:
deactivate_locked_super(s);
out:
- if (sbi) {
+ if (sbi)
ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
- kmem_cache_free(ecryptfs_sb_info_cache, sbi);
- }
+
printk(KERN_ERR "%s; rc = [%d]\n", err, rc);
- return ERR_PTR(rc);
+ return rc;
}
/**
@@ -628,14 +579,59 @@ static void ecryptfs_kill_block_super(struct super_block *sb)
kill_anon_super(sb);
if (!sb_info)
return;
+ mntput(sb_info->lower_mnt);
ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
}
+static void ecryptfs_free_fc(struct fs_context *fc)
+{
+ struct ecryptfs_fs_context *ctx = fc->fs_private;
+ struct ecryptfs_sb_info *sbi = fc->s_fs_info;
+
+ kfree(ctx);
+
+ if (sbi) {
+ ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
+ kmem_cache_free(ecryptfs_sb_info_cache, sbi);
+ }
+}
+
+static const struct fs_context_operations ecryptfs_context_ops = {
+ .free = ecryptfs_free_fc,
+ .parse_param = ecryptfs_parse_param,
+ .get_tree = ecryptfs_get_tree,
+ .reconfigure = NULL,
+};
+
+static int ecryptfs_init_fs_context(struct fs_context *fc)
+{
+ struct ecryptfs_fs_context *ctx;
+ struct ecryptfs_sb_info *sbi = NULL;
+
+ ctx = kzalloc(sizeof(struct ecryptfs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
+ if (!sbi) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+
+ ecryptfs_init_mount_crypt_stat(&sbi->mount_crypt_stat);
+
+ fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
+ fc->ops = &ecryptfs_context_ops;
+ return 0;
+}
+
static struct file_system_type ecryptfs_fs_type = {
.owner = THIS_MODULE,
.name = "ecryptfs",
- .mount = ecryptfs_mount,
+ .init_fs_context = ecryptfs_init_fs_context,
+ .parameters = ecryptfs_fs_param_spec,
.kill_sb = ecryptfs_kill_block_super,
.fs_flags = 0
};
@@ -672,11 +668,6 @@ static struct ecryptfs_cache_info {
.size = sizeof(struct ecryptfs_file_info),
},
{
- .cache = &ecryptfs_dentry_info_cache,
- .name = "ecryptfs_dentry_info_cache",
- .size = sizeof(struct ecryptfs_dentry_info),
- },
- {
.cache = &ecryptfs_inode_info_cache,
.name = "ecryptfs_inode_cache",
.size = sizeof(struct ecryptfs_inode_info),
@@ -769,7 +760,7 @@ static struct kobject *ecryptfs_kobj;
static ssize_t version_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
- return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK);
+ return sysfs_emit(buff, "%d\n", ECRYPTFS_VERSIONING_MASK);
}
static struct kobj_attribute version_attr = __ATTR_RO(version);
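The main.c hunks above convert eCryptfs from the legacy ->mount()/sget() path to the new mount API: the fs_context carries parse-time state in fc->fs_private and the future superblock info in fc->s_fs_info, and parsing, validation and tree creation become separate fs_context_operations callbacks. As a rough orientation only, a minimal filesystem following the same pattern is sketched below; the "examplefs" names and its single "verbose" option are hypothetical, and get_tree_nodev() stands in for the sget_fc() stacking that eCryptfs actually needs.

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/module.h>
#include <linux/slab.h>

struct examplefs_ctx {
	bool verbose;			/* parsed option, consumed at mount time */
};

enum { Opt_verbose };

static const struct fs_parameter_spec examplefs_param_spec[] = {
	fsparam_flag("verbose", Opt_verbose),
	{}
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct examplefs_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, examplefs_param_spec, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_verbose:
		ctx->verbose = true;
		break;
	}
	return 0;
}

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* set sb->s_op, create the root inode, etc. */
	return -ENOSYS;			/* placeholder in this sketch */
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, examplefs_fill_super);
}

static void examplefs_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);		/* called whether or not the mount succeeded */
}

static const struct fs_context_operations examplefs_context_ops = {
	.parse_param	= examplefs_parse_param,
	.get_tree	= examplefs_get_tree,
	.free		= examplefs_free_fc,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct examplefs_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	fc->fs_private = ctx;
	fc->ops = &examplefs_context_ops;
	return 0;
}

The corresponding file_system_type then points .init_fs_context at this function and .parameters at the spec, exactly as the ecryptfs_fs_type change above does; eCryptfs additionally allocates its sb_info in init_fs_context because validation in ->get_tree needs it.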
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 7d85e64ea62f..2c2b12fedeae 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -19,51 +19,33 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/xattr.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ecryptfs_kernel.h"
/*
- * ecryptfs_get_locked_page
- *
- * Get one page from cache or lower f/s, return error otherwise.
- *
- * Returns locked and up-to-date page (if ok), with increased
- * refcnt.
- */
-struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
-{
- struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
- if (!IS_ERR(page))
- lock_page(page);
- return page;
-}
-
-/**
- * ecryptfs_writepage
- * @page: Page that is locked before this call is made
- * @wbc: Write-back control structure
- *
- * Returns zero on success; non-zero otherwise
- *
* This is where we encrypt the data and pass the encrypted data to
* the lower filesystem. In OpenPGP-compatible mode, we operate on
* entire underlying packets.
*/
-static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
+static int ecryptfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- int rc;
-
- rc = ecryptfs_encrypt_page(page);
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error encrypting "
- "page (upper index [0x%.16lx])\n", page->index);
- ClearPageUptodate(page);
- goto out;
+ struct folio *folio = NULL;
+ int error;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
+ error = ecryptfs_encrypt_page(folio);
+ if (error) {
+ ecryptfs_printk(KERN_WARNING,
+ "Error encrypting folio (index [0x%.16lx])\n",
+ folio->index);
+ folio_clear_uptodate(folio);
+ mapping_set_error(mapping, error);
+ }
+ folio_unlock(folio);
}
- SetPageUptodate(page);
-out:
- unlock_page(page);
- return rc;
+
+ return error;
}
static void strip_xattr_flag(char *page_virt,
@@ -97,7 +79,7 @@ static void strip_xattr_flag(char *page_virt,
/**
* ecryptfs_copy_up_encrypted_with_header
- * @page: Sort of a ``virtual'' representation of the encrypted lower
+ * @folio: Sort of a ``virtual'' representation of the encrypted lower
* file. The actual lower file does not have the metadata in
* the header. This is locked.
* @crypt_stat: The eCryptfs inode's cryptographic context
@@ -106,7 +88,7 @@ static void strip_xattr_flag(char *page_virt,
* seeing, with the header information inserted.
*/
static int
-ecryptfs_copy_up_encrypted_with_header(struct page *page,
+ecryptfs_copy_up_encrypted_with_header(struct folio *folio,
struct ecryptfs_crypt_stat *crypt_stat)
{
loff_t extent_num_in_page = 0;
@@ -115,9 +97,9 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
int rc = 0;
while (extent_num_in_page < num_extents_per_page) {
- loff_t view_extent_num = ((((loff_t)page->index)
+ loff_t view_extent_num = ((loff_t)folio->index
* num_extents_per_page)
- + extent_num_in_page);
+ + extent_num_in_page;
size_t num_header_extents_at_front =
(crypt_stat->metadata_size / crypt_stat->extent_size);
@@ -125,21 +107,21 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
/* This is a header extent */
char *page_virt;
- page_virt = kmap_atomic(page);
+ page_virt = kmap_local_folio(folio, 0);
memset(page_virt, 0, PAGE_SIZE);
/* TODO: Support more than one header extent */
if (view_extent_num == 0) {
size_t written;
rc = ecryptfs_read_xattr_region(
- page_virt, page->mapping->host);
+ page_virt, folio->mapping->host);
strip_xattr_flag(page_virt + 16, crypt_stat);
ecryptfs_write_header_metadata(page_virt + 20,
crypt_stat,
&written);
}
- kunmap_atomic(page_virt);
- flush_dcache_page(page);
+ kunmap_local(page_virt);
+ flush_dcache_folio(folio);
if (rc) {
printk(KERN_ERR "%s: Error reading xattr "
"region; rc = [%d]\n", __func__, rc);
@@ -152,9 +134,9 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
- crypt_stat->metadata_size);
rc = ecryptfs_read_lower_page_segment(
- page, (lower_offset >> PAGE_SHIFT),
+ folio, (lower_offset >> PAGE_SHIFT),
(lower_offset & ~PAGE_MASK),
- crypt_stat->extent_size, page->mapping->host);
+ crypt_stat->extent_size, folio->mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
"extent at offset [%lld] in the lower "
@@ -170,133 +152,129 @@ out:
}
/**
- * ecryptfs_readpage
+ * ecryptfs_read_folio
* @file: An eCryptfs file
- * @page: Page from eCryptfs inode mapping into which to stick the read data
+ * @folio: Folio from eCryptfs inode mapping into which to stick the read data
*
- * Read in a page, decrypting if necessary.
+ * Read in a folio, decrypting if necessary.
*
* Returns zero on success; non-zero on error.
*/
-static int ecryptfs_readpage(struct file *file, struct page *page)
+static int ecryptfs_read_folio(struct file *file, struct folio *folio)
{
+ struct inode *inode = folio->mapping->host;
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
- int rc = 0;
+ &ecryptfs_inode_to_private(inode)->crypt_stat;
+ int err = 0;
if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
- PAGE_SIZE,
- page->mapping->host);
+ err = ecryptfs_read_lower_page_segment(folio, folio->index, 0,
+ folio_size(folio), inode);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
- rc = ecryptfs_copy_up_encrypted_with_header(page,
- crypt_stat);
- if (rc) {
+ err = ecryptfs_copy_up_encrypted_with_header(folio,
+ crypt_stat);
+ if (err) {
printk(KERN_ERR "%s: Error attempting to copy "
"the encrypted content from the lower "
"file whilst inserting the metadata "
- "from the xattr into the header; rc = "
- "[%d]\n", __func__, rc);
+ "from the xattr into the header; err = "
+ "[%d]\n", __func__, err);
goto out;
}
} else {
- rc = ecryptfs_read_lower_page_segment(
- page, page->index, 0, PAGE_SIZE,
- page->mapping->host);
- if (rc) {
- printk(KERN_ERR "Error reading page; rc = "
- "[%d]\n", rc);
+ err = ecryptfs_read_lower_page_segment(folio,
+ folio->index, 0, folio_size(folio),
+ inode);
+ if (err) {
+ printk(KERN_ERR "Error reading page; err = "
+ "[%d]\n", err);
goto out;
}
}
} else {
- rc = ecryptfs_decrypt_page(page);
- if (rc) {
+ err = ecryptfs_decrypt_page(folio);
+ if (err) {
ecryptfs_printk(KERN_ERR, "Error decrypting page; "
- "rc = [%d]\n", rc);
+ "err = [%d]\n", err);
goto out;
}
}
out:
- if (rc)
- ClearPageUptodate(page);
- else
- SetPageUptodate(page);
- ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
- page->index);
- unlock_page(page);
- return rc;
+ ecryptfs_printk(KERN_DEBUG, "Unlocking folio with index = [0x%.16lx]\n",
+ folio->index);
+ folio_end_read(folio, err == 0);
+ return err;
}
/*
* Called with lower inode mutex held.
*/
-static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
+static int fill_zeros_to_end_of_page(struct folio *folio, unsigned int to)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
int end_byte_in_page;
- if ((i_size_read(inode) / PAGE_SIZE) != page->index)
+ if ((i_size_read(inode) / PAGE_SIZE) != folio->index)
goto out;
end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
if (to > end_byte_in_page)
end_byte_in_page = to;
- zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
+ folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE);
out:
return 0;
}
/**
* ecryptfs_write_begin
- * @file: The eCryptfs file
+ * @iocb: I/O control block for the eCryptfs file
* @mapping: The eCryptfs object
* @pos: The file offset at which to start writing
* @len: Length of the write
- * @flags: Various flags
- * @pagep: Pointer to return the page
+ * @foliop: Pointer to return the folio
* @fsdata: Pointer to return fs data (unused)
*
* This function must zero any hole we create
*
* Returns zero on success; non-zero otherwise
*/
-static int ecryptfs_write_begin(struct file *file,
+static int ecryptfs_write_begin(const struct kiocb *iocb,
struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
loff_t prev_page_end_size;
int rc = 0;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ *foliop = folio;
prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(
- page, index, 0, PAGE_SIZE, mapping->host);
+ folio, index, 0, PAGE_SIZE, mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
"lower page segment; rc = [%d]\n",
__func__, rc);
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
goto out;
} else
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
rc = ecryptfs_copy_up_encrypted_with_header(
- page, crypt_stat);
+ folio, crypt_stat);
if (rc) {
printk(KERN_ERR "%s: Error attempting "
"to copy the encrypted content "
@@ -304,47 +282,47 @@ static int ecryptfs_write_begin(struct file *file,
"inserting the metadata from "
"the xattr into the header; rc "
"= [%d]\n", __func__, rc);
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else {
rc = ecryptfs_read_lower_page_segment(
- page, index, 0, PAGE_SIZE,
+ folio, index, 0, PAGE_SIZE,
mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error reading "
"page; rc = [%d]\n",
__func__, rc);
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
} else {
if (prev_page_end_size
- >= i_size_read(page->mapping->host)) {
- zero_user(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ >= i_size_read(mapping->host)) {
+ folio_zero_range(folio, 0, PAGE_SIZE);
+ folio_mark_uptodate(folio);
} else if (len < PAGE_SIZE) {
- rc = ecryptfs_decrypt_page(page);
+ rc = ecryptfs_decrypt_page(folio);
if (rc) {
printk(KERN_ERR "%s: Error decrypting "
"page at index [%ld]; "
"rc = [%d]\n",
- __func__, page->index, rc);
- ClearPageUptodate(page);
+ __func__, folio->index, rc);
+ folio_clear_uptodate(folio);
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
}
}
/* If creating a page or more of holes, zero them out via truncate.
* Note, this will increase i_size. */
if (index != 0) {
- if (prev_page_end_size > i_size_read(page->mapping->host)) {
- rc = ecryptfs_truncate(file->f_path.dentry,
+ if (prev_page_end_size > i_size_read(mapping->host)) {
+ rc = ecryptfs_truncate(iocb->ki_filp->f_path.dentry,
prev_page_end_size);
if (rc) {
printk(KERN_ERR "%s: Error on attempt to "
@@ -359,12 +337,11 @@ static int ecryptfs_write_begin(struct file *file,
* of page? Zero it out. */
if ((i_size_read(mapping->host) == prev_page_end_size)
&& (pos != 0))
- zero_user(page, 0, PAGE_SIZE);
+ folio_zero_range(folio, 0, PAGE_SIZE);
out:
if (unlikely(rc)) {
- unlock_page(page);
- put_page(page);
- *pagep = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
}
return rc;
}
@@ -427,7 +404,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
if (size < 0)
size = 8;
put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
- rc = __vfs_setxattr(&init_user_ns, lower_dentry, lower_inode,
+ rc = __vfs_setxattr(&nop_mnt_idmap, lower_dentry, lower_inode,
ECRYPTFS_XATTR_NAME, xattr_virt, size, 0);
inode_unlock(lower_inode);
if (rc)
@@ -452,18 +429,18 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
/**
* ecryptfs_write_end
- * @file: The eCryptfs file object
+ * @iocb: I/O control block for the eCryptfs file
* @mapping: The eCryptfs object
* @pos: The file position
* @len: The length of the data (unused)
* @copied: The amount of data copied
- * @page: The eCryptfs page
+ * @folio: The eCryptfs folio
* @fsdata: The fsdata (unused)
*/
-static int ecryptfs_write_end(struct file *file,
+static int ecryptfs_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
unsigned from = pos & (PAGE_SIZE - 1);
@@ -476,8 +453,8 @@ static int ecryptfs_write_end(struct file *file,
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0,
- to);
+ rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
+ folio, 0, to);
if (!rc) {
rc = copied;
fsstack_copy_inode_size(ecryptfs_inode,
@@ -485,21 +462,21 @@ static int ecryptfs_write_end(struct file *file,
}
goto out;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (copied < PAGE_SIZE) {
rc = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/* Fills in zeros if 'to' goes beyond inode size */
- rc = fill_zeros_to_end_of_page(page, to);
+ rc = fill_zeros_to_end_of_page(folio, to);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
- rc = ecryptfs_encrypt_page(page);
+ rc = ecryptfs_encrypt_page(folio);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
"index [0x%.16lx])\n", index);
@@ -518,8 +495,8 @@ static int ecryptfs_write_end(struct file *file,
else
rc = copied;
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return rc;
}
@@ -540,16 +517,18 @@ const struct address_space_operations ecryptfs_aops = {
* XXX: This is pretty broken for multiple reasons: ecryptfs does not
* actually use buffer_heads, and ecryptfs will crash without
* CONFIG_BLOCK. But it matches the behavior before the default for
- * address_space_operations without the ->set_page_dirty method was
+ * address_space_operations without the ->dirty_folio method was
* cleaned up, so this is the best we can do without maintainer
* feedback.
*/
#ifdef CONFIG_BLOCK
- .set_page_dirty = __set_page_dirty_buffers,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
#endif
- .writepage = ecryptfs_writepage,
- .readpage = ecryptfs_readpage,
+ .writepages = ecryptfs_writepages,
+ .read_folio = ecryptfs_read_folio,
.write_begin = ecryptfs_write_begin,
.write_end = ecryptfs_write_end,
+ .migrate_folio = filemap_migrate_folio,
.bmap = ecryptfs_bmap,
};
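The mmap.c conversion above replaces ->writepage with a ->writepages built on writeback_iter(), which hands back each folio selected for writeback locked and leaves error accumulation to the caller via mapping_set_error(). Stripped of the eCryptfs specifics, the loop shape is the hedged sketch below; write_one_folio() is a hypothetical per-filesystem helper, not a kernel API.

static int examplefs_writepages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;		/* writeback_iter() also resets this on the first call */

	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		error = write_one_folio(folio);		/* filesystem-specific I/O */
		if (error)
			mapping_set_error(mapping, error);
		folio_unlock(folio);			/* the iterator expects it unlocked again */
	}
	return error;
}

On the read side, folio_end_read(folio, err == 0) replaces the SetPageUptodate/ClearPageUptodate plus unlock_page() sequence: it marks the folio uptodate only on success and unlocks it in one call.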
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 60bdcaddcbe5..b3b451c2b941 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -41,34 +41,33 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
/**
* ecryptfs_write_lower_page_segment
* @ecryptfs_inode: The eCryptfs inode
- * @page_for_lower: The page containing the data to be written to the
+ * @folio_for_lower: The folio containing the data to be written to the
* lower file
- * @offset_in_page: The offset in the @page_for_lower from which to
+ * @offset_in_page: The offset in the @folio_for_lower from which to
* start writing the data
- * @size: The amount of data from @page_for_lower to write to the
+ * @size: The amount of data from @folio_for_lower to write to the
* lower file
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to write
- * the contents of @page_for_lower to the lower inode.
+ * the contents of @folio_for_lower to the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
- struct page *page_for_lower,
+ struct folio *folio_for_lower,
size_t offset_in_page, size_t size)
{
char *virt;
loff_t offset;
int rc;
- offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
- + offset_in_page);
- virt = kmap(page_for_lower);
+ offset = (loff_t)folio_for_lower->index * PAGE_SIZE + offset_in_page;
+ virt = kmap_local_folio(folio_for_lower, 0);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
if (rc > 0)
rc = 0;
- kunmap(page_for_lower);
+ kunmap_local(virt);
return rc;
}
@@ -93,7 +92,6 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
size_t size)
{
- struct page *ecryptfs_page;
struct ecryptfs_crypt_stat *crypt_stat;
char *ecryptfs_page_virt;
loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
@@ -111,6 +109,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
else
pos = offset;
while (pos < (offset + size)) {
+ struct folio *ecryptfs_folio;
pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
size_t start_offset_in_page = (pos & ~PAGE_MASK);
size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
@@ -130,17 +129,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
if (num_bytes > total_remaining_zeros)
num_bytes = total_remaining_zeros;
}
- ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
- ecryptfs_page_idx);
- if (IS_ERR(ecryptfs_page)) {
- rc = PTR_ERR(ecryptfs_page);
+ ecryptfs_folio = read_mapping_folio(ecryptfs_inode->i_mapping,
+ ecryptfs_page_idx, NULL);
+ if (IS_ERR(ecryptfs_folio)) {
+ rc = PTR_ERR(ecryptfs_folio);
printk(KERN_ERR "%s: Error getting page at "
"index [%ld] from eCryptfs inode "
"mapping; rc = [%d]\n", __func__,
ecryptfs_page_idx, rc);
goto out;
}
- ecryptfs_page_virt = kmap_atomic(ecryptfs_page);
+ folio_lock(ecryptfs_folio);
+ ecryptfs_page_virt = kmap_local_folio(ecryptfs_folio, 0);
/*
* pos: where we're now writing, offset: where the request was
@@ -163,18 +163,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
(data + data_offset), num_bytes);
data_offset += num_bytes;
}
- kunmap_atomic(ecryptfs_page_virt);
- flush_dcache_page(ecryptfs_page);
- SetPageUptodate(ecryptfs_page);
- unlock_page(ecryptfs_page);
+ kunmap_local(ecryptfs_page_virt);
+ flush_dcache_folio(ecryptfs_folio);
+ folio_mark_uptodate(ecryptfs_folio);
+ folio_unlock(ecryptfs_folio);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED)
- rc = ecryptfs_encrypt_page(ecryptfs_page);
+ rc = ecryptfs_encrypt_page(ecryptfs_folio);
else
rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
- ecryptfs_page,
+ ecryptfs_folio,
start_offset_in_page,
data_offset);
- put_page(ecryptfs_page);
+ folio_put(ecryptfs_folio);
if (rc) {
printk(KERN_ERR "%s: Error encrypting "
"page; rc = [%d]\n", __func__, rc);
@@ -228,7 +228,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
/**
* ecryptfs_read_lower_page_segment
- * @page_for_ecryptfs: The page into which data for eCryptfs will be
+ * @folio_for_ecryptfs: The folio into which data for eCryptfs will be
* written
* @page_index: Page index in @page_for_ecryptfs from which to start
* writing
@@ -243,7 +243,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
+int ecryptfs_read_lower_page_segment(struct folio *folio_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode)
@@ -252,12 +252,12 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
loff_t offset;
int rc;
- offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
- virt = kmap(page_for_ecryptfs);
+ offset = (loff_t)page_index * PAGE_SIZE + offset_in_page;
+ virt = kmap_local_folio(folio_for_ecryptfs, 0);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
rc = 0;
- kunmap(page_for_ecryptfs);
- flush_dcache_page(page_for_ecryptfs);
+ kunmap_local(virt);
+ flush_dcache_folio(folio_for_ecryptfs);
return rc;
}
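The read_write.c hunks switch from kmap()/kmap_atomic() to kmap_local_folio(), a short-lived, CPU-local mapping that neither takes a global lock like kmap() nor disables preemption like kmap_atomic(), and they compute the lower-file offset directly from folio->index. A hedged, self-contained illustration of the pattern (copy_from_folio() is a hypothetical helper, not part of the patch):

/* Copy @len bytes starting at @offset_in_folio out of @folio; @len must not
 * cross a page boundary for this simple sketch. */
static void copy_from_folio(struct folio *folio, size_t offset_in_folio,
			    void *dst, size_t len)
{
	loff_t pos = (loff_t)folio->index * PAGE_SIZE + offset_in_folio;
	char *virt;

	virt = kmap_local_folio(folio, offset_in_folio);	/* maps the page holding that offset */
	memcpy(dst, virt, len);
	kunmap_local(virt);

	pr_debug("copied %zu bytes from file offset %lld\n", len, pos);
}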
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 39116af0390f..3bc21d677564 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -38,13 +38,10 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
struct ecryptfs_inode_info *inode_info;
struct inode *inode = NULL;
- inode_info = kmem_cache_alloc(ecryptfs_inode_info_cache, GFP_KERNEL);
+ inode_info = alloc_inode_sb(sb, ecryptfs_inode_info_cache, GFP_KERNEL);
if (unlikely(!inode_info))
goto out;
- if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) {
- kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
- goto out;
- }
+ ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
mutex_init(&inode_info->lower_file_mutex);
atomic_set(&inode_info->lower_file_count, 0);
inode_info->lower_file = NULL;
@@ -172,7 +169,6 @@ const struct super_operations ecryptfs_sops = {
.destroy_inode = ecryptfs_destroy_inode,
.free_inode = ecryptfs_free_inode,
.statfs = ecryptfs_statfs,
- .remount_fs = NULL,
.evict_inode = ecryptfs_evict_inode,
.show_options = ecryptfs_show_options
};
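The super.c hunk swaps kmem_cache_alloc() for alloc_inode_sb() in ->alloc_inode. The point of alloc_inode_sb() is that the inode object gets charged to the superblock (and its memory cgroup), so memcg-aware reclaim through the per-sb shrinker sees it; a plain slab allocation bypasses that accounting. A minimal sketch of the pattern, with hypothetical "examplefs" names:

struct examplefs_inode_info {
	unsigned long	flags;		/* filesystem-private state */
	struct inode	vfs_inode;	/* embedded VFS inode */
};

static struct kmem_cache *examplefs_inode_cache;

static struct inode *examplefs_alloc_inode(struct super_block *sb)
{
	struct examplefs_inode_info *info;

	info = alloc_inode_sb(sb, examplefs_inode_cache, GFP_KERNEL);
	if (!info)
		return NULL;
	info->flags = 0;
	return &info->vfs_inode;
}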
diff --git a/fs/efivarfs/Makefile b/fs/efivarfs/Makefile
index 0b1c5e63eb71..7bfc2f9754a8 100644
--- a/fs/efivarfs/Makefile
+++ b/fs/efivarfs/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_EFIVAR_FS) += efivarfs.o
-efivarfs-objs := inode.o file.o super.o
+efivarfs-objs := inode.o file.o super.o vars.o
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index d57ee15874f9..cb1b6d0c3454 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -36,28 +36,41 @@ static ssize_t efivarfs_file_write(struct file *file,
if (IS_ERR(data))
return PTR_ERR(data);
+ inode_lock(inode);
+ if (var->removed) {
+ /*
+ * file got removed; don't allow a set. Caused by an
+ * unsuccessful create or successful delete write
+ * racing with us.
+ */
+ bytes = -EIO;
+ goto out;
+ }
+
bytes = efivar_entry_set_get_size(var, attributes, &datasize,
data, &set);
- if (!set && bytes) {
+ if (!set) {
if (bytes == -ENOENT)
bytes = -EIO;
goto out;
}
if (bytes == -ENOENT) {
- drop_nlink(inode);
- d_delete(file->f_path.dentry);
- dput(file->f_path.dentry);
+ /*
+ * A zero i_size signals to ->release() that this write
+ * deleted the variable.
+ */
+ i_size_write(inode, 0);
} else {
- inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
- inode->i_mtime = current_time(inode);
- inode_unlock(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
bytes = count;
out:
+ inode_unlock(inode);
+
kfree(data);
return bytes;
@@ -106,9 +119,36 @@ out_free:
return size;
}
+static int efivarfs_file_release(struct inode *inode, struct file *file)
+{
+ struct efivar_entry *var = inode->i_private;
+
+ inode_lock(inode);
+ var->removed = (--var->open_count == 0 && i_size_read(inode) == 0);
+ inode_unlock(inode);
+
+ if (var->removed)
+ simple_recursive_removal(file->f_path.dentry, NULL);
+
+ return 0;
+}
+
+static int efivarfs_file_open(struct inode *inode, struct file *file)
+{
+ struct efivar_entry *entry = inode->i_private;
+
+ file->private_data = entry;
+
+ inode_lock(inode);
+ entry->open_count++;
+ inode_unlock(inode);
+
+ return 0;
+}
+
const struct file_operations efivarfs_file_operations = {
- .open = simple_open,
- .read = efivarfs_file_read,
- .write = efivarfs_file_write,
- .llseek = no_llseek,
+ .open = efivarfs_file_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .release = efivarfs_file_release,
};
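Instead of unlinking the dentry from the write path, the file.c changes above keep a small open_count/removed protocol in the efivar_entry, all serialized by the inode lock. An editorial summary of the lifecycle (not code from the patch):

/*
 *   open():    entry->open_count++                          (inode_lock held)
 *   write():   SetVariable() deleted the variable
 *              -> i_size_write(inode, 0)                    (inode_lock held)
 *   release(): --entry->open_count == 0 && i_size == 0
 *              -> entry->removed = true
 *              -> simple_recursive_removal(dentry)
 *
 * An open() that races in just before the removal still succeeds, but any
 * write() on that descriptor sees entry->removed and fails with -EIO rather
 * than resurrecting the deleted variable.
 */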
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 939e5e242b98..95dcad83da11 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -21,11 +21,15 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
dev_t dev, bool is_removable)
{
struct inode *inode = new_inode(sb);
+ struct efivarfs_fs_info *fsi = sb->s_fs_info;
+ struct efivarfs_mount_opts *opts = &fsi->mount_opts;
if (inode) {
+ inode->i_uid = opts->uid;
+ inode->i_gid = opts->gid;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
switch (mode & S_IFMT) {
case S_IFREG:
@@ -47,7 +51,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
*
* VariableName-12345678-1234-1234-1234-1234567891bc
*/
-bool efivarfs_valid_name(const char *str, int len)
+static bool efivarfs_valid_name(const char *str, int len)
{
const char *s = str + len - EFI_VARIABLE_GUID_LEN;
@@ -70,37 +74,37 @@ bool efivarfs_valid_name(const char *str, int len)
return uuid_is_valid(s);
}
-static int efivarfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode = NULL;
struct efivar_entry *var;
int namelen, i = 0, err = 0;
bool is_removable = false;
+ efi_guid_t vendor;
if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
- var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
- if (!var)
- return -ENOMEM;
-
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
- err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ err = guid_parse(dentry->d_name.name + namelen + 1, &vendor);
if (err)
- goto out;
+ return err;
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return -EPERM;
- if (efivar_variable_is_removable(var->var.VendorGuid,
+ if (efivar_variable_is_removable(vendor,
dentry->d_name.name, namelen))
is_removable = true;
inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
- if (!inode) {
- err = -ENOMEM;
- goto out;
- }
+ if (!inode)
+ return -ENOMEM;
+ var = efivar_entry(inode);
+
+ var->var.VendorGuid = vendor;
for (i = 0; i < namelen; i++)
var->var.VariableName[i] = dentry->d_name.name[i];
@@ -108,21 +112,10 @@ static int efivarfs_create(struct user_namespace *mnt_userns, struct inode *dir,
var->var.VariableName[i] = '\0';
inode->i_private = var;
- kmemleak_ignore(var);
- err = efivar_entry_add(var, &efivarfs_list);
- if (err)
- goto out;
-
- d_instantiate(dentry, inode);
- dget(dentry);
-out:
- if (err) {
- kfree(var);
- if (inode)
- iput(inode);
- }
- return err;
+ d_make_persistent(dentry, inode);
+
+ return 0;
}
static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -132,9 +125,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
if (efivar_entry_delete(var))
return -EINVAL;
- drop_nlink(d_inode(dentry));
- dput(dentry);
- return 0;
+ return simple_unlink(dir, dentry);
};
const struct inode_operations efivarfs_dir_inode_operations = {
@@ -144,7 +135,7 @@ const struct inode_operations efivarfs_dir_inode_operations = {
};
static int
-efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+efivarfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
unsigned int i_flags;
unsigned int flags = 0;
@@ -159,8 +150,8 @@ efivarfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
static int
-efivarfs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+efivarfs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
unsigned int i_flags = 0;
@@ -178,7 +169,24 @@ efivarfs_fileattr_set(struct user_namespace *mnt_userns,
return 0;
}
+/* copy of simple_setattr except that it doesn't do i_size updates */
+static int efivarfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ int error;
+
+ error = setattr_prepare(idmap, dentry, iattr);
+ if (error)
+ return error;
+
+ setattr_copy(idmap, inode, iattr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
static const struct inode_operations efivarfs_file_inode_operations = {
.fileattr_get = efivarfs_fileattr_get,
.fileattr_set = efivarfs_fileattr_set,
+ .setattr = efivarfs_setattr,
};
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index 30ae44cb7453..f913b6824289 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -6,15 +6,62 @@
#ifndef EFIVAR_FS_INTERNAL_H
#define EFIVAR_FS_INTERNAL_H
-#include <linux/list.h>
+#include <linux/efi.h>
+
+struct efivarfs_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+};
+
+struct efivarfs_fs_info {
+ struct efivarfs_mount_opts mount_opts;
+ struct super_block *sb;
+ struct notifier_block nb;
+};
+
+struct efi_variable {
+ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
+ efi_guid_t VendorGuid;
+};
+
+struct efivar_entry {
+ struct efi_variable var;
+ struct inode vfs_inode;
+ unsigned long open_count;
+ bool removed;
+};
+
+static inline struct efivar_entry *efivar_entry(struct inode *inode)
+{
+ return container_of(inode, struct efivar_entry, vfs_inode);
+}
+
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicate_check);
+
+int efivar_entry_delete(struct efivar_entry *entry);
+
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data);
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data);
+int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ unsigned long *size, void *data, bool *set);
+
+
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+ size_t len);
+char *efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor);
+bool efivarfs_variable_is_present(efi_char16_t *variable_name,
+ efi_guid_t *vendor, void *data);
extern const struct file_operations efivarfs_file_operations;
extern const struct inode_operations efivarfs_dir_inode_operations;
-extern bool efivarfs_valid_name(const char *str, int len);
extern struct inode *efivarfs_get_inode(struct super_block *sb,
const struct inode *dir, int mode, dev_t dev,
bool is_removable);
-extern struct list_head efivarfs_list;
-
#endif /* EFIVAR_FS_INTERNAL_H */
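With the global efivarfs_list gone, internal.h makes efivar_init() the single way to enumerate the firmware's variables: it invokes the supplied callback once per variable. A hypothetical callback matching the declared prototype, shown purely for illustration (it only counts variables; the real callbacks above build dentries):

static int count_var(efi_char16_t *name16, efi_guid_t vendor,
		     unsigned long name_size, void *data)
{
	unsigned long *count = data;

	(*count)++;
	return 0;	/* a non-zero return is treated as an error by efivar_init() */
}

/* usage: unsigned long n = 0; efivar_init(count_var, &n, false); */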
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 15880a68faad..9da992925920 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -8,25 +8,131 @@
#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/ucs2_string.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/magic.h>
+#include <linux/statfs.h>
+#include <linux/notifier.h>
+#include <linux/printk.h>
+#include <linux/namei.h>
#include "internal.h"
+#include "../internal.h"
-LIST_HEAD(efivarfs_list);
+static int efivarfs_ops_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct efivarfs_fs_info *sfi = container_of(nb, struct efivarfs_fs_info, nb);
+
+ switch (event) {
+ case EFIVAR_OPS_RDONLY:
+ sfi->sb->s_flags |= SB_RDONLY;
+ break;
+ case EFIVAR_OPS_RDWR:
+ sfi->sb->s_flags &= ~SB_RDONLY;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct inode *efivarfs_alloc_inode(struct super_block *sb)
+{
+ struct efivar_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return NULL;
+
+ inode_init_once(&entry->vfs_inode);
+ entry->removed = false;
+
+ return &entry->vfs_inode;
+}
+
+static void efivarfs_free_inode(struct inode *inode)
+{
+ struct efivar_entry *entry = efivar_entry(inode);
+
+ kfree(entry);
+}
+
+static int efivarfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct efivarfs_fs_info *sbi = sb->s_fs_info;
+ struct efivarfs_mount_opts *opts = &sbi->mount_opts;
+
+ if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, opts->uid));
+ if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, opts->gid));
+ return 0;
+}
-static void efivarfs_evict_inode(struct inode *inode)
+static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- clear_inode(inode);
+ const u32 attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
+ u64 storage_space, remaining_space, max_variable_size;
+ u64 id = huge_encode_dev(dentry->d_sb->s_dev);
+ efi_status_t status;
+
+ /* Some UEFI firmware does not implement QueryVariableInfo() */
+ storage_space = remaining_space = 0;
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO)) {
+ status = efivar_query_variable_info(attr, &storage_space,
+ &remaining_space,
+ &max_variable_size);
+ if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED)
+ pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n",
+ status);
+ }
+
+ /*
+ * This is not a normal filesystem, so no point in pretending it has a block
+ * size; we set f_bsize to 1 so that we can report the exact values
+ * returned by EFI QueryVariableInfo() in f_blocks and f_bfree.
+ */
+ buf->f_bsize = 1;
+ buf->f_namelen = NAME_MAX;
+ buf->f_blocks = storage_space;
+ buf->f_bfree = remaining_space;
+ buf->f_type = dentry->d_sb->s_magic;
+ buf->f_fsid = u64_to_fsid(id);
+
+ /*
+ * In f_bavail we report the free space that the kernel will allow writing
+ * to while the x86 storage_paranoia quirk is active. To use more, users
+ * should boot the kernel with efi_no_storage_paranoia.
+ */
+ if (remaining_space > efivar_reserved_space())
+ buf->f_bavail = remaining_space - efivar_reserved_space();
+ else
+ buf->f_bavail = 0;
+
+ return 0;
}
+static int efivarfs_freeze_fs(struct super_block *sb);
+static int efivarfs_unfreeze_fs(struct super_block *sb);
+
static const struct super_operations efivarfs_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
- .evict_inode = efivarfs_evict_inode,
+ .statfs = efivarfs_statfs,
+ .drop_inode = inode_just_drop,
+ .alloc_inode = efivarfs_alloc_inode,
+ .free_inode = efivarfs_free_inode,
+ .show_options = efivarfs_show_options,
+ .freeze_fs = efivarfs_freeze_fs,
+ .unfreeze_fs = efivarfs_unfreeze_fs,
};
/*
@@ -46,6 +152,10 @@ static int efivarfs_d_compare(const struct dentry *dentry,
{
int guid = len - EFI_VARIABLE_GUID_LEN;
+ /* Parallel lookups may produce a temporary invalid filename */
+ if (guid <= 0)
+ return 1;
+
if (name->len != len)
return 1;
@@ -63,9 +173,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
const unsigned char *s = qstr->name;
unsigned int len = qstr->len;
- if (!efivarfs_valid_name(s, len))
- return -EINVAL;
-
while (len-- > EFI_VARIABLE_GUID_LEN)
hash = partial_name_hash(*s++, hash);
@@ -80,7 +187,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
static const struct dentry_operations efivarfs_d_ops = {
.d_compare = efivarfs_d_compare,
.d_hash = efivarfs_d_hash,
- .d_delete = always_delete_dentry,
};
static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
@@ -103,71 +209,77 @@ static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
return ERR_PTR(-ENOMEM);
}
-static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
- unsigned long name_size, void *data)
+bool efivarfs_variable_is_present(efi_char16_t *variable_name,
+ efi_guid_t *vendor, void *data)
+{
+ char *name = efivar_get_utf8name(variable_name, vendor);
+ struct super_block *sb = data;
+ struct dentry *dentry;
+
+ if (!name)
+ /*
+ * If the allocation failed there'll already be an
+ * error in the log (and likely a huge and growing
+ * number of them since they system will be under
+ * extreme memory pressure), so simply assume
+ * collision for safety but don't add to the log
+ * flood.
+ */
+ return true;
+
+ dentry = try_lookup_noperm(&QSTR(name), sb->s_root);
+ kfree(name);
+ if (!IS_ERR_OR_NULL(dentry))
+ dput(dentry);
+
+ return dentry != NULL;
+}
+
+static int efivarfs_create_dentry(struct super_block *sb, efi_char16_t *name16,
+ unsigned long name_size, efi_guid_t vendor,
+ char *name)
{
- struct super_block *sb = (struct super_block *)data;
struct efivar_entry *entry;
- struct inode *inode = NULL;
+ struct inode *inode;
struct dentry *dentry, *root = sb->s_root;
unsigned long size = 0;
- char *name;
int len;
int err = -ENOMEM;
bool is_removable = false;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return err;
+ /* length of the variable name itself: remove GUID and separator */
+ len = strlen(name) - EFI_VARIABLE_GUID_LEN - 1;
- memcpy(entry->var.VariableName, name16, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- len = ucs2_utf8size(entry->var.VariableName);
-
- /* name, plus '-', plus GUID, plus NUL*/
- name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
- if (!name)
- goto fail;
-
- ucs2_as_utf8(name, entry->var.VariableName, len);
-
- if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+ if (efivar_variable_is_removable(vendor, name, len))
is_removable = true;
- name[len] = '-';
-
- efi_guid_to_str(&entry->var.VendorGuid, name + len + 1);
-
- name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
-
- /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
- strreplace(name, '/', '!');
-
inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
is_removable);
if (!inode)
goto fail_name;
+ entry = efivar_entry(inode);
+
+ memcpy(entry->var.VariableName, name16, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
dentry = efivarfs_alloc_dentry(root, name);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto fail_inode;
}
- efivar_entry_size(entry, &size);
- err = efivar_entry_add(entry, &efivarfs_list);
- if (err)
- goto fail_inode;
+ __efivar_entry_get(entry, NULL, &size, NULL);
/* copied by the above to local storage in the dentry. */
kfree(name);
inode_lock(inode);
inode->i_private = entry;
- i_size_write(inode, size + sizeof(entry->var.Attributes));
+ i_size_write(inode, size + sizeof(__u32)); /* attributes + data */
inode_unlock(inode);
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
return 0;
@@ -175,23 +287,64 @@ fail_inode:
iput(inode);
fail_name:
kfree(name);
-fail:
- kfree(entry);
+
return err;
}
-static int efivarfs_destroy(struct efivar_entry *entry, void *data)
+static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ unsigned long name_size, void *data)
{
- int err = efivar_entry_remove(entry);
+ struct super_block *sb = (struct super_block *)data;
+ char *name;
+
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return 0;
+
+ name = efivar_get_utf8name(name16, &vendor);
+ if (!name)
+ return -ENOMEM;
+
+ return efivarfs_create_dentry(sb, name16, name_size, vendor, name);
+}
+
+enum {
+ Opt_uid, Opt_gid,
+};
+
+static const struct fs_parameter_spec efivarfs_parameters[] = {
+ fsparam_uid("uid", Opt_uid),
+ fsparam_gid("gid", Opt_gid),
+ {},
+};
+
+static int efivarfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct efivarfs_fs_info *sbi = fc->s_fs_info;
+ struct efivarfs_mount_opts *opts = &sbi->mount_opts;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, efivarfs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->uid = result.uid;
+ break;
+ case Opt_gid:
+ opts->gid = result.gid;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (err)
- return err;
- kfree(entry);
return 0;
}
static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct efivarfs_fs_info *sfi = sb->s_fs_info;
struct inode *inode = NULL;
struct dentry *root;
int err;
@@ -201,7 +354,8 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = EFIVARFS_MAGIC;
sb->s_op = &efivarfs_ops;
- sb->s_d_op = &efivarfs_d_ops;
+ set_default_d_op(sb, &efivarfs_d_ops);
+ sb->s_d_flags |= DCACHE_DONTCACHE;
sb->s_time_gran = 1;
if (!efivar_supports_writes())
@@ -217,13 +371,13 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (!root)
return -ENOMEM;
- INIT_LIST_HEAD(&efivarfs_list);
-
- err = efivar_init(efivarfs_callback, (void *)sb, true, &efivarfs_list);
+ sfi->sb = sb;
+ sfi->nb.notifier_call = efivarfs_ops_notifier;
+ err = blocking_notifier_chain_register(&efivar_ops_nh, &sfi->nb);
if (err)
- __efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL, NULL);
+ return err;
- return err;
+ return efivar_init(efivarfs_callback, sb, true);
}
static int efivarfs_get_tree(struct fs_context *fc)
@@ -231,22 +385,147 @@ static int efivarfs_get_tree(struct fs_context *fc)
return get_tree_single(fc, efivarfs_fill_super);
}
+static int efivarfs_reconfigure(struct fs_context *fc)
+{
+ if (!efivar_supports_writes() && !(fc->sb_flags & SB_RDONLY)) {
+ pr_err("Firmware does not support SetVariableRT. Can not remount with rw\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void efivarfs_free(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
static const struct fs_context_operations efivarfs_context_ops = {
.get_tree = efivarfs_get_tree,
+ .parse_param = efivarfs_parse_param,
+ .reconfigure = efivarfs_reconfigure,
+ .free = efivarfs_free,
};
+static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ char *name;
+ struct super_block *sb = data;
+ struct dentry *dentry;
+ int err;
+
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return 0;
+
+ name = efivar_get_utf8name(name16, &vendor);
+ if (!name)
+ return -ENOMEM;
+
+ dentry = try_lookup_noperm(&QSTR(name), sb->s_root);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ if (!dentry) {
+ /* found missing entry */
+ pr_info("efivarfs: creating variable %s\n", name);
+ return efivarfs_create_dentry(sb, name16, name_size, vendor, name);
+ }
+
+ dput(dentry);
+ err = 0;
+
+ out:
+ kfree(name);
+
+ return err;
+}
+
+static struct file_system_type efivarfs_type;
+
+static int efivarfs_freeze_fs(struct super_block *sb)
+{
+ /* Nothing for us to do. */
+ return 0;
+}
+
+static int efivarfs_unfreeze_fs(struct super_block *sb)
+{
+ struct dentry *child = NULL;
+
+ /*
+ * Unconditionally resync the variable state on a thaw request.
+ * efivarfs is small and freeze/thaw requests are rare, so simply
+ * iterating over every entry and resyncing it is cheap enough.
+ */
+ pr_info("efivarfs: resyncing variable state\n");
+ for (;;) {
+ int err;
+ unsigned long size = 0;
+ struct inode *inode;
+ struct efivar_entry *entry;
+
+ child = find_next_child(sb->s_root, child);
+ if (!child)
+ break;
+
+ inode = d_inode(child);
+ entry = efivar_entry(inode);
+
+ err = efivar_entry_size(entry, &size);
+ if (err)
+ size = 0;
+ else
+ size += sizeof(__u32);
+
+ inode_lock(inode);
+ i_size_write(inode, size);
+ inode_unlock(inode);
+
+ /* The variable doesn't exist anymore, delete it. */
+ if (!size) {
+ pr_info("efivarfs: removing variable %pd\n", child);
+ simple_recursive_removal(child, NULL);
+ }
+ }
+
+ efivar_init(efivarfs_check_missing, sb, false);
+ pr_info("efivarfs: finished resyncing variable state\n");
+ return 0;
+}
+
static int efivarfs_init_fs_context(struct fs_context *fc)
{
+ struct efivarfs_fs_info *sfi;
+
+ if (!efivar_is_available())
+ return -EOPNOTSUPP;
+
+ sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+ if (!sfi)
+ return -ENOMEM;
+
+ sfi->mount_opts.uid = GLOBAL_ROOT_UID;
+ sfi->mount_opts.gid = GLOBAL_ROOT_GID;
+
+ fc->s_fs_info = sfi;
fc->ops = &efivarfs_context_ops;
+
return 0;
}
static void efivarfs_kill_sb(struct super_block *sb)
{
- kill_litter_super(sb);
+ struct efivarfs_fs_info *sfi = sb->s_fs_info;
- /* Remove all entries and destroy */
- __efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL, NULL);
+ blocking_notifier_chain_unregister(&efivar_ops_nh, &sfi->nb);
+ kill_anon_super(sb);
+
+ kfree(sfi);
}
static struct file_system_type efivarfs_type = {
@@ -254,13 +533,12 @@ static struct file_system_type efivarfs_type = {
.name = "efivarfs",
.init_fs_context = efivarfs_init_fs_context,
.kill_sb = efivarfs_kill_sb,
+ .parameters = efivarfs_parameters,
+ .fs_flags = FS_POWER_FREEZE,
};
static __init int efivarfs_init(void)
{
- if (!efivars_kobject())
- return -ENODEV;
-
return register_filesystem(&efivarfs_type);
}
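Because the efivarfs_statfs() added above sets f_bsize to 1, the f_blocks/f_bfree/f_bavail values are byte counts rather than block counts. A small userspace sketch of how that surfaces through statvfs(); the mount point path is only the conventional location, not something the patch guarantees:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
	struct statvfs st;

	if (statvfs("/sys/firmware/efi/efivars", &st) != 0) {
		perror("statvfs");
		return 1;
	}
	printf("f_bsize          : %lu (1 => the counts below are bytes)\n",
	       st.f_bsize);
	printf("variable storage : %llu total, %llu free, %llu writable\n",
	       (unsigned long long)st.f_blocks,
	       (unsigned long long)st.f_bfree,
	       (unsigned long long)st.f_bavail);
	return 0;
}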
diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
new file mode 100644
index 000000000000..6edc10958ecf
--- /dev/null
+++ b/fs/efivarfs/vars.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Originally from efivars.c
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ */
+
+#include <linux/capability.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/efi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/ucs2_string.h>
+
+#include "internal.h"
+
+MODULE_IMPORT_NS("EFIVAR");
+
+static bool
+validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ struct efi_generic_dev_path *node;
+ int offset = 0;
+
+ node = (struct efi_generic_dev_path *)buffer;
+
+ if (len < sizeof(*node))
+ return false;
+
+ while (offset <= len - sizeof(*node) &&
+ node->length >= sizeof(*node) &&
+ node->length <= len - offset) {
+ offset += node->length;
+
+ if ((node->type == EFI_DEV_END_PATH ||
+ node->type == EFI_DEV_END_PATH2) &&
+ node->sub_type == EFI_DEV_END_ENTIRE)
+ return true;
+
+ node = (struct efi_generic_dev_path *)(buffer + offset);
+ }
+
+ /*
+ * If we're here then either node->length pointed past the end
+ * of the buffer or we reached the end of the buffer without
+ * finding a device path end node.
+ */
+ return false;
+}
+
+static bool
+validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* An array of 16-bit integers */
+ if ((len % 2) != 0)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ u16 filepathlength;
+ int i, desclength = 0, namelen;
+
+ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
+
+ /* Either "Boot" or "Driver" followed by four digits of hex */
+ for (i = match; i < match+4; i++) {
+ if (var_name[i] > 127 ||
+ hex_to_bin(var_name[i] & 0xff) < 0)
+ return true;
+ }
+
+ /* Reject it if there's 4 digits of hex and then further content */
+ if (namelen > match + 4)
+ return false;
+
+ /* A valid entry must be at least 8 bytes */
+ if (len < 8)
+ return false;
+
+ filepathlength = buffer[4] | buffer[5] << 8;
+
+ /*
+ * There's no stored length for the description, so it has to be
+ * found by hand
+ */
+ desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+ /* Each boot entry must have a descriptor */
+ if (!desclength)
+ return false;
+
+ /*
+ * If the sum of the length of the description, the claimed filepath
+ * length and the original header are greater than the length of the
+ * variable, it's malformed
+ */
+ if ((desclength + filepathlength + 6) > len)
+ return false;
+
+ /*
+ * And, finally, check the filepath
+ */
+ return validate_device_path(var_name, match, buffer + desclength + 6,
+ filepathlength);
+}
+
+static bool
+validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* A single 16-bit integer */
+ if (len != 2)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (buffer[i] > 127)
+ return false;
+
+ if (buffer[i] == 0)
+ return true;
+ }
+
+ return false;
+}
+
+struct variable_validate {
+ efi_guid_t vendor;
+ char *name;
+ bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+ unsigned long len);
+};
+
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine. If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
+static const struct variable_validate variable_validate[] = {
+ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+ { LINUX_EFI_CRASH_GUID, "*", NULL },
+ { NULL_GUID, "", NULL },
+};
+
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters in @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+ int *match)
+{
+ for (*match = 0; ; (*match)++) {
+ char c = match_name[*match];
+
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
+ return true;
+
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
+
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
+ return false;
+ }
+ }
+}
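/*
 * Editorial note, not part of the patch: worked examples of the matching
 * rules documented above, with @len excluding any terminating NUL (the
 * convention efivar_variable_is_removable() uses):
 *
 *   variable_matches("BootOrder", 9, "BootOrder", &m) -> true,  m == 9
 *   variable_matches("Boot0001",  8, "Boot*",     &m) -> true,  m == 4
 *   variable_matches("BootNext",  8, "BootOrder", &m) -> false, m == 4
 */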
+
+char *
+efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor)
+{
+ int len = ucs2_utf8size(name16);
+ char *name;
+
+ /* name, plus '-', plus GUID, plus NUL*/
+ name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ ucs2_as_utf8(name, name16, len);
+
+ name[len] = '-';
+
+ efi_guid_to_str(vendor, name + len + 1);
+
+ name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+
+ /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
+ strreplace(name, '/', '!');
+
+ return name;
+}
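+/*
+ * For example (assuming EFI_GLOBAL_VARIABLE_GUID as the vendor), the
+ * returned string looks like "BootOrder-8be4df61-93ca-11d2-aa0d-00e098032b8c",
+ * i.e. the UTF-8 name, a '-', and the textual GUID.
+ */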
+
+bool
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size)
+{
+ int i;
+ unsigned long utf8_size;
+ u8 *utf8_name;
+
+ utf8_size = ucs2_utf8size(var_name);
+ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+ if (!utf8_name)
+ return false;
+
+ ucs2_as_utf8(utf8_name, var_name, utf8_size);
+ utf8_name[utf8_size] = '\0';
+
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ const char *name = variable_validate[i].name;
+ int match = 0;
+
+ if (efi_guidcmp(vendor, variable_validate[i].vendor))
+ continue;
+
+ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+ if (variable_validate[i].validate == NULL)
+ break;
+ kfree(utf8_name);
+ return variable_validate[i].validate(var_name, match,
+ data, data_size);
+ }
+ }
+ kfree(utf8_name);
+ return true;
+}
+
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+ size_t len)
+{
+ int i;
+ bool found = false;
+ int match = 0;
+
+ /*
+ * Check if our variable is in the validated variables list
+ */
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ if (efi_guidcmp(variable_validate[i].vendor, vendor))
+ continue;
+
+ if (variable_matches(var_name, len,
+ variable_validate[i].name, &match)) {
+ found = true;
+ break;
+ }
+ }
+
+ /*
+ * If it's in our list, it is removable.
+ */
+ return found;
+}
+
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+ unsigned long variable_name_size)
+{
+ unsigned long len;
+ efi_char16_t c;
+
+ /*
+ * The variable name is, by definition, a NULL-terminated
+ * string, so make absolutely sure that variable_name_size is
+ * the value we expect it to be. If not, return the real size.
+ */
+ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+ c = variable_name[(len / sizeof(c)) - 1];
+ if (!c)
+ break;
+ }
+
+ return min(len, variable_name_size);
+}
+
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
+ unsigned long len16)
+{
+ size_t i, len8 = len16 / sizeof(efi_char16_t);
+ char *str8;
+
+ str8 = kzalloc(len8, GFP_KERNEL);
+ if (!str8)
+ return;
+
+ for (i = 0; i < len8; i++)
+ str8[i] = str16[i];
+
+ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+ str8, vendor_guid);
+ kfree(str8);
+}
+
+/**
+ * efivar_init - build the initial list of EFI variables
+ * @func: callback function to invoke for every variable
+ * @data: function-specific data to pass to @func
+ * @duplicate_check: stop enumeration if a duplicate variable is found
+ *
+ * Get every EFI variable from the firmware and invoke @func. @func
+ * should populate the initial dentry and inode tree.
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicate_check)
+{
+ unsigned long variable_name_size = 512;
+ efi_char16_t *variable_name;
+ efi_status_t status;
+ efi_guid_t vendor_guid;
+ int err = 0;
+
+ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+ if (!variable_name) {
+ printk(KERN_ERR "efivars: Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ err = efivar_lock();
+ if (err)
+ goto free;
+
+ /*
+ * A small set of old UEFI implementations reject sizes
+ * above a certain threshold; the lowest seen in the wild
+ * is 512.
+ */
+
+ do {
+ variable_name_size = 512;
+ BUILD_BUG_ON(EFI_VAR_NAME_LEN < 512);
+
+ status = efivar_get_next_variable(&variable_name_size,
+ variable_name,
+ &vendor_guid);
+ switch (status) {
+ case EFI_SUCCESS:
+ variable_name_size = var_name_strnsize(variable_name,
+ variable_name_size);
+
+ /*
+ * Some firmware implementations return the
+ * same variable name on multiple calls to
+ * get_next_variable(). Terminate the loop
+ * immediately as there is no guarantee that
+ * we'll ever see a different variable name,
+ * and we may end up looping here forever.
+ */
+ if (duplicate_check &&
+ efivarfs_variable_is_present(variable_name,
+ &vendor_guid, data)) {
+ dup_variable_bug(variable_name, &vendor_guid,
+ variable_name_size);
+ status = EFI_NOT_FOUND;
+ } else {
+ err = func(variable_name, vendor_guid,
+ variable_name_size, data);
+ if (err)
+ status = EFI_NOT_FOUND;
+ }
+ break;
+ case EFI_UNSUPPORTED:
+ err = -EOPNOTSUPP;
+ status = EFI_NOT_FOUND;
+ break;
+ case EFI_NOT_FOUND:
+ break;
+ case EFI_BUFFER_TOO_SMALL:
+ pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
+ variable_name_size);
+ status = EFI_NOT_FOUND;
+ break;
+ default:
+ pr_warn("efivars: get_next_variable: status=%lx\n", status);
+ status = EFI_NOT_FOUND;
+ break;
+ }
+
+ } while (status != EFI_NOT_FOUND);
+
+ efivar_unlock();
+free:
+ kfree(variable_name);
+
+ return err;
+}
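+/*
+ * Minimal illustrative caller (hypothetical, not from the original code):
+ *
+ *	static int count_var(efi_char16_t *name, efi_guid_t vendor,
+ *			     unsigned long name_size, void *data)
+ *	{
+ *		(*(unsigned long *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	unsigned long nr = 0;
+ *	int err = efivar_init(count_var, &nr, false);
+ */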
+
+/**
+ * efivar_entry_delete - delete variable
+ * @entry: entry containing variable to delete
+ *
+ * Delete the variable from the firmware. It is the caller's
+ * responsibility to free @entry (by deleting the dentry/inode) once
+ * we return.
+ *
+ * Returns 0 on success, -EINTR if we can't grab the semaphore,
+ * or a converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_delete(struct efivar_entry *entry)
+{
+ efi_status_t status;
+ int err;
+
+ err = efivar_lock();
+ if (err)
+ return err;
+
+ status = efivar_set_variable_locked(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL, false);
+ efivar_unlock();
+ if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND))
+ return efi_status_to_err(status);
+
+ return 0;
+}
+
+/**
+ * efivar_entry_size - obtain the size of a variable
+ * @entry: entry for this variable
+ * @size: location to store the variable's size
+ */
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+{
+ efi_status_t status;
+ int err;
+
+ *size = 0;
+
+ err = efivar_lock();
+ if (err)
+ return err;
+
+ status = efivar_get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid, NULL, size, NULL);
+ efivar_unlock();
+
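+ /*
+ * GetVariable() with a zero-sized/NULL data buffer is expected to
+ * fail with EFI_BUFFER_TOO_SMALL and report the required size in
+ * *size, so that status is treated as success here.
+ */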
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ return 0;
+}
+
+/**
+ * __efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ */
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+{
+ efi_status_t status;
+
+ status = efivar_get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
+
+ return efi_status_to_err(status);
+}
+
+/**
+ * efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ */
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+{
+ int err;
+
+ err = efivar_lock();
+ if (err)
+ return err;
+ err = __efivar_entry_get(entry, attributes, size, data);
+ efivar_unlock();
+
+ return err;
+}
+
+/**
+ * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
+ * @entry: entry containing variable to set and get
+ * @attributes: attributes of variable to be written
+ * @size: size of data buffer
+ * @data: buffer containing data to write
+ * @set: did the set_variable() call succeed?
+ *
+ * This is a pretty special (complex) function. See efivarfs_file_write().
+ *
+ * Atomically call set_variable() for @entry and if the call is
+ * successful, return the new size of the variable from get_variable()
+ * in @size. The success of set_variable() is indicated by @set.
+ *
+ * Returns 0 on success, -EINVAL if the variable data is invalid,
+ * -ENOSPC if the firmware does not have enough available space, or a
+ * converted EFI status code if either of set_variable() or
+ * get_variable() fail.
+ *
+ * If the EFI variable does not exist when calling set_variable()
+ * (EFI_NOT_FOUND), the converted error code is returned as well.
+ */
+int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ unsigned long *size, void *data, bool *set)
+{
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t *vendor = &entry->var.VendorGuid;
+ efi_status_t status;
+ int err;
+
+ *set = false;
+
+ if (efivar_validate(*vendor, name, data, *size) == false)
+ return -EINVAL;
+
+ /*
+ * The lock here protects the get_variable call and the
+ * conditional set_variable call
+ */
+ err = efivar_lock();
+ if (err)
+ return err;
+
+ status = efivar_set_variable_locked(name, vendor, attributes, *size,
+ data, false);
+ if (status != EFI_SUCCESS) {
+ err = efi_status_to_err(status);
+ goto out;
+ }
+
+ *set = true;
+
+ /*
+ * Writing to the variable may have caused a change in size (which
+ * could either be an append or an overwrite), or the variable to be
+ * deleted. Perform a GetVariable() so we can tell what actually
+ * happened.
+ */
+ *size = 0;
+ status = efivar_get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ NULL, size, NULL);
+
+ efivar_unlock();
+
+ if (status && status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ return 0;
+
+out:
+ efivar_unlock();
+ return err;
+
+}
diff --git a/fs/efs/Kconfig b/fs/efs/Kconfig
index 2df1bac8b375..0833e533df9d 100644
--- a/fs/efs/Kconfig
+++ b/fs/efs/Kconfig
@@ -2,6 +2,7 @@
config EFS_FS
tristate "EFS file system support (read only)"
depends on BLOCK
+ select BUFFER_HEAD
help
EFS is an older file system used for non-ISO9660 CD-ROMs and hard
disk partitions by SGI's IRIX operating system (IRIX 6.0 and newer
diff --git a/fs/efs/efs.h b/fs/efs/efs.h
index 13a4d9622633..918d2b9abb76 100644
--- a/fs/efs/efs.h
+++ b/fs/efs/efs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 1999 Al Smith
+ * Copyright (c) 1999 Al Smith, <Al.Smith@aeschi.ch.eu.org>
*
* Portions derived from work (c) 1995,1996 Christian Vogelgsang.
* Portions derived from IRIX header files (c) 1988 Silicon Graphics
@@ -19,9 +19,6 @@
#define EFS_VERSION "1.0a"
-static const char cprt[] = "EFS: "EFS_VERSION" - (c) 1999 Al Smith <Al.Smith@aeschi.ch.eu.org>";
-
-
/* 1 block is 512 bytes */
#define EFS_BLOCKSIZE_BITS 9
#define EFS_BLOCKSIZE (1 << EFS_BLOCKSIZE_BITS)
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 89e73a6f0d36..28407578f83a 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -14,16 +14,18 @@
#include "efs.h"
#include <linux/efs_fs_sb.h>
-static int efs_readpage(struct file *file, struct page *page)
+static int efs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,efs_get_block);
+ return block_read_full_folio(folio, efs_get_block);
}
+
static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,efs_get_block);
}
+
static const struct address_space_operations efs_aops = {
- .readpage = efs_readpage,
+ .read_folio = efs_read_folio,
.bmap = _efs_bmap
};
@@ -60,7 +62,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
inode = iget_locked(super, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
in = INODE_INFO(inode);
@@ -101,10 +103,9 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid));
i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid));
inode->i_size = be32_to_cpu(efs_inode->di_size);
- inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
- inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
- inode->i_ctime.tv_sec = be32_to_cpu(efs_inode->di_ctime);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+ inode_set_atime(inode, be32_to_cpu(efs_inode->di_atime), 0);
+ inode_set_mtime(inode, be32_to_cpu(efs_inode->di_mtime), 0);
+ inode_set_ctime(inode, be32_to_cpu(efs_inode->di_ctime), 0);
/* this is the number of blocks in the file */
if (inode->i_size == 0) {
@@ -310,4 +311,5 @@ efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
return 0;
}
+MODULE_DESCRIPTION("Extent File System (efs)");
MODULE_LICENSE("GPL");
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 62b155b9366b..c59086b7eabf 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -14,19 +14,13 @@
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
-
+#include <linux/fs_context.h>
#include "efs.h"
#include <linux/efs_vh.h>
#include <linux/efs_fs_sb.h>
static int efs_statfs(struct dentry *dentry, struct kstatfs *buf);
-static int efs_fill_super(struct super_block *s, void *d, int silent);
-
-static struct dentry *efs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, efs_fill_super);
-}
+static int efs_init_fs_context(struct fs_context *fc);
static void efs_kill_sb(struct super_block *s)
{
@@ -35,15 +29,6 @@ static void efs_kill_sb(struct super_block *s)
kfree(sbi);
}
-static struct file_system_type efs_fs_type = {
- .owner = THIS_MODULE,
- .name = "efs",
- .mount = efs_mount,
- .kill_sb = efs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("efs");
-
static struct pt_types sgi_pt_types[] = {
{0x00, "SGI vh"},
{0x01, "SGI trkrepl"},
@@ -63,13 +48,24 @@ static struct pt_types sgi_pt_types[] = {
{0, NULL}
};
+/*
+ * File system definition and registration.
+ */
+static struct file_system_type efs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "efs",
+ .kill_sb = efs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = efs_init_fs_context,
+};
+MODULE_ALIAS_FS("efs");
static struct kmem_cache * efs_inode_cachep;
static struct inode *efs_alloc_inode(struct super_block *sb)
{
struct efs_inode_info *ei;
- ei = kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, efs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
@@ -91,8 +87,8 @@ static int __init init_inodecache(void)
{
efs_inode_cachep = kmem_cache_create("efs_inode_cache",
sizeof(struct efs_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
- SLAB_ACCOUNT, init_once);
+ SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
+ init_once);
if (efs_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -108,21 +104,14 @@ static void destroy_inodecache(void)
kmem_cache_destroy(efs_inode_cachep);
}
-static int efs_remount(struct super_block *sb, int *flags, char *data)
-{
- sync_filesystem(sb);
- *flags |= SB_RDONLY;
- return 0;
-}
-
static const struct super_operations efs_superblock_operations = {
.alloc_inode = efs_alloc_inode,
.free_inode = efs_free_inode,
.statfs = efs_statfs,
- .remount_fs = efs_remount,
};
static const struct export_operations efs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = efs_fh_to_dentry,
.fh_to_parent = efs_fh_to_parent,
.get_parent = efs_get_parent,
@@ -248,26 +237,27 @@ static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super) {
return 0;
}
-static int efs_fill_super(struct super_block *s, void *d, int silent)
+static int efs_fill_super(struct super_block *s, struct fs_context *fc)
{
struct efs_sb_info *sb;
struct buffer_head *bh;
struct inode *root;
- sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
+ sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
if (!sb)
return -ENOMEM;
s->s_fs_info = sb;
s->s_time_min = 0;
s->s_time_max = U32_MAX;
-
+
s->s_magic = EFS_SUPER_MAGIC;
if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
pr_err("device does not support %d byte blocks\n",
EFS_BLOCKSIZE);
- return -EINVAL;
+ return invalf(fc, "device does not support %d byte blocks\n",
+ EFS_BLOCKSIZE);
}
-
+
/* read the vh (volume header) block */
bh = sb_bread(s, 0);
@@ -293,7 +283,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
pr_err("cannot read superblock\n");
return -EIO;
}
-
+
if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) {
#ifdef DEBUG
pr_warn("invalid superblock at block %u\n",
@@ -327,6 +317,34 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
return 0;
}
+static int efs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, efs_fill_super);
+}
+
+static int efs_reconfigure(struct fs_context *fc)
+{
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
+
+ return 0;
+}
+
+static const struct fs_context_operations efs_context_opts = {
+ .get_tree = efs_get_tree,
+ .reconfigure = efs_reconfigure,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int efs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &efs_context_opts;
+
+ return 0;
+}
+
static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) {
struct super_block *sb = dentry->d_sb;
struct efs_sb_info *sbi = SUPER_INFO(sb);
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index 923eb91654d5..7749feded722 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -12,11 +12,11 @@
#include <linux/buffer_head.h>
#include "efs.h"
-static int efs_symlink_readpage(struct file *file, struct page *page)
+static int efs_symlink_read_folio(struct file *file, struct folio *folio)
{
- char *link = page_address(page);
- struct buffer_head * bh;
- struct inode * inode = page->mapping->host;
+ char *link = folio_address(folio);
+ struct buffer_head *bh;
+ struct inode *inode = folio->mapping->host;
efs_block_t size = inode->i_size;
int err;
@@ -39,15 +39,12 @@ static int efs_symlink_readpage(struct file *file, struct page *page)
brelse(bh);
}
link[size] = '\0';
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ err = 0;
fail:
- SetPageError(page);
- unlock_page(page);
+ folio_end_read(folio, err == 0);
return err;
}
const struct address_space_operations efs_symlink_aops = {
- .readpage = efs_symlink_readpage
+ .read_folio = efs_symlink_read_folio
};
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index f57255ab88ed..d81f3318417d 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -3,8 +3,18 @@
config EROFS_FS
tristate "EROFS filesystem support"
depends on BLOCK
+ select CACHEFILES if EROFS_FS_ONDEMAND
+ select CRC32
+ select CRYPTO if EROFS_FS_ZIP_ACCEL
+ select CRYPTO_DEFLATE if EROFS_FS_ZIP_ACCEL
select FS_IOMAP
- select LIBCRC32C
+ select LZ4_DECOMPRESS if EROFS_FS_ZIP
+ select NETFS_SUPPORT if EROFS_FS_ONDEMAND
+ select XXHASH if EROFS_FS_XATTR
+ select XZ_DEC if EROFS_FS_ZIP_LZMA
+ select XZ_DEC_MICROLZMA if EROFS_FS_ZIP_LZMA
+ select ZLIB_INFLATE if EROFS_FS_ZIP_DEFLATE
+ select ZSTD_DECOMPRESS if EROFS_FS_ZIP_ZSTD
help
EROFS (Enhanced Read-Only File System) is a lightweight read-only
file system with modern designs (e.g. no buffer heads, inline
@@ -13,15 +23,15 @@ config EROFS_FS
smartphones with Android OS, LiveCDs and high-density hosts with
numerous containers;
- It also provides fixed-sized output compression support in order to
- improve storage density as well as keep relatively higher compression
- ratios and implements in-place decompression to reuse the file page
- for compressed data temporarily with proper strategies, which is
- quite useful to ensure guaranteed end-to-end runtime decompression
- performance under extremely memory pressure without extra cost.
+ It also provides transparent compression and deduplication support to
+ improve storage density and maintain relatively high compression
+ ratios, and it implements in-place decompression to temporarily reuse
+ page cache for compressed data using proper strategies, which is
+ quite useful for ensuring guaranteed end-to-end runtime decompression
+ performance under extreme memory pressure without extra cost.
See the documentation at <file:Documentation/filesystems/erofs.rst>
- for more details.
+ and the web pages at <https://erofs.docs.kernel.org> for more details.
If unsure, say N.
@@ -73,28 +83,114 @@ config EROFS_FS_SECURITY
If you are not using a security module, say N.
+config EROFS_FS_BACKED_BY_FILE
+ bool "File-backed EROFS filesystem support"
+ depends on EROFS_FS
+ default y
+ help
+ This allows EROFS to use filesystem image files directly, without
+ the intercession of loopback block devices or the like. It is
+ particularly useful for container images with numerous blobs and
+ other sandboxes, where managing loop devices is cumbersome. It can
+ also simplify the error-prone lifetime management of otherwise
+ unnecessary virtual block devices.
+
+ Note that this feature, along with ongoing fanotify pre-content
+ hooks, will eventually replace "EROFS over fscache."
+
+ If you don't want to enable this feature, say N.
+
config EROFS_FS_ZIP
bool "EROFS Data Compression Support"
depends on EROFS_FS
- select LZ4_DECOMPRESS
default y
help
- Enable fixed-sized output compression for EROFS.
+ Enable transparent compression support for EROFS file systems.
If you don't want to enable compression feature, say N.
config EROFS_FS_ZIP_LZMA
bool "EROFS LZMA compressed data support"
depends on EROFS_FS_ZIP
- select XZ_DEC
- select XZ_DEC_MICROLZMA
help
Saying Y here includes support for reading EROFS file systems
- containing LZMA compressed data, specifically called microLZMA. it
- gives better compression ratios than the LZ4 algorithm, at the
+ containing LZMA compressed data, specifically called microLZMA. It
+ gives better compression ratios than the default LZ4 format, at the
expense of more CPU overhead.
- LZMA support is an experimental feature for now and so most file
- systems will be readable without selecting this option.
+ If unsure, say N.
+
+config EROFS_FS_ZIP_DEFLATE
+ bool "EROFS DEFLATE compressed data support"
+ depends on EROFS_FS_ZIP
+ help
+ Saying Y here includes support for reading EROFS file systems
+ containing DEFLATE compressed data. It gives better compression
+ ratios than the default LZ4 format, at the cost of more CPU
+ overhead.
+
+ DEFLATE support is an experimental feature for now and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config EROFS_FS_ZIP_ZSTD
+ bool "EROFS Zstandard compressed data support"
+ depends on EROFS_FS_ZIP
+ help
+ Saying Y here includes support for reading EROFS file systems
+ containing Zstandard compressed data. It gives better compression
+ ratios than the default LZ4 format, at the cost of more CPU
+ overhead.
+
+ Zstandard support is an experimental feature for now and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config EROFS_FS_ZIP_ACCEL
+ bool "EROFS hardware decompression support"
+ depends on EROFS_FS_ZIP
+ help
+ Saying Y here includes hardware accelerator support for reading
+ EROFS file systems containing compressed data. It gives better
+ decompression speed than the software-implemented decompression, with
+ lower CPU overhead.
+
+ Hardware accelerator support is an experimental feature for now and
+ file systems are still readable without selecting this option.
+
+ If unsure, say N.
+
+config EROFS_FS_ONDEMAND
+ bool "EROFS fscache-based on-demand read support (deprecated)"
+ depends on EROFS_FS
+ select FSCACHE
+ select CACHEFILES_ONDEMAND
+ help
+ This permits EROFS to use fscache-backed data blobs with on-demand
+ read support.
+
+ It is now deprecated and scheduled to be removed from the kernel
+ after fanotify pre-content hooks are landed.
+
+ If unsure, say N.
+
+config EROFS_FS_PCPU_KTHREAD
+ bool "EROFS per-cpu decompression kthread workers"
+ depends on EROFS_FS_ZIP
+ help
+ Saying Y here enables a per-CPU kthread worker pool to carry out
+ async decompression for lower latencies on some architectures.
+
+ If unsure, say N.
+
+config EROFS_FS_PCPU_KTHREAD_HIPRI
+ bool "EROFS high priority per-CPU kthread workers"
+ depends on EROFS_FS_ZIP && EROFS_FS_PCPU_KTHREAD
+ default y
+ help
+ This permits EROFS to configure per-CPU kthread workers to run
+ at higher priority.
If unsure, say N.
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
index 756fe2d65272..549abc424763 100644
--- a/fs/erofs/Makefile
+++ b/fs/erofs/Makefile
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_EROFS_FS) += erofs.o
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o pcpubuf.o
+erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o
erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o
erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
+erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
+erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o
+erofs-$(CONFIG_EROFS_FS_ZIP_ACCEL) += decompressor_crypto.o
+erofs-$(CONFIG_EROFS_FS_BACKED_BY_FILE) += fileio.o
+erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 579406504919..84c8e52581f4 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -11,57 +11,37 @@
struct z_erofs_decompress_req {
struct super_block *sb;
struct page **in, **out;
-
- unsigned short pageofs_out;
+ unsigned int inpages, outpages;
+ unsigned short pageofs_in, pageofs_out;
unsigned int inputsize, outputsize;
- /* indicate the algorithm will be used for decompression */
- unsigned int alg;
- bool inplace_io, partial_decoding;
+ unsigned int alg; /* the algorithm for decompression */
+ bool inplace_io, partial_decoding, fillgaps;
+ gfp_t gfp; /* allocation flags for extra temporary buffers */
};
struct z_erofs_decompressor {
- int (*decompress)(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
+ int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
+ void *data, int size);
+ const char *(*decompress)(struct z_erofs_decompress_req *rq,
+ struct page **pagepool);
+ int (*init)(void);
+ void (*exit)(void);
char *name;
};
-/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE (-1UL << 2)
-#define Z_EROFS_PREALLOCATED_PAGE (-2UL << 2)
-
-/*
- * For all pages in a pcluster, page->private should be one of
- * Type Last 2bits page->private
- * short-lived page 00 Z_EROFS_SHORTLIVED_PAGE
- * preallocated page (tryalloc) 00 Z_EROFS_PREALLOCATED_PAGE
- * cached/managed page 00 pointer to z_erofs_pcluster
- * online page (file-backed, 01/10/11 sub-index << 2 | count
- * some pages can be used for inplace I/O)
- *
- * page->mapping should be one of
- * Type page->mapping
- * short-lived page NULL
- * preallocated page NULL
- * cached/managed page non-NULL or NULL (invalidated/truncated page)
- * online page non-NULL
- *
- * For all managed pages, PG_private should be set with 1 extra refcount,
- * which is used for page reclaim / migration.
- */
+#define Z_EROFS_PREALLOCATED_FOLIO ((void *)(-2UL << 2))
/*
- * short-lived pages are pages directly from buddy system with specific
- * page->private (no need to set PagePrivate since these are non-LRU /
- * non-movable pages and bypass reclaim / migration code).
+ * Currently, short-lived pages are pages directly from buddy system
+ * with specific page->private (Z_EROFS_SHORTLIVED_PAGE).
+ * In the future world of Memdescs, it should be type 0 (Misc) memory
+ * which type can be checked with a new helper.
*/
static inline bool z_erofs_is_shortlived_page(struct page *page)
{
- if (page->private != Z_EROFS_SHORTLIVED_PAGE)
- return false;
-
- DBG_BUGON(page->mapping);
- return true;
+ return page->private == Z_EROFS_SHORTLIVED_PAGE;
}
static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
@@ -69,28 +49,41 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
{
if (!z_erofs_is_shortlived_page(page))
return false;
-
- /* short-lived pages should not be used by others at the same time */
- if (page_ref_count(page) > 1) {
- put_page(page);
- } else {
- /* follow the pcluster rule above. */
- erofs_pagepool_add(pagepool, page);
- }
+ erofs_pagepool_add(pagepool, page);
return true;
}
-#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
- struct page *page)
-{
- return page->mapping == MNGD_MAPPING(sbi);
-}
+extern const struct z_erofs_decompressor z_erofs_lzma_decomp;
+extern const struct z_erofs_decompressor z_erofs_deflate_decomp;
+extern const struct z_erofs_decompressor z_erofs_zstd_decomp;
+extern const struct z_erofs_decompressor *z_erofs_decomp[];
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
+struct z_erofs_stream_dctx {
+ struct z_erofs_decompress_req *rq;
+ int no, ni; /* the current {en,de}coded page # */
-/* prototypes for specific algorithms */
-int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
+ unsigned int avail_out; /* remaining bytes in the decoded buffer */
+ unsigned int inbuf_pos, inbuf_sz;
+ /* current status of the encoded buffer */
+ u8 *kin, *kout; /* buffer mapped pointers */
+ void *bounce; /* bounce buffer for inplace I/Os */
+ bool bounced; /* is the bounce buffer used now? */
+};
+
+const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
+ void **dst, void **src, struct page **pgpl);
+const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
+ const char *padbuf, unsigned int padbufsize);
+int __init z_erofs_init_decompressor(void);
+void z_erofs_exit_decompressor(void);
+int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl);
+int z_erofs_crypto_enable_engine(const char *name, int len);
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+void z_erofs_crypto_disable_all_engines(void);
+int z_erofs_crypto_show_engines(char *buf, int size, char sep);
+#else
+static inline void z_erofs_crypto_disable_all_engines(void) {}
+static inline int z_erofs_crypto_show_engines(char *buf, int size, char sep) { return 0; }
+#endif
#endif
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 0e35ef3f9f3d..bb13c4cb8455 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -5,100 +5,121 @@
* Copyright (C) 2021, Alibaba Cloud
*/
#include "internal.h"
-#include <linux/prefetch.h>
-#include <linux/dax.h>
+#include <linux/sched/mm.h>
#include <trace/events/erofs.h>
-struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
+void erofs_unmap_metabuf(struct erofs_buf *buf)
{
- struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
- struct page *page;
-
- page = read_cache_page_gfp(mapping, blkaddr,
- mapping_gfp_constraint(mapping, ~__GFP_FS));
- /* should already be PageUptodate */
- if (!IS_ERR(page))
- lock_page(page);
- return page;
+ if (!buf->base)
+ return;
+ kunmap_local(buf->base);
+ buf->base = NULL;
}
-static int erofs_map_blocks_flatmode(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
+void erofs_put_metabuf(struct erofs_buf *buf)
{
- int err = 0;
- erofs_blk_t nblocks, lastblk;
- u64 offset = map->m_la;
- struct erofs_inode *vi = EROFS_I(inode);
- bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
-
- trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
-
- nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
- lastblk = nblocks - tailendpacking;
-
- /* there is no hole in flatmode */
- map->m_flags = EROFS_MAP_MAPPED;
+ if (!buf->page)
+ return;
+ erofs_unmap_metabuf(buf);
+ folio_put(page_folio(buf->page));
+ buf->page = NULL;
+}
- if (offset < blknr_to_addr(lastblk)) {
- map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
- map->m_plen = blknr_to_addr(lastblk) - offset;
- } else if (tailendpacking) {
- /* 2 - inode inline B: inode, [xattrs], inline last blk... */
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
+{
+ pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
+ struct folio *folio = NULL;
- map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
- vi->xattr_isize + erofs_blkoff(map->m_la);
- map->m_plen = inode->i_size - offset;
+ if (buf->page) {
+ folio = page_folio(buf->page);
+ if (folio_file_page(folio, index) != buf->page)
+ erofs_unmap_metabuf(buf);
+ }
+ if (!folio || !folio_contains(folio, index)) {
+ erofs_put_metabuf(buf);
+ folio = read_mapping_folio(buf->mapping, index, buf->file);
+ if (IS_ERR(folio))
+ return folio;
+ }
+ buf->page = folio_file_page(folio, index);
+ if (!need_kmap)
+ return NULL;
+ if (!buf->base)
+ buf->base = kmap_local_page(buf->page);
+ return buf->base + (offset & ~PAGE_MASK);
+}
- /* inline data should be located in one meta block */
- if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
- erofs_err(inode->i_sb,
- "inline data cross block boundary @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- err = -EFSCORRUPTED;
- goto err_out;
- }
+int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb,
+ bool in_metabox)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
- map->m_flags |= EROFS_MAP_META;
- } else {
- erofs_err(inode->i_sb,
- "internal error @ nid: %llu (size %llu), m_la 0x%llx",
- vi->nid, inode->i_size, map->m_la);
- DBG_BUGON(1);
- err = -EIO;
- goto err_out;
+ buf->file = NULL;
+ if (in_metabox) {
+ if (unlikely(!sbi->metabox_inode))
+ return -EFSCORRUPTED;
+ buf->mapping = sbi->metabox_inode->i_mapping;
+ return 0;
}
+ buf->off = sbi->dif0.fsoff;
+ if (erofs_is_fileio_mode(sbi)) {
+ buf->file = sbi->dif0.file; /* some fs like FUSE needs it */
+ buf->mapping = buf->file->f_mapping;
+ } else if (erofs_is_fscache_mode(sb))
+ buf->mapping = sbi->dif0.fscache->inode->i_mapping;
+ else
+ buf->mapping = sb->s_bdev->bd_mapping;
+ return 0;
+}
- map->m_llen = map->m_plen;
-err_out:
- trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
- return err;
+void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
+ erofs_off_t offset, bool in_metabox)
+{
+ int err;
+
+ err = erofs_init_metabuf(buf, sb, in_metabox);
+ if (err)
+ return ERR_PTR(err);
+ return erofs_bread(buf, offset, true);
}
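+/*
+ * Illustrative usage of the metabuf helpers above (mirroring the callers
+ * later in this file):
+ *
+ *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ *	void *ptr = erofs_read_metabuf(&buf, sb, pos, false);
+ *
+ *	if (IS_ERR(ptr))
+ *		return PTR_ERR(ptr);
+ *	... use the mapped metadata at ptr ...
+ *	erofs_put_metabuf(&buf);
+ */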
-static int erofs_map_blocks(struct inode *inode,
- struct erofs_map_blocks *map, int flags)
+int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct super_block *sb = inode->i_sb;
+ unsigned int unit, blksz = sb->s_blocksize;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_inode_chunk_index *idx;
- struct page *page;
- u64 chunknr;
- unsigned int unit;
+ erofs_blk_t startblk, addrmask;
+ bool tailpacking;
erofs_off_t pos;
+ u64 chunknr;
int err = 0;
+ trace_erofs_map_blocks_enter(inode, map, 0);
map->m_deviceid = 0;
- if (map->m_la >= inode->i_size) {
- /* leave out-of-bound access unmapped */
- map->m_flags = 0;
- map->m_plen = 0;
+ map->m_flags = 0;
+ if (map->m_la >= inode->i_size)
goto out;
- }
- if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
- return erofs_map_blocks_flatmode(inode, map, flags);
+ if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
+ tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
+ if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
+ goto out;
+ pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);
+
+ map->m_flags = EROFS_MAP_MAPPED;
+ if (map->m_la < pos) {
+ map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
+ map->m_llen = pos - map->m_la;
+ } else {
+ map->m_pa = erofs_iloc(inode) + vi->inode_isize +
+ vi->xattr_isize + erofs_blkoff(sb, map->m_la);
+ map->m_llen = inode->i_size - map->m_la;
+ map->m_flags |= EROFS_MAP_META;
+ }
+ goto out;
+ }
if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
unit = sizeof(*idx); /* chunk index */
@@ -106,60 +127,70 @@ static int erofs_map_blocks(struct inode *inode,
unit = EROFS_BLOCK_MAP_ENTRY_SIZE; /* block map */
chunknr = map->m_la >> vi->chunkbits;
- pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+ pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
vi->xattr_isize, unit) + unit * chunknr;
- page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
- if (IS_ERR(page))
- return PTR_ERR(page);
-
+ idx = erofs_read_metabuf(&buf, sb, pos, erofs_inode_in_metabox(inode));
+ if (IS_ERR(idx)) {
+ err = PTR_ERR(idx);
+ goto out;
+ }
map->m_la = chunknr << vi->chunkbits;
- map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
- roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
-
- /* handle block map */
- if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
- __le32 *blkaddr = page_address(page) + erofs_blkoff(pos);
-
- if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
- map->m_flags = 0;
- } else {
- map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
+ map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
+ round_up(inode->i_size - map->m_la, blksz));
+ if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
+ addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
+ BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
+ startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
+ le32_to_cpu(idx->startblk_lo)) & addrmask;
+ if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
+ map->m_deviceid = le16_to_cpu(idx->device_id) &
+ EROFS_SB(sb)->device_id_mask;
+ map->m_pa = erofs_pos(sb, startblk);
+ map->m_flags = EROFS_MAP_MAPPED;
+ }
+ } else {
+ startblk = le32_to_cpu(*(__le32 *)idx);
+ if (startblk != (u32)EROFS_NULL_ADDR) {
+ map->m_pa = erofs_pos(sb, startblk);
map->m_flags = EROFS_MAP_MAPPED;
}
- goto out_unlock;
- }
- /* parse chunk indexes */
- idx = page_address(page) + erofs_blkoff(pos);
- switch (le32_to_cpu(idx->blkaddr)) {
- case EROFS_NULL_ADDR:
- map->m_flags = 0;
- break;
- default:
- map->m_deviceid = le16_to_cpu(idx->device_id) &
- EROFS_SB(sb)->device_id_mask;
- map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
- map->m_flags = EROFS_MAP_MAPPED;
- break;
}
-out_unlock:
- unlock_page(page);
- put_page(page);
+ erofs_put_metabuf(&buf);
out:
- map->m_llen = map->m_plen;
+ if (!err) {
+ map->m_plen = map->m_llen;
+ /* inline data should be located in the same meta block */
+ if ((map->m_flags & EROFS_MAP_META) &&
+ erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
+ erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ }
+ trace_erofs_map_blocks_exit(inode, map, 0, err);
return err;
}
+static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
+ struct super_block *sb, struct erofs_device_info *dif)
+{
+ map->m_sb = sb;
+ map->m_dif = dif;
+ map->m_bdev = NULL;
+ if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
+ map->m_bdev = file_bdev(dif->file);
+}
+
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
struct erofs_device_info *dif;
+ erofs_off_t startoff;
int id;
- /* primary device by default */
- map->m_bdev = sb->s_bdev;
- map->m_daxdev = EROFS_SB(sb)->dax_dev;
-
+ erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
+ map->m_bdev = sb->s_bdev; /* use s_bdev for the primary device */
if (map->m_deviceid) {
down_read(&devs->rwsem);
dif = idr_find(&devs->tree, map->m_deviceid - 1);
@@ -167,24 +198,24 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return -ENODEV;
}
- map->m_bdev = dif->bdev;
- map->m_daxdev = dif->dax_dev;
+ if (devs->flatdev) {
+ map->m_pa += erofs_pos(sb, dif->uniaddr);
+ up_read(&devs->rwsem);
+ return 0;
+ }
+ erofs_fill_from_devinfo(map, sb, dif);
up_read(&devs->rwsem);
- } else if (devs->extra_devices) {
+ } else if (devs->extra_devices && !devs->flatdev) {
down_read(&devs->rwsem);
idr_for_each_entry(&devs->tree, dif, id) {
- erofs_off_t startoff, length;
-
- if (!dif->mapped_blkaddr)
+ if (!dif->uniaddr)
continue;
- startoff = blknr_to_addr(dif->mapped_blkaddr);
- length = blknr_to_addr(dif->blocks);
+ startoff = erofs_pos(sb, dif->uniaddr);
if (map->m_pa >= startoff &&
- map->m_pa < startoff + length) {
+ map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
map->m_pa -= startoff;
- map->m_bdev = dif->bdev;
- map->m_daxdev = dif->dax_dev;
+ erofs_fill_from_devinfo(map, sb, dif);
break;
}
}
@@ -193,57 +224,103 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
return 0;
}
+/*
+ * bit 30: I/O error occurred on this folio
+ * bit 29: CPU has dirty data in D-cache (needs aliasing handling);
+ * bit 0 - 29: remaining parts to complete this folio
+ */
+#define EROFS_ONLINEFOLIO_EIO 30
+#define EROFS_ONLINEFOLIO_DIRTY 29
+
+void erofs_onlinefolio_init(struct folio *folio)
+{
+ union {
+ atomic_t o;
+ void *v;
+ } u = { .o = ATOMIC_INIT(1) };
+
+ folio->private = u.v; /* valid only if file-backed folio is locked */
+}
+
+void erofs_onlinefolio_split(struct folio *folio)
+{
+ atomic_inc((atomic_t *)&folio->private);
+}
+
+void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
+{
+ int orig, v;
+
+ do {
+ orig = atomic_read((atomic_t *)&folio->private);
+ DBG_BUGON(orig <= 0);
+ v = dirty << EROFS_ONLINEFOLIO_DIRTY;
+ v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
+ } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
+
+ if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
+ return;
+ folio->private = 0;
+ if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
+ flush_dcache_folio(folio);
+ folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
+}
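+/*
+ * Illustrative lifecycle of the counters above: _init() starts the private
+ * count at 1, each _split() adds one part, and each _end() drops one;
+ * once the low bits reach zero the folio read is completed, with bit 30
+ * recording any I/O error and bit 29 whether the D-cache must be flushed.
+ */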
+
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
int ret;
+ struct super_block *sb = inode->i_sb;
struct erofs_map_blocks map;
struct erofs_map_dev mdev;
map.m_la = offset;
map.m_llen = length;
-
- ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ ret = erofs_map_blocks(inode, &map);
if (ret < 0)
return ret;
- mdev = (struct erofs_map_dev) {
- .m_deviceid = map.m_deviceid,
- .m_pa = map.m_pa,
- };
- ret = erofs_map_dev(inode->i_sb, &mdev);
- if (ret)
- return ret;
-
- iomap->bdev = mdev.m_bdev;
- iomap->dax_dev = mdev.m_daxdev;
iomap->offset = map.m_la;
iomap->length = map.m_llen;
iomap->flags = 0;
iomap->private = NULL;
-
+ iomap->addr = IOMAP_NULL_ADDR;
if (!(map.m_flags & EROFS_MAP_MAPPED)) {
iomap->type = IOMAP_HOLE;
- iomap->addr = IOMAP_NULL_ADDR;
- if (!iomap->length)
- iomap->length = length;
return 0;
}
+ if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(inode)) {
+ mdev = (struct erofs_map_dev) {
+ .m_deviceid = map.m_deviceid,
+ .m_pa = map.m_pa,
+ };
+ ret = erofs_map_dev(sb, &mdev);
+ if (ret)
+ return ret;
+
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = mdev.m_dif->dax_dev;
+ else
+ iomap->bdev = mdev.m_bdev;
+ iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
+ if (flags & IOMAP_DAX)
+ iomap->addr += mdev.m_dif->dax_part_off;
+ }
+
if (map.m_flags & EROFS_MAP_META) {
- struct page *ipage;
+ void *ptr;
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
iomap->type = IOMAP_INLINE;
- ipage = erofs_get_meta_page(inode->i_sb,
- erofs_blknr(mdev.m_pa));
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
- iomap->inline_data = page_address(ipage) +
- erofs_blkoff(mdev.m_pa);
- iomap->private = ipage;
+ ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
+ erofs_inode_in_metabox(inode));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+ iomap->inline_data = ptr;
+ iomap->private = buf.base;
} else {
iomap->type = IOMAP_MAPPED;
- iomap->addr = mdev.m_pa;
}
return 0;
}
@@ -251,12 +328,16 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
ssize_t written, unsigned int flags, struct iomap *iomap)
{
- struct page *ipage = iomap->private;
+ void *ptr = iomap->private;
+
+ if (ptr) {
+ struct erofs_buf buf = {
+ .page = kmap_to_page(ptr),
+ .base = ptr,
+ };
- if (ipage) {
DBG_BUGON(iomap->type != IOMAP_INLINE);
- unlock_page(ipage);
- put_page(ipage);
+ erofs_put_metabuf(&buf);
} else {
DBG_BUGON(iomap->type == IOMAP_INLINE);
}
@@ -286,14 +367,20 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* since we dont have write or truncate flows, so no inode
* locking needs to be held at the moment.
*/
-static int erofs_readpage(struct file *file, struct page *page)
+static int erofs_read_folio(struct file *file, struct folio *folio)
{
- return iomap_readpage(page, &erofs_iomap_ops);
+ trace_erofs_read_folio(folio, true);
+
+ iomap_bio_read_folio(folio, &erofs_iomap_ops);
+ return 0;
}
static void erofs_readahead(struct readahead_control *rac)
{
- return iomap_readahead(rac, &erofs_iomap_ops);
+ trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
+ readahead_count(rac), true);
+
+ iomap_bio_readahead(rac, &erofs_iomap_ops);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
@@ -301,64 +388,44 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
return iomap_bmap(mapping, block, &erofs_iomap_ops);
}
-static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
- loff_t align = iocb->ki_pos | iov_iter_count(to) |
- iov_iter_alignment(to);
- struct block_device *bdev = inode->i_sb->s_bdev;
- unsigned int blksize_mask;
-
- if (bdev)
- blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
- else
- blksize_mask = (1 << inode->i_blkbits) - 1;
- if (align & blksize_mask)
- return -EINVAL;
- return 0;
-}
-
-static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
/* no need taking (shared) inode lock since it's a ro filesystem */
if (!iov_iter_count(to))
return 0;
#ifdef CONFIG_FS_DAX
- if (IS_DAX(iocb->ki_filp->f_mapping->host))
+ if (IS_DAX(inode))
return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
- if (iocb->ki_flags & IOCB_DIRECT) {
- int err = erofs_prepare_dio(iocb, to);
-
- if (!err)
- return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
- NULL, 0, 0);
- if (err < 0)
- return err;
- }
+ if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev)
+ return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
+ NULL, 0, NULL, 0);
return filemap_read(iocb, to, 0);
}
/* for uncompressed (aligned) files and raw access for other files */
-const struct address_space_operations erofs_raw_access_aops = {
- .readpage = erofs_readpage,
+const struct address_space_operations erofs_aops = {
+ .read_folio = erofs_read_folio,
.readahead = erofs_readahead,
.bmap = erofs_bmap,
.direct_IO = noop_direct_IO,
+ .release_folio = iomap_release_folio,
+ .invalidate_folio = iomap_invalidate_folio,
};
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
+ unsigned int order)
{
- return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
+ return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}
static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
- return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
+ return erofs_dax_huge_fault(vmf, 0);
}
static const struct vm_operations_struct erofs_dax_vm_ops = {
@@ -366,25 +433,54 @@ static const struct vm_operations_struct erofs_dax_vm_ops = {
.huge_fault = erofs_dax_huge_fault,
};
-static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
{
- if (!IS_DAX(file_inode(file)))
- return generic_file_readonly_mmap(file, vma);
+ if (!IS_DAX(file_inode(desc->file)))
+ return generic_file_readonly_mmap_prepare(desc);
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
return -EINVAL;
- vma->vm_ops = &erofs_dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ desc->vm_ops = &erofs_dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
return 0;
}
#else
-#define erofs_file_mmap generic_file_readonly_mmap
+#define erofs_file_mmap_prepare generic_file_readonly_mmap_prepare
+#endif
+
+static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct inode *inode = file->f_mapping->host;
+ const struct iomap_ops *ops = &erofs_iomap_ops;
+
+ if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
+#ifdef CONFIG_EROFS_FS_ZIP
+ ops = &z_erofs_iomap_report_ops;
+#else
+ return generic_file_llseek(file, offset, whence);
#endif
+ if (whence == SEEK_HOLE)
+ offset = iomap_seek_hole(inode, offset, ops);
+ else if (whence == SEEK_DATA)
+ offset = iomap_seek_data(inode, offset, ops);
+ else
+ return generic_file_llseek(file, offset, whence);
+
+ if (offset < 0)
+ return offset;
+ return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+}
+
const struct file_operations erofs_file_fops = {
- .llseek = generic_file_llseek,
+ .llseek = erofs_file_llseek,
.read_iter = erofs_file_read_iter,
- .mmap = erofs_file_mmap,
- .splice_read = generic_file_splice_read,
+ .unlocked_ioctl = erofs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = erofs_compat_ioctl,
+#endif
+ .mmap_prepare = erofs_file_mmap_prepare,
+ .get_unmapped_area = thp_get_unmapped_area,
+ .splice_read = filemap_splice_read,
};
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index bf37fc76b182..d5d090276391 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -2,25 +2,18 @@
/*
* Copyright (C) 2019 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2024 Alibaba Cloud
*/
#include "compress.h"
-#include <linux/module.h>
#include <linux/lz4.h>
-#ifndef LZ4_DISTANCE_MAX /* history window size */
-#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
-#endif
-
#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
-#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
-#endif
-int z_erofs_load_lz4_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lz4_cfgs *lz4, int size)
+static int z_erofs_load_lz4_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct z_erofs_lz4_cfgs *lz4 = data;
u16 distance;
if (lz4) {
@@ -34,12 +27,10 @@ int z_erofs_load_lz4_config(struct super_block *sb,
if (!sbi->lz4.max_pclusterblks) {
sbi->lz4.max_pclusterblks = 1; /* reserved case */
} else if (sbi->lz4.max_pclusterblks >
- Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
+ erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
erofs_err(sb, "too large lz4 pclusterblks %u",
sbi->lz4.max_pclusterblks);
return -EINVAL;
- } else if (sbi->lz4.max_pclusterblks >= 2) {
- erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
}
} else {
distance = le16_to_cpu(dsb->u1.lz4_max_distance);
@@ -49,7 +40,7 @@ int z_erofs_load_lz4_config(struct super_block *sb,
sbi->lz4.max_distance_pages = distance ?
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
LZ4_MAX_DISTANCE_PAGES;
- return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
+ return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}
/*
@@ -59,8 +50,6 @@ int z_erofs_load_lz4_config(struct super_block *sb,
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
struct page **pagepool)
{
- const unsigned int nr =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
BITS_PER_LONG)] = { 0 };
@@ -70,7 +59,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
unsigned int i, j, top;
top = 0;
- for (i = j = 0; i < nr; ++i, ++j) {
+ for (i = j = 0; i < rq->outpages; ++i, ++j) {
struct page *const page = rq->out[i];
struct page *victim;
@@ -78,7 +67,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
j = 0;
/* 'valid' bounced can only be tested after a complete round */
- if (test_bit(j, bounced)) {
+ if (!rq->fillgaps && test_bit(j, bounced)) {
DBG_BUGON(i < lz4_max_distance_pages);
DBG_BUGON(top >= lz4_max_distance_pages);
availables[top++] = rq->out[i - lz4_max_distance_pages];
@@ -86,14 +75,18 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
if (page) {
__clear_bit(j, bounced);
- if (kaddr) {
- if (kaddr + PAGE_SIZE == page_address(page))
+ if (!PageHighMem(page)) {
+ if (!i) {
+ kaddr = page_address(page);
+ continue;
+ }
+ if (kaddr &&
+ kaddr + PAGE_SIZE == page_address(page)) {
kaddr += PAGE_SIZE;
- else
- kaddr = NULL;
- } else if (!i) {
- kaddr = page_address(page);
+ continue;
+ }
}
+ kaddr = NULL;
continue;
}
kaddr = NULL;
@@ -101,10 +94,10 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
if (top) {
victim = availables[--top];
- get_page(victim);
} else {
- victim = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
+ victim = __erofs_allocpage(pagepool, rq->gfp, true);
+ if (!victim)
+ return -ENOMEM;
set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
}
rq->out[i] = victim;
@@ -112,109 +105,128 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
return kaddr ? 1 : 0;
}
-static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
- void *inpage, unsigned int *inputmargin, int *maptype,
- bool support_0padding)
+static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
+ void *inpage, void *out, unsigned int *inputmargin,
+ int *maptype, bool may_inplace)
{
- unsigned int nrpages_in, nrpages_out;
- unsigned int ofull, oend, inputsize, total, i, j;
+ unsigned int oend, omargin, cnt, i;
struct page **in;
- void *src, *tmp;
-
- inputsize = rq->inputsize;
- nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
- oend = rq->pageofs_out + rq->outputsize;
- ofull = PAGE_ALIGN(oend);
- nrpages_out = ofull >> PAGE_SHIFT;
-
- if (rq->inplace_io) {
- if (rq->partial_decoding || !support_0padding ||
- ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
- goto docopy;
-
- for (i = 0; i < nrpages_in; ++i) {
- DBG_BUGON(rq->in[i] == NULL);
- for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
- if (rq->out[j] == rq->in[i])
- goto docopy;
+ void *src;
+
+ /*
+ * If in-place I/O isn't used, for example, the bounce compressed cache
+ * can hold data for incomplete read requests. Just map the compressed
+ * buffer as well and decompress directly.
+ */
+ if (!rq->inplace_io) {
+ if (rq->inpages <= 1) {
+ *maptype = 0;
+ return inpage;
}
+ kunmap_local(inpage);
+ src = erofs_vm_map_ram(rq->in, rq->inpages);
+ if (!src)
+ return ERR_PTR(-ENOMEM);
+ *maptype = 1;
+ return src;
}
-
- if (nrpages_in <= 1) {
- *maptype = 0;
- return inpage;
+ /*
+ * Then, deal with in-place I/Os. The reasons why in-place I/O is useful
+ * are: (1) It minimizes memory footprint during the I/O submission,
+ * which is useful for slow storage (including network devices and
+ * low-end HDDs/eMMCs) with a lot of in-flight I/Os; (2) If in-place
+ * decompression can also be applied, it will reuse the unique buffer so
+ * that no extra CPU D-cache is polluted with temporary compressed data
+ * for extreme performance.
+ */
+ oend = rq->pageofs_out + rq->outputsize;
+ omargin = PAGE_ALIGN(oend) - oend;
+ if (!rq->partial_decoding && may_inplace &&
+ omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
+ for (i = 0; i < rq->inpages; ++i)
+ if (rq->out[rq->outpages - rq->inpages + i] !=
+ rq->in[i])
+ break;
+ if (i >= rq->inpages) {
+ kunmap_local(inpage);
+ *maptype = 3;
+ return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
+ }
}
- kunmap_atomic(inpage);
- might_sleep();
- src = erofs_vm_map_ram(rq->in, nrpages_in);
- if (!src)
- return ERR_PTR(-ENOMEM);
- *maptype = 1;
- return src;
-
-docopy:
- /* Or copy compressed data which can be overlapped to per-CPU buffer */
- in = rq->in;
- src = erofs_get_pcpubuf(nrpages_in);
+ /*
+ * If in-place decompression can't be applied, copy compressed data that
+ * may potentially overlap during decompression to a per-CPU buffer.
+ */
+ src = z_erofs_get_gbuf(rq->inpages);
if (!src) {
DBG_BUGON(1);
- kunmap_atomic(inpage);
+ kunmap_local(inpage);
return ERR_PTR(-EFAULT);
}
- tmp = src;
- total = rq->inputsize;
- while (total) {
- unsigned int page_copycnt =
- min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
-
+ for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
+ cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
if (!inpage)
- inpage = kmap_atomic(*in);
- memcpy(tmp, inpage + *inputmargin, page_copycnt);
- kunmap_atomic(inpage);
+ inpage = kmap_local_page(*in);
+ memcpy(src + i, inpage + *inputmargin, cnt);
+ kunmap_local(inpage);
inpage = NULL;
- tmp += page_copycnt;
- total -= page_copycnt;
- ++in;
*inputmargin = 0;
}
*maptype = 2;
return src;
}
-static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
- u8 *out)
+/*
+ * Get the exact on-disk size of the compressed data:
+ * - For LZ4, this only applies if the zero_padding feature is enabled (5.3+);
+ * - For other algorithms, zero_padding is always enabled.
+ */
+const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
+ const char *padbuf, unsigned int padbufsize)
+{
+ const char *padend;
+
+ padend = memchr_inv(padbuf, 0, padbufsize);
+ if (!padend)
+ return "compressed data start not found";
+ rq->inputsize -= padend - padbuf;
+ rq->pageofs_in += padend - padbuf;
+ return NULL;
+}
+
+static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst)
{
+ bool support_0padding = false, may_inplace = false;
unsigned int inputmargin;
- u8 *headpage, *src;
- bool support_0padding;
+ u8 *out, *headpage, *src;
+ const char *reason;
int ret, maptype;
DBG_BUGON(*rq->in == NULL);
- headpage = kmap_atomic(*rq->in);
- inputmargin = 0;
- support_0padding = false;
+ headpage = kmap_local_page(*rq->in);
- /* decompression inplace is only safe when 0padding is enabled */
- if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
+ /* LZ4 decompression inplace is only safe if zero_padding is enabled */
+ if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
support_0padding = true;
-
- while (!headpage[inputmargin & ~PAGE_MASK])
- if (!(++inputmargin & ~PAGE_MASK))
- break;
-
- if (inputmargin >= rq->inputsize) {
- kunmap_atomic(headpage);
- return -EIO;
+ reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+ min_t(unsigned int, rq->inputsize,
+ rq->sb->s_blocksize - rq->pageofs_in));
+ if (reason) {
+ kunmap_local(headpage);
+ return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
}
+ may_inplace = !((rq->pageofs_in + rq->inputsize) &
+ (rq->sb->s_blocksize - 1));
}
- rq->inputsize -= inputmargin;
- src = z_erofs_lz4_handle_inplace_io(rq, headpage, &inputmargin,
- &maptype, support_0padding);
+ inputmargin = rq->pageofs_in;
+ src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
+ &maptype, may_inplace);
if (IS_ERR(src))
return PTR_ERR(src);
+ out = dst + rq->pageofs_out;
/* legacy format could compress extra data in a pcluster. */
if (rq->partial_decoding || !support_0padding)
ret = LZ4_decompress_safe_partial(src + inputmargin, out,
@@ -224,134 +236,290 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
rq->inputsize, rq->outputsize);
if (ret != rq->outputsize) {
- erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
- ret, rq->inputsize, inputmargin, rq->outputsize);
-
- print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
- 16, 1, src + inputmargin, rq->inputsize, true);
- print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
- 16, 1, out, rq->outputsize, true);
-
if (ret >= 0)
memset(out + ret, 0, rq->outputsize - ret);
- ret = -EIO;
+ ret = -EFSCORRUPTED;
} else {
ret = 0;
}
if (maptype == 0) {
- kunmap_atomic(src);
+ kunmap_local(headpage);
} else if (maptype == 1) {
- vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
+ vm_unmap_ram(src, rq->inpages);
} else if (maptype == 2) {
- erofs_put_pcpubuf(src);
- } else {
+ z_erofs_put_gbuf(src);
+ } else if (maptype != 3) {
DBG_BUGON(1);
return -EFAULT;
}
return ret;
}
-static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+static const char *z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pagepool)
{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
unsigned int dst_maptype;
void *dst;
int ret;
/* one optimized fast path only for non bigpcluster cases yet */
- if (rq->inputsize <= PAGE_SIZE && nrpages_out == 1 && !rq->inplace_io) {
+ if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
DBG_BUGON(!*rq->out);
- dst = kmap_atomic(*rq->out);
+ dst = kmap_local_page(*rq->out);
dst_maptype = 0;
- goto dstmap_out;
- }
-
- /* general decoding path which can be used for all cases */
- ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
- if (ret < 0)
- return ret;
- if (ret) {
- dst = page_address(*rq->out);
- dst_maptype = 1;
- goto dstmap_out;
+ } else {
+ /* general decoding path which can be used for all cases */
+ ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0) {
+ dst = page_address(*rq->out);
+ dst_maptype = 1;
+ } else {
+ dst = erofs_vm_map_ram(rq->out, rq->outpages);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+ dst_maptype = 2;
+ }
}
-
- dst = erofs_vm_map_ram(rq->out, nrpages_out);
- if (!dst)
- return -ENOMEM;
- dst_maptype = 2;
-
-dstmap_out:
- ret = z_erofs_lz4_decompress_mem(rq, dst + rq->pageofs_out);
-
+ ret = z_erofs_lz4_decompress_mem(rq, dst);
if (!dst_maptype)
- kunmap_atomic(dst);
+ kunmap_local(dst);
else if (dst_maptype == 2)
- vm_unmap_ram(dst, nrpages_out);
- return ret;
+ vm_unmap_ram(dst, rq->outpages);
+ return ERR_PTR(ret);
}
-static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+static const char *z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
+ struct page **pagepool)
{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
- unsigned char *src, *dst;
-
- if (nrpages_out > 2) {
- DBG_BUGON(1);
- return -EIO;
+ const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
+ const unsigned int bs = rq->sb->s_blocksize;
+ unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
+ u8 *kin;
+
+ if (rq->outputsize > rq->inputsize)
+ return ERR_PTR(-EOPNOTSUPP);
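+ /*
+ * For interlaced pclusters, the head of the plain data (from
+ * @pageofs_out up to the end of the block) is stored at the tail
+ * of the input, so it has to be copied out first.
+ */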
+ if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
+ cur = bs - (rq->pageofs_out & (bs - 1));
+ pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
+ cur = min(cur, rq->outputsize);
+ if (cur && rq->out[0]) {
+ kin = kmap_local_page(rq->in[nrpages_in - 1]);
+ if (rq->out[0] == rq->in[nrpages_in - 1])
+ memmove(kin + rq->pageofs_out, kin + pi, cur);
+ else
+ memcpy_to_page(rq->out[0], rq->pageofs_out,
+ kin + pi, cur);
+ kunmap_local(kin);
+ }
+ rq->outputsize -= cur;
}
- if (rq->out[0] == *rq->in) {
- DBG_BUGON(nrpages_out != 1);
- return 0;
+ for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
+ insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
+ rq->outputsize -= insz;
+ if (!rq->in[ni])
+ continue;
+ kin = kmap_local_page(rq->in[ni]);
+ pi = 0;
+ do {
+ no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
+ po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
+ DBG_BUGON(no >= nrpages_out);
+ cnt = min(insz - pi, PAGE_SIZE - po);
+ if (rq->out[no] == rq->in[ni])
+ memmove(kin + po,
+ kin + rq->pageofs_in + pi, cnt);
+ else if (rq->out[no])
+ memcpy_to_page(rq->out[no], po,
+ kin + rq->pageofs_in + pi, cnt);
+ pi += cnt;
+ } while (pi < insz);
+ kunmap_local(kin);
}
+ DBG_BUGON(ni > nrpages_in);
+ return NULL;
+}
- src = kmap_atomic(*rq->in);
- if (rq->out[0]) {
- dst = kmap_atomic(rq->out[0]);
- memcpy(dst + rq->pageofs_out, src, righthalf);
- kunmap_atomic(dst);
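+/*
+ * Switch the streaming decompression buffers: once the current output or
+ * input page is exhausted, advance @dst/@src to the next one, and resolve
+ * in-place I/O overlaps with the bounce buffer or short-lived pages.
+ */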
+const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
+ void **dst, void **src, struct page **pgpl)
+{
+ struct z_erofs_decompress_req *rq = dctx->rq;
+ struct page **pgo, *tmppage;
+ unsigned int j;
+
+ if (!dctx->avail_out) {
+ if (++dctx->no >= rq->outpages || !rq->outputsize)
+ return "insufficient space for decompressed data";
+
+ if (dctx->kout)
+ kunmap_local(dctx->kout);
+ dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
+ rq->outputsize -= dctx->avail_out;
+ pgo = &rq->out[dctx->no];
+ if (!*pgo && rq->fillgaps) { /* deduped */
+ *pgo = erofs_allocpage(pgpl, rq->gfp);
+ if (!*pgo) {
+ dctx->kout = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+ set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
+ }
+ if (*pgo) {
+ dctx->kout = kmap_local_page(*pgo);
+ *dst = dctx->kout + rq->pageofs_out;
+ } else {
+ *dst = dctx->kout = NULL;
+ }
+ rq->pageofs_out = 0;
}
- if (nrpages_out == 2) {
- DBG_BUGON(!rq->out[1]);
- if (rq->out[1] == *rq->in) {
- memmove(src, src + righthalf, rq->pageofs_out);
- } else {
- dst = kmap_atomic(rq->out[1]);
- memcpy(dst, src + righthalf, rq->pageofs_out);
- kunmap_atomic(dst);
+ if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
+ if (++dctx->ni >= rq->inpages)
+ return "invalid compressed data";
+ if (dctx->kout) /* unlike kmap(), kunmap_local() must be in reverse order */
+ kunmap_local(dctx->kout);
+ kunmap_local(dctx->kin);
+
+ dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
+ rq->inputsize -= dctx->inbuf_sz;
+ dctx->kin = kmap_local_page(rq->in[dctx->ni]);
+ *src = dctx->kin;
+ dctx->bounced = false;
+ if (dctx->kout) {
+ j = (u8 *)*dst - dctx->kout;
+ dctx->kout = kmap_local_page(rq->out[dctx->no]);
+ *dst = dctx->kout + j;
}
+ dctx->inbuf_pos = 0;
}
- kunmap_atomic(src);
- return 0;
+
+ /*
+ * Handle overlapping: use the given bounce buffer if the input data is
+ * currently being processed; otherwise, use short-lived pages from the
+ * on-stack page pool, which are shared within the same request. Note
+ * that only a few in-place I/O pages need to be duplicated.
+ */
+ if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
+ memcpy(dctx->bounce, *src, dctx->inbuf_sz);
+ *src = dctx->bounce;
+ dctx->bounced = true;
+ }
+
+ for (j = dctx->ni + 1; j < rq->inpages; ++j) {
+ if (rq->out[dctx->no] != rq->in[j])
+ continue;
+ tmppage = erofs_allocpage(pgpl, rq->gfp);
+ if (!tmppage)
+ return ERR_PTR(-ENOMEM);
+ set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
+ copy_highpage(tmppage, rq->in[j]);
+ rq->in[j] = tmppage;
+ }
+ return NULL;
}
-static struct z_erofs_decompressor decompressors[] = {
- [Z_EROFS_COMPRESSION_SHIFTED] = {
- .decompress = z_erofs_shifted_transform,
+const struct z_erofs_decompressor *z_erofs_decomp[] = {
+ [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
+ .decompress = z_erofs_transform_plain,
.name = "shifted"
},
- [Z_EROFS_COMPRESSION_LZ4] = {
+ [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
+ .decompress = z_erofs_transform_plain,
+ .name = "interlaced"
+ },
+ [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
+ .config = z_erofs_load_lz4_config,
.decompress = z_erofs_lz4_decompress,
+ .init = z_erofs_gbuf_init,
+ .exit = z_erofs_gbuf_exit,
.name = "lz4"
},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
- [Z_EROFS_COMPRESSION_LZMA] = {
- .decompress = z_erofs_lzma_decompress,
- .name = "lzma"
- },
+ [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
+#endif
+#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
+ [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
+#endif
+#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
+ [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
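+/*
+ * Parse the per-algorithm compression configurations stored right after the
+ * (extended) superblock and apply each enabled algorithm's on-disk config.
+ */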
+int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ unsigned int algs, alg;
+ erofs_off_t offset;
+ int size, ret = 0;
+
+ if (!erofs_sb_has_compr_cfgs(sbi)) {
+ sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
+ return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
+ }
+
+ sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+ if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
+ erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
+ sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
+ return -EOPNOTSUPP;
+ }
+
+ (void)erofs_init_metabuf(&buf, sb, false);
+ offset = EROFS_SUPER_OFFSET + sbi->sb_size;
+ alg = 0;
+ for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+ const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
+ void *data;
+
+ if (!(algs & 1))
+ continue;
+
+ data = erofs_read_metadata(sb, &buf, &offset, &size);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ break;
+ }
+
+ if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
+ ret = dec->config(sb, dsb, data, size);
+ } else {
+ erofs_err(sb, "algorithm %d isn't enabled on this kernel",
+ alg);
+ ret = -EOPNOTSUPP;
+ }
+ kfree(data);
+ if (ret)
+ break;
+ }
+ erofs_put_metabuf(&buf);
+ return ret;
+}
+
+int __init z_erofs_init_decompressor(void)
{
- return decompressors[rq->alg].decompress(rq, pagepool);
+ int i, err;
+
+ for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
+ err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
+ if (err) {
+ while (i--)
+ if (z_erofs_decomp[i])
+ z_erofs_decomp[i]->exit();
+ return err;
+ }
+ }
+ return 0;
+}
+
+void z_erofs_exit_decompressor(void)
+{
+ int i;
+
+ for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
+ if (z_erofs_decomp[i])
+ z_erofs_decomp[i]->exit();
}
diff --git a/fs/erofs/decompressor_crypto.c b/fs/erofs/decompressor_crypto.c
new file mode 100644
index 000000000000..5ef6f71d3b7f
--- /dev/null
+++ b/fs/erofs/decompressor_crypto.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/scatterlist.h>
+#include <crypto/acompress.h>
+#include "compress.h"
+
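+/*
+ * Decompress a whole extent with an offloaded acomp transform: scatterlists
+ * are built directly over the input/output page arrays and the request is
+ * waited on synchronously.
+ */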
+static int __z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
+ struct crypto_acomp *tfm)
+{
+ struct sg_table st_src, st_dst;
+ struct acomp_req *req;
+ struct crypto_wait wait;
+ const char *reason;
+ u8 *headpage;
+ int ret;
+
+ headpage = kmap_local_page(*rq->in);
+ reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+ min_t(unsigned int, rq->inputsize,
+ rq->sb->s_blocksize - rq->pageofs_in));
+ kunmap_local(headpage);
+ if (reason)
+ return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
+
+ req = acomp_request_alloc(tfm);
+ if (!req)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages_segment(&st_src, rq->in, rq->inpages,
+ rq->pageofs_in, rq->inputsize, UINT_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto failed_src_alloc;
+
+ ret = sg_alloc_table_from_pages_segment(&st_dst, rq->out, rq->outpages,
+ rq->pageofs_out, rq->outputsize, UINT_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto failed_dst_alloc;
+
+ acomp_request_set_params(req, st_src.sgl,
+ st_dst.sgl, rq->inputsize, rq->outputsize);
+
+ crypto_init_wait(&wait);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+ erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
+ ret, rq->inputsize, rq->pageofs_in, rq->outputsize);
+ ret = -EIO;
+ }
+
+ sg_free_table(&st_dst);
+failed_dst_alloc:
+ sg_free_table(&st_src);
+failed_src_alloc:
+ acomp_request_free(req);
+ return ret;
+}
+
+struct z_erofs_crypto_engine {
+ char *crypto_name;
+ struct crypto_acomp *tfm;
+};
+
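+/* candidate crypto acomp engines per algorithm; each array ends with an empty entry */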
+struct z_erofs_crypto_engine *z_erofs_crypto[Z_EROFS_COMPRESSION_MAX] = {
+ [Z_EROFS_COMPRESSION_LZ4] = (struct z_erofs_crypto_engine[]) {
+ {},
+ },
+ [Z_EROFS_COMPRESSION_LZMA] = (struct z_erofs_crypto_engine[]) {
+ {},
+ },
+ [Z_EROFS_COMPRESSION_DEFLATE] = (struct z_erofs_crypto_engine[]) {
+ { .crypto_name = "qat_deflate", },
+ {},
+ },
+ [Z_EROFS_COMPRESSION_ZSTD] = (struct z_erofs_crypto_engine[]) {
+ {},
+ },
+};
+static DECLARE_RWSEM(z_erofs_crypto_rwsem);
+
+static struct crypto_acomp *z_erofs_crypto_get_engine(int alg)
+{
+ struct z_erofs_crypto_engine *e;
+
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e)
+ if (e->tfm)
+ return e->tfm;
+ return NULL;
+}
+
+int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+ struct crypto_acomp *tfm;
+ int i, err;
+
+ down_read(&z_erofs_crypto_rwsem);
+ tfm = z_erofs_crypto_get_engine(rq->alg);
+ if (!tfm) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
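+ /* scatterlists need real pages, so fill NULL (deduped) output slots first */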
+ for (i = 0; i < rq->outpages; i++) {
+ struct page *const page = rq->out[i];
+ struct page *victim;
+
+ if (!page) {
+ victim = __erofs_allocpage(pgpl, rq->gfp, true);
+ if (!victim) {
+ err = -ENOMEM;
+ goto out;
+ }
+ set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
+ rq->out[i] = victim;
+ }
+ }
+ err = __z_erofs_crypto_decompress(rq, tfm);
+out:
+ up_read(&z_erofs_crypto_rwsem);
+ return err;
+}
+
+int z_erofs_crypto_enable_engine(const char *name, int len)
+{
+ struct z_erofs_crypto_engine *e;
+ struct crypto_acomp *tfm;
+ int alg;
+
+ down_write(&z_erofs_crypto_rwsem);
+ for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) {
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) {
+ if (!strncmp(name, e->crypto_name, len)) {
+ if (e->tfm)
+ break;
+ tfm = crypto_alloc_acomp(e->crypto_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ up_write(&z_erofs_crypto_rwsem);
+ return -EOPNOTSUPP;
+ }
+ e->tfm = tfm;
+ break;
+ }
+ }
+ }
+ up_write(&z_erofs_crypto_rwsem);
+ return 0;
+}
+
+void z_erofs_crypto_disable_all_engines(void)
+{
+ struct z_erofs_crypto_engine *e;
+ int alg;
+
+ down_write(&z_erofs_crypto_rwsem);
+ for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) {
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) {
+ if (!e->tfm)
+ continue;
+ crypto_free_acomp(e->tfm);
+ e->tfm = NULL;
+ }
+ }
+ up_write(&z_erofs_crypto_rwsem);
+}
+
+int z_erofs_crypto_show_engines(char *buf, int size, char sep)
+{
+ struct z_erofs_crypto_engine *e;
+ int alg, len = 0;
+
+ for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) {
+ for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) {
+ if (!e->tfm)
+ continue;
+ len += scnprintf(buf + len, size - len, "%s%c",
+ e->crypto_name, sep);
+ }
+ }
+ return len;
+}
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
new file mode 100644
index 000000000000..3fb73000ed27
--- /dev/null
+++ b/fs/erofs/decompressor_deflate.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/zlib.h>
+#include "compress.h"
+
+struct z_erofs_deflate {
+ struct z_erofs_deflate *next;
+ struct z_stream_s z;
+ u8 bounce[PAGE_SIZE];
+};
+
+static DEFINE_SPINLOCK(z_erofs_deflate_lock);
+static unsigned int z_erofs_deflate_nstrms, z_erofs_deflate_avail_strms;
+static struct z_erofs_deflate *z_erofs_deflate_head;
+static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);
+
+module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);
+
+static void z_erofs_deflate_exit(void)
+{
+ /* there should be no running fs instance */
+ while (z_erofs_deflate_avail_strms) {
+ struct z_erofs_deflate *strm;
+
+ spin_lock(&z_erofs_deflate_lock);
+ strm = z_erofs_deflate_head;
+ if (!strm) {
+ spin_unlock(&z_erofs_deflate_lock);
+ continue;
+ }
+ z_erofs_deflate_head = NULL;
+ spin_unlock(&z_erofs_deflate_lock);
+
+ while (strm) {
+ struct z_erofs_deflate *n = strm->next;
+
+ vfree(strm->z.workspace);
+ kfree(strm);
+ --z_erofs_deflate_avail_strms;
+ strm = n;
+ }
+ }
+}
+
+static int __init z_erofs_deflate_init(void)
+{
+ /* default to the number of possible CPUs if not set via the module parameter */
+ if (!z_erofs_deflate_nstrms)
+ z_erofs_deflate_nstrms = num_possible_cpus();
+ return 0;
+}
+
+static int z_erofs_load_deflate_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size)
+{
+ struct z_erofs_deflate_cfgs *dfl = data;
+ static DEFINE_MUTEX(deflate_resize_mutex);
+ static bool inited;
+
+ if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
+ erofs_err(sb, "invalid deflate cfgs, size=%u", size);
+ return -EINVAL;
+ }
+
+ if (dfl->windowbits > MAX_WBITS) {
+ erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
+ return -EOPNOTSUPP;
+ }
+ mutex_lock(&deflate_resize_mutex);
+ if (!inited) {
+ for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+ ++z_erofs_deflate_avail_strms) {
+ struct z_erofs_deflate *strm;
+
+ strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+ if (!strm)
+ goto failed;
+ /* XXX: in-kernel zlib cannot customize windowbits */
+ strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+ if (!strm->z.workspace) {
+ kfree(strm);
+ goto failed;
+ }
+
+ spin_lock(&z_erofs_deflate_lock);
+ strm->next = z_erofs_deflate_head;
+ z_erofs_deflate_head = strm;
+ spin_unlock(&z_erofs_deflate_lock);
+ }
+ inited = true;
+ }
+ mutex_unlock(&deflate_resize_mutex);
+ erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
+ return 0;
+failed:
+ mutex_unlock(&deflate_resize_mutex);
+ z_erofs_deflate_exit();
+ return -ENOMEM;
+}
+
+static const char *__z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+ struct super_block *sb = rq->sb;
+ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
+ struct z_erofs_deflate *strm;
+ const char *reason;
+ int zerr;
+
+ /* 1. get the exact DEFLATE compressed size */
+ dctx.kin = kmap_local_page(*rq->in);
+ reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
+ if (reason) {
+ kunmap_local(dctx.kin);
+ return reason;
+ }
+
+ /* 2. get an available DEFLATE context */
+again:
+ spin_lock(&z_erofs_deflate_lock);
+ strm = z_erofs_deflate_head;
+ if (!strm) {
+ spin_unlock(&z_erofs_deflate_lock);
+ wait_event(z_erofs_deflate_wq, READ_ONCE(z_erofs_deflate_head));
+ goto again;
+ }
+ z_erofs_deflate_head = strm->next;
+ spin_unlock(&z_erofs_deflate_lock);
+
+ /* 3. multi-call decompress */
+ zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
+ if (zerr != Z_OK) {
+ reason = ERR_PTR(-EINVAL);
+ goto failed_zinit;
+ }
+
+ rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */
+ strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
+ rq->inputsize -= strm->z.avail_in;
+ strm->z.next_in = dctx.kin + rq->pageofs_in;
+ strm->z.avail_out = 0;
+ dctx.bounce = strm->bounce;
+
+ while (1) {
+ dctx.avail_out = strm->z.avail_out;
+ dctx.inbuf_sz = strm->z.avail_in;
+ reason = z_erofs_stream_switch_bufs(&dctx,
+ (void **)&strm->z.next_out,
+ (void **)&strm->z.next_in, pgpl);
+ if (reason)
+ break;
+ strm->z.avail_out = dctx.avail_out;
+ strm->z.avail_in = dctx.inbuf_sz;
+
+ zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
+ if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) {
+ if (zerr == Z_OK && rq->partial_decoding)
+ break;
+ if (zerr == Z_STREAM_END && !rq->outputsize)
+ break;
+ reason = (zerr == Z_DATA_ERROR ?
+ "corrupted compressed data" :
+ "unexpected end of stream");
+ break;
+ }
+ }
+ if (zlib_inflateEnd(&strm->z) != Z_OK && !reason)
+ reason = ERR_PTR(-EIO);
+ if (dctx.kout)
+ kunmap_local(dctx.kout);
+failed_zinit:
+ kunmap_local(dctx.kin);
+ /* 4. push back DEFLATE stream context to the global list */
+ spin_lock(&z_erofs_deflate_lock);
+ strm->next = z_erofs_deflate_head;
+ z_erofs_deflate_head = strm;
+ spin_unlock(&z_erofs_deflate_lock);
+ wake_up(&z_erofs_deflate_wq);
+ return reason;
+}
+
+static const char *z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+ int err;
+
+ if (!rq->partial_decoding) {
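+ /*
+ * For full decoding, try an accelerated engine first; fall back to the
+ * in-kernel zlib path if no engine is available (-EOPNOTSUPP).
+ */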
+ err = z_erofs_crypto_decompress(rq, pgpl);
+ if (err != -EOPNOTSUPP)
+ return ERR_PTR(err);
+ }
+#endif
+ return __z_erofs_deflate_decompress(rq, pgpl);
+}
+
+const struct z_erofs_decompressor z_erofs_deflate_decomp = {
+ .config = z_erofs_load_deflate_config,
+ .decompress = z_erofs_deflate_decompress,
+ .init = z_erofs_deflate_init,
+ .exit = z_erofs_deflate_exit,
+ .name = "deflate",
+};
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 50045510a1f4..b4ea6978faae 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/xz.h>
-#include <linux/module.h>
#include "compress.h"
struct z_erofs_lzma {
struct z_erofs_lzma *next;
struct xz_dec_microlzma *state;
- struct xz_buf buf;
u8 bounce[PAGE_SIZE];
};
@@ -19,7 +17,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq);
module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444);
-void z_erofs_lzma_exit(void)
+static void z_erofs_lzma_exit(void)
{
/* there should be no running fs instance */
while (z_erofs_lzma_avail_strms) {
@@ -47,7 +45,7 @@ void z_erofs_lzma_exit(void)
}
}
-int z_erofs_lzma_init(void)
+static int __init z_erofs_lzma_init(void)
{
unsigned int i;
@@ -71,11 +69,11 @@ int z_erofs_lzma_init(void)
return 0;
}
-int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lzma_cfgs *lzma, int size)
+static int z_erofs_load_lzma_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size)
{
static DEFINE_MUTEX(lzma_resize_mutex);
+ struct z_erofs_lzma_cfgs *lzma = data;
unsigned int dict_size, i;
struct z_erofs_lzma *strm, *head = NULL;
int err;
@@ -96,8 +94,6 @@ int z_erofs_load_lzma_config(struct super_block *sb,
return -EINVAL;
}
- erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!");
-
/* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */
mutex_lock(&lzma_resize_mutex);
@@ -143,37 +139,31 @@ again:
DBG_BUGON(z_erofs_lzma_head);
z_erofs_lzma_head = head;
spin_unlock(&z_erofs_lzma_lock);
+ wake_up_all(&z_erofs_lzma_wq);
z_erofs_lzma_max_dictsize = dict_size;
mutex_unlock(&lzma_resize_mutex);
return err;
}
-int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+static const char *z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int nrpages_in =
- PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
- unsigned int inputmargin, inlen, outlen, pageofs;
+ struct super_block *sb = rq->sb;
+ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
+ struct xz_buf buf = {};
struct z_erofs_lzma *strm;
- u8 *kin;
- bool bounced = false;
- int no, ni, j, err = 0;
+ enum xz_ret xz_err;
+ const char *reason;
/* 1. get the exact LZMA compressed size */
- kin = kmap(*rq->in);
- inputmargin = 0;
- while (!kin[inputmargin & ~PAGE_MASK])
- if (!(++inputmargin & ~PAGE_MASK))
- break;
-
- if (inputmargin >= PAGE_SIZE) {
- kunmap(*rq->in);
- return -EFSCORRUPTED;
+ dctx.kin = kmap_local_page(*rq->in);
+ reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
+ if (reason) {
+ kunmap_local(dctx.kin);
+ return reason;
}
- rq->inputsize -= inputmargin;
/* 2. get an available lzma context */
again:
@@ -188,103 +178,58 @@ again:
spin_unlock(&z_erofs_lzma_lock);
/* 3. multi-call decompress */
- inlen = rq->inputsize;
- outlen = rq->outputsize;
- xz_dec_microlzma_reset(strm->state, inlen, outlen,
+ xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize,
!rq->partial_decoding);
- pageofs = rq->pageofs_out;
- strm->buf.in = kin + inputmargin;
- strm->buf.in_pos = 0;
- strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - inputmargin);
- inlen -= strm->buf.in_size;
- strm->buf.out = NULL;
- strm->buf.out_pos = 0;
- strm->buf.out_size = 0;
-
- for (ni = 0, no = -1;;) {
- enum xz_ret xz_err;
-
- if (strm->buf.out_pos == strm->buf.out_size) {
- if (strm->buf.out) {
- kunmap(rq->out[no]);
- strm->buf.out = NULL;
- }
-
- if (++no >= nrpages_out || !outlen) {
- erofs_err(rq->sb, "decompressed buf out of bound");
- err = -EFSCORRUPTED;
- break;
- }
- strm->buf.out_pos = 0;
- strm->buf.out_size = min_t(u32, outlen,
- PAGE_SIZE - pageofs);
- outlen -= strm->buf.out_size;
- if (rq->out[no])
- strm->buf.out = kmap(rq->out[no]) + pageofs;
- pageofs = 0;
- } else if (strm->buf.in_pos == strm->buf.in_size) {
- kunmap(rq->in[ni]);
-
- if (++ni >= nrpages_in || !inlen) {
- erofs_err(rq->sb, "compressed buf out of bound");
- err = -EFSCORRUPTED;
- break;
- }
- strm->buf.in_pos = 0;
- strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE);
- inlen -= strm->buf.in_size;
- kin = kmap(rq->in[ni]);
- strm->buf.in = kin;
- bounced = false;
- }
+ buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
+ rq->inputsize -= buf.in_size;
+ buf.in = dctx.kin + rq->pageofs_in;
+ dctx.bounce = strm->bounce;
+ do {
+ dctx.avail_out = buf.out_size - buf.out_pos;
+ dctx.inbuf_sz = buf.in_size;
+ dctx.inbuf_pos = buf.in_pos;
+ reason = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
+ (void **)&buf.in, pgpl);
+ if (reason)
+ break;
- /*
- * Handle overlapping: Use bounced buffer if the compressed
- * data is under processing; Otherwise, Use short-lived pages
- * from the on-stack pagepool where pages share with the same
- * request.
- */
- if (!bounced && rq->out[no] == rq->in[ni]) {
- memcpy(strm->bounce, strm->buf.in, strm->buf.in_size);
- strm->buf.in = strm->bounce;
- bounced = true;
+ if (buf.out_size == buf.out_pos) {
+ buf.out_size = dctx.avail_out;
+ buf.out_pos = 0;
}
- for (j = ni + 1; j < nrpages_in; ++j) {
- struct page *tmppage;
+ buf.in_size = dctx.inbuf_sz;
+ buf.in_pos = dctx.inbuf_pos;
- if (rq->out[no] != rq->in[j])
- continue;
-
- DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
- rq->in[j]));
- tmppage = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
- set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
- copy_highpage(tmppage, rq->in[j]);
- rq->in[j] = tmppage;
- }
- xz_err = xz_dec_microlzma_run(strm->state, &strm->buf);
- DBG_BUGON(strm->buf.out_pos > strm->buf.out_size);
- DBG_BUGON(strm->buf.in_pos > strm->buf.in_size);
+ xz_err = xz_dec_microlzma_run(strm->state, &buf);
+ DBG_BUGON(buf.out_pos > buf.out_size);
+ DBG_BUGON(buf.in_pos > buf.in_size);
if (xz_err != XZ_OK) {
- if (xz_err == XZ_STREAM_END && !outlen)
+ if (xz_err == XZ_STREAM_END && !rq->outputsize)
break;
- erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]",
- xz_err, rq->inputsize, rq->outputsize);
- err = -EFSCORRUPTED;
+ reason = (xz_err == XZ_DATA_ERROR ?
+ "corrupted compressed data" :
+ "unexpected end of stream");
break;
}
- }
- if (no < nrpages_out && strm->buf.out)
- kunmap(rq->in[no]);
- if (ni < nrpages_in)
- kunmap(rq->in[ni]);
+ } while (1);
+
+ if (dctx.kout)
+ kunmap_local(dctx.kout);
+ kunmap_local(dctx.kin);
/* 4. push back LZMA stream context to the global list */
spin_lock(&z_erofs_lzma_lock);
strm->next = z_erofs_lzma_head;
z_erofs_lzma_head = strm;
spin_unlock(&z_erofs_lzma_lock);
wake_up(&z_erofs_lzma_wq);
- return err;
+ return reason;
}
+
+const struct z_erofs_decompressor z_erofs_lzma_decomp = {
+ .config = z_erofs_load_lzma_config,
+ .decompress = z_erofs_lzma_decompress,
+ .init = z_erofs_lzma_init,
+ .exit = z_erofs_lzma_exit,
+ .name = "lzma"
+};
diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c
new file mode 100644
index 000000000000..beae49165c69
--- /dev/null
+++ b/fs/erofs/decompressor_zstd.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/zstd.h>
+#include "compress.h"
+
+struct z_erofs_zstd {
+ struct z_erofs_zstd *next;
+ u8 bounce[PAGE_SIZE];
+ void *wksp;
+ unsigned int wkspsz;
+};
+
+static DEFINE_SPINLOCK(z_erofs_zstd_lock);
+static unsigned int z_erofs_zstd_max_dictsize;
+static unsigned int z_erofs_zstd_nstrms, z_erofs_zstd_avail_strms;
+static struct z_erofs_zstd *z_erofs_zstd_head;
+static DECLARE_WAIT_QUEUE_HEAD(z_erofs_zstd_wq);
+
+module_param_named(zstd_streams, z_erofs_zstd_nstrms, uint, 0444);
+
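+/*
+ * Detach one stream (or the whole list if @all) from the global free list,
+ * sleeping until at least one stream becomes available.
+ */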
+static struct z_erofs_zstd *z_erofs_isolate_strms(bool all)
+{
+ struct z_erofs_zstd *strm;
+
+again:
+ spin_lock(&z_erofs_zstd_lock);
+ strm = z_erofs_zstd_head;
+ if (!strm) {
+ spin_unlock(&z_erofs_zstd_lock);
+ wait_event(z_erofs_zstd_wq, READ_ONCE(z_erofs_zstd_head));
+ goto again;
+ }
+ z_erofs_zstd_head = all ? NULL : strm->next;
+ spin_unlock(&z_erofs_zstd_lock);
+ return strm;
+}
+
+static void z_erofs_zstd_exit(void)
+{
+ while (z_erofs_zstd_avail_strms) {
+ struct z_erofs_zstd *strm, *n;
+
+ for (strm = z_erofs_isolate_strms(true); strm; strm = n) {
+ n = strm->next;
+
+ kvfree(strm->wksp);
+ kfree(strm);
+ --z_erofs_zstd_avail_strms;
+ }
+ }
+}
+
+static int __init z_erofs_zstd_init(void)
+{
+ /* default to the number of possible CPUs if not set via the module parameter */
+ if (!z_erofs_zstd_nstrms)
+ z_erofs_zstd_nstrms = num_possible_cpus();
+
+ for (; z_erofs_zstd_avail_strms < z_erofs_zstd_nstrms;
+ ++z_erofs_zstd_avail_strms) {
+ struct z_erofs_zstd *strm;
+
+ strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+ if (!strm) {
+ z_erofs_zstd_exit();
+ return -ENOMEM;
+ }
+ spin_lock(&z_erofs_zstd_lock);
+ strm->next = z_erofs_zstd_head;
+ z_erofs_zstd_head = strm;
+ spin_unlock(&z_erofs_zstd_lock);
+ }
+ return 0;
+}
+
+static int z_erofs_load_zstd_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size)
+{
+ static DEFINE_MUTEX(zstd_resize_mutex);
+ struct z_erofs_zstd_cfgs *zstd = data;
+ unsigned int dict_size, wkspsz;
+ struct z_erofs_zstd *strm, *head = NULL;
+ void *wksp;
+
+ if (!zstd || size < sizeof(struct z_erofs_zstd_cfgs) || zstd->format) {
+ erofs_err(sb, "unsupported zstd format, size=%u", size);
+ return -EINVAL;
+ }
+
+ if (zstd->windowlog > ilog2(Z_EROFS_ZSTD_MAX_DICT_SIZE) - 10) {
+ erofs_err(sb, "unsupported zstd window log %u", zstd->windowlog);
+ return -EINVAL;
+ }
+ dict_size = 1U << (zstd->windowlog + 10);
+
+ /* serialize concurrent z_erofs_load_zstd_config() calls to avoid deadlock */
+ mutex_lock(&zstd_resize_mutex);
+ if (z_erofs_zstd_max_dictsize >= dict_size) {
+ mutex_unlock(&zstd_resize_mutex);
+ return 0;
+ }
+
+ /* 1. collect/isolate all streams for the following check */
+ while (z_erofs_zstd_avail_strms) {
+ struct z_erofs_zstd *n;
+
+ for (strm = z_erofs_isolate_strms(true); strm; strm = n) {
+ n = strm->next;
+ strm->next = head;
+ head = strm;
+ --z_erofs_zstd_avail_strms;
+ }
+ }
+
+ /* 2. walk each isolated stream and grow max dict_size if needed */
+ wkspsz = zstd_dstream_workspace_bound(dict_size);
+ for (strm = head; strm; strm = strm->next) {
+ wksp = kvmalloc(wkspsz, GFP_KERNEL);
+ if (!wksp)
+ break;
+ kvfree(strm->wksp);
+ strm->wksp = wksp;
+ strm->wkspsz = wkspsz;
+ }
+
+ /* 3. push back all to the global list and update max dict_size */
+ spin_lock(&z_erofs_zstd_lock);
+ DBG_BUGON(z_erofs_zstd_head);
+ z_erofs_zstd_head = head;
+ spin_unlock(&z_erofs_zstd_lock);
+ z_erofs_zstd_avail_strms = z_erofs_zstd_nstrms;
+ wake_up_all(&z_erofs_zstd_wq);
+ if (!strm)
+ z_erofs_zstd_max_dictsize = dict_size;
+ mutex_unlock(&zstd_resize_mutex);
+ return strm ? -ENOMEM : 0;
+}
+
+static const char *z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+ struct super_block *sb = rq->sb;
+ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
+ zstd_in_buffer in_buf = { NULL, 0, 0 };
+ zstd_out_buffer out_buf = { NULL, 0, 0 };
+ struct z_erofs_zstd *strm;
+ zstd_dstream *stream;
+ const char *reason;
+ int zerr;
+
+ /* 1. get the exact compressed size */
+ dctx.kin = kmap_local_page(*rq->in);
+ reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
+ if (reason) {
+ kunmap_local(dctx.kin);
+ return reason;
+ }
+
+ /* 2. get an available ZSTD context */
+ strm = z_erofs_isolate_strms(false);
+
+ /* 3. multi-call decompress */
+ stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz);
+ if (!stream) {
+ reason = ERR_PTR(-ENOMEM);
+ goto failed_zinit;
+ }
+
+ rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */
+ in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in);
+ rq->inputsize -= in_buf.size;
+ in_buf.src = dctx.kin + rq->pageofs_in;
+ dctx.bounce = strm->bounce;
+
+ do {
+ dctx.inbuf_sz = in_buf.size;
+ dctx.inbuf_pos = in_buf.pos;
+ reason = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
+ (void **)&in_buf.src, pgpl);
+ if (reason)
+ break;
+
+ if (out_buf.size == out_buf.pos) {
+ out_buf.size = dctx.avail_out;
+ out_buf.pos = 0;
+ }
+ in_buf.size = dctx.inbuf_sz;
+ in_buf.pos = dctx.inbuf_pos;
+
+ zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
+ dctx.avail_out = out_buf.size - out_buf.pos;
+ if (zstd_is_error(zerr) ||
+ ((rq->outputsize + dctx.avail_out) && (!zerr || (zerr > 0 &&
+ !(rq->inputsize + in_buf.size - in_buf.pos))))) {
+ reason = zstd_is_error(zerr) ? zstd_get_error_name(zerr) :
+ "unexpected end of stream";
+ break;
+ }
+ } while (rq->outputsize + dctx.avail_out);
+
+ if (dctx.kout)
+ kunmap_local(dctx.kout);
+failed_zinit:
+ kunmap_local(dctx.kin);
+ /* 4. push back ZSTD stream context to the global list */
+ spin_lock(&z_erofs_zstd_lock);
+ strm->next = z_erofs_zstd_head;
+ z_erofs_zstd_head = strm;
+ spin_unlock(&z_erofs_zstd_lock);
+ wake_up(&z_erofs_zstd_wq);
+ return reason;
+}
+
+const struct z_erofs_decompressor z_erofs_zstd_decomp = {
+ .config = z_erofs_load_zstd_config,
+ .decompress = z_erofs_zstd_decompress,
+ .init = z_erofs_zstd_init,
+ .exit = z_erofs_zstd_exit,
+ .name = "zstd",
+};
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
index eee9b0b31b63..32b4f5aa60c9 100644
--- a/fs/erofs/dir.c
+++ b/fs/erofs/dir.c
@@ -2,40 +2,21 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2022, Alibaba Cloud
*/
#include "internal.h"
-static void debug_one_dentry(unsigned char d_type, const char *de_name,
- unsigned int de_namelen)
-{
-#ifdef CONFIG_EROFS_FS_DEBUG
- /* since the on-disk name could not have the trailing '\0' */
- unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
-
- memcpy(dbg_namebuf, de_name, de_namelen);
- dbg_namebuf[de_namelen] = '\0';
-
- erofs_dbg("found dirent %s de_len %u d_type %d", dbg_namebuf,
- de_namelen, d_type);
-#endif
-}
-
static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
- void *dentry_blk, unsigned int *ofs,
- unsigned int nameoff, unsigned int maxsize)
+ void *dentry_blk, struct erofs_dirent *de,
+ unsigned int nameoff0, unsigned int maxsize)
{
- struct erofs_dirent *de = dentry_blk + *ofs;
- const struct erofs_dirent *end = dentry_blk + nameoff;
+ const struct erofs_dirent *end = dentry_blk + nameoff0;
while (de < end) {
- const char *de_name;
+ unsigned char d_type = fs_ftype_to_dtype(de->file_type);
+ unsigned int nameoff = le16_to_cpu(de->nameoff);
+ const char *de_name = (char *)dentry_blk + nameoff;
unsigned int de_namelen;
- unsigned char d_type;
-
- d_type = fs_ftype_to_dtype(de->file_type);
-
- nameoff = le16_to_cpu(de->nameoff);
- de_name = (char *)dentry_blk + nameoff;
/* the last dirent in the block? */
if (de + 1 >= end)
@@ -52,83 +33,88 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
return -EFSCORRUPTED;
}
- debug_one_dentry(d_type, de_name, de_namelen);
if (!dir_emit(ctx, de_name, de_namelen,
- le64_to_cpu(de->nid), d_type))
- /* stopped by some reason */
+ erofs_nid_to_ino64(EROFS_SB(dir->i_sb),
+ le64_to_cpu(de->nid)), d_type))
return 1;
++de;
- *ofs += sizeof(struct erofs_dirent);
+ ctx->pos += sizeof(struct erofs_dirent);
}
- *ofs = maxsize;
return 0;
}
static int erofs_readdir(struct file *f, struct dir_context *ctx)
{
struct inode *dir = file_inode(f);
- struct address_space *mapping = dir->i_mapping;
- const size_t dirsize = i_size_read(dir);
- unsigned int i = ctx->pos / EROFS_BLKSIZ;
- unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ struct super_block *sb = dir->i_sb;
+ struct file_ra_state *ra = &f->f_ra;
+ unsigned long bsz = sb->s_blocksize;
+ unsigned int ofs = erofs_blkoff(sb, ctx->pos);
+ pgoff_t ra_pages = DIV_ROUND_UP_POW2(
+ EROFS_I_SB(dir)->dir_ra_bytes, PAGE_SIZE);
+ pgoff_t nr_pages = DIV_ROUND_UP_POW2(dir->i_size, PAGE_SIZE);
int err = 0;
bool initial = true;
- while (ctx->pos < dirsize) {
- struct page *dentry_page;
+ buf.mapping = dir->i_mapping;
+ while (ctx->pos < dir->i_size) {
+ erofs_off_t dbstart = ctx->pos - ofs;
struct erofs_dirent *de;
unsigned int nameoff, maxsize;
- dentry_page = read_mapping_page(mapping, i, NULL);
- if (dentry_page == ERR_PTR(-ENOMEM)) {
- err = -ENOMEM;
- break;
- } else if (IS_ERR(dentry_page)) {
- erofs_err(dir->i_sb,
- "fail to readdir of logical block %u of nid %llu",
- i, EROFS_I(dir)->nid);
- err = -EFSCORRUPTED;
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
break;
}
- de = (struct erofs_dirent *)kmap(dentry_page);
+ /* readahead blocks to enhance performance for large directories */
+ if (ra_pages) {
+ pgoff_t idx = DIV_ROUND_UP_POW2(ctx->pos, PAGE_SIZE);
+ pgoff_t pages = min(nr_pages - idx, ra_pages);
- nameoff = le16_to_cpu(de->nameoff);
+ if (pages > 1 && !ra_has_index(ra, idx))
+ page_cache_sync_readahead(dir->i_mapping, ra,
+ f, idx, pages);
+ }
- if (nameoff < sizeof(struct erofs_dirent) ||
- nameoff >= PAGE_SIZE) {
- erofs_err(dir->i_sb,
- "invalid de[0].nameoff %u @ nid %llu",
+ de = erofs_bread(&buf, dbstart, true);
+ if (IS_ERR(de)) {
+ erofs_err(sb, "failed to readdir of logical block %llu of nid %llu",
+ erofs_blknr(sb, dbstart), EROFS_I(dir)->nid);
+ err = PTR_ERR(de);
+ break;
+ }
+
+ nameoff = le16_to_cpu(de->nameoff);
+ if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) {
+ erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu",
nameoff, EROFS_I(dir)->nid);
err = -EFSCORRUPTED;
- goto skip_this;
+ break;
}
- maxsize = min_t(unsigned int,
- dirsize - ctx->pos + ofs, PAGE_SIZE);
-
+ maxsize = min_t(unsigned int, dir->i_size - dbstart, bsz);
/* search dirents at the arbitrary position */
if (initial) {
initial = false;
-
ofs = roundup(ofs, sizeof(struct erofs_dirent));
- if (ofs >= nameoff)
- goto skip_this;
+ ctx->pos = dbstart + ofs;
}
- err = erofs_fill_dentries(dir, ctx, de, &ofs,
+ err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs,
nameoff, maxsize);
-skip_this:
- kunmap(dentry_page);
-
- put_page(dentry_page);
-
- ctx->pos = blknr_to_addr(i) + ofs;
-
if (err)
break;
- ++i;
+ ctx->pos = dbstart + maxsize;
ofs = 0;
+ cond_resched();
+ }
+ erofs_put_metabuf(&buf);
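+ /* if the on-disk directory omits the `.` dirent, synthesize it at the end */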
+ if (EROFS_I(dir)->dot_omitted && ctx->pos == dir->i_size) {
+ if (!dir_emit_dot(f, ctx))
+ return 0;
+ ++ctx->pos;
}
return err < 0 ? err : 0;
}
@@ -137,4 +123,8 @@ const struct file_operations erofs_dir_fops = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = erofs_readdir,
+ .unlocked_ioctl = erofs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = erofs_compat_ioctl,
+#endif
};
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 083997a034e5..e24268acdd62 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* EROFS (Enhanced ROM File System) on-disk format definition
*
@@ -9,55 +9,62 @@
#ifndef __EROFS_FS_H
#define __EROFS_FS_H
+/* to allow for x86 boot sectors and other oddities. */
#define EROFS_SUPER_OFFSET 1024
-#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
+#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
+#define EROFS_FEATURE_COMPAT_MTIME 0x00000002
+#define EROFS_FEATURE_COMPAT_XATTR_FILTER 0x00000004
+#define EROFS_FEATURE_COMPAT_SHARED_EA_IN_METABOX 0x00000008
+#define EROFS_FEATURE_COMPAT_PLAIN_XATTR_PFX 0x00000010
+
/*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
* be incompatible with this kernel version.
*/
-#define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001
+#define EROFS_FEATURE_INCOMPAT_ZERO_PADDING 0x00000001
#define EROFS_FEATURE_INCOMPAT_COMPR_CFGS 0x00000002
#define EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER 0x00000002
#define EROFS_FEATURE_INCOMPAT_CHUNKED_FILE 0x00000004
#define EROFS_FEATURE_INCOMPAT_DEVICE_TABLE 0x00000008
#define EROFS_FEATURE_INCOMPAT_COMPR_HEAD2 0x00000008
+#define EROFS_FEATURE_INCOMPAT_ZTAILPACKING 0x00000010
+#define EROFS_FEATURE_INCOMPAT_FRAGMENTS 0x00000020
+#define EROFS_FEATURE_INCOMPAT_DEDUPE 0x00000020
+#define EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES 0x00000040
+#define EROFS_FEATURE_INCOMPAT_48BIT 0x00000080
+#define EROFS_FEATURE_INCOMPAT_METABOX 0x00000100
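+/* i.e. all incompat feature bits defined so far, up to and including METABOX */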
#define EROFS_ALL_FEATURE_INCOMPAT \
- (EROFS_FEATURE_INCOMPAT_LZ4_0PADDING | \
- EROFS_FEATURE_INCOMPAT_COMPR_CFGS | \
- EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER | \
- EROFS_FEATURE_INCOMPAT_CHUNKED_FILE | \
- EROFS_FEATURE_INCOMPAT_DEVICE_TABLE | \
- EROFS_FEATURE_INCOMPAT_COMPR_HEAD2)
+ ((EROFS_FEATURE_INCOMPAT_METABOX << 1) - 1)
#define EROFS_SB_EXTSLOT_SIZE 16
struct erofs_deviceslot {
- union {
- u8 uuid[16]; /* used for device manager later */
- u8 userdata[64]; /* digest(sha256), etc. */
- } u;
- __le32 blocks; /* total fs blocks of this device */
- __le32 mapped_blkaddr; /* map starting at mapped_blkaddr */
- u8 reserved[56];
+ u8 tag[64]; /* digest(sha256), etc. */
+ __le32 blocks_lo; /* total blocks count of this device */
+ __le32 uniaddr_lo; /* unified starting block of this device */
+ __le32 blocks_hi; /* total blocks count MSB */
+ __le16 uniaddr_hi; /* unified starting block MSB */
+ u8 reserved[50];
};
#define EROFS_DEVT_SLOT_SIZE sizeof(struct erofs_deviceslot)
-/* erofs on-disk super block (currently 128 bytes) */
+/* erofs on-disk super block (currently 144 bytes at maximum) */
struct erofs_super_block {
__le32 magic; /* file system magic number */
- __le32 checksum; /* crc32c(super_block) */
+ __le32 checksum; /* crc32c to avoid unexpected on-disk overlap */
__le32 feature_compat;
- __u8 blkszbits; /* support block_size == PAGE_SIZE only */
+ __u8 blkszbits; /* filesystem block size in bit shift */
__u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */
-
- __le16 root_nid; /* nid of root directory */
+ union {
+ __le16 rootnid_2b; /* nid of root directory */
+ __le16 blocks_hi; /* (48BIT on) blocks count MSB */
+ } __packed rb;
__le64 inos; /* total valid ino # (== f_files - f_favail) */
-
- __le64 build_time; /* inode v1 time derivation */
- __le32 build_time_nsec; /* inode v1 time derivation in nano scale */
- __le32 blocks; /* used for statfs */
+ __le64 epoch; /* base seconds used for compact inodes */
+ __le32 fixed_nsec; /* fixed nanoseconds for compact inodes */
+ __le32 blocks_lo; /* blocks count LSB */
__le32 meta_blkaddr; /* start block address of metadata area */
__le32 xattr_blkaddr; /* start block address of shared xattr area */
__u8 uuid[16]; /* 128-bit uuid for volume */
@@ -71,129 +78,118 @@ struct erofs_super_block {
} __packed u1;
__le16 extra_devices; /* # of devices besides the primary device */
__le16 devt_slotoff; /* startoff = devt_slotoff * devt_slotsize */
- __u8 reserved2[38];
+ __u8 dirblkbits; /* directory block size in bit shift */
+ __u8 xattr_prefix_count; /* # of long xattr name prefixes */
+ __le32 xattr_prefix_start; /* start of long xattr prefixes */
+ __le64 packed_nid; /* nid of the special packed inode */
+ __u8 xattr_filter_reserved; /* reserved for xattr name filter */
+ __u8 reserved[3];
+ __le32 build_time; /* seconds added to epoch for mkfs time */
+ __le64 rootnid_8b; /* (48BIT on) nid of root directory */
+ __le64 reserved2;
+ __le64 metabox_nid; /* (METABOX on) nid of the metabox inode */
+ __le64 reserved3; /* [align to extslot 1] */
};
/*
- * erofs inode datalayout (i_format in on-disk inode):
- * 0 - inode plain without inline data A:
- * inode, [xattrs], ... | ... | no-holed data
- * 1 - inode VLE compression B (legacy):
- * inode, [xattrs], extents ... | ...
- * 2 - inode plain with inline data C:
- * inode, [xattrs], last_inline_data, ... | ... | no-holed data
- * 3 - inode compression D:
- * inode, [xattrs], map_header, extents ... | ...
- * 4 - inode chunk-based E:
- * inode, [xattrs], chunk indexes ... | ...
+ * EROFS inode datalayout (i_format in on-disk inode):
+ * 0 - uncompressed flat inode without tail-packing inline data
+ * 1 - compressed inode with non-compact indexes
+ * 2 - uncompressed flat inode with tail-packing inline data
+ * 3 - compressed inode with compact indexes
+ * 4 - chunk-based inode with (optional) multi-device support
* 5~7 - reserved
*/
enum {
EROFS_INODE_FLAT_PLAIN = 0,
- EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1,
+ EROFS_INODE_COMPRESSED_FULL = 1,
EROFS_INODE_FLAT_INLINE = 2,
- EROFS_INODE_FLAT_COMPRESSION = 3,
+ EROFS_INODE_COMPRESSED_COMPACT = 3,
EROFS_INODE_CHUNK_BASED = 4,
EROFS_INODE_DATALAYOUT_MAX
};
static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
{
- return datamode == EROFS_INODE_FLAT_COMPRESSION ||
- datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
+ return datamode == EROFS_INODE_COMPRESSED_COMPACT ||
+ datamode == EROFS_INODE_COMPRESSED_FULL;
}
-/* bit definitions of inode i_advise */
-#define EROFS_I_VERSION_BITS 1
-#define EROFS_I_DATALAYOUT_BITS 3
-
-#define EROFS_I_VERSION_BIT 0
-#define EROFS_I_DATALAYOUT_BIT 1
+/* bit definitions of inode i_format */
+#define EROFS_I_VERSION_MASK 0x01
+#define EROFS_I_DATALAYOUT_MASK 0x07
-#define EROFS_I_ALL \
- ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
+#define EROFS_I_VERSION_BIT 0
+#define EROFS_I_DATALAYOUT_BIT 1
+#define EROFS_I_NLINK_1_BIT 4 /* non-directory compact inodes only */
+#define EROFS_I_DOT_OMITTED_BIT 4 /* (directories) omit the `.` dirent */
+#define EROFS_I_ALL ((1 << (EROFS_I_NLINK_1_BIT + 1)) - 1)
/* indicate chunk blkbits, thus 'chunksize = blocksize << chunk blkbits' */
#define EROFS_CHUNK_FORMAT_BLKBITS_MASK 0x001F
-/* with chunk indexes or just a 4-byte blkaddr array */
+/* with chunk indexes or just a 4-byte block array */
#define EROFS_CHUNK_FORMAT_INDEXES 0x0020
+#define EROFS_CHUNK_FORMAT_48BIT 0x0040
-#define EROFS_CHUNK_FORMAT_ALL \
- (EROFS_CHUNK_FORMAT_BLKBITS_MASK | EROFS_CHUNK_FORMAT_INDEXES)
+#define EROFS_CHUNK_FORMAT_ALL ((EROFS_CHUNK_FORMAT_48BIT << 1) - 1)
+
+/* 32-byte on-disk inode */
+#define EROFS_INODE_LAYOUT_COMPACT 0
+/* 64-byte on-disk inode */
+#define EROFS_INODE_LAYOUT_EXTENDED 1
struct erofs_inode_chunk_info {
__le16 format; /* chunk blkbits, etc. */
__le16 reserved;
};
+union erofs_inode_i_u {
+ __le32 blocks_lo; /* total blocks count (if compressed inodes) */
+ __le32 startblk_lo; /* starting block number (if flat inodes) */
+ __le32 rdev; /* device ID (if special inodes) */
+ struct erofs_inode_chunk_info c;
+};
+
+union erofs_inode_i_nb {
+ __le16 nlink; /* if EROFS_I_NLINK_1_BIT is unset */
+ __le16 blocks_hi; /* total blocks count MSB */
+ __le16 startblk_hi; /* starting block number MSB */
+} __packed;
+
/* 32-byte reduced form of an ondisk inode */
struct erofs_inode_compact {
__le16 i_format; /* inode format hints */
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
__le16 i_xattr_icount;
__le16 i_mode;
- __le16 i_nlink;
+ union erofs_inode_i_nb i_nb;
__le32 i_size;
- __le32 i_reserved;
- union {
- /* file total compressed blocks for data mapping 1 */
- __le32 compressed_blocks;
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
+ __le32 i_mtime;
+ union erofs_inode_i_u i_u;
- /* for chunk-based files, it contains the summary info */
- struct erofs_inode_chunk_info c;
- } i_u;
- __le32 i_ino; /* only used for 32-bit stat compatibility */
+ __le32 i_ino; /* only used for 32-bit stat compatibility */
__le16 i_uid;
__le16 i_gid;
- __le32 i_reserved2;
+ __le32 i_reserved;
};
-/* 32 bytes on-disk inode */
-#define EROFS_INODE_LAYOUT_COMPACT 0
-/* 64 bytes on-disk inode */
-#define EROFS_INODE_LAYOUT_EXTENDED 1
-
/* 64-byte complete form of an ondisk inode */
struct erofs_inode_extended {
__le16 i_format; /* inode format hints */
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
__le16 i_xattr_icount;
__le16 i_mode;
- __le16 i_reserved;
+ union erofs_inode_i_nb i_nb;
__le64 i_size;
- union {
- /* file total compressed blocks for data mapping 1 */
- __le32 compressed_blocks;
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
-
- /* for chunk-based files, it contains the summary info */
- struct erofs_inode_chunk_info c;
- } i_u;
-
- /* only used for 32-bit stat compatibility */
- __le32 i_ino;
+ union erofs_inode_i_u i_u;
+ __le32 i_ino; /* only used for 32-bit stat compatibility */
__le32 i_uid;
__le32 i_gid;
- __le64 i_ctime;
- __le32 i_ctime_nsec;
+ __le64 i_mtime;
+ __le32 i_mtime_nsec;
__le32 i_nlink;
__u8 i_reserved2[16];
};
-#define EROFS_MAX_SHARED_XATTRS (128)
-/* h_shared_count between 129 ... 255 are special # */
-#define EROFS_SHARED_XATTR_EXTENT (255)
-
/*
* inline xattrs (n == i_xattr_icount):
* erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
@@ -206,10 +202,10 @@ struct erofs_inode_extended {
* for read-only fs, no need to introduce h_refcount
*/
struct erofs_xattr_ibody_header {
- __le32 h_reserved;
+ __le32 h_name_filter; /* bit value 1 indicates not-present */
__u8 h_shared_count;
__u8 h_reserved2[7];
- __le32 h_shared_xattrs[0]; /* shared xattr id array */
+ __le32 h_shared_xattrs[]; /* shared xattr id array */
};
/* Name indexes */
@@ -220,13 +216,30 @@ struct erofs_xattr_ibody_header {
#define EROFS_XATTR_INDEX_LUSTRE 5
#define EROFS_XATTR_INDEX_SECURITY 6
+/*
+ * bit 7 of e_name_index is set when it refers to a long xattr name prefix,
+ * while the remaining lower bits represent the index of the prefix.
+ */
+#define EROFS_XATTR_LONG_PREFIX 0x80
+#define EROFS_XATTR_LONG_PREFIX_MASK 0x7f
+
+#define EROFS_XATTR_FILTER_BITS 32
+#define EROFS_XATTR_FILTER_DEFAULT UINT32_MAX
+#define EROFS_XATTR_FILTER_SEED 0x25BBE08F
+
/* xattr entry (for both inline & shared xattrs) */
struct erofs_xattr_entry {
__u8 e_name_len; /* length of name */
__u8 e_name_index; /* attribute name index */
__le16 e_value_size; /* size of attribute value */
/* followed by e_name and e_value */
- char e_name[0]; /* attribute name */
+ char e_name[]; /* attribute name */
+};
+
+/* long xattr name prefix */
+struct erofs_xattr_long_prefix {
+ __u8 base_index; /* short xattr name prefix index */
+ char infix[]; /* infix apart from short prefix */
};
static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
@@ -234,6 +247,7 @@ static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
if (!i_xattr_icount)
return 0;
+ /* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
return sizeof(struct erofs_xattr_ibody_header) +
sizeof(__u32) * (le16_to_cpu(i_xattr_icount) - 1);
}
@@ -252,20 +266,44 @@ static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e)
/* 4-byte block address array */
#define EROFS_BLOCK_MAP_ENTRY_SIZE sizeof(__le32)
-/* 8-byte inode chunk indexes */
+/* 8-byte inode chunk index */
struct erofs_inode_chunk_index {
- __le16 advise; /* always 0, don't care for now */
+ __le16 startblk_hi; /* starting block number MSB */
__le16 device_id; /* back-end storage id (with bits masked) */
- __le32 blkaddr; /* start block address of this inode chunk */
+ __le32 startblk_lo; /* starting block number of this chunk */
};
-/* maximum supported size of a physical compression cluster */
+#define EROFS_DIRENT_NID_METABOX_BIT 63
+#define EROFS_DIRENT_NID_MASK (BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT) - 1)
+
+/* dirents are sorted in alphabetical order, so binary search can be used */
+struct erofs_dirent {
+ __le64 nid; /* node number */
+ __le16 nameoff; /* start offset of file name */
+ __u8 file_type; /* file type */
+ __u8 reserved; /* reserved */
+} __packed;
+
+/*
+ * EROFS file types should match the generic FT_* types; there seems to be
+ * no need to add BUILD_BUG_ONs since any potential mismatch would break
+ * other filesystems as well...
+ */
+
+#define EROFS_NAME_LEN 255
+
+/* maximum supported encoded size of a physical compressed cluster */
#define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024)
+/* maximum supported decoded size of a physical compressed cluster */
+#define Z_EROFS_PCLUSTER_MAX_DSIZE (12 * 1024 * 1024)
+
/* available compression algorithm types (for h_algorithmtype) */
enum {
Z_EROFS_COMPRESSION_LZ4 = 0,
Z_EROFS_COMPRESSION_LZMA = 1,
+ Z_EROFS_COMPRESSION_DEFLATE = 2,
+ Z_EROFS_COMPRESSION_ZSTD = 3,
Z_EROFS_COMPRESSION_MAX
};
#define Z_EROFS_ALL_COMPR_ALGS ((1 << Z_EROFS_COMPRESSION_MAX) - 1)
@@ -286,118 +324,126 @@ struct z_erofs_lzma_cfgs {
#define Z_EROFS_LZMA_MAX_DICT_SIZE (8 * Z_EROFS_PCLUSTER_MAX_SIZE)
+/* 6 bytes (+ length field = 8 bytes) */
+struct z_erofs_deflate_cfgs {
+ u8 windowbits; /* 8..15 for DEFLATE */
+ u8 reserved[5];
+} __packed;
+
+/* 6 bytes (+ length field = 8 bytes) */
+struct z_erofs_zstd_cfgs {
+ u8 format;
+ u8 windowlog; /* windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN(10) */
+ u8 reserved[4];
+} __packed;
+
+#define Z_EROFS_ZSTD_MAX_DICT_SIZE Z_EROFS_PCLUSTER_MAX_SIZE
+
/*
- * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
- * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
- * (4B) + 2B + (4B) if compacted 2B is on.
- * bit 1 : HEAD1 big pcluster (0 - off; 1 - on)
- * bit 2 : HEAD2 big pcluster (0 - off; 1 - on)
+ * Enable COMPACTED_2B for EROFS_INODE_COMPRESSED_COMPACT inodes:
+ * 4B (disabled) vs 4B+2B+4B (enabled)
*/
#define Z_EROFS_ADVISE_COMPACTED_2B 0x0001
+/* Enable extent metadata for EROFS_INODE_COMPRESSED_FULL inodes */
+#define Z_EROFS_ADVISE_EXTENTS 0x0001
#define Z_EROFS_ADVISE_BIG_PCLUSTER_1 0x0002
#define Z_EROFS_ADVISE_BIG_PCLUSTER_2 0x0004
-
+#define Z_EROFS_ADVISE_INLINE_PCLUSTER 0x0008
+#define Z_EROFS_ADVISE_INTERLACED_PCLUSTER 0x0010
+#define Z_EROFS_ADVISE_FRAGMENT_PCLUSTER 0x0020
+/* Indicate the record size for each extent if extent metadata is used */
+#define Z_EROFS_ADVISE_EXTRECSZ_BIT 1
+#define Z_EROFS_ADVISE_EXTRECSZ_MASK 0x3
+
+#define Z_EROFS_FRAGMENT_INODE_BIT 7
struct z_erofs_map_header {
- __le32 h_reserved1;
+ union {
+ /* fragment data offset in the packed inode */
+ __le32 h_fragmentoff;
+ struct {
+ __le16 h_reserved1;
+ /* indicates the encoded size of tailpacking data */
+ __le16 h_idata_size;
+ };
+ __le32 h_extents_lo; /* extent count LSB */
+ };
__le16 h_advise;
- /*
- * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
- * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
- */
- __u8 h_algorithmtype;
- /*
- * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
- * bit 3-7 : reserved.
- */
- __u8 h_clusterbits;
+ union {
+ struct {
+ /* algorithm type (bit 0-3: HEAD1; bit 4-7: HEAD2) */
+ __u8 h_algorithmtype;
+ /*
+ * bit 0-3 : logical cluster bits - blkszbits
+ * bit 4-6 : reserved
+ * bit 7 : pack the whole file into packed inode
+ */
+ __u8 h_clusterbits;
+ } __packed;
+ __le16 h_extents_hi; /* extent count MSB */
+ } __packed;
};
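+
+/*
+ * Layout sketch (matches the BUILD_BUG_ON below that keeps this header at
+ * 8 bytes): a 4-byte leading union, the 2-byte h_advise field, then a
+ * 2-byte trailing union.
+ */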
-#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
-
-/*
- * Fixed-sized output compression on-disk logical cluster type:
- * 0 - literal (uncompressed) lcluster
- * 1,3 - compressed lcluster (for HEAD lclusters)
- * 2 - compressed lcluster (for NONHEAD lclusters)
- *
- * In detail,
- * 0 - literal (uncompressed) lcluster,
- * di_advise = 0
- * di_clusterofs = the literal data offset of the lcluster
- * di_blkaddr = the blkaddr of the literal pcluster
- *
- * 1,3 - compressed lcluster (for HEAD lclusters)
- * di_advise = 1 or 3
- * di_clusterofs = the decompressed data offset of the lcluster
- * di_blkaddr = the blkaddr of the compressed pcluster
- *
- * 2 - compressed lcluster (for NONHEAD lclusters)
- * di_advise = 2
- * di_clusterofs =
- * the decompressed data offset in its own HEAD lcluster
- * di_u.delta[0] = distance to this HEAD lcluster
- * di_u.delta[1] = distance to the next HEAD lcluster
- */
enum {
- Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0,
- Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 = 1,
- Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2,
- Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 = 3,
- Z_EROFS_VLE_CLUSTER_TYPE_MAX
+ Z_EROFS_LCLUSTER_TYPE_PLAIN = 0,
+ Z_EROFS_LCLUSTER_TYPE_HEAD1 = 1,
+ Z_EROFS_LCLUSTER_TYPE_NONHEAD = 2,
+ Z_EROFS_LCLUSTER_TYPE_HEAD2 = 3,
+ Z_EROFS_LCLUSTER_TYPE_MAX
};
-#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
-#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
+#define Z_EROFS_LI_LCLUSTER_TYPE_MASK (Z_EROFS_LCLUSTER_TYPE_MAX - 1)
-/*
- * D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the
- * compressed block count of a compressed extent (in logical clusters, aka.
- * block count of a pcluster).
- */
-#define Z_EROFS_VLE_DI_D0_CBLKCNT (1 << 11)
+/* (noncompact only, HEAD) This pcluster refers to partially decompressed data */
+#define Z_EROFS_LI_PARTIAL_REF (1 << 15)
+
+/* Set on the 1st non-head lcluster to store the compressed block count (in blocks) */
+#define Z_EROFS_LI_D0_CBLKCNT (1 << 11)
-struct z_erofs_vle_decompressed_index {
+struct z_erofs_lcluster_index {
__le16 di_advise;
- /* where to decompress in the head cluster */
+ /* where to decompress in the head lcluster */
__le16 di_clusterofs;
union {
- /* for the head cluster */
- __le32 blkaddr;
+ __le32 blkaddr; /* for the HEAD lclusters */
/*
- * for the rest clusters
- * eg. for 4k page-sized cluster, maximum 4K*64k = 256M)
- * [0] - pointing to the head cluster
- * [1] - pointing to the tail cluster
+ * [0] - distance to its HEAD lcluster
+ * [1] - distance to the next HEAD lcluster
*/
- __le16 delta[2];
+ __le16 delta[2]; /* for the NONHEAD lclusters */
} di_u;
};
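+
+/*
+ * Worked example (illustrative): a NONHEAD lcluster that is two lclusters
+ * past its HEAD stores delta[0] = 2 and counts forward to the next HEAD in
+ * delta[1]; the 1st NONHEAD lcluster may instead carry the pcluster block
+ * count in delta[0], flagged by Z_EROFS_LI_D0_CBLKCNT.
+ */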
-#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
- (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
- sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
-
-/* dirent sorts in alphabet order, thus we can do binary search */
-struct erofs_dirent {
- __le64 nid; /* node number */
- __le16 nameoff; /* start offset of file name */
- __u8 file_type; /* file type */
- __u8 reserved; /* reserved */
-} __packed;
-
-/*
- * EROFS file types should match generic FT_* types and
- * it seems no need to add BUILD_BUG_ONs since potential
- * unmatchness will break other fses as well...
- */
+#define Z_EROFS_MAP_HEADER_END(end) \
+ (ALIGN(end, 8) + sizeof(struct z_erofs_map_header))
+#define Z_EROFS_FULL_INDEX_START(end) (Z_EROFS_MAP_HEADER_END(end) + 8)
+
+#define Z_EROFS_EXTENT_PLEN_PARTIAL BIT(27)
+#define Z_EROFS_EXTENT_PLEN_FMT_BIT 28
+#define Z_EROFS_EXTENT_PLEN_MASK ((Z_EROFS_PCLUSTER_MAX_SIZE << 1) - 1)
+struct z_erofs_extent {
+ __le32 plen; /* encoded length */
+ __le32 pstart_lo; /* physical offset */
+ __le32 pstart_hi; /* physical offset MSB */
+ __le32 lstart_lo; /* logical offset */
+ __le32 lstart_hi; /* logical offset MSB (>= 4GiB inodes) */
+ __u8 reserved[12]; /* for future use */
+};
-#define EROFS_NAME_LEN 255
+static inline int z_erofs_extent_recsize(unsigned int advise)
+{
+ return 4 << ((advise >> Z_EROFS_ADVISE_EXTRECSZ_BIT) &
+ Z_EROFS_ADVISE_EXTRECSZ_MASK);
+}
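+
+/*
+ * Illustration: the two EXTRECSZ advise bits select a per-extent record size
+ * of 4, 8, 16 or 32 bytes, e.g. an advise value of 0x4 yields 4 << 2 = 16.
+ */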
/* check the EROFS on-disk layout strictly at compile time */
static inline void erofs_check_ondisk_layout_definitions(void)
{
- BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
+ const __le64 fmh = *(__le64 *)&(struct z_erofs_map_header) {
+ .h_clusterbits = 1 << Z_EROFS_FRAGMENT_INODE_BIT
+ };
+
+ BUILD_BUG_ON(sizeof(struct erofs_super_block) != 144);
BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32);
BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64);
BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
@@ -405,15 +451,16 @@ static inline void erofs_check_ondisk_layout_definitions(void)
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_info) != 4);
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) != 8);
BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
- BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
+ BUILD_BUG_ON(sizeof(struct z_erofs_lcluster_index) != 8);
BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
/* keep in sync between 2 index structures for better extendibility */
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) !=
- sizeof(struct z_erofs_vle_decompressed_index));
+ sizeof(struct z_erofs_lcluster_index));
BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128);
- BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
- Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
+ /* exclude old compiler versions like gcc 7.5.0 */
+ BUILD_BUG_ON(__builtin_constant_p(fmh) ?
+ fmh != cpu_to_le64(1ULL << 63) : 0);
}
#endif
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
new file mode 100644
index 000000000000..932e8b353ba1
--- /dev/null
+++ b/fs/erofs/fileio.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024, Alibaba Cloud
+ */
+#include "internal.h"
+#include <trace/events/erofs.h>
+
+struct erofs_fileio_rq {
+ struct bio_vec bvecs[16];
+ struct bio bio;
+ struct kiocb iocb;
+ struct super_block *sb;
+};
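+
+/*
+ * Note: bvecs[16] caps a single request at 16 segments; when bio_add_folio()
+ * fails in erofs_fileio_scan_folio() below, the current request is submitted
+ * and a new one is allocated for the remaining folios.
+ */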
+
+struct erofs_fileio {
+ struct erofs_map_blocks map;
+ struct erofs_map_dev dev;
+ struct erofs_fileio_rq *rq;
+};
+
+static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
+{
+ struct erofs_fileio_rq *rq =
+ container_of(iocb, struct erofs_fileio_rq, iocb);
+ struct folio_iter fi;
+
+ if (ret > 0) {
+ if (ret != rq->bio.bi_iter.bi_size) {
+ bio_advance(&rq->bio, ret);
+ zero_fill_bio(&rq->bio);
+ }
+ ret = 0;
+ }
+ if (rq->bio.bi_end_io) {
+ if (ret < 0 && !rq->bio.bi_status)
+ rq->bio.bi_status = errno_to_blk_status(ret);
+ } else {
+ bio_for_each_folio_all(fi, &rq->bio) {
+ DBG_BUGON(folio_test_uptodate(fi.folio));
+ erofs_onlinefolio_end(fi.folio, ret, false);
+ }
+ }
+ bio_endio(&rq->bio);
+ bio_uninit(&rq->bio);
+ kfree(rq);
+}
+
+static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
+{
+ struct iov_iter iter;
+ int ret;
+
+ if (!rq)
+ return;
+ rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ rq->iocb.ki_ioprio = get_current_ioprio();
+ rq->iocb.ki_complete = erofs_fileio_ki_complete;
+ if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
+ rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
+ rq->iocb.ki_flags = IOCB_DIRECT;
+ iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
+ rq->bio.bi_iter.bi_size);
+ scoped_with_creds(rq->iocb.ki_filp->f_cred)
+ ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
+ if (ret != -EIOCBQUEUED)
+ erofs_fileio_ki_complete(&rq->iocb, ret);
+}
+
+static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
+{
+ struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
+ GFP_KERNEL | __GFP_NOFAIL);
+
+ bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
+ rq->iocb.ki_filp = mdev->m_dif->file;
+ rq->sb = mdev->m_sb;
+ return rq;
+}
+
+struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
+{
+ return &erofs_fileio_rq_alloc(mdev)->bio;
+}
+
+void erofs_fileio_submit_bio(struct bio *bio)
+{
+ return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
+ bio));
+}
+
+static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
+{
+ struct inode *inode = folio_inode(folio);
+ struct erofs_map_blocks *map = &io->map;
+ unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
+ loff_t pos = folio_pos(folio), ofs;
+ int err = 0;
+
+ erofs_onlinefolio_init(folio);
+ while (cur < end) {
+ if (!in_range(pos + cur, map->m_la, map->m_llen)) {
+ map->m_la = pos + cur;
+ map->m_llen = end - cur;
+ err = erofs_map_blocks(inode, map);
+ if (err)
+ break;
+ }
+
+ ofs = folio_pos(folio) + cur - map->m_la;
+ len = min_t(loff_t, map->m_llen - ofs, end - cur);
+ if (map->m_flags & EROFS_MAP_META) {
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ void *src;
+
+ src = erofs_read_metabuf(&buf, inode->i_sb,
+ map->m_pa + ofs, erofs_inode_in_metabox(inode));
+ if (IS_ERR(src)) {
+ err = PTR_ERR(src);
+ break;
+ }
+ memcpy_to_folio(folio, cur, src, len);
+ erofs_put_metabuf(&buf);
+ } else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ folio_zero_segment(folio, cur, cur + len);
+ attached = 0;
+ } else {
+ if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
+ map->m_deviceid != io->dev.m_deviceid)) {
+io_retry:
+ erofs_fileio_rq_submit(io->rq);
+ io->rq = NULL;
+ }
+
+ if (!io->rq) {
+ io->dev = (struct erofs_map_dev) {
+ .m_pa = io->map.m_pa + ofs,
+ .m_deviceid = io->map.m_deviceid,
+ };
+ err = erofs_map_dev(inode->i_sb, &io->dev);
+ if (err)
+ break;
+ io->rq = erofs_fileio_rq_alloc(&io->dev);
+ io->rq->bio.bi_iter.bi_sector =
+ (io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
+ attached = 0;
+ }
+ if (!bio_add_folio(&io->rq->bio, folio, len, cur))
+ goto io_retry;
+ if (!attached++)
+ erofs_onlinefolio_split(folio);
+ io->dev.m_pa += len;
+ }
+ cur += len;
+ }
+ erofs_onlinefolio_end(folio, err, false);
+ return err;
+}
+
+static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
+{
+ struct erofs_fileio io = {};
+ int err;
+
+ trace_erofs_read_folio(folio, true);
+ err = erofs_fileio_scan_folio(&io, folio);
+ erofs_fileio_rq_submit(io.rq);
+ return err;
+}
+
+static void erofs_fileio_readahead(struct readahead_control *rac)
+{
+ struct inode *inode = rac->mapping->host;
+ struct erofs_fileio io = {};
+ struct folio *folio;
+ int err;
+
+ trace_erofs_readahead(inode, readahead_index(rac),
+ readahead_count(rac), true);
+ while ((folio = readahead_folio(rac))) {
+ err = erofs_fileio_scan_folio(&io, folio);
+ if (err && err != -EINTR)
+ erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
+ folio->index, EROFS_I(inode)->nid);
+ }
+ erofs_fileio_rq_submit(io.rq);
+}
+
+const struct address_space_operations erofs_fileio_aops = {
+ .read_folio = erofs_fileio_read_folio,
+ .readahead = erofs_fileio_readahead,
+};
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
new file mode 100644
index 000000000000..7a346e20f7b7
--- /dev/null
+++ b/fs/erofs/fscache.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2022, Alibaba Cloud
+ * Copyright (C) 2022, Bytedance Inc. All rights reserved.
+ */
+#include <linux/pseudo_fs.h>
+#include <linux/fscache.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(erofs_domain_list_lock);
+static DEFINE_MUTEX(erofs_domain_cookies_lock);
+static LIST_HEAD(erofs_domain_list);
+static LIST_HEAD(erofs_domain_cookies_list);
+static struct vfsmount *erofs_pseudo_mnt;
+
+static int erofs_anon_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, EROFS_SUPER_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type erofs_anon_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "pseudo_erofs",
+ .init_fs_context = erofs_anon_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+struct erofs_fscache_io {
+ struct netfs_cache_resources cres;
+ struct iov_iter iter;
+ netfs_io_terminated_t end_io;
+ void *private;
+ refcount_t ref;
+};
+
+struct erofs_fscache_rq {
+ struct address_space *mapping; /* The mapping being accessed */
+ loff_t start; /* Start position */
+ size_t len; /* Length of the request */
+ size_t submitted; /* Length of submitted */
+ short error; /* 0 or error that occurred */
+ refcount_t ref;
+};
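+
+/*
+ * Reference-counting sketch: each erofs_fscache_io holds a reference on its
+ * request (taken in erofs_fscache_req_io_alloc()), so the request is only
+ * completed once all in-flight reads have finished and dropped their refs.
+ */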
+
+static bool erofs_fscache_io_put(struct erofs_fscache_io *io)
+{
+ if (!refcount_dec_and_test(&io->ref))
+ return false;
+ if (io->cres.ops)
+ io->cres.ops->end_operation(&io->cres);
+ kfree(io);
+ return true;
+}
+
+static void erofs_fscache_req_complete(struct erofs_fscache_rq *req)
+{
+ struct folio *folio;
+ bool failed = req->error;
+ pgoff_t start_page = req->start / PAGE_SIZE;
+ pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;
+
+ XA_STATE(xas, &req->mapping->i_pages, start_page);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, last_page) {
+ if (xas_retry(&xas, folio))
+ continue;
+ if (!failed)
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ }
+ rcu_read_unlock();
+}
+
+static void erofs_fscache_req_put(struct erofs_fscache_rq *req)
+{
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ erofs_fscache_req_complete(req);
+ kfree(req);
+}
+
+static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping,
+ loff_t start, size_t len)
+{
+ struct erofs_fscache_rq *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return NULL;
+ req->mapping = mapping;
+ req->start = start;
+ req->len = len;
+ refcount_set(&req->ref, 1);
+ return req;
+}
+
+static void erofs_fscache_req_io_put(struct erofs_fscache_io *io)
+{
+ struct erofs_fscache_rq *req = io->private;
+
+ if (erofs_fscache_io_put(io))
+ erofs_fscache_req_put(req);
+}
+
+static void erofs_fscache_req_end_io(void *priv, ssize_t transferred_or_error)
+{
+ struct erofs_fscache_io *io = priv;
+ struct erofs_fscache_rq *req = io->private;
+
+ if (IS_ERR_VALUE(transferred_or_error))
+ req->error = transferred_or_error;
+ erofs_fscache_req_io_put(io);
+}
+
+static struct erofs_fscache_io *erofs_fscache_req_io_alloc(struct erofs_fscache_rq *req)
+{
+ struct erofs_fscache_io *io = kzalloc(sizeof(*io), GFP_KERNEL);
+
+ if (!io)
+ return NULL;
+ io->end_io = erofs_fscache_req_end_io;
+ io->private = req;
+ refcount_inc(&req->ref);
+ refcount_set(&io->ref, 1);
+ return io;
+}
+
+/*
+ * Read data from the fscache backend described by @cookie, starting at the
+ * physical address offset @pstart, and fill it into the buffer described by
+ * io->iter.
+ */
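+/*
+ * Flow sketch: each iteration asks the cache how much is readable from
+ * @pstart, temporarily truncates io->iter to that length, kicks off an
+ * asynchronous fscache_read(), then re-expands the iterator and advances
+ * @pstart until the whole buffer has been submitted.
+ */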
+static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
+ loff_t pstart, struct erofs_fscache_io *io)
+{
+ enum netfs_io_source source;
+ struct netfs_cache_resources *cres = &io->cres;
+ struct iov_iter *iter = &io->iter;
+ int ret;
+
+ ret = fscache_begin_read_operation(cres, cookie);
+ if (ret)
+ return ret;
+
+ while (iov_iter_count(iter)) {
+ size_t orig_count = iov_iter_count(iter), len = orig_count;
+ unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;
+
+ source = cres->ops->prepare_ondemand_read(cres,
+ pstart, &len, LLONG_MAX, &flags, 0);
+ if (WARN_ON(len == 0))
+ source = NETFS_INVALID_READ;
+ if (source != NETFS_READ_FROM_CACHE) {
+ erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
+ return -EIO;
+ }
+
+ iov_iter_truncate(iter, len);
+ refcount_inc(&io->ref);
+ ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL,
+ io->end_io, io);
+ if (ret == -EIOCBQUEUED)
+ ret = 0;
+ if (ret) {
+ erofs_err(NULL, "fscache_read failed (ret %d)", ret);
+ return ret;
+ }
+ if (WARN_ON(iov_iter_count(iter)))
+ return -EIO;
+
+ iov_iter_reexpand(iter, orig_count - len);
+ pstart += len;
+ }
+ return 0;
+}
+
+struct erofs_fscache_bio {
+ struct erofs_fscache_io io;
+ struct bio bio; /* w/o bdev to share bio_add_page/endio() */
+ struct bio_vec bvecs[BIO_MAX_VECS];
+};
+
+static void erofs_fscache_bio_endio(void *priv, ssize_t transferred_or_error)
+{
+ struct erofs_fscache_bio *io = priv;
+
+ if (IS_ERR_VALUE(transferred_or_error))
+ io->bio.bi_status = errno_to_blk_status(transferred_or_error);
+ bio_endio(&io->bio);
+ BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0);
+ erofs_fscache_io_put(&io->io);
+}
+
+struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
+{
+ struct erofs_fscache_bio *io;
+
+ io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
+ bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
+ io->io.private = mdev->m_dif->fscache->cookie;
+ io->io.end_io = erofs_fscache_bio_endio;
+ refcount_set(&io->io.ref, 1);
+ return &io->bio;
+}
+
+void erofs_fscache_submit_bio(struct bio *bio)
+{
+ struct erofs_fscache_bio *io = container_of(bio,
+ struct erofs_fscache_bio, bio);
+ int ret;
+
+ iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt,
+ bio->bi_iter.bi_size);
+ ret = erofs_fscache_read_io_async(io->io.private,
+ bio->bi_iter.bi_sector << 9, &io->io);
+ erofs_fscache_io_put(&io->io);
+ if (!ret)
+ return;
+ bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(bio);
+}
+
+static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
+{
+ struct erofs_fscache *ctx = folio->mapping->host->i_private;
+ int ret = -ENOMEM;
+ struct erofs_fscache_rq *req;
+ struct erofs_fscache_io *io;
+
+ req = erofs_fscache_req_alloc(folio->mapping,
+ folio_pos(folio), folio_size(folio));
+ if (!req) {
+ folio_unlock(folio);
+ return ret;
+ }
+
+ io = erofs_fscache_req_io_alloc(req);
+ if (!io) {
+ req->error = ret;
+ goto out;
+ }
+ iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,
+ folio_pos(folio), folio_size(folio));
+
+ ret = erofs_fscache_read_io_async(ctx->cookie, folio_pos(folio), io);
+ if (ret)
+ req->error = ret;
+
+ erofs_fscache_req_io_put(io);
+out:
+ erofs_fscache_req_put(req);
+ return ret;
+}
+
+static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
+{
+ struct address_space *mapping = req->mapping;
+ struct inode *inode = mapping->host;
+ struct super_block *sb = inode->i_sb;
+ struct erofs_fscache_io *io;
+ struct erofs_map_blocks map;
+ struct erofs_map_dev mdev;
+ loff_t pos = req->start + req->submitted;
+ size_t count;
+ int ret;
+
+ map.m_la = pos;
+ ret = erofs_map_blocks(inode, &map);
+ if (ret)
+ return ret;
+
+ if (map.m_flags & EROFS_MAP_META) {
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ struct iov_iter iter;
+ size_t size = map.m_llen;
+ void *src;
+
+ src = erofs_read_metabuf(&buf, sb, map.m_pa,
+ erofs_inode_in_metabox(inode));
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+ iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
+ if (copy_to_iter(src, size, &iter) != size) {
+ erofs_put_metabuf(&buf);
+ return -EFAULT;
+ }
+ iov_iter_zero(PAGE_SIZE - size, &iter);
+ erofs_put_metabuf(&buf);
+ req->submitted += PAGE_SIZE;
+ return 0;
+ }
+
+ count = req->len - req->submitted;
+ if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+ struct iov_iter iter;
+
+ iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
+ iov_iter_zero(count, &iter);
+ req->submitted += count;
+ return 0;
+ }
+
+ count = min_t(size_t, map.m_llen - (pos - map.m_la), count);
+ DBG_BUGON(!count || count % PAGE_SIZE);
+
+ mdev = (struct erofs_map_dev) {
+ .m_deviceid = map.m_deviceid,
+ .m_pa = map.m_pa,
+ };
+ ret = erofs_map_dev(sb, &mdev);
+ if (ret)
+ return ret;
+
+ io = erofs_fscache_req_io_alloc(req);
+ if (!io)
+ return -ENOMEM;
+ iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
+ ret = erofs_fscache_read_io_async(mdev.m_dif->fscache->cookie,
+ mdev.m_pa + (pos - map.m_la), io);
+ erofs_fscache_req_io_put(io);
+
+ req->submitted += count;
+ return ret;
+}
+
+static int erofs_fscache_data_read(struct erofs_fscache_rq *req)
+{
+ int ret;
+
+ do {
+ ret = erofs_fscache_data_read_slice(req);
+ if (ret)
+ req->error = ret;
+ } while (!ret && req->submitted < req->len);
+ return ret;
+}
+
+static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
+{
+ struct erofs_fscache_rq *req;
+ int ret;
+
+ req = erofs_fscache_req_alloc(folio->mapping,
+ folio_pos(folio), folio_size(folio));
+ if (!req) {
+ folio_unlock(folio);
+ return -ENOMEM;
+ }
+
+ ret = erofs_fscache_data_read(req);
+ erofs_fscache_req_put(req);
+ return ret;
+}
+
+static void erofs_fscache_readahead(struct readahead_control *rac)
+{
+ struct erofs_fscache_rq *req;
+
+ if (!readahead_count(rac))
+ return;
+
+ req = erofs_fscache_req_alloc(rac->mapping,
+ readahead_pos(rac), readahead_length(rac));
+ if (!req)
+ return;
+
+ /* The request completion will drop refs on the folios. */
+ while (readahead_folio(rac))
+ ;
+
+ erofs_fscache_data_read(req);
+ erofs_fscache_req_put(req);
+}
+
+static const struct address_space_operations erofs_fscache_meta_aops = {
+ .read_folio = erofs_fscache_meta_read_folio,
+};
+
+const struct address_space_operations erofs_fscache_access_aops = {
+ .read_folio = erofs_fscache_read_folio,
+ .readahead = erofs_fscache_readahead,
+};
+
+static void erofs_fscache_domain_put(struct erofs_domain *domain)
+{
+ mutex_lock(&erofs_domain_list_lock);
+ if (refcount_dec_and_test(&domain->ref)) {
+ list_del(&domain->list);
+ if (list_empty(&erofs_domain_list)) {
+ kern_unmount(erofs_pseudo_mnt);
+ erofs_pseudo_mnt = NULL;
+ }
+ fscache_relinquish_volume(domain->volume, NULL, false);
+ mutex_unlock(&erofs_domain_list_lock);
+ kfree(domain->domain_id);
+ kfree(domain);
+ return;
+ }
+ mutex_unlock(&erofs_domain_list_lock);
+}
+
+static int erofs_fscache_register_volume(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ char *domain_id = sbi->domain_id;
+ struct fscache_volume *volume;
+ char *name;
+ int ret = 0;
+
+ name = kasprintf(GFP_KERNEL, "erofs,%s",
+ domain_id ? domain_id : sbi->fsid);
+ if (!name)
+ return -ENOMEM;
+
+ volume = fscache_acquire_volume(name, NULL, NULL, 0);
+ if (IS_ERR_OR_NULL(volume)) {
+ erofs_err(sb, "failed to register volume for %s", name);
+ ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
+ volume = NULL;
+ }
+
+ sbi->volume = volume;
+ kfree(name);
+ return ret;
+}
+
+static int erofs_fscache_init_domain(struct super_block *sb)
+{
+ int err;
+ struct erofs_domain *domain;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
+ if (!domain)
+ return -ENOMEM;
+
+ domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
+ if (!domain->domain_id) {
+ kfree(domain);
+ return -ENOMEM;
+ }
+
+ err = erofs_fscache_register_volume(sb);
+ if (err)
+ goto out;
+
+ if (!erofs_pseudo_mnt) {
+ struct vfsmount *mnt = kern_mount(&erofs_anon_fs_type);
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
+ goto out;
+ }
+ erofs_pseudo_mnt = mnt;
+ }
+
+ domain->volume = sbi->volume;
+ refcount_set(&domain->ref, 1);
+ list_add(&domain->list, &erofs_domain_list);
+ sbi->domain = domain;
+ return 0;
+out:
+ kfree(domain->domain_id);
+ kfree(domain);
+ return err;
+}
+
+static int erofs_fscache_register_domain(struct super_block *sb)
+{
+ int err;
+ struct erofs_domain *domain;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ mutex_lock(&erofs_domain_list_lock);
+ list_for_each_entry(domain, &erofs_domain_list, list) {
+ if (!strcmp(domain->domain_id, sbi->domain_id)) {
+ sbi->domain = domain;
+ sbi->volume = domain->volume;
+ refcount_inc(&domain->ref);
+ mutex_unlock(&erofs_domain_list_lock);
+ return 0;
+ }
+ }
+ err = erofs_fscache_init_domain(sb);
+ mutex_unlock(&erofs_domain_list_lock);
+ return err;
+}
+
+static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
+ char *name, unsigned int flags)
+{
+ struct fscache_volume *volume = EROFS_SB(sb)->volume;
+ struct erofs_fscache *ctx;
+ struct fscache_cookie *cookie;
+ struct super_block *isb;
+ struct inode *inode;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&ctx->node);
+ refcount_set(&ctx->ref, 1);
+
+ cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
+ name, strlen(name), NULL, 0, 0);
+ if (!cookie) {
+ erofs_err(sb, "failed to get cookie for %s", name);
+ ret = -EINVAL;
+ goto err;
+ }
+ fscache_use_cookie(cookie, false);
+
+	/*
+	 * Allocate an anonymous inode in the global pseudo mount for shareable
+	 * blobs, so that they are accessible across erofs instances.
+	 */
+ isb = flags & EROFS_REG_COOKIE_SHARE ? erofs_pseudo_mnt->mnt_sb : sb;
+ inode = new_inode(isb);
+ if (!inode) {
+ erofs_err(sb, "failed to get anon inode for %s", name);
+ ret = -ENOMEM;
+ goto err_cookie;
+ }
+
+ inode->i_size = OFFSET_MAX;
+ inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
+ inode->i_blkbits = EROFS_SB(sb)->blkszbits;
+ inode->i_private = ctx;
+
+ ctx->cookie = cookie;
+ ctx->inode = inode;
+ return ctx;
+
+err_cookie:
+ fscache_unuse_cookie(cookie, NULL, NULL);
+ fscache_relinquish_cookie(cookie, false);
+err:
+ kfree(ctx);
+ return ERR_PTR(ret);
+}
+
+static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
+{
+ fscache_unuse_cookie(ctx->cookie, NULL, NULL);
+ fscache_relinquish_cookie(ctx->cookie, false);
+ iput(ctx->inode);
+ kfree(ctx->name);
+ kfree(ctx);
+}
+
+static struct erofs_fscache *erofs_domain_init_cookie(struct super_block *sb,
+ char *name, unsigned int flags)
+{
+ struct erofs_fscache *ctx;
+ struct erofs_domain *domain = EROFS_SB(sb)->domain;
+
+ ctx = erofs_fscache_acquire_cookie(sb, name, flags);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ ctx->name = kstrdup(name, GFP_KERNEL);
+ if (!ctx->name) {
+ erofs_fscache_relinquish_cookie(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ refcount_inc(&domain->ref);
+ ctx->domain = domain;
+ list_add(&ctx->node, &erofs_domain_cookies_list);
+ return ctx;
+}
+
+static struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
+ char *name, unsigned int flags)
+{
+ struct erofs_fscache *ctx;
+ struct erofs_domain *domain = EROFS_SB(sb)->domain;
+
+ flags |= EROFS_REG_COOKIE_SHARE;
+ mutex_lock(&erofs_domain_cookies_lock);
+ list_for_each_entry(ctx, &erofs_domain_cookies_list, node) {
+ if (ctx->domain != domain || strcmp(ctx->name, name))
+ continue;
+ if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
+ refcount_inc(&ctx->ref);
+ } else {
+ erofs_err(sb, "%s already exists in domain %s", name,
+ domain->domain_id);
+ ctx = ERR_PTR(-EEXIST);
+ }
+ mutex_unlock(&erofs_domain_cookies_lock);
+ return ctx;
+ }
+ ctx = erofs_domain_init_cookie(sb, name, flags);
+ mutex_unlock(&erofs_domain_cookies_lock);
+ return ctx;
+}
+
+struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+ char *name,
+ unsigned int flags)
+{
+ if (EROFS_SB(sb)->domain_id)
+ return erofs_domain_register_cookie(sb, name, flags);
+ return erofs_fscache_acquire_cookie(sb, name, flags);
+}
+
+void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
+{
+ struct erofs_domain *domain = NULL;
+
+ if (!ctx)
+ return;
+ if (!ctx->domain)
+ return erofs_fscache_relinquish_cookie(ctx);
+
+ mutex_lock(&erofs_domain_cookies_lock);
+ if (refcount_dec_and_test(&ctx->ref)) {
+ domain = ctx->domain;
+ list_del(&ctx->node);
+ erofs_fscache_relinquish_cookie(ctx);
+ }
+ mutex_unlock(&erofs_domain_cookies_lock);
+ if (domain)
+ erofs_fscache_domain_put(domain);
+}
+
+int erofs_fscache_register_fs(struct super_block *sb)
+{
+ int ret;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_fscache *fscache;
+ unsigned int flags = 0;
+
+ if (sbi->domain_id)
+ ret = erofs_fscache_register_domain(sb);
+ else
+ ret = erofs_fscache_register_volume(sb);
+ if (ret)
+ return ret;
+
+	/*
+	 * When a shared domain is enabled, use NEED_NOEXIST to guarantee that
+	 * the primary data blob (aka fsid) is unique in the shared domain.
+	 *
+	 * In the non-shared-domain case, fscache_acquire_volume() invoked by
+	 * erofs_fscache_register_volume() has already guaranteed the
+	 * uniqueness of the primary data blob.
+	 *
+	 * The acquired domain/volume will be relinquished in kill_sb() on error.
+	 */
+ if (sbi->domain_id)
+ flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
+ fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
+ if (IS_ERR(fscache))
+ return PTR_ERR(fscache);
+
+ sbi->dif0.fscache = fscache;
+ return 0;
+}
+
+void erofs_fscache_unregister_fs(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ erofs_fscache_unregister_cookie(sbi->dif0.fscache);
+
+ if (sbi->domain)
+ erofs_fscache_domain_put(sbi->domain);
+ else
+ fscache_relinquish_volume(sbi->volume, NULL, false);
+
+ sbi->dif0.fscache = NULL;
+ sbi->volume = NULL;
+ sbi->domain = NULL;
+}
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 2345f1de438e..bce98c845a18 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -5,47 +5,54 @@
* Copyright (C) 2021, Alibaba Cloud
*/
#include "xattr.h"
-
+#include <linux/compat.h>
#include <trace/events/erofs.h>
-/*
- * if inode is successfully read, return its inode page (or sometimes
- * the inode payload page if it's an extended inode) in order to fill
- * inline data if possible.
- */
-static struct page *erofs_read_inode(struct inode *inode,
- unsigned int *ofs)
+static int erofs_fill_symlink(struct inode *inode, void *kaddr,
+ unsigned int m_pofs)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ loff_t off;
+
+ m_pofs += vi->xattr_isize;
+ /* check if it cannot be handled with fast symlink scheme */
+ if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
+ check_add_overflow(m_pofs, inode->i_size, &off) ||
+ off > i_blocksize(inode))
+ return 0;
+
+ inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL);
+ return inode->i_link ? 0 : -ENOMEM;
+}
+
+static int erofs_read_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
+ erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode));
+ unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode));
+ bool in_mbox = erofs_inode_in_metabox(inode);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_sb_info *sbi = EROFS_SB(sb);
+ erofs_blk_t addrmask = BIT_ULL(48) - 1;
struct erofs_inode *vi = EROFS_I(inode);
- const erofs_off_t inode_loc = iloc(sbi, vi->nid);
-
- erofs_blk_t blkaddr, nblks = 0;
- struct page *page;
+ struct erofs_inode_extended *die, copied;
struct erofs_inode_compact *dic;
- struct erofs_inode_extended *die, *copied = NULL;
unsigned int ifmt;
- int err;
-
- blkaddr = erofs_blknr(inode_loc);
- *ofs = erofs_blkoff(inode_loc);
-
- erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
- __func__, vi->nid, *ofs, blkaddr);
+ void *ptr;
+ int err = 0;
- page = erofs_get_meta_page(sb, blkaddr);
- if (IS_ERR(page)) {
- erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
- vi->nid, PTR_ERR(page));
- return page;
+ ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), in_mbox);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ erofs_err(sb, "failed to read inode meta block (nid: %llu): %d",
+ vi->nid, err);
+ goto err_out;
}
- dic = page_address(page) + *ofs;
+ dic = ptr + ofs;
ifmt = le16_to_cpu(dic->i_format);
-
if (ifmt & ~EROFS_I_ALL) {
- erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
+ erofs_err(sb, "unsupported i_format %u of nid %llu",
ifmt, vi->nid);
err = -EOPNOTSUPP;
goto err_out;
@@ -53,7 +60,7 @@ static struct page *erofs_read_inode(struct inode *inode,
vi->datalayout = erofs_inode_datalayout(ifmt);
if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
- erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
+ erofs_err(sb, "unsupported datalayout %u of nid %llu",
vi->datalayout, vi->nid);
err = -EOPNOTSUPP;
goto err_out;
@@ -62,326 +69,314 @@ static struct page *erofs_read_inode(struct inode *inode,
switch (erofs_inode_version(ifmt)) {
case EROFS_INODE_LAYOUT_EXTENDED:
vi->inode_isize = sizeof(struct erofs_inode_extended);
- /* check if the inode acrosses page boundary */
- if (*ofs + vi->inode_isize <= PAGE_SIZE) {
- *ofs += vi->inode_isize;
+		/* check if the extended inode crosses a block boundary */
+ if (ofs + vi->inode_isize <= sb->s_blocksize) {
+ ofs += vi->inode_isize;
die = (struct erofs_inode_extended *)dic;
+ copied.i_u = die->i_u;
+ copied.i_nb = die->i_nb;
} else {
- const unsigned int gotten = PAGE_SIZE - *ofs;
-
- copied = kmalloc(vi->inode_isize, GFP_NOFS);
- if (!copied) {
- err = -ENOMEM;
+ const unsigned int gotten = sb->s_blocksize - ofs;
+
+ memcpy(&copied, dic, gotten);
+ ptr = erofs_read_metabuf(&buf, sb,
+ erofs_pos(sb, blkaddr + 1), in_mbox);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ erofs_err(sb, "failed to read inode payload block (nid: %llu): %d",
+ vi->nid, err);
goto err_out;
}
- memcpy(copied, dic, gotten);
- unlock_page(page);
- put_page(page);
-
- page = erofs_get_meta_page(sb, blkaddr + 1);
- if (IS_ERR(page)) {
- erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
- vi->nid, PTR_ERR(page));
- kfree(copied);
- return page;
- }
- *ofs = vi->inode_isize - gotten;
- memcpy((u8 *)copied + gotten, page_address(page), *ofs);
- die = copied;
+ ofs = vi->inode_isize - gotten;
+ memcpy((u8 *)&copied + gotten, ptr, ofs);
+ die = &copied;
}
vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
inode->i_mode = le16_to_cpu(die->i_mode);
- switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
- case S_IFDIR:
- case S_IFLNK:
- vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
- break;
- case S_IFCHR:
- case S_IFBLK:
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(die->i_u.rdev));
- break;
- case S_IFIFO:
- case S_IFSOCK:
- inode->i_rdev = 0;
- break;
- default:
- goto bogusimode;
- }
i_uid_write(inode, le32_to_cpu(die->i_uid));
i_gid_write(inode, le32_to_cpu(die->i_gid));
set_nlink(inode, le32_to_cpu(die->i_nlink));
-
- /* extended inode has its own timestamp */
- inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
- inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(die->i_mtime),
+ le32_to_cpu(die->i_mtime_nsec));
inode->i_size = le64_to_cpu(die->i_size);
-
- /* total blocks for compressed files */
- if (erofs_inode_is_data_compressed(vi->datalayout))
- nblks = le32_to_cpu(die->i_u.compressed_blocks);
- else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
- /* fill chunked inode summary info */
- vi->chunkformat = le16_to_cpu(die->i_u.c.format);
- kfree(copied);
- copied = NULL;
break;
case EROFS_INODE_LAYOUT_COMPACT:
vi->inode_isize = sizeof(struct erofs_inode_compact);
- *ofs += vi->inode_isize;
+ ofs += vi->inode_isize;
vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
inode->i_mode = le16_to_cpu(dic->i_mode);
- switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
- case S_IFDIR:
- case S_IFLNK:
- vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
- break;
- case S_IFCHR:
- case S_IFBLK:
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(dic->i_u.rdev));
- break;
- case S_IFIFO:
- case S_IFSOCK:
- inode->i_rdev = 0;
- break;
- default:
- goto bogusimode;
- }
+ copied.i_u = dic->i_u;
i_uid_write(inode, le16_to_cpu(dic->i_uid));
i_gid_write(inode, le16_to_cpu(dic->i_gid));
- set_nlink(inode, le16_to_cpu(dic->i_nlink));
-
- /* use build time for compact inodes */
- inode->i_ctime.tv_sec = sbi->build_time;
- inode->i_ctime.tv_nsec = sbi->build_time_nsec;
+ if (!S_ISDIR(inode->i_mode) &&
+ ((ifmt >> EROFS_I_NLINK_1_BIT) & 1)) {
+ set_nlink(inode, 1);
+ copied.i_nb = dic->i_nb;
+ } else {
+ set_nlink(inode, le16_to_cpu(dic->i_nb.nlink));
+ copied.i_nb.startblk_hi = 0;
+ addrmask = BIT_ULL(32) - 1;
+ }
+ inode_set_mtime(inode, sbi->epoch + le32_to_cpu(dic->i_mtime),
+ sbi->fixed_nsec);
inode->i_size = le32_to_cpu(dic->i_size);
- if (erofs_inode_is_data_compressed(vi->datalayout))
- nblks = le32_to_cpu(dic->i_u.compressed_blocks);
- else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
- vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
break;
default:
- erofs_err(inode->i_sb,
- "unsupported on-disk inode version %u of nid %llu",
+ erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
erofs_inode_version(ifmt), vi->nid);
err = -EOPNOTSUPP;
goto err_out;
}
+ if (unlikely(inode->i_size < 0)) {
+ erofs_err(sb, "negative i_size @ nid %llu", vi->nid);
+ err = -EFSCORRUPTED;
+ goto err_out;
+ }
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFDIR:
+ vi->dot_omitted = (ifmt >> EROFS_I_DOT_OMITTED_BIT) & 1;
+ fallthrough;
+ case S_IFREG:
+ case S_IFLNK:
+ vi->startblk = le32_to_cpu(copied.i_u.startblk_lo) |
+ ((u64)le16_to_cpu(copied.i_nb.startblk_hi) << 32);
+ if (vi->datalayout == EROFS_INODE_FLAT_PLAIN &&
+ !((vi->startblk ^ EROFS_NULL_ADDR) & addrmask))
+ vi->startblk = EROFS_NULL_ADDR;
+
+		if (S_ISLNK(inode->i_mode)) {
+ err = erofs_fill_symlink(inode, ptr, ofs);
+ if (err)
+ goto err_out;
+ }
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ inode->i_rdev = new_decode_dev(le32_to_cpu(copied.i_u.rdev));
+ break;
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_rdev = 0;
+ break;
+ default:
+ erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto err_out;
+ }
+
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ inode->i_blocks = le32_to_cpu(copied.i_u.blocks_lo) <<
+ (sb->s_blocksize_bits - 9);
+ else
+ inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
+
if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
+ /* fill chunked inode summary info */
+ vi->chunkformat = le16_to_cpu(copied.i_u.c.format);
if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
- erofs_err(inode->i_sb,
- "unsupported chunk format %x of nid %llu",
+ erofs_err(sb, "unsupported chunk format %x of nid %llu",
vi->chunkformat, vi->nid);
err = -EOPNOTSUPP;
goto err_out;
}
- vi->chunkbits = LOG_BLOCK_SIZE +
+ vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
- inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
+ inode_set_atime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, inode_get_mtime(inode)));
inode->i_flags &= ~S_DAX;
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
- vi->datalayout == EROFS_INODE_FLAT_PLAIN)
+ (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
+ vi->datalayout == EROFS_INODE_CHUNK_BASED))
inode->i_flags |= S_DAX;
- if (!nblks)
- /* measure inode.i_blocks as generic filesystems */
- inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
- else
- inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
- return page;
-
-bogusimode:
- erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
- inode->i_mode, vi->nid);
- err = -EFSCORRUPTED;
err_out:
- DBG_BUGON(1);
- kfree(copied);
- unlock_page(page);
- put_page(page);
- return ERR_PTR(err);
-}
-
-static int erofs_fill_symlink(struct inode *inode, void *data,
- unsigned int m_pofs)
-{
- struct erofs_inode *vi = EROFS_I(inode);
- char *lnk;
-
- /* if it cannot be handled with fast symlink scheme */
- if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
- inode->i_size >= PAGE_SIZE) {
- inode->i_op = &erofs_symlink_iops;
- return 0;
- }
-
- lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
- if (!lnk)
- return -ENOMEM;
-
- m_pofs += vi->xattr_isize;
- /* inline symlink data shouldn't cross page boundary as well */
- if (m_pofs + inode->i_size > PAGE_SIZE) {
- kfree(lnk);
- erofs_err(inode->i_sb,
- "inline data cross block boundary @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- memcpy(lnk, data + m_pofs, inode->i_size);
- lnk[inode->i_size] = '\0';
-
- inode->i_link = lnk;
- inode->i_op = &erofs_fast_symlink_iops;
- return 0;
+ erofs_put_metabuf(&buf);
+ return err;
}
-static int erofs_fill_inode(struct inode *inode, int isdir)
+static int erofs_fill_inode(struct inode *inode)
{
struct erofs_inode *vi = EROFS_I(inode);
- struct page *page;
- unsigned int ofs;
- int err = 0;
-
- trace_erofs_fill_inode(inode, isdir);
+ int err;
- /* read inode base data from disk */
- page = erofs_read_inode(inode, &ofs);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ trace_erofs_fill_inode(inode);
+ err = erofs_read_inode(inode);
+ if (err)
+ return err;
- /* setup the new inode */
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &erofs_generic_iops;
- if (erofs_inode_is_data_compressed(vi->datalayout))
- inode->i_fop = &generic_ro_fops;
- else
- inode->i_fop = &erofs_file_fops;
+ inode->i_fop = &erofs_file_fops;
break;
case S_IFDIR:
inode->i_op = &erofs_dir_iops;
inode->i_fop = &erofs_dir_fops;
+ inode_nohighmem(inode);
break;
case S_IFLNK:
- err = erofs_fill_symlink(inode, page_address(page), ofs);
- if (err)
- goto out_unlock;
+ if (inode->i_link)
+ inode->i_op = &erofs_fast_symlink_iops;
+ else
+ inode->i_op = &erofs_symlink_iops;
inode_nohighmem(inode);
break;
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
+ default:
inode->i_op = &erofs_generic_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
- goto out_unlock;
- default:
- err = -EFSCORRUPTED;
- goto out_unlock;
+ return 0;
}
+ mapping_set_large_folios(inode->i_mapping);
if (erofs_inode_is_data_compressed(vi->datalayout)) {
- err = z_erofs_fill_inode(inode);
- goto out_unlock;
+#ifdef CONFIG_EROFS_FS_ZIP
+ DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
+ erofs_info, inode->i_sb,
+ "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
+ inode->i_mapping->a_ops = &z_erofs_aops;
+#else
+ err = -EOPNOTSUPP;
+#endif
+ } else {
+ inode->i_mapping->a_ops = &erofs_aops;
+#ifdef CONFIG_EROFS_FS_ONDEMAND
+ if (erofs_is_fscache_mode(inode->i_sb))
+ inode->i_mapping->a_ops = &erofs_fscache_access_aops;
+#endif
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb)))
+ inode->i_mapping->a_ops = &erofs_fileio_aops;
+#endif
}
- inode->i_mapping->a_ops = &erofs_raw_access_aops;
-out_unlock:
- unlock_page(page);
- put_page(page);
return err;
}
/*
- * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
- * we should do more for 32-bit platform to find the right inode.
+ * ino_t is 32 bits wide on 32-bit architectures, so the 64-bit value has to
+ * be squashed down to fit.
*/
-static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
+static ino_t erofs_squash_ino(struct super_block *sb, erofs_nid_t nid)
{
- const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+ u64 ino64 = erofs_nid_to_ino64(EROFS_SB(sb), nid);
- return EROFS_I(inode)->nid == nid;
+ if (sizeof(ino_t) < sizeof(erofs_nid_t))
+ ino64 ^= ino64 >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
+ return (ino_t)ino64;
}
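+
+/*
+ * Illustrative note: on 32-bit architectures, erofs_squash_ino() XOR-folds
+ * the upper 32 bits of the 64-bit ino into the lower 32 bits, so the full
+ * value still influences the resulting ino_t (collisions remain possible).
+ */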
-static int erofs_iget_set_actor(struct inode *inode, void *opaque)
+static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
- const erofs_nid_t nid = *(erofs_nid_t *)opaque;
-
- inode->i_ino = erofs_inode_hash(nid);
- return 0;
+ return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}
-static inline struct inode *erofs_iget_locked(struct super_block *sb,
- erofs_nid_t nid)
+static int erofs_iget5_set(struct inode *inode, void *opaque)
{
- const unsigned long hashval = erofs_inode_hash(nid);
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
- return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
- erofs_iget_set_actor, &nid);
+ inode->i_ino = erofs_squash_ino(inode->i_sb, nid);
+ EROFS_I(inode)->nid = nid;
+ return 0;
}
-struct inode *erofs_iget(struct super_block *sb,
- erofs_nid_t nid,
- bool isdir)
+struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
- struct inode *inode = erofs_iget_locked(sb, nid);
+ struct inode *inode;
+ inode = iget5_locked(sb, erofs_squash_ino(sb, nid), erofs_iget5_eq,
+ erofs_iget5_set, &nid);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
- int err;
- struct erofs_inode *vi = EROFS_I(inode);
-
- vi->nid = nid;
+ if (inode_state_read_once(inode) & I_NEW) {
+ int err = erofs_fill_inode(inode);
- err = erofs_fill_inode(inode, isdir);
- if (!err)
- unlock_new_inode(inode);
- else {
+ if (err) {
iget_failed(inode);
- inode = ERR_PTR(err);
+ return ERR_PTR(err);
}
+ unlock_new_inode(inode);
}
return inode;
}
-int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
struct inode *const inode = d_inode(path->dentry);
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ bool compressed =
+ erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout);
- if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
+ if (compressed)
stat->attributes |= STATX_ATTR_COMPRESSED;
-
stat->attributes |= STATX_ATTR_IMMUTABLE;
stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
STATX_ATTR_IMMUTABLE);
- generic_fillattr(&init_user_ns, inode, stat);
+ /*
+ * Return the DIO alignment restrictions if requested.
+ *
+	 * In EROFS, STATX_DIOALIGN is only supported for uncompressed inodes
+	 * in bdev-based mode; otherwise, no DIO support is reported.
+ */
+ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
+ stat->result_mask |= STATX_DIOALIGN;
+ if (bdev && !compressed) {
+ stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
+ stat->dio_offset_align = bdev_logical_block_size(bdev);
+ }
+ }
+ generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
+static int erofs_ioctl_get_volume_label(struct inode *inode, void __user *arg)
+{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ int ret;
+
+ if (!sbi->volume_name)
+ ret = clear_user(arg, 1);
+ else
+ ret = copy_to_user(arg, sbi->volume_name,
+ strlen(sbi->volume_name));
+ return ret ? -EFAULT : 0;
+}
+
+long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case FS_IOC_GETFSLABEL:
+ return erofs_ioctl_get_volume_label(inode, argp);
+ default:
+ return -ENOTTY;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+long erofs_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return erofs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
const struct inode_operations erofs_generic_iops = {
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
- .get_acl = erofs_get_acl,
+ .get_inode_acl = erofs_get_acl,
.fiemap = erofs_fiemap,
};
@@ -389,12 +384,12 @@ const struct inode_operations erofs_symlink_iops = {
.get_link = page_get_link,
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
- .get_acl = erofs_get_acl,
+ .get_inode_acl = erofs_get_acl,
};
const struct inode_operations erofs_fast_symlink_iops = {
.get_link = simple_get_link,
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
- .get_acl = erofs_get_acl,
+ .get_inode_acl = erofs_get_acl,
};
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 3265688af7f9..f7f622836198 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -8,34 +8,27 @@
#define __EROFS_INTERNAL_H
#include <linux/fs.h>
+#include <linux/dax.h>
#include <linux/dcache.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iomap.h>
#include "erofs_fs.h"
-/* redefine pr_fmt "erofs: " */
-#undef pr_fmt
-#define pr_fmt(fmt) "erofs: " fmt
-
-__printf(3, 4) void _erofs_err(struct super_block *sb,
- const char *function, const char *fmt, ...);
+__printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...);
#define erofs_err(sb, fmt, ...) \
- _erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
-__printf(3, 4) void _erofs_info(struct super_block *sb,
- const char *function, const char *fmt, ...);
+ _erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__)
#define erofs_info(sb, fmt, ...) \
- _erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
+ _erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__)
+
#ifdef CONFIG_EROFS_FS_DEBUG
-#define erofs_dbg(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
#define DBG_BUGON BUG_ON
#else
-#define erofs_dbg(x, ...) ((void)0)
#define DBG_BUGON(x) ((void)(x))
#endif /* !CONFIG_EROFS_FS_DEBUG */
@@ -44,28 +37,32 @@ __printf(3, 4) void _erofs_info(struct super_block *sb,
typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
-/* data type for filesystem-wide blocks number */
-typedef u32 erofs_blk_t;
+typedef u64 erofs_blk_t;
struct erofs_device_info {
char *path;
- struct block_device *bdev;
+ struct erofs_fscache *fscache;
+ struct file *file;
struct dax_device *dax_dev;
+ u64 fsoff, dax_part_off;
- u32 blocks;
- u32 mapped_blkaddr;
+ erofs_blk_t blocks;
+ erofs_blk_t uniaddr;
+};
+
+enum {
+ EROFS_SYNC_DECOMPRESS_AUTO,
+ EROFS_SYNC_DECOMPRESS_FORCE_ON,
+ EROFS_SYNC_DECOMPRESS_FORCE_OFF
};
struct erofs_mount_opts {
-#ifdef CONFIG_EROFS_FS_ZIP
/* current strategy of how to use managed cache */
unsigned char cache_strategy;
- /* strategy of sync decompression (false - auto, true - force on) */
- bool readahead_sync_decompress;
-
+ /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
+ unsigned int sync_decompress;
/* threshold for decompression synchronously */
unsigned int max_sync_decompress_pages;
-#endif
unsigned int mount_opt;
};
@@ -74,11 +71,7 @@ struct erofs_dev_context {
struct rw_semaphore rwsem;
unsigned int extra_devices;
-};
-
-struct erofs_fs_context {
- struct erofs_mount_opts opt;
- struct erofs_dev_context *devs;
+ bool flatdev;
};
/* all filesystem-wide lz4 configurations */
@@ -89,7 +82,31 @@ struct erofs_sb_lz4_info {
u16 max_pclusterblks;
};
+struct erofs_domain {
+ refcount_t ref;
+ struct list_head list;
+ struct fscache_volume *volume;
+ char *domain_id;
+};
+
+struct erofs_fscache {
+ struct fscache_cookie *cookie;
+ struct inode *inode; /* anonymous inode for the blob */
+
+ /* used for share domain mode */
+ struct erofs_domain *domain;
+ struct list_head node;
+ refcount_t ref;
+ char *name;
+};
+
+struct erofs_xattr_prefix_item {
+ struct erofs_xattr_long_prefix *prefix;
+ u8 infix_len;
+};
+
struct erofs_sb_info {
+ struct erofs_device_info dif0;
struct erofs_mount_opts opt; /* options */
#ifdef CONFIG_EROFS_FS_ZIP
/* list for all registered superblocks, mainly for shrinker */
@@ -107,33 +124,49 @@ struct erofs_sb_info {
struct erofs_sb_lz4_info lz4;
#endif /* CONFIG_EROFS_FS_ZIP */
+ struct inode *packed_inode;
+ struct inode *metabox_inode;
struct erofs_dev_context *devs;
- struct dax_device *dax_dev;
u64 total_blocks;
- u32 primarydevice_blocks;
u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
u32 xattr_blkaddr;
+ u32 xattr_prefix_start;
+ u8 xattr_prefix_count;
+ struct erofs_xattr_prefix_item *xattr_prefixes;
+ unsigned int xattr_filter_reserved;
#endif
u16 device_id_mask; /* valid bits of device id to be used */
- /* inode slot unit size in bit shift */
- unsigned char islotbits;
+ unsigned char islotbits; /* inode slot unit size in bit shift */
+ unsigned char blkszbits; /* filesystem block size in bit shift */
u32 sb_size; /* total superblock size */
- u32 build_time_nsec;
- u64 build_time;
+ u32 fixed_nsec;
+ s64 epoch;
/* what we really care is nid, rather than ino.. */
erofs_nid_t root_nid;
+ erofs_nid_t packed_nid;
+ erofs_nid_t metabox_nid;
/* used for statfs, f_files - f_favail */
u64 inos;
- u8 uuid[16]; /* 128-bit uuid for volume */
- u8 volume_name[16]; /* volume name */
+ char *volume_name;
u32 feature_compat;
u32 feature_incompat;
+
+ /* sysfs support */
+ struct kobject s_kobj; /* /sys/fs/erofs/<devname> */
+ struct completion s_kobj_unregister;
+ erofs_off_t dir_ra_bytes;
+
+ /* fscache support */
+ struct fscache_volume *volume;
+ struct erofs_domain *domain;
+ char *fsid;
+ char *domain_id;
};
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
@@ -144,113 +177,42 @@ struct erofs_sb_info {
#define EROFS_MOUNT_POSIX_ACL 0x00000020
#define EROFS_MOUNT_DAX_ALWAYS 0x00000040
#define EROFS_MOUNT_DAX_NEVER 0x00000080
+#define EROFS_MOUNT_DIRECT_IO 0x00000100
#define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option)
-enum {
- EROFS_ZIP_CACHE_DISABLED,
- EROFS_ZIP_CACHE_READAHEAD,
- EROFS_ZIP_CACHE_READAROUND
-};
-
-#ifdef CONFIG_EROFS_FS_ZIP
-#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
-
-/* basic unit of the workstation of a super_block */
-struct erofs_workgroup {
- /* the workgroup index in the workstation */
- pgoff_t index;
-
- /* overall workgroup reference count */
- atomic_t refcount;
-};
-
-#if defined(CONFIG_SMP)
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
+static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
{
- /*
- * other observers should notice all modifications
- * in the freezing period.
- */
- smp_mb();
- atomic_set(&grp->refcount, orig_val);
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- return atomic_cond_read_relaxed(&grp->refcount,
- VAL != EROFS_LOCKED_MAGIC);
-}
-#else
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- /* no need to spin on UP platforms, let's just disable preemption. */
- if (val != atomic_read(&grp->refcount)) {
- preempt_enable();
- return false;
- }
- return true;
+ return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
}
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
+static inline bool erofs_is_fscache_mode(struct super_block *sb)
{
- preempt_enable();
+ return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) &&
+ !erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev;
}
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- int v = atomic_read(&grp->refcount);
-
- /* workgroup is never freezed on uniprocessor systems */
- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
- return v;
-}
-#endif /* !CONFIG_SMP */
-#endif /* !CONFIG_EROFS_FS_ZIP */
-
-/* we strictly follow PAGE_SIZE and no buffer head yet */
-#define LOG_BLOCK_SIZE PAGE_SHIFT
-
-#undef LOG_SECTORS_PER_BLOCK
-#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
-
-#undef SECTORS_PER_BLOCK
-#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK)
-
-#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
-
-#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
-#error erofs cannot be used in this platform
-#endif
-
-#define ROOT_NID(sb) ((sb)->root_nid)
+enum {
+ EROFS_ZIP_CACHE_DISABLED,
+ EROFS_ZIP_CACHE_READAHEAD,
+ EROFS_ZIP_CACHE_READAROUND
+};
-#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
-#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
-#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
+struct erofs_buf {
+ struct address_space *mapping;
+ struct file *file;
+ u64 off;
+ struct page *page;
+ void *base;
+};
+#define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL })
-static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
-{
- return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
-}
+#define erofs_blknr(sb, pos) ((erofs_blk_t)((pos) >> (sb)->s_blocksize_bits))
+#define erofs_blkoff(sb, pos) ((pos) & ((sb)->s_blocksize - 1))
+#define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
+#define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
@@ -258,11 +220,39 @@ static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}
-EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING)
+EROFS_FEATURE_FUNCS(zero_padding, incompat, INCOMPAT_ZERO_PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
+EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
+EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2)
+EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
+EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
+EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
+EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
+EROFS_FEATURE_FUNCS(48bit, incompat, INCOMPAT_48BIT)
+EROFS_FEATURE_FUNCS(metabox, incompat, INCOMPAT_METABOX)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
+EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER)
+EROFS_FEATURE_FUNCS(shared_ea_in_metabox, compat, COMPAT_SHARED_EA_IN_METABOX)
+EROFS_FEATURE_FUNCS(plain_xattr_pfx, compat, COMPAT_PLAIN_XATTR_PFX)
+
+static inline u64 erofs_nid_to_ino64(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+ if (!erofs_sb_has_metabox(sbi))
+ return nid;
+
+ /*
+ * When metadata compression is enabled, avoid generating excessively
+ * large inode numbers for metadata-compressed inodes. Shift NIDs in
+ * the 31-62 bit range left by one and move the metabox flag to bit 31.
+ *
+ * Note: on-disk NIDs remain unchanged as they are primarily used for
+ * compatibility with non-LFS 32-bit applications.
+ */
+ return ((nid << 1) & GENMASK_ULL(63, 32)) | (nid & GENMASK(30, 0)) |
+ ((nid >> EROFS_DIRENT_NID_METABOX_BIT) << 31);
+}
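A minimal, self-contained userspace sketch (not part of this patch) of the NID-to-ino64 remapping performed above; the METABOX_BIT value of 63 is an assumption here, since EROFS_DIRENT_NID_METABOX_BIT is defined outside this hunk:

/* Illustrative only: mirrors the GENMASK arithmetic in erofs_nid_to_ino64(). */
#include <stdint.h>
#include <stdio.h>

#define METABOX_BIT 63	/* assumed position of the on-disk metabox flag */

static uint64_t nid_to_ino64(uint64_t nid)
{
	uint64_t high = (nid << 1) & 0xffffffff00000000ULL;	/* bits 31..62 -> 32..63 */
	uint64_t low  = nid & 0x7fffffffULL;			/* bits 0..30 unchanged */
	uint64_t flag = (nid >> METABOX_BIT) << 31;		/* metabox flag -> bit 31 */

	return high | low | flag;
}

int main(void)
{
	/* a hypothetical metabox NID with bit 40 set: the flag lands in bit 31 */
	uint64_t nid = (1ULL << METABOX_BIT) | (1ULL << 40) | 0x1234;

	printf("ino64 = %#llx\n", (unsigned long long)nid_to_ino64(nid));
	return 0;
}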
/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT 0
@@ -272,6 +262,9 @@ EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
#define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2)
+/* default readahead size of directories */
+#define EROFS_DIR_RA_BYTES 16384
+
struct erofs_inode {
erofs_nid_t nid;
@@ -280,13 +273,15 @@ struct erofs_inode {
unsigned char datalayout;
unsigned char inode_isize;
- unsigned short xattr_isize;
+ bool dot_omitted;
+ unsigned int xattr_isize;
+ unsigned int xattr_name_filter;
unsigned int xattr_shared_count;
unsigned int *xattr_shared_xattrs;
union {
- erofs_blk_t raw_blkaddr;
+ erofs_blk_t startblk;
struct {
unsigned short chunkformat;
unsigned char chunkbits;
@@ -295,7 +290,13 @@ struct erofs_inode {
struct {
unsigned short z_advise;
unsigned char z_algorithmtype[2];
- unsigned char z_logical_clusterbits;
+ unsigned char z_lclusterbits;
+ union {
+ u64 z_tailextent_headlcn;
+ u64 z_extents;
+ };
+ erofs_off_t z_fragmentoff;
+ unsigned short z_idata_size;
};
#endif /* CONFIG_EROFS_FS_ZIP */
};
@@ -303,179 +304,138 @@ struct erofs_inode {
struct inode vfs_inode;
};
-#define EROFS_I(ptr) \
- container_of(ptr, struct erofs_inode, vfs_inode)
+#define EROFS_I(ptr) container_of(ptr, struct erofs_inode, vfs_inode)
-static inline unsigned long erofs_inode_datablocks(struct inode *inode)
+static inline bool erofs_inode_in_metabox(struct inode *inode)
{
- /* since i_size cannot be changed */
- return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+ return EROFS_I(inode)->nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT);
}
-static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
- unsigned int bits)
+static inline erofs_off_t erofs_iloc(struct inode *inode)
{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ erofs_nid_t nid_lo = EROFS_I(inode)->nid & EROFS_DIRENT_NID_MASK;
- return (value >> bit) & ((1 << bits) - 1);
+ if (erofs_inode_in_metabox(inode))
+ return nid_lo << sbi->islotbits;
+ return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
+ (nid_lo << sbi->islotbits);
}
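A hedged standalone sketch of the erofs_iloc() calculation above, assuming 4KiB blocks and 32-byte compact inode slots (so islotbits == 5); the concrete numbers are illustrative, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blkszbits = 12;	/* assumed 4KiB block size */
	unsigned int islotbits = 5;	/* assumed ilog2(32-byte compact inode slot) */
	uint64_t meta_blkaddr = 2;	/* hypothetical metadata start block */
	uint64_t nid = 100;		/* on-disk inode slot index */

	/* erofs_pos(sb, meta_blkaddr) + (nid << islotbits) */
	uint64_t iloc = (meta_blkaddr << blkszbits) + (nid << islotbits);

	printf("nid %llu -> byte offset %llu\n",
	       (unsigned long long)nid, (unsigned long long)iloc);
	return 0;
}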
-
-static inline unsigned int erofs_inode_version(unsigned int value)
+static inline unsigned int erofs_inode_version(unsigned int ifmt)
{
- return erofs_bitrange(value, EROFS_I_VERSION_BIT,
- EROFS_I_VERSION_BITS);
+ return (ifmt >> EROFS_I_VERSION_BIT) & EROFS_I_VERSION_MASK;
}
-static inline unsigned int erofs_inode_datalayout(unsigned int value)
+static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
{
- return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
- EROFS_I_DATALAYOUT_BITS);
+ return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
}
-/*
- * Different from grab_cache_page_nowait(), reclaiming is never triggered
- * when allocating new pages.
- */
-static inline
-struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
- pgoff_t index)
+/* reclaiming is never triggered when allocating new folios. */
+static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
+ pgoff_t index)
{
- return pagecache_get_page(mapping, index,
+ return __filemap_get_folio(as, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
- readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
+ readahead_gfp_mask(as) & ~__GFP_RECLAIM);
}
-extern const struct super_operations erofs_sops;
-
-extern const struct address_space_operations erofs_raw_access_aops;
-extern const struct address_space_operations z_erofs_aops;
-
-/*
- * Logical to physical block mapping
- *
- * Different with other file systems, it is used for 2 access modes:
- *
- * 1) RAW access mode:
- *
- * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
- * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
- *
- * Note that m_lblk in the RAW access mode refers to the number of
- * the compressed ondisk block rather than the uncompressed
- * in-memory block for the compressed file.
- *
- * m_pofs equals to m_lofs except for the inline data page.
- *
- * 2) Normal access mode:
- *
- * If the inode is not compressed, it has no difference with
- * the RAW access mode. However, if the inode is compressed,
- * users should pass a valid (m_lblk, m_lofs) pair, and get
- * the needed m_pblk, m_pofs, m_len to get the compressed data
- * and the updated m_lblk, m_lofs which indicates the start
- * of the corresponding uncompressed data in the file.
- */
-enum {
- BH_Encoded = BH_PrivateStart,
- BH_FullMapped,
-};
-
/* Has a disk mapping */
-#define EROFS_MAP_MAPPED (1 << BH_Mapped)
+#define EROFS_MAP_MAPPED 0x0001
/* Located in metadata (could be copied from bd_inode) */
-#define EROFS_MAP_META (1 << BH_Meta)
+#define EROFS_MAP_META 0x0002
/* The extent is encoded */
-#define EROFS_MAP_ENCODED (1 << BH_Encoded)
+#define EROFS_MAP_ENCODED 0x0004
/* The length of extent is full */
-#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
+#define EROFS_MAP_FULL_MAPPED 0x0008
+/* Located in the special packed inode */
+#define __EROFS_MAP_FRAGMENT 0x0010
+/* The extent refers to partial decompressed data */
+#define EROFS_MAP_PARTIAL_REF 0x0020
+
+#define EROFS_MAP_FRAGMENT (EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)
struct erofs_map_blocks {
+ struct erofs_buf buf;
+
erofs_off_t m_pa, m_la;
u64 m_plen, m_llen;
unsigned short m_deviceid;
char m_algorithmformat;
unsigned int m_flags;
-
- struct page *mpage;
};
-/* Flags used by erofs_map_blocks_flatmode() */
-#define EROFS_GET_BLOCKS_RAW 0x0001
/*
 * Used to get the exact decompressed length, e.g. fiemap (consider the lookback
 * approach instead if possible, since it is lighter on metadata.)
*/
-#define EROFS_GET_BLOCKS_FIEMAP 0x0002
+#define EROFS_GET_BLOCKS_FIEMAP 0x0001
/* Used to map the whole extent if non-negligible data is requested for LZMA */
-#define EROFS_GET_BLOCKS_READMORE 0x0004
+#define EROFS_GET_BLOCKS_READMORE 0x0002
+/* Used to map tail extent for tailpacking inline or fragment pcluster */
+#define EROFS_GET_BLOCKS_FINDTAIL 0x0004
enum {
Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
+ Z_EROFS_COMPRESSION_INTERLACED,
Z_EROFS_COMPRESSION_RUNTIME_MAX
};
-/* zmap.c */
-extern const struct iomap_ops z_erofs_iomap_report_ops;
-
-#ifdef CONFIG_EROFS_FS_ZIP
-int z_erofs_fill_inode(struct inode *inode);
-int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags);
-#else
-static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
-static inline int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- return -EOPNOTSUPP;
-}
-#endif /* !CONFIG_EROFS_FS_ZIP */
-
struct erofs_map_dev {
+ struct super_block *m_sb;
+ struct erofs_device_info *m_dif;
struct block_device *m_bdev;
- struct dax_device *m_daxdev;
erofs_off_t m_pa;
unsigned int m_deviceid;
};
-/* data.c */
-extern const struct file_operations erofs_file_fops;
-struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
-int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
-int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len);
+extern const struct super_operations erofs_sops;
-/* inode.c */
-static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
-{
-#if BITS_PER_LONG == 32
- return (nid >> 32) ^ (nid & 0xffffffff);
-#else
- return nid;
-#endif
-}
+extern const struct address_space_operations erofs_aops;
+extern const struct address_space_operations erofs_fileio_aops;
+extern const struct address_space_operations z_erofs_aops;
+extern const struct address_space_operations erofs_fscache_access_aops;
extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;
+extern const struct inode_operations erofs_dir_iops;
-struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
-int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
- struct kstat *stat, u32 request_mask,
- unsigned int query_flags);
+extern const struct file_operations erofs_file_fops;
+extern const struct file_operations erofs_dir_fops;
-/* namei.c */
-extern const struct inode_operations erofs_dir_iops;
+extern const struct iomap_ops z_erofs_iomap_report_ops;
-int erofs_namei(struct inode *dir, struct qstr *name,
+/* flags for erofs_fscache_register_cookie() */
+#define EROFS_REG_COOKIE_SHARE 0x0001
+#define EROFS_REG_COOKIE_NEED_NOEXIST 0x0002
+
+void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+ erofs_off_t *offset, int *lengthp);
+void erofs_unmap_metabuf(struct erofs_buf *buf);
+void erofs_put_metabuf(struct erofs_buf *buf);
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap);
+int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb,
+ bool in_metabox);
+void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
+ erofs_off_t offset, bool in_metabox);
+int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
+int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
+int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
+void erofs_onlinefolio_init(struct folio *folio);
+void erofs_onlinefolio_split(struct folio *folio);
+void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty);
+struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
+int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags);
+int erofs_namei(struct inode *dir, const struct qstr *name,
erofs_nid_t *nid, unsigned int *d_type);
-/* dir.c */
-extern const struct file_operations erofs_dir_fops;
-
static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
int retried = 0;
@@ -491,17 +451,17 @@ static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
return NULL;
}
-/* pcpubuf.c */
-void *erofs_get_pcpubuf(unsigned int requiredpages);
-void erofs_put_pcpubuf(void *ptr);
-int erofs_pcpubuf_growsize(unsigned int nrpages);
-void erofs_pcpubuf_init(void);
-void erofs_pcpubuf_exit(void);
-
-/* utils.c / zdata.c */
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
-static inline void erofs_pagepool_add(struct page **pagepool,
- struct page *page)
+int erofs_register_sysfs(struct super_block *sb);
+void erofs_unregister_sysfs(struct super_block *sb);
+int __init erofs_init_sysfs(void);
+void erofs_exit_sysfs(void);
+
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
+static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+{
+ return __erofs_allocpage(pagepool, gfp, false);
+}
+static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
set_page_private(page, (unsigned long)*pagepool);
*pagepool = page;
@@ -509,62 +469,77 @@ static inline void erofs_pagepool_add(struct page **pagepool,
void erofs_release_pages(struct page **pagepool);
#ifdef CONFIG_EROFS_FS_ZIP
-int erofs_workgroup_put(struct erofs_workgroup *grp);
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index);
-struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp);
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+
+extern atomic_long_t erofs_global_shrink_cnt;
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
-int __init z_erofs_init_zip_subsystem(void);
-void z_erofs_exit_zip_subsystem(void);
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct page *page);
-int z_erofs_load_lz4_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lz4_cfgs *lz4, int len);
+int __init z_erofs_init_subsystem(void);
+void z_erofs_exit_subsystem(void);
+int z_erofs_init_super(struct super_block *sb);
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+ unsigned long nr_shrink);
+int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ int flags);
+void *z_erofs_get_gbuf(unsigned int requiredpages);
+void z_erofs_put_gbuf(void *ptr);
+int z_erofs_gbuf_growsize(unsigned int nrpages);
+int __init z_erofs_gbuf_init(void);
+void z_erofs_gbuf_exit(void);
+int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
-static inline int z_erofs_init_zip_subsystem(void) { return 0; }
-static inline void z_erofs_exit_zip_subsystem(void) {}
-static inline int z_erofs_load_lz4_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lz4_cfgs *lz4, int len)
-{
- if (lz4 || dsb->u1.lz4_max_distance) {
- erofs_err(sb, "lz4 algorithm isn't enabled");
- return -EINVAL;
- }
- return 0;
-}
+static inline int z_erofs_init_subsystem(void) { return 0; }
+static inline void z_erofs_exit_subsystem(void) {}
+static inline int z_erofs_init_super(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */
-#ifdef CONFIG_EROFS_FS_ZIP_LZMA
-int z_erofs_lzma_init(void);
-void z_erofs_lzma_exit(void);
-int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lzma_cfgs *lzma, int size);
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev);
+void erofs_fileio_submit_bio(struct bio *bio);
#else
-static inline int z_erofs_lzma_init(void) { return 0; }
-static inline int z_erofs_lzma_exit(void) { return 0; }
-static inline int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lzma_cfgs *lzma, int size) {
- if (lzma) {
- erofs_err(sb, "lzma algorithm isn't enabled");
- return -EINVAL;
- }
- return 0;
+static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
+static inline void erofs_fileio_submit_bio(struct bio *bio) {}
+#endif
+
+#ifdef CONFIG_EROFS_FS_ONDEMAND
+int erofs_fscache_register_fs(struct super_block *sb);
+void erofs_fscache_unregister_fs(struct super_block *sb);
+
+struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+ char *name, unsigned int flags);
+void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
+struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev);
+void erofs_fscache_submit_bio(struct bio *bio);
+#else
+static inline int erofs_fscache_register_fs(struct super_block *sb)
+{
+ return -EOPNOTSUPP;
}
-#endif /* !CONFIG_EROFS_FS_ZIP */
+static inline void erofs_fscache_unregister_fs(struct super_block *sb) {}
+
+static inline
+struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+ char *name, unsigned int flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache)
+{
+}
+static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
+static inline void erofs_fscache_submit_bio(struct bio *bio) {}
+#endif
+
+long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+long erofs_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index 8629e616028c..f7cf4f41af28 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -2,9 +2,9 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2022, Alibaba Cloud
*/
#include "xattr.h"
-
#include <trace/events/erofs.h>
struct erofs_qstr {
@@ -86,40 +86,35 @@ static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
return ERR_PTR(-ENOENT);
}
-static struct page *find_target_block_classic(struct inode *dir,
- struct erofs_qstr *name,
- int *_ndirents)
+static void *erofs_find_target_block(struct erofs_buf *target,
+ struct inode *dir, struct erofs_qstr *name, int *_ndirents)
{
- unsigned int startprfx, endprfx;
- int head, back;
- struct address_space *const mapping = dir->i_mapping;
- struct page *candidate = ERR_PTR(-ENOENT);
-
- startprfx = endprfx = 0;
- head = 0;
- back = erofs_inode_datablocks(dir) - 1;
+ unsigned int bsz = i_blocksize(dir);
+ int head = 0, back = erofs_iblks(dir) - 1;
+ unsigned int startprfx = 0, endprfx = 0;
+ void *candidate = ERR_PTR(-ENOENT);
while (head <= back) {
const int mid = head + (back - head) / 2;
- struct page *page = read_mapping_page(mapping, mid, NULL);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ struct erofs_dirent *de;
- if (!IS_ERR(page)) {
- struct erofs_dirent *de = kmap_atomic(page);
- const int nameoff = nameoff_from_disk(de->nameoff,
- EROFS_BLKSIZ);
+ buf.mapping = dir->i_mapping;
+ de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), true);
+ if (!IS_ERR(de)) {
+ const int nameoff = nameoff_from_disk(de->nameoff, bsz);
const int ndirents = nameoff / sizeof(*de);
int diff;
unsigned int matched;
struct erofs_qstr dname;
if (!ndirents) {
- kunmap_atomic(de);
- put_page(page);
+ erofs_put_metabuf(&buf);
erofs_err(dir->i_sb,
"corrupted dir block %d @ nid %llu",
mid, EROFS_I(dir)->nid);
DBG_BUGON(1);
- page = ERR_PTR(-EFSCORRUPTED);
+ de = ERR_PTR(-EFSCORRUPTED);
goto out;
}
@@ -127,50 +122,47 @@ static struct page *find_target_block_classic(struct inode *dir,
dname.name = (u8 *)de + nameoff;
if (ndirents == 1)
- dname.end = (u8 *)de + EROFS_BLKSIZ;
+ dname.end = (u8 *)de + bsz;
else
dname.end = (u8 *)de +
- nameoff_from_disk(de[1].nameoff,
- EROFS_BLKSIZ);
+ nameoff_from_disk(de[1].nameoff, bsz);
/* string comparison without already matched prefix */
diff = erofs_dirnamecmp(name, &dname, &matched);
- kunmap_atomic(de);
-
- if (!diff) {
- *_ndirents = 0;
- goto out;
- } else if (diff > 0) {
- head = mid + 1;
- startprfx = matched;
-
- if (!IS_ERR(candidate))
- put_page(candidate);
- candidate = page;
- *_ndirents = ndirents;
- } else {
- put_page(page);
+ if (diff < 0) {
+ erofs_put_metabuf(&buf);
back = mid - 1;
endprfx = matched;
+ continue;
}
+
+ if (!IS_ERR(candidate))
+ erofs_put_metabuf(target);
+ *target = buf;
+ if (!diff) {
+ *_ndirents = 0;
+ return de;
+ }
+ head = mid + 1;
+ startprfx = matched;
+ candidate = de;
+ *_ndirents = ndirents;
continue;
}
out: /* free if the candidate is valid */
if (!IS_ERR(candidate))
- put_page(candidate);
- return page;
+ erofs_put_metabuf(target);
+ return de;
}
return candidate;
}
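The rewritten erofs_find_target_block() above binary-searches the directory blocks, keeping the last block whose first entry compares less than or equal to the target as the candidate. A minimal userspace sketch of the same search strategy over hypothetical sorted name blocks (not the on-disk dirent layout):

#include <stdio.h>
#include <string.h>

/* Each "block" starts with its smallest name; blocks are sorted by that name. */
static const char *blocks[][2] = {
	{ "alpha", "bravo" }, { "charlie", "delta" }, { "echo", "foxtrot" },
};

int main(void)
{
	const char *target = "delta";
	int head = 0, back = 2, candidate = -1;

	while (head <= back) {
		int mid = head + (back - head) / 2;
		int diff = strcmp(target, blocks[mid][0]);

		if (diff < 0) {			/* target sorts before this block */
			back = mid - 1;
		} else {			/* keep as the best candidate so far */
			candidate = mid;
			if (!diff)
				break;
			head = mid + 1;
		}
	}
	printf("search block %d for \"%s\"\n", candidate, target);
	return 0;
}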
-int erofs_namei(struct inode *dir,
- struct qstr *name,
- erofs_nid_t *nid, unsigned int *d_type)
+int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,
+ unsigned int *d_type)
{
int ndirents;
- struct page *page;
- void *data;
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_dirent *de;
struct erofs_qstr qn;
@@ -179,34 +171,26 @@ int erofs_namei(struct inode *dir,
qn.name = name->name;
qn.end = name->name + name->len;
+ buf.mapping = dir->i_mapping;
ndirents = 0;
- page = find_target_block_classic(dir, &qn, &ndirents);
-
- if (IS_ERR(page))
- return PTR_ERR(page);
+ de = erofs_find_target_block(&buf, dir, &qn, &ndirents);
+ if (IS_ERR(de))
+ return PTR_ERR(de);
- data = kmap_atomic(page);
- /* the target page has been mapped */
if (ndirents)
- de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
- else
- de = (struct erofs_dirent *)data;
+ de = find_target_dirent(&qn, (u8 *)de, i_blocksize(dir),
+ ndirents);
if (!IS_ERR(de)) {
*nid = le64_to_cpu(de->nid);
*d_type = de->file_type;
}
-
- kunmap_atomic(data);
- put_page(page);
-
+ erofs_put_metabuf(&buf);
return PTR_ERR_OR_ZERO(de);
}
-/* NOTE: i_mutex is already held by vfs */
-static struct dentry *erofs_lookup(struct inode *dir,
- struct dentry *dentry,
+static struct dentry *erofs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
int err;
@@ -214,29 +198,20 @@ static struct dentry *erofs_lookup(struct inode *dir,
unsigned int d_type;
struct inode *inode;
- DBG_BUGON(!d_really_is_negative(dentry));
- /* dentry must be unhashed in lookup, no need to worry about */
- DBG_BUGON(!d_unhashed(dentry));
-
trace_erofs_lookup(dir, dentry, flags);
- /* file name exceeds fs limit */
if (dentry->d_name.len > EROFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- /* false uninitialized warnings on gcc 4.8.x */
err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
- if (err == -ENOENT) {
+ if (err == -ENOENT)
/* negative dentry */
inode = NULL;
- } else if (err) {
+ else if (err)
inode = ERR_PTR(err);
- } else {
- erofs_dbg("%s, %pd (nid %llu) found, d_type %u", __func__,
- dentry, nid, d_type);
- inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR);
- }
+ else
+ inode = erofs_iget(dir->i_sb, nid);
return d_splice_alias(inode, dentry);
}
@@ -244,6 +219,6 @@ const struct inode_operations erofs_dir_iops = {
.lookup = erofs_lookup,
.getattr = erofs_getattr,
.listxattr = erofs_listxattr,
- .get_acl = erofs_get_acl,
+ .get_inode_acl = erofs_get_acl,
.fiemap = erofs_fiemap,
};
diff --git a/fs/erofs/pcpubuf.c b/fs/erofs/pcpubuf.c
deleted file mode 100644
index a2efd833d1b6..000000000000
--- a/fs/erofs/pcpubuf.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Gao Xiang <xiang@kernel.org>
- *
- * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
- * per-CPU virtual memory (in pages) in advance to store such inplace I/O
- * data if inplace decompression is failed (due to unmet inplace margin for
- * example).
- */
-#include "internal.h"
-
-struct erofs_pcpubuf {
- raw_spinlock_t lock;
- void *ptr;
- struct page **pages;
- unsigned int nrpages;
-};
-
-static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);
-
-void *erofs_get_pcpubuf(unsigned int requiredpages)
- __acquires(pcb->lock)
-{
- struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);
-
- raw_spin_lock(&pcb->lock);
- /* check if the per-CPU buffer is too small */
- if (requiredpages > pcb->nrpages) {
- raw_spin_unlock(&pcb->lock);
- put_cpu_var(erofs_pcb);
- /* (for sparse checker) pretend pcb->lock is still taken */
- __acquire(pcb->lock);
- return NULL;
- }
- return pcb->ptr;
-}
-
-void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
-{
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());
-
- DBG_BUGON(pcb->ptr != ptr);
- raw_spin_unlock(&pcb->lock);
- put_cpu_var(erofs_pcb);
-}
-
-/* the next step: support per-CPU page buffers hotplug */
-int erofs_pcpubuf_growsize(unsigned int nrpages)
-{
- static DEFINE_MUTEX(pcb_resize_mutex);
- static unsigned int pcb_nrpages;
- struct page *pagepool = NULL;
- int delta, cpu, ret, i;
-
- mutex_lock(&pcb_resize_mutex);
- delta = nrpages - pcb_nrpages;
- ret = 0;
- /* avoid shrinking pcpubuf, since no idea how many fses rely on */
- if (delta <= 0)
- goto out;
-
- for_each_possible_cpu(cpu) {
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
- struct page **pages, **oldpages;
- void *ptr, *old_ptr;
-
- pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < nrpages; ++i) {
- pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
- if (!pages[i]) {
- ret = -ENOMEM;
- oldpages = pages;
- goto free_pagearray;
- }
- }
- ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
- if (!ptr) {
- ret = -ENOMEM;
- oldpages = pages;
- goto free_pagearray;
- }
- raw_spin_lock(&pcb->lock);
- old_ptr = pcb->ptr;
- pcb->ptr = ptr;
- oldpages = pcb->pages;
- pcb->pages = pages;
- i = pcb->nrpages;
- pcb->nrpages = nrpages;
- raw_spin_unlock(&pcb->lock);
-
- if (!oldpages) {
- DBG_BUGON(old_ptr);
- continue;
- }
-
- if (old_ptr)
- vunmap(old_ptr);
-free_pagearray:
- while (i)
- erofs_pagepool_add(&pagepool, oldpages[--i]);
- kfree(oldpages);
- if (ret)
- break;
- }
- pcb_nrpages = nrpages;
- erofs_release_pages(&pagepool);
-out:
- mutex_unlock(&pcb_resize_mutex);
- return ret;
-}
-
-void erofs_pcpubuf_init(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-
- raw_spin_lock_init(&pcb->lock);
- }
-}
-
-void erofs_pcpubuf_exit(void)
-{
- int cpu, i;
-
- for_each_possible_cpu(cpu) {
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-
- if (pcb->ptr) {
- vunmap(pcb->ptr);
- pcb->ptr = NULL;
- }
- if (!pcb->pages)
- continue;
-
- for (i = 0; i < pcb->nrpages; ++i)
- if (pcb->pages[i])
- put_page(pcb->pages[i]);
- kfree(pcb->pages);
- pcb->pages = NULL;
- }
-}
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 6a969b1e0ee6..937a215f626c 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -2,16 +2,15 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
*/
-#include <linux/module.h>
-#include <linux/buffer_head.h>
#include <linux/statfs.h>
-#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
-#include <linux/dax.h>
+#include <linux/exportfs.h>
+#include <linux/backing-dev.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
@@ -19,58 +18,42 @@
static struct kmem_cache *erofs_inode_cachep __read_mostly;
-void _erofs_err(struct super_block *sb, const char *function,
- const char *fmt, ...)
+void _erofs_printk(struct super_block *sb, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
+ int level;
va_start(args, fmt);
- vaf.fmt = fmt;
+ level = printk_get_level(fmt);
+ vaf.fmt = printk_skip_level(fmt);
vaf.va = &args;
-
- pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
- va_end(args);
-}
-
-void _erofs_info(struct super_block *sb, const char *function,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_info("(device %s): %pV", sb->s_id, &vaf);
+ if (sb)
+ printk("%c%cerofs (device %s): %pV",
+ KERN_SOH_ASCII, level, sb->s_id, &vaf);
+ else
+ printk("%c%cerofs: %pV", KERN_SOH_ASCII, level, &vaf);
va_end(args);
}
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
- struct erofs_super_block *dsb;
- u32 expected_crc, crc;
-
- dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
- EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
- if (!dsb)
- return -ENOMEM;
+ struct erofs_super_block *dsb = sbdata + EROFS_SUPER_OFFSET;
+ u32 len = 1 << EROFS_SB(sb)->blkszbits, crc;
- expected_crc = le32_to_cpu(dsb->checksum);
- dsb->checksum = 0;
- /* to allow for x86 boot sectors and other oddities. */
- crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
- kfree(dsb);
+ if (len > EROFS_SUPER_OFFSET)
+ len -= EROFS_SUPER_OFFSET;
+ len -= offsetof(struct erofs_super_block, checksum) +
+ sizeof(dsb->checksum);
- if (crc != expected_crc) {
- erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
- crc, expected_crc);
- return -EBADMSG;
- }
- return 0;
+ /* skip .magic(pre-verified) and .checksum(0) fields */
+ crc = crc32c(0x5045B54A, (&dsb->checksum) + 1, len);
+ if (crc == le32_to_cpu(dsb->checksum))
+ return 0;
+ erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
+ crc, le32_to_cpu(dsb->checksum));
+ return -EBADMSG;
}
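A worked example (assumptions noted inline, not part of this patch) of the byte range the rewritten checksum verification above covers, assuming a 4KiB block size and the usual superblock layout with a 4-byte magic followed by the 4-byte checksum field:

#include <stdio.h>

int main(void)
{
	unsigned int blksz = 4096;		/* assumed 1 << sbi->blkszbits */
	unsigned int super_offset = 1024;	/* EROFS_SUPER_OFFSET */
	unsigned int chksum_off = 4;		/* assumed offsetof(..., checksum) */
	unsigned int len = blksz - super_offset - (chksum_off + 4);

	/* crc32c() then runs over `len` bytes starting right after .checksum */
	printf("checksummed bytes: %u\n", len);	/* 3064 under these assumptions */
	return 0;
}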
static void erofs_inode_init_once(void *ptr)
@@ -83,7 +66,7 @@ static void erofs_inode_init_once(void *ptr)
static struct inode *erofs_alloc_inode(struct super_block *sb)
{
struct erofs_inode *vi =
- kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
+ alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);
if (!vi)
return NULL;
@@ -97,263 +80,217 @@ static void erofs_free_inode(struct inode *inode)
{
struct erofs_inode *vi = EROFS_I(inode);
- /* be careful of RCU symlink path */
if (inode->i_op == &erofs_fast_symlink_iops)
kfree(inode->i_link);
kfree(vi->xattr_shared_xattrs);
-
kmem_cache_free(erofs_inode_cachep, vi);
}
-static bool check_layout_compatibility(struct super_block *sb,
- struct erofs_super_block *dsb)
-{
- const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
-
- EROFS_SB(sb)->feature_incompat = feature;
-
- /* check if current kernel meets all mandatory requirements */
- if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
- erofs_err(sb,
- "unidentified incompatible feature %x, please upgrade kernel version",
- feature & ~EROFS_ALL_FEATURE_INCOMPAT);
- return false;
- }
- return true;
-}
-
-#ifdef CONFIG_EROFS_FS_ZIP
/* read variable-sized metadata, offset will be aligned by 4-byte */
-static void *erofs_read_metadata(struct super_block *sb, struct page **pagep,
- erofs_off_t *offset, int *lengthp)
+void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+ erofs_off_t *offset, int *lengthp)
{
- struct page *page = *pagep;
u8 *buffer, *ptr;
int len, i, cnt;
- erofs_blk_t blk;
*offset = round_up(*offset, 4);
- blk = erofs_blknr(*offset);
+ ptr = erofs_bread(buf, *offset, true);
+ if (IS_ERR(ptr))
+ return ptr;
- if (!page || page->index != blk) {
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- page = erofs_get_meta_page(sb, blk);
- if (IS_ERR(page))
- goto err_nullpage;
- }
-
- ptr = kmap(page);
- len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
+ len = le16_to_cpu(*(__le16 *)ptr);
if (!len)
len = U16_MAX + 1;
buffer = kmalloc(len, GFP_KERNEL);
- if (!buffer) {
- buffer = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
*offset += sizeof(__le16);
*lengthp = len;
for (i = 0; i < len; i += cnt) {
- cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
- blk = erofs_blknr(*offset);
-
- if (!page || page->index != blk) {
- if (page) {
- kunmap(page);
- unlock_page(page);
- put_page(page);
- }
- page = erofs_get_meta_page(sb, blk);
- if (IS_ERR(page)) {
- kfree(buffer);
- goto err_nullpage;
- }
- ptr = kmap(page);
+ cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
+ len - i);
+ ptr = erofs_bread(buf, *offset, true);
+ if (IS_ERR(ptr)) {
+ kfree(buffer);
+ return ptr;
}
- memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
+ memcpy(buffer + i, ptr, cnt);
*offset += cnt;
}
-out:
- kunmap(page);
- *pagep = page;
return buffer;
-err_nullpage:
- *pagep = NULL;
- return page;
}
-static int erofs_load_compr_cfgs(struct super_block *sb,
- struct erofs_super_block *dsb)
+#ifndef CONFIG_EROFS_FS_ZIP
+static int z_erofs_parse_cfgs(struct super_block *sb,
+ struct erofs_super_block *dsb)
{
- struct erofs_sb_info *sbi;
- struct page *page;
- unsigned int algs, alg;
- erofs_off_t offset;
- int size, ret;
-
- sbi = EROFS_SB(sb);
- sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+ if (!dsb->u1.available_compr_algs)
+ return 0;
- if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
- erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
- sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
- return -EINVAL;
- }
+ erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
+ return -EOPNOTSUPP;
+}
+#endif
- offset = EROFS_SUPER_OFFSET + sbi->sb_size;
- page = NULL;
- alg = 0;
- ret = 0;
+static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ struct erofs_device_info *dif, erofs_off_t *pos)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_fscache *fscache;
+ struct erofs_deviceslot *dis;
+ struct file *file;
- for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
- void *data;
+ dis = erofs_read_metabuf(buf, sb, *pos, false);
+ if (IS_ERR(dis))
+ return PTR_ERR(dis);
- if (!(algs & 1))
- continue;
+ if (!sbi->devs->flatdev && !dif->path) {
+ if (!dis->tag[0]) {
+ erofs_err(sb, "empty device tag @ pos %llu", *pos);
+ return -EINVAL;
+ }
+ dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
+ if (!dif->path)
+ return -ENOMEM;
+ }
- data = erofs_read_metadata(sb, &page, &offset, &size);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- goto err;
+ if (erofs_is_fscache_mode(sb)) {
+ fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
+ if (IS_ERR(fscache))
+ return PTR_ERR(fscache);
+ dif->fscache = fscache;
+ } else if (!sbi->devs->flatdev) {
+ file = erofs_is_fileio_mode(sbi) ?
+ filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) :
+ bdev_file_open_by_path(dif->path,
+ BLK_OPEN_READ, sb->s_type, NULL);
+ if (IS_ERR(file)) {
+ if (file == ERR_PTR(-ENOTBLK))
+ return -EINVAL;
+ return PTR_ERR(file);
}
- switch (alg) {
- case Z_EROFS_COMPRESSION_LZ4:
- ret = z_erofs_load_lz4_config(sb, dsb, data, size);
- break;
- case Z_EROFS_COMPRESSION_LZMA:
- ret = z_erofs_load_lzma_config(sb, dsb, data, size);
- break;
- default:
- DBG_BUGON(1);
- ret = -EFAULT;
+ if (!erofs_is_fileio_mode(sbi)) {
+ dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file),
+ &dif->dax_part_off, NULL, NULL);
+ } else if (!S_ISREG(file_inode(file)->i_mode)) {
+ fput(file);
+ return -EINVAL;
}
- kfree(data);
- if (ret)
- goto err;
- }
-err:
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- return ret;
-}
-#else
-static int erofs_load_compr_cfgs(struct super_block *sb,
- struct erofs_super_block *dsb)
-{
- if (dsb->u1.available_compr_algs) {
- erofs_err(sb, "try to load compressed fs when compression is disabled");
- return -EINVAL;
+ if (!dif->dax_dev && test_opt(&sbi->opt, DAX_ALWAYS)) {
+ erofs_info(sb, "DAX unsupported by %s. Turning off DAX.",
+ dif->path);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
+ }
+ dif->file = file;
}
+
+ dif->blocks = le32_to_cpu(dis->blocks_lo);
+ dif->uniaddr = le32_to_cpu(dis->uniaddr_lo);
+ sbi->total_blocks += dif->blocks;
+ *pos += EROFS_DEVT_SLOT_SIZE;
return 0;
}
-#endif
-static int erofs_init_devices(struct super_block *sb,
+static int erofs_scan_devices(struct super_block *sb,
struct erofs_super_block *dsb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
unsigned int ondisk_extradevs;
erofs_off_t pos;
- struct page *page = NULL;
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_device_info *dif;
- struct erofs_deviceslot *dis;
- void *ptr;
int id, err = 0;
- sbi->total_blocks = sbi->primarydevice_blocks;
+ sbi->total_blocks = sbi->dif0.blocks;
if (!erofs_sb_has_device_table(sbi))
ondisk_extradevs = 0;
else
ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
- if (ondisk_extradevs != sbi->devs->extra_devices) {
+ if (sbi->devs->extra_devices &&
+ ondisk_extradevs != sbi->devs->extra_devices) {
erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
ondisk_extradevs, sbi->devs->extra_devices);
return -EINVAL;
}
+
+ if (test_opt(&sbi->opt, DAX_ALWAYS) && !sbi->dif0.dax_dev) {
+ erofs_info(sb, "DAX unsupported by block device. Turning off DAX.");
+ clear_opt(&sbi->opt, DAX_ALWAYS);
+ }
if (!ondisk_extradevs)
return 0;
+ if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
+ sbi->devs->flatdev = true;
+
sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
down_read(&sbi->devs->rwsem);
- idr_for_each_entry(&sbi->devs->tree, dif, id) {
- erofs_blk_t blk = erofs_blknr(pos);
- struct block_device *bdev;
-
- if (!page || page->index != blk) {
- if (page) {
- kunmap(page);
- unlock_page(page);
- put_page(page);
+ if (sbi->devs->extra_devices) {
+ idr_for_each_entry(&sbi->devs->tree, dif, id) {
+ err = erofs_init_device(&buf, sb, dif, &pos);
+ if (err)
+ break;
+ }
+ } else {
+ for (id = 0; id < ondisk_extradevs; id++) {
+ dif = kzalloc(sizeof(*dif), GFP_KERNEL);
+ if (!dif) {
+ err = -ENOMEM;
+ break;
}
- page = erofs_get_meta_page(sb, blk);
- if (IS_ERR(page)) {
- up_read(&sbi->devs->rwsem);
- return PTR_ERR(page);
+ err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
+ if (err < 0) {
+ kfree(dif);
+ break;
}
- ptr = kmap(page);
- }
- dis = ptr + erofs_blkoff(pos);
-
- bdev = blkdev_get_by_path(dif->path,
- FMODE_READ | FMODE_EXCL,
- sb->s_type);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- goto err_out;
+ ++sbi->devs->extra_devices;
+
+ err = erofs_init_device(&buf, sb, dif, &pos);
+ if (err)
+ break;
}
- dif->bdev = bdev;
- dif->dax_dev = fs_dax_get_by_bdev(bdev);
- dif->blocks = le32_to_cpu(dis->blocks);
- dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
- sbi->total_blocks += dif->blocks;
- pos += EROFS_DEVT_SLOT_SIZE;
}
-err_out:
up_read(&sbi->devs->rwsem);
- if (page) {
- kunmap(page);
- unlock_page(page);
- put_page(page);
- }
+ erofs_put_metabuf(&buf);
return err;
}
static int erofs_read_superblock(struct super_block *sb)
{
- struct erofs_sb_info *sbi;
- struct page *page;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
struct erofs_super_block *dsb;
- unsigned int blkszbits;
void *data;
int ret;
- page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
- if (IS_ERR(page)) {
+ data = erofs_read_metabuf(&buf, sb, 0, false);
+ if (IS_ERR(data)) {
erofs_err(sb, "cannot read erofs superblock");
- return PTR_ERR(page);
+ return PTR_ERR(data);
}
- sbi = EROFS_SB(sb);
-
- data = kmap(page);
dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
-
ret = -EINVAL;
if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
erofs_err(sb, "cannot find valid erofs superblock");
goto out;
}
+ sbi->blkszbits = dsb->blkszbits;
+ if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
+ erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
+ goto out;
+ }
+ if (dsb->dirblkbits) {
+ erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
+ goto out;
+ }
+
sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
if (erofs_sb_has_sb_chksum(sbi)) {
ret = erofs_superblock_csum_verify(sb, data);
@@ -362,85 +299,93 @@ static int erofs_read_superblock(struct super_block *sb)
}
ret = -EINVAL;
- blkszbits = dsb->blkszbits;
- /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
- if (blkszbits != LOG_BLOCK_SIZE) {
- erofs_err(sb, "blkszbits %u isn't supported on this platform",
- blkszbits);
+ sbi->feature_incompat = le32_to_cpu(dsb->feature_incompat);
+ if (sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT) {
+ erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
+ sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT);
goto out;
}
- if (!check_layout_compatibility(sb, dsb))
- goto out;
-
sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
- if (sbi->sb_size > EROFS_BLKSIZ) {
+ if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
sbi->sb_size);
goto out;
}
- sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
+ sbi->dif0.blocks = le32_to_cpu(dsb->blocks_lo);
sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
+ sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
+ sbi->xattr_prefix_count = dsb->xattr_prefix_count;
+ sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
- sbi->root_nid = le16_to_cpu(dsb->root_nid);
+ if (erofs_sb_has_48bit(sbi) && dsb->rootnid_8b) {
+ sbi->root_nid = le64_to_cpu(dsb->rootnid_8b);
+ sbi->dif0.blocks = sbi->dif0.blocks |
+ ((u64)le16_to_cpu(dsb->rb.blocks_hi) << 32);
+ } else {
+ sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b);
+ }
+ sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
+ if (erofs_sb_has_metabox(sbi)) {
+ if (sbi->sb_size <= offsetof(struct erofs_super_block,
+ metabox_nid))
+ return -EFSCORRUPTED;
+ sbi->metabox_nid = le64_to_cpu(dsb->metabox_nid);
+ if (sbi->metabox_nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT))
+ return -EFSCORRUPTED; /* self-loop detection */
+ }
sbi->inos = le64_to_cpu(dsb->inos);
- sbi->build_time = le64_to_cpu(dsb->build_time);
- sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
+ sbi->epoch = (s64)le64_to_cpu(dsb->epoch);
+ sbi->fixed_nsec = le32_to_cpu(dsb->fixed_nsec);
+ super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid));
- memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
-
- ret = strscpy(sbi->volume_name, dsb->volume_name,
- sizeof(dsb->volume_name));
- if (ret < 0) { /* -E2BIG */
- erofs_err(sb, "bad volume name without NIL terminator");
- ret = -EFSCORRUPTED;
- goto out;
+ if (dsb->volume_name[0]) {
+ sbi->volume_name = kstrndup(dsb->volume_name,
+ sizeof(dsb->volume_name), GFP_KERNEL);
+ if (!sbi->volume_name)
+ return -ENOMEM;
}
/* parse on-disk compression configurations */
- if (erofs_sb_has_compr_cfgs(sbi))
- ret = erofs_load_compr_cfgs(sb, dsb);
- else
- ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
+ ret = z_erofs_parse_cfgs(sb, dsb);
if (ret < 0)
goto out;
- /* handle multiple devices */
- ret = erofs_init_devices(sb, dsb);
+ ret = erofs_scan_devices(sb, dsb);
+
+ if (erofs_sb_has_48bit(sbi))
+ erofs_info(sb, "EXPERIMENTAL 48-bit layout support in use. Use at your own risk!");
+ if (erofs_sb_has_metabox(sbi))
+ erofs_info(sb, "EXPERIMENTAL metadata compression support in use. Use at your own risk!");
+ if (erofs_is_fscache_mode(sb))
+ erofs_info(sb, "[deprecated] fscache-based on-demand read feature in use. Use at your own risk!");
out:
- kunmap(page);
- put_page(page);
+ erofs_put_metabuf(&buf);
return ret;
}
-/* set up default EROFS parameters */
-static void erofs_default_options(struct erofs_fs_context *ctx)
+static void erofs_default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- ctx->opt.max_sync_decompress_pages = 3;
- ctx->opt.readahead_sync_decompress = false;
+ sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ sbi->opt.max_sync_decompress_pages = 3;
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
#endif
}
enum {
- Opt_user_xattr,
- Opt_acl,
- Opt_cache_strategy,
- Opt_dax,
- Opt_dax_enum,
- Opt_device,
- Opt_err
+ Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
+ Opt_device, Opt_fsid, Opt_domain_id, Opt_directio, Opt_fsoffset,
};
static const struct constant_table erofs_param_cache_strategy[] = {
@@ -464,23 +409,26 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
fsparam_flag("dax", Opt_dax),
fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
fsparam_string("device", Opt_device),
+ fsparam_string("fsid", Opt_fsid),
+ fsparam_string("domain_id", Opt_domain_id),
+ fsparam_flag_no("directio", Opt_directio),
+ fsparam_u64("fsoffset", Opt_fsoffset),
{}
};
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
- warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- set_opt(&ctx->opt, DAX_ALWAYS);
- clear_opt(&ctx->opt, DAX_NEVER);
+ set_opt(&sbi->opt, DAX_ALWAYS);
+ clear_opt(&sbi->opt, DAX_NEVER);
return true;
case EROFS_MOUNT_DAX_NEVER:
- set_opt(&ctx->opt, DAX_NEVER);
- clear_opt(&ctx->opt, DAX_ALWAYS);
+ set_opt(&sbi->opt, DAX_NEVER);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
return true;
default:
DBG_BUGON(1);
@@ -495,7 +443,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
static int erofs_fc_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
struct fs_parse_result result;
struct erofs_device_info *dif;
int opt, ret;
@@ -508,9 +456,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
if (result.boolean)
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
else
- clear_opt(&ctx->opt, XATTR_USER);
+ clear_opt(&sbi->opt, XATTR_USER);
#else
errorfc(fc, "{,no}user_xattr options not supported");
#endif
@@ -518,16 +466,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
if (result.boolean)
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
else
- clear_opt(&ctx->opt, POSIX_ACL);
+ clear_opt(&sbi->opt, POSIX_ACL);
#else
errorfc(fc, "{,no}acl options not supported");
#endif
break;
case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = result.uint_32;
+ sbi->opt.cache_strategy = result.uint_32;
#else
errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
@@ -549,175 +497,319 @@ static int erofs_fc_parse_param(struct fs_context *fc,
kfree(dif);
return -ENOMEM;
}
- down_write(&ctx->devs->rwsem);
- ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
- up_write(&ctx->devs->rwsem);
+ down_write(&sbi->devs->rwsem);
+ ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
+ up_write(&sbi->devs->rwsem);
if (ret < 0) {
kfree(dif->path);
kfree(dif);
return ret;
}
- ++ctx->devs->extra_devices;
+ ++sbi->devs->extra_devices;
+ break;
+#ifdef CONFIG_EROFS_FS_ONDEMAND
+ case Opt_fsid:
+ kfree(sbi->fsid);
+ sbi->fsid = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->fsid)
+ return -ENOMEM;
+ break;
+ case Opt_domain_id:
+ kfree(sbi->domain_id);
+ sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->domain_id)
+ return -ENOMEM;
+ break;
+#else
+ case Opt_fsid:
+ case Opt_domain_id:
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+ break;
+#endif
+ case Opt_directio:
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (result.boolean)
+ set_opt(&sbi->opt, DIRECT_IO);
+ else
+ clear_opt(&sbi->opt, DIRECT_IO);
+#else
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+#endif
+ break;
+ case Opt_fsoffset:
+ sbi->dif0.fsoff = result.uint_64;
break;
- default:
- return -ENOPARAM;
}
return 0;
}
-#ifdef CONFIG_EROFS_FS_ZIP
-static const struct address_space_operations managed_cache_aops;
-
-static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
+static int erofs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ struct inode *parent)
{
- int ret = 1; /* 0 - busy */
- struct address_space *const mapping = page->mapping;
+ erofs_nid_t nid = EROFS_I(inode)->nid;
+ int len = parent ? 6 : 3;
- DBG_BUGON(!PageLocked(page));
- DBG_BUGON(mapping->a_ops != &managed_cache_aops);
+ if (*max_len < len) {
+ *max_len = len;
+ return FILEID_INVALID;
+ }
- if (PagePrivate(page))
- ret = erofs_try_to_free_cached_page(page);
+ fh[0] = (u32)(nid >> 32);
+ fh[1] = (u32)(nid & 0xffffffff);
+ fh[2] = inode->i_generation;
- return ret;
+ if (parent) {
+ nid = EROFS_I(parent)->nid;
+
+ fh[3] = (u32)(nid >> 32);
+ fh[4] = (u32)(nid & 0xffffffff);
+ fh[5] = parent->i_generation;
+ }
+
+ *max_len = len;
+ return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
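A small round-trip sketch of the file handle encoding above: the 64-bit NID is split into two 32-bit words plus the inode generation, and erofs_fh_to_dentry() joins them back. The values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t nid = 0x0000001234567890ULL;	/* hypothetical NID */
	uint32_t fh[3];

	fh[0] = (uint32_t)(nid >> 32);
	fh[1] = (uint32_t)(nid & 0xffffffff);
	fh[2] = 0;				/* i_generation (illustrative) */

	/* decoding, as in erofs_fh_to_dentry() */
	uint64_t decoded = ((uint64_t)fh[0] << 32) | fh[1];

	printf("round-trip: %#llx -> %#llx\n",
	       (unsigned long long)nid, (unsigned long long)decoded);
	return 0;
}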
-static void erofs_managed_cache_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
+static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
{
- const unsigned int stop = length + offset;
+ if ((fh_type != FILEID_INO64_GEN &&
+ fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
+ return NULL;
- DBG_BUGON(!PageLocked(page));
+ return d_obtain_alias(erofs_iget(sb,
+ ((u64)fid->raw[0] << 32) | fid->raw[1]));
+}
- /* Check for potential overflow in debug mode */
- DBG_BUGON(stop > PAGE_SIZE || stop < length);
+static struct dentry *erofs_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
+{
+ if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
+ return NULL;
- if (offset == 0 && stop == PAGE_SIZE)
- while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
- cond_resched();
+ return d_obtain_alias(erofs_iget(sb,
+ ((u64)fid->raw[3] << 32) | fid->raw[4]));
}
-static const struct address_space_operations managed_cache_aops = {
- .releasepage = erofs_managed_cache_releasepage,
- .invalidatepage = erofs_managed_cache_invalidatepage,
-};
-
-static int erofs_init_managed_cache(struct super_block *sb)
+static struct dentry *erofs_get_parent(struct dentry *child)
{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- struct inode *const inode = new_inode(sb);
+ erofs_nid_t nid;
+ unsigned int d_type;
+ int err;
- if (!inode)
- return -ENOMEM;
+ err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
+ if (err)
+ return ERR_PTR(err);
+ return d_obtain_alias(erofs_iget(child->d_sb, nid));
+}
- set_nlink(inode, 1);
- inode->i_size = OFFSET_MAX;
+static const struct export_operations erofs_export_ops = {
+ .encode_fh = erofs_encode_fh,
+ .fh_to_dentry = erofs_fh_to_dentry,
+ .fh_to_parent = erofs_fh_to_parent,
+ .get_parent = erofs_get_parent,
+};
- inode->i_mapping->a_ops = &managed_cache_aops;
- mapping_set_gfp_mask(inode->i_mapping,
- GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
- sbi->managed_cache = inode;
- return 0;
+static void erofs_set_sysfs_name(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ if (sbi->domain_id)
+ super_set_sysfs_name_generic(sb, "%s,%s", sbi->domain_id,
+ sbi->fsid);
+ else if (sbi->fsid)
+ super_set_sysfs_name_generic(sb, "%s", sbi->fsid);
+ else if (erofs_is_fileio_mode(sbi))
+ super_set_sysfs_name_generic(sb, "%s",
+ bdi_dev_name(sb->s_bdi));
+ else
+ super_set_sysfs_name_id(sb);
}
-#else
-static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
-#endif
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
- struct erofs_sb_info *sbi;
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
int err;
sb->s_magic = EROFS_SUPER_MAGIC;
+ sb->s_flags |= SB_RDONLY | SB_NOATIME;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_op = &erofs_sops;
- if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
- erofs_err(sb, "failed to set erofs blksize");
- return -EINVAL;
- }
+ sbi->blkszbits = PAGE_SHIFT;
+ if (!sb->s_bdev) {
+ /*
+ * (File-backed mounts) EROFS claims it's safe to nest other
+ * fs contexts (including its own) due to self-controlled RO
+ * accesses/contexts and no side-effect changes that need to
+ * context save & restore so it can reuse the current thread
+ * context. However, it still needs to bump `s_stack_depth` to
+ * avoid kernel stack overflow from nested filesystems.
+ */
+ if (erofs_is_fileio_mode(sbi)) {
+ sb->s_stack_depth =
+ file_inode(sbi->dif0.file)->i_sb->s_stack_depth + 1;
+ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+ erofs_err(sb, "maximum fs stacking depth exceeded");
+ return -ENOTBLK;
+ }
+ }
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
+ if (erofs_is_fscache_mode(sb)) {
+ err = erofs_fscache_register_fs(sb);
+ if (err)
+ return err;
+ }
+ err = super_setup_bdi(sb);
+ if (err)
+ return err;
+ } else {
+ if (!sb_set_blocksize(sb, PAGE_SIZE)) {
+ errorfc(fc, "failed to set initial blksize");
+ return -EINVAL;
+ }
- sb->s_fs_info = sbi;
- sbi->opt = ctx->opt;
- sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
- sbi->devs = ctx->devs;
- ctx->devs = NULL;
+ sbi->dif0.dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
+ &sbi->dif0.dax_part_off, NULL, NULL);
+ }
err = erofs_read_superblock(sb);
if (err)
return err;
- if (test_opt(&sbi->opt, DAX_ALWAYS) &&
- !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0, bdev_nr_sectors(sb->s_bdev))) {
- errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
+ if (sb->s_blocksize_bits != sbi->blkszbits) {
+ if (erofs_is_fscache_mode(sb)) {
+ errorfc(fc, "unsupported blksize for fscache mode");
+ return -EINVAL;
+ }
+
+ if (erofs_is_fileio_mode(sbi)) {
+ sb->s_blocksize = 1 << sbi->blkszbits;
+ sb->s_blocksize_bits = sbi->blkszbits;
+ } else if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
+ errorfc(fc, "failed to set erofs blksize");
+ return -EINVAL;
+ }
+ }
+
+ if (sbi->dif0.fsoff) {
+ if (sbi->dif0.fsoff & (sb->s_blocksize - 1))
+ return invalfc(fc, "fsoffset %llu is not aligned to block size %lu",
+ sbi->dif0.fsoff, sb->s_blocksize);
+ if (erofs_is_fscache_mode(sb))
+ return invalfc(fc, "cannot use fsoffset in fscache mode");
+ }
+
+ if (test_opt(&sbi->opt, DAX_ALWAYS) && sbi->blkszbits != PAGE_SHIFT) {
+ erofs_info(sb, "unsupported blocksize for DAX");
clear_opt(&sbi->opt, DAX_ALWAYS);
}
- sb->s_flags |= SB_RDONLY | SB_NOATIME;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_time_gran = 1;
- sb->s_op = &erofs_sops;
+ sb->s_time_gran = 1;
sb->s_xattr = erofs_xattr_handlers;
+ sb->s_export_op = &erofs_export_ops;
if (test_opt(&sbi->opt, POSIX_ACL))
sb->s_flags |= SB_POSIXACL;
else
sb->s_flags &= ~SB_POSIXACL;
-#ifdef CONFIG_EROFS_FS_ZIP
- xa_init(&sbi->managed_pslots);
-#endif
+ err = z_erofs_init_super(sb);
+ if (err)
+ return err;
- /* get the root inode */
- inode = erofs_iget(sb, ROOT_NID(sbi), true);
+ if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
+ inode = erofs_iget(sb, sbi->packed_nid);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ sbi->packed_inode = inode;
+ }
+ if (erofs_sb_has_metabox(sbi)) {
+ inode = erofs_iget(sb, sbi->metabox_nid);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ sbi->metabox_inode = inode;
+ }
+
+ inode = erofs_iget(sb, sbi->root_nid);
if (IS_ERR(inode))
return PTR_ERR(inode);
if (!S_ISDIR(inode->i_mode)) {
erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
- ROOT_NID(sbi), inode->i_mode);
+ sbi->root_nid, inode->i_mode);
iput(inode);
return -EINVAL;
}
-
sb->s_root = d_make_root(inode);
if (!sb->s_root)
return -ENOMEM;
erofs_shrinker_register(sb);
- /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
- err = erofs_init_managed_cache(sb);
+ err = erofs_xattr_prefixes_init(sb);
if (err)
return err;
- erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
+ erofs_set_sysfs_name(sb);
+ err = erofs_register_sysfs(sb);
+ if (err)
+ return err;
+
+ sbi->dir_ra_bytes = EROFS_DIR_RA_BYTES;
+ erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid);
return 0;
}
static int erofs_fc_get_tree(struct fs_context *fc)
{
- return get_tree_bdev(fc, erofs_fc_fill_super);
+ struct erofs_sb_info *sbi = fc->s_fs_info;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
+ return get_tree_nodev(fc, erofs_fc_fill_super);
+
+ ret = get_tree_bdev_flags(fc, erofs_fc_fill_super,
+ IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) ?
+ GET_TREE_BDEV_QUIET_LOOKUP : 0);
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (ret == -ENOTBLK) {
+ struct file *file;
+
+ if (!fc->source)
+ return invalf(fc, "No source specified");
+ file = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ sbi->dif0.file = file;
+
+ if (S_ISREG(file_inode(sbi->dif0.file)->i_mode) &&
+ sbi->dif0.file->f_mapping->a_ops->read_folio)
+ return get_tree_nodev(fc, erofs_fc_fill_super);
+ }
+#endif
+ return ret;
}
static int erofs_fc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *new_sbi = fc->s_fs_info;
DBG_BUGON(!sb_rdonly(sb));
- if (test_opt(&ctx->opt, POSIX_ACL))
+ if (new_sbi->fsid || new_sbi->domain_id)
+ erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
+
+ if (test_opt(&new_sbi->opt, POSIX_ACL))
fc->sb_flags |= SB_POSIXACL;
else
fc->sb_flags &= ~SB_POSIXACL;
- sbi->opt = ctx->opt;
+ sbi->opt = new_sbi->opt;
fc->sb_flags |= SB_RDONLY;
return 0;
@@ -727,9 +819,11 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
{
struct erofs_device_info *dif = ptr;
- fs_put_dax(dif->dax_dev);
- if (dif->bdev)
- blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
+ fs_put_dax(dif->dax_dev, NULL);
+ if (dif->file)
+ fput(dif->file);
+ erofs_fscache_unregister_cookie(dif->fscache);
+ dif->fscache = NULL;
kfree(dif->path);
kfree(dif);
return 0;
@@ -744,12 +838,23 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
kfree(devs);
}
+static void erofs_sb_free(struct erofs_sb_info *sbi)
+{
+ erofs_free_dev_context(sbi->devs);
+ kfree(sbi->fsid);
+ kfree(sbi->domain_id);
+ if (sbi->dif0.file)
+ fput(sbi->dif0.file);
+ kfree(sbi->volume_name);
+ kfree(sbi);
+}
+
static void erofs_fc_free(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- erofs_free_dev_context(ctx->devs);
- kfree(ctx);
+ if (sbi) /* free here if an error occurs before transferring to sb */
+ erofs_sb_free(sbi);
}
static const struct fs_context_operations erofs_context_ops = {
@@ -761,58 +866,65 @@ static const struct fs_context_operations erofs_context_ops = {
static int erofs_init_fs_context(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ struct erofs_sb_info *sbi;
- if (!ctx)
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
return -ENOMEM;
- ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
- if (!ctx->devs) {
- kfree(ctx);
+
+ sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
+ if (!sbi->devs) {
+ kfree(sbi);
return -ENOMEM;
}
- fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
- idr_init(&ctx->devs->tree);
- init_rwsem(&ctx->devs->rwsem);
- erofs_default_options(ctx);
+ idr_init(&sbi->devs->tree);
+ init_rwsem(&sbi->devs->rwsem);
+ erofs_default_options(sbi);
fc->ops = &erofs_context_ops;
return 0;
}
-/*
- * could be triggered after deactivate_locked_super()
- * is called, thus including umount and failed to initialize.
- */
-static void erofs_kill_sb(struct super_block *sb)
+static void erofs_drop_internal_inodes(struct erofs_sb_info *sbi)
{
- struct erofs_sb_info *sbi;
-
- WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
-
- kill_block_super(sb);
+ iput(sbi->packed_inode);
+ sbi->packed_inode = NULL;
+ iput(sbi->metabox_inode);
+ sbi->metabox_inode = NULL;
+#ifdef CONFIG_EROFS_FS_ZIP
+ iput(sbi->managed_cache);
+ sbi->managed_cache = NULL;
+#endif
+}
- sbi = EROFS_SB(sb);
- if (!sbi)
- return;
+static void erofs_kill_sb(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
- erofs_free_dev_context(sbi->devs);
- fs_put_dax(sbi->dax_dev);
- kfree(sbi);
+ if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) ||
+ sbi->dif0.file)
+ kill_anon_super(sb);
+ else
+ kill_block_super(sb);
+ erofs_drop_internal_inodes(sbi);
+ fs_put_dax(sbi->dif0.dax_dev, NULL);
+ erofs_fscache_unregister_fs(sb);
+ erofs_sb_free(sbi);
sb->s_fs_info = NULL;
}
-/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
- DBG_BUGON(!sbi);
-
+ erofs_unregister_sysfs(sb);
erofs_shrinker_unregister(sb);
-#ifdef CONFIG_EROFS_FS_ZIP
- iput(sbi->managed_cache);
- sbi->managed_cache = NULL;
-#endif
+ erofs_xattr_prefixes_cleanup(sb);
+ erofs_drop_internal_inodes(sbi);
+ erofs_free_dev_context(sbi->devs);
+ sbi->devs = NULL;
+ erofs_fscache_unregister_fs(sb);
}
static struct file_system_type erofs_fs_type = {
@@ -820,7 +932,7 @@ static struct file_system_type erofs_fs_type = {
.name = "erofs",
.init_fs_context = erofs_init_fs_context,
.kill_sb = erofs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");
@@ -831,26 +943,23 @@ static int __init erofs_module_init(void)
erofs_check_ondisk_layout_definitions();
erofs_inode_cachep = kmem_cache_create("erofs_inode",
- sizeof(struct erofs_inode), 0,
- SLAB_RECLAIM_ACCOUNT,
- erofs_inode_init_once);
- if (!erofs_inode_cachep) {
- err = -ENOMEM;
- goto icache_err;
- }
+ sizeof(struct erofs_inode), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
+ erofs_inode_init_once);
+ if (!erofs_inode_cachep)
+ return -ENOMEM;
err = erofs_init_shrinker();
if (err)
goto shrinker_err;
- err = z_erofs_lzma_init();
+ err = z_erofs_init_subsystem();
if (err)
- goto lzma_err;
+ goto zip_err;
- erofs_pcpubuf_init();
- err = z_erofs_init_zip_subsystem();
+ err = erofs_init_sysfs();
if (err)
- goto zip_err;
+ goto sysfs_err;
err = register_filesystem(&erofs_fs_type);
if (err)
@@ -859,14 +968,13 @@ static int __init erofs_module_init(void)
return 0;
fs_err:
- z_erofs_exit_zip_subsystem();
+ erofs_exit_sysfs();
+sysfs_err:
+ z_erofs_exit_subsystem();
zip_err:
- z_erofs_lzma_exit();
-lzma_err:
erofs_exit_shrinker();
shrinker_err:
kmem_cache_destroy(erofs_inode_cachep);
-icache_err:
return err;
}
@@ -877,31 +985,30 @@ static void __exit erofs_module_exit(void)
/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
rcu_barrier();
- z_erofs_exit_zip_subsystem();
- z_erofs_lzma_exit();
+ erofs_exit_sysfs();
+ z_erofs_exit_subsystem();
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
- erofs_pcpubuf_exit();
}
-/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = sb->s_magic;
- buf->f_bsize = EROFS_BLKSIZ;
+ buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->total_blocks;
buf->f_bfree = buf->f_bavail = 0;
-
buf->f_files = ULLONG_MAX;
buf->f_ffree = ULLONG_MAX - sbi->inos;
-
buf->f_namelen = EROFS_NAME_LEN;
- buf->f_fsid = u64_to_fsid(id);
+ if (uuid_is_null(&sb->s_uuid))
+ buf->f_fsid = u64_to_fsid(!sb->s_bdev ? 0 :
+ huge_encode_dev(sb->s_bdev->bd_dev));
+ else
+ buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
return 0;
}
@@ -910,37 +1017,47 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
struct erofs_mount_opts *opt = &sbi->opt;
-#ifdef CONFIG_EROFS_FS_XATTR
- if (test_opt(opt, XATTR_USER))
- seq_puts(seq, ",user_xattr");
- else
- seq_puts(seq, ",nouser_xattr");
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- if (test_opt(opt, POSIX_ACL))
- seq_puts(seq, ",acl");
- else
- seq_puts(seq, ",noacl");
-#endif
-#ifdef CONFIG_EROFS_FS_ZIP
- if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
- seq_puts(seq, ",cache_strategy=disabled");
- else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
- seq_puts(seq, ",cache_strategy=readahead");
- else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
- seq_puts(seq, ",cache_strategy=readaround");
-#endif
+ if (IS_ENABLED(CONFIG_EROFS_FS_XATTR))
+ seq_puts(seq, test_opt(opt, XATTR_USER) ?
+ ",user_xattr" : ",nouser_xattr");
+ if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL))
+ seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl");
+ if (IS_ENABLED(CONFIG_EROFS_FS_ZIP))
+ seq_printf(seq, ",cache_strategy=%s",
+ erofs_param_cache_strategy[opt->cache_strategy].name);
if (test_opt(opt, DAX_ALWAYS))
seq_puts(seq, ",dax=always");
if (test_opt(opt, DAX_NEVER))
seq_puts(seq, ",dax=never");
+ if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
+ seq_puts(seq, ",directio");
+#ifdef CONFIG_EROFS_FS_ONDEMAND
+ if (sbi->fsid)
+ seq_printf(seq, ",fsid=%s", sbi->fsid);
+ if (sbi->domain_id)
+ seq_printf(seq, ",domain_id=%s", sbi->domain_id);
+#endif
+ if (sbi->dif0.fsoff)
+ seq_printf(seq, ",fsoffset=%llu", sbi->dif0.fsoff);
return 0;
}
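(Illustrative only, not emitted by the patch itself: with a typical zip-enabled build the resulting options fragment looks something like ",user_xattr,acl,cache_strategy=readaround,dax=never", appended to the generic flags in /proc/mounts; the fsid=/domain_id= and fsoffset= pieces only show up when those features are actually in use, and the exact cache_strategy names come from the erofs_param_cache_strategy table.)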
+static void erofs_evict_inode(struct inode *inode)
+{
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(inode))
+ dax_break_layout_final(inode);
+#endif
+
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+}
+
const struct super_operations erofs_sops = {
.put_super = erofs_put_super,
.alloc_inode = erofs_alloc_inode,
.free_inode = erofs_free_inode,
+ .evict_inode = erofs_evict_inode,
.statfs = erofs_statfs,
.show_options = erofs_show_options,
};
diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c
new file mode 100644
index 000000000000..1e0658a1d95b
--- /dev/null
+++ b/fs/erofs/sysfs.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C), 2008-2021, OPPO Mobile Comm Corp., Ltd.
+ * https://www.oppo.com/
+ */
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+
+#include "internal.h"
+#include "compress.h"
+
+enum {
+ attr_feature,
+ attr_drop_caches,
+ attr_pointer_ui,
+ attr_pointer_bool,
+ attr_accel,
+};
+
+enum {
+ struct_erofs_sb_info,
+ struct_erofs_mount_opts,
+};
+
+struct erofs_attr {
+ struct attribute attr;
+ short attr_id;
+ int struct_type, offset;
+};
+
+#define EROFS_ATTR(_name, _mode, _id) \
+static struct erofs_attr erofs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .attr_id = attr_##_id, \
+}
+#define EROFS_ATTR_FUNC(_name, _mode) EROFS_ATTR(_name, _mode, _name)
+#define EROFS_ATTR_FEATURE(_name) EROFS_ATTR(_name, 0444, feature)
+
+#define EROFS_ATTR_OFFSET(_name, _mode, _id, _struct) \
+static struct erofs_attr erofs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .attr_id = attr_##_id, \
+ .struct_type = struct_##_struct, \
+ .offset = offsetof(struct _struct, _name),\
+}
+
+#define EROFS_ATTR_RW(_name, _id, _struct) \
+ EROFS_ATTR_OFFSET(_name, 0644, _id, _struct)
+
+#define EROFS_RO_ATTR(_name, _id, _struct) \
+ EROFS_ATTR_OFFSET(_name, 0444, _id, _struct)
+
+#define EROFS_ATTR_RW_UI(_name, _struct) \
+ EROFS_ATTR_RW(_name, pointer_ui, _struct)
+
+#define EROFS_ATTR_RW_BOOL(_name, _struct) \
+ EROFS_ATTR_RW(_name, pointer_bool, _struct)
+
+#define ATTR_LIST(name) (&erofs_attr_##name.attr)
+
+#ifdef CONFIG_EROFS_FS_ZIP
+EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts);
+EROFS_ATTR_FUNC(drop_caches, 0200);
+#endif
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+EROFS_ATTR_FUNC(accel, 0644);
+#endif
+EROFS_ATTR_RW_UI(dir_ra_bytes, erofs_sb_info);
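For readers unfamiliar with the attribute macros, the EROFS_ATTR_RW_UI(dir_ra_bytes, erofs_sb_info) declaration above expands roughly to:

	static struct erofs_attr erofs_attr_dir_ra_bytes = {
		.attr		= { .name = "dir_ra_bytes", .mode = 0644 },
		.attr_id	= attr_pointer_ui,
		.struct_type	= struct_erofs_sb_info,
		.offset		= offsetof(struct erofs_sb_info, dir_ra_bytes),
	};

so erofs_attr_show()/erofs_attr_store() below can resolve the backing field generically through __struct_ptr().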
+
+static struct attribute *erofs_sb_attrs[] = {
+#ifdef CONFIG_EROFS_FS_ZIP
+ ATTR_LIST(sync_decompress),
+ ATTR_LIST(drop_caches),
+#endif
+ ATTR_LIST(dir_ra_bytes),
+ NULL,
+};
+ATTRIBUTE_GROUPS(erofs_sb);
+
+static struct attribute *erofs_attrs[] = {
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+ ATTR_LIST(accel),
+#endif
+ NULL,
+};
+ATTRIBUTE_GROUPS(erofs);
+
+/* Features this copy of erofs supports */
+EROFS_ATTR_FEATURE(zero_padding);
+EROFS_ATTR_FEATURE(compr_cfgs);
+EROFS_ATTR_FEATURE(big_pcluster);
+EROFS_ATTR_FEATURE(chunked_file);
+EROFS_ATTR_FEATURE(device_table);
+EROFS_ATTR_FEATURE(compr_head2);
+EROFS_ATTR_FEATURE(sb_chksum);
+EROFS_ATTR_FEATURE(ztailpacking);
+EROFS_ATTR_FEATURE(fragments);
+EROFS_ATTR_FEATURE(dedupe);
+EROFS_ATTR_FEATURE(48bit);
+EROFS_ATTR_FEATURE(metabox);
+
+static struct attribute *erofs_feat_attrs[] = {
+ ATTR_LIST(zero_padding),
+ ATTR_LIST(compr_cfgs),
+ ATTR_LIST(big_pcluster),
+ ATTR_LIST(chunked_file),
+ ATTR_LIST(device_table),
+ ATTR_LIST(compr_head2),
+ ATTR_LIST(sb_chksum),
+ ATTR_LIST(ztailpacking),
+ ATTR_LIST(fragments),
+ ATTR_LIST(dedupe),
+ ATTR_LIST(48bit),
+ ATTR_LIST(metabox),
+ NULL,
+};
+ATTRIBUTE_GROUPS(erofs_feat);
+
+static unsigned char *__struct_ptr(struct erofs_sb_info *sbi,
+ int struct_type, int offset)
+{
+ if (struct_type == struct_erofs_sb_info)
+ return (unsigned char *)sbi + offset;
+ if (struct_type == struct_erofs_mount_opts)
+ return (unsigned char *)&sbi->opt + offset;
+ return NULL;
+}
+
+static ssize_t erofs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
+ s_kobj);
+ struct erofs_attr *a = container_of(attr, struct erofs_attr, attr);
+ unsigned char *ptr = __struct_ptr(sbi, a->struct_type, a->offset);
+
+ switch (a->attr_id) {
+ case attr_feature:
+ return sysfs_emit(buf, "supported\n");
+ case attr_pointer_ui:
+ if (!ptr)
+ return 0;
+ return sysfs_emit(buf, "%u\n", *(unsigned int *)ptr);
+ case attr_pointer_bool:
+ if (!ptr)
+ return 0;
+ return sysfs_emit(buf, "%d\n", *(bool *)ptr);
+ case attr_accel:
+ return z_erofs_crypto_show_engines(buf, PAGE_SIZE, '\n');
+ }
+ return 0;
+}
+
+static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
+ s_kobj);
+ struct erofs_attr *a = container_of(attr, struct erofs_attr, attr);
+ unsigned char *ptr = __struct_ptr(sbi, a->struct_type, a->offset);
+ unsigned long t;
+ int ret;
+
+ switch (a->attr_id) {
+ case attr_pointer_ui:
+ if (!ptr)
+ return 0;
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t != (unsigned int)t)
+ return -ERANGE;
+#ifdef CONFIG_EROFS_FS_ZIP
+ if (!strcmp(a->attr.name, "sync_decompress") &&
+ (t > EROFS_SYNC_DECOMPRESS_FORCE_OFF))
+ return -EINVAL;
+#endif
+ *(unsigned int *)ptr = t;
+ return len;
+ case attr_pointer_bool:
+ if (!ptr)
+ return 0;
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t != 0 && t != 1)
+ return -EINVAL;
+ *(bool *)ptr = !!t;
+ return len;
+#ifdef CONFIG_EROFS_FS_ZIP
+ case attr_drop_caches:
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t < 1 || t > 3)
+ return -EINVAL;
+
+ if (t & 2)
+ z_erofs_shrink_scan(sbi, ~0UL);
+ if (t & 1)
+ invalidate_mapping_pages(MNGD_MAPPING(sbi), 0, -1);
+ return len;
+#endif
+#ifdef CONFIG_EROFS_FS_ZIP_ACCEL
+ case attr_accel:
+ buf = skip_spaces(buf);
+ z_erofs_crypto_disable_all_engines();
+ while (*buf) {
+ t = strcspn(buf, "\n");
+ ret = z_erofs_crypto_enable_engine(buf, t);
+ if (ret < 0)
+ return ret;
+ buf += buf[t] != '\0' ? t + 1 : t;
+ }
+ return len;
+#endif
+ }
+ return 0;
+}
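A quick usage note (not part of the patch): per the handler above, writing "1" to drop_caches invalidates the managed cache mapping, "2" shrinks in-memory pclusters via z_erofs_shrink_scan(), and "3" does both (other values are rejected); sync_decompress refuses values above EROFS_SYNC_DECOMPRESS_FORCE_OFF; accel takes newline-separated engine names that replace the currently enabled set.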
+
+static void erofs_sb_release(struct kobject *kobj)
+{
+ struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info,
+ s_kobj);
+ complete(&sbi->s_kobj_unregister);
+}
+
+static const struct sysfs_ops erofs_attr_ops = {
+ .show = erofs_attr_show,
+ .store = erofs_attr_store,
+};
+
+static const struct kobj_type erofs_sb_ktype = {
+ .default_groups = erofs_sb_groups,
+ .sysfs_ops = &erofs_attr_ops,
+ .release = erofs_sb_release,
+};
+
+static const struct kobj_type erofs_ktype = {
+ .default_groups = erofs_groups,
+ .sysfs_ops = &erofs_attr_ops,
+};
+
+static struct kset erofs_root = {
+ .kobj = {.ktype = &erofs_ktype},
+};
+
+static const struct kobj_type erofs_feat_ktype = {
+ .default_groups = erofs_feat_groups,
+ .sysfs_ops = &erofs_attr_ops,
+};
+
+static struct kobject erofs_feat = {
+ .kset = &erofs_root,
+};
+
+int erofs_register_sysfs(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ int err;
+
+ sbi->s_kobj.kset = &erofs_root;
+ init_completion(&sbi->s_kobj_unregister);
+ err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s",
+ sb->s_sysfs_name);
+ if (err) {
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
+ }
+ return err;
+}
+
+void erofs_unregister_sysfs(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ if (sbi->s_kobj.state_in_sysfs) {
+ kobject_del(&sbi->s_kobj);
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
+ }
+}
+
+void erofs_exit_sysfs(void)
+{
+ kobject_put(&erofs_feat);
+ kset_unregister(&erofs_root);
+}
+
+int __init erofs_init_sysfs(void)
+{
+ int ret;
+
+ kobject_set_name(&erofs_root.kobj, "erofs");
+ erofs_root.kobj.parent = fs_kobj;
+ ret = kset_register(&erofs_root);
+ if (!ret) {
+ ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype,
+ NULL, "features");
+ if (!ret)
+ return 0;
+ erofs_exit_sysfs();
+ }
+ return ret;
+}
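Resulting layout, assuming registration succeeds: a kset named "erofs" appears under /sys/fs, the shared "features" kobject sits inside it, and erofs_register_sysfs() adds one per-superblock directory named after sb->s_sysfs_name that carries the erofs_sb_attrs shown above.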
diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h
deleted file mode 100644
index 64ceb7270b5c..000000000000
--- a/fs/erofs/tagptr.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * A tagged pointer implementation
- */
-#ifndef __EROFS_FS_TAGPTR_H
-#define __EROFS_FS_TAGPTR_H
-
-#include <linux/types.h>
-#include <linux/build_bug.h>
-
-/*
- * the name of tagged pointer types are tagptr{1, 2, 3...}_t
- * avoid directly using the internal structs __tagptr{1, 2, 3...}
- */
-#define __MAKE_TAGPTR(n) \
-typedef struct __tagptr##n { \
- uintptr_t v; \
-} tagptr##n##_t;
-
-__MAKE_TAGPTR(1)
-__MAKE_TAGPTR(2)
-__MAKE_TAGPTR(3)
-__MAKE_TAGPTR(4)
-
-#undef __MAKE_TAGPTR
-
-extern void __compiletime_error("bad tagptr tags")
- __bad_tagptr_tags(void);
-
-extern void __compiletime_error("bad tagptr type")
- __bad_tagptr_type(void);
-
-/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */
-#define __tagptr_mask_1(ptr, n) \
- __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
- (1UL << (n)) - 1 :
-
-#define __tagptr_mask(ptr) (\
- __tagptr_mask_1(ptr, 1) ( \
- __tagptr_mask_1(ptr, 2) ( \
- __tagptr_mask_1(ptr, 3) ( \
- __tagptr_mask_1(ptr, 4) ( \
- __bad_tagptr_type(), 0)))))
-
-/* generate a tagged pointer from a raw value */
-#define tagptr_init(type, val) \
- ((typeof(type)){ .v = (uintptr_t)(val) })
-
-/*
- * directly cast a tagged pointer to the native pointer type, which
- * could be used for backward compatibility of existing code.
- */
-#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
-
-/* encode tagged pointers */
-#define tagptr_fold(type, ptr, _tags) ({ \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
- __bad_tagptr_tags(); \
-tagptr_init(type, (uintptr_t)(ptr) | tags); })
-
-/* decode tagged pointers */
-#define tagptr_unfold_ptr(tptr) \
- ((void *)((tptr).v & ~__tagptr_mask(tptr)))
-
-#define tagptr_unfold_tags(tptr) \
- ((tptr).v & __tagptr_mask(tptr))
-
-/* operations for the tagger pointer */
-#define tagptr_eq(_tptr1, _tptr2) ({ \
- typeof(_tptr1) tptr1 = (_tptr1); \
- typeof(_tptr2) tptr2 = (_tptr2); \
- (void)(&tptr1 == &tptr2); \
-(tptr1).v == (tptr2).v; })
-
-/* lock-free CAS operation */
-#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- typeof(_o) o = (_o); \
- typeof(_n) n = (_n); \
- (void)(&o == &n); \
- (void)(&o == ptptr); \
-tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
-
-/* wrap WRITE_ONCE if atomic update is needed */
-#define tagptr_replace_tags(_ptptr, tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
-*ptptr; })
-
-#define tagptr_set_tags(_ptptr, _tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
- __bad_tagptr_tags(); \
- ptptr->v |= tags; \
-*ptptr; })
-
-#define tagptr_clear_tags(_ptptr, _tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
- __bad_tagptr_tags(); \
- ptptr->v &= ~tags; \
-*ptptr; })
-
-#endif /* __EROFS_FS_TAGPTR_H */
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
deleted file mode 100644
index 84da2c280012..000000000000
--- a/fs/erofs/utils.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2018 HUAWEI, Inc.
- * https://www.huawei.com/
- */
-#include "internal.h"
-#include <linux/pagevec.h>
-
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
-{
- struct page *page = *pagepool;
-
- if (page) {
- DBG_BUGON(page_ref_count(page) != 1);
- *pagepool = (struct page *)page_private(page);
- } else {
- page = alloc_page(gfp);
- }
- return page;
-}
-
-void erofs_release_pages(struct page **pagepool)
-{
- while (*pagepool) {
- struct page *page = *pagepool;
-
- *pagepool = (struct page *)page_private(page);
- put_page(page);
- }
-}
-
-#ifdef CONFIG_EROFS_FS_ZIP
-/* global shrink count (for all mounted EROFS instances) */
-static atomic_long_t erofs_global_shrink_cnt;
-
-static int erofs_workgroup_get(struct erofs_workgroup *grp)
-{
- int o;
-
-repeat:
- o = erofs_wait_on_workgroup_freezed(grp);
- if (o <= 0)
- return -1;
-
- if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
- goto repeat;
-
- /* decrease refcount paired by erofs_workgroup_put */
- if (o == 1)
- atomic_long_dec(&erofs_global_shrink_cnt);
- return 0;
-}
-
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_workgroup *grp;
-
-repeat:
- rcu_read_lock();
- grp = xa_load(&sbi->managed_pslots, index);
- if (grp) {
- if (erofs_workgroup_get(grp)) {
- /* prefer to relax rcu read side */
- rcu_read_unlock();
- goto repeat;
- }
-
- DBG_BUGON(index != grp->index);
- }
- rcu_read_unlock();
- return grp;
-}
-
-struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- struct erofs_workgroup *pre;
-
- /*
- * Bump up a reference count before making this visible
- * to others for the XArray in order to avoid potential
- * UAF without serialized by xa_lock.
- */
- atomic_inc(&grp->refcount);
-
-repeat:
- xa_lock(&sbi->managed_pslots);
- pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
- NULL, grp, GFP_NOFS);
- if (pre) {
- if (xa_is_err(pre)) {
- pre = ERR_PTR(xa_err(pre));
- } else if (erofs_workgroup_get(pre)) {
- /* try to legitimize the current in-tree one */
- xa_unlock(&sbi->managed_pslots);
- cond_resched();
- goto repeat;
- }
- atomic_dec(&grp->refcount);
- grp = pre;
- }
- xa_unlock(&sbi->managed_pslots);
- return grp;
-}
-
-static void __erofs_workgroup_free(struct erofs_workgroup *grp)
-{
- atomic_long_dec(&erofs_global_shrink_cnt);
- erofs_workgroup_free_rcu(grp);
-}
-
-int erofs_workgroup_put(struct erofs_workgroup *grp)
-{
- int count = atomic_dec_return(&grp->refcount);
-
- if (count == 1)
- atomic_long_inc(&erofs_global_shrink_cnt);
- else if (!count)
- __erofs_workgroup_free(grp);
- return count;
-}
-
-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp)
-{
- /*
- * If managed cache is on, refcount of workgroups
- * themselves could be < 0 (freezed). In other words,
- * there is no guarantee that all refcounts > 0.
- */
- if (!erofs_workgroup_try_to_freeze(grp, 1))
- return false;
-
- /*
- * Note that all cached pages should be unattached
- * before deleted from the XArray. Otherwise some
- * cached pages could be still attached to the orphan
- * old workgroup when the new one is available in the tree.
- */
- if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
- erofs_workgroup_unfreeze(grp, 1);
- return false;
- }
-
- /*
- * It's impossible to fail after the workgroup is freezed,
- * however in order to avoid some race conditions, add a
- * DBG_BUGON to observe this in advance.
- */
- DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
-
- /* last refcount should be connected with its managed pslot. */
- erofs_workgroup_unfreeze(grp, 0);
- __erofs_workgroup_free(grp);
- return true;
-}
-
-static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink)
-{
- struct erofs_workgroup *grp;
- unsigned int freed = 0;
- unsigned long index;
-
- xa_for_each(&sbi->managed_pslots, index, grp) {
- /* try to shrink each valid workgroup */
- if (!erofs_try_to_release_workgroup(sbi, grp))
- continue;
-
- ++freed;
- if (!--nr_shrink)
- break;
- }
- return freed;
-}
-
-/* protected by 'erofs_sb_list_lock' */
-static unsigned int shrinker_run_no;
-
-/* protects the mounted 'erofs_sb_list' */
-static DEFINE_SPINLOCK(erofs_sb_list_lock);
-static LIST_HEAD(erofs_sb_list);
-
-void erofs_shrinker_register(struct super_block *sb)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
-
- mutex_init(&sbi->umount_mutex);
-
- spin_lock(&erofs_sb_list_lock);
- list_add(&sbi->list, &erofs_sb_list);
- spin_unlock(&erofs_sb_list_lock);
-}
-
-void erofs_shrinker_unregister(struct super_block *sb)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
-
- mutex_lock(&sbi->umount_mutex);
- /* clean up all remaining workgroups in memory */
- erofs_shrink_workstation(sbi, ~0UL);
-
- spin_lock(&erofs_sb_list_lock);
- list_del(&sbi->list);
- spin_unlock(&erofs_sb_list_lock);
- mutex_unlock(&sbi->umount_mutex);
-}
-
-static unsigned long erofs_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- return atomic_long_read(&erofs_global_shrink_cnt);
-}
-
-static unsigned long erofs_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct erofs_sb_info *sbi;
- struct list_head *p;
-
- unsigned long nr = sc->nr_to_scan;
- unsigned int run_no;
- unsigned long freed = 0;
-
- spin_lock(&erofs_sb_list_lock);
- do {
- run_no = ++shrinker_run_no;
- } while (run_no == 0);
-
- /* Iterate over all mounted superblocks and try to shrink them */
- p = erofs_sb_list.next;
- while (p != &erofs_sb_list) {
- sbi = list_entry(p, struct erofs_sb_info, list);
-
- /*
- * We move the ones we do to the end of the list, so we stop
- * when we see one we have already done.
- */
- if (sbi->shrinker_run_no == run_no)
- break;
-
- if (!mutex_trylock(&sbi->umount_mutex)) {
- p = p->next;
- continue;
- }
-
- spin_unlock(&erofs_sb_list_lock);
- sbi->shrinker_run_no = run_no;
-
- freed += erofs_shrink_workstation(sbi, nr - freed);
-
- spin_lock(&erofs_sb_list_lock);
- /* Get the next list element before we move this one */
- p = p->next;
-
- /*
- * Move this one to the end of the list to provide some
- * fairness.
- */
- list_move_tail(&sbi->list, &erofs_sb_list);
- mutex_unlock(&sbi->umount_mutex);
-
- if (freed >= nr)
- break;
- }
- spin_unlock(&erofs_sb_list_lock);
- return freed;
-}
-
-static struct shrinker erofs_shrinker_info = {
- .scan_objects = erofs_shrink_scan,
- .count_objects = erofs_shrink_count,
- .seeks = DEFAULT_SEEKS,
-};
-
-int __init erofs_init_shrinker(void)
-{
- return register_shrinker(&erofs_shrinker_info);
-}
-
-void erofs_exit_shrinker(void)
-{
- unregister_shrinker(&erofs_shrinker_info);
-}
-#endif /* !CONFIG_EROFS_FS_ZIP */
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 01c581e93c5f..396536d9a862 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -2,48 +2,36 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2021-2022, Alibaba Cloud
*/
#include <linux/security.h>
+#include <linux/xxhash.h>
#include "xattr.h"
-struct xattr_iter {
+struct erofs_xattr_iter {
struct super_block *sb;
- struct page *page;
+ struct erofs_buf buf;
+ erofs_off_t pos;
void *kaddr;
- erofs_blk_t blkaddr;
- unsigned int ofs;
-};
-
-static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
-{
- /* the only user of kunmap() is 'init_inode_xattrs' */
- if (!atomic)
- kunmap(it->page);
- else
- kunmap_atomic(it->kaddr);
-
- unlock_page(it->page);
- put_page(it->page);
-}
+ char *buffer;
+ int buffer_size, buffer_ofs;
-static inline void xattr_iter_end_final(struct xattr_iter *it)
-{
- if (!it->page)
- return;
+ /* getxattr */
+ int index, infix_len;
+ struct qstr name;
- xattr_iter_end(it, true);
-}
+ /* listxattr */
+ struct dentry *dentry;
+};
-static int init_inode_xattrs(struct inode *inode)
+static int erofs_init_inode_xattrs(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
- struct xattr_iter it;
+ struct erofs_xattr_iter it;
unsigned int i;
struct erofs_xattr_ibody_header *ih;
- struct super_block *sb;
- struct erofs_sb_info *sbi;
- bool atomic_map;
+ struct super_block *sb = inode->i_sb;
int ret = 0;
/* the most case is that xattrs of this inode are initialized. */
@@ -72,75 +60,61 @@ static int init_inode_xattrs(struct inode *inode)
* undefined right now (maybe use later with some new sb feature).
*/
if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
- erofs_err(inode->i_sb,
+ erofs_err(sb,
"xattr_isize %d of nid %llu is not supported yet",
vi->xattr_isize, vi->nid);
ret = -EOPNOTSUPP;
goto out_unlock;
} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
if (vi->xattr_isize) {
- erofs_err(inode->i_sb,
- "bogus xattr ibody @ nid %llu", vi->nid);
+ erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
DBG_BUGON(1);
ret = -EFSCORRUPTED;
goto out_unlock; /* xattr ondisk layout error */
}
- ret = -ENOATTR;
+ ret = -ENODATA;
goto out_unlock;
}
- sb = inode->i_sb;
- sbi = EROFS_SB(sb);
- it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
- it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
-
- it.page = erofs_get_meta_page(sb, it.blkaddr);
- if (IS_ERR(it.page)) {
- ret = PTR_ERR(it.page);
+ it.buf = __EROFS_BUF_INITIALIZER;
+ ret = erofs_init_metabuf(&it.buf, sb, erofs_inode_in_metabox(inode));
+ if (ret)
goto out_unlock;
- }
+ it.pos = erofs_iloc(inode) + vi->inode_isize;
/* read in shared xattr array (non-atomic, see kmalloc below) */
- it.kaddr = kmap(it.page);
- atomic_map = false;
-
- ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
+ it.kaddr = erofs_bread(&it.buf, it.pos, true);
+ if (IS_ERR(it.kaddr)) {
+ ret = PTR_ERR(it.kaddr);
+ goto out_unlock;
+ }
+ ih = it.kaddr;
+ vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
vi->xattr_shared_count = ih->h_shared_count;
vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
sizeof(uint), GFP_KERNEL);
if (!vi->xattr_shared_xattrs) {
- xattr_iter_end(&it, atomic_map);
+ erofs_put_metabuf(&it.buf);
ret = -ENOMEM;
goto out_unlock;
}
/* let's skip ibody header */
- it.ofs += sizeof(struct erofs_xattr_ibody_header);
+ it.pos += sizeof(struct erofs_xattr_ibody_header);
for (i = 0; i < vi->xattr_shared_count; ++i) {
- if (it.ofs >= EROFS_BLKSIZ) {
- /* cannot be unaligned */
- DBG_BUGON(it.ofs != EROFS_BLKSIZ);
- xattr_iter_end(&it, atomic_map);
-
- it.page = erofs_get_meta_page(sb, ++it.blkaddr);
- if (IS_ERR(it.page)) {
- kfree(vi->xattr_shared_xattrs);
- vi->xattr_shared_xattrs = NULL;
- ret = PTR_ERR(it.page);
- goto out_unlock;
- }
-
- it.kaddr = kmap_atomic(it.page);
- atomic_map = true;
- it.ofs = 0;
+ it.kaddr = erofs_bread(&it.buf, it.pos, true);
+ if (IS_ERR(it.kaddr)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ ret = PTR_ERR(it.kaddr);
+ goto out_unlock;
}
- vi->xattr_shared_xattrs[i] =
- le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
- it.ofs += sizeof(__le32);
+ vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr);
+ it.pos += sizeof(__le32);
}
- xattr_iter_end(&it, atomic_map);
+ erofs_put_metabuf(&it.buf);
/* paired with smp_mb() at the beginning of the function. */
smp_mb();
@@ -151,282 +125,6 @@ out_unlock:
return ret;
}
-/*
- * the general idea for these return values is
- * if 0 is returned, go on processing the current xattr;
- * 1 (> 0) is returned, skip this round to process the next xattr;
- * -err (< 0) is returned, an error (maybe ENOXATTR) occurred
- * and need to be handled
- */
-struct xattr_iter_handlers {
- int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
- int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
- int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
- void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
-};
-
-static inline int xattr_iter_fixup(struct xattr_iter *it)
-{
- if (it->ofs < EROFS_BLKSIZ)
- return 0;
-
- xattr_iter_end(it, true);
-
- it->blkaddr += erofs_blknr(it->ofs);
-
- it->page = erofs_get_meta_page(it->sb, it->blkaddr);
- if (IS_ERR(it->page)) {
- int err = PTR_ERR(it->page);
-
- it->page = NULL;
- return err;
- }
-
- it->kaddr = kmap_atomic(it->page);
- it->ofs = erofs_blkoff(it->ofs);
- return 0;
-}
-
-static int inline_xattr_iter_begin(struct xattr_iter *it,
- struct inode *inode)
-{
- struct erofs_inode *const vi = EROFS_I(inode);
- struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
- unsigned int xattr_header_sz, inline_xattr_ofs;
-
- xattr_header_sz = inlinexattr_header_size(inode);
- if (xattr_header_sz >= vi->xattr_isize) {
- DBG_BUGON(xattr_header_sz > vi->xattr_isize);
- return -ENOATTR;
- }
-
- inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
-
- it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
- it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
-
- it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
- if (IS_ERR(it->page))
- return PTR_ERR(it->page);
-
- it->kaddr = kmap_atomic(it->page);
- return vi->xattr_isize - xattr_header_sz;
-}
-
-/*
- * Regardless of success or failure, `xattr_foreach' will end up with
- * `ofs' pointing to the next xattr item rather than an arbitrary position.
- */
-static int xattr_foreach(struct xattr_iter *it,
- const struct xattr_iter_handlers *op,
- unsigned int *tlimit)
-{
- struct erofs_xattr_entry entry;
- unsigned int value_sz, processed, slice;
- int err;
-
- /* 0. fixup blkaddr, ofs, ipage */
- err = xattr_iter_fixup(it);
- if (err)
- return err;
-
- /*
- * 1. read xattr entry to the memory,
- * since we do EROFS_XATTR_ALIGN
- * therefore entry should be in the page
- */
- entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
- if (tlimit) {
- unsigned int entry_sz = erofs_xattr_entry_size(&entry);
-
- /* xattr on-disk corruption: xattr entry beyond xattr_isize */
- if (*tlimit < entry_sz) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
- *tlimit -= entry_sz;
- }
-
- it->ofs += sizeof(struct erofs_xattr_entry);
- value_sz = le16_to_cpu(entry.e_value_size);
-
- /* handle entry */
- err = op->entry(it, &entry);
- if (err) {
- it->ofs += entry.e_name_len + value_sz;
- goto out;
- }
-
- /* 2. handle xattr name (ofs will finally be at the end of name) */
- processed = 0;
-
- while (processed < entry.e_name_len) {
- if (it->ofs >= EROFS_BLKSIZ) {
- DBG_BUGON(it->ofs > EROFS_BLKSIZ);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
- entry.e_name_len - processed);
-
- /* handle name */
- err = op->name(it, processed, it->kaddr + it->ofs, slice);
- if (err) {
- it->ofs += entry.e_name_len - processed + value_sz;
- goto out;
- }
-
- it->ofs += slice;
- processed += slice;
- }
-
- /* 3. handle xattr value */
- processed = 0;
-
- if (op->alloc_buffer) {
- err = op->alloc_buffer(it, value_sz);
- if (err) {
- it->ofs += value_sz;
- goto out;
- }
- }
-
- while (processed < value_sz) {
- if (it->ofs >= EROFS_BLKSIZ) {
- DBG_BUGON(it->ofs > EROFS_BLKSIZ);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
- value_sz - processed);
- op->value(it, processed, it->kaddr + it->ofs, slice);
- it->ofs += slice;
- processed += slice;
- }
-
-out:
- /* xattrs should be 4-byte aligned (on-disk constraint) */
- it->ofs = EROFS_XATTR_ALIGN(it->ofs);
- return err < 0 ? err : 0;
-}
-
-struct getxattr_iter {
- struct xattr_iter it;
-
- char *buffer;
- int buffer_size, index;
- struct qstr name;
-};
-
-static int xattr_entrymatch(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- return (it->index != entry->e_name_index ||
- it->name.len != entry->e_name_len) ? -ENOATTR : 0;
-}
-
-static int xattr_namematch(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
-}
-
-static int xattr_checkbuffer(struct xattr_iter *_it,
- unsigned int value_sz)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
- int err = it->buffer_size < value_sz ? -ERANGE : 0;
-
- it->buffer_size = value_sz;
- return !it->buffer ? 1 : err;
-}
-
-static void xattr_copyvalue(struct xattr_iter *_it,
- unsigned int processed,
- char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- memcpy(it->buffer + processed, buf, len);
-}
-
-static const struct xattr_iter_handlers find_xattr_handlers = {
- .entry = xattr_entrymatch,
- .name = xattr_namematch,
- .alloc_buffer = xattr_checkbuffer,
- .value = xattr_copyvalue
-};
-
-static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- int ret;
- unsigned int remaining;
-
- ret = inline_xattr_iter_begin(&it->it, inode);
- if (ret < 0)
- return ret;
-
- remaining = ret;
- while (remaining) {
- ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
- if (ret != -ENOATTR)
- break;
- }
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_size;
-}
-
-static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- struct erofs_inode *const vi = EROFS_I(inode);
- struct super_block *const sb = inode->i_sb;
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = -ENOATTR;
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- erofs_blk_t blkaddr =
- xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
- it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
-
- if (!i || blkaddr != it->it.blkaddr) {
- if (i)
- xattr_iter_end(&it->it, true);
-
- it->it.page = erofs_get_meta_page(sb, blkaddr);
- if (IS_ERR(it->it.page))
- return PTR_ERR(it->it.page);
-
- it->it.kaddr = kmap_atomic(it->it.page);
- it->it.blkaddr = blkaddr;
- }
-
- ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
- if (ret != -ENOATTR)
- break;
- }
- if (vi->xattr_shared_count)
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_size;
-}
-
static bool erofs_xattr_user_list(struct dentry *dentry)
{
return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
@@ -437,55 +135,13 @@ static bool erofs_xattr_trusted_list(struct dentry *dentry)
return capable(CAP_SYS_ADMIN);
}
-int erofs_getxattr(struct inode *inode, int index,
- const char *name,
- void *buffer, size_t buffer_size)
-{
- int ret;
- struct getxattr_iter it;
-
- if (!name)
- return -EINVAL;
-
- ret = init_inode_xattrs(inode);
- if (ret)
- return ret;
-
- it.index = index;
-
- it.name.len = strlen(name);
- if (it.name.len > EROFS_NAME_LEN)
- return -ERANGE;
- it.name.name = name;
-
- it.buffer = buffer;
- it.buffer_size = buffer_size;
-
- it.it.sb = inode->i_sb;
- ret = inline_getxattr(inode, &it);
- if (ret == -ENOATTR)
- ret = shared_getxattr(inode, &it);
- return ret;
-}
-
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-
- switch (handler->flags) {
- case EROFS_XATTR_INDEX_USER:
- if (!test_opt(&sbi->opt, XATTR_USER))
- return -EOPNOTSUPP;
- break;
- case EROFS_XATTR_INDEX_TRUSTED:
- break;
- case EROFS_XATTR_INDEX_SECURITY:
- break;
- default:
- return -EINVAL;
- }
+ if (handler->flags == EROFS_XATTR_INDEX_USER &&
+ !test_opt(&EROFS_I_SB(inode)->opt, XATTR_USER))
+ return -EOPNOTSUPP;
return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
@@ -512,12 +168,8 @@ const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
};
#endif
-const struct xattr_handler *erofs_xattr_handlers[] = {
+const struct xattr_handler * const erofs_xattr_handlers[] = {
&erofs_xattr_user_handler,
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
&erofs_xattr_security_handler,
@@ -525,151 +177,352 @@ const struct xattr_handler *erofs_xattr_handlers[] = {
NULL,
};
-struct listxattr_iter {
- struct xattr_iter it;
-
- struct dentry *dentry;
- char *buffer;
- int buffer_size, buffer_ofs;
-};
-
-static int xattr_entrylist(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
+static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
+ unsigned int len)
{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
- unsigned int prefix_len;
- const char *prefix;
+ unsigned int slice, processed;
+ struct super_block *sb = it->sb;
+ void *src;
+
+ for (processed = 0; processed < len; processed += slice) {
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ src = it->kaddr;
+ slice = min_t(unsigned int, sb->s_blocksize -
+ erofs_blkoff(sb, it->pos), len - processed);
+ memcpy(it->buffer + it->buffer_ofs, src, slice);
+ it->buffer_ofs += slice;
+ it->pos += slice;
+ }
+ return 0;
+}
- const struct xattr_handler *h =
- erofs_xattr_handler(entry->e_name_index);
+static int erofs_listxattr_foreach(struct erofs_xattr_iter *it)
+{
+ struct erofs_xattr_entry entry;
+ unsigned int base_index, name_total, prefix_len, infix_len = 0;
+ const char *prefix, *infix = NULL;
+ int err;
- if (!h || (h->list && !h->list(it->dentry)))
- return 1;
+ /* 1. handle xattr entry */
+ entry = *(struct erofs_xattr_entry *)it->kaddr;
+ it->pos += sizeof(struct erofs_xattr_entry);
+
+ base_index = entry.e_name_index;
+ if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
+ struct erofs_sb_info *sbi = EROFS_SB(it->sb);
+ struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
+ (entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
+
+ if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
+ return 0;
+ infix = pf->prefix->infix;
+ infix_len = pf->infix_len;
+ base_index = pf->prefix->base_index;
+ }
- prefix = xattr_prefix(h);
+ prefix = erofs_xattr_prefix(base_index, it->dentry);
+ if (!prefix)
+ return 0;
prefix_len = strlen(prefix);
+ name_total = prefix_len + infix_len + entry.e_name_len + 1;
if (!it->buffer) {
- it->buffer_ofs += prefix_len + entry->e_name_len + 1;
- return 1;
+ it->buffer_ofs += name_total;
+ return 0;
}
- if (it->buffer_ofs + prefix_len
- + entry->e_name_len + 1 > it->buffer_size)
+ if (it->buffer_ofs + name_total > it->buffer_size)
return -ERANGE;
memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
- it->buffer_ofs += prefix_len;
- return 0;
-}
+ memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
+ it->buffer_ofs += prefix_len + infix_len;
-static int xattr_namelist(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
+ /* 2. handle xattr name */
+ err = erofs_xattr_copy_to_buffer(it, entry.e_name_len);
+ if (err)
+ return err;
- memcpy(it->buffer + it->buffer_ofs, buf, len);
- it->buffer_ofs += len;
+ it->buffer[it->buffer_ofs++] = '\0';
return 0;
}
-static int xattr_skipvalue(struct xattr_iter *_it,
- unsigned int value_sz)
+static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
+ struct super_block *sb = it->sb;
+ struct erofs_xattr_entry entry;
+ unsigned int slice, processed, value_sz;
- it->buffer[it->buffer_ofs++] = '\0';
- return 1;
-}
+ /* 1. handle xattr entry */
+ entry = *(struct erofs_xattr_entry *)it->kaddr;
+ it->pos += sizeof(struct erofs_xattr_entry);
+ value_sz = le16_to_cpu(entry.e_value_size);
-static const struct xattr_iter_handlers list_xattr_handlers = {
- .entry = xattr_entrylist,
- .name = xattr_namelist,
- .alloc_buffer = xattr_skipvalue,
- .value = NULL
-};
+ /* should also match the infix for long name prefixes */
+ if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
+ (entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
+
+ if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
+ return -ENODATA;
-static int inline_listxattr(struct listxattr_iter *it)
+ if (it->index != pf->prefix->base_index ||
+ it->name.len != entry.e_name_len + pf->infix_len)
+ return -ENODATA;
+
+ if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
+ return -ENODATA;
+
+ it->infix_len = pf->infix_len;
+ } else {
+ if (it->index != entry.e_name_index ||
+ it->name.len != entry.e_name_len)
+ return -ENODATA;
+
+ it->infix_len = 0;
+ }
+
+ /* 2. handle xattr name */
+ for (processed = 0; processed < entry.e_name_len; processed += slice) {
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ slice = min_t(unsigned int,
+ sb->s_blocksize - erofs_blkoff(sb, it->pos),
+ entry.e_name_len - processed);
+ if (memcmp(it->name.name + it->infix_len + processed,
+ it->kaddr, slice))
+ return -ENODATA;
+ it->pos += slice;
+ }
+
+ /* 3. handle xattr value */
+ if (!it->buffer) {
+ it->buffer_ofs = value_sz;
+ return 0;
+ }
+
+ if (it->buffer_size < value_sz)
+ return -ERANGE;
+
+ return erofs_xattr_copy_to_buffer(it, value_sz);
+}
+
+static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
+ struct inode *inode, bool getxattr)
{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ unsigned int xattr_header_sz, remaining, entry_sz;
+ erofs_off_t next_pos;
int ret;
- unsigned int remaining;
- ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
- if (ret < 0)
+ xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
+ sizeof(u32) * vi->xattr_shared_count;
+ if (xattr_header_sz >= vi->xattr_isize) {
+ DBG_BUGON(xattr_header_sz > vi->xattr_isize);
+ return -ENODATA;
+ }
+
+ ret = erofs_init_metabuf(&it->buf, it->sb, erofs_inode_in_metabox(inode));
+ if (ret)
return ret;
+ remaining = vi->xattr_isize - xattr_header_sz;
+ it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;
- remaining = ret;
while (remaining) {
- ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
- if (ret)
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ entry_sz = erofs_xattr_entry_size(it->kaddr);
+ /* xattr on-disk corruption: xattr entry beyond xattr_isize */
+ if (remaining < entry_sz) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ remaining -= entry_sz;
+ next_pos = it->pos + entry_sz;
+
+ if (getxattr)
+ ret = erofs_getxattr_foreach(it);
+ else
+ ret = erofs_listxattr_foreach(it);
+ if ((getxattr && ret != -ENODATA) || (!getxattr && ret))
break;
+
+ it->pos = next_pos;
}
- xattr_iter_end_final(&it->it);
- return ret ? ret : it->buffer_ofs;
+ return ret;
}
-static int shared_listxattr(struct listxattr_iter *it)
+static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
+ struct inode *inode, bool getxattr)
{
- struct inode *const inode = d_inode(it->dentry);
struct erofs_inode *const vi = EROFS_I(inode);
- struct super_block *const sb = inode->i_sb;
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = 0;
+ struct super_block *const sb = it->sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ unsigned int i = 0;
+ int ret;
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- erofs_blk_t blkaddr =
- xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+ ret = erofs_init_metabuf(&it->buf, sb,
+ erofs_sb_has_shared_ea_in_metabox(sbi));
+ if (ret)
+ return ret;
- it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
- if (!i || blkaddr != it->it.blkaddr) {
- if (i)
- xattr_iter_end(&it->it, true);
+ while (i < vi->xattr_shared_count) {
+ it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
+ vi->xattr_shared_xattrs[i++] * sizeof(__le32);
+ it->kaddr = erofs_bread(&it->buf, it->pos, true);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ if (getxattr)
+ ret = erofs_getxattr_foreach(it);
+ else
+ ret = erofs_listxattr_foreach(it);
+ if ((getxattr && ret != -ENODATA) || (!getxattr && ret))
+ break;
+ }
+ return i ? ret : -ENODATA;
+}
- it->it.page = erofs_get_meta_page(sb, blkaddr);
- if (IS_ERR(it->it.page))
- return PTR_ERR(it->it.page);
+int erofs_getxattr(struct inode *inode, int index, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ int ret;
+ unsigned int hashbit;
+ struct erofs_xattr_iter it;
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
- it->it.kaddr = kmap_atomic(it->it.page);
- it->it.blkaddr = blkaddr;
- }
+ if (!name)
+ return -EINVAL;
- ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
- if (ret)
- break;
+ ret = erofs_init_inode_xattrs(inode);
+ if (ret)
+ return ret;
+
+ /* reserved flag is non-zero if there's any change of on-disk format */
+ if (erofs_sb_has_xattr_filter(sbi) && !sbi->xattr_filter_reserved) {
+ hashbit = xxh32(name, strlen(name),
+ EROFS_XATTR_FILTER_SEED + index);
+ hashbit &= EROFS_XATTR_FILTER_BITS - 1;
+ if (vi->xattr_name_filter & (1U << hashbit))
+ return -ENODATA;
}
- if (vi->xattr_shared_count)
- xattr_iter_end_final(&it->it);
- return ret ? ret : it->buffer_ofs;
+ it.index = index;
+ it.name = QSTR(name);
+ if (it.name.len > EROFS_NAME_LEN)
+ return -ERANGE;
+
+ it.sb = inode->i_sb;
+ it.buf = __EROFS_BUF_INITIALIZER;
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+ it.buffer_ofs = 0;
+
+ ret = erofs_xattr_iter_inline(&it, inode, true);
+ if (ret == -ENODATA)
+ ret = erofs_xattr_iter_shared(&it, inode, true);
+ erofs_put_metabuf(&it.buf);
+ return ret ? ret : it.buffer_ofs;
}
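A hedged sketch (not in the patch) of the usual two-pass lookup against this helper; "mykey" is a made-up name and is the part after the handler prefix ("user." here). A NULL buffer returns the value size because erofs_getxattr_foreach() stores value_sz in buffer_ofs, and the xxh32 name filter above short-circuits misses before any metadata I/O:

	int sz;
	void *val;

	sz = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER, "mykey", NULL, 0);
	if (sz > 0) {
		val = kmalloc(sz, GFP_KERNEL);
		if (val) {
			/* second pass copies the value into val */
			sz = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER,
					    "mykey", val, sz);
			kfree(val);
		}
	}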
-ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
+ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int ret;
- struct listxattr_iter it;
+ struct erofs_xattr_iter it;
+ struct inode *inode = d_inode(dentry);
- ret = init_inode_xattrs(d_inode(dentry));
- if (ret == -ENOATTR)
+ ret = erofs_init_inode_xattrs(inode);
+ if (ret == -ENODATA)
return 0;
if (ret)
return ret;
+ it.sb = dentry->d_sb;
+ it.buf = __EROFS_BUF_INITIALIZER;
it.dentry = dentry;
it.buffer = buffer;
it.buffer_size = buffer_size;
it.buffer_ofs = 0;
- it.it.sb = dentry->d_sb;
+ ret = erofs_xattr_iter_inline(&it, inode, false);
+ if (!ret || ret == -ENODATA)
+ ret = erofs_xattr_iter_shared(&it, inode, false);
+ if (ret == -ENODATA)
+ ret = 0;
+ erofs_put_metabuf(&it.buf);
+ return ret ? ret : it.buffer_ofs;
+}
- ret = inline_listxattr(&it);
- if (ret < 0 && ret != -ENOATTR)
- return ret;
- return shared_listxattr(&it);
+void erofs_xattr_prefixes_cleanup(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ int i;
+
+ if (sbi->xattr_prefixes) {
+ for (i = 0; i < sbi->xattr_prefix_count; i++)
+ kfree(sbi->xattr_prefixes[i].prefix);
+ kfree(sbi->xattr_prefixes);
+ sbi->xattr_prefixes = NULL;
+ }
+}
+
+int erofs_xattr_prefixes_init(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
+ struct erofs_xattr_prefix_item *pfs;
+ int ret = 0, i, len;
+ bool plain = erofs_sb_has_plain_xattr_pfx(sbi);
+
+ if (!sbi->xattr_prefix_count)
+ return 0;
+
+ pfs = kcalloc(sbi->xattr_prefix_count, sizeof(*pfs), GFP_KERNEL);
+ if (!pfs)
+ return -ENOMEM;
+
+ if (!plain) {
+ if (erofs_sb_has_metabox(sbi))
+ (void)erofs_init_metabuf(&buf, sb, true);
+ else if (sbi->packed_inode)
+ buf.mapping = sbi->packed_inode->i_mapping;
+ else
+ plain = true;
+ }
+ if (plain)
+ (void)erofs_init_metabuf(&buf, sb, false);
+
+ for (i = 0; i < sbi->xattr_prefix_count; i++) {
+ void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);
+
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ break;
+ } else if (len < sizeof(*pfs->prefix) ||
+ len > EROFS_NAME_LEN + sizeof(*pfs->prefix)) {
+ kfree(ptr);
+ ret = -EFSCORRUPTED;
+ break;
+ }
+ pfs[i].prefix = ptr;
+ pfs[i].infix_len = len - sizeof(struct erofs_xattr_long_prefix);
+ }
+
+ erofs_put_metabuf(&buf);
+ sbi->xattr_prefixes = pfs;
+ if (ret)
+ erofs_xattr_prefixes_cleanup(sb);
+ return ret;
}
#ifdef CONFIG_EROFS_FS_POSIX_ACL
@@ -701,7 +554,7 @@ struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
rc = erofs_getxattr(inode, prefix, "", value, rc);
}
- if (rc == -ENOATTR)
+ if (rc == -ENODATA)
acl = NULL;
else if (rc < 0)
acl = ERR_PTR(rc);
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 94090c74b3f7..6317caa8413e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -10,48 +10,21 @@
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
-/* Attribute not found */
-#define ENOATTR ENODATA
-
-static inline unsigned int inlinexattr_header_size(struct inode *inode)
-{
- return sizeof(struct erofs_xattr_ibody_header) +
- sizeof(u32) * EROFS_I(inode)->xattr_shared_count;
-}
-
-static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
- unsigned int xattr_id)
-{
-#ifdef CONFIG_EROFS_FS_XATTR
- return sbi->xattr_blkaddr +
- xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
-#else
- return 0;
-#endif
-}
-
-static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
- unsigned int xattr_id)
-{
- return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
-}
-
#ifdef CONFIG_EROFS_FS_XATTR
extern const struct xattr_handler erofs_xattr_user_handler;
extern const struct xattr_handler erofs_xattr_trusted_handler;
-#ifdef CONFIG_EROFS_FS_SECURITY
extern const struct xattr_handler erofs_xattr_security_handler;
-#endif
-static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
+static inline const char *erofs_xattr_prefix(unsigned int idx,
+ struct dentry *dentry)
{
- static const struct xattr_handler *xattr_handler_map[] = {
+ const struct xattr_handler *handler = NULL;
+
+ static const struct xattr_handler * const xattr_handler_map[] = {
[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
- &posix_acl_access_xattr_handler,
- [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &posix_acl_default_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
+ [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
#endif
[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
@@ -59,15 +32,24 @@ static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
#endif
};
- return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
- xattr_handler_map[idx] : NULL;
+ if (idx && idx < ARRAY_SIZE(xattr_handler_map))
+ handler = xattr_handler_map[idx];
+
+ if (!xattr_handler_can_list(handler, dentry))
+ return NULL;
+
+ return xattr_prefix(handler);
}
-extern const struct xattr_handler *erofs_xattr_handlers[];
+extern const struct xattr_handler * const erofs_xattr_handlers[];
+int erofs_xattr_prefixes_init(struct super_block *sb);
+void erofs_xattr_prefixes_cleanup(struct super_block *sb);
int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
ssize_t erofs_listxattr(struct dentry *, char *, size_t);
#else
+static inline int erofs_xattr_prefixes_init(struct super_block *sb) { return 0; }
+static inline void erofs_xattr_prefixes_cleanup(struct super_block *sb) {}
static inline int erofs_getxattr(struct inode *inode, int index,
const char *name, void *buffer,
size_t buffer_size)
@@ -86,4 +68,3 @@ struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu);
#endif
#endif
-
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index bcb1b91b234f..65da21504632 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -2,13 +2,118 @@
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2022 Alibaba Cloud
*/
-#include "zdata.h"
#include "compress.h"
-#include <linux/prefetch.h>
-
+#include <linux/psi.h>
+#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>
+#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
+#define Z_EROFS_INLINE_BVECS 2
+
+struct z_erofs_bvec {
+ struct page *page;
+ int offset;
+ unsigned int end;
+};
+
+#define __Z_EROFS_BVSET(name, total) \
+struct name { \
+ /* point to the next page which contains the following bvecs */ \
+ struct page *nextpage; \
+ struct z_erofs_bvec bvec[total]; \
+}
+__Z_EROFS_BVSET(z_erofs_bvset,);
+__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
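Expanded for reference, the two __Z_EROFS_BVSET() instantiations above are equivalent to:

	struct z_erofs_bvset {			/* page-sized sets chained via nextpage */
		struct page *nextpage;
		struct z_erofs_bvec bvec[];
	};

	struct z_erofs_bvset_inline {		/* bootstrap set embedded in the pcluster */
		struct page *nextpage;
		struct z_erofs_bvec bvec[Z_EROFS_INLINE_BVECS];	/* i.e. 2 */
	};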
+
+/*
+ * Structure fields follow one of the following exclusion rules.
+ *
+ * I: Modifiable by initialization/destruction paths and read-only
+ * for everyone else;
+ *
+ * L: Field should be protected by the pcluster lock;
+ *
+ * A: Field should be accessed / updated in atomic for parallelized code.
+ */
+struct z_erofs_pcluster {
+ struct mutex lock;
+ struct lockref lockref;
+
+ /* A: point to next chained pcluster or TAILs */
+ struct z_erofs_pcluster *next;
+
+ /* I: start physical position of this pcluster */
+ erofs_off_t pos;
+
+ /* L: the maximum decompression size of this round */
+ unsigned int length;
+
+ /* L: total number of bvecs */
+ unsigned int vcnt;
+
+ /* I: pcluster size (compressed size) in bytes */
+ unsigned int pclustersize;
+
+ /* I: page offset of start position of decompression */
+ unsigned short pageofs_out;
+
+ /* I: page offset of inline compressed data */
+ unsigned short pageofs_in;
+
+ union {
+ /* L: inline a certain number of bvec for bootstrap */
+ struct z_erofs_bvset_inline bvset;
+
+ /* I: can be used to free the pcluster by RCU. */
+ struct rcu_head rcu;
+ };
+
+ /* I: compression algorithm format */
+ unsigned char algorithmformat;
+
+ /* I: whether compressed data is in-lined or not */
+ bool from_meta;
+
+ /* L: whether partial decompression or not */
+ bool partial;
+
+ /* L: whether extra buffer allocations are best-effort */
+ bool besteffort;
+
+ /* A: compressed bvecs (can be cached or inplaced pages) */
+ struct z_erofs_bvec compressed_bvecs[];
+};
+
+/* the end of a chain of pclusters */
+#define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
+
+struct z_erofs_decompressqueue {
+ struct super_block *sb;
+ struct z_erofs_pcluster *head;
+ atomic_t pending_bios;
+
+ union {
+ struct completion done;
+ struct work_struct work;
+ struct kthread_work kthread_work;
+ } u;
+ bool eio, sync;
+};
+
+static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
+{
+ return PAGE_ALIGN(pcl->pageofs_in + pcl->pclustersize) >> PAGE_SHIFT;
+}
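/*
 * Illustrative sketch (not part of this patch): the same page-count math
 * as z_erofs_pclusterpages() above, assuming 4 KiB pages.  A pcluster
 * whose compressed data starts 512 bytes into its first page and spans
 * 8 KiB occupies three pages, not two.
 */
#include <stdio.h>

static unsigned int sketch_pclusterpages(unsigned int pageofs_in,
					 unsigned int pclustersize)
{
	/* PAGE_ALIGN(ofs + size) >> PAGE_SHIFT == ceil((ofs + size) / 4096) */
	return (pageofs_in + pclustersize + 4096 - 1) / 4096;
}

int main(void)
{
	printf("%u\n", sketch_pclusterpages(512, 8192));	/* prints 3 */
	return 0;
}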
+
+static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
+{
+ return fo->mapping == MNGD_MAPPING(sbi);
+}
+
+#define Z_EROFS_ONSTACK_PAGES 32
+
/*
* since pclustersize is variable for big pcluster feature, introduce slab
* pools implementation for different pcluster sizes.
@@ -23,9 +128,92 @@ struct z_erofs_pcluster_slab {
static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
- _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
+ _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES + 1)
};
+struct z_erofs_bvec_iter {
+ struct page *bvpage;
+ struct z_erofs_bvset *bvset;
+ unsigned int nr, cur;
+};
+
+static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
+{
+ if (iter->bvpage)
+ kunmap_local(iter->bvset);
+ return iter->bvpage;
+}
+
+static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
+{
+ unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
+ /* have to access nextpage in advance, otherwise it will be unmapped */
+ struct page *nextpage = iter->bvset->nextpage;
+ struct page *oldpage;
+
+ DBG_BUGON(!nextpage);
+ oldpage = z_erofs_bvec_iter_end(iter);
+ iter->bvpage = nextpage;
+ iter->bvset = kmap_local_page(nextpage);
+ iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
+ iter->cur = 0;
+ return oldpage;
+}
+
+static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
+ struct z_erofs_bvset_inline *bvset,
+ unsigned int bootstrap_nr,
+ unsigned int cur)
+{
+ *iter = (struct z_erofs_bvec_iter) {
+ .nr = bootstrap_nr,
+ .bvset = (struct z_erofs_bvset *)bvset,
+ };
+
+ while (cur > iter->nr) {
+ cur -= iter->nr;
+ z_erofs_bvset_flip(iter);
+ }
+ iter->cur = cur;
+}
+
+static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
+ struct z_erofs_bvec *bvec,
+ struct page **candidate_bvpage,
+ struct page **pagepool)
+{
+ if (iter->cur >= iter->nr) {
+ struct page *nextpage = *candidate_bvpage;
+
+ if (!nextpage) {
+ nextpage = __erofs_allocpage(pagepool, GFP_KERNEL,
+ true);
+ if (!nextpage)
+ return -ENOMEM;
+ set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
+ }
+ DBG_BUGON(iter->bvset->nextpage);
+ iter->bvset->nextpage = nextpage;
+ z_erofs_bvset_flip(iter);
+
+ iter->bvset->nextpage = NULL;
+ *candidate_bvpage = NULL;
+ }
+ iter->bvset->bvec[iter->cur++] = *bvec;
+ return 0;
+}
+
+static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
+ struct z_erofs_bvec *bvec,
+ struct page **old_bvpage)
+{
+ if (iter->cur == iter->nr)
+ *old_bvpage = z_erofs_bvset_flip(iter);
+ else
+ *old_bvpage = NULL;
+ *bvec = iter->bvset->bvec[iter->cur++];
+}
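/*
 * Illustrative sketch (not part of this patch): the capacity math behind
 * z_erofs_bvset_flip() above.  It assumes 4 KiB pages and a 64-bit build,
 * where one z_erofs_bvec takes 16 bytes and the nextpage pointer occupies
 * the first 8 bytes of each follow-on bvset page; the inline bootstrap
 * set only holds Z_EROFS_INLINE_BVECS entries.
 */
#include <stdio.h>
#include <stddef.h>

struct sketch_bvec { void *page; int offset; unsigned int end; };
struct sketch_bvset { void *nextpage; struct sketch_bvec bvec[]; };

int main(void)
{
	const size_t page_size = 4096;
	size_t per_page = (page_size - offsetof(struct sketch_bvset, bvec)) /
			  sizeof(struct sketch_bvec);

	/* 2 inline bvecs bootstrap the set; each extra page then holds 255 */
	printf("bvecs per follow-on page: %zu\n", per_page);
	return 0;
}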
+
static void z_erofs_destroy_pcluster_pool(void)
{
int i;
@@ -46,7 +234,7 @@ static int z_erofs_create_pcluster_pool(void)
for (pcs = pcluster_pool;
pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
- size = struct_size(a, compressed_pages, pcs->maxpages);
+ size = struct_size(a, compressed_bvecs, pcs->maxpages);
sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
pcs->slab = kmem_cache_create(pcs->name, size, 0,
@@ -60,21 +248,20 @@ static int z_erofs_create_pcluster_pool(void)
return 0;
}
-static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
{
- int i;
+ unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct z_erofs_pcluster_slab *pcs = pcluster_pool;
- for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
- struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+ for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
struct z_erofs_pcluster *pcl;
if (nrpages > pcs->maxpages)
continue;
- pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
+ pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
if (!pcl)
return ERR_PTR(-ENOMEM);
- pcl->pclusterpages = nrpages;
return pcl;
}
return ERR_PTR(-EINVAL);
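/*
 * Illustrative sketch (not part of this patch): how the slab selection in
 * z_erofs_alloc_pcluster() above maps a compressed byte span to a pool,
 * assuming 4 KiB pages and the maxpages ladder {1, 4, 16, 64, 128, ...}
 * from pcluster_pool[].
 */
#include <stdio.h>

static unsigned int sketch_pick_pool(unsigned int size)
{
	static const unsigned int maxpages[] = { 1, 4, 16, 64, 128 };
	unsigned int nrpages = (size + 4095) / 4096;
	unsigned int i;

	for (i = 0; i < sizeof(maxpages) / sizeof(maxpages[0]); ++i)
		if (nrpages <= maxpages[i])
			return maxpages[i];
	return 0;	/* larger spans use the biggest pool or fail */
}

int main(void)
{
	/* a 5-page (20 KiB) pcluster comes from the 16-page slab */
	printf("%u\n", sketch_pick_pool(5 * 4096));
	return 0;
}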
@@ -82,12 +269,13 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
+ unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
int i;
for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
- if (pcl->pclusterpages > pcs->maxpages)
+ if (pclusterpages > pcs->maxpages)
continue;
kmem_cache_free(pcs->slab, pcl);
@@ -96,962 +284,1143 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
DBG_BUGON(1);
}
-/*
- * a compressed_pages[] placeholder in order to avoid
- * being filled with file pages for in-place decompression.
- */
-#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
+static struct workqueue_struct *z_erofs_workqueue __read_mostly;
-/* how to allocate cached pages for a pcluster */
-enum z_erofs_cache_alloctype {
- DONTALLOC, /* don't allocate any cached pages */
- DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
- /*
- * try to use cached I/O if page allocation succeeds or fallback
- * to in-place I/O instead to avoid any direct reclaim.
- */
- TRYALLOC,
-};
+#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
+static struct kthread_worker __rcu **z_erofs_pcpu_workers;
+static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);
-/*
- * tagged pointer with 1-bit tag for all compressed pages
- * tag 0 - the page is just found with an extra page reference
- */
-typedef tagptr1_t compressed_page_t;
+static void erofs_destroy_percpu_workers(void)
+{
+ struct kthread_worker *worker;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ worker = rcu_dereference_protected(
+ z_erofs_pcpu_workers[cpu], 1);
+ rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
+ if (worker)
+ kthread_destroy_worker(worker);
+ }
+ kfree(z_erofs_pcpu_workers);
+}
-#define tag_compressed_page_justfound(page) \
- tagptr_fold(compressed_page_t, page, 1)
+static struct kthread_worker *erofs_init_percpu_worker(int cpu)
+{
+ struct kthread_worker *worker =
+ kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");
+
+ if (IS_ERR(worker))
+ return worker;
+ if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
+ sched_set_fifo_low(worker->task);
+ return worker;
+}
-static struct workqueue_struct *z_erofs_workqueue __read_mostly;
+static int erofs_init_percpu_workers(void)
+{
+ struct kthread_worker *worker;
+ unsigned int cpu;
-void z_erofs_exit_zip_subsystem(void)
+ z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
+ sizeof(struct kthread_worker *), GFP_ATOMIC);
+ if (!z_erofs_pcpu_workers)
+ return -ENOMEM;
+
+ for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
+ worker = erofs_init_percpu_worker(cpu);
+ if (!IS_ERR(worker))
+ rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
+static enum cpuhp_state erofs_cpuhp_state;
+
+static int erofs_cpu_online(unsigned int cpu)
{
- destroy_workqueue(z_erofs_workqueue);
- z_erofs_destroy_pcluster_pool();
+ struct kthread_worker *worker, *old;
+
+ worker = erofs_init_percpu_worker(cpu);
+ if (IS_ERR(worker))
+ return PTR_ERR(worker);
+
+ spin_lock(&z_erofs_pcpu_worker_lock);
+ old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
+ lockdep_is_held(&z_erofs_pcpu_worker_lock));
+ if (!old)
+ rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
+ spin_unlock(&z_erofs_pcpu_worker_lock);
+ if (old)
+ kthread_destroy_worker(worker);
+ return 0;
}
-static inline int z_erofs_init_workqueue(void)
+static int erofs_cpu_offline(unsigned int cpu)
{
- const unsigned int onlinecpus = num_possible_cpus();
+ struct kthread_worker *worker;
- /*
- * no need to spawn too many threads, limiting threads could minimum
- * scheduling overhead, perhaps per-CPU threads should be better?
- */
- z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
- WQ_UNBOUND | WQ_HIGHPRI,
- onlinecpus + onlinecpus / 4);
- return z_erofs_workqueue ? 0 : -ENOMEM;
+ spin_lock(&z_erofs_pcpu_worker_lock);
+ worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
+ lockdep_is_held(&z_erofs_pcpu_worker_lock));
+ rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
+ spin_unlock(&z_erofs_pcpu_worker_lock);
+
+ synchronize_rcu();
+ if (worker)
+ kthread_destroy_worker(worker);
+ return 0;
}
-int __init z_erofs_init_zip_subsystem(void)
+static int erofs_cpu_hotplug_init(void)
{
- int err = z_erofs_create_pcluster_pool();
+ int state;
+
+ state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
+ if (state < 0)
+ return state;
+
+ erofs_cpuhp_state = state;
+ return 0;
+}
+
+static void erofs_cpu_hotplug_destroy(void)
+{
+ if (erofs_cpuhp_state)
+ cpuhp_remove_state_nocalls(erofs_cpuhp_state);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+static inline int erofs_cpu_hotplug_init(void) { return 0; }
+static inline void erofs_cpu_hotplug_destroy(void) {}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static int z_erofs_init_pcpu_workers(struct super_block *sb)
+{
+ int err;
+
+ if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
+ return 0;
+
+ err = erofs_init_percpu_workers();
+ if (err) {
+ erofs_err(sb, "per-cpu workers: failed to allocate.");
+ goto err_init_percpu_workers;
+ }
+
+ err = erofs_cpu_hotplug_init();
+ if (err < 0) {
+ erofs_err(sb, "per-cpu workers: failed CPU hotplug init.");
+ goto err_cpuhp_init;
+ }
+ erofs_info(sb, "initialized per-cpu workers successfully.");
+ return err;
+
+err_cpuhp_init:
+ erofs_destroy_percpu_workers();
+err_init_percpu_workers:
+ atomic_set(&erofs_percpu_workers_initialized, 0);
+ return err;
+}
+
+static void z_erofs_destroy_pcpu_workers(void)
+{
+ if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
+ return;
+ erofs_cpu_hotplug_destroy();
+ erofs_destroy_percpu_workers();
+}
+#else /* !CONFIG_EROFS_FS_PCPU_KTHREAD */
+static inline int z_erofs_init_pcpu_workers(struct super_block *sb) { return 0; }
+static inline void z_erofs_destroy_pcpu_workers(void) {}
+#endif /* CONFIG_EROFS_FS_PCPU_KTHREAD */
+
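/*
 * Illustrative sketch (not part of this patch): the one-shot init/teardown
 * guard that z_erofs_init_pcpu_workers()/z_erofs_destroy_pcpu_workers()
 * above implement with atomic_xchg(), modelled here with C11 atomics in
 * user space.  Only the first caller performs setup and only one caller
 * performs teardown, no matter how many superblocks come and go.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int initialized;

static int sketch_init_once(void)
{
	if (atomic_exchange(&initialized, 1))
		return 0;		/* someone else already set it up */
	puts("performing one-time setup");
	return 0;
}

static void sketch_destroy_once(void)
{
	if (!atomic_exchange(&initialized, 0))
		return;			/* nothing to tear down */
	puts("performing one-time teardown");
}

int main(void)
{
	sketch_init_once();
	sketch_init_once();		/* second mount: no-op */
	sketch_destroy_once();
	return 0;
}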
+void z_erofs_exit_subsystem(void)
+{
+ z_erofs_destroy_pcpu_workers();
+ destroy_workqueue(z_erofs_workqueue);
+ z_erofs_destroy_pcluster_pool();
+ z_erofs_crypto_disable_all_engines();
+ z_erofs_exit_decompressor();
+}
+
+int __init z_erofs_init_subsystem(void)
+{
+ int err = z_erofs_init_decompressor();
if (err)
- return err;
- err = z_erofs_init_workqueue();
+ goto err_decompressor;
+
+ err = z_erofs_create_pcluster_pool();
if (err)
- z_erofs_destroy_pcluster_pool();
+ goto err_pcluster_pool;
+
+ z_erofs_workqueue = alloc_workqueue("erofs_worker",
+ WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
+ if (!z_erofs_workqueue) {
+ err = -ENOMEM;
+ goto err_workqueue_init;
+ }
+
+ return err;
+
+err_workqueue_init:
+ z_erofs_destroy_pcluster_pool();
+err_pcluster_pool:
+ z_erofs_exit_decompressor();
+err_decompressor:
return err;
}
-enum z_erofs_collectmode {
- COLLECT_SECONDARY,
- COLLECT_PRIMARY,
- /*
- * The current collection was the tail of an exist chain, in addition
- * that the previous processed chained collections are all decided to
- * be hooked up to it.
- * A new chain will be created for the remaining collections which are
- * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED,
- * the next collection cannot reuse the whole page safely in
- * the following scenario:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (belongs to the next cl) | (belongs to the current cl) |
- * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
- */
- COLLECT_PRIMARY_HOOKED,
+enum z_erofs_pclustermode {
+ /* It has previously been linked into another processing chain */
+ Z_EROFS_PCLUSTER_INFLIGHT,
/*
- * a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it
- * could be dispatched into bypass queue later due to uptodated managed
- * pages. All related online pages cannot be reused for inplace I/O (or
- * pagevec) since it can be directly decoded without I/O submission.
+ * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
+ * may be dispatched to the bypass queue later due to up-to-date managed
+ * folios. All file-backed folios related to this pcluster cannot be
+ * reused for in-place I/O (or bvpage) since the pcluster may be decoded
+ * in a separate queue (and thus out of order).
*/
- COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
+ Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/*
- * The current collection has been linked with the owned chain, and
- * could also be linked with the remaining collections, which means
- * if the processing page is the tail page of the collection, thus
- * the current collection can safely use the whole page (since
- * the previous collection is under control) for in-place I/O, as
- * illustrated below:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current cl) | (of the previous collection) |
- * | PRIMARY_FOLLOWED or | |
- * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
- *
- * [ (*) the above page can be used as inplace I/O. ]
+ * The pcluster has just been linked to our processing chain.
+ * File-backed folios (except for the head page) related to it can be
+ * used for in-place I/O (or bvpage).
*/
- COLLECT_PRIMARY_FOLLOWED,
+ Z_EROFS_PCLUSTER_FOLLOWED,
};
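/*
 * Illustrative sketch (not part of this patch): how the pcluster mode
 * above is picked in z_erofs_pcluster_begin() and z_erofs_bind_cache()
 * further below.  The enum values and helper name are stand-ins; only
 * the decision order matters.
 */
#include <stdio.h>
#include <stdbool.h>

enum sketch_mode { INFLIGHT, FOLLOWED_NOINPLACE, FOLLOWED };

static enum sketch_mode sketch_choose_mode(bool newly_registered,
					   bool linked_to_our_chain,
					   bool fully_cached)
{
	enum sketch_mode mode;

	if (newly_registered || linked_to_our_chain)
		mode = FOLLOWED;	/* the chain is owned by this request */
	else
		mode = INFLIGHT;	/* another chain will decompress it */

	/* fully cached pclusters can bypass I/O, so no in-place reuse */
	if (mode == FOLLOWED && fully_cached)
		mode = FOLLOWED_NOINPLACE;
	return mode;
}

int main(void)
{
	printf("%d %d %d\n",
	       sketch_choose_mode(true, false, false),
	       sketch_choose_mode(false, false, false),
	       sketch_choose_mode(true, false, true));
	return 0;
}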
-struct z_erofs_collector {
- struct z_erofs_pagevec_ctor vector;
+struct z_erofs_frontend {
+ struct inode *const inode;
+ struct erofs_map_blocks map;
+ struct z_erofs_bvec_iter biter;
- struct z_erofs_pcluster *pcl, *tailpcl;
- struct z_erofs_collection *cl;
- /* a pointer used to pick up inplace I/O pages */
- struct page **icpage_ptr;
- z_erofs_next_pcluster_t owned_head;
+ struct page *pagepool;
+ struct page *candidate_bvpage;
+ struct z_erofs_pcluster *pcl, *head;
+ enum z_erofs_pclustermode mode;
- enum z_erofs_collectmode mode;
+ erofs_off_t headoffset;
+
+ /* an index used to pick up inplace I/O pages */
+ unsigned int icur;
};
-struct z_erofs_decompress_frontend {
- struct inode *const inode;
+#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
+ .inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
+ .mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }
- struct z_erofs_collector clt;
- struct erofs_map_blocks map;
+static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
+{
+ unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
- bool readahead;
- /* used for applying cache strategy on the fly */
- bool backmost;
- erofs_off_t headoffset;
-};
+ if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+ return false;
-#define COLLECTOR_INIT() { \
- .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = COLLECT_PRIMARY_FOLLOWED }
+ if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
+ return true;
-#define DECOMPRESS_FRONTEND_INIT(__i) { \
- .inode = __i, .clt = COLLECTOR_INIT(), \
- .backmost = true, }
+ if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+ fe->map.m_la < fe->headoffset)
+ return true;
-static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
-static DEFINE_MUTEX(z_pagemap_global_lock);
+ return false;
+}
-static void preload_compressed_pages(struct z_erofs_collector *clt,
- struct address_space *mc,
- enum z_erofs_cache_alloctype type,
- struct page **pagepool)
+static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
{
- struct z_erofs_pcluster *pcl = clt->pcl;
- bool standalone = true;
+ struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
+ struct z_erofs_pcluster *pcl = fe->pcl;
+ unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ bool shouldalloc = z_erofs_should_alloc_cache(fe);
+ pgoff_t poff = pcl->pos >> PAGE_SHIFT;
+ bool may_bypass = true;
+ /* Optimistic allocation, as in-place I/O can be used as a fallback */
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
- struct page **pages;
- pgoff_t index;
+ struct folio *folio, *newfolio;
+ unsigned int i;
- if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
+ if (i_blocksize(fe->inode) != PAGE_SIZE ||
+ fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
return;
- pages = pcl->compressed_pages;
- index = pcl->obj.index;
- for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
- struct page *page;
- compressed_page_t t;
- struct page *newpage = NULL;
-
- /* the compressed page was loaded before */
- if (READ_ONCE(*pages))
+ for (i = 0; i < pclusterpages; ++i) {
+ /* Inaccurate check w/o locking to avoid unneeded lookups */
+ if (READ_ONCE(pcl->compressed_bvecs[i].page))
continue;
- page = find_get_page(mc, index);
+ folio = filemap_get_folio(mc, poff + i);
+ if (IS_ERR(folio)) {
+ may_bypass = false;
+ if (!shouldalloc)
+ continue;
- if (page) {
- t = tag_compressed_page_justfound(page);
- } else {
- /* I/O is needed, no possible to decompress directly */
- standalone = false;
- switch (type) {
- case DELAYEDALLOC:
- t = tagptr_init(compressed_page_t,
- PAGE_UNALLOCATED);
- break;
- case TRYALLOC:
- newpage = erofs_allocpage(pagepool, gfp);
- if (!newpage)
- continue;
- set_page_private(newpage,
- Z_EROFS_PREALLOCATED_PAGE);
- t = tag_compressed_page_justfound(newpage);
- break;
- default: /* DONTALLOC */
+ /*
+ * Allocate a managed folio for cached I/O, or it may later be
+ * filled with a file-backed folio for in-place I/O instead
+ */
+ newfolio = filemap_alloc_folio(gfp, 0, NULL);
+ if (!newfolio)
continue;
- }
+ newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
+ folio = NULL;
}
-
- if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
+ spin_lock(&pcl->lockref.lock);
+ if (!pcl->compressed_bvecs[i].page) {
+ pcl->compressed_bvecs[i].page =
+ folio_page(folio ?: newfolio, 0);
+ spin_unlock(&pcl->lockref.lock);
continue;
-
- if (page)
- put_page(page);
- else if (newpage)
- erofs_pagepool_add(pagepool, newpage);
+ }
+ spin_unlock(&pcl->lockref.lock);
+ folio_put(folio ?: newfolio);
}
/*
- * don't do inplace I/O if all compressed pages are available in
- * managed cache since it can be moved to the bypass queue instead.
+ * Don't perform in-place I/O if all compressed pages are available in
+ * the managed cache, as the pcluster can be moved to the bypass queue.
*/
- if (standalone)
- clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+ if (may_bypass)
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
-/* called by erofs_shrinker to get rid of all compressed_pages */
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp)
+/* (erofs_shrinker) disconnect cached encoded data from pclusters */
+static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+ struct z_erofs_pcluster *pcl)
{
- struct z_erofs_pcluster *const pcl =
- container_of(grp, struct z_erofs_pcluster, obj);
+ unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ struct folio *folio;
int i;
- /*
- * refcount of workgroup is now freezed as 1,
- * therefore no need to worry about available decompression users.
- */
- for (i = 0; i < pcl->pclusterpages; ++i) {
- struct page *page = pcl->compressed_pages[i];
-
- if (!page)
- continue;
-
- /* block other users from reclaiming or migrating the page */
- if (!trylock_page(page))
- return -EBUSY;
-
- if (!erofs_page_is_managed(sbi, page))
- continue;
+ DBG_BUGON(pcl->from_meta);
+ /* Each cached folio contains one page unless bs > ps is supported */
+ for (i = 0; i < pclusterpages; ++i) {
+ if (pcl->compressed_bvecs[i].page) {
+ folio = page_folio(pcl->compressed_bvecs[i].page);
+ /* Avoid reclaiming or migrating this folio */
+ if (!folio_trylock(folio))
+ return -EBUSY;
- /* barrier is implied in the following 'unlock_page' */
- WRITE_ONCE(pcl->compressed_pages[i], NULL);
- detach_page_private(page);
- unlock_page(page);
+ if (!erofs_folio_is_managed(sbi, folio))
+ continue;
+ pcl->compressed_bvecs[i].page = NULL;
+ folio_detach_private(folio);
+ folio_unlock(folio);
+ }
}
return 0;
}
-int erofs_try_to_free_cached_page(struct page *page)
+static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
- struct z_erofs_pcluster *const pcl = (void *)page_private(page);
- int ret = 0; /* 0 - busy */
+ struct z_erofs_pcluster *pcl = folio_get_private(folio);
+ struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
+ struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
+ bool ret;
- if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
- unsigned int i;
+ if (!folio_test_private(folio))
+ return true;
- for (i = 0; i < pcl->pclusterpages; ++i) {
- if (pcl->compressed_pages[i] == page) {
- WRITE_ONCE(pcl->compressed_pages[i], NULL);
- ret = 1;
+ ret = false;
+ spin_lock(&pcl->lockref.lock);
+ if (pcl->lockref.count <= 0) {
+ DBG_BUGON(pcl->from_meta);
+ for (; bvec < end; ++bvec) {
+ if (bvec->page && page_folio(bvec->page) == folio) {
+ bvec->page = NULL;
+ folio_detach_private(folio);
+ ret = true;
break;
}
}
- erofs_workgroup_unfreeze(&pcl->obj, 1);
-
- if (ret)
- detach_page_private(page);
}
+ spin_unlock(&pcl->lockref.lock);
return ret;
}
-/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
- struct page *page)
+/*
+ * It will be called only on inode eviction. If there are still some
+ * decompression requests in progress, wait and reschedule for a bit here.
+ * An extra lock could be introduced instead but it seems unnecessary.
+ */
+static void z_erofs_cache_invalidate_folio(struct folio *folio,
+ size_t offset, size_t length)
{
- struct z_erofs_pcluster *const pcl = clt->pcl;
+ const size_t stop = length + offset;
- while (clt->icpage_ptr > pcl->compressed_pages)
- if (!cmpxchg(--clt->icpage_ptr, NULL, page))
- return true;
- return false;
+ /* Check for potential overflow in debug mode */
+ DBG_BUGON(stop > folio_size(folio) || stop < length);
+
+ if (offset == 0 && stop == folio_size(folio))
+ while (!z_erofs_cache_release_folio(folio, 0))
+ cond_resched();
}
-/* callers must be with collection lock held */
-static int z_erofs_attach_page(struct z_erofs_collector *clt,
- struct page *page,
- enum z_erofs_page_type type)
-{
- int ret;
+static const struct address_space_operations z_erofs_cache_aops = {
+ .release_folio = z_erofs_cache_release_folio,
+ .invalidate_folio = z_erofs_cache_invalidate_folio,
+};
- /* give priority for inplaceio */
- if (clt->mode >= COLLECT_PRIMARY &&
- type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
- z_erofs_try_inplace_io(clt, page))
- return 0;
+int z_erofs_init_super(struct super_block *sb)
+{
+ struct inode *inode;
+ int err;
- ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
- clt->cl->vcnt += (unsigned int)ret;
+ err = z_erofs_init_pcpu_workers(sb);
+ if (err)
+ return err;
- return ret ? 0 : -EAGAIN;
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+ set_nlink(inode, 1);
+ inode->i_size = OFFSET_MAX;
+ inode->i_mapping->a_ops = &z_erofs_cache_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
+ EROFS_SB(sb)->managed_cache = inode;
+ xa_init(&EROFS_SB(sb)->managed_pslots);
+ return 0;
}
-static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
+/* callers must be with pcluster lock held */
+static int z_erofs_attach_page(struct z_erofs_frontend *fe,
+ struct z_erofs_bvec *bvec, bool exclusive)
{
- struct z_erofs_pcluster *pcl = clt->pcl;
- z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
-
- /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
- if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
- *owned_head) == Z_EROFS_PCLUSTER_NIL) {
- *owned_head = &pcl->next;
- /* so we can attach this pcluster to our submission chain. */
- clt->mode = COLLECT_PRIMARY_FOLLOWED;
- return;
- }
+ struct z_erofs_pcluster *pcl = fe->pcl;
+ int ret;
- /*
- * type 2, link to the end of an existing open chain, be careful
- * that its submission is controlled by the original attached chain.
- */
- if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
- *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
- *owned_head = Z_EROFS_PCLUSTER_TAIL;
- clt->mode = COLLECT_PRIMARY_HOOKED;
- clt->tailpcl = NULL;
- return;
+ if (exclusive) {
+ /* Inplace I/O is limited to one page for uncompressed data */
+ if (pcl->algorithmformat < Z_EROFS_COMPRESSION_MAX ||
+ fe->icur <= 1) {
+ /* Try to prioritize inplace I/O here */
+ spin_lock(&pcl->lockref.lock);
+ while (fe->icur > 0) {
+ if (pcl->compressed_bvecs[--fe->icur].page)
+ continue;
+ pcl->compressed_bvecs[fe->icur] = *bvec;
+ spin_unlock(&pcl->lockref.lock);
+ return 0;
+ }
+ spin_unlock(&pcl->lockref.lock);
+ }
+
+ /* otherwise, check if it can be used as a bvpage */
+ if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
+ !fe->candidate_bvpage)
+ fe->candidate_bvpage = bvec->page;
}
- /* type 3, it belongs to a chain, but it isn't the end of the chain */
- clt->mode = COLLECT_PRIMARY;
+ ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
+ &fe->pagepool);
+ fe->pcl->vcnt += (ret >= 0);
+ return ret;
}
-static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
{
- struct z_erofs_pcluster *pcl = clt->pcl;
- struct z_erofs_collection *cl;
- unsigned int length;
-
- /* to avoid unexpected loop formed by corrupted images */
- if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- cl = z_erofs_primarycollection(pcl);
- if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- length = READ_ONCE(pcl->length);
- if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
- if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
- } else {
- unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
-
- if (map->m_flags & EROFS_MAP_FULL_MAPPED)
- llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
+ if (lockref_get_not_zero(&pcl->lockref))
+ return true;
- while (llen > length &&
- length != cmpxchg_relaxed(&pcl->length, length, llen)) {
- cpu_relax();
- length = READ_ONCE(pcl->length);
- }
+ spin_lock(&pcl->lockref.lock);
+ if (__lockref_is_dead(&pcl->lockref)) {
+ spin_unlock(&pcl->lockref.lock);
+ return false;
}
- mutex_lock(&cl->lock);
- /* used to check tail merging loop due to corrupted images */
- if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
- clt->tailpcl = pcl;
- z_erofs_try_to_claim_pcluster(clt);
- clt->cl = cl;
- return 0;
+ if (!pcl->lockref.count++)
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ spin_unlock(&pcl->lockref.lock);
+ return true;
}
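/*
 * Illustrative sketch (not part of this patch): the "get only if still
 * alive" rule that z_erofs_get_pcluster() above implements with struct
 * lockref.  A plain mutex plus a counter stands in for the lockref, and
 * the dead state is modelled as a negative count, as lockref_mark_dead()
 * does.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_ref {
	pthread_mutex_t lock;
	long count;		/* < 0 means dead, as with lockref */
};

static bool sketch_get(struct sketch_ref *ref)
{
	bool ok;

	pthread_mutex_lock(&ref->lock);
	ok = ref->count >= 0;	/* dead pclusters must not be revived */
	if (ok)
		ref->count++;
	pthread_mutex_unlock(&ref->lock);
	return ok;
}

int main(void)
{
	struct sketch_ref ref = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("%d\n", sketch_get(&ref));	/* 1: reference grabbed */
	ref.count = -128;			/* pretend it was marked dead */
	printf("%d\n", sketch_get(&ref));	/* 0: caller must retry/skip */
	return 0;
}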
-static int z_erofs_register_collection(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
{
- struct z_erofs_pcluster *pcl;
- struct z_erofs_collection *cl;
- struct erofs_workgroup *grp;
+ struct erofs_map_blocks *map = &fe->map;
+ struct super_block *sb = fe->inode->i_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct z_erofs_pcluster *pcl, *pre;
+ unsigned int pageofs_in;
int err;
- if (!(map->m_flags & EROFS_MAP_ENCODED)) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
-
- /* no available pcluster, let's allocate one */
- pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT);
+ pageofs_in = erofs_blkoff(sb, map->m_pa);
+ pcl = z_erofs_alloc_pcluster(pageofs_in + map->m_plen);
if (IS_ERR(pcl))
return PTR_ERR(pcl);
- atomic_set(&pcl->obj.refcount, 1);
- pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+ lockref_init(&pcl->lockref); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
- pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
- (map->m_flags & EROFS_MAP_FULL_MAPPED ?
- Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
-
- /* new pclusters should be claimed as type 1, primary and followed */
- pcl->next = clt->owned_head;
- clt->mode = COLLECT_PRIMARY_FOLLOWED;
-
- cl = z_erofs_primarycollection(pcl);
- cl->pageofs = map->m_la & ~PAGE_MASK;
+ pcl->pclustersize = map->m_plen;
+ pcl->length = 0;
+ pcl->partial = true;
+ pcl->next = fe->head;
+ pcl->pos = map->m_pa;
+ pcl->pageofs_in = pageofs_in;
+ pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+ pcl->from_meta = map->m_flags & EROFS_MAP_META;
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
/*
* lock all primary followed works before visible to others
* and mutex_trylock *never* fails for a new pcluster.
*/
- mutex_init(&cl->lock);
- DBG_BUGON(!mutex_trylock(&cl->lock));
-
- grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
- if (IS_ERR(grp)) {
- err = PTR_ERR(grp);
- goto err_out;
- }
-
- if (grp != &pcl->obj) {
- clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
- err = -EEXIST;
- goto err_out;
+ mutex_init(&pcl->lock);
+ DBG_BUGON(!mutex_trylock(&pcl->lock));
+
+ if (!pcl->from_meta) {
+ while (1) {
+ xa_lock(&sbi->managed_pslots);
+ pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->pos,
+ NULL, pcl, GFP_KERNEL);
+ if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
+ xa_unlock(&sbi->managed_pslots);
+ break;
+ }
+ /* try to legitimize the current in-tree one */
+ xa_unlock(&sbi->managed_pslots);
+ cond_resched();
+ }
+ if (xa_is_err(pre)) {
+ err = xa_err(pre);
+ goto err_out;
+ } else if (pre) {
+ fe->pcl = pre;
+ err = -EEXIST;
+ goto err_out;
+ }
}
- /* used to check tail merging loop due to corrupted images */
- if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
- clt->tailpcl = pcl;
- clt->owned_head = &pcl->next;
- clt->pcl = pcl;
- clt->cl = cl;
+ fe->head = fe->pcl = pcl;
return 0;
err_out:
- mutex_unlock(&cl->lock);
+ mutex_unlock(&pcl->lock);
z_erofs_free_pcluster(pcl);
return err;
}
-static int z_erofs_collector_begin(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
{
- struct erofs_workgroup *grp;
+ struct erofs_map_blocks *map = &fe->map;
+ struct super_block *sb = fe->inode->i_sb;
+ struct z_erofs_pcluster *pcl = NULL;
+ void *ptr;
int ret;
- DBG_BUGON(clt->cl);
-
- /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
- DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
- DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-
- if (!PAGE_ALIGNED(map->m_pa)) {
- DBG_BUGON(1);
- return -EINVAL;
+ DBG_BUGON(fe->pcl);
+ /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
+ DBG_BUGON(!fe->head);
+
+ if (!(map->m_flags & EROFS_MAP_META)) {
+ while (1) {
+ rcu_read_lock();
+ pcl = xa_load(&EROFS_SB(sb)->managed_pslots, map->m_pa);
+ if (!pcl || z_erofs_get_pcluster(pcl)) {
+ DBG_BUGON(pcl && map->m_pa != pcl->pos);
+ rcu_read_unlock();
+ break;
+ }
+ rcu_read_unlock();
+ }
}
- grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
- if (grp) {
- clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+ if (pcl) {
+ fe->pcl = pcl;
+ ret = -EEXIST;
} else {
- ret = z_erofs_register_collection(clt, inode, map);
-
- if (!ret)
- goto out;
- if (ret != -EEXIST)
- return ret;
+ ret = z_erofs_register_pcluster(fe);
}
- ret = z_erofs_lookup_collection(clt, inode, map);
- if (ret) {
- erofs_workgroup_put(&clt->pcl->obj);
+ if (ret == -EEXIST) {
+ mutex_lock(&fe->pcl->lock);
+ /* check if this pcluster hasn't been linked into any chain. */
+ if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) {
+ /* .. so it can be attached to our submission chain */
+ fe->head = fe->pcl;
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
+ } else { /* otherwise, it belongs to an inflight chain */
+ fe->mode = Z_EROFS_PCLUSTER_INFLIGHT;
+ }
+ } else if (ret) {
return ret;
}
-out:
- z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
- clt->cl->pagevec, clt->cl->vcnt);
-
- /* since file-backed online pages are traversed in reverse order */
- clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages;
+ z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
+ Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
+ if (!fe->pcl->from_meta) {
+ /* bind cache first when cached decompression is preferred */
+ z_erofs_bind_cache(fe);
+ } else {
+ ret = erofs_init_metabuf(&map->buf, sb,
+ erofs_inode_in_metabox(fe->inode));
+ if (ret)
+ return ret;
+ ptr = erofs_bread(&map->buf, map->m_pa, false);
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ erofs_err(sb, "failed to get inline folio %d", ret);
+ return ret;
+ }
+ folio_get(page_folio(map->buf.page));
+ WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+ fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+ }
+ /* file-backed inplace I/O pages are traversed in reverse order */
+ fe->icur = z_erofs_pclusterpages(fe->pcl);
return 0;
}
-/*
- * keep in mind that no referenced pclusters will be freed
- * only after a RCU grace period.
- */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
- struct z_erofs_collection *const cl =
- container_of(head, struct z_erofs_collection, rcu);
-
- z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
- primary_collection));
-}
-
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
- struct z_erofs_pcluster *const pcl =
- container_of(grp, struct z_erofs_pcluster, obj);
- struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
-
- call_rcu(&cl->rcu, z_erofs_rcu_callback);
+ z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
}
-static void z_erofs_collection_put(struct z_erofs_collection *cl)
+static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+ struct z_erofs_pcluster *pcl)
{
- struct z_erofs_pcluster *const pcl =
- container_of(cl, struct z_erofs_pcluster, primary_collection);
-
- erofs_workgroup_put(&pcl->obj);
-}
-
-static bool z_erofs_collector_end(struct z_erofs_collector *clt)
-{
- struct z_erofs_collection *cl = clt->cl;
-
- if (!cl)
+ if (pcl->lockref.count)
return false;
- z_erofs_pagevec_ctor_exit(&clt->vector, false);
- mutex_unlock(&cl->lock);
+ /*
+ * Note that all cached folios should be detached before being deleted
+ * from the XArray. Otherwise some folios could still be attached to the
+ * orphan old pcluster when the new one is available in the tree.
+ */
+ if (erofs_try_to_free_all_cached_folios(sbi, pcl))
+ return false;
/*
- * if all pending pages are added, don't hold its reference
- * any longer if the pcluster isn't hosted by ourselves.
+ * It's impossible to fail after the pcluster is frozen, but in order
+ * to avoid some race conditions, add a DBG_BUGON to observe this.
*/
- if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
- z_erofs_collection_put(cl);
+ DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->pos) != pcl);
- clt->cl = NULL;
+ lockref_mark_dead(&pcl->lockref);
return true;
}
-static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
- unsigned int cachestrategy,
- erofs_off_t la)
+static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+ struct z_erofs_pcluster *pcl)
{
- if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
- return false;
-
- if (fe->backmost)
- return true;
-
- return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
- la < fe->headoffset;
+ bool free;
+
+ spin_lock(&pcl->lockref.lock);
+ free = __erofs_try_to_release_pcluster(sbi, pcl);
+ spin_unlock(&pcl->lockref.lock);
+ if (free) {
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ call_rcu(&pcl->rcu, z_erofs_rcu_callback);
+ }
+ return free;
}
-static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
- struct page *page, struct page **pagepool)
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
{
- struct inode *const inode = fe->inode;
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
- struct erofs_map_blocks *const map = &fe->map;
- struct z_erofs_collector *const clt = &fe->clt;
- const loff_t offset = page_offset(page);
- bool tight = true;
-
- enum z_erofs_cache_alloctype cache_strategy;
- enum z_erofs_page_type page_type;
- unsigned int cur, end, spiltted, index;
- int err = 0;
+ struct z_erofs_pcluster *pcl;
+ unsigned long index, freed = 0;
- /* register locked file pages as online pages in pack */
- z_erofs_onlinepage_init(page);
+ xa_lock(&sbi->managed_pslots);
+ xa_for_each(&sbi->managed_pslots, index, pcl) {
+ /* try to shrink each valid pcluster */
+ if (!erofs_try_to_release_pcluster(sbi, pcl))
+ continue;
+ xa_unlock(&sbi->managed_pslots);
- spiltted = 0;
- end = PAGE_SIZE;
-repeat:
- cur = end - 1;
-
- /* lucky, within the range of the current map_blocks */
- if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen) {
- /* didn't get a valid collection previously (very rare) */
- if (!clt->cl)
- goto restart_now;
- goto hitted;
+ ++freed;
+ if (!--nr)
+ return freed;
+ xa_lock(&sbi->managed_pslots);
}
+ xa_unlock(&sbi->managed_pslots);
+ return freed;
+}
- /* go ahead the next map_blocks */
- erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
+static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
+ struct z_erofs_pcluster *pcl, bool try_free)
+{
+ bool free = false;
- if (z_erofs_collector_end(clt))
- fe->backmost = false;
+ if (lockref_put_or_lock(&pcl->lockref))
+ return;
- map->m_la = offset + cur;
- map->m_llen = 0;
- err = z_erofs_map_blocks_iter(inode, map, 0);
- if (err)
- goto err_out;
+ DBG_BUGON(__lockref_is_dead(&pcl->lockref));
+ if (!--pcl->lockref.count) {
+ if (try_free && xa_trylock(&sbi->managed_pslots)) {
+ free = __erofs_try_to_release_pcluster(sbi, pcl);
+ xa_unlock(&sbi->managed_pslots);
+ }
+ atomic_long_add(!free, &erofs_global_shrink_cnt);
+ }
+ spin_unlock(&pcl->lockref.lock);
+ if (free)
+ call_rcu(&pcl->rcu, z_erofs_rcu_callback);
+}
-restart_now:
- if (!(map->m_flags & EROFS_MAP_MAPPED))
- goto hitted;
+static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
+{
+ struct z_erofs_pcluster *pcl = fe->pcl;
- err = z_erofs_collector_begin(clt, inode, map);
- if (err)
- goto err_out;
+ if (!pcl)
+ return;
- /* preload all compressed pages (maybe downgrade role if necessary) */
- if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy, map->m_la))
- cache_strategy = TRYALLOC;
- else
- cache_strategy = DONTALLOC;
+ z_erofs_bvec_iter_end(&fe->biter);
+ mutex_unlock(&pcl->lock);
- preload_compressed_pages(clt, MNGD_MAPPING(sbi),
- cache_strategy, pagepool);
+ if (fe->candidate_bvpage)
+ fe->candidate_bvpage = NULL;
-hitted:
- /*
- * Ensure the current partial page belongs to this submit chain rather
- * than other concurrent submit chains or the noio(bypass) chain since
- * those chains are handled asynchronously thus the page cannot be used
- * for inplace I/O or pagevec (should be processed in strict order.)
- */
- tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
- clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+ /* Drop refcount if it doesn't belong to our processing chain */
+ if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
+ z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
+ fe->pcl = NULL;
+}
- cur = end - min_t(unsigned int, offset + end - map->m_la, end);
- if (!(map->m_flags & EROFS_MAP_MAPPED)) {
- zero_user_segment(page, cur, end);
- goto next_part;
- }
+static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
+ unsigned int cur, unsigned int end, erofs_off_t pos)
+{
+ struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ unsigned int cnt;
+ u8 *src;
- /* let's derive page type */
- page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
- (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
- (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
- Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
-
- if (cur)
- tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
-
-retry:
- err = z_erofs_attach_page(clt, page, page_type);
- /* should allocate an additional short-lived page for pagevec */
- if (err == -EAGAIN) {
- struct page *const newpage =
- alloc_page(GFP_NOFS | __GFP_NOFAIL);
-
- set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
- err = z_erofs_attach_page(clt, newpage,
- Z_EROFS_PAGE_TYPE_EXCLUSIVE);
- if (!err)
- goto retry;
- }
+ if (!packed_inode)
+ return -EFSCORRUPTED;
- if (err)
- goto err_out;
+ buf.mapping = packed_inode->i_mapping;
+ for (; cur < end; cur += cnt, pos += cnt) {
+ cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
+ src = erofs_bread(&buf, pos, true);
+ if (IS_ERR(src)) {
+ erofs_put_metabuf(&buf);
+ return PTR_ERR(src);
+ }
+ memcpy_to_folio(folio, cur, src, cnt);
+ }
+ erofs_put_metabuf(&buf);
+ return 0;
+}
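/*
 * Illustrative sketch (not part of this patch): the block-boundary
 * chunking done by the copy loop in z_erofs_read_fragment() above,
 * assuming a 4 KiB block size.  A copy that starts mid-block is first
 * limited to the remainder of that block, then continues block by block.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int blksz = 4096;
	unsigned int cur = 0, end = 6000;
	unsigned long long pos = 3000;	/* byte offset in the packed inode */

	while (cur < end) {
		unsigned int left_in_blk = blksz - (unsigned int)(pos % blksz);
		unsigned int cnt = end - cur < left_in_blk ?
				   end - cur : left_in_blk;

		/* prints copies of 1096, 4096 and 808 bytes */
		printf("copy %u bytes at pos %llu\n", cnt, pos);
		cur += cnt;
		pos += cnt;
	}
	return 0;
}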
- index = page->index - (map->m_la >> PAGE_SHIFT);
+static int z_erofs_scan_folio(struct z_erofs_frontend *f,
+ struct folio *folio, bool ra)
+{
+ struct inode *const inode = f->inode;
+ struct erofs_map_blocks *const map = &f->map;
+ const loff_t offset = folio_pos(folio);
+ const unsigned int bs = i_blocksize(inode);
+ unsigned int end = folio_size(folio), split = 0, cur, pgs;
+ bool tight, excl;
+ int err = 0;
- z_erofs_onlinepage_fixup(page, index, true);
+ tight = (bs == PAGE_SIZE);
+ erofs_onlinefolio_init(folio);
+ do {
+ if (offset + end - 1 < map->m_la ||
+ offset + end - 1 >= map->m_la + map->m_llen) {
+ z_erofs_pcluster_end(f);
+ map->m_la = offset + end - 1;
+ map->m_llen = 0;
+ err = z_erofs_map_blocks_iter(inode, map, 0);
+ if (err)
+ break;
+ }
- /* bump up the number of spiltted parts of a page */
- ++spiltted;
- /* also update nr_pages */
- clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
-next_part:
- /* can be used for verification */
- map->m_llen = offset + cur - map->m_la;
+ cur = offset > map->m_la ? 0 : map->m_la - offset;
+ pgs = round_down(cur, PAGE_SIZE);
+ /* bump split parts first to avoid several separate cases */
+ ++split;
+
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ folio_zero_segment(folio, cur, end);
+ tight = false;
+ } else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
+ erofs_off_t fpos = offset + cur - map->m_la;
+
+ err = z_erofs_read_fragment(inode->i_sb, folio, cur,
+ cur + min(map->m_llen - fpos, end - cur),
+ EROFS_I(inode)->z_fragmentoff + fpos);
+ if (err)
+ break;
+ tight = false;
+ } else {
+ if (!f->pcl) {
+ err = z_erofs_pcluster_begin(f);
+ if (err)
+ break;
+ f->pcl->besteffort |= !ra;
+ }
- end = cur;
- if (end > 0)
- goto repeat;
+ pgs = round_down(end - 1, PAGE_SIZE);
+ /*
+ * Ensure this partial page belongs to this submit chain
+ * rather than other concurrent submit chains or the
+ * noio(bypass) chain, since those chains are handled
+ * asynchronously and thus the page cannot be used for
+ * in-place I/O or bvpage (it must be processed in strict order).
+ */
+ tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
+ excl = false;
+ if (cur <= pgs) {
+ excl = (split <= 1) || tight;
+ cur = pgs;
+ }
-out:
- z_erofs_onlinepage_endio(page);
+ err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
+ .page = folio_page(folio, pgs >> PAGE_SHIFT),
+ .offset = offset + pgs - map->m_la,
+ .end = end - pgs, }), excl);
+ if (err)
+ break;
- erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
- __func__, page, spiltted, map->m_llen);
+ erofs_onlinefolio_split(folio);
+ if (f->pcl->length < offset + end - map->m_la) {
+ f->pcl->length = offset + end - map->m_la;
+ f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+ }
+ if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
+ !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
+ f->pcl->length == map->m_llen)
+ f->pcl->partial = false;
+ }
+ /* shorten the remaining extent to update progress */
+ map->m_llen = offset + cur - map->m_la;
+ map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
+ if (cur <= pgs) {
+ split = cur < pgs;
+ tight = (bs == PAGE_SIZE);
+ }
+ } while ((end = cur) > 0);
+ erofs_onlinefolio_end(folio, err, false);
return err;
-
- /* if some error occurred while processing this page */
-err_out:
- SetPageError(page);
- goto out;
}
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
- bool sync, int bios)
+static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
+ unsigned int readahead_pages)
{
- struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
- /* wake up the caller thread for sync decompression */
- if (sync) {
- unsigned long flags;
+ /* auto: enable for read_folio, disable for readahead */
+ if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
+ !readahead_pages)
+ return true;
- spin_lock_irqsave(&io->u.wait.lock, flags);
- if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
- return;
- }
+ if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
+ (readahead_pages <= sbi->opt.max_sync_decompress_pages))
+ return true;
- if (atomic_add_return(bios, &io->pending_bios))
- return;
- /* Use workqueue and sync decompression for atomic contexts only */
- if (in_atomic() || irqs_disabled()) {
- queue_work(z_erofs_workqueue, &io->u.work);
- sbi->opt.readahead_sync_decompress = true;
- return;
- }
- z_erofs_decompressqueue_work(&io->u.work);
+ return false;
}
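/*
 * Illustrative sketch (not part of this patch): the policy implemented by
 * z_erofs_is_sync_decompress() above.  "auto" decompresses synchronously
 * only for single-folio reads (no readahead), while "force_on" also does
 * so for readahead windows up to max_sync_decompress_pages.  The enum
 * names below are stand-ins for the EROFS_SYNC_DECOMPRESS_* values.
 */
#include <stdio.h>
#include <stdbool.h>

enum { SYNC_AUTO, SYNC_FORCE_ON, SYNC_FORCE_OFF };

static bool sketch_sync_decompress(int policy, unsigned int ra_pages,
				   unsigned int max_sync_pages)
{
	if (policy == SYNC_AUTO && !ra_pages)
		return true;
	if (policy == SYNC_FORCE_ON && ra_pages <= max_sync_pages)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", sketch_sync_decompress(SYNC_AUTO, 0, 3));	/* 1 */
	printf("%d\n", sketch_sync_decompress(SYNC_AUTO, 8, 3));	/* 0 */
	printf("%d\n", sketch_sync_decompress(SYNC_FORCE_ON, 2, 3));	/* 1 */
	return 0;
}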
static bool z_erofs_page_is_invalidated(struct page *page)
{
- return !page->mapping && !z_erofs_is_shortlived_page(page);
+ return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
}
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
- tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
- struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
- blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+struct z_erofs_backend {
+ struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
+ struct super_block *sb;
+ struct z_erofs_pcluster *pcl;
+ /* pages with the longest decompressed length for deduplication */
+ struct page **decompressed_pages;
+ /* pages to keep the compressed data */
+ struct page **compressed_pages;
+
+ struct list_head decompressed_secondary_bvecs;
+ struct page **pagepool;
+ unsigned int onstack_used, nr_pages;
+ /* indicate if temporary copies should be preserved for later use */
+ bool keepxcpy;
+};
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+struct z_erofs_bvec_item {
+ struct z_erofs_bvec bvec;
+ struct list_head list;
+};
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(z_erofs_page_is_invalidated(page));
+static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
+ struct z_erofs_bvec *bvec)
+{
+ int poff = bvec->offset + be->pcl->pageofs_out;
+ struct z_erofs_bvec_item *item;
+ struct page **page;
+
+ if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
+ bvec->offset + bvec->end == be->pcl->length)) {
+ DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages);
+ page = be->decompressed_pages + (poff >> PAGE_SHIFT);
+ if (!*page) {
+ *page = bvec->page;
+ return;
+ }
+ } else {
+ be->keepxcpy = true;
+ }
- if (err)
- SetPageError(page);
+ /* (cold path) one pcluster is requested multiple times */
+ item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
+ item->bvec = *bvec;
+ list_add(&item->list, &be->decompressed_secondary_bvecs);
+}
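/*
 * Illustrative sketch (not part of this patch): the placement test used
 * by z_erofs_do_decompressed_bvec() above, assuming 4 KiB pages.  A
 * decompressed bvec goes straight into the output page array only if it
 * starts page-aligned and either fills a whole page or ends exactly at
 * the pcluster length; everything else is queued on the secondary list
 * and copied by z_erofs_fill_other_copies() afterwards.
 */
#include <stdio.h>
#include <stdbool.h>

static bool sketch_direct_placement(int offset, unsigned int end,
				    unsigned int pcl_length,
				    unsigned int pageofs_out)
{
	int poff = offset + pageofs_out;

	return !(poff & 4095) &&
	       (end == 4096 || (unsigned int)(offset + end) == pcl_length);
}

int main(void)
{
	/* full page at the start of the output: direct placement */
	printf("%d\n", sketch_direct_placement(0, 4096, 10000, 0));
	/* short tail that ends exactly at the pcluster length: also direct */
	printf("%d\n", sketch_direct_placement(8192, 1808, 10000, 0));
	/* misaligned middle piece: secondary copy */
	printf("%d\n", sketch_direct_placement(100, 4096, 10000, 0));
	return 0;
}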
- if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
- if (!err)
- SetPageUptodate(page);
- unlock_page(page);
+static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
+{
+ unsigned int off0 = be->pcl->pageofs_out;
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
+ struct z_erofs_bvec_item *bvi;
+ unsigned int end, cur;
+ void *dst, *src;
+
+ bvi = container_of(p, struct z_erofs_bvec_item, list);
+ cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
+ end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
+ bvi->bvec.end);
+ dst = kmap_local_page(bvi->bvec.page);
+ while (cur < end) {
+ unsigned int pgnr, scur, len;
+
+ pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
+ DBG_BUGON(pgnr >= be->nr_pages);
+
+ scur = bvi->bvec.offset + cur -
+ ((pgnr << PAGE_SHIFT) - off0);
+ len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
+ if (!be->decompressed_pages[pgnr]) {
+ err = -EFSCORRUPTED;
+ cur += len;
+ continue;
+ }
+ src = kmap_local_page(be->decompressed_pages[pgnr]);
+ memcpy(dst + cur, src + scur, len);
+ kunmap_local(src);
+ cur += len;
}
+ kunmap_local(dst);
+ erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true);
+ list_del(p);
+ kfree(bvi);
}
- z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
- bio_put(bio);
}
-static int z_erofs_decompress_pcluster(struct super_block *sb,
- struct z_erofs_pcluster *pcl,
- struct page **pagepool)
+static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- struct z_erofs_pagevec_ctor ctor;
- unsigned int i, inputsize, outputsize, llen, nr_pages;
- struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
- struct page **pages, **compressed_pages, *page;
-
- enum z_erofs_page_type page_type;
- bool overlapped, partial;
- struct z_erofs_collection *cl;
- int err;
-
- might_sleep();
- cl = z_erofs_primarycollection(pcl);
- DBG_BUGON(!READ_ONCE(cl->nr_pages));
+ struct z_erofs_pcluster *pcl = be->pcl;
+ struct z_erofs_bvec_iter biter;
+ struct page *old_bvpage;
+ int i;
- mutex_lock(&cl->lock);
- nr_pages = cl->nr_pages;
+ z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
+ for (i = 0; i < pcl->vcnt; ++i) {
+ struct z_erofs_bvec bvec;
- if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
- pages = pages_onstack;
- } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
- mutex_trylock(&z_pagemap_global_lock)) {
- pages = z_pagemap_global;
- } else {
- gfp_t gfp_flags = GFP_KERNEL;
+ z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
- if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
- gfp_flags |= __GFP_NOFAIL;
+ if (old_bvpage)
+ z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
- pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- gfp_flags);
-
- /* fallback to global pagemap for the lowmem scenario */
- if (!pages) {
- mutex_lock(&z_pagemap_global_lock);
- pages = z_pagemap_global;
- }
+ DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
+ z_erofs_do_decompressed_bvec(be, &bvec);
}
- for (i = 0; i < nr_pages; ++i)
- pages[i] = NULL;
-
- err = 0;
- z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
- cl->pagevec, 0);
+ old_bvpage = z_erofs_bvec_iter_end(&biter);
+ if (old_bvpage)
+ z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
+}
- for (i = 0; i < cl->vcnt; ++i) {
- unsigned int pagenr;
+static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
+{
+ struct z_erofs_pcluster *pcl = be->pcl;
+ unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ int i, err = 0;
+
+ *overlapped = false;
+ for (i = 0; i < pclusterpages; ++i) {
+ struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
+ struct page *page = bvec->page;
+
+ /* compressed data ought to be valid when decompressing */
+ if (IS_ERR(page) || !page) {
+ bvec->page = NULL; /* clear the failure reason */
+ err = page ? PTR_ERR(page) : -EIO;
+ continue;
+ }
+ be->compressed_pages[i] = page;
- page = z_erofs_pagevec_dequeue(&ctor, &page_type);
+ if (pcl->from_meta ||
+ erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
+ if (!PageUptodate(page))
+ err = -EIO;
+ continue;
+ }
- /* all pages in pagevec ought to be valid */
- DBG_BUGON(!page);
DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- if (z_erofs_put_shortlivedpage(pagepool, page))
+ if (z_erofs_is_shortlived_page(page))
continue;
+ z_erofs_do_decompressed_bvec(be, bvec);
+ *overlapped = true;
+ }
+ return err;
+}
- if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
- pagenr = 0;
- else
- pagenr = z_erofs_onlinepage_index(page);
-
- DBG_BUGON(pagenr >= nr_pages);
+static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
+ struct z_erofs_pcluster *pcl = be->pcl;
+ unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ const struct z_erofs_decompressor *alg =
+ z_erofs_decomp[pcl->algorithmformat];
+ bool try_free = true;
+ int i, j, jtop, err2;
+ struct page *page;
+ bool overlapped;
+ const char *reason;
+
+ mutex_lock(&pcl->lock);
+ be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
+
+ /* allocate (de)compressed page arrays if cannot be kept on stack */
+ be->decompressed_pages = NULL;
+ be->compressed_pages = NULL;
+ be->onstack_used = 0;
+ if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
+ be->decompressed_pages = be->onstack_pages;
+ be->onstack_used = be->nr_pages;
+ memset(be->decompressed_pages, 0,
+ sizeof(struct page *) * be->nr_pages);
+ }
- /*
- * currently EROFS doesn't support multiref(dedup),
- * so here erroring out one multiref page.
- */
- if (pages[pagenr]) {
- DBG_BUGON(1);
- SetPageError(pages[pagenr]);
- z_erofs_onlinepage_endio(pages[pagenr]);
+ if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
+ be->compressed_pages = be->onstack_pages + be->onstack_used;
+
+ if (!be->decompressed_pages)
+ be->decompressed_pages =
+ kvcalloc(be->nr_pages, sizeof(struct page *),
+ GFP_KERNEL | __GFP_NOFAIL);
+ if (!be->compressed_pages)
+ be->compressed_pages =
+ kvcalloc(pclusterpages, sizeof(struct page *),
+ GFP_KERNEL | __GFP_NOFAIL);
+
+ z_erofs_parse_out_bvecs(be);
+ err2 = z_erofs_parse_in_bvecs(be, &overlapped);
+ if (err2)
+ err = err2;
+ if (!err) {
+ reason = alg->decompress(&(struct z_erofs_decompress_req) {
+ .sb = be->sb,
+ .in = be->compressed_pages,
+ .out = be->decompressed_pages,
+ .inpages = pclusterpages,
+ .outpages = be->nr_pages,
+ .pageofs_in = pcl->pageofs_in,
+ .pageofs_out = pcl->pageofs_out,
+ .inputsize = pcl->pclustersize,
+ .outputsize = pcl->length,
+ .alg = pcl->algorithmformat,
+ .inplace_io = overlapped,
+ .partial_decoding = pcl->partial,
+ .fillgaps = be->keepxcpy,
+ .gfp = pcl->besteffort ? GFP_KERNEL :
+ GFP_NOWAIT | __GFP_NORETRY
+ }, be->pagepool);
+ if (IS_ERR(reason)) {
+ erofs_err(be->sb, "failed to decompress (%s) %ld @ pa %llu size %u => %u",
+ alg->name, PTR_ERR(reason), pcl->pos,
+ pcl->pclustersize, pcl->length);
+ err = PTR_ERR(reason);
+ } else if (unlikely(reason)) {
+ erofs_err(be->sb, "failed to decompress (%s) %s @ pa %llu size %u => %u",
+ alg->name, reason, pcl->pos,
+ pcl->pclustersize, pcl->length);
err = -EFSCORRUPTED;
}
- pages[pagenr] = page;
}
- z_erofs_pagevec_ctor_exit(&ctor, true);
- overlapped = false;
- compressed_pages = pcl->compressed_pages;
-
- for (i = 0; i < pcl->pclusterpages; ++i) {
- unsigned int pagenr;
-
- page = compressed_pages[i];
-
- /* all compressed pages ought to be valid */
- DBG_BUGON(!page);
- DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- if (!z_erofs_is_shortlived_page(page)) {
- if (erofs_page_is_managed(sbi, page)) {
- if (!PageUptodate(page))
- err = -EIO;
+ /* must handle all compressed pages before actual file pages */
+ if (pcl->from_meta) {
+ folio_put(page_folio(pcl->compressed_bvecs[0].page));
+ WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
+ } else {
+ /* managed folios are still left in compressed_bvecs[] */
+ for (i = 0; i < pclusterpages; ++i) {
+ page = be->compressed_pages[i];
+ if (!page)
+ continue;
+ if (erofs_folio_is_managed(sbi, page_folio(page))) {
+ try_free = false;
continue;
}
-
- /*
- * only if non-head page can be selected
- * for inplace decompression
- */
- pagenr = z_erofs_onlinepage_index(page);
-
- DBG_BUGON(pagenr >= nr_pages);
- if (pages[pagenr]) {
- DBG_BUGON(1);
- SetPageError(pages[pagenr]);
- z_erofs_onlinepage_endio(pages[pagenr]);
- err = -EFSCORRUPTED;
- }
- pages[pagenr] = page;
-
- overlapped = true;
- }
-
- /* PG_error needs checking for all non-managed pages */
- if (PageError(page)) {
- DBG_BUGON(PageUptodate(page));
- err = -EIO;
+ (void)z_erofs_put_shortlivedpage(be->pagepool, page);
+ WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
}
}
-
- if (err)
- goto out;
-
- llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
- if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
- outputsize = llen;
- partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
- } else {
- outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
- partial = true;
- }
-
- inputsize = pcl->pclusterpages * PAGE_SIZE;
- err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
- .sb = sb,
- .in = compressed_pages,
- .out = pages,
- .pageofs_out = cl->pageofs,
- .inputsize = inputsize,
- .outputsize = outputsize,
- .alg = pcl->algorithmformat,
- .inplace_io = overlapped,
- .partial_decoding = partial
- }, pagepool);
-
-out:
- /* must handle all compressed pages before ending pages */
- for (i = 0; i < pcl->pclusterpages; ++i) {
- page = compressed_pages[i];
-
- if (erofs_page_is_managed(sbi, page))
- continue;
-
- /* recycle all individual short-lived pages */
- (void)z_erofs_put_shortlivedpage(pagepool, page);
-
- WRITE_ONCE(compressed_pages[i], NULL);
- }
-
- for (i = 0; i < nr_pages; ++i) {
- page = pages[i];
+ if (be->compressed_pages < be->onstack_pages ||
+ be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+ kvfree(be->compressed_pages);
+
+ jtop = 0;
+ z_erofs_fill_other_copies(be, err);
+ for (i = 0; i < be->nr_pages; ++i) {
+ page = be->decompressed_pages[i];
if (!page)
continue;
DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- /* recycle all individual short-lived pages */
- if (z_erofs_put_shortlivedpage(pagepool, page))
+ if (!z_erofs_is_shortlived_page(page)) {
+ erofs_onlinefolio_end(page_folio(page), err, true);
continue;
-
- if (err < 0)
- SetPageError(page);
-
- z_erofs_onlinepage_endio(page);
+ }
+ if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
+ erofs_pagepool_add(be->pagepool, page);
+ continue;
+ }
+ for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
+ ;
+ if (j >= jtop) /* this bounce page is newly detected */
+ be->decompressed_pages[jtop++] = page;
}
-
- if (pages == z_pagemap_global)
- mutex_unlock(&z_pagemap_global_lock);
- else if (pages != pages_onstack)
- kvfree(pages);
-
- cl->nr_pages = 0;
- cl->vcnt = 0;
-
- /* all cl locks MUST be taken before the following line */
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
-
- /* all cl locks SHOULD be released right now */
- mutex_unlock(&cl->lock);
-
- z_erofs_collection_put(cl);
+ while (jtop)
+ erofs_pagepool_add(be->pagepool,
+ be->decompressed_pages[--jtop]);
+ if (be->decompressed_pages != be->onstack_pages)
+ kvfree(be->decompressed_pages);
+
+ pcl->length = 0;
+ pcl->partial = true;
+ pcl->besteffort = false;
+ pcl->bvset.nextpage = NULL;
+ pcl->vcnt = 0;
+
+ /* pcluster lock MUST be taken before the following line */
+ WRITE_ONCE(pcl->next, NULL);
+ mutex_unlock(&pcl->lock);
+
+ if (pcl->from_meta)
+ z_erofs_free_pcluster(pcl);
+ else
+ z_erofs_put_pcluster(sbi, pcl, try_free);
return err;
}
-static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
- struct page **pagepool)
+static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ struct page **pagepool)
{
- z_erofs_next_pcluster_t owned = io->head;
-
- while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
- struct z_erofs_pcluster *pcl;
-
- /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
-
- /* no possible that 'owned' equals NULL */
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
-
- pcl = container_of(owned, struct z_erofs_pcluster, next);
- owned = READ_ONCE(pcl->next);
-
- z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
+ struct z_erofs_backend be = {
+ .sb = io->sb,
+ .pagepool = pagepool,
+ .decompressed_secondary_bvecs =
+ LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
+ .pcl = io->head,
+ };
+ struct z_erofs_pcluster *next;
+ int err = io->eio ? -EIO : 0;
+
+ for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
+ DBG_BUGON(!be.pcl);
+ next = READ_ONCE(be.pcl->next);
+ err = z_erofs_decompress_pcluster(&be, err) ?: err;
}
+ return err;
}
static void z_erofs_decompressqueue_work(struct work_struct *work)
@@ -1060,138 +1429,174 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
container_of(work, struct z_erofs_decompressqueue, u.work);
struct page *pagepool = NULL;
- DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
z_erofs_decompress_queue(bgq, &pagepool);
-
erofs_release_pages(&pagepool);
kvfree(bgq);
}
-static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
- unsigned int nr,
- struct page **pagepool,
- struct address_space *mc,
- gfp_t gfp)
+#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
+static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
- const pgoff_t index = pcl->obj.index;
- bool tocache = false;
-
- struct address_space *mapping;
- struct page *oldpage, *page;
+ z_erofs_decompressqueue_work((struct work_struct *)work);
+}
+#endif
- compressed_page_t t;
- int justfound;
+/* Use (kthread_)work in atomic contexts to minimize scheduling overhead */
+static inline bool z_erofs_in_atomic(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth())
+ return true;
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return true;
+ return !preemptible();
+}
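/*
 * [Added note, not part of the original patch] z_erofs_in_atomic() errs on
 * the side of deferral: without CONFIG_PREEMPT_COUNT the kernel does not
 * track preemption depth, so atomic contexts cannot be detected reliably
 * and the work is always queued; with preemptible RCU, an RCU read-side
 * section does not show up in preempt_count(), hence the explicit
 * rcu_preempt_depth() check before falling back to !preemptible().
 */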
-repeat:
- page = READ_ONCE(pcl->compressed_pages[nr]);
- oldpage = page;
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ int bios)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
- if (!page)
- goto out_allocpage;
+ /* wake up the caller thread for sync decompression */
+ if (io->sync) {
+ if (!atomic_add_return(bios, &io->pending_bios))
+ complete(&io->u.done);
+ return;
+ }
- /*
- * the cached page has not been allocated and
- * an placeholder is out there, prepare it now.
- */
- if (page == PAGE_UNALLOCATED) {
- tocache = true;
- goto out_allocpage;
+ if (atomic_add_return(bios, &io->pending_bios))
+ return;
+ if (z_erofs_in_atomic()) {
+#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
+ struct kthread_worker *worker;
+
+ rcu_read_lock();
+ worker = rcu_dereference(
+ z_erofs_pcpu_workers[raw_smp_processor_id()]);
+ if (!worker) {
+ INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
+ queue_work(z_erofs_workqueue, &io->u.work);
+ } else {
+ kthread_queue_work(worker, &io->u.kthread_work);
+ }
+ rcu_read_unlock();
+#else
+ queue_work(z_erofs_workqueue, &io->u.work);
+#endif
+ /* enable sync decompression for readahead */
+ if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+ return;
}
+ z_erofs_decompressqueue_work(&io->u.work);
+}
- /* process the target tagged pointer */
- t = tagptr_init(compressed_page_t, page);
- justfound = tagptr_unfold_tags(t);
- page = tagptr_unfold_ptr(t);
+static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
+ struct z_erofs_frontend *f,
+ struct z_erofs_pcluster *pcl,
+ unsigned int nr,
+ struct address_space *mc)
+{
+ gfp_t gfp = mapping_gfp_mask(mc);
+ bool tocache = false;
+ struct z_erofs_bvec zbv;
+ struct address_space *mapping;
+ struct folio *folio;
+ struct page *page;
+ int bs = i_blocksize(f->inode);
- /*
- * preallocated cached pages, which is used to avoid direct reclaim
- * otherwise, it will go inplace I/O path instead.
- */
- if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
- WRITE_ONCE(pcl->compressed_pages[nr], page);
- set_page_private(page, 0);
+ /* Except for inplace folios, the entire folio can be used for I/Os */
+ bvec->bv_offset = 0;
+ bvec->bv_len = PAGE_SIZE;
+repeat:
+ spin_lock(&pcl->lockref.lock);
+ zbv = pcl->compressed_bvecs[nr];
+ spin_unlock(&pcl->lockref.lock);
+ if (!zbv.page)
+ goto out_allocfolio;
+
+ bvec->bv_page = zbv.page;
+ DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
+
+ folio = page_folio(zbv.page);
+ /* For preallocated managed folios, add them to page cache here */
+ if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) {
tocache = true;
goto out_tocache;
}
- mapping = READ_ONCE(page->mapping);
+ mapping = READ_ONCE(folio->mapping);
/*
- * file-backed online pages in plcuster are all locked steady,
- * therefore it is impossible for `mapping' to be NULL.
+ * File-backed folios for inplace I/Os are all locked steady,
+ * therefore it is impossible for `mapping` to be NULL.
*/
- if (mapping && mapping != mc)
- /* ought to be unmanaged pages */
- goto out;
-
- /* directly return for shortlived page as well */
- if (z_erofs_is_shortlived_page(page))
- goto out;
-
- lock_page(page);
-
- /* only true if page reclaim goes wrong, should never happen */
- DBG_BUGON(justfound && PagePrivate(page));
-
- /* the page is still in manage cache */
- if (page->mapping == mc) {
- WRITE_ONCE(pcl->compressed_pages[nr], page);
-
- ClearPageError(page);
- if (!PagePrivate(page)) {
- /*
- * impossible to be !PagePrivate(page) for
- * the current restriction as well if
- * the page is already in compressed_pages[].
- */
- DBG_BUGON(!justfound);
+ if (mapping && mapping != mc) {
+ if (zbv.offset < 0)
+ bvec->bv_offset = round_up(-zbv.offset, bs);
+ bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
+ return;
+ }
- justfound = 0;
- set_page_private(page, (unsigned long)pcl);
- SetPagePrivate(page);
+ folio_lock(folio);
+ if (likely(folio->mapping == mc)) {
+ /*
+ * The cached folio is still in managed cache but without
+ * a valid `->private` pcluster hint. Let's reconnect them.
+ */
+ if (!folio_test_private(folio)) {
+ folio_attach_private(folio, pcl);
+ /* compressed_bvecs[] already takes a ref before */
+ folio_put(folio);
}
-
- /* no need to submit io if it is already up-to-date */
- if (PageUptodate(page)) {
- unlock_page(page);
- page = NULL;
+ if (likely(folio->private == pcl)) {
+ /* don't submit cache I/Os again if already uptodate */
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ bvec->bv_page = NULL;
+ }
+ return;
}
- goto out;
+ /*
+ * Already linked with another pcluster, which only appears in
+ * crafted images by fuzzers for now. But handle this anyway.
+ */
+ tocache = false; /* use temporary short-lived pages */
+ } else {
+ DBG_BUGON(1); /* referenced managed folios can't be truncated */
+ tocache = true;
}
-
- /*
- * the managed page has been truncated, it's unsafe to
- * reuse this one, let's allocate a new cache-managed page.
- */
- DBG_BUGON(page->mapping);
- DBG_BUGON(!justfound);
-
- tocache = true;
- unlock_page(page);
- put_page(page);
-out_allocpage:
- page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
- if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
- erofs_pagepool_add(pagepool, page);
+ folio_unlock(folio);
+ folio_put(folio);
+out_allocfolio:
+ page = __erofs_allocpage(&f->pagepool, gfp, true);
+ spin_lock(&pcl->lockref.lock);
+ if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
+ if (page)
+ erofs_pagepool_add(&f->pagepool, page);
+ spin_unlock(&pcl->lockref.lock);
cond_resched();
goto repeat;
}
+ pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
+ spin_unlock(&pcl->lockref.lock);
+ bvec->bv_page = page;
+ if (!page)
+ return;
+ folio = page_folio(page);
out_tocache:
- if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
- /* turn into temporary page if fails (1 ref) */
- set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
- goto out;
+ if (!tocache || bs != PAGE_SIZE ||
+ filemap_add_folio(mc, folio, (pcl->pos >> PAGE_SHIFT) + nr, gfp)) {
+ /* turn into a temporary shortlived folio (1 ref) */
+ folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
+ return;
}
- attach_page_private(page, pcl);
- /* drop a refcount added by allocpage (then we have 2 refs here) */
- put_page(page);
-
-out: /* the only exit (for tracing and debugging) */
- return page;
+ folio_attach_private(folio, pcl);
+ /* drop a refcount added by allocpage (then 2 refs in total here) */
+ folio_put(folio);
}
-static struct z_erofs_decompressqueue *
-jobqueue_init(struct super_block *sb,
- struct z_erofs_decompressqueue *fgq, bool *fg)
+static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
+ struct z_erofs_decompressqueue *fgq, bool *fg)
{
struct z_erofs_decompressqueue *q;
@@ -1201,15 +1606,22 @@ jobqueue_init(struct super_block *sb,
*fg = true;
goto fg_out;
}
+#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
+ kthread_init_work(&q->u.kthread_work,
+ z_erofs_decompressqueue_kthread_work);
+#else
INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
+#endif
} else {
fg_out:
q = fgq;
- init_waitqueue_head(&fgq->u.wait);
+ init_completion(&fgq->u.done);
atomic_set(&fgq->pending_bios, 0);
+ q->eio = false;
+ q->sync = true;
}
q->sb = sb;
- q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+ q->head = Z_EROFS_PCLUSTER_TAIL;
return q;
}
@@ -1220,314 +1632,312 @@ enum {
NR_JOBQUEUES,
};
-static void *jobqueueset_init(struct super_block *sb,
- struct z_erofs_decompressqueue *q[],
- struct z_erofs_decompressqueue *fgq, bool *fg)
+static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl,
+ struct z_erofs_pcluster *next,
+ struct z_erofs_pcluster **qtail[])
{
- /*
- * if managed cache is enabled, bypass jobqueue is needed,
- * no need to read from device for all pclusters in this queue.
- */
- q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
- q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
-
- return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
+ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
+ WRITE_ONCE(*qtail[JQ_SUBMIT], next);
+ WRITE_ONCE(*qtail[JQ_BYPASS], pcl);
+ qtail[JQ_BYPASS] = &pcl->next;
}
-static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
- z_erofs_next_pcluster_t qtail[],
- z_erofs_next_pcluster_t owned_head)
+static void z_erofs_endio(struct bio *bio)
{
- z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
- z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
-
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- if (owned_head == Z_EROFS_PCLUSTER_TAIL)
- owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+ struct z_erofs_decompressqueue *q = bio->bi_private;
+ blk_status_t err = bio->bi_status;
+ struct folio_iter fi;
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- WRITE_ONCE(*submit_qtail, owned_head);
- WRITE_ONCE(*bypass_qtail, &pcl->next);
+ DBG_BUGON(folio_test_uptodate(folio));
+ DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
+ if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
+ continue;
- qtail[JQ_BYPASS] = &pcl->next;
+ if (!err)
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ }
+ if (err)
+ q->eio = true;
+ z_erofs_decompress_kickoff(q, -1);
+ if (bio->bi_bdev)
+ bio_put(bio);
}
-static void z_erofs_submit_queue(struct super_block *sb,
- struct z_erofs_decompress_frontend *f,
- struct page **pagepool,
+static void z_erofs_submit_queue(struct z_erofs_frontend *f,
struct z_erofs_decompressqueue *fgq,
- bool *force_fg)
+ bool *force_fg, bool readahead)
{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+ struct super_block *sb = f->inode->i_sb;
+ struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
+ struct z_erofs_pcluster **qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
- void *bi_private;
- z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
+ struct z_erofs_pcluster *pcl, *next;
/* bio is NULL initially, so no need to initialize last_{index,bdev} */
- pgoff_t last_index;
- struct block_device *last_bdev;
+ erofs_off_t last_pa;
unsigned int nr_bios = 0;
struct bio *bio = NULL;
+ unsigned long pflags;
+ int memstall = 0;
+
+ /* No need to read from device for pclusters in the bypass queue. */
+ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
+ q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
- bi_private = jobqueueset_init(sb, q, fgq, force_fg);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
/* by default, all need io submission */
- q[JQ_SUBMIT]->head = owned_head;
+ q[JQ_SUBMIT]->head = next = f->head;
do {
struct erofs_map_dev mdev;
- struct z_erofs_pcluster *pcl;
- pgoff_t cur, end;
+ erofs_off_t cur, end;
+ struct bio_vec bvec;
unsigned int i = 0;
bool bypass = true;
- /* no possible 'owned_head' equals the following */
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
-
- pcl = container_of(owned_head, struct z_erofs_pcluster, next);
+ pcl = next;
+ next = READ_ONCE(pcl->next);
+ if (pcl->from_meta) {
+ z_erofs_move_to_bypass_queue(pcl, next, qtail);
+ continue;
+ }
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
- .m_pa = blknr_to_addr(pcl->obj.index),
+ .m_pa = round_down(pcl->pos, sb->s_blocksize),
};
(void)erofs_map_dev(sb, &mdev);
- cur = erofs_blknr(mdev.m_pa);
- end = cur + pcl->pclusterpages;
-
- /* close the main owned chain at first */
- owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
- Z_EROFS_PCLUSTER_TAIL_CLOSED);
-
+ cur = mdev.m_pa;
+ end = round_up(cur + pcl->pageofs_in + pcl->pclustersize,
+ sb->s_blocksize);
do {
- struct page *page;
+ bvec.bv_page = NULL;
+ if (bio && (cur != last_pa ||
+ bio->bi_bdev != mdev.m_bdev)) {
+drain_io:
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ erofs_fileio_submit_bio(bio);
+ else if (erofs_is_fscache_mode(sb))
+ erofs_fscache_submit_bio(bio);
+ else
+ submit_bio(bio);
+
+ if (memstall) {
+ psi_memstall_leave(&pflags);
+ memstall = 0;
+ }
+ bio = NULL;
+ }
- page = pickup_page_for_submission(pcl, i++, pagepool,
- MNGD_MAPPING(sbi),
- GFP_NOFS);
- if (!page)
- continue;
+ if (!bvec.bv_page) {
+ z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
+ if (!bvec.bv_page)
+ continue;
+ if (cur + bvec.bv_len > end)
+ bvec.bv_len = end - cur;
+ DBG_BUGON(bvec.bv_len < sb->s_blocksize);
+ }
- if (bio && (cur != last_index + 1 ||
- last_bdev != mdev.m_bdev)) {
-submit_bio_retry:
- submit_bio(bio);
- bio = NULL;
+ if (unlikely(PageWorkingset(bvec.bv_page)) &&
+ !memstall) {
+ psi_memstall_enter(&pflags);
+ memstall = 1;
}
if (!bio) {
- bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
- bio->bi_end_io = z_erofs_decompressqueue_endio;
-
- bio_set_dev(bio, mdev.m_bdev);
- last_bdev = mdev.m_bdev;
- bio->bi_iter.bi_sector = (sector_t)cur <<
- LOG_SECTORS_PER_BLOCK;
- bio->bi_private = bi_private;
- bio->bi_opf = REQ_OP_READ;
- if (f->readahead)
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ bio = erofs_fileio_bio_alloc(&mdev);
+ else if (erofs_is_fscache_mode(sb))
+ bio = erofs_fscache_bio_alloc(&mdev);
+ else
+ bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
+ REQ_OP_READ, GFP_NOIO);
+ bio->bi_end_io = z_erofs_endio;
+ bio->bi_iter.bi_sector =
+ (mdev.m_dif->fsoff + cur) >> 9;
+ bio->bi_private = q[JQ_SUBMIT];
+ if (readahead)
bio->bi_opf |= REQ_RAHEAD;
++nr_bios;
}
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
- goto submit_bio_retry;
-
- last_index = cur;
+ if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
+ bvec.bv_offset))
+ goto drain_io;
+ last_pa = cur + bvec.bv_len;
bypass = false;
- } while (++cur < end);
+ } while ((cur += bvec.bv_len) < end);
if (!bypass)
qtail[JQ_SUBMIT] = &pcl->next;
else
- move_to_bypass_jobqueue(pcl, qtail, owned_head);
- } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
-
- if (bio)
- submit_bio(bio);
+ z_erofs_move_to_bypass_queue(pcl, next, qtail);
+ } while (next != Z_EROFS_PCLUSTER_TAIL);
+
+ if (bio) {
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ erofs_fileio_submit_bio(bio);
+ else if (erofs_is_fscache_mode(sb))
+ erofs_fscache_submit_bio(bio);
+ else
+ submit_bio(bio);
+ }
+ if (memstall)
+ psi_memstall_leave(&pflags);
/*
* although background is preferred, no one is pending for submission.
- * don't issue workqueue for decompression but drop it directly instead.
+ * don't issue decompression but drop it directly instead.
*/
if (!*force_fg && !nr_bios) {
kvfree(q[JQ_SUBMIT]);
return;
}
- z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
+ z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}
-static void z_erofs_runqueue(struct super_block *sb,
- struct z_erofs_decompress_frontend *f,
- struct page **pagepool, bool force_fg)
+static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
+ struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
+ bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
+ int err;
- if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
- return;
- z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
+ if (f->head == Z_EROFS_PCLUSTER_TAIL)
+ return 0;
+ z_erofs_submit_queue(f, io, &force_fg, !!rapages);
/* handle bypass queue (no i/o pclusters) immediately */
- z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
-
+ err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
if (!force_fg)
- return;
+ return err;
/* wait until all bios are completed */
- io_wait_event(io[JQ_SUBMIT].u.wait,
- !atomic_read(&io[JQ_SUBMIT].pending_bios));
+ wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */
- z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
+ return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err;
}
/*
* Since partial uptodate is still unimplemented for now, we have to use
* approximate readmore strategies as a start.
*/
-static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
- struct readahead_control *rac,
- erofs_off_t end,
- struct page **pagepool,
- bool backmost)
+static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
+ struct readahead_control *rac, bool backmost)
{
struct inode *inode = f->inode;
struct erofs_map_blocks *map = &f->map;
- erofs_off_t cur;
+ erofs_off_t cur, end, headoffset = f->headoffset;
int err;
if (backmost) {
+ if (rac)
+ end = headoffset + readahead_length(rac) - 1;
+ else
+ end = headoffset + PAGE_SIZE - 1;
map->m_la = end;
err = z_erofs_map_blocks_iter(inode, map,
EROFS_GET_BLOCKS_READMORE);
- if (err)
+ if (err || !(map->m_flags & EROFS_MAP_ENCODED))
return;
- /* expend ra for the trailing edge if readahead */
+ /* expand ra for the trailing edge if readahead */
if (rac) {
- loff_t newstart = readahead_pos(rac);
-
cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
- readahead_expand(rac, newstart, cur - newstart);
+ readahead_expand(rac, headoffset, cur - headoffset);
return;
}
end = round_up(end, PAGE_SIZE);
} else {
end = round_up(map->m_la, PAGE_SIZE);
-
- if (!map->m_llen)
+ if (!(map->m_flags & EROFS_MAP_ENCODED) || !map->m_llen)
return;
}
cur = map->m_la + map->m_llen - 1;
- while (cur >= end) {
+ while ((cur >= end) && (cur < i_size_read(inode))) {
pgoff_t index = cur >> PAGE_SHIFT;
- struct page *page;
-
- page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
- if (!page)
- goto skip;
-
- if (PageUptodate(page)) {
- unlock_page(page);
- put_page(page);
- goto skip;
+ struct folio *folio;
+
+ folio = erofs_grab_folio_nowait(inode->i_mapping, index);
+ if (!IS_ERR_OR_NULL(folio)) {
+ if (folio_test_uptodate(folio))
+ folio_unlock(folio);
+ else
+ z_erofs_scan_folio(f, folio, !!rac);
+ folio_put(folio);
}
- err = z_erofs_do_read_page(f, page, pagepool);
- if (err)
- erofs_err(inode->i_sb,
- "readmore error at page %lu @ nid %llu",
- index, EROFS_I(inode)->nid);
- put_page(page);
-skip:
if (cur < PAGE_SIZE)
break;
cur = (index << PAGE_SHIFT) - 1;
}
}
-static int z_erofs_readpage(struct file *file, struct page *page)
+static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
- struct inode *const inode = page->mapping->host;
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- struct page *pagepool = NULL;
+ struct inode *const inode = folio->mapping->host;
+ Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
int err;
- trace_erofs_readpage(page, false);
- f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
-
- z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
- &pagepool, true);
- err = z_erofs_do_read_page(&f, page, &pagepool);
- z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
-
- (void)z_erofs_collector_end(&f.clt);
-
- /* if some compressed cluster ready, need submit them anyway */
- z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
+ trace_erofs_read_folio(folio, false);
+ z_erofs_pcluster_readmore(&f, NULL, true);
+ err = z_erofs_scan_folio(&f, folio, false);
+ z_erofs_pcluster_readmore(&f, NULL, false);
+ z_erofs_pcluster_end(&f);
- if (err)
- erofs_err(inode->i_sb, "failed to read, err [%d]", err);
-
- if (f.map.mpage)
- put_page(f.map.mpage);
+ /* if some pclusters are ready, need submit them anyway */
+ err = z_erofs_runqueue(&f, 0) ?: err;
+ if (err && err != -EINTR)
+ erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
+ err, folio->index, EROFS_I(inode)->nid);
- erofs_release_pages(&pagepool);
+ erofs_put_metabuf(&f.map.buf);
+ erofs_release_pages(&f.pagepool);
return err;
}
static void z_erofs_readahead(struct readahead_control *rac)
{
struct inode *const inode = rac->mapping->host;
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- struct page *pagepool = NULL, *head = NULL, *page;
- unsigned int nr_pages;
-
- f.readahead = true;
- f.headoffset = readahead_pos(rac);
-
- z_erofs_pcluster_readmore(&f, rac, f.headoffset +
- readahead_length(rac) - 1, &pagepool, true);
- nr_pages = readahead_count(rac);
- trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
-
- while ((page = readahead_page(rac))) {
- set_page_private(page, (unsigned long)head);
- head = page;
+ Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
+ unsigned int nrpages = readahead_count(rac);
+ struct folio *head = NULL, *folio;
+ int err;
+
+ trace_erofs_readahead(inode, readahead_index(rac), nrpages, false);
+ z_erofs_pcluster_readmore(&f, rac, true);
+ while ((folio = readahead_folio(rac))) {
+ folio->private = head;
+ head = folio;
}
+ /* traverse in reverse order for best metadata I/O performance */
while (head) {
- struct page *page = head;
- int err;
-
- /* traversal in reverse order */
- head = (void *)page_private(page);
-
- err = z_erofs_do_read_page(&f, page, &pagepool);
- if (err)
- erofs_err(inode->i_sb,
- "readahead error at page %lu @ nid %llu",
- page->index, EROFS_I(inode)->nid);
- put_page(page);
+ folio = head;
+ head = folio_get_private(folio);
+
+ err = z_erofs_scan_folio(&f, folio, true);
+ if (err && err != -EINTR)
+ erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
+ folio->index, EROFS_I(inode)->nid);
}
- z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
- (void)z_erofs_collector_end(&f.clt);
-
- z_erofs_runqueue(inode->i_sb, &f, &pagepool,
- sbi->opt.readahead_sync_decompress &&
- nr_pages <= sbi->opt.max_sync_decompress_pages);
- if (f.map.mpage)
- put_page(f.map.mpage);
- erofs_release_pages(&pagepool);
+ z_erofs_pcluster_readmore(&f, rac, false);
+ z_erofs_pcluster_end(&f);
+
+ (void)z_erofs_runqueue(&f, nrpages);
+ erofs_put_metabuf(&f.map.buf);
+ erofs_release_pages(&f.pagepool);
}
const struct address_space_operations z_erofs_aops = {
- .readpage = z_erofs_readpage,
+ .read_folio = z_erofs_read_folio,
.readahead = z_erofs_readahead,
};
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
deleted file mode 100644
index 879df5362777..000000000000
--- a/fs/erofs/zdata.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2018 HUAWEI, Inc.
- * https://www.huawei.com/
- */
-#ifndef __EROFS_FS_ZDATA_H
-#define __EROFS_FS_ZDATA_H
-
-#include "internal.h"
-#include "zpvec.h"
-
-#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
-#define Z_EROFS_NR_INLINE_PAGEVECS 3
-
-/*
- * Structure fields follow one of the following exclusion rules.
- *
- * I: Modifiable by initialization/destruction paths and read-only
- * for everyone else;
- *
- * L: Field should be protected by pageset lock;
- *
- * A: Field should be accessed / updated in atomic for parallelized code.
- */
-struct z_erofs_collection {
- struct mutex lock;
-
- /* I: page offset of start position of decompression */
- unsigned short pageofs;
-
- /* L: maximum relative page index in pagevec[] */
- unsigned short nr_pages;
-
- /* L: total number of pages in pagevec[] */
- unsigned int vcnt;
-
- union {
- /* L: inline a certain number of pagevecs for bootstrap */
- erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
-
- /* I: can be used to free the pcluster by RCU. */
- struct rcu_head rcu;
- };
-};
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
-
-/*
- * let's leave a type here in case of introducing
- * another tagged pointer later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
-struct z_erofs_pcluster {
- struct erofs_workgroup obj;
- struct z_erofs_collection primary_collection;
-
- /* A: point to next chained pcluster or TAILs */
- z_erofs_next_pcluster_t next;
-
- /* A: lower limit of decompressed length and if full length or not */
- unsigned int length;
-
- /* I: physical cluster size in pages */
- unsigned short pclusterpages;
-
- /* I: compression algorithm format */
- unsigned char algorithmformat;
-
- /* A: compressed pages (can be cached or inplaced pages) */
- struct page *compressed_pages[];
-};
-
-#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
-
-/* let's avoid the valid 32-bit kernel addresses */
-
-/* the chained workgroup has't submitted io (still open) */
-#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
-/* the chained workgroup has already submitted io */
-#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
-
-#define Z_EROFS_PCLUSTER_NIL (NULL)
-
-struct z_erofs_decompressqueue {
- struct super_block *sb;
- atomic_t pending_bios;
- z_erofs_next_pcluster_t head;
-
- union {
- wait_queue_head_t wait;
- struct work_struct work;
- } u;
-};
-
-#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
-#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
-#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
-
-/*
- * waiters (aka. ongoing_packs): # to unlock the page
- * sub-index: 0 - for partial page, >= 1 full page sub-index
- */
-typedef atomic_t z_erofs_onlinepage_t;
-
-/* type punning */
-union z_erofs_onlinepage_converter {
- z_erofs_onlinepage_t *o;
- unsigned long *v;
-};
-
-static inline unsigned int z_erofs_onlinepage_index(struct page *page)
-{
- union z_erofs_onlinepage_converter u;
-
- DBG_BUGON(!PagePrivate(page));
- u.v = &page_private(page);
-
- return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-}
-
-static inline void z_erofs_onlinepage_init(struct page *page)
-{
- union {
- z_erofs_onlinepage_t o;
- unsigned long v;
- /* keep from being unlocked in advance */
- } u = { .o = ATOMIC_INIT(1) };
-
- set_page_private(page, u.v);
- smp_wmb();
- SetPagePrivate(page);
-}
-
-static inline void z_erofs_onlinepage_fixup(struct page *page,
- uintptr_t index, bool down)
-{
- union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
- int orig, orig_index, val;
-
-repeat:
- orig = atomic_read(u.o);
- orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
- if (orig_index) {
- if (!index)
- return;
-
- DBG_BUGON(orig_index != index);
- }
-
- val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
- ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
- if (atomic_cmpxchg(u.o, orig, val) != orig)
- goto repeat;
-}
-
-static inline void z_erofs_onlinepage_endio(struct page *page)
-{
- union z_erofs_onlinepage_converter u;
- unsigned int v;
-
- DBG_BUGON(!PagePrivate(page));
- u.v = &page_private(page);
-
- v = atomic_dec_return(u.o);
- if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
- set_page_private(page, 0);
- ClearPagePrivate(page);
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- }
- erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
-}
-
-#define Z_EROFS_VMAP_ONSTACK_PAGES \
- min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
-#define Z_EROFS_VMAP_GLOBAL_PAGES 2048
-
-#endif
-
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 660489a7fb64..c8d8e129eb4b 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -4,285 +4,172 @@
* https://www.huawei.com/
*/
#include "internal.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <trace/events/erofs.h>
-int z_erofs_fill_inode(struct inode *inode)
-{
- struct erofs_inode *const vi = EROFS_I(inode);
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
- if (!erofs_sb_has_big_pcluster(sbi) &&
- vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
- vi->z_advise = 0;
- vi->z_algorithmtype[0] = 0;
- vi->z_algorithmtype[1] = 0;
- vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
- set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
- }
- inode->i_mapping->a_ops = &z_erofs_aops;
- return 0;
-}
-
-static int z_erofs_fill_inode_lazy(struct inode *inode)
-{
- struct erofs_inode *const vi = EROFS_I(inode);
- struct super_block *const sb = inode->i_sb;
- int err, headnr;
- erofs_off_t pos;
- struct page *page;
- void *kaddr;
- struct z_erofs_map_header *h;
-
- if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
- /*
- * paired with smp_mb() at the end of the function to ensure
- * fields will only be observed after the bit is set.
- */
- smp_mb();
- return 0;
- }
-
- if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
- return -ERESTARTSYS;
-
- err = 0;
- if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
- goto out_unlock;
-
- DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
- vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
-
- pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
- vi->xattr_isize, 8);
- page = erofs_get_meta_page(sb, erofs_blknr(pos));
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out_unlock;
- }
-
- kaddr = kmap_atomic(page);
-
- h = kaddr + erofs_blkoff(pos);
- vi->z_advise = le16_to_cpu(h->h_advise);
- vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
- vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
-
- headnr = 0;
- if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
- vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
- erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
- headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
- err = -EOPNOTSUPP;
- goto unmap_done;
- }
-
- vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
- if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
- vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
- Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
- erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_done;
- }
- if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
- erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_done;
- }
- /* paired with smp_mb() at the beginning of the function */
- smp_mb();
- set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
-unmap_done:
- kunmap_atomic(kaddr);
- unlock_page(page);
- put_page(page);
-out_unlock:
- clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
- return err;
-}
-
struct z_erofs_maprecorder {
struct inode *inode;
struct erofs_map_blocks *map;
- void *kaddr;
-
unsigned long lcn;
/* compression extent information gathered */
u8 type, headtype;
u16 clusterofs;
u16 delta[2];
- erofs_blk_t pblk, compressedlcs;
+ erofs_blk_t pblk, compressedblks;
+ erofs_off_t nextpackoff;
+ bool partialref, in_mbox;
};
-static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
- erofs_blk_t eblk)
-{
- struct super_block *const sb = m->inode->i_sb;
- struct erofs_map_blocks *const map = m->map;
- struct page *mpage = map->mpage;
-
- if (mpage) {
- if (mpage->index == eblk) {
- if (!m->kaddr)
- m->kaddr = kmap_atomic(mpage);
- return 0;
- }
-
- if (m->kaddr) {
- kunmap_atomic(m->kaddr);
- m->kaddr = NULL;
- }
- put_page(mpage);
- }
-
- mpage = erofs_get_meta_page(sb, eblk);
- if (IS_ERR(mpage)) {
- map->mpage = NULL;
- return PTR_ERR(mpage);
- }
- m->kaddr = kmap_atomic(mpage);
- unlock_page(mpage);
- map->mpage = mpage;
- return 0;
-}
-
-static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
+static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
- const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
- const erofs_off_t pos =
- Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
- vi->xattr_isize) +
- lcn * sizeof(struct z_erofs_vle_decompressed_index);
- struct z_erofs_vle_decompressed_index *di;
- unsigned int advise, type;
- int err;
-
- err = z_erofs_reload_indexes(m, erofs_blknr(pos));
- if (err)
- return err;
-
+ const erofs_off_t pos = Z_EROFS_FULL_INDEX_START(erofs_iloc(inode) +
+ vi->inode_isize + vi->xattr_isize) +
+ lcn * sizeof(struct z_erofs_lcluster_index);
+ struct z_erofs_lcluster_index *di;
+ unsigned int advise;
+
+ di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
+ if (IS_ERR(di))
+ return PTR_ERR(di);
m->lcn = lcn;
- di = m->kaddr + erofs_blkoff(pos);
+ m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
advise = le16_to_cpu(di->di_advise);
- type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
- ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
- switch (type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- m->clusterofs = 1 << vi->z_logical_clusterbits;
+ m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ m->clusterofs = 1 << vi->z_lclusterbits;
m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
- if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+ if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
- m->compressedlcs = m->delta[0] &
- ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+ m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
m->delta[0] = 1;
}
m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
- break;
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
+ } else {
+ m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
m->clusterofs = le16_to_cpu(di->di_clusterofs);
m->pblk = le32_to_cpu(di->di_u.blkaddr);
- break;
- default:
- DBG_BUGON(1);
- return -EOPNOTSUPP;
}
- m->type = type;
return 0;
}
static unsigned int decode_compactedbits(unsigned int lobits,
- unsigned int lomask,
u8 *in, unsigned int pos, u8 *type)
{
const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
- const unsigned int lo = v & lomask;
+ const unsigned int lo = v & ((1 << lobits) - 1);
*type = (v >> lobits) & 3;
return lo;
}
-static int get_compacted_la_distance(unsigned int lclusterbits,
+static int get_compacted_la_distance(unsigned int lobits,
unsigned int encodebits,
unsigned int vcnt, u8 *in, int i)
{
- const unsigned int lomask = (1 << lclusterbits) - 1;
unsigned int lo, d1 = 0;
u8 type;
DBG_BUGON(i >= vcnt);
do {
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
+ lo = decode_compactedbits(lobits, in, encodebits * i, &type);
- if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
return d1;
++d1;
} while (++i < vcnt);
- /* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */
- if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
+ /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
+ if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
d1 += lo - 1;
return d1;
}
-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
- unsigned int amortizedshift,
- unsigned int eofs, bool lookahead)
+static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+ unsigned long lcn, bool lookahead)
{
- struct erofs_inode *const vi = EROFS_I(m->inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- const unsigned int lomask = (1 << lclusterbits) - 1;
- unsigned int vcnt, base, lo, encodebits, nblk;
- int i;
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const erofs_off_t ebase = Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
+ vi->inode_isize + vi->xattr_isize);
+ const unsigned int lclusterbits = vi->z_lclusterbits;
+ const unsigned int totalidx = erofs_iblks(inode);
+ unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
+ unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
+ bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ erofs_off_t pos;
u8 *in, type;
- bool big_pcluster;
+ int i;
+
+ if (lcn >= totalidx || lclusterbits > 14)
+ return -EINVAL;
- if (1 << amortizedshift == 4)
+ m->lcn = lcn;
+ /* used to align to 32-byte (compacted_2b) alignment */
+ compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
+ compacted_2b = 0;
+ if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+ compacted_4b_initial < totalidx)
+ compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+
+ pos = ebase;
+ amortizedshift = 2; /* compact_4b */
+ if (lcn >= compacted_4b_initial) {
+ pos += compacted_4b_initial * 4;
+ lcn -= compacted_4b_initial;
+ if (lcn < compacted_2b) {
+ amortizedshift = 1;
+ } else {
+ pos += compacted_2b * 2;
+ lcn -= compacted_2b;
+ }
+ }
+ pos += lcn * (1 << amortizedshift);
+
+ /* figure out the lcluster count in this pack */
+ if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2;
- else if (1 << amortizedshift == 2 && lclusterbits == 12)
+ else if (1 << amortizedshift == 2 && lclusterbits <= 12)
vcnt = 16;
else
return -EOPNOTSUPP;
- big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
- encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
- base = round_down(eofs, vcnt << amortizedshift);
- in = m->kaddr + base;
+ in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
+ if (IS_ERR(in))
+ return PTR_ERR(in);
- i = (eofs - base) >> amortizedshift;
+ /* it doesn't equal round_up(..) */
+ m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
+ (vcnt << amortizedshift);
+ lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
+ encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
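+ /*
+  * [Added note, not part of the original patch] Worked example of the line
+  * above: each pack of (vcnt << amortizedshift) bytes reserves a shared
+  * __le32, and the remaining bits are split evenly among the lclusters.
+  * For compacted_2b packs (amortizedshift == 1, vcnt == 16) that gives
+  * (32 * 8 - 32) / 16 = 14 encodebits per lcluster; for compacted_4b packs
+  * (amortizedshift == 2, vcnt == 2) it gives (8 * 8 - 32) / 2 = 16.
+  */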
+ bytes = pos & ((vcnt << amortizedshift) - 1);
+ in -= bytes;
+ i = bytes >> amortizedshift;
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
+ lo = decode_compactedbits(lobits, in, encodebits * i, &type);
m->type = type;
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
+ if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
m->clusterofs = 1 << lclusterbits;
/* figure out lookahead_distance: delta[1] if needed */
if (lookahead)
- m->delta[1] = get_compacted_la_distance(lclusterbits,
+ m->delta[1] = get_compacted_la_distance(lobits,
encodebits, vcnt, in, i);
- if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+ if (lo & Z_EROFS_LI_D0_CBLKCNT) {
if (!big_pcluster) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
- m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+ m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
m->delta[0] = 1;
return 0;
} else if (i + 1 != (int)vcnt) {
@@ -294,11 +181,11 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
* of which lo saves delta[1] rather than delta[0].
* Hence, get delta[0] by the previous lcluster indirectly.
*/
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * (i - 1), &type);
- if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ lo = decode_compactedbits(lobits, in,
+ encodebits * (i - 1), &type);
+ if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
lo = 0;
- else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
+ else if (lo & Z_EROFS_LI_D0_CBLKCNT)
lo = 1;
m->delta[0] = lo + 1;
return 0;
@@ -310,9 +197,9 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
nblk = 1;
while (i > 0) {
--i;
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ lo = decode_compactedbits(lobits, in,
+ encodebits * i, &type);
+ if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
i -= lo;
if (i >= 0)
@@ -322,12 +209,12 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
nblk = 0;
while (i > 0) {
--i;
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
- if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
+ lo = decode_compactedbits(lobits, in,
+ encodebits * i, &type);
+ if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ if (lo & Z_EROFS_LI_D0_CBLKCNT) {
--i;
- nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+ nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
continue;
}
/* bigpcluster shouldn't have plain d0 == 1 */
@@ -346,198 +233,118 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
return 0;
}
-static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn, bool lookahead)
+static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn, bool lookahead)
{
- struct inode *const inode = m->inode;
- struct erofs_inode *const vi = EROFS_I(inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
- vi->inode_isize + vi->xattr_isize, 8) +
- sizeof(struct z_erofs_map_header);
- const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
- unsigned int compacted_4b_initial, compacted_2b;
- unsigned int amortizedshift;
- erofs_off_t pos;
+ struct erofs_inode *vi = EROFS_I(m->inode);
int err;
- if (lclusterbits != 12)
- return -EOPNOTSUPP;
-
- if (lcn >= totalidx)
- return -EINVAL;
-
- m->lcn = lcn;
- /* used to align to 32-byte (compacted_2b) alignment */
- compacted_4b_initial = (32 - ebase % 32) / 4;
- if (compacted_4b_initial == 32 / 4)
- compacted_4b_initial = 0;
-
- if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
- compacted_4b_initial < totalidx)
- compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
- else
- compacted_2b = 0;
-
- pos = ebase;
- if (lcn < compacted_4b_initial) {
- amortizedshift = 2;
- goto out;
- }
- pos += compacted_4b_initial * 4;
- lcn -= compacted_4b_initial;
-
- if (lcn < compacted_2b) {
- amortizedshift = 1;
- goto out;
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
+ err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
+ } else {
+ DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL);
+ err = z_erofs_load_full_lcluster(m, lcn);
}
- pos += compacted_2b * 2;
- lcn -= compacted_2b;
- amortizedshift = 2;
-out:
- pos += lcn * (1 << amortizedshift);
- err = z_erofs_reload_indexes(m, erofs_blknr(pos));
if (err)
return err;
- return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos),
- lookahead);
-}
-
-static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned int lcn, bool lookahead)
-{
- const unsigned int datamode = EROFS_I(m->inode)->datalayout;
- if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
- return legacy_load_cluster_from_disk(m, lcn);
-
- if (datamode == EROFS_INODE_FLAT_COMPRESSION)
- return compacted_load_cluster_from_disk(m, lcn, lookahead);
-
- return -EINVAL;
+ if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
+ erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
+ m->type, lcn, EROFS_I(m->inode)->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ } else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD &&
+ m->clusterofs >= (1 << vi->z_lclusterbits)) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ return 0;
}
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned int lookback_distance)
{
+ struct super_block *sb = m->inode->i_sb;
struct erofs_inode *const vi = EROFS_I(m->inode);
- struct erofs_map_blocks *const map = m->map;
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- unsigned long lcn = m->lcn;
- int err;
+ const unsigned int lclusterbits = vi->z_lclusterbits;
- if (lcn < lookback_distance) {
- erofs_err(m->inode->i_sb,
- "bogus lookback distance @ nid %llu", vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
+ while (m->lcn >= lookback_distance) {
+ unsigned long lcn = m->lcn - lookback_distance;
+ int err;
- /* load extent head logical cluster if needed */
- lcn -= lookback_distance;
- err = z_erofs_load_cluster_from_disk(m, lcn, false);
- if (err)
- return err;
+ if (!lookback_distance)
+ break;
- switch (m->type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- if (!m->delta[0]) {
- erofs_err(m->inode->i_sb,
- "invalid lookback distance 0 @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
+ err = z_erofs_load_lcluster_from_disk(m, lcn, false);
+ if (err)
+ return err;
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ lookback_distance = m->delta[0];
+ continue;
}
- return z_erofs_extent_lookback(m, m->delta[0]);
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
m->headtype = m->type;
- map->m_la = (lcn << lclusterbits) | m->clusterofs;
- break;
- default:
- erofs_err(m->inode->i_sb,
- "unknown type %u @ lcn %lu of nid %llu",
- m->type, lcn, vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
+ m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ return 0;
}
- return 0;
+ erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
+ lookback_distance, m->lcn, vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
}
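/*
 * [Added note, not part of the original patch] Lookback walk example:
 * starting from m->lcn == 10 with lookback_distance == 2, lcluster 8 is
 * loaded; if it is NONHEAD with delta[0] == 3, lcluster 5 is tried next,
 * and so on until a HEAD-type lcluster is found, at which point m_la is
 * set to (lcn << lclusterbits) | clusterofs. A zero distance, or needing
 * to look back past lcluster 0 without finding a HEAD, is reported as
 * filesystem corruption.
 */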
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
unsigned int initial_lcn)
{
- struct erofs_inode *const vi = EROFS_I(m->inode);
- struct erofs_map_blocks *const map = m->map;
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- unsigned long lcn;
+ struct inode *inode = m->inode;
+ struct super_block *sb = inode->i_sb;
+ struct erofs_inode *vi = EROFS_I(inode);
+ bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
+ unsigned long lcn = m->lcn + 1;
int err;
- DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
- m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
- m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
+ DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
DBG_BUGON(m->type != m->headtype);
- if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
- ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
- ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
- map->m_plen = 1 << lclusterbits;
- return 0;
- }
- lcn = m->lcn + 1;
- if (m->compressedlcs)
+ if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
+ ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+ m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
+ (lcn << vi->z_lclusterbits) >= inode->i_size)
+ m->compressedblks = 1;
+
+ if (m->compressedblks)
goto out;
- err = z_erofs_load_cluster_from_disk(m, lcn, false);
+ err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
/*
* If the 1st NONHEAD lcluster has already been handled initially w/o
- * valid compressedlcs, which means at least it mustn't be CBLKCNT, or
+ * valid compressedblks, which means at least it mustn't be CBLKCNT, or
* an internal implementation error is detected.
*
* The following code can also handle it properly anyway, but let's
* BUG_ON in the debugging mode only for developers to notice that.
*/
DBG_BUGON(lcn == initial_lcn &&
- m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
+ m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
- switch (m->type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
- /*
- * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
- * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
- */
- m->compressedlcs = 1;
- break;
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- if (m->delta[0] != 1)
- goto err_bonus_cblkcnt;
- if (m->compressedlcs)
- break;
- fallthrough;
- default:
- erofs_err(m->inode->i_sb,
- "cannot found CBLKCNT @ lcn %lu of nid %llu",
- lcn, vi->nid);
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD && m->delta[0] != 1) {
+ erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
+
+ /*
+ * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type rather
+ * than CBLKCNT, it's a 1 block-sized pcluster.
+ */
+ if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD || !m->compressedblks)
+ m->compressedblks = 1;
out:
- map->m_plen = m->compressedlcs << lclusterbits;
+ m->map->m_plen = erofs_pos(sb, m->compressedblks);
return 0;
-err_bonus_cblkcnt:
- erofs_err(m->inode->i_sb,
- "bogus CBLKCNT @ lcn %lu of nid %llu",
- lcn, vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
}
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
@@ -545,154 +352,416 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
struct inode *inode = m->inode;
struct erofs_inode *vi = EROFS_I(inode);
struct erofs_map_blocks *map = m->map;
- unsigned int lclusterbits = vi->z_logical_clusterbits;
+ unsigned int lclusterbits = vi->z_lclusterbits;
u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
int err;
- do {
+ while (1) {
/* handle the last EOF pcluster (no next HEAD lcluster) */
if ((lcn << lclusterbits) >= inode->i_size) {
map->m_llen = inode->i_size - map->m_la;
return 0;
}
- err = z_erofs_load_cluster_from_disk(m, lcn, true);
+ err = z_erofs_load_lcluster_from_disk(m, lcn, true);
if (err)
return err;
- if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
- DBG_BUGON(!m->delta[1] &&
- m->clusterofs != 1 << lclusterbits);
- } else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
- m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
- m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
- /* go on until the next HEAD lcluster */
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ /* work around invalid d1 generated by pre-1.0 mkfs */
+ if (unlikely(!m->delta[1])) {
+ m->delta[1] = 1;
+ DBG_BUGON(1);
+ }
+ } else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
if (lcn != headlcn)
- break;
+ break; /* ends at the next HEAD lcluster */
m->delta[1] = 1;
- } else {
- erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
- m->type, lcn, vi->nid);
- DBG_BUGON(1);
- return -EOPNOTSUPP;
}
lcn += m->delta[1];
- } while (m->delta[1]);
-
+ }
map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
return 0;
}
-int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
+static int z_erofs_map_blocks_fo(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
{
- struct erofs_inode *const vi = EROFS_I(inode);
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
+ bool ztailpacking = vi->z_idata_size;
+ unsigned int lclusterbits = vi->z_lclusterbits;
struct z_erofs_maprecorder m = {
.inode = inode,
.map = map,
+ .in_mbox = erofs_inode_in_metabox(inode),
};
- int err = 0;
- unsigned int lclusterbits, endoff;
+ unsigned int endoff;
unsigned long initial_lcn;
unsigned long long ofs, end;
+ int err;
- trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
-
- /* when trying to read beyond EOF, leave it unmapped */
- if (map->m_la >= inode->i_size) {
- map->m_llen = map->m_la + 1 - inode->i_size;
- map->m_la = inode->i_size;
- map->m_flags = 0;
- goto out;
+ ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
+ if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
+ !vi->z_tailextent_headlcn) {
+ map->m_la = 0;
+ map->m_llen = inode->i_size;
+ map->m_flags = EROFS_MAP_FRAGMENT;
+ return 0;
}
-
- err = z_erofs_fill_inode_lazy(inode);
- if (err)
- goto out;
-
- lclusterbits = vi->z_logical_clusterbits;
- ofs = map->m_la;
initial_lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
- err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
+ err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
if (err)
goto unmap_out;
+ if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
+ vi->z_fragmentoff = m.nextpackoff;
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
end = (m.lcn + 1ULL) << lclusterbits;
- switch (m.type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
- if (endoff >= m.clusterofs) {
- m.headtype = m.type;
- map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
- break;
- }
- /* m.lcn should be >= 1 if endoff < m.clusterofs */
- if (!m.lcn) {
- erofs_err(inode->i_sb,
- "invalid logical cluster 0 at nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_out;
+ if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) {
+ m.headtype = m.type;
+ map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ /*
+ * For ztailpacking files, in order to inline data more
+ * effectively, special EOF lclusters are now supported
+ * which can have three parts at most.
+ */
+ if (ztailpacking && end > inode->i_size)
+ end = inode->i_size;
+ } else {
+ if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ end = (m.lcn << lclusterbits) | m.clusterofs;
+ map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ m.delta[0] = 1;
}
- end = (m.lcn << lclusterbits) | m.clusterofs;
- map->m_flags |= EROFS_MAP_FULL_MAPPED;
- m.delta[0] = 1;
- fallthrough;
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the corresponding first chunk */
err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err)
goto unmap_out;
- break;
- default:
- erofs_err(inode->i_sb,
- "unknown type %u @ offset %llu of nid %llu",
- m.type, ofs, vi->nid);
- err = -EOPNOTSUPP;
- goto unmap_out;
}
-
+ if (m.partialref)
+ map->m_flags |= EROFS_MAP_PARTIAL_REF;
map->m_llen = end - map->m_la;
- map->m_pa = blknr_to_addr(m.pblk);
- err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
- if (err)
- goto out;
+ if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
+ vi->z_tailextent_headlcn = m.lcn;
+ /* for non-compact indexes, fragmentoff is 64 bits */
+ if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
+ vi->z_fragmentoff |= (u64)m.pblk << 32;
+ }
+ if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
+ map->m_flags |= EROFS_MAP_META;
+ map->m_pa = vi->z_fragmentoff;
+ map->m_plen = vi->z_idata_size;
+ if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
+ erofs_err(sb, "ztailpacking inline data across blocks @ nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
+ } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
+ map->m_flags = EROFS_MAP_FRAGMENT;
+ } else {
+ map->m_pa = erofs_pos(sb, m.pblk);
+ err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+ if (err)
+ goto unmap_out;
+ }
- if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
- map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
- else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2)
+ if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
+ if (map->m_llen > map->m_plen) {
+ DBG_BUGON(1);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
+ if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
+ map->m_algorithmformat = Z_EROFS_COMPRESSION_INTERLACED;
+ else
+ map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
+ } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
map->m_algorithmformat = vi->z_algorithmtype[1];
- else
+ } else {
map->m_algorithmformat = vi->z_algorithmtype[0];
+ }
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
- map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
- map->m_llen >= EROFS_BLKSIZ)) {
+ (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
+ map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
+ map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
+ map->m_llen >= i_blocksize(inode))) {
err = z_erofs_get_extent_decompressedlen(&m);
if (!err)
map->m_flags |= EROFS_MAP_FULL_MAPPED;
}
+
unmap_out:
- if (m.kaddr)
- kunmap_atomic(m.kaddr);
+ erofs_unmap_metabuf(&m.map->buf);
+ return err;
+}
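The lcluster arithmetic at the top of z_erofs_map_blocks_fo() splits a logical offset into an lcluster index and an in-lcluster offset. A standalone sketch of that split, with a hypothetical helper name and userspace types:

#include <stdint.h>

/* Model of `initial_lcn = ofs >> lclusterbits` and
 * `endoff = ofs & ((1 << lclusterbits) - 1)` from the function above. */
void erofs_split_la(uint64_t la, unsigned int lclusterbits,
		    uint64_t *lcn, unsigned int *endoff)
{
	*lcn = la >> lclusterbits;			/* logical cluster index */
	*endoff = la & ((1u << lclusterbits) - 1);	/* offset inside that lcluster */
}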
-out:
- erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
- __func__, map->m_la, map->m_pa,
- map->m_llen, map->m_plen, map->m_flags);
+static int z_erofs_map_blocks_ext(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ bool interlaced = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
+ unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
+ erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
+ vi->inode_isize + vi->xattr_isize), recsz);
+ bool in_mbox = erofs_inode_in_metabox(inode);
+ erofs_off_t lend = inode->i_size;
+ erofs_off_t l, r, mid, pa, la, lstart;
+ struct z_erofs_extent *ext;
+ unsigned int fmt;
+ bool last;
+
+ map->m_flags = 0;
+ if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
+ if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
+ ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+ pa = le64_to_cpu(*(__le64 *)ext);
+ pos += sizeof(__le64);
+ lstart = 0;
+ } else {
+ lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
+ pos += (lstart >> vi->z_lclusterbits) * recsz;
+ pa = EROFS_NULL_ADDR;
+ }
+
+ for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
+ ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+ map->m_plen = le32_to_cpu(ext->plen);
+ if (pa != EROFS_NULL_ADDR) {
+ map->m_pa = pa;
+ pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
+ } else {
+ map->m_pa = le32_to_cpu(ext->pstart_lo);
+ }
+ pos += recsz;
+ }
+ last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits));
+ lend = min(lstart, lend);
+ lstart -= 1 << vi->z_lclusterbits;
+ } else {
+ lstart = lend;
+ for (l = 0, r = vi->z_extents; l < r; ) {
+ mid = l + (r - l) / 2;
+ ext = erofs_read_metabuf(&map->buf, sb,
+ pos + mid * recsz, in_mbox);
+ if (IS_ERR(ext))
+ return PTR_ERR(ext);
+
+ la = le32_to_cpu(ext->lstart_lo);
+ pa = le32_to_cpu(ext->pstart_lo) |
+ (u64)le32_to_cpu(ext->pstart_hi) << 32;
+ if (recsz > offsetof(struct z_erofs_extent, lstart_hi))
+ la |= (u64)le32_to_cpu(ext->lstart_hi) << 32;
+
+ if (la > map->m_la) {
+ r = mid;
+ if (la > lend) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ lend = la;
+ } else {
+ l = mid + 1;
+ if (map->m_la == la)
+ r = min(l + 1, r);
+ lstart = la;
+ map->m_plen = le32_to_cpu(ext->plen);
+ map->m_pa = pa;
+ }
+ }
+ last = (l >= vi->z_extents);
+ }
+
+ if (lstart < lend) {
+ map->m_la = lstart;
+ if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+ map->m_flags = EROFS_MAP_FRAGMENT;
+ vi->z_fragmentoff = map->m_plen;
+ if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
+ vi->z_fragmentoff |= map->m_pa << 32;
+ } else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
+ map->m_flags |= EROFS_MAP_MAPPED |
+ EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
+ fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
+ if (fmt)
+ map->m_algorithmformat = fmt - 1;
+ else if (interlaced && !erofs_blkoff(sb, map->m_pa))
+ map->m_algorithmformat =
+ Z_EROFS_COMPRESSION_INTERLACED;
+ else
+ map->m_algorithmformat =
+ Z_EROFS_COMPRESSION_SHIFTED;
+ if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
+ map->m_flags |= EROFS_MAP_PARTIAL_REF;
+ map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
+ }
+ }
+ map->m_llen = lend - map->m_la;
+ return 0;
+}
+
+static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ struct z_erofs_map_header *h;
+ erofs_off_t pos;
+ int err = 0;
- trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
+ /*
+ * paired with smp_mb() at the end of the function to ensure
+ * fields will only be observed after the bit is set.
+ */
+ smp_mb();
+ return 0;
+ }
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
- /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
- DBG_BUGON(err < 0 && err != -ENOMEM);
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+ h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode));
+ if (IS_ERR(h)) {
+ err = PTR_ERR(h);
+ goto out_unlock;
+ }
+
+ /*
+	 * If the highest bit of the 8-byte map header is set, the whole file
+	 * is stored in the packed inode. The remaining bits keep z_fragmentoff.
+ */
+ if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
+ vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
+ vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
+ vi->z_tailextent_headlcn = 0;
+ goto done;
+ }
+ vi->z_advise = le16_to_cpu(h->h_advise);
+ vi->z_lclusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 15);
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+ (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) {
+ vi->z_extents = le32_to_cpu(h->h_extents_lo) |
+ ((u64)le16_to_cpu(h->h_extents_hi) << 32);
+ goto done;
+ }
+
+ vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
+ vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
+ if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
+ vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
+ else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
+ vi->z_idata_size = le16_to_cpu(h->h_idata_size);
+
+ if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
+ vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
+ Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
+ erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto out_unlock;
+ }
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
+ !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
+ !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
+ erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto out_unlock;
+ }
+
+ if (vi->z_idata_size ||
+ (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+ struct erofs_map_blocks tm = {
+ .buf = __EROFS_BUF_INITIALIZER
+ };
+
+ err = z_erofs_map_blocks_fo(inode, &tm,
+ EROFS_GET_BLOCKS_FINDTAIL);
+ erofs_put_metabuf(&tm.buf);
+ if (err < 0)
+ goto out_unlock;
+ }
+done:
+ /* paired with smp_mb() at the beginning of the function */
+ smp_mb();
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+out_unlock:
+ clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
+ return err;
+}
+
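A minimal standalone sketch of the fragment-inode header decode performed in z_erofs_fill_inode() above (hypothetical function name; the le64_to_cpu() byte swap is assumed to have happened already): the top bit of the 8-byte map header marks a whole-file fragment and the remaining 63 bits carry z_fragmentoff.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 and fills *fragmentoff when the inode is a whole-file fragment. */
int decode_fragment_header(uint64_t header, uint64_t *fragmentoff)
{
	if (!(header & (1ULL << 63)))
		return 0;
	*fragmentoff = header ^ (1ULL << 63);	/* clear the marker bit */
	return 1;
}

int main(void)
{
	uint64_t off;

	if (decode_fragment_header((1ULL << 63) | 0x1000, &off))
		printf("whole-file fragment, fragmentoff=%llu\n",
		       (unsigned long long)off);
	return 0;
}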
+static int z_erofs_map_sanity_check(struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ u64 pend;
+
+ if (!(map->m_flags & EROFS_MAP_ENCODED))
+ return 0;
+ if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
+ erofs_err(inode->i_sb, "unknown algorithm %d @ pos %llu for nid %llu, please upgrade kernel",
+ map->m_algorithmformat, map->m_la, EROFS_I(inode)->nid);
+ return -EOPNOTSUPP;
+ }
+ if (unlikely(map->m_algorithmformat < Z_EROFS_COMPRESSION_MAX &&
+ !(sbi->available_compr_algs & (1 << map->m_algorithmformat)))) {
+ erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+ map->m_algorithmformat, EROFS_I(inode)->nid);
+ return -EFSCORRUPTED;
+ }
+ if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
+ map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
+ return -EOPNOTSUPP;
+ /* Filesystems beyond 48-bit physical block addresses are invalid */
+ if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
+ (pend >> sbi->blkszbits) >= BIT_ULL(48)))
+ return -EFSCORRUPTED;
+ return 0;
+}
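A standalone model (hypothetical helper, userspace types) of the closing range check in z_erofs_map_sanity_check(): the end of the physical extent must not wrap and must stay within a 48-bit block address space.

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the check_add_overflow() + 48-bit test above (GCC/Clang builtin). */
bool erofs_pa_range_ok(uint64_t m_pa, uint64_t m_plen, unsigned int blkszbits)
{
	uint64_t pend;

	if (__builtin_add_overflow(m_pa, m_plen, &pend))
		return false;				/* physical end wraps */
	return (pend >> blkszbits) < (1ULL << 48);	/* 48-bit block space */
}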
+
+int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ int flags)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ int err = 0;
+
+ trace_erofs_map_blocks_enter(inode, map, flags);
+ if (map->m_la >= inode->i_size) { /* post-EOF unmapped extent */
+ map->m_llen = map->m_la + 1 - inode->i_size;
+ map->m_la = inode->i_size;
+ map->m_flags = 0;
+ } else {
+ err = z_erofs_fill_inode(inode, map);
+ if (!err) {
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+ (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
+ err = z_erofs_map_blocks_ext(inode, map, flags);
+ else
+ err = z_erofs_map_blocks_fo(inode, map, flags);
+ }
+ if (!err)
+ err = z_erofs_map_sanity_check(inode, map);
+ if (err)
+ map->m_llen = 0;
+ }
+ trace_erofs_map_blocks_exit(inode, map, flags, err);
return err;
}
@@ -704,8 +773,7 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
struct erofs_map_blocks map = { .m_la = offset };
ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
- if (map.mpage)
- put_page(map.mpage);
+ erofs_put_metabuf(&map.buf);
if (ret < 0)
return ret;
@@ -714,17 +782,22 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
iomap->length = map.m_llen;
if (map.m_flags & EROFS_MAP_MAPPED) {
iomap->type = IOMAP_MAPPED;
- iomap->addr = map.m_pa;
+ iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
+ IOMAP_NULL_ADDR : map.m_pa;
} else {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
/*
- * No strict rule how to describe extents for post EOF, yet
- * we need do like below. Otherwise, iomap itself will get
+ * No strict rule on how to describe extents for post EOF, yet
+	 * we need to do it like below. Otherwise, iomap itself will get
* into an endless loop on post EOF.
+ *
+	 * Calculate the effective offset by subtracting the extent start
+	 * (map.m_la) from the requested offset, and add it to the length.
+ * (NB: offset >= map.m_la always)
*/
if (iomap->offset >= inode->i_size)
- iomap->length = length + map.m_la - offset;
+ iomap->length = length + offset - map.m_la;
}
iomap->flags = 0;
return 0;
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
deleted file mode 100644
index dfd7fe0503bb..000000000000
--- a/fs/erofs/zpvec.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2018 HUAWEI, Inc.
- * https://www.huawei.com/
- */
-#ifndef __EROFS_FS_ZPVEC_H
-#define __EROFS_FS_ZPVEC_H
-
-#include "tagptr.h"
-
-/* page type in pagevec for decompress subsystem */
-enum z_erofs_page_type {
- /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
- Z_EROFS_PAGE_TYPE_EXCLUSIVE,
-
- Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
-
- Z_EROFS_VLE_PAGE_TYPE_HEAD,
- Z_EROFS_VLE_PAGE_TYPE_MAX
-};
-
-extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
- __bad_page_type_exclusive(void);
-
-/* pagevec tagged pointer */
-typedef tagptr2_t erofs_vtptr_t;
-
-/* pagevec collector */
-struct z_erofs_pagevec_ctor {
- struct page *curr, *next;
- erofs_vtptr_t *pages;
-
- unsigned int nr, index;
-};
-
-static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
- bool atomic)
-{
- if (!ctor->curr)
- return;
-
- if (atomic)
- kunmap_atomic(ctor->pages);
- else
- kunmap(ctor->curr);
-}
-
-static inline struct page *
-z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
- unsigned int nr)
-{
- unsigned int index;
-
- /* keep away from occupied pages */
- if (ctor->next)
- return ctor->next;
-
- for (index = 0; index < nr; ++index) {
- const erofs_vtptr_t t = ctor->pages[index];
- const unsigned int tags = tagptr_unfold_tags(t);
-
- if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
- return tagptr_unfold_ptr(t);
- }
- DBG_BUGON(nr >= ctor->nr);
- return NULL;
-}
-
-static inline void
-z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
- bool atomic)
-{
- struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
-
- z_erofs_pagevec_ctor_exit(ctor, atomic);
-
- ctor->curr = next;
- ctor->next = NULL;
- ctor->pages = atomic ?
- kmap_atomic(ctor->curr) : kmap(ctor->curr);
-
- ctor->nr = PAGE_SIZE / sizeof(struct page *);
- ctor->index = 0;
-}
-
-static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
- unsigned int nr,
- erofs_vtptr_t *pages,
- unsigned int i)
-{
- ctor->nr = nr;
- ctor->curr = ctor->next = NULL;
- ctor->pages = pages;
-
- if (i >= nr) {
- i -= nr;
- z_erofs_pagevec_ctor_pagedown(ctor, false);
- while (i > ctor->nr) {
- i -= ctor->nr;
- z_erofs_pagevec_ctor_pagedown(ctor, false);
- }
- }
- ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
- ctor->index = i;
-}
-
-static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
- struct page *page,
- enum z_erofs_page_type type)
-{
- if (!ctor->next && type)
- if (ctor->index + 1 == ctor->nr)
- return false;
-
- if (ctor->index >= ctor->nr)
- z_erofs_pagevec_ctor_pagedown(ctor, false);
-
- /* exclusive page type must be 0 */
- if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
- __bad_page_type_exclusive();
-
- /* should remind that collector->next never equal to 1, 2 */
- if (type == (uintptr_t)ctor->next) {
- ctor->next = page;
- }
- ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
- return true;
-}
-
-static inline struct page *
-z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
- enum z_erofs_page_type *type)
-{
- erofs_vtptr_t t;
-
- if (ctor->index >= ctor->nr) {
- DBG_BUGON(!ctor->next);
- z_erofs_pagevec_ctor_pagedown(ctor, true);
- }
-
- t = ctor->pages[ctor->index];
-
- *type = tagptr_unfold_tags(t);
-
- /* should remind that collector->next never equal to 1, 2 */
- if (*type == (uintptr_t)ctor->next)
- ctor->next = tagptr_unfold_ptr(t);
-
- ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
- return tagptr_unfold_ptr(t);
-}
-#endif
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
new file mode 100644
index 000000000000..55ff2ab5128e
--- /dev/null
+++ b/fs/erofs/zutil.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * https://www.huawei.com/
+ * Copyright (C) 2024 Alibaba Cloud
+ */
+#include "internal.h"
+
+struct z_erofs_gbuf {
+ spinlock_t lock;
+ void *ptr;
+ struct page **pages;
+ unsigned int nrpages;
+};
+
+static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
+static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
+ z_erofs_rsv_nrpages;
+
+module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
+module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
+
+atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */
+
+/* protects `shrinker_run_no` and the mounted `erofs_sb_list` */
+static DEFINE_SPINLOCK(erofs_sb_list_lock);
+static LIST_HEAD(erofs_sb_list);
+static unsigned int shrinker_run_no;
+static struct shrinker *erofs_shrinker_info;
+
+static unsigned int z_erofs_gbuf_id(void)
+{
+ return raw_smp_processor_id() % z_erofs_gbuf_count;
+}
+
+void *z_erofs_get_gbuf(unsigned int requiredpages)
+ __acquires(gbuf->lock)
+{
+ struct z_erofs_gbuf *gbuf;
+
+ migrate_disable();
+ gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
+ spin_lock(&gbuf->lock);
+ /* check if the buffer is too small */
+ if (requiredpages > gbuf->nrpages) {
+ spin_unlock(&gbuf->lock);
+ migrate_enable();
+ /* (for sparse checker) pretend gbuf->lock is still taken */
+ __acquire(gbuf->lock);
+ return NULL;
+ }
+ return gbuf->ptr;
+}
+
+void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
+{
+ struct z_erofs_gbuf *gbuf;
+
+ gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
+ DBG_BUGON(gbuf->ptr != ptr);
+ spin_unlock(&gbuf->lock);
+ migrate_enable();
+}
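A hedged caller-side sketch (hypothetical function, not part of this file) of how z_erofs_get_gbuf()/z_erofs_put_gbuf() above are meant to pair: the buffer is only valid while the per-CPU lock is held, and a NULL return means the caller must fall back to another allocation path.

/* Hypothetical caller: use the per-CPU global buffer if it is large enough. */
static int example_use_gbuf(unsigned int nrpages)
{
	void *dst = z_erofs_get_gbuf(nrpages);

	if (!dst)		/* too small; the helper already dropped the lock */
		return -ENOMEM;
	/* ... scratch work on up to nrpages pages, no sleeping here ... */
	z_erofs_put_gbuf(dst);	/* releases the lock and re-enables migration */
	return 0;
}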
+
+int z_erofs_gbuf_growsize(unsigned int nrpages)
+{
+ static DEFINE_MUTEX(gbuf_resize_mutex);
+ struct page **tmp_pages = NULL;
+ struct z_erofs_gbuf *gbuf;
+ void *ptr, *old_ptr;
+ int last, i, j;
+
+ mutex_lock(&gbuf_resize_mutex);
+	/* avoid shrinking gbufs, since we don't know how many users rely on them */
+ if (nrpages <= z_erofs_gbuf_nrpages) {
+ mutex_unlock(&gbuf_resize_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < z_erofs_gbuf_count; ++i) {
+ gbuf = &z_erofs_gbufpool[i];
+ tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
+ if (!tmp_pages)
+ goto out;
+
+ for (j = 0; j < gbuf->nrpages; ++j)
+ tmp_pages[j] = gbuf->pages[j];
+ do {
+ last = j;
+ j = alloc_pages_bulk(GFP_KERNEL, nrpages,
+ tmp_pages);
+ if (last == j)
+ goto out;
+ } while (j != nrpages);
+
+ ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
+ if (!ptr)
+ goto out;
+
+ spin_lock(&gbuf->lock);
+ kfree(gbuf->pages);
+ gbuf->pages = tmp_pages;
+ old_ptr = gbuf->ptr;
+ gbuf->ptr = ptr;
+ gbuf->nrpages = nrpages;
+ spin_unlock(&gbuf->lock);
+ if (old_ptr)
+ vunmap(old_ptr);
+ }
+ z_erofs_gbuf_nrpages = nrpages;
+out:
+ if (i < z_erofs_gbuf_count && tmp_pages) {
+ for (j = 0; j < nrpages; ++j)
+ if (tmp_pages[j] && (j >= gbuf->nrpages ||
+ tmp_pages[j] != gbuf->pages[j]))
+ __free_page(tmp_pages[j]);
+ kfree(tmp_pages);
+ }
+ mutex_unlock(&gbuf_resize_mutex);
+ return i < z_erofs_gbuf_count ? -ENOMEM : 0;
+}
+
+int __init z_erofs_gbuf_init(void)
+{
+ unsigned int i, total = num_possible_cpus();
+
+ if (z_erofs_gbuf_count)
+ total = min(z_erofs_gbuf_count, total);
+ z_erofs_gbuf_count = total;
+
+ /* The last (special) global buffer is the reserved buffer */
+ total += !!z_erofs_rsv_nrpages;
+
+ z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
+ GFP_KERNEL);
+ if (!z_erofs_gbufpool)
+ return -ENOMEM;
+
+ if (z_erofs_rsv_nrpages) {
+ z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
+ z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
+ sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
+ if (!z_erofs_rsvbuf->pages) {
+ z_erofs_rsvbuf = NULL;
+ z_erofs_rsv_nrpages = 0;
+ }
+ }
+ for (i = 0; i < total; ++i)
+ spin_lock_init(&z_erofs_gbufpool[i].lock);
+ return 0;
+}
+
+void z_erofs_gbuf_exit(void)
+{
+ int i, j;
+
+ for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
+ struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
+
+ if (gbuf->ptr) {
+ vunmap(gbuf->ptr);
+ gbuf->ptr = NULL;
+ }
+
+ if (!gbuf->pages)
+ continue;
+
+ for (j = 0; j < gbuf->nrpages; ++j)
+ if (gbuf->pages[j])
+ put_page(gbuf->pages[j]);
+ kfree(gbuf->pages);
+ gbuf->pages = NULL;
+ }
+ kfree(z_erofs_gbufpool);
+}
+
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
+{
+ struct page *page = *pagepool;
+
+ if (page) {
+ *pagepool = (struct page *)page_private(page);
+ } else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
+ spin_lock(&z_erofs_rsvbuf->lock);
+ if (z_erofs_rsvbuf->nrpages)
+ page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
+ spin_unlock(&z_erofs_rsvbuf->lock);
+ }
+ if (!page)
+ page = alloc_page(gfp);
+ DBG_BUGON(page && page_ref_count(page) != 1);
+ return page;
+}
+
+void erofs_release_pages(struct page **pagepool)
+{
+ while (*pagepool) {
+ struct page *page = *pagepool;
+
+ *pagepool = (struct page *)page_private(page);
+ /* try to fill reserved global pool first */
+ if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
+ z_erofs_rsv_nrpages) {
+ spin_lock(&z_erofs_rsvbuf->lock);
+ if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
+ z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
+ = page;
+ spin_unlock(&z_erofs_rsvbuf->lock);
+ continue;
+ }
+ spin_unlock(&z_erofs_rsvbuf->lock);
+ }
+ put_page(page);
+ }
+}
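The page pool walked by __erofs_allocpage() and erofs_release_pages() above is a singly linked stack threaded through each page's private field. A standalone model of that convention (hypothetical struct; refcounting and the reserved buffer are ignored):

#include <stdio.h>

struct fake_page { unsigned long private; };

/* push: the new head remembers the old head in its private field */
void pool_push(struct fake_page **pool, struct fake_page *page)
{
	page->private = (unsigned long)*pool;
	*pool = page;
}

/* pop: matches `*pagepool = (struct page *)page_private(page)` above */
struct fake_page *pool_pop(struct fake_page **pool)
{
	struct fake_page *page = *pool;

	if (page)
		*pool = (struct fake_page *)page->private;
	return page;
}

int main(void)
{
	struct fake_page a = { 0 }, b = { 0 }, *pool = NULL;

	pool_push(&pool, &a);
	pool_push(&pool, &b);
	printf("popped %p then %p\n", (void *)pool_pop(&pool), (void *)pool_pop(&pool));
	return 0;
}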
+
+void erofs_shrinker_register(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ mutex_init(&sbi->umount_mutex);
+
+ spin_lock(&erofs_sb_list_lock);
+ list_add(&sbi->list, &erofs_sb_list);
+ spin_unlock(&erofs_sb_list_lock);
+}
+
+void erofs_shrinker_unregister(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+ mutex_lock(&sbi->umount_mutex);
+ while (!xa_empty(&sbi->managed_pslots)) {
+ z_erofs_shrink_scan(sbi, ~0UL);
+ cond_resched();
+ }
+ spin_lock(&erofs_sb_list_lock);
+ list_del(&sbi->list);
+ spin_unlock(&erofs_sb_list_lock);
+ mutex_unlock(&sbi->umount_mutex);
+}
+
+static unsigned long erofs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ return atomic_long_read(&erofs_global_shrink_cnt) ?: SHRINK_EMPTY;
+}
+
+static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct erofs_sb_info *sbi;
+ struct list_head *p;
+
+ unsigned long nr = sc->nr_to_scan;
+ unsigned int run_no;
+ unsigned long freed = 0;
+
+ spin_lock(&erofs_sb_list_lock);
+ do {
+ run_no = ++shrinker_run_no;
+ } while (run_no == 0);
+
+ /* Iterate over all mounted superblocks and try to shrink them */
+ p = erofs_sb_list.next;
+ while (p != &erofs_sb_list) {
+ sbi = list_entry(p, struct erofs_sb_info, list);
+
+ /*
+ * We move the ones we do to the end of the list, so we stop
+ * when we see one we have already done.
+ */
+ if (sbi->shrinker_run_no == run_no)
+ break;
+
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+
+ spin_unlock(&erofs_sb_list_lock);
+ sbi->shrinker_run_no = run_no;
+ freed += z_erofs_shrink_scan(sbi, nr - freed);
+ spin_lock(&erofs_sb_list_lock);
+ /* Get the next list element before we move this one */
+ p = p->next;
+
+ /*
+ * Move this one to the end of the list to provide some
+ * fairness.
+ */
+ list_move_tail(&sbi->list, &erofs_sb_list);
+ mutex_unlock(&sbi->umount_mutex);
+
+ if (freed >= nr)
+ break;
+ }
+ spin_unlock(&erofs_sb_list_lock);
+ return freed;
+}
+
+int __init erofs_init_shrinker(void)
+{
+ erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
+ if (!erofs_shrinker_info)
+ return -ENOMEM;
+
+ erofs_shrinker_info->count_objects = erofs_shrink_count;
+ erofs_shrinker_info->scan_objects = erofs_shrink_scan;
+ shrinker_register(erofs_shrinker_info);
+ return 0;
+}
+
+void erofs_exit_shrinker(void)
+{
+ shrinker_free(erofs_shrinker_info);
+}
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 3627dd7d25db..3219e0d596fe 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -33,10 +33,10 @@ struct eventfd_ctx {
/*
* Every time that a write(2) is performed on an eventfd, the
* value of the __u64 being written is added to "count" and a
- * wakeup is performed on "wqh". A read(2) will return the "count"
- * value to userspace, and will reset "count" to zero. The kernel
- * side eventfd_signal() also, adds to the "count" counter and
- * issue a wakeup.
+ * wakeup is performed on "wqh". If EFD_SEMAPHORE flag was not
+ * specified, a read(2) will return the "count" value to userspace,
+	 * and will reset "count" to zero. The kernel-side eventfd_signal()
+	 * also adds to the "count" counter and issues a wakeup.
*/
__u64 count;
unsigned int flags;
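A runnable userspace illustration of the EFD_SEMAPHORE behaviour described in the updated comment: with the flag set, each read(2) returns 1 and decrements the counter by 1; without it, a single read would return the full value (3 here) and reset the counter to zero.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	uint64_t v = 3, out;
	int efd = eventfd(0, EFD_SEMAPHORE);

	if (efd < 0)
		return 1;
	if (write(efd, &v, sizeof(v)) != sizeof(v))	/* count = 3 */
		return 1;
	for (int i = 0; i < 3; i++) {
		if (read(efd, &out, sizeof(out)) != sizeof(out))
			return 1;
		printf("read %llu\n", (unsigned long long)out);	/* prints 1 each time */
	}
	close(efd);
	return 0;
}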
@@ -44,20 +44,16 @@ struct eventfd_ctx {
};
/**
- * eventfd_signal - Adds @n to the eventfd counter.
+ * eventfd_signal_mask - Increment the event counter
* @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- * The value cannot be negative.
+ * @mask: [in] poll mask
*
* This function is supposed to be called by the kernel in paths that do not
* allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
* to poll(2).
- *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
*/
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
unsigned long flags;
@@ -69,27 +65,24 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
* it returns false, the eventfd_signal() call should be deferred to a
* safe context.
*/
- if (WARN_ON_ONCE(current->in_eventfd_signal))
- return 0;
+ if (WARN_ON_ONCE(current->in_eventfd))
+ return;
spin_lock_irqsave(&ctx->wqh.lock, flags);
- current->in_eventfd_signal = 1;
- if (ULLONG_MAX - ctx->count < n)
- n = ULLONG_MAX - ctx->count;
- ctx->count += n;
+ current->in_eventfd = 1;
+ if (ctx->count < ULLONG_MAX)
+ ctx->count++;
if (waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, EPOLLIN);
- current->in_eventfd_signal = 0;
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
+ current->in_eventfd = 0;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
-
- return n;
}
-EXPORT_SYMBOL_GPL(eventfd_signal);
+EXPORT_SYMBOL_GPL(eventfd_signal_mask);
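A hedged kernel-side usage sketch (hypothetical driver code, error handling trimmed) of the reworked API: resolve the eventfd context once from a user-supplied fd, then signal it from a non-sleeping path with eventfd_signal_mask().

/* Hypothetical consumer; not part of this file. */
static struct eventfd_ctx *example_ctx;

static int example_bind_eventfd(int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	example_ctx = ctx;
	return 0;
}

static void example_notify(void)
{
	/* adds 1 to the counter and wakes pollers; extra poll bits go in @mask */
	eventfd_signal_mask(example_ctx, 0);
}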
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
if (ctx->id >= 0)
- ida_simple_remove(&eventfd_ida, ctx->id);
+ ida_free(&eventfd_ida, ctx->id);
kfree(ctx);
}
@@ -184,7 +177,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
lockdep_assert_held(&ctx->wqh.lock);
- *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
+ *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
@@ -223,7 +216,6 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
struct file *file = iocb->ki_filp;
struct eventfd_ctx *ctx = file->private_data;
__u64 ucnt = 0;
- DECLARE_WAITQUEUE(wait, current);
if (iov_iter_count(to) < sizeof(ucnt))
return -EINVAL;
@@ -234,27 +226,17 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
spin_unlock_irq(&ctx->wqh.lock);
return -EAGAIN;
}
- __add_wait_queue(&ctx->wqh, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ctx->count)
- break;
- if (signal_pending(current)) {
- __remove_wait_queue(&ctx->wqh, &wait);
- __set_current_state(TASK_RUNNING);
- spin_unlock_irq(&ctx->wqh.lock);
- return -ERESTARTSYS;
- }
+
+ if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
spin_unlock_irq(&ctx->wqh.lock);
- schedule();
- spin_lock_irq(&ctx->wqh.lock);
+ return -ERESTARTSYS;
}
- __remove_wait_queue(&ctx->wqh, &wait);
- __set_current_state(TASK_RUNNING);
}
eventfd_ctx_do_read(ctx, &ucnt);
+ current->in_eventfd = 1;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
+ current->in_eventfd = 0;
spin_unlock_irq(&ctx->wqh.lock);
if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
return -EFAULT;
@@ -268,9 +250,8 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
struct eventfd_ctx *ctx = file->private_data;
ssize_t res;
__u64 ucnt;
- DECLARE_WAITQUEUE(wait, current);
- if (count < sizeof(ucnt))
+ if (count != sizeof(ucnt))
return -EINVAL;
if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
return -EFAULT;
@@ -281,28 +262,17 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
if (ULLONG_MAX - ctx->count > ucnt)
res = sizeof(ucnt);
else if (!(file->f_flags & O_NONBLOCK)) {
- __add_wait_queue(&ctx->wqh, &wait);
- for (res = 0;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ULLONG_MAX - ctx->count > ucnt) {
- res = sizeof(ucnt);
- break;
- }
- if (signal_pending(current)) {
- res = -ERESTARTSYS;
- break;
- }
- spin_unlock_irq(&ctx->wqh.lock);
- schedule();
- spin_lock_irq(&ctx->wqh.lock);
- }
- __remove_wait_queue(&ctx->wqh, &wait);
- __set_current_state(TASK_RUNNING);
+ res = wait_event_interruptible_locked_irq(ctx->wqh,
+ ULLONG_MAX - ctx->count > ucnt);
+ if (!res)
+ res = sizeof(ucnt);
}
if (likely(res > 0)) {
ctx->count += ucnt;
+ current->in_eventfd = 1;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+ current->in_eventfd = 0;
}
spin_unlock_irq(&ctx->wqh.lock);
@@ -313,12 +283,19 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
struct eventfd_ctx *ctx = f->private_data;
+ __u64 cnt;
spin_lock_irq(&ctx->wqh.lock);
- seq_printf(m, "eventfd-count: %16llx\n",
- (unsigned long long)ctx->count);
+ cnt = ctx->count;
spin_unlock_irq(&ctx->wqh.lock);
- seq_printf(m, "eventfd-id: %d\n", ctx->id);
+
+ seq_printf(m,
+ "eventfd-count: %16llx\n"
+ "eventfd-id: %d\n"
+ "eventfd-semaphore: %d\n",
+ cnt,
+ ctx->id,
+ !!(ctx->flags & EFD_SEMAPHORE));
}
#endif
@@ -370,13 +347,10 @@ EXPORT_SYMBOL_GPL(eventfd_fget);
*/
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
- struct eventfd_ctx *ctx;
- struct fd f = fdget(fd);
- if (!f.file)
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- ctx = eventfd_ctx_fileget(f.file);
- fdput(f);
- return ctx;
+ return eventfd_ctx_fileget(fd_file(f));
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
@@ -404,13 +378,12 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
static int do_eventfd(unsigned int count, int flags)
{
- struct eventfd_ctx *ctx;
- struct file *file;
- int fd;
+ struct eventfd_ctx *ctx __free(kfree) = NULL;
/* Check the EFD_* constants for consistency. */
BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
+ BUILD_BUG_ON(EFD_SEMAPHORE != (1 << 0));
if (flags & ~EFD_FLAGS_SET)
return -EINVAL;
@@ -423,27 +396,19 @@ static int do_eventfd(unsigned int count, int flags)
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
- ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
flags &= EFD_SHARED_FCNTL_FLAGS;
flags |= O_RDWR;
- fd = get_unused_fd_flags(flags);
- if (fd < 0)
- goto err;
-
- file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- fd = PTR_ERR(file);
- goto err;
- }
- file->f_mode |= FMODE_NOWAIT;
- fd_install(fd, file);
- return fd;
-err:
- eventfd_free_ctx(ctx);
- return fd;
+ FD_PREPARE(fdf, flags,
+ anon_inode_getfile_fmode("[eventfd]", &eventfd_fops, ctx,
+ flags, FMODE_NOWAIT));
+ if (fdf.err)
+ return fdf.err;
+
+ ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);
+ retain_and_null_ptr(ctx);
+ return fd_publish(fdf);
}
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 06f4c5ae1451..6c36d9dc6926 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -37,18 +37,19 @@
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
+#include <linux/capability.h>
#include <net/busy_poll.h>
/*
* LOCKING:
* There are three level of locking required by epoll :
*
- * 1) epmutex (mutex)
+ * 1) epnested_mutex (mutex)
* 2) ep->mtx (mutex)
- * 3) ep->lock (rwlock)
+ * 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
- * We need a rwlock (ep->lock) because we manipulate objects
+ * We need a spinlock (ep->lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
@@ -57,14 +58,8 @@
* we need a lock that will allow us to sleep. This lock is a
* mutex (ep->mtx). It is acquired during the event transfer loop,
* during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
- * Then we also need a global mutex to serialize eventpoll_release_file()
- * and ep_free().
- * This mutex is acquired by ep_free() during the epoll file
- * cleanup path and it is also acquired by eventpoll_release_file()
- * if a file has been pushed inside an epoll set and it is then
- * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
- * It is also acquired when inserting an epoll fd onto another epoll
- * fd. We do this so that we walk the epoll tree and ensure that this
+ * The epnested_mutex is acquired when inserting an epoll fd onto another
+ * epoll fd. We do this so that we walk the epoll tree and ensure that this
* insertion does not create a cycle of epoll file descriptors, which
* could lead to deadlock. We need a global mutex to prevent two
* simultaneous inserts (A into B and B into A) from racing and
@@ -80,9 +75,9 @@
* of epoll file descriptors, we use the current recursion depth as
* the lockdep subkey.
* It is possible to drop the "ep->mtx" and to use the global
- * mutex "epmutex" (together with "ep->lock") to have it working,
+ * mutex "epnested_mutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
- * Events that require holding "epmutex" are very rare, while for
+ * Events that require holding "epnested_mutex" are very rare, while for
* normal operations the epoll private "ep->mtx" will guarantee
* a better scalability.
*/
@@ -153,6 +148,13 @@ struct epitem {
/* The file descriptor information this item refers to */
struct epoll_filefd ffd;
+ /*
+ * Protected by file->f_lock, true for to-be-released epitem already
+ * removed from the "struct file" items list; together with
+ * eventpoll->refcount orchestrates "struct eventpoll" disposal
+ */
+ bool dying;
+
/* List containing poll wait queues */
struct eppoll_entry *pwqlist;
@@ -193,7 +195,7 @@ struct eventpoll {
struct list_head rdllist;
/* Lock which protects rdllist and ovflist */
- rwlock_t lock;
+ spinlock_t lock;
/* RB tree root used to store monitored fd structs */
struct rb_root_cached rbr;
@@ -205,7 +207,7 @@ struct eventpoll {
*/
struct epitem *ovflist;
- /* wakeup_source used when ep_scan_ready_list is running */
+ /* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */
struct wakeup_source *ws;
/* The user that created the eventpoll descriptor */
@@ -216,10 +218,22 @@ struct eventpoll {
/* used to optimize loop detection check */
u64 gen;
struct hlist_head refs;
+ u8 loop_check_depth;
+
+ /*
+ * usage count, used together with epitem->dying to
+ * orchestrate the disposal of this struct
+ */
+ refcount_t refcount;
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
unsigned int napi_id;
+ /* busy poll timeout */
+ u32 busy_poll_usecs;
+ /* busy poll packet budget */
+ u16 busy_poll_budget;
+ bool prefer_busy_poll;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -240,10 +254,8 @@ struct ep_pqueue {
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;
-/*
- * This mutex is used to serialize ep_free() and eventpoll_release_file().
- */
-static DEFINE_MUTEX(epmutex);
+/* Used for cycles detection */
+static DEFINE_MUTEX(epnested_mutex);
static u64 loop_check_gen = 0;
@@ -251,14 +263,14 @@ static u64 loop_check_gen = 0;
static struct eventpoll *inserting_into;
/* Slab cache used to allocate "struct epitem" */
-static struct kmem_cache *epi_cache __read_mostly;
+static struct kmem_cache *epi_cache __ro_after_init;
/* Slab cache used to allocate "struct eppoll_entry" */
-static struct kmem_cache *pwq_cache __read_mostly;
+static struct kmem_cache *pwq_cache __ro_after_init;
/*
* List of files with newly added links, where we may need to limit the number
- * of emanating paths. Protected by the epmutex.
+ * of emanating paths. Protected by the epnested_mutex.
*/
struct epitems_head {
struct hlist_head epitems;
@@ -266,7 +278,7 @@ struct epitems_head {
};
static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;
-static struct kmem_cache *ephead_cache __read_mostly;
+static struct kmem_cache *ephead_cache __ro_after_init;
static inline void free_ephead(struct epitems_head *head)
{
@@ -307,7 +319,7 @@ static void unlist_file(struct epitems_head *head)
static long long_zero;
static long long_max = LONG_MAX;
-struct ctl_table epoll_table[] = {
+static const struct ctl_table epoll_table[] = {
{
.procname = "max_user_watches",
.data = &max_user_watches,
@@ -317,8 +329,14 @@ struct ctl_table epoll_table[] = {
.extra1 = &long_zero,
.extra2 = &long_max,
},
- { }
};
+
+static void __init epoll_sysctls_init(void)
+{
+ register_sysctl("fs/epoll", epoll_table);
+}
+#else
+#define epoll_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
static const struct file_operations eventpoll_fops;
@@ -376,11 +394,43 @@ static inline int ep_events_available(struct eventpoll *ep)
}
#ifdef CONFIG_NET_RX_BUSY_POLL
+/**
+ * busy_loop_ep_timeout - check if busy poll has timed out. The timeout value
+ * from the epoll instance ep is preferred, but if it is not set fallback to
+ * the system-wide global via busy_loop_timeout.
+ *
+ * @start_time: The start time used to compute the remaining time until timeout.
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Return: true if the timeout has expired, false otherwise.
+ */
+static bool busy_loop_ep_timeout(unsigned long start_time,
+ struct eventpoll *ep)
+{
+ unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);
+
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
+
+ return time_after(now, end_time);
+ } else {
+ return busy_loop_timeout(start_time);
+ }
+}
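A standalone restatement (hypothetical helper; the wraparound-safe time_after() is replaced by a plain comparison) of the preference described in the kerneldoc above: the per-instance timeout wins when it is set, otherwise the global busy-poll timeout applies.

#include <stdbool.h>

bool busy_poll_expired(unsigned long now, unsigned long start,
		       unsigned long ep_usecs, unsigned long global_usecs)
{
	unsigned long budget = ep_usecs ? ep_usecs : global_usecs;

	return now > start + budget;	/* the kernel uses time_after() here */
}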
+
+static bool ep_busy_loop_on(struct eventpoll *ep)
+{
+ return !!READ_ONCE(ep->busy_poll_usecs) ||
+ READ_ONCE(ep->prefer_busy_poll) ||
+ net_busy_loop_on();
+}
+
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
struct eventpoll *ep = p;
- return ep_events_available(ep) || busy_loop_timeout(start_time);
+ return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);
}
/*
@@ -389,13 +439,18 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
*
* we must do our busy polling with irqs enabled
*/
-static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
+static bool ep_busy_loop(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
+ u16 budget = READ_ONCE(ep->busy_poll_budget);
+ bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
- if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
- napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
- BUSY_POLL_BUDGET);
+ if (!budget)
+ budget = BUSY_POLL_BUDGET;
+
+ if (napi_id_valid(napi_id) && ep_busy_loop_on(ep)) {
+ napi_busy_loop(napi_id, ep_busy_loop_end,
+ ep, prefer_busy_poll, budget);
if (ep_events_available(ep))
return true;
/*
@@ -403,6 +458,8 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
* it back in when we have moved a socket with a valid NAPI
* ID onto the ready list.
*/
+ if (prefer_busy_poll)
+ napi_resume_irqs(napi_id);
ep->napi_id = 0;
return false;
}
@@ -414,12 +471,12 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
*/
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
- struct eventpoll *ep;
+ struct eventpoll *ep = epi->ep;
unsigned int napi_id;
struct socket *sock;
struct sock *sk;
- if (!net_busy_loop_on())
+ if (!ep_busy_loop_on(ep))
return;
sock = sock_from_file(epi->ffd.file);
@@ -431,22 +488,80 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
return;
napi_id = READ_ONCE(sk->sk_napi_id);
- ep = epi->ep;
/* Non-NAPI IDs can be rejected
* or
* Nothing to do if we already have this ID
*/
- if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
+ if (!napi_id_valid(napi_id) || napi_id == ep->napi_id)
return;
/* record NAPI ID for use in next busy poll */
ep->napi_id = napi_id;
}
+static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct eventpoll *ep = file->private_data;
+ void __user *uarg = (void __user *)arg;
+ struct epoll_params epoll_params;
+
+ switch (cmd) {
+ case EPIOCSPARAMS:
+ if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))
+ return -EFAULT;
+
+ /* pad byte must be zero */
+ if (epoll_params.__pad)
+ return -EINVAL;
+
+ if (epoll_params.busy_poll_usecs > S32_MAX)
+ return -EINVAL;
+
+ if (epoll_params.prefer_busy_poll > 1)
+ return -EINVAL;
+
+ if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);
+ WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);
+ WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);
+ return 0;
+ case EPIOCGPARAMS:
+ memset(&epoll_params, 0, sizeof(epoll_params));
+ epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);
+ epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);
+ epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
+ if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
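A hedged userspace sketch of driving the new ioctls. It assumes headers that expose struct epoll_params and EPIOCSPARAMS/EPIOCGPARAMS (the kernel uapi <linux/eventpoll.h> or a recent <sys/epoll.h>); the pad byte must stay zero and a budget above NAPI_POLL_WEIGHT needs CAP_NET_ADMIN, as checked above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>

int main(void)
{
	struct epoll_params p;		/* assumed visible via the epoll headers */
	int epfd = epoll_create1(0);

	if (epfd < 0)
		return 1;
	memset(&p, 0, sizeof(p));	/* keeps __pad zero */
	p.busy_poll_usecs = 64;		/* per-instance busy-poll timeout */
	p.busy_poll_budget = 8;		/* packets per busy-poll attempt */
	p.prefer_busy_poll = 1;
	if (ioctl(epfd, EPIOCSPARAMS, &p))
		perror("EPIOCSPARAMS");
	close(epfd);
	return 0;
}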
+
+static void ep_suspend_napi_irqs(struct eventpoll *ep)
+{
+ unsigned int napi_id = READ_ONCE(ep->napi_id);
+
+ if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
+ napi_suspend_irqs(napi_id);
+}
+
+static void ep_resume_napi_irqs(struct eventpoll *ep)
+{
+ unsigned int napi_id = READ_ONCE(ep->napi_id);
+
+ if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
+ napi_resume_irqs(napi_id);
+}
+
#else
-static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
+static inline bool ep_busy_loop(struct eventpoll *ep)
{
return false;
}
@@ -455,6 +570,20 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}
+static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static void ep_suspend_napi_irqs(struct eventpoll *ep)
+{
+}
+
+static void ep_resume_napi_irqs(struct eventpoll *ep)
+{
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
@@ -476,15 +605,16 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
* (efd1) notices that it may have some event ready, so it needs to wake up
* the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
* that ends up in another wake_up(), after having checked about the
- * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to
- * avoid stack blasting.
+ * recursion constraints. That is, no more than EP_MAX_NESTS, to avoid
+ * stack blasting.
*
* When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
* this special case of epoll.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+ unsigned pollflags)
{
struct eventpoll *ep_src;
unsigned long flags;
@@ -515,16 +645,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
}
spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
ep->nests = nests + 1;
- wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
+ wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
ep->nests = 0;
spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}
#else
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+ __poll_t pollflags)
{
- wake_up_poll(&ep->poll_wait, EPOLLIN);
+ wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
}
#endif
@@ -548,8 +679,7 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
/*
* This function unregisters poll callbacks from the associated file
- * descriptor. Must be called with "mtx" held (or "epmutex" if called from
- * ep_free).
+ * descriptor. Must be called with "mtx" held.
*/
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
@@ -611,10 +741,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
* in a lockless way.
*/
lockdep_assert_irqs_enabled();
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
list_splice_init(&ep->rdllist, txlist);
WRITE_ONCE(ep->ovflist, NULL);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
static void ep_done_scan(struct eventpoll *ep,
@@ -622,7 +752,7 @@ static void ep_done_scan(struct eventpoll *ep,
{
struct epitem *epi, *nepi;
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
@@ -663,20 +793,44 @@ static void ep_done_scan(struct eventpoll *ep,
wake_up(&ep->wq);
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
-static void epi_rcu_free(struct rcu_head *head)
+static void ep_get(struct eventpoll *ep)
{
- struct epitem *epi = container_of(head, struct epitem, rcu);
- kmem_cache_free(epi_cache, epi);
+ refcount_inc(&ep->refcount);
+}
+
+/*
+ * Returns true if the event poll can be disposed
+ */
+static bool ep_refcount_dec_and_test(struct eventpoll *ep)
+{
+ if (!refcount_dec_and_test(&ep->refcount))
+ return false;
+
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
+ return true;
+}
+
+static void ep_free(struct eventpoll *ep)
+{
+ ep_resume_napi_irqs(ep);
+ mutex_destroy(&ep->mtx);
+ free_uid(ep->user);
+ wakeup_source_unregister(ep->ws);
+ kfree(ep);
}
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
+ * If the dying flag is set, do the removal only if force is true.
+ * This prevents ep_clear_and_put() from dropping all the ep references
+ * while running concurrently with eventpoll_release_file().
+ * Returns true if the eventpoll can be disposed.
*/
-static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
{
struct file *file = epi->ffd.file;
struct epitems_head *to_free;
@@ -691,10 +845,16 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
/* Remove the current item from the list of epoll hooks */
spin_lock(&file->f_lock);
+ if (epi->dying && !force) {
+ spin_unlock(&file->f_lock);
+ return false;
+ }
+
to_free = NULL;
head = file->f_ep;
if (head->first == &epi->fllink && !epi->fllink.next) {
- file->f_ep = NULL;
+ /* See eventpoll_release() for details. */
+ WRITE_ONCE(file->f_ep, NULL);
if (!is_file_epoll(file)) {
struct epitems_head *v;
v = container_of(head, struct epitems_head, epitems);
@@ -708,10 +868,10 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
rb_erase_cached(&epi->rbn, &ep->rbr);
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
@@ -721,31 +881,31 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
* ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
* use of the rbn field.
*/
- call_rcu(&epi->rcu, epi_rcu_free);
+ kfree_rcu(epi, rcu);
percpu_counter_dec(&ep->user->epoll_watches);
+ return true;
+}
- return 0;
+/*
+ * ep_remove variant for callers owning an additional reference to the ep
+ */
+static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
+{
+ if (__ep_remove(ep, epi, false))
+ WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
}
-static void ep_free(struct eventpoll *ep)
+static void ep_clear_and_put(struct eventpoll *ep)
{
- struct rb_node *rbp;
+ struct rb_node *rbp, *next;
struct epitem *epi;
/* We need to release all tasks waiting for these file */
if (waitqueue_active(&ep->poll_wait))
- ep_poll_safewake(ep, NULL);
+ ep_poll_safewake(ep, NULL, 0);
- /*
- * We need to lock this because we could be hit by
- * eventpoll_release_file() while we're freeing the "struct eventpoll".
- * We do not need to hold "ep->mtx" here because the epoll file
- * is on the way to be removed and no one has references to it
- * anymore. The only hit might come from eventpoll_release_file() but
- * holding "epmutex" is sufficient here.
- */
- mutex_lock(&epmutex);
+ mutex_lock(&ep->mtx);
/*
* Walks through the whole tree by unregistering poll callbacks.
@@ -758,26 +918,44 @@ static void ep_free(struct eventpoll *ep)
}
/*
- * Walks through the whole tree by freeing each "struct epitem". At this
- * point we are sure no poll callbacks will be lingering around, and also by
- * holding "epmutex" we can be sure that no file cleanup code will hit
- * us during this operation. So we can avoid the lock on "ep->lock".
- * We do not need to lock ep->mtx, either, we only do it to prevent
- * a lockdep warning.
+	 * Walks through the whole tree and tries to free each "struct epitem".
+ * Note that ep_remove_safe() will not remove the epitem in case of a
+ * racing eventpoll_release_file(); the latter will do the removal.
+ * At this point we are sure no poll callbacks will be lingering around.
+ * Since we still own a reference to the eventpoll struct, the loop can't
+ * dispose it.
*/
- mutex_lock(&ep->mtx);
- while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
+ for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
+ next = rb_next(rbp);
epi = rb_entry(rbp, struct epitem, rbn);
- ep_remove(ep, epi);
+ ep_remove_safe(ep, epi);
cond_resched();
}
+
mutex_unlock(&ep->mtx);
+ if (ep_refcount_dec_and_test(ep))
+ ep_free(ep);
+}
- mutex_unlock(&epmutex);
- mutex_destroy(&ep->mtx);
- free_uid(ep->user);
- wakeup_source_unregister(ep->ws);
- kfree(ep);
+static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ if (!is_file_epoll(file))
+ return -EINVAL;
+
+ switch (cmd) {
+ case EPIOCSPARAMS:
+ case EPIOCGPARAMS:
+ ret = ep_eventpoll_bp_ioctl(file, cmd, arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
@@ -785,7 +963,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
struct eventpoll *ep = file->private_data;
if (ep)
- ep_free(ep);
+ ep_clear_and_put(ep);
return 0;
}
@@ -831,6 +1009,34 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
}
/*
+ * The ffd.file pointer may be in the process of being torn down due to
+ * being closed, but we may not have finished eventpoll_release() yet.
+ *
+ * Normally, even with the atomic_long_inc_not_zero, the file may have
+ * been free'd and then gotten re-allocated to something else (since
+ * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
+ *
+ * But for epoll, users hold the ep->mtx mutex, and as such any file in
+ * the process of being free'd will block in eventpoll_release_file()
+ * and thus the underlying file allocation will not be free'd, and the
+ * file re-use cannot happen.
+ *
+ * For the same reason we can avoid a rcu_read_lock() around the
+ * operation - 'ffd.file' cannot go away even if the refcount has
+ * reached zero (but we must still not call out to ->poll() functions
+ * etc).
+ */
+static struct file *epi_fget(const struct epitem *epi)
+{
+ struct file *file;
+
+ file = epi->ffd.file;
+ if (!file_ref_get(&file->f_ref))
+ file = NULL;
+ return file;
+}
+
+/*
* Differs from ep_eventpoll_poll() in that internal callers already have
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
@@ -838,14 +1044,22 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
int depth)
{
- struct file *file = epi->ffd.file;
+ struct file *file = epi_fget(epi);
__poll_t res;
+ /*
+ * We could return EPOLLERR | EPOLLHUP or something, but let's
+ * treat this more as "file doesn't exist, poll didn't happen".
+ */
+ if (!file)
+ return 0;
+
pt->_key = epi->event.events;
if (!is_file_epoll(file))
res = vfs_poll(file, pt);
else
res = __ep_eventpoll_poll(file, pt, depth);
+ fput(file);
return res & epi->event.events;
}
@@ -886,6 +1100,8 @@ static const struct file_operations eventpoll_fops = {
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
+ .unlocked_ioctl = ep_eventpoll_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/*
@@ -897,63 +1113,57 @@ void eventpoll_release_file(struct file *file)
{
struct eventpoll *ep;
struct epitem *epi;
- struct hlist_node *next;
+ bool dispose;
/*
- * We don't want to get "file->f_lock" because it is not
- * necessary. It is not necessary because we're in the "struct file"
- * cleanup path, and this means that no one is using this file anymore.
- * So, for example, epoll_ctl() cannot hit here since if we reach this
- * point, the file counter already went to zero and fget() would fail.
- * The only hit might come from ep_free() but by holding the mutex
- * will correctly serialize the operation. We do need to acquire
- * "ep->mtx" after "epmutex" because ep_remove() requires it when called
- * from anywhere but ep_free().
- *
- * Besides, ep_remove() acquires the lock, so we can't hold it here.
+ * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
+ * touching the epitems list before eventpoll_release_file() can access
+ * the ep->mtx.
*/
- mutex_lock(&epmutex);
- if (unlikely(!file->f_ep)) {
- mutex_unlock(&epmutex);
- return;
- }
- hlist_for_each_entry_safe(epi, next, file->f_ep, fllink) {
+again:
+ spin_lock(&file->f_lock);
+ if (file->f_ep && file->f_ep->first) {
+ epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
+ epi->dying = true;
+ spin_unlock(&file->f_lock);
+
+ /*
+ * ep access is safe as we still own a reference to the ep
+ * struct
+ */
ep = epi->ep;
- mutex_lock_nested(&ep->mtx, 0);
- ep_remove(ep, epi);
+ mutex_lock(&ep->mtx);
+ dispose = __ep_remove(ep, epi, true);
mutex_unlock(&ep->mtx);
+
+ if (dispose && ep_refcount_dec_and_test(ep))
+ ep_free(ep);
+ goto again;
}
- mutex_unlock(&epmutex);
+ spin_unlock(&file->f_lock);
}
static int ep_alloc(struct eventpoll **pep)
{
- int error;
- struct user_struct *user;
struct eventpoll *ep;
- user = get_current_user();
- error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
- goto free_uid;
+ return -ENOMEM;
mutex_init(&ep->mtx);
- rwlock_init(&ep->lock);
+ spin_lock_init(&ep->lock);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT_CACHED;
ep->ovflist = EP_UNACTIVE_PTR;
- ep->user = user;
+ ep->user = get_current_user();
+ refcount_set(&ep->refcount, 1);
*pep = ep;
return 0;
-
-free_uid:
- free_uid(user);
- return error;
}
/*
@@ -1030,99 +1240,9 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
#endif /* CONFIG_KCMP */
/*
- * Adds a new entry to the tail of the list in a lockless way, i.e.
- * multiple CPUs are allowed to call this function concurrently.
- *
- * Beware: it is necessary to prevent any other modifications of the
- * existing list until all changes are completed, in other words
- * concurrent list_add_tail_lockless() calls should be protected
- * with a read lock, where write lock acts as a barrier which
- * makes sure all list_add_tail_lockless() calls are fully
- * completed.
- *
- * Also an element can be locklessly added to the list only in one
- * direction i.e. either to the tail or to the head, otherwise
- * concurrent access will corrupt the list.
- *
- * Return: %false if element has been already added to the list, %true
- * otherwise.
- */
-static inline bool list_add_tail_lockless(struct list_head *new,
- struct list_head *head)
-{
- struct list_head *prev;
-
- /*
- * This is simple 'new->next = head' operation, but cmpxchg()
- * is used in order to detect that same element has been just
- * added to the list from another CPU: the winner observes
- * new->next == new.
- */
- if (cmpxchg(&new->next, new, head) != new)
- return false;
-
- /*
- * Initially ->next of a new element must be updated with the head
- * (we are inserting to the tail) and only then pointers are atomically
- * exchanged. XCHG guarantees memory ordering, thus ->next should be
- * updated before pointers are actually swapped and pointers are
- * swapped before prev->next is updated.
- */
-
- prev = xchg(&head->prev, new);
-
- /*
- * It is safe to modify prev->next and new->prev, because a new element
- * is added only to the tail and new->next is updated before XCHG.
- */
-
- prev->next = new;
- new->prev = prev;
-
- return true;
-}
-
-/*
- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
- * i.e. multiple CPUs are allowed to call this function concurrently.
- *
- * Return: %false if epi element has been already chained, %true otherwise.
- */
-static inline bool chain_epi_lockless(struct epitem *epi)
-{
- struct eventpoll *ep = epi->ep;
-
- /* Fast preliminary check */
- if (epi->next != EP_UNACTIVE_PTR)
- return false;
-
- /* Check that the same epi has not been just chained from another CPU */
- if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
- return false;
-
- /* Atomically exchange tail */
- epi->next = xchg(&ep->ovflist, epi);
-
- return true;
-}
-
-/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
- *
- * This callback takes a read lock in order not to contend with concurrent
- * events from another file descriptor, thus all modifications to ->rdllist
- * or ->ovflist are lockless. Read lock is paired with the write lock from
- * ep_scan_ready_list(), which stops all list modifications and guarantees
- * that lists state is seen correctly.
- *
- * Another thing worth to mention is that ep_poll_callback() can be called
- * concurrently for the same @epi from different CPUs if poll table was inited
- * with several wait queues entries. Plural wakeup from different CPUs of a
- * single wait queue is serialized by wq.lock, but the case when multiple wait
- * queues are used should be detected accordingly. This is detected using
- * cmpxchg() operation.
*/
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
@@ -1133,7 +1253,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
unsigned long flags;
int ewake = 0;
- read_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
ep_set_busy_poll_napi_id(epi);
@@ -1162,12 +1282,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* chained in ep->ovflist and requeued later on.
*/
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
- if (chain_epi_lockless(epi))
+ if (epi->next == EP_UNACTIVE_PTR) {
+ epi->next = READ_ONCE(ep->ovflist);
+ WRITE_ONCE(ep->ovflist, epi);
ep_pm_stay_awake_rcu(epi);
+ }
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
- if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
- ep_pm_stay_awake_rcu(epi);
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+ ep_pm_stay_awake_rcu(epi);
}
/*
@@ -1191,17 +1314,20 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
break;
}
}
- wake_up(&ep->wq);
+ if (sync)
+ wake_up_sync(&ep->wq);
+ else
+ wake_up(&ep->wq);
}
if (waitqueue_active(&ep->poll_wait))
pwake++;
out_unlock:
- read_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
- ep_poll_safewake(ep, epi);
+ ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
if (!(epi->event.events & EPOLLEXCLUSIVE))
ewake = 1;
@@ -1214,10 +1340,10 @@ out_unlock:
*/
list_del_init(&wait->entry);
/*
- * ->whead != NULL protects us from the race with ep_free()
- * or ep_remove(), ep_remove_wait_queue() takes whead->lock
- * held by the caller. Once we nullify it, nothing protects
- * ep/epi or even wait.
+ * ->whead != NULL protects us from the race with
+ * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
+ * takes whead->lock held by the caller. Once we nullify it,
+ * nothing protects ep/epi or even wait.
*/
smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
}
@@ -1289,7 +1415,7 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
* is connected to n file sources. In this case each file source has 1 path
* of length 1. Thus, the numbers below should be more than sufficient. These
* path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
- * and delete can't add additional paths. Protected by the epmutex.
+ * and delete can't add additional paths. Protected by the epnested_mutex.
*/
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];
@@ -1422,7 +1548,8 @@ allocate:
spin_unlock(&file->f_lock);
goto allocate;
}
- file->f_ep = head;
+ /* See eventpoll_release() for details. */
+ WRITE_ONCE(file->f_ep, head);
to_free = NULL;
}
hlist_add_head_rcu(&epi->fllink, file->f_ep);
@@ -1487,16 +1614,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
if (tep)
mutex_unlock(&tep->mtx);
+ /*
+ * ep_remove_safe() calls in the later error paths can't lead to
+ * ep_free() as the ep file itself still holds an ep reference.
+ */
+ ep_get(ep);
+
/* now check if we've created too many backpaths */
if (unlikely(full_check && reverse_path_check())) {
- ep_remove(ep, epi);
+ ep_remove_safe(ep, epi);
return -EINVAL;
}
if (epi->event.events & EPOLLWAKEUP) {
error = ep_create_wakeup_source(epi);
if (error) {
- ep_remove(ep, epi);
+ ep_remove_safe(ep, epi);
return error;
}
}
@@ -1520,12 +1653,12 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
* high memory pressure.
*/
if (unlikely(!epq.epi)) {
- ep_remove(ep, epi);
+ ep_remove_safe(ep, epi);
return -ENOMEM;
}
/* We have to drop the new item inside our item list to keep track of it */
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
@@ -1542,11 +1675,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++;
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
/* We have to call this outside the lock */
if (pwake)
- ep_poll_safewake(ep, NULL);
+ ep_poll_safewake(ep, NULL, 0);
return 0;
}
@@ -1606,7 +1739,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1617,12 +1750,12 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
if (pwake)
- ep_poll_safewake(ep, NULL);
+ ep_poll_safewake(ep, NULL, 0);
return 0;
}
@@ -1706,7 +1839,7 @@ static int ep_send_events(struct eventpoll *ep,
* availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
- * ep_scan_ready_list() holding "mtx" and the
+ * ep_send_events() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -1740,6 +1873,49 @@ static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
return to;
}
+/*
+ * autoremove_wake_function, but remove even on failure to wake up, because we
+ * know that default_wake_function/ttwu will only fail if the thread is already
+ * woken, and in that case the ep_poll loop will remove the entry anyway, not
+ * try to reuse it.
+ */
+static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
+ unsigned int mode, int sync, void *key)
+{
+ int ret = default_wake_function(wq_entry, mode, sync, key);
+
+ /*
+ * Pairs with list_empty_careful in ep_poll, and ensures future loop
+ * iterations see the cause of this wakeup.
+ */
+ list_del_init_careful(&wq_entry->entry);
+ return ret;
+}
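For reference, ep_poll() below installs this callback by overriding the wait entry's default function right after init_wait():

	init_wait(&wait);
	wait.func = ep_autoremove_wake_function;

so every wakeup unhooks the entry, and the later list_empty_careful(&wait.entry) check can tell a genuine wakeup from a plain timeout.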
+
+static int ep_try_send_events(struct eventpoll *ep,
+ struct epoll_event __user *events, int maxevents)
+{
+ int res;
+
+ /*
+ * Try to transfer events to user space. In case we get 0 events and
+ * there's still timeout left over, we go trying again in search of
+ * more luck.
+ */
+ res = ep_send_events(ep, events, maxevents);
+ if (res > 0)
+ ep_suspend_napi_irqs(ep);
+ return res;
+}
+
+static int ep_schedule_timeout(ktime_t *to)
+{
+ if (to)
+ return ktime_after(*to, ktime_get());
+ else
+ return 1;
+}
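ep_schedule_timeout() pairs with the short-circuit that ep_poll() uses further down:

	timed_out = !ep_schedule_timeout(to) ||
		    !schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS);

If the absolute deadline in *to has already passed, the first term reports a timeout without ever entering schedule_hrtimeout_range(); a NULL timeout (wait forever) always returns 1 and proceeds to sleep.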
+
/**
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
* event buffer.
@@ -1791,12 +1967,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
while (1) {
if (eavail) {
- /*
- * Try to transfer events to user space. In case we get
- * 0 events and there's still timeout left over, we go
- * trying again in search of more luck.
- */
- res = ep_send_events(ep, events, maxevents);
+ res = ep_try_send_events(ep, events, maxevents);
if (res)
return res;
}
@@ -1804,7 +1975,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (timed_out)
return 0;
- eavail = ep_busy_loop(ep, timed_out);
+ eavail = ep_busy_loop(ep);
if (eavail)
continue;
@@ -1821,10 +1992,17 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
* normal wakeup path no need to call __remove_wait_queue()
* explicitly, thus ep->lock is not taken, which halts the
* event delivery.
+ *
+ * In fact, we now use an even more aggressive function that
+ * unconditionally removes, because we don't reuse the wait
+ * entry between loop iterations. This lets us also avoid the
+ * performance issue if a process is killed, causing all of its
+ * threads to wake up without being removed normally.
*/
init_wait(&wait);
+ wait.func = ep_autoremove_wake_function;
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* Barrierless variant, waitqueue_active() is called under
* the same lock on wakeup ep_poll_callback() side, so it
@@ -1833,7 +2011,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
__set_current_state(TASK_INTERRUPTIBLE);
/*
- * Do the final check under the lock. ep_scan_ready_list()
+ * Do the final check under the lock. ep_start/done_scan()
* plays with two lists (->rdllist and ->ovflist) and there
* is always a race when both lists are empty for short
* period of time although events are pending, so lock is
@@ -1843,11 +2021,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (!eavail)
__add_wait_queue_exclusive(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
if (!eavail)
- timed_out = !schedule_hrtimeout_range(to, slack,
- HRTIMER_MODE_ABS);
+ timed_out = !ep_schedule_timeout(to) ||
+ !schedule_hrtimeout_range(to, slack,
+ HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING);
/*
@@ -1858,7 +2037,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
eavail = 1;
if (!list_empty_careful(&wait.entry)) {
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* If the thread timed out and is not on the wait queue,
* it means that the thread was woken up after its
@@ -1869,29 +2048,30 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (timed_out)
eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
}
}
/**
- * ep_loop_check_proc - verify that adding an epoll file inside another
- * epoll structure does not violate the constraints, in
- * terms of closed loops, or too deep chains (which can
- * result in excessive stack usage).
+ * ep_loop_check_proc - verify that adding an epoll file @ep inside another
+ * epoll file does not create closed loops, and
+ * determine the depth of the subtree starting at @ep
*
* @ep: the &struct eventpoll to be currently checked.
* @depth: Current depth of the path being checked.
*
- * Return: %zero if adding the epoll @file inside current epoll
- * structure @ep does not violate the constraints, or %-1 otherwise.
+ * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
*/
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
{
- int error = 0;
+ int result = 0;
struct rb_node *rbp;
struct epitem *epi;
+ if (ep->gen == loop_check_gen)
+ return ep->loop_check_depth;
+
mutex_lock_nested(&ep->mtx, depth + 1);
ep->gen = loop_check_gen;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
@@ -1899,13 +2079,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
if (unlikely(is_file_epoll(epi->ffd.file))) {
struct eventpoll *ep_tovisit;
ep_tovisit = epi->ffd.file->private_data;
- if (ep_tovisit->gen == loop_check_gen)
- continue;
if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
- error = -1;
+ result = INT_MAX;
else
- error = ep_loop_check_proc(ep_tovisit, depth + 1);
- if (error != 0)
+ result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
+ if (result > EP_MAX_NESTS)
break;
} else {
/*
@@ -1919,9 +2097,25 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
list_file(epi->ffd.file);
}
}
+ ep->loop_check_depth = result;
mutex_unlock(&ep->mtx);
- return error;
+ return result;
+}
+
+/* ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards */
+static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
+{
+ int result = 0;
+ struct epitem *epi;
+
+ if (ep->gen == loop_check_gen)
+ return ep->loop_check_depth;
+ hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
+ result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
+ ep->gen = loop_check_gen;
+ ep->loop_check_depth = result;
+ return result;
}
/**
@@ -1937,8 +2131,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
*/
static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
{
+ int depth, upwards_depth;
+
inserting_into = ep;
- return ep_loop_check_proc(to, 0);
+ /*
+ * Check how deep down we can get from @to, and whether it is possible
+ * to loop up to @ep.
+ */
+ depth = ep_loop_check_proc(to, 0);
+ if (depth > EP_MAX_NESTS)
+ return -1;
+ /* Check how far up we can go from @ep. */
+ rcu_read_lock();
+ upwards_depth = ep_get_upwards_depth_proc(ep, 0);
+ rcu_read_unlock();
+
+ return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0;
}
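A quick worked example of the arithmetic above (assuming EP_MAX_NESTS is 4, as in mainline): when epoll fd B is added to epoll fd A, ep_loop_check_proc(to, 0) measures how deep the chain below B goes, and ep_get_upwards_depth_proc(ep, 0) measures how many epoll files already sit above A. With a two-level chain under B (depth 2) and one watcher above A (upwards depth 1), depth + 1 + upwards_depth = 2 + 1 + 1 = 4, which is still allowed; one more level on either side exceeds EP_MAX_NESTS and do_epoll_ctl() fails the add with -ELOOP.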
static void clear_tfile_check_list(void)
@@ -1957,9 +2165,8 @@ static void clear_tfile_check_list(void)
*/
static int do_epoll_create(int flags)
{
- int error, fd;
- struct eventpoll *ep = NULL;
- struct file *file;
+ int error;
+ struct eventpoll *ep;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
@@ -1976,26 +2183,15 @@ static int do_epoll_create(int flags)
* Creates all the items needed to setup an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
- fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
- if (fd < 0) {
- error = fd;
- goto out_free_ep;
+ FD_PREPARE(fdf, O_RDWR | (flags & O_CLOEXEC),
+ anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
+ O_RDWR | (flags & O_CLOEXEC)));
+ if (fdf.err) {
+ ep_clear_and_put(ep);
+ return fdf.err;
}
- file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
- O_RDWR | (flags & O_CLOEXEC));
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto out_free_fd;
- }
- ep->file = file;
- fd_install(fd, file);
- return fd;
-
-out_free_fd:
- put_unused_fd(fd);
-out_free_ep:
- ep_free(ep);
- return error;
+ ep->file = fd_prepare_file(fdf);
+ return fd_publish(fdf);
}
SYSCALL_DEFINE1(epoll_create1, int, flags)
@@ -2011,6 +2207,19 @@ SYSCALL_DEFINE1(epoll_create, int, size)
return do_epoll_create(0);
}
+#ifdef CONFIG_PM_SLEEP
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+ if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
+ epev->events &= ~EPOLLWAKEUP;
+}
+#else
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+ epev->events &= ~EPOLLWAKEUP;
+}
+#endif
+
static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
bool nonblock)
{
@@ -2028,25 +2237,22 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
{
int error;
int full_check = 0;
- struct fd f, tf;
struct eventpoll *ep;
struct epitem *epi;
struct eventpoll *tep = NULL;
- error = -EBADF;
- f = fdget(epfd);
- if (!f.file)
- goto error_return;
+ CLASS(fd, f)(epfd);
+ if (fd_empty(f))
+ return -EBADF;
/* Get the "struct file *" for the target file */
- tf = fdget(fd);
- if (!tf.file)
- goto error_fput;
+ CLASS(fd, tf)(fd);
+ if (fd_empty(tf))
+ return -EBADF;
/* The target file descriptor must support poll */
- error = -EPERM;
- if (!file_can_poll(tf.file))
- goto error_tgt_fput;
+ if (!file_can_poll(fd_file(tf)))
+ return -EPERM;
/* Check if EPOLLWAKEUP is allowed */
if (ep_op_has_event(op))
@@ -2058,7 +2264,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
- if (f.file == tf.file || !is_file_epoll(f.file))
+ if (fd_file(f) == fd_file(tf) || !is_file_epoll(fd_file(f)))
goto error_tgt_fput;
/*
@@ -2069,7 +2275,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
goto error_tgt_fput;
- if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
+ if (op == EPOLL_CTL_ADD && (is_file_epoll(fd_file(tf)) ||
(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
}
@@ -2078,7 +2284,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = f.file->private_data;
+ ep = fd_file(f)->private_data;
/*
* When we insert an epoll file descriptor inside another epoll file
@@ -2091,7 +2297,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* We do not need to take the global 'epnested_mutex' on EPOLL_CTL_ADD when
* the epoll file descriptor is attaching directly to a wakeup source,
* unless the epoll file descriptor is nested. The purpose of taking the
- * 'epmutex' on add is to prevent complex toplogies such as loops and
+ * 'epnested_mutex' on add is to prevent complex topologies such as loops and
* deep wakeup paths from forming in parallel through multiple
* EPOLL_CTL_ADD operations.
*/
@@ -2099,16 +2305,16 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
if (error)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD) {
- if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
- is_file_epoll(tf.file)) {
+ if (READ_ONCE(fd_file(f)->f_ep) || ep->gen == loop_check_gen ||
+ is_file_epoll(fd_file(tf))) {
mutex_unlock(&ep->mtx);
- error = epoll_mutex_lock(&epmutex, 0, nonblock);
+ error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
if (error)
goto error_tgt_fput;
loop_check_gen++;
full_check = 1;
- if (is_file_epoll(tf.file)) {
- tep = tf.file->private_data;
+ if (is_file_epoll(fd_file(tf))) {
+ tep = fd_file(tf)->private_data;
error = -ELOOP;
if (ep_loop_check(ep, tep) != 0)
goto error_tgt_fput;
@@ -2124,22 +2330,28 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
- epi = ep_find(ep, tf.file, fd);
+ epi = ep_find(ep, fd_file(tf), fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds->events |= EPOLLERR | EPOLLHUP;
- error = ep_insert(ep, epds, tf.file, fd, full_check);
+ error = ep_insert(ep, epds, fd_file(tf), fd, full_check);
} else
error = -EEXIST;
break;
case EPOLL_CTL_DEL:
- if (epi)
- error = ep_remove(ep, epi);
- else
+ if (epi) {
+ /*
+ * The eventpoll itself is still alive: the refcount
+ * can't go to zero here.
+ */
+ ep_remove_safe(ep, epi);
+ error = 0;
+ } else {
error = -ENOENT;
+ }
break;
case EPOLL_CTL_MOD:
if (epi) {
@@ -2157,14 +2369,8 @@ error_tgt_fput:
if (full_check) {
clear_tfile_check_list();
loop_check_gen++;
- mutex_unlock(&epmutex);
+ mutex_unlock(&epnested_mutex);
}
-
- fdput(tf);
-error_fput:
- fdput(f);
-error_return:
-
return error;
}
@@ -2185,50 +2391,74 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
return do_epoll_ctl(epfd, op, fd, &epds, false);
}
-/*
- * Implement the event wait interface for the eventpoll file. It is the kernel
- * part of the user space epoll_wait(2).
- */
-static int do_epoll_wait(int epfd, struct epoll_event __user *events,
- int maxevents, struct timespec64 *to)
+static int ep_check_params(struct file *file, struct epoll_event __user *evs,
+ int maxevents)
{
- int error;
- struct fd f;
- struct eventpoll *ep;
-
/* The maximum number of event must be greater than zero */
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
- if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
+ if (!access_ok(evs, maxevents * sizeof(struct epoll_event)))
return -EFAULT;
- /* Get the "struct file *" for the eventpoll file */
- f = fdget(epfd);
- if (!f.file)
- return -EBADF;
-
/*
* We have to check that the file structure underneath the fd
* the user passed to us _is_ an eventpoll file.
*/
- error = -EINVAL;
- if (!is_file_epoll(f.file))
- goto error_fput;
+ if (!is_file_epoll(file))
+ return -EINVAL;
+
+ return 0;
+}
+
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents)
+{
+ struct eventpoll *ep;
+ int ret;
+
+ ret = ep_check_params(file, events, maxevents);
+ if (unlikely(ret))
+ return ret;
+
+ ep = file->private_data;
+ /*
+ * Racy call, but that's ok - it should get retried based on
+ * poll readiness anyway.
+ */
+ if (ep_events_available(ep))
+ return ep_try_send_events(ep, events, maxevents);
+ return 0;
+}
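epoll_sendevents() takes a struct file rather than a descriptor, so it reads as an in-kernel entry point for a caller that already holds the epoll file (an io_uring-style user is a guess here, not something this hunk shows). A hypothetical caller, with 'file', 'uevents' and 'maxevents' as placeholders, would look like:

	ret = epoll_sendevents(file, uevents, maxevents);
	if (ret < 0)
		return ret;	/* bad parameters or copy failure */
	/* 0 means nothing ready right now; rely on poll readiness to retry */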
+
+/*
+ * Implement the event wait interface for the eventpoll file. It is the kernel
+ * part of the user space epoll_wait(2).
+ */
+static int do_epoll_wait(int epfd, struct epoll_event __user *events,
+ int maxevents, struct timespec64 *to)
+{
+ struct eventpoll *ep;
+ int ret;
+
+ /* Get the "struct file *" for the eventpoll file */
+ CLASS(fd, f)(epfd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ ret = ep_check_params(fd_file(f), events, maxevents);
+ if (unlikely(ret))
+ return ret;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = f.file->private_data;
+ ep = fd_file(f)->private_data;
/* Time to fish for events ... */
- error = ep_poll(ep, events, maxevents, to);
-
-error_fput:
- fdput(f);
- return error;
+ return ep_poll(ep, events, maxevents, to);
}
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
@@ -2378,6 +2608,7 @@ static int __init eventpoll_init(void)
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
+ epoll_sysctls_init();
ephead_cache = kmem_cache_create("ep_head",
sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
diff --git a/fs/exec.c b/fs/exec.c
index 537d92c41105..9d5ebc9d15b0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -28,7 +28,6 @@
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
-#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
@@ -56,7 +55,6 @@
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
-#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
@@ -65,6 +63,11 @@
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>
+#include <linux/coredump.h>
+#include <linux/time_namespace.h>
+#include <linux/user_events.h>
+#include <linux/rseq.h>
+#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -75,6 +78,9 @@
#include <trace/events/sched.h>
+/* For vma exec functions. */
+#include "../mm/internal.h"
+
static int bprm_creds_from_file(struct linux_binprm *bprm);
int suid_dumpable = 0;
@@ -108,74 +114,13 @@ static inline void put_binfmt(struct linux_binfmt * fmt)
bool path_noexec(const struct path *path)
{
+ /* If it's an anonymous inode make sure that we catch any shenanigans. */
+ VFS_WARN_ON_ONCE(IS_ANON_FILE(d_inode(path->dentry)) &&
+ !(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC));
return (path->mnt->mnt_flags & MNT_NOEXEC) ||
(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
-#ifdef CONFIG_USELIB
-/*
- * Note that a shared library must be both readable and executable due to
- * security reasons.
- *
- * Also note that we take the address to load from from the file itself.
- */
-SYSCALL_DEFINE1(uselib, const char __user *, library)
-{
- struct linux_binfmt *fmt;
- struct file *file;
- struct filename *tmp = getname(library);
- int error = PTR_ERR(tmp);
- static const struct open_flags uselib_flags = {
- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
- .acc_mode = MAY_READ | MAY_EXEC,
- .intent = LOOKUP_OPEN,
- .lookup_flags = LOOKUP_FOLLOW,
- };
-
- if (IS_ERR(tmp))
- goto out;
-
- file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
- putname(tmp);
- error = PTR_ERR(file);
- if (IS_ERR(file))
- goto out;
-
- /*
- * may_open() has already checked for this, so it should be
- * impossible to trip now. But we need to be extra cautious
- * and check again at the very end too.
- */
- error = -EACCES;
- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
- path_noexec(&file->f_path)))
- goto exit;
-
- fsnotify_open(file);
-
- error = -ENOEXEC;
-
- read_lock(&binfmt_lock);
- list_for_each_entry(fmt, &formats, lh) {
- if (!fmt->load_shlib)
- continue;
- if (!try_module_get(fmt->module))
- continue;
- read_unlock(&binfmt_lock);
- error = fmt->load_shlib(file);
- read_lock(&binfmt_lock);
- put_binfmt(fmt);
- if (error != -ENOEXEC)
- break;
- }
- read_unlock(&binfmt_lock);
-exit:
- fput(file);
-out:
- return error;
-}
-#endif /* #ifdef CONFIG_USELIB */
-
#ifdef CONFIG_MMU
/*
* The nascent bprm->mm is not visible until exec_mmap() but it can
@@ -199,33 +144,31 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
+ struct vm_area_struct *vma = bprm->vma;
+ struct mm_struct *mm = bprm->mm;
int ret;
- unsigned int gup_flags = FOLL_FORCE;
-
-#ifdef CONFIG_STACK_GROWSUP
- if (write) {
- ret = expand_downwards(bprm->vma, pos);
- if (ret < 0)
- return NULL;
- }
-#endif
- if (write)
- gup_flags |= FOLL_WRITE;
+ /*
+ * Avoid relying on expanding the stack down in GUP (which
+ * does not work for STACK_GROWSUP anyway), and just do it
+ * ahead of time.
+ */
+ if (!mmap_read_lock_maybe_expand(mm, vma, pos, write))
+ return NULL;
/*
* We are doing an exec(). 'current' is the process
- * doing the exec and bprm->mm is the new process's mm.
+ * doing the exec and 'mm' is the new process's mm.
*/
- mmap_read_lock(bprm->mm);
- ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
- &page, NULL, NULL);
- mmap_read_unlock(bprm->mm);
+ ret = get_user_pages_remote(mm, pos, 1,
+ write ? FOLL_WRITE : 0,
+ &page, NULL);
+ mmap_read_unlock(mm);
if (ret <= 0)
return NULL;
if (write)
- acct_arg_size(bprm, vma_pages(bprm->vma));
+ acct_arg_size(bprm, vma_pages(vma));
return page;
}
@@ -245,50 +188,6 @@ static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
-static int __bprm_mm_init(struct linux_binprm *bprm)
-{
- int err;
- struct vm_area_struct *vma = NULL;
- struct mm_struct *mm = bprm->mm;
-
- bprm->vma = vma = vm_area_alloc(mm);
- if (!vma)
- return -ENOMEM;
- vma_set_anonymous(vma);
-
- if (mmap_write_lock_killable(mm)) {
- err = -EINTR;
- goto err_free;
- }
-
- /*
- * Place the stack at the largest stack address the architecture
- * supports. Later, we'll move this to an appropriate place. We don't
- * use STACK_TOP because that can depend on attributes which aren't
- * configured yet.
- */
- BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
- vma->vm_end = STACK_TOP_MAX;
- vma->vm_start = vma->vm_end - PAGE_SIZE;
- vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
- err = insert_vm_struct(mm, vma);
- if (err)
- goto err;
-
- mm->stack_vm = mm->total_vm = 1;
- mmap_write_unlock(mm);
- bprm->p = vma->vm_end - sizeof(void *);
- return 0;
-err:
- mmap_write_unlock(mm);
-err_free:
- bprm->vma = NULL;
- vm_area_free(vma);
- return err;
-}
-
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= MAX_ARG_STRLEN;
@@ -341,12 +240,6 @@ static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
{
}
-static int __bprm_mm_init(struct linux_binprm *bprm)
-{
- bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
- return 0;
-}
-
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= bprm->p;
@@ -375,9 +268,13 @@ static int bprm_mm_init(struct linux_binprm *bprm)
bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
task_unlock(current->group_leader);
- err = __bprm_mm_init(bprm);
+#ifndef CONFIG_MMU
+ bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
+#else
+ err = create_init_stack_vma(bprm->mm, &bprm->vma, &bprm->p);
if (err)
goto err;
+#endif
return 0;
@@ -469,6 +366,35 @@ static int count_strings_kernel(const char *const *argv)
return i;
}
+static inline int bprm_set_stack_limit(struct linux_binprm *bprm,
+ unsigned long limit)
+{
+#ifdef CONFIG_MMU
+ /* Avoid a pathological bprm->p. */
+ if (bprm->p < limit)
+ return -E2BIG;
+ bprm->argmin = bprm->p - limit;
+#endif
+ return 0;
+}
+static inline bool bprm_hit_stack_limit(struct linux_binprm *bprm)
+{
+#ifdef CONFIG_MMU
+ return bprm->p < bprm->argmin;
+#else
+ return false;
+#endif
+}
+
+/*
+ * Calculate bprm->argmin from:
+ * - _STK_LIM
+ * - ARG_MAX
+ * - bprm->rlim_stack.rlim_cur
+ * - bprm->argc
+ * - bprm->envc
+ * - bprm->p
+ */
static int bprm_stack_limits(struct linux_binprm *bprm)
{
unsigned long limit, ptr_size;
@@ -488,20 +414,30 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* of argument strings even with small stacks
*/
limit = max_t(unsigned long, limit, ARG_MAX);
+ /* Reject totally pathological counts. */
+ if (bprm->argc < 0 || bprm->envc < 0)
+ return -E2BIG;
/*
* We must account for the size of all the argv and envp pointers to
* the argv and envp strings, since they will also take up space in
* the stack. They aren't stored until much later when we can't
* signal to the parent that the child has run out of stack space.
* Instead, calculate it here so it's possible to fail gracefully.
+ *
+ * In the case of argc = 0, make sure there is space for adding an
+ * empty string (which will bump argc to 1), to ensure confused
+ * userspace programs don't start processing from argv[1], thinking
+ * argc can never be 0, to keep them from walking envp by accident.
+ * See do_execveat_common().
*/
- ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (check_add_overflow(max(bprm->argc, 1), bprm->envc, &ptr_size) ||
+ check_mul_overflow(ptr_size, sizeof(void *), &ptr_size))
+ return -E2BIG;
if (limit <= ptr_size)
return -E2BIG;
limit -= ptr_size;
- bprm->argmin = bprm->p - limit;
- return 0;
+ return bprm_set_stack_limit(bprm, limit);
}
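To make the new overflow handling concrete (a worked example, not part of the patch): on a 64-bit build with argc = 0 and envc = 0, max(bprm->argc, 1) bumps the count to one, so ptr_size becomes 1 * sizeof(void *) = 8 bytes, which the ARG_MAX-sized limit easily covers, and that single slot later holds the forced empty argv[0]. The check_add_overflow()/check_mul_overflow() pair only fires for pathological argc/envc values whose pointer-table size would wrap, a case the old open-coded sum and multiplication could not catch.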
/*
@@ -535,14 +471,12 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
if (!valid_arg_len(bprm, len))
goto out;
- /* We're going to work our way backwords. */
+ /* We're going to work our way backwards. */
pos = bprm->p;
str += len;
bprm->p -= len;
-#ifdef CONFIG_MMU
- if (bprm->p < bprm->argmin)
+ if (bprm_hit_stack_limit(bprm))
goto out;
-#endif
while (len > 0) {
int offset, bytes_to_copy;
@@ -577,11 +511,11 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
if (kmapped_page) {
flush_dcache_page(kmapped_page);
- kunmap(kmapped_page);
+ kunmap_local(kaddr);
put_arg_page(kmapped_page);
}
kmapped_page = page;
- kaddr = kmap(kmapped_page);
+ kaddr = kmap_local_page(kmapped_page);
kpos = pos & PAGE_MASK;
flush_arg_page(bprm, kpos, kmapped_page);
}
@@ -595,7 +529,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
out:
if (kmapped_page) {
flush_dcache_page(kmapped_page);
- kunmap(kmapped_page);
+ kunmap_local(kaddr);
put_arg_page(kmapped_page);
}
return ret;
@@ -617,14 +551,13 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
/* We're going to work our way backwards. */
arg += len;
bprm->p -= len;
- if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
+ if (bprm_hit_stack_limit(bprm))
return -E2BIG;
while (len > 0) {
unsigned int bytes_to_copy = min_t(unsigned int, len,
min_not_zero(offset_in_page(pos), PAGE_SIZE));
struct page *page;
- char *kaddr;
pos -= bytes_to_copy;
arg -= bytes_to_copy;
@@ -633,11 +566,8 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
page = get_arg_page(bprm, pos, 1);
if (!page)
return -E2BIG;
- kaddr = kmap_atomic(page);
flush_arg_page(bprm, pos & PAGE_MASK, page);
- memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
+ memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
put_arg_page(page);
}
@@ -662,79 +592,6 @@ static int copy_strings_kernel(int argc, const char *const *argv,
#ifdef CONFIG_MMU
/*
- * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
- * the binfmt code determines where the new stack should reside, we shift it to
- * its final location. The process proceeds as follows:
- *
- * 1) Use shift to calculate the new vma endpoints.
- * 2) Extend vma to cover both the old and new ranges. This ensures the
- * arguments passed to subsequent functions are consistent.
- * 3) Move vma's page tables to the new range.
- * 4) Free up any cleared pgd range.
- * 5) Shrink the vma to cover only the new range.
- */
-static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long old_start = vma->vm_start;
- unsigned long old_end = vma->vm_end;
- unsigned long length = old_end - old_start;
- unsigned long new_start = old_start - shift;
- unsigned long new_end = old_end - shift;
- struct mmu_gather tlb;
-
- BUG_ON(new_start > new_end);
-
- /*
- * ensure there are no vmas between where we want to go
- * and where we are
- */
- if (vma != find_vma(mm, new_start))
- return -EFAULT;
-
- /*
- * cover the whole range: [new_start, old_end)
- */
- if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
- return -ENOMEM;
-
- /*
- * move the page tables downwards, on failure we rely on
- * process cleanup to remove whatever mess we made.
- */
- if (length != move_page_tables(vma, old_start,
- vma, new_start, length, false))
- return -ENOMEM;
-
- lru_add_drain();
- tlb_gather_mmu(&tlb, mm);
- if (new_end > old_start) {
- /*
- * when the old and new regions overlap clear from new_end.
- */
- free_pgd_range(&tlb, new_end, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
- } else {
- /*
- * otherwise, clean from old_start; this is done to not touch
- * the address space in [new_end, old_start) some architectures
- * have constraints on va-space that make this illegal (IA64) -
- * for the others its just a little faster.
- */
- free_pgd_range(&tlb, old_start, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
- }
- tlb_finish_mmu(&tlb);
-
- /*
- * Shrink the vma to just the new range. Always succeeds.
- */
- vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
-
- return 0;
-}
-
-/*
* Finalizes the stack vm_area_struct. The flags and permissions are updated,
* the stack is optionally relocated, and some extra space is added.
*/
@@ -742,16 +599,18 @@ int setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top,
int executable_stack)
{
- unsigned long ret;
+ int ret;
unsigned long stack_shift;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = bprm->vma;
struct vm_area_struct *prev = NULL;
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
unsigned long stack_base;
unsigned long stack_size;
unsigned long stack_expand;
unsigned long rlim_stack;
+ struct mmu_gather tlb;
+ struct vma_iterator vmi;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size */
@@ -760,7 +619,8 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_base = calc_max_stack_size(stack_base);
/* Add space for stack randomization. */
- stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+ if (current->flags & PF_RANDOMIZE)
+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
/* Make sure we didn't let the argument array grow too large. */
if (vma->vm_end - vma->vm_start > stack_base)
@@ -785,8 +645,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
mm->arg_start = bprm->p;
#endif
- if (bprm->loader)
- bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
if (mmap_write_lock_killable(mm))
@@ -806,8 +664,13 @@ int setup_arg_pages(struct linux_binprm *bprm,
vm_flags |= mm->def_flags;
vm_flags |= VM_STACK_INCOMPLETE_SETUP;
- ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
+ vma_iter_init(&vmi, mm, vma->vm_start);
+
+ tlb_gather_mmu(&tlb, mm);
+ ret = mprotect_fixup(&vmi, &tlb, vma, &prev, vma->vm_start, vma->vm_end,
vm_flags);
+ tlb_finish_mmu(&tlb);
+
if (ret)
goto out_unlock;
BUG_ON(prev != vma);
@@ -819,13 +682,18 @@ int setup_arg_pages(struct linux_binprm *bprm,
/* Move stack pages down in memory. */
if (stack_shift) {
- ret = shift_arg_pages(vma, stack_shift);
+ /*
+ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
+ * the binfmt code determines where the new stack should reside, we shift it to
+ * its final location.
+ */
+ ret = relocate_vma_down(vma, stack_shift);
if (ret)
goto out_unlock;
}
/* mprotect_fixup is overkill to remove the temporary stack flags */
- vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+ vm_flags_clear(vma, VM_STACK_INCOMPLETE_SETUP);
stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
stack_size = vma->vm_end - vma->vm_start;
@@ -834,19 +702,16 @@ int setup_arg_pages(struct linux_binprm *bprm,
* will align it up.
*/
rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
+
+ stack_expand = min(rlim_stack, stack_size + stack_expand);
+
#ifdef CONFIG_STACK_GROWSUP
- if (stack_size + stack_expand > rlim_stack)
- stack_base = vma->vm_start + rlim_stack;
- else
- stack_base = vma->vm_end + stack_expand;
+ stack_base = vma->vm_start + stack_expand;
#else
- if (stack_size + stack_expand > rlim_stack)
- stack_base = vma->vm_end - rlim_stack;
- else
- stack_base = vma->vm_start - stack_expand;
+ stack_base = vma->vm_end - stack_expand;
#endif
current->mm->start_stack = bprm->p;
- ret = expand_stack(vma, stack_base);
+ ret = expand_stack_locked(vma, stack_base);
if (ret)
ret = -EFAULT;
@@ -873,15 +738,16 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
- char *src = kmap(bprm->page[index]) + offset;
+ char *src = kmap_local_page(bprm->page[index]) + offset;
sp -= PAGE_SIZE - offset;
if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
ret = -EFAULT;
- kunmap(bprm->page[index]);
+ kunmap_local(src);
if (ret)
goto out;
}
+ bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
*sp_location = sp;
out:
@@ -891,10 +757,14 @@ EXPORT_SYMBOL(transfer_args_to_stack);
#endif /* CONFIG_MMU */
+/*
+ * On success, caller must call do_close_execat() on the returned
+ * struct file to close it.
+ */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
- struct file *file;
int err;
+ struct file *file __free(fput) = NULL;
struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC,
@@ -902,7 +772,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
.lookup_flags = LOOKUP_FOLLOW,
};
- if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ if ((flags &
+ ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH | AT_EXECVE_CHECK)) != 0)
return ERR_PTR(-EINVAL);
if (flags & AT_SYMLINK_NOFOLLOW)
open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
@@ -911,33 +782,37 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
file = do_filp_open(fd, name, &open_exec_flags);
if (IS_ERR(file))
- goto out;
+ return file;
+
+ if (path_noexec(&file->f_path))
+ return ERR_PTR(-EACCES);
/*
- * may_open() has already checked for this, so it should be
- * impossible to trip now. But we need to be extra cautious
- * and check again at the very end too.
+ * In the past the regular type check was here. It moved to may_open() in
+ * 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
+ * an invariant that all non-regular files error out before we get here.
*/
- err = -EACCES;
- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
- path_noexec(&file->f_path)))
- goto exit;
+ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)))
+ return ERR_PTR(-EACCES);
- err = deny_write_access(file);
+ err = exe_file_deny_write_access(file);
if (err)
- goto exit;
-
- if (name->name[0] != '\0')
- fsnotify_open(file);
+ return ERR_PTR(err);
-out:
- return file;
-
-exit:
- fput(file);
- return ERR_PTR(err);
+ return no_free_ptr(file);
}
+/**
+ * open_exec - Open a path name for execution
+ *
+ * @name: path name to open with the intent of executing it.
+ *
+ * Returns ERR_PTR on failure or allocated struct file on success.
+ *
+ * As this is a wrapper for the internal do_open_execat(), callers
+ * must call exe_file_allow_write_access() before fput() on release. Also see
+ * do_close_execat().
+ */
struct file *open_exec(const char *name)
{
struct filename *filename = getname_kernel(name);
@@ -951,8 +826,7 @@ struct file *open_exec(const char *name)
}
EXPORT_SYMBOL(open_exec);
-#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
- defined(CONFIG_BINFMT_ELF_FDPIC)
+#if defined(CONFIG_BINFMT_FLAT) || defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
@@ -978,8 +852,6 @@ static int exec_mmap(struct mm_struct *mm)
tsk = current;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
- if (old_mm)
- sync_mm_rss(old_mm);
ret = down_write_killable(&tsk->signal->exec_update_lock);
if (ret)
@@ -1005,6 +877,7 @@ static int exec_mmap(struct mm_struct *mm)
active_mm = tsk->active_mm;
tsk->active_mm = mm;
tsk->mm = mm;
+ mm_init_cid(mm, tsk);
/*
* This prevents preemption while active_mm is being loaded and
* it and mm are being updated, which could cause problems for
@@ -1017,9 +890,9 @@ static int exec_mmap(struct mm_struct *mm)
activate_mm(active_mm, mm);
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
- tsk->mm->vmacache_seqnum = 0;
- vmacache_flush(tsk);
+ lru_gen_add_mm(mm);
task_unlock(tsk);
+ lru_gen_use_mm(mm);
if (old_mm) {
mmap_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
@@ -1028,7 +901,7 @@ static int exec_mmap(struct mm_struct *mm)
mmput(old_mm);
return 0;
}
- mmdrop(active_mm);
+ mmdrop_lazy_tlb(active_mm);
return 0;
}
@@ -1045,7 +918,7 @@ static int de_thread(struct task_struct *tsk)
* Kill all other threads in the thread group.
*/
spin_lock_irq(lock);
- if (signal_group_exit(sig)) {
+ if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) {
/*
* Another group action in progress, just
* return so that the signal is processed.
@@ -1054,7 +927,7 @@ static int de_thread(struct task_struct *tsk)
return -EAGAIN;
}
- sig->group_exit_task = tsk;
+ sig->group_exec_task = tsk;
sig->notify_count = zap_other_threads(tsk);
if (!thread_group_leader(tsk))
sig->notify_count--;
@@ -1082,7 +955,7 @@ static int de_thread(struct task_struct *tsk)
write_lock_irq(&tasklist_lock);
/*
* Do this under tasklist_lock to ensure that
- * exit_notify() can't miss ->group_exit_task
+ * exit_notify() can't miss ->group_exec_task
*/
sig->notify_count = -1;
if (likely(leader->exit_state))
@@ -1135,11 +1008,10 @@ static int de_thread(struct task_struct *tsk)
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
-
/*
* We are going to release_task()->ptrace_unlink() silently,
* the tracer can sleep in do_wait(). EXIT_DEAD guarantees
- * the tracer wont't block again waiting for this thread.
+ * the tracer won't block again waiting for this thread.
*/
if (unlikely(leader->ptrace))
__wake_up_parent(leader, leader->parent);
@@ -1149,7 +1021,7 @@ static int de_thread(struct task_struct *tsk)
release_task(leader);
}
- sig->group_exit_task = NULL;
+ sig->group_exec_task = NULL;
sig->notify_count = 0;
no_thread_group:
@@ -1162,7 +1034,7 @@ no_thread_group:
killed:
/* protects against exit_notify() and __exit_signal() */
read_lock(&tasklist_lock);
- sig->group_exit_task = NULL;
+ sig->group_exec_task = NULL;
sig->notify_count = 0;
read_unlock(&tasklist_lock);
return -EAGAIN;
@@ -1190,11 +1062,11 @@ static int unshare_sighand(struct task_struct *me)
return -ENOMEM;
refcount_set(&newsighand->count, 1);
- memcpy(newsighand->action, oldsighand->action,
- sizeof(newsighand->action));
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
+ memcpy(newsighand->action, oldsighand->action,
+ sizeof(newsighand->action));
rcu_assign_pointer(me->sighand, newsighand);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);
@@ -1204,26 +1076,17 @@ static int unshare_sighand(struct task_struct *me)
return 0;
}
-char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
-{
- task_lock(tsk);
- strncpy(buf, tsk->comm, buf_size);
- task_unlock(tsk);
- return buf;
-}
-EXPORT_SYMBOL_GPL(__get_task_comm);
-
/*
- * These functions flushes out all traces of the currently running executable
- * so that a new one can be started
+ * This is unlocked -- the string will always be NUL-terminated, but
+ * may show overlapping contents if racing concurrent reads.
*/
-
void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
- task_lock(tsk);
+ size_t len = min(strlen(buf), sizeof(tsk->comm) - 1);
+
trace_task_rename(tsk, buf);
- strlcpy(tsk->comm, buf, sizeof(tsk->comm));
- task_unlock(tsk);
+ memcpy(tsk->comm, buf, len);
+ memset(&tsk->comm[len], 0, sizeof(tsk->comm) - len);
perf_event_comm(tsk, exec);
}
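Reader side of the lockless rename above (illustrative only, not part of this diff): with task_lock() gone from the writer, readers must rely solely on NUL termination and should snapshot the name into a local buffer, for example:

	char comm[TASK_COMM_LEN];

	strscpy(comm, tsk->comm, sizeof(comm));

A racing read may see a mix of old and new bytes, but the copy length above is capped at sizeof(tsk->comm) - 1, so the final byte stays NUL and the string remains terminated.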
@@ -1244,17 +1107,24 @@ int begin_new_exec(struct linux_binprm * bprm)
return retval;
/*
- * Ensure all future errors are fatal.
+ * This tracepoint marks the point before flushing the old exec where
+ * the current task is still unchanged, but errors are fatal (point of
+ * no return). The later "sched_process_exec" tracepoint is called after
+ * the current task has successfully switched to the new exec.
*/
- bprm->point_of_no_return = true;
+ trace_sched_prepare_exec(current, bprm);
/*
- * Make this the only thread in the thread group.
+ * Ensure all future errors are fatal.
*/
+ bprm->point_of_no_return = true;
+
+ /* Make this the only thread in the thread group */
retval = de_thread(me);
if (retval)
goto out;
-
+ /* see the comment in check_unsafe_exec() */
+ current->fs->in_exec = 0;
/*
* Cancel any io_uring activity across execve
*/
@@ -1267,8 +1137,8 @@ int begin_new_exec(struct linux_binprm * bprm)
/*
* Must be called _before_ exec_mmap() as bprm->mm is
- * not visibile until then. This also enables the update
- * to be lockless.
+ * not visible until then. Doing it here also ensures
+ * we don't race against replace_mm_exe_file().
*/
retval = set_mm_exe_file(bprm->mm, bprm->file);
if (retval)
@@ -1289,8 +1159,15 @@ int begin_new_exec(struct linux_binprm * bprm)
bprm->mm = NULL;
+ retval = exec_task_namespaces();
+ if (retval)
+ goto out_unlock;
+
#ifdef CONFIG_POSIX_TIMERS
- exit_itimers(me->signal);
+ spin_lock_irq(&me->sighand->siglock);
+ posix_cpu_timers_exit(me);
+ spin_unlock_irq(&me->sighand->siglock);
+ exit_itimers(me);
flush_itimer_signals();
#endif
@@ -1301,13 +1178,7 @@ int begin_new_exec(struct linux_binprm * bprm)
if (retval)
goto out_unlock;
- /*
- * Ensure that the uaccess routines can actually operate on userspace
- * pointers:
- */
- force_uaccess_begin();
-
- me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
+ me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC |
PF_NOFREEZE | PF_NO_SETAFFINITY);
flush_thread();
me->personality &= ~bprm->per_clear;
@@ -1352,7 +1223,28 @@ int begin_new_exec(struct linux_binprm * bprm)
set_dumpable(current->mm, SUID_DUMP_USER);
perf_event_exec();
- __set_task_comm(me, kbasename(bprm->filename), true);
+
+ /*
+ * If the original filename was empty, alloc_bprm() made up a path
+ * that will probably not be useful to admins running ps or similar.
+ * Let's fix it up to be something reasonable.
+ */
+ if (bprm->comm_from_dentry) {
+ /*
+ * Hold RCU lock to keep the name from being freed behind our back.
+ * Use acquire semantics to make sure the terminating NUL from
+ * __d_alloc() is seen.
+ *
+ * Note, we're deliberately sloppy here. We don't need to care about
+ * detecting a concurrent rename and just want a terminated name.
+ */
+ rcu_read_lock();
+ __set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name),
+ true);
+ rcu_read_unlock();
+ } else {
+ __set_task_comm(me, kbasename(bprm->filename), true);
+ }
/* An exec changes our domain. We are no longer part of the thread
group */
@@ -1388,10 +1280,9 @@ int begin_new_exec(struct linux_binprm * bprm)
/* Pass the opened binary to the interpreter. */
if (bprm->have_execfd) {
- retval = get_unused_fd_flags(0);
+ retval = FD_ADD(0, bprm->executable);
if (retval < 0)
goto out_unlock;
- fd_install(retval, bprm->executable);
bprm->executable = NULL;
bprm->execfd = retval;
}
@@ -1399,6 +1290,9 @@ int begin_new_exec(struct linux_binprm * bprm)
out_unlock:
up_write(&me->signal->exec_update_lock);
+ if (!bprm->cred)
+ mutex_unlock(&me->signal->cred_guard_mutex);
+
out:
return retval;
}
@@ -1407,15 +1301,15 @@ EXPORT_SYMBOL(begin_new_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
struct inode *inode = file_inode(file);
- struct user_namespace *mnt_userns = file_mnt_user_ns(file);
- if (inode_permission(mnt_userns, inode, MAY_READ) < 0) {
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ if (inode_permission(idmap, inode, MAY_READ) < 0) {
struct user_namespace *old, *user_ns;
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
/* Ensure mm->user_ns contains the executable */
user_ns = old = bprm->mm->user_ns;
while ((user_ns != &init_user_ns) &&
- !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode))
+ !privileged_wrt_inode_uidgid(user_ns, idmap, inode))
user_ns = user_ns->parent;
if (old != user_ns) {
@@ -1474,6 +1368,15 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
return -ENOMEM;
}
+/* Matches do_open_execat() */
+static void do_close_execat(struct file *file)
+{
+ if (!file)
+ return;
+ exe_file_allow_write_access(file);
+ fput(file);
+}
+
static void free_bprm(struct linux_binprm *bprm)
{
if (bprm->mm) {
@@ -1482,13 +1385,12 @@ static void free_bprm(struct linux_binprm *bprm)
}
free_arg_pages(bprm);
if (bprm->cred) {
+ /* in case exec fails before de_thread() succeeds */
+ current->fs->in_exec = 0;
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
- if (bprm->file) {
- allow_write_access(bprm->file);
- fput(bprm->file);
- }
+ do_close_execat(bprm->file);
if (bprm->executable)
fput(bprm->executable);
/* If a binfmt changed the interp, free it. */
@@ -1498,36 +1400,74 @@ static void free_bprm(struct linux_binprm *bprm)
kfree(bprm);
}
-static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
+static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int flags)
{
- struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ struct linux_binprm *bprm;
+ struct file *file;
int retval = -ENOMEM;
- if (!bprm)
- goto out;
+
+ file = do_open_execat(fd, filename, flags);
+ if (IS_ERR(file))
+ return ERR_CAST(file);
+
+ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ if (!bprm) {
+ do_close_execat(file);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bprm->file = file;
if (fd == AT_FDCWD || filename->name[0] == '/') {
bprm->filename = filename->name;
} else {
- if (filename->name[0] == '\0')
+ if (filename->name[0] == '\0') {
bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
- else
+ bprm->comm_from_dentry = 1;
+ } else {
bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
fd, filename->name);
+ }
if (!bprm->fdpath)
goto out_free;
+ /*
+ * Record that a name derived from an O_CLOEXEC fd will be
+ * inaccessible after exec. This allows the code in exec to
+ * choose to fail when the executable is not mmaped into the
+ * interpreter and an open file descriptor is not passed to
+ * the interpreter. This makes for a better user experience
+ * than having the interpreter start and then immediately fail
+ * when it finds the executable is inaccessible.
+ */
+ if (get_close_on_exec(fd))
+ bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
+
bprm->filename = bprm->fdpath;
}
bprm->interp = bprm->filename;
+ /*
+ * At this point, security_file_open() has already been called (with
+ * __FMODE_EXEC) and access control checks for AT_EXECVE_CHECK will
+ * stop just after the security_bprm_creds_for_exec() call in
+ * bprm_execve(). Indeed, the kernel should not try to parse the
+ * content of the file with exec_binprm() nor change the calling
+ * thread, which means that the following security functions will not
+ * be called:
+ * - security_bprm_check()
+ * - security_bprm_creds_from_file()
+ * - security_bprm_committing_creds()
+ * - security_bprm_committed_creds()
+ */
+ bprm->is_check = !!(flags & AT_EXECVE_CHECK);
+
retval = bprm_mm_init(bprm);
- if (retval)
- goto out_free;
- return bprm;
+ if (!retval)
+ return bprm;
out_free:
free_bprm(bprm);
-out:
return ERR_PTR(retval);
}
@@ -1563,31 +1503,42 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
if (task_no_new_privs(current))
bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
- t = p;
+ /*
+ * If another task is sharing our fs, we cannot safely
+ * suid exec because the differently privileged task
+ * will be able to manipulate the current directory, etc.
+ * It would be nice to force an unshare instead...
+ *
+ * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
+ * from another sub-thread until de_thread() succeeds, this
+ * state is protected by cred_guard_mutex we hold.
+ */
n_fs = 1;
- spin_lock(&p->fs->lock);
+ read_seqlock_excl(&p->fs->seq);
rcu_read_lock();
- while_each_thread(p, t) {
+ for_other_threads(p, t) {
if (t->fs == p->fs)
n_fs++;
}
rcu_read_unlock();
+ /* "users" and "in_exec" locked for copy_fs() */
if (p->fs->users > n_fs)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
p->fs->in_exec = 1;
- spin_unlock(&p->fs->lock);
+ read_sequnlock_excl(&p->fs->seq);
}
static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
/* Handle suid and sgid on files */
- struct user_namespace *mnt_userns;
- struct inode *inode;
+ struct mnt_idmap *idmap;
+ struct inode *inode = file_inode(file);
unsigned int mode;
- kuid_t uid;
- kgid_t gid;
+ vfsuid_t vfsuid;
+ vfsgid_t vfsgid;
+ int err;
if (!mnt_may_suid(file->f_path.mnt))
return;
@@ -1595,35 +1546,39 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
if (task_no_new_privs(current))
return;
- inode = file->f_path.dentry->d_inode;
mode = READ_ONCE(inode->i_mode);
if (!(mode & (S_ISUID|S_ISGID)))
return;
- mnt_userns = file_mnt_user_ns(file);
+ idmap = file_mnt_idmap(file);
/* Be careful if suid/sgid is set */
inode_lock(inode);
- /* reload atomically mode/uid/gid now that lock held */
+ /* Atomically reload and check mode/uid/gid now that lock held. */
mode = inode->i_mode;
- uid = i_uid_into_mnt(mnt_userns, inode);
- gid = i_gid_into_mnt(mnt_userns, inode);
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
+ vfsgid = i_gid_into_vfsgid(idmap, inode);
+ err = inode_permission(idmap, inode, MAY_EXEC);
inode_unlock(inode);
+ /* Did the exec bit vanish out from under us? Give up. */
+ if (err)
+ return;
+
/* We ignore suid/sgid if there are no mappings for them in the ns */
- if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
- !kgid_has_mapping(bprm->cred->user_ns, gid))
+ if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) ||
+ !vfsgid_has_mapping(bprm->cred->user_ns, vfsgid))
return;
if (mode & S_ISUID) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
- bprm->cred->euid = uid;
+ bprm->cred->euid = vfsuid_into_kuid(vfsuid);
}
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
- bprm->cred->egid = gid;
+ bprm->cred->egid = vfsgid_into_kgid(vfsgid);
}
}
@@ -1660,7 +1615,6 @@ static int prepare_binprm(struct linux_binprm *bprm)
*/
int remove_arg_zero(struct linux_binprm *bprm)
{
- int ret = 0;
unsigned long offset;
char *kaddr;
struct page *page;
@@ -1671,36 +1625,30 @@ int remove_arg_zero(struct linux_binprm *bprm)
do {
offset = bprm->p & ~PAGE_MASK;
page = get_arg_page(bprm, bprm->p, 0);
- if (!page) {
- ret = -EFAULT;
- goto out;
- }
- kaddr = kmap_atomic(page);
+ if (!page)
+ return -EFAULT;
+ kaddr = kmap_local_page(page);
for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++)
;
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
put_arg_page(page);
} while (offset == PAGE_SIZE);
bprm->p++;
bprm->argc--;
- ret = 0;
-out:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(remove_arg_zero);
-#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
* cycle the list of binary formats handler, until one recognizes the image
*/
static int search_binary_handler(struct linux_binprm *bprm)
{
- bool need_retry = IS_ENABLED(CONFIG_MODULES);
struct linux_binfmt *fmt;
int retval;
@@ -1712,8 +1660,6 @@ static int search_binary_handler(struct linux_binprm *bprm)
if (retval)
return retval;
- retval = -ENOENT;
- retry:
read_lock(&binfmt_lock);
list_for_each_entry(fmt, &formats, lh) {
if (!try_module_get(fmt->module))
@@ -1731,19 +1677,10 @@ static int search_binary_handler(struct linux_binprm *bprm)
}
read_unlock(&binfmt_lock);
- if (need_retry) {
- if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
- printable(bprm->buf[2]) && printable(bprm->buf[3]))
- return retval;
- if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
- return retval;
- need_retry = false;
- goto retry;
- }
-
- return retval;
+ return -ENOEXEC;
}
+/* binfmt handlers will call back into begin_new_exec() on success. */
static int exec_binprm(struct linux_binprm *bprm)
{
pid_t old_pid, old_vpid;
@@ -1771,7 +1708,7 @@ static int exec_binprm(struct linux_binprm *bprm)
bprm->file = bprm->interpreter;
bprm->interpreter = NULL;
- allow_write_access(exec);
+ exe_file_allow_write_access(exec);
if (unlikely(bprm->have_execfd)) {
if (bprm->executable) {
fput(exec);
@@ -1789,55 +1726,39 @@ static int exec_binprm(struct linux_binprm *bprm)
return 0;
}
-/*
- * sys_execve() executes a new program.
- */
-static int bprm_execve(struct linux_binprm *bprm,
- int fd, struct filename *filename, int flags)
+static int bprm_execve(struct linux_binprm *bprm)
{
- struct file *file;
int retval;
retval = prepare_bprm_creds(bprm);
if (retval)
return retval;
+ /*
+ * Check for unsafe execution states before exec_binprm(), which
+ * will call back into begin_new_exec(), into bprm_creds_from_file(),
+ * where setuid-ness is evaluated.
+ */
check_unsafe_exec(bprm);
current->in_execve = 1;
-
- file = do_open_execat(fd, filename, flags);
- retval = PTR_ERR(file);
- if (IS_ERR(file))
- goto out_unmark;
+ sched_mm_cid_before_execve(current);
sched_exec();
- bprm->file = file;
- /*
- * Record that a name derived from an O_CLOEXEC fd will be
- * inaccessible after exec. This allows the code in exec to
- * choose to fail when the executable is not mmaped into the
- * interpreter and an open file descriptor is not passed to
- * the interpreter. This makes for a better user experience
- * than having the interpreter start and then immediately fail
- * when it finds the executable is inaccessible.
- */
- if (bprm->fdpath && get_close_on_exec(fd))
- bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
-
/* Set the unchanging part of bprm->cred */
retval = security_bprm_creds_for_exec(bprm);
- if (retval)
+ if (retval || bprm->is_check)
goto out;
retval = exec_binprm(bprm);
if (retval < 0)
goto out;
+ sched_mm_cid_after_execve(current);
+ rseq_execve(current);
/* execve succeeded */
- current->fs->in_exec = 0;
current->in_execve = 0;
- rseq_execve(current);
+ user_events_execve(current);
acct_update_integrals(current);
task_numa_free(current, false);
return retval;
@@ -1852,8 +1773,8 @@ out:
if (bprm->point_of_no_return && !fatal_signal_pending(current))
force_fatal_sig(SIGSEGV);
-out_unmark:
- current->fs->in_exec = 0;
+ sched_mm_cid_after_execve(current);
+ rseq_force_update();
current->in_execve = 0;
return retval;
@@ -1877,7 +1798,7 @@ static int do_execveat_common(int fd, struct filename *filename,
* whether NPROC limit is still exceeded.
*/
if ((current->flags & PF_NPROC_EXCEEDED) &&
- is_ucounts_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
+ is_rlimit_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
retval = -EAGAIN;
goto out_ret;
}
@@ -1886,7 +1807,7 @@ static int do_execveat_common(int fd, struct filename *filename,
* further execve() calls fail. */
current->flags &= ~PF_NPROC_EXCEEDED;
- bprm = alloc_bprm(fd, filename);
+ bprm = alloc_bprm(fd, filename, flags);
if (IS_ERR(bprm)) {
retval = PTR_ERR(bprm);
goto out_ret;
@@ -1919,7 +1840,23 @@ static int do_execveat_common(int fd, struct filename *filename,
if (retval < 0)
goto out_free;
- retval = bprm_execve(bprm, fd, filename, flags);
+ /*
+ * When argv is empty, add an empty string ("") as argv[0] to
+ * ensure confused userspace programs that start processing
+ * from argv[1] won't end up walking envp. See also
+ * bprm_stack_limits().
+ */
+ if (bprm->argc == 0) {
+ retval = copy_string_kernel("", bprm);
+ if (retval < 0)
+ goto out_free;
+ bprm->argc = 1;
+
+ pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
+ current->comm, bprm->filename);
+ }
+
+ retval = bprm_execve(bprm);
out_free:
free_bprm(bprm);
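/*
 * Illustrative only: with the argc == 0 handling above, a caller that
 * passes a NULL argv no longer leaves the new program with argc == 0; the
 * kernel injects "" as argv[0] and emits the pr_warn_once() message.
 * "/bin/true" is just a placeholder target.
 */
#include <unistd.h>

int main(void)
{
	execve("/bin/true", NULL, NULL);
	return 1;	/* reached only if execve() failed */
}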
@@ -1936,17 +1873,23 @@ int kernel_execve(const char *kernel_filename,
int fd = AT_FDCWD;
int retval;
+ /* It makes no sense for kernel threads to call execve */
+ if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
+ return -EINVAL;
+
filename = getname_kernel(kernel_filename);
if (IS_ERR(filename))
return PTR_ERR(filename);
- bprm = alloc_bprm(fd, filename);
+ bprm = alloc_bprm(fd, filename, 0);
if (IS_ERR(bprm)) {
retval = PTR_ERR(bprm);
goto out_ret;
}
retval = count_strings_kernel(argv);
+ if (WARN_ON_ONCE(retval == 0))
+ retval = -EINVAL;
if (retval < 0)
goto out_free;
bprm->argc = retval;
@@ -1973,7 +1916,7 @@ int kernel_execve(const char *kernel_filename,
if (retval < 0)
goto out_free;
- retval = bprm_execve(bprm, fd, filename, 0);
+ retval = bprm_execve(bprm);
out_free:
free_bprm(bprm);
out_ret:
@@ -2055,7 +1998,7 @@ void set_dumpable(struct mm_struct *mm, int value)
if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
return;
- set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
+ __mm_flags_set_mask_dumpable(mm, value);
}
SYSCALL_DEFINE3(execve,
@@ -2096,3 +2039,40 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
argv, envp, flags);
}
#endif
+
+#ifdef CONFIG_SYSCTL
+
+static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!error && write)
+ validate_coredump_safety();
+ return error;
+}
+
+static const struct ctl_table fs_exec_sysctls[] = {
+ {
+ .procname = "suid_dumpable",
+ .data = &suid_dumpable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_coredump,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+};
+
+static int __init init_fs_exec_sysctls(void)
+{
+ register_sysctl_init("fs", fs_exec_sysctls);
+ return 0;
+}
+
+fs_initcall(init_fs_exec_sysctls);
+#endif /* CONFIG_SYSCTL */
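+/*
+ * Illustrative only: the table above keeps "fs/suid_dumpable" writable
+ * through proc_dointvec_minmax_coredump(), so writes are clamped to the
+ * range [0, 2] (SUID_DUMP_DISABLE, SUID_DUMP_USER, SUID_DUMP_ROOT) and
+ * every accepted write re-runs validate_coredump_safety().  Writing any
+ * other value to /proc/sys/fs/suid_dumpable fails with -EINVAL.
+ */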
+
+#ifdef CONFIG_EXEC_KUNIT_TEST
+#include "tests/exec_kunit.c"
+#endif
diff --git a/fs/exfat/Kconfig b/fs/exfat/Kconfig
index 5a65071b5ecf..cbeca8e44d9b 100644
--- a/fs/exfat/Kconfig
+++ b/fs/exfat/Kconfig
@@ -2,7 +2,9 @@
config EXFAT_FS
tristate "exFAT filesystem support"
+ select BUFFER_HEAD
select NLS
+ select LEGACY_DIRECT_IO
help
This allows you to mount devices formatted with the exFAT file system.
exFAT is typically used on SD-Cards or USB sticks.
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index cc5cffc4a769..5429041c7eaf 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -5,53 +5,80 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/bitmap.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
-static const unsigned char free_bit[] = {
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/* 0 ~ 19*/
- 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3,/* 20 ~ 39*/
- 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/* 40 ~ 59*/
- 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/* 60 ~ 79*/
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2,/* 80 ~ 99*/
- 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,/*100 ~ 119*/
- 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*120 ~ 139*/
- 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,/*140 ~ 159*/
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*160 ~ 179*/
- 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3,/*180 ~ 199*/
- 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*200 ~ 219*/
- 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/*220 ~ 239*/
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /*240 ~ 254*/
-};
-
-static const unsigned char used_bit[] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3,/* 0 ~ 19*/
- 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4,/* 20 ~ 39*/
- 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5,/* 40 ~ 59*/
- 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,/* 60 ~ 79*/
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4,/* 80 ~ 99*/
- 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6,/*100 ~ 119*/
- 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,/*120 ~ 139*/
- 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,/*140 ~ 159*/
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,/*160 ~ 179*/
- 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,/*180 ~ 199*/
- 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6,/*200 ~ 219*/
- 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,/*220 ~ 239*/
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /*240 ~ 255*/
-};
+#if BITS_PER_LONG == 32
+#define __le_long __le32
+#define lel_to_cpu(A) le32_to_cpu(A)
+#define cpu_to_lel(A) cpu_to_le32(A)
+#elif BITS_PER_LONG == 64
+#define __le_long __le64
+#define lel_to_cpu(A) le64_to_cpu(A)
+#define cpu_to_lel(A) cpu_to_le64(A)
+#else
+#error "BITS_PER_LONG not 32 or 64"
+#endif
/*
* Allocation Bitmap Management Functions
*/
+static bool exfat_test_bitmap_range(struct super_block *sb, unsigned int clu,
+ unsigned int count)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int start = clu;
+ unsigned int end = clu + count;
+ unsigned int ent_idx, i, b;
+ unsigned int bit_offset, bits_to_check;
+ __le_long *bitmap_le;
+ unsigned long mask, word;
+
+ if (!is_valid_cluster(sbi, start) || !is_valid_cluster(sbi, end - 1))
+ return false;
+
+ while (start < end) {
+ ent_idx = CLUSTER_TO_BITMAP_ENT(start);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+
+ bitmap_le = (__le_long *)sbi->vol_amap[i]->b_data;
+
+ /* Calculate how many bits we can check in the current word */
+ bit_offset = b % BITS_PER_LONG;
+ bits_to_check = min(end - start,
+ (unsigned int)(BITS_PER_LONG - bit_offset));
+
+ /* Create a bitmask for the range of bits to check */
+ if (bits_to_check >= BITS_PER_LONG)
+ mask = ~0UL;
+ else
+ mask = ((1UL << bits_to_check) - 1) << bit_offset;
+ word = lel_to_cpu(bitmap_le[b / BITS_PER_LONG]);
+
+ /* Check if all bits in the mask are set */
+ if ((word & mask) != mask)
+ return false;
+
+ start += bits_to_check;
+ }
+
+ return true;
+}
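+/*
+ * Worked example for the mask computation above (64-bit, illustrative):
+ * checking 10 clusters whose first bitmap bit sits at offset 61 within
+ * its word gives bit_offset = 61, bits_to_check = min(10, 64 - 61) = 3,
+ * mask = ((1UL << 3) - 1) << 61; the remaining 7 bits are then tested in
+ * the next word with bit_offset = 0 and mask = 0x7f.
+ */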
+
static int exfat_allocate_bitmap(struct super_block *sb,
struct exfat_dentry *ep)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct blk_plug plug;
long long map_size;
- unsigned int i, need_map_size;
+ unsigned int i, j, need_map_size;
sector_t sector;
+ unsigned int max_ra_count;
sbi->map_clu = le32_to_cpu(ep->dentry.bitmap.start_clu);
map_size = le64_to_cpu(ep->dentry.bitmap.size);
@@ -69,28 +96,43 @@ static int exfat_allocate_bitmap(struct super_block *sb,
}
sbi->map_sectors = ((need_map_size - 1) >>
(sb->s_blocksize_bits)) + 1;
- sbi->vol_amap = kmalloc_array(sbi->map_sectors,
+ sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
sizeof(struct buffer_head *), GFP_KERNEL);
if (!sbi->vol_amap)
return -ENOMEM;
sector = exfat_cluster_to_sector(sbi, sbi->map_clu);
+ max_ra_count = min(sb->s_bdi->ra_pages, sb->s_bdi->io_pages) <<
+ (PAGE_SHIFT - sb->s_blocksize_bits);
for (i = 0; i < sbi->map_sectors; i++) {
- sbi->vol_amap[i] = sb_bread(sb, sector + i);
- if (!sbi->vol_amap[i]) {
- /* release all buffers and free vol_amap */
- int j = 0;
-
- while (j < i)
- brelse(sbi->vol_amap[j++]);
-
- kfree(sbi->vol_amap);
- sbi->vol_amap = NULL;
- return -EIO;
+ /* Trigger the next readahead in advance. */
+ if (max_ra_count && 0 == (i % max_ra_count)) {
+ blk_start_plug(&plug);
+ for (j = i; j < min(max_ra_count, sbi->map_sectors - i) + i; j++)
+ sb_breadahead(sb, sector + j);
+ blk_finish_plug(&plug);
}
+
+ sbi->vol_amap[i] = sb_bread(sb, sector + i);
+ if (!sbi->vol_amap[i])
+ goto err_out;
}
+ if (exfat_test_bitmap_range(sb, sbi->map_clu,
+ EXFAT_B_TO_CLU_ROUND_UP(map_size, sbi)) == false)
+ goto err_out;
+
return 0;
+
+err_out:
+ j = 0;
+ /* release all buffers and free vol_amap */
+ while (j < i)
+ brelse(sbi->vol_amap[j++]);
+
+ kvfree(sbi->vol_amap);
+ sbi->vol_amap = NULL;
+ return -EIO;
}
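/*
 * Worked example for the readahead batching above (illustrative): with
 * 4 KiB pages and a 512-byte block size, PAGE_SHIFT - s_blocksize_bits
 * is 3, so ra_pages = io_pages = 32 gives max_ra_count = 32 << 3 = 256;
 * every 256th loop iteration plugs the queue, issues sb_breadahead() for
 * the next (up to) 256 bitmap sectors, then unplugs before reading them
 * synchronously with sb_bread().
 */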
int exfat_load_bitmap(struct super_block *sb)
@@ -105,16 +147,13 @@ int exfat_load_bitmap(struct super_block *sb)
struct exfat_dentry *ep;
struct buffer_head *bh;
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
type = exfat_get_entry_type(ep);
- if (type == TYPE_UNUSED)
- break;
- if (type != TYPE_BITMAP)
- continue;
- if (ep->dentry.bitmap.flags == 0x0) {
+ if (type == TYPE_BITMAP &&
+ ep->dentry.bitmap.flags == 0x0) {
int err;
err = exfat_allocate_bitmap(sb, ep);
@@ -122,6 +161,9 @@ int exfat_load_bitmap(struct super_block *sb)
return err;
}
brelse(bh);
+
+ if (type == TYPE_UNUSED)
+ return -EINVAL;
}
if (exfat_get_next_cluster(sb, &clu.dir))
@@ -138,17 +180,18 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
for (i = 0; i < sbi->map_sectors; i++)
__brelse(sbi->vol_amap[i]);
- kfree(sbi->vol_amap);
+ kvfree(sbi->vol_amap);
}
-int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
+int exfat_set_bitmap(struct super_block *sb, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
- struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
+ if (!is_valid_cluster(sbi, clu))
+ return -EINVAL;
+
ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
@@ -158,34 +201,49 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
return 0;
}
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+int exfat_clear_bitmap(struct super_block *sb, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
- struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
+ if (!is_valid_cluster(sbi, clu))
+ return -EIO;
+
ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+ if (!test_bit_le(b, sbi->vol_amap[i]->b_data))
+ return -EIO;
+
clear_bit_le(b, sbi->vol_amap[i]->b_data);
+
exfat_update_bh(sbi->vol_amap[i], sync);
- if (opts->discard) {
- int ret_discard;
+ return 0;
+}
+
+bool exfat_test_bitmap(struct super_block *sb, unsigned int clu)
+{
+ int i, b;
+ unsigned int ent_idx;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (!sbi->vol_amap)
+ return true;
- ret_discard = sb_issue_discard(sb,
- exfat_cluster_to_sector(sbi, clu),
- (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
+ if (!is_valid_cluster(sbi, clu))
+ return false;
- if (ret_discard == -EOPNOTSUPP) {
- exfat_err(sb, "discard not supported by device, disabling");
- opts->discard = 0;
- }
- }
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+
+ if (!test_bit_le(b, sbi->vol_amap[i]->b_data))
+ return false;
+
+ return true;
}
/*
@@ -196,32 +254,35 @@ unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu)
{
unsigned int i, map_i, map_b, ent_idx;
unsigned int clu_base, clu_free;
- unsigned char k, clu_mask;
+ unsigned long clu_bits, clu_mask;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ __le_long bitval;
WARN_ON(clu < EXFAT_FIRST_CLUSTER);
- ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
- clu_base = BITMAP_ENT_TO_CLUSTER(ent_idx & ~(BITS_PER_BYTE_MASK));
+ ent_idx = ALIGN_DOWN(CLUSTER_TO_BITMAP_ENT(clu), BITS_PER_LONG);
+ clu_base = BITMAP_ENT_TO_CLUSTER(ent_idx);
clu_mask = IGNORED_BITS_REMAINED(clu, clu_base);
map_i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
map_b = BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent_idx);
for (i = EXFAT_FIRST_CLUSTER; i < sbi->num_clusters;
- i += BITS_PER_BYTE) {
- k = *(sbi->vol_amap[map_i]->b_data + map_b);
+ i += BITS_PER_LONG) {
+ bitval = *(__le_long *)(sbi->vol_amap[map_i]->b_data + map_b);
if (clu_mask > 0) {
- k |= clu_mask;
+ bitval |= cpu_to_lel(clu_mask);
clu_mask = 0;
}
- if (k < 0xFF) {
- clu_free = clu_base + free_bit[k];
+ if (lel_to_cpu(bitval) != ULONG_MAX) {
+ clu_bits = lel_to_cpu(bitval);
+ clu_free = clu_base + ffz(clu_bits);
if (clu_free < sbi->num_clusters)
return clu_free;
}
- clu_base += BITS_PER_BYTE;
+ clu_base += BITS_PER_LONG;
+ map_b += sizeof(long);
- if (++map_b >= sb->s_blocksize ||
+ if (map_b >= sb->s_blocksize ||
clu_base >= sbi->num_clusters) {
if (++map_i >= sbi->map_sectors) {
clu_base = EXFAT_FIRST_CLUSTER;
@@ -240,25 +301,24 @@ int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count)
unsigned int count = 0;
unsigned int i, map_i = 0, map_b = 0;
unsigned int total_clus = EXFAT_DATA_CLUSTER_COUNT(sbi);
- unsigned int last_mask = total_clus & BITS_PER_BYTE_MASK;
- unsigned char clu_bits;
- const unsigned char last_bit_mask[] = {0, 0b00000001, 0b00000011,
- 0b00000111, 0b00001111, 0b00011111, 0b00111111, 0b01111111};
+ unsigned int last_mask = total_clus & (BITS_PER_LONG - 1);
+ unsigned long *bitmap, clu_bits;
total_clus &= ~last_mask;
- for (i = 0; i < total_clus; i += BITS_PER_BYTE) {
- clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
- count += used_bit[clu_bits];
- if (++map_b >= (unsigned int)sb->s_blocksize) {
+ for (i = 0; i < total_clus; i += BITS_PER_LONG) {
+ bitmap = (void *)(sbi->vol_amap[map_i]->b_data + map_b);
+ count += hweight_long(*bitmap);
+ map_b += sizeof(long);
+ if (map_b >= (unsigned int)sb->s_blocksize) {
map_i++;
map_b = 0;
}
}
if (last_mask) {
- clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
- clu_bits &= last_bit_mask[last_mask];
- count += used_bit[clu_bits];
+ bitmap = (void *)(sbi->vol_amap[map_i]->b_data + map_b);
+ clu_bits = lel_to_cpu(*(__le_long *)bitmap);
+ count += hweight_long(clu_bits & BITMAP_LAST_WORD_MASK(last_mask));
}
*ret_count = count;
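/*
 * Worked example (64-bit, illustrative): with 1000 data clusters,
 * last_mask = 1000 & 63 = 40, so 960 clusters are counted word by word
 * with hweight_long() (15 full words) and the trailing word contributes
 * the popcount of its low 40 bits via BITMAP_LAST_WORD_MASK(40).
 */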
diff --git a/fs/exfat/cache.c b/fs/exfat/cache.c
index 5a2f119b7e8c..d5ce0ae660ba 100644
--- a/fs/exfat/cache.c
+++ b/fs/exfat/cache.c
@@ -11,7 +11,7 @@
*/
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/buffer_head.h>
#include "exfat_raw.h"
@@ -46,7 +46,7 @@ int exfat_cache_init(void)
{
exfat_cachep = kmem_cache_create("exfat_cache",
sizeof(struct exfat_cache),
- 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+ 0, SLAB_RECLAIM_ACCOUNT,
exfat_cache_init_once);
if (!exfat_cachep)
return -ENOMEM;
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index cb1c0d8c1714..3045a58e124a 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -29,15 +29,16 @@ static int exfat_extract_uni_name(struct exfat_dentry *ep,
}
-static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
+static int exfat_get_uniname_from_ext_entry(struct super_block *sb,
struct exfat_chain *p_dir, int entry, unsigned short *uniname)
{
- int i;
- struct exfat_entry_set_cache *es;
+ int i, err;
+ struct exfat_entry_set_cache es;
+ unsigned int uni_len = 0, len;
- es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
- if (!es)
- return;
+ err = exfat_get_dentry_set(&es, sb, p_dir, entry, ES_ALL_ENTRIES);
+ if (err)
+ return err;
/*
* First entry : file entry
@@ -45,26 +46,29 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
* Third entry : first file-name entry
* So, the index of first file-name dentry should start from 2.
*/
- for (i = 2; i < es->num_entries; i++) {
- struct exfat_dentry *ep = exfat_get_dentry_cached(es, i);
+ for (i = ES_IDX_FIRST_FILENAME; i < es.num_entries; i++) {
+ struct exfat_dentry *ep = exfat_get_dentry_cached(&es, i);
/* end of name entry */
if (exfat_get_entry_type(ep) != TYPE_EXTEND)
break;
- exfat_extract_uni_name(ep, uniname);
+ len = exfat_extract_uni_name(ep, uniname);
+ uni_len += len;
+ if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH)
+ break;
uniname += EXFAT_FILE_NAME_LEN;
}
- exfat_free_dentry_set(es, false);
+ exfat_put_dentry_set(&es, false);
+ return 0;
}
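/*
 * Worked example (illustrative): each TYPE_EXTEND entry carries
 * EXFAT_FILE_NAME_LEN (15) UTF-16 units, so a maximal 255-character name
 * spans 17 name entries; the loop above stops early at the first short
 * entry or once uni_len reaches MAX_NAME_LENGTH.
 */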
/* read a directory entry from the opened directory */
static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_entry *dir_entry)
{
- int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
+ int i, dentries_per_clu, num_ext, err;
unsigned int type, clu_offset, max_dentries;
- sector_t sector;
struct exfat_chain dir, clu;
struct exfat_uni_name uni_name;
struct exfat_dentry *ep;
@@ -78,18 +82,14 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
if (ei->type != TYPE_DIR)
return -EPERM;
- if (ei->entry == -1)
- exfat_chain_set(&dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
- else
- exfat_chain_set(&dir, ei->start_clu,
- EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
+ exfat_chain_set(&dir, ei->start_clu,
+ EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
dentries_per_clu = sbi->dentries_per_clu;
- dentries_per_clu_bits = ilog2(dentries_per_clu);
max_dentries = (unsigned int)min_t(u64, MAX_EXFAT_DENTRIES,
- (u64)sbi->num_clusters << dentries_per_clu_bits);
+ (u64)EXFAT_CLU_TO_DEN(sbi->num_clusters, sbi));
- clu_offset = dentry >> dentries_per_clu_bits;
+ clu_offset = EXFAT_DEN_TO_CLU(dentry, sbi);
exfat_chain_dup(&clu, &dir);
if (clu.flags == ALLOC_NO_FAT_CHAIN) {
@@ -103,7 +103,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
clu.dir = ei->hint_bmap.clu;
}
- while (clu_offset > 0) {
+ while (clu_offset > 0 && clu.dir != EXFAT_EOF_CLUSTER) {
if (exfat_get_next_cluster(sb, &(clu.dir)))
return -EIO;
@@ -115,14 +115,14 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
i = dentry & (dentries_per_clu - 1);
for ( ; i < dentries_per_clu; i++, dentry++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, &sector);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED) {
brelse(bh);
- break;
+ goto out;
}
if (type != TYPE_FILE && type != TYPE_DIR) {
@@ -132,39 +132,27 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
num_ext = ep->dentry.file.num_ext;
dir_entry->attr = le16_to_cpu(ep->dentry.file.attr);
- exfat_get_entry_time(sbi, &dir_entry->crtime,
- ep->dentry.file.create_tz,
- ep->dentry.file.create_time,
- ep->dentry.file.create_date,
- ep->dentry.file.create_time_cs);
- exfat_get_entry_time(sbi, &dir_entry->mtime,
- ep->dentry.file.modify_tz,
- ep->dentry.file.modify_time,
- ep->dentry.file.modify_date,
- ep->dentry.file.modify_time_cs);
- exfat_get_entry_time(sbi, &dir_entry->atime,
- ep->dentry.file.access_tz,
- ep->dentry.file.access_time,
- ep->dentry.file.access_date,
- 0);
*uni_name.name = 0x0;
- exfat_get_uniname_from_ext_entry(sb, &clu, i,
+ err = exfat_get_uniname_from_ext_entry(sb, &clu, i,
uni_name.name);
+ if (err) {
+ brelse(bh);
+ continue;
+ }
exfat_utf16_to_nls(sb, &uni_name,
dir_entry->namebuf.lfn,
dir_entry->namebuf.lfnbuf_len);
brelse(bh);
- ep = exfat_get_dentry(sb, &clu, i + 1, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i + 1, &bh);
if (!ep)
return -EIO;
- dir_entry->size =
- le64_to_cpu(ep->dentry.stream.valid_size);
- dir_entry->entry = dentry;
+ dir_entry->entry = i;
+ dir_entry->dir = clu;
brelse(bh);
- ei->hint_bmap.off = dentry >> dentries_per_clu_bits;
+ ei->hint_bmap.off = EXFAT_DEN_TO_CLU(dentry, sbi);
ei->hint_bmap.clu = clu.dir;
*cpos = EXFAT_DEN_TO_B(dentry + 1 + num_ext);
@@ -182,6 +170,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
}
}
+out:
dir_entry->namebuf.lfn[0] = '\0';
*cpos = EXFAT_DEN_TO_B(dentry);
return 0;
@@ -211,11 +200,14 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb)
exfat_init_namebuf(nb);
}
-/* skip iterating emit_dots when dir is empty */
+/*
+ * Before calling dir_emit*(), sbi->s_lock should be released
+ * because a page fault can occur in dir_emit*().
+ */
#define ITER_POS_FILLED_DOTS (2)
-static int exfat_iterate(struct file *filp, struct dir_context *ctx)
+static int exfat_iterate(struct file *file, struct dir_context *ctx)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct inode *tmp;
struct exfat_dir_entry de;
@@ -226,35 +218,33 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
int err = 0, fake_offset = 0;
exfat_init_namebuf(nb);
- mutex_lock(&EXFAT_SB(sb)->s_lock);
cpos = ctx->pos;
- if (!dir_emit_dots(filp, ctx))
- goto unlock;
+ if (!dir_emit_dots(file, ctx))
+ goto out;
if (ctx->pos == ITER_POS_FILLED_DOTS) {
cpos = 0;
fake_offset = 1;
}
- if (cpos & (DENTRY_SIZE - 1)) {
- err = -ENOENT;
- goto unlock;
- }
+ cpos = round_up(cpos, DENTRY_SIZE);
/* name buffer should be allocated before use */
err = exfat_alloc_namebuf(nb);
if (err)
- goto unlock;
+ goto out;
get_new:
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+
if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
goto end_of_dir;
err = exfat_readdir(inode, &cpos, &de);
if (err) {
/*
- * At least we tried to read a sector. Move cpos to next sector
- * position (should be aligned).
+ * At least we tried to read a sector.
+ * Move cpos to next sector position (should be aligned).
*/
if (err == -EIO) {
cpos += 1 << (sb->s_blocksize_bits);
@@ -268,7 +258,7 @@ get_new:
if (!nb->lfn[0])
goto end_of_dir;
- i_pos = ((loff_t)ei->start_clu << 32) | (de.entry & 0xffffffff);
+ i_pos = ((loff_t)de.dir.dir << 32) | (de.entry & 0xffffffff);
tmp = exfat_iget(sb, i_pos);
if (tmp) {
inum = tmp->i_ino;
@@ -277,16 +267,10 @@ get_new:
inum = iunique(sb, EXFAT_ROOT_INO);
}
- /*
- * Before calling dir_emit(), sb_lock should be released.
- * Because page fault can occur in dir_emit() when the size
- * of buffer given from user is larger than one page size.
- */
mutex_unlock(&EXFAT_SB(sb)->s_lock);
if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
- (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
- goto out_unlocked;
- mutex_lock(&EXFAT_SB(sb)->s_lock);
+ (de.attr & EXFAT_ATTR_SUBDIR) ? DT_DIR : DT_REG))
+ goto out;
ctx->pos = cpos;
goto get_new;
@@ -294,9 +278,8 @@ end_of_dir:
if (!cpos && fake_offset)
cpos = ITER_POS_FILLED_DOTS;
ctx->pos = cpos;
-unlock:
mutex_unlock(&EXFAT_SB(sb)->s_lock);
-out_unlocked:
+out:
/*
* To improve performance, free namebuf after unlocking sb_lock.
* If namebuf is not allocated, this function does nothing
@@ -305,10 +288,11 @@ out_unlocked:
return err;
}
+WRAP_DIR_ITER(exfat_iterate) // FIXME!
const struct file_operations exfat_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = exfat_iterate,
+ .iterate_shared = shared_exfat_iterate,
.unlocked_ioctl = exfat_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = exfat_compat_ioctl,
@@ -338,7 +322,7 @@ int exfat_calc_num_entries(struct exfat_uni_name *p_uniname)
return -EINVAL;
/* 1 file entry + 1 stream entry + name entries */
- return ((len - 1) / EXFAT_FILE_NAME_LEN + 3);
+ return ES_ENTRY_NUM(len);
}
unsigned int exfat_get_entry_type(struct exfat_dentry *ep)
@@ -357,7 +341,7 @@ unsigned int exfat_get_entry_type(struct exfat_dentry *ep)
if (ep->type == EXFAT_VOLUME)
return TYPE_VOLUME;
if (ep->type == EXFAT_FILE) {
- if (le16_to_cpu(ep->dentry.file.attr) & ATTR_SUBDIR)
+ if (le16_to_cpu(ep->dentry.file.attr) & EXFAT_ATTR_SUBDIR)
return TYPE_DIR;
return TYPE_FILE;
}
@@ -381,6 +365,12 @@ unsigned int exfat_get_entry_type(struct exfat_dentry *ep)
return TYPE_ACL;
return TYPE_CRITICAL_SEC;
}
+
+ if (ep->type == EXFAT_VENDOR_EXT)
+ return TYPE_VENDOR_EXT;
+ if (ep->type == EXFAT_VENDOR_ALLOC)
+ return TYPE_VENDOR_ALLOC;
+
return TYPE_BENIGN_SEC;
}
@@ -402,19 +392,22 @@ static void exfat_set_entry_type(struct exfat_dentry *ep, unsigned int type)
ep->type = EXFAT_VOLUME;
} else if (type == TYPE_DIR) {
ep->type = EXFAT_FILE;
- ep->dentry.file.attr = cpu_to_le16(ATTR_SUBDIR);
+ ep->dentry.file.attr = cpu_to_le16(EXFAT_ATTR_SUBDIR);
} else if (type == TYPE_FILE) {
ep->type = EXFAT_FILE;
- ep->dentry.file.attr = cpu_to_le16(ATTR_ARCHIVE);
+ ep->dentry.file.attr = cpu_to_le16(EXFAT_ATTR_ARCHIVE);
}
}
static void exfat_init_stream_entry(struct exfat_dentry *ep,
- unsigned char flags, unsigned int start_clu,
- unsigned long long size)
+ unsigned int start_clu, unsigned long long size)
{
+ memset(ep, 0, sizeof(*ep));
exfat_set_entry_type(ep, TYPE_STREAM);
- ep->dentry.stream.flags = flags;
+ if (size == 0)
+ ep->dentry.stream.flags = ALLOC_FAT_CHAIN;
+ else
+ ep->dentry.stream.flags = ALLOC_NO_FAT_CHAIN;
ep->dentry.stream.start_clu = cpu_to_le32(start_clu);
ep->dentry.stream.valid_size = cpu_to_le64(size);
ep->dentry.stream.size = cpu_to_le64(size);
@@ -438,177 +431,116 @@ static void exfat_init_name_entry(struct exfat_dentry *ep,
}
}
-int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
- int entry, unsigned int type, unsigned int start_clu,
- unsigned long long size)
+void exfat_init_dir_entry(struct exfat_entry_set_cache *es,
+ unsigned int type, unsigned int start_clu,
+ unsigned long long size, struct timespec64 *ts)
{
- struct super_block *sb = inode->i_sb;
+ struct super_block *sb = es->sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct timespec64 ts = current_time(inode);
- sector_t sector;
struct exfat_dentry *ep;
- struct buffer_head *bh;
-
- /*
- * We cannot use exfat_get_dentry_set here because file ep is not
- * initialized yet.
- */
- ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
- if (!ep)
- return -EIO;
+ ep = exfat_get_dentry_cached(es, ES_IDX_FILE);
+ memset(ep, 0, sizeof(*ep));
exfat_set_entry_type(ep, type);
- exfat_set_entry_time(sbi, &ts,
+ exfat_set_entry_time(sbi, ts,
&ep->dentry.file.create_tz,
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
&ep->dentry.file.create_time_cs);
- exfat_set_entry_time(sbi, &ts,
+ exfat_set_entry_time(sbi, ts,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
&ep->dentry.file.modify_time_cs);
- exfat_set_entry_time(sbi, &ts,
+ exfat_set_entry_time(sbi, ts,
&ep->dentry.file.access_tz,
&ep->dentry.file.access_time,
&ep->dentry.file.access_date,
NULL);
- exfat_update_bh(bh, IS_DIRSYNC(inode));
- brelse(bh);
-
- ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
- if (!ep)
- return -EIO;
-
- exfat_init_stream_entry(ep,
- (type == TYPE_FILE) ? ALLOC_FAT_CHAIN : ALLOC_NO_FAT_CHAIN,
- start_clu, size);
- exfat_update_bh(bh, IS_DIRSYNC(inode));
- brelse(bh);
-
- return 0;
+ ep = exfat_get_dentry_cached(es, ES_IDX_STREAM);
+ exfat_init_stream_entry(ep, start_clu, size);
}
-int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
- int entry)
+static void exfat_free_benign_secondary_clusters(struct inode *inode,
+ struct exfat_dentry *ep)
{
struct super_block *sb = inode->i_sb;
- int ret = 0;
- int i, num_entries;
- sector_t sector;
- u16 chksum;
- struct exfat_dentry *ep, *fep;
- struct buffer_head *fbh, *bh;
-
- fep = exfat_get_dentry(sb, p_dir, entry, &fbh, &sector);
- if (!fep)
- return -EIO;
+ struct exfat_chain dir;
+ unsigned int start_clu =
+ le32_to_cpu(ep->dentry.generic_secondary.start_clu);
+ u64 size = le64_to_cpu(ep->dentry.generic_secondary.size);
+ unsigned char flags = ep->dentry.generic_secondary.flags;
- num_entries = fep->dentry.file.num_ext + 1;
- chksum = exfat_calc_chksum16(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
-
- for (i = 1; i < num_entries; i++) {
- ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL);
- if (!ep) {
- ret = -EIO;
- goto release_fbh;
- }
- chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum,
- CS_DEFAULT);
- brelse(bh);
- }
+ if (!(flags & ALLOC_POSSIBLE) || !start_clu || !size)
+ return;
- fep->dentry.file.checksum = cpu_to_le16(chksum);
- exfat_update_bh(fbh, IS_DIRSYNC(inode));
-release_fbh:
- brelse(fbh);
- return ret;
+ exfat_chain_set(&dir, start_clu,
+ EXFAT_B_TO_CLU_ROUND_UP(size, EXFAT_SB(sb)),
+ flags);
+ exfat_free_cluster(inode, &dir);
}
-int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
- int entry, int num_entries, struct exfat_uni_name *p_uniname)
+void exfat_init_ext_entry(struct exfat_entry_set_cache *es, int num_entries,
+ struct exfat_uni_name *p_uniname)
{
- struct super_block *sb = inode->i_sb;
int i;
- sector_t sector;
unsigned short *uniname = p_uniname->name;
struct exfat_dentry *ep;
- struct buffer_head *bh;
- int sync = IS_DIRSYNC(inode);
-
- ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
- if (!ep)
- return -EIO;
+ ep = exfat_get_dentry_cached(es, ES_IDX_FILE);
ep->dentry.file.num_ext = (unsigned char)(num_entries - 1);
- exfat_update_bh(bh, sync);
- brelse(bh);
-
- ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
- if (!ep)
- return -EIO;
+ ep = exfat_get_dentry_cached(es, ES_IDX_STREAM);
ep->dentry.stream.name_len = p_uniname->name_len;
ep->dentry.stream.name_hash = cpu_to_le16(p_uniname->name_hash);
- exfat_update_bh(bh, sync);
- brelse(bh);
-
- for (i = EXFAT_FIRST_CLUSTER; i < num_entries; i++) {
- ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
- if (!ep)
- return -EIO;
+ for (i = ES_IDX_FIRST_FILENAME; i < num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
exfat_init_name_entry(ep, uniname);
- exfat_update_bh(bh, sync);
- brelse(bh);
uniname += EXFAT_FILE_NAME_LEN;
}
- exfat_update_dir_chksum(inode, p_dir, entry);
- return 0;
+ exfat_update_dir_chksum(es);
}
-int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
- int entry, int order, int num_entries)
+void exfat_remove_entries(struct inode *inode, struct exfat_entry_set_cache *es,
+ int order)
{
- struct super_block *sb = inode->i_sb;
int i;
- sector_t sector;
struct exfat_dentry *ep;
- struct buffer_head *bh;
- for (i = order; i < num_entries; i++) {
- ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
- if (!ep)
- return -EIO;
+ for (i = order; i < es->num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+
+ if (exfat_get_entry_type(ep) & TYPE_BENIGN_SEC)
+ exfat_free_benign_secondary_clusters(inode, ep);
exfat_set_entry_type(ep, TYPE_DELETED);
- exfat_update_bh(bh, IS_DIRSYNC(inode));
- brelse(bh);
}
- return 0;
+ if (order < es->num_entries)
+ es->modified = true;
}
-void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es)
+void exfat_update_dir_chksum(struct exfat_entry_set_cache *es)
{
int chksum_type = CS_DIR_ENTRY, i;
unsigned short chksum = 0;
struct exfat_dentry *ep;
- for (i = 0; i < es->num_entries; i++) {
+ for (i = ES_IDX_FILE; i < es->num_entries; i++) {
ep = exfat_get_dentry_cached(es, i);
chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum,
chksum_type);
chksum_type = CS_DEFAULT;
}
- ep = exfat_get_dentry_cached(es, 0);
+ ep = exfat_get_dentry_cached(es, ES_IDX_FILE);
ep->dentry.file.checksum = cpu_to_le16(chksum);
es->modified = true;
}
-int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
+int exfat_put_dentry_set(struct exfat_entry_set_cache *es, int sync)
{
int i, err = 0;
@@ -620,7 +552,10 @@ int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
bforget(es->bh[i]);
else
brelse(es->bh[i]);
- kfree(es);
+
+ if (IS_DYNAMIC_ES(es))
+ kfree(es->bh);
+
return err;
}
@@ -656,8 +591,8 @@ static int exfat_walk_fat_chain(struct super_block *sb,
return 0;
}
-int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
- int entry, sector_t *sector, int *offset)
+static int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, sector_t *sector, int *offset)
{
int ret;
unsigned int off, clu = 0;
@@ -669,6 +604,11 @@ int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
if (ret)
return ret;
+ if (!exfat_test_bitmap(sb, clu)) {
+ exfat_err(sb, "failed to test cluster bit(%u)", clu);
+ return -EIO;
+ }
+
/* byte offset in cluster */
off = EXFAT_CLU_OFFSET(off, sbi);
@@ -717,8 +657,7 @@ static int exfat_dir_readahead(struct super_block *sb, sector_t sec)
}
struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
- sector_t *sector)
+ struct exfat_chain *p_dir, int entry, struct buffer_head **bh)
{
unsigned int dentries_per_page = EXFAT_B_TO_DEN(PAGE_SIZE);
int off;
@@ -740,17 +679,15 @@ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
if (!*bh)
return NULL;
- if (sector)
- *sector = sec;
return (struct exfat_dentry *)((*bh)->b_data + off);
}
enum exfat_validate_dentry_mode {
- ES_MODE_STARTED,
ES_MODE_GET_FILE_ENTRY,
ES_MODE_GET_STRM_ENTRY,
ES_MODE_GET_NAME_ENTRY,
ES_MODE_GET_CRITICAL_SEC_ENTRY,
+ ES_MODE_GET_BENIGN_SEC_ENTRY,
};
static bool exfat_validate_entry(unsigned int type,
@@ -760,40 +697,32 @@ static bool exfat_validate_entry(unsigned int type,
return false;
switch (*mode) {
- case ES_MODE_STARTED:
- if (type != TYPE_FILE && type != TYPE_DIR)
- return false;
- *mode = ES_MODE_GET_FILE_ENTRY;
- return true;
case ES_MODE_GET_FILE_ENTRY:
if (type != TYPE_STREAM)
return false;
*mode = ES_MODE_GET_STRM_ENTRY;
- return true;
+ break;
case ES_MODE_GET_STRM_ENTRY:
if (type != TYPE_EXTEND)
return false;
*mode = ES_MODE_GET_NAME_ENTRY;
- return true;
+ break;
case ES_MODE_GET_NAME_ENTRY:
- if (type == TYPE_STREAM)
+ if (type & TYPE_BENIGN_SEC)
+ *mode = ES_MODE_GET_BENIGN_SEC_ENTRY;
+ else if (type != TYPE_EXTEND)
return false;
- if (type != TYPE_EXTEND) {
- if (!(type & TYPE_CRITICAL_SEC))
- return false;
- *mode = ES_MODE_GET_CRITICAL_SEC_ENTRY;
- }
- return true;
- case ES_MODE_GET_CRITICAL_SEC_ENTRY:
- if (type == TYPE_EXTEND || type == TYPE_STREAM)
+ break;
+ case ES_MODE_GET_BENIGN_SEC_ENTRY:
+ /* Assume unrecognized benign secondary entry */
+ if (!(type & TYPE_BENIGN_SEC))
return false;
- if ((type & TYPE_CRITICAL_SEC) != TYPE_CRITICAL_SEC)
- return false;
- return true;
+ break;
default:
- WARN_ON_ONCE(1);
return false;
}
+
+ return true;
}
struct exfat_dentry *exfat_get_dentry_cached(
@@ -807,7 +736,7 @@ struct exfat_dentry *exfat_get_dentry_cached(
}
/*
- * Returns a set of dentries for a file or dir.
+ * Returns a set of dentries.
*
* Note It provides a direct pointer to bh->data via exfat_get_dentry_cached().
* User should call exfat_get_dentry_set() after setting 'modified' to apply
@@ -815,72 +744,79 @@ struct exfat_dentry *exfat_get_dentry_cached(
*
* in:
* sb+p_dir+entry: indicates a file/dir
- * type: specifies how many dentries should be included.
+ * num_entries: specifies how many dentries should be included.
+ * It will be set to es->num_entries if it is not 0.
+ * If num_entries is 0, es->num_entries will be obtained
+ * from the first dentry.
+ * out:
+ * es: pointer of entry set on success.
* return:
- * pointer of entry set on success,
- * NULL on failure.
+ * 0 on success
+ * -error code on failure
*/
-struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, unsigned int type)
+static int __exfat_get_dentry_set(struct exfat_entry_set_cache *es,
+ struct super_block *sb, struct exfat_chain *p_dir, int entry,
+ unsigned int num_entries)
{
int ret, i, num_bh;
- unsigned int off, byte_offset, clu = 0;
+ unsigned int off;
sector_t sec;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_entry_set_cache *es;
- struct exfat_dentry *ep;
- int num_entries;
- enum exfat_validate_dentry_mode mode = ES_MODE_STARTED;
struct buffer_head *bh;
if (p_dir->dir == DIR_DELETED) {
exfat_err(sb, "access to deleted dentry");
- return NULL;
+ return -EIO;
}
- byte_offset = EXFAT_DEN_TO_B(entry);
- ret = exfat_walk_fat_chain(sb, p_dir, byte_offset, &clu);
+ ret = exfat_find_location(sb, p_dir, entry, &sec, &off);
if (ret)
- return NULL;
+ return ret;
- es = kzalloc(sizeof(*es), GFP_KERNEL);
- if (!es)
- return NULL;
+ memset(es, 0, sizeof(*es));
es->sb = sb;
es->modified = false;
-
- /* byte offset in cluster */
- byte_offset = EXFAT_CLU_OFFSET(byte_offset, sbi);
-
- /* byte offset in sector */
- off = EXFAT_BLK_OFFSET(byte_offset, sb);
es->start_off = off;
-
- /* sector offset in cluster */
- sec = EXFAT_B_TO_BLK(byte_offset, sb);
- sec += exfat_cluster_to_sector(sbi, clu);
+ es->bh = es->__bh;
bh = sb_bread(sb, sec);
if (!bh)
- goto free_es;
+ return -EIO;
es->bh[es->num_bh++] = bh;
- ep = exfat_get_dentry_cached(es, 0);
- if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
- goto free_es;
+ if (num_entries == ES_ALL_ENTRIES) {
+ struct exfat_dentry *ep;
+
+ ep = exfat_get_dentry_cached(es, ES_IDX_FILE);
+ if (ep->type != EXFAT_FILE) {
+ brelse(bh);
+ return -EIO;
+ }
+
+ num_entries = ep->dentry.file.num_ext + 1;
+ }
- num_entries = type == ES_ALL_ENTRIES ?
- ep->dentry.file.num_ext + 1 : type;
es->num_entries = num_entries;
num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
+ if (num_bh > ARRAY_SIZE(es->__bh)) {
+ es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_NOFS);
+ if (!es->bh) {
+ brelse(bh);
+ return -ENOMEM;
+ }
+ es->bh[0] = bh;
+ }
+
for (i = 1; i < num_bh; i++) {
/* get the next sector */
if (exfat_is_last_sector_in_cluster(sbi, sec)) {
+ unsigned int clu = exfat_sector_to_cluster(sbi, sec);
+
if (p_dir->flags == ALLOC_NO_FAT_CHAIN)
clu++;
else if (exfat_get_next_cluster(sb, &clu))
- goto free_es;
+ goto put_es;
sec = exfat_cluster_to_sector(sbi, clu);
} else {
sec++;
@@ -888,21 +824,149 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
bh = sb_bread(sb, sec);
if (!bh)
- goto free_es;
+ goto put_es;
es->bh[es->num_bh++] = bh;
}
- /* validiate cached dentries */
- for (i = 1; i < num_entries; i++) {
+ return 0;
+
+put_es:
+ exfat_put_dentry_set(es, false);
+ return -EIO;
+}
+
+int exfat_get_dentry_set(struct exfat_entry_set_cache *es,
+ struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, unsigned int num_entries)
+{
+ int ret, i;
+ struct exfat_dentry *ep;
+ enum exfat_validate_dentry_mode mode = ES_MODE_GET_FILE_ENTRY;
+
+ ret = __exfat_get_dentry_set(es, sb, p_dir, entry, num_entries);
+ if (ret < 0)
+ return ret;
+
+ /* validate cached dentries */
+ for (i = ES_IDX_STREAM; i < es->num_entries; i++) {
ep = exfat_get_dentry_cached(es, i);
if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
- goto free_es;
+ goto put_es;
+ }
+ return 0;
+
+put_es:
+ exfat_put_dentry_set(es, false);
+ return -EIO;
+}
+
+static int exfat_validate_empty_dentry_set(struct exfat_entry_set_cache *es)
+{
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+ int i, off;
+ bool unused_hit = false;
+
+ /*
+ * ONLY UNUSED OR DELETED DENTRIES ARE ALLOWED:
+ * Although it violates the specification for a deleted entry to
+ * follow an unused entry, some exFAT implementations could work
+ * like this. Therefore, to improve compatibility, let's allow it.
+ */
+ for (i = 0; i < es->num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+ if (ep->type == EXFAT_UNUSED) {
+ unused_hit = true;
+ } else if (!IS_EXFAT_DELETED(ep->type)) {
+ if (unused_hit)
+ goto err_used_follow_unused;
+ i++;
+ goto count_skip_entries;
+ }
}
- return es;
-free_es:
- exfat_free_dentry_set(es, false);
- return NULL;
+ return 0;
+
+err_used_follow_unused:
+ off = es->start_off + (i << DENTRY_SIZE_BITS);
+ bh = es->bh[EXFAT_B_TO_BLK(off, es->sb)];
+
+ exfat_fs_error(es->sb,
+ "in sector %lld, dentry %d should be unused, but 0x%x",
+ bh->b_blocknr, off >> DENTRY_SIZE_BITS, ep->type);
+
+ return -EIO;
+
+count_skip_entries:
+ es->num_entries = EXFAT_B_TO_DEN(EXFAT_BLK_TO_B(es->num_bh, es->sb) - es->start_off);
+ for (; i < es->num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+ if (IS_EXFAT_DELETED(ep->type))
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * Get an empty dentry set.
+ *
+ * in:
+ * sb+p_dir+entry: indicates the empty dentry location
+ * num_entries: specifies how many empty dentries should be included.
+ * out:
+ * es: pointer of empty dentry set on success.
+ * return:
+ * 0 : on success
+ * >0 : the dentries are not empty, the return value is the number of
+ * dentries to be skipped for the next lookup.
+ * <0 : on failure
+ */
+int exfat_get_empty_dentry_set(struct exfat_entry_set_cache *es,
+ struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, unsigned int num_entries)
+{
+ int ret;
+
+ ret = __exfat_get_dentry_set(es, sb, p_dir, entry, num_entries);
+ if (ret < 0)
+ return ret;
+
+ ret = exfat_validate_empty_dentry_set(es);
+ if (ret)
+ exfat_put_dentry_set(es, false);
+
+ return ret;
+}
+
+static inline void exfat_reset_empty_hint(struct exfat_hint_femp *hint_femp)
+{
+ hint_femp->eidx = EXFAT_HINT_NONE;
+ hint_femp->count = 0;
+}
+
+static inline void exfat_set_empty_hint(struct exfat_inode_info *ei,
+ struct exfat_hint_femp *candi_empty, struct exfat_chain *clu,
+ int dentry, int num_entries, int entry_type)
+{
+ if (ei->hint_femp.eidx == EXFAT_HINT_NONE ||
+ ei->hint_femp.eidx > dentry) {
+ int total_entries = EXFAT_B_TO_DEN(i_size_read(&ei->vfs_inode));
+
+ if (candi_empty->count == 0) {
+ candi_empty->cur = *clu;
+ candi_empty->eidx = dentry;
+ }
+
+ if (entry_type == TYPE_UNUSED)
+ candi_empty->count += total_entries - dentry;
+ else
+ candi_empty->count++;
+
+ if (candi_empty->count == num_entries ||
+ candi_empty->count + candi_empty->eidx == total_entries)
+ ei->hint_femp = *candi_empty;
+ }
}
enum {
@@ -925,17 +989,22 @@ enum {
*/
int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
- int num_entries, unsigned int type, struct exfat_hint *hint_opt)
+ struct exfat_hint *hint_opt)
{
int i, rewind = 0, dentry = 0, end_eidx = 0, num_ext = 0, len;
int order, step, name_len = 0;
- int dentries_per_clu, num_empty = 0;
+ int dentries_per_clu;
unsigned int entry_type;
unsigned short *uniname = NULL;
struct exfat_chain clu;
struct exfat_hint *hint_stat = &ei->hint_stat;
struct exfat_hint_femp candi_empty;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int num_entries = exfat_calc_num_entries(p_uniname);
+ unsigned int clu_count = 0;
+
+ if (num_entries < 0)
+ return num_entries;
dentries_per_clu = sbi->dentries_per_clu;
@@ -947,10 +1016,13 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
end_eidx = dentry;
}
- candi_empty.eidx = EXFAT_HINT_NONE;
+ exfat_reset_empty_hint(&ei->hint_femp);
+
rewind:
order = 0;
step = DIRENT_STEP_FILE;
+ exfat_reset_empty_hint(&candi_empty);
+
while (clu.dir != EXFAT_EOF_CLUSTER) {
i = dentry & (dentries_per_clu - 1);
for (; i < dentries_per_clu; i++, dentry++) {
@@ -960,7 +1032,7 @@ rewind:
if (rewind && dentry == end_eidx)
goto not_found;
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
@@ -970,26 +1042,9 @@ rewind:
entry_type == TYPE_DELETED) {
step = DIRENT_STEP_FILE;
- num_empty++;
- if (candi_empty.eidx == EXFAT_HINT_NONE &&
- num_empty == 1) {
- exfat_chain_set(&candi_empty.cur,
- clu.dir, clu.size, clu.flags);
- }
-
- if (candi_empty.eidx == EXFAT_HINT_NONE &&
- num_empty >= num_entries) {
- candi_empty.eidx =
- dentry - (num_empty - 1);
- WARN_ON(candi_empty.eidx < 0);
- candi_empty.count = num_empty;
-
- if (ei->hint_femp.eidx ==
- EXFAT_HINT_NONE ||
- candi_empty.eidx <=
- ei->hint_femp.eidx)
- ei->hint_femp = candi_empty;
- }
+ exfat_set_empty_hint(ei, &candi_empty, &clu,
+ dentry, num_entries,
+ entry_type);
brelse(bh);
if (entry_type == TYPE_UNUSED)
@@ -997,17 +1052,14 @@ rewind:
continue;
}
- num_empty = 0;
- candi_empty.eidx = EXFAT_HINT_NONE;
+ exfat_reset_empty_hint(&candi_empty);
if (entry_type == TYPE_FILE || entry_type == TYPE_DIR) {
step = DIRENT_STEP_FILE;
hint_opt->clu = clu.dir;
hint_opt->eidx = i;
- if (type == TYPE_ALL || type == entry_type) {
- num_ext = ep->dentry.file.num_ext;
- step = DIRENT_STEP_STRM;
- }
+ num_ext = ep->dentry.file.num_ext;
+ step = DIRENT_STEP_STRM;
brelse(bh);
continue;
}
@@ -1038,7 +1090,8 @@ rewind:
if (entry_type == TYPE_EXTEND) {
unsigned short entry_uniname[16], unichar;
- if (step != DIRENT_STEP_NAME) {
+ if (step != DIRENT_STEP_NAME ||
+ name_len >= MAX_NAME_LENGTH) {
step = DIRENT_STEP_FILE;
continue;
}
@@ -1086,6 +1139,10 @@ rewind:
} else {
if (exfat_get_next_cluster(sb, &clu.dir))
return -EIO;
+
+ /* break if the cluster chain includes a loop */
+ if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
+ goto not_found;
}
}
@@ -1098,12 +1155,19 @@ not_found:
rewind = 1;
dentry = 0;
clu.dir = p_dir->dir;
- /* reset empty hint */
- num_empty = 0;
- candi_empty.eidx = EXFAT_HINT_NONE;
goto rewind;
}
+ /*
+ * set the EXFAT_EOF_CLUSTER flag to avoid searching from the
+ * beginning again when a new cluster is allocated
+ */
+ if (ei->hint_femp.eidx == EXFAT_HINT_NONE) {
+ ei->hint_femp.cur.dir = EXFAT_EOF_CLUSTER;
+ ei->hint_femp.eidx = p_dir->size * dentries_per_clu;
+ ei->hint_femp.count = 0;
+ }
+
/* initialized hint_stat */
hint_stat->clu = p_dir->dir;
hint_stat->eidx = 0;
@@ -1136,34 +1200,12 @@ found:
return dentry - num_ext;
}
-int exfat_count_ext_entries(struct super_block *sb, struct exfat_chain *p_dir,
- int entry, struct exfat_dentry *ep)
-{
- int i, count = 0;
- unsigned int type;
- struct exfat_dentry *ext_ep;
- struct buffer_head *bh;
-
- for (i = 0, entry++; i < ep->dentry.file.num_ext; i++, entry++) {
- ext_ep = exfat_get_dentry(sb, p_dir, entry, &bh, NULL);
- if (!ext_ep)
- return -EIO;
-
- type = exfat_get_entry_type(ext_ep);
- brelse(bh);
- if (type == TYPE_EXTEND || type == TYPE_STREAM)
- count++;
- else
- break;
- }
- return count;
-}
-
int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
{
int i, count = 0;
int dentries_per_clu;
unsigned int entry_type;
+ unsigned int clu_count = 0;
struct exfat_chain clu;
struct exfat_dentry *ep;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -1175,7 +1217,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
while (clu.dir != EXFAT_EOF_CLUSTER) {
for (i = 0; i < dentries_per_clu; i++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
entry_type = exfat_get_entry_type(ep);
@@ -1196,8 +1238,174 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
} else {
if (exfat_get_next_cluster(sb, &(clu.dir)))
return -EIO;
+
+ if (unlikely(++clu_count > sbi->used_clusters)) {
+ exfat_fs_error(sb, "FAT or bitmap is corrupted");
+ return -EIO;
+ }
+
}
}
return count;
}
+
+static int exfat_get_volume_label_dentry(struct super_block *sb,
+ struct exfat_entry_set_cache *es)
+{
+ int i;
+ int dentry = 0;
+ unsigned int type;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_hint_femp hint_femp;
+ struct exfat_inode_info *ei = EXFAT_I(sb->s_root->d_inode);
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+
+ hint_femp.eidx = EXFAT_HINT_NONE;
+ exfat_chain_set(&clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ for (i = 0; i < sbi->dentries_per_clu; i++, dentry++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
+ if (!ep)
+ return -EIO;
+
+ type = exfat_get_entry_type(ep);
+ if (hint_femp.eidx == EXFAT_HINT_NONE) {
+ if (type == TYPE_DELETED || type == TYPE_UNUSED) {
+ hint_femp.cur = clu;
+ hint_femp.eidx = dentry;
+ hint_femp.count = 1;
+ }
+ }
+
+ if (type == TYPE_UNUSED) {
+ brelse(bh);
+ goto not_found;
+ }
+
+ if (type != TYPE_VOLUME) {
+ brelse(bh);
+ continue;
+ }
+
+ memset(es, 0, sizeof(*es));
+ es->sb = sb;
+ es->bh = es->__bh;
+ es->bh[0] = bh;
+ es->num_bh = 1;
+ es->start_off = EXFAT_DEN_TO_B(i) % sb->s_blocksize;
+
+ return 0;
+ }
+
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+ }
+
+not_found:
+ if (hint_femp.eidx == EXFAT_HINT_NONE) {
+ hint_femp.cur.dir = EXFAT_EOF_CLUSTER;
+ hint_femp.eidx = dentry;
+ hint_femp.count = 0;
+ }
+
+ ei->hint_femp = hint_femp;
+
+ return -ENOENT;
+}
+
+int exfat_read_volume_label(struct super_block *sb, struct exfat_uni_name *label_out)
+{
+ int ret, i;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_entry_set_cache es;
+ struct exfat_dentry *ep;
+
+ mutex_lock(&sbi->s_lock);
+
+ memset(label_out, 0, sizeof(*label_out));
+ ret = exfat_get_volume_label_dentry(sb, &es);
+ if (ret < 0) {
+ /*
+ * ENOENT signifies that a volume label dentry doesn't exist.
+ * We will treat this as an empty volume label and not fail.
+ */
+ if (ret == -ENOENT)
+ ret = 0;
+
+ goto unlock;
+ }
+
+ ep = exfat_get_dentry_cached(&es, 0);
+ label_out->name_len = ep->dentry.volume_label.char_count;
+ if (label_out->name_len > EXFAT_VOLUME_LABEL_LEN) {
+ ret = -EIO;
+ exfat_put_dentry_set(&es, false);
+ goto unlock;
+ }
+
+ for (i = 0; i < label_out->name_len; i++)
+ label_out->name[i] = le16_to_cpu(ep->dentry.volume_label.volume_label[i]);
+
+ exfat_put_dentry_set(&es, false);
+unlock:
+ mutex_unlock(&sbi->s_lock);
+ return ret;
+}
+
+int exfat_write_volume_label(struct super_block *sb,
+ struct exfat_uni_name *label)
+{
+ int ret, i;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct inode *root_inode = sb->s_root->d_inode;
+ struct exfat_entry_set_cache es;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+
+ if (label->name_len > EXFAT_VOLUME_LABEL_LEN)
+ return -EINVAL;
+
+ mutex_lock(&sbi->s_lock);
+
+ ret = exfat_get_volume_label_dentry(sb, &es);
+ if (ret == -ENOENT) {
+ if (label->name_len == 0) {
+ /* No volume label dentry, no need to clear */
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = exfat_find_empty_entry(root_inode, &clu, 1, &es);
+ }
+
+ if (ret < 0)
+ goto unlock;
+
+ ep = exfat_get_dentry_cached(&es, 0);
+
+ if (label->name_len == 0 && ep->dentry.volume_label.char_count == 0) {
+ /* volume label had been cleared */
+ exfat_put_dentry_set(&es, 0);
+ goto unlock;
+ }
+
+ memset(ep, 0, sizeof(*ep));
+ ep->type = EXFAT_VOLUME;
+
+ for (i = 0; i < label->name_len; i++)
+ ep->dentry.volume_label.volume_label[i] =
+ cpu_to_le16(label->name[i]);
+
+ ep->dentry.volume_label.char_count = label->name_len;
+ es.modified = true;
+
+ ret = exfat_put_dentry_set(&es, IS_DIRSYNC(root_inode));
+
+unlock:
+ mutex_unlock(&sbi->s_lock);
+ return ret;
+}
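+/*
+ * Illustrative userspace sketch (assumption, not taken from this diff):
+ * the helpers above read and write the on-disk volume-label dentry, which
+ * is the kind of backend the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL
+ * ioctls need.  If that is how they are wired up, the label can be read
+ * like this:
+ */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_GETFSLABEL, FSLABEL_MAX */

static int print_volume_label(const char *mountpoint)
{
	char label[FSLABEL_MAX] = "";
	int ret, fd = open(mountpoint, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FS_IOC_GETFSLABEL, label);
	if (ret == 0)
		printf("label: %s\n", label);
	close(fd);
	return ret;
}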
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 1d6da61157c9..176fef62574c 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -9,12 +9,11 @@
#include <linux/fs.h>
#include <linux/ratelimit.h>
#include <linux/nls.h>
+#include <linux/blkdev.h>
+#include <uapi/linux/exfat.h>
-#define EXFAT_SUPER_MAGIC 0x2011BAB0UL
#define EXFAT_ROOT_INO 1
-#define EXFAT_CLUSTERS_UNTRACKED (~0u)
-
/*
* exfat error flags
*/
@@ -28,9 +27,8 @@ enum exfat_error_mode {
* exfat nls lossy flag
*/
enum {
- NLS_NAME_NO_LOSSY, /* no lossy */
- NLS_NAME_LOSSY, /* just detected incorrect filename(s) */
- NLS_NAME_OVERLEN, /* the length is over than its limit */
+ NLS_NAME_NO_LOSSY = 0, /* no lossy */
+ NLS_NAME_LOSSY = 1 << 0, /* just detected incorrect filename(s) */
};
#define EXFAT_HASH_BITS 8
@@ -42,7 +40,15 @@ enum {
#define ES_2_ENTRIES 2
#define ES_ALL_ENTRIES 0
-#define DIR_DELETED 0xFFFF0321
+#define ES_IDX_FILE 0
+#define ES_IDX_STREAM 1
+#define ES_IDX_FIRST_FILENAME 2
+#define EXFAT_FILENAME_ENTRY_NUM(name_len) \
+ DIV_ROUND_UP(name_len, EXFAT_FILE_NAME_LEN)
+#define ES_IDX_LAST_FILENAME(name_len) \
+ (ES_IDX_FIRST_FILENAME + EXFAT_FILENAME_ENTRY_NUM(name_len) - 1)
+
+#define DIR_DELETED 0xFFFFFFF7
/* type values */
#define TYPE_UNUSED 0x0000
@@ -63,15 +69,13 @@ enum {
#define TYPE_PADDING 0x0402
#define TYPE_ACLTAB 0x0403
#define TYPE_BENIGN_SEC 0x0800
-#define TYPE_ALL 0x0FFF
+#define TYPE_VENDOR_EXT 0x0801
+#define TYPE_VENDOR_ALLOC 0x0802
#define MAX_CHARSET_SIZE 6 /* max size of multi-byte character */
#define MAX_NAME_LENGTH 255 /* max len of file name excluding NULL */
#define MAX_VFSNAME_BUF_SIZE ((MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE)
-/* Enough size to hold 256 dentry (even 512 Byte sector) */
-#define DIR_CACHE_SIZE (256*sizeof(struct exfat_dentry)/512+1)
-
#define EXFAT_HINT_NONE -1
#define EXFAT_MIN_SUBDIR 2
@@ -96,12 +100,18 @@ enum {
/*
* helpers for block size to dentry size conversion.
*/
-#define EXFAT_B_TO_DEN_IDX(b, sbi) \
- ((b) << ((sbi)->cluster_size_bits - DENTRY_SIZE_BITS))
#define EXFAT_B_TO_DEN(b) ((b) >> DENTRY_SIZE_BITS)
#define EXFAT_DEN_TO_B(b) ((b) << DENTRY_SIZE_BITS)
/*
+ * helpers for cluster size to dentry size conversion.
+ */
+#define EXFAT_CLU_TO_DEN(clu, sbi) \
+ ((clu) << ((sbi)->cluster_size_bits - DENTRY_SIZE_BITS))
+#define EXFAT_DEN_TO_CLU(dentry, sbi) \
+ ((dentry) >> ((sbi)->cluster_size_bits - DENTRY_SIZE_BITS))
+
+/*
* helpers for fat entry.
*/
#define FAT_ENT_SIZE (4)
@@ -123,8 +133,21 @@ enum {
#define BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent) (ent & BITS_PER_SECTOR_MASK(sb))
#define BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent) \
((ent / BITS_PER_BYTE) & ((sb)->s_blocksize - 1))
-#define BITS_PER_BYTE_MASK 0x7
-#define IGNORED_BITS_REMAINED(clu, clu_base) ((1 << ((clu) - (clu_base))) - 1)
+#define IGNORED_BITS_REMAINED(clu, clu_base) ((1UL << ((clu) - (clu_base))) - 1)
+
+#define ES_ENTRY_NUM(name_len) (ES_IDX_LAST_FILENAME(name_len) + 1)
+/* 19 entries = 1 file entry + 1 stream entry + 17 filename entries */
+#define ES_MAX_ENTRY_NUM ES_ENTRY_NUM(MAX_NAME_LENGTH)
+
+/*
+ * 19 entries x 32 bytes/entry = 608 bytes.
+ * The 608 bytes are in 3 sectors at most (even 512 Byte sector).
+ */
+#define DIR_CACHE_SIZE \
+ (DIV_ROUND_UP(EXFAT_DEN_TO_B(ES_MAX_ENTRY_NUM), SECTOR_SIZE) + 1)
+
+/* Superblock flags */
+#define EXFAT_FLAGS_SHUTDOWN 1
struct exfat_dentry_namebuf {
char *lfn;
@@ -167,21 +190,27 @@ struct exfat_hint {
struct exfat_entry_set_cache {
struct super_block *sb;
- bool modified;
unsigned int start_off;
int num_bh;
- struct buffer_head *bh[DIR_CACHE_SIZE];
+ struct buffer_head *__bh[DIR_CACHE_SIZE];
+ struct buffer_head **bh;
unsigned int num_entries;
+ bool modified;
};
+#define IS_DYNAMIC_ES(es) ((es)->__bh != (es)->bh)
+
struct exfat_dir_entry {
+ /* the cluster where file dentry is located */
struct exfat_chain dir;
+ /* the index of file dentry in ->dir */
int entry;
unsigned int type;
unsigned int start_clu;
unsigned char flags;
unsigned short attr;
loff_t size;
+ loff_t valid_size;
unsigned int num_subdirs;
struct timespec64 atime;
struct timespec64 mtime;
@@ -204,8 +233,12 @@ struct exfat_mount_options {
/* on error: continue, panic, remount-ro */
enum exfat_error_mode errors;
unsigned utf8:1, /* Use of UTF-8 character set */
- discard:1; /* Issue discard requests on deletions */
+ sys_tz:1, /* Use local timezone */
+ discard:1, /* Issue discard requests on deletions */
+ keep_last_dots:1; /* Keep trailing periods in paths */
int time_offset; /* Offset of timestamps from UTC (in minutes) */
+ /* Support creating zero-size directory, default: false */
+ bool zero_size_dir;
};
/*
@@ -237,6 +270,8 @@ struct exfat_sb_info {
unsigned int clu_srch_ptr; /* cluster search pointer */
unsigned int used_clusters; /* number of used clusters */
+ unsigned long s_exfat_flags; /* Exfat superblock flags */
+
struct mutex s_lock; /* superblock lock */
struct mutex bitmap_lock; /* bitmap lock */
struct exfat_mount_options options;
@@ -245,7 +280,6 @@ struct exfat_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
-
struct rcu_head rcu;
};
@@ -255,7 +289,9 @@ struct exfat_sb_info {
* EXFAT file system inode in-memory data
*/
struct exfat_inode_info {
+ /* the cluster where file dentry is located */
struct exfat_chain dir;
+ /* the index of file dentry in ->dir */
int entry;
unsigned int type;
unsigned short attr;
@@ -280,15 +316,9 @@ struct exfat_inode_info {
/* for avoiding the race between alloc and free */
unsigned int cache_valid_id;
- /*
- * NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access.
- * physically allocated size.
- */
- loff_t i_size_ondisk;
- /* block-aligned i_size (used in cont_write_begin) */
- loff_t i_size_aligned;
/* on-disk position of directory entry or 0 */
loff_t i_pos;
+ loff_t valid_size;
/* hash by i_location */
struct hlist_node i_hash_fat;
/* protect bmap against truncate */
@@ -308,6 +338,11 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
return container_of(inode, struct exfat_inode_info, vfs_inode);
}
+static inline int exfat_forced_shutdown(struct super_block *sb)
+{
+ return test_bit(EXFAT_FLAGS_SHUTDOWN, &EXFAT_SB(sb)->s_exfat_flags);
+}
+
/*
* If ->i_mode can't hold 0222 (i.e. ATTR_RO), we use ->i_attrs to
* save ATTR_RO instead of ->i_mode.
@@ -331,10 +366,10 @@ static inline int exfat_mode_can_hold_ro(struct inode *inode)
static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi,
unsigned short attr, mode_t mode)
{
- if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
+ if ((attr & EXFAT_ATTR_READONLY) && !(attr & EXFAT_ATTR_SUBDIR))
mode &= ~0222;
- if (attr & ATTR_SUBDIR)
+ if (attr & EXFAT_ATTR_SUBDIR)
return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
return (mode & ~sbi->options.fs_fmask) | S_IFREG;
@@ -346,18 +381,18 @@ static inline unsigned short exfat_make_attr(struct inode *inode)
unsigned short attr = EXFAT_I(inode)->attr;
if (S_ISDIR(inode->i_mode))
- attr |= ATTR_SUBDIR;
+ attr |= EXFAT_ATTR_SUBDIR;
if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222))
- attr |= ATTR_READONLY;
+ attr |= EXFAT_ATTR_READONLY;
return attr;
}
static inline void exfat_save_attr(struct inode *inode, unsigned short attr)
{
if (exfat_mode_can_hold_ro(inode))
- EXFAT_I(inode)->attr = attr & (ATTR_RWMASK | ATTR_READONLY);
+ EXFAT_I(inode)->attr = attr & (EXFAT_ATTR_RWMASK | EXFAT_ATTR_READONLY);
else
- EXFAT_I(inode)->attr = attr & ATTR_RWMASK;
+ EXFAT_I(inode)->attr = attr & EXFAT_ATTR_RWMASK;
}
static inline bool exfat_is_last_sector_in_cluster(struct exfat_sb_info *sbi,
@@ -374,13 +409,24 @@ static inline sector_t exfat_cluster_to_sector(struct exfat_sb_info *sbi,
sbi->data_start_sector;
}
-static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
+static inline unsigned int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
sector_t sec)
{
return ((sec - sbi->data_start_sector) >> sbi->sect_per_clus_bits) +
EXFAT_RESERVED_CLUSTERS;
}
+static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
+ unsigned int clus)
+{
+ return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
+}
+
+static inline loff_t exfat_ondisk_size(const struct inode *inode)
+{
+ return ((loff_t)inode->i_blocks) << 9;
+}
+
/* super.c */
int exfat_set_volume_dirty(struct super_block *sb);
int exfat_clear_volume_dirty(struct super_block *sb);
@@ -395,8 +441,6 @@ int exfat_ent_get(struct super_block *sb, unsigned int loc,
unsigned int *content);
int exfat_ent_set(struct super_block *sb, unsigned int loc,
unsigned int content);
-int exfat_count_ext_entries(struct super_block *sb, struct exfat_chain *p_dir,
- int entry, struct exfat_dentry *p_entry);
int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain,
unsigned int len);
int exfat_zeroed_cluster(struct inode *dir, unsigned int clu);
@@ -408,29 +452,34 @@ int exfat_count_num_clusters(struct super_block *sb,
/* balloc.c */
int exfat_load_bitmap(struct super_block *sb);
void exfat_free_bitmap(struct exfat_sb_info *sbi);
-int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
+int exfat_set_bitmap(struct super_block *sb, unsigned int clu, bool sync);
+int exfat_clear_bitmap(struct super_block *sb, unsigned int clu, bool sync);
+bool exfat_test_bitmap(struct super_block *sb, unsigned int clu);
unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
int exfat_trim_fs(struct inode *inode, struct fstrim_range *range);
/* file.c */
extern const struct file_operations exfat_file_operations;
-int __exfat_truncate(struct inode *inode, loff_t new_size);
-void exfat_truncate(struct inode *inode, loff_t size);
-int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int __exfat_truncate(struct inode *inode);
+void exfat_truncate(struct inode *inode);
+int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
-int exfat_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int exfat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, unsigned int request_mask,
unsigned int query_flags);
int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long exfat_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+int exfat_force_shutdown(struct super_block *sb, u32 flags);
/* namei.c */
extern const struct dentry_operations exfat_dentry_ops;
extern const struct dentry_operations exfat_utf8_dentry_ops;
+int exfat_find_empty_entry(struct inode *inode,
+ struct exfat_chain *p_dir, int num_entries,
+ struct exfat_entry_set_cache *es);
/* cache.c */
int exfat_cache_init(void);
@@ -444,32 +493,37 @@ int exfat_get_cluster(struct inode *inode, unsigned int cluster,
extern const struct inode_operations exfat_dir_inode_operations;
extern const struct file_operations exfat_dir_operations;
unsigned int exfat_get_entry_type(struct exfat_dentry *p_entry);
-int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
- int entry, unsigned int type, unsigned int start_clu,
- unsigned long long size);
-int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
- int entry, int num_entries, struct exfat_uni_name *p_uniname);
-int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
- int entry, int order, int num_entries);
-int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
- int entry);
-void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es);
+void exfat_init_dir_entry(struct exfat_entry_set_cache *es,
+ unsigned int type, unsigned int start_clu,
+ unsigned long long size, struct timespec64 *ts);
+void exfat_init_ext_entry(struct exfat_entry_set_cache *es, int num_entries,
+ struct exfat_uni_name *p_uniname);
+void exfat_remove_entries(struct inode *inode, struct exfat_entry_set_cache *es,
+ int order);
+void exfat_update_dir_chksum(struct exfat_entry_set_cache *es);
int exfat_calc_num_entries(struct exfat_uni_name *p_uniname);
int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
- int num_entries, unsigned int type, struct exfat_hint *hint_opt);
+ struct exfat_hint *hint_opt);
int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu);
-int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
- int entry, sector_t *sector, int *offset);
struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
- sector_t *sector);
+ struct exfat_chain *p_dir, int entry, struct buffer_head **bh);
struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es,
int num);
-struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, unsigned int type);
-int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync);
+int exfat_get_dentry_set(struct exfat_entry_set_cache *es,
+ struct super_block *sb, struct exfat_chain *p_dir, int entry,
+ unsigned int num_entries);
+#define exfat_get_dentry_set_by_ei(es, sb, ei) \
+ exfat_get_dentry_set(es, sb, &(ei)->dir, (ei)->entry, ES_ALL_ENTRIES)
+int exfat_get_empty_dentry_set(struct exfat_entry_set_cache *es,
+ struct super_block *sb, struct exfat_chain *p_dir, int entry,
+ unsigned int num_entries);
+int exfat_put_dentry_set(struct exfat_entry_set_cache *es, int sync);
int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir);
+int exfat_read_volume_label(struct super_block *sb,
+ struct exfat_uni_name *label_out);
+int exfat_write_volume_label(struct super_block *sb,
+ struct exfat_uni_name *label);
/* inode.c */
extern const struct inode_operations exfat_file_inode_operations;
@@ -479,6 +533,7 @@ struct inode *exfat_build_inode(struct super_block *sb,
void exfat_hash_inode(struct inode *inode, loff_t i_pos);
void exfat_unhash_inode(struct inode *inode);
struct inode *exfat_iget(struct super_block *sb, loff_t i_pos);
+int __exfat_write_inode(struct inode *inode, int sync);
int exfat_write_inode(struct inode *inode, struct writeback_control *wbc);
void exfat_evict_inode(struct inode *inode);
int exfat_block_truncate_page(struct inode *inode, loff_t from);
@@ -504,18 +559,21 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
#define exfat_fs_error_ratelimit(sb, fmt, args...) \
__exfat_fs_error(sb, __ratelimit(&EXFAT_SB(sb)->ratelimit), \
fmt, ## args)
-void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...)
- __printf(3, 4) __cold;
+
+/* expand to pr_*() with prefix */
#define exfat_err(sb, fmt, ...) \
- exfat_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+ pr_err("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
#define exfat_warn(sb, fmt, ...) \
- exfat_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
+ pr_warn("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
#define exfat_info(sb, fmt, ...) \
- exfat_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__)
+ pr_info("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
+#define exfat_debug(sb, fmt, ...) \
+ pr_debug("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 tz, __le16 time, __le16 date, u8 time_cs);
void exfat_truncate_atime(struct timespec64 *ts);
+void exfat_truncate_inode_atime(struct inode *inode);
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 *tz, __le16 *time, __le16 *date, u8 *time_cs);
u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type);
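The arithmetic behind the new ES_MAX_ENTRY_NUM/DIR_CACHE_SIZE definitions above can be checked with a small standalone sketch; DIV_ROUND_UP and the entry/sector sizes are re-declared locally and assumed to mirror the kernel's definitions (32-byte dentries, 512-byte minimum sector).

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_NAME_LENGTH		255	/* max filename length, as above */
#define EXFAT_FILE_NAME_LEN	15	/* UTF-16 chars per name entry */
#define DENTRY_SIZE_BITS	5	/* 32-byte directory entries */
#define SECTOR_SIZE		512	/* smallest exFAT sector size */

int main(void)
{
	/* 1 file entry + 1 stream entry + ceil(255 / 15) = 17 name entries */
	int entries = 2 + DIV_ROUND_UP(MAX_NAME_LENGTH, EXFAT_FILE_NAME_LEN);
	int bytes = entries << DENTRY_SIZE_BITS;		/* 19 * 32 = 608 */
	int cache = DIV_ROUND_UP(bytes, SECTOR_SIZE) + 1;	/* 2 + 1 = 3   */

	printf("entries=%d bytes=%d DIR_CACHE_SIZE=%d\n", entries, bytes, cache);
	return 0;
}

With 512-byte sectors the 608 bytes of a maximal entry set can straddle three sectors, which is exactly the DIR_CACHE_SIZE value the comment above arrives at.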
diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
index 7f39b1c6469c..4082fa7b8c14 100644
--- a/fs/exfat/exfat_raw.h
+++ b/fs/exfat/exfat_raw.h
@@ -27,6 +27,7 @@
((sbi)->num_clusters - EXFAT_RESERVED_CLUSTERS)
/* AllocationPossible and NoFatChain field in GeneralSecondaryFlags Field */
+#define ALLOC_POSSIBLE 0x01
#define ALLOC_FAT_CHAIN 0x01
#define ALLOC_NO_FAT_CHAIN 0x03
@@ -50,6 +51,8 @@
#define EXFAT_STREAM 0xC0 /* stream entry */
#define EXFAT_NAME 0xC1 /* file name entry */
#define EXFAT_ACL 0xC2 /* stream entry */
+#define EXFAT_VENDOR_EXT 0xE0 /* vendor extension entry */
+#define EXFAT_VENDOR_ALLOC 0xE1 /* vendor allocation entry */
#define IS_EXFAT_CRITICAL_PRI(x) (x < 0xA0)
#define IS_EXFAT_BENIGN_PRI(x) (x < 0xC0)
@@ -61,21 +64,23 @@
#define CS_DEFAULT 2
/* file attributes */
-#define ATTR_READONLY 0x0001
-#define ATTR_HIDDEN 0x0002
-#define ATTR_SYSTEM 0x0004
-#define ATTR_VOLUME 0x0008
-#define ATTR_SUBDIR 0x0010
-#define ATTR_ARCHIVE 0x0020
+#define EXFAT_ATTR_READONLY 0x0001
+#define EXFAT_ATTR_HIDDEN 0x0002
+#define EXFAT_ATTR_SYSTEM 0x0004
+#define EXFAT_ATTR_VOLUME 0x0008
+#define EXFAT_ATTR_SUBDIR 0x0010
+#define EXFAT_ATTR_ARCHIVE 0x0020
-#define ATTR_RWMASK (ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME | \
- ATTR_SUBDIR | ATTR_ARCHIVE)
+#define EXFAT_ATTR_RWMASK (EXFAT_ATTR_HIDDEN | EXFAT_ATTR_SYSTEM | \
+ EXFAT_ATTR_VOLUME | EXFAT_ATTR_SUBDIR | \
+ EXFAT_ATTR_ARCHIVE)
#define BOOTSEC_JUMP_BOOT_LEN 3
#define BOOTSEC_FS_NAME_LEN 8
#define BOOTSEC_OLDBPB_LEN 53
#define EXFAT_FILE_NAME_LEN 15
+#define EXFAT_VOLUME_LABEL_LEN 11
#define EXFAT_MIN_SECT_SIZE_BITS 9
#define EXFAT_MAX_SECT_SIZE_BITS 12
@@ -155,6 +160,29 @@ struct exfat_dentry {
__le32 start_clu;
__le64 size;
} __packed upcase; /* up-case table directory entry */
+ struct {
+ __u8 char_count;
+ __le16 volume_label[EXFAT_VOLUME_LABEL_LEN];
+ __u8 reserved[8];
+ } __packed volume_label; /* volume label directory entry */
+ struct {
+ __u8 flags;
+ __u8 vendor_guid[16];
+ __u8 vendor_defined[14];
+ } __packed vendor_ext; /* vendor extension directory entry */
+ struct {
+ __u8 flags;
+ __u8 vendor_guid[16];
+ __u8 vendor_defined[2];
+ __le32 start_clu;
+ __le64 size;
+ } __packed vendor_alloc; /* vendor allocation directory entry */
+ struct {
+ __u8 flags;
+ __u8 custom_defined[18];
+ __le32 start_clu;
+ __le64 size;
+ } __packed generic_secondary; /* generic secondary directory entry */
} __packed dentry;
} __packed;
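To make the new on-disk layout concrete, here is a hedged, self-contained sketch that decodes the volume_label arm of struct exfat_dentry defined above. The EXFAT_VOLUME type value (0x83) is taken from the surrounding header, and the __le16 label field is read as a host uint16_t, which is only correct on a little-endian machine; the kernel goes through its NLS helpers instead.

#include <stdio.h>
#include <stdint.h>

#define EXFAT_VOLUME		0x83	/* volume label directory entry type */
#define EXFAT_VOLUME_LABEL_LEN	11

/* Stand-in for the volume_label arm of struct exfat_dentry (32 bytes). */
struct volume_label_dentry {
	uint8_t type;
	uint8_t char_count;
	uint16_t volume_label[EXFAT_VOLUME_LABEL_LEN];	/* UTF-16LE on disk */
	uint8_t reserved[8];
} __attribute__((packed));

static void print_label(const struct volume_label_dentry *ep)
{
	int i;

	if (ep->type != EXFAT_VOLUME || ep->char_count > EXFAT_VOLUME_LABEL_LEN)
		return;

	/* ASCII-only dump for illustration. */
	for (i = 0; i < ep->char_count; i++)
		putchar(ep->volume_label[i] & 0x7f);
	putchar('\n');
}

int main(void)
{
	struct volume_label_dentry ep = {
		.type = EXFAT_VOLUME,
		.char_count = 4,
		.volume_label = { 'D', 'A', 'T', 'A' },
	};

	print_label(&ep);
	return 0;
}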
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index e949e563443c..c9c5f2e3a05e 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -4,8 +4,9 @@
*/
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -81,14 +82,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
return 0;
}
-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
- unsigned int clus)
-{
- if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
- return false;
- return true;
-}
-
int exfat_ent_get(struct super_block *sb, unsigned int loc,
unsigned int *content)
{
@@ -96,35 +89,36 @@ int exfat_ent_get(struct super_block *sb, unsigned int loc,
int err;
if (!is_valid_cluster(sbi, loc)) {
- exfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)",
+ exfat_fs_error_ratelimit(sb,
+ "invalid access to FAT (entry 0x%08x)",
loc);
return -EIO;
}
err = __exfat_ent_get(sb, loc, content);
if (err) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"failed to access to FAT (entry 0x%08x, err:%d)",
loc, err);
return err;
}
if (*content == EXFAT_FREE_CLUSTER) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"invalid access to FAT free cluster (entry 0x%08x)",
loc);
return -EIO;
}
if (*content == EXFAT_BAD_CLUSTER) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"invalid access to FAT bad cluster (entry 0x%08x)",
loc);
return -EIO;
}
if (*content != EXFAT_EOF_CLUSTER && !is_valid_cluster(sbi, *content)) {
- exfat_fs_error(sb,
+ exfat_fs_error_ratelimit(sb,
"invalid access to FAT (entry 0x%08x) bogus content (0x%08x)",
loc, *content);
return -EIO;
@@ -151,6 +145,20 @@ int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain,
return 0;
}
+static inline void exfat_discard_cluster(struct super_block *sb,
+ unsigned int clu, unsigned int num_clusters)
+{
+ int ret;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ ret = sb_issue_discard(sb, exfat_cluster_to_sector(sbi, clu),
+ sbi->sect_per_clus * num_clusters, GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ exfat_err(sb, "discard not supported by device, disabling");
+ sbi->options.discard = 0;
+ }
+}
+
/* This function must be called with bitmap_lock held */
static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
{
@@ -182,6 +190,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu));
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ int err;
unsigned int last_cluster = p_chain->dir + p_chain->size - 1;
do {
bool sync = false;
@@ -196,11 +205,18 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
cur_cmap_i = next_cmap_i;
}
- exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ err = exfat_clear_bitmap(sb, clu, (sync && IS_DIRSYNC(inode)));
+ if (err)
+ break;
clu++;
num_clusters++;
} while (num_clusters < p_chain->size);
+
+ if (sbi->options.discard)
+ exfat_discard_cluster(sb, p_chain->dir, p_chain->size);
} else {
+ unsigned int nr_clu = 1;
+
do {
bool sync = false;
unsigned int n_clu = clu;
@@ -217,16 +233,36 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
cur_cmap_i = next_cmap_i;
}
- exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ if (exfat_clear_bitmap(sb, clu, (sync && IS_DIRSYNC(inode))))
+ break;
+
+ if (sbi->options.discard) {
+ if (n_clu == clu + 1)
+ nr_clu++;
+ else {
+ exfat_discard_cluster(sb, clu - nr_clu + 1, nr_clu);
+ nr_clu = 1;
+ }
+ }
+
clu = n_clu;
num_clusters++;
if (err)
- goto dec_used_clus;
+ break;
+
+ if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
+ /*
+ * The cluster chain includes a loop; scan the
+ * bitmap to get the number of used clusters.
+ */
+ exfat_count_used_clusters(sb, &sbi->used_clusters);
+
+ return 0;
+ }
} while (clu != EXFAT_EOF_CLUSTER);
}
-dec_used_clus:
sbi->used_clusters -= num_clusters;
return 0;
}
@@ -259,7 +295,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
clu = next;
if (exfat_ent_get(sb, clu, &next))
return -EIO;
- } while (next != EXFAT_EOF_CLUSTER);
+ } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size);
if (p_chain->size != count) {
exfat_fs_error(sb,
@@ -276,10 +312,8 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
{
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct buffer_head *bhs[MAX_BUF_PER_PAGE];
- int nr_bhs = MAX_BUF_PER_PAGE;
- sector_t blknr, last_blknr;
- int err, i, n;
+ struct buffer_head *bh;
+ sector_t blknr, last_blknr, i;
blknr = exfat_cluster_to_sector(sbi, clu);
last_blknr = blknr + sbi->sect_per_clus;
@@ -293,37 +327,30 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
}
/* Zeroing the unused blocks on this cluster */
- while (blknr < last_blknr) {
- for (n = 0; n < nr_bhs && blknr < last_blknr; n++, blknr++) {
- bhs[n] = sb_getblk(sb, blknr);
- if (!bhs[n]) {
- err = -ENOMEM;
- goto release_bhs;
- }
- memset(bhs[n]->b_data, 0, sb->s_blocksize);
- }
-
- err = exfat_update_bhs(bhs, n, IS_DIRSYNC(dir));
- if (err)
- goto release_bhs;
+ for (i = blknr; i < last_blknr; i++) {
+ bh = sb_getblk(sb, i);
+ if (!bh)
+ return -ENOMEM;
- for (i = 0; i < n; i++)
- brelse(bhs[i]);
+ memset(bh->b_data, 0, sb->s_blocksize);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ brelse(bh);
}
- return 0;
-release_bhs:
- exfat_err(sb, "failed zeroed sect %llu\n", (unsigned long long)blknr);
- for (i = 0; i < n; i++)
- bforget(bhs[i]);
- return err;
+ if (IS_DIRSYNC(dir))
+ return sync_blockdev_range(sb->s_bdev,
+ EXFAT_BLK_TO_B(blknr, sb),
+ EXFAT_BLK_TO_B(last_blknr, sb) - 1);
+
+ return 0;
}
int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
struct exfat_chain *p_chain, bool sync_bmap)
{
int ret = -ENOSPC;
- unsigned int num_clusters = 0, total_cnt;
+ unsigned int total_cnt;
unsigned int hint_clu, new_clu, last_clu = EXFAT_EOF_CLUSTER;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -346,7 +373,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
/* find new cluster */
if (hint_clu == EXFAT_EOF_CLUSTER) {
if (sbi->clu_srch_ptr < EXFAT_FIRST_CLUSTER) {
- exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)\n",
+ exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)",
sbi->clu_srch_ptr);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
}
@@ -360,17 +387,11 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
/* check cluster validation */
if (!is_valid_cluster(sbi, hint_clu)) {
- exfat_err(sb, "hint_cluster is invalid (%u)",
- hint_clu);
+ if (hint_clu != sbi->num_clusters)
+ exfat_err(sb, "hint_cluster is invalid (%u), rewind to the first cluster",
+ hint_clu);
hint_clu = EXFAT_FIRST_CLUSTER;
- if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
- if (exfat_chain_cont_cluster(sb, p_chain->dir,
- num_clusters)) {
- ret = -EIO;
- goto unlock;
- }
- p_chain->flags = ALLOC_FAT_CHAIN;
- }
+ p_chain->flags = ALLOC_FAT_CHAIN;
}
p_chain->dir = EXFAT_EOF_CLUSTER;
@@ -380,7 +401,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
if (new_clu != hint_clu &&
p_chain->flags == ALLOC_NO_FAT_CHAIN) {
if (exfat_chain_cont_cluster(sb, p_chain->dir,
- num_clusters)) {
+ p_chain->size)) {
ret = -EIO;
goto free_cluster;
}
@@ -388,13 +409,11 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
/* update allocation bitmap */
- if (exfat_set_bitmap(inode, new_clu, sync_bmap)) {
+ if (exfat_set_bitmap(sb, new_clu, sync_bmap)) {
ret = -EIO;
goto free_cluster;
}
- num_clusters++;
-
/* update FAT table */
if (p_chain->flags == ALLOC_FAT_CHAIN) {
if (exfat_ent_set(sb, new_clu, EXFAT_EOF_CLUSTER)) {
@@ -411,13 +430,14 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
goto free_cluster;
}
}
+ p_chain->size++;
+
last_clu = new_clu;
- if (--num_alloc == 0) {
+ if (p_chain->size == num_alloc) {
sbi->clu_srch_ptr = hint_clu;
- sbi->used_clusters += num_clusters;
+ sbi->used_clusters += num_alloc;
- p_chain->size += num_clusters;
mutex_unlock(&sbi->bitmap_lock);
return 0;
}
@@ -428,7 +448,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
if (exfat_chain_cont_cluster(sb, p_chain->dir,
- num_clusters)) {
+ p_chain->size)) {
ret = -EIO;
goto free_cluster;
}
@@ -437,8 +457,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
}
free_cluster:
- if (num_clusters)
- __exfat_free_cluster(inode, p_chain);
+ __exfat_free_cluster(inode, p_chain);
unlock:
mutex_unlock(&sbi->bitmap_lock);
return ret;
@@ -472,5 +491,15 @@ int exfat_count_num_clusters(struct super_block *sb,
}
*ret_count = count;
+
+ /*
+ * since exfat_count_used_clusters() is not called, sbi->used_clusters
+ * cannot be used here.
+ */
+ if (unlikely(i == sbi->num_clusters && clu != EXFAT_EOF_CLUSTER)) {
+ exfat_fs_error(sb, "The cluster chain has a loop");
+ return -EIO;
+ }
+
return 0;
}
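The loop guards added above (the count <= p_chain->size bound in exfat_find_last_cluster() and the fallback scans in __exfat_free_cluster()/exfat_count_num_clusters()) boil down to never following more chain links than the volume has clusters. A minimal standalone analogue of that bound, using a toy in-memory FAT rather than the real on-disk one:

#include <stdio.h>

#define EOF_CLUSTER	0xffffffffu

/* Walk a FAT-style chain, refusing to follow more links than clusters exist. */
static int count_chain(const unsigned int *next, unsigned int num_clusters,
		       unsigned int start, unsigned int *out)
{
	unsigned int clu = start, count = 0;

	while (clu != EOF_CLUSTER) {
		if (++count > num_clusters)
			return -1;	/* more links than clusters: a loop */
		clu = next[clu];
	}
	*out = count;
	return 0;
}

int main(void)
{
	/* toy FAT: 2 -> 3 -> 4 -> EOF, and 5 -> 6 -> 5 (a corrupted loop) */
	unsigned int fat[8] = {
		[2] = 3, [3] = 4, [4] = EOF_CLUSTER, [5] = 6, [6] = 5,
	};
	unsigned int n;

	if (!count_chain(fat, 8, 2, &n))
		printf("chain at 2 has %u clusters\n", n);
	if (count_chain(fat, 8, 5, &n))
		printf("chain at 5 is corrupted (loop detected)\n");
	return 0;
}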
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 6af0191b648f..536c8078f0c1 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -8,45 +8,95 @@
#include <linux/cred.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
+#include <linux/fsnotify.h>
+#include <linux/security.h>
+#include <linux/msdos_fs.h>
+#include <linux/writeback.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
static int exfat_cont_expand(struct inode *inode, loff_t size)
{
- struct address_space *mapping = inode->i_mapping;
- loff_t start = i_size_read(inode), count = size - i_size_read(inode);
- int err, err2;
+ int ret;
+ unsigned int num_clusters, new_num_clusters, last_clu;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_chain clu;
- err = generic_cont_expand_simple(inode, size);
- if (err)
- return err;
+ truncate_pagecache(inode, i_size_read(inode));
+
+ ret = inode_newsize_ok(inode, size);
+ if (ret)
+ return ret;
+
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
+ new_num_clusters = EXFAT_B_TO_CLU_ROUND_UP(size, sbi);
+
+ if (new_num_clusters == num_clusters)
+ goto out;
+
+ if (num_clusters) {
+ exfat_chain_set(&clu, ei->start_clu, num_clusters, ei->flags);
+ ret = exfat_find_last_cluster(sb, &clu, &last_clu);
+ if (ret)
+ return ret;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ clu.dir = last_clu + 1;
+ } else {
+ last_clu = EXFAT_EOF_CLUSTER;
+ clu.dir = EXFAT_EOF_CLUSTER;
+ }
+
+ clu.size = 0;
+ clu.flags = ei->flags;
+
+ ret = exfat_alloc_cluster(inode, new_num_clusters - num_clusters,
+ &clu, inode_needs_sync(inode));
+ if (ret)
+ return ret;
+
+ /* Append new clusters to chain */
+ if (num_clusters) {
+ if (clu.flags != ei->flags)
+ if (exfat_chain_cont_cluster(sb, ei->start_clu, num_clusters))
+ goto free_clu;
+
+ if (clu.flags == ALLOC_FAT_CHAIN)
+ if (exfat_ent_set(sb, last_clu, clu.dir))
+ goto free_clu;
+ } else
+ ei->start_clu = clu.dir;
+
+ ei->flags = clu.flags;
+
+out:
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ /* Expanded range not zeroed, do not update valid_size */
+ i_size_write(inode, size);
+
+ inode->i_blocks = round_up(size, sbi->cluster_size) >> 9;
mark_inode_dirty(inode);
- if (!IS_SYNC(inode))
- return 0;
+ if (IS_SYNC(inode))
+ return write_inode_now(inode, 1);
- err = filemap_fdatawrite_range(mapping, start, start + count - 1);
- err2 = sync_mapping_buffers(mapping);
- if (!err)
- err = err2;
- err2 = write_inode_now(inode, 1);
- if (!err)
- err = err2;
- if (err)
- return err;
+ return 0;
- return filemap_fdatawait_range(mapping, start, start + count - 1);
+free_clu:
+ exfat_free_cluster(inode, &clu);
+ return -EIO;
}
-static bool exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
+static bool exfat_allow_set_time(struct mnt_idmap *idmap,
+ struct exfat_sb_info *sbi, struct inode *inode)
{
mode_t allow_utime = sbi->options.allow_utime;
- if (!uid_eq(current_fsuid(), inode->i_uid)) {
- if (in_group_p(inode->i_gid))
+ if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode),
+ current_fsuid())) {
+ if (vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
allow_utime >>= 3;
if (allow_utime & MAY_WRITE)
return true;
@@ -93,7 +143,7 @@ static int exfat_sanitize_mode(const struct exfat_sb_info *sbi,
}
/* resize the file length */
-int __exfat_truncate(struct inode *inode, loff_t new_size)
+int __exfat_truncate(struct inode *inode)
{
unsigned int num_clusters_new, num_clusters_phys;
unsigned int last_clu = EXFAT_FREE_CLUSTER;
@@ -101,7 +151,6 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- int evict = (ei->dir.dir == DIR_DELETED) ? 1 : 0;
/* check if the given file ID is opened */
if (ei->type != TYPE_FILE && ei->type != TYPE_DIR)
@@ -110,12 +159,11 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
exfat_set_volume_dirty(sb);
num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
- num_clusters_phys =
- EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, sbi);
+ num_clusters_phys = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags);
- if (new_size > 0) {
+ if (i_size_read(inode) > 0) {
/*
* Truncate FAT chain num_clusters after the first cluster
* num_clusters = min(new, phys);
@@ -145,55 +193,25 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
ei->start_clu = EXFAT_EOF_CLUSTER;
}
- i_size_write(inode, new_size);
+ if (i_size_read(inode) < ei->valid_size)
+ ei->valid_size = i_size_read(inode);
if (ei->type == TYPE_FILE)
- ei->attr |= ATTR_ARCHIVE;
-
- /* update the directory entry */
- if (!evict) {
- struct timespec64 ts;
- struct exfat_dentry *ep, *ep2;
- struct exfat_entry_set_cache *es;
- int err;
-
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES);
- if (!es)
- return -EIO;
- ep = exfat_get_dentry_cached(es, 0);
- ep2 = exfat_get_dentry_cached(es, 1);
-
- ts = current_time(inode);
- exfat_set_entry_time(sbi, &ts,
- &ep->dentry.file.modify_tz,
- &ep->dentry.file.modify_time,
- &ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_cs);
- ep->dentry.file.attr = cpu_to_le16(ei->attr);
-
- /* File size should be zero if there is no cluster allocated */
- if (ei->start_clu == EXFAT_EOF_CLUSTER) {
- ep2->dentry.stream.valid_size = 0;
- ep2->dentry.stream.size = 0;
- } else {
- ep2->dentry.stream.valid_size = cpu_to_le64(new_size);
- ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
- }
-
- if (new_size == 0) {
- /* Any directory can not be truncated to zero */
- WARN_ON(ei->type != TYPE_FILE);
-
- ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
- ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
- }
+ ei->attr |= EXFAT_ATTR_ARCHIVE;
- exfat_update_dir_chksum_with_entry_set(es);
- err = exfat_free_dentry_set(es, inode_needs_sync(inode));
- if (err)
- return err;
- }
+ /*
+ * update the directory entry
+ *
+ * If the directory entry is updated by mark_inode_dirty(), the
+ * directory entry will be written after a writeback cycle of
+ * updating the bitmap/FAT, which may result in clusters being
+ * freed but referenced by the directory entry in the event of a
+ * sudden power failure.
+ * __exfat_write_inode() is called for the directory entry, bitmap
+ * and FAT to be written in the same writeback.
+ */
+ if (__exfat_write_inode(inode, inode_needs_sync(inode)))
+ return -EIO;
/* cut off from the FAT chain */
if (ei->flags == ALLOC_FAT_CHAIN && last_clu != EXFAT_FREE_CLUSTER &&
@@ -219,21 +237,18 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
if (exfat_free_cluster(inode, &clu))
return -EIO;
- exfat_clear_volume_dirty(sb);
-
return 0;
}
-void exfat_truncate(struct inode *inode, loff_t size)
+void exfat_truncate(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- unsigned int blocksize = i_blocksize(inode);
- loff_t aligned_size;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
int err;
mutex_lock(&sbi->s_lock);
- if (EXFAT_I(inode)->start_clu == 0) {
+ if (ei->start_clu == 0) {
/*
* Empty start_clu != ~0 (not allocated)
*/
@@ -241,41 +256,23 @@ void exfat_truncate(struct inode *inode, loff_t size)
goto write_size;
}
- err = __exfat_truncate(inode, i_size_read(inode));
+ err = __exfat_truncate(inode);
if (err)
goto write_size;
- inode->i_ctime = inode->i_mtime = current_time(inode);
- if (IS_DIRSYNC(inode))
- exfat_sync_inode(inode);
- else
- mark_inode_dirty(inode);
-
- inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) &
- ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
+ inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
write_size:
- aligned_size = i_size_read(inode);
- if (aligned_size & (blocksize - 1)) {
- aligned_size |= (blocksize - 1);
- aligned_size++;
- }
-
- if (EXFAT_I(inode)->i_size_ondisk > i_size_read(inode))
- EXFAT_I(inode)->i_size_ondisk = aligned_size;
-
- if (EXFAT_I(inode)->i_size_aligned > i_size_read(inode))
- EXFAT_I(inode)->i_size_aligned = aligned_size;
mutex_unlock(&sbi->s_lock);
}
-int exfat_getattr(struct user_namespace *mnt_uerns, const struct path *path,
+int exfat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, unsigned int request_mask,
unsigned int query_flags)
{
struct inode *inode = d_backing_inode(path->dentry);
struct exfat_inode_info *ei = EXFAT_I(inode);
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
exfat_truncate_atime(&stat->atime);
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = ei->i_crtime.tv_sec;
@@ -284,7 +281,7 @@ int exfat_getattr(struct user_namespace *mnt_uerns, const struct path *path,
return 0;
}
-int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb);
@@ -292,6 +289,9 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
unsigned int ia_valid;
int error;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
@@ -303,20 +303,22 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
- exfat_allow_set_time(sbi, inode)) {
+ exfat_allow_set_time(idmap, sbi, inode)) {
attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET |
ATTR_TIMES_SET);
}
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(idmap, dentry, attr);
attr->ia_valid = ia_valid;
if (error)
goto out;
if (((attr->ia_valid & ATTR_UID) &&
- !uid_eq(attr->ia_uid, sbi->options.fs_uid)) ||
+ (!uid_eq(from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid),
+ sbi->options.fs_uid))) ||
((attr->ia_valid & ATTR_GID) &&
- !gid_eq(attr->ia_gid, sbi->options.fs_gid)) ||
+ (!gid_eq(from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid),
+ sbi->options.fs_gid))) ||
((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) {
error = -EPERM;
@@ -332,6 +334,12 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
attr->ia_valid &= ~ATTR_MODE;
}
+ if (attr->ia_valid & ATTR_SIZE)
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+
+ setattr_copy(idmap, inode, attr);
+ exfat_truncate_inode_atime(inode);
+
if (attr->ia_valid & ATTR_SIZE) {
error = exfat_block_truncate_page(inode, attr->ia_size);
if (error)
@@ -339,35 +347,123 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
down_write(&EXFAT_I(inode)->truncate_lock);
truncate_setsize(inode, attr->ia_size);
- exfat_truncate(inode, attr->ia_size);
+
+ /*
+ * __exfat_write_inode() is called from exfat_truncate(); the inode
+ * is already written by it, so mark_inode_dirty() is unneeded.
+ */
+ exfat_truncate(inode);
up_write(&EXFAT_I(inode)->truncate_lock);
+ } else
+ mark_inode_dirty(inode);
+
+out:
+ return error;
+}
+
+/*
+ * modified ioctls from fat/file.c by Werner Almesberger
+ */
+static int exfat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
+{
+ u32 attr;
+
+ inode_lock_shared(inode);
+ attr = exfat_make_attr(inode);
+ inode_unlock_shared(inode);
+
+ return put_user(attr, user_attr);
+}
+
+static int exfat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
+{
+ struct inode *inode = file_inode(file);
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ int is_dir = S_ISDIR(inode->i_mode);
+ u32 attr, oldattr;
+ struct iattr ia;
+ int err;
+
+ err = get_user(attr, user_attr);
+ if (err)
+ goto out;
+
+ err = mnt_want_write_file(file);
+ if (err)
+ goto out;
+ inode_lock(inode);
+
+ oldattr = exfat_make_attr(inode);
+
+ /*
+ * Mask attributes so we don't set reserved fields.
+ */
+ attr &= (EXFAT_ATTR_READONLY | EXFAT_ATTR_HIDDEN | EXFAT_ATTR_SYSTEM |
+ EXFAT_ATTR_ARCHIVE);
+ attr |= (is_dir ? EXFAT_ATTR_SUBDIR : 0);
+
+ /* Equivalent to a chmod() */
+ ia.ia_valid = ATTR_MODE | ATTR_CTIME;
+ ia.ia_ctime = current_time(inode);
+ if (is_dir)
+ ia.ia_mode = exfat_make_mode(sbi, attr, 0777);
+ else
+ ia.ia_mode = exfat_make_mode(sbi, attr, 0666 | (inode->i_mode & 0111));
+
+ /* The root directory has no attributes */
+ if (inode->i_ino == EXFAT_ROOT_INO && attr != EXFAT_ATTR_SUBDIR) {
+ err = -EINVAL;
+ goto out_unlock_inode;
}
- setattr_copy(&init_user_ns, inode, attr);
- exfat_truncate_atime(&inode->i_atime);
- mark_inode_dirty(inode);
+ if (((attr | oldattr) & EXFAT_ATTR_SYSTEM) &&
+ !capable(CAP_LINUX_IMMUTABLE)) {
+ err = -EPERM;
+ goto out_unlock_inode;
+ }
+ /*
+ * The security check is questionable... We single
+ * out the RO attribute for checking by the security
+ * module, just because it maps to a file mode.
+ */
+ err = security_inode_setattr(file_mnt_idmap(file),
+ file->f_path.dentry, &ia);
+ if (err)
+ goto out_unlock_inode;
+
+ /* This MUST be done before doing anything irreversible... */
+ err = exfat_setattr(file_mnt_idmap(file), file->f_path.dentry, &ia);
+ if (err)
+ goto out_unlock_inode;
+
+ fsnotify_change(file->f_path.dentry, ia.ia_valid);
+
+ exfat_save_attr(inode, attr);
+ mark_inode_dirty(inode);
+out_unlock_inode:
+ inode_unlock(inode);
+ mnt_drop_write_file(file);
out:
- return error;
+ return err;
}
static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
{
- struct request_queue *q = bdev_get_queue(inode->i_sb->s_bdev);
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(inode->i_sb->s_bdev))
return -EOPNOTSUPP;
if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
return -EFAULT;
range.minlen = max_t(unsigned int, range.minlen,
- q->limits.discard_granularity);
+ bdev_discard_granularity(inode->i_sb->s_bdev));
ret = exfat_trim_fs(inode, &range);
if (ret < 0)
@@ -379,13 +475,86 @@ static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
return 0;
}
+static int exfat_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return exfat_force_shutdown(sb, flags);
+}
+
+static int exfat_ioctl_get_volume_label(struct super_block *sb, unsigned long arg)
+{
+ int ret;
+ char label[FSLABEL_MAX] = {0};
+ struct exfat_uni_name uniname;
+
+ ret = exfat_read_volume_label(sb, &uniname);
+ if (ret < 0)
+ return ret;
+
+ ret = exfat_utf16_to_nls(sb, &uniname, label, uniname.name_len);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user((char __user *)arg, label, ret + 1))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int exfat_ioctl_set_volume_label(struct super_block *sb,
+ unsigned long arg)
+{
+ int ret = 0, lossy, label_len;
+ char label[FSLABEL_MAX] = {0};
+ struct exfat_uni_name uniname;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(label, (char __user *)arg, FSLABEL_MAX))
+ return -EFAULT;
+
+ memset(&uniname, 0, sizeof(uniname));
+ label_len = strnlen(label, FSLABEL_MAX - 1);
+ if (label[0]) {
+ ret = exfat_nls_to_utf16(sb, label, label_len,
+ &uniname, &lossy);
+ if (ret < 0)
+ return ret;
+ else if (lossy & NLS_NAME_LOSSY)
+ return -EINVAL;
+ }
+
+ uniname.name_len = ret;
+
+ return exfat_write_volume_label(sb, &uniname);
+}
+
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ u32 __user *user_attr = (u32 __user *)arg;
switch (cmd) {
+ case FAT_IOCTL_GET_ATTRIBUTES:
+ return exfat_ioctl_get_attributes(inode, user_attr);
+ case FAT_IOCTL_SET_ATTRIBUTES:
+ return exfat_ioctl_set_attributes(filp, user_attr);
+ case EXFAT_IOC_SHUTDOWN:
+ return exfat_ioctl_shutdown(inode->i_sb, arg);
case FITRIM:
return exfat_ioctl_fitrim(inode, arg);
+ case FS_IOC_GETFSLABEL:
+ return exfat_ioctl_get_volume_label(inode->i_sb, arg);
+ case FS_IOC_SETFSLABEL:
+ return exfat_ioctl_set_volume_label(inode->i_sb, arg);
default:
return -ENOTTY;
}
@@ -404,6 +573,9 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct inode *inode = filp->f_mapping->host;
int err;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
err = __generic_file_fsync(filp, start, end, datasync);
if (err)
return err;
@@ -415,17 +587,190 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
+static int exfat_extend_valid_size(struct inode *inode, loff_t new_valid_size)
+{
+ int err;
+ loff_t pos;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *ops = mapping->a_ops;
+
+ pos = ei->valid_size;
+ while (pos < new_valid_size) {
+ u32 len;
+ struct folio *folio;
+ unsigned long off;
+
+ len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
+ if (pos + len > new_valid_size)
+ len = new_valid_size - pos;
+
+ err = ops->write_begin(NULL, mapping, pos, len, &folio, NULL);
+ if (err)
+ goto out;
+
+ off = offset_in_folio(folio, pos);
+ folio_zero_new_buffers(folio, off, off + len);
+
+ err = ops->write_end(NULL, mapping, pos, len, len, folio, NULL);
+ if (err < 0)
+ goto out;
+ pos += len;
+
+ balance_dirty_pages_ratelimited(mapping);
+ cond_resched();
+ }
+
+ return 0;
+
+out:
+ return err;
+}
+
+static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ ssize_t ret;
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ loff_t pos = iocb->ki_pos;
+ loff_t valid_size;
+
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
+ inode_lock(inode);
+
+ if (pos > i_size_read(inode))
+ truncate_pagecache(inode, i_size_read(inode));
+
+ valid_size = ei->valid_size;
+
+ ret = generic_write_checks(iocb, iter);
+ if (ret <= 0)
+ goto unlock;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ unsigned long align = pos | iov_iter_alignment(iter);
+
+ if (!IS_ALIGNED(align, i_blocksize(inode)) &&
+ !IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev))) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ }
+
+ if (pos > valid_size) {
+ ret = exfat_extend_valid_size(inode, pos);
+ if (ret < 0 && ret != -ENOSPC) {
+ exfat_err(inode->i_sb,
+ "write: fail to zero from %llu to %llu(%zd)",
+ valid_size, pos, ret);
+ }
+ if (ret < 0)
+ goto unlock;
+ }
+
+ ret = __generic_file_write_iter(iocb, iter);
+ if (ret < 0)
+ goto unlock;
+
+ inode_unlock(inode);
+
+ if (pos > valid_size)
+ pos = valid_size;
+
+ if (iocb->ki_pos > pos) {
+ ssize_t err = generic_write_sync(iocb, iocb->ki_pos - pos);
+ if (err < 0)
+ return err;
+ }
+
+ return ret;
+
+unlock:
+ inode_unlock(inode);
+
+ return ret;
+}
+
+static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
+ return generic_file_read_iter(iocb, iter);
+}
+
+static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
+{
+ int err;
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *file = vma->vm_file;
+ struct inode *inode = file_inode(file);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ loff_t start, end;
+
+ if (!inode_trylock(inode))
+ return VM_FAULT_RETRY;
+
+ start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ end = min_t(loff_t, i_size_read(inode),
+ start + vma->vm_end - vma->vm_start);
+
+ if (ei->valid_size < end) {
+ err = exfat_extend_valid_size(inode, end);
+ if (err < 0) {
+ inode_unlock(inode);
+ return vmf_fs_error(err);
+ }
+ }
+
+ inode_unlock(inode);
+
+ return filemap_page_mkwrite(vmf);
+}
+
+static const struct vm_operations_struct exfat_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = exfat_page_mkwrite,
+};
+
+static int exfat_file_mmap_prepare(struct vm_area_desc *desc)
+{
+ struct file *file = desc->file;
+
+ if (unlikely(exfat_forced_shutdown(file_inode(desc->file)->i_sb)))
+ return -EIO;
+
+ file_accessed(file);
+ desc->vm_ops = &exfat_file_vm_ops;
+ return 0;
+}
+
+static ssize_t exfat_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags)
+{
+ if (unlikely(exfat_forced_shutdown(file_inode(in)->i_sb)))
+ return -EIO;
+
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
const struct file_operations exfat_file_operations = {
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
+ .read_iter = exfat_file_read_iter,
+ .write_iter = exfat_file_write_iter,
.unlocked_ioctl = exfat_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = exfat_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap_prepare = exfat_file_mmap_prepare,
.fsync = exfat_file_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = exfat_splice_read,
.splice_write = iter_file_splice_write,
};
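The new ioctl cases above are reachable with the stock FAT attribute ioctls from <linux/msdos_fs.h>; the handler masks the value down to the READONLY/HIDDEN/SYSTEM/ARCHIVE bits before applying it, and EXFAT_IOC_SHUTDOWN takes one of the going-down flags declared in uapi/linux/exfat.h. A hedged userspace sketch of toggling the hidden bit (not part of the patch; /mnt/exfat/file.txt is an assumed example path):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>	/* FAT_IOCTL_GET/SET_ATTRIBUTES, ATTR_HIDDEN */

int main(void)
{
	__u32 attr;
	int fd = open("/mnt/exfat/file.txt", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, FAT_IOCTL_GET_ATTRIBUTES, &attr) == 0) {
		attr |= ATTR_HIDDEN;	/* mark the file hidden */
		if (ioctl(fd, FAT_IOCTL_SET_ATTRIBUTES, &attr) < 0)
			perror("FAT_IOCTL_SET_ATTRIBUTES");
	}

	close(fd);
	return 0;
}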
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 1c7aa1ea4724..f9501c3a3666 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -17,21 +17,22 @@
#include "exfat_raw.h"
#include "exfat_fs.h"
-static int __exfat_write_inode(struct inode *inode, int sync)
+int __exfat_write_inode(struct inode *inode, int sync)
{
unsigned long long on_disk_size;
struct exfat_dentry *ep, *ep2;
- struct exfat_entry_set_cache *es = NULL;
+ struct exfat_entry_set_cache es;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- bool is_dir = (ei->type == TYPE_DIR) ? true : false;
+ bool is_dir = (ei->type == TYPE_DIR);
+ struct timespec64 ts;
if (inode->i_ino == EXFAT_ROOT_INO)
return 0;
/*
- * If the indode is already unlinked, there is no need for updating it.
+ * If the inode is already unlinked, there is no need for updating it.
*/
if (ei->dir.dir == DIR_DELETED)
return 0;
@@ -42,11 +43,10 @@ static int __exfat_write_inode(struct inode *inode, int sync)
exfat_set_volume_dirty(sb);
/* get the directory entry of given file or directory */
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES);
- if (!es)
+ if (exfat_get_dentry_set_by_ei(&es, sb, ei))
return -EIO;
- ep = exfat_get_dentry_cached(es, 0);
- ep2 = exfat_get_dentry_cached(es, 1);
+ ep = exfat_get_dentry_cached(&es, ES_IDX_FILE);
+ ep2 = exfat_get_dentry_cached(&es, ES_IDX_STREAM);
ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode));
@@ -56,16 +56,18 @@ static int __exfat_write_inode(struct inode *inode, int sync)
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
&ep->dentry.file.create_time_cs);
- exfat_set_entry_time(sbi, &inode->i_mtime,
- &ep->dentry.file.modify_tz,
- &ep->dentry.file.modify_time,
- &ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_cs);
- exfat_set_entry_time(sbi, &inode->i_atime,
- &ep->dentry.file.access_tz,
- &ep->dentry.file.access_time,
- &ep->dentry.file.access_date,
- NULL);
+ ts = inode_get_mtime(inode);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.modify_tz,
+ &ep->dentry.file.modify_time,
+ &ep->dentry.file.modify_date,
+ &ep->dentry.file.modify_time_cs);
+ ts = inode_get_atime(inode);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.access_tz,
+ &ep->dentry.file.access_time,
+ &ep->dentry.file.access_date,
+ NULL);
/* File size should be zero if there is no cluster allocated */
on_disk_size = i_size_read(inode);
@@ -73,17 +75,36 @@ static int __exfat_write_inode(struct inode *inode, int sync)
if (ei->start_clu == EXFAT_EOF_CLUSTER)
on_disk_size = 0;
- ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
- ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
+ ep2->dentry.stream.size = cpu_to_le64(on_disk_size);
+ /*
+ * mmap write does not use exfat_write_end(), so valid_size may be
+ * extended to the sector-aligned length in exfat_get_block().
+ * We need to fix up valid_size to the written length here.
+ */
+ if (on_disk_size < ei->valid_size)
+ ep2->dentry.stream.valid_size = ep2->dentry.stream.size;
+ else
+ ep2->dentry.stream.valid_size = cpu_to_le64(ei->valid_size);
+
+ if (on_disk_size) {
+ ep2->dentry.stream.flags = ei->flags;
+ ep2->dentry.stream.start_clu = cpu_to_le32(ei->start_clu);
+ } else {
+ ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
+ ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
+ }
- exfat_update_dir_chksum_with_entry_set(es);
- return exfat_free_dentry_set(es, sync);
+ exfat_update_dir_chksum(&es);
+ return exfat_put_dentry_set(&es, sync);
}
int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
@@ -105,19 +126,16 @@ void exfat_sync_inode(struct inode *inode)
static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
unsigned int *clu, int create)
{
- int ret, modified = false;
+ int ret;
unsigned int last_clu;
struct exfat_chain new_clu;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
unsigned int local_clu_offset = clu_offset;
- unsigned int num_to_be_allocated = 0, num_clusters = 0;
+ unsigned int num_to_be_allocated = 0, num_clusters;
- if (EXFAT_I(inode)->i_size_ondisk > 0)
- num_clusters =
- EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk,
- sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
if (clu_offset >= num_clusters)
num_to_be_allocated = clu_offset - num_clusters + 1;
@@ -197,7 +215,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
if (new_clu.flags == ALLOC_FAT_CHAIN)
ei->flags = ALLOC_FAT_CHAIN;
ei->start_clu = new_clu.dir;
- modified = true;
} else {
if (new_clu.flags != ei->flags) {
/* no-fat-chain bit is disabled,
@@ -207,7 +224,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
exfat_chain_cont_cluster(sb, ei->start_clu,
num_clusters);
ei->flags = ALLOC_FAT_CHAIN;
- modified = true;
}
if (new_clu.flags == ALLOC_FAT_CHAIN)
if (exfat_ent_set(sb, last_clu, new_clu.dir))
@@ -217,35 +233,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
num_clusters += num_to_be_allocated;
*clu = new_clu.dir;
- if (ei->dir.dir != DIR_DELETED && modified) {
- struct exfat_dentry *ep;
- struct exfat_entry_set_cache *es;
- int err;
-
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES);
- if (!es)
- return -EIO;
- /* get stream entry */
- ep = exfat_get_dentry_cached(es, 1);
-
- /* update directory entry */
- ep->dentry.stream.flags = ei->flags;
- ep->dentry.stream.start_clu =
- cpu_to_le32(ei->start_clu);
- ep->dentry.stream.valid_size =
- cpu_to_le64(i_size_read(inode));
- ep->dentry.stream.size =
- ep->dentry.stream.valid_size;
-
- exfat_update_dir_chksum_with_entry_set(es);
- err = exfat_free_dentry_set(es, inode_needs_sync(inode));
- if (err)
- return err;
- } /* end of if != DIR_DELETED */
-
- inode->i_blocks +=
- num_to_be_allocated << sbi->sect_per_clus_bits;
+ inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9;
/*
* Move *clu pointer along FAT chains (hole care) because the
@@ -273,21 +261,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
return 0;
}
-static int exfat_map_new_buffer(struct exfat_inode_info *ei,
- struct buffer_head *bh, loff_t pos)
-{
- if (buffer_delay(bh) && pos > ei->i_size_aligned)
- return -EIO;
- set_buffer_new(bh);
-
- /*
- * Adjust i_size_aligned if i_size_ondisk is bigger than it.
- */
- if (ei->i_size_ondisk > ei->i_size_aligned)
- ei->i_size_aligned = ei->i_size_ondisk;
- return 0;
-}
-
static int exfat_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -300,10 +273,12 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
unsigned int cluster, sec_offset;
sector_t last_block;
sector_t phys = 0;
- loff_t pos;
+ sector_t valid_blks;
+ loff_t i_size;
mutex_lock(&sbi->s_lock);
- last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
+ i_size = i_size_read(inode);
+ last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size, sb);
if (iblock >= last_block && !create)
goto done;
@@ -328,54 +303,135 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
mapped_blocks = sbi->sect_per_clus - sec_offset;
max_blocks = min(mapped_blocks, max_blocks);
- /* Treat newly added block / cluster */
- if (iblock < last_block)
- create = 0;
+ map_bh(bh_result, sb, phys);
+ if (buffer_delay(bh_result))
+ clear_buffer_delay(bh_result);
+
+ /*
+ * In most cases, we just need to set bh_result to mapped, unmapped
+ * or new status as follows:
+ * 1. i_size == valid_size
+ * 2. write case (create == 1)
+ * 3. direct_read (!bh_result->b_folio)
+ * -> the unwritten part will be zeroed in exfat_direct_IO()
+ *
+ * Otherwise, in the case of buffered read, it is necessary to take
+ * care of the last partially written block if valid_size is not
+ * equal to i_size.
+ */
+ if (i_size == ei->valid_size || create || !bh_result->b_folio)
+ valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb);
+ else
+ valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
+
+ /* The range has been fully written, map it */
+ if (iblock + max_blocks < valid_blks)
+ goto done;
- if (create || buffer_delay(bh_result)) {
- pos = EXFAT_BLK_TO_B((iblock + 1), sb);
- if (ei->i_size_ondisk < pos)
- ei->i_size_ondisk = pos;
+ /* The range has been partially written, map the written part */
+ if (iblock < valid_blks) {
+ max_blocks = valid_blks - iblock;
+ goto done;
}
+ /* The area has not been written, map and mark as new for create case */
if (create) {
- err = exfat_map_new_buffer(ei, bh_result, pos);
- if (err) {
- exfat_fs_error(sb,
- "requested for bmap out of range(pos : (%llu) > i_size_aligned(%llu)\n",
- pos, ei->i_size_aligned);
- goto unlock_ret;
+ set_buffer_new(bh_result);
+ ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
+ mark_inode_dirty(inode);
+ goto done;
+ }
+
+ /*
+ * The area has just one block partially written.
+ * In that case, we should read and fill the unwritten part of
+ * a block with zero.
+ */
+ if (bh_result->b_folio && iblock == valid_blks &&
+ (ei->valid_size & (sb->s_blocksize - 1))) {
+ loff_t size, pos;
+ void *addr;
+
+ max_blocks = 1;
+
+ /*
+ * No buffer_head is allocated.
+ * (1) bmap: It's enough to set blocknr without I/O.
+ * (2) read: The unwritten part should be filled with zero.
+ * If a folio does not have any buffers,
+ * let's return -EAGAIN to fall back to
+ * block_read_full_folio() for per-bh IO.
+ */
+ if (!folio_buffers(bh_result->b_folio)) {
+ err = -EAGAIN;
+ goto done;
}
+
+ pos = EXFAT_BLK_TO_B(iblock, sb);
+ size = ei->valid_size - pos;
+ addr = folio_address(bh_result->b_folio) +
+ offset_in_folio(bh_result->b_folio, pos);
+
+ /* Check if bh->b_data points to proper addr in folio */
+ if (bh_result->b_data != addr) {
+ exfat_fs_error_ratelimit(sb,
+ "b_data(%p) != folio_addr(%p)",
+ bh_result->b_data, addr);
+ err = -EINVAL;
+ goto done;
+ }
+
+ /* Read a block */
+ err = bh_read(bh_result, 0);
+ if (err < 0)
+ goto done;
+
+ /* Zero unwritten part of a block */
+ memset(bh_result->b_data + size, 0, bh_result->b_size - size);
+ err = 0;
+ goto done;
}
- if (buffer_delay(bh_result))
- clear_buffer_delay(bh_result);
- map_bh(bh_result, sb, phys);
+ /*
+ * The area has not been written; clear the mapped flag for the
+ * read/bmap cases so it is filled with zeros without reading from disk.
+ */
+ clear_buffer_mapped(bh_result);
done:
bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
+ if (err < 0)
+ clear_buffer_mapped(bh_result);
unlock_ret:
mutex_unlock(&sbi->s_lock);
return err;
}
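To make the new mapping policy easier to follow, here is a minimal standalone sketch (plain userspace C, not part of the patch; classify_range and the sample numbers are illustrative only) of the three-way decision exfat_get_block() now makes against valid_blks:

#include <stdio.h>
#include <stdint.h>

/* Three-way classification mirroring the checks in the hunk above:
 * the whole range lies below valid_blks, the range starts below but
 * crosses valid_blks, or the range starts at/after valid_blks. */
static const char *classify_range(uint64_t iblock, uint64_t max_blocks,
                                  uint64_t valid_blks)
{
        if (iblock + max_blocks < valid_blks)
                return "fully written: map it";
        if (iblock < valid_blks)
                return "partially written: map only the written part";
        return "unwritten: new (create) or zero-filled (read)";
}

int main(void)
{
        printf("%s\n", classify_range(0, 4, 8));   /* fully written */
        printf("%s\n", classify_range(6, 4, 8));   /* partially written */
        printf("%s\n", classify_range(9, 4, 8));   /* unwritten */
        return 0;
}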
-static int exfat_readpage(struct file *file, struct page *page)
+static int exfat_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, exfat_get_block);
+ return mpage_read_folio(folio, exfat_get_block);
}
static void exfat_readahead(struct readahead_control *rac)
{
- mpage_readahead(rac, exfat_get_block);
-}
+ struct address_space *mapping = rac->mapping;
+ struct inode *inode = mapping->host;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ loff_t pos = readahead_pos(rac);
-static int exfat_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, exfat_get_block, wbc);
+ /* Range crosses valid_size; read it page by page. */
+ if (ei->valid_size < i_size_read(inode) &&
+ pos <= ei->valid_size &&
+ ei->valid_size < pos + readahead_length(rac))
+ return;
+
+ mpage_readahead(rac, exfat_get_block);
}
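A small sketch, under the same assumptions (standalone C, made-up helper name), of the bail-out condition the new exfat_readahead() uses to decide when a readahead window would cross valid_size and must fall back to per-page reads:

#include <stdio.h>
#include <stdbool.h>

/* True when the window [pos, pos + len) straddles valid_size and the file
 * actually has an unwritten tail (valid_size < i_size). */
static bool readahead_crosses_valid_size(long long pos, long long len,
                                         long long valid_size, long long i_size)
{
        return valid_size < i_size &&
               pos <= valid_size &&
               valid_size < pos + len;
}

int main(void)
{
        printf("%d\n", readahead_crosses_valid_size(0, 4096, 1000, 2000)); /* 1 */
        printf("%d\n", readahead_crosses_valid_size(0, 4096, 8192, 8192)); /* 0 */
        return 0;
}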
static int exfat_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
return mpage_writepages(mapping, wbc, exfat_get_block);
}
@@ -385,20 +441,22 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
if (to > i_size_read(inode)) {
truncate_pagecache(inode, i_size_read(inode));
- exfat_truncate(inode, EXFAT_I(inode)->i_size_aligned);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ exfat_truncate(inode);
}
}
-static int exfat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
- struct page **pagep, void **fsdata)
+static int exfat_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned int len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- exfat_get_block,
- &EXFAT_I(mapping->host)->i_size_ondisk);
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
+ ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);
if (ret < 0)
exfat_write_failed(mapping, pos+len);
@@ -406,29 +464,27 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int exfat_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *pagep, void *fsdata)
+static int exfat_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
struct exfat_inode_info *ei = EXFAT_I(inode);
int err;
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
-
- if (EXFAT_I(inode)->i_size_aligned < i_size_read(inode)) {
- exfat_fs_error(inode->i_sb,
- "invalid size(size(%llu) > aligned(%llu)\n",
- i_size_read(inode), EXFAT_I(inode)->i_size_aligned);
- return -EIO;
- }
-
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (err < len)
exfat_write_failed(mapping, pos+len);
- if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
- ei->attr |= ATTR_ARCHIVE;
+ if (!(err < 0) && pos + err > ei->valid_size) {
+ ei->valid_size = pos + err;
+ mark_inode_dirty(inode);
+ }
+
+ if (!(err < 0) && !(ei->attr & EXFAT_ATTR_ARCHIVE)) {
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ei->attr |= EXFAT_ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -439,31 +495,41 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
- loff_t size = iocb->ki_pos + iov_iter_count(iter);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ loff_t pos = iocb->ki_pos;
+ loff_t size = pos + iov_iter_count(iter);
int rw = iov_iter_rw(iter);
ssize_t ret;
- if (rw == WRITE) {
- /*
- * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
- * so we need to update the ->i_size_aligned to block boundary.
- *
- * But we must fill the remaining area or hole by nul for
- * updating ->i_size_aligned
- *
- * Return 0, and fallback to normal buffered write.
- */
- if (EXFAT_I(inode)->i_size_aligned < size)
- return 0;
- }
-
/*
* Need to use the DIO_LOCKING for avoiding the race
* condition of exfat_get_block() and ->truncate().
*/
ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
- if (ret < 0 && (rw & WRITE))
- exfat_write_failed(mapping, size);
+ if (ret < 0) {
+ if (rw == WRITE && ret != -EIOCBQUEUED)
+ exfat_write_failed(mapping, size);
+
+ return ret;
+ } else
+ size = pos + ret;
+
+ if (rw == WRITE) {
+ /*
+ * If the block had been partially written before this write,
+ * ->valid_size will not have been updated in exfat_get_block(),
+ * so update it here.
+ */
+ if (ei->valid_size < size) {
+ ei->valid_size = size;
+ mark_inode_dirty(inode);
+ }
+ } else if (pos < ei->valid_size && ei->valid_size < size) {
+ /* zero the unwritten part in the partially written block */
+ iov_iter_revert(iter, size - ei->valid_size);
+ iov_iter_zero(size - ei->valid_size, iter);
+ }
+
return ret;
}
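For the read side of exfat_direct_IO(), the iov_iter_revert()/iov_iter_zero() pair only zeroes the bytes between valid_size and the end of the transfer. A hedged, standalone illustration of that arithmetic (dio_tail_to_zero is a made-up name, not a kernel helper):

#include <stdio.h>
#include <stdint.h>

/* Bytes at the tail of a direct read that lie beyond valid_size and must be
 * zeroed by the caller. Returns 0 when the read stays entirely inside or
 * entirely outside the valid range. */
static int64_t dio_tail_to_zero(int64_t pos, int64_t ret, int64_t valid_size)
{
        int64_t end = pos + ret;

        if (pos < valid_size && valid_size < end)
                return end - valid_size;
        return 0;
}

int main(void)
{
        /* valid_size = 1000: a 512-byte read at offset 768 gets 280 zeroed bytes */
        printf("%lld\n", (long long)dio_tail_to_zero(768, 512, 1000));  /* 280 */
        printf("%lld\n", (long long)dio_tail_to_zero(0, 512, 1000));    /* 0   */
        return 0;
}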
@@ -491,15 +557,16 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
}
static const struct address_space_operations exfat_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = exfat_readpage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = exfat_read_folio,
.readahead = exfat_readahead,
- .writepage = exfat_writepage,
.writepages = exfat_writepages,
.write_begin = exfat_write_begin,
.write_end = exfat_write_end,
.direct_IO = exfat_direct_IO,
- .bmap = exfat_aop_bmap
+ .bmap = exfat_aop_bmap,
+ .migrate_folio = buffer_migrate_folio,
};
static inline unsigned long exfat_hash(loff_t i_pos)
@@ -562,6 +629,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
ei->start_clu = info->start_clu;
ei->flags = info->flags;
ei->type = info->type;
+ ei->valid_size = info->valid_size;
ei->version = 0;
ei->hint_stat.eidx = 0;
@@ -573,9 +641,9 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
- if (info->attr & ATTR_SUBDIR) { /* directory */
+ if (info->attr & EXFAT_ATTR_SUBDIR) { /* directory */
inode->i_generation &= ~1;
inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
inode->i_op = &exfat_dir_inode_operations;
@@ -592,23 +660,13 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
i_size_write(inode, size);
- /* ondisk and aligned size should be aligned with block size */
- if (size & (inode->i_sb->s_blocksize - 1)) {
- size |= (inode->i_sb->s_blocksize - 1);
- size++;
- }
-
- ei->i_size_aligned = size;
- ei->i_size_ondisk = size;
-
exfat_save_attr(inode, info->attr);
- inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) &
- ~((loff_t)sbi->cluster_size - 1)) >> inode->i_blkbits;
- inode->i_mtime = info->mtime;
- inode->i_ctime = info->mtime;
+ inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ inode_set_mtime_to_ts(inode, info->mtime);
+ inode_set_ctime_to_ts(inode, info->mtime);
ei->i_crtime = info->crtime;
- inode->i_atime = info->atime;
+ inode_set_atime_to_ts(inode, info->atime);
return 0;
}
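The new i_blocks expression is just "allocation rounded up to a whole cluster, expressed in 512-byte units". A standalone sketch of the same arithmetic (assuming, as exFAT guarantees, a power-of-two cluster size; exfat_iblocks is an illustrative name):

#include <stdio.h>
#include <stdint.h>

/* i_blocks is counted in 512-byte units; on-disk allocation is rounded up
 * to a whole cluster, which is what round_up(...) >> 9 computes above. */
static uint64_t exfat_iblocks(uint64_t i_size, uint32_t cluster_size)
{
        uint64_t rounded = (i_size + cluster_size - 1) &
                           ~((uint64_t)cluster_size - 1);

        return rounded >> 9;    /* 512-byte sectors */
}

int main(void)
{
        /* a 5000-byte file on a 32 KiB-cluster volume occupies 64 sectors */
        printf("%llu\n", (unsigned long long)exfat_iblocks(5000, 32768));
        return 0;
}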
@@ -648,7 +706,7 @@ void exfat_evict_inode(struct inode *inode)
if (!inode->i_nlink) {
i_size_write(inode, 0);
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
- __exfat_truncate(inode, 0);
+ __exfat_truncate(inode);
mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
}
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index d34e6193258d..fa8459828046 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
+#include <linux/blk_types.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -45,23 +46,6 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
}
}
-/*
- * exfat_msg() - print preformated EXFAT specific messages.
- * All logs except what uses exfat_fs_error() should be written by exfat_msg()
- */
-void exfat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- /* level means KERN_ pacility level */
- printk("%sexFAT-fs (%s): %pV\n", level, sb->s_id, &vaf);
- va_end(args);
-}
-
#define SECS_PER_MIN (60)
#define TIMEZONE_SEC(x) ((x) * 15 * SECS_PER_MIN)
@@ -73,6 +57,13 @@ static void exfat_adjust_tz(struct timespec64 *ts, u8 tz_off)
ts->tv_sec += TIMEZONE_SEC(0x80 - tz_off);
}
+static inline int exfat_tz_offset(struct exfat_sb_info *sbi)
+{
+ if (sbi->options.sys_tz)
+ return -sys_tz.tz_minuteswest;
+ return sbi->options.time_offset;
+}
+
/* Convert a EXFAT time/date pair to a UNIX date (seconds since 1 1 70). */
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 tz, __le16 time, __le16 date, u8 time_cs)
@@ -95,8 +86,7 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
/* Adjust timezone to UTC0. */
exfat_adjust_tz(ts, tz & ~EXFAT_TZ_VALID);
else
- /* Convert from local time to UTC using time_offset. */
- ts->tv_sec -= sbi->options.time_offset * SECS_PER_MIN;
+ ts->tv_sec -= exfat_tz_offset(sbi) * SECS_PER_MIN;
}
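exfat_tz_offset() returns an offset in minutes east of UTC, taken either from the time_offset mount option or, with the new sys_tz option, from the negated kernel sys_tz.tz_minuteswest. A minimal sketch of how such an offset converts a locally stamped time to UTC (local_to_utc is illustrative, not a kernel helper):

#include <stdio.h>
#include <stdint.h>

#define SECS_PER_MIN 60

/* Convert a timestamp recorded in local time to UTC, given an offset in
 * minutes east of UTC. */
static int64_t local_to_utc(int64_t local_secs, int tz_offset_min)
{
        return local_secs - (int64_t)tz_offset_min * SECS_PER_MIN;
}

int main(void)
{
        /* an entry stamped at 12:00 local time in UTC+9 is 03:00 UTC */
        printf("%lld\n", (long long)local_to_utc(12 * 3600, 9 * 60)); /* 10800 */
        return 0;
}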
/* Convert linear UNIX date to a EXFAT time/date pair. */
@@ -136,6 +126,14 @@ void exfat_truncate_atime(struct timespec64 *ts)
ts->tv_nsec = 0;
}
+void exfat_truncate_inode_atime(struct inode *inode)
+{
+ struct timespec64 atime = inode_get_atime(inode);
+
+ exfat_truncate_atime(&atime);
+ inode_set_atime_to_ts(inode, atime);
+}
+
u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type)
{
int i;
@@ -180,7 +178,7 @@ int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync)
set_buffer_uptodate(bhs[i]);
mark_buffer_dirty(bhs[i]);
if (sync)
- write_dirty_buffer(bhs[i], 0);
+ write_dirty_buffer(bhs[i], REQ_SYNC);
}
for (i = 0; i < nr_bhs && sync; i++) {
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 24b41103d1cc..dfe957493d49 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -31,10 +31,9 @@ static inline void exfat_d_version_set(struct dentry *dentry,
* If it happened, the negative dentry isn't actually negative anymore. So,
* drop it.
*/
-static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int exfat_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- int ret;
-
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -58,18 +57,17 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- spin_lock(&dentry->d_lock);
- ret = inode_eq_iversion(d_inode(dentry->d_parent),
- exfat_d_version(dentry));
- spin_unlock(&dentry->d_lock);
- return ret;
+ return inode_eq_iversion(dir, exfat_d_version(dentry));
}
-/* returns the length of a struct qstr, ignoring trailing dots */
-static unsigned int exfat_striptail_len(unsigned int len, const char *name)
+/* returns the length of a struct qstr, ignoring trailing dots if necessary */
+static unsigned int exfat_striptail_len(unsigned int len, const char *name,
+ bool keep_last_dots)
{
- while (len && name[len - 1] == '.')
- len--;
+ if (!keep_last_dots) {
+ while (len && name[len - 1] == '.')
+ len--;
+ }
return len;
}
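The keep_last_dots plumbing above boils down to one small helper. A standalone sketch of its behaviour with and without the new flag (striptail_len here is a copy for illustration, not the kernel symbol):

#include <stdio.h>

/* Length of 'name' with trailing dots stripped, unless keep_last_dots is
 * set, in which case the length is left untouched. */
static unsigned int striptail_len(unsigned int len, const char *name,
                                  int keep_last_dots)
{
        if (!keep_last_dots) {
                while (len && name[len - 1] == '.')
                        len--;
        }
        return len;
}

int main(void)
{
        printf("%u\n", striptail_len(5, "foo..", 0));   /* 3 */
        printf("%u\n", striptail_len(5, "foo..", 1));   /* 5 */
        return 0;
}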
@@ -83,7 +81,8 @@ static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
struct super_block *sb = dentry->d_sb;
struct nls_table *t = EXFAT_SB(sb)->nls_io;
const unsigned char *name = qstr->name;
- unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+ unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
unsigned long hash = init_name_hash(dentry);
int i, charlen;
wchar_t c;
@@ -104,8 +103,10 @@ static int exfat_d_cmp(const struct dentry *dentry, unsigned int len,
{
struct super_block *sb = dentry->d_sb;
struct nls_table *t = EXFAT_SB(sb)->nls_io;
- unsigned int alen = exfat_striptail_len(name->len, name->name);
- unsigned int blen = exfat_striptail_len(len, str);
+ unsigned int alen = exfat_striptail_len(name->len, name->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
+ unsigned int blen = exfat_striptail_len(len, str,
+ EXFAT_SB(sb)->options.keep_last_dots);
wchar_t c1, c2;
int charlen, i;
@@ -136,7 +137,8 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
struct super_block *sb = dentry->d_sb;
const unsigned char *name = qstr->name;
- unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+ unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
unsigned long hash = init_name_hash(dentry);
int i, charlen;
unicode_t u;
@@ -161,8 +163,11 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
struct super_block *sb = dentry->d_sb;
- unsigned int alen = exfat_striptail_len(name->len, name->name);
- unsigned int blen = exfat_striptail_len(len, str);
+ unsigned int alen = exfat_striptail_len(name->len, name->name,
+ EXFAT_SB(sb)->options.keep_last_dots);
+ unsigned int blen = exfat_striptail_len(len, str,
+ EXFAT_SB(sb)->options.keep_last_dots);
+
unicode_t u_a, u_b;
int charlen, i;
@@ -194,95 +199,75 @@ const struct dentry_operations exfat_utf8_dentry_ops = {
.d_compare = exfat_utf8_d_cmp,
};
-/* used only in search empty_slot() */
-#define CNT_UNUSED_NOHIT (-1)
-#define CNT_UNUSED_HIT (-2)
/* search EMPTY CONTINUOUS "num_entries" entries */
static int exfat_search_empty_slot(struct super_block *sb,
struct exfat_hint_femp *hint_femp, struct exfat_chain *p_dir,
- int num_entries)
+ int num_entries, struct exfat_entry_set_cache *es)
{
- int i, dentry, num_empty = 0;
+ int i, dentry, ret;
int dentries_per_clu;
- unsigned int type;
struct exfat_chain clu;
- struct exfat_dentry *ep;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct buffer_head *bh;
+ int total_entries = EXFAT_CLU_TO_DEN(p_dir->size, sbi);
dentries_per_clu = sbi->dentries_per_clu;
if (hint_femp->eidx != EXFAT_HINT_NONE) {
dentry = hint_femp->eidx;
- if (num_entries <= hint_femp->count) {
- hint_femp->eidx = EXFAT_HINT_NONE;
- return dentry;
- }
+ /*
+ * If hint_femp->count is enough, we still need to check whether
+ * those entries are actually empty.
+ * Otherwise, if "dentry + hint_femp->count" is also equal
+ * to "p_dir->size * dentries_per_clu", it means ENOSPC.
+ */
+ if (dentry + hint_femp->count == total_entries &&
+ num_entries > hint_femp->count)
+ return -ENOSPC;
+
+ hint_femp->eidx = EXFAT_HINT_NONE;
exfat_chain_dup(&clu, &hint_femp->cur);
} else {
exfat_chain_dup(&clu, p_dir);
dentry = 0;
}
- while (clu.dir != EXFAT_EOF_CLUSTER) {
+ while (dentry + num_entries <= total_entries &&
+ clu.dir != EXFAT_EOF_CLUSTER) {
i = dentry & (dentries_per_clu - 1);
- for (; i < dentries_per_clu; i++, dentry++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
- if (!ep)
- return -EIO;
- type = exfat_get_entry_type(ep);
- brelse(bh);
+ ret = exfat_get_empty_dentry_set(es, sb, &clu, i, num_entries);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return dentry;
+
+ dentry += ret;
+ i += ret;
- if (type == TYPE_UNUSED || type == TYPE_DELETED) {
- num_empty++;
- if (hint_femp->eidx == EXFAT_HINT_NONE) {
- hint_femp->eidx = dentry;
- hint_femp->count = CNT_UNUSED_NOHIT;
- exfat_chain_set(&hint_femp->cur,
- clu.dir, clu.size, clu.flags);
- }
-
- if (type == TYPE_UNUSED &&
- hint_femp->count != CNT_UNUSED_HIT)
- hint_femp->count = CNT_UNUSED_HIT;
+ while (i >= dentries_per_clu) {
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
} else {
- if (hint_femp->eidx != EXFAT_HINT_NONE &&
- hint_femp->count == CNT_UNUSED_HIT) {
- /* unused empty group means
- * an empty group which includes
- * unused dentry
- */
- exfat_fs_error(sb,
- "found bogus dentry(%d) beyond unused empty group(%d) (start_clu : %u, cur_clu : %u)",
- dentry, hint_femp->eidx,
- p_dir->dir, clu.dir);
+ if (exfat_get_next_cluster(sb, &clu.dir))
return -EIO;
- }
-
- num_empty = 0;
- hint_femp->eidx = EXFAT_HINT_NONE;
}
- if (num_empty >= num_entries) {
- /* found and invalidate hint_femp */
- hint_femp->eidx = EXFAT_HINT_NONE;
- return (dentry - (num_entries - 1));
- }
- }
-
- if (clu.flags == ALLOC_NO_FAT_CHAIN) {
- if (--clu.size > 0)
- clu.dir++;
- else
- clu.dir = EXFAT_EOF_CLUSTER;
- } else {
- if (exfat_get_next_cluster(sb, &clu.dir))
- return -EIO;
+ i -= dentries_per_clu;
}
}
+ hint_femp->eidx = dentry;
+ hint_femp->count = 0;
+ if (dentry == total_entries || clu.dir == EXFAT_EOF_CLUSTER)
+ exfat_chain_set(&hint_femp->cur, EXFAT_EOF_CLUSTER, 0,
+ clu.flags);
+ else
+ hint_femp->cur = clu;
+
return -ENOSPC;
}
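The new early -ENOSPC return can be read as: if the hinted empty run reaches the end of the directory and is still too short, no amount of scanning will help. A sketch of that predicate under illustrative names (standalone C, not kernel code):

#include <stdio.h>
#include <stdbool.h>

/* True when the hinted empty run starting at 'hint_eidx' extends to the
 * very end of the directory ('total_entries') and is still shorter than
 * the 'num_entries' we need, so the search is guaranteed to fail. */
static bool hint_proves_enospc(int hint_eidx, int hint_count,
                               int num_entries, int total_entries)
{
        return hint_eidx + hint_count == total_entries &&
               num_entries > hint_count;
}

int main(void)
{
        /* 256-entry directory, empty tail of 3 entries, need 5 -> ENOSPC */
        printf("%d\n", hint_proves_enospc(253, 3, 5, 256));     /* 1 */
        /* same tail but we only need 2 -> keep searching */
        printf("%d\n", hint_proves_enospc(253, 3, 2, 256));     /* 0 */
        return 0;
}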
@@ -298,18 +283,31 @@ static int exfat_check_max_dentries(struct inode *inode)
return 0;
}
-/* find empty directory entry.
- * if there isn't any empty slot, expand cluster chain.
+/*
+ * Find an empty directory entry set.
+ *
+ * If there isn't any empty slot, expand the cluster chain.
+ *
+ * in:
+ * inode: inode of the parent directory
+ * num_entries: specifies how many dentries in the empty directory entry set
+ *
+ * out:
+ * p_dir: the cluster where the empty directory entry set is located
+ * es: The found empty directory entry set
+ *
+ * return:
+ * the directory entry index in p_dir is returned on success
+ * -error code is returned on failure
*/
-static int exfat_find_empty_entry(struct inode *inode,
- struct exfat_chain *p_dir, int num_entries)
+int exfat_find_empty_entry(struct inode *inode,
+ struct exfat_chain *p_dir, int num_entries,
+ struct exfat_entry_set_cache *es)
{
int dentry;
unsigned int ret, last_clu;
- sector_t sector;
loff_t size = 0;
struct exfat_chain clu;
- struct exfat_dentry *ep = NULL;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
@@ -322,22 +320,31 @@ static int exfat_find_empty_entry(struct inode *inode,
ei->hint_femp.eidx = EXFAT_HINT_NONE;
}
+ exfat_chain_set(p_dir, ei->start_clu,
+ EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
+
while ((dentry = exfat_search_empty_slot(sb, &hint_femp, p_dir,
- num_entries)) < 0) {
- if (dentry == -EIO)
- break;
+ num_entries, es)) < 0) {
+ if (dentry != -ENOSPC)
+ return dentry;
if (exfat_check_max_dentries(inode))
return -ENOSPC;
- /* we trust p_dir->size regardless of FAT type */
- if (exfat_find_last_cluster(sb, p_dir, &last_clu))
- return -EIO;
-
/*
* Allocate new cluster to this directory
*/
- exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
+ if (ei->start_clu != EXFAT_EOF_CLUSTER) {
+ /* we trust p_dir->size regardless of FAT type */
+ if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+ return -EIO;
+
+ exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
+ } else {
+ /* This directory is empty */
+ exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
+ ALLOC_NO_FAT_CHAIN);
+ }
/* allocate a cluster */
ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
@@ -347,6 +354,12 @@ static int exfat_find_empty_entry(struct inode *inode,
if (exfat_zeroed_cluster(inode, clu.dir))
return -EIO;
+ if (ei->start_clu == EXFAT_EOF_CLUSTER) {
+ ei->start_clu = clu.dir;
+ p_dir->dir = clu.dir;
+ hint_femp.eidx = 0;
+ }
+
/* append to the FAT chain */
if (clu.flags != p_dir->flags) {
/* no-fat-chain bit is disabled,
@@ -361,47 +374,26 @@ static int exfat_find_empty_entry(struct inode *inode,
if (exfat_ent_set(sb, last_clu, clu.dir))
return -EIO;
- if (hint_femp.eidx == EXFAT_HINT_NONE) {
- /* the special case that new dentry
- * should be allocated from the start of new cluster
- */
- hint_femp.eidx = EXFAT_B_TO_DEN_IDX(p_dir->size, sbi);
- hint_femp.count = sbi->dentries_per_clu;
-
+ if (hint_femp.cur.dir == EXFAT_EOF_CLUSTER)
exfat_chain_set(&hint_femp.cur, clu.dir, 0, clu.flags);
- }
+
+ hint_femp.count += sbi->dentries_per_clu;
+
hint_femp.cur.size++;
p_dir->size++;
size = EXFAT_CLU_TO_B(p_dir->size, sbi);
- /* update the directory entry */
- if (p_dir->dir != sbi->root_dir) {
- struct buffer_head *bh;
-
- ep = exfat_get_dentry(sb,
- &(ei->dir), ei->entry + 1, &bh, &sector);
- if (!ep)
- return -EIO;
-
- ep->dentry.stream.valid_size = cpu_to_le64(size);
- ep->dentry.stream.size = ep->dentry.stream.valid_size;
- ep->dentry.stream.flags = p_dir->flags;
- exfat_update_bh(bh, IS_DIRSYNC(inode));
- brelse(bh);
- if (exfat_update_dir_chksum(inode, &(ei->dir),
- ei->entry))
- return -EIO;
- }
-
/* directory inode should be updated in here */
i_size_write(inode, size);
- EXFAT_I(inode)->i_size_ondisk += sbi->cluster_size;
- EXFAT_I(inode)->i_size_aligned += sbi->cluster_size;
- EXFAT_I(inode)->flags = p_dir->flags;
- inode->i_blocks += 1 << sbi->sect_per_clus_bits;
+ ei->valid_size += sbi->cluster_size;
+ ei->flags = p_dir->flags;
+ inode->i_blocks += sbi->cluster_size >> 9;
}
- return dentry;
+ p_dir->dir = exfat_sector_to_cluster(sbi, es->bh[0]->b_blocknr);
+ p_dir->size -= dentry / sbi->dentries_per_clu;
+
+ return dentry & (sbi->dentries_per_clu - 1);
}
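The new return convention splits the directory-wide dentry index into a number of clusters to skip plus an index within the found cluster; because dentries_per_clu is a power of two, a mask replaces the modulo. A standalone sketch (split_dentry_index is an illustrative name):

#include <stdio.h>

/* Split a directory-wide dentry index into (clusters to skip, index within
 * that cluster), assuming a power-of-two dentries_per_clu as on exFAT. */
static void split_dentry_index(int dentry, int dentries_per_clu,
                               int *clu_skip, int *in_clu)
{
        *clu_skip = dentry / dentries_per_clu;
        *in_clu = dentry & (dentries_per_clu - 1);
}

int main(void)
{
        int skip, idx;

        /* 128 dentries per cluster: index 300 lives in the 3rd cluster, slot 44 */
        split_dentry_index(300, 128, &skip, &idx);
        printf("skip=%d idx=%d\n", skip, idx);  /* skip=2 idx=44 */
        return 0;
}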
/*
@@ -409,21 +401,30 @@ static int exfat_find_empty_entry(struct inode *inode,
* Zero if it was successful; otherwise nonzero.
*/
static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
- struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
- int lookup)
+ struct exfat_uni_name *p_uniname, int lookup)
{
int namelen;
int lossy = NLS_NAME_NO_LOSSY;
struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_inode_info *ei = EXFAT_I(inode);
+ int pathlen = strlen(path);
- /* strip all trailing periods */
- namelen = exfat_striptail_len(strlen(path), path);
+ /*
+ * get the length of the pathname excluding
+ * trailing periods, if any.
+ */
+ namelen = exfat_striptail_len(pathlen, path, false);
+ if (EXFAT_SB(sb)->options.keep_last_dots) {
+ /*
+ * Do not allow the creation of files with names
+ * ending with period(s).
+ */
+ if (!lookup && (namelen < pathlen))
+ return -EINVAL;
+ namelen = pathlen;
+ }
if (!namelen)
return -ENOENT;
-
- if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+ if (pathlen > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
return -ENAMETOOLONG;
/*
@@ -443,24 +444,19 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
if ((lossy && !lookup) || !namelen)
return -EINVAL;
- exfat_chain_set(p_dir, ei->start_clu,
- EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
-
return 0;
}
static inline int exfat_resolve_path(struct inode *inode,
- const unsigned char *path, struct exfat_chain *dir,
- struct exfat_uni_name *uni)
+ const unsigned char *path, struct exfat_uni_name *uni)
{
- return __exfat_resolve_path(inode, path, dir, uni, 0);
+ return __exfat_resolve_path(inode, path, uni, 0);
}
static inline int exfat_resolve_path_for_lookup(struct inode *inode,
- const unsigned char *path, struct exfat_chain *dir,
- struct exfat_uni_name *uni)
+ const unsigned char *path, struct exfat_uni_name *uni)
{
- return __exfat_resolve_path(inode, path, dir, uni, 1);
+ return __exfat_resolve_path(inode, path, uni, 1);
}
static inline loff_t exfat_make_i_pos(struct exfat_dir_entry *info)
@@ -469,18 +465,19 @@ static inline loff_t exfat_make_i_pos(struct exfat_dir_entry *info)
}
static int exfat_add_entry(struct inode *inode, const char *path,
- struct exfat_chain *p_dir, unsigned int type,
- struct exfat_dir_entry *info)
+ unsigned int type, struct exfat_dir_entry *info)
{
int ret, dentry, num_entries;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_uni_name uniname;
struct exfat_chain clu;
+ struct timespec64 ts = current_time(inode);
+ struct exfat_entry_set_cache es;
int clu_size = 0;
unsigned int start_clu = EXFAT_FREE_CLUSTER;
- ret = exfat_resolve_path(inode, path, p_dir, &uniname);
+ ret = exfat_resolve_path(inode, path, &uniname);
if (ret)
goto out;
@@ -491,16 +488,18 @@ static int exfat_add_entry(struct inode *inode, const char *path,
}
/* exfat_find_empty_entry must be called before alloc_cluster() */
- dentry = exfat_find_empty_entry(inode, p_dir, num_entries);
+ dentry = exfat_find_empty_entry(inode, &info->dir, num_entries, &es);
if (dentry < 0) {
ret = dentry; /* -EIO or -ENOSPC */
goto out;
}
- if (type == TYPE_DIR) {
+ if (type == TYPE_DIR && !sbi->options.zero_size_dir) {
ret = exfat_alloc_new_dir(inode, &clu);
- if (ret)
+ if (ret) {
+ exfat_put_dentry_set(&es, false);
goto out;
+ }
start_clu = clu.dir;
clu_size = sbi->cluster_size;
}
@@ -509,31 +508,33 @@ static int exfat_add_entry(struct inode *inode, const char *path,
/* fill the dos name directory entry information of the created file.
* the first cluster is not determined yet. (0)
*/
- ret = exfat_init_dir_entry(inode, p_dir, dentry, type,
- start_clu, clu_size);
- if (ret)
- goto out;
+ exfat_init_dir_entry(&es, type, start_clu, clu_size, &ts);
+ exfat_init_ext_entry(&es, num_entries, &uniname);
- ret = exfat_init_ext_entry(inode, p_dir, dentry, num_entries, &uniname);
+ ret = exfat_put_dentry_set(&es, IS_DIRSYNC(inode));
if (ret)
goto out;
- info->dir = *p_dir;
info->entry = dentry;
info->flags = ALLOC_NO_FAT_CHAIN;
info->type = type;
if (type == TYPE_FILE) {
- info->attr = ATTR_ARCHIVE;
+ info->attr = EXFAT_ATTR_ARCHIVE;
info->start_clu = EXFAT_EOF_CLUSTER;
info->size = 0;
info->num_subdirs = 0;
} else {
- info->attr = ATTR_SUBDIR;
- info->start_clu = start_clu;
+ info->attr = EXFAT_ATTR_SUBDIR;
+ if (sbi->options.zero_size_dir)
+ info->start_clu = EXFAT_EOF_CLUSTER;
+ else
+ info->start_clu = start_clu;
info->size = clu_size;
info->num_subdirs = EXFAT_MIN_SUBDIR;
}
+ info->valid_size = info->size;
+
memset(&info->crtime, 0, sizeof(info->crtime));
memset(&info->mtime, 0, sizeof(info->mtime));
memset(&info->atime, 0, sizeof(info->atime));
@@ -541,27 +542,28 @@ out:
return ret;
}
-static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
- struct exfat_chain cdir;
struct exfat_dir_entry info;
loff_t i_pos;
int err;
+ loff_t size = i_size_read(dir);
+
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
- err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
- &info);
- exfat_clear_volume_dirty(sb);
+ err = exfat_add_entry(dir, dentry->d_name.name, TYPE_FILE, &info);
if (err)
goto unlock;
inode_inc_iversion(dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
- if (IS_DIRSYNC(dir))
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ if (IS_DIRSYNC(dir) && size != i_size_read(dir))
exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
@@ -573,9 +575,9 @@ static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime =
- EXFAT_I(inode)->i_crtime = current_time(inode);
- exfat_truncate_atime(&inode->i_atime);
+ EXFAT_I(inode)->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
+
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -585,17 +587,17 @@ unlock:
}
/* lookup a file */
-static int exfat_find(struct inode *dir, struct qstr *qname,
+static int exfat_find(struct inode *dir, const struct qstr *qname,
struct exfat_dir_entry *info)
{
- int ret, dentry, num_entries, count;
+ int ret, dentry, count;
struct exfat_chain cdir;
struct exfat_uni_name uni_name;
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(dir);
struct exfat_dentry *ep, *ep2;
- struct exfat_entry_set_cache *es;
+ struct exfat_entry_set_cache es;
/* for optimized dir & entry to prevent long traverse of cluster chain */
struct exfat_hint hint_opt;
@@ -603,13 +605,12 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
return -ENOENT;
/* check the validity of directory name in the given pathname */
- ret = exfat_resolve_path_for_lookup(dir, qname->name, &cdir, &uni_name);
+ ret = exfat_resolve_path_for_lookup(dir, qname->name, &uni_name);
if (ret)
return ret;
- num_entries = exfat_calc_num_entries(&uni_name);
- if (num_entries < 0)
- return num_entries;
+ exfat_chain_set(&cdir, ei->start_clu,
+ EXFAT_B_TO_CLU(i_size_read(dir), sbi), ei->flags);
/* check the validation of hint_stat and initialize it if required */
if (ei->version != (inode_peek_iversion_raw(dir) & 0xffffffff)) {
@@ -620,38 +621,49 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
}
/* search the file name for directories */
- dentry = exfat_find_dir_entry(sb, ei, &cdir, &uni_name,
- num_entries, TYPE_ALL, &hint_opt);
-
+ dentry = exfat_find_dir_entry(sb, ei, &cdir, &uni_name, &hint_opt);
if (dentry < 0)
return dentry; /* -error value */
- info->dir = cdir;
- info->entry = dentry;
- info->num_subdirs = 0;
-
/* adjust cdir to the optimized value */
cdir.dir = hint_opt.clu;
if (cdir.flags & ALLOC_NO_FAT_CHAIN)
cdir.size -= dentry / sbi->dentries_per_clu;
dentry = hint_opt.eidx;
- es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES);
- if (!es)
+
+ info->dir = cdir;
+ info->entry = dentry;
+ info->num_subdirs = 0;
+
+ if (exfat_get_dentry_set(&es, sb, &cdir, dentry, ES_2_ENTRIES))
return -EIO;
- ep = exfat_get_dentry_cached(es, 0);
- ep2 = exfat_get_dentry_cached(es, 1);
+ ep = exfat_get_dentry_cached(&es, ES_IDX_FILE);
+ ep2 = exfat_get_dentry_cached(&es, ES_IDX_STREAM);
info->type = exfat_get_entry_type(ep);
info->attr = le16_to_cpu(ep->dentry.file.attr);
- info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
- if ((info->type == TYPE_FILE) && (info->size == 0)) {
+ info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
+ info->size = le64_to_cpu(ep2->dentry.stream.size);
+
+ info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu);
+ if (!is_valid_cluster(sbi, info->start_clu) && info->size) {
+ exfat_warn(sb, "start_clu is invalid cluster(0x%x)",
+ info->start_clu);
+ info->size = 0;
+ info->valid_size = 0;
+ }
+
+ if (info->valid_size > info->size) {
+ exfat_warn(sb, "valid_size(%lld) is greater than size(%lld)",
+ info->valid_size, info->size);
+ info->valid_size = info->size;
+ }
+
+ if (info->size == 0) {
info->flags = ALLOC_NO_FAT_CHAIN;
info->start_clu = EXFAT_EOF_CLUSTER;
- } else {
+ } else
info->flags = ep2->dentry.stream.flags;
- info->start_clu =
- le32_to_cpu(ep2->dentry.stream.start_clu);
- }
exfat_get_entry_time(sbi, &info->crtime,
ep->dentry.file.create_tz,
@@ -668,7 +680,17 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
ep->dentry.file.access_time,
ep->dentry.file.access_date,
0);
- exfat_free_dentry_set(es, false);
+ exfat_put_dentry_set(&es, false);
+
+ if (info->valid_size < 0) {
+ exfat_fs_error(sb, "data valid size is invalid(%lld)", info->valid_size);
+ return -EIO;
+ }
+
+ if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) {
+ exfat_fs_error(sb, "data size is invalid(%lld)", info->size);
+ return -EIO;
+ }
if (ei->start_clu == EXFAT_FREE_CLUSTER) {
exfat_fs_error(sb,
@@ -773,60 +795,48 @@ unlock:
/* remove an entry, BUT don't truncate */
static int exfat_unlink(struct inode *dir, struct dentry *dentry)
{
- struct exfat_chain cdir;
- struct exfat_dentry *ep;
struct super_block *sb = dir->i_sb;
struct inode *inode = dentry->d_inode;
struct exfat_inode_info *ei = EXFAT_I(inode);
- struct buffer_head *bh;
- sector_t sector;
- int num_entries, entry, err = 0;
+ struct exfat_entry_set_cache es;
+ int err = 0;
+
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
mutex_lock(&EXFAT_SB(sb)->s_lock);
- exfat_chain_dup(&cdir, &ei->dir);
- entry = ei->entry;
if (ei->dir.dir == DIR_DELETED) {
exfat_err(sb, "abnormal access to deleted dentry");
err = -ENOENT;
goto unlock;
}
- ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
- if (!ep) {
- err = -EIO;
- goto unlock;
- }
- num_entries = exfat_count_ext_entries(sb, &cdir, entry, ep);
- if (num_entries < 0) {
+ err = exfat_get_dentry_set_by_ei(&es, sb, ei);
+ if (err) {
err = -EIO;
- brelse(bh);
goto unlock;
}
- num_entries++;
- brelse(bh);
exfat_set_volume_dirty(sb);
+
/* update the directory entry */
- if (exfat_remove_entries(dir, &cdir, entry, 0, num_entries)) {
- err = -EIO;
+ exfat_remove_entries(inode, &es, ES_IDX_FILE);
+
+ err = exfat_put_dentry_set(&es, IS_DIRSYNC(inode));
+ if (err)
goto unlock;
- }
/* This doesn't modify ei */
ei->dir.dir = DIR_DELETED;
- exfat_clear_volume_dirty(sb);
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
- exfat_truncate_atime(&dir->i_atime);
- if (IS_DIRSYNC(dir))
- exfat_sync_inode(dir);
- else
- mark_inode_dirty(dir);
+ simple_inode_init_ts(dir);
+ exfat_truncate_inode_atime(dir);
+ mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
- exfat_truncate_atime(&inode->i_atime);
+ simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
unlock:
@@ -834,27 +844,28 @@ unlock:
return err;
}
-static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct exfat_dir_entry info;
- struct exfat_chain cdir;
loff_t i_pos;
int err;
+ loff_t size = i_size_read(dir);
+
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return ERR_PTR(-EIO);
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
- err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
- &info);
- exfat_clear_volume_dirty(sb);
+ err = exfat_add_entry(dir, dentry->d_name.name, TYPE_DIR, &info);
if (err)
goto unlock;
inode_inc_iversion(dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
- if (IS_DIRSYNC(dir))
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ if (IS_DIRSYNC(dir) && size != i_size_read(dir))
exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
@@ -867,16 +878,15 @@ static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime =
- EXFAT_I(inode)->i_crtime = current_time(inode);
- exfat_truncate_atime(&inode->i_atime);
+ EXFAT_I(inode)->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
unlock:
mutex_unlock(&EXFAT_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
static int exfat_check_dir_empty(struct super_block *sb,
@@ -884,6 +894,7 @@ static int exfat_check_dir_empty(struct super_block *sb,
{
int i, dentries_per_clu;
unsigned int type;
+ unsigned int clu_count = 0;
struct exfat_chain clu;
struct exfat_dentry *ep;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -891,11 +902,14 @@ static int exfat_check_dir_empty(struct super_block *sb,
dentries_per_clu = sbi->dentries_per_clu;
+ if (p_dir->dir == EXFAT_EOF_CLUSTER)
+ return 0;
+
exfat_chain_dup(&clu, p_dir);
while (clu.dir != EXFAT_EOF_CLUSTER) {
for (i = 0; i < dentries_per_clu; i++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
type = exfat_get_entry_type(ep);
@@ -917,6 +931,10 @@ static int exfat_check_dir_empty(struct super_block *sb,
} else {
if (exfat_get_next_cluster(sb, &(clu.dir)))
return -EIO;
+
+ /* break if the cluster chain includes a loop */
+ if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
+ break;
}
}
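The clu_count guard works because a valid chain can never contain more clusters than the volume has data clusters; exceeding that bound proves a cycle. A self-contained sketch of the same idea over a toy FAT array (all names and values illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Walk a chain through 'next[]' from 'start' until 'eof', giving up once
 * more hops than 'data_clusters' have been taken (a cycle, by pigeonhole). */
static bool walk_chain(const unsigned int *next, unsigned int start,
                       unsigned int eof, unsigned int data_clusters)
{
        unsigned int clu = start, hops = 0;

        while (clu != eof) {
                if (++hops > data_clusters)
                        return false;   /* cycle detected */
                clu = next[clu];
        }
        return true;            /* chain terminates */
}

int main(void)
{
        /* tiny FAT: 0 -> 1 -> 2 -> EOF(9) vs. 3 -> 4 -> 3 (cycle) */
        unsigned int next[] = { 1, 2, 9, 4, 3 };

        printf("%d\n", walk_chain(next, 0, 9, 5));      /* 1 */
        printf("%d\n", walk_chain(next, 3, 9, 5));      /* 0 */
        return 0;
}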
@@ -926,19 +944,17 @@ static int exfat_check_dir_empty(struct super_block *sb,
static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- struct exfat_dentry *ep;
- struct exfat_chain cdir, clu_to_free;
+ struct exfat_chain clu_to_free;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- struct buffer_head *bh;
- sector_t sector;
- int num_entries, entry, err;
+ struct exfat_entry_set_cache es;
+ int err;
- mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
- exfat_chain_dup(&cdir, &ei->dir);
- entry = ei->entry;
+ mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
if (ei->dir.dir == DIR_DELETED) {
exfat_err(sb, "abnormal access to deleted dentry");
@@ -957,33 +973,25 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
goto unlock;
}
- ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
- if (!ep) {
- err = -EIO;
- goto unlock;
- }
-
- num_entries = exfat_count_ext_entries(sb, &cdir, entry, ep);
- if (num_entries < 0) {
+ err = exfat_get_dentry_set_by_ei(&es, sb, ei);
+ if (err) {
err = -EIO;
- brelse(bh);
goto unlock;
}
- num_entries++;
- brelse(bh);
exfat_set_volume_dirty(sb);
- err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
- if (err) {
- exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
+
+ exfat_remove_entries(inode, &es, ES_IDX_FILE);
+
+ err = exfat_put_dentry_set(&es, IS_DIRSYNC(dir));
+ if (err)
goto unlock;
- }
+
ei->dir.dir = DIR_DELETED;
- exfat_clear_volume_dirty(sb);
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
- exfat_truncate_atime(&dir->i_atime);
+ simple_inode_init_ts(dir);
+ exfat_truncate_inode_atime(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -991,8 +999,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
- exfat_truncate_atime(&inode->i_atime);
+ simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
unlock:
@@ -1000,185 +1008,128 @@ unlock:
return err;
}
-static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
- int oldentry, struct exfat_uni_name *p_uniname,
- struct exfat_inode_info *ei)
+static int exfat_rename_file(struct inode *parent_inode,
+ struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei)
{
- int ret, num_old_entries, num_new_entries;
- sector_t sector_old, sector_new;
+ int ret, num_new_entries;
struct exfat_dentry *epold, *epnew;
- struct super_block *sb = inode->i_sb;
- struct buffer_head *new_bh, *old_bh;
- int sync = IS_DIRSYNC(inode);
-
- epold = exfat_get_dentry(sb, p_dir, oldentry, &old_bh, &sector_old);
- if (!epold)
- return -EIO;
+ struct super_block *sb = parent_inode->i_sb;
+ struct exfat_entry_set_cache old_es, new_es;
+ int sync = IS_DIRSYNC(parent_inode);
- num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry, epold);
- if (num_old_entries < 0)
+ if (unlikely(exfat_forced_shutdown(sb)))
return -EIO;
- num_old_entries++;
num_new_entries = exfat_calc_num_entries(p_uniname);
if (num_new_entries < 0)
return num_new_entries;
- if (num_old_entries < num_new_entries) {
- int newentry;
+ ret = exfat_get_dentry_set_by_ei(&old_es, sb, ei);
+ if (ret) {
+ ret = -EIO;
+ return ret;
+ }
- newentry =
- exfat_find_empty_entry(inode, p_dir, num_new_entries);
- if (newentry < 0)
- return newentry; /* -EIO or -ENOSPC */
+ epold = exfat_get_dentry_cached(&old_es, ES_IDX_FILE);
- epnew = exfat_get_dentry(sb, p_dir, newentry, &new_bh,
- &sector_new);
- if (!epnew)
- return -EIO;
+ if (old_es.num_entries < num_new_entries) {
+ int newentry;
+ struct exfat_chain dir;
- *epnew = *epold;
- if (exfat_get_entry_type(epnew) == TYPE_FILE) {
- epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
- ei->attr |= ATTR_ARCHIVE;
+ newentry = exfat_find_empty_entry(parent_inode, &dir,
+ num_new_entries, &new_es);
+ if (newentry < 0) {
+ ret = newentry; /* -EIO or -ENOSPC */
+ goto put_old_es;
}
- exfat_update_bh(new_bh, sync);
- brelse(old_bh);
- brelse(new_bh);
- epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
- &sector_old);
- if (!epold)
- return -EIO;
- epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
- &sector_new);
- if (!epnew) {
- brelse(old_bh);
- return -EIO;
+ epnew = exfat_get_dentry_cached(&new_es, ES_IDX_FILE);
+ *epnew = *epold;
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ epnew->dentry.file.attr |= cpu_to_le16(EXFAT_ATTR_ARCHIVE);
+ ei->attr |= EXFAT_ATTR_ARCHIVE;
}
+ epold = exfat_get_dentry_cached(&old_es, ES_IDX_STREAM);
+ epnew = exfat_get_dentry_cached(&new_es, ES_IDX_STREAM);
*epnew = *epold;
- exfat_update_bh(new_bh, sync);
- brelse(old_bh);
- brelse(new_bh);
- ret = exfat_init_ext_entry(inode, p_dir, newentry,
- num_new_entries, p_uniname);
+ exfat_init_ext_entry(&new_es, num_new_entries, p_uniname);
+
+ ret = exfat_put_dentry_set(&new_es, sync);
if (ret)
- return ret;
+ goto put_old_es;
- exfat_remove_entries(inode, p_dir, oldentry, 0,
- num_old_entries);
+ exfat_remove_entries(parent_inode, &old_es, ES_IDX_FILE);
+ ei->dir = dir;
ei->entry = newentry;
} else {
if (exfat_get_entry_type(epold) == TYPE_FILE) {
- epold->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
- ei->attr |= ATTR_ARCHIVE;
+ epold->dentry.file.attr |= cpu_to_le16(EXFAT_ATTR_ARCHIVE);
+ ei->attr |= EXFAT_ATTR_ARCHIVE;
}
- exfat_update_bh(old_bh, sync);
- brelse(old_bh);
- ret = exfat_init_ext_entry(inode, p_dir, oldentry,
- num_new_entries, p_uniname);
- if (ret)
- return ret;
- exfat_remove_entries(inode, p_dir, oldentry, num_new_entries,
- num_old_entries);
+ exfat_remove_entries(parent_inode, &old_es, ES_IDX_FIRST_FILENAME + 1);
+ exfat_init_ext_entry(&old_es, num_new_entries, p_uniname);
}
- return 0;
+ return exfat_put_dentry_set(&old_es, sync);
+
+put_old_es:
+ exfat_put_dentry_set(&old_es, false);
+ return ret;
}
-static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
- int oldentry, struct exfat_chain *p_newdir,
+static int exfat_move_file(struct inode *parent_inode,
struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei)
{
- int ret, newentry, num_new_entries, num_old_entries;
- sector_t sector_mov, sector_new;
+ int ret, newentry, num_new_entries;
struct exfat_dentry *epmov, *epnew;
- struct super_block *sb = inode->i_sb;
- struct buffer_head *mov_bh, *new_bh;
-
- epmov = exfat_get_dentry(sb, p_olddir, oldentry, &mov_bh, &sector_mov);
- if (!epmov)
- return -EIO;
-
- num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
- epmov);
- if (num_old_entries < 0)
- return -EIO;
- num_old_entries++;
+ struct exfat_entry_set_cache mov_es, new_es;
+ struct exfat_chain newdir;
num_new_entries = exfat_calc_num_entries(p_uniname);
if (num_new_entries < 0)
return num_new_entries;
- newentry = exfat_find_empty_entry(inode, p_newdir, num_new_entries);
- if (newentry < 0)
- return newentry; /* -EIO or -ENOSPC */
-
- epnew = exfat_get_dentry(sb, p_newdir, newentry, &new_bh, &sector_new);
- if (!epnew)
+ ret = exfat_get_dentry_set_by_ei(&mov_es, parent_inode->i_sb, ei);
+ if (ret)
return -EIO;
- *epnew = *epmov;
- if (exfat_get_entry_type(epnew) == TYPE_FILE) {
- epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
- ei->attr |= ATTR_ARCHIVE;
+ newentry = exfat_find_empty_entry(parent_inode, &newdir,
+ num_new_entries, &new_es);
+ if (newentry < 0) {
+ ret = newentry; /* -EIO or -ENOSPC */
+ goto put_mov_es;
}
- exfat_update_bh(new_bh, IS_DIRSYNC(inode));
- brelse(mov_bh);
- brelse(new_bh);
- epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
- &sector_mov);
- if (!epmov)
- return -EIO;
- epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
- &sector_new);
- if (!epnew) {
- brelse(mov_bh);
- return -EIO;
+ epmov = exfat_get_dentry_cached(&mov_es, ES_IDX_FILE);
+ epnew = exfat_get_dentry_cached(&new_es, ES_IDX_FILE);
+ *epnew = *epmov;
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ epnew->dentry.file.attr |= cpu_to_le16(EXFAT_ATTR_ARCHIVE);
+ ei->attr |= EXFAT_ATTR_ARCHIVE;
}
+ epmov = exfat_get_dentry_cached(&mov_es, ES_IDX_STREAM);
+ epnew = exfat_get_dentry_cached(&new_es, ES_IDX_STREAM);
*epnew = *epmov;
- exfat_update_bh(new_bh, IS_DIRSYNC(inode));
- brelse(mov_bh);
- brelse(new_bh);
- ret = exfat_init_ext_entry(inode, p_newdir, newentry, num_new_entries,
- p_uniname);
- if (ret)
- return ret;
+ exfat_init_ext_entry(&new_es, num_new_entries, p_uniname);
+ exfat_remove_entries(parent_inode, &mov_es, ES_IDX_FILE);
- exfat_remove_entries(inode, p_olddir, oldentry, 0, num_old_entries);
+ ei->dir = newdir;
+ ei->entry = newentry;
- exfat_chain_set(&ei->dir, p_newdir->dir, p_newdir->size,
- p_newdir->flags);
+ ret = exfat_put_dentry_set(&new_es, IS_DIRSYNC(parent_inode));
+ if (ret)
+ goto put_mov_es;
- ei->entry = newentry;
- return 0;
-}
+ return exfat_put_dentry_set(&mov_es, IS_DIRSYNC(parent_inode));
-static void exfat_update_parent_info(struct exfat_inode_info *ei,
- struct inode *parent_inode)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(parent_inode->i_sb);
- struct exfat_inode_info *parent_ei = EXFAT_I(parent_inode);
- loff_t parent_isize = i_size_read(parent_inode);
+put_mov_es:
+ exfat_put_dentry_set(&mov_es, false);
- /*
- * the problem that struct exfat_inode_info caches wrong parent info.
- *
- * because of flag-mismatch of ei->dir,
- * there is abnormal traversing cluster chain.
- */
- if (unlikely(parent_ei->flags != ei->dir.flags ||
- parent_isize != EXFAT_CLU_TO_B(ei->dir.size, sbi) ||
- parent_ei->start_clu != ei->dir.dir)) {
- exfat_chain_set(&ei->dir, parent_ei->start_clu,
- EXFAT_B_TO_CLU_ROUND_UP(parent_isize, sbi),
- parent_ei->flags);
- }
+ return ret;
}
/* rename or move a old file into a new file */
@@ -1187,20 +1138,12 @@ static int __exfat_rename(struct inode *old_parent_inode,
struct dentry *new_dentry)
{
int ret;
- int dentry;
- struct exfat_chain olddir, newdir;
- struct exfat_chain *p_dir = NULL;
struct exfat_uni_name uni_name;
- struct exfat_dentry *ep;
struct super_block *sb = old_parent_inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
const unsigned char *new_path = new_dentry->d_name.name;
struct inode *new_inode = new_dentry->d_inode;
- int num_entries;
struct exfat_inode_info *new_ei = NULL;
- unsigned int new_entry_type = TYPE_UNUSED;
- int new_entry = 0;
- struct buffer_head *old_bh, *new_bh = NULL;
/* check the validity of pointer parameters */
if (new_path == NULL || strlen(new_path) == 0)
@@ -1211,18 +1154,6 @@ static int __exfat_rename(struct inode *old_parent_inode,
return -ENOENT;
}
- exfat_update_parent_info(ei, old_parent_inode);
-
- exfat_chain_dup(&olddir, &ei->dir);
- dentry = ei->entry;
-
- ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh, NULL);
- if (!ep) {
- ret = -EIO;
- goto out;
- }
- brelse(old_bh);
-
/* check whether new dir is existing directory and empty */
if (new_inode) {
ret = -EIO;
@@ -1233,19 +1164,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
goto out;
}
- exfat_update_parent_info(new_ei, new_parent_inode);
-
- p_dir = &(new_ei->dir);
- new_entry = new_ei->entry;
- ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
- if (!ep)
- goto out;
-
- new_entry_type = exfat_get_entry_type(ep);
- brelse(new_bh);
-
/* if new_inode exists, update ei */
- if (new_entry_type == TYPE_DIR) {
+ if (S_ISDIR(new_inode->i_mode)) {
struct exfat_chain new_clu;
new_clu.dir = new_ei->start_clu;
@@ -1261,43 +1181,36 @@ static int __exfat_rename(struct inode *old_parent_inode,
}
/* check the validity of directory name in the given new pathname */
- ret = exfat_resolve_path(new_parent_inode, new_path, &newdir,
- &uni_name);
+ ret = exfat_resolve_path(new_parent_inode, new_path, &uni_name);
if (ret)
goto out;
exfat_set_volume_dirty(sb);
- if (olddir.dir == newdir.dir)
- ret = exfat_rename_file(new_parent_inode, &olddir, dentry,
- &uni_name, ei);
+ if (new_parent_inode == old_parent_inode)
+ ret = exfat_rename_file(new_parent_inode, &uni_name, ei);
else
- ret = exfat_move_file(new_parent_inode, &olddir, dentry,
- &newdir, &uni_name, ei);
+ ret = exfat_move_file(new_parent_inode, &uni_name, ei);
if (!ret && new_inode) {
+ struct exfat_entry_set_cache es;
+
/* delete entries of new_dir */
- ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
- if (!ep) {
+ ret = exfat_get_dentry_set_by_ei(&es, sb, new_ei);
+ if (ret) {
ret = -EIO;
goto del_out;
}
- num_entries = exfat_count_ext_entries(sb, p_dir, new_entry, ep);
- if (num_entries < 0) {
- ret = -EIO;
- goto del_out;
- }
- brelse(new_bh);
+ exfat_remove_entries(new_inode, &es, ES_IDX_FILE);
- if (exfat_remove_entries(new_inode, p_dir, new_entry, 0,
- num_entries + 1)) {
- ret = -EIO;
+ ret = exfat_put_dentry_set(&es, IS_DIRSYNC(new_inode));
+ if (ret)
goto del_out;
- }
/* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
- if (new_entry_type == TYPE_DIR) {
+ if (S_ISDIR(new_inode->i_mode) &&
+ new_ei->start_clu != EXFAT_EOF_CLUSTER) {
/* new_ei, new_clu_to_free */
struct exfat_chain new_clu_to_free;
@@ -1311,6 +1224,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
}
i_size_write(new_inode, 0);
+ new_ei->valid_size = 0;
new_ei->start_clu = EXFAT_EOF_CLUSTER;
new_ei->flags = ALLOC_NO_FAT_CHAIN;
}
@@ -1321,12 +1235,11 @@ del_out:
*/
new_ei->dir.dir = DIR_DELETED;
}
- exfat_clear_volume_dirty(sb);
out:
return ret;
}
-static int exfat_rename(struct user_namespace *mnt_userns,
+static int exfat_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -1335,6 +1248,7 @@ static int exfat_rename(struct user_namespace *mnt_userns,
struct super_block *sb = old_dir->i_sb;
loff_t i_pos;
int err;
+ loff_t size = i_size_read(new_dir);
/*
* The VFS already checks for existence, so for local filesystems
@@ -1353,10 +1267,10 @@ static int exfat_rename(struct user_namespace *mnt_userns,
goto unlock;
inode_inc_iversion(new_dir);
- new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
- EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
- exfat_truncate_atime(&new_dir->i_atime);
- if (IS_DIRSYNC(new_dir))
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+ EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
+ exfat_truncate_inode_atime(new_dir);
+ if (IS_DIRSYNC(new_dir) && size != i_size_read(new_dir))
exfat_sync_inode(new_dir);
else
mark_inode_dirty(new_dir);
@@ -1377,10 +1291,7 @@ static int exfat_rename(struct user_namespace *mnt_userns,
}
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
- if (IS_DIRSYNC(old_dir))
- exfat_sync_inode(old_dir);
- else
+ if (new_dir != old_dir)
mark_inode_dirty(old_dir);
if (new_inode) {
@@ -1395,8 +1306,7 @@ static int exfat_rename(struct user_namespace *mnt_userns,
exfat_warn(sb, "abnormal access to an inode dropped");
WARN_ON(new_inode->i_nlink == 0);
}
- new_inode->i_ctime = EXFAT_I(new_inode)->i_crtime =
- current_time(new_inode);
+ EXFAT_I(new_inode)->i_crtime = current_time(new_inode);
}
unlock:
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index 314d5407a1be..57db08a5271c 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -6,7 +6,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -509,7 +509,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
}
if (unilen > MAX_NAME_LENGTH) {
- exfat_err(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
+ exfat_debug(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
__func__, len, unilen, MAX_NAME_LENGTH);
return -ENAMETOOLONG;
}
@@ -616,9 +616,6 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
unilen++;
}
- if (p_cstring[i] != '\0')
- lossy |= NLS_NAME_OVERLEN;
-
*uniname = '\0';
p_uniname->name_len = unilen;
p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
@@ -655,7 +652,6 @@ static int exfat_load_upcase_table(struct super_block *sb,
unsigned int sect_size = sb->s_blocksize;
unsigned int i, index = 0;
u32 chksum = 0;
- int ret;
unsigned char skip = false;
unsigned short *upcase_table;
@@ -671,10 +667,9 @@ static int exfat_load_upcase_table(struct super_block *sb,
bh = sb_bread(sb, sector);
if (!bh) {
- exfat_err(sb, "failed to read sector(0x%llx)\n",
+ exfat_err(sb, "failed to read sector(0x%llx)",
(unsigned long long)sector);
- ret = -EIO;
- goto free_table;
+ return -EIO;
}
sector++;
for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
@@ -701,15 +696,12 @@ static int exfat_load_upcase_table(struct super_block *sb,
exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)",
index, chksum, utbl_checksum);
- ret = -EINVAL;
-free_table:
- exfat_free_upcase_table(sbi);
- return ret;
+ return -EINVAL;
}
static int exfat_load_default_upcase_table(struct super_block *sb)
{
- int i, ret = -EIO;
+ int i;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned char skip = false;
unsigned short uni = 0, *upcase_table;
@@ -740,8 +732,7 @@ static int exfat_load_default_upcase_table(struct super_block *sb)
return 0;
/* FATAL error: default upcase table has error */
- exfat_free_upcase_table(sbi);
- return ret;
+ return -EIO;
}
int exfat_create_upcase_table(struct super_block *sb)
@@ -761,7 +752,7 @@ int exfat_create_upcase_table(struct super_block *sb)
while (clu.dir != EXFAT_EOF_CLUSTER) {
for (i = 0; i < sbi->dentries_per_clu; i++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
@@ -785,14 +776,17 @@ int exfat_create_upcase_table(struct super_block *sb)
le32_to_cpu(ep->dentry.upcase.checksum));
brelse(bh);
- if (ret && ret != -EIO)
+ if (ret && ret != -EIO) {
+ /* free memory from exfat_load_upcase_table call */
+ exfat_free_upcase_table(sbi);
goto load_default;
+ }
/* load successfully */
return ret;
}
- if (exfat_get_next_cluster(sb, &(clu.dir)))
+ if (exfat_get_next_cluster(sb, &clu.dir))
return -EIO;
}
@@ -804,4 +798,5 @@ load_default:
void exfat_free_upcase_table(struct exfat_sb_info *sbi)
{
kvfree(sbi->vol_utbl);
+ sbi->vol_utbl = NULL;
}
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 5539ffc20d16..10e872a99663 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -17,6 +17,7 @@
#include <linux/iversion.h>
#include <linux/nls.h>
#include <linux/buffer_head.h>
+#include <linux/magic.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -30,14 +31,14 @@ static void exfat_free_iocharset(struct exfat_sb_info *sbi)
kfree(sbi->options.iocharset);
}
-static void exfat_delayed_free(struct rcu_head *p)
+static void exfat_set_iocharset(struct exfat_mount_options *opts,
+ char *iocharset)
{
- struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
-
- unload_nls(sbi->nls_io);
- exfat_free_iocharset(sbi);
- exfat_free_upcase_table(sbi);
- kfree(sbi);
+ opts->iocharset = iocharset;
+ if (!strcmp(opts->iocharset, "utf8"))
+ opts->utf8 = 1;
+ else
+ opts->utf8 = 0;
}
static void exfat_put_super(struct super_block *sb)
@@ -45,28 +46,10 @@ static void exfat_put_super(struct super_block *sb)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
mutex_lock(&sbi->s_lock);
+ exfat_clear_volume_dirty(sb);
exfat_free_bitmap(sbi);
brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
-
- call_rcu(&sbi->rcu, exfat_delayed_free);
-}
-
-static int exfat_sync_fs(struct super_block *sb, int wait)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- int err = 0;
-
- if (!wait)
- return 0;
-
- /* If there are some dirty buffers in the bdev inode */
- mutex_lock(&sbi->s_lock);
- sync_blockdev(sb->s_bdev);
- if (exfat_clear_volume_dirty(sb))
- err = -EIO;
- mutex_unlock(&sbi->s_lock);
- return err;
}
static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -75,15 +58,6 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned long long id = huge_encode_dev(sb->s_bdev->bd_dev);
- if (sbi->used_clusters == EXFAT_CLUSTERS_UNTRACKED) {
- mutex_lock(&sbi->s_lock);
- if (exfat_count_used_clusters(sb, &sbi->used_clusters)) {
- mutex_unlock(&sbi->s_lock);
- return -EIO;
- }
- mutex_unlock(&sbi->s_lock);
- }
-
buf->f_type = sb->s_magic;
buf->f_bsize = sbi->cluster_size;
buf->f_blocks = sbi->num_clusters - 2; /* clu 0 & 1 */
@@ -99,7 +73,6 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
- bool sync;
/* retain persistent-flags */
new_flags |= sbi->vol_flags_persistent;
@@ -118,16 +91,11 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
p_boot->vol_flags = cpu_to_le16(new_flags);
- if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh))
- sync = true;
- else
- sync = false;
-
set_buffer_uptodate(sbi->boot_bh);
mark_buffer_dirty(sbi->boot_bh);
- if (sync)
- sync_dirty_buffer(sbi->boot_bh);
+ __sync_dirty_buffer(sbi->boot_bh, REQ_SYNC | REQ_FUA | REQ_PREFLUSH);
+
return 0;
}
@@ -173,16 +141,57 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",errors=remount-ro");
if (opts->discard)
seq_puts(m, ",discard");
- if (opts->time_offset)
+ if (opts->keep_last_dots)
+ seq_puts(m, ",keep_last_dots");
+ if (opts->sys_tz)
+ seq_puts(m, ",sys_tz");
+ else if (opts->time_offset)
seq_printf(m, ",time_offset=%d", opts->time_offset);
+ if (opts->zero_size_dir)
+ seq_puts(m, ",zero_size_dir");
return 0;
}
+int exfat_force_shutdown(struct super_block *sb, u32 flags)
+{
+ int ret;
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+
+ if (exfat_forced_shutdown(sb))
+ return 0;
+
+ switch (flags) {
+ case EXFAT_GOING_DOWN_DEFAULT:
+ case EXFAT_GOING_DOWN_FULLSYNC:
+ ret = bdev_freeze(sb->s_bdev);
+ if (ret)
+ return ret;
+ bdev_thaw(sb->s_bdev);
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ case EXFAT_GOING_DOWN_NOSYNC:
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (opts->discard)
+ opts->discard = 0;
+ return 0;
+}
+
+static void exfat_shutdown(struct super_block *sb)
+{
+ exfat_force_shutdown(sb, EXFAT_GOING_DOWN_NOSYNC);
+}
+
static struct inode *exfat_alloc_inode(struct super_block *sb)
{
struct exfat_inode_info *ei;
- ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS);
+ ei = alloc_inode_sb(sb, exfat_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
@@ -201,9 +210,9 @@ static const struct super_operations exfat_sops = {
.write_inode = exfat_write_inode,
.evict_inode = exfat_evict_inode,
.put_super = exfat_put_super,
- .sync_fs = exfat_sync_fs,
.statfs = exfat_statfs,
.show_options = exfat_show_options,
+ .shutdown = exfat_shutdown,
};
enum {
@@ -216,7 +225,10 @@ enum {
Opt_charset,
Opt_errors,
Opt_discard,
+ Opt_keep_last_dots,
+ Opt_sys_tz,
Opt_time_offset,
+ Opt_zero_size_dir,
/* Deprecated options */
Opt_utf8,
@@ -233,16 +245,19 @@ static const struct constant_table exfat_param_enums[] = {
};
static const struct fs_parameter_spec exfat_parameters[] = {
- fsparam_u32("uid", Opt_uid),
- fsparam_u32("gid", Opt_gid),
+ fsparam_uid("uid", Opt_uid),
+ fsparam_gid("gid", Opt_gid),
fsparam_u32oct("umask", Opt_umask),
fsparam_u32oct("dmask", Opt_dmask),
fsparam_u32oct("fmask", Opt_fmask),
fsparam_u32oct("allow_utime", Opt_allow_utime),
fsparam_string("iocharset", Opt_charset),
fsparam_enum("errors", Opt_errors, exfat_param_enums),
- fsparam_flag("discard", Opt_discard),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_flag("keep_last_dots", Opt_keep_last_dots),
+ fsparam_flag("sys_tz", Opt_sys_tz),
fsparam_s32("time_offset", Opt_time_offset),
+ fsparam_flag_no("zero_size_dir", Opt_zero_size_dir),
__fsparam(NULL, "utf8", Opt_utf8, fs_param_deprecated,
NULL),
__fsparam(NULL, "debug", Opt_debug, fs_param_deprecated,
@@ -267,10 +282,10 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
switch (opt) {
case Opt_uid:
- opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
+ opts->fs_uid = result.uid;
break;
case Opt_gid:
- opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
+ opts->fs_gid = result.gid;
break;
case Opt_umask:
opts->fs_fmask = result.uint_32;
@@ -287,14 +302,20 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_charset:
exfat_free_iocharset(sbi);
- opts->iocharset = param->string;
+ exfat_set_iocharset(opts, param->string);
param->string = NULL;
break;
case Opt_errors:
opts->errors = result.uint_32;
break;
case Opt_discard:
- opts->discard = 1;
+ opts->discard = !result.negated;
+ break;
+ case Opt_keep_last_dots:
+ opts->keep_last_dots = 1;
+ break;
+ case Opt_sys_tz:
+ opts->sys_tz = 1;
break;
case Opt_time_offset:
/*
@@ -305,6 +326,9 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
return -EINVAL;
opts->time_offset = result.int_32;
break;
+ case Opt_zero_size_dir:
+ opts->zero_size_dir = !result.negated;
+ break;
case Opt_utf8:
case Opt_debug:
case Opt_namecase:
@@ -327,13 +351,12 @@ static void exfat_hash_init(struct super_block *sb)
INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
}
-static int exfat_read_root(struct inode *inode)
+static int exfat_read_root(struct inode *inode, struct exfat_chain *root_clu)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- struct exfat_chain cdir;
- int num_subdirs, num_clu = 0;
+ int num_subdirs;
exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
ei->entry = -1;
@@ -346,12 +369,9 @@ static int exfat_read_root(struct inode *inode)
ei->hint_stat.clu = sbi->root_dir;
ei->hint_femp.eidx = EXFAT_HINT_NONE;
- exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
- if (exfat_count_num_clusters(sb, &cdir, &num_clu))
- return -EIO;
- i_size_write(inode, num_clu << sbi->cluster_size_bits);
+ i_size_write(inode, EXFAT_CLU_TO_B(root_clu->size, sbi));
- num_subdirs = exfat_count_dir_entries(sb, &cdir);
+ num_subdirs = exfat_count_dir_entries(sb, root_clu);
if (num_subdirs < 0)
return -EIO;
set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR);
@@ -360,20 +380,16 @@ static int exfat_read_root(struct inode *inode)
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
inode->i_generation = 0;
- inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777);
+ inode->i_mode = exfat_make_mode(sbi, EXFAT_ATTR_SUBDIR, 0777);
inode->i_op = &exfat_dir_inode_operations;
inode->i_fop = &exfat_dir_operations;
- inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1))
- & ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
- EXFAT_I(inode)->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
- EXFAT_I(inode)->i_size_aligned = i_size_read(inode);
- EXFAT_I(inode)->i_size_ondisk = i_size_read(inode);
+ inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
- exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
- current_time(inode);
- exfat_truncate_atime(&inode->i_atime);
+ exfat_save_attr(inode, EXFAT_ATTR_SUBDIR);
+ ei->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
return 0;
}
@@ -417,7 +433,10 @@ static int exfat_read_boot_sector(struct super_block *sb)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
/* set block size to read super block */
- sb_min_blocksize(sb, 512);
+ if (!sb_min_blocksize(sb, 512)) {
+ exfat_err(sb, "unable to set blocksize");
+ return -EINVAL;
+ }
/* read boot sector */
sbi->boot_bh = sb_bread(sb, 0);
@@ -455,7 +474,7 @@ static int exfat_read_boot_sector(struct super_block *sb)
*/
if (p_boot->sect_size_bits < EXFAT_MIN_SECT_SIZE_BITS ||
p_boot->sect_size_bits > EXFAT_MAX_SECT_SIZE_BITS) {
- exfat_err(sb, "bogus sector size bits : %u\n",
+ exfat_err(sb, "bogus sector size bits : %u",
p_boot->sect_size_bits);
return -EINVAL;
}
@@ -464,7 +483,7 @@ static int exfat_read_boot_sector(struct super_block *sb)
* sect_per_clus_bits could be at least 0 and at most 25 - sect_size_bits.
*/
if (p_boot->sect_per_clus_bits > EXFAT_MAX_SECT_PER_CLUS_BITS(p_boot)) {
- exfat_err(sb, "bogus sectors bits per cluster : %u\n",
+ exfat_err(sb, "bogus sectors bits per cluster : %u",
p_boot->sect_per_clus_bits);
return -EINVAL;
}
@@ -492,7 +511,6 @@ static int exfat_read_boot_sector(struct super_block *sb)
sbi->vol_flags = le16_to_cpu(p_boot->vol_flags);
sbi->vol_flags_persistent = sbi->vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
- sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
/* check consistencies */
if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits <
@@ -569,7 +587,8 @@ static int exfat_verify_boot_region(struct super_block *sb)
}
/* mount the file system volume */
-static int __exfat_fill_super(struct super_block *sb)
+static int __exfat_fill_super(struct super_block *sb,
+ struct exfat_chain *root_clu)
{
int ret;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -586,6 +605,18 @@ static int __exfat_fill_super(struct super_block *sb)
goto free_bh;
}
+ /*
+ * Call exfat_count_num_clusters() before searching for the up-case and
+ * bitmap directory entries, to avoid an infinite loop if they are
+ * missing and the cluster chain contains a loop.
+ */
+ exfat_chain_set(root_clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ ret = exfat_count_num_clusters(sb, root_clu, &root_clu->size);
+ if (ret) {
+ exfat_err(sb, "failed to count the number of clusters in root");
+ goto free_bh;
+ }
+
ret = exfat_create_upcase_table(sb);
if (ret) {
exfat_err(sb, "failed to load upcase table");
@@ -595,7 +626,18 @@ static int __exfat_fill_super(struct super_block *sb)
ret = exfat_load_bitmap(sb);
if (ret) {
exfat_err(sb, "failed to load alloc-bitmap");
- goto free_upcase_table;
+ goto free_bh;
+ }
+
+ if (!exfat_test_bitmap(sb, sbi->root_dir)) {
+ exfat_warn(sb, "failed to test first cluster bit of root dir(%u)",
+ sbi->root_dir);
+ /*
+ * The first cluster bit of the root directory should never be
+ * unset unless the storage is corrupted. Set the bit here so that
+ * operations can proceed after mount.
+ */
+ exfat_set_bitmap(sb, sbi->root_dir, false);
}
ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
@@ -608,8 +650,6 @@ static int __exfat_fill_super(struct super_block *sb)
free_alloc_bitmap:
exfat_free_bitmap(sbi);
-free_upcase_table:
- exfat_free_upcase_table(sbi);
free_bh:
brelse(sbi->boot_bh);
return ret;
@@ -620,18 +660,15 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
struct exfat_sb_info *sbi = sb->s_fs_info;
struct exfat_mount_options *opts = &sbi->options;
struct inode *root_inode;
+ struct exfat_chain root_clu;
int err;
if (opts->allow_utime == (unsigned short)-1)
opts->allow_utime = ~opts->fs_dmask & 0022;
- if (opts->discard) {
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
- if (!blk_queue_discard(q)) {
- exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
- opts->discard = 0;
- }
+ if (opts->discard && !bdev_max_discard_sectors(sb->s_bdev)) {
+ exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
+ opts->discard = 0;
}
sb->s_flags |= SB_NODIRATIME;
@@ -642,7 +679,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS;
sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS;
- err = __exfat_fill_super(sb);
+ err = __exfat_fill_super(sb, &root_clu);
if (err) {
exfat_err(sb, "failed to recognize exfat type");
goto check_nls_io;
@@ -651,8 +688,8 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
/* set up enough so that it can read an inode */
exfat_hash_init(sb);
- if (!strcmp(sbi->options.iocharset, "utf8"))
- opts->utf8 = 1;
+ if (sbi->options.utf8)
+ set_default_d_op(sb, &exfat_utf8_dentry_ops);
else {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
@@ -661,13 +698,9 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
err = -EINVAL;
goto free_table;
}
+ set_default_d_op(sb, &exfat_dentry_ops);
}
- if (sbi->options.utf8)
- sb->s_d_op = &exfat_utf8_dentry_ops;
- else
- sb->s_d_op = &exfat_dentry_ops;
-
root_inode = new_inode(sb);
if (!root_inode) {
exfat_err(sb, "failed to allocate root inode");
@@ -677,7 +710,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
root_inode->i_ino = EXFAT_ROOT_INO;
inode_set_iversion(root_inode, 1);
- err = exfat_read_root(root_inode);
+ err = exfat_read_root(root_inode, &root_clu);
if (err) {
exfat_err(sb, "failed to initialize root inode");
goto put_inode;
@@ -700,15 +733,10 @@ put_inode:
sb->s_root = NULL;
free_table:
- exfat_free_upcase_table(sbi);
exfat_free_bitmap(sbi);
brelse(sbi->boot_bh);
check_nls_io:
- unload_nls(sbi->nls_io);
- exfat_free_iocharset(sbi);
- sb->s_fs_info = NULL;
- kfree(sbi);
return err;
}
@@ -717,22 +745,62 @@ static int exfat_get_tree(struct fs_context *fc)
return get_tree_bdev(fc, exfat_fill_super);
}
+static void exfat_free_sbi(struct exfat_sb_info *sbi)
+{
+ exfat_free_iocharset(sbi);
+ kfree(sbi);
+}
+
static void exfat_free(struct fs_context *fc)
{
struct exfat_sb_info *sbi = fc->s_fs_info;
- if (sbi) {
- exfat_free_iocharset(sbi);
- kfree(sbi);
- }
+ if (sbi)
+ exfat_free_sbi(sbi);
}
static int exfat_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+ struct exfat_sb_info *remount_sbi = fc->s_fs_info;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *new_opts = &remount_sbi->options;
+ struct exfat_mount_options *cur_opts = &sbi->options;
+
fc->sb_flags |= SB_NODIRATIME;
- /* volume flag will be updated in exfat_sync_fs */
- sync_filesystem(fc->root->d_sb);
+ sync_filesystem(sb);
+ mutex_lock(&sbi->s_lock);
+ exfat_clear_volume_dirty(sb);
+ mutex_unlock(&sbi->s_lock);
+
+ if (new_opts->allow_utime == (unsigned short)-1)
+ new_opts->allow_utime = ~new_opts->fs_dmask & 0022;
+
+ /*
+ * Since the old settings of these mount options are cached in
+ * inodes or dentries, they cannot be modified dynamically.
+ */
+ if (strcmp(new_opts->iocharset, cur_opts->iocharset) ||
+ new_opts->keep_last_dots != cur_opts->keep_last_dots ||
+ new_opts->sys_tz != cur_opts->sys_tz ||
+ new_opts->time_offset != cur_opts->time_offset ||
+ !uid_eq(new_opts->fs_uid, cur_opts->fs_uid) ||
+ !gid_eq(new_opts->fs_gid, cur_opts->fs_gid) ||
+ new_opts->fs_fmask != cur_opts->fs_fmask ||
+ new_opts->fs_dmask != cur_opts->fs_dmask ||
+ new_opts->allow_utime != cur_opts->allow_utime)
+ return -EINVAL;
+
+ if (new_opts->discard != cur_opts->discard &&
+ new_opts->discard &&
+ !bdev_max_discard_sectors(sb->s_bdev)) {
+ exfat_warn(sb, "remounting with \"discard\" option, but the device does not support discard");
+ return -EINVAL;
+ }
+
+ swap(*cur_opts, *new_opts);
+
return 0;
}
@@ -756,26 +824,55 @@ static int exfat_init_fs_context(struct fs_context *fc)
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- sbi->options.fs_uid = current_uid();
- sbi->options.fs_gid = current_gid();
- sbi->options.fs_fmask = current->fs->umask;
- sbi->options.fs_dmask = current->fs->umask;
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE && fc->root) {
+ struct super_block *sb = fc->root->d_sb;
+ struct exfat_mount_options *cur_opts = &EXFAT_SB(sb)->options;
+
+ sbi->options.fs_uid = cur_opts->fs_uid;
+ sbi->options.fs_gid = cur_opts->fs_gid;
+ sbi->options.fs_fmask = cur_opts->fs_fmask;
+ sbi->options.fs_dmask = cur_opts->fs_dmask;
+ } else {
+ sbi->options.fs_uid = current_uid();
+ sbi->options.fs_gid = current_gid();
+ sbi->options.fs_fmask = current->fs->umask;
+ sbi->options.fs_dmask = current->fs->umask;
+ }
+
sbi->options.allow_utime = -1;
- sbi->options.iocharset = exfat_default_iocharset;
sbi->options.errors = EXFAT_ERRORS_RO;
+ exfat_set_iocharset(&sbi->options, exfat_default_iocharset);
fc->s_fs_info = sbi;
fc->ops = &exfat_context_ops;
return 0;
}
+static void delayed_free(struct rcu_head *p)
+{
+ struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
+
+ unload_nls(sbi->nls_io);
+ exfat_free_upcase_table(sbi);
+ exfat_free_sbi(sbi);
+}
+
+static void exfat_kill_sb(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+
+ kill_block_super(sb);
+ if (sbi)
+ call_rcu(&sbi->rcu, delayed_free);
+}
+
static struct file_system_type exfat_fs_type = {
.owner = THIS_MODULE,
.name = "exfat",
.init_fs_context = exfat_init_fs_context,
.parameters = exfat_parameters,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .kill_sb = exfat_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
static void exfat_inode_init_once(void *foo)
@@ -800,7 +897,7 @@ static int __init init_exfat_fs(void)
exfat_inode_cachep = kmem_cache_create("exfat_inode_cache",
sizeof(struct exfat_inode_info),
- 0, SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ 0, SLAB_RECLAIM_ACCOUNT,
exfat_inode_init_once);
if (!exfat_inode_cachep) {
err = -ENOMEM;
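The exfat_kill_sb()/delayed_free() pair above defers the final release of the sb_info until after an RCU grace period, so lockless readers that still reference the superblock info never see freed memory. A condensed sketch of the pattern, with illustrative names and the per-filesystem state omitted:

#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_sb_info {
	/* ... per-superblock state ... */
	struct rcu_head rcu;
};

static void my_delayed_free(struct rcu_head *p)
{
	struct my_sb_info *sbi = container_of(p, struct my_sb_info, rcu);

	kfree(sbi);			/* runs only after a grace period */
}

static void my_kill_sb(struct super_block *sb)
{
	struct my_sb_info *sbi = sb->s_fs_info;

	kill_block_super(sb);		/* generic teardown first */
	if (sbi)
		call_rcu(&sbi->rcu, my_delayed_free);
}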
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 0106eba46d5a..d3e55de4a2a2 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -18,7 +18,7 @@
#include <linux/sched.h>
#include <linux/cred.h>
-#define dprintk(fmt, args...) do{}while(0)
+#define dprintk(fmt, args...) pr_debug(fmt, ##args)
static int get_name(const struct path *path, char *name, struct dentry *child);
@@ -126,14 +126,12 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
int err;
parent = ERR_PTR(-EACCES);
- inode_lock(dentry->d_inode);
if (mnt->mnt_sb->s_export_op->get_parent)
parent = mnt->mnt_sb->s_export_op->get_parent(dentry);
- inode_unlock(dentry->d_inode);
if (IS_ERR(parent)) {
- dprintk("%s: get_parent of %ld failed, err %d\n",
- __func__, dentry->d_inode->i_ino, PTR_ERR(parent));
+ dprintk("get_parent of %lu failed, err %ld\n",
+ dentry->d_inode->i_ino, PTR_ERR(parent));
return parent;
}
@@ -145,9 +143,9 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
if (err)
goto out_err;
dprintk("%s: found name: %s\n", __func__, nbuf);
- tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
+ tmp = lookup_one_unlocked(mnt_idmap(mnt), &QSTR(nbuf), parent);
if (IS_ERR(tmp)) {
- dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
+ dprintk("lookup failed: %ld\n", PTR_ERR(tmp));
err = PTR_ERR(tmp);
goto out_err;
}
@@ -248,21 +246,20 @@ struct getdents_callback {
* A rather strange filldir function to capture
* the name matching the specified inode number.
*/
-static int filldir_one(struct dir_context *ctx, const char *name, int len,
+static bool filldir_one(struct dir_context *ctx, const char *name, int len,
loff_t pos, u64 ino, unsigned int d_type)
{
struct getdents_callback *buf =
container_of(ctx, struct getdents_callback, ctx);
- int result = 0;
buf->sequence++;
- if (buf->ino == ino && len <= NAME_MAX) {
+ if (buf->ino == ino && len <= NAME_MAX && !is_dot_dotdot(name, len)) {
memcpy(buf->name, name, len);
buf->name[len] = '\0';
buf->found = 1;
- result = -1;
+ return false; // no more
}
- return result;
+ return true;
}
/**
@@ -287,6 +284,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
};
struct getdents_callback buffer = {
.ctx.actor = filldir_one,
+ .ctx.count = INT_MAX,
.name = name,
};
@@ -316,7 +314,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
goto out;
error = -EINVAL;
- if (!file->f_op->iterate && !file->f_op->iterate_shared)
+ if (!file->f_op->iterate_shared)
goto out_close;
buffer.sequence = 0;
@@ -343,65 +341,84 @@ out:
return error;
}
+#define FILEID_INO64_GEN_LEN 3
+
/**
- * export_encode_fh - default export_operations->encode_fh function
+ * exportfs_encode_ino64_fid - encode non-decodeable 64bit ino file id
* @inode: the object to encode
* @fid: where to store the file handle fragment
- * @max_len: maximum length to store there
- * @parent: parent directory inode, if wanted
+ * @max_len: maximum length to store there (in 4 byte units)
*
- * This default encode_fh function assumes that the 32 inode number
- * is suitable for locating an inode, and that the generation number
- * can be used to check that it is still valid. It places them in the
- * filehandle fragment where export_decode_fh expects to find them.
+ * This generic function is used to encode a non-decodeable file id for
+ * fanotify for filesystems that do not support NFS export.
*/
-static int export_encode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent)
+static int exportfs_encode_ino64_fid(struct inode *inode, struct fid *fid,
+ int *max_len)
{
- int len = *max_len;
- int type = FILEID_INO32_GEN;
-
- if (parent && (len < 4)) {
- *max_len = 4;
- return FILEID_INVALID;
- } else if (len < 2) {
- *max_len = 2;
+ if (*max_len < FILEID_INO64_GEN_LEN) {
+ *max_len = FILEID_INO64_GEN_LEN;
return FILEID_INVALID;
}
- len = 2;
- fid->i32.ino = inode->i_ino;
- fid->i32.gen = inode->i_generation;
- if (parent) {
- fid->i32.parent_ino = parent->i_ino;
- fid->i32.parent_gen = parent->i_generation;
- len = 4;
- type = FILEID_INO32_GEN_PARENT;
- }
- *max_len = len;
- return type;
+ fid->i64.ino = inode->i_ino;
+ fid->i64.gen = inode->i_generation;
+ *max_len = FILEID_INO64_GEN_LEN;
+
+ return FILEID_INO64_GEN;
}
+/**
+ * exportfs_encode_inode_fh - encode a file handle from inode
+ * @inode: the object to encode
+ * @fid: where to store the file handle fragment
+ * @max_len: maximum length to store there
+ * @parent: parent directory inode, if wanted
+ * @flags: properties of the requested file handle
+ *
+ * Returns an enum fid_type or a negative errno.
+ */
int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent)
+ int *max_len, struct inode *parent, int flags)
{
const struct export_operations *nop = inode->i_sb->s_export_op;
+ enum fid_type type;
+
+ if (!exportfs_can_encode_fh(nop, flags))
+ return -EOPNOTSUPP;
+
+ if (!nop && (flags & EXPORT_FH_FID))
+ type = exportfs_encode_ino64_fid(inode, fid, max_len);
+ else
+ type = nop->encode_fh(inode, fid->raw, max_len, parent);
- if (nop && nop->encode_fh)
- return nop->encode_fh(inode, fid->raw, max_len, parent);
+ if (type > 0 && FILEID_USER_FLAGS(type)) {
+ pr_warn_once("%s: unexpected fh type value 0x%x from fstype %s.\n",
+ __func__, type, inode->i_sb->s_type->name);
+ return -EINVAL;
+ }
+
+ return type;
- return export_encode_fh(inode, fid, max_len, parent);
}
EXPORT_SYMBOL_GPL(exportfs_encode_inode_fh);
+/**
+ * exportfs_encode_fh - encode a file handle from dentry
+ * @dentry: the object to encode
+ * @fid: where to store the file handle fragment
+ * @max_len: maximum length to store there
+ * @flags: properties of the requested file handle
+ *
+ * Returns an enum fid_type or a negative errno.
+ */
int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
- int connectable)
+ int flags)
{
int error;
struct dentry *p = NULL;
struct inode *inode = dentry->d_inode, *parent = NULL;
- if (connectable && !S_ISDIR(inode->i_mode)) {
+ if ((flags & EXPORT_FH_CONNECTABLE) && !S_ISDIR(inode->i_mode)) {
p = dget_parent(dentry);
/*
* note that while p might've ceased to be our parent already,
@@ -410,7 +427,7 @@ int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
parent = p->d_inode;
}
- error = exportfs_encode_inode_fh(inode, fid, max_len, parent);
+ error = exportfs_encode_inode_fh(inode, fid, max_len, parent, flags);
dput(p);
return error;
@@ -419,7 +436,7 @@ EXPORT_SYMBOL_GPL(exportfs_encode_fh);
struct dentry *
exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
- int fileid_type,
+ int fileid_type, unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context)
{
@@ -428,15 +445,23 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
char nbuf[NAME_MAX+1];
int err;
+ if (fileid_type < 0 || FILEID_USER_FLAGS(fileid_type))
+ return ERR_PTR(-EINVAL);
+
/*
* Try to get any dentry for the given file handle from the filesystem.
*/
- if (!nop || !nop->fh_to_dentry)
+ if (!exportfs_can_decode_fh(nop))
return ERR_PTR(-ESTALE);
result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
if (IS_ERR_OR_NULL(result))
return result;
+ if ((flags & EXPORT_FH_DIR_ONLY) && !d_is_dir(result)) {
+ err = -ENOTDIR;
+ goto err_result;
+ }
+
/*
* If no acceptance criteria were specified by the caller, a disconnected
* dentry is also acceptable. Callers may use this mode to query if
@@ -524,15 +549,13 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
goto err_result;
}
- inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ nresult = lookup_one_unlocked(mnt_idmap(mnt), &QSTR(nbuf), target_dir);
if (!IS_ERR(nresult)) {
if (unlikely(nresult->d_inode != result->d_inode)) {
dput(nresult);
nresult = ERR_PTR(-ESTALE);
}
}
- inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
@@ -572,7 +595,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
{
struct dentry *ret;
- ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type,
+ ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type, 0,
acceptable, context);
if (IS_ERR_OR_NULL(ret)) {
if (ret == ERR_PTR(-ENOMEM))
@@ -583,4 +606,5 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
+MODULE_DESCRIPTION("Code mapping from inodes to file handles");
MODULE_LICENSE("GPL");
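As reworked above, exportfs_encode_inode_fh() returns an enum fid_type on success, FILEID_INVALID when the supplied buffer is too small (with *max_len updated to the required size in 4-byte words), or a negative errno. A hedged caller sketch based only on the behaviour shown in these hunks; the wrapper name is illustrative:

#include <linux/exportfs.h>

static int example_encode_fid(struct inode *inode, struct fid *fid, int *dwords)
{
	int type = exportfs_encode_inode_fh(inode, fid, dwords, NULL,
					    EXPORT_FH_FID);

	if (type < 0)
		return type;		/* e.g. -EOPNOTSUPP */
	if (type == FILEID_INVALID)
		return -EOVERFLOW;	/* *dwords now holds the required size */
	return type;			/* positive fid_type on success */
}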
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 1248ff4ef562..d5bce83ad905 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,14 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
config EXT2_FS
- tristate "Second extended fs support"
+ tristate "Second extended fs support (DEPRECATED)"
+ select BUFFER_HEAD
select FS_IOMAP
help
Ext2 is a standard Linux file system for hard disks.
- To compile this file system support as a module, choose M here: the
- module will be called ext2.
+ This filesystem driver is deprecated because it does not properly
+ support inode time stamps beyond 03:14:07 UTC on 19 January 2038.
- If unsure, say Y.
+ Ext2 users are advised to use the ext4 driver to access their filesystem.
+ The ext4 driver is fully compatible, supports filesystems without a
+ journal or extents, and also supports larger timestamps if the
+ filesystem is created with at least 256-byte inodes.
+
+ This code is kept as a simple reference for filesystem developers.
+
+ If unsure, say N.
config EXT2_FS_XATTR
bool "Ext2 extended attributes"
diff --git a/fs/ext2/Makefile b/fs/ext2/Makefile
index 311479d864a7..8860948ef9ca 100644
--- a/fs/ext2/Makefile
+++ b/fs/ext2/Makefile
@@ -6,7 +6,10 @@
obj-$(CONFIG_EXT2_FS) += ext2.o
ext2-y := balloc.o dir.o file.o ialloc.o inode.o \
- ioctl.o namei.o super.o symlink.o
+ ioctl.o namei.o super.o symlink.o trace.o
+
+# Allow the tracepoint infrastructure to include our local trace.h
+CFLAGS_trace.o := -I$(src)
ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext2-$(CONFIG_EXT2_FS_POSIX_ACL) += acl.o
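The -I$(src) flag lets the generated tracepoint code find a trace.h that lives in the filesystem's own directory instead of under include/trace/events/. The trace.c itself is not shown in this excerpt; the usual shape of such a file (an assumption here) is just the CREATE_TRACE_POINTS instantiation:

/*
 * fs/ext2/trace.c: assumed layout; the one place the tracepoint bodies
 * are emitted.  The local trace.h is expected to end with the standard
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE boilerplate so that
 * <trace/define_trace.h> re-includes it from this directory.
 */
#define CREATE_TRACE_POINTS
#include "trace.h"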
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index bf298967c5b8..7e54c31589c7 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -219,15 +219,16 @@ __ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
* inode->i_mutex: down
*/
int
-ext2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ext2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
int error;
int update_mode = 0;
+ struct inode *inode = d_inode(dentry);
umode_t mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
- error = posix_acl_update_mode(&init_user_ns, inode, &mode,
+ error = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode,
&acl);
if (error)
return error;
@@ -236,7 +237,7 @@ ext2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
error = __ext2_set_acl(inode, acl, type);
if (!error && update_mode) {
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
return error;
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index 925ab6287d35..4a8443a2b8ec 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -56,7 +56,7 @@ static inline int ext2_acl_count(size_t size)
/* acl.c */
extern struct posix_acl *ext2_get_acl(struct inode *inode, int type, bool rcu);
-extern int ext2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+extern int ext2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
extern int ext2_init_acl (struct inode *, struct inode *);
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index c17ccc19b938..b8cfab8f98b9 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -36,8 +36,6 @@
*/
-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-
struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
unsigned int block_group,
struct buffer_head ** bh)
@@ -79,26 +77,33 @@ static int ext2_valid_block_bitmap(struct super_block *sb,
ext2_grpblk_t next_zero_bit;
ext2_fsblk_t bitmap_blk;
ext2_fsblk_t group_first_block;
+ ext2_grpblk_t max_bit;
group_first_block = ext2_group_first_block_no(sb, block_group);
+ max_bit = ext2_group_last_block_no(sb, block_group) - group_first_block;
/* check whether block bitmap block number is set */
bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
offset = bitmap_blk - group_first_block;
- if (!ext2_test_bit(offset, bh->b_data))
+ if (offset < 0 || offset > max_bit ||
+ !ext2_test_bit(offset, bh->b_data))
/* bad block bitmap */
goto err_out;
/* check whether the inode bitmap block number is set */
bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
offset = bitmap_blk - group_first_block;
- if (!ext2_test_bit(offset, bh->b_data))
+ if (offset < 0 || offset > max_bit ||
+ !ext2_test_bit(offset, bh->b_data))
/* bad block bitmap */
goto err_out;
/* check whether the inode table block number is set */
bitmap_blk = le32_to_cpu(desc->bg_inode_table);
offset = bitmap_blk - group_first_block;
+ if (offset < 0 || offset > max_bit ||
+ offset + EXT2_SB(sb)->s_itb_per_group - 1 > max_bit)
+ goto err_out;
next_zero_bit = ext2_find_next_zero_bit(bh->b_data,
offset + EXT2_SB(sb)->s_itb_per_group,
offset);
@@ -126,6 +131,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
struct ext2_group_desc * desc;
struct buffer_head * bh = NULL;
ext2_fsblk_t bitmap_blk;
+ int ret;
desc = ext2_get_group_desc(sb, block_group, NULL);
if (!desc)
@@ -139,10 +145,10 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
block_group, le32_to_cpu(desc->bg_block_bitmap));
return NULL;
}
- if (likely(bh_uptodate_or_lock(bh)))
+ ret = bh_read(bh, 0);
+ if (ret > 0)
return bh;
-
- if (bh_submit_read(bh) < 0) {
+ if (ret < 0) {
brelse(bh);
ext2_error(sb, __func__,
"Cannot read block bitmap - "
@@ -413,7 +419,7 @@ void ext2_init_block_alloc_info(struct inode *inode)
struct ext2_block_alloc_info *block_i;
struct super_block *sb = inode->i_sb;
- block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
+ block_i = kmalloc(sizeof(*block_i), GFP_KERNEL);
if (block_i) {
struct ext2_reserve_window_node *rsv = &block_i->rsv_window_node;
@@ -473,8 +479,8 @@ void ext2_discard_reservation(struct inode *inode)
* @block: start physical block to free
* @count: number of blocks to free
*/
-void ext2_free_blocks (struct inode * inode, unsigned long block,
- unsigned long count)
+void ext2_free_blocks(struct inode * inode, ext2_fsblk_t block,
+ unsigned long count)
{
struct buffer_head *bitmap_bh = NULL;
struct buffer_head * bh2;
@@ -666,7 +672,7 @@ ext2_try_to_allocate(struct super_block *sb, int group,
{
ext2_fsblk_t group_first_block = ext2_group_first_block_no(sb, group);
ext2_fsblk_t group_last_block = ext2_group_last_block_no(sb, group);
- ext2_grpblk_t start, end;
+ ext2_grpblk_t start, end;
unsigned long num = 0;
start = 0;
@@ -717,36 +723,34 @@ fail_access:
}
/**
- * find_next_reservable_window():
- * find a reservable space within the given range.
- * It does not allocate the reservation window for now:
- * alloc_new_reservation() will do the work later.
- *
- * @search_head: the head of the searching list;
- * This is not necessarily the list head of the whole filesystem
- *
- * We have both head and start_block to assist the search
- * for the reservable space. The list starts from head,
- * but we will shift to the place where start_block is,
- * then start from there, when looking for a reservable space.
+ * find_next_reservable_window - Find a reservable space within the given range.
+ * @search_head: The list to search.
+ * @my_rsv: The reservation we're currently using.
+ * @sb: The super block.
+ * @start_block: The first block we consider to start the real search from
+ * @last_block: The maximum block number that our goal reservable space
+ * could start from.
*
- * @sb: the super block.
+ * It does not allocate the reservation window: alloc_new_reservation()
+ * will do the work later.
*
- * @start_block: the first block we consider to start the real search from
+ * We search the given range (start_block, last_block), rather than the
+ * whole reservation double linked list, to find a free region of the
+ * requested size that has not been reserved.
*
- * @last_block:
- * the maximum block number that our goal reservable space
- * could start from. This is normally the last block in this
- * group. The search will end when we found the start of next
- * possible reservable space is out of this boundary.
- * This could handle the cross boundary reservation window
- * request.
+ * @search_head is not necessarily the list head of the whole filesystem.
+ * We have both @search_head and @start_block to assist the search for
+ * the reservable space. The list starts from @search_head, but we shift
+ * to the place where @start_block is and start scanning from there.
*
- * basically we search from the given range, rather than the whole
- * reservation double linked list, (start_block, last_block)
- * to find a free region that is of my size and has not
- * been reserved.
+ * @last_block is normally the last block in this group. The search ends
+ * when we find that the start of the next possible reservable space is
+ * beyond this boundary. This handles reservation window requests that
+ * cross the group boundary.
*
+ * Return: -1 if we could not find a range of sufficient size. If we could,
+ * return 0 and fill in @my_rsv with the range information.
*/
static int find_next_reservable_window(
struct ext2_reserve_window_node *search_head,
@@ -834,41 +838,34 @@ static int find_next_reservable_window(
}
/**
- * alloc_new_reservation()--allocate a new reservation window
+ * alloc_new_reservation - Allocate a new reservation window.
+ * @my_rsv: The reservation we're currently using.
+ * @grp_goal: The goal block relative to the start of the group.
+ * @sb: The super block.
+ * @group: The group we are trying to allocate in.
+ * @bitmap_bh: The block group block bitmap.
*
- * To make a new reservation, we search part of the filesystem
- * reservation list (the list that inside the group). We try to
- * allocate a new reservation window near the allocation goal,
- * or the beginning of the group, if there is no goal.
+ * To make a new reservation, we search part of the filesystem reservation
+ * list (the list inside the group). We try to allocate a new
+ * reservation window near @grp_goal, or the beginning of the
+ * group, if @grp_goal is negative.
*
- * We first find a reservable space after the goal, then from
- * there, we check the bitmap for the first free block after
- * it. If there is no free block until the end of group, then the
- * whole group is full, we failed. Otherwise, check if the free
- * block is inside the expected reservable space, if so, we
- * succeed.
- * If the first free block is outside the reservable space, then
- * start from the first free block, we search for next available
- * space, and go on.
+ * We first find a reservable space after the goal, then from there,
+ * we check the bitmap for the first free block after it. If there is
+ * no free block before the end of the group, the whole group is full
+ * and we fail. Otherwise, we check whether the free block lies inside
+ * the expected reservable space; if so, we succeed.
*
- * on succeed, a new reservation will be found and inserted into the list
- * It contains at least one free block, and it does not overlap with other
- * reservation windows.
+ * If the first free block is outside the reservable space, we restart
+ * the search from that free block and look for the next available
+ * space from there.
*
- * failed: we failed to find a reservation window in this group
- *
- * @my_rsv: the reservation
- *
- * @grp_goal: The goal (group-relative). It is where the search for a
- * free reservable space should start from.
- * if we have a goal(goal >0 ), then start from there,
- * no goal(goal = -1), we start from the first block
- * of the group.
- *
- * @sb: the super block
- * @group: the group we are trying to allocate in
- * @bitmap_bh: the block group block bitmap
+ * On success, a new reservation is found and inserted into the list.
+ * It contains at least one free block and does not overlap any other
+ * reservation window.
*
+ * Return: 0 on success, -1 if we failed to find a reservation window
+ * in this group
*/
static int alloc_new_reservation(struct ext2_reserve_window_node *my_rsv,
ext2_grpblk_t grp_goal, struct super_block *sb,
@@ -1132,8 +1129,13 @@ ext2_try_to_allocate_with_rsv(struct super_block *sb, unsigned int group,
if ((my_rsv->rsv_start > group_last_block) ||
(my_rsv->rsv_end < group_first_block)) {
+ ext2_error(sb, __func__,
+ "Reservation out of group %u range goal %d fsb[%lu,%lu] rsv[%lu, %lu]",
+ group, grp_goal, group_first_block,
+ group_last_block, my_rsv->rsv_start,
+ my_rsv->rsv_end);
rsv_window_dump(&EXT2_SB(sb)->s_rsv_window_root, 1);
- BUG();
+ return -1;
}
ret = ext2_try_to_allocate(sb, group, bitmap_bh, grp_goal,
&num, &my_rsv->rsv_window);
@@ -1194,6 +1196,7 @@ int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
* @goal: given target block(filesystem wide)
* @count: target number of blocks to allocate
* @errp: error code
+ * @flags: allocate flags
*
* ext2_new_blocks uses a goal block to assist allocation. If the goal is
* free, or there is a free block within 32 blocks of the goal, that block
@@ -1203,7 +1206,7 @@ int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
* This function also updates quota and i_blocks field.
*/
ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
- unsigned long *count, int *errp)
+ unsigned long *count, int *errp, unsigned int flags)
{
struct buffer_head *bitmap_bh = NULL;
struct buffer_head *gdp_bh;
@@ -1242,15 +1245,15 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
es = EXT2_SB(sb)->s_es;
ext2_debug("goal=%lu.\n", goal);
/*
- * Allocate a block from reservation only when
- * filesystem is mounted with reservation(default,-o reservation), and
- * it's a regular file, and
- * the desired window size is greater than 0 (One could use ioctl
- * command EXT2_IOC_SETRSVSZ to set the window size to 0 to turn off
- * reservation on that particular file)
+ * Allocate a block from the reservation window only when the filesystem
+ * is mounted with reservations (the default, -o reservation), it's a
+ * regular file, and the desired window size is greater than 0 (the
+ * EXT2_IOC_SETRSVSZ ioctl can set the window size to 0 to turn off
+ * reservation on that particular file). Also do not use the reservation
+ * window if the caller asked us not to.
*/
block_i = EXT2_I(inode)->i_block_alloc_info;
- if (block_i) {
+ if (!(flags & EXT2_ALLOC_NORESERVE) && block_i) {
windowsz = block_i->rsv_window_node.rsv_goal_size;
if (windowsz > 0)
my_rsv = &block_i->rsv_window_node;
@@ -1430,13 +1433,6 @@ out:
return 0;
}
-ext2_fsblk_t ext2_new_block(struct inode *inode, unsigned long goal, int *errp)
-{
- unsigned long count = 1;
-
- return ext2_new_blocks(inode, goal, &count, errp);
-}
-
#ifdef EXT2FS_DEBUG
unsigned long ext2_count_free(struct buffer_head *map, unsigned int numchars)
@@ -1480,11 +1476,11 @@ unsigned long ext2_count_free_blocks (struct super_block * sb)
desc_count, bitmap_count);
return bitmap_count;
#else
- for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
- desc = ext2_get_group_desc (sb, i, NULL);
- if (!desc)
- continue;
- desc_count += le16_to_cpu(desc->bg_free_blocks_count);
+ for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
+ desc = ext2_get_group_desc(sb, i, NULL);
+ if (!desc)
+ continue;
+ desc_count += le16_to_cpu(desc->bg_free_blocks_count);
}
return desc_count;
#endif
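The rewritten kernel-doc for find_next_reservable_window() and alloc_new_reservation() above describes a walk over a sorted list of reserved windows, looking for the first unreserved gap of the requested size at or after a goal block. A toy userspace sketch of that search, with illustrative types and no bitmap check:

struct rsv_window {
	unsigned long start, end;	/* inclusive reserved block range */
	struct rsv_window *next;	/* list kept sorted by start */
};

/*
 * Return the first block >= goal at which a run of "size" unreserved
 * blocks can begin, or -1 if no such run starts at or before "last".
 */
static long find_gap(const struct rsv_window *head, unsigned long goal,
		     unsigned long last, unsigned long size)
{
	unsigned long cursor = goal;
	const struct rsv_window *w;

	for (w = head; w; w = w->next) {
		if (w->end < cursor)
			continue;		/* window lies before the cursor */
		if (cursor + size - 1 < w->start)
			return cursor;		/* gap before this window fits */
		cursor = w->end + 1;		/* skip past the window */
		if (cursor > last)
			return -1;
	}
	return cursor <= last ? (long)cursor : -1;
}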
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2c2f179b6977..b07b3b369710 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -81,44 +81,34 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte;
}
-static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;
- int err = 0;
inode_inc_iversion(dir);
- block_write_end(NULL, mapping, pos, len, len, page, NULL);
+ block_write_end(pos, len, len, folio);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
-
- if (IS_DIRSYNC(dir)) {
- err = write_one_page(page);
- if (!err)
- err = sync_inode_metadata(dir, 1);
- } else {
- unlock_page(page);
- }
-
- return err;
+ folio_unlock(folio);
}
-static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
+static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr)
{
- struct inode *dir = page->mapping->host;
+ struct inode *dir = folio->mapping->host;
struct super_block *sb = dir->i_sb;
unsigned chunk_size = ext2_chunk_size(dir);
u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
unsigned offs, rec_len;
- unsigned limit = PAGE_SIZE;
+ unsigned limit = folio_size(folio);
ext2_dirent *p;
char *error;
- if ((dir->i_size >> PAGE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_MASK;
+ if (dir->i_size < folio_pos(folio) + limit) {
+ limit = offset_in_folio(folio, dir->i_size);
if (limit & (chunk_size - 1))
goto Ebadsize;
if (!limit)
@@ -142,7 +132,7 @@ static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
if (offs != limit)
goto Eend;
out:
- SetPageChecked(page);
+ folio_set_checked(folio);
return true;
/* Too bad, we had an error */
@@ -170,49 +160,51 @@ Einumber:
bad_entry:
if (!quiet)
ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
- "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
+ "offset=%llu, inode=%lu, rec_len=%d, name_len=%d",
+ dir->i_ino, error, folio_pos(folio) + offs,
(unsigned long) le32_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
Eend:
if (!quiet) {
p = (ext2_dirent *)(kaddr + offs);
- ext2_error(sb, "ext2_check_page",
+ ext2_error(sb, "ext2_check_folio",
"entry in directory #%lu spans the page boundary"
- "offset=%lu, inode=%lu",
- dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
+ "offset=%llu, inode=%lu",
+ dir->i_ino, folio_pos(folio) + offs,
(unsigned long) le32_to_cpu(p->inode));
}
fail:
- SetPageError(page);
return false;
}
/*
- * Calls to ext2_get_page()/ext2_put_page() must be nested according to the
- * rules documented in kmap_local_page()/kunmap_local().
+ * Calls to ext2_get_folio()/folio_release_kmap() must be nested according
+ * to the rules documented in kmap_local_folio()/kunmap_local().
*
- * NOTE: ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page()
- * and should be treated as a call to ext2_get_page() for nesting purposes.
+ * NOTE: ext2_find_entry() and ext2_dotdot() act as a call
+ * to folio_release_kmap() and should be treated as a call to
+ * folio_release_kmap() for nesting purposes.
*/
-static struct page * ext2_get_page(struct inode *dir, unsigned long n,
- int quiet, void **page_addr)
+static void *ext2_get_folio(struct inode *dir, unsigned long n,
+ int quiet, struct folio **foliop)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (!IS_ERR(page)) {
- *page_addr = kmap_local_page(page);
- if (unlikely(!PageChecked(page))) {
- if (PageError(page) || !ext2_check_page(page, quiet,
- *page_addr))
- goto fail;
- }
+ struct folio *folio = read_mapping_folio(mapping, n, NULL);
+ void *kaddr;
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+ kaddr = kmap_local_folio(folio, 0);
+ if (unlikely(!folio_test_checked(folio))) {
+ if (!ext2_check_folio(folio, quiet, kaddr))
+ goto fail;
}
- return page;
+ *foliop = folio;
+ return kaddr;
fail:
- ext2_put_page(page, *page_addr);
+ folio_release_kmap(folio, kaddr);
return ERR_PTR(-EIO);
}
@@ -250,7 +242,7 @@ ext2_validate_entry(char *base, unsigned offset, unsigned mask)
break;
p = ext2_next_entry(p);
}
- return (char *)p - base;
+ return offset_in_page(p);
}
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
@@ -271,7 +263,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
- bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
+ bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
bool has_filetype;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
@@ -281,23 +273,24 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE);
for ( ; n < npages; n++, offset = 0) {
- char *kaddr, *limit;
ext2_dirent *de;
- struct page *page = ext2_get_page(inode, n, 0, (void **)&kaddr);
+ struct folio *folio;
+ char *kaddr = ext2_get_folio(inode, n, 0, &folio);
+ char *limit;
- if (IS_ERR(page)) {
+ if (IS_ERR(kaddr)) {
ext2_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
ctx->pos += PAGE_SIZE - offset;
- return PTR_ERR(page);
+ return PTR_ERR(kaddr);
}
if (unlikely(need_revalidate)) {
if (offset) {
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode_query_iversion(inode);
+ *(u64 *)file->private_data = inode_query_iversion(inode);
need_revalidate = false;
}
de = (ext2_dirent *)(kaddr+offset);
@@ -306,7 +299,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
if (de->rec_len == 0) {
ext2_error(sb, __func__,
"zero-length directory entry");
- ext2_put_page(page, kaddr);
+ folio_release_kmap(folio, de);
return -EIO;
}
if (de->inode) {
@@ -318,13 +311,13 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
if (!dir_emit(ctx, de->name, de->name_len,
le32_to_cpu(de->inode),
d_type)) {
- ext2_put_page(page, kaddr);
+ folio_release_kmap(folio, de);
return 0;
}
}
ctx->pos += ext2_rec_len_from_disk(de->rec_len);
}
- ext2_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
@@ -337,64 +330,56 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*
- * On Success ext2_put_page() should be called on *res_page.
+ * On Success folio_release_kmap() should be called on *foliop.
*
- * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
- * the rules documented in kmap_local_page()/kunmap_local().
+ * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
+ * according to the rules documented in kmap_local_folio()/kunmap_local().
*
- * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
- * should be treated as a call to ext2_get_page() for nesting purposes.
+ * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
+ * and should be treated as a call to ext2_get_folio() for nesting
+ * purposes.
*/
struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
- const struct qstr *child, struct page **res_page,
- void **res_page_addr)
+ const struct qstr *child, struct folio **foliop)
{
const char *name = child->name;
int namelen = child->len;
unsigned reclen = EXT2_DIR_REC_LEN(namelen);
unsigned long start, n;
unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
struct ext2_inode_info *ei = EXT2_I(dir);
ext2_dirent * de;
- void *page_addr;
if (npages == 0)
goto out;
- /* OFFSET_CACHE */
- *res_page = NULL;
- *res_page_addr = NULL;
-
start = ei->i_dir_start_lookup;
if (start >= npages)
start = 0;
n = start;
do {
- char *kaddr;
- page = ext2_get_page(dir, n, 0, &page_addr);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ char *kaddr = ext2_get_folio(dir, n, 0, foliop);
+ if (IS_ERR(kaddr))
+ return ERR_CAST(kaddr);
- kaddr = page_addr;
de = (ext2_dirent *) kaddr;
kaddr += ext2_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
ext2_error(dir->i_sb, __func__,
"zero-length directory entry");
- ext2_put_page(page, page_addr);
+ folio_release_kmap(*foliop, de);
goto out;
}
if (ext2_match(namelen, name, de))
goto found;
de = ext2_next_entry(de);
}
- ext2_put_page(page, page_addr);
+ folio_release_kmap(*foliop, kaddr);
if (++n >= npages)
n = 0;
- /* next page is past the blocks we've got */
+ /* next folio is past the blocks we've got */
if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
ext2_error(dir->i_sb, __func__,
"dir %lu size %lld exceeds block count %llu",
@@ -407,78 +392,82 @@ out:
return ERR_PTR(-ENOENT);
found:
- *res_page = page;
- *res_page_addr = page_addr;
ei->i_dir_start_lookup = n;
return de;
}
-/**
+/*
* Return the '..' directory entry and the page in which the entry was found
* (as a parameter - p).
*
- * On Success ext2_put_page() should be called on *p.
+ * On Success folio_release_kmap() should be called on *foliop.
*
- * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
- * the rules documented in kmap_local_page()/kunmap_local().
+ * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
+ * according to the rules documented in kmap_local_folio()/kunmap_local().
*
- * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
- * should be treated as a call to ext2_get_page() for nesting purposes.
+ * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
+ * and should be treated as a call to ext2_get_folio() for nesting
+ * purposes.
*/
-struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p,
- void **pa)
+struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct folio **foliop)
{
- void *page_addr;
- struct page *page = ext2_get_page(dir, 0, 0, &page_addr);
- ext2_dirent *de = NULL;
-
- if (!IS_ERR(page)) {
- de = ext2_next_entry((ext2_dirent *) page_addr);
- *p = page;
- *pa = page_addr;
- }
- return de;
+ ext2_dirent *de = ext2_get_folio(dir, 0, 0, foliop);
+
+ if (!IS_ERR(de))
+ return ext2_next_entry(de);
+ return NULL;
}
int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
{
struct ext2_dir_entry_2 *de;
- struct page *page;
- void *page_addr;
-
- de = ext2_find_entry(dir, child, &page, &page_addr);
+ struct folio *folio;
+
+ de = ext2_find_entry(dir, child, &folio);
if (IS_ERR(de))
return PTR_ERR(de);
*ino = le32_to_cpu(de->inode);
- ext2_put_page(page, page_addr);
+ folio_release_kmap(folio, de);
return 0;
}
-static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- return __block_write_begin(page, pos, len, ext2_get_block);
+ return __block_write_begin(folio, pos, len, ext2_get_block);
}
-void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
- struct page *page, void *page_addr, struct inode *inode,
- int update_times)
+static int ext2_handle_dirsync(struct inode *dir)
{
- loff_t pos = page_offset(page) +
- (char *) de - (char *) page_addr;
+ int err;
+
+ err = filemap_write_and_wait(dir->i_mapping);
+ if (!err)
+ err = sync_inode_metadata(dir, 1);
+ return err;
+}
+
+int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
+ struct folio *folio, struct inode *inode, bool update_times)
+{
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
unsigned len = ext2_rec_len_from_disk(de->rec_len);
int err;
- lock_page(page);
- err = ext2_prepare_chunk(page, pos, len);
- BUG_ON(err);
+ folio_lock(folio);
+ err = ext2_prepare_chunk(folio, pos, len);
+ if (err) {
+ folio_unlock(folio);
+ return err;
+ }
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type(de, inode);
- err = ext2_commit_chunk(page, pos, len);
+ ext2_commit_chunk(folio, pos, len);
if (update_times)
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
+ return ext2_handle_dirsync(dir);
}
/*
@@ -492,8 +481,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
unsigned chunk_size = ext2_chunk_size(dir);
unsigned reclen = EXT2_DIR_REC_LEN(namelen);
unsigned short rec_len, name_len;
- struct page *page = NULL;
- void *page_addr = NULL;
+ struct folio *folio = NULL;
ext2_dirent * de;
unsigned long npages = dir_pages(dir);
unsigned long n;
@@ -502,22 +490,19 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
/*
* We take care of directory expansion in the same loop.
- * This code plays outside i_size, so it locks the page
+ * This code plays outside i_size, so it locks the folio
* to protect that region.
*/
for (n = 0; n <= npages; n++) {
- char *kaddr;
+ char *kaddr = ext2_get_folio(dir, n, 0, &folio);
char *dir_end;
- page = ext2_get_page(dir, n, 0, &page_addr);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
- lock_page(page);
- kaddr = page_addr;
+ if (IS_ERR(kaddr))
+ return PTR_ERR(kaddr);
+ folio_lock(folio);
dir_end = kaddr + ext2_last_byte(dir, n);
de = (ext2_dirent *)kaddr;
- kaddr += PAGE_SIZE - reclen;
+ kaddr += folio_size(folio) - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
@@ -544,16 +529,15 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
goto got_it;
de = (ext2_dirent *) ((char *) de + rec_len);
}
- unlock_page(page);
- ext2_put_page(page, page_addr);
+ folio_unlock(folio);
+ folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;
got_it:
- pos = page_offset(page) +
- (char *)de - (char *)page_addr;
- err = ext2_prepare_chunk(page, pos, rec_len);
+ pos = folio_pos(folio) + offset_in_folio(folio, de);
+ err = ext2_prepare_chunk(folio, pos, rec_len);
if (err)
goto out_unlock;
if (de->inode) {
@@ -566,17 +550,17 @@ got_it:
memcpy(de->name, name, namelen);
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type (de, inode);
- err = ext2_commit_chunk(page, pos, rec_len);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ ext2_commit_chunk(folio, pos, rec_len);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
+ err = ext2_handle_dirsync(dir);
/* OFFSET_CACHE */
out_put:
- ext2_put_page(page, page_addr);
-out:
+ folio_release_kmap(folio, de);
return err;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
goto out_put;
}
@@ -584,43 +568,47 @@ out_unlock:
* ext2_delete_entry deletes a directory entry by merging it with the
* previous entry. Page is up-to-date.
*/
-int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
- char *kaddr)
+int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
- unsigned to = ((char *)dir - kaddr) +
- ext2_rec_len_from_disk(dir->rec_len);
+ struct inode *inode = folio->mapping->host;
+ size_t from, to;
+ char *kaddr;
loff_t pos;
- ext2_dirent * pde = NULL;
- ext2_dirent * de = (ext2_dirent *) (kaddr + from);
+ ext2_dirent *de, *pde = NULL;
int err;
+ from = offset_in_folio(folio, dir);
+ to = from + ext2_rec_len_from_disk(dir->rec_len);
+ kaddr = (char *)dir - from;
+ from &= ~(ext2_chunk_size(inode)-1);
+ de = (ext2_dirent *)(kaddr + from);
+
while ((char*)de < (char*)dir) {
if (de->rec_len == 0) {
ext2_error(inode->i_sb, __func__,
"zero-length directory entry");
- err = -EIO;
- goto out;
+ return -EIO;
}
pde = de;
de = ext2_next_entry(de);
}
if (pde)
- from = (char *)pde - kaddr;
- pos = page_offset(page) + from;
- lock_page(page);
- err = ext2_prepare_chunk(page, pos, to - from);
- BUG_ON(err);
+ from = offset_in_folio(folio, pde);
+ pos = folio_pos(folio) + from;
+ folio_lock(folio);
+ err = ext2_prepare_chunk(folio, pos, to - from);
+ if (err) {
+ folio_unlock(folio);
+ return err;
+ }
if (pde)
pde->rec_len = ext2_rec_len_to_disk(to - from);
dir->inode = 0;
- err = ext2_commit_chunk(page, pos, to - from);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ ext2_commit_chunk(folio, pos, to - from);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(inode);
-out:
- return err;
+ return ext2_handle_dirsync(inode);
}
/*
@@ -628,21 +616,21 @@ out:
*/
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
- struct page *page = grab_cache_page(inode->i_mapping, 0);
+ struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
unsigned chunk_size = ext2_chunk_size(inode);
struct ext2_dir_entry_2 * de;
int err;
void *kaddr;
- if (!page)
- return -ENOMEM;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- err = ext2_prepare_chunk(page, 0, chunk_size);
+ err = ext2_prepare_chunk(folio, 0, chunk_size);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto fail;
}
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_folio(folio, 0);
memset(kaddr, 0, chunk_size);
de = (struct ext2_dir_entry_2 *)kaddr;
de->name_len = 1;
@@ -657,34 +645,30 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
de->inode = cpu_to_le32(parent->i_ino);
memcpy (de->name, "..\0", 4);
ext2_set_de_type (de, inode);
- kunmap_atomic(kaddr);
- err = ext2_commit_chunk(page, 0, chunk_size);
+ kunmap_local(kaddr);
+ ext2_commit_chunk(folio, 0, chunk_size);
+ err = ext2_handle_dirsync(inode);
fail:
- put_page(page);
+ folio_put(folio);
return err;
}
/*
* routine to check that the specified directory is empty (for rmdir)
*/
-int ext2_empty_dir (struct inode * inode)
+int ext2_empty_dir(struct inode *inode)
{
- void *page_addr = NULL;
- struct page *page = NULL;
+ struct folio *folio;
+ char *kaddr;
unsigned long i, npages = dir_pages(inode);
- int dir_has_error = 0;
for (i = 0; i < npages; i++) {
- char *kaddr;
- ext2_dirent * de;
- page = ext2_get_page(inode, i, dir_has_error, &page_addr);
+ ext2_dirent *de;
- if (IS_ERR(page)) {
- dir_has_error = 1;
- continue;
- }
+ kaddr = ext2_get_folio(inode, i, 0, &folio);
+ if (IS_ERR(kaddr))
+ return 0;
- kaddr = page_addr;
de = (ext2_dirent *)kaddr;
kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);
@@ -710,17 +694,39 @@ int ext2_empty_dir (struct inode * inode)
}
de = ext2_next_entry(de);
}
- ext2_put_page(page, page_addr);
+ folio_release_kmap(folio, kaddr);
}
return 1;
not_empty:
- ext2_put_page(page, page_addr);
+ folio_release_kmap(folio, kaddr);
+ return 0;
+}
+
+static int ext2_dir_open(struct inode *inode, struct file *file)
+{
+ file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
+ if (!file->private_data)
+ return -ENOMEM;
return 0;
}
+static int ext2_dir_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static loff_t ext2_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return generic_llseek_cookie(file, offset, whence,
+ (u64 *)file->private_data);
+}
+
const struct file_operations ext2_dir_operations = {
- .llseek = generic_file_llseek,
+ .open = ext2_dir_open,
+ .release = ext2_dir_release,
+ .llseek = ext2_dir_llseek,
.read = generic_read_dir,
.iterate_shared = ext2_readdir,
.unlocked_ioctl = ext2_ioctl,
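
The directory conversion above repeatedly pairs ext2_get_folio() with folio_release_kmap(). A condensed sketch of that access pattern, assuming dir_pages() and ext2_get_folio() as used in fs/ext2/dir.c; scan_block() is a hypothetical callback standing in for the real entry walk:

/*
 * Sketch only: the kmap/release pairing used throughout the converted
 * directory code.  ext2_get_folio() returns the kmapped address of
 * block n (or an ERR_PTR); folio_release_kmap() drops both the kmap
 * and the folio reference in one call.
 */
static int ext2_for_each_dir_block(struct inode *dir,
                int (*scan_block)(struct inode *dir, char *kaddr, unsigned long n))
{
        unsigned long n, npages = dir_pages(dir);
        struct folio *folio;
        char *kaddr;
        int err = 0;

        for (n = 0; n < npages; n++) {
                kaddr = ext2_get_folio(dir, n, 0, &folio);
                if (IS_ERR(kaddr))
                        return PTR_ERR(kaddr);

                err = scan_block(dir, kaddr, n);

                folio_release_kmap(folio, kaddr);
                if (err)
                        break;
        }
        return err;
}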
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 3be9dd6412b7..cf97b76e9fd3 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -70,10 +70,7 @@ struct mb_cache;
* second extended-fs super-block data in memory
*/
struct ext2_sb_info {
- unsigned long s_frag_size; /* Size of a fragment in bytes */
- unsigned long s_frags_per_block;/* Number of fragments per block */
unsigned long s_inodes_per_block;/* Number of inodes per block */
- unsigned long s_frags_per_group;/* Number of fragments in a group */
unsigned long s_blocks_per_group;/* Number of blocks in a group */
unsigned long s_inodes_per_group;/* Number of inodes in a group */
unsigned long s_itb_per_group; /* Number of inode table blocks per group */
@@ -118,6 +115,7 @@ struct ext2_sb_info {
spinlock_t s_lock;
struct mb_cache *s_ea_block_cache;
struct dax_device *s_daxdev;
+ u64 s_dax_part_off;
};
static inline spinlock_t *
@@ -177,8 +175,9 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
* Macro-instructions used to manage several block sizes
*/
#define EXT2_MIN_BLOCK_SIZE 1024
-#define EXT2_MAX_BLOCK_SIZE 4096
+#define EXT2_MAX_BLOCK_SIZE 65536
#define EXT2_MIN_BLOCK_LOG_SIZE 10
+#define EXT2_MAX_BLOCK_LOG_SIZE 16
#define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
#define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
@@ -187,15 +186,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
/*
- * Macro-instructions used to manage fragments
- */
-#define EXT2_MIN_FRAG_SIZE 1024
-#define EXT2_MAX_FRAG_SIZE 4096
-#define EXT2_MIN_FRAG_LOG_SIZE 10
-#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
-#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
-
-/*
* Structure of a blocks group descriptor
*/
struct ext2_group_desc
@@ -378,6 +368,7 @@ struct ext2_inode {
#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
+#define EXT2_MOUNT_ERRORS_MASK 0x000070
#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
@@ -409,6 +400,12 @@ struct ext2_inode {
#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE
/*
+ * Allocation flags
+ */
+#define EXT2_ALLOC_NORESERVE 0x1 /* Do not use reservation
+ * window for allocation */
+
+/*
* Structure of the super block
*/
struct ext2_super_block {
@@ -678,7 +675,7 @@ struct ext2_inode_info {
struct inode vfs_inode;
struct list_head i_orphan; /* unlinked but open inodes */
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
};
@@ -705,13 +702,11 @@ static inline struct ext2_inode_info *EXT2_I(struct inode *inode)
/* balloc.c */
extern int ext2_bg_has_super(struct super_block *sb, int group);
extern unsigned long ext2_bg_num_gdb(struct super_block *sb, int group);
-extern ext2_fsblk_t ext2_new_block(struct inode *, unsigned long, int *);
-extern ext2_fsblk_t ext2_new_blocks(struct inode *, unsigned long,
- unsigned long *, int *);
+extern ext2_fsblk_t ext2_new_blocks(struct inode *, ext2_fsblk_t,
+ unsigned long *, int *, unsigned int);
extern int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
unsigned int count);
-extern void ext2_free_blocks (struct inode *, unsigned long,
- unsigned long);
+extern void ext2_free_blocks(struct inode *, ext2_fsblk_t, unsigned long);
extern unsigned long ext2_count_free_blocks (struct super_block *);
extern unsigned long ext2_count_dirs (struct super_block *);
extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
@@ -723,23 +718,17 @@ extern void ext2_init_block_alloc_info(struct inode *);
extern void ext2_rsv_window_add(struct super_block *sb, struct ext2_reserve_window_node *rsv);
/* dir.c */
-extern int ext2_add_link (struct dentry *, struct inode *);
-extern int ext2_inode_by_name(struct inode *dir,
+int ext2_add_link(struct dentry *, struct inode *);
+int ext2_inode_by_name(struct inode *dir,
const struct qstr *child, ino_t *ino);
-extern int ext2_make_empty(struct inode *, struct inode *);
-extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
- struct page **, void **res_page_addr);
-extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
- char *kaddr);
-extern int ext2_empty_dir (struct inode *);
-extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
-extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
- struct inode *, int);
-static inline void ext2_put_page(struct page *page, void *page_addr)
-{
- kunmap_local(page_addr);
- put_page(page);
-}
+int ext2_make_empty(struct inode *, struct inode *);
+struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
+ struct folio **foliop);
+int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct folio *folio);
+int ext2_empty_dir(struct inode *);
+struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct folio **foliop);
+int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
+ struct folio *folio, struct inode *inode, bool update_times);
/* ialloc.c */
extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
@@ -751,18 +740,19 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
extern struct inode *ext2_iget (struct super_block *, unsigned long);
extern int ext2_write_inode (struct inode *, struct writeback_control *);
extern void ext2_evict_inode(struct inode *);
+void ext2_write_failed(struct address_space *mapping, loff_t to);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern int ext2_setattr (struct user_namespace *, struct dentry *, struct iattr *);
-extern int ext2_getattr (struct user_namespace *, const struct path *,
+extern int ext2_setattr (struct mnt_idmap *, struct dentry *, struct iattr *);
+extern int ext2_getattr (struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
extern void ext2_set_inode_flags(struct inode *inode);
extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
-extern int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-extern int ext2_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
+extern int ext2_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+extern int ext2_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -794,7 +784,6 @@ extern const struct file_operations ext2_file_operations;
/* inode.c */
extern void ext2_set_file_ops(struct inode *inode);
extern const struct address_space_operations ext2_aops;
-extern const struct address_space_operations ext2_nobh_aops;
extern const struct iomap_ops ext2_iomap_ops;
/* namei.c */
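
The new EXT2_MOUNT_ERRORS_MASK (0x70) groups the three mutually exclusive errors= policy bits so they can be cleared as a unit before the selected one is set; it is consumed by the errors= enum handling in the fs/ext2/super.c changes further down. A self-contained sketch of the intent, with ext2_apply_error_policy() as a made-up helper:

/*
 * Sketch: clear the whole errors= group (CONT | RO | PANIC) before
 * applying the policy the user selected.
 */
static unsigned int ext2_apply_error_policy(unsigned int mount_opt,
                                            unsigned int policy)
{
        mount_opt &= ~EXT2_MOUNT_ERRORS_MASK;   /* 0x70: all three bits */
        mount_opt |= policy;                    /* one EXT2_MOUNT_ERRORS_* bit */
        return mount_opt;
}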
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index eb97aa3d700e..76bddce462fc 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -25,9 +25,11 @@
#include <linux/quotaops.h>
#include <linux/iomap.h>
#include <linux/uio.h>
+#include <linux/buffer_head.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
+#include "trace.h"
#ifdef CONFIG_FS_DAX
static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -101,7 +103,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
}
filemap_invalidate_lock_shared(inode->i_mapping);
- ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
+ ret = dax_iomap_fault(vmf, 0, NULL, NULL, &ext2_iomap_ops);
filemap_invalidate_unlock_shared(inode->i_mapping);
if (write)
@@ -120,17 +122,19 @@ static const struct vm_operations_struct ext2_dax_vm_ops = {
.pfn_mkwrite = ext2_dax_fault,
};
-static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ext2_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
if (!IS_DAX(file_inode(file)))
- return generic_file_mmap(file, vma);
+ return generic_file_mmap_prepare(desc);
file_accessed(file);
- vma->vm_ops = &ext2_dax_vm_ops;
+ desc->vm_ops = &ext2_dax_vm_ops;
return 0;
}
#else
-#define ext2_file_mmap generic_file_mmap
+#define ext2_file_mmap_prepare generic_file_mmap_prepare
#endif
/*
@@ -153,7 +157,7 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int ret;
struct super_block *sb = file->f_mapping->host->i_sb;
- ret = generic_file_fsync(file, start, end, datasync);
+ ret = generic_buffers_fsync(file, start, end, datasync);
if (ret == -EIO)
/* We don't really know where the IO error happened... */
ext2_error(sb, __func__,
@@ -161,12 +165,130 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return ret;
}
+static ssize_t ext2_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+
+ trace_ext2_dio_read_begin(iocb, to, 0);
+ inode_lock_shared(inode);
+ ret = iomap_dio_rw(iocb, to, &ext2_iomap_ops, NULL, 0, NULL, 0);
+ inode_unlock_shared(inode);
+ trace_ext2_dio_read_end(iocb, to, ret);
+
+ return ret;
+}
+
+static int ext2_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (error)
+ goto out;
+
+ /*
+ * If we are extending the file, we have to update i_size here before
+ * page cache gets invalidated in iomap_dio_rw(). This prevents racing
+ * buffered reads from zeroing out too much from page cache pages.
+ * Note that all extending writes always happen synchronously with
+ * Note that all extending writes always happen synchronously with
+ * inode lock held by ext2_dio_write_iter(). So it is safe to update
+ * inode size here for extending file writes.
+ */
+ pos += size;
+ if (pos > i_size_read(inode)) {
+ i_size_write(inode, pos);
+ mark_inode_dirty(inode);
+ }
+out:
+ trace_ext2_dio_write_endio(iocb, size, error);
+ return error;
+}
+
+static const struct iomap_dio_ops ext2_dio_write_ops = {
+ .end_io = ext2_dio_write_end_io,
+};
+
+static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+ unsigned int flags = 0;
+ unsigned long blocksize = inode->i_sb->s_blocksize;
+ loff_t offset = iocb->ki_pos;
+ loff_t count = iov_iter_count(from);
+ ssize_t status = 0;
+
+ trace_ext2_dio_write_begin(iocb, from, 0);
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out_unlock;
+
+ ret = kiocb_modified(iocb);
+ if (ret)
+ goto out_unlock;
+
+ /* use IOMAP_DIO_FORCE_WAIT for unaligned or extending writes */
+ if (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode) ||
+ (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(from), blocksize)))
+ flags |= IOMAP_DIO_FORCE_WAIT;
+
+ ret = iomap_dio_rw(iocb, from, &ext2_iomap_ops, &ext2_dio_write_ops,
+ flags, NULL, 0);
+
+ /* ENOTBLK is magic return value for fallback to buffered-io */
+ if (ret == -ENOTBLK)
+ ret = 0;
+
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ ext2_write_failed(inode->i_mapping, offset + count);
+
+ /* handle case for partial write and for fallback to buffered write */
+ if (ret >= 0 && iov_iter_count(from)) {
+ loff_t pos, endbyte;
+ int ret2;
+
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ pos = iocb->ki_pos;
+ status = generic_perform_write(iocb, from);
+ if (unlikely(status < 0)) {
+ ret = status;
+ goto out_unlock;
+ }
+
+ ret += status;
+ endbyte = pos + status - 1;
+ ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
+ endbyte);
+ if (!ret2)
+ invalidate_mapping_pages(inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+ if (ret > 0)
+ generic_write_sync(iocb, ret);
+ }
+
+out_unlock:
+ inode_unlock(inode);
+ if (status)
+ trace_ext2_dio_write_buff_end(iocb, from, status);
+ trace_ext2_dio_write_end(iocb, from, ret);
+ return ret;
+}
+
static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
#ifdef CONFIG_FS_DAX
if (IS_DAX(iocb->ki_filp->f_mapping->host))
return ext2_dax_read_iter(iocb, to);
#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext2_dio_read_iter(iocb, to);
+
return generic_file_read_iter(iocb, to);
}
@@ -176,9 +298,18 @@ static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_DAX(iocb->ki_filp->f_mapping->host))
return ext2_dax_write_iter(iocb, from);
#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext2_dio_write_iter(iocb, from);
+
return generic_file_write_iter(iocb, from);
}
+static int ext2_file_open(struct inode *inode, struct file *filp)
+{
+ filp->f_mode |= FMODE_CAN_ODIRECT;
+ return dquot_file_open(inode, filp);
+}
+
const struct file_operations ext2_file_operations = {
.llseek = generic_file_llseek,
.read_iter = ext2_file_read_iter,
@@ -187,12 +318,12 @@ const struct file_operations ext2_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .mmap = ext2_file_mmap,
- .open = dquot_file_open,
+ .mmap_prepare = ext2_file_mmap_prepare,
+ .open = ext2_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
.get_unmapped_area = thp_get_unmapped_area,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
};
@@ -200,7 +331,7 @@ const struct inode_operations ext2_file_inode_operations = {
.listxattr = ext2_listxattr,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
- .get_acl = ext2_get_acl,
+ .get_inode_acl = ext2_get_acl,
.set_acl = ext2_set_acl,
.fiemap = ext2_fiemap,
.fileattr_get = ext2_fileattr_get,
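
With FMODE_CAN_ODIRECT set in ext2_file_open() and the new ext2_dio_read_iter()/ext2_dio_write_iter() paths, O_DIRECT on ext2 now goes through iomap. A userspace sketch (not part of the patch) that exercises it; the mount point path and the 4096-byte block size are assumptions, and misaligned or extending writes take the IOMAP_DIO_FORCE_WAIT / buffered fallback paths shown above:

/* Userspace sketch: aligned O_DIRECT write to a file on an ext2 mount. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const size_t blksz = 4096;      /* assumed filesystem block size */
        void *buf;
        int fd;

        if (posix_memalign(&buf, blksz, blksz))
                return 1;
        memset(buf, 'x', blksz);

        fd = open("/mnt/ext2/testfile", O_CREAT | O_WRONLY | O_DIRECT, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (pwrite(fd, buf, blksz, 0) != (ssize_t)blksz)
                perror("pwrite");
        close(fd);
        free(buf);
        return 0;
}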
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index df14e750e9fe..fdf63e9c6e7c 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -170,11 +170,6 @@ static void ext2_preread_inode(struct inode *inode)
unsigned long offset;
unsigned long block;
struct ext2_group_desc * gdp;
- struct backing_dev_info *bdi;
-
- bdi = inode_to_bdi(inode);
- if (bdi_rw_congested(bdi))
- return;
block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
gdp = ext2_get_group_desc(inode->i_sb, block_group, NULL);
@@ -278,12 +273,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
if ((parent == d_inode(sb->s_root)) ||
(EXT2_I(parent)->i_flags & EXT2_TOPDIR_FL)) {
- struct ext2_group_desc *best_desc = NULL;
int best_ndir = inodes_per_group;
int best_group = -1;
- group = prandom_u32();
- parent_group = (unsigned)group % ngroups;
+ parent_group = get_random_u32_below(ngroups);
for (i = 0; i < ngroups; i++) {
group = (parent_group + i) % ngroups;
desc = ext2_get_group_desc (sb, group, NULL);
@@ -297,10 +290,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
continue;
best_group = group;
best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
- best_desc = desc;
}
if (best_group >= 0) {
- desc = best_desc;
group = best_group;
goto found;
}
@@ -551,11 +542,11 @@ got:
inode->i_uid = current_fsuid();
inode->i_gid = dir->i_gid;
} else
- inode_init_owner(&init_user_ns, inode, dir, mode);
+ inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_flags =
ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
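
The Orlov allocator hunk above replaces the open-coded prandom_u32() % ngroups with get_random_u32_below(). A minimal standalone sketch of the idea; pick_start_group() is a hypothetical wrapper, not a function in the patch:

/* Sketch: choosing a starting block group without modulo bias. */
#include <linux/random.h>

static unsigned int pick_start_group(unsigned int ngroups)
{
        /*
         * The old prandom_u32() % ngroups form is biased whenever
         * ngroups does not divide 2^32.  get_random_u32_below(ngroups)
         * returns a uniformly distributed value in [0, ngroups).
         */
        return get_random_u32_below(ngroups);
}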
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 333fa62661d5..dbfe9098a124 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -56,7 +56,7 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode)
static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
-static void ext2_write_failed(struct address_space *mapping, loff_t to)
+void ext2_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
@@ -385,12 +385,16 @@ ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
}
/**
- * ext2_alloc_blocks: multiple allocate blocks needed for a branch
- * @indirect_blks: the number of blocks need to allocate for indirect
- * blocks
- * @blks: the number of blocks need to allocate for direct blocks
- * @new_blocks: on return it will store the new block numbers for
- * the indirect blocks(if needed) and the first direct block,
+ * ext2_alloc_blocks: Allocate multiple blocks needed for a branch.
+ * @inode: Owner.
+ * @goal: Preferred place for allocation.
+ * @indirect_blks: The number of blocks needed to allocate for indirect blocks.
+ * @blks: The number of blocks needed to allocate for direct blocks.
+ * @new_blocks: On return it will store the new block numbers for
+ * the indirect blocks (if needed) and the first direct block.
+ * @err: Error pointer.
+ *
+ * Return: Number of blocks allocated.
*/
static int ext2_alloc_blocks(struct inode *inode,
ext2_fsblk_t goal, int indirect_blks, int blks,
@@ -415,7 +419,7 @@ static int ext2_alloc_blocks(struct inode *inode,
while (1) {
count = target;
/* allocating blocks for indirect blocks and direct blocks */
- current_block = ext2_new_blocks(inode,goal,&count,err);
+ current_block = ext2_new_blocks(inode, goal, &count, err, 0);
if (*err)
goto failed_out;
@@ -595,7 +599,7 @@ static void ext2_splice_branch(struct inode *inode,
if (where->bh)
mark_buffer_dirty_inode(where->bh, inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
@@ -750,7 +754,7 @@ static int ext2_get_blocks(struct inode *inode,
*/
err = sb_issue_zeroout(inode->i_sb,
le32_to_cpu(chain[depth-1].key), count,
- GFP_NOFS);
+ GFP_KERNEL);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
@@ -809,24 +813,52 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
bool new = false, boundary = false;
u32 bno;
int ret;
+ bool create = flags & IOMAP_WRITE;
+
+ /*
+ * For writes that could fill holes inside i_size on a
+ * DIO_SKIP_HOLES filesystem we forbid block creations: only
+ * overwrites are permitted.
+ */
+ if ((flags & IOMAP_DIRECT) &&
+ (first_block << blkbits) < i_size_read(inode))
+ create = 0;
+
+ /*
+ * Writes that span EOF might trigger an IO size update on completion,
+ * so consider them to be dirty for the purposes of O_DSYNC even if
+ * no other metadata changes are pending or have been made here.
+ */
+ if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
+ iomap->flags |= IOMAP_F_DIRTY;
ret = ext2_get_blocks(inode, first_block, max_blocks,
- &bno, &new, &boundary, flags & IOMAP_WRITE);
+ &bno, &new, &boundary, create);
if (ret < 0)
return ret;
iomap->flags = 0;
- iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = (u64)first_block << blkbits;
- iomap->dax_dev = sbi->s_daxdev;
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = sbi->s_daxdev;
+ else
+ iomap->bdev = inode->i_sb->s_bdev;
if (ret == 0) {
+ /*
+ * Switch to buffered-io for writing to holes in a non-extent
+ * based filesystem to avoid the stale data exposure problem.
+ */
+ if (!create && (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT))
+ return -ENOTBLK;
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
iomap->length = 1 << blkbits;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = (u64)bno << blkbits;
+ if (flags & IOMAP_DAX)
+ iomap->addr += sbi->s_dax_part_off;
iomap->length = (u64)ret << blkbits;
iomap->flags |= IOMAP_F_MERGED;
}
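
The hole handling added to ext2_iomap_begin() above, together with the -ENOTBLK check added to ext2_iomap_end() in the next hunk, amounts to a small direct-write policy. A condensed restatement for readability only; ext2_dio_policy() is a hypothetical helper and deliberately coarser than the real checks:

/*
 * Sketch: ext2's direct-write policy in one place.  Returning -ENOTBLK
 * makes iomap_dio_rw() bail out so ext2_dio_write_iter() retries the
 * range with buffered I/O.
 */
static int ext2_dio_policy(bool direct_write, bool inside_i_size,
                           bool mapped, ssize_t written)
{
        if (direct_write && inside_i_size && !mapped)
                return -ENOTBLK;        /* never allocate over a hole inside i_size */
        if (direct_write && written == 0)
                return -ENOTBLK;        /* failed/empty DIO write: fall back to buffered */
        return 0;
}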
@@ -840,6 +872,13 @@ static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
ssize_t written, unsigned flags, struct iomap *iomap)
{
+ /*
+ * Switch to buffered-io in case of any error.
+ * Blocks allocated can be used by the buffered-io path.
+ */
+ if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE) && written == 0)
+ return -ENOTBLK;
+
if (iomap->type == IOMAP_MAPPED &&
written < length &&
(flags & IOMAP_WRITE))
@@ -856,23 +895,28 @@ int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
int ret;
+ loff_t i_size;
inode_lock(inode);
- len = min_t(u64, len, i_size_read(inode));
+ i_size = i_size_read(inode);
+ /*
+ * iomap_fiemap() returns EINVAL for 0 length. Make sure we don't trim
+ * length to 0 but still trim the range as much as possible since
+ * ext2_get_blocks() iterates unmapped space block by block which is
+ * slow.
+ */
+ if (i_size == 0)
+ i_size = 1;
+ len = min_t(u64, len, i_size);
ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
inode_unlock(inode);
return ret;
}
-static int ext2_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, ext2_get_block, wbc);
-}
-
-static int ext2_readpage(struct file *file, struct page *page)
+static int ext2_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, ext2_get_block);
+ return mpage_read_folio(folio, ext2_get_block);
}
static void ext2_readahead(struct readahead_control *rac)
@@ -881,72 +925,35 @@ static void ext2_readahead(struct readahead_control *rac)
}
static int
-ext2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ext2_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- ext2_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
return ret;
}
-static int ext2_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+static int ext2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
ext2_write_failed(mapping, pos + len);
return ret;
}
-static int
-ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- int ret;
-
- ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
- ext2_get_block);
- if (ret < 0)
- ext2_write_failed(mapping, pos + len);
- return ret;
-}
-
-static int ext2_nobh_writepage(struct page *page,
- struct writeback_control *wbc)
-{
- return nobh_writepage(page, ext2_get_block, wbc);
-}
-
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,ext2_get_block);
}
-static ssize_t
-ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- size_t count = iov_iter_count(iter);
- loff_t offset = iocb->ki_pos;
- ssize_t ret;
-
- ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
- if (ret < 0 && iov_iter_rw(iter) == WRITE)
- ext2_write_failed(mapping, offset + count);
- return ret;
-}
-
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
@@ -962,39 +969,22 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
}
const struct address_space_operations ext2_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = ext2_readpage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = ext2_read_folio,
.readahead = ext2_readahead,
- .writepage = ext2_writepage,
.write_begin = ext2_write_begin,
.write_end = ext2_write_end,
.bmap = ext2_bmap,
- .direct_IO = ext2_direct_IO,
.writepages = ext2_writepages,
- .migratepage = buffer_migrate_page,
+ .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
-};
-
-const struct address_space_operations ext2_nobh_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = ext2_readpage,
- .readahead = ext2_readahead,
- .writepage = ext2_nobh_writepage,
- .write_begin = ext2_nobh_write_begin,
- .write_end = nobh_write_end,
- .bmap = ext2_bmap,
- .direct_IO = ext2_direct_IO,
- .writepages = ext2_writepages,
- .migratepage = buffer_migrate_page,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
static const struct address_space_operations ext2_dax_aops = {
.writepages = ext2_dax_writepages,
- .direct_IO = noop_direct_IO,
- .set_page_dirty = __set_page_dirty_no_writeback,
- .invalidatepage = noop_invalidatepage,
+ .dirty_folio = noop_dirty_folio,
};
/*
@@ -1105,8 +1095,8 @@ no_top:
*/
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
- unsigned long block_to_free = 0, count = 0;
- unsigned long nr;
+ ext2_fsblk_t block_to_free = 0, count = 0;
+ ext2_fsblk_t nr;
for ( ; p < q ; p++) {
nr = le32_to_cpu(*p);
@@ -1146,7 +1136,7 @@ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
struct buffer_head * bh;
- unsigned long nr;
+ ext2_fsblk_t nr;
if (depth--) {
int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
@@ -1296,13 +1286,9 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
inode_dio_wait(inode);
- if (IS_DAX(inode)) {
- error = iomap_zero_range(inode, newsize,
- PAGE_ALIGN(newsize) - newsize, NULL,
- &ext2_iomap_ops);
- } else if (test_opt(inode->i_sb, NOBH))
- error = nobh_truncate_page(inode->i_mapping,
- newsize, ext2_get_block);
+ if (IS_DAX(inode))
+ error = dax_truncate_page(inode, newsize, NULL,
+ &ext2_iomap_ops);
else
error = block_truncate_page(inode->i_mapping,
newsize, ext2_get_block);
@@ -1314,7 +1300,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
__ext2_truncate_blocks(inode, newsize);
filemap_invalidate_unlock(inode->i_mapping);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (inode_needs_sync(inode)) {
sync_mapping_buffers(inode->i_mapping);
sync_inode_metadata(inode, 1);
@@ -1394,8 +1380,6 @@ void ext2_set_file_ops(struct inode *inode)
inode->i_fop = &ext2_file_operations;
if (IS_DAX(inode))
inode->i_mapping->a_ops = &ext2_dax_aops;
- else if (test_opt(inode->i_sb, NOBH))
- inode->i_mapping->a_ops = &ext2_nobh_aops;
else
inode->i_mapping->a_ops = &ext2_aops;
}
@@ -1414,7 +1398,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ei = EXT2_I(inode);
@@ -1437,10 +1421,9 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
i_gid_write(inode, i_gid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le32_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
- inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
- inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+ inode_set_atime(inode, (signed)le32_to_cpu(raw_inode->i_atime), 0);
+ inode_set_ctime(inode, (signed)le32_to_cpu(raw_inode->i_ctime), 0);
+ inode_set_mtime(inode, (signed)le32_to_cpu(raw_inode->i_mtime), 0);
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes
@@ -1495,10 +1478,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext2_dir_inode_operations;
inode->i_fop = &ext2_dir_operations;
- if (test_opt(inode->i_sb, NOBH))
- inode->i_mapping->a_ops = &ext2_nobh_aops;
- else
- inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_mapping->a_ops = &ext2_aops;
} else if (S_ISLNK(inode->i_mode)) {
if (ext2_inode_is_fast_symlink(inode)) {
inode->i_link = (char *)ei->i_data;
@@ -1508,10 +1488,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
} else {
inode->i_op = &ext2_symlink_inode_operations;
inode_nohighmem(inode);
- if (test_opt(inode->i_sb, NOBH))
- inode->i_mapping->a_ops = &ext2_nobh_aops;
- else
- inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_mapping->a_ops = &ext2_aops;
}
} else {
inode->i_op = &ext2_special_inode_operations;
@@ -1547,7 +1524,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
if (IS_ERR(raw_inode))
return -EIO;
- /* For fields not not tracking in the in-memory inode,
+ /* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
if (ei->i_state & EXT2_STATE_NEW)
memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
@@ -1575,9 +1552,9 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(inode->i_size);
- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+ raw_inode->i_atime = cpu_to_le32(inode_get_atime_sec(inode));
+ raw_inode->i_ctime = cpu_to_le32(inode_get_ctime_sec(inode));
+ raw_inode->i_mtime = cpu_to_le32(inode_get_mtime_sec(inode));
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
@@ -1641,7 +1618,7 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
-int ext2_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int ext2_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -1663,28 +1640,28 @@ int ext2_getattr(struct user_namespace *mnt_userns, const struct path *path,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
-int ext2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int ext2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(&init_user_ns, dentry, iattr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (error)
return error;
- if (is_quota_modification(inode, iattr)) {
+ if (is_quota_modification(&nop_mnt_idmap, inode, iattr)) {
error = dquot_initialize(inode);
if (error)
return error;
}
- if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
- (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
- error = dquot_transfer(inode, iattr);
+ if (i_uid_needs_update(&nop_mnt_idmap, iattr, inode) ||
+ i_gid_needs_update(&nop_mnt_idmap, iattr, inode)) {
+ error = dquot_transfer(&nop_mnt_idmap, inode, iattr);
if (error)
return error;
}
@@ -1693,9 +1670,9 @@ int ext2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (error)
return error;
}
- setattr_copy(&init_user_ns, inode, iattr);
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
if (iattr->ia_valid & ATTR_MODE)
- error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
+ error = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);
mark_inode_dirty(inode);
return error;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index e8340bf09b10..c3fea55b8efa 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -18,7 +18,7 @@
#include <linux/uaccess.h>
#include <linux/fileattr.h>
-int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int ext2_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct ext2_inode_info *ei = EXT2_I(d_inode(dentry));
@@ -27,8 +27,8 @@ int ext2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
return 0;
}
-int ext2_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int ext2_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct ext2_inode_info *ei = EXT2_I(inode);
@@ -44,7 +44,7 @@ int ext2_fileattr_set(struct user_namespace *mnt_userns,
(fa->flags & EXT2_FL_USER_MODIFIABLE);
ext2_set_inode_flags(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
@@ -66,7 +66,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case EXT2_IOC_SETVERSION: {
__u32 generation;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
return -EPERM;
ret = mnt_want_write_file(filp);
if (ret)
@@ -77,7 +77,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
inode_lock(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode->i_generation = generation;
inode_unlock(inode);
@@ -99,7 +99,7 @@ setversion_out:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
return -EACCES;
if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 5f6b7560eb3f..bde617a66cec 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -99,7 +99,7 @@ struct dentry *ext2_get_parent(struct dentry *child)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext2_create (struct user_namespace * mnt_userns,
+static int ext2_create (struct mnt_idmap * idmap,
struct inode * dir, struct dentry * dentry,
umode_t mode, bool excl)
{
@@ -119,8 +119,8 @@ static int ext2_create (struct user_namespace * mnt_userns,
return ext2_add_nondir(dentry, inode);
}
-static int ext2_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int ext2_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode)
{
struct inode *inode = ext2_new_inode(dir, mode, NULL);
if (IS_ERR(inode))
@@ -128,12 +128,12 @@ static int ext2_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
ext2_set_file_ops(inode);
mark_inode_dirty(inode);
- d_tmpfile(dentry, inode);
+ d_tmpfile(file, inode);
unlock_new_inode(inode);
- return 0;
+ return finish_open_simple(file, 0);
}
-static int ext2_mknod (struct user_namespace * mnt_userns, struct inode * dir,
+static int ext2_mknod (struct mnt_idmap * idmap, struct inode * dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode * inode;
@@ -154,7 +154,7 @@ static int ext2_mknod (struct user_namespace * mnt_userns, struct inode * dir,
return err;
}
-static int ext2_symlink (struct user_namespace * mnt_userns, struct inode * dir,
+static int ext2_symlink (struct mnt_idmap * idmap, struct inode * dir,
struct dentry * dentry, const char * symname)
{
struct super_block * sb = dir->i_sb;
@@ -178,10 +178,7 @@ static int ext2_symlink (struct user_namespace * mnt_userns, struct inode * dir,
/* slow symlink */
inode->i_op = &ext2_symlink_inode_operations;
inode_nohighmem(inode);
- if (test_opt(inode->i_sb, NOBH))
- inode->i_mapping->a_ops = &ext2_nobh_aops;
- else
- inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_mapping->a_ops = &ext2_aops;
err = page_symlink(inode, symname, l);
if (err)
goto out_fail;
@@ -214,7 +211,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
if (err)
return err;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
@@ -228,15 +225,16 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
return err;
}
-static int ext2_mkdir(struct user_namespace * mnt_userns,
- struct inode * dir, struct dentry * dentry, umode_t mode)
+static struct dentry *ext2_mkdir(struct mnt_idmap * idmap,
+ struct inode * dir, struct dentry * dentry,
+ umode_t mode)
{
struct inode * inode;
int err;
err = dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
inode_inc_link_count(dir);
@@ -247,10 +245,7 @@ static int ext2_mkdir(struct user_namespace * mnt_userns,
inode->i_op = &ext2_dir_inode_operations;
inode->i_fop = &ext2_dir_operations;
- if (test_opt(inode->i_sb, NOBH))
- inode->i_mapping->a_ops = &ext2_nobh_aops;
- else
- inode->i_mapping->a_ops = &ext2_aops;
+ inode->i_mapping->a_ops = &ext2_aops;
inode_inc_link_count(inode);
@@ -264,7 +259,7 @@ static int ext2_mkdir(struct user_namespace * mnt_userns,
d_instantiate_new(dentry, inode);
out:
- return err;
+ return ERR_PTR(err);
out_fail:
inode_dec_link_count(inode);
@@ -275,30 +270,29 @@ out_dir:
goto out;
}
-static int ext2_unlink(struct inode * dir, struct dentry *dentry)
+static int ext2_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode * inode = d_inode(dentry);
- struct ext2_dir_entry_2 * de;
- struct page * page;
- void *page_addr;
+ struct inode *inode = d_inode(dentry);
+ struct ext2_dir_entry_2 *de;
+ struct folio *folio;
int err;
err = dquot_initialize(dir);
if (err)
goto out;
- de = ext2_find_entry(dir, &dentry->d_name, &page, &page_addr);
+ de = ext2_find_entry(dir, &dentry->d_name, &folio);
if (IS_ERR(de)) {
err = PTR_ERR(de);
goto out;
}
- err = ext2_delete_entry (de, page, page_addr);
- ext2_put_page(page, page_addr);
+ err = ext2_delete_entry(de, folio);
+ folio_release_kmap(folio, de);
if (err)
goto out;
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
err = 0;
out:
@@ -321,19 +315,18 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
return err;
}
-static int ext2_rename (struct user_namespace * mnt_userns,
+static int ext2_rename (struct mnt_idmap * idmap,
struct inode * old_dir, struct dentry * old_dentry,
struct inode * new_dir, struct dentry * new_dentry,
unsigned int flags)
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
- struct page * dir_page = NULL;
- void *dir_page_addr;
+ struct folio *dir_folio = NULL;
struct ext2_dir_entry_2 * dir_de = NULL;
- struct page * old_page;
- void *old_page_addr;
+ struct folio * old_folio;
struct ext2_dir_entry_2 * old_de;
+ bool old_is_dir = S_ISDIR(old_inode->i_mode);
int err;
if (flags & ~RENAME_NOREPLACE)
@@ -341,52 +334,50 @@ static int ext2_rename (struct user_namespace * mnt_userns,
err = dquot_initialize(old_dir);
if (err)
- goto out;
+ return err;
err = dquot_initialize(new_dir);
if (err)
- goto out;
+ return err;
- old_de = ext2_find_entry(old_dir, &old_dentry->d_name, &old_page,
- &old_page_addr);
- if (IS_ERR(old_de)) {
- err = PTR_ERR(old_de);
- goto out;
- }
+ old_de = ext2_find_entry(old_dir, &old_dentry->d_name, &old_folio);
+ if (IS_ERR(old_de))
+ return PTR_ERR(old_de);
- if (S_ISDIR(old_inode->i_mode)) {
+ if (old_is_dir && old_dir != new_dir) {
err = -EIO;
- dir_de = ext2_dotdot(old_inode, &dir_page, &dir_page_addr);
+ dir_de = ext2_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}
if (new_inode) {
- void *page_addr;
- struct page *new_page;
+ struct folio *new_folio;
struct ext2_dir_entry_2 *new_de;
err = -ENOTEMPTY;
- if (dir_de && !ext2_empty_dir (new_inode))
+ if (old_is_dir && !ext2_empty_dir(new_inode))
goto out_dir;
new_de = ext2_find_entry(new_dir, &new_dentry->d_name,
- &new_page, &page_addr);
+ &new_folio);
if (IS_ERR(new_de)) {
err = PTR_ERR(new_de);
goto out_dir;
}
- ext2_set_link(new_dir, new_de, new_page, page_addr, old_inode, 1);
- ext2_put_page(new_page, page_addr);
- new_inode->i_ctime = current_time(new_inode);
- if (dir_de)
+ err = ext2_set_link(new_dir, new_de, new_folio, old_inode, true);
+ folio_release_kmap(new_folio, new_de);
+ if (err)
+ goto out_dir;
+ inode_set_ctime_current(new_inode);
+ if (old_is_dir)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
} else {
err = ext2_add_link(new_dentry, old_inode);
if (err)
goto out_dir;
- if (dir_de)
+ if (old_is_dir)
inode_inc_link_count(new_dir);
}
@@ -394,29 +385,22 @@ static int ext2_rename (struct user_namespace * mnt_userns,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
- ext2_delete_entry(old_de, old_page, old_page_addr);
-
- if (dir_de) {
+ err = ext2_delete_entry(old_de, old_folio);
+ if (!err && old_is_dir) {
if (old_dir != new_dir)
- ext2_set_link(old_inode, dir_de, dir_page,
- dir_page_addr, new_dir, 0);
+ err = ext2_set_link(old_inode, dir_de, dir_folio,
+ new_dir, false);
- ext2_put_page(dir_page, dir_page_addr);
inode_dec_link_count(old_dir);
}
-
- ext2_put_page(old_page, old_page_addr);
- return 0;
-
out_dir:
if (dir_de)
- ext2_put_page(dir_page, dir_page_addr);
+ folio_release_kmap(dir_folio, dir_de);
out_old:
- ext2_put_page(old_page, old_page_addr);
-out:
+ folio_release_kmap(old_folio, old_de);
return err;
}
@@ -433,7 +417,7 @@ const struct inode_operations ext2_dir_inode_operations = {
.listxattr = ext2_listxattr,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
- .get_acl = ext2_get_acl,
+ .get_inode_acl = ext2_get_acl,
.set_acl = ext2_set_acl,
.tmpfile = ext2_tmpfile,
.fileattr_get = ext2_fileattr_get,
@@ -444,6 +428,6 @@ const struct inode_operations ext2_special_inode_operations = {
.listxattr = ext2_listxattr,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
- .get_acl = ext2_get_acl,
+ .get_inode_acl = ext2_get_acl,
.set_acl = ext2_set_acl,
};
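
One behavioural change buried in the namei.c hunks is the ->tmpfile() signature: the VFS now passes the open struct file and the filesystem finishes the open via finish_open_simple(). A userspace sketch (not part of the patch; the mount point path is made up) of what this path serves:

/* Userspace sketch: creating an unnamed temporary file via O_TMPFILE. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* ext2_tmpfile() allocates the inode and finish_open_simple()
         * completes the open on the kernel side. */
        int fd = open("/mnt/ext2", O_TMPFILE | O_RDWR, 0600);

        if (fd < 0) {
                perror("open(O_TMPFILE)");
                return 1;
        }
        if (write(fd, "scratch", 7) != 7)
                perror("write");
        close(fd);      /* the inode goes away; it never had a name */
        return 0;
}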
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d8d580b609ba..121e634c792a 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -23,7 +23,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
@@ -40,7 +41,6 @@
#include "acl.h"
static void ext2_write_super(struct super_block *sb);
-static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
static int ext2_freeze(struct super_block *sb);
@@ -81,6 +81,33 @@ void ext2_error(struct super_block *sb, const char *function,
}
}
+static void ext2_msg_fc(struct fs_context *fc, const char *prefix,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ const char *s_id;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ s_id = fc->root->d_sb->s_id;
+ } else {
+ /* get last path component of source */
+ s_id = strrchr(fc->source, '/');
+ if (s_id)
+ s_id++;
+ else
+ s_id = fc->source;
+ }
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%sEXT2-fs (%s): %pV\n", prefix, s_id, &vaf);
+
+ va_end(args);
+}
+
void ext2_msg(struct super_block *sb, const char *prefix,
const char *fmt, ...)
{
@@ -163,7 +190,7 @@ static void ext2_put_super (struct super_block * sb)
db_count = sbi->s_gdb_count;
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
- kfree(sbi->s_group_desc);
+ kvfree(sbi->s_group_desc);
kfree(sbi->s_debts);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
@@ -171,7 +198,7 @@ static void ext2_put_super (struct super_block * sb)
brelse (sbi->s_sbh);
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
kfree(sbi);
}
@@ -180,7 +207,7 @@ static struct kmem_cache * ext2_inode_cachep;
static struct inode *ext2_alloc_inode(struct super_block *sb)
{
struct ext2_inode_info *ei;
- ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, ext2_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
ei->i_block_alloc_info = NULL;
@@ -213,8 +240,7 @@ static int __init init_inodecache(void)
{
ext2_inode_cachep = kmem_cache_create_usercopy("ext2_inode_cache",
sizeof(struct ext2_inode_info), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
- SLAB_ACCOUNT),
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
offsetof(struct ext2_inode_info, i_data),
sizeof_field(struct ext2_inode_info, i_data),
init_once);
@@ -296,9 +322,6 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noacl");
#endif
- if (test_opt(sb, NOBH))
- seq_puts(seq, ",nobh");
-
if (test_opt(sb, USRQUOTA))
seq_puts(seq, ",usrquota");
@@ -323,7 +346,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, siz
static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
static int ext2_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
-static struct dquot **ext2_get_dquots(struct inode *inode)
+static struct dquot __rcu **ext2_get_dquots(struct inode *inode)
{
return EXT2_I(inode)->i_dquot;
}
@@ -350,7 +373,6 @@ static const struct super_operations ext2_sops = {
.freeze_fs = ext2_freeze,
.unfreeze_fs = ext2_unfreeze,
.statfs = ext2_statfs,
- .remount_fs = ext2_remount,
.show_options = ext2_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext2_quota_read,
@@ -400,234 +422,224 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
}
static const struct export_operations ext2_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = ext2_fh_to_dentry,
.fh_to_parent = ext2_fh_to_parent,
.get_parent = ext2_get_parent,
};
-static unsigned long get_sb_block(void **data)
-{
- unsigned long sb_block;
- char *options = (char *) *data;
-
- if (!options || strncmp(options, "sb=", 3) != 0)
- return 1; /* Default location */
- options += 3;
- sb_block = simple_strtoul(options, &options, 0);
- if (*options && *options != ',') {
- printk("EXT2-fs: Invalid sb specification: %s\n",
- (char *) *data);
- return 1;
- }
- if (*options == ',')
- options++;
- *data = (void *) options;
- return sb_block;
-}
-
enum {
- Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
- Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
- Opt_err_ro, Opt_nouid32, Opt_debug,
- Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
- Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota,
- Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
+ Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid,
+ Opt_sb, Opt_errors, Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
+ Opt_nobh, Opt_user_xattr, Opt_acl, Opt_xip, Opt_dax, Opt_ignore,
+ Opt_quota, Opt_usrquota, Opt_grpquota, Opt_reservation,
+};
+
+static const struct constant_table ext2_param_errors[] = {
+ {"continue", EXT2_MOUNT_ERRORS_CONT},
+ {"panic", EXT2_MOUNT_ERRORS_PANIC},
+ {"remount-ro", EXT2_MOUNT_ERRORS_RO},
+ {}
};
-static const match_table_t tokens = {
- {Opt_bsd_df, "bsddf"},
- {Opt_minix_df, "minixdf"},
- {Opt_grpid, "grpid"},
- {Opt_grpid, "bsdgroups"},
- {Opt_nogrpid, "nogrpid"},
- {Opt_nogrpid, "sysvgroups"},
- {Opt_resgid, "resgid=%u"},
- {Opt_resuid, "resuid=%u"},
- {Opt_sb, "sb=%u"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_nouid32, "nouid32"},
- {Opt_debug, "debug"},
- {Opt_oldalloc, "oldalloc"},
- {Opt_orlov, "orlov"},
- {Opt_nobh, "nobh"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_xip, "xip"},
- {Opt_dax, "dax"},
- {Opt_grpquota, "grpquota"},
- {Opt_ignore, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
- {Opt_reservation, "reservation"},
- {Opt_noreservation, "noreservation"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec ext2_param_spec[] = {
+ fsparam_flag ("bsddf", Opt_bsd_df),
+ fsparam_flag ("minixdf", Opt_minix_df),
+ fsparam_flag ("grpid", Opt_grpid),
+ fsparam_flag ("bsdgroups", Opt_grpid),
+ fsparam_flag ("nogrpid", Opt_nogrpid),
+ fsparam_flag ("sysvgroups", Opt_nogrpid),
+ fsparam_gid ("resgid", Opt_resgid),
+ fsparam_uid ("resuid", Opt_resuid),
+ fsparam_u32 ("sb", Opt_sb),
+ fsparam_enum ("errors", Opt_errors, ext2_param_errors),
+ fsparam_flag ("nouid32", Opt_nouid32),
+ fsparam_flag ("debug", Opt_debug),
+ fsparam_flag ("oldalloc", Opt_oldalloc),
+ fsparam_flag ("orlov", Opt_orlov),
+ fsparam_flag ("nobh", Opt_nobh),
+ fsparam_flag_no ("user_xattr", Opt_user_xattr),
+ fsparam_flag_no ("acl", Opt_acl),
+ fsparam_flag ("xip", Opt_xip),
+ fsparam_flag ("dax", Opt_dax),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_flag ("noquota", Opt_ignore),
+ fsparam_flag ("quota", Opt_quota),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag_no ("reservation", Opt_reservation),
+ {}
};
-static int parse_options(char *options, struct super_block *sb,
- struct ext2_mount_options *opts)
+#define EXT2_SPEC_s_resuid (1 << 0)
+#define EXT2_SPEC_s_resgid (1 << 1)
+
+struct ext2_fs_context {
+ unsigned long vals_s_flags; /* Bits to set in s_flags */
+ unsigned long mask_s_flags; /* Bits changed in s_flags */
+ unsigned int vals_s_mount_opt;
+ unsigned int mask_s_mount_opt;
+ kuid_t s_resuid;
+ kgid_t s_resgid;
+ unsigned long s_sb_block;
+ unsigned int spec;
+
+};
+
+static inline void ctx_set_mount_opt(struct ext2_fs_context *ctx,
+ unsigned long flag)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- kuid_t uid;
- kgid_t gid;
-
- if (!options)
- return 1;
-
- while ((p = strsep (&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_bsd_df:
- clear_opt (opts->s_mount_opt, MINIX_DF);
- break;
- case Opt_minix_df:
- set_opt (opts->s_mount_opt, MINIX_DF);
- break;
- case Opt_grpid:
- set_opt (opts->s_mount_opt, GRPID);
- break;
- case Opt_nogrpid:
- clear_opt (opts->s_mount_opt, GRPID);
- break;
- case Opt_resuid:
- if (match_int(&args[0], &option))
- return 0;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid)) {
- ext2_msg(sb, KERN_ERR, "Invalid uid value %d", option);
- return 0;
-
- }
- opts->s_resuid = uid;
- break;
- case Opt_resgid:
- if (match_int(&args[0], &option))
- return 0;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid)) {
- ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
- return 0;
- }
- opts->s_resgid = gid;
- break;
- case Opt_sb:
- /* handled by get_sb_block() instead of here */
- /* *sb_block = match_int(&args[0]); */
- break;
- case Opt_err_panic:
- clear_opt (opts->s_mount_opt, ERRORS_CONT);
- clear_opt (opts->s_mount_opt, ERRORS_RO);
- set_opt (opts->s_mount_opt, ERRORS_PANIC);
- break;
- case Opt_err_ro:
- clear_opt (opts->s_mount_opt, ERRORS_CONT);
- clear_opt (opts->s_mount_opt, ERRORS_PANIC);
- set_opt (opts->s_mount_opt, ERRORS_RO);
- break;
- case Opt_err_cont:
- clear_opt (opts->s_mount_opt, ERRORS_RO);
- clear_opt (opts->s_mount_opt, ERRORS_PANIC);
- set_opt (opts->s_mount_opt, ERRORS_CONT);
- break;
- case Opt_nouid32:
- set_opt (opts->s_mount_opt, NO_UID32);
- break;
- case Opt_debug:
- set_opt (opts->s_mount_opt, DEBUG);
- break;
- case Opt_oldalloc:
- set_opt (opts->s_mount_opt, OLDALLOC);
- break;
- case Opt_orlov:
- clear_opt (opts->s_mount_opt, OLDALLOC);
- break;
- case Opt_nobh:
- set_opt (opts->s_mount_opt, NOBH);
- break;
+ ctx->mask_s_mount_opt |= flag;
+ ctx->vals_s_mount_opt |= flag;
+}
+
+static inline void ctx_clear_mount_opt(struct ext2_fs_context *ctx,
+ unsigned long flag)
+{
+ ctx->mask_s_mount_opt |= flag;
+ ctx->vals_s_mount_opt &= ~flag;
+}
+
+static inline unsigned long
+ctx_test_mount_opt(struct ext2_fs_context *ctx, unsigned long flag)
+{
+ return (ctx->vals_s_mount_opt & flag);
+}
+
+static inline bool
+ctx_parsed_mount_opt(struct ext2_fs_context *ctx, unsigned long flag)
+{
+ return (ctx->mask_s_mount_opt & flag);
+}
+
+static void ext2_free_fc(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static int ext2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ int opt;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, ext2_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_bsd_df:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_MINIX_DF);
+ break;
+ case Opt_minix_df:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_MINIX_DF);
+ break;
+ case Opt_grpid:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_GRPID);
+ break;
+ case Opt_nogrpid:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_GRPID);
+ break;
+ case Opt_resuid:
+ ctx->s_resuid = result.uid;
+ ctx->spec |= EXT2_SPEC_s_resuid;
+ break;
+ case Opt_resgid:
+ ctx->s_resgid = result.gid;
+ ctx->spec |= EXT2_SPEC_s_resgid;
+ break;
+ case Opt_sb:
+ /* Note that this is silently ignored on remount */
+ ctx->s_sb_block = result.uint_32;
+ break;
+ case Opt_errors:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_ERRORS_MASK);
+ ctx_set_mount_opt(ctx, result.uint_32);
+ break;
+ case Opt_nouid32:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_NO_UID32);
+ break;
+ case Opt_debug:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_DEBUG);
+ break;
+ case Opt_oldalloc:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_OLDALLOC);
+ break;
+ case Opt_orlov:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_OLDALLOC);
+ break;
+ case Opt_nobh:
+ ext2_msg_fc(fc, KERN_INFO, "nobh option not supported\n");
+ break;
#ifdef CONFIG_EXT2_FS_XATTR
- case Opt_user_xattr:
- set_opt (opts->s_mount_opt, XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt (opts->s_mount_opt, XATTR_USER);
- break;
+ case Opt_user_xattr:
+ if (!result.negated)
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_XATTR_USER);
+ else
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_XATTR_USER);
+ break;
#else
- case Opt_user_xattr:
- case Opt_nouser_xattr:
- ext2_msg(sb, KERN_INFO, "(no)user_xattr options"
- "not supported");
- break;
+ case Opt_user_xattr:
+ ext2_msg_fc(fc, KERN_INFO, "(no)user_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
- case Opt_acl:
- set_opt(opts->s_mount_opt, POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(opts->s_mount_opt, POSIX_ACL);
- break;
+ case Opt_acl:
+ if (!result.negated)
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL);
+ else
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL);
+ break;
#else
- case Opt_acl:
- case Opt_noacl:
- ext2_msg(sb, KERN_INFO,
- "(no)acl options not supported");
- break;
+ case Opt_acl:
+ ext2_msg_fc(fc, KERN_INFO, "(no)acl options not supported");
+ break;
#endif
- case Opt_xip:
- ext2_msg(sb, KERN_INFO, "use dax instead of xip");
- set_opt(opts->s_mount_opt, XIP);
- fallthrough;
- case Opt_dax:
+ case Opt_xip:
+ ext2_msg_fc(fc, KERN_INFO, "use dax instead of xip");
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_XIP);
+ fallthrough;
+ case Opt_dax:
#ifdef CONFIG_FS_DAX
- ext2_msg(sb, KERN_WARNING,
- "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- set_opt(opts->s_mount_opt, DAX);
+ ext2_msg_fc(fc, KERN_WARNING,
+ "DAX enabled. Warning: DAX support in ext2 driver is deprecated"
+ " and will be removed at the end of 2025. Please use ext4 driver instead.");
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_DAX);
#else
- ext2_msg(sb, KERN_INFO, "dax option not supported");
+ ext2_msg_fc(fc, KERN_INFO, "dax option not supported");
#endif
- break;
+ break;
#if defined(CONFIG_QUOTA)
- case Opt_quota:
- case Opt_usrquota:
- set_opt(opts->s_mount_opt, USRQUOTA);
- break;
-
- case Opt_grpquota:
- set_opt(opts->s_mount_opt, GRPQUOTA);
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_USRQUOTA);
+ break;
+
+ case Opt_grpquota:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_GRPQUOTA);
+ break;
#else
- case Opt_quota:
- case Opt_usrquota:
- case Opt_grpquota:
- ext2_msg(sb, KERN_INFO,
- "quota operations not supported");
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ ext2_msg_fc(fc, KERN_INFO, "quota operations not supported");
+ break;
#endif
-
- case Opt_reservation:
- set_opt(opts->s_mount_opt, RESERVATION);
- ext2_msg(sb, KERN_INFO, "reservations ON");
- break;
- case Opt_noreservation:
- clear_opt(opts->s_mount_opt, RESERVATION);
- ext2_msg(sb, KERN_INFO, "reservations OFF");
- break;
- case Opt_ignore:
- break;
- default:
- return 0;
+ case Opt_reservation:
+ if (!result.negated) {
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ ext2_msg_fc(fc, KERN_INFO, "reservations ON");
+ } else {
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ ext2_msg_fc(fc, KERN_INFO, "reservations OFF");
}
+ break;
+ case Opt_ignore:
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int ext2_setup_super (struct super_block * sb,
@@ -670,10 +682,9 @@ static int ext2_setup_super (struct super_block * sb,
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
if (test_opt (sb, DEBUG))
- ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
+ ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, "
"bpg=%lu, ipg=%lu, mo=%04lx]",
EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
- sbi->s_frag_size,
sbi->s_groups_count,
EXT2_BLOCKS_PER_GROUP(sb),
EXT2_INODES_PER_GROUP(sb),
@@ -753,8 +764,12 @@ static loff_t ext2_max_size(int bits)
res += 1LL << (bits-2);
res += 1LL << (2*(bits-2));
res += 1LL << (3*(bits-2));
+ /* Compute how many metadata blocks are needed */
+ meta_blocks = 1;
+ meta_blocks += 1 + ppb;
+ meta_blocks += 1 + ppb + ppb * ppb;
/* Does block tree limit file size? */
- if (res < upper_limit)
+ if (res + meta_blocks <= upper_limit)
goto check_lfs;
res = upper_limit;
@@ -800,39 +815,98 @@ static unsigned long descriptor_loc(struct super_block *sb,
return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
}
-static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+/*
+ * Set all mount options either from defaults on disk, or from parsed
+ * options. Parsed/specified options override on-disk defaults.
+ */
+static void ext2_set_options(struct fs_context *fc, struct ext2_sb_info *sbi)
{
- struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
+ struct ext2_fs_context *ctx = fc->fs_private;
+ struct ext2_super_block *es = sbi->s_es;
+ unsigned long def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+
+ /* Copy parsed mount options to sbi */
+ sbi->s_mount_opt = ctx->vals_s_mount_opt;
+
+ /* Use in-superblock defaults only if not specified during parsing */
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_DEBUG) &&
+ def_mount_opts & EXT2_DEFM_DEBUG)
+ set_opt(sbi->s_mount_opt, DEBUG);
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_GRPID) &&
+ def_mount_opts & EXT2_DEFM_BSDGROUPS)
+ set_opt(sbi->s_mount_opt, GRPID);
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_NO_UID32) &&
+ def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+
+#ifdef CONFIG_EXT2_FS_XATTR
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_XATTR_USER) &&
+ def_mount_opts & EXT2_DEFM_XATTR_USER)
+ set_opt(sbi->s_mount_opt, XATTR_USER);
+#endif
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL) &&
+ def_mount_opts & EXT2_DEFM_ACL)
+ set_opt(sbi->s_mount_opt, POSIX_ACL);
+#endif
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_ERRORS_MASK)) {
+ if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
+ set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
+ set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ else
+ set_opt(sbi->s_mount_opt, ERRORS_RO);
+ }
+
+ if (ctx->spec & EXT2_SPEC_s_resuid)
+ sbi->s_resuid = ctx->s_resuid;
+ else
+ sbi->s_resuid = make_kuid(&init_user_ns,
+ le16_to_cpu(es->s_def_resuid));
+
+ if (ctx->spec & EXT2_SPEC_s_resgid)
+ sbi->s_resgid = ctx->s_resgid;
+ else
+ sbi->s_resgid = make_kgid(&init_user_ns,
+ le16_to_cpu(es->s_def_resgid));
+}
+
+static int ext2_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
- unsigned long sb_block = get_sb_block(&data);
+ unsigned long sb_block = ctx->s_sb_block;
unsigned long logic_sb_block;
unsigned long offset = 0;
- unsigned long def_mount_opts;
long ret = -ENOMEM;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
- struct ext2_mount_options opts;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- goto failed;
+ return -ENOMEM;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
- goto failed;
+ return -ENOMEM;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
- sbi->s_daxdev = dax_dev;
+ sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
+ NULL, NULL);
spin_lock_init(&sbi->s_lock);
ret = -EINVAL;
@@ -876,42 +950,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
- opts.s_mount_opt = 0;
- /* Set defaults before we parse the mount options */
- def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
- if (def_mount_opts & EXT2_DEFM_DEBUG)
- set_opt(opts.s_mount_opt, DEBUG);
- if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
- set_opt(opts.s_mount_opt, GRPID);
- if (def_mount_opts & EXT2_DEFM_UID16)
- set_opt(opts.s_mount_opt, NO_UID32);
-#ifdef CONFIG_EXT2_FS_XATTR
- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
- set_opt(opts.s_mount_opt, XATTR_USER);
-#endif
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- if (def_mount_opts & EXT2_DEFM_ACL)
- set_opt(opts.s_mount_opt, POSIX_ACL);
-#endif
-
- if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
- set_opt(opts.s_mount_opt, ERRORS_PANIC);
- else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
- set_opt(opts.s_mount_opt, ERRORS_CONT);
- else
- set_opt(opts.s_mount_opt, ERRORS_RO);
-
- opts.s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
- opts.s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
-
- set_opt(opts.s_mount_opt, RESERVATION);
-
- if (!parse_options((char *) data, sb, &opts))
- goto failed_mount;
-
- sbi->s_mount_opt = opts.s_mount_opt;
- sbi->s_resuid = opts.s_resuid;
- sbi->s_resgid = opts.s_resgid;
+ ext2_set_options(fc, sbi);
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -943,14 +982,23 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) {
+ ext2_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (test_opt(sb, DAX)) {
- if (!dax_supported(dax_dev, sb->s_bdev, blocksize, 0,
- bdev_nr_sectors(sb->s_bdev))) {
+ if (!sbi->s_daxdev) {
ext2_msg(sb, KERN_ERR,
"DAX unsupported by block device. Turning off DAX.");
clear_opt(sbi->s_mount_opt, DAX);
+ } else if (blocksize != PAGE_SIZE) {
+ ext2_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
+ clear_opt(sbi->s_mount_opt, DAX);
}
}
@@ -1001,14 +1049,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
}
- sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
- le32_to_cpu(es->s_log_frag_size);
- if (sbi->s_frag_size == 0)
- goto cantfind_ext2;
- sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
-
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
- sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
@@ -1034,11 +1075,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- if (sb->s_blocksize != sbi->s_frag_size) {
+ if (es->s_log_frag_size != es->s_log_block_size) {
ext2_msg(sb, KERN_ERR,
- "error: fragsize %lu != blocksize %lu"
- "(not supported yet)",
- sbi->s_frag_size, sb->s_blocksize);
+ "error: fragsize log %u != blocksize log %u",
+ le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits);
goto failed_mount;
}
@@ -1048,27 +1088,41 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_blocks_per_group);
goto failed_mount;
}
- if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
+ /* At least inode table, bitmaps, and sb have to fit in one group */
+ if (sbi->s_blocks_per_group <= sbi->s_itb_per_group + 3) {
ext2_msg(sb, KERN_ERR,
- "error: #fragments per group too big: %lu",
- sbi->s_frags_per_group);
+ "error: #blocks per group smaller than metadata size: %lu <= %lu",
+ sbi->s_blocks_per_group, sbi->s_itb_per_group + 3);
goto failed_mount;
}
- if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
- "error: #inodes per group too big: %lu",
+ "error: invalid #inodes per group: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
+ if (sb_bdev_nr_blocks(sb) < le32_to_cpu(es->s_blocks_count)) {
+ ext2_msg(sb, KERN_ERR,
+ "bad geometry: block count %u exceeds size of device (%u blocks)",
+ le32_to_cpu(es->s_blocks_count),
+ (unsigned)sb_bdev_nr_blocks(sb));
+ goto failed_mount;
+ }
- if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
- goto cantfind_ext2;
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+ if ((u64)sbi->s_groups_count * sbi->s_inodes_per_group !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext2_msg(sb, KERN_ERR, "error: invalid #inodes: %u vs computed %llu",
+ le32_to_cpu(es->s_inodes_count),
+ (u64)sbi->s_groups_count * sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
- sbi->s_group_desc = kmalloc_array(db_count,
+ sbi->s_group_desc = kvmalloc_array(db_count,
sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
@@ -1194,16 +1248,15 @@ failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
- kfree(sbi->s_group_desc);
+ kvfree(sbi->s_group_desc);
kfree(sbi->s_debts);
failed_mount:
brelse(bh);
failed_sbi:
+ fs_put_dax(sbi->s_daxdev, NULL);
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
-failed:
- fs_put_dax(dax_dev);
return ret;
}
@@ -1309,23 +1362,21 @@ static void ext2_write_super(struct super_block *sb)
ext2_sync_fs(sb, 1);
}
-static int ext2_remount (struct super_block * sb, int * flags, char * data)
+static int ext2_reconfigure(struct fs_context *fc)
{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
struct ext2_sb_info * sbi = EXT2_SB(sb);
struct ext2_super_block * es;
struct ext2_mount_options new_opts;
+ int flags = fc->sb_flags;
int err;
sync_filesystem(sb);
- spin_lock(&sbi->s_lock);
- new_opts.s_mount_opt = sbi->s_mount_opt;
- new_opts.s_resuid = sbi->s_resuid;
- new_opts.s_resgid = sbi->s_resgid;
- spin_unlock(&sbi->s_lock);
-
- if (!parse_options(data, sb, &new_opts))
- return -EINVAL;
+ new_opts.s_mount_opt = ctx->vals_s_mount_opt;
+ new_opts.s_resuid = ctx->s_resuid;
+ new_opts.s_resgid = ctx->s_resgid;
spin_lock(&sbi->s_lock);
es = sbi->s_es;
@@ -1334,9 +1385,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
"dax flag with busy inodes while remounting");
new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
}
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(flags & SB_RDONLY) == sb_rdonly(sb))
goto out_set;
- if (*flags & SB_RDONLY) {
+ if (flags & SB_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS))
goto out_set;
@@ -1455,10 +1506,9 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
return 0;
}
-static struct dentry *ext2_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext2_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_tree_bdev(fc, ext2_fill_super);
}
#ifdef CONFIG_QUOTA
@@ -1486,8 +1536,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
len = i_size-off;
toread = len;
while (toread > 0) {
- tocopy = sb->s_blocksize - offset < toread ?
- sb->s_blocksize - offset : toread;
+ tocopy = min_t(size_t, sb->s_blocksize - offset, toread);
tmp_bh.b_state = 0;
tmp_bh.b_size = sb->s_blocksize;
@@ -1525,8 +1574,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
struct buffer_head *bh;
while (towrite > 0) {
- tocopy = sb->s_blocksize - offset < towrite ?
- sb->s_blocksize - offset : towrite;
+ tocopy = min_t(size_t, sb->s_blocksize - offset, towrite);
tmp_bh.b_state = 0;
tmp_bh.b_size = sb->s_blocksize;
@@ -1543,7 +1591,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
@@ -1559,7 +1607,7 @@ out:
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return len - towrite;
}
@@ -1611,12 +1659,49 @@ out:
#endif
+static const struct fs_context_operations ext2_context_ops = {
+ .parse_param = ext2_parse_param,
+ .get_tree = ext2_get_tree,
+ .reconfigure = ext2_reconfigure,
+ .free = ext2_free_fc,
+};
+
+static int ext2_init_fs_context(struct fs_context *fc)
+{
+ struct ext2_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+ spin_lock(&sbi->s_lock);
+ ctx->vals_s_mount_opt = sbi->s_mount_opt;
+ ctx->vals_s_flags = sb->s_flags;
+ ctx->s_resuid = sbi->s_resuid;
+ ctx->s_resgid = sbi->s_resgid;
+ spin_unlock(&sbi->s_lock);
+ } else {
+ ctx->s_sb_block = 1;
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ }
+
+ fc->fs_private = ctx;
+ fc->ops = &ext2_context_ops;
+
+ return 0;
+}
+
static struct file_system_type ext2_fs_type = {
.owner = THIS_MODULE,
.name = "ext2",
- .mount = ext2_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = ext2_init_fs_context,
+ .parameters = ext2_param_spec,
};
MODULE_ALIAS_FS("ext2");
@@ -1627,7 +1712,7 @@ static int __init init_ext2_fs(void)
err = init_inodecache();
if (err)
return err;
- err = register_filesystem(&ext2_fs_type);
+ err = register_filesystem(&ext2_fs_type);
if (err)
goto out;
return 0;
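
For context on the mount-API conversion above: ext2_init_fs_context() allocates the parsing context, ext2_parse_param() fills it one parameter at a time, ext2_get_tree() drives ext2_fill_super() via get_tree_bdev(), and ext2_reconfigure() covers remount. The structure behind fc->fs_private is declared elsewhere in the patch; a minimal sketch, assuming only the fields actually referenced in the hunks above (the real declaration may differ in ordering and exact types), looks like this:

/*
 * Hedged sketch of the per-mount parsing context. Field names are
 * inferred from their uses in ext2_parse_param()/ext2_set_options();
 * the declaration in the patch itself is authoritative.
 */
struct ext2_fs_context {
	unsigned long	vals_s_flags;		/* SB_* flags captured for reconfigure */
	unsigned long	mask_s_mount_opt;	/* which EXT2_MOUNT_* bits were parsed */
	unsigned long	vals_s_mount_opt;	/* their parsed on/off values */
	kuid_t		s_resuid;
	kgid_t		s_resgid;
	unsigned long	s_sb_block;		/* superblock location, defaults to 1 */
	unsigned int	spec;			/* EXT2_SPEC_s_resuid / EXT2_SPEC_s_resgid */
};

The mask/vals split is what lets ext2_set_options() distinguish "option not given at mount time" from "option explicitly cleared" when merging parsed options with the on-disk defaults.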
diff --git a/fs/ext2/trace.c b/fs/ext2/trace.c
new file mode 100644
index 000000000000..b01cdf6526fd
--- /dev/null
+++ b/fs/ext2/trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "ext2.h"
+#include <linux/uio.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/ext2/trace.h b/fs/ext2/trace.h
new file mode 100644
index 000000000000..7d230e13576e
--- /dev/null
+++ b/fs/ext2/trace.h
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ext2
+
+#if !defined(_EXT2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _EXT2_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ext2_dio_class,
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter, ssize_t ret),
+ TP_ARGS(iocb, iter, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, isize)
+ __field(loff_t, pos)
+ __field(size_t, count)
+ __field(int, ki_flags)
+ __field(bool, aio)
+ __field(ssize_t, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = file_inode(iocb->ki_filp)->i_ino;
+ __entry->isize = file_inode(iocb->ki_filp)->i_size;
+ __entry->pos = iocb->ki_pos;
+ __entry->count = iov_iter_count(iter);
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->aio = !is_sync_kiocb(iocb);
+ __entry->ret = ret;
+ ),
+ TP_printk("dev %d:%d ino 0x%lx isize 0x%llx pos 0x%llx len %zu flags %s aio %d ret %zd",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->isize,
+ __entry->pos,
+ __entry->count,
+ __print_flags(__entry->ki_flags, "|", TRACE_IOCB_STRINGS),
+ __entry->aio,
+ __entry->ret)
+);
+
+#define DEFINE_DIO_RW_EVENT(name) \
+DEFINE_EVENT(ext2_dio_class, name, \
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter, ssize_t ret), \
+ TP_ARGS(iocb, iter, ret))
+DEFINE_DIO_RW_EVENT(ext2_dio_write_begin);
+DEFINE_DIO_RW_EVENT(ext2_dio_write_end);
+DEFINE_DIO_RW_EVENT(ext2_dio_write_buff_end);
+DEFINE_DIO_RW_EVENT(ext2_dio_read_begin);
+DEFINE_DIO_RW_EVENT(ext2_dio_read_end);
+
+TRACE_EVENT(ext2_dio_write_endio,
+ TP_PROTO(struct kiocb *iocb, ssize_t size, int ret),
+ TP_ARGS(iocb, size, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, isize)
+ __field(loff_t, pos)
+ __field(ssize_t, size)
+ __field(int, ki_flags)
+ __field(bool, aio)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = file_inode(iocb->ki_filp)->i_ino;
+ __entry->isize = file_inode(iocb->ki_filp)->i_size;
+ __entry->pos = iocb->ki_pos;
+ __entry->size = size;
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->aio = !is_sync_kiocb(iocb);
+ __entry->ret = ret;
+ ),
+ TP_printk("dev %d:%d ino 0x%lx isize 0x%llx pos 0x%llx len %zd flags %s aio %d ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->isize,
+ __entry->pos,
+ __entry->size,
+ __print_flags(__entry->ki_flags, "|", TRACE_IOCB_STRINGS),
+ __entry->aio,
+ __entry->ret)
+);
+
+#endif /* _EXT2_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
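
The DIO trace events declared above are hooked into the ext2 read/write paths elsewhere in the series; those call sites are not part of this excerpt. A purely hypothetical call site (the function name, the iomap ops pointer, and the flag values are assumptions, not taken from the patch) might look roughly like:

/* Hypothetical caller sketch; the real hooks live in fs/ext2/file.c. */
static ssize_t ext2_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;

	trace_ext2_dio_read_begin(iocb, to, 0);
	ret = iomap_dio_rw(iocb, to, &ext2_iomap_ops, NULL, 0, NULL, 0);
	trace_ext2_dio_read_end(iocb, to, ret);
	return ret;
}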
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 841fa6d9d744..c885dcc3bd0d 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -98,11 +98,11 @@ static struct buffer_head *ext2_xattr_cache_find(struct inode *,
static void ext2_xattr_rehash(struct ext2_xattr_header *,
struct ext2_xattr_entry *);
-static const struct xattr_handler *ext2_xattr_handler_map[] = {
+static const struct xattr_handler * const ext2_xattr_handler_map[] = {
[EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
- [EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
+ [EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
+ [EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
#endif
[EXT2_XATTR_INDEX_TRUSTED] = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
@@ -110,13 +110,9 @@ static const struct xattr_handler *ext2_xattr_handler_map[] = {
#endif
};
-const struct xattr_handler *ext2_xattr_handlers[] = {
+const struct xattr_handler * const ext2_xattr_handlers[] = {
&ext2_xattr_user_handler,
&ext2_xattr_trusted_handler,
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
#ifdef CONFIG_EXT2_FS_SECURITY
&ext2_xattr_security_handler,
#endif
@@ -125,14 +121,18 @@ const struct xattr_handler *ext2_xattr_handlers[] = {
#define EA_BLOCK_CACHE(inode) (EXT2_SB(inode->i_sb)->s_ea_block_cache)
-static inline const struct xattr_handler *
-ext2_xattr_handler(int name_index)
+static inline const char *ext2_xattr_prefix(int name_index,
+ struct dentry *dentry)
{
const struct xattr_handler *handler = NULL;
if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
handler = ext2_xattr_handler_map[name_index];
- return handler;
+
+ if (!xattr_handler_can_list(handler, dentry))
+ return NULL;
+
+ return xattr_prefix(handler);
}
static bool
@@ -333,11 +333,10 @@ bad_block:
/* list the attribute names */
for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
entry = EXT2_XATTR_NEXT(entry)) {
- const struct xattr_handler *handler =
- ext2_xattr_handler(entry->e_name_index);
+ const char *prefix;
- if (handler && (!handler->list || handler->list(dentry))) {
- const char *prefix = handler->prefix ?: handler->name;
+ prefix = ext2_xattr_prefix(entry->e_name_index, dentry);
+ if (prefix) {
size_t prefix_len = strlen(prefix);
size_t size = prefix_len + entry->e_name_len + 1;
@@ -517,48 +516,48 @@ bad_block:
/* Here we know that we can set the new attribute. */
if (header) {
- /* assert(header == HDR(bh)); */
+ int offset;
+
lock_buffer(bh);
if (header->h_refcount == cpu_to_le32(1)) {
__u32 hash = le32_to_cpu(header->h_hash);
+ struct mb_cache_entry *oe;
- ea_bdebug(bh, "modifying in-place");
+ oe = mb_cache_entry_delete_or_get(EA_BLOCK_CACHE(inode),
+ hash, bh->b_blocknr);
+ if (!oe) {
+ ea_bdebug(bh, "modifying in-place");
+ goto update_block;
+ }
/*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect modified block
+ * Someone is trying to reuse the block, leave it alone
*/
- mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
- bh->b_blocknr);
-
- /* keep the buffer locked while modifying it. */
- } else {
- int offset;
-
- unlock_buffer(bh);
- ea_bdebug(bh, "cloning");
- header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
- error = -ENOMEM;
- if (header == NULL)
- goto cleanup;
- header->h_refcount = cpu_to_le32(1);
-
- offset = (char *)here - bh->b_data;
- here = ENTRY((char *)header + offset);
- offset = (char *)last - bh->b_data;
- last = ENTRY((char *)header + offset);
+ mb_cache_entry_put(EA_BLOCK_CACHE(inode), oe);
}
+ unlock_buffer(bh);
+ ea_bdebug(bh, "cloning");
+ header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
+ error = -ENOMEM;
+ if (header == NULL)
+ goto cleanup;
+ header->h_refcount = cpu_to_le32(1);
+
+ offset = (char *)here - bh->b_data;
+ here = ENTRY((char *)header + offset);
+ offset = (char *)last - bh->b_data;
+ last = ENTRY((char *)header + offset);
} else {
/* Allocate a buffer where we construct the new block. */
header = kzalloc(sb->s_blocksize, GFP_KERNEL);
error = -ENOMEM;
if (header == NULL)
goto cleanup;
- end = (char *)header + sb->s_blocksize;
header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
header->h_blocks = header->h_refcount = cpu_to_le32(1);
last = here = ENTRY(header+1);
}
+update_block:
/* Iff we are modifying the block in-place, bh is locked here. */
if (not_found) {
@@ -651,6 +650,55 @@ cleanup:
return error;
}
+static void ext2_xattr_release_block(struct inode *inode,
+ struct buffer_head *bh)
+{
+ struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
+
+retry_ref:
+ lock_buffer(bh);
+ if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
+ __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
+ struct mb_cache_entry *oe;
+
+ /*
+ * This must happen under buffer lock to properly
+ * serialize with ext2_xattr_set() reusing the block.
+ */
+ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (oe) {
+ /*
+ * Someone is trying to reuse the block. Wait
+ * and retry.
+ */
+ unlock_buffer(bh);
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto retry_ref;
+ }
+
+ /* Free the old block. */
+ ea_bdebug(bh, "freeing");
+ ext2_free_blocks(inode, bh->b_blocknr, 1);
+ /* We let our caller release bh, so we
+ * need to duplicate the buffer before. */
+ get_bh(bh);
+ bforget(bh);
+ unlock_buffer(bh);
+ } else {
+ /* Decrement the refcount only. */
+ le32_add_cpu(&HDR(bh)->h_refcount, -1);
+ dquot_free_block(inode, 1);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ ea_bdebug(bh, "refcount now=%d",
+ le32_to_cpu(HDR(bh)->h_refcount));
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
+ }
+}
+
/*
* Second half of ext2_xattr_set(): Update the file system.
*/
@@ -694,10 +742,13 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
/* We need to allocate a new block */
ext2_fsblk_t goal = ext2_group_first_block_no(sb,
EXT2_I(inode)->i_block_group);
- int block = ext2_new_block(inode, goal, &error);
+ unsigned long count = 1;
+ ext2_fsblk_t block = ext2_new_blocks(inode, goal,
+ &count, &error,
+ EXT2_ALLOC_NORESERVE);
if (error)
goto cleanup;
- ea_idebug(inode, "creating block %d", block);
+ ea_idebug(inode, "creating block %lu", block);
new_bh = sb_getblk(sb, block);
if (unlikely(!new_bh)) {
@@ -725,7 +776,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
/* Update the inode. */
EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (IS_SYNC(inode)) {
error = sync_inode_metadata(inode, 1);
/* In case sync failed due to ENOSPC the inode was actually
@@ -747,34 +798,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* If there was an old block and we are no longer using it,
* release the old block.
*/
- lock_buffer(old_bh);
- if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
- __u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);
-
- /*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect freed block
- */
- mb_cache_entry_delete(ea_block_cache, hash,
- old_bh->b_blocknr);
- /* Free the old block. */
- ea_bdebug(old_bh, "freeing");
- ext2_free_blocks(inode, old_bh->b_blocknr, 1);
- mark_inode_dirty(inode);
- /* We let our caller release old_bh, so we
- * need to duplicate the buffer before. */
- get_bh(old_bh);
- bforget(old_bh);
- } else {
- /* Decrement the refcount only. */
- le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
- dquot_free_block_nodirty(inode, 1);
- mark_inode_dirty(inode);
- mark_buffer_dirty(old_bh);
- ea_bdebug(old_bh, "refcount now=%d",
- le32_to_cpu(HDR(old_bh)->h_refcount));
- }
- unlock_buffer(old_bh);
+ ext2_xattr_release_block(inode, old_bh);
}
cleanup:
@@ -828,30 +852,7 @@ ext2_xattr_delete_inode(struct inode *inode)
EXT2_I(inode)->i_file_acl);
goto cleanup;
}
- lock_buffer(bh);
- if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
- __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
-
- /*
- * This must happen under buffer lock for ext2_xattr_set2() to
- * reliably detect freed block
- */
- mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
- bh->b_blocknr);
- ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
- get_bh(bh);
- bforget(bh);
- unlock_buffer(bh);
- } else {
- le32_add_cpu(&HDR(bh)->h_refcount, -1);
- ea_bdebug(bh, "refcount now=%d",
- le32_to_cpu(HDR(bh)->h_refcount));
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- if (IS_SYNC(inode))
- sync_dirty_buffer(bh);
- dquot_free_block_nodirty(inode, 1);
- }
+ ext2_xattr_release_block(inode, bh);
EXT2_I(inode)->i_file_acl = 0;
cleanup:
@@ -873,7 +874,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
int error;
- error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
+ error = mb_cache_entry_create(cache, GFP_KERNEL, hash, bh->b_blocknr,
true);
if (error) {
if (error == -EBUSY) {
@@ -943,7 +944,7 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
if (!header->h_hash)
return NULL; /* never share */
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
+
ce = mb_cache_entry_find_first(ea_block_cache, hash);
while (ce) {
struct buffer_head *bh;
@@ -955,22 +956,8 @@ again:
inode->i_ino, (unsigned long) ce->e_value);
} else {
lock_buffer(bh);
- /*
- * We have to be careful about races with freeing or
- * rehashing of xattr block. Once we hold buffer lock
- * xattr block's state is stable so we can check
- * whether the block got freed / rehashed or not.
- * Since we unhash mbcache entry under buffer lock when
- * freeing / rehashing xattr block, checking whether
- * entry is still hashed is reliable.
- */
- if (hlist_bl_unhashed(&ce->e_hash_list)) {
- mb_cache_entry_put(ea_block_cache, ce);
- unlock_buffer(bh);
- brelse(bh);
- goto again;
- } else if (le32_to_cpu(HDR(bh)->h_refcount) >
- EXT2_XATTR_REFCOUNT_MAX) {
+ if (le32_to_cpu(HDR(bh)->h_refcount) >
+ EXT2_XATTR_REFCOUNT_MAX) {
ea_idebug(inode, "block %ld refcount %d>%d",
(unsigned long) ce->e_value,
le32_to_cpu(HDR(bh)->h_refcount),
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index 7925f596e8e2..6a4966949047 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -72,7 +72,7 @@ extern void ext2_xattr_delete_inode(struct inode *);
extern struct mb_cache *ext2_xattr_create_cache(void);
extern void ext2_xattr_destroy_cache(struct mb_cache *cache);
-extern const struct xattr_handler *ext2_xattr_handlers[];
+extern const struct xattr_handler * const ext2_xattr_handlers[];
# else /* CONFIG_EXT2_FS_XATTR */
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index ebade1f52451..db47b8ab153e 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -19,7 +19,7 @@ ext2_xattr_security_get(const struct xattr_handler *handler,
static int
ext2_xattr_security_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 18a87d5dd1ab..995f931228ce 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -26,7 +26,7 @@ ext2_xattr_trusted_get(const struct xattr_handler *handler,
static int
ext2_xattr_trusted_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 58092449f8ff..dd1507231081 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -30,7 +30,7 @@ ext2_xattr_user_get(const struct xattr_handler *handler,
static int
ext2_xattr_user_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
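
The three hunks above are mechanical: each xattr ->set() callback now receives a struct mnt_idmap * instead of a struct user_namespace *. The handler wiring is unchanged; as a sketch only (the actual initializer in fs/ext2/xattr_user.c is not shown here and also provides a ->list callback):

/* Sketch of the handler hookup; mirrors the existing ext2 layout. */
const struct xattr_handler ext2_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= ext2_xattr_user_get,
	.set	= ext2_xattr_user_set,	/* now takes struct mnt_idmap * */
};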
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 86699c8cab28..01873c2a34ad 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -1,37 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Ext3 configs are here for backward compatibility with old configs which may
-# have EXT3_FS set but not EXT4_FS set and thus would result in non-bootable
-# kernels after the removal of ext3 driver.
-config EXT3_FS
- tristate "The Extended 3 (ext3) filesystem"
- select EXT4_FS
- help
- This config option is here only for backward compatibility. ext3
- filesystem is now handled by the ext4 driver.
-
-config EXT3_FS_POSIX_ACL
- bool "Ext3 POSIX Access Control Lists"
- depends on EXT3_FS
- select EXT4_FS_POSIX_ACL
- select FS_POSIX_ACL
- help
- This config option is here only for backward compatibility. ext3
- filesystem is now handled by the ext4 driver.
-
-config EXT3_FS_SECURITY
- bool "Ext3 Security Labels"
- depends on EXT3_FS
- select EXT4_FS_SECURITY
- help
- This config option is here only for backward compatibility. ext3
- filesystem is now handled by the ext4 driver.
-
config EXT4_FS
tristate "The Extended 4 (ext4) filesystem"
+ select BUFFER_HEAD
select JBD2
select CRC16
- select CRYPTO
- select CRYPTO_CRC32C
+ select CRC32
select FS_IOMAP
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 7d89142e1421..72206a292676 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -17,3 +17,4 @@ ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
ext4-inode-test-objs += inode-test.o
obj-$(CONFIG_EXT4_KUNIT_TESTS) += ext4-inode-test.o
ext4-$(CONFIG_FS_VERITY) += verity.o
+ext4-$(CONFIG_FS_ENCRYPTION) += crypto.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 0613dfcbfd4a..3bffe862f954 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -139,7 +139,7 @@ fail:
/*
* Inode operation get_posix_acl().
*
- * inode->i_mutex: don't care
+ * inode->i_rwsem: don't care
*/
struct posix_acl *
ext4_get_acl(struct inode *inode, int type, bool rcu)
@@ -183,7 +183,7 @@ ext4_get_acl(struct inode *inode, int type, bool rcu)
/*
* Set the access or default ACL of an inode.
*
- * inode->i_mutex: down unless called from ext4_new_inode
+ * inode->i_rwsem: down unless called from ext4_new_inode
*/
static int
__ext4_set_acl(handle_t *handle, struct inode *inode, int type,
@@ -225,12 +225,13 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
}
int
-ext4_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ext4_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
handle_t *handle;
int error, credits, retries = 0;
size_t acl_size = acl ? ext4_acl_size(acl->a_count) : 0;
+ struct inode *inode = d_inode(dentry);
umode_t mode = inode->i_mode;
int update_mode = 0;
@@ -246,10 +247,9 @@ retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
- ext4_fc_start_update(inode);
if ((type == ACL_TYPE_ACCESS) && acl) {
- error = posix_acl_update_mode(mnt_userns, inode, &mode, &acl);
+ error = posix_acl_update_mode(idmap, inode, &mode, &acl);
if (error)
goto out_stop;
if (mode != inode->i_mode)
@@ -259,12 +259,11 @@ retry:
error = __ext4_set_acl(handle, inode, type, acl, 0 /* xattr_flags */);
if (!error && update_mode) {
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
error = ext4_mark_inode_dirty(handle, inode);
}
out_stop:
ext4_journal_stop(handle);
- ext4_fc_stop_update(inode);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
return error;
@@ -273,8 +272,8 @@ out_stop:
/*
* Initialize the ACLs of a new inode. Called from ext4_new_inode.
*
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
+ * dir->i_rwsem: down
+ * inode->i_rwsem: up (access to inode is still exclusive)
*/
int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 3219669732bf..0c5a79c3b5d4 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -56,7 +56,7 @@ static inline int ext4_acl_count(size_t size)
/* acl.c */
struct posix_acl *ext4_get_acl(struct inode *inode, int type, bool rcu);
-int ext4_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int ext4_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
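
The prototype change follows the VFS move to dentry-based ->set_acl(); only the signature changes, not how the callback is wired. As an illustration only (the real ext4 inode_operations tables live in other files and contain many more members):

/* Sketch: typical wiring of the ACL callbacks into inode_operations. */
static const struct inode_operations ext4_acl_iops_sketch = {
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,	/* (idmap, dentry, acl, type) */
};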
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index a0fb0c4bdc7c..8040c731b3e4 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -22,6 +22,7 @@
#include "mballoc.h"
#include <trace/events/ext4.h>
+#include <kunit/static_stub.h>
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
ext4_group_t block_group);
@@ -80,32 +81,54 @@ static inline int ext4_block_in_group(struct super_block *sb,
return (actual_group == block_group) ? 1 : 0;
}
-/* Return the number of clusters used for file system metadata; this
+/*
+ * Return the number of clusters used for file system metadata; this
* represents the overhead needed by the file system.
*/
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
ext4_group_t block_group,
struct ext4_group_desc *gdp)
{
- unsigned num_clusters;
- int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
+ unsigned base_clusters, num_clusters;
+ int block_cluster = -1, inode_cluster;
+ int itbl_cluster_start = -1, itbl_cluster_end = -1;
ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
- ext4_fsblk_t itbl_blk;
+ ext4_fsblk_t end = start + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+ ext4_fsblk_t itbl_blk_start, itbl_blk_end;
struct ext4_sb_info *sbi = EXT4_SB(sb);
/* This is the number of clusters used by the superblock,
* block group descriptors, and reserved block group
* descriptor blocks */
- num_clusters = ext4_num_base_meta_clusters(sb, block_group);
+ base_clusters = ext4_num_base_meta_clusters(sb, block_group);
+ num_clusters = base_clusters;
/*
- * For the allocation bitmaps and inode table, we first need
- * to check to see if the block is in the block group. If it
- * is, then check to see if the cluster is already accounted
- * for in the clusters used for the base metadata cluster, or
- * if we can increment the base metadata cluster to include
- * that block. Otherwise, we will have to track the cluster
- * used for the allocation bitmap or inode table explicitly.
+ * Account for the inode table clusters if any of them fall in this
+ * block group; otherwise the inode table cluster range stays at
+ * [-1, -1] and cannot overlap with the block/inode bitmap clusters
+ * accounted below.
+ */
+ itbl_blk_start = ext4_inode_table(sb, gdp);
+ itbl_blk_end = itbl_blk_start + sbi->s_itb_per_group - 1;
+ if (itbl_blk_start <= end && itbl_blk_end >= start) {
+ itbl_blk_start = max(itbl_blk_start, start);
+ itbl_blk_end = min(itbl_blk_end, end);
+
+ itbl_cluster_start = EXT4_B2C(sbi, itbl_blk_start - start);
+ itbl_cluster_end = EXT4_B2C(sbi, itbl_blk_end - start);
+
+ num_clusters += itbl_cluster_end - itbl_cluster_start + 1;
+ /* check if border cluster is overlapped */
+ if (itbl_cluster_start == base_clusters - 1)
+ num_clusters--;
+ }
+
+ /*
+ * For the allocation bitmaps, we first need to check to see
+ * if the block is in the block group. If it is, then check
+ * to see if the cluster is already accounted for in the clusters
+ * used for the base metadata cluster and inode tables cluster.
* Normally all of these blocks are contiguous, so the special
* case handling shouldn't be necessary except for *very*
* unusual file system layouts.
@@ -113,46 +136,26 @@ static unsigned ext4_num_overhead_clusters(struct super_block *sb,
if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
block_cluster = EXT4_B2C(sbi,
ext4_block_bitmap(sb, gdp) - start);
- if (block_cluster < num_clusters)
- block_cluster = -1;
- else if (block_cluster == num_clusters) {
+ if (block_cluster >= base_clusters &&
+ (block_cluster < itbl_cluster_start ||
+ block_cluster > itbl_cluster_end))
num_clusters++;
- block_cluster = -1;
- }
}
if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
inode_cluster = EXT4_B2C(sbi,
ext4_inode_bitmap(sb, gdp) - start);
- if (inode_cluster < num_clusters)
- inode_cluster = -1;
- else if (inode_cluster == num_clusters) {
- num_clusters++;
- inode_cluster = -1;
- }
- }
-
- itbl_blk = ext4_inode_table(sb, gdp);
- for (i = 0; i < sbi->s_itb_per_group; i++) {
- if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
- c = EXT4_B2C(sbi, itbl_blk + i - start);
- if ((c < num_clusters) || (c == inode_cluster) ||
- (c == block_cluster) || (c == itbl_cluster))
- continue;
- if (c == num_clusters) {
- num_clusters++;
- continue;
- }
+ /*
+ * Also check whether the inode bitmap falls within the block_cluster
+ * we just accounted for above
+ */
+ if (inode_cluster != block_cluster &&
+ inode_cluster >= base_clusters &&
+ (inode_cluster < itbl_cluster_start ||
+ inode_cluster > itbl_cluster_end))
num_clusters++;
- itbl_cluster = c;
- }
}
- if (block_cluster != -1)
- num_clusters++;
- if (inode_cluster != -1)
- num_clusters++;
-
return num_clusters;
}
@@ -187,8 +190,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
ASSERT(buffer_locked(bh));
- /* If checksum is bad mark all blocks used to prevent allocation
- * essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
ext4_mark_group_bitmap_corrupted(sb, block_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT |
@@ -272,6 +273,9 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh_p;
+ KUNIT_STATIC_STUB_REDIRECT(ext4_get_group_desc,
+ sb, block_group, bh);
+
if (block_group >= ngroups) {
ext4_error(sb, "block_group >= groups_count - block_group = %u,"
" groups_count = %u", block_group, ngroups);
@@ -303,6 +307,36 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
+static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ ext4_group_t block_group,
+ struct buffer_head *bh)
+{
+ ext4_grpblk_t next_zero_bit;
+ unsigned long bitmap_size = sb->s_blocksize * 8;
+ unsigned int offset = num_clusters_in_group(sb, block_group);
+
+ if (bitmap_size <= offset)
+ return 0;
+
+ next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
+
+ return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
+}
+
+struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ ext4_group_t group)
+{
+ struct ext4_group_info **grp_info;
+ long indexv, indexh;
+
+ if (unlikely(group >= EXT4_SB(sb)->s_groups_count))
+ return NULL;
+ indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+ indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+ return grp_info[indexh];
+}
+
/*
* Return the block number which was discovered to be invalid, or 0 if
* the block bitmap is valid.
@@ -350,13 +384,13 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
- EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) >= max_bit)
return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
- EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) + 1,
EXT4_B2C(sbi, offset));
if (next_zero_bit <
- EXT4_B2C(sbi, offset + sbi->s_itb_per_group))
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) + 1)
/* bad bitmap for inode tables */
return blk;
return 0;
@@ -377,14 +411,13 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
if (buffer_verified(bh))
return 0;
- if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+ if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
if (buffer_verified(bh))
goto verified;
- if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
- desc, bh) ||
+ if (unlikely(!ext4_block_bitmap_csum_verify(sb, desc, bh) ||
ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
@@ -401,6 +434,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EFSCORRUPTED;
}
+ blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
+ block_group, blk);
+ ext4_mark_group_bitmap_corrupted(sb, block_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ return -EFSCORRUPTED;
+ }
set_buffer_verified(bh);
verified:
ext4_unlock_group(sb, block_group);
@@ -411,6 +453,7 @@ verified:
* ext4_read_block_bitmap_nowait()
* @sb: super block
* @block_group: given block group
+ * @ignore_locked: ignore locked buffers
*
* Read the bitmap for a given block_group,and validate the
* bits for block/inode/inode tables are set in the bitmaps
@@ -427,6 +470,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
ext4_fsblk_t bitmap_blk;
int err;
+ KUNIT_STATIC_STUB_REDIRECT(ext4_read_block_bitmap_nowait,
+ sb, block_group, ignore_locked);
+
desc = ext4_get_group_desc(sb, block_group, NULL);
if (!desc)
return ERR_PTR(-EFSCORRUPTED);
@@ -473,17 +519,19 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
goto out;
}
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
- set_bitmap_uptodate(bh);
- set_buffer_uptodate(bh);
- set_buffer_verified(bh);
- ext4_unlock_group(sb, block_group);
- unlock_buffer(bh);
if (err) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
ext4_error(sb, "Failed to init block bitmap for group "
"%u: %d", block_group, err);
goto out;
}
- goto verify;
+ set_bitmap_uptodate(bh);
+ set_buffer_uptodate(bh);
+ set_buffer_verified(bh);
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ return bh;
}
ext4_unlock_group(sb, block_group);
if (buffer_uptodate(bh)) {
@@ -502,7 +550,8 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO |
(ignore_locked ? REQ_RAHEAD : 0),
- ext4_end_bitmap_read);
+ ext4_end_bitmap_read,
+ ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_EIO));
return bh;
verify:
err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
@@ -520,13 +569,15 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
{
struct ext4_group_desc *desc;
+ KUNIT_STATIC_STUB_REDIRECT(ext4_wait_block_bitmap,
+ sb, block_group, bh);
+
if (!buffer_new(bh))
return 0;
desc = ext4_get_group_desc(sb, block_group, NULL);
if (!desc)
return -EFSCORRUPTED;
wait_on_buffer(bh);
- ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
if (!buffer_uptodate(bh)) {
ext4_error_err(sb, EIO, "Cannot read block bitmap - "
"block_group = %u, block_bitmap = %llu",
@@ -598,8 +649,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
/* Hm, nope. Are (enough) root reserved clusters available? */
if (uid_eq(sbi->s_resuid, current_fsuid()) ||
(!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
- capable(CAP_SYS_RESOURCE) ||
- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
+ capable(CAP_SYS_RESOURCE)) {
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
@@ -652,7 +703,7 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
* possible we just missed a transaction commit that did so
*/
smp_mb();
- if (sbi->s_mb_free_pending == 0) {
+ if (atomic_read(&sbi->s_mb_free_pending) == 0) {
if (test_opt(sb, DISCARD)) {
atomic_inc(&sbi->s_retry_alloc_pending);
flush_work(&sbi->s_discard_work);
@@ -665,7 +716,7 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
* it's possible we've just missed a transaction commit here,
* so ignore the returned status
*/
- jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+ ext4_debug("%s: retrying operation after ENOSPC\n", sb->s_id);
(void) jbd2_journal_force_commit_nested(sbi->s_journal);
return 1;
}
@@ -701,7 +752,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
*count = ar.len;
/*
* Account for the allocated meta blocks. We will never
- * fail EDQUOT for metdata, but we do account for it.
+ * fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
dquot_alloc_block_nofail(inode,
@@ -870,11 +921,11 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
}
/*
- * This function returns the number of file system metadata clusters at
+ * This function returns the number of file system metadata blocks at
* the beginning of a block group, including the reserved gdt blocks.
*/
-static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
- ext4_group_t block_group)
+unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
+ ext4_group_t block_group)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned num;
@@ -886,14 +937,21 @@ static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
sbi->s_desc_per_block) {
if (num) {
- num += ext4_bg_num_gdb(sb, block_group);
+ num += ext4_bg_num_gdb_nometa(sb, block_group);
num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
}
} else { /* For META_BG_BLOCK_GROUPS */
- num += ext4_bg_num_gdb(sb, block_group);
+ num += ext4_bg_num_gdb_meta(sb, block_group);
}
- return EXT4_NUM_B2C(sbi, num);
+ return num;
+}
+
+static unsigned int ext4_num_base_meta_clusters(struct super_block *sb,
+ ext4_group_t block_group)
+{
+ return EXT4_NUM_B2C(EXT4_SB(sb), ext4_num_base_meta_blocks(sb, block_group));
}
+
/**
* ext4_inode_to_goal_block - return a hint for block allocation
* @inode: inode for block allocation
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index f63e028c638c..87760fabdd2e 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -16,19 +16,21 @@ unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
return numchars * BITS_PER_BYTE - memweight(bitmap, numchars);
}
-int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 hi;
__u32 provided, calculated;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
+ sz = EXT4_INODES_PER_GROUP(sb) >> 3;
provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
- calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ calculated = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);
provided |= (hi << 16);
@@ -38,23 +40,25 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
return provided == calculated;
}
-void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ sz = EXT4_INODES_PER_GROUP(sb) >> 3;
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
}
-int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+int ext4_block_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
struct buffer_head *bh)
{
@@ -63,24 +67,21 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_sb_info *sbi = EXT4_SB(sb);
int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
- calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ calculated = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);
provided |= (hi << 16);
} else
calculated &= 0xFFFF;
- if (provided == calculated)
- return 1;
-
- return 0;
+ return provided == calculated;
}
-void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+void ext4_block_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
struct buffer_head *bh)
{
@@ -88,10 +89,10 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
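
With the group argument dropped and the bitmap size derived inside the helpers, the call sites shrink as well. A hedged before/after sketch of a typical inode-bitmap caller (the actual callers sit in fs/ext4/ialloc.c and are not shown in this excerpt; bitmap_bh and group are placeholder names):

/* Before: callers passed the group and the bitmap size in bytes. */
ext4_inode_bitmap_csum_set(sb, group, gdp, bitmap_bh,
			   EXT4_INODES_PER_GROUP(sb) / 8);

/* After: the helper computes EXT4_INODES_PER_GROUP(sb) >> 3 itself. */
ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh);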
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 4666b55b736e..e8c5525afc67 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -72,7 +72,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
{
struct ext4_system_zone *new_entry, *entry;
struct rb_node **n = &system_blks->root.rb_node, *node;
- struct rb_node *parent = NULL, *new_node = NULL;
+ struct rb_node *parent = NULL, *new_node;
while (*n) {
parent = *n;
@@ -215,7 +215,6 @@ int ext4_setup_system_zone(struct super_block *sb)
struct ext4_system_blocks *system_blks;
struct ext4_group_desc *gdp;
ext4_group_t i;
- int flex_size = ext4_flex_bg_size(sbi);
int ret;
system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
@@ -223,12 +222,13 @@ int ext4_setup_system_zone(struct super_block *sb)
return -ENOMEM;
for (i=0; i < ngroups; i++) {
+ unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i);
+
cond_resched();
- if (ext4_bg_has_super(sb, i) &&
- ((i < 5) || ((i % flex_size) == 0))) {
+ if (meta_blks != 0) {
ret = add_system_zone(system_blks,
ext4_group_first_block_no(sb, i),
- ext4_bg_num_gdb(sb, i) + 1, 0);
+ meta_blks, 0);
if (ret)
goto err;
}
@@ -292,15 +292,10 @@ void ext4_release_system_zone(struct super_block *sb)
call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}
-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with some other filesystem metadata blocks.
- */
-int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
- unsigned int count)
+int ext4_sb_block_valid(struct super_block *sb, struct inode *inode,
+ ext4_fsblk_t start_blk, unsigned int count)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_system_blocks *system_blks;
struct ext4_system_zone *entry;
struct rb_node *n;
@@ -329,7 +324,9 @@ int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
else if (start_blk >= (entry->start_blk + entry->count))
n = n->rb_right;
else {
- ret = (entry->ino == inode->i_ino);
+ ret = 0;
+ if (inode)
+ ret = (entry->ino == inode->i_ino);
break;
}
}
@@ -338,15 +335,25 @@ out_rcu:
return ret;
}
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with some other filesystem metadata blocks.
+ */
+int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
+ unsigned int count)
+{
+ return ext4_sb_block_valid(inode->i_sb, inode, start_blk, count);
+}
+
int ext4_check_blockref(const char *function, unsigned int line,
struct inode *inode, __le32 *p, unsigned int max)
{
__le32 *bref = p;
unsigned int blk;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ if (journal && inode == journal->j_inode)
return 0;
while (bref < p+max) {
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
new file mode 100644
index 000000000000..cf0a0970c095
--- /dev/null
+++ b/fs/ext4/crypto.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/quotaops.h>
+#include <linux/uuid.h>
+
+#include "ext4.h"
+#include "xattr.h"
+#include "ext4_jbd2.h"
+
+static void ext4_fname_from_fscrypt_name(struct ext4_filename *dst,
+ const struct fscrypt_name *src)
+{
+ memset(dst, 0, sizeof(*dst));
+
+ dst->usr_fname = src->usr_fname;
+ dst->disk_name = src->disk_name;
+ dst->hinfo.hash = src->hash;
+ dst->hinfo.minor_hash = src->minor_hash;
+ dst->crypto_buf = src->crypto_buf;
+}
+
+int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct ext4_filename *fname)
+{
+ struct fscrypt_name name;
+ int err;
+
+ err = fscrypt_setup_filename(dir, iname, lookup, &name);
+ if (err)
+ return err;
+
+ ext4_fname_from_fscrypt_name(fname, &name);
+
+ err = ext4_fname_setup_ci_filename(dir, iname, fname);
+ if (err)
+ ext4_fname_free_filename(fname);
+
+ return err;
+}
+
+int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct ext4_filename *fname)
+{
+ struct fscrypt_name name;
+ int err;
+
+ err = fscrypt_prepare_lookup(dir, dentry, &name);
+ if (err)
+ return err;
+
+ ext4_fname_from_fscrypt_name(fname, &name);
+
+ err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
+ if (err)
+ ext4_fname_free_filename(fname);
+ return err;
+}
+
+void ext4_fname_free_filename(struct ext4_filename *fname)
+{
+ struct fscrypt_name name;
+
+ name.crypto_buf = fname->crypto_buf;
+ fscrypt_free_filename(&name);
+
+ fname->crypto_buf.name = NULL;
+ fname->usr_fname = NULL;
+ fname->disk_name.name = NULL;
+
+ ext4_fname_free_ci_filename(fname);
+}
+
+static bool uuid_is_zero(__u8 u[16])
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ if (u[i])
+ return false;
+ return true;
+}
+
+int ext4_ioctl_get_encryption_pwsalt(struct file *filp, void __user *arg)
+{
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int err, err2;
+ handle_t *handle;
+
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+
+ if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+ handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto pwsalt_err_exit;
+ }
+ err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto pwsalt_err_journal;
+ lock_buffer(sbi->s_sbh);
+ generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
+pwsalt_err_journal:
+ err2 = ext4_journal_stop(handle);
+ if (err2 && !err)
+ err = err2;
+pwsalt_err_exit:
+ mnt_drop_write_file(filp);
+ if (err)
+ return err;
+ }
+
+ if (copy_to_user(arg, sbi->s_es->s_encrypt_pw_salt, 16))
+ return -EFAULT;
+ return 0;
+}
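+
+/*
+ * Userspace-side sketch (not part of this patch): assuming the generic
+ * FS_IOC_GET_ENCRYPTION_PWSALT number from <linux/fscrypt.h> and an fd
+ * opened anywhere on the filesystem, the salt generated above is read as:
+ *
+ *	__u8 salt[16];
+ *
+ *	if (ioctl(fd, FS_IOC_GET_ENCRYPTION_PWSALT, salt) != 0)
+ *		perror("FS_IOC_GET_ENCRYPTION_PWSALT");
+ *	// salt[] now holds the per-filesystem password salt
+ */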
+
+static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
+}
+
+static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data)
+{
+ handle_t *handle = fs_data;
+ int res, res2, credits, retries = 0;
+
+ /*
+ * Encrypting the root directory is not allowed because e2fsck expects
+ * lost+found to exist and be unencrypted, and encrypting the root
+ * directory would imply encrypting the lost+found directory as well as
+ * the filename "lost+found" itself.
+ */
+ if (inode->i_ino == EXT4_ROOT_INO)
+ return -EPERM;
+
+ if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
+ return -EINVAL;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
+ return -EOPNOTSUPP;
+
+ res = ext4_convert_inline_data(inode);
+ if (res)
+ return res;
+
+ /*
+ * If a journal handle was specified, then the encryption context is
+ * being set on a new inode via inheritance and is part of a larger
+ * transaction to create the inode. Otherwise the encryption context is
+ * being set on an existing inode in its own transaction. Only in the
+ * latter case should the "retry on ENOSPC" logic be used.
+ */
+
+ if (handle) {
+ res = ext4_xattr_set_handle(handle, inode,
+ EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, 0);
+ if (!res) {
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ ext4_clear_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ /*
+ * Update inode->i_flags - S_ENCRYPTED will be enabled,
+ * S_DAX may be disabled
+ */
+ ext4_set_inode_flags(inode, false);
+ }
+ return res;
+ }
+
+ res = dquot_initialize(inode);
+ if (res)
+ return res;
+retry:
+ res = ext4_xattr_set_credits(inode, len, false /* is_create */,
+ &credits);
+ if (res)
+ return res;
+
+ handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, 0);
+ if (!res) {
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ /*
+ * Update inode->i_flags - S_ENCRYPTED will be enabled,
+ * S_DAX may be disabled
+ */
+ ext4_set_inode_flags(inode, false);
+ res = ext4_mark_inode_dirty(handle, inode);
+ if (res)
+ EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
+ }
+ res2 = ext4_journal_stop(handle);
+
+ if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+ if (!res)
+ res = res2;
+ return res;
+}
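+
+/*
+ * For orientation, a sketch of the two invocation shapes the comment above
+ * distinguishes (surrounding variables are illustrative; fscrypt reaches
+ * this function through ext4_cryptops.set_context):
+ *
+ *	// new inode: inherit context inside the parent's create transaction
+ *	res = ext4_set_context(inode, &ctx, sizeof(ctx), handle);
+ *
+ *	// existing (empty) inode: no handle, so a private transaction
+ *	// is started here, with the ENOSPC retry loop
+ *	res = ext4_set_context(inode, &ctx, sizeof(ctx), NULL);
+ */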
+
+static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
+{
+ return EXT4_SB(sb)->s_dummy_enc_policy.policy;
+}
+
+static bool ext4_has_stable_inodes(struct super_block *sb)
+{
+ return ext4_has_feature_stable_inodes(sb);
+}
+
+const struct fscrypt_operations ext4_cryptops = {
+ .inode_info_offs = (int)offsetof(struct ext4_inode_info, i_crypt_info) -
+ (int)offsetof(struct ext4_inode_info, vfs_inode),
+ .needs_bounce_pages = 1,
+ .has_32bit_inodes = 1,
+ .supports_subblock_data_units = 1,
+ .legacy_key_prefix = "ext4:",
+ .get_context = ext4_get_context,
+ .set_context = ext4_set_context,
+ .get_dummy_policy = ext4_get_dummy_policy,
+ .empty_dir = ext4_empty_dir,
+ .has_stable_inodes = ext4_has_stable_inodes,
+};
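+
+/*
+ * The inode_info_offs value above is the signed distance from the VFS inode
+ * back to the ext4-private i_crypt_info pointer. A sketch of the pointer
+ * arithmetic it enables (the helper name is illustrative, not the actual
+ * fscrypt implementation):
+ *
+ *	static struct fscrypt_inode_info **crypt_info_slot(struct inode *inode,
+ *					const struct fscrypt_operations *cop)
+ *	{
+ *		// inode is embedded in ext4_inode_info as ei->vfs_inode
+ *		return (void *)inode + cop->inode_info_offs;
+ *	}
+ */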
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 74b172a4adda..256fe2c1d4c1 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -86,7 +86,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
dir->i_sb->s_blocksize);
const int next_offset = ((char *) de - buf) + rlen;
bool fake = is_fake_dir_entry(de);
- bool has_csum = ext4_has_metadata_csum(dir->i_sb);
+ bool has_csum = ext4_has_feature_metadata_csum(dir->i_sb);
if (unlikely(rlen < ext4_dir_rec_len(1, fake ? NULL : dir)))
error_msg = "rec_len is smaller than minimal";
@@ -104,6 +104,9 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
+ else if (unlikely(next_offset == size && de->name_len == 1 &&
+ de->name[0] == '.'))
+ error_msg = "'.' directory cannot be the last in data block";
else
return 0;
@@ -133,6 +136,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
+ struct dir_private_info *info = file->private_data;
err = fscrypt_prepare_readdir(inode);
if (err)
@@ -144,7 +148,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
return err;
/* Can we just clear INDEX flag to ignore htree information? */
- if (!ext4_has_metadata_csum(sb)) {
+ if (!ext4_has_feature_metadata_csum(sb)) {
/*
* We don't set the inode dirty flag since it's not
* critical that it gets flushed back to the disk.
@@ -188,13 +192,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
continue;
}
if (err > 0) {
- pgoff_t index = map.m_pblk >>
- (PAGE_SHIFT - inode->i_blkbits);
+ pgoff_t index = map.m_pblk << inode->i_blkbits >>
+ PAGE_SHIFT;
if (!ra_has_index(&file->f_ra, index))
page_cache_sync_readahead(
- sb->s_bdev->bd_inode->i_mapping,
- &file->f_ra, file,
- index, 1);
+ sb->s_bdev->bd_mapping,
+ &file->f_ra, file, index,
+ 1 << EXT4_SB(sb)->s_min_folio_order);
file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
bh = ext4_bread(NULL, inode, map.m_lblk, 0);
if (IS_ERR(bh)) {
@@ -229,7 +233,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (!inode_eq_iversion(inode, file->f_version)) {
+ if (!inode_eq_iversion(inode, info->cookie)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext4_dir_entry_2 *)
(bh->b_data + i);
@@ -249,7 +253,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
- file->f_version = inode_query_iversion(inode);
+ info->cookie = inode_query_iversion(inode);
}
while (ctx->pos < inode->i_size
@@ -279,12 +283,20 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct fscrypt_str de_name =
FSTR_INIT(de->name,
de->name_len);
+ u32 hash;
+ u32 minor_hash;
+
+ if (IS_CASEFOLDED(inode)) {
+ hash = EXT4_DIRENT_HASH(de);
+ minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+ } else {
+ hash = 0;
+ minor_hash = 0;
+ }
/* Directory is encrypted */
err = fscrypt_fname_disk_to_usr(inode,
- EXT4_DIRENT_HASH(de),
- EXT4_DIRENT_MINOR_HASH(de),
- &de_name, &fstr);
+ hash, minor_hash, &de_name, &fstr);
de_name = fstr;
fstr.len = save_len;
if (err)
@@ -303,7 +315,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
goto done;
brelse(bh);
bh = NULL;
- offset = 0;
}
done:
err = 0;
@@ -385,6 +396,7 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
+ struct dir_private_info *info = file->private_data;
int dx_dir = is_dx_dir(inode);
loff_t ret, htree_max = ext4_get_htree_eof(file);
@@ -393,7 +405,7 @@ static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
htree_max, htree_max);
else
ret = ext4_llseek(file, offset, whence);
- file->f_version = inode_peek_iversion(inode) - 1;
+ info->cookie = inode_peek_iversion(inode) - 1;
return ret;
}
@@ -409,11 +421,11 @@ struct fname {
__u32 inode;
__u8 name_len;
__u8 file_type;
- char name[];
+ char name[] __counted_by(name_len);
};
/*
- * This functoin implements a non-recursive way of freeing all of the
+ * This function implements a non-recursive way of freeing all of the
* nodes in the red-black tree.
*/
static void free_rb_tree_fname(struct rb_root *root)
@@ -430,18 +442,15 @@ static void free_rb_tree_fname(struct rb_root *root)
*root = RB_ROOT;
}
-
-static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
- loff_t pos)
+static void ext4_htree_init_dir_info(struct file *filp, loff_t pos)
{
- struct dir_private_info *p;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return NULL;
- p->curr_hash = pos2maj_hash(filp, pos);
- p->curr_minor_hash = pos2min_hash(filp, pos);
- return p;
+ struct dir_private_info *p = filp->private_data;
+
+ if (is_dx_dir(file_inode(filp)) && !p->initialized) {
+ p->curr_hash = pos2maj_hash(filp, pos);
+ p->curr_minor_hash = pos2min_hash(filp, pos);
+ p->initialized = true;
+ }
}
void ext4_htree_free_dir_info(struct dir_private_info *p)
@@ -465,14 +474,13 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
struct rb_node **p, *parent = NULL;
struct fname *fname, *new_fn;
struct dir_private_info *info;
- int len;
info = dir_file->private_data;
p = &info->root.rb_node;
/* Create and allocate the fname structure */
- len = sizeof(struct fname) + ent_name->len + 1;
- new_fn = kzalloc(len, GFP_KERNEL);
+ new_fn = kzalloc(struct_size(new_fn, name, ent_name->len + 1),
+ GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
new_fn->hash = hash;
@@ -516,7 +524,7 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
/*
* This is a helper function for ext4_dx_readdir. It calls filldir
- * for all entres on the fname linked list. (Normally there is only
+ * for all entries on the fname linked list. (Normally there is only
* one entry on the linked list, unless there are 62 bit hash collisions.)
*/
static int call_filldir(struct file *file, struct dir_context *ctx,
@@ -553,12 +561,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
struct fname *fname;
int ret = 0;
- if (!info) {
- info = ext4_htree_create_dir_info(file, ctx->pos);
- if (!info)
- return -ENOMEM;
- file->private_data = info;
- }
+ ext4_htree_init_dir_info(file, ctx->pos);
if (ctx->pos == ext4_get_htree_eof(file))
return 0; /* EOF */
@@ -591,10 +594,10 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
* cached entries.
*/
if ((!info->curr_node) ||
- !inode_eq_iversion(inode, file->f_version)) {
+ !inode_eq_iversion(inode, info->cookie)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
- file->f_version = inode_query_iversion(inode);
+ info->cookie = inode_query_iversion(inode);
ret = ext4_htree_fill_tree(file, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
@@ -649,7 +652,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
unsigned int offset = 0;
char *top;
- de = (struct ext4_dir_entry_2 *)buf;
+ de = buf;
top = buf + buf_size;
while ((char *) de < top) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
@@ -665,7 +668,19 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
return 0;
}
+static int ext4_dir_open(struct inode *inode, struct file *file)
+{
+ struct dir_private_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ file->private_data = info;
+ return 0;
+}
+
const struct file_operations ext4_dir_operations = {
+ .open = ext4_dir_open,
.llseek = ext4_dir_llseek,
.read = generic_read_dir,
.iterate_shared = ext4_readdir,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 404dd50856e5..56112f201cac 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -33,13 +33,14 @@
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
#include <linux/ratelimit.h>
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
#include <linux/fiemap.h>
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
+#include <uapi/linux/ext4.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
@@ -127,6 +128,55 @@ enum SHIFT_DIRECTION {
};
/*
+ * For each criterion, mballoc has a slightly different way of finding
+ * the required blocks, and usually the higher the criterion, the slower
+ * the allocation. We start at lower criteria and keep falling back to
+ * higher ones if we are not able to find any blocks. Lower (earlier)
+ * criteria are faster.
+ */
+enum criteria {
+ /*
+ * Used when number of blocks needed is a power of 2. This
+ * doesn't trigger any disk IO except prefetch and is the
+ * fastest criteria.
+ */
+ CR_POWER2_ALIGNED,
+
+ /*
+ * Tries to look up in-memory data structures to find the most
+ * suitable group that satisfies the goal request. No disk IO
+ * except block prefetch.
+ */
+ CR_GOAL_LEN_FAST,
+
+ /*
+ * Same as CR_GOAL_LEN_FAST but is allowed to reduce the goal
+ * length to the best available length for faster allocation.
+ */
+ CR_BEST_AVAIL_LEN,
+
+ /*
+ * Reads each block group sequentially, performing disk IO if
+ * necessary, to find a suitable block group. Tries to
+ * allocate the goal length but might trim the request if nothing
+ * is found after enough tries.
+ */
+ CR_GOAL_LEN_SLOW,
+
+ /*
+ * Finds the first free set of blocks and allocates
+ * those. This is only used in rare cases when
+ * CR_GOAL_LEN_SLOW also fails to allocate anything.
+ */
+ CR_ANY_FREE,
+
+ /*
+ * Number of criteria defined.
+ */
+ EXT4_MB_NUM_CRS
+};
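+
+/*
+ * The ordering above is what makes the fallback cheap-to-expensive. A minimal
+ * sketch of how an allocator might walk the criteria in order
+ * (try_allocation_with() is a hypothetical helper, not part of this patch):
+ *
+ *	static int walk_criteria(struct ext4_allocation_request *ar)
+ *	{
+ *		enum criteria cr;
+ *		int ret = -ENOSPC;
+ *
+ *		// start with the fastest criteria, fall back to slower ones
+ *		for (cr = CR_POWER2_ALIGNED;
+ *		     cr < EXT4_MB_NUM_CRS && ret == -ENOSPC; cr++)
+ *			ret = try_allocation_with(ar, cr);
+ *		return ret;
+ *	}
+ */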
+
+/*
* Flags used in mballoc's allocation_context flags field.
*
* Also used to show what's going on for debugging purposes when the
@@ -135,14 +185,8 @@ enum SHIFT_DIRECTION {
/* prefer goal again. length */
#define EXT4_MB_HINT_MERGE 0x0001
-/* blocks already reserved */
-#define EXT4_MB_HINT_RESERVED 0x0002
-/* metadata is being allocated */
-#define EXT4_MB_HINT_METADATA 0x0004
/* first blocks in the file */
#define EXT4_MB_HINT_FIRST 0x0008
-/* search for the best chunk */
-#define EXT4_MB_HINT_BEST 0x0010
/* data is being allocated */
#define EXT4_MB_HINT_DATA 0x0020
/* don't preallocate (for tails) */
@@ -163,12 +207,7 @@ enum SHIFT_DIRECTION {
#define EXT4_MB_USE_RESERVED 0x2000
/* Do strict check for free blocks while retrying block allocation */
#define EXT4_MB_STRICT_CHECK 0x4000
-/* Large fragment size list lookup succeeded at least once for cr = 0 */
-#define EXT4_MB_CR0_OPTIMIZED 0x8000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
-#define EXT4_MB_CR1_OPTIMIZED 0x00010000
-/* Perform linear traversal for one group */
-#define EXT4_MB_SEARCH_NEXT_LINEAR 0x00020000
+
struct ext4_allocation_request {
/* target inode for block we're allocating */
struct inode *inode;
@@ -201,14 +240,27 @@ struct ext4_allocation_request {
#define EXT4_MAP_MAPPED BIT(BH_Mapped)
#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten)
#define EXT4_MAP_BOUNDARY BIT(BH_Boundary)
+#define EXT4_MAP_DELAYED BIT(BH_Delay)
+/*
+ * This is for use in ext4_map_query_blocks() for a special case where
+ * physically and logically contiguous blocks can be split across two leaf
+ * nodes instead of a single extent. This is required in case of atomic writes
+ * to know whether the returned extent is the last one in its leaf. If so, look
+ * up the next extent in the leaf block in ext4_map_query_blocks_next_in_leaf().
+ * - This is never going to be added to any buffer head state.
+ * - We use the next available bit after BH_BITMAP_UPTODATE.
+ */
+#define EXT4_MAP_QUERY_LAST_IN_LEAF BIT(BH_BITMAP_UPTODATE + 1)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
- EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
+ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
+ EXT4_MAP_DELAYED | EXT4_MAP_QUERY_LAST_IN_LEAF)
struct ext4_map_blocks {
ext4_fsblk_t m_pblk;
ext4_lblk_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ u64 m_seq;
};
/*
@@ -222,7 +274,10 @@ struct ext4_system_blocks {
/*
* Flags for ext4_io_end->flags
*/
-#define EXT4_IO_END_UNWRITTEN 0x0001
+#define EXT4_IO_END_UNWRITTEN 0x0001
+#define EXT4_IO_END_FAILED 0x0002
+
+#define EXT4_IO_END_DEFER_COMPLETION (EXT4_IO_END_UNWRITTEN | EXT4_IO_END_FAILED)
struct ext4_io_end_vec {
struct list_head list; /* list of io_end_vec */
@@ -311,7 +366,16 @@ struct ext4_io_submit {
#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
blkbits))
-
+#define EXT4_B_TO_LBLK(inode, offset) \
+ (round_up((offset), i_blocksize(inode)) >> (inode)->i_blkbits)
+#define EXT4_LBLK_TO_B(inode, lblk) ((loff_t)(lblk) << (inode)->i_blkbits)
+
+/* Translate a block number to a page index */
+#define EXT4_LBLK_TO_PG(inode, lblk) (EXT4_LBLK_TO_B((inode), (lblk)) >> \
+ PAGE_SHIFT)
+/* Translate a page index to a block number */
+#define EXT4_PG_TO_LBLK(inode, pnum) (((loff_t)(pnum) << PAGE_SHIFT) >> \
+ (inode)->i_blkbits)
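+/*
+ * Quick worked example of these conversions, assuming a hypothetical 1 KiB
+ * block size and 4 KiB pages (i_blkbits == 10, PAGE_SHIFT == 12):
+ *
+ *	pgoff_t pg = EXT4_LBLK_TO_PG(inode, 9);		// (9 << 10) >> 12 == 2
+ *	ext4_lblk_t blk = EXT4_PG_TO_LBLK(inode, 2);	// (2 << 12) >> 10 == 8
+ *
+ * Note the round-trip is lossy: block 9 falls inside page 2, which starts
+ * at block 8.
+ */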
/* Translate a block number to a cluster number */
#define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits)
/* Translate a cluster number to a block number */
@@ -560,7 +624,7 @@ enum {
*
* It's not paranoia if the Murphy's Law really *is* out to get you. :-)
*/
-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
+#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG))
#define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
static inline void ext4_check_flag_values(void)
@@ -593,17 +657,6 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(RESERVED);
}
-/* Used to pass group descriptor data when online resize is done */
-struct ext4_new_group_input {
- __u32 group; /* Group number for this data */
- __u64 block_bitmap; /* Absolute block number of block bitmap */
- __u64 inode_bitmap; /* Absolute block number of inode bitmap */
- __u64 inode_table; /* Absolute block number of inode table start */
- __u32 blocks_count; /* Total number of blocks in this group */
- __u16 reserved_blocks; /* Number of reserved blocks in this group */
- __u16 unused;
-};
-
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
struct compat_ext4_new_group_input {
u32 group;
@@ -649,16 +702,22 @@ enum {
/* Caller is from the delayed allocation writeout path
* finally doing the actual allocation of delayed blocks */
#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
- /* caller is from the direct IO path, request to creation of an
- unwritten extents if not allocated, split the unwritten
- extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_PRE_IO 0x0008
-#define EXT4_GET_BLOCKS_CONVERT 0x0010
-#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\
- EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
- /* Convert extent to initialized after IO complete */
-#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
+ /*
+ * This means that we cannot merge newly allocated extents, and if we
+ * found an unwritten extent, we need to split it.
+ */
+#define EXT4_GET_BLOCKS_SPLIT_NOMERGE 0x0008
+ /*
+ * Caller is from the dio or dioread_nolock buffered IO, request to
+ * create an unwritten extent if it does not exist or split the
+ * found unwritten extent. Also do not merge the newly created
+ * unwritten extent, io end will convert unwritten to written,
+ * and try to merge the written extent.
+ */
+#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_SPLIT_NOMERGE|\
EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
+ /* Convert unwritten extent to initialized. */
+#define EXT4_GET_BLOCKS_CONVERT 0x0010
/* Eventual metadata allocation (due to growing extent tree)
* should not fail, so try to use reserved blocks for that.*/
#define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020
@@ -670,9 +729,23 @@ enum {
#define EXT4_GET_BLOCKS_ZERO 0x0200
#define EXT4_GET_BLOCKS_CREATE_ZERO (EXT4_GET_BLOCKS_CREATE |\
EXT4_GET_BLOCKS_ZERO)
- /* Caller will submit data before dropping transaction handle. This
- * allows jbd2 to avoid submitting data before commit. */
+ /* Caller is in the context of data submission, such as writeback,
+ * fsync, etc. In particular, in the generic writeback path, the caller
+ * will submit data before dropping the transaction handle. This allows
+ * jbd2 to avoid submitting data before commit. */
#define EXT4_GET_BLOCKS_IO_SUBMIT 0x0400
+ /* Convert extent to initialized after IO complete */
+#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT |\
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |\
+ EXT4_GET_BLOCKS_IO_SUBMIT)
+ /* Caller is in the atomic context, find extent if it has been cached */
+#define EXT4_GET_BLOCKS_CACHED_NOWAIT 0x0800
+/*
+ * Atomic write caller needs this to query in the slow path of the mixed
+ * mapping case, when a contiguous extent can be split across two adjacent
+ * leaf nodes. See EXT4_MAP_QUERY_LAST_IN_LEAF.
+ */
+#define EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF 0x1000
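+/*
+ * Minimal sketch of how an atomic-write caller might use this pair of flags;
+ * only the flag names and ext4_map_blocks() come from this code, the
+ * surrounding logic is illustrative and error handling is elided:
+ *
+ *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
+ *	int ret;
+ *
+ *	ret = ext4_map_blocks(NULL, inode, &map,
+ *			      EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF);
+ *	if (ret > 0 && (map.m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF))
+ *		;	// mapping ends at a leaf boundary: check the next leaf too
+ */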
/*
* The bit position of these flags must not overlap with any of the
@@ -686,6 +759,13 @@ enum {
#define EXT4_EX_NOCACHE 0x40000000
#define EXT4_EX_FORCE_CACHE 0x20000000
#define EXT4_EX_NOFAIL 0x10000000
+/*
+ * ext4_map_query_blocks() uses this filter mask to filter the flags needed to
+ * pass while lookup/querying of on disk extent tree.
+ */
+#define EXT4_EX_QUERY_FILTER (EXT4_EX_NOCACHE | EXT4_EX_FORCE_CACHE |\
+ EXT4_EX_NOFAIL |\
+ EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF)
/*
* Flags used by ext4_free_blocks
@@ -698,59 +778,6 @@ enum {
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
#define EXT4_FREE_BLOCKS_RERESERVE_CLUSTER 0x0040
-/*
- * ioctl commands
- */
-#define EXT4_IOC_GETVERSION _IOR('f', 3, long)
-#define EXT4_IOC_SETVERSION _IOW('f', 4, long)
-#define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION
-#define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION
-#define EXT4_IOC_GETRSVSZ _IOR('f', 5, long)
-#define EXT4_IOC_SETRSVSZ _IOW('f', 6, long)
-#define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
-#define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
-#define EXT4_IOC_MIGRATE _IO('f', 9)
- /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
- /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
-#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
-#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
-#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
-#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
-#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
-/* ioctl codes 19--39 are reserved for fscrypt */
-#define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40)
-#define EXT4_IOC_GETSTATE _IOW('f', 41, __u32)
-#define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap)
-#define EXT4_IOC_CHECKPOINT _IOW('f', 43, __u32)
-
-#define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32)
-
-/*
- * Flags for going down operation
- */
-#define EXT4_GOING_FLAGS_DEFAULT 0x0 /* going down */
-#define EXT4_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
-#define EXT4_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
-
-/*
- * Flags returned by EXT4_IOC_GETSTATE
- *
- * We only expose to userspace a subset of the state flags in
- * i_state_flags
- */
-#define EXT4_STATE_FLAG_EXT_PRECACHED 0x00000001
-#define EXT4_STATE_FLAG_NEW 0x00000002
-#define EXT4_STATE_FLAG_NEWENTRY 0x00000004
-#define EXT4_STATE_FLAG_DA_ALLOC_CLOSE 0x00000008
-
-/* flags for ioctl EXT4_IOC_CHECKPOINT */
-#define EXT4_IOC_CHECKPOINT_FLAG_DISCARD 0x1
-#define EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT 0x2
-#define EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN 0x4
-#define EXT4_IOC_CHECKPOINT_FLAG_VALID (EXT4_IOC_CHECKPOINT_FLAG_DISCARD | \
- EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT | \
- EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
-
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
* ioctl commands in 32 bit emulation
@@ -765,12 +792,6 @@ enum {
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
#endif
-/*
- * Returned by EXT4_IOC_GET_ES_CACHE as an additional possible flag.
- * It indicates that the entry in extent status cache is for a hole.
- */
-#define EXT4_FIEMAP_EXTENT_HOLE 0x08000000
-
/* Max physical block we can address w/o extents */
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
@@ -841,15 +862,6 @@ struct ext4_inode {
__le32 i_projid; /* Project ID */
};
-struct move_extent {
- __u32 reserved; /* should be zero */
- __u32 donor_fd; /* donor file descriptor */
- __u64 orig_start; /* logical start offset in block for orig */
- __u64 donor_start; /* logical start offset in block for donor */
- __u64 len; /* block length to be moved */
- __u64 moved_len; /* moved block length */
-};
-
#define EXT4_EPOCH_BITS 2
#define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1)
#define EXT4_NSEC_MASK (~0UL << EXT4_EPOCH_BITS)
@@ -891,64 +903,80 @@ struct move_extent {
* affected filesystem before 2242.
*/
-static inline __le32 ext4_encode_extra_time(struct timespec64 *time)
+static inline __le32 ext4_encode_extra_time(struct timespec64 ts)
{
- u32 extra =((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK;
- return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
+ u32 extra = ((ts.tv_sec - (s32)ts.tv_sec) >> 32) & EXT4_EPOCH_MASK;
+ return cpu_to_le32(extra | (ts.tv_nsec << EXT4_EPOCH_BITS));
}
-static inline void ext4_decode_extra_time(struct timespec64 *time,
- __le32 extra)
+static inline struct timespec64 ext4_decode_extra_time(__le32 base,
+ __le32 extra)
{
+ struct timespec64 ts = { .tv_sec = (signed)le32_to_cpu(base) };
+
if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK)))
- time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
- time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ ts.tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+ ts.tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ return ts;
}
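/*
 * Worked round-trip through the encode/decode pair above, with values chosen
 * to land past the 2038 cutoff (illustrative only):
 *
 *	// tv_sec = 2^32 (year 2106): low 32 bits go in the base field,
 *	// epoch bit 1 goes in the extra field
 *	struct timespec64 in = { .tv_sec = 0x100000000LL, .tv_nsec = 7 };
 *	__le32 base  = cpu_to_le32((u32)in.tv_sec);	// == 0
 *	__le32 extra = ext4_encode_extra_time(in);	// epoch == 1, nsec == 7
 *	struct timespec64 out = ext4_decode_extra_time(base, extra);
 *	// out.tv_sec == 0x100000000LL, out.tv_nsec == 7
 */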
-#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
+#define EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, ts) \
do { \
- if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) {\
- (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
- (raw_inode)->xtime ## _extra = \
- ext4_encode_extra_time(&(inode)->xtime); \
- } \
- else \
- (raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (inode)->xtime.tv_sec, S32_MIN, S32_MAX)); \
+ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) { \
+ (raw_inode)->xtime = cpu_to_le32((ts).tv_sec); \
+ (raw_inode)->xtime ## _extra = ext4_encode_extra_time(ts); \
+ } else \
+ (raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (ts).tv_sec, S32_MIN, S32_MAX)); \
} while (0)
-#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
-do { \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
- (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
- (raw_inode)->xtime ## _extra = \
- ext4_encode_extra_time(&(einode)->xtime); \
+#define EXT4_INODE_SET_ATIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_atime, inode, raw_inode, inode_get_atime(inode))
+
+#define EXT4_INODE_SET_MTIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_mtime, inode, raw_inode, inode_get_mtime(inode))
+
+#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_ctime, inode, raw_inode, inode_get_ctime(inode))
+
+#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ EXT4_INODE_SET_XTIME_VAL(xtime, &((einode)->vfs_inode), \
+ raw_inode, (einode)->xtime)
+
+#define EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode) \
+ (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra) ? \
+ ext4_decode_extra_time((raw_inode)->xtime, \
+ (raw_inode)->xtime ## _extra) : \
+ (struct timespec64) { \
+ .tv_sec = (signed)le32_to_cpu((raw_inode)->xtime) \
+ })
+
+#define EXT4_INODE_GET_ATIME(inode, raw_inode) \
+do { \
+ inode_set_atime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_atime, inode, raw_inode)); \
} while (0)
-#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \
+#define EXT4_INODE_GET_MTIME(inode, raw_inode) \
do { \
- (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \
- if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) { \
- ext4_decode_extra_time(&(inode)->xtime, \
- raw_inode->xtime ## _extra); \
- } \
- else \
- (inode)->xtime.tv_nsec = 0; \
+ inode_set_mtime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_mtime, inode, raw_inode)); \
} while (0)
+#define EXT4_INODE_GET_CTIME(inode, raw_inode) \
+do { \
+ inode_set_ctime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_ctime, inode, raw_inode)); \
+} while (0)
-#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
-do { \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
- (einode)->xtime.tv_sec = \
- (signed)le32_to_cpu((raw_inode)->xtime); \
- else \
- (einode)->xtime.tv_sec = 0; \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
- ext4_decode_extra_time(&(einode)->xtime, \
- raw_inode->xtime ## _extra); \
- else \
- (einode)->xtime.tv_nsec = 0; \
+#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
+do { \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ (einode)->xtime = \
+ EXT4_INODE_GET_XTIME_VAL(xtime, &(einode->vfs_inode), \
+ raw_inode); \
+ else \
+ (einode)->xtime = (struct timespec64){0, 0}; \
} while (0)
#define i_disk_version osd1.linux1.l_i_version
@@ -996,11 +1024,13 @@ do { \
* where the second inode has larger inode number
* than the first
* I_DATA_SEM_QUOTA - Used for quota inodes only
+ * I_DATA_SEM_EA - Used for ea_inodes only
*/
enum {
I_DATA_SEM_NORMAL = 0,
I_DATA_SEM_OTHER,
I_DATA_SEM_QUOTA,
+ I_DATA_SEM_EA
};
@@ -1028,7 +1058,7 @@ struct ext4_inode_info {
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_mutex even when reading would cause contention
+ * data. Taking i_rwsem even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
@@ -1046,6 +1076,8 @@ struct ext4_inode_info {
/* Fast commit related info */
+ /* For tracking dentry create updates */
+ struct list_head i_fc_dilist;
struct list_head i_fc_list; /*
* inodes that need fast commit
* protected by sbi->s_fc_lock.
@@ -1057,14 +1089,16 @@ struct ext4_inode_info {
/* End of lblk range that needs to be committed in this fast commit */
ext4_lblk_t i_fc_lblk_len;
- /* Number of ongoing updates on this inode */
- atomic_t i_fc_updates;
+ spinlock_t i_raw_lock; /* protects updates to the raw inode */
/* Fast commit wait queue for this inode */
wait_queue_head_t i_fc_wait;
- /* Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len */
- struct mutex i_fc_lock;
+ /*
+ * Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len
+ * and inode's EXT4_FC_STATE_COMMITTING state bit.
+ */
+ spinlock_t i_fc_lock;
/*
* i_disksize keeps track of what the inode size is ON DISK, not
@@ -1097,8 +1131,6 @@ struct ext4_inode_info {
struct inode vfs_inode;
struct jbd2_inode *jinode;
- spinlock_t i_raw_lock; /* protects updates to the raw inode */
-
/*
* File creation time. Its function is same as that of
* struct timespec64 i_{a,c,m}time in the generic inode.
@@ -1107,8 +1139,12 @@ struct ext4_inode_info {
/* mballoc */
atomic_t i_prealloc_active;
- struct list_head i_prealloc_list;
- spinlock_t i_prealloc_lock;
+
+ /* allocation reservation info for delalloc */
+ /* In case of bigalloc, this refers to clusters rather than blocks */
+ unsigned int i_reserved_data_blocks;
+ struct rb_root i_prealloc_node;
+ rwlock_t i_prealloc_lock;
/* extents status tree */
struct ext4_es_tree i_es_tree;
@@ -1119,14 +1155,12 @@ struct ext4_inode_info {
ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for
extents to shrink. Protected by
i_es_lock */
+ u64 i_es_seq; /* Change counter for extents.
+ Protected by i_es_lock */
/* ialloc */
ext4_group_t i_last_alloc_group;
- /* allocation reservation info for delalloc */
- /* In case of bigalloc, this refer to clusters rather than blocks */
- unsigned int i_reserved_data_blocks;
-
/* pending cluster reservations for bigalloc file systems */
struct ext4_pending_tree i_pending_tree;
@@ -1141,6 +1175,7 @@ struct ext4_inode_info {
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;
#endif
+ spinlock_t i_block_reservation_lock;
/* Lock protecting lists below */
spinlock_t i_completed_io_lock;
@@ -1150,9 +1185,6 @@ struct ext4_inode_info {
*/
struct list_head i_rsv_conversion_list;
struct work_struct i_rsv_conversion_work;
- atomic_t i_unwritten; /* Nr. of inflight conversions pending */
-
- spinlock_t i_block_reservation_lock;
/*
* Transactions that contain inode's metadata needed to complete
@@ -1162,13 +1194,21 @@ struct ext4_inode_info {
tid_t i_datasync_tid;
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
__u32 i_csum_seed;
kprojid_t i_projid;
+
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_inode_info *i_crypt_info;
+#endif
+
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info;
+#endif
};
/*
@@ -1254,6 +1294,7 @@ struct ext4_inode_info {
#define EXT4_MOUNT2_MB_OPTIMIZE_SCAN 0x00000080 /* Optimize group
* scanning in mballoc
*/
+#define EXT4_MOUNT2_ABORT 0x00000100 /* Abort filesystem */
#define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \
~EXT4_MOUNT_##opt
@@ -1271,15 +1312,13 @@ struct ext4_inode_info {
#define ext4_test_and_set_bit __test_and_set_bit_le
#define ext4_set_bit __set_bit_le
-#define ext4_set_bit_atomic ext2_set_bit_atomic
#define ext4_test_and_clear_bit __test_and_clear_bit_le
#define ext4_clear_bit __clear_bit_le
-#define ext4_clear_bit_atomic ext2_clear_bit_atomic
#define ext4_test_bit test_bit_le
#define ext4_find_next_zero_bit find_next_zero_bit_le
#define ext4_find_next_bit find_next_bit_le
-extern void ext4_set_bits(void *bm, int cur, int len);
+extern void mb_set_bits(void *bm, int cur, int len);
/*
* Maximal mount counts between two filesystem checks
@@ -1298,6 +1337,8 @@ extern void ext4_set_bits(void *bm, int cur, int len);
/* Metadata checksum algorithm codes */
#define EXT4_CRC32C_CHKSUM 1
+#define EXT4_LABEL_MAX 16
+
/*
* Structure of the super block
*/
@@ -1347,7 +1388,7 @@ struct ext4_super_block {
/*60*/ __le32 s_feature_incompat; /* incompatible feature set */
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
-/*78*/ char s_volume_name[16]; /* volume name */
+/*78*/ char s_volume_name[EXT4_LABEL_MAX] __nonstring; /* volume name */
/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */
/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
/*
@@ -1372,7 +1413,7 @@ struct ext4_super_block {
__le32 s_first_meta_bg; /* First metablock block group */
__le32 s_mkfs_time; /* When the filesystem was created */
__le32 s_jnl_blocks[17]; /* Backup of the journal inode */
- /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
+ /* 64bit support valid if EXT4_FEATURE_INCOMPAT_64BIT */
/*150*/ __le32 s_blocks_count_hi; /* Blocks count */
__le32 s_r_blocks_count_hi; /* Reserved blocks count */
__le32 s_free_blocks_count_hi; /* Free blocks count */
@@ -1428,7 +1469,9 @@ struct ext4_super_block {
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
__le32 s_orphan_file_inum; /* Inode for tracking orphan inodes */
- __le32 s_reserved[94]; /* Padding to the end of the block */
+ __le16 s_def_resuid_hi;
+ __le16 s_def_resgid_hi;
+ __le32 s_reserved[93]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -1436,12 +1479,6 @@ struct ext4_super_block {
#ifdef __KERNEL__
-#ifdef CONFIG_FS_ENCRYPTION
-#define DUMMY_ENCRYPTION_ENABLED(sbi) ((sbi)->s_dummy_enc_policy.policy != NULL)
-#else
-#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
-#endif
-
/* Number of quota types we support */
#define EXT4_MAXQUOTAS 3
@@ -1515,11 +1552,13 @@ struct ext4_sb_info {
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
struct buffer_head * s_sbh; /* Buffer containing the super block */
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
+ /* Array of bh's for the block group descriptors */
struct buffer_head * __rcu *s_group_desc;
unsigned int s_mount_opt;
unsigned int s_mount_opt2;
unsigned long s_mount_flags;
unsigned int s_def_mount_opt;
+ unsigned int s_def_mount_opt2;
ext4_fsblk_t s_sb_block;
atomic64_t s_resv_clusters;
kuid_t s_resuid;
@@ -1557,7 +1596,7 @@ struct ext4_sb_info {
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
- struct block_device *s_journal_bdev;
+ struct file *s_journal_bdev_file;
#ifdef CONFIG_QUOTA
/* Names of quota files with journalled quota */
char __rcu *s_qf_names[EXT4_MAXQUOTAS];
@@ -1583,16 +1622,14 @@ struct ext4_sb_info {
unsigned short *s_mb_offsets;
unsigned int *s_mb_maxs;
unsigned int s_group_info_size;
- unsigned int s_mb_free_pending;
- struct list_head s_freed_data_list; /* List of blocks to be freed
+ atomic_t s_mb_free_pending;
+ struct list_head s_freed_data_list[2]; /* List of blocks to be freed
after commit completed */
struct list_head s_discard_list;
struct work_struct s_discard_work;
atomic_t s_retry_alloc_pending;
- struct rb_root s_mb_avg_fragment_size_root;
- rwlock_t s_mb_rb_lock;
- struct list_head *s_mb_largest_free_orders;
- rwlock_t *s_mb_largest_free_orders_locks;
+ struct xarray *s_mb_avg_fragment_size;
+ struct xarray *s_mb_largest_free_orders;
/* tunables */
unsigned long s_stripe;
@@ -1603,28 +1640,32 @@ struct ext4_sb_info {
unsigned int s_mb_stats;
unsigned int s_mb_order2_reqs;
unsigned int s_mb_group_prealloc;
- unsigned int s_mb_max_inode_prealloc;
unsigned int s_max_dir_size_kb;
- /* where last allocation was done - for stream allocation */
- unsigned long s_mb_last_group;
- unsigned long s_mb_last_start;
unsigned int s_mb_prefetch;
unsigned int s_mb_prefetch_limit;
+ unsigned int s_mb_best_avail_max_trim_order;
+ unsigned int s_sb_update_sec;
+ unsigned int s_sb_update_kb;
+
+ /* where last allocation was done - for stream allocation */
+ ext4_group_t *s_mb_last_groups;
+ unsigned int s_mb_nr_global_goals;
/* stats for buddy allocator */
atomic_t s_bal_reqs; /* number of reqs with len > 1 */
atomic_t s_bal_success; /* we found long enough chunks */
atomic_t s_bal_allocated; /* in blocks */
atomic_t s_bal_ex_scanned; /* total extents scanned */
+ atomic_t s_bal_cX_ex_scanned[EXT4_MB_NUM_CRS]; /* total extents scanned */
atomic_t s_bal_groups_scanned; /* number of groups scanned */
atomic_t s_bal_goals; /* goal hits */
+ atomic_t s_bal_stream_goals; /* stream allocation global goal hits */
+ atomic_t s_bal_len_goals; /* len goal hits */
atomic_t s_bal_breaks; /* too long searches */
atomic_t s_bal_2orders; /* 2^order hits */
- atomic_t s_bal_cr0_bad_suggestions;
- atomic_t s_bal_cr1_bad_suggestions;
- atomic64_t s_bal_cX_groups_considered[4];
- atomic64_t s_bal_cX_hits[4];
- atomic64_t s_bal_cX_failed[4]; /* cX loop didn't find blocks */
+ atomic64_t s_bal_cX_groups_considered[EXT4_MB_NUM_CRS];
+ atomic64_t s_bal_cX_hits[EXT4_MB_NUM_CRS];
+ atomic64_t s_bal_cX_failed[EXT4_MB_NUM_CRS]; /* cX loop didn't find blocks */
atomic_t s_mb_buddies_generated; /* number of buddies generated */
atomic64_t s_mb_generation_time;
atomic_t s_mb_lost_chunks;
@@ -1661,16 +1702,18 @@ struct ext4_sb_info {
struct task_struct *s_mmp_tsk;
/* record the last minlen when FITRIM is called. */
- atomic_t s_last_trim_minblks;
+ unsigned long s_last_trim_minblks;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
+ /* minimum folio order of a page cache allocation */
+ u16 s_min_folio_order;
+ /* supported maximum folio order, 0 means not supported */
+ u16 s_max_folio_order;
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_csum_seed;
/* Reclaim extents from extent status tree */
- struct shrinker s_es_shrinker;
+ struct shrinker *s_es_shrinker;
struct list_head s_es_list; /* List of inodes with reclaimable extents */
long s_es_nr_inode;
struct ext4_es_stats s_es_stats;
@@ -1693,10 +1736,12 @@ struct ext4_sb_info {
/*
* Barrier between writepages ops and changing any inode's JOURNAL_DATA
- * or EXTENTS flag.
+ * or EXTENTS flag or between writepages ops and changing DELALLOC or
+ * DIOREAD_NOLOCK mount options on remount.
*/
struct percpu_rw_semaphore s_writepages_rwsem;
struct dax_device *s_daxdev;
+ u64 s_dax_part_off;
#ifdef CONFIG_EXT4_DEBUG
unsigned long s_simulate_fail;
#endif
@@ -1720,14 +1765,21 @@ struct ext4_sb_info {
const char *s_last_error_func;
time64_t s_last_error_time;
/*
- * If we are in a context where we cannot update error information in
- * the on-disk superblock, we queue this work to do it.
+ * If we are in a context where we cannot update the on-disk
+ * superblock, we queue the work here. This is used to update
+ * the error information in the superblock, and for periodic
+ * updates of the superblock called from the commit callback
+ * function.
*/
- struct work_struct s_error_work;
+ struct work_struct s_sb_upd_work;
- /* Ext4 fast commit stuff */
+ /* Atomic write unit values in bytes */
+ unsigned int s_awu_min;
+ unsigned int s_awu_max;
+
+ /* Ext4 fast commit sub transaction ID */
atomic_t s_fc_subtid;
- atomic_t s_fc_ineligible_updates;
+
/*
* After commit starts, the main queue gets locked, and the further
* updates get added in the staging queue.
@@ -1744,10 +1796,10 @@ struct ext4_sb_info {
* following fields:
* ei->i_fc_list, s_fc_dentry_q, s_fc_q, s_fc_bytes, s_fc_bh.
*/
- spinlock_t s_fc_lock;
+ struct mutex s_fc_lock;
struct buffer_head *s_fc_bh;
struct ext4_fc_stats s_fc_stats;
- u64 s_fc_avg_commit_time;
+ tid_t s_fc_ineligible_tid;
#ifdef CONFIG_EXT4_DEBUG
int s_fc_debug_max_replay;
#endif
@@ -1763,6 +1815,30 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
return container_of(inode, struct ext4_inode_info, vfs_inode);
}
+static inline int ext4_writepages_down_read(struct super_block *sb)
+{
+ percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem);
+ return memalloc_nofs_save();
+}
+
+static inline void ext4_writepages_up_read(struct super_block *sb, int ctx)
+{
+ memalloc_nofs_restore(ctx);
+ percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem);
+}
+
+static inline int ext4_writepages_down_write(struct super_block *sb)
+{
+ percpu_down_write(&EXT4_SB(sb)->s_writepages_rwsem);
+ return memalloc_nofs_save();
+}
+
+static inline void ext4_writepages_up_write(struct super_block *sb, int ctx)
+{
+ memalloc_nofs_restore(ctx);
+ percpu_up_write(&EXT4_SB(sb)->s_writepages_rwsem);
+}
+
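+/*
+ * The helpers above bundle the writepages rwsem with a NOFS allocation scope;
+ * a short sketch of the intended pairing (the work in the middle is a
+ * hypothetical placeholder):
+ *
+ *	int ctx = ext4_writepages_down_read(sb);
+ *
+ *	// writeback work: allocations here implicitly run without __GFP_FS
+ *	ret = do_writepages_work(inode, wbc);
+ *
+ *	ext4_writepages_up_read(sb, ctx);
+ */
+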
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
@@ -1770,6 +1846,18 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
+static inline int ext4_get_resuid(struct ext4_super_block *es)
+{
+ return le16_to_cpu(es->s_def_resuid) |
+ le16_to_cpu(es->s_def_resuid_hi) << 16;
+}
+
+static inline int ext4_get_resgid(struct ext4_super_block *es)
+{
+ return le16_to_cpu(es->s_def_resgid) |
+ le16_to_cpu(es->s_def_resgid_hi) << 16;
+}
+
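+/*
+ * Quick worked example of the lo/hi split these helpers reassemble
+ * (field values are hypothetical):
+ *
+ *	// s_def_resuid == cpu_to_le16(0x0005), s_def_resuid_hi == cpu_to_le16(0x0001)
+ *	uid = ext4_get_resuid(es);	// 0x0005 | (0x0001 << 16) == 65541
+ */
+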
/*
* Returns: sbi->field[index]
* Used to access an array element from the following sbi fields which require
@@ -1792,11 +1880,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
*/
enum {
EXT4_MF_MNTDIR_SAMPLED,
- EXT4_MF_FS_ABORTED, /* Fatal error detected */
EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
- EXT4_MF_FC_COMMITTING /* File system underoing a fast
- * commit.
- */
+ EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */
};
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
@@ -1841,14 +1926,6 @@ static inline bool ext4_simulate_fail(struct super_block *sb,
return false;
}
-static inline void ext4_simulate_fail_bh(struct super_block *sb,
- struct buffer_head *bh,
- unsigned long code)
-{
- if (!IS_ERR(bh) && ext4_simulate_fail(sb, code))
- clear_buffer_uptodate(bh);
-}
-
/*
* Error number codes for s_{first,last}_error_errno
*
@@ -1879,7 +1956,6 @@ static inline void ext4_simulate_fail_bh(struct super_block *sb,
* Inode dynamic state flags
*/
enum {
- EXT4_STATE_JDATA, /* journaled data exists */
EXT4_STATE_NEW, /* inode is newly created */
EXT4_STATE_XATTR, /* has in-inode xattrs */
EXT4_STATE_NO_EXPAND, /* No space for expansion */
@@ -1891,6 +1967,7 @@ enum {
EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */
EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
EXT4_STATE_FC_COMMITTING, /* Fast commit ongoing */
+ EXT4_STATE_FC_FLUSHING_DATA, /* Fast commit flushing data */
EXT4_STATE_ORPHAN_FILE, /* Inode orphaned in orphan file */
};
@@ -1951,6 +2028,16 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
+ * Check whether the inode is tracked as orphan (either in orphan file or
+ * orphan list).
+ */
+static inline bool ext4_inode_orphan_tracked(struct inode *inode)
+{
+ return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
+ !list_empty(&EXT4_I(inode)->i_orphan);
+}
+
+/*
* Codes for operating systems
*/
#define EXT4_OS_LINUX 0
@@ -2216,13 +2303,30 @@ extern int ext4_feature_set_ok(struct super_block *sb, int readonly);
/*
* Superblock flags
*/
-#define EXT4_FLAGS_RESIZING 0
-#define EXT4_FLAGS_SHUTDOWN 1
-#define EXT4_FLAGS_BDEV_IS_DAX 2
+enum {
+ EXT4_FLAGS_RESIZING, /* Avoid superblock update and resize race */
+ EXT4_FLAGS_SHUTDOWN, /* Prevent access to the file system */
+ EXT4_FLAGS_BDEV_IS_DAX, /* Current block device supports DAX */
+ EXT4_FLAGS_EMERGENCY_RO,/* Emergency read-only due to fs errors */
+};
+
+static inline int ext4_forced_shutdown(struct super_block *sb)
+{
+ return test_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
+}
+
+static inline int ext4_emergency_ro(struct super_block *sb)
+{
+ return test_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
+}
-static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
+static inline int ext4_emergency_state(struct super_block *sb)
{
- return test_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
+ if (unlikely(ext4_forced_shutdown(sb)))
+ return -EIO;
+ if (unlikely(ext4_emergency_ro(sb)))
+ return -EROFS;
+ return 0;
}
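/*
 * Callers are expected to check this first and propagate the errno; a minimal
 * sketch of the pattern:
 *
 *	ret = ext4_emergency_state(sb);
 *	if (unlikely(ret))
 *		return ret;	// -EIO after shutdown, -EROFS when emergency read-only
 */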
/*
@@ -2256,10 +2360,19 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
#define EXT4_DEFM_NODELALLOC 0x0800
/*
- * Default journal batch times
+ * Default journal batch times and ioprio.
*/
#define EXT4_DEF_MIN_BATCH_TIME 0
#define EXT4_DEF_MAX_BATCH_TIME 15000 /* 15ms */
+#define EXT4_DEF_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+
+
+/*
+ * Default values for superblock update
+ */
+#define EXT4_DEF_SB_UPDATE_INTERVAL_SEC (3600) /* seconds (1 hour) */
+#define EXT4_DEF_SB_UPDATE_INTERVAL_KB (16384) /* kilobytes (16MB) */
+
/*
* Minimum number of groups in a flexgroup before we separate out
@@ -2271,6 +2384,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
* Structure of a directory entry
*/
#define EXT4_NAME_LEN 255
+/*
+ * Base length of the ext4 directory entry excluding the name length
+ */
+#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
struct ext4_dir_entry {
__le32 inode; /* Inode number */
@@ -2311,9 +2428,9 @@ struct ext4_dir_entry_2 {
((struct ext4_dir_entry_hash *) \
(((void *)(entry)) + \
((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND)))
-#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash)
+#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(entry)->hash)
#define EXT4_DIRENT_MINOR_HASH(entry) \
- le32_to_cpu(EXT4_DIRENT_HASHES(de)->minor_hash)
+ le32_to_cpu(EXT4_DIRENT_HASHES(entry)->minor_hash)
static inline bool ext4_hash_in_dirent(const struct inode *inode)
{
@@ -2379,29 +2496,19 @@ static inline unsigned int ext4_dir_rec_len(__u8 name_len,
return (rec_len & ~EXT4_DIR_ROUND);
}
-/*
- * If we ever get support for fs block sizes > page_size, we'll need
- * to remove the #if statements in the next two functions...
- */
static inline unsigned int
ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
{
unsigned len = le16_to_cpu(dlen);
-#if (PAGE_SIZE >= 65536)
if (len == EXT4_MAX_REC_LEN || len == 0)
return blocksize;
return (len & 65532) | ((len & 3) << 16);
-#else
- return len;
-#endif
}
static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
{
- if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
- BUG();
-#if (PAGE_SIZE >= 65536)
+ BUG_ON((len > blocksize) || (blocksize > (1 << 18)) || (len & 3));
if (len < 65536)
return cpu_to_le16(len);
if (len == blocksize) {
@@ -2411,9 +2518,6 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
return cpu_to_le16(0);
}
return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
-#else
- return cpu_to_le16(len);
-#endif
}
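/*
 * Worked example of the large-blocksize encoding above, using a hypothetical
 * 70000-byte record on a 128 KiB block size:
 *
 *	// encode: len >= 65536 and len != blocksize
 *	__le16 dlen = ext4_rec_len_to_disk(70000, 131072);
 *	//   (70000 & 65532) | ((70000 >> 16) & 3) == 4464 | 1 == 4465
 *
 *	// decode restores the original length
 *	unsigned len = ext4_rec_len_from_disk(dlen, 131072);
 *	//   (4465 & 65532) | ((4465 & 3) << 16) == 4464 + 65536 == 70000
 */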
/*
@@ -2436,23 +2540,11 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
#define DX_HASH_HALF_MD4_UNSIGNED 4
#define DX_HASH_TEA_UNSIGNED 5
#define DX_HASH_SIPHASH 6
+#define DX_HASH_LAST DX_HASH_SIPHASH
-static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
+static inline u32 ext4_chksum(u32 crc, const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- BUG_ON(crypto_shash_update(&desc.shash, address, length));
-
- return *(u32 *)desc.ctx;
+ return crc32c(crc, address, length);
}
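/*
 * Callers seed the crc32c chain with the precomputed UUID/inode seed and run
 * it over the buffer, exactly as in the bitmap checksum hunk earlier:
 *
 *	csum = ext4_chksum(sbi->s_csum_seed, bh->b_data, sb->s_blocksize);
 */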
#ifdef __KERNEL__
@@ -2484,8 +2576,8 @@ struct ext4_filename {
#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_str crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
- struct fscrypt_str cf_name;
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct qstr cf_name;
#endif
};
@@ -2527,6 +2619,8 @@ struct dir_private_info {
__u32 curr_hash;
__u32 curr_minor_hash;
__u32 next_hash;
+ u64 cookie;
+ bool initialized;
};
/* calculate the first block number of the group */
@@ -2665,16 +2759,16 @@ struct mmpd_data {
/* bitmap.c */
extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
-void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
-int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ struct buffer_head *bh);
+int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
-void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ struct buffer_head *bh);
+void ext4_block_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
struct buffer_head *bh);
-int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+int ext4_block_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
struct buffer_head *bh);
@@ -2686,10 +2780,6 @@ extern void ext4_get_group_no_and_offset(struct super_block *sb,
extern ext4_group_t ext4_get_group_number(struct super_block *sb,
ext4_fsblk_t block);
-extern unsigned int ext4_block_group(struct super_block *sb,
- ext4_fsblk_t blocknr);
-extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
- ext4_fsblk_t blocknr);
extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
ext4_group_t group);
@@ -2701,10 +2791,11 @@ extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
extern int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
s64 nclusters, unsigned int flags);
extern ext4_fsblk_t ext4_count_free_clusters(struct super_block *);
-extern void ext4_check_blocks_bitmap(struct super_block *);
extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
ext4_group_t block_group,
struct buffer_head ** bh);
+extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ ext4_group_t group);
extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
@@ -2720,96 +2811,54 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern int ext4_fname_setup_ci_filename(struct inode *dir,
- const struct qstr *iname,
- struct ext4_filename *fname);
-#endif
+ const struct qstr *iname,
+ struct ext4_filename *fname);
-#ifdef CONFIG_FS_ENCRYPTION
-static inline void ext4_fname_from_fscrypt_name(struct ext4_filename *dst,
- const struct fscrypt_name *src)
+static inline void ext4_fname_free_ci_filename(struct ext4_filename *fname)
{
- memset(dst, 0, sizeof(*dst));
-
- dst->usr_fname = src->usr_fname;
- dst->disk_name = src->disk_name;
- dst->hinfo.hash = src->hash;
- dst->hinfo.minor_hash = src->minor_hash;
- dst->crypto_buf = src->crypto_buf;
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
}
-
-static inline int ext4_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup,
- struct ext4_filename *fname)
+#else
+static inline int ext4_fname_setup_ci_filename(struct inode *dir,
+ const struct qstr *iname,
+ struct ext4_filename *fname)
{
- struct fscrypt_name name;
- int err;
-
- err = fscrypt_setup_filename(dir, iname, lookup, &name);
- if (err)
- return err;
-
- ext4_fname_from_fscrypt_name(fname, &name);
-
-#ifdef CONFIG_UNICODE
- err = ext4_fname_setup_ci_filename(dir, iname, fname);
-#endif
- return err;
+ return 0;
}
-static inline int ext4_fname_prepare_lookup(struct inode *dir,
- struct dentry *dentry,
- struct ext4_filename *fname)
+static inline void ext4_fname_free_ci_filename(struct ext4_filename *fname)
{
- struct fscrypt_name name;
- int err;
-
- err = fscrypt_prepare_lookup(dir, dentry, &name);
- if (err)
- return err;
+}
+#endif
- ext4_fname_from_fscrypt_name(fname, &name);
+/* ext4 encryption related stuff goes here (crypto.c) */
+#ifdef CONFIG_FS_ENCRYPTION
+extern const struct fscrypt_operations ext4_cryptops;
-#ifdef CONFIG_UNICODE
- err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
-#endif
- return err;
-}
+int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct ext4_filename *fname);
-static inline void ext4_fname_free_filename(struct ext4_filename *fname)
-{
- struct fscrypt_name name;
+int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct ext4_filename *fname);
- name.crypto_buf = fname->crypto_buf;
- fscrypt_free_filename(&name);
+void ext4_fname_free_filename(struct ext4_filename *fname);
- fname->crypto_buf.name = NULL;
- fname->usr_fname = NULL;
- fname->disk_name.name = NULL;
+int ext4_ioctl_get_encryption_pwsalt(struct file *filp, void __user *arg);
-#ifdef CONFIG_UNICODE
- kfree(fname->cf_name.name);
- fname->cf_name.name = NULL;
-#endif
-}
#else /* !CONFIG_FS_ENCRYPTION */
static inline int ext4_fname_setup_filename(struct inode *dir,
const struct qstr *iname,
int lookup,
struct ext4_filename *fname)
{
- int err = 0;
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *) iname->name;
fname->disk_name.len = iname->len;
-#ifdef CONFIG_UNICODE
- err = ext4_fname_setup_ci_filename(dir, iname, fname);
-#endif
-
- return err;
+ return ext4_fname_setup_ci_filename(dir, iname, fname);
}
static inline int ext4_fname_prepare_lookup(struct inode *dir,
@@ -2821,10 +2870,13 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir,
static inline void ext4_fname_free_filename(struct ext4_filename *fname)
{
-#ifdef CONFIG_UNICODE
- kfree(fname->cf_name.name);
- fname->cf_name.name = NULL;
-#endif
+ ext4_fname_free_ci_filename(fname);
+}
+
+static inline int ext4_ioctl_get_encryption_pwsalt(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
}
#endif /* !CONFIG_FS_ENCRYPTION */
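A minimal sketch (not part of the patch) of how a caller pairs these helpers now that the CONFIG_UNICODE and CONFIG_FS_ENCRYPTION variants share one calling convention; the surrounding directory-search logic and error handling are assumptions:

static int example_lookup_name(struct inode *dir, const struct qstr *name)
{
	struct ext4_filename fname;
	int err;

	/* Works with or without fscrypt/casefolding compiled in. */
	err = ext4_fname_setup_filename(dir, name, 1 /* lookup */, &fname);
	if (err)
		return err;
	/* ... search the directory using the prepared fname ... */
	ext4_fname_free_filename(&fname);
	return 0;
}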
@@ -2842,8 +2894,7 @@ extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
struct ext4_dir_entry_2 *dirent,
struct fscrypt_str *ent_name);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
-extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
- struct buffer_head *bh,
+extern int ext4_find_dest_de(struct inode *dir, struct buffer_head *bh,
void *buf, int buf_size,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **dest_de);
@@ -2883,7 +2934,7 @@ extern int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
/* ialloc.c */
extern int ext4_mark_inode_used(struct super_block *sb, int ino);
-extern struct inode *__ext4_new_inode(struct user_namespace *, handle_t *,
+extern struct inode *__ext4_new_inode(struct mnt_idmap *, handle_t *,
struct inode *, umode_t,
const struct qstr *qstr, __u32 goal,
uid_t *owner, __u32 i_flags,
@@ -2891,11 +2942,11 @@ extern struct inode *__ext4_new_inode(struct user_namespace *, handle_t *,
int nblocks);
#define ext4_new_inode(handle, dir, mode, qstr, goal, owner, i_flags) \
- __ext4_new_inode(&init_user_ns, (handle), (dir), (mode), (qstr), \
+ __ext4_new_inode(&nop_mnt_idmap, (handle), (dir), (mode), (qstr), \
(goal), (owner), i_flags, 0, 0, 0)
-#define ext4_new_inode_start_handle(mnt_userns, dir, mode, qstr, goal, owner, \
+#define ext4_new_inode_start_handle(idmap, dir, mode, qstr, goal, owner, \
type, nblocks) \
- __ext4_new_inode((mnt_userns), NULL, (dir), (mode), (qstr), (goal), (owner), \
+ __ext4_new_inode((idmap), NULL, (dir), (mode), (qstr), (goal), (owner), \
0, (type), __LINE__, (nblocks))
@@ -2903,7 +2954,6 @@ extern void ext4_free_inode(handle_t *, struct inode *);
extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
extern unsigned long ext4_count_free_inodes(struct super_block *);
extern unsigned long ext4_count_dirs(struct super_block *);
-extern void ext4_check_inodes_bitmap(struct super_block *);
extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
extern int ext4_init_inode_table(struct super_block *sb,
ext4_group_t group, int barrier);
@@ -2925,29 +2975,26 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
struct dentry *dentry);
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
-void ext4_fc_start_ineligible(struct super_block *sb, int reason);
-void ext4_fc_stop_ineligible(struct super_block *sb);
-void ext4_fc_start_update(struct inode *inode);
-void ext4_fc_stop_update(struct inode *inode);
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle);
void ext4_fc_del(struct inode *inode);
bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block);
void ext4_fc_replay_cleanup(struct super_block *sb);
int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
int __init ext4_fc_init_dentry_cache(void);
+void ext4_fc_destroy_dentry_cache(void);
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ int len, int replay);
/* mballoc.c */
extern const struct seq_operations ext4_mb_seq_groups_ops;
extern const struct seq_operations ext4_mb_seq_structs_summary_ops;
-extern long ext4_mb_stats;
-extern long ext4_mb_max_to_scan;
extern int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset);
extern int ext4_mb_init(struct super_block *);
-extern int ext4_mb_release(struct super_block *);
+extern void ext4_mb_release(struct super_block *);
extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
struct ext4_allocation_request *, int *);
-extern int ext4_mb_reserve_blocks(struct super_block *, int);
-extern void ext4_discard_preallocations(struct inode *, unsigned int);
+extern void ext4_discard_preallocations(struct inode *);
extern int __init ext4_init_mballoc(void);
extern void ext4_exit_mballoc(void);
extern ext4_group_t ext4_mb_prefetch(struct super_block *sb,
@@ -2968,12 +3015,17 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
extern void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid);
extern void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
- int len, int state);
+ int len, bool state);
+static inline bool ext4_mb_cr_expensive(enum criteria cr)
+{
+ return cr >= CR_GOAL_LEN_SLOW;
+}
/* inode.c */
void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei);
int ext4_inode_is_fast_symlink(struct inode *inode);
+void ext4_check_map_extents_env(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
@@ -2994,13 +3046,16 @@ int ext4_walk_page_buffers(handle_t *handle,
struct buffer_head *bh));
int do_journal_get_write_access(handle_t *handle, struct inode *inode,
struct buffer_head *bh);
+void ext4_set_inode_mapping_order(struct inode *inode);
#define FALL_BACK_TO_NONDELALLOC 1
#define CONVERT_INLINE_DATA 2
typedef enum {
EXT4_IGET_NORMAL = 0,
EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
- EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
+ EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */
+ EXT4_IGET_BAD = 0x0004, /* Allow to iget a bad inode */
+ EXT4_IGET_EA_INODE = 0x0008 /* Inode should contain an EA value */
} ext4_iget_flags;
extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
@@ -3011,15 +3066,15 @@ extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
__ext4_iget((sb), (ino), (flags), __func__, __LINE__)
extern int ext4_write_inode(struct inode *, struct writeback_control *);
-extern int ext4_setattr(struct user_namespace *, struct dentry *,
+extern int ext4_setattr(struct mnt_idmap *, struct dentry *,
struct iattr *);
-extern int ext4_getattr(struct user_namespace *, const struct path *,
+extern u32 ext4_dio_alignment(struct inode *inode);
+extern int ext4_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
extern void ext4_evict_inode(struct inode *);
extern void ext4_clear_inode(struct inode *);
-extern int ext4_file_getattr(struct user_namespace *, const struct path *,
+extern int ext4_file_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
-extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
@@ -3029,12 +3084,17 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
extern int ext4_break_layouts(struct inode *);
-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+extern int ext4_truncate_page_cache_block_range(struct inode *inode,
+ loff_t start, loff_t end);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
extern void ext4_set_inode_flags(struct inode *, bool init);
extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
-extern int ext4_writepage_trans_blocks(struct inode *);
+extern int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+extern int ext4_chunk_trans_extent(struct inode *inode, int nrblocks);
+extern int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+ int pextents);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
@@ -3046,6 +3106,17 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
ext4_fsblk_t pblk, ext4_lblk_t len);
+static inline bool is_special_ino(struct super_block *sb, unsigned long ino)
+{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+ return (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
+ ino == le32_to_cpu(es->s_usr_quota_inum) ||
+ ino == le32_to_cpu(es->s_grp_quota_inum) ||
+ ino == le32_to_cpu(es->s_prj_quota_inum) ||
+ ino == le32_to_cpu(es->s_orphan_file_inum);
+}
+
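A hedged usage sketch, not taken from the patch: a hypothetical caller using is_special_ino() to refuse ordinary access to reserved or quota/orphan-file inodes; the error code is an assumption.

static int example_check_ino(struct super_block *sb, unsigned long ino)
{
	if (is_special_ino(sb, ino))
		return -EPERM;	/* reserved or quota/orphan-file inode */
	return 0;
}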
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
@@ -3057,10 +3128,12 @@ extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
-int ext4_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
-int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int ext4_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
+int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
extern void ext4_reset_inode_seed(struct inode *inode);
+int ext4_update_overhead(struct super_block *sb, bool force);
+int ext4_force_shutdown(struct super_block *sb, u32 flags);
/* migrate.c */
extern int ext4_ext_migrate(struct inode *);
@@ -3096,20 +3169,26 @@ extern int ext4_group_extend(struct super_block *sb,
struct ext4_super_block *es,
ext4_fsblk_t n_blocks_count);
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
+extern unsigned int ext4_list_backups(struct super_block *sb,
+ unsigned int *three, unsigned int *five,
+ unsigned int *seven);
/* super.c */
extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
- sector_t block, int op_flags);
+ sector_t block, blk_opf_t op_flags);
extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
sector_t block);
-extern void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
- bh_end_io_t *end_io);
-extern int ext4_read_bh(struct buffer_head *bh, int op_flags,
- bh_end_io_t *end_io);
-extern int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait);
+extern struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
+ sector_t block);
+extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
+ bh_end_io_t *end_io, bool simu_fail);
+extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+ bh_end_io_t *end_io, bool simu_fail);
+extern int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block);
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
+extern __le32 ext4_superblock_csum(struct ext4_super_block *es);
extern void ext4_superblock_csum_set(struct super_block *sb);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
@@ -3118,6 +3197,8 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
ext4_group_t block_group,
unsigned int flags);
+extern unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
+ ext4_group_t block_group);
extern __printf(7, 8)
void __ext4_error(struct super_block *, const char *, unsigned int, bool,
@@ -3277,18 +3358,10 @@ extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
extern int ext4_register_li_request(struct super_block *sb,
ext4_group_t first_not_zeroed);
-static inline int ext4_has_metadata_csum(struct super_block *sb)
-{
- WARN_ON_ONCE(ext4_has_feature_metadata_csum(sb) &&
- !EXT4_SB(sb)->s_chksum_driver);
-
- return ext4_has_feature_metadata_csum(sb) &&
- (EXT4_SB(sb)->s_chksum_driver != NULL);
-}
-
static inline int ext4_has_group_desc_csum(struct super_block *sb)
{
- return ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb);
+ return ext4_has_feature_gdt_csum(sb) ||
+ ext4_has_feature_metadata_csum(sb);
}
#define ext4_read_incompat_64bit_val(es, name) \
@@ -3349,19 +3422,6 @@ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
raw_inode->i_size_high = cpu_to_le32(i_size >> 32);
}
-static inline
-struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
- ext4_group_t group)
-{
- struct ext4_group_info **grp_info;
- long indexv, indexh;
- BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
- indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
- indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
- grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
- return grp_info[indexh];
-}
-
/*
* Reading s_groups_count requires using smp_rmb() afterwards. See
* the locking protocol documented in the comments of ext4_group_add()
@@ -3386,6 +3446,13 @@ static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi)
return 1 << sbi->s_log_groups_per_flex;
}
+static inline loff_t ext4_get_maxbytes(struct inode *inode)
+{
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ return inode->i_sb->s_maxbytes;
+ return EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+}
+
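Illustrative only (not part of the patch): bounding a write position by the per-inode limit that ext4_get_maxbytes() reports; returning -EFBIG here is an assumption borrowed from generic write checks.

static int example_check_write_limit(struct inode *inode, loff_t pos)
{
	if (pos >= ext4_get_maxbytes(inode))
		return -EFBIG;	/* beyond the extent/bitmap-mapped file limit */
	return 0;
}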
#define ext4_std_error(sb, errno) \
do { \
if ((errno)) \
@@ -3402,7 +3469,7 @@ do { \
#define EXT4_FREECLUSTERS_WATERMARK 0
#endif
-/* Update i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_disksize. Requires i_rwsem to avoid races with truncate */
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
{
WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
@@ -3413,7 +3480,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
up_write(&EXT4_I(inode)->i_data_sem);
}
-/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_size, i_disksize. Requires i_rwsem to avoid races with truncate */
static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
{
int changed = 0;
@@ -3441,6 +3508,8 @@ struct ext4_group_info {
ext4_grpblk_t bb_first_free; /* first free block */
ext4_grpblk_t bb_free; /* total free blocks */
ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
+ int bb_avg_fragment_size_order; /* order of average
+ fragment in BG */
ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
ext4_group_t bb_group; /* Group number */
struct list_head bb_prealloc_list;
@@ -3448,8 +3517,6 @@ struct ext4_group_info {
void *bb_bitmap;
#endif
struct rw_semaphore alloc_sem;
- struct rb_node bb_avg_fragment_size_rb;
- struct list_head bb_largest_free_order_node;
ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block
* regions, index is order.
* bb_counters[3] = 5 means
@@ -3500,23 +3567,28 @@ static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
}
+static inline bool ext4_try_lock_group(struct super_block *sb, ext4_group_t group)
+{
+ if (!spin_trylock(ext4_group_lock_ptr(sb, group)))
+ return false;
+ /*
+ * We're able to grab the lock right away, so drop the lock
+ * contention counter.
+ */
+ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
+ return true;
+}
+
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
- spinlock_t *lock = ext4_group_lock_ptr(sb, group);
- if (spin_trylock(lock))
- /*
- * We're able to grab the lock right away, so drop the
- * lock contention counter.
- */
- atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
- else {
+ if (!ext4_try_lock_group(sb, group)) {
/*
* The lock is busy, so bump the contention counter,
* and then wait on the spin lock.
*/
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
EXT4_MAX_CONTENTION);
- spin_lock(lock);
+ spin_lock(ext4_group_lock_ptr(sb, group));
}
}
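A minimal sketch, not part of the patch, of how a scanning path might use the new ext4_try_lock_group() to skip a contended group instead of spinning; ext4_unlock_group() is the existing counterpart, and the scan logic is an assumption.

static bool example_scan_group(struct super_block *sb, ext4_group_t group)
{
	if (!ext4_try_lock_group(sb, group))
		return false;	/* busy: caller moves on to the next group */
	/* ... inspect per-group state under the group lock ... */
	ext4_unlock_group(sb, group);
	return true;
}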
@@ -3570,30 +3642,21 @@ extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
/* inline.c */
extern int ext4_get_max_inline_size(struct inode *inode);
extern int ext4_find_inline_data_nolock(struct inode *inode);
-extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
- unsigned int len);
extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
+extern void ext4_update_final_de(void *de_buf, int old_size, int new_size);
-extern int ext4_readpage_inline(struct inode *inode, struct page *page);
+int ext4_readpage_inline(struct inode *inode, struct folio *folio);
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- unsigned flags,
- struct page **pagep);
-extern int ext4_write_inline_data_end(struct inode *inode,
- loff_t pos, unsigned len,
- unsigned copied,
- struct page *page);
-extern struct buffer_head *
-ext4_journalled_write_inline_data(struct inode *inode,
- unsigned len,
- struct page *page);
-extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- unsigned flags,
- struct page **pagep,
- void **fsdata);
+ struct folio **foliop);
+int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct folio *folio);
+extern int ext4_generic_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ struct folio **foliop,
+ void **fsdata, bool da);
extern int ext4_try_add_inline_entry(handle_t *handle,
struct ext4_filename *fname,
struct inode *dir, struct inode *inode);
@@ -3621,9 +3684,7 @@ extern bool empty_inline_dir(struct inode *dir, int *has_inline_data);
extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
struct ext4_dir_entry_2 **parent_de,
int *retval);
-extern int ext4_inline_data_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- int *has_inline, __u64 start, __u64 len);
+extern void *ext4_read_inline_link(struct inode *inode);
struct iomap;
extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
@@ -3642,16 +3703,16 @@ static inline int ext4_has_inline_data(struct inode *inode)
extern const struct inode_operations ext4_dir_inode_operations;
extern const struct inode_operations ext4_special_inode_operations;
extern struct dentry *ext4_get_parent(struct dentry *child);
-extern struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int blocksize, int csum_size,
- unsigned int parent_ino, int dotdot_real_len);
+extern int ext4_init_dirblock(handle_t *handle, struct inode *inode,
+ struct buffer_head *dir_block,
+ unsigned int parent_ino, void *inline_buf,
+ int inline_size);
extern void ext4_initialize_dirent_tail(struct buffer_head *bh,
unsigned int blocksize);
extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
struct buffer_head *bh);
-extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
- struct inode *inode);
+extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+ struct inode *inode, struct dentry *dentry);
extern int __ext4_link(struct inode *dir, struct inode *inode,
struct dentry *dentry);
@@ -3675,7 +3736,7 @@ static inline void ext4_set_de_type(struct super_block *sb,
/* readpages.c */
extern int ext4_mpage_readpages(struct inode *inode,
- struct readahead_control *rac, struct page *page);
+ struct readahead_control *rac, struct folio *folio);
extern int __init ext4_init_post_read_processing(void);
extern void ext4_exit_post_read_processing(void);
@@ -3701,6 +3762,9 @@ extern int ext4_inode_block_valid(struct inode *inode,
unsigned int count);
extern int ext4_check_blockref(const char *, unsigned int,
struct inode *, __le32 *, unsigned int);
+extern int ext4_sb_block_valid(struct super_block *sb, struct inode *inode,
+ ext4_fsblk_t start_blk, unsigned int count);
+
/* extents.c */
struct ext4_ext_path;
@@ -3725,6 +3789,8 @@ extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len);
+extern int ext4_convert_unwritten_extents_atomic(handle_t *handle,
+ struct inode *inode, loff_t offset, ssize_t len);
extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
ext4_io_end_t *io_end);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
@@ -3732,13 +3798,14 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
struct ext4_ext_path *path);
-extern int ext4_ext_insert_extent(handle_t *, struct inode *,
- struct ext4_ext_path **,
- struct ext4_extent *, int);
+extern struct ext4_ext_path *ext4_ext_insert_extent(
+ handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext, int gb_flags);
extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
- struct ext4_ext_path **,
+ struct ext4_ext_path *,
int flags);
-extern void ext4_ext_drop_refs(struct ext4_ext_path *);
+extern void ext4_free_ext_path(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -3782,10 +3849,8 @@ extern void ext4_io_submit_init(struct ext4_io_submit *io,
struct writeback_control *wbc);
extern void ext4_end_io_rsv_work(struct work_struct *work);
extern void ext4_io_submit(struct ext4_io_submit *io);
-extern int ext4_bio_write_page(struct ext4_io_submit *io,
- struct page *page,
- int len,
- bool keep_towrite);
+int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
+ size_t len);
extern struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end);
extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
@@ -3828,36 +3893,19 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
}
-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-
-/* For ioend & aio unwritten conversion wait queues */
-#define EXT4_WQ_HASH_SZ 37
-#define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\
- EXT4_WQ_HASH_SZ])
-extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
-
extern int ext4_resize_begin(struct super_block *sb);
-extern void ext4_resize_end(struct super_block *sb);
+extern int ext4_resize_end(struct super_block *sb, bool update_backups);
-static inline void ext4_set_io_unwritten_flag(struct inode *inode,
- struct ext4_io_end *io_end)
+static inline void ext4_set_io_unwritten_flag(struct ext4_io_end *io_end)
{
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN))
io_end->flag |= EXT4_IO_END_UNWRITTEN;
- atomic_inc(&EXT4_I(inode)->i_unwritten);
- }
}
static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
- struct inode *inode = io_end->inode;
-
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ if (io_end->flag & EXT4_IO_END_UNWRITTEN)
io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
- /* Wake up anyone waiting on unwritten extent conversion */
- if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
- wake_up_all(ext4_ioend_wq(inode));
- }
}
extern const struct iomap_ops ext4_iomap_ops;
@@ -3877,6 +3925,17 @@ static inline int ext4_buffer_uptodate(struct buffer_head *bh)
return buffer_uptodate(bh);
}
+static inline bool ext4_inode_can_atomic_write(struct inode *inode)
+{
+
+ return S_ISREG(inode->i_mode) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+ EXT4_SB(inode->i_sb)->s_awu_min > 0;
+}
+
+extern int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ loff_t pos, unsigned len,
+ get_block_t *get_block);
#endif /* __KERNEL__ */
#define EFSBADCRC EBADMSG /* Bad CRC detected */
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 26435f3a3094..c484125d963f 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -31,13 +31,6 @@
#define CHECK_BINSEARCH__
/*
- * If EXT_STATS is defined then stats numbers are collected.
- * These number will be displayed at umount time.
- */
-#define EXT_STATS_
-
-
-/*
* ext4_inode has i_block array (60 bytes total).
* The first 12 bytes store ext4_extent_header;
* the remainder stores an array of ext4_extent.
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 6def7339056d..05e5946ed9b3 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -63,15 +63,18 @@ static void ext4_put_nojournal(handle_t *handle)
*/
static int ext4_journal_check_start(struct super_block *sb)
{
+ int ret;
journal_t *journal;
might_sleep();
- if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
- return -EIO;
+ ret = ext4_emergency_state(sb);
+ if (unlikely(ret))
+ return ret;
- if (sb_rdonly(sb))
+ if (WARN_ON_ONCE(sb_rdonly(sb)))
return -EROFS;
+
WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
journal = EXT4_SB(sb)->s_journal;
/*
@@ -86,15 +89,21 @@ static int ext4_journal_check_start(struct super_block *sb)
return 0;
}
-handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+handle_t *__ext4_journal_start_sb(struct inode *inode,
+ struct super_block *sb, unsigned int line,
int type, int blocks, int rsv_blocks,
int revoke_creds)
{
journal_t *journal;
int err;
-
- trace_ext4_journal_start(sb, blocks, rsv_blocks, revoke_creds,
- _RET_IP_);
+ if (inode)
+ trace_ext4_journal_start_inode(inode, blocks, rsv_blocks,
+ revoke_creds, type,
+ _RET_IP_);
+ else
+ trace_ext4_journal_start_sb(sb, blocks, rsv_blocks,
+ revoke_creds, type,
+ _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0)
return ERR_PTR(err);
@@ -162,6 +171,8 @@ int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
{
if (!ext4_handle_valid(handle))
return 0;
+ if (is_handle_aborted(handle))
+ return -EROFS;
if (jbd2_handle_buffer_credits(handle) >= check_cred &&
handle->h_revoke_credits >= revoke_cred)
return 0;
@@ -197,7 +208,7 @@ static void ext4_journal_abort_handle(const char *caller, unsigned int line,
static void ext4_check_bdev_write_error(struct super_block *sb)
{
- struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+ struct address_space *mapping = sb->s_bdev->bd_mapping;
struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;
@@ -226,9 +237,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
might_sleep();
- if (bh->b_bdev->bd_super)
- ext4_check_bdev_write_error(bh->b_bdev->bd_super);
-
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
if (err) {
@@ -236,8 +244,10 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
handle, err);
return err;
}
- }
- if (trigger_type == EXT4_JTR_NONE || !ext4_has_metadata_csum(sb))
+ } else
+ ext4_check_bdev_write_error(sb);
+ if (trigger_type == EXT4_JTR_NONE ||
+ !ext4_has_feature_metadata_csum(sb))
return 0;
BUG_ON(trigger_type >= EXT4_JOURNAL_TRIGGER_COUNT);
jbd2_journal_set_triggers(bh,
@@ -265,14 +275,20 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
trace_ext4_forget(inode, is_metadata, blocknr);
BUFFER_TRACE(bh, "enter");
- jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
- "data mode %x\n",
+ ext4_debug("forgetting bh %p: is_metadata=%d, mode %o, data mode %x\n",
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
- /* In the no journal case, we can just do a bforget and return */
+ /*
+ * In the no journal case, we should wait for any ongoing buffer
+ * I/O to complete and then do a forget.
+ */
if (!ext4_handle_valid(handle)) {
- bforget(bh);
+ if (bh) {
+ clear_buffer_dirty(bh);
+ wait_on_buffer(bh);
+ __bforget(bh);
+ }
return 0;
}
@@ -325,7 +341,8 @@ int __ext4_journal_get_create_access(const char *where, unsigned int line,
err);
return err;
}
- if (trigger_type == EXT4_JTR_NONE || !ext4_has_metadata_csum(sb))
+ if (trigger_type == EXT4_JTR_NONE ||
+ !ext4_has_feature_metadata_csum(sb))
return 0;
BUG_ON(trigger_type >= EXT4_JOURNAL_TRIGGER_COUNT);
jbd2_journal_set_triggers(bh,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 0e4fa644df01..63d17c5201b5 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -122,90 +122,6 @@
#define EXT4_HT_EXT_CONVERT 11
#define EXT4_HT_MAX 12
-/**
- * struct ext4_journal_cb_entry - Base structure for callback information.
- *
- * This struct is a 'seed' structure for a using with your own callback
- * structs. If you are using callbacks you must allocate one of these
- * or another struct of your own definition which has this struct
- * as it's first element and pass it to ext4_journal_callback_add().
- */
-struct ext4_journal_cb_entry {
- /* list information for other callbacks attached to the same handle */
- struct list_head jce_list;
-
- /* Function to call with this callback structure */
- void (*jce_func)(struct super_block *sb,
- struct ext4_journal_cb_entry *jce, int error);
-
- /* user data goes here */
-};
-
-/**
- * ext4_journal_callback_add: add a function to call after transaction commit
- * @handle: active journal transaction handle to register callback on
- * @func: callback function to call after the transaction has committed:
- * @sb: superblock of current filesystem for transaction
- * @jce: returned journal callback data
- * @rc: journal state at commit (0 = transaction committed properly)
- * @jce: journal callback data (internal and function private data struct)
- *
- * The registered function will be called in the context of the journal thread
- * after the transaction for which the handle was created has completed.
- *
- * No locks are held when the callback function is called, so it is safe to
- * call blocking functions from within the callback, but the callback should
- * not block or run for too long, or the filesystem will be blocked waiting for
- * the next transaction to commit. No journaling functions can be used, or
- * there is a risk of deadlock.
- *
- * There is no guaranteed calling order of multiple registered callbacks on
- * the same transaction.
- */
-static inline void _ext4_journal_callback_add(handle_t *handle,
- struct ext4_journal_cb_entry *jce)
-{
- /* Add the jce to transaction's private list */
- list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
-}
-
-static inline void ext4_journal_callback_add(handle_t *handle,
- void (*func)(struct super_block *sb,
- struct ext4_journal_cb_entry *jce,
- int rc),
- struct ext4_journal_cb_entry *jce)
-{
- struct ext4_sb_info *sbi =
- EXT4_SB(handle->h_transaction->t_journal->j_private);
-
- /* Add the jce to transaction's private list */
- jce->jce_func = func;
- spin_lock(&sbi->s_md_lock);
- _ext4_journal_callback_add(handle, jce);
- spin_unlock(&sbi->s_md_lock);
-}
-
-
-/**
- * ext4_journal_callback_del: delete a registered callback
- * @handle: active journal transaction handle on which callback was registered
- * @jce: registered journal callback entry to unregister
- * Return true if object was successfully removed
- */
-static inline bool ext4_journal_callback_try_del(handle_t *handle,
- struct ext4_journal_cb_entry *jce)
-{
- bool deleted;
- struct ext4_sb_info *sbi =
- EXT4_SB(handle->h_transaction->t_journal->j_private);
-
- spin_lock(&sbi->s_md_lock);
- deleted = !list_empty(&jce->jce_list);
- list_del_init(&jce->jce_list);
- spin_unlock(&sbi->s_md_lock);
- return deleted;
-}
-
int
ext4_mark_iloc_dirty(handle_t *handle,
struct inode *inode,
@@ -261,9 +177,9 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
(bh))
-handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks,
- int revoke_creds);
+handle_t *__ext4_journal_start_sb(struct inode *inode, struct super_block *sb,
+ unsigned int line, int type, int blocks,
+ int rsv_blocks, int revoke_creds);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -303,7 +219,7 @@ static inline int ext4_trans_default_revoke_credits(struct super_block *sb)
}
#define ext4_journal_start_sb(sb, type, nblocks) \
- __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0, \
+ __ext4_journal_start_sb(NULL, (sb), __LINE__, (type), (nblocks), 0,\
ext4_trans_default_revoke_credits(sb))
#define ext4_journal_start(inode, type, nblocks) \
@@ -323,7 +239,7 @@ static inline handle_t *__ext4_journal_start(struct inode *inode,
int blocks, int rsv_blocks,
int revoke_creds)
{
- return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
+ return __ext4_journal_start_sb(inode, inode->i_sb, line, type, blocks,
rsv_blocks, revoke_creds);
}
@@ -403,10 +319,10 @@ static inline int ext4_journal_ensure_credits(handle_t *handle, int credits,
revoke_creds, 0);
}
-static inline int ext4_journal_blocks_per_page(struct inode *inode)
+static inline int ext4_journal_blocks_per_folio(struct inode *inode)
{
if (EXT4_JOURNAL(inode) != NULL)
- return jbd2_journal_blocks_per_page(inode);
+ return jbd2_journal_blocks_per_folio(inode);
return 0;
}
@@ -491,7 +407,7 @@ static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
/*
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
- * i_mutex for direct I/O reads. This only works for extent-based
+ * i_rwsem for direct I/O reads. This only works for extent-based
* files, and it doesn't work if data journaling is enabled, since the
* dioread_nolock code uses b_private to pass information back to the
* I/O completion handler, and this conflicts with the jbd's use of
@@ -513,4 +429,33 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
return 1;
}
+/*
+ * Pass the journal explicitly, as it may not be cached in sbi->s_journal in
+ * some cases.
+ */
+static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal)
+{
+ int err = 0;
+
+ /*
+ * At this point only two things can be operating on the journal:
+ * the JBD2 thread performing a transaction commit, and s_sb_upd_work
+ * issuing a superblock update through the journal. Once we set
+ * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not
+ * queue s_sb_upd_work and ext4_force_commit() makes sure any
+ * ext4_handle_error() calls from the running transaction commit are
+ * finished. Hence no new s_sb_upd_work can be queued after we
+ * flush it here.
+ */
+ ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);
+
+ ext4_force_commit(sbi->s_sb);
+ flush_work(&sbi->s_sb_upd_work);
+
+ err = jbd2_journal_destroy(journal);
+ sbi->s_journal = NULL;
+
+ return err;
+}
+
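A hedged sketch (not part of the patch) of an unmount-style caller handing the cached journal to ext4_journal_destroy(); the surrounding teardown is an assumption.

static int example_release_journal(struct ext4_sb_info *sbi)
{
	if (!sbi->s_journal)
		return 0;
	/* Flushes pending sb updates, then destroys and clears s_journal. */
	return ext4_journal_destroy(sbi, sbi->s_journal);
}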
#endif /* _EXT4_JBD2_H */
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0ecf819bf189..2cf5759ba689 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -27,8 +27,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
-#include <linux/backing-dev.h>
#include <linux/iomap.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"
@@ -50,10 +50,9 @@ static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)eh,
EXT4_EXTENT_TAIL_OFFSET(eh));
return cpu_to_le32(csum);
}
@@ -63,7 +62,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
{
struct ext4_extent_tail *et;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
et = find_ext4_extent_tail(eh);
@@ -77,19 +76,18 @@ static void ext4_extent_block_csum_set(struct inode *inode,
{
struct ext4_extent_tail *et;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
et = find_ext4_extent_tail(eh);
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
-static int ext4_split_extent_at(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- ext4_lblk_t split,
- int split_flag,
- int flags);
+static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t split,
+ int split_flag, int flags);
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
@@ -97,15 +95,40 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
}
+static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
+{
+ brelse(path->p_bh);
+ path->p_bh = NULL;
+}
+
+static void ext4_ext_drop_refs(struct ext4_ext_path *path)
+{
+ int depth, i;
+
+ if (IS_ERR_OR_NULL(path))
+ return;
+ depth = path->p_depth;
+ for (i = 0; i <= depth; i++, path++)
+ ext4_ext_path_brelse(path);
+}
+
+void ext4_free_ext_path(struct ext4_ext_path *path)
+{
+ if (IS_ERR_OR_NULL(path))
+ return;
+ ext4_ext_drop_refs(path);
+ kfree(path);
+}
+
/*
* Make sure 'handle' has at least 'check_cred' credits. If not, restart
* transaction with 'restart_cred' credits. The function drops i_data_sem
@@ -304,19 +327,18 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
return size;
}
-static inline int
+static inline struct ext4_ext_path *
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
- struct ext4_ext_path **ppath, ext4_lblk_t lblk,
+ struct ext4_ext_path *path, ext4_lblk_t lblk,
int nofail)
{
- struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
- int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
+ int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_SPLIT_NOMERGE;
if (nofail)
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
- return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
+ return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
flags);
}
@@ -372,7 +394,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
{
unsigned short entries;
ext4_lblk_t lblock = 0;
- ext4_lblk_t prev = 0;
+ ext4_lblk_t cur = 0;
if (eh->eh_entries == 0)
return 1;
@@ -396,11 +418,11 @@ static int ext4_valid_extent_entries(struct inode *inode,
/* Check for overlapping extents */
lblock = le32_to_cpu(ext->ee_block);
- if ((lblock <= prev) && prev) {
+ if (lblock < cur) {
*pblk = ext4_ext_pblock(ext);
return 0;
}
- prev = lblock + ext4_ext_get_actual_len(ext) - 1;
+ cur = lblock + ext4_ext_get_actual_len(ext);
ext++;
entries--;
}
@@ -420,13 +442,13 @@ static int ext4_valid_extent_entries(struct inode *inode,
/* Check for overlapping index extents */
lblock = le32_to_cpu(ext_idx->ei_block);
- if ((lblock <= prev) && prev) {
+ if (lblock < cur) {
*pblk = ext4_idx_pblock(ext_idx);
return 0;
}
ext_idx++;
entries--;
- prev = lblock;
+ cur = lblock + 1;
}
}
return 1;
@@ -460,6 +482,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid eh_entries";
goto corrupted;
}
+ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
+ error_msg = "eh_entries is 0 but eh_depth is > 0";
+ goto corrupted;
+ }
if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
@@ -541,7 +567,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
- err = ext4_read_bh(bh, 0, NULL);
+ err = ext4_read_bh(bh, 0, NULL, false);
if (err < 0)
goto errout;
}
@@ -584,6 +610,8 @@ int ext4_ext_precache(struct inode *inode)
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return 0; /* not an extent-mapped inode */
+ ext4_check_map_extents_env(inode);
+
down_read(&ei->i_data_sem);
depth = ext_depth(inode);
@@ -612,8 +640,7 @@ int ext4_ext_precache(struct inode *inode)
*/
if ((i == depth) ||
path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
continue;
}
@@ -632,8 +659,7 @@ int ext4_ext_precache(struct inode *inode)
ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
up_read(&ei->i_data_sem);
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
return ret;
}
@@ -667,7 +693,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
struct ext4_extent *ex;
int i;
- if (!path)
+ if (IS_ERR_OR_NULL(path))
return;
eh = path[depth].p_hdr;
@@ -720,19 +746,6 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
-void ext4_ext_drop_refs(struct ext4_ext_path *path)
-{
- int depth, i;
-
- if (!path)
- return;
- depth = path->p_depth;
- for (i = 0; i <= depth; i++, path++) {
- brelse(path->p_bh);
- path->p_bh = NULL;
- }
-}
-
/*
* ext4_ext_binsearch_idx:
* binary search for the closest index of the given block
@@ -872,11 +885,10 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
- struct ext4_ext_path **orig_path, int flags)
+ struct ext4_ext_path *path, int flags)
{
struct ext4_extent_header *eh;
struct buffer_head *bh;
- struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
gfp_t gfp_flags = GFP_NOFS;
@@ -897,7 +909,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
ext4_ext_drop_refs(path);
if (depth > path[0].p_maxdepth) {
kfree(path);
- *orig_path = path = NULL;
+ path = NULL;
}
}
if (!path) {
@@ -951,10 +963,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
return path;
err:
- ext4_ext_drop_refs(path);
- kfree(path);
- if (orig_path)
- *orig_path = NULL;
+ ext4_free_ext_path(path);
return ERR_PTR(ret);
}
@@ -1002,6 +1011,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
ix = curp->p_idx;
}
+ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+ return -EFSCORRUPTED;
+ }
+
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
BUG_ON(len < 0);
if (len > 0) {
@@ -1011,11 +1025,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
}
- if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
- EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
- return -EFSCORRUPTED;
- }
-
ix->ei_block = cpu_to_le32(logical);
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
@@ -1387,15 +1396,15 @@ out:
* finds empty index and adds new leaf.
* if no free index is found, then it requests in-depth growing.
*/
-static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
- unsigned int mb_flags,
- unsigned int gb_flags,
- struct ext4_ext_path **ppath,
- struct ext4_extent *newext)
+static struct ext4_ext_path *
+ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ unsigned int mb_flags, unsigned int gb_flags,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_ext_path *curp;
int depth, i, err = 0;
+ ext4_lblk_t ee_block = le32_to_cpu(newext->ee_block);
repeat:
i = depth = ext_depth(inode);
@@ -1414,42 +1423,38 @@ repeat:
* entry: create all needed subtree and add new leaf */
err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
if (err)
- goto out;
+ goto errout;
/* refill path */
- path = ext4_find_extent(inode,
- (ext4_lblk_t)le32_to_cpu(newext->ee_block),
- ppath, gb_flags);
- if (IS_ERR(path))
- err = PTR_ERR(path);
- } else {
- /* tree is full, time to grow in depth */
- err = ext4_ext_grow_indepth(handle, inode, mb_flags);
- if (err)
- goto out;
+ path = ext4_find_extent(inode, ee_block, path, gb_flags);
+ return path;
+ }
- /* refill path */
- path = ext4_find_extent(inode,
- (ext4_lblk_t)le32_to_cpu(newext->ee_block),
- ppath, gb_flags);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
- }
+ /* tree is full, time to grow in depth */
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags);
+ if (err)
+ goto errout;
- /*
- * only first (depth 0 -> 1) produces free space;
- * in all other cases we have to split the grown tree
- */
- depth = ext_depth(inode);
- if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
- /* now we need to split */
- goto repeat;
- }
+ /* refill path */
+ path = ext4_find_extent(inode, ee_block, path, gb_flags);
+ if (IS_ERR(path))
+ return path;
+
+ /*
+ * only first (depth 0 -> 1) produces free space;
+ * in all other cases we have to split the grown tree
+ */
+ depth = ext_depth(inode);
+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
+ /* now we need to split */
+ goto repeat;
}
-out:
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -1496,8 +1501,7 @@ static int ext4_ext_search_left(struct inode *inode,
EXT4_ERROR_INODE(inode,
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
- EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
- le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
+ le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
depth);
return -EFSCORRUPTED;
}
@@ -1527,7 +1531,7 @@ static int ext4_ext_search_left(struct inode *inode,
static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys,
- struct ext4_extent *ret_ex)
+ struct ext4_extent *ret_ex, int flags)
{
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
@@ -1601,7 +1605,8 @@ got_index:
ix++;
while (++depth < path->p_depth) {
/* subtract from p_depth to get proper eh_depth */
- bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
+ bh = read_extent_tree_block(inode, ix, path->p_depth - depth,
+ flags);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
@@ -1609,7 +1614,7 @@ got_index:
put_bh(bh);
}
- bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
+ bh = read_extent_tree_block(inode, ix, path->p_depth - depth, flags);
if (IS_ERR(bh))
return PTR_ERR(bh);
eh = ext_block_hdr(bh);
@@ -1742,12 +1747,23 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
break;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
- break;
+ goto clean;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
- break;
+ goto clean;
}
+ return 0;
+
+clean:
+ /*
+ * The path[k].p_bh is either unmodified or with no verified bit
+ * set (see ext4_ext_get_access()). So just clear the verified bit
+ * of the successfully modified extent buffers, which will force
+ * these extents to be checked to avoid using inconsistent data.
+ */
+ while (++k < depth)
+ clear_buffer_verified(path[k].p_bh);
return err;
}
@@ -1869,7 +1885,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
path[0].p_hdr->eh_max = cpu_to_le16(max_root);
- brelse(path[1].p_bh);
+ ext4_ext_path_brelse(path + 1);
ext4_free_blocks(handle, inode, NULL, blk, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
@@ -1957,16 +1973,15 @@ out:
* inserts requested extent as new one into the tree,
* creating new leaf in the no-space case.
*/
-int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_extent *newext, int gb_flags)
+struct ext4_ext_path *
+ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext, int gb_flags)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
struct ext4_extent *nearex; /* nearest extent */
- struct ext4_ext_path *npath = NULL;
- int depth, len, err;
+ int depth, len, err = 0;
ext4_lblk_t next;
int mb_flags = 0, unwritten;
@@ -1974,18 +1989,20 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
mb_flags |= EXT4_MB_DELALLOC_RESERVED;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
/* try to insert block into found extent and return */
- if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
+ if (ex && !(gb_flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE)) {
/*
* Try to see whether we should rather test the extent on
@@ -2019,13 +2036,12 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
- return err;
+ goto errout;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
- eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
@@ -2045,7 +2061,7 @@ prepend:
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
- return err;
+ goto errout;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_block = newext->ee_block;
@@ -2054,7 +2070,6 @@ prepend:
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
- eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
@@ -2071,21 +2086,26 @@ prepend:
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
+ struct ext4_ext_path *npath;
+
ext_debug(inode, "next leaf block - %u\n", next);
- BUG_ON(npath != NULL);
npath = ext4_find_extent(inode, next, NULL, gb_flags);
- if (IS_ERR(npath))
- return PTR_ERR(npath);
+ if (IS_ERR(npath)) {
+ err = PTR_ERR(npath);
+ goto errout;
+ }
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
ext_debug(inode, "next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
+ ext4_free_ext_path(path);
path = npath;
goto has_space;
}
ext_debug(inode, "next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+ ext4_free_ext_path(npath);
}
/*
@@ -2094,10 +2114,10 @@ prepend:
*/
if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
mb_flags |= EXT4_MB_USE_RESERVED;
- err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
- ppath, newext);
- if (err)
- goto cleanup;
+ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
@@ -2106,7 +2126,7 @@ has_space:
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto cleanup;
+ goto errout;
if (!nearex) {
/* there is no extent in this leaf, create first one */
@@ -2161,21 +2181,23 @@ has_space:
merge:
/* try to merge extents */
- if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
+ if (!(gb_flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE))
ext4_ext_try_to_merge(handle, inode, path, nearex);
-
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
if (err)
- goto cleanup;
+ goto errout;
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+ if (err)
+ goto errout;
-cleanup:
- ext4_ext_drop_refs(npath);
- kfree(npath);
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
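A minimal caller sketch, not taken from the patch, showing the new convention: ext4_find_extent() and ext4_ext_insert_extent() return the (possibly reallocated) path and free it themselves on error, while the caller releases it with ext4_free_ext_path() on success; the handle/flags plumbing here is an assumption.

static int example_insert(handle_t *handle, struct inode *inode,
			  ext4_lblk_t lblk, struct ext4_extent *newext)
{
	struct ext4_ext_path *path;

	path = ext4_find_extent(inode, lblk, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	path = ext4_ext_insert_extent(handle, inode, path, newext, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);	/* old path already freed on error */

	ext4_free_ext_path(path);
	return 0;
}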
static int ext4_fill_es_cache_info(struct inode *inode,
@@ -2191,7 +2213,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
while (block <= end) {
next = 0;
flags = 0;
- if (!ext4_es_lookup_extent(inode, block, &next, &es))
+ if (!ext4_es_lookup_extent(inode, block, &next, &es, NULL))
break;
if (ext4_es_is_unwritten(&es))
flags |= FIEMAP_EXTENT_UNWRITTEN;
@@ -2225,7 +2247,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
/*
- * ext4_ext_determine_hole - determine hole around given block
+ * ext4_ext_find_hole - find hole around given block according to the given path
* @inode: inode we lookup in
* @path: path in extent tree to @lblk
* @lblk: pointer to logical block around which we want to determine hole
@@ -2237,9 +2259,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
* The function returns the length of a hole starting at @lblk. We update @lblk
* to the beginning of the hole if we managed to find it.
*/
-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
- struct ext4_ext_path *path,
- ext4_lblk_t *lblk)
+static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t *lblk)
{
int depth = ext_depth(inode);
struct ext4_extent *ex;
@@ -2267,30 +2289,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
}
/*
- * ext4_ext_put_gap_in_cache:
- * calculate boundaries of the gap that the requested block fits into
- * and cache this gap
- */
-static void
-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
- ext4_lblk_t hole_len)
-{
- struct extent_status es;
-
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
- hole_start + hole_len - 1, &es);
- if (es.es_len) {
- /* There's delayed extent containing lblock? */
- if (es.es_lblk <= hole_start)
- return;
- hole_len = min(es.es_lblk - hole_start, hole_len);
- }
- ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
- ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
- EXTENT_STATUS_HOLE);
-}
-
-/*
* ext4_ext_rm_idx:
* removes index from the index block.
*/
@@ -2299,27 +2297,26 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
{
int err;
ext4_fsblk_t leaf;
+ int k = depth - 1;
/* free index block */
- depth--;
- path = path + depth;
- leaf = ext4_idx_pblock(path->p_idx);
- if (unlikely(path->p_hdr->eh_entries == 0)) {
- EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+ leaf = ext4_idx_pblock(path[k].p_idx);
+ if (unlikely(path[k].p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr->eh_entries == 0", k);
return -EFSCORRUPTED;
}
- err = ext4_ext_get_access(handle, inode, path);
+ err = ext4_ext_get_access(handle, inode, path + k);
if (err)
return err;
- if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
- int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
+ if (path[k].p_idx != EXT_LAST_INDEX(path[k].p_hdr)) {
+ int len = EXT_LAST_INDEX(path[k].p_hdr) - path[k].p_idx;
len *= sizeof(struct ext4_extent_idx);
- memmove(path->p_idx, path->p_idx + 1, len);
+ memmove(path[k].p_idx, path[k].p_idx + 1, len);
}
- le16_add_cpu(&path->p_hdr->eh_entries, -1);
- err = ext4_ext_dirty(handle, inode, path);
+ le16_add_cpu(&path[k].p_hdr->eh_entries, -1);
+ err = ext4_ext_dirty(handle, inode, path + k);
if (err)
return err;
ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
@@ -2328,18 +2325,29 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
- while (--depth >= 0) {
- if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
+ while (--k >= 0) {
+ if (path[k + 1].p_idx != EXT_FIRST_INDEX(path[k + 1].p_hdr))
break;
- path--;
- err = ext4_ext_get_access(handle, inode, path);
+ err = ext4_ext_get_access(handle, inode, path + k);
if (err)
- break;
- path->p_idx->ei_block = (path+1)->p_idx->ei_block;
- err = ext4_ext_dirty(handle, inode, path);
+ goto clean;
+ path[k].p_idx->ei_block = path[k + 1].p_idx->ei_block;
+ err = ext4_ext_dirty(handle, inode, path + k);
if (err)
- break;
+ goto clean;
}
+ return 0;
+
+clean:
+ /*
+	 * The path[k].p_bh is either unmodified or already has its verified
+	 * bit cleared (see ext4_ext_get_access()). So just clear the verified
+	 * bit of the successfully modified extent buffers, which forces these
+	 * extents to be re-checked and avoids using inconsistent data.
+ */
+ while (++k < depth)
+ clear_buffer_verified(path[k].p_bh);
+
return err;
}
@@ -2390,18 +2398,20 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
int index;
- int depth;
/* If we are converting the inline data, only one is needed here. */
if (ext4_has_inline_data(inode))
return 1;
- depth = ext_depth(inode);
-
+ /*
+ * Extent tree can change between the time we estimate credits and
+ * the time we actually modify the tree. Assume the worst case.
+ */
if (extents <= 1)
- index = depth * 2;
+ index = (EXT4_MAX_EXTENT_DEPTH * 2) + extents;
else
- index = depth * 3;
+ index = (EXT4_MAX_EXTENT_DEPTH * 3) +
+ DIV_ROUND_UP(extents, ext4_ext_space_block(inode, 0));
return index;
}
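A quick worked example of the new worst-case estimate (assuming EXT4_MAX_EXTENT_DEPTH is 5, its value in fs/ext4/ext4_extents.h, and a 4K block size where ext4_ext_space_block() yields roughly 340 extents per block): a single-extent change now reserves (5 * 2) + 1 = 11 blocks, and a 1024-extent change reserves (5 * 3) + DIV_ROUND_UP(1024, 340) = 19 blocks, independent of the inode's current (possibly smaller) tree depth.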
@@ -2631,9 +2641,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
unwritten, ex_ee_len);
path[depth].p_ext = ex;
- a = ex_ee_block > start ? ex_ee_block : start;
- b = ex_ee_block+ex_ee_len - 1 < end ?
- ex_ee_block+ex_ee_len - 1 : end;
+ a = max(ex_ee_block, start);
+ b = min(ex_ee_block + ex_ee_len - 1, end);
ext_debug(inode, " border %u:%u\n", a, b);
@@ -2816,6 +2825,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct partial_cluster partial;
handle_t *handle;
int i = 0, err = 0;
+ int flags = EXT4_EX_NOCACHE | EXT4_EX_NOFAIL;
partial.pclu = 0;
partial.lblk = 0;
@@ -2846,8 +2856,7 @@ again:
ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */
- path = ext4_find_extent(inode, end, NULL,
- EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
+ path = ext4_find_extent(inode, end, NULL, flags);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
@@ -2893,11 +2902,12 @@ again:
* fail removing space due to ENOSPC so try to use
* reserved block if that happens.
*/
- err = ext4_force_split_extent_at(handle, inode, &path,
- end + 1, 1);
- if (err < 0)
+ path = ext4_force_split_extent_at(handle, inode, path,
+ end + 1, 1);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
goto out;
-
+ }
} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
partial.state == initial) {
/*
@@ -2912,7 +2922,7 @@ again:
*/
lblk = ex_end + 1;
err = ext4_ext_search_right(inode, path, &lblk, &pblk,
- NULL);
+ NULL, flags);
if (err < 0)
goto out;
if (pblk) {
@@ -2955,8 +2965,7 @@ again:
err = ext4_ext_rm_leaf(handle, inode, path,
&partial, start, end);
/* root level has p_bh == NULL, brelse() eats this */
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
continue;
}
@@ -2989,8 +2998,7 @@ again:
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode, path[i].p_idx,
- depth - i - 1,
- EXT4_EX_NOCACHE);
+ depth - i - 1, flags);
if (IS_ERR(bh)) {
/* should we reset i_size? */
err = PTR_ERR(bh);
@@ -3018,8 +3026,7 @@ again:
err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
ext_debug(inode, "return to level %d\n", i);
}
@@ -3060,8 +3067,7 @@ again:
}
}
out:
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
path = NULL;
if (err == -EAGAIN)
goto again;
@@ -3121,7 +3127,7 @@ void ext4_ext_release(struct super_block *sb)
#endif
}
-static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
+static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
ext4_lblk_t ee_block;
ext4_fsblk_t ee_pblock;
@@ -3132,10 +3138,10 @@ static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
ee_pblock = ext4_ext_pblock(ex);
if (ee_len == 0)
- return 0;
+ return;
- return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
- EXTENT_STATUS_WRITTEN);
+ ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
+ EXTENT_STATUS_WRITTEN, false);
}
/* FIXME!! we need to try to merge to left or right after zero-out */
@@ -3169,16 +3175,14 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 * a> the extent is split into two extents.
 * b> split is not needed, and the extent is just marked.
*
- * return 0 on success.
+ * Return an extent path pointer on success, or an error pointer on failure.
*/
-static int ext4_split_extent_at(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- ext4_lblk_t split,
- int split_flag,
- int flags)
+static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t split,
+ int split_flag, int flags)
{
- struct ext4_ext_path *path = *ppath;
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
struct ext4_extent *ex, newex, orig_ex, zero_ex;
@@ -3220,7 +3224,7 @@ static int ext4_split_extent_at(handle_t *handle,
else
ext4_ext_mark_initialized(ex);
- if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
+ if (!(flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE))
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
@@ -3248,10 +3252,31 @@ static int ext4_split_extent_at(handle_t *handle,
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex2);
- err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
- if (err != -ENOSPC && err != -EDQUOT)
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (!IS_ERR(path))
goto out;
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ return path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+ * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
+ * will not return -ENOMEM, otherwise -ENOMEM will cause a
+ * retry in do_writepages(), and a WARN_ON may be triggered
+ * in ext4_da_update_reserve_space() due to an incorrect
+ * ee_len causing the i_reserved_data_blocks exception.
+ */
+ path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+ return path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+
if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
@@ -3285,7 +3310,7 @@ static int ext4_split_extent_at(handle_t *handle,
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (!err)
/* update extent status tree */
- err = ext4_zeroout_es(inode, &zero_ex);
+ ext4_zeroout_es(inode, &zero_ex);
/* If we failed at this point, we don't know in which
* state the extent tree exactly is so don't try to fix
* length of the original extent as it may do even more
@@ -3302,14 +3327,17 @@ fix_extent_len:
* and err is a non-zero error code.
*/
ext4_ext_dirty(handle, inode, path + path->p_depth);
- return err;
out:
+ if (err) {
+ ext4_free_ext_path(path);
+ path = ERR_PTR(err);
+ }
ext4_ext_show_leaf(inode, path);
- return err;
+ return path;
}
/*
- * ext4_split_extents() splits an extent and mark extent which is covered
+ * ext4_split_extent() splits an extent and marks the extent which is covered
* by @map as split_flags indicates
*
* It may result in splitting the extent into multiple extents (up to three)
@@ -3319,21 +3347,18 @@ out:
 * c> Splits in three extents: Someone is splitting in the middle of the extent
*
*/
-static int ext4_split_extent(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_map_blocks *map,
- int split_flag,
- int flags)
+static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_map_blocks *map,
+ int split_flag, int flags,
+ unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len, depth;
- int err = 0;
int unwritten;
int split_flag1, flags1;
- int allocated = map->m_len;
depth = ext_depth(inode);
ex = path[depth].p_ext;
@@ -3343,35 +3368,33 @@ static int ext4_split_extent(handle_t *handle,
if (map->m_lblk + map->m_len < ee_block + ee_len) {
split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
- flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
+ flags1 = flags | EXT4_GET_BLOCKS_SPLIT_NOMERGE;
if (unwritten)
split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
if (split_flag & EXT4_EXT_DATA_VALID2)
split_flag1 |= EXT4_EXT_DATA_VALID1;
- err = ext4_split_extent_at(handle, inode, ppath,
+ path = ext4_split_extent_at(handle, inode, path,
map->m_lblk + map->m_len, split_flag1, flags1);
- if (err)
- goto out;
- } else {
- allocated = ee_len - (map->m_lblk - ee_block);
- }
- /*
- * Update path is required because previous ext4_split_extent_at() may
- * result in split of original leaf or extent zeroout.
- */
- path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
- if (IS_ERR(path))
- return PTR_ERR(path);
- depth = ext_depth(inode);
- ex = path[depth].p_ext;
- if (!ex) {
- EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
- (unsigned long) map->m_lblk);
- return -EFSCORRUPTED;
+ if (IS_ERR(path))
+ return path;
+ /*
+ * Update path is required because previous ext4_split_extent_at
+ * may result in split of original leaf or extent zeroout.
+ */
+ path = ext4_find_extent(inode, map->m_lblk, path, flags);
+ if (IS_ERR(path))
+ return path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+ EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+ (unsigned long) map->m_lblk);
+ ext4_free_ext_path(path);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ unwritten = ext4_ext_is_unwritten(ex);
}
- unwritten = ext4_ext_is_unwritten(ex);
- split_flag1 = 0;
if (map->m_lblk >= ee_block) {
split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
@@ -3380,15 +3403,20 @@ static int ext4_split_extent(handle_t *handle,
split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT2);
}
- err = ext4_split_extent_at(handle, inode, ppath,
+ path = ext4_split_extent_at(handle, inode, path,
map->m_lblk, split_flag1, flags);
- if (err)
- goto out;
+ if (IS_ERR(path))
+ return path;
}
+ if (allocated) {
+ if (map->m_lblk + map->m_len > ee_block + ee_len)
+ *allocated = ee_len - (map->m_lblk - ee_block);
+ else
+ *allocated = map->m_len;
+ }
ext4_ext_show_leaf(inode, path);
-out:
- return err ? err : allocated;
+ return path;
}
/*
@@ -3411,13 +3439,11 @@ out:
* that are allocated and initialized.
* It is guaranteed to be >= map->m_len.
*/
-static int ext4_ext_convert_to_initialized(handle_t *handle,
- struct inode *inode,
- struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
- int flags)
+static struct ext4_ext_path *
+ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, struct ext4_ext_path *path,
+ int flags, unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
@@ -3425,9 +3451,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len;
- int allocated = 0, max_zeroout = 0;
int err = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
+ unsigned int max_zeroout = 0;
ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)map->m_lblk, map_len);
@@ -3467,6 +3493,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
* - L2: we only attempt to merge with an extent stored in the
* same extent tree node.
*/
+ *allocated = 0;
if ((map->m_lblk == ee_block) &&
/* See if we can merge left */
(map_len < ee_len) && /*L1*/
@@ -3496,7 +3523,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
@@ -3511,7 +3538,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
/* Result: number of initialized blocks past m_lblk */
- allocated = map_len;
+ *allocated = map_len;
}
} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
(map_len < ee_len) && /*L1*/
@@ -3542,7 +3569,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
@@ -3557,18 +3584,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
abut_ex->ee_len = cpu_to_le16(next_len + map_len);
/* Result: number of initialized blocks past m_lblk */
- allocated = map_len;
+ *allocated = map_len;
}
}
- if (allocated) {
+ if (*allocated) {
/* Mark the block containing both extents as dirty */
err = ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
+ if (err)
+ goto errout;
goto out;
} else
- allocated = ee_len - (map->m_lblk - ee_block);
+ *allocated = ee_len - (map->m_lblk - ee_block);
WARN_ON(map->m_lblk < ee_block);
/*
@@ -3595,21 +3624,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
- if (max_zeroout && (allocated > split_map.m_len)) {
- if (allocated <= max_zeroout) {
+ if (max_zeroout && (*allocated > split_map.m_len)) {
+ if (*allocated <= max_zeroout) {
/* case 3 or 5 */
zero_ex1.ee_block =
cpu_to_le32(split_map.m_lblk +
split_map.m_len);
zero_ex1.ee_len =
- cpu_to_le16(allocated - split_map.m_len);
+ cpu_to_le16(*allocated - split_map.m_len);
ext4_ext_store_pblock(&zero_ex1,
ext4_ext_pblock(ex) + split_map.m_lblk +
split_map.m_len - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex1);
if (err)
goto fallback;
- split_map.m_len = allocated;
+ split_map.m_len = *allocated;
}
if (split_map.m_lblk - ee_block + split_map.m_len <
max_zeroout) {
@@ -3627,23 +3656,24 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_len += split_map.m_lblk - ee_block;
split_map.m_lblk = ee_block;
- allocated = map->m_len;
+ *allocated = map->m_len;
}
}
fallback:
- err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
- flags);
- if (err > 0)
- err = 0;
+ path = ext4_split_extent(handle, inode, path, &split_map, split_flag,
+ flags, NULL);
+ if (IS_ERR(path))
+ return path;
out:
/* If we have gotten a failure, don't zero out status tree */
- if (!err) {
- err = ext4_zeroout_es(inode, &zero_ex1);
- if (!err)
- err = ext4_zeroout_es(inode, &zero_ex2);
- }
- return err ? err : allocated;
+ ext4_zeroout_es(inode, &zero_ex1);
+ ext4_zeroout_es(inode, &zero_ex2);
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -3668,15 +3698,16 @@ out:
* being filled will be convert to initialized by the end_io callback function
* via ext4_convert_unwritten_extents().
*
- * Returns the size of unwritten extent to be written on success.
+ * The size of the unwritten extent to be written is passed to the caller via
+ * the @allocated pointer. Return an extent path pointer on success, or an
+ * error pointer on failure.
*/
-static int ext4_split_convert_extents(handle_t *handle,
+static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
- int flags)
+ struct ext4_ext_path *path,
+ int flags, unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
@@ -3690,10 +3721,6 @@ static int ext4_split_convert_extents(handle_t *handle,
>> inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
- /*
- * It is safe to convert extent to initialized via explicit
- * zeroout only if extent is fully inside i_size or new_size.
- */
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
@@ -3704,20 +3731,24 @@ static int ext4_split_convert_extents(handle_t *handle,
split_flag |= EXT4_EXT_DATA_VALID1;
/* Convert to initialized */
} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+ /*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
split_flag |= ee_block + ee_len <= eof_block ?
EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
}
- flags |= EXT4_GET_BLOCKS_PRE_IO;
- return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
+ flags |= EXT4_GET_BLOCKS_SPLIT_NOMERGE;
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
+ allocated);
}
-static int ext4_convert_unwritten_extents_endio(handle_t *handle,
- struct inode *inode,
- struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath)
+static struct ext4_ext_path *
+ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
@@ -3745,20 +3776,21 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
#endif
- err = ext4_split_convert_extents(handle, inode, map, ppath,
- EXT4_GET_BLOCKS_CONVERT);
- if (err < 0)
- return err;
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT, NULL);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
ex = path[depth].p_ext;
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
/* first mark the extent as initialized */
ext4_ext_mark_initialized(ex);
@@ -3769,18 +3801,23 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-out:
+ if (err)
+ goto errout;
+
ext4_ext_show_leaf(inode, path);
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
-static int
+static struct ext4_ext_path *
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
+ struct ext4_ext_path *path,
unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
@@ -3803,25 +3840,27 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
- err = ext4_split_convert_extents(handle, inode, map, ppath,
- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
- if (err < 0)
- return err;
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, NULL);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- return err;
+ goto errout;
/* first mark the extent as unwritten */
ext4_ext_mark_unwritten(ex);
@@ -3833,7 +3872,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
- return err;
+ goto errout;
ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -3842,22 +3881,24 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
if (*allocated > map->m_len)
*allocated = map->m_len;
map->m_len = *allocated;
- return 0;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
-static int
+static struct ext4_ext_path *
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath, int flags,
- unsigned int allocated, ext4_fsblk_t newblock)
+ struct ext4_ext_path *path, int flags,
+ unsigned int *allocated, ext4_fsblk_t newblock)
{
- struct ext4_ext_path __maybe_unused *path = *ppath;
- int ret = 0;
int err = 0;
ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
(unsigned long long)map->m_lblk, map->m_len, flags,
- allocated);
+ *allocated);
ext4_ext_show_leaf(inode, path);
/*
@@ -3867,36 +3908,34 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
- allocated, newblock);
+ *allocated, newblock);
/* get_block() before submitting IO, split the extent */
- if (flags & EXT4_GET_BLOCKS_PRE_IO) {
- ret = ext4_split_convert_extents(handle, inode, map, ppath,
- flags | EXT4_GET_BLOCKS_CONVERT);
- if (ret < 0) {
- err = ret;
- goto out2;
- }
+ if (flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE) {
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ flags | EXT4_GET_BLOCKS_CONVERT, allocated);
+ if (IS_ERR(path))
+ return path;
/*
- * shouldn't get a 0 return when splitting an extent unless
+ * shouldn't get a 0 allocated when splitting an extent unless
* m_len is 0 (bug) or extent has been corrupted
*/
- if (unlikely(ret == 0)) {
+ if (unlikely(*allocated == 0)) {
EXT4_ERROR_INODE(inode,
- "unexpected ret == 0, m_len = %u",
+ "unexpected allocated == 0, m_len = %u",
map->m_len);
err = -EFSCORRUPTED;
- goto out2;
+ goto errout;
}
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io complete, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
- err = ext4_convert_unwritten_extents_endio(handle, inode, map,
- ppath);
- if (err < 0)
- goto out2;
+ path = ext4_convert_unwritten_extents_endio(handle, inode,
+ map, path);
+ if (IS_ERR(path))
+ return path;
ext4_update_inode_fsync_trans(handle, inode, 1);
goto map_out;
}
@@ -3928,36 +3967,37 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
* For buffered writes, at writepage time, etc. Convert a
* discovered unwritten extent to written.
*/
- ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
- if (ret < 0) {
- err = ret;
- goto out2;
- }
+ path = ext4_ext_convert_to_initialized(handle, inode, map, path,
+ flags, allocated);
+ if (IS_ERR(path))
+ return path;
ext4_update_inode_fsync_trans(handle, inode, 1);
/*
- * shouldn't get a 0 return when converting an unwritten extent
+ * shouldn't get a 0 allocated when converting an unwritten extent
* unless m_len is 0 (bug) or extent has been corrupted
*/
- if (unlikely(ret == 0)) {
- EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
+ if (unlikely(*allocated == 0)) {
+ EXT4_ERROR_INODE(inode, "unexpected allocated == 0, m_len = %u",
map->m_len);
err = -EFSCORRUPTED;
- goto out2;
+ goto errout;
}
out:
- allocated = ret;
map->m_flags |= EXT4_MAP_NEW;
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
out1:
map->m_pblk = newblock;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
+ if (*allocated > map->m_len)
+ *allocated = map->m_len;
+ map->m_len = *allocated;
ext4_ext_show_leaf(inode, path);
-out2:
- return err ? err : allocated;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -4062,6 +4102,73 @@ static int get_implied_cluster_alloc(struct super_block *sb,
return 0;
}
+/*
+ * Determine the hole length around the given logical block: first locate
+ * and expand the hole from the given @path, then adjust it if it has been
+ * partially or completely converted to delayed extents, insert it into the
+ * extent status tree if it is indeed a hole, and finally return the length
+ * of the determined hole.
+ */
+static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t lblk)
+{
+ ext4_lblk_t hole_start, len;
+ struct extent_status es;
+
+ hole_start = lblk;
+ len = ext4_ext_find_hole(inode, path, &hole_start);
+again:
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+ hole_start + len - 1, &es);
+ if (!es.es_len)
+ goto insert_hole;
+
+ /*
+	 * There's a delalloc extent in the hole; handle it according to whether
+	 * the delalloc extent is in front of, behind, or straddles the queried range.
+ */
+ if (lblk >= es.es_lblk + es.es_len) {
+ /*
+ * The delalloc extent is in front of the queried range,
+ * find again from the queried start block.
+ */
+ len -= lblk - hole_start;
+ hole_start = lblk;
+ goto again;
+ } else if (in_range(lblk, es.es_lblk, es.es_len)) {
+ /*
+		 * The delalloc extent contains lblk; it must have been added after
+		 * ext4_map_blocks() checked the extent status tree, as we are not
+		 * holding i_rwsem and the delalloc info is only stabilized by the
+		 * i_data_sem we are going to release soon. Don't insert this range
+		 * into the extent status tree as a hole; just adjust the returned
+		 * length to the part of the delalloc extent after lblk.
+ */
+ len = es.es_lblk + es.es_len - lblk;
+ return len;
+ } else {
+ /*
+ * The delalloc extent is partially or completely behind
+ * the queried range, update hole length until the
+ * beginning of the delalloc extent.
+ */
+ len = min(es.es_lblk - hole_start, len);
+ }
+
+insert_hole:
+ /* Put just found gap into cache to speed up subsequent requests */
+ ext_debug(inode, " -> %u:%u\n", hole_start, len);
+ ext4_es_insert_extent(inode, hole_start, len, ~0,
+ EXTENT_STATUS_HOLE, false);
+
+ /* Update hole_len to reflect hole size after lblk */
+ if (hole_start != lblk)
+ len -= lblk - hole_start;
+
+ return len;
+}
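A brief worked example of the helper above (numbers are purely illustrative): suppose the extent tree has a hole covering blocks [100, 200), the query is for lblk 120, and the extent status tree holds a delalloc extent over [150, 170). ext4_ext_find_hole() gives hole_start = 100 and len = 100; the delalloc lookup finds es_lblk = 150, which is neither in front of lblk nor covering it, so len is trimmed to min(150 - 100, 100) = 50 and the hole [100, 150) is cached; finally, since hole_start (100) != lblk (120), the returned length is 50 - 20 = 30 blocks — the hole remaining after lblk up to the start of the delalloc extent.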
/*
* Block allocation/map/preallocation routine for extents based files
@@ -4069,10 +4176,10 @@ static int get_implied_cluster_alloc(struct super_block *sb,
*
* Need to be called with
* down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
- * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
+ * (ie, flags is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
*
* return > 0, number of blocks already mapped/allocated
- * if create == 0 and these are pre-allocated blocks
+ * if flags doesn't contain EXT4_GET_BLOCKS_CREATE and these are pre-allocated blocks
* buffer head is unmapped
* otherwise blocks are mapped
*
@@ -4088,7 +4195,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0, pblk;
- int err = 0, depth, ret;
+ int err = 0, depth;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
@@ -4098,10 +4205,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
- path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
+ path = ext4_find_extent(inode, map->m_lblk, NULL, flags);
if (IS_ERR(path)) {
err = PTR_ERR(path);
- path = NULL;
goto out;
}
@@ -4150,8 +4256,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
- err = convert_initialized_extent(handle,
- inode, map, &path, &allocated);
+ path = convert_initialized_extent(handle,
+ inode, map, path, &allocated);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
goto out;
} else if (!ext4_ext_is_unwritten(ex)) {
map->m_flags |= EXT4_MAP_MAPPED;
@@ -4163,38 +4271,26 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
goto out;
}
- ret = ext4_ext_handle_unwritten_extents(
- handle, inode, map, &path, flags,
- allocated, newblock);
- if (ret < 0)
- err = ret;
- else
- allocated = ret;
+ path = ext4_ext_handle_unwritten_extents(
+ handle, inode, map, path, flags,
+ &allocated, newblock);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
goto out;
}
}
/*
* requested block isn't allocated yet;
- * we couldn't try to create block if create flag is zero
+	 * we can't try to create blocks if flags doesn't contain EXT4_GET_BLOCKS_CREATE
*/
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
- ext4_lblk_t hole_start, hole_len;
+ ext4_lblk_t len;
- hole_start = map->m_lblk;
- hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
- /*
- * put just found gap into cache to speed up
- * subsequent requests
- */
- ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
+ len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
- /* Update hole_len to reflect hole size after map->m_lblk */
- if (hole_start != map->m_lblk)
- hole_len -= map->m_lblk - hole_start;
map->m_pblk = 0;
- map->m_len = min_t(unsigned int, map->m_len, hole_len);
-
+ map->m_len = min_t(unsigned int, map->m_len, len);
goto out;
}
@@ -4221,7 +4317,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (err)
goto out;
ar.lright = map->m_lblk;
- err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
+ err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright,
+ &ex2, flags);
if (err < 0)
goto out;
@@ -4231,6 +4328,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
+ err = 0;
goto got_allocated_blocks;
}
@@ -4303,8 +4401,9 @@ got_allocated_blocks:
map->m_flags |= EXT4_MAP_UNWRITTEN;
}
- err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
- if (err) {
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
if (allocated_clusters) {
int fb_flags = 0;
@@ -4313,7 +4412,7 @@ got_allocated_blocks:
* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free().
*/
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
ext4_free_blocks(handle, inode, NULL, newblock,
@@ -4324,43 +4423,6 @@ got_allocated_blocks:
}
/*
- * Reduce the reserved cluster count to reflect successful deferred
- * allocation of delayed allocated clusters or direct allocation of
- * clusters discovered to be delayed allocated. Once allocated, a
- * cluster is not included in the reserved count.
- */
- if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- /*
- * When allocating delayed allocated clusters, simply
- * reduce the reserved cluster count and claim quota
- */
- ext4_da_update_reserve_space(inode, allocated_clusters,
- 1);
- } else {
- ext4_lblk_t lblk, len;
- unsigned int n;
-
- /*
- * When allocating non-delayed allocated clusters
- * (from fallocate, filemap, DIO, or clusters
- * allocated when delalloc has been disabled by
- * ext4_nonda_switch), reduce the reserved cluster
- * count by the number of allocated clusters that
- * have previously been delayed allocated. Quota
- * has been claimed by ext4_mb_new_blocks() above,
- * so release the quota reservations made for any
- * previously delayed allocated clusters.
- */
- lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
- len = allocated_clusters << sbi->s_cluster_bits;
- n = ext4_es_delayed_clu(inode, lblk, len);
- if (n > 0)
- ext4_da_update_reserve_space(inode, (int) n, 0);
- }
- }
-
- /*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
@@ -4375,8 +4437,21 @@ got_allocated_blocks:
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
out:
- ext4_ext_drop_refs(path);
- kfree(path);
+ /*
+ * We never use EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF with CREATE flag.
+ * So we know that the depth used here is correct, since there was no
+ * block allocation done if EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF is set.
+ * If tomorrow we start using this QUERY flag with CREATE, then we will
+ * need to re-calculate the depth as it might have changed due to block
+ * allocation.
+ */
+ if (flags & EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF) {
+ WARN_ON_ONCE(flags & EXT4_GET_BLOCKS_CREATE);
+ if (!err && ex && (ex == EXT_LAST_EXTENT(path[depth].p_hdr)))
+ map->m_flags |= EXT4_MAP_QUERY_LAST_IN_LEAF;
+ }
+
+ ext4_free_ext_path(path);
trace_ext4_ext_map_blocks_exit(inode, flags, map,
err ? err : allocated);
@@ -4403,21 +4478,12 @@ int ext4_ext_truncate(handle_t *handle, struct inode *inode)
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
-retry:
- err = ext4_es_remove_extent(inode, last_block,
- EXT_MAX_BLOCKS - last_block);
- if (err == -ENOMEM) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
- }
- if (err)
- return err;
+ ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
+
retry_remove_space:
err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
if (err == -ENOMEM) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ memalloc_retry_wait(GFP_ATOMIC);
goto retry_remove_space;
}
return err;
@@ -4434,7 +4500,9 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
int depth = 0;
struct ext4_map_blocks map;
unsigned int credits;
- loff_t epos;
+ loff_t epos, old_size = i_size_read(inode);
+ unsigned int blkbits = inode->i_blkbits;
+ bool alloc_zero = false;
BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
map.m_lblk = offset;
@@ -4448,6 +4516,17 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
/*
+	 * Doing the actual zero-out inside a running journal transaction
+	 * costs a lot. First allocate an unwritten extent, then zero it out
+	 * and convert it to written afterwards.
+ */
+ if (flags & EXT4_GET_BLOCKS_ZERO) {
+ flags &= ~EXT4_GET_BLOCKS_ZERO;
+ flags |= EXT4_GET_BLOCKS_UNWRIT_EXT;
+ alloc_zero = true;
+ }
+
+ /*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
@@ -4483,15 +4562,19 @@ retry:
* allow a full retry cycle for any remaining allocations
*/
retries = 0;
- map.m_lblk += ret;
- map.m_len = len = len - ret;
- epos = (loff_t)map.m_lblk << inode->i_blkbits;
- inode->i_ctime = current_time(inode);
+ epos = EXT4_LBLK_TO_B(inode, map.m_lblk + ret);
+ inode_set_ctime_current(inode);
if (new_size) {
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
- inode->i_mtime = inode->i_ctime;
+ inode_set_mtime_to_ts(inode,
+ inode_get_ctime(inode));
+ if (epos > old_size) {
+ pagecache_isize_extended(inode, old_size, epos);
+ ext4_zero_partial_blocks(handle, inode,
+ old_size, epos - old_size);
+ }
}
ret2 = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4499,6 +4582,21 @@ retry:
ret2 = ret3 ? ret3 : ret2;
if (unlikely(ret2))
break;
+
+ if (alloc_zero &&
+ (map.m_flags & (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN))) {
+ ret2 = ext4_issue_zeroout(inode, map.m_lblk, map.m_pblk,
+ map.m_len);
+ if (likely(!ret2))
+ ret2 = ext4_convert_unwritten_extents(NULL,
+ inode, (loff_t)map.m_lblk << blkbits,
+ (loff_t)map.m_len << blkbits);
+ if (ret2)
+ break;
+ }
+
+ map.m_lblk += ret;
+ map.m_len = len = len - ret;
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
@@ -4506,126 +4604,77 @@ retry:
return ret > 0 ? ret2 : ret;
}
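For orientation, the EXT4_GET_BLOCKS_ZERO handling added to ext4_alloc_file_blocks() above boils down to the following flow (a descriptive sketch, not additional patch content):

	/*
	 * alloc_zero path in ext4_alloc_file_blocks(), conceptually:
	 *
	 *   EXT4_GET_BLOCKS_ZERO  ->  EXT4_GET_BLOCKS_UNWRIT_EXT
	 *   ext4_map_blocks(handle, inode, &map, flags);    allocate unwritten
	 *   ext4_journal_stop(handle);                       end the handle
	 *   ext4_issue_zeroout(inode, map.m_lblk, map.m_pblk, map.m_len);
	 *   ext4_convert_unwritten_extents(NULL, inode, pos, len);
	 *
	 * so the expensive zero-out never runs inside the running transaction.
	 */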
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
struct inode *inode = file_inode(file);
- struct address_space *mapping = file->f_mapping;
handle_t *handle = NULL;
- unsigned int max_blocks;
loff_t new_size = 0;
- int ret = 0;
- int flags;
- int credits;
- int partial_begin, partial_end;
- loff_t start, end;
- ext4_lblk_t lblk;
+ loff_t end = offset + len;
+ ext4_lblk_t start_lblk, end_lblk;
+ unsigned int blocksize = i_blocksize(inode);
unsigned int blkbits = inode->i_blkbits;
+ int ret, flags, credits;
trace_ext4_zero_range(inode, offset, len, mode);
+ WARN_ON_ONCE(!inode_is_locked(inode));
- /* Call ext4_force_commit to flush all data in case of data=journal. */
- if (ext4_should_journal_data(inode)) {
- ret = ext4_force_commit(inode->i_sb);
- if (ret)
- return ret;
- }
-
- /*
- * Round up offset. This is not fallocate, we need to zero out
- * blocks, so convert interior block aligned part of the range to
- * unwritten and possibly manually zero out unaligned parts of the
- * range.
- */
- start = round_up(offset, 1 << blkbits);
- end = round_down((offset + len), 1 << blkbits);
-
- if (start < offset || end > offset + len)
- return -EINVAL;
- partial_begin = offset & ((1 << blkbits) - 1);
- partial_end = (offset + len) & ((1 << blkbits) - 1);
-
- lblk = start >> blkbits;
- max_blocks = (end >> blkbits);
- if (max_blocks < lblk)
- max_blocks = 0;
- else
- max_blocks -= lblk;
-
- inode_lock(inode);
-
- /*
- * Indirect files do not support unwritten extents
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
- ret = -EOPNOTSUPP;
- goto out_mutex;
- }
+ /* Indirect files do not support unwritten extents */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ return -EOPNOTSUPP;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (offset + len > inode->i_size ||
- offset + len > EXT4_I(inode)->i_disksize)) {
- new_size = offset + len;
+ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
+ new_size = end;
ret = inode_newsize_ok(inode, new_size);
if (ret)
- goto out_mutex;
+ return ret;
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
-
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- inode_dio_wait(inode);
-
/* Preallocate the range including the unaligned edges */
- if (partial_begin || partial_end) {
- ret = ext4_alloc_file_blocks(file,
- round_down(offset, 1 << blkbits) >> blkbits,
- (round_up((offset + len), 1 << blkbits) -
- round_down(offset, 1 << blkbits)) >> blkbits,
- new_size, flags);
- if (ret)
- goto out_mutex;
+ if (!IS_ALIGNED(offset | end, blocksize)) {
+ ext4_lblk_t alloc_lblk = offset >> blkbits;
+ ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
+ ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
+ new_size, flags);
+ if (ret)
+ return ret;
}
- /* Zero range excluding the unaligned edges */
- if (max_blocks > 0) {
- flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
- EXT4_EX_NOCACHE);
-
- /*
- * Prevent page faults from reinstantiating pages we have
- * released from page cache.
- */
- filemap_invalidate_lock(mapping);
+ ret = ext4_update_disksize_before_punch(inode, offset, len);
+ if (ret)
+ return ret;
- ret = ext4_break_layouts(inode);
- if (ret) {
- filemap_invalidate_unlock(mapping);
- goto out_mutex;
- }
+ /* Now release the pages and zero block aligned part of pages */
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+ return ret;
- ret = ext4_update_disksize_before_punch(inode, offset, len);
- if (ret) {
- filemap_invalidate_unlock(mapping);
- goto out_mutex;
- }
- /* Now release the pages and zero block aligned part of pages */
- truncate_pagecache_range(inode, start, end - 1);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ /* Zero range excluding the unaligned edges */
+ start_lblk = EXT4_B_TO_LBLK(inode, offset);
+ end_lblk = end >> blkbits;
+ if (end_lblk > start_lblk) {
+ ext4_lblk_t zero_blks = end_lblk - start_lblk;
- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
- flags);
- filemap_invalidate_unlock(mapping);
+ if (mode & FALLOC_FL_WRITE_ZEROES)
+ flags = EXT4_GET_BLOCKS_CREATE_ZERO | EXT4_EX_NOCACHE;
+ else
+ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+ EXT4_EX_NOCACHE);
+ ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
+ new_size, flags);
if (ret)
- goto out_mutex;
+ return ret;
}
- if (!partial_begin && !partial_end)
- goto out_mutex;
+ /* Finish zeroing out if it doesn't contain partial block */
+ if (IS_ALIGNED(offset | end, blocksize))
+ return ret;
/*
* In worst case we have to writeout two nonadjacent unwritten
@@ -4638,29 +4687,69 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
- goto out_mutex;
+ return ret;
}
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ /* Zero out partial block at the edges of the range */
+ ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+ if (ret)
+ goto out_handle;
+
if (new_size)
ext4_update_inode_size(inode, new_size);
ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret))
goto out_handle;
- ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits,
- (offset + len - 1) >> inode->i_sb->s_blocksize_bits);
- /* Zero out partial block at the edges of the range */
- ret = ext4_zero_partial_blocks(handle, inode, offset, len);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
out_handle:
ext4_journal_stop(handle);
-out_mutex:
- inode_unlock(inode);
+ return ret;
+}
+
+static long ext4_do_fallocate(struct file *file, loff_t offset,
+ loff_t len, int mode)
+{
+ struct inode *inode = file_inode(file);
+ loff_t end = offset + len;
+ loff_t new_size = 0;
+ ext4_lblk_t start_lblk, len_lblk;
+ int ret;
+
+ trace_ext4_fallocate_enter(inode, offset, len, mode);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ start_lblk = offset >> inode->i_blkbits;
+ len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
+
+	/* We only support preallocation for extent-based files. */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
+ new_size = end;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+ goto out;
+ }
+
+ ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
+ if (ret)
+ goto out;
+
+ if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+ ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
+ EXT4_I(inode)->i_sync_tid);
+ }
+out:
+ trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
return ret;
}
@@ -4674,12 +4763,8 @@ out_mutex:
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
- loff_t new_size = 0;
- unsigned int max_blocks;
- int ret = 0;
- int flags;
- ext4_lblk_t lblk;
- unsigned int blkbits = inode->i_blkbits;
+ struct address_space *mapping = file->f_mapping;
+ int ret;
/*
* Encrypted inodes can't handle collapse range or insert
@@ -4690,80 +4775,158 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (IS_ENCRYPTED(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
+ /*
+ * Don't allow writing zeroes if the underlying device does not
+ * enable the unmap write zeroes operation.
+ */
+ if ((mode & FALLOC_FL_WRITE_ZEROES) &&
+ !bdev_write_zeroes_unmap_sectors(inode->i_sb->s_bdev))
+ return -EOPNOTSUPP;
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
- FALLOC_FL_INSERT_RANGE))
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_INSERT_RANGE | FALLOC_FL_WRITE_ZEROES))
return -EOPNOTSUPP;
- ext4_fc_start_update(inode);
+ inode_lock(inode);
+ ret = ext4_convert_inline_data(inode);
+ if (ret)
+ goto out_inode_lock;
- if (mode & FALLOC_FL_PUNCH_HOLE) {
- ret = ext4_punch_hole(inode, offset, len);
- goto exit;
- }
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
- ret = ext4_convert_inline_data(inode);
+ ret = file_modified(file);
if (ret)
- goto exit;
+ goto out_inode_lock;
- if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- ret = ext4_collapse_range(inode, offset, len);
- goto exit;
+ if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
+ ret = ext4_do_fallocate(file, offset, len, mode);
+ goto out_inode_lock;
}
- if (mode & FALLOC_FL_INSERT_RANGE) {
- ret = ext4_insert_range(inode, offset, len);
- goto exit;
- }
+ /*
+	 * Follow-up operations will drop the page cache; hold the invalidate
+	 * lock to prevent page faults from reinstantiating pages we have
+	 * released from the page cache.
+ */
+ filemap_invalidate_lock(mapping);
- if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_invalidate_lock;
+
+ switch (mode & FALLOC_FL_MODE_MASK) {
+ case FALLOC_FL_PUNCH_HOLE:
+ ret = ext4_punch_hole(file, offset, len);
+ break;
+ case FALLOC_FL_COLLAPSE_RANGE:
+ ret = ext4_collapse_range(file, offset, len);
+ break;
+ case FALLOC_FL_INSERT_RANGE:
+ ret = ext4_insert_range(file, offset, len);
+ break;
+ case FALLOC_FL_ZERO_RANGE:
+ case FALLOC_FL_WRITE_ZEROES:
ret = ext4_zero_range(file, offset, len, mode);
- goto exit;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
}
- trace_ext4_fallocate_enter(inode, offset, len, mode);
- lblk = offset >> blkbits;
- max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+out_inode_lock:
+ inode_unlock(inode);
+ return ret;
+}
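For completeness, a hedged userspace sketch of the new mode handled by the fallocate switch above (the helper name is hypothetical; it assumes headers that define FALLOC_FL_WRITE_ZEROES and a block device advertising unmap write zeroes, otherwise the call fails with EOPNOTSUPP):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/falloc.h>

	/* Ask ext4 to provision physically zeroed, written blocks in one call;
	 * returns 0 on success, -1 with errno set on failure. */
	static int zero_fill(int fd, off_t offset, off_t len)
	{
		return fallocate(fd, FALLOC_FL_WRITE_ZEROES, offset, len);
	}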
- inode_lock(inode);
+/*
+ * This function converts a range of blocks to written extents. The caller of
+ * this function will pass the start offset and the size. All unwritten extents
+ * within this range will be converted to written extents.
+ *
+ * This function is called from the direct IO end io call back function for
+ * atomic writes, to convert the unwritten extents after IO is completed.
+ *
+ * Note that the requirement for atomic writes is that all conversion should
+ * happen atomically in a single fs journal transaction. We mainly only allocate
+ * unwritten extents either on a hole or on a pre-existing unwritten extent range in
+ * ext4_map_blocks_atomic_write(). The only case where we can have multiple
+ * unwritten extents in a range [offset, offset+len) is when there is a split
+ * unwritten extent between two leaf nodes which was cached in extent status
+ * cache during ext4_iomap_alloc() time. That will allow
+ * ext4_map_blocks_atomic_write() to return the unwritten extent range w/o going
+ * into the slow path. That means we might need a loop for conversion of this
+ * unwritten extent split across leaf blocks within a single journal transaction.
+ * An extent split across leaf nodes is a rare case, but let's still handle it
+ * to meet the requirements of multi-fsblock atomic writes.
+ *
+ * Returns 0 on success.
+ */
+int ext4_convert_unwritten_extents_atomic(handle_t *handle, struct inode *inode,
+ loff_t offset, ssize_t len)
+{
+ unsigned int max_blocks;
+ int ret = 0, ret2 = 0, ret3 = 0;
+ struct ext4_map_blocks map;
+ unsigned int blkbits = inode->i_blkbits;
+ unsigned int credits = 0;
+ int flags = EXT4_GET_BLOCKS_IO_CONVERT_EXT | EXT4_EX_NOCACHE;
- /*
- * We only support preallocation for extent-based files only
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
- ret = -EOPNOTSUPP;
- goto out;
+ map.m_lblk = offset >> blkbits;
+ max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
+
+ if (!handle) {
+ /*
+ * TODO: An optimization can be added later by having an extent
+ * status flag e.g. EXTENT_STATUS_SPLIT_LEAF. If we query that
+ * it can tell if the extent in the cache is a split extent.
+ * But for now let's assume pextents as 2 always.
+ */
+ credits = ext4_meta_trans_blocks(inode, max_blocks, 2);
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (offset + len > inode->i_size ||
- offset + len > EXT4_I(inode)->i_disksize)) {
- new_size = offset + len;
- ret = inode_newsize_ok(inode, new_size);
- if (ret)
- goto out;
+ if (credits) {
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ return ret;
+ }
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- inode_dio_wait(inode);
+ while (ret >= 0 && ret < max_blocks) {
+ map.m_lblk += ret;
+ map.m_len = (max_blocks -= ret);
+ ret = ext4_map_blocks(handle, inode, &map, flags);
+ if (ret != max_blocks)
+ ext4_msg(inode->i_sb, KERN_INFO,
+ "inode #%lu: block %u: len %u: "
+ "split block mapping found for atomic write, "
+ "ret = %d",
+ inode->i_ino, map.m_lblk,
+ map.m_len, ret);
+ if (ret <= 0)
+ break;
+ }
- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
- if (ret)
- goto out;
+ ret2 = ext4_mark_inode_dirty(handle, inode);
- if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
- ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
- EXT4_I(inode)->i_sync_tid);
+ if (credits) {
+ ret3 = ext4_journal_stop(handle);
+ if (unlikely(ret3))
+ ret2 = ret3;
}
-out:
- inode_unlock(inode);
- trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
-exit:
- ext4_fc_stop_update(inode);
- return ret;
+
+ if (ret <= 0 || ret2)
+ ext4_warning(inode->i_sb,
+ "inode #%lu: block %u: len %u: "
+ "returned %d or %d",
+ inode->i_ino, map.m_lblk,
+ map.m_len, ret, ret2);
+
+ return ret > 0 ? ret2 : ret;
}
/*
@@ -4805,8 +4968,14 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
break;
}
}
+ /*
+	 * Do not cache any unrelated extents, as this path does not hold
+	 * i_rwsem or the invalidate_lock, and caching them could corrupt
+	 * the extent status tree.
+ */
ret = ext4_map_blocks(handle, inode, &map,
- EXT4_GET_BLOCKS_IO_CONVERT_EXT);
+ EXT4_GET_BLOCKS_IO_CONVERT_EXT |
+ EXT4_EX_NOCACHE);
if (ret <= 0)
ext4_warning(inode->i_sb,
"inode #%lu: block %u: len %u: "
@@ -4917,12 +5086,7 @@ static const struct iomap_ops ext4_iomap_xattr_ops = {
static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
{
- u64 maxbytes;
-
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- maxbytes = inode->i_sb->s_maxbytes;
- else
- maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+ u64 maxbytes = ext4_get_maxbytes(inode);
if (*len == 0)
return -EINVAL;
@@ -4942,10 +5106,11 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
{
int error = 0;
+ inode_lock_shared(inode);
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
error = ext4_ext_precache(inode);
if (error)
- return error;
+ goto unlock;
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
}
@@ -4956,15 +5121,19 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
error = ext4_fiemap_check_ranges(inode, start, &len);
if (error)
- return error;
+ goto unlock;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
- return iomap_fiemap(inode, fieinfo, start, len,
- &ext4_iomap_xattr_ops);
+ error = iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_xattr_ops);
+ } else {
+ error = iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_report_ops);
}
-
- return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
+unlock:
+ inode_unlock_shared(inode);
+ return error;
}
int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -4985,7 +5154,9 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ inode_lock_shared(inode);
error = ext4_ext_precache(inode);
+ inode_unlock_shared(inode);
if (error)
return error;
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
@@ -5044,7 +5215,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
credits = depth + 2;
}
- restart_credits = ext4_writepage_trans_blocks(inode);
+ restart_credits = ext4_chunk_trans_extent(inode, 0);
err = ext4_datasem_ensure_credits(handle, inode, credits,
restart_credits, 0);
if (err) {
@@ -5146,7 +5317,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
* won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
- path = ext4_find_extent(inode, start - 1, &path,
+ path = ext4_find_extent(inode, start - 1, path,
EXT4_EX_NOCACHE);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -5180,6 +5351,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
* and it is decreased till we reach start.
*/
again:
+ ret = 0;
if (SHIFT == SHIFT_LEFT)
iterator = &start;
else
@@ -5194,7 +5366,7 @@ again:
* becomes NULL to indicate the end of the loop.
*/
while (iterator && start <= stop) {
- path = ext4_find_extent(inode, *iterator, &path,
+ path = ext4_find_extent(inode, *iterator, path,
EXT4_EX_NOCACHE);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -5223,14 +5395,21 @@ again:
ext4_ext_get_actual_len(extent);
} else {
extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
- if (le32_to_cpu(extent->ee_block) > 0)
+ if (le32_to_cpu(extent->ee_block) > start)
*iterator = le32_to_cpu(extent->ee_block) - 1;
- else
- /* Beginning is reached, end of the loop */
+ else if (le32_to_cpu(extent->ee_block) == start)
iterator = NULL;
- /* Update path extent in case we need to stop */
- while (le32_to_cpu(extent->ee_block) < start)
+ else {
+ extent = EXT_LAST_EXTENT(path[depth].p_hdr);
+ while (le32_to_cpu(extent->ee_block) >= start)
+ extent--;
+
+ if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
+ break;
+
extent++;
+ iterator = NULL;
+ }
path[depth].p_ext = extent;
}
ret = ext4_ext_shift_path_extents(path, shift, inode,
@@ -5242,8 +5421,7 @@ again:
break;
}
out:
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
return ret;
}
@@ -5252,122 +5430,79 @@ out:
* This implements the fallocate's collapse range functionality for ext4
* Returns: 0 and non-zero on error.
*/
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
- ext4_lblk_t punch_start, punch_stop;
+ loff_t end = offset + len;
+ ext4_lblk_t start_lblk, end_lblk;
handle_t *handle;
unsigned int credits;
- loff_t new_size, ioffset;
+ loff_t start, new_size;
int ret;
- /*
- * We need to test this early because xfstests assumes that a
- * collapse range of (0, 1) will return EOPNOTSUPP if the file
- * system does not support collapse range.
- */
+ trace_ext4_collapse_range(inode, offset, len);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
-
/* Collapse range works only on fs cluster size aligned regions. */
if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
return -EINVAL;
-
- trace_ext4_collapse_range(inode, offset, len);
-
- punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
- punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
-
- /* Call ext4_force_commit to flush all data in case of data=journal. */
- if (ext4_should_journal_data(inode)) {
- ret = ext4_force_commit(inode->i_sb);
- if (ret)
- return ret;
- }
-
- inode_lock(inode);
/*
* There is no need to overlap collapse range with EOF, in which case
* it is effectively a truncate operation
*/
- if (offset + len >= inode->i_size) {
- ret = -EINVAL;
- goto out_mutex;
- }
-
- /* Currently just for extent based files */
- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- ret = -EOPNOTSUPP;
- goto out_mutex;
- }
-
- /* Wait for existing dio to complete */
- inode_dio_wait(inode);
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_mmap;
+ if (end >= inode->i_size)
+ return -EINVAL;
/*
+ * Write tail of the last page before removed range and data that
+ * will be shifted since they will get removed from the page cache
+ * below. We are also protected from pages becoming dirty by
+ * i_rwsem and invalidate_lock.
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
*/
- ioffset = round_down(offset, PAGE_SIZE);
- /*
- * Write tail of the last page before removed range since it will get
- * removed from the page cache below.
- */
- ret = filemap_write_and_wait_range(mapping, ioffset, offset);
+ start = round_down(offset, PAGE_SIZE);
+ ret = filemap_write_and_wait_range(mapping, start, offset);
+ if (!ret)
+ ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (ret)
- goto out_mmap;
- /*
- * Write data that will be shifted to preserve them when discarding
- * page cache below. We are also protected from pages becoming dirty
- * by i_rwsem and invalidate_lock.
- */
- ret = filemap_write_and_wait_range(mapping, offset + len,
- LLONG_MAX);
- if (ret)
- goto out_mmap;
- truncate_pagecache(inode, ioffset);
+ return ret;
+
+ truncate_pagecache(inode, start);
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 0);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_mmap;
- }
- ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
- ret = ext4_es_remove_extent(inode, punch_start,
- EXT_MAX_BLOCKS - punch_start);
- if (ret) {
- up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
- }
+ start_lblk = offset >> inode->i_blkbits;
+ end_lblk = (offset + len) >> inode->i_blkbits;
+
+ ext4_check_map_extents_env(inode);
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
- ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
+ ret = ext4_ext_remove_space(inode, start_lblk, end_lblk - 1);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
+ goto out_handle;
}
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
- ret = ext4_ext_shift_extents(inode, handle, punch_stop,
- punch_stop - punch_start, SHIFT_LEFT);
+ ret = ext4_ext_shift_extents(inode, handle, end_lblk,
+ end_lblk - start_lblk, SHIFT_LEFT);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
+ goto out_handle;
}
new_size = inode->i_size - len;
@@ -5375,19 +5510,16 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
EXT4_I(inode)->i_disksize = new_size;
up_write(&EXT4_I(inode)->i_data_sem);
- if (IS_SYNC(inode))
- ext4_handle_sync(handle);
- inode->i_mtime = inode->i_ctime = current_time(inode);
ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+ goto out_handle;
+
ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
-out_stop:
+out_handle:
ext4_journal_stop(handle);
- ext4_fc_stop_ineligible(sb);
-out_mmap:
- filemap_invalidate_unlock(mapping);
-out_mutex:
- inode_unlock(inode);
return ret;
}
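
Note how much of the old prologue simply disappears here: taking i_rwsem, draining direct I/O, taking the invalidate_lock and calling ext4_break_layouts() are all gone, and the function now only asserts inode_is_locked(). The expectation (hedged, since the call site is not part of this hunk) is that the fallocate entry point performs that setup once for all of these helpers, roughly along the lines of:

/* Illustrative caller only; the real fallocate plumbing is not shown here. */
static long example_fallocate_collapse(struct file *file, loff_t offset,
				       loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	inode_lock(inode);				/* i_rwsem, exclusive */
	inode_dio_wait(inode);				/* drain direct I/O */
	filemap_invalidate_lock(inode->i_mapping);	/* stop page faults */

	ret = ext4_break_layouts(inode);
	if (!ret)
		ret = ext4_collapse_range(file, offset, len);

	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
	return ret;
}
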
@@ -5399,109 +5531,73 @@ out_mutex:
* by len bytes.
* Returns 0 on success, error otherwise.
*/
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
struct ext4_ext_path *path;
struct ext4_extent *extent;
- ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
+ ext4_lblk_t start_lblk, len_lblk, ee_start_lblk = 0;
unsigned int credits, ee_len;
- int ret = 0, depth, split_flag = 0;
- loff_t ioffset;
+ int ret, depth, split_flag = 0;
+ loff_t start;
- /*
- * We need to test this early because xfstests assumes that an
- * insert range of (0, 1) will return EOPNOTSUPP if the file
- * system does not support insert range.
- */
+ trace_ext4_insert_range(inode, offset, len);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
-
/* Insert range works only on fs cluster size aligned regions. */
if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
return -EINVAL;
-
- trace_ext4_insert_range(inode, offset, len);
-
- offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
- len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
-
- /* Call ext4_force_commit to flush all data in case of data=journal */
- if (ext4_should_journal_data(inode)) {
- ret = ext4_force_commit(inode->i_sb);
- if (ret)
- return ret;
- }
-
- inode_lock(inode);
- /* Currently just for extent based files */
- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- ret = -EOPNOTSUPP;
- goto out_mutex;
- }
-
- /* Check whether the maximum file size would be exceeded */
- if (len > inode->i_sb->s_maxbytes - inode->i_size) {
- ret = -EFBIG;
- goto out_mutex;
- }
-
/* Offset must be less than i_size */
- if (offset >= inode->i_size) {
- ret = -EINVAL;
- goto out_mutex;
- }
-
- /* Wait for existing dio to complete */
- inode_dio_wait(inode);
+ if (offset >= inode->i_size)
+ return -EINVAL;
+ /* Check whether the maximum file size would be exceeded */
+ if (len > inode->i_sb->s_maxbytes - inode->i_size)
+ return -EFBIG;
/*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
+ * Write out all dirty pages. Need to round down to align start offset
+ * to page size boundary for page size > block size.
*/
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
+ start = round_down(offset, PAGE_SIZE);
+ ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
if (ret)
- goto out_mmap;
+ return ret;
- /*
- * Need to round down to align start offset to page size boundary
- * for page size > block size.
- */
- ioffset = round_down(offset, PAGE_SIZE);
- /* Write out all dirty pages */
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- LLONG_MAX);
- if (ret)
- goto out_mmap;
- truncate_pagecache(inode, ioffset);
+ truncate_pagecache(inode, start);
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 0);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_mmap;
- }
- ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
- inode->i_mtime = inode->i_ctime = current_time(inode);
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
- goto out_stop;
+ goto out_handle;
+
+ start_lblk = offset >> inode->i_blkbits;
+ len_lblk = len >> inode->i_blkbits;
+
+ ext4_check_map_extents_env(inode);
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
- path = ext4_find_extent(inode, offset_lblk, NULL, 0);
+ path = ext4_find_extent(inode, start_lblk, NULL, 0);
if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
+ ret = PTR_ERR(path);
+ goto out_handle;
}
depth = ext_depth(inode);
@@ -5511,60 +5607,47 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
ee_len = ext4_ext_get_actual_len(extent);
/*
- * If offset_lblk is not the starting block of extent, split
- * the extent @offset_lblk
+ * If start_lblk is not the starting block of extent, split
+ * the extent @start_lblk
*/
- if ((offset_lblk > ee_start_lblk) &&
- (offset_lblk < (ee_start_lblk + ee_len))) {
+ if ((start_lblk > ee_start_lblk) &&
+ (start_lblk < (ee_start_lblk + ee_len))) {
if (ext4_ext_is_unwritten(extent))
split_flag = EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
- ret = ext4_split_extent_at(handle, inode, &path,
- offset_lblk, split_flag,
+ path = ext4_split_extent_at(handle, inode, path,
+ start_lblk, split_flag,
EXT4_EX_NOCACHE |
- EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_SPLIT_NOMERGE |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
}
- ext4_ext_drop_refs(path);
- kfree(path);
- if (ret < 0) {
+ if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
+ ret = PTR_ERR(path);
+ goto out_handle;
}
- } else {
- ext4_ext_drop_refs(path);
- kfree(path);
}
- ret = ext4_es_remove_extent(inode, offset_lblk,
- EXT_MAX_BLOCKS - offset_lblk);
- if (ret) {
- up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
- }
+ ext4_free_ext_path(path);
+ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
/*
- * if offset_lblk lies in a hole which is at start of file, use
+ * if start_lblk lies in a hole which is at start of file, use
* ee_start_lblk to shift extents
*/
ret = ext4_ext_shift_extents(inode, handle,
- ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
- len_lblk, SHIFT_RIGHT);
-
+ max(ee_start_lblk, start_lblk), len_lblk, SHIFT_RIGHT);
up_write(&EXT4_I(inode)->i_data_sem);
+ if (ret)
+ goto out_handle;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
-out_stop:
+out_handle:
ext4_journal_stop(handle);
- ext4_fc_stop_ineligible(sb);
-out_mmap:
- filemap_invalidate_unlock(mapping);
-out_mutex:
- inode_unlock(inode);
return ret;
}
@@ -5583,7 +5666,7 @@ out_mutex:
* stuff such as page-cache locking consistency, bh mapping consistency or
* extent's data copying must be performed by caller.
* Locking:
- * i_mutex is held for both inodes
+ * i_rwsem is held for both inodes
* i_data_sem is locked for write for both inodes
* Assumptions:
* All pages from requested range are locked for both inodes
@@ -5602,12 +5685,8 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
BUG_ON(!inode_is_locked(inode1));
BUG_ON(!inode_is_locked(inode2));
- *erp = ext4_es_remove_extent(inode1, lblk1, count);
- if (unlikely(*erp))
- return 0;
- *erp = ext4_es_remove_extent(inode2, lblk2, count);
- if (unlikely(*erp))
- return 0;
+ ext4_es_remove_extent(inode1, lblk1, count);
+ ext4_es_remove_extent(inode2, lblk2, count);
while (count) {
struct ext4_extent *ex1, *ex2, tmp_ex;
@@ -5615,25 +5694,21 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
int e1_len, e2_len, len;
int split = 0;
- path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
+ path1 = ext4_find_extent(inode1, lblk1, path1, EXT4_EX_NOCACHE);
if (IS_ERR(path1)) {
*erp = PTR_ERR(path1);
- path1 = NULL;
- finish:
- count = 0;
- goto repeat;
+ goto errout;
}
- path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
+ path2 = ext4_find_extent(inode2, lblk2, path2, EXT4_EX_NOCACHE);
if (IS_ERR(path2)) {
*erp = PTR_ERR(path2);
- path2 = NULL;
- goto finish;
+ goto errout;
}
ex1 = path1[path1->p_depth].p_ext;
ex2 = path2[path2->p_depth].p_ext;
/* Do we have something to swap ? */
if (unlikely(!ex2 || !ex1))
- goto finish;
+ goto errout;
e1_blk = le32_to_cpu(ex1->ee_block);
e2_blk = le32_to_cpu(ex2->ee_block);
@@ -5655,7 +5730,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
next2 = e2_blk;
/* Do we have something to swap */
if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
- goto finish;
+ goto errout;
/* Move to the rightest boundary */
len = next1 - lblk1;
if (len < next2 - lblk2)
@@ -5665,28 +5740,32 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
lblk1 += len;
lblk2 += len;
count -= len;
- goto repeat;
+ continue;
}
/* Prepare left boundary */
if (e1_blk < lblk1) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode1,
- &path1, lblk1, 0);
- if (unlikely(*erp))
- goto finish;
+ path1 = ext4_force_split_extent_at(handle, inode1,
+ path1, lblk1, 0);
+ if (IS_ERR(path1)) {
+ *erp = PTR_ERR(path1);
+ goto errout;
+ }
}
if (e2_blk < lblk2) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode2,
- &path2, lblk2, 0);
- if (unlikely(*erp))
- goto finish;
+ path2 = ext4_force_split_extent_at(handle, inode2,
+ path2, lblk2, 0);
+ if (IS_ERR(path2)) {
+ *erp = PTR_ERR(path2);
+ goto errout;
+ }
}
/* ext4_split_extent_at() may result in leaf extent split,
* path must be revalidated. */
if (split)
- goto repeat;
+ continue;
/* Prepare right boundary */
len = count;
@@ -5697,30 +5776,34 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
if (len != e1_len) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode1,
- &path1, lblk1 + len, 0);
- if (unlikely(*erp))
- goto finish;
+ path1 = ext4_force_split_extent_at(handle, inode1,
+ path1, lblk1 + len, 0);
+ if (IS_ERR(path1)) {
+ *erp = PTR_ERR(path1);
+ goto errout;
+ }
}
if (len != e2_len) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode2,
- &path2, lblk2 + len, 0);
- if (*erp)
- goto finish;
+ path2 = ext4_force_split_extent_at(handle, inode2,
+ path2, lblk2 + len, 0);
+ if (IS_ERR(path2)) {
+ *erp = PTR_ERR(path2);
+ goto errout;
+ }
}
/* ext4_split_extent_at() may result in leaf extent split,
* path must be revalidated. */
if (split)
- goto repeat;
+ continue;
BUG_ON(e2_len != e1_len);
*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
/* Both extents are fully inside boundaries. Swap it now */
tmp_ex = *ex1;
@@ -5738,7 +5821,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
*erp = ext4_ext_dirty(handle, inode2, path2 +
path2->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
*erp = ext4_ext_dirty(handle, inode1, path1 +
path1->p_depth);
/*
@@ -5748,19 +5831,17 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
* aborted anyway.
*/
if (unlikely(*erp))
- goto finish;
+ goto errout;
+
lblk1 += len;
lblk2 += len;
replaced_count += len;
count -= len;
-
- repeat:
- ext4_ext_drop_refs(path1);
- kfree(path1);
- ext4_ext_drop_refs(path2);
- kfree(path2);
- path1 = path2 = NULL;
}
+
+errout:
+ ext4_free_ext_path(path1);
+ ext4_free_ext_path(path2);
return replaced_count;
}
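
The errout label above can be reached while path1 or path2 holds an ERR_PTR value, yet it still calls ext4_free_ext_path() on both unconditionally. That only works if the helper, whose body is not part of this diff, tolerates NULL and error pointers; presumably it is little more than the old drop_refs/kfree pair behind a guard, roughly:

/* Assumed shape of the helper; its definition is not shown in this patch. */
void ext4_free_ext_path(struct ext4_ext_path *path)
{
	if (IS_ERR_OR_NULL(path))
		return;			/* safe on failed lookups */
	ext4_ext_drop_refs(path);	/* put the referenced buffer heads */
	kfree(path);
}

The same assumption is what lets ext4_find_extent() and ext4_force_split_extent_at() take the previous path as an argument and hand back either a reusable path or an ERR_PTR, so each function needs a single cleanup call instead of a drop_refs/kfree pair on every exit.
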
@@ -5784,13 +5865,19 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
struct ext4_extent *extent;
ext4_lblk_t first_lblk, first_lclu, last_lclu;
+ /*
+ * if data can be stored inline, the logical cluster isn't
+ * mapped - no physical clusters have been allocated, and the
+ * file has no extents
+ */
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
+ ext4_has_inline_data(inode))
+ return 0;
+
/* search for the extent closest to the first block in the cluster */
path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- path = NULL;
- goto out;
- }
+ if (IS_ERR(path))
+ return PTR_ERR(path);
depth = ext_depth(inode);
@@ -5837,8 +5924,7 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
}
out:
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
return err ? err : mapped;
}
@@ -5853,7 +5939,7 @@ out:
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
int len, int unwritten, ext4_fsblk_t pblk)
{
- struct ext4_ext_path *path = NULL, *ppath;
+ struct ext4_ext_path *path;
struct ext4_extent *ex;
int ret;
@@ -5869,30 +5955,34 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
if (le32_to_cpu(ex->ee_block) != start ||
ext4_ext_get_actual_len(ex) != len) {
/* We need to split this extent to match our extent first */
- ppath = path;
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
+ path = ext4_force_split_extent_at(NULL, inode, path, start, 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret)
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
- kfree(path);
- path = ext4_find_extent(inode, start, NULL, 0);
+ }
+
+ path = ext4_find_extent(inode, start, path, 0);
if (IS_ERR(path))
- return -1;
- ppath = path;
+ return PTR_ERR(path);
+
ex = path[path->p_depth].p_ext;
WARN_ON(le32_to_cpu(ex->ee_block) != start);
+
if (ext4_ext_get_actual_len(ex) != len) {
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_force_split_extent_at(NULL, inode, &ppath,
- start + len, 1);
+ path = ext4_force_split_extent_at(NULL, inode, path,
+ start + len, 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret)
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
- kfree(path);
- path = ext4_find_extent(inode, start, NULL, 0);
+ }
+
+ path = ext4_find_extent(inode, start, path, 0);
if (IS_ERR(path))
- return -EINVAL;
+ return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
}
}
@@ -5905,8 +5995,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
up_write(&EXT4_I(inode)->i_data_sem);
out:
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
ext4_mark_inode_dirty(NULL, inode);
return ret;
}
@@ -5924,8 +6013,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
return;
ex = path[path->p_depth].p_ext;
if (!ex) {
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
ext4_mark_inode_dirty(NULL, inode);
return;
}
@@ -5938,8 +6026,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
up_write(&EXT4_I(inode)->i_data_sem);
ext4_mark_inode_dirty(NULL, inode);
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
}
}
@@ -5977,14 +6064,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
if (IS_ERR(path))
return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_ext_drop_refs(path);
- kfree(path);
+ if (!ex)
goto out;
- }
end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
- ext4_ext_drop_refs(path);
- kfree(path);
/* Count the number of data blocks */
cur = 0;
@@ -6010,36 +6092,28 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
ret = skip_hole(inode, &cur);
if (ret < 0)
goto out;
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
goto out;
numblks += path->p_depth;
- ext4_ext_drop_refs(path);
- kfree(path);
while (cur < end) {
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
break;
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return 0;
- }
+ if (!ex)
+ goto cleanup;
+
cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex));
ret = skip_hole(inode, &cur);
- if (ret < 0) {
- ext4_ext_drop_refs(path);
- kfree(path);
+ if (ret < 0)
break;
- }
- path2 = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path2)) {
- ext4_ext_drop_refs(path);
- kfree(path);
+
+ path2 = ext4_find_extent(inode, cur, path2, 0);
+ if (IS_ERR(path2))
break;
- }
+
for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
cmp1 = cmp2 = 0;
if (i <= path->p_depth)
@@ -6051,15 +6125,14 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
if (cmp1 != cmp2 && cmp2 != 0)
numblks++;
}
- ext4_ext_drop_refs(path);
- ext4_ext_drop_refs(path2);
- kfree(path);
- kfree(path2);
}
out:
inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
ext4_mark_inode_dirty(NULL, inode);
+cleanup:
+ ext4_free_ext_path(path);
+ ext4_free_ext_path(path2);
return 0;
}
@@ -6080,14 +6153,9 @@ int ext4_ext_clear_bb(struct inode *inode)
if (IS_ERR(path))
return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return 0;
- }
+ if (!ex)
+ goto out;
end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
- ext4_ext_drop_refs(path);
- kfree(path);
cur = 0;
while (cur < end) {
@@ -6097,20 +6165,25 @@ int ext4_ext_clear_bb(struct inode *inode)
if (ret < 0)
break;
if (ret > 0) {
- path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
- if (!IS_ERR_OR_NULL(path)) {
+ path = ext4_find_extent(inode, map.m_lblk, path, 0);
+ if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++) {
-
ext4_mb_mark_bb(inode->i_sb,
- path[j].p_block, 1, 0);
+ path[j].p_block, 1, false);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ 0, path[j].p_block, 1, 1);
}
- ext4_ext_drop_refs(path);
- kfree(path);
+ } else {
+ path = NULL;
}
- ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ map.m_lblk, map.m_pblk, map.m_len, 1);
}
cur = cur + map.m_len;
}
+out:
+ ext4_free_ext_path(path);
return 0;
}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 9a3a8996aacf..e04fbf10fe4f 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -120,9 +120,40 @@
* memory. Hence, we will reclaim written/unwritten/hole extents from
* the tree under a heavy memory pressure.
*
+ * ==========================================================================
+ * 3. Assurance of Ext4 extent status tree consistency
+ *
+ * When mapping blocks, Ext4 queries the extent status tree first and should
+ * always trust that the extent status tree is consistent and up to date.
+ * Therefore, it is important to adhere to the following rules when creating,
+ * modifying and removing extents.
+ *
+ * 1. Besides fastcommit replay, when Ext4 creates or queries block mappings,
+ * the extent information should always be processed through the extent
+ * status tree instead of being organized manually through the on-disk
+ * extent tree.
+ *
+ * 2. When updating the extent tree, Ext4 should acquire the i_data_sem
+ * exclusively and update the extent status tree atomically. If the extents
+ * to be modified are large enough to exceed the range that a single
+ * i_data_sem can process (as ext4_datasem_ensure_credits() may drop
+ * i_data_sem to restart a transaction), it must (e.g. as ext4_punch_hole()
+ * does):
+ *
+ * a) Hold the i_rwsem and invalidate_lock exclusively. This ensures
+ * exclusion against page faults, as well as reads and writes that may
+ * concurrently modify the extent status tree.
+ * b) Evict all page cache in the affected range and recommend rebuilding
+ * or dropping the extent status tree after modifying the on-disk
+ * extent tree. This ensures exclusion against concurrent writebacks
+ * that do not hold those locks but only hold a folio lock.
+ *
+ * 3. Based on the rules above, when querying block mappings, Ext4 should at
+ * least hold the i_rwsem or invalidate_lock or folio lock(s) for the
+ * specified querying range.
*
* ==========================================================================
- * 3. Performance analysis
+ * 4. Performance analysis
*
* -- overhead
* 1. There is a cache extent for write access, so if writes are
@@ -134,7 +165,7 @@
*
*
* ==========================================================================
- * 4. TODO list
+ * 5. TODO list
*
* -- Refactor delayed space reservation
*
@@ -144,20 +175,21 @@
static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;
-static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+ struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t end, int *reserved);
+ ext4_lblk_t end, int *reserved,
+ struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
struct ext4_inode_info *locked_ei);
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len,
+ struct pending_reservation **prealloc);
int __init ext4_init_es(void)
{
- ext4_es_cachep = kmem_cache_create("ext4_extent_status",
- sizeof(struct extent_status),
- 0, (SLAB_RECLAIM_ACCOUNT), NULL);
+ ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
if (ext4_es_cachep == NULL)
return -ENOMEM;
return 0;
@@ -203,6 +235,13 @@ static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
return es->es_lblk + es->es_len - 1;
}
+static inline void ext4_es_inc_seq(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ WRITE_ONCE(ei->i_es_seq, ei->i_es_seq + 1);
+}
+
/*
* search through the tree for a delayed extent with a given offset. If
* it can't be found, try to find next extent.
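
ext4_es_inc_seq() gives each inode a change counter that is bumped under i_es_lock whenever the status tree is modified, and the reworked ext4_es_lookup_extent() further down can report the counter value through its new pseq argument. The consumers are not in these hunks; a hedged sketch of how a caller could use the pair to detect a stale lockless lookup:

/* Hypothetical helper, not part of this patch. */
static bool example_lookup_is_stale(struct inode *inode, ext4_lblk_t lblk)
{
	struct extent_status es;
	ext4_lblk_t next;
	u64 seq = 0;

	if (!ext4_es_lookup_extent(inode, lblk, &next, &es, &seq))
		return true;	/* nothing cached for this block */

	/* ... act on 'es' without holding i_es_lock ... */

	/* any insert or remove since the lookup has bumped i_es_seq */
	return READ_ONCE(EXT4_I(inode)->i_es_seq) != seq;
}
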
@@ -269,14 +308,12 @@ static void __es_find_extent_range(struct inode *inode,
/* see if the extent has been cached */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u) %llu %x\n",
- lblk, es1->es_lblk, es1->es_len,
- ext4_es_pblock(es1), ext4_es_status(es1));
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u) %llu %x\n",
+ lblk, es1->es_lblk, es1->es_len,
+ ext4_es_pblock(es1), ext4_es_status(es1));
+ goto out;
}
es1 = __es_tree_search(&tree->root, lblk);
@@ -295,7 +332,7 @@ out:
}
if (es1 && matching_fn(es1)) {
- tree->cache_es = es1;
+ WRITE_ONCE(tree->cache_es, es1);
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
@@ -311,6 +348,8 @@ void ext4_es_find_extent_range(struct inode *inode,
ext4_lblk_t lblk, ext4_lblk_t end,
struct extent_status *es)
{
+ es->es_lblk = es->es_len = es->es_pblk = 0;
+
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return;
@@ -450,22 +489,49 @@ static void ext4_es_list_del(struct inode *inode)
spin_unlock(&sbi->s_es_lock);
}
-static struct extent_status *
-ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
- ext4_fsblk_t pblk)
+static inline struct pending_reservation *__alloc_pending(bool nofail)
+{
+ if (!nofail)
+ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+
+ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static inline void __free_pending(struct pending_reservation *pr)
+{
+ kmem_cache_free(ext4_pending_cachep, pr);
+}
+
+/*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+ */
+static inline bool ext4_es_must_keep(struct extent_status *es)
+{
+ /* fiemap, bigalloc, and seek_data/hole need to use it. */
+ if (ext4_es_is_delayed(es))
+ return true;
+
+ return false;
+}
+
+static inline struct extent_status *__es_alloc_extent(bool nofail)
+{
+ if (!nofail)
+ return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+
+ return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
+ ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
- struct extent_status *es;
- es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
- if (es == NULL)
- return NULL;
es->es_lblk = lblk;
es->es_len = len;
es->es_pblk = pblk;
- /*
- * We don't count delayed extent because we never try to reclaim them
- */
- if (!ext4_es_is_delayed(es)) {
+ /* We never try to reclaim a must kept extent, so we don't count it. */
+ if (!ext4_es_must_keep(es)) {
if (!EXT4_I(inode)->i_es_shk_nr++)
ext4_es_list_add(inode);
percpu_counter_inc(&EXT4_SB(inode->i_sb)->
@@ -474,8 +540,11 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
EXT4_I(inode)->i_es_all_nr++;
percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+}
- return es;
+static inline void __es_free_extent(struct extent_status *es)
+{
+ kmem_cache_free(ext4_es_cachep, es);
}
static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
@@ -483,8 +552,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
EXT4_I(inode)->i_es_all_nr--;
percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
- /* Decrease the shrink counter when this es is not delayed */
- if (!ext4_es_is_delayed(es)) {
+ /* Decrease the shrink counter when we can reclaim the extent. */
+ if (!ext4_es_must_keep(es)) {
BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
if (!--EXT4_I(inode)->i_es_shk_nr)
ext4_es_list_del(inode);
@@ -492,7 +561,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
s_es_stats.es_stats_shk_cnt);
}
- kmem_cache_free(ext4_es_cachep, es);
+ __es_free_extent(es);
}
/*
@@ -527,8 +596,8 @@ static int ext4_es_can_be_merged(struct extent_status *es1,
if (ext4_es_is_hole(es1))
return 1;
- /* we need to check delayed extent is without unwritten status */
- if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
+ /* we need to check delayed extent */
+ if (ext4_es_is_delayed(es1))
return 1;
return 0;
@@ -667,8 +736,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
}
}
out:
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
}
static void ext4_es_insert_extent_ind_check(struct inode *inode,
@@ -754,7 +822,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
}
#endif
-static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+ struct extent_status *prealloc)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node **p = &tree->root.rb_node;
@@ -794,10 +863,15 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
}
}
- es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
- newes->es_pblk);
+ if (prealloc)
+ es = prealloc;
+ else
+ es = __es_alloc_extent(false);
if (!es)
return -ENOMEM;
+ ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
+ newes->es_pblk);
+
rb_link_node(&es->rb_node, parent, p);
rb_insert_color(&es->rb_node, &tree->root);
@@ -809,67 +883,118 @@ out:
/*
* ext4_es_insert_extent() adds information to an inode's extent
* status tree.
- *
- * Return 0 on success, error code on failure.
*/
-int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned int status)
+void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned int status, bool delalloc_reserve_used)
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
- int err = 0;
+ int err1 = 0, err2 = 0, err3 = 0;
+ int resv_used = 0, pending = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
+ struct pending_reservation *pr = NULL;
+ bool revise_pending = false;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
- return 0;
+ return;
- es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
- lblk, len, pblk, status, inode->i_ino);
+ es_debug("add [%u/%u) %llu %x %d to extent status tree of inode %lu\n",
+ lblk, len, pblk, status, delalloc_reserve_used, inode->i_ino);
if (!len)
- return 0;
+ return;
BUG_ON(end < lblk);
-
- if ((status & EXTENT_STATUS_DELAYED) &&
- (status & EXTENT_STATUS_WRITTEN)) {
- ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
- " delayed and written which can potentially "
- " cause data loss.", lblk, len);
- WARN_ON(1);
- }
+ WARN_ON_ONCE(status & EXTENT_STATUS_DELAYED);
newes.es_lblk = lblk;
newes.es_len = len;
ext4_es_store_pblock_status(&newes, pblk, status);
- trace_ext4_es_insert_extent(inode, &newes);
ext4_es_insert_extent_check(inode, &newes);
+ revise_pending = sbi->s_cluster_ratio > 1 &&
+ test_opt(inode->i_sb, DELALLOC) &&
+ (status & (EXTENT_STATUS_WRITTEN |
+ EXTENT_STATUS_UNWRITTEN));
+retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
+ if ((err1 || err2 || err3 < 0) && revise_pending && !pr)
+ pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, end, NULL);
- if (err != 0)
+
+ err1 = __es_remove_extent(inode, lblk, end, &resv_used, es1);
+ if (err1 != 0)
goto error;
-retry:
- err = __es_insert_extent(inode, &newes);
- if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
- 128, EXT4_I(inode)))
- goto retry;
- if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
- err = 0;
+ /* Free preallocated extent if it didn't get used. */
+ if (es1) {
+ if (!es1->es_len)
+ __es_free_extent(es1);
+ es1 = NULL;
+ }
- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
- (status & EXTENT_STATUS_WRITTEN ||
- status & EXTENT_STATUS_UNWRITTEN))
- __revise_pending(inode, lblk, len);
+ err2 = __es_insert_extent(inode, &newes, es2);
+ if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
+ err2 = 0;
+ if (err2 != 0)
+ goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es2) {
+ if (!es2->es_len)
+ __es_free_extent(es2);
+ es2 = NULL;
+ }
+ if (revise_pending) {
+ err3 = __revise_pending(inode, lblk, len, &pr);
+ if (err3 < 0)
+ goto error;
+ if (pr) {
+ __free_pending(pr);
+ pr = NULL;
+ }
+ pending = err3;
+ }
+ /*
+ * TODO: For caching on-disk extents there is no need to increment
+ * the sequence counter; this is left as a future optimization.
+ */
+ ext4_es_inc_seq(inode);
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ /*
+ * Reduce the reserved cluster count to reflect successful deferred
+ * allocation of delayed allocated clusters or direct allocation of
+ * clusters discovered to be delayed allocated. Once allocated, a
+ * cluster is not included in the reserved count.
+ *
+ * When direct allocating (from fallocate, filemap, DIO, or clusters
+ * allocated when delalloc has been disabled by ext4_nonda_switch())
+ * an extent either 1) contains delayed blocks but starts with
+ * non-delayed allocated blocks (e.g. hole) or 2) contains non-delayed
+ * allocated blocks which belong to delayed allocated clusters when
+ * bigalloc feature is enabled, quota has already been claimed by
+ * ext4_mb_new_blocks(), so release the quota reservations made for
+ * any previously delayed allocated clusters instead of claiming them
+ * again.
+ */
+ resv_used += pending;
+ if (resv_used)
+ ext4_da_update_reserve_space(inode, resv_used,
+ delalloc_reserve_used);
- ext4_es_print_tree(inode);
+ if (err1 || err2 || err3 < 0)
+ goto retry;
- return err;
+ trace_ext4_es_insert_extent(inode, &newes);
+ ext4_es_print_tree(inode);
+ return;
}
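
The key design change above is that ext4_es_insert_extent() no longer returns an error: the first pass allocates opportunistically with GFP_ATOMIC under i_es_lock, and if anything fails the lock is dropped, the missing extent_status and pending_reservation objects are preallocated with GFP_KERNEL | __GFP_NOFAIL, and the whole remove/insert/revise sequence is retried. Callers therefore lose their error handling entirely; a hypothetical call site now reduces to:

/* Hypothetical call site; both calls are void and cannot fail. */
static void example_commit_mapping(struct inode *inode, ext4_lblk_t lblk,
				   ext4_lblk_t len, ext4_fsblk_t pblk)
{
	ext4_es_remove_extent(inode, lblk, len);
	ext4_es_insert_extent(inode, lblk, len, pblk,
			      EXTENT_STATUS_WRITTEN, false);
}

The trade-off is that under severe memory pressure the updating task loops on __GFP_NOFAIL allocations instead of surfacing -ENOMEM; presumably that is acceptable because by this point the on-disk extent tree has already changed and the cache must follow it.
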
/*
@@ -902,7 +1027,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
if (!es || es->es_lblk > end)
- __es_insert_extent(inode, &newes);
+ __es_insert_extent(inode, &newes, NULL);
write_unlock(&EXT4_I(inode)->i_es_lock);
}
@@ -914,8 +1039,8 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
* Return: 1 on found, 0 on not
*/
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t *next_lblk,
- struct extent_status *es)
+ ext4_lblk_t *next_lblk, struct extent_status *es,
+ u64 *pseq)
{
struct ext4_es_tree *tree;
struct ext4_es_stats *stats;
@@ -934,14 +1059,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
/* find extent in cache firstly */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u)\n",
- lblk, es1->es_lblk, es1->es_len);
- found = 1;
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u)\n",
+ lblk, es1->es_lblk, es1->es_len);
+ found = 1;
+ goto out;
}
node = tree->root.rb_node;
@@ -976,6 +1099,8 @@ out:
} else
*next_lblk = 0;
}
+ if (pseq)
+ *pseq = EXT4_I(inode)->i_es_seq;
} else {
percpu_counter_inc(&stats->es_stats_cache_misses);
}
@@ -987,7 +1112,7 @@ out:
}
struct rsvd_count {
- int ndelonly;
+ int ndelayed;
bool first_do_lblk_found;
ext4_lblk_t first_do_lblk;
ext4_lblk_t last_do_lblk;
@@ -1013,10 +1138,10 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct rb_node *node;
- rc->ndelonly = 0;
+ rc->ndelayed = 0;
/*
- * for bigalloc, note the first delonly block in the range has not
+ * for bigalloc, note the first delayed block in the range has not
* been found, record the extent containing the block to the left of
* the region to be removed, if any, and note that there's no partial
* cluster to track
@@ -1036,9 +1161,8 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
}
/*
- * count_rsvd - count the clusters containing delayed and not unwritten
- * (delonly) blocks in a range within an extent and add to
- * the running tally in rsvd_count
+ * count_rsvd - count the clusters containing delayed blocks in a range
+ * within an extent and add to the running tally in rsvd_count
*
* @inode - file containing extent
* @lblk - first block in range
@@ -1055,13 +1179,13 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t i, end, nclu;
- if (!ext4_es_is_delonly(es))
+ if (!ext4_es_is_delayed(es))
return;
WARN_ON(len <= 0);
if (sbi->s_cluster_ratio == 1) {
- rc->ndelonly += (int) len;
+ rc->ndelayed += (int) len;
return;
}
@@ -1071,7 +1195,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
end = lblk + (ext4_lblk_t) len - 1;
end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
- /* record the first block of the first delonly extent seen */
+ /* record the first block of the first delayed extent seen */
if (!rc->first_do_lblk_found) {
rc->first_do_lblk = i;
rc->first_do_lblk_found = true;
@@ -1085,7 +1209,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
* doesn't start with it, count it and stop tracking
*/
if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
- rc->ndelonly++;
+ rc->ndelayed++;
rc->partial = false;
}
@@ -1095,7 +1219,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
*/
if (EXT4_LBLK_COFF(sbi, i) != 0) {
if (end >= EXT4_LBLK_CFILL(sbi, i)) {
- rc->ndelonly++;
+ rc->ndelayed++;
rc->partial = false;
i = EXT4_LBLK_CFILL(sbi, i) + 1;
}
@@ -1103,11 +1227,11 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
/*
* if the current cluster starts on a cluster boundary, count the
- * number of whole delonly clusters in the extent
+ * number of whole delayed clusters in the extent
*/
if ((i + sbi->s_cluster_ratio - 1) <= end) {
nclu = (end - i + 1) >> sbi->s_cluster_bits;
- rc->ndelonly += nclu;
+ rc->ndelayed += nclu;
i += nclu << sbi->s_cluster_bits;
}
@@ -1167,10 +1291,9 @@ static struct pending_reservation *__pr_tree_search(struct rb_root *root,
* @rc - pointer to reserved count data
*
* The number of reservations to be released is equal to the number of
- * clusters containing delayed and not unwritten (delonly) blocks within
- * the range, minus the number of clusters still containing delonly blocks
- * at the ends of the range, and minus the number of pending reservations
- * within the range.
+ * clusters containing delayed blocks within the range, minus the number of
+ * clusters still containing delayed blocks at the ends of the range, and
+ * minus the number of pending reservations within the range.
*/
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
struct extent_status *right_es,
@@ -1181,33 +1304,33 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
struct rb_node *node;
ext4_lblk_t first_lclu, last_lclu;
- bool left_delonly, right_delonly, count_pending;
+ bool left_delayed, right_delayed, count_pending;
struct extent_status *es;
if (sbi->s_cluster_ratio > 1) {
/* count any remaining partial cluster */
if (rc->partial)
- rc->ndelonly++;
+ rc->ndelayed++;
- if (rc->ndelonly == 0)
+ if (rc->ndelayed == 0)
return 0;
first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
/*
- * decrease the delonly count by the number of clusters at the
- * ends of the range that still contain delonly blocks -
+ * decrease the delayed count by the number of clusters at the
+ * ends of the range that still contain delayed blocks -
* these clusters still need to be reserved
*/
- left_delonly = right_delonly = false;
+ left_delayed = right_delayed = false;
es = rc->left_es;
while (es && ext4_es_end(es) >=
EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
- if (ext4_es_is_delonly(es)) {
- rc->ndelonly--;
- left_delonly = true;
+ if (ext4_es_is_delayed(es)) {
+ rc->ndelayed--;
+ left_delayed = true;
break;
}
node = rb_prev(&es->rb_node);
@@ -1215,7 +1338,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
break;
es = rb_entry(node, struct extent_status, rb_node);
}
- if (right_es && (!left_delonly || first_lclu != last_lclu)) {
+ if (right_es && (!left_delayed || first_lclu != last_lclu)) {
if (end < ext4_es_end(right_es)) {
es = right_es;
} else {
@@ -1225,9 +1348,9 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
}
while (es && es->es_lblk <=
EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
- if (ext4_es_is_delonly(es)) {
- rc->ndelonly--;
- right_delonly = true;
+ if (ext4_es_is_delayed(es)) {
+ rc->ndelayed--;
+ right_delayed = true;
break;
}
node = rb_next(&es->rb_node);
@@ -1241,21 +1364,21 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
/*
* Determine the block range that should be searched for
* pending reservations, if any. Clusters on the ends of the
- * original removed range containing delonly blocks are
+ * original removed range containing delayed blocks are
* excluded. They've already been accounted for and it's not
* possible to determine if an associated pending reservation
* should be released with the information available in the
* extents status tree.
*/
if (first_lclu == last_lclu) {
- if (left_delonly | right_delonly)
+ if (left_delayed | right_delayed)
count_pending = false;
else
count_pending = true;
} else {
- if (left_delonly)
+ if (left_delayed)
first_lclu++;
- if (right_delonly)
+ if (right_delayed)
last_lclu--;
if (first_lclu <= last_lclu)
count_pending = true;
@@ -1266,16 +1389,16 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
/*
* a pending reservation found between first_lclu and last_lclu
* represents an allocated cluster that contained at least one
- * delonly block, so the delonly total must be reduced by one
+ * delayed block, so the delayed total must be reduced by one
* for each pending reservation found and released
*/
if (count_pending) {
pr = __pr_tree_search(&tree->root, first_lclu);
while (pr && pr->lclu <= last_lclu) {
- rc->ndelonly--;
+ rc->ndelayed--;
node = rb_next(&pr->rb_node);
rb_erase(&pr->rb_node, &tree->root);
- kmem_cache_free(ext4_pending_cachep, pr);
+ __free_pending(pr);
if (!node)
break;
pr = rb_entry(node, struct pending_reservation,
@@ -1283,7 +1406,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
}
}
}
- return rc->ndelonly;
+ return rc->ndelayed;
}
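
A worked example of the rule stated in the rewritten comment, assuming bigalloc with four blocks per cluster: removing delayed blocks 5-14 touches clusters 1-3, so count_rsvd() tallies ndelayed = 3. If block 4 (cluster 1) and block 15 (cluster 3) are still delayed outside the range, both end clusters must stay reserved and ndelayed drops to 1. If the remaining interior cluster 2 carries a pending reservation, that reservation is erased here and ndelayed drops to 0, which is the number of cluster reservations finally released by __es_remove_extent().
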
@@ -1294,6 +1417,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
* @lblk - first block in range
* @end - last block in range
* @reserved - number of cluster reservations released
+ * @prealloc - pre-allocated es to avoid memory allocation failures
*
* If @reserved is not NULL and delayed allocation is enabled, counts
* block/cluster reservations freed by removing range and if bigalloc
@@ -1301,7 +1425,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
* error code on failure.
*/
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t end, int *reserved)
+ ext4_lblk_t end, int *reserved,
+ struct extent_status *prealloc)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node *node;
@@ -1309,14 +1434,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status orig_es;
ext4_lblk_t len1, len2;
ext4_fsblk_t block;
- int err;
+ int err = 0;
bool count_reserved = true;
struct rsvd_count rc;
if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
count_reserved = false;
-retry:
- err = 0;
es = __es_tree_search(&tree->root, lblk);
if (!es)
@@ -1350,14 +1473,13 @@ retry:
orig_es.es_len - len2;
ext4_es_store_pblock_status(&newes, block,
ext4_es_status(&orig_es));
- err = __es_insert_extent(inode, &newes);
+ err = __es_insert_extent(inode, &newes, prealloc);
if (err) {
+ if (!ext4_es_must_keep(&newes))
+ return 0;
+
es->es_lblk = orig_es.es_lblk;
es->es_len = orig_es.es_len;
- if ((err == -ENOMEM) &&
- __es_shrink(EXT4_SB(inode->i_sb),
- 128, EXT4_I(inode)))
- goto retry;
goto out;
}
} else {
@@ -1370,9 +1492,9 @@ retry:
}
}
if (count_reserved)
- count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
- &orig_es, &rc);
- goto out;
+ count_rsvd(inode, orig_es.es_lblk + len1,
+ orig_es.es_len - len1 - len2, &orig_es, &rc);
+ goto out_get_reserved;
}
if (len1 > 0) {
@@ -1414,6 +1536,7 @@ retry:
}
}
+out_get_reserved:
if (count_reserved)
*reserved = get_rsvd(inode, end, es, &rc);
out:
@@ -1428,39 +1551,55 @@ out:
* @len - number of blocks to remove
*
* Reduces block/cluster reservation count and for bigalloc cancels pending
- * reservations as needed. Returns 0 on success, error code on failure.
+ * reservations as needed.
*/
-int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
+void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
{
ext4_lblk_t end;
int err = 0;
int reserved = 0;
+ struct extent_status *es = NULL;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
- return 0;
+ return;
- trace_ext4_es_remove_extent(inode, lblk, len);
es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
lblk, len, inode->i_ino);
if (!len)
- return err;
+ return;
end = lblk + len - 1;
BUG_ON(end < lblk);
+retry:
+ if (err && !es)
+ es = __es_alloc_extent(true);
/*
* ext4_clear_inode() depends on us taking i_es_lock unconditionally
* so that we are sure __es_shrink() is done with the inode before it
* is reclaimed.
*/
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, end, &reserved);
+ err = __es_remove_extent(inode, lblk, end, &reserved, es);
+ if (err)
+ goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es) {
+ if (!es->es_len)
+ __es_free_extent(es);
+ es = NULL;
+ }
+ ext4_es_inc_seq(inode);
+error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err)
+ goto retry;
+
+ trace_ext4_es_remove_extent(inode, lblk, len);
ext4_es_print_tree(inode);
ext4_da_release_space(inode, reserved);
- return err;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
@@ -1557,7 +1696,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
unsigned long nr;
struct ext4_sb_info *sbi;
- sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
+ sbi = shrink->private_data;
nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
return nr;
@@ -1566,8 +1705,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
static unsigned long ext4_es_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct ext4_sb_info *sbi = container_of(shrink,
- struct ext4_sb_info, s_es_shrinker);
+ struct ext4_sb_info *sbi = shrink->private_data;
int nr_to_scan = sc->nr_to_scan;
int ret, nr_shrunk;
@@ -1651,12 +1789,17 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
if (err)
goto err3;
- sbi->s_es_shrinker.scan_objects = ext4_es_scan;
- sbi->s_es_shrinker.count_objects = ext4_es_count;
- sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
- err = register_shrinker(&sbi->s_es_shrinker);
- if (err)
+ sbi->s_es_shrinker = shrinker_alloc(0, "ext4-es:%s", sbi->s_sb->s_id);
+ if (!sbi->s_es_shrinker) {
+ err = -ENOMEM;
goto err4;
+ }
+
+ sbi->s_es_shrinker->scan_objects = ext4_es_scan;
+ sbi->s_es_shrinker->count_objects = ext4_es_count;
+ sbi->s_es_shrinker->private_data = sbi;
+
+ shrinker_register(sbi->s_es_shrinker);
return 0;
err4:
@@ -1676,7 +1819,7 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
- unregister_shrinker(&sbi->s_es_shrinker);
+ shrinker_free(sbi->s_es_shrinker);
}
/*
@@ -1707,11 +1850,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
(*nr_to_scan)--;
node = rb_next(&es->rb_node);
- /*
- * We can't reclaim delayed extent from status tree because
- * fiemap, bigallic, and seek_data/hole need to use it.
- */
- if (ext4_es_is_delayed(es))
+
+ if (ext4_es_must_keep(es))
goto next;
if (ext4_es_is_referenced(es)) {
ext4_es_clear_referenced(es);
@@ -1775,7 +1915,7 @@ void ext4_clear_inode_es(struct inode *inode)
while (node) {
es = rb_entry(node, struct extent_status, rb_node);
node = rb_next(node);
- if (!ext4_es_is_delayed(es)) {
+ if (!ext4_es_must_keep(es)) {
rb_erase(&es->rb_node, &tree->root);
ext4_es_free_extent(inode, es);
}
@@ -1807,9 +1947,7 @@ static void ext4_print_pending_tree(struct inode *inode)
int __init ext4_init_pending(void)
{
- ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
- sizeof(struct pending_reservation),
- 0, (SLAB_RECLAIM_ACCOUNT), NULL);
+ ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT);
if (ext4_pending_cachep == NULL)
return -ENOMEM;
return 0;
@@ -1862,11 +2000,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
*
* @inode - file containing the cluster
* @lblk - logical block in the cluster to be added
+ * @prealloc - preallocated pending entry
*
- * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * Returns 1 on successful insertion and -ENOMEM on failure. If the
* pending reservation is already in the set, returns successfully.
*/
-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
+ struct pending_reservation **prealloc)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
@@ -1892,15 +2032,21 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
}
}
- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
- if (pr == NULL) {
- ret = -ENOMEM;
- goto out;
+ if (likely(*prealloc == NULL)) {
+ pr = __alloc_pending(false);
+ if (!pr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ pr = *prealloc;
+ *prealloc = NULL;
}
pr->lclu = lclu;
rb_link_node(&pr->rb_node, parent, p);
rb_insert_color(&pr->rb_node, &tree->root);
+ ret = 1;
out:
return ret;
@@ -1925,7 +2071,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
if (pr != NULL) {
tree = &EXT4_I(inode)->i_pending_tree;
rb_erase(&pr->rb_node, &tree->root);
- kmem_cache_free(ext4_pending_cachep, pr);
+ __free_pending(pr);
}
}
@@ -1971,147 +2117,112 @@ bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
}
/*
- * ext4_es_insert_delayed_block - adds a delayed block to the extents status
- * tree, adding a pending reservation where
- * needed
+ * ext4_es_insert_delayed_extent - adds some delayed blocks to the extents
+ * status tree, adding a pending reservation
+ * where needed
*
* @inode - file containing the newly added block
- * @lblk - logical block to be added
- * @allocated - indicates whether a physical cluster has been allocated for
- * the logical cluster that contains the block
- *
- * Returns 0 on success, negative error code on failure.
+ * @lblk - start logical block to be added
+ * @len - length of blocks to be added
+ * @lclu_allocated/end_allocated - indicates whether a physical cluster has
+ * been allocated for the logical cluster
+ * that contains the start/end block. Note that
+ * end_allocated should always be set to false
+ * if the start and the end block are in the
+ * same cluster
*/
-int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
- bool allocated)
+void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, bool lclu_allocated,
+ bool end_allocated)
{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct extent_status newes;
- int err = 0;
+ ext4_lblk_t end = lblk + len - 1;
+ int err1 = 0, err2 = 0, err3 = 0;
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
+ struct pending_reservation *pr1 = NULL;
+ struct pending_reservation *pr2 = NULL;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
- return 0;
+ return;
+
+ es_debug("add [%u/%u) delayed to extent status tree of inode %lu\n",
+ lblk, len, inode->i_ino);
+ if (!len)
+ return;
- es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
- lblk, inode->i_ino);
+ WARN_ON_ONCE((EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) &&
+ end_allocated);
newes.es_lblk = lblk;
- newes.es_len = 1;
+ newes.es_len = len;
ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
- trace_ext4_es_insert_delayed_block(inode, &newes, allocated);
ext4_es_insert_extent_check(inode, &newes);
+retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
+ if (err1 || err2 || err3 < 0) {
+ if (lclu_allocated && !pr1)
+ pr1 = __alloc_pending(true);
+ if (end_allocated && !pr2)
+ pr2 = __alloc_pending(true);
+ }
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, lblk, NULL);
- if (err != 0)
- goto error;
-retry:
- err = __es_insert_extent(inode, &newes);
- if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
- 128, EXT4_I(inode)))
- goto retry;
- if (err != 0)
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+ if (err1 != 0)
goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es1) {
+ if (!es1->es_len)
+ __es_free_extent(es1);
+ es1 = NULL;
+ }
- if (allocated)
- __insert_pending(inode, lblk);
+ err2 = __es_insert_extent(inode, &newes, es2);
+ if (err2 != 0)
+ goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es2) {
+ if (!es2->es_len)
+ __es_free_extent(es2);
+ es2 = NULL;
+ }
+ if (lclu_allocated) {
+ err3 = __insert_pending(inode, lblk, &pr1);
+ if (err3 < 0)
+ goto error;
+ if (pr1) {
+ __free_pending(pr1);
+ pr1 = NULL;
+ }
+ }
+ if (end_allocated) {
+ err3 = __insert_pending(inode, end, &pr2);
+ if (err3 < 0)
+ goto error;
+ if (pr2) {
+ __free_pending(pr2);
+ pr2 = NULL;
+ }
+ }
+ ext4_es_inc_seq(inode);
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err1 || err2 || err3 < 0)
+ goto retry;
+ trace_ext4_es_insert_delayed_extent(inode, &newes, lclu_allocated,
+ end_allocated);
ext4_es_print_tree(inode);
ext4_print_pending_tree(inode);
-
- return err;
-}
-
-/*
- * __es_delayed_clu - count number of clusters containing blocks that
- * are delayed only
- *
- * @inode - file containing block range
- * @start - logical block defining start of range
- * @end - logical block defining end of range
- *
- * Returns the number of clusters containing only delayed (not delayed
- * and unwritten) blocks in the range specified by @start and @end. Any
- * cluster or part of a cluster within the range and containing a delayed
- * and not unwritten block within the range is counted as a whole cluster.
- */
-static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t end)
-{
- struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
- struct extent_status *es;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct rb_node *node;
- ext4_lblk_t first_lclu, last_lclu;
- unsigned long long last_counted_lclu;
- unsigned int n = 0;
-
- /* guaranteed to be unequal to any ext4_lblk_t value */
- last_counted_lclu = ~0ULL;
-
- es = __es_tree_search(&tree->root, start);
-
- while (es && (es->es_lblk <= end)) {
- if (ext4_es_is_delonly(es)) {
- if (es->es_lblk <= start)
- first_lclu = EXT4_B2C(sbi, start);
- else
- first_lclu = EXT4_B2C(sbi, es->es_lblk);
-
- if (ext4_es_end(es) >= end)
- last_lclu = EXT4_B2C(sbi, end);
- else
- last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
-
- if (first_lclu == last_counted_lclu)
- n += last_lclu - first_lclu;
- else
- n += last_lclu - first_lclu + 1;
- last_counted_lclu = last_lclu;
- }
- node = rb_next(&es->rb_node);
- if (!node)
- break;
- es = rb_entry(node, struct extent_status, rb_node);
- }
-
- return n;
-}
-
-/*
- * ext4_es_delayed_clu - count number of clusters containing blocks that
- * are both delayed and unwritten
- *
- * @inode - file containing block range
- * @lblk - logical block defining start of range
- * @len - number of blocks in range
- *
- * Locking for external use of __es_delayed_clu().
- */
-unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- ext4_lblk_t end;
- unsigned int n;
-
- if (len == 0)
- return 0;
-
- end = lblk + len - 1;
- WARN_ON(end < lblk);
-
- read_lock(&ei->i_es_lock);
-
- n = __es_delayed_clu(inode, lblk, end);
-
- read_unlock(&ei->i_es_lock);
-
- return n;
+ return;
}
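
Editor's note: ext4_es_insert_delayed_extent() above follows the extent-status tree's usual retry discipline: nothing is allocated while i_es_lock is held; after a failed pass the lock is dropped, whatever was missing is preallocated (__es_alloc_extent(), __alloc_pending()), and the whole sequence is retried. Reduced to a single-object skeleton, condensed for illustration only (the real function juggles several preallocations at once):

/* Editor's condensed skeleton of the preallocate-and-retry pattern above. */
	struct extent_status *es = NULL;
	int err = 0;

retry:
	if (err && !es)
		es = __es_alloc_extent(true);	/* may sleep, so done unlocked */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_insert_extent(inode, &newes, es);	/* consumes 'es' on demand */
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err)
		goto retry;			/* next pass has 'es' ready */
	if (es && !es->es_len)
		__es_free_extent(es);		/* preallocation went unused */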
/*
@@ -2124,21 +2235,27 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
* @inode - file containing the range
* @lblk - logical block defining the start of range
* @len - length of range in blocks
+ * @prealloc - preallocated pending entry
*
* Used after a newly allocated extent is added to the extents status tree.
* Requires that the extents in the range have either written or unwritten
- * status. Must be called while holding i_es_lock.
+ * status. Must be called while holding i_es_lock. Returns the number of
+ * pending cluster reservations added, 0 if reservations were only removed,
+ * or -ENOMEM on allocation failure.
*/
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len,
+ struct pending_reservation **prealloc)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t end = lblk + len - 1;
ext4_lblk_t first, last;
bool f_del = false, l_del = false;
+ int pendings = 0;
+ int ret = 0;
if (len == 0)
- return;
+ return 0;
/*
* Two cases - block range within single cluster and block range
@@ -2156,39 +2273,53 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
first = EXT4_LBLK_CMASK(sbi, lblk);
if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ f_del = __es_scan_range(inode, &ext4_es_is_delayed,
first, lblk - 1);
if (f_del) {
- __insert_pending(inode, first);
+ ret = __insert_pending(inode, first, prealloc);
+ if (ret < 0)
+ goto out;
+ pendings += ret;
} else {
last = EXT4_LBLK_CMASK(sbi, end) +
sbi->s_cluster_ratio - 1;
if (last != end)
l_del = __es_scan_range(inode,
- &ext4_es_is_delonly,
+ &ext4_es_is_delayed,
end + 1, last);
- if (l_del)
- __insert_pending(inode, last);
- else
+ if (l_del) {
+ ret = __insert_pending(inode, last, prealloc);
+ if (ret < 0)
+ goto out;
+ pendings += ret;
+ } else
__remove_pending(inode, last);
}
} else {
first = EXT4_LBLK_CMASK(sbi, lblk);
if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ f_del = __es_scan_range(inode, &ext4_es_is_delayed,
first, lblk - 1);
- if (f_del)
- __insert_pending(inode, first);
- else
+ if (f_del) {
+ ret = __insert_pending(inode, first, prealloc);
+ if (ret < 0)
+ goto out;
+ pendings += ret;
+ } else
__remove_pending(inode, first);
last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
if (last != end)
- l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ l_del = __es_scan_range(inode, &ext4_es_is_delayed,
end + 1, last);
- if (l_del)
- __insert_pending(inode, last);
- else
+ if (l_del) {
+ ret = __insert_pending(inode, last, prealloc);
+ if (ret < 0)
+ goto out;
+ pendings += ret;
+ } else
__remove_pending(inode, last);
}
+out:
+ return (ret < 0) ? ret : pendings;
}
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 4ec30a798260..f3396cf32b44 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -42,6 +42,10 @@ enum {
#define ES_SHIFT (sizeof(ext4_fsblk_t)*8 - ES_FLAGS)
#define ES_MASK (~((ext4_fsblk_t)0) << ES_SHIFT)
+/*
+ * Apart from EXTENT_STATUS_REFERENCED, these extent type masks are
+ * mutually exclusive: only one type can be set at a time.
+ */
#define EXTENT_STATUS_WRITTEN (1 << ES_WRITTEN_B)
#define EXTENT_STATUS_UNWRITTEN (1 << ES_UNWRITTEN_B)
#define EXTENT_STATUS_DELAYED (1 << ES_DELAYED_B)
@@ -51,7 +55,9 @@ enum {
#define ES_TYPE_MASK ((ext4_fsblk_t)(EXTENT_STATUS_WRITTEN | \
EXTENT_STATUS_UNWRITTEN | \
EXTENT_STATUS_DELAYED | \
- EXTENT_STATUS_HOLE) << ES_SHIFT)
+ EXTENT_STATUS_HOLE))
+
+#define ES_TYPE_VALID(type) ((type) && !((type) & ((type) - 1)))
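
Editor's illustration (not part of the patch): ES_TYPE_VALID() is the standard "exactly one bit set" test, so it accepts each individual type mask and rejects combinations as well as zero. A tiny standalone check:

#include <assert.h>

#define ES_TYPE_VALID(type)	((type) && !((type) & ((type) - 1)))

int main(void)
{
	assert(ES_TYPE_VALID(1u << 0));			/* a single type bit */
	assert(ES_TYPE_VALID(1u << 3));			/* another single bit */
	assert(!ES_TYPE_VALID((1u << 0) | (1u << 2)));	/* two types at once */
	assert(!ES_TYPE_VALID(0));			/* no type at all */
	return 0;
}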
struct ext4_sb_info;
struct ext4_extent;
@@ -127,21 +133,22 @@ extern int __init ext4_init_es(void);
extern void ext4_exit_es(void);
extern void ext4_es_init_tree(struct ext4_es_tree *tree);
-extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned int status);
+extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned int status,
+ bool delalloc_reserve_used);
extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status);
-extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
+extern void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
extern void ext4_es_find_extent_range(struct inode *inode,
int (*match_fn)(struct extent_status *es),
ext4_lblk_t lblk, ext4_lblk_t end,
struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t *next_lblk,
- struct extent_status *es);
+ struct extent_status *es, u64 *pseq);
extern bool ext4_es_scan_range(struct inode *inode,
int (*matching_fn)(struct extent_status *es),
ext4_lblk_t lblk, ext4_lblk_t end);
@@ -156,7 +163,7 @@ static inline unsigned int ext4_es_status(struct extent_status *es)
static inline unsigned int ext4_es_type(struct extent_status *es)
{
- return (es->es_pblk & ES_TYPE_MASK) >> ES_SHIFT;
+ return (es->es_pblk >> ES_SHIFT) & ES_TYPE_MASK;
}
static inline int ext4_es_is_written(struct extent_status *es)
@@ -184,11 +191,6 @@ static inline int ext4_es_is_mapped(struct extent_status *es)
return (ext4_es_is_written(es) || ext4_es_is_unwritten(es));
}
-static inline int ext4_es_is_delonly(struct extent_status *es)
-{
- return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
-}
-
static inline void ext4_es_set_referenced(struct extent_status *es)
{
es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
@@ -224,17 +226,12 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
es->es_pblk = block;
}
-static inline void ext4_es_store_status(struct extent_status *es,
- unsigned int status)
-{
- es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
- (es->es_pblk & ~ES_MASK);
-}
-
static inline void ext4_es_store_pblock_status(struct extent_status *es,
ext4_fsblk_t pb,
unsigned int status)
{
+ WARN_ON_ONCE(!ES_TYPE_VALID(status & ES_TYPE_MASK));
+
es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
(pb & ~ES_MASK);
}
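
Editor's illustration (not part of the patch): the extent status and the physical block number share the single 64-bit es_pblk field, status in the top ES_FLAGS bits and the block number below, which is what ext4_es_store_pblock_status() and ext4_es_type() manipulate above. A standalone round-trip sketch; ES_FLAGS = 5 is an assumption for the demo, the real value comes from the enum in this header:

#include <assert.h>
#include <stdint.h>

#define ES_FLAGS 5	/* demo assumption; mirrors the enum in extents_status.h */
#define ES_SHIFT (sizeof(uint64_t) * 8 - ES_FLAGS)
#define ES_MASK  (~((uint64_t)0) << ES_SHIFT)

int main(void)
{
	uint64_t pblk = 123456;		/* demo physical block number */
	unsigned int status = 1u << 2;	/* one of the exclusive type bits */
	uint64_t es_pblk;

	/* pack, as ext4_es_store_pblock_status() does */
	es_pblk = (((uint64_t)status << ES_SHIFT) & ES_MASK) | (pblk & ~ES_MASK);

	/* unpack: both halves round-trip unchanged */
	assert((es_pblk & ~ES_MASK) == pblk);			/* the block */
	assert(((es_pblk & ES_MASK) >> ES_SHIFT) == status);	/* the status */
	return 0;
}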
@@ -249,10 +246,9 @@ extern void ext4_exit_pending(void);
extern void ext4_init_pending_tree(struct ext4_pending_tree *tree);
extern void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk);
extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
-extern int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
- bool allocated);
-extern unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
+extern void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, bool lclu_allocated,
+ bool end_allocated);
extern void ext4_clear_inode_es(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 0f32b445582a..fa66b08de999 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -12,6 +12,7 @@
#include "ext4_extents.h"
#include "mballoc.h"
+#include <linux/lockdep.h>
/*
* Ext4 Fast Commits
* -----------------
@@ -49,37 +50,35 @@
* that need to be committed during a fast commit in another in memory queue of
* inodes. During the commit operation, we commit in the following order:
*
- * [1] Lock inodes for any further data updates by setting COMMITTING state
- * [2] Submit data buffers of all the inodes
- * [3] Wait for [2] to complete
- * [4] Commit all the directory entry updates in the fast commit space
- * [5] Commit all the changed inode structures
- * [6] Write tail tag (this tag ensures the atomicity, please read the following
+ * [1] Prepare all the inodes to write out their data by setting
+ * "EXT4_STATE_FC_FLUSHING_DATA". This ensures that inode cannot be
+ * deleted while it is being flushed.
+ * [2] Flush data buffers to disk and clear "EXT4_STATE_FC_FLUSHING_DATA"
+ * state.
+ * [3] Lock the journal by calling jbd2_journal_lock_updates. This ensures that
+ * all the existing handles finish and no new handles can start.
+ * [4] Mark all the fast commit eligible inodes as undergoing fast commit
+ * by setting "EXT4_STATE_FC_COMMITTING" state.
+ * [5] Unlock the journal by calling jbd2_journal_unlock_updates. This allows
+ * starting of new handles. If new handles try to start an update on
+ * any of the inodes that are being committed, ext4_fc_track_inode()
+ * will block until those inodes have finished the fast commit.
+ * [6] Commit all the directory entry updates in the fast commit space.
+ * [7] Commit all the changed inodes in the fast commit space and clear
+ * "EXT4_STATE_FC_COMMITTING" for these inodes.
+ * [8] Write tail tag (this tag ensures the atomicity, please read the following
* section for more details).
- * [7] Wait for [4], [5] and [6] to complete.
*
- * All the inode updates must call ext4_fc_start_update() before starting an
- * update. If such an ongoing update is present, fast commit waits for it to
- * complete. The completion of such an update is marked by
- * ext4_fc_stop_update().
+ * All the inode updates must be enclosed within jbd2_journal_start()
+ * and jbd2_journal_stop() similar to JBD2 journaling.
*
* Fast Commit Ineligibility
* -------------------------
- * Not all operations are supported by fast commits today (e.g extended
- * attributes). Fast commit ineligibility is marked by calling one of the
- * two following functions:
- *
- * - ext4_fc_mark_ineligible(): This makes next fast commit operation to fall
- * back to full commit. This is useful in case of transient errors.
*
- * - ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() - This makes all
- * the fast commits happening between ext4_fc_start_ineligible() and
- * ext4_fc_stop_ineligible() and one fast commit after the call to
- * ext4_fc_stop_ineligible() to fall back to full commits. It is important to
- * make one more fast commit to fall back to full commit after stop call so
- * that it guaranteed that the fast commit ineligible operation contained
- * within ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() is
- * followed by at least 1 full commit.
+ * Not all operations are supported by fast commits today (e.g. extended
+ * attributes). Fast commit ineligibility is marked by calling
+ * ext4_fc_mark_ineligible(): this makes the next fast commit operation
+ * fall back to a full commit.
*
* Atomicity of commits
* --------------------
@@ -152,6 +151,13 @@
* similarly. Thus, by converting a non-idempotent procedure into a series of
* idempotent outcomes, fast commits ensured idempotence during the replay.
*
+ * Locking
+ * -------
+ * sbi->s_fc_lock protects the fast commit inodes queue and the fast commit
+ * dentry queue. ei->i_fc_lock protects the fast commit related info in a given
+ * inode. Most of the code avoids acquiring both the locks, but if one must do
+ * that then sbi->s_fc_lock must be acquired before ei->i_fc_lock.
+ *
* TODOs
* -----
*
@@ -166,15 +172,12 @@
* fast commit recovery even if that area is invalidated by later full
* commits.
*
- * 1) Make fast commit atomic updates more fine grained. Today, a fast commit
- * eligible update must be protected within ext4_fc_start_update() and
- * ext4_fc_stop_update(). These routines are called at much higher
- * routines. This can be made more fine grained by combining with
- * ext4_journal_start().
- *
- * 2) Same above for ext4_fc_start_ineligible() and ext4_fc_stop_ineligible()
+ * 1) Handle more ineligible cases.
*
- * 3) Handle more ineligible cases.
+ * 2) Change ext4_fc_commit() to look up the logical to physical mapping
+ * using the extent status tree. This would get rid of the need to call
+ * ext4_fc_track_inode() before acquiring i_data_sem. To do that we would
+ * need to ensure that modified extents from the extent status tree are
+ * not evicted from memory.
*/
#include <trace/events/ext4.h>
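
Editor's sketch (not part of the patch): the comment block above says that every inode update must now be enclosed in a journal handle, with ext4_fc_track_inode() called inside that handle. A minimal sketch of that pattern using ext4's journalling wrappers, which wrap the jbd2 handle calls the comment refers to; ext4_example_update() and the credit count are hypothetical:

static int ext4_example_update(struct inode *inode)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... modify the inode / its metadata under the handle ... */

	ext4_fc_track_inode(handle, inode);	/* may wait if the inode is
						 * currently being fast committed */
	return ext4_journal_stop(handle);
}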
@@ -211,77 +214,14 @@ void ext4_fc_init_inode(struct inode *inode)
ext4_fc_reset_inode(inode);
ext4_clear_inode_state(inode, EXT4_STATE_FC_COMMITTING);
INIT_LIST_HEAD(&ei->i_fc_list);
+ INIT_LIST_HEAD(&ei->i_fc_dilist);
init_waitqueue_head(&ei->i_fc_wait);
- atomic_set(&ei->i_fc_updates, 0);
-}
-
-/* This function must be called with sbi->s_fc_lock held. */
-static void ext4_fc_wait_committing_inode(struct inode *inode)
-__releases(&EXT4_SB(inode->i_sb)->s_fc_lock)
-{
- wait_queue_head_t *wq;
- struct ext4_inode_info *ei = EXT4_I(inode);
-
-#if (BITS_PER_LONG < 64)
- DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
- EXT4_STATE_FC_COMMITTING);
- wq = bit_waitqueue(&ei->i_state_flags,
- EXT4_STATE_FC_COMMITTING);
-#else
- DEFINE_WAIT_BIT(wait, &ei->i_flags,
- EXT4_STATE_FC_COMMITTING);
- wq = bit_waitqueue(&ei->i_flags,
- EXT4_STATE_FC_COMMITTING);
-#endif
- lockdep_assert_held(&EXT4_SB(inode->i_sb)->s_fc_lock);
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
- spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
- schedule();
- finish_wait(wq, &wait.wq_entry);
-}
-
-/*
- * Inform Ext4's fast about start of an inode update
- *
- * This function is called by the high level call VFS callbacks before
- * performing any inode update. This function blocks if there's an ongoing
- * fast commit on the inode in question.
- */
-void ext4_fc_start_update(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
-
- if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
- (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY))
- return;
-
-restart:
- spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
- if (list_empty(&ei->i_fc_list))
- goto out;
-
- if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
- ext4_fc_wait_committing_inode(inode);
- goto restart;
- }
-out:
- atomic_inc(&ei->i_fc_updates);
- spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
}
-/*
- * Stop inode update and wake up waiting fast commits if any.
- */
-void ext4_fc_stop_update(struct inode *inode)
+static bool ext4_fc_disabled(struct super_block *sb)
{
- struct ext4_inode_info *ei = EXT4_I(inode);
-
- if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
- (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY))
- return;
-
- if (atomic_dec_and_test(&ei->i_fc_updates))
- wake_up_all(&ei->i_fc_wait);
+ return (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY));
}
/*
@@ -291,79 +231,112 @@ void ext4_fc_stop_update(struct inode *inode)
void ext4_fc_del(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_fc_dentry_update *fc_dentry;
+ wait_queue_head_t *wq;
- if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
- (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY))
+ if (ext4_fc_disabled(inode->i_sb))
return;
-restart:
- spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
- if (list_empty(&ei->i_fc_list)) {
- spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+ mutex_lock(&sbi->s_fc_lock);
+ if (list_empty(&ei->i_fc_list) && list_empty(&ei->i_fc_dilist)) {
+ mutex_unlock(&sbi->s_fc_lock);
return;
}
- if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
- ext4_fc_wait_committing_inode(inode);
- goto restart;
+ /*
+ * Since ext4_fc_del is called from ext4_evict_inode with a handle
+ * open, there is no need for us to wait here even if a fast commit
+ * is going on. That is because, if this inode is being committed,
+ * ext4_mark_inode_dirty would have waited for the inode commit
+ * operation to finish before we got here. So, by the time we get
+ * here, the inode's EXT4_STATE_FC_COMMITTING flag will have been
+ * cleared, and we should not see it set on this inode.
+ *
+ * We may come here without any handles open in the "no_delete" case of
+ * ext4_evict_inode as well. However, if that happens, we first mark the
+ * file system as fast commit ineligible anyway. So, even in that case,
+ * it is okay to remove the inode from the fc list.
+ */
+ WARN_ON(ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)
+ && !ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE));
+ while (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
+#if (BITS_PER_LONG < 64)
+ DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+ wq = bit_waitqueue(&ei->i_state_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+#else
+ DEFINE_WAIT_BIT(wait, &ei->i_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+ wq = bit_waitqueue(&ei->i_flags,
+ EXT4_STATE_FC_FLUSHING_DATA);
+#endif
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ if (ext4_test_inode_state(inode, EXT4_STATE_FC_FLUSHING_DATA)) {
+ mutex_unlock(&sbi->s_fc_lock);
+ schedule();
+ mutex_lock(&sbi->s_fc_lock);
+ }
+ finish_wait(wq, &wait.wq_entry);
}
list_del_init(&ei->i_fc_list);
- spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
-}
-/*
- * Mark file system as fast commit ineligible. This means that next commit
- * operation would result in a full jbd2 commit.
- */
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason)
-{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
-
- if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
- (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
+ /*
+ * Since this inode is getting removed, let's also remove all FC
+ * dentry create references, since it is not needed to log it anyways.
+ */
+ if (list_empty(&ei->i_fc_dilist)) {
+ mutex_unlock(&sbi->s_fc_lock);
return;
+ }
- ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- WARN_ON(reason >= EXT4_FC_REASON_MAX);
- sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
+ fc_dentry = list_first_entry(&ei->i_fc_dilist, struct ext4_fc_dentry_update, fcd_dilist);
+ WARN_ON(fc_dentry->fcd_op != EXT4_FC_TAG_CREAT);
+ list_del_init(&fc_dentry->fcd_list);
+ list_del_init(&fc_dentry->fcd_dilist);
+
+ WARN_ON(!list_empty(&ei->i_fc_dilist));
+ mutex_unlock(&sbi->s_fc_lock);
+
+ release_dentry_name_snapshot(&fc_dentry->fcd_name);
+ kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
}
/*
- * Start a fast commit ineligible update. Any commits that happen while
- * such an operation is in progress fall back to full commits.
+ * Mark the file system as fast commit ineligible, and record the latest
+ * ineligible transaction tid. This means that, up to the recorded
+ * transaction, any commit operation will result in a full jbd2 commit.
*/
-void ext4_fc_start_ineligible(struct super_block *sb, int reason)
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ tid_t tid;
+ bool has_transaction = true;
+ bool is_ineligible;
- if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
- (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
+ if (ext4_fc_disabled(sb))
return;
+ if (handle && !IS_ERR(handle))
+ tid = handle->h_transaction->t_tid;
+ else {
+ read_lock(&sbi->s_journal->j_state_lock);
+ if (sbi->s_journal->j_running_transaction)
+ tid = sbi->s_journal->j_running_transaction->t_tid;
+ else
+ has_transaction = false;
+ read_unlock(&sbi->s_journal->j_state_lock);
+ }
+ mutex_lock(&sbi->s_fc_lock);
+ is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (has_transaction && (!is_ineligible || tid_gt(tid, sbi->s_fc_ineligible_tid)))
+ sbi->s_fc_ineligible_tid = tid;
+ ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ mutex_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
- atomic_inc(&sbi->s_fc_ineligible_updates);
-}
-
-/*
- * Stop a fast commit ineligible update. We set EXT4_MF_FC_INELIGIBLE flag here
- * to ensure that after stopping the ineligible update, at least one full
- * commit takes place.
- */
-void ext4_fc_stop_ineligible(struct super_block *sb)
-{
- if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
- (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
- return;
-
- ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- atomic_dec(&EXT4_SB(sb)->s_fc_ineligible_updates);
-}
-
-static inline int ext4_fc_is_ineligible(struct super_block *sb)
-{
- return (ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE) ||
- atomic_read(&EXT4_SB(sb)->s_fc_ineligible_updates));
}
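
Editor's reminder (not part of the patch): the ineligible-tid bookkeeping above compares transaction IDs with tid_gt() rather than a plain ">". Conceptually, the jbd2 helpers are signed-difference comparisons that keep working when the 32-bit transaction ID counter wraps around; shown here only for reference, the real definitions live in the jbd2 headers:

static inline int tid_gt(tid_t x, tid_t y)
{
	int difference = (x - y);
	return (difference > 0);
}

static inline int tid_geq(tid_t x, tid_t y)
{
	int difference = (x - y);
	return (difference >= 0);
}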
/*
@@ -378,7 +351,7 @@ static inline int ext4_fc_is_ineligible(struct super_block *sb)
*/
static int ext4_fc_track_template(
handle_t *handle, struct inode *inode,
- int (*__fc_track_fn)(struct inode *, void *, bool),
+ int (*__fc_track_fn)(handle_t *handle, struct inode *, void *, bool),
void *args, int enqueue)
{
bool update = false;
@@ -387,34 +360,27 @@ static int ext4_fc_track_template(
tid_t tid = 0;
int ret;
- if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
- (sbi->s_mount_state & EXT4_FC_REPLAY))
- return -EOPNOTSUPP;
-
- if (ext4_fc_is_ineligible(inode->i_sb))
- return -EINVAL;
-
tid = handle->h_transaction->t_tid;
- mutex_lock(&ei->i_fc_lock);
+ spin_lock(&ei->i_fc_lock);
if (tid == ei->i_sync_tid) {
update = true;
} else {
ext4_fc_reset_inode(inode);
ei->i_sync_tid = tid;
}
- ret = __fc_track_fn(inode, args, update);
- mutex_unlock(&ei->i_fc_lock);
-
+ ret = __fc_track_fn(handle, inode, args, update);
+ spin_unlock(&ei->i_fc_lock);
if (!enqueue)
return ret;
- spin_lock(&sbi->s_fc_lock);
+ mutex_lock(&sbi->s_fc_lock);
if (list_empty(&EXT4_I(inode)->i_fc_list))
list_add_tail(&EXT4_I(inode)->i_fc_list,
- (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING)) ?
+ (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
&sbi->s_fc_q[FC_Q_STAGING] :
&sbi->s_fc_q[FC_Q_MAIN]);
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
return ret;
}
@@ -425,52 +391,63 @@ struct __track_dentry_update_args {
};
/* __track_fn for directory entry updates. Called with ei->i_fc_lock. */
-static int __track_dentry_update(struct inode *inode, void *arg, bool update)
+static int __track_dentry_update(handle_t *handle, struct inode *inode,
+ void *arg, bool update)
{
struct ext4_fc_dentry_update *node;
struct ext4_inode_info *ei = EXT4_I(inode);
struct __track_dentry_update_args *dentry_update =
(struct __track_dentry_update_args *)arg;
struct dentry *dentry = dentry_update->dentry;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct super_block *sb = inode->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ spin_unlock(&ei->i_fc_lock);
+
+ if (IS_ENCRYPTED(dir)) {
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME,
+ handle);
+ spin_lock(&ei->i_fc_lock);
+ return -EOPNOTSUPP;
+ }
- mutex_unlock(&ei->i_fc_lock);
node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
if (!node) {
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
- mutex_lock(&ei->i_fc_lock);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
+ spin_lock(&ei->i_fc_lock);
return -ENOMEM;
}
node->fcd_op = dentry_update->op;
- node->fcd_parent = dentry->d_parent->d_inode->i_ino;
+ node->fcd_parent = dir->i_ino;
node->fcd_ino = inode->i_ino;
- if (dentry->d_name.len > DNAME_INLINE_LEN) {
- node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
- if (!node->fcd_name.name) {
- kmem_cache_free(ext4_fc_dentry_cachep, node);
- ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_NOMEM);
- mutex_lock(&ei->i_fc_lock);
- return -ENOMEM;
- }
- memcpy((u8 *)node->fcd_name.name, dentry->d_name.name,
- dentry->d_name.len);
- } else {
- memcpy(node->fcd_iname, dentry->d_name.name,
- dentry->d_name.len);
- node->fcd_name.name = node->fcd_iname;
- }
- node->fcd_name.len = dentry->d_name.len;
-
- spin_lock(&sbi->s_fc_lock);
- if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING))
+ take_dentry_name_snapshot(&node->fcd_name, dentry);
+ INIT_LIST_HEAD(&node->fcd_dilist);
+ INIT_LIST_HEAD(&node->fcd_list);
+ mutex_lock(&sbi->s_fc_lock);
+ if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
list_add_tail(&node->fcd_list,
&sbi->s_fc_dentry_q[FC_Q_STAGING]);
else
list_add_tail(&node->fcd_list, &sbi->s_fc_dentry_q[FC_Q_MAIN]);
- spin_unlock(&sbi->s_fc_lock);
- mutex_lock(&ei->i_fc_lock);
+
+ /*
+ * This helps us keep track of all the fc_dentry updates that belong to
+ * this ext4 inode. So if the inode gets unlinked before we ever get a
+ * chance to fsync, we can remove all the fc_dentry references while
+ * evicting the inode in ext4_fc_del().
+ * Also with this, we don't need to loop over all the inodes in
+ * sbi->s_fc_q to get the corresponding inode in
+ * ext4_fc_commit_dentry_updates().
+ */
+ if (dentry_update->op == EXT4_FC_TAG_CREAT) {
+ WARN_ON(!list_empty(&ei->i_fc_dilist));
+ list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
+ }
+ mutex_unlock(&sbi->s_fc_lock);
+ spin_lock(&ei->i_fc_lock);
return 0;
}
@@ -486,12 +463,20 @@ void __ext4_fc_track_unlink(handle_t *handle,
ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
(void *)&args, 0);
- trace_ext4_fc_track_unlink(inode, dentry, ret);
+ trace_ext4_fc_track_unlink(handle, inode, dentry, ret);
}
void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry)
{
- __ext4_fc_track_unlink(handle, d_inode(dentry), dentry);
+ struct inode *inode = d_inode(dentry);
+
+ if (ext4_fc_disabled(inode->i_sb))
+ return;
+
+ if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ return;
+
+ __ext4_fc_track_unlink(handle, inode, dentry);
}
void __ext4_fc_track_link(handle_t *handle,
@@ -505,12 +490,20 @@ void __ext4_fc_track_link(handle_t *handle,
ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
(void *)&args, 0);
- trace_ext4_fc_track_link(inode, dentry, ret);
+ trace_ext4_fc_track_link(handle, inode, dentry, ret);
}
void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
{
- __ext4_fc_track_link(handle, d_inode(dentry), dentry);
+ struct inode *inode = d_inode(dentry);
+
+ if (ext4_fc_disabled(inode->i_sb))
+ return;
+
+ if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ return;
+
+ __ext4_fc_track_link(handle, inode, dentry);
}
void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
@@ -524,16 +517,25 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
(void *)&args, 0);
- trace_ext4_fc_track_create(inode, dentry, ret);
+ trace_ext4_fc_track_create(handle, inode, dentry, ret);
}
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
{
- __ext4_fc_track_create(handle, d_inode(dentry), dentry);
+ struct inode *inode = d_inode(dentry);
+
+ if (ext4_fc_disabled(inode->i_sb))
+ return;
+
+ if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ return;
+
+ __ext4_fc_track_create(handle, inode, dentry);
}
/* __track_fn for inode tracking */
-static int __track_inode(struct inode *inode, void *arg, bool update)
+static int __track_inode(handle_t *handle, struct inode *inode, void *arg,
+ bool update)
{
if (update)
return -EEXIST;
@@ -545,19 +547,56 @@ static int __track_inode(struct inode *inode, void *arg, bool update)
void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ wait_queue_head_t *wq;
int ret;
if (S_ISDIR(inode->i_mode))
return;
+ if (ext4_fc_disabled(inode->i_sb))
+ return;
+
if (ext4_should_journal_data(inode)) {
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_INODE_JOURNAL_DATA);
+ EXT4_FC_REASON_INODE_JOURNAL_DATA, handle);
return;
}
+ if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ return;
+
+ /*
+ * If we come here, we may sleep while waiting for the inode to
+ * commit. We shouldn't be holding i_data_sem when we go to sleep since
+ * the commit path needs to grab the lock while committing the inode.
+ */
+ lockdep_assert_not_held(&ei->i_data_sem);
+
+ while (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
+#if (BITS_PER_LONG < 64)
+ DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+#else
+ DEFINE_WAIT_BIT(wait, &ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+#endif
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
+ }
+
+ /*
+ * From this point on, this inode will not be committed either
+ * by fast or full commit as long as the handle is open.
+ */
ret = ext4_fc_track_template(handle, inode, __track_inode, NULL, 1);
- trace_ext4_fc_track_inode(inode, ret);
+ trace_ext4_fc_track_inode(handle, inode, ret);
}
struct __track_range_args {
@@ -565,7 +604,8 @@ struct __track_range_args {
};
/* __track_fn for tracking data updates */
-static int __track_range(struct inode *inode, void *arg, bool update)
+static int __track_range(handle_t *handle, struct inode *inode, void *arg,
+ bool update)
{
struct ext4_inode_info *ei = EXT4_I(inode);
ext4_lblk_t oldstart;
@@ -601,17 +641,29 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
if (S_ISDIR(inode->i_mode))
return;
+ if (ext4_fc_disabled(inode->i_sb))
+ return;
+
+ if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ return;
+
+ if (ext4_has_inline_data(inode)) {
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR,
+ handle);
+ return;
+ }
+
args.start = start;
args.end = end;
ret = ext4_fc_track_template(handle, inode, __track_range, &args, 1);
- trace_ext4_fc_track_range(inode, start, end, ret);
+ trace_ext4_fc_track_range(handle, inode, start, end, ret);
}
static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
{
- int write_flags = REQ_SYNC;
+ blk_opf_t write_flags = JBD2_JOURNAL_REQ_FLAGS;
struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
/* Add REQ_FUA | REQ_PREFLUSH only its tail */
@@ -621,24 +673,12 @@ static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
set_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = ext4_end_buffer_io_sync;
- submit_bh(REQ_OP_WRITE, write_flags, bh);
+ submit_bh(REQ_OP_WRITE | write_flags, bh);
EXT4_SB(sb)->s_fc_bh = NULL;
}
/* Ext4 commit path routines */
-/* memzero and update CRC */
-static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len,
- u32 *crc)
-{
- void *ret;
-
- ret = memset(dst, 0, len);
- if (crc)
- *crc = ext4_chksum(EXT4_SB(sb), *crc, dst, len);
- return ret;
-}
-
/*
* Allocate len bytes on a fast commit buffer.
*
@@ -652,62 +692,60 @@ static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len,
*/
static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
{
- struct ext4_fc_tl *tl;
+ struct ext4_fc_tl tl;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh;
int bsize = sbi->s_journal->j_blocksize;
int ret, off = sbi->s_fc_bytes % bsize;
- int pad_len;
+ int remaining;
+ u8 *dst;
/*
- * After allocating len, we should have space at least for a 0 byte
- * padding.
+ * If 'len' is too long to fit in any block alongside a PAD tlv, then we
+ * cannot fulfill the request.
*/
- if (len + sizeof(struct ext4_fc_tl) > bsize)
+ if (len > bsize - EXT4_FC_TAG_BASE_LEN)
return NULL;
- if (bsize - off - 1 > len + sizeof(struct ext4_fc_tl)) {
- /*
- * Only allocate from current buffer if we have enough space for
- * this request AND we have space to add a zero byte padding.
- */
- if (!sbi->s_fc_bh) {
- ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
- if (ret)
- return NULL;
- sbi->s_fc_bh = bh;
- }
+ if (!sbi->s_fc_bh) {
+ ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
+ if (ret)
+ return NULL;
+ sbi->s_fc_bh = bh;
+ }
+ dst = sbi->s_fc_bh->b_data + off;
+
+ /*
+ * Allocate the bytes in the current block if we can do so while still
+ * leaving enough space for a PAD tlv.
+ */
+ remaining = bsize - EXT4_FC_TAG_BASE_LEN - off;
+ if (len <= remaining) {
sbi->s_fc_bytes += len;
- return sbi->s_fc_bh->b_data + off;
- }
- /* Need to add PAD tag */
- tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off);
- tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
- pad_len = bsize - off - 1 - sizeof(struct ext4_fc_tl);
- tl->fc_len = cpu_to_le16(pad_len);
- if (crc)
- *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl));
- if (pad_len > 0)
- ext4_fc_memzero(sb, tl + 1, pad_len, crc);
+ return dst;
+ }
+
+ /*
+ * Else, terminate the current block with a PAD tlv, then allocate a new
+ * block and allocate the bytes at the start of that new block.
+ */
+
+ tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
+ tl.fc_len = cpu_to_le16(remaining);
+ memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
+ memset(dst + EXT4_FC_TAG_BASE_LEN, 0, remaining);
+ *crc = ext4_chksum(*crc, sbi->s_fc_bh->b_data, bsize);
+
ext4_fc_submit_bh(sb, false);
ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
if (ret)
return NULL;
sbi->s_fc_bh = bh;
- sbi->s_fc_bytes = (sbi->s_fc_bytes / bsize + 1) * bsize + len;
+ sbi->s_fc_bytes += bsize - off + len;
return sbi->s_fc_bh->b_data;
}
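
Editor's worked example (not part of the patch) of the padding arithmetic in ext4_fc_reserve_space(), assuming a 4096-byte journal block and a 4-byte TLV header for EXT4_FC_TAG_BASE_LEN; the concrete numbers are illustrative only:

/*
 *   bsize = 4096, off = s_fc_bytes % bsize = 4080, request len = 40
 *
 *   remaining = bsize - EXT4_FC_TAG_BASE_LEN - off
 *             = 4096 - 4 - 4080 = 12          (< len, so we must pad)
 *
 *   A PAD tlv is written at offset 4080: fc_tag = PAD, fc_len = 12,
 *   followed by 12 zero bytes, so the block is now exactly full.
 *
 *   A new block is fetched and the 40 bytes are handed out at its start:
 *   s_fc_bytes += (bsize - off) + len = 16 + 40, so the next offset
 *   within the new block is 40, as expected.
 */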
-/* memcpy to fc reserved space and update CRC */
-static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src,
- int len, u32 *crc)
-{
- if (crc)
- *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len);
- return memcpy(dst, src, len);
-}
-
/*
* Complete a fast commit by writing tail tag.
*
@@ -728,23 +766,27 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
* ext4_fc_reserve_space takes care of allocating an extra block if
* there's not enough space on this block to accommodate this tail.
*/
- dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(tail), &crc);
+ dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + sizeof(tail), &crc);
if (!dst)
return -ENOSPC;
off = sbi->s_fc_bytes % bsize;
tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);
- tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail));
+ tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail));
sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize);
- ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), &crc);
- dst += sizeof(tl);
+ memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
+ dst += EXT4_FC_TAG_BASE_LEN;
tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid);
- ext4_fc_memcpy(sb, dst, &tail.fc_tid, sizeof(tail.fc_tid), &crc);
+ memcpy(dst, &tail.fc_tid, sizeof(tail.fc_tid));
dst += sizeof(tail.fc_tid);
+ crc = ext4_chksum(crc, sbi->s_fc_bh->b_data,
+ dst - (u8 *)sbi->s_fc_bh->b_data);
tail.fc_crc = cpu_to_le32(crc);
- ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
+ memcpy(dst, &tail.fc_crc, sizeof(tail.fc_crc));
+ dst += sizeof(tail.fc_crc);
+ memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. */
ext4_fc_submit_bh(sb, true);
@@ -761,15 +803,15 @@ static bool ext4_fc_add_tlv(struct super_block *sb, u16 tag, u16 len, u8 *val,
struct ext4_fc_tl tl;
u8 *dst;
- dst = ext4_fc_reserve_space(sb, sizeof(tl) + len, crc);
+ dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + len, crc);
if (!dst)
return false;
tl.fc_tag = cpu_to_le16(tag);
tl.fc_len = cpu_to_le16(len);
- ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc);
- ext4_fc_memcpy(sb, dst + sizeof(tl), val, len, crc);
+ memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
+ memcpy(dst + EXT4_FC_TAG_BASE_LEN, val, len);
return true;
}
@@ -780,9 +822,9 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
{
struct ext4_fc_dentry_info fcd;
struct ext4_fc_tl tl;
- int dlen = fc_dentry->fcd_name.len;
- u8 *dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(fcd) + dlen,
- crc);
+ int dlen = fc_dentry->fcd_name.name.len;
+ u8 *dst = ext4_fc_reserve_space(sb,
+ EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc);
if (!dst)
return false;
@@ -791,12 +833,11 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
fcd.fc_ino = cpu_to_le32(fc_dentry->fcd_ino);
tl.fc_tag = cpu_to_le16(fc_dentry->fcd_op);
tl.fc_len = cpu_to_le16(sizeof(fcd) + dlen);
- ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc);
- dst += sizeof(tl);
- ext4_fc_memcpy(sb, dst, &fcd, sizeof(fcd), crc);
+ memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
+ dst += EXT4_FC_TAG_BASE_LEN;
+ memcpy(dst, &fcd, sizeof(fcd));
dst += sizeof(fcd);
- ext4_fc_memcpy(sb, dst, fc_dentry->fcd_name.name, dlen, crc);
- dst += dlen;
+ memcpy(dst, fc_dentry->fcd_name.name.name, dlen);
return true;
}
@@ -828,22 +869,21 @@ static int ext4_fc_write_inode(struct inode *inode, u32 *crc)
tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE);
tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino));
+ ret = -ECANCELED;
dst = ext4_fc_reserve_space(inode->i_sb,
- sizeof(tl) + inode_len + sizeof(fc_inode.fc_ino), crc);
+ EXT4_FC_TAG_BASE_LEN + inode_len + sizeof(fc_inode.fc_ino), crc);
if (!dst)
- return -ECANCELED;
+ goto err;
- if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, sizeof(tl), crc))
- return -ECANCELED;
- dst += sizeof(tl);
- if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc))
- return -ECANCELED;
+ memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);
+ dst += EXT4_FC_TAG_BASE_LEN;
+ memcpy(dst, &fc_inode, sizeof(fc_inode));
dst += sizeof(fc_inode);
- if (!ext4_fc_memcpy(inode->i_sb, dst, (u8 *)ext4_raw_inode(&iloc),
- inode_len, crc))
- return -ECANCELED;
-
- return 0;
+ memcpy(dst, (u8 *)ext4_raw_inode(&iloc), inode_len);
+ ret = 0;
+err:
+ brelse(iloc.bh);
+ return ret;
}
/*
@@ -860,24 +900,26 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
struct ext4_extent *ex;
int ret;
- mutex_lock(&ei->i_fc_lock);
+ spin_lock(&ei->i_fc_lock);
if (ei->i_fc_lblk_len == 0) {
- mutex_unlock(&ei->i_fc_lock);
+ spin_unlock(&ei->i_fc_lock);
return 0;
}
old_blk_size = ei->i_fc_lblk_start;
new_blk_size = ei->i_fc_lblk_start + ei->i_fc_lblk_len - 1;
ei->i_fc_lblk_len = 0;
- mutex_unlock(&ei->i_fc_lock);
+ spin_unlock(&ei->i_fc_lock);
cur_lblk_off = old_blk_size;
- jbd_debug(1, "%s: will try writing %d to %d for inode %ld\n",
- __func__, cur_lblk_off, new_blk_size, inode->i_ino);
+ ext4_debug("will try writing %d to %d for inode %ld\n",
+ cur_lblk_off, new_blk_size, inode->i_ino);
while (cur_lblk_off <= new_blk_size) {
map.m_lblk = cur_lblk_off;
map.m_len = new_blk_size - cur_lblk_off + 1;
- ret = ext4_map_blocks(NULL, inode, &map, 0);
+ ret = ext4_map_blocks(NULL, inode, &map,
+ EXT4_GET_BLOCKS_IO_SUBMIT |
+ EXT4_EX_NOCACHE);
if (ret < 0)
return -ECANCELED;
@@ -921,76 +963,37 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
}
-/* Submit data for all the fast commit inodes */
-static int ext4_fc_submit_inode_data_all(journal_t *journal)
+/* Flushes data of all the inodes in the commit queue. */
+static int ext4_fc_flush_data(journal_t *journal)
{
- struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_inode_info *ei;
int ret = 0;
- spin_lock(&sbi->s_fc_lock);
- ext4_set_mount_flag(sb, EXT4_MF_FC_COMMITTING);
list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
- ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
- while (atomic_read(&ei->i_fc_updates)) {
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&ei->i_fc_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&ei->i_fc_updates)) {
- spin_unlock(&sbi->s_fc_lock);
- schedule();
- spin_lock(&sbi->s_fc_lock);
- }
- finish_wait(&ei->i_fc_wait, &wait);
- }
- spin_unlock(&sbi->s_fc_lock);
- ret = jbd2_submit_inode_data(ei->jinode);
+ ret = jbd2_submit_inode_data(journal, ei->jinode);
if (ret)
return ret;
- spin_lock(&sbi->s_fc_lock);
}
- spin_unlock(&sbi->s_fc_lock);
-
- return ret;
-}
-/* Wait for completion of data for all the fast commit inodes */
-static int ext4_fc_wait_inode_data_all(journal_t *journal)
-{
- struct super_block *sb = (struct super_block *)(journal->j_private);
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_inode_info *pos, *n;
- int ret = 0;
-
- spin_lock(&sbi->s_fc_lock);
- list_for_each_entry_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
- if (!ext4_test_inode_state(&pos->vfs_inode,
- EXT4_STATE_FC_COMMITTING))
- continue;
- spin_unlock(&sbi->s_fc_lock);
-
- ret = jbd2_wait_inode_data(journal, pos->jinode);
+ list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ret = jbd2_wait_inode_data(journal, ei->jinode);
if (ret)
return ret;
- spin_lock(&sbi->s_fc_lock);
}
- spin_unlock(&sbi->s_fc_lock);
return 0;
}
/* Commit all the directory entry updates */
static int ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc)
-__acquires(&sbi->s_fc_lock)
-__releases(&sbi->s_fc_lock)
{
- struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_fc_dentry_update *fc_dentry, *fc_dentry_n;
struct inode *inode;
- struct ext4_inode_info *ei, *ei_n;
+ struct ext4_inode_info *ei;
int ret;
if (list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN]))
@@ -998,30 +1001,21 @@ __releases(&sbi->s_fc_lock)
list_for_each_entry_safe(fc_dentry, fc_dentry_n,
&sbi->s_fc_dentry_q[FC_Q_MAIN], fcd_list) {
if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
- spin_unlock(&sbi->s_fc_lock);
- if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
- ret = -ENOSPC;
- goto lock_and_exit;
- }
- spin_lock(&sbi->s_fc_lock);
+ if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry))
+ return -ENOSPC;
continue;
}
-
- inode = NULL;
- list_for_each_entry_safe(ei, ei_n, &sbi->s_fc_q[FC_Q_MAIN],
- i_fc_list) {
- if (ei->vfs_inode.i_ino == fc_dentry->fcd_ino) {
- inode = &ei->vfs_inode;
- break;
- }
- }
/*
- * If we don't find inode in our list, then it was deleted,
- * in which case, we don't need to record it's create tag.
+ * With fcd_dilist we need not loop in sbi->s_fc_q to get the
+ * corresponding inode. Also, the corresponding inode could have been
+ * deleted, in which case, we don't need to do anything.
*/
- if (!inode)
+ if (list_empty(&fc_dentry->fcd_dilist))
continue;
- spin_unlock(&sbi->s_fc_lock);
+ ei = list_first_entry(&fc_dentry->fcd_dilist,
+ struct ext4_inode_info, i_fc_dilist);
+ inode = &ei->vfs_inode;
+ WARN_ON(inode->i_ino != fc_dentry->fcd_ino);
/*
* We first write the inode and then the create dirent. This
@@ -1032,28 +1026,19 @@ __releases(&sbi->s_fc_lock)
*/
ret = ext4_fc_write_inode(inode, crc);
if (ret)
- goto lock_and_exit;
-
+ return ret;
ret = ext4_fc_write_inode_data(inode, crc);
if (ret)
- goto lock_and_exit;
-
- if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
- ret = -ENOSPC;
- goto lock_and_exit;
- }
-
- spin_lock(&sbi->s_fc_lock);
+ return ret;
+ if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry))
+ return -ENOSPC;
}
return 0;
-lock_and_exit:
- spin_lock(&sbi->s_fc_lock);
- return ret;
}
static int ext4_fc_perform_commit(journal_t *journal)
{
- struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_inode_info *iter;
struct ext4_fc_head head;
@@ -1062,26 +1047,81 @@ static int ext4_fc_perform_commit(journal_t *journal)
int ret = 0;
u32 crc = 0;
- ret = ext4_fc_submit_inode_data_all(journal);
- if (ret)
- return ret;
+ /*
+ * Step 1: Mark all inodes on s_fc_q[MAIN] with
+ * EXT4_STATE_FC_FLUSHING_DATA. This prevents these inodes from being
+ * freed until the data flush is over.
+ */
+ mutex_lock(&sbi->s_fc_lock);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ext4_set_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_FLUSHING_DATA);
+ }
+ mutex_unlock(&sbi->s_fc_lock);
+
+ /* Step 2: Flush data for all the eligible inodes. */
+ ret = ext4_fc_flush_data(journal);
+
+ /*
+ * Step 3: Clear the EXT4_STATE_FC_FLUSHING_DATA flag before returning
+ * any error from step 2. This ensures that waiters waiting on
+ * EXT4_STATE_FC_FLUSHING_DATA can resume.
+ */
+ mutex_lock(&sbi->s_fc_lock);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ext4_clear_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_FLUSHING_DATA);
+#if (BITS_PER_LONG < 64)
+ wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_FLUSHING_DATA);
+#else
+ wake_up_bit(&iter->i_flags, EXT4_STATE_FC_FLUSHING_DATA);
+#endif
+ }
+
+ /*
+ * Make sure clearing of EXT4_STATE_FC_FLUSHING_DATA is visible before
+ * the waiter checks the bit. Pairs with implicit barrier in
+ * prepare_to_wait() in ext4_fc_del().
+ */
+ smp_mb();
+ mutex_unlock(&sbi->s_fc_lock);
- ret = ext4_fc_wait_inode_data_all(journal);
+ /*
+ * If we encountered an error in Step 2, return it now after clearing
+ * EXT4_STATE_FC_FLUSHING_DATA bit.
+ */
if (ret)
return ret;
+
+ /* Step 4: Mark all inodes as being committed. */
+ jbd2_journal_lock_updates(journal);
/*
- * If file system device is different from journal device, issue a cache
- * flush before we start writing fast commit blocks.
+ * The journal is now locked. No more handles can start and all the
+ * previous handles are now drained. We now mark the inodes on the
+ * commit queue as being committed.
+ */
+ mutex_lock(&sbi->s_fc_lock);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ ext4_set_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_COMMITTING);
+ }
+ mutex_unlock(&sbi->s_fc_lock);
+ jbd2_journal_unlock_updates(journal);
+
+ /*
+ * Step 5: If file system device is different from journal device,
+ * issue a cache flush before we start writing fast commit blocks.
*/
if (journal->j_fs_dev != journal->j_dev)
blkdev_issue_flush(journal->j_fs_dev);
blk_start_plug(&plug);
+ /* Step 6: Write fast commit blocks to disk. */
if (sbi->s_fc_bytes == 0) {
/*
- * Add a head tag only if this is the first fast commit
- * in this TID.
+ * Step 6.1: Add a head tag only if this is the first fast
+ * commit in this TID.
*/
head.fc_features = cpu_to_le32(EXT4_FC_SUPPORTED_FEATURES);
head.fc_tid = cpu_to_le32(
@@ -1093,36 +1133,61 @@ static int ext4_fc_perform_commit(journal_t *journal)
}
}
- spin_lock(&sbi->s_fc_lock);
+ /* Step 6.2: Now write all the dentry updates. */
+ mutex_lock(&sbi->s_fc_lock);
ret = ext4_fc_commit_dentry_updates(journal, &crc);
- if (ret) {
- spin_unlock(&sbi->s_fc_lock);
+ if (ret)
goto out;
- }
+ /* Step 6.3: Now write all the changed inodes to disk. */
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
inode = &iter->vfs_inode;
if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
continue;
- spin_unlock(&sbi->s_fc_lock);
ret = ext4_fc_write_inode_data(inode, &crc);
if (ret)
goto out;
ret = ext4_fc_write_inode(inode, &crc);
if (ret)
goto out;
- spin_lock(&sbi->s_fc_lock);
}
- spin_unlock(&sbi->s_fc_lock);
-
+ /* Step 6.4: Finally write tail tag to conclude this fast commit. */
ret = ext4_fc_write_tail(sb, crc);
out:
+ mutex_unlock(&sbi->s_fc_lock);
blk_finish_plug(&plug);
return ret;
}
+static void ext4_fc_update_stats(struct super_block *sb, int status,
+ u64 commit_time, int nblks, tid_t commit_tid)
+{
+ struct ext4_fc_stats *stats = &EXT4_SB(sb)->s_fc_stats;
+
+ ext4_debug("Fast commit ended with status = %d for tid %u",
+ status, commit_tid);
+ if (status == EXT4_FC_STATUS_OK) {
+ stats->fc_num_commits++;
+ stats->fc_numblks += nblks;
+ if (likely(stats->s_fc_avg_commit_time))
+ stats->s_fc_avg_commit_time =
+ (commit_time +
+ stats->s_fc_avg_commit_time * 3) / 4;
+ else
+ stats->s_fc_avg_commit_time = commit_time;
+ } else if (status == EXT4_FC_STATUS_FAILED ||
+ status == EXT4_FC_STATUS_INELIGIBLE) {
+ if (status == EXT4_FC_STATUS_FAILED)
+ stats->fc_failed_commits++;
+ stats->fc_ineligible_commits++;
+ } else {
+ stats->fc_skipped_commits++;
+ }
+ trace_ext4_fc_commit_stop(sb, nblks, status, commit_tid);
+}
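
Editor's note: the commit-time average maintained above is a simple exponentially weighted moving average with a 1/4 weight on the newest sample. A quick numeric check, values illustrative only:

/* avg' = (t + 3 * avg) / 4
 *
 *   avg = 100 us, new commit t = 200 us   ->  avg' = (200 + 300) / 4 = 125 us
 *   avg = 100 us, spike      t = 1000 us  ->  avg' = (1000 + 300) / 4 = 325 us
 *
 * so a single outlier moves the average by only a quarter of its excess.
 */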
+
/*
* The main commit entry point. Performs a fast commit for transaction
* commit_tid if needed. If it's not possible to perform a fast commit
@@ -1131,122 +1196,140 @@ out:
*/
int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
{
- struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
int nblks = 0, ret, bsize = journal->j_blocksize;
int subtid = atomic_read(&sbi->s_fc_subtid);
- int reason = EXT4_FC_REASON_OK, fc_bufs_before = 0;
+ int status = EXT4_FC_STATUS_OK, fc_bufs_before = 0;
ktime_t start_time, commit_time;
+ int old_ioprio, journal_ioprio;
- trace_ext4_fc_commit_start(sb);
+ if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
+ return jbd2_complete_transaction(journal, commit_tid);
- start_time = ktime_get();
+ trace_ext4_fc_commit_start(sb, commit_tid);
- if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
- (ext4_fc_is_ineligible(sb))) {
- reason = EXT4_FC_REASON_INELIGIBLE;
- goto out;
- }
+ start_time = ktime_get();
+ old_ioprio = get_current_ioprio();
restart_fc:
ret = jbd2_fc_begin_commit(journal, commit_tid);
if (ret == -EALREADY) {
/* There was an ongoing commit, check if we need to restart */
if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
- commit_tid > journal->j_commit_sequence)
+ tid_gt(commit_tid, journal->j_commit_sequence))
goto restart_fc;
- reason = EXT4_FC_REASON_ALREADY_COMMITTED;
- goto out;
+ ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
+ commit_tid);
+ return 0;
} else if (ret) {
- sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
- reason = EXT4_FC_REASON_FC_START_FAILED;
- goto out;
+ /*
+ * Commit couldn't start. Just update stats and perform a
+ * full commit.
+ */
+ ext4_fc_update_stats(sb, EXT4_FC_STATUS_FAILED, 0, 0,
+ commit_tid);
+ return jbd2_complete_transaction(journal, commit_tid);
+ }
+
+ /*
+ * After establishing the journal barrier via jbd2_fc_begin_commit(),
+ * check whether we are fast commit ineligible.
+ */
+ if (ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE)) {
+ status = EXT4_FC_STATUS_INELIGIBLE;
+ goto fallback;
}
+ /*
+ * Now that we know that this thread is going to do a fast commit,
+ * elevate the priority to match that of the journal thread.
+ */
+ if (journal->j_task->io_context)
+ journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
+ else
+ journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
+ set_task_ioprio(current, journal_ioprio);
fc_bufs_before = (sbi->s_fc_bytes + bsize - 1) / bsize;
ret = ext4_fc_perform_commit(journal);
if (ret < 0) {
- sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
- reason = EXT4_FC_REASON_FC_FAILED;
- goto out;
+ status = EXT4_FC_STATUS_FAILED;
+ goto fallback;
}
nblks = (sbi->s_fc_bytes + bsize - 1) / bsize - fc_bufs_before;
ret = jbd2_fc_wait_bufs(journal, nblks);
if (ret < 0) {
- sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
- reason = EXT4_FC_REASON_FC_FAILED;
- goto out;
+ status = EXT4_FC_STATUS_FAILED;
+ goto fallback;
}
atomic_inc(&sbi->s_fc_subtid);
- jbd2_fc_end_commit(journal);
-out:
- /* Has any ineligible update happened since we started? */
- if (reason == EXT4_FC_REASON_OK && ext4_fc_is_ineligible(sb)) {
- sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
- reason = EXT4_FC_REASON_INELIGIBLE;
- }
-
- spin_lock(&sbi->s_fc_lock);
- if (reason != EXT4_FC_REASON_OK &&
- reason != EXT4_FC_REASON_ALREADY_COMMITTED) {
- sbi->s_fc_stats.fc_ineligible_commits++;
- } else {
- sbi->s_fc_stats.fc_num_commits++;
- sbi->s_fc_stats.fc_numblks += nblks;
- }
- spin_unlock(&sbi->s_fc_lock);
- nblks = (reason == EXT4_FC_REASON_OK) ? nblks : 0;
- trace_ext4_fc_commit_stop(sb, nblks, reason);
- commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
+ ret = jbd2_fc_end_commit(journal);
+ set_task_ioprio(current, old_ioprio);
/*
- * weight the commit time higher than the average time so we don't
- * react too strongly to vast changes in the commit time
+ * weight the commit time higher than the average time so we
+ * don't react too strongly to vast changes in the commit time
*/
- if (likely(sbi->s_fc_avg_commit_time))
- sbi->s_fc_avg_commit_time = (commit_time +
- sbi->s_fc_avg_commit_time * 3) / 4;
- else
- sbi->s_fc_avg_commit_time = commit_time;
- jbd_debug(1,
- "Fast commit ended with blks = %d, reason = %d, subtid - %d",
- nblks, reason, subtid);
- if (reason == EXT4_FC_REASON_FC_FAILED)
- return jbd2_fc_end_commit_fallback(journal);
- if (reason == EXT4_FC_REASON_FC_START_FAILED ||
- reason == EXT4_FC_REASON_INELIGIBLE)
- return jbd2_complete_transaction(journal, commit_tid);
- return 0;
+ commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
+ ext4_fc_update_stats(sb, status, commit_time, nblks, commit_tid);
+ return ret;
+
+fallback:
+ set_task_ioprio(current, old_ioprio);
+ ret = jbd2_fc_end_commit_fallback(journal);
+ ext4_fc_update_stats(sb, status, 0, 0, commit_tid);
+ return ret;
}
/*
* Fast commit cleanup routine. This is called after every fast commit and
* full commit. full is true if we are called after a full commit.
*/
-static void ext4_fc_cleanup(journal_t *journal, int full)
+static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_inode_info *iter, *iter_n;
+ struct ext4_inode_info *ei;
struct ext4_fc_dentry_update *fc_dentry;
if (full && sbi->s_fc_bh)
sbi->s_fc_bh = NULL;
+ trace_ext4_fc_cleanup(journal, full, tid);
jbd2_fc_release_bufs(journal);
- spin_lock(&sbi->s_fc_lock);
- list_for_each_entry_safe(iter, iter_n, &sbi->s_fc_q[FC_Q_MAIN],
- i_fc_list) {
- list_del_init(&iter->i_fc_list);
- ext4_clear_inode_state(&iter->vfs_inode,
+ mutex_lock(&sbi->s_fc_lock);
+ while (!list_empty(&sbi->s_fc_q[FC_Q_MAIN])) {
+ ei = list_first_entry(&sbi->s_fc_q[FC_Q_MAIN],
+ struct ext4_inode_info,
+ i_fc_list);
+ list_del_init(&ei->i_fc_list);
+ ext4_clear_inode_state(&ei->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- ext4_fc_reset_inode(&iter->vfs_inode);
- /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
+ if (tid_geq(tid, ei->i_sync_tid)) {
+ ext4_fc_reset_inode(&ei->vfs_inode);
+ } else if (full) {
+ /*
+ * We are called after a full commit, and the inode
+ * has been modified while the commit was running.
+ * Re-enqueue the inode into STAGING, which will then
+ * be spliced back into MAIN. This cannot happen
+ * during a fast commit because the journal is locked
+ * the whole time in that case (and the tid doesn't
+ * increase, so the tid check above isn't reliable).
+ */
+ list_add_tail(&ei->i_fc_list,
+ &sbi->s_fc_q[FC_Q_STAGING]);
+ }
+ /*
+ * Make sure clearing of EXT4_STATE_FC_COMMITTING is
+ * visible before we send the wakeup. Pairs with implicit
+ * barrier in prepare_to_wait() in ext4_fc_track_inode().
+ */
smp_mb();
#if (BITS_PER_LONG < 64)
- wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_COMMITTING);
+ wake_up_bit(&ei->i_state_flags, EXT4_STATE_FC_COMMITTING);
#else
- wake_up_bit(&iter->i_flags, EXT4_STATE_FC_COMMITTING);
+ wake_up_bit(&ei->i_flags, EXT4_STATE_FC_COMMITTING);
#endif
}
@@ -1255,13 +1338,10 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
struct ext4_fc_dentry_update,
fcd_list);
list_del_init(&fc_dentry->fcd_list);
- spin_unlock(&sbi->s_fc_lock);
+ list_del_init(&fc_dentry->fcd_dilist);
- if (fc_dentry->fcd_name.name &&
- fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
- kfree(fc_dentry->fcd_name.name);
+ release_dentry_name_snapshot(&fc_dentry->fcd_name);
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
- spin_lock(&sbi->s_fc_lock);
}
list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
@@ -1269,12 +1349,14 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
&sbi->s_fc_q[FC_Q_MAIN]);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (tid_geq(tid, sbi->s_fc_ineligible_tid)) {
+ sbi->s_fc_ineligible_tid = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ }
if (full)
sbi->s_fc_bytes = 0;
- spin_unlock(&sbi->s_fc_lock);
+ mutex_unlock(&sbi->s_fc_lock);
trace_ext4_fc_stats(sb);
}
@@ -1286,8 +1368,14 @@ struct dentry_info_args {
char *dname;
};
+/* Same as struct ext4_fc_tl, but uses native endianness fields */
+struct ext4_fc_tl_mem {
+ u16 fc_tag;
+ u16 fc_len;
+};
+
static inline void tl_to_darg(struct dentry_info_args *darg,
- struct ext4_fc_tl *tl, u8 *val)
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
struct ext4_fc_dentry_info fcd;
@@ -1296,13 +1384,21 @@ static inline void tl_to_darg(struct dentry_info_args *darg,
darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
darg->ino = le32_to_cpu(fcd.fc_ino);
darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
- darg->dname_len = le16_to_cpu(tl->fc_len) -
- sizeof(struct ext4_fc_dentry_info);
+ darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info);
+}
+
+static inline void ext4_fc_get_tl(struct ext4_fc_tl_mem *tl, u8 *val)
+{
+ struct ext4_fc_tl tl_disk;
+
+ memcpy(&tl_disk, val, EXT4_FC_TAG_BASE_LEN);
+ tl->fc_len = le16_to_cpu(tl_disk.fc_len);
+ tl->fc_tag = le16_to_cpu(tl_disk.fc_tag);
}
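
For illustration only, a userspace sketch of the same decode step: the on-disk tag/length header is little-endian, and the replay code works with the native-endian ext4_fc_tl_mem copy above. Names below are hypothetical, not ext4 symbols, and <endian.h> le16toh() is assumed.

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Native-endian mirror of the on-disk tag/length header. */
struct fc_tl_mem {
	uint16_t fc_tag;
	uint16_t fc_len;
};

/* Decode one little-endian header from a raw byte buffer. */
static void fc_get_tl(struct fc_tl_mem *tl, const uint8_t *val)
{
	uint16_t tag_le, len_le;

	memcpy(&tag_le, val, sizeof(tag_le));
	memcpy(&len_le, val + sizeof(tag_le), sizeof(len_le));
	tl->fc_tag = le16toh(tag_le);
	tl->fc_len = le16toh(len_le);
}
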
/* Unlink replay function */
-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
- u8 *val)
+static int ext4_fc_replay_unlink(struct super_block *sb,
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
struct inode *inode, *old_parent;
struct qstr entry;
@@ -1319,19 +1415,19 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found", darg.ino);
+ ext4_debug("Inode %d not found", darg.ino);
return 0;
}
old_parent = ext4_iget(sb, darg.parent_ino,
EXT4_IGET_NORMAL);
if (IS_ERR(old_parent)) {
- jbd_debug(1, "Dir with inode %d not found", darg.parent_ino);
+ ext4_debug("Dir with inode %d not found", darg.parent_ino);
iput(inode);
return 0;
}
- ret = __ext4_unlink(NULL, old_parent, &entry, inode);
+ ret = __ext4_unlink(old_parent, &entry, inode, NULL);
/* -ENOENT ok coz it might not exist anymore. */
if (ret == -ENOENT)
ret = 0;
@@ -1351,21 +1447,21 @@ static int ext4_fc_replay_link_internal(struct super_block *sb,
dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL);
if (IS_ERR(dir)) {
- jbd_debug(1, "Dir with inode %d not found.", darg->parent_ino);
+ ext4_debug("Dir with inode %d not found.", darg->parent_ino);
dir = NULL;
goto out;
}
dentry_dir = d_obtain_alias(dir);
if (IS_ERR(dentry_dir)) {
- jbd_debug(1, "Failed to obtain dentry");
+ ext4_debug("Failed to obtain dentry");
dentry_dir = NULL;
goto out;
}
dentry_inode = d_alloc(dentry_dir, &qstr_dname);
if (!dentry_inode) {
- jbd_debug(1, "Inode dentry not created.");
+ ext4_debug("Inode dentry not created.");
ret = -ENOMEM;
goto out;
}
@@ -1378,7 +1474,7 @@ static int ext4_fc_replay_link_internal(struct super_block *sb,
* could complete.
*/
if (ret && ret != -EEXIST) {
- jbd_debug(1, "Failed to link\n");
+ ext4_debug("Failed to link\n");
goto out;
}
@@ -1399,8 +1495,8 @@ out:
}
/* Link replay function */
-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
- u8 *val)
+static int ext4_fc_replay_link(struct super_block *sb,
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
struct inode *inode;
struct dentry_info_args darg;
@@ -1412,7 +1508,7 @@ static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode not found.");
+ ext4_debug("Inode not found.");
return 0;
}
@@ -1435,14 +1531,17 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
if (state->fc_modified_inodes[i] == ino)
return 0;
if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
+ int *fc_modified_inodes;
+
+ fc_modified_inodes = krealloc(state->fc_modified_inodes,
+ sizeof(int) * (state->fc_modified_inodes_size +
+ EXT4_FC_REPLAY_REALLOC_INCREMENT),
+ GFP_KERNEL);
+ if (!fc_modified_inodes)
+ return -ENOMEM;
+ state->fc_modified_inodes = fc_modified_inodes;
state->fc_modified_inodes_size +=
EXT4_FC_REPLAY_REALLOC_INCREMENT;
- state->fc_modified_inodes = krealloc(
- state->fc_modified_inodes, sizeof(int) *
- state->fc_modified_inodes_size,
- GFP_KERNEL);
- if (!state->fc_modified_inodes)
- return -ENOMEM;
}
state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
return 0;
@@ -1451,16 +1550,17 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
/*
* Inode replay function
*/
-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
- u8 *val)
+static int ext4_fc_replay_inode(struct super_block *sb,
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
struct ext4_fc_inode fc_inode;
struct ext4_inode *raw_inode;
struct ext4_inode *raw_fc_inode;
struct inode *inode = NULL;
struct ext4_iloc iloc;
- int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
+ int inode_len, ino, ret, tag = tl->fc_tag;
struct ext4_extent_header *eh;
+ size_t off_gen = offsetof(struct ext4_inode, i_generation);
memcpy(&fc_inode, val, sizeof(fc_inode));
@@ -1474,7 +1574,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
}
inode = NULL;
- ext4_fc_record_modified_inode(sb, ino);
+ ret = ext4_fc_record_modified_inode(sb, ino);
+ if (ret)
+ goto out;
raw_fc_inode = (struct ext4_inode *)
(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
@@ -1482,12 +1584,12 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
if (ret)
goto out;
- inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
+ inode_len = tl->fc_len - sizeof(struct ext4_fc_inode);
raw_inode = ext4_raw_inode(&iloc);
memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
- memcpy(&raw_inode->i_generation, &raw_fc_inode->i_generation,
- inode_len - offsetof(struct ext4_inode, i_generation));
+ memcpy((u8 *)raw_inode + off_gen, (u8 *)raw_fc_inode + off_gen,
+ inode_len - off_gen);
if (le32_to_cpu(raw_inode->i_flags) & EXT4_EXTENTS_FL) {
eh = (struct ext4_extent_header *)(&raw_inode->i_block[0]);
if (eh->eh_magic != EXT4_EXT_MAGIC) {
@@ -1517,7 +1619,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
/* Given that we just wrote the inode on disk, this SHOULD succeed. */
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode not found.");
+ ext4_debug("Inode not found.");
return -EFSCORRUPTED;
}
@@ -1551,8 +1653,8 @@ out:
* inode for which we are trying to create a dentry here, should already have
* been replayed before we start here.
*/
-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
- u8 *val)
+static int ext4_fc_replay_create(struct super_block *sb,
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
int ret = 0;
struct inode *inode = NULL;
@@ -1571,7 +1673,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "inode %d not found.", darg.ino);
+ ext4_debug("inode %d not found.", darg.ino);
inode = NULL;
ret = -EINVAL;
goto out;
@@ -1584,7 +1686,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
*/
dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
if (IS_ERR(dir)) {
- jbd_debug(1, "Dir %d not found.", darg.ino);
+ ext4_debug("Dir %d not found.", darg.ino);
goto out;
}
ret = ext4_init_new_dir(NULL, dir, inode);
@@ -1600,32 +1702,41 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
set_nlink(inode, 1);
ext4_mark_inode_dirty(NULL, inode);
out:
- if (inode)
- iput(inode);
+ iput(inode);
return ret;
}
/*
- * Record physical disk regions which are in use as per fast commit area. Our
- * simple replay phase allocator excludes these regions from allocation.
+ * Record physical disk regions which are in use, as per the fast commit
+ * area and as used by inodes during the replay phase. Our simple replay
+ * phase allocator excludes these regions from allocation.
*/
-static int ext4_fc_record_regions(struct super_block *sb, int ino,
- ext4_lblk_t lblk, ext4_fsblk_t pblk, int len)
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
{
struct ext4_fc_replay_state *state;
struct ext4_fc_alloc_region *region;
state = &EXT4_SB(sb)->s_fc_replay_state;
+ /*
+ * During the replay phase, fc_regions_valid may not be the same as
+ * fc_regions_used; update it when making new additions.
+ */
+ if (replay && state->fc_regions_used != state->fc_regions_valid)
+ state->fc_regions_used = state->fc_regions_valid;
if (state->fc_regions_used == state->fc_regions_size) {
+ struct ext4_fc_alloc_region *fc_regions;
+
+ fc_regions = krealloc(state->fc_regions,
+ sizeof(struct ext4_fc_alloc_region) *
+ (state->fc_regions_size +
+ EXT4_FC_REPLAY_REALLOC_INCREMENT),
+ GFP_KERNEL);
+ if (!fc_regions)
+ return -ENOMEM;
state->fc_regions_size +=
EXT4_FC_REPLAY_REALLOC_INCREMENT;
- state->fc_regions = krealloc(
- state->fc_regions,
- state->fc_regions_size *
- sizeof(struct ext4_fc_alloc_region),
- GFP_KERNEL);
- if (!state->fc_regions)
- return -ENOMEM;
+ state->fc_regions = fc_regions;
}
region = &state->fc_regions[state->fc_regions_used++];
region->ino = ino;
@@ -1633,12 +1744,15 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
region->pblk = pblk;
region->len = len;
+ if (replay)
+ state->fc_regions_valid++;
+
return 0;
}
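
Both this helper and ext4_fc_record_modified_inode() above now grow their arrays through a temporary pointer, so the old buffer is neither leaked nor lost when krealloc() fails. A small userspace sketch of that pattern, with hypothetical names:

#include <stdlib.h>

#define REALLOC_INCREMENT 4	/* mirrors EXT4_FC_REPLAY_REALLOC_INCREMENT */

/* Append ino, growing the array in fixed increments without leaking it. */
static int record_ino(int **inodes, int *used, int *size, int ino)
{
	if (*used == *size) {
		int *tmp = realloc(*inodes,
				   sizeof(int) * (*size + REALLOC_INCREMENT));

		if (!tmp)
			return -1;	/* *inodes is still valid here */
		*inodes = tmp;
		*size += REALLOC_INCREMENT;
	}
	(*inodes)[(*used)++] = ino;
	return 0;
}
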
/* Replay add range tag */
static int ext4_fc_replay_add_range(struct super_block *sb,
- struct ext4_fc_tl *tl, u8 *val)
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
struct ext4_fc_add_range fc_add_ex;
struct ext4_extent newex, *ex;
@@ -1659,11 +1773,13 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode not found.");
+ ext4_debug("Inode not found.");
return 0;
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
start = le32_to_cpu(ex->ee_block);
start_pblk = ext4_ext_pblock(ex);
@@ -1671,7 +1787,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
cur = start;
remaining = len;
- jbd_debug(1, "ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n",
+ ext4_debug("ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n",
start, start_pblk, len, ext4_ext_is_unwritten(ex),
inode->i_ino);
@@ -1681,18 +1797,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
map.m_pblk = 0;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret == 0) {
/* Range is not mapped */
- path = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path)) {
- iput(inode);
- return 0;
- }
+ path = ext4_find_extent(inode, cur, path, 0);
+ if (IS_ERR(path))
+ goto out;
memset(&newex, 0, sizeof(newex));
newex.ee_block = cpu_to_le32(cur);
ext4_ext_store_pblock(
@@ -1701,15 +1813,11 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
if (ext4_ext_is_unwritten(ex))
ext4_ext_mark_unwritten(&newex);
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_ext_insert_extent(
- NULL, inode, &path, &newex, 0);
+ path = ext4_ext_insert_extent(NULL, inode,
+ path, &newex, 0);
up_write((&EXT4_I(inode)->i_data_sem));
- ext4_ext_drop_refs(path);
- kfree(path);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (IS_ERR(path))
+ goto out;
goto next;
}
@@ -1722,10 +1830,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex),
start_pblk + cur - start);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* Mark the old blocks as free since they aren't used
* anymore. We maintain an array of all the modified
@@ -1735,20 +1841,18 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
* at the end of the FC replay using our array of
* modified inodes.
*/
- ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
goto next;
}
/* Range is mapped and needs a state change */
- jbd_debug(1, "Converting from %ld to %d %lld",
+ ext4_debug("Converting from %ld to %d %lld",
map.m_flags & EXT4_MAP_UNWRITTEN,
ext4_ext_is_unwritten(ex), map.m_pblk);
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex), map.m_pblk);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* We may have split the extent tree while toggling the state.
* Try to shrink the extent tree now.
@@ -1760,14 +1864,16 @@ next:
}
ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
sb->s_blocksize_bits);
+out:
+ ext4_free_ext_path(path);
iput(inode);
return 0;
}
/* Replay DEL_RANGE tag */
static int
-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
- u8 *val)
+ext4_fc_replay_del_range(struct super_block *sb,
+ struct ext4_fc_tl_mem *tl, u8 *val)
{
struct inode *inode;
struct ext4_fc_del_range lrange;
@@ -1784,13 +1890,15 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
+ ext4_debug("Inode %d not found", le32_to_cpu(lrange.fc_ino));
return 0;
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
- jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
+ ext4_debug("DEL_RANGE, inode %ld, lblk %d, len %d\n",
inode->i_ino, le32_to_cpu(lrange.fc_lblk),
le32_to_cpu(lrange.fc_len));
while (remaining > 0) {
@@ -1798,30 +1906,30 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
map.m_len = remaining;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret > 0) {
remaining -= ret;
cur += ret;
- ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
} else {
remaining -= map.m_len;
cur += map.m_len;
}
}
- ret = ext4_punch_hole(inode,
- le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
- le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
+ le32_to_cpu(lrange.fc_lblk) +
+ le32_to_cpu(lrange.fc_len) - 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
if (ret)
- jbd_debug(1, "ext4_punch_hole returned %d", ret);
+ goto out;
ext4_ext_replay_shrink_inode(inode,
i_size_read(inode) >> sb->s_blocksize_bits);
ext4_mark_inode_dirty(NULL, inode);
+out:
iput(inode);
-
return 0;
}
@@ -1839,7 +1947,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
inode = ext4_iget(sb, state->fc_modified_inodes[i],
EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found.",
+ ext4_debug("Inode %d not found.",
state->fc_modified_inodes[i]);
continue;
}
@@ -1858,23 +1966,25 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
break;
if (ret > 0) {
- path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
+ path = ext4_find_extent(inode, map.m_lblk, path, 0);
if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++)
ext4_mb_mark_bb(inode->i_sb,
- path[j].p_block, 1, 1);
- ext4_ext_drop_refs(path);
- kfree(path);
+ path[j].p_block, 1, true);
+ } else {
+ path = NULL;
}
cur += ret;
ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
- map.m_len, 1);
+ map.m_len, true);
} else {
cur = cur + (map.m_len ? map.m_len : 1);
}
}
iput(inode);
}
+
+ ext4_free_ext_path(path);
}
/*
@@ -1892,8 +2002,8 @@ bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t blk)
if (state->fc_regions[i].ino == 0 ||
state->fc_regions[i].len == 0)
continue;
- if (blk >= state->fc_regions[i].pblk &&
- blk < state->fc_regions[i].pblk + state->fc_regions[i].len)
+ if (in_range(blk, state->fc_regions[i].pblk,
+ state->fc_regions[i].len))
return true;
}
return false;
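
in_range(blk, start, len) tests membership in the half-open interval [start, start + len), which is what the open-coded comparison it replaces expressed. A hedged userspace equivalent (helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True when blk lies in [start, start + len). */
static bool blk_in_region(uint64_t blk, uint64_t start, uint64_t len)
{
	return blk >= start && blk - start < len;
}
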
@@ -1909,6 +2019,33 @@ void ext4_fc_replay_cleanup(struct super_block *sb)
kfree(sbi->s_fc_replay_state.fc_modified_inodes);
}
+static bool ext4_fc_value_len_isvalid(struct ext4_sb_info *sbi,
+ int tag, int len)
+{
+ switch (tag) {
+ case EXT4_FC_TAG_ADD_RANGE:
+ return len == sizeof(struct ext4_fc_add_range);
+ case EXT4_FC_TAG_DEL_RANGE:
+ return len == sizeof(struct ext4_fc_del_range);
+ case EXT4_FC_TAG_CREAT:
+ case EXT4_FC_TAG_LINK:
+ case EXT4_FC_TAG_UNLINK:
+ len -= sizeof(struct ext4_fc_dentry_info);
+ return len >= 1 && len <= EXT4_NAME_LEN;
+ case EXT4_FC_TAG_INODE:
+ len -= sizeof(struct ext4_fc_inode);
+ return len >= EXT4_GOOD_OLD_INODE_SIZE &&
+ len <= sbi->s_inode_size;
+ case EXT4_FC_TAG_PAD:
+ return true; /* padding can have any length */
+ case EXT4_FC_TAG_TAIL:
+ return len >= sizeof(struct ext4_fc_tail);
+ case EXT4_FC_TAG_HEAD:
+ return len == sizeof(struct ext4_fc_head);
+ }
+ return false;
+}
+
/*
* Recovery Scan phase handler
*
@@ -1935,7 +2072,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
struct ext4_fc_replay_state *state;
int ret = JBD2_FC_REPLAY_CONTINUE;
struct ext4_fc_add_range ext;
- struct ext4_fc_tl tl;
+ struct ext4_fc_tl_mem tl;
struct ext4_fc_tail tail;
__u8 *start, *end, *cur, *val;
struct ext4_fc_head head;
@@ -1944,7 +2081,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
state = &sbi->s_fc_replay_state;
start = (u8 *)bh->b_data;
- end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
+ end = start + journal->j_blocksize;
if (state->fc_replay_expected_off == 0) {
state->fc_cur_tag = 0;
@@ -1965,19 +2102,26 @@ static int ext4_fc_replay_scan(journal_t *journal,
}
state->fc_replay_expected_off++;
- for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
- memcpy(&tl, cur, sizeof(tl));
- val = cur + sizeof(tl);
- jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
- tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
- switch (le16_to_cpu(tl.fc_tag)) {
+ for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
+ cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
+ ext4_fc_get_tl(&tl, cur);
+ val = cur + EXT4_FC_TAG_BASE_LEN;
+ if (tl.fc_len > end - val ||
+ !ext4_fc_value_len_isvalid(sbi, tl.fc_tag, tl.fc_len)) {
+ ret = state->fc_replay_num_tags ?
+ JBD2_FC_REPLAY_STOP : -ECANCELED;
+ goto out_err;
+ }
+ ext4_debug("Scan phase, tag:%s, blk %lld\n",
+ tag2str(tl.fc_tag), bh->b_blocknr);
+ switch (tl.fc_tag) {
case EXT4_FC_TAG_ADD_RANGE:
memcpy(&ext, val, sizeof(ext));
ex = (struct ext4_extent *)&ext.fc_ex;
ret = ext4_fc_record_regions(sb,
le32_to_cpu(ext.fc_ino),
le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
- ext4_ext_get_actual_len(ex));
+ ext4_ext_get_actual_len(ex), 0);
if (ret < 0)
break;
ret = JBD2_FC_REPLAY_CONTINUE;
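
Putting the pieces together, the tightened scan advances record by record and refuses any header that no longer fits in the block, or any payload that would run past its end. A rough userspace sketch of that walk (little-endian host assumed for brevity; the constant and helpers are stand-ins, not the ext4 symbols):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define FC_TAG_BASE_LEN 4	/* two 16-bit fields: tag then length */

struct fc_tl_mem { uint16_t fc_tag, fc_len; };

static void fc_get_tl(struct fc_tl_mem *tl, const uint8_t *p)
{
	memcpy(&tl->fc_tag, p, sizeof(tl->fc_tag));
	memcpy(&tl->fc_len, p + 2, sizeof(tl->fc_len));
}

/* Returns 0 on a clean walk, -1 if a record is truncated or oversized. */
static int fc_walk_block(const uint8_t *start, size_t blocksize)
{
	const uint8_t *end = start + blocksize;
	const uint8_t *cur, *val;
	struct fc_tl_mem tl;

	for (cur = start; cur <= end - FC_TAG_BASE_LEN;
	     cur += FC_TAG_BASE_LEN + tl.fc_len) {
		fc_get_tl(&tl, cur);
		val = cur + FC_TAG_BASE_LEN;
		if (tl.fc_len > (size_t)(end - val))
			return -1;	/* payload overruns the block */
		/* dispatch on tl.fc_tag here */
	}
	return 0;
}
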
@@ -1989,14 +2133,14 @@ static int ext4_fc_replay_scan(journal_t *journal,
case EXT4_FC_TAG_INODE:
case EXT4_FC_TAG_PAD:
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
- sizeof(tl) + le16_to_cpu(tl.fc_len));
+ state->fc_crc = ext4_chksum(state->fc_crc, cur,
+ EXT4_FC_TAG_BASE_LEN + tl.fc_len);
break;
case EXT4_FC_TAG_TAIL:
state->fc_cur_tag++;
memcpy(&tail, val, sizeof(tail));
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
- sizeof(tl) +
+ state->fc_crc = ext4_chksum(state->fc_crc, cur,
+ EXT4_FC_TAG_BASE_LEN +
offsetof(struct ext4_fc_tail,
fc_crc));
if (le32_to_cpu(tail.fc_tid) == expected_tid &&
@@ -2022,8 +2166,8 @@ static int ext4_fc_replay_scan(journal_t *journal,
break;
}
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
- sizeof(tl) + le16_to_cpu(tl.fc_len));
+ state->fc_crc = ext4_chksum(state->fc_crc, cur,
+ EXT4_FC_TAG_BASE_LEN + tl.fc_len);
break;
default:
ret = state->fc_replay_num_tags ?
@@ -2047,7 +2191,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_fc_tl tl;
+ struct ext4_fc_tl_mem tl;
__u8 *start, *end, *cur, *val;
int ret = JBD2_FC_REPLAY_CONTINUE;
struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
@@ -2063,7 +2207,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
sbi->s_mount_state |= EXT4_FC_REPLAY;
}
if (!sbi->s_fc_replay_state.fc_replay_num_tags) {
- jbd_debug(1, "Replay stops\n");
+ ext4_debug("Replay stops\n");
ext4_fc_set_bitmaps_and_counters(sb);
return 0;
}
@@ -2076,21 +2220,22 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
#endif
start = (u8 *)bh->b_data;
- end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
+ end = start + journal->j_blocksize;
- for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
- memcpy(&tl, cur, sizeof(tl));
- val = cur + sizeof(tl);
+ for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
+ cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
+ ext4_fc_get_tl(&tl, cur);
+ val = cur + EXT4_FC_TAG_BASE_LEN;
if (state->fc_replay_num_tags == 0) {
ret = JBD2_FC_REPLAY_STOP;
ext4_fc_set_bitmaps_and_counters(sb);
break;
}
- jbd_debug(3, "Replay phase, tag:%s\n",
- tag2str(le16_to_cpu(tl.fc_tag)));
+
+ ext4_debug("Replay phase, tag:%s\n", tag2str(tl.fc_tag));
state->fc_replay_num_tags--;
- switch (le16_to_cpu(tl.fc_tag)) {
+ switch (tl.fc_tag) {
case EXT4_FC_TAG_LINK:
ret = ext4_fc_replay_link(sb, &tl, val);
break;
@@ -2111,19 +2256,18 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
break;
case EXT4_FC_TAG_PAD:
trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
- le16_to_cpu(tl.fc_len), 0);
+ tl.fc_len, 0);
break;
case EXT4_FC_TAG_TAIL:
- trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
- le16_to_cpu(tl.fc_len), 0);
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL,
+ 0, tl.fc_len, 0);
memcpy(&tail, val, sizeof(tail));
WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
break;
case EXT4_FC_TAG_HEAD:
break;
default:
- trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
- le16_to_cpu(tl.fc_len), 0);
+ trace_ext4_fc_replay(sb, tl.fc_tag, 0, tl.fc_len, 0);
ret = -ECANCELED;
break;
}
@@ -2147,17 +2291,17 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal)
journal->j_fc_cleanup_callback = ext4_fc_cleanup;
}
-static const char *fc_ineligible_reasons[] = {
- "Extended attributes changed",
- "Cross rename",
- "Journal flag changed",
- "Insufficient memory",
- "Swap boot",
- "Resize",
- "Dir renamed",
- "Falloc range op",
- "Data journalling",
- "FC Commit Failed"
+static const char * const fc_ineligible_reasons[] = {
+ [EXT4_FC_REASON_XATTR] = "Extended attributes changed",
+ [EXT4_FC_REASON_CROSS_RENAME] = "Cross rename",
+ [EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed",
+ [EXT4_FC_REASON_NOMEM] = "Insufficient memory",
+ [EXT4_FC_REASON_SWAP_BOOT] = "Swap boot",
+ [EXT4_FC_REASON_RESIZE] = "Resize",
+ [EXT4_FC_REASON_RENAME_DIR] = "Dir renamed",
+ [EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op",
+ [EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling",
+ [EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename",
};
int ext4_fc_info_show(struct seq_file *seq, void *v)
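
The designated-initializer form above ties each string to its EXT4_FC_REASON_* index, so reordering or inserting enum values cannot silently shift the table. A minimal standalone illustration of the idiom (hypothetical names):

#include <stdio.h>

enum reason { REASON_XATTR = 0, REASON_RESIZE, REASON_MAX };

static const char * const reason_names[] = {
	[REASON_XATTR]  = "Extended attributes changed",
	[REASON_RESIZE] = "Resize",
};

int main(void)
{
	for (int i = 0; i < REASON_MAX; i++)
		printf("%d: %s\n", i, reason_names[i]);
	return 0;
}
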
@@ -2173,7 +2317,7 @@ int ext4_fc_info_show(struct seq_file *seq, void *v)
"fc stats:\n%ld commits\n%ld ineligible\n%ld numblks\n%lluus avg_commit_time\n",
stats->fc_num_commits, stats->fc_ineligible_commits,
stats->fc_numblks,
- div_u64(sbi->s_fc_avg_commit_time, 1000));
+ div_u64(stats->s_fc_avg_commit_time, 1000));
seq_puts(seq, "Ineligible reasons:\n");
for (i = 0; i < EXT4_FC_REASON_MAX; i++)
seq_printf(seq, "\"%s\":\t%d\n", fc_ineligible_reasons[i],
@@ -2192,3 +2336,8 @@ int __init ext4_fc_init_dentry_cache(void)
return 0;
}
+
+void ext4_fc_destroy_dentry_cache(void)
+{
+ kmem_cache_destroy(ext4_fc_dentry_cachep);
+}
diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
index 937c381b4c85..3bd534e4dbbf 100644
--- a/fs/ext4/fast_commit.h
+++ b/fs/ext4/fast_commit.h
@@ -55,13 +55,13 @@ struct ext4_fc_del_range {
struct ext4_fc_dentry_info {
__le32 fc_parent_ino;
__le32 fc_ino;
- __u8 fc_dname[0];
+ __u8 fc_dname[];
};
-/* Value structure for EXT4_FC_TAG_INODE and EXT4_FC_TAG_INODE_PARTIAL. */
+/* Value structure for EXT4_FC_TAG_INODE. */
struct ext4_fc_inode {
__le32 fc_ino;
- __u8 fc_raw_inode[0];
+ __u8 fc_raw_inode[];
};
/* Value structure for tag EXT4_FC_TAG_TAIL. */
@@ -70,22 +70,23 @@ struct ext4_fc_tail {
__le32 fc_crc;
};
+/* Tag base length */
+#define EXT4_FC_TAG_BASE_LEN (sizeof(struct ext4_fc_tl))
+
+/*
+ * Fast commit status codes
+ */
+enum {
+ EXT4_FC_STATUS_OK = 0,
+ EXT4_FC_STATUS_INELIGIBLE,
+ EXT4_FC_STATUS_SKIPPED,
+ EXT4_FC_STATUS_FAILED,
+};
+
/*
- * Fast commit reason codes
+ * Fast commit ineligibility reasons:
*/
enum {
- /*
- * Commit status codes:
- */
- EXT4_FC_REASON_OK = 0,
- EXT4_FC_REASON_INELIGIBLE,
- EXT4_FC_REASON_ALREADY_COMMITTED,
- EXT4_FC_REASON_FC_START_FAILED,
- EXT4_FC_REASON_FC_FAILED,
-
- /*
- * Fast commit ineligiblity reasons:
- */
EXT4_FC_REASON_XATTR = 0,
EXT4_FC_REASON_CROSS_RENAME,
EXT4_FC_REASON_JOURNAL_FLAG_CHANGE,
@@ -95,7 +96,7 @@ enum {
EXT4_FC_REASON_RENAME_DIR,
EXT4_FC_REASON_FALLOC_RANGE,
EXT4_FC_REASON_INODE_JOURNAL_DATA,
- EXT4_FC_COMMIT_FAILED,
+ EXT4_FC_REASON_ENCRYPTED_FILENAME,
EXT4_FC_REASON_MAX
};
@@ -108,16 +109,19 @@ struct ext4_fc_dentry_update {
int fcd_op; /* Type of update create / unlink / link */
int fcd_parent; /* Parent inode number */
int fcd_ino; /* Inode number */
- struct qstr fcd_name; /* Dirent name */
- unsigned char fcd_iname[DNAME_INLINE_LEN]; /* Dirent name string */
+ struct name_snapshot fcd_name; /* Dirent name */
struct list_head fcd_list;
+ struct list_head fcd_dilist;
};
struct ext4_fc_stats {
unsigned int fc_ineligible_reason_count[EXT4_FC_REASON_MAX];
unsigned long fc_num_commits;
unsigned long fc_ineligible_commits;
+ unsigned long fc_failed_commits;
+ unsigned long fc_skipped_commits;
unsigned long fc_numblks;
+ u64 s_fc_avg_commit_time;
};
#define EXT4_FC_REPLAY_REALLOC_INCREMENT 4
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4c5f41052351..7a8b30932189 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -36,17 +36,34 @@
#include "acl.h"
#include "truncate.h"
-static bool ext4_dio_supported(struct inode *inode)
+/*
+ * Returns %true if the given DIO request should be attempted with DIO, or
+ * %false if it should fall back to buffered I/O.
+ *
+ * DIO isn't well specified; when it's unsupported (either due to the request
+ * being misaligned, or due to the file not supporting DIO at all), filesystems
+ * either fall back to buffered I/O or return EINVAL. For files that don't use
+ * any special features like encryption or verity, ext4 has traditionally
+ * returned EINVAL for misaligned DIO. iomap_dio_rw() uses this convention too.
+ * In this case, we should attempt the DIO, *not* fall back to buffered I/O.
+ *
+ * In contrast, in cases where DIO is unsupported due to ext4 features, ext4
+ * traditionally falls back to buffered I/O.
+ *
+ * This function implements the traditional ext4 behavior in all these cases.
+ */
+static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
{
- if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
- return false;
- if (fsverity_active(inode))
- return false;
- if (ext4_should_journal_data(inode))
- return false;
- if (ext4_has_inline_data(inode))
+ struct inode *inode = file_inode(iocb->ki_filp);
+ u32 dio_align = ext4_dio_alignment(inode);
+
+ if (dio_align == 0)
return false;
- return true;
+
+ if (dio_align == 1)
+ return true;
+
+ return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
}
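
The helper boils down to one power-of-two alignment test on the file position and the iterator's combined alignment. A userspace sketch of the same decision, where dio_align of 0 means "no DIO" and 1 means "no alignment constraint" (the helper name is illustrative and dio_align is assumed to be a power of two otherwise):

#include <stdbool.h>
#include <stdint.h>

static bool should_use_dio(uint64_t pos, uint64_t iter_align, uint32_t dio_align)
{
	if (dio_align == 0)		/* DIO not supported for this file */
		return false;
	if (dio_align == 1)		/* any alignment is acceptable */
		return true;
	/* IS_ALIGNED(pos | iter_align, dio_align) for power-of-two dio_align */
	return ((pos | iter_align) & (dio_align - 1)) == 0;
}
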
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -61,7 +78,7 @@ static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
inode_lock_shared(inode);
}
- if (!ext4_dio_supported(inode)) {
+ if (!ext4_should_use_dio(iocb, to)) {
inode_unlock_shared(inode);
/*
* Fallback to buffered I/O if the operation being performed on
@@ -74,7 +91,7 @@ static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
return generic_file_read_iter(iocb, to);
}
- ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, 0);
+ ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0);
inode_unlock_shared(inode);
file_accessed(iocb->ki_filp);
@@ -114,7 +131,7 @@ static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(inode->i_sb)))
return -EIO;
if (!iov_iter_count(to))
@@ -130,6 +147,17 @@ static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return generic_file_read_iter(iocb, to);
}
+static ssize_t ext4_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct inode *inode = file_inode(in);
+
+ if (unlikely(ext4_forced_shutdown(inode->i_sb)))
+ return -EIO;
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
/*
* Called when an inode is released. Note that this is different
* from ext4_file_open: open gets called at every open, but release
@@ -146,7 +174,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
(atomic_read(&inode->i_writecount) == 1) &&
!EXT4_I(inode)->i_reserved_data_blocks) {
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
up_write(&EXT4_I(inode)->i_data_sem);
}
if (is_dx(inode) && filp->private_data)
@@ -185,8 +213,9 @@ ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
return false;
}
-/* Is IO overwriting allocated and initialized blocks? */
-static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
+/* Is IO overwriting allocated or initialized blocks? */
+static bool ext4_overwrite_io(struct inode *inode,
+ loff_t pos, loff_t len, bool *unwritten)
{
struct ext4_map_blocks map;
unsigned int blkbits = inode->i_blkbits;
@@ -200,12 +229,15 @@ static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
blklen = map.m_len;
err = ext4_map_blocks(NULL, inode, &map, 0);
+ if (err != blklen)
+ return false;
/*
* 'err==len' means that all of the blocks have been preallocated,
- * regardless of whether they have been initialized or not. To exclude
- * unwritten extents, we need to check m_flags.
+ * regardless of whether they have been initialized or not. We need to
+ * check m_flags to distinguish the unwritten extents.
*/
- return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
+ *unwritten = !(map.m_flags & EXT4_MAP_MAPPED);
+ return true;
}
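
The rewritten helper now answers two questions: whether the whole range is already allocated (its return value) and whether those blocks are still unwritten (*unwritten). A compact sketch of that contract, with a hypothetical stand-in for the ext4 block map:

#include <stdbool.h>

struct blk_map {
	unsigned int len;	/* blocks requested */
	bool mapped;		/* EXT4_MAP_MAPPED equivalent */
};

/*
 * Returns true when all requested blocks are allocated; *unwritten is set
 * when they are allocated but not yet initialized (unwritten extents).
 */
static bool is_overwrite(const struct blk_map *map, int mapped_blocks,
			 bool *unwritten)
{
	if (mapped_blocks != (int)map->len)
		return false;
	*unwritten = !map->mapped;
	return true;
}
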
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
@@ -259,102 +291,53 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_NOWAIT)
return -EOPNOTSUPP;
- ext4_fc_start_update(inode);
inode_lock(inode);
ret = ext4_write_checks(iocb, from);
if (ret <= 0)
goto out;
- current->backing_dev_info = inode_to_bdi(inode);
- ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
- current->backing_dev_info = NULL;
+ ret = generic_perform_write(iocb, from);
out:
inode_unlock(inode);
- ext4_fc_stop_update(inode);
- if (likely(ret > 0)) {
- iocb->ki_pos += ret;
- ret = generic_write_sync(iocb, ret);
- }
-
- return ret;
+ if (unlikely(ret <= 0))
+ return ret;
+ return generic_write_sync(iocb, ret);
}
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
- ssize_t written, size_t count)
+ ssize_t written, ssize_t count)
{
handle_t *handle;
- bool truncate = false;
- u8 blkbits = inode->i_blkbits;
- ext4_lblk_t written_blk, end_blk;
- int ret;
-
- /*
- * Note that EXT4_I(inode)->i_disksize can get extended up to
- * inode->i_size while the I/O was running due to writeback of delalloc
- * blocks. But, the code in ext4_iomap_alloc() is careful to use
- * zeroed/unwritten extents if this is possible; thus we won't leave
- * uninitialized blocks in a file even if we didn't succeed in writing
- * as much as we intended.
- */
- WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
- if (offset + count <= EXT4_I(inode)->i_disksize) {
- /*
- * We need to ensure that the inode is removed from the orphan
- * list if it has been added prematurely, due to writeback of
- * delalloc blocks.
- */
- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-
- if (IS_ERR(handle)) {
- ext4_orphan_del(NULL, inode);
- return PTR_ERR(handle);
- }
-
- ext4_orphan_del(handle, inode);
- ext4_journal_stop(handle);
- }
-
- return written;
- }
-
- if (written < 0)
- goto truncate;
+ lockdep_assert_held_write(&inode->i_rwsem);
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- written = PTR_ERR(handle);
- goto truncate;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
if (ext4_update_inode_size(inode, offset + written)) {
- ret = ext4_mark_inode_dirty(handle, inode);
+ int ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret)) {
- written = ret;
ext4_journal_stop(handle);
- goto truncate;
+ return ret;
}
}
- /*
- * We may need to truncate allocated but not written blocks beyond EOF.
- */
- written_blk = ALIGN(offset + written, 1 << blkbits);
- end_blk = ALIGN(offset + count, 1 << blkbits);
- if (written_blk < end_blk && ext4_can_truncate(inode))
- truncate = true;
-
- /*
- * Remove the inode from the orphan list if it has been extended and
- * everything went OK.
- */
- if (!truncate && inode->i_nlink)
+ if ((written == count) && inode->i_nlink)
ext4_orphan_del(handle, inode);
ext4_journal_stop(handle);
- if (truncate) {
-truncate:
+ return written;
+}
+
+/*
+ * Clean up the inode after DIO or DAX extending write has completed and the
+ * inode size has been updated using ext4_handle_inode_extension().
+ */
+static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
+{
+ lockdep_assert_held_write(&inode->i_rwsem);
+ if (need_trunc) {
ext4_truncate_failed_write(inode);
/*
* If the truncate operation failed early, then the inode may
@@ -363,9 +346,29 @@ truncate:
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
+ return;
}
+ /*
+ * If i_disksize got extended either due to writeback of delalloc
+ * blocks or extending truncate while the DIO was running we could fail
+ * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
+ * now.
+ */
+ if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
+ handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- return written;
+ if (IS_ERR(handle)) {
+ /*
+ * The write has successfully completed. Not much to
+ * do with the error here so just clean up the orphan
+ * list and hope for the best.
+ */
+ ext4_orphan_del(NULL, inode);
+ return;
+ }
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+ }
}
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
@@ -374,31 +377,29 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(iocb->ki_filp);
- if (error)
- return error;
- if (size && flags & IOMAP_DIO_UNWRITTEN) {
+ if (!error && size && (flags & IOMAP_DIO_UNWRITTEN) &&
+ (iocb->ki_flags & IOCB_ATOMIC))
+ error = ext4_convert_unwritten_extents_atomic(NULL, inode, pos,
+ size);
+ else if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
- if (error < 0)
- return error;
- }
+ if (error)
+ return error;
/*
- * If we are extending the file, we have to update i_size here before
- * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
- * buffered reads could zero out too much from page cache pages. Update
- * of on-disk size will happen later in ext4_dio_write_iter() where
- * we have enough information to also perform orphan list handling etc.
- * Note that we perform all extending writes synchronously under
- * i_rwsem held exclusively so i_size update is safe here in that case.
- * If the write was not extending, we cannot see pos > i_size here
- * because operations reducing i_size like truncate wait for all
- * outstanding DIO before updating i_size.
+ * Note that EXT4_I(inode)->i_disksize can get extended up to
+ * inode->i_size while the I/O was running due to writeback of delalloc
+ * blocks. But the code in ext4_iomap_alloc() is careful to use
+ * zeroed/unwritten extents if this is possible; thus we won't leave
+ * uninitialized blocks in a file even if we didn't succeed in writing
+ * as much as we intended. Also we can race with truncate or write
+ * expanding the file so we have to be a bit careful here.
*/
- pos += size;
- if (pos > i_size_read(inode))
- i_size_write(inode, pos);
-
- return 0;
+ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
+ pos + size <= i_size_read(inode))
+ return 0;
+ error = ext4_handle_inode_extension(inode, pos, size, size);
+ return error < 0 ? error : 0;
}
static const struct iomap_dio_ops ext4_dio_write_ops = {
@@ -416,17 +417,23 @@ static const struct iomap_dio_ops ext4_dio_write_ops = {
* - For extending writes case we don't take the shared lock, since it requires
* updating inode i_disksize and/or orphan handling with exclusive lock.
*
- * - shared locking will only be true mostly with overwrites. Otherwise we will
- * switch to exclusive i_rwsem lock.
+ * - Shared locking is mostly used for overwrites, of both initialized and
+ * unwritten blocks. When overwriting unwritten blocks we protect extent
+ * splitting with i_data_sem in ext4_inode_info, so the exclusive i_rwsem
+ * lock can also be released.
+ *
+ * - Otherwise we will switch to exclusive i_rwsem lock.
*/
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
- bool *ilock_shared, bool *extend)
+ bool *ilock_shared, bool *extend,
+ bool *unwritten, int *dio_flags)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
loff_t offset;
size_t count;
ssize_t ret;
+ bool overwrite, unaligned_io;
restart:
ret = ext4_generic_write_checks(iocb, from);
@@ -435,16 +442,25 @@ restart:
offset = iocb->ki_pos;
count = ret;
- if (ext4_extending_io(inode, offset, count))
- *extend = true;
+
+ unaligned_io = ext4_unaligned_io(inode, from, offset);
+ *extend = ext4_extending_io(inode, offset, count);
+ overwrite = ext4_overwrite_io(inode, offset, count, unwritten);
+
/*
- * Determine whether the IO operation will overwrite allocated
- * and initialized blocks.
- * We need exclusive i_rwsem for changing security info
- * in file_modified().
+ * Determine whether we need to upgrade to an exclusive lock. This is
+ * required to change security info in file_modified(), for extending
+ * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten
+ * extents (as partial block zeroing may be required).
+ *
+ * Note that unaligned writes are allowed under shared lock so long as
+ * they are pure overwrites. Otherwise, concurrent unaligned writes risk
+ * data corruption due to partial block zeroing in the dio layer, and so
+ * the I/O must occur exclusively.
*/
- if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
- !ext4_overwrite_io(inode, offset, count))) {
+ if (*ilock_shared &&
+ ((!IS_NOSEC(inode) || *extend || !overwrite ||
+ (unaligned_io && *unwritten)))) {
if (iocb->ki_flags & IOCB_NOWAIT) {
ret = -EAGAIN;
goto out;
@@ -455,6 +471,23 @@ restart:
goto restart;
}
+ /*
+ * Now that locking is settled, determine dio flags and exclusivity
+ * requirements. We don't use DIO_OVERWRITE_ONLY because we enforce
+ * behavior already. The inode lock is already held exclusive if the
+ * write is non-overwrite or extending, so drain all outstanding dio and
+ * set the force wait dio flag.
+ */
+ if (!*ilock_shared && (unaligned_io || *extend)) {
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (unaligned_io && (!overwrite || *unwritten))
+ inode_dio_wait(inode);
+ *dio_flags = IOMAP_DIO_FORCE_WAIT;
+ }
+
ret = file_modified(file);
if (ret < 0)
goto out;
@@ -476,18 +509,11 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
loff_t offset = iocb->ki_pos;
size_t count = iov_iter_count(from);
const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
- bool extend = false, unaligned_io = false;
+ bool extend = false, unwritten = false;
bool ilock_shared = true;
+ int dio_flags = 0;
/*
- * We initially start with shared inode lock unless it is
- * unaligned IO which needs exclusive lock anyways.
- */
- if (ext4_unaligned_io(inode, from, offset)) {
- unaligned_io = true;
- ilock_shared = false;
- }
- /*
* Quick check here without any i_rwsem lock to see if it is extending
* IO. A more reliable check is done in ext4_dio_write_checks() with
* proper locking in place.
@@ -511,7 +537,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
/* Fallback to buffered I/O if the inode does not support direct I/O. */
- if (!ext4_dio_supported(inode)) {
+ if (!ext4_should_use_dio(iocb, from)) {
if (ilock_shared)
inode_unlock_shared(inode);
else
@@ -519,32 +545,23 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
return ext4_buffered_write_iter(iocb, from);
}
- ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
+ /*
+ * Prevent inline data from being created since we are going to allocate
+ * blocks for DIO. We know the inode does not currently have inline data
+ * because ext4_should_use_dio() checked for it, but we have to clear
+ * the state flag before the write checks because a lock cycle could
+ * introduce races with other writers.
+ */
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+
+ ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
+ &unwritten, &dio_flags);
if (ret <= 0)
return ret;
- /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
- if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
- ret = -EAGAIN;
- goto out;
- }
-
offset = iocb->ki_pos;
count = ret;
- /*
- * Unaligned direct IO must be serialized among each other as zeroing
- * of partial blocks of two competing unaligned IOs can result in data
- * corruption.
- *
- * So we make sure we don't allow any unaligned IO in flight.
- * For IOs where we need not wait (like unaligned non-AIO DIO),
- * below inode_dio_wait() may anyway become a no-op, since we start
- * with exclusive lock.
- */
- if (unaligned_io)
- inode_dio_wait(inode);
-
if (extend) {
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
@@ -552,27 +569,28 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
- ext4_fc_start_update(inode);
ret = ext4_orphan_add(handle, inode);
- ext4_fc_stop_update(inode);
- if (ret) {
- ext4_journal_stop(handle);
- goto out;
- }
-
ext4_journal_stop(handle);
+ if (ret)
+ goto out;
}
- if (ilock_shared)
+ if (ilock_shared && !unwritten)
iomap_ops = &ext4_iomap_overwrite_ops;
ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
- (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0,
- 0);
+ dio_flags, NULL, 0);
if (ret == -ENOTBLK)
ret = 0;
-
- if (extend)
- ret = ext4_handle_inode_extension(inode, offset, ret, count);
+ if (extend) {
+ /*
+ * We always perform extending DIO write synchronously so by
+ * now the IO is completed and ext4_handle_inode_extension()
+ * was called. Cleanup the inode in case of error or race with
+ * writeback of delalloc blocks.
+ */
+ WARN_ON_ONCE(ret == -EIOCBQUEUED);
+ ext4_inode_extension_cleanup(inode, ret < 0);
+ }
out:
if (ilock_shared)
@@ -584,6 +602,13 @@ out:
ssize_t err;
loff_t endbyte;
+ /*
+ * There is no support for atomic writes on buffered I/O yet;
+ * we should never fall back to buffered I/O for DIO atomic
+ * writes.
+ */
+ WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC);
+
offset = iocb->ki_pos;
err = ext4_buffered_write_iter(iocb, from);
if (err < 0)
@@ -653,8 +678,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
- if (extend)
+ if (extend) {
ret = ext4_handle_inode_extension(inode, offset, ret, count);
+ ext4_inode_extension_cleanup(inode, ret < (ssize_t)count);
+ }
out:
inode_unlock(inode);
if (ret > 0)
@@ -666,15 +693,30 @@ out:
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
+ int ret;
struct inode *inode = file_inode(iocb->ki_filp);
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
#ifdef CONFIG_FS_DAX
if (IS_DAX(inode))
return ext4_dax_write_iter(iocb, from);
#endif
+
+ if (iocb->ki_flags & IOCB_ATOMIC) {
+ size_t len = iov_iter_count(from);
+
+ if (len < EXT4_SB(inode->i_sb)->s_awu_min ||
+ len > EXT4_SB(inode->i_sb)->s_awu_max)
+ return -EINVAL;
+
+ ret = generic_atomic_write_valid(iocb, from);
+ if (ret)
+ return ret;
+ }
+
if (iocb->ki_flags & IOCB_DIRECT)
return ext4_dio_write_iter(iocb, from);
else
@@ -682,8 +724,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
#ifdef CONFIG_FS_DAX
-static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
+static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
int error = 0;
vm_fault_t result;
@@ -699,14 +740,14 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
* read-only.
*
* We check for VM_SHARED rather than vmf->cow_page since the latter is
- * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
+ * unset for order != 0 (i.e. only in do_cow_fault); for
* other sizes, dax_iomap_fault will handle splitting / fallback so that
* we eventually come back with a COW page.
*/
bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
(vmf->vma->vm_flags & VM_SHARED);
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- pfn_t pfn;
+ unsigned long pfn;
if (write) {
sb_start_pagefault(sb);
@@ -723,7 +764,7 @@ retry:
} else {
filemap_invalidate_lock_shared(mapping);
}
- result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
+ result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
if (write) {
ext4_journal_stop(handle);
@@ -732,7 +773,7 @@ retry:
goto retry;
/* Handling synchronous page fault? */
if (result & VM_FAULT_NEEDDSYNC)
- result = dax_finish_sync_fault(vmf, pe_size, pfn);
+ result = dax_finish_sync_fault(vmf, order, pfn);
filemap_invalidate_unlock_shared(mapping);
sb_end_pagefault(sb);
} else {
@@ -744,7 +785,7 @@ retry:
static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
- return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
+ return ext4_dax_huge_fault(vmf, 0);
}
static const struct vm_operations_struct ext4_dax_vm_ops = {
@@ -763,28 +804,33 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
.page_mkwrite = ext4_page_mkwrite,
};
-static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ext4_file_mmap_prepare(struct vm_area_desc *desc)
{
+ int ret;
+ struct file *file = desc->file;
struct inode *inode = file->f_mapping->host;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct dax_device *dax_dev = sbi->s_daxdev;
+ struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
- if (unlikely(ext4_forced_shutdown(sbi)))
- return -EIO;
+ if (file->f_mode & FMODE_WRITE)
+ ret = ext4_emergency_state(inode->i_sb);
+ else
+ ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0;
+ if (unlikely(ret))
+ return ret;
/*
* We don't support synchronous mappings for non-DAX files and
* for DAX files if underneath dax_device is not synchronous.
*/
- if (!daxdev_mapping_supported(vma, dax_dev))
+ if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev))
return -EOPNOTSUPP;
file_accessed(file);
if (IS_DAX(file_inode(file))) {
- vma->vm_ops = &ext4_dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ desc->vm_ops = &ext4_dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
} else {
- vma->vm_ops = &ext4_file_vm_ops;
+ desc->vm_ops = &ext4_file_vm_ops;
}
return 0;
}
@@ -801,7 +847,8 @@ static int ext4_sample_last_mounted(struct super_block *sb,
if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
return 0;
- if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
+ if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
+ !sb_start_intwrite_trylock(sb))
return 0;
ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
@@ -829,8 +876,7 @@ static int ext4_sample_last_mounted(struct super_block *sb,
if (err)
goto out_journal;
lock_buffer(sbi->s_sbh);
- strncpy(sbi->s_es->s_last_mounted, cp,
- sizeof(sbi->s_es->s_last_mounted));
+ strtomem_pad(sbi->s_es->s_last_mounted, cp, 0);
ext4_superblock_csum_set(sb);
unlock_buffer(sbi->s_sbh);
ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
@@ -845,8 +891,12 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
{
int ret;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ if (filp->f_mode & FMODE_WRITE)
+ ret = ext4_emergency_state(inode->i_sb);
+ else
+ ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0;
+ if (unlikely(ret))
+ return ret;
ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
if (ret)
@@ -870,7 +920,10 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
return ret;
}
- filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+ if (ext4_inode_can_atomic_write(inode))
+ filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
+
+ filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
return dquot_file_open(inode, filp);
}
@@ -882,12 +935,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes;
-
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
- else
- maxbytes = inode->i_sb->s_maxbytes;
+ loff_t maxbytes = ext4_get_maxbytes(inode);
switch (whence) {
default:
@@ -921,22 +969,24 @@ const struct file_operations ext4_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
#endif
- .mmap = ext4_file_mmap,
- .mmap_supported_flags = MAP_SYNC,
+ .mmap_prepare = ext4_file_mmap_prepare,
.open = ext4_file_open,
.release = ext4_release_file,
.fsync = ext4_sync_file,
.get_unmapped_area = thp_get_unmapped_area,
- .splice_read = generic_file_splice_read,
+ .splice_read = ext4_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ext4_fallocate,
+ .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
+ FOP_DIO_PARALLEL_WRITE |
+ FOP_DONTCACHE,
};
const struct inode_operations ext4_file_inode_operations = {
.setattr = ext4_setattr,
.getattr = ext4_file_getattr,
.listxattr = ext4_listxattr,
- .get_acl = ext4_get_acl,
+ .get_inode_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
.fiemap = ext4_fiemap,
.fileattr_get = ext4_fileattr_get,
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index 4493ef0c715e..22fc333244ef 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -74,7 +74,8 @@ static int ext4_getfsmap_dev_compare(const void *p1, const void *p2)
static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info,
struct ext4_fsmap *rec)
{
- return rec->fmr_physical < info->gfi_low.fmr_physical;
+ return rec->fmr_physical + rec->fmr_length <=
+ info->gfi_low.fmr_physical;
}
/*
@@ -185,6 +186,59 @@ static inline ext4_fsblk_t ext4_fsmap_next_pblk(struct ext4_fsmap *fmr)
return fmr->fmr_physical + fmr->fmr_length;
}
+static int ext4_getfsmap_meta_helper(struct super_block *sb,
+ ext4_group_t agno, ext4_grpblk_t start,
+ ext4_grpblk_t len, void *priv)
+{
+ struct ext4_getfsmap_info *info = priv;
+ struct ext4_fsmap *p;
+ struct ext4_fsmap *tmp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t fsb, fs_start, fs_end;
+ int error;
+
+ fs_start = fsb = (EXT4_C2B(sbi, start) +
+ ext4_group_first_block_no(sb, agno));
+ fs_end = fs_start + EXT4_C2B(sbi, len);
+
+ /*
+ * Return relevant extents from the meta_list. We emit all extents that
+ * partially/fully overlap with the query range
+ */
+ list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
+ if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
+ list_del(&p->fmr_list);
+ kfree(p);
+ continue;
+ }
+ if (p->fmr_physical <= fs_end &&
+ p->fmr_physical + p->fmr_length > fs_start) {
+ /* Emit the retained free extent record if present */
+ if (info->gfi_lastfree.fmr_owner) {
+ error = ext4_getfsmap_helper(sb, info,
+ &info->gfi_lastfree);
+ if (error)
+ return error;
+ info->gfi_lastfree.fmr_owner = 0;
+ }
+ error = ext4_getfsmap_helper(sb, info, p);
+ if (error)
+ return error;
+ fsb = p->fmr_physical + p->fmr_length;
+ if (info->gfi_next_fsblk < fsb)
+ info->gfi_next_fsblk = fsb;
+ list_del(&p->fmr_list);
+ kfree(p);
+ continue;
+ }
+ }
+ if (info->gfi_next_fsblk < fsb)
+ info->gfi_next_fsblk = fsb;
+
+ return 0;
+}
+
+
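
The list walk above keeps any metadata extent that intersects the queried range. As a point of reference, the standard half-open interval test looks like the sketch below; the code above uses "<= fs_end", so it is marginally more permissive at the upper boundary. Names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* True when [a_start, a_start + a_len) and [b_start, b_start + b_len) overlap. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	return a_start < b_start + b_len && b_start < a_start + a_len;
}
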
/* Transform a blockgroup's free record into a fsmap */
static int ext4_getfsmap_datadev_helper(struct super_block *sb,
ext4_group_t agno, ext4_grpblk_t start,
@@ -343,6 +397,14 @@ static unsigned int ext4_getfsmap_find_sb(struct super_block *sb,
/* Reserved GDT blocks */
if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) {
len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
+
+ /*
+ * mkfs.ext4 can set s_reserved_gdt_blocks to 0 in some cases;
+ * check for that.
+ */
+ if (!len)
+ return 0;
+
error = ext4_getfsmap_fill(meta_list, fsb, len,
EXT4_FMR_OWN_RESV_GDT);
if (error)
@@ -476,6 +538,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
ext4_group_t end_ag;
ext4_grpblk_t first_cluster;
ext4_grpblk_t last_cluster;
+ struct ext4_fsmap irec;
int error = 0;
bofs = le32_to_cpu(sbi->s_es->s_first_data_block);
@@ -486,6 +549,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
keys[0].fmr_physical = bofs;
if (keys[1].fmr_physical >= eofs)
keys[1].fmr_physical = eofs - 1;
+ if (keys[1].fmr_physical < keys[0].fmr_physical)
+ return 0;
start_fsb = keys[0].fmr_physical;
end_fsb = keys[1].fmr_physical;
@@ -537,6 +602,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
error = ext4_mballoc_query_range(sb, info->gfi_agno,
EXT4_B2C(sbi, info->gfi_low.fmr_physical),
EXT4_B2C(sbi, info->gfi_high.fmr_physical),
+ ext4_getfsmap_meta_helper,
ext4_getfsmap_datadev_helper, info);
if (error)
goto err;
@@ -556,9 +622,18 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
goto err;
}
- /* Report any gaps at the end of the bg */
+ /*
+ * The dummy record below will cause ext4_getfsmap_helper() to report
+ * any allocated blocks at the end of the range.
+ */
+ irec.fmr_device = 0;
+ irec.fmr_physical = end_fsb + 1;
+ irec.fmr_length = 0;
+ irec.fmr_owner = EXT4_FMR_OWN_FREE;
+ irec.fmr_flags = 0;
+
info->gfi_last = true;
- error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster, 0, info);
+ error = ext4_getfsmap_helper(sb, info, &irec);
if (error)
goto err;
@@ -574,8 +649,9 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
return true;
- if (EXT4_SB(sb)->s_journal_bdev &&
- fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev))
+ if (EXT4_SB(sb)->s_journal_bdev_file &&
+ fm->fmr_device ==
+ new_encode_dev(file_bdev(EXT4_SB(sb)->s_journal_bdev_file)->bd_dev))
return true;
return false;
}
@@ -645,9 +721,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
memset(handlers, 0, sizeof(handlers));
handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
handlers[0].gfd_fn = ext4_getfsmap_datadev;
- if (EXT4_SB(sb)->s_journal_bdev) {
+ if (EXT4_SB(sb)->s_journal_bdev_file) {
handlers[1].gfd_dev = new_encode_dev(
- EXT4_SB(sb)->s_journal_bdev->bd_dev);
+ file_bdev(EXT4_SB(sb)->s_journal_bdev_file)->bd_dev);
handlers[1].gfd_fn = ext4_getfsmap_logdev;
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 027a7d7037a0..e476c6de3074 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
@@ -78,21 +79,13 @@ static int ext4_sync_parent(struct inode *inode)
return ret;
}
-static int ext4_fsync_nojournal(struct inode *inode, bool datasync,
- bool *needs_barrier)
+static int ext4_fsync_nojournal(struct file *file, loff_t start, loff_t end,
+ int datasync, bool *needs_barrier)
{
- int ret, err;
-
- ret = sync_mapping_buffers(inode->i_mapping);
- if (!(inode->i_state & I_DIRTY_ALL))
- return ret;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- return ret;
-
- err = sync_inode_metadata(inode, 1);
- if (!ret)
- ret = err;
+ struct inode *inode = file->f_inode;
+ int ret;
+ ret = generic_buffers_fsync_noflush(file, start, end, datasync);
if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
@@ -108,6 +101,13 @@ static int ext4_fsync_journal(struct inode *inode, bool datasync,
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
+ /*
+ * Fastcommit does not really support fsync on directories or other
+ * special files. Force a full commit.
+ */
+ if (!S_ISREG(inode->i_mode))
+ return ext4_force_commit(inode->i_sb);
+
if (journal->j_flags & JBD2_BARRIER &&
!jbd2_trans_will_send_data_barrier(journal, commit_tid))
*needs_barrier = true;
@@ -131,20 +131,23 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
int ret = 0, err;
bool needs_barrier = false;
struct inode *inode = file->f_mapping->host;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- if (unlikely(ext4_forced_shutdown(sbi)))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
ASSERT(ext4_journal_current_handle() == NULL);
trace_ext4_sync_file_enter(file, datasync);
- if (sb_rdonly(inode->i_sb)) {
- /* Make sure that we read updated s_mount_flags value */
- smp_rmb();
- if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))
- ret = -EROFS;
+ if (sb_rdonly(inode->i_sb))
+ goto out;
+
+ if (!EXT4_SB(inode->i_sb)->s_journal) {
+ ret = ext4_fsync_nojournal(file, start, end, datasync,
+ &needs_barrier);
+ if (needs_barrier)
+ goto issue_flush;
goto out;
}
@@ -153,26 +156,13 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
/*
- * data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
* Metadata is in the journal, we wait for proper transaction to
* commit here.
- *
- * data=journal:
- * filemap_fdatawrite won't do anything (the buffers are clean).
- * ext4_force_commit will write the file data into the journal and
- * will wait on that.
- * filemap_fdatawait() will encounter a ton of newly-dirtied pages
- * (they were dirtied by commit). But that's OK - the blocks are
- * safe in-journal, which is all fsync() needs to ensure.
*/
- if (!sbi->s_journal)
- ret = ext4_fsync_nojournal(inode, datasync, &needs_barrier);
- else if (ext4_should_journal_data(inode))
- ret = ext4_force_commit(inode->i_sb);
- else
- ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
+ ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
+issue_flush:
if (needs_barrier) {
err = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!ret)
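
The issue_flush tail of ext4_sync_file() above issues the cache flush only when a barrier is needed, and lets the flush result surface only if nothing failed earlier. Below is a self-contained sketch of that error-propagation idiom; the flush is a stub, not the real blkdev_issue_flush().

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stub standing in for a device cache flush; returns 0 or a -errno. */
static int issue_flush_stub(bool fail)
{
	return fail ? -EIO : 0;
}

/*
 * Mirror of the pattern at the end of ext4_sync_file(): an optional
 * flush may itself fail, but it must not hide an earlier error.
 */
static int finish_sync(int ret, bool needs_barrier, bool flush_fails)
{
	if (needs_barrier) {
		int err = issue_flush_stub(flush_fails);

		if (!ret)
			ret = err;
	}
	return ret;
}

int main(void)
{
	/* Earlier error wins even if the flush also fails (-EROFS = -30). */
	printf("%d\n", finish_sync(-EROFS, true, true));
	/* Flush error is reported when nothing failed before it (-EIO = -5). */
	printf("%d\n", finish_sync(0, true, true));
	/* No barrier needed: the result passes through untouched (0). */
	printf("%d\n", finish_sync(0, false, true));
	return 0;
}
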
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index f34f4176c1e7..48483cd015d3 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -268,7 +268,7 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
combined_hash = fscrypt_fname_siphash(dir, &qname);
} else {
ext4_warning_inode(dir, "Siphash requires key");
- return -1;
+ return -EINVAL;
}
hash = (__u32)(combined_hash >> 32);
@@ -277,7 +277,11 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
}
default:
hinfo->hash = 0;
- return -1;
+ hinfo->minor_hash = 0;
+ ext4_warning(dir->i_sb,
+ "invalid/unsupported hash tree version %u",
+ hinfo->hash_version);
+ return -EINVAL;
}
hash = hash & ~1;
if (hash == (EXT4_HTREE_EOF_32BIT << 1))
@@ -290,15 +294,15 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
struct dx_hash_info *hinfo)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
const struct unicode_map *um = dir->i_sb->s_encoding;
int r, dlen;
unsigned char *buff;
struct qstr qstr = {.name = name, .len = len };
- if (len && IS_CASEFOLDED(dir) && um &&
+ if (len && IS_CASEFOLDED(dir) &&
(!IS_ENCRYPTED(dir) || fscrypt_has_encryption_key(dir))) {
- buff = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
+ buff = kzalloc(PATH_MAX, GFP_KERNEL);
if (!buff)
return -ENOMEM;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f73e5eb43eae..b20a1bf866ab 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -87,19 +87,18 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
- grp = ext4_get_group_info(sb, block_group);
-
if (buffer_verified(bh))
return 0;
- if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+
+ grp = ext4_get_group_info(sb, block_group);
+ if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
if (buffer_verified(bh))
goto verified;
blk = ext4_inode_bitmap(sb, desc);
- if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
- EXT4_INODES_PER_GROUP(sb) / 8) ||
+ if (!ext4_inode_bitmap_csum_verify(sb, desc, bh) ||
ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
@@ -194,8 +193,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
* submit the buffer_head for reading
*/
trace_ext4_load_inode_bitmap(sb, block_group);
- ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
- ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
+ ext4_read_bh(bh, REQ_META | REQ_PRIO,
+ ext4_end_bitmap_read,
+ ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_EIO));
if (!buffer_uptodate(bh)) {
put_bh(bh);
ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
@@ -252,10 +252,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
"nonexistent device\n", __func__, __LINE__);
return;
}
- if (atomic_read(&inode->i_count) > 1) {
+ if (icount_read(inode) > 1) {
ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
__func__, __LINE__, inode->i_ino,
- atomic_read(&inode->i_count));
+ icount_read(inode));
return;
}
if (inode->i_nlink) {
@@ -293,7 +293,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
}
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
grp = ext4_get_group_info(sb, block_group);
- if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
+ if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
fatal = -EFSCORRUPTED;
goto error_return;
}
@@ -327,8 +327,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (percpu_counter_initialized(&sbi->s_dirs_counter))
percpu_counter_dec(&sbi->s_dirs_counter);
}
- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
@@ -463,10 +462,9 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
hinfo.hash_version = DX_HASH_HALF_MD4;
hinfo.seed = sbi->s_hash_seed;
ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
- grp = hinfo.hash;
+ parent_group = hinfo.hash % ngroups;
} else
- grp = prandom_u32();
- parent_group = (unsigned)grp % ngroups;
+ parent_group = get_random_u32_below(ngroups);
for (i = 0; i < ngroups; i++) {
g = (parent_group + i) % ngroups;
get_orlov_stats(sb, g, flex_size, &stats);
@@ -510,11 +508,13 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
goto fallback;
}
- max_dirs = ndirs / ngroups + inodes_per_group / 16;
+ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
min_inodes = avefreei - inodes_per_group*flex_size / 4;
if (min_inodes < 1)
min_inodes = 1;
min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
+ if (min_clusters < 0)
+ min_clusters = 0;
/*
* Start looking in the flex group where we last allocated an
@@ -691,7 +691,8 @@ static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
if (!bh || !buffer_uptodate(bh))
/*
* If the block is not in the buffer cache, then it
- * must have been written out.
+ * must have been written out, or, most unlikely, is
+ * being migrated - false failure should be OK here.
*/
goto out;
@@ -756,10 +757,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
struct ext4_group_desc *gdp;
ext4_group_t group;
int bit;
- int err = -EFSCORRUPTED;
+ int err;
if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
- goto out;
+ return -EFSCORRUPTED;
group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
@@ -773,7 +774,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
}
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
- if (!gdp || !group_desc_bh) {
+ if (!gdp) {
err = -EINVAL;
goto out;
}
@@ -814,8 +815,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
- ext4_block_bitmap_csum_set(sb, group, gdp,
- block_bitmap_bh);
+ ext4_block_bitmap_csum_set(sb, gdp, block_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
@@ -853,8 +853,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
if (ext4_has_group_desc_csum(sb)) {
- ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
@@ -862,6 +861,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
sync_dirty_buffer(group_desc_bh);
out:
+ brelse(inode_bitmap_bh);
return err;
}
@@ -871,7 +871,7 @@ static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
struct super_block *sb = dir->i_sb;
int nblocks = 0;
#ifdef CONFIG_EXT4_FS_POSIX_ACL
- struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
+ struct posix_acl *p = get_inode_acl(dir, ACL_TYPE_DEFAULT);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -922,7 +922,7 @@ static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
-struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
+struct inode *__ext4_new_inode(struct mnt_idmap *idmap,
handle_t *handle, struct inode *dir,
umode_t mode, const struct qstr *qstr,
__u32 goal, uid_t *owner, __u32 i_flags,
@@ -952,8 +952,9 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
sb = dir->i_sb;
sbi = EXT4_SB(sb);
- if (unlikely(ext4_forced_shutdown(sbi)))
- return ERR_PTR(-EIO);
+ ret2 = ext4_emergency_state(sb);
+ if (unlikely(ret2))
+ return ERR_PTR(ret2);
ngroups = ext4_get_groups_count(sb);
trace_ext4_request_inode(dir, mode);
@@ -973,10 +974,10 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
i_gid_write(inode, owner[1]);
} else if (test_opt(sb, GRPID)) {
inode->i_mode = mode;
- inode_fsuid_set(inode, mnt_userns);
+ inode_fsuid_set(inode, idmap);
inode->i_gid = dir->i_gid;
} else
- inode_init_owner(mnt_userns, inode, dir, mode);
+ inode_init_owner(idmap, inode, dir, mode);
if (ext4_has_feature_project(sb) &&
ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
@@ -1048,21 +1049,21 @@ got_group:
* Skip groups with already-known suspicious inode
* tables
*/
- if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
goto next_group;
}
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
/* Skip groups with suspicious inode tables */
- if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
- && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
- IS_ERR(inode_bitmap_bh)) {
+ if (IS_ERR(inode_bitmap_bh)) {
inode_bitmap_bh = NULL;
goto next_group;
}
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
+ EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ goto next_group;
-repeat_in_this_group:
ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
if (!ret2)
goto next_group;
@@ -1077,8 +1078,8 @@ repeat_in_this_group:
if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) {
BUG_ON(nblocks <= 0);
- handle = __ext4_journal_start_sb(dir->i_sb, line_no,
- handle_type, nblocks, 0,
+ handle = __ext4_journal_start_sb(NULL, dir->i_sb,
+ line_no, handle_type, nblocks, 0,
ext4_trans_default_revoke_credits(sb));
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
@@ -1112,8 +1113,6 @@ repeat_in_this_group:
if (!ret2)
goto got; /* we grabbed the inode! */
- if (ino < EXT4_INODES_PER_GROUP(sb))
- goto repeat_in_this_group;
next_group:
if (++group == ngroups)
group = 0;
@@ -1166,8 +1165,7 @@ got:
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
- ext4_block_bitmap_csum_set(sb, group, gdp,
- block_bitmap_bh);
+ ext4_block_bitmap_csum_set(sb, gdp, block_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
@@ -1186,6 +1184,10 @@ got:
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
grp = ext4_get_group_info(sb, group);
+ if (!grp) {
+ err = -EFSCORRUPTED;
+ goto out;
+ }
down_read(&grp->alloc_sem); /*
* protect vs itable
* lazyinit
@@ -1223,8 +1225,7 @@ got:
}
}
if (ext4_has_group_desc_csum(sb)) {
- ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
@@ -1249,8 +1250,8 @@ got:
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- ei->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ ei->i_crtime = inode_get_mtime(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
@@ -1280,26 +1281,24 @@ got:
EXT4_GROUP_INFO_IBITMAP_CORRUPT);
goto out;
}
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
/* Precompute checksum seed for inode metadata */
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
__u32 csum;
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = cpu_to_le32(inode->i_generation);
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum,
sizeof(inum));
- ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
- sizeof(gen));
+ ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
- ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = sbi->s_want_extra_isize;
ei->i_inline_off = 0;
if (ext4_has_feature_inline_data(sb) &&
- (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
+ (!(ei->i_flags & (EXT4_DAX_FL|EXT4_EA_INODE_FL)) || S_ISDIR(mode)))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
ret = inode;
err = dquot_alloc_inode(inode);
@@ -1335,10 +1334,9 @@ got:
}
}
- if (ext4_handle_valid(handle)) {
- ei->i_sync_tid = handle->h_transaction->t_tid;
- ei->i_datasync_tid = handle->h_transaction->t_tid;
- }
+ ext4_set_inode_mapping_order(inode);
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
@@ -1522,14 +1520,8 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
int num, ret = 0, used_blks = 0;
unsigned long used_inos = 0;
- /* This should not happen, but just to be sure check this */
- if (sb_rdonly(sb)) {
- ret = 1;
- goto out;
- }
-
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
- if (!gdp)
+ if (!gdp || !grp)
goto out;
/*
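
One of the find_group_orlov() changes above replaces prandom_u32() % ngroups with get_random_u32_below(ngroups). Besides moving to the preferred randomness API, a bounded helper avoids the slight modulo bias that a plain % ngroups has whenever ngroups does not divide 2^32. The sketch below shows one common way to obtain an unbiased bounded value (rejection sampling); it is an illustration under that assumption, not the kernel's implementation.

#include <stdio.h>
#include <stdint.h>

/* Tiny deterministic 32-bit generator (xorshift32), purely for the demo. */
static uint32_t xorshift_state = 2463534242u;

static uint32_t rand_u32(void)
{
	uint32_t x = xorshift_state;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return xorshift_state = x;
}

/*
 * Return a uniform value in [0, bound) by rejecting the small tail of
 * the 32-bit space that cannot be split evenly into `bound` buckets.
 */
static uint32_t rand_u32_below(uint32_t bound)
{
	uint32_t limit = UINT32_MAX - (UINT32_MAX % bound);
	uint32_t v;

	do {
		v = rand_u32();
	} while (v >= limit);
	return v % bound;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%u\n", rand_u32_below(7));
	return 0;
}
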
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 89efa78ed4b2..da76353b3a57 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
+ unsigned int key;
int ret = -EIO;
*err = 0;
@@ -156,14 +157,20 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
if (!p->key)
goto no_block;
while (--depth) {
- bh = sb_getblk(sb, le32_to_cpu(p->key));
+ key = le32_to_cpu(p->key);
+ if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
+ /* the block was out of range */
+ ret = -EFSCORRUPTED;
+ goto failure;
+ }
+ bh = sb_getblk(sb, key);
if (unlikely(!bh)) {
ret = -ENOMEM;
goto failure;
}
if (!bh_uptodate_or_lock(bh)) {
- if (ext4_read_bh(bh, 0, NULL) < 0) {
+ if (ext4_read_bh(bh, 0, NULL, false) < 0) {
put_bh(bh);
goto failure;
}
@@ -460,7 +467,7 @@ static int ext4_splice_branch(handle_t *handle,
* the new i_size. But that is not done here - it is done in
* generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
*/
- jbd_debug(5, "splicing indirect only\n");
+ ext4_debug("splicing indirect only\n");
BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
if (err)
@@ -472,7 +479,7 @@ static int ext4_splice_branch(handle_t *handle,
err = ext4_mark_inode_dirty(handle, ar->inode);
if (unlikely(err))
goto err_out;
- jbd_debug(5, "splicing direct\n");
+ ext4_debug("splicing direct\n");
}
return err;
@@ -532,7 +539,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
int indirect_blks;
int blocks_to_boundary = 0;
int depth;
- int count = 0;
+ u64 count = 0;
ext4_fsblk_t first_block = 0;
trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
@@ -581,7 +588,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
count++;
/* Fill in size of a hole we found */
map->m_pblk = 0;
- map->m_len = min_t(unsigned int, map->m_len, count);
+ map->m_len = umin(map->m_len, count);
goto cleanup;
}
@@ -644,6 +651,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
ext4_update_inode_fsync_trans(handle, inode, 1);
count = ar.len;
+
got_it:
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = le32_to_cpu(chain[depth-1].key);
@@ -696,10 +704,10 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
@@ -1017,7 +1025,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
}
/* Go read the buffer for the next level down */
- bh = ext4_sb_bread(inode->i_sb, nr, 0);
+ bh = ext4_sb_bread_nofail(inode->i_sb, nr);
/*
* A read failure? Report error and clear slot
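
The ext4_get_branch() hunk above validates an indirect block pointer against ext4_blocks_count() before handing it to sb_getblk(), failing with -EFSCORRUPTED instead of chasing an out-of-range block. Here is a stripped-down, self-contained sketch of the same rule, namely that an on-disk reference is untrusted input and must be range-checked before use; it is plain userspace C with an invented geometry constant, not kernel code.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define TOTAL_BLOCKS 1024ULL	/* pretend filesystem size, in blocks */

/*
 * A pointer read from disk is untrusted input: validate it against the
 * filesystem geometry before using it to index anything.
 */
static int read_indirect_block(uint64_t blocknr)
{
	if (blocknr == 0 || blocknr >= TOTAL_BLOCKS) {
		fprintf(stderr, "corrupt block pointer %llu\n",
			(unsigned long long)blocknr);
		return -EUCLEAN;	/* userspace spelling of "fs corrupted" */
	}
	printf("reading block %llu\n", (unsigned long long)blocknr);
	return 0;
}

int main(void)
{
	read_indirect_block(17);	/* in range: proceeds        */
	read_indirect_block(4096);	/* out of range: rejected    */
	return 0;
}
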
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 39a1ab129fdc..1f6bc05593df 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -6,8 +6,9 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
+#include <linux/namei.h>
#include <linux/iversion.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -19,6 +20,11 @@
#define EXT4_INLINE_DOTDOT_OFFSET 2
#define EXT4_INLINE_DOTDOT_SIZE 4
+
+static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
+ struct inode *inode,
+ void **fsdata);
+
static int ext4_get_inline_size(struct inode *inode)
{
if (EXT4_I(inode)->i_inline_off)
@@ -33,8 +39,12 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
struct ext4_inode *raw_inode;
+ void *end;
int free, min_offs;
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ return 0;
+
min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
EXT4_GOOD_OLD_INODE_SIZE -
EXT4_I(inode)->i_extra_isize -
@@ -53,14 +63,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
raw_inode = ext4_raw_inode(iloc);
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
/* Compute min_offs. */
- for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+ while (!IS_LAST_ENTRY(entry)) {
+ void *next = EXT4_XATTR_NEXT(entry);
+
+ if (next >= end) {
+ EXT4_ERROR_INODE(inode,
+ "corrupt xattr in inline inode");
+ return 0;
+ }
if (!entry->e_value_inum && entry->e_value_size) {
size_t offs = le16_to_cpu(entry->e_value_offs);
if (offs < min_offs)
min_offs = offs;
}
+ entry = next;
}
free = min_offs -
((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
@@ -155,7 +174,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
(void *)ext4_raw_inode(&is.iloc));
EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
le32_to_cpu(is.s.here->e_value_size);
- ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
out:
brelse(is.iloc.bh);
@@ -176,8 +194,7 @@ static int ext4_read_inline_data(struct inode *inode, void *buffer,
BUG_ON(len > EXT4_I(inode)->i_inline_size);
- cp_len = len < EXT4_MIN_INLINE_DATA_SIZE ?
- len : EXT4_MIN_INLINE_DATA_SIZE;
+ cp_len = min_t(unsigned int, len, EXT4_MIN_INLINE_DATA_SIZE);
raw_inode = ext4_raw_inode(iloc);
memcpy(buffer, (void *)(raw_inode->i_block), cp_len);
@@ -216,7 +233,7 @@ static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
struct ext4_inode *raw_inode;
int cp_len = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_emergency_state(inode->i_sb)))
return;
BUG_ON(!EXT4_I(inode)->i_inline_off);
@@ -286,7 +303,11 @@ static int ext4_create_inline_data(handle_t *handle,
if (error)
goto out;
- BUG_ON(!is.s.not_found);
+ if (!is.s.not_found) {
+ EXT4_ERROR_INODE(inode, "unexpected inline data xattr");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error) {
@@ -337,7 +358,11 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
if (error)
goto out;
- BUG_ON(is.s.not_found);
+ if (is.s.not_found) {
+ EXT4_ERROR_INODE(inode, "missing inline data xattr");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
len -= EXT4_MIN_INLINE_DATA_SIZE;
value = kzalloc(len, GFP_NOFS);
@@ -348,7 +373,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
value, len);
- if (error == -ENODATA)
+ if (error < 0)
goto out;
BUFFER_TRACE(is.iloc.bh, "get_write_access");
@@ -380,7 +405,7 @@ out:
}
static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
- unsigned int len)
+ loff_t len)
{
int ret, size, no_expand;
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -393,7 +418,12 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
return -ENOSPC;
ext4_write_lock_xattr(inode, &no_expand);
-
+ /*
+ * ei->i_inline_size may have changed since the initial check
+ * if other xattrs were added. Recalculate to ensure
+ * ext4_update_inline_data() validates against current capacity.
+ */
+ (void) ext4_find_inline_data_nolock(inode);
if (ei->i_inline_off)
ret = ext4_update_inline_data(handle, inode, len);
else
@@ -421,9 +451,13 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
if (!ei->i_inline_off)
return 0;
+ down_write(&ei->i_data_sem);
+
error = ext4_get_inode_loc(inode, &is.iloc);
- if (error)
+ if (error) {
+ up_write(&ei->i_data_sem);
return error;
+ }
error = ext4_xattr_ibody_find(inode, &i, &is);
if (error)
@@ -462,19 +496,20 @@ out:
brelse(is.iloc.bh);
if (error == -ENODATA)
error = 0;
+ up_write(&ei->i_data_sem);
return error;
}
-static int ext4_read_inline_page(struct inode *inode, struct page *page)
+static int ext4_read_inline_folio(struct inode *inode, struct folio *folio)
{
void *kaddr;
int ret = 0;
size_t len;
struct ext4_iloc iloc;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
BUG_ON(!ext4_has_inline_data(inode));
- BUG_ON(page->index);
+ BUG_ON(folio->index);
if (!EXT4_I(inode)->i_inline_off) {
ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
@@ -487,19 +522,19 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page)
goto out;
len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
- kaddr = kmap_atomic(page);
+ BUG_ON(len > PAGE_SIZE);
+ kaddr = kmap_local_folio(folio, 0);
ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
- zero_user_segment(page, len, PAGE_SIZE);
- SetPageUptodate(page);
+ kaddr = folio_zero_tail(folio, len, kaddr + len);
+ kunmap_local(kaddr);
+ folio_mark_uptodate(folio);
brelse(iloc.bh);
out:
return ret;
}
-int ext4_readpage_inline(struct inode *inode, struct page *page)
+int ext4_readpage_inline(struct inode *inode, struct folio *folio)
{
int ret = 0;
@@ -513,27 +548,26 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
* Current inline data can only exist in the 1st page,
* So for all the other pages, just set them uptodate.
*/
- if (!page->index)
- ret = ext4_read_inline_page(inode, page);
- else if (!PageUptodate(page)) {
- zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ if (!folio->index)
+ ret = ext4_read_inline_folio(inode, folio);
+ else if (!folio_test_uptodate(folio)) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
}
up_read(&EXT4_I(inode)->xattr_sem);
- unlock_page(page);
+ folio_unlock(folio);
return ret >= 0 ? 0 : ret;
}
static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
- struct inode *inode,
- unsigned flags)
+ struct inode *inode)
{
int ret, needed_blocks, no_expand;
handle_t *handle = NULL;
int retries = 0, sem_held = 0;
- struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned from, to;
struct ext4_iloc iloc;
@@ -546,7 +580,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
return 0;
}
- needed_blocks = ext4_writepage_trans_blocks(inode);
+ needed_blocks = ext4_chunk_trans_extent(inode, 1);
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -562,12 +596,11 @@ retry:
/* We cannot recurse into the filesystem as the transaction is already
* started */
- flags |= AOP_FLAG_NOFS;
-
- page = grab_cache_page_write_begin(mapping, 0, flags);
- if (!page) {
- ret = -ENOMEM;
- goto out;
+ folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ goto out_nofolio;
}
ext4_write_lock_xattr(inode, &no_expand);
@@ -580,32 +613,35 @@ retry:
from = 0;
to = ext4_get_inline_size(inode);
- if (!PageUptodate(page)) {
- ret = ext4_read_inline_page(inode, page);
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_read_inline_folio(inode, folio);
if (ret < 0)
goto out;
}
+ ext4_fc_track_inode(handle, inode);
ret = ext4_destroy_inline_data_nolock(handle, inode);
if (ret)
goto out;
if (ext4_should_dioread_nolock(inode)) {
- ret = __block_write_begin(page, from, to,
- ext4_get_block_unwritten);
+ ret = ext4_block_write_begin(handle, folio, from, to,
+ ext4_get_block_unwritten);
} else
- ret = __block_write_begin(page, from, to, ext4_get_block);
+ ret = ext4_block_write_begin(handle, folio, from, to,
+ ext4_get_block);
+ clear_buffer_new(folio_buffers(folio));
if (!ret && ext4_should_journal_data(inode)) {
- ret = ext4_walk_page_buffers(handle, inode, page_buffers(page),
- from, to, NULL,
- do_journal_get_write_access);
+ ret = ext4_walk_page_buffers(handle, inode,
+ folio_buffers(folio), from, to,
+ NULL, do_journal_get_write_access);
}
if (ret) {
- unlock_page(page);
- put_page(page);
- page = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
ext4_orphan_add(handle, inode);
ext4_write_unlock_xattr(inode, &no_expand);
sem_held = 0;
@@ -625,13 +661,14 @@ retry:
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
- if (page)
- block_commit_write(page, from, to);
+ if (folio)
+ block_commit_write(folio, from, to);
out:
- if (page) {
- unlock_page(page);
- put_page(page);
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
}
+out_nofolio:
if (sem_held)
ext4_write_unlock_xattr(inode, &no_expand);
if (handle)
@@ -641,98 +678,113 @@ out:
}
/*
- * Try to write data in the inode.
- * If the inode has inline data, check whether the new write can be
- * in the inode also. If not, create the page the handle, move the data
- * to the page make it update and let the later codes create extent for it.
+ * Prepare the write for the inline data.
+ * If the data can be written into the inode, we just read
+ * the page and make it uptodate, and start the journal.
+ * Otherwise read the page and mark it dirty so that it can be
+ * handled in writepages() (the i_disksize update is left to the
+ * normal ext4_da_write_end).
*/
-int ext4_try_to_write_inline_data(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- unsigned flags,
- struct page **pagep)
+int ext4_generic_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ struct folio **foliop,
+ void **fsdata, bool da)
{
int ret;
handle_t *handle;
- struct page *page;
+ struct folio *folio;
struct ext4_iloc iloc;
-
- if (pos + len > ext4_get_max_inline_size(inode))
- goto convert;
+ int retries = 0;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
return ret;
- /*
- * The possible write could happen in the inode,
- * so try to reserve the space in inode first.
- */
+retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- handle = NULL;
- goto out;
+ goto out_release_bh;
}
ret = ext4_prepare_inline_data(handle, inode, pos + len);
if (ret && ret != -ENOSPC)
- goto out;
+ goto out_stop_journal;
- /* We don't have space in inline inode, so convert it to extent. */
if (ret == -ENOSPC) {
ext4_journal_stop(handle);
- brelse(iloc.bh);
- goto convert;
- }
-
- ret = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh,
- EXT4_JTR_NONE);
- if (ret)
- goto out;
+ if (!da) {
+ brelse(iloc.bh);
+ /* Retry inside */
+ return ext4_convert_inline_data_to_extent(mapping, inode);
+ }
- flags |= AOP_FLAG_NOFS;
+ ret = ext4_da_convert_inline_data_to_extent(mapping, inode, fsdata);
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry_journal;
+ goto out_release_bh;
+ }
- page = grab_cache_page_write_begin(mapping, 0, flags);
- if (!page) {
- ret = -ENOMEM;
- goto out;
+ folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ goto out_stop_journal;
}
- *pagep = page;
down_read(&EXT4_I(inode)->xattr_sem);
+ /* Someone else had converted it to extent */
if (!ext4_has_inline_data(inode)) {
ret = 0;
- unlock_page(page);
- put_page(page);
- goto out_up_read;
+ goto out_release_folio;
}
- if (!PageUptodate(page)) {
- ret = ext4_read_inline_page(inode, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- goto out_up_read;
- }
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_read_inline_folio(inode, folio);
+ if (ret < 0)
+ goto out_release_folio;
}
- ret = 1;
- handle = NULL;
-out_up_read:
+ ret = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh, EXT4_JTR_NONE);
+ if (ret)
+ goto out_release_folio;
+ *foliop = folio;
up_read(&EXT4_I(inode)->xattr_sem);
-out:
- if (handle && (ret != 1))
- ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ return 1;
+
+out_release_folio:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ folio_unlock(folio);
+ folio_put(folio);
+out_stop_journal:
+ ext4_journal_stop(handle);
+out_release_bh:
brelse(iloc.bh);
return ret;
-convert:
- return ext4_convert_inline_data_to_extent(mapping,
- inode, flags);
+}
+
+/*
+ * Try to write data in the inode.
+ * If the inode has inline data, check whether the new write can be
+ * in the inode too. If not, create the page and the handle, move the data
+ * to the page, mark it uptodate, and let the later code create an extent for it.
+ */
+int ext4_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ struct folio **foliop)
+{
+ if (pos + len > ext4_get_max_inline_size(inode))
+ return ext4_convert_inline_data_to_extent(mapping, inode);
+ return ext4_generic_write_inline_data(mapping, inode, pos, len,
+ foliop, NULL, false);
}
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page)
+ unsigned copied, struct folio *folio)
{
handle_t *handle = ext4_journal_current_handle();
int no_expand;
@@ -740,14 +792,14 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
struct ext4_iloc iloc;
int ret = 0, ret2;
- if (unlikely(copied < len) && !PageUptodate(page))
+ if (unlikely(copied < len) && !folio_test_uptodate(folio))
copied = 0;
if (likely(copied)) {
ret = ext4_get_inode_loc(inode, &iloc);
if (ret) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
ext4_std_error(inode->i_sb, ret);
goto out;
}
@@ -761,30 +813,30 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
*/
(void) ext4_find_inline_data_nolock(inode);
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_folio(folio, 0);
ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
- kunmap_atomic(kaddr);
- SetPageUptodate(page);
- /* clear page dirty so that writepages wouldn't work for us. */
- ClearPageDirty(page);
+ kunmap_local(kaddr);
+ folio_mark_uptodate(folio);
+ /* clear dirty flag so that writepages wouldn't work for us. */
+ folio_clear_dirty(folio);
ext4_write_unlock_xattr(inode, &no_expand);
brelse(iloc.bh);
/*
- * It's important to update i_size while still holding page
+ * It's important to update i_size while still holding folio
* lock: page writeout could otherwise come in and zero
* beyond i_size.
*/
ext4_update_inode_size(inode, pos + copied);
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
/*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
+ * Don't mark the inode dirty under folio lock. First, it unnecessarily
+ * makes the holding time of folio lock longer. Second, it forces lock
+ * ordering of folio lock and transaction start for journaling
* filesystems.
*/
if (likely(copied))
@@ -813,30 +865,6 @@ out:
return ret ? ret : copied;
}
-struct buffer_head *
-ext4_journalled_write_inline_data(struct inode *inode,
- unsigned len,
- struct page *page)
-{
- int ret, no_expand;
- void *kaddr;
- struct ext4_iloc iloc;
-
- ret = ext4_get_inode_loc(inode, &iloc);
- if (ret) {
- ext4_std_error(inode->i_sb, ret);
- return NULL;
- }
-
- ext4_write_lock_xattr(inode, &no_expand);
- kaddr = kmap_atomic(page);
- ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
- kunmap_atomic(kaddr);
- ext4_write_unlock_xattr(inode, &no_expand);
-
- return iloc.bh;
-}
-
/*
* Try to make the page cache and handle ready for the inline data case.
* We can call this function in 2 cases:
@@ -848,15 +876,15 @@ ext4_journalled_write_inline_data(struct inode *inode,
*/
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
- unsigned flags,
void **fsdata)
{
int ret = 0, inline_size;
- struct page *page;
+ struct folio *folio;
- page = grab_cache_page_write_begin(mapping, 0, flags);
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
down_read(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
@@ -866,132 +894,37 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
inline_size = ext4_get_inline_size(inode);
- if (!PageUptodate(page)) {
- ret = ext4_read_inline_page(inode, page);
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_read_inline_folio(inode, folio);
if (ret < 0)
goto out;
}
- ret = __block_write_begin(page, 0, inline_size,
- ext4_da_get_block_prep);
+ ret = ext4_block_write_begin(NULL, folio, 0, inline_size,
+ ext4_da_get_block_prep);
if (ret) {
up_read(&EXT4_I(inode)->xattr_sem);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
ext4_truncate_failed_write(inode);
return ret;
}
- SetPageDirty(page);
- SetPageUptodate(page);
+ clear_buffer_new(folio_buffers(folio));
+ folio_mark_dirty(folio);
+ folio_mark_uptodate(folio);
ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
*fsdata = (void *)CONVERT_INLINE_DATA;
out:
up_read(&EXT4_I(inode)->xattr_sem);
- if (page) {
- unlock_page(page);
- put_page(page);
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
}
return ret;
}
-/*
- * Prepare the write for the inline data.
- * If the data can be written into the inode, we just read
- * the page and make it uptodate, and start the journal.
- * Otherwise read the page, makes it dirty so that it can be
- * handle in writepages(the i_disksize update is left to the
- * normal ext4_da_write_end).
- */
-int ext4_da_write_inline_data_begin(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- unsigned flags,
- struct page **pagep,
- void **fsdata)
-{
- int ret, inline_size;
- handle_t *handle;
- struct page *page;
- struct ext4_iloc iloc;
- int retries = 0;
-
- ret = ext4_get_inode_loc(inode, &iloc);
- if (ret)
- return ret;
-
-retry_journal:
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
-
- inline_size = ext4_get_max_inline_size(inode);
-
- ret = -ENOSPC;
- if (inline_size >= pos + len) {
- ret = ext4_prepare_inline_data(handle, inode, pos + len);
- if (ret && ret != -ENOSPC)
- goto out_journal;
- }
-
- /*
- * We cannot recurse into the filesystem as the transaction
- * is already started.
- */
- flags |= AOP_FLAG_NOFS;
-
- if (ret == -ENOSPC) {
- ext4_journal_stop(handle);
- ret = ext4_da_convert_inline_data_to_extent(mapping,
- inode,
- flags,
- fsdata);
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry_journal;
- goto out;
- }
-
- page = grab_cache_page_write_begin(mapping, 0, flags);
- if (!page) {
- ret = -ENOMEM;
- goto out_journal;
- }
-
- down_read(&EXT4_I(inode)->xattr_sem);
- if (!ext4_has_inline_data(inode)) {
- ret = 0;
- goto out_release_page;
- }
-
- if (!PageUptodate(page)) {
- ret = ext4_read_inline_page(inode, page);
- if (ret < 0)
- goto out_release_page;
- }
- ret = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh,
- EXT4_JTR_NONE);
- if (ret)
- goto out_release_page;
-
- up_read(&EXT4_I(inode)->xattr_sem);
- *pagep = page;
- brelse(iloc.bh);
- return 1;
-out_release_page:
- up_read(&EXT4_I(inode)->xattr_sem);
- unlock_page(page);
- put_page(page);
-out_journal:
- ext4_journal_stop(handle);
-out:
- brelse(iloc.bh);
- return ret;
-}
-
#ifdef INLINE_DIR_DEBUG
void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
void *inline_start, int inline_size)
@@ -1035,7 +968,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
int err;
struct ext4_dir_entry_2 *de;
- err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
+ err = ext4_find_dest_de(dir, iloc->bh, inline_start,
inline_size, fname, &de);
if (err)
return err;
@@ -1060,7 +993,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
return 1;
@@ -1082,20 +1015,20 @@ static void *ext4_get_inline_xattr_pos(struct inode *inode,
}
/* Set the final de to cover the whole block. */
-static void ext4_update_final_de(void *de_buf, int old_size, int new_size)
+void ext4_update_final_de(void *de_buf, int old_size, int new_size)
{
struct ext4_dir_entry_2 *de, *prev_de;
void *limit;
int de_len;
- de = (struct ext4_dir_entry_2 *)de_buf;
+ de = de_buf;
if (old_size) {
limit = de_buf + old_size;
do {
prev_de = de;
de_len = ext4_rec_len_from_disk(de->rec_len, old_size);
de_buf += de_len;
- de = (struct ext4_dir_entry_2 *)de_buf;
+ de = de_buf;
} while (de_buf < limit);
prev_de->rec_len = ext4_rec_len_to_disk(de_len + new_size -
@@ -1133,55 +1066,19 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc,
void *buf, int inline_size)
{
- ext4_create_inline_data(handle, inode, inline_size);
+ int ret;
+
+ ret = ext4_create_inline_data(handle, inode, inline_size);
+ if (ret) {
+ ext4_msg(inode->i_sb, KERN_EMERG,
+ "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
+ inode->i_ino, ret);
+ return;
+ }
ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
-static int ext4_finish_convert_inline_dir(handle_t *handle,
- struct inode *inode,
- struct buffer_head *dir_block,
- void *buf,
- int inline_size)
-{
- int err, csum_size = 0, header_size = 0;
- struct ext4_dir_entry_2 *de;
- void *target = dir_block->b_data;
-
- /*
- * First create "." and ".." and then copy the dir information
- * back to the block.
- */
- de = (struct ext4_dir_entry_2 *)target;
- de = ext4_init_dot_dotdot(inode, de,
- inode->i_sb->s_blocksize, csum_size,
- le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode), 1);
- header_size = (void *)de - target;
-
- memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
- inline_size - EXT4_INLINE_DOTDOT_SIZE);
-
- if (ext4_has_metadata_csum(inode->i_sb))
- csum_size = sizeof(struct ext4_dir_entry_tail);
-
- inode->i_size = inode->i_sb->s_blocksize;
- i_size_write(inode, inode->i_sb->s_blocksize);
- EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- ext4_update_final_de(dir_block->b_data,
- inline_size - EXT4_INLINE_DOTDOT_SIZE + header_size,
- inode->i_sb->s_blocksize - csum_size);
-
- if (csum_size)
- ext4_initialize_dirent_tail(dir_block,
- inode->i_sb->s_blocksize);
- set_buffer_uptodate(dir_block);
- err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
- if (err)
- return err;
- set_buffer_verified(dir_block);
- return ext4_mark_inode_dirty(handle, inode);
-}
-
static int ext4_convert_inline_data_nolock(handle_t *handle,
struct inode *inode,
struct ext4_iloc *iloc)
@@ -1249,14 +1146,23 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
if (!S_ISDIR(inode->i_mode)) {
memcpy(data_bh->b_data, buf, inline_size);
set_buffer_uptodate(data_bh);
+ unlock_buffer(data_bh);
error = ext4_handle_dirty_metadata(handle,
inode, data_bh);
} else {
- error = ext4_finish_convert_inline_dir(handle, inode, data_bh,
- buf, inline_size);
+ unlock_buffer(data_bh);
+ inode->i_size = inode->i_sb->s_blocksize;
+ i_size_write(inode, inode->i_sb->s_blocksize);
+ EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+
+ error = ext4_init_dirblock(handle, inode, data_bh,
+ le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode),
+ buf + EXT4_INLINE_DOTDOT_SIZE,
+ inline_size - EXT4_INLINE_DOTDOT_SIZE);
+ if (!error)
+ error = ext4_mark_inode_dirty(handle, inode);
}
- unlock_buffer(data_bh);
out_restore:
if (error)
ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
@@ -1393,7 +1299,7 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
if (pos == 0) {
fake.inode = cpu_to_le32(inode->i_ino);
fake.name_len = 1;
- strcpy(fake.name, ".");
+ memcpy(fake.name, ".", 2);
fake.rec_len = ext4_rec_len_to_disk(
ext4_dir_rec_len(fake.name_len, NULL),
inline_size);
@@ -1403,7 +1309,7 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
} else if (pos == EXT4_INLINE_DOTDOT_OFFSET) {
fake.inode = cpu_to_le32(parent_ino);
fake.name_len = 2;
- strcpy(fake.name, "..");
+ memcpy(fake.name, "..", 3);
fake.rec_len = ext4_rec_len_to_disk(
ext4_dir_rec_len(fake.name_len, NULL),
inline_size);
@@ -1425,7 +1331,11 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
hinfo->hash = EXT4_DIRENT_HASH(de);
hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de);
} else {
- ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+ err = ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+ if (err) {
+ ret = err;
+ goto out;
+ }
}
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
@@ -1471,6 +1381,7 @@ int ext4_read_inline_dir(struct file *file,
struct ext4_iloc iloc;
void *dir_buf = NULL;
int dotdot_offset, dotdot_size, extra_offset, extra_size;
+ struct dir_private_info *info = file->private_data;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -1514,12 +1425,12 @@ int ext4_read_inline_dir(struct file *file,
extra_size = extra_offset + inline_size;
/*
- * If the version has changed since the last call to
+ * If the cookie has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the inline
* dir to make sure.
*/
- if (!inode_eq_iversion(inode, file->f_version)) {
+ if (!inode_eq_iversion(inode, info->cookie)) {
for (i = 0; i < extra_size && i < offset;) {
/*
* "." is with offset 0 and
@@ -1551,7 +1462,7 @@ int ext4_read_inline_dir(struct file *file,
}
offset = i;
ctx->pos = offset;
- file->f_version = inode_query_iversion(inode);
+ info->cookie = inode_query_iversion(inode);
}
while (ctx->pos < extra_size) {
@@ -1588,6 +1499,35 @@ out:
return ret;
}
+void *ext4_read_inline_link(struct inode *inode)
+{
+ struct ext4_iloc iloc;
+ int ret, inline_size;
+ void *link;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = -ENOMEM;
+ inline_size = ext4_get_inline_size(inode);
+ link = kmalloc(inline_size + 1, GFP_NOFS);
+ if (!link)
+ goto out;
+
+ ret = ext4_read_inline_data(inode, link, inline_size, &iloc);
+ if (ret < 0) {
+ kfree(link);
+ goto out;
+ }
+ nd_terminate_link(link, inode->i_size, ret);
+out:
+ if (ret < 0)
+ link = ERR_PTR(ret);
+ brelse(iloc.bh);
+ return link;
+}
+
struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
struct ext4_dir_entry_2 **parent_de,
int *retval)
@@ -1646,24 +1586,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data)
{
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
int ret;
- struct ext4_iloc iloc;
void *inline_start;
int inline_size;
- if (ext4_get_inode_loc(dir, &iloc))
- return NULL;
+ ret = ext4_get_inode_loc(dir, &is.iloc);
+ if (ret)
+ return ERR_PTR(ret);
down_read(&EXT4_I(dir)->xattr_sem);
+
+ ret = ext4_xattr_ibody_find(dir, &i, &is);
+ if (ret)
+ goto out;
+
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
}
- inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+ inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block +
EXT4_INLINE_DOTDOT_SIZE;
inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
@@ -1673,20 +1625,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
goto out;
- inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+ inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc);
inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
out:
- brelse(iloc.bh);
- iloc.bh = NULL;
+ brelse(is.iloc.bh);
+ if (ret < 0)
+ is.iloc.bh = ERR_PTR(ret);
+ else
+ is.iloc.bh = NULL;
out_find:
up_read(&EXT4_I(dir)->xattr_sem);
- return iloc.bh;
+ return is.iloc.bh;
}
int ext4_delete_inline_entry(handle_t *handle,
@@ -1780,19 +1735,20 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
- bool ret = true;
+ bool ret = false;
err = ext4_get_inode_loc(dir, &iloc);
if (err) {
EXT4_ERROR_INODE_ERR(dir, -err,
"error %d getting inode %lu block",
err, dir->i_ino);
- return true;
+ return false;
}
down_read(&EXT4_I(dir)->xattr_sem);
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
+ ret = true;
goto out;
}
@@ -1801,7 +1757,6 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
ext4_warning(dir->i_sb,
"bad inline directory (dir #%lu) - no `..'",
dir->i_ino);
- ret = true;
goto out;
}
@@ -1820,16 +1775,15 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
dir->i_ino, le32_to_cpu(de->inode),
le16_to_cpu(de->rec_len), de->name_len,
inline_size);
- ret = true;
goto out;
}
if (le32_to_cpu(de->inode)) {
- ret = false;
goto out;
}
offset += ext4_rec_len_from_disk(de->rec_len, inline_size);
}
+ ret = true;
out:
up_read(&EXT4_I(dir)->xattr_sem);
brelse(iloc.bh);
@@ -1894,7 +1848,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
};
- needed_blocks = ext4_writepage_trans_blocks(inode);
+ needed_blocks = ext4_chunk_trans_extent(inode, 1);
handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks);
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -1925,24 +1879,20 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
* the extent status cache must be cleared to avoid leaving
* behind stale delayed allocated extent entries
*/
- if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
-retry:
- err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
- if (err == -ENOMEM) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
- }
- if (err)
- goto out_error;
- }
+ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+ ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
/* Clear the content in the xattr space. */
if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
goto out_error;
- BUG_ON(is.s.not_found);
+ if (is.s.not_found) {
+ EXT4_ERROR_INODE(inode,
+ "missing inline data xattr");
+ err = -EFSCORRUPTED;
+ goto out_error;
+ }
value_len = le32_to_cpu(is.s.here->e_value_size);
value = kmalloc(value_len, GFP_NOFS);
@@ -1986,7 +1936,7 @@ out:
ext4_orphan_del(handle, inode);
if (err == 0) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err = ext4_mark_inode_dirty(handle, inode);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
@@ -2004,9 +1954,21 @@ int ext4_convert_inline_data(struct inode *inode)
if (!ext4_has_inline_data(inode)) {
ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
return 0;
+ } else if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+ /*
+ * Inode has inline data but EXT4_STATE_MAY_INLINE_DATA is
+ * cleared. This means we are in the middle of moving the
+ * inline data to a delayed allocation block. Just force writeout
+ * here to finish conversion.
+ */
+ error = filemap_flush(inode->i_mapping);
+ if (error)
+ return error;
+ if (!ext4_has_inline_data(inode))
+ return 0;
}
- needed_blocks = ext4_writepage_trans_blocks(inode);
+ needed_blocks = ext4_chunk_trans_extent(inode, 1);
iloc.bh = NULL;
error = ext4_get_inode_loc(inode, &iloc);
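
The get_max_inline_xattr_value_size() fix above walks the in-inode xattr entries with an explicit end pointer so that a corrupted entry cannot push the loop past the inode buffer. Below is a self-contained sketch of that defensive walk over a variable-length record list; the record layout is simplified for the demo and is not the on-disk ext4_xattr_entry format.

#include <stdio.h>
#include <stdint.h>

/*
 * Each record is one length byte followed by that many payload bytes;
 * a zero length byte terminates the list.  Walk records but never step
 * past `end`: a corrupted length must not become an out-of-bounds read.
 */
static int walk(const uint8_t *buf, size_t size)
{
	const uint8_t *p = buf, *end = buf + size;
	int count = 0;

	while (p < end && *p != 0) {
		const uint8_t *next = p + 1 + *p;

		if (next > end) {
			fprintf(stderr, "corrupt record at offset %zu\n",
				(size_t)(p - buf));
			return -1;
		}
		count++;
		p = next;
	}
	return count;
}

int main(void)
{
	/* Two valid records ("ab", "xyz") followed by a terminator. */
	uint8_t good[] = { 2, 'a', 'b', 3, 'x', 'y', 'z', 0 };
	/* Length byte claims 200 bytes of payload that are not there. */
	uint8_t bad[]  = { 2, 'a', 'b', 200, 'x' };

	printf("good: %d records\n", walk(good, sizeof(good)));
	printf("bad:  %d records\n", walk(bad, sizeof(bad)));
	return 0;
}
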
diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c
index 7935ea6cf92c..749af7ad4e09 100644
--- a/fs/ext4/inode-test.c
+++ b/fs/ext4/inode-test.c
@@ -245,9 +245,9 @@ static void inode_test_xtimestamp_decoding(struct kunit *test)
struct timestamp_expectation *test_param =
(struct timestamp_expectation *)(test->param_value);
- timestamp.tv_sec = get_32bit_time(test_param);
- ext4_decode_extra_time(&timestamp,
- cpu_to_le32(test_param->extra_bits));
+ timestamp = ext4_decode_extra_time(
+ cpu_to_le32(get_32bit_time(test_param)),
+ cpu_to_le32(test_param->extra_bits));
KUNIT_EXPECT_EQ_MSG(test,
test_param->expected.tv_sec,
@@ -279,4 +279,5 @@ static struct kunit_suite ext4_inode_test_suite = {
kunit_test_suites(&ext4_inode_test_suite);
+MODULE_DESCRIPTION("KUnit test of ext4 inode timestamp decoding");
MODULE_LICENSE("GPL v2");
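
The inode-test.c hunk above follows ext4_decode_extra_time() becoming a pure function that takes the raw little-endian fields and returns the decoded timestamp. As a rough reference for the layout being tested (a signed 32-bit seconds field, two extra epoch bits, and 30 bits of nanoseconds), here is a self-contained decode sketch; it follows the documented on-disk format rather than reusing the kernel's __le32/timespec64 types.

#include <stdio.h>
#include <stdint.h>

#define EPOCH_BITS 2
#define EPOCH_MASK ((1u << EPOCH_BITS) - 1)

struct ts64 {
	int64_t  sec;
	uint32_t nsec;
};

/*
 * Decode an ext4 "extra" timestamp: the 32-bit on-disk seconds field is
 * signed, the low 2 bits of the extra field extend it upward by up to
 * 3 * 2^32 seconds, and the remaining 30 bits hold nanoseconds.
 */
static struct ts64 decode_extra_time(uint32_t raw_sec, uint32_t extra)
{
	struct ts64 ts;

	ts.sec = (int32_t)raw_sec;		/* sign-extend the base field */
	if (extra & EPOCH_MASK)
		ts.sec += (int64_t)(extra & EPOCH_MASK) << 32;
	ts.nsec = extra >> EPOCH_BITS;
	return ts;
}

int main(void)
{
	/* 1970-01-01 plus one extra epoch span, with 500000000 ns. */
	struct ts64 a = decode_extra_time(0, (500000000u << EPOCH_BITS) | 1);
	/* A pre-1970 time: raw seconds is negative, epoch bits are zero. */
	struct ts64 b = decode_extra_time((uint32_t)-86400, 0);

	printf("a: %lld.%09u\n", (long long)a.sec, a.nsec);
	printf("b: %lld.%09u\n", (long long)b.sec, b.nsec);
	return 0;
}
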
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bfd3545f1e5d..0c466ccbed69 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -31,6 +31,7 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
+#include <linux/rmap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
@@ -49,32 +50,35 @@
#include <trace/events/ext4.h>
+static void ext4_journalled_zero_new_buffers(handle_t *handle,
+ struct inode *inode,
+ struct folio *folio,
+ unsigned from, unsigned to);
+
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__u16 dummy_csum = 0;
int offset = offsetof(struct ext4_inode, i_checksum_lo);
unsigned int csum_size = sizeof(dummy_csum);
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)raw, offset);
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum, csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ csum = ext4_chksum(csum, (__u8 *)raw + offset,
EXT4_GOOD_OLD_INODE_SIZE - offset);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
offset = offsetof(struct ext4_inode, i_checksum_hi);
- csum = ext4_chksum(sbi, csum, (__u8 *)raw +
- EXT4_GOOD_OLD_INODE_SIZE,
+ csum = ext4_chksum(csum, (__u8 *)raw + EXT4_GOOD_OLD_INODE_SIZE,
offset - EXT4_GOOD_OLD_INODE_SIZE);
if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
}
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ csum = ext4_chksum(csum, (__u8 *)raw + offset,
EXT4_INODE_SIZE(inode->i_sb) - offset);
}
@@ -88,7 +92,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
- !ext4_has_metadata_csum(inode->i_sb))
+ !ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
provided = le16_to_cpu(raw->i_checksum_lo);
@@ -109,7 +113,7 @@ void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
- !ext4_has_metadata_csum(inode->i_sb))
+ !ext4_has_feature_metadata_csum(inode->i_sb))
return;
csum = ext4_inode_csum(inode, raw, ei);
@@ -136,19 +140,13 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
new_size);
}
-static void ext4_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-static int __ext4_journalled_writepage(struct page *page, unsigned int len);
-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
- int pextents);
-
/*
* Test whether an inode is a fast symlink.
* A fast symlink has its symlink data stored in ext4_inode_info->i_data.
*/
int ext4_inode_is_fast_symlink(struct inode *inode)
{
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ if (!ext4_has_feature_ea_inode(inode->i_sb)) {
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
@@ -179,35 +177,11 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
+ dax_break_layout_final(inode);
+
+ if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
+ ext4_evict_ea_inode(inode);
if (inode->i_nlink) {
- /*
- * When journalling data dirty buffers are tracked only in the
- * journal. So although mm thinks everything is clean and
- * ready for reaping the inode might still have some pages to
- * write in the running transaction or waiting to be
- * checkpointed. Thus calling jbd2_journal_invalidatepage()
- * (via truncate_inode_pages()) to discard these buffers can
- * cause data loss. Also even if we did not discard these
- * buffers, we would have no way to find them after the inode
- * is reaped and thus user could see stale data if he tries to
- * read them before the transaction is checkpointed. So be
- * careful and force everything to disk here... We use
- * ei->i_datasync_tid to store the newest transaction
- * containing inode's data.
- *
- * Note that directories do not have this problem because they
- * don't use page cache.
- */
- if (inode->i_ino != EXT4_JOURNAL_INO &&
- ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
- inode->i_data.nrpages) {
- journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
-
- jbd2_complete_transaction(journal, commit_tid);
- filemap_write_and_wait(&inode->i_data);
- }
truncate_inode_pages_final(&inode->i_data);
goto no_delete;
@@ -223,13 +197,12 @@ void ext4_evict_inode(struct inode *inode)
/*
* For inodes with journalled data, transaction commit could have
- * dirtied the inode. Flush worker is ignoring it because of I_FREEING
- * flag but we still need to remove the inode from the writeback lists.
+ * dirtied the inode. And for inodes with dioread_nolock, the unwritten
+ * extent conversion worker could merge extents and also have dirtied
+ * the inode. The flush worker ignores it because of the I_FREEING flag,
+ * but we still need to remove the inode from the writeback lists.
*/
- if (!list_empty_careful(&inode->i_io_list)) {
- WARN_ON_ONCE(!ext4_should_journal_data(inode));
- inode_io_list_del(inode);
- }
+ inode_io_list_del(inode);
/*
* Protect us against freezing - iput() caller didn't have to have any
@@ -336,8 +309,14 @@ stop_handle:
ext4_xattr_inode_array_free(ea_inode_array);
return;
no_delete:
+ /*
+ * Warn if something else has accidentally dirtied the inode being
+ * evicted, which could later lead to inode use-after-free issues.
+ */
+ WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
+
if (!list_empty(&EXT4_I(inode)->i_fc_list))
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
@@ -394,17 +373,18 @@ void ext4_da_update_reserve_space(struct inode *inode,
*/
if ((ei->i_reserved_data_blocks == 0) &&
!inode_is_open_for_write(inode))
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+
+ if (journal && inode == journal->j_inode)
return 0;
+
if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock %llu "
@@ -430,6 +410,32 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
return ret;
}
+/*
+ * For generic regular files, when updating the extent tree, Ext4 should
+ * hold the i_rwsem and invalidate_lock exclusively. This ensures
+ * exclusion against concurrent page faults, as well as reads and writes.
+ */
+#ifdef CONFIG_EXT4_DEBUG
+void ext4_check_map_extents_env(struct inode *inode)
+{
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+
+ if (!S_ISREG(inode->i_mode) ||
+ IS_NOQUOTA(inode) || IS_VERITY(inode) ||
+ is_special_ino(inode->i_sb, inode->i_ino) ||
+ (inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
+ ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
+ ext4_verity_in_progress(inode))
+ return;
+
+ WARN_ON_ONCE(!inode_is_locked(inode) &&
+ !rwsem_is_locked(&inode->i_mapping->invalidate_lock));
+}
+#else
+void ext4_check_map_extents_env(struct inode *inode) {}
+#endif
+
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
@@ -476,6 +482,191 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
}
#endif /* ES_AGGRESSIVE_TEST */
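+/*
+ * The extent returned by the previous lookup was the last one in its leaf
+ * block but did not cover the whole requested range, so look up the next
+ * extent in the following leaf. If it is physically contiguous with the
+ * first extent and of the same type, cache both as a single extent in the
+ * extent status tree and return the combined length; otherwise cache only
+ * the first extent and return its length.
+ */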
+static int ext4_map_query_blocks_next_in_leaf(handle_t *handle,
+ struct inode *inode, struct ext4_map_blocks *map,
+ unsigned int orig_mlen)
+{
+ struct ext4_map_blocks map2;
+ unsigned int status, status2;
+ int retval;
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+
+ WARN_ON_ONCE(!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF));
+ WARN_ON_ONCE(orig_mlen <= map->m_len);
+
+ /* Prepare map2 for lookup in next leaf block */
+ map2.m_lblk = map->m_lblk + map->m_len;
+ map2.m_len = orig_mlen - map->m_len;
+ map2.m_flags = 0;
+ retval = ext4_ext_map_blocks(handle, inode, &map2, 0);
+
+ if (retval <= 0) {
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, false);
+ return map->m_len;
+ }
+
+ if (unlikely(retval != map2.m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map2.m_len);
+ WARN_ON(1);
+ }
+
+ status2 = map2.m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+
+ /*
+ * If map2 is contiguous with map and of the same type, insert it as a
+ * single extent in the es cache and return the combined length of both
+ * maps.
+ */
+ if (map->m_pblk + map->m_len == map2.m_pblk &&
+ status == status2) {
+ ext4_es_insert_extent(inode, map->m_lblk,
+ map->m_len + map2.m_len, map->m_pblk,
+ status, false);
+ map->m_len += map2.m_len;
+ } else {
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, false);
+ }
+
+ return map->m_len;
+}
+
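+/*
+ * Look up the on-disk mapping of the requested blocks without allocating
+ * any new blocks, cache the result in the extent status tree, and, when the
+ * caller asked for the last extent in a leaf, continue the lookup into the
+ * next leaf block. Returns the number of mapped blocks, 0 for a hole, or a
+ * negative error code.
+ */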
+static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int flags)
+{
+ unsigned int status;
+ int retval;
+ unsigned int orig_mlen = map->m_len;
+
+ flags &= EXT4_EX_QUERY_FILTER;
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ else
+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
+ if (retval < 0)
+ return retval;
+
+ /* A hole? */
+ if (retval == 0)
+ goto out;
+
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
+ }
+
+ /*
+ * No need to query the next leaf:
+ * - if the returned extent is not the last one in its leaf, or
+ * - if the last extent in the leaf already covers the full requested range
+ */
+ if (!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF) ||
+ map->m_len == orig_mlen) {
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, false);
+ } else {
+ retval = ext4_map_query_blocks_next_in_leaf(handle, inode, map,
+ orig_mlen);
+ }
+out:
+ map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
+ return retval;
+}
+
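+/*
+ * Allocate new blocks for the requested range, zero them out if the caller
+ * asked for it, and cache the resulting extent in the extent status tree.
+ * Called with i_data_sem held in write mode. Returns the number of
+ * allocated blocks or a negative error code.
+ */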
+static int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int flags)
+{
+ struct extent_status es;
+ unsigned int status;
+ int err, retval = 0;
+
+ /*
+ * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE, which
+ * indicates that the blocks and quotas have already been checked
+ * when the data was copied into the page cache.
+ */
+ if (map->m_flags & EXT4_MAP_DELAYED)
+ flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
+ /*
+ * Here we clear m_flags because it will be set again after
+ * allocating a new extent.
+ */
+ map->m_flags &= ~EXT4_MAP_FLAGS;
+
+ /*
+ * We need to check the extents flag here because migration could
+ * have changed the inode type in between.
+ */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ } else {
+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
+
+ /*
+ * We allocated new blocks which will result in i_data's
+ * format changing. Force the migrate to fail by clearing
+ * migrate flags.
+ */
+ if (retval > 0 && map->m_flags & EXT4_MAP_NEW)
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+ }
+ if (retval <= 0)
+ return retval;
+
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode %lu: "
+ "retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
+ }
+
+ /*
+ * We have to zeroout blocks before inserting them into extent
+ * status tree. Otherwise someone could look them up there and
+ * use them before they are really zeroed. We also have to
+ * unmap metadata before zeroing as otherwise writeback can
+ * overwrite zeros with stale data from block device.
+ */
+ if (flags & EXT4_GET_BLOCKS_ZERO &&
+ map->m_flags & EXT4_MAP_MAPPED && map->m_flags & EXT4_MAP_NEW) {
+ err = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk,
+ map->m_len);
+ if (err)
+ return err;
+ }
+
+ /*
+ * If the extent has been zeroed out, we don't need to update
+ * extent status tree.
+ */
+ if (flags & EXT4_GET_BLOCKS_SPLIT_NOMERGE &&
+ ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, &map->m_seq)) {
+ if (ext4_es_is_written(&es))
+ return retval;
+ }
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk,
+ status, flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+ map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
+
+ return retval;
+}
+
/*
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped.
@@ -488,9 +679,10 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
* Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
* based files
*
- * On success, it returns the number of blocks being mapped or allocated. if
- * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
- * is marked as unwritten. If the create == 1, it will mark @map as mapped.
+ * On success, it returns the number of blocks being mapped or allocated.
+ * If flags doesn't contain EXT4_GET_BLOCKS_CREATE the blocks are
+ * pre-allocated and unwritten, the resulting @map is marked as unwritten.
+ * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
*
* It returns 0 if plain look up failed (blocks have not been allocated), in
* that case, @map is returned as unmapped but we still do fill map->m_len to
@@ -504,6 +696,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct extent_status es;
int retval;
int ret = 0;
+ unsigned int orig_mlen = map->m_len;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
@@ -524,9 +717,18 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
+ /*
+ * For regular files, callers in the data submission context are the
+ * only exception that may not hold the i_rwsem or invalidate_lock.
+ * However, even they are not permitted to cache unrelated ranges.
+ */
+ if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
+ WARN_ON_ONCE(!(flags & EXT4_EX_NOCACHE));
+ else
+ ext4_check_map_extents_env(inode);
+
/* Lookup extent status tree firstly */
- if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
- ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, &map->m_seq)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
@@ -538,6 +740,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
+ map->m_flags |= ext4_es_is_delayed(&es) ?
+ EXT4_MAP_DELAYED : 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
@@ -546,46 +750,32 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
} else {
BUG();
}
+
+ if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
+ return retval;
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
- goto found;
+ if (!(flags & EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF) ||
+ orig_mlen == map->m_len)
+ goto found;
+
+ map->m_len = orig_mlen;
}
+ /*
+ * In cached no-wait query mode, there is nothing more we can do if
+ * the extent cannot be found in the cache.
+ */
+ if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
+ return 0;
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, 0);
- } else {
- retval = ext4_ind_map_blocks(handle, inode, map, 0);
- }
- if (retval > 0) {
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
- }
-
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
- !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
- ret = ext4_es_insert_extent(inode, map->m_lblk,
- map->m_len, map->m_pblk, status);
- if (ret < 0)
- retval = ret;
- }
+ retval = ext4_map_query_blocks(handle, inode, map, flags);
up_read((&EXT4_I(inode)->i_data_sem));
found:
@@ -603,8 +793,7 @@ found:
* Returns if the blocks have already allocated
*
* Note that if blocks have been preallocated
- * ext4_ext_get_block() returns the create = 0
- * with buffer head unmapped.
+ * ext4_ext_map_blocks() returns with buffer head unmapped
*/
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
/*
@@ -615,12 +804,8 @@ found:
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
- /*
- * Here we clear m_flags because after allocating an new extent,
- * it will be set again.
- */
- map->m_flags &= ~EXT4_MAP_FLAGS;
+ ext4_fc_track_inode(handle, inode);
/*
* New blocks allocate and/or writing to unwritten extent
* will possibly result in updating i_data, so we take
@@ -628,92 +813,15 @@ found:
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
+ retval = ext4_map_create_blocks(handle, inode, map, flags);
+ up_write((&EXT4_I(inode)->i_data_sem));
- /*
- * We need to check for EXT4 here because migrate
- * could have changed the inode type in between
- */
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags);
- } else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags);
-
- if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
- /*
- * We allocated new blocks which will result in
- * i_data's format changing. Force the migrate
- * to fail by clearing migrate flags
- */
- ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
- }
-
- /*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now. We don't
- * support fallocate for non extent files. So we can update
- * reserve space here.
- */
- if ((retval > 0) &&
- (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
- ext4_da_update_reserve_space(inode, retval, 1);
- }
-
- if (retval > 0) {
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
- }
-
- /*
- * We have to zeroout blocks before inserting them into extent
- * status tree. Otherwise someone could look them up there and
- * use them before they are really zeroed. We also have to
- * unmap metadata before zeroing as otherwise writeback can
- * overwrite zeros with stale data from block device.
- */
- if (flags & EXT4_GET_BLOCKS_ZERO &&
- map->m_flags & EXT4_MAP_MAPPED &&
- map->m_flags & EXT4_MAP_NEW) {
- ret = ext4_issue_zeroout(inode, map->m_lblk,
- map->m_pblk, map->m_len);
- if (ret) {
- retval = ret;
- goto out_sem;
- }
- }
-
- /*
- * If the extent has been zeroed out, we don't need to update
- * extent status tree.
- */
- if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
- ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
- if (ext4_es_is_written(&es))
- goto out_sem;
- }
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
- !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
- ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- if (ret < 0) {
- retval = ret;
- goto out_sem;
- }
- }
+ if (retval < 0)
+ ext_debug(inode, "failed with err %d\n", retval);
+ if (retval <= 0)
+ return retval;
-out_sem:
- up_write((&EXT4_I(inode)->i_data_sem));
- if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ if (map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
@@ -728,9 +836,8 @@ out_sem:
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!ext4_is_quota_file(inode) &&
ext4_should_order_data(inode)) {
- loff_t start_byte =
- (loff_t)map->m_lblk << inode->i_blkbits;
- loff_t length = (loff_t)map->m_len << inode->i_blkbits;
+ loff_t start_byte = EXT4_LBLK_TO_B(inode, map->m_lblk);
+ loff_t length = EXT4_LBLK_TO_B(inode, map->m_len);
if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
ret = ext4_jbd2_inode_add_wait(handle, inode,
@@ -741,12 +848,9 @@ out_sem:
if (ret)
return ret;
}
- ext4_fc_track_range(handle, inode, map->m_lblk,
- map->m_lblk + map->m_len - 1);
}
-
- if (retval < 0)
- ext_debug(inode, "failed with err %d\n", retval);
+ ext4_fc_track_range(handle, inode, map->m_lblk, map->m_lblk +
+ map->m_len - 1);
return retval;
}
@@ -762,7 +866,7 @@ static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
flags &= EXT4_MAP_FLAGS;
/* Dummy buffer_head? Set non-atomically. */
- if (!bh->b_page) {
+ if (!bh->b_folio) {
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
return;
}
@@ -771,11 +875,30 @@ static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
* once we get rid of using bh as a container for mapping information
* to pass to / from get_block functions, this can go away.
*/
+ old_state = READ_ONCE(bh->b_state);
do {
- old_state = READ_ONCE(bh->b_state);
new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
- } while (unlikely(
- cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+ } while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
+}
+
+/*
+ * Make sure that the current journal transaction has enough credits to map
+ * one extent. Return -EAGAIN if it cannot extend the current running
+ * transaction.
+ */
+static inline int ext4_journal_ensure_extent_credits(handle_t *handle,
+ struct inode *inode)
+{
+ int credits;
+ int ret;
+
+ /* Called from ext4_da_write_begin() which has no handle started? */
+ if (!handle)
+ return 0;
+
+ credits = ext4_chunk_trans_blocks(inode, 1);
+ ret = __ext4_journal_ensure_credits(handle, credits, credits, 0);
+ return ret <= 0 ? ret : -EAGAIN;
}
static int _ext4_get_block(struct inode *inode, sector_t iblock,
@@ -819,10 +942,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
+ int ret = 0;
+
ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
inode->i_ino, create);
- return _ext4_get_block(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
+ ret = _ext4_get_block(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
+
+ /*
+ * If the buffer is marked unwritten, mark it as new to make sure it is
+ * zeroed out correctly in case of partial writes. Otherwise, there is
+ * a chance of stale data getting exposed.
+ */
+ if (ret == 0 && buffer_unwritten(bh_result))
+ set_buffer_new(bh_result);
+
+ return ret;
}
/* Maximum number of blocks we map for direct IO at once. */
@@ -837,10 +972,12 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
struct ext4_map_blocks map;
struct buffer_head *bh;
int create = map_flags & EXT4_GET_BLOCKS_CREATE;
+ bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
int err;
ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
|| handle != NULL || create == 0);
+ ASSERT(create == 0 || !nowait);
map.m_lblk = block;
map.m_len = 1;
@@ -851,7 +988,17 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
if (err < 0)
return ERR_PTR(err);
- bh = sb_getblk(inode->i_sb, map.m_pblk);
+ if (nowait)
+ return sb_find_get_block(inode->i_sb, map.m_pblk);
+
+ /*
+ * Since the bh may hold extra references (e.g. from an attached
+ * journal_head), try to avoid using __GFP_MOVABLE here, as page
+ * migration may fail while the journal_head remains.
+ */
+ bh = getblk_unmovable(inode->i_sb->s_bdev, map.m_pblk,
+ inode->i_sb->s_blocksize);
+
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
if (map.m_flags & EXT4_MAP_NEW) {
@@ -986,112 +1133,103 @@ int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
}
/*
- * To preserve ordering, it is essential that the hole instantiation and
- * the data write be encapsulated in a single transaction. We cannot
- * close off a transaction and start a new one between the ext4_get_block()
- * and the commit_write(). So doing the jbd2_journal_start at the start of
- * prepare_write() is the right place.
- *
- * Also, this function can nest inside ext4_writepage(). In that case, we
- * *know* that ext4_writepage() has generated enough buffer credits to do the
- * whole page. So we won't block on the journal in that case, which is good,
- * because the caller may be PF_MEMALLOC.
- *
- * By accident, ext4 can be reentered when a transaction is open via
- * quota file writes. If we were to commit the transaction while thus
- * reentered, there can be a deadlock - we would be holding a quota
- * lock, and the commit would never complete if another thread had a
- * transaction open and was blocking on the quota lock - a ranking
- * violation.
- *
- * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
- * will _not_ run commit under these circumstances because handle->h_ref
- * is elevated. We'll still have enough credits for the tiny quotafile
- * write.
+ * Helper for handling dirtying of journalled data. We also mark the folio as
+ * dirty so that the writeback code knows that this folio (and inode) contains
+ * dirty data. ext4_writepages() then commits the appropriate transaction to
+ * make the data stable.
*/
+static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
+{
+ struct folio *folio = bh->b_folio;
+ struct inode *inode = folio->mapping->host;
+
+ /* only regular files have a_ops */
+ if (S_ISREG(inode->i_mode))
+ folio_mark_dirty(folio);
+ return ext4_handle_dirty_metadata(handle, NULL, bh);
+}
+
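+/*
+ * Get journal write access for a buffer that is about to be modified as
+ * part of a write. Unmapped or freed buffers are skipped.
+ */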
int do_journal_get_write_access(handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
- int dirty = buffer_dirty(bh);
- int ret;
-
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
- /*
- * __block_write_begin() could have dirtied some buffers. Clean
- * the dirty bit as jbd2_journal_get_write_access() could complain
- * otherwise about fs integrity issues. Setting of the dirty bit
- * by __block_write_begin() isn't a real problem here as we clear
- * the bit before releasing a page lock and thus writeback cannot
- * ever write the buffer.
- */
- if (dirty)
- clear_buffer_dirty(bh);
BUFFER_TRACE(bh, "get write access");
- ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
+ return ext4_journal_get_write_access(handle, inode->i_sb, bh,
EXT4_JTR_NONE);
- if (!ret && dirty)
- ret = ext4_handle_dirty_metadata(handle, NULL, bh);
- return ret;
}
-#ifdef CONFIG_FS_ENCRYPTION
-static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
- get_block_t *get_block)
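+/*
+ * Map the buffers of @folio covering [pos, pos + len), allocating blocks
+ * where needed, and read in any parts that will not be completely
+ * overwritten. The running handle is extended with enough journal credits
+ * before each block allocation, and newly allocated buffers get journal
+ * write access when data journalling is enabled.
+ */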
+int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ loff_t pos, unsigned len,
+ get_block_t *get_block)
{
- unsigned from = pos & (PAGE_SIZE - 1);
+ unsigned int from = offset_in_folio(folio, pos);
unsigned to = from + len;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
- unsigned blocksize = inode->i_sb->s_blocksize;
- unsigned bbits;
+ unsigned int blocksize = i_blocksize(inode);
struct buffer_head *bh, *head, *wait[2];
int nr_wait = 0;
int i;
+ bool should_journal_data = ext4_should_journal_data(inode);
- BUG_ON(!PageLocked(page));
- BUG_ON(from > PAGE_SIZE);
- BUG_ON(to > PAGE_SIZE);
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(to > folio_size(folio));
BUG_ON(from > to);
+ WARN_ON_ONCE(blocksize > folio_size(folio));
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
- head = page_buffers(page);
- bbits = ilog2(blocksize);
- block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ head = folio_buffers(folio);
+ if (!head)
+ head = create_empty_buffers(folio, blocksize, 0);
+ block = EXT4_PG_TO_LBLK(inode, folio->index);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
set_buffer_uptodate(bh);
}
continue;
}
- if (buffer_new(bh))
+ if (WARN_ON_ONCE(buffer_new(bh)))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, block, bh, 1);
+ err = ext4_journal_ensure_extent_credits(handle, inode);
+ if (!err)
+ err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
- if (PageUptodate(page)) {
- clear_buffer_new(bh);
+ /*
+ * We may be zeroing partial buffers or all new
+ * buffers in case of failure. Prepare JBD2 for
+ * that.
+ */
+ if (should_journal_data)
+ do_journal_get_write_access(handle,
+ inode, bh);
+ if (folio_test_uptodate(folio)) {
+ /*
+ * Unlike __block_write_begin() we leave
+ * dirtying of new uptodate buffers to
+ * ->write_end() time or
+ * folio_zero_new_buffers().
+ */
set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
- zero_user_segments(page, to, block_end,
- block_start, from);
+ folio_zero_segments(folio, to,
+ block_end,
+ block_start, from);
continue;
}
}
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
set_buffer_uptodate(bh);
continue;
}
@@ -1111,13 +1249,17 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err)) {
- page_zero_new_buffers(page, from, to);
+ if (should_journal_data)
+ ext4_journalled_zero_new_buffers(handle, inode, folio,
+ from, to);
+ else
+ folio_zero_new_buffers(folio, from, to);
} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
for (i = 0; i < nr_wait; i++) {
int err2;
- err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
- bh_offset(wait[i]));
+ err2 = fscrypt_decrypt_pagecache_blocks(folio,
+ blocksize, bh_offset(wait[i]));
if (err2) {
clear_buffer_uptodate(wait[i]);
err = err2;
@@ -1127,36 +1269,43 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
return err;
}
-#endif
-static int ext4_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+/*
+ * To preserve ordering, it is essential that the hole instantiation and
+ * the data write be encapsulated in a single transaction. We cannot
+ * close off a transaction and start a new one between the ext4_get_block()
+ * and the ext4_write_end(). So doing the jbd2_journal_start at the start of
+ * ext4_write_begin() is the right place.
+ */
+static int ext4_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
int ret, needed_blocks;
handle_t *handle;
int retries = 0;
- struct page *page;
+ struct folio *folio;
pgoff_t index;
unsigned from, to;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
- trace_ext4_write_begin(inode, pos, len, flags);
+ trace_ext4_write_begin(inode, pos, len);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
*/
- needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
+ needed_blocks = ext4_chunk_trans_extent(inode,
+ ext4_journal_blocks_per_folio(inode)) + 1;
index = pos >> PAGE_SHIFT;
- from = pos & (PAGE_SIZE - 1);
- to = from + len;
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
- flags, pagep);
+ foliop);
if (ret < 0)
return ret;
if (ret == 1)
@@ -1164,65 +1313,71 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
}
/*
- * grab_cache_page_write_begin() can take a long time if the
- * system is thrashing due to memory pressure, or if the page
+ * write_begin_get_folio() can take a long time if the
+ * system is thrashing due to memory pressure, or if the folio
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
- * the page (if needed) without using GFP_NOFS.
+ * the folio (if needed) without using GFP_NOFS.
*/
retry_grab:
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
- unlock_page(page);
+ folio = write_begin_get_folio(iocb, mapping, index, len);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (len > folio_next_pos(folio) - pos)
+ len = folio_next_pos(folio) - pos;
+
+ from = offset_in_folio(folio, pos);
+ to = from + len;
+
+ /*
+ * As with the folio allocation above, preallocate the buffer heads
+ * before starting the handle.
+ */
+ if (!folio_buffers(folio))
+ create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);
+
+ folio_unlock(folio);
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
- put_page(page);
+ folio_put(folio);
return PTR_ERR(handle);
}
- lock_page(page);
- if (page->mapping != mapping) {
- /* The page got truncated from under us */
- unlock_page(page);
- put_page(page);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ /* The folio got truncated from under us */
+ folio_unlock(folio);
+ folio_put(folio);
ext4_journal_stop(handle);
goto retry_grab;
}
- /* In case writeback began while the page was unlocked */
- wait_for_stable_page(page);
+ /* In case writeback began while the folio was unlocked */
+ folio_wait_stable(folio);
-#ifdef CONFIG_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
- ret = ext4_block_write_begin(page, pos, len,
+ ret = ext4_block_write_begin(handle, folio, pos, len,
ext4_get_block_unwritten);
else
- ret = ext4_block_write_begin(page, pos, len,
+ ret = ext4_block_write_begin(handle, folio, pos, len,
ext4_get_block);
-#else
- if (ext4_should_dioread_nolock(inode))
- ret = __block_write_begin(page, pos, len,
- ext4_get_block_unwritten);
- else
- ret = __block_write_begin(page, pos, len, ext4_get_block);
-#endif
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode,
- page_buffers(page), from, to, NULL,
- do_journal_get_write_access);
+ folio_buffers(folio), from, to,
+ NULL, do_journal_get_write_access);
}
if (ret) {
bool extended = (pos + len > inode->i_size) &&
!ext4_verity_in_progress(inode);
- unlock_page(page);
+ folio_unlock(folio);
/*
- * __block_write_begin may have instantiated a few blocks
+ * ext4_block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
+ * i_size_read because we hold i_rwsem.
*
* Add inode to orphan list in case we crash before
* truncate finishes
@@ -1243,13 +1398,14 @@ retry_journal:
ext4_orphan_del(NULL, inode);
}
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries))
+ if (ret == -EAGAIN ||
+ (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries)))
goto retry_journal;
- put_page(page);
+ folio_put(folio);
return ret;
}
- *pagep = page;
+ *foliop = folio;
return ret;
}
@@ -1261,23 +1417,24 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
set_buffer_uptodate(bh);
- ret = ext4_handle_dirty_metadata(handle, NULL, bh);
+ ret = ext4_dirty_journalled_data(handle, bh);
clear_buffer_meta(bh);
clear_buffer_prio(bh);
+ clear_buffer_new(bh);
return ret;
}
/*
* We need to pick up the new inode size which generic_commit_write gave us
- * `file' can be NULL - eg, when called from page_symlink().
+ * `iocb` can be NULL - eg, when called from page_symlink().
*
- * ext4 never places buffers on inode->i_mapping->private_list. metadata
+ * ext4 never places buffers on inode->i_mapping->i_private_list. metadata
* buffers are managed internally.
*/
-static int ext4_write_end(struct file *file,
+static int ext4_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
@@ -1288,12 +1445,14 @@ static int ext4_write_end(struct file *file,
trace_ext4_write_end(inode, pos, len, copied);
- if (ext4_has_inline_data(inode))
- return ext4_write_inline_data_end(inode, pos, len, copied, page);
+ if (ext4_has_inline_data(inode) &&
+ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+ return ext4_write_inline_data_end(inode, pos, len, copied,
+ folio);
- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ copied = block_write_end(pos, len, copied, folio);
/*
- * it's important to update i_size while still holding page lock:
+ * it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size.
*
* If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
@@ -1301,15 +1460,17 @@ static int ext4_write_end(struct file *file,
*/
if (!verity)
i_size_changed = ext4_update_inode_size(inode, pos + copied);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
- if (old_size < pos && !verity)
+ if (old_size < pos && !verity) {
pagecache_isize_extended(inode, old_size, pos);
+ ext4_zero_partial_blocks(handle, inode, old_size, pos - old_size);
+ }
/*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
+ * Don't mark the inode dirty under folio lock. First, it unnecessarily
+ * makes the holding time of folio lock longer. Second, it forces lock
+ * ordering of folio lock and transaction start for journaling
* filesystems.
*/
if (i_size_changed)
@@ -1341,33 +1502,33 @@ static int ext4_write_end(struct file *file,
}
/*
- * This is a private version of page_zero_new_buffers() which doesn't
+ * This is a private version of folio_zero_new_buffers() which doesn't
* set the buffer to be dirty, since in data=journalled mode we need
- * to call ext4_handle_dirty_metadata() instead.
+ * to call ext4_dirty_journalled_data() instead.
*/
static void ext4_journalled_zero_new_buffers(handle_t *handle,
struct inode *inode,
- struct page *page,
+ struct folio *folio,
unsigned from, unsigned to)
{
unsigned int block_start = 0, block_end;
struct buffer_head *head, *bh;
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
unsigned start, size;
start = max(from, block_start);
size = min(to, block_end) - start;
- zero_user(page, start, size);
- write_end_fn(handle, inode, bh);
+ folio_zero_range(folio, start, size);
}
clear_buffer_new(bh);
+ write_end_fn(handle, inode, bh);
}
}
block_start = block_end;
@@ -1375,10 +1536,10 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
} while (bh != head);
}
-static int ext4_journalled_write_end(struct file *file,
+static int ext4_journalled_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
@@ -1396,30 +1557,34 @@ static int ext4_journalled_write_end(struct file *file,
BUG_ON(!ext4_handle_valid(handle));
if (ext4_has_inline_data(inode))
- return ext4_write_inline_data_end(inode, pos, len, copied, page);
+ return ext4_write_inline_data_end(inode, pos, len, copied,
+ folio);
- if (unlikely(copied < len) && !PageUptodate(page)) {
+ if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
copied = 0;
- ext4_journalled_zero_new_buffers(handle, inode, page, from, to);
+ ext4_journalled_zero_new_buffers(handle, inode, folio,
+ from, to);
} else {
if (unlikely(copied < len))
- ext4_journalled_zero_new_buffers(handle, inode, page,
+ ext4_journalled_zero_new_buffers(handle, inode, folio,
from + copied, to);
- ret = ext4_walk_page_buffers(handle, inode, page_buffers(page),
+ ret = ext4_walk_page_buffers(handle, inode,
+ folio_buffers(folio),
from, from + copied, &partial,
write_end_fn);
if (!partial)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
if (!verity)
size_changed = ext4_update_inode_size(inode, pos + copied);
- ext4_set_inode_state(inode, EXT4_STATE_JDATA);
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
- if (old_size < pos && !verity)
+ if (old_size < pos && !verity) {
pagecache_isize_extended(inode, old_size, pos);
+ ext4_zero_partial_blocks(handle, inode, old_size, pos - old_size);
+ }
if (size_changed) {
ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -1452,9 +1617,9 @@ static int ext4_journalled_write_end(struct file *file,
}
/*
- * Reserve space for a single cluster
+ * Reserve space for 'nr_resv' clusters
*/
-static int ext4_da_reserve_space(struct inode *inode)
+static int ext4_da_reserve_space(struct inode *inode, int nr_resv)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -1465,18 +1630,18 @@ static int ext4_da_reserve_space(struct inode *inode)
* us from metadata over-estimation, though we may go over by
* a small amount in the end. Here we just reserve for data.
*/
- ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, nr_resv));
if (ret)
return ret;
spin_lock(&ei->i_block_reservation_lock);
- if (ext4_claim_free_clusters(sbi, 1, 0)) {
+ if (ext4_claim_free_clusters(sbi, nr_resv, 0)) {
spin_unlock(&ei->i_block_reservation_lock);
- dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ dquot_release_reservation_block(inode, EXT4_C2B(sbi, nr_resv));
return -ENOSPC;
}
- ei->i_reserved_data_blocks++;
- trace_ext4_da_reserve_space(inode);
+ ei->i_reserved_data_blocks += nr_resv;
+ trace_ext4_da_reserve_space(inode, nr_resv);
spin_unlock(&ei->i_block_reservation_lock);
return 0; /* success */
@@ -1522,14 +1687,18 @@ void ext4_da_release_space(struct inode *inode, int to_free)
*/
struct mpage_da_data {
+ /* These are input fields for ext4_do_writepages() */
struct inode *inode;
struct writeback_control *wbc;
+ unsigned int can_map:1; /* Can writepages call map blocks? */
+
+ /* These are internal state of ext4_do_writepages() */
+ loff_t start_pos; /* The start pos to write */
+ loff_t next_pos; /* Current pos to examine */
+ loff_t end_pos; /* Last pos to examine */
- pgoff_t first_page; /* The first page to write */
- pgoff_t next_page; /* Current page to examine */
- pgoff_t last_page; /* Last page to examine */
/*
- * Extent to map - this can be after first_page because that can be
+ * Extent to map - this can be after start_pos because that can be
* fully mapped. We somewhat abuse m_flags to store whether the extent
* is delalloc or unwritten.
*/
@@ -1537,50 +1706,63 @@ struct mpage_da_data {
struct ext4_io_submit io_submit; /* IO submission data */
unsigned int do_map:1;
unsigned int scanned_until_end:1;
+ unsigned int journalled_more_data:1;
};
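+/*
+ * Unlock and release the folios in the range [start_pos, next_pos) that
+ * were collected but will not be written out. If @invalidate is set, also
+ * invalidate the folios and drop the corresponding range from the extent
+ * status tree.
+ */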
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
bool invalidate)
{
- int nr_pages, i;
+ unsigned nr, i;
pgoff_t index, end;
- struct pagevec pvec;
+ struct folio_batch fbatch;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
- /* This is necessary when next_page == 0. */
- if (mpd->first_page >= mpd->next_page)
+ /* This is necessary when next_pos == 0. */
+ if (mpd->start_pos >= mpd->next_pos)
return;
mpd->scanned_until_end = 0;
- index = mpd->first_page;
- end = mpd->next_page - 1;
if (invalidate) {
ext4_lblk_t start, last;
- start = index << (PAGE_SHIFT - inode->i_blkbits);
- last = end << (PAGE_SHIFT - inode->i_blkbits);
- ext4_es_remove_extent(inode, start, last - start + 1);
+ start = EXT4_B_TO_LBLK(inode, mpd->start_pos);
+ last = mpd->next_pos >> inode->i_blkbits;
+
+ /*
+ * avoid racing with extent status tree scans made by
+ * ext4_insert_delayed_block()
+ */
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_es_remove_extent(inode, start, last - start);
+ up_write(&EXT4_I(inode)->i_data_sem);
}
- pagevec_init(&pvec);
- while (index <= end) {
- nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
- if (nr_pages == 0)
+ folio_batch_init(&fbatch);
+ index = mpd->start_pos >> PAGE_SHIFT;
+ end = mpd->next_pos >> PAGE_SHIFT;
+ while (index < end) {
+ nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
+ if (nr == 0)
break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr; i++) {
+ struct folio *folio = fbatch.folios[i];
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
+ if (folio_pos(folio) < mpd->start_pos)
+ continue;
+ if (folio_next_index(folio) > end)
+ continue;
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(folio_test_writeback(folio));
if (invalidate) {
- if (page_mapped(page))
- clear_page_dirty_for_io(page);
- block_invalidatepage(page, 0, PAGE_SIZE);
- ClearPageUptodate(page);
+ if (folio_mapped(folio))
+ folio_clear_dirty_for_io(folio);
+ block_invalidate_folio(folio, 0,
+ folio_size(folio));
+ folio_clear_uptodate(folio);
}
- unlock_page(page);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
}
}
@@ -1606,32 +1788,59 @@ static void ext4_print_free_blocks(struct inode *inode)
return;
}
-static int ext4_bh_delay_or_unwritten(handle_t *handle, struct inode *inode,
- struct buffer_head *bh)
+/*
+ * Check whether the cluster containing lblk has already been allocated or
+ * has a delalloc reservation.
+ *
+ * Returns 0 if the cluster has neither, 1 if it has a delalloc
+ * reservation, 2 if it has already been allocated, or a negative error code on
+ * failure.
+ */
+static int ext4_clu_alloc_state(struct inode *inode, ext4_lblk_t lblk)
{
- return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int ret;
+
+ /* Has delalloc reservation? */
+ if (ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
+ return 1;
+
+ /* Already been allocated? */
+ if (ext4_es_scan_clu(inode, &ext4_es_is_mapped, lblk))
+ return 2;
+ ret = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 2;
+
+ return 0;
}
/*
- * ext4_insert_delayed_block - adds a delayed block to the extents status
- * tree, incrementing the reserved cluster/block
- * count or making a pending reservation
- * where needed
+ * ext4_insert_delayed_blocks - adds multiple delayed blocks to the extents
+ * status tree, incrementing the reserved
+ * cluster/block count or making pending
+ * reservations where needed
*
* @inode - file containing the newly added block
- * @lblk - logical block to be added
+ * @lblk - start logical block to be added
+ * @len - length of blocks to be added
*
* Returns 0 on success, negative error code on failure.
*/
-static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
+static int ext4_insert_delayed_blocks(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int ret;
- bool allocated = false;
- bool reserved = false;
+ bool lclu_allocated = false;
+ bool end_allocated = false;
+ ext4_lblk_t resv_clu;
+ ext4_lblk_t end = lblk + len - 1;
/*
- * If the cluster containing lblk is shared with a delayed,
+ * If the cluster containing lblk or end is shared with a delayed,
* written, or unwritten extent in a bigalloc file system, it's
* already been accounted for and does not need to be reserved.
* A pending reservation must be made for the cluster if it's
@@ -1642,90 +1851,86 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
* extents status tree doesn't get a match.
*/
if (sbi->s_cluster_ratio == 1) {
- ret = ext4_da_reserve_space(inode);
+ ret = ext4_da_reserve_space(inode, len);
if (ret != 0) /* ENOSPC */
- goto errout;
- reserved = true;
+ return ret;
} else { /* bigalloc */
- if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
- if (!ext4_es_scan_clu(inode,
- &ext4_es_is_mapped, lblk)) {
- ret = ext4_clu_mapped(inode,
- EXT4_B2C(sbi, lblk));
- if (ret < 0)
- goto errout;
- if (ret == 0) {
- ret = ext4_da_reserve_space(inode);
- if (ret != 0) /* ENOSPC */
- goto errout;
- reserved = true;
- } else {
- allocated = true;
- }
- } else {
- allocated = true;
+ resv_clu = EXT4_B2C(sbi, end) - EXT4_B2C(sbi, lblk) + 1;
+
+ ret = ext4_clu_alloc_state(inode, lblk);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ resv_clu--;
+ lclu_allocated = (ret == 2);
+ }
+
+ if (EXT4_B2C(sbi, lblk) != EXT4_B2C(sbi, end)) {
+ ret = ext4_clu_alloc_state(inode, end);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ resv_clu--;
+ end_allocated = (ret == 2);
}
}
- }
- ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
- if (ret && reserved)
- ext4_da_release_space(inode, 1);
+ if (resv_clu) {
+ ret = ext4_da_reserve_space(inode, resv_clu);
+ if (ret != 0) /* ENOSPC */
+ return ret;
+ }
+ }
-errout:
- return ret;
+ ext4_es_insert_delayed_extent(inode, lblk, len, lclu_allocated,
+ end_allocated);
+ return 0;
}
/*
- * This function is grabs code from the very beginning of
- * ext4_map_blocks, but assumes that the caller is from delayed write
- * time. This function looks up the requested blocks and sets the
- * buffer delay bit under the protection of i_data_sem.
+ * Looks up the requested blocks and sets the delalloc extent map.
+ * First, try to look up the extent entry that contains the requested
+ * blocks in the extent status tree without holding i_data_sem. Then try
+ * to look up the on-disk extent mapping with i_data_sem held in read
+ * mode. Finally, take i_data_sem in write mode, look up again, and add a
+ * delalloc extent entry if still no extent is found. Pass the mapped
+ * extent back through @map and return 0 on success.
*/
-static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
- struct ext4_map_blocks *map,
- struct buffer_head *bh)
+static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
{
struct extent_status es;
int retval;
- sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
- if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
- invalid_block = ~0;
-
map->m_flags = 0;
ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
(unsigned long) map->m_lblk);
+ ext4_check_map_extents_env(inode);
+
/* Lookup extent status tree firstly */
- if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
- if (ext4_es_is_hole(&es)) {
- retval = 0;
- down_read(&EXT4_I(inode)->i_data_sem);
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
+ map->m_len = min_t(unsigned int, map->m_len,
+ es.es_len - (map->m_lblk - es.es_lblk));
+
+ if (ext4_es_is_hole(&es))
goto add_delayed;
- }
+found:
/*
* Delayed extent could be allocated by fallocate.
* So we need to check it.
*/
- if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
+ if (ext4_es_is_delayed(&es)) {
+ map->m_flags |= EXT4_MAP_DELAYED;
return 0;
}
- map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
- retval = es.es_len - (iblock - es.es_lblk);
- if (retval > map->m_len)
- retval = map->m_len;
- map->m_len = retval;
+ map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk;
if (ext4_es_is_written(&es))
map->m_flags |= EXT4_MAP_MAPPED;
else if (ext4_es_is_unwritten(&es))
@@ -1736,7 +1941,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
- return retval;
+ return 0;
}
/*
@@ -1746,51 +1951,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_has_inline_data(inode))
retval = 0;
- else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
- retval = ext4_ind_map_blocks(NULL, inode, map, 0);
+ retval = ext4_map_query_blocks(NULL, inode, map, 0);
+ up_read(&EXT4_I(inode)->i_data_sem);
+ if (retval)
+ return retval < 0 ? retval : 0;
add_delayed:
- if (retval == 0) {
- int ret;
-
- /*
- * XXX: __block_prepare_write() unmaps passed block,
- * is it OK?
- */
+ down_write(&EXT4_I(inode)->i_data_sem);
+ /*
+ * The page fault path (ext4_page_mkwrite() does not take i_rwsem)
+ * and the fallocate path (no folio lock) can race with us. Make sure
+ * we look up the extent status tree again here while i_data_sem is
+ * held in write mode, before inserting a new delalloc entry into it.
+ */
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
+ map->m_len = min_t(unsigned int, map->m_len,
+ es.es_len - (map->m_lblk - es.es_lblk));
- ret = ext4_insert_delayed_block(inode, map->m_lblk);
- if (ret != 0) {
- retval = ret;
- goto out_unlock;
+ if (!ext4_es_is_hole(&es)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto found;
}
-
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
- } else if (retval > 0) {
- int ret;
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
+ } else if (!ext4_has_inline_data(inode)) {
+ retval = ext4_map_query_blocks(NULL, inode, map, 0);
+ if (retval) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ return retval < 0 ? retval : 0;
}
-
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- if (ret != 0)
- retval = ret;
}
-out_unlock:
- up_read((&EXT4_I(inode)->i_data_sem));
+ map->m_flags |= EXT4_MAP_DELAYED;
+ retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
+ if (!retval)
+ map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
+ up_write(&EXT4_I(inode)->i_data_sem);
return retval;
}
@@ -1811,11 +2007,15 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
struct ext4_map_blocks map;
+ sector_t invalid_block = ~((sector_t) 0xffff);
int ret = 0;
BUG_ON(create == 0);
BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
+ if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
+ invalid_block = ~0;
+
map.m_lblk = iblock;
map.m_len = 1;
@@ -1824,10 +2024,17 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
* preallocated blocks are unmapped but should be treated
* the same as allocated blocks.
*/
- ret = ext4_da_map_blocks(inode, iblock, &map, bh);
- if (ret <= 0)
+ ret = ext4_da_map_blocks(inode, &map);
+ if (ret < 0)
return ret;
+ if (map.m_flags & EXT4_MAP_DELAYED) {
+ map_bh(bh, inode->i_sb, invalid_block);
+ set_buffer_new(bh);
+ set_buffer_delay(bh);
+ return 0;
+ }
+
map_bh(bh, inode->i_sb, map.m_pblk);
ext4_update_bh_state(bh, map.m_flags);
@@ -1844,258 +2051,40 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
return 0;
}
-static int bget_one(handle_t *handle, struct inode *inode,
- struct buffer_head *bh)
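+/*
+ * Account a fully handled folio: advance the writeback position past it,
+ * charge its pages against wbc->nr_to_write and unlock it.
+ */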
+static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
{
- get_bh(bh);
- return 0;
+ mpd->start_pos += folio_size(folio);
+ mpd->wbc->nr_to_write -= folio_nr_pages(folio);
+ folio_unlock(folio);
}
-static int bput_one(handle_t *handle, struct inode *inode,
- struct buffer_head *bh)
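+/*
+ * Submit a single locked, dirty folio at mpd->start_pos for writeback.
+ * i_size is sampled only after folio_clear_dirty_for_io() has
+ * write-protected the folio in the page tables.
+ */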
+static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
{
- put_bh(bh);
- return 0;
-}
-
-static int __ext4_journalled_writepage(struct page *page,
- unsigned int len)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct buffer_head *page_bufs = NULL;
- handle_t *handle = NULL;
- int ret = 0, err = 0;
- int inline_data = ext4_has_inline_data(inode);
- struct buffer_head *inode_bh = NULL;
-
- ClearPageChecked(page);
-
- if (inline_data) {
- BUG_ON(page->index != 0);
- BUG_ON(len > ext4_get_max_inline_size(inode));
- inode_bh = ext4_journalled_write_inline_data(inode, len, page);
- if (inode_bh == NULL)
- goto out;
- } else {
- page_bufs = page_buffers(page);
- if (!page_bufs) {
- BUG();
- goto out;
- }
- ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
- NULL, bget_one);
- }
- /*
- * We need to release the page lock before we start the
- * journal, so grab a reference so the page won't disappear
- * out from under us.
- */
- get_page(page);
- unlock_page(page);
-
- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
- ext4_writepage_trans_blocks(inode));
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- put_page(page);
- goto out_no_pagelock;
- }
- BUG_ON(!ext4_handle_valid(handle));
-
- lock_page(page);
- put_page(page);
- if (page->mapping != mapping) {
- /* The page got truncated from under us */
- ext4_journal_stop(handle);
- ret = 0;
- goto out;
- }
-
- if (inline_data) {
- ret = ext4_mark_inode_dirty(handle, inode);
- } else {
- ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
- NULL, do_journal_get_write_access);
-
- err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
- NULL, write_end_fn);
- }
- if (ret == 0)
- ret = err;
- err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
- if (ret == 0)
- ret = err;
- EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
-
- ext4_set_inode_state(inode, EXT4_STATE_JDATA);
-out:
- unlock_page(page);
-out_no_pagelock:
- if (!inline_data && page_bufs)
- ext4_walk_page_buffers(NULL, inode, page_bufs, 0, len,
- NULL, bput_one);
- brelse(inode_bh);
- return ret;
-}
-
-/*
- * Note that we don't need to start a transaction unless we're journaling data
- * because we should have holes filled from ext4_page_mkwrite(). We even don't
- * need to file the inode to the transaction's list in ordered mode because if
- * we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), no one guarantees in which
- * transaction the data will hit the disk. In case we are journaling data, we
- * cannot start transaction directly because transaction start ranks above page
- * lock so we have to do some magic.
- *
- * This function can get called via...
- * - ext4_writepages after taking page lock (have journal handle)
- * - journal_submit_inode_data_buffers (no journal handle)
- * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
- * - grab_page_cache when doing write_begin (have journal handle)
- *
- * We don't do any block allocation in this function. If we have page with
- * multiple blocks we need to write those buffer_heads that are mapped. This
- * is important for mmaped based write. So if we do with blocksize 1K
- * truncate(f, 1024);
- * a = mmap(f, 0, 4096);
- * a[0] = 'a';
- * truncate(f, 4096);
- * we have in the page first buffer_head mapped via page_mkwrite call back
- * but other buffer_heads would be unmapped but dirty (dirty done via the
- * do_wp_page). So writepage should write the first block. If we modify
- * the mmap area beyond 1024 we will again get a page_fault and the
- * page_mkwrite callback will do the block allocation and mark the
- * buffer_heads mapped.
- *
- * We redirty the page if we have any buffer_heads that is either delay or
- * unwritten in the page.
- *
- * We can get recursively called as show below.
- *
- * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
- * ext4_writepage()
- *
- * But since we don't do any block allocation we should not deadlock.
- * Page also have the dirty flag cleared so we don't get recurive page_lock.
- */
-static int ext4_writepage(struct page *page,
- struct writeback_control *wbc)
-{
- int ret = 0;
- loff_t size;
- unsigned int len;
- struct buffer_head *page_bufs = NULL;
- struct inode *inode = page->mapping->host;
- struct ext4_io_submit io_submit;
- bool keep_towrite = false;
-
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
- inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
- unlock_page(page);
- return -EIO;
- }
-
- trace_ext4_writepage(page);
- size = i_size_read(inode);
- if (page->index == size >> PAGE_SHIFT &&
- !ext4_verity_in_progress(inode))
- len = size & ~PAGE_MASK;
- else
- len = PAGE_SIZE;
-
- page_bufs = page_buffers(page);
- /*
- * We cannot do block allocation or other extent handling in this
- * function. If there are buffers needing that, we have to redirty
- * the page. But we may reach here when we do a journal commit via
- * journal_submit_inode_data_buffers() and in that case we must write
- * allocated buffers to achieve data=ordered mode guarantees.
- *
- * Also, if there is only one buffer per page (the fs block
- * size == the page size), if one buffer needs block
- * allocation or needs to modify the extent tree to clear the
- * unwritten flag, we know that the page can't be written at
- * all, so we might as well refuse the write immediately.
- * Unfortunately if the block size != page size, we can't as
- * easily detect this case using ext4_walk_page_buffers(), but
- * for the extremely common case, this is an optimization that
- * skips a useless round trip through ext4_bio_write_page().
- */
- if (ext4_walk_page_buffers(NULL, inode, page_bufs, 0, len, NULL,
- ext4_bh_delay_or_unwritten)) {
- redirty_page_for_writepage(wbc, page);
- if ((current->flags & PF_MEMALLOC) ||
- (inode->i_sb->s_blocksize == PAGE_SIZE)) {
- /*
- * For memory cleaning there's no point in writing only
- * some buffers. So just bail out. Warn if we came here
- * from direct reclaim.
- */
- WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
- == PF_MEMALLOC);
- unlock_page(page);
- return 0;
- }
- keep_towrite = true;
- }
-
- if (PageChecked(page) && ext4_should_journal_data(inode))
- /*
- * It's mmapped pagecache. Add buffers and journal it. There
- * doesn't seem much point in redirtying the page here.
- */
- return __ext4_journalled_writepage(page, len);
-
- ext4_io_submit_init(&io_submit, wbc);
- io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_submit.io_end) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return -ENOMEM;
- }
- ret = ext4_bio_write_page(&io_submit, page, len, keep_towrite);
- ext4_io_submit(&io_submit);
- /* Drop io_end reference we got from init */
- ext4_put_io_end_defer(io_submit.io_end);
- return ret;
-}
-
-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
-{
- int len;
+ size_t len;
loff_t size;
int err;
- BUG_ON(page->index != mpd->first_page);
- clear_page_dirty_for_io(page);
+ WARN_ON_ONCE(folio_pos(folio) != mpd->start_pos);
+ folio_clear_dirty_for_io(folio);
/*
* We have to be very careful here! Nothing protects writeback path
* against i_size changes and the page can be writeably mapped into
* page tables. So an application can be growing i_size and writing
- * data through mmap while writeback runs. clear_page_dirty_for_io()
+ * data through mmap while writeback runs. folio_clear_dirty_for_io()
* write-protects our page in page tables and the page cannot get
- * written to again until we release page lock. So only after
- * clear_page_dirty_for_io() we are safe to sample i_size for
- * ext4_bio_write_page() to zero-out tail of the written page. We rely
- * on the barrier provided by TestClearPageDirty in
- * clear_page_dirty_for_io() to make sure i_size is really sampled only
+ * written to again until we release folio lock. So only after
+ * folio_clear_dirty_for_io() we are safe to sample i_size for
+ * ext4_bio_write_folio() to zero-out tail of the written page. We rely
+ * on the barrier provided by folio_test_clear_dirty() in
+ * folio_clear_dirty_for_io() to make sure i_size is really sampled only
* after page tables are updated.
*/
size = i_size_read(mpd->inode);
- if (page->index == size >> PAGE_SHIFT &&
+ len = folio_size(folio);
+ if (folio_pos(folio) + len > size &&
!ext4_verity_in_progress(mpd->inode))
- len = size & ~PAGE_MASK;
- else
- len = PAGE_SIZE;
- err = ext4_bio_write_page(&mpd->io_submit, page, len, false);
- if (!err)
- mpd->wbc->nr_to_write--;
- mpd->first_page++;
+ len = size & (len - 1);
+ err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
return err;
}
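
As a minimal illustration (not ext4 code, numbers assumed): the tail-length computation above, len = size & (len - 1), relies on folio_size() being a power of two and folios being naturally aligned in the page cache, so the mask yields the byte offset of EOF within the last folio.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long folio_size = 16384;	/* assumed 16 KiB folio */
		unsigned long long i_size = 1000000;	/* assumed file size in bytes */
		unsigned long long folio_pos = i_size & ~(folio_size - 1);
		unsigned long long tail = i_size & (folio_size - 1);

		/* The folio at folio_pos straddles EOF: only 'tail' bytes are valid. */
		printf("last folio at %llu, valid bytes = %llu\n", folio_pos, tail);
		return 0;
	}

Here folio_pos works out to 999424 and tail to 576, i.e. only the first 576 bytes of the final folio are submitted for writeback.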
@@ -2206,9 +2195,10 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
} while (lblk++, (bh = bh->b_this_page) != head);
/* So far everything mapped? Submit the page for IO. */
if (mpd->map.m_len == 0) {
- err = mpage_submit_page(mpd, head->b_page);
+ err = mpage_submit_folio(mpd, head->b_folio);
if (err < 0)
return err;
+ mpage_folio_done(mpd, head->b_folio);
}
if (lblk >= blocks) {
mpd->scanned_until_end = 1;
@@ -2218,21 +2208,22 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
}
/*
- * mpage_process_page - update page buffers corresponding to changed extent and
- * may submit fully mapped page for IO
- *
- * @mpd - description of extent to map, on return next extent to map
- * @m_lblk - logical block mapping.
- * @m_pblk - corresponding physical mapping.
- * @map_bh - determines on return whether this page requires any further
+ * mpage_process_folio - update folio buffers corresponding to changed extent
+ * and may submit fully mapped page for IO
+ * @mpd: description of extent to map, on return next extent to map
+ * @folio: Contains these buffers.
+ * @m_lblk: logical block mapping.
+ * @m_pblk: corresponding physical mapping.
+ * @map_bh: determines on return whether this page requires any further
* mapping or not.
- * Scan given page buffers corresponding to changed extent and update buffer
+ *
+ * Scan given folio buffers corresponding to changed extent and update buffer
* state according to new extent state.
* We map delalloc buffers to their physical location, clear unwritten bits.
- * If the given page is not fully mapped, we update @map to the next extent in
- * the given page that needs mapping & return @map_bh as true.
+ * If the given folio is not fully mapped, we update @mpd to the next extent in
+ * the given folio that needs mapping & return @map_bh as true.
*/
-static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
+static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
bool *map_bh)
{
@@ -2241,23 +2232,21 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
ext4_lblk_t lblk = *m_lblk;
ext4_fsblk_t pblock = *m_pblk;
int err = 0;
- int blkbits = mpd->inode->i_blkbits;
ssize_t io_end_size = 0;
struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
do {
if (lblk < mpd->map.m_lblk)
continue;
if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
/*
* Buffer after end of mapped extent.
- * Find next buffer in the page to map.
+ * Find next buffer in the folio to map.
*/
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
io_end_vec->size += io_end_size;
- io_end_size = 0;
err = mpage_process_page_bufs(mpd, head, bh, lblk);
if (err > 0)
@@ -2268,7 +2257,8 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
err = PTR_ERR(io_end_vec);
goto out;
}
- io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
+ io_end_vec->offset = EXT4_LBLK_TO_B(mpd->inode,
+ mpd->map.m_lblk);
}
*map_bh = true;
goto out;
@@ -2278,11 +2268,10 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
bh->b_blocknr = pblock++;
}
clear_buffer_unwritten(bh);
- io_end_size += (1 << blkbits);
+ io_end_size += i_blocksize(mpd->inode);
} while (lblk++, (bh = bh->b_this_page) != head);
io_end_vec->size += io_end_size;
- io_end_size = 0;
*map_bh = false;
out:
*m_lblk = lblk;
@@ -2306,31 +2295,29 @@ out:
*/
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
- struct pagevec pvec;
- int nr_pages, i;
+ struct folio_batch fbatch;
+ unsigned nr, i;
struct inode *inode = mpd->inode;
- int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
ext4_fsblk_t pblock;
int err;
bool map_bh = false;
- start = mpd->map.m_lblk >> bpp_bits;
- end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
- lblk = start << bpp_bits;
+ start = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk);
+ end = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk + mpd->map.m_len - 1);
pblock = mpd->map.m_pblk;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
while (start <= end) {
- nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
- &start, end);
- if (nr_pages == 0)
+ nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
+ if (nr == 0)
break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr; i++) {
+ struct folio *folio = fbatch.folios[i];
- err = mpage_process_page(mpd, page, &lblk, &pblock,
+ lblk = EXT4_PG_TO_LBLK(inode, folio->index);
+ err = mpage_process_folio(mpd, folio, &lblk, &pblock,
&map_bh);
/*
* If map_bh is true, means page may require further bh
@@ -2340,18 +2327,19 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
if (err < 0 || map_bh)
goto out;
/* Page fully mapped - let IO run! */
- err = mpage_submit_page(mpd, page);
+ err = mpage_submit_folio(mpd, folio);
if (err < 0)
goto out;
+ mpage_folio_done(mpd, folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
}
/* Extent fully mapped and matches with page boundary. We are done. */
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
return 0;
out:
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
return err;
}
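
The block/page conversion helpers and mpage_folio_done() used above are introduced elsewhere in the series; a minimal sketch of what they plausibly look like, inferred only from the call sites in this hunk (assumptions, not the verbatim kernel definitions):

	#define EXT4_LBLK_TO_B(inode, lblk)	((loff_t)(lblk) << (inode)->i_blkbits)
	#define EXT4_LBLK_TO_PG(inode, lblk)	((pgoff_t)(lblk) >> \
						 (PAGE_SHIFT - (inode)->i_blkbits))
	#define EXT4_PG_TO_LBLK(inode, pgoff)	((ext4_lblk_t)(pgoff) << \
						 (PAGE_SHIFT - (inode)->i_blkbits))

	/* Plausibly advances the submission window past a fully written folio. */
	static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
	{
		mpd->start_pos += folio_size(folio);
		mpd->wbc->nr_to_write -= folio_nr_pages(folio);
	}

The macros centralise the open-coded shift arithmetic removed in this hunk, which keeps the callers from hard-coding PAGE_SIZE assumptions once large folios are in use.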
@@ -2362,6 +2350,11 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
int get_blocks_flags;
int err, dioread_nolock;
+ /* Make sure transaction has enough credits for this extent */
+ err = ext4_journal_ensure_extent_credits(handle, inode);
+ if (err < 0)
+ return err;
+
trace_ext4_da_write_pages_extent(inode, map);
/*
* Call ext4_map_blocks() to allocate any delayed allocation blocks, or
@@ -2371,21 +2364,18 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
* previously reserved. However we must not fail because we're in
* writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if
- * possible.
- *
- * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
- * the blocks in question are delalloc blocks. This indicates
- * that the blocks and quotas has already been checked when
- * the data was copied into the page cache.
+ * possible. In addition, do not cache any unrelated extents, as it
+ * only holds the folio lock but does not hold the i_rwsem or
+ * invalidate_lock, which could corrupt the extent status tree.
*/
get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL |
- EXT4_GET_BLOCKS_IO_SUBMIT;
+ EXT4_GET_BLOCKS_IO_SUBMIT |
+ EXT4_EX_NOCACHE;
+
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (map->m_flags & BIT(BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
if (err < 0)
@@ -2396,7 +2386,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
mpd->io_submit.io_end->handle = handle->h_rsv_handle;
handle->h_rsv_handle = NULL;
}
- ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
+ ext4_set_io_unwritten_flag(mpd->io_submit.io_end);
}
BUG_ON(map->m_len == 0);
@@ -2404,6 +2394,47 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
}
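
ext4_journal_ensure_extent_credits() is added earlier in the series and not shown here; the rough idea, sketched under that assumption (details such as the exact restart signalling may differ), is to make sure the running handle can still afford mapping one more extent, restarting the transaction and reporting -EAGAIN when it cannot, which matches the new -EAGAIN handling in the writeback loop below:

	/* Sketch only: not the verbatim helper from the series. */
	static int ext4_journal_ensure_extent_credits(handle_t *handle,
						      struct inode *inode)
	{
		int credits = ext4_chunk_trans_blocks(inode, 1);
		int err;

		/* Extend the handle, or restart it if extension is impossible. */
		err = ext4_journal_ensure_credits(handle, credits, 0);
		if (err <= 0)
			return err;
		/* Handle was restarted: ask the caller to retry this folio. */
		return -EAGAIN;
	}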
/*
+ * This is used to submit mapped buffers in a single folio that is not fully
+ * mapped for various reasons, such as insufficient space or journal credits.
+ */
+static int mpage_submit_partial_folio(struct mpage_da_data *mpd)
+{
+ struct inode *inode = mpd->inode;
+ struct folio *folio;
+ loff_t pos;
+ int ret;
+
+ folio = filemap_get_folio(inode->i_mapping,
+ mpd->start_pos >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ /*
+ * The mapped position should be within the folio currently being
+ * processed, but must not be the folio's start position.

+ */
+ pos = ((loff_t)mpd->map.m_lblk) << inode->i_blkbits;
+ if (WARN_ON_ONCE((folio_pos(folio) == pos) ||
+ !folio_contains(folio, pos >> PAGE_SHIFT)))
+ return -EINVAL;
+
+ ret = mpage_submit_folio(mpd, folio);
+ if (ret)
+ goto out;
+ /*
+ * Update start_pos to prevent this folio from being released in
+ * mpage_release_unused_pages(), it will be reset to the aligned folio
+ * pos when this folio is written again in the next round. Additionally,
+ * do not update wbc->nr_to_write here, as it will be updated once the
+ * entire folio has finished processing.
+ */
+ mpd->start_pos = pos;
+out:
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
+}
+
+/*
* mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
* mpd->len and submit pages underlying it for IO
*
@@ -2438,24 +2469,31 @@ static int mpage_map_and_submit_extent(handle_t *handle,
io_end_vec = ext4_alloc_io_end_vec(io_end);
if (IS_ERR(io_end_vec))
return PTR_ERR(io_end_vec);
- io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
+ io_end_vec->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
do {
err = mpage_map_one_extent(handle, mpd);
if (err < 0) {
struct super_block *sb = inode->i_sb;
- if (ext4_forced_shutdown(EXT4_SB(sb)) ||
- ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_emergency_state(sb))
goto invalidate_dirty_pages;
/*
* Let the upper layers retry transient errors.
* In the case of ENOSPC, if ext4_count_free_blocks()
* is non-zero, a commit should free up blocks.
*/
- if ((err == -ENOMEM) ||
+ if ((err == -ENOMEM) || (err == -EAGAIN) ||
(err == -ENOSPC && ext4_count_free_clusters(sb))) {
- if (progress)
+ /*
+ * We may have already allocated extents for
+ * some bhs inside the folio; submit the
+ * corresponding data to avoid exposing stale data.
+ */
+ if (progress) {
+ if (mpage_submit_partial_folio(mpd))
+ goto invalidate_dirty_pages;
goto update_disksize;
+ }
return err;
}
ext4_msg(sb, KERN_CRIT,
@@ -2489,7 +2527,7 @@ update_disksize:
* Update on-disk size after IO is submitted. Races with
* truncate are avoided by checking i_size under i_data_sem.
*/
- disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
+ disksize = mpd->start_pos;
if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
int err2;
loff_t i_size;
@@ -2513,33 +2551,59 @@ update_disksize:
return err;
}
-/*
- * Calculate the total number of credits to reserve for one writepages
- * iteration. This is called from ext4_writepages(). We map an extent of
- * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
- * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
- * bpp - 1 blocks in bpp different extents.
- */
-static int ext4_da_writepages_trans_blocks(struct inode *inode)
+static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
+ size_t len)
{
- int bpp = ext4_journal_blocks_per_page(inode);
+ struct buffer_head *page_bufs = folio_buffers(folio);
+ struct inode *inode = folio->mapping->host;
+ int ret, err;
+
+ ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
+ NULL, do_journal_get_write_access);
+ err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
+ NULL, write_end_fn);
+ if (ret == 0)
+ ret = err;
+ err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
+ if (ret == 0)
+ ret = err;
+ EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
+
+ return ret;
+}
+
+static int mpage_journal_page_buffers(handle_t *handle,
+ struct mpage_da_data *mpd,
+ struct folio *folio)
+{
+ struct inode *inode = mpd->inode;
+ loff_t size = i_size_read(inode);
+ size_t len = folio_size(folio);
- return ext4_meta_trans_blocks(inode,
- MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
+ folio_clear_checked(folio);
+ mpd->wbc->nr_to_write -= folio_nr_pages(folio);
+
+ if (folio_pos(folio) + len > size &&
+ !ext4_verity_in_progress(inode))
+ len = size & (len - 1);
+
+ return ext4_journal_folio_buffers(handle, folio, len);
}
/*
* mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
- * and underlying extent to map
+ * needing mapping, submit mapped pages
*
* @mpd - where to look for pages
*
* Walk dirty pages in the mapping. If they are fully mapped, submit them for
- * IO immediately. When we find a page which isn't mapped we start accumulating
- * extent of buffers underlying these pages that needs mapping (formed by
- * either delayed or unwritten buffers). We also lock the pages containing
- * these buffers. The extent found is returned in @mpd structure (starting at
- * mpd->lblk with length mpd->len blocks).
+ * IO immediately. If we cannot map blocks, we submit just already mapped
+ * buffers in the page for IO and keep page dirty. When we can map blocks and
+ * we find a page which isn't mapped we start accumulating extent of buffers
+ * underlying these pages that needs mapping (formed by either delayed or
+ * unwritten buffers). We also lock the pages containing these buffers. The
+ * extent found is returned in @mpd structure (starting at mpd->lblk with
+ * length mpd->len blocks).
*
* Note that this function can attach bios to one io_end structure which are
* neither logically nor physically contiguous. Although it may seem as an
@@ -2549,33 +2613,36 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
struct address_space *mapping = mpd->inode->i_mapping;
- struct pagevec pvec;
- unsigned int nr_pages;
- long left = mpd->wbc->nr_to_write;
- pgoff_t index = mpd->first_page;
- pgoff_t end = mpd->last_page;
+ struct folio_batch fbatch;
+ unsigned int nr_folios;
+ pgoff_t index = mpd->start_pos >> PAGE_SHIFT;
+ pgoff_t end = mpd->end_pos >> PAGE_SHIFT;
xa_mark_t tag;
int i, err = 0;
- int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
struct buffer_head *head;
+ handle_t *handle = NULL;
+ int bpp = ext4_journal_blocks_per_folio(mpd->inode);
- if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(mpd->wbc);
- pagevec_init(&pvec);
mpd->map.m_len = 0;
- mpd->next_page = index;
+ mpd->next_pos = mpd->start_pos;
+ if (ext4_should_journal_data(mpd->inode)) {
+ handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
+ bpp);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ }
+ folio_batch_init(&fbatch);
while (index <= end) {
- nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
- tag);
- if (nr_pages == 0)
+ nr_folios = filemap_get_folios_tag(mapping, &index, end,
+ tag, &fbatch);
+ if (nr_folios == 0)
break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
/*
* Accumulated enough dirty pages? This doesn't apply
@@ -2585,14 +2652,24 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
* newly appeared dirty pages, but have not synced all
* of the old dirty pages.
*/
- if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+ if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
+ mpd->wbc->nr_to_write <=
+ EXT4_LBLK_TO_PG(mpd->inode, mpd->map.m_len))
goto out;
/* If we can't merge this page, we are done. */
- if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+ if (mpd->map.m_len > 0 &&
+ mpd->next_pos != folio_pos(folio))
goto out;
- lock_page(page);
+ if (handle) {
+ err = ext4_journal_ensure_credits(handle, bpp,
+ 0);
+ if (err < 0)
+ goto out;
+ }
+
+ folio_lock(folio);
/*
* If the page is no longer dirty, or its mapping no
* longer corresponds to inode we are writing (which
@@ -2600,59 +2677,99 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
* page is already under writeback and we are not doing
* a data integrity writeback, skip the page
*/
- if (!PageDirty(page) ||
- (PageWriteback(page) &&
+ if (!folio_test_dirty(folio) ||
+ (folio_test_writeback(folio) &&
(mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
- unlikely(page->mapping != mapping)) {
- unlock_page(page);
+ unlikely(folio->mapping != mapping)) {
+ folio_unlock(folio);
continue;
}
- wait_on_page_writeback(page);
- BUG_ON(PageWriteback(page));
+ folio_wait_writeback(folio);
+ BUG_ON(folio_test_writeback(folio));
+
+ /*
+ * Should never happen but for buggy code in
+ * other subsystems that call
+ * set_page_dirty() without properly warning
+ * the file system first. See [1] for more
+ * information.
+ *
+ * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
+ */
+ if (!folio_buffers(folio)) {
+ ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
+ folio_clear_dirty(folio);
+ folio_unlock(folio);
+ continue;
+ }
if (mpd->map.m_len == 0)
- mpd->first_page = page->index;
- mpd->next_page = page->index + 1;
- /* Add all dirty buffers to mpd */
- lblk = ((ext4_lblk_t)page->index) <<
- (PAGE_SHIFT - blkbits);
- head = page_buffers(page);
- err = mpage_process_page_bufs(mpd, head, head, lblk);
- if (err <= 0)
- goto out;
- err = 0;
- left--;
+ mpd->start_pos = folio_pos(folio);
+ mpd->next_pos = folio_next_pos(folio);
+ /*
+ * Writeout when we cannot modify metadata is simple.
+ * Just submit the page. For data=journal mode we
+ * first handle writeout of the page for checkpoint and
+ * only after that handle delayed page dirtying. This
+ * makes sure current data is checkpointed to the final
+ * location before possibly journalling it again which
+ * is desirable when the page is frequently dirtied
+ * through a pin.
+ */
+ if (!mpd->can_map) {
+ err = mpage_submit_folio(mpd, folio);
+ if (err < 0)
+ goto out;
+ /* Pending dirtying of journalled data? */
+ if (folio_test_checked(folio)) {
+ err = mpage_journal_page_buffers(handle,
+ mpd, folio);
+ if (err < 0)
+ goto out;
+ mpd->journalled_more_data = 1;
+ }
+ mpage_folio_done(mpd, folio);
+ } else {
+ /* Add all dirty buffers to mpd */
+ lblk = EXT4_PG_TO_LBLK(mpd->inode, folio->index);
+ head = folio_buffers(folio);
+ err = mpage_process_page_bufs(mpd, head, head,
+ lblk);
+ if (err <= 0)
+ goto out;
+ err = 0;
+ }
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
mpd->scanned_until_end = 1;
+ if (handle)
+ ext4_journal_stop(handle);
return 0;
out:
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
+ if (handle)
+ ext4_journal_stop(handle);
return err;
}
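
A few small helpers used in this function are defined elsewhere; plausible shapes, based on the open-coded logic they replace (treat these as assumptions rather than the exact kernel definitions):

	static inline xa_mark_t wbc_to_tag(struct writeback_control *wbc)
	{
		/* Mirrors the removed WB_SYNC_ALL / tagged_writepages branch. */
		if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
			return PAGECACHE_TAG_TOWRITE;
		return PAGECACHE_TAG_DIRTY;
	}

	static inline loff_t folio_next_pos(struct folio *folio)
	{
		/* First byte after this folio in the file. */
		return folio_pos(folio) + folio_size(folio);
	}

	static inline int ext4_journal_blocks_per_folio(struct inode *inode)
	{
		/* Per-folio analogue of ext4_journal_blocks_per_page(). */
		if (EXT4_JOURNAL(inode) != NULL)
			return (PAGE_SIZE << mapping_max_folio_order(inode->i_mapping))
				>> inode->i_sb->s_blocksize_bits;
		return 0;
	}

With helpers of this shape, the data=journal branch above reserves enough credits (bpp) to journal every block of the largest folio the mapping may contain before taking any folio lock.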
-static int ext4_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+static int ext4_do_writepages(struct mpage_da_data *mpd)
{
+ struct writeback_control *wbc = mpd->wbc;
pgoff_t writeback_index = 0;
long nr_to_write = wbc->nr_to_write;
int range_whole = 0;
int cycled = 1;
handle_t *handle = NULL;
- struct mpage_da_data mpd;
- struct inode *inode = mapping->host;
+ struct inode *inode = mpd->inode;
+ struct address_space *mapping = inode->i_mapping;
int needed_blocks, rsv_blocks = 0, ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
struct blk_plug plug;
bool give_up_on_write = false;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
-
- percpu_down_read(&sbi->s_writepages_rwsem);
trace_ext4_writepages(inode, wbc);
/*
@@ -2663,26 +2780,19 @@ static int ext4_writepages(struct address_space *mapping,
if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
goto out_writepages;
- if (ext4_should_journal_data(inode)) {
- ret = generic_writepages(mapping, wbc);
- goto out_writepages;
- }
-
/*
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
- * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
+ * fs shutdown state instead of sb->s_flag's SB_RDONLY because
* the latter could be true if the filesystem is mounted
* read-only, and in that case, ext4_writepages should
* *never* be called, so if that ever happens, we would want
* the stack trace.
*/
- if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
- ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
- ret = -EROFS;
+ ret = ext4_emergency_state(mapping->host->i_sb);
+ if (unlikely(ret))
goto out_writepages;
- }
/*
* If we have inline data and arrive here, it means that
@@ -2702,13 +2812,33 @@ static int ext4_writepages(struct address_space *mapping,
ext4_journal_stop(handle);
}
+ /*
+ * data=journal mode does not do delalloc so we just need to writeout /
+ * journal already mapped buffers. On the other hand we need to commit
+ * transaction to make data stable. We expect all the data to be
+ * already in the journal (the only exceptions are DMA pinned pages
+ * dirtied behind our back) so we commit transaction here and run the
+ * writeback loop to checkpoint them. The checkpointing is not actually
+ * necessary to make data persistent *but* quite a few places (extent
+ * shifting operations, fsverity, ...) depend on being able to drop
+ * pagecache pages after calling filemap_write_and_wait() and for that
+ * checkpointing needs to happen.
+ */
+ if (ext4_should_journal_data(inode)) {
+ mpd->can_map = 0;
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ ext4_fc_commit(sbi->s_journal,
+ EXT4_I(inode)->i_datasync_tid);
+ }
+ mpd->journalled_more_data = 0;
+
if (ext4_should_dioread_nolock(inode)) {
+ int bpf = ext4_journal_blocks_per_folio(inode);
/*
* We may need to convert up to one extent per block in
- * the page and we may dirty the inode.
+ * the folio and we may dirty the inode.
*/
- rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
- PAGE_SIZE >> inode->i_blkbits);
+ rsv_blocks = 1 + ext4_ext_index_trans_blocks(inode, bpf);
}
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
@@ -2718,19 +2848,18 @@ static int ext4_writepages(struct address_space *mapping,
writeback_index = mapping->writeback_index;
if (writeback_index)
cycled = 0;
- mpd.first_page = writeback_index;
- mpd.last_page = -1;
+ mpd->start_pos = writeback_index << PAGE_SHIFT;
+ mpd->end_pos = LLONG_MAX;
} else {
- mpd.first_page = wbc->range_start >> PAGE_SHIFT;
- mpd.last_page = wbc->range_end >> PAGE_SHIFT;
+ mpd->start_pos = wbc->range_start;
+ mpd->end_pos = wbc->range_end;
}
- mpd.inode = inode;
- mpd.wbc = wbc;
- ext4_io_submit_init(&mpd.io_submit, wbc);
+ ext4_io_submit_init(&mpd->io_submit, wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
+ tag_pages_for_writeback(mapping, mpd->start_pos >> PAGE_SHIFT,
+ mpd->end_pos >> PAGE_SHIFT);
blk_start_plug(&plug);
/*
@@ -2739,31 +2868,32 @@ retry:
* in the block layer on device congestion while having transaction
* started.
*/
- mpd.do_map = 0;
- mpd.scanned_until_end = 0;
- mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
- if (!mpd.io_submit.io_end) {
+ mpd->do_map = 0;
+ mpd->scanned_until_end = 0;
+ mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
+ if (!mpd->io_submit.io_end) {
ret = -ENOMEM;
goto unplug;
}
- ret = mpage_prepare_extent_to_map(&mpd);
+ ret = mpage_prepare_extent_to_map(mpd);
/* Unlock pages we didn't use */
- mpage_release_unused_pages(&mpd, false);
+ mpage_release_unused_pages(mpd, false);
/* Submit prepared bio */
- ext4_io_submit(&mpd.io_submit);
- ext4_put_io_end_defer(mpd.io_submit.io_end);
- mpd.io_submit.io_end = NULL;
+ ext4_io_submit(&mpd->io_submit);
+ ext4_put_io_end_defer(mpd->io_submit.io_end);
+ mpd->io_submit.io_end = NULL;
if (ret < 0)
goto unplug;
- while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
+ while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
/* For each extent of pages we use new io_end */
- mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
- if (!mpd.io_submit.io_end) {
+ mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
+ if (!mpd->io_submit.io_end) {
ret = -ENOMEM;
break;
}
+ WARN_ON_ONCE(!mpd->can_map);
/*
* We have two constraints: We find one extent to map and we
* must always write out whole page (makes a difference when
@@ -2772,8 +2902,14 @@ retry:
* not supported by delalloc.
*/
BUG_ON(ext4_should_journal_data(inode));
- needed_blocks = ext4_da_writepages_trans_blocks(inode);
-
+ /*
+ * Calculate the number of credits needed to reserve for one
+ * extent of up to MAX_WRITEPAGES_EXTENT_LEN blocks. It will
+ * attempt to extend the transaction or start a new iteration
+ * if the reserved credits are insufficient.
+ */
+ needed_blocks = ext4_chunk_trans_blocks(inode,
+ MAX_WRITEPAGES_EXTENT_LEN);
/* start a new transaction */
handle = ext4_journal_start_with_reserve(inode,
EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
@@ -2783,16 +2919,17 @@ retry:
"%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
/* Release allocated io_end */
- ext4_put_io_end(mpd.io_submit.io_end);
- mpd.io_submit.io_end = NULL;
+ ext4_put_io_end(mpd->io_submit.io_end);
+ mpd->io_submit.io_end = NULL;
break;
}
- mpd.do_map = 1;
+ mpd->do_map = 1;
- trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
- ret = mpage_prepare_extent_to_map(&mpd);
- if (!ret && mpd.map.m_len)
- ret = mpage_map_and_submit_extent(handle, &mpd,
+ trace_ext4_da_write_folios_start(inode, mpd->start_pos,
+ mpd->next_pos, wbc);
+ ret = mpage_prepare_extent_to_map(mpd);
+ if (!ret && mpd->map.m_len)
+ ret = mpage_map_and_submit_extent(handle, mpd,
&give_up_on_write);
/*
* Caution: If the handle is synchronous,
@@ -2807,12 +2944,12 @@ retry:
if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
ext4_journal_stop(handle);
handle = NULL;
- mpd.do_map = 0;
+ mpd->do_map = 0;
}
/* Unlock pages we didn't use */
- mpage_release_unused_pages(&mpd, give_up_on_write);
+ mpage_release_unused_pages(mpd, give_up_on_write);
/* Submit prepared bio */
- ext4_io_submit(&mpd.io_submit);
+ ext4_io_submit(&mpd->io_submit);
/*
* Drop our io_end reference we got from init. We have
@@ -2822,11 +2959,13 @@ retry:
* up doing unwritten extent conversion.
*/
if (handle) {
- ext4_put_io_end_defer(mpd.io_submit.io_end);
+ ext4_put_io_end_defer(mpd->io_submit.io_end);
ext4_journal_stop(handle);
} else
- ext4_put_io_end(mpd.io_submit.io_end);
- mpd.io_submit.io_end = NULL;
+ ext4_put_io_end(mpd->io_submit.io_end);
+ mpd->io_submit.io_end = NULL;
+ trace_ext4_da_write_folios_end(inode, mpd->start_pos,
+ mpd->next_pos, wbc, ret);
if (ret == -ENOSPC && sbi->s_journal) {
/*
@@ -2838,6 +2977,8 @@ retry:
ret = 0;
continue;
}
+ if (ret == -EAGAIN)
+ ret = 0;
/* Fatal error - ENOMEM, EIO... */
if (ret)
break;
@@ -2846,8 +2987,8 @@ unplug:
blk_finish_plug(&plug);
if (!ret && !cycled && wbc->nr_to_write > 0) {
cycled = 1;
- mpd.last_page = writeback_index - 1;
- mpd.first_page = 0;
+ mpd->end_pos = (writeback_index << PAGE_SHIFT) - 1;
+ mpd->start_pos = 0;
goto retry;
}
@@ -2857,33 +2998,80 @@ unplug:
* Set the writeback_index so that range_cyclic
* mode will write it back later
*/
- mapping->writeback_index = mpd.first_page;
+ mapping->writeback_index = mpd->start_pos >> PAGE_SHIFT;
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
- percpu_up_read(&sbi->s_writepages_rwsem);
return ret;
}
+static int ext4_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct super_block *sb = mapping->host->i_sb;
+ struct mpage_da_data mpd = {
+ .inode = mapping->host,
+ .wbc = wbc,
+ .can_map = 1,
+ };
+ int ret;
+ int alloc_ctx;
+
+ ret = ext4_emergency_state(sb);
+ if (unlikely(ret))
+ return ret;
+
+ alloc_ctx = ext4_writepages_down_read(sb);
+ ret = ext4_do_writepages(&mpd);
+ /*
+ * For data=journal writeback we could have come across pages marked
+ * for delayed dirtying (PageChecked) which were just added to the
+ * running transaction. Try once more to get them to stable storage.
+ */
+ if (!ret && mpd.journalled_more_data)
+ ret = ext4_do_writepages(&mpd);
+ ext4_writepages_up_read(sb, alloc_ctx);
+
+ return ret;
+}
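+
ext4_emergency_state() and the ext4_writepages_down_read()/up_read() pair come from earlier patches; a sketch consistent with the open-coded shutdown checks and s_writepages_rwsem handling they replace above (assumed, not verbatim):

	static inline int ext4_emergency_state(struct super_block *sb)
	{
		if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
			return -EIO;
		if (unlikely(ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)))
			return -EROFS;
		return 0;
	}

	static int ext4_writepages_down_read(struct super_block *sb)
	{
		percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem);
		return memalloc_nofs_save();
	}

	static void ext4_writepages_up_read(struct super_block *sb, int alloc_ctx)
	{
		memalloc_nofs_restore(alloc_ctx);
		percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem);
	}

The int returned by ext4_writepages_down_read() is the saved allocation context ("alloc_ctx") that must be handed back to the matching up_read call.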
+
+int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .range_start = jinode->i_dirty_start,
+ .range_end = jinode->i_dirty_end,
+ };
+ struct mpage_da_data mpd = {
+ .inode = jinode->i_vfs_inode,
+ .wbc = &wbc,
+ .can_map = 0,
+ };
+ return ext4_do_writepages(&mpd);
+}
+
static int ext4_dax_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
int ret;
long nr_to_write = wbc->nr_to_write;
struct inode *inode = mapping->host;
- struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
+ int alloc_ctx;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
- percpu_down_read(&sbi->s_writepages_rwsem);
+ alloc_ctx = ext4_writepages_down_read(inode->i_sb);
trace_ext4_writepages(inode, wbc);
- ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
+ ret = dax_writeback_mapping_range(mapping,
+ EXT4_SB(inode->i_sb)->s_daxdev, wbc);
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
- percpu_up_read(&sbi->s_writepages_rwsem);
+ ext4_writepages_up_read(inode->i_sb, alloc_ctx);
return ret;
}
@@ -2921,33 +3109,33 @@ static int ext4_nonda_switch(struct super_block *sb)
return 0;
}
-static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int ext4_da_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret, retries = 0;
- struct page *page;
+ struct folio *folio;
pgoff_t index;
struct inode *inode = mapping->host;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ ret = ext4_emergency_state(inode->i_sb);
+ if (unlikely(ret))
+ return ret;
index = pos >> PAGE_SHIFT;
- if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
- ext4_verity_in_progress(inode)) {
+ if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
- return ext4_write_begin(file, mapping, pos,
- len, flags, pagep, fsdata);
+ return ext4_write_begin(iocb, mapping, pos,
+ len, foliop, fsdata);
}
*fsdata = (void *)0;
- trace_ext4_da_write_begin(inode, pos, len, flags);
+ trace_ext4_da_write_begin(inode, pos, len);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
- ret = ext4_da_write_inline_data_begin(mapping, inode,
- pos, len, flags,
- pagep, fsdata);
+ ret = ext4_generic_write_inline_data(mapping, inode, pos, len,
+ foliop, fsdata, true);
if (ret < 0)
return ret;
if (ret == 1)
@@ -2955,24 +3143,20 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
}
retry:
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
+ folio = write_begin_get_folio(iocb, mapping, index, len);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- /* In case writeback began while the page was unlocked */
- wait_for_stable_page(page);
+ if (len > folio_next_pos(folio) - pos)
+ len = folio_next_pos(folio) - pos;
-#ifdef CONFIG_FS_ENCRYPTION
- ret = ext4_block_write_begin(page, pos, len,
+ ret = ext4_block_write_begin(NULL, folio, pos, len,
ext4_da_get_block_prep);
-#else
- ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
-#endif
if (ret < 0) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
/*
- * block_write_begin may have instantiated a few blocks
+ * ext4_block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold inode lock.
*/
@@ -2985,7 +3169,7 @@ retry:
return ret;
}
- *pagep = page;
+ *foliop = folio;
return ret;
}
@@ -2993,15 +3177,15 @@ retry:
* Check if we should update i_disksize
* when write to the end of file but not require block allocation
*/
-static int ext4_da_should_update_i_disksize(struct page *page,
+static int ext4_da_should_update_i_disksize(struct folio *folio,
unsigned long offset)
{
struct buffer_head *bh;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned int idx;
int i;
- bh = page_buffers(page);
+ bh = folio_buffers(folio);
idx = offset >> inode->i_blkbits;
for (i = 0; i < idx; i++)
@@ -3012,51 +3196,100 @@ static int ext4_da_should_update_i_disksize(struct page *page,
return 1;
}
-static int ext4_da_write_end(struct file *file,
+static int ext4_da_do_write_end(struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ loff_t old_size = inode->i_size;
+ bool disksize_changed = false;
+ loff_t new_i_size, zero_len = 0;
+ handle_t *handle;
+
+ if (unlikely(!folio_buffers(folio))) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return -EIO;
+ }
+ /*
+ * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
+ * flag, which is all that's needed to trigger page writeback.
+ */
+ copied = block_write_end(pos, len, copied, folio);
+ new_i_size = pos + copied;
+
+ /*
+ * It's important to update i_size while still holding folio lock,
+ * because folio writeout could otherwise come in and zero beyond
+ * i_size.
+ *
+ * Since we are holding inode lock, we are sure i_disksize <=
+ * i_size. We also know that if i_disksize < i_size, there are
+ * delalloc writes pending in the range up to i_size. If the end of
+ * the current write is <= i_size, there's no need to touch
+ * i_disksize since writeback will push i_disksize up to i_size
+ * eventually. If the end of the current write is > i_size and
+ * inside an allocated block which ext4_da_should_update_i_disksize()
+ * checked, we need to update i_disksize here because the
+ * ext4_writepages() paths that do not allocate blocks never update it.
+ */
+ if (new_i_size > inode->i_size) {
+ unsigned long end;
+
+ i_size_write(inode, new_i_size);
+ end = offset_in_folio(folio, new_i_size - 1);
+ if (copied && ext4_da_should_update_i_disksize(folio, end)) {
+ ext4_update_i_disksize(inode, new_i_size);
+ disksize_changed = true;
+ }
+ }
+
+ folio_unlock(folio);
+ folio_put(folio);
+
+ if (pos > old_size) {
+ pagecache_isize_extended(inode, old_size, pos);
+ zero_len = pos - old_size;
+ }
+
+ if (!disksize_changed && !zero_len)
+ return copied;
+
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (zero_len)
+ ext4_zero_partial_blocks(handle, inode, old_size, zero_len);
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
+ return copied;
+}
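+
As a concrete (hypothetical) example of the logic above: with 4 KiB blocks, i_size = i_disksize = 5000 and a 2000-byte delalloc write at pos 5000, new_i_size becomes 7000, so i_size is raised immediately under the folio lock. If the block containing byte 6999 is already allocated, ext4_da_should_update_i_disksize() succeeds and i_disksize is also pushed to 7000 here, since no later block allocation would do it; if that block is still delalloc, i_disksize is left untouched and writeback advances it once the block is actually allocated.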
+
+static int ext4_da_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
- loff_t new_i_size;
- unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
if (write_mode == FALL_BACK_TO_NONDELALLOC)
- return ext4_write_end(file, mapping, pos,
- len, copied, page, fsdata);
+ return ext4_write_end(iocb, mapping, pos,
+ len, copied, folio, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
if (write_mode != CONVERT_INLINE_DATA &&
ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
ext4_has_inline_data(inode))
- return ext4_write_inline_data_end(inode, pos, len, copied, page);
-
- start = pos & (PAGE_SIZE - 1);
- end = start + copied - 1;
+ return ext4_write_inline_data_end(inode, pos, len, copied,
+ folio);
- /*
- * Since we are holding inode lock, we are sure i_disksize <=
- * i_size. We also know that if i_disksize < i_size, there are
- * delalloc writes pending in the range upto i_size. If the end of
- * the current write is <= i_size, there's no need to touch
- * i_disksize since writeback will push i_disksize upto i_size
- * eventually. If the end of the current write is > i_size and
- * inside an allocated block (ext4_da_should_update_i_disksize()
- * check), we need to update i_disksize here as neither
- * ext4_writepage() nor certain ext4_writepages() paths not
- * allocating blocks update i_disksize.
- *
- * Note that we defer inode dirtying to generic_write_end() /
- * ext4_da_write_inline_data_end().
- */
- new_i_size = pos + copied;
- if (copied && new_i_size > inode->i_size &&
- ext4_da_should_update_i_disksize(page, end))
- ext4_update_i_disksize(inode, new_i_size);
+ if (unlikely(copied < len) && !folio_test_uptodate(folio))
+ copied = 0;
- return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ return ext4_da_do_write_end(mapping, pos, len, copied, folio);
}
/*
@@ -3120,70 +3353,45 @@ int ext4_alloc_da_blocks(struct inode *inode)
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
- journal_t *journal;
- int err;
+ sector_t ret = 0;
+ inode_lock_shared(inode);
/*
* We can get here for an inline file via the FIBMAP ioctl
*/
if (ext4_has_inline_data(inode))
- return 0;
+ goto out;
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
- test_opt(inode->i_sb, DELALLOC)) {
+ (test_opt(inode->i_sb, DELALLOC) ||
+ ext4_should_journal_data(inode))) {
/*
- * With delalloc we want to sync the file
- * so that we can make sure we allocate
- * blocks for file
+ * With delalloc or journalled data we want to sync the file so
+ * that we can make sure we allocate blocks for file and data
+ * is in place for the user to see it
*/
filemap_write_and_wait(mapping);
}
- if (EXT4_JOURNAL(inode) &&
- ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
- /*
- * This is a REALLY heavyweight approach, but the use of
- * bmap on dirty files is expected to be extremely rare:
- * only if we run lilo or swapon on a freshly made file
- * do we expect this to happen.
- *
- * (bmap requires CAP_SYS_RAWIO so this does not
- * represent an unprivileged user DOS attack --- we'd be
- * in trouble if mortal users could trigger this path at
- * will.)
- *
- * NB. EXT4_STATE_JDATA is not set on files other than
- * regular files. If somebody wants to bmap a directory
- * or symlink and gets confused because the buffer
- * hasn't yet been flushed to disk, they deserve
- * everything they get.
- */
+ ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
- ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
- journal = EXT4_JOURNAL(inode);
- jbd2_journal_lock_updates(journal);
- err = jbd2_journal_flush(journal, 0);
- jbd2_journal_unlock_updates(journal);
-
- if (err)
- return 0;
- }
-
- return iomap_bmap(mapping, block, &ext4_iomap_ops);
+out:
+ inode_unlock_shared(inode);
+ return ret;
}
-static int ext4_readpage(struct file *file, struct page *page)
+static int ext4_read_folio(struct file *file, struct folio *folio)
{
int ret = -EAGAIN;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
- trace_ext4_readpage(page);
+ trace_ext4_read_folio(inode, folio);
if (ext4_has_inline_data(inode))
- ret = ext4_readpage_inline(inode, page);
+ ret = ext4_readpage_inline(inode, folio);
if (ret == -EAGAIN)
- return ext4_mpage_readpages(inode, NULL, page);
+ return ext4_mpage_readpages(inode, NULL, folio);
return ret;
}
@@ -3199,55 +3407,55 @@ static void ext4_readahead(struct readahead_control *rac)
ext4_mpage_readpages(inode, rac, NULL);
}
-static void ext4_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void ext4_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
{
- trace_ext4_invalidatepage(page, offset, length);
+ trace_ext4_invalidate_folio(folio, offset, length);
/* No journalling happens on data buffers when this function is used */
- WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
+ WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
- block_invalidatepage(page, offset, length);
+ block_invalidate_folio(folio, offset, length);
}
-static int __ext4_journalled_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
+static int __ext4_journalled_invalidate_folio(struct folio *folio,
+ size_t offset, size_t length)
{
- journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
- trace_ext4_journalled_invalidatepage(page, offset, length);
+ trace_ext4_journalled_invalidate_folio(folio, offset, length);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
- if (offset == 0 && length == PAGE_SIZE)
- ClearPageChecked(page);
+ if (offset == 0 && length == folio_size(folio))
+ folio_clear_checked(folio);
- return jbd2_journal_invalidatepage(journal, page, offset, length);
+ return jbd2_journal_invalidate_folio(journal, folio, offset, length);
}
/* Wrapper for aops... */
-static void ext4_journalled_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
+static void ext4_journalled_invalidate_folio(struct folio *folio,
+ size_t offset,
+ size_t length)
{
- WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
+ WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
}
-static int ext4_releasepage(struct page *page, gfp_t wait)
+static bool ext4_release_folio(struct folio *folio, gfp_t wait)
{
- journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ struct inode *inode = folio->mapping->host;
+ journal_t *journal = EXT4_JOURNAL(inode);
- trace_ext4_releasepage(page);
+ trace_ext4_release_folio(inode, folio);
/* Page has dirty journalled data -> cannot release */
- if (PageChecked(page))
- return 0;
+ if (folio_test_checked(folio))
+ return false;
if (journal)
- return jbd2_journal_try_to_free_buffers(journal, page);
+ return jbd2_journal_try_to_free_buffers(journal, folio);
else
- return try_to_free_buffers(page);
+ return try_to_free_buffers(folio);
}
static bool ext4_inode_datasync_dirty(struct inode *inode)
@@ -3264,14 +3472,14 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
}
/* Any metadata buffers to write? */
- if (!list_empty(&inode->i_mapping->private_list))
+ if (!list_empty(&inode->i_mapping->i_private_list))
return true;
- return inode->i_state & I_DIRTY_DATASYNC;
+ return inode_state_read_once(inode) & I_DIRTY_DATASYNC;
}
static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
struct ext4_map_blocks *map, loff_t offset,
- loff_t length)
+ loff_t length, unsigned int flags)
{
u8 blkbits = inode->i_blkbits;
@@ -3288,10 +3496,16 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
if (map->m_flags & EXT4_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
- iomap->bdev = inode->i_sb->s_bdev;
- iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
- iomap->offset = (u64) map->m_lblk << blkbits;
- iomap->length = (u64) map->m_len << blkbits;
+ /* HW-offload atomics are always used */
+ if (flags & IOMAP_ATOMIC)
+ iomap->flags |= IOMAP_F_ATOMIC_BIO;
+
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
+ else
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
+ iomap->length = EXT4_LBLK_TO_B(inode, map->m_len);
if ((map->m_flags & EXT4_MAP_MAPPED) &&
!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -3309,21 +3523,164 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
if (map->m_flags & EXT4_MAP_UNWRITTEN) {
iomap->type = IOMAP_UNWRITTEN;
iomap->addr = (u64) map->m_pblk << blkbits;
+ if (flags & IOMAP_DAX)
+ iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
} else if (map->m_flags & EXT4_MAP_MAPPED) {
iomap->type = IOMAP_MAPPED;
iomap->addr = (u64) map->m_pblk << blkbits;
+ if (flags & IOMAP_DAX)
+ iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
+ } else if (map->m_flags & EXT4_MAP_DELAYED) {
+ iomap->type = IOMAP_DELALLOC;
+ iomap->addr = IOMAP_NULL_ADDR;
} else {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
}
}
+static int ext4_map_blocks_atomic_write_slow(handle_t *handle,
+ struct inode *inode, struct ext4_map_blocks *map)
+{
+ ext4_lblk_t m_lblk = map->m_lblk;
+ unsigned int m_len = map->m_len;
+ unsigned int mapped_len = 0, m_flags = 0;
+ ext4_fsblk_t next_pblk = 0;
+ bool check_next_pblk = false;
+ int ret = 0;
+
+ WARN_ON_ONCE(!ext4_has_feature_bigalloc(inode->i_sb));
+
+ /*
+ * This is the slow path for the mixed-mapping case. We use the
+ * EXT4_GET_BLOCKS_CREATE_ZERO flag here so that any unwritten or hole
+ * regions within the requested range are allocated and zeroed out,
+ * leaving us with a single contiguous mapped extent to return.
+ */
+ m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
+
+ do {
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
+ if (ret < 0 && ret != -ENOSPC)
+ goto out_err;
+ /*
+ * This should never happen, but let's return an error code to
+ * avoid an infinite loop in here.
+ */
+ if (ret == 0) {
+ ret = -EFSCORRUPTED;
+ ext4_warning_inode(inode,
+ "ext4_map_blocks() couldn't allocate blocks m_flags: 0x%x, ret:%d",
+ m_flags, ret);
+ goto out_err;
+ }
+ /*
+ * With bigalloc we should never get ENOSPC nor discontiguous
+ * physical extents.
+ */
+ if ((check_next_pblk && next_pblk != map->m_pblk) ||
+ ret == -ENOSPC) {
+ ext4_warning_inode(inode,
+ "Non-contiguous allocation detected: expected %llu, got %llu, "
+ "or ext4_map_blocks() returned out of space ret: %d",
+ next_pblk, map->m_pblk, ret);
+ ret = -EFSCORRUPTED;
+ goto out_err;
+ }
+ next_pblk = map->m_pblk + map->m_len;
+ check_next_pblk = true;
+
+ mapped_len += map->m_len;
+ map->m_lblk += map->m_len;
+ map->m_len = m_len - mapped_len;
+ } while (mapped_len < m_len);
+
+ /*
+ * We might have done some work in the above loop, so we need to query
+ * the start of the physical extent, based on the original m_lblk and
+ * m_len. Let's also ensure we were able to allocate the required range
+ * for the mixed-mapping case.
+ */
+ map->m_lblk = m_lblk;
+ map->m_len = m_len;
+ map->m_flags = 0;
+
+ ret = ext4_map_blocks(handle, inode, map,
+ EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF);
+ if (ret != m_len) {
+ ext4_warning_inode(inode,
+ "allocation failed for atomic write request m_lblk:%u, m_len:%u, ret:%d\n",
+ m_lblk, m_len, ret);
+ ret = -EINVAL;
+ }
+ return ret;
+
+out_err:
+ /* reset map before returning an error */
+ map->m_lblk = m_lblk;
+ map->m_len = m_len;
+ map->m_flags = 0;
+ return ret;
+}
+
+/*
+ * ext4_map_blocks_atomic_write: Helper routine to ensure the entire requested
+ * range in @map [lblk, lblk + len) is one single contiguous extent with no
+ * mixed mappings.
+ *
+ * We first use m_flags passed to us by our caller (ext4_iomap_alloc()).
+ * We only call EXT4_GET_BLOCKS_ZERO in the slow path, when the underlying
+ * physical extent for the requested range does not have a single contiguous
+ * mapping type i.e. (Hole, Mapped, or Unwritten) throughout.
+ * In that case we will loop over the requested range to allocate and zero out
+ * the unwritten / holes in between, to get a single mapped extent from
+ * [m_lblk, m_lblk + m_len). Note that this is only possible because we know
+ * this can be called only on a bigalloc-enabled filesystem where the underlying
+ * cluster is already allocated. This avoids allocating discontiguous extents
+ * in the slow path due to multiple calls to ext4_map_blocks().
+ * The slow path is not performance critical, so it should be ok to
+ * loop using ext4_map_blocks() with appropriate flags to allocate & zero the
+ * underlying short holes/unwritten extents within the requested range.
+ */
+static int ext4_map_blocks_atomic_write(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int m_flags,
+ bool *force_commit)
+{
+ ext4_lblk_t m_lblk = map->m_lblk;
+ unsigned int m_len = map->m_len;
+ int ret = 0;
+
+ WARN_ON_ONCE(m_len > 1 && !ext4_has_feature_bigalloc(inode->i_sb));
+
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
+ if (ret < 0 || ret == m_len)
+ goto out;
+ /*
+ * This is a mixed mapping case where we were not able to allocate
+ * a single contiguous extent. In that case let's reset requested
+ * mapping and call the slow path.
+ */
+ map->m_lblk = m_lblk;
+ map->m_len = m_len;
+ map->m_flags = 0;
+
+ /*
+ * The slow path means we have a mixed mapping, which means we will
+ * need to force a transaction commit.
+ */
+ *force_commit = true;
+ return ext4_map_blocks_atomic_write_slow(handle, inode, map);
+out:
+ return ret;
+}
+
static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
unsigned int flags)
{
handle_t *handle;
- u8 blkbits = inode->i_blkbits;
int ret, dio_credits, m_flags = 0, retries = 0;
+ bool force_commit = false;
/*
* Trim the mapping request to the maximum value that we can map at
@@ -3331,7 +3688,30 @@ static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
*/
if (map->m_len > DIO_MAX_BLOCKS)
map->m_len = DIO_MAX_BLOCKS;
- dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
+
+ /*
+ * Journal credits estimation for atomic writes. We call
+ * ext4_map_blocks() to find out whether there could be a mixed mapping.
+ * If so, assume the number of physical extents required can be as high
+ * as m_len, i.e. every other block can be an unwritten extent or a hole.
+ */
+ if (flags & IOMAP_ATOMIC) {
+ unsigned int orig_mlen = map->m_len;
+
+ ret = ext4_map_blocks(NULL, inode, map, 0);
+ if (ret < 0)
+ return ret;
+ if (map->m_len < orig_mlen) {
+ map->m_len = orig_mlen;
+ dio_credits = ext4_meta_trans_blocks(inode, orig_mlen,
+ map->m_len);
+ } else {
+ dio_credits = ext4_chunk_trans_blocks(inode,
+ map->m_len);
+ }
+ } else {
+ dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
+ }
retry:
/*
@@ -3348,8 +3728,8 @@ retry:
* DAX and direct I/O are the only two operations that are currently
* supported with IOMAP_WRITE.
*/
- WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
- if (IS_DAX(inode))
+ WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
+ if (flags & IOMAP_DAX)
m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
/*
* We use i_size instead of i_disksize here because delalloc writeback
@@ -3357,12 +3737,16 @@ retry:
* i_disksize out to i_size. This could be beyond where direct I/O is
* happening and thus expose allocated blocks to direct I/O reads.
*/
- else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
+ else if (EXT4_LBLK_TO_B(inode, map->m_lblk) >= i_size_read(inode))
m_flags = EXT4_GET_BLOCKS_CREATE;
else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
- ret = ext4_map_blocks(handle, inode, map, m_flags);
+ if (flags & IOMAP_ATOMIC)
+ ret = ext4_map_blocks_atomic_write(handle, inode, map, m_flags,
+ &force_commit);
+ else
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
/*
* We cannot fill holes in indirect tree based inodes as that could
@@ -3376,6 +3760,22 @@ retry:
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
+ /*
+ * Force commit the current transaction if the allocation spans a mixed
+ * mapping range. This ensures any pending metadata updates (like
+ * unwritten to written extents conversion) in this range are in
+ * consistent state with the file data blocks, before performing the
+ * actual write I/O. If the commit fails, the whole I/O must be aborted
+ * to prevent any possible torn writes.
+ */
+ if (ret > 0 && force_commit) {
+ int ret2;
+
+ ret2 = ext4_force_commit(inode->i_sb);
+ if (ret2)
+ return ret2;
+ }
+
return ret;
}
@@ -3386,6 +3786,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
int ret;
struct ext4_map_blocks map;
u8 blkbits = inode->i_blkbits;
+ unsigned int orig_mlen;
if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
return -EINVAL;
@@ -3399,6 +3800,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_lblk = offset >> blkbits;
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
+ orig_mlen = map.m_len;
if (flags & IOMAP_WRITE) {
/*
@@ -3409,18 +3811,47 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
*/
if (offset + length <= i_size_read(inode)) {
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
- goto out;
+ /*
+ * For atomic writes the entire requested length should
+ * be mapped.
+ */
+ if (map.m_flags & EXT4_MAP_MAPPED) {
+ if ((!(flags & IOMAP_ATOMIC) && ret > 0) ||
+ (flags & IOMAP_ATOMIC && ret >= orig_mlen))
+ goto out;
+ }
+ map.m_len = orig_mlen;
}
ret = ext4_iomap_alloc(inode, &map, flags);
} else {
+ /*
+ * This can be called for the overwrite path from
+ * ext4_iomap_overwrite_begin().
+ */
ret = ext4_map_blocks(NULL, inode, &map, 0);
}
if (ret < 0)
return ret;
out:
- ext4_set_iomap(inode, iomap, &map, offset, length);
+ /*
+ * When inline encryption is enabled, sometimes I/O to an encrypted file
+ * has to be broken up to guarantee DUN contiguity. Handle this by
+ * limiting the length of the mapping returned.
+ */
+ map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+
+ /*
+ * Before returning to iomap, let's ensure the allocated mapping
+ * covers the entire requested length for atomic writes.
+ */
+ if (flags & IOMAP_ATOMIC) {
+ if (map.m_len < (length >> blkbits)) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ }
+ ext4_set_iomap(inode, iomap, &map, offset, length, flags);
return 0;
}
@@ -3437,65 +3868,23 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
*/
flags &= ~IOMAP_WRITE;
ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
- WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
+ WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
return ret;
}
-static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
- ssize_t written, unsigned flags, struct iomap *iomap)
-{
- /*
- * Check to see whether an error occurred while writing out the data to
- * the allocated blocks. If so, return the magic error code so that we
- * fallback to buffered I/O and attempt to complete the remainder of
- * the I/O. Any blocks that may have been allocated in preparation for
- * the direct I/O will be reused during buffered I/O.
- */
- if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
- return -ENOTBLK;
-
- return 0;
-}
-
const struct iomap_ops ext4_iomap_ops = {
.iomap_begin = ext4_iomap_begin,
- .iomap_end = ext4_iomap_end,
};
const struct iomap_ops ext4_iomap_overwrite_ops = {
.iomap_begin = ext4_iomap_overwrite_begin,
- .iomap_end = ext4_iomap_end,
};
-static bool ext4_iomap_is_delalloc(struct inode *inode,
- struct ext4_map_blocks *map)
-{
- struct extent_status es;
- ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
-
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
- map->m_lblk, end, &es);
-
- if (!es.es_len || es.es_lblk > end)
- return false;
-
- if (es.es_lblk > map->m_lblk) {
- map->m_len = es.es_lblk - map->m_lblk;
- return false;
- }
-
- offset = map->m_lblk - es.es_lblk;
- map->m_len = es.es_len - offset;
-
- return true;
-}
-
static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
int ret;
- bool delalloc = false;
struct ext4_map_blocks map;
u8 blkbits = inode->i_blkbits;
@@ -3536,13 +3925,8 @@ static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0)
return ret;
- if (ret == 0)
- delalloc = ext4_iomap_is_delalloc(inode, &map);
-
set_iomap:
- ext4_set_iomap(inode, iomap, &map, offset, length);
- if (delalloc && iomap->type == IOMAP_HOLE)
- iomap->type = IOMAP_DELALLOC;
+ ext4_set_iomap(inode, iomap, &map, offset, length, flags);
return 0;
}
@@ -3552,29 +3936,34 @@ const struct iomap_ops ext4_iomap_report_ops = {
};
/*
- * Pages can be marked dirty completely asynchronously from ext4's journalling
- * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
- * much here because ->set_page_dirty is called under VFS locks. The page is
- * not necessarily locked.
- *
- * We cannot just dirty the page and leave attached buffers clean, because the
- * buffers' dirty state is "definitive". We cannot just set the buffers dirty
- * or jbddirty because all the journalling code will explode.
- *
- * So what we do is to mark the page "pending dirty" and next time writepage
- * is called, propagate that into the buffers appropriately.
+ * For data=journal mode, a folio should be marked dirty only when it was
+ * writeably mapped. When that happens, it was already attached to the
+ * transaction and marked as jbddirty (we take care of this in
+ * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
+ * so we should have nothing to do here, except for the case when someone
+ * had the page pinned and dirtied the page through this pin (e.g. by doing
+ * direct IO to it). In that case we'd need to attach buffers here to the
+ * transaction but we cannot due to lock ordering. We cannot just dirty the
+ * folio and leave attached buffers clean, because the buffers' dirty state is
+ * "definitive". We cannot just set the buffers dirty or jbddirty because all
+ * the journalling code will explode. So what we do is to mark the folio
+ * "pending dirty" and next time ext4_writepages() is called, attach buffers
+ * to the transaction appropriately.
*/
-static int ext4_journalled_set_page_dirty(struct page *page)
+static bool ext4_journalled_dirty_folio(struct address_space *mapping,
+ struct folio *folio)
{
- SetPageChecked(page);
- return __set_page_dirty_nobuffers(page);
+ WARN_ON_ONCE(!folio_buffers(folio));
+ if (folio_maybe_dma_pinned(folio))
+ folio_set_checked(folio);
+ return filemap_dirty_folio(mapping, folio);
}
-static int ext4_set_page_dirty(struct page *page)
+static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
- WARN_ON_ONCE(!page_has_buffers(page));
- return __set_page_dirty_buffers(page);
+ WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
+ WARN_ON_ONCE(!folio_buffers(folio));
+ return block_dirty_folio(mapping, folio);
}
static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
@@ -3585,64 +3974,57 @@ static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
}
static const struct address_space_operations ext4_aops = {
- .readpage = ext4_readpage,
+ .read_folio = ext4_read_folio,
.readahead = ext4_readahead,
- .writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_write_end,
- .set_page_dirty = ext4_set_page_dirty,
+ .dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
- .invalidatepage = ext4_invalidatepage,
- .releasepage = ext4_releasepage,
- .direct_IO = noop_direct_IO,
- .migratepage = buffer_migrate_page,
+ .invalidate_folio = ext4_invalidate_folio,
+ .release_folio = ext4_release_folio,
+ .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = ext4_iomap_swap_activate,
};
static const struct address_space_operations ext4_journalled_aops = {
- .readpage = ext4_readpage,
+ .read_folio = ext4_read_folio,
.readahead = ext4_readahead,
- .writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
- .set_page_dirty = ext4_journalled_set_page_dirty,
+ .dirty_folio = ext4_journalled_dirty_folio,
.bmap = ext4_bmap,
- .invalidatepage = ext4_journalled_invalidatepage,
- .releasepage = ext4_releasepage,
- .direct_IO = noop_direct_IO,
+ .invalidate_folio = ext4_journalled_invalidate_folio,
+ .release_folio = ext4_release_folio,
+ .migrate_folio = buffer_migrate_folio_norefs,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = ext4_iomap_swap_activate,
};
static const struct address_space_operations ext4_da_aops = {
- .readpage = ext4_readpage,
+ .read_folio = ext4_read_folio,
.readahead = ext4_readahead,
- .writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
- .set_page_dirty = ext4_set_page_dirty,
+ .dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
- .invalidatepage = ext4_invalidatepage,
- .releasepage = ext4_releasepage,
- .direct_IO = noop_direct_IO,
- .migratepage = buffer_migrate_page,
+ .invalidate_folio = ext4_invalidate_folio,
+ .release_folio = ext4_release_folio,
+ .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = ext4_iomap_swap_activate,
};
static const struct address_space_operations ext4_dax_aops = {
.writepages = ext4_dax_writepages,
- .direct_IO = noop_direct_IO,
- .set_page_dirty = __set_page_dirty_no_writeback,
+ .dirty_folio = noop_dirty_folio,
.bmap = ext4_bmap,
- .invalidatepage = noop_invalidatepage,
.swap_activate = ext4_iomap_swap_activate,
};
@@ -3666,32 +4048,38 @@ void ext4_set_aops(struct inode *inode)
inode->i_mapping->a_ops = &ext4_aops;
}
+/*
+ * Here we can't skip an unwritten buffer even though it usually reads zero,
+ * because it might have data in the pagecache (e.g. if called from
+ * ext4_zero_range, ext4_punch_hole, etc.) that needs to be properly zeroed
+ * out. Otherwise a racing writeback can come later and flush the stale
+ * pagecache to disk.
+ */
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
- ext4_fsblk_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize, pos;
+ unsigned int offset, blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
- struct page *page;
+ struct folio *folio;
int err = 0;
- page = find_or_create_page(mapping, from >> PAGE_SHIFT,
- mapping_gfp_constraint(mapping, ~__GFP_FS));
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_constraint(mapping, ~__GFP_FS));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
blocksize = inode->i_sb->s_blocksize;
- iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
+ iblock = EXT4_PG_TO_LBLK(inode, folio->index);
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
+ bh = folio_buffers(folio);
+ if (!bh)
+ bh = create_empty_buffers(folio, blocksize, 0);
/* Find the buffer that contains "offset" */
- bh = page_buffers(page);
+ offset = offset_in_folio(folio, from);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
@@ -3713,7 +4101,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
}
/* Ok, it's mapped. Make sure it's up-to-date */
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
@@ -3723,7 +4111,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
- err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+ err = fscrypt_decrypt_pagecache_blocks(folio,
+ blocksize,
bh_offset(bh));
if (err) {
clear_buffer_uptodate(bh);
@@ -3738,11 +4127,11 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (err)
goto unlock;
}
- zero_user(page, offset, length);
+ folio_zero_range(folio, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
if (ext4_should_journal_data(inode)) {
- err = ext4_handle_dirty_metadata(handle, inode, bh);
+ err = ext4_dirty_journalled_data(handle, bh);
} else {
err = 0;
mark_buffer_dirty(bh);
@@ -3752,8 +4141,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
}
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return err;
}
@@ -3768,9 +4157,8 @@ static int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
struct inode *inode = mapping->host;
- unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize = inode->i_sb->s_blocksize;
- unsigned max = blocksize - (offset & (blocksize - 1));
+ unsigned int max = blocksize - (from & (blocksize - 1));
/*
* correct length if it does not fall between
@@ -3780,8 +4168,8 @@ static int ext4_block_zero_page_range(handle_t *handle,
length = max;
if (IS_DAX(inode)) {
- return iomap_zero_range(inode, from, length, NULL,
- &ext4_iomap_ops);
+ return dax_zero_range(inode, from, length, NULL,
+ &ext4_iomap_ops);
}
return __ext4_block_zero_page_range(handle, mapping, from, length);
}
@@ -3795,7 +4183,6 @@ static int ext4_block_zero_page_range(handle_t *handle,
static int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from)
{
- unsigned offset = from & (PAGE_SIZE-1);
unsigned length;
unsigned blocksize;
struct inode *inode = mapping->host;
@@ -3804,8 +4191,8 @@ static int ext4_block_truncate_page(handle_t *handle,
if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
return 0;
- blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
+ blocksize = i_blocksize(inode);
+ length = blocksize - (from & (blocksize - 1));
return ext4_block_zero_page_range(handle, mapping, from, length);
}
@@ -3863,7 +4250,11 @@ int ext4_can_truncate(struct inode *inode)
* We have to make sure i_disksize gets properly updated before we truncate
* page cache due to hole punching or zero range. Otherwise i_disksize update
* can get lost as it may have been postponed to submission of writeback but
- * that will never happen after we truncate page cache.
+ * that will never happen if we remove the folio containing i_size from the
+ * page cache. Also, if we punch a hole within i_size but above i_disksize,
+ * a following ext4_page_mkwrite() may mistakenly allocate written blocks
+ * over the hole and thus introduce allocated blocks beyond i_disksize,
+ * which is not allowed (e2fsck would complain in case of a crash).
*/
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t len)
@@ -3874,9 +4265,11 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode));
- if (offset > size || offset + len < size)
+ if (offset > size)
return 0;
+ if (offset + len < size)
+ size = offset + len;
if (EXT4_I(inode)->i_disksize >= size)
return 0;
@@ -3890,6 +4283,68 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
return ret;
}
+static inline void ext4_truncate_folio(struct inode *inode,
+ loff_t start, loff_t end)
+{
+ unsigned long blocksize = i_blocksize(inode);
+ struct folio *folio;
+
+ /* Nothing to be done if no complete block needs to be truncated. */
+ if (round_up(start, blocksize) >= round_down(end, blocksize))
+ return;
+
+ folio = filemap_lock_folio(inode->i_mapping, start >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
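+ /*
+ * Write-protect any userspace mappings of the folio; if it was mapped
+ * writably, keep the folio dirty so that a later write refaults through
+ * ext4_page_mkwrite().
+ */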
+ if (folio_mkclean(folio))
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+}
+
+int ext4_truncate_page_cache_block_range(struct inode *inode,
+ loff_t start, loff_t end)
+{
+ unsigned long blocksize = i_blocksize(inode);
+ int ret;
+
+ /*
+ * For journalled data we need to write (and checkpoint) pages
+ * before discarding the page cache, to avoid inconsistent data on
+ * disk in case of a crash before the transaction that frees blocks
+ * or converts unwritten extents is committed.
+ */
+ if (ext4_should_journal_data(inode)) {
+ ret = filemap_write_and_wait_range(inode->i_mapping, start,
+ end - 1);
+ if (ret)
+ return ret;
+ goto truncate_pagecache;
+ }
+
+ /*
+ * If the block size is less than the page size, the file's mapped
+ * blocks within one page could be freed or converted to unwritten.
+ * So it's necessary to remove writable userspace mappings, and then
+ * ext4_page_mkwrite() can be called during subsequent write access
+ * to these partial folios.
+ */
+ if (!IS_ALIGNED(start | end, PAGE_SIZE) &&
+ blocksize < PAGE_SIZE && start < inode->i_size) {
+ loff_t page_boundary = round_up(start, PAGE_SIZE);
+
+ ext4_truncate_folio(inode, start, min(page_boundary, end));
+ if (end > page_boundary)
+ ext4_truncate_folio(inode,
+ round_down(end, PAGE_SIZE), end);
+ }
+
+truncate_pagecache:
+ truncate_pagecache_range(inode, start, end - 1);
+ return 0;
+}
+
static void ext4_wait_dax_page(struct inode *inode)
{
filemap_invalidate_unlock(inode->i_mapping);
@@ -3899,24 +4354,10 @@ static void ext4_wait_dax_page(struct inode *inode)
int ext4_break_layouts(struct inode *inode)
{
- struct page *page;
- int error;
-
if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
return -EINVAL;
- do {
- page = dax_layout_busy_page(inode->i_mapping);
- if (!page)
- return 0;
-
- error = ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1,
- TASK_INTERRUPTIBLE, 0, 0,
- ext4_wait_dax_page(inode));
- } while (error == 0);
-
- return error;
+ return dax_break_layout_inode(inode, ext4_wait_dax_page);
}
/*
@@ -3930,149 +4371,116 @@ int ext4_break_layouts(struct inode *inode)
* Returns: 0 on success or negative on failure
*/
-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
- ext4_lblk_t first_block, stop_block;
- struct address_space *mapping = inode->i_mapping;
- loff_t first_block_offset, last_block_offset;
+ ext4_lblk_t start_lblk, end_lblk;
+ loff_t max_end = sb->s_maxbytes;
+ loff_t end = offset + length;
handle_t *handle;
unsigned int credits;
- int ret = 0, ret2 = 0;
+ int ret;
trace_ext4_punch_hole(inode, offset, length, 0);
-
- ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
- if (ext4_has_inline_data(inode)) {
- filemap_invalidate_lock(mapping);
- ret = ext4_convert_inline_data(inode);
- filemap_invalidate_unlock(mapping);
- if (ret)
- return ret;
- }
+ WARN_ON_ONCE(!inode_is_locked(inode));
/*
- * Write out all dirty pages to avoid race conditions
- * Then release them.
+ * For indirect-block based inodes, make sure that the hole ends
+ * within one block before the maximum file size.
*/
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- ret = filemap_write_and_wait_range(mapping, offset,
- offset + length - 1);
- if (ret)
- return ret;
- }
-
- inode_lock(inode);
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
/* No need to punch hole beyond i_size */
- if (offset >= inode->i_size)
- goto out_mutex;
+ if (offset >= inode->i_size || offset >= max_end)
+ return 0;
/*
- * If the hole extends beyond i_size, set the hole
- * to end after the page that contains i_size
+ * If the hole extends beyond i_size, set the hole to end after
+ * the block that contains i_size to save pointless tail block zeroing.
*/
- if (offset + length > inode->i_size) {
- length = inode->i_size +
- PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
- offset;
- }
+ if (end >= inode->i_size)
+ end = round_up(inode->i_size, sb->s_blocksize);
+ if (end > max_end)
+ end = max_end;
+ length = end - offset;
- if (offset & (sb->s_blocksize - 1) ||
- (offset + length) & (sb->s_blocksize - 1)) {
- /*
- * Attach jinode to inode for jbd2 if we do any zeroing of
- * partial block
- */
+ /*
+ * Attach jinode to the inode for jbd2 if we do any zeroing of a
+ * partial block.
+ */
+ if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
- goto out_mutex;
-
+ return ret;
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
- inode_dio_wait(inode);
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
+ ret = ext4_update_disksize_before_punch(inode, offset, length);
if (ret)
- goto out_dio;
-
- first_block_offset = round_up(offset, sb->s_blocksize);
- last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+ return ret;
/* Now release the pages and zero block aligned part of pages*/
- if (last_block_offset > first_block_offset) {
- ret = ext4_update_disksize_before_punch(inode, offset, length);
- if (ret)
- goto out_dio;
- truncate_pagecache_range(inode, first_block_offset,
- last_block_offset);
- }
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+ return ret;
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 2);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
- goto out_dio;
+ return ret;
}
- ret = ext4_zero_partial_blocks(handle, inode, offset,
- length);
+ ret = ext4_zero_partial_blocks(handle, inode, offset, length);
if (ret)
- goto out_stop;
-
- first_block = (offset + sb->s_blocksize - 1) >>
- EXT4_BLOCK_SIZE_BITS(sb);
- stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+ goto out_handle;
/* If there are blocks to remove, do it */
- if (stop_block > first_block) {
+ start_lblk = EXT4_B_TO_LBLK(inode, offset);
+ end_lblk = end >> inode->i_blkbits;
+ if (end_lblk > start_lblk) {
+ ext4_lblk_t hole_len = end_lblk - start_lblk;
+
+ ext4_fc_track_inode(handle, inode);
+ ext4_check_map_extents_env(inode);
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
- ret = ext4_es_remove_extent(inode, first_block,
- stop_block - first_block);
- if (ret) {
- up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
- }
+ ext4_es_remove_extent(inode, start_lblk, hole_len);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ret = ext4_ext_remove_space(inode, first_block,
- stop_block - 1);
+ ret = ext4_ext_remove_space(inode, start_lblk,
+ end_lblk - 1);
else
- ret = ext4_ind_remove_space(handle, inode, first_block,
- stop_block);
+ ret = ext4_ind_remove_space(handle, inode, start_lblk,
+ end_lblk);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto out_handle;
+ }
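+ /* Cache the punched range as a hole in the extent status tree. */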
+ ext4_es_insert_extent(inode, start_lblk, hole_len, ~0,
+ EXTENT_STATUS_HOLE, 0);
up_write(&EXT4_I(inode)->i_data_sem);
}
- ext4_fc_track_range(handle, inode, first_block, stop_block);
+ ext4_fc_track_range(handle, inode, start_lblk, end_lblk);
+
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret))
+ goto out_handle;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
-
- inode->i_mtime = inode->i_ctime = current_time(inode);
- ret2 = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(ret2))
- ret = ret2;
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
-out_stop:
+out_handle:
ext4_journal_stop(handle);
-out_dio:
- filemap_invalidate_unlock(mapping);
-out_mutex:
- inode_unlock(inode);
return ret;
}
@@ -4140,9 +4548,9 @@ int ext4_truncate(struct inode *inode)
/*
* There is a possibility that we're either freeing the inode
* or it's a completely new inode. In those cases we might not
- * have i_mutex locked because it's not necessary.
+ * have i_rwsem locked because it's not necessary.
*/
- if (!(inode->i_state & (I_NEW|I_FREEING)))
+ if (!(inode_state_read_once(inode) & (I_NEW | I_FREEING)))
WARN_ON(!inode_is_locked(inode));
trace_ext4_truncate_enter(inode);
@@ -4162,12 +4570,13 @@ int ext4_truncate(struct inode *inode)
/* If we zero-out tail of the page, we have to create jinode for jbd2 */
if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
- if (ext4_inode_attach_jinode(inode) < 0)
+ err = ext4_inode_attach_jinode(inode);
+ if (err)
goto out_trace;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- credits = ext4_writepage_trans_blocks(inode);
+ credits = ext4_chunk_trans_extent(inode, 1);
else
credits = ext4_blocks_for_truncate(inode);
@@ -4193,9 +4602,11 @@ int ext4_truncate(struct inode *inode)
if (err)
goto out_stop;
- down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_fc_track_inode(handle, inode);
+ ext4_check_map_extents_env(inode);
- ext4_discard_preallocations(inode, 0);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
err = ext4_ext_truncate(handle, inode);
@@ -4220,7 +4631,7 @@ out_stop:
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(err2 && !err))
err = err2;
@@ -4306,7 +4717,7 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
* old inodes get re-used with the upper 16 bits of the
* uid/gid intact.
*/
- if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+ if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
} else {
@@ -4323,9 +4734,9 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
- EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_SET_CTIME(inode, raw_inode);
+ EXT4_INODE_SET_MTIME(inode, raw_inode);
+ EXT4_INODE_SET_ATIME(inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
@@ -4410,9 +4821,17 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
inode_offset = ((ino - 1) %
EXT4_INODES_PER_GROUP(sb));
- block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+ block = ext4_inode_table(sb, gdp);
+ if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
+ (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
+ ext4_error(sb, "Invalid inode table block %llu in "
+ "block_group %u", block, iloc->block_group);
+ return -EFSCORRUPTED;
+ }
+ block += (inode_offset / inodes_per_block);
+
bh = sb_getblk(sb, block);
if (unlikely(!bh))
return -ENOMEM;
@@ -4505,10 +4924,10 @@ make_io:
* Read the block from disk.
*/
trace_ext4_load_inode(sb, ino);
- ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
+ ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL,
+ ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO));
blk_finish_plug(&plug);
wait_on_buffer(bh);
- ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
if (!buffer_uptodate(bh)) {
if (ret_block)
*ret_block = block;
@@ -4523,7 +4942,7 @@ has_buffer:
static int __ext4_get_inode_loc_noinmem(struct inode *inode,
struct ext4_iloc *iloc)
{
- ext4_fsblk_t err_blk;
+ ext4_fsblk_t err_blk = 0;
int ret;
ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
@@ -4538,7 +4957,7 @@ static int __ext4_get_inode_loc_noinmem(struct inode *inode,
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
- ext4_fsblk_t err_blk;
+ ext4_fsblk_t err_blk = 0;
int ret;
ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
@@ -4646,11 +5065,20 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
- if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
- EXT4_INODE_SIZE(inode->i_sb) &&
+ if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ int err;
+
+ err = xattr_check_inode(inode, IHDR(inode, raw_inode),
+ ITAIL(inode, raw_inode));
+ if (err)
+ return err;
+
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
- return ext4_find_inline_data_nolock(inode);
+ err = ext4_find_inline_data_nolock(inode);
+ if (!err && ext4_has_inline_data(inode))
+ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ return err;
} else
EXT4_I(inode)->i_inline_off = 0;
return 0;
@@ -4677,6 +5105,64 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
inode_set_iversion_queried(inode, val);
}
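+/*
+ * Sanity check an inode after iget(): an EA inode must only be looked up
+ * with EXT4_IGET_EA_INODE, a regular inode must not carry the EA_INODE
+ * flag, and a bad inode is only acceptable with EXT4_IGET_BAD.
+ */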
+static int check_igot_inode(struct inode *inode, ext4_iget_flags flags,
+ const char *function, unsigned int line)
+{
+ const char *err_str;
+
+ if (flags & EXT4_IGET_EA_INODE) {
+ if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ err_str = "missing EA_INODE flag";
+ goto error;
+ }
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ EXT4_I(inode)->i_file_acl) {
+ err_str = "ea_inode with extended attributes";
+ goto error;
+ }
+ } else {
+ if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ /*
+ * open_by_handle_at() could provide an old inode number
+ * that has since been reused for an ea_inode; this does
+ * not indicate filesystem corruption
+ */
+ if (flags & EXT4_IGET_HANDLE)
+ return -ESTALE;
+ err_str = "unexpected EA_INODE flag";
+ goto error;
+ }
+ }
+ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
+ err_str = "unexpected bad inode w/o EXT4_IGET_BAD";
+ goto error;
+ }
+ return 0;
+
+error:
+ ext4_error_inode(inode, function, line, 0, "%s", err_str);
+ return -EFSCORRUPTED;
+}
+
+void ext4_set_inode_mapping_order(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ u16 min_order, max_order;
+
+ max_order = EXT4_SB(sb)->s_max_folio_order;
+ if (!max_order)
+ return;
+
+ min_order = EXT4_SB(sb)->s_min_folio_order;
+ if (!min_order && !S_ISREG(inode->i_mode))
+ return;
+
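+ /*
+ * Inodes in data=journal mode do not support large folios, so clamp
+ * them to the minimum supported order.
+ */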
+ if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+ max_order = min_order;
+
+ mapping_set_folio_order_range(inode->i_mapping, min_order, max_order);
+}
+
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ext4_iget_flags flags, const char *function,
unsigned int line)
@@ -4694,12 +5180,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
gid_t i_gid;
projid_t i_projid;
- if ((!(flags & EXT4_IGET_SPECIAL) &&
- ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
- ino == le32_to_cpu(es->s_usr_quota_inum) ||
- ino == le32_to_cpu(es->s_grp_quota_inum) ||
- ino == le32_to_cpu(es->s_prj_quota_inum) ||
- ino == le32_to_cpu(es->s_orphan_file_inum))) ||
+ if ((!(flags & EXT4_IGET_SPECIAL) && is_special_ino(sb, ino)) ||
(ino < EXT4_ROOT_INO) ||
(ino > le32_to_cpu(es->s_inodes_count))) {
if (flags & EXT4_IGET_HANDLE)
@@ -4713,8 +5194,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW)) {
+ ret = check_igot_inode(inode, flags, function, line);
+ if (ret) {
+ iput(inode);
+ return ERR_PTR(ret);
+ }
return inode;
+ }
ei = EXT4_I(inode);
iloc.bh = NULL;
@@ -4724,13 +5211,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
- if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
- ext4_error_inode(inode, function, line, 0,
- "iget: root inode unallocated");
- ret = -EFSCORRUPTED;
- goto bad_inode;
- }
-
if ((flags & EXT4_IGET_HANDLE) &&
(raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
ret = -ESTALE;
@@ -4754,15 +5234,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ei->i_extra_isize = 0;
/* Precompute checksum seed for inode metadata */
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = raw_inode->i_generation;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum,
sizeof(inum));
- ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
- sizeof(gen));
+ ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
@@ -4793,7 +5272,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ei->i_projid = make_kprojid(&init_user_ns, i_projid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
- ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ei->i_inline_off = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
@@ -4803,11 +5281,16 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
* NeilBrown 1999oct15
*/
if (inode->i_nlink == 0) {
- if ((inode->i_mode == 0 ||
+ if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
ino != EXT4_BOOT_LOADER_INO) {
- /* this inode is deleted */
- ret = -ESTALE;
+ /* this inode is deleted or unallocated */
+ if (flags & EXT4_IGET_SPECIAL) {
+ ext4_error_inode(inode, function, line, 0,
+ "iget: special inode unallocated");
+ ret = -EFSCORRUPTED;
+ } else
+ ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
@@ -4819,13 +5302,22 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
ext4_set_inode_flags(inode, true);
+ /* Detect invalid flag combination - can't have both inline data and extents */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ext4_error_inode(inode, function, line, 0,
+ "inode has both inline data and extents flags");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(sb, raw_inode);
- if ((size = i_size_read(inode)) < 0) {
+ size = i_size_read(inode);
+ if (size < 0 || size > ext4_get_maxbytes(inode)) {
ext4_error_inode(inode, function, line, 0,
"iget: bad i_size value: %lld", size);
ret = -EFSCORRUPTED;
@@ -4836,7 +5328,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
* we'd normally treat htree data as empty space. But with metadata
* checksumming that corrupts checksums so forbid that.
*/
- if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
+ if (!ext4_has_feature_dir_index(sb) &&
+ ext4_has_feature_metadata_csum(sb) &&
ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
ext4_error_inode(inode, function, line, 0,
"iget: Dir with htree data on filesystem without dir_index feature.");
@@ -4897,9 +5390,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
}
- EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
- EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_GET_CTIME(inode, raw_inode);
+ EXT4_INODE_GET_ATIME(inode, raw_inode);
+ EXT4_INODE_GET_MTIME(inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
@@ -4954,17 +5447,23 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
if (IS_ENCRYPTED(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
- ext4_set_aops(inode);
} else if (ext4_inode_is_fast_symlink(inode)) {
- inode->i_link = (char *)ei->i_data;
inode->i_op = &ext4_fast_symlink_inode_operations;
- nd_terminate_link(ei->i_data, inode->i_size,
- sizeof(ei->i_data) - 1);
+ if (inode->i_size == 0 ||
+ inode->i_size >= sizeof(ei->i_data) ||
+ strnlen((char *)ei->i_data, inode->i_size + 1) !=
+ inode->i_size) {
+ ext4_error_inode(inode, function, line, 0,
+ "invalid fast symlink length %llu",
+ (unsigned long long)inode->i_size);
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+ inode_set_cached_link(inode, (char *)ei->i_data,
+ inode->i_size);
} else {
inode->i_op = &ext4_symlink_inode_operations;
- ext4_set_aops(inode);
}
- inode_nohighmem(inode);
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &ext4_special_inode_operations;
@@ -4982,11 +5481,32 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
"iget: bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
- if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
+ if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
ext4_error_inode(inode, function, line, 0,
"casefold flag without casefold feature");
- brelse(iloc.bh);
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+ ext4_set_inode_mapping_order(inode);
+
+ ret = check_igot_inode(inode, flags, function, line);
+ /*
+ * -ESTALE here means there is nothing inherently wrong with the inode,
+ * it's just not an inode we can return for an fhandle lookup.
+ */
+ if (ret == -ESTALE) {
+ brelse(iloc.bh);
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ if (ret)
+ goto bad_inode;
+ brelse(iloc.bh);
+ /* Initialize the "no ACL's" state for the simple cases */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) && !ei->i_file_acl)
+ cache_no_acl(inode);
unlock_new_inode(inode);
return inode;
@@ -5014,13 +5534,13 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
if (inode_is_dirtytime_only(inode)) {
struct ext4_inode_info *ei = EXT4_I(inode);
- inode->i_state &= ~I_DIRTY_TIME;
+ inode_state_clear(inode, I_DIRTY_TIME);
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
- EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_SET_CTIME(inode, raw_inode);
+ EXT4_INODE_SET_MTIME(inode, raw_inode);
+ EXT4_INODE_SET_ATIME(inode, raw_inode);
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
trace_ext4_other_inode_update_time(inode, orig_ino);
@@ -5168,16 +5688,16 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
- sb_rdonly(inode->i_sb))
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
return 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ err = ext4_emergency_state(inode->i_sb);
+ if (unlikely(err))
+ return err;
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
- jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
+ ext4_debug("called recursively, non-PF_MEMALLOC!\n");
dump_stack();
return -EIO;
}
@@ -5215,47 +5735,49 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
- * buffers that are attached to a page stradding i_size and are undergoing
+ * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
+ * buffers that are attached to a folio straddling i_size and are undergoing
* commit. In that case we have to wait for commit to finish and try again.
*/
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
- struct page *page;
unsigned offset;
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- tid_t commit_tid = 0;
+ tid_t commit_tid;
int ret;
+ bool has_transaction;
offset = inode->i_size & (PAGE_SIZE - 1);
/*
- * If the page is fully truncated, we don't need to wait for any commit
- * (and we even should not as __ext4_journalled_invalidatepage() may
- * strip all buffers from the page but keep the page dirty which can then
- * confuse e.g. concurrent ext4_writepage() seeing dirty page without
+ * If the folio is fully truncated, we don't need to wait for any commit
+ * (and we even should not as __ext4_journalled_invalidate_folio() may
+ * strip all buffers from the folio but keep the folio dirty which can then
+ * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
* buffers). Also we don't need to wait for any commit if all buffers in
- * the page remain valid. This is most beneficial for the common case of
+ * the folio remain valid. This is most beneficial for the common case of
* blocksize == PAGESIZE.
*/
if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
return;
while (1) {
- page = find_lock_page(inode->i_mapping,
+ struct folio *folio = filemap_lock_folio(inode->i_mapping,
inode->i_size >> PAGE_SHIFT);
- if (!page)
+ if (IS_ERR(folio))
return;
- ret = __ext4_journalled_invalidatepage(page, offset,
- PAGE_SIZE - offset);
- unlock_page(page);
- put_page(page);
+ ret = __ext4_journalled_invalidate_folio(folio, offset,
+ folio_size(folio) - offset);
+ folio_unlock(folio);
+ folio_put(folio);
if (ret != -EBUSY)
return;
- commit_tid = 0;
+ has_transaction = false;
read_lock(&journal->j_state_lock);
- if (journal->j_committing_transaction)
+ if (journal->j_committing_transaction) {
commit_tid = journal->j_committing_transaction->t_tid;
+ has_transaction = true;
+ }
read_unlock(&journal->j_state_lock);
- if (commit_tid)
+ if (has_transaction)
jbd2_log_wait_commit(journal, commit_tid);
}
}
@@ -5282,18 +5804,20 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
* transaction are already on disk (truncate waits for pages under
* writeback).
*
- * Called with inode->i_mutex down.
+ * Called with inode->i_rwsem down.
*/
-int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error, rc = 0;
int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
+ bool inc_ivers = true;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ error = ext4_emergency_state(inode->i_sb);
+ if (unlikely(error))
+ return error;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
@@ -5303,7 +5827,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
ATTR_GID | ATTR_TIMES_SET))))
return -EPERM;
- error = setattr_prepare(mnt_userns, dentry, attr);
+ error = setattr_prepare(idmap, dentry, attr);
if (error)
return error;
@@ -5315,14 +5839,14 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (error)
return error;
- if (is_quota_modification(inode, attr)) {
+ if (is_quota_modification(idmap, inode, attr)) {
error = dquot_initialize(inode);
if (error)
return error;
}
- ext4_fc_start_update(inode);
- if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
- (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
+
+ if (i_uid_needs_update(idmap, attr, inode) ||
+ i_gid_needs_update(idmap, attr, inode)) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
@@ -5339,24 +5863,20 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
* counts xattr inode references.
*/
down_read(&EXT4_I(inode)->xattr_sem);
- error = dquot_transfer(inode, attr);
+ error = dquot_transfer(idmap, inode, attr);
up_read(&EXT4_I(inode)->xattr_sem);
if (error) {
ext4_journal_stop(handle);
- ext4_fc_stop_update(inode);
return error;
}
/* Update corresponding info in inode so that everything is in
* one transaction */
- if (attr->ia_valid & ATTR_UID)
- inode->i_uid = attr->ia_uid;
- if (attr->ia_valid & ATTR_GID)
- inode->i_gid = attr->ia_gid;
+ i_uid_update(idmap, attr, inode);
+ i_gid_update(idmap, attr, inode);
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
if (unlikely(error)) {
- ext4_fc_stop_update(inode);
return error;
}
}
@@ -5364,23 +5884,22 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (attr->ia_valid & ATTR_SIZE) {
handle_t *handle;
loff_t oldsize = inode->i_size;
+ loff_t old_disksize;
int shrink = (attr->ia_size < inode->i_size);
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (attr->ia_size > sbi->s_bitmap_maxbytes) {
- ext4_fc_stop_update(inode);
return -EFBIG;
}
}
if (!S_ISREG(inode->i_mode)) {
- ext4_fc_stop_update(inode);
return -EINVAL;
}
- if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
- inode_inc_iversion(inode);
+ if (attr->ia_size == inode->i_size)
+ inc_ivers = false;
if (shrink) {
if (ext4_should_order_data(inode)) {
@@ -5405,6 +5924,14 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
if (attr->ia_size != inode->i_size) {
+ /* attach jbd2 jinode for EOF folio tail zeroing */
+ if (attr->ia_size & (inode->i_sb->s_blocksize - 1) ||
+ oldsize & (inode->i_sb->s_blocksize - 1)) {
+ error = ext4_inode_attach_jinode(inode);
+ if (error)
+ goto out_mmap_sem;
+ }
+
handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
@@ -5415,20 +5942,23 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
orphan = 1;
}
/*
- * Update c/mtime on truncate up, ext4_truncate() will
- * update c/mtime in shrink case below
+ * Update c/mtime and tail zero the EOF folio on
+ * truncate up. ext4_truncate() handles the shrink case
+ * below.
*/
if (!shrink) {
- inode->i_mtime = current_time(inode);
- inode->i_ctime = inode->i_mtime;
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
+ if (oldsize & (inode->i_sb->s_blocksize - 1))
+ ext4_block_truncate_page(handle,
+ inode->i_mapping, oldsize);
}
if (shrink)
ext4_fc_track_range(handle, inode,
(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
inode->i_sb->s_blocksize_bits,
- (oldsize > 0 ? oldsize - 1 : 0) >>
- inode->i_sb->s_blocksize_bits);
+ EXT_MAX_BLOCKS - 1);
else
ext4_fc_track_range(
handle, inode,
@@ -5438,10 +5968,9 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
inode->i_sb->s_blocksize_bits);
down_write(&EXT4_I(inode)->i_data_sem);
+ old_disksize = EXT4_I(inode)->i_disksize;
EXT4_I(inode)->i_disksize = attr->ia_size;
- rc = ext4_mark_inode_dirty(handle, inode);
- if (!error)
- error = rc;
+
/*
* We have to update i_size under i_data_sem together
* with i_disksize to avoid races with writeback code
@@ -5449,7 +5978,12 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
*/
if (!error)
i_size_write(inode, attr->ia_size);
+ else
+ EXT4_I(inode)->i_disksize = old_disksize;
up_write(&EXT4_I(inode)->i_data_sem);
+ rc = ext4_mark_inode_dirty(handle, inode);
+ if (!error)
+ error = rc;
ext4_journal_stop(handle);
if (error)
goto out_mmap_sem;
@@ -5480,7 +6014,9 @@ out_mmap_sem:
}
if (!error) {
- setattr_copy(mnt_userns, inode, attr);
+ if (inc_ivers)
+ inode_inc_iversion(inode);
+ setattr_copy(idmap, inode, attr);
mark_inode_dirty(inode);
}
@@ -5492,18 +6028,33 @@ out_mmap_sem:
ext4_orphan_del(NULL, inode);
if (!error && (ia_valid & ATTR_MODE))
- rc = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
+ rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
err_out:
if (error)
ext4_std_error(inode->i_sb, error);
if (!error)
error = rc;
- ext4_fc_stop_update(inode);
return error;
}
-int ext4_getattr(struct user_namespace *mnt_userns, const struct path *path,
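+/*
+ * Return the required direct I/O alignment for this inode in bytes: 0 means
+ * direct I/O is not supported at all, and 1 means the generic iomap/bdev
+ * limits apply.
+ */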
+u32 ext4_dio_alignment(struct inode *inode)
+{
+ if (fsverity_active(inode))
+ return 0;
+ if (ext4_should_journal_data(inode))
+ return 0;
+ if (ext4_has_inline_data(inode))
+ return 0;
+ if (IS_ENCRYPTED(inode)) {
+ if (!fscrypt_dio_supported(inode))
+ return 0;
+ return i_blocksize(inode);
+ }
+ return 1; /* use the iomap defaults */
+}
+
+int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -5518,6 +6069,39 @@ int ext4_getattr(struct user_namespace *mnt_userns, const struct path *path,
stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
}
+ /*
+ * Return the DIO alignment restrictions if requested. We only return
+ * this information when requested, since on encrypted files it might
+ * take a fair bit of work to get if the file wasn't opened recently.
+ */
+ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
+ u32 dio_align = ext4_dio_alignment(inode);
+
+ stat->result_mask |= STATX_DIOALIGN;
+ if (dio_align == 1) {
+ struct block_device *bdev = inode->i_sb->s_bdev;
+
+ /* iomap defaults */
+ stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
+ stat->dio_offset_align = bdev_logical_block_size(bdev);
+ } else {
+ stat->dio_mem_align = dio_align;
+ stat->dio_offset_align = dio_align;
+ }
+ }
+
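+ /*
+ * Report the supported atomic write unit limits when requested and
+ * the inode is capable of hardware atomic writes.
+ */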
+ if ((request_mask & STATX_WRITE_ATOMIC) && S_ISREG(inode->i_mode)) {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ unsigned int awu_min = 0, awu_max = 0;
+
+ if (ext4_inode_can_atomic_write(inode)) {
+ awu_min = sbi->s_awu_min;
+ awu_max = sbi->s_awu_max;
+ }
+
+ generic_fill_statx_atomic_writes(stat, awu_min, awu_max, 0);
+ }
+
flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
if (flags & EXT4_APPEND_FL)
stat->attributes |= STATX_ATTR_APPEND;
@@ -5539,18 +6123,18 @@ int ext4_getattr(struct user_namespace *mnt_userns, const struct path *path,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(mnt_userns, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
-int ext4_file_getattr(struct user_namespace *mnt_userns,
+int ext4_file_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
u64 delalloc_blocks;
- ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
+ ext4_getattr(idmap, path, stat, request_mask, query_flags);
/*
* If there is inline data in the inode, the inode will normally not
@@ -5596,22 +6180,19 @@ static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
*
* Also account for superblock, inode, quota and xattr blocks
*/
-static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
- int pextents)
+int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
int idxblocks;
- int ret = 0;
+ int ret;
/*
- * How many index blocks need to touch to map @lblocks logical blocks
- * to @pextents physical extents?
+ * How many index and leaf blocks do we need to touch to map @lblocks
+ * logical blocks to @pextents physical extents?
*/
idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
- ret = idxblocks;
-
/*
* Now let's see how many group bitmaps and group descriptors need
* to account
@@ -5624,7 +6205,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
/* bitmaps and block group descriptor blocks */
- ret += groups + gdpblocks;
+ ret = idxblocks + groups + gdpblocks;
/* Blocks for super block, inode, quota and xattr blocks */
ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
@@ -5633,25 +6214,19 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
}
/*
- * Calculate the total number of credits to reserve to fit
- * the modification of a single pages into a single transaction,
- * which may include multiple chunks of block allocations.
- *
- * This could be called via ext4_write_begin()
- *
- * We need to consider the worse case, when
- * one new block per extent.
+ * Calculate the journal credits for modifying the number of blocks
+ * in a single extent within one transaction. 'nrblocks' is used only
+ * for non-extent inodes. For extent type inodes, 'nrblocks' can be
+ * zero if the exact number of blocks is unknown.
*/
-int ext4_writepage_trans_blocks(struct inode *inode)
+int ext4_chunk_trans_extent(struct inode *inode, int nrblocks)
{
- int bpp = ext4_journal_blocks_per_page(inode);
int ret;
- ret = ext4_meta_trans_blocks(inode, bpp, bpp);
-
+ ret = ext4_meta_trans_blocks(inode, nrblocks, 1);
/* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode))
- ret += bpp;
+ ret += nrblocks;
return ret;
}
@@ -5678,15 +6253,13 @@ int ext4_mark_iloc_dirty(handle_t *handle,
{
int err = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+ err = ext4_emergency_state(inode->i_sb);
+ if (unlikely(err)) {
put_bh(iloc->bh);
- return -EIO;
+ return err;
}
ext4_fc_track_inode(handle, inode);
- if (IS_I_VERSION(inode))
- inode_inc_iversion(inode);
-
/* the do_update_inode consumes one bh->b_count */
get_bh(iloc->bh);
@@ -5707,8 +6280,9 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
{
int err;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ err = ext4_emergency_state(inode->i_sb);
+ if (unlikely(err))
+ return err;
err = ext4_get_inode_loc(inode, iloc);
if (!err) {
@@ -5719,6 +6293,7 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
brelse(iloc->bh);
iloc->bh = NULL;
}
+ ext4_fc_track_inode(handle, inode);
}
ext4_std_error(inode->i_sb, err);
return err;
@@ -5762,6 +6337,14 @@ static int __ext4_expand_extra_isize(struct inode *inode,
return 0;
}
+ /*
+ * We may need to allocate an external xattr block, so we need quotas
+ * initialized. Here we can be called with various locks held, so we
+ * cannot afford to initialize quotas ourselves. Just bail out.
+ */
+ if (dquot_initialize_needed(inode))
+ return -EAGAIN;
+
/* try to expand with EAs present */
error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
raw_inode, handle);
@@ -5925,7 +6508,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
journal_t *journal;
handle_t *handle;
int err;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int alloc_ctx;
/*
* We have to be very careful here: changing a data block's
@@ -5954,16 +6537,16 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
* dirty data which can be converted only after flushing the dirty
* data (and journalled aops don't know how to handle these cases).
*/
- if (val) {
- filemap_invalidate_lock(inode->i_mapping);
- err = filemap_write_and_wait(inode->i_mapping);
- if (err < 0) {
- filemap_invalidate_unlock(inode->i_mapping);
- return err;
- }
+ filemap_invalidate_lock(inode->i_mapping);
+ err = filemap_write_and_wait(inode->i_mapping);
+ if (err < 0) {
+ filemap_invalidate_unlock(inode->i_mapping);
+ return err;
}
+ /* Before switching the inode's journalling mode, evict all the page cache. */
+ truncate_pagecache(inode, 0);
- percpu_down_write(&sbi->s_writepages_rwsem);
+ alloc_ctx = ext4_writepages_down_write(inode->i_sb);
jbd2_journal_lock_updates(journal);
/*
@@ -5980,18 +6563,18 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
err = jbd2_journal_flush(journal, 0);
if (err < 0) {
jbd2_journal_unlock_updates(journal);
- percpu_up_write(&sbi->s_writepages_rwsem);
+ ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ filemap_invalidate_unlock(inode->i_mapping);
return err;
}
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
}
ext4_set_aops(inode);
+ ext4_set_inode_mapping_order(inode);
jbd2_journal_unlock_updates(journal);
- percpu_up_write(&sbi->s_writepages_rwsem);
-
- if (val)
- filemap_invalidate_unlock(inode->i_mapping);
+ ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ filemap_invalidate_unlock(inode->i_mapping);
/* Finally we can mark the inode as dirty. */
@@ -6000,7 +6583,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
return PTR_ERR(handle);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+ EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
@@ -6015,10 +6598,59 @@ static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
return !buffer_mapped(bh);
}
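+/*
+ * Helper for ext4_page_mkwrite(): start a transaction, map the buffers of
+ * the faulted folio with @get_block, and either dirty the folio or, in
+ * data=journal mode, add the folio buffers to the running transaction.
+ * Returns with the folio locked on success.
+ */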
+static int ext4_block_page_mkwrite(struct inode *inode, struct folio *folio,
+ get_block_t get_block)
+{
+ handle_t *handle;
+ loff_t size;
+ unsigned long len;
+ int credits;
+ int ret;
+
+ credits = ext4_chunk_trans_extent(inode,
+ ext4_journal_blocks_per_folio(inode));
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ folio_lock(folio);
+ size = i_size_read(inode);
+ /* Page got truncated from under us? */
+ if (folio->mapping != inode->i_mapping || folio_pos(folio) > size) {
+ ret = -EFAULT;
+ goto out_error;
+ }
+
+ len = folio_size(folio);
+ if (folio_pos(folio) + len > size)
+ len = size - folio_pos(folio);
+
+ ret = ext4_block_write_begin(handle, folio, 0, len, get_block);
+ if (ret)
+ goto out_error;
+
+ if (!ext4_should_journal_data(inode)) {
+ block_commit_write(folio, 0, len);
+ folio_mark_dirty(folio);
+ } else {
+ ret = ext4_journal_folio_buffers(handle, folio, len);
+ if (ret)
+ goto out_error;
+ }
+ ext4_journal_stop(handle);
+ folio_wait_stable(folio);
+ return ret;
+
+out_error:
+ folio_unlock(folio);
+ ext4_journal_stop(handle);
+ return ret;
+}
+
vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
loff_t size;
unsigned long len;
int err;
@@ -6026,8 +6658,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
- handle_t *handle;
- get_block_t *get_block;
+ get_block_t *get_block = ext4_get_block;
int retries = 0;
if (unlikely(IS_IMMUTABLE(inode)))
@@ -6062,19 +6693,18 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
goto out_ret;
}
- lock_page(page);
+ folio_lock(folio);
size = i_size_read(inode);
/* Page got truncated from under us? */
- if (page->mapping != mapping || page_offset(page) > size) {
- unlock_page(page);
+ if (folio->mapping != mapping || folio_pos(folio) > size) {
+ folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
goto out;
}
- if (page->index == size >> PAGE_SHIFT)
- len = size & ~PAGE_MASK;
- else
- len = PAGE_SIZE;
+ len = folio_size(folio);
+ if (folio_pos(folio) + len > size)
+ len = size - folio_pos(folio);
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
@@ -6082,80 +6712,30 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
* This cannot be done for data journalling, as we have to add the
* inode to the transaction's list to writeprotect pages on commit.
*/
- if (page_has_buffers(page)) {
- if (!ext4_walk_page_buffers(NULL, inode, page_buffers(page),
+ if (folio_buffers(folio)) {
+ if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
0, len, NULL,
ext4_bh_unmapped)) {
/* Wait so that we don't change page under IO */
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
ret = VM_FAULT_LOCKED;
goto out;
}
}
- unlock_page(page);
+ folio_unlock(folio);
/* OK, we need to fill the hole... */
if (ext4_should_dioread_nolock(inode))
get_block = ext4_get_block_unwritten;
- else
- get_block = ext4_get_block;
retry_alloc:
- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
- ext4_writepage_trans_blocks(inode));
- if (IS_ERR(handle)) {
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
- /*
- * Data journalling can't use block_page_mkwrite() because it
- * will set_buffer_dirty() before do_journal_get_write_access()
- * thus might hit warning messages for dirty metadata buffers.
- */
- if (!ext4_should_journal_data(inode)) {
- err = block_page_mkwrite(vma, vmf, get_block);
- } else {
- lock_page(page);
- size = i_size_read(inode);
- /* Page got truncated from under us? */
- if (page->mapping != mapping || page_offset(page) > size) {
- ret = VM_FAULT_NOPAGE;
- goto out_error;
- }
-
- if (page->index == size >> PAGE_SHIFT)
- len = size & ~PAGE_MASK;
- else
- len = PAGE_SIZE;
-
- err = __block_write_begin(page, 0, len, ext4_get_block);
- if (!err) {
- ret = VM_FAULT_SIGBUS;
- if (ext4_walk_page_buffers(handle, inode,
- page_buffers(page), 0, len, NULL,
- do_journal_get_write_access))
- goto out_error;
- if (ext4_walk_page_buffers(handle, inode,
- page_buffers(page), 0, len, NULL,
- write_end_fn))
- goto out_error;
- if (ext4_jbd2_inode_add_write(handle, inode,
- page_offset(page), len))
- goto out_error;
- ext4_set_inode_state(inode, EXT4_STATE_JDATA);
- } else {
- unlock_page(page);
- }
- }
- ext4_journal_stop(handle);
- if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ /* Start journal and allocate blocks */
+ err = ext4_block_page_mkwrite(inode, folio, get_block);
+ if (err == -EAGAIN ||
+ (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)))
goto retry_alloc;
out_ret:
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
out:
filemap_invalidate_unlock_shared(mapping);
sb_end_pagefault(inode->i_sb);
return ret;
-out_error:
- unlock_page(page);
- ext4_journal_stop(handle);
- goto out;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 606dee9e08a3..7ce0fc40aec2 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -16,18 +16,268 @@
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/random.h>
-#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/iversion.h>
#include <linux/fileattr.h>
+#include <linux/uuid.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include <linux/fsmap.h>
#include "fsmap.h"
#include <trace/events/ext4.h>
-/**
+typedef void ext4_update_sb_callback(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es,
+ const void *arg);
+
+/*
+ * Superblock modification callback function for changing file system
+ * label
+ */
+static void ext4_sb_setlabel(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
+{
+ /* Sanity check, this should never happen */
+ BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX);
+
+ memcpy(es->s_volume_name, (char *)arg, EXT4_LABEL_MAX);
+}
+
+/*
+ * Superblock modification callback function for changing file system
+ * UUID.
+ */
+static void ext4_sb_setuuid(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
+{
+ memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
+}
+
+static
+int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
+ ext4_update_sb_callback func,
+ const void *arg)
+{
+ int err = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *bh = sbi->s_sbh;
+ struct ext4_super_block *es = sbi->s_es;
+
+ trace_ext4_update_sb(sb, bh->b_blocknr, 1);
+
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, sb,
+ bh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto out_err;
+
+ lock_buffer(bh);
+ func(sbi, es, arg);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(bh);
+
+ if (buffer_write_io_error(bh) || !buffer_uptodate(bh)) {
+ ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
+ "superblock detected");
+ clear_buffer_write_io_error(bh);
+ set_buffer_uptodate(bh);
+ }
+
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (err)
+ goto out_err;
+ err = sync_dirty_buffer(bh);
+out_err:
+ ext4_std_error(sb, err);
+ return err;
+}
+
+/*
+ * Update one backup superblock in the group 'grp' using the callback
+ * function 'func' and argument 'arg'. If the handle is NULL the
+ * modification is not journalled.
+ *
+ * Returns: 0 when no modification was done (no superblock in the group)
+ * 1 when the modification was successful
+ * <0 on error
+ */
+static int ext4_update_backup_sb(struct super_block *sb,
+ handle_t *handle, ext4_group_t grp,
+ ext4_update_sb_callback func, const void *arg)
+{
+ int err = 0;
+ ext4_fsblk_t sb_block;
+ struct buffer_head *bh;
+ unsigned long offset = 0;
+ struct ext4_super_block *es;
+
+ if (!ext4_bg_has_super(sb, grp))
+ return 0;
+
+ /*
+ * For group 0 there is always 1k of padding, so we have to adjust
+ * either the offset or sb_block, depending on the blocksize.
+ */
+ if (grp == 0) {
+ sb_block = 1 * EXT4_MIN_BLOCK_SIZE;
+ offset = do_div(sb_block, sb->s_blocksize);
+ } else {
+ sb_block = ext4_group_first_block_no(sb, grp);
+ offset = 0;
+ }
+
+ trace_ext4_update_sb(sb, sb_block, handle ? 1 : 0);
+
+ bh = ext4_sb_bread(sb, sb_block, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+
+ if (handle) {
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, sb,
+ bh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto out_bh;
+ }
+
+ es = (struct ext4_super_block *) (bh->b_data + offset);
+ lock_buffer(bh);
+ if (ext4_has_feature_metadata_csum(sb) &&
+ es->s_checksum != ext4_superblock_csum(es)) {
+ ext4_msg(sb, KERN_ERR, "Invalid checksum for backup "
+ "superblock %llu", sb_block);
+ unlock_buffer(bh);
+ goto out_bh;
+ }
+ func(EXT4_SB(sb), es, arg);
+ if (ext4_has_feature_metadata_csum(sb))
+ es->s_checksum = ext4_superblock_csum(es);
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+
+ if (handle) {
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (err)
+ goto out_bh;
+ } else {
+ BUFFER_TRACE(bh, "marking dirty");
+ mark_buffer_dirty(bh);
+ }
+ err = sync_dirty_buffer(bh);
+
+out_bh:
+ brelse(bh);
+ ext4_std_error(sb, err);
+ return (err) ? err : 1;
+}
+
+/*
+ * Update primary and backup superblocks using the provided function
+ * func and argument arg.
+ *
+ * Only the primary superblock and at most two backup superblock
+ * modifications are journalled; the rest is modified without journal.
+ * This is safe because e2fsck will re-write them if there is a problem,
+ * and we're very unlikely to ever need more than two backups.
+ */
+static
+int ext4_update_superblocks_fn(struct super_block *sb,
+ ext4_update_sb_callback func,
+ const void *arg)
+{
+ handle_t *handle;
+ ext4_group_t ngroups;
+ unsigned int three = 1;
+ unsigned int five = 5;
+ unsigned int seven = 7;
+ int err = 0, ret, i;
+ ext4_group_t grp, primary_grp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ /*
+ * We can't update superblocks while the online resize is running
+ */
+ if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
+ &sbi->s_ext4_flags)) {
+ ext4_msg(sb, KERN_ERR, "Can't modify superblock while "
+ "performing online resize");
+ return -EBUSY;
+ }
+
+ /*
+ * We're only going to update primary superblock and two
+ * backup superblocks in this transaction.
+ */
+ handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 3);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out;
+ }
+
+ /* Update primary superblock */
+ err = ext4_update_primary_sb(sb, handle, func, arg);
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "Failed to update primary "
+ "superblock");
+ goto out_journal;
+ }
+
+ primary_grp = ext4_get_group_number(sb, sbi->s_sbh->b_blocknr);
+ ngroups = ext4_get_groups_count(sb);
+
+ /*
+ * Update backup superblocks. We have to start from group 0
+ * because it might not be where the primary superblock is
+ * if the fs is mounted with -o sb=<backup_sb_block>
+ */
+ i = 0;
+ grp = 0;
+ while (grp < ngroups) {
+ /* Skip primary superblock */
+ if (grp == primary_grp)
+ goto next_grp;
+
+ ret = ext4_update_backup_sb(sb, handle, grp, func, arg);
+ if (ret < 0) {
+ /* Ignore bad checksum; try to update next sb */
+ if (ret == -EFSBADCRC)
+ goto next_grp;
+ err = ret;
+ goto out_journal;
+ }
+
+ i += ret;
+ if (handle && i > 1) {
+ /*
+ * We're only journalling primary superblock and
+ * two backup superblocks; the rest is not
+ * journalled.
+ */
+ err = ext4_journal_stop(handle);
+ if (err)
+ goto out;
+ handle = NULL;
+ }
+next_grp:
+ grp = ext4_list_backups(sb, &three, &five, &seven);
+ }
+
+out_journal:
+ if (handle) {
+ ret = ext4_journal_stop(handle);
+ if (ret && !err)
+ err = ret;
+ }
+out:
+ clear_bit_unlock(EXT4_FLAGS_RESIZING, &sbi->s_ext4_flags);
+ smp_mb__after_atomic();
+ return err ? err : 0;
+}
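
The walk over backup groups above is driven by ext4_list_backups(); for readers unfamiliar with the classic sparse_super layout (sparse_super2 changes this), the sketch below is an illustration only, not the kernel helper: a group carries a backup superblock iff it is group 0, group 1, or a group whose number is a power of 3, 5 or 7.

#include <stdbool.h>

/* Illustration of the sparse_super backup placement assumed by the
 * loop above; ext4_list_backups() walks exactly these groups. */
static bool is_power_of(unsigned long n, unsigned long base)
{
	if (n == 0)
		return false;
	while (n % base == 0)
		n /= base;
	return n == 1;
}

static bool group_has_backup_sb(unsigned long grp)
{
	if (grp == 0 || grp == 1)
		return true;
	return is_power_of(grp, 3) || is_power_of(grp, 5) ||
	       is_power_of(grp, 7);
}
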
+
+/*
* Swap memory between @a and @b for @len bytes.
*
* @a: pointer to first memory area
@@ -48,7 +298,7 @@ static void memswap(void *a, void *b, size_t len)
}
}
-/**
+/*
* Swap i_data and associated attributes between @inode1 and @inode2.
* This function is used for the primary swap between inode1 and inode2
* and also to revert this primary swap in case of errors.
@@ -65,13 +315,22 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
struct ext4_inode_info *ei1;
struct ext4_inode_info *ei2;
unsigned long tmp;
+ struct timespec64 ts1, ts2;
ei1 = EXT4_I(inode1);
ei2 = EXT4_I(inode2);
swap(inode1->i_version, inode2->i_version);
- swap(inode1->i_atime, inode2->i_atime);
- swap(inode1->i_mtime, inode2->i_mtime);
+
+ ts1 = inode_get_atime(inode1);
+ ts2 = inode_get_atime(inode2);
+ inode_set_atime_to_ts(inode1, ts2);
+ inode_set_atime_to_ts(inode2, ts1);
+
+ ts1 = inode_get_mtime(inode1);
+ ts2 = inode_get_mtime(inode2);
+ inode_set_mtime_to_ts(inode1, ts2);
+ inode_set_mtime_to_ts(inode2, ts1);
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
@@ -95,25 +354,25 @@ void ext4_reset_inode_seed(struct inode *inode)
__le32 gen = cpu_to_le32(inode->i_generation);
__u32 csum;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
- ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
+ ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
}
-/**
+/*
* Swap the information from the given @inode and the inode
* EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
* important fields of the inodes.
*
* @sb: the super block of the filesystem
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: the inode to swap with EXT4_BOOT_LOADER_INO
*
*/
static long swap_inode_boot_loader(struct super_block *sb,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct inode *inode)
{
handle_t *handle;
@@ -124,7 +383,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
blkcnt_t blocks;
unsigned short bytes;
- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
+ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
+ EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
if (IS_ERR(inode_bl))
return PTR_ERR(inode_bl);
ei_bl = EXT4_I(inode_bl);
@@ -142,7 +402,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
}
if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
- !inode_owner_or_capable(mnt_userns, inode) ||
+ !inode_owner_or_capable(idmap, inode) ||
!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto journal_err_out;
@@ -169,12 +429,12 @@ static long swap_inode_boot_loader(struct super_block *sb,
err = -EINVAL;
goto err_out;
}
- ext4_fc_start_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
/* Protect extent tree against block allocations via delalloc */
ext4_double_down_write_data_sem(inode, inode_bl);
- if (inode_bl->i_nlink == 0) {
+ if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
/* this inode has never been used as a BOOT_LOADER */
set_nlink(inode_bl, 1);
i_uid_write(inode_bl, 0);
@@ -183,6 +443,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
ei_bl->i_flags = 0;
inode_set_iversion(inode_bl, 1);
i_size_write(inode_bl, 0);
+ EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
inode_bl->i_mode = S_IFREG;
if (ext4_has_feature_extents(sb)) {
ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
@@ -200,14 +461,16 @@ static long swap_inode_boot_loader(struct super_block *sb,
diff = size - size_bl;
swap_inode_data(inode, inode_bl);
- inode->i_ctime = inode_bl->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
+ inode_set_ctime_current(inode_bl);
+ inode_inc_iversion(inode);
- inode->i_generation = prandom_u32();
- inode_bl->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
+ inode_bl->i_generation = get_random_u32();
ext4_reset_inode_seed(inode);
ext4_reset_inode_seed(inode_bl);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
err = ext4_mark_inode_dirty(handle, inode);
if (err < 0) {
@@ -252,7 +515,6 @@ revert:
err_out1:
ext4_journal_stop(handle);
- ext4_fc_stop_ineligible(sb);
ext4_double_up_write_data_sem(inode, inode_bl);
err_out:
@@ -263,18 +525,6 @@ journal_err_out:
return err;
}
-#ifdef CONFIG_FS_ENCRYPTION
-static int uuid_is_zero(__u8 u[16])
-{
- int i;
-
- for (i = 0; i < 16; i++)
- if (u[i])
- return 0;
- return 1;
-}
-#endif
-
/*
* If immutable is set and we are not clearing it, we're not allowed to change
* anything else in the inode. Don't error out if we're only trying to set
@@ -426,7 +676,8 @@ static int ext4_ioctl_setflags(struct inode *inode,
ext4_set_inode_flags(inode, false);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
+ inode_inc_iversion(inode);
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
@@ -492,6 +743,10 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
if (ext4_is_quota_file(inode))
return err;
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
err = ext4_get_inode_loc(inode, &iloc);
if (err)
return err;
@@ -507,10 +762,6 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
brelse(iloc.bh);
}
- err = dquot_initialize(inode);
- if (err)
- return err;
-
handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
EXT4_QUOTA_INIT_BLOCKS(sb) +
EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
@@ -536,7 +787,8 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
}
EXT4_I(inode)->i_projid = kprojid;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
+ inode_inc_iversion(inode);
out_dirty:
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
@@ -554,21 +806,15 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
}
#endif
-static int ext4_shutdown(struct super_block *sb, unsigned long arg)
+int ext4_force_shutdown(struct super_block *sb, u32 flags)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- __u32 flags;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (get_user(flags, (__u32 __user *)arg))
- return -EFAULT;
+ int ret;
if (flags > EXT4_GOING_FLAGS_NOLOGFLUSH)
return -EINVAL;
- if (ext4_forced_shutdown(sbi))
+ if (ext4_forced_shutdown(sb))
return 0;
ext4_msg(sb, KERN_ALERT, "shut down requested (%d)", flags);
@@ -576,9 +822,11 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
switch (flags) {
case EXT4_GOING_FLAGS_DEFAULT:
- freeze_bdev(sb->s_bdev);
+ ret = bdev_freeze(sb->s_bdev);
+ if (ret)
+ return ret;
set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
- thaw_bdev(sb->s_bdev);
+ bdev_thaw(sb->s_bdev);
break;
case EXT4_GOING_FLAGS_LOGFLUSH:
set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
@@ -599,6 +847,19 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
return 0;
}
+static int ext4_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return ext4_force_shutdown(sb, flags);
+}
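
A minimal userspace sketch of driving the shutdown path above. It assumes EXT4_IOC_SHUTDOWN and the EXT4_GOING_FLAGS_* values are visible through the ext4 uapi header (taken here to be <linux/ext4.h>), and that the fd may be any open file or directory on the filesystem.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/ext4.h>		/* assumed location of the ioctl defines */

static int shutdown_fs(const char *mountpoint)
{
	__u32 flags = EXT4_GOING_FLAGS_DEFAULT;	/* freeze, then mark shut down */
	int fd = open(mountpoint, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, EXT4_IOC_SHUTDOWN, &flags) < 0) {
		perror("EXT4_IOC_SHUTDOWN");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
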
+
struct getfsmap_info {
struct super_block *gi_sb;
struct fsmap_head __user *gi_data;
@@ -716,11 +977,13 @@ static long ext4_ioctl_group_add(struct file *file,
test_opt(sb, INIT_INODE_TABLE))
err = ext4_register_li_request(sb, input->group);
group_add_out:
- ext4_resize_end(sb);
+ err2 = ext4_resize_end(sb, false);
+ if (err == 0)
+ err = err2;
return err;
}
-int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -736,14 +999,13 @@ int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa)
return 0;
}
-int ext4_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int ext4_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 flags = fa->flags;
int err = -EOPNOTSUPP;
- ext4_fc_start_update(inode);
if (flags & ~EXT4_FL_USER_VISIBLE)
goto out;
@@ -764,7 +1026,6 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
goto out;
err = ext4_ioctl_setproject(inode, fa->fsx_projid);
out:
- ext4_fc_stop_update(inode);
return err;
}
@@ -805,7 +1066,6 @@ static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
__u32 flags = 0;
unsigned int flush_flags = 0;
struct super_block *sb = file_inode(filp)->i_sb;
- struct request_queue *q;
if (copy_from_user(&flags, (__u32 __user *)arg,
sizeof(__u32)))
@@ -823,13 +1083,8 @@ static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
if (!EXT4_SB(sb)->s_journal)
return -ENODEV;
- if (flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID)
- return -EINVAL;
-
- q = bdev_get_queue(EXT4_SB(sb)->s_journal->j_dev);
- if (!q)
- return -ENXIO;
- if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && !blk_queue_discard(q))
+ if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
+ !bdev_max_discard_sectors(EXT4_SB(sb)->s_journal->j_dev))
return -EOPNOTSUPP;
if (flags & EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
@@ -850,11 +1105,432 @@ static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
return err;
}
+static int ext4_ioctl_setlabel(struct file *filp, const char __user *user_label)
+{
+ size_t len;
+ int ret = 0;
+ char new_label[EXT4_LABEL_MAX + 1];
+ struct super_block *sb = file_inode(filp)->i_sb;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * Copy one byte more than the maximum label length so that the
+ * terminating null byte can be found and the label length checked.
+ * The on-disk label itself doesn't need to be null terminated.
+ */
+ if (copy_from_user(new_label, user_label, EXT4_LABEL_MAX + 1))
+ return -EFAULT;
+
+ len = strnlen(new_label, EXT4_LABEL_MAX + 1);
+ if (len > EXT4_LABEL_MAX)
+ return -EINVAL;
+
+ /*
+ * Clear the buffer after the new label
+ */
+ memset(new_label + len, 0, EXT4_LABEL_MAX - len);
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = ext4_update_superblocks_fn(sb, ext4_sb_setlabel, new_label);
+
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
+static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label)
+{
+ char label[EXT4_LABEL_MAX + 1];
+
+ /*
+ * EXT4_LABEL_MAX must always be smaller than FSLABEL_MAX because
+ * FSLABEL_MAX must include terminating null byte, while s_volume_name
+ * does not have to.
+ */
+ BUILD_BUG_ON(EXT4_LABEL_MAX >= FSLABEL_MAX);
+
+ lock_buffer(sbi->s_sbh);
+ memtostr_pad(label, sbi->s_es->s_volume_name);
+ unlock_buffer(sbi->s_sbh);
+
+ if (copy_to_user(user_label, label, sizeof(label)))
+ return -EFAULT;
+ return 0;
+}
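
These handlers are wired up to the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL ioctls further down in this diff, so a small userspace sketch needs only the definitions from <linux/fs.h>; the kernel side rejects labels longer than EXT4_LABEL_MAX.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

static int relabel(const char *mountpoint, const char *new_label)
{
	char label[FSLABEL_MAX] = "";
	int ret, fd = open(mountpoint, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
		printf("old label: %s\n", label);

	/* ext4 caps labels at EXT4_LABEL_MAX (16) bytes. */
	strncpy(label, new_label, sizeof(label) - 1);
	label[sizeof(label) - 1] = '\0';
	ret = ioctl(fd, FS_IOC_SETFSLABEL, label);
	close(fd);
	return ret;
}
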
+
+static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
+ struct fsuuid __user *ufsuuid)
+{
+ struct fsuuid fsuuid;
+ __u8 uuid[UUID_SIZE];
+
+ if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
+ return -EFAULT;
+
+ if (fsuuid.fsu_len == 0) {
+ fsuuid.fsu_len = UUID_SIZE;
+ if (copy_to_user(&ufsuuid->fsu_len, &fsuuid.fsu_len,
+ sizeof(fsuuid.fsu_len)))
+ return -EFAULT;
+ return 0;
+ }
+
+ if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0)
+ return -EINVAL;
+
+ lock_buffer(sbi->s_sbh);
+ memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE);
+ unlock_buffer(sbi->s_sbh);
+
+ fsuuid.fsu_len = UUID_SIZE;
+ if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid)) ||
+ copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
+ return -EFAULT;
+ return 0;
+}
+
+static int ext4_ioctl_setuuid(struct file *filp,
+ const struct fsuuid __user *ufsuuid)
+{
+ int ret = 0;
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fsuuid fsuuid;
+ __u8 uuid[UUID_SIZE];
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * If any checksums (group descriptors or metadata) are being used
+ * then the checksum seed feature is required to change the UUID.
+ */
+ if (((ext4_has_feature_gdt_csum(sb) ||
+ ext4_has_feature_metadata_csum(sb))
+ && !ext4_has_feature_csum_seed(sb))
+ || ext4_has_feature_stable_inodes(sb))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
+ return -EFAULT;
+
+ if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
+ return -EINVAL;
+
+ if (copy_from_user(uuid, &ufsuuid->fsu_uuid[0], UUID_SIZE))
+ return -EFAULT;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = ext4_update_superblocks_fn(sb, ext4_sb_setuuid, &uuid);
+ mnt_drop_write_file(filp);
+
+ return ret;
+}
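
The fsu_len == 0 case above implements a "query the size first" convention. A userspace sketch of both steps follows, assuming struct fsuuid and EXT4_IOC_GETFSUUID come from the ext4 uapi header (<linux/ext4.h>).

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ext4.h>		/* assumed location of struct fsuuid */

static int print_fs_uuid(int fd)
{
	struct fsuuid probe = { .fsu_len = 0, .fsu_flags = 0 };
	struct fsuuid *fsuuid;
	unsigned int i;

	/* First call: fsu_len == 0 asks the kernel for the required size. */
	if (ioctl(fd, EXT4_IOC_GETFSUUID, &probe) < 0)
		return -1;

	fsuuid = calloc(1, sizeof(*fsuuid) + probe.fsu_len);
	if (!fsuuid)
		return -1;
	fsuuid->fsu_len = probe.fsu_len;

	/* Second call: fetch the UUID itself. */
	if (ioctl(fd, EXT4_IOC_GETFSUUID, fsuuid) < 0) {
		free(fsuuid);
		return -1;
	}
	for (i = 0; i < fsuuid->fsu_len; i++)
		printf("%02x", fsuuid->fsu_uuid[i]);
	printf("\n");
	free(fsuuid);
	return 0;
}
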
+
+
+#define TUNE_OPS_SUPPORTED (EXT4_TUNE_FL_ERRORS_BEHAVIOR | \
+ EXT4_TUNE_FL_MNT_COUNT | EXT4_TUNE_FL_MAX_MNT_COUNT | \
+ EXT4_TUNE_FL_CHECKINTRVAL | EXT4_TUNE_FL_LAST_CHECK_TIME | \
+ EXT4_TUNE_FL_RESERVED_BLOCKS | EXT4_TUNE_FL_RESERVED_UID | \
+ EXT4_TUNE_FL_RESERVED_GID | EXT4_TUNE_FL_DEFAULT_MNT_OPTS | \
+ EXT4_TUNE_FL_DEF_HASH_ALG | EXT4_TUNE_FL_RAID_STRIDE | \
+ EXT4_TUNE_FL_RAID_STRIPE_WIDTH | EXT4_TUNE_FL_MOUNT_OPTS | \
+ EXT4_TUNE_FL_FEATURES | EXT4_TUNE_FL_EDIT_FEATURES | \
+ EXT4_TUNE_FL_FORCE_FSCK | EXT4_TUNE_FL_ENCODING | \
+ EXT4_TUNE_FL_ENCODING_FLAGS)
+
+#define EXT4_TUNE_SET_COMPAT_SUPP \
+ (EXT4_FEATURE_COMPAT_DIR_INDEX | \
+ EXT4_FEATURE_COMPAT_STABLE_INODES)
+#define EXT4_TUNE_SET_INCOMPAT_SUPP \
+ (EXT4_FEATURE_INCOMPAT_EXTENTS | \
+ EXT4_FEATURE_INCOMPAT_EA_INODE | \
+ EXT4_FEATURE_INCOMPAT_ENCRYPT | \
+ EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
+ EXT4_FEATURE_INCOMPAT_LARGEDIR | \
+ EXT4_FEATURE_INCOMPAT_CASEFOLD)
+#define EXT4_TUNE_SET_RO_COMPAT_SUPP \
+ (EXT4_FEATURE_RO_COMPAT_LARGE_FILE | \
+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
+ EXT4_FEATURE_RO_COMPAT_PROJECT | \
+ EXT4_FEATURE_RO_COMPAT_VERITY)
+
+#define EXT4_TUNE_CLEAR_COMPAT_SUPP (0)
+#define EXT4_TUNE_CLEAR_INCOMPAT_SUPP (0)
+#define EXT4_TUNE_CLEAR_RO_COMPAT_SUPP (0)
+
+#define SB_ENC_SUPP_MASK (SB_ENC_STRICT_MODE_FL | \
+ SB_ENC_NO_COMPAT_FALLBACK_FL)
+
+static int ext4_ioctl_get_tune_sb(struct ext4_sb_info *sbi,
+ struct ext4_tune_sb_params __user *params)
+{
+ struct ext4_tune_sb_params ret;
+ struct ext4_super_block *es = sbi->s_es;
+
+ memset(&ret, 0, sizeof(ret));
+ ret.set_flags = TUNE_OPS_SUPPORTED;
+ ret.errors_behavior = le16_to_cpu(es->s_errors);
+ ret.mnt_count = le16_to_cpu(es->s_mnt_count);
+ ret.max_mnt_count = le16_to_cpu(es->s_max_mnt_count);
+ ret.checkinterval = le32_to_cpu(es->s_checkinterval);
+ ret.last_check_time = le32_to_cpu(es->s_lastcheck);
+ ret.reserved_blocks = ext4_r_blocks_count(es);
+ ret.blocks_count = ext4_blocks_count(es);
+ ret.reserved_uid = ext4_get_resuid(es);
+ ret.reserved_gid = ext4_get_resgid(es);
+ ret.default_mnt_opts = le32_to_cpu(es->s_default_mount_opts);
+ ret.def_hash_alg = es->s_def_hash_version;
+ ret.raid_stride = le16_to_cpu(es->s_raid_stride);
+ ret.raid_stripe_width = le32_to_cpu(es->s_raid_stripe_width);
+ ret.encoding = le16_to_cpu(es->s_encoding);
+ ret.encoding_flags = le16_to_cpu(es->s_encoding_flags);
+ strscpy_pad(ret.mount_opts, es->s_mount_opts);
+ ret.feature_compat = le32_to_cpu(es->s_feature_compat);
+ ret.feature_incompat = le32_to_cpu(es->s_feature_incompat);
+ ret.feature_ro_compat = le32_to_cpu(es->s_feature_ro_compat);
+ ret.set_feature_compat_mask = EXT4_TUNE_SET_COMPAT_SUPP;
+ ret.set_feature_incompat_mask = EXT4_TUNE_SET_INCOMPAT_SUPP;
+ ret.set_feature_ro_compat_mask = EXT4_TUNE_SET_RO_COMPAT_SUPP;
+ ret.clear_feature_compat_mask = EXT4_TUNE_CLEAR_COMPAT_SUPP;
+ ret.clear_feature_incompat_mask = EXT4_TUNE_CLEAR_INCOMPAT_SUPP;
+ ret.clear_feature_ro_compat_mask = EXT4_TUNE_CLEAR_RO_COMPAT_SUPP;
+ if (copy_to_user(params, &ret, sizeof(ret)))
+ return -EFAULT;
+ return 0;
+}
+
+static void ext4_sb_setparams(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
+{
+ const struct ext4_tune_sb_params *params = arg;
+
+ if (params->set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR)
+ es->s_errors = cpu_to_le16(params->errors_behavior);
+ if (params->set_flags & EXT4_TUNE_FL_MNT_COUNT)
+ es->s_mnt_count = cpu_to_le16(params->mnt_count);
+ if (params->set_flags & EXT4_TUNE_FL_MAX_MNT_COUNT)
+ es->s_max_mnt_count = cpu_to_le16(params->max_mnt_count);
+ if (params->set_flags & EXT4_TUNE_FL_CHECKINTRVAL)
+ es->s_checkinterval = cpu_to_le32(params->checkinterval);
+ if (params->set_flags & EXT4_TUNE_FL_LAST_CHECK_TIME)
+ es->s_lastcheck = cpu_to_le32(params->last_check_time);
+ if (params->set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) {
+ ext4_fsblk_t blk = params->reserved_blocks;
+
+ es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
+ es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_RESERVED_UID) {
+ int uid = params->reserved_uid;
+
+ es->s_def_resuid = cpu_to_le16(uid & 0xFFFF);
+ es->s_def_resuid_hi = cpu_to_le16(uid >> 16);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_RESERVED_GID) {
+ int gid = params->reserved_gid;
+
+ es->s_def_resgid = cpu_to_le16(gid & 0xFFFF);
+ es->s_def_resgid_hi = cpu_to_le16(gid >> 16);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_DEFAULT_MNT_OPTS)
+ es->s_default_mount_opts = cpu_to_le32(params->default_mnt_opts);
+ if (params->set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
+ es->s_def_hash_version = params->def_hash_alg;
+ if (params->set_flags & EXT4_TUNE_FL_RAID_STRIDE)
+ es->s_raid_stride = cpu_to_le16(params->raid_stride);
+ if (params->set_flags & EXT4_TUNE_FL_RAID_STRIPE_WIDTH)
+ es->s_raid_stripe_width =
+ cpu_to_le32(params->raid_stripe_width);
+ if (params->set_flags & EXT4_TUNE_FL_ENCODING)
+ es->s_encoding = cpu_to_le16(params->encoding);
+ if (params->set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)
+ es->s_encoding_flags = cpu_to_le16(params->encoding_flags);
+ if (params->set_flags & EXT4_TUNE_FL_MOUNT_OPTS)
+ strscpy_pad(es->s_mount_opts, params->mount_opts);
+ if (params->set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
+ es->s_feature_compat |=
+ cpu_to_le32(params->set_feature_compat_mask);
+ es->s_feature_incompat |=
+ cpu_to_le32(params->set_feature_incompat_mask);
+ es->s_feature_ro_compat |=
+ cpu_to_le32(params->set_feature_ro_compat_mask);
+ es->s_feature_compat &=
+ ~cpu_to_le32(params->clear_feature_compat_mask);
+ es->s_feature_incompat &=
+ ~cpu_to_le32(params->clear_feature_incompat_mask);
+ es->s_feature_ro_compat &=
+ ~cpu_to_le32(params->clear_feature_ro_compat_mask);
+ if (params->set_feature_compat_mask &
+ EXT4_FEATURE_COMPAT_DIR_INDEX)
+ es->s_def_hash_version = sbi->s_def_hash_version;
+ if (params->set_feature_incompat_mask &
+ EXT4_FEATURE_INCOMPAT_CSUM_SEED)
+ es->s_checksum_seed = cpu_to_le32(sbi->s_csum_seed);
+ }
+ if (params->set_flags & EXT4_TUNE_FL_FORCE_FSCK)
+ es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+}
+
+static int ext4_ioctl_set_tune_sb(struct file *filp,
+ struct ext4_tune_sb_params __user *in)
+{
+ struct ext4_tune_sb_params params;
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ int enabling_casefold = 0;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&params, in, sizeof(params)))
+ return -EFAULT;
+
+ if (strnlen(params.mount_opts, sizeof(params.mount_opts)) ==
+ sizeof(params.mount_opts))
+ return -E2BIG;
+
+ if ((params.set_flags & ~TUNE_OPS_SUPPORTED) != 0)
+ return -EOPNOTSUPP;
+
+ if ((params.set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) &&
+ (params.errors_behavior > EXT4_ERRORS_PANIC))
+ return -EINVAL;
+
+ if ((params.set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) &&
+ (params.reserved_blocks > ext4_blocks_count(sbi->s_es) / 2))
+ return -EINVAL;
+ if ((params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) &&
+ ((params.def_hash_alg > DX_HASH_LAST) ||
+ (params.def_hash_alg == DX_HASH_SIPHASH)))
+ return -EINVAL;
+ if ((params.set_flags & EXT4_TUNE_FL_FEATURES) &&
+ (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES))
+ return -EINVAL;
+
+ if (params.set_flags & EXT4_TUNE_FL_FEATURES) {
+ params.set_feature_compat_mask =
+ params.feature_compat &
+ ~le32_to_cpu(es->s_feature_compat);
+ params.set_feature_incompat_mask =
+ params.feature_incompat &
+ ~le32_to_cpu(es->s_feature_incompat);
+ params.set_feature_ro_compat_mask =
+ params.feature_ro_compat &
+ ~le32_to_cpu(es->s_feature_ro_compat);
+ params.clear_feature_compat_mask =
+ ~params.feature_compat &
+ le32_to_cpu(es->s_feature_compat);
+ params.clear_feature_incompat_mask =
+ ~params.feature_incompat &
+ le32_to_cpu(es->s_feature_incompat);
+ params.clear_feature_ro_compat_mask =
+ ~params.feature_ro_compat &
+ le32_to_cpu(es->s_feature_ro_compat);
+ params.set_flags |= EXT4_TUNE_FL_EDIT_FEATURES;
+ }
+ if (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
+ if ((params.set_feature_compat_mask &
+ ~EXT4_TUNE_SET_COMPAT_SUPP) ||
+ (params.set_feature_incompat_mask &
+ ~EXT4_TUNE_SET_INCOMPAT_SUPP) ||
+ (params.set_feature_ro_compat_mask &
+ ~EXT4_TUNE_SET_RO_COMPAT_SUPP) ||
+ (params.clear_feature_compat_mask &
+ ~EXT4_TUNE_CLEAR_COMPAT_SUPP) ||
+ (params.clear_feature_incompat_mask &
+ ~EXT4_TUNE_CLEAR_INCOMPAT_SUPP) ||
+ (params.clear_feature_ro_compat_mask &
+ ~EXT4_TUNE_CLEAR_RO_COMPAT_SUPP))
+ return -EOPNOTSUPP;
+
+ /*
+ * Filter out the features that are already set from
+ * the set_mask.
+ */
+ params.set_feature_compat_mask &=
+ ~le32_to_cpu(es->s_feature_compat);
+ params.set_feature_incompat_mask &=
+ ~le32_to_cpu(es->s_feature_incompat);
+ params.set_feature_ro_compat_mask &=
+ ~le32_to_cpu(es->s_feature_ro_compat);
+ if ((params.set_feature_incompat_mask &
+ EXT4_FEATURE_INCOMPAT_CASEFOLD)) {
+ enabling_casefold = 1;
+ if (!(params.set_flags & EXT4_TUNE_FL_ENCODING)) {
+ params.encoding = EXT4_ENC_UTF8_12_1;
+ params.set_flags |= EXT4_TUNE_FL_ENCODING;
+ }
+ if (!(params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)) {
+ params.encoding_flags = 0;
+ params.set_flags |= EXT4_TUNE_FL_ENCODING_FLAGS;
+ }
+ }
+ if ((params.set_feature_compat_mask &
+ EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ uuid_t uu;
+
+ memcpy(&uu, sbi->s_hash_seed, UUID_SIZE);
+ if (uuid_is_null(&uu))
+ generate_random_uuid((char *)
+ &sbi->s_hash_seed);
+ if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
+ sbi->s_def_hash_version = params.def_hash_alg;
+ else if (sbi->s_def_hash_version == 0)
+ sbi->s_def_hash_version = DX_HASH_HALF_MD4;
+ if (!(es->s_flags &
+ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH)) &&
+ !(es->s_flags &
+ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH))) {
+#ifdef __CHAR_UNSIGNED__
+ sbi->s_hash_unsigned = 3;
+#else
+ sbi->s_hash_unsigned = 0;
+#endif
+ }
+ }
+ }
+ if (params.set_flags & EXT4_TUNE_FL_ENCODING) {
+ if (!enabling_casefold)
+ return -EINVAL;
+ if (params.encoding == 0)
+ params.encoding = EXT4_ENC_UTF8_12_1;
+ else if (params.encoding != EXT4_ENC_UTF8_12_1)
+ return -EINVAL;
+ }
+ if (params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) {
+ if (!enabling_casefold)
+ return -EINVAL;
+ if (params.encoding_flags & ~SB_ENC_SUPP_MASK)
+ return -EINVAL;
+ }
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = ext4_update_superblocks_fn(sb, ext4_sb_setparams, &params);
+ mnt_drop_write_file(filp);
+
+ if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
+ sbi->s_def_hash_version = params.def_hash_alg;
+
+ return ret;
+}
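
As a rough sketch of how a tune2fs-style tool might use this interface, a get/modify/set cycle for a single field could look like the helper below. The ioctl numbers and the struct ext4_tune_sb_params layout are assumed to be exported to userspace by the uapi header this series adds; both are assumptions, not part of the current uapi.

#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical userspace helper; relies on the (assumed) uapi export
 * of EXT4_IOC_GET_TUNE_SB_PARAM, EXT4_IOC_SET_TUNE_SB_PARAM and
 * struct ext4_tune_sb_params from this series. */
static int set_max_mount_count(int fd, unsigned int max_mnt_count)
{
	struct ext4_tune_sb_params params;

	memset(&params, 0, sizeof(params));
	if (ioctl(fd, EXT4_IOC_GET_TUNE_SB_PARAM, &params) < 0)
		return -1;

	/* Request a change of exactly one field; everything else is
	 * left alone because its set_flags bit stays clear. */
	params.set_flags = EXT4_TUNE_FL_MAX_MNT_COUNT;
	params.max_mnt_count = max_mnt_count;

	return ioctl(fd, EXT4_IOC_SET_TUNE_SB_PARAM, &params);
}
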
+
static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
- struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);
@@ -871,10 +1547,10 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
__u32 generation;
int err;
- if (!inode_owner_or_capable(mnt_userns, inode))
+ if (!inode_owner_or_capable(idmap, inode))
return -EPERM;
- if (ext4_has_metadata_csum(inode->i_sb)) {
+ if (ext4_has_feature_metadata_csum(inode->i_sb)) {
ext4_warning(sb, "Setting inode version is not "
"supported with metadata_csum enabled.");
return -ENOTTY;
@@ -896,7 +1572,8 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
+ inode_inc_iversion(inode);
inode->i_generation = generation;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
}
@@ -942,13 +1619,14 @@ setversion_out:
err = err2;
mnt_drop_write_file(filp);
group_extend_out:
- ext4_resize_end(sb);
+ err2 = ext4_resize_end(sb, false);
+ if (err == 0)
+ err = err2;
return err;
}
case EXT4_IOC_MOVE_EXT: {
struct move_extent me;
- struct fd donor;
int err;
if (!(filp->f_mode & FMODE_READ) ||
@@ -960,40 +1638,24 @@ group_extend_out:
return -EFAULT;
me.moved_len = 0;
- donor = fdget(me.donor_fd);
- if (!donor.file)
+ CLASS(fd, donor)(me.donor_fd);
+ if (fd_empty(donor))
return -EBADF;
- if (!(donor.file->f_mode & FMODE_WRITE)) {
- err = -EBADF;
- goto mext_out;
- }
-
- if (ext4_has_feature_bigalloc(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Online defrag not supported with bigalloc");
- err = -EOPNOTSUPP;
- goto mext_out;
- } else if (IS_DAX(inode)) {
- ext4_msg(sb, KERN_ERR,
- "Online defrag not supported with DAX");
- err = -EOPNOTSUPP;
- goto mext_out;
- }
+ if (!(fd_file(donor)->f_mode & FMODE_WRITE))
+ return -EBADF;
err = mnt_want_write_file(filp);
if (err)
- goto mext_out;
+ return err;
- err = ext4_move_extents(filp, donor.file, me.orig_start,
+ err = ext4_move_extents(filp, fd_file(donor), me.orig_start,
me.donor_start, me.len, &me.moved_len);
mnt_drop_write_file(filp);
if (copy_to_user((struct move_extent __user *)arg,
&me, sizeof(me)))
err = -EFAULT;
-mext_out:
- fdput(donor);
return err;
}
@@ -1010,7 +1672,7 @@ mext_out:
case EXT4_IOC_MIGRATE:
{
int err;
- if (!inode_owner_or_capable(mnt_userns, inode))
+ if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
err = mnt_want_write_file(filp);
@@ -1032,7 +1694,7 @@ mext_out:
case EXT4_IOC_ALLOC_DA_BLKS:
{
int err;
- if (!inode_owner_or_capable(mnt_userns, inode))
+ if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
err = mnt_want_write_file(filp);
@@ -1051,7 +1713,7 @@ mext_out:
err = mnt_want_write_file(filp);
if (err)
return err;
- err = swap_inode_boot_loader(sb, mnt_userns, inode);
+ err = swap_inode_boot_loader(sb, idmap, inode);
mnt_drop_write_file(filp);
return err;
}
@@ -1076,7 +1738,7 @@ mext_out:
err = ext4_resize_fs(sb, n_blocks_count);
if (EXT4_SB(sb)->s_journal) {
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
@@ -1090,20 +1752,21 @@ mext_out:
err = ext4_register_li_request(sb, o_group);
resizefs_out:
- ext4_resize_end(sb);
+ err2 = ext4_resize_end(sb, true);
+ if (err == 0)
+ err = err2;
return err;
}
case FITRIM:
{
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(sb->s_bdev))
return -EOPNOTSUPP;
/*
@@ -1117,8 +1780,6 @@ resizefs_out:
sizeof(range)))
return -EFAULT;
- range.minlen = max((unsigned int)range.minlen,
- q->limits.discard_granularity);
ret = ext4_trim_fs(sb, &range);
if (ret < 0)
return ret;
@@ -1130,58 +1791,22 @@ resizefs_out:
return 0;
}
case EXT4_IOC_PRECACHE_EXTENTS:
- return ext4_ext_precache(inode);
+ {
+ int ret;
+ inode_lock_shared(inode);
+ ret = ext4_ext_precache(inode);
+ inode_unlock_shared(inode);
+ return ret;
+ }
case FS_IOC_SET_ENCRYPTION_POLICY:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
- case FS_IOC_GET_ENCRYPTION_PWSALT: {
-#ifdef CONFIG_FS_ENCRYPTION
- int err, err2;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- handle_t *handle;
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
+ return ext4_ioctl_get_encryption_pwsalt(filp, (void __user *)arg);
- if (!ext4_has_feature_encrypt(sb))
- return -EOPNOTSUPP;
- if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
- err = mnt_want_write_file(filp);
- if (err)
- return err;
- handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- goto pwsalt_err_exit;
- }
- err = ext4_journal_get_write_access(handle, sb,
- sbi->s_sbh,
- EXT4_JTR_NONE);
- if (err)
- goto pwsalt_err_journal;
- lock_buffer(sbi->s_sbh);
- generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
- ext4_superblock_csum_set(sb);
- unlock_buffer(sbi->s_sbh);
- err = ext4_handle_dirty_metadata(handle, NULL,
- sbi->s_sbh);
- pwsalt_err_journal:
- err2 = ext4_journal_stop(handle);
- if (err2 && !err)
- err = err2;
- pwsalt_err_exit:
- mnt_drop_write_file(filp);
- if (err)
- return err;
- }
- if (copy_to_user((void __user *) arg,
- sbi->s_es->s_encrypt_pw_salt, 16))
- return -EFAULT;
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
- }
case FS_IOC_GET_ENCRYPTION_POLICY:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
@@ -1219,7 +1844,7 @@ resizefs_out:
case EXT4_IOC_CLEAR_ES_CACHE:
{
- if (!inode_owner_or_capable(mnt_userns, inode))
+ if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
ext4_clear_inode_es(inode);
return 0;
@@ -1245,7 +1870,7 @@ resizefs_out:
return ext4_ioctl_get_es_cache(filp, arg);
case EXT4_IOC_SHUTDOWN:
- return ext4_shutdown(sb, arg);
+ return ext4_ioctl_shutdown(sb, arg);
case FS_IOC_ENABLE_VERITY:
if (!ext4_has_feature_verity(sb))
@@ -1266,6 +1891,22 @@ resizefs_out:
case EXT4_IOC_CHECKPOINT:
return ext4_ioctl_checkpoint(filp, arg);
+ case FS_IOC_GETFSLABEL:
+ return ext4_ioctl_getlabel(EXT4_SB(sb), (void __user *)arg);
+
+ case FS_IOC_SETFSLABEL:
+ return ext4_ioctl_setlabel(filp,
+ (const void __user *)arg);
+
+ case EXT4_IOC_GETFSUUID:
+ return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg);
+ case EXT4_IOC_SETFSUUID:
+ return ext4_ioctl_setuuid(filp, (const void __user *)arg);
+ case EXT4_IOC_GET_TUNE_SB_PARAM:
+ return ext4_ioctl_get_tune_sb(EXT4_SB(sb),
+ (void __user *)arg);
+ case EXT4_IOC_SET_TUNE_SB_PARAM:
+ return ext4_ioctl_set_tune_sb(filp, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -1273,13 +1914,7 @@ resizefs_out:
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- long ret;
-
- ext4_fc_start_update(file_inode(filp));
- ret = __ext4_ioctl(filp, cmd, arg);
- ext4_fc_stop_update(file_inode(filp));
-
- return ret;
+ return __ext4_ioctl(filp, cmd, arg);
}
#ifdef CONFIG_COMPAT
@@ -1347,6 +1982,10 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC_GETSTATE:
case EXT4_IOC_GET_ES_CACHE:
case EXT4_IOC_CHECKPOINT:
+ case FS_IOC_GETFSLABEL:
+ case FS_IOC_SETFSLABEL:
+ case EXT4_IOC_GETFSUUID:
+ case EXT4_IOC_SETFSUUID:
break;
default:
return -ENOIOCTLCMD;
@@ -1354,3 +1993,22 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
+
+static void set_overhead(struct ext4_sb_info *sbi,
+ struct ext4_super_block *es, const void *arg)
+{
+ es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
+}
+
+int ext4_update_overhead(struct super_block *sb, bool force)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (ext4_emergency_state(sb) || sb_rdonly(sb))
+ return 0;
+ if (!force &&
+ (sbi->s_overhead == 0 ||
+ sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters)))
+ return 0;
+ return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
+}
diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c
new file mode 100644
index 000000000000..a9416b20ff64
--- /dev/null
+++ b/fs/ext4/mballoc-test.c
@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test of ext4 multiblocks allocation.
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include <linux/random.h>
+
+#include "ext4.h"
+
+struct mbt_grp_ctx {
+ struct buffer_head bitmap_bh;
+ /* desc and gd_bh are just placeholders for now */
+ struct ext4_group_desc desc;
+ struct buffer_head gd_bh;
+};
+
+struct mbt_ctx {
+ struct mbt_grp_ctx *grp_ctx;
+};
+
+struct mbt_ext4_super_block {
+ struct ext4_super_block es;
+ struct ext4_sb_info sbi;
+ struct mbt_ctx mbt_ctx;
+};
+
+#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
+#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
+#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
+
+static struct inode *mbt_alloc_inode(struct super_block *sb)
+{
+ struct ext4_inode_info *ei;
+
+ ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
+ if (!ei)
+ return NULL;
+
+ INIT_LIST_HEAD(&ei->i_orphan);
+ init_rwsem(&ei->xattr_sem);
+ init_rwsem(&ei->i_data_sem);
+ inode_init_once(&ei->vfs_inode);
+ ext4_fc_init_inode(&ei->vfs_inode);
+
+ return &ei->vfs_inode;
+}
+
+static void mbt_free_inode(struct inode *inode)
+{
+ kfree(EXT4_I(inode));
+}
+
+static const struct super_operations mbt_sops = {
+ .alloc_inode = mbt_alloc_inode,
+ .free_inode = mbt_free_inode,
+};
+
+static void mbt_kill_sb(struct super_block *sb)
+{
+ generic_shutdown_super(sb);
+}
+
+static struct file_system_type mbt_fs_type = {
+ .name = "mballoc test",
+ .kill_sb = mbt_kill_sb,
+};
+
+static int mbt_mb_init(struct super_block *sb)
+{
+ ext4_fsblk_t block;
+ int ret;
+
+ /* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
+ sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
+ if (sb->s_bdev == NULL)
+ return -ENOMEM;
+
+ sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
+ if (sb->s_bdev->bd_queue == NULL) {
+ kfree(sb->s_bdev);
+ return -ENOMEM;
+ }
+
+ /*
+ * needed by ext4_mb_init->ext4_mb_init_backend->sbi->s_buddy_cache =
+ * new_inode(sb);
+ */
+ INIT_LIST_HEAD(&sb->s_inodes);
+ sb->s_op = &mbt_sops;
+
+ ret = ext4_mb_init(sb);
+ if (ret != 0)
+ goto err_out;
+
+ block = ext4_count_free_clusters(sb);
+ ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
+ GFP_KERNEL);
+ if (ret != 0)
+ goto err_mb_release;
+
+ ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
+ GFP_KERNEL);
+ if (ret != 0)
+ goto err_freeclusters;
+
+ return 0;
+
+err_freeclusters:
+ percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
+err_mb_release:
+ ext4_mb_release(sb);
+err_out:
+ kfree(sb->s_bdev->bd_queue);
+ kfree(sb->s_bdev);
+ return ret;
+}
+
+static void mbt_mb_release(struct super_block *sb)
+{
+ percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
+ percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
+ ext4_mb_release(sb);
+ kfree(sb->s_bdev->bd_queue);
+ kfree(sb->s_bdev);
+}
+
+static int mbt_set(struct super_block *sb, void *data)
+{
+ return 0;
+}
+
+static struct super_block *mbt_ext4_alloc_super_block(void)
+{
+ struct mbt_ext4_super_block *fsb;
+ struct super_block *sb;
+ struct ext4_sb_info *sbi;
+
+ fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
+ if (fsb == NULL)
+ return NULL;
+
+ sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
+ if (IS_ERR(sb))
+ goto out;
+
+ sbi = &fsb->sbi;
+
+ sbi->s_blockgroup_lock =
+ kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+ if (!sbi->s_blockgroup_lock)
+ goto out_deactivate;
+
+ bgl_lock_init(sbi->s_blockgroup_lock);
+
+ sbi->s_es = &fsb->es;
+ sbi->s_sb = sb;
+ sb->s_fs_info = sbi;
+
+ up_write(&sb->s_umount);
+ return sb;
+
+out_deactivate:
+ deactivate_locked_super(sb);
+out:
+ kfree(fsb);
+ return NULL;
+}
+
+static void mbt_ext4_free_super_block(struct super_block *sb)
+{
+ struct mbt_ext4_super_block *fsb = MBT_SB(sb);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ kfree(sbi->s_blockgroup_lock);
+ deactivate_super(sb);
+ kfree(fsb);
+}
+
+struct mbt_ext4_block_layout {
+ unsigned char blocksize_bits;
+ unsigned int cluster_bits;
+ uint32_t blocks_per_group;
+ ext4_group_t group_count;
+ uint16_t desc_size;
+};
+
+static void mbt_init_sb_layout(struct super_block *sb,
+ struct mbt_ext4_block_layout *layout)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+
+ sb->s_blocksize = 1UL << layout->blocksize_bits;
+ sb->s_blocksize_bits = layout->blocksize_bits;
+
+ sbi->s_groups_count = layout->group_count;
+ sbi->s_blocks_per_group = layout->blocks_per_group;
+ sbi->s_cluster_bits = layout->cluster_bits;
+ sbi->s_cluster_ratio = 1U << layout->cluster_bits;
+ sbi->s_clusters_per_group = layout->blocks_per_group >>
+ layout->cluster_bits;
+ sbi->s_desc_size = layout->desc_size;
+ sbi->s_desc_per_block_bits =
+ sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
+ sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;
+
+ es->s_first_data_block = cpu_to_le32(0);
+ es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
+ layout->group_count);
+}
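
For orientation, a hypothetical parameter set for this helper might describe a small 1 KiB-block filesystem as shown below; the layouts actually used by the tests are declared further down in this file.

/* Hypothetical example only, not one of the test's real layouts. */
static struct mbt_ext4_block_layout example_layout = {
	.blocksize_bits = 10,		/* 1 KiB blocks */
	.cluster_bits = 3,		/* bigalloc: 8 blocks per cluster */
	.blocks_per_group = 8192,	/* 8 * blocksize */
	.group_count = 4,
	.desc_size = 64,
};
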
+
+static int mbt_grp_ctx_init(struct super_block *sb,
+ struct mbt_grp_ctx *grp_ctx)
+{
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+
+ grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
+ if (grp_ctx->bitmap_bh.b_data == NULL)
+ return -ENOMEM;
+ mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
+ ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);
+
+ return 0;
+}
+
+static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
+{
+ kfree(grp_ctx->bitmap_bh.b_data);
+ grp_ctx->bitmap_bh.b_data = NULL;
+}
+
+static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
+ unsigned int start, unsigned int len)
+{
+ struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
+
+ mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
+}
+
+static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
+{
+ struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
+
+ return grp_ctx->bitmap_bh.b_data;
+}
+
+/* called after mbt_init_sb_layout */
+static int mbt_ctx_init(struct super_block *sb)
+{
+ struct mbt_ctx *ctx = MBT_CTX(sb);
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+
+ ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
+ GFP_KERNEL);
+ if (ctx->grp_ctx == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ngroups; i++)
+ if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
+ goto out;
+
+ /*
+ * The first data block (first cluster in the first group) is used by
+ * metadata; mark it used so we never allocate a data block there,
+ * which would fail the ext4_sb_block_valid check.
+ */
+ mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
+ ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
+ EXT4_CLUSTERS_PER_GROUP(sb) - 1);
+
+ return 0;
+out:
+ while (i-- > 0)
+ mbt_grp_ctx_release(&ctx->grp_ctx[i]);
+ kfree(ctx->grp_ctx);
+ return -ENOMEM;
+}
+
+static void mbt_ctx_release(struct super_block *sb)
+{
+ struct mbt_ctx *ctx = MBT_CTX(sb);
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+
+ for (i = 0; i < ngroups; i++)
+ mbt_grp_ctx_release(&ctx->grp_ctx[i]);
+ kfree(ctx->grp_ctx);
+}
+
+static struct buffer_head *
+ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
+ bool ignore_locked)
+{
+ struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
+
+ /* paired with brelse from caller of ext4_read_block_bitmap_nowait */
+ get_bh(&grp_ctx->bitmap_bh);
+ return &grp_ctx->bitmap_bh;
+}
+
+static int ext4_wait_block_bitmap_stub(struct super_block *sb,
+ ext4_group_t block_group,
+ struct buffer_head *bh)
+{
+ /*
+ * The real ext4_wait_block_bitmap() sets these flags, and
+ * functions like ext4_mb_init_cache() verify them.
+ */
+ set_buffer_uptodate(bh);
+ set_bitmap_uptodate(bh);
+ set_buffer_verified(bh);
+ return 0;
+}
+
+static struct ext4_group_desc *
+ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
+ struct buffer_head **bh)
+{
+ struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
+
+ if (bh != NULL)
+ *bh = &grp_ctx->gd_bh;
+
+ return &grp_ctx->desc;
+}
+
+static int
+ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
+ ext4_group_t group, ext4_grpblk_t blkoff,
+ ext4_grpblk_t len, int flags,
+ ext4_grpblk_t *ret_changed)
+{
+ struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
+ struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;
+
+ if (state)
+ mb_set_bits(bitmap_bh->b_data, blkoff, len);
+ else
+ mb_clear_bits(bitmap_bh->b_data, blkoff, len);
+
+ return 0;
+}
+
+#define TEST_GOAL_GROUP 1
+static int mbt_kunit_init(struct kunit *test)
+{
+ struct mbt_ext4_block_layout *layout =
+ (struct mbt_ext4_block_layout *)(test->param_value);
+ struct super_block *sb;
+ int ret;
+
+ sb = mbt_ext4_alloc_super_block();
+ if (sb == NULL)
+ return -ENOMEM;
+
+ mbt_init_sb_layout(sb, layout);
+
+ ret = mbt_ctx_init(sb);
+ if (ret != 0) {
+ mbt_ext4_free_super_block(sb);
+ return ret;
+ }
+
+ test->priv = sb;
+ kunit_activate_static_stub(test,
+ ext4_read_block_bitmap_nowait,
+ ext4_read_block_bitmap_nowait_stub);
+ kunit_activate_static_stub(test,
+ ext4_wait_block_bitmap,
+ ext4_wait_block_bitmap_stub);
+ kunit_activate_static_stub(test,
+ ext4_get_group_desc,
+ ext4_get_group_desc_stub);
+ kunit_activate_static_stub(test,
+ ext4_mb_mark_context,
+ ext4_mb_mark_context_stub);
+
+ /* stub function will be called in mbt_mb_init->ext4_mb_init */
+ if (mbt_mb_init(sb) != 0) {
+ mbt_ctx_release(sb);
+ mbt_ext4_free_super_block(sb);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mbt_kunit_exit(struct kunit *test)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+
+ mbt_mb_release(sb);
+ mbt_ctx_release(sb);
+ mbt_ext4_free_super_block(sb);
+}
+
+static void test_new_blocks_simple(struct kunit *test)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ struct inode *inode;
+ struct ext4_allocation_request ar;
+ ext4_group_t i, goal_group = TEST_GOAL_GROUP;
+ int err = 0;
+ ext4_fsblk_t found;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
+ if (!inode)
+ return;
+
+ inode->i_sb = sb;
+ ar.inode = inode;
+
+ /* get block at goal */
+ ar.goal = ext4_group_first_block_no(sb, goal_group);
+ found = ext4_mb_new_blocks_simple(&ar, &err);
+ KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
+ "failed to alloc block at goal, expected %llu found %llu",
+ ar.goal, found);
+
+ /* get block after goal in goal group */
+ ar.goal = ext4_group_first_block_no(sb, goal_group);
+ found = ext4_mb_new_blocks_simple(&ar, &err);
+ KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
+ "failed to alloc block after goal in goal group, expected %llu found %llu",
+ ar.goal + EXT4_C2B(sbi, 1), found);
+
+ /* get block after goal group */
+ mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+ ar.goal = ext4_group_first_block_no(sb, goal_group);
+ found = ext4_mb_new_blocks_simple(&ar, &err);
+ KUNIT_ASSERT_EQ_MSG(test,
+ ext4_group_first_block_no(sb, goal_group + 1), found,
+ "failed to alloc block after goal group, expected %llu found %llu",
+ ext4_group_first_block_no(sb, goal_group + 1), found);
+
+ /* get block before goal group */
+ for (i = goal_group; i < ext4_get_groups_count(sb); i++)
+ mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+ ar.goal = ext4_group_first_block_no(sb, goal_group);
+ found = ext4_mb_new_blocks_simple(&ar, &err);
+ KUNIT_ASSERT_EQ_MSG(test,
+ ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
+ "failed to alloc block before goal group, expected %llu found %llu",
+ ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);
+
+ /* no block available, fail to allocate block */
+ for (i = 0; i < ext4_get_groups_count(sb); i++)
+ mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+ ar.goal = ext4_group_first_block_no(sb, goal_group);
+ found = ext4_mb_new_blocks_simple(&ar, &err);
+ KUNIT_ASSERT_NE_MSG(test, err, 0,
+ "unexpectedly get block when no block is available");
+}
+
+#define TEST_RANGE_COUNT 8
+
+struct test_range {
+ ext4_grpblk_t start;
+ ext4_grpblk_t len;
+};
+
+static void
+mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
+ int count)
+{
+ ext4_grpblk_t start, len, max;
+ int i;
+
+ max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
+ for (i = 0; i < count; i++) {
+ start = get_random_u32() % max;
+ len = get_random_u32() % max;
+ len = min(len, max - start);
+
+ ranges[i].start = start + i * max;
+ ranges[i].len = len;
+ }
+}
+
+static void
+validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
+ ext4_group_t goal_group, ext4_grpblk_t start,
+ ext4_grpblk_t len)
+{
+ void *bitmap;
+ ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_group_t i;
+
+ for (i = 0; i < ext4_get_groups_count(sb); i++) {
+ if (i == goal_group)
+ continue;
+
+ bitmap = mbt_ctx_bitmap(sb, i);
+ bit = mb_find_next_zero_bit(bitmap, max, 0);
+ KUNIT_ASSERT_EQ_MSG(test, bit, max,
+ "free block on unexpected group %d", i);
+ }
+
+ bitmap = mbt_ctx_bitmap(sb, goal_group);
+ bit = mb_find_next_zero_bit(bitmap, max, 0);
+ KUNIT_ASSERT_EQ(test, bit, start);
+
+ bit = mb_find_next_bit(bitmap, max, bit + 1);
+ KUNIT_ASSERT_EQ(test, bit, start + len);
+}
+
+static void
+test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
+ ext4_grpblk_t start, ext4_grpblk_t len)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct inode *inode;
+ ext4_fsblk_t block;
+
+ inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
+ if (!inode)
+ return;
+ inode->i_sb = sb;
+
+ if (len == 0)
+ return;
+
+ block = ext4_group_first_block_no(sb, goal_group) +
+ EXT4_C2B(sbi, start);
+ ext4_free_blocks_simple(inode, block, len);
+ validate_free_blocks_simple(test, sb, goal_group, start, len);
+ mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
+}
+
+static void test_free_blocks_simple(struct kunit *test)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_group_t i;
+ struct test_range ranges[TEST_RANGE_COUNT];
+
+ for (i = 0; i < ext4_get_groups_count(sb); i++)
+ mbt_ctx_mark_used(sb, i, 0, max);
+
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ for (i = 0; i < TEST_RANGE_COUNT; i++)
+ test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
+ ranges[i].start, ranges[i].len);
+}
+
+static void
+test_mark_diskspace_used_range(struct kunit *test,
+ struct ext4_allocation_context *ac,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ int ret;
+ void *bitmap;
+ ext4_grpblk_t i, max;
+
+ /* ext4_mb_mark_diskspace_used will BUG if len is 0 */
+ if (len == 0)
+ return;
+
+ ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
+ ac->ac_b_ex.fe_start = start;
+ ac->ac_b_ex.fe_len = len;
+
+ bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
+ memset(bitmap, 0, sb->s_blocksize);
+ ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ max = EXT4_CLUSTERS_PER_GROUP(sb);
+ i = mb_find_next_bit(bitmap, max, 0);
+ KUNIT_ASSERT_EQ(test, i, start);
+ i = mb_find_next_zero_bit(bitmap, max, i + 1);
+ KUNIT_ASSERT_EQ(test, i, start + len);
+ i = mb_find_next_bit(bitmap, max, i + 1);
+ KUNIT_ASSERT_EQ(test, max, i);
+}
+
+static void test_mark_diskspace_used(struct kunit *test)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ struct inode *inode;
+ struct ext4_allocation_context ac;
+ struct test_range ranges[TEST_RANGE_COUNT];
+ int i;
+
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+
+ inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
+ if (!inode)
+ return;
+ inode->i_sb = sb;
+
+ ac.ac_status = AC_STATUS_FOUND;
+ ac.ac_sb = sb;
+ ac.ac_inode = inode;
+ for (i = 0; i < TEST_RANGE_COUNT; i++)
+ test_mark_diskspace_used_range(test, &ac, ranges[i].start,
+ ranges[i].len);
+}
+
+static void mbt_generate_buddy(struct super_block *sb, void *buddy,
+ void *bitmap, struct ext4_group_info *grp)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ uint32_t order, off;
+ void *bb, *bb_h;
+ int max;
+
+ memset(buddy, 0xff, sb->s_blocksize);
+ memset(grp, 0, offsetof(struct ext4_group_info,
+ bb_counters[MB_NUM_ORDERS(sb)]));
+
+ bb = bitmap;
+ max = EXT4_CLUSTERS_PER_GROUP(sb);
+ bb_h = buddy + sbi->s_mb_offsets[1];
+
+ off = mb_find_next_zero_bit(bb, max, 0);
+ grp->bb_first_free = off;
+ while (off < max) {
+ grp->bb_counters[0]++;
+ grp->bb_free++;
+
+ if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
+ grp->bb_free++;
+ grp->bb_counters[0]--;
+ mb_clear_bit(off >> 1, bb_h);
+ grp->bb_counters[1]++;
+ grp->bb_largest_free_order = 1;
+ off++;
+ }
+
+ off = mb_find_next_zero_bit(bb, max, off + 1);
+ }
+
+ for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
+ bb = buddy + sbi->s_mb_offsets[order];
+ bb_h = buddy + sbi->s_mb_offsets[order + 1];
+ max = max >> 1;
+ off = mb_find_next_zero_bit(bb, max, 0);
+
+ while (off < max) {
+ if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
+ mb_set_bits(bb, off, 2);
+ grp->bb_counters[order] -= 2;
+ mb_clear_bit(off >> 1, bb_h);
+ grp->bb_counters[order + 1]++;
+ grp->bb_largest_free_order = order + 1;
+ off++;
+ }
+
+ off = mb_find_next_zero_bit(bb, max, off + 1);
+ }
+ }
+
+ max = EXT4_CLUSTERS_PER_GROUP(sb);
+ off = mb_find_next_zero_bit(bitmap, max, 0);
+ while (off < max) {
+ grp->bb_fragments++;
+
+ off = mb_find_next_bit(bitmap, max, off + 1);
+ if (off + 1 >= max)
+ break;
+
+ off = mb_find_next_zero_bit(bitmap, max, off + 1);
+ }
+}
+
+static void
+mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
+ struct ext4_group_info *grp2)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ int i;
+
+ KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
+ grp2->bb_first_free);
+ KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
+ grp2->bb_fragments);
+ KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
+ KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
+ grp2->bb_largest_free_order);
+
+ for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
+ KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
+ grp2->bb_counters[i],
+ "bb_counters[%d] diffs, expected %d, generated %d",
+ i, grp1->bb_counters[i],
+ grp2->bb_counters[i]);
+ }
+}
+
+static void
+do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
+ void *mbt_buddy, struct ext4_group_info *mbt_grp,
+ void *ext4_buddy, struct ext4_group_info *ext4_grp)
+{
+ int i;
+
+ mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);
+
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ ext4_grp->bb_counters[i] = 0;
+ /* needed by validation in ext4_mb_generate_buddy */
+ ext4_grp->bb_free = mbt_grp->bb_free;
+ memset(ext4_buddy, 0xff, sb->s_blocksize);
+ ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
+ ext4_grp);
+
+ KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
+ 0);
+ mbt_validate_group_info(test, mbt_grp, ext4_grp);
+}
+
+static void test_mb_generate_buddy(struct kunit *test)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ void *bitmap, *expected_bb, *generate_bb;
+ struct ext4_group_info *expected_grp, *generate_grp;
+ struct test_range ranges[TEST_RANGE_COUNT];
+ int i;
+
+ bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
+ expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
+ generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
+ expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
+ bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
+ generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
+ KUNIT_ASSERT_NOT_NULL(test, generate_grp);
+
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ for (i = 0; i < TEST_RANGE_COUNT; i++) {
+ mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
+ do_test_generate_buddy(test, sb, bitmap, expected_bb,
+ expected_grp, generate_bb, generate_grp);
+ }
+}
+
+static void
+test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
+ ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
+ void *buddy, struct ext4_group_info *grp)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ struct ext4_free_extent ex;
+ int i;
+
+ /* mb_mark_used only accepts non-zero len */
+ if (len == 0)
+ return;
+
+ ex.fe_start = start;
+ ex.fe_len = len;
+ ex.fe_group = TEST_GOAL_GROUP;
+
+ ext4_lock_group(sb, TEST_GOAL_GROUP);
+ mb_mark_used(e4b, &ex);
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+
+ mb_set_bits(bitmap, start, len);
+ /* bypass bb_free validation in ext4_mb_generate_buddy */
+ grp->bb_free -= len;
+ memset(buddy, 0xff, sb->s_blocksize);
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ grp->bb_counters[i] = 0;
+ ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
+
+ KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
+ 0);
+ mbt_validate_group_info(test, grp, e4b->bd_info);
+}
+
+static void test_mb_mark_used(struct kunit *test)
+{
+ struct ext4_buddy e4b;
+ struct super_block *sb = (struct super_block *)test->priv;
+ void *bitmap, *buddy;
+ struct ext4_group_info *grp;
+ int ret;
+ struct test_range ranges[TEST_RANGE_COUNT];
+ int i;
+
+ /* buddy cache assumes that each page contains at least one block */
+ if (sb->s_blocksize > PAGE_SIZE)
+ kunit_skip(test, "blocksize exceeds pagesize");
+
+ bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
+ buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
+ grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
+ bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
+
+ ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
+ grp->bb_largest_free_order = -1;
+ grp->bb_avg_fragment_size_order = -1;
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ for (i = 0; i < TEST_RANGE_COUNT; i++)
+ test_mb_mark_used_range(test, &e4b, ranges[i].start,
+ ranges[i].len, bitmap, buddy, grp);
+
+ ext4_mb_unload_buddy(&e4b);
+}
+
+static void
+test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
+ ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
+ void *buddy, struct ext4_group_info *grp)
+{
+ struct super_block *sb = (struct super_block *)test->priv;
+ int i;
+
+ /* mb_free_blocks will WARN if len is 0 */
+ if (len == 0)
+ return;
+
+ ext4_lock_group(sb, e4b->bd_group);
+ mb_free_blocks(NULL, e4b, start, len);
+ ext4_unlock_group(sb, e4b->bd_group);
+
+ mb_clear_bits(bitmap, start, len);
+ /* bypass bb_free validation in ext4_mb_generate_buddy */
+ grp->bb_free += len;
+ memset(buddy, 0xff, sb->s_blocksize);
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ grp->bb_counters[i] = 0;
+ ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
+
+ KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
+ 0);
+ mbt_validate_group_info(test, grp, e4b->bd_info);
+
+}
+
+static void test_mb_free_blocks(struct kunit *test)
+{
+ struct ext4_buddy e4b;
+ struct super_block *sb = (struct super_block *)test->priv;
+ void *bitmap, *buddy;
+ struct ext4_group_info *grp;
+ struct ext4_free_extent ex;
+ int ret;
+ int i;
+ struct test_range ranges[TEST_RANGE_COUNT];
+
+ /* buddy cache assumes that each page contains at least one block */
+ if (sb->s_blocksize > PAGE_SIZE)
+ kunit_skip(test, "blocksize exceeds pagesize");
+
+ bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
+ buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
+ grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
+ bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
+
+ ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ex.fe_start = 0;
+ ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
+ ex.fe_group = TEST_GOAL_GROUP;
+
+ ext4_lock_group(sb, TEST_GOAL_GROUP);
+ mb_mark_used(&e4b, &ex);
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+
+ grp->bb_free = 0;
+ grp->bb_largest_free_order = -1;
+ grp->bb_avg_fragment_size_order = -1;
+ memset(bitmap, 0xff, sb->s_blocksize);
+
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ for (i = 0; i < TEST_RANGE_COUNT; i++)
+ test_mb_free_blocks_range(test, &e4b, ranges[i].start,
+ ranges[i].len, bitmap, buddy, grp);
+
+ ext4_mb_unload_buddy(&e4b);
+}
+
+#define COUNT_FOR_ESTIMATE 100000
+static void test_mb_mark_used_cost(struct kunit *test)
+{
+ struct ext4_buddy e4b;
+ struct super_block *sb = (struct super_block *)test->priv;
+ struct ext4_free_extent ex;
+ int ret;
+ struct test_range ranges[TEST_RANGE_COUNT];
+ int i, j;
+ unsigned long start, end, all = 0;
+
+ /* buddy cache assumes that each page contains at least one block */
+ if (sb->s_blocksize > PAGE_SIZE)
+ kunit_skip(test, "blocksize exceeds pagesize");
+
+ ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ex.fe_group = TEST_GOAL_GROUP;
+ for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ start = jiffies;
+ for (i = 0; i < TEST_RANGE_COUNT; i++) {
+ if (ranges[i].len == 0)
+ continue;
+
+ ex.fe_start = ranges[i].start;
+ ex.fe_len = ranges[i].len;
+ ext4_lock_group(sb, TEST_GOAL_GROUP);
+ mb_mark_used(&e4b, &ex);
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+ }
+ end = jiffies;
+ all += (end - start);
+
+ for (i = 0; i < TEST_RANGE_COUNT; i++) {
+ if (ranges[i].len == 0)
+ continue;
+
+ ext4_lock_group(sb, TEST_GOAL_GROUP);
+ mb_free_blocks(NULL, &e4b, ranges[i].start,
+ ranges[i].len);
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+ }
+ }
+
+ kunit_info(test, "cost: %lu jiffies\n", all);
+ ext4_mb_unload_buddy(&e4b);
+}
+
+static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
+ {
+ .blocksize_bits = 10,
+ .cluster_bits = 3,
+ .blocks_per_group = 8192,
+ .group_count = 4,
+ .desc_size = 64,
+ },
+ {
+ .blocksize_bits = 12,
+ .cluster_bits = 3,
+ .blocks_per_group = 8192,
+ .group_count = 4,
+ .desc_size = 64,
+ },
+ {
+ .blocksize_bits = 16,
+ .cluster_bits = 3,
+ .blocks_per_group = 8192,
+ .group_count = 4,
+ .desc_size = 64,
+ },
+};
+
+static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
+ "blocks_per_group=%d group_count=%d desc_size=%d\n",
+ layout->blocksize_bits, layout->cluster_bits,
+ layout->blocks_per_group, layout->group_count,
+ layout->desc_size);
+}
+KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);
+
+static struct kunit_case mbt_test_cases[] = {
+ KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
+ { .speed = KUNIT_SPEED_SLOW }),
+ {}
+};
+
+static struct kunit_suite mbt_test_suite = {
+ .name = "ext4_mballoc_test",
+ .init = mbt_kunit_init,
+ .exit = mbt_kunit_exit,
+ .test_cases = mbt_test_cases,
+};
+
+kunit_test_suites(&mbt_test_suite);
+
+MODULE_LICENSE("GPL");
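For anyone wanting to try the new suite, a minimal .kunitconfig along these
lines should be enough (CONFIG_EXT4_KUNIT_TESTS is assumed to be the Kconfig
symbol guarding this file; adjust to the tree the patch is applied to):

    CONFIG_KUNIT=y
    CONFIG_EXT4_FS=y
    CONFIG_EXT4_KUNIT_TESTS=y

and then, for example:

    ./tools/testing/kunit/kunit.py run --kunitconfig <dir containing the fragment>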
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 215b7068f548..56d50fd3310b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -16,7 +16,9 @@
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
+#include <linux/freezer.h>
#include <trace/events/ext4.h>
+#include <kunit/static_stub.h>
/*
* MUSTDO:
@@ -96,14 +98,14 @@
* block bitmap and buddy information. The information are stored in the
* inode as:
*
- * { page }
+ * { folio }
* [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
*
*
* one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
- * blocksize) blocks. So it can have information regarding groups_per_page
- * which is blocks_per_page/2
+ * take up 2 blocks. A folio can contain blocks_per_folio (folio_size /
+ * blocksize) blocks. So it can have information regarding groups_per_folio
+ * which is blocks_per_folio/2
*
* The buddy cache inode is not stored on disk. The inode is thrown
* away when the filesystem is unmounted.
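A quick standalone illustration of the layout described above (not from the
patch; the arithmetic below is a plain user-space sketch, and the helper names
are local to it rather than the EXT4_LBLK_TO_PG()-style macros used later in
this diff):

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;          /* 4K pages (order-0 folios) */
        unsigned int blkbits[] = { 10, 12 };   /* 1K and 4K blocks */

        for (int b = 0; b < 2; b++) {
            unsigned int blocks_per_page = 1u << (page_shift - blkbits[b]);

            for (unsigned int group = 0; group < 3; group++) {
                unsigned int bitmap_blk = group * 2;   /* bitmap first, buddy next */

                printf("blocksize %uK group %u: bitmap in page %u, buddy in page %u\n",
                       1u << (blkbits[b] - 10), group,
                       bitmap_blk / blocks_per_page,
                       (bitmap_blk + 1) / blocks_per_page);
            }
        }
        return 0;
    }

With 1K blocks one 4K page holds the bitmap and buddy of two groups; with 4K
blocks the bitmap and buddy of a group land in two consecutive pages.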
@@ -130,47 +132,67 @@
* If "mb_optimize_scan" mount option is set, we maintain in memory group info
* structures in two data structures:
*
- * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
+ * 1) Array of largest free order xarrays (sbi->s_mb_largest_free_orders)
*
- * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
+ * Locking: Writers use xa_lock, readers use rcu_read_lock.
*
- * This is an array of lists where the index in the array represents the
+ * This is an array of xarrays where the index in the array represents the
* largest free order in the buddy bitmap of the participating group infos of
- * that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
- * number of buddy bitmap orders possible) number of lists. Group-infos are
- * placed in appropriate lists.
+ * that xarray. So, there are exactly MB_NUM_ORDERS(sb) (which means total
+ * number of buddy bitmap orders possible) number of xarrays. Group-infos are
+ * placed in appropriate xarrays.
*
- * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
+ * 2) Average fragment size xarrays (sbi->s_mb_avg_fragment_size)
*
- * Locking: sbi->s_mb_rb_lock (rwlock)
+ * Locking: Writers use xa_lock, readers use rcu_read_lock.
*
- * This is a red black tree consisting of group infos and the tree is sorted
- * by average fragment sizes (which is calculated as ext4_group_info->bb_free
- * / ext4_group_info->bb_fragments).
+ * This is an array of xarrays where in the i-th xarray there are groups with
+ * average fragment size >= 2^i and < 2^(i+1). The average fragment size
+ * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
+ * Note that we don't bother with a special xarray for completely empty
+ * groups so we only have MB_NUM_ORDERS(sb) xarrays. Group-infos are placed
+ * in appropriate xarrays.
+ *
+ * In each xarray, the index is the block group number, the value is the block group
+ * information, and a non-empty value indicates the block group is present in
+ * the current xarray.
*
* When "mb_optimize_scan" mount option is set, mballoc consults the above data
* structures to decide the order in which groups are to be traversed for
* fulfilling an allocation request.
*
- * At CR = 0, we look for groups which have the largest_free_order >= the order
- * of the request. We directly look at the largest free order list in the data
- * structure (1) above where largest_free_order = order of the request. If that
- * list is empty, we look at remaining list in the increasing order of
- * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
+ * At CR_POWER2_ALIGNED, we look for groups which have the largest_free_order
+ * >= the order of the request. We directly look at the largest free order list
+ * in the data structure (1) above where largest_free_order = order of the
+ * request. If that list is empty, we look at remaining list in the increasing
+ * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED
+ * lookup in O(1) time.
+ *
+ * At CR_GOAL_LEN_FAST, we only consider groups where
+ * average fragment size > request size. So, we look up a group which has average
+ * fragment size just above or equal to request size using our average fragment
+ * size group lists (data structure 2) in O(1) time.
*
- * At CR = 1, we only consider groups where average fragment size > request
- * size. So, we lookup a group which has average fragment size just above or
- * equal to request size using our rb tree (data structure 2) in O(log N) time.
+ * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
+ * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
+ * CR_GOAL_LEN_FAST suggests that there is no BG that has avg
+ * fragment size > goal length. So before falling to the slower
+ * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim goal length and
+ * then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a big
+ * enough average fragment size. This increases the chances of finding a
+ * suitable block group in O(1) time and results in faster allocation at the
+ * cost of reduced size of allocation.
*
* If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
- * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
+ * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and
+ * CR_GOAL_LEN_FAST phase.
*
* The regular allocator (using the buddy cache) supports a few tunables.
*
* /sys/fs/ext4/<partition>/mb_min_to_scan
* /sys/fs/ext4/<partition>/mb_max_to_scan
* /sys/fs/ext4/<partition>/mb_order2_req
- * /sys/fs/ext4/<partition>/mb_linear_limit
+ * /sys/fs/ext4/<partition>/mb_max_linear_groups
*
* The regular allocator uses buddy scan only if the request len is power of
* 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
@@ -192,7 +214,7 @@
* get traversed linearly. That may result in subsequent allocations being not
* close to each other. And so, the underlying device may get filled up in a
* non-linear fashion. While that may not matter on non-rotational devices, for
- * rotational devices that may result in higher seek times. "mb_linear_limit"
+ * rotational devices that may result in higher seek times. "mb_max_linear_groups"
* tells mballoc how many groups mballoc should search linearly before
* performing consulting above data structures for more efficient lookups. For
* non rotational devices, this value defaults to 0 and for rotational devices
@@ -348,8 +370,8 @@
* - bitlock on a group (group)
* - object (inode/locality) (object)
* - per-pa lock (pa)
- * - cr0 lists lock (cr0)
- * - cr1 tree lock (cr1)
+ * - cr_power2_aligned lists lock (cr_power2_aligned)
+ * - cr_goal_len_fast lists lock (cr_goal_len_fast)
*
* Paths:
* - new pa
@@ -381,7 +403,7 @@
*
* - allocation path (ext4_mb_regular_allocator)
* group
- * cr0/cr1
+ * cr_power2_aligned/cr_goal_len_fast
*/
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
@@ -401,12 +423,10 @@ static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
-static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
- ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
-static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
- ext4_group_t group, int cr);
+static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
+ ext4_group_t group);
static int ext4_try_to_trim_range(struct super_block *sb,
struct ext4_buddy *e4b, ext4_grpblk_t start,
@@ -549,14 +569,14 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(sb, e4b->bd_group,
inode ? inode->i_ino : 0,
blocknr,
"freeing block already freed "
"(bit %u)",
first + i);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
}
mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
}
@@ -662,7 +682,25 @@ do { \
} \
} while (0)
-static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
+/*
+ * Perform buddy integrity check with the following steps:
+ *
+ * 1. Top-down validation (from highest order down to order 1, excluding order-0 bitmap):
+ * For each pair of adjacent orders, if a higher-order bit is set (i.e. the chunk is not
+ * recorded as a free chunk of that order), at most one of the two corresponding
+ * lower-order bits may be clear (free).
+ *
+ * 2. Order-0 (bitmap) validation, performed on bit pairs:
+ * - If either bit in a pair is set (1, allocated), then all corresponding higher-order bits
+ * must not be free (0).
+ * - If both bits in a pair are clear (0, free), then exactly one of the corresponding
+ * higher-order bits must be free (0).
+ *
+ * 3. Preallocation (pa) list validation:
+ * For each preallocated block (pa) in the group:
+ * - Verify that pa_pstart falls within the bounds of this block group.
+ * - Ensure the corresponding bit(s) in the order-0 bitmap are marked as allocated (1).
+ */
+static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
const char *function, int line)
{
struct super_block *sb = e4b->bd_sb;
@@ -681,7 +719,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
void *buddy2;
if (e4b->bd_info->bb_check_counter++ % 10)
- return 0;
+ return;
while (order > 1) {
buddy = mb_find_buddy(e4b, order, &max);
@@ -695,26 +733,14 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
for (i = 0; i < max; i++) {
if (mb_test_bit(i, buddy)) {
- /* only single bit in buddy2 may be 1 */
+ /* only single bit in buddy2 may be 0 */
if (!mb_test_bit(i << 1, buddy2)) {
MB_CHECK_ASSERT(
mb_test_bit((i<<1)+1, buddy2));
- } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
- MB_CHECK_ASSERT(
- mb_test_bit(i << 1, buddy2));
}
continue;
}
- /* both bits in buddy2 must be 1 */
- MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
- MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
-
- for (j = 0; j < (1 << order); j++) {
- k = (i * (1 << order)) + j;
- MB_CHECK_ASSERT(
- !mb_test_bit(k, e4b->bd_bitmap));
- }
count++;
}
MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
@@ -730,31 +756,40 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
fragments++;
fstart = i;
}
- continue;
+ } else {
+ fstart = -1;
}
- fstart = -1;
- /* check used bits only */
- for (j = 0; j < e4b->bd_blkbits + 1; j++) {
- buddy2 = mb_find_buddy(e4b, j, &max2);
- k = i >> j;
- MB_CHECK_ASSERT(k < max2);
- MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
+ if (!(i & 1)) {
+ int in_use, zero_bit_count = 0;
+
+ in_use = mb_test_bit(i, buddy) || mb_test_bit(i + 1, buddy);
+ for (j = 1; j < e4b->bd_blkbits + 2; j++) {
+ buddy2 = mb_find_buddy(e4b, j, &max2);
+ k = i >> j;
+ MB_CHECK_ASSERT(k < max2);
+ if (!mb_test_bit(k, buddy2))
+ zero_bit_count++;
+ }
+ MB_CHECK_ASSERT(zero_bit_count == !in_use);
}
}
MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
grp = ext4_get_group_info(sb, e4b->bd_group);
+ if (!grp)
+ return;
list_for_each(cur, &grp->bb_prealloc_list) {
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
+ if (!pa->pa_len)
+ continue;
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
MB_CHECK_ASSERT(groupnr == e4b->bd_group);
for (i = 0; i < pa->pa_len; i++)
MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
}
- return 0;
}
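A tiny standalone restatement of the order-0 pair rule from step 2 of the
comment above (illustrative only; it simply mirrors the
zero_bit_count == !in_use assertion and is not code from the patch):

    /* For an even order-0 bit i, look at the pair (i, i + 1) and count how
     * many of the buddy bits covering that pair, over all higher orders,
     * are 0 (free). */
    static int pair_rule_holds(int bit_i, int bit_i1, int covering_free_bits)
    {
        int in_use = bit_i || bit_i1;   /* is either cluster allocated? */

        /* used pair: no covering chunk may be free; free pair: exactly one */
        return covering_free_bits == !in_use;
    }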
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
@@ -805,267 +840,390 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
}
}
-static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
- int (*cmp)(struct rb_node *, struct rb_node *))
+static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
- struct rb_node **iter = &root->rb_node, *parent = NULL;
+ int order;
- while (*iter) {
- parent = *iter;
- if (cmp(new, *iter) > 0)
- iter = &((*iter)->rb_left);
- else
- iter = &((*iter)->rb_right);
- }
-
- rb_link_node(new, parent, iter);
- rb_insert_color(new, root);
+ /*
+ * We don't bother with special lists for groups whose free extents are all
+ * a single block long, nor for completely empty groups.
+ */
+ order = fls(len) - 2;
+ if (order < 0)
+ return 0;
+ if (order == MB_NUM_ORDERS(sb))
+ order--;
+ if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
+ order = MB_NUM_ORDERS(sb) - 1;
+ return order;
}
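As a sanity check on the bucketing above, here is a small user-space sketch of
the same fls()-based math (fls() is open-coded because it is a kernel helper;
14 stands in for MB_NUM_ORDERS() on a 4K-block filesystem; all of this is
illustrative rather than code from the patch):

    #include <stdio.h>

    /* Open-coded fls(): position of the highest set bit, 0 for 0. */
    static int fls_sketch(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    /* Mirrors mb_avg_fragment_size_order(): len 1..3 -> 0, 4..7 -> 1,
     * 8..15 -> 2, ..., clamped to the number of buddy orders. */
    static int avg_frag_order_sketch(int len, int nr_orders)
    {
        int order = fls_sketch(len) - 2;

        if (order < 0)
            return 0;
        if (order >= nr_orders)
            order = nr_orders - 1;
        return order;
    }

    int main(void)
    {
        int lens[] = { 1, 3, 4, 7, 8, 100, 1024 };

        for (int i = 0; i < 7; i++)
            printf("avg fragment size %4d clusters -> xarray index %d\n",
                   lens[i], avg_frag_order_sketch(lens[i], 14));
        return 0;
    }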
-static int
-ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
+/* Move group to appropriate avg_fragment_size list */
+static void
+mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
- struct ext4_group_info *grp1 = rb_entry(rb1,
- struct ext4_group_info,
- bb_avg_fragment_size_rb);
- struct ext4_group_info *grp2 = rb_entry(rb2,
- struct ext4_group_info,
- bb_avg_fragment_size_rb);
- int num_frags_1, num_frags_2;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int new, old;
+
+ if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
+ return;
+
+ old = grp->bb_avg_fragment_size_order;
+ new = grp->bb_fragments == 0 ? -1 :
+ mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
+ if (new == old)
+ return;
- num_frags_1 = grp1->bb_fragments ?
- grp1->bb_free / grp1->bb_fragments : 0;
- num_frags_2 = grp2->bb_fragments ?
- grp2->bb_free / grp2->bb_fragments : 0;
+ if (old >= 0)
+ xa_erase(&sbi->s_mb_avg_fragment_size[old], grp->bb_group);
- return (num_frags_2 - num_frags_1);
+ grp->bb_avg_fragment_size_order = new;
+ if (new >= 0) {
+ /*
+ * Cannot use __GFP_NOFAIL because we hold the group lock.
+ * Although allocation for insertion may fail, it's not fatal
+ * as we have linear traversal to fall back on.
+ */
+ int err = xa_insert(&sbi->s_mb_avg_fragment_size[new],
+ grp->bb_group, grp, GFP_ATOMIC);
+ if (err)
+ mb_debug(sb, "insert group: %u to s_mb_avg_fragment_size[%d] failed, err %d",
+ grp->bb_group, new, err);
+ }
}
-/*
- * Reinsert grpinfo into the avg_fragment_size tree with new average
- * fragment size.
- */
-static void
-mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
+ struct xarray *xa,
+ ext4_group_t start, ext4_group_t end)
{
+ struct super_block *sb = ac->ac_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ enum criteria cr = ac->ac_criteria;
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ unsigned long group = start;
+ struct ext4_group_info *grp;
- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
- return;
+ if (WARN_ON_ONCE(end > ngroups || start >= end))
+ return 0;
+
+ xa_for_each_range(xa, group, grp, start, end - 1) {
+ int err;
+
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
- write_lock(&sbi->s_mb_rb_lock);
- if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
- rb_erase(&grp->bb_avg_fragment_size_rb,
- &sbi->s_mb_avg_fragment_size_root);
- RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
+ err = ext4_mb_scan_group(ac, grp->bb_group);
+ if (err || ac->ac_status != AC_STATUS_CONTINUE)
+ return err;
+
+ cond_resched();
}
- ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
- &grp->bb_avg_fragment_size_rb,
- ext4_mb_avg_fragment_size_cmp);
- write_unlock(&sbi->s_mb_rb_lock);
+ return 0;
+}
+
+/*
+ * Find a suitable group of given order from the largest free orders xarray.
+ */
+static inline int
+ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac,
+ int order, ext4_group_t start,
+ ext4_group_t end)
+{
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];
+
+ if (xa_empty(xa))
+ return 0;
+
+ return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
}
/*
* Choose next group by traversing largest_free_order lists. Updates *new_cr if
* cr level needs an update.
*/
-static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
- int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
+ ext4_group_t group)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *iter, *grp;
int i;
+ int ret = 0;
+ ext4_group_t start, end;
- if (ac->ac_status == AC_STATUS_FOUND)
- return;
-
- if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
- atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
-
- grp = NULL;
+ start = group;
+ end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
- if (list_empty(&sbi->s_mb_largest_free_orders[i]))
- continue;
- read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
- if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
- continue;
- }
- grp = NULL;
- list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
- bb_largest_free_order_node) {
- if (sbi->s_mb_stats)
- atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
- if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
- grp = iter;
- break;
- }
- }
- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
- if (grp)
- break;
+ ret = ext4_mb_scan_groups_largest_free_order_range(ac, i,
+ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
+ if (start) {
+ end = start;
+ start = 0;
+ goto wrap_around;
}
- if (!grp) {
- /* Increment cr and search again */
- *new_cr = 1;
- } else {
- *group = grp->bb_group;
- ac->ac_last_optimal_group = *group;
- ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+
+ /* Increment cr and search again if no group is found */
+ ac->ac_criteria = CR_GOAL_LEN_FAST;
+ return ret;
+}
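(Worked example, not from the patch: with 128 groups and a goal group of 100,
the loop above searches groups 100..127 across all candidate orders first and,
only if nothing is found there, takes the wrap_around path to search groups
0..99, so the neighbourhood of the goal group is always preferred.)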
+
+/*
+ * Find a suitable group of given order from the average fragments xarray.
+ */
+static int
+ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac,
+ int order, ext4_group_t start,
+ ext4_group_t end)
+{
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];
+
+ if (xa_empty(xa))
+ return 0;
+
+ return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
+}
+
+/*
+ * Scan groups by traversing the average fragment size xarrays of suitable
+ * order, moving ac->ac_criteria on to the next phase if no group is found.
+ */
+static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
+ ext4_group_t group)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ int i, ret = 0;
+ ext4_group_t start, end;
+
+ start = group;
+ end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
+ i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+ for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+ ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i,
+ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
+ if (start) {
+ end = start;
+ start = 0;
+ goto wrap_around;
}
+
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+ /*
+ * CR_BEST_AVAIL_LEN works on the concept that we have
+ * a larger normalized goal len request which can be trimmed to
+ * a smaller goal len such that it can still satisfy the original
+ * request len. However, allocation requests for non-regular
+ * files never get normalized.
+ * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
+ */
+ if (ac->ac_flags & EXT4_MB_HINT_DATA)
+ ac->ac_criteria = CR_BEST_AVAIL_LEN;
+ else
+ ac->ac_criteria = CR_GOAL_LEN_SLOW;
+
+ return ret;
}
/*
- * Choose next group by traversing average fragment size tree. Updates *new_cr
- * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
- * the linear search should continue for one iteration since there's lock
- * contention on the rb tree lock.
+ * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
+ * order we have and proactively trim the goal request length to that order to
+ * find a suitable group faster.
+ *
+ * This optimizes allocation speed at the cost of slightly reduced
+ * preallocations. However, we make sure that we don't trim the request too
+ * much and fall to CR_GOAL_LEN_SLOW in that case.
*/
-static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
- int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
+ ext4_group_t group)
{
+ int ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- int avg_fragment_size, best_so_far;
- struct rb_node *node, *found;
- struct ext4_group_info *grp;
+ int i, order, min_order;
+ unsigned long num_stripe_clusters = 0;
+ ext4_group_t start, end;
/*
- * If there is contention on the lock, instead of waiting for the lock
- * to become available, just continue searching lineraly. We'll resume
- * our rb tree search later starting at ac->ac_last_optimal_group.
+ * mb_avg_fragment_size_order() returns order in a way that makes
+ * retrieving back the length using (1 << order) inaccurate. Hence, use
+ * fls() instead since we need to know the actual length while modifying
+ * goal length.
*/
- if (!read_trylock(&sbi->s_mb_rb_lock)) {
- ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
- return;
+ order = fls(ac->ac_g_ex.fe_len) - 1;
+ if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
+ order = MB_NUM_ORDERS(ac->ac_sb);
+ min_order = order - sbi->s_mb_best_avail_max_trim_order;
+ if (min_order < 0)
+ min_order = 0;
+
+ if (sbi->s_stripe > 0) {
+ /*
+ * We assume that the stripe size is always a multiple of the
+ * cluster ratio; otherwise __ext4_fill_super() exits early.
+ */
+ num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
+ if (1 << min_order < num_stripe_clusters)
+ /*
+ * We consider 1 order less because later we round
+ * up the goal len to num_stripe_clusters
+ */
+ min_order = fls(num_stripe_clusters) - 1;
}
- if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
- if (sbi->s_mb_stats)
- atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
- /* We have found something at CR 1 in the past */
- grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
- for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
- found = rb_next(found)) {
- grp = rb_entry(found, struct ext4_group_info,
- bb_avg_fragment_size_rb);
- if (sbi->s_mb_stats)
- atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
- if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
- break;
- }
- goto done;
- }
-
- node = sbi->s_mb_avg_fragment_size_root.rb_node;
- best_so_far = 0;
- found = NULL;
-
- while (node) {
- grp = rb_entry(node, struct ext4_group_info,
- bb_avg_fragment_size_rb);
- avg_fragment_size = 0;
- if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
- avg_fragment_size = grp->bb_fragments ?
- grp->bb_free / grp->bb_fragments : 0;
- if (!best_so_far || avg_fragment_size < best_so_far) {
- best_so_far = avg_fragment_size;
- found = node;
- }
+ if (1 << min_order < ac->ac_o_ex.fe_len)
+ min_order = fls(ac->ac_o_ex.fe_len);
+
+ start = group;
+ end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
+ for (i = order; i >= min_order; i--) {
+ int frag_order;
+ /*
+ * Scale down goal len to make sure we find something
+ * in the free fragments list. Basically, reduce
+ * preallocations.
+ */
+ ac->ac_g_ex.fe_len = 1 << i;
+
+ if (num_stripe_clusters > 0) {
+ /*
+ * Try to round up the adjusted goal length to
+ * stripe size (in cluster units) multiple for
+ * efficiency.
+ */
+ ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
+ num_stripe_clusters);
}
- if (avg_fragment_size > ac->ac_g_ex.fe_len)
- node = node->rb_right;
- else
- node = node->rb_left;
- }
-done:
- if (found) {
- grp = rb_entry(found, struct ext4_group_info,
- bb_avg_fragment_size_rb);
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
- } else {
- *new_cr = 2;
+ frag_order = mb_avg_fragment_size_order(ac->ac_sb,
+ ac->ac_g_ex.fe_len);
+
+ ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order,
+ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
}
+ if (start) {
+ end = start;
+ start = 0;
+ goto wrap_around;
+ }
+
+ /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
+ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+ ac->ac_criteria = CR_GOAL_LEN_SLOW;
- read_unlock(&sbi->s_mb_rb_lock);
- ac->ac_last_optimal_group = *group;
+ return ret;
}
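(Worked example with made-up numbers, not from the patch: for a normalized goal
of 1000 clusters, order = fls(1000) - 1 = 9; with s_mb_best_avail_max_trim_order
set to 3 and no stripe configured, min_order becomes 6, so the loop retries with
goal lengths of 512, 256, 128 and 64 clusters before giving up; this assumes the
original, un-normalized request fits in 64 clusters, otherwise min_order is
raised to cover it.)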
static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
return 0;
- if (ac->ac_criteria >= 2)
+ if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
return 0;
- if (ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
+ if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
return 0;
return 1;
}
/*
- * Return next linear group for allocation. If linear traversal should not be
- * performed, this function just returns the same group
+ * Advance *group to the next linear group for allocation, wrapping around at ngroups.
*/
-static int
-next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
+static void next_linear_group(ext4_group_t *group, ext4_group_t ngroups)
{
- if (!should_optimize_scan(ac))
- goto inc_and_return;
-
- if (ac->ac_groups_linear_remaining) {
- ac->ac_groups_linear_remaining--;
- goto inc_and_return;
- }
-
- if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
- ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
- goto inc_and_return;
- }
-
- return group;
-inc_and_return:
/*
* Artificially restricted ngroups for non-extent
* files makes group > ngroups possible on first loop.
*/
- return group + 1 >= ngroups ? 0 : group + 1;
+ *group = *group + 1 >= ngroups ? 0 : *group + 1;
}
-/*
- * ext4_mb_choose_next_group: choose next group for allocation.
- *
- * @ac Allocation Context
- * @new_cr This is an output parameter. If the there is no good group
- * available at current CR level, this field is updated to indicate
- * the new cr level that should be used.
- * @group This is an input / output parameter. As an input it indicates the
- * next group that the allocator intends to use for allocation. As
- * output, this field indicates the next group that should be used as
- * determined by the optimization functions.
- * @ngroups Total number of groups
- */
-static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
- int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac,
+ ext4_group_t ngroups, ext4_group_t *start, ext4_group_t count)
{
- *new_cr = ac->ac_criteria;
+ int ret, i;
+ enum criteria cr = ac->ac_criteria;
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t group = *start;
- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
- return;
+ for (i = 0; i < count; i++, next_linear_group(&group, ngroups)) {
+ ret = ext4_mb_scan_group(ac, group);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ cond_resched();
+ }
- if (*new_cr == 0) {
- ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
- } else if (*new_cr == 1) {
- ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
- } else {
+ *start = group;
+ if (count == ngroups)
+ ac->ac_criteria++;
+
+ /* Processed all groups and haven't found blocks */
+ if (sbi->s_mb_stats && i == ngroups)
+ atomic64_inc(&sbi->s_bal_cX_failed[cr]);
+
+ return 0;
+}
+
+static int ext4_mb_scan_groups(struct ext4_allocation_context *ac)
+{
+ int ret = 0;
+ ext4_group_t start;
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb);
+
+ /* non-extent files are limited to low blocks/groups */
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
+ ngroups = sbi->s_blockfile_groups;
+
+ /* searching for the right group starts from the specified goal value */
+ start = ac->ac_g_ex.fe_group;
+ ac->ac_prefetch_grp = start;
+ ac->ac_prefetch_nr = 0;
+
+ if (!should_optimize_scan(ac))
+ return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups);
+
+ /*
+ * Optimized scanning can return non-adjacent groups, which can cause
+ * seek overhead on rotational disks. So try a few linear groups before
+ * switching to the optimized scan.
+ */
+ if (sbi->s_mb_max_linear_groups)
+ ret = ext4_mb_scan_groups_linear(ac, ngroups, &start,
+ sbi->s_mb_max_linear_groups);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+
+ switch (ac->ac_criteria) {
+ case CR_POWER2_ALIGNED:
+ return ext4_mb_scan_groups_p2_aligned(ac, start);
+ case CR_GOAL_LEN_FAST:
+ return ext4_mb_scan_groups_goal_fast(ac, start);
+ case CR_BEST_AVAIL_LEN:
+ return ext4_mb_scan_groups_best_avail(ac, start);
+ default:
/*
- * TODO: For CR=2, we can arrange groups in an rb tree sorted by
- * bb_free. But until that happens, we should never come here.
+ * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
+ * rb tree sorted by bb_free. But until that happens, we should
+ * never come here.
*/
WARN_ON(1);
}
+
+ return 0;
}
/*
@@ -1076,39 +1234,43 @@ static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- int i;
-
- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
- write_lock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
- list_del_init(&grp->bb_largest_free_order_node);
- write_unlock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
- }
- grp->bb_largest_free_order = -1; /* uninit */
+ int new, old = grp->bb_largest_free_order;
- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
- if (grp->bb_counters[i] > 0) {
- grp->bb_largest_free_order = i;
+ for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
+ if (grp->bb_counters[new] > 0)
break;
- }
+
+ /* No need to move between order lists? */
+ if (new == old)
+ return;
+
+ if (old >= 0) {
+ struct xarray *xa = &sbi->s_mb_largest_free_orders[old];
+
+ if (!xa_empty(xa) && xa_load(xa, grp->bb_group))
+ xa_erase(xa, grp->bb_group);
}
- if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
- grp->bb_largest_free_order >= 0 && grp->bb_free) {
- write_lock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
- list_add_tail(&grp->bb_largest_free_order_node,
- &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
- write_unlock(&sbi->s_mb_largest_free_orders_locks[
- grp->bb_largest_free_order]);
+
+ grp->bb_largest_free_order = new;
+ if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
+ /*
+ * Cannot use __GFP_NOFAIL because we hold the group lock.
+ * Although allocation for insertion may fail, it's not fatal
+ * as we have linear traversal to fall back on.
+ */
+ int err = xa_insert(&sbi->s_mb_largest_free_orders[new],
+ grp->bb_group, grp, GFP_ATOMIC);
+ if (err)
+ mb_debug(sb, "insert group: %u to s_mb_largest_free_orders[%d] failed, err %d",
+ grp->bb_group, new, err);
}
}
static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
- void *buddy, void *bitmap, ext4_group_t group)
+ void *buddy, void *bitmap, ext4_group_t group,
+ struct ext4_group_info *grp)
{
- struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_grpblk_t i = 0;
@@ -1151,13 +1313,31 @@ void ext4_mb_generate_buddy(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
}
mb_set_largest_free_order(sb, grp);
+ mb_update_avg_fragment_size(sb, grp);
clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
period = get_cycles() - period;
atomic_inc(&sbi->s_mb_buddies_generated);
atomic64_add(period, &sbi->s_mb_generation_time);
- mb_update_avg_fragment_size(sb, grp);
+}
+
+static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+{
+ int count;
+ int order = 1;
+ void *buddy;
+
+ while ((buddy = mb_find_buddy(e4b, order++, &count)))
+ mb_set_bits(buddy, 0, count);
+
+ e4b->bd_info->bb_fragments = 0;
+ memset(e4b->bd_info->bb_counters, 0,
+ sizeof(*e4b->bd_info->bb_counters) *
+ (e4b->bd_sb->s_blocksize_bits + 2));
+
+ ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
+ e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
}
/* The buddy information is attached the buddy cache inode
@@ -1166,26 +1346,25 @@ void ext4_mb_generate_buddy(struct super_block *sb,
* block bitmap and buddy information. The information are
* stored in the inode as
*
- * { page }
+ * { folio }
* [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
*
*
* one block each for bitmap and buddy information.
- * So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
- * So it can have information regarding groups_per_page which
- * is blocks_per_page/2
+ * So for each group we take up 2 blocks. A folio can
+ * contain blocks_per_folio (folio_size / blocksize) blocks.
+ * So it can have information regarding groups_per_folio which
+ * is blocks_per_folio/2
*
* Locking note: This routine takes the block group lock of all groups
- * for this page; do not hold this lock when calling this routine!
+ * for this folio; do not hold this lock when calling this routine!
*/
-
-static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
{
ext4_group_t ngroups;
- int blocksize;
- int blocks_per_page;
- int groups_per_page;
+ unsigned int blocksize;
+ int blocks_per_folio;
+ int groups_per_folio;
int err = 0;
int i;
ext4_group_t first_group, group;
@@ -1198,44 +1377,42 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
char *bitmap;
struct ext4_group_info *grinfo;
- inode = page->mapping->host;
+ inode = folio->mapping->host;
sb = inode->i_sb;
ngroups = ext4_get_groups_count(sb);
blocksize = i_blocksize(inode);
- blocks_per_page = PAGE_SIZE / blocksize;
-
- mb_debug(sb, "init page %lu\n", page->index);
+ blocks_per_folio = folio_size(folio) / blocksize;
+ WARN_ON_ONCE(!blocks_per_folio);
+ groups_per_folio = DIV_ROUND_UP(blocks_per_folio, 2);
- groups_per_page = blocks_per_page >> 1;
- if (groups_per_page == 0)
- groups_per_page = 1;
+ mb_debug(sb, "init folio %lu\n", folio->index);
/* allocate buffer_heads to read bitmaps */
- if (groups_per_page > 1) {
- i = sizeof(struct buffer_head *) * groups_per_page;
+ if (groups_per_folio > 1) {
+ i = sizeof(struct buffer_head *) * groups_per_folio;
bh = kzalloc(i, gfp);
- if (bh == NULL) {
- err = -ENOMEM;
- goto out;
- }
+ if (bh == NULL)
+ return -ENOMEM;
} else
bh = &bhs;
- first_group = page->index * blocks_per_page / 2;
-
- /* read all groups the page covers into the cache */
- for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
+ /* read all groups the folio covers into the cache */
+ first_group = EXT4_PG_TO_LBLK(inode, folio->index) / 2;
+ for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
if (group >= ngroups)
break;
grinfo = ext4_get_group_info(sb, group);
+ if (!grinfo)
+ continue;
/*
- * If page is uptodate then we came here after online resize
+ * If folio is uptodate then we came here after online resize
* which added some new uninitialized group info structs, so
- * we must skip all initialized uptodate buddies on the page,
+ * we must skip all initialized uptodate buddies on the folio,
* which may be currently in use by an allocating task.
*/
- if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
+ if (folio_test_uptodate(folio) &&
+ !EXT4_MB_GRP_NEED_INIT(grinfo)) {
bh[i] = NULL;
continue;
}
@@ -1249,7 +1426,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
}
/* wait for I/O completion */
- for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
+ for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
int err2;
if (!bh[i])
@@ -1259,8 +1436,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
err = err2;
}
- first_block = page->index * blocks_per_page;
- for (i = 0; i < blocks_per_page; i++) {
+ first_block = EXT4_PG_TO_LBLK(inode, folio->index);
+ for (i = 0; i < blocks_per_folio; i++) {
group = (first_block + i) >> 1;
if (group >= ngroups)
break;
@@ -1280,20 +1457,24 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
* above
*
*/
- data = page_address(page) + (i * blocksize);
+ data = folio_address(folio) + (i * blocksize);
bitmap = bh[group - first_group]->b_data;
/*
* We place the buddy block and bitmap block
* close together
*/
+ grinfo = ext4_get_group_info(sb, group);
+ if (!grinfo) {
+ err = -EFSCORRUPTED;
+ goto out;
+ }
if ((first_block + i) & 1) {
/* this is block of buddy */
BUG_ON(incore == NULL);
- mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
- group, page->index, i * blocksize);
+ mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
+ group, folio->index, i * blocksize);
trace_ext4_mb_buddy_bitmap_load(sb, group);
- grinfo = ext4_get_group_info(sb, group);
grinfo->bb_fragments = 0;
memset(grinfo->bb_counters, 0,
sizeof(*grinfo->bb_counters) *
@@ -1304,14 +1485,14 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
ext4_lock_group(sb, group);
/* init the buddy */
memset(data, 0xff, blocksize);
- ext4_mb_generate_buddy(sb, data, incore, group);
+ ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
ext4_unlock_group(sb, group);
incore = NULL;
} else {
/* this is block of bitmap */
BUG_ON(incore != NULL);
- mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
- group, page->index, i * blocksize);
+ mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
+ group, folio->index, i * blocksize);
trace_ext4_mb_bitmap_load(sb, group);
/* see comments in ext4_mb_put_pa() */
@@ -1320,7 +1501,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
/* mark all preallocated blks used in in-core bitmap */
ext4_mb_generate_from_pa(sb, data, group);
- ext4_mb_generate_from_freelist(sb, data, group);
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
ext4_unlock_group(sb, group);
/* set incore so that the buddy information can be
@@ -1329,11 +1510,11 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
incore = data;
}
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
out:
if (bh) {
- for (i = 0; i < groups_per_page; i++)
+ for (i = 0; i < groups_per_folio; i++)
brelse(bh[i]);
if (bh != &bhs)
kfree(bh);
@@ -1342,68 +1523,71 @@ out:
}
/*
- * Lock the buddy and bitmap pages. This make sure other parallel init_group
- * on the same buddy page doesn't happen whild holding the buddy page lock.
- * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
- * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
+ * Lock the buddy and bitmap folios. This makes sure other parallel init_group
+ * on the same buddy folio doesn't happen while holding the buddy folio lock.
+ * Return locked buddy and bitmap folios on e4b struct. If buddy and bitmap
+ * are on the same folio e4b->bd_buddy_folio is NULL and return value is 0.
*/
-static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+static int ext4_mb_get_buddy_folio_lock(struct super_block *sb,
ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
- int block, pnum, poff;
- int blocks_per_page;
- struct page *page;
+ int block, pnum;
+ struct folio *folio;
- e4b->bd_buddy_page = NULL;
- e4b->bd_bitmap_page = NULL;
+ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
- blocks_per_page = PAGE_SIZE / sb->s_blocksize;
/*
* the buddy cache inode stores the block bitmap
* and buddy information in consecutive blocks.
* So for each group we need two blocks.
*/
block = group * 2;
- pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (!page)
- return -ENOMEM;
- BUG_ON(page->mapping != inode->i_mapping);
- e4b->bd_bitmap_page = page;
- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+ pnum = EXT4_LBLK_TO_PG(inode, block);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ BUG_ON(folio->mapping != inode->i_mapping);
+ WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize);
+ e4b->bd_bitmap_folio = folio;
+ e4b->bd_bitmap = folio_address(folio) +
+ offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
- if (blocks_per_page >= 2) {
- /* buddy and bitmap are on the same page */
+ block++;
+ pnum = EXT4_LBLK_TO_PG(inode, block);
+ if (folio_contains(folio, pnum)) {
+ /* buddy and bitmap are on the same folio */
return 0;
}
- block++;
- pnum = block / blocks_per_page;
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (!page)
- return -ENOMEM;
- BUG_ON(page->mapping != inode->i_mapping);
- e4b->bd_buddy_page = page;
+ /* we need another folio for the buddy */
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ BUG_ON(folio->mapping != inode->i_mapping);
+ WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize);
+ e4b->bd_buddy_folio = folio;
return 0;
}
-static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+static void ext4_mb_put_buddy_folio_lock(struct ext4_buddy *e4b)
{
- if (e4b->bd_bitmap_page) {
- unlock_page(e4b->bd_bitmap_page);
- put_page(e4b->bd_bitmap_page);
+ if (e4b->bd_bitmap_folio) {
+ folio_unlock(e4b->bd_bitmap_folio);
+ folio_put(e4b->bd_bitmap_folio);
}
- if (e4b->bd_buddy_page) {
- unlock_page(e4b->bd_buddy_page);
- put_page(e4b->bd_buddy_page);
+ if (e4b->bd_buddy_folio) {
+ folio_unlock(e4b->bd_buddy_folio);
+ folio_put(e4b->bd_buddy_folio);
}
}
/*
* Locking note: This routine calls ext4_mb_init_cache(), which takes the
- * block group lock of all groups for this page; do not hold the BG lock when
+ * block group lock of all groups for this folio; do not hold the BG lock when
* calling this routine!
*/
static noinline_for_stack
@@ -1412,22 +1596,25 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
struct ext4_group_info *this_grp;
struct ext4_buddy e4b;
- struct page *page;
+ struct folio *folio;
int ret = 0;
might_sleep();
mb_debug(sb, "init group %u\n", group);
this_grp = ext4_get_group_info(sb, group);
+ if (!this_grp)
+ return -EFSCORRUPTED;
+
/*
* This ensures that we don't reinit the buddy cache
- * page which map to the group from which we are already
+ * folio which map to the group from which we are already
* allocating. If we are looking at the buddy cache we would
* have taken a reference using ext4_mb_load_buddy and that
- * would have pinned buddy page to page cache.
- * The call to ext4_mb_get_buddy_page_lock will mark the
- * page accessed.
+ * would have pinned buddy folio to page cache.
+ * The call to ext4_mb_get_buddy_folio_lock will mark the
+ * folio accessed.
*/
- ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
+ ret = ext4_mb_get_buddy_folio_lock(sb, group, &e4b, gfp);
if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
/*
* somebody initialized the group
@@ -1436,52 +1623,50 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
goto err;
}
- page = e4b.bd_bitmap_page;
- ret = ext4_mb_init_cache(page, NULL, gfp);
+ folio = e4b.bd_bitmap_folio;
+ ret = ext4_mb_init_cache(folio, NULL, gfp);
if (ret)
goto err;
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
- if (e4b.bd_buddy_page == NULL) {
+ if (e4b.bd_buddy_folio == NULL) {
/*
* If both the bitmap and buddy are in
- * the same page we don't need to force
+ * the same folio we don't need to force
* init the buddy
*/
ret = 0;
goto err;
}
/* init buddy cache */
- page = e4b.bd_buddy_page;
- ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
+ folio = e4b.bd_buddy_folio;
+ ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
if (ret)
goto err;
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
err:
- ext4_mb_put_buddy_page_lock(&e4b);
+ ext4_mb_put_buddy_folio_lock(&e4b);
return ret;
}
/*
* Locking note: This routine calls ext4_mb_init_cache(), which takes the
- * block group lock of all groups for this page; do not hold the BG lock when
+ * block group lock of all groups for this folio; do not hold the BG lock when
* calling this routine!
*/
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
struct ext4_buddy *e4b, gfp_t gfp)
{
- int blocks_per_page;
int block;
int pnum;
- int poff;
- struct page *page;
+ struct folio *folio;
int ret;
struct ext4_group_info *grp;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1490,15 +1675,16 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
might_sleep();
mb_debug(sb, "load group %u\n", group);
- blocks_per_page = PAGE_SIZE / sb->s_blocksize;
grp = ext4_get_group_info(sb, group);
+ if (!grp)
+ return -EFSCORRUPTED;
e4b->bd_blkbits = sb->s_blocksize_bits;
e4b->bd_info = grp;
e4b->bd_sb = sb;
e4b->bd_group = group;
- e4b->bd_buddy_page = NULL;
- e4b->bd_bitmap_page = NULL;
+ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
/*
@@ -1516,95 +1702,115 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
* So for each group we need two blocks.
*/
block = group * 2;
- pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
-
- /* we could use find_or_create_page(), but it locks page
- * what we'd like to avoid in fast path ... */
- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
- if (page)
+ pnum = EXT4_LBLK_TO_PG(inode, block);
+
+ /* Avoid locking the folio in the fast path ... */
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+ if (!IS_ERR(folio))
/*
- * drop the page reference and try
- * to get the page with lock. If we
+ * drop the folio reference and try
+ * to get the folio with lock. If we
* are not uptodate that implies
- * somebody just created the page but
- * is yet to initialize the same. So
+ * somebody just created the folio but
+ * is yet to initialize it. So
* wait for it to initialize.
*/
- put_page(page);
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (page) {
- BUG_ON(page->mapping != inode->i_mapping);
- if (!PageUptodate(page)) {
- ret = ext4_mb_init_cache(page, NULL, gfp);
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (!IS_ERR(folio)) {
+ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
+ "ext4: bitmap's mapping != inode->i_mapping\n")) {
+ /* should never happen */
+ folio_unlock(folio);
+ ret = -EINVAL;
+ goto err;
+ }
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_mb_init_cache(folio, NULL, gfp);
if (ret) {
- unlock_page(page);
+ folio_unlock(folio);
goto err;
}
- mb_cmp_bitmaps(e4b, page_address(page) +
- (poff * sb->s_blocksize));
+ mb_cmp_bitmaps(e4b, folio_address(folio) +
+ offset_in_folio(folio,
+ EXT4_LBLK_TO_B(inode, block)));
}
- unlock_page(page);
+ folio_unlock(folio);
}
}
- if (page == NULL) {
- ret = -ENOMEM;
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto err;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
- /* Pages marked accessed already */
- e4b->bd_bitmap_page = page;
- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+ /* Folios marked accessed already */
+ e4b->bd_bitmap_folio = folio;
+ e4b->bd_bitmap = folio_address(folio) +
+ offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
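/*
 * Illustrative note (not part of this patch): assuming EXT4_LBLK_TO_PG()
 * maps a logical block number to its page-cache index and EXT4_LBLK_TO_B()
 * maps it to a byte offset, a 4k-block, order-0-folio setup gives for
 * group 3 (bitmap in block 6) roughly:
 *
 *	pnum = EXT4_LBLK_TO_PG(inode, 6);                        -> 6
 *	off  = offset_in_folio(folio, EXT4_LBLK_TO_B(inode, 6)); -> 0
 *
 * With larger folios the buddy block (block 7 here) may share the folio,
 * which is why the code below checks folio_contains() before looking up a
 * second folio.
 */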
block++;
- pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
-
- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
- if (page)
- put_page(page);
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (page) {
- BUG_ON(page->mapping != inode->i_mapping);
- if (!PageUptodate(page)) {
- ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
+ pnum = EXT4_LBLK_TO_PG(inode, block);
+ /* buddy and bitmap are on the same folio? */
+ if (folio_contains(folio, pnum)) {
+ folio_get(folio);
+ goto update_buddy;
+ }
+
+ /* we need another folio for the buddy */
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+ if (!IS_ERR(folio))
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (!IS_ERR(folio)) {
+ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
+ "ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
+ /* should never happen */
+ folio_unlock(folio);
+ ret = -EINVAL;
+ goto err;
+ }
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_mb_init_cache(folio, e4b->bd_bitmap,
gfp);
if (ret) {
- unlock_page(page);
+ folio_unlock(folio);
goto err;
}
}
- unlock_page(page);
+ folio_unlock(folio);
}
}
- if (page == NULL) {
- ret = -ENOMEM;
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto err;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
- /* Pages marked accessed already */
- e4b->bd_buddy_page = page;
- e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
+update_buddy:
+ /* Folios marked accessed already */
+ e4b->bd_buddy_folio = folio;
+ e4b->bd_buddy = folio_address(folio) +
+ offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));
return 0;
err:
- if (page)
- put_page(page);
- if (e4b->bd_bitmap_page)
- put_page(e4b->bd_bitmap_page);
- if (e4b->bd_buddy_page)
- put_page(e4b->bd_buddy_page);
+ if (!IS_ERR_OR_NULL(folio))
+ folio_put(folio);
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+
e4b->bd_buddy = NULL;
e4b->bd_bitmap = NULL;
return ret;
@@ -1618,10 +1824,10 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
- if (e4b->bd_bitmap_page)
- put_page(e4b->bd_bitmap_page);
- if (e4b->bd_buddy_page)
- put_page(e4b->bd_buddy_page);
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+ if (e4b->bd_buddy_folio)
+ folio_put(e4b->bd_buddy_folio);
}
@@ -1689,7 +1895,7 @@ static int mb_test_and_clear_bits(void *bm, int cur, int len)
return zero_bit;
}
-void ext4_set_bits(void *bm, int cur, int len)
+void mb_set_bits(void *bm, int cur, int len)
{
__u32 *addr;
@@ -1767,7 +1973,8 @@ static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
break;
order++;
- if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
+ buddy2 = mb_find_buddy(e4b, order, &max);
+ if (!buddy2) {
mb_clear_bits(buddy, first, last - first + 1);
e4b->bd_info->bb_counters[order - 1] += last - first + 1;
break;
@@ -1798,11 +2005,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
mb_check_buddy(e4b);
mb_free_blocks_double(inode, e4b, first, count);
- this_cpu_inc(discard_pa_seq);
- e4b->bd_info->bb_free += count;
- if (first < e4b->bd_info->bb_first_free)
- e4b->bd_info->bb_first_free = first;
-
/* access memory sequentially: check left neighbour,
* clear range and then check right neighbour
*/
@@ -1816,21 +2018,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t blocknr;
+ /*
+ * Fastcommit replay can free already freed blocks which
+ * corrupts allocation info. Regenerate it.
+ */
+ if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+ mb_regenerate_buddy(e4b);
+ goto check;
+ }
+
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += EXT4_C2B(sbi, block);
- if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
- ext4_grp_locked_error(sb, e4b->bd_group,
- inode ? inode->i_ino : 0,
- blocknr,
- "freeing already freed block (bit %u); block bitmap corrupt.",
- block);
- ext4_mark_group_bitmap_corrupted(
- sb, e4b->bd_group,
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
- }
- goto done;
+ ext4_grp_locked_error(sb, e4b->bd_group,
+ inode ? inode->i_ino : 0, blocknr,
+ "freeing already freed block (bit %u); block bitmap corrupt.",
+ block);
+ return;
}
+ this_cpu_inc(discard_pa_seq);
+ e4b->bd_info->bb_free += count;
+ if (first < e4b->bd_info->bb_first_free)
+ e4b->bd_info->bb_first_free = first;
+
/* let's maintain fragments counter */
if (left_is_free && right_is_free)
e4b->bd_info->bb_fragments--;
@@ -1855,17 +2067,16 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
if (first <= last)
mb_buddy_mark_free(e4b, first >> 1, last >> 1);
-done:
mb_set_largest_free_order(sb, e4b->bd_info);
mb_update_avg_fragment_size(sb, e4b->bd_info);
+check:
mb_check_buddy(e4b);
}
static int mb_find_extent(struct ext4_buddy *e4b, int block,
int needed, struct ext4_free_extent *ex)
{
- int next = block;
- int max, order;
+ int max, order, next;
void *buddy;
assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
@@ -1883,16 +2094,12 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
/* find actual order */
order = mb_find_order_for_block(e4b, block);
- block = block >> order;
- ex->fe_len = 1 << order;
- ex->fe_start = block << order;
+ ex->fe_len = (1 << order) - (block & ((1 << order) - 1));
+ ex->fe_start = block;
ex->fe_group = e4b->bd_group;
- /* calc difference from given start */
- next = next - ex->fe_start;
- ex->fe_len -= next;
- ex->fe_start += next;
+ block = block >> order;
while (needed > ex->fe_len &&
mb_find_buddy(e4b, order, &max)) {
@@ -1930,12 +2137,12 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
int ord;
int mlen = 0;
int max = 0;
- int cur;
int start = ex->fe_start;
int len = ex->fe_len;
unsigned ret = 0;
int len0 = len;
void *buddy;
+ int ord_start, ord_end;
BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
BUG_ON(e4b->bd_group != ex->fe_group);
@@ -1979,24 +2186,34 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
if (ret == 0)
ret = len | (ord << 16);
- /* we have to split large buddy */
BUG_ON(ord <= 0);
buddy = mb_find_buddy(e4b, ord, &max);
mb_set_bit(start >> ord, buddy);
e4b->bd_info->bb_counters[ord]--;
- ord--;
- cur = (start >> ord) & ~1U;
- buddy = mb_find_buddy(e4b, ord, &max);
- mb_clear_bit(cur, buddy);
- mb_clear_bit(cur + 1, buddy);
- e4b->bd_info->bb_counters[ord]++;
- e4b->bd_info->bb_counters[ord]++;
+ ord_start = (start >> ord) << ord;
+ ord_end = ord_start + (1 << ord);
+ /* first chunk */
+ if (start > ord_start)
+ ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
+ ord_start, start - ord_start,
+ e4b->bd_info);
+
+ /* last chunk */
+ if (start + len < ord_end) {
+ ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
+ start + len,
+ ord_end - (start + len),
+ e4b->bd_info);
+ break;
+ }
+ len = start + len - ord_end;
+ start = ord_end;
}
mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
- ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
+ mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
mb_check_buddy(e4b);
return ret;
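/*
 * Worked example (illustration only, not from the patch): suppose the loop
 * above is marking bits 5..10 used inside a free order-4 chunk covering
 * bits 0..15. Then ord_start = 0 and ord_end = 16, so the "first chunk"
 * branch re-frees bits 0..4 via ext4_mb_mark_free_simple(), the "last
 * chunk" branch re-frees bits 11..15, and the loop breaks with only the
 * requested range left marked as used.
 */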
@@ -2027,23 +2244,23 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
ac->ac_buddy = ret >> 16;
/*
- * take the page reference. We want the page to be pinned
+ * take the folio reference. We want the folio to be pinned
* so that we don't get an ext4_mb_init_cache() call for this
* group until we update the bitmap. That would mean we
* double allocate blocks. The reference is dropped
* in ext4_mb_release_context
*/
- ac->ac_bitmap_page = e4b->bd_bitmap_page;
- get_page(ac->ac_bitmap_page);
- ac->ac_buddy_page = e4b->bd_buddy_page;
- get_page(ac->ac_buddy_page);
+ ac->ac_bitmap_folio = e4b->bd_bitmap_folio;
+ folio_get(ac->ac_bitmap_folio);
+ ac->ac_buddy_folio = e4b->bd_buddy_folio;
+ folio_get(ac->ac_buddy_folio);
/* store last allocated for subsequent stream allocation */
if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
- spin_lock(&sbi->s_md_lock);
- sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
- sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
- spin_unlock(&sbi->s_md_lock);
+ int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals;
+
+ WRITE_ONCE(sbi->s_mb_last_groups[hash], ac->ac_f_ex.fe_group);
}
+
/*
* As we've just preallocated more space than
* user requested originally, we store allocated
@@ -2061,8 +2278,6 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_free_extent *bex = &ac->ac_b_ex;
struct ext4_free_extent *gex = &ac->ac_g_ex;
- struct ext4_free_extent ex;
- int max;
if (ac->ac_status == AC_STATUS_FOUND)
return;
@@ -2081,17 +2296,8 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
if (bex->fe_len < gex->fe_len)
return;
- if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
- && bex->fe_group == e4b->bd_group) {
- /* recheck chunk's availability - we don't know
- * when it was found (within this lock-unlock
- * period or not) */
- max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
- if (max >= gex->fe_len) {
- ext4_mb_use_best_found(ac, e4b);
- return;
- }
- }
+ if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
+ ext4_mb_use_best_found(ac, e4b);
}
/*
@@ -2102,6 +2308,20 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
* in the context. Later, the best found extent will be used, if
* mballoc can't find good enough extent.
*
+ * The algorithm used is roughly as follows:
+ *
+ * * If the free extent found is exactly as big as the goal, then
+ * stop the scan and use it immediately
+ *
+ * * If the free extent found is smaller than the goal, then keep retrying
+ * up to a max of sbi->s_mb_max_to_scan times (default 200). After
+ * that, stop scanning and use whatever we have.
+ *
+ * * If the free extent found is bigger than the goal, then keep retrying
+ * up to a max of sbi->s_mb_min_to_scan times (default 10) before
+ * stopping the scan and using the extent.
+ *
+ *
* FIXME: real allocation policy is to be designed yet!
*/
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
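/*
 * Minimal sketch of the policy described above (illustration only; this
 * helper does not exist in ext4, and the thresholds are just the defaults
 * quoted in the comment):
 */
#if 0
static bool mb_keep_scanning(int found_len, int goal_len, int nr_found,
			     int max_to_scan, int min_to_scan)
{
	if (found_len == goal_len)	/* exact fit: stop immediately */
		return false;
	if (found_len < goal_len)	/* too small: try up to ~200 extents */
		return nr_found < max_to_scan;
	/* bigger than the goal: settle after ~10 extents */
	return nr_found < min_to_scan;
}
#endif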
@@ -2117,6 +2337,7 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
ac->ac_found++;
+ ac->ac_cX_found[ac->ac_criteria]++;
/*
* The special case - take what you catch first
@@ -2164,7 +2385,7 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
}
static noinline_for_stack
-int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b)
{
struct ext4_free_extent ex = ac->ac_b_ex;
@@ -2175,9 +2396,12 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
BUG_ON(ex.fe_len <= 0);
err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
if (err)
- return err;
+ return;
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
if (max > 0) {
@@ -2185,10 +2409,9 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
-
- return 0;
}
static noinline_for_stack
@@ -2202,7 +2425,9 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
struct ext4_free_extent ex;
- if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
+ if (!grp)
+ return -EFSCORRUPTED;
+ if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
return 0;
if (grp->bb_free == 0)
return 0;
@@ -2211,21 +2436,19 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (err)
return err;
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
- ext4_mb_unload_buddy(e4b);
- return 0;
- }
-
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
ac->ac_g_ex.fe_len, &ex);
ex.fe_logical = 0xDEADFA11; /* debug value */
- if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
+ if (max >= ac->ac_g_ex.fe_len &&
+ ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) {
ext4_fsblk_t start;
- start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
- ex.fe_start;
+ start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
/* use do_div to get remainder (would be 64-bit modulo) */
if (do_div(start, sbi->s_stripe) == 0) {
ac->ac_found++;
@@ -2249,6 +2472,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ac->ac_b_ex = ex;
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
@@ -2276,19 +2500,22 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
continue;
buddy = mb_find_buddy(e4b, i, &max);
- BUG_ON(buddy == NULL);
+ if (WARN_RATELIMIT(buddy == NULL,
+ "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
+ continue;
k = mb_find_next_zero_bit(buddy, max, 0);
if (k >= max) {
+ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
+ e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
"%d free clusters of order %d. But found 0",
grp->bb_counters[i], i);
- ext4_mark_group_bitmap_corrupted(ac->ac_sb,
- e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
break;
}
ac->ac_found++;
+ ac->ac_cX_found[ac->ac_criteria]++;
ac->ac_b_ex.fe_len = 1 << i;
ac->ac_b_ex.fe_start = k << i;
@@ -2317,7 +2544,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
struct super_block *sb = ac->ac_sb;
void *bitmap = e4b->bd_bitmap;
struct ext4_free_extent ex;
- int i;
+ int i, j, freelen;
int free;
free = e4b->bd_info->bb_free;
@@ -2335,25 +2562,43 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
* free blocks even though group info says we
* have free blocks
*/
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
"group info. But bitmap says 0",
free);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
break;
}
+ if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
+ /*
+ * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
+ * sure that this group will have a large enough
+ * contiguous free extent, so skip over the smaller free
+ * extents
+ */
+ j = mb_find_next_bit(bitmap,
+ EXT4_CLUSTERS_PER_GROUP(sb), i);
+ freelen = j - i;
+
+ if (freelen < ac->ac_g_ex.fe_len) {
+ i = j;
+ free -= freelen;
+ continue;
+ }
+ }
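/*
 * Example (assumed numbers): with a goal of 64 clusters, a free run of
 * only 10 clusters starting at bit i is skipped here: i jumps past the
 * run and the 10 clusters are subtracted from 'free' before the scan
 * continues.
 */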
+
mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
if (WARN_ON(ex.fe_len <= 0))
break;
if (free < ex.fe_len) {
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
"group info. But got %d blocks",
free, ex.fe_len);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
/*
* The number of free blocks differs. This mostly
* indicates that the bitmap is corrupt. So exit
@@ -2385,7 +2630,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
struct ext4_free_extent ex;
ext4_fsblk_t first_group_block;
ext4_fsblk_t a;
- ext4_grpblk_t i;
+ ext4_grpblk_t i, stripe;
int max;
BUG_ON(sbi->s_stripe == 0);
@@ -2397,36 +2642,63 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
do_div(a, sbi->s_stripe);
i = (a * sbi->s_stripe) - first_group_block;
+ stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
+ i = EXT4_B2C(sbi, i);
while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
if (!mb_test_bit(i, bitmap)) {
- max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
- if (max >= sbi->s_stripe) {
+ max = mb_find_extent(e4b, i, stripe, &ex);
+ if (max >= stripe) {
ac->ac_found++;
+ ac->ac_cX_found[ac->ac_criteria]++;
ex.fe_logical = 0xDEADF00D; /* debug value */
ac->ac_b_ex = ex;
ext4_mb_use_best_found(ac, e4b);
break;
}
}
- i += sbi->s_stripe;
+ i += stripe;
}
}
+static void __ext4_mb_scan_group(struct ext4_allocation_context *ac)
+{
+ bool is_stripe_aligned;
+ struct ext4_sb_info *sbi;
+ enum criteria cr = ac->ac_criteria;
+
+ ac->ac_groups_scanned++;
+ if (cr == CR_POWER2_ALIGNED)
+ return ext4_mb_simple_scan_group(ac, ac->ac_e4b);
+
+ sbi = EXT4_SB(ac->ac_sb);
+ is_stripe_aligned = false;
+ if ((sbi->s_stripe >= sbi->s_cluster_ratio) &&
+ !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe)))
+ is_stripe_aligned = true;
+
+ if ((cr == CR_GOAL_LEN_FAST || cr == CR_BEST_AVAIL_LEN) &&
+ is_stripe_aligned)
+ ext4_mb_scan_aligned(ac, ac->ac_e4b);
+
+ if (ac->ac_status == AC_STATUS_CONTINUE)
+ ext4_mb_complex_scan_group(ac, ac->ac_e4b);
+}
+
/*
* This is also called BEFORE we load the buddy bitmap.
* Returns 1 if the group is suitable for the allocation, 0 if it is not.
*/
static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
- ext4_group_t group, int cr)
+ ext4_group_t group, enum criteria cr)
{
ext4_grpblk_t free, fragments;
int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
- BUG_ON(cr < 0 || cr >= 4);
+ BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return false;
free = grp->bb_free;
@@ -2438,7 +2710,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
return false;
switch (cr) {
- case 0:
+ case CR_POWER2_ALIGNED:
BUG_ON(ac->ac_2order == 0);
/* Avoid using the first bg of a flexgroup for data files */
@@ -2457,15 +2729,16 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
return false;
return true;
- case 1:
+ case CR_GOAL_LEN_FAST:
+ case CR_BEST_AVAIL_LEN:
if ((free / fragments) >= ac->ac_g_ex.fe_len)
return true;
break;
- case 2:
+ case CR_GOAL_LEN_SLOW:
if (free >= ac->ac_g_ex.fe_len)
return true;
break;
- case 3:
+ case CR_ANY_FREE:
return true;
default:
BUG();
@@ -2486,7 +2759,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
* out"!
*/
static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
- ext4_group_t group, int cr)
+ ext4_group_t group, enum criteria cr)
{
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
struct super_block *sb = ac->ac_sb;
@@ -2495,6 +2768,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
ext4_grpblk_t free;
int ret = 0;
+ if (!grp)
+ return -EFSCORRUPTED;
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
if (should_lock) {
@@ -2504,7 +2779,12 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
free = grp->bb_free;
if (free == 0)
goto out;
- if (cr <= 2 && free < ac->ac_g_ex.fe_len)
+ /*
+ * In all criteria except CR_ANY_FREE, we try to avoid groups that
+ * can't possibly satisfy the full goal request due to insufficient
+ * free blocks.
+ */
+ if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
goto out;
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
goto out;
@@ -2519,15 +2799,16 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
ext4_get_group_desc(sb, group, NULL);
int ret;
- /* cr=0/1 is a very optimistic search to find large
- * good chunks almost for free. If buddy data is not
- * ready, then this optimization makes no sense. But
- * we never skip the first block group in a flex_bg,
- * since this gets used for metadata block allocation,
- * and we want to make sure we locate metadata blocks
- * in the first block group in the flex_bg if possible.
+ /*
+ * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
+ * search to find large good chunks almost for free. If buddy
+ * data is not ready, then this optimization makes no sense. But
+ * we never skip the first block group in a flex_bg, since this
+ * gets used for metadata block allocation, and we want to make
+ * sure we locate metadata blocks in the first block group in
+ * the flex_bg if possible.
*/
- if (cr < 2 &&
+ if (!ext4_mb_cr_expensive(cr) &&
(!sbi->s_log_groups_per_flex ||
((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
!(ext4_has_group_desc_csum(sb) &&
@@ -2575,11 +2856,9 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
* prefetch once, so we avoid getblk() call, which can
* be expensive.
*/
- if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+ if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
EXT4_MB_GRP_NEED_INIT(grp) &&
- ext4_free_group_clusters(sb, gdp) > 0 &&
- !(ext4_has_group_desc_csum(sb) &&
- (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+ ext4_free_group_clusters(sb, gdp) > 0) {
bh = ext4_read_block_bitmap_nowait(sb, group, true);
if (bh && !IS_ERR(bh)) {
if (!buffer_uptodate(bh) && cnt)
@@ -2595,6 +2874,37 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
}
/*
+ * Batch reads of the block allocation bitmaps to get
+ * multiple READs in flight; limit prefetching at inexpensive
+ * CR, otherwise mballoc can spend a lot of time loading
+ * imperfect groups
+ */
+static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac,
+ ext4_group_t group)
+{
+ struct ext4_sb_info *sbi;
+
+ if (ac->ac_prefetch_grp != group)
+ return;
+
+ sbi = EXT4_SB(ac->ac_sb);
+ if (ext4_mb_cr_expensive(ac->ac_criteria) ||
+ ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) {
+ unsigned int nr = sbi->s_mb_prefetch;
+
+ if (ext4_has_feature_flex_bg(ac->ac_sb)) {
+ nr = 1 << sbi->s_log_groups_per_flex;
+ nr -= group & (nr - 1);
+ nr = umin(nr, sbi->s_mb_prefetch);
+ }
+
+ ac->ac_prefetch_nr = nr;
+ ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr,
+ &ac->ac_prefetch_ios);
+ }
+}
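/*
 * Worked example (assumed tunable values): with s_log_groups_per_flex = 4
 * (16 groups per flex_bg), s_mb_prefetch = 32 and group = 21, the code
 * above computes nr = 16 - (21 & 15) = 11, i.e. it prefetches only up to
 * the end of the current flex_bg and never more than s_mb_prefetch groups.
 */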
+
+/*
* Prefetching reads the block bitmap into the buffer cache; but we
* need to make sure that the buddy bitmap in the page cache has been
* initialized. Note that ext4_mb_init_group() will block if the I/O
@@ -2609,44 +2919,76 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
unsigned int nr)
{
- while (nr-- > 0) {
- struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
- NULL);
- struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+ struct ext4_group_desc *gdp;
+ struct ext4_group_info *grp;
+ while (nr-- > 0) {
if (!group)
group = ext4_get_groups_count(sb);
group--;
+ gdp = ext4_get_group_desc(sb, group, NULL);
grp = ext4_get_group_info(sb, group);
- if (EXT4_MB_GRP_NEED_INIT(grp) &&
- ext4_free_group_clusters(sb, gdp) > 0 &&
- !(ext4_has_group_desc_csum(sb) &&
- (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+ if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
+ ext4_free_group_clusters(sb, gdp) > 0) {
if (ext4_mb_init_group(sb, group, GFP_NOFS))
break;
}
}
}
+static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
+ ext4_group_t group)
+{
+ int ret;
+ struct super_block *sb = ac->ac_sb;
+ enum criteria cr = ac->ac_criteria;
+
+ ext4_mb_might_prefetch(ac, group);
+
+ /* prevent unnecessary buddy loading. */
+ if (cr < CR_ANY_FREE && spin_is_locked(ext4_group_lock_ptr(sb, group)))
+ return 0;
+
+ /* This now checks without needing the buddy folio */
+ ret = ext4_mb_good_group_nolock(ac, group, cr);
+ if (ret <= 0) {
+ if (!ac->ac_first_err)
+ ac->ac_first_err = ret;
+ return 0;
+ }
+
+ ret = ext4_mb_load_buddy(sb, group, ac->ac_e4b);
+ if (ret)
+ return ret;
+
+ /* skip busy group */
+ if (cr >= CR_ANY_FREE)
+ ext4_lock_group(sb, group);
+ else if (!ext4_try_lock_group(sb, group))
+ goto out_unload;
+
+ /* We need to check again after locking the block group. */
+ if (unlikely(!ext4_mb_good_group(ac, group, cr)))
+ goto out_unlock;
+
+ __ext4_mb_scan_group(ac);
+
+out_unlock:
+ ext4_unlock_group(sb, group);
+out_unload:
+ ext4_mb_unload_buddy(ac->ac_e4b);
+ return ret;
+}
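/*
 * Note on the locking strategy above (sketch): under the inexpensive
 * criteria a group whose lock is already held is simply skipped (the
 * spin_is_locked() pre-check plus ext4_try_lock_group()), presumably
 * because another allocator is already working in it; only at CR_ANY_FREE,
 * when no group can be skipped, do we block on the group lock.
 */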
+
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
- ext4_group_t prefetch_grp = 0, ngroups, group, i;
- int cr = -1;
- int err = 0, first_err = 0;
- unsigned int nr = 0, prefetch_ios = 0;
- struct ext4_sb_info *sbi;
- struct super_block *sb;
+ ext4_group_t i;
+ int err = 0;
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_buddy e4b;
- int lost;
-
- sb = ac->ac_sb;
- sbi = EXT4_SB(sb);
- ngroups = ext4_get_groups_count(sb);
- /* non-extent files are limited to low blocks/groups */
- if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
- ngroups = sbi->s_blockfile_groups;
BUG_ON(ac->ac_status == AC_STATUS_FOUND);
@@ -2660,8 +3002,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
/*
* ac->ac_2order is set only if the fe_len is a power of 2.
- * if ac->ac_2order is set we also set criteria to 0 so that we
- * try exact allocation using buddy.
+ * If ac->ac_2order is set, we also set the criteria to CR_POWER2_ALIGNED
+ * so that we try exact allocation using the buddy.
*/
i = fls(ac->ac_g_ex.fe_len);
ac->ac_2order = 0;
@@ -2673,119 +3015,40 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* requests up to the maximum buddy size we have constructed.
*/
if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
- /*
- * This should tell if fe_len is exactly power of 2
- */
- if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
+ if (is_power_of_2(ac->ac_g_ex.fe_len))
ac->ac_2order = array_index_nospec(i - 1,
MB_NUM_ORDERS(sb));
}
/* if stream allocation is enabled, use global goal */
if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
- /* TBD: may be hot point */
- spin_lock(&sbi->s_md_lock);
- ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
- ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
- spin_unlock(&sbi->s_md_lock);
+ int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals;
+
+ ac->ac_g_ex.fe_group = READ_ONCE(sbi->s_mb_last_groups[hash]);
+ ac->ac_g_ex.fe_start = -1;
+ ac->ac_flags &= ~EXT4_MB_HINT_TRY_GOAL;
}
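/*
 * Illustrative note (assumed value): with s_mb_nr_global_goals = 16, inode
 * 37 hashes to slot 5, so its stream allocations resume from whatever
 * group ext4_mb_use_best_found() last stored in s_mb_last_groups[5]; the
 * lockless READ_ONCE()/WRITE_ONCE() pairing is tolerable because the
 * stored group is only a starting hint for the scan.
 */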
- /* Let's just scan groups to find more-less suitable blocks */
- cr = ac->ac_2order ? 0 : 1;
/*
- * cr == 0 try to get exact allocation,
- * cr == 3 try to get anything
+ * Let's just scan groups to find more or less suitable blocks. We
+ * start with CR_GOAL_LEN_FAST, unless the request is power-of-2
+ * aligned, in which case we try that faster approach first.
*/
-repeat:
- for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
- ac->ac_criteria = cr;
- /*
- * searching for the right group start
- * from the goal value specified
- */
- group = ac->ac_g_ex.fe_group;
- ac->ac_last_optimal_group = group;
- ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
- prefetch_grp = group;
-
- for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
- i++) {
- int ret = 0, new_cr;
-
- cond_resched();
-
- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
- if (new_cr != cr) {
- cr = new_cr;
- goto repeat;
- }
-
- /*
- * Batch reads of the block allocation bitmaps
- * to get multiple READs in flight; limit
- * prefetching at cr=0/1, otherwise mballoc can
- * spend a lot of time loading imperfect groups
- */
- if ((prefetch_grp == group) &&
- (cr > 1 ||
- prefetch_ios < sbi->s_mb_prefetch_limit)) {
- unsigned int curr_ios = prefetch_ios;
-
- nr = sbi->s_mb_prefetch;
- if (ext4_has_feature_flex_bg(sb)) {
- nr = 1 << sbi->s_log_groups_per_flex;
- nr -= group & (nr - 1);
- nr = min(nr, sbi->s_mb_prefetch);
- }
- prefetch_grp = ext4_mb_prefetch(sb, group,
- nr, &prefetch_ios);
- if (prefetch_ios == curr_ios)
- nr = 0;
- }
-
- /* This now checks without needing the buddy page */
- ret = ext4_mb_good_group_nolock(ac, group, cr);
- if (ret <= 0) {
- if (!first_err)
- first_err = ret;
- continue;
- }
-
- err = ext4_mb_load_buddy(sb, group, &e4b);
- if (err)
- goto out;
-
- ext4_lock_group(sb, group);
+ ac->ac_criteria = CR_GOAL_LEN_FAST;
+ if (ac->ac_2order)
+ ac->ac_criteria = CR_POWER2_ALIGNED;
- /*
- * We need to check again after locking the
- * block group
- */
- ret = ext4_mb_good_group(ac, group, cr);
- if (ret == 0) {
- ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
- continue;
- }
-
- ac->ac_groups_scanned++;
- if (cr == 0)
- ext4_mb_simple_scan_group(ac, &e4b);
- else if (cr == 1 && sbi->s_stripe &&
- !(ac->ac_g_ex.fe_len % sbi->s_stripe))
- ext4_mb_scan_aligned(ac, &e4b);
- else
- ext4_mb_complex_scan_group(ac, &e4b);
-
- ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ac->ac_e4b = &e4b;
+ ac->ac_prefetch_ios = 0;
+ ac->ac_first_err = 0;
+repeat:
+ while (ac->ac_criteria < EXT4_MB_NUM_CRS) {
+ err = ext4_mb_scan_groups(ac);
+ if (err)
+ goto out;
- if (ac->ac_status != AC_STATUS_CONTINUE)
- break;
- }
- /* Processed all groups and haven't found blocks */
- if (sbi->s_mb_stats && i == ngroups)
- atomic64_inc(&sbi->s_bal_cX_failed[cr]);
+ if (ac->ac_status != AC_STATUS_CONTINUE)
+ break;
}
if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
@@ -2796,6 +3059,8 @@ repeat:
*/
ext4_mb_try_best_found(ac, &e4b);
if (ac->ac_status != AC_STATUS_FOUND) {
+ int lost;
+
/*
* Someone more lucky has already allocated it.
* The only thing we can do is just take first
@@ -2811,30 +3076,34 @@ repeat:
ac->ac_b_ex.fe_len = 0;
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_flags |= EXT4_MB_HINT_FIRST;
- cr = 3;
+ ac->ac_criteria = CR_ANY_FREE;
goto repeat;
}
}
- if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
+ if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) {
atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
+ if (ac->ac_flags & EXT4_MB_STREAM_ALLOC &&
+ ac->ac_b_ex.fe_group == ac->ac_g_ex.fe_group)
+ atomic_inc(&sbi->s_bal_stream_goals);
+ }
out:
- if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
- err = first_err;
+ if (!err && ac->ac_status != AC_STATUS_FOUND && ac->ac_first_err)
+ err = ac->ac_first_err;
mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
- ac->ac_flags, cr, err);
+ ac->ac_flags, ac->ac_criteria, err);
- if (nr)
- ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
+ if (ac->ac_prefetch_nr)
+ ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr);
return err;
}
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
ext4_group_t group;
if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
@@ -2845,7 +3114,7 @@ static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
ext4_group_t group;
++*pos;
@@ -2857,19 +3126,17 @@ static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
ext4_group_t group = (ext4_group_t) ((unsigned long) v);
- int i;
- int err, buddy_loaded = 0;
+ int i, err;
+ char nbuf[16];
struct ext4_buddy e4b;
struct ext4_group_info *grinfo;
unsigned char blocksize_bits = min_t(unsigned char,
sb->s_blocksize_bits,
EXT4_MAX_BLOCK_LOG_SIZE);
- struct sg {
- struct ext4_group_info info;
- ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
- } sg;
+ DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters,
+ EXT4_MAX_BLOCK_LOG_SIZE + 2);
group--;
if (group == 0)
@@ -2877,32 +3144,36 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
" 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
" 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
- i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
+ i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) +
sizeof(struct ext4_group_info);
grinfo = ext4_get_group_info(sb, group);
+ if (!grinfo)
+ return 0;
/* Load the group info in memory only if not already loaded. */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- seq_printf(seq, "#%-5u: I/O error\n", group);
+ seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf));
return 0;
}
- buddy_loaded = 1;
- }
-
- memcpy(&sg, ext4_get_group_info(sb, group), i);
-
- if (buddy_loaded)
ext4_mb_unload_buddy(&e4b);
+ }
- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
- sg.info.bb_fragments, sg.info.bb_first_free);
+ /*
+ * We care only about free space counters in the group info and
+ * these are safe to access even after the buddy has been unloaded
+ */
+ memcpy(sg, grinfo, i);
+ seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free,
+ sg->bb_fragments, sg->bb_first_free);
for (i = 0; i <= 13; i++)
seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
- sg.info.bb_counters[i] : 0);
- seq_puts(seq, " ]\n");
-
+ sg->bb_counters[i] : 0);
+ seq_puts(seq, " ]");
+ if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg))
+ seq_puts(seq, " Block bitmap corrupted!");
+ seq_putc(seq, '\n');
return 0;
}
@@ -2919,57 +3190,96 @@ const struct seq_operations ext4_mb_seq_groups_ops = {
int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
{
- struct super_block *sb = (struct super_block *)seq->private;
+ struct super_block *sb = seq->private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
seq_puts(seq, "mballoc:\n");
if (!sbi->s_mb_stats) {
seq_puts(seq, "\tmb stats collection turned off.\n");
- seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
+ seq_puts(seq,
+ "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
return 0;
}
seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
- seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned));
-
- seq_puts(seq, "\tcr0_stats:\n");
- seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
- seq_printf(seq, "\t\tgroups_considered: %llu\n",
- atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
+ seq_printf(seq, "\tgroups_scanned: %u\n",
+ atomic_read(&sbi->s_bal_groups_scanned));
+
+ /* CR_POWER2_ALIGNED stats */
+ seq_puts(seq, "\tcr_p2_aligned_stats:\n");
+ seq_printf(seq, "\t\thits: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
+ seq_printf(seq, "\t\tgroups_considered: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
+ seq_printf(seq, "\t\textents_scanned: %u\n",
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
- atomic64_read(&sbi->s_bal_cX_failed[0]));
- seq_printf(seq, "\t\tbad_suggestions: %u\n",
- atomic_read(&sbi->s_bal_cr0_bad_suggestions));
+ atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
- seq_puts(seq, "\tcr1_stats:\n");
- seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
+ /* CR_GOAL_LEN_FAST stats */
+ seq_puts(seq, "\tcr_goal_fast_stats:\n");
+ seq_printf(seq, "\t\thits: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
seq_printf(seq, "\t\tgroups_considered: %llu\n",
- atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
+ atomic64_read(
+ &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
+ seq_printf(seq, "\t\textents_scanned: %u\n",
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
- atomic64_read(&sbi->s_bal_cX_failed[1]));
- seq_printf(seq, "\t\tbad_suggestions: %u\n",
- atomic_read(&sbi->s_bal_cr1_bad_suggestions));
-
- seq_puts(seq, "\tcr2_stats:\n");
- seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
- seq_printf(seq, "\t\tgroups_considered: %llu\n",
- atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
+ atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
+
+ /* CR_BEST_AVAIL_LEN stats */
+ seq_puts(seq, "\tcr_best_avail_stats:\n");
+ seq_printf(seq, "\t\thits: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
+ seq_printf(seq, "\t\tgroups_considered: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
+ seq_printf(seq, "\t\textents_scanned: %u\n",
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
- atomic64_read(&sbi->s_bal_cX_failed[2]));
+ atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
- seq_puts(seq, "\tcr3_stats:\n");
- seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
+ /* CR_GOAL_LEN_SLOW stats */
+ seq_puts(seq, "\tcr_goal_slow_stats:\n");
+ seq_printf(seq, "\t\thits: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
seq_printf(seq, "\t\tgroups_considered: %llu\n",
- atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
+ atomic64_read(
+ &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
+ seq_printf(seq, "\t\textents_scanned: %u\n",
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
+ seq_printf(seq, "\t\tuseless_loops: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
+
+ /* CR_ANY_FREE stats */
+ seq_puts(seq, "\tcr_any_free_stats:\n");
+ seq_printf(seq, "\t\thits: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
+ seq_printf(seq, "\t\tgroups_considered: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
+ seq_printf(seq, "\t\textents_scanned: %u\n",
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
seq_printf(seq, "\t\tuseless_loops: %llu\n",
- atomic64_read(&sbi->s_bal_cX_failed[3]));
- seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
+ atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
+
+ /* Aggregates */
+ seq_printf(seq, "\textents_scanned: %u\n",
+ atomic_read(&sbi->s_bal_ex_scanned));
seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
+ seq_printf(seq, "\t\tstream_goal_hits: %u\n",
+ atomic_read(&sbi->s_bal_stream_goals));
+ seq_printf(seq, "\t\tlen_goal_hits: %u\n",
+ atomic_read(&sbi->s_bal_len_goals));
seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
-
seq_printf(seq, "\tbuddies_generated: %u/%u\n",
atomic_read(&sbi->s_mb_buddies_generated),
ext4_get_groups_count(sb));
@@ -2977,20 +3287,16 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
atomic64_read(&sbi->s_mb_generation_time));
seq_printf(seq, "\tpreallocated: %u\n",
atomic_read(&sbi->s_mb_preallocated));
- seq_printf(seq, "\tdiscarded: %u\n",
- atomic_read(&sbi->s_mb_discarded));
+ seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
return 0;
}
static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
-__acquires(&EXT4_SB(sb)->s_mb_rb_lock)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
unsigned long position;
- read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
-
- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
+ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
return NULL;
position = *pos + 1;
return (void *) ((unsigned long) position);
@@ -2998,11 +3304,11 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
unsigned long position;
++*pos;
- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
+ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
return NULL;
position = *pos + 1;
return (void *) ((unsigned long) position);
@@ -3010,33 +3316,24 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof
static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned long position = ((unsigned long) v);
struct ext4_group_info *grp;
- struct rb_node *n;
- unsigned int count, min, max;
+ unsigned int count;
+ unsigned long idx;
position--;
if (position >= MB_NUM_ORDERS(sb)) {
- seq_puts(seq, "fragment_size_tree:\n");
- n = rb_first(&sbi->s_mb_avg_fragment_size_root);
- if (!n) {
- seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
- return 0;
- }
- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
- min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
- count = 1;
- while (rb_next(n)) {
- count++;
- n = rb_next(n);
- }
- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
- max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
+ position -= MB_NUM_ORDERS(sb);
+ if (position == 0)
+ seq_puts(seq, "avg_fragment_size_lists:\n");
- seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
- min, max, count);
+ count = 0;
+ xa_for_each(&sbi->s_mb_avg_fragment_size[position], idx, grp)
+ count++;
+ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
+ (unsigned int)position, count);
return 0;
}
@@ -3046,8 +3343,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
seq_puts(seq, "max_free_order_lists:\n");
}
count = 0;
- list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
- bb_largest_free_order_node)
+ xa_for_each(&sbi->s_mb_largest_free_orders[position], idx, grp)
count++;
seq_printf(seq, "\tlist_order_%u_groups: %u\n",
(unsigned int)position, count);
@@ -3056,11 +3352,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
}
static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
-__releases(&EXT4_SB(sb)->s_mb_rb_lock)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
-
- read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
}
const struct seq_operations ext4_mb_seq_structs_summary_ops = {
@@ -3138,7 +3430,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
if (meta_group_info == NULL) {
ext4_msg(sb, KERN_ERR, "can't allocate mem "
"for a buddy group");
- goto exit_meta_group_info;
+ return -ENOMEM;
}
rcu_read_lock();
rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
@@ -3172,9 +3464,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
init_rwsem(&meta_group_info[i]->alloc_sem);
meta_group_info[i]->bb_free_root = RB_ROOT;
- INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
- RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
+ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
meta_group_info[i]->bb_group = group;
mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
@@ -3191,7 +3482,6 @@ exit_group_info:
group_info[idx] = NULL;
rcu_read_unlock();
}
-exit_meta_group_info:
return -ENOMEM;
} /* ext4_mb_add_groupinfo */
@@ -3220,6 +3510,8 @@ static int ext4_mb_init_backend(struct super_block *sb)
* this will avoid confusion if it ever shows up during debugging. */
sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
+ ext4_set_inode_mapping_order(sbi->s_buddy_cache);
+
for (i = 0; i < ngroups; i++) {
cond_resched();
desc = ext4_get_group_desc(sb, i, NULL);
@@ -3248,10 +3540,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
}
if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
sbi->s_mb_prefetch = ext4_get_groups_count(sb);
- /* now many real IOs to prefetch within a single allocation at cr=0
- * given cr=0 is an CPU-related optimization we shouldn't try to
- * load too many groups, at some point we should start to use what
- * we've got in memory.
+ /*
+ * how many real IOs to prefetch within a single allocation at
+ * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is a CPU-related
+ * optimization, we shouldn't try to load too many groups; at some point
+ * we should start to use what we've got in memory.
* with an average random access time 5ms, it'd take a second to get
* 200 groups (* N with flex_bg), so let's make this limit 4
*/
@@ -3263,8 +3556,12 @@ static int ext4_mb_init_backend(struct super_block *sb)
err_freebuddy:
cachep = get_groupinfo_cache(sb->s_blocksize_bits);
- while (i-- > 0)
- kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+ while (i-- > 0) {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, i);
+
+ if (grp)
+ kmem_cache_free(cachep, grp);
+ }
i = sbi->s_group_info_size;
rcu_read_lock();
group_info = rcu_dereference(sbi->s_group_info);
@@ -3335,11 +3632,10 @@ static void ext4_discard_work(struct work_struct *work)
struct super_block *sb = sbi->s_sb;
struct ext4_free_data *fd, *nfd;
struct ext4_buddy e4b;
- struct list_head discard_list;
+ LIST_HEAD(discard_list);
ext4_group_t grp, load_grp;
int err = 0;
- INIT_LIST_HEAD(&discard_list);
spin_lock(&sbi->s_md_lock);
list_splice_init(&sbi->s_discard_list, &discard_list);
spin_unlock(&sbi->s_md_lock);
@@ -3379,6 +3675,30 @@ static void ext4_discard_work(struct work_struct *work)
ext4_mb_unload_buddy(&e4b);
}
+static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
+{
+ if (!sbi->s_mb_avg_fragment_size)
+ return;
+
+ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
+ xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
+
+ kfree(sbi->s_mb_avg_fragment_size);
+ sbi->s_mb_avg_fragment_size = NULL;
+}
+
+static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
+{
+ if (!sbi->s_mb_largest_free_orders)
+ return;
+
+ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
+ xa_destroy(&sbi->s_mb_largest_free_orders[i]);
+
+ kfree(sbi->s_mb_largest_free_orders);
+ sbi->s_mb_largest_free_orders = NULL;
+}
+
int ext4_mb_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -3423,30 +3743,30 @@ int ext4_mb_init(struct super_block *sb)
i++;
} while (i < MB_NUM_ORDERS(sb));
- sbi->s_mb_avg_fragment_size_root = RB_ROOT;
- sbi->s_mb_largest_free_orders =
- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
+ sbi->s_mb_avg_fragment_size =
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
GFP_KERNEL);
- if (!sbi->s_mb_largest_free_orders) {
+ if (!sbi->s_mb_avg_fragment_size) {
ret = -ENOMEM;
goto out;
}
- sbi->s_mb_largest_free_orders_locks =
- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ xa_init(&sbi->s_mb_avg_fragment_size[i]);
+
+ sbi->s_mb_largest_free_orders =
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
GFP_KERNEL);
- if (!sbi->s_mb_largest_free_orders_locks) {
+ if (!sbi->s_mb_largest_free_orders) {
ret = -ENOMEM;
goto out;
}
- for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
- INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
- rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
- }
- rwlock_init(&sbi->s_mb_rb_lock);
+ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
+ xa_init(&sbi->s_mb_largest_free_orders[i]);
spin_lock_init(&sbi->s_md_lock);
- sbi->s_mb_free_pending = 0;
- INIT_LIST_HEAD(&sbi->s_freed_data_list);
+ atomic_set(&sbi->s_mb_free_pending, 0);
+ INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
+ INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
INIT_LIST_HEAD(&sbi->s_discard_list);
INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
atomic_set(&sbi->s_retry_alloc_pending, 0);
@@ -3456,7 +3776,8 @@ int ext4_mb_init(struct super_block *sb)
sbi->s_mb_stats = MB_DEFAULT_STATS;
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
- sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
+ sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
+
/*
* The default group preallocation is 512, which for 4k block
* sizes translates to 2 megabytes. However for bigalloc file
@@ -3481,13 +3802,22 @@ int ext4_mb_init(struct super_block *sb)
*/
if (sbi->s_stripe > 1) {
sbi->s_mb_group_prealloc = roundup(
- sbi->s_mb_group_prealloc, sbi->s_stripe);
+ sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
+ }
+
+ sbi->s_mb_nr_global_goals = umin(num_possible_cpus(),
+ DIV_ROUND_UP(sbi->s_groups_count, 4));
+ sbi->s_mb_last_groups = kcalloc(sbi->s_mb_nr_global_goals,
+ sizeof(ext4_group_t), GFP_KERNEL);
+ if (sbi->s_mb_last_groups == NULL) {
+ ret = -ENOMEM;
+ goto out;
}
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
if (sbi->s_locality_groups == NULL) {
ret = -ENOMEM;
- goto out;
+ goto out_free_last_groups;
}
for_each_possible_cpu(i) {
struct ext4_locality_group *lg;
@@ -3498,7 +3828,7 @@ int ext4_mb_init(struct super_block *sb)
spin_lock_init(&lg->lg_prealloc_lock);
}
- if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
+ if (bdev_nonrot(sb->s_bdev))
sbi->s_mb_max_linear_groups = 0;
else
sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
@@ -3512,9 +3842,12 @@ int ext4_mb_init(struct super_block *sb)
out_free_locality_groups:
free_percpu(sbi->s_locality_groups);
sbi->s_locality_groups = NULL;
+out_free_last_groups:
+ kfree(sbi->s_mb_last_groups);
+ sbi->s_mb_last_groups = NULL;
out:
- kfree(sbi->s_mb_largest_free_orders);
- kfree(sbi->s_mb_largest_free_orders_locks);
+ ext4_mb_avg_fragment_size_destroy(sbi);
+ ext4_mb_largest_free_orders_destroy(sbi);
kfree(sbi->s_mb_offsets);
sbi->s_mb_offsets = NULL;
kfree(sbi->s_mb_maxs);
@@ -3538,7 +3871,7 @@ static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
return count;
}
-int ext4_mb_release(struct super_block *sb)
+void ext4_mb_release(struct super_block *sb)
{
ext4_group_t ngroups = ext4_get_groups_count(sb);
ext4_group_t i;
@@ -3560,6 +3893,8 @@ int ext4_mb_release(struct super_block *sb)
for (i = 0; i < ngroups; i++) {
cond_resched();
grinfo = ext4_get_group_info(sb, i);
+ if (!grinfo)
+ continue;
mb_group_bb_bitmap_free(grinfo);
ext4_lock_group(sb, i);
count = ext4_mb_cleanup_pa(grinfo);
@@ -3579,8 +3914,8 @@ int ext4_mb_release(struct super_block *sb)
kvfree(group_info);
rcu_read_unlock();
}
- kfree(sbi->s_mb_largest_free_orders);
- kfree(sbi->s_mb_largest_free_orders_locks);
+ ext4_mb_avg_fragment_size_destroy(sbi);
+ ext4_mb_largest_free_orders_destroy(sbi);
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
iput(sbi->s_buddy_cache);
@@ -3610,13 +3945,11 @@ int ext4_mb_release(struct super_block *sb)
}
free_percpu(sbi->s_locality_groups);
-
- return 0;
+ kfree(sbi->s_mb_last_groups);
}
static inline int ext4_issue_discard(struct super_block *sb,
- ext4_group_t block_group, ext4_grpblk_t cluster, int count,
- struct bio **biop)
+ ext4_group_t block_group, ext4_grpblk_t cluster, int count)
{
ext4_fsblk_t discard_block;
@@ -3625,13 +3958,8 @@ static inline int ext4_issue_discard(struct super_block *sb,
count = EXT4_C2B(EXT4_SB(sb), count);
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
- if (biop) {
- return __blkdev_issue_discard(sb->s_bdev,
- (sector_t)discard_block << (sb->s_blocksize_bits - 9),
- (sector_t)count << (sb->s_blocksize_bits - 9),
- GFP_NOFS, 0, biop);
- } else
- return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+
+ return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}
static void ext4_free_data_in_buddy(struct super_block *sb,
@@ -3639,7 +3967,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
{
struct ext4_buddy e4b;
struct ext4_group_info *db;
- int err, count = 0, count2 = 0;
+ int err, count = 0;
mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
entry->efd_count, entry->efd_group, entry);
@@ -3648,14 +3976,10 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
- spin_lock(&EXT4_SB(sb)->s_md_lock);
- EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
- spin_unlock(&EXT4_SB(sb)->s_md_lock);
-
+ atomic_sub(entry->efd_count, &EXT4_SB(sb)->s_mb_free_pending);
db = e4b.bd_info;
/* there are blocks to put in buddy to make them really free */
count += entry->efd_count;
- count2++;
ext4_lock_group(sb, entry->efd_group);
/* Take it out of per group rb tree */
rb_erase(&entry->efd_node, &(db->bb_free_root));
@@ -3664,24 +3988,20 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/*
* Clear the trimmed flag for the group so that the next
* ext4_trim_fs can trim it.
- * If the volume is mounted with -o discard, online discard
- * is supported and the free blocks will be trimmed online.
*/
- if (!test_opt(sb, DISCARD))
- EXT4_MB_GRP_CLEAR_TRIMMED(db);
+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
if (!db->bb_free_root.rb_node) {
/* No more items in the per group rb tree
* balance refcounts from ext4_mb_free_metadata()
*/
- put_page(e4b.bd_buddy_page);
- put_page(e4b.bd_bitmap_page);
+ folio_put(e4b.bd_buddy_folio);
+ folio_put(e4b.bd_bitmap_folio);
}
ext4_unlock_group(sb, entry->efd_group);
ext4_mb_unload_buddy(&e4b);
- mb_debug(sb, "freed %d blocks in %d structures\n", count,
- count2);
+ mb_debug(sb, "freed %d blocks in 1 structures\n", count);
}
/*
@@ -3692,22 +4012,11 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_free_data *entry, *tmp;
- struct list_head freed_data_list;
- struct list_head *cut_pos = NULL;
+ LIST_HEAD(freed_data_list);
+ struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
bool wake;
- INIT_LIST_HEAD(&freed_data_list);
-
- spin_lock(&sbi->s_md_lock);
- list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
- if (entry->efd_tid != commit_tid)
- break;
- cut_pos = &entry->efd_list;
- }
- if (cut_pos)
- list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
- cut_pos);
- spin_unlock(&sbi->s_md_lock);
+ list_replace_init(s_freed_head, &freed_data_list);
list_for_each_entry(entry, &freed_data_list, efd_list)
ext4_free_data_in_buddy(sb, entry);
@@ -3718,7 +4027,7 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
list_splice_tail(&freed_data_list, &sbi->s_discard_list);
spin_unlock(&sbi->s_md_lock);
if (wake)
- queue_work(system_unbound_wq, &sbi->s_discard_work);
+ queue_work(system_dfl_wq, &sbi->s_discard_work);
} else {
list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
kmem_cache_free(ext4_free_data_cachep, entry);
@@ -3765,6 +4074,111 @@ void ext4_exit_mballoc(void)
ext4_groupinfo_destroy_slabs();
}
+#define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
+#define EXT4_MB_SYNC_UPDATE 0x0002
+static int
+ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
+ ext4_group_t group, ext4_grpblk_t blkoff,
+ ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *bitmap_bh = NULL;
+ struct ext4_group_desc *gdp;
+ struct buffer_head *gdp_bh;
+ int err;
+ unsigned int i, already, changed = len;
+
+ KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context,
+ handle, sb, state, group, blkoff, len,
+ flags, ret_changed);
+
+ if (ret_changed)
+ *ret_changed = 0;
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh))
+ return PTR_ERR(bitmap_bh);
+
+ if (handle) {
+ BUFFER_TRACE(bitmap_bh, "getting write access");
+ err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto out_err;
+ }
+
+ err = -EIO;
+ gdp = ext4_get_group_desc(sb, group, &gdp_bh);
+ if (!gdp)
+ goto out_err;
+
+ if (handle) {
+ BUFFER_TRACE(gdp_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, sb, gdp_bh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto out_err;
+ }
+
+ ext4_lock_group(sb, group);
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_clusters_after_init(sb, group, gdp));
+ }
+
+ if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
+ already = 0;
+ for (i = 0; i < len; i++)
+ if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
+ state)
+ already++;
+ changed = len - already;
+ }
+
+ if (state) {
+ mb_set_bits(bitmap_bh->b_data, blkoff, len);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_group_clusters(sb, gdp) - changed);
+ } else {
+ mb_clear_bits(bitmap_bh->b_data, blkoff, len);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_group_clusters(sb, gdp) + changed);
+ }
+
+ ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
+ ext4_group_desc_csum_set(sb, group, gdp);
+ ext4_unlock_group(sb, group);
+ if (ret_changed)
+ *ret_changed = changed;
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, group);
+ struct flex_groups *fg = sbi_array_rcu_deref(sbi,
+ s_flex_groups, flex_group);
+
+ if (state)
+ atomic64_sub(changed, &fg->free_clusters);
+ else
+ atomic64_add(changed, &fg->free_clusters);
+ }
+
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+ if (err)
+ goto out_err;
+ err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
+ if (err)
+ goto out_err;
+
+ if (flags & EXT4_MB_SYNC_UPDATE) {
+ sync_dirty_buffer(bitmap_bh);
+ sync_dirty_buffer(gdp_bh);
+ }
+
+out_err:
+ brelse(bitmap_bh);
+ return err;
+}
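Aside from the patch itself, the already/changed accounting that EXT4_MB_BITMAP_MARKED_CHECK enables can be modelled on a plain user-space bitmap. The sketch below is hypothetical (test_bit/set_bit/mark_range are made-up helpers, not the kernel's); only the "count bits already in the target state and adjust counters by the delta" idea mirrors ext4_mb_mark_context().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool test_bit(const uint8_t *map, unsigned int bit)
{
	return map[bit >> 3] & (1u << (bit & 7));
}

static void set_bit(uint8_t *map, unsigned int bit)
{
	map[bit >> 3] |= 1u << (bit & 7);
}

/* Mark [blkoff, blkoff + len) used; return how many bits actually changed. */
static unsigned int mark_range(uint8_t *map, unsigned int blkoff, unsigned int len)
{
	unsigned int i, already = 0;

	for (i = 0; i < len; i++)
		if (test_bit(map, blkoff + i))
			already++;
	for (i = 0; i < len; i++)
		set_bit(map, blkoff + i);

	/* only this delta should be applied to the free-cluster counters */
	return len - already;
}

int main(void)
{
	uint8_t map[8] = { 0 };

	set_bit(map, 5);				/* one cluster marked earlier */
	printf("changed = %u\n", mark_range(map, 4, 4));	/* prints 3 */
	return 0;
}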
/*
* Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
@@ -3774,13 +4188,13 @@ static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
handle_t *handle, unsigned int reserv_clstrs)
{
- struct buffer_head *bitmap_bh = NULL;
struct ext4_group_desc *gdp;
- struct buffer_head *gdp_bh;
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block;
int err, len;
+ int flags = 0;
+ ext4_grpblk_t changed;
BUG_ON(ac->ac_status != AC_STATUS_FOUND);
BUG_ON(ac->ac_b_ex.fe_len <= 0);
@@ -3788,34 +4202,13 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
sb = ac->ac_sb;
sbi = EXT4_SB(sb);
- bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
- if (IS_ERR(bitmap_bh)) {
- err = PTR_ERR(bitmap_bh);
- bitmap_bh = NULL;
- goto out_err;
- }
-
- BUFFER_TRACE(bitmap_bh, "getting write access");
- err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
- EXT4_JTR_NONE);
- if (err)
- goto out_err;
-
- err = -EIO;
- gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
+ gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
if (!gdp)
- goto out_err;
-
+ return -EIO;
ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
ext4_free_group_clusters(sb, gdp));
- BUFFER_TRACE(gdp_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
- if (err)
- goto out_err;
-
block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
-
len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
@@ -3824,41 +4217,29 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
* Fix the bitmap and return EFSCORRUPTED
* We leak some of the blocks here.
*/
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
- ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
- ac->ac_b_ex.fe_len);
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
- err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+ err = ext4_mb_mark_context(handle, sb, true,
+ ac->ac_b_ex.fe_group,
+ ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len,
+ 0, NULL);
if (!err)
err = -EFSCORRUPTED;
- goto out_err;
+ return err;
}
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
- {
- int i;
- for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
- BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
- bitmap_bh->b_data));
- }
- }
+ flags |= EXT4_MB_BITMAP_MARKED_CHECK;
#endif
- ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
- ac->ac_b_ex.fe_len);
- if (ext4_has_group_desc_csum(sb) &&
- (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
- gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
- ext4_free_group_clusters_set(sb, gdp,
- ext4_free_clusters_after_init(sb,
- ac->ac_b_ex.fe_group, gdp));
- }
- len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
- ext4_free_group_clusters_set(sb, gdp, len);
- ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
- ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
+ err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
+ ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
+ flags, &changed);
+
+ if (err && changed == 0)
+ return err;
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+#ifdef AGGRESSIVE_CHECK
+ BUG_ON(changed != ac->ac_b_ex.fe_len);
+#endif
percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
/*
* Now reduce the dirty block count also. Should not go negative
@@ -3868,21 +4249,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
percpu_counter_sub(&sbi->s_dirtyclusters_counter,
reserv_clstrs);
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group = ext4_flex_group(sbi,
- ac->ac_b_ex.fe_group);
- atomic64_sub(ac->ac_b_ex.fe_len,
- &sbi_array_rcu_deref(sbi, s_flex_groups,
- flex_group)->free_clusters);
- }
-
- err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
- if (err)
- goto out_err;
- err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
-
-out_err:
- brelse(bitmap_bh);
return err;
}
@@ -3891,77 +4257,48 @@ out_err:
* blocks in bitmaps and update counters.
*/
void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
- int len, int state)
+ int len, bool state)
{
- struct buffer_head *bitmap_bh = NULL;
- struct ext4_group_desc *gdp;
- struct buffer_head *gdp_bh;
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i, clen, err;
- int already;
-
- clen = EXT4_B2C(sbi, len);
-
- ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (IS_ERR(bitmap_bh)) {
- err = PTR_ERR(bitmap_bh);
- bitmap_bh = NULL;
- goto out_err;
- }
-
- err = -EIO;
- gdp = ext4_get_group_desc(sb, group, &gdp_bh);
- if (!gdp)
- goto out_err;
-
- ext4_lock_group(sb, group);
- already = 0;
- for (i = 0; i < clen; i++)
- if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state)
- already++;
-
- if (state)
- ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
- else
- mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
- if (ext4_has_group_desc_csum(sb) &&
- (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
- gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
- ext4_free_group_clusters_set(sb, gdp,
- ext4_free_clusters_after_init(sb,
- group, gdp));
- }
- if (state)
- clen = ext4_free_group_clusters(sb, gdp) - clen + already;
- else
- clen = ext4_free_group_clusters(sb, gdp) + clen - already;
+ int err = 0;
+ unsigned int clen, thisgrp_len;
- ext4_free_group_clusters_set(sb, gdp, clen);
- ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
- ext4_group_desc_csum_set(sb, group, gdp);
+ while (len > 0) {
+ ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
- ext4_unlock_group(sb, group);
+ /*
+ * Check to see if we are freeing blocks across a group
+ * boundary.
+ * In case of flex_bg, (block, len) may span across more than
+ * one group. In that case we need to get the corresponding
+ * group metadata to work with, which is why the loop below
+ * handles one group per iteration.
+ */
+ thisgrp_len = min_t(unsigned int, (unsigned int)len,
+ EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
+ clen = EXT4_NUM_B2C(sbi, thisgrp_len);
+
+ if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
+ ext4_error(sb, "Marking blocks in system zone - "
+ "Block = %llu, len = %u",
+ block, thisgrp_len);
+ break;
+ }
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group = ext4_flex_group(sbi, group);
+ err = ext4_mb_mark_context(NULL, sb, state,
+ group, blkoff, clen,
+ EXT4_MB_BITMAP_MARKED_CHECK |
+ EXT4_MB_SYNC_UPDATE,
+ NULL);
+ if (err)
+ break;
- atomic64_sub(len,
- &sbi_array_rcu_deref(sbi, s_flex_groups,
- flex_group)->free_clusters);
+ block += thisgrp_len;
+ len -= thisgrp_len;
+ BUG_ON(len < 0);
}
-
- err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
- if (err)
- goto out_err;
- sync_dirty_buffer(bitmap_bh);
- err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
- sync_dirty_buffer(gdp_bh);
-
-out_err:
- brelse(bitmap_bh);
}
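A rough user-space model of the group-boundary splitting done by the while loop in ext4_mb_mark_bb() above; the group size is a made-up constant and only the chunking arithmetic mirrors the kernel code.

#include <stdio.h>

#define BLOCKS_PER_GROUP 128u	/* hypothetical, tiny for the example */

int main(void)
{
	unsigned long long block = 250;	/* starting block of the range */
	unsigned int len = 300;		/* range spans several groups */

	while (len > 0) {
		unsigned int group = block / BLOCKS_PER_GROUP;
		unsigned int blkoff = block % BLOCKS_PER_GROUP;
		unsigned int thisgrp_len = len;

		/* never cross the end of the current group */
		if (thisgrp_len > BLOCKS_PER_GROUP - blkoff)
			thisgrp_len = BLOCKS_PER_GROUP - blkoff;

		printf("group %u: offset %u, len %u\n", group, blkoff, thisgrp_len);

		block += thisgrp_len;
		len -= thisgrp_len;
	}
	return 0;
}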
/*
@@ -3984,6 +4321,197 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
}
/*
+ * This function returns the next element to look at during inode
+ * PA rbtree walk. We assume that the inode PA rbtree lock
+ * (ei->i_prealloc_lock) is held.
+ *
+ * new_start The start of the range we want to compare
+ * cur_start The existing start that we are comparing against
+ * node The node of the rb_tree
+ */
+static inline struct rb_node*
+ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
+{
+ if (new_start < cur_start)
+ return node->rb_left;
+ else
+ return node->rb_right;
+}
+
+static inline void
+ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
+ ext4_lblk_t start, loff_t end)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+ struct ext4_prealloc_space *tmp_pa;
+ ext4_lblk_t tmp_pa_start;
+ loff_t tmp_pa_end;
+ struct rb_node *iter;
+
+ read_lock(&ei->i_prealloc_lock);
+ for (iter = ei->i_prealloc_node.rb_node; iter;
+ iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
+ tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ tmp_pa_start = tmp_pa->pa_lstart;
+ tmp_pa_end = pa_logical_end(sbi, tmp_pa);
+
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0)
+ BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
+ spin_unlock(&tmp_pa->pa_lock);
+ }
+ read_unlock(&ei->i_prealloc_lock);
+}
+
+/*
+ * Given an allocation context "ac" and a range "start", "end", check
+ * and adjust boundaries if the range overlaps with any of the existing
+ * preallocations stored in the corresponding inode of the allocation context.
+ *
+ * Parameters:
+ * ac allocation context
+ * start start of the new range
+ * end end of the new range
+ */
+static inline void
+ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
+ ext4_lblk_t *start, loff_t *end)
+{
+ struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
+ struct rb_node *iter;
+ ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
+ loff_t new_end, tmp_pa_end, left_pa_end = -1;
+
+ new_start = *start;
+ new_end = *end;
+
+ /*
+ * Adjust the normalized range so that it doesn't overlap with any
+ * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
+ * so it doesn't change underneath us.
+ */
+ read_lock(&ei->i_prealloc_lock);
+
+ /* Step 1: find any one immediate neighboring PA of the normalized range */
+ for (iter = ei->i_prealloc_node.rb_node; iter;
+ iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
+ tmp_pa_start, iter)) {
+ tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ tmp_pa_start = tmp_pa->pa_lstart;
+ tmp_pa_end = pa_logical_end(sbi, tmp_pa);
+
+ /* PA must not overlap original request */
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0)
+ BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
+ ac->ac_o_ex.fe_logical < tmp_pa_start));
+ spin_unlock(&tmp_pa->pa_lock);
+ }
+
+ /*
+ * Step 2: check if the found PA is left or right neighbor and
+ * get the other neighbor
+ */
+ if (tmp_pa) {
+ if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
+ struct rb_node *tmp;
+
+ left_pa = tmp_pa;
+ tmp = rb_next(&left_pa->pa_node.inode_node);
+ if (tmp) {
+ right_pa = rb_entry(tmp,
+ struct ext4_prealloc_space,
+ pa_node.inode_node);
+ }
+ } else {
+ struct rb_node *tmp;
+
+ right_pa = tmp_pa;
+ tmp = rb_prev(&right_pa->pa_node.inode_node);
+ if (tmp) {
+ left_pa = rb_entry(tmp,
+ struct ext4_prealloc_space,
+ pa_node.inode_node);
+ }
+ }
+ }
+
+ /* Step 3: get the non deleted neighbors */
+ if (left_pa) {
+ for (iter = &left_pa->pa_node.inode_node;;
+ iter = rb_prev(iter)) {
+ if (!iter) {
+ left_pa = NULL;
+ break;
+ }
+
+ tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ left_pa = tmp_pa;
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0) {
+ spin_unlock(&tmp_pa->pa_lock);
+ break;
+ }
+ spin_unlock(&tmp_pa->pa_lock);
+ }
+ }
+
+ if (right_pa) {
+ for (iter = &right_pa->pa_node.inode_node;;
+ iter = rb_next(iter)) {
+ if (!iter) {
+ right_pa = NULL;
+ break;
+ }
+
+ tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ right_pa = tmp_pa;
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0) {
+ spin_unlock(&tmp_pa->pa_lock);
+ break;
+ }
+ spin_unlock(&tmp_pa->pa_lock);
+ }
+ }
+
+ if (left_pa) {
+ left_pa_end = pa_logical_end(sbi, left_pa);
+ BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
+ }
+
+ if (right_pa) {
+ right_pa_start = right_pa->pa_lstart;
+ BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
+ }
+
+ /* Step 4: trim our normalized range to not overlap with the neighbors */
+ if (left_pa) {
+ if (left_pa_end > new_start)
+ new_start = left_pa_end;
+ }
+
+ if (right_pa) {
+ if (right_pa_start < new_end)
+ new_end = right_pa_start;
+ }
+ read_unlock(&ei->i_prealloc_lock);
+
+ /* XXX: extra loop to check we really don't overlap preallocations */
+ ext4_mb_pa_assert_overlap(ac, new_start, new_end);
+
+ *start = new_start;
+ *end = new_end;
+}
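For illustration only (not part of the patch): the step-1/2 descent above boils down to finding the preallocation with the greatest logical start that is still at or before the request's logical start. In this hypothetical sketch a sorted array of PA starts stands in for the rbtree.

#include <stdio.h>

static int left_neighbour(const unsigned int *starts, int n, unsigned int target)
{
	int lo = 0, hi = n - 1, best = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (starts[mid] <= target) {
			best = mid;	/* candidate left neighbour */
			lo = mid + 1;
		} else {
			hi = mid - 1;
		}
	}
	return best;		/* -1 when every PA starts to the right */
}

int main(void)
{
	unsigned int starts[] = { 0, 64, 200, 512 };

	printf("%d\n", left_neighbour(starts, 4, 210));	/* prints 2 */
	return 0;
}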
+
+/*
* Normalization means making request better in terms of
* size and alignment
*/
@@ -3992,13 +4520,11 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
- ext4_lblk_t end;
- loff_t size, start_off;
+ loff_t size, start_off, end;
loff_t orig_size __maybe_unused;
ext4_lblk_t start;
- struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
- struct ext4_prealloc_space *pa;
/* do normalize only data requests, metadata requests
do not need preallocation */
@@ -4023,7 +4549,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* first, let's learn actual file size
* given current request is allocated */
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
size = size << bsbits;
if (size < i_size_read(ac->ac_inode))
size = i_size_read(ac->ac_inode);
@@ -4060,19 +4586,32 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
(22 - bsbits)) << 22;
size = 4 * 1024 * 1024;
- } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
+ } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
(8<<20)>>bsbits, max, 8 * 1024)) {
start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
(23 - bsbits)) << 23;
size = 8 * 1024 * 1024;
} else {
start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
- size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+ size = (loff_t) EXT4_C2B(sbi,
ac->ac_o_ex.fe_len) << bsbits;
}
size = size >> bsbits;
start = start_off >> bsbits;
+ /*
+ * For tiny groups (smaller than 8MB) the chosen allocation
+ * alignment may be larger than group size. Make sure the
+ * alignment does not move the allocation to a different group,
+ * which would make mballoc fail assertions later.
+ */
+ start = max(start, rounddown(ac->ac_o_ex.fe_logical,
+ (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
+
+ /* avoid unnecessary preallocation that may trigger assertions */
+ if (start + size > EXT_MAX_BLOCKS)
+ size = EXT_MAX_BLOCKS - start;
+
/* don't cover already allocated blocks in selected range */
if (ar->pleft && start <= ar->lleft) {
size -= ar->lleft + 1 - start;
@@ -4090,62 +4629,26 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
end = start + size;
- /* check we don't cross already preallocated blocks */
- rcu_read_lock();
- list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
- ext4_lblk_t pa_end;
-
- if (pa->pa_deleted)
- continue;
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted) {
- spin_unlock(&pa->pa_lock);
- continue;
- }
-
- pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
- pa->pa_len);
-
- /* PA must not overlap original request */
- BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
- ac->ac_o_ex.fe_logical < pa->pa_lstart));
+ ext4_mb_pa_adjust_overlap(ac, &start, &end);
- /* skip PAs this normalized request doesn't overlap with */
- if (pa->pa_lstart >= end || pa_end <= start) {
- spin_unlock(&pa->pa_lock);
- continue;
- }
- BUG_ON(pa->pa_lstart <= start && pa_end >= end);
-
- /* adjust start or end to be adjacent to this pa */
- if (pa_end <= ac->ac_o_ex.fe_logical) {
- BUG_ON(pa_end < start);
- start = pa_end;
- } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
- BUG_ON(pa->pa_lstart > end);
- end = pa->pa_lstart;
- }
- spin_unlock(&pa->pa_lock);
- }
- rcu_read_unlock();
size = end - start;
- /* XXX: extra loop to check we really don't overlap preallocations */
- rcu_read_lock();
- list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
- ext4_lblk_t pa_end;
-
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted == 0) {
- pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
- pa->pa_len);
- BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
- }
- spin_unlock(&pa->pa_lock);
- }
- rcu_read_unlock();
-
- if (start + size <= ac->ac_o_ex.fe_logical &&
+ /*
+ * In this function "start" and "size" are normalized for better
+ * alignment and length such that we could preallocate more blocks.
+ * This normalization is done such that original request of
+ * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
+ * "size" boundaries.
+ * (Note fe_len can be relaxed since FS block allocation API does not
+ * guarantee the number of contiguous blocks allocated, since that
+ * depends upon free space left, etc).
+ * In case of inode pa, later we use the allocated blocks
+ * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
+ * range of goal/best blocks [start, size] to put it at the
+ * ac_o_ex.fe_logical extent of this inode.
+ * (See ext4_mb_use_inode_pa() for more details)
+ */
+ if (start + size <= ac->ac_o_ex.fe_logical ||
start > ac->ac_o_ex.fe_logical) {
ext4_msg(ac->ac_sb, KERN_ERR,
"start %lu, size %lu, fe_logical %lu",
@@ -4161,20 +4664,24 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
* placement or satisfy big request as is */
ac->ac_g_ex.fe_logical = start;
ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
+ ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
/* define goal start in order to merge */
- if (ar->pright && (ar->lright == (start + size))) {
+ if (ar->pright && (ar->lright == (start + size)) &&
+ ar->pright >= size &&
+ ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
/* merge to the right */
ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
- &ac->ac_f_ex.fe_group,
- &ac->ac_f_ex.fe_start);
+ &ac->ac_g_ex.fe_group,
+ &ac->ac_g_ex.fe_start);
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
- if (ar->pleft && (ar->lleft + 1 == start)) {
+ if (ar->pleft && (ar->lleft + 1 == start) &&
+ ar->pleft + 1 < ext4_blocks_count(es)) {
/* merge to the left */
ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
- &ac->ac_f_ex.fe_group,
- &ac->ac_f_ex.fe_start);
+ &ac->ac_g_ex.fe_group,
+ &ac->ac_g_ex.fe_start);
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
@@ -4191,11 +4698,20 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
atomic_inc(&sbi->s_bal_success);
+
atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
+ for (int i=0; i<EXT4_MB_NUM_CRS; i++) {
+ atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
+ }
+
atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
atomic_inc(&sbi->s_bal_goals);
+ /* did we allocate as much as normalizer originally wanted? */
+ if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
+ atomic_inc(&sbi->s_bal_len_goals);
+
if (ac->ac_found > sbi->s_mb_max_to_scan)
atomic_inc(&sbi->s_bal_breaks);
}
@@ -4222,15 +4738,14 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
if (ac->ac_f_ex.fe_len == 0)
return;
err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
- if (err) {
+ if (WARN_RATELIMIT(err,
+ "ext4: mb_load_buddy failed (%d)", err))
/*
* This should never happen since we pin the
- * pages in the ext4_allocation_context so
+ * folios in the ext4_allocation_context so
* ext4_mb_load_buddy() should never fail.
*/
- WARN(1, "mb_load_buddy failed (%d)", err);
return;
- }
ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
ac->ac_f_ex.fe_len);
@@ -4238,8 +4753,11 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
ext4_mb_unload_buddy(&e4b);
return;
}
- if (pa->pa_type == MB_INODE_PA)
+ if (pa->pa_type == MB_INODE_PA) {
+ spin_lock(&pa->pa_lock);
pa->pa_free += ac->ac_b_ex.fe_len;
+ spin_unlock(&pa->pa_lock);
+ }
}
/*
@@ -4267,6 +4785,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
BUG_ON(start < pa->pa_pstart);
BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
BUG_ON(pa->pa_free < len);
+ BUG_ON(ac->ac_b_ex.fe_len <= 0);
pa->pa_free -= len;
mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
@@ -4287,14 +4806,14 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
ac->ac_status = AC_STATUS_FOUND;
ac->ac_pa = pa;
- /* we don't correct pa_pstart or pa_plen here to avoid
+ /* we don't correct pa_pstart or pa_len here to avoid
* possible race when the group is being loaded concurrently
* instead we correct pa later, after blocks are marked
* in on-disk bitmap -- see ext4_mb_release_context()
* Other CPUs are prevented from allocating from this pa by lg_mutex
*/
mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
- pa->pa_lstart-len, len, pa);
+ pa->pa_lstart, len, pa);
}
/*
@@ -4327,6 +4846,37 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
}
/*
+ * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
+ */
+static bool
+ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
+ struct ext4_prealloc_space *pa)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ ext4_fsblk_t start;
+
+ if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
+ return true;
+
+ /*
+ * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
+ * in ext4_mb_normalize_request and will stay the same as ac_o_ex
+ * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
+ * consistent with ext4_mb_find_by_goal.
+ */
+ start = pa->pa_pstart +
+ (ac->ac_g_ex.fe_logical - pa->pa_lstart);
+ if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
+ return false;
+
+ if (ac->ac_g_ex.fe_len > pa->pa_len -
+ EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
+ return false;
+
+ return true;
+}
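In plain block units (ignoring the cluster conversion), the two conditions checked by ext4_mb_pa_goal_check() above reduce to the following hypothetical stand-alone helper: the goal must map onto the same physical block inside the PA, and the remaining PA length from that point must cover the requested length.

#include <stdbool.h>
#include <stdio.h>

static bool goal_fits_pa(unsigned long goal_phys, unsigned long goal_logical,
			 unsigned long goal_len, unsigned long pa_pstart,
			 unsigned long pa_lstart, unsigned long pa_len)
{
	unsigned long off = goal_logical - pa_lstart;

	if (pa_pstart + off != goal_phys)
		return false;		/* physical goal not honoured by this PA */
	return goal_len <= pa_len - off;
}

int main(void)
{
	/* PA covers logical 100..163 at physical 1000..1063 */
	printf("%d\n", goal_fits_pa(1010, 110, 32, 1000, 100, 64));	/* 1 */
	printf("%d\n", goal_fits_pa(1010, 110, 60, 1000, 100, 64));	/* 0 */
	return 0;
}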
+
+/*
* search goal blocks in preallocated space
*/
static noinline_for_stack bool
@@ -4336,43 +4886,158 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
int order, i;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_locality_group *lg;
- struct ext4_prealloc_space *pa, *cpa = NULL;
+ struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
+ struct rb_node *iter;
ext4_fsblk_t goal_block;
/* only data can be preallocated */
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
return false;
- /* first, try per-file preallocation */
- rcu_read_lock();
- list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
+ /*
+ * first, try per-file preallocation by searching the inode pa rbtree.
+ *
+ * Here, we can't do a direct traversal of the tree because
+ * ext4_mb_discard_group_preallocations() can concurrently mark the pa
+ * deleted and that can cause direct traversal to skip some entries.
+ */
+ read_lock(&ei->i_prealloc_lock);
- /* all fields in this condition don't change,
- * so we can skip locking for them */
- if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
- ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
- EXT4_C2B(sbi, pa->pa_len)))
- continue;
+ if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
+ goto try_group_pa;
+ }
- /* non-extent files can't have physical blocks past 2^32 */
- if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
- (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
- EXT4_MAX_BLOCK_FILE_PHYS))
- continue;
+ /*
+ * Step 1: Find a pa with logical start immediately adjacent to the
+ * original logical start. This could be on the left or right.
+ *
+ * (tmp_pa->pa_lstart never changes so we can skip locking for it).
+ */
+ for (iter = ei->i_prealloc_node.rb_node; iter;
+ iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
+ tmp_pa->pa_lstart, iter)) {
+ tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ }
- /* found preallocated blocks, use them */
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted == 0 && pa->pa_free) {
- atomic_inc(&pa->pa_count);
- ext4_mb_use_inode_pa(ac, pa);
- spin_unlock(&pa->pa_lock);
- ac->ac_criteria = 10;
- rcu_read_unlock();
- return true;
+ /*
+ * Step 2: The adjacent pa might be to the right of the logical start, so
+ * find the left adjacent pa. After this step we'd have a valid tmp_pa whose
+ * logical start is to the left of the original request's logical start.
+ */
+ if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
+ struct rb_node *tmp;
+ tmp = rb_prev(&tmp_pa->pa_node.inode_node);
+
+ if (tmp) {
+ tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ } else {
+ /*
+ * If there is no adjacent pa to the left then finding
+ * an overlapping pa is not possible, so stop searching
+ * the inode pa tree.
+ */
+ goto try_group_pa;
}
- spin_unlock(&pa->pa_lock);
}
- rcu_read_unlock();
+
+ BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
+
+ /*
+ * Step 3: If the left adjacent pa is deleted, keep moving left to find
+ * the first non deleted adjacent pa. After this step we should have a
+ * valid tmp_pa which is guaranteed to be non deleted.
+ */
+ for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
+ if (!iter) {
+ /*
+ * no non deleted left adjacent pa, so stop searching
+ * inode pa tree
+ */
+ goto try_group_pa;
+ }
+ tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0) {
+ /*
+ * We will keep holding the pa_lock from
+ * this point on because we don't want group discard
+ * to delete this pa underneath us. Since group
+ * discard is anyways an ENOSPC operation it
+ * should be okay for it to wait a few more cycles.
+ */
+ break;
+ } else {
+ spin_unlock(&tmp_pa->pa_lock);
+ }
+ }
+
+ BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
+ BUG_ON(tmp_pa->pa_deleted == 1);
+
+ /*
+ * Step 4: We now have the non deleted left adjacent pa. Only this
+ * pa can possibly satisfy the request hence check if it overlaps
+ * original logical start and stop searching if it doesn't.
+ */
+ if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
+ spin_unlock(&tmp_pa->pa_lock);
+ goto try_group_pa;
+ }
+
+ /* non-extent files can't have physical blocks past 2^32 */
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
+ (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
+ EXT4_MAX_BLOCK_FILE_PHYS)) {
+ /*
+ * Since PAs don't overlap, we won't find any other PA to
+ * satisfy this.
+ */
+ spin_unlock(&tmp_pa->pa_lock);
+ goto try_group_pa;
+ }
+
+ if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
+ atomic_inc(&tmp_pa->pa_count);
+ ext4_mb_use_inode_pa(ac, tmp_pa);
+ spin_unlock(&tmp_pa->pa_lock);
+ read_unlock(&ei->i_prealloc_lock);
+ return true;
+ } else {
+ /*
+ * We found a valid overlapping pa but couldn't use it because
+ * it had no free blocks. This should ideally never happen
+ * because:
+ *
+ * 1. When a new inode pa is added to rbtree it must have
+ * pa_free > 0 since otherwise we won't actually need
+ * preallocation.
+ *
+ * 2. An inode pa that is in the rbtree can only have its
+ * pa_free become zero when another thread calls:
+ * ext4_mb_new_blocks
+ * ext4_mb_use_preallocated
+ * ext4_mb_use_inode_pa
+ *
+ * 3. Further, after the above calls make pa_free == 0, we will
+ * immediately remove it from the rbtree in:
+ * ext4_mb_new_blocks
+ * ext4_mb_release_context
+ * ext4_mb_put_pa
+ *
+ * 4. Since pa_free becoming 0 and the pa getting removed
+ * from tree both happen in ext4_mb_new_blocks, which is always
+ * called with i_data_sem held for data allocations, we can be
+ * sure that another process will never see a pa in rbtree with
+ * pa_free == 0.
+ */
+ WARN_ON_ONCE(tmp_pa->pa_free == 0);
+ }
+ spin_unlock(&tmp_pa->pa_lock);
+try_group_pa:
+ read_unlock(&ei->i_prealloc_lock);
/* can we use group allocation? */
if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
@@ -4394,52 +5059,27 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
*/
for (i = order; i < PREALLOC_TB_SIZE; i++) {
rcu_read_lock();
- list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
- pa_inode_list) {
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted == 0 &&
- pa->pa_free >= ac->ac_o_ex.fe_len) {
+ list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
+ pa_node.lg_list) {
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0 &&
+ tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
cpa = ext4_mb_check_group_pa(goal_block,
- pa, cpa);
+ tmp_pa, cpa);
}
- spin_unlock(&pa->pa_lock);
+ spin_unlock(&tmp_pa->pa_lock);
}
rcu_read_unlock();
}
if (cpa) {
ext4_mb_use_group_pa(ac, cpa);
- ac->ac_criteria = 20;
return true;
}
return false;
}
/*
- * the function goes through all block freed in the group
- * but not yet committed and marks them used in in-core bitmap.
- * buddy must be generated from this bitmap
- * Need to be called with the ext4 group lock held
- */
-static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
- ext4_group_t group)
-{
- struct rb_node *n;
- struct ext4_group_info *grp;
- struct ext4_free_data *entry;
-
- grp = ext4_get_group_info(sb, group);
- n = rb_first(&(grp->bb_free_root));
-
- while (n) {
- entry = rb_entry(n, struct ext4_free_data, efd_node);
- ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
- n = rb_next(n);
- }
- return;
-}
-
-/*
* the function goes through all preallocation in this group and marks them
* used in in-core bitmap. buddy must be generated from this bitmap
* Need to be called with ext4 group lock held
@@ -4456,6 +5096,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
int preallocated = 0;
int len;
+ if (!grp)
+ return;
+
/* all form of preallocation discards first load group,
* so the only competing code is preallocation use.
* we don't need any locking here
@@ -4474,7 +5117,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
if (unlikely(len == 0))
continue;
BUG_ON(groupnr != group);
- ext4_set_bits(bitmap, start, len);
+ mb_set_bits(bitmap, start, len);
preallocated += len;
}
mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
@@ -4500,16 +5143,22 @@ static void ext4_mb_mark_pa_deleted(struct super_block *sb,
}
}
-static void ext4_mb_pa_callback(struct rcu_head *head)
+static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
{
- struct ext4_prealloc_space *pa;
- pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
-
+ BUG_ON(!pa);
BUG_ON(atomic_read(&pa->pa_count));
BUG_ON(pa->pa_deleted == 0);
kmem_cache_free(ext4_pspace_cachep, pa);
}
+static void ext4_mb_pa_callback(struct rcu_head *head)
+{
+ struct ext4_prealloc_space *pa;
+
+ pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+ ext4_mb_pa_free(pa);
+}
+
/*
* drops a reference to preallocated space descriptor
* if this was the last reference and the space is consumed
@@ -4519,6 +5168,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
{
ext4_group_t grp;
ext4_fsblk_t grp_blk;
+ struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
/* in this short window concurrent discard can set pa_deleted */
spin_lock(&pa->pa_lock);
@@ -4563,11 +5213,42 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
list_del(&pa->pa_group_list);
ext4_unlock_group(sb, grp);
- spin_lock(pa->pa_obj_lock);
- list_del_rcu(&pa->pa_inode_list);
- spin_unlock(pa->pa_obj_lock);
+ if (pa->pa_type == MB_INODE_PA) {
+ write_lock(pa->pa_node_lock.inode_lock);
+ rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
+ write_unlock(pa->pa_node_lock.inode_lock);
+ ext4_mb_pa_free(pa);
+ } else {
+ spin_lock(pa->pa_node_lock.lg_lock);
+ list_del_rcu(&pa->pa_node.lg_list);
+ spin_unlock(pa->pa_node_lock.lg_lock);
+ call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+ }
+}
+
+static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
+{
+ struct rb_node **iter = &root->rb_node, *parent = NULL;
+ struct ext4_prealloc_space *iter_pa, *new_pa;
+ ext4_lblk_t iter_start, new_start;
- call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+ while (*iter) {
+ iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ new_pa = rb_entry(new, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ iter_start = iter_pa->pa_lstart;
+ new_start = new_pa->pa_lstart;
+
+ parent = *iter;
+ if (new_start < iter_start)
+ iter = &((*iter)->rb_left);
+ else
+ iter = &((*iter)->rb_right);
+ }
+
+ rb_link_node(new, parent, iter);
+ rb_insert_color(new, root);
}
/*
@@ -4590,50 +5271,59 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa = ac->ac_pa;
- if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
- int winl;
- int wins;
- int win;
- int offs;
+ if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
+ struct ext4_free_extent ex = {
+ .fe_logical = ac->ac_g_ex.fe_logical,
+ .fe_len = ac->ac_orig_goal_len,
+ };
+ loff_t orig_goal_end = extent_logical_end(sbi, &ex);
+ loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
- /* we can't allocate as much as normalizer wants.
- * so, found space must get proper lstart
- * to cover original request */
+ /*
+ * We can't allocate as much as normalizer wants, so we try
+ * to get proper lstart to cover the original request, except
+ * when the goal doesn't cover the original request as below:
+ *
+ * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
+ * best_ex:0/200(200) -> adjusted: 1848/2048(200)
+ */
BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
- /* we're limited by original request in that
- * logical block must be covered any way
- * winl is window we can move our chunk within */
- winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
+ /*
+ * Use the below logic for adjusting best extent as it keeps
+ * fragmentation in check while ensuring logical range of best
+ * extent doesn't overflow out of goal extent:
+ *
+ * 1. Check if best ex can be kept at end of goal (before
+ * cr_best_avail trimmed it) and still cover original start
+ * 2. Else, check if best ex can be kept at start of goal and
+ * still cover original end
+ * 3. Else, keep the best ex at start of original request.
+ */
+ ex.fe_len = ac->ac_b_ex.fe_len;
- /* also, we should cover whole original request */
- wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
+ ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
+ if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
+ goto adjust_bex;
- /* the smallest one defines real window */
- win = min(winl, wins);
+ ex.fe_logical = ac->ac_g_ex.fe_logical;
+ if (o_ex_end <= extent_logical_end(sbi, &ex))
+ goto adjust_bex;
- offs = ac->ac_o_ex.fe_logical %
- EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (offs && offs < win)
- win = offs;
+ ex.fe_logical = ac->ac_o_ex.fe_logical;
+adjust_bex:
+ ac->ac_b_ex.fe_logical = ex.fe_logical;
- ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
- EXT4_NUM_B2C(sbi, win);
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
- BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
}
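The placement rules in the comment above reduce to a small piece of arithmetic. Below is a hypothetical stand-alone helper (not part of the patch) working in plain block units with no cluster conversion; it reproduces the orig 2045/10, goal 0/2048, best-length 200 example from the comment.

#include <stdio.h>

static unsigned long place_best(unsigned long goal_start, unsigned long goal_len,
				unsigned long orig_start, unsigned long orig_len,
				unsigned long best_len)
{
	unsigned long goal_end = goal_start + goal_len;
	unsigned long orig_end = orig_start + orig_len;
	unsigned long start;

	/* 1. keep best at the end of the goal if it still covers the orig start */
	start = goal_end - best_len;
	if (orig_start >= start)
		return start;
	/* 2. keep best at the start of the goal if it still covers the orig end */
	if (orig_end <= goal_start + best_len)
		return goal_start;
	/* 3. otherwise anchor best at the original start */
	return orig_start;
}

int main(void)
{
	/* mirrors the orig:2045/10, goal:0/2048, best_len:200 example above */
	printf("%lu\n", place_best(0, 2048, 2045, 10, 200));	/* prints 1848 */
	return 0;
}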
- /* preallocation can change ac_b_ex, thus we store actually
- * allocated blocks for history */
- ac->ac_f_ex = ac->ac_b_ex;
-
pa->pa_lstart = ac->ac_b_ex.fe_logical;
pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
pa->pa_len = ac->ac_b_ex.fe_len;
pa->pa_free = pa->pa_len;
spin_lock_init(&pa->pa_lock);
- INIT_LIST_HEAD(&pa->pa_inode_list);
INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_type = MB_INODE_PA;
@@ -4642,20 +5332,22 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa->pa_len, pa->pa_lstart);
trace_ext4_mb_new_inode_pa(ac, pa);
- ext4_mb_use_inode_pa(ac, pa);
atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
+ ext4_mb_use_inode_pa(ac, pa);
ei = EXT4_I(ac->ac_inode);
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ if (!grp)
+ return;
- pa->pa_obj_lock = &ei->i_prealloc_lock;
+ pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
pa->pa_inode = ac->ac_inode;
list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
- spin_lock(pa->pa_obj_lock);
- list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
- spin_unlock(pa->pa_obj_lock);
+ write_lock(pa->pa_node_lock.inode_lock);
+ ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
+ write_unlock(pa->pa_node_lock.inode_lock);
atomic_inc(&ei->i_prealloc_active);
}
@@ -4678,16 +5370,12 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
pa = ac->ac_pa;
- /* preallocation can change ac_b_ex, thus we store actually
- * allocated blocks for history */
- ac->ac_f_ex = ac->ac_b_ex;
-
pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
pa->pa_lstart = pa->pa_pstart;
pa->pa_len = ac->ac_b_ex.fe_len;
pa->pa_free = pa->pa_len;
spin_lock_init(&pa->pa_lock);
- INIT_LIST_HEAD(&pa->pa_inode_list);
+ INIT_LIST_HEAD(&pa->pa_node.lg_list);
INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_type = MB_GROUP_PA;
@@ -4700,10 +5388,12 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ if (!grp)
+ return;
lg = ac->ac_lg;
BUG_ON(lg == NULL);
- pa->pa_obj_lock = &lg->lg_prealloc_lock;
+ pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
pa->pa_inode = NULL;
list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
@@ -4730,7 +5420,7 @@ static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
* the caller MUST hold group/inode locks.
* TODO: optimize the case when there are no in-core structures yet
*/
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
struct ext4_prealloc_space *pa)
{
@@ -4780,11 +5470,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
*/
}
atomic_add(free, &sbi->s_mb_discarded);
-
- return 0;
}
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
struct ext4_prealloc_space *pa)
{
@@ -4795,12 +5483,14 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
trace_ext4_mb_release_group_pa(sb, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
+ ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
+ e4b->bd_group, group, pa->pa_pstart);
+ return;
+ }
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
-
- return 0;
}
/*
@@ -4814,17 +5504,19 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
*/
static noinline_for_stack int
ext4_mb_discard_group_preallocations(struct super_block *sb,
- ext4_group_t group, int needed)
+ ext4_group_t group, int *busy)
{
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
- struct list_head list;
+ LIST_HEAD(list);
struct ext4_buddy e4b;
+ struct ext4_inode_info *ei;
int err;
- int busy = 0;
- int free, free_total = 0;
+ int free = 0;
+ if (!grp)
+ return 0;
mb_debug(sb, "discard preallocation for group %u\n", group);
if (list_empty(&grp->bb_prealloc_list))
goto out_dbg;
@@ -4846,19 +5538,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
goto out_dbg;
}
- if (needed == 0)
- needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
-
- INIT_LIST_HEAD(&list);
-repeat:
- free = 0;
ext4_lock_group(sb, group);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
spin_lock(&pa->pa_lock);
if (atomic_read(&pa->pa_count)) {
spin_unlock(&pa->pa_lock);
- busy = 1;
+ *busy = 1;
continue;
}
if (pa->pa_deleted) {
@@ -4885,35 +5571,35 @@ repeat:
list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
/* remove from object (inode or locality group) */
- spin_lock(pa->pa_obj_lock);
- list_del_rcu(&pa->pa_inode_list);
- spin_unlock(pa->pa_obj_lock);
+ if (pa->pa_type == MB_GROUP_PA) {
+ spin_lock(pa->pa_node_lock.lg_lock);
+ list_del_rcu(&pa->pa_node.lg_list);
+ spin_unlock(pa->pa_node_lock.lg_lock);
+ } else {
+ write_lock(pa->pa_node_lock.inode_lock);
+ ei = EXT4_I(pa->pa_inode);
+ rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
+ write_unlock(pa->pa_node_lock.inode_lock);
+ }
- if (pa->pa_type == MB_GROUP_PA)
+ list_del(&pa->u.pa_tmp_list);
+
+ if (pa->pa_type == MB_GROUP_PA) {
ext4_mb_release_group_pa(&e4b, pa);
- else
+ call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+ } else {
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
-
- list_del(&pa->u.pa_tmp_list);
- call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+ ext4_mb_pa_free(pa);
+ }
}
- free_total += free;
-
- /* if we still need more blocks and some PAs were used, try again */
- if (free_total < needed && busy) {
- ext4_unlock_group(sb, group);
- cond_resched();
- busy = 0;
- goto repeat;
- }
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
out_dbg:
mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
- free_total, group, grp->bb_free);
- return free_total;
+ free, group, grp->bb_free);
+ return free;
}
/*
@@ -4925,21 +5611,20 @@ out_dbg:
*
* FIXME!! Make sure it is valid at all the call sites
*/
-void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
+void ext4_discard_preallocations(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct super_block *sb = inode->i_sb;
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
ext4_group_t group = 0;
- struct list_head list;
+ LIST_HEAD(list);
struct ext4_buddy e4b;
+ struct rb_node *iter;
int err;
- if (!S_ISREG(inode->i_mode)) {
- /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
+ if (!S_ISREG(inode->i_mode))
return;
- }
if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
return;
@@ -4947,26 +5632,23 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
mb_debug(sb, "discard preallocation for inode %lu\n",
inode->i_ino);
trace_ext4_discard_preallocations(inode,
- atomic_read(&ei->i_prealloc_active), needed);
-
- INIT_LIST_HEAD(&list);
-
- if (needed == 0)
- needed = UINT_MAX;
+ atomic_read(&ei->i_prealloc_active));
repeat:
/* first, collect all pa's in the inode */
- spin_lock(&ei->i_prealloc_lock);
- while (!list_empty(&ei->i_prealloc_list) && needed) {
- pa = list_entry(ei->i_prealloc_list.prev,
- struct ext4_prealloc_space, pa_inode_list);
- BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
+ write_lock(&ei->i_prealloc_lock);
+ for (iter = rb_first(&ei->i_prealloc_node); iter;
+ iter = rb_next(iter)) {
+ pa = rb_entry(iter, struct ext4_prealloc_space,
+ pa_node.inode_node);
+ BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
+
spin_lock(&pa->pa_lock);
if (atomic_read(&pa->pa_count)) {
/* this shouldn't happen often - nobody should
* use preallocation while we're discarding it */
spin_unlock(&pa->pa_lock);
- spin_unlock(&ei->i_prealloc_lock);
+ write_unlock(&ei->i_prealloc_lock);
ext4_msg(sb, KERN_ERR,
"uh-oh! used pa while discarding");
WARN_ON(1);
@@ -4977,15 +5659,14 @@ repeat:
if (pa->pa_deleted == 0) {
ext4_mb_mark_pa_deleted(sb, pa);
spin_unlock(&pa->pa_lock);
- list_del_rcu(&pa->pa_inode_list);
+ rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
list_add(&pa->u.pa_tmp_list, &list);
- needed--;
continue;
}
/* someone is deleting pa right now */
spin_unlock(&pa->pa_lock);
- spin_unlock(&ei->i_prealloc_lock);
+ write_unlock(&ei->i_prealloc_lock);
/* we have to wait here because pa_deleted
* doesn't mean pa is already unlinked from
@@ -5002,7 +5683,7 @@ repeat:
schedule_timeout_uninterruptible(HZ);
goto repeat;
}
- spin_unlock(&ei->i_prealloc_lock);
+ write_unlock(&ei->i_prealloc_lock);
list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
BUG_ON(pa->pa_type != MB_INODE_PA);
@@ -5034,7 +5715,7 @@ repeat:
put_bh(bitmap_bh);
list_del(&pa->u.pa_tmp_list);
- call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+ ext4_mb_pa_free(pa);
}
}
@@ -5051,14 +5732,20 @@ static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
return 0;
}
-static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
+static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
{
struct ext4_prealloc_space *pa = ac->ac_pa;
BUG_ON(!pa);
ac->ac_pa = NULL;
WARN_ON(!atomic_dec_and_test(&pa->pa_count));
- kmem_cache_free(ext4_pspace_cachep, pa);
+ /*
+ * This function is only called on error or when the length of the
+ * found blocks is less than the requested length, so the PA has not
+ * been added to grp->bb_prealloc_list and we don't need to lock it.
+ */
+ pa->pa_deleted = 1;
+ ext4_mb_pa_free(pa);
}
#ifdef CONFIG_EXT4_DEBUG
@@ -5066,7 +5753,7 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
{
ext4_group_t i, ngroups;
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_emergency_state(sb))
return;
ngroups = ext4_get_groups_count(sb);
@@ -5076,6 +5763,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
struct ext4_prealloc_space *pa;
ext4_grpblk_t start;
struct list_head *cur;
+
+ if (!grp)
+ continue;
ext4_lock_group(sb, i);
list_for_each(cur, &grp->bb_prealloc_list) {
pa = list_entry(cur, struct ext4_prealloc_space,
@@ -5097,7 +5787,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_emergency_state(sb))
return;
mb_debug(sb, "Can't allocate:"
@@ -5121,17 +5811,19 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(unsigned long)ac->ac_b_ex.fe_logical,
(int)ac->ac_criteria);
mb_debug(sb, "%u found", ac->ac_found);
+ mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa));
+ if (ac->ac_pa)
+ mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
+ "group pa" : "inode pa");
ext4_mb_show_pa(sb);
}
#else
static inline void ext4_mb_show_pa(struct super_block *sb)
{
- return;
}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
ext4_mb_show_pa(ac->ac_sb);
- return;
}
#endif
@@ -5147,6 +5839,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
int bsbits = ac->ac_sb->s_blocksize_bits;
loff_t size, isize;
+ bool inode_pa_eligible, group_pa_eligible;
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
return;
@@ -5154,25 +5847,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
return;
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ group_pa_eligible = sbi->s_mb_group_prealloc > 0;
+ inode_pa_eligible = true;
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
+ /* No point in using inode preallocation for closed files */
if ((size == isize) && !ext4_fs_is_busy(sbi) &&
- !inode_is_open_for_write(ac->ac_inode)) {
- ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
- return;
- }
+ !inode_is_open_for_write(ac->ac_inode))
+ inode_pa_eligible = false;
- if (sbi->s_mb_group_prealloc <= 0) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
- return;
- }
-
- /* don't use group allocation for large files */
size = max(size, isize);
- if (size > sbi->s_mb_stream_request) {
- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ /* Don't use group allocation for large files */
+ if (size > sbi->s_mb_stream_request)
+ group_pa_eligible = false;
+
+ if (!group_pa_eligible) {
+ if (inode_pa_eligible)
+ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ else
+ ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
return;
}
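The eligibility logic above can be summarised as a pure decision function. This hypothetical sketch is not part of the patch and only mirrors how the two flags select the allocation mode.

#include <stdbool.h>
#include <stdio.h>

enum alloc_mode { GROUP_PA, STREAM_ALLOC, NO_PREALLOC };

static enum alloc_mode pick_mode(bool group_pa_eligible, bool inode_pa_eligible)
{
	if (group_pa_eligible)
		return GROUP_PA;	/* small file, group preallocation enabled */
	return inode_pa_eligible ? STREAM_ALLOC : NO_PREALLOC;
}

int main(void)
{
	/* large file: too big for group pa, per-inode stream allocation instead */
	printf("%d\n", pick_mode(false, true));		/* prints 1 (STREAM_ALLOC) */
	return 0;
}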
@@ -5191,7 +5886,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
mutex_lock(&ac->ac_lg->lg_mutex);
}
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_initialize_context(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
@@ -5227,6 +5922,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ac->ac_o_ex.fe_start = block;
ac->ac_o_ex.fe_len = len;
ac->ac_g_ex = ac->ac_o_ex;
+ ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
ac->ac_flags = ar->flags;
/* we have to define context: we'll work with a file or
@@ -5240,8 +5936,6 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
(unsigned) ar->lleft, (unsigned) ar->pleft,
(unsigned) ar->lright, (unsigned) ar->pright,
inode_is_open_for_write(ar->inode) ? "" : "non-");
- return 0;
-
}
static noinline_for_stack void
@@ -5251,16 +5945,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
{
ext4_group_t group = 0;
struct ext4_buddy e4b;
- struct list_head discard_list;
+ LIST_HEAD(discard_list);
struct ext4_prealloc_space *pa, *tmp;
mb_debug(sb, "discard locality group preallocation\n");
- INIT_LIST_HEAD(&discard_list);
-
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
- pa_inode_list,
+ pa_node.lg_list,
lockdep_is_held(&lg->lg_prealloc_lock)) {
spin_lock(&pa->pa_lock);
if (atomic_read(&pa->pa_count)) {
@@ -5283,7 +5975,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
ext4_mb_mark_pa_deleted(sb, pa);
spin_unlock(&pa->pa_lock);
- list_del_rcu(&pa->pa_inode_list);
+ list_del_rcu(&pa->pa_node.lg_list);
list_add(&pa->u.pa_tmp_list, &discard_list);
total_entries--;
@@ -5344,7 +6036,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
/* Add the prealloc space to lg */
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
- pa_inode_list,
+ pa_node.lg_list,
lockdep_is_held(&lg->lg_prealloc_lock)) {
spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted) {
@@ -5353,8 +6045,8 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
}
if (!added && pa->pa_free < tmp_pa->pa_free) {
/* Add to the tail of the previous entry */
- list_add_tail_rcu(&pa->pa_inode_list,
- &tmp_pa->pa_inode_list);
+ list_add_tail_rcu(&pa->pa_node.lg_list,
+ &tmp_pa->pa_node.lg_list);
added = 1;
/*
* we want to count the total
@@ -5365,43 +6057,21 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
lg_prealloc_count++;
}
if (!added)
- list_add_tail_rcu(&pa->pa_inode_list,
+ list_add_tail_rcu(&pa->pa_node.lg_list,
&lg->lg_prealloc_list[order]);
spin_unlock(&lg->lg_prealloc_lock);
/* Now trim the list to be not more than 8 elements */
- if (lg_prealloc_count > 8) {
+ if (lg_prealloc_count > 8)
ext4_mb_discard_lg_preallocations(sb, lg,
order, lg_prealloc_count);
- return;
- }
- return ;
-}
-
-/*
- * if per-inode prealloc list is too long, trim some PA
- */
-static void ext4_mb_trim_inode_pa(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- int count, delta;
-
- count = atomic_read(&ei->i_prealloc_active);
- delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
- if (count > sbi->s_mb_max_inode_prealloc + delta) {
- count -= sbi->s_mb_max_inode_prealloc;
- ext4_discard_preallocations(inode, count);
- }
}
/*
* release all resource we used in allocation
*/
-static int ext4_mb_release_context(struct ext4_allocation_context *ac)
+static void ext4_mb_release_context(struct ext4_allocation_context *ac)
{
- struct inode *inode = ac->ac_inode;
- struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_prealloc_space *pa = ac->ac_pa;
if (pa) {
@@ -5421,47 +6091,46 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
* doesn't grow big.
*/
if (likely(pa->pa_free)) {
- spin_lock(pa->pa_obj_lock);
- list_del_rcu(&pa->pa_inode_list);
- spin_unlock(pa->pa_obj_lock);
+ spin_lock(pa->pa_node_lock.lg_lock);
+ list_del_rcu(&pa->pa_node.lg_list);
+ spin_unlock(pa->pa_node_lock.lg_lock);
ext4_mb_add_n_trim(ac);
}
}
- if (pa->pa_type == MB_INODE_PA) {
- /*
- * treat per-inode prealloc list as a lru list, then try
- * to trim the least recently used PA.
- */
- spin_lock(pa->pa_obj_lock);
- list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
- spin_unlock(pa->pa_obj_lock);
- }
-
ext4_mb_put_pa(ac, ac->ac_sb, pa);
}
- if (ac->ac_bitmap_page)
- put_page(ac->ac_bitmap_page);
- if (ac->ac_buddy_page)
- put_page(ac->ac_buddy_page);
+ if (ac->ac_bitmap_folio)
+ folio_put(ac->ac_bitmap_folio);
+ if (ac->ac_buddy_folio)
+ folio_put(ac->ac_buddy_folio);
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
mutex_unlock(&ac->ac_lg->lg_mutex);
ext4_mb_collect_stats(ac);
- ext4_mb_trim_inode_pa(inode);
- return 0;
}
static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
{
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
int ret;
- int freed = 0;
+ int freed = 0, busy = 0;
+ int retry = 0;
trace_ext4_mb_discard_preallocations(sb, needed);
+
+ if (needed == 0)
+ needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
+ repeat:
for (i = 0; i < ngroups && needed > 0; i++) {
- ret = ext4_mb_discard_group_preallocations(sb, i, needed);
+ ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
freed += ret;
needed -= ret;
+ cond_resched();
+ }
+
+ if (needed > 0 && busy && ++retry < 3) {
+ busy = 0;
+ goto repeat;
}
return freed;
@@ -5487,12 +6156,77 @@ static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
}
out_dbg:
- mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
+ mb_debug(sb, "freed %d, retry ? %s\n", freed, str_yes_no(ret));
return ret;
}
-static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
- struct ext4_allocation_request *ar, int *errp);
+/*
+ * Simple allocator for Ext4 fast commit replay path. It searches for blocks
+ * linearly starting at the goal block and also excludes the blocks which
+ * are going to be in use after fast commit replay.
+ */
+static ext4_fsblk_t
+ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
+{
+ struct buffer_head *bitmap_bh;
+ struct super_block *sb = ar->inode->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t group, nr;
+ ext4_grpblk_t blkoff;
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_grpblk_t i = 0;
+ ext4_fsblk_t goal, block;
+ struct ext4_super_block *es = sbi->s_es;
+
+ goal = ar->goal;
+ if (goal < le32_to_cpu(es->s_first_data_block) ||
+ goal >= ext4_blocks_count(es))
+ goal = le32_to_cpu(es->s_first_data_block);
+
+ ar->len = 0;
+ ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
+ for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh)) {
+ *errp = PTR_ERR(bitmap_bh);
+ pr_warn("Failed to read block bitmap\n");
+ return 0;
+ }
+
+ while (1) {
+ i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
+ blkoff);
+ if (i >= max)
+ break;
+ if (ext4_fc_replay_check_excluded(sb,
+ ext4_group_first_block_no(sb, group) +
+ EXT4_C2B(sbi, i))) {
+ blkoff = i + 1;
+ } else
+ break;
+ }
+ brelse(bitmap_bh);
+ if (i < max)
+ break;
+
+ if (++group >= ext4_get_groups_count(sb))
+ group = 0;
+
+ blkoff = 0;
+ }
+
+ if (i >= max) {
+ *errp = -ENOSPC;
+ return 0;
+ }
+
+ block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
+ ext4_mb_mark_bb(sb, block, 1, true);
+ ar->len = 1;
+
+ *errp = 0;
+ return block;
+}
/*
* Main entry point into mballoc to allocate blocks
@@ -5508,6 +6242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ext4_fsblk_t block = 0;
unsigned int inquota = 0;
unsigned int reserv_clstrs = 0;
+ int retries = 0;
u64 seq;
might_sleep();
@@ -5516,7 +6251,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
trace_ext4_request_blocks(ar);
if (sbi->s_mount_state & EXT4_FC_REPLAY)
- return ext4_mb_new_blocks_simple(handle, ar, errp);
+ return ext4_mb_new_blocks_simple(ar, errp);
/* Allow to use superuser reservation for quota file */
if (ext4_is_quota_file(ar->inode))
@@ -5566,11 +6301,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
goto out;
}
- *errp = ext4_mb_initialize_context(ac, ar);
- if (*errp) {
- ar->len = 0;
- goto out;
- }
+ ext4_mb_initialize_context(ac, ar);
ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
seq = this_cpu_read(discard_pa_seq);
@@ -5592,13 +6323,13 @@ repeat:
* So we have to free this pa here itself.
*/
if (*errp) {
- ext4_mb_pa_free(ac);
+ ext4_mb_pa_put_free(ac);
ext4_discard_allocated_blocks(ac);
goto errout;
}
if (ac->ac_status == AC_STATUS_FOUND &&
ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
- ext4_mb_pa_free(ac);
+ ext4_mb_pa_put_free(ac);
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
@@ -5610,26 +6341,26 @@ repeat:
ar->len = ac->ac_b_ex.fe_len;
}
} else {
- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
+ if (++retries < 3 &&
+ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
goto repeat;
/*
* If block allocation fails then the pa allocated above
* needs to be freed here itself.
*/
- ext4_mb_pa_free(ac);
+ ext4_mb_pa_put_free(ac);
*errp = -ENOSPC;
}
-errout:
if (*errp) {
+errout:
ac->ac_b_ex.fe_len = 0;
ar->len = 0;
ext4_mb_show_ac(ac);
}
ext4_mb_release_context(ac);
+ kmem_cache_free(ext4_ac_cachep, ac);
out:
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
if (inquota && ar->len < inquota)
dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
if (!ar->len) {
@@ -5649,47 +6380,83 @@ out:
* are contiguous, AND the extents were freed by the same transaction,
* AND the blocks are associated with the same group.
*/
-static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
- struct ext4_free_data *entry,
- struct ext4_free_data *new_entry,
- struct rb_root *entry_rb_root)
+static inline bool
+ext4_freed_extents_can_be_merged(struct ext4_free_data *entry1,
+ struct ext4_free_data *entry2)
{
- if ((entry->efd_tid != new_entry->efd_tid) ||
- (entry->efd_group != new_entry->efd_group))
- return;
- if (entry->efd_start_cluster + entry->efd_count ==
- new_entry->efd_start_cluster) {
- new_entry->efd_start_cluster = entry->efd_start_cluster;
- new_entry->efd_count += entry->efd_count;
- } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
- entry->efd_start_cluster) {
- new_entry->efd_count += entry->efd_count;
- } else
- return;
+ if (entry1->efd_tid != entry2->efd_tid)
+ return false;
+ if (entry1->efd_start_cluster + entry1->efd_count !=
+ entry2->efd_start_cluster)
+ return false;
+ if (WARN_ON_ONCE(entry1->efd_group != entry2->efd_group))
+ return false;
+ return true;
+}
+
+static inline void
+ext4_merge_freed_extents(struct ext4_sb_info *sbi, struct rb_root *root,
+ struct ext4_free_data *entry1,
+ struct ext4_free_data *entry2)
+{
+ entry1->efd_count += entry2->efd_count;
spin_lock(&sbi->s_md_lock);
- list_del(&entry->efd_list);
+ list_del(&entry2->efd_list);
spin_unlock(&sbi->s_md_lock);
- rb_erase(&entry->efd_node, entry_rb_root);
- kmem_cache_free(ext4_free_data_cachep, entry);
+ rb_erase(&entry2->efd_node, root);
+ kmem_cache_free(ext4_free_data_cachep, entry2);
}
-static noinline_for_stack int
+static inline void
+ext4_try_merge_freed_extent_prev(struct ext4_sb_info *sbi, struct rb_root *root,
+ struct ext4_free_data *entry)
+{
+ struct ext4_free_data *prev;
+ struct rb_node *node;
+
+ node = rb_prev(&entry->efd_node);
+ if (!node)
+ return;
+
+ prev = rb_entry(node, struct ext4_free_data, efd_node);
+ if (ext4_freed_extents_can_be_merged(prev, entry))
+ ext4_merge_freed_extents(sbi, root, prev, entry);
+}
+
+static inline void
+ext4_try_merge_freed_extent_next(struct ext4_sb_info *sbi, struct rb_root *root,
+ struct ext4_free_data *entry)
+{
+ struct ext4_free_data *next;
+ struct rb_node *node;
+
+ node = rb_next(&entry->efd_node);
+ if (!node)
+ return;
+
+ next = rb_entry(node, struct ext4_free_data, efd_node);
+ if (ext4_freed_extents_can_be_merged(entry, next))
+ ext4_merge_freed_extents(sbi, root, entry, next);
+}
+
+static noinline_for_stack void
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
struct ext4_free_data *new_entry)
{
ext4_group_t group = e4b->bd_group;
ext4_grpblk_t cluster;
ext4_grpblk_t clusters = new_entry->efd_count;
- struct ext4_free_data *entry;
+ struct ext4_free_data *entry = NULL;
struct ext4_group_info *db = e4b->bd_info;
struct super_block *sb = e4b->bd_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct rb_node **n = &db->bb_free_root.rb_node, *node;
+ struct rb_root *root = &db->bb_free_root;
+ struct rb_node **n = &root->rb_node;
struct rb_node *parent = NULL, *new_node;
BUG_ON(!ext4_handle_valid(handle));
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
+ BUG_ON(e4b->bd_bitmap_folio == NULL);
+ BUG_ON(e4b->bd_buddy_folio == NULL);
new_node = &new_entry->efd_node;
cluster = new_entry->efd_start_cluster;
@@ -5700,8 +6467,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
* otherwise we'll refresh it from
* on-disk bitmap and lose not-yet-available
* blocks */
- get_page(e4b->bd_buddy_page);
- get_page(e4b->bd_bitmap_page);
+ folio_get(e4b->bd_buddy_folio);
+ folio_get(e4b->bd_bitmap_folio);
}
while (*n) {
parent = *n;
@@ -5716,239 +6483,93 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
EXT4_C2B(sbi, cluster),
"Block already on to-be-freed list");
kmem_cache_free(ext4_free_data_cachep, new_entry);
- return 0;
+ return;
}
}
- rb_link_node(new_node, parent, n);
- rb_insert_color(new_node, &db->bb_free_root);
+ atomic_add(clusters, &sbi->s_mb_free_pending);
+ if (!entry)
+ goto insert;
- /* Now try to see the extent can be merged to left and right */
- node = rb_prev(new_node);
- if (node) {
- entry = rb_entry(node, struct ext4_free_data, efd_node);
- ext4_try_merge_freed_extent(sbi, entry, new_entry,
- &(db->bb_free_root));
+ /* Now try to see if the extent can be merged with prev and next */
+ if (ext4_freed_extents_can_be_merged(new_entry, entry)) {
+ entry->efd_start_cluster = cluster;
+ entry->efd_count += new_entry->efd_count;
+ kmem_cache_free(ext4_free_data_cachep, new_entry);
+ ext4_try_merge_freed_extent_prev(sbi, root, entry);
+ return;
}
-
- node = rb_next(new_node);
- if (node) {
- entry = rb_entry(node, struct ext4_free_data, efd_node);
- ext4_try_merge_freed_extent(sbi, entry, new_entry,
- &(db->bb_free_root));
+ if (ext4_freed_extents_can_be_merged(entry, new_entry)) {
+ entry->efd_count += new_entry->efd_count;
+ kmem_cache_free(ext4_free_data_cachep, new_entry);
+ ext4_try_merge_freed_extent_next(sbi, root, entry);
+ return;
}
+insert:
+ rb_link_node(new_node, parent, n);
+ rb_insert_color(new_node, root);
spin_lock(&sbi->s_md_lock);
- list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
- sbi->s_mb_free_pending += clusters;
+ list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
spin_unlock(&sbi->s_md_lock);
- return 0;
-}
-
-/*
- * Simple allocator for Ext4 fast commit replay path. It searches for blocks
- * linearly starting at the goal block and also excludes the blocks which
- * are going to be in use after fast commit replay.
- */
-static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
- struct ext4_allocation_request *ar, int *errp)
-{
- struct buffer_head *bitmap_bh;
- struct super_block *sb = ar->inode->i_sb;
- ext4_group_t group;
- ext4_grpblk_t blkoff;
- int i = sb->s_blocksize;
- ext4_fsblk_t goal, block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- goal = ar->goal;
- if (goal < le32_to_cpu(es->s_first_data_block) ||
- goal >= ext4_blocks_count(es))
- goal = le32_to_cpu(es->s_first_data_block);
-
- ar->len = 0;
- ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
- for (; group < ext4_get_groups_count(sb); group++) {
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (IS_ERR(bitmap_bh)) {
- *errp = PTR_ERR(bitmap_bh);
- pr_warn("Failed to read block bitmap\n");
- return 0;
- }
-
- ext4_get_group_no_and_offset(sb,
- max(ext4_group_first_block_no(sb, group), goal),
- NULL, &blkoff);
- i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
- blkoff);
- brelse(bitmap_bh);
- if (i >= sb->s_blocksize)
- continue;
- if (ext4_fc_replay_check_excluded(sb,
- ext4_group_first_block_no(sb, group) + i))
- continue;
- break;
- }
-
- if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
- return 0;
-
- block = ext4_group_first_block_no(sb, group) + i;
- ext4_mb_mark_bb(sb, block, 1, 1);
- ar->len = 1;
-
- return block;
}
static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
unsigned long count)
{
- struct buffer_head *bitmap_bh;
struct super_block *sb = inode->i_sb;
- struct ext4_group_desc *gdp;
- struct buffer_head *gdp_bh;
ext4_group_t group;
ext4_grpblk_t blkoff;
- int already_freed = 0, err, i;
ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (IS_ERR(bitmap_bh)) {
- err = PTR_ERR(bitmap_bh);
- pr_warn("Failed to read block bitmap\n");
- return;
- }
- gdp = ext4_get_group_desc(sb, group, &gdp_bh);
- if (!gdp)
- return;
-
- for (i = 0; i < count; i++) {
- if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
- already_freed++;
- }
- mb_clear_bits(bitmap_bh->b_data, blkoff, count);
- err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
- if (err)
- return;
- ext4_free_group_clusters_set(
- sb, gdp, ext4_free_group_clusters(sb, gdp) +
- count - already_freed);
- ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
- ext4_group_desc_csum_set(sb, group, gdp);
- ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
- sync_dirty_buffer(bitmap_bh);
- sync_dirty_buffer(gdp_bh);
- brelse(bitmap_bh);
+ ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
+ EXT4_MB_BITMAP_MARKED_CHECK |
+ EXT4_MB_SYNC_UPDATE,
+ NULL);
}
/**
- * ext4_free_blocks() -- Free given blocks and update quota
+ * ext4_mb_clear_bb() -- helper function for freeing blocks.
+ * Used by ext4_free_blocks()
* @handle: handle for this transaction
* @inode: inode
- * @bh: optional buffer of the block to be freed
* @block: starting physical block to be freed
* @count: number of blocks to be freed
* @flags: flags used by ext4_free_blocks
*/
-void ext4_free_blocks(handle_t *handle, struct inode *inode,
- struct buffer_head *bh, ext4_fsblk_t block,
- unsigned long count, int flags)
+static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
+ ext4_fsblk_t block, unsigned long count,
+ int flags)
{
- struct buffer_head *bitmap_bh = NULL;
struct super_block *sb = inode->i_sb;
- struct ext4_group_desc *gdp;
+ struct ext4_group_info *grp;
unsigned int overflow;
ext4_grpblk_t bit;
- struct buffer_head *gd_bh;
ext4_group_t block_group;
struct ext4_sb_info *sbi;
struct ext4_buddy e4b;
unsigned int count_clusters;
int err = 0;
- int ret;
+ int mark_flags = 0;
+ ext4_grpblk_t changed;
sbi = EXT4_SB(sb);
- if (sbi->s_mount_state & EXT4_FC_REPLAY) {
- ext4_free_blocks_simple(inode, block, count);
- return;
- }
-
- might_sleep();
- if (bh) {
- if (block)
- BUG_ON(block != bh->b_blocknr);
- else
- block = bh->b_blocknr;
- }
-
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
!ext4_inode_block_valid(inode, block, count)) {
- ext4_error(sb, "Freeing blocks not in datazone - "
- "block = %llu, count = %lu", block, count);
- goto error_return;
- }
-
- ext4_debug("freeing block %llu\n", block);
- trace_ext4_free_blocks(inode, block, count, flags);
-
- if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
- BUG_ON(count > 1);
-
- ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
- inode, bh, block);
- }
-
- /*
- * If the extent to be freed does not begin on a cluster
- * boundary, we need to deal with partial clusters at the
- * beginning and end of the extent. Normally we will free
- * blocks at the beginning or the end unless we are explicitly
- * requested to avoid doing so.
- */
- overflow = EXT4_PBLK_COFF(sbi, block);
- if (overflow) {
- if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
- overflow = sbi->s_cluster_ratio - overflow;
- block += overflow;
- if (count > overflow)
- count -= overflow;
- else
- return;
- } else {
- block -= overflow;
- count += overflow;
- }
- }
- overflow = EXT4_LBLK_COFF(sbi, count);
- if (overflow) {
- if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
- if (count > overflow)
- count -= overflow;
- else
- return;
- } else
- count += sbi->s_cluster_ratio - overflow;
- }
-
- if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
- int i;
- int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
-
- for (i = 0; i < count; i++) {
- cond_resched();
- if (is_metadata)
- bh = sb_find_get_block(inode->i_sb, block + i);
- ext4_forget(handle, is_metadata, inode, bh, block + i);
- }
+ ext4_error(sb, "Freeing blocks in system zone - "
+ "Block = %llu, count = %lu", block, count);
+ /* err = 0. ext4_std_error should be a no op */
+ goto error_out;
}
+ flags |= EXT4_FREE_BLOCKS_VALIDATED;
do_more:
overflow = 0;
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
- ext4_get_group_info(sb, block_group))))
+ grp = ext4_get_group_info(sb, block_group);
+ if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return;
/*
@@ -5959,62 +6580,39 @@ do_more:
overflow = EXT4_C2B(sbi, bit) + count -
EXT4_BLOCKS_PER_GROUP(sb);
count -= overflow;
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
}
count_clusters = EXT4_NUM_B2C(sbi, count);
- bitmap_bh = ext4_read_block_bitmap(sb, block_group);
- if (IS_ERR(bitmap_bh)) {
- err = PTR_ERR(bitmap_bh);
- bitmap_bh = NULL;
- goto error_return;
- }
- gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
- if (!gdp) {
- err = -EIO;
- goto error_return;
- }
+ trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
- if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
- in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
- in_range(block, ext4_inode_table(sb, gdp),
- sbi->s_itb_per_group) ||
- in_range(block + count - 1, ext4_inode_table(sb, gdp),
- sbi->s_itb_per_group)) {
+ /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
+ err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
+ if (err)
+ goto error_out;
+ if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ !ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks in system zone - "
"Block = %llu, count = %lu", block, count);
/* err = 0. ext4_std_error should be a no op */
- goto error_return;
+ goto error_clean;
}
- BUFFER_TRACE(bitmap_bh, "getting write access");
- err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
- EXT4_JTR_NONE);
- if (err)
- goto error_return;
-
- /*
- * We are about to modify some metadata. Call the journal APIs
- * to unshare ->b_data if a currently-committing transaction is
- * using it
- */
- BUFFER_TRACE(gd_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
- if (err)
- goto error_return;
#ifdef AGGRESSIVE_CHECK
- {
- int i;
- for (i = 0; i < count_clusters; i++)
- BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
- }
+ mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
#endif
- trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
+ err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
+ count_clusters, mark_flags, &changed);
- /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
- err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
- GFP_NOFS|__GFP_NOFAIL);
- if (err)
- goto error_return;
+
+ if (err && changed == 0)
+ goto error_clean;
+
+#ifdef AGGRESSIVE_CHECK
+ BUG_ON(changed != count_clusters);
+#endif
/*
* We need to make sure we don't reuse the freed block until after the
@@ -6038,42 +6636,32 @@ do_more:
new_entry->efd_tid = handle->h_transaction->t_tid;
ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
ext4_mb_free_metadata(handle, &e4b, new_entry);
} else {
- /* need to update group_info->bb_free and bitmap
- * with group lock held. generate_buddy look at
- * them with group lock_held
- */
if (test_opt(sb, DISCARD)) {
- err = ext4_issue_discard(sb, block_group, bit, count,
- NULL);
- if (err && err != -EOPNOTSUPP)
+ err = ext4_issue_discard(sb, block_group, bit,
+ count_clusters);
+ /*
+ * Ignore EOPNOTSUPP error. This is consistent with
+ * what happens when using journal.
+ */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ if (err)
ext4_msg(sb, KERN_WARNING, "discard request in"
- " group:%d block:%d count:%lu failed"
+ " group:%u block:%d count:%lu failed"
" with %d", block_group, bit, count,
err);
- } else
- EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
+ }
+
+ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
mb_free_blocks(inode, &e4b, bit, count_clusters);
}
- ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
- ext4_free_group_clusters_set(sb, gdp, ret);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
- ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic64_add(count_clusters,
- &sbi_array_rcu_deref(sbi, s_flex_groups,
- flex_group)->free_clusters);
- }
-
/*
* on a bigalloc file system, defer the s_freeclusters_counter
* update to the caller (ext4_remove_space and friends) so they
@@ -6086,28 +6674,122 @@ do_more:
count_clusters);
}
- ext4_mb_unload_buddy(&e4b);
-
- /* We dirtied the bitmap block */
- BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
- err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
-
- /* And the group descriptor block */
- BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
- ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
- if (!err)
- err = ret;
-
if (overflow && !err) {
block += count;
count = overflow;
- put_bh(bitmap_bh);
+ ext4_mb_unload_buddy(&e4b);
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
goto do_more;
}
-error_return:
- brelse(bitmap_bh);
+
+error_clean:
+ ext4_mb_unload_buddy(&e4b);
+error_out:
ext4_std_error(sb, err);
- return;
+}
+
+/**
+ * ext4_free_blocks() -- Free given blocks and update quota
+ * @handle: handle for this transaction
+ * @inode: inode
+ * @bh: optional buffer of the block to be freed
+ * @block: starting physical block to be freed
+ * @count: number of blocks to be freed
+ * @flags: flags used by ext4_free_blocks
+ */
+void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, ext4_fsblk_t block,
+ unsigned long count, int flags)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned int overflow;
+ struct ext4_sb_info *sbi;
+
+ sbi = EXT4_SB(sb);
+
+ if (bh) {
+ if (block)
+ BUG_ON(block != bh->b_blocknr);
+ else
+ block = bh->b_blocknr;
+ }
+
+ if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+ ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
+ return;
+ }
+
+ might_sleep();
+
+ if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ !ext4_inode_block_valid(inode, block, count)) {
+ ext4_error(sb, "Freeing blocks not in datazone - "
+ "block = %llu, count = %lu", block, count);
+ return;
+ }
+ flags |= EXT4_FREE_BLOCKS_VALIDATED;
+
+ ext4_debug("freeing block %llu\n", block);
+ trace_ext4_free_blocks(inode, block, count, flags);
+
+ if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
+ BUG_ON(count > 1);
+
+ ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
+ inode, bh, block);
+ }
+
+ /*
+ * If the extent to be freed does not begin on a cluster
+ * boundary, we need to deal with partial clusters at the
+ * beginning and end of the extent. Normally we will free
+ * blocks at the beginning or the end unless we are explicitly
+ * requested to avoid doing so.
+ */
+ overflow = EXT4_PBLK_COFF(sbi, block);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
+ overflow = sbi->s_cluster_ratio - overflow;
+ block += overflow;
+ if (count > overflow)
+ count -= overflow;
+ else
+ return;
+ } else {
+ block -= overflow;
+ count += overflow;
+ }
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
+ }
+ overflow = EXT4_LBLK_COFF(sbi, count);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
+ if (count > overflow)
+ count -= overflow;
+ else
+ return;
+ } else
+ count += sbi->s_cluster_ratio - overflow;
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
+ }
+
+ if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
+ int i;
+ int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
+
+ for (i = 0; i < count; i++) {
+ cond_resched();
+ if (is_metadata)
+ bh = sb_find_get_block_nonatomic(inode->i_sb,
+ block + i);
+ ext4_forget(handle, is_metadata, inode, bh, block + i);
+ }
+ }
+
+ ext4_mb_clear_bb(handle, inode, block, count, flags);
}
/**
@@ -6122,23 +6804,19 @@ error_return:
int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count)
{
- struct buffer_head *bitmap_bh = NULL;
- struct buffer_head *gd_bh;
ext4_group_t block_group;
ext4_grpblk_t bit;
- unsigned int i;
- struct ext4_group_desc *desc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_buddy e4b;
- int err = 0, ret, free_clusters_count;
- ext4_grpblk_t clusters_freed;
+ int err = 0;
ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
unsigned long cluster_count = last_cluster - first_cluster + 1;
+ ext4_grpblk_t changed;
ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
- if (count == 0)
+ if (cluster_count == 0)
return 0;
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
@@ -6150,103 +6828,39 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_warning(sb, "too many blocks added to group %u",
block_group);
err = -EINVAL;
- goto error_return;
+ goto error_out;
}
- bitmap_bh = ext4_read_block_bitmap(sb, block_group);
- if (IS_ERR(bitmap_bh)) {
- err = PTR_ERR(bitmap_bh);
- bitmap_bh = NULL;
- goto error_return;
- }
-
- desc = ext4_get_group_desc(sb, block_group, &gd_bh);
- if (!desc) {
- err = -EIO;
- goto error_return;
- }
+ err = ext4_mb_load_buddy(sb, block_group, &e4b);
+ if (err)
+ goto error_out;
- if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
- in_range(ext4_inode_bitmap(sb, desc), block, count) ||
- in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
- in_range(block + count - 1, ext4_inode_table(sb, desc),
- sbi->s_itb_per_group)) {
+ if (!ext4_sb_block_valid(sb, NULL, block, count)) {
ext4_error(sb, "Adding blocks in system zones - "
"Block = %llu, count = %lu",
block, count);
err = -EINVAL;
- goto error_return;
+ goto error_clean;
}
- BUFFER_TRACE(bitmap_bh, "getting write access");
- err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
- EXT4_JTR_NONE);
- if (err)
- goto error_return;
+ err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
+ cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
+ &changed);
+ if (err && changed == 0)
+ goto error_clean;
- /*
- * We are about to modify some metadata. Call the journal APIs
- * to unshare ->b_data if a currently-committing transaction is
- * using it
- */
- BUFFER_TRACE(gd_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
- if (err)
- goto error_return;
-
- for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
- BUFFER_TRACE(bitmap_bh, "clear bit");
- if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
- ext4_error(sb, "bit already cleared for block %llu",
- (ext4_fsblk_t)(block + i));
- BUFFER_TRACE(bitmap_bh, "bit already cleared");
- } else {
- clusters_freed++;
- }
- }
+ if (changed != cluster_count)
+ ext4_error(sb, "bit already cleared in group %u", block_group);
- err = ext4_mb_load_buddy(sb, block_group, &e4b);
- if (err)
- goto error_return;
-
- /*
- * need to update group_info->bb_free and bitmap
- * with group lock held. generate_buddy look at
- * them with group lock_held
- */
ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
mb_free_blocks(NULL, &e4b, bit, cluster_count);
- free_clusters_count = clusters_freed +
- ext4_free_group_clusters(sb, desc);
- ext4_free_group_clusters_set(sb, desc, free_clusters_count);
- ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
- ext4_group_desc_csum_set(sb, block_group, desc);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter,
- clusters_freed);
-
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic64_add(clusters_freed,
- &sbi_array_rcu_deref(sbi, s_flex_groups,
- flex_group)->free_clusters);
- }
+ changed);
+error_clean:
ext4_mb_unload_buddy(&e4b);
-
- /* We dirtied the bitmap block */
- BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
- err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
-
- /* And the group descriptor block */
- BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
- ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
- if (!err)
- err = ret;
-
-error_return:
- brelse(bitmap_bh);
+error_out:
ext4_std_error(sb, err);
return err;
}
@@ -6285,24 +6899,51 @@ __acquires(bitlock)
*/
mb_mark_used(e4b, &ex);
ext4_unlock_group(sb, group);
- ret = ext4_issue_discard(sb, group, start, count, NULL);
+ ret = ext4_issue_discard(sb, group, start, count);
ext4_lock_group(sb, group);
mb_free_blocks(NULL, e4b, start, ex.fe_len);
return ret;
}
+static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+{
+ unsigned long nr_clusters_in_group;
+
+ if (grp < (ext4_get_groups_count(sb) - 1))
+ nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
+ else
+ nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+ ext4_group_first_block_no(sb, grp))
+ >> EXT4_CLUSTER_BITS(sb);
+
+ return nr_clusters_in_group - 1;
+}
+
+static bool ext4_trim_interrupted(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
static int ext4_try_to_trim_range(struct super_block *sb,
struct ext4_buddy *e4b, ext4_grpblk_t start,
ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
- ext4_grpblk_t next, count, free_count;
+ ext4_grpblk_t next, count, free_count, last, origin_start;
+ bool set_trimmed = false;
void *bitmap;
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ return 0;
+
+ last = ext4_last_grp_cluster(sb, e4b->bd_group);
bitmap = e4b->bd_bitmap;
- start = (e4b->bd_info->bb_first_free > start) ?
- e4b->bd_info->bb_first_free : start;
+ if (start == 0 && max >= last)
+ set_trimmed = true;
+ origin_start = start;
+ start = max(e4b->bd_info->bb_first_free, start);
count = 0;
free_count = 0;
@@ -6310,22 +6951,23 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
start = mb_find_next_zero_bit(bitmap, max + 1, start);
if (start > max)
break;
- next = mb_find_next_bit(bitmap, max + 1, start);
+
+ next = mb_find_next_bit(bitmap, last + 1, start);
+ if (origin_start == 0 && next >= last)
+ set_trimmed = true;
if ((next - start) >= minblocks) {
int ret = ext4_trim_extent(sb, start, next - start, e4b);
if (ret && ret != -EOPNOTSUPP)
- break;
+ return count;
count += next - start;
}
free_count += next - start;
start = next + 1;
- if (fatal_signal_pending(current)) {
- count = -ERESTARTSYS;
- break;
- }
+ if (ext4_trim_interrupted())
+ return count;
if (need_resched()) {
ext4_unlock_group(sb, e4b->bd_group);
@@ -6337,6 +6979,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
break;
}
+ if (set_trimmed)
+ EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
+
return count;
}
@@ -6373,13 +7018,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_lock_group(sb, group);
if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
- minblocks < atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) {
+ minblocks < EXT4_SB(sb)->s_last_trim_minblks)
ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
- if (ret >= 0)
- EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
- } else {
+ else
ret = 0;
- }
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
@@ -6404,6 +7046,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
*/
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
+ unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
struct ext4_group_info *grp;
ext4_group_t group, first_group, last_group;
ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
@@ -6422,7 +7065,14 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
start >= max_blks ||
range->len < sb->s_blocksize)
return -EINVAL;
- if (end >= max_blks)
+ /* No point to try to trim less than discard granularity */
+ if (range->minlen < discard_granularity) {
+ minlen = EXT4_NUM_B2C(EXT4_SB(sb),
+ discard_granularity >> sb->s_blocksize_bits);
+ if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
+ goto out;
+ }
+ if (end >= max_blks - 1)
end = max_blks - 1;
if (end <= first_data_blk)
goto out;
@@ -6439,7 +7089,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
for (group = first_group; group <= last_group; group++) {
+ if (ext4_trim_interrupted())
+ break;
grp = ext4_get_group_info(sb, group);
+ if (!grp)
+ continue;
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
ret = ext4_mb_init_group(sb, group, GFP_NOFS);
@@ -6455,10 +7109,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
*/
if (group == last_group)
end = last_cluster;
-
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
- end, minlen);
+ end, minlen);
if (cnt < 0) {
ret = cnt;
break;
@@ -6474,7 +7127,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
if (!ret)
- atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
+ EXT4_SB(sb)->s_last_trim_minblks = minlen;
out:
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
@@ -6486,13 +7139,14 @@ int
ext4_mballoc_query_range(
struct super_block *sb,
ext4_group_t group,
- ext4_grpblk_t start,
+ ext4_grpblk_t first,
ext4_grpblk_t end,
+ ext4_mballoc_query_range_fn meta_formatter,
ext4_mballoc_query_range_fn formatter,
void *priv)
{
void *bitmap;
- ext4_grpblk_t next;
+ ext4_grpblk_t start, next;
struct ext4_buddy e4b;
int error;
@@ -6503,11 +7157,19 @@ ext4_mballoc_query_range(
ext4_lock_group(sb, group);
- start = (e4b.bd_info->bb_first_free > start) ?
- e4b.bd_info->bb_first_free : start;
+ start = max(e4b.bd_info->bb_first_free, first);
if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
-
+ if (meta_formatter && start != first) {
+ if (start > end)
+ start = end;
+ ext4_unlock_group(sb, group);
+ error = meta_formatter(sb, group, first, start - first,
+ priv);
+ if (error)
+ goto out_unload;
+ ext4_lock_group(sb, group);
+ }
while (start <= end) {
start = mb_find_next_zero_bit(bitmap, end + 1, start);
if (start > end)
@@ -6529,3 +7191,7 @@ out_unload:
return error;
}
+
+#ifdef CONFIG_EXT4_KUNIT_TESTS
+#include "mballoc-test.c"
+#endif
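As an illustration (outside the patch itself), the replay-path allocator added in the mballoc.c hunks above scans block bitmaps linearly from the goal block, skips blocks excluded by fast-commit replay, and wraps around to group 0 until every group has been visited. The following is a minimal, self-contained C model of that control flow only; all names here (toy_find_free, toy_excluded, TOY_*) are hypothetical and are not part of ext4.

/*
 * Illustrative sketch only -- not kernel code. Scan each "group" bitmap for
 * a free block starting at the goal offset, skip excluded blocks, and wrap
 * around to group 0 until every group has been tried.
 */
#include <stdbool.h>
#include <stddef.h>

#define TOY_GROUPS   4
#define TOY_PER_GRP  16

static bool toy_excluded(size_t blk)
{
	/* stand-in for the fast-commit exclusion check */
	return blk == 5 || blk == 6;
}

/* Returns the absolute number of the first usable free block, or -1 (no space). */
static long toy_find_free(const bool used[TOY_GROUPS][TOY_PER_GRP],
			  size_t goal_grp, size_t goal_off)
{
	size_t grp = goal_grp, off = goal_off;

	for (int nr = TOY_GROUPS; nr > 0; nr--) {
		for (size_t i = off; i < TOY_PER_GRP; i++) {
			size_t blk = grp * TOY_PER_GRP + i;

			if (!used[grp][i] && !toy_excluded(blk))
				return (long)blk;
		}
		grp = (grp + 1) % TOY_GROUPS;	/* wrap to group 0 */
		off = 0;			/* restart scan at group start */
	}
	return -1;
}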
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 39da92ceabf8..15a049f05d04 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -49,7 +49,7 @@
#define MB_DEFAULT_MIN_TO_SCAN 10
/*
- * with 'ext4_mb_stats' allocator will collect stats that will be
+ * with 's_mb_stats' allocator will collect stats that will be
* shown at umount. The collecting costs though!
*/
#define MB_DEFAULT_STATS 0
@@ -74,11 +74,6 @@
#define MB_DEFAULT_GROUP_PREALLOC 512
/*
- * maximum length of inode prealloc list
- */
-#define MB_DEFAULT_MAX_INODE_PREALLOC 512
-
-/*
* Number of groups to search linearly before performing group scanning
* optimization.
*/
@@ -91,6 +86,13 @@
#define MB_DEFAULT_LINEAR_SCAN_THRESHOLD 16
/*
+ * The maximum order up to which CR_BEST_AVAIL_LEN can trim a particular
+ * allocation request. For example, if we have an order 7 request and a max
+ * trim order of 3, we can trim this request down to order 4.
+ */
+#define MB_DEFAULT_BEST_AVAIL_TRIM_ORDER 3
+
+/*
* Number of valid buddy orders
*/
#define MB_NUM_ORDERS(sb) ((sb)->s_blocksize_bits + 2)
@@ -114,7 +116,10 @@ struct ext4_free_data {
};
struct ext4_prealloc_space {
- struct list_head pa_inode_list;
+ union {
+ struct rb_node inode_node; /* for inode PA rbtree */
+ struct list_head lg_list; /* for lg PAs */
+ } pa_node;
struct list_head pa_group_list;
union {
struct list_head pa_tmp_list;
@@ -128,8 +133,11 @@ struct ext4_prealloc_space {
ext4_grpblk_t pa_len; /* len of preallocated chunk */
ext4_grpblk_t pa_free; /* how many blocks are free */
unsigned short pa_type; /* pa type. inode or group */
- spinlock_t *pa_obj_lock;
- struct inode *pa_inode; /* hack, for history only */
+ union {
+ rwlock_t *inode_lock; /* locks the rbtree holding this PA */
+ spinlock_t *lg_lock; /* locks the lg list holding this PA */
+ } pa_node_lock;
+ struct inode *pa_inode; /* used to get the inode during group discard */
};
enum {
@@ -178,12 +186,22 @@ struct ext4_allocation_context {
/* copy of the best found extent taken before preallocation efforts */
struct ext4_free_extent ac_f_ex;
- ext4_group_t ac_last_optimal_group;
- __u32 ac_groups_considered;
+ /*
+ * goal len can change in CR_BEST_AVAIL_LEN, so save the original len.
+ * This is used while adjusting the PA window and for accounting.
+ */
+ ext4_grpblk_t ac_orig_goal_len;
+
+ ext4_group_t ac_prefetch_grp;
+ unsigned int ac_prefetch_ios;
+ unsigned int ac_prefetch_nr;
+
+ int ac_first_err;
+
__u32 ac_flags; /* allocation hints */
__u16 ac_groups_scanned;
- __u16 ac_groups_linear_remaining;
__u16 ac_found;
+ __u16 ac_cX_found[EXT4_MB_NUM_CRS];
__u16 ac_tail;
__u16 ac_buddy;
__u8 ac_status;
@@ -191,8 +209,10 @@ struct ext4_allocation_context {
__u8 ac_2order; /* if request is to allocate 2^N blocks and
* N > 0, the field stores N, otherwise 0 */
__u8 ac_op; /* operation, for history only */
- struct page *ac_bitmap_page;
- struct page *ac_buddy_page;
+
+ struct ext4_buddy *ac_e4b;
+ struct folio *ac_bitmap_folio;
+ struct folio *ac_buddy_folio;
struct ext4_prealloc_space *ac_pa;
struct ext4_locality_group *ac_lg;
};
@@ -202,9 +222,9 @@ struct ext4_allocation_context {
#define AC_STATUS_BREAK 3
struct ext4_buddy {
- struct page *bd_buddy_page;
+ struct folio *bd_buddy_folio;
void *bd_buddy;
- struct page *bd_bitmap_page;
+ struct folio *bd_bitmap_folio;
void *bd_bitmap;
struct ext4_group_info *bd_info;
struct super_block *bd_sb;
@@ -219,6 +239,20 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
+static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
+ struct ext4_free_extent *fex)
+{
+ /* Use loff_t to avoid end exceeding ext4_lblk_t max. */
+ return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
+}
+
+static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
+ struct ext4_prealloc_space *pa)
+{
+ /* Use loff_t to avoid end exceeding ext4_lblk_t max. */
+ return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
+}
+
typedef int (*ext4_mballoc_query_range_fn)(
struct super_block *sb,
ext4_group_t agno,
@@ -232,6 +266,7 @@ ext4_mballoc_query_range(
ext4_group_t agno,
ext4_grpblk_t start,
ext4_grpblk_t end,
+ ext4_mballoc_query_range_fn meta_formatter,
ext4_mballoc_query_range_fn formatter,
void *priv);
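A brief aside on the new extent_logical_end()/pa_logical_end() helpers above: they widen to loff_t before adding so that start + length cannot wrap a 32-bit logical block number. The small stand-alone C example below (hedged: toy_lblk_t merely stands in for ext4_lblk_t, and the values are arbitrary) shows the wraparound the cast avoids.

/* Sketch only: 32-bit addition wraps, the widened sum does not. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t toy_lblk_t;	/* plays the role of ext4_lblk_t */

int main(void)
{
	toy_lblk_t start = UINT32_MAX - 4;
	toy_lblk_t len = 16;

	uint32_t wrapped = start + len;		/* wraps around to 11 */
	int64_t end = (int64_t)start + len;	/* correct end: 4294967307 */

	printf("wrapped=%u widened=%lld\n", (unsigned)wrapped, (long long)end);
	return 0;
}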
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 7e0b4f81c6c0..1b0dfd963d3f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -37,7 +37,6 @@ static int finish_range(handle_t *handle, struct inode *inode,
path = ext4_find_extent(inode, lb->first_block, NULL, 0);
if (IS_ERR(path)) {
retval = PTR_ERR(path);
- path = NULL;
goto err_out;
}
@@ -53,11 +52,12 @@ static int finish_range(handle_t *handle, struct inode *inode,
retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
if (retval < 0)
goto err_out;
- retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
+ path = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
+ if (IS_ERR(path))
+ retval = PTR_ERR(path);
err_out:
up_write((&EXT4_I(inode)->i_data_sem));
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
lb->first_pblock = 0;
return retval;
}
@@ -409,7 +409,6 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
int ext4_ext_migrate(struct inode *inode)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
handle_t *handle;
int retval = 0, i;
__le32 *i_data;
@@ -417,15 +416,17 @@ int ext4_ext_migrate(struct inode *inode)
struct inode *tmp_inode = NULL;
struct migrate_struct lb;
unsigned long max_entries;
- __u32 goal;
+ __u32 goal, tmp_csum_seed;
uid_t owner[2];
+ int alloc_ctx;
/*
* If the filesystem does not support extents, or the inode
* already is extent-based, error out.
*/
if (!ext4_has_feature_extents(inode->i_sb) ||
- (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+ ext4_has_inline_data(inode))
return -EINVAL;
if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
@@ -434,15 +435,15 @@ int ext4_ext_migrate(struct inode *inode)
*/
return retval;
- percpu_down_write(&sbi->s_writepages_rwsem);
+ alloc_ctx = ext4_writepages_down_write(inode->i_sb);
/*
- * Worst case we can touch the allocation bitmaps, a bgd
- * block, and a block to link in the orphan list. We do need
- * need to worry about credits for modifying the quota inode.
+ * Worst case we can touch the allocation bitmaps and a block
+ * group descriptor block. We do need to worry about
+ * credits for modifying the quota inode.
*/
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
- 4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
+ 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
@@ -459,6 +460,14 @@ int ext4_ext_migrate(struct inode *inode)
ext4_journal_stop(handle);
goto out_unlock;
}
+ /*
+ * Use the correct seed for checksum (i.e. the seed from 'inode'). This
+ * is so that the metadata blocks will have the correct checksum after
+ * the migration.
+ */
+ ei = EXT4_I(inode);
+ tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
+ EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
i_size_write(tmp_inode, i_size_read(inode));
/*
* Set the i_nlink to zero so it will be deleted later
@@ -467,7 +476,6 @@ int ext4_ext_migrate(struct inode *inode)
clear_nlink(tmp_inode);
ext4_ext_tree_init(handle, tmp_inode);
- ext4_orphan_add(handle, tmp_inode);
ext4_journal_stop(handle);
/*
@@ -479,7 +487,7 @@ int ext4_ext_migrate(struct inode *inode)
* when we add extents we extent the journal
*/
/*
- * Even though we take i_mutex we can still cause block
+ * Even though we take i_rwsem we can still cause block
* allocation via mmap write to holes. If we have allocated
* new blocks we fail migrate. New block allocation will
* clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated
@@ -492,17 +500,10 @@ int ext4_ext_migrate(struct inode *inode)
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
if (IS_ERR(handle)) {
- /*
- * It is impossible to update on-disk structures without
- * a handle, so just rollback in-core changes and live other
- * work to orphan_list_cleanup()
- */
- ext4_orphan_del(NULL, tmp_inode);
retval = PTR_ERR(handle);
goto out_tmp_inode;
}
- ei = EXT4_I(inode);
i_data = ei->i_data;
memset(&lb, 0, sizeof(lb));
@@ -576,6 +577,7 @@ err_out:
* the inode is not visible to user space.
*/
tmp_inode->i_blocks = 0;
+ EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
@@ -585,7 +587,7 @@ out_tmp_inode:
unlock_new_inode(tmp_inode);
iput(tmp_inode);
out_unlock:
- percpu_up_write(&sbi->s_writepages_rwsem);
+ ext4_writepages_up_write(inode->i_sb, alloc_ctx);
return retval;
}
@@ -604,6 +606,7 @@ int ext4_ind_migrate(struct inode *inode)
ext4_fsblk_t blk;
handle_t *handle;
int ret, ret2 = 0;
+ int alloc_ctx;
if (!ext4_has_feature_extents(inode->i_sb) ||
(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
@@ -620,7 +623,7 @@ int ext4_ind_migrate(struct inode *inode)
if (test_opt(inode->i_sb, DELALLOC))
ext4_alloc_da_blocks(inode);
- percpu_down_write(&sbi->s_writepages_rwsem);
+ alloc_ctx = ext4_writepages_down_write(inode->i_sb);
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
if (IS_ERR(handle)) {
@@ -661,9 +664,9 @@ int ext4_ind_migrate(struct inode *inode)
if (unlikely(ret2 && !ret))
ret = ret2;
errout:
- ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
+ ext4_journal_stop(handle);
out_unlock:
- percpu_up_write(&sbi->s_writepages_rwsem);
+ ext4_writepages_up_write(inode->i_sb, alloc_ctx);
return ret;
}
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index cebea4270817..6f57c181ff77 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -14,14 +14,14 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
int offset = offsetof(struct mmp_struct, mmp_checksum);
__u32 csum;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
+ csum = ext4_chksum(sbi->s_csum_seed, (char *)mmp, offset);
return cpu_to_le32(csum);
}
static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
{
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
@@ -29,7 +29,7 @@ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
{
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
@@ -39,28 +39,32 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
* Write the MMP block using REQ_SYNC to try to get the block on-disk
* faster.
*/
-static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+static int write_mmp_block_thawed(struct super_block *sb,
+ struct buffer_head *bh)
{
struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
- /*
- * We protect against freezing so that we don't create dirty buffers
- * on frozen filesystem.
- */
- sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
- sb_end_write(sb);
if (unlikely(!buffer_uptodate(bh)))
return -EIO;
-
return 0;
}
+static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+{
+ /*
+ * We protect against freezing so that we don't create dirty buffers
+ * on frozen filesystem.
+ */
+ scoped_guard(super_write, sb)
+ return write_mmp_block_thawed(sb, bh);
+}
+
/*
* Read the MMP block. It _must_ be read from disk and hence we clear the
* uptodate flag on the buffer.
@@ -86,7 +90,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
}
lock_buffer(*bh);
- ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL);
+ ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL, false);
if (ret)
goto warn_exit;
@@ -127,7 +131,7 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
*/
static int kmmpd(void *data)
{
- struct super_block *sb = (struct super_block *) data;
+ struct super_block *sb = data;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct buffer_head *bh = EXT4_SB(sb)->s_mmp_bh;
struct mmp_struct *mmp;
@@ -150,13 +154,11 @@ static int kmmpd(void *data)
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
EXT4_MMP_MIN_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
- BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
- bdevname(bh->b_bdev, mmp->mmp_bdevname);
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
sizeof(mmp->mmp_nodename));
- while (!kthread_should_stop() && !sb_rdonly(sb)) {
+ while (!kthread_should_stop() && !ext4_emergency_state(sb)) {
if (!ext4_has_feature_mmp(sb)) {
ext4_warning(sb, "kmmpd being stopped since MMP feature"
" has been disabled.");
@@ -225,9 +227,9 @@ static int kmmpd(void *data)
* Adjust the mmp_check_interval depending on how much time
* it took for the MMP block to be written.
*/
- mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
- EXT4_MMP_MAX_CHECK_INTERVAL),
- EXT4_MMP_MIN_CHECK_INTERVAL);
+ mmp_check_interval = clamp(EXT4_MMP_CHECK_MULT * diff / HZ,
+ EXT4_MMP_MIN_CHECK_INTERVAL,
+ EXT4_MMP_MAX_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
}
@@ -264,13 +266,7 @@ void ext4_stop_mmpd(struct ext4_sb_info *sbi)
*/
static unsigned int mmp_new_seq(void)
{
- u32 new_seq;
-
- do {
- new_seq = prandom_u32();
- } while (new_seq > EXT4_MMP_SEQ_MAX);
-
- return new_seq;
+ return get_random_u32_below(EXT4_MMP_SEQ_MAX + 1);
}
/*
@@ -290,6 +286,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
mmp_block >= ext4_blocks_count(es)) {
ext4_warning(sb, "Invalid MMP block in superblock");
+ retval = -EINVAL;
goto failed;
}
@@ -315,6 +312,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (seq == EXT4_MMP_SEQ_FSCK) {
dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
+ retval = -EBUSY;
goto failed;
}
@@ -328,6 +326,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+ retval = -ETIMEDOUT;
goto failed;
}
@@ -338,6 +337,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (seq != le32_to_cpu(mmp->mmp_seq)) {
dump_mmp_msg(sb, mmp,
"Device is already active on another node.");
+ retval = -EBUSY;
goto failed;
}
@@ -348,7 +348,11 @@ skip:
seq = mmp_new_seq();
mmp->mmp_seq = cpu_to_le32(seq);
- retval = write_mmp_block(sb, bh);
+ /*
+ * On mount / remount we are protected against fs freezing (by s_umount
+ * semaphore) and grabbing freeze protection upsets lockdep
+ */
+ retval = write_mmp_block_thawed(sb, bh);
if (retval)
goto failed;
@@ -357,6 +361,7 @@ skip:
*/
if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
ext4_warning(sb, "MMP startup interrupted, failing mount");
+ retval = -ETIMEDOUT;
goto failed;
}
@@ -367,22 +372,27 @@ skip:
if (seq != le32_to_cpu(mmp->mmp_seq)) {
dump_mmp_msg(sb, mmp,
"Device is already active on another node.");
+ retval = -EBUSY;
goto failed;
}
EXT4_SB(sb)->s_mmp_bh = bh;
+ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
+ snprintf(mmp->mmp_bdevname, sizeof(mmp->mmp_bdevname),
+ "%pg", bh->b_bdev);
+
/*
* Start a kernel thread to update the MMP block periodically.
*/
EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%.*s",
(int)sizeof(mmp->mmp_bdevname),
- bdevname(bh->b_bdev,
- mmp->mmp_bdevname));
+ mmp->mmp_bdevname);
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
EXT4_SB(sb)->s_mmp_tsk = NULL;
ext4_warning(sb, "Unable to create kmmpd thread for %s.",
sb->s_id);
+ retval = -ENOMEM;
goto failed;
}
@@ -390,5 +400,5 @@ skip:
failed:
brelse(bh);
- return 1;
+ return retval;
}
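For context on the kmmpd hunk above, clamp(val, lo, hi) is equivalent to the max(min(val, hi), lo) pattern it replaces whenever lo <= hi; the kernel provides clamp() itself, and the toy helper below is only a hedged illustration of that equivalence.

/* Sketch only: same result as max(min(val, hi), lo) for lo <= hi. */
static inline unsigned long toy_clamp(unsigned long val,
				      unsigned long lo, unsigned long hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}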
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 64a579734f93..0550fd30fd10 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -8,37 +8,19 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"
-/**
- * get_ext_path() - Find an extent path for designated logical block number.
- * @inode: inode to be searched
- * @lblock: logical block number to find an extent path
- * @ppath: pointer to an extent path pointer (for output)
- *
- * ext4_find_extent wrapper. Return 0 on success, or a negative error value
- * on failure.
- */
-static inline int
-get_ext_path(struct inode *inode, ext4_lblk_t lblock,
- struct ext4_ext_path **ppath)
-{
- struct ext4_ext_path *path;
-
- path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
- if (IS_ERR(path))
- return PTR_ERR(path);
- if (path[ext_depth(inode)].p_ext == NULL) {
- ext4_ext_drop_refs(path);
- kfree(path);
- *ppath = NULL;
- return -ENODATA;
- }
- *ppath = path;
- return 0;
-}
+#include <trace/events/ext4.h>
+
+struct mext_data {
+ struct inode *orig_inode; /* Origin file inode */
+ struct inode *donor_inode; /* Donor file inode */
+ struct ext4_map_blocks orig_map;/* Origin file's move mapping */
+ ext4_lblk_t donor_lblk; /* Start block of the donor file */
+};
/**
* ext4_double_down_write_data_sem() - write lock two inodes's i_data_sem
@@ -56,7 +38,6 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
} else {
down_write(&EXT4_I(second)->i_data_sem);
down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
-
}
}
@@ -75,59 +56,14 @@ ext4_double_up_write_data_sem(struct inode *orig_inode,
up_write(&EXT4_I(donor_inode)->i_data_sem);
}
-/**
- * mext_check_coverage - Check that all extents in range has the same type
- *
- * @inode: inode in question
- * @from: block offset of inode
- * @count: block count to be checked
- * @unwritten: extents expected to be unwritten
- * @err: pointer to save error value
- *
- * Return 1 if all extents in range has expected type, and zero otherwise.
- */
-static int
-mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
- int unwritten, int *err)
-{
- struct ext4_ext_path *path = NULL;
- struct ext4_extent *ext;
- int ret = 0;
- ext4_lblk_t last = from + count;
- while (from < last) {
- *err = get_ext_path(inode, from, &path);
- if (*err)
- goto out;
- ext = path[ext_depth(inode)].p_ext;
- if (unwritten != ext4_ext_is_unwritten(ext))
- goto out;
- from += ext4_ext_get_actual_len(ext);
- ext4_ext_drop_refs(path);
- }
- ret = 1;
-out:
- ext4_ext_drop_refs(path);
- kfree(path);
- return ret;
-}
-
-/**
- * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
- *
- * @inode1: the inode structure
- * @inode2: the inode structure
- * @index1: page index
- * @index2: page index
- * @page: result page vector
- *
- * Grab two locked pages for inode's by inode order
- */
-static int
-mext_page_double_lock(struct inode *inode1, struct inode *inode2,
- pgoff_t index1, pgoff_t index2, struct page *page[2])
+/* Grab and lock a folio on each of @inode1 and @inode2, in inode order. */
+static int mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
+ pgoff_t index1, pgoff_t index2, size_t len,
+ struct folio *folio[2])
{
struct address_space *mapping[2];
- unsigned fl = AOP_FLAG_NOFS;
+ unsigned int flags;
+ fgf_t fgp_flags = FGP_WRITEBEGIN;
BUG_ON(!inode1 || !inode2);
if (inode1 < inode2) {
@@ -139,357 +75,425 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
mapping[1] = inode1->i_mapping;
}
- page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
- if (!page[0])
- return -ENOMEM;
+ flags = memalloc_nofs_save();
+ fgp_flags |= fgf_set_order(len);
+ folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
+ mapping_gfp_mask(mapping[0]));
+ if (IS_ERR(folio[0])) {
+ memalloc_nofs_restore(flags);
+ return PTR_ERR(folio[0]);
+ }
- page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
- if (!page[1]) {
- unlock_page(page[0]);
- put_page(page[0]);
- return -ENOMEM;
+ folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
+ mapping_gfp_mask(mapping[1]));
+ memalloc_nofs_restore(flags);
+ if (IS_ERR(folio[1])) {
+ folio_unlock(folio[0]);
+ folio_put(folio[0]);
+ return PTR_ERR(folio[1]);
}
/*
- * grab_cache_page_write_begin() may not wait on page's writeback if
+ * __filemap_get_folio() may not wait on folio's writeback if
* BDI not demand that. But it is reasonable to be very conservative
- * here and explicitly wait on page's writeback
+ * here and explicitly wait on folio's writeback
*/
- wait_on_page_writeback(page[0]);
- wait_on_page_writeback(page[1]);
+ folio_wait_writeback(folio[0]);
+ folio_wait_writeback(folio[1]);
if (inode1 > inode2)
- swap(page[0], page[1]);
+ swap(folio[0], folio[1]);
return 0;
}
-/* Force page buffers uptodate w/o dropping page's lock */
-static int
-mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
+static void mext_folio_double_unlock(struct folio *folio[2])
+{
+ folio_unlock(folio[0]);
+ folio_put(folio[0]);
+ folio_unlock(folio[1]);
+ folio_put(folio[1]);
+}
+
+/* Force folio buffers uptodate w/o dropping folio's lock */
+static int mext_folio_mkuptodate(struct folio *folio, size_t from, size_t to)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
sector_t block;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ struct buffer_head *bh, *head;
unsigned int blocksize, block_start, block_end;
- int i, err, nr = 0, partial = 0;
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
+ int nr = 0;
+ bool partial = false;
- if (PageUptodate(page))
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(folio_test_writeback(folio));
+
+ if (folio_test_uptodate(folio))
return 0;
blocksize = i_blocksize(inode);
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
-
- head = page_buffers(page);
- block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
- for (bh = head, block_start = 0; bh != head || !block_start;
- block++, block_start = block_end, bh = bh->b_this_page) {
+ head = folio_buffers(folio);
+ if (!head)
+ head = create_empty_buffers(folio, blocksize, 0);
+
+ block = folio_pos(folio) >> inode->i_blkbits;
+ block_end = 0;
+ bh = head;
+ do {
+ block_start = block_end;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
- partial = 1;
+ partial = true;
continue;
}
if (buffer_uptodate(bh))
continue;
if (!buffer_mapped(bh)) {
- err = ext4_get_block(inode, block, bh, 0);
- if (err) {
- SetPageError(page);
+ int err = ext4_get_block(inode, block, bh, 0);
+ if (err)
return err;
- }
if (!buffer_mapped(bh)) {
- zero_user(page, block_start, blocksize);
+ folio_zero_range(folio, block_start, blocksize);
set_buffer_uptodate(bh);
continue;
}
}
- BUG_ON(nr >= MAX_BUF_PER_PAGE);
- arr[nr++] = bh;
- }
+ lock_buffer(bh);
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+ ext4_read_bh_nowait(bh, 0, NULL, false);
+ nr++;
+ } while (block++, (bh = bh->b_this_page) != head);
+
/* No io required */
if (!nr)
goto out;
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (!bh_uptodate_or_lock(bh)) {
- err = ext4_read_bh(bh, 0, NULL);
- if (err)
- return err;
- }
- }
+ bh = head;
+ do {
+ if (bh_offset(bh) + blocksize <= from)
+ continue;
+ if (bh_offset(bh) >= to)
+ break;
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ continue;
+ return -EIO;
+ } while ((bh = bh->b_this_page) != head);
out:
if (!partial)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
return 0;
}
-/**
- * move_extent_per_page - Move extent data per page
- *
- * @o_filp: file structure of original file
- * @donor_inode: donor inode
- * @orig_page_offset: page index on original file
- * @donor_page_offset: page index on donor file
- * @data_offset_in_page: block index where data swapping starts
- * @block_len_in_page: the number of blocks to be swapped
- * @unwritten: orig extent is unwritten or not
- * @err: pointer to save return value
- *
- * Save the data in original inode blocks and replace original inode extents
- * with donor inode extents by calling ext4_swap_extents().
- * Finally, write out the saved data in new original inode blocks. Return
- * replaced block count.
+enum mext_move_type {MEXT_SKIP_EXTENT, MEXT_MOVE_EXTENT, MEXT_COPY_DATA};
+
+/*
+ * Start moving an extent between the original inode and the donor inode:
+ * lock one folio for each inode and re-check the mapping status of the
+ * candidate extent under the folio locks.
*/
-static int
-move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
- pgoff_t orig_page_offset, pgoff_t donor_page_offset,
- int data_offset_in_page,
- int block_len_in_page, int unwritten, int *err)
+static int mext_move_begin(struct mext_data *mext, struct folio *folio[2],
+ enum mext_move_type *move_type)
{
- struct inode *orig_inode = file_inode(o_filp);
- struct page *pagep[2] = {NULL, NULL};
- handle_t *handle;
- ext4_lblk_t orig_blk_offset, donor_blk_offset;
- unsigned long blocksize = orig_inode->i_sb->s_blocksize;
- unsigned int tmp_data_size, data_size, replaced_size;
- int i, err2, jblocks, retries = 0;
- int replaced_count = 0;
- int from = data_offset_in_page << orig_inode->i_blkbits;
- int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
- struct super_block *sb = orig_inode->i_sb;
- struct buffer_head *bh = NULL;
+ struct inode *orig_inode = mext->orig_inode;
+ struct inode *donor_inode = mext->donor_inode;
+ unsigned int blkbits = orig_inode->i_blkbits;
+ struct ext4_map_blocks donor_map = {0};
+ loff_t orig_pos, donor_pos;
+ size_t move_len;
+ int ret;
+
+ orig_pos = ((loff_t)mext->orig_map.m_lblk) << blkbits;
+ donor_pos = ((loff_t)mext->donor_lblk) << blkbits;
+ ret = mext_folio_double_lock(orig_inode, donor_inode,
+ orig_pos >> PAGE_SHIFT, donor_pos >> PAGE_SHIFT,
+ ((size_t)mext->orig_map.m_len) << blkbits, folio);
+ if (ret)
+ return ret;
/*
- * It needs twice the amount of ordinary journal buffers because
- * inode and donor_inode may change each different metadata blocks.
+ * Check the original inode's mapping information again under the
+ * folio lock: we do not hold i_data_sem the whole time, so the
+ * mapping may change under concurrent writeback.
*/
-again:
- *err = 0;
- jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
- handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
+ if (mext->orig_map.m_seq != READ_ONCE(EXT4_I(orig_inode)->i_es_seq)) {
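+ /* Return -ESTALE so the caller re-maps the origin range and retries. */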
+ ret = -ESTALE;
+ goto error;
+ }
+
+ /* Adjust the moving length according to the length of the shorter folio. */
+ move_len = umin(folio_pos(folio[0]) + folio_size(folio[0]) - orig_pos,
+ folio_pos(folio[1]) + folio_size(folio[1]) - donor_pos);
+ move_len >>= blkbits;
+ if (move_len < mext->orig_map.m_len)
+ mext->orig_map.m_len = move_len;
+
+ donor_map.m_lblk = mext->donor_lblk;
+ donor_map.m_len = mext->orig_map.m_len;
+ donor_map.m_flags = 0;
+ ret = ext4_map_blocks(NULL, donor_inode, &donor_map, 0);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the moving length according to the donor mapping length. */
+ mext->orig_map.m_len = donor_map.m_len;
+
+ /* Skip moving if the donor range is a hole or a delalloc extent. */
+ if (!(donor_map.m_flags & (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN)))
+ *move_type = MEXT_SKIP_EXTENT;
+ /* If both mapping ranges are unwritten, no need to copy data. */
+ else if ((mext->orig_map.m_flags & EXT4_MAP_UNWRITTEN) &&
+ (donor_map.m_flags & EXT4_MAP_UNWRITTEN))
+ *move_type = MEXT_MOVE_EXTENT;
+ else
+ *move_type = MEXT_COPY_DATA;
+
+ return 0;
+error:
+ mext_folio_double_unlock(folio);
+ return ret;
+}
+
+/*
+ * Re-create the buffer mappings for the newly moved extent of the original
+ * inode and commit the entire written range.
+ */
+static int mext_folio_mkwrite(struct inode *inode, struct folio *folio,
+ size_t from, size_t to)
+{
+ unsigned int blocksize = i_blocksize(inode);
+ struct buffer_head *bh, *head;
+ size_t block_start, block_end;
+ sector_t block;
+ int ret;
+
+ head = folio_buffers(folio);
+ if (!head)
+ head = create_empty_buffers(folio, blocksize, 0);
+
+ block = folio_pos(folio) >> inode->i_blkbits;
+ block_end = 0;
+ bh = head;
+ do {
+ block_start = block_end;
+ block_end = block_start + blocksize;
+ if (block_end <= from || block_start >= to)
+ continue;
+
+ ret = ext4_get_block(inode, block, bh, 0);
+ if (ret)
+ return ret;
+ } while (block++, (bh = bh->b_this_page) != head);
+
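+ /* Like write_begin()/write_end(), but i_size does not change here. */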
+ block_commit_write(folio, from, to);
+ return 0;
+}
+
+/*
+ * Save the data in the original inode's extent blocks, replace one folio-size
+ * aligned original inode extent with one full or one partial donor inode
+ * extent, and then write out the saved data into the newly mapped original
+ * inode blocks. The replaced block count is passed back through m_len.
+ * Return 0 on success, and a negative error code otherwise.
+ */
+static int mext_move_extent(struct mext_data *mext, u64 *m_len)
+{
+ struct inode *orig_inode = mext->orig_inode;
+ struct inode *donor_inode = mext->donor_inode;
+ struct ext4_map_blocks *orig_map = &mext->orig_map;
+ unsigned int blkbits = orig_inode->i_blkbits;
+ struct folio *folio[2] = {NULL, NULL};
+ loff_t from, length;
+ enum mext_move_type move_type = 0;
+ handle_t *handle;
+ u64 r_len = 0;
+ unsigned int credits;
+ int ret, ret2;
+
+ *m_len = 0;
+ trace_ext4_move_extent_enter(orig_inode, orig_map, donor_inode,
+ mext->donor_lblk);
+ credits = ext4_chunk_trans_extent(orig_inode, 0) * 2;
+ handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, credits);
if (IS_ERR(handle)) {
- *err = PTR_ERR(handle);
- return 0;
+ ret = PTR_ERR(handle);
+ goto out;
}
- orig_blk_offset = orig_page_offset * blocks_per_page +
- data_offset_in_page;
-
- donor_blk_offset = donor_page_offset * blocks_per_page +
- data_offset_in_page;
-
- /* Calculate data_size */
- if ((orig_blk_offset + block_len_in_page - 1) ==
- ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
- /* Replace the last block */
- tmp_data_size = orig_inode->i_size & (blocksize - 1);
- /*
- * If data_size equal zero, it shows data_size is multiples of
- * blocksize. So we set appropriate value.
- */
- if (tmp_data_size == 0)
- tmp_data_size = blocksize;
-
- data_size = tmp_data_size +
- ((block_len_in_page - 1) << orig_inode->i_blkbits);
- } else
- data_size = block_len_in_page << orig_inode->i_blkbits;
-
- replaced_size = data_size;
-
- *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
- donor_page_offset, pagep);
- if (unlikely(*err < 0))
- goto stop_journal;
+ ret = mext_move_begin(mext, folio, &move_type);
+ if (ret)
+ goto stop_handle;
+
+ if (move_type == MEXT_SKIP_EXTENT)
+ goto unlock;
+
/*
- * If orig extent was unwritten it can become initialized
- * at any time after i_data_sem was dropped, in order to
- * serialize with delalloc we have recheck extent while we
- * hold page's lock, if it is still the case data copy is not
- * necessary, just swap data blocks between orig and donor.
+ * Copy the data. First, read the original inode data into the page
+ * cache. Then, release the existing mapping relationships and swap
+ * the extent. Finally, re-establish the new mapping relationships
+ * and dirty the page cache.
*/
- if (unwritten) {
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- /* If any of extents in range became initialized we have to
- * fallback to data copying */
- unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
- block_len_in_page, 1, err);
- if (*err)
- goto drop_data_sem;
-
- unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
- block_len_in_page, 1, err);
- if (*err)
- goto drop_data_sem;
-
- if (!unwritten) {
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- goto data_copy;
- }
- if ((page_has_private(pagep[0]) &&
- !try_to_release_page(pagep[0], 0)) ||
- (page_has_private(pagep[1]) &&
- !try_to_release_page(pagep[1], 0))) {
- *err = -EBUSY;
- goto drop_data_sem;
- }
- replaced_count = ext4_swap_extents(handle, orig_inode,
- donor_inode, orig_blk_offset,
- donor_blk_offset,
- block_len_in_page, 1, err);
- drop_data_sem:
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- goto unlock_pages;
+ if (move_type == MEXT_COPY_DATA) {
+ from = offset_in_folio(folio[0],
+ ((loff_t)orig_map->m_lblk) << blkbits);
+ length = ((loff_t)orig_map->m_len) << blkbits;
+
+ ret = mext_folio_mkuptodate(folio[0], from, from + length);
+ if (ret)
+ goto unlock;
}
-data_copy:
- *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
- if (*err)
- goto unlock_pages;
-
- /* At this point all buffers in range are uptodate, old mapping layout
- * is no longer required, try to drop it now. */
- if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
- (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
- *err = -EBUSY;
- goto unlock_pages;
+
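+ /* The old mapping layout is no longer needed; try to drop the buffers now. */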
+ if (!filemap_release_folio(folio[0], 0) ||
+ !filemap_release_folio(folio[1], 0)) {
+ ret = -EBUSY;
+ goto unlock;
}
+
+ /* Move extent */
ext4_double_down_write_data_sem(orig_inode, donor_inode);
- replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
- orig_blk_offset, donor_blk_offset,
- block_len_in_page, 1, err);
+ *m_len = ext4_swap_extents(handle, orig_inode, donor_inode,
+ orig_map->m_lblk, mext->donor_lblk,
+ orig_map->m_len, 1, &ret);
ext4_double_up_write_data_sem(orig_inode, donor_inode);
- if (*err) {
- if (replaced_count) {
- block_len_in_page = replaced_count;
- replaced_size =
- block_len_in_page << orig_inode->i_blkbits;
- } else
- goto unlock_pages;
- }
- /* Perform all necessary steps similar write_begin()/write_end()
- * but keeping in mind that i_size will not change */
- if (!page_has_buffers(pagep[0]))
- create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
- bh = page_buffers(pagep[0]);
- for (i = 0; i < data_offset_in_page; i++)
- bh = bh->b_this_page;
- for (i = 0; i < block_len_in_page; i++) {
- *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
- if (*err < 0)
- break;
- bh = bh->b_this_page;
- }
- if (!*err)
- *err = block_commit_write(pagep[0], from, from + replaced_size);
- if (unlikely(*err < 0))
- goto repair_branches;
+ /* A short swap must not happen when ext4_swap_extents() reports success. */
+ if (WARN_ON_ONCE(!ret && (*m_len != orig_map->m_len)))
+ ret = -EIO;
- /* Even in case of data=writeback it is reasonable to pin
- * inode to transaction, to prevent unexpected data loss */
- *err = ext4_jbd2_inode_add_write(handle, orig_inode,
- (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
-
-unlock_pages:
- unlock_page(pagep[0]);
- put_page(pagep[0]);
- unlock_page(pagep[1]);
- put_page(pagep[1]);
-stop_journal:
- ext4_journal_stop(handle);
- if (*err == -ENOSPC &&
- ext4_should_retry_alloc(sb, &retries))
- goto again;
- /* Buffer was busy because probably is pinned to journal transaction,
- * force transaction commit may help to free it. */
- if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
- goto again;
- return replaced_count;
+ if (!(*m_len) || (move_type == MEXT_MOVE_EXTENT))
+ goto unlock;
-repair_branches:
+ /* Copy data */
+ length = (*m_len) << blkbits;
+ ret2 = mext_folio_mkwrite(orig_inode, folio[0], from, from + length);
+ if (ret2) {
+ if (!ret)
+ ret = ret2;
+ goto repair_branches;
+ }
/*
- * This should never ever happen!
- * Extents are swapped already, but we are not able to copy data.
- * Try to swap extents to it's original places
+ * Even in the data=writeback case it is reasonable to pin the
+ * inode to the transaction, to prevent unexpected data loss.
*/
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
- orig_blk_offset, donor_blk_offset,
- block_len_in_page, 0, &err2);
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- if (replaced_count != block_len_in_page) {
- ext4_error_inode_block(orig_inode, (sector_t)(orig_blk_offset),
- EIO, "Unable to copy data block,"
- " data will be lost.");
- *err = -EIO;
+ ret2 = ext4_jbd2_inode_add_write(handle, orig_inode,
+ ((loff_t)orig_map->m_lblk) << blkbits, length);
+ if (!ret)
+ ret = ret2;
+unlock:
+ mext_folio_double_unlock(folio);
+stop_handle:
+ ext4_journal_stop(handle);
+out:
+ trace_ext4_move_extent_exit(orig_inode, orig_map->m_lblk, donor_inode,
+ mext->donor_lblk, orig_map->m_len, *m_len,
+ move_type, ret);
+ return ret;
+
+repair_branches:
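+ /*
+ * The extents have already been swapped, but the data could not be
+ * copied; try to swap the extents back to their original places.
+ */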
+ ret2 = 0;
+ r_len = ext4_swap_extents(handle, donor_inode, orig_inode,
+ mext->donor_lblk, orig_map->m_lblk,
+ *m_len, 0, &ret2);
+ if (ret2 || r_len != *m_len) {
+ ext4_error_inode_block(orig_inode, (sector_t)(orig_map->m_lblk),
+ EIO, "Unable to copy data block, data will be lost!");
+ ret = -EIO;
}
- replaced_count = 0;
- goto unlock_pages;
+ *m_len = 0;
+ goto unlock;
}
-/**
- * mext_check_arguments - Check whether move extent can be done
- *
- * @orig_inode: original inode
- * @donor_inode: donor inode
- * @orig_start: logical start offset in block for orig
- * @donor_start: logical start offset in block for donor
- * @len: the number of blocks to be moved
- *
- * Check the arguments of ext4_move_extents() whether the files can be
- * exchanged with each other.
- * Return 0 on success, or a negative error value on failure.
+/*
+ * Check the basic filesystem environment and whether both inodes
+ * support the move extent operation.
*/
-static int
-mext_check_arguments(struct inode *orig_inode,
- struct inode *donor_inode, __u64 orig_start,
- __u64 donor_start, __u64 *len)
+static int mext_check_validity(struct inode *orig_inode,
+ struct inode *donor_inode)
{
- __u64 orig_eof, donor_eof;
- unsigned int blkbits = orig_inode->i_blkbits;
- unsigned int blocksize = 1 << blkbits;
+ struct super_block *sb = orig_inode->i_sb;
- orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
- donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;
+ /* origin and donor should be different inodes */
+ if (orig_inode == donor_inode) {
+ ext4_debug("ext4 move extent: The argument files should not be same inode [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+ /* origin and donor should belong to the same filesystem */
+ if (orig_inode->i_sb != donor_inode->i_sb) {
+ ext4_debug("ext4 move extent: The argument files should be in the same FS [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
- if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
- ext4_debug("ext4 move extent: suid or sgid is set"
- " to donor file [ino:orig %lu, donor %lu]\n",
+ /* Regular file check */
+ if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
+ ext4_debug("ext4 move extent: The argument files should be regular file [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
- if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
- return -EPERM;
+ if (ext4_has_feature_bigalloc(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with bigalloc");
+ return -EOPNOTSUPP;
+ }
- /* Ext4 move extent does not support swapfile */
- if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
- ext4_debug("ext4 move extent: The argument files should "
- "not be swapfile [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EBUSY;
+ if (IS_DAX(orig_inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with DAX");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * TODO: it's not obvious how to swap blocks for inodes with full
+ * journaling enabled.
+ */
+ if (ext4_should_journal_data(orig_inode) ||
+ ext4_should_journal_data(donor_inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with data journaling");
+ return -EOPNOTSUPP;
}
- if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
- ext4_debug("ext4 move extent: The argument files should "
- "not be quota files [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EBUSY;
+ if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported for encrypted files");
+ return -EOPNOTSUPP;
}
/* Ext4 move extent supports only extent based file */
- if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
- ext4_debug("ext4 move extent: orig file is not extents "
- "based file [ino:orig %lu]\n", orig_inode->i_ino);
+ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS)) ||
+ !(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported for non-extent files");
return -EOPNOTSUPP;
- } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
- ext4_debug("ext4 move extent: donor file is not extents "
- "based file [ino:donor %lu]\n", donor_inode->i_ino);
+ }
+
+ if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
+ ext4_debug("ext4 move extent: suid or sgid is set to donor file [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+
+ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode)) {
+ ext4_debug("ext4 move extent: donor should not be immutable or append file [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EPERM;
+ }
+
+ /* Ext4 move extent does not support swap files */
+ if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
+ ext4_debug("ext4 move extent: The argument files should not be swap files [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -ETXTBSY;
+ }
+
+ if (ext4_is_quota_file(orig_inode) || ext4_is_quota_file(donor_inode)) {
+ ext4_debug("ext4 move extent: The argument files should not be quota files [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
return -EOPNOTSUPP;
}
@@ -498,12 +502,25 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}
+ return 0;
+}
+
+/*
+ * Check whether the range passed to ext4_move_extents() allows the files to
+ * be exchanged with each other, and adjust the length to fit within the file
+ * sizes. Return 0 on success, or a negative error value on failure.
+ */
+static int mext_check_adjust_range(struct inode *orig_inode,
+ struct inode *donor_inode, __u64 orig_start,
+ __u64 donor_start, __u64 *len)
+{
+ __u64 orig_eof, donor_eof;
+
/* Start offset should be same */
if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
(donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
- ext4_debug("ext4 move extent: orig and donor's start "
- "offsets are not aligned [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
+ ext4_debug("ext4 move extent: orig and donor's start offsets are not aligned [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
@@ -512,11 +529,14 @@ mext_check_arguments(struct inode *orig_inode,
(*len > EXT_MAX_BLOCKS) ||
(donor_start + *len >= EXT_MAX_BLOCKS) ||
(orig_start + *len >= EXT_MAX_BLOCKS)) {
- ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
- "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
- orig_inode->i_ino, donor_inode->i_ino);
+ ext4_debug("ext4 move extent: Can't handle over [%u] blocks [ino:orig %lu, donor %lu]\n",
+ EXT_MAX_BLOCKS,
+ orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
+
+ orig_eof = EXT4_B_TO_LBLK(orig_inode, i_size_read(orig_inode));
+ donor_eof = EXT4_B_TO_LBLK(donor_inode, i_size_read(donor_inode));
if (orig_eof <= orig_start)
*len = 0;
else if (orig_eof < orig_start + *len - 1)
@@ -526,9 +546,8 @@ mext_check_arguments(struct inode *orig_inode,
else if (donor_eof < donor_start + *len - 1)
*len = donor_eof - donor_start;
if (!*len) {
- ext4_debug("ext4 move extent: len should not be 0 "
- "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
- donor_inode->i_ino);
+ ext4_debug("ext4 move extent: len should not be 0 [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
@@ -547,153 +566,89 @@ mext_check_arguments(struct inode *orig_inode,
*
* This function returns 0 and moved block length is set in moved_len
* if succeed, otherwise returns error value.
- *
*/
-int
-ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
- __u64 donor_blk, __u64 len, __u64 *moved_len)
+int ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ __u64 donor_blk, __u64 len, __u64 *moved_len)
{
struct inode *orig_inode = file_inode(o_filp);
struct inode *donor_inode = file_inode(d_filp);
- struct ext4_ext_path *path = NULL;
- int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
- ext4_lblk_t o_end, o_start = orig_blk;
- ext4_lblk_t d_start = donor_blk;
+ struct mext_data mext;
+ struct super_block *sb = orig_inode->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int retries = 0;
+ u64 m_len;
int ret;
- if (orig_inode->i_sb != donor_inode->i_sb) {
- ext4_debug("ext4 move extent: The argument files "
- "should be in same FS [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
- /* orig and donor should be different inodes */
- if (orig_inode == donor_inode) {
- ext4_debug("ext4 move extent: The argument files should not "
- "be same inode [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
- /* Regular file check */
- if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
- ext4_debug("ext4 move extent: The argument files should be "
- "regular file [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
- /* TODO: it's not obvious how to swap blocks for inodes with full
- journaling enabled */
- if (ext4_should_journal_data(orig_inode) ||
- ext4_should_journal_data(donor_inode)) {
- ext4_msg(orig_inode->i_sb, KERN_ERR,
- "Online defrag not supported with data journaling");
- return -EOPNOTSUPP;
- }
-
- if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
- ext4_msg(orig_inode->i_sb, KERN_ERR,
- "Online defrag not supported for encrypted files");
- return -EOPNOTSUPP;
- }
+ *moved_len = 0;
/* Protect orig and donor inodes against a truncate */
lock_two_nondirectories(orig_inode, donor_inode);
+ ret = mext_check_validity(orig_inode, donor_inode);
+ if (ret)
+ goto out;
+
/* Wait for all existing dio workers */
inode_dio_wait(orig_inode);
inode_dio_wait(donor_inode);
- /* Protect extent tree against block allocations via delalloc */
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- /* Check the filesystem environment whether move_extent can be done */
- ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
- donor_blk, &len);
+ /* Check and adjust the specified move_extent range. */
+ ret = mext_check_adjust_range(orig_inode, donor_inode, orig_blk,
+ donor_blk, &len);
if (ret)
goto out;
- o_end = o_start + len;
- while (o_start < o_end) {
- struct ext4_extent *ex;
- ext4_lblk_t cur_blk, next_blk;
- pgoff_t orig_page_index, donor_page_index;
- int offset_in_page;
- int unwritten, cur_len;
+ mext.orig_inode = orig_inode;
+ mext.donor_inode = donor_inode;
+ while (len) {
+ mext.orig_map.m_lblk = orig_blk;
+ mext.orig_map.m_len = len;
+ mext.orig_map.m_flags = 0;
+ mext.donor_lblk = donor_blk;
- ret = get_ext_path(orig_inode, o_start, &path);
- if (ret)
+ ret = ext4_map_blocks(NULL, orig_inode, &mext.orig_map, 0);
+ if (ret < 0)
goto out;
- ex = path[path->p_depth].p_ext;
- next_blk = ext4_ext_next_allocated_block(path);
- cur_blk = le32_to_cpu(ex->ee_block);
- cur_len = ext4_ext_get_actual_len(ex);
- /* Check hole before the start pos */
- if (cur_blk + cur_len - 1 < o_start) {
- if (next_blk == EXT_MAX_BLOCKS) {
- o_start = o_end;
- ret = -ENODATA;
- goto out;
+
+ /* Skip moving if it is a hole or a delalloc extent. */
+ if (mext.orig_map.m_flags &
+ (EXT4_MAP_MAPPED | EXT4_MAP_UNWRITTEN)) {
+ ret = mext_move_extent(&mext, &m_len);
+ *moved_len += m_len;
+ if (!ret)
+ goto next;
+
+ /* Move failed or partially failed. */
+ if (m_len) {
+ orig_blk += m_len;
+ donor_blk += m_len;
+ len -= m_len;
}
- d_start += next_blk - o_start;
- o_start = next_blk;
- continue;
- /* Check hole after the start pos */
- } else if (cur_blk > o_start) {
- /* Skip hole */
- d_start += cur_blk - o_start;
- o_start = cur_blk;
- /* Extent inside requested range ?*/
- if (cur_blk >= o_end)
- goto out;
- } else { /* in_range(o_start, o_blk, o_len) */
- cur_len += cur_blk - o_start;
+ if (ret == -ESTALE)
+ continue;
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(sb, &retries))
+ continue;
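+ /*
+ * The buffers were probably busy because they are pinned to the
+ * running journal transaction; forcing a commit may free them.
+ */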
+ if (ret == -EBUSY &&
+ sbi->s_journal && retries++ < 4 &&
+ jbd2_journal_force_commit_nested(sbi->s_journal))
+ continue;
+
+ goto out;
}
- unwritten = ext4_ext_is_unwritten(ex);
- if (o_end - o_start < cur_len)
- cur_len = o_end - o_start;
-
- orig_page_index = o_start >> (PAGE_SHIFT -
- orig_inode->i_blkbits);
- donor_page_index = d_start >> (PAGE_SHIFT -
- donor_inode->i_blkbits);
- offset_in_page = o_start % blocks_per_page;
- if (cur_len > blocks_per_page- offset_in_page)
- cur_len = blocks_per_page - offset_in_page;
- /*
- * Up semaphore to avoid following problems:
- * a. transaction deadlock among ext4_journal_start,
- * ->write_begin via pagefault, and jbd2_journal_commit
- * b. racing with ->readpage, ->write_begin, and ext4_get_block
- * in move_extent_per_page
- */
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
- /* Swap original branches with new branches */
- move_extent_per_page(o_filp, donor_inode,
- orig_page_index, donor_page_index,
- offset_in_page, cur_len,
- unwritten, &ret);
- ext4_double_down_write_data_sem(orig_inode, donor_inode);
- if (ret < 0)
- break;
- o_start += cur_len;
- d_start += cur_len;
+next:
+ orig_blk += mext.orig_map.m_len;
+ donor_blk += mext.orig_map.m_len;
+ len -= mext.orig_map.m_len;
+ retries = 0;
}
- *moved_len = o_start - orig_blk;
- if (*moved_len > len)
- *moved_len = len;
out:
if (*moved_len) {
- ext4_discard_preallocations(orig_inode, 0);
- ext4_discard_preallocations(donor_inode, 0);
+ ext4_discard_preallocations(orig_inode);
+ ext4_discard_preallocations(donor_inode);
}
- ext4_ext_drop_refs(path);
- kfree(path);
- ext4_double_up_write_data_sem(orig_inode, donor_inode);
unlock_two_nondirectories(orig_inode, donor_inode);
-
return ret;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 52c9bd154122..c4b5e252af0e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -54,6 +54,7 @@ static struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode,
ext4_lblk_t *block)
{
+ struct ext4_map_blocks map;
struct buffer_head *bh;
int err;
@@ -63,21 +64,41 @@ static struct buffer_head *ext4_append(handle_t *handle,
return ERR_PTR(-ENOSPC);
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+ map.m_lblk = *block;
+ map.m_len = 1;
+
+ /*
+ * We're appending a new directory block. Make sure the block is not
+ * already allocated, otherwise we would end up corrupting the
+ * directory.
+ */
+ err = ext4_map_blocks(NULL, inode, &map, 0);
+ if (err < 0)
+ return ERR_PTR(err);
+ if (err) {
+ EXT4_ERROR_INODE(inode, "Logical block already allocated");
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
if (IS_ERR(bh))
return bh;
inode->i_size += inode->i_sb->s_blocksize;
EXT4_I(inode)->i_disksize = inode->i_size;
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (err)
+ goto out;
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
EXT4_JTR_NONE);
- if (err) {
- brelse(bh);
- ext4_std_error(inode->i_sb, err);
- return ERR_PTR(err);
- }
+ if (err)
+ goto out;
return bh;
+
+out:
+ brelse(bh);
+ ext4_std_error(inode->i_sb, err);
+ return ERR_PTR(err);
}
static int ext4_dx_csum_verify(struct inode *inode,
@@ -110,6 +131,13 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
struct ext4_dir_entry *dirent;
int is_dx_block = 0;
+ if (block >= inode->i_size >> inode->i_blkbits) {
+ ext4_error_inode(inode, func, line, block,
+ "Attempting to read directory block (%u) that is past i_size (%llu)",
+ block, inode->i_size);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
if (ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_EIO))
bh = ERR_PTR(-EIO);
else
@@ -123,10 +151,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
return bh;
}
- if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
+ /* The first directory block must not be a hole. */
+ if (!bh && (type == INDEX || type == DIRENT_HTREE || block == 0)) {
ext4_error_inode(inode, func, line, block,
- "Directory hole found for htree %s block",
- (type == INDEX) ? "index" : "leaf");
+ "Directory hole found for htree %s block %u",
+ (type == INDEX) ? "index" : "leaf", block);
return ERR_PTR(-EFSCORRUPTED);
}
if (!bh)
@@ -147,7 +176,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
brelse(bh);
return ERR_PTR(-EFSCORRUPTED);
}
- if (!ext4_has_metadata_csum(inode->i_sb) ||
+ if (!ext4_has_feature_metadata_csum(inode->i_sb) ||
buffer_verified(bh))
return bh;
@@ -262,36 +291,6 @@ struct dx_tail {
__le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */
};
-static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
-static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
-static inline unsigned dx_get_hash(struct dx_entry *entry);
-static void dx_set_hash(struct dx_entry *entry, unsigned value);
-static unsigned dx_get_count(struct dx_entry *entries);
-static unsigned dx_get_limit(struct dx_entry *entries);
-static void dx_set_count(struct dx_entry *entries, unsigned value);
-static void dx_set_limit(struct dx_entry *entries, unsigned value);
-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
-static unsigned dx_node_limit(struct inode *dir);
-static struct dx_frame *dx_probe(struct ext4_filename *fname,
- struct inode *dir,
- struct dx_hash_info *hinfo,
- struct dx_frame *frame);
-static void dx_release(struct dx_frame *frames);
-static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
- unsigned blocksize, struct dx_hash_info *hinfo,
- struct dx_map_entry map[]);
-static void dx_sort_map(struct dx_map_entry *map, unsigned count);
-static struct ext4_dir_entry_2 *dx_move_dirents(struct inode *dir, char *from,
- char *to, struct dx_map_entry *offsets,
- int count, unsigned int blocksize);
-static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base,
- unsigned int blocksize);
-static void dx_insert_block(struct dx_frame *frame,
- u32 hash, ext4_lblk_t block);
-static int ext4_htree_next_block(struct inode *dir, __u32 hash,
- struct dx_frame *frame,
- struct dx_frame *frames,
- __u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **res_dir);
@@ -315,17 +314,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
struct buffer_head *bh)
{
struct ext4_dir_entry_tail *t;
+ int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
#ifdef PARANOID
struct ext4_dir_entry *d, *top;
d = (struct ext4_dir_entry *)bh->b_data;
top = (struct ext4_dir_entry *)(bh->b_data +
- (EXT4_BLOCK_SIZE(inode->i_sb) -
- sizeof(struct ext4_dir_entry_tail)));
- while (d < top && d->rec_len)
+ (blocksize - sizeof(struct ext4_dir_entry_tail)));
+ while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
d = (struct ext4_dir_entry *)(((void *)d) +
- le16_to_cpu(d->rec_len));
+ ext4_rec_len_from_disk(d->rec_len, blocksize));
if (d != top)
return NULL;
@@ -336,7 +335,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
#endif
if (t->det_reserved_zero1 ||
- le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+ (ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
+ sizeof(struct ext4_dir_entry_tail)) ||
t->det_reserved_zero2 ||
t->det_reserved_ft != EXT4_FT_DIR_CSUM)
return NULL;
@@ -346,11 +346,10 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
static __le32 ext4_dirblock_csum(struct inode *inode, void *dirent, int size)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)dirent, size);
return cpu_to_le32(csum);
}
@@ -368,7 +367,7 @@ int ext4_dirblock_csum_verify(struct inode *inode, struct buffer_head *bh)
{
struct ext4_dir_entry_tail *t;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
t = get_dirent_tail(inode, bh);
@@ -389,7 +388,7 @@ static void ext4_dirblock_csum_set(struct inode *inode,
{
struct ext4_dir_entry_tail *t;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
t = get_dirent_tail(inode, bh);
@@ -417,13 +416,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
struct ext4_dir_entry *dp;
struct dx_root_info *root;
int count_offset;
+ int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
+ unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
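+ /*
+ * Note: ext4_rec_len_from_disk() decodes the on-disk rec_len encoding
+ * used for large block sizes, unlike a plain le16_to_cpu().
+ */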
- if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+ if (rlen == blocksize)
count_offset = 8;
- else if (le16_to_cpu(dirent->rec_len) == 12) {
+ else if (rlen == 12) {
dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
- if (le16_to_cpu(dp->rec_len) !=
- EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+ if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
return NULL;
root = (struct dx_root_info *)(((void *)dp + 12));
if (root->reserved_zero ||
@@ -441,7 +441,6 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
int count_offset, int count, struct dx_tail *t)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
int size;
@@ -449,9 +448,9 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
int offset = offsetof(struct dx_tail, dt_checksum);
size = count_offset + (count * sizeof(struct dx_entry));
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
- csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ csum = ext4_chksum(ei->i_csum_seed, (__u8 *)dirent, size);
+ csum = ext4_chksum(csum, (__u8 *)t, offset);
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
return cpu_to_le32(csum);
}
@@ -463,7 +462,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
struct dx_tail *t;
int count_offset, limit, count;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return 1;
c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -492,7 +491,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
struct dx_tail *t;
int count_offset, limit, count;
- if (!ext4_has_metadata_csum(inode->i_sb))
+ if (!ext4_has_feature_metadata_csum(inode->i_sb))
return;
c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -581,7 +580,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
ext4_dir_rec_len(1, NULL) -
ext4_dir_rec_len(2, NULL) - infosize;
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
entry_space -= sizeof(struct dx_tail);
return entry_space / sizeof(struct dx_entry);
}
@@ -591,7 +590,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
unsigned int entry_space = dir->i_sb->s_blocksize -
ext4_dir_rec_len(0, dir);
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
entry_space -= sizeof(struct dx_tail);
return entry_space / sizeof(struct dx_entry);
}
@@ -646,7 +645,7 @@ static struct stats dx_show_leaf(struct inode *dir,
len = de->name_len;
if (!IS_ENCRYPTED(dir)) {
/* Directory is not encrypted */
- ext4fs_dirhash(dir, de->name,
+ (void) ext4fs_dirhash(dir, de->name,
de->name_len, &h);
printk("%*.s:(U)%x.%u ", len,
name, h.hash,
@@ -681,8 +680,9 @@ static struct stats dx_show_leaf(struct inode *dir,
if (IS_CASEFOLDED(dir))
h.hash = EXT4_DIRENT_HASH(de);
else
- ext4fs_dirhash(dir, de->name,
- de->name_len, &h);
+ (void) ext4fs_dirhash(dir,
+ de->name,
+ de->name_len, &h);
printk("%*.s:(E)%x.%u ", len, name,
h.hash, (unsigned) ((char *) de
- base));
@@ -692,7 +692,8 @@ static struct stats dx_show_leaf(struct inode *dir,
#else
int len = de->name_len;
char *name = de->name;
- ext4fs_dirhash(dir, de->name, de->name_len, &h);
+ (void) ext4fs_dirhash(dir, de->name,
+ de->name_len, &h);
printk("%*.s:%x.%u ", len, name, h.hash,
(unsigned) ((char *) de - base));
#endif
@@ -777,12 +778,14 @@ static struct dx_frame *
dx_probe(struct ext4_filename *fname, struct inode *dir,
struct dx_hash_info *hinfo, struct dx_frame *frame_in)
{
- unsigned count, indirect;
+ unsigned count, indirect, level, i;
struct dx_entry *at, *entries, *p, *q, *m;
struct dx_root *root;
struct dx_frame *frame = frame_in;
struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
u32 hash;
+ ext4_lblk_t block;
+ ext4_lblk_t blocks[EXT4_HTREE_LEVEL];
memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
frame->bh = ext4_read_dirblock(dir, 0, INDEX);
@@ -819,8 +822,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
/* hash is already computed for encrypted casefolded directory */
if (fname && fname_name(fname) &&
- !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)))
- ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
+ !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) {
+ int ret = ext4fs_dirhash(dir, fname_name(fname),
+ fname_len(fname), hinfo);
+ if (ret < 0) {
+ ret_err = ERR_PTR(ret);
+ goto fail;
+ }
+ }
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
@@ -854,6 +863,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
}
dxtrace(printk("Look up %x", hash));
+ level = 0;
+ blocks[0] = 0;
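+ /* Record the index block visited at each level so cycles can be detected below. */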
while (1) {
count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) {
@@ -882,15 +893,27 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
dx_get_block(at)));
frame->entries = entries;
frame->at = at;
- if (!indirect--)
+
+ block = dx_get_block(at);
+ for (i = 0; i <= level; i++) {
+ if (blocks[i] == block) {
+ ext4_warning_inode(dir,
+ "dx entry: tree cycle block %u points back to block %u",
+ blocks[level], block);
+ goto fail;
+ }
+ }
+ if (++level > indirect)
return frame;
+ blocks[level] = block;
frame++;
- frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+ frame->bh = ext4_read_dirblock(dir, block, INDEX);
if (IS_ERR(frame->bh)) {
ret_err = (struct dx_frame *) frame->bh;
frame->bh = NULL;
goto fail;
}
+
entries = ((struct dx_node *) frame->bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit(dir)) {
@@ -1021,7 +1044,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
struct ext4_dir_entry_2 *de, *top;
int err = 0, count = 0;
struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str;
- int csum = ext4_has_metadata_csum(dir->i_sb);
+ int csum = ext4_has_feature_metadata_csum(dir->i_sb);
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
@@ -1053,7 +1076,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
bh->b_data, bh->b_size,
- (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ EXT4_LBLK_TO_B(dir, block)
+ ((char *)de - bh->b_data))) {
/* silently ignore the rest of the block */
break;
@@ -1067,7 +1090,12 @@ static int htree_dirblock_to_tree(struct file *dir_file,
hinfo->minor_hash = 0;
}
} else {
- ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+ err = ext4fs_dirhash(dir, de->name,
+ de->name_len, hinfo);
+ if (err < 0) {
+ count = err;
+ goto errout;
+ }
}
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
@@ -1249,28 +1277,41 @@ static inline int search_dirblock(struct buffer_head *bh,
* Create map of hash values, offsets, and sizes, stored at end of block.
* Returns number of entries mapped.
*/
-static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
- unsigned blocksize, struct dx_hash_info *hinfo,
+static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ struct dx_hash_info *hinfo,
struct dx_map_entry *map_tail)
{
int count = 0;
- char *base = (char *) de;
+ struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)bh->b_data;
+ unsigned int buflen = bh->b_size;
+ char *base = bh->b_data;
struct dx_hash_info h = *hinfo;
+ int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
+
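+ /* Exclude the checksum tail from the dirent walk when metadata_csum is enabled. */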
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
+ buflen -= sizeof(struct ext4_dir_entry_tail);
- while ((char *) de < base + blocksize) {
+ while ((char *) de < base + buflen) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh, base, buflen,
+ ((char *)de) - base))
+ return -EFSCORRUPTED;
if (de->name_len && de->inode) {
if (ext4_hash_in_dirent(dir))
h.hash = EXT4_DIRENT_HASH(de);
- else
- ext4fs_dirhash(dir, de->name, de->name_len, &h);
+ else {
+ int err = ext4fs_dirhash(dir, de->name,
+ de->name_len, &h);
+ if (err < 0)
+ return err;
+ }
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
- map_tail->size = le16_to_cpu(de->rec_len);
+ map_tail->size = ext4_rec_len_from_disk(de->rec_len,
+ blocksize);
count++;
cond_resched();
}
- /* XXX: do we need to check rec_len == 0 case? -Chris */
de = ext4_next_entry(de, blocksize);
}
return count;
@@ -1317,94 +1358,42 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
dx_set_count(entries, count + 1);
}
-#ifdef CONFIG_UNICODE
-/*
- * Test whether a case-insensitive directory entry matches the filename
- * being searched for. If quick is set, assume the name being looked up
- * is already in the casefolded form.
- *
- * Returns: 0 if the directory entry matches, more than 0 if it
- * doesn't match or less than zero on error.
- */
-static int ext4_ci_compare(const struct inode *parent, const struct qstr *name,
- u8 *de_name, size_t de_name_len, bool quick)
-{
- const struct super_block *sb = parent->i_sb;
- const struct unicode_map *um = sb->s_encoding;
- struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
- struct qstr entry = QSTR_INIT(de_name, de_name_len);
- int ret;
-
- if (IS_ENCRYPTED(parent)) {
- const struct fscrypt_str encrypted_name =
- FSTR_INIT(de_name, de_name_len);
-
- decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
- if (!decrypted_name.name)
- return -ENOMEM;
- ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
- &decrypted_name);
- if (ret < 0)
- goto out;
- entry.name = decrypted_name.name;
- entry.len = decrypted_name.len;
- }
-
- if (quick)
- ret = utf8_strncasecmp_folded(um, name, &entry);
- else
- ret = utf8_strncasecmp(um, name, &entry);
- if (ret < 0) {
- /* Handle invalid character sequence as either an error
- * or as an opaque byte sequence.
- */
- if (sb_has_strict_encoding(sb))
- ret = -EINVAL;
- else if (name->len != entry.len)
- ret = 1;
- else
- ret = !!memcmp(name->name, entry.name, entry.len);
- }
-out:
- kfree(decrypted_name.name);
- return ret;
-}
-
+#if IS_ENABLED(CONFIG_UNICODE)
int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
struct ext4_filename *name)
{
- struct fscrypt_str *cf_name = &name->cf_name;
+ struct qstr *cf_name = &name->cf_name;
+ unsigned char *buf;
struct dx_hash_info *hinfo = &name->hinfo;
int len;
- if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+ if (!IS_CASEFOLDED(dir) ||
(IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
cf_name->name = NULL;
return 0;
}
- cf_name->name = kmalloc(EXT4_NAME_LEN, GFP_NOFS);
- if (!cf_name->name)
+ buf = kmalloc(EXT4_NAME_LEN, GFP_NOFS);
+ if (!buf)
return -ENOMEM;
- len = utf8_casefold(dir->i_sb->s_encoding,
- iname, cf_name->name,
- EXT4_NAME_LEN);
+ len = utf8_casefold(dir->i_sb->s_encoding, iname, buf, EXT4_NAME_LEN);
if (len <= 0) {
- kfree(cf_name->name);
- cf_name->name = NULL;
+ kfree(buf);
+ buf = NULL;
}
+ cf_name->name = buf;
cf_name->len = (unsigned) len;
+
if (!IS_ENCRYPTED(dir))
return 0;
hinfo->hash_version = DX_HASH_SIPHASH;
hinfo->seed = NULL;
if (cf_name->name)
- ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
+ return ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
else
- ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
- return 0;
+ return ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
}
#endif
@@ -1428,25 +1417,33 @@ static bool ext4_match(struct inode *parent,
f.crypto_buf = fname->crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
- if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+#if IS_ENABLED(CONFIG_UNICODE)
+ if (IS_CASEFOLDED(parent) &&
(!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
- if (fname->cf_name.name) {
- struct qstr cf = {.name = fname->cf_name.name,
- .len = fname->cf_name.len};
- if (IS_ENCRYPTED(parent)) {
- if (fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
- fname->hinfo.minor_hash !=
- EXT4_DIRENT_MINOR_HASH(de)) {
-
- return false;
- }
- }
- return !ext4_ci_compare(parent, &cf, de->name,
- de->name_len, true);
- }
- return !ext4_ci_compare(parent, fname->usr_fname, de->name,
- de->name_len, false);
+ /*
+ * Just checking IS_ENCRYPTED(parent) below is not
+ * sufficient to decide whether one can use the hash for
+ * skipping the string comparison, because the key might
+ * have been added right after
+ * ext4_fname_setup_ci_filename(). In this case, a hash
+ * mismatch will be a false negative. Therefore, make
+ * sure cf_name was properly initialized before
+ * considering the calculated hash.
+ */
+ if (sb_no_casefold_compat_fallback(parent->i_sb) &&
+ IS_ENCRYPTED(parent) && fname->cf_name.name &&
+ (fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
+ fname->hinfo.minor_hash != EXT4_DIRENT_MINOR_HASH(de)))
+ return false;
+ /*
+ * Treat comparison errors as not a match. The
+ * only cases where this happens are disk
+ * corruption or ENOMEM.
+ */
+
+ return generic_ci_match(parent, fname->usr_fname,
+ &fname->cf_name, de->name,
+ de->name_len) > 0;
}
#endif
@@ -1454,7 +1451,7 @@ static bool ext4_match(struct inode *parent,
}
/*
- * Returns 0 if not found, -1 on failure, and 1 on success
+ * Returns 0 if not found, -EFSCORRUPTED on failure, and 1 on success
*/
int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
struct inode *dir, struct ext4_filename *fname,
@@ -1466,16 +1463,16 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
- while ((char *) de < dlimit) {
+ while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
- if ((char *) de + de->name_len <= dlimit &&
+ if (de->name + de->name_len <= dlimit &&
ext4_match(dir, fname, de)) {
/* found a match - just to be sure, do
* a full check */
if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
buf_size, offset))
- return -1;
+ return -EFSCORRUPTED;
*res_dir = de;
return 1;
}
@@ -1483,7 +1480,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
if (de_len <= 0)
- return -1;
+ return -EFSCORRUPTED;
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
@@ -1544,11 +1541,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
int has_inline_data = 1;
ret = ext4_find_inline_entry(dir, fname, res_dir,
&has_inline_data);
- if (has_inline_data) {
- if (inlined)
- *inlined = 1;
+ if (inlined)
+ *inlined = has_inline_data;
+ if (has_inline_data || IS_ERR(ret))
goto cleanup_and_exit;
- }
}
if ((namelen <= 2) && (name[0] == '.') &&
@@ -1568,10 +1564,15 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
* return. Otherwise, fall back to doing a search the
* old fashioned way.
*/
- if (!IS_ERR(ret) || PTR_ERR(ret) != ERR_BAD_DX_DIR)
+ if (IS_ERR(ret) && PTR_ERR(ret) == ERR_BAD_DX_DIR)
+ dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+ "falling back\n"));
+ else if (!sb_no_casefold_compat_fallback(dir->i_sb) &&
+ *res_dir == NULL && IS_CASEFOLDED(dir))
+ dxtrace(printk(KERN_DEBUG "ext4_find_entry: casefold "
+ "failed, falling back\n"));
+ else
goto cleanup_and_exit;
- dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
- "falling back\n"));
ret = NULL;
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
@@ -1629,15 +1630,17 @@ restart:
}
set_buffer_verified(bh);
i = search_dirblock(bh, dir, fname,
- block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
+ EXT4_LBLK_TO_B(dir, block), res_dir);
if (i == 1) {
EXT4_I(dir)->i_dir_start_lookup = block;
ret = bh;
goto cleanup_and_exit;
} else {
brelse(bh);
- if (i < 0)
+ if (i < 0) {
+ ret = ERR_PTR(i);
goto cleanup_and_exit;
+ }
}
next:
if (++block >= nblocks)
@@ -1692,7 +1695,6 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
struct buffer_head *bh;
err = ext4_fname_prepare_lookup(dir, dentry, &fname);
- generic_set_encrypted_ci_d_ops(dentry);
if (err == -ENOENT)
return NULL;
if (err)
@@ -1708,7 +1710,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **res_dir)
{
- struct super_block * sb = dir->i_sb;
struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
struct buffer_head *bh;
ext4_lblk_t block;
@@ -1719,7 +1720,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
#endif
frame = dx_probe(fname, dir, NULL, frames);
if (IS_ERR(frame))
- return (struct buffer_head *) frame;
+ return ERR_CAST(frame);
do {
block = dx_get_block(frame->at);
bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
@@ -1727,12 +1728,11 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
goto errout;
retval = search_dirblock(bh, dir, fname,
- block << EXT4_BLOCK_SIZE_BITS(sb),
- res_dir);
+ EXT4_LBLK_TO_B(dir, block), res_dir);
if (retval == 1)
goto success;
brelse(bh);
- if (retval == -1) {
+ if (retval < 0) {
bh = ERR_PTR(ERR_BAD_DX_DIR);
goto errout;
}
@@ -1760,7 +1760,7 @@ success:
static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
struct buffer_head *bh;
if (dentry->d_name.len > EXT4_NAME_LEN)
@@ -1800,8 +1800,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
}
}
-#ifdef CONFIG_UNICODE
- if (!inode && IS_CASEFOLDED(dir)) {
+ if (IS_ENABLED(CONFIG_UNICODE) && !inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
* well. For now, prevent the negative dentry
@@ -1809,7 +1808,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
*/
return NULL;
}
-#endif
+
return d_splice_alias(inode, dentry);
}
@@ -1817,7 +1816,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
struct dentry *ext4_get_parent(struct dentry *child)
{
__u32 ino;
- struct ext4_dir_entry_2 * de;
+ struct ext4_dir_entry_2 * de = NULL;
struct buffer_head *bh;
bh = ext4_find_entry(d_inode(child), &dotdot_name, &de, NULL);
@@ -1906,7 +1905,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct dx_hash_info *hinfo)
{
unsigned blocksize = dir->i_sb->s_blocksize;
- unsigned count, continued;
+ unsigned continued;
+ int count;
struct buffer_head *bh2;
ext4_lblk_t newblock;
u32 hash2;
@@ -1917,14 +1917,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
int csum_size = 0;
int err = 0, i;
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
bh2 = ext4_append(handle, dir, &newblock);
if (IS_ERR(bh2)) {
brelse(*bh);
*bh = NULL;
- return (struct ext4_dir_entry_2 *) bh2;
+ return ERR_CAST(bh2);
}
BUFFER_TRACE(*bh, "get_write_access");
@@ -1943,8 +1943,11 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
/* create map in the end of data2 block */
map = (struct dx_map_entry *) (data2 + blocksize);
- count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1,
- blocksize, hinfo, map);
+ count = dx_make_map(dir, *bh, hinfo, map);
+ if (count < 0) {
+ err = count;
+ goto journal_error;
+ }
map -= count;
dx_sort_map(map, count);
/* Ensure that neither split block is over half full */
@@ -1964,11 +1967,20 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
* split it in half by count; each resulting block will have at least
* half the space free.
*/
- if (i > 0)
+ if (i >= 0)
split = count - move;
else
split = count/2;
+ if (WARN_ON_ONCE(split == 0)) {
+ /* Should never happen, but avoid out-of-bounds access below */
+ ext4_error_inode_block(dir, (*bh)->b_blocknr, 0,
+ "bad indexed directory? hash=%08x:%08x count=%d move=%u",
+ hinfo->hash, hinfo->minor_hash, count, move);
+ err = -EFSCORRUPTED;
+ goto out;
+ }
+
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
@@ -2012,15 +2024,15 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
return de;
journal_error:
+ ext4_std_error(dir->i_sb, err);
+out:
brelse(*bh);
brelse(bh2);
*bh = NULL;
- ext4_std_error(dir->i_sb, err);
return ERR_PTR(err);
}
-int ext4_find_dest_de(struct inode *dir, struct inode *inode,
- struct buffer_head *bh,
+int ext4_find_dest_de(struct inode *dir, struct buffer_head *bh,
void *buf, int buf_size,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **dest_de)
@@ -2031,7 +2043,7 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
unsigned int offset = 0;
char *top;
- de = (struct ext4_dir_entry_2 *)buf;
+ de = buf;
top = buf + buf_size - reclen;
while ((char *) de <= top) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
@@ -2102,11 +2114,11 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
int csum_size = 0;
int err, err2;
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
if (!de) {
- err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
+ err = ext4_find_dest_de(dir, bh, bh->b_data,
blocksize - csum_size, fname, &de);
if (err)
return err;
@@ -2133,7 +2145,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
err2 = ext4_mark_inode_dirty(handle, dir);
@@ -2144,6 +2156,52 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
return err ? err : err2;
}
+static bool ext4_check_dx_root(struct inode *dir, struct dx_root *root)
+{
+ struct fake_dirent *fde;
+ const char *error_msg;
+ unsigned int rlen;
+ unsigned int blocksize = dir->i_sb->s_blocksize;
+ char *blockend = (char *)root + dir->i_sb->s_blocksize;
+
+ fde = &root->dot;
+ if (unlikely(fde->name_len != 1)) {
+ error_msg = "invalid name_len for '.'";
+ goto corrupted;
+ }
+ if (unlikely(strncmp(root->dot_name, ".", fde->name_len))) {
+ error_msg = "invalid name for '.'";
+ goto corrupted;
+ }
+ rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
+ if (unlikely((char *)fde + rlen >= blockend)) {
+ error_msg = "invalid rec_len for '.'";
+ goto corrupted;
+ }
+
+ fde = &root->dotdot;
+ if (unlikely(fde->name_len != 2)) {
+ error_msg = "invalid name_len for '..'";
+ goto corrupted;
+ }
+ if (unlikely(strncmp(root->dotdot_name, "..", fde->name_len))) {
+ error_msg = "invalid name for '..'";
+ goto corrupted;
+ }
+ rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
+ if (unlikely((char *)fde + rlen >= blockend)) {
+ error_msg = "invalid rec_len for '..'";
+ goto corrupted;
+ }
+
+ return true;
+
+corrupted:
+ EXT4_ERROR_INODE(dir, "Corrupt dir, %s, running e2fsck is recommended",
+ error_msg);
+ return false;
+}
+
/*
* This converts a one block unindexed directory to a 3 block indexed
* directory, and adds the dentry to the indexed directory.
@@ -2165,7 +2223,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
struct fake_dirent *fde;
int csum_size = 0;
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
blocksize = dir->i_sb->s_blocksize;
@@ -2178,17 +2236,17 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
brelse(bh);
return retval;
}
+
root = (struct dx_root *) bh->b_data;
+ if (!ext4_check_dx_root(dir, root)) {
+ brelse(bh);
+ return -EFSCORRUPTED;
+ }
/* The 0th block becomes the root, move the dirents out */
fde = &root->dotdot;
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len, blocksize));
- if ((char *) de >= (((char *) root) + blocksize)) {
- EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
- brelse(bh);
- return -EFSCORRUPTED;
- }
len = ((char *) root) + (blocksize - csum_size) - (char *) de;
/* Allocate new block for the 0th block's dirents */
@@ -2204,8 +2262,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
memset(de, 0, len); /* wipe old data */
de = (struct ext4_dir_entry_2 *) data2;
top = data2 + len;
- while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
+ while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len,
+ (char *)de - data2)) {
+ brelse(bh2);
+ brelse(bh);
+ return -EFSCORRUPTED;
+ }
de = de2;
+ }
de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
(char *) de, blocksize);
@@ -2236,10 +2301,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
/* casefolded encrypted hashes are computed on fname setup */
- if (!ext4_hash_in_dirent(dir))
- ext4fs_dirhash(dir, fname_name(fname),
- fname_len(fname), &fname->hinfo);
-
+ if (!ext4_hash_in_dirent(dir)) {
+ int err = ext4fs_dirhash(dir, fname_name(fname),
+ fname_len(fname), &fname->hinfo);
+ if (err < 0) {
+ brelse(bh2);
+ brelse(bh);
+ return err;
+ }
+ }
memset(frames, 0, sizeof(frames));
frame = frames;
frame->entries = entries;
@@ -2297,22 +2367,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
ext4_lblk_t block, blocks;
int csum_size = 0;
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
sb = dir->i_sb;
blocksize = sb->s_blocksize;
- if (!dentry->d_name.len)
- return -EINVAL;
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
-#ifdef CONFIG_UNICODE
- if (sb_has_strict_encoding(sb) && IS_CASEFOLDED(dir) &&
- sb->s_encoding && utf8_validate(sb->s_encoding, &dentry->d_name))
+ if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
return -EINVAL;
-#endif
retval = ext4_fname_setup_filename(dir, &dentry->d_name, 0, &fname);
if (retval)
@@ -2333,7 +2398,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
if (!retval || (retval != ERR_BAD_DX_DIR))
goto out;
/* Can we just ignore htree data? */
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
EXT4_ERROR_INODE(dir,
"Directory has corrupted htree index.");
retval = -EFSCORRUPTED;
@@ -2483,8 +2548,10 @@ again:
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, sb, frame->bh,
EXT4_JTR_NONE);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
if (!add_level) {
unsigned icount1 = icount/2, icount2 = icount - icount1;
unsigned hash2 = dx_get_hash(entries + icount1);
@@ -2495,8 +2562,10 @@ again:
err = ext4_journal_get_write_access(handle, sb,
(frame - 1)->bh,
EXT4_JTR_NONE);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
memcpy((char *) entries2, (char *) (entries + icount1),
icount2 * sizeof(struct dx_entry));
@@ -2515,8 +2584,10 @@ again:
dxtrace(dx_show_index("node",
((struct dx_node *) bh2->b_data)->entries));
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
brelse (bh2);
err = ext4_handle_dirty_dx_node(handle, dir,
(frame - 1)->bh);
@@ -2541,8 +2612,10 @@ again:
"Creating %d level index...\n",
dxroot->info.indirect_levels));
err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
- if (err)
+ if (err) {
+ brelse(bh2);
goto journal_error;
+ }
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
brelse(bh2);
restart = 1;
@@ -2587,7 +2660,7 @@ int ext4_generic_delete_entry(struct inode *dir,
i = 0;
pde = NULL;
- de = (struct ext4_dir_entry_2 *)entry_buf;
+ de = entry_buf;
while (i < buf_size - csum_size) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
entry_buf, buf_size, i))
@@ -2639,7 +2712,7 @@ static int ext4_delete_entry(handle_t *handle,
return err;
}
- if (ext4_has_metadata_csum(dir->i_sb))
+ if (ext4_has_feature_metadata_csum(dir->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
BUFFER_TRACE(bh, "get_write_access");
@@ -2716,6 +2789,7 @@ static int ext4_add_nondir(handle_t *handle,
return err;
}
drop_nlink(inode);
+ ext4_mark_inode_dirty(handle, inode);
ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
return err;
@@ -2729,7 +2803,7 @@ static int ext4_add_nondir(handle_t *handle,
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext4_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int ext4_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
handle_t *handle;
@@ -2743,7 +2817,7 @@ static int ext4_create(struct user_namespace *mnt_userns, struct inode *dir,
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
- inode = ext4_new_inode_start_handle(mnt_userns, dir, mode, &dentry->d_name,
+ inode = ext4_new_inode_start_handle(idmap, dir, mode, &dentry->d_name,
0, NULL, EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
@@ -2764,7 +2838,7 @@ retry:
return err;
}
-static int ext4_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int ext4_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
handle_t *handle;
@@ -2778,7 +2852,7 @@ static int ext4_mknod(struct user_namespace *mnt_userns, struct inode *dir,
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
- inode = ext4_new_inode_start_handle(mnt_userns, dir, mode, &dentry->d_name,
+ inode = ext4_new_inode_start_handle(idmap, dir, mode, &dentry->d_name,
0, NULL, EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
@@ -2798,8 +2872,8 @@ retry:
return err;
}
-static int ext4_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int ext4_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode)
{
handle_t *handle;
struct inode *inode;
@@ -2810,10 +2884,10 @@ static int ext4_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
return err;
retry:
- inode = ext4_new_inode_start_handle(mnt_userns, dir, mode,
+ inode = ext4_new_inode_start_handle(idmap, dir, mode,
NULL, 0, NULL,
EXT4_HT_DIR,
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
+ EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
4 + EXT4_XATTR_TRANS_BLOCKS);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
@@ -2821,7 +2895,7 @@ retry:
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
- d_tmpfile(dentry, inode);
+ d_tmpfile(file, inode);
err = ext4_orphan_add(handle, inode);
if (err)
goto err_unlock_inode;
@@ -2832,55 +2906,66 @@ retry:
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
- return err;
+ return finish_open_simple(file, err);
err_unlock_inode:
ext4_journal_stop(handle);
unlock_new_inode(inode);
return err;
}
-struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int blocksize, int csum_size,
- unsigned int parent_ino, int dotdot_real_len)
+int ext4_init_dirblock(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, unsigned int parent_ino,
+ void *inline_buf, int inline_size)
{
+ struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) bh->b_data;
+ size_t blocksize = bh->b_size;
+ int csum_size = 0, header_size;
+
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
de->rec_len = ext4_rec_len_to_disk(ext4_dir_rec_len(de->name_len, NULL),
blocksize);
- strcpy(de->name, ".");
+ memcpy(de->name, ".", 2);
ext4_set_de_type(inode->i_sb, de, S_IFDIR);
de = ext4_next_entry(de, blocksize);
de->inode = cpu_to_le32(parent_ino);
de->name_len = 2;
- if (!dotdot_real_len)
- de->rec_len = ext4_rec_len_to_disk(blocksize -
- (csum_size + ext4_dir_rec_len(1, NULL)),
- blocksize);
- else
+ memcpy(de->name, "..", 3);
+ ext4_set_de_type(inode->i_sb, de, S_IFDIR);
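+ /*
+ * When inline_buf is passed we are converting an inline directory:
+ * its entries are copied in right after '..' and the final entry is
+ * stretched to fill the block. Otherwise '..' itself spans the rest
+ * of the block.
+ */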
+ if (inline_buf) {
de->rec_len = ext4_rec_len_to_disk(
ext4_dir_rec_len(de->name_len, NULL),
blocksize);
- strcpy(de->name, "..");
- ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+ de = ext4_next_entry(de, blocksize);
+ header_size = (char *)de - bh->b_data;
+ memcpy((void *)de, inline_buf, inline_size);
+ ext4_update_final_de(bh->b_data, inline_size + header_size,
+ blocksize - csum_size);
+ } else {
+ de->rec_len = ext4_rec_len_to_disk(blocksize -
+ (csum_size + ext4_dir_rec_len(1, NULL)),
+ blocksize);
+ }
- return ext4_next_entry(de, blocksize);
+ if (csum_size)
+ ext4_initialize_dirent_tail(bh, blocksize);
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ set_buffer_uptodate(bh);
+ set_buffer_verified(bh);
+ return ext4_handle_dirty_dirblock(handle, inode, bh);
}
int ext4_init_new_dir(handle_t *handle, struct inode *dir,
struct inode *inode)
{
struct buffer_head *dir_block = NULL;
- struct ext4_dir_entry_2 *de;
ext4_lblk_t block = 0;
- unsigned int blocksize = dir->i_sb->s_blocksize;
- int csum_size = 0;
int err;
- if (ext4_has_metadata_csum(dir->i_sb))
- csum_size = sizeof(struct ext4_dir_entry_tail);
-
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
err = ext4_try_create_inline_dir(handle, dir, inode);
if (err < 0 && err != -ENOSPC)
@@ -2889,44 +2974,35 @@ int ext4_init_new_dir(handle_t *handle, struct inode *dir,
goto out;
}
+ set_nlink(inode, 2);
inode->i_size = 0;
dir_block = ext4_append(handle, inode, &block);
if (IS_ERR(dir_block))
return PTR_ERR(dir_block);
- de = (struct ext4_dir_entry_2 *)dir_block->b_data;
- ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
- set_nlink(inode, 2);
- if (csum_size)
- ext4_initialize_dirent_tail(dir_block, blocksize);
-
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
- if (err)
- goto out;
- set_buffer_verified(dir_block);
+ err = ext4_init_dirblock(handle, inode, dir_block, dir->i_ino, NULL, 0);
out:
brelse(dir_block);
return err;
}
-static int ext4_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ext4_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
int err, err2 = 0, credits, retries = 0;
if (EXT4_DIR_LINK_MAX(dir))
- return -EMLINK;
+ return ERR_PTR(-EMLINK);
err = dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
- inode = ext4_new_inode_start_handle(mnt_userns, dir, S_IFDIR | mode,
+ inode = ext4_new_inode_start_handle(idmap, dir, S_IFDIR | mode,
&dentry->d_name,
0, NULL, EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
@@ -2971,7 +3047,7 @@ out_stop:
out_retry:
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
- return err;
+ return ERR_PTR(err);
}
/*
@@ -2997,31 +3073,30 @@ bool ext4_empty_dir(struct inode *inode)
if (inode->i_size < ext4_dir_rec_len(1, NULL) +
ext4_dir_rec_len(2, NULL)) {
EXT4_ERROR_INODE(inode, "invalid size");
- return true;
+ return false;
}
- /* The first directory block must not be a hole,
- * so treat it as DIRENT_HTREE
- */
- bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
+ bh = ext4_read_dirblock(inode, 0, EITHER);
if (IS_ERR(bh))
- return true;
+ return false;
de = (struct ext4_dir_entry_2 *) bh->b_data;
if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
0) ||
- le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
+ le32_to_cpu(de->inode) != inode->i_ino || de->name_len != 1 ||
+ de->name[0] != '.') {
ext4_warning_inode(inode, "directory missing '.'");
brelse(bh);
- return true;
+ return false;
}
offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
de = ext4_next_entry(de, sb->s_blocksize);
if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
offset) ||
- le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
+ le32_to_cpu(de->inode) == 0 || de->name_len != 2 ||
+ de->name[0] != '.' || de->name[1] != '.') {
ext4_warning_inode(inode, "directory missing '..'");
brelse(bh);
- return true;
+ return false;
}
offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
while (offset < inode->i_size) {
@@ -3035,16 +3110,13 @@ bool ext4_empty_dir(struct inode *inode)
continue;
}
if (IS_ERR(bh))
- return true;
+ return false;
}
de = (struct ext4_dir_entry_2 *) (bh->b_data +
(offset & (sb->s_blocksize - 1)));
if (ext4_check_dir_entry(inode, NULL, de, bh,
- bh->b_data, bh->b_size, offset)) {
- offset = (offset | (sb->s_blocksize - 1)) + 1;
- continue;
- }
- if (le32_to_cpu(de->inode)) {
+ bh->b_data, bh->b_size, offset) ||
+ le32_to_cpu(de->inode)) {
brelse(bh);
return false;
}
@@ -3059,11 +3131,12 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
int retval;
struct inode *inode;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
handle_t *handle = NULL;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
- return -EIO;
+ retval = ext4_emergency_state(dir->i_sb);
+ if (unlikely(retval))
+ return retval;
/* Initialize quotas before so that eventual writes go in
* separate transaction */
@@ -3117,7 +3190,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ inode_set_ctime_current(inode);
retval = ext4_mark_inode_dirty(handle, inode);
if (retval)
goto end_rmdir;
@@ -3126,16 +3200,14 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
ext4_fc_track_unlink(handle, dentry);
retval = ext4_mark_inode_dirty(handle, dir);
-#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
* negative dentries at ext4_lookup(), when it is better
* supported by the VFS for the CI case.
*/
- if (IS_CASEFOLDED(dir))
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
d_invalidate(dentry);
-#endif
end_rmdir:
brelse(bh);
@@ -3144,14 +3216,20 @@ end_rmdir:
return retval;
}
-int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
- struct inode *inode)
+int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+ struct inode *inode,
+ struct dentry *dentry /* NULL during fast_commit recovery */)
{
int retval = -ENOENT;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
+ handle_t *handle;
int skip_remove_dentry = 0;
+ /*
+ * Keep this outside the transaction; it may have to set up the
+ * directory's encryption key, which isn't GFP_NOFS-safe.
+ */
bh = ext4_find_entry(dir, d_name, &de, NULL);
if (IS_ERR(bh))
return PTR_ERR(bh);
@@ -3168,7 +3246,14 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
skip_remove_dentry = 1;
else
- goto out;
+ goto out_bh;
+ }
+
+ handle = ext4_journal_start(dir, EXT4_HT_DIR,
+ EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ goto out_bh;
}
if (IS_DIRSYNC(dir))
@@ -3177,12 +3262,12 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name
if (!skip_remove_dentry) {
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
- goto out;
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ goto out_handle;
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
if (retval)
- goto out;
+ goto out_handle;
} else {
retval = 0;
}
@@ -3193,21 +3278,24 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name
drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
retval = ext4_mark_inode_dirty(handle, inode);
-
-out:
+ if (dentry && !retval)
+ ext4_fc_track_unlink(handle, dentry);
+out_handle:
+ ext4_journal_stop(handle);
+out_bh:
brelse(bh);
return retval;
}
static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
- handle_t *handle;
int retval;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
- return -EIO;
+ retval = ext4_emergency_state(dir->i_sb);
+ if (unlikely(retval))
+ return retval;
trace_ext4_unlink_enter(dir, dentry);
/*
@@ -3221,35 +3309,49 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (retval)
goto out_trace;
- handle = ext4_journal_start(dir, EXT4_HT_DIR,
- EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
- if (IS_ERR(handle)) {
- retval = PTR_ERR(handle);
- goto out_trace;
- }
+ retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry);
- retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry));
- if (!retval)
- ext4_fc_track_unlink(handle, dentry);
-#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
* negative dentries at ext4_lookup(), when it is better
* supported by the VFS for the CI case.
*/
- if (IS_CASEFOLDED(dir))
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
d_invalidate(dentry);
-#endif
- if (handle)
- ext4_journal_stop(handle);
out_trace:
trace_ext4_unlink_exit(dentry, retval);
return retval;
}
-static int ext4_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int ext4_init_symlink_block(handle_t *handle, struct inode *inode,
+ struct fscrypt_str *disk_link)
+{
+ struct buffer_head *bh;
+ char *kaddr;
+ int err = 0;
+
+ bh = ext4_bread(handle, inode, 0, EXT4_GET_BLOCKS_CREATE);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, inode->i_sb, bh, EXT4_JTR_NONE);
+ if (err)
+ goto out;
+
+ kaddr = (char *)bh->b_data;
+ memcpy(kaddr, disk_link->name, disk_link->len);
+ inode->i_size = disk_link->len - 1;
+ EXT4_I(inode)->i_disksize = inode->i_size;
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+out:
+ brelse(bh);
+ return err;
+}
+
+static int ext4_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
handle_t *handle;
@@ -3257,9 +3359,11 @@ static int ext4_symlink(struct user_namespace *mnt_userns, struct inode *dir,
int err, len = strlen(symname);
int credits;
struct fscrypt_str disk_link;
+ int retries = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
- return -EIO;
+ err = ext4_emergency_state(dir->i_sb);
+ if (unlikely(err))
+ return err;
err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
&disk_link);
@@ -3270,34 +3374,24 @@ static int ext4_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
return err;
- if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
- /*
- * For non-fast symlinks, we just allocate inode and put it on
- * orphan list in the first transaction => we need bitmap,
- * group descriptor, sb, inode block, quota blocks, and
- * possibly selinux xattr blocks.
- */
- credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
- EXT4_XATTR_TRANS_BLOCKS;
- } else {
- /*
- * Fast symlink. We have to add entry to directory
- * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
- * allocate new inode (bitmap, group descriptor, inode block,
- * quota blocks, sb is already counted in previous macros).
- */
- credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
- }
-
- inode = ext4_new_inode_start_handle(mnt_userns, dir, S_IFLNK|S_IRWXUGO,
+ /*
+ * EXT4_INDEX_EXTRA_TRANS_BLOCKS for addition of entry into the
+ * directory. +3 for inode, inode bitmap, group descriptor allocation.
+ * EXT4_DATA_TRANS_BLOCKS for the data block allocation and
+ * modification.
+ */
+ credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
+retry:
+ inode = ext4_new_inode_start_handle(idmap, dir, S_IFLNK|S_IRWXUGO,
&dentry->d_name, 0, NULL,
EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
if (IS_ERR(inode)) {
if (handle)
ext4_journal_stop(handle);
- return PTR_ERR(inode);
+ err = PTR_ERR(inode);
+ goto out_retry;
}
if (IS_ENCRYPTED(inode)) {
@@ -3305,75 +3399,47 @@ static int ext4_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
goto err_drop_inode;
inode->i_op = &ext4_encrypted_symlink_inode_operations;
+ } else {
+ if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
+ inode->i_op = &ext4_symlink_inode_operations;
+ } else {
+ inode->i_op = &ext4_fast_symlink_inode_operations;
+ }
}
if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
- if (!IS_ENCRYPTED(inode))
- inode->i_op = &ext4_symlink_inode_operations;
- inode_nohighmem(inode);
- ext4_set_aops(inode);
- /*
- * We cannot call page_symlink() with transaction started
- * because it calls into ext4_write_begin() which can wait
- * for transaction commit if we are running out of space
- * and thus we deadlock. So we have to stop transaction now
- * and restart it when symlink contents is written.
- *
- * To keep fs consistent in case of crash, we have to put inode
- * to orphan list in the mean time.
- */
- drop_nlink(inode);
- err = ext4_orphan_add(handle, inode);
- if (handle)
- ext4_journal_stop(handle);
- handle = NULL;
- if (err)
- goto err_drop_inode;
- err = __page_symlink(inode, disk_link.name, disk_link.len, 1);
- if (err)
- goto err_drop_inode;
- /*
- * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
- * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
- */
- handle = ext4_journal_start(dir, EXT4_HT_DIR,
- EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- handle = NULL;
- goto err_drop_inode;
- }
- set_nlink(inode, 1);
- err = ext4_orphan_del(handle, inode);
+ /* alloc symlink block and fill it */
+ err = ext4_init_symlink_block(handle, inode, &disk_link);
if (err)
goto err_drop_inode;
} else {
/* clear the extent format for fast symlink */
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
- if (!IS_ENCRYPTED(inode)) {
- inode->i_op = &ext4_fast_symlink_inode_operations;
- inode->i_link = (char *)&EXT4_I(inode)->i_data;
- }
memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name,
disk_link.len);
inode->i_size = disk_link.len - 1;
+ EXT4_I(inode)->i_disksize = inode->i_size;
+ if (!IS_ENCRYPTED(inode))
+ inode_set_cached_link(inode, (char *)&EXT4_I(inode)->i_data,
+ inode->i_size);
}
- EXT4_I(inode)->i_disksize = inode->i_size;
err = ext4_add_nondir(handle, dentry, &inode);
if (handle)
ext4_journal_stop(handle);
- if (inode)
- iput(inode);
- goto out_free_encrypted_link;
+ iput(inode);
+ goto out_retry;
err_drop_inode:
- if (handle)
- ext4_journal_stop(handle);
clear_nlink(inode);
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
+ if (handle)
+ ext4_journal_stop(handle);
iput(inode);
-out_free_encrypted_link:
+out_retry:
+ if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+ goto retry;
if (disk_link.name != (unsigned char *)symname)
kfree(disk_link.name);
return err;
@@ -3393,7 +3459,7 @@ retry:
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ext4_inc_count(inode);
ihold(inode);
@@ -3455,17 +3521,39 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
struct buffer_head *bh;
if (!ext4_has_inline_data(inode)) {
- /* The first directory block must not be a hole, so
- * treat it as DIRENT_HTREE
- */
- bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
+ struct ext4_dir_entry_2 *de;
+ unsigned int offset;
+
+ bh = ext4_read_dirblock(inode, 0, EITHER);
if (IS_ERR(bh)) {
*retval = PTR_ERR(bh);
return NULL;
}
- *parent_de = ext4_next_entry(
- (struct ext4_dir_entry_2 *)bh->b_data,
- inode->i_sb->s_blocksize);
+
+ de = (struct ext4_dir_entry_2 *) bh->b_data;
+ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data,
+ bh->b_size, 0) ||
+ le32_to_cpu(de->inode) != inode->i_ino ||
+ de->name_len != 1 || de->name[0] != '.') {
+ EXT4_ERROR_INODE(inode, "directory missing '.'");
+ brelse(bh);
+ *retval = -EFSCORRUPTED;
+ return NULL;
+ }
+ offset = ext4_rec_len_from_disk(de->rec_len,
+ inode->i_sb->s_blocksize);
+ de = ext4_next_entry(de, inode->i_sb->s_blocksize);
+ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data,
+ bh->b_size, offset) ||
+ le32_to_cpu(de->inode) == 0 || de->name_len != 2 ||
+ de->name[0] != '.' || de->name[1] != '.') {
+ EXT4_ERROR_INODE(inode, "directory missing '..'");
+ brelse(bh);
+ *retval = -EFSCORRUPTED;
+ return NULL;
+ }
+ *parent_de = de;
+
return bh;
}
@@ -3491,10 +3579,14 @@ struct ext4_renament {
int dir_inlined;
};
-static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent)
+static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent, bool is_cross)
{
int retval;
+ ent->is_dir = true;
+ if (!is_cross)
+ return 0;
+
ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode,
&retval, &ent->parent_de,
&ent->dir_inlined);
@@ -3512,6 +3604,9 @@ static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent,
{
int retval;
+ if (!ent->dir_bh)
+ return 0;
+
ent->parent_de->inode = cpu_to_le32(dir_ino);
BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata");
if (!ent->dir_inlined) {
@@ -3547,8 +3642,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
if (ext4_has_feature_filetype(ent->dir->i_sb))
ent->de->file_type = file_type;
inode_inc_iversion(ent->dir);
- ent->dir->i_ctime = ent->dir->i_mtime =
- current_time(ent->dir);
+ inode_set_mtime_to_ts(ent->dir, inode_set_ctime_current(ent->dir));
retval = ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
@@ -3572,7 +3666,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
* so the old->de may no longer valid and need to find it again
* before reset old inode info.
*/
- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+ &old.inlined);
if (IS_ERR(old.bh))
retval = PTR_ERR(old.bh);
if (!old.bh)
@@ -3591,7 +3686,7 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
{
int retval = -ENOENT;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_2 *de = NULL;
bh = ext4_find_entry(dir, d_name, &de, NULL);
if (IS_ERR(bh))
@@ -3646,7 +3741,7 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
}
}
-static struct inode *ext4_whiteout_for_rename(struct user_namespace *mnt_userns,
+static struct inode *ext4_whiteout_for_rename(struct mnt_idmap *idmap,
struct ext4_renament *ent,
int credits, handle_t **h)
{
@@ -3661,7 +3756,7 @@ static struct inode *ext4_whiteout_for_rename(struct user_namespace *mnt_userns,
credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) +
EXT4_XATTR_TRANS_BLOCKS + 4);
retry:
- wh = ext4_new_inode_start_handle(mnt_userns, ent->dir,
+ wh = ext4_new_inode_start_handle(idmap, ent->dir,
S_IFCHR | WHITEOUT_MODE,
&ent->dentry->d_name, 0, NULL,
EXT4_HT_DIR, credits);
@@ -3689,7 +3784,7 @@ retry:
* while new_{dentry,inode) refers to the destination dentry/inode
* This comes from rename(const char *oldpath, const char *newpath)
*/
-static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -3724,6 +3819,9 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
retval = dquot_initialize(old.dir);
if (retval)
return retval;
+ retval = dquot_initialize(old.inode);
+ if (retval)
+ return retval;
retval = dquot_initialize(new.dir);
if (retval)
return retval;
@@ -3736,9 +3834,11 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
return retval;
}
- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+ &old.inlined);
if (IS_ERR(old.bh))
return PTR_ERR(old.bh);
+
/*
* The check for the inode number is _not_ due to possible IO errors.
* We might rmdir the source, keep it as pwd of some process
@@ -3774,7 +3874,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
goto release_bh;
}
} else {
- whiteout = ext4_whiteout_for_rename(mnt_userns, &old, credits, &handle);
+ whiteout = ext4_whiteout_for_rename(idmap, &old, credits, &handle);
if (IS_ERR(whiteout)) {
retval = PTR_ERR(whiteout);
goto release_bh;
@@ -3795,7 +3895,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir))
goto end_rename;
}
- retval = ext4_rename_dir_prepare(handle, &old);
+ retval = ext4_rename_dir_prepare(handle, &old, new.dir != old.dir);
if (retval)
goto end_rename;
}
@@ -3841,7 +3941,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old.inode->i_ctime = current_time(old.inode);
+ inode_set_ctime_current(old.inode);
retval = ext4_mark_inode_dirty(handle, old.inode);
if (unlikely(retval))
goto end_rename;
@@ -3855,11 +3955,11 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (new.inode) {
ext4_dec_count(new.inode);
- new.inode->i_ctime = current_time(new.inode);
+ inode_set_ctime_current(new.inode);
}
- old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir);
+ inode_set_mtime_to_ts(old.dir, inode_set_ctime_current(old.dir));
ext4_update_dx_flag(old.dir);
- if (old.dir_bh) {
+ if (old.is_dir) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
goto end_rename;
@@ -3882,21 +3982,28 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (unlikely(retval))
goto end_rename;
- if (S_ISDIR(old.inode->i_mode)) {
+ if (old.is_dir) {
/*
* We disable fast commits here because the
* replay code is not yet capable of changing dot dot
* dirents in directories.
*/
ext4_fc_mark_ineligible(old.inode->i_sb,
- EXT4_FC_REASON_RENAME_DIR);
+ EXT4_FC_REASON_RENAME_DIR, handle);
} else {
+ struct super_block *sb = old.inode->i_sb;
+
if (new.inode)
ext4_fc_track_unlink(handle, new.dentry);
- __ext4_fc_track_link(handle, old.inode, new.dentry);
- __ext4_fc_track_unlink(handle, old.inode, old.dentry);
- if (whiteout)
- __ext4_fc_track_create(handle, whiteout, old.dentry);
+ if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
+ !(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
+ !(ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE))) {
+ __ext4_fc_track_link(handle, old.inode, new.dentry);
+ __ext4_fc_track_unlink(handle, old.inode, old.dentry);
+ if (whiteout)
+ __ext4_fc_track_create(handle, whiteout,
+ old.dentry);
+ }
}
if (new.inode) {
@@ -3914,6 +4021,7 @@ end_rename:
ext4_resetent(handle, &old,
old.inode->i_ino, old_file_type);
drop_nlink(whiteout);
+ ext4_mark_inode_dirty(handle, whiteout);
ext4_orphan_add(handle, whiteout);
}
unlock_new_inode(whiteout);
@@ -3926,6 +4034,7 @@ release_bh:
brelse(old.dir_bh);
brelse(old.bh);
brelse(new.bh);
+
return retval;
}
@@ -3945,7 +4054,6 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
};
u8 new_file_type;
int retval;
- struct timespec64 ctime;
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
!projid_eq(EXT4_I(new_dir)->i_projid,
@@ -4001,14 +4109,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_handle_sync(handle);
if (S_ISDIR(old.inode->i_mode)) {
- old.is_dir = true;
- retval = ext4_rename_dir_prepare(handle, &old);
+ retval = ext4_rename_dir_prepare(handle, &old, new.dir != old.dir);
if (retval)
goto end_rename;
}
if (S_ISDIR(new.inode->i_mode)) {
- new.is_dir = true;
- retval = ext4_rename_dir_prepare(handle, &new);
+ retval = ext4_rename_dir_prepare(handle, &new, new.dir != old.dir);
if (retval)
goto end_rename;
}
@@ -4039,9 +4145,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- ctime = current_time(old.inode);
- old.inode->i_ctime = ctime;
- new.inode->i_ctime = ctime;
+ inode_set_ctime_current(old.inode);
+ inode_set_ctime_current(new.inode);
retval = ext4_mark_inode_dirty(handle, old.inode);
if (unlikely(retval))
goto end_rename;
@@ -4049,7 +4154,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(retval))
goto end_rename;
ext4_fc_mark_ineligible(new.inode->i_sb,
- EXT4_FC_REASON_CROSS_RENAME);
+ EXT4_FC_REASON_CROSS_RENAME, handle);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
@@ -4074,15 +4179,16 @@ end_rename:
return retval;
}
-static int ext4_rename2(struct user_namespace *mnt_userns,
+static int ext4_rename2(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
int err;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(old_dir->i_sb))))
- return -EIO;
+ err = ext4_emergency_state(old_dir->i_sb);
+ if (unlikely(err))
+ return err;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
@@ -4097,7 +4203,7 @@ static int ext4_rename2(struct user_namespace *mnt_userns,
new_dir, new_dentry);
}
- return ext4_rename(mnt_userns, old_dir, old_dentry, new_dir, new_dentry, flags);
+ return ext4_rename(idmap, old_dir, old_dentry, new_dir, new_dentry, flags);
}
/*
@@ -4117,7 +4223,7 @@ const struct inode_operations ext4_dir_inode_operations = {
.setattr = ext4_setattr,
.getattr = ext4_getattr,
.listxattr = ext4_listxattr,
- .get_acl = ext4_get_acl,
+ .get_inode_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
.fiemap = ext4_fiemap,
.fileattr_get = ext4_fileattr_get,
@@ -4128,6 +4234,6 @@ const struct inode_operations ext4_special_inode_operations = {
.setattr = ext4_setattr,
.getattr = ext4_getattr,
.listxattr = ext4_listxattr,
- .get_acl = ext4_get_acl,
+ .get_inode_acl = ext4_get_acl,
.set_acl = ext4_set_acl,
};
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index 53adc8f570a3..c9b93b670b0f 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -8,6 +8,8 @@
#include "ext4.h"
#include "ext4_jbd2.h"
+#define EXT4_MAX_ORPHAN_FILE_BLOCKS 512
+
static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
{
int i, j, start;
@@ -93,7 +95,7 @@ static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
* At filesystem recovery time, we walk this list deleting unlinked
* inodes and truncating linked inodes in ext4_orphan_cleanup().
*
- * Orphan list manipulation functions must be called under i_mutex unless
+ * Orphan list manipulation functions must be called under i_rwsem unless
* we are just creating the inode or deleting it.
*/
int ext4_orphan_add(handle_t *handle, struct inode *inode)
@@ -107,19 +109,15 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
if (!sbi->s_journal || is_bad_inode(inode))
return 0;
- WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+ WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
- /*
- * Inode orphaned in orphan file or in orphan list?
- */
- if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
- !list_empty(&EXT4_I(inode)->i_orphan))
+ if (ext4_inode_orphan_tracked(inode))
return 0;
/*
* Orphan handling is only valid for files with data blocks
* being truncated, or files being unlinked. Note that we either
- * hold i_mutex, or the inode can not be referenced from outside,
+ * hold i_rwsem, or the inode can not be referenced from outside,
* so i_nlink should not be bumped due to race
*/
ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -181,8 +179,8 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
} else
brelse(iloc.bh);
- jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
- jbd_debug(4, "orphan inode %lu will point to %d\n",
+ ext4_debug("superblock will point to %lu\n", inode->i_ino);
+ ext4_debug("orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
out:
ext4_std_error(sb, err);
@@ -236,7 +234,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
return 0;
- WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+ WARN_ON_ONCE(!(inode_state_read_once(inode) & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE))
return ext4_orphan_file_del(handle, inode);
@@ -251,7 +249,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
}
mutex_lock(&sbi->s_orphan_lock);
- jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+ ext4_debug("remove inode %lu from orphan list\n", inode->i_ino);
prev = ei->i_orphan.prev;
list_del_init(&ei->i_orphan);
@@ -267,7 +265,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
ino_next = NEXT_ORPHAN(inode);
if (prev == &sbi->s_orphan) {
- jbd_debug(4, "superblock will point to %u\n", ino_next);
+ ext4_debug("superblock will point to %u\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, inode->i_sb,
sbi->s_sbh, EXT4_JTR_NONE);
@@ -286,7 +284,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
struct inode *i_prev =
&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
- jbd_debug(4, "orphan inode %lu will point to %u\n",
+ ext4_debug("orphan inode %lu will point to %u\n",
i_prev->i_ino, ino_next);
err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
if (err) {
@@ -332,8 +330,8 @@ static void ext4_process_orphan(struct inode *inode,
ext4_msg(sb, KERN_DEBUG,
"%s: truncating inode %lu to %lld bytes",
__func__, inode->i_ino, inode->i_size);
- jbd_debug(2, "truncating inode %lu to %lld bytes\n",
- inode->i_ino, inode->i_size);
+ ext4_debug("truncating inode %lu to %lld bytes\n",
+ inode->i_ino, inode->i_size);
inode_lock(inode);
truncate_inode_pages(inode->i_mapping, inode->i_size);
ret = ext4_truncate(inode);
@@ -353,8 +351,8 @@ static void ext4_process_orphan(struct inode *inode,
ext4_msg(sb, KERN_DEBUG,
"%s: deleting unreferenced inode %lu",
__func__, inode->i_ino);
- jbd_debug(2, "deleting unreferenced inode %lu\n",
- inode->i_ino);
+ ext4_debug("deleting unreferenced inode %lu\n",
+ inode->i_ino);
(*nr_orphans)++;
}
iput(inode); /* The delete magic happens here! */
@@ -391,7 +389,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
if (!es->s_last_orphan && !oi->of_blocks) {
- jbd_debug(4, "no orphan inodes to clean up\n");
+ ext4_debug("no orphan inodes to clean up\n");
return;
}
@@ -412,10 +410,10 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
/* don't clear list on RO mount w/ errors */
if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
- "clearing orphan list.\n");
+ "clearing orphan list.");
es->s_last_orphan = 0;
}
- jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ ext4_debug("Skipping orphan recovery on fs with errors.\n");
return;
}
@@ -459,7 +457,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
* so, skip the rest.
*/
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
- jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ ext4_debug("Skipping orphan recovery on fs with errors.\n");
es->s_last_orphan = 0;
break;
}
@@ -517,7 +515,7 @@ void ext4_release_orphan_info(struct super_block *sb)
return;
for (i = 0; i < oi->of_blocks; i++)
brelse(oi->of_binfo[i].ob_bh);
- kfree(oi->of_binfo);
+ kvfree(oi->of_binfo);
}
static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
@@ -537,13 +535,13 @@ static int ext4_orphan_file_block_csum_verify(struct super_block *sb,
struct ext4_orphan_block_tail *ot;
__le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr);
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
ot = ext4_orphan_block_tail(sb, bh);
- calculated = ext4_chksum(EXT4_SB(sb), oi->of_csum_seed,
- (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr));
- calculated = ext4_chksum(EXT4_SB(sb), calculated, (__u8 *)bh->b_data,
+ calculated = ext4_chksum(oi->of_csum_seed, (__u8 *)&dsk_block_nr,
+ sizeof(dsk_block_nr));
+ calculated = ext4_chksum(calculated, (__u8 *)bh->b_data,
inodes_per_ob * sizeof(__u32));
return le32_to_cpu(ot->ob_checksum) == calculated;
}
@@ -560,10 +558,9 @@ void ext4_orphan_file_block_trigger(struct jbd2_buffer_trigger_type *triggers,
struct ext4_orphan_block_tail *ot;
__le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr);
- csum = ext4_chksum(EXT4_SB(sb), oi->of_csum_seed,
- (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr));
- csum = ext4_chksum(EXT4_SB(sb), csum, (__u8 *)data,
- inodes_per_ob * sizeof(__u32));
+ csum = ext4_chksum(oi->of_csum_seed, (__u8 *)&dsk_block_nr,
+ sizeof(dsk_block_nr));
+ csum = ext4_chksum(csum, (__u8 *)data, inodes_per_ob * sizeof(__u32));
ot = ext4_orphan_block_tail(sb, bh);
ot->ob_checksum = cpu_to_le32(csum);
}
@@ -588,10 +585,22 @@ int ext4_init_orphan_info(struct super_block *sb)
ext4_msg(sb, KERN_ERR, "get orphan inode failed");
return PTR_ERR(inode);
}
+ /*
+ * This is just an artificial limit to prevent corrupted fs from
+ * consuming absurd amounts of memory when pinning blocks of orphan
+ * file in memory.
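+ * (With the 512-block cap and, say, 4KiB blocks, this bounds the
+ * pinned orphan file at 512 * 4KiB = 2MiB.)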
+ */
+ if (inode->i_size > (EXT4_MAX_ORPHAN_FILE_BLOCKS << inode->i_blkbits)) {
+ ext4_msg(sb, KERN_ERR, "orphan file too big: %llu",
+ (unsigned long long)inode->i_size);
+ ret = -EFSCORRUPTED;
+ goto out_put;
+ }
oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
- oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block),
- GFP_KERNEL);
+ oi->of_binfo = kvmalloc_array(oi->of_blocks,
+ sizeof(struct ext4_orphan_block),
+ GFP_KERNEL);
if (!oi->of_binfo) {
ret = -ENOMEM;
goto out_put;
@@ -630,7 +639,7 @@ int ext4_init_orphan_info(struct super_block *sb)
out_free:
for (i--; i >= 0; i--)
brelse(oi->of_binfo[i].ob_bh);
- kfree(oi->of_binfo);
+ kvfree(oi->of_binfo);
out_put:
iput(inode);
return ret;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 9cb261714991..39abfeec5f36 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -24,7 +24,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -99,30 +99,29 @@ static void buffer_io_error(struct buffer_head *bh)
static void ext4_finish_bio(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- struct page *bounce_page = NULL;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ struct folio *io_folio = NULL;
struct buffer_head *bh, *head;
- unsigned bio_start = bvec->bv_offset;
- unsigned bio_end = bio_start + bvec->bv_len;
+ size_t bio_start = fi.offset;
+ size_t bio_end = bio_start + fi.length;
unsigned under_io = 0;
unsigned long flags;
- if (fscrypt_is_bounce_page(page)) {
- bounce_page = page;
- page = fscrypt_pagecache_page(bounce_page);
+ if (fscrypt_is_bounce_folio(folio)) {
+ io_folio = folio;
+ folio = fscrypt_pagecache_folio(folio);
}
if (bio->bi_status) {
- SetPageError(page);
- mapping_set_error(page->mapping, -EIO);
+ int err = blk_status_to_errno(bio->bi_status);
+ mapping_set_error(folio->mapping, err);
}
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
/*
- * We check all buffers in the page under b_uptodate_lock
+ * We check all buffers in the folio under b_uptodate_lock
* to avoid races with other end io clearing async_write flags
*/
spin_lock_irqsave(&head->b_uptodate_lock, flags);
@@ -134,13 +133,15 @@ static void ext4_finish_bio(struct bio *bio)
continue;
}
clear_buffer_async_write(bh);
- if (bio->bi_status)
+ if (bio->bi_status) {
+ set_buffer_write_io_error(bh);
buffer_io_error(bh);
+ }
} while ((bh = bh->b_this_page) != head);
spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
if (!under_io) {
- fscrypt_free_bounce_page(bounce_page);
- end_page_writeback(page);
+ fscrypt_free_bounce_page(&io_folio->page);
+ folio_end_writeback(folio);
}
}
}
@@ -163,7 +164,8 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
}
/*
- * Check a range of space and convert unwritten extents to written. Note that
+ * On successful IO, check a range of space and convert unwritten extents to
+ * written. On IO failure, check if journal abort is needed. Note that
* we are protected from truncate touching the same part of the extent tree by the
* fact that truncate code waits for all DIO to finish (thus exclusion from
* direct IO is achieved) and also waits for PageWriteback bits. Thus we
@@ -174,20 +176,36 @@ static int ext4_end_io_end(ext4_io_end_t *io_end)
{
struct inode *inode = io_end->inode;
handle_t *handle = io_end->handle;
+ struct super_block *sb = inode->i_sb;
int ret = 0;
ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
- io_end->handle = NULL; /* Following call will use up the handle */
- ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
- if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
- ext4_msg(inode->i_sb, KERN_EMERG,
+ /*
+ * Do not convert the unwritten extents if data writeback fails,
+ * or stale data may be exposed.
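+ * (Unwritten extents read back as zeroes; marking them written when
+ * the data never reached disk would expose whatever stale blocks sit
+ * at that location.)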
+ */
+ io_end->handle = NULL; /* Following call will use up the handle */
+ if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) {
+ ret = -EIO;
+ if (handle)
+ jbd2_journal_free_reserved(handle);
+
+ if (test_opt(sb, DATA_ERR_ABORT))
+ jbd2_journal_abort(EXT4_SB(sb)->s_journal, ret);
+ } else {
+ ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
+ }
+ if (ret < 0 && !ext4_emergency_state(sb) &&
+ io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ ext4_msg(sb, KERN_EMERG,
"failed to convert unwritten extents to written "
"extents -- potential data loss! "
"(inode %lu, error %d)", inode->i_ino, ret);
}
+
ext4_clear_io_unwritten_flag(io_end);
ext4_release_io_end(io_end);
return ret;
@@ -216,6 +234,18 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
#endif
}
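+/*
+ * Completion work that needs process context -- converting unwritten
+ * extents with the reserved handle, or aborting the journal after a
+ * failed write -- cannot run from the bio end_io path and is instead
+ * deferred to the rsv_conversion workqueue.
+ */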
+static bool ext4_io_end_defer_completion(ext4_io_end_t *io_end)
+{
+ if (io_end->flag & EXT4_IO_END_UNWRITTEN &&
+ !list_empty(&io_end->list_vec))
+ return true;
+ if (test_opt(io_end->inode->i_sb, DATA_ERR_ABORT) &&
+ io_end->flag & EXT4_IO_END_FAILED &&
+ !ext4_emergency_state(io_end->inode->i_sb))
+ return true;
+ return false;
+}
+
/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
@@ -224,9 +254,12 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end)
struct workqueue_struct *wq;
unsigned long flags;
- /* Only reserved conversions from writeback should enter here */
- WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
- WARN_ON(!io_end->handle && sbi->s_journal);
+ /* Only reserved conversions or pending IO errors will enter here. */
+ WARN_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
+ WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN &&
+ !io_end->handle && sbi->s_journal);
+ WARN_ON(!io_end->bio);
+
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
wq = sbi->rsv_conversion_wq;
if (list_empty(&ei->i_rsv_conversion_list))
@@ -251,7 +284,7 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
while (!list_empty(&unwritten)) {
io_end = list_entry(unwritten.next, ext4_io_end_t, list);
- BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ BUG_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
list_del_init(&io_end->list);
err = ext4_end_io_end(io_end);
@@ -262,7 +295,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
}
/*
- * work on completed IO, to convert unwritten extents to extents
+ * Used to convert unwritten extents to written extents upon IO completion,
+ * or used to abort the journal upon IO errors.
*/
void ext4_end_io_rsv_work(struct work_struct *work)
{
@@ -287,29 +321,22 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
if (refcount_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
- list_empty(&io_end->list_vec)) {
- ext4_release_io_end(io_end);
- return;
- }
- ext4_add_complete_io(io_end);
+ if (ext4_io_end_defer_completion(io_end))
+ return ext4_add_complete_io(io_end);
+
+ ext4_release_io_end(io_end);
}
}
int ext4_put_io_end(ext4_io_end_t *io_end)
{
- int err = 0;
-
if (refcount_dec_and_test(&io_end->count)) {
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
- err = ext4_convert_unwritten_io_end_vec(io_end->handle,
- io_end);
- io_end->handle = NULL;
- ext4_clear_io_unwritten_flag(io_end);
- }
+ if (ext4_io_end_defer_completion(io_end))
+ return ext4_end_io_end(io_end);
+
ext4_release_io_end(io_end);
}
- return err;
+ return 0;
}
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
@@ -323,10 +350,9 @@ static void ext4_end_bio(struct bio *bio)
{
ext4_io_end_t *io_end = bio->bi_private;
sector_t bi_sector = bio->bi_iter.bi_sector;
- char b[BDEVNAME_SIZE];
- if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
- bio_devname(bio, b),
+ if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
+ bio->bi_bdev,
(long long) bio->bi_iter.bi_sector,
(unsigned) bio_sectors(bio),
bio->bi_status)) {
@@ -344,11 +370,12 @@ static void ext4_end_bio(struct bio *bio)
bio->bi_status, inode->i_ino,
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
+ io_end->flag |= EXT4_IO_END_FAILED;
mapping_set_error(inode->i_mapping,
blk_status_to_errno(bio->bi_status));
}
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ if (ext4_io_end_defer_completion(io_end)) {
/*
* Link bio into list hanging from io_end. We have to do it
* atomically as bio completions can be racing against each
@@ -372,10 +399,8 @@ void ext4_io_submit(struct ext4_io_submit *io)
struct bio *bio = io->io_bio;
if (bio) {
- int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
- REQ_SYNC : 0;
- io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
- bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
+ if (io->io_wbc->sync_mode == WB_SYNC_ALL)
+ io->io_bio->bi_opf |= REQ_SYNC;
submit_bio(io->io_bio);
}
io->io_bio = NULL;
@@ -398,10 +423,9 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+ bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
@@ -411,11 +435,10 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
static void io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
- struct page *page,
+ struct folio *folio,
+ struct folio *io_folio,
struct buffer_head *bh)
{
- int ret;
-
if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
!fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
@@ -425,55 +448,46 @@ submit_and_retry:
io_submit_init_bio(io, bh);
io->io_bio->bi_write_hint = inode->i_write_hint;
}
- ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
- if (ret != bh->b_size)
+ if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
goto submit_and_retry;
- wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
+ wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
io->io_next_block++;
}
-int ext4_bio_write_page(struct ext4_io_submit *io,
- struct page *page,
- int len,
- bool keep_towrite)
+int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
+ size_t len)
{
- struct page *bounce_page = NULL;
- struct inode *inode = page->mapping->host;
+ struct folio *io_folio = folio;
+ struct inode *inode = folio->mapping->host;
unsigned block_start;
struct buffer_head *bh, *head;
int ret = 0;
- int nr_submitted = 0;
int nr_to_submit = 0;
struct writeback_control *wbc = io->io_wbc;
+ bool keep_towrite = false;
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
-
- if (keep_towrite)
- set_page_writeback_keepwrite(page);
- else
- set_page_writeback(page);
- ClearPageError(page);
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(folio_test_writeback(folio));
/*
- * Comments copied from block_write_full_page:
+ * Comments copied from block_write_full_folio:
*
- * The page straddles i_size. It must be zeroed out on each and every
+ * The folio straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- if (len < PAGE_SIZE)
- zero_user_segment(page, len, PAGE_SIZE);
+ if (len < folio_size(folio))
+ folio_zero_segment(folio, len, folio_size(folio));
/*
* In the first loop we prepare and mark buffers to submit. We have to
- * mark all buffers in the page before submitting so that
- * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
+ * mark all buffers in the folio before submitting so that
+ * folio_end_writeback() cannot be called from ext4_end_bio() when IO
* on the first buffer finishes and we are still working on submitting
* the second buffer.
*/
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
do {
block_start = bh_offset(bh);
if (block_start >= len) {
@@ -486,17 +500,34 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
/* A hole? We can safely clear the dirty bit */
if (!buffer_mapped(bh))
clear_buffer_dirty(bh);
- if (io->io_bio)
- ext4_io_submit(io);
+ /*
+ * Keeping dirty some buffer we cannot write? Make sure
+ * to redirty the folio and keep TOWRITE tag so that
+ * racing WB_SYNC_ALL writeback does not skip the folio.
+ * This happens e.g. when doing writeout for
+ * transaction commit or when journalled data is not
+ * yet committed.
+ */
+ if (buffer_dirty(bh) ||
+ (buffer_jbd(bh) && buffer_jbddirty(bh))) {
+ if (!folio_test_dirty(folio))
+ folio_redirty_for_writepage(wbc, folio);
+ keep_towrite = true;
+ }
continue;
}
if (buffer_new(bh))
clear_buffer_new(bh);
set_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
nr_to_submit++;
} while ((bh = bh->b_this_page) != head);
- bh = head = page_buffers(page);
+ /* Nothing to submit? Just unlock the folio... */
+ if (!nr_to_submit)
+ return 0;
+
+ bh = head = folio_buffers(folio);
/*
* If any blocks are being written to an encrypted file, encrypt them
@@ -505,9 +536,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
* (e.g. holes) to be unnecessarily encrypted, but this is rare and
* can't happen in the common case of blocksize == PAGE_SIZE.
*/
- if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
+ if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
gfp_t gfp_flags = GFP_NOFS;
unsigned int enc_bytes = round_up(len, i_blocksize(inode));
+ struct page *bounce_page;
/*
* Since bounce page allocation uses a mempool, we can only use
@@ -515,47 +547,47 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
* first page of the bio. Otherwise it can deadlock.
*/
if (io->io_bio)
- gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
+ gfp_flags = GFP_NOWAIT;
retry_encrypt:
- bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
- 0, gfp_flags);
+ bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
+ enc_bytes, 0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
if (ret == -ENOMEM &&
(io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
- gfp_flags = GFP_NOFS;
+ gfp_t new_gfp_flags = GFP_NOFS;
if (io->io_bio)
ext4_io_submit(io);
else
- gfp_flags |= __GFP_NOFAIL;
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ new_gfp_flags |= __GFP_NOFAIL;
+ memalloc_retry_wait(gfp_flags);
+ gfp_flags = new_gfp_flags;
goto retry_encrypt;
}
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
do {
- clear_buffer_async_write(bh);
+ if (buffer_async_write(bh)) {
+ clear_buffer_async_write(bh);
+ set_buffer_dirty(bh);
+ }
bh = bh->b_this_page;
} while (bh != head);
- goto unlock;
+
+ return ret;
}
+ io_folio = page_folio(bounce_page);
}
+ __folio_start_writeback(folio, keep_towrite);
+
/* Now submit buffers to write */
do {
if (!buffer_async_write(bh))
continue;
- io_submit_add_bh(io, inode,
- bounce_page ? bounce_page : page, bh);
- nr_submitted++;
- clear_buffer_dirty(bh);
+ io_submit_add_bh(io, inode, folio, io_folio, bh);
} while ((bh = bh->b_this_page) != head);
-unlock:
- unlock_page(page);
- /* Nothing submitted - we have to end page writeback */
- if (!nr_submitted)
- end_page_writeback(page);
- return ret;
+ return 0;
}
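
For illustration only, not part of the patch: a minimal userspace C sketch of the i_size-straddling logic in ext4_bio_write_folio() above, where the tail of the folio beyond len is zeroed and buffers whose offset is at or past len are never submitted. The folio/block sizes below are arbitrary assumptions.

#include <stdio.h>

/*
 * Userspace model (assumption; not kernel code) of the per-buffer decision
 * in ext4_bio_write_folio(): zero the folio tail past "len" (the valid
 * bytes, i.e. up to i_size) and skip buffers that start at or beyond it.
 */
int main(void)
{
	const unsigned int folio_size = 16384;	/* e.g. a 4-page folio */
	const unsigned int block_size = 4096;
	const unsigned int len = 9000;		/* valid bytes in the folio */
	unsigned int block_start;

	if (len < folio_size)
		printf("zero bytes [%u, %u)\n", len, folio_size);

	for (block_start = 0; block_start < folio_size;
	     block_start += block_size) {
		if (block_start >= len)
			printf("block at %u: beyond EOF, skipped\n",
			       block_start);
		else
			printf("block at %u: submitted for write\n",
			       block_start);
	}
	return 0;
}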
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3db923403505..e7f2350c725b 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -43,7 +43,6 @@
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
-#include <linux/cleancache.h>
#include "ext4.h"
@@ -69,23 +68,10 @@ struct bio_post_read_ctx {
static void __read_end_io(struct bio *bio)
{
- struct page *page;
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bv, bio, iter_all) {
- page = bv->bv_page;
-
- /* PG_error was set if any post_read step failed */
- if (bio->bi_status || PageError(page)) {
- ClearPageUptodate(page);
- /* will re-read again later */
- ClearPageError(page);
- } else {
- SetPageUptodate(page);
- }
- unlock_page(page);
- }
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio)
+ folio_end_read(fi.folio, bio->bi_status == 0);
if (bio->bi_private)
mempool_free(bio->bi_private, bio_post_read_ctx_pool);
bio_put(bio);
@@ -97,10 +83,12 @@ static void decrypt_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
+ struct bio *bio = ctx->bio;
- fscrypt_decrypt_bio(ctx->bio);
-
- bio_post_read_processing(ctx);
+ if (fscrypt_decrypt_bio(bio))
+ bio_post_read_processing(ctx);
+ else
+ __read_end_io(bio);
}
static void verity_work(struct work_struct *work)
@@ -110,7 +98,7 @@ static void verity_work(struct work_struct *work)
struct bio *bio = ctx->bio;
/*
- * fsverity_verify_bio() may call readpages() again, and although verity
+ * fsverity_verify_bio() may call readahead() again, and although verity
* will be disabled for that, decryption may still be needed, causing
* another bio_post_read_ctx to be allocated. So to guarantee that
* mempool_alloc() never deadlocks we must free the current ctx first.
@@ -164,7 +152,7 @@ static bool bio_post_read_required(struct bio *bio)
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
- * back to block_read_full_page().
+ * back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
@@ -214,54 +202,55 @@ static void ext4_set_bio_post_read_ctx(struct bio *bio,
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
- if (IS_ENABLED(CONFIG_FS_VERITY) &&
- (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
+ if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
return inode->i_sb->s_maxbytes;
return i_size_read(inode);
}
int ext4_mpage_readpages(struct inode *inode,
- struct readahead_control *rac, struct page *page)
+ struct readahead_control *rac, struct folio *folio)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
-
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t next_block;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
struct block_device *bdev = inode->i_sb->s_bdev;
int length;
unsigned relative_block = 0;
struct ext4_map_blocks map;
- unsigned int nr_pages = rac ? readahead_count(rac) : 1;
+ unsigned int nr_pages, folio_pages;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
- for (; nr_pages; nr_pages--) {
+ nr_pages = rac ? readahead_count(rac) : folio_nr_pages(folio);
+ for (; nr_pages; nr_pages -= folio_pages) {
int fully_mapped = 1;
- unsigned first_hole = blocks_per_page;
+ unsigned int first_hole;
+ unsigned int blocks_per_folio;
- if (rac) {
- page = readahead_page(rac);
- prefetchw(&page->flags);
- }
+ if (rac)
+ folio = readahead_folio(rac);
+
+ folio_pages = folio_nr_pages(folio);
+ prefetchw(&folio->flags);
- if (page_has_buffers(page))
+ if (folio_buffers(folio))
goto confused;
- block_in_file = next_block =
- (sector_t)page->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + nr_pages * blocks_per_page;
+ blocks_per_folio = folio_size(folio) >> blkbits;
+ first_hole = blocks_per_folio;
+ block_in_file = next_block = EXT4_PG_TO_LBLK(inode, folio->index);
+ last_block = EXT4_PG_TO_LBLK(inode, folio->index + nr_pages);
last_block_in_file = (ext4_readpage_limit(inode) +
blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
@@ -277,16 +266,15 @@ int ext4_mpage_readpages(struct inode *inode,
unsigned map_offset = block_in_file - map.m_lblk;
unsigned last = map.m_len - map_offset;
+ first_block = map.m_pblk + map_offset;
for (relative_block = 0; ; relative_block++) {
if (relative_block == last) {
/* needed? */
map.m_flags &= ~EXT4_MAP_MAPPED;
break;
}
- if (page_block == blocks_per_page)
+ if (page_block == blocks_per_folio)
break;
- blocks[page_block] = map.m_pblk + map_offset +
- relative_block;
page_block++;
block_in_file++;
}
@@ -294,73 +282,67 @@ int ext4_mpage_readpages(struct inode *inode,
/*
* Then do more ext4_map_blocks() calls until we are
- * done with this page.
+ * done with this folio.
*/
- while (page_block < blocks_per_page) {
+ while (page_block < blocks_per_folio) {
if (block_in_file < last_block) {
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
set_error_page:
- SetPageError(page);
- zero_user_segment(page, 0,
- PAGE_SIZE);
- unlock_page(page);
+ folio_zero_segment(folio, 0,
+ folio_size(folio));
+ folio_unlock(folio);
goto next_page;
}
}
if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
fully_mapped = 0;
- if (first_hole == blocks_per_page)
+ if (first_hole == blocks_per_folio)
first_hole = page_block;
page_block++;
block_in_file++;
continue;
}
- if (first_hole != blocks_per_page)
+ if (first_hole != blocks_per_folio)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != map.m_pblk-1)
+ if (!page_block)
+ first_block = map.m_pblk;
+ else if (first_block + page_block != map.m_pblk)
goto confused;
for (relative_block = 0; ; relative_block++) {
if (relative_block == map.m_len) {
/* needed? */
map.m_flags &= ~EXT4_MAP_MAPPED;
break;
- } else if (page_block == blocks_per_page)
+ } else if (page_block == blocks_per_folio)
break;
- blocks[page_block] = map.m_pblk+relative_block;
page_block++;
block_in_file++;
}
}
- if (first_hole != blocks_per_page) {
- zero_user_segment(page, first_hole << blkbits,
- PAGE_SIZE);
+ if (first_hole != blocks_per_folio) {
+ folio_zero_segment(folio, first_hole << blkbits,
+ folio_size(folio));
if (first_hole == 0) {
- if (ext4_need_verity(inode, page->index) &&
- !fsverity_verify_page(page))
+ if (ext4_need_verity(inode, folio->index) &&
+ !fsverity_verify_folio(folio))
goto set_error_page;
- SetPageUptodate(page);
- unlock_page(page);
- goto next_page;
+ folio_end_read(folio, true);
+ continue;
}
} else if (fully_mapped) {
- SetPageMappedToDisk(page);
- }
- if (fully_mapped && blocks_per_page == 1 &&
- !PageUptodate(page) && cleancache_get_page(page) == 0) {
- SetPageUptodate(page);
- goto confused;
+ folio_set_mappedtodisk(folio);
}
/*
- * This page will go to BIO. Do we need to send this
+ * This folio will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != blocks[0] - 1 ||
+ if (bio && (last_block_in_bio != first_block - 1 ||
!fscrypt_mergeable_bio(bio, inode, next_block))) {
submit_and_realloc:
submit_bio(bio);
@@ -371,41 +353,40 @@ int ext4_mpage_readpages(struct inode *inode,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
+ bio = bio_alloc(bdev, bio_max_segs(nr_pages),
+ REQ_OP_READ, GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
- ext4_set_bio_post_read_ctx(bio, inode, page->index);
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ ext4_set_bio_post_read_ctx(bio, inode, folio->index);
+ bio->bi_iter.bi_sector = first_block << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ,
- rac ? REQ_RAHEAD : 0);
+ if (rac)
+ bio->bi_opf |= REQ_RAHEAD;
}
length = first_hole << blkbits;
- if (bio_add_page(bio, page, length, 0) < length)
+ if (!bio_add_folio(bio, folio, length, 0))
goto submit_and_realloc;
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
- (first_hole != blocks_per_page)) {
+ (first_hole != blocks_per_folio)) {
submit_bio(bio);
bio = NULL;
} else
- last_block_in_bio = blocks[blocks_per_page - 1];
- goto next_page;
+ last_block_in_bio = first_block + blocks_per_folio - 1;
+ continue;
confused:
if (bio) {
submit_bio(bio);
bio = NULL;
}
- if (!PageUptodate(page))
- block_read_full_page(page, ext4_get_block);
+ if (!folio_test_uptodate(folio))
+ block_read_full_folio(folio, ext4_get_block);
else
- unlock_page(page);
- next_page:
- if (rac)
- put_page(page);
+ folio_unlock(folio);
+next_page:
+ ; /* A label shall be followed by a statement until C23 */
}
if (bio)
submit_bio(bio);
@@ -414,9 +395,8 @@ int ext4_mpage_readpages(struct inode *inode,
int __init ext4_init_post_read_processing(void)
{
- bio_post_read_ctx_cache =
- kmem_cache_create("ext4_bio_post_read_ctx",
- sizeof(struct bio_post_read_ctx), 0, 0, NULL);
+ bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);
+
if (!bio_post_read_ctx_cache)
goto fail;
bio_post_read_ctx_pool =
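
For illustration only, not part of the patch: a minimal userspace sketch of the contiguity tracking that lets the old blocks[] array be replaced by a single first_block in ext4_mpage_readpages(). The helper name and the sample mappings are made up.

#include <stdbool.h>
#include <stdio.h>

/*
 * A folio can go into one bio only if every logical block in it maps to
 * a physically contiguous run: remember the physical block backing the
 * first logical block (first_block) and require each later mapping to
 * equal first_block + page_block, as the patch does.
 */
static bool folio_is_contiguous(const unsigned long long *pblk,
				unsigned int nblocks,
				unsigned long long *first_block)
{
	unsigned int page_block;

	for (page_block = 0; page_block < nblocks; page_block++) {
		if (page_block == 0)
			*first_block = pblk[0];
		else if (*first_block + page_block != pblk[page_block])
			return false;	/* "confused" fallback path */
	}
	return true;
}

int main(void)
{
	unsigned long long contiguous[] = { 1000, 1001, 1002, 1003 };
	unsigned long long broken[]     = { 1000, 1001, 5000, 5001 };
	unsigned long long first;

	printf("%d\n", folio_is_contiguous(contiguous, 4, &first)); /* 1 */
	printf("%d\n", folio_is_contiguous(broken, 4, &first));     /* 0 */
	return 0;
}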
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index b63cb88ccdae..050f26168d97 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -10,10 +10,9 @@
*/
-#define EXT4FS_DEBUG
-
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include "ext4_jbd2.h"
@@ -53,14 +52,24 @@ int ext4_resize_begin(struct super_block *sb)
return -EPERM;
/*
+ * If the number of reserved GDT blocks is non-zero, the resize_inode
+ * feature should always be set.
+ */
+ if (sbi->s_es->s_reserved_gdt_blocks &&
+ !ext4_has_feature_resize_inode(sb)) {
+ ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
+ return -EFSCORRUPTED;
+ }
+
+ /*
* If we are not using the primary superblock/GDT copy don't resize,
* because the user tools have no way of handling this. Probably a
* bad time to do it anyways.
*/
if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+ le32_to_cpu(sbi->s_es->s_first_data_block)) {
ext4_warning(sb, "won't resize using backup superblock at %llu",
- (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+ (unsigned long long)sbi->s_sbh->b_blocknr);
return -EPERM;
}
@@ -68,7 +77,7 @@ int ext4_resize_begin(struct super_block *sb)
* We are not allowed to do online-resizing on a filesystem mounted
* with error, because it can destroy the filesystem easily.
*/
- if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ if (sbi->s_mount_state & EXT4_ERROR_FS) {
ext4_warning(sb, "There are errors in the filesystem, "
"so online resizing is not allowed");
return -EPERM;
@@ -80,28 +89,19 @@ int ext4_resize_begin(struct super_block *sb)
}
if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
- &EXT4_SB(sb)->s_ext4_flags))
+ &sbi->s_ext4_flags))
ret = -EBUSY;
return ret;
}
-void ext4_resize_end(struct super_block *sb)
+int ext4_resize_end(struct super_block *sb, bool update_backups)
{
clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
smp_mb__after_atomic();
-}
-
-static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
- ext4_group_t group) {
- return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
- EXT4_DESC_PER_BLOCK_BITS(sb);
-}
-
-static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
- ext4_group_t group) {
- group = ext4_meta_bg_first_group(sb, group);
- return ext4_group_first_block_no(sb, group);
+ if (update_backups)
+ return ext4_update_overhead(sb, true);
+ return 0;
}
static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
@@ -140,8 +140,9 @@ static int verify_group_input(struct super_block *sb,
overhead = ext4_group_overhead_blocks(sb, group);
metaend = start + overhead;
- input->free_clusters_count = free_blocks_count =
- input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
+ free_blocks_count = input->blocks_count - 2 - overhead -
+ sbi->s_itb_per_group;
+ input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count);
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
@@ -217,35 +218,55 @@ struct ext4_new_flex_group_data {
in the flex group */
__u16 *bg_flags; /* block group flags of groups
in @groups */
+ ext4_group_t resize_bg; /* number of allocated
+ new_group_data */
ext4_group_t count; /* number of groups in @groups
*/
};
/*
- * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
- * @flexbg_size.
+ * Cap the number of groups added at a time to avoid memory allocation failures.
+ */
+#define MAX_RESIZE_BG 16384
+
+/*
+ * alloc_flex_gd() allocates an ext4_new_flex_group_data that satisfies the
+ * resizing from @o_group to @n_group; its size is typically @flexbg_size.
*
* Returns NULL on failure otherwise address of the allocated structure.
*/
-static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size,
+ ext4_group_t o_group, ext4_group_t n_group)
{
+ ext4_group_t last_group;
+ unsigned int max_resize_bg;
struct ext4_new_flex_group_data *flex_gd;
flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
if (flex_gd == NULL)
goto out3;
- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
- goto out2;
- flex_gd->count = flexbg_size;
+ max_resize_bg = umin(flexbg_size, MAX_RESIZE_BG);
+ flex_gd->resize_bg = max_resize_bg;
+
+ /* Avoid allocating large 'groups' array if not needed */
+ last_group = o_group | (flex_gd->resize_bg - 1);
+ if (n_group <= last_group)
+ flex_gd->resize_bg = 1 << fls(n_group - o_group);
+ else if (n_group - last_group < flex_gd->resize_bg)
+ flex_gd->resize_bg = 1 << max(fls(last_group - o_group),
+ fls(n_group - last_group));
- flex_gd->groups = kmalloc_array(flexbg_size,
+ if (WARN_ON_ONCE(flex_gd->resize_bg > max_resize_bg))
+ flex_gd->resize_bg = max_resize_bg;
+
+ flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
sizeof(struct ext4_new_group_data),
GFP_NOFS);
if (flex_gd->groups == NULL)
goto out2;
- flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
+ flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
GFP_NOFS);
if (flex_gd->bg_flags == NULL)
goto out1;
@@ -282,7 +303,7 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
*/
static int ext4_alloc_group_tables(struct super_block *sb,
struct ext4_new_flex_group_data *flex_gd,
- int flexbg_size)
+ unsigned int flexbg_size)
{
struct ext4_new_group_data *group_data = flex_gd->groups;
ext4_fsblk_t start_blk;
@@ -383,12 +404,12 @@ next_group:
group = group_data[0].group;
printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
- "%d groups, flexbg size is %d:\n", flex_gd->count,
+ "%u groups, flexbg size is %u:\n", flex_gd->count,
flexbg_size);
for (i = 0; i < flex_gd->count; i++) {
ext4_debug(
- "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
+ "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
ext4_bg_has_super(sb, group + i) ? "normal" :
"no-super", group + i,
group_data[i].blocks_count,
@@ -446,8 +467,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
last_cluster);
- for (count2 = count; count > 0;
- count -= count2, first_cluster += count2) {
+ for (; count > 0; count -= count2, first_cluster += count2) {
ext4_fsblk_t start;
struct buffer_head *bh;
ext4_group_t group;
@@ -483,7 +503,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
}
ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
first_cluster, first_cluster - start, count2);
- ext4_set_bits(bh->b_data, first_cluster - start, count2);
+ mb_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
@@ -546,13 +566,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
goto handle_itb;
- if (meta_bg == 1) {
- ext4_group_t first_group;
- first_group = ext4_meta_bg_first_group(sb, group);
- if (first_group != group + 1 &&
- first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
- goto handle_itb;
- }
+ if (meta_bg == 1)
+ goto handle_itb;
block = start + ext4_bg_has_super(sb, group);
/* Copy all of the GDT blocks into the backup in this group */
@@ -600,7 +615,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
}
handle_itb:
- /* Initialize group tables of the grop @group */
+ /* Initialize group tables of the group @group */
if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
goto handle_bb;
@@ -632,7 +647,7 @@ handle_bb:
if (overhead != 0) {
ext4_debug("mark backup superblock %#04llx (+0)\n",
start);
- ext4_set_bits(bh->b_data, 0,
+ mb_set_bits(bh->b_data, 0,
EXT4_NUM_B2C(sbi, overhead));
}
ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
@@ -690,16 +705,14 @@ handle_ib:
block = start;
}
- if (count) {
- err = set_flexbg_block_bitmap(sb, handle,
- flex_gd,
- EXT4_B2C(sbi, start),
- EXT4_B2C(sbi,
- start + count
- - 1));
- if (err)
- goto out;
- }
+ err = set_flexbg_block_bitmap(sb, handle,
+ flex_gd,
+ EXT4_B2C(sbi, start),
+ EXT4_B2C(sbi,
+ start + count
+ - 1));
+ if (err)
+ goto out;
}
out:
@@ -717,12 +730,23 @@ out:
* sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
* For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
*/
-static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
- unsigned *five, unsigned *seven)
+unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
+ unsigned int *five, unsigned int *seven)
{
- unsigned *min = three;
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ unsigned int *min = three;
int mult = 3;
- unsigned ret;
+ unsigned int ret;
+
+ if (ext4_has_feature_sparse_super2(sb)) {
+ do {
+ if (*min > 2)
+ return UINT_MAX;
+ ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
+ *min += 1;
+ } while (!ret);
+ return ret;
+ }
if (!ext4_has_feature_sparse_super(sb)) {
ret = *min;
@@ -927,7 +951,13 @@ errout:
}
/*
- * add_new_gdb_meta_bg is the sister of add_new_gdb.
+ * If there is no available space in the existing block group descriptors for
+ * the new block group and there are no reserved block group descriptors, then
+ * the meta_bg feature gets enabled and es->s_first_meta_bg is set to the
+ * first block group managed using meta_bg; s_first_meta_bg must be a
+ * multiple of EXT4_DESC_PER_BLOCK(sb).
+ * This function is called when the first group of a meta_bg is added, to
+ * bring in the new group descriptor block of the newly added meta_bg.
*/
static int add_new_gdb_meta_bg(struct super_block *sb,
handle_t *handle, ext4_group_t group) {
@@ -937,8 +967,8 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
int err;
- gdblock = ext4_meta_bg_first_block_no(sb, group) +
- ext4_bg_has_super(sb, group);
+ gdblock = ext4_group_first_block_no(sb, group) +
+ ext4_bg_has_super(sb, group);
gdb_bh = ext4_sb_bread(sb, gdblock, 0);
if (IS_ERR(gdb_bh))
return PTR_ERR(gdb_bh);
@@ -1062,9 +1092,6 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
for (i = 0; i < reserved_gdb; i++) {
int err2;
data = (__le32 *)primary[i]->b_data;
- /* printk("reserving backup %lu[%u] = %lu\n",
- primary[i]->b_blocknr, gdbackups,
- blk + primary[i]->b_blocknr); */
data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
if (!err)
@@ -1085,6 +1112,16 @@ exit_free:
return err;
}
+static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
+ ext4_group_t group)
+{
+ struct ext4_super_block *es = (struct ext4_super_block *) data;
+
+ es->s_block_group_nr = cpu_to_le16(group);
+ if (ext4_has_feature_metadata_csum(sb))
+ es->s_checksum = ext4_superblock_csum(es);
+}
+
/*
* Update the backup copies of the ext4 metadata. These don't need to be part
* of the main resize transaction, because e2fsck will re-write them if there
@@ -1133,6 +1170,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
while (group < sbi->s_groups_count) {
struct buffer_head *bh;
ext4_fsblk_t backup_block;
+ int has_super = ext4_bg_has_super(sb, group);
+ ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);
/* Out of journal space, and can't get more - abort - so sad */
err = ext4_resize_ensure_credits_batch(handle, 1);
@@ -1142,8 +1181,7 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
if (meta_bg == 0)
backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
else
- backup_block = (ext4_group_first_block_no(sb, group) +
- ext4_bg_has_super(sb, group));
+ backup_block = first_block + has_super;
bh = sb_getblk(sb, backup_block);
if (unlikely(!bh)) {
@@ -1155,12 +1193,16 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
ext4_group_first_block_no(sb, group));
BUFFER_TRACE(bh, "get_write_access");
if ((err = ext4_journal_get_write_access(handle, sb, bh,
- EXT4_JTR_NONE)))
+ EXT4_JTR_NONE))) {
+ brelse(bh);
break;
+ }
lock_buffer(bh);
memcpy(bh->b_data, data, size);
if (rest)
memset(bh->b_data + size, 0, rest);
+ if (has_super && (backup_block == first_block))
+ ext4_set_block_group_nr(sb, bh->b_data, group);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
@@ -1258,7 +1300,7 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
if (unlikely(!bh))
return NULL;
if (!bh_uptodate_or_lock(bh)) {
- if (ext4_read_bh(bh, 0, NULL) < 0) {
+ if (ext4_read_bh(bh, 0, NULL, false) < 0) {
brelse(bh);
return NULL;
}
@@ -1268,26 +1310,24 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
}
static int ext4_set_bitmap_checksums(struct super_block *sb,
- ext4_group_t group,
struct ext4_group_desc *gdp,
struct ext4_new_group_data *group_data)
{
struct buffer_head *bh;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 0;
bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
if (!bh)
return -EIO;
- ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, bh);
brelse(bh);
bh = ext4_get_bitmap(sb, group_data->block_bitmap);
if (!bh)
return -EIO;
- ext4_block_bitmap_csum_set(sb, group, gdp, bh);
+ ext4_block_bitmap_csum_set(sb, gdp, bh);
brelse(bh);
return 0;
@@ -1325,7 +1365,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
memset(gdp, 0, EXT4_DESC_SIZE(sb));
ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
- err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
+ err = ext4_set_bitmap_checksums(sb, gdp, group_data);
if (err) {
ext4_std_error(sb, err);
break;
@@ -1358,6 +1398,17 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
return err;
}
+static void ext4_add_overhead(struct super_block *sb,
+ const ext4_fsblk_t overhead)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+
+ sbi->s_overhead += overhead;
+ es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
+ smp_wmb();
+}
+
/*
* ext4_update_super() updates the super block so that the newly added
* groups can be seen by the filesystem.
@@ -1435,8 +1486,6 @@ static void ext4_update_super(struct super_block *sb,
* active. */
ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
reserved_blocks);
- ext4_superblock_csum_set(sb);
- unlock_buffer(sbi->s_sbh);
/* Update the free space counts */
percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -1459,10 +1508,21 @@ static void ext4_update_super(struct super_block *sb,
}
/*
- * Update the fs overhead information
+ * Update the fs overhead information.
+ *
+ * For bigalloc, if the superblock already has a properly calculated
+ * overhead, update it with a value based on numbers already computed
+ * above for the newly allocated capacity.
*/
- ext4_calculate_overhead(sb);
+ if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
+ ext4_add_overhead(sb,
+ EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
+ else
+ ext4_calculate_overhead(sb);
+ es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: added group %u:"
"%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
@@ -1543,21 +1603,20 @@ exit_journal:
int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
int gdb_num_end = ((group + flex_gd->count - 1) /
EXT4_DESC_PER_BLOCK(sb));
- int meta_bg = ext4_has_feature_meta_bg(sb);
- sector_t old_gdb = 0;
+ int meta_bg = ext4_has_feature_meta_bg(sb) &&
+ gdb_num >= le32_to_cpu(es->s_first_meta_bg);
+ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
+ ext4_group_first_block_no(sb, 0);
- update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
- sizeof(struct ext4_super_block), 0);
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
+ (char *)es, sizeof(struct ext4_super_block), 0);
for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
gdb_num);
- if (old_gdb == gdb_bh->b_blocknr)
- continue;
- update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
- gdb_bh->b_size, meta_bg);
- old_gdb = gdb_bh->b_blocknr;
+ update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
+ gdb_bh->b_data, gdb_bh->b_size, meta_bg);
}
}
exit:
@@ -1566,8 +1625,7 @@ exit:
static int ext4_setup_next_flex_gd(struct super_block *sb,
struct ext4_new_flex_group_data *flex_gd,
- ext4_fsblk_t n_blocks_count,
- unsigned long flexbg_size)
+ ext4_fsblk_t n_blocks_count)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
@@ -1591,7 +1649,7 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
BUG_ON(last);
ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
- last_group = group | (flexbg_size - 1);
+ last_group = group | (flex_gd->resize_bg - 1);
if (last_group > n_group)
last_group = n_group;
@@ -1758,7 +1816,7 @@ errout:
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
"blocks\n", ext4_blocks_count(es));
- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
(char *)es, sizeof(struct ext4_super_block), 0);
}
return err;
@@ -1781,7 +1839,6 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_grpblk_t last;
ext4_grpblk_t add;
struct buffer_head *bh;
- int err;
ext4_group_t group;
o_blocks_count = ext4_blocks_count(es);
@@ -1836,8 +1893,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
}
brelse(bh);
- err = ext4_group_extend_no_check(sb, o_blocks_count, add);
- return err;
+ return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */
@@ -1925,9 +1981,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
errout:
ret = ext4_journal_stop(handle);
- if (!err)
- err = ret;
- return ret;
+ return err ? err : ret;
invalid_resize_inode:
ext4_error(sb, "corrupted/inconsistent resize inode");
@@ -1955,8 +2009,9 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
ext4_fsblk_t o_blocks_count;
ext4_fsblk_t n_blocks_count_retry = 0;
unsigned long last_update_time = 0;
- int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
+ int err = 0;
int meta_bg;
+ unsigned int flexbg_size = ext4_flex_bg_size(sbi);
/* See if the device is actually as big as what was requested */
bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
@@ -1966,6 +2021,16 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
}
brelse(bh);
+ /*
+ * For bigalloc, trim the requested size to the nearest cluster
+ * boundary to avoid creating an unusable filesystem. We do this
+ * silently, instead of returning an error, to avoid breaking
+ * callers that blindly resize the filesystem to the full size of
+ * the underlying block device.
+ */
+ if (ext4_has_feature_bigalloc(sb))
+ n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
+
retry:
o_blocks_count = ext4_blocks_count(es);
@@ -2021,7 +2086,7 @@ retry:
}
}
- if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
+ if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) || n_blocks_count == o_blocks_count) {
err = ext4_convert_meta_bg(sb, resize_inode);
if (err)
goto out;
@@ -2067,7 +2132,7 @@ retry:
goto out;
}
- if (ext4_blocks_count(es) == n_blocks_count)
+ if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
goto out;
err = ext4_alloc_flex_bg_array(sb, n_group + 1);
@@ -2078,7 +2143,7 @@ retry:
if (err)
goto out;
- flex_gd = alloc_flex_gd(flexbg_size);
+ flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
if (flex_gd == NULL) {
err = -ENOMEM;
goto out;
@@ -2087,9 +2152,8 @@ retry:
/* Add flex groups. Note that a regular group is a
* flex group with 1 group.
*/
- while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
- flexbg_size)) {
- if (jiffies - last_update_time > HZ * 10) {
+ while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
+ if (time_is_before_jiffies(last_update_time + HZ * 10)) {
if (last_update_time)
ext4_msg(sb, KERN_INFO,
"resized to %llu blocks",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4e33b5eca694..87205660c5d0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -39,7 +39,6 @@
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
-#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
@@ -47,6 +46,8 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include "ext4.h"
#include "ext4_extents.h" /* Needed for trace points definition */
@@ -73,19 +74,26 @@ static int ext4_mark_recovery_complete(struct super_block *sb,
static int ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
-static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
-static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
-static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
unsigned int journal_inum);
+static int ext4_validate_options(struct fs_context *fc);
+static int ext4_check_opt_consistency(struct fs_context *fc,
+ struct super_block *sb);
+static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
+static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
+static int ext4_get_tree(struct fs_context *fc);
+static int ext4_reconfigure(struct fs_context *fc);
+static void ext4_fc_free(struct fs_context *fc);
+static int ext4_init_fs_context(struct fs_context *fc);
+static void ext4_kill_sb(struct super_block *sb);
+static const struct fs_parameter_spec ext4_param_specs[];
/*
* Lock ordering
@@ -113,37 +121,53 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* transaction start -> page lock(s) -> i_data_sem (rw)
*/
+static const struct fs_context_operations ext4_context_ops = {
+ .parse_param = ext4_parse_param,
+ .get_tree = ext4_get_tree,
+ .reconfigure = ext4_reconfigure,
+ .free = ext4_fc_free,
+};
+
+
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
- .owner = THIS_MODULE,
- .name = "ext2",
- .mount = ext4_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "ext2",
+ .init_fs_context = ext4_init_fs_context,
+ .parameters = ext4_param_specs,
+ .kill_sb = ext4_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
-#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
+#define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
static struct file_system_type ext3_fs_type = {
- .owner = THIS_MODULE,
- .name = "ext3",
- .mount = ext4_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "ext3",
+ .init_fs_context = ext4_init_fs_context,
+ .parameters = ext4_param_specs,
+ .kill_sb = ext4_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
-#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+#define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)
-static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
- bh_end_io_t *end_io)
+static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+ bh_end_io_t *end_io, bool simu_fail)
{
+ if (simu_fail) {
+ clear_buffer_uptodate(bh);
+ unlock_buffer(bh);
+ return;
+ }
+
/*
* buffer's verified bit is no longer valid after reading from
* disk again due to write out error, clear it to make sure we
@@ -153,11 +177,11 @@ static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
get_bh(bh);
- submit_bh(REQ_OP_READ, op_flags, bh);
+ submit_bh(REQ_OP_READ | op_flags, bh);
}
-void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
- bh_end_io_t *end_io)
+void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
+ bh_end_io_t *end_io, bool simu_fail)
{
BUG_ON(!buffer_locked(bh));
@@ -165,10 +189,11 @@ void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
unlock_buffer(bh);
return;
}
- __ext4_read_bh(bh, op_flags, end_io);
+ __ext4_read_bh(bh, op_flags, end_io, simu_fail);
}
-int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
+int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+ bh_end_io_t *end_io, bool simu_fail)
{
BUG_ON(!buffer_locked(bh));
@@ -177,7 +202,7 @@ int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
return 0;
}
- __ext4_read_bh(bh, op_flags, end_io);
+ __ext4_read_bh(bh, op_flags, end_io, simu_fail);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
@@ -185,21 +210,14 @@ int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
return -EIO;
}
-int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
+int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
- if (trylock_buffer(bh)) {
- if (wait)
- return ext4_read_bh(bh, op_flags, NULL);
- ext4_read_bh_nowait(bh, op_flags, NULL);
+ lock_buffer(bh);
+ if (!wait) {
+ ext4_read_bh_nowait(bh, op_flags, NULL, false);
return 0;
}
- if (wait) {
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return 0;
- return -EIO;
- }
- return 0;
+ return ext4_read_bh(bh, op_flags, NULL, false);
}
/*
@@ -209,8 +227,8 @@ int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
* return.
*/
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
- sector_t block, int op_flags,
- gfp_t gfp)
+ sector_t block,
+ blk_opf_t op_flags, gfp_t gfp)
{
struct buffer_head *bh;
int ret;
@@ -230,23 +248,40 @@ static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
}
struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
- int op_flags)
+ blk_opf_t op_flags)
{
- return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
+ gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
+ ~__GFP_FS) | __GFP_MOVABLE;
+
+ return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
}
struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
sector_t block)
{
- return __ext4_sb_bread_gfp(sb, block, 0, 0);
+ gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
+ ~__GFP_FS);
+
+ return __ext4_sb_bread_gfp(sb, block, 0, gfp);
+}
+
+struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
+ sector_t block)
+{
+ gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
+ ~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}
void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
- struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);
+ struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
+ sb->s_blocksize, GFP_NOWAIT);
if (likely(bh)) {
- ext4_read_bh_lock(bh, REQ_RAHEAD, false);
+ if (trylock_buffer(bh))
+ ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
brelse(bh);
}
}
@@ -260,14 +295,12 @@ static int ext4_verify_csum_type(struct super_block *sb,
return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
-static __le32 ext4_superblock_csum(struct super_block *sb,
- struct ext4_super_block *es)
+__le32 ext4_superblock_csum(struct ext4_super_block *es)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
int offset = offsetof(struct ext4_super_block, s_checksum);
__u32 csum;
- csum = ext4_chksum(sbi, ~0, (char *)es, offset);
+ csum = ext4_chksum(~0, (char *)es, offset);
return cpu_to_le32(csum);
}
@@ -275,20 +308,20 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
static int ext4_superblock_csum_verify(struct super_block *sb,
struct ext4_super_block *es)
{
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
- return es->s_checksum == ext4_superblock_csum(sb, es);
+ return es->s_checksum == ext4_superblock_csum(es);
}
void ext4_superblock_csum_set(struct super_block *sb)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
- if (!ext4_has_metadata_csum(sb))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
- es->s_checksum = ext4_superblock_csum(sb, es);
+ es->s_checksum = ext4_superblock_csum(es);
}
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
@@ -326,9 +359,9 @@ __u32 ext4_free_group_clusters(struct super_block *sb,
__u32 ext4_free_inodes_count(struct super_block *sb,
struct ext4_group_desc *bg)
{
- return le16_to_cpu(bg->bg_free_inodes_count_lo) |
+ return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
- (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
+ (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
}
__u32 ext4_used_dirs_count(struct super_block *sb,
@@ -382,9 +415,9 @@ void ext4_free_group_clusters_set(struct super_block *sb,
void ext4_free_inodes_set(struct super_block *sb,
struct ext4_group_desc *bg, __u32 count)
{
- bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
+ WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
- bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
+ WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
}
void ext4_used_dirs_set(struct super_block *sb,
@@ -422,63 +455,73 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
/*
- * The del_gendisk() function uninitializes the disk-specific data
- * structures, including the bdi structure, without telling anyone
- * else. Once this happens, any attempt to call mark_buffer_dirty()
- * (for example, by ext4_commit_super), will cause a kernel OOPS.
- * This is a kludge to prevent these oops until we can put in a proper
- * hook in del_gendisk() to inform the VFS and file system layers.
+ * The ext4_maybe_update_superblock() function checks and updates the
+ * superblock if needed.
+ *
+ * This function is designed to update the on-disk superblock only under
+ * certain conditions to prevent excessive disk writes and unnecessary
+ * waking of the disk from sleep. The superblock will be updated if:
+ * 1. More than sbi->s_sb_update_sec (def: 1 hour) has passed since the last
+ * superblock update
+ * 2. More than sbi->s_sb_update_kb kilobytes (def: 16MB) have been written
+ * since the last superblock update.
+ *
+ * @sb: The superblock
*/
-static int block_device_ejected(struct super_block *sb)
+static void ext4_maybe_update_superblock(struct super_block *sb)
{
- struct inode *bd_inode = sb->s_bdev->bd_inode;
- struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ journal_t *journal = sbi->s_journal;
+ time64_t now;
+ __u64 last_update;
+ __u64 lifetime_write_kbytes;
+ __u64 diff_size;
+
+ if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
+ !(sb->s_flags & SB_ACTIVE) || !journal ||
+ journal->j_flags & JBD2_UNMOUNT)
+ return;
+
+ now = ktime_get_real_seconds();
+ last_update = ext4_get_tstamp(es, s_wtime);
+
+ if (likely(now - last_update < sbi->s_sb_update_sec))
+ return;
+
+ lifetime_write_kbytes = sbi->s_kbytes_written +
+ ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
+ sbi->s_sectors_written_start) >> 1);
+
+ /* Get the number of kilobytes not written to disk to account
+ * for statistics and compare with a multiple of 16 MB. This
+ * is used to determine when the next superblock commit should
+ * occur (i.e. not more often than once per 16MB if there was
+ * less written in an hour).
+ */
+ diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);
- return bdi->dev == NULL;
+ if (diff_size > sbi->s_sb_update_kb)
+ schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
}
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
struct super_block *sb = journal->j_private;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- int error = is_journal_aborted(journal);
- struct ext4_journal_cb_entry *jce;
BUG_ON(txn->t_state == T_FINISHED);
ext4_process_freed_data(sb, txn->t_tid);
-
- spin_lock(&sbi->s_md_lock);
- while (!list_empty(&txn->t_private_list)) {
- jce = list_entry(txn->t_private_list.next,
- struct ext4_journal_cb_entry, jce_list);
- list_del_init(&jce->jce_list);
- spin_unlock(&sbi->s_md_lock);
- jce->jce_func(sb, jce, error);
- spin_lock(&sbi->s_md_lock);
- }
- spin_unlock(&sbi->s_md_lock);
+ ext4_maybe_update_superblock(sb);
}
-/*
- * This writepage callback for write_cache_pages()
- * takes care of a few cases after page cleaning.
- *
- * write_cache_pages() already checks for dirty pages
- * and calls clear_page_dirty_for_io(), which we want,
- * to write protect the pages.
- *
- * However, we may have to redirty a page (see below.)
- */
-static int ext4_journalled_writepage_callback(struct page *page,
- struct writeback_control *wbc,
- void *data)
+static bool ext4_journalled_writepage_needs_redirty(struct jbd2_inode *jinode,
+ struct folio *folio)
{
- transaction_t *transaction = (transaction_t *) data;
struct buffer_head *bh, *head;
struct journal_head *jh;
- bh = head = page_buffers(page);
+ bh = head = folio_buffers(folio);
do {
/*
* We have to redirty a page in these cases:
@@ -495,15 +538,12 @@ static int ext4_journalled_writepage_callback(struct page *page,
*/
jh = bh2jh(bh);
if (buffer_dirty(bh) ||
- (jh && (jh->b_transaction != transaction ||
- jh->b_next_transaction))) {
- redirty_page_for_writepage(wbc, page);
- goto out;
- }
+ (jh && (jh->b_transaction != jinode->i_transaction ||
+ jh->b_next_transaction)))
+ return true;
} while ((bh = bh->b_this_page) != head);
-out:
- return AOP_WRITEPAGE_ACTIVATE;
+ return false;
}
static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
@@ -515,10 +555,23 @@ static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
.range_start = jinode->i_dirty_start,
.range_end = jinode->i_dirty_end,
};
+ struct folio *folio = NULL;
+ int error;
+
+ /*
+ * writeback_iter() already checks for dirty pages and calls
+ * folio_clear_dirty_for_io(), which we want in order to write-protect
+ * the folios.
+ *
+ * However, we may have to redirty a folio sometimes.
+ */
+ while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+ if (ext4_journalled_writepage_needs_redirty(jinode, folio))
+ folio_redirty_for_writepage(&wbc, folio);
+ folio_unlock(folio);
+ }
- return write_cache_pages(mapping, &wbc,
- ext4_journalled_writepage_callback,
- jinode->i_transaction);
+ return error;
}
static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
@@ -528,8 +581,7 @@ static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
if (ext4_should_journal_data(jinode->i_vfs_inode))
ret = ext4_journalled_submit_inode_data_buffers(jinode);
else
- ret = jbd2_journal_submit_inode_data_buffers(jinode);
-
+ ret = ext4_normal_submit_inode_data_buffers(jinode);
return ret;
}
@@ -645,11 +697,8 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
if (test_opt(sb, WARN_ON_ERROR))
WARN_ON_ONCE(1);
- if (!continue_fs && !sb_rdonly(sb)) {
- ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
- if (journal)
- jbd2_journal_abort(journal, -EIO);
- }
+ if (!continue_fs && !ext4_emergency_ro(sb) && journal)
+ jbd2_journal_abort(journal, -error);
if (!bdev_read_only(sb->s_bdev)) {
save_error_info(sb, error, ino, block, func, line);
@@ -657,10 +706,14 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
* In case the fs should keep running, we need to writeout
* superblock through the journal. Due to lock ordering
* constraints, it may not be safe to do it right here so we
- * defer superblock flushing to a workqueue.
+ * defer superblock flushing to a workqueue. We just need to be
+ * careful when the journal is already shutting down. If we get
+ * here in that case, just update the sb directly as the last
+ * transaction won't commit anyway.
*/
- if (continue_fs && journal)
- schedule_work(&EXT4_SB(sb)->s_error_work);
+ if (continue_fs && journal &&
+ !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
+ schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
else
ext4_commit_super(sb);
}
@@ -675,22 +728,23 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
sb->s_id);
}
- if (sb_rdonly(sb) || continue_fs)
+ if (ext4_emergency_ro(sb) || continue_fs)
return;
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
/*
- * Make sure updated value of ->s_mount_flags will be visible before
- * ->s_flags update
+ * We don't set SB_RDONLY because that requires sb->s_umount
+ * semaphore, and setting it without the proper remount procedure
+ * confuses code such as freeze_super(), leading to deadlocks
+ * and other problems.
*/
- smp_wmb();
- sb->s_flags |= SB_RDONLY;
+ set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
}
-static void flush_stashed_error_work(struct work_struct *work)
+static void update_super_work(struct work_struct *work)
{
struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
- s_error_work);
+ s_sb_upd_work);
journal_t *journal = sbi->s_journal;
handle_t *handle;
@@ -702,8 +756,11 @@ static void flush_stashed_error_work(struct work_struct *work)
* We use directly jbd2 functions here to avoid recursing back into
* ext4 error handling code during handling of previous errors.
*/
- if (!sb_rdonly(sbi->s_sb) && journal) {
+ if (!ext4_emergency_state(sbi->s_sb) &&
+ !sb_rdonly(sbi->s_sb) && journal) {
struct buffer_head *sbh = sbi->s_sbh;
+ bool call_notify_err = false;
+
handle = jbd2_journal_start(journal, 1);
if (IS_ERR(handle))
goto write_directly;
@@ -711,6 +768,10 @@ static void flush_stashed_error_work(struct work_struct *work)
jbd2_journal_stop(handle);
goto write_directly;
}
+
+ if (sbi->s_add_error_count > 0)
+ call_notify_err = true;
+
ext4_update_super(sbi->s_sb);
if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
@@ -724,7 +785,10 @@ static void flush_stashed_error_work(struct work_struct *work)
goto write_directly;
}
jbd2_journal_stop(handle);
- ext4_notify_error_sysfs(sbi);
+
+ if (call_notify_err)
+ ext4_notify_error_sysfs(sbi);
+
return;
}
write_directly:
@@ -747,7 +811,7 @@ void __ext4_error(struct super_block *sb, const char *function,
struct va_format vaf;
va_list args;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ if (unlikely(ext4_emergency_state(sb)))
return;
trace_ext4_error(sb, function, line);
@@ -772,7 +836,7 @@ void __ext4_error_inode(struct inode *inode, const char *function,
va_list args;
struct va_format vaf;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_emergency_state(inode->i_sb)))
return;
trace_ext4_error(inode->i_sb, function, line);
@@ -807,7 +871,7 @@ void __ext4_error_file(struct file *file, const char *function,
struct inode *inode = file_inode(file);
char pathname[80], *path;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_emergency_state(inode->i_sb)))
return;
trace_ext4_error(inode->i_sb, function, line);
@@ -887,7 +951,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
char nbuf[16];
const char *errstr;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ if (unlikely(ext4_emergency_state(sb)))
return;
/* Special case: if the error is EROFS, and we're not already
@@ -912,14 +976,20 @@ void __ext4_msg(struct super_block *sb,
struct va_format vaf;
va_list args;
- atomic_inc(&EXT4_SB(sb)->s_msg_count);
- if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
- return;
+ if (sb) {
+ atomic_inc(&EXT4_SB(sb)->s_msg_count);
+ if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
+ "EXT4-fs"))
+ return;
+ }
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+ if (sb)
+ printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+ else
+ printk("%sEXT4-fs: %pV\n", prefix, &vaf);
va_end(args);
}
@@ -975,7 +1045,7 @@ __acquires(bitlock)
struct va_format vaf;
va_list args;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ if (unlikely(ext4_emergency_state(sb)))
return;
trace_ext4_error(sb, function, line);
@@ -1001,7 +1071,7 @@ __acquires(bitlock)
if (!bdev_read_only(sb->s_bdev)) {
save_error_info(sb, EFSCORRUPTED, ino, block, function,
line);
- schedule_work(&EXT4_SB(sb)->s_error_work);
+ schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
}
return;
}
@@ -1031,6 +1101,8 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
int ret;
+ if (!grp || !gdp)
+ return;
if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
&grp->bb_state);
@@ -1077,43 +1149,6 @@ void ext4_update_dynamic_rev(struct super_block *sb)
*/
}
-/*
- * Open the external journal device
- */
-static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
-{
- struct block_device *bdev;
-
- bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
- if (IS_ERR(bdev))
- goto fail;
- return bdev;
-
-fail:
- ext4_msg(sb, KERN_ERR,
- "failed to open journal device unknown-block(%u,%u) %ld",
- MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
- return NULL;
-}
-
-/*
- * Release the journal device
- */
-static void ext4_blkdev_put(struct block_device *bdev)
-{
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-}
-
-static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
-{
- struct block_device *bdev;
- bdev = sbi->s_journal_bdev;
- if (bdev) {
- ext4_blkdev_put(bdev);
- sbi->s_journal_bdev = NULL;
- }
-}
-
static inline struct inode *orphan_list_entry(struct list_head *l)
{
return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
@@ -1140,12 +1175,12 @@ static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);
-static inline void ext4_quota_off_umount(struct super_block *sb)
+static inline void ext4_quotas_off(struct super_block *sb, int type)
{
- int type;
+ BUG_ON(type > EXT4_MAXQUOTAS);
/* Use our quota_off function to clear inode flags etc. */
- for (type = 0; type < EXT4_MAXQUOTAS; type++)
+ for (type--; type >= 0; type--)
ext4_quota_off(sb, type);
}
@@ -1161,77 +1196,143 @@ static inline char *get_qf_name(struct super_block *sb,
lockdep_is_held(&sb->s_umount));
}
#else
-static inline void ext4_quota_off_umount(struct super_block *sb)
+static inline void ext4_quotas_off(struct super_block *sb, int type)
{
}
#endif
-static void ext4_put_super(struct super_block *sb)
+static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
+{
+ ext4_fsblk_t block;
+ int err;
+
+ block = ext4_count_free_clusters(sbi->s_sb);
+ ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
+ err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
+ GFP_KERNEL);
+ if (!err) {
+ unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
+ sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+ err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
+ GFP_KERNEL);
+ }
+ if (!err)
+ err = percpu_counter_init(&sbi->s_dirs_counter,
+ ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
+ if (!err)
+ err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ GFP_KERNEL);
+ if (!err)
+ err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
+ GFP_KERNEL);
+ if (!err)
+ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
+
+ if (err)
+ ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");
+
+ return err;
+}
+
+static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
+{
+ percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
+ percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
+}
+
+static void ext4_group_desc_free(struct ext4_sb_info *sbi)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_super_block *es = sbi->s_es;
struct buffer_head **group_desc;
+ int i;
+
+ rcu_read_lock();
+ group_desc = rcu_dereference(sbi->s_group_desc);
+ for (i = 0; i < sbi->s_gdb_count; i++)
+ brelse(group_desc[i]);
+ kvfree(group_desc);
+ rcu_read_unlock();
+}
+
+static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
+{
struct flex_groups **flex_groups;
- int aborted = 0;
- int i, err;
+ int i;
- ext4_unregister_li_request(sb);
- ext4_quota_off_umount(sb);
+ rcu_read_lock();
+ flex_groups = rcu_dereference(sbi->s_flex_groups);
+ if (flex_groups) {
+ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+ kvfree(flex_groups[i]);
+ kvfree(flex_groups);
+ }
+ rcu_read_unlock();
+}
- flush_work(&sbi->s_error_work);
- destroy_workqueue(sbi->rsv_conversion_wq);
- ext4_release_orphan_info(sb);
+static void ext4_put_super(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ int aborted = 0;
+ int err;
/*
* Unregister sysfs before destroying jbd2 journal.
* Since we could still access the attr_journal_task attribute via the
* sysfs path while sbi->s_journal->j_task is already NULL.
+ * Unregister sysfs before flushing sbi->s_sb_upd_work as well:
+ * a user may read /proc/fs/ext4/xx/mb_groups during umount, and a failed
+ * metadata read would queue error work; update_super_work would then
+ * call start_this_handle and could trigger a BUG_ON.
*/
ext4_unregister_sysfs(sb);
+ if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
+ ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
+ &sb->s_uuid);
+
+ ext4_unregister_li_request(sb);
+ ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+
+ destroy_workqueue(sbi->rsv_conversion_wq);
+ ext4_release_orphan_info(sb);
+
if (sbi->s_journal) {
aborted = is_journal_aborted(sbi->s_journal);
- err = jbd2_journal_destroy(sbi->s_journal);
- sbi->s_journal = NULL;
+ err = ext4_journal_destroy(sbi, sbi->s_journal);
if ((err < 0) && !aborted) {
ext4_abort(sb, -err, "Couldn't clean up the journal");
}
- }
+ } else
+ flush_work(&sbi->s_sb_upd_work);
ext4_es_unregister_shrinker(sbi);
- del_timer_sync(&sbi->s_err_report);
+ timer_shutdown_sync(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
- if (!sb_rdonly(sb) && !aborted) {
- ext4_clear_feature_journal_needs_recovery(sb);
- ext4_clear_feature_orphan_present(sb);
- es->s_state = cpu_to_le16(sbi->s_mount_state);
- }
- if (!sb_rdonly(sb))
+ if (!ext4_emergency_state(sb) && !sb_rdonly(sb)) {
+ if (!aborted) {
+ ext4_clear_feature_journal_needs_recovery(sb);
+ ext4_clear_feature_orphan_present(sb);
+ es->s_state = cpu_to_le16(sbi->s_mount_state);
+ }
ext4_commit_super(sb);
-
- rcu_read_lock();
- group_desc = rcu_dereference(sbi->s_group_desc);
- for (i = 0; i < sbi->s_gdb_count; i++)
- brelse(group_desc[i]);
- kvfree(group_desc);
- flex_groups = rcu_dereference(sbi->s_flex_groups);
- if (flex_groups) {
- for (i = 0; i < sbi->s_flex_groups_allocated; i++)
- kvfree(flex_groups[i]);
- kvfree(flex_groups);
}
- rcu_read_unlock();
- percpu_counter_destroy(&sbi->s_freeclusters_counter);
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
- percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
- percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
- percpu_free_rwsem(&sbi->s_writepages_rwsem);
+
+ ext4_group_desc_free(sbi);
+ ext4_flex_groups_free(sbi);
+
+ WARN_ON_ONCE(!(sbi->s_mount_state & EXT4_ERROR_FS) &&
+ percpu_counter_sum(&sbi->s_dirtyclusters_counter));
+ ext4_percpu_param_destroy(sbi);
#ifdef CONFIG_QUOTA
- for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ for (int i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(get_qf_name(sb, sbi, i));
#endif
@@ -1245,15 +1346,14 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
- if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
+ if (sbi->s_journal_bdev_file) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may be
* hotswapped, and it breaks the `ro-after' testing code.
*/
- sync_blockdev(sbi->s_journal_bdev);
- invalidate_bdev(sbi->s_journal_bdev);
- ext4_blkdev_remove(sbi);
+ sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
+ invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
}
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
@@ -1272,12 +1372,10 @@ static void ext4_put_super(struct super_block *sb)
*/
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->s_blockgroup_lock);
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
kfree(sbi);
@@ -1292,21 +1390,24 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
{
struct ext4_inode_info *ei;
- ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
+ ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
inode_set_iversion(&ei->vfs_inode, 1);
+ ei->i_flags = 0;
+ ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
spin_lock_init(&ei->i_raw_lock);
- INIT_LIST_HEAD(&ei->i_prealloc_list);
+ ei->i_prealloc_node = RB_ROOT;
atomic_set(&ei->i_prealloc_active, 0);
- spin_lock_init(&ei->i_prealloc_lock);
+ rwlock_init(&ei->i_prealloc_lock);
ext4_es_init_tree(&ei->i_es_tree);
rwlock_init(&ei->i_es_lock);
INIT_LIST_HEAD(&ei->i_es_list);
ei->i_es_all_nr = 0;
ei->i_es_shk_nr = 0;
ei->i_es_shrink_lblk = 0;
+ ei->i_es_seq = 0;
ei->i_reserved_data_blocks = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
ext4_init_pending_tree(&ei->i_pending_tree);
@@ -1319,16 +1420,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
spin_lock_init(&ei->i_completed_io_lock);
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
- atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
ext4_fc_init_inode(&ei->vfs_inode);
- mutex_init(&ei->i_fc_lock);
+ spin_lock_init(&ei->i_fc_lock);
return &ei->vfs_inode;
}
static int ext4_drop_inode(struct inode *inode)
{
- int drop = generic_drop_inode(inode);
+ int drop = inode_generic_drop(inode);
if (!drop)
drop = fscrypt_drop_inode(inode);
@@ -1349,9 +1449,9 @@ static void ext4_free_in_core_inode(struct inode *inode)
static void ext4_destroy_inode(struct inode *inode)
{
- if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
+ if (ext4_inode_orphan_tracked(inode)) {
ext4_msg(inode->i_sb, KERN_ERR,
- "Inode %lu (%p): orphan list check failed!",
+ "Inode %lu (%p): inode tracked as orphan!",
inode->i_ino, EXT4_I(inode));
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
EXT4_I(inode), sizeof(struct ext4_inode_info),
@@ -1359,30 +1459,41 @@ static void ext4_destroy_inode(struct inode *inode)
dump_stack();
}
- if (EXT4_I(inode)->i_reserved_data_blocks)
+ if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ERROR_FS) &&
+ WARN_ON_ONCE(EXT4_I(inode)->i_reserved_data_blocks))
ext4_msg(inode->i_sb, KERN_ERR,
"Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
inode->i_ino, EXT4_I(inode),
EXT4_I(inode)->i_reserved_data_blocks);
}
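+/* ->shutdown super operation: force-shutdown without flushing the journal. */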
+static void ext4_shutdown(struct super_block *sb)
+{
+ ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
+}
+
static void init_once(void *foo)
{
- struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
+ struct ext4_inode_info *ei = foo;
INIT_LIST_HEAD(&ei->i_orphan);
init_rwsem(&ei->xattr_sem);
init_rwsem(&ei->i_data_sem);
inode_init_once(&ei->vfs_inode);
ext4_fc_init_inode(&ei->vfs_inode);
+#ifdef CONFIG_FS_ENCRYPTION
+ ei->i_crypt_info = NULL;
+#endif
+#ifdef CONFIG_FS_VERITY
+ ei->i_verity_info = NULL;
+#endif
}
static int __init init_inodecache(void)
{
ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
sizeof(struct ext4_inode_info), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
- SLAB_ACCOUNT),
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
offsetof(struct ext4_inode_info, i_data),
sizeof_field(struct ext4_inode_info, i_data),
init_once);
@@ -1406,7 +1517,7 @@ void ext4_clear_inode(struct inode *inode)
ext4_fc_del(inode);
invalidate_inode_buffers(inode);
clear_inode(inode);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
dquot_drop(inode);
if (EXT4_I(inode)->jinode) {
@@ -1463,128 +1574,6 @@ static int ext4_nfs_commit_metadata(struct inode *inode)
return ext4_write_inode(inode, &wbc);
}
-#ifdef CONFIG_FS_ENCRYPTION
-static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
-{
- return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
-}
-
-static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
- void *fs_data)
-{
- handle_t *handle = fs_data;
- int res, res2, credits, retries = 0;
-
- /*
- * Encrypting the root directory is not allowed because e2fsck expects
- * lost+found to exist and be unencrypted, and encrypting the root
- * directory would imply encrypting the lost+found directory as well as
- * the filename "lost+found" itself.
- */
- if (inode->i_ino == EXT4_ROOT_INO)
- return -EPERM;
-
- if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
- return -EINVAL;
-
- if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
- return -EOPNOTSUPP;
-
- res = ext4_convert_inline_data(inode);
- if (res)
- return res;
-
- /*
- * If a journal handle was specified, then the encryption context is
- * being set on a new inode via inheritance and is part of a larger
- * transaction to create the inode. Otherwise the encryption context is
- * being set on an existing inode in its own transaction. Only in the
- * latter case should the "retry on ENOSPC" logic be used.
- */
-
- if (handle) {
- res = ext4_xattr_set_handle(handle, inode,
- EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
- ctx, len, 0);
- if (!res) {
- ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
- ext4_clear_inode_state(inode,
- EXT4_STATE_MAY_INLINE_DATA);
- /*
- * Update inode->i_flags - S_ENCRYPTED will be enabled,
- * S_DAX may be disabled
- */
- ext4_set_inode_flags(inode, false);
- }
- return res;
- }
-
- res = dquot_initialize(inode);
- if (res)
- return res;
-retry:
- res = ext4_xattr_set_credits(inode, len, false /* is_create */,
- &credits);
- if (res)
- return res;
-
- handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
- ctx, len, 0);
- if (!res) {
- ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
- /*
- * Update inode->i_flags - S_ENCRYPTED will be enabled,
- * S_DAX may be disabled
- */
- ext4_set_inode_flags(inode, false);
- res = ext4_mark_inode_dirty(handle, inode);
- if (res)
- EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
- }
- res2 = ext4_journal_stop(handle);
-
- if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
- if (!res)
- res = res2;
- return res;
-}
-
-static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
-{
- return EXT4_SB(sb)->s_dummy_enc_policy.policy;
-}
-
-static bool ext4_has_stable_inodes(struct super_block *sb)
-{
- return ext4_has_feature_stable_inodes(sb);
-}
-
-static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret)
-{
- *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
- *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
-}
-
-static const struct fscrypt_operations ext4_cryptops = {
- .key_prefix = "ext4:",
- .get_context = ext4_get_context,
- .set_context = ext4_set_context,
- .get_dummy_policy = ext4_get_dummy_policy,
- .empty_dir = ext4_empty_dir,
- .has_stable_inodes = ext4_has_stable_inodes,
- .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
-};
-#endif
-
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
@@ -1603,7 +1592,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
-static struct dquot **ext4_get_dquots(struct inode *inode)
+static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
{
return EXT4_I(inode)->i_dquot;
}
@@ -1647,8 +1636,8 @@ static const struct super_operations ext4_sops = {
.freeze_fs = ext4_freeze,
.unfreeze_fs = ext4_unfreeze,
.statfs = ext4_statfs,
- .remount_fs = ext4_remount,
.show_options = ext4_show_options,
+ .shutdown = ext4_shutdown,
#ifdef CONFIG_QUOTA
.quota_read = ext4_quota_read,
.quota_write = ext4_quota_write,
@@ -1657,6 +1646,7 @@ static const struct super_operations ext4_sops = {
};
static const struct export_operations ext4_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
@@ -1665,253 +1655,190 @@ static const struct export_operations ext4_export_ops = {
enum {
Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
- Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
+ Opt_resgid, Opt_resuid, Opt_sb,
Opt_nouid32, Opt_debug, Opt_removed,
- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
+ Opt_user_xattr, Opt_acl,
Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
Opt_inlinecrypt,
- Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+ Opt_usrjquota, Opt_grpjquota, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
- Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
+ Opt_usrquota, Opt_grpquota, Opt_prjquota,
Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
- Opt_nowarn_on_error, Opt_mblk_io_submit,
- Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
+ Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
+ Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
#ifdef CONFIG_EXT4_DEBUG
Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};
-static const match_table_t tokens = {
- {Opt_bsd_df, "bsddf"},
- {Opt_minix_df, "minixdf"},
- {Opt_grpid, "grpid"},
- {Opt_grpid, "bsdgroups"},
- {Opt_nogrpid, "nogrpid"},
- {Opt_nogrpid, "sysvgroups"},
- {Opt_resgid, "resgid=%u"},
- {Opt_resuid, "resuid=%u"},
- {Opt_sb, "sb=%u"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_nouid32, "nouid32"},
- {Opt_debug, "debug"},
- {Opt_removed, "oldalloc"},
- {Opt_removed, "orlov"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_noload, "norecovery"},
- {Opt_noload, "noload"},
- {Opt_removed, "nobh"},
- {Opt_removed, "bh"},
- {Opt_commit, "commit=%u"},
- {Opt_min_batch_time, "min_batch_time=%u"},
- {Opt_max_batch_time, "max_batch_time=%u"},
- {Opt_journal_dev, "journal_dev=%u"},
- {Opt_journal_path, "journal_path=%s"},
- {Opt_journal_checksum, "journal_checksum"},
- {Opt_nojournal_checksum, "nojournal_checksum"},
- {Opt_journal_async_commit, "journal_async_commit"},
- {Opt_abort, "abort"},
- {Opt_data_journal, "data=journal"},
- {Opt_data_ordered, "data=ordered"},
- {Opt_data_writeback, "data=writeback"},
- {Opt_data_err_abort, "data_err=abort"},
- {Opt_data_err_ignore, "data_err=ignore"},
- {Opt_offusrjquota, "usrjquota="},
- {Opt_usrjquota, "usrjquota=%s"},
- {Opt_offgrpjquota, "grpjquota="},
- {Opt_grpjquota, "grpjquota=%s"},
- {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
- {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
- {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
- {Opt_grpquota, "grpquota"},
- {Opt_noquota, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
- {Opt_prjquota, "prjquota"},
- {Opt_barrier, "barrier=%u"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_i_version, "i_version"},
- {Opt_dax, "dax"},
- {Opt_dax_always, "dax=always"},
- {Opt_dax_inode, "dax=inode"},
- {Opt_dax_never, "dax=never"},
- {Opt_stripe, "stripe=%u"},
- {Opt_delalloc, "delalloc"},
- {Opt_warn_on_error, "warn_on_error"},
- {Opt_nowarn_on_error, "nowarn_on_error"},
- {Opt_lazytime, "lazytime"},
- {Opt_nolazytime, "nolazytime"},
- {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
- {Opt_nodelalloc, "nodelalloc"},
- {Opt_removed, "mblk_io_submit"},
- {Opt_removed, "nomblk_io_submit"},
- {Opt_block_validity, "block_validity"},
- {Opt_noblock_validity, "noblock_validity"},
- {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
- {Opt_journal_ioprio, "journal_ioprio=%u"},
- {Opt_auto_da_alloc, "auto_da_alloc=%u"},
- {Opt_auto_da_alloc, "auto_da_alloc"},
- {Opt_noauto_da_alloc, "noauto_da_alloc"},
- {Opt_dioread_nolock, "dioread_nolock"},
- {Opt_dioread_lock, "nodioread_nolock"},
- {Opt_dioread_lock, "dioread_lock"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_init_itable, "init_itable=%u"},
- {Opt_init_itable, "init_itable"},
- {Opt_noinit_itable, "noinit_itable"},
-#ifdef CONFIG_EXT4_DEBUG
- {Opt_fc_debug_force, "fc_debug_force"},
- {Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
-#endif
- {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
- {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
- {Opt_test_dummy_encryption, "test_dummy_encryption"},
- {Opt_inlinecrypt, "inlinecrypt"},
- {Opt_nombcache, "nombcache"},
- {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
- {Opt_removed, "prefetch_block_bitmaps"},
- {Opt_no_prefetch_block_bitmaps, "no_prefetch_block_bitmaps"},
- {Opt_mb_optimize_scan, "mb_optimize_scan=%d"},
- {Opt_removed, "check=none"}, /* mount option from ext2/3 */
- {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
- {Opt_removed, "reservation"}, /* mount option from ext2/3 */
- {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
- {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */
- {Opt_err, NULL},
+static const struct constant_table ext4_param_errors[] = {
+ {"continue", EXT4_MOUNT_ERRORS_CONT},
+ {"panic", EXT4_MOUNT_ERRORS_PANIC},
+ {"remount-ro", EXT4_MOUNT_ERRORS_RO},
+ {}
};
-static ext4_fsblk_t get_sb_block(void **data)
-{
- ext4_fsblk_t sb_block;
- char *options = (char *) *data;
-
- if (!options || strncmp(options, "sb=", 3) != 0)
- return 1; /* Default location */
-
- options += 3;
- /* TODO: use simple_strtoll with >32bit ext4 */
- sb_block = simple_strtoul(options, &options, 0);
- if (*options && *options != ',') {
- printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
- (char *) *data);
- return 1;
- }
- if (*options == ',')
- options++;
- *data = (void *) options;
-
- return sb_block;
-}
-
-#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
-#define DEFAULT_MB_OPTIMIZE_SCAN (-1)
-
-static const char deprecated_msg[] =
- "Mount option \"%s\" will be removed by %s\n"
- "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
-
-#ifdef CONFIG_QUOTA
-static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
-{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
- int ret = -1;
+static const struct constant_table ext4_param_data[] = {
+ {"journal", EXT4_MOUNT_JOURNAL_DATA},
+ {"ordered", EXT4_MOUNT_ORDERED_DATA},
+ {"writeback", EXT4_MOUNT_WRITEBACK_DATA},
+ {}
+};
- if (sb_any_quota_loaded(sb) && !old_qname) {
- ext4_msg(sb, KERN_ERR,
- "Cannot change journaled "
- "quota options when quota turned on");
- return -1;
- }
- if (ext4_has_feature_quota(sb)) {
- ext4_msg(sb, KERN_INFO, "Journaled quota options "
- "ignored when QUOTA feature is enabled");
- return 1;
- }
- qname = match_strdup(args);
- if (!qname) {
- ext4_msg(sb, KERN_ERR,
- "Not enough memory for storing quotafile name");
- return -1;
- }
- if (old_qname) {
- if (strcmp(old_qname, qname) == 0)
- ret = 1;
- else
- ext4_msg(sb, KERN_ERR,
- "%s quota file already specified",
- QTYPE2NAME(qtype));
- goto errout;
- }
- if (strchr(qname, '/')) {
- ext4_msg(sb, KERN_ERR,
- "quotafile must be on filesystem root");
- goto errout;
- }
- rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
- set_opt(sb, QUOTA);
- return 1;
-errout:
- kfree(qname);
- return ret;
-}
+static const struct constant_table ext4_param_data_err[] = {
+ {"abort", Opt_data_err_abort},
+ {"ignore", Opt_data_err_ignore},
+ {}
+};
-static int clear_qf_name(struct super_block *sb, int qtype)
-{
+static const struct constant_table ext4_param_jqfmt[] = {
+ {"vfsold", QFMT_VFS_OLD},
+ {"vfsv0", QFMT_VFS_V0},
+ {"vfsv1", QFMT_VFS_V1},
+ {}
+};
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *old_qname = get_qf_name(sb, sbi, qtype);
+static const struct constant_table ext4_param_dax[] = {
+ {"always", Opt_dax_always},
+ {"inode", Opt_dax_inode},
+ {"never", Opt_dax_never},
+ {}
+};
- if (sb_any_quota_loaded(sb) && old_qname) {
- ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
- " when quota turned on");
- return -1;
- }
- rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
- synchronize_rcu();
- kfree(old_qname);
- return 1;
-}
+/*
+ * Mount option specification
+ * We don't use fsparam_flag_no because of the way we set the
+ * options and the way we show them in _ext4_show_options(). To
+ * keep the changes to a minimum, let's keep the negative options
+ * separate for now.
+ */
+static const struct fs_parameter_spec ext4_param_specs[] = {
+ fsparam_flag ("bsddf", Opt_bsd_df),
+ fsparam_flag ("minixdf", Opt_minix_df),
+ fsparam_flag ("grpid", Opt_grpid),
+ fsparam_flag ("bsdgroups", Opt_grpid),
+ fsparam_flag ("nogrpid", Opt_nogrpid),
+ fsparam_flag ("sysvgroups", Opt_nogrpid),
+ fsparam_gid ("resgid", Opt_resgid),
+ fsparam_uid ("resuid", Opt_resuid),
+ fsparam_u32 ("sb", Opt_sb),
+ fsparam_enum ("errors", Opt_errors, ext4_param_errors),
+ fsparam_flag ("nouid32", Opt_nouid32),
+ fsparam_flag ("debug", Opt_debug),
+ fsparam_flag ("oldalloc", Opt_removed),
+ fsparam_flag ("orlov", Opt_removed),
+ fsparam_flag ("user_xattr", Opt_user_xattr),
+ fsparam_flag ("acl", Opt_acl),
+ fsparam_flag ("norecovery", Opt_noload),
+ fsparam_flag ("noload", Opt_noload),
+ fsparam_flag ("bh", Opt_removed),
+ fsparam_flag ("nobh", Opt_removed),
+ fsparam_u32 ("commit", Opt_commit),
+ fsparam_u32 ("min_batch_time", Opt_min_batch_time),
+ fsparam_u32 ("max_batch_time", Opt_max_batch_time),
+ fsparam_u32 ("journal_dev", Opt_journal_dev),
+ fsparam_bdev ("journal_path", Opt_journal_path),
+ fsparam_flag ("journal_checksum", Opt_journal_checksum),
+ fsparam_flag ("nojournal_checksum", Opt_nojournal_checksum),
+ fsparam_flag ("journal_async_commit",Opt_journal_async_commit),
+ fsparam_flag ("abort", Opt_abort),
+ fsparam_enum ("data", Opt_data, ext4_param_data),
+ fsparam_enum ("data_err", Opt_data_err,
+ ext4_param_data_err),
+ fsparam_string_empty
+ ("usrjquota", Opt_usrjquota),
+ fsparam_string_empty
+ ("grpjquota", Opt_grpjquota),
+ fsparam_enum ("jqfmt", Opt_jqfmt, ext4_param_jqfmt),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_flag ("quota", Opt_quota),
+ fsparam_flag ("noquota", Opt_noquota),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag ("prjquota", Opt_prjquota),
+ fsparam_flag ("barrier", Opt_barrier),
+ fsparam_u32 ("barrier", Opt_barrier),
+ fsparam_flag ("nobarrier", Opt_nobarrier),
+ fsparam_flag ("i_version", Opt_removed),
+ fsparam_flag ("dax", Opt_dax),
+ fsparam_enum ("dax", Opt_dax_type, ext4_param_dax),
+ fsparam_u32 ("stripe", Opt_stripe),
+ fsparam_flag ("delalloc", Opt_delalloc),
+ fsparam_flag ("nodelalloc", Opt_nodelalloc),
+ fsparam_flag ("warn_on_error", Opt_warn_on_error),
+ fsparam_flag ("nowarn_on_error", Opt_nowarn_on_error),
+ fsparam_u32 ("debug_want_extra_isize",
+ Opt_debug_want_extra_isize),
+ fsparam_flag ("mblk_io_submit", Opt_removed),
+ fsparam_flag ("nomblk_io_submit", Opt_removed),
+ fsparam_flag ("block_validity", Opt_block_validity),
+ fsparam_flag ("noblock_validity", Opt_noblock_validity),
+ fsparam_u32 ("inode_readahead_blks",
+ Opt_inode_readahead_blks),
+ fsparam_u32 ("journal_ioprio", Opt_journal_ioprio),
+ fsparam_u32 ("auto_da_alloc", Opt_auto_da_alloc),
+ fsparam_flag ("auto_da_alloc", Opt_auto_da_alloc),
+ fsparam_flag ("noauto_da_alloc", Opt_noauto_da_alloc),
+ fsparam_flag ("dioread_nolock", Opt_dioread_nolock),
+ fsparam_flag ("nodioread_nolock", Opt_dioread_lock),
+ fsparam_flag ("dioread_lock", Opt_dioread_lock),
+ fsparam_flag ("discard", Opt_discard),
+ fsparam_flag ("nodiscard", Opt_nodiscard),
+ fsparam_u32 ("init_itable", Opt_init_itable),
+ fsparam_flag ("init_itable", Opt_init_itable),
+ fsparam_flag ("noinit_itable", Opt_noinit_itable),
+#ifdef CONFIG_EXT4_DEBUG
+ fsparam_flag ("fc_debug_force", Opt_fc_debug_force),
+ fsparam_u32 ("fc_debug_max_replay", Opt_fc_debug_max_replay),
#endif
+ fsparam_u32 ("max_dir_size_kb", Opt_max_dir_size_kb),
+ fsparam_flag ("test_dummy_encryption",
+ Opt_test_dummy_encryption),
+ fsparam_string ("test_dummy_encryption",
+ Opt_test_dummy_encryption),
+ fsparam_flag ("inlinecrypt", Opt_inlinecrypt),
+ fsparam_flag ("nombcache", Opt_nombcache),
+ fsparam_flag ("no_mbcache", Opt_nombcache), /* for backward compatibility */
+ fsparam_flag ("prefetch_block_bitmaps",
+ Opt_removed),
+ fsparam_flag ("no_prefetch_block_bitmaps",
+ Opt_no_prefetch_block_bitmaps),
+ fsparam_s32 ("mb_optimize_scan", Opt_mb_optimize_scan),
+ fsparam_string ("check", Opt_removed), /* mount option from ext2/3 */
+ fsparam_flag ("nocheck", Opt_removed), /* mount option from ext2/3 */
+ fsparam_flag ("reservation", Opt_removed), /* mount option from ext2/3 */
+ fsparam_flag ("noreservation", Opt_removed), /* mount option from ext2/3 */
+ fsparam_u32 ("journal", Opt_removed), /* mount option from ext2/3 */
+ {}
+};
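+/*
+ * For illustration: a mount string such as
+ * "errors=remount-ro,data=ordered,resuid=0,commit=30" is parsed by
+ * fs_parse() against the table above into Opt_errors (EXT4_MOUNT_ERRORS_RO),
+ * Opt_data (EXT4_MOUNT_ORDERED_DATA), Opt_resuid (result.uid) and
+ * Opt_commit (result.uint_32 == 30), which ext4_parse_param() below then
+ * records in the ext4_fs_context.
+ */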
+
#define MOPT_SET 0x0001
#define MOPT_CLEAR 0x0002
#define MOPT_NOSUPPORT 0x0004
#define MOPT_EXPLICIT 0x0008
-#define MOPT_CLEAR_ERR 0x0010
-#define MOPT_GTE0 0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q 0
-#define MOPT_QFMT 0x0040
+#define MOPT_QFMT 0x0010
#else
#define MOPT_Q MOPT_NOSUPPORT
#define MOPT_QFMT MOPT_NOSUPPORT
#endif
-#define MOPT_DATAJ 0x0080
-#define MOPT_NO_EXT2 0x0100
-#define MOPT_NO_EXT3 0x0200
+#define MOPT_NO_EXT2 0x0020
+#define MOPT_NO_EXT3 0x0040
#define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
-#define MOPT_STRING 0x0400
-#define MOPT_SKIP 0x0800
-#define MOPT_2 0x1000
+#define MOPT_SKIP 0x0080
+#define MOPT_2 0x0100
static const struct mount_opts {
int token;
@@ -1936,6 +1863,7 @@ static const struct mount_opts {
MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
+ {Opt_commit, 0, MOPT_NO_EXT2},
{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
@@ -1944,52 +1872,25 @@ static const struct mount_opts {
EXT4_MOUNT_JOURNAL_CHECKSUM),
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
- {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
- {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
- {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
- {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
- MOPT_NO_EXT2},
- {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
- MOPT_NO_EXT2},
+ {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
- {Opt_commit, 0, MOPT_GTE0},
- {Opt_max_batch_time, 0, MOPT_GTE0},
- {Opt_min_batch_time, 0, MOPT_GTE0},
- {Opt_inode_readahead_blks, 0, MOPT_GTE0},
- {Opt_init_itable, 0, MOPT_GTE0},
- {Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP},
- {Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS,
- MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
- {Opt_dax_inode, EXT4_MOUNT2_DAX_INODE,
- MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
- {Opt_dax_never, EXT4_MOUNT2_DAX_NEVER,
- MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
- {Opt_stripe, 0, MOPT_GTE0},
- {Opt_resuid, 0, MOPT_GTE0},
- {Opt_resgid, 0, MOPT_GTE0},
- {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
- {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
- {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
- {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
- {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
- {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
- MOPT_NO_EXT2 | MOPT_DATAJ},
+ {Opt_dax_type, 0, MOPT_EXT4_ONLY},
+ {Opt_journal_dev, 0, MOPT_NO_EXT2},
+ {Opt_journal_path, 0, MOPT_NO_EXT2},
+ {Opt_journal_ioprio, 0, MOPT_NO_EXT2},
+ {Opt_data, 0, MOPT_NO_EXT2},
{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
- {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
- {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
{Opt_acl, 0, MOPT_NOSUPPORT},
- {Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
- {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
MOPT_SET | MOPT_Q},
@@ -2000,499 +1901,978 @@ static const struct mount_opts {
{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
MOPT_CLEAR | MOPT_Q},
- {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
- {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
- {Opt_offusrjquota, 0, MOPT_Q},
- {Opt_offgrpjquota, 0, MOPT_Q},
- {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
- {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
- {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
- {Opt_max_dir_size_kb, 0, MOPT_GTE0},
- {Opt_test_dummy_encryption, 0, MOPT_STRING},
+ {Opt_usrjquota, 0, MOPT_Q},
+ {Opt_grpjquota, 0, MOPT_Q},
+ {Opt_jqfmt, 0, MOPT_QFMT},
{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
MOPT_SET},
- {Opt_mb_optimize_scan, EXT4_MOUNT2_MB_OPTIMIZE_SCAN, MOPT_GTE0},
#ifdef CONFIG_EXT4_DEBUG
{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
- {Opt_fc_debug_max_replay, 0, MOPT_GTE0},
#endif
+ {Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2},
{Opt_err, 0, 0}
};
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
static const struct ext4_sb_encodings {
__u16 magic;
char *name;
- char *version;
+ unsigned int version;
} ext4_sb_encoding_map[] = {
- {EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
+ {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};
-static int ext4_sb_read_encoding(const struct ext4_super_block *es,
- const struct ext4_sb_encodings **encoding,
- __u16 *flags)
+static const struct ext4_sb_encodings *
+ext4_sb_read_encoding(const struct ext4_super_block *es)
{
__u16 magic = le16_to_cpu(es->s_encoding);
int i;
for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
if (magic == ext4_sb_encoding_map[i].magic)
- break;
+ return &ext4_sb_encoding_map[i];
- if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
+ return NULL;
+}
+#endif
+
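+/*
+ * EXT4_SPEC_* bits record which options were explicitly given on this
+ * mount/remount, so that only those values are later copied from the
+ * parsing context into the ext4_sb_info.
+ */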
+#define EXT4_SPEC_JQUOTA (1 << 0)
+#define EXT4_SPEC_JQFMT (1 << 1)
+#define EXT4_SPEC_DATAJ (1 << 2)
+#define EXT4_SPEC_SB_BLOCK (1 << 3)
+#define EXT4_SPEC_JOURNAL_DEV (1 << 4)
+#define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5)
+#define EXT4_SPEC_s_want_extra_isize (1 << 7)
+#define EXT4_SPEC_s_max_batch_time (1 << 8)
+#define EXT4_SPEC_s_min_batch_time (1 << 9)
+#define EXT4_SPEC_s_inode_readahead_blks (1 << 10)
+#define EXT4_SPEC_s_li_wait_mult (1 << 11)
+#define EXT4_SPEC_s_max_dir_size_kb (1 << 12)
+#define EXT4_SPEC_s_stripe (1 << 13)
+#define EXT4_SPEC_s_resuid (1 << 14)
+#define EXT4_SPEC_s_resgid (1 << 15)
+#define EXT4_SPEC_s_commit_interval (1 << 16)
+#define EXT4_SPEC_s_fc_debug_max_replay (1 << 17)
+#define EXT4_SPEC_s_sb_block (1 << 18)
+#define EXT4_SPEC_mb_optimize_scan (1 << 19)
+
+struct ext4_fs_context {
+ char *s_qf_names[EXT4_MAXQUOTAS];
+ struct fscrypt_dummy_policy dummy_enc_policy;
+ int s_jquota_fmt; /* Format of quota to use */
+#ifdef CONFIG_EXT4_DEBUG
+ int s_fc_debug_max_replay;
+#endif
+ unsigned short qname_spec;
+ unsigned long vals_s_flags; /* Bits to set in s_flags */
+ unsigned long mask_s_flags; /* Bits changed in s_flags */
+ unsigned long journal_devnum;
+ unsigned long s_commit_interval;
+ unsigned long s_stripe;
+ unsigned int s_inode_readahead_blks;
+ unsigned int s_want_extra_isize;
+ unsigned int s_li_wait_mult;
+ unsigned int s_max_dir_size_kb;
+ unsigned int journal_ioprio;
+ unsigned int vals_s_mount_opt;
+ unsigned int mask_s_mount_opt;
+ unsigned int vals_s_mount_opt2;
+ unsigned int mask_s_mount_opt2;
+ unsigned int opt_flags; /* MOPT flags */
+ unsigned int spec;
+ u32 s_max_batch_time;
+ u32 s_min_batch_time;
+ kuid_t s_resuid;
+ kgid_t s_resgid;
+ ext4_fsblk_t s_sb_block;
+};
+
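+/* Release the ext4-private parsing context hanging off an fs_context. */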
+static void ext4_fc_free(struct fs_context *fc)
+{
+ struct ext4_fs_context *ctx = fc->fs_private;
+ int i;
+
+ if (!ctx)
+ return;
+
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(ctx->s_qf_names[i]);
+
+ fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
+ kfree(ctx);
+}
+
+int ext4_init_fs_context(struct fs_context *fc)
+{
+ struct ext4_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ fc->fs_private = ctx;
+ fc->ops = &ext4_context_ops;
+
+ /* i_version is always enabled now */
+ fc->sb_flags |= SB_I_VERSION;
+
+ return 0;
+}
+
+#ifdef CONFIG_QUOTA
+/*
+ * Note the name of the specified quota file.
+ */
+static int note_qf_name(struct fs_context *fc, int qtype,
+ struct fs_parameter *param)
+{
+ struct ext4_fs_context *ctx = fc->fs_private;
+ char *qname;
+
+ if (param->size < 1) {
+ ext4_msg(NULL, KERN_ERR, "Missing quota name");
+ return -EINVAL;
+ }
+ if (strchr(param->string, '/')) {
+ ext4_msg(NULL, KERN_ERR,
+ "quotafile must be on filesystem root");
return -EINVAL;
+ }
+ if (ctx->s_qf_names[qtype]) {
+ if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
+ ext4_msg(NULL, KERN_ERR,
+ "%s quota file already specified",
+ QTYPE2NAME(qtype));
+ return -EINVAL;
+ }
+ return 0;
+ }
- *encoding = &ext4_sb_encoding_map[i];
- *flags = le16_to_cpu(es->s_encoding_flags);
+ qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
+ if (!qname) {
+ ext4_msg(NULL, KERN_ERR,
+ "Not enough memory for storing quotafile name");
+ return -ENOMEM;
+ }
+ ctx->s_qf_names[qtype] = qname;
+ ctx->qname_spec |= 1 << qtype;
+ ctx->spec |= EXT4_SPEC_JQUOTA;
+ return 0;
+}
+/*
+ * Clear the name of the specified quota file.
+ */
+static int unnote_qf_name(struct fs_context *fc, int qtype)
+{
+ struct ext4_fs_context *ctx = fc->fs_private;
+
+ kfree(ctx->s_qf_names[qtype]);
+
+ ctx->s_qf_names[qtype] = NULL;
+ ctx->qname_spec |= 1 << qtype;
+ ctx->spec |= EXT4_SPEC_JQUOTA;
return 0;
}
#endif
-static int ext4_set_test_dummy_encryption(struct super_block *sb,
- const char *opt,
- const substring_t *arg,
- bool is_remount)
+static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct ext4_fs_context *ctx)
{
-#ifdef CONFIG_FS_ENCRYPTION
- struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;
- /*
- * This mount option is just for testing, and it's not worthwhile to
- * implement the extra complexity (e.g. RCU protection) that would be
- * needed to allow it to be set or changed during remount. We do allow
- * it to be specified during remount, but only if there is no change.
- */
- if (is_remount && !sbi->s_dummy_enc_policy.policy) {
- ext4_msg(sb, KERN_WARNING,
- "Can't set test_dummy_encryption on remount");
- return -1;
+ if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
+ ext4_msg(NULL, KERN_WARNING,
+ "test_dummy_encryption option not supported");
+ return -EINVAL;
}
- err = fscrypt_set_test_dummy_encryption(sb, arg->from,
- &sbi->s_dummy_enc_policy);
- if (err) {
- if (err == -EEXIST)
- ext4_msg(sb, KERN_WARNING,
- "Can't change test_dummy_encryption on remount");
- else if (err == -EINVAL)
- ext4_msg(sb, KERN_WARNING,
- "Value of option \"%s\" is unrecognized", opt);
- else
- ext4_msg(sb, KERN_WARNING,
- "Error processing option \"%s\" [%d]",
- opt, err);
- return -1;
+ err = fscrypt_parse_test_dummy_encryption(param,
+ &ctx->dummy_enc_policy);
+ if (err == -EINVAL) {
+ ext4_msg(NULL, KERN_WARNING,
+ "Value of option \"%s\" is unrecognized", param->key);
+ } else if (err == -EEXIST) {
+ ext4_msg(NULL, KERN_WARNING,
+ "Conflicting test_dummy_encryption options");
+ return -EINVAL;
}
- ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
-#else
- ext4_msg(sb, KERN_WARNING,
- "Test dummy encryption mount option ignored");
-#endif
- return 1;
+ return err;
}
-struct ext4_parsed_options {
- unsigned long journal_devnum;
- unsigned int journal_ioprio;
- int mb_optimize_scan;
-};
+#define EXT4_SET_CTX(name) \
+static inline __maybe_unused \
+void ctx_set_##name(struct ext4_fs_context *ctx, unsigned long flag) \
+{ \
+ ctx->mask_s_##name |= flag; \
+ ctx->vals_s_##name |= flag; \
+}
+
+#define EXT4_CLEAR_CTX(name) \
+static inline __maybe_unused \
+void ctx_clear_##name(struct ext4_fs_context *ctx, unsigned long flag) \
+{ \
+ ctx->mask_s_##name |= flag; \
+ ctx->vals_s_##name &= ~flag; \
+}
+
+#define EXT4_TEST_CTX(name) \
+static inline unsigned long \
+ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \
+{ \
+ return (ctx->vals_s_##name & flag); \
+}
-static int handle_mount_opt(struct super_block *sb, char *opt, int token,
- substring_t *args, struct ext4_parsed_options *parsed_opts,
- int is_remount)
+EXT4_SET_CTX(flags); /* set only */
+EXT4_SET_CTX(mount_opt);
+EXT4_CLEAR_CTX(mount_opt);
+EXT4_TEST_CTX(mount_opt);
+EXT4_SET_CTX(mount_opt2);
+EXT4_CLEAR_CTX(mount_opt2);
+EXT4_TEST_CTX(mount_opt2);
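+/*
+ * For illustration only: EXT4_SET_CTX(mount_opt) above expands to roughly
+ *
+ *	static inline void ctx_set_mount_opt(struct ext4_fs_context *ctx,
+ *					     unsigned long flag)
+ *	{
+ *		ctx->mask_s_mount_opt |= flag;	 (which bits were touched)
+ *		ctx->vals_s_mount_opt |= flag;	 (their new values)
+ *	}
+ *
+ * so that when the context is applied, only the bits the user actually
+ * specified are changed in the superblock.
+ */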
+
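+/*
+ * Parse one mount parameter and record the result in the ext4_fs_context;
+ * nothing is applied to the superblock until ext4_apply_options().
+ */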
+static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
const struct mount_opts *m;
- kuid_t uid;
- kgid_t gid;
- int arg = 0;
+ int is_remount;
+ int token;
-#ifdef CONFIG_QUOTA
- if (token == Opt_usrjquota)
- return set_qf_name(sb, USRQUOTA, &args[0]);
- else if (token == Opt_grpjquota)
- return set_qf_name(sb, GRPQUOTA, &args[0]);
- else if (token == Opt_offusrjquota)
- return clear_qf_name(sb, USRQUOTA);
- else if (token == Opt_offgrpjquota)
- return clear_qf_name(sb, GRPQUOTA);
-#endif
- switch (token) {
- case Opt_noacl:
- case Opt_nouser_xattr:
- ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
- break;
- case Opt_sb:
- return 1; /* handled by get_sb_block() */
- case Opt_removed:
- ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
- return 1;
- case Opt_abort:
- ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
- return 1;
- case Opt_i_version:
- sb->s_flags |= SB_I_VERSION;
- return 1;
- case Opt_lazytime:
- sb->s_flags |= SB_LAZYTIME;
- return 1;
- case Opt_nolazytime:
- sb->s_flags &= ~SB_LAZYTIME;
- return 1;
- case Opt_inlinecrypt:
-#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
- sb->s_flags |= SB_INLINECRYPT;
-#else
- ext4_msg(sb, KERN_ERR, "inline encryption not supported");
-#endif
- return 1;
- }
+ token = fs_parse(fc, ext4_param_specs, param, &result);
+ if (token < 0)
+ return token;
+ is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
for (m = ext4_mount_opts; m->token != Opt_err; m++)
if (token == m->token)
break;
- if (m->token == Opt_err) {
- ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
- "or missing value", opt);
- return -1;
- }
-
- if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Mount option \"%s\" incompatible with ext2", opt);
- return -1;
- }
- if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Mount option \"%s\" incompatible with ext3", opt);
- return -1;
- }
+ ctx->opt_flags |= m->flags;
- if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
- return -1;
- if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
- return -1;
if (m->flags & MOPT_EXPLICIT) {
if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
- set_opt2(sb, EXPLICIT_DELALLOC);
+ ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC);
} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
- set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
+ ctx_set_mount_opt2(ctx,
+ EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM);
} else
- return -1;
- }
- if (m->flags & MOPT_CLEAR_ERR)
- clear_opt(sb, ERRORS_MASK);
- if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
- ext4_msg(sb, KERN_ERR, "Cannot change quota "
- "options when quota turned on");
- return -1;
+ return -EINVAL;
}
if (m->flags & MOPT_NOSUPPORT) {
- ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
- } else if (token == Opt_commit) {
- if (arg == 0)
- arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
- else if (arg > INT_MAX / HZ) {
- ext4_msg(sb, KERN_ERR,
+ ext4_msg(NULL, KERN_ERR, "%s option not supported",
+ param->key);
+ return 0;
+ }
+
+ switch (token) {
+#ifdef CONFIG_QUOTA
+ case Opt_usrjquota:
+ if (!*param->string)
+ return unnote_qf_name(fc, USRQUOTA);
+ else
+ return note_qf_name(fc, USRQUOTA, param);
+ case Opt_grpjquota:
+ if (!*param->string)
+ return unnote_qf_name(fc, GRPQUOTA);
+ else
+ return note_qf_name(fc, GRPQUOTA, param);
+#endif
+ case Opt_sb:
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ ext4_msg(NULL, KERN_WARNING,
+ "Ignoring %s option on remount", param->key);
+ } else {
+ ctx->s_sb_block = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_sb_block;
+ }
+ return 0;
+ case Opt_removed:
+ ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option",
+ param->key);
+ return 0;
+ case Opt_inlinecrypt:
+#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
+ ctx_set_flags(ctx, SB_INLINECRYPT);
+#else
+ ext4_msg(NULL, KERN_ERR, "inline encryption not supported");
+#endif
+ return 0;
+ case Opt_errors:
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK);
+ ctx_set_mount_opt(ctx, result.uint_32);
+ return 0;
+#ifdef CONFIG_QUOTA
+ case Opt_jqfmt:
+ ctx->s_jquota_fmt = result.uint_32;
+ ctx->spec |= EXT4_SPEC_JQFMT;
+ return 0;
+#endif
+ case Opt_data:
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
+ ctx_set_mount_opt(ctx, result.uint_32);
+ ctx->spec |= EXT4_SPEC_DATAJ;
+ return 0;
+ case Opt_commit:
+ if (result.uint_32 == 0)
+ result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ else if (result.uint_32 > INT_MAX / HZ) {
+ ext4_msg(NULL, KERN_ERR,
"Invalid commit interval %d, "
"must be smaller than %d",
- arg, INT_MAX / HZ);
- return -1;
+ result.uint_32, INT_MAX / HZ);
+ return -EINVAL;
}
- sbi->s_commit_interval = HZ * arg;
- } else if (token == Opt_debug_want_extra_isize) {
- if ((arg & 1) ||
- (arg < 4) ||
- (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
- ext4_msg(sb, KERN_ERR,
- "Invalid want_extra_isize %d", arg);
- return -1;
+ ctx->s_commit_interval = HZ * result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_commit_interval;
+ return 0;
+ case Opt_debug_want_extra_isize:
+ if ((result.uint_32 & 1) || (result.uint_32 < 4)) {
+ ext4_msg(NULL, KERN_ERR,
+ "Invalid want_extra_isize %d", result.uint_32);
+ return -EINVAL;
}
- sbi->s_want_extra_isize = arg;
- } else if (token == Opt_max_batch_time) {
- sbi->s_max_batch_time = arg;
- } else if (token == Opt_min_batch_time) {
- sbi->s_min_batch_time = arg;
- } else if (token == Opt_inode_readahead_blks) {
- if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
- ext4_msg(sb, KERN_ERR,
+ ctx->s_want_extra_isize = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_want_extra_isize;
+ return 0;
+ case Opt_max_batch_time:
+ ctx->s_max_batch_time = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_max_batch_time;
+ return 0;
+ case Opt_min_batch_time:
+ ctx->s_min_batch_time = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_min_batch_time;
+ return 0;
+ case Opt_inode_readahead_blks:
+ if (result.uint_32 &&
+ (result.uint_32 > (1 << 30) ||
+ !is_power_of_2(result.uint_32))) {
+ ext4_msg(NULL, KERN_ERR,
"EXT4-fs: inode_readahead_blks must be "
"0 or a power of 2 smaller than 2^31");
- return -1;
+ return -EINVAL;
}
- sbi->s_inode_readahead_blks = arg;
- } else if (token == Opt_init_itable) {
- set_opt(sb, INIT_INODE_TABLE);
- if (!args->from)
- arg = EXT4_DEF_LI_WAIT_MULT;
- sbi->s_li_wait_mult = arg;
- } else if (token == Opt_max_dir_size_kb) {
- sbi->s_max_dir_size_kb = arg;
+ ctx->s_inode_readahead_blks = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_inode_readahead_blks;
+ return 0;
+ case Opt_init_itable:
+ ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE);
+ ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+ if (param->type == fs_value_is_string)
+ ctx->s_li_wait_mult = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_li_wait_mult;
+ return 0;
+ case Opt_max_dir_size_kb:
+ ctx->s_max_dir_size_kb = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_max_dir_size_kb;
+ return 0;
#ifdef CONFIG_EXT4_DEBUG
- } else if (token == Opt_fc_debug_max_replay) {
- sbi->s_fc_debug_max_replay = arg;
+ case Opt_fc_debug_max_replay:
+ ctx->s_fc_debug_max_replay = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay;
+ return 0;
#endif
- } else if (token == Opt_stripe) {
- sbi->s_stripe = arg;
- } else if (token == Opt_resuid) {
- uid = make_kuid(current_user_ns(), arg);
- if (!uid_valid(uid)) {
- ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
- return -1;
- }
- sbi->s_resuid = uid;
- } else if (token == Opt_resgid) {
- gid = make_kgid(current_user_ns(), arg);
- if (!gid_valid(gid)) {
- ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
- return -1;
- }
- sbi->s_resgid = gid;
- } else if (token == Opt_journal_dev) {
+ case Opt_stripe:
+ ctx->s_stripe = result.uint_32;
+ ctx->spec |= EXT4_SPEC_s_stripe;
+ return 0;
+ case Opt_resuid:
+ ctx->s_resuid = result.uid;
+ ctx->spec |= EXT4_SPEC_s_resuid;
+ return 0;
+ case Opt_resgid:
+ ctx->s_resgid = result.gid;
+ ctx->spec |= EXT4_SPEC_s_resgid;
+ return 0;
+ case Opt_journal_dev:
if (is_remount) {
- ext4_msg(sb, KERN_ERR,
+ ext4_msg(NULL, KERN_ERR,
"Cannot specify journal on remount");
- return -1;
+ return -EINVAL;
}
- parsed_opts->journal_devnum = arg;
- } else if (token == Opt_journal_path) {
- char *journal_path;
+ ctx->journal_devnum = result.uint_32;
+ ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
+ return 0;
+ case Opt_journal_path:
+ {
struct inode *journal_inode;
struct path path;
int error;
if (is_remount) {
- ext4_msg(sb, KERN_ERR,
+ ext4_msg(NULL, KERN_ERR,
"Cannot specify journal on remount");
- return -1;
- }
- journal_path = match_strdup(&args[0]);
- if (!journal_path) {
- ext4_msg(sb, KERN_ERR, "error: could not dup "
- "journal device string");
- return -1;
+ return -EINVAL;
}
- error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
+ error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path);
if (error) {
- ext4_msg(sb, KERN_ERR, "error: could not find "
- "journal device path: error %d", error);
- kfree(journal_path);
- return -1;
+ ext4_msg(NULL, KERN_ERR, "error: could not find "
+ "journal device path");
+ return -EINVAL;
}
journal_inode = d_inode(path.dentry);
- if (!S_ISBLK(journal_inode->i_mode)) {
- ext4_msg(sb, KERN_ERR, "error: journal path %s "
- "is not a block device", journal_path);
- path_put(&path);
- kfree(journal_path);
- return -1;
- }
-
- parsed_opts->journal_devnum = new_encode_dev(journal_inode->i_rdev);
+ ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev);
+ ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
path_put(&path);
- kfree(journal_path);
- } else if (token == Opt_journal_ioprio) {
- if (arg > 7) {
- ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
+ return 0;
+ }
+ case Opt_journal_ioprio:
+ if (result.uint_32 > 7) {
+ ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority"
" (must be 0-7)");
- return -1;
- }
- parsed_opts->journal_ioprio =
- IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
- } else if (token == Opt_test_dummy_encryption) {
- return ext4_set_test_dummy_encryption(sb, opt, &args[0],
- is_remount);
- } else if (m->flags & MOPT_DATAJ) {
- if (is_remount) {
- if (!sbi->s_journal)
- ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
- else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
- ext4_msg(sb, KERN_ERR,
- "Cannot change data mode on remount");
- return -1;
- }
- } else {
- clear_opt(sb, DATA_FLAGS);
- sbi->s_mount_opt |= m->mount_opt;
- }
-#ifdef CONFIG_QUOTA
- } else if (m->flags & MOPT_QFMT) {
- if (sb_any_quota_loaded(sb) &&
- sbi->s_jquota_fmt != m->mount_opt) {
- ext4_msg(sb, KERN_ERR, "Cannot change journaled "
- "quota options when quota turned on");
- return -1;
- }
- if (ext4_has_feature_quota(sb)) {
- ext4_msg(sb, KERN_INFO,
- "Quota format mount options ignored "
- "when QUOTA feature is enabled");
- return 1;
+ return -EINVAL;
}
- sbi->s_jquota_fmt = m->mount_opt;
-#endif
- } else if (token == Opt_dax || token == Opt_dax_always ||
- token == Opt_dax_inode || token == Opt_dax_never) {
+ ctx->journal_ioprio =
+ IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32);
+ ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
+ return 0;
+ case Opt_test_dummy_encryption:
+ return ext4_parse_test_dummy_encryption(param, ctx);
+ case Opt_dax:
+ case Opt_dax_type:
#ifdef CONFIG_FS_DAX
- switch (token) {
+ {
+ int type = (token == Opt_dax) ?
+ Opt_dax : result.uint_32;
+
+ switch (type) {
case Opt_dax:
case Opt_dax_always:
- if (is_remount &&
- (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
- (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
- fail_dax_change_remount:
- ext4_msg(sb, KERN_ERR, "can't change "
- "dax mount option while remounting");
- return -1;
- }
- if (is_remount &&
- (test_opt(sb, DATA_FLAGS) ==
- EXT4_MOUNT_JOURNAL_DATA)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and dax");
- return -1;
- }
- ext4_msg(sb, KERN_WARNING,
- "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS;
- sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
+ ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
+ ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
break;
case Opt_dax_never:
- if (is_remount &&
- (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
- (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS)))
- goto fail_dax_change_remount;
- sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
- sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
+ ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
break;
case Opt_dax_inode:
- if (is_remount &&
- ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
- (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
- !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE)))
- goto fail_dax_change_remount;
- sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
- sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
+ ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
/* Strictly for printing options */
- sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE;
+ ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE);
break;
}
+ return 0;
+ }
#else
- ext4_msg(sb, KERN_INFO, "dax option not supported");
- sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
- sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
- return -1;
+ ext4_msg(NULL, KERN_INFO, "dax option not supported");
+ return -EINVAL;
#endif
- } else if (token == Opt_data_err_abort) {
- sbi->s_mount_opt |= m->mount_opt;
- } else if (token == Opt_data_err_ignore) {
- sbi->s_mount_opt &= ~m->mount_opt;
- } else if (token == Opt_mb_optimize_scan) {
- if (arg != 0 && arg != 1) {
- ext4_msg(sb, KERN_WARNING,
+ case Opt_data_err:
+ if (result.uint_32 == Opt_data_err_abort)
+ ctx_set_mount_opt(ctx, m->mount_opt);
+ else if (result.uint_32 == Opt_data_err_ignore)
+ ctx_clear_mount_opt(ctx, m->mount_opt);
+ return 0;
+ case Opt_mb_optimize_scan:
+ if (result.int_32 == 1) {
+ ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
+ ctx->spec |= EXT4_SPEC_mb_optimize_scan;
+ } else if (result.int_32 == 0) {
+ ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
+ ctx->spec |= EXT4_SPEC_mb_optimize_scan;
+ } else {
+ ext4_msg(NULL, KERN_WARNING,
"mb_optimize_scan should be set to 0 or 1.");
- return -1;
+ return -EINVAL;
}
- parsed_opts->mb_optimize_scan = arg;
- } else {
- if (!args->from)
- arg = 1;
+ return 0;
+ }
+
+ /*
+ * At this point we should only be getting options requiring MOPT_SET
+ * or MOPT_CLEAR. Anything else is a bug.
+ */
+ if (m->token == Opt_err) {
+ ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
+ param->key);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ else {
+ unsigned int set = 0;
+
+ if ((param->type == fs_value_is_flag) ||
+ result.uint_32 > 0)
+ set = 1;
+
if (m->flags & MOPT_CLEAR)
- arg = !arg;
+ set = !set;
else if (unlikely(!(m->flags & MOPT_SET))) {
- ext4_msg(sb, KERN_WARNING,
- "buggy handling of option %s", opt);
+ ext4_msg(NULL, KERN_WARNING,
+ "buggy handling of option %s",
+ param->key);
WARN_ON(1);
- return -1;
+ return -EINVAL;
}
if (m->flags & MOPT_2) {
- if (arg != 0)
- sbi->s_mount_opt2 |= m->mount_opt;
+ if (set != 0)
+ ctx_set_mount_opt2(ctx, m->mount_opt);
else
- sbi->s_mount_opt2 &= ~m->mount_opt;
+ ctx_clear_mount_opt2(ctx, m->mount_opt);
} else {
- if (arg != 0)
- sbi->s_mount_opt |= m->mount_opt;
+ if (set != 0)
+ ctx_set_mount_opt(ctx, m->mount_opt);
else
- sbi->s_mount_opt &= ~m->mount_opt;
+ ctx_clear_mount_opt(ctx, m->mount_opt);
}
}
- return 1;
+
+ return 0;
}
-static int parse_options(char *options, struct super_block *sb,
- struct ext4_parsed_options *ret_opts,
- int is_remount)
+static int parse_options(struct fs_context *fc, char *options)
{
- struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
- char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
- substring_t args[MAX_OPT_ARGS];
- int token;
+ struct fs_parameter param;
+ int ret;
+ char *key;
if (!options)
- return 1;
+ return 0;
- while ((p = strsep(&options, ",")) != NULL) {
- if (!*p)
- continue;
- /*
- * Initialize args struct so we know whether arg was
- * found; some options take optional arguments.
- */
- args[0].to = args[0].from = NULL;
- token = match_token(p, tokens, args);
- if (handle_mount_opt(sb, p, token, args, ret_opts,
- is_remount) < 0)
- return 0;
+ while ((key = strsep(&options, ",")) != NULL) {
+ if (*key) {
+ size_t v_len = 0;
+ char *value = strchr(key, '=');
+
+ param.type = fs_value_is_flag;
+ param.string = NULL;
+
+ if (value) {
+ if (value == key)
+ continue;
+
+ *value++ = 0;
+ v_len = strlen(value);
+ param.string = kmemdup_nul(value, v_len,
+ GFP_KERNEL);
+ if (!param.string)
+ return -ENOMEM;
+ param.type = fs_value_is_string;
+ }
+
+ param.key = key;
+ param.size = v_len;
+
+ ret = ext4_parse_param(fc, &param);
+ kfree(param.string);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ ret = ext4_validate_options(fc);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
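+/*
+ * Parse the mount options saved in the on-disk superblock (s_mount_opts)
+ * through a throw-away fs_context and apply them, propagating any journal
+ * device/ioprio settings into the real mount context.
+ */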
+static int parse_apply_sb_mount_options(struct super_block *sb,
+ struct ext4_fs_context *m_ctx)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char s_mount_opts[64];
+ struct ext4_fs_context *s_ctx = NULL;
+ struct fs_context *fc = NULL;
+ int ret = -ENOMEM;
+
+ if (!sbi->s_es->s_mount_opts[0])
+ return 0;
+
+ if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0)
+ return -E2BIG;
+
+ fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
+ if (!fc)
+ return -ENOMEM;
+
+ s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
+ if (!s_ctx)
+ goto out_free;
+
+ fc->fs_private = s_ctx;
+ fc->s_fs_info = sbi;
+
+ ret = parse_options(fc, s_mount_opts);
+ if (ret < 0)
+ goto parse_failed;
+
+ ret = ext4_check_opt_consistency(fc, sb);
+ if (ret < 0) {
+parse_failed:
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ s_mount_opts);
+ ret = 0;
+ goto out_free;
}
+
+ if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
+ m_ctx->journal_devnum = s_ctx->journal_devnum;
+ if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
+ m_ctx->journal_ioprio = s_ctx->journal_ioprio;
+
+ ext4_apply_options(fc, sb);
+ ret = 0;
+
+out_free:
+ ext4_fc_free(fc);
+ kfree(fc);
+ return ret;
+}
+
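+/*
+ * Move any journaled-quota file names and quota format parsed into the
+ * context over to the ext4_sb_info (a no-op when the quota feature handles
+ * quota internally).
+ */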
+static void ext4_apply_quota_options(struct fs_context *fc,
+ struct super_block *sb)
+{
#ifdef CONFIG_QUOTA
+ bool quota_feature = ext4_has_feature_quota(sb);
+ struct ext4_fs_context *ctx = fc->fs_private;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char *qname;
+ int i;
+
+ if (quota_feature)
+ return;
+
+ if (ctx->spec & EXT4_SPEC_JQUOTA) {
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (!(ctx->qname_spec & (1 << i)))
+ continue;
+
+ qname = ctx->s_qf_names[i]; /* May be NULL */
+ if (qname)
+ set_opt(sb, QUOTA);
+ ctx->s_qf_names[i] = NULL;
+ qname = rcu_replace_pointer(sbi->s_qf_names[i], qname,
+ lockdep_is_held(&sb->s_umount));
+ if (qname)
+ kfree_rcu_mightsleep(qname);
+ }
+ }
+
+ if (ctx->spec & EXT4_SPEC_JQFMT)
+ sbi->s_jquota_fmt = ctx->s_jquota_fmt;
+#endif
+}
+
+/*
+ * Check quota settings consistency.
+ */
+static int ext4_check_quota_consistency(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_QUOTA
+ struct ext4_fs_context *ctx = fc->fs_private;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ bool quota_feature = ext4_has_feature_quota(sb);
+ bool quota_loaded = sb_any_quota_loaded(sb);
+ bool usr_qf_name, grp_qf_name, usrquota, grpquota;
+ int quota_flags, i;
+
/*
* We do the test below only for project quotas. 'usrquota' and
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
- if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
- ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
+ if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) &&
+ !ext4_has_feature_project(sb)) {
+ ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. "
"Cannot enable project quota enforcement.");
- return 0;
+ return -EINVAL;
}
- usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
- grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
- if (usr_qf_name || grp_qf_name) {
- if (test_opt(sb, USRQUOTA) && usr_qf_name)
- clear_opt(sb, USRQUOTA);
- if (test_opt(sb, GRPQUOTA) && grp_qf_name)
- clear_opt(sb, GRPQUOTA);
+ quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
+ EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA;
+ if (quota_loaded &&
+ ctx->mask_s_mount_opt & quota_flags &&
+ !ctx_test_mount_opt(ctx, quota_flags))
+ goto err_quota_change;
+
+ if (ctx->spec & EXT4_SPEC_JQUOTA) {
- if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
- ext4_msg(sb, KERN_ERR, "old and new quota "
- "format mixing");
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (!(ctx->qname_spec & (1 << i)))
+ continue;
+
+ if (quota_loaded &&
+ !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i])
+ goto err_jquota_change;
+
+ if (sbi->s_qf_names[i] && ctx->s_qf_names[i] &&
+ strcmp(get_qf_name(sb, sbi, i),
+ ctx->s_qf_names[i]) != 0)
+ goto err_jquota_specified;
+ }
+
+ if (quota_feature) {
+ ext4_msg(NULL, KERN_INFO,
+ "Journaled quota options ignored when "
+ "QUOTA feature is enabled");
return 0;
}
+ }
- if (!sbi->s_jquota_fmt) {
- ext4_msg(sb, KERN_ERR, "journaled quota format "
- "not specified");
+ if (ctx->spec & EXT4_SPEC_JQFMT) {
+ if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded)
+ goto err_jquota_change;
+ if (quota_feature) {
+ ext4_msg(NULL, KERN_INFO, "Quota format mount options "
+ "ignored when QUOTA feature is enabled");
+ return 0;
+ }
+ }
+
+ /* Make sure we don't mix old and new quota format */
+ usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) ||
+ ctx->s_qf_names[USRQUOTA]);
+ grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) ||
+ ctx->s_qf_names[GRPQUOTA]);
+
+ usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
+ test_opt(sb, USRQUOTA));
+
+ grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) ||
+ test_opt(sb, GRPQUOTA));
+
+ if (usr_qf_name) {
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
+ usrquota = false;
+ }
+ if (grp_qf_name) {
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
+ grpquota = false;
+ }
+
+ if (usr_qf_name || grp_qf_name) {
+ if (usrquota || grpquota) {
+ ext4_msg(NULL, KERN_ERR, "old and new quota "
+ "format mixing");
+ return -EINVAL;
+ }
+
+ if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) {
+ ext4_msg(NULL, KERN_ERR, "journaled quota format "
+ "not specified");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+
+err_quota_change:
+ ext4_msg(NULL, KERN_ERR,
+ "Cannot change quota options when quota turned on");
+ return -EINVAL;
+err_jquota_change:
+ ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota "
+ "options when quota turned on");
+ return -EINVAL;
+err_jquota_specified:
+ ext4_msg(NULL, KERN_ERR, "%s quota file already specified",
+ QTYPE2NAME(i));
+ return -EINVAL;
+#else
+ return 0;
+#endif
+}
+
+static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
+ struct super_block *sb)
+{
+ const struct ext4_fs_context *ctx = fc->fs_private;
+ const struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
+ return 0;
+
+ if (!ext4_has_feature_encrypt(sb)) {
+ ext4_msg(NULL, KERN_WARNING,
+ "test_dummy_encryption requires encrypt feature");
+ return -EINVAL;
+ }
+ /*
+ * This mount option is just for testing, and it's not worthwhile to
+ * implement the extra complexity (e.g. RCU protection) that would be
+ * needed to allow it to be set or changed during remount. We do allow
+ * it to be specified during remount, but only if there is no change.
+ */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
+ &ctx->dummy_enc_policy))
+ return 0;
+ ext4_msg(NULL, KERN_WARNING,
+ "Can't set or change test_dummy_encryption on remount");
+ return -EINVAL;
+ }
+ /* Also make sure s_mount_opts didn't contain a conflicting value. */
+ if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
+ if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
+ &ctx->dummy_enc_policy))
return 0;
+ ext4_msg(NULL, KERN_WARNING,
+ "Conflicting test_dummy_encryption options");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
+ struct super_block *sb)
+{
+ if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
+ /* if already set, it was already verified to be the same */
+ fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
+ return;
+ EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
+ memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
+ ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
+}
+
+static int ext4_check_opt_consistency(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct ext4_fs_context *ctx = fc->fs_private;
+ struct ext4_sb_info *sbi = fc->s_fs_info;
+ int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
+ int err;
+
+ if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
+ ext4_msg(NULL, KERN_ERR,
+ "Mount option(s) incompatible with ext2");
+ return -EINVAL;
+ }
+ if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
+ ext4_msg(NULL, KERN_ERR,
+ "Mount option(s) incompatible with ext3");
+ return -EINVAL;
+ }
+
+ if (ctx->s_want_extra_isize >
+ (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) {
+ ext4_msg(NULL, KERN_ERR,
+ "Invalid want_extra_isize %d",
+ ctx->s_want_extra_isize);
+ return -EINVAL;
+ }
+
+ err = ext4_check_test_dummy_encryption(fc, sb);
+ if (err)
+ return err;
+
+ if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) {
+ if (!sbi->s_journal) {
+ ext4_msg(NULL, KERN_WARNING,
+ "Remounting file system with no journal "
+ "so ignoring journalled data option");
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
+ } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) !=
+ test_opt(sb, DATA_FLAGS)) {
+ ext4_msg(NULL, KERN_ERR, "Cannot change data mode "
+ "on remount");
+ return -EINVAL;
+ }
+ }
+
+ if (is_remount) {
+ if (!sbi->s_journal &&
+ ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) {
+ ext4_msg(NULL, KERN_WARNING,
+ "Remounting fs w/o journal so ignoring data_err option");
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT);
+ }
+
+ if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
+ (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
+ ext4_msg(NULL, KERN_ERR, "can't mount with "
+ "both data=journal and dax");
+ return -EINVAL;
+ }
+
+ if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
+ (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
+ (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
+fail_dax_change_remount:
+ ext4_msg(NULL, KERN_ERR, "can't change "
+ "dax mount option while remounting");
+ return -EINVAL;
+ } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) &&
+ (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
+ (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) {
+ goto fail_dax_change_remount;
+ } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) &&
+ ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
+ (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
+ !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) {
+ goto fail_dax_change_remount;
}
}
+
+ return ext4_check_quota_consistency(fc, sb);
+}
+
+static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
+{
+ struct ext4_fs_context *ctx = fc->fs_private;
+ struct ext4_sb_info *sbi = fc->s_fs_info;
+
+ sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
+ sbi->s_mount_opt |= ctx->vals_s_mount_opt;
+ sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2;
+ sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2;
+ sb->s_flags &= ~ctx->mask_s_flags;
+ sb->s_flags |= ctx->vals_s_flags;
+
+#define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; })
+ APPLY(s_commit_interval);
+ APPLY(s_stripe);
+ APPLY(s_max_batch_time);
+ APPLY(s_min_batch_time);
+ APPLY(s_want_extra_isize);
+ APPLY(s_inode_readahead_blks);
+ APPLY(s_max_dir_size_kb);
+ APPLY(s_li_wait_mult);
+ APPLY(s_resgid);
+ APPLY(s_resuid);
+
+#ifdef CONFIG_EXT4_DEBUG
+ APPLY(s_fc_debug_max_replay);
#endif
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- int blocksize =
- BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
- if (blocksize < PAGE_SIZE)
- ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
- "experimental mount option 'dioread_nolock' "
- "for blocksize < PAGE_SIZE");
+
+ ext4_apply_quota_options(fc, sb);
+ ext4_apply_test_dummy_encryption(ctx, sb);
+}
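
ext4_apply_options() above overlays the parsed context onto the live superblock with a mask/vals pair: parsing only records which option bits were touched and their requested values, and the live flags are rewritten in one step once the consistency checks have passed, so untouched options survive unchanged. A minimal sketch of that pattern (the flag values here are made up):

#include <stdio.h>

struct ctx { unsigned int mask, vals; };

static void ctx_set(struct ctx *c, unsigned int flag)
{
	c->mask |= flag;
	c->vals |= flag;
}

static void ctx_clear(struct ctx *c, unsigned int flag)
{
	c->mask |= flag;
	c->vals &= ~flag;
}

int main(void)
{
	unsigned int mount_opt = 0x0005;	/* current flags: bits 0 and 2 */
	struct ctx c = { 0, 0 };

	ctx_set(&c, 0x0002);			/* request: set bit 1 */
	ctx_clear(&c, 0x0004);			/* request: clear bit 2 */

	/* apply in one step; bit 0 was never touched and survives */
	mount_opt = (mount_opt & ~c.mask) | c.vals;
	printf("mount_opt = 0x%04x\n", mount_opt);	/* 0x0003 */
	return 0;
}
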
+
+
+static int ext4_validate_options(struct fs_context *fc)
+{
+#ifdef CONFIG_QUOTA
+ struct ext4_fs_context *ctx = fc->fs_private;
+ char *usr_qf_name, *grp_qf_name;
+
+ usr_qf_name = ctx->s_qf_names[USRQUOTA];
+ grp_qf_name = ctx->s_qf_names[GRPQUOTA];
+
+ if (usr_qf_name || grp_qf_name) {
+ if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name)
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
+
+ if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name)
+ ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
+
+ if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
+ ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) {
+ ext4_msg(NULL, KERN_ERR, "old and new quota "
+ "format mixing");
+ return -EINVAL;
+ }
}
+#endif
return 1;
}
@@ -2533,12 +2913,12 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
static const char *token2str(int token)
{
- const struct match_token *t;
+ const struct fs_parameter_spec *spec;
- for (t = tokens; t->token != Opt_err; t++)
- if (t->token == token && !strchr(t->pattern, '='))
+ for (spec = ext4_param_specs; spec->name != NULL; spec++)
+ if (spec->opt == token && !spec->type)
break;
- return t->pattern;
+ return spec->name;
}
/*
@@ -2551,7 +2931,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
- int def_errors, def_mount_opt = sbi->s_def_mount_opt;
+ int def_errors;
const struct mount_opts *m;
char sep = nodefs ? '\n' : ',';
@@ -2563,24 +2943,37 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
for (m = ext4_mount_opts; m->token != Opt_err; m++) {
int want_set = m->flags & MOPT_SET;
+ int opt_2 = m->flags & MOPT_2;
+ unsigned int mount_opt, def_mount_opt;
+
if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
- (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
+ m->flags & MOPT_SKIP)
+ continue;
+
+ if (opt_2) {
+ mount_opt = sbi->s_mount_opt2;
+ def_mount_opt = sbi->s_def_mount_opt2;
+ } else {
+ mount_opt = sbi->s_mount_opt;
+ def_mount_opt = sbi->s_def_mount_opt;
+ }
+ /* skip if same as the default */
+ if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
continue;
- if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
- continue; /* skip if same as the default */
+ /* select Opt_noFoo vs Opt_Foo */
if ((want_set &&
- (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
- (!want_set && (sbi->s_mount_opt & m->mount_opt)))
- continue; /* select Opt_noFoo vs Opt_Foo */
+ (mount_opt & m->mount_opt) != m->mount_opt) ||
+ (!want_set && (mount_opt & m->mount_opt)))
+ continue;
SEQ_OPTS_PRINT("%s", token2str(m->token));
}
if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
- le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
+ ext4_get_resuid(es) != EXT4_DEF_RESUID)
SEQ_OPTS_PRINT("resuid=%u",
from_kuid_munged(&init_user_ns, sbi->s_resuid));
if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
- le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
+ ext4_get_resgid(es) != EXT4_DEF_RESGID)
SEQ_OPTS_PRINT("resgid=%u",
from_kgid_munged(&init_user_ns, sbi->s_resgid));
def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
@@ -2596,12 +2989,12 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
- if (sb->s_flags & SB_I_VERSION)
+ if (nodefs && sb->s_flags & SB_I_VERSION)
SEQ_OPTS_PUTS("i_version");
if (nodefs || sbi->s_stripe)
SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
if (nodefs || EXT4_MOUNT_DATA_FLAGS &
- (sbi->s_mount_opt ^ def_mount_opt)) {
+ (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
SEQ_OPTS_PUTS("data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
@@ -2637,6 +3030,24 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
} else if (test_opt2(sb, DAX_INODE)) {
SEQ_OPTS_PUTS("dax=inode");
}
+
+ if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
+ !test_opt2(sb, MB_OPTIMIZE_SCAN)) {
+ SEQ_OPTS_PUTS("mb_optimize_scan=0");
+ } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
+ test_opt2(sb, MB_OPTIMIZE_SCAN)) {
+ SEQ_OPTS_PUTS("mb_optimize_scan=1");
+ }
+
+ if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS))
+ SEQ_OPTS_PUTS("prefetch_block_bitmaps");
+
+ if (ext4_emergency_ro(sb))
+ SEQ_OPTS_PUTS("emergency_ro");
+
+ if (ext4_forced_shutdown(sb))
+ SEQ_OPTS_PUTS("shutdown");
+
ext4_show_quota_options(seq, sb);
return 0;
}
@@ -2653,7 +3064,7 @@ int ext4_seq_options_show(struct seq_file *seq, void *offset)
seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
rc = _ext4_show_options(seq, sb, 1);
- seq_puts(seq, "\n");
+ seq_putc(seq, '\n');
return rc;
}
@@ -2712,8 +3123,6 @@ done:
EXT4_BLOCKS_PER_GROUP(sb),
EXT4_INODES_PER_GROUP(sb),
sbi->s_mount_opt, sbi->s_mount_opt2);
-
- cleancache_init_fs(sb);
return err;
}
@@ -2806,19 +3215,19 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
__le32 le_group = cpu_to_le32(block_group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (ext4_has_metadata_csum(sbi->s_sb)) {
+ if (ext4_has_feature_metadata_csum(sbi->s_sb)) {
/* Use new metadata_csum algorithm */
__u32 csum32;
__u16 dummy_csum = 0;
- csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+ csum32 = ext4_chksum(sbi->s_csum_seed, (__u8 *)&le_group,
sizeof(le_group));
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
+ csum32 = ext4_chksum(csum32, (__u8 *)gdp, offset);
+ csum32 = ext4_chksum(csum32, (__u8 *)&dummy_csum,
sizeof(dummy_csum));
offset += sizeof(dummy_csum);
if (offset < sbi->s_desc_size)
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
+ csum32 = ext4_chksum(csum32, (__u8 *)gdp + offset,
sbi->s_desc_size - offset);
crc = csum32 & 0xFFFF;
@@ -2834,11 +3243,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
crc = crc16(crc, (__u8 *)gdp, offset);
offset += sizeof(gdp->bg_checksum); /* skip checksum */
/* for checksum of struct ext4_group_desc do the rest...*/
- if (ext4_has_feature_64bit(sb) &&
- offset < le16_to_cpu(sbi->s_es->s_desc_size))
+ if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
crc = crc16(crc, (__u8 *)gdp + offset,
- le16_to_cpu(sbi->s_es->s_desc_size) -
- offset);
+ sbi->s_desc_size - offset);
out:
return cpu_to_le16(crc);
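
The metadata_csum branch above feeds the group descriptor to the checksum in three pieces, substituting a zeroed dummy_csum for the embedded bg_checksum field, so the stored checksum never covers itself. A toy sketch of that dummy-field technique (sum8() is a hypothetical stand-in for the crc32c the real code seeds from the filesystem UUID):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct desc {
	uint32_t a;
	uint16_t csum;		/* embedded checksum field */
	uint16_t b;
};

static uint32_t sum8(uint32_t seed, const void *p, size_t len)
{
	const uint8_t *bytes = p;

	while (len--)
		seed += *bytes++;
	return seed;
}

static uint16_t desc_csum(const struct desc *d)
{
	size_t offset = offsetof(struct desc, csum);
	uint16_t dummy_csum = 0;
	uint32_t c;

	c = sum8(~0U, d, offset);			/* bytes before csum */
	c = sum8(c, &dummy_csum, sizeof(dummy_csum));	/* csum field as zero */
	offset += sizeof(dummy_csum);
	c = sum8(c, (const uint8_t *)d + offset,
		 sizeof(*d) - offset);			/* bytes after csum */
	return c & 0xFFFF;
}

int main(void)
{
	struct desc d = { .a = 42, .csum = 0, .b = 7 };

	d.csum = desc_csum(&d);
	printf("csum  = 0x%04x\n", d.csum);
	printf("valid = %d\n", desc_csum(&d) == d.csum);	/* 1 */
	return 0;
}
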
@@ -3034,8 +3441,9 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
*/
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
- unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
+ loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
int meta_blocks;
+ unsigned int ppb = 1 << (bits - 2);
/*
* This is calculated to be the largest file size for a dense, block
@@ -3067,27 +3475,42 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
}
+ /* Compute how many blocks we can address by block tree */
+ res += ppb;
+ res += ppb * ppb;
+ res += ((loff_t)ppb) * ppb * ppb;
+ /* Compute how many metadata blocks are needed */
+ meta_blocks = 1;
+ meta_blocks += 1 + ppb;
+ meta_blocks += 1 + ppb + ppb * ppb;
+ /* Does block tree limit file size? */
+ if (res + meta_blocks <= upper_limit)
+ goto check_lfs;
+
+ res = upper_limit;
+ /* How many metadata blocks are needed for addressing upper_limit? */
+ upper_limit -= EXT4_NDIR_BLOCKS;
/* indirect blocks */
meta_blocks = 1;
+ upper_limit -= ppb;
/* double indirect blocks */
- meta_blocks += 1 + (1LL << (bits-2));
- /* tripple indirect blocks */
- meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
- upper_limit -= meta_blocks;
- upper_limit <<= bits;
-
- res += 1LL << (bits-2);
- res += 1LL << (2*(bits-2));
- res += 1LL << (3*(bits-2));
+ if (upper_limit < ppb * ppb) {
+ meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
+ res -= meta_blocks;
+ goto check_lfs;
+ }
+ meta_blocks += 1 + ppb;
+ upper_limit -= ppb * ppb;
+ /* triple indirect blocks for the rest */
+ meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
+ DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
+ res -= meta_blocks;
+check_lfs:
res <<= bits;
- if (res > upper_limit)
- res = upper_limit;
-
if (res > MAX_LFS_FILESIZE)
res = MAX_LFS_FILESIZE;
- return (loff_t)res;
+ return res;
}
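
The rewritten ext4_max_bitmap_size() above first sums how many data blocks the direct/indirect/double/triple tree can address and how many metadata blocks that mapping costs, and only falls back to the i_blocks-derived upper_limit when the tree is not the binding constraint. A worked example of the tree arithmetic for 4 KiB blocks (numbers computed here for illustration, not taken from the patch):

#include <stdio.h>

int main(void)
{
	int bits = 12;					/* log2(4096-byte block) */
	unsigned long long ppb = 1ULL << (bits - 2);	/* pointers per block: 1024 */
	unsigned long long ndir = 12;			/* EXT4_NDIR_BLOCKS */

	/* data blocks reachable through the full block tree */
	unsigned long long data = ndir + ppb + ppb * ppb + ppb * ppb * ppb;
	/* metadata blocks needed to map all of them */
	unsigned long long meta = 1 + (1 + ppb) + (1 + ppb + ppb * ppb);

	printf("pointers per block : %llu\n", ppb);
	printf("max data blocks    : %llu\n", data);
	printf("metadata blocks    : %llu\n", meta);
	printf("tree size limit    : %llu bytes (~4 TiB)\n", data << bits);
	return 0;
}
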
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
@@ -3172,14 +3595,12 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#ifndef CONFIG_UNICODE
- if (ext4_has_feature_casefold(sb)) {
+ if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) {
ext4_msg(sb, KERN_ERR,
"Filesystem with casefold feature cannot be "
"mounted without CONFIG_UNICODE");
return 0;
}
-#endif
if (readonly)
return 1;
@@ -3222,7 +3643,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
*/
static void print_daily_error_info(struct timer_list *t)
{
- struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
+ struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report);
struct super_block *sb = sbi->s_sb;
struct ext4_super_block *es = sbi->s_es;
@@ -3272,20 +3693,18 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
ext4_group_t group = elr->lr_next_group;
unsigned int prefetch_ios = 0;
int ret = 0;
+ int nr = EXT4_SB(sb)->s_mb_prefetch;
u64 start_time;
if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
- elr->lr_next_group = ext4_mb_prefetch(sb, group,
- EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
- if (prefetch_ios)
- ext4_mb_prefetch_fini(sb, elr->lr_next_group,
- prefetch_ios);
- trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
- prefetch_ios);
+ elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
+ ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
+ trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
if (group >= elr->lr_next_group) {
ret = 1;
if (elr->lr_first_not_zeroed != ngroups &&
- !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
+ !ext4_emergency_state(sb) && !sb_rdonly(sb) &&
+ test_opt(sb, INIT_INODE_TABLE)) {
elr->lr_next_group = elr->lr_first_not_zeroed;
elr->lr_mode = EXT4_LI_MODE_ITABLE;
ret = 0;
@@ -3309,12 +3728,12 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
ret = 1;
if (!ret) {
- start_time = ktime_get_real_ns();
+ start_time = ktime_get_ns();
ret = ext4_init_inode_table(sb, group,
elr->lr_timeout ? 0 : 1);
trace_ext4_lazy_itable_init(sb, group);
if (elr->lr_timeout == 0) {
- elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) *
+ elr->lr_timeout = nsecs_to_jiffies((ktime_get_ns() - start_time) *
EXT4_SB(elr->lr_super)->s_li_wait_mult);
}
elr->lr_next_sched = jiffies + elr->lr_timeout;
@@ -3364,17 +3783,19 @@ static struct task_struct *ext4_lazyinit_task;
*/
static int ext4_lazyinit_thread(void *arg)
{
- struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
+ struct ext4_lazy_init *eli = arg;
struct list_head *pos, *n;
struct ext4_li_request *elr;
unsigned long next_wakeup, cur;
BUG_ON(NULL == eli);
+ set_freezable();
cont_thread:
while (true) {
- next_wakeup = MAX_JIFFY_OFFSET;
+ bool next_wakeup_initialized = false;
+ next_wakeup = 0;
mutex_lock(&eli->li_list_mtx);
if (list_empty(&eli->li_request_list)) {
mutex_unlock(&eli->li_list_mtx);
@@ -3387,8 +3808,11 @@ cont_thread:
lr_request);
if (time_before(jiffies, elr->lr_next_sched)) {
- if (time_before(elr->lr_next_sched, next_wakeup))
+ if (!next_wakeup_initialized ||
+ time_before(elr->lr_next_sched, next_wakeup)) {
next_wakeup = elr->lr_next_sched;
+ next_wakeup_initialized = true;
+ }
continue;
}
if (down_read_trylock(&elr->lr_super->s_umount)) {
@@ -3414,19 +3838,20 @@ cont_thread:
}
if (!progress) {
elr->lr_next_sched = jiffies +
- (prandom_u32()
- % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+ get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
}
- if (time_before(elr->lr_next_sched, next_wakeup))
+ if (!next_wakeup_initialized ||
+ time_before(elr->lr_next_sched, next_wakeup)) {
next_wakeup = elr->lr_next_sched;
+ next_wakeup_initialized = true;
+ }
}
mutex_unlock(&eli->li_list_mtx);
try_to_freeze();
cur = jiffies;
- if ((time_after_eq(cur, next_wakeup)) ||
- (MAX_JIFFY_OFFSET == next_wakeup)) {
+ if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) {
cond_resched();
continue;
}
@@ -3562,8 +3987,7 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
* spread the inode table initialization requests
* better.
*/
- elr->lr_next_sched = jiffies + (prandom_u32() %
- (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+ elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
return elr;
}
@@ -3585,9 +4009,9 @@ int ext4_register_li_request(struct super_block *sb,
goto out;
}
- if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
- (first_not_zeroed == ngroups || sb_rdonly(sb) ||
- !test_opt(sb, INIT_INODE_TABLE)))
+ if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
+ (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
+ (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
goto out;
elr = ext4_li_request_new(sb, first_not_zeroed);
@@ -3648,7 +4072,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
int compat, incompat;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (ext4_has_metadata_csum(sb)) {
+ if (ext4_has_feature_metadata_csum(sb)) {
/* journal checksum v3 */
compat = 0;
incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
@@ -3704,9 +4128,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
ext4_fsblk_t first_block, last_block, b;
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
int s, j, count = 0;
+ int has_super = ext4_bg_has_super(sb, grp);
if (!ext4_has_feature_bigalloc(sb))
- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+ return (has_super + ext4_bg_num_gdb(sb, grp) +
+ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
sbi->s_itb_per_group + 2);
first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
@@ -3765,7 +4191,7 @@ int ext4_calculate_overhead(struct super_block *sb)
unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
- char *buf = (char *) get_zeroed_page(GFP_NOFS);
+ char *buf = kvmalloc(sb->s_blocksize, GFP_NOFS | __GFP_ZERO);
if (!buf)
return -ENOMEM;
@@ -3790,7 +4216,7 @@ int ext4_calculate_overhead(struct super_block *sb)
blks = count_overhead(sb, i, buf);
overhead += blks;
if (blks)
- memset(buf, 0, PAGE_SIZE);
+ memset(buf, 0, sb->s_blocksize);
cond_resched();
}
@@ -3798,12 +4224,12 @@ int ext4_calculate_overhead(struct super_block *sb)
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
- if (sbi->s_journal && !sbi->s_journal_bdev)
+ if (sbi->s_journal && !sbi->s_journal_bdev_file)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
/* j_inum for internal journal is non-zero */
j_inode = ext4_get_journal_inode(sb, j_inum);
- if (j_inode) {
+ if (!IS_ERR(j_inode)) {
j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
overhead += EXT4_NUM_B2C(sbi, j_blocks);
iput(j_inode);
@@ -3813,7 +4239,7 @@ int ext4_calculate_overhead(struct super_block *sb)
}
sbi->s_overhead = overhead;
smp_wmb();
- free_page((unsigned long) buf);
+ kvfree(buf);
return 0;
}
@@ -3876,131 +4302,46 @@ static void ext4_setup_csum_trigger(struct super_block *sb,
sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
}
-static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+static void ext4_free_sbi(struct ext4_sb_info *sbi)
{
- struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
- char *orig_data = kstrdup(data, GFP_KERNEL);
- struct buffer_head *bh, **group_desc;
- struct ext4_super_block *es = NULL;
- struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- struct flex_groups **flex_groups;
- ext4_fsblk_t block;
- ext4_fsblk_t sb_block = get_sb_block(&data);
- ext4_fsblk_t logical_sb_block;
- unsigned long offset = 0;
- unsigned long def_mount_opts;
- struct inode *root;
- const char *descr;
- int ret = -ENOMEM;
- int blocksize, clustersize;
- unsigned int db_count;
- unsigned int i;
- int needs_recovery, has_huge_files;
- __u64 blocks_count;
- int err = 0;
- ext4_group_t first_not_zeroed;
- struct ext4_parsed_options parsed_opts;
+ if (!sbi)
+ return;
- /* Set defaults for the variables that will be set during parsing */
- parsed_opts.journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
- parsed_opts.journal_devnum = 0;
- parsed_opts.mb_optimize_scan = DEFAULT_MB_OPTIMIZE_SCAN;
+ kfree(sbi->s_blockgroup_lock);
+ fs_put_dax(sbi->s_daxdev, NULL);
+ kfree(sbi);
+}
- if ((data && !orig_data) || !sbi)
- goto out_free_base;
+static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi;
+
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+ return NULL;
+
+ sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
+ NULL, NULL);
- sbi->s_daxdev = dax_dev;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+
if (!sbi->s_blockgroup_lock)
- goto out_free_base;
+ goto err_out;
sb->s_fs_info = sbi;
sbi->s_sb = sb;
- sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
- sbi->s_sb_block = sb_block;
- sbi->s_sectors_written_start =
- part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
-
- /* Cleanup superblock name */
- strreplace(sb->s_id, '/', '!');
-
- /* -EINVAL is default */
- ret = -EINVAL;
- blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
- if (!blocksize) {
- ext4_msg(sb, KERN_ERR, "unable to set blocksize");
- goto out_fail;
- }
-
- /*
- * The ext4 superblock will not be buffer aligned for other than 1kB
- * block sizes. We need to calculate the offset from buffer start.
- */
- if (blocksize != EXT4_MIN_BLOCK_SIZE) {
- logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
- offset = do_div(logical_sb_block, blocksize);
- } else {
- logical_sb_block = sb_block;
- }
-
- bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
- if (IS_ERR(bh)) {
- ext4_msg(sb, KERN_ERR, "unable to read superblock");
- ret = PTR_ERR(bh);
- goto out_fail;
- }
- /*
- * Note: s_es must be initialized as soon as possible because
- * some ext4 macro-instructions depend on its value
- */
- es = (struct ext4_super_block *) (bh->b_data + offset);
- sbi->s_es = es;
- sb->s_magic = le16_to_cpu(es->s_magic);
- if (sb->s_magic != EXT4_SUPER_MAGIC)
- goto cantfind_ext4;
- sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
-
- /* Warn if metadata_csum and gdt_csum are both set. */
- if (ext4_has_feature_metadata_csum(sb) &&
- ext4_has_feature_gdt_csum(sb))
- ext4_warning(sb, "metadata_csum and uninit_bg are "
- "redundant flags; please run fsck.");
-
- /* Check for a known checksum algorithm */
- if (!ext4_verify_csum_type(sb, es)) {
- ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
- "unknown checksum algorithm.");
- silent = 1;
- goto cantfind_ext4;
- }
- ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
- ext4_orphan_file_block_trigger);
-
- /* Load the checksum driver */
- sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(sbi->s_chksum_driver)) {
- ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
- ret = PTR_ERR(sbi->s_chksum_driver);
- sbi->s_chksum_driver = NULL;
- goto failed_mount;
- }
-
- /* Check superblock checksum */
- if (!ext4_superblock_csum_verify(sb, es)) {
- ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
- "invalid superblock checksum. Run e2fsck?");
- silent = 1;
- ret = -EFSBADCRC;
- goto cantfind_ext4;
- }
+ return sbi;
+err_out:
+ fs_put_dax(sbi->s_daxdev, NULL);
+ kfree(sbi);
+ return NULL;
+}
- /* Precompute checksum seed for all metadata */
- if (ext4_has_feature_csum_seed(sb))
- sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
- else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
- sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
- sizeof(es->s_uuid));
+static void ext4_set_def_opts(struct super_block *sb,
+ struct ext4_super_block *es)
+{
+ unsigned long def_mount_opts;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
@@ -4019,7 +4360,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (ext4_has_feature_fast_commit(sb))
set_opt2(sb, JOURNAL_FAST_COMMIT);
/* don't forget to enable journal_csum when metadata_csum is enabled. */
- if (ext4_has_metadata_csum(sb))
+ if (ext4_has_feature_metadata_csum(sb))
set_opt(sb, JOURNAL_CHECKSUM);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
@@ -4029,9 +4370,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
set_opt(sb, WRITEBACK_DATA);
- if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
+ if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sb, ERRORS_PANIC);
- else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
+ else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE)
set_opt(sb, ERRORS_CONT);
else
set_opt(sb, ERRORS_RO);
@@ -4040,12 +4381,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (def_mount_opts & EXT4_DEFM_DISCARD)
set_opt(sb, DISCARD);
- sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
- sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
- sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
- sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
- sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
-
if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
set_opt(sb, BARRIER);
@@ -4057,31 +4392,124 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sb, DELALLOC);
- /*
- * set default s_li_wait_mult for lazyinit, for the case there is
- * no mount option specified.
- */
- sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+ set_opt(sb, DIOREAD_NOLOCK);
+}
- if (le32_to_cpu(es->s_log_block_size) >
- (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
- ext4_msg(sb, KERN_ERR,
- "Invalid log block size: %u",
- le32_to_cpu(es->s_log_block_size));
- goto failed_mount;
+static int ext4_handle_clustersize(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ int clustersize;
+
+ /* Handle clustersize */
+ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
+ if (ext4_has_feature_bigalloc(sb)) {
+ if (clustersize < sb->s_blocksize) {
+ ext4_msg(sb, KERN_ERR,
+ "cluster size (%d) smaller than "
+ "block size (%lu)", clustersize, sb->s_blocksize);
+ return -EINVAL;
+ }
+ sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+ le32_to_cpu(es->s_log_block_size);
+ } else {
+ if (clustersize != sb->s_blocksize) {
+ ext4_msg(sb, KERN_ERR,
+ "fragment/cluster size (%d) != "
+ "block size (%lu)", clustersize, sb->s_blocksize);
+ return -EINVAL;
+ }
+ if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
+ ext4_msg(sb, KERN_ERR,
+ "#blocks per group too big: %lu",
+ sbi->s_blocks_per_group);
+ return -EINVAL;
+ }
+ sbi->s_cluster_bits = 0;
}
- if (le32_to_cpu(es->s_log_cluster_size) >
- (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
+ if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
+ sbi->s_clusters_per_group);
+ return -EINVAL;
+ }
+ if (sbi->s_blocks_per_group !=
+ (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
ext4_msg(sb, KERN_ERR,
- "Invalid log cluster size: %u",
- le32_to_cpu(es->s_log_cluster_size));
- goto failed_mount;
+ "blocks per group (%lu) and clusters per group (%lu) inconsistent",
+ sbi->s_blocks_per_group, sbi->s_clusters_per_group);
+ return -EINVAL;
}
+ sbi->s_cluster_ratio = clustersize / sb->s_blocksize;
- blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+ /* Do we have a standard group size of clustersize * 8 blocks? */
+ if (sbi->s_blocks_per_group == clustersize << 3)
+ set_opt2(sb, STD_GROUP_SIZE);
+
+ return 0;
+}
+
+/*
+ * ext4_atomic_write_init: Initializes filesystem min & max atomic write units.
+ * With a non-bigalloc filesystem, the awu will be based upon the filesystem
+ * blocksize & bdev awu units.
+ * With bigalloc, it will be based upon the bigalloc cluster size & bdev awu units.
+ * @sb: super block
+ */
+static void ext4_atomic_write_init(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct block_device *bdev = sb->s_bdev;
+ unsigned int clustersize = EXT4_CLUSTER_SIZE(sb);
- if (blocksize == PAGE_SIZE)
- set_opt(sb, DIOREAD_NOLOCK);
+ if (!bdev_can_atomic_write(bdev))
+ return;
+
+ if (!ext4_has_feature_extents(sb))
+ return;
+
+ sbi->s_awu_min = max(sb->s_blocksize,
+ bdev_atomic_write_unit_min_bytes(bdev));
+ sbi->s_awu_max = min(clustersize,
+ bdev_atomic_write_unit_max_bytes(bdev));
+ if (sbi->s_awu_min && sbi->s_awu_max &&
+ sbi->s_awu_min <= sbi->s_awu_max) {
+ ext4_msg(sb, KERN_NOTICE, "Supports (experimental) DIO atomic writes awu_min: %u, awu_max: %u",
+ sbi->s_awu_min, sbi->s_awu_max);
+ } else {
+ sbi->s_awu_min = 0;
+ sbi->s_awu_max = 0;
+ }
+}
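
ext4_atomic_write_init() above intersects the filesystem granularity with the device limits: the minimum atomic write unit can never be smaller than a block, the maximum never larger than a cluster, and both are bounded by what the block device advertises. A minimal sketch of the clamping, with made-up device numbers:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int blocksize = 4096, clustersize = 4096;
	unsigned int bdev_awu_min = 512, bdev_awu_max = 65536;	/* example device */

	unsigned int awu_min = max_u(blocksize, bdev_awu_min);
	unsigned int awu_max = min_u(clustersize, bdev_awu_max);

	if (awu_min && awu_max && awu_min <= awu_max)
		printf("atomic writes: awu_min=%u awu_max=%u\n", awu_min, awu_max);
	else
		printf("atomic writes not supported\n");
	return 0;
}
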
+
+static void ext4_fast_commit_init(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ /* Initialize fast commit stuff */
+ atomic_set(&sbi->s_fc_subtid, 0);
+ INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
+ INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
+ INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
+ INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
+ sbi->s_fc_bytes = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ sbi->s_fc_ineligible_tid = 0;
+ mutex_init(&sbi->s_fc_lock);
+ memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
+ sbi->s_fc_replay_state.fc_regions = NULL;
+ sbi->s_fc_replay_state.fc_regions_size = 0;
+ sbi->s_fc_replay_state.fc_regions_used = 0;
+ sbi->s_fc_replay_state.fc_regions_valid = 0;
+ sbi->s_fc_replay_state.fc_modified_inodes = NULL;
+ sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
+ sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
+}
+
+static int ext4_inode_info_init(struct super_block *sb,
+ struct ext4_super_block *es)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
@@ -4092,16 +4520,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
sbi->s_first_ino);
- goto failed_mount;
+ return -EINVAL;
}
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
- (sbi->s_inode_size > blocksize)) {
+ (sbi->s_inode_size > sb->s_blocksize)) {
ext4_msg(sb, KERN_ERR,
"unsupported inode size: %d",
sbi->s_inode_size);
- ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
- goto failed_mount;
+ ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize);
+ return -EINVAL;
}
/*
* i_atime_extra is the last extra field available for
@@ -4119,6 +4547,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_time_min = EXT4_TIMESTAMP_MIN;
}
+
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
@@ -4130,7 +4559,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (v > max) {
ext4_msg(sb, KERN_ERR,
"bad s_want_extra_isize: %d", v);
- goto failed_mount;
+ return -EINVAL;
}
if (sbi->s_want_extra_isize < v)
sbi->s_want_extra_isize = v;
@@ -4139,89 +4568,105 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (v > max) {
ext4_msg(sb, KERN_ERR,
"bad s_min_extra_isize: %d", v);
- goto failed_mount;
+ return -EINVAL;
}
if (sbi->s_want_extra_isize < v)
sbi->s_want_extra_isize = v;
}
}
- if (sbi->s_es->s_mount_opts[0]) {
- char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
- sizeof(sbi->s_es->s_mount_opts),
- GFP_KERNEL);
- if (!s_mount_opts)
- goto failed_mount;
- if (!parse_options(s_mount_opts, sb, &parsed_opts, 0)) {
- ext4_msg(sb, KERN_WARNING,
- "failed to parse options in superblock: %s",
- s_mount_opts);
- }
- kfree(s_mount_opts);
- }
- sbi->s_def_mount_opt = sbi->s_mount_opt;
- if (!parse_options((char *) data, sb, &parsed_opts, 0))
- goto failed_mount;
+ return 0;
+}
-#ifdef CONFIG_UNICODE
- if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
- const struct ext4_sb_encodings *encoding_info;
- struct unicode_map *encoding;
- __u16 encoding_flags;
+#if IS_ENABLED(CONFIG_UNICODE)
+static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
+{
+ const struct ext4_sb_encodings *encoding_info;
+ struct unicode_map *encoding;
+ __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);
- if (ext4_sb_read_encoding(es, &encoding_info,
- &encoding_flags)) {
- ext4_msg(sb, KERN_ERR,
- "Encoding requested by superblock is unknown");
- goto failed_mount;
- }
+ if (!ext4_has_feature_casefold(sb) || sb->s_encoding)
+ return 0;
- encoding = utf8_load(encoding_info->version);
- if (IS_ERR(encoding)) {
- ext4_msg(sb, KERN_ERR,
- "can't mount with superblock charset: %s-%s "
- "not supported by the kernel. flags: 0x%x.",
- encoding_info->name, encoding_info->version,
- encoding_flags);
- goto failed_mount;
- }
- ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
- "%s-%s with flags 0x%hx", encoding_info->name,
- encoding_info->version?:"\b", encoding_flags);
+ encoding_info = ext4_sb_read_encoding(es);
+ if (!encoding_info) {
+ ext4_msg(sb, KERN_ERR,
+ "Encoding requested by superblock is unknown");
+ return -EINVAL;
+ }
- sb->s_encoding = encoding;
- sb->s_encoding_flags = encoding_flags;
+ encoding = utf8_load(encoding_info->version);
+ if (IS_ERR(encoding)) {
+ ext4_msg(sb, KERN_ERR,
+ "can't mount with superblock charset: %s-%u.%u.%u "
+ "not supported by the kernel. flags: 0x%x.",
+ encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
+ encoding_flags);
+ return -EINVAL;
}
+ ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
+ "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
+ encoding_flags);
+
+ sb->s_encoding = encoding;
+ sb->s_encoding_flags = encoding_flags;
+
+ return 0;
+}
+#else
+static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
+{
+ return 0;
+}
#endif
- if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
- printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
- /* can't mount with both data=journal and dioread_nolock. */
- clear_opt(sb, DIOREAD_NOLOCK);
- clear_opt2(sb, JOURNAL_FAST_COMMIT);
- if (test_opt2(sb, EXPLICIT_DELALLOC)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and delalloc");
- goto failed_mount;
- }
- if (test_opt(sb, DAX_ALWAYS)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and dax");
- goto failed_mount;
- }
- if (ext4_has_feature_encrypt(sb)) {
- ext4_msg(sb, KERN_WARNING,
- "encrypted files will use data=ordered "
- "instead of data journaling mode");
- }
- if (test_opt(sb, DELALLOC))
- clear_opt(sb, DELALLOC);
- } else {
- sb->s_iflags |= SB_I_CGROUPWB;
+static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ /* Warn if metadata_csum and gdt_csum are both set. */
+ if (ext4_has_feature_metadata_csum(sb) &&
+ ext4_has_feature_gdt_csum(sb))
+ ext4_warning(sb, "metadata_csum and uninit_bg are "
+ "redundant flags; please run fsck.");
+
+ /* Check for a known checksum algorithm */
+ if (!ext4_verify_csum_type(sb, es)) {
+ ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+ "unknown checksum algorithm.");
+ return -EINVAL;
}
+ ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
+ ext4_orphan_file_block_trigger);
- sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
+ /* Check superblock checksum */
+ if (!ext4_superblock_csum_verify(sb, es)) {
+ ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+ "invalid superblock checksum. Run e2fsck?");
+ return -EFSBADCRC;
+ }
+
+ /* Precompute checksum seed for all metadata */
+ if (ext4_has_feature_csum_seed(sb))
+ sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
+ else if (ext4_has_feature_metadata_csum(sb) ||
+ ext4_has_feature_ea_inode(sb))
+ sbi->s_csum_seed = ext4_chksum(~0, es->s_uuid,
+ sizeof(es->s_uuid));
+ return 0;
+}
+
+static int ext4_check_feature_compatibility(struct super_block *sb,
+ struct ext4_super_block *es,
+ int silent)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(ext4_has_compat_features(sb) ||
@@ -4236,7 +4681,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (ext4_has_feature_64bit(sb)) {
ext4_msg(sb, KERN_ERR,
"The Hurd can't support 64-bit file systems");
- goto failed_mount;
+ return -EINVAL;
}
/*
@@ -4246,7 +4691,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (ext4_has_feature_ea_inode(sb)) {
ext4_msg(sb, KERN_ERR,
"ea_inode feature is not supported for Hurd");
- goto failed_mount;
+ return -EINVAL;
}
}
@@ -4260,10 +4705,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* it's actually an ext[34] filesystem.
*/
if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
- goto failed_mount;
+ return -EINVAL;
ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
"to feature incompatibilities");
- goto failed_mount;
+ return -EINVAL;
}
}
@@ -4277,10 +4722,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* it's actually an ext4 filesystem.
*/
if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
- goto failed_mount;
+ return -EINVAL;
ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
"to feature incompatibilities");
- goto failed_mount;
+ return -EINVAL;
}
}
@@ -4290,180 +4735,50 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
- goto failed_mount;
+ return -EINVAL;
- if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
- ext4_msg(sb, KERN_ERR,
- "Number of reserved GDT blocks insanely large: %d",
- le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
- goto failed_mount;
+ if (sbi->s_daxdev) {
+ if (sb->s_blocksize == PAGE_SIZE)
+ set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
+ else
+ ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
}
- if (dax_supported(dax_dev, sb->s_bdev, blocksize, 0,
- bdev_nr_sectors(sb->s_bdev)))
- set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
-
if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
if (ext4_has_feature_inline_data(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
" that may contain inline data");
- goto failed_mount;
+ return -EINVAL;
}
if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
ext4_msg(sb, KERN_ERR,
"DAX unsupported by block device.");
- goto failed_mount;
+ return -EINVAL;
}
}
if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
es->s_encryption_level);
- goto failed_mount;
- }
-
- if (sb->s_blocksize != blocksize) {
- /*
- * bh must be released before kill_bdev(), otherwise
- * it won't be freed and its page also. kill_bdev()
- * is called by sb_set_blocksize().
- */
- brelse(bh);
- /* Validate the filesystem blocksize */
- if (!sb_set_blocksize(sb, blocksize)) {
- ext4_msg(sb, KERN_ERR, "bad block size %d",
- blocksize);
- bh = NULL;
- goto failed_mount;
- }
-
- logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
- offset = do_div(logical_sb_block, blocksize);
- bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
- if (IS_ERR(bh)) {
- ext4_msg(sb, KERN_ERR,
- "Can't read superblock on 2nd try");
- ret = PTR_ERR(bh);
- bh = NULL;
- goto failed_mount;
- }
- es = (struct ext4_super_block *)(bh->b_data + offset);
- sbi->s_es = es;
- if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
- ext4_msg(sb, KERN_ERR,
- "Magic mismatch, very weird!");
- goto failed_mount;
- }
+ return -EINVAL;
}
- has_huge_files = ext4_has_feature_huge_file(sb);
- sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
- has_huge_files);
- sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
-
- sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
- if (ext4_has_feature_64bit(sb)) {
- if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
- sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
- !is_power_of_2(sbi->s_desc_size)) {
- ext4_msg(sb, KERN_ERR,
- "unsupported descriptor size %lu",
- sbi->s_desc_size);
- goto failed_mount;
- }
- } else
- sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
-
- sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
- sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-
- sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
- if (sbi->s_inodes_per_block == 0)
- goto cantfind_ext4;
- if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
- sbi->s_inodes_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
- sbi->s_inodes_per_group);
- goto failed_mount;
- }
- sbi->s_itb_per_group = sbi->s_inodes_per_group /
- sbi->s_inodes_per_block;
- sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
- sbi->s_sbh = bh;
- sbi->s_mount_state = le16_to_cpu(es->s_state);
- sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
- sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
+ return 0;
+}
- for (i = 0; i < 4; i++)
- sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
- sbi->s_def_hash_version = es->s_def_hash_version;
- if (ext4_has_feature_dir_index(sb)) {
- i = le32_to_cpu(es->s_flags);
- if (i & EXT2_FLAGS_UNSIGNED_HASH)
- sbi->s_hash_unsigned = 3;
- else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
-#ifdef __CHAR_UNSIGNED__
- if (!sb_rdonly(sb))
- es->s_flags |=
- cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
- sbi->s_hash_unsigned = 3;
-#else
- if (!sb_rdonly(sb))
- es->s_flags |=
- cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
-#endif
- }
- }
+static int ext4_check_geometry(struct super_block *sb,
+ struct ext4_super_block *es)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ __u64 blocks_count;
+ int err;
- /* Handle clustersize */
- clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
- if (ext4_has_feature_bigalloc(sb)) {
- if (clustersize < blocksize) {
- ext4_msg(sb, KERN_ERR,
- "cluster size (%d) smaller than "
- "block size (%d)", clustersize, blocksize);
- goto failed_mount;
- }
- sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
- le32_to_cpu(es->s_log_block_size);
- sbi->s_clusters_per_group =
- le32_to_cpu(es->s_clusters_per_group);
- if (sbi->s_clusters_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#clusters per group too big: %lu",
- sbi->s_clusters_per_group);
- goto failed_mount;
- }
- if (sbi->s_blocks_per_group !=
- (sbi->s_clusters_per_group * (clustersize / blocksize))) {
- ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
- "clusters per group (%lu) inconsistent",
- sbi->s_blocks_per_group,
- sbi->s_clusters_per_group);
- goto failed_mount;
- }
- } else {
- if (clustersize != blocksize) {
- ext4_msg(sb, KERN_ERR,
- "fragment/cluster size (%d) != "
- "block size (%d)", clustersize, blocksize);
- goto failed_mount;
- }
- if (sbi->s_blocks_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#blocks per group too big: %lu",
- sbi->s_blocks_per_group);
- goto failed_mount;
- }
- sbi->s_clusters_per_group = sbi->s_blocks_per_group;
- sbi->s_cluster_bits = 0;
+ if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) {
+ ext4_msg(sb, KERN_ERR,
+ "Number of reserved GDT blocks insanely large: %d",
+ le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
+ return -EINVAL;
}
- sbi->s_cluster_ratio = clustersize / blocksize;
-
- /* Do we have standard group size of clustersize * 8 blocks ? */
- if (sbi->s_blocks_per_group == clustersize << 3)
- set_opt2(sb, STD_GROUP_SIZE);
-
/*
* Test whether we have more sectors than will fit in sector_t,
* and whether the max offset is addressable by the page cache.
@@ -4473,19 +4788,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (err) {
ext4_msg(sb, KERN_ERR, "filesystem"
" too large to mount safely on this system");
- goto failed_mount;
+ return err;
}
- if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
- goto cantfind_ext4;
-
/* check blocks count against device size */
blocks_count = sb_bdev_nr_blocks(sb);
if (blocks_count && ext4_blocks_count(es) > blocks_count) {
ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
"exceeds size of device (%llu blocks)",
ext4_blocks_count(es), blocks_count);
- goto failed_mount;
+ return -EINVAL;
}
/*
@@ -4497,13 +4809,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"block %u is beyond end of filesystem (%llu)",
le32_to_cpu(es->s_first_data_block),
ext4_blocks_count(es));
- goto failed_mount;
+ return -EINVAL;
}
if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
(sbi->s_cluster_ratio == 1)) {
ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
"block is 0 with a 1k block and cluster size");
- goto failed_mount;
+ return -EINVAL;
}
blocks_count = (ext4_blocks_count(es) -
@@ -4517,7 +4829,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
- goto failed_mount;
+ return -EINVAL;
}
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
@@ -4527,9 +4839,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
le32_to_cpu(es->s_inodes_count),
((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
- ret = -EINVAL;
- goto failed_mount;
+ return -EINVAL;
}
+
+ return 0;
+}
+
+static int ext4_group_desc_init(struct super_block *sb,
+ struct ext4_super_block *es,
+ ext4_fsblk_t logical_sb_block,
+ ext4_group_t *first_not_zeroed)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ unsigned int db_count;
+ ext4_fsblk_t block;
+ int i;
+
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
@@ -4538,7 +4863,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"first meta block group too large: %u "
"(group descriptor block count %u)",
le32_to_cpu(es->s_first_meta_bg), db_count);
- goto failed_mount;
+ return -EINVAL;
}
}
rcu_assign_pointer(sbi->s_group_desc,
@@ -4547,8 +4872,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
GFP_KERNEL));
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
- ret = -ENOMEM;
- goto failed_mount;
+ return -ENOMEM;
}
bgl_lock_init(sbi->s_blockgroup_lock);
@@ -4567,144 +4891,44 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (IS_ERR(bh)) {
ext4_msg(sb, KERN_ERR,
"can't read group descriptor %d", i);
- db_count = i;
- ret = PTR_ERR(bh);
- goto failed_mount2;
+ sbi->s_gdb_count = i;
+ return PTR_ERR(bh);
}
rcu_read_lock();
rcu_dereference(sbi->s_group_desc)[i] = bh;
rcu_read_unlock();
}
sbi->s_gdb_count = db_count;
- if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+ if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
- ret = -EFSCORRUPTED;
- goto failed_mount2;
+ return -EFSCORRUPTED;
}
- timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
- spin_lock_init(&sbi->s_error_lock);
- INIT_WORK(&sbi->s_error_work, flush_stashed_error_work);
-
- /* Register extent status tree shrinker */
- if (ext4_es_register_shrinker(sbi))
- goto failed_mount3;
-
- sbi->s_stripe = ext4_get_stripe_size(sbi);
- sbi->s_extent_max_zeroout_kb = 32;
-
- /*
- * set up enough so that it can read an inode
- */
- sb->s_op = &ext4_sops;
- sb->s_export_op = &ext4_export_ops;
- sb->s_xattr = ext4_xattr_handlers;
-#ifdef CONFIG_FS_ENCRYPTION
- sb->s_cop = &ext4_cryptops;
-#endif
-#ifdef CONFIG_FS_VERITY
- sb->s_vop = &ext4_verityops;
-#endif
-#ifdef CONFIG_QUOTA
- sb->dq_op = &ext4_quota_operations;
- if (ext4_has_feature_quota(sb))
- sb->s_qcop = &dquot_quotactl_sysfile_ops;
- else
- sb->s_qcop = &ext4_qctl_operations;
- sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
-#endif
- memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
-
- INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
- mutex_init(&sbi->s_orphan_lock);
-
- /* Initialize fast commit stuff */
- atomic_set(&sbi->s_fc_subtid, 0);
- atomic_set(&sbi->s_fc_ineligible_updates, 0);
- INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
- INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
- INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
- INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
- sbi->s_fc_bytes = 0;
- ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
- spin_lock_init(&sbi->s_fc_lock);
- memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
- sbi->s_fc_replay_state.fc_regions = NULL;
- sbi->s_fc_replay_state.fc_regions_size = 0;
- sbi->s_fc_replay_state.fc_regions_used = 0;
- sbi->s_fc_replay_state.fc_regions_valid = 0;
- sbi->s_fc_replay_state.fc_modified_inodes = NULL;
- sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
- sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
-
- sb->s_root = NULL;
-
- needs_recovery = (es->s_last_orphan != 0 ||
- ext4_has_feature_orphan_present(sb) ||
- ext4_has_feature_journal_needs_recovery(sb));
+ return 0;
+}
- if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
- if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
- goto failed_mount3a;
+static int ext4_load_and_init_journal(struct super_block *sb,
+ struct ext4_super_block *es,
+ struct ext4_fs_context *ctx)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int err;
- /*
- * The first inode we look at is the journal inode. Don't try
- * root first: it may be modified in the journal!
- */
- if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
- err = ext4_load_journal(sb, es, parsed_opts.journal_devnum);
- if (err)
- goto failed_mount3a;
- } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
- ext4_has_feature_journal_needs_recovery(sb)) {
- ext4_msg(sb, KERN_ERR, "required journal recovery "
- "suppressed and not mounted read-only");
- goto failed_mount_wq;
- } else {
- /* Nojournal mode, all journal mount options are illegal */
- if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "journal_checksum, fs mounted w/o journal");
- goto failed_mount_wq;
- }
- if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "journal_async_commit, fs mounted w/o journal");
- goto failed_mount_wq;
- }
- if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "commit=%lu, fs mounted w/o journal",
- sbi->s_commit_interval / HZ);
- goto failed_mount_wq;
- }
- if (EXT4_MOUNT_DATA_FLAGS &
- (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "data=, fs mounted w/o journal");
- goto failed_mount_wq;
- }
- sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
- clear_opt(sb, JOURNAL_CHECKSUM);
- clear_opt(sb, DATA_FLAGS);
- clear_opt2(sb, JOURNAL_FAST_COMMIT);
- sbi->s_journal = NULL;
- needs_recovery = 0;
- goto no_journal;
- }
+ err = ext4_load_journal(sb, es, ctx->journal_devnum);
+ if (err)
+ return err;
if (ext4_has_feature_64bit(sb) &&
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
- goto failed_mount_wq;
+ goto out;
}
if (!set_journal_csum_feature_set(sb)) {
ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
"feature set");
- goto failed_mount_wq;
+ goto out;
}
if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
@@ -4712,7 +4936,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
ext4_msg(sb, KERN_ERR,
"Failed to set fast commit journal feature");
- goto failed_mount_wq;
+ goto out;
}
/* We have now updated the journal if required, so we can
@@ -4739,7 +4963,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
ext4_msg(sb, KERN_ERR, "Journal does not support "
"requested data journaling mode");
- goto failed_mount_wq;
+ goto out;
}
break;
default:
@@ -4750,22 +4974,530 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"journal_async_commit in data=ordered mode");
- goto failed_mount_wq;
+ goto out;
}
- set_task_ioprio(sbi->s_journal->j_task, parsed_opts.journal_ioprio);
+ set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
sbi->s_journal->j_submit_inode_data_buffers =
ext4_journal_submit_inode_data_buffers;
sbi->s_journal->j_finish_inode_data_buffers =
ext4_journal_finish_inode_data_buffers;
-no_journal:
+ return 0;
+
+out:
+ ext4_journal_destroy(sbi, sbi->s_journal);
+ return -EINVAL;
+}
+
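+/*
+ * data=journal is incompatible with delalloc, dioread_nolock, DAX and
+ * fast commit; reject the explicit conflicts and quietly drop the
+ * implicit ones before the journal is set up.
+ */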
+static int ext4_check_journal_data_mode(struct super_block *sb)
+{
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with "
+ "data=journal disables delayed allocation, "
+ "dioread_nolock, O_DIRECT and fast_commit support!\n");
+ /* can't mount with both data=journal and dioread_nolock. */
+ clear_opt(sb, DIOREAD_NOLOCK);
+ clear_opt2(sb, JOURNAL_FAST_COMMIT);
+ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and delalloc");
+ return -EINVAL;
+ }
+ if (test_opt(sb, DAX_ALWAYS)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dax");
+ return -EINVAL;
+ }
+ if (ext4_has_feature_encrypt(sb)) {
+ ext4_msg(sb, KERN_WARNING,
+ "encrypted files will use data=ordered "
+ "instead of data journaling mode");
+ }
+ if (test_opt(sb, DELALLOC))
+ clear_opt(sb, DELALLOC);
+ } else {
+ sb->s_iflags |= SB_I_CGROUPWB;
+ }
+
+ return 0;
+}
+
+static const char *ext4_has_journal_option(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
+ return "journal_async_commit";
+ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM))
+ return "journal_checksum";
+ if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
+ return "commit=";
+ if (EXT4_MOUNT_DATA_FLAGS &
+ (sbi->s_mount_opt ^ sbi->s_def_mount_opt))
+ return "data=";
+ if (test_opt(sb, DATA_ERR_ABORT))
+ return "data_err=abort";
+ return NULL;
+}
+
+/*
+ * Limit the maximum folio order to 2048 blocks to prevent overestimating
+ * the reserve handle credits during folio writeback in environments
+ * where PAGE_SIZE exceeds 4KB.
+ */
+#define EXT4_MAX_PAGECACHE_ORDER(sb) \
+ umin(MAX_PAGECACHE_ORDER, (11 + (sb)->s_blocksize_bits - PAGE_SHIFT))
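+/*
+ * For example: with 4KiB blocks (s_blocksize_bits == 12) on a 64KiB-page
+ * system (PAGE_SHIFT == 16) the cap is 11 + 12 - 16 = order 7, i.e. 128
+ * pages of 16 blocks each, which is the 2048-block limit noted above.
+ */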
+static void ext4_set_max_mapping_order(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ sbi->s_max_folio_order = sbi->s_min_folio_order;
+ else
+ sbi->s_max_folio_order = EXT4_MAX_PAGECACHE_ORDER(sb);
+}
+
+static int ext4_check_large_folio(struct super_block *sb)
+{
+ const char *err_str = NULL;
+
+ if (ext4_has_feature_encrypt(sb))
+ err_str = "encrypt";
+
+ if (!err_str) {
+ ext4_set_max_mapping_order(sb);
+ } else if (sb->s_blocksize > PAGE_SIZE) {
+ ext4_msg(sb, KERN_ERR, "bs(%lu) > ps(%lu) unsupported for %s",
+ sb->s_blocksize, PAGE_SIZE, err_str);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
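+/*
+ * Read the on-disk superblock and do basic sanity checks, re-reading it
+ * at the correct offset once the real filesystem block size is known;
+ * on success sbi->s_sbh and sbi->s_es point at the result.
+ */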
+static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
+ int silent)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es;
+ ext4_fsblk_t logical_sb_block;
+ unsigned long offset = 0;
+ struct buffer_head *bh;
+ int ret = -EINVAL;
+ int blocksize;
+
+ blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
+ if (!blocksize) {
+ ext4_msg(sb, KERN_ERR, "unable to set blocksize");
+ return -EINVAL;
+ }
+
+ /*
+ * For block sizes other than 1kB, the ext4 superblock will not be
+ * aligned to the start of the buffer, so calculate its offset from
+ * the buffer start.
+ */
+ if (blocksize != EXT4_MIN_BLOCK_SIZE) {
+ logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
+ offset = do_div(logical_sb_block, blocksize);
+ } else {
+ logical_sb_block = sbi->s_sb_block;
+ }
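+ /*
+ * E.g. with the default sbi->s_sb_block == 1 and a 4KiB device block
+ * size, the 1KiB superblock lands in device block 0 at byte offset
+ * 1024 within that buffer.
+ */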
+
+ bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
+ if (IS_ERR(bh)) {
+ ext4_msg(sb, KERN_ERR, "unable to read superblock");
+ return PTR_ERR(bh);
+ }
+ /*
+ * Note: s_es must be initialized as soon as possible because
+ * some ext4 macros depend on its value.
+ */
+ es = (struct ext4_super_block *) (bh->b_data + offset);
+ sbi->s_es = es;
+ sb->s_magic = le16_to_cpu(es->s_magic);
+ if (sb->s_magic != EXT4_SUPER_MAGIC) {
+ if (!silent)
+ ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
+ goto out;
+ }
+
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
+ goto out;
+ }
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto out;
+ }
+
+ blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+
+ /*
+ * If the default block size is not the same as the real block size,
+ * we need to reload it.
+ */
+ if (sb->s_blocksize == blocksize)
+ goto success;
+
+ /*
+ * bh must be released before kill_bdev(), otherwise neither the
+ * buffer head nor its page will be freed. kill_bdev()
+ * is called by sb_set_blocksize().
+ */
+ brelse(bh);
+ /* Validate the filesystem blocksize */
+ if (!sb_set_blocksize(sb, blocksize)) {
+ ext4_msg(sb, KERN_ERR, "bad block size %d",
+ blocksize);
+ bh = NULL;
+ goto out;
+ }
+
+ logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
+ offset = do_div(logical_sb_block, blocksize);
+ bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
+ if (IS_ERR(bh)) {
+ ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
+ ret = PTR_ERR(bh);
+ bh = NULL;
+ goto out;
+ }
+ es = (struct ext4_super_block *)(bh->b_data + offset);
+ sbi->s_es = es;
+ if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
+ ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
+ goto out;
+ }
+
+success:
+ sbi->s_min_folio_order = get_order(blocksize);
+ *lsb = logical_sb_block;
+ sbi->s_sbh = bh;
+ return 0;
+out:
+ brelse(bh);
+ return ret;
+}
+
+static int ext4_hash_info_init(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ unsigned int i;
+
+ sbi->s_def_hash_version = es->s_def_hash_version;
+
+ if (sbi->s_def_hash_version > DX_HASH_LAST) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid default hash set in the superblock");
+ return -EINVAL;
+ } else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
+ ext4_msg(sb, KERN_ERR,
+ "SIPHASH is not a valid default hash value");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4; i++)
+ sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
+
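+ /*
+ * Legacy directory hashes may have been computed with either signed
+ * or unsigned chars depending on the architecture; record which
+ * variant this filesystem uses (persisting the choice on a writable
+ * mount) so htree lookups keep matching the on-disk hashes.
+ */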
+ if (ext4_has_feature_dir_index(sb)) {
+ i = le32_to_cpu(es->s_flags);
+ if (i & EXT2_FLAGS_UNSIGNED_HASH)
+ sbi->s_hash_unsigned = 3;
+ else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+#ifdef __CHAR_UNSIGNED__
+ if (!sb_rdonly(sb))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+ sbi->s_hash_unsigned = 3;
+#else
+ if (!sb_rdonly(sb))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+#endif
+ }
+ }
+ return 0;
+}
+
+static int ext4_block_group_meta_init(struct super_block *sb, int silent)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ int has_huge_files;
+
+ has_huge_files = ext4_has_feature_huge_file(sb);
+ sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
+ has_huge_files);
+ sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
+
+ sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
+ if (ext4_has_feature_64bit(sb)) {
+ if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
+ sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
+ !is_power_of_2(sbi->s_desc_size)) {
+ ext4_msg(sb, KERN_ERR,
+ "unsupported descriptor size %lu",
+ sbi->s_desc_size);
+ return -EINVAL;
+ }
+ } else
+ sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
+
+ sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+ sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+
+ sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb);
+ if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) {
+ if (!silent)
+ ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
+ return -EINVAL;
+ }
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > sb->s_blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+ sbi->s_inodes_per_group);
+ return -EINVAL;
+ }
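+ /* Derive the remaining per-group and per-block metadata geometry. */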
+ sbi->s_itb_per_group = sbi->s_inodes_per_group /
+ sbi->s_inodes_per_block;
+ sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb);
+ sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
+ sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
+ sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
+
+ return 0;
+}
+
+/*
+ * It's hard to get stripe-aligned blocks when the stripe is not aligned
+ * with the cluster size, so just disable the stripe and alert the user;
+ * this simplifies the code and avoids stripe-aligned allocations that
+ * would rarely succeed anyway.
+ */
+static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
+ stripe % sbi->s_cluster_ratio != 0);
+}
+
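+/*
+ * The bulk of the mount work: read and validate the superblock, apply
+ * mount options, load the group descriptors and the journal, and set up
+ * enough of the VFS superblock to read the root inode.
+ */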
+static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+{
+ struct ext4_super_block *es = NULL;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t logical_sb_block;
+ struct inode *root;
+ int needs_recovery;
+ int err;
+ ext4_group_t first_not_zeroed;
+ struct ext4_fs_context *ctx = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
+
+ /* Set defaults for the variables that will be set during parsing */
+ if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
+ ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
+
+ sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
+ sbi->s_sectors_written_start =
+ part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
+
+ err = ext4_load_super(sb, &logical_sb_block, silent);
+ if (err)
+ goto out_fail;
+
+ es = sbi->s_es;
+ sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
+
+ err = ext4_init_metadata_csum(sb, es);
+ if (err)
+ goto failed_mount;
+
+ ext4_set_def_opts(sb, es);
+
+ sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es));
+ sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resgid(es));
+ sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
+ sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
+ sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
+ sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB;
+ sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC;
+
+ /*
+ * Set the default s_li_wait_mult for lazyinit, in case no mount
+ * option is specified.
+ */
+ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+
+ err = ext4_inode_info_init(sb, es);
+ if (err)
+ goto failed_mount;
+
+ err = parse_apply_sb_mount_options(sb, ctx);
+ if (err < 0)
+ goto failed_mount;
+
+ sbi->s_def_mount_opt = sbi->s_mount_opt;
+ sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
+
+ err = ext4_check_opt_consistency(fc, sb);
+ if (err < 0)
+ goto failed_mount;
+
+ ext4_apply_options(fc, sb);
+
+ err = ext4_check_large_folio(sb);
+ if (err < 0)
+ goto failed_mount;
+
+ err = ext4_encoding_init(sb, es);
+ if (err)
+ goto failed_mount;
+
+ err = ext4_check_journal_data_mode(sb);
+ if (err)
+ goto failed_mount;
+
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
+
+ /* HSM events are allowed by default. */
+ sb->s_iflags |= SB_I_ALLOW_HSM;
+
+ err = ext4_check_feature_compatibility(sb, es, silent);
+ if (err)
+ goto failed_mount;
+
+ err = ext4_block_group_meta_init(sb, silent);
+ if (err)
+ goto failed_mount;
+
+ err = ext4_hash_info_init(sb);
+ if (err)
+ goto failed_mount;
+
+ err = ext4_handle_clustersize(sb);
+ if (err)
+ goto failed_mount;
+
+ err = ext4_check_geometry(sb, es);
+ if (err)
+ goto failed_mount;
+
+ timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
+ spin_lock_init(&sbi->s_error_lock);
+ INIT_WORK(&sbi->s_sb_upd_work, update_super_work);
+
+ err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed);
+ if (err)
+ goto failed_mount3;
+
+ err = ext4_es_register_shrinker(sbi);
+ if (err)
+ goto failed_mount3;
+
+ sbi->s_stripe = ext4_get_stripe_size(sbi);
+ if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
+ ext4_msg(sb, KERN_WARNING,
+ "stripe (%lu) is not aligned with cluster size (%u), "
+ "stripe is disabled",
+ sbi->s_stripe, sbi->s_cluster_ratio);
+ sbi->s_stripe = 0;
+ }
+ sbi->s_extent_max_zeroout_kb = 32;
+
+ /*
+ * set up enough so that it can read an inode
+ */
+ sb->s_op = &ext4_sops;
+ sb->s_export_op = &ext4_export_ops;
+ sb->s_xattr = ext4_xattr_handlers;
+#ifdef CONFIG_FS_ENCRYPTION
+ sb->s_cop = &ext4_cryptops;
+#endif
+#ifdef CONFIG_FS_VERITY
+ sb->s_vop = &ext4_verityops;
+#endif
+#ifdef CONFIG_QUOTA
+ sb->dq_op = &ext4_quota_operations;
+ if (ext4_has_feature_quota(sb))
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
+ else
+ sb->s_qcop = &ext4_qctl_operations;
+ sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
+#endif
+ super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
+ super_set_sysfs_name_bdev(sb);
+
+ INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
+ mutex_init(&sbi->s_orphan_lock);
+
+ spin_lock_init(&sbi->s_bdev_wb_lock);
+
+ ext4_atomic_write_init(sb);
+ ext4_fast_commit_init(sb);
+
+ sb->s_root = NULL;
+
+ needs_recovery = (es->s_last_orphan != 0 ||
+ ext4_has_feature_orphan_present(sb) ||
+ ext4_has_feature_journal_needs_recovery(sb));
+
+ if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) {
+ err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block));
+ if (err)
+ goto failed_mount3a;
+ }
+
+ err = -EINVAL;
+ /*
+ * The first inode we look at is the journal inode. Don't try
+ * root first: it may be modified in the journal!
+ */
+ if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
+ err = ext4_load_and_init_journal(sb, es, ctx);
+ if (err)
+ goto failed_mount3a;
+ if (bdev_read_only(sb->s_bdev))
+ needs_recovery = 0;
+ } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
+ ext4_has_feature_journal_needs_recovery(sb)) {
+ ext4_msg(sb, KERN_ERR, "required journal recovery "
+ "suppressed and not mounted read-only");
+ goto failed_mount3a;
+ } else {
+ const char *journal_option;
+
+ /* Nojournal mode, all journal mount options are illegal */
+ journal_option = ext4_has_journal_option(sb);
+ if (journal_option != NULL) {
+ ext4_msg(sb, KERN_ERR,
+ "can't mount with %s, fs mounted w/o journal",
+ journal_option);
+ goto failed_mount3a;
+ }
+
+ sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
+ clear_opt(sb, JOURNAL_CHECKSUM);
+ clear_opt(sb, DATA_FLAGS);
+ clear_opt2(sb, JOURNAL_FAST_COMMIT);
+ sbi->s_journal = NULL;
+ needs_recovery = 0;
+ }
+
if (!test_opt(sb, NO_MBCACHE)) {
sbi->s_ea_block_cache = ext4_xattr_create_cache();
if (!sbi->s_ea_block_cache) {
ext4_msg(sb, KERN_ERR,
"Failed to create ea_block_cache");
+ err = -EINVAL;
goto failed_mount_wq;
}
@@ -4774,29 +5506,28 @@ no_journal:
if (!sbi->s_ea_inode_cache) {
ext4_msg(sb, KERN_ERR,
"Failed to create ea_inode_cache");
+ err = -EINVAL;
goto failed_mount_wq;
}
}
}
- if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
- ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
- goto failed_mount_wq;
- }
-
- if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
- !ext4_has_feature_encrypt(sb)) {
- ext4_set_feature_encrypt(sb);
- ext4_commit_super(sb);
- }
-
/*
* Get the # of file system overhead blocks from the
* superblock if present.
*/
- if (es->s_overhead_clusters)
- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
- else {
+ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+ /* ignore the precalculated value if it is ridiculous */
+ if (sbi->s_overhead > ext4_blocks_count(es))
+ sbi->s_overhead = 0;
+ /*
+ * If the bigalloc feature is not enabled, recalculating the
+ * overhead doesn't take long, so we might as well just redo
+ * it to make sure we are using the correct value.
+ */
+ if (!ext4_has_feature_bigalloc(sb))
+ sbi->s_overhead = 0;
+ if (sbi->s_overhead == 0) {
err = ext4_calculate_overhead(sb);
if (err)
goto failed_mount_wq;
@@ -4810,7 +5541,7 @@ no_journal:
alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!EXT4_SB(sb)->rsv_conversion_wq) {
printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
- ret = -ENOMEM;
+ err = -ENOMEM;
goto failed_mount4;
}
@@ -4822,28 +5553,29 @@ no_journal:
root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
- ret = PTR_ERR(root);
+ err = PTR_ERR(root);
root = NULL;
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
iput(root);
+ err = -EFSCORRUPTED;
goto failed_mount4;
}
+ generic_set_sb_d_ops(sb);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");
- ret = -ENOMEM;
+ err = -ENOMEM;
goto failed_mount4;
}
- ret = ext4_setup_super(sb, es, sb_rdonly(sb));
- if (ret == -EROFS) {
+ err = ext4_setup_super(sb, es, sb_rdonly(sb));
+ if (err == -EROFS) {
sb->s_flags |= SB_RDONLY;
- ret = 0;
- } else if (ret)
+ } else if (err)
goto failed_mount4a;
ext4_set_resv_clusters(sb);
@@ -4865,12 +5597,12 @@ no_journal:
* turned off by passing "mb_optimize_scan=0". This can also be
* turned on forcefully by passing "mb_optimize_scan=1".
*/
- if (parsed_opts.mb_optimize_scan == 1)
- set_opt2(sb, MB_OPTIMIZE_SCAN);
- else if (parsed_opts.mb_optimize_scan == 0)
- clear_opt2(sb, MB_OPTIMIZE_SCAN);
- else if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
- set_opt2(sb, MB_OPTIMIZE_SCAN);
+ if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
+ if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
+ set_opt2(sb, MB_OPTIMIZE_SCAN);
+ else
+ clear_opt2(sb, MB_OPTIMIZE_SCAN);
+ }
err = ext4_mb_init(sb);
if (err) {
@@ -4887,48 +5619,16 @@ no_journal:
sbi->s_journal->j_commit_callback =
ext4_journal_commit_callback;
- block = ext4_count_free_clusters(sb);
- ext4_free_blocks_count_set(sbi->s_es,
- EXT4_C2B(sbi, block));
- err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
- GFP_KERNEL);
- if (!err) {
- unsigned long freei = ext4_count_free_inodes(sb);
- sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
- err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
- GFP_KERNEL);
- }
- /*
- * Update the checksum after updating free space/inode
- * counters. Otherwise the superblock can have an incorrect
- * checksum in the buffer cache until it is written out and
- * e2fsprogs programs trying to open a file system immediately
- * after it is mounted can fail.
- */
- ext4_superblock_csum_set(sb);
- if (!err)
- err = percpu_counter_init(&sbi->s_dirs_counter,
- ext4_count_dirs(sb), GFP_KERNEL);
- if (!err)
- err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
- GFP_KERNEL);
- if (!err)
- err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
- GFP_KERNEL);
- if (!err)
- err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
-
- if (err) {
- ext4_msg(sb, KERN_ERR, "insufficient memory");
+ err = ext4_percpu_param_init(sbi);
+ if (err)
goto failed_mount6;
- }
if (ext4_has_feature_flex_bg(sb))
if (!ext4_fill_flex_info(sb)) {
ext4_msg(sb, KERN_ERR,
"unable to initialize "
"flex_bg meta info!");
- ret = -ENOMEM;
+ err = -ENOMEM;
goto failed_mount6;
}
@@ -4936,19 +5636,15 @@ no_journal:
if (err)
goto failed_mount6;
- err = ext4_register_sysfs(sb);
- if (err)
- goto failed_mount7;
-
err = ext4_init_orphan_info(sb);
if (err)
- goto failed_mount8;
+ goto failed_mount7;
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount. */
if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
err = ext4_enable_quotas(sb);
if (err)
- goto failed_mount9;
+ goto failed_mount8;
}
#endif /* CONFIG_QUOTA */
@@ -4956,45 +5652,32 @@ no_journal:
* Save the original bdev mapping's wb_err value which could be
* used to detect the metadata async write error.
*/
- spin_lock_init(&sbi->s_bdev_wb_lock);
- errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+ errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
&sbi->s_bdev_wb_err);
- sb->s_bdev->bd_super = sb;
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
+ /*
+ * Update the checksum after updating free space/inode counters and
+ * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
+ * checksum in the buffer cache until it is written out and
+ * e2fsprogs programs trying to open a file system immediately
+ * after it is mounted can fail.
+ */
+ ext4_superblock_csum_set(sb);
if (needs_recovery) {
ext4_msg(sb, KERN_INFO, "recovery complete");
err = ext4_mark_recovery_complete(sb, es);
if (err)
goto failed_mount9;
}
- if (EXT4_SB(sb)->s_journal) {
- if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
- descr = " journalled data mode";
- else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
- descr = " ordered data mode";
- else
- descr = " writeback data mode";
- } else
- descr = "out journal";
- if (test_opt(sb, DISCARD)) {
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- if (!blk_queue_discard(q))
- ext4_msg(sb, KERN_WARNING,
- "mounting with \"discard\" option, but "
- "the device does not support discard");
+ if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) {
+ ext4_msg(sb, KERN_WARNING,
+ "mounting with \"discard\" option, but the device does not support discard");
+ clear_opt(sb, DISCARD);
}
- if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
- ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %.*s%s%s. Quota mode: %s.", descr,
- (int) sizeof(sbi->s_es->s_mount_opts),
- sbi->s_es->s_mount_opts,
- *sbi->s_es->s_mount_opts ? "; " : "", orig_data,
- ext4_quota_mode(sb));
-
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -5005,37 +5688,23 @@ no_journal:
atomic_set(&sbi->s_warning_count, 0);
atomic_set(&sbi->s_msg_count, 0);
- kfree(orig_data);
- return 0;
+ /* Register sysfs after all initializations are complete. */
+ err = ext4_register_sysfs(sb);
+ if (err)
+ goto failed_mount9;
-cantfind_ext4:
- if (!silent)
- ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
- goto failed_mount;
+ return 0;
failed_mount9:
+ ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+failed_mount8: __maybe_unused
ext4_release_orphan_info(sb);
-failed_mount8:
- ext4_unregister_sysfs(sb);
- kobject_put(&sbi->s_kobj);
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
ext4_mb_release(sb);
- rcu_read_lock();
- flex_groups = rcu_dereference(sbi->s_flex_groups);
- if (flex_groups) {
- for (i = 0; i < sbi->s_flex_groups_allocated; i++)
- kvfree(flex_groups[i]);
- kvfree(flex_groups);
- }
- rcu_read_unlock();
- percpu_counter_destroy(&sbi->s_freeclusters_counter);
- percpu_counter_destroy(&sbi->s_freeinodes_counter);
- percpu_counter_destroy(&sbi->s_dirs_counter);
- percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
- percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
- percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ ext4_flex_groups_free(sbi);
+ ext4_percpu_param_destroy(sbi);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
@@ -5054,49 +5723,90 @@ failed_mount_wq:
sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) {
- /* flush s_error_work before journal destroy. */
- flush_work(&sbi->s_error_work);
- jbd2_journal_destroy(sbi->s_journal);
- sbi->s_journal = NULL;
+ ext4_journal_destroy(sbi, sbi->s_journal);
}
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
failed_mount3:
- /* flush s_error_work before sbi destroy */
- flush_work(&sbi->s_error_work);
- del_timer_sync(&sbi->s_err_report);
+ /* flush s_sb_upd_work before sbi destroy */
+ flush_work(&sbi->s_sb_upd_work);
ext4_stop_mmpd(sbi);
-failed_mount2:
- rcu_read_lock();
- group_desc = rcu_dereference(sbi->s_group_desc);
- for (i = 0; i < db_count; i++)
- brelse(group_desc[i]);
- kvfree(group_desc);
- rcu_read_unlock();
+ timer_delete_sync(&sbi->s_err_report);
+ ext4_group_desc_free(sbi);
failed_mount:
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
-
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
#ifdef CONFIG_QUOTA
- for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(get_qf_name(sb, sbi, i));
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
- /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
- brelse(bh);
- ext4_blkdev_remove(sbi);
+ brelse(sbi->s_sbh);
+ if (sbi->s_journal_bdev_file) {
+ invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
+ bdev_fput(sbi->s_journal_bdev_file);
+ }
out_fail:
+ invalidate_bdev(sb->s_bdev);
sb->s_fs_info = NULL;
- kfree(sbi->s_blockgroup_lock);
-out_free_base:
- kfree(sbi);
- kfree(orig_data);
- fs_put_dax(dax_dev);
- return err ? err : ret;
+ return err;
+}
+
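+/*
+ * fs_context entry point used by get_tree_bdev(): allocate the ext4
+ * superblock info, run __ext4_fill_super(), and log the resulting mount.
+ */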
+static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct ext4_fs_context *ctx = fc->fs_private;
+ struct ext4_sb_info *sbi;
+ const char *descr;
+ int ret;
+
+ sbi = ext4_alloc_sbi(sb);
+ if (!sbi)
+ return -ENOMEM;
+
+ fc->s_fs_info = sbi;
+
+ /* Cleanup superblock name */
+ strreplace(sb->s_id, '/', '!');
+
+ sbi->s_sb_block = 1; /* Default super block location */
+ if (ctx->spec & EXT4_SPEC_s_sb_block)
+ sbi->s_sb_block = ctx->s_sb_block;
+
+ ret = __ext4_fill_super(fc, sb);
+ if (ret < 0)
+ goto free_sbi;
+
+ if (sbi->s_journal) {
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ descr = " journalled data mode";
+ else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+ descr = " ordered data mode";
+ else
+ descr = " writeback data mode";
+ } else
+ descr = "out journal";
+
+ if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
+ ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. "
+ "Quota mode: %s.", &sb->s_uuid,
+ sb_rdonly(sb) ? "ro" : "r/w", descr,
+ ext4_quota_mode(sb));
+
+ /* Update the s_overhead_clusters if necessary */
+ ext4_update_overhead(sb, false);
+ return 0;
+
+free_sbi:
+ ext4_free_sbi(sbi);
+ fc->s_fs_info = NULL;
+ return ret;
+}
+
+static int ext4_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, ext4_fill_super);
}
/*
@@ -5118,10 +5828,11 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
- if (test_opt(sb, DATA_ERR_ABORT))
- journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
- else
- journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
+ /*
+ * Always enable the journal cycle record option, so the journal
+ * keeps recording transactions continuously across mounts.
+ */
+ journal->j_flags |= JBD2_CYCLE_RECORD;
write_unlock(&journal->j_state_lock);
}
@@ -5138,83 +5849,110 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
if (IS_ERR(journal_inode)) {
ext4_msg(sb, KERN_ERR, "no journal found");
- return NULL;
+ return ERR_CAST(journal_inode);
}
if (!journal_inode->i_nlink) {
make_bad_inode(journal_inode);
iput(journal_inode);
ext4_msg(sb, KERN_ERR, "journal inode is deleted");
- return NULL;
+ return ERR_PTR(-EFSCORRUPTED);
}
-
- jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
- journal_inode, journal_inode->i_size);
- if (!S_ISREG(journal_inode->i_mode)) {
+ if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
ext4_msg(sb, KERN_ERR, "invalid journal inode");
iput(journal_inode);
- return NULL;
+ return ERR_PTR(-EFSCORRUPTED);
}
+
+ ext4_debug("Journal inode found at %p: %lld bytes\n",
+ journal_inode, journal_inode->i_size);
return journal_inode;
}
-static journal_t *ext4_get_journal(struct super_block *sb,
- unsigned int journal_inum)
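+/*
+ * Map a journal-relative logical block to its physical block through the
+ * journal inode (a no-op for external journal devices); abort the
+ * journal if the mapping is missing or corrupted.
+ */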
+static int ext4_journal_bmap(journal_t *journal, sector_t *block)
+{
+ struct ext4_map_blocks map;
+ int ret;
+
+ if (journal->j_inode == NULL)
+ return 0;
+
+ map.m_lblk = *block;
+ map.m_len = 1;
+ ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
+ if (ret <= 0) {
+ ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
+ "journal bmap failed: block %llu ret %d\n",
+ *block, ret);
+ jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
+ return ret;
+ }
+ *block = map.m_pblk;
+ return 0;
+}
+
+static journal_t *ext4_open_inode_journal(struct super_block *sb,
+ unsigned int journal_inum)
{
struct inode *journal_inode;
journal_t *journal;
- if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
- return NULL;
-
journal_inode = ext4_get_journal_inode(sb, journal_inum);
- if (!journal_inode)
- return NULL;
+ if (IS_ERR(journal_inode))
+ return ERR_CAST(journal_inode);
journal = jbd2_journal_init_inode(journal_inode);
- if (!journal) {
+ if (IS_ERR(journal)) {
ext4_msg(sb, KERN_ERR, "Could not load journal inode");
iput(journal_inode);
- return NULL;
+ return ERR_CAST(journal);
}
journal->j_private = sb;
+ journal->j_bmap = ext4_journal_bmap;
ext4_init_journal_params(sb, journal);
return journal;
}
-static journal_t *ext4_get_dev_journal(struct super_block *sb,
- dev_t j_dev)
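+/*
+ * Open the external journal block device and validate its journal-dev
+ * superblock (magic, checksum and UUID) against this filesystem before
+ * reporting where the journal starts and how many blocks it spans.
+ */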
+static struct file *ext4_get_journal_blkdev(struct super_block *sb,
+ dev_t j_dev, ext4_fsblk_t *j_start,
+ ext4_fsblk_t *j_len)
{
struct buffer_head *bh;
- journal_t *journal;
- ext4_fsblk_t start;
- ext4_fsblk_t len;
+ struct block_device *bdev;
+ struct file *bdev_file;
int hblock, blocksize;
ext4_fsblk_t sb_block;
unsigned long offset;
struct ext4_super_block *es;
- struct block_device *bdev;
-
- if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
- return NULL;
+ int errno;
- bdev = ext4_blkdev_get(j_dev, sb);
- if (bdev == NULL)
- return NULL;
+ bdev_file = bdev_file_open_by_dev(j_dev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
+ sb, &fs_holder_ops);
+ if (IS_ERR(bdev_file)) {
+ ext4_msg(sb, KERN_ERR,
+ "failed to open journal device unknown-block(%u,%u) %ld",
+ MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file));
+ return bdev_file;
+ }
+ bdev = file_bdev(bdev_file);
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
ext4_msg(sb, KERN_ERR,
"blocksize too small for journal device");
+ errno = -EINVAL;
goto out_bdev;
}
sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
offset = EXT4_MIN_BLOCK_SIZE % blocksize;
- set_blocksize(bdev, blocksize);
- if (!(bh = __bread(bdev, sb_block, blocksize))) {
+ set_blocksize(bdev_file, blocksize);
+ bh = __bread(bdev, sb_block, blocksize);
+ if (!bh) {
ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
"external journal");
+ errno = -EINVAL;
goto out_bdev;
}
@@ -5222,57 +5960,74 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
!(le32_to_cpu(es->s_feature_incompat) &
EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
- ext4_msg(sb, KERN_ERR, "external journal has "
- "bad superblock");
- brelse(bh);
- goto out_bdev;
+ ext4_msg(sb, KERN_ERR, "external journal has bad superblock");
+ errno = -EFSCORRUPTED;
+ goto out_bh;
}
if ((le32_to_cpu(es->s_feature_ro_compat) &
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
- es->s_checksum != ext4_superblock_csum(sb, es)) {
- ext4_msg(sb, KERN_ERR, "external journal has "
- "corrupt superblock");
- brelse(bh);
- goto out_bdev;
+ es->s_checksum != ext4_superblock_csum(es)) {
+ ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
+ errno = -EFSCORRUPTED;
+ goto out_bh;
}
if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
ext4_msg(sb, KERN_ERR, "journal UUID does not match");
- brelse(bh);
- goto out_bdev;
+ errno = -EFSCORRUPTED;
+ goto out_bh;
}
- len = ext4_blocks_count(es);
- start = sb_block + 1;
- brelse(bh); /* we're done with the superblock */
+ *j_start = sb_block + 1;
+ *j_len = ext4_blocks_count(es);
+ brelse(bh);
+ return bdev_file;
+
+out_bh:
+ brelse(bh);
+out_bdev:
+ bdev_fput(bdev_file);
+ return ERR_PTR(errno);
+}
- journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
- start, len, blocksize);
- if (!journal) {
+static journal_t *ext4_open_dev_journal(struct super_block *sb,
+ dev_t j_dev)
+{
+ journal_t *journal;
+ ext4_fsblk_t j_start;
+ ext4_fsblk_t j_len;
+ struct file *bdev_file;
+ int errno = 0;
+
+ bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
+ if (IS_ERR(bdev_file))
+ return ERR_CAST(bdev_file);
+
+ journal = jbd2_journal_init_dev(file_bdev(bdev_file), sb->s_bdev, j_start,
+ j_len, sb->s_blocksize);
+ if (IS_ERR(journal)) {
ext4_msg(sb, KERN_ERR, "failed to create device journal");
+ errno = PTR_ERR(journal);
goto out_bdev;
}
- journal->j_private = sb;
- if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
- ext4_msg(sb, KERN_ERR, "I/O error on journal device");
- goto out_journal;
- }
if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
ext4_msg(sb, KERN_ERR, "External journal has more than one "
"user (unsupported) - %d",
be32_to_cpu(journal->j_superblock->s_nr_users));
+ errno = -EINVAL;
goto out_journal;
}
- EXT4_SB(sb)->s_journal_bdev = bdev;
+ journal->j_private = sb;
+ EXT4_SB(sb)->s_journal_bdev_file = bdev_file;
ext4_init_journal_params(sb, journal);
return journal;
out_journal:
- jbd2_journal_destroy(journal);
+ ext4_journal_destroy(EXT4_SB(sb), journal);
out_bdev:
- ext4_blkdev_put(bdev);
- return NULL;
+ bdev_fput(bdev_file);
+ return ERR_PTR(errno);
}
static int ext4_load_journal(struct super_block *sb,
@@ -5304,13 +6059,13 @@ static int ext4_load_journal(struct super_block *sb,
}
if (journal_inum) {
- journal = ext4_get_journal(sb, journal_inum);
- if (!journal)
- return -EINVAL;
+ journal = ext4_open_inode_journal(sb, journal_inum);
+ if (IS_ERR(journal))
+ return PTR_ERR(journal);
} else {
- journal = ext4_get_dev_journal(sb, journal_dev);
- if (!journal)
- return -EINVAL;
+ journal = ext4_open_dev_journal(sb, journal_dev);
+ if (IS_ERR(journal))
+ return PTR_ERR(journal);
}
journal_dev_ro = bdev_read_only(journal->j_dev);
@@ -5351,14 +6106,31 @@ static int ext4_load_journal(struct super_block *sb,
err = jbd2_journal_wipe(journal, !really_read_only);
if (!err) {
char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
+ __le16 orig_state;
+ bool changed = false;
+
if (save)
memcpy(save, ((char *) es) +
EXT4_S_ERR_START, EXT4_S_ERR_LEN);
err = jbd2_journal_load(journal);
- if (save)
+ if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
+ save, EXT4_S_ERR_LEN)) {
memcpy(((char *) es) + EXT4_S_ERR_START,
save, EXT4_S_ERR_LEN);
+ changed = true;
+ }
kfree(save);
+ orig_state = es->s_state;
+ es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
+ EXT4_ERROR_FS);
+ if (orig_state != es->s_state)
+ changed = true;
+ /* Write out restored error information to the superblock */
+ if (changed && !really_read_only) {
+ int err2;
+ err2 = ext4_commit_super(sb);
+ err = err ? : err2;
+ }
}
if (err) {
@@ -5369,23 +6141,25 @@ static int ext4_load_journal(struct super_block *sb,
EXT4_SB(sb)->s_journal = journal;
err = ext4_clear_journal_err(sb, es);
if (err) {
- EXT4_SB(sb)->s_journal = NULL;
- jbd2_journal_destroy(journal);
+ ext4_journal_destroy(EXT4_SB(sb), journal);
return err;
}
if (!really_read_only && journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
es->s_journal_dev = cpu_to_le32(journal_devnum);
-
- /* Make sure we flush the recovery flag to disk. */
+ ext4_commit_super(sb);
+ }
+ if (!really_read_only && journal_inum &&
+ journal_inum != le32_to_cpu(es->s_journal_inum)) {
+ es->s_journal_inum = cpu_to_le32(journal_inum);
ext4_commit_super(sb);
}
return 0;
err_out:
- jbd2_journal_destroy(journal);
+ ext4_journal_destroy(EXT4_SB(sb), journal);
return err;
}
@@ -5407,7 +6181,7 @@ static void ext4_update_super(struct super_block *sb)
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
- if (!(sb->s_flags & SB_RDONLY))
+ if (!sb_rdonly(sb))
ext4_update_tstamp(es, s_wtime);
es->s_kbytes_written =
cpu_to_le64(sbi->s_kbytes_written +
@@ -5429,8 +6203,8 @@ static void ext4_update_super(struct super_block *sb)
__ext4_update_tstamp(&es->s_first_error_time,
&es->s_first_error_time_hi,
sbi->s_first_error_time);
- strncpy(es->s_first_error_func, sbi->s_first_error_func,
- sizeof(es->s_first_error_func));
+ strtomem_pad(es->s_first_error_func,
+ sbi->s_first_error_func, 0);
es->s_first_error_line =
cpu_to_le32(sbi->s_first_error_line);
es->s_first_error_ino =
@@ -5443,8 +6217,7 @@ static void ext4_update_super(struct super_block *sb)
__ext4_update_tstamp(&es->s_last_error_time,
&es->s_last_error_time_hi,
sbi->s_last_error_time);
- strncpy(es->s_last_error_func, sbi->s_last_error_func,
- sizeof(es->s_last_error_func));
+ strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0);
es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
@@ -5468,15 +6241,19 @@ static void ext4_update_super(struct super_block *sb)
static int ext4_commit_super(struct super_block *sb)
{
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
- int error = 0;
if (!sbh)
return -EINVAL;
- if (block_device_ejected(sb))
- return -ENODEV;
ext4_update_super(sb);
+ lock_buffer(sbh);
+ /* Buffer got discarded which means block device got invalidated */
+ if (!buffer_mapped(sbh)) {
+ unlock_buffer(sbh);
+ return -EIO;
+ }
+
if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
/*
* Oh, dear. A previous attempt to write the
@@ -5491,17 +6268,21 @@ static int ext4_commit_super(struct super_block *sb)
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
- BUFFER_TRACE(sbh, "marking dirty");
- mark_buffer_dirty(sbh);
- error = __sync_dirty_buffer(sbh,
- REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
+ get_bh(sbh);
+ /* Clear potential dirty bit if it was journalled update */
+ clear_buffer_dirty(sbh);
+ sbh->b_end_io = end_buffer_write_sync;
+ submit_bh(REQ_OP_WRITE | REQ_SYNC |
+ (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
+ wait_on_buffer(sbh);
if (buffer_write_io_error(sbh)) {
ext4_msg(sb, KERN_ERR, "I/O error while writing "
"superblock");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
+ return -EIO;
}
- return error;
+ return 0;
}
/*
@@ -5575,11 +6356,13 @@ static int ext4_clear_journal_err(struct super_block *sb,
errstr = ext4_decode_error(sb, j_errno, nbuf);
ext4_warning(sb, "Filesystem error recorded "
"from previous mount: %s", errstr);
- ext4_warning(sb, "Marking fs in need of filesystem check.");
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
- ext4_commit_super(sb);
+ j_errno = ext4_commit_super(sb);
+ if (j_errno)
+ return j_errno;
+ ext4_warning(sb, "Marked fs in need of filesystem check.");
jbd2_journal_clear_err(journal);
jbd2_journal_update_sb_errno(journal);
@@ -5593,13 +6376,7 @@ static int ext4_clear_journal_err(struct super_block *sb,
*/
int ext4_force_commit(struct super_block *sb)
{
- journal_t *journal;
-
- if (sb_rdonly(sb))
- return 0;
-
- journal = EXT4_SB(sb)->s_journal;
- return ext4_journal_force_commit(journal);
+ return ext4_journal_force_commit(EXT4_SB(sb)->s_journal);
}
static int ext4_sync_fs(struct super_block *sb, int wait)
@@ -5609,8 +6386,9 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
bool needs_barrier = false;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (unlikely(ext4_forced_shutdown(sbi)))
- return 0;
+ ret = ext4_emergency_state(sb);
+ if (unlikely(ret))
+ return ret;
trace_ext4_sync_fs(sb, wait);
flush_workqueue(sbi->rsv_conversion_wq);
@@ -5658,12 +6436,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
static int ext4_freeze(struct super_block *sb)
{
int error = 0;
- journal_t *journal;
-
- if (sb_rdonly(sb))
- return 0;
-
- journal = EXT4_SB(sb)->s_journal;
+ journal_t *journal = EXT4_SB(sb)->s_journal;
if (journal) {
/* Now we set up the journal barrier. */
@@ -5697,7 +6470,7 @@ out:
*/
static int ext4_unfreeze(struct super_block *sb)
{
- if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
+ if (ext4_emergency_state(sb))
return 0;
if (EXT4_SB(sb)->s_journal) {
@@ -5727,27 +6500,22 @@ struct ext4_mount_options {
#endif
};
-static int ext4_remount(struct super_block *sb, int *flags, char *data)
+static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
{
+ struct ext4_fs_context *ctx = fc->fs_private;
struct ext4_super_block *es;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- unsigned long old_sb_flags, vfs_flags;
+ unsigned long old_sb_flags;
struct ext4_mount_options old_opts;
ext4_group_t g;
int err = 0;
+ int alloc_ctx;
#ifdef CONFIG_QUOTA
int enable_quota = 0;
int i, j;
char *to_free[EXT4_MAXQUOTAS];
#endif
- char *orig_data = kstrdup(data, GFP_KERNEL);
- struct ext4_parsed_options parsed_opts;
-
- parsed_opts.journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
- parsed_opts.journal_devnum = 0;
- if (data && !orig_data)
- return -ENOMEM;
/* Store the original options */
old_sb_flags = sb->s_flags;
@@ -5768,29 +6536,40 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (!old_opts.s_qf_names[i]) {
for (j = 0; j < i; j++)
kfree(old_opts.s_qf_names[j]);
- kfree(orig_data);
return -ENOMEM;
}
} else
old_opts.s_qf_names[i] = NULL;
#endif
- if (sbi->s_journal && sbi->s_journal->j_task->io_context)
- parsed_opts.journal_ioprio =
- sbi->s_journal->j_task->io_context->ioprio;
+ if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) {
+ if (sbi->s_journal && sbi->s_journal->j_task->io_context)
+ ctx->journal_ioprio =
+ sbi->s_journal->j_task->io_context->ioprio;
+ else
+ ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
- /*
- * Some options can be enabled by ext4 and/or by VFS mount flag
- * either way we need to make sure it matches in both *flags and
- * s_flags. Copy those selected flags from *flags to s_flags
- */
- vfs_flags = SB_LAZYTIME | SB_I_VERSION;
- sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
+ }
- if (!parse_options(data, sb, &parsed_opts, 1)) {
- err = -EINVAL;
- goto restore_opts;
+ if ((ctx->spec & EXT4_SPEC_s_stripe) &&
+ ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
+ ext4_msg(sb, KERN_WARNING,
+ "stripe (%lu) is not aligned with cluster size (%u), "
+ "stripe is disabled",
+ ctx->s_stripe, sbi->s_cluster_ratio);
+ ctx->s_stripe = 0;
}
+ /*
+ * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
+ * two calls to ext4_should_dioread_nolock() to return inconsistent
+ * values, triggering a WARN_ON in ext4_add_complete_io(). We grab
+ * s_writepages_rwsem here to avoid a race between writepages
+ * operations and remount.
+ */
+ alloc_ctx = ext4_writepages_down_write(sb);
+ ext4_apply_options(fc, sb);
+ ext4_writepages_up_write(sb, alloc_ctx);
+
if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
test_opt(sb, JOURNAL_CHECKSUM)) {
ext4_msg(sb, KERN_ERR, "changing journal_checksum "
@@ -5826,8 +6605,12 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
- ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_DELALLOC) &&
+ !test_opt(sb, DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't disable delalloc during remount");
+ err = -EINVAL;
+ goto restore_opts;
+ }
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -5836,19 +6619,19 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_journal) {
ext4_init_journal_params(sb, sbi->s_journal);
- set_task_ioprio(sbi->s_journal->j_task, parsed_opts.journal_ioprio);
+ set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
}
/* Flush outstanding errors before changing fs state */
- flush_work(&sbi->s_error_work);
+ flush_work(&sbi->s_sb_upd_work);
- if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
+ if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
+ if (ext4_emergency_state(sb)) {
err = -EROFS;
goto restore_opts;
}
- if (*flags & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
err = sync_filesystem(sb);
if (err < 0)
goto restore_opts;
@@ -5928,19 +6711,20 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto restore_opts;
}
- sbi->s_mount_state = le16_to_cpu(es->s_state);
+ sbi->s_mount_state = (le16_to_cpu(es->s_state) &
+ ~EXT4_FC_REPLAY);
err = ext4_setup_super(sb, es, 0);
if (err)
goto restore_opts;
sb->s_flags &= ~SB_RDONLY;
- if (ext4_has_feature_mmp(sb))
- if (ext4_multi_mount_protect(sb,
- le64_to_cpu(es->s_mmp_block))) {
- err = -EROFS;
+ if (ext4_has_feature_mmp(sb)) {
+ err = ext4_multi_mount_protect(sb,
+ le64_to_cpu(es->s_mmp_block));
+ if (err)
goto restore_opts;
- }
+ }
#ifdef CONFIG_QUOTA
enable_quota = 1;
#endif
@@ -5948,18 +6732,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
/*
- * Reinitialize lazy itable initialization thread based on
- * current settings
- */
- if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
- ext4_unregister_li_request(sb);
- else {
- ext4_group_t first_not_zeroed;
- first_not_zeroed = ext4_has_uninit_itable(sb);
- ext4_register_li_request(sb, first_not_zeroed);
- }
-
- /*
* Handle creation of system zone data early because it can fail.
* Releasing of existing data is done when we are sure remount will
* succeed.
@@ -5977,9 +6749,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
#ifdef CONFIG_QUOTA
- /* Release old quota file names */
- for (i = 0; i < EXT4_MAXQUOTAS; i++)
- kfree(old_opts.s_qf_names[i]);
if (enable_quota) {
if (sb_any_quota_suspended(sb))
dquot_resume(sb, -1);
@@ -5989,26 +6758,48 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
}
+ /* Release old quota file names */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(old_opts.s_qf_names[i]);
#endif
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
+ /*
+ * Reinitialize lazy itable initialization thread based on
+ * current settings
+ */
+ if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
+ ext4_unregister_li_request(sb);
+ else {
+ ext4_group_t first_not_zeroed;
+ first_not_zeroed = ext4_has_uninit_itable(sb);
+ ext4_register_li_request(sb, first_not_zeroed);
+ }
+
if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
ext4_stop_mmpd(sbi);
/*
- * Some options can be enabled by ext4 and/or by VFS mount flag
- * either way we need to make sure it matches in both *flags and
- * s_flags. Copy those selected flags from s_flags to *flags
+ * Handle aborting the filesystem as the last thing during remount to
+ * avoid obscure errors when some option changes fail to apply because
+ * the filesystem has been shut down.
*/
- *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
+ if (test_opt2(sb, ABORT))
+ ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
- ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s. Quota mode: %s.",
- orig_data, ext4_quota_mode(sb));
- kfree(orig_data);
return 0;
restore_opts:
+ /*
+ * If there was a failing r/w to ro transition, we may need to
+ * re-enable quota
+ */
+ if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
+
+ alloc_ctx = ext4_writepages_down_write(sb);
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_mount_opt2 = old_opts.s_mount_opt2;
@@ -6017,6 +6808,8 @@ restore_opts:
sbi->s_commit_interval = old_opts.s_commit_interval;
sbi->s_min_batch_time = old_opts.s_min_batch_time;
sbi->s_max_batch_time = old_opts.s_max_batch_time;
+ ext4_writepages_up_write(sb, alloc_ctx);
+
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
@@ -6031,10 +6824,32 @@ restore_opts:
#endif
if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
ext4_stop_mmpd(sbi);
- kfree(orig_data);
return err;
}
+static int ext4_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ int ret;
+ bool old_ro = sb_rdonly(sb);
+
+ fc->s_fs_info = EXT4_SB(sb);
+
+ ret = ext4_check_opt_consistency(fc, sb);
+ if (ret < 0)
+ return ret;
+
+ ret = __ext4_remount(fc, sb);
+ if (ret < 0)
+ return ret;
+
+ ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.",
+ &sb->s_uuid,
+ (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");
+
+ return 0;
+}
+
#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
kprojid_t projid, struct kstatfs *buf)
@@ -6054,22 +6869,29 @@ static int ext4_statfs_project(struct super_block *sb,
dquot->dq_dqb.dqb_bhardlimit);
limit >>= sb->s_blocksize_bits;
- if (limit && buf->f_blocks > limit) {
+ if (limit) {
+ uint64_t remaining = 0;
+
curblock = (dquot->dq_dqb.dqb_curspace +
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
- buf->f_blocks = limit;
- buf->f_bfree = buf->f_bavail =
- (buf->f_blocks > curblock) ?
- (buf->f_blocks - curblock) : 0;
+ if (limit > curblock)
+ remaining = limit - curblock;
+
+ buf->f_blocks = min(buf->f_blocks, limit);
+ buf->f_bfree = min(buf->f_bfree, remaining);
+ buf->f_bavail = min(buf->f_bavail, remaining);
}
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
dquot->dq_dqb.dqb_ihardlimit);
- if (limit && buf->f_files > limit) {
- buf->f_files = limit;
- buf->f_ffree =
- (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
- (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+ if (limit) {
+ uint64_t remaining = 0;
+
+ if (limit > dquot->dq_dqb.dqb_curinodes)
+ remaining = limit - dquot->dq_dqb.dqb_curinodes;
+
+ buf->f_files = min(buf->f_files, limit);
+ buf->f_ffree = min(buf->f_ffree, remaining);
}
spin_unlock(&dquot->dq_dqb_lock);
@@ -6138,6 +6960,10 @@ static int ext4_write_dquot(struct dquot *dquot)
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit(dquot);
+ if (ret < 0)
+ ext4_error_err(dquot->dq_sb, -ret,
+ "Failed to commit dquot type %d",
+ dquot->dq_id.type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
@@ -6154,6 +6980,10 @@ static int ext4_acquire_dquot(struct dquot *dquot)
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_acquire(dquot);
+ if (ret < 0)
+ ext4_error_err(dquot->dq_sb, -ret,
+ "Failed to acquire dquot type %d",
+ dquot->dq_id.type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
@@ -6164,18 +6994,39 @@ static int ext4_release_dquot(struct dquot *dquot)
{
int ret, err;
handle_t *handle;
+ bool freeze_protected = false;
+
+ /*
+ * Trying to sb_start_intwrite() in a running transaction
+ * can result in a deadlock. Further, running transactions
+ * are already protected from freezing.
+ */
+ if (!ext4_journal_current_handle()) {
+ sb_start_intwrite(dquot->dq_sb);
+ freeze_protected = true;
+ }
handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle)) {
/* Release dquot anyway to avoid endless cycle in dqput() */
dquot_release(dquot);
+ if (freeze_protected)
+ sb_end_intwrite(dquot->dq_sb);
return PTR_ERR(handle);
}
ret = dquot_release(dquot);
+ if (ret < 0)
+ ext4_error_err(dquot->dq_sb, -ret,
+ "Failed to release dquot type %d",
+ dquot->dq_id.type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
+
+ if (freeze_protected)
+ sb_end_intwrite(dquot->dq_sb);
+
return ret;
}
@@ -6197,7 +7048,7 @@ static int ext4_write_info(struct super_block *sb, int type)
handle_t *handle;
/* Data block + inode block */
- handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit_info(sb, type);
@@ -6256,29 +7107,9 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
}
- /*
- * When we journal data on quota file, we have to flush journal to see
- * all updates to the file when we bypass pagecache...
- */
- if (EXT4_SB(sb)->s_journal &&
- ext4_should_journal_data(d_inode(path->dentry))) {
- /*
- * We don't need to lock updates but journal_flush() could
- * otherwise be livelocked...
- */
- jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
- err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
- jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
- if (err)
- return err;
- }
-
lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
err = dquot_quota_on(sb, type, format_id, path);
- if (err) {
- lockdep_set_quota_inode(path->dentry->d_inode,
- I_DATA_SEM_NORMAL);
- } else {
+ if (!err) {
struct inode *inode = d_inode(path->dentry);
handle_t *handle;
@@ -6298,10 +7129,29 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
ext4_journal_stop(handle);
unlock_inode:
inode_unlock(inode);
+ if (err)
+ dquot_quota_off(sb, type);
}
+ if (err)
+ lockdep_set_quota_inode(path->dentry->d_inode,
+ I_DATA_SEM_NORMAL);
return err;
}
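+/*
+ * Quota inodes referenced from the superblock must be the reserved user
+ * or group quota inodes, or (for project quota) an inode past the
+ * reserved range; anything else is treated as corruption.
+ */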
+static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
+{
+ switch (type) {
+ case USRQUOTA:
+ return qf_inum == EXT4_USR_QUOTA_INO;
+ case GRPQUOTA:
+ return qf_inum == EXT4_GRP_QUOTA_INO;
+ case PRJQUOTA:
+ return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
+ default:
+ BUG();
+ }
+}
+
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
@@ -6318,9 +7168,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
if (!qf_inums[type])
return -EPERM;
+ if (!ext4_check_quota_inum(type, qf_inums[type])) {
+ ext4_error(sb, "Bad quota inum: %lu, type: %d",
+ qf_inums[type], type);
+ return -EUCLEAN;
+ }
+
qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
if (IS_ERR(qf_inode)) {
- ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
+ ext4_error(sb, "Bad quota inode: %lu, type: %d",
+ qf_inums[type], type);
return PTR_ERR(qf_inode);
}
@@ -6359,11 +7216,11 @@ int ext4_enable_quotas(struct super_block *sb)
if (err) {
ext4_warning(sb,
"Failed to enable quota tracking "
- "(type=%d, err=%d). Please run "
- "e2fsck to fix.", type, err);
- for (type--; type >= 0; type--)
- dquot_quota_off(sb, type);
+ "(type=%d, err=%d, ino=%lu). "
+ "Please run e2fsck to fix.", type,
+ err, qf_inums[type]);
+ ext4_quotas_off(sb, type);
return err;
}
}
@@ -6388,6 +7245,13 @@ static int ext4_quota_off(struct super_block *sb, int type)
err = dquot_quota_off(sb, type);
if (err || ext4_has_feature_quota(sb))
goto out_put;
+ /*
+ * When the filesystem was remounted read-only first, we cannot clean
+ * up the inode flags here. Bad luck, but people should be using the
+ * QUOTA feature these days anyway.
+ */
+ if (sb_rdonly(sb))
+ goto out_put;
inode_lock(inode);
/*
@@ -6402,7 +7266,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
}
EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out_unlock:
@@ -6436,8 +7300,7 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
len = i_size-off;
toread = len;
while (toread > 0) {
- tocopy = sb->s_blocksize - offset < toread ?
- sb->s_blocksize - offset : toread;
+ tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
bh = ext4_bread(NULL, inode, blk, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
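/*
 * A minimal sketch, not from the patch, of the min_t() clamp used in the
 * quota read loop above: min_t() (from <linux/minmax.h>) casts both
 * operands to the named type before comparing, so the block-remainder
 * arithmetic and the remaining byte count are compared consistently.
 * quota_io_chunk() is a made-up helper name, for illustration only.
 */
static inline unsigned long quota_io_chunk(unsigned long blocksize,
                                           unsigned long offset,
                                           unsigned long remaining)
{
        return min_t(unsigned long, blocksize - offset, remaining);
}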
@@ -6466,7 +7329,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
- if (EXT4_SB(sb)->s_journal && !handle) {
+ if (!handle) {
ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
" cancelled because transaction is not started",
(unsigned long long)off, (unsigned long long)len);
@@ -6501,7 +7364,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, len);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
@@ -6517,12 +7380,6 @@ out:
}
#endif
-static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
-}
-
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
@@ -6579,21 +7436,31 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
return 1;
}
+static void ext4_kill_sb(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;
+
+ kill_block_super(sb);
+
+ if (bdev_file)
+ bdev_fput(bdev_file);
+}
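/*
 * Ordering note, not part of the patch: the external journal's bdev file
 * pointer has to be saved before kill_block_super() tears the superblock
 * (and with it the ext4_sb_info) down via ext4_put_super(); the file
 * reference itself may only be dropped afterwards, once nothing in the
 * filesystem can still be using the journal device.
 */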
+
static struct file_system_type ext4_fs_type = {
- .owner = THIS_MODULE,
- .name = "ext4",
- .mount = ext4_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .owner = THIS_MODULE,
+ .name = "ext4",
+ .init_fs_context = ext4_init_fs_context,
+ .parameters = ext4_param_specs,
+ .kill_sb = ext4_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
+ FS_LBS,
};
MODULE_ALIAS_FS("ext4");
-/* Shared across all ext4 file systems */
-wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
-
static int __init ext4_init_fs(void)
{
- int i, err;
+ int err;
ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
ext4_li_info = NULL;
@@ -6601,9 +7468,6 @@ static int __init ext4_init_fs(void)
/* Build-time check for flags consistency */
ext4_check_flag_values();
- for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
- init_waitqueue_head(&ext4__ioend_wq[i]);
-
err = ext4_init_es();
if (err)
return err;
@@ -6649,6 +7513,7 @@ static int __init ext4_init_fs(void)
out:
unregister_as_ext2();
unregister_as_ext3();
+ ext4_fc_destroy_dentry_cache();
out05:
destroy_inodecache();
out1:
@@ -6675,6 +7540,7 @@ static void __exit ext4_exit_fs(void)
unregister_as_ext2();
unregister_as_ext3();
unregister_filesystem(&ext4_fs_type);
+ ext4_fc_destroy_dentry_cache();
destroy_inodecache();
ext4_exit_mballoc();
ext4_exit_sysfs();
@@ -6688,6 +7554,5 @@ static void __exit ext4_exit_fs(void)
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 69109746e6e2..645240cc0229 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -27,7 +27,7 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct page *cpage = NULL;
+ struct buffer_head *bh = NULL;
const void *caddr;
unsigned int max_size;
const char *paddr;
@@ -39,29 +39,81 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
caddr = EXT4_I(inode)->i_data;
max_size = sizeof(EXT4_I(inode)->i_data);
} else {
- cpage = read_mapping_page(inode->i_mapping, 0, NULL);
- if (IS_ERR(cpage))
- return ERR_CAST(cpage);
- caddr = page_address(cpage);
+ bh = ext4_bread(NULL, inode, 0, 0);
+ if (IS_ERR(bh))
+ return ERR_CAST(bh);
+ if (!bh) {
+ EXT4_ERROR_INODE(inode, "bad symlink.");
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ caddr = bh->b_data;
max_size = inode->i_sb->s_blocksize;
}
paddr = fscrypt_get_symlink(inode, caddr, max_size, done);
- if (cpage)
- put_page(cpage);
+ brelse(bh);
return paddr;
}
-static int ext4_encrypted_symlink_getattr(struct user_namespace *mnt_userns,
+static int ext4_encrypted_symlink_getattr(struct mnt_idmap *idmap,
const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
- ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
+ ext4_getattr(idmap, path, stat, request_mask, query_flags);
return fscrypt_symlink_getattr(path, stat);
}
+static void ext4_free_link(void *bh)
+{
+ brelse(bh);
+}
+
+static const char *ext4_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ struct buffer_head *bh;
+ char *inline_link;
+
+ /*
+ * Creating a new inlined symlink is not supported; just provide a
+ * method to read the leftovers.
+ */
+ if (ext4_has_inline_data(inode)) {
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ inline_link = ext4_read_inline_link(inode);
+ if (!IS_ERR(inline_link))
+ set_delayed_call(callback, kfree_link, inline_link);
+ return inline_link;
+ }
+
+ if (!dentry) {
+ bh = ext4_getblk(NULL, inode, 0, EXT4_GET_BLOCKS_CACHED_NOWAIT);
+ if (IS_ERR(bh) || !bh)
+ return ERR_PTR(-ECHILD);
+ if (!ext4_buffer_uptodate(bh)) {
+ brelse(bh);
+ return ERR_PTR(-ECHILD);
+ }
+ } else {
+ bh = ext4_bread(NULL, inode, 0, 0);
+ if (IS_ERR(bh))
+ return ERR_CAST(bh);
+ if (!bh) {
+ EXT4_ERROR_INODE(inode, "bad symlink.");
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ }
+
+ set_delayed_call(callback, ext4_free_link, bh);
+ nd_terminate_link(bh->b_data, inode->i_size,
+ inode->i_sb->s_blocksize - 1);
+ return bh->b_data;
+}
+
const struct inode_operations ext4_encrypted_symlink_inode_operations = {
.get_link = ext4_encrypted_get_link,
.setattr = ext4_setattr,
@@ -70,7 +122,7 @@ const struct inode_operations ext4_encrypted_symlink_inode_operations = {
};
const struct inode_operations ext4_symlink_inode_operations = {
- .get_link = page_get_link,
+ .get_link = ext4_get_link,
.setattr = ext4_setattr,
.getattr = ext4_getattr,
.listxattr = ext4_listxattr,
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 2314f7446592..0018e09b867e 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -29,7 +29,10 @@ typedef enum {
attr_trigger_test_error,
attr_first_error_time,
attr_last_error_time,
+ attr_clusters_in_group,
+ attr_mb_order,
attr_feature,
+ attr_pointer_pi,
attr_pointer_ui,
attr_pointer_ul,
attr_pointer_u64,
@@ -63,7 +66,7 @@ static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
- return snprintf(buf, PAGE_SIZE, "%lu\n",
+ return sysfs_emit(buf, "%lu\n",
(part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
sbi->s_sectors_written_start) >> 1);
}
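/*
 * A minimal sketch, not from the patch, of the snprintf() -> sysfs_emit()
 * conversions in this file: sysfs_emit() knows the buffer sysfs hands in
 * is a single page, warns if it is not page aligned, and bounds the
 * output itself, so PAGE_SIZE no longer has to be threaded through by
 * hand. The helper name below is made up for illustration.
 */
static ssize_t mb_stats_show_example(struct ext4_sb_info *sbi, char *buf)
{
        return sysfs_emit(buf, "%u\n", sbi->s_mb_stats);
}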
@@ -72,7 +75,7 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(sbi->s_kbytes_written +
((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1)));
@@ -104,7 +107,7 @@ static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (ret || val >= clusters)
+ if (ret || val >= clusters || (s64)val < 0)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
@@ -130,8 +133,8 @@ static ssize_t trigger_test_error(struct ext4_sb_info *sbi,
static ssize_t journal_task_show(struct ext4_sb_info *sbi, char *buf)
{
if (!sbi->s_journal)
- return snprintf(buf, PAGE_SIZE, "<none>\n");
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "<none>\n");
+ return sysfs_emit(buf, "%d\n",
task_pid_vnr(sbi->s_journal->j_task));
}
@@ -178,6 +181,9 @@ static struct ext4_attr ext4_attr_##_name = { \
#define EXT4_RO_ATTR_ES_STRING(_name,_elname,_size) \
EXT4_ATTR_STRING(_name, 0444, _size, ext4_super_block, _elname)
+#define EXT4_RW_ATTR_SBI_PI(_name,_elname) \
+ EXT4_ATTR_OFFSET(_name, 0644, pointer_pi, ext4_sb_info, _elname)
+
#define EXT4_RW_ATTR_SBI_UI(_name,_elname) \
EXT4_ATTR_OFFSET(_name, 0644, pointer_ui, ext4_sb_info, _elname)
@@ -207,23 +213,25 @@ EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
ext4_sb_info, s_inode_readahead_blks);
+EXT4_ATTR_OFFSET(mb_group_prealloc, 0644, clusters_in_group,
+ ext4_sb_info, s_mb_group_prealloc);
+EXT4_ATTR_OFFSET(mb_best_avail_max_trim_order, 0644, mb_order,
+ ext4_sb_info, s_mb_best_avail_max_trim_order);
EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
-EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc);
EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
-EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
-EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
-EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
-EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
-EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
-EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_PI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_PI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_PI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_PI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_PI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_PI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
#ifdef CONFIG_EXT4_DEBUG
EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail);
#endif
@@ -245,6 +253,9 @@ EXT4_ATTR(last_error_time, 0444, last_error_time);
EXT4_ATTR(journal_task, 0444, journal_task);
EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);
+EXT4_RW_ATTR_SBI_UL(last_trim_minblks, s_last_trim_minblks);
+EXT4_RW_ATTR_SBI_UI(sb_update_sec, s_sb_update_sec);
+EXT4_RW_ATTR_SBI_UI(sb_update_kb, s_sb_update_kb);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -263,7 +274,6 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(mb_order2_req),
ATTR_LIST(mb_stream_req),
ATTR_LIST(mb_group_prealloc),
- ATTR_LIST(mb_max_inode_prealloc),
ATTR_LIST(mb_max_linear_groups),
ATTR_LIST(max_writeback_mb_bump),
ATTR_LIST(extent_max_zeroout_kb),
@@ -274,6 +284,7 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(warning_ratelimit_burst),
ATTR_LIST(msg_ratelimit_interval_ms),
ATTR_LIST(msg_ratelimit_burst),
+ ATTR_LIST(mb_best_avail_max_trim_order),
ATTR_LIST(errors_count),
ATTR_LIST(warning_count),
ATTR_LIST(msg_count),
@@ -295,6 +306,9 @@ static struct attribute *ext4_attrs[] = {
#endif
ATTR_LIST(mb_prefetch),
ATTR_LIST(mb_prefetch_limit),
+ ATTR_LIST(last_trim_minblks),
+ ATTR_LIST(sb_update_sec),
+ ATTR_LIST(sb_update_kb),
NULL,
};
ATTRIBUTE_GROUPS(ext4);
@@ -307,7 +321,7 @@ EXT4_ATTR_FEATURE(meta_bg_resize);
EXT4_ATTR_FEATURE(encryption);
EXT4_ATTR_FEATURE(test_dummy_encryption_v2);
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
EXT4_ATTR_FEATURE(casefold);
#endif
#ifdef CONFIG_FS_VERITY
@@ -315,9 +329,12 @@ EXT4_ATTR_FEATURE(verity);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
EXT4_ATTR_FEATURE(fast_commit);
-#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
+#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
EXT4_ATTR_FEATURE(encrypted_casefold);
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+EXT4_ATTR_FEATURE(blocksize_gt_pagesize);
+#endif
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
@@ -327,7 +344,7 @@ static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(encryption),
ATTR_LIST(test_dummy_encryption_v2),
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(casefold),
#endif
#ifdef CONFIG_FS_VERITY
@@ -335,9 +352,12 @@ static struct attribute *ext4_feat_attrs[] = {
#endif
ATTR_LIST(metadata_csum_seed),
ATTR_LIST(fast_commit),
-#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
+#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
ATTR_LIST(encrypted_casefold),
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ATTR_LIST(blocksize_gt_pagesize),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(ext4_feat);
@@ -357,24 +377,56 @@ static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
{
- return snprintf(buf, PAGE_SIZE, "%lld\n",
+ return sysfs_emit(buf, "%lld\n",
((time64_t)hi << 32) + le32_to_cpu(lo));
}
#define print_tstamp(buf, es, tstamp) \
__print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi)
+static ssize_t ext4_generic_attr_show(struct ext4_attr *a,
+ struct ext4_sb_info *sbi, char *buf)
+{
+ void *ptr = calc_ptr(a, sbi);
+
+ if (!ptr)
+ return 0;
+
+ switch (a->attr_id) {
+ case attr_inode_readahead:
+ case attr_clusters_in_group:
+ case attr_mb_order:
+ case attr_pointer_pi:
+ case attr_pointer_ui:
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return sysfs_emit(buf, "%u\n", le32_to_cpup(ptr));
+ return sysfs_emit(buf, "%u\n", *((unsigned int *) ptr));
+ case attr_pointer_ul:
+ return sysfs_emit(buf, "%lu\n", *((unsigned long *) ptr));
+ case attr_pointer_u8:
+ return sysfs_emit(buf, "%u\n", *((unsigned char *) ptr));
+ case attr_pointer_u64:
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return sysfs_emit(buf, "%llu\n", le64_to_cpup(ptr));
+ return sysfs_emit(buf, "%llu\n", *((unsigned long long *) ptr));
+ case attr_pointer_string:
+ return sysfs_emit(buf, "%.*s\n", a->attr_size, (char *) ptr);
+ case attr_pointer_atomic:
+ return sysfs_emit(buf, "%d\n", atomic_read((atomic_t *) ptr));
+ }
+ return 0;
+}
+
static ssize_t ext4_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
s_kobj);
struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
- void *ptr = calc_ptr(a, sbi);
switch (a->attr_id) {
case attr_delayed_allocation_blocks:
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(s64) EXT4_C2B(sbi,
percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
case attr_session_write_kbytes:
@@ -382,83 +434,49 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
case attr_lifetime_write_kbytes:
return lifetime_write_kbytes_show(sbi, buf);
case attr_reserved_clusters:
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)
atomic64_read(&sbi->s_resv_clusters));
case attr_sra_exceeded_retry_limit:
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)
percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
- case attr_inode_readahead:
- case attr_pointer_ui:
- if (!ptr)
- return 0;
- if (a->attr_ptr == ptr_ext4_super_block_offset)
- return snprintf(buf, PAGE_SIZE, "%u\n",
- le32_to_cpup(ptr));
- else
- return snprintf(buf, PAGE_SIZE, "%u\n",
- *((unsigned int *) ptr));
- case attr_pointer_ul:
- if (!ptr)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%lu\n",
- *((unsigned long *) ptr));
- case attr_pointer_u8:
- if (!ptr)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- *((unsigned char *) ptr));
- case attr_pointer_u64:
- if (!ptr)
- return 0;
- if (a->attr_ptr == ptr_ext4_super_block_offset)
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- le64_to_cpup(ptr));
- else
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- *((unsigned long long *) ptr));
- case attr_pointer_string:
- if (!ptr)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%.*s\n", a->attr_size,
- (char *) ptr);
- case attr_pointer_atomic:
- if (!ptr)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- atomic_read((atomic_t *) ptr));
case attr_feature:
- return snprintf(buf, PAGE_SIZE, "supported\n");
+ return sysfs_emit(buf, "supported\n");
case attr_first_error_time:
return print_tstamp(buf, sbi->s_es, s_first_error_time);
case attr_last_error_time:
return print_tstamp(buf, sbi->s_es, s_last_error_time);
case attr_journal_task:
return journal_task_show(sbi, buf);
+ default:
+ return ext4_generic_attr_show(a, sbi, buf);
}
-
- return 0;
}
-static ssize_t ext4_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t len)
+static ssize_t ext4_generic_attr_store(struct ext4_attr *a,
+ struct ext4_sb_info *sbi,
+ const char *buf, size_t len)
{
- struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
- s_kobj);
- struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
- void *ptr = calc_ptr(a, sbi);
- unsigned long t;
int ret;
+ unsigned int t;
+ unsigned long lt;
+ void *ptr = calc_ptr(a, sbi);
+
+ if (!ptr)
+ return 0;
switch (a->attr_id) {
- case attr_reserved_clusters:
- return reserved_clusters_store(sbi, buf, len);
+ case attr_pointer_pi:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if ((int)t < 0)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
case attr_pointer_ui:
- if (!ptr)
- return 0;
- ret = kstrtoul(skip_spaces(buf), 0, &t);
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
if (ret)
return ret;
if (a->attr_ptr == ptr_ext4_super_block_offset)
@@ -466,20 +484,50 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
else
*((unsigned int *) ptr) = t;
return len;
+ case attr_mb_order:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t > 64)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
+ case attr_clusters_in_group:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t > sbi->s_clusters_per_group)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
case attr_pointer_ul:
- if (!ptr)
- return 0;
- ret = kstrtoul(skip_spaces(buf), 0, &t);
+ ret = kstrtoul(skip_spaces(buf), 0, &lt);
if (ret)
return ret;
- *((unsigned long *) ptr) = t;
+ *((unsigned long *) ptr) = lt;
return len;
+ }
+ return 0;
+}
+
+static ssize_t ext4_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
+ s_kobj);
+ struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
+
+ switch (a->attr_id) {
+ case attr_reserved_clusters:
+ return reserved_clusters_store(sbi, buf, len);
case attr_inode_readahead:
return inode_readahead_blks_store(sbi, buf, len);
case attr_trigger_test_error:
return trigger_test_error(sbi, buf, len);
+ default:
+ return ext4_generic_attr_store(a, sbi, buf, len);
}
- return 0;
}
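/*
 * A minimal sketch, not from the patch, of the parse/validate/assign
 * pattern the ext4_generic_attr_store() cases above follow: parse with
 * kstrtouint(), reject out-of-range values, and only then touch the
 * field. The helper name and the "max" bound are made up for
 * illustration.
 */
static ssize_t example_bounded_store(unsigned int *field, const char *buf,
                                     size_t len, unsigned int max)
{
        unsigned int val;
        int ret;

        ret = kstrtouint(skip_spaces(buf), 0, &val);
        if (ret)
                return ret;
        if (val > max)
                return -EINVAL;
        *field = val;
        return len;
}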
static void ext4_sb_release(struct kobject *kobj)
@@ -489,21 +537,26 @@ static void ext4_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
+static void ext4_feat_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
static const struct sysfs_ops ext4_attr_ops = {
.show = ext4_attr_show,
.store = ext4_attr_store,
};
-static struct kobj_type ext4_sb_ktype = {
+static const struct kobj_type ext4_sb_ktype = {
.default_groups = ext4_groups,
.sysfs_ops = &ext4_attr_ops,
.release = ext4_sb_release,
};
-static struct kobj_type ext4_feat_ktype = {
+static const struct kobj_type ext4_feat_ktype = {
.default_groups = ext4_feat_groups,
.sysfs_ops = &ext4_attr_ops,
- .release = (void (*)(struct kobject *))kfree,
+ .release = ext4_feat_release,
};
void ext4_notify_error_sysfs(struct ext4_sb_info *sbi)
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index eacbd489e3bf..415d9c4d8a32 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -42,18 +42,16 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
loff_t pos)
{
while (count) {
- size_t n = min_t(size_t, count,
- PAGE_SIZE - offset_in_page(pos));
- struct page *page;
+ struct folio *folio;
+ size_t n;
- page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
+ folio = read_mapping_folio(inode->i_mapping, pos >> PAGE_SHIFT,
NULL);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- memcpy_from_page(buf, page, offset_in_page(pos), n);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- put_page(page);
+ n = memcpy_from_file_folio(buf, folio, pos, count);
+ folio_put(folio);
buf += n;
pos += n;
@@ -69,25 +67,26 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
loff_t pos)
{
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+
if (pos + count > inode->i_sb->s_maxbytes)
return -EFBIG;
while (count) {
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
- struct page *page;
- void *fsdata;
+ struct folio *folio;
+ void *fsdata = NULL;
int res;
- res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
- &page, &fsdata);
+ res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
if (res)
return res;
- memcpy_to_page(page, offset_in_page(pos), buf, n);
+ memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);
- res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
- page, fsdata);
+ res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
if (res < 0)
return res;
if (res != n)
@@ -297,16 +296,14 @@ static int ext4_get_verity_descriptor_location(struct inode *inode,
last_extent = path[path->p_depth].p_ext;
if (!last_extent) {
EXT4_ERROR_INODE(inode, "verity file has no extents");
- ext4_ext_drop_refs(path);
- kfree(path);
+ ext4_free_ext_path(path);
return -EFSCORRUPTED;
}
end_lblk = le32_to_cpu(last_extent->ee_block) +
ext4_ext_get_actual_len(last_extent);
- desc_size_pos = (u64)end_lblk << inode->i_blkbits;
- ext4_ext_drop_refs(path);
- kfree(path);
+ desc_size_pos = EXT4_LBLK_TO_B(inode, end_lblk);
+ ext4_free_ext_path(path);
if (desc_size_pos < sizeof(desc_size_disk))
goto bad;
@@ -364,31 +361,36 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
{
- DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
- struct page *page;
+ struct folio *folio;
index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
- page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
- if (!page || !PageUptodate(page)) {
- if (page)
- put_page(page);
+ folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+ DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+
+ if (!IS_ERR(folio))
+ folio_put(folio);
else if (num_ra_pages > 1)
page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
- page = read_mapping_page(inode->i_mapping, index, NULL);
+ folio = read_mapping_folio(inode->i_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
}
- return page;
+ return folio_file_page(folio, index);
}
static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
- u64 index, int log_blocksize)
+ u64 pos, unsigned int size)
{
- loff_t pos = ext4_verity_metadata_pos(inode) + (index << log_blocksize);
+ pos += ext4_verity_metadata_pos(inode);
- return pagecache_write(inode, buf, 1 << log_blocksize, pos);
+ return pagecache_write(inode, buf, size, pos);
}
const struct fsverity_operations ext4_verityops = {
+ .inode_info_offs = (int)offsetof(struct ext4_inode_info, i_verity_info) -
+ (int)offsetof(struct ext4_inode_info, vfs_inode),
.begin_enable_verity = ext4_begin_enable_verity,
.end_enable_verity = ext4_end_enable_verity,
.get_verity_descriptor = ext4_get_verity_descriptor,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 1e0fc1ed845b..2e02efbddaac 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -81,13 +81,15 @@ ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
size_t value_count);
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value,
+ size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);
static const struct xattr_handler * const ext4_xattr_handler_map[] = {
[EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
- [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
+ [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
+ [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
#endif
[EXT4_XATTR_INDEX_TRUSTED] = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
@@ -96,13 +98,9 @@ static const struct xattr_handler * const ext4_xattr_handler_map[] = {
[EXT4_XATTR_INDEX_HURD] = &ext4_xattr_hurd_handler,
};
-const struct xattr_handler *ext4_xattr_handlers[] = {
+const struct xattr_handler * const ext4_xattr_handlers[] = {
&ext4_xattr_user_handler,
&ext4_xattr_trusted_handler,
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
#ifdef CONFIG_EXT4_FS_SECURITY
&ext4_xattr_security_handler,
#endif
@@ -123,7 +121,11 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
+ struct ext4_inode_info *ei = EXT4_I(ea_inode);
+
lockdep_set_subclass(&ea_inode->i_rwsem, 1);
+ (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
+ lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
}
#endif
@@ -137,12 +139,12 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
__u32 dummy_csum = 0;
int offset = offsetof(struct ext4_xattr_header, h_checksum);
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
+ csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
sizeof(dsk_block_nr));
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
- csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ csum = ext4_chksum(csum, (__u8 *)hdr, offset);
+ csum = ext4_chksum(csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
offset += sizeof(dummy_csum);
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
+ csum = ext4_chksum(csum, (__u8 *)hdr + offset,
EXT4_BLOCK_SIZE(inode->i_sb) - offset);
return cpu_to_le32(csum);
@@ -154,7 +156,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
struct ext4_xattr_header *hdr = BHDR(bh);
int ret = 1;
- if (ext4_has_metadata_csum(inode->i_sb)) {
+ if (ext4_has_feature_metadata_csum(inode->i_sb)) {
lock_buffer(bh);
ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
bh->b_blocknr, hdr));
@@ -166,43 +168,97 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
static void ext4_xattr_block_csum_set(struct inode *inode,
struct buffer_head *bh)
{
- if (ext4_has_metadata_csum(inode->i_sb))
+ if (ext4_has_feature_metadata_csum(inode->i_sb))
BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
bh->b_blocknr, BHDR(bh));
}
-static inline const struct xattr_handler *
-ext4_xattr_handler(int name_index)
+static inline const char *ext4_xattr_prefix(int name_index,
+ struct dentry *dentry)
{
const struct xattr_handler *handler = NULL;
if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
handler = ext4_xattr_handler_map[name_index];
- return handler;
+
+ if (!xattr_handler_can_list(handler, dentry))
+ return NULL;
+
+ return xattr_prefix(handler);
}
static int
-ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
- void *value_start)
+check_xattrs(struct inode *inode, struct buffer_head *bh,
+ struct ext4_xattr_entry *entry, void *end, void *value_start,
+ const char *function, unsigned int line)
{
struct ext4_xattr_entry *e = entry;
+ int err = -EFSCORRUPTED;
+ char *err_str;
+
+ if (bh) {
+ if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
+ BHDR(bh)->h_blocks != cpu_to_le32(1)) {
+ err_str = "invalid header";
+ goto errout;
+ }
+ if (buffer_verified(bh))
+ return 0;
+ if (!ext4_xattr_block_csum_verify(inode, bh)) {
+ err = -EFSBADCRC;
+ err_str = "invalid checksum";
+ goto errout;
+ }
+ } else {
+ struct ext4_xattr_ibody_header *header = value_start;
+
+ header -= 1;
+ if (end - (void *)header < sizeof(*header) + sizeof(u32)) {
+ err_str = "in-inode xattr block too small";
+ goto errout;
+ }
+ if (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ err_str = "bad magic number in in-inode xattr";
+ goto errout;
+ }
+ }
/* Find the end of the names list */
while (!IS_LAST_ENTRY(e)) {
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
- if ((void *)next >= end)
- return -EFSCORRUPTED;
- if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
- return -EFSCORRUPTED;
+ if ((void *)next >= end) {
+ err_str = "e_name out of bounds";
+ goto errout;
+ }
+ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) {
+ err_str = "bad e_name length";
+ goto errout;
+ }
e = next;
}
/* Check the values */
while (!IS_LAST_ENTRY(entry)) {
u32 size = le32_to_cpu(entry->e_value_size);
+ unsigned long ea_ino = le32_to_cpu(entry->e_value_inum);
- if (size > EXT4_XATTR_SIZE_MAX)
- return -EFSCORRUPTED;
+ if (!ext4_has_feature_ea_inode(inode->i_sb) && ea_ino) {
+ err_str = "ea_inode specified without ea_inode feature enabled";
+ goto errout;
+ }
+ if (ea_ino && ((ea_ino == EXT4_ROOT_INO) ||
+ !ext4_valid_inum(inode->i_sb, ea_ino))) {
+ err_str = "invalid ea_ino";
+ goto errout;
+ }
+ if (ea_ino && !size) {
+ err_str = "invalid size in ea xattr";
+ goto errout;
+ }
+ if (size > EXT4_XATTR_SIZE_MAX) {
+ err_str = "e_value size too large";
+ goto errout;
+ }
if (size != 0 && entry->e_value_inum == 0) {
u16 offs = le16_to_cpu(entry->e_value_offs);
@@ -214,71 +270,56 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
* the padded and unpadded sizes, since the size may
* overflow to 0 when adding padding.
*/
- if (offs > end - value_start)
- return -EFSCORRUPTED;
+ if (offs > end - value_start) {
+ err_str = "e_value out of bounds";
+ goto errout;
+ }
value = value_start + offs;
if (value < (void *)e + sizeof(u32) ||
size > end - value ||
- EXT4_XATTR_SIZE(size) > end - value)
- return -EFSCORRUPTED;
+ EXT4_XATTR_SIZE(size) > end - value) {
+ err_str = "overlapping e_value";
+ goto errout;
+ }
}
entry = EXT4_XATTR_NEXT(entry);
}
-
+ if (bh)
+ set_buffer_verified(bh);
return 0;
+
+errout:
+ if (bh)
+ __ext4_error_inode(inode, function, line, 0, -err,
+ "corrupted xattr block %llu: %s",
+ (unsigned long long) bh->b_blocknr,
+ err_str);
+ else
+ __ext4_error_inode(inode, function, line, 0, -err,
+ "corrupted in-inode xattr: %s", err_str);
+ return err;
}
static inline int
__ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
const char *function, unsigned int line)
{
- int error = -EFSCORRUPTED;
-
- if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
- BHDR(bh)->h_blocks != cpu_to_le32(1))
- goto errout;
- if (buffer_verified(bh))
- return 0;
-
- error = -EFSBADCRC;
- if (!ext4_xattr_block_csum_verify(inode, bh))
- goto errout;
- error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
- bh->b_data);
-errout:
- if (error)
- __ext4_error_inode(inode, function, line, 0, -error,
- "corrupted xattr block %llu",
- (unsigned long long) bh->b_blocknr);
- else
- set_buffer_verified(bh);
- return error;
+ return check_xattrs(inode, bh, BFIRST(bh), bh->b_data + bh->b_size,
+ bh->b_data, function, line);
}
#define ext4_xattr_check_block(inode, bh) \
__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
-static int
+int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
void *end, const char *function, unsigned int line)
{
- int error = -EFSCORRUPTED;
-
- if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
- (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
- goto errout;
- error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
-errout:
- if (error)
- __ext4_error_inode(inode, function, line, 0, -error,
- "corrupted in-inode xattr");
- return error;
+ return check_xattrs(inode, NULL, IFIRST(header), end, IFIRST(header),
+ function, line);
}
-#define xattr_check_inode(inode, header, end) \
- __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
-
static int
xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
void *end, int name_index, const char *name, int sorted)
@@ -301,7 +342,7 @@ xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
cmp = name_len - entry->e_name_len;
if (!cmp)
cmp = memcmp(name, entry->e_name, name_len);
- if (cmp <= 0 && (sorted || cmp == 0))
+ if (!cmp || (cmp < 0 && sorted))
break;
}
*pentry = entry;
@@ -311,29 +352,29 @@ xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
- return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
+ return ext4_chksum(sbi->s_csum_seed, buffer, size);
}
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
- return ((u64)ea_inode->i_ctime.tv_sec << 32) |
+ return ((u64) inode_get_ctime_sec(ea_inode) << 32) |
(u32) inode_peek_iversion_raw(ea_inode);
}
static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
- ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
+ inode_set_ctime(ea_inode, (u32)(ref_count >> 32), 0);
inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}
static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
- return (u32)ea_inode->i_atime.tv_sec;
+ return (u32) inode_get_atime_sec(ea_inode);
}
static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
- ea_inode->i_atime.tv_sec = hash;
+ inode_set_atime(ea_inode, hash, 0);
}
/*
@@ -378,7 +419,7 @@ free_bhs:
return ret;
}
-#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
+#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode_get_mtime_sec(inode)))
static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
u32 ea_inode_hash, struct inode **ea_inode)
@@ -386,7 +427,18 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
struct inode *inode;
int err;
- inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
+ /*
+ * We have to check for this corruption early as otherwise
+ * iget_locked() could wait indefinitely for the state of our
+ * parent inode.
+ */
+ if (parent->i_ino == ea_ino) {
+ ext4_error(parent->i_sb,
+ "Parent and EA inode have the same ino %lu", ea_ino);
+ return -EFSCORRUPTED;
+ }
+
+ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(parent->i_sb,
@@ -394,23 +446,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
err);
return err;
}
-
- if (is_bad_inode(inode)) {
- ext4_error(parent->i_sb,
- "error while reading EA inode %lu is_bad_inode",
- ea_ino);
- err = -EIO;
- goto error;
- }
-
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
- ext4_error(parent->i_sb,
- "EA inode %lu does not have EXT4_EA_INODE_FL flag",
- ea_ino);
- err = -EINVAL;
- goto error;
- }
-
ext4_xattr_inode_set_class(inode);
/*
@@ -424,16 +459,28 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
ext4_xattr_inode_set_ref(inode, 1);
} else {
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_XATTR);
inode->i_flags |= S_NOQUOTA;
inode_unlock(inode);
}
*ea_inode = inode;
return 0;
-error:
- iput(inode);
- return err;
+}
+
+/* Remove entry from mbcache when EA inode is getting evicted */
+void ext4_evict_ea_inode(struct inode *inode)
+{
+ struct mb_cache_entry *oe;
+
+ if (!EA_INODE_CACHE(inode))
+ return;
+ /* Wait for entry to get unused so that we can remove it */
+ while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
+ ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
+ }
}
static int
@@ -455,8 +502,22 @@ ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
tmp_data = cpu_to_le32(hash);
e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
&tmp_data, 1);
+ /* All good? */
+ if (e_hash == entry->e_hash)
+ return 0;
+
+ /*
+ * Not good. Maybe the entry hash was calculated
+ * using the buggy signed char version?
+ */
+ e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len,
+ &tmp_data, 1);
+ /* Still no match - bad */
if (e_hash != entry->e_hash)
return -EFSCORRUPTED;
+
+ /* Let people know about old hash */
+ pr_warn_once("ext4: filesystem with signed xattr name hash");
}
return 0;
}
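/*
 * A toy illustration, not from the patch, of why two hash variants are
 * checked above: with a signed 'char', name bytes >= 0x80 sign-extend
 * when promoted, so the same attribute name can legitimately carry
 * either hash depending on how the kernel that wrote it treated 'char'.
 */
static u32 toy_name_hash(const char *name, size_t len, bool treat_unsigned)
{
        u32 hash = 0;

        while (len--) {
                u32 c = treat_unsigned ? (u8)*name : (u32)*name;

                hash = (hash << 5) ^ (hash >> 27) ^ c;
                name++;
        }
        return hash;
}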
@@ -589,10 +650,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- error = xattr_check_inode(inode, header, end);
- if (error)
- goto cleanup;
+ end = ITAIL(inode, raw_inode);
entry = IFIRST(header);
error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
if (error)
@@ -641,7 +699,7 @@ ext4_xattr_get(struct inode *inode, int name_index, const char *name,
{
int error;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(inode->i_sb)))
return -EIO;
if (strlen(name) > 255)
@@ -664,11 +722,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
size_t rest = buffer_size;
for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
- const struct xattr_handler *handler =
- ext4_xattr_handler(entry->e_name_index);
+ const char *prefix;
- if (handler && (!handler->list || handler->list(dentry))) {
- const char *prefix = handler->prefix ?: handler->name;
+ prefix = ext4_xattr_prefix(entry->e_name_index, dentry);
+ if (prefix) {
size_t prefix_len = strlen(prefix);
size_t size = prefix_len + entry->e_name_len + 1;
@@ -724,7 +781,6 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
struct ext4_xattr_ibody_header *header;
struct ext4_inode *raw_inode;
struct ext4_iloc iloc;
- void *end;
int error;
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
@@ -734,14 +790,9 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- error = xattr_check_inode(inode, header, end);
- if (error)
- goto cleanup;
error = ext4_xattr_list_entries(dentry, IFIRST(header),
buffer, buffer_size);
-cleanup:
brelse(iloc.bh);
return error;
}
@@ -809,7 +860,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
qsize_t ea_inode_refs = 0;
- void *end;
int ret;
lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
@@ -820,10 +870,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
goto out;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- ret = xattr_check_inode(inode, header, end);
- if (ret)
- goto out;
for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
entry = EXT4_XATTR_NEXT(entry))
@@ -920,7 +966,7 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
* so we need to reserve credits for this eventuality
*/
if (inode && ext4_has_inline_data(inode))
- credits += ext4_writepage_trans_blocks(inode) + 1;
+ credits += ext4_chunk_trans_extent(inode, 1) + 1;
/* We are done if ea_inode feature is not enabled. */
if (!ext4_has_feature_ea_inode(sb))
@@ -976,45 +1022,36 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
int ref_change)
{
- struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
struct ext4_iloc iloc;
- s64 ref_count;
- u32 hash;
+ u64 ref_count;
int ret;
- inode_lock(ea_inode);
+ inode_lock_nested(ea_inode, I_MUTEX_XATTR);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
if (ret)
goto out;
ref_count = ext4_xattr_inode_get_ref(ea_inode);
+ if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) {
+ ext4_error_inode(ea_inode, __func__, __LINE__, 0,
+ "EA inode %lu ref wraparound: ref_count=%llu ref_change=%d",
+ ea_inode->i_ino, ref_count, ref_change);
+ ret = -EFSCORRUPTED;
+ goto out;
+ }
ref_count += ref_change;
ext4_xattr_inode_set_ref(ea_inode, ref_count);
if (ref_change > 0) {
- WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
- ea_inode->i_ino, ref_count);
-
if (ref_count == 1) {
WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
ea_inode->i_ino, ea_inode->i_nlink);
set_nlink(ea_inode, 1);
ext4_orphan_del(handle, ea_inode);
-
- if (ea_inode_cache) {
- hash = ext4_xattr_inode_get_hash(ea_inode);
- mb_cache_entry_create(ea_inode_cache,
- GFP_NOFS, hash,
- ea_inode->i_ino,
- true /* reusable */);
- }
}
} else {
- WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
- ea_inode->i_ino, ref_count);
-
if (ref_count == 0) {
WARN_ONCE(ea_inode->i_nlink != 1,
"EA inode %lu i_nlink=%u",
@@ -1022,12 +1059,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
clear_nlink(ea_inode);
ext4_orphan_add(handle, ea_inode);
-
- if (ea_inode_cache) {
- hash = ext4_xattr_inode_get_hash(ea_inode);
- mb_cache_entry_delete(ea_inode_cache, hash,
- ea_inode->i_ino);
- }
}
}
@@ -1133,15 +1164,28 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
{
struct inode *ea_inode;
struct ext4_xattr_entry *entry;
+ struct ext4_iloc iloc;
bool dirty = false;
unsigned int ea_ino;
int err;
int credits;
+ void *end;
+
+ if (block_csum)
+ end = (void *)bh->b_data + bh->b_size;
+ else {
+ err = ext4_get_inode_loc(parent, &iloc);
+ if (err) {
+ EXT4_ERROR_INODE(parent, "parent inode loc (error %d)", err);
+ return;
+ }
+ end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size;
+ }
/* One credit for dec ref on ea_inode, one for orphan list addition, */
credits = 2 + extra_credits;
- for (entry = first; !IS_LAST_ENTRY(entry);
+ for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry);
entry = EXT4_XATTR_NEXT(entry)) {
if (!entry->e_value_inum)
continue;
@@ -1237,6 +1281,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
if (error)
goto out;
+retry_ref:
lock_buffer(bh);
hash = le32_to_cpu(BHDR(bh)->h_hash);
ref = le32_to_cpu(BHDR(bh)->h_refcount);
@@ -1246,9 +1291,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
* This must happen under buffer lock for
* ext4_xattr_block_set() to reliably detect freed block
*/
- if (ea_block_cache)
- mb_cache_entry_delete(ea_block_cache, hash,
- bh->b_blocknr);
+ if (ea_block_cache) {
+ struct mb_cache_entry *oe;
+
+ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (oe) {
+ unlock_buffer(bh);
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto retry_ref;
+ }
+ }
get_bh(bh);
unlock_buffer(bh);
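/*
 * A stripped-down sketch, not from the patch, of the delete-or-wait shape
 * used above, with the buffer-lock juggling omitted: try to unhash the
 * mbcache entry; if somebody still holds a reference, wait until the
 * entry is unused and try again. The helper name is made up.
 */
static void example_drop_mb_entry(struct mb_cache *cache, u32 hash, u64 value)
{
        struct mb_cache_entry *oe;

        while ((oe = mb_cache_entry_delete_or_get(cache, hash, value))) {
                mb_cache_entry_wait_unused(oe);
                mb_cache_entry_put(cache, oe);
        }
}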
@@ -1272,7 +1326,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
ce = mb_cache_entry_get(ea_block_cache, hash,
bh->b_blocknr);
if (ce) {
- ce->e_reusable = 1;
+ set_bit(MBE_REUSABLE_B, &ce->e_flags);
mb_cache_entry_put(ea_block_cache, ce);
}
}
@@ -1380,6 +1434,12 @@ retry:
goto out;
memcpy(bh->b_data, buf, csize);
+ /*
+ * Zero out block tail to avoid writing uninitialized memory
+ * to disk.
+ */
+ if (csize < blocksize)
+ memset(bh->b_data + csize, 0, blocksize - csize);
set_buffer_uptodate(bh);
ext4_handle_dirty_metadata(handle, ea_inode, bh);
@@ -1413,6 +1473,13 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
int err;
+ if (inode->i_sb->s_root == NULL) {
+ ext4_warning(inode->i_sb,
+ "refuse to create EA inode when umounting");
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
+
/*
* Let the next inode be the goal, so we try and allocate the EA inode
* in the same group, or nearby one.
@@ -1432,6 +1499,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
if (!err)
err = ext4_inode_attach_jinode(ea_inode);
if (err) {
+ if (ext4_xattr_inode_dec_ref(handle, ea_inode))
+ ext4_warning_inode(ea_inode,
+ "cleanup dec ref error %d", err);
iput(ea_inode);
return ERR_PTR(err);
}
@@ -1469,7 +1539,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
!(current->flags & PF_MEMALLOC_NOFS));
- ea_data = kvmalloc(value_len, GFP_KERNEL);
+ ea_data = kvmalloc(value_len, GFP_NOFS);
if (!ea_data) {
mb_cache_entry_put(ea_inode_cache, ce);
return NULL;
@@ -1477,11 +1547,11 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
while (ce) {
ea_inode = ext4_iget(inode->i_sb, ce->e_value,
- EXT4_IGET_NORMAL);
- if (!IS_ERR(ea_inode) &&
- !is_bad_inode(ea_inode) &&
- (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
- i_size_read(ea_inode) == value_len &&
+ EXT4_IGET_EA_INODE);
+ if (IS_ERR(ea_inode))
+ goto next_entry;
+ ext4_xattr_inode_set_class(ea_inode);
+ if (i_size_read(ea_inode) == value_len &&
!ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
!ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
value_len) &&
@@ -1491,9 +1561,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
kvfree(ea_data);
return ea_inode;
}
-
- if (!IS_ERR(ea_inode))
- iput(ea_inode);
+ iput(ea_inode);
+ next_entry:
ce = mb_cache_entry_find_next(ea_inode_cache, ce);
}
kvfree(ea_data);
@@ -1503,45 +1572,49 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
/*
* Add value of the EA in an inode.
*/
-static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
- const void *value, size_t value_len,
- struct inode **ret_inode)
+static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
+ struct inode *inode, const void *value, size_t value_len)
{
struct inode *ea_inode;
u32 hash;
int err;
+ /* Account inode & space to quota even if sharing... */
+ err = ext4_xattr_inode_alloc_quota(inode, value_len);
+ if (err)
+ return ERR_PTR(err);
+
hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
if (ea_inode) {
err = ext4_xattr_inode_inc_ref(handle, ea_inode);
- if (err) {
- iput(ea_inode);
- return err;
- }
-
- *ret_inode = ea_inode;
- return 0;
+ if (err)
+ goto out_err;
+ return ea_inode;
}
/* Create an inode for the EA value */
ea_inode = ext4_xattr_inode_create(handle, inode, hash);
- if (IS_ERR(ea_inode))
- return PTR_ERR(ea_inode);
+ if (IS_ERR(ea_inode)) {
+ ext4_xattr_inode_free_quota(inode, NULL, value_len);
+ return ea_inode;
+ }
err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
if (err) {
- ext4_xattr_inode_dec_ref(handle, ea_inode);
- iput(ea_inode);
- return err;
+ if (ext4_xattr_inode_dec_ref(handle, ea_inode))
+ ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err);
+ goto out_err;
}
if (EA_INODE_CACHE(inode))
mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
ea_inode->i_ino, true /* reusable */);
-
- *ret_inode = ea_inode;
- return 0;
+ return ea_inode;
+out_err:
+ iput(ea_inode);
+ ext4_xattr_inode_free_quota(inode, NULL, value_len);
+ return ERR_PTR(err);
}
/*
@@ -1553,6 +1626,7 @@ static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
struct ext4_xattr_search *s,
handle_t *handle, struct inode *inode,
+ struct inode *new_ea_inode,
bool is_block)
{
struct ext4_xattr_entry *last, *next;
@@ -1560,7 +1634,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
size_t min_offs = s->end - s->base, name_len = strlen(i->name);
int in_inode = i->in_inode;
struct inode *old_ea_inode = NULL;
- struct inode *new_ea_inode = NULL;
size_t old_size, new_size;
int ret;
@@ -1645,43 +1718,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
old_ea_inode = NULL;
goto out;
}
- }
- if (i->value && in_inode) {
- WARN_ON_ONCE(!i->value_len);
-
- ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
- if (ret)
- goto out;
-
- ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
- i->value_len,
- &new_ea_inode);
- if (ret) {
- new_ea_inode = NULL;
- ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
- goto out;
- }
- }
- if (old_ea_inode) {
/* We are ready to release ref count on the old_ea_inode. */
ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
- if (ret) {
- /* Release newly required ref count on new_ea_inode. */
- if (new_ea_inode) {
- int err;
-
- err = ext4_xattr_inode_dec_ref(handle,
- new_ea_inode);
- if (err)
- ext4_warning_inode(new_ea_inode,
- "dec ref new_ea_inode err=%d",
- err);
- ext4_xattr_inode_free_quota(inode, new_ea_inode,
- i->value_len);
- }
+ if (ret)
goto out;
- }
ext4_xattr_inode_free_quota(inode, old_ea_inode,
le32_to_cpu(here->e_value_size));
@@ -1719,6 +1760,20 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
memmove(here, (void *)here + size,
(void *)last - (void *)here + sizeof(__u32));
memset(last, 0, size);
+
+ /*
+ * Update i_inline_off - moved ibody region might contain
+ * system.data attribute. Handling a failure here won't
+ * cause other complications for setting an xattr.
+ */
+ if (!is_block && ext4_has_inline_data(inode)) {
+ ret = ext4_find_inline_data_nolock(inode);
+ if (ret) {
+ ext4_warning_inode(inode,
+ "unable to update i_inline_off");
+ goto out;
+ }
+ }
} else if (s->not_found) {
/* Insert new name. */
size_t size = EXT4_XATTR_LEN(name_len);
@@ -1791,7 +1846,6 @@ update_hash:
ret = 0;
out:
iput(old_ea_inode);
- iput(new_ea_inode);
return ret;
}
@@ -1854,15 +1908,30 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
size_t old_ea_inode_quota = 0;
unsigned int ea_ino;
-
#define header(x) ((struct ext4_xattr_header *)(x))
+ /* If we need EA inode, prepare it before locking the buffer */
+ if (i->value && i->in_inode) {
+ WARN_ON_ONCE(!i->value_len);
+
+ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+ i->value, i->value_len);
+ if (IS_ERR(ea_inode)) {
+ error = PTR_ERR(ea_inode);
+ ea_inode = NULL;
+ goto cleanup;
+ }
+ }
+
if (s->base) {
+ int offset = (char *)s->here - bs->bh->b_data;
+
BUFFER_TRACE(bs->bh, "get_write_access");
error = ext4_journal_get_write_access(handle, sb, bs->bh,
EXT4_JTR_NONE);
if (error)
goto cleanup;
+
lock_buffer(bs->bh);
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
@@ -1873,12 +1942,23 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
* ext4_xattr_block_set() to reliably detect modified
* block
*/
- if (ea_block_cache)
- mb_cache_entry_delete(ea_block_cache, hash,
- bs->bh->b_blocknr);
+ if (ea_block_cache) {
+ struct mb_cache_entry *oe;
+
+ oe = mb_cache_entry_delete_or_get(ea_block_cache,
+ hash, bs->bh->b_blocknr);
+ if (oe) {
+ /*
+ * Xattr block is getting reused. Leave
+ * it alone.
+ */
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto clone_block;
+ }
+ }
ea_bdebug(bs->bh, "modifying in-place");
error = ext4_xattr_set_entry(i, s, handle, inode,
- true /* is_block */);
+ ea_inode, true /* is_block */);
ext4_xattr_block_csum_set(inode, bs->bh);
unlock_buffer(bs->bh);
if (error == -EFSCORRUPTED)
@@ -1890,50 +1970,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
if (error)
goto cleanup;
goto inserted;
- } else {
- int offset = (char *)s->here - bs->bh->b_data;
+ }
+clone_block:
+ unlock_buffer(bs->bh);
+ ea_bdebug(bs->bh, "cloning");
+ s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
+ error = -ENOMEM;
+ if (s->base == NULL)
+ goto cleanup;
+ s->first = ENTRY(header(s->base)+1);
+ header(s->base)->h_refcount = cpu_to_le32(1);
+ s->here = ENTRY(s->base + offset);
+ s->end = s->base + bs->bh->b_size;
- unlock_buffer(bs->bh);
- ea_bdebug(bs->bh, "cloning");
- s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
- error = -ENOMEM;
- if (s->base == NULL)
+ /*
+ * If existing entry points to an xattr inode, we need
+ * to prevent ext4_xattr_set_entry() from decrementing
+ * ref count on it because the reference belongs to the
+ * original block. In this case, make the entry look
+ * like it has an empty value.
+ */
+ if (!s->not_found && s->here->e_value_inum) {
+ ea_ino = le32_to_cpu(s->here->e_value_inum);
+ error = ext4_xattr_inode_iget(inode, ea_ino,
+ le32_to_cpu(s->here->e_hash),
+ &tmp_inode);
+ if (error)
goto cleanup;
- memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
- s->first = ENTRY(header(s->base)+1);
- header(s->base)->h_refcount = cpu_to_le32(1);
- s->here = ENTRY(s->base + offset);
- s->end = s->base + bs->bh->b_size;
-
- /*
- * If existing entry points to an xattr inode, we need
- * to prevent ext4_xattr_set_entry() from decrementing
- * ref count on it because the reference belongs to the
- * original block. In this case, make the entry look
- * like it has an empty value.
- */
- if (!s->not_found && s->here->e_value_inum) {
- ea_ino = le32_to_cpu(s->here->e_value_inum);
- error = ext4_xattr_inode_iget(inode, ea_ino,
- le32_to_cpu(s->here->e_hash),
- &tmp_inode);
- if (error)
- goto cleanup;
- if (!ext4_test_inode_state(tmp_inode,
- EXT4_STATE_LUSTRE_EA_INODE)) {
- /*
- * Defer quota free call for previous
- * inode until success is guaranteed.
- */
- old_ea_inode_quota = le32_to_cpu(
- s->here->e_value_size);
- }
- iput(tmp_inode);
-
- s->here->e_value_inum = 0;
- s->here->e_value_size = 0;
+ if (!ext4_test_inode_state(tmp_inode,
+ EXT4_STATE_LUSTRE_EA_INODE)) {
+ /*
+ * Defer quota free call for previous
+ * inode until success is guaranteed.
+ */
+ old_ea_inode_quota = le32_to_cpu(
+ s->here->e_value_size);
}
+ iput(tmp_inode);
+
+ s->here->e_value_inum = 0;
+ s->here->e_value_size = 0;
}
} else {
/* Allocate a buffer where we construct the new block. */
@@ -1949,33 +2026,22 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
+ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+ true /* is_block */);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
goto cleanup;
- if (i->value && s->here->e_value_inum) {
- /*
- * A ref count on ea_inode has been taken as part of the call to
- * ext4_xattr_set_entry() above. We would like to drop this
- * extra ref but we have to wait until the xattr block is
- * initialized and has its own ref count on the ea_inode.
- */
- ea_ino = le32_to_cpu(s->here->e_value_inum);
- error = ext4_xattr_inode_iget(inode, ea_ino,
- le32_to_cpu(s->here->e_hash),
- &ea_inode);
- if (error) {
- ea_inode = NULL;
+inserted:
+ if (!IS_LAST_ENTRY(s->first)) {
+ new_bh = ext4_xattr_block_cache_find(inode, header(s->base), &ce);
+ if (IS_ERR(new_bh)) {
+ error = PTR_ERR(new_bh);
+ new_bh = NULL;
goto cleanup;
}
- }
-inserted:
- if (!IS_LAST_ENTRY(s->first)) {
- new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
- &ce);
if (new_bh) {
/* We found an identical block in the cache. */
if (new_bh == bs->bh)
@@ -1983,8 +2049,9 @@ inserted:
else {
u32 ref;
+#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
-
+#endif
/* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
@@ -2000,18 +2067,13 @@ inserted:
lock_buffer(new_bh);
/*
* We have to be careful about races with
- * freeing, rehashing or adding references to
- * xattr block. Once we hold buffer lock xattr
- * block's state is stable so we can check
- * whether the block got freed / rehashed or
- * not. Since we unhash mbcache entry under
- * buffer lock when freeing / rehashing xattr
- * block, checking whether entry is still
- * hashed is reliable. Same rules hold for
- * e_reusable handling.
+ * adding references to xattr block. Once we
+ * hold buffer lock xattr block's state is
+ * stable so we can check whether the additional
+ * reference fits.
*/
- if (hlist_bl_unhashed(&ce->e_hash_list) ||
- !ce->e_reusable) {
+ ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+ if (ref > EXT4_XATTR_REFCOUNT_MAX) {
/*
* Undo everything and check mbcache
* again.
@@ -2026,10 +2088,9 @@ inserted:
new_bh = NULL;
goto inserted;
}
- ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
- if (ref >= EXT4_XATTR_REFCOUNT_MAX)
- ce->e_reusable = 0;
+ if (ref == EXT4_XATTR_REFCOUNT_MAX)
+ clear_bit(MBE_REUSABLE_B, &ce->e_flags);
ea_bdebug(new_bh, "reusing; refcount now=%d",
ref);
ext4_xattr_block_csum_set(inode, new_bh);
@@ -2053,23 +2114,16 @@ inserted:
/* We need to allocate a new block */
ext4_fsblk_t goal, block;
+#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
-
+#endif
goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
-
- /* non-extent files can't have physical blocks past 2^32 */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
-
block = ext4_new_meta_blocks(handle, inode, goal, 0,
NULL, &error);
if (error)
goto cleanup;
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
-
ea_idebug(inode, "creating block %llu",
(unsigned long long)block);
@@ -2136,17 +2190,16 @@ getblk_failed:
cleanup:
if (ea_inode) {
- int error2;
-
- error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
- if (error2)
- ext4_warning_inode(ea_inode, "dec ref error=%d",
- error2);
+ if (error) {
+ int error2;
- /* If there was an error, revert the quota charge. */
- if (error)
+ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+ if (error2)
+ ext4_warning_inode(ea_inode, "dec ref error=%d",
+ error2);
ext4_xattr_inode_free_quota(inode, ea_inode,
i_size_read(ea_inode));
+ }
iput(ea_inode);
}
if (ce)
@@ -2176,17 +2229,15 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
struct ext4_inode *raw_inode;
int error;
- if (EXT4_I(inode)->i_extra_isize == 0)
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return 0;
+
raw_inode = ext4_raw_inode(&is->iloc);
header = IHDR(inode, raw_inode);
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
- is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ is->s.end = ITAIL(inode, raw_inode);
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
- error = xattr_check_inode(inode, header, is->s.end);
- if (error)
- return error;
/* Find the named attribute. */
error = xattr_find_entry(inode, &is->s.here, is->s.end,
i->name_index, i->name, 0);
@@ -2203,13 +2254,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
{
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_search *s = &is->s;
+ struct inode *ea_inode = NULL;
int error;
- if (EXT4_I(inode)->i_extra_isize == 0)
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
- if (error)
+
+ /* If we need EA inode, prepare it before locking the buffer */
+ if (i->value && i->in_inode) {
+ WARN_ON_ONCE(!i->value_len);
+
+ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+ i->value, i->value_len);
+ if (IS_ERR(ea_inode))
+ return PTR_ERR(ea_inode);
+ }
+ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+ false /* is_block */);
+ if (error) {
+ if (ea_inode) {
+ int error2;
+
+ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+ if (error2)
+ ext4_warning_inode(ea_inode, "dec ref error=%d",
+ error2);
+
+ ext4_xattr_inode_free_quota(inode, ea_inode,
+ i_size_read(ea_inode));
+ iput(ea_inode);
+ }
return error;
+ }
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2218,6 +2294,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
header->h_magic = cpu_to_le32(0);
ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
}
+ iput(ea_inode);
return 0;
}
@@ -2396,7 +2473,8 @@ retry_inode:
}
if (!error) {
ext4_xattr_update_super_block(handle, inode->i_sb);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
+ inode_inc_iversion(inode);
if (!value)
no_expand = 0;
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -2408,7 +2486,7 @@ retry_inode:
if (IS_SYNC(inode))
ext4_handle_sync(handle);
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
cleanup:
brelse(is.iloc.bh);
@@ -2479,6 +2557,8 @@ retry:
error = ext4_xattr_set_handle(handle, inode, name_index, name,
value, value_len, flags);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR,
+ handle);
error2 = ext4_journal_stop(handle);
if (error == -ENOSPC &&
ext4_should_retry_alloc(sb, &retries))
@@ -2486,7 +2566,6 @@ retry:
if (error == 0)
error = error2;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
return error;
}
@@ -2535,13 +2614,13 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
.in_inode = !!entry->e_value_inum,
};
struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
+ int needs_kvfree = 0;
int error;
is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
- buffer = kmalloc(value_size, GFP_NOFS);
b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
- if (!is || !bs || !buffer || !b_entry_name) {
+ if (!is || !bs || !b_entry_name) {
error = -ENOMEM;
goto out;
}
@@ -2553,12 +2632,18 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
/* Save the entry name and the entry value */
if (entry->e_value_inum) {
+ buffer = kvmalloc(value_size, GFP_NOFS);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+ needs_kvfree = 1;
error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
if (error)
goto out;
} else {
size_t value_offs = le16_to_cpu(entry->e_value_offs);
- memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
+ buffer = (void *)IFIRST(header) + value_offs;
}
memcpy(b_entry_name, entry->e_name, entry->e_name_len);
@@ -2573,25 +2658,26 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
if (error)
goto out;
- /* Remove the chosen entry from the inode */
- error = ext4_xattr_ibody_set(handle, inode, &i, is);
- if (error)
- goto out;
-
i.value = buffer;
i.value_len = value_size;
error = ext4_xattr_block_find(inode, &i, bs);
if (error)
goto out;
- /* Add entry which was removed from the inode into the block */
+ /* Move ea entry from the inode into the block */
error = ext4_xattr_block_set(handle, inode, &i, bs);
if (error)
goto out;
- error = 0;
+
+ /* Remove the chosen entry from the inode */
+ i.value = NULL;
+ i.value_len = 0;
+ error = ext4_xattr_ibody_set(handle, inode, &i, is);
+
out:
kfree(b_entry_name);
- kfree(buffer);
+ if (needs_kvfree && buffer)
+ kvfree(buffer);
if (is)
brelse(is->iloc.bh);
if (bs)
@@ -2698,14 +2784,10 @@ retry:
*/
base = IFIRST(header);
- end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ end = ITAIL(inode, raw_inode);
min_offs = end - base;
total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
- error = xattr_check_inode(inode, header, end);
- if (error)
- goto cleanup;
-
ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
if (ifree >= isize_diff)
goto shift;
@@ -2766,6 +2848,9 @@ shift:
(void *)header, total_ino);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ if (ext4_has_inline_data(inode))
+ error = ext4_find_inline_data_nolock(inode);
+
cleanup:
if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
@@ -2789,33 +2874,31 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
if (*ea_inode_array == NULL) {
/*
* Start with 15 inodes, so it fits into a power-of-two size.
- * If *ea_inode_array is NULL, this is essentially offsetof()
*/
- (*ea_inode_array) =
- kmalloc(offsetof(struct ext4_xattr_inode_array,
- inodes[EIA_MASK]),
- GFP_NOFS);
+ (*ea_inode_array) = kmalloc(
+ struct_size(*ea_inode_array, inodes, EIA_MASK),
+ GFP_NOFS);
if (*ea_inode_array == NULL)
return -ENOMEM;
(*ea_inode_array)->count = 0;
} else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
/* expand the array once all 15 + n * 16 slots are full */
struct ext4_xattr_inode_array *new_array = NULL;
- int count = (*ea_inode_array)->count;
- /* if new_array is NULL, this is essentially offsetof() */
new_array = kmalloc(
- offsetof(struct ext4_xattr_inode_array,
- inodes[count + EIA_INCR]),
- GFP_NOFS);
+ struct_size(*ea_inode_array, inodes,
+ (*ea_inode_array)->count + EIA_INCR),
+ GFP_NOFS);
if (new_array == NULL)
return -ENOMEM;
memcpy(new_array, *ea_inode_array,
- offsetof(struct ext4_xattr_inode_array, inodes[count]));
+ struct_size(*ea_inode_array, inodes,
+ (*ea_inode_array)->count));
kfree(*ea_inode_array);
*ea_inode_array = new_array;
}
- (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
+ (*ea_inode_array)->count++;
+ (*ea_inode_array)->inodes[(*ea_inode_array)->count - 1] = inode;
return 0;
}
@@ -2920,7 +3003,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
error);
goto cleanup;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
}
error = 0;
cleanup:
@@ -2946,8 +3029,6 @@ void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
*
* Create a new entry in the extended attribute block cache, and insert
* it unless such an entry is already in the cache.
- *
- * Returns 0, or a negative error number on failure.
*/
static void
ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
@@ -2975,8 +3056,7 @@ ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
*
* Compare two extended attribute blocks for equality.
*
- * Returns 0 if the blocks are equal, 1 if they differ, and
- * a negative error number on errors.
+ * Returns 0 if the blocks are equal, 1 if they differ.
*/
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
@@ -3015,8 +3095,8 @@ ext4_xattr_cmp(struct ext4_xattr_header *header1,
*
* Find an identical extended attribute block.
*
- * Returns a pointer to the block found, or NULL if such a block was
- * not found or an error occurred.
+ * Returns a pointer to the block found, or NULL if such a block was not
+ * found, or an error pointer if an error occurred while reading the EA block.
*/
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *inode,
@@ -3038,11 +3118,11 @@ ext4_xattr_block_cache_find(struct inode *inode,
bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
if (IS_ERR(bh)) {
- if (PTR_ERR(bh) == -ENOMEM)
- return NULL;
- bh = NULL;
- EXT4_ERROR_INODE(inode, "block %lu read error",
- (unsigned long)ce->e_value);
+ if (PTR_ERR(bh) != -ENOMEM)
+ EXT4_ERROR_INODE(inode, "block %lu read error",
+ (unsigned long)ce->e_value);
+ mb_cache_entry_put(ea_block_cache, ce);
+ return bh;
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
*pce = ce;
return bh;
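
The updated ext4_xattr_block_cache_find() contract above has three outcomes: a usable buffer head, NULL when no identical block is cached, and an error pointer when reading the candidate block failed, which the caller at the inserted: label earlier in this diff now propagates instead of silently treating as a cache miss. Below is a minimal userspace sketch of that pointer/NULL/ERR_PTR convention; the ERR_PTR/IS_ERR/PTR_ERR macros are simplified stand-ins for the kernel helpers, and cache_find() is an invented placeholder, not ext4 code.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095
/* simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *cache_find(int scenario)
{
	static int block = 42;

	switch (scenario) {
	case 0:  return &block;		/* found an identical block */
	case 1:  return NULL;		/* no match in the cache */
	default: return ERR_PTR(-EIO);	/* read error while probing */
	}
}

int main(void)
{
	for (int s = 0; s < 3; s++) {
		void *bh = cache_find(s);

		if (IS_ERR(bh))
			printf("scenario %d: error %ld, bail out\n", s, PTR_ERR(bh));
		else if (!bh)
			printf("scenario %d: not found, allocate a new block\n", s);
		else
			printf("scenario %d: reuse cached block %d\n", s, *(int *)bh);
	}
	return 0;
}
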
@@ -3069,7 +3149,29 @@ static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
while (name_len--) {
hash = (hash << NAME_HASH_SHIFT) ^
(hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
- *name++;
+ (unsigned char)*name++;
+ }
+ while (value_count--) {
+ hash = (hash << VALUE_HASH_SHIFT) ^
+ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+ le32_to_cpu(*value++);
+ }
+ return cpu_to_le32(hash);
+}
+
+/*
+ * ext4_xattr_hash_entry_signed()
+ *
+ * Compute the hash of an extended attribute the historical (buggy) way,
+ * i.e. widening name bytes as signed chars, so that hash values written
+ * by old kernels on platforms with signed plain char still match.
+ */
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count)
+{
+ __u32 hash = 0;
+
+ while (name_len--) {
+ hash = (hash << NAME_HASH_SHIFT) ^
+ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+ (signed char)*name++;
}
while (value_count--) {
hash = (hash << VALUE_HASH_SHIFT) ^
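
The two hash helpers added above differ only in whether a name byte is widened as an unsigned or a signed char before it is folded into the rotating hash, which matters once an attribute name contains bytes with the high bit set. The runnable userspace sketch below restates the name loop so the divergence can be seen directly; it assumes NAME_HASH_SHIFT is 5 (its value in fs/ext4/xattr.c, not shown in this excerpt), and the sample names are arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NAME_HASH_SHIFT 5

static uint32_t name_hash(const char *name, size_t len, int use_signed)
{
	uint32_t hash = 0;

	while (len--) {
		/* the only difference between the two kernel helpers */
		uint32_t c = use_signed ? (uint32_t)(signed char)*name
					: (uint32_t)(unsigned char)*name;
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8 * sizeof(hash) - NAME_HASH_SHIFT)) ^ c;
		name++;
	}
	return hash;
}

int main(void)
{
	const char ascii[] = "user.comment";		/* 7-bit name: both variants agree */
	const char high[]  = "user.caf\xc3\xa9";	/* bytes >= 0x80: the variants differ */

	printf("ascii: unsigned=%08x signed=%08x\n",
	       (unsigned)name_hash(ascii, strlen(ascii), 0),
	       (unsigned)name_hash(ascii, strlen(ascii), 1));
	printf("high:  unsigned=%08x signed=%08x\n",
	       (unsigned)name_hash(high, strlen(high), 0),
	       (unsigned)name_hash(high, strlen(high), 1));
	return 0;
}

For pure-ASCII names both loops produce identical values, which is why the legacy variant only has to be consulted when the primary hash does not match.
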
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 77efb9a627ad..1fedf44d4fb6 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -32,8 +32,7 @@ struct ext4_xattr_header {
__le32 h_refcount; /* reference count */
__le32 h_blocks; /* number of disk blocks used */
__le32 h_hash; /* hash value of all attributes */
- __le32 h_checksum; /* crc32c(uuid+id+xattrblock) */
- /* id = inum if refcount=1, blknum otherwise */
+ __le32 h_checksum; /* crc32c(uuid+blknum+xattrblock) */
__u32 h_reserved[3]; /* zero right now */
};
@@ -68,6 +67,9 @@ struct ext4_xattr_entry {
((void *)raw_inode + \
EXT4_GOOD_OLD_INODE_SIZE + \
EXT4_I(inode)->i_extra_isize))
+#define ITAIL(inode, raw_inode) \
+ ((void *)(raw_inode) + \
+ EXT4_SB((inode)->i_sb)->s_inode_size)
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
/*
@@ -84,7 +86,7 @@ struct ext4_xattr_entry {
/*
* The minimum size of EA value when you start storing it in an external inode
* size of block - size of header - size of 1 entry - 4 null bytes
-*/
+ */
#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b) \
((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
@@ -95,6 +97,19 @@ struct ext4_xattr_entry {
#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+/*
+ * If we want to add an xattr to the inode, we should make sure that
+ * i_extra_isize is not 0 and that the inode size is not less than
+ * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad.
+ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data
+ * |--------------------------|------------|------|---------|---|-------|
+ */
+#define EXT4_INODE_HAS_XATTR_SPACE(inode) \
+ ((EXT4_I(inode)->i_extra_isize != 0) && \
+ (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \
+ sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \
+ EXT4_INODE_SIZE((inode)->i_sb)))
+
struct ext4_xattr_info {
const char *name;
const void *value;
@@ -117,8 +132,8 @@ struct ext4_xattr_ibody_find {
};
struct ext4_xattr_inode_array {
- unsigned int count; /* # of used items in the array */
- struct inode *inodes[];
+ unsigned int count;
+ struct inode *inodes[] __counted_by(count);
};
extern const struct xattr_handler ext4_xattr_user_handler;
@@ -178,8 +193,9 @@ extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array);
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
+extern void ext4_evict_ea_inode(struct inode *inode);
-extern const struct xattr_handler *ext4_xattr_handlers[];
+extern const struct xattr_handler * const ext4_xattr_handlers[];
extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is);
@@ -193,6 +209,13 @@ extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
extern struct mb_cache *ext4_xattr_create_cache(void);
extern void ext4_xattr_destroy_cache(struct mb_cache *);
+extern int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+ void *end, const char *function, unsigned int line);
+
+#define xattr_check_inode(inode, header, end) \
+ __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
#ifdef CONFIG_EXT4_FS_SECURITY
extern int ext4_init_security(handle_t *handle, struct inode *inode,
struct inode *dir, const struct qstr *qstr);
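
EXT4_INODE_HAS_XATTR_SPACE, introduced in the xattr.h hunk above, is a simple bounds check: in-inode xattrs need a non-zero i_extra_isize plus enough room past the legacy inode for the extra fields, the ibody header and the entry padding. The sketch below evaluates that bound for a couple of inode sizes; the constants (128 for EXT4_GOOD_OLD_INODE_SIZE, 4 bytes each for the ibody header and EXT4_XATTR_PAD) are assumptions taken from ext4 headers that are not visible in this excerpt.

#include <stdio.h>

static int has_xattr_space(unsigned int extra_isize, unsigned int inode_size)
{
	/* assumed values of the ext4 constants used by the macro */
	const unsigned int good_old = 128, ibody_hdr = 4, pad = 4;

	return extra_isize != 0 &&
	       good_old + extra_isize + ibody_hdr + pad <= inode_size;
}

int main(void)
{
	/* 128-byte inodes have no extra space at all */
	printf("128-byte inode, extra=0   -> %d\n", has_xattr_space(0, 128));
	/* typical 256-byte inode with the default 32-byte extra area */
	printf("256-byte inode, extra=32  -> %d\n", has_xattr_space(32, 256));
	/* extra area so large the header+pad no longer fit */
	printf("256-byte inode, extra=124 -> %d\n", has_xattr_space(124, 256));
	return 0;
}
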
diff --git a/fs/ext4/xattr_hurd.c b/fs/ext4/xattr_hurd.c
index c78df5790377..8a5842e4cd95 100644
--- a/fs/ext4/xattr_hurd.c
+++ b/fs/ext4/xattr_hurd.c
@@ -32,7 +32,7 @@ ext4_xattr_hurd_get(const struct xattr_handler *handler,
static int
ext4_xattr_hurd_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 8213f66f7b2d..776cf11d24ca 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -23,7 +23,7 @@ ext4_xattr_security_get(const struct xattr_handler *handler,
static int
ext4_xattr_security_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 7c21ffb26d25..9811eb0ab276 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -30,7 +30,7 @@ ext4_xattr_trusted_get(const struct xattr_handler *handler,
static int
ext4_xattr_trusted_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index 2fe7ff0a479c..4b70bf4e7626 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -31,7 +31,7 @@ ext4_xattr_user_get(const struct xattr_handler *handler,
static int
ext4_xattr_user_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 7eea3cfd894d..5916a02fb46d 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -2,11 +2,12 @@
config F2FS_FS
tristate "F2FS filesystem support"
depends on BLOCK
+ select BUFFER_HEAD
select NLS
- select CRYPTO
- select CRYPTO_CRC32
+ select CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
+ select FS_IOMAP
select LZ4_COMPRESS if F2FS_FS_LZ4
select LZ4_DECOMPRESS if F2FS_FS_LZ4
select LZ4HC_COMPRESS if F2FS_FS_LZ4HC
@@ -142,3 +143,10 @@ config F2FS_IOSTAT
Support getting IO statistics through sysfs and printing out periodic
IO statistics tracepoint events. You have to turn on "iostat_enable"
sysfs node to enable this feature.
+
+config F2FS_UNFAIR_RWSEM
+ bool "F2FS unfair rw_semaphore"
+ depends on F2FS_FS && BLK_CGROUP
+ help
+ Use an unfair rw_semaphore if the system has configured I/O priority
+ via the block cgroup.
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 16e826e01f09..fa8d81a30fb9 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -9,6 +9,7 @@
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
+#include <linux/fs_struct.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "xattr.h"
@@ -166,7 +167,7 @@ fail:
}
static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
- struct page *dpage)
+ struct folio *dfolio)
{
int name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
void *value = NULL;
@@ -176,13 +177,13 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
if (type == ACL_TYPE_ACCESS)
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
+ retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dfolio);
if (retval > 0) {
value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
- retval, dpage);
+ retval, dfolio);
}
if (retval > 0)
@@ -204,8 +205,9 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type, bool rcu)
return __f2fs_get_acl(inode, type, NULL);
}
-static int f2fs_acl_update_mode(struct inode *inode, umode_t *mode_p,
- struct posix_acl **acl)
+static int f2fs_acl_update_mode(struct mnt_idmap *idmap,
+ struct inode *inode, umode_t *mode_p,
+ struct posix_acl **acl)
{
umode_t mode = inode->i_mode;
int error;
@@ -218,15 +220,15 @@ static int f2fs_acl_update_mode(struct inode *inode, umode_t *mode_p,
return error;
if (error == 0)
*acl = NULL;
- if (!in_group_p(i_gid_into_mnt(&init_user_ns, inode)) &&
- !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
*mode_p = mode;
return 0;
}
-static int __f2fs_set_acl(struct inode *inode, int type,
- struct posix_acl *acl, struct page *ipage)
+static int __f2fs_set_acl(struct mnt_idmap *idmap,
+ struct inode *inode, int type,
+ struct posix_acl *acl, struct folio *ifolio)
{
int name_index;
void *value = NULL;
@@ -237,8 +239,8 @@ static int __f2fs_set_acl(struct inode *inode, int type,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl && !ipage) {
- error = f2fs_acl_update_mode(inode, &mode, &acl);
+ if (acl && !ifolio) {
+ error = f2fs_acl_update_mode(idmap, inode, &mode, &acl);
if (error)
return error;
set_acl_inode(inode, mode);
@@ -263,7 +265,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
}
}
- error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
+ error = f2fs_setxattr(inode, name_index, "", value, size, ifolio, 0);
kfree(value);
if (!error)
@@ -273,13 +275,15 @@ static int __f2fs_set_acl(struct inode *inode, int type,
return error;
}
-int f2fs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int f2fs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
+ struct inode *inode = d_inode(dentry);
+
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
- return __f2fs_set_acl(inode, type, acl, NULL);
+ return __f2fs_set_acl(idmap, inode, type, acl, NULL);
}
/*
@@ -292,9 +296,8 @@ static struct posix_acl *f2fs_acl_clone(const struct posix_acl *acl,
struct posix_acl *clone = NULL;
if (acl) {
- int size = sizeof(struct posix_acl) + acl->a_count *
- sizeof(struct posix_acl_entry);
- clone = kmemdup(acl, size, flags);
+ clone = kmemdup(acl, struct_size(acl, a_entries, acl->a_count),
+ flags);
if (clone)
refcount_set(&clone->a_refcount, 1);
}
@@ -357,7 +360,7 @@ static int f2fs_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
static int f2fs_acl_create(struct inode *dir, umode_t *mode,
struct posix_acl **default_acl, struct posix_acl **acl,
- struct page *dpage)
+ struct folio *dfolio)
{
struct posix_acl *p;
struct posix_acl *clone;
@@ -369,7 +372,7 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
return 0;
- p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
+ p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dfolio);
if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
*mode &= ~current_umask();
return 0;
@@ -406,29 +409,29 @@ release_acl:
return ret;
}
-int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
- struct page *dpage)
+int f2fs_init_acl(struct inode *inode, struct inode *dir, struct folio *ifolio,
+ struct folio *dfolio)
{
struct posix_acl *default_acl = NULL, *acl = NULL;
int error;
- error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dpage);
+ error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dfolio);
if (error)
return error;
f2fs_mark_inode_dirty_sync(inode, true);
if (default_acl) {
- error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
- ipage);
+ error = __f2fs_set_acl(NULL, inode, ACL_TYPE_DEFAULT,
+ default_acl, ifolio);
posix_acl_release(default_acl);
} else {
inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
- error = __f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl,
- ipage);
+ error = __f2fs_set_acl(NULL, inode, ACL_TYPE_ACCESS,
+ acl, ifolio);
posix_acl_release(acl);
} else {
inode->i_acl = NULL;
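
Both the ext4_expand_inode_array() hunk earlier and f2fs_acl_clone() above replace open-coded "sizeof(header) + count * sizeof(element)" arithmetic with struct_size(), which sizes a structure ending in a flexible array member (and, in the kernel, saturates on overflow). Below is a userspace sketch of the clone-by-kmemdup pattern; the struct_size() macro here is a simplified, non-overflow-checking stand-in, and the acl/entry types are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { short tag; unsigned int id; };
struct acl {
	unsigned int count;
	struct entry a_entries[];	/* flexible array member */
};

/* simplified stand-in for the kernel's overflow-checked struct_size() */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (size_t)(n) * sizeof((p)->member[0]))

int main(void)
{
	struct acl *orig = malloc(struct_size(orig, a_entries, 3));
	struct acl *clone;

	if (!orig)
		return 1;
	orig->count = 3;
	for (unsigned int i = 0; i < orig->count; i++)
		orig->a_entries[i] = (struct entry){ .tag = (short)i, .id = 1000 + i };

	/* kmemdup() equivalent: copy header plus all entries in one go */
	clone = malloc(struct_size(orig, a_entries, orig->count));
	if (!clone) {
		free(orig);
		return 1;
	}
	memcpy(clone, orig, struct_size(orig, a_entries, orig->count));

	printf("cloned %u entries, last id=%u\n", clone->count, clone->a_entries[2].id);
	free(orig);
	free(clone);
	return 0;
}
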
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index a26e33cab4ff..20e87e63c089 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -33,17 +33,17 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
-extern struct posix_acl *f2fs_get_acl(struct inode *, int, bool);
-extern int f2fs_set_acl(struct user_namespace *, struct inode *,
+struct posix_acl *f2fs_get_acl(struct inode *, int, bool);
+int f2fs_set_acl(struct mnt_idmap *, struct dentry *,
struct posix_acl *, int);
-extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
- struct page *);
+int f2fs_init_acl(struct inode *, struct inode *, struct folio *ifolio,
+ struct folio *dfolio);
#else
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL
static inline int f2fs_init_acl(struct inode *inode, struct inode *dir,
- struct page *ipage, struct page *dpage)
+ struct folio *ifolio, struct folio *dfolio)
{
return 0;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f1693d45bb78..300664269eb6 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -21,43 +21,44 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;
-void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
+ unsigned char reason)
{
- f2fs_build_fault_attr(sbi, 0, 0);
- set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);
if (!end_io)
f2fs_flush_merged_writes(sbi);
+ f2fs_handle_critical_error(sbi, reason);
}
/*
* We guarantee no failure on the returned page.
*/
-struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
struct address_space *mapping = META_MAPPING(sbi);
- struct page *page;
+ struct folio *folio;
repeat:
- page = f2fs_grab_cache_page(mapping, index, false);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, false);
+ if (IS_ERR(folio)) {
cond_resched();
goto repeat;
}
- f2fs_wait_on_page_writeback(page, META, true, true);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- return page;
+ f2fs_folio_wait_writeback(folio, META, true, true);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ return folio;
}
-static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+static struct folio *__get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index,
bool is_meta)
{
struct address_space *mapping = META_MAPPING(sbi);
- struct page *page;
+ struct folio *folio;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
@@ -66,70 +67,71 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
.old_blkaddr = index,
.new_blkaddr = index,
.encrypted_page = NULL,
- .is_por = !is_meta,
+ .is_por = !is_meta ? 1 : 0,
};
int err;
if (unlikely(!is_meta))
fio.op_flags &= ~REQ_META;
repeat:
- page = f2fs_grab_cache_page(mapping, index, false);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, false);
+ if (IS_ERR(folio)) {
cond_resched();
goto repeat;
}
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto out;
- fio.page = page;
+ fio.folio = folio;
err = f2fs_submit_page_bio(&fio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
- f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE);
- lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (unlikely(!is_meta_folio(folio))) {
+ f2fs_folio_put(folio, true);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ f2fs_handle_page_eio(sbi, folio, META);
+ f2fs_folio_put(folio, true);
return ERR_PTR(-EIO);
}
out:
- return page;
+ return folio;
}
-struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
- return __get_meta_page(sbi, index, true);
+ return __get_meta_folio(sbi, index, true);
}
-struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
- struct page *page;
+ struct folio *folio;
int count = 0;
retry:
- page = __get_meta_page(sbi, index, true);
- if (IS_ERR(page)) {
- if (PTR_ERR(page) == -EIO &&
+ folio = __get_meta_folio(sbi, index, true);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -EIO &&
++count <= DEFAULT_RETRY_IO_COUNT)
goto retry;
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE);
}
- return page;
+ return folio;
}
/* for POR only */
-struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
- return __get_meta_page(sbi, index, false);
+ return __get_meta_folio(sbi, index, false);
}
static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
@@ -139,7 +141,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
unsigned int segno, offset;
bool exist;
- if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
+ if (type == DATA_GENERIC)
return true;
segno = GET_SEGNO(sbi, blkaddr);
@@ -147,16 +149,29 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
se = get_seg_entry(sbi, segno);
exist = f2fs_test_bit(offset, se->cur_valid_map);
- if (!exist && type == DATA_GENERIC_ENHANCE) {
- f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
- blkaddr, exist);
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- WARN_ON(1);
- }
+
+ /* skip data, if we already have an error in checkpoint. */
+ if (unlikely(f2fs_cp_error(sbi)))
+ return exist;
+
+ if ((exist && type == DATA_GENERIC_ENHANCE_UPDATE) ||
+ (!exist && type == DATA_GENERIC_ENHANCE))
+ goto out_err;
+ if (!exist && type != DATA_GENERIC_ENHANCE_UPDATE)
+ goto out_handle;
+ return exist;
+
+out_err:
+ f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
+ blkaddr, exist);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ dump_stack();
+out_handle:
+ f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
return exist;
}
-bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
switch (type) {
@@ -164,33 +179,39 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
break;
case META_SIT:
if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
- return false;
+ goto check_only;
break;
case META_SSA:
if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
blkaddr < SM_I(sbi)->ssa_blkaddr))
- return false;
+ goto check_only;
break;
case META_CP:
if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
blkaddr < __start_cp_addr(sbi)))
- return false;
+ goto check_only;
break;
case META_POR:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
blkaddr < MAIN_BLKADDR(sbi)))
- return false;
+ goto check_only;
break;
case DATA_GENERIC:
case DATA_GENERIC_ENHANCE:
case DATA_GENERIC_ENHANCE_READ:
+ case DATA_GENERIC_ENHANCE_UPDATE:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
blkaddr < MAIN_BLKADDR(sbi))) {
+
+ /* Skip to emit an error message. */
+ if (unlikely(f2fs_cp_error(sbi)))
+ return false;
+
f2fs_warn(sbi, "access invalid blkaddr:%u",
blkaddr);
set_sbi_flag(sbi, SBI_NEED_FSCK);
- WARN_ON(1);
- return false;
+ dump_stack();
+ goto err;
} else {
return __is_bitmap_valid(sbi, blkaddr, type);
}
@@ -198,13 +219,31 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
case META_GENERIC:
if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
blkaddr >= MAIN_BLKADDR(sbi)))
- return false;
+ goto err;
break;
default:
BUG();
}
return true;
+err:
+ f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+check_only:
+ return false;
+}
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY))
+ return false;
+ return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
+}
+
+bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}
/*
@@ -213,7 +252,6 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
{
- struct page *page;
block_t blkno = start;
struct f2fs_io_info fio = {
.sbi = sbi,
@@ -221,8 +259,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
.op = REQ_OP_READ,
.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
.encrypted_page = NULL,
- .in_list = false,
- .is_por = (type == META_POR),
+ .in_list = 0,
+ .is_por = (type == META_POR) ? 1 : 0,
};
struct blk_plug plug;
int err;
@@ -232,6 +270,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
+ struct folio *folio;
if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
goto out;
@@ -261,78 +300,79 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
BUG();
}
- page = f2fs_grab_cache_page(META_MAPPING(sbi),
+ folio = f2fs_grab_cache_folio(META_MAPPING(sbi),
fio.new_blkaddr, false);
- if (!page)
+ if (IS_ERR(folio))
continue;
- if (PageUptodate(page)) {
- f2fs_put_page(page, 1);
+ if (folio_test_uptodate(folio)) {
+ f2fs_folio_put(folio, true);
continue;
}
- fio.page = page;
+ fio.folio = folio;
err = f2fs_submit_page_bio(&fio);
- f2fs_put_page(page, err ? 1 : 0);
+ f2fs_folio_put(folio, err ? true : false);
if (!err)
- f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, NULL, FS_META_READ_IO,
+ F2FS_BLKSIZE);
}
out:
blk_finish_plug(&plug);
return blkno - start;
}
-void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
+void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
+ unsigned int ra_blocks)
{
- struct page *page;
+ struct folio *folio;
bool readahead = false;
- page = find_get_page(META_MAPPING(sbi), index);
- if (!page || !PageUptodate(page))
+ if (ra_blocks == RECOVERY_MIN_RA_BLOCKS)
+ return;
+
+ folio = filemap_get_folio(META_MAPPING(sbi), index);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio))
readahead = true;
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
if (readahead)
- f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
+ f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true);
}
-static int __f2fs_write_meta_page(struct page *page,
+static bool __f2fs_write_meta_folio(struct folio *folio,
struct writeback_control *wbc,
enum iostat_type io_type)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
- trace_f2fs_writepage(page, META);
+ trace_f2fs_writepage(folio, META);
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
+ folio_clear_uptodate(folio);
+ dec_page_count(sbi, F2FS_DIRTY_META);
+ folio_unlock(folio);
+ return true;
+ }
goto redirty_out;
+ }
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
- goto redirty_out;
- f2fs_do_write_meta_page(sbi, page, io_type);
+ f2fs_do_write_meta_page(sbi, folio, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
- if (wbc->for_reclaim)
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
-
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_write(sbi, META);
- return 0;
+ return true;
redirty_out:
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
-}
-
-static int f2fs_write_meta_page(struct page *page,
- struct writeback_control *wbc)
-{
- return __f2fs_write_meta_page(page, wbc, FS_META_IO);
+ folio_redirty_for_writepage(wbc, folio);
+ return false;
}
static int f2fs_write_meta_pages(struct address_space *mapping,
@@ -351,13 +391,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
goto skip_write;
/* if locked failed, cp will flush dirty pages instead */
- if (!down_write_trylock(&sbi->cp_global_sem))
+ if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, META);
diff = nr_pages_to_write(sbi, META, wbc);
written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
- up_write(&sbi->cp_global_sem);
+ f2fs_up_write(&sbi->cp_global_sem);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
@@ -372,59 +412,59 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
{
struct address_space *mapping = META_MAPPING(sbi);
pgoff_t index = 0, prev = ULONG_MAX;
- struct pagevec pvec;
+ struct folio_batch fbatch;
long nwritten = 0;
- int nr_pages;
- struct writeback_control wbc = {
- .for_reclaim = 0,
- };
+ int nr_folios;
+ struct writeback_control wbc = {};
struct blk_plug plug;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
blk_start_plug(&plug);
- while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY))) {
+ while ((nr_folios = filemap_get_folios_tag(mapping, &index,
+ (pgoff_t)-1,
+ PAGECACHE_TAG_DIRTY, &fbatch))) {
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
- if (prev == ULONG_MAX)
- prev = page->index - 1;
- if (nr_to_write != LONG_MAX && page->index != prev + 1) {
- pagevec_release(&pvec);
+ if (nr_to_write != LONG_MAX && i != 0 &&
+ folio->index != prev +
+ folio_nr_pages(fbatch.folios[i-1])) {
+ folio_batch_release(&fbatch);
goto stop;
}
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(!is_meta_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- f2fs_wait_on_page_writeback(page, META, true, true);
+ f2fs_folio_wait_writeback(folio, META, true, true);
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- if (__f2fs_write_meta_page(page, &wbc, io_type)) {
- unlock_page(page);
+ if (!__f2fs_write_meta_folio(folio, &wbc,
+ io_type)) {
+ folio_unlock(folio);
break;
}
- nwritten++;
- prev = page->index;
+ nwritten += folio_nr_pages(folio);
+ prev = folio->index;
if (unlikely(nwritten >= nr_to_write))
break;
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
stop:
@@ -436,30 +476,27 @@ stop:
return nwritten;
}
-static int f2fs_set_meta_page_dirty(struct page *page)
+static bool f2fs_dirty_meta_folio(struct address_space *mapping,
+ struct folio *folio)
{
- trace_f2fs_set_page_dirty(page, META);
+ trace_f2fs_set_page_dirty(folio, META);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
- inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
- set_page_private_reference(page);
- return 1;
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ if (filemap_dirty_folio(mapping, folio)) {
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
+ folio_set_f2fs_reference(folio);
+ return true;
}
- return 0;
+ return false;
}
const struct address_space_operations f2fs_meta_aops = {
- .writepage = f2fs_write_meta_page,
.writepages = f2fs_write_meta_pages,
- .set_page_dirty = f2fs_set_meta_page_dirty,
- .invalidatepage = f2fs_invalidate_page,
- .releasepage = f2fs_release_page,
-#ifdef CONFIG_MIGRATION
- .migratepage = f2fs_migrate_page,
-#endif
+ .dirty_folio = f2fs_dirty_meta_folio,
+ .invalidate_folio = f2fs_invalidate_folio,
+ .release_folio = f2fs_release_folio,
+ .migrate_folio = filemap_migrate_folio,
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
@@ -467,6 +504,7 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
{
struct inode_management *im = &sbi->im[type];
struct ino_entry *e = NULL, *new = NULL;
+ int ret;
if (type == FLUSH_INO) {
rcu_read_lock();
@@ -479,13 +517,15 @@ retry:
new = f2fs_kmem_cache_alloc(ino_entry_slab,
GFP_NOFS, true, NULL);
- radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ ret = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, ret);
spin_lock(&im->ino_lock);
e = radix_tree_lookup(&im->ino_root, ino);
if (!e) {
if (!new) {
spin_unlock(&im->ino_lock);
+ radix_tree_preload_end();
goto retry;
}
e = new;
@@ -601,7 +641,6 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
- f2fs_show_injection_info(sbi, FAULT_ORPHAN);
return -ENOSPC;
}
@@ -664,7 +703,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
- err = f2fs_get_node_info(sbi, ino, &ni);
+ err = f2fs_get_node_info(sbi, ino, &ni, false);
if (err)
goto err_out;
@@ -685,32 +724,18 @@ err_out:
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
block_t start_blk, orphan_blocks, i, j;
- unsigned int s_flags = sbi->sb->s_flags;
int err = 0;
-#ifdef CONFIG_QUOTA
- int quota_enabled;
-#endif
if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
- if (bdev_read_only(sbi->sb->s_bdev)) {
+ if (f2fs_hw_is_readonly(sbi)) {
f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
return 0;
}
- if (s_flags & SB_RDONLY) {
+ if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
f2fs_info(sbi, "orphan cleanup on readonly fs");
- sbi->sb->s_flags &= ~SB_RDONLY;
- }
-
-#ifdef CONFIG_QUOTA
- /*
- * Turn on quotas which were not enabled for read-only mounts if
- * filesystem has quota feature, so that they are updated correctly.
- */
- quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
-#endif
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
@@ -718,39 +743,32 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
for (i = 0; i < orphan_blocks; i++) {
- struct page *page;
+ struct folio *folio;
struct f2fs_orphan_block *orphan_blk;
- page = f2fs_get_meta_page(sbi, start_blk + i);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_meta_folio(sbi, start_blk + i);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto out;
}
- orphan_blk = (struct f2fs_orphan_block *)page_address(page);
+ orphan_blk = folio_address(folio);
for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
err = recover_orphan_inode(sbi, ino);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
goto out;
}
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
/* clear Orphan Flag */
clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
set_sbi_flag(sbi, SBI_IS_RECOVERED);
-#ifdef CONFIG_QUOTA
- /* Turn quotas off */
- if (quota_enabled)
- f2fs_quota_off_umount(sbi->sb);
-#endif
- sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
-
return err;
}
@@ -761,7 +779,7 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
unsigned int nentries = 0;
unsigned short index = 1;
unsigned short orphan_blocks;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct ino_entry *orphan = NULL;
struct inode_management *im = &sbi->im[ORPHAN_INO];
@@ -774,12 +792,11 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
*/
head = &im->ino_list;
- /* loop for each orphan inode entry and write them in Jornal block */
+ /* loop for each orphan inode entry and write them in journal block */
list_for_each_entry(orphan, head, list) {
- if (!page) {
- page = f2fs_grab_meta_page(sbi, start_blk++);
- orphan_blk =
- (struct f2fs_orphan_block *)page_address(page);
+ if (!folio) {
+ folio = f2fs_grab_meta_folio(sbi, start_blk++);
+ orphan_blk = folio_address(folio);
memset(orphan_blk, 0, sizeof(*orphan_blk));
}
@@ -794,62 +811,61 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
orphan_blk->blk_addr = cpu_to_le16(index);
orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
orphan_blk->entry_count = cpu_to_le32(nentries);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
index++;
nentries = 0;
- page = NULL;
+ folio = NULL;
}
}
- if (page) {
+ if (folio) {
orphan_blk->blk_addr = cpu_to_le16(index);
orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
orphan_blk->entry_count = cpu_to_le32(nentries);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
}
-static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
- struct f2fs_checkpoint *ckpt)
+static __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *ckpt)
{
unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
__u32 chksum;
- chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
+ chksum = f2fs_crc32(ckpt, chksum_ofs);
if (chksum_ofs < CP_CHKSUM_OFFSET) {
chksum_ofs += sizeof(chksum);
- chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
- F2FS_BLKSIZE - chksum_ofs);
+ chksum = f2fs_chksum(chksum, (__u8 *)ckpt + chksum_ofs,
+ F2FS_BLKSIZE - chksum_ofs);
}
return chksum;
}
static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
- struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ struct f2fs_checkpoint **cp_block, struct folio **cp_folio,
unsigned long long *version)
{
size_t crc_offset = 0;
__u32 crc;
- *cp_page = f2fs_get_meta_page(sbi, cp_addr);
- if (IS_ERR(*cp_page))
- return PTR_ERR(*cp_page);
+ *cp_folio = f2fs_get_meta_folio(sbi, cp_addr);
+ if (IS_ERR(*cp_folio))
+ return PTR_ERR(*cp_folio);
- *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
+ *cp_block = folio_address(*cp_folio);
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
crc_offset > CP_CHKSUM_OFFSET) {
- f2fs_put_page(*cp_page, 1);
+ f2fs_folio_put(*cp_folio, true);
f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
return -EINVAL;
}
- crc = f2fs_checkpoint_chksum(sbi, *cp_block);
+ crc = f2fs_checkpoint_chksum(*cp_block);
if (crc != cur_cp_crc(*cp_block)) {
- f2fs_put_page(*cp_page, 1);
+ f2fs_folio_put(*cp_folio, true);
f2fs_warn(sbi, "invalid crc value");
return -EINVAL;
}
@@ -858,42 +874,44 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
return 0;
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+static struct folio *validate_checkpoint(struct f2fs_sb_info *sbi,
block_t cp_addr, unsigned long long *version)
{
- struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct folio *cp_folio_1 = NULL, *cp_folio_2 = NULL;
struct f2fs_checkpoint *cp_block = NULL;
unsigned long long cur_version = 0, pre_version = 0;
+ unsigned int cp_blocks;
int err;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
- &cp_page_1, version);
+ &cp_folio_1, version);
if (err)
return NULL;
- if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
- sbi->blocks_per_seg) {
+ cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);
+
+ if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) {
f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
le32_to_cpu(cp_block->cp_pack_total_block_count));
goto invalid_cp;
}
pre_version = *version;
- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ cp_addr += cp_blocks - 1;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
- &cp_page_2, version);
+ &cp_folio_2, version);
if (err)
goto invalid_cp;
cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
- f2fs_put_page(cp_page_2, 1);
- return cp_page_1;
+ f2fs_folio_put(cp_folio_2, true);
+ return cp_folio_1;
}
- f2fs_put_page(cp_page_2, 1);
+ f2fs_folio_put(cp_folio_2, true);
invalid_cp:
- f2fs_put_page(cp_page_1, 1);
+ f2fs_folio_put(cp_folio_1, true);
return NULL;
}
@@ -901,7 +919,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *cp_block;
struct f2fs_super_block *fsb = sbi->raw_super;
- struct page *cp1, *cp2, *cur_page;
+ struct folio *cp1, *cp2, *cur_folio;
unsigned long blk_size = sbi->blocksize;
unsigned long long cp1_version = 0, cp2_version = 0;
unsigned long long cp_start_blk_no;
@@ -928,22 +946,22 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
if (cp1 && cp2) {
if (ver_after(cp2_version, cp1_version))
- cur_page = cp2;
+ cur_folio = cp2;
else
- cur_page = cp1;
+ cur_folio = cp1;
} else if (cp1) {
- cur_page = cp1;
+ cur_folio = cp1;
} else if (cp2) {
- cur_page = cp2;
+ cur_folio = cp2;
} else {
err = -EFSCORRUPTED;
goto fail_no_cp;
}
- cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
+ cp_block = folio_address(cur_folio);
memcpy(sbi->ckpt, cp_block, blk_size);
- if (cur_page == cp1)
+ if (cur_folio == cp1)
sbi->cur_cp_pack = 1;
else
sbi->cur_cp_pack = 2;
@@ -958,30 +976,30 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
goto done;
cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
- if (cur_page == cp2)
- cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
+ if (cur_folio == cp2)
+ cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg));
for (i = 1; i < cp_blks; i++) {
void *sit_bitmap_ptr;
unsigned char *ckpt = (unsigned char *)sbi->ckpt;
- cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
- if (IS_ERR(cur_page)) {
- err = PTR_ERR(cur_page);
+ cur_folio = f2fs_get_meta_folio(sbi, cp_blk_no + i);
+ if (IS_ERR(cur_folio)) {
+ err = PTR_ERR(cur_folio);
goto free_fail_no_cp;
}
- sit_bitmap_ptr = page_address(cur_page);
+ sit_bitmap_ptr = folio_address(cur_folio);
memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
- f2fs_put_page(cur_page, 1);
+ f2fs_folio_put(cur_folio, true);
}
done:
- f2fs_put_page(cp1, 1);
- f2fs_put_page(cp2, 1);
+ f2fs_folio_put(cp1, true);
+ f2fs_folio_put(cp2, true);
return 0;
free_fail_no_cp:
- f2fs_put_page(cp1, 1);
- f2fs_put_page(cp2, 1);
+ f2fs_folio_put(cp1, true);
+ f2fs_folio_put(cp2, true);
fail_no_cp:
kvfree(sbi->ckpt);
return err;
@@ -996,9 +1014,7 @@ static void __add_dirty_inode(struct inode *inode, enum inode_type type)
return;
set_inode_flag(inode, flag);
- if (!f2fs_is_volatile_file(inode))
- list_add_tail(&F2FS_I(inode)->dirty_list,
- &sbi->inode_list[type]);
+ list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
stat_inc_dirty_inode(sbi, type);
}
@@ -1014,7 +1030,7 @@ static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
-void f2fs_update_dirty_page(struct inode *inode, struct page *page)
+void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
@@ -1029,7 +1045,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
inode_inc_dirty_pages(inode);
spin_unlock(&sbi->inode_lock[type]);
- set_page_private_reference(page);
+ folio_set_f2fs_reference(folio);
}
void f2fs_remove_dirty_inode(struct inode *inode)
@@ -1049,7 +1065,8 @@ void f2fs_remove_dirty_inode(struct inode *inode)
spin_unlock(&sbi->inode_lock[type]);
}
-int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
+int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
+ bool from_cp)
{
struct list_head *head;
struct inode *inode;
@@ -1084,11 +1101,15 @@ retry:
if (inode) {
unsigned long cur_ino = inode->i_ino;
- F2FS_I(inode)->cp_task = current;
+ if (from_cp)
+ F2FS_I(inode)->cp_task = current;
+ F2FS_I(inode)->wb_task = current;
filemap_fdatawrite(inode->i_mapping);
- F2FS_I(inode)->cp_task = NULL;
+ F2FS_I(inode)->wb_task = NULL;
+ if (from_cp)
+ F2FS_I(inode)->cp_task = NULL;
iput(inode);
/* We need to give cpu to another writers. */
@@ -1099,7 +1120,7 @@ retry:
} else {
/*
* We should submit bio, since it exists several
- * wribacking dentry pages in the freeing inode.
+ * writebacking dentry pages in the freeing inode.
*/
f2fs_submit_merged_write(sbi, DATA);
cond_resched();
@@ -1107,7 +1128,7 @@ retry:
goto retry;
}
-int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+static int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
struct list_head *head = &sbi->inode_list[DIRTY_META];
struct inode *inode;
@@ -1150,6 +1171,11 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
ckpt->next_free_nid = cpu_to_le32(last_nid);
+
+ /* update user_block_counts */
+ sbi->last_valid_block_count = sbi->total_valid_block_count;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+ percpu_counter_set(&sbi->rf_node_block_count, 0);
}
static bool __need_flush_quota(struct f2fs_sb_info *sbi)
@@ -1159,7 +1185,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
if (!is_journalled_quota(sbi))
return false;
- if (!down_write_trylock(&sbi->quota_sem))
+ if (!f2fs_down_write_trylock(&sbi->quota_sem))
return true;
if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
ret = false;
@@ -1171,7 +1197,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
ret = true;
}
- up_write(&sbi->quota_sem);
+ f2fs_up_write(&sbi->quota_sem);
return ret;
}
@@ -1183,7 +1209,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
- .for_reclaim = 0,
};
int err = 0, cnt = 0;
@@ -1195,7 +1220,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
retry_flush_quotas:
f2fs_lock_all(sbi);
if (__need_flush_quota(sbi)) {
- int locked;
+ bool need_lock = sbi->umount_lock_holder != current;
if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
@@ -1204,11 +1229,13 @@ retry_flush_quotas:
}
f2fs_unlock_all(sbi);
- /* only failed during mount/umount/freeze/quotactl */
- locked = down_read_trylock(&sbi->sb->s_umount);
- f2fs_quota_sync(sbi->sb, -1);
- if (locked)
+ /* don't grab s_umount lock during mount/umount/remount/freeze/quotactl */
+ if (!need_lock) {
+ f2fs_do_quota_sync(sbi->sb, -1);
+ } else if (down_read_trylock(&sbi->sb->s_umount)) {
+ f2fs_do_quota_sync(sbi->sb, -1);
up_read(&sbi->sb->s_umount);
+ }
cond_resched();
goto retry_flush_quotas;
}
@@ -1217,7 +1244,7 @@ retry_flush_dents:
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
- err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
+ err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true);
if (err)
return err;
cond_resched();
@@ -1228,10 +1255,10 @@ retry_flush_dents:
* POR: we should ensure that there are no dirty node pages
* until finishing nat/sit flush. inode->i_blocks can be updated.
*/
- down_write(&sbi->node_change);
+ f2fs_down_write(&sbi->node_change);
if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
- up_write(&sbi->node_change);
+ f2fs_up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
err = f2fs_sync_inode_meta(sbi);
if (err)
@@ -1241,15 +1268,15 @@ retry_flush_dents:
}
retry_flush_nodes:
- down_write(&sbi->node_write);
+ f2fs_down_write(&sbi->node_write);
if (get_pages(sbi, F2FS_DIRTY_NODES)) {
- up_write(&sbi->node_write);
+ f2fs_up_write(&sbi->node_write);
atomic_inc(&sbi->wb_sync_req[NODE]);
err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
atomic_dec(&sbi->wb_sync_req[NODE]);
if (err) {
- up_write(&sbi->node_change);
+ f2fs_up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
return err;
}
@@ -1262,13 +1289,13 @@ retry_flush_nodes:
* dirty node blocks and some checkpoint values by block allocation.
*/
__prepare_cp_block(sbi);
- up_write(&sbi->node_change);
+ f2fs_up_write(&sbi->node_change);
return err;
}
static void unblock_operations(struct f2fs_sb_info *sbi)
{
- up_write(&sbi->node_write);
+ f2fs_up_write(&sbi->node_write);
f2fs_unlock_all(sbi);
}
@@ -1280,7 +1307,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
if (!get_pages(sbi, type))
break;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi) &&
+ !is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
break;
if (type == F2FS_DIRTY_META)
@@ -1290,7 +1318,7 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
f2fs_submit_merged_write(sbi, DATA);
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
}
finish_wait(&sbi->cp_wait, &wait);
}
@@ -1301,21 +1329,13 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long flags;
- if (cpc->reason & CP_UMOUNT) {
- if (le32_to_cpu(ckpt->cp_pack_total_block_count) >
- sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) {
- clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- f2fs_notice(sbi, "Disable nat_bits due to no space");
- } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
- f2fs_nat_bitmap_enabled(sbi)) {
- f2fs_enable_nat_bits(sbi);
- set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- f2fs_notice(sbi, "Rebuild and enable nat_bits");
- }
- }
-
spin_lock_irqsave(&sbi->cp_lock, flags);
+ if ((cpc->reason & CP_UMOUNT) &&
+ le32_to_cpu(ckpt->cp_pack_total_block_count) >
+ sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
+ disable_nat_bits(sbi, false);
+
if (cpc->reason & CP_TRIMMED)
__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
else
@@ -1372,35 +1392,31 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
static void commit_checkpoint(struct f2fs_sb_info *sbi,
void *src, block_t blk_addr)
{
- struct writeback_control wbc = {
- .for_reclaim = 0,
- };
+ struct writeback_control wbc = {};
/*
- * pagevec_lookup_tag and lock_page again will take
+ * filemap_get_folios_tag and folio_lock again will take
* some extra time. Therefore, f2fs_update_meta_pages and
* f2fs_sync_meta_pages are combined in this function.
*/
- struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
- int err;
-
- f2fs_wait_on_page_writeback(page, META, true, true);
+ struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
- memcpy(page_address(page), src, PAGE_SIZE);
+ memcpy(folio_address(folio), src, PAGE_SIZE);
- set_page_dirty(page);
- if (unlikely(!clear_page_dirty_for_io(page)))
+ folio_mark_dirty(folio);
+ if (unlikely(!folio_clear_dirty_for_io(folio)))
f2fs_bug_on(sbi, 1);
/* writeout cp pack 2 page */
- err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
- if (unlikely(err && f2fs_cp_error(sbi))) {
- f2fs_put_page(page, 1);
- return;
+ if (unlikely(!__f2fs_write_meta_folio(folio, &wbc, FS_CP_META_IO))) {
+ if (f2fs_cp_error(sbi)) {
+ f2fs_folio_put(folio, true);
+ return;
+ }
+ f2fs_bug_on(sbi, true);
}
- f2fs_bug_on(sbi, err);
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
/* submit checkpoint (with barrier if NOBARRIER is not set) */
f2fs_submit_merged_write(sbi, META_FLUSH);
@@ -1426,6 +1442,34 @@ u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
return get_sectors_written(sbi->sb->s_bdev);
}
+static inline void stat_cp_time(struct cp_control *cpc, enum cp_time type)
+{
+ cpc->stats.times[type] = ktime_get();
+}
+
+static inline void check_cp_time(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ unsigned long long sb_diff, cur_diff;
+ enum cp_time ct;
+
+ sb_diff = (u64)ktime_ms_delta(sbi->cp_stats.times[CP_TIME_END],
+ sbi->cp_stats.times[CP_TIME_START]);
+ cur_diff = (u64)ktime_ms_delta(cpc->stats.times[CP_TIME_END],
+ cpc->stats.times[CP_TIME_START]);
+
+ if (cur_diff > sb_diff) {
+ sbi->cp_stats = cpc->stats;
+ if (cur_diff < CP_LONG_LATENCY_THRESHOLD)
+ return;
+
+ f2fs_warn(sbi, "checkpoint was blocked for %llu ms", cur_diff);
+ for (ct = CP_TIME_START; ct < CP_TIME_MAX - 1; ct++)
+ f2fs_warn(sbi, "Step#%d: %llu ms", ct,
+ (u64)ktime_ms_delta(cpc->stats.times[ct + 1],
+ cpc->stats.times[ct]));
+ }
+}
+
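/*
 * Illustrative sketch, not part of the patch: stat_cp_time() stamps a
 * ktime at each checkpoint phase and check_cp_time() dumps per-step
 * deltas only when the slowest checkpoint seen so far crosses
 * CP_LONG_LATENCY_THRESHOLD.  The userspace analogue below shows the
 * same bookkeeping with clock_gettime(); the phase names and the
 * 1000 ms threshold are assumptions made for the example only.
 */
#include <stdio.h>
#include <time.h>

enum cp_phase { PH_START, PH_OP_LOCK, PH_SYNC_META, PH_END, PH_MAX };

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	long long t[PH_MAX];
	int ph;

	for (ph = 0; ph < PH_MAX; ph++)
		t[ph] = now_ms();	/* the real code stamps one phase at a time */

	if (t[PH_END] - t[PH_START] >= 1000 /* assumed threshold, in ms */)
		for (ph = 0; ph < PH_MAX - 1; ph++)
			printf("Step#%d: %lld ms\n", ph, t[ph + 1] - t[ph]);
	return 0;
}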
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1443,24 +1487,24 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Flush all the NAT/SIT pages */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ stat_cp_time(cpc, CP_TIME_SYNC_META);
+
/* start to update checkpoint, cp ver is already updated previously */
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
- ckpt->cur_node_segno[i] =
- cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
- ckpt->cur_node_blkoff[i] =
- cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
- ckpt->alloc_type[i + CURSEG_HOT_NODE] =
- curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
+ struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_NODE);
+
+ ckpt->cur_node_segno[i] = cpu_to_le32(curseg->segno);
+ ckpt->cur_node_blkoff[i] = cpu_to_le16(curseg->next_blkoff);
+ ckpt->alloc_type[i + CURSEG_HOT_NODE] = curseg->alloc_type;
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
- ckpt->cur_data_segno[i] =
- cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
- ckpt->cur_data_blkoff[i] =
- cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
- ckpt->alloc_type[i + CURSEG_HOT_DATA] =
- curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
+ struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_DATA);
+
+ ckpt->cur_data_segno[i] = cpu_to_le32(curseg->segno);
+ ckpt->cur_data_blkoff[i] = cpu_to_le16(curseg->next_blkoff);
+ ckpt->alloc_type[i + CURSEG_HOT_DATA] = curseg->alloc_type;
}
/* 2 cp + n data seg summary + orphan inode blocks */
@@ -1492,7 +1536,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
- crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
+ crc32 = f2fs_checkpoint_chksum(ckpt);
*((__le32 *)((unsigned char *)ckpt +
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
@@ -1500,18 +1544,17 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk = __start_cp_next_addr(sbi);
/* write nat bits */
- if ((cpc->reason & CP_UMOUNT) &&
- is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) {
+ if (enabled_nat_bits(sbi, cpc)) {
__u64 cp_ver = cur_cp_version(ckpt);
block_t blk;
cp_ver |= ((__u64)crc32 << 32);
*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
- blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
+ blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++)
f2fs_update_meta_page(sbi, nm_i->nat_bits +
- (i << F2FS_BLKSIZE_BITS), blk + i);
+ F2FS_BLK_TO_BYTES(i), blk + i);
}
/* write out checkpoint buffer at block 0 */
@@ -1540,26 +1583,28 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk += NR_CURSEG_NODE_TYPE;
}
- /* update user_block_counts */
- sbi->last_valid_block_count = sbi->total_valid_block_count;
- percpu_counter_set(&sbi->alloc_valid_block_count, 0);
-
/* Here, we have one bio having CP pack except cp pack 2 page */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ stat_cp_time(cpc, CP_TIME_SYNC_CP_META);
+
/* Wait for all dirty meta pages to be submitted for IO */
f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
+ stat_cp_time(cpc, CP_TIME_WAIT_DIRTY_META);
/* wait for previous submitted meta pages writeback */
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ stat_cp_time(cpc, CP_TIME_WAIT_CP_DATA);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
if (err)
return err;
+ stat_cp_time(cpc, CP_TIME_FLUSH_DEVICE);
/* barrier and flush checkpoint cp pack 2 page if it can */
commit_checkpoint(sbi, ckpt, start_blk);
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ stat_cp_time(cpc, CP_TIME_WAIT_LAST_CP);
/*
* invalidate intermediate page cache borrowed from meta inode which are
@@ -1567,8 +1612,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
*/
if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
f2fs_sb_has_compression(sbi))
- invalidate_mapping_pages(META_MAPPING(sbi),
- MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
+ f2fs_bug_on(sbi,
+ invalidate_inode_pages2_range(META_MAPPING(sbi),
+ MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1));
f2fs_release_ino_entry(sbi, false);
@@ -1603,6 +1649,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long long ckpt_ver;
int err = 0;
+ stat_cp_time(cpc, CP_TIME_START);
+
if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
return -EROFS;
@@ -1612,7 +1660,9 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_warn(sbi, "Start checkpoint disabled!");
}
if (cpc->reason != CP_RESIZE)
- down_write(&sbi->cp_global_sem);
+ f2fs_down_write(&sbi->cp_global_sem);
+
+ stat_cp_time(cpc, CP_TIME_LOCK);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1623,13 +1673,15 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
goto out;
}
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_START_BLOCK_OPS);
err = block_operations(sbi);
if (err)
goto out;
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
+ stat_cp_time(cpc, CP_TIME_OP_LOCK);
+
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_BLOCK_OPS);
f2fs_flush_merged_writes(sbi);
@@ -1668,6 +1720,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_sit_entries(sbi, cpc);
+ stat_cp_time(cpc, CP_TIME_FLUSH_META);
+
/* save inmem log status */
f2fs_save_inmem_curseg(sbi);
@@ -1681,19 +1735,22 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
f2fs_restore_inmem_curseg(sbi);
+ f2fs_reinit_atgc_curseg(sbi);
+ stat_inc_cp_count(sbi);
stop:
unblock_operations(sbi);
- stat_inc_cp_count(sbi->stat_info);
+ stat_cp_time(cpc, CP_TIME_END);
+ check_cp_time(sbi, cpc);
if (cpc->reason & CP_RECOVERY)
f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
/* update CP_TIME to trigger checkpoint periodically */
f2fs_update_time(sbi, CP_TIME);
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_CHECKPOINT);
out:
if (cpc->reason != CP_RESIZE)
- up_write(&sbi->cp_global_sem);
+ f2fs_up_write(&sbi->cp_global_sem);
return err;
}
@@ -1710,9 +1767,9 @@ void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
im->ino_num = 0;
}
- sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+ sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS -
NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
- F2FS_ORPHANS_PER_BLOCK;
+ F2FS_ORPHANS_PER_BLOCK;
}
int __init f2fs_create_checkpoint_caches(void)
@@ -1741,9 +1798,9 @@ static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
struct cp_control cpc = { .reason = CP_SYNC, };
int err;
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
err = f2fs_write_checkpoint(sbi, &cpc);
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
return err;
}
@@ -1767,6 +1824,7 @@ static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
req->ret = ret;
+ req->delta_time = diff;
complete(&req->wait);
sum_diff += diff;
@@ -1828,12 +1886,13 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
struct cp_control cpc;
cpc.reason = __get_cp_reason(sbi);
- if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
+ if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC ||
+ sbi->umount_lock_holder == current) {
int ret;
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
ret = f2fs_write_checkpoint(sbi, &cpc);
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
return ret;
}
@@ -1861,6 +1920,12 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
else
flush_remained_ckpt_reqs(sbi, &req);
+ if (unlikely(req.delta_time >= CP_LONG_LATENCY_THRESHOLD)) {
+ f2fs_warn_ratelimited(sbi,
+ "blocked on checkpoint for %u ms", cprc->peak_time);
+ dump_stack();
+ }
+
return req.ret;
}
@@ -1875,8 +1940,10 @@ int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
"f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(cprc->f2fs_issue_ckpt)) {
+ int err = PTR_ERR(cprc->f2fs_issue_ckpt);
+
cprc->f2fs_issue_ckpt = NULL;
- return -ENOMEM;
+ return err;
}
set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);
@@ -1887,15 +1954,27 @@ int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
struct ckpt_req_control *cprc = &sbi->cprc_info;
+ struct task_struct *ckpt_task;
- if (cprc->f2fs_issue_ckpt) {
- struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;
+ if (!cprc->f2fs_issue_ckpt)
+ return;
- cprc->f2fs_issue_ckpt = NULL;
- kthread_stop(ckpt_task);
+ ckpt_task = cprc->f2fs_issue_ckpt;
+ cprc->f2fs_issue_ckpt = NULL;
+ kthread_stop(ckpt_task);
- flush_remained_ckpt_reqs(sbi, NULL);
- }
+ f2fs_flush_ckpt_thread(sbi);
+}
+
+void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ flush_remained_ckpt_reqs(sbi, NULL);
+
+ /* Let's wait for previously dispatched checkpoints. */
+ while (atomic_read(&cprc->queued_ckpt))
+ io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
}
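/*
 * Illustrative sketch, not part of the patch: f2fs_flush_ckpt_thread()
 * first completes requests still sitting on the issue list, then polls
 * queued_ckpt until every already-dispatched checkpoint has finished.
 * A userspace analogue of that drain-then-wait shape, with nanosleep()
 * standing in for io_schedule_timeout():
 */
#include <stdatomic.h>
#include <time.h>

static atomic_int queued_ckpt;

static void flush_remained_reqs(void) { /* complete still-queued requests */ }

static void flush_ckpt_thread(void)
{
	const struct timespec tick = { .tv_nsec = 1000000 };	/* 1 ms */

	flush_remained_reqs();
	while (atomic_load(&queued_ckpt))	/* wait for dispatched ones */
		nanosleep(&tick, NULL);
}

int main(void)
{
	flush_ckpt_thread();
	return 0;
}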
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index a0d5cfab75e4..7b68bf22989d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -23,20 +23,18 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
-static void *page_array_alloc(struct inode *inode, int nr)
+static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (likely(size <= sbi->page_array_slab_size))
return f2fs_kmem_cache_alloc(sbi->page_array_slab,
- GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
+ GFP_F2FS_ZERO, false, sbi);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
-static void page_array_free(struct inode *inode, void *pages, int nr)
+static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (!pages)
@@ -55,6 +53,7 @@ struct f2fs_compress_ops {
int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
int (*decompress_pages)(struct decompress_io_ctx *dic);
+ bool (*is_level_valid)(int level);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -72,28 +71,28 @@ static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
return cc->cluster_idx << cc->log_cluster_size;
}
-bool f2fs_is_compressed_page(struct page *page)
+bool f2fs_is_compressed_page(struct folio *folio)
{
- if (!PagePrivate(page))
- return false;
- if (!page_private(page))
+ if (!folio->private)
return false;
- if (page_private_nonpointer(page))
+ if (folio_test_f2fs_nonpointer(folio))
return false;
- f2fs_bug_on(F2FS_M_SB(page->mapping),
- *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
+ f2fs_bug_on(F2FS_F_SB(folio),
+ *((u32 *)folio->private) != F2FS_COMPRESSED_PAGE_MAGIC);
return true;
}
static void f2fs_set_compressed_page(struct page *page,
struct inode *inode, pgoff_t index, void *data)
{
- attach_page_private(page, (void *)data);
+ struct folio *folio = page_folio(page);
+
+ folio_attach_private(folio, (void *)data);
/* i_crypto_info and iv index */
- page->index = index;
- page->mapping = inode->i_mapping;
+ folio->index = index;
+ folio->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
@@ -121,7 +120,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
}
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
- struct writeback_control *wbc, bool redirty, int unlock)
+ struct writeback_control *wbc, bool redirty, bool unlock)
{
unsigned int i;
@@ -134,9 +133,11 @@ static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
}
}
-struct page *f2fs_compress_control_page(struct page *page)
+struct folio *f2fs_compress_control_folio(struct folio *folio)
{
- return ((struct compress_io_ctx *)page_private(page))->rpages[0];
+ struct compress_io_ctx *ctx = folio->private;
+
+ return page_folio(ctx->rpages[0]);
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
@@ -144,38 +145,39 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;
- cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
- page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+ page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
+ cc->valid_nr_cpages = 0;
if (!reuse)
cc->cluster_idx = NULL_CLUSTER;
}
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
{
unsigned int cluster_ofs;
- if (!f2fs_cluster_can_merge_page(cc, page->index))
+ if (!f2fs_cluster_can_merge_page(cc, folio->index))
f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
- cluster_ofs = offset_in_cluster(cc, page->index);
- cc->rpages[cluster_ofs] = page;
+ cluster_ofs = offset_in_cluster(cc, folio->index);
+ cc->rpages[cluster_ofs] = folio_page(folio, 0);
cc->nr_rpages++;
- cc->cluster_idx = cluster_idx(cc, page->index);
+ cc->cluster_idx = cluster_idx(cc, folio->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
- cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
- LZO1X_MEM_COMPRESS, GFP_NOFS);
+ cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode),
+ LZO1X_MEM_COMPRESS);
if (!cc->private)
return -ENOMEM;
@@ -185,7 +187,7 @@ static int lzo_init_compress_ctx(struct compress_ctx *cc)
static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
- kvfree(cc->private);
+ vfree(cc->private);
cc->private = NULL;
}
@@ -196,8 +198,8 @@ static int lzo_compress_pages(struct compress_ctx *cc)
ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
&cc->clen, cc->private);
if (ret != LZO_E_OK) {
- printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+ "lzo compress failed, ret:%d", ret);
return -EIO;
}
return 0;
@@ -210,17 +212,15 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
dic->rbuf, &dic->rlen);
if (ret != LZO_E_OK) {
- printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+ f2fs_err_ratelimited(dic->sbi,
+ "lzo decompress failed, ret:%d", ret);
return -EIO;
}
if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
- printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
- "expected:%lu\n", KERN_ERR,
- F2FS_I_SB(dic->inode)->sb->s_id,
- dic->rlen,
- PAGE_SIZE << dic->log_cluster_size);
+ f2fs_err_ratelimited(dic->sbi,
+ "lzo invalid rlen:%zu, expected:%lu",
+ dic->rlen, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
}
return 0;
@@ -240,11 +240,11 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
unsigned int size = LZ4_MEM_COMPRESS;
#ifdef CONFIG_F2FS_FS_LZ4HC
- if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
+ if (F2FS_I(cc->inode)->i_compress_level)
size = LZ4HC_MEM_COMPRESS;
#endif
- cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
+ cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size);
if (!cc->private)
return -ENOMEM;
@@ -259,40 +259,25 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
- kvfree(cc->private);
+ vfree(cc->private);
cc->private = NULL;
}
-#ifdef CONFIG_F2FS_FS_LZ4HC
-static int lz4hc_compress_pages(struct compress_ctx *cc)
+static int lz4_compress_pages(struct compress_ctx *cc)
{
- unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
- COMPRESS_LEVEL_OFFSET;
- int len;
+ int len = -EINVAL;
+ unsigned char level = F2FS_I(cc->inode)->i_compress_level;
- if (level)
- len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
- cc->clen, level, cc->private);
- else
+ if (!level)
len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
cc->clen, cc->private);
- if (!len)
- return -EAGAIN;
-
- cc->clen = len;
- return 0;
-}
-#endif
-
-static int lz4_compress_pages(struct compress_ctx *cc)
-{
- int len;
-
#ifdef CONFIG_F2FS_FS_LZ4HC
- return lz4hc_compress_pages(cc);
+ else
+ len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+ cc->clen, level, cc->private);
#endif
- len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
- cc->clen, cc->private);
+ if (len < 0)
+ return len;
if (!len)
return -EAGAIN;
@@ -307,59 +292,63 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
dic->clen, dic->rlen);
if (ret < 0) {
- printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+ f2fs_err_ratelimited(dic->sbi,
+ "lz4 decompress failed, ret:%d", ret);
return -EIO;
}
if (ret != PAGE_SIZE << dic->log_cluster_size) {
- printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
- "expected:%lu\n", KERN_ERR,
- F2FS_I_SB(dic->inode)->sb->s_id,
- dic->rlen,
- PAGE_SIZE << dic->log_cluster_size);
+ f2fs_err_ratelimited(dic->sbi,
+ "lz4 invalid ret:%d, expected:%lu",
+ ret, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
}
return 0;
}
+static bool lz4_is_level_valid(int lvl)
+{
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
+#else
+ return lvl == 0;
+#endif
+}
+
static const struct f2fs_compress_ops f2fs_lz4_ops = {
.init_compress_ctx = lz4_init_compress_ctx,
.destroy_compress_ctx = lz4_destroy_compress_ctx,
.compress_pages = lz4_compress_pages,
.decompress_pages = lz4_decompress_pages,
+ .is_level_valid = lz4_is_level_valid,
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
-#define F2FS_ZSTD_DEFAULT_CLEVEL 1
-
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
- ZSTD_parameters params;
- ZSTD_CStream *stream;
+ zstd_parameters params;
+ zstd_cstream *stream;
void *workspace;
unsigned int workspace_size;
- unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
- COMPRESS_LEVEL_OFFSET;
+ unsigned char level = F2FS_I(cc->inode)->i_compress_level;
+ /* Keep this for backward compatibility */
if (!level)
level = F2FS_ZSTD_DEFAULT_CLEVEL;
- params = ZSTD_getParams(level, cc->rlen, 0);
- workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
+ params = zstd_get_params(level, cc->rlen);
+ workspace_size = zstd_cstream_workspace_bound(&params.cParams);
- workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
- workspace_size, GFP_NOFS);
+ workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size);
if (!workspace)
return -ENOMEM;
- stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
+ stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
if (!stream) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__);
- kvfree(workspace);
+ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+ "%s zstd_init_cstream failed", __func__);
+ vfree(workspace);
return -EIO;
}
@@ -372,16 +361,16 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
- kvfree(cc->private);
+ vfree(cc->private);
cc->private = NULL;
cc->private2 = NULL;
}
static int zstd_compress_pages(struct compress_ctx *cc)
{
- ZSTD_CStream *stream = cc->private2;
- ZSTD_inBuffer inbuf;
- ZSTD_outBuffer outbuf;
+ zstd_cstream *stream = cc->private2;
+ zstd_in_buffer inbuf;
+ zstd_out_buffer outbuf;
int src_size = cc->rlen;
int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
int ret;
@@ -394,19 +383,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
outbuf.dst = cc->cbuf->cdata;
outbuf.size = dst_size;
- ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ ret = zstd_compress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+ "%s zstd_compress_stream failed, ret: %d",
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
- ret = ZSTD_endStream(stream, &outbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ ret = zstd_end_stream(stream, &outbuf);
+ if (zstd_is_error(ret)) {
+ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+ "%s zstd_end_stream returned %d",
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
@@ -423,25 +412,23 @@ static int zstd_compress_pages(struct compress_ctx *cc)
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
void *workspace;
unsigned int workspace_size;
unsigned int max_window_size =
MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
- workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
+ workspace_size = zstd_dstream_workspace_bound(max_window_size);
- workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
- workspace_size, GFP_NOFS);
+ workspace = f2fs_vmalloc(dic->sbi, workspace_size);
if (!workspace)
return -ENOMEM;
- stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
+ stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
if (!stream) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
- __func__);
- kvfree(workspace);
+ f2fs_err_ratelimited(dic->sbi,
+ "%s zstd_init_dstream failed", __func__);
+ vfree(workspace);
return -EIO;
}
@@ -453,16 +440,16 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
- kvfree(dic->private);
+ vfree(dic->private);
dic->private = NULL;
dic->private2 = NULL;
}
static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
- ZSTD_DStream *stream = dic->private2;
- ZSTD_inBuffer inbuf;
- ZSTD_outBuffer outbuf;
+ zstd_dstream *stream = dic->private2;
+ zstd_in_buffer inbuf;
+ zstd_out_buffer outbuf;
int ret;
inbuf.pos = 0;
@@ -473,18 +460,17 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
outbuf.dst = dic->rbuf;
outbuf.size = dic->rlen;
- ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+ f2fs_err_ratelimited(dic->sbi,
+ "%s zstd_decompress_stream failed, ret: %d",
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
if (dic->rlen != outbuf.pos) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
- "expected:%lu\n", KERN_ERR,
- F2FS_I_SB(dic->inode)->sb->s_id,
+ f2fs_err_ratelimited(dic->sbi,
+ "%s ZSTD invalid rlen:%zu, expected:%lu",
__func__, dic->rlen,
PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -493,6 +479,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
return 0;
}
+static bool zstd_is_level_valid(int lvl)
+{
+ return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
+}
+
static const struct f2fs_compress_ops f2fs_zstd_ops = {
.init_compress_ctx = zstd_init_compress_ctx,
.destroy_compress_ctx = zstd_destroy_compress_ctx,
@@ -500,6 +491,7 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
.init_decompress_ctx = zstd_init_decompress_ctx,
.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
.decompress_pages = zstd_decompress_pages,
+ .is_level_valid = zstd_is_level_valid,
};
#endif
@@ -512,8 +504,8 @@ static int lzorle_compress_pages(struct compress_ctx *cc)
ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
&cc->clen, cc->private);
if (ret != LZO_E_OK) {
- printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+ "lzo-rle compress failed, ret:%d", ret);
return -EIO;
}
return 0;
@@ -558,19 +550,26 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
+bool f2fs_is_compress_level_valid(int alg, int lvl)
+{
+ const struct f2fs_compress_ops *cops = f2fs_cops[alg];
+
+ if (cops->is_level_valid)
+ return cops->is_level_valid(lvl);
+
+ return lvl == 0;
+}
+
static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
"Number of intermediate compress pages to preallocate");
-int f2fs_init_compress_mempool(void)
+int __init f2fs_init_compress_mempool(void)
{
compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
- if (!compress_page_pool)
- return -ENOMEM;
-
- return 0;
+ return compress_page_pool ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_mempool(void)
@@ -590,11 +589,14 @@ static struct page *f2fs_compress_alloc_page(void)
static void f2fs_compress_free_page(struct page *page)
{
+ struct folio *folio;
+
if (!page)
return;
- detach_page_private(page);
- page->mapping = NULL;
- unlock_page(page);
+ folio = page_folio(page);
+ folio_detach_private(folio);
+ folio->mapping = NULL;
+ folio_unlock(folio);
mempool_free(page, compress_page_pool);
}
@@ -616,11 +618,11 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
static int f2fs_compress_pages(struct compress_ctx *cc)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
unsigned int max_len, new_nr_cpages;
- struct page **new_cpages;
u32 chksum = 0;
int i, ret;
@@ -635,20 +637,16 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+ cc->valid_nr_cpages = cc->nr_cpages;
- cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
+ cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
}
- for (i = 0; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++)
cc->cpages[i] = f2fs_compress_alloc_page();
- if (!cc->cpages[i]) {
- ret = -ENOMEM;
- goto out_free_cpages;
- }
- }
cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
if (!cc->rbuf) {
@@ -675,9 +673,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
cc->cbuf->clen = cpu_to_le32(cc->clen);
- if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
- chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
- cc->cbuf->cdata, cc->clen);
+ if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
+ chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen);
cc->cbuf->chksum = cpu_to_le32(chksum);
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
@@ -685,13 +682,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
- /* Now we're going to cut unnecessary tail pages */
- new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
- if (!new_cpages) {
- ret = -ENOMEM;
- goto out_vunmap_cbuf;
- }
-
/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
(new_nr_cpages * PAGE_SIZE) -
@@ -700,11 +690,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
vm_unmap_ram(cc->cbuf, cc->nr_cpages);
vm_unmap_ram(cc->rbuf, cc->cluster_size);
- for (i = 0; i < cc->nr_cpages; i++) {
- if (i < new_nr_cpages) {
- new_cpages[i] = cc->cpages[i];
- continue;
- }
+ for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -712,9 +698,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
- cc->cpages = new_cpages;
- cc->nr_cpages = new_nr_cpages;
+ cc->valid_nr_cpages = new_nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -729,7 +713,7 @@ out_free_cpages:
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -740,14 +724,19 @@ out:
return ret;
}
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+ bool pre_alloc);
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback, bool pre_alloc);
+
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+ struct f2fs_sb_info *sbi = dic->sbi;
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
+ bool bypass_callback = false;
int ret;
- int i;
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm);
@@ -757,41 +746,10 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
goto out_end_io;
}
- dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
- if (!dic->tpages) {
- ret = -ENOMEM;
- goto out_end_io;
- }
-
- for (i = 0; i < dic->cluster_size; i++) {
- if (dic->rpages[i]) {
- dic->tpages[i] = dic->rpages[i];
- continue;
- }
-
- dic->tpages[i] = f2fs_compress_alloc_page();
- if (!dic->tpages[i]) {
- ret = -ENOMEM;
- goto out_end_io;
- }
- }
-
- if (cops->init_decompress_ctx) {
- ret = cops->init_decompress_ctx(dic);
- if (ret)
- goto out_end_io;
- }
-
- dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
- if (!dic->rbuf) {
- ret = -ENOMEM;
- goto out_destroy_decompress_ctx;
- }
-
- dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
- if (!dic->cbuf) {
- ret = -ENOMEM;
- goto out_vunmap_rbuf;
+ ret = f2fs_prepare_decomp_mem(dic, false);
+ if (ret) {
+ bypass_callback = true;
+ goto out_release;
}
dic->clen = le32_to_cpu(dic->cbuf->clen);
@@ -799,63 +757,64 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
ret = -EFSCORRUPTED;
- goto out_vunmap_cbuf;
+
+ /* Avoid f2fs_commit_super in irq context */
+ f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+ goto out_release;
}
ret = cops->decompress_pages(dic);
- if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+ if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
u32 provided = le32_to_cpu(dic->cbuf->chksum);
- u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+ u32 calculated = f2fs_crc32(dic->cbuf->cdata, dic->clen);
if (provided != calculated) {
if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
- printk_ratelimited(
- "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
- KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
+ f2fs_info_ratelimited(sbi,
+ "checksum invalid, nid = %lu, %x vs %x",
+ dic->inode->i_ino,
provided, calculated);
}
set_sbi_flag(sbi, SBI_NEED_FSCK);
}
}
-out_vunmap_cbuf:
- vm_unmap_ram(dic->cbuf, dic->nr_cpages);
-out_vunmap_rbuf:
- vm_unmap_ram(dic->rbuf, dic->cluster_size);
-out_destroy_decompress_ctx:
- if (cops->destroy_decompress_ctx)
- cops->destroy_decompress_ctx(dic);
+out_release:
+ f2fs_release_decomp_mem(dic, bypass_callback, false);
+
out_end_io:
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
dic->clen, ret);
- f2fs_decompress_end_io(dic, ret);
+ f2fs_decompress_end_io(dic, ret, in_task);
}
+static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+ struct folio *folio, nid_t ino, block_t blkaddr);
+
/*
* This is called when a page of a compressed cluster has been read from disk
* (or failed to be read from disk). It checks whether this page was the last
* page being waited on in the cluster, and if so, it decompresses the cluster
* (or in the case of a failure, cleans up without actually decompressing).
*/
-void f2fs_end_read_compressed_page(struct page *page, bool failed,
- block_t blkaddr)
+void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
+ block_t blkaddr, bool in_task)
{
- struct decompress_io_ctx *dic =
- (struct decompress_io_ctx *)page_private(page);
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+ struct decompress_io_ctx *dic = folio->private;
+ struct f2fs_sb_info *sbi = dic->sbi;
dec_page_count(sbi, F2FS_RD_DATA);
if (failed)
WRITE_ONCE(dic->failed, true);
- else if (blkaddr)
- f2fs_cache_compressed_page(sbi, page,
+ else if (blkaddr && in_task)
+ f2fs_cache_compressed_page(sbi, folio,
dic->inode->i_ino, blkaddr);
if (atomic_dec_and_test(&dic->remaining_pages))
- f2fs_decompress_cluster(dic);
+ f2fs_decompress_cluster(dic, in_task);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -882,19 +841,28 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
-bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
- int index, int nr_pages)
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
+ int index, int nr_pages, bool uptodate)
{
- unsigned long pgidx;
- int i;
+ unsigned long pgidx = page_folio(pages[index])->index;
+ int i = uptodate ? 0 : 1;
+
+ /*
+ * when uptodate is set to true, check whether all pages in the
+ * cluster are uptodate.
+ */
+ if (uptodate && (pgidx % cc->cluster_size))
+ return false;
if (nr_pages - index < cc->cluster_size)
return false;
- pgidx = pvec->pages[index]->index;
+ for (; i < cc->cluster_size; i++) {
+ struct folio *folio = page_folio(pages[index + i]);
- for (i = 1; i < cc->cluster_size; i++) {
- if (pvec->pages[index + i]->index != pgidx + i)
+ if (folio->index != pgidx + i)
+ return false;
+ if (uptodate && !folio_test_uptodate(folio))
return false;
}
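/*
 * Illustrative sketch, not part of the patch: when checking for uptodate
 * data, f2fs_all_cluster_page_ready() requires (a) the first page to sit
 * on a cluster boundary, (b) at least cluster_size pages left in the
 * batch, and (c) strictly consecutive page indexes.  A stripped-down
 * check over a plain index array, with cluster_size assumed to divide
 * the index space evenly:
 */
#include <stdbool.h>
#include <stdio.h>

static bool cluster_ready(const unsigned long *idx, int nr, int start,
			  int cluster_size)
{
	int i;

	if (idx[start] % cluster_size)		/* must start on a boundary */
		return false;
	if (nr - start < cluster_size)		/* whole cluster present? */
		return false;
	for (i = 1; i < cluster_size; i++)
		if (idx[start + i] != idx[start] + i)
			return false;		/* gap in the run */
	return true;
}

int main(void)
{
	unsigned long idx[] = { 4, 5, 6, 7 };

	printf("%d\n", cluster_ready(idx, 4, 0, 4));	/* aligned, contiguous */
	return 0;
}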
@@ -913,7 +881,7 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
/* beyond EOF */
- if (page->index >= nr_pages)
+ if (page_folio(page)->index >= nr_pages)
return true;
}
return false;
@@ -921,14 +889,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
+#ifdef CONFIG_F2FS_CHECK_FS
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
- bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
int cluster_end = 0;
+ unsigned int count;
int i;
char *reason = "";
- if (!compressed)
+ if (dn->data_blkaddr != COMPRESS_ADDR)
return false;
/* [..., COMPR_ADDR, ...] */
@@ -937,8 +906,8 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
goto out;
}
- for (i = 1; i < cluster_size; i++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ for (i = 1, count = 1; i < cluster_size; i++, count++) {
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
/* [COMPR_ADDR, ..., COMPR_ADDR] */
@@ -946,32 +915,53 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
reason = "[C|*|C|*]";
goto out;
}
- if (compressed) {
- if (!__is_valid_data_blkaddr(blkaddr)) {
- if (!cluster_end)
- cluster_end = i;
- continue;
- }
- /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
- if (cluster_end) {
- reason = "[C|N|N|V]";
- goto out;
- }
+ if (!__is_valid_data_blkaddr(blkaddr)) {
+ if (!cluster_end)
+ cluster_end = i;
+ continue;
+ }
+ /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
+ if (cluster_end) {
+ reason = "[C|N|N|V]";
+ goto out;
}
}
+
+ f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
+ !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
+
return false;
out:
f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
set_sbi_flag(sbi, SBI_NEED_FSCK);
return true;
+#else
+ return false;
+#endif
}
-static int __f2fs_cluster_blocks(struct inode *inode,
- unsigned int cluster_idx, bool compr)
+static int __f2fs_get_cluster_blocks(struct inode *inode,
+ struct dnode_of_data *dn)
{
- struct dnode_of_data dn;
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int count, i;
+
+ for (i = 0, count = 0; i < cluster_size; i++) {
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ dn->ofs_in_node + i);
+
+ if (__is_valid_data_blkaddr(blkaddr))
+ count++;
+ }
+
+ return count;
+}
+
+static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
+ enum cluster_check_type type)
+{
+ struct dnode_of_data dn;
unsigned int start_idx = cluster_idx <<
F2FS_I(inode)->i_log_cluster_size;
int ret;
@@ -990,26 +980,12 @@ static int __f2fs_cluster_blocks(struct inode *inode,
}
if (dn.data_blkaddr == COMPRESS_ADDR) {
- int i;
-
- ret = 1;
- for (i = 1; i < cluster_size; i++) {
- block_t blkaddr;
-
- blkaddr = data_blkaddr(dn.inode,
- dn.node_page, dn.ofs_in_node + i);
- if (compr) {
- if (__is_valid_data_blkaddr(blkaddr))
- ret++;
- } else {
- if (blkaddr != NULL_ADDR)
- ret++;
- }
- }
-
- f2fs_bug_on(F2FS_I_SB(inode),
- !compr && ret != cluster_size &&
- !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
+ if (type == CLUSTER_COMPR_BLKS)
+ ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
+ else if (type == CLUSTER_IS_COMPR)
+ ret = 1;
+ } else if (type == CLUSTER_RAW_BLKS) {
+ ret = __f2fs_get_cluster_blocks(inode, &dn);
}
fail:
f2fs_put_dnode(&dn);
@@ -1019,15 +995,33 @@ fail:
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
- return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
+ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
+ CLUSTER_COMPR_BLKS);
}
-/* return # of valid blocks in compressed cluster */
+/* return # of raw blocks in non-compressed cluster */
+static int f2fs_decompressed_blocks(struct inode *inode,
+ unsigned int cluster_idx)
+{
+ return __f2fs_cluster_blocks(inode, cluster_idx,
+ CLUSTER_RAW_BLKS);
+}
+
+/* return whether cluster is compressed one or not */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
return __f2fs_cluster_blocks(inode,
index >> F2FS_I(inode)->i_log_cluster_size,
- false);
+ CLUSTER_IS_COMPR);
+}
+
+/* return whether cluster contains non raw blocks or not */
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
+{
+ unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
+
+ return f2fs_decompressed_blocks(inode, cluster_idx) !=
+ F2FS_I(inode)->i_cluster_size;
}
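/*
 * Illustrative sketch, not part of the patch: __f2fs_cluster_blocks() now
 * answers three questions through one walk of the cluster's block
 * addresses -- CLUSTER_COMPR_BLKS (compressed block count, plus 1 for the
 * COMPRESS_ADDR header), CLUSTER_IS_COMPR (is the cluster compressed at
 * all), and CLUSTER_RAW_BLKS (raw block count in a non-compressed
 * cluster), which f2fs_is_sparse_cluster() compares against cluster_size.
 * A stand-alone model over a plain array, where -1 marks the
 * COMPRESS_ADDR header and 0 marks a hole:
 */
#include <stdio.h>

enum check_type { COMPR_BLKS, IS_COMPR, RAW_BLKS };

static int cluster_blocks(const long *blk, int size, enum check_type type)
{
	int i, count = 0;

	for (i = 0; i < size; i++)
		if (blk[i] > 0)			/* valid on-disk address */
			count++;

	if (blk[0] == -1) {			/* COMPRESS_ADDR header */
		if (type == COMPR_BLKS)
			return 1 + count;
		return type == IS_COMPR ? 1 : 0;
	}
	return type == RAW_BLKS ? count : 0;
}

int main(void)
{
	long blk[] = { -1, 123, 124, 0 };	/* header + 2 blocks + hole */

	printf("%d\n", cluster_blocks(blk, 4, COMPR_BLKS));
	return 0;
}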
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -1053,13 +1047,40 @@ static void set_cluster_writeback(struct compress_ctx *cc)
}
}
+static void cancel_cluster_writeback(struct compress_ctx *cc,
+ struct compress_io_ctx *cic, int submitted)
+{
+ int i;
+
+ /* Wait for submitted IOs. */
+ if (submitted > 1) {
+ f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
+ while (atomic_read(&cic->pending_pages) !=
+ (cc->valid_nr_cpages - submitted + 1))
+ f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
+ }
+
+ /* Cancel writeback and stay locked. */
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (i < submitted) {
+ inode_inc_dirty_pages(cc->inode);
+ lock_page(cc->rpages[i]);
+ }
+ clear_page_private_gcing(cc->rpages[i]);
+ if (folio_test_writeback(page_folio(cc->rpages[i])))
+ end_page_writeback(cc->rpages[i]);
+ }
+}
+
static void set_cluster_dirty(struct compress_ctx *cc)
{
int i;
for (i = 0; i < cc->cluster_size; i++)
- if (cc->rpages[i])
+ if (cc->rpages[i]) {
set_page_dirty(cc->rpages[i]);
+ set_page_private_gcing(cc->rpages[i]);
+ }
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
@@ -1067,9 +1088,9 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct address_space *mapping = cc->inode->i_mapping;
- struct page *page;
+ struct folio *folio;
sector_t last_block_in_bio;
- unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
+ fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret;
@@ -1082,32 +1103,32 @@ retry:
if (ret)
return ret;
- /* keep page reference to avoid page reclaim */
+ /* keep folio reference to avoid page reclaim */
for (i = 0; i < cc->cluster_size; i++) {
- page = f2fs_pagecache_get_page(mapping, start_idx + i,
- fgp_flag, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
+ folio = f2fs_filemap_get_folio(mapping, start_idx + i,
+ fgp_flag, GFP_NOFS);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto unlock_pages;
}
- if (PageUptodate(page))
- f2fs_put_page(page, 1);
+ if (folio_test_uptodate(folio))
+ f2fs_folio_put(folio, true);
else
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, folio);
}
if (!f2fs_cluster_is_empty(cc)) {
struct bio *bio = NULL;
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
- &last_block_in_bio, false, true);
+ &last_block_in_bio, NULL, true);
f2fs_put_rpages(cc);
f2fs_destroy_compress_ctx(cc, true);
if (ret)
goto out;
if (bio)
- f2fs_submit_bio(sbi, bio, DATA);
+ f2fs_submit_read_bio(sbi, bio, DATA);
ret = f2fs_init_compress_ctx(cc);
if (ret)
@@ -1117,16 +1138,17 @@ retry:
for (i = 0; i < cc->cluster_size; i++) {
f2fs_bug_on(sbi, cc->rpages[i]);
- page = find_lock_page(mapping, start_idx + i);
- if (!page) {
- /* page can be truncated */
+ folio = filemap_lock_folio(mapping, start_idx + i);
+ if (IS_ERR(folio)) {
+ /* folio could be truncated */
goto release_and_retry;
}
- f2fs_wait_on_page_writeback(page, DATA, true, true);
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ f2fs_compress_ctx_add_page(cc, folio);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
+ f2fs_handle_page_eio(sbi, folio, DATA);
release_and_retry:
f2fs_put_rpages(cc);
f2fs_unlock_rpages(cc, i + 1);
@@ -1174,12 +1196,13 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
};
- bool first_index = (index == cc.rpages[0]->index);
+ struct folio *folio = page_folio(cc.rpages[0]);
+ bool first_index = (index == folio->index);
if (copied)
set_cluster_dirty(&cc);
- f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+ f2fs_put_rpages_wbc(&cc, NULL, false, true);
f2fs_destroy_compress_ctx(&cc, false);
return first_index;
@@ -1189,9 +1212,11 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
void *fsdata = NULL;
struct page *pagep;
+ struct page **rpages;
int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
log_cluster_size;
+ int i;
int err;
err = f2fs_is_compressed_cluster(inode, start_idx);
@@ -1212,26 +1237,30 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
if (err <= 0)
return err;
- if (err > 0) {
- struct page **rpages = fsdata;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int i;
+ rpages = fsdata;
- for (i = cluster_size - 1; i >= 0; i--) {
- loff_t start = rpages[i]->index << PAGE_SHIFT;
+ for (i = (1 << log_cluster_size) - 1; i >= 0; i--) {
+ struct folio *folio = page_folio(rpages[i]);
+ loff_t start = (loff_t)folio->index << PAGE_SHIFT;
+ loff_t offset = from > start ? from - start : 0;
- if (from <= start) {
- zero_user_segment(rpages[i], 0, PAGE_SIZE);
- } else {
- zero_user_segment(rpages[i], from - start,
- PAGE_SIZE);
- break;
- }
- }
+ folio_zero_segment(folio, offset, folio_size(folio));
- f2fs_compress_write_end(inode, fsdata, start_idx, true);
+ if (from >= start)
+ break;
}
- return 0;
+
+ f2fs_compress_write_end(inode, fsdata, start_idx, true);
+
+ err = filemap_write_and_wait_range(inode->i_mapping,
+ round_down(from, 1 << log_cluster_size << PAGE_SHIFT),
+ LLONG_MAX);
+ if (err)
+ return err;
+
+ truncate_pagecache(inode, from);
+
+ return f2fs_do_truncate_blocks(inode, round_up(from, PAGE_SIZE), lock);
}
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
@@ -1252,11 +1281,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.page = NULL,
.encrypted_page = NULL,
.compressed_page = NULL,
- .submitted = false,
.io_type = io_type,
.io_wbc = wbc,
- .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
+ .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
+ 1 : 0,
};
+ struct folio *folio;
struct dnode_of_data dn;
struct node_info ni;
struct compress_io_ctx *cic;
@@ -1264,20 +1294,21 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
unsigned int last_index = cc->cluster_size - 1;
loff_t psize;
int i, err;
+ bool quota_inode = IS_NOQUOTA(inode);
- /* we should bypass data pages to proceed the kworkder jobs */
+ /* we should bypass data pages so the kworker jobs can proceed */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ mapping_set_error(inode->i_mapping, -EIO);
goto out_free;
}
- if (IS_NOQUOTA(inode)) {
+ if (quota_inode) {
/*
* We need to wait for node_write to avoid block allocation during
* checkpoint. This can only happen to quota writes which can cause
* the below discard race condition.
*/
- down_read(&sbi->node_write);
+ f2fs_down_read(&sbi->node_write);
} else if (!f2fs_trylock_op(sbi)) {
goto out_free;
}
@@ -1289,14 +1320,15 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_unlock_op;
for (i = 0; i < cc->cluster_size; i++) {
- if (data_blkaddr(dn.inode, dn.node_page,
+ if (data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) == NULL_ADDR)
goto out_put_dnode;
}
- psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+ folio = page_folio(cc->rpages[last_index]);
+ psize = folio_next_pos(folio);
- err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
goto out_put_dnode;
@@ -1308,19 +1340,19 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
- atomic_set(&cic->pending_pages, cc->nr_cpages);
- cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
+ cic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
cic->nr_rpages = cc->cluster_size;
- for (i = 0; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
- cc->rpages[i + 1]->index, cic);
+ page_folio(cc->rpages[i + 1])->index, cic);
fio.compressed_page = cc->cpages[i];
- fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
+ fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1);
/* wait for GCed page writeback via META_MAPPING */
@@ -1352,7 +1384,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (blkaddr == COMPRESS_ADDR)
fio.compr_blocks++;
if (__is_valid_data_blkaddr(blkaddr))
- f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
goto unlock_continue;
}
@@ -1360,9 +1392,9 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
fio.compr_blocks++;
- if (i > cc->nr_cpages) {
+ if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
- f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
}
goto unlock_continue;
@@ -1376,25 +1408,32 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
fio.compressed_page = cc->cpages[i - 1];
cc->cpages[i - 1] = NULL;
+ fio.submitted = 0;
f2fs_outplace_write_data(&dn, &fio);
+ if (unlikely(!fio.submitted)) {
+ cancel_cluster_writeback(cc, cic, i);
+
+ /* To call fscrypt_finalize_bounce_page */
+ i = cc->valid_nr_cpages;
+ *submitted = 0;
+ goto out_destroy_crypt;
+ }
(*submitted)++;
unlock_continue:
inode_dec_dirty_pages(cc->inode);
- unlock_page(fio.page);
+ folio_unlock(fio.folio);
}
if (fio.compr_blocks)
f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
- f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
- add_compr_block_stat(inode, cc->nr_cpages);
+ f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
+ add_compr_block_stat(inode, cc->valid_nr_cpages);
set_inode_flag(cc->inode, FI_APPEND_WRITE);
- if (cc->cluster_idx == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
- if (IS_NOQUOTA(inode))
- up_read(&sbi->node_write);
+ if (quota_inode)
+ f2fs_up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
@@ -1404,50 +1443,53 @@ unlock_continue:
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc, false);
return 0;
out_destroy_crypt:
- page_array_free(cc->inode, cic->rpages, cc->cluster_size);
+ page_array_free(sbi, cic->rpages, cc->cluster_size);
- for (--i; i >= 0; i--)
+ for (--i; i >= 0; i--) {
+ if (!cc->cpages[i])
+ continue;
fscrypt_finalize_bounce_page(&cc->cpages[i]);
+ }
out_put_cic:
kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
- if (IS_NOQUOTA(inode))
- up_read(&sbi->node_write);
+ if (quota_inode)
+ f2fs_up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
out_free:
- for (i = 0; i < cc->nr_cpages; i++) {
- if (!cc->cpages[i])
- continue;
+ for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
return -EAGAIN;
}
-void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio)
{
+ struct page *page = &folio->page;
struct f2fs_sb_info *sbi = bio->bi_private;
- struct compress_io_ctx *cic =
- (struct compress_io_ctx *)page_private(page);
+ struct compress_io_ctx *cic = folio->private;
+ enum count_type type = WB_DATA_TYPE(folio,
+ f2fs_is_compressed_page(folio));
int i;
- if (unlikely(bio->bi_status))
+ if (unlikely(bio->bi_status != BLK_STS_OK))
mapping_set_error(cic->inode->i_mapping, -EIO);
f2fs_compress_free_page(page);
- dec_page_count(sbi, F2FS_WB_DATA);
+ dec_page_count(sbi, type);
if (atomic_dec_return(&cic->pending_pages))
return;
@@ -1458,85 +1500,95 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
- page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
+ page_array_free(sbi, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
- int *submitted,
+ int *submitted_p,
struct writeback_control *wbc,
enum iostat_type io_type)
{
struct address_space *mapping = cc->inode->i_mapping;
- int _submitted, compr_blocks, ret;
- int i = -1, err = 0;
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ int submitted, compr_blocks, i;
+ int ret = 0;
compr_blocks = f2fs_compressed_blocks(cc);
- if (compr_blocks < 0) {
- err = compr_blocks;
- goto out_err;
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (!cc->rpages[i])
+ continue;
+
+ redirty_page_for_writepage(wbc, cc->rpages[i]);
+ unlock_page(cc->rpages[i]);
}
+ if (compr_blocks < 0)
+ return compr_blocks;
+
+ /* overwrite compressed cluster w/ normal cluster */
+ if (compr_blocks > 0)
+ f2fs_lock_op(sbi);
+
for (i = 0; i < cc->cluster_size; i++) {
+ struct folio *folio;
+
if (!cc->rpages[i])
continue;
+ folio = page_folio(cc->rpages[i]);
retry_write:
- if (cc->rpages[i]->mapping != mapping) {
- unlock_page(cc->rpages[i]);
+ folio_lock(folio);
+
+ if (folio->mapping != mapping) {
+continue_unlock:
+ folio_unlock(folio);
continue;
}
- BUG_ON(!PageLocked(cc->rpages[i]));
+ if (!folio_test_dirty(folio))
+ goto continue_unlock;
+
+ if (folio_test_writeback(folio)) {
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ goto continue_unlock;
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ }
+
+ if (!folio_clear_dirty_for_io(folio))
+ goto continue_unlock;
- ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+ submitted = 0;
+ ret = f2fs_write_single_data_page(folio, &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(cc->rpages[i]);
+ if (ret == 1) {
ret = 0;
} else if (ret == -EAGAIN) {
+ ret = 0;
/*
* for quota file, just redirty left pages to
* avoid deadlock caused by cluster update race
* from foreground operation.
*/
- if (IS_NOQUOTA(cc->inode)) {
- err = 0;
- goto out_err;
- }
- ret = 0;
- cond_resched();
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
- lock_page(cc->rpages[i]);
-
- if (!PageDirty(cc->rpages[i])) {
- unlock_page(cc->rpages[i]);
- continue;
- }
-
- clear_page_dirty_for_io(cc->rpages[i]);
+ if (IS_NOQUOTA(cc->inode))
+ goto out;
+ f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
goto retry_write;
}
- err = ret;
- goto out_err;
+ goto out;
}
- *submitted += _submitted;
+ *submitted_p += submitted;
}
- f2fs_balance_fs(F2FS_M_SB(mapping), true);
+out:
+ if (compr_blocks > 0)
+ f2fs_unlock_op(sbi);
- return 0;
-out_err:
- for (++i; i < cc->cluster_size; i++) {
- if (!cc->rpages[i])
- continue;
- redirty_page_for_writepage(wbc, cc->rpages[i]);
- unlock_page(cc->rpages[i]);
- }
- return err;
+ f2fs_balance_fs(sbi, true);
+ return ret;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
@@ -1553,7 +1605,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
add_compr_block_stat(cc->inode, cc->cluster_size);
goto write;
} else if (err) {
- f2fs_put_rpages_wbc(cc, wbc, true, 1);
+ f2fs_put_rpages_wbc(cc, wbc, true, true);
goto destroy_out;
}
@@ -1567,26 +1619,87 @@ write:
f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
- f2fs_put_rpages_wbc(cc, wbc, false, 0);
+ f2fs_put_rpages_wbc(cc, wbc, false, false);
destroy_out:
f2fs_destroy_compress_ctx(cc, false);
return err;
}
-static void f2fs_free_dic(struct decompress_io_ctx *dic);
+static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
+ bool pre_alloc)
+{
+ return pre_alloc ^ f2fs_low_mem_mode(sbi);
+}
+
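/*
 * Illustrative sketch, not part of the patch: the XOR in
 * allow_memalloc_for_decomp() above picks exactly one phase in which the
 * decompression buffers are set up. With low-memory mode off they are set up
 * during pre-allocation (pre_alloc == true); with it on, only when
 * decompression actually runs. Standalone userspace truth-table example.
 */
#include <stdio.h>
#include <stdbool.h>

static bool allow_memalloc(bool low_mem_mode, bool pre_alloc)
{
	return pre_alloc ^ low_mem_mode;
}

int main(void)
{
	int lm, pa;

	for (lm = 0; lm <= 1; lm++)
		for (pa = 0; pa <= 1; pa++)
			printf("low_mem=%d pre_alloc=%d -> allocate=%d\n",
			       lm, pa, allow_memalloc(lm, pa));
	return 0;
}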
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+ bool pre_alloc)
+{
+ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
+ int i;
+
+ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
+ return 0;
+
+ dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
+ if (!dic->tpages)
+ return -ENOMEM;
+
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (dic->rpages[i]) {
+ dic->tpages[i] = dic->rpages[i];
+ continue;
+ }
+
+ dic->tpages[i] = f2fs_compress_alloc_page();
+ }
+
+ dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
+ if (!dic->rbuf)
+ return -ENOMEM;
+
+ dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+ if (!dic->cbuf)
+ return -ENOMEM;
+
+ if (cops->init_decompress_ctx)
+ return cops->init_decompress_ctx(dic);
+
+ return 0;
+}
+
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback, bool pre_alloc)
+{
+ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
+
+ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
+ return;
+
+ if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+ cops->destroy_decompress_ctx(dic);
+
+ if (dic->cbuf)
+ vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+
+ if (dic->rbuf)
+ vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
struct decompress_io_ctx *dic;
pgoff_t start_idx = start_idx_of_cluster(cc);
- int i;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+ int i, ret;
- dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
- false, F2FS_I_SB(cc->inode));
+ dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
if (!dic)
return ERR_PTR(-ENOMEM);
- dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ dic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1594,6 +1707,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode;
+ dic->sbi = sbi;
+ dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
atomic_set(&dic->remaining_pages, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size;
@@ -1607,32 +1722,40 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
- dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
- if (!dic->cpages)
+ dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
+ if (!dic->cpages) {
+ ret = -ENOMEM;
goto out_free;
+ }
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page;
page = f2fs_compress_alloc_page();
- if (!page)
- goto out_free;
-
f2fs_set_compressed_page(page, cc->inode,
start_idx + i + 1, dic);
dic->cpages[i] = page;
}
+ ret = f2fs_prepare_decomp_mem(dic, true);
+ if (ret)
+ goto out_free;
+
return dic;
out_free:
- f2fs_free_dic(dic);
- return ERR_PTR(-ENOMEM);
+ f2fs_free_dic(dic, true);
+ return ERR_PTR(ret);
}
-static void f2fs_free_dic(struct decompress_io_ctx *dic)
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback)
{
int i;
+ /* use sbi in dic to avoid use-after-free of dic->inode */
+ struct f2fs_sb_info *sbi = dic->sbi;
+
+ f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
if (dic->tpages) {
for (i = 0; i < dic->cluster_size; i++) {
@@ -1642,7 +1765,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- page_array_free(dic->inode, dic->tpages, dic->cluster_size);
+ page_array_free(sbi, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@@ -1651,45 +1774,31 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+ page_array_free(sbi, dic->cpages, dic->nr_cpages);
}
- page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+ page_array_free(sbi, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}
-static void f2fs_put_dic(struct decompress_io_ctx *dic)
+static void f2fs_late_free_dic(struct work_struct *work)
{
- if (refcount_dec_and_test(&dic->refcnt))
- f2fs_free_dic(dic);
+ struct decompress_io_ctx *dic =
+ container_of(work, struct decompress_io_ctx, free_work);
+
+ f2fs_free_dic(dic, false);
}
-/*
- * Update and unlock the cluster's pagecache pages, and release the reference to
- * the decompress_io_ctx that was being held for I/O completion.
- */
-static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
{
- int i;
-
- for (i = 0; i < dic->cluster_size; i++) {
- struct page *rpage = dic->rpages[i];
-
- if (!rpage)
- continue;
-
- /* PG_error was set if verity failed. */
- if (failed || PageError(rpage)) {
- ClearPageUptodate(rpage);
- /* will re-read again later */
- ClearPageError(rpage);
+ if (refcount_dec_and_test(&dic->refcnt)) {
+ if (in_task) {
+ f2fs_free_dic(dic, false);
} else {
- SetPageUptodate(rpage);
+ INIT_WORK(&dic->free_work, f2fs_late_free_dic);
+ queue_work(dic->sbi->post_read_wq, &dic->free_work);
}
- unlock_page(rpage);
}
-
- f2fs_put_dic(dic);
}
static void f2fs_verify_cluster(struct work_struct *work)
@@ -1698,23 +1807,32 @@ static void f2fs_verify_cluster(struct work_struct *work)
container_of(work, struct decompress_io_ctx, verity_work);
int i;
- /* Verify the cluster's decompressed pages with fs-verity. */
+ /* Verify, update, and unlock the decompressed pages. */
for (i = 0; i < dic->cluster_size; i++) {
struct page *rpage = dic->rpages[i];
- if (rpage && !fsverity_verify_page(rpage))
- SetPageError(rpage);
+ if (!rpage)
+ continue;
+
+ if (fsverity_verify_page(rpage))
+ SetPageUptodate(rpage);
+ else
+ ClearPageUptodate(rpage);
+ unlock_page(rpage);
}
- __f2fs_decompress_end_io(dic, false);
+ f2fs_put_dic(dic, true);
}
/*
* This is called when a compressed cluster has been decompressed
* (or failed to be read and/or decompressed).
*/
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ bool in_task)
{
+ int i;
+
if (!failed && dic->need_verity) {
/*
* Note that to avoid deadlocks, the verity work can't be done
@@ -1724,38 +1842,58 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
*/
INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
fsverity_enqueue_verify_work(&dic->verity_work);
- } else {
- __f2fs_decompress_end_io(dic, failed);
+ return;
+ }
+
+ /* Update and unlock the cluster's pagecache pages. */
+ for (i = 0; i < dic->cluster_size; i++) {
+ struct page *rpage = dic->rpages[i];
+
+ if (!rpage)
+ continue;
+
+ if (failed)
+ ClearPageUptodate(rpage);
+ else
+ SetPageUptodate(rpage);
+ unlock_page(rpage);
}
+
+ /*
+ * Release the reference to the decompress_io_ctx that was being held
+ * for I/O completion.
+ */
+ f2fs_put_dic(dic, in_task);
}
/*
- * Put a reference to a compressed page's decompress_io_ctx.
+ * Put a reference to a compressed folio's decompress_io_ctx.
*
- * This is called when the page is no longer needed and can be freed.
+ * This is called when the folio is no longer needed and can be freed.
*/
-void f2fs_put_page_dic(struct page *page)
+void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
- struct decompress_io_ctx *dic =
- (struct decompress_io_ctx *)page_private(page);
+ struct decompress_io_ctx *dic = folio->private;
- f2fs_put_dic(dic);
+ f2fs_put_dic(dic, in_task);
}
/*
* check whether cluster blocks are contiguous, and add extent cache entry
* only if cluster blocks are logically and physically contiguous.
*/
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+ unsigned int ofs_in_node)
{
- bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+ bool compressed = data_blkaddr(dn->inode, dn->node_folio,
+ ofs_in_node) == COMPRESS_ADDR;
int i = compressed ? 1 : 0;
- block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
- dn->ofs_in_node + i);
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ ofs_in_node + i);
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
- dn->ofs_in_node + i);
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
break;
@@ -1767,8 +1905,9 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
}
const struct address_space_operations f2fs_compress_aops = {
- .releasepage = f2fs_release_page,
- .invalidatepage = f2fs_invalidate_page,
+ .release_folio = f2fs_release_folio,
+ .invalidate_folio = f2fs_invalidate_folio,
+ .migrate_folio = filemap_migrate_folio,
};
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
@@ -1776,17 +1915,18 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
return sbi->compress_inode->i_mapping;
}
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len)
{
if (!sbi->compress_inode)
return;
- invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
+ invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
-void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
- nid_t ino, block_t blkaddr)
+static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+ struct folio *folio, nid_t ino, block_t blkaddr)
{
- struct page *cpage;
+ struct folio *cfolio;
int ret;
if (!test_opt(sbi, COMPRESS_CACHE))
@@ -1798,53 +1938,49 @@ void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
return;
- cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
- if (cpage) {
- f2fs_put_page(cpage, 0);
+ cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr);
+ if (!IS_ERR(cfolio)) {
+ f2fs_folio_put(cfolio, false);
return;
}
- cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
- if (!cpage)
+ cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
+ if (!cfolio)
return;
- ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
+ ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio,
blkaddr, GFP_NOFS);
if (ret) {
- f2fs_put_page(cpage, 0);
+ f2fs_folio_put(cfolio, false);
return;
}
- set_page_private_data(cpage, ino);
-
- if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
- goto out;
+ folio_set_f2fs_data(cfolio, ino);
- memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
- SetPageUptodate(cpage);
-out:
- f2fs_put_page(cpage, 1);
+ memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE);
+ folio_mark_uptodate(cfolio);
+ f2fs_folio_put(cfolio, true);
}
-bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
block_t blkaddr)
{
- struct page *cpage;
+ struct folio *cfolio;
bool hitted = false;
if (!test_opt(sbi, COMPRESS_CACHE))
return false;
- cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
+ cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi),
blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
- if (cpage) {
- if (PageUptodate(cpage)) {
+ if (!IS_ERR(cfolio)) {
+ if (folio_test_uptodate(cfolio)) {
atomic_inc(&sbi->compress_page_hit);
- memcpy(page_address(page),
- page_address(cpage), PAGE_SIZE);
+ memcpy(folio_address(folio),
+ folio_address(cfolio), folio_size(folio));
hitted = true;
}
- f2fs_put_page(cpage, 1);
+ f2fs_folio_put(cfolio, true);
}
return hitted;
@@ -1852,46 +1988,41 @@ bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
- struct address_space *mapping = sbi->compress_inode->i_mapping;
- struct pagevec pvec;
+ struct address_space *mapping = COMPRESS_MAPPING(sbi);
+ struct folio_batch fbatch;
pgoff_t index = 0;
pgoff_t end = MAX_BLKADDR(sbi);
if (!mapping->nrpages)
return;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
do {
- unsigned int nr_pages;
- int i;
+ unsigned int nr, i;
- nr_pages = pagevec_lookup_range(&pvec, mapping,
- &index, end - 1);
- if (!nr_pages)
+ nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
+ if (!nr)
break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- if (page->index > end)
- break;
+ for (i = 0; i < nr; i++) {
+ struct folio *folio = fbatch.folios[i];
- lock_page(page);
- if (page->mapping != mapping) {
- unlock_page(page);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
continue;
}
- if (ino != get_page_private_data(page)) {
- unlock_page(page);
+ if (ino != folio_get_f2fs_data(folio)) {
+ folio_unlock(folio);
continue;
}
- generic_error_remove_page(mapping, page);
- unlock_page(page);
+ generic_error_remove_folio(mapping, folio);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
} while (index < end);
}
@@ -1927,7 +2058,10 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
- char slab_name[32];
+ char slab_name[35];
+
+ if (!f2fs_sb_has_compression(sbi))
+ return 0;
sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
@@ -1936,9 +2070,7 @@ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
sbi->page_array_slab_size);
- if (!sbi->page_array_slab)
- return -ENOMEM;
- return 0;
+ return sbi->page_array_slab ? 0 : -ENOMEM;
}
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
@@ -1946,53 +2078,24 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
kmem_cache_destroy(sbi->page_array_slab);
}
-static int __init f2fs_init_cic_cache(void)
+int __init f2fs_init_compress_cache(void)
{
cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
sizeof(struct compress_io_ctx));
if (!cic_entry_slab)
return -ENOMEM;
- return 0;
-}
-
-static void f2fs_destroy_cic_cache(void)
-{
- kmem_cache_destroy(cic_entry_slab);
-}
-
-static int __init f2fs_init_dic_cache(void)
-{
dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
sizeof(struct decompress_io_ctx));
if (!dic_entry_slab)
- return -ENOMEM;
- return 0;
-}
-
-static void f2fs_destroy_dic_cache(void)
-{
- kmem_cache_destroy(dic_entry_slab);
-}
-
-int __init f2fs_init_compress_cache(void)
-{
- int err;
-
- err = f2fs_init_cic_cache();
- if (err)
- goto out;
- err = f2fs_init_dic_cache();
- if (err)
goto free_cic;
return 0;
free_cic:
- f2fs_destroy_cic_cache();
-out:
+ kmem_cache_destroy(cic_entry_slab);
return -ENOMEM;
}
void f2fs_destroy_compress_cache(void)
{
- f2fs_destroy_dic_cache();
- f2fs_destroy_cic_cache();
+ kmem_cache_destroy(dic_entry_slab);
+ kmem_cache_destroy(cic_entry_slab);
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9f754aaef558..c30e69392a62 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -7,10 +7,9 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
+#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
-#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
@@ -18,9 +17,9 @@
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
-#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
+#include <linux/iomap.h>
#include "f2fs.h"
#include "node.h"
@@ -39,10 +38,8 @@ static struct bio_set f2fs_bioset;
int __init f2fs_init_bioset(void)
{
- if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
- 0, BIOSET_NEED_BVECS))
- return -ENOMEM;
- return 0;
+ return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
+ 0, BIOSET_NEED_BVECS);
}
void f2fs_destroy_bioset(void)
@@ -50,14 +47,14 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
-static bool __is_cp_guaranteed(struct page *page)
+bool f2fs_is_cp_guaranteed(const struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *inode;
struct f2fs_sb_info *sbi;
- if (!mapping)
- return false;
+ if (fscrypt_is_bounce_folio(folio))
+ return folio_test_f2fs_gcing(fscrypt_pagecache_folio(folio));
inode = mapping->host;
sbi = F2FS_I_SB(inode);
@@ -67,18 +64,15 @@ static bool __is_cp_guaranteed(struct page *page)
S_ISDIR(inode->i_mode))
return true;
- if (f2fs_is_compressed_page(page))
- return false;
- if ((S_ISREG(inode->i_mode) &&
- (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
- page_private_gcing(page))
+ if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
+ folio_test_f2fs_gcing(folio))
return true;
return false;
}
-static enum count_type __read_io_type(struct page *page)
+static enum count_type __read_io_type(struct folio *folio)
{
- struct address_space *mapping = page_file_mapping(page);
+ struct address_space *mapping = folio->mapping;
if (mapping) {
struct inode *inode = mapping->host;
@@ -96,17 +90,17 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
- STEP_DECRYPT = 1 << 0,
+ STEP_DECRYPT = BIT(0),
#else
STEP_DECRYPT = 0, /* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
- STEP_DECOMPRESS = 1 << 1,
+ STEP_DECOMPRESS = BIT(1),
#else
STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
- STEP_VERITY = 1 << 2,
+ STEP_VERITY = BIT(2),
#else
STEP_VERITY = 0, /* compile out the verity-related code */
#endif
@@ -117,42 +111,51 @@ struct bio_post_read_ctx {
struct f2fs_sb_info *sbi;
struct work_struct work;
unsigned int enabled_steps;
+ /*
+ * decompression_attempted keeps track of whether
+ * f2fs_end_read_compressed_page() has been called on the pages in the
+ * bio that belong to a compressed cluster yet.
+ */
+ bool decompression_attempted;
block_t fs_blkaddr;
};
-static void f2fs_finish_read_bio(struct bio *bio)
+/*
+ * Update and unlock a bio's pages, and free the bio.
+ *
+ * This marks pages up-to-date only if there was no error in the bio (I/O error,
+ * decryption error, or verity error), as indicated by bio->bi_status.
+ *
+ * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
+ * aren't marked up-to-date here, as decompression is done on a per-compression-
+ * cluster basis rather than a per-bio basis. Instead, we need only do two
+ * things for each compressed page here: call f2fs_end_read_compressed_page()
+ * with failed=true if an error occurred before it would have normally gotten
+ * called (i.e., I/O error or decryption error, but *not* verity error), and
+ * release the bio's reference to the decompress_io_ctx of the page's cluster.
+ */
+static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
+ struct bio_post_read_ctx *ctx = bio->bi_private;
- /*
- * Update and unlock the bio's pagecache pages, and put the
- * decompression context for any compressed pages.
- */
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- if (f2fs_is_compressed_page(page)) {
- if (bio->bi_status)
- f2fs_end_read_compressed_page(page, true, 0);
- f2fs_put_page_dic(page);
+ if (f2fs_is_compressed_page(folio)) {
+ if (ctx && !ctx->decompression_attempted)
+ f2fs_end_read_compressed_page(folio, true, 0,
+ in_task);
+ f2fs_put_folio_dic(folio, in_task);
continue;
}
- /* PG_error was set if decryption or verity failed. */
- if (bio->bi_status || PageError(page)) {
- ClearPageUptodate(page);
- /* will re-read again later */
- ClearPageError(page);
- } else {
- SetPageUptodate(page);
- }
- dec_page_count(F2FS_P_SB(page), __read_io_type(page));
- unlock_page(page);
+ dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
+ folio_end_read(folio, bio->bi_status == BLK_STS_OK);
}
- if (bio->bi_private)
- mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+ if (ctx)
+ mempool_free(ctx, bio_post_read_ctx_pool);
bio_put(bio);
}
@@ -164,7 +167,7 @@ static void f2fs_verify_bio(struct work_struct *work)
bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
/*
- * fsverity_verify_bio() may call readpages() again, and while verity
+ * fsverity_verify_bio() may call readahead() again, and while verity
* will be disabled for this, decryption and/or decompression may still
* be needed, resulting in another bio_post_read_ctx being allocated.
* So to prevent deadlocks we need to release the current ctx to the
@@ -178,21 +181,22 @@ static void f2fs_verify_bio(struct work_struct *work)
* as those were handled separately by f2fs_end_read_compressed_page().
*/
if (may_have_compressed_pages) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- if (!f2fs_is_compressed_page(page) &&
- !PageError(page) && !fsverity_verify_page(page))
- SetPageError(page);
+ if (!f2fs_is_compressed_page(folio) &&
+ !fsverity_verify_page(&folio->page)) {
+ bio->bi_status = BLK_STS_IOERR;
+ break;
+ }
}
} else {
fsverity_verify_bio(bio);
}
- f2fs_finish_read_bio(bio);
+ f2fs_finish_read_bio(bio, true);
}
/*
@@ -204,7 +208,7 @@ static void f2fs_verify_bio(struct work_struct *work)
* can involve reading verity metadata pages from the file, and these verity
* metadata pages may be encrypted and/or compressed.
*/
-static void f2fs_verify_and_finish_bio(struct bio *bio)
+static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -212,7 +216,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
INIT_WORK(&ctx->work, f2fs_verify_bio);
fsverity_enqueue_verify_work(&ctx->work);
} else {
- f2fs_finish_read_bio(bio);
+ f2fs_finish_read_bio(bio, in_task);
}
}
@@ -225,26 +229,27 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
* that the bio includes at least one compressed page. The actual decompression
* is done on a per-cluster basis, not a per-bio basis.
*/
-static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
+static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
+ bool in_task)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
bool all_compressed = true;
block_t blkaddr = ctx->fs_blkaddr;
- bio_for_each_segment_all(bv, ctx->bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, ctx->bio) {
+ struct folio *folio = fi.folio;
- /* PG_error was set if decryption failed. */
- if (f2fs_is_compressed_page(page))
- f2fs_end_read_compressed_page(page, PageError(page),
- blkaddr);
+ if (f2fs_is_compressed_page(folio))
+ f2fs_end_read_compressed_page(folio, false, blkaddr,
+ in_task);
else
all_compressed = false;
blkaddr++;
}
+ ctx->decompression_attempted = true;
+
/*
* Optimization: if all the bio's pages are compressed, then scheduling
* the per-bio verity work is unnecessary, as verity will be fully
@@ -258,93 +263,103 @@ static void f2fs_post_read_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
+ struct bio *bio = ctx->bio;
- if (ctx->enabled_steps & STEP_DECRYPT)
- fscrypt_decrypt_bio(ctx->bio);
+ if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
+ f2fs_finish_read_bio(bio, true);
+ return;
+ }
if (ctx->enabled_steps & STEP_DECOMPRESS)
- f2fs_handle_step_decompress(ctx);
+ f2fs_handle_step_decompress(ctx, true);
- f2fs_verify_and_finish_bio(ctx->bio);
+ f2fs_verify_and_finish_bio(bio, true);
}
static void f2fs_read_end_io(struct bio *bio)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+ struct f2fs_sb_info *sbi = F2FS_F_SB(bio_first_folio_all(bio));
struct bio_post_read_ctx *ctx;
+ bool intask = in_task() && !irqs_disabled();
- iostat_update_and_unbind_ctx(bio, 0);
+ iostat_update_and_unbind_ctx(bio);
ctx = bio->bi_private;
- if (time_to_inject(sbi, FAULT_READ_IO)) {
- f2fs_show_injection_info(sbi, FAULT_READ_IO);
+ if (time_to_inject(sbi, FAULT_READ_IO))
bio->bi_status = BLK_STS_IOERR;
- }
- if (bio->bi_status) {
- f2fs_finish_read_bio(bio);
+ if (bio->bi_status != BLK_STS_OK) {
+ f2fs_finish_read_bio(bio, intask);
return;
}
- if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
- INIT_WORK(&ctx->work, f2fs_post_read_work);
- queue_work(ctx->sbi->post_read_wq, &ctx->work);
- } else {
- f2fs_verify_and_finish_bio(bio);
+ if (ctx) {
+ unsigned int enabled_steps = ctx->enabled_steps &
+ (STEP_DECRYPT | STEP_DECOMPRESS);
+
+ /*
+ * If decompression is the only post-read step needed (no decryption)
+ * and we are not in low-memory mode, handle it right here instead of
+ * deferring it to the post-read workqueue.
+ */
+ if (enabled_steps == STEP_DECOMPRESS &&
+ !f2fs_low_mem_mode(sbi)) {
+ f2fs_handle_step_decompress(ctx, intask);
+ } else if (enabled_steps) {
+ INIT_WORK(&ctx->work, f2fs_post_read_work);
+ queue_work(ctx->sbi->post_read_wq, &ctx->work);
+ return;
+ }
}
+
+ f2fs_verify_and_finish_bio(bio, intask);
}
static void f2fs_write_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- iostat_update_and_unbind_ctx(bio, 1);
+ iostat_update_and_unbind_ctx(bio);
sbi = bio->bi_private;
- if (time_to_inject(sbi, FAULT_WRITE_IO)) {
- f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
+ if (time_to_inject(sbi, FAULT_WRITE_IO))
bio->bi_status = BLK_STS_IOERR;
- }
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- enum count_type type = WB_DATA_TYPE(page);
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ enum count_type type;
- if (page_private_dummy(page)) {
- clear_page_private_dummy(page);
- unlock_page(page);
- mempool_free(page, sbi->write_io_dummy);
+ if (fscrypt_is_bounce_folio(folio)) {
+ struct folio *io_folio = folio;
- if (unlikely(bio->bi_status))
- f2fs_stop_checkpoint(sbi, true);
- continue;
+ folio = fscrypt_pagecache_folio(io_folio);
+ fscrypt_free_bounce_page(&io_folio->page);
}
- fscrypt_finalize_bounce_page(&page);
-
#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_is_compressed_page(page)) {
- f2fs_compress_write_end_io(bio, page);
+ if (f2fs_is_compressed_page(folio)) {
+ f2fs_compress_write_end_io(bio, folio);
continue;
}
#endif
- if (unlikely(bio->bi_status)) {
- mapping_set_error(page->mapping, -EIO);
+ type = WB_DATA_TYPE(folio, false);
+
+ if (unlikely(bio->bi_status != BLK_STS_OK)) {
+ mapping_set_error(folio->mapping, -EIO);
if (type == F2FS_WB_CP_DATA)
- f2fs_stop_checkpoint(sbi, true);
+ f2fs_stop_checkpoint(sbi, true,
+ STOP_CP_REASON_WRITE_FAIL);
}
- f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
- page->index != nid_of_node(page));
+ f2fs_bug_on(sbi, is_node_folio(folio) &&
+ folio->index != nid_of_node(folio));
dec_page_count(sbi, type);
- if (f2fs_in_warm_node_list(sbi, page))
- f2fs_del_fsync_node_entry(sbi, page);
- clear_page_private_gcing(page);
- end_page_writeback(page);
+ if (f2fs_in_warm_node_list(sbi, folio))
+ f2fs_del_fsync_node_entry(sbi, folio);
+ folio_clear_f2fs_gcing(folio);
+ folio_end_writeback(folio);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
wq_has_sleeper(&sbi->cp_wait))
@@ -353,8 +368,19 @@ static void f2fs_write_end_io(struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static void f2fs_zone_write_end_io(struct bio *bio)
+{
+ struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
+
+ bio->bi_private = io->bi_private;
+ complete(&io->zone_wait);
+ f2fs_write_end_io(bio);
+}
+#endif
+
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
- block_t blk_addr, struct bio *bio)
+ block_t blk_addr, sector_t *sector)
{
struct block_device *bdev = sbi->sb->s_bdev;
int i;
@@ -369,10 +395,9 @@ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
}
}
}
- if (bio) {
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
- }
+
+ if (sector)
+ *sector = SECTOR_FROM_BLOCK(blk_addr);
return bdev;
}
@@ -389,14 +414,54 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
return 0;
}
+static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
+{
+ unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
+ unsigned int fua_flag, meta_flag, io_flag;
+ blk_opf_t op_flags = 0;
+
+ if (fio->op != REQ_OP_WRITE)
+ return 0;
+ if (fio->type == DATA)
+ io_flag = fio->sbi->data_io_flag;
+ else if (fio->type == NODE)
+ io_flag = fio->sbi->node_io_flag;
+ else
+ return 0;
+
+ fua_flag = io_flag & temp_mask;
+ meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
+
+ /*
+ * data/node io flag bits per temp:
+ * REQ_META | REQ_FUA |
+ * 5 | 4 | 3 | 2 | 1 | 0 |
+ * Cold | Warm | Hot | Cold | Warm | Hot |
+ */
+ if (BIT(fio->temp) & meta_flag)
+ op_flags |= REQ_META;
+ if (BIT(fio->temp) & fua_flag)
+ op_flags |= REQ_FUA;
+
+ if (fio->type == DATA &&
+ F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
+ op_flags |= REQ_PRIO;
+
+ return op_flags;
+}
+
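/*
 * Illustrative sketch, not part of the patch: how f2fs_io_flags() above
 * decodes a data_io_flag/node_io_flag value per temperature. The value 0x2a
 * and NR_TEMP_TYPE == 3 (Hot/Warm/Cold) are assumptions used only for this
 * standalone userspace example.
 */
#include <stdio.h>

#define EX_NR_TEMP_TYPE 3

int main(void)
{
	static const char *const temps[EX_NR_TEMP_TYPE] = { "Hot", "Warm", "Cold" };
	unsigned int io_flag = 0x2a;	/* 101010b: META for Hot+Cold, FUA for Warm */
	unsigned int temp_mask = (1u << EX_NR_TEMP_TYPE) - 1;
	unsigned int fua_flag = io_flag & temp_mask;
	unsigned int meta_flag = (io_flag >> EX_NR_TEMP_TYPE) & temp_mask;
	unsigned int temp;

	for (temp = 0; temp < EX_NR_TEMP_TYPE; temp++)
		printf("%-4s: REQ_FUA=%d REQ_META=%d\n", temps[temp],
		       !!(fua_flag & (1u << temp)),
		       !!(meta_flag & (1u << temp)));
	return 0;
}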
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
struct f2fs_sb_info *sbi = fio->sbi;
+ struct block_device *bdev;
+ sector_t sector;
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
-
- f2fs_target_device(sbi, fio->new_blkaddr, bio);
+ bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
+ bio = bio_alloc_bioset(bdev, npages,
+ fio->op | fio->op_flags | f2fs_io_flags(fio),
+ GFP_NOIO, &f2fs_bioset);
+ bio->bi_iter.bi_sector = sector;
if (is_read_io(fio->op)) {
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = NULL;
@@ -441,91 +506,23 @@ static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
return fscrypt_mergeable_bio(bio, inode, next_idx);
}
-static inline void __submit_bio(struct f2fs_sb_info *sbi,
- struct bio *bio, enum page_type type)
+void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ enum page_type type)
{
- if (!is_read_io(bio_op(bio))) {
- unsigned int start;
-
- if (type != DATA && type != NODE)
- goto submit_io;
-
- if (f2fs_lfs_mode(sbi) && current->plug)
- blk_finish_plug(current->plug);
-
- if (!F2FS_IO_ALIGNED(sbi))
- goto submit_io;
-
- start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
- start %= F2FS_IO_SIZE(sbi);
-
- if (start == 0)
- goto submit_io;
-
- /* fill dummy pages */
- for (; start < F2FS_IO_SIZE(sbi); start++) {
- struct page *page =
- mempool_alloc(sbi->write_io_dummy,
- GFP_NOIO | __GFP_NOFAIL);
- f2fs_bug_on(sbi, !page);
-
- lock_page(page);
-
- zero_user_segment(page, 0, PAGE_SIZE);
- set_page_private_dummy(page);
-
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
- f2fs_bug_on(sbi, 1);
- }
- /*
- * In the NODE case, we lose next block address chain. So, we
- * need to do checkpoint in f2fs_sync_file.
- */
- if (type == NODE)
- set_sbi_flag(sbi, SBI_NEED_CP);
- }
-submit_io:
- if (is_read_io(bio_op(bio)))
- trace_f2fs_submit_read_bio(sbi->sb, type, bio);
- else
- trace_f2fs_submit_write_bio(sbi->sb, type, bio);
+ WARN_ON_ONCE(!is_read_io(bio_op(bio)));
+ trace_f2fs_submit_read_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
submit_bio(bio);
}
-void f2fs_submit_bio(struct f2fs_sb_info *sbi,
- struct bio *bio, enum page_type type)
-{
- __submit_bio(sbi, bio, type);
-}
-
-static void __attach_io_flag(struct f2fs_io_info *fio)
+static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ enum page_type type)
{
- struct f2fs_sb_info *sbi = fio->sbi;
- unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
- unsigned int io_flag, fua_flag, meta_flag;
-
- if (fio->type == DATA)
- io_flag = sbi->data_io_flag;
- else if (fio->type == NODE)
- io_flag = sbi->node_io_flag;
- else
- return;
-
- fua_flag = io_flag & temp_mask;
- meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
-
- /*
- * data/node io flag bits per temp:
- * REQ_META | REQ_FUA |
- * 5 | 4 | 3 | 2 | 1 | 0 |
- * Cold | Warm | Hot | Cold | Warm | Hot |
- */
- if ((1 << fio->temp) & meta_flag)
- fio->op_flags |= REQ_META;
- if ((1 << fio->temp) & fua_flag)
- fio->op_flags |= REQ_FUA;
+ WARN_ON_ONCE(is_read_io(bio_op(bio)));
+ trace_f2fs_submit_write_bio(sbi->sb, type, bio);
+ iostat_update_submit_ctx(bio, type);
+ submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
@@ -535,47 +532,44 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
if (!io->bio)
return;
- __attach_io_flag(fio);
- bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
-
- if (is_read_io(fio->op))
+ if (is_read_io(fio->op)) {
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
- else
+ f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
+ } else {
trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
-
- __submit_bio(io->sbi, io->bio, fio->type);
+ f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
+ }
io->bio = NULL;
}
static bool __has_merged_page(struct bio *bio, struct inode *inode,
- struct page *page, nid_t ino)
+ struct folio *folio, nid_t ino)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
if (!bio)
return false;
- if (!inode && !page && !ino)
+ if (!inode && !folio && !ino)
return true;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *target = bvec->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *target = fi.folio;
- if (fscrypt_is_bounce_page(target)) {
- target = fscrypt_pagecache_page(target);
+ if (fscrypt_is_bounce_folio(target)) {
+ target = fscrypt_pagecache_folio(target);
if (IS_ERR(target))
continue;
}
if (f2fs_is_compressed_page(target)) {
- target = f2fs_compress_control_page(target);
+ target = f2fs_compress_control_folio(target);
if (IS_ERR(target))
continue;
}
if (inode && inode == target->mapping->host)
return true;
- if (page && page == target)
+ if (folio && folio == target)
return true;
if (ino && ino == ino_of_node(target))
return true;
@@ -584,28 +578,67 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
return false;
}
+int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ for (i = 0; i < NR_PAGE_TYPE; i++) {
+ int n = (i == META) ? 1 : NR_TEMP_TYPE;
+ int j;
+
+ sbi->write_io[i] = f2fs_kmalloc(sbi,
+ array_size(n, sizeof(struct f2fs_bio_info)),
+ GFP_KERNEL);
+ if (!sbi->write_io[i])
+ return -ENOMEM;
+
+ for (j = HOT; j < n; j++) {
+ struct f2fs_bio_info *io = &sbi->write_io[i][j];
+
+ init_f2fs_rwsem(&io->io_rwsem);
+ io->sbi = sbi;
+ io->bio = NULL;
+ io->last_block_in_bio = 0;
+ spin_lock_init(&io->io_lock);
+ INIT_LIST_HEAD(&io->io_list);
+ INIT_LIST_HEAD(&io->bio_list);
+ init_f2fs_rwsem(&io->bio_list_lock);
+#ifdef CONFIG_BLK_DEV_ZONED
+ init_completion(&io->zone_wait);
+ io->zone_pending_bio = NULL;
+ io->bi_private = NULL;
+#endif
+ }
+ }
+
+ return 0;
+}
+
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
- down_write(&io->io_rwsem);
+ f2fs_down_write(&io->io_rwsem);
+
+ if (!io->bio)
+ goto unlock_out;
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
- io->fio.op = REQ_OP_WRITE;
- io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
+ io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
if (!test_opt(sbi, NOBARRIER))
- io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
+ io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
}
__submit_merged_bio(io);
- up_write(&io->io_rwsem);
+unlock_out:
+ f2fs_up_write(&io->io_rwsem);
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type, bool force)
{
enum temp_type temp;
@@ -616,9 +649,9 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
- down_read(&io->io_rwsem);
- ret = __has_merged_page(io->bio, inode, page, ino);
- up_read(&io->io_rwsem);
+ f2fs_down_read(&io->io_rwsem);
+ ret = __has_merged_page(io->bio, inode, folio, ino);
+ f2fs_up_read(&io->io_rwsem);
}
if (ret)
__f2fs_submit_merged_write(sbi, type, temp);
@@ -635,10 +668,10 @@ void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type)
{
- __submit_merged_write_cond(sbi, inode, page, ino, type, false);
+ __submit_merged_write_cond(sbi, inode, folio, ino, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
@@ -655,37 +688,34 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct page *page = fio->encrypted_page ?
- fio->encrypted_page : fio->page;
+ struct folio *fio_folio = fio->folio;
+ struct folio *data_folio = fio->encrypted_page ?
+ page_folio(fio->encrypted_page) : fio_folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
fio->is_por ? META_POR : (__is_meta_io(fio) ?
META_GENERIC : DATA_GENERIC_ENHANCE)))
return -EFSCORRUPTED;
- trace_f2fs_submit_page_bio(page, fio);
+ trace_f2fs_submit_folio_bio(data_folio, fio);
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
- f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
-
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- bio_put(bio);
- return -EFAULT;
- }
+ f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
+ fio_folio->index, fio, GFP_NOIO);
+ bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);
if (fio->io_wbc && !is_read_io(fio->op))
- wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
-
- __attach_io_flag(fio);
- bio_set_op_attrs(bio, fio->op, fio->op_flags);
+ wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
- __read_io_type(page): WB_DATA_TYPE(fio->page));
+ __read_io_type(data_folio) : WB_DATA_TYPE(fio->folio, false));
- __submit_bio(fio->sbi, bio, fio->type);
+ if (is_read_io(bio_op(bio)))
+ f2fs_submit_read_bio(fio->sbi, bio, fio->type);
+ else
+ f2fs_submit_write_bio(fio->sbi, bio, fio->type);
return 0;
}
@@ -703,9 +733,11 @@ static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
static bool io_type_is_mergeable(struct f2fs_bio_info *io,
struct f2fs_io_info *fio)
{
+ blk_opf_t mask = ~(REQ_PREFLUSH | REQ_FUA);
+
if (io->fio.op != fio->op)
return false;
- return io->fio.op_flags == fio->op_flags;
+ return (io->fio.op_flags & mask) == (fio->op_flags & mask);
}
static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
@@ -714,23 +746,13 @@ static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
block_t last_blkaddr,
block_t cur_blkaddr)
{
- if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
- unsigned int filled_blocks =
- F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
- unsigned int io_size = F2FS_IO_SIZE(sbi);
- unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
-
- /* IOs in bio is aligned and left space of vectors is not enough */
- if (!(filled_blocks % io_size) && left_vecs < io_size)
- return false;
- }
if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
return false;
return io_type_is_mergeable(io, fio);
}
static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
- struct page *page, enum temp_type temp)
+ struct folio *folio, enum temp_type temp)
{
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
struct bio_entry *be;
@@ -739,12 +761,11 @@ static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
be->bio = bio;
bio_get(bio);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
- f2fs_bug_on(sbi, 1);
+ bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
- down_write(&io->bio_list_lock);
+ f2fs_down_write(&io->bio_list_lock);
list_add_tail(&be->list, &io->bio_list);
- up_write(&io->bio_list_lock);
+ f2fs_up_write(&io->bio_list_lock);
}
static void del_bio_entry(struct bio_entry *be)
@@ -754,8 +775,9 @@ static void del_bio_entry(struct bio_entry *be)
}
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
- struct page *page)
+ struct folio *folio)
{
+ struct folio *fio_folio = fio->folio;
struct f2fs_sb_info *sbi = fio->sbi;
enum temp_type temp;
bool found = false;
@@ -766,7 +788,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
struct list_head *head = &io->bio_list;
struct bio_entry *be;
- down_write(&io->bio_list_lock);
+ f2fs_down_write(&io->bio_list_lock);
list_for_each_entry(be, head, list) {
if (be->bio != *bio)
continue;
@@ -777,20 +799,19 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
*fio->last_block,
fio->new_blkaddr));
if (f2fs_crypt_mergeable_bio(*bio,
- fio->page->mapping->host,
- fio->page->index, fio) &&
- bio_add_page(*bio, page, PAGE_SIZE, 0) ==
- PAGE_SIZE) {
+ fio_folio->mapping->host,
+ fio_folio->index, fio) &&
+ bio_add_folio(*bio, folio, folio_size(folio), 0)) {
ret = 0;
break;
}
/* page can't be merged into bio; submit the bio */
del_bio_entry(be);
- __submit_bio(sbi, *bio, DATA);
+ f2fs_submit_write_bio(sbi, *bio, DATA);
break;
}
- up_write(&io->bio_list_lock);
+ f2fs_up_write(&io->bio_list_lock);
}
if (ret) {
@@ -802,12 +823,14 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
}
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
- struct bio **bio, struct page *page)
+ struct bio **bio, struct folio *folio)
{
enum temp_type temp;
bool found = false;
struct bio *target = bio ? *bio : NULL;
+ f2fs_bug_on(sbi, !target && !folio);
+
for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
struct list_head *head = &io->bio_list;
@@ -816,41 +839,41 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
if (list_empty(head))
continue;
- down_read(&io->bio_list_lock);
+ f2fs_down_read(&io->bio_list_lock);
list_for_each_entry(be, head, list) {
if (target)
found = (target == be->bio);
else
found = __has_merged_page(be->bio, NULL,
- page, 0);
+ folio, 0);
if (found)
break;
}
- up_read(&io->bio_list_lock);
+ f2fs_up_read(&io->bio_list_lock);
if (!found)
continue;
found = false;
- down_write(&io->bio_list_lock);
+ f2fs_down_write(&io->bio_list_lock);
list_for_each_entry(be, head, list) {
if (target)
found = (target == be->bio);
else
found = __has_merged_page(be->bio, NULL,
- page, 0);
+ folio, 0);
if (found) {
target = be->bio;
del_bio_entry(be);
break;
}
}
- up_write(&io->bio_list_lock);
+ f2fs_up_write(&io->bio_list_lock);
}
if (found)
- __submit_bio(sbi, target, DATA);
+ f2fs_submit_write_bio(sbi, target, DATA);
if (bio && *bio) {
bio_put(*bio);
*bio = NULL;
@@ -860,14 +883,15 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio = *fio->bio;
- struct page *page = fio->encrypted_page ?
- fio->encrypted_page : fio->page;
+ struct folio *data_folio = fio->encrypted_page ?
+ page_folio(fio->encrypted_page) : fio->folio;
+ struct folio *folio = fio->folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
return -EFSCORRUPTED;
- trace_f2fs_submit_page_bio(page, fio);
+ trace_f2fs_submit_folio_bio(data_folio, fio);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
@@ -875,21 +899,19 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_VECS);
- __attach_io_flag(fio);
- f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
- bio_set_op_attrs(bio, fio->op, fio->op_flags);
+ f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
+ folio->index, fio, GFP_NOIO);
- add_bio_entry(fio->sbi, bio, page, fio->temp);
+ add_bio_entry(fio->sbi, bio, data_folio, fio->temp);
} else {
- if (add_ipu_page(fio, &bio, page))
+ if (add_ipu_page(fio, &bio, data_folio))
goto alloc_new;
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));
- inc_page_count(fio->sbi, WB_DATA_TYPE(page));
+ inc_page_count(fio->sbi, WB_DATA_TYPE(folio, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@@ -897,17 +919,49 @@ alloc_new:
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ int devi = 0;
+
+ if (f2fs_is_multi_device(sbi)) {
+ devi = f2fs_target_device_index(sbi, blkaddr);
+ if (blkaddr < FDEV(devi).start_blk ||
+ blkaddr > FDEV(devi).end_blk) {
+ f2fs_err(sbi, "Invalid block %x", blkaddr);
+ return false;
+ }
+ blkaddr -= FDEV(devi).start_blk;
+ bdev = FDEV(devi).bdev;
+ }
+ return bdev_is_zoned(bdev) &&
+ f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
+ (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
+}
+#endif
+
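/*
 * Illustrative sketch, not part of the patch: the modular check used by
 * is_end_zone_blkaddr() above, which treats a block as the zone's last block
 * when blkaddr % blocks_per_blkz == blocks_per_blkz - 1. The zone size of
 * 512 blocks is an assumption for this standalone userspace example.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int blocks_per_blkz = 512;	/* assumed zone size */
	const unsigned int addrs[] = { 0, 510, 511, 512, 1023, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		int end = (addrs[i] % blocks_per_blkz) == blocks_per_blkz - 1;

		printf("blkaddr %4u: %s\n", addrs[i],
		       end ? "last block of its zone" : "not a zone end");
	}
	return 0;
}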
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
- struct page *bio_page;
+ struct folio *bio_folio;
+ enum count_type type;
f2fs_bug_on(sbi, is_read_io(fio->op));
- down_write(&io->io_rwsem);
+ f2fs_down_write(&io->io_rwsem);
next:
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
+ wait_for_completion_io(&io->zone_wait);
+ bio_put(io->zone_pending_bio);
+ io->zone_pending_bio = NULL;
+ io->bi_private = NULL;
+ }
+#endif
+
if (fio->in_list) {
spin_lock(&io->io_lock);
if (list_empty(&io->io_list)) {
@@ -923,78 +977,82 @@ next:
verify_fio_blkaddr(fio);
if (fio->encrypted_page)
- bio_page = fio->encrypted_page;
+ bio_folio = page_folio(fio->encrypted_page);
else if (fio->compressed_page)
- bio_page = fio->compressed_page;
+ bio_folio = page_folio(fio->compressed_page);
else
- bio_page = fio->page;
+ bio_folio = fio->folio;
/* set submitted = true as a return value */
- fio->submitted = true;
+ fio->submitted = 1;
- inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+ type = WB_DATA_TYPE(bio_folio, fio->compressed_page);
+ inc_page_count(sbi, type);
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
- !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
- bio_page->index, fio)))
+ !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio),
+ bio_folio->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
- if (F2FS_IO_ALIGNED(sbi) &&
- (fio->type == DATA || fio->type == NODE) &&
- fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
- dec_page_count(sbi, WB_DATA_TYPE(bio_page));
- fio->retry = true;
- goto skip;
- }
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
- f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
- bio_page->index, fio, GFP_NOIO);
+ f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio),
+ bio_folio->index, fio, GFP_NOIO);
io->fio = *fio;
}
- if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ if (!bio_add_folio(io->bio, bio_folio, folio_size(bio_folio), 0)) {
__submit_merged_bio(io);
goto alloc_new;
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->folio,
+ folio_size(fio->folio));
io->last_block_in_bio = fio->new_blkaddr;
- trace_f2fs_submit_page_write(fio->page, fio);
-skip:
+ trace_f2fs_submit_folio_write(fio->folio, fio);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
+ is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
+ bio_get(io->bio);
+ reinit_completion(&io->zone_wait);
+ io->bi_private = io->bio->bi_private;
+ io->bio->bi_private = io;
+ io->bio->bi_end_io = f2fs_zone_write_end_io;
+ io->zone_pending_bio = io->bio;
+ __submit_merged_bio(io);
+ }
+#endif
if (fio->in_list)
goto next;
out:
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
!f2fs_is_checkpoint_ready(sbi))
__submit_merged_bio(io);
- up_write(&io->io_rwsem);
+ f2fs_up_write(&io->io_rwsem);
}
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages, unsigned op_flag,
+ unsigned nr_pages, blk_opf_t op_flag,
pgoff_t first_idx, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
struct bio_post_read_ctx *ctx = NULL;
unsigned int post_read_steps = 0;
+ sector_t sector;
+ struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);
- bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
- bio_max_segs(nr_pages), &f2fs_bioset);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
+ bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
+ REQ_OP_READ | op_flag,
+ for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
+ bio->bi_iter.bi_sector = sector;
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
-
- f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (fscrypt_inode_uses_fs_layer_crypto(inode))
post_read_steps |= STEP_DECRYPT;
@@ -1016,6 +1074,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
ctx->sbi = sbi;
ctx->enabled_steps = post_read_steps;
ctx->fs_blkaddr = blkaddr;
+ ctx->decompression_attempted = false;
bio->bi_private = ctx;
}
iostat_alloc_and_bind_ctx(sbi, bio, ctx);
@@ -1024,64 +1083,53 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
}
/* This can handle encryption stuffs */
-static int f2fs_submit_page_read(struct inode *inode, struct page *page,
- block_t blkaddr, int op_flags, bool for_write)
+static void f2fs_submit_page_read(struct inode *inode, struct folio *folio,
+ block_t blkaddr, blk_opf_t op_flags,
+ bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- page->index, for_write);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
+ folio->index, for_write);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- bio_put(bio);
- return -EFAULT;
- }
- ClearPageError(page);
+ if (!bio_add_folio(bio, folio, PAGE_SIZE, 0))
+ f2fs_bug_on(sbi, 1);
+
inc_page_count(sbi, F2FS_RD_DATA);
- f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
- __submit_bio(sbi, bio, DATA);
- return 0;
+ f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_submit_read_bio(sbi, bio, DATA);
}
-static void __set_data_blkaddr(struct dnode_of_data *dn)
+static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- struct f2fs_node *rn = F2FS_NODE(dn->node_page);
- __le32 *addr_array;
- int base = 0;
+ __le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
- base = get_extra_isize(dn->inode);
-
- /* Get physical address of data block */
- addr_array = blkaddr_in_node(rn);
- addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ dn->data_blkaddr = blkaddr;
+ addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
* Lock ordering for the change of data block address:
* ->data_page
- * ->node_page
+ * ->node_folio
* update block addresses in the node page
*/
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
- __set_data_blkaddr(dn);
- if (set_page_dirty(dn->node_page))
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
+ __set_data_blkaddr(dn, blkaddr);
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- dn->data_blkaddr = blkaddr;
- f2fs_set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
+ f2fs_set_data_blkaddr(dn, blkaddr);
+ f2fs_update_read_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
@@ -1095,25 +1143,25 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
+ if (unlikely(err))
return err;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr = f2fs_data_blkaddr(dn);
if (blkaddr == NULL_ADDR) {
- dn->data_blkaddr = NEW_ADDR;
- __set_data_blkaddr(dn);
+ __set_data_blkaddr(dn, NEW_ADDR);
count--;
}
}
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
return 0;
}
@@ -1131,7 +1179,7 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn)
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
- bool need_put = dn->inode_page ? false : true;
+ bool need_put = dn->inode_folio ? false : true;
int err;
err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
@@ -1145,34 +1193,20 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
return err;
}
-int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
-{
- struct extent_info ei = {0, };
- struct inode *inode = dn->inode;
-
- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
- dn->data_blkaddr = ei.blk + index - ei.fofs;
- return 0;
- }
-
- return f2fs_reserve_block(dn, index);
-}
-
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
- int op_flags, bool for_write)
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+ blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
- struct page *page;
- struct extent_info ei = {0, };
+ struct folio *folio;
int err;
- page = f2fs_grab_cache_page(mapping, index, for_write);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(mapping, index, for_write);
+ if (IS_ERR(folio))
+ return folio;
- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
- dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
DATA_GENERIC_ENHANCE_READ)) {
err = -EFSCORRUPTED;
@@ -1183,12 +1217,17 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
- if (err)
+ if (err) {
+ if (err == -ENOENT && next_pgofs)
+ *next_pgofs = f2fs_get_next_page_offset(&dn, index);
goto put_err;
+ }
f2fs_put_dnode(&dn);
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
err = -ENOENT;
+ if (next_pgofs)
+ *next_pgofs = index + 1;
goto put_err;
}
if (dn.data_blkaddr != NEW_ADDR &&
@@ -1199,60 +1238,62 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
goto put_err;
}
got_it:
- if (PageUptodate(page)) {
- unlock_page(page);
- return page;
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ return folio;
}
/*
* A new dentry page is allocated but not able to be written, since its
* new inode page couldn't be allocated due to -ENOSPC.
 * In such a case, its blkaddr can remain NEW_ADDR.
- * see, f2fs_add_link -> f2fs_get_new_data_page ->
+ * see, f2fs_add_link -> f2fs_get_new_data_folio ->
* f2fs_init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- unlock_page(page);
- return page;
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ return folio;
}
- err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
+ f2fs_submit_page_read(inode, folio, dn.data_blkaddr,
op_flags, for_write);
- if (err)
- goto put_err;
- return page;
+ return folio;
put_err:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
+ pgoff_t *next_pgofs)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
-
- page = find_get_page(mapping, index);
- if (page && PageUptodate(page))
- return page;
- f2fs_put_page(page, 0);
-
- page = f2fs_get_read_data_page(inode, index, 0, false);
- if (IS_ERR(page))
- return page;
-
- if (PageUptodate(page))
- return page;
-
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 0);
+ struct folio *folio;
+
+ folio = f2fs_filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
+ if (IS_ERR(folio))
+ goto read;
+ if (folio_test_uptodate(folio))
+ return folio;
+ f2fs_folio_put(folio, false);
+
+read:
+ folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
+ if (IS_ERR(folio))
+ return folio;
+
+ if (folio_test_uptodate(folio))
+ return folio;
+
+ folio_wait_locked(folio);
+ if (unlikely(!folio_test_uptodate(folio))) {
+ f2fs_folio_put(folio, false);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
/*
@@ -1260,27 +1301,23 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
bool for_write)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
-repeat:
- page = f2fs_get_read_data_page(inode, index, 0, for_write);
- if (IS_ERR(page))
- return page;
+ struct folio *folio;
+
+ folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
+ if (IS_ERR(folio))
+ return folio;
/* wait for read completion */
- lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
- goto repeat;
- }
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
+ f2fs_folio_put(folio, true);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
/*
@@ -1289,57 +1326,57 @@ repeat:
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir, and if any error occur,
- * ipage should be released by this function.
+ * Note that ifolio is set only by make_empty_dir, and if any error occurs,
+ * ifolio should be released by this function.
*/
-struct page *f2fs_get_new_data_page(struct inode *inode,
- struct page *ipage, pgoff_t index, bool new_i_size)
+struct folio *f2fs_get_new_data_folio(struct inode *inode,
+ struct folio *ifolio, pgoff_t index, bool new_i_size)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
struct dnode_of_data dn;
int err;
- page = f2fs_grab_cache_page(mapping, index, true);
- if (!page) {
+ folio = f2fs_grab_cache_folio(mapping, index, true);
+ if (IS_ERR(folio)) {
/*
- * before exiting, we should make sure ipage will be released
+ * before exiting, we should make sure ifolio will be released
 * if any error occurs.
*/
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return ERR_PTR(-ENOMEM);
}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
+ set_new_dnode(&dn, inode, ifolio, NULL, 0);
err = f2fs_reserve_block(&dn, index);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
- if (!ipage)
+ if (!ifolio)
f2fs_put_dnode(&dn);
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
} else {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
- /* if ipage exists, blkaddr should be NEW_ADDR */
- f2fs_bug_on(F2FS_I_SB(inode), ipage);
- page = f2fs_get_lock_data_page(inode, index, true);
- if (IS_ERR(page))
- return page;
+ /* if ifolio exists, blkaddr should be NEW_ADDR */
+ f2fs_bug_on(F2FS_I_SB(inode), ifolio);
+ folio = f2fs_get_lock_data_folio(inode, index, true);
+ if (IS_ERR(folio))
+ return folio;
}
got_it:
if (new_i_size && i_size_read(inode) <
((loff_t)(index + 1) << PAGE_SHIFT))
f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
- return page;
+ return folio;
}
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
@@ -1354,96 +1391,137 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
if (err)
return err;
dn->data_blkaddr = f2fs_data_blkaddr(dn);
- if (dn->data_blkaddr != NULL_ADDR)
- goto alloc;
-
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
- return err;
+ if (dn->data_blkaddr == NULL_ADDR) {
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
+ if (unlikely(err))
+ return err;
+ }
-alloc:
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
old_blkaddr = dn->data_blkaddr;
- f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
- &sum, seg_type, NULL);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(sbi),
- old_blkaddr, old_blkaddr);
- f2fs_invalidate_compress_page(sbi, old_blkaddr);
- }
+ err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
+ &dn->data_blkaddr, &sum, seg_type, NULL);
+ if (err)
+ return err;
+
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
+
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
+ return 0;
+}
+
+static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
+{
+ f2fs_down_read(&sbi->cp_enable_rwsem);
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ f2fs_down_read(&sbi->node_change);
+ else
+ f2fs_lock_op(sbi);
+}
+
+static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
+{
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ f2fs_up_read(&sbi->node_change);
+ else
+ f2fs_unlock_op(sbi);
+ f2fs_up_read(&sbi->cp_enable_rwsem);
+}
+
+int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ int err = 0;
+
+ f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
+ &dn->data_blkaddr))
+ err = f2fs_reserve_block(dn, index);
+ f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+
+ return err;
+}
+
+static int f2fs_map_no_dnode(struct inode *inode,
+ struct f2fs_map_blocks *map, struct dnode_of_data *dn,
+ pgoff_t pgoff)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
/*
- * i_size will be updated by direct_IO. Otherwise, we'll get stale
- * data from unwritten block via dio_read.
+ * There is one exceptional case where read_node_page() may return
+ * -ENOENT because the filesystem has been shut down or hit cp_error;
+ * return -EIO in that case.
*/
+ if (map->m_may_create &&
+ (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
+ return -EIO;
+
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
+ if (map->m_next_extent)
+ *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
return 0;
}
-int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+static bool f2fs_map_blocks_cached(struct inode *inode,
+ struct f2fs_map_blocks *map, int flag)
{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct f2fs_map_blocks map;
- int flag;
- int err = 0;
- bool direct_io = iocb->ki_flags & IOCB_DIRECT;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int maxblocks = map->m_len;
+ pgoff_t pgoff = (pgoff_t)map->m_lblk;
+ struct extent_info ei = {};
- map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
- map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
- if (map.m_len > map.m_lblk)
- map.m_len -= map.m_lblk;
- else
- map.m_len = 0;
+ if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
+ return false;
- map.m_next_pgofs = NULL;
- map.m_next_extent = NULL;
- map.m_seg_type = NO_CHECK_TYPE;
- map.m_may_create = true;
+ map->m_pblk = ei.blk + pgoff - ei.fofs;
+ map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
+ map->m_flags = F2FS_MAP_MAPPED;
+ if (map->m_next_extent)
+ *map->m_next_extent = pgoff + map->m_len;
- if (direct_io) {
- map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
- flag = f2fs_force_buffered_io(inode, iocb, from) ?
- F2FS_GET_BLOCK_PRE_AIO :
- F2FS_GET_BLOCK_PRE_DIO;
- goto map_blocks;
- }
- if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
- if (f2fs_has_inline_data(inode))
- return err;
+ /* for hardware encryption, but to avoid potential issue in future */
+ if (flag == F2FS_GET_BLOCK_DIO)
+ f2fs_wait_on_block_writeback_range(inode,
+ map->m_pblk, map->m_len);
- flag = F2FS_GET_BLOCK_PRE_AIO;
+ if (f2fs_allow_multi_device_dio(sbi, flag)) {
+ int bidx = f2fs_target_device_index(sbi, map->m_pblk);
+ struct f2fs_dev_info *dev = &sbi->devs[bidx];
-map_blocks:
- err = f2fs_map_blocks(inode, &map, 1, flag);
- if (map.m_len > 0 && err == -ENOSPC) {
- if (!direct_io)
- set_inode_flag(inode, FI_NO_PREALLOC);
- err = 0;
+ map->m_bdev = dev->bdev;
+ map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
+ map->m_pblk -= dev->start_blk;
+ } else {
+ map->m_bdev = inode->i_sb->s_bdev;
}
- return err;
+ return true;
}
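A minimal worked example of the cached-extent arithmetic above, using made-up numbers rather than anything from the patch:
	/*
	 * Sketch only: with a cached read extent of fofs = 8, blk = 500,
	 * len = 32, a lookup at pgoff = 20 with maxblocks = 64 resolves to
	 *
	 *	m_pblk = 500 + 20 - 8         = 512
	 *	m_len  = min(64, 8 + 32 - 20) = 20
	 *
	 * i.e. the request is satisfied from the extent cache without ever
	 * reading the dnode.
	 */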
-void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+static bool map_is_mergeable(struct f2fs_sb_info *sbi,
+ struct f2fs_map_blocks *map,
+ block_t blkaddr, int flag, int bidx,
+ int ofs)
{
- if (flag == F2FS_GET_BLOCK_PRE_AIO) {
- if (lock)
- down_read(&sbi->node_change);
- else
- up_read(&sbi->node_change);
- } else {
- if (lock)
- f2fs_lock_op(sbi);
- else
- f2fs_unlock_op(sbi);
- }
+ if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
+ return false;
+ if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs))
+ return true;
+ if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)
+ return true;
+ if (flag == F2FS_GET_BLOCK_PRE_DIO)
+ return true;
+ if (flag == F2FS_GET_BLOCK_DIO &&
+ map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR)
+ return true;
+ return false;
}
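A quick illustration of the contiguity rule above, with hypothetical values (not from the patch):
	/*
	 * Sketch: with map->m_pblk == 100 and ofs == 3, blkaddr == 103 is
	 * physically contiguous and extends the current extent by one block;
	 * blkaddr == 200 would start a new extent instead.  PRE_DIO requests
	 * always merge, and a bdev mismatch in multi-device DIO never does.
	 */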
/*
@@ -1451,8 +1529,7 @@ void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
 * maps continuous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
*/
-int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
- int create, int flag)
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
@@ -1462,14 +1539,21 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int err = 0, ofs = 1;
unsigned int ofs_in_node, last_ofs_in_node;
blkcnt_t prealloc;
- struct extent_info ei = {0, };
block_t blkaddr;
unsigned int start_pgofs;
int bidx = 0;
+ bool is_hole;
+ bool lfs_dio_write;
if (!maxblocks)
return 0;
+ lfs_dio_write = (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ map->m_may_create);
+
+ if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
+ goto out;
+
map->m_bdev = inode->i_sb->s_bdev;
map->m_multidev_dio =
f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
@@ -1481,42 +1565,15 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
pgofs = (pgoff_t)map->m_lblk;
end = pgofs + maxblocks;
- if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
- if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
- map->m_may_create)
- goto next_dnode;
-
- map->m_pblk = ei.blk + pgofs - ei.fofs;
- map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
- map->m_flags = F2FS_MAP_MAPPED;
- if (map->m_next_extent)
- *map->m_next_extent = pgofs + map->m_len;
-
- /* for hardware encryption, but to avoid potential issue in future */
- if (flag == F2FS_GET_BLOCK_DIO)
- f2fs_wait_on_block_writeback_range(inode,
- map->m_pblk, map->m_len);
-
- if (map->m_multidev_dio) {
- block_t blk_addr = map->m_pblk;
-
- bidx = f2fs_target_device_index(sbi, map->m_pblk);
-
- map->m_bdev = FDEV(bidx).bdev;
- map->m_pblk -= FDEV(bidx).start_blk;
- map->m_len = min(map->m_len,
- FDEV(bidx).end_blk + 1 - map->m_pblk);
-
- if (map->m_may_create)
- f2fs_update_device_state(sbi, inode->i_ino,
- blk_addr, map->m_len);
- }
- goto out;
- }
+ if (flag == F2FS_GET_BLOCK_PRECACHE)
+ mode = LOOKUP_NODE_RA;
next_dnode:
- if (map->m_may_create)
- f2fs_do_map_lock(sbi, flag, true);
+ if (map->m_may_create) {
+ if (f2fs_lfs_mode(sbi))
+ f2fs_balance_fs(sbi, true);
+ f2fs_map_lock(sbi, flag);
+ }
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1524,105 +1581,90 @@ next_dnode:
if (err) {
if (flag == F2FS_GET_BLOCK_BMAP)
map->m_pblk = 0;
-
- if (err == -ENOENT) {
- /*
- * There is one exceptional case that read_node_page()
- * may return -ENOENT due to filesystem has been
- * shutdown or cp_error, so force to convert error
- * number to EIO for such case.
- */
- if (map->m_may_create &&
- (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
- f2fs_cp_error(sbi))) {
- err = -EIO;
- goto unlock_out;
- }
-
- err = 0;
- if (map->m_next_pgofs)
- *map->m_next_pgofs =
- f2fs_get_next_page_offset(&dn, pgofs);
- if (map->m_next_extent)
- *map->m_next_extent =
- f2fs_get_next_page_offset(&dn, pgofs);
- }
+ if (err == -ENOENT)
+ err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
goto unlock_out;
}
start_pgofs = pgofs;
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
next_block:
blkaddr = f2fs_data_blkaddr(&dn);
-
- if (__is_valid_data_blkaddr(blkaddr) &&
- !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
+ is_hole = !__is_valid_data_blkaddr(blkaddr);
+ if (!is_hole &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
err = -EFSCORRUPTED;
goto sync_out;
}
- if (__is_valid_data_blkaddr(blkaddr)) {
- /* use out-place-update for driect IO under LFS mode */
- if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
- map->m_may_create) {
+ /* use out-place-update for direct IO under LFS mode */
+ if (map->m_may_create && (is_hole ||
+ (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ !f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto sync_out;
+ }
+
+ switch (flag) {
+ case F2FS_GET_BLOCK_PRE_AIO:
+ if (blkaddr == NULL_ADDR) {
+ prealloc++;
+ last_ofs_in_node = dn.ofs_in_node;
+ }
+ break;
+ case F2FS_GET_BLOCK_PRE_DIO:
+ case F2FS_GET_BLOCK_DIO:
err = __allocate_data_block(&dn, map->m_seg_type);
if (err)
goto sync_out;
- blkaddr = dn.data_blkaddr;
+ if (flag == F2FS_GET_BLOCK_PRE_DIO)
+ file_need_truncate(inode);
set_inode_flag(inode, FI_APPEND_WRITE);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ err = -EIO;
+ goto sync_out;
}
- } else {
- if (create) {
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
- goto sync_out;
- }
- if (flag == F2FS_GET_BLOCK_PRE_AIO) {
- if (blkaddr == NULL_ADDR) {
- prealloc++;
- last_ofs_in_node = dn.ofs_in_node;
- }
- } else {
- WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
- flag != F2FS_GET_BLOCK_DIO);
- err = __allocate_data_block(&dn,
- map->m_seg_type);
- if (!err)
- set_inode_flag(inode, FI_APPEND_WRITE);
- }
- if (err)
- goto sync_out;
+
+ blkaddr = dn.data_blkaddr;
+ if (is_hole)
map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
- } else {
- if (f2fs_compressed_file(inode) &&
- f2fs_sanity_check_cluster(&dn) &&
- (flag != F2FS_GET_BLOCK_FIEMAP ||
- IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
- err = -EFSCORRUPTED;
- goto sync_out;
- }
- if (flag == F2FS_GET_BLOCK_BMAP) {
- map->m_pblk = 0;
- goto sync_out;
- }
- if (flag == F2FS_GET_BLOCK_PRECACHE)
- goto sync_out;
- if (flag == F2FS_GET_BLOCK_FIEMAP &&
- blkaddr == NULL_ADDR) {
- if (map->m_next_pgofs)
- *map->m_next_pgofs = pgofs + 1;
- goto sync_out;
- }
- if (flag != F2FS_GET_BLOCK_FIEMAP) {
- /* for defragment case */
+ } else if (is_hole) {
+ if (f2fs_compressed_file(inode) &&
+ f2fs_sanity_check_cluster(&dn)) {
+ err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi,
+ ERROR_CORRUPTED_CLUSTER);
+ goto sync_out;
+ }
+
+ switch (flag) {
+ case F2FS_GET_BLOCK_PRECACHE:
+ goto sync_out;
+ case F2FS_GET_BLOCK_BMAP:
+ map->m_pblk = 0;
+ goto sync_out;
+ case F2FS_GET_BLOCK_FIEMAP:
+ if (blkaddr == NULL_ADDR) {
if (map->m_next_pgofs)
*map->m_next_pgofs = pgofs + 1;
goto sync_out;
}
+ break;
+ case F2FS_GET_BLOCK_DIO:
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
+ break;
+ default:
+ /* for defragment case */
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
+ goto sync_out;
}
}
@@ -1633,25 +1675,27 @@ next_block:
bidx = f2fs_target_device_index(sbi, blkaddr);
if (map->m_len == 0) {
- /* preallocated unwritten block should be mapped for fiemap. */
+ /* reserved delalloc block should be mapped for fiemap. */
if (blkaddr == NEW_ADDR)
- map->m_flags |= F2FS_MAP_UNWRITTEN;
- map->m_flags |= F2FS_MAP_MAPPED;
+ map->m_flags |= F2FS_MAP_DELALLOC;
+ /* DIO READ and hole case, should not map the blocks. */
+ if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create))
+ map->m_flags |= F2FS_MAP_MAPPED;
map->m_pblk = blkaddr;
map->m_len = 1;
if (map->m_multidev_dio)
map->m_bdev = FDEV(bidx).bdev;
- } else if ((map->m_pblk != NEW_ADDR &&
- blkaddr == (map->m_pblk + ofs)) ||
- (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
- flag == F2FS_GET_BLOCK_PRE_DIO) {
- if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
- goto sync_out;
+
+ if (lfs_dio_write)
+ map->m_last_pblk = NULL_ADDR;
+ } else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
ofs++;
map->m_len++;
} else {
+ if (lfs_dio_write && !f2fs_is_pinned_file(inode))
+ map->m_last_pblk = blkaddr;
goto sync_out;
}
@@ -1685,7 +1729,7 @@ skip:
if (map->m_flags & F2FS_MAP_MAPPED) {
unsigned int ofs = start_pgofs - map->m_lblk;
- f2fs_update_extent_cache_range(&dn,
+ f2fs_update_read_extent_cache_range(&dn,
start_pgofs, map->m_pblk + ofs,
map->m_len - ofs);
}
@@ -1694,7 +1738,7 @@ skip:
f2fs_put_dnode(&dn);
if (map->m_may_create) {
- f2fs_do_map_lock(sbi, flag, false);
+ f2fs_map_unlock(sbi, flag);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
@@ -1708,8 +1752,6 @@ sync_out:
*/
f2fs_wait_on_block_writeback_range(inode,
map->m_pblk, map->m_len);
- invalidate_mapping_pages(META_MAPPING(sbi),
- map->m_pblk, map->m_pblk);
if (map->m_multidev_dio) {
block_t blk_addr = map->m_pblk;
@@ -1732,21 +1774,22 @@ sync_out:
if (map->m_flags & F2FS_MAP_MAPPED) {
unsigned int ofs = start_pgofs - map->m_lblk;
- f2fs_update_extent_cache_range(&dn,
- start_pgofs, map->m_pblk + ofs,
- map->m_len - ofs);
+ if (map->m_len > ofs)
+ f2fs_update_read_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
}
if (map->m_next_extent)
- *map->m_next_extent = pgofs + 1;
+ *map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
}
f2fs_put_dnode(&dn);
unlock_out:
if (map->m_may_create) {
- f2fs_do_map_lock(sbi, flag, false);
+ f2fs_map_unlock(sbi, flag);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
- trace_f2fs_map_blocks(inode, map, create, flag, err);
+ trace_f2fs_map_blocks(inode, map, flag, err);
return err;
}
@@ -1768,7 +1811,7 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
while (map.m_lblk < last_lblk) {
map.m_len = last_lblk - map.m_lblk;
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
if (err || map.m_len == 0)
return false;
map.m_lblk += map.m_len;
@@ -1776,65 +1819,10 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
return true;
}
-static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
-{
- return (bytes >> inode->i_blkbits);
-}
-
-static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
-{
- return (blks << inode->i_blkbits);
-}
-
-static int __get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create, int flag,
- pgoff_t *next_pgofs, int seg_type, bool may_write)
-{
- struct f2fs_map_blocks map;
- int err;
-
- map.m_lblk = iblock;
- map.m_len = bytes_to_blks(inode, bh->b_size);
- map.m_next_pgofs = next_pgofs;
- map.m_next_extent = NULL;
- map.m_seg_type = seg_type;
- map.m_may_create = may_write;
-
- err = f2fs_map_blocks(inode, &map, create, flag);
- if (!err) {
- map_bh(bh, inode->i_sb, map.m_pblk);
- bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
- bh->b_size = blks_to_bytes(inode, map.m_len);
-
- if (map.m_multidev_dio)
- bh->b_bdev = map.m_bdev;
- }
- return err;
-}
-
-static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL,
- f2fs_rw_hint_to_seg_type(inode->i_write_hint),
- true);
-}
-
-static int get_data_block_dio(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL,
- f2fs_rw_hint_to_seg_type(inode->i_write_hint),
- false);
-}
-
static int f2fs_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page;
struct node_info ni;
__u64 phys = 0, len;
__u32 flags;
@@ -1843,19 +1831,19 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (f2fs_has_inline_xattr(inode)) {
int offset;
+ struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
+ inode->i_ino, false);
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
- inode->i_ino, false);
- if (!page)
- return -ENOMEM;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
- phys = blks_to_bytes(inode, ni.blk_addr);
+ phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
offset = offsetof(struct f2fs_inode, i_addr) +
sizeof(__le32) * (DEF_ADDRS_PER_INODE -
get_inline_xattr_addrs(inode));
@@ -1863,7 +1851,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
phys += offset;
len = inline_xattr_size(inode);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
@@ -1872,25 +1860,27 @@ static int f2fs_xattr_fiemap(struct inode *inode,
err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
- if (err || err == 1)
+ if (err)
return err;
}
if (xnid) {
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
- if (!page)
- return -ENOMEM;
+ struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
+ xnid, false);
- err = f2fs_get_node_info(sbi, xnid, &ni);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ err = f2fs_get_node_info(sbi, xnid, &ni, false);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
- phys = blks_to_bytes(inode, ni.blk_addr);
+ phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
len = inode->i_sb->s_blocksize;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
flags = FIEMAP_EXTENT_LAST;
}
@@ -1903,30 +1893,11 @@ static int f2fs_xattr_fiemap(struct inode *inode,
return (err < 0 ? err : 0);
}
-static loff_t max_inode_blocks(struct inode *inode)
-{
- loff_t result = ADDRS_PER_INODE(inode);
- loff_t leaf_count = ADDRS_PER_BLOCK(inode);
-
- /* two direct node blocks */
- result += (leaf_count * 2);
-
- /* two indirect node blocks */
- leaf_count *= NIDS_PER_BLOCK;
- result += (leaf_count * 2);
-
- /* one double indirect node block */
- leaf_count *= NIDS_PER_BLOCK;
- result += leaf_count;
-
- return result;
-}
-
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
struct f2fs_map_blocks map;
- sector_t start_blk, last_blk;
+ sector_t start_blk, last_blk, blk_len, max_len;
pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
@@ -1946,9 +1917,9 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret)
return ret;
- inode_lock(inode);
+ inode_lock_shared(inode);
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
if (start > maxbytes) {
ret = -EFBIG;
goto out;
@@ -1968,16 +1939,15 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
goto out;
}
- if (bytes_to_blks(inode, len) == 0)
- len = blks_to_bytes(inode, 1);
-
- start_blk = bytes_to_blks(inode, start);
- last_blk = bytes_to_blks(inode, start + len - 1);
+ start_blk = F2FS_BYTES_TO_BLK(start);
+ last_blk = F2FS_BYTES_TO_BLK(start + len - 1);
+ blk_len = last_blk - start_blk + 1;
+ max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk;
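A worked example of the byte/block conversion above, assuming 4KiB blocks and made-up inputs:
	/*
	 * Sketch: start = 5000, len = 10000 gives
	 *	start_blk = F2FS_BYTES_TO_BLK(5000)  = 1
	 *	last_blk  = F2FS_BYTES_TO_BLK(14999) = 3
	 *	blk_len   = 3 - 1 + 1                = 3
	 * and max_len covers whatever remains up to maxbytes from start_blk.
	 */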
next:
memset(&map, 0, sizeof(map));
map.m_lblk = start_blk;
- map.m_len = bytes_to_blks(inode, len);
+ map.m_len = blk_len;
map.m_next_pgofs = &next_pgofs;
map.m_seg_type = NO_CHECK_TYPE;
@@ -1986,7 +1956,7 @@ next:
map.m_len = cluster_size - count_in_cluster;
}
- ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
+ ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
if (ret)
goto out;
@@ -1994,16 +1964,26 @@ next:
if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
start_blk = next_pgofs;
- if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
- max_inode_blocks(inode)))
+ if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes)
goto prep_next;
flags |= FIEMAP_EXTENT_LAST;
}
+ /*
+ * the current extent may cross the boundary of the inquiry; increase
+ * len and requery.
+ */
+ if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) &&
+ map.m_lblk + map.m_len - 1 == last_blk &&
+ blk_len != max_len) {
+ blk_len = max_len;
+ goto next;
+ }
+
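A short illustration of the requery above, under the same assumptions as the surrounding code:
	/*
	 * Sketch: a FIEMAP call for a single block (blk_len == 1) whose
	 * mapped extent ends exactly at last_blk (m_lblk + m_len - 1 ==
	 * last_blk) may in fact continue past the inquiry; raising blk_len
	 * to max_len and jumping back to "next:" requeries the mapping so
	 * the full extent is reported instead of being cut at the boundary.
	 */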
compr_appended = false;
/* In a case of compressed cluster, append this to the last extent */
- if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
+ if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
!(map.m_flags & F2FS_MAP_FLAGS))) {
compr_appended = true;
goto skip_fill;
@@ -2032,14 +2012,14 @@ skip_fill:
} else if (compr_appended) {
unsigned int appended_blks = cluster_size -
count_in_cluster + 1;
- size += blks_to_bytes(inode, appended_blks);
+ size += F2FS_BLK_TO_BYTES(appended_blks);
start_blk += appended_blks;
compr_cluster = false;
} else {
- logical = blks_to_bytes(inode, start_blk);
+ logical = F2FS_BLK_TO_BYTES(start_blk);
phys = __is_valid_data_blkaddr(map.m_pblk) ?
- blks_to_bytes(inode, map.m_pblk) : 0;
- size = blks_to_bytes(inode, map.m_len);
+ F2FS_BLK_TO_BYTES(map.m_pblk) : 0;
+ size = F2FS_BLK_TO_BYTES(map.m_len);
flags = 0;
if (compr_cluster) {
@@ -2047,13 +2027,13 @@ skip_fill:
count_in_cluster += map.m_len;
if (count_in_cluster == cluster_size) {
compr_cluster = false;
- size += blks_to_bytes(inode, 1);
+ size += F2FS_BLKSIZE;
}
- } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
+ } else if (map.m_flags & F2FS_MAP_DELALLOC) {
flags = FIEMAP_EXTENT_UNWRITTEN;
}
- start_blk += bytes_to_blks(inode, size);
+ start_blk += F2FS_BYTES_TO_BLK(size);
}
prep_next:
@@ -2066,38 +2046,43 @@ out:
if (ret == 1)
ret = 0;
- inode_unlock(inode);
+ inode_unlock_shared(inode);
return ret;
}
static inline loff_t f2fs_readpage_limit(struct inode *inode)
{
- if (IS_ENABLED(CONFIG_FS_VERITY) &&
- (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
- return inode->i_sb->s_maxbytes;
+ if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
+ return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
return i_size_read(inode);
}
-static int f2fs_read_single_page(struct inode *inode, struct page *page,
+static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
+{
+ return rac ? REQ_RAHEAD : 0;
+}
+
+static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
unsigned nr_pages,
struct f2fs_map_blocks *map,
struct bio **bio_ret,
sector_t *last_block_in_bio,
- bool is_readahead)
+ struct readahead_control *rac)
{
struct bio *bio = *bio_ret;
- const unsigned blocksize = blks_to_bytes(inode, 1);
+ const unsigned int blocksize = F2FS_BLKSIZE;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
+ pgoff_t index = folio->index;
int ret = 0;
- block_in_file = (sector_t)page_index(page);
+ block_in_file = (sector_t)index;
last_block = block_in_file + nr_pages;
- last_block_in_file = bytes_to_blks(inode,
- f2fs_readpage_limit(inode) + blocksize - 1);
+ last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
+ blocksize - 1);
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -2119,19 +2104,13 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
map->m_lblk = block_in_file;
map->m_len = last_block - block_in_file;
- ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
+ ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
if (ret)
goto out;
got_it:
if ((map->m_flags & F2FS_MAP_MAPPED)) {
block_nr = map->m_pblk + block_in_file - map->m_lblk;
- SetPageMappedToDisk(page);
-
- if (!PageUptodate(page) && (!PageSwapCache(page) &&
- !cleancache_get_page(page))) {
- SetPageUptodate(page);
- goto confused;
- }
+ folio_set_mappedtodisk(folio);
if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
DATA_GENERIC_ENHANCE_READ)) {
@@ -2140,15 +2119,15 @@ got_it:
}
} else {
zero_out:
- zero_user_segment(page, 0, PAGE_SIZE);
- if (f2fs_need_verity(inode, page->index) &&
- !fsverity_verify_page(page)) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (f2fs_need_verity(inode, index) &&
+ !fsverity_verify_folio(folio)) {
ret = -EIO;
goto out;
}
- if (!PageUptodate(page))
- SetPageUptodate(page);
- unlock_page(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
goto out;
}
@@ -2158,21 +2137,15 @@ zero_out:
*/
if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
*last_block_in_bio, block_nr) ||
- !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+ !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
submit_and_realloc:
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
- if (bio == NULL) {
+ if (bio == NULL)
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0, page->index,
+ f2fs_ra_op_flags(rac), index,
false);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- bio = NULL;
- goto out;
- }
- }
/*
* If the page is under writeback, we need to wait for
@@ -2180,20 +2153,13 @@ submit_and_realloc:
*/
f2fs_wait_on_block_writeback(inode, block_nr);
- if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
- f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
- ClearPageError(page);
+ f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
+ F2FS_BLKSIZE);
*last_block_in_bio = block_nr;
- goto out;
-confused:
- if (bio) {
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
- bio = NULL;
- }
- unlock_page(page);
out:
*bio_ret = bio;
return ret;
@@ -2202,7 +2168,7 @@ out:
#ifdef CONFIG_F2FS_FS_COMPRESSION
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead, bool for_write)
+ struct readahead_control *rac, bool for_write)
{
struct dnode_of_data dn;
struct inode *inode = cc->inode;
@@ -2210,34 +2176,43 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
struct bio *bio = *bio_ret;
unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
sector_t last_block_in_file;
- const unsigned blocksize = blks_to_bytes(inode, 1);
+ const unsigned int blocksize = F2FS_BLKSIZE;
struct decompress_io_ctx *dic = NULL;
- struct extent_info ei = {0, };
+ struct extent_info ei = {};
bool from_dnode = true;
int i;
int ret = 0;
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
+ from_dnode = false;
+ goto out_put_dnode;
+ }
+
f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
- last_block_in_file = bytes_to_blks(inode,
- f2fs_readpage_limit(inode) + blocksize - 1);
+ last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
+ blocksize - 1);
/* get rid of pages beyond EOF */
for (i = 0; i < cc->cluster_size; i++) {
struct page *page = cc->rpages[i];
+ struct folio *folio;
if (!page)
continue;
- if ((sector_t)page->index >= last_block_in_file) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- } else if (!PageUptodate(page)) {
+
+ folio = page_folio(page);
+ if ((sector_t)folio->index >= last_block_in_file) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ } else if (!folio_test_uptodate(folio)) {
continue;
}
- unlock_page(page);
+ folio_unlock(folio);
if (for_write)
- put_page(page);
+ folio_put(folio);
cc->rpages[i] = NULL;
cc->nr_rpages--;
}
@@ -2246,7 +2221,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
if (f2fs_cluster_is_empty(cc))
goto out;
- if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
+ if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
from_dnode = false;
if (!from_dnode)
@@ -2263,7 +2238,7 @@ skip_reading_dnode:
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) :
ei.blk + i - 1;
@@ -2293,44 +2268,38 @@ skip_reading_dnode:
}
for (i = 0; i < cc->nr_cpages; i++) {
- struct page *page = dic->cpages[i];
+ struct folio *folio = page_folio(dic->cpages[i]);
block_t blkaddr;
struct bio_post_read_ctx *ctx;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1) :
ei.blk + i;
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
- if (atomic_dec_and_test(&dic->remaining_pages))
- f2fs_decompress_cluster(dic);
+ if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) {
+ if (atomic_dec_and_test(&dic->remaining_pages)) {
+ f2fs_decompress_cluster(dic, true);
+ break;
+ }
continue;
}
if (bio && (!page_is_mergeable(sbi, bio,
*last_block_in_bio, blkaddr) ||
- !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+ !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
submit_and_realloc:
- __submit_bio(sbi, bio, DATA);
+ f2fs_submit_read_bio(sbi, bio, DATA);
bio = NULL;
}
- if (!bio) {
- bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0,
- page->index, for_write);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- f2fs_decompress_end_io(dic, ret);
- f2fs_put_dnode(&dn);
- *bio_ret = NULL;
- return ret;
- }
- }
+ if (!bio)
+ bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i,
+ f2fs_ra_op_flags(rac),
+ folio->index, for_write);
- if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
ctx = get_post_read_ctx(bio);
@@ -2338,9 +2307,7 @@ submit_and_realloc:
refcount_inc(&dic->refcnt);
inc_page_count(sbi, F2FS_RD_DATA);
- f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
- f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
- ClearPageError(page);
+ f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
*last_block_in_bio = blkaddr;
}
@@ -2357,7 +2324,6 @@ out:
for (i = 0; i < cc->cluster_size; i++) {
if (cc->rpages[i]) {
ClearPageUptodate(cc->rpages[i]);
- ClearPageError(cc->rpages[i]);
unlock_page(cc->rpages[i]);
}
}
@@ -2371,7 +2337,7 @@ out:
* Major change was from block_size == page_size in f2fs by default.
*/
static int f2fs_mpage_readpages(struct inode *inode,
- struct readahead_control *rac, struct page *page)
+ struct readahead_control *rac, struct folio *folio)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
@@ -2388,11 +2354,20 @@ static int f2fs_mpage_readpages(struct inode *inode,
.nr_cpages = 0,
};
pgoff_t nc_cluster_idx = NULL_CLUSTER;
+ pgoff_t index;
#endif
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
int ret = 0;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ index = rac ? readahead_index(rac) : folio->index;
+ max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
+ round_down(index, cc.cluster_size);
+ }
+#endif
+
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
@@ -2404,65 +2379,63 @@ static int f2fs_mpage_readpages(struct inode *inode,
for (; nr_pages; nr_pages--) {
if (rac) {
- page = readahead_page(rac);
- prefetchw(&page->flags);
+ folio = readahead_folio(rac);
+ prefetchw(&folio->flags);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_compressed_file(inode)) {
- /* there are remained comressed pages, submit them */
- if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
- ret = f2fs_read_multi_pages(&cc, &bio,
- max_nr_pages,
- &last_block_in_bio,
- rac != NULL, false);
- f2fs_destroy_compress_ctx(&cc, false);
- if (ret)
- goto set_error_page;
- }
- if (cc.cluster_idx == NULL_CLUSTER) {
- if (nc_cluster_idx ==
- page->index >> cc.log_cluster_size) {
- goto read_single_page;
- }
-
- ret = f2fs_is_compressed_cluster(inode, page->index);
- if (ret < 0)
- goto set_error_page;
- else if (!ret) {
- nc_cluster_idx =
- page->index >> cc.log_cluster_size;
- goto read_single_page;
- }
-
- nc_cluster_idx = NULL_CLUSTER;
- }
- ret = f2fs_init_compress_ctx(&cc);
+ index = folio->index;
+
+ if (!f2fs_compressed_file(inode))
+ goto read_single_page;
+
+ /* there are remaining compressed pages, submit them */
+ if (!f2fs_cluster_can_merge_page(&cc, index)) {
+ ret = f2fs_read_multi_pages(&cc, &bio,
+ max_nr_pages,
+ &last_block_in_bio,
+ rac, false);
+ f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
+ }
+ if (cc.cluster_idx == NULL_CLUSTER) {
+ if (nc_cluster_idx == index >> cc.log_cluster_size)
+ goto read_single_page;
- f2fs_compress_ctx_add_page(&cc, page);
+ ret = f2fs_is_compressed_cluster(inode, index);
+ if (ret < 0)
+ goto set_error_page;
+ else if (!ret) {
+ nc_cluster_idx =
+ index >> cc.log_cluster_size;
+ goto read_single_page;
+ }
- goto next_page;
+ nc_cluster_idx = NULL_CLUSTER;
}
+ ret = f2fs_init_compress_ctx(&cc);
+ if (ret)
+ goto set_error_page;
+
+ f2fs_compress_ctx_add_page(&cc, folio);
+
+ goto next_page;
read_single_page:
#endif
- ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
+ ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
&bio, &last_block_in_bio, rac);
if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
#endif
- SetPageError(page);
- zero_user_segment(page, 0, PAGE_SIZE);
- unlock_page(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_unlock(folio);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
next_page:
#endif
- if (rac)
- put_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
@@ -2471,34 +2444,34 @@ next_page:
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
- rac != NULL, false);
+ rac, false);
f2fs_destroy_compress_ctx(&cc, false);
}
}
#endif
}
if (bio)
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return ret;
}
-static int f2fs_read_data_page(struct file *file, struct page *page)
+static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = page_file_mapping(page)->host;
+ struct inode *inode = folio->mapping->host;
int ret = -EAGAIN;
- trace_f2fs_readpage(page, DATA);
+ trace_f2fs_readpage(folio, DATA);
if (!f2fs_is_compress_backend_ready(inode)) {
- unlock_page(page);
+ folio_unlock(folio);
return -EOPNOTSUPP;
}
/* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
- ret = f2fs_read_inline_data(inode, page);
+ ret = f2fs_read_inline_data(inode, folio);
if (ret == -EAGAIN)
- ret = f2fs_mpage_readpages(inode, NULL, page);
+ ret = f2fs_mpage_readpages(inode, NULL, folio);
return ret;
}
@@ -2511,7 +2484,7 @@ static void f2fs_readahead(struct readahead_control *rac)
if (!f2fs_is_compress_backend_ready(inode))
return;
- /* If the file has inline data, skip readpages */
+ /* If the file has inline data, skip readahead */
if (f2fs_has_inline_data(inode))
return;
@@ -2520,8 +2493,9 @@ static void f2fs_readahead(struct readahead_control *rac)
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
- struct inode *inode = fio->page->mapping->host;
- struct page *mpage, *page;
+ struct inode *inode = fio_inode(fio);
+ struct folio *mfolio;
+ struct page *page;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
@@ -2529,32 +2503,29 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
page = fio->compressed_page ? fio->compressed_page : fio->page;
- /* wait for GCed page writeback via META_MAPPING */
- f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
-
if (fscrypt_inode_uses_inline_crypto(inode))
return 0;
retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+ fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
f2fs_flush_merged_writes(fio->sbi);
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
return PTR_ERR(fio->encrypted_page);
}
- mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
- if (mpage) {
- if (PageUptodate(mpage))
- memcpy(page_address(mpage),
+ mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr);
+ if (!IS_ERR(mfolio)) {
+ if (folio_test_uptodate(mfolio))
+ memcpy(folio_address(mfolio),
page_address(fio->encrypted_page), PAGE_SIZE);
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
}
return 0;
}
@@ -2563,31 +2534,29 @@ static inline bool check_inplace_update_policy(struct inode *inode,
struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int policy = SM_I(sbi)->ipu_policy;
- if (policy & (0x1 << F2FS_IPU_FORCE))
+ if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
+ is_inode_flag_set(inode, FI_OPU_WRITE))
+ return false;
+ if (IS_F2FS_IPU_FORCE(sbi))
return true;
- if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
+ if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
return true;
- if (policy & (0x1 << F2FS_IPU_UTIL) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
- if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
/*
* IPU for rewrite async pages
*/
- if (policy & (0x1 << F2FS_IPU_ASYNC) &&
- fio && fio->op == REQ_OP_WRITE &&
- !(fio->op_flags & REQ_SYNC) &&
- !IS_ENCRYPTED(inode))
+ if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
+ !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
return true;
/* this is only set during fdatasync */
- if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(inode, FI_NEED_IPU))
+ if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
return true;
if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
@@ -2607,7 +2576,7 @@ bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
/* if this is cold file, we should overwrite to avoid fragmentation */
- if (file_is_cold(inode))
+ if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
return true;
return check_inplace_update_policy(inode, fio);
@@ -2617,26 +2586,35 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ /* The below cases were checked when setting it. */
+ if (f2fs_is_pinned_file(inode))
+ return false;
+ if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ return true;
if (f2fs_lfs_mode(sbi))
return true;
if (S_ISDIR(inode->i_mode))
return true;
if (IS_NOQUOTA(inode))
return true;
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_used_in_atomic_write(inode))
return true;
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ /* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */
+ if (f2fs_compressed_file(inode) &&
+ F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER &&
+ is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
return true;
/* swap file is migrating in aligned write mode */
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
return true;
+ if (is_inode_flag_set(inode, FI_OPU_WRITE))
+ return true;
+
if (fio) {
if (page_private_gcing(fio->page))
return true;
- if (page_private_dummy(fio->page))
- return true;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
return true;
@@ -2646,7 +2624,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
if (f2fs_should_update_outplace(inode, fio))
return false;
@@ -2656,19 +2634,25 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio)
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
- struct page *page = fio->page;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = fio->folio;
+ struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
- struct extent_info ei = {0, };
struct node_info ni;
bool ipu_force = false;
+ bool atomic_commit;
int err = 0;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- if (need_inplace_update(fio) &&
- f2fs_lookup_extent_cache(inode, page->index, &ei)) {
- fio->old_blkaddr = ei.blk + page->index - ei.fofs;
+ /* Use COW inode to make dnode_of_data for atomic write */
+ atomic_commit = f2fs_is_atomic_file(inode) &&
+ folio_test_f2fs_atomic(folio);
+ if (atomic_commit)
+ set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
+ else
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (need_inplace_update(fio) &&
+ f2fs_lookup_read_extent_cache_block(inode, folio->index,
+ &fio->old_blkaddr)) {
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
DATA_GENERIC_ENHANCE))
return -EFSCORRUPTED;
@@ -2682,7 +2666,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
return -EAGAIN;
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
if (err)
goto out;
@@ -2690,8 +2674,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
+ folio_clear_uptodate(folio);
+ folio_clear_f2fs_gcing(folio);
goto out_writepage;
}
got_it:
@@ -2701,6 +2685,11 @@ got_it:
err = -EFSCORRUPTED;
goto out_writepage;
}
+
+ /* wait for GCed page writeback via META_MAPPING */
+ if (fio->meta_gc)
+ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
+
/*
* If current allocation needs SSR,
 * it is better to use in-place writes for updated data.
@@ -2712,8 +2701,7 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
- ClearPageError(page);
+ folio_start_writeback(folio);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -2721,12 +2709,11 @@ got_it:
if (err) {
if (fscrypt_inode_uses_fs_layer_crypto(inode))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
- if (PageWriteback(page))
- end_page_writeback(page);
+ folio_end_writeback(folio);
} else {
set_inode_flag(inode, FI_UPDATE_WRITE);
}
- trace_f2fs_do_write_data_page(fio->page, IPU);
+ trace_f2fs_do_write_data_page(folio, IPU);
return err;
}
@@ -2738,7 +2725,7 @@ got_it:
fio->need_lock = LOCK_REQ;
}
- err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
if (err)
goto out_writepage;
@@ -2748,18 +2735,17 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
- ClearPageError(page);
+ folio_start_writeback(folio);
if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
- trace_f2fs_do_write_data_page(page, OPU);
+ trace_f2fs_do_write_data_page(folio, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ if (atomic_commit)
+ folio_clear_f2fs_atomic(folio);
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2768,7 +2754,7 @@ out:
return err;
}
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio,
sector_t *last_block,
struct writeback_control *wbc,
@@ -2776,14 +2762,15 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks,
bool allow_balance)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long)i_size)
>> PAGE_SHIFT;
- loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
+ bool quota_inode = IS_NOQUOTA(inode);
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
@@ -2792,27 +2779,33 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
.old_blkaddr = NULL_ADDR,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
- .submitted = false,
+ .submitted = 0,
.compr_blocks = compr_blocks,
- .need_lock = LOCK_RETRY,
+ .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
+ .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
.io_type = io_type,
.io_wbc = wbc,
.bio = bio,
.last_block = last_block,
};
- trace_f2fs_writepage(page, DATA);
+ trace_f2fs_writepage(folio, DATA);
- /* we should bypass data pages to proceed the kworkder jobs */
+ /* we should bypass data pages to let the kworker jobs proceed */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(page->mapping, -EIO);
+ mapping_set_error(folio->mapping, -EIO);
/*
 * don't drop any dirty dentry pages for keeping latest
* directory structure.
*/
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode) &&
+ !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
+ goto redirty_out;
+
+ /* keep data pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
goto redirty_out;
goto out;
}
@@ -2820,7 +2813,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index ||
+ if (folio->index < end_index ||
f2fs_verity_in_progress(inode) ||
compr_blocks)
goto write;
@@ -2830,48 +2823,34 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
* this page does not have to be written to disk.
*/
offset = i_size & (PAGE_SIZE - 1);
- if ((page->index >= end_index + 1) || !offset)
+ if ((folio->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
write:
- if (f2fs_is_drop_cache(inode))
- goto out;
- /* we should not write 0'th page having journal header */
- if (f2fs_is_volatile_file(inode) && (!page->index ||
- (!wbc->for_reclaim &&
- f2fs_available_free_memory(sbi, BASE_CHECK))))
- goto redirty_out;
-
/* Dentry/quota blocks are controlled by checkpoint */
- if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
+ if (S_ISDIR(inode->i_mode) || quota_inode) {
/*
* We need to wait for node_write to avoid block allocation during
* checkpoint. This can only happen to quota writes which can cause
* the below discard race condition.
*/
- if (IS_NOQUOTA(inode))
- down_read(&sbi->node_write);
+ if (quota_inode)
+ f2fs_down_read(&sbi->node_write);
fio.need_lock = LOCK_DONE;
err = f2fs_do_write_data_page(&fio);
- if (IS_NOQUOTA(inode))
- up_read(&sbi->node_write);
+ if (quota_inode)
+ f2fs_up_read(&sbi->node_write);
goto done;
}
- if (!wbc->for_reclaim)
- need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0, 0))
- goto redirty_out;
- else
- set_inode_flag(inode, FI_HOT_DATA);
-
+ need_balance_fs = true;
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
- err = f2fs_write_inline_data(inode, page);
+ err = f2fs_write_inline_data(inode, folio);
if (!err)
goto out;
}
@@ -2879,6 +2858,7 @@ write:
if (err == -EAGAIN) {
err = f2fs_do_write_data_page(&fio);
if (err == -EAGAIN) {
+ f2fs_bug_on(sbi, compr_blocks);
fio.need_lock = LOCK_REQ;
err = f2fs_do_write_data_page(&fio);
}
@@ -2900,70 +2880,42 @@ done:
out:
inode_dec_dirty_pages(inode);
if (err) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
- }
-
- if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
- clear_inode_flag(inode, FI_HOT_DATA);
- f2fs_remove_dirty_inode(inode);
- submitted = NULL;
+ folio_clear_uptodate(folio);
+ folio_clear_f2fs_gcing(folio);
}
- unlock_page(page);
+ folio_unlock(folio);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
- !F2FS_I(inode)->cp_task && allow_balance)
+ !F2FS_I(inode)->wb_task && allow_balance)
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_write(sbi, DATA);
- f2fs_submit_merged_ipu_write(sbi, bio, NULL);
+ if (bio && *bio)
+ f2fs_submit_merged_ipu_write(sbi, bio, NULL);
submitted = NULL;
}
if (submitted)
- *submitted = fio.submitted ? 1 : 0;
+ *submitted = fio.submitted;
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
/*
- * pageout() in MM traslates EAGAIN, so calls handle_write_error()
+ * pageout() in MM translates EAGAIN, so calls handle_write_error()
* -> mapping_set_error() -> set_bit(AS_EIO, ...).
* file_write_and_wait_range() will see EIO error, which is critical
* to return value of fsync() followed by atomic_write failure to user.
*/
- if (!err || wbc->for_reclaim)
- return AOP_WRITEPAGE_ACTIVATE;
- unlock_page(page);
+ folio_unlock(folio);
+ if (!err)
+ return 1;
return err;
}
-static int f2fs_write_data_page(struct page *page,
- struct writeback_control *wbc)
-{
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- struct inode *inode = page->mapping->host;
-
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
- goto out;
-
- if (f2fs_compressed_file(inode)) {
- if (f2fs_is_compressed_cluster(inode, page->index)) {
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
- }
- }
-out:
-#endif
-
- return f2fs_write_single_data_page(page, NULL, NULL, NULL,
- wbc, FS_DATA_IO, 0, true);
-}
-
/*
- * This function was copied from write_cche_pages from mm/page-writeback.c.
+ * This function was copied from write_cache_pages from mm/page-writeback.c.
* The major change is making write step of cold data page separately from
* warm/hot data page.
*/
@@ -2973,7 +2925,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
{
int ret = 0;
int done = 0, retry = 0;
- struct pagevec pvec;
+ struct page *pages_local[F2FS_ONSTACK_PAGES];
+ struct page **pages = pages_local;
+ struct folio_batch fbatch;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
sector_t last_block;
@@ -2987,13 +2941,16 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
.rpages = NULL,
.nr_rpages = 0,
.cpages = NULL,
+ .valid_nr_cpages = 0,
.rbuf = NULL,
.cbuf = NULL,
.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
.private = NULL,
};
#endif
+ int nr_folios, p, idx;
int nr_pages;
+ unsigned int max_pages = F2FS_ONSTACK_PAGES;
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
@@ -3003,7 +2960,16 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int submitted = 0;
int i;
- pagevec_init(&pvec);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode) &&
+ 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
+ pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+ cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
+ max_pages = 1 << cc.log_cluster_size;
+ }
+#endif
+
+ folio_batch_init(&fbatch);
if (get_dirty_pages(mapping->host) <=
SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
@@ -3020,23 +2986,45 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(wbc);
retry:
retry = 0;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !retry && (index <= end)) {
- nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
- tag);
- if (nr_pages == 0)
+ nr_pages = 0;
+again:
+ nr_folios = filemap_get_folios_tag(mapping, &index, end,
+ tag, &fbatch);
+ if (nr_folios == 0) {
+ if (nr_pages)
+ goto write;
break;
+ }
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ idx = 0;
+ p = folio_nr_pages(folio);
+add_more:
+ pages[nr_pages] = folio_page(folio, idx);
+ folio_get(folio);
+ if (++nr_pages == max_pages) {
+ index = folio->index + idx + 1;
+ folio_batch_release(&fbatch);
+ goto write;
+ }
+ if (++idx < p)
+ goto add_more;
+ }
+ folio_batch_release(&fbatch);
+ goto again;
+write:
for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ struct page *page = pages[i];
+ struct folio *folio = page_folio(page);
bool need_readd;
readd:
need_readd = false;
@@ -3053,7 +3041,7 @@ readd:
}
if (!f2fs_cluster_can_merge_page(&cc,
- page->index)) {
+ folio->index)) {
ret = f2fs_write_multi_pages(&cc,
&submitted, wbc, io_type);
if (!ret)
@@ -3062,23 +3050,28 @@ readd:
}
if (unlikely(f2fs_cp_error(sbi)))
- goto lock_page;
+ goto lock_folio;
if (!f2fs_cluster_is_empty(&cc))
- goto lock_page;
+ goto lock_folio;
+
+ if (f2fs_all_cluster_page_ready(&cc,
+ pages, i, nr_pages, true))
+ goto lock_folio;
ret2 = f2fs_prepare_compress_overwrite(
inode, &pagep,
- page->index, &fsdata);
+ folio->index, &fsdata);
if (ret2 < 0) {
ret = ret2;
done = 1;
break;
} else if (ret2 &&
(!f2fs_compress_write_end(inode,
- fsdata, page->index, 1) ||
- !f2fs_all_cluster_page_loaded(&cc,
- &pvec, i, nr_pages))) {
+ fsdata, folio->index, 1) ||
+ !f2fs_all_cluster_page_ready(&cc,
+ pages, i, nr_pages,
+ false))) {
retry = 1;
break;
}
@@ -3091,46 +3084,43 @@ readd:
break;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
-lock_page:
+lock_folio:
#endif
- done_index = page->index;
+ done_index = folio->index;
retry_write:
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(folio->mapping != mapping)) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- f2fs_wait_on_page_writeback(page,
- DATA, true, true);
- else
+ if (folio_test_writeback(folio)) {
+ if (wbc->sync_mode == WB_SYNC_NONE)
goto continue_unlock;
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
}
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- get_page(page);
- f2fs_compress_ctx_add_page(&cc, page);
+ folio_get(folio);
+ f2fs_compress_ctx_add_page(&cc, folio);
continue;
}
#endif
- ret = f2fs_write_single_data_page(page, &submitted,
- &bio, &last_block, wbc, io_type,
- 0, true);
- if (ret == AOP_WRITEPAGE_ACTIVATE)
- unlock_page(page);
+ submitted = 0;
+ ret = f2fs_write_single_data_page(folio,
+ &submitted, &bio, &last_block,
+ wbc, io_type, 0, true);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
@@ -3142,20 +3132,19 @@ result:
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ if (ret == 1) {
ret = 0;
goto next;
} else if (ret == -EAGAIN) {
ret = 0;
if (wbc->sync_mode == WB_SYNC_ALL) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(
+ DEFAULT_SCHEDULE_TIMEOUT);
goto retry_write;
}
goto next;
}
- done_index = page->index + 1;
+ done_index = folio_next_index(folio);
done = 1;
break;
}
@@ -3169,7 +3158,7 @@ next:
if (need_readd)
goto readd;
}
- pagevec_release(&pvec);
+ release_pages(pages, nr_pages);
cond_resched();
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -3203,6 +3192,11 @@ next:
if (bio)
f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (pages != pages_local)
+ kfree(pages);
+#endif
+
return ret;
}
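
The writeback loop above now collects dirty folios with filemap_get_folios_tag() and expands each folio into per-page slots, capping the batch at max_pages and remembering where to resume. A minimal user-space sketch of that expansion, using simplified stand-in types rather than the kernel's struct page/struct folio:

#include <stdio.h>

/* Stand-ins for the kernel types (illustration only, not the kernel API). */
struct folio { unsigned long index; int nr_pages; };
struct page  { struct folio *folio; int idx; };

/*
 * Expand an array of folios into per-page slots, stopping once max_pages
 * slots are filled.  On a full batch, *next_index is set just past the
 * last sub-page taken, mirroring how the writeback loop resumes.
 */
static int expand_folios(struct folio *folios, int nr_folios,
			 struct page *pages, int max_pages,
			 unsigned long *next_index)
{
	int nr_pages = 0;

	for (int i = 0; i < nr_folios; i++) {
		for (int idx = 0; idx < folios[i].nr_pages; idx++) {
			pages[nr_pages].folio = &folios[i];
			pages[nr_pages].idx = idx;
			if (++nr_pages == max_pages) {
				*next_index = folios[i].index + idx + 1;
				return nr_pages;
			}
		}
	}
	return nr_pages;
}

int main(void)
{
	struct folio folios[] = { { 0, 4 }, { 4, 4 }, { 8, 1 } };
	struct page pages[6];
	unsigned long next = 0;
	int n = expand_folios(folios, 3, pages, 6, &next);

	printf("got %d pages, resume at index %lu\n", n, next); /* 6, 6 */
	return 0;
}

The resume index matches `folio->index + idx + 1` in the patch: the next lookup starts just past the last sub-page that still fit into the batch.
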
@@ -3210,7 +3204,7 @@ static inline bool __should_serialize_io(struct inode *inode,
struct writeback_control *wbc)
{
/* to avoid deadlock in path of data flush */
- if (F2FS_I(inode)->cp_task)
+ if (F2FS_I(inode)->wb_task)
return false;
if (!S_ISREG(inode->i_mode))
@@ -3227,6 +3221,19 @@ static inline bool __should_serialize_io(struct inode *inode,
return false;
}
+static inline void account_writeback(struct inode *inode, bool inc)
+{
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
+ if (inc)
+ atomic_inc(&F2FS_I(inode)->writeback);
+ else
+ atomic_dec(&F2FS_I(inode)->writeback);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+}
+
static int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
enum iostat_type io_type)
@@ -3237,10 +3244,6 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
int ret;
bool locked = false;
- /* deal with chardevs and other special file */
- if (!mapping->a_ops->writepage)
- return 0;
-
/* skip writing if there is no dirty page in this inode */
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
@@ -3255,8 +3258,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
f2fs_available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
- /* skip writing during file defragment */
- if (is_inode_flag_set(inode, FI_DO_DEFRAG))
+ /* skip writing in file defragment preparing stage */
+ if (is_inode_flag_set(inode, FI_SKIP_WRITES))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, DATA);
@@ -3264,18 +3267,26 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
/* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
if (wbc->sync_mode == WB_SYNC_ALL)
atomic_inc(&sbi->wb_sync_req[DATA]);
- else if (atomic_read(&sbi->wb_sync_req[DATA]))
+ else if (atomic_read(&sbi->wb_sync_req[DATA])) {
+ /* to avoid potential deadlock */
+ if (current->plug)
+ blk_finish_plug(current->plug);
goto skip_write;
+ }
if (__should_serialize_io(inode, wbc)) {
mutex_lock(&sbi->writepages);
locked = true;
}
+ account_writeback(inode, true);
+
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc, io_type);
blk_finish_plug(&plug);
+ account_writeback(inode, false);
+
if (locked)
mutex_unlock(&sbi->writepages);
@@ -3305,7 +3316,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
FS_CP_DATA_IO : FS_DATA_IO);
}
-static void f2fs_write_failed(struct inode *inode, loff_t to)
+void f2fs_write_failed(struct inode *inode, loff_t to)
{
loff_t i_size = i_size_read(inode);
@@ -3314,135 +3325,230 @@ static void f2fs_write_failed(struct inode *inode, loff_t to)
/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
if (to > i_size && !f2fs_verity_in_progress(inode)) {
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
truncate_pagecache(inode, i_size);
f2fs_truncate_blocks(inode, i_size, true);
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed)
{
- struct inode *inode = page->mapping->host;
- pgoff_t index = page->index;
+ struct inode *inode = folio->mapping->host;
+ pgoff_t index = folio->index;
struct dnode_of_data dn;
- struct page *ipage;
+ struct folio *ifolio;
bool locked = false;
- struct extent_info ei = {0, };
+ int flag = F2FS_GET_BLOCK_PRE_AIO;
int err = 0;
- int flag;
/*
- * we already allocated all the blocks, so we don't need to get
- * the block addresses when there is no need to fill the page.
+ * If a whole page is being written and we already preallocated all the
+ * blocks, then there is no need to get a block address now.
*/
- if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
- !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
- !f2fs_verity_in_progress(inode))
+ if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
return 0;
/* f2fs_lock_op avoids race between write CP and convert_inline_page */
- if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
- flag = F2FS_GET_BLOCK_DEFAULT;
- else
- flag = F2FS_GET_BLOCK_PRE_AIO;
-
- if (f2fs_has_inline_data(inode) ||
- (pos & PAGE_MASK) >= i_size_read(inode)) {
- f2fs_do_map_lock(sbi, flag, true);
+ if (f2fs_has_inline_data(inode)) {
+ if (pos + len > MAX_INLINE_DATA(inode))
+ flag = F2FS_GET_BLOCK_DEFAULT;
+ f2fs_map_lock(sbi, flag);
+ locked = true;
+ } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+ f2fs_map_lock(sbi, flag);
locked = true;
}
restart:
/* check inline_data */
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto unlock_out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(page, ipage);
+ f2fs_do_read_inline_data(folio, ifolio);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
- set_page_private_inline(ipage);
- } else {
- err = f2fs_convert_inline_page(&dn, page);
- if (err)
- goto out;
- if (dn.data_blkaddr == NULL_ADDR)
- err = f2fs_get_block(&dn, index);
+ folio_set_f2fs_inline(ifolio);
+ goto out;
}
- } else if (locked) {
- err = f2fs_get_block(&dn, index);
- } else {
- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
- dn.data_blkaddr = ei.blk + index - ei.fofs;
- } else {
- /* hole case */
- err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
- if (err || dn.data_blkaddr == NULL_ADDR) {
- f2fs_put_dnode(&dn);
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
- true);
- WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
- locked = true;
- goto restart;
- }
+ err = f2fs_convert_inline_folio(&dn, folio);
+ if (err || dn.data_blkaddr != NULL_ADDR)
+ goto out;
+ }
+
+ if (!f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
+ if (IS_DEVICE_ALIASING(inode)) {
+ err = -ENODATA;
+ goto out;
+ }
+
+ if (locked) {
+ err = f2fs_reserve_block(&dn, index);
+ goto out;
}
+
+ /* hole case */
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (!err && dn.data_blkaddr != NULL_ADDR)
+ goto out;
+ f2fs_put_dnode(&dn);
+ f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
+ locked = true;
+ goto restart;
}
+out:
+ if (!err) {
+ /* convert_inline_page can make node_changed */
+ *blk_addr = dn.data_blkaddr;
+ *node_changed = dn.node_changed;
+ }
+ f2fs_put_dnode(&dn);
+unlock_out:
+ if (locked)
+ f2fs_map_unlock(sbi, flag);
+ return err;
+}
+
+static int __find_data_block(struct inode *inode, pgoff_t index,
+ block_t *blk_addr)
+{
+ struct dnode_of_data dn;
+ struct folio *ifolio;
+ int err = 0;
+
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
+
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
+
+ if (!f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
+ /* hole case */
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err) {
+ dn.data_blkaddr = NULL_ADDR;
+ err = 0;
+ }
+ }
+ *blk_addr = dn.data_blkaddr;
+ f2fs_put_dnode(&dn);
+ return err;
+}
+
+static int __reserve_data_block(struct inode *inode, pgoff_t index,
+ block_t *blk_addr, bool *node_changed)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ struct folio *ifolio;
+ int err = 0;
+
+ f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
+ goto unlock_out;
+ }
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
+
+ if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
+ &dn.data_blkaddr))
+ err = f2fs_reserve_block(&dn, index);
- /* convert_inline_page can make node_changed */
*blk_addr = dn.data_blkaddr;
*node_changed = dn.node_changed;
-out:
f2fs_put_dnode(&dn);
+
unlock_out:
- if (locked)
- f2fs_do_map_lock(sbi, flag, false);
+ f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
return err;
}
-static int f2fs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
+ struct folio *folio, loff_t pos, unsigned int len,
+ block_t *blk_addr, bool *node_changed, bool *use_cow)
+{
+ struct inode *inode = folio->mapping->host;
+ struct inode *cow_inode = F2FS_I(inode)->cow_inode;
+ pgoff_t index = folio->index;
+ int err = 0;
+ block_t ori_blk_addr = NULL_ADDR;
+
+ /* If pos is beyond the end of file, reserve a new block in COW inode */
+ if ((pos & PAGE_MASK) >= i_size_read(inode))
+ goto reserve_block;
+
+ /* Look for the block in COW inode first */
+ err = __find_data_block(cow_inode, index, blk_addr);
+ if (err) {
+ return err;
+ } else if (*blk_addr != NULL_ADDR) {
+ *use_cow = true;
+ return 0;
+ }
+
+ if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
+ goto reserve_block;
+
+ /* Look for the block in the original inode */
+ err = __find_data_block(inode, index, &ori_blk_addr);
+ if (err)
+ return err;
+
+reserve_block:
+ /* Finally, we should reserve a new block in COW inode for the update */
+ err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
+ if (err)
+ return err;
+ inc_atomic_write_cnt(inode);
+
+ if (ori_blk_addr != NULL_ADDR)
+ *blk_addr = ori_blk_addr;
+ return 0;
+}
+
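
prepare_atomic_write_begin() picks the block backing an atomic (COW) write in a fixed order: a block already present in the COW inode is reused, otherwise a fresh COW block is reserved, and if the original inode already maps the index (and FI_ATOMIC_REPLACE is not set) the original address is reported so the old data can be read in before the overwrite. A small sketch of that decision, with toy lookup tables standing in for __find_data_block()/__reserve_data_block():

#include <stdbool.h>
#include <stdio.h>

#define NULL_ADDR 0u
typedef unsigned int block_t;

/* Toy lookup: index -> block address, 0 meaning "not mapped". */
static block_t lookup(const block_t *map, unsigned long idx) { return map[idx]; }

/*
 * Decide which block address backs a write_begin on an atomic file:
 *   1. a block already reserved in the COW inode wins (use_cow = true);
 *   2. otherwise a fresh COW block is used, except that when the original
 *      inode maps this index (and we are not in "replace" mode) the
 *      original address is returned so the old data can be read first.
 */
static block_t pick_blkaddr(const block_t *cow_map, const block_t *orig_map,
			    unsigned long idx, bool replace_mode,
			    block_t new_cow_blk, bool *use_cow)
{
	block_t blk = lookup(cow_map, idx);

	if (blk != NULL_ADDR) {
		*use_cow = true;
		return blk;
	}

	*use_cow = false;
	if (!replace_mode) {
		block_t ori = lookup(orig_map, idx);
		if (ori != NULL_ADDR)
			return ori;	/* read old data from the original inode */
	}
	return new_cow_blk;		/* block freshly reserved in the COW inode */
}

int main(void)
{
	block_t cow[4]  = { 0, 0, 7, 0 };
	block_t orig[4] = { 0, 5, 0, 0 };
	bool use_cow;

	printf("%u %d\n", pick_blkaddr(cow, orig, 1, false, 9, &use_cow), use_cow); /* 5 0 */
	printf("%u %d\n", pick_blkaddr(cow, orig, 2, false, 9, &use_cow), use_cow); /* 7 1 */
	return 0;
}
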
+static int f2fs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page = NULL;
- pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
- bool need_balance = false, drop_atomic = false;
+ struct folio *folio;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ bool need_balance = false;
+ bool use_cow = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
- trace_f2fs_write_begin(inode, pos, len, flags);
+ trace_f2fs_write_begin(inode, pos, len);
if (!f2fs_is_checkpoint_ready(sbi)) {
err = -ENOSPC;
goto fail;
}
- if ((f2fs_is_atomic_file(inode) &&
- !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
- is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
- err = -ENOMEM;
- drop_atomic = true;
- goto fail;
- }
-
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
- * lock_page(page #0) -> lock_page(inode_page)
+ * folio_lock(folio #0) -> folio_lock(inode_page)
*/
if (index != 0) {
err = f2fs_convert_inline_inode(inode);
@@ -3453,18 +3559,20 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
int ret;
+ struct page *page;
*fsdata = NULL;
- if (len == PAGE_SIZE)
+ if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
goto repeat;
- ret = f2fs_prepare_compress_overwrite(inode, pagep,
+ ret = f2fs_prepare_compress_overwrite(inode, &page,
index, fsdata);
if (ret < 0) {
err = ret;
goto fail;
} else if (ret) {
+ *foliop = page_folio(page);
return 0;
}
}
@@ -3472,87 +3580,93 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
repeat:
/*
- * Do not use grab_cache_page_write_begin() to avoid deadlock due to
- * wait_for_stable_page. Will wait that below with our IO control.
+ * Do not use FGP_STABLE to avoid deadlock.
+ * Will wait that below with our IO control.
*/
- page = f2fs_pagecache_get_page(mapping, index,
- FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
- if (!page) {
- err = -ENOMEM;
+ folio = f2fs_filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_NOFS,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
/* TODO: cluster can be compressed due to race with .writepage */
- *pagep = page;
+ *foliop = folio;
- err = prepare_write_begin(sbi, page, pos, len,
+ if (f2fs_is_atomic_file(inode))
+ err = prepare_atomic_write_begin(sbi, folio, pos, len,
+ &blkaddr, &need_balance, &use_cow);
+ else
+ err = prepare_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance);
if (err)
- goto fail;
+ goto put_folio;
if (need_balance && !IS_NOQUOTA(inode) &&
has_not_enough_free_secs(sbi, 0, 0)) {
- unlock_page(page);
+ folio_unlock(folio);
f2fs_balance_fs(sbi, true);
- lock_page(page);
- if (page->mapping != mapping) {
- /* The page got truncated from under us */
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ /* The folio got truncated from under us */
+ folio_unlock(folio);
+ folio_put(folio);
goto repeat;
}
}
- f2fs_wait_on_page_writeback(page, DATA, false, true);
+ f2fs_folio_wait_writeback(folio, DATA, false, true);
- if (len == PAGE_SIZE || PageUptodate(page))
+ if (len == folio_size(folio) || folio_test_uptodate(folio))
return 0;
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
- zero_user_segment(page, len, PAGE_SIZE);
+ folio_zero_segment(folio, len, folio_size(folio));
return 0;
}
if (blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
} else {
if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
DATA_GENERIC_ENHANCE_READ)) {
err = -EFSCORRUPTED;
- goto fail;
+ goto put_folio;
}
- err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
- if (err)
- goto fail;
-
- lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ f2fs_submit_page_read(use_cow ?
+ F2FS_I(inode)->cow_inode : inode,
+ folio, blkaddr, 0, true);
+
+ folio_lock(folio);
+ if (unlikely(folio->mapping != mapping)) {
+ folio_unlock(folio);
+ folio_put(folio);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(!folio_test_uptodate(folio))) {
err = -EIO;
- goto fail;
+ goto put_folio;
}
}
return 0;
+put_folio:
+ f2fs_folio_put(folio, true);
fail:
- f2fs_put_page(page, 1);
f2fs_write_failed(inode, pos + len);
- if (drop_atomic)
- f2fs_drop_inmem_pages_all(sbi, false);
return err;
}
-static int f2fs_write_end(struct file *file,
+static int f2fs_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
trace_f2fs_write_end(inode, pos, len, copied);
@@ -3561,17 +3675,17 @@ static int f2fs_write_end(struct file *file,
* should be PAGE_SIZE. Otherwise, we treat it with zero copied and
* let generic_perform_write() try to copy data again through copied=0.
*/
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (unlikely(copied != len))
copied = 0;
else
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
/* overwrite compressed file */
if (f2fs_compressed_file(inode) && fsdata) {
- f2fs_compress_write_end(inode, fsdata, page->index, copied);
+ f2fs_compress_write_end(inode, fsdata, folio->index, copied);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
if (pos + copied > i_size_read(inode) &&
@@ -3584,180 +3698,34 @@ static int f2fs_write_end(struct file *file,
if (!copied)
goto unlock_out;
- set_page_dirty(page);
+ folio_mark_dirty(folio);
+
+ if (f2fs_is_atomic_file(inode))
+ folio_set_f2fs_atomic(folio);
if (pos + copied > i_size_read(inode) &&
- !f2fs_verity_in_progress(inode))
+ !f2fs_verity_in_progress(inode)) {
f2fs_i_size_write(inode, pos + copied);
+ if (f2fs_is_atomic_file(inode))
+ f2fs_i_size_write(F2FS_I(inode)->cow_inode,
+ pos + copied);
+ }
unlock_out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
-static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
- loff_t offset)
-{
- unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
- unsigned blkbits = i_blkbits;
- unsigned blocksize_mask = (1 << blkbits) - 1;
- unsigned long align = offset | iov_iter_alignment(iter);
- struct block_device *bdev = inode->i_sb->s_bdev;
-
- if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
- return 1;
-
- if (align & blocksize_mask) {
- if (bdev)
- blkbits = blksize_bits(bdev_logical_block_size(bdev));
- blocksize_mask = (1 << blkbits) - 1;
- if (align & blocksize_mask)
- return -EINVAL;
- return 1;
- }
- return 0;
-}
-
-static void f2fs_dio_end_io(struct bio *bio)
+void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
- struct f2fs_private_dio *dio = bio->bi_private;
-
- dec_page_count(F2FS_I_SB(dio->inode),
- dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
-
- bio->bi_private = dio->orig_private;
- bio->bi_end_io = dio->orig_end_io;
-
- kfree(dio);
-
- bio_endio(bio);
-}
-
-static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
- loff_t file_offset)
-{
- struct f2fs_private_dio *dio;
- bool write = (bio_op(bio) == REQ_OP_WRITE);
-
- dio = f2fs_kzalloc(F2FS_I_SB(inode),
- sizeof(struct f2fs_private_dio), GFP_NOFS);
- if (!dio)
- goto out;
-
- dio->inode = inode;
- dio->orig_end_io = bio->bi_end_io;
- dio->orig_private = bio->bi_private;
- dio->write = write;
-
- bio->bi_end_io = f2fs_dio_end_io;
- bio->bi_private = dio;
-
- inc_page_count(F2FS_I_SB(inode),
- write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
-
- submit_bio(bio);
- return;
-out:
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
-}
-
-static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = mapping->host;
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- size_t count = iov_iter_count(iter);
- loff_t offset = iocb->ki_pos;
- int rw = iov_iter_rw(iter);
- int err;
- enum rw_hint hint = iocb->ki_hint;
- int whint_mode = F2FS_OPTION(sbi).whint_mode;
- bool do_opu;
-
- err = check_direct_IO(inode, iter, offset);
- if (err)
- return err < 0 ? err : 0;
-
- if (f2fs_force_buffered_io(inode, iocb, iter))
- return 0;
-
- do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
-
- trace_f2fs_direct_IO_enter(inode, offset, count, rw);
-
- if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
- iocb->ki_hint = WRITE_LIFE_NOT_SET;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
- iocb->ki_hint = hint;
- err = -EAGAIN;
- goto out;
- }
- if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
- up_read(&fi->i_gc_rwsem[rw]);
- iocb->ki_hint = hint;
- err = -EAGAIN;
- goto out;
- }
- } else {
- down_read(&fi->i_gc_rwsem[rw]);
- if (do_opu)
- down_read(&fi->i_gc_rwsem[READ]);
- }
-
- err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
- iter, rw == WRITE ? get_data_block_dio_write :
- get_data_block_dio, NULL, f2fs_dio_submit_bio,
- rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
- DIO_SKIP_HOLES);
-
- if (do_opu)
- up_read(&fi->i_gc_rwsem[READ]);
-
- up_read(&fi->i_gc_rwsem[rw]);
-
- if (rw == WRITE) {
- if (whint_mode == WHINT_MODE_OFF)
- iocb->ki_hint = hint;
- if (err > 0) {
- f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
- err);
- if (!do_opu)
- set_inode_flag(inode, FI_UPDATE_WRITE);
- } else if (err == -EIOCBQUEUED) {
- f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
- count - iov_iter_count(iter));
- } else if (err < 0) {
- f2fs_write_failed(inode, offset + count);
- }
- } else {
- if (err > 0)
- f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
- else if (err == -EIOCBQUEUED)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
- count - iov_iter_count(iter));
- }
-
-out:
- trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
-
- return err;
-}
-
-void f2fs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
-{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
- (offset % PAGE_SIZE || length != PAGE_SIZE))
+ (offset || length != folio_size(folio)))
return;
- if (PageDirty(page)) {
+ if (folio_test_dirty(folio)) {
if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
@@ -3767,79 +3735,35 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
f2fs_remove_dirty_inode(inode);
}
}
-
- clear_page_private_gcing(page);
-
- if (test_opt(sbi, COMPRESS_CACHE)) {
- if (f2fs_compressed_file(inode))
- f2fs_invalidate_compress_pages(sbi, inode->i_ino);
- if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
- clear_page_private_data(page);
- }
-
- if (page_private_atomic(page))
- return f2fs_drop_inmem_page(inode, page);
-
- detach_page_private(page);
- set_page_private(page, 0);
+ folio_detach_private(folio);
}
-int f2fs_release_page(struct page *page, gfp_t wait)
+bool f2fs_release_folio(struct folio *folio, gfp_t wait)
{
- /* If this is dirty page, keep PagePrivate */
- if (PageDirty(page))
- return 0;
-
- /* This is atomic written page, keep Private */
- if (page_private_atomic(page))
- return 0;
-
- if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- struct inode *inode = page->mapping->host;
-
- if (f2fs_compressed_file(inode))
- f2fs_invalidate_compress_pages(sbi, inode->i_ino);
- if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
- clear_page_private_data(page);
- }
-
- clear_page_private_gcing(page);
+ /* If this is dirty folio, keep private data */
+ if (folio_test_dirty(folio))
+ return false;
- detach_page_private(page);
- set_page_private(page, 0);
- return 1;
+ folio_detach_private(folio);
+ return true;
}
-static int f2fs_set_data_page_dirty(struct page *page)
+static bool f2fs_dirty_data_folio(struct address_space *mapping,
+ struct folio *folio)
{
- struct inode *inode = page_file_mapping(page)->host;
-
- trace_f2fs_set_page_dirty(page, DATA);
+ struct inode *inode = mapping->host;
- if (!PageUptodate(page))
- SetPageUptodate(page);
- if (PageSwapCache(page))
- return __set_page_dirty_nobuffers(page);
+ trace_f2fs_set_page_dirty(folio, DATA);
- if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
- if (!page_private_atomic(page)) {
- f2fs_register_inmem_page(inode, page);
- return 1;
- }
- /*
- * Previously, this page has been registered, we just
- * return here.
- */
- return 0;
- }
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ BUG_ON(folio_test_swapcache(folio));
- if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
- f2fs_update_dirty_page(inode, page);
- return 1;
+ if (filemap_dirty_folio(mapping, folio)) {
+ f2fs_update_dirty_folio(inode, folio);
+ return true;
}
- return 0;
+ return false;
}
@@ -3899,7 +3823,7 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
map.m_next_pgofs = NULL;
map.m_seg_type = NO_CHECK_TYPE;
- if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
+ if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
blknr = map.m_pblk;
}
out:
@@ -3907,70 +3831,6 @@ out:
return blknr;
}
-#ifdef CONFIG_MIGRATION
-#include <linux/migrate.h>
-
-int f2fs_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page, enum migrate_mode mode)
-{
- int rc, extra_count;
- struct f2fs_inode_info *fi = F2FS_I(mapping->host);
- bool atomic_written = page_private_atomic(page);
-
- BUG_ON(PageWriteback(page));
-
- /* migrating an atomic written page is safe with the inmem_lock hold */
- if (atomic_written) {
- if (mode != MIGRATE_SYNC)
- return -EBUSY;
- if (!mutex_trylock(&fi->inmem_lock))
- return -EAGAIN;
- }
-
- /* one extra reference was held for atomic_write page */
- extra_count = atomic_written ? 1 : 0;
- rc = migrate_page_move_mapping(mapping, newpage,
- page, extra_count);
- if (rc != MIGRATEPAGE_SUCCESS) {
- if (atomic_written)
- mutex_unlock(&fi->inmem_lock);
- return rc;
- }
-
- if (atomic_written) {
- struct inmem_pages *cur;
-
- list_for_each_entry(cur, &fi->inmem_pages, list)
- if (cur->page == page) {
- cur->page = newpage;
- break;
- }
- mutex_unlock(&fi->inmem_lock);
- put_page(page);
- get_page(newpage);
- }
-
- /* guarantee to start from no stale private field */
- set_page_private(newpage, 0);
- if (PagePrivate(page)) {
- set_page_private(newpage, page_private(page));
- SetPagePrivate(newpage);
- get_page(newpage);
-
- set_page_private(page, 0);
- ClearPagePrivate(page);
- put_page(page);
- }
-
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
-
- return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
#ifdef CONFIG_SWAP
static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
unsigned int blkcnt)
@@ -3978,55 +3838,67 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int blkofs;
unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int end_blk = start_blk + blkcnt - 1;
unsigned int secidx = start_blk / blk_per_sec;
- unsigned int end_sec = secidx + blkcnt / blk_per_sec;
+ unsigned int end_sec;
int ret = 0;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (!blkcnt)
+ return 0;
+ end_sec = end_blk / blk_per_sec;
+
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
set_inode_flag(inode, FI_ALIGNED_WRITE);
+ set_inode_flag(inode, FI_OPU_WRITE);
- for (; secidx < end_sec; secidx++) {
- down_write(&sbi->pin_sem);
+ for (; secidx <= end_sec; secidx++) {
+ unsigned int blkofs_end = secidx == end_sec ?
+ end_blk % blk_per_sec : blk_per_sec - 1;
- f2fs_lock_op(sbi);
- f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
- f2fs_unlock_op(sbi);
+ f2fs_down_write(&sbi->pin_sem);
- set_inode_flag(inode, FI_DO_DEFRAG);
+ ret = f2fs_allocate_pinning_section(sbi);
+ if (ret) {
+ f2fs_up_write(&sbi->pin_sem);
+ break;
+ }
+
+ set_inode_flag(inode, FI_SKIP_WRITES);
- for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
- struct page *page;
+ for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
+ struct folio *folio;
unsigned int blkidx = secidx * blk_per_sec + blkofs;
- page = f2fs_get_lock_data_page(inode, blkidx, true);
- if (IS_ERR(page)) {
- up_write(&sbi->pin_sem);
- ret = PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, blkidx, true);
+ if (IS_ERR(folio)) {
+ f2fs_up_write(&sbi->pin_sem);
+ ret = PTR_ERR(folio);
goto done;
}
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
- clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_SKIP_WRITES);
ret = filemap_fdatawrite(inode->i_mapping);
- up_write(&sbi->pin_sem);
+ f2fs_up_write(&sbi->pin_sem);
if (ret)
break;
}
done:
- clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_SKIP_WRITES);
+ clear_inode_flag(inode, FI_OPU_WRITE);
clear_inode_flag(inode, FI_ALIGNED_WRITE);
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -4037,15 +3909,14 @@ static int check_swap_activate(struct swap_info_struct *sis,
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- sector_t cur_lblock;
- sector_t last_lblock;
- sector_t pblock;
- sector_t lowest_pblock = -1;
- sector_t highest_pblock = 0;
+ block_t cur_lblock;
+ block_t last_lblock;
+ block_t pblock;
+ block_t lowest_pblock = -1;
+ block_t highest_pblock = 0;
int nr_extents = 0;
- unsigned long nr_pblocks;
+ unsigned int nr_pblocks;
unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
- unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
unsigned int not_aligned = 0;
int ret = 0;
@@ -4054,7 +3925,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
* to be very smart.
*/
cur_lblock = 0;
- last_lblock = bytes_to_blks(inode, i_size_read(inode));
+ last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode));
while (cur_lblock < last_lblock && cur_lblock < sis->max) {
struct f2fs_map_blocks map;
@@ -4069,7 +3940,7 @@ retry:
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
+ ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
if (ret)
goto out;
@@ -4083,28 +3954,35 @@ retry:
pblock = map.m_pblk;
nr_pblocks = map.m_len;
- if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
- nr_pblocks & sec_blks_mask) {
+ if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
+ nr_pblocks % blks_per_sec ||
+ f2fs_is_sequential_zone_area(sbi, pblock)) {
+ bool last_extent = false;
+
not_aligned++;
nr_pblocks = roundup(nr_pblocks, blks_per_sec);
if (cur_lblock + nr_pblocks > sis->max)
nr_pblocks -= blks_per_sec;
+ /* this extent is last one */
if (!nr_pblocks) {
- /* this extent is last one */
- nr_pblocks = map.m_len;
- f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
- goto next;
+ nr_pblocks = last_lblock - cur_lblock;
+ last_extent = true;
}
ret = f2fs_migrate_blocks(inode, cur_lblock,
nr_pblocks);
- if (ret)
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = -EINVAL;
goto out;
- goto retry;
+ }
+
+ if (!last_extent)
+ goto retry;
}
-next:
+
if (cur_lblock + nr_pblocks >= sis->max)
nr_pblocks = sis->max - cur_lblock;
@@ -4130,10 +4008,9 @@ next:
cur_lblock = 1; /* force Empty message */
sis->max = cur_lblock;
sis->pages = cur_lblock - 1;
- sis->highest_bit = cur_lblock - 1;
out:
if (not_aligned)
- f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
+ f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
not_aligned, blks_per_sec * F2FS_BLKSIZE);
return ret;
}
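
When a swapfile extent is not section-aligned, the code above rounds it up to whole sections, trims one section if the rounded length would run past sis->max, and treats a zero result as the final extent covering whatever remains of the file. The arithmetic as a standalone sketch, with assumed block counts:

#include <stdbool.h>
#include <stdio.h>

/* round n up to the next multiple of 'to' (assumes to > 0) */
static unsigned int roundup_to(unsigned int n, unsigned int to)
{
	return ((n + to - 1) / to) * to;
}

/*
 * Mirror the unaligned-extent handling: round the extent up to a whole
 * section, drop one section if that would run past the swapfile limit,
 * and fall back to "whatever is left" for the final extent.
 */
static unsigned int migrate_len(unsigned int cur_lblock, unsigned int nr_pblocks,
				unsigned int blks_per_sec, unsigned int max_lblock,
				unsigned int last_lblock, bool *last_extent)
{
	nr_pblocks = roundup_to(nr_pblocks, blks_per_sec);
	if (cur_lblock + nr_pblocks > max_lblock)
		nr_pblocks -= blks_per_sec;

	*last_extent = (nr_pblocks == 0);
	if (*last_extent)
		nr_pblocks = last_lblock - cur_lblock;
	return nr_pblocks;
}

int main(void)
{
	bool last;

	/* a 3-block extent in a 512-block section rounds up to one section */
	printf("%u\n", migrate_len(0, 3, 512, 4096, 4000, &last));	/* 512 */
	/* near the end of the swapfile the extent collapses to the tail */
	printf("%u\n", migrate_len(3900, 3, 512, 4096, 4000, &last));	/* 100 */
	return 0;
}
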
@@ -4142,17 +4019,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
struct inode *inode = file_inode(file);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
- if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ if (f2fs_readonly(sbi->sb))
return -EROFS;
- if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
- f2fs_err(F2FS_I_SB(inode),
- "Swapfile not supported in LFS mode");
+ if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
+ f2fs_err(sbi, "Swapfile not supported in LFS mode");
return -EINVAL;
}
@@ -4163,14 +4040,19 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!f2fs_disable_compressed_file(inode))
return -EINVAL;
+ ret = filemap_fdatawrite(inode->i_mapping);
+ if (ret < 0)
+ return ret;
+
f2fs_precache_extents(inode);
ret = check_swap_activate(sis, file, span);
if (ret < 0)
return ret;
+ stat_inc_swapfile_inode(inode);
set_inode_flag(inode, FI_PIN_FILE);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
return ret;
}
@@ -4178,6 +4060,7 @@ static void f2fs_swap_deactivate(struct file *file)
{
struct inode *inode = file_inode(file);
+ stat_dec_swapfile_inode(inode);
clear_inode_flag(inode, FI_PIN_FILE);
}
#else
@@ -4193,31 +4076,27 @@ static void f2fs_swap_deactivate(struct file *file)
#endif
const struct address_space_operations f2fs_dblock_aops = {
- .readpage = f2fs_read_data_page,
+ .read_folio = f2fs_read_data_folio,
.readahead = f2fs_readahead,
- .writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
- .set_page_dirty = f2fs_set_data_page_dirty,
- .invalidatepage = f2fs_invalidate_page,
- .releasepage = f2fs_release_page,
- .direct_IO = f2fs_direct_IO,
+ .dirty_folio = f2fs_dirty_data_folio,
+ .migrate_folio = filemap_migrate_folio,
+ .invalidate_folio = f2fs_invalidate_folio,
+ .release_folio = f2fs_release_folio,
.bmap = f2fs_bmap,
.swap_activate = f2fs_swap_activate,
.swap_deactivate = f2fs_swap_deactivate,
-#ifdef CONFIG_MIGRATION
- .migratepage = f2fs_migrate_page,
-#endif
};
-void f2fs_clear_page_cache_dirty_tag(struct page *page)
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
{
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping = folio->mapping;
unsigned long flags;
xa_lock_irqsave(&mapping->i_pages, flags);
- __xa_clear_mark(&mapping->i_pages, page_index(page),
+ __xa_clear_mark(&mapping->i_pages, folio->index,
PAGECACHE_TAG_DIRTY);
xa_unlock_irqrestore(&mapping->i_pages, flags);
}
@@ -4258,9 +4137,7 @@ int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
WQ_UNBOUND | WQ_HIGHPRI,
num_online_cpus());
- if (!sbi->post_read_wq)
- return -ENOMEM;
- return 0;
+ return sbi->post_read_wq ? 0 : -ENOMEM;
}
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
@@ -4273,12 +4150,98 @@ int __init f2fs_init_bio_entry_cache(void)
{
bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
sizeof(struct bio_entry));
- if (!bio_entry_slab)
- return -ENOMEM;
- return 0;
+ return bio_entry_slab ? 0 : -ENOMEM;
}
void f2fs_destroy_bio_entry_cache(void)
{
kmem_cache_destroy(bio_entry_slab);
}
+
+static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct f2fs_map_blocks map = { NULL, };
+ pgoff_t next_pgofs = 0;
+ int err;
+
+ map.m_lblk = F2FS_BYTES_TO_BLK(offset);
+ map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1;
+ map.m_next_pgofs = &next_pgofs;
+ map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
+ inode->i_write_hint);
+ if (flags & IOMAP_WRITE && iomap->private) {
+ map.m_last_pblk = (unsigned long)iomap->private;
+ iomap->private = NULL;
+ }
+
+ /*
+ * If the blocks being overwritten are already allocated,
+ * f2fs_map_lock and f2fs_balance_fs are not necessary.
+ */
+ if ((flags & IOMAP_WRITE) &&
+ !f2fs_overwrite_io(inode, offset, length))
+ map.m_may_create = true;
+
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
+ if (err)
+ return err;
+
+ iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk);
+
+ /*
+ * When inline encryption is enabled, sometimes I/O to an encrypted file
+ * has to be broken up to guarantee DUN contiguity. Handle this by
+ * limiting the length of the mapping returned.
+ */
+ map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+
+ /*
+ * We should never see delalloc or compressed extents here based on
+ * prior flushing and checks.
+ */
+ if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
+ return -EINVAL;
+
+ if (map.m_flags & F2FS_MAP_MAPPED) {
+ if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
+ return -EINVAL;
+
+ iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
+ iomap->type = IOMAP_MAPPED;
+ iomap->flags |= IOMAP_F_MERGED;
+ iomap->bdev = map.m_bdev;
+ iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
+
+ if (flags & IOMAP_WRITE && map.m_last_pblk)
+ iomap->private = (void *)map.m_last_pblk;
+ } else {
+ if (flags & IOMAP_WRITE)
+ return -ENOTBLK;
+
+ if (map.m_pblk == NULL_ADDR) {
+ iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) -
+ iomap->offset;
+ iomap->type = IOMAP_HOLE;
+ } else if (map.m_pblk == NEW_ADDR) {
+ iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
+ iomap->type = IOMAP_UNWRITTEN;
+ } else {
+ f2fs_bug_on(F2FS_I_SB(inode), 1);
+ }
+ iomap->addr = IOMAP_NULL_ADDR;
+ }
+
+ if (map.m_flags & F2FS_MAP_NEW)
+ iomap->flags |= IOMAP_F_NEW;
+ if ((inode_state_read_once(inode) & I_DIRTY_DATASYNC) ||
+ offset + length > i_size_read(inode))
+ iomap->flags |= IOMAP_F_DIRTY;
+
+ return 0;
+}
+
+const struct iomap_ops f2fs_iomap_ops = {
+ .iomap_begin = f2fs_iomap_begin,
+};
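
f2fs_iomap_begin() translates an f2fs_map_blocks() result into an iomap extent: a mapped physical block becomes IOMAP_MAPPED, a reserved-but-unwritten block (NEW_ADDR) becomes IOMAP_UNWRITTEN, and NULL_ADDR is reported as a hole. A minimal model of that classification; the sentinel values here are stand-ins, not the kernel constants:

#include <stdio.h>

#define NULL_ADDR 0u
#define NEW_ADDR  0xFFFFFFFFu	/* assumed sentinel: reserved, not yet written */

enum map_type { MAP_MAPPED, MAP_UNWRITTEN, MAP_HOLE };

/*
 * Classify a block-map result the way the iomap hook does: a real
 * physical block is MAPPED, a reserved-but-unwritten block is UNWRITTEN,
 * and anything unmapped is reported as a HOLE.
 */
static enum map_type classify(unsigned int pblk, int mapped_flag)
{
	if (mapped_flag)
		return MAP_MAPPED;
	if (pblk == NEW_ADDR)
		return MAP_UNWRITTEN;
	return MAP_HOLE;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1234, 1),	/* MAP_MAPPED */
	       classify(NEW_ADDR, 0),	/* MAP_UNWRITTEN */
	       classify(NULL_ADDR, 0));	/* MAP_HOLE */
	return 0;
}
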
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 8c50518475a9..032683835569 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -21,7 +21,7 @@
#include "gc.h"
static LIST_HEAD(f2fs_stat_list);
-static DEFINE_MUTEX(f2fs_stat_mutex);
+static DEFINE_SPINLOCK(f2fs_stat_lock);
#ifdef CONFIG_DEBUG_FS
static struct dentry *f2fs_debugfs_root;
#endif
@@ -39,9 +39,9 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
bimodal = 0;
total_vblocks = 0;
- blks_per_sec = BLKS_PER_SEC(sbi);
+ blks_per_sec = CAP_BLKS_PER_SEC(sbi);
hblks_per_sec = blks_per_sec / 2;
- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
+ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
vblocks = get_valid_blocks(sbi, segno, true);
dist = abs(vblocks - hblks_per_sec);
bimodal += dist * dist;
@@ -60,6 +60,70 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_DEBUG_FS
+static void update_multidevice_stats(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
+ struct f2fs_dev_stats *dev_stats = si->dev_stats;
+ int i, j;
+
+ if (!f2fs_is_multi_device(sbi))
+ return;
+
+ memset(dev_stats, 0, sizeof(struct f2fs_dev_stats) * sbi->s_ndevs);
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ unsigned int start_segno, end_segno;
+ block_t start_blk, end_blk;
+
+ if (i == 0) {
+ start_blk = MAIN_BLKADDR(sbi);
+ end_blk = FDEV(i).end_blk + 1 - SEG0_BLKADDR(sbi);
+ } else {
+ start_blk = FDEV(i).start_blk;
+ end_blk = FDEV(i).end_blk + 1;
+ }
+
+ start_segno = GET_SEGNO(sbi, start_blk);
+ end_segno = GET_SEGNO(sbi, end_blk);
+
+ for (j = start_segno; j < end_segno; j++) {
+ unsigned int seg_blks, sec_blks;
+
+ seg_blks = get_seg_entry(sbi, j)->valid_blocks;
+
+ /* update segment stats */
+ if (is_curseg(sbi, j))
+ dev_stats[i].devstats[0][DEVSTAT_INUSE]++;
+ else if (seg_blks == BLKS_PER_SEG(sbi))
+ dev_stats[i].devstats[0][DEVSTAT_FULL]++;
+ else if (seg_blks != 0)
+ dev_stats[i].devstats[0][DEVSTAT_DIRTY]++;
+ else if (!test_bit(j, FREE_I(sbi)->free_segmap))
+ dev_stats[i].devstats[0][DEVSTAT_FREE]++;
+ else
+ dev_stats[i].devstats[0][DEVSTAT_PREFREE]++;
+
+ if (!__is_large_section(sbi) ||
+ (j % SEGS_PER_SEC(sbi)) != 0)
+ continue;
+
+ sec_blks = get_sec_entry(sbi, j)->valid_blocks;
+
+ /* update section stats */
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, j)))
+ dev_stats[i].devstats[1][DEVSTAT_INUSE]++;
+ else if (sec_blks == BLKS_PER_SEC(sbi))
+ dev_stats[i].devstats[1][DEVSTAT_FULL]++;
+ else if (sec_blks != 0)
+ dev_stats[i].devstats[1][DEVSTAT_DIRTY]++;
+ else if (!test_bit(GET_SEC_FROM_SEG(sbi, j),
+ FREE_I(sbi)->free_secmap))
+ dev_stats[i].devstats[1][DEVSTAT_FREE]++;
+ else
+ dev_stats[i].devstats[1][DEVSTAT_PREFREE]++;
+ }
+ }
+}
+
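
update_multidevice_stats() buckets each segment of a device into one of five states, checked in order: current log segment, fully valid, partially valid, free_segmap bit clear, and prefree. A compact sketch of the same ordering, with assumed parameters in place of the kernel accessors:

#include <stdbool.h>
#include <stdio.h>

enum devstat { DEVSTAT_INUSE, DEVSTAT_DIRTY, DEVSTAT_FULL, DEVSTAT_FREE, DEVSTAT_PREFREE };

/*
 * Classification order used above: a segment hosting a current log is
 * "inuse"; otherwise it is "full" when every block is valid, "dirty" when
 * partially valid, "free" when it has no valid blocks and its free_segmap
 * bit is clear, and "prefree" otherwise.
 */
static enum devstat classify_seg(bool is_curseg, unsigned int valid_blks,
				 unsigned int blks_per_seg, bool segmap_bit)
{
	if (is_curseg)
		return DEVSTAT_INUSE;
	if (valid_blks == blks_per_seg)
		return DEVSTAT_FULL;
	if (valid_blks != 0)
		return DEVSTAT_DIRTY;
	if (!segmap_bit)
		return DEVSTAT_FREE;
	return DEVSTAT_PREFREE;
}

int main(void)
{
	printf("%d\n", classify_seg(false, 512, 512, true));	/* DEVSTAT_FULL */
	printf("%d\n", classify_seg(false, 0, 512, false));	/* DEVSTAT_FREE */
	printf("%d\n", classify_seg(false, 0, 512, true));	/* DEVSTAT_PREFREE */
	return 0;
}
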
static void update_general_status(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
@@ -72,15 +136,26 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->main_area_zones = si->main_area_sections /
le32_to_cpu(raw_super->secs_per_zone);
- /* validation check of the segment numbers */
+ /* general extent cache stats */
+ for (i = 0; i < NR_EXTENT_CACHES; i++) {
+ struct extent_tree_info *eti = &sbi->extent_tree[i];
+
+ si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]);
+ si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]);
+ si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]);
+ si->hit_total[i] = si->hit_cached[i] + si->hit_rbtree[i];
+ si->ext_tree[i] = atomic_read(&eti->total_ext_tree);
+ si->zombie_tree[i] = atomic_read(&eti->total_zombie_tree);
+ si->ext_node[i] = atomic_read(&eti->total_ext_node);
+ }
+ /* read extent_cache only */
si->hit_largest = atomic64_read(&sbi->read_hit_largest);
- si->hit_cached = atomic64_read(&sbi->read_hit_cached);
- si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
- si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
- si->total_ext = atomic64_read(&sbi->total_hit_ext);
- si->ext_tree = atomic_read(&sbi->total_ext_tree);
- si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
- si->ext_node = atomic_read(&sbi->total_ext_node);
+ si->hit_total[EX_READ] += si->hit_largest;
+
+ /* block age extent_cache only */
+ si->allocated_data_blocks = atomic64_read(&sbi->allocated_data_blocks);
+
+ /* validation check of the segment numbers */
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
@@ -89,13 +164,11 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+ si->ndonate_files = sbi->donate_files;
si->nquota_files = sbi->nquota_files;
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
- si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
- si->aw_cnt = sbi->atomic_files;
- si->vw_cnt = atomic_read(&sbi->vw_cnt);
+ si->aw_cnt = atomic_read(&sbi->atomic_files);
si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
- si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
@@ -127,7 +200,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->cur_ckpt_time = sbi->cprc_info.cur_time;
si->peak_ckpt_time = sbi->cprc_info.peak_time;
spin_unlock(&sbi->cprc_info.stat_lock);
- si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
+ si->total_count = BLKS_TO_SEGS(sbi, (int)sbi->user_block_count);
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi);
@@ -138,6 +211,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->inline_inode = atomic_read(&sbi->inline_inode);
si->inline_dir = atomic_read(&sbi->inline_dir);
si->compr_inode = atomic_read(&sbi->compr_inode);
+ si->swapfile_inode = atomic_read(&sbi->swapfile_inode);
si->compr_blocks = atomic64_read(&sbi->compr_blocks);
si->append = sbi->im[APPEND_INO].ino_num;
si->update = sbi->im[UPDATE_INO].ino_num;
@@ -167,19 +241,17 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
si->io_skip_bggc = sbi->io_skip_bggc;
si->other_skip_bggc = sbi->other_skip_bggc;
- si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
- si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
- si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
+ si->util_free = (int)(BLKS_TO_SEGS(sbi, free_user_blocks(sbi)))
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
/ 2;
- si->util_valid = (int)(written_block_count(sbi) >>
- sbi->log_blocks_per_seg)
+ si->util_valid = (int)(BLKS_TO_SEGS(sbi, written_block_count(sbi)))
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
/ 2;
si->util_invalid = 50 - si->util_free - si->util_valid;
for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
+ si->blkoff[i] = curseg->next_blkoff;
si->curseg[i] = curseg->segno;
si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno);
si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
@@ -201,13 +273,18 @@ static void update_general_status(struct f2fs_sb_info *sbi)
if (!blks)
continue;
- if (blks == sbi->blocks_per_seg)
+ if (blks == BLKS_PER_SEG(sbi))
si->full_seg[type]++;
else
si->dirty_seg[type]++;
si->valid_blks[type] += blks;
}
+ update_multidevice_stats(sbi);
+
+ for (i = 0; i < MAX_CALL_TYPE; i++)
+ si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]);
+
for (i = 0; i < 2; i++) {
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
@@ -266,7 +343,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build nm */
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
- si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
+ si->base_mem += F2FS_BLK_TO_BYTES(NM_I(sbi)->nat_bits_blocks);
si->base_mem += NM_I(sbi)->nat_blocks *
f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
@@ -296,48 +373,67 @@ get_cache:
sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
sizeof(struct nat_entry_set);
- si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
for (i = 0; i < MAX_INO_ENTRY; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
- si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+
+ for (i = 0; i < NR_EXTENT_CACHES; i++) {
+ struct extent_tree_info *eti = &sbi->extent_tree[i];
+
+ si->ext_mem[i] = atomic_read(&eti->total_ext_tree) *
sizeof(struct extent_tree);
- si->cache_mem += atomic_read(&sbi->total_ext_node) *
+ si->ext_mem[i] += atomic_read(&eti->total_ext_node) *
sizeof(struct extent_node);
+ si->cache_mem += si->ext_mem[i];
+ }
si->page_mem = 0;
if (sbi->node_inode) {
- unsigned npages = NODE_MAPPING(sbi)->nrpages;
+ unsigned long npages = NODE_MAPPING(sbi)->nrpages;
si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
if (sbi->meta_inode) {
- unsigned npages = META_MAPPING(sbi)->nrpages;
+ unsigned long npages = META_MAPPING(sbi)->nrpages;
si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (sbi->compress_inode) {
- unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
+ unsigned long npages = COMPRESS_MAPPING(sbi)->nrpages;
+
si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
#endif
}
-static char *s_flag[] = {
- [SBI_IS_DIRTY] = " fs_dirty",
- [SBI_IS_CLOSE] = " closing",
- [SBI_NEED_FSCK] = " need_fsck",
- [SBI_POR_DOING] = " recovering",
- [SBI_NEED_SB_WRITE] = " sb_dirty",
- [SBI_NEED_CP] = " need_cp",
- [SBI_IS_SHUTDOWN] = " shutdown",
- [SBI_IS_RECOVERED] = " recovered",
- [SBI_CP_DISABLED] = " cp_disabled",
- [SBI_CP_DISABLED_QUICK] = " cp_disabled_quick",
- [SBI_QUOTA_NEED_FLUSH] = " quota_need_flush",
- [SBI_QUOTA_SKIP_FLUSH] = " quota_skip_flush",
- [SBI_QUOTA_NEED_REPAIR] = " quota_need_repair",
- [SBI_IS_RESIZEFS] = " resizefs",
+static const char *s_flag[MAX_SBI_FLAG] = {
+ [SBI_IS_DIRTY] = "fs_dirty",
+ [SBI_IS_CLOSE] = "closing",
+ [SBI_NEED_FSCK] = "need_fsck",
+ [SBI_POR_DOING] = "recovering",
+ [SBI_NEED_SB_WRITE] = "sb_dirty",
+ [SBI_NEED_CP] = "need_cp",
+ [SBI_IS_SHUTDOWN] = "shutdown",
+ [SBI_IS_RECOVERED] = "recovered",
+ [SBI_CP_DISABLED] = "cp_disabled",
+ [SBI_CP_DISABLED_QUICK] = "cp_disabled_quick",
+ [SBI_QUOTA_NEED_FLUSH] = "quota_need_flush",
+ [SBI_QUOTA_SKIP_FLUSH] = "quota_skip_flush",
+ [SBI_QUOTA_NEED_REPAIR] = "quota_need_repair",
+ [SBI_IS_RESIZEFS] = "resizefs",
+ [SBI_IS_FREEZING] = "freezefs",
+ [SBI_IS_WRITABLE] = "writable",
+};
+
+static const char *ipu_mode_names[F2FS_IPU_MAX] = {
+ [F2FS_IPU_FORCE] = "FORCE",
+ [F2FS_IPU_SSR] = "SSR",
+ [F2FS_IPU_UTIL] = "UTIL",
+ [F2FS_IPU_SSR_UTIL] = "SSR_UTIL",
+ [F2FS_IPU_FSYNC] = "FSYNC",
+ [F2FS_IPU_ASYNC] = "ASYNC",
+ [F2FS_IPU_NOCACHE] = "NOCACHE",
+ [F2FS_IPU_HONOR_OPU_WRITE] = "HONOR_OPU_WRITE",
};
static int stat_show(struct seq_file *s, void *v)
@@ -345,19 +441,21 @@ static int stat_show(struct seq_file *s, void *v)
struct f2fs_stat_info *si;
int i = 0, j = 0;
- mutex_lock(&f2fs_stat_mutex);
+ spin_lock(&f2fs_stat_lock);
list_for_each_entry(si, &f2fs_stat_list, stat_list) {
- update_general_status(si->sbi);
+ struct f2fs_sb_info *sbi = si->sbi;
+
+ update_general_status(sbi);
seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n",
- si->sbi->sb->s_bdev, i++,
- f2fs_readonly(si->sbi->sb) ? "RO": "RW",
- is_set_ckpt_flags(si->sbi, CP_DISABLED_FLAG) ?
- "Disabled" : (f2fs_cp_error(si->sbi) ? "Error" : "Good"));
- if (si->sbi->s_flag) {
+ sbi->sb->s_bdev, i++,
+ f2fs_readonly(sbi->sb) ? "RO" : "RW",
+ is_set_ckpt_flags(sbi, CP_DISABLED_FLAG) ?
+ "Disabled" : (f2fs_cp_error(sbi) ? "Error" : "Good"));
+ if (sbi->s_flag) {
seq_puts(s, "[SBI:");
- for_each_set_bit(j, &si->sbi->s_flag, 32)
- seq_puts(s, s_flag[j]);
+ for_each_set_bit(j, &sbi->s_flag, MAX_SBI_FLAG)
+ seq_printf(s, " %s", s_flag[j]);
seq_puts(s, "]\n");
}
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
@@ -368,8 +466,21 @@ static int stat_show(struct seq_file *s, void *v)
si->overp_segs, si->rsvd_segs);
seq_printf(s, "Current Time Sec: %llu / Mounted Time Sec: %llu\n\n",
ktime_get_boottime_seconds(),
- SIT_I(si->sbi)->mounted_time);
- if (test_opt(si->sbi, DISCARD))
+ SIT_I(sbi)->mounted_time);
+
+ seq_puts(s, "Policy:\n");
+ seq_puts(s, " - IPU: [");
+ if (IS_F2FS_IPU_DISABLE(sbi)) {
+ seq_puts(s, " DISABLE");
+ } else {
+ unsigned long policy = SM_I(sbi)->ipu_policy;
+
+ for_each_set_bit(j, &policy, F2FS_IPU_MAX)
+ seq_printf(s, " %s", ipu_mode_names[j]);
+ }
+ seq_puts(s, " ]\n\n");
+
+ if (test_opt(sbi, DISCARD))
seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
si->utilization, si->valid_count, si->discard_blks);
else
@@ -389,60 +500,72 @@ static int stat_show(struct seq_file *s, void *v)
si->inline_dir);
seq_printf(s, " - Compressed Inode: %u, Blocks: %llu\n",
si->compr_inode, si->compr_blocks);
+ seq_printf(s, " - Swapfile Inode: %u\n",
+ si->swapfile_inode);
+ seq_printf(s, " - Donate Inode: %u\n",
+ si->ndonate_files);
seq_printf(s, " - Orphan/Append/Update Inode: %u, %u, %u\n",
si->orphans, si->append, si->update);
seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
si->main_area_segs, si->main_area_sections,
si->main_area_zones);
- seq_printf(s, " TYPE %8s %8s %8s %10s %10s %10s\n",
- "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk");
- seq_printf(s, " - COLD data: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " TYPE %8s %8s %8s %8s %10s %10s %10s\n",
+ "blkoff", "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk");
+ seq_printf(s, " - COLD data: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_COLD_DATA],
si->curseg[CURSEG_COLD_DATA],
si->cursec[CURSEG_COLD_DATA],
si->curzone[CURSEG_COLD_DATA],
si->dirty_seg[CURSEG_COLD_DATA],
si->full_seg[CURSEG_COLD_DATA],
si->valid_blks[CURSEG_COLD_DATA]);
- seq_printf(s, " - WARM data: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - WARM data: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_WARM_DATA],
si->curseg[CURSEG_WARM_DATA],
si->cursec[CURSEG_WARM_DATA],
si->curzone[CURSEG_WARM_DATA],
si->dirty_seg[CURSEG_WARM_DATA],
si->full_seg[CURSEG_WARM_DATA],
si->valid_blks[CURSEG_WARM_DATA]);
- seq_printf(s, " - HOT data: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - HOT data: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_HOT_DATA],
si->curseg[CURSEG_HOT_DATA],
si->cursec[CURSEG_HOT_DATA],
si->curzone[CURSEG_HOT_DATA],
si->dirty_seg[CURSEG_HOT_DATA],
si->full_seg[CURSEG_HOT_DATA],
si->valid_blks[CURSEG_HOT_DATA]);
- seq_printf(s, " - Dir dnode: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - Dir dnode: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_HOT_NODE],
si->curseg[CURSEG_HOT_NODE],
si->cursec[CURSEG_HOT_NODE],
si->curzone[CURSEG_HOT_NODE],
si->dirty_seg[CURSEG_HOT_NODE],
si->full_seg[CURSEG_HOT_NODE],
si->valid_blks[CURSEG_HOT_NODE]);
- seq_printf(s, " - File dnode: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - File dnode: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_WARM_NODE],
si->curseg[CURSEG_WARM_NODE],
si->cursec[CURSEG_WARM_NODE],
si->curzone[CURSEG_WARM_NODE],
si->dirty_seg[CURSEG_WARM_NODE],
si->full_seg[CURSEG_WARM_NODE],
si->valid_blks[CURSEG_WARM_NODE]);
- seq_printf(s, " - Indir nodes: %8d %8d %8d %10u %10u %10u\n",
+ seq_printf(s, " - Indir nodes: %8d %8d %8d %8d %10u %10u %10u\n",
+ si->blkoff[CURSEG_COLD_NODE],
si->curseg[CURSEG_COLD_NODE],
si->cursec[CURSEG_COLD_NODE],
si->curzone[CURSEG_COLD_NODE],
si->dirty_seg[CURSEG_COLD_NODE],
si->full_seg[CURSEG_COLD_NODE],
si->valid_blks[CURSEG_COLD_NODE]);
- seq_printf(s, " - Pinned file: %8d %8d %8d\n",
+ seq_printf(s, " - Pinned file: %8d %8d %8d %8d\n",
+ si->blkoff[CURSEG_COLD_DATA_PINNED],
si->curseg[CURSEG_COLD_DATA_PINNED],
si->cursec[CURSEG_COLD_DATA_PINNED],
si->curzone[CURSEG_COLD_DATA_PINNED]);
- seq_printf(s, " - ATGC data: %8d %8d %8d\n",
+ seq_printf(s, " - ATGC data: %8d %8d %8d %8d\n",
+ si->blkoff[CURSEG_ALL_DATA_ATGC],
si->curseg[CURSEG_ALL_DATA_ATGC],
si->cursec[CURSEG_ALL_DATA_ATGC],
si->curzone[CURSEG_ALL_DATA_ATGC]);
@@ -452,8 +575,40 @@ static int stat_show(struct seq_file *s, void *v)
si->dirty_count);
seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
si->prefree_count, si->free_segs, si->free_secs);
+ if (f2fs_is_multi_device(sbi)) {
+ seq_puts(s, "Multidevice stats:\n");
+ seq_printf(s, " [seg: %8s %8s %8s %8s %8s]",
+ "inuse", "dirty", "full", "free", "prefree");
+ if (__is_large_section(sbi))
+ seq_printf(s, " [sec: %8s %8s %8s %8s %8s]\n",
+ "inuse", "dirty", "full", "free", "prefree");
+ else
+ seq_puts(s, "\n");
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ seq_printf(s, " #%-2d %8u %8u %8u %8u %8u", i,
+ si->dev_stats[i].devstats[0][DEVSTAT_INUSE],
+ si->dev_stats[i].devstats[0][DEVSTAT_DIRTY],
+ si->dev_stats[i].devstats[0][DEVSTAT_FULL],
+ si->dev_stats[i].devstats[0][DEVSTAT_FREE],
+ si->dev_stats[i].devstats[0][DEVSTAT_PREFREE]);
+ if (!__is_large_section(sbi)) {
+ seq_puts(s, "\n");
+ continue;
+ }
+ seq_printf(s, " %8u %8u %8u %8u %8u\n",
+ si->dev_stats[i].devstats[1][DEVSTAT_INUSE],
+ si->dev_stats[i].devstats[1][DEVSTAT_DIRTY],
+ si->dev_stats[i].devstats[1][DEVSTAT_FULL],
+ si->dev_stats[i].devstats[1][DEVSTAT_FREE],
+ si->dev_stats[i].devstats[1][DEVSTAT_PREFREE]);
+ }
+ seq_puts(s, "\n");
+ }
seq_printf(s, "CP calls: %d (BG: %d)\n",
- si->cp_count, si->bg_cp_count);
+ si->cp_call_count[TOTAL_CALL],
+ si->cp_call_count[BACKGROUND]);
+ seq_printf(s, "CP count: %d\n", si->cp_count);
seq_printf(s, " - cp blocks : %u\n", si->meta_count[META_CP]);
seq_printf(s, " - sit blocks : %u\n",
si->meta_count[META_SIT]);
@@ -461,77 +616,106 @@ static int stat_show(struct seq_file *s, void *v)
si->meta_count[META_NAT]);
seq_printf(s, " - ssa blocks : %u\n",
si->meta_count[META_SSA]);
- seq_printf(s, "CP merge (Queued: %4d, Issued: %4d, Total: %4d, "
- "Cur time: %4d(ms), Peak time: %4d(ms))\n",
- si->nr_queued_ckpt, si->nr_issued_ckpt,
- si->nr_total_ckpt, si->cur_ckpt_time,
- si->peak_ckpt_time);
- seq_printf(s, "GC calls: %d (BG: %d)\n",
- si->call_count, si->bg_gc);
- seq_printf(s, " - data segments : %d (%d)\n",
- si->data_segs, si->bg_data_segs);
- seq_printf(s, " - node segments : %d (%d)\n",
- si->node_segs, si->bg_node_segs);
- seq_printf(s, " - Reclaimed segs : Normal (%d), Idle CB (%d), "
- "Idle Greedy (%d), Idle AT (%d), "
- "Urgent High (%d), Urgent Low (%d)\n",
- si->sbi->gc_reclaimed_segs[GC_NORMAL],
- si->sbi->gc_reclaimed_segs[GC_IDLE_CB],
- si->sbi->gc_reclaimed_segs[GC_IDLE_GREEDY],
- si->sbi->gc_reclaimed_segs[GC_IDLE_AT],
- si->sbi->gc_reclaimed_segs[GC_URGENT_HIGH],
- si->sbi->gc_reclaimed_segs[GC_URGENT_LOW]);
+ seq_puts(s, "CP merge:\n");
+ seq_printf(s, " - Queued : %4d\n", si->nr_queued_ckpt);
+ seq_printf(s, " - Issued : %4d\n", si->nr_issued_ckpt);
+ seq_printf(s, " - Total : %4d\n", si->nr_total_ckpt);
+ seq_printf(s, " - Cur time : %4d(ms)\n", si->cur_ckpt_time);
+ seq_printf(s, " - Peak time : %4d(ms)\n", si->peak_ckpt_time);
+ seq_printf(s, "GC calls: %d (gc_thread: %d)\n",
+ si->gc_call_count[BACKGROUND] +
+ si->gc_call_count[FOREGROUND],
+ si->gc_call_count[BACKGROUND]);
+ if (__is_large_section(sbi)) {
+ seq_printf(s, " - data sections : %d (BG: %d)\n",
+ si->gc_secs[DATA][BG_GC] + si->gc_secs[DATA][FG_GC],
+ si->gc_secs[DATA][BG_GC]);
+ seq_printf(s, " - node sections : %d (BG: %d)\n",
+ si->gc_secs[NODE][BG_GC] + si->gc_secs[NODE][FG_GC],
+ si->gc_secs[NODE][BG_GC]);
+ }
+ seq_printf(s, " - data segments : %d (BG: %d)\n",
+ si->gc_segs[DATA][BG_GC] + si->gc_segs[DATA][FG_GC],
+ si->gc_segs[DATA][BG_GC]);
+ seq_printf(s, " - node segments : %d (BG: %d)\n",
+ si->gc_segs[NODE][BG_GC] + si->gc_segs[NODE][FG_GC],
+ si->gc_segs[NODE][BG_GC]);
+ seq_puts(s, " - Reclaimed segs :\n");
+ seq_printf(s, " - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]);
+ seq_printf(s, " - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]);
+ seq_printf(s, " - Idle Greedy : %d\n",
+ sbi->gc_reclaimed_segs[GC_IDLE_GREEDY]);
+ seq_printf(s, " - Idle AT : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_AT]);
+ seq_printf(s, " - Urgent High : %d\n",
+ sbi->gc_reclaimed_segs[GC_URGENT_HIGH]);
+ seq_printf(s, " - Urgent Mid : %d\n", sbi->gc_reclaimed_segs[GC_URGENT_MID]);
+ seq_printf(s, " - Urgent Low : %d\n", sbi->gc_reclaimed_segs[GC_URGENT_LOW]);
seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
si->bg_data_blks + si->bg_node_blks);
seq_printf(s, " - data blocks : %d (%d)\n", si->data_blks,
si->bg_data_blks);
seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
si->bg_node_blks);
- seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
- si->skipped_atomic_files[BG_GC] +
- si->skipped_atomic_files[FG_GC],
- si->skipped_atomic_files[BG_GC]);
seq_printf(s, "BG skip : IO: %u, Other: %u\n",
si->io_skip_bggc, si->other_skip_bggc);
- seq_puts(s, "\nExtent Cache:\n");
+ seq_puts(s, "\nExtent Cache (Read):\n");
seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
- si->hit_largest, si->hit_cached,
- si->hit_rbtree);
+ si->hit_largest, si->hit_cached[EX_READ],
+ si->hit_rbtree[EX_READ]);
+ seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n",
+ !si->total_ext[EX_READ] ? 0 :
+ div64_u64(si->hit_total[EX_READ] * 100,
+ si->total_ext[EX_READ]),
+ si->hit_total[EX_READ], si->total_ext[EX_READ]);
+ seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
+ si->ext_tree[EX_READ], si->zombie_tree[EX_READ],
+ si->ext_node[EX_READ]);
+ seq_puts(s, "\nExtent Cache (Block Age):\n");
+ seq_printf(s, " - Allocated Data Blocks: %llu\n",
+ si->allocated_data_blocks);
+ seq_printf(s, " - Hit Count: L1:%llu L2:%llu\n",
+ si->hit_cached[EX_BLOCK_AGE],
+ si->hit_rbtree[EX_BLOCK_AGE]);
seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n",
- !si->total_ext ? 0 :
- div64_u64(si->hit_total * 100, si->total_ext),
- si->hit_total, si->total_ext);
+ !si->total_ext[EX_BLOCK_AGE] ? 0 :
+ div64_u64(si->hit_total[EX_BLOCK_AGE] * 100,
+ si->total_ext[EX_BLOCK_AGE]),
+ si->hit_total[EX_BLOCK_AGE],
+ si->total_ext[EX_BLOCK_AGE]);
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
- si->ext_tree, si->zombie_tree, si->ext_node);
+ si->ext_tree[EX_BLOCK_AGE],
+ si->zombie_tree[EX_BLOCK_AGE],
+ si->ext_node[EX_BLOCK_AGE]);
seq_puts(s, "\nBalancing F2FS Async:\n");
seq_printf(s, " - DIO (R: %4d, W: %4d)\n",
si->nr_dio_read, si->nr_dio_write);
seq_printf(s, " - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
- seq_printf(s, " - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
- "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
+ seq_printf(s, " - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), ",
si->nr_wb_cp_data, si->nr_wb_data,
si->nr_flushing, si->nr_flushed,
- si->flush_list_empty,
+ si->flush_list_empty);
+ seq_printf(s, "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
si->nr_discarding, si->nr_discarded,
si->nr_discard_cmd, si->undiscard_blks);
- seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), "
- "volatile IO: %4d (Max. %4d)\n",
- si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
- si->vw_cnt, si->max_vw_cnt);
+ seq_printf(s, " - atomic IO: %4d (Max. %4d)\n",
+ si->aw_cnt, si->max_aw_cnt);
seq_printf(s, " - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
- seq_printf(s, " - datas: %4d in files:%4d\n",
+ seq_printf(s, " - data: %4d in files:%4d\n",
si->ndirty_data, si->ndirty_files);
- seq_printf(s, " - quota datas: %4d in quota files:%4d\n",
+ seq_printf(s, " - quota data: %4d in quota files:%4d\n",
si->ndirty_qdata, si->nquota_files);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
seq_printf(s, " - imeta: %4d\n",
si->ndirty_imeta);
+ seq_printf(s, " - fsync mark: %4lld\n",
+ percpu_counter_sum_positive(
+ &sbi->rf_node_block_count));
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
seq_printf(s, " - free_nids: %9d/%9d\n - alloc_nids: %9d\n",
@@ -558,22 +742,26 @@ static int stat_show(struct seq_file *s, void *v)
si->block_count[LFS], si->segment_count[LFS]);
/* segment usage info */
- f2fs_update_sit_info(si->sbi);
+ f2fs_update_sit_info(sbi);
seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
si->bimodal, si->avg_vblocks);
/* memory footprint */
- update_mem_info(si->sbi);
+ update_mem_info(sbi);
seq_printf(s, "\nMemory: %llu KB\n",
(si->base_mem + si->cache_mem + si->page_mem) >> 10);
seq_printf(s, " - static: %llu KB\n",
si->base_mem >> 10);
- seq_printf(s, " - cached: %llu KB\n",
+ seq_printf(s, " - cached all: %llu KB\n",
si->cache_mem >> 10);
+ seq_printf(s, " - read extent cache: %llu KB\n",
+ si->ext_mem[EX_READ] >> 10);
+ seq_printf(s, " - block age extent cache: %llu KB\n",
+ si->ext_mem[EX_BLOCK_AGE] >> 10);
seq_printf(s, " - paged : %llu KB\n",
si->page_mem >> 10);
}
- mutex_unlock(&f2fs_stat_mutex);
+ spin_unlock(&f2fs_stat_lock);
return 0;
}
@@ -584,12 +772,22 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
+ struct f2fs_dev_stats *dev_stats;
int i;
si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
if (!si)
return -ENOMEM;
+ dev_stats = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_stats) *
+ sbi->s_ndevs, GFP_KERNEL);
+ if (!dev_stats) {
+ kfree(si);
+ return -ENOMEM;
+ }
+
+ si->dev_stats = dev_stats;
+
si->all_area_segs = le32_to_cpu(raw_super->segment_count);
si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -601,27 +799,34 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
si->sbi = sbi;
sbi->stat_info = si;
- atomic64_set(&sbi->total_hit_ext, 0);
- atomic64_set(&sbi->read_hit_rbtree, 0);
+ /* general extent cache stats */
+ for (i = 0; i < NR_EXTENT_CACHES; i++) {
+ atomic64_set(&sbi->total_hit_ext[i], 0);
+ atomic64_set(&sbi->read_hit_rbtree[i], 0);
+ atomic64_set(&sbi->read_hit_cached[i], 0);
+ }
+
+ /* read extent_cache only */
atomic64_set(&sbi->read_hit_largest, 0);
- atomic64_set(&sbi->read_hit_cached, 0);
atomic_set(&sbi->inline_xattr, 0);
atomic_set(&sbi->inline_inode, 0);
atomic_set(&sbi->inline_dir, 0);
atomic_set(&sbi->compr_inode, 0);
atomic64_set(&sbi->compr_blocks, 0);
+ atomic_set(&sbi->swapfile_inode, 0);
+ atomic_set(&sbi->atomic_files, 0);
atomic_set(&sbi->inplace_count, 0);
for (i = META_CP; i < META_MAX; i++)
atomic_set(&sbi->meta_count[i], 0);
+ for (i = 0; i < MAX_CALL_TYPE; i++)
+ atomic_set(&sbi->cp_call_count[i], 0);
- atomic_set(&sbi->vw_cnt, 0);
atomic_set(&sbi->max_aw_cnt, 0);
- atomic_set(&sbi->max_vw_cnt, 0);
- mutex_lock(&f2fs_stat_mutex);
+ spin_lock(&f2fs_stat_lock);
list_add_tail(&si->stat_list, &f2fs_stat_list);
- mutex_unlock(&f2fs_stat_mutex);
+ spin_unlock(&f2fs_stat_lock);
return 0;
}
@@ -630,10 +835,11 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
- mutex_lock(&f2fs_stat_mutex);
+ spin_lock(&f2fs_stat_lock);
list_del(&si->stat_list);
- mutex_unlock(&f2fs_stat_mutex);
+ spin_unlock(&f2fs_stat_lock);
+ kfree(si->dev_stats);
kfree(si);
}
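
The debug.c hunks above convert the global stat-list protection from f2fs_stat_mutex to the spinlock f2fs_stat_lock and add a per-device stats array that f2fs_build_stats() allocates and f2fs_destroy_stats() must free together with its owner. Below is a minimal, self-contained sketch of that register/allocate/unregister lifecycle only; the demo_* names are invented for illustration and this is not the f2fs code itself.

/* Sketch: a spinlock-protected global list of per-instance stat objects,
 * each owning a separately allocated per-device array. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_dev_stats {
	unsigned int inuse, dirty, full, free, prefree;
};

struct demo_stat_info {
	struct list_head stat_list;
	struct demo_dev_stats *dev_stats;	/* one entry per device */
	int ndevs;
};

static LIST_HEAD(demo_stat_list);
static DEFINE_SPINLOCK(demo_stat_lock);		/* replaces a mutex in the patch above */

static int demo_build_stats(int ndevs, struct demo_stat_info **out)
{
	struct demo_stat_info *si;

	si = kzalloc(sizeof(*si), GFP_KERNEL);
	if (!si)
		return -ENOMEM;

	si->dev_stats = kcalloc(ndevs, sizeof(*si->dev_stats), GFP_KERNEL);
	if (!si->dev_stats) {
		kfree(si);			/* undo the first allocation on failure */
		return -ENOMEM;
	}
	si->ndevs = ndevs;

	spin_lock(&demo_stat_lock);
	list_add_tail(&si->stat_list, &demo_stat_list);
	spin_unlock(&demo_stat_lock);

	*out = si;
	return 0;
}

static void demo_destroy_stats(struct demo_stat_info *si)
{
	spin_lock(&demo_stat_lock);
	list_del(&si->stat_list);
	spin_unlock(&demo_stat_lock);

	kfree(si->dev_stats);			/* free the sub-allocation before its owner */
	kfree(si);
}
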
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 1820e9c106f7..48f4f98afb01 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -5,7 +5,7 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/signal.h>
@@ -16,7 +16,22 @@
#include "xattr.h"
#include <trace/events/f2fs.h>
-#ifdef CONFIG_UNICODE
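+/*
+ * Decide whether a failed hash-based lookup in a casefolded directory should
+ * be retried as a linear scan of all buckets (the use_hash retry in
+ * __f2fs_find_entry()): never for LOOKUP_PERF, always for LOOKUP_COMPAT, and
+ * for LOOKUP_AUTO only when the superblock does not opt out of the compat
+ * fallback.
+ */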
+static inline bool f2fs_should_fallback_to_linear(struct inode *dir)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+
+ switch (F2FS_OPTION(sbi).lookup_mode) {
+ case LOOKUP_PERF:
+ return false;
+ case LOOKUP_COMPAT:
+ return true;
+ case LOOKUP_AUTO:
+ return !sb_no_casefold_compat_fallback(sbi->sb);
+ }
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -29,7 +44,7 @@ static unsigned long dir_blocks(struct inode *inode)
static unsigned int dir_buckets(unsigned int level, int dir_level)
{
if (level + dir_level < MAX_DIR_HASH_DEPTH / 2)
- return 1 << (level + dir_level);
+ return BIT(level + dir_level);
else
return MAX_DIR_BUCKETS;
}
@@ -42,67 +57,49 @@ static unsigned int bucket_blocks(unsigned int level)
return 4;
}
-static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
- [F2FS_FT_UNKNOWN] = DT_UNKNOWN,
- [F2FS_FT_REG_FILE] = DT_REG,
- [F2FS_FT_DIR] = DT_DIR,
- [F2FS_FT_CHRDEV] = DT_CHR,
- [F2FS_FT_BLKDEV] = DT_BLK,
- [F2FS_FT_FIFO] = DT_FIFO,
- [F2FS_FT_SOCK] = DT_SOCK,
- [F2FS_FT_SYMLINK] = DT_LNK,
-};
-
-static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
- [S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
- [S_IFCHR >> S_SHIFT] = F2FS_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = F2FS_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = F2FS_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = F2FS_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = F2FS_FT_SYMLINK,
-};
-
-static void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
-{
- de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
-}
-
-unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de)
-{
- if (de->file_type < F2FS_FT_MAX)
- return f2fs_filetype_table[de->file_type];
- return DT_UNKNOWN;
-}
-
+#if IS_ENABLED(CONFIG_UNICODE)
/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname)
{
-#ifdef CONFIG_UNICODE
struct super_block *sb = dir->i_sb;
-
- if (IS_CASEFOLDED(dir)) {
- fname->cf_name.name = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
- GFP_NOFS, false, F2FS_SB(sb));
- if (!fname->cf_name.name)
+ unsigned char *buf;
+ int len;
+
+ if (IS_CASEFOLDED(dir) &&
+ !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) {
+ buf = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
+ GFP_NOFS, false, F2FS_SB(sb));
+ if (!buf)
return -ENOMEM;
- fname->cf_name.len = utf8_casefold(sb->s_encoding,
- fname->usr_fname,
- fname->cf_name.name,
- F2FS_NAME_LEN);
- if ((int)fname->cf_name.len <= 0) {
- kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
- fname->cf_name.name = NULL;
+
+ len = utf8_casefold(sb->s_encoding, fname->usr_fname,
+ buf, F2FS_NAME_LEN);
+ if (len <= 0) {
+ kmem_cache_free(f2fs_cf_name_slab, buf);
if (sb_has_strict_encoding(sb))
return -EINVAL;
/* fall back to treating name as opaque byte sequence */
+ return 0;
}
+ fname->cf_name.name = buf;
+ fname->cf_name.len = len;
}
-#endif
+
return 0;
}
+void f2fs_free_casefolded_name(struct f2fs_filename *fname)
+{
+ unsigned char *buf = (unsigned char *)fname->cf_name.name;
+
+ if (buf) {
+ kmem_cache_free(f2fs_cf_name_slab, buf);
+ fname->cf_name.name = NULL;
+ }
+}
+#endif /* CONFIG_UNICODE */
+
static int __f2fs_setup_filename(const struct inode *dir,
const struct fscrypt_name *crypt_name,
struct f2fs_filename *fname)
@@ -174,12 +171,7 @@ void f2fs_free_filename(struct f2fs_filename *fname)
kfree(fname->crypto_buf.name);
fname->crypto_buf.name = NULL;
#endif
-#ifdef CONFIG_UNICODE
- if (fname->cf_name.name) {
- kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
- fname->cf_name.name = NULL;
- }
-#endif
+ f2fs_free_casefolded_name(fname);
}
static unsigned long dir_block_index(unsigned int level,
@@ -189,76 +181,26 @@ static unsigned long dir_block_index(unsigned int level,
unsigned long bidx = 0;
for (i = 0; i < level; i++)
- bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
+ bidx += mul_u32_u32(dir_buckets(i, dir_level),
+ bucket_blocks(i));
bidx += idx * bucket_blocks(level);
return bidx;
}
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
- struct page *dentry_page,
+ struct folio *dentry_folio,
const struct f2fs_filename *fname,
- int *max_slots)
+ int *max_slots,
+ bool use_hash)
{
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr d;
- dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
make_dentry_ptr_block(dir, &d, dentry_blk);
- return f2fs_find_target_dentry(&d, fname, max_slots);
-}
-
-#ifdef CONFIG_UNICODE
-/*
- * Test whether a case-insensitive directory entry matches the filename
- * being searched for.
- *
- * Returns 1 for a match, 0 for no match, and -errno on an error.
- */
-static int f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
- const u8 *de_name, u32 de_name_len)
-{
- const struct super_block *sb = dir->i_sb;
- const struct unicode_map *um = sb->s_encoding;
- struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
- struct qstr entry = QSTR_INIT(de_name, de_name_len);
- int res;
-
- if (IS_ENCRYPTED(dir)) {
- const struct fscrypt_str encrypted_name =
- FSTR_INIT((u8 *)de_name, de_name_len);
-
- if (WARN_ON_ONCE(!fscrypt_has_encryption_key(dir)))
- return -EINVAL;
-
- decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
- if (!decrypted_name.name)
- return -ENOMEM;
- res = fscrypt_fname_disk_to_usr(dir, 0, 0, &encrypted_name,
- &decrypted_name);
- if (res < 0)
- goto out;
- entry.name = decrypted_name.name;
- entry.len = decrypted_name.len;
- }
-
- res = utf8_strncasecmp_folded(um, name, &entry);
- /*
- * In strict mode, ignore invalid names. In non-strict mode,
- * fall back to treating them as opaque byte sequences.
- */
- if (res < 0 && !sb_has_strict_encoding(sb)) {
- res = name->len == entry.len &&
- memcmp(name->name, entry.name, name->len) == 0;
- } else {
- /* utf8_strncasecmp_folded returns 0 on match */
- res = (res == 0);
- }
-out:
- kfree(decrypted_name.name);
- return res;
+ return f2fs_find_target_dentry(&d, fname, max_slots, use_hash);
}
-#endif /* CONFIG_UNICODE */
static inline int f2fs_match_name(const struct inode *dir,
const struct f2fs_filename *fname,
@@ -266,12 +208,12 @@ static inline int f2fs_match_name(const struct inode *dir,
{
struct fscrypt_name f;
-#ifdef CONFIG_UNICODE
- if (fname->cf_name.name) {
- struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);
+#if IS_ENABLED(CONFIG_UNICODE)
+ if (fname->cf_name.name)
+ return generic_ci_match(dir, fname->usr_fname,
+ &fname->cf_name,
+ de_name, de_name_len);
- return f2fs_match_ci_name(dir, &cf, de_name, de_name_len);
- }
#endif
f.usr_fname = fname->usr_fname;
f.disk_name = fname->disk_name;
@@ -282,7 +224,8 @@ static inline int f2fs_match_name(const struct inode *dir,
}
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots)
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
@@ -305,7 +248,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
continue;
}
- if (de->hash_code == fname->hash) {
+ if (!use_hash || de->hash_code == fname->hash) {
res = f2fs_match_name(d->inode, fname,
d->filename[bit_pos],
le16_to_cpu(de->name_len));
@@ -332,72 +275,90 @@ found:
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct folio **res_folio,
+ bool use_hash)
{
int s = GET_DENTRY_SLOTS(fname->disk_name.len);
unsigned int nbucket, nblock;
- unsigned int bidx, end_block;
- struct page *dentry_page;
+ unsigned int bidx, end_block, bucket_no;
struct f2fs_dir_entry *de = NULL;
+ pgoff_t next_pgofs;
bool room = false;
int max_slots;
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
+ bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0;
+
+start_find_bucket:
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- le32_to_cpu(fname->hash) % nbucket);
+ bucket_no);
end_block = bidx + nblock;
- for (; bidx < end_block; bidx++) {
+ while (bidx < end_block) {
/* no need to allocate new dentry pages to all the indices */
- dentry_page = f2fs_find_data_page(dir, bidx);
- if (IS_ERR(dentry_page)) {
- if (PTR_ERR(dentry_page) == -ENOENT) {
+ struct folio *dentry_folio;
+ dentry_folio = f2fs_find_data_folio(dir, bidx, &next_pgofs);
+ if (IS_ERR(dentry_folio)) {
+ if (PTR_ERR(dentry_folio) == -ENOENT) {
room = true;
+ bidx = next_pgofs;
continue;
} else {
- *res_page = dentry_page;
+ *res_folio = dentry_folio;
break;
}
}
- de = find_in_block(dir, dentry_page, fname, &max_slots);
+ de = find_in_block(dir, dentry_folio, fname, &max_slots, use_hash);
if (IS_ERR(de)) {
- *res_page = ERR_CAST(de);
+ *res_folio = ERR_CAST(de);
de = NULL;
break;
} else if (de) {
- *res_page = dentry_page;
+ *res_folio = dentry_folio;
break;
}
if (max_slots >= s)
room = true;
- f2fs_put_page(dentry_page, 0);
- }
+ f2fs_folio_put(dentry_folio, false);
- if (!de && room && F2FS_I(dir)->chash != fname->hash) {
- F2FS_I(dir)->chash = fname->hash;
- F2FS_I(dir)->clevel = level;
+ bidx++;
}
- return de;
+ if (de)
+ return de;
+
+ if (likely(use_hash)) {
+ if (room && F2FS_I(dir)->chash != fname->hash) {
+ F2FS_I(dir)->chash = fname->hash;
+ F2FS_I(dir)->clevel = level;
+ }
+ } else if (++bucket_no < nbucket) {
+ goto start_find_bucket;
+ }
+ return NULL;
}
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct folio **res_folio)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
+ bool use_hash = true;
- *res_page = NULL;
+ *res_folio = NULL;
+#if IS_ENABLED(CONFIG_UNICODE)
+start_find_entry:
+#endif
if (f2fs_has_inline_dentry(dir)) {
- de = f2fs_find_in_inline_dir(dir, fname, res_page);
+ de = f2fs_find_in_inline_dir(dir, fname, res_folio, use_hash);
goto out;
}
@@ -413,11 +374,19 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
}
for (level = 0; level < max_depth; level++) {
- de = find_in_level(dir, level, fname, res_page);
- if (de || IS_ERR(*res_page))
+ de = find_in_level(dir, level, fname, res_folio, use_hash);
+ if (de || IS_ERR(*res_folio))
break;
}
+
out:
+#if IS_ENABLED(CONFIG_UNICODE)
+ if (f2fs_should_fallback_to_linear(dir) &&
+ IS_CASEFOLDED(dir) && !de && use_hash) {
+ use_hash = false;
+ goto start_find_entry;
+ }
+#endif
/* This is to increase the speed of f2fs_create */
if (!de)
F2FS_I(dir)->task = current;
@@ -431,7 +400,7 @@ out:
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- const struct qstr *child, struct page **res_page)
+ const struct qstr *child, struct folio **res_folio)
{
struct f2fs_dir_entry *de = NULL;
struct f2fs_filename fname;
@@ -440,67 +409,67 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
err = f2fs_setup_filename(dir, child, 1, &fname);
if (err) {
if (err == -ENOENT)
- *res_page = NULL;
+ *res_folio = NULL;
else
- *res_page = ERR_PTR(err);
+ *res_folio = ERR_PTR(err);
return NULL;
}
- de = __f2fs_find_entry(dir, &fname, res_page);
+ de = __f2fs_find_entry(dir, &fname, res_folio);
f2fs_free_filename(&fname);
return de;
}
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f)
{
- return f2fs_find_entry(dir, &dotdot_name, p);
+ return f2fs_find_entry(dir, &dotdot_name, f);
}
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
- struct page **page)
+ struct folio **folio)
{
ino_t res = 0;
struct f2fs_dir_entry *de;
- de = f2fs_find_entry(dir, qstr, page);
+ de = f2fs_find_entry(dir, qstr, folio);
if (de) {
res = le32_to_cpu(de->ino);
- f2fs_put_page(*page, 0);
+ f2fs_folio_put(*folio, false);
}
return res;
}
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
- struct page *page, struct inode *inode)
+ struct folio *folio, struct inode *inode)
{
enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
- lock_page(page);
- f2fs_wait_on_page_writeback(page, type, true, true);
+ folio_lock(folio);
+ f2fs_folio_wait_writeback(folio, type, true, true);
de->ino = cpu_to_le32(inode->i_ino);
- set_de_type(de, inode->i_mode);
- set_page_dirty(page);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
+ folio_mark_dirty(folio);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
static void init_dent_inode(struct inode *dir, struct inode *inode,
const struct f2fs_filename *fname,
- struct page *ipage)
+ struct folio *ifolio)
{
struct f2fs_inode *ri;
if (!fname) /* tmpfile case? */
return;
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
- /* copy name info. to this inode page */
- ri = F2FS_INODE(ipage);
+ /* copy name info. to this inode folio */
+ ri = F2FS_INODE(ifolio);
ri->i_namelen = cpu_to_le32(fname->disk_name.len);
memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
if (IS_ENCRYPTED(dir)) {
@@ -521,7 +490,7 @@ static void init_dent_inode(struct inode *dir, struct inode *inode,
file_lost_pino(inode);
}
}
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
}
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
@@ -538,72 +507,73 @@ void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
}
static int make_empty_dir(struct inode *inode,
- struct inode *parent, struct page *page)
+ struct inode *parent, struct folio *folio)
{
- struct page *dentry_page;
+ struct folio *dentry_folio;
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr d;
if (f2fs_has_inline_dentry(inode))
- return f2fs_make_empty_inline_dir(inode, parent, page);
+ return f2fs_make_empty_inline_dir(inode, parent, folio);
- dentry_page = f2fs_get_new_data_page(inode, page, 0, true);
- if (IS_ERR(dentry_page))
- return PTR_ERR(dentry_page);
+ dentry_folio = f2fs_get_new_data_folio(inode, folio, 0, true);
+ if (IS_ERR(dentry_folio))
+ return PTR_ERR(dentry_folio);
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
make_dentry_ptr_block(NULL, &d, dentry_blk);
f2fs_do_make_empty_dir(inode, parent, &d);
- set_page_dirty(dentry_page);
- f2fs_put_page(dentry_page, 1);
+ folio_mark_dirty(dentry_folio);
+ f2fs_folio_put(dentry_folio, true);
return 0;
}
-struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct f2fs_filename *fname, struct page *dpage)
+struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct f2fs_filename *fname, struct folio *dfolio)
{
- struct page *page;
+ struct folio *folio;
int err;
if (is_inode_flag_set(inode, FI_NEW_INODE)) {
- page = f2fs_new_inode_page(inode);
- if (IS_ERR(page))
- return page;
+ folio = f2fs_new_inode_folio(inode);
+ if (IS_ERR(folio))
+ return folio;
if (S_ISDIR(inode->i_mode)) {
/* in order to handle error case */
- get_page(page);
- err = make_empty_dir(inode, dir, page);
+ folio_get(folio);
+ err = make_empty_dir(inode, dir, folio);
if (err) {
- lock_page(page);
+ folio_lock(folio);
goto put_error;
}
- put_page(page);
+ folio_put(folio);
}
- err = f2fs_init_acl(inode, dir, page, dpage);
+ err = f2fs_init_acl(inode, dir, folio, dfolio);
if (err)
goto put_error;
err = f2fs_init_security(inode, dir,
- fname ? fname->usr_fname : NULL, page);
+ fname ? fname->usr_fname : NULL,
+ folio);
if (err)
goto put_error;
if (IS_ENCRYPTED(inode)) {
- err = fscrypt_set_context(inode, page);
+ err = fscrypt_set_context(inode, folio);
if (err)
goto put_error;
}
} else {
- page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
- if (IS_ERR(page))
- return page;
+ folio = f2fs_get_inode_folio(F2FS_I_SB(dir), inode->i_ino);
+ if (IS_ERR(folio))
+ return folio;
}
- init_dent_inode(dir, inode, fname, page);
+ init_dent_inode(dir, inode, fname, folio);
/*
* This file should be checkpointed during fsync.
@@ -620,12 +590,12 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
f2fs_remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
f2fs_i_links_write(inode, true);
}
- return page;
+ return folio;
put_error:
clear_nlink(inode);
- f2fs_update_inode(inode, page);
- f2fs_put_page(page, 1);
+ f2fs_update_inode(inode, folio);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
@@ -637,7 +607,7 @@ void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
f2fs_i_links_write(dir, true);
clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (F2FS_I(dir)->i_current_depth != current_depth)
@@ -667,14 +637,14 @@ next:
goto next;
}
-bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio,
const struct f2fs_filename *fname)
{
struct f2fs_dentry_ptr d;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
- make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));
+ make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ifolio));
bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
@@ -694,7 +664,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
de->name_len = cpu_to_le16(name->len);
memcpy(d->filename[bit_pos], name->name, name->len);
de->ino = cpu_to_le32(ino);
- set_de_type(de, mode);
+ de->file_type = fs_umode_to_ftype(mode);
for (i = 0; i < slots; i++) {
__set_bit_le(bit_pos + i, (void *)d->bitmap);
/* avoid wrong garbage data for readdir */
@@ -711,10 +681,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
unsigned int current_depth;
unsigned long bidx, block;
unsigned int nbucket, nblock;
- struct page *dentry_page = NULL;
+ struct folio *dentry_folio = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
- struct page *page = NULL;
+ struct folio *folio = NULL;
int slots, err = 0;
level = 0;
@@ -727,10 +697,8 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
}
start:
- if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
- f2fs_show_injection_info(F2FS_I_SB(dir), FAULT_DIR_DEPTH);
+ if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
return -ENOSPC;
- }
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
return -ENOSPC;
@@ -746,30 +714,30 @@ start:
(le32_to_cpu(fname->hash) % nbucket));
for (block = bidx; block <= (bidx + nblock - 1); block++) {
- dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
- if (IS_ERR(dentry_page))
- return PTR_ERR(dentry_page);
+ dentry_folio = f2fs_get_new_data_folio(dir, NULL, block, true);
+ if (IS_ERR(dentry_folio))
+ return PTR_ERR(dentry_folio);
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
bit_pos = f2fs_room_for_filename(&dentry_blk->dentry_bitmap,
slots, NR_DENTRY_IN_BLOCK);
if (bit_pos < NR_DENTRY_IN_BLOCK)
goto add_dentry;
- f2fs_put_page(dentry_page, 1);
+ f2fs_folio_put(dentry_folio, true);
}
/* Move to next level to find the empty slot for new dentry */
++level;
goto start;
add_dentry:
- f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
+ f2fs_folio_wait_writeback(dentry_folio, DATA, true, true);
if (inode) {
- down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ folio = f2fs_init_inode_metadata(inode, dir, fname, NULL);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
}
@@ -778,24 +746,24 @@ add_dentry:
f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
bit_pos);
- set_page_dirty(dentry_page);
+ folio_mark_dirty(dentry_folio);
if (inode) {
f2fs_i_pino_write(inode, dir->i_ino);
/* synchronize inode page's data from inode cache */
if (is_inode_flag_set(inode, FI_NEW_INODE))
- f2fs_update_inode(inode, page);
+ f2fs_update_inode(inode, folio);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
f2fs_update_parent_metadata(dir, inode, current_depth);
fail:
if (inode)
- up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
- f2fs_put_page(dentry_page, 1);
+ f2fs_folio_put(dentry_folio, true);
return err;
}
@@ -805,8 +773,15 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
{
int err = -EAGAIN;
- if (f2fs_has_inline_dentry(dir))
+ if (f2fs_has_inline_dentry(dir)) {
+ /*
+ * Should get i_xattr_sem to keep the lock order:
+ * i_xattr_sem -> inode_page lock used by f2fs_setxattr.
+ */
+ f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
+ f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
+ }
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);
@@ -822,7 +797,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_filename fname;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_dir_entry *de = NULL;
int err;
@@ -838,14 +813,14 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
* consistency more.
*/
if (current != F2FS_I(dir)->task) {
- de = __f2fs_find_entry(dir, &fname, &page);
+ de = __f2fs_find_entry(dir, &fname, &folio);
F2FS_I(dir)->task = NULL;
}
if (de) {
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
err = -EEXIST;
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ } else if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
} else {
err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
}
@@ -853,23 +828,24 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
return err;
}
-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
+ struct f2fs_filename *fname)
{
- struct page *page;
+ struct folio *folio;
int err = 0;
- down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ folio = f2fs_init_inode_metadata(inode, dir, fname, NULL);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
clear_inode_flag(inode, FI_NEW_INODE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
fail:
- up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return err;
}
@@ -877,18 +853,18 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
if (S_ISDIR(inode->i_mode))
f2fs_i_links_write(dir, false);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_i_links_write(inode, false);
if (S_ISDIR(inode->i_mode)) {
f2fs_i_links_write(inode, false);
f2fs_i_size_write(inode, 0);
}
- up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
if (inode->i_nlink == 0)
f2fs_add_orphan_inode(inode);
@@ -900,12 +876,13 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
* It only removes the dentry from the dentry page, corresponding name
* entry in name page does not need to be touched during deletion.
*/
-void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio,
struct inode *dir, struct inode *inode)
{
- struct f2fs_dentry_block *dentry_blk;
+ struct f2fs_dentry_block *dentry_blk;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+ pgoff_t index = folio->index;
int i;
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
@@ -914,12 +891,12 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
if (f2fs_has_inline_dentry(dir))
- return f2fs_delete_inline_entry(dentry, page, dir, inode);
+ return f2fs_delete_inline_entry(dentry, folio, dir, inode);
- lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ folio_lock(folio);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
- dentry_blk = page_address(page);
+ dentry_blk = folio_address(folio);
bit_pos = dentry - dentry_blk->dentry;
for (i = 0; i < slots; i++)
__clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
@@ -928,25 +905,21 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
NR_DENTRY_IN_BLOCK,
0);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
- !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
- f2fs_clear_page_cache_dirty_tag(page);
- clear_page_dirty_for_io(page);
- ClearPageUptodate(page);
-
- clear_page_private_gcing(page);
+ !f2fs_truncate_hole(dir, index, index + 1)) {
+ f2fs_clear_page_cache_dirty_tag(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_clear_uptodate(folio);
+ folio_detach_private(folio);
inode_dec_dirty_pages(dir);
f2fs_remove_dirty_inode(dir);
-
- detach_page_private(page);
- set_page_private(page, 0);
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
@@ -955,8 +928,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
bool f2fs_empty_dir(struct inode *dir)
{
- unsigned long bidx;
- struct page *dentry_page;
+ unsigned long bidx = 0;
unsigned int bit_pos;
struct f2fs_dentry_block *dentry_blk;
unsigned long nblock = dir_blocks(dir);
@@ -964,16 +936,21 @@ bool f2fs_empty_dir(struct inode *dir)
if (f2fs_has_inline_dentry(dir))
return f2fs_empty_inline_dir(dir);
- for (bidx = 0; bidx < nblock; bidx++) {
- dentry_page = f2fs_get_lock_data_page(dir, bidx, false);
- if (IS_ERR(dentry_page)) {
- if (PTR_ERR(dentry_page) == -ENOENT)
+ while (bidx < nblock) {
+ pgoff_t next_pgofs;
+ struct folio *dentry_folio;
+
+ dentry_folio = f2fs_find_data_folio(dir, bidx, &next_pgofs);
+ if (IS_ERR(dentry_folio)) {
+ if (PTR_ERR(dentry_folio) == -ENOENT) {
+ bidx = next_pgofs;
continue;
- else
+ } else {
return false;
+ }
}
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
if (bidx == 0)
bit_pos = 2;
else
@@ -982,10 +959,12 @@ bool f2fs_empty_dir(struct inode *dir)
NR_DENTRY_IN_BLOCK,
bit_pos);
- f2fs_put_page(dentry_page, 1);
+ f2fs_folio_put(dentry_folio, false);
if (bit_pos < NR_DENTRY_IN_BLOCK)
return false;
+
+ bidx++;
}
return true;
}
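
Several loops above (find_in_level(), f2fs_empty_dir(), f2fs_readdir()) now rely on f2fs_find_data_folio() reporting -ENOENT for holes together with next_pgofs, so the caller can jump straight to the next allocated block instead of probing every index. The following is a small self-contained sketch of that contract, using hypothetical demo_* names and a flat in-memory array in place of the on-disk block mapping.

#include <stdbool.h>
#include <stddef.h>

struct demo_blk {
	bool allocated;
	int data;
};

/*
 * Return the block at *idx if it is allocated; otherwise advance *idx to the
 * next allocated index (or to nblocks) and return NULL, mimicking the
 * -ENOENT + next_pgofs contract above.
 */
static struct demo_blk *demo_find_block(struct demo_blk *blks, size_t nblocks,
					size_t *idx)
{
	if (blks[*idx].allocated)
		return &blks[*idx];
	while (*idx < nblocks && !blks[*idx].allocated)
		(*idx)++;
	return NULL;
}

static int demo_sum_allocated(struct demo_blk *blks, size_t nblocks)
{
	int sum = 0;
	size_t i = 0;

	while (i < nblocks) {
		struct demo_blk *b = demo_find_block(blks, nblocks, &i);

		if (!b)
			continue;	/* i already points past the hole */
		sum += b->data;
		i++;
	}
	return sum;
}
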
@@ -999,7 +978,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
struct blk_plug plug;
- bool readdir_ra = sbi->readdir_ra == 1;
+ bool readdir_ra = sbi->readdir_ra;
bool found_valid_dirent = false;
int err = 0;
@@ -1016,9 +995,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
de = &d->dentry[bit_pos];
if (de->name_len == 0) {
if (found_valid_dirent || !bit_pos) {
- printk_ratelimited(
- "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
- KERN_WARNING, sbi->sb->s_id,
+ f2fs_warn_ratelimited(sbi,
+ "invalid namelen(0), ino:%u, run fsck to fix.",
le32_to_cpu(de->ino));
set_sbi_flag(sbi, SBI_NEED_FSCK);
}
@@ -1027,7 +1005,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
continue;
}
- d_type = f2fs_get_de_type(de);
+ d_type = fs_ftype_to_dtype(de->file_type);
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
@@ -1040,6 +1018,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
__func__, le16_to_cpu(de->name_len));
set_sbi_flag(sbi, SBI_NEED_FSCK);
err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_DIRENT);
goto out;
}
@@ -1079,7 +1058,6 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
unsigned long npages = dir_blocks(inode);
struct f2fs_dentry_block *dentry_blk = NULL;
- struct page *dentry_page = NULL;
struct file_ra_state *ra = &file->f_ra;
loff_t start_pos = ctx->pos;
unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
@@ -1102,7 +1080,9 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
goto out_free;
}
- for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) {
+ for (; n < npages; ctx->pos = n * NR_DENTRY_IN_BLOCK) {
+ struct folio *dentry_folio;
+ pgoff_t next_pgofs;
/* allow readdir() to be interrupted */
if (fatal_signal_pending(current)) {
@@ -1116,29 +1096,29 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
page_cache_sync_readahead(inode->i_mapping, ra, file, n,
min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
- dentry_page = f2fs_find_data_page(inode, n);
- if (IS_ERR(dentry_page)) {
- err = PTR_ERR(dentry_page);
+ dentry_folio = f2fs_find_data_folio(inode, n, &next_pgofs);
+ if (IS_ERR(dentry_folio)) {
+ err = PTR_ERR(dentry_folio);
if (err == -ENOENT) {
err = 0;
+ n = next_pgofs;
continue;
} else {
goto out_free;
}
}
- dentry_blk = page_address(dentry_page);
+ dentry_blk = folio_address(dentry_folio);
make_dentry_ptr_block(inode, &d, dentry_blk);
err = f2fs_fill_dentries(ctx, &d,
n * NR_DENTRY_IN_BLOCK, &fstr);
- if (err) {
- f2fs_put_page(dentry_page, 0);
+ f2fs_folio_put(dentry_folio, false);
+ if (err)
break;
- }
- f2fs_put_page(dentry_page, 0);
+ n++;
}
out_free:
fscrypt_fname_free_buffer(&fstr);
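
The dir.c changes above turn directory lookup into a two-pass operation: the usual hash-bucket search runs first, and for casefolded directories __f2fs_find_entry() retries with use_hash = false, i.e. a linear scan of every bucket that skips the per-entry hash check, when f2fs_should_fallback_to_linear() allows it. The toy below sketches only that retry shape; the demo_* names, the flat bucket array, and strcasecmp() stand in for the real multi-level on-disk buckets and generic_ci_match().

#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <strings.h>	/* strcasecmp */

#define DEMO_NBUCKETS 8

struct demo_dirent {
	const char *name;
	unsigned int hash;
	struct demo_dirent *next;
};

struct demo_dir {
	struct demo_dirent *buckets[DEMO_NBUCKETS];
	bool casefolded;
	bool allow_linear_fallback;	/* models LOOKUP_COMPAT / LOOKUP_AUTO */
};

static unsigned int demo_hash(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h;
}

/*
 * Hash pass: only the bucket picked by the hash is searched, and entries are
 * skipped unless their stored hash matches.  Linear pass: every bucket is
 * scanned and the hash check is dropped, which is what rescues a casefolded
 * name that was stored with a different-case (hence different-hash) spelling.
 */
static struct demo_dirent *demo_lookup(struct demo_dir *dir, const char *name)
{
	unsigned int hash = demo_hash(name);
	bool use_hash = true;

retry:
	for (unsigned int b = 0; b < DEMO_NBUCKETS; b++) {
		struct demo_dirent *de;

		if (use_hash && b != hash % DEMO_NBUCKETS)
			continue;

		for (de = dir->buckets[b]; de; de = de->next) {
			if (use_hash && de->hash != hash)
				continue;
			if (dir->casefolded ? !strcasecmp(de->name, name)
					    : !strcmp(de->name, name))
				return de;
		}
	}

	if (use_hash && dir->casefolded && dir->allow_linear_fallback) {
		use_hash = false;
		goto retry;
	}
	return NULL;
}
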
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 866e72b29bd5..0ed84cc065a7 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -6,6 +6,10 @@
* Copyright (c) 2015 Samsung Electronics
* Authors: Jaegeuk Kim <jaegeuk@kernel.org>
* Chao Yu <chao2.yu@samsung.com>
+ *
+ * block_age-based extent cache added by:
+ * Copyright (c) 2022 xiaomi Co., Ltd.
+ * http://www.xiaomi.com/
*/
#include <linux/fs.h>
@@ -15,118 +19,212 @@
#include "node.h"
#include <trace/events/f2fs.h>
-static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
- unsigned int ofs)
+bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio)
{
- if (cached_re) {
- if (cached_re->ofs <= ofs &&
- cached_re->ofs + cached_re->len > ofs) {
- return cached_re;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_extent *i_ext = &F2FS_INODE(ifolio)->i_ext;
+ struct extent_info ei;
+ int devi;
+
+ get_read_extent_info(&ei, i_ext);
+
+ if (!ei.len)
+ return true;
+
+ if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) ||
+ !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1,
+ DATA_GENERIC_ENHANCE)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
+ __func__, inode->i_ino,
+ ei.blk, ei.fofs, ei.len);
+ return false;
+ }
+
+ if (!IS_DEVICE_ALIASING(inode))
+ return true;
+
+ for (devi = 0; devi < sbi->s_ndevs; devi++) {
+ if (FDEV(devi).start_blk != ei.blk ||
+ FDEV(devi).end_blk != ei.blk + ei.len - 1)
+ continue;
+
+ if (devi == 0) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) is an alias of meta device",
+ __func__, inode->i_ino);
+ return false;
}
+
+ if (bdev_is_zoned(FDEV(devi).bdev)) {
+ f2fs_warn(sbi,
+ "%s: device alias inode (ino=%lx)'s extent info "
+ "[%u, %u, %u] maps to zoned block device",
+ __func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
+ return false;
+ }
+ return true;
}
- return NULL;
+
+ f2fs_warn(sbi, "%s: device alias inode (ino=%lx)'s extent info "
+ "[%u, %u, %u] is inconsistent w/ any devices",
+ __func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
+ return false;
}
-static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
- unsigned int ofs)
+static void __set_extent_info(struct extent_info *ei,
+ unsigned int fofs, unsigned int len,
+ block_t blk, bool keep_clen,
+ unsigned long age, unsigned long last_blocks,
+ enum extent_type type)
{
- struct rb_node *node = root->rb_root.rb_node;
- struct rb_entry *re;
+ ei->fofs = fofs;
+ ei->len = len;
- while (node) {
- re = rb_entry(node, struct rb_entry, rb_node);
-
- if (ofs < re->ofs)
- node = node->rb_left;
- else if (ofs >= re->ofs + re->len)
- node = node->rb_right;
- else
- return re;
+ if (type == EX_READ) {
+ ei->blk = blk;
+ if (keep_clen)
+ return;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ ei->c_len = 0;
+#endif
+ } else if (type == EX_BLOCK_AGE) {
+ ei->age = age;
+ ei->last_blocks = last_blocks;
}
- return NULL;
}
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
- struct rb_entry *cached_re, unsigned int ofs)
+static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
- struct rb_entry *re;
+ if (type == EX_READ)
+ return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
+ S_ISREG(inode->i_mode);
+ if (type == EX_BLOCK_AGE)
+ return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
+ return false;
+}
+
+static bool __may_extent_tree(struct inode *inode, enum extent_type type)
+{
+ if (IS_DEVICE_ALIASING(inode) && type == EX_READ)
+ return true;
- re = __lookup_rb_tree_fast(cached_re, ofs);
- if (!re)
- return __lookup_rb_tree_slow(root, ofs);
+ /*
+ * For recovered files during mount, do not create extents
+ * if the shrinker is not registered.
+ */
+ if (list_empty(&F2FS_I_SB(inode)->s_list))
+ return false;
+
+ if (!__init_may_extent_tree(inode, type))
+ return false;
- return re;
+ if (type == EX_READ) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ return false;
+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+ !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
+ return false;
+ } else if (type == EX_BLOCK_AGE) {
+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+ return false;
+ if (file_is_cold(inode))
+ return false;
+ }
+ return true;
}
-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
- struct rb_root_cached *root,
- struct rb_node **parent,
- unsigned long long key, bool *leftmost)
+static void __try_update_largest_extent(struct extent_tree *et,
+ struct extent_node *en)
{
- struct rb_node **p = &root->rb_root.rb_node;
- struct rb_entry *re;
+ if (et->type != EX_READ)
+ return;
+ if (en->ei.len <= et->largest.len)
+ return;
- while (*p) {
- *parent = *p;
- re = rb_entry(*parent, struct rb_entry, rb_node);
+ et->largest = en->ei;
+ et->largest_updated = true;
+}
- if (key < re->key) {
- p = &(*p)->rb_left;
- } else {
- p = &(*p)->rb_right;
- *leftmost = false;
- }
+static bool __is_extent_mergeable(struct extent_info *back,
+ struct extent_info *front, enum extent_type type)
+{
+ if (type == EX_READ) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (back->c_len && back->len != back->c_len)
+ return false;
+ if (front->c_len && front->len != front->c_len)
+ return false;
+#endif
+ return (back->fofs + back->len == front->fofs &&
+ back->blk + back->len == front->blk);
+ } else if (type == EX_BLOCK_AGE) {
+ return (back->fofs + back->len == front->fofs &&
+ abs(back->age - front->age) <= SAME_AGE_REGION &&
+ abs(back->last_blocks - front->last_blocks) <=
+ SAME_AGE_REGION);
}
+ return false;
+}
- return p;
+static bool __is_back_mergeable(struct extent_info *cur,
+ struct extent_info *back, enum extent_type type)
+{
+ return __is_extent_mergeable(back, cur, type);
}
-struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
- struct rb_root_cached *root,
- struct rb_node **parent,
- unsigned int ofs, bool *leftmost)
+static bool __is_front_mergeable(struct extent_info *cur,
+ struct extent_info *front, enum extent_type type)
{
- struct rb_node **p = &root->rb_root.rb_node;
- struct rb_entry *re;
+ return __is_extent_mergeable(cur, front, type);
+}
- while (*p) {
- *parent = *p;
- re = rb_entry(*parent, struct rb_entry, rb_node);
+static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
+ struct extent_node *cached_en, unsigned int fofs)
+{
+ struct rb_node *node = root->rb_root.rb_node;
+ struct extent_node *en;
- if (ofs < re->ofs) {
- p = &(*p)->rb_left;
- } else if (ofs >= re->ofs + re->len) {
- p = &(*p)->rb_right;
- *leftmost = false;
- } else {
- f2fs_bug_on(sbi, 1);
- }
- }
+ /* check a cached entry */
+ if (cached_en && cached_en->ei.fofs <= fofs &&
+ cached_en->ei.fofs + cached_en->ei.len > fofs)
+ return cached_en;
- return p;
+ /* check rb_tree */
+ while (node) {
+ en = rb_entry(node, struct extent_node, rb_node);
+
+ if (fofs < en->ei.fofs)
+ node = node->rb_left;
+ else if (fofs >= en->ei.fofs + en->ei.len)
+ node = node->rb_right;
+ else
+ return en;
+ }
+ return NULL;
}
/*
- * lookup rb entry in position of @ofs in rb-tree,
+ * lookup rb entry in position of @fofs in rb-tree,
* if hit, return the entry, otherwise, return NULL
- * @prev_ex: extent before ofs
- * @next_ex: extent after ofs
- * @insert_p: insert point for new extent at ofs
- * in order to simpfy the insertion after.
+ * @prev_ex: extent before fofs
+ * @next_ex: extent after fofs
+ * @insert_p: insert point for new extent at fofs
+ * in order to simplify the insertion after.
* tree must stay unchanged between lookup and insertion.
*/
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
- struct rb_entry *cached_re,
- unsigned int ofs,
- struct rb_entry **prev_entry,
- struct rb_entry **next_entry,
+static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
+ struct extent_node *cached_en,
+ unsigned int fofs,
+ struct extent_node **prev_entry,
+ struct extent_node **next_entry,
struct rb_node ***insert_p,
struct rb_node **insert_parent,
- bool force, bool *leftmost)
+ bool *leftmost)
{
struct rb_node **pnode = &root->rb_root.rb_node;
struct rb_node *parent = NULL, *tmp_node;
- struct rb_entry *re = cached_re;
+ struct extent_node *en = cached_en;
*insert_p = NULL;
*insert_parent = NULL;
@@ -136,24 +234,20 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
if (RB_EMPTY_ROOT(&root->rb_root))
return NULL;
- if (re) {
- if (re->ofs <= ofs && re->ofs + re->len > ofs)
- goto lookup_neighbors;
- }
+ if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
+ goto lookup_neighbors;
- if (leftmost)
- *leftmost = true;
+ *leftmost = true;
while (*pnode) {
parent = *pnode;
- re = rb_entry(*pnode, struct rb_entry, rb_node);
+ en = rb_entry(*pnode, struct extent_node, rb_node);
- if (ofs < re->ofs) {
+ if (fofs < en->ei.fofs) {
pnode = &(*pnode)->rb_left;
- } else if (ofs >= re->ofs + re->len) {
+ } else if (fofs >= en->ei.fofs + en->ei.len) {
pnode = &(*pnode)->rb_right;
- if (leftmost)
- *leftmost = false;
+ *leftmost = false;
} else {
goto lookup_neighbors;
}
@@ -162,71 +256,32 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
*insert_p = pnode;
*insert_parent = parent;
- re = rb_entry(parent, struct rb_entry, rb_node);
+ en = rb_entry(parent, struct extent_node, rb_node);
tmp_node = parent;
- if (parent && ofs > re->ofs)
+ if (parent && fofs > en->ei.fofs)
tmp_node = rb_next(parent);
- *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ *next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
tmp_node = parent;
- if (parent && ofs < re->ofs)
+ if (parent && fofs < en->ei.fofs)
tmp_node = rb_prev(parent);
- *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ *prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
return NULL;
lookup_neighbors:
- if (ofs == re->ofs || force) {
+ if (fofs == en->ei.fofs) {
/* lookup prev node for merging backward later */
- tmp_node = rb_prev(&re->rb_node);
- *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ tmp_node = rb_prev(&en->rb_node);
+ *prev_entry = rb_entry_safe(tmp_node,
+ struct extent_node, rb_node);
}
- if (ofs == re->ofs + re->len - 1 || force) {
+ if (fofs == en->ei.fofs + en->ei.len - 1) {
/* lookup next node for merging frontward later */
- tmp_node = rb_next(&re->rb_node);
- *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ tmp_node = rb_next(&en->rb_node);
+ *next_entry = rb_entry_safe(tmp_node,
+ struct extent_node, rb_node);
}
- return re;
-}
-
-bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
- struct rb_root_cached *root, bool check_key)
-{
-#ifdef CONFIG_F2FS_CHECK_FS
- struct rb_node *cur = rb_first_cached(root), *next;
- struct rb_entry *cur_re, *next_re;
-
- if (!cur)
- return true;
-
- while (cur) {
- next = rb_next(cur);
- if (!next)
- return true;
-
- cur_re = rb_entry(cur, struct rb_entry, rb_node);
- next_re = rb_entry(next, struct rb_entry, rb_node);
-
- if (check_key) {
- if (cur_re->key > next_re->key) {
- f2fs_info(sbi, "inconsistent rbtree, "
- "cur(%llu) next(%llu)",
- cur_re->key, next_re->key);
- return false;
- }
- goto next;
- }
-
- if (cur_re->ofs + cur_re->len > next_re->ofs) {
- f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
- cur_re->ofs, cur_re->len,
- next_re->ofs, next_re->len);
- return false;
- }
-next:
- cur = next;
- }
-#endif
- return true;
+ return en;
}
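/*
 * Typical caller flow for __lookup_extent_node_ret(): on a hit the returned
 * node is updated in place; on a miss, *prev_entry and *next_entry are the
 * neighbours to test with __is_back_mergeable() / __is_front_mergeable(),
 * and *insert_p plus *insert_parent give the rb-tree slot for attaching a
 * new node without a second walk (the tree must stay unchanged in between).
 */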
static struct kmem_cache *extent_tree_slab;
@@ -237,6 +292,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
struct rb_node *parent, struct rb_node **p,
bool leftmost)
{
+ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
struct extent_node *en;
en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
@@ -250,16 +306,18 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
rb_link_node(&en->rb_node, parent, p);
rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
atomic_inc(&et->node_cnt);
- atomic_inc(&sbi->total_ext_node);
+ atomic_inc(&eti->total_ext_node);
return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_node *en)
{
+ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+
rb_erase_cached(&en->rb_node, &et->root);
atomic_dec(&et->node_cnt);
- atomic_dec(&sbi->total_ext_node);
+ atomic_dec(&eti->total_ext_node);
if (et->cached_en == en)
et->cached_en = NULL;
@@ -275,183 +333,205 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
static void __release_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_node *en)
{
- spin_lock(&sbi->extent_lock);
+ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+
+ spin_lock(&eti->extent_lock);
f2fs_bug_on(sbi, list_empty(&en->list));
list_del_init(&en->list);
- spin_unlock(&sbi->extent_lock);
+ spin_unlock(&eti->extent_lock);
__detach_extent_node(sbi, et, en);
}
-static struct extent_tree *__grab_extent_tree(struct inode *inode)
+static struct extent_tree *__grab_extent_tree(struct inode *inode,
+ enum extent_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree_info *eti = &sbi->extent_tree[type];
struct extent_tree *et;
nid_t ino = inode->i_ino;
- mutex_lock(&sbi->extent_tree_lock);
- et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+ mutex_lock(&eti->extent_tree_lock);
+ et = radix_tree_lookup(&eti->extent_tree_root, ino);
if (!et) {
et = f2fs_kmem_cache_alloc(extent_tree_slab,
GFP_NOFS, true, NULL);
- f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+ f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
memset(et, 0, sizeof(struct extent_tree));
et->ino = ino;
+ et->type = type;
et->root = RB_ROOT_CACHED;
et->cached_en = NULL;
rwlock_init(&et->lock);
INIT_LIST_HEAD(&et->list);
atomic_set(&et->node_cnt, 0);
- atomic_inc(&sbi->total_ext_tree);
+ atomic_inc(&eti->total_ext_tree);
} else {
- atomic_dec(&sbi->total_zombie_tree);
+ atomic_dec(&eti->total_zombie_tree);
list_del_init(&et->list);
}
- mutex_unlock(&sbi->extent_tree_lock);
+ mutex_unlock(&eti->extent_tree_lock);
/* never dies until evict_inode */
- F2FS_I(inode)->extent_tree = et;
+ F2FS_I(inode)->extent_tree[type] = et;
return et;
}
-static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et, struct extent_info *ei)
-{
- struct rb_node **p = &et->root.rb_root.rb_node;
- struct extent_node *en;
-
- en = __attach_extent_node(sbi, et, ei, NULL, p, true);
- if (!en)
- return NULL;
-
- et->largest = en->ei;
- et->cached_en = en;
- return en;
-}
-
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et)
+ struct extent_tree *et, unsigned int nr_shrink)
{
struct rb_node *node, *next;
struct extent_node *en;
- unsigned int count = atomic_read(&et->node_cnt);
+ unsigned int count;
node = rb_first_cached(&et->root);
- while (node) {
+
+ for (count = 0; node && count < nr_shrink; count++) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
__release_extent_node(sbi, et, en);
node = next;
}
- return count - atomic_read(&et->node_cnt);
+ return count;
}
static void __drop_largest_extent(struct extent_tree *et,
pgoff_t fofs, unsigned int len)
{
- if (fofs < et->largest.fofs + et->largest.len &&
+ if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
fofs + len > et->largest.fofs) {
et->largest.len = 0;
et->largest_updated = true;
}
}
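A minimal sketch (not part of the patch; the helper name is mine) of the half-open range overlap test that __drop_largest_extent performs — the cached largest extent is invalidated whenever [fofs, fofs + len) intersects it. The 64-bit sums play the same role as the (pgoff_t) cast added above, keeping fofs + len from wrapping in 32 bits:

static bool ranges_overlap(unsigned long long a_start, unsigned int a_len,
			   unsigned long long b_start, unsigned int b_len)
{
	/* two half-open ranges overlap iff each starts before the other ends */
	return a_start < b_start + b_len && b_start < a_start + a_len;
}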
-/* return true, if inode page is changed */
-static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
+void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
+ struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
+ struct f2fs_extent *i_ext = &F2FS_INODE(ifolio)->i_ext;
struct extent_tree *et;
struct extent_node *en;
- struct extent_info ei;
+ struct extent_info ei = {0};
- if (!f2fs_may_extent_tree(inode)) {
- /* drop largest extent */
- if (i_ext && i_ext->len) {
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ if (!__may_extent_tree(inode, EX_READ)) {
+ /* drop largest read extent */
+ if (i_ext->len) {
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
i_ext->len = 0;
- set_page_dirty(ipage);
- return;
+ folio_mark_dirty(ifolio);
}
+ set_inode_flag(inode, FI_NO_EXTENT);
return;
}
- et = __grab_extent_tree(inode);
-
- if (!i_ext || !i_ext->len)
- return;
+ et = __grab_extent_tree(inode, EX_READ);
- get_extent_info(&ei, i_ext);
+ get_read_extent_info(&ei, i_ext);
write_lock(&et->lock);
- if (atomic_read(&et->node_cnt))
- goto out;
+ if (atomic_read(&et->node_cnt) || !ei.len)
+ goto skip;
- en = __init_extent_tree(sbi, et, &ei);
+ if (IS_DEVICE_ALIASING(inode)) {
+ et->largest = ei;
+ goto skip;
+ }
+
+ en = __attach_extent_node(sbi, et, &ei, NULL,
+ &et->root.rb_root.rb_node, true);
if (en) {
- spin_lock(&sbi->extent_lock);
- list_add_tail(&en->list, &sbi->extent_list);
- spin_unlock(&sbi->extent_lock);
+ et->largest = en->ei;
+ et->cached_en = en;
+
+ spin_lock(&eti->extent_lock);
+ list_add_tail(&en->list, &eti->extent_list);
+ spin_unlock(&eti->extent_lock);
+ }
+skip:
+ /* Let's drop it if the checkpoint got corrupted. */
+ if (f2fs_cp_error(sbi)) {
+ et->largest.len = 0;
+ et->largest_updated = true;
}
-out:
write_unlock(&et->lock);
}
-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
+void f2fs_init_age_extent_tree(struct inode *inode)
{
- __f2fs_init_extent_tree(inode, ipage);
+ if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
+ return;
+ __grab_extent_tree(inode, EX_BLOCK_AGE);
+}
- if (!F2FS_I(inode)->extent_tree)
- set_inode_flag(inode, FI_NO_EXTENT);
+void f2fs_init_extent_tree(struct inode *inode)
+{
+ /* initialize read cache */
+ if (__init_may_extent_tree(inode, EX_READ))
+ __grab_extent_tree(inode, EX_READ);
+
+ /* initialize block age cache */
+ if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
+ __grab_extent_tree(inode, EX_BLOCK_AGE);
}
-static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
- struct extent_info *ei)
+static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei, enum extent_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ struct extent_tree_info *eti = &sbi->extent_tree[type];
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
struct extent_node *en;
bool ret = false;
- f2fs_bug_on(sbi, !et);
+ if (!et)
+ return false;
- trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+ trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
read_lock(&et->lock);
- if (et->largest.fofs <= pgofs &&
- et->largest.fofs + et->largest.len > pgofs) {
+ if (type == EX_READ &&
+ et->largest.fofs <= pgofs &&
+ (pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
*ei = et->largest;
ret = true;
stat_inc_largest_node_hit(sbi);
goto out;
}
- en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
- (struct rb_entry *)et->cached_en, pgofs);
+ if (IS_DEVICE_ALIASING(inode)) {
+ ret = false;
+ goto out;
+ }
+
+ en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
if (!en)
goto out;
if (en == et->cached_en)
- stat_inc_cached_node_hit(sbi);
+ stat_inc_cached_node_hit(sbi, type);
else
- stat_inc_rbtree_node_hit(sbi);
+ stat_inc_rbtree_node_hit(sbi, type);
*ei = en->ei;
- spin_lock(&sbi->extent_lock);
+ spin_lock(&eti->extent_lock);
if (!list_empty(&en->list)) {
- list_move_tail(&en->list, &sbi->extent_list);
+ list_move_tail(&en->list, &eti->extent_list);
et->cached_en = en;
}
- spin_unlock(&sbi->extent_lock);
+ spin_unlock(&eti->extent_lock);
ret = true;
out:
- stat_inc_total_hit(sbi);
+ stat_inc_total_hit(sbi, type);
read_unlock(&et->lock);
- trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
+ if (type == EX_READ)
+ trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
+ else if (type == EX_BLOCK_AGE)
+ trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
return ret;
}
@@ -460,18 +540,20 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
+ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
struct extent_node *en = NULL;
- if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+ if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
prev_ex->ei.len += ei->len;
ei = &prev_ex->ei;
en = prev_ex;
}
- if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+ if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
next_ex->ei.fofs = ei->fofs;
- next_ex->ei.blk = ei->blk;
next_ex->ei.len += ei->len;
+ if (et->type == EX_READ)
+ next_ex->ei.blk = ei->blk;
if (en)
__release_extent_node(sbi, et, prev_ex);
@@ -483,12 +565,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
__try_update_largest_extent(et, en);
- spin_lock(&sbi->extent_lock);
+ spin_lock(&eti->extent_lock);
if (!list_empty(&en->list)) {
- list_move_tail(&en->list, &sbi->extent_list);
+ list_move_tail(&en->list, &eti->extent_list);
et->cached_en = en;
}
- spin_unlock(&sbi->extent_lock);
+ spin_unlock(&eti->extent_lock);
return en;
}
@@ -498,7 +580,8 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
struct rb_node *insert_parent,
bool leftmost)
{
- struct rb_node **p;
+ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ struct rb_node **p = &et->root.rb_root.rb_node;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -510,8 +593,27 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
leftmost = true;
- p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
- ei->fofs, &leftmost);
+ /* look up extent_node in the rb tree */
+ while (*p) {
+ parent = *p;
+ en = rb_entry(parent, struct extent_node, rb_node);
+
+ if (ei->fofs < en->ei.fofs) {
+ p = &(*p)->rb_left;
+ } else if (ei->fofs >= en->ei.fofs + en->ei.len) {
+ p = &(*p)->rb_right;
+ leftmost = false;
+ } else {
+ f2fs_err_ratelimited(sbi, "%s: corrupted extent, type: %d, "
+ "extent node in rb tree [%u, %u, %u], age [%llu, %llu], "
+ "extent node to insert [%u, %u, %u], age [%llu, %llu]",
+ __func__, et->type, en->ei.fofs, en->ei.blk, en->ei.len, en->ei.age,
+ en->ei.last_blocks, ei->fofs, ei->blk, ei->len, ei->age, ei->last_blocks);
+ f2fs_bug_on(sbi, 1);
+ return NULL;
+ }
+ }
+
do_insert:
en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
if (!en)
@@ -520,59 +622,98 @@ do_insert:
__try_update_largest_extent(et, en);
/* update in global extent list */
- spin_lock(&sbi->extent_lock);
- list_add_tail(&en->list, &sbi->extent_list);
+ spin_lock(&eti->extent_lock);
+ list_add_tail(&en->list, &eti->extent_list);
et->cached_en = en;
- spin_unlock(&sbi->extent_lock);
+ spin_unlock(&eti->extent_lock);
return en;
}
-static void f2fs_update_extent_tree_range(struct inode *inode,
- pgoff_t fofs, block_t blkaddr, unsigned int len)
+static unsigned int __destroy_extent_node(struct inode *inode,
+ enum extent_type type)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ unsigned int nr_shrink = type == EX_READ ?
+ READ_EXTENT_CACHE_SHRINK_NUMBER :
+ AGE_EXTENT_CACHE_SHRINK_NUMBER;
+ unsigned int node_cnt = 0;
+
+ if (!et || !atomic_read(&et->node_cnt))
+ return 0;
+
+ while (atomic_read(&et->node_cnt)) {
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et, nr_shrink);
+ write_unlock(&et->lock);
+ }
+
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+
+ return node_cnt;
+}
+
+static void __update_extent_tree_range(struct inode *inode,
+ struct extent_info *tei, enum extent_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
struct extent_node *en = NULL, *en1 = NULL;
struct extent_node *prev_en = NULL, *next_en = NULL;
struct extent_info ei, dei, prev;
struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ unsigned int fofs = tei->fofs, len = tei->len;
unsigned int end = fofs + len;
- unsigned int pos = (unsigned int)fofs;
bool updated = false;
bool leftmost = false;
if (!et)
return;
- trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
+ if (unlikely(len == 0)) {
+ f2fs_err_ratelimited(sbi, "%s: extent len is zero, type: %d, "
+ "extent [%u, %u, %u], age [%llu, %llu]",
+ __func__, type, tei->fofs, tei->blk, tei->len,
+ tei->age, tei->last_blocks);
+ f2fs_bug_on(sbi, 1);
+ return;
+ }
+
+ if (type == EX_READ)
+ trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
+ tei->blk, 0);
+ else if (type == EX_BLOCK_AGE)
+ trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
+ tei->age, tei->last_blocks);
write_lock(&et->lock);
- if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
- write_unlock(&et->lock);
- return;
- }
+ if (type == EX_READ) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+ write_unlock(&et->lock);
+ return;
+ }
- prev = et->largest;
- dei.len = 0;
+ prev = et->largest;
+ dei.len = 0;
- /*
- * drop largest extent before lookup, in case it's already
- * been shrunk from extent tree
- */
- __drop_largest_extent(et, fofs, len);
+ /*
+ * drop largest extent before lookup, in case it's already
+ * been shrunk from extent tree
+ */
+ __drop_largest_extent(et, fofs, len);
+ }
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
- en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
- (struct rb_entry *)et->cached_en, fofs,
- (struct rb_entry **)&prev_en,
- (struct rb_entry **)&next_en,
- &insert_p, &insert_parent, false,
+ en = __lookup_extent_node_ret(&et->root,
+ et->cached_en, fofs,
+ &prev_en, &next_en,
+ &insert_p, &insert_parent,
&leftmost);
if (!en)
en = next_en;
- /* 2. invlidate all extent nodes in range [fofs, fofs + len - 1] */
+ /* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
while (en && en->ei.fofs < end) {
unsigned int org_end;
int parts = 0; /* # of parts current extent split into */
@@ -581,26 +722,34 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
dei = en->ei;
org_end = dei.fofs + dei.len;
- f2fs_bug_on(sbi, pos >= org_end);
+ f2fs_bug_on(sbi, fofs >= org_end);
- if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
- en->ei.len = pos - en->ei.fofs;
+ if (fofs > dei.fofs && (type != EX_READ ||
+ fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
+ en->ei.len = fofs - en->ei.fofs;
prev_en = en;
parts = 1;
}
- if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
+ if (end < org_end && (type != EX_READ ||
+ (org_end - end >= F2FS_MIN_EXTENT_LEN &&
+ atomic_read(&et->node_cnt) <
+ sbi->max_read_extent_count))) {
if (parts) {
- set_extent_info(&ei, end,
- end - dei.fofs + dei.blk,
- org_end - end);
+ __set_extent_info(&ei,
+ end, org_end - end,
+ end - dei.fofs + dei.blk, false,
+ dei.age, dei.last_blocks,
+ type);
en1 = __insert_extent_tree(sbi, et, &ei,
NULL, NULL, true);
next_en = en1;
} else {
- en->ei.fofs = end;
- en->ei.blk += end - dei.fofs;
- en->ei.len -= end - dei.fofs;
+ __set_extent_info(&en->ei,
+ end, en->ei.len - (end - dei.fofs),
+ en->ei.blk + (end - dei.fofs), true,
+ dei.age, dei.last_blocks,
+ type);
next_en = en;
}
parts++;
@@ -630,10 +779,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
en = next_en;
}
- /* 3. update extent in extent cache */
- if (blkaddr) {
+ if (type == EX_BLOCK_AGE)
+ goto update_age_extent_cache;
+
+ /* 3. update extent in read extent cache */
+ BUG_ON(type != EX_READ);
- set_extent_info(&ei, fofs, blkaddr, len);
+ if (tei->blk) {
+ __set_extent_info(&ei, fofs, len, tei->blk, false,
+ 0, 0, EX_READ);
if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
__insert_extent_tree(sbi, et, &ei,
insert_p, insert_parent, leftmost);
@@ -648,34 +802,45 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
}
}
- if (is_inode_flag_set(inode, FI_NO_EXTENT))
- __free_extent_tree(sbi, et);
-
if (et->largest_updated) {
et->largest_updated = false;
updated = true;
}
+ goto out_read_extent_cache;
+update_age_extent_cache:
+ if (tei->last_blocks == F2FS_EXTENT_AGE_INVALID)
+ goto out_read_extent_cache;
+ __set_extent_info(&ei, fofs, len, 0, false,
+ tei->age, tei->last_blocks, EX_BLOCK_AGE);
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+ __insert_extent_tree(sbi, et, &ei,
+ insert_p, insert_parent, leftmost);
+out_read_extent_cache:
write_unlock(&et->lock);
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ __destroy_extent_node(inode, EX_READ);
+
if (updated)
f2fs_mark_inode_dirty_sync(inode, true);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
pgoff_t fofs, block_t blkaddr, unsigned int llen,
unsigned int c_len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
struct extent_node *en = NULL;
struct extent_node *prev_en = NULL, *next_en = NULL;
struct extent_info ei;
struct rb_node **insert_p = NULL, *insert_parent = NULL;
bool leftmost = false;
- trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
+ trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
+ blkaddr, c_len);
/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
if (is_inode_flag_set(inode, FI_NO_EXTENT))
@@ -683,16 +848,15 @@ void f2fs_update_extent_tree_range_compressed(struct inode *inode,
write_lock(&et->lock);
- en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
- (struct rb_entry *)et->cached_en, fofs,
- (struct rb_entry **)&prev_en,
- (struct rb_entry **)&next_en,
- &insert_p, &insert_parent, false,
- &leftmost);
+ en = __lookup_extent_node_ret(&et->root,
+ et->cached_en, fofs,
+ &prev_en, &next_en,
+ &insert_p, &insert_parent,
+ &leftmost);
if (en)
goto unlock_out;
- set_extent_info(&ei, fofs, blkaddr, llen);
+ __set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
ei.c_len = c_len;
if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
@@ -703,124 +867,311 @@ unlock_out:
}
#endif
-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
+ unsigned long long new,
+ unsigned long long old)
{
+ unsigned int rem_old, rem_new;
+ unsigned long long res;
+ unsigned int weight = sbi->last_age_weight;
+
+ res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
+ + div_u64_rem(old, 100, &rem_old) * weight;
+
+ if (rem_new)
+ res += rem_new * (100 - weight) / 100;
+ if (rem_old)
+ res += rem_old * weight / 100;
+
+ return res;
+}
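For reference (not part of the patch): __calculate_block_age() computes the weighted moving average new * (100 - weight) / 100 + old * weight / 100 without risking 64-bit overflow, by splitting each operand into a quotient and a remainder modulo 100. With the default LAST_AGE_WEIGHT of 30 the arithmetic works out as below; the naive helper is only a comparison sketch and can overflow for very large inputs.

/*
 * Worked example, weight = 30, new = 150, old = 250:
 *   quotients:   1 * 70 + 2 * 30               = 130
 *   remainders: 50 * 70 / 100 + 50 * 30 / 100  =  35 + 15
 *   result                                     = 180  (== 150 * 0.7 + 250 * 0.3)
 */
static unsigned long long weighted_age_ref(unsigned long long new,
					   unsigned long long old,
					   unsigned int weight)
{
	/* naive form; per-term truncation in the real helper may differ
	 * from this by a unit or two */
	return (new * (100 - weight) + old * weight) / 100;
}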
+
+/* This returns a new age and allocated blocks in ei */
+static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
+ block_t blkaddr)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ loff_t f_size = i_size_read(inode);
+ unsigned long long cur_blocks =
+ atomic64_read(&sbi->allocated_data_blocks);
+ struct extent_info tei = *ei; /* only fofs and len are valid */
+
+ /*
+ * When I/O is not aligned to PAGE_SIZE, an update can land on the last
+ * file block even during sequential writes, so don't record an age for
+ * the newly written last file block here.
+ */
+ if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
+ blkaddr == NEW_ADDR)
+ return -EINVAL;
+
+ if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
+ unsigned long long cur_age;
+
+ if (cur_blocks >= tei.last_blocks)
+ cur_age = cur_blocks - tei.last_blocks;
+ else
+ /* allocated_data_blocks overflow */
+ cur_age = (ULLONG_MAX - 1) - tei.last_blocks + cur_blocks;
+
+ if (tei.age)
+ ei->age = __calculate_block_age(sbi, cur_age, tei.age);
+ else
+ ei->age = cur_age;
+ ei->last_blocks = cur_blocks;
+ WARN_ON(ei->age > cur_blocks);
+ return 0;
+ }
+
+ f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
+
+ /* the data block was allocated for the first time */
+ if (blkaddr == NEW_ADDR)
+ goto out;
+
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
+ return -EINVAL;
+out:
+ /*
+ * initialize the block age with zero; this can happen when the block age
+ * extent was reclaimed due to memory pressure or a system reboot
+ */
+ ei->age = 0;
+ ei->last_blocks = cur_blocks;
+ return 0;
+}
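An illustrative sketch (not part of the patch; the helper name is mine) of the counter-delta step above: allocated_data_blocks only grows, so the age contribution is the number of blocks allocated since the extent was last touched, with the wrap handled explicitly because ULLONG_MAX is reserved as F2FS_EXTENT_AGE_INVALID:

static unsigned long long counter_delta(unsigned long long now,
					unsigned long long last)
{
	/* the counter effectively wraps at ULLONG_MAX - 1 */
	return now >= last ? now - last : (ULLONG_MAX - 1) - last + now;
}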
+
+static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
+{
+ struct extent_info ei = {};
+
+ if (!__may_extent_tree(dn->inode, type))
+ return;
+
+ ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), dn->inode) +
+ dn->ofs_in_node;
+ ei.len = 1;
+
+ if (type == EX_READ) {
+ if (dn->data_blkaddr == NEW_ADDR)
+ ei.blk = NULL_ADDR;
+ else
+ ei.blk = dn->data_blkaddr;
+ } else if (type == EX_BLOCK_AGE) {
+ if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
+ return;
+ }
+ __update_extent_tree_range(dn->inode, &ei, type);
+}
+
+static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
+ enum extent_type type)
+{
+ struct extent_tree_info *eti = &sbi->extent_tree[type];
struct extent_tree *et, *next;
struct extent_node *en;
unsigned int node_cnt = 0, tree_cnt = 0;
int remained;
- if (!test_opt(sbi, EXTENT_CACHE))
- return 0;
-
- if (!atomic_read(&sbi->total_zombie_tree))
+ if (!atomic_read(&eti->total_zombie_tree))
goto free_node;
- if (!mutex_trylock(&sbi->extent_tree_lock))
+ if (!mutex_trylock(&eti->extent_tree_lock))
goto out;
/* 1. remove unreferenced extent tree */
- list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
+ list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
if (atomic_read(&et->node_cnt)) {
write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et);
+ node_cnt += __free_extent_tree(sbi, et,
+ nr_shrink - node_cnt - tree_cnt);
write_unlock(&et->lock);
}
- f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+
+ if (atomic_read(&et->node_cnt))
+ goto unlock_out;
+
list_del_init(&et->list);
- radix_tree_delete(&sbi->extent_tree_root, et->ino);
+ radix_tree_delete(&eti->extent_tree_root, et->ino);
kmem_cache_free(extent_tree_slab, et);
- atomic_dec(&sbi->total_ext_tree);
- atomic_dec(&sbi->total_zombie_tree);
+ atomic_dec(&eti->total_ext_tree);
+ atomic_dec(&eti->total_zombie_tree);
tree_cnt++;
if (node_cnt + tree_cnt >= nr_shrink)
goto unlock_out;
cond_resched();
}
- mutex_unlock(&sbi->extent_tree_lock);
+ mutex_unlock(&eti->extent_tree_lock);
free_node:
/* 2. remove LRU extent entries */
- if (!mutex_trylock(&sbi->extent_tree_lock))
+ if (!mutex_trylock(&eti->extent_tree_lock))
goto out;
remained = nr_shrink - (node_cnt + tree_cnt);
- spin_lock(&sbi->extent_lock);
+ spin_lock(&eti->extent_lock);
for (; remained > 0; remained--) {
- if (list_empty(&sbi->extent_list))
+ if (list_empty(&eti->extent_list))
break;
- en = list_first_entry(&sbi->extent_list,
+ en = list_first_entry(&eti->extent_list,
struct extent_node, list);
et = en->et;
if (!write_trylock(&et->lock)) {
/* refresh this extent node's position in extent list */
- list_move_tail(&en->list, &sbi->extent_list);
+ list_move_tail(&en->list, &eti->extent_list);
continue;
}
list_del_init(&en->list);
- spin_unlock(&sbi->extent_lock);
+ spin_unlock(&eti->extent_lock);
__detach_extent_node(sbi, et, en);
write_unlock(&et->lock);
node_cnt++;
- spin_lock(&sbi->extent_lock);
+ spin_lock(&eti->extent_lock);
}
- spin_unlock(&sbi->extent_lock);
+ spin_unlock(&eti->extent_lock);
unlock_out:
- mutex_unlock(&sbi->extent_tree_lock);
+ mutex_unlock(&eti->extent_tree_lock);
out:
- trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+ trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
return node_cnt + tree_cnt;
}
-unsigned int f2fs_destroy_extent_node(struct inode *inode)
+/* read extent cache operations */
+bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
- unsigned int node_cnt = 0;
+ if (!__may_extent_tree(inode, EX_READ))
+ return false;
- if (!et || !atomic_read(&et->node_cnt))
+ return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
+}
+
+bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
+ block_t *blkaddr)
+{
+ struct extent_info ei = {};
+
+ if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
+ return false;
+ *blkaddr = ei.blk + index - ei.fofs;
+ return true;
+}
+
+void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
+{
+ return __update_extent_cache(dn, EX_READ);
+}
+
+void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
+ pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+ struct extent_info ei = {
+ .fofs = fofs,
+ .len = len,
+ .blk = blkaddr,
+ };
+
+ if (!__may_extent_tree(dn->inode, EX_READ))
+ return;
+
+ __update_extent_tree_range(dn->inode, &ei, EX_READ);
+}
+
+unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+ if (!test_opt(sbi, READ_EXTENT_CACHE))
return 0;
- write_lock(&et->lock);
- node_cnt = __free_extent_tree(sbi, et);
- write_unlock(&et->lock);
+ return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
+}
- return node_cnt;
+/* block age extent cache operations */
+bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei)
+{
+ if (!__may_extent_tree(inode, EX_BLOCK_AGE))
+ return false;
+
+ return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
}
-void f2fs_drop_extent_tree(struct inode *inode)
+void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
- bool updated = false;
+ return __update_extent_cache(dn, EX_BLOCK_AGE);
+}
- if (!f2fs_may_extent_tree(inode))
+void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
+ pgoff_t fofs, unsigned int len)
+{
+ struct extent_info ei = {
+ .fofs = fofs,
+ .len = len,
+ .last_blocks = F2FS_EXTENT_AGE_INVALID,
+ };
+
+ if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
return;
- set_inode_flag(inode, FI_NO_EXTENT);
+ __update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
+}
+
+unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+ if (!test_opt(sbi, AGE_EXTENT_CACHE))
+ return 0;
+
+ return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
+}
+
+void f2fs_destroy_extent_node(struct inode *inode)
+{
+ __destroy_extent_node(inode, EX_READ);
+ __destroy_extent_node(inode, EX_BLOCK_AGE);
+}
+
+static void __drop_extent_tree(struct inode *inode, enum extent_type type)
+{
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ bool updated = false;
+
+ if (!__may_extent_tree(inode, type))
+ return;
write_lock(&et->lock);
- __free_extent_tree(sbi, et);
- if (et->largest.len) {
- et->largest.len = 0;
- updated = true;
+ if (type == EX_READ) {
+ set_inode_flag(inode, FI_NO_EXTENT);
+ if (et->largest.len) {
+ et->largest.len = 0;
+ updated = true;
+ }
}
write_unlock(&et->lock);
+
+ __destroy_extent_node(inode, type);
+
if (updated)
f2fs_mark_inode_dirty_sync(inode, true);
}
-void f2fs_destroy_extent_tree(struct inode *inode)
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+ __drop_extent_tree(inode, EX_READ);
+ __drop_extent_tree(inode, EX_BLOCK_AGE);
+}
+
+static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ struct extent_tree_info *eti = &sbi->extent_tree[type];
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
unsigned int node_cnt = 0;
if (!et)
@@ -828,76 +1179,58 @@ void f2fs_destroy_extent_tree(struct inode *inode)
if (inode->i_nlink && !is_bad_inode(inode) &&
atomic_read(&et->node_cnt)) {
- mutex_lock(&sbi->extent_tree_lock);
- list_add_tail(&et->list, &sbi->zombie_list);
- atomic_inc(&sbi->total_zombie_tree);
- mutex_unlock(&sbi->extent_tree_lock);
+ mutex_lock(&eti->extent_tree_lock);
+ list_add_tail(&et->list, &eti->zombie_list);
+ atomic_inc(&eti->total_zombie_tree);
+ mutex_unlock(&eti->extent_tree_lock);
return;
}
/* free all extent info belong to this extent tree */
- node_cnt = f2fs_destroy_extent_node(inode);
+ node_cnt = __destroy_extent_node(inode, type);
/* delete extent tree entry in radix tree */
- mutex_lock(&sbi->extent_tree_lock);
+ mutex_lock(&eti->extent_tree_lock);
f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
- radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+ radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
kmem_cache_free(extent_tree_slab, et);
- atomic_dec(&sbi->total_ext_tree);
- mutex_unlock(&sbi->extent_tree_lock);
+ atomic_dec(&eti->total_ext_tree);
+ mutex_unlock(&eti->extent_tree_lock);
- F2FS_I(inode)->extent_tree = NULL;
+ F2FS_I(inode)->extent_tree[type] = NULL;
- trace_f2fs_destroy_extent_tree(inode, node_cnt);
+ trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}
-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
- struct extent_info *ei)
+void f2fs_destroy_extent_tree(struct inode *inode)
{
- if (!f2fs_may_extent_tree(inode))
- return false;
-
- return f2fs_lookup_extent_tree(inode, pgofs, ei);
+ __destroy_extent_tree(inode, EX_READ);
+ __destroy_extent_tree(inode, EX_BLOCK_AGE);
}
-void f2fs_update_extent_cache(struct dnode_of_data *dn)
+static void __init_extent_tree_info(struct extent_tree_info *eti)
{
- pgoff_t fofs;
- block_t blkaddr;
-
- if (!f2fs_may_extent_tree(dn->inode))
- return;
-
- if (dn->data_blkaddr == NEW_ADDR)
- blkaddr = NULL_ADDR;
- else
- blkaddr = dn->data_blkaddr;
-
- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
- dn->ofs_in_node;
- f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
-}
-
-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
- pgoff_t fofs, block_t blkaddr, unsigned int len)
-
-{
- if (!f2fs_may_extent_tree(dn->inode))
- return;
-
- f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
+ INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
+ mutex_init(&eti->extent_tree_lock);
+ INIT_LIST_HEAD(&eti->extent_list);
+ spin_lock_init(&eti->extent_lock);
+ atomic_set(&eti->total_ext_tree, 0);
+ INIT_LIST_HEAD(&eti->zombie_list);
+ atomic_set(&eti->total_zombie_tree, 0);
+ atomic_set(&eti->total_ext_node, 0);
}
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
- INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
- mutex_init(&sbi->extent_tree_lock);
- INIT_LIST_HEAD(&sbi->extent_list);
- spin_lock_init(&sbi->extent_lock);
- atomic_set(&sbi->total_ext_tree, 0);
- INIT_LIST_HEAD(&sbi->zombie_list);
- atomic_set(&sbi->total_zombie_tree, 0);
- atomic_set(&sbi->total_ext_node, 0);
+ __init_extent_tree_info(&sbi->extent_tree[EX_READ]);
+ __init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
+
+ /* initialize for block age extents */
+ atomic64_set(&sbi->allocated_data_blocks, 0);
+ sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
+ sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
+ sbi->last_age_weight = LAST_AGE_WEIGHT;
+ sbi->max_read_extent_count = DEF_MAX_READ_EXTENT_COUNT;
}
int __init f2fs_create_extent_cache(void)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ce9fc9f13000..20edbb99b814 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -11,23 +11,25 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
-#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
+#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
-#include <crypto/hash.h>
+#include <linux/rw_hint.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
+struct pagevec;
+
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
#else
@@ -56,57 +58,90 @@ enum {
FAULT_WRITE_IO,
FAULT_SLAB_ALLOC,
FAULT_DQUOT_INIT,
+ FAULT_LOCK_OP,
+ FAULT_BLKADDR_VALIDITY,
+ FAULT_BLKADDR_CONSISTENCE,
+ FAULT_NO_SEGMENT,
+ FAULT_INCONSISTENT_FOOTER,
+ FAULT_TIMEOUT,
+ FAULT_VMALLOC,
FAULT_MAX,
};
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define F2FS_ALL_FAULT_TYPE ((1 << FAULT_MAX) - 1)
+/* indicate which option to update */
+enum fault_option {
+ FAULT_RATE = 1, /* only update fault rate */
+ FAULT_TYPE = 2, /* only update fault type */
+ FAULT_ALL = 4, /* reset all fault injection options/stats */
+};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info {
atomic_t inject_ops;
- unsigned int inject_rate;
+ int inject_rate;
unsigned int inject_type;
+ /* Used to account total count of injection for each type */
+ unsigned int inject_count[FAULT_MAX];
};
extern const char *f2fs_fault_name[FAULT_MAX];
-#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
+#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
+
+/* maximum retry count for injected failure */
+#define DEFAULT_FAILURE_RETRY_COUNT 8
+#else
+#define DEFAULT_FAILURE_RETRY_COUNT 1
#endif
/*
* For mount options
*/
-#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
-#define F2FS_MOUNT_DISCARD 0x00000004
-#define F2FS_MOUNT_NOHEAP 0x00000008
-#define F2FS_MOUNT_XATTR_USER 0x00000010
-#define F2FS_MOUNT_POSIX_ACL 0x00000020
-#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
-#define F2FS_MOUNT_INLINE_XATTR 0x00000080
-#define F2FS_MOUNT_INLINE_DATA 0x00000100
-#define F2FS_MOUNT_INLINE_DENTRY 0x00000200
-#define F2FS_MOUNT_FLUSH_MERGE 0x00000400
-#define F2FS_MOUNT_NOBARRIER 0x00000800
-#define F2FS_MOUNT_FASTBOOT 0x00001000
-#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
-#define F2FS_MOUNT_DATA_FLUSH 0x00008000
-#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
-#define F2FS_MOUNT_USRQUOTA 0x00080000
-#define F2FS_MOUNT_GRPQUOTA 0x00100000
-#define F2FS_MOUNT_PRJQUOTA 0x00200000
-#define F2FS_MOUNT_QUOTA 0x00400000
-#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
-#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
-#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
-#define F2FS_MOUNT_NORECOVERY 0x04000000
-#define F2FS_MOUNT_ATGC 0x08000000
-#define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000
-#define F2FS_MOUNT_GC_MERGE 0x20000000
-#define F2FS_MOUNT_COMPRESS_CACHE 0x40000000
+enum f2fs_mount_opt {
+ F2FS_MOUNT_DISABLE_ROLL_FORWARD,
+ F2FS_MOUNT_DISCARD,
+ F2FS_MOUNT_NOHEAP,
+ F2FS_MOUNT_XATTR_USER,
+ F2FS_MOUNT_POSIX_ACL,
+ F2FS_MOUNT_DISABLE_EXT_IDENTIFY,
+ F2FS_MOUNT_INLINE_XATTR,
+ F2FS_MOUNT_INLINE_DATA,
+ F2FS_MOUNT_INLINE_DENTRY,
+ F2FS_MOUNT_FLUSH_MERGE,
+ F2FS_MOUNT_NOBARRIER,
+ F2FS_MOUNT_FASTBOOT,
+ F2FS_MOUNT_READ_EXTENT_CACHE,
+ F2FS_MOUNT_DATA_FLUSH,
+ F2FS_MOUNT_FAULT_INJECTION,
+ F2FS_MOUNT_USRQUOTA,
+ F2FS_MOUNT_GRPQUOTA,
+ F2FS_MOUNT_PRJQUOTA,
+ F2FS_MOUNT_QUOTA,
+ F2FS_MOUNT_INLINE_XATTR_SIZE,
+ F2FS_MOUNT_RESERVE_ROOT,
+ F2FS_MOUNT_DISABLE_CHECKPOINT,
+ F2FS_MOUNT_NORECOVERY,
+ F2FS_MOUNT_ATGC,
+ F2FS_MOUNT_MERGE_CHECKPOINT,
+ F2FS_MOUNT_GC_MERGE,
+ F2FS_MOUNT_COMPRESS_CACHE,
+ F2FS_MOUNT_AGE_EXTENT_CACHE,
+ F2FS_MOUNT_NAT_BITS,
+ F2FS_MOUNT_INLINECRYPT,
+ /*
+ * Some f2fs environments expect to be able to pass the "lazytime" option
+ * string rather than using the MS_LAZYTIME flag, so this must remain.
+ */
+ F2FS_MOUNT_LAZYTIME,
+ F2FS_MOUNT_RESERVE_NODE,
+};
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
-#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
-#define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
-#define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
+#define clear_opt(sbi, option) \
+ (F2FS_OPTION(sbi).opt &= ~BIT(F2FS_MOUNT_##option))
+#define set_opt(sbi, option) \
+ (F2FS_OPTION(sbi).opt |= BIT(F2FS_MOUNT_##option))
+#define test_opt(sbi, option) \
+ (F2FS_OPTION(sbi).opt & BIT(F2FS_MOUNT_##option))
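For reference (not part of the patch): each mount option is now a bit index rather than a pre-shifted mask, so the option word can grow past 32 flags simply by widening 'opt'. A hypothetical check expands as:

/*
 *   test_opt(sbi, DISCARD)
 *     -> (F2FS_OPTION(sbi).opt & BIT(F2FS_MOUNT_DISCARD))
 *   set_opt(sbi, DISCARD)
 *     -> (F2FS_OPTION(sbi).opt |= BIT(F2FS_MOUNT_DISCARD))
 */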
#define ver_after(a, b) (typecheck(unsigned long long, a) && \
typecheck(unsigned long long, b) && \
@@ -120,10 +155,42 @@ typedef u32 nid_t;
#define COMPRESS_EXT_NUM 16
+enum blkzone_allocation_policy {
+ BLKZONE_ALLOC_PRIOR_SEQ, /* Prioritize writing to sequential zones */
+ BLKZONE_ALLOC_ONLY_SEQ, /* Only allow writing to sequential zones */
+ BLKZONE_ALLOC_PRIOR_CONV, /* Prioritize writing to conventional zones */
+};
+
+enum bggc_io_aware_policy {
+ AWARE_ALL_IO, /* skip background GC if there is any kind of pending IO */
+ AWARE_READ_IO, /* skip background GC if there is pending read IO */
+ AWARE_NONE, /* don't check pending IO for background GC */
+};
+
+enum device_allocation_policy {
+ ALLOCATE_FORWARD_NOHINT,
+ ALLOCATE_FORWARD_WITHIN_HINT,
+ ALLOCATE_FORWARD_FROM_HINT,
+};
+
+/*
+ * An implementation of an rwsem that is explicitly unfair to readers. This
+ * prevents priority inversion when a low-priority reader acquires the read lock
+ * while sleeping on the write lock but the write lock is needed by
+ * higher-priority clients.
+ */
+
+struct f2fs_rwsem {
+ struct rw_semaphore internal_rwsem;
+#ifdef CONFIG_F2FS_UNFAIR_RWSEM
+ wait_queue_head_t read_waiters;
+#endif
+};
+
struct f2fs_mount_info {
- unsigned int opt;
- int write_io_size_bits; /* Write IO size bits */
+ unsigned long long opt;
block_t root_reserved_blocks; /* root reserved blocks */
+ block_t root_reserved_nodes; /* root reserved nodes */
kuid_t s_resuid; /* reserved blocks for uid */
kgid_t s_resgid; /* reserved blocks for gid */
int active_logs; /* # of active logs */
@@ -137,11 +204,12 @@ struct f2fs_mount_info {
int s_jquota_fmt; /* Format of quota to use */
#endif
/* For which write hints are passed down to block layer */
- int whint_mode;
int alloc_mode; /* segment allocation policy */
int fsync_mode; /* fsync policy */
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
+ int memory_mode; /* memory mode */
+ int errors; /* errors parameter */
int discard_unit; /*
* discard command's offset/size should
* be aligned to this unit: block,
@@ -163,31 +231,30 @@ struct f2fs_mount_info {
int compress_mode; /* compression mode */
unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
+ unsigned int lookup_mode;
};
-#define F2FS_FEATURE_ENCRYPT 0x0001
-#define F2FS_FEATURE_BLKZONED 0x0002
-#define F2FS_FEATURE_ATOMIC_WRITE 0x0004
-#define F2FS_FEATURE_EXTRA_ATTR 0x0008
-#define F2FS_FEATURE_PRJQUOTA 0x0010
-#define F2FS_FEATURE_INODE_CHKSUM 0x0020
-#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
-#define F2FS_FEATURE_QUOTA_INO 0x0080
-#define F2FS_FEATURE_INODE_CRTIME 0x0100
-#define F2FS_FEATURE_LOST_FOUND 0x0200
-#define F2FS_FEATURE_VERITY 0x0400
-#define F2FS_FEATURE_SB_CHKSUM 0x0800
-#define F2FS_FEATURE_CASEFOLD 0x1000
-#define F2FS_FEATURE_COMPRESSION 0x2000
-#define F2FS_FEATURE_RO 0x4000
+#define F2FS_FEATURE_ENCRYPT 0x00000001
+#define F2FS_FEATURE_BLKZONED 0x00000002
+#define F2FS_FEATURE_ATOMIC_WRITE 0x00000004
+#define F2FS_FEATURE_EXTRA_ATTR 0x00000008
+#define F2FS_FEATURE_PRJQUOTA 0x00000010
+#define F2FS_FEATURE_INODE_CHKSUM 0x00000020
+#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x00000040
+#define F2FS_FEATURE_QUOTA_INO 0x00000080
+#define F2FS_FEATURE_INODE_CRTIME 0x00000100
+#define F2FS_FEATURE_LOST_FOUND 0x00000200
+#define F2FS_FEATURE_VERITY 0x00000400
+#define F2FS_FEATURE_SB_CHKSUM 0x00000800
+#define F2FS_FEATURE_CASEFOLD 0x00001000
+#define F2FS_FEATURE_COMPRESSION 0x00002000
+#define F2FS_FEATURE_RO 0x00004000
+#define F2FS_FEATURE_DEVICE_ALIAS 0x00008000
+#define F2FS_FEATURE_PACKED_SSA 0x00010000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask)
-#define F2FS_SET_FEATURE(sbi, mask) \
- (sbi->raw_super->feature |= cpu_to_le32(mask))
-#define F2FS_CLEAR_FEATURE(sbi, mask) \
- (sbi->raw_super->feature &= ~cpu_to_le32(mask))
/*
* Default values for user and/or group using reserved blocks
@@ -212,7 +279,6 @@ enum {
#define CP_PAUSE 0x00000040
#define CP_RESIZE 0x00000080
-#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
@@ -221,14 +287,42 @@ enum {
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
#define DEF_DISABLE_INTERVAL 5 /* 5 secs */
+#define DEF_ENABLE_INTERVAL 5 /* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */
+enum cp_time {
+ CP_TIME_START, /* begin */
+ CP_TIME_LOCK, /* after cp_global_sem */
+ CP_TIME_OP_LOCK, /* after block_operation */
+ CP_TIME_FLUSH_META, /* after flush sit/nat */
+ CP_TIME_SYNC_META, /* after sync_meta_pages */
+ CP_TIME_SYNC_CP_META, /* after sync cp meta pages */
+ CP_TIME_WAIT_DIRTY_META,/* after wait on dirty meta */
+ CP_TIME_WAIT_CP_DATA, /* after wait on cp data */
+ CP_TIME_FLUSH_DEVICE, /* after flush device cache */
+ CP_TIME_WAIT_LAST_CP, /* after wait on last cp pack */
+ CP_TIME_END, /* after unblock_operation */
+ CP_TIME_MAX,
+};
+
+/* time cost stats of checkpoint */
+struct cp_stats {
+ ktime_t times[CP_TIME_MAX];
+};
+
struct cp_control {
int reason;
__u64 trim_start;
__u64 trim_end;
__u64 trim_minlen;
+ struct cp_stats stats;
+};
+
+enum f2fs_cp_phase {
+ CP_PHASE_START_BLOCK_OPS,
+ CP_PHASE_FINISH_BLOCK_OPS,
+ CP_PHASE_FINISH_CHECKPOINT,
};
/*
@@ -249,6 +343,10 @@ enum {
* condition of read on truncated area
* by extent_cache
*/
+ DATA_GENERIC_ENHANCE_UPDATE, /*
+ * strong check on range and segment
+ * bitmap for update case
+ */
META_GENERIC,
};
@@ -257,7 +355,8 @@ enum {
ORPHAN_INO, /* for orphan ino list */
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
- TRANS_DIR_INO, /* for trasactions dir ino list */
+ TRANS_DIR_INO, /* for transactions dir ino list */
+ XATTR_DIR_INO, /* for xattr updated dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -276,7 +375,7 @@ struct inode_entry {
struct fsync_node_entry {
struct list_head list; /* list head */
- struct page *page; /* warm node page pointer */
+ struct folio *folio; /* warm node folio pointer */
unsigned int seq_id; /* sequence id */
};
@@ -284,7 +383,10 @@ struct ckpt_req {
struct completion wait; /* completion for checkpoint done */
struct llist_node llnode; /* llist_node to be linked in wait queue */
int ret; /* return code of checkpoint */
- ktime_t queue_time; /* request queued time */
+ union {
+ ktime_t queue_time; /* request queued time */
+ ktime_t delta_time; /* time in queue */
+ };
};
struct ckpt_req_control {
@@ -300,6 +402,9 @@ struct ckpt_req_control {
unsigned int peak_time; /* peak wait time in msec until now */
};
+/* time threshold for which a checkpoint was blocked, unit: ms */
+#define CP_LONG_LATENCY_THRESHOLD 5000
+
/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
@@ -307,8 +412,14 @@ struct discard_entry {
unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */
};
+/* minimum discard granularity, unit: block count */
+#define MIN_DISCARD_GRANULARITY 1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY 16
+/* default maximum discard granularity of ordered discard, unit: block count */
+#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY 16
+/* default interval of periodical discard submission */
+#define DEFAULT_DISCARD_INTERVAL (msecs_to_jiffies(20))
/* max discard pend list number */
#define MAX_PLIST_NUM 512
@@ -330,17 +441,9 @@ struct discard_info {
struct discard_cmd {
struct rb_node rb_node; /* rb node located in rb-tree */
- union {
- struct {
- block_t lstart; /* logical start address */
- block_t len; /* length */
- block_t start; /* actual start address in dev */
- };
- struct discard_info di; /* discard info */
-
- };
+ struct discard_info di; /* discard info */
struct list_head list; /* command list */
- struct completion wait; /* compleation */
+ struct completion wait; /* completion */
struct block_device *bdev; /* bdev */
unsigned short ref; /* reference count */
unsigned char state; /* state */
@@ -358,6 +461,12 @@ enum {
MAX_DPOLICY,
};
+enum {
+ DPOLICY_IO_AWARE_DISABLE, /* force to not be aware of IO */
+ DPOLICY_IO_AWARE_ENABLE, /* force to be aware of IO */
+ DPOLICY_IO_AWARE_MAX,
+};
+
struct discard_policy {
int type; /* type of discard */
unsigned int min_interval; /* used for candidates exist */
@@ -379,11 +488,18 @@ struct discard_cmd_control {
struct list_head wait_list; /* store on-flushing entries */
struct list_head fstrim_list; /* in-flight discard from fstrim */
wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
- unsigned int discard_wake; /* to wake up discard thread */
struct mutex cmd_lock;
unsigned int nr_discards; /* # of discards in the list */
unsigned int max_discards; /* max. discards to be issued */
+ unsigned int max_discard_request; /* max. discard request per round */
+ unsigned int min_discard_issue_time; /* min. interval between discard issue */
+ unsigned int mid_discard_issue_time; /* mid. interval between discard issue */
+ unsigned int max_discard_issue_time; /* max. interval between discard issue */
+ unsigned int discard_io_aware_gran; /* minimum discard granularity that is not I/O-aware */
+ unsigned int discard_urgent_util; /* utilization above which discard is issued proactively */
unsigned int discard_granularity; /* discard granularity */
+ unsigned int max_ordered_discard; /* maximum discard granularity issued by lba order */
+ unsigned int discard_io_aware; /* io_aware policy */
unsigned int undiscard_blks; /* # of undiscard blocks */
unsigned int next_pos; /* next discard position */
atomic_t issued_discard; /* # of issued discard */
@@ -391,6 +507,7 @@ struct discard_cmd_control {
atomic_t discard_cmd_cnt; /* # of cached cmd count */
struct rb_root_cached root; /* root of discard rb-tree */
bool rbtree_check; /* config for consistence check */
+ bool discard_wake; /* to wake up discard thread */
};
/* for the list of fsync inodes, used only during recovery */
@@ -485,16 +602,16 @@ struct f2fs_filename {
*/
struct fscrypt_str crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* For casefolded directories: the casefolded name, but it's left NULL
- * if the original name is not valid Unicode, if the directory is both
- * casefolded and encrypted and its encryption key is unavailable, or if
- * the filesystem is doing an internal operation where usr_fname is also
- * NULL. In all these cases we fall back to treating the name as an
- * opaque byte sequence.
+ * if the original name is not valid Unicode, if the original name is
+ * "." or "..", if the directory is both casefolded and encrypted and
+ * its encryption key is unavailable, or if the filesystem is doing an
+ * internal operation where usr_fname is also NULL. In all these cases
+ * we fall back to treating the name as an opaque byte sequence.
*/
- struct fscrypt_str cf_name;
+ struct qstr cf_name;
#endif
};
@@ -552,12 +669,18 @@ enum {
#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO or flush count */
-/* congestion wait timeout value, default: 20ms */
-#define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20))
+/* IO/non-IO congestion wait timeout value, default: 1ms */
+#define DEFAULT_SCHEDULE_TIMEOUT (msecs_to_jiffies(1))
+
+/* timeout value injected, default: 1000ms */
+#define DEFAULT_FAULT_TIMEOUT (msecs_to_jiffies(1000))
/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
+/* maximum retry of EIO'ed page */
+#define MAX_RETRY_PAGE_EIO 100
+
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
@@ -565,30 +688,66 @@ enum {
/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD 4
+#define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS
+#define RECOVERY_MIN_RA_BLOCKS 1
+
+#define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */
+
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
/* number of extent info in extent cache we try to shrink */
-#define EXTENT_CACHE_SHRINK_NUMBER 128
+#define READ_EXTENT_CACHE_SHRINK_NUMBER 128
-struct rb_entry {
- struct rb_node rb_node; /* rb node located in rb-tree */
- union {
- struct {
- unsigned int ofs; /* start offset of the entry */
- unsigned int len; /* length of the entry */
- };
- unsigned long long key; /* 64-bits key */
- } __packed;
+/* number of age extent info in extent cache we try to shrink */
+#define AGE_EXTENT_CACHE_SHRINK_NUMBER 128
+#define LAST_AGE_WEIGHT 30
+#define SAME_AGE_REGION 1024
+
+/*
+ * Define data block with age less than 1GB as hot data
+ * define data block with age less than 10GB but more than 1GB as warm data
+ */
+#define DEF_HOT_DATA_AGE_THRESHOLD 262144
+#define DEF_WARM_DATA_AGE_THRESHOLD 2621440
+
+/* default max read extent count per inode */
+#define DEF_MAX_READ_EXTENT_COUNT 10240
+
+/* extent cache type */
+enum extent_type {
+ EX_READ,
+ EX_BLOCK_AGE,
+ NR_EXTENT_CACHES,
};
+/*
+ * Reserved value to mark invalid age extents, hence valid block range
+ * from 0 to ULLONG_MAX-1
+ */
+#define F2FS_EXTENT_AGE_INVALID ULLONG_MAX
+
struct extent_info {
unsigned int fofs; /* start offset in a file */
unsigned int len; /* length of the extent */
- u32 blk; /* start block address of the extent */
+ union {
+ /* read extent_cache */
+ struct {
+ /* start block address of the extent */
+ block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
- unsigned int c_len; /* physical extent length of compressed blocks */
+ /* physical extent length of compressed blocks */
+ unsigned int c_len;
#endif
+ };
+ /* block age extent_cache */
+ struct {
+ /* block age of the extent */
+ unsigned long long age;
+ /* last total blocks allocated */
+ unsigned long long last_blocks;
+ };
+ };
};
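An illustrative sketch (not part of the patch; the helper is hypothetical): the union above is interpreted according to the cache type, so code touching an extent_info must know whether it came from the read cache or the block-age cache:

static void example_dump_extent(const struct extent_info *ei,
				enum extent_type type)
{
	if (type == EX_READ)
		pr_info("read extent [%u, %u] -> blk %u\n",
			ei->fofs, ei->len, ei->blk);
	else	/* EX_BLOCK_AGE */
		pr_info("age extent [%u, %u] age %llu last_blocks %llu\n",
			ei->fofs, ei->len, ei->age, ei->last_blocks);
}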
struct extent_node {
@@ -600,25 +759,35 @@ struct extent_node {
struct extent_tree {
nid_t ino; /* inode number */
+ enum extent_type type; /* keep the extent tree type */
struct rb_root_cached root; /* root of extent info rb-tree */
struct extent_node *cached_en; /* recently accessed extent node */
- struct extent_info largest; /* largested extent info */
struct list_head list; /* to be used by sbi->zombie_list */
rwlock_t lock; /* protect extent info rb-tree */
atomic_t node_cnt; /* # of extent node in rb-tree*/
bool largest_updated; /* largest extent updated */
+ struct extent_info largest; /* largest cached extent for EX_READ */
+};
+
+struct extent_tree_info {
+ struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+ struct mutex extent_tree_lock; /* locking extent radix tree */
+ struct list_head extent_list; /* lru list for shrinker */
+ spinlock_t extent_lock; /* locking extent lru list */
+ atomic_t total_ext_tree; /* extent tree count */
+ struct list_head zombie_list; /* extent zombie tree list */
+ atomic_t total_zombie_tree; /* extent zombie tree count */
+ atomic_t total_ext_node; /* extent info count */
};
/*
- * This structure is taken from ext4_map_blocks.
- *
- * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
+ * State of block returned by f2fs_map_blocks.
*/
-#define F2FS_MAP_NEW (1 << BH_New)
-#define F2FS_MAP_MAPPED (1 << BH_Mapped)
-#define F2FS_MAP_UNWRITTEN (1 << BH_Unwritten)
+#define F2FS_MAP_NEW (1U << 0)
+#define F2FS_MAP_MAPPED (1U << 1)
+#define F2FS_MAP_DELALLOC (1U << 2)
#define F2FS_MAP_FLAGS (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
- F2FS_MAP_UNWRITTEN)
+ F2FS_MAP_DELALLOC)
struct f2fs_map_blocks {
struct block_device *m_bdev; /* for multi-device dio */
@@ -626,6 +795,7 @@ struct f2fs_map_blocks {
block_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ unsigned long m_last_pblk; /* last allocated block, only used for DIO in LFS mode */
pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
pgoff_t *m_next_extent; /* point to next possible extent */
int m_seg_type;
@@ -654,6 +824,7 @@ enum {
#define FADVISE_KEEP_SIZE_BIT 0x10
#define FADVISE_HOT_BIT 0x20
#define FADVISE_VERITY_BIT 0x40
+#define FADVISE_TRUNC_BIT 0x80
#define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
@@ -681,13 +852,11 @@ enum {
#define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT)
-#define DEF_DIR_LEVEL 0
+#define file_should_truncate(inode) is_file(inode, FADVISE_TRUNC_BIT)
+#define file_need_truncate(inode) set_file(inode, FADVISE_TRUNC_BIT)
+#define file_dont_truncate(inode) clear_file(inode, FADVISE_TRUNC_BIT)
-enum {
- GC_FAILURE_PIN,
- GC_FAILURE_ATOMIC,
- MAX_GC_FAILURE
-};
+#define DEF_DIR_LEVEL 0
/* used for f2fs_inode_info->flags */
enum {
@@ -707,20 +876,15 @@ enum {
FI_UPDATE_WRITE, /* inode has in-place-update data */
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
- FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */
- FI_VOLATILE_FILE, /* indicate volatile file */
- FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
- FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
- FI_INLINE_DOTS, /* indicate inline dot dentries */
- FI_DO_DEFRAG, /* indicate defragment is running */
+ FI_SKIP_WRITES, /* should skip data page writeback */
+ FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
- FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
+ FI_PREALLOCATED_ALL, /* all blocks for write were preallocated */
FI_HOT_DATA, /* indicate file is hot */
FI_EXTRA_ATTR, /* indicate file has extra attribute */
FI_PROJ_INHERIT, /* indicate file inherits projectid */
FI_PIN_FILE, /* indicate file should not be gced */
- FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */
@@ -728,6 +892,12 @@ enum {
FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */
FI_COMPRESS_RELEASED, /* compressed blocks were released */
FI_ALIGNED_WRITE, /* enable aligned write */
+ FI_COW_FILE, /* indicate COW file */
+ FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
+ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
+ FI_ATOMIC_REPLACE, /* indicate atomic replace */
+ FI_OPENED_FILE, /* indicate file has been opened */
+ FI_DONATE_FINISHED, /* indicate page donation of file has been finished */
FI_MAX, /* max flag, never be used */
};
@@ -736,58 +906,80 @@ struct f2fs_inode_info {
unsigned long i_flags; /* keep an inode flags for ioctl */
unsigned char i_advise; /* use to give file attribute hints */
unsigned char i_dir_level; /* use for dentry level for large dir */
- unsigned int i_current_depth; /* only for directory depth */
- /* for gc failure statistic */
- unsigned int i_gc_failures[MAX_GC_FAILURE];
+ union {
+ unsigned int i_current_depth; /* only for directory depth */
+ unsigned short i_gc_failures; /* for gc failure statistic */
+ };
unsigned int i_pino; /* parent inode number */
umode_t i_acl_mode; /* keep file acl mode temporarily */
/* Use below internally in f2fs*/
unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
- struct rw_semaphore i_sem; /* protect fi info */
+ unsigned int ioprio_hint; /* hint for IO priority */
+ struct f2fs_rwsem i_sem; /* protect fi info */
atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
struct task_struct *task; /* lookup and create consistency */
struct task_struct *cp_task; /* separate cp/wb IO stats*/
+ struct task_struct *wb_task; /* indicate inode is in context of writeback */
nid_t i_xattr_nid; /* node id that contains xattrs */
loff_t last_disk_size; /* lastly written file size */
spinlock_t i_size_lock; /* protect last_disk_size */
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;
#endif
struct list_head dirty_list; /* dirty list for dirs and files */
struct list_head gdirty_list; /* linked in global dirty list */
- struct list_head inmem_ilist; /* list for inmem inodes */
- struct list_head inmem_pages; /* inmemory pages managed by f2fs */
- struct task_struct *inmem_task; /* store inmemory task */
- struct mutex inmem_lock; /* lock for inmemory pages */
- struct extent_tree *extent_tree; /* cached extent_tree entry */
+
+ /* linked in global inode list for cache donation */
+ struct list_head gdonate_list;
+ pgoff_t donate_start, donate_end; /* inclusive */
+ atomic_t open_count; /* # of open files */
+
+ struct task_struct *atomic_write_task; /* store atomic write task */
+ struct extent_tree *extent_tree[NR_EXTENT_CACHES];
+ /* cached extent_tree entry */
+ union {
+ struct inode *cow_inode; /* copy-on-write inode for atomic write */
+ struct inode *atomic_inode;
+ /* point to atomic_inode, available only for cow_inode */
+ };
/* avoid racing between foreground op and gc */
- struct rw_semaphore i_gc_rwsem[2];
- struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
+ struct f2fs_rwsem i_gc_rwsem[2];
+ struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */
int i_extra_isize; /* size of extra space located in i_addr */
kprojid_t i_projid; /* id for project quota */
int i_inline_xattr_size; /* inline xattr size */
struct timespec64 i_crtime; /* inode creation time */
- struct timespec64 i_disk_time[4];/* inode disk times */
+ struct timespec64 i_disk_time[3];/* inode disk times */
/* for file compress */
atomic_t i_compr_blocks; /* # of compressed blocks */
unsigned char i_compress_algorithm; /* algorithm type */
unsigned char i_log_cluster_size; /* log of cluster size */
unsigned char i_compress_level; /* compress level (lz4hc,zstd) */
- unsigned short i_compress_flag; /* compress flag */
+ unsigned char i_compress_flag; /* compress flag */
unsigned int i_cluster_size; /* cluster size */
+ atomic_t writeback; /* count # of writeback thread */
+
+ unsigned int atomic_write_cnt;
+ loff_t original_i_size; /* original i_size before atomic write */
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_inode_info *i_crypt_info; /* filesystem encryption info */
+#endif
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info; /* filesystem verity info */
+#endif
};
-static inline void get_extent_info(struct extent_info *ext,
+static inline void get_read_extent_info(struct extent_info *ext,
struct f2fs_extent *i_ext)
{
ext->fofs = le32_to_cpu(i_ext->fofs);
@@ -795,7 +987,7 @@ static inline void get_extent_info(struct extent_info *ext,
ext->len = le32_to_cpu(i_ext->len);
}
-static inline void set_raw_extent(struct extent_info *ext,
+static inline void set_raw_read_extent(struct extent_info *ext,
struct f2fs_extent *i_ext)
{
i_ext->fofs = cpu_to_le32(ext->fofs);
@@ -803,17 +995,6 @@ static inline void set_raw_extent(struct extent_info *ext,
i_ext->len = cpu_to_le32(ext->len);
}
-static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
- u32 blk, unsigned int len)
-{
- ei->fofs = fofs;
- ei->blk = blk;
- ei->len = len;
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- ei->c_len = 0;
-#endif
-}
-
static inline bool __is_discard_mergeable(struct discard_info *back,
struct discard_info *front, unsigned int max_len)
{
@@ -833,41 +1014,6 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
return __is_discard_mergeable(cur, front, max_len);
}
-static inline bool __is_extent_mergeable(struct extent_info *back,
- struct extent_info *front)
-{
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (back->c_len && back->len != back->c_len)
- return false;
- if (front->c_len && front->len != front->c_len)
- return false;
-#endif
- return (back->fofs + back->len == front->fofs &&
- back->blk + back->len == front->blk);
-}
-
-static inline bool __is_back_mergeable(struct extent_info *cur,
- struct extent_info *back)
-{
- return __is_extent_mergeable(back, cur);
-}
-
-static inline bool __is_front_mergeable(struct extent_info *cur,
- struct extent_info *front)
-{
- return __is_extent_mergeable(cur, front);
-}
-
-extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
-static inline void __try_update_largest_extent(struct extent_tree *et,
- struct extent_node *en)
-{
- if (en->ei.len > et->largest.len) {
- et->largest = en->ei;
- et->largest_updated = true;
- }
-}
-
/*
* For free nid management
*/
@@ -889,6 +1035,7 @@ struct f2fs_nm_info {
nid_t max_nid; /* maximum possible node ids */
nid_t available_nids; /* # of available node ids */
nid_t next_scan_nid; /* the next nid to be scanned */
+ nid_t max_rf_node_blocks; /* max # of nodes for recovery */
unsigned int ram_thresh; /* control the memory footprint */
unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */
@@ -896,7 +1043,7 @@ struct f2fs_nm_info {
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
struct radix_tree_root nat_set_root;/* root of the nat set cache */
- struct rw_semaphore nat_tree_lock; /* protect nat entry tree */
+ struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */
struct list_head nat_entries; /* cached nat entry list (clean) */
spinlock_t nat_list_lock; /* protect clean nat entry list */
unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
@@ -932,11 +1079,11 @@ struct f2fs_nm_info {
*/
struct dnode_of_data {
struct inode *inode; /* vfs inode pointer */
- struct page *inode_page; /* its inode page, NULL is possible */
- struct page *node_page; /* cached direct node page */
+ struct folio *inode_folio; /* its inode folio, NULL is possible */
+ struct folio *node_folio; /* cached direct node folio */
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
- bool inode_page_locked; /* inode page is locked or not */
+ bool inode_folio_locked; /* inode folio is locked or not */
bool node_changed; /* is node block changed */
char cur_level; /* level of hole node page */
char max_level; /* level of current page located */
@@ -944,12 +1091,12 @@ struct dnode_of_data {
};
static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
- struct page *ipage, struct page *npage, nid_t nid)
+ struct folio *ifolio, struct folio *nfolio, nid_t nid)
{
memset(dn, 0, sizeof(*dn));
dn->inode = inode;
- dn->inode_page = ipage;
- dn->node_page = npage;
+ dn->inode_folio = ifolio;
+ dn->node_folio = nfolio;
dn->nid = nid;
}
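/*
 * Editor's sketch (not part of the patch): a typical dnode round trip built on
 * set_new_dnode()/f2fs_put_dnode(), assuming f2fs_get_dnode_of_data() and
 * LOOKUP_NODE keep their existing meanings:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);	// no cached folios yet
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = f2fs_data_blkaddr(&dn);		// addr at dn.ofs_in_node
 *	f2fs_put_dnode(&dn);				// drop folio references
 */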
@@ -973,7 +1120,7 @@ static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
#define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
-enum {
+enum log_type {
CURSEG_HOT_DATA = 0, /* directory entry blocks */
CURSEG_WARM_DATA, /* data blocks */
CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
@@ -1009,7 +1156,7 @@ struct f2fs_sm_info {
struct dirty_seglist_info *dirty_info; /* dirty segment information */
struct curseg_info *curseg_array; /* active segment information */
- struct rw_semaphore curseg_lock; /* for preventing curseg change */
+ struct f2fs_rwsem curseg_lock; /* for preventing curseg change */
block_t seg0_blkaddr; /* block address of 0'th segment */
block_t main_blkaddr; /* start block address of main area */
@@ -1023,9 +1170,6 @@ struct f2fs_sm_info {
/* a threshold to reclaim prefree segments */
unsigned int rec_prefree_segments;
- /* for batched trimming */
- unsigned int trim_sections; /* # of sections to trim */
-
struct list_head sit_entry_set; /* sit entry set list */
unsigned int ipu_policy; /* in-place-update policy */
@@ -1051,14 +1195,14 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
-#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
+#define WB_DATA_TYPE(folio, f) \
+ (f || f2fs_is_cp_guaranteed(folio) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA,
F2FS_DIRTY_QDATA,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
- F2FS_INMEM_PAGES,
F2FS_DIRTY_IMETA,
F2FS_WB_CP_DATA,
F2FS_WB_DATA,
@@ -1082,17 +1226,14 @@ enum count_type {
* ... Only can be used with META.
*/
#define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
+#define PAGE_TYPE_ON_MAIN(type) ((type) == DATA || (type) == NODE)
enum page_type {
- DATA,
- NODE,
+ DATA = 0,
+ NODE = 1, /* should not change this */
META,
NR_PAGE_TYPE,
META_FLUSH,
- INMEM, /* the below types are used by tracepoints only. */
- INMEM_DROP,
- INMEM_INVALIDATE,
- INMEM_REVOKE,
- IPU,
+ IPU, /* the below types are used by tracepoints only. */
OPU,
};
@@ -1121,6 +1262,7 @@ enum cp_reason_type {
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
CP_RECOVER_DIR,
+ CP_XATTR_DIR,
};
enum iostat_type {
@@ -1129,7 +1271,10 @@ enum iostat_type {
APP_BUFFERED_IO, /* app buffered write IOs */
APP_WRITE_IO, /* app write IOs */
APP_MAPPED_IO, /* app mapped IOs */
+ APP_BUFFERED_CDATA_IO, /* app buffered write IOs on compressed file */
+ APP_MAPPED_CDATA_IO, /* app mapped write IOs on compressed file */
FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
+ FS_CDATA_IO, /* data IOs from kworker/fsync/reclaimer on compressed file */
FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */
FS_META_IO, /* meta IOs from kworker/reclaimer */
FS_GC_DATA_IO, /* data IOs from foreground gc */
@@ -1143,6 +1288,8 @@ enum iostat_type {
APP_BUFFERED_READ_IO, /* app buffered read IOs */
APP_READ_IO, /* app read IOs */
APP_MAPPED_READ_IO, /* app mapped read IOs */
+ APP_BUFFERED_CDATA_READ_IO, /* app buffered read IOs on compressed file */
+ APP_MAPPED_CDATA_READ_IO, /* app mapped read IOs on compressed file */
FS_DATA_READ_IO, /* data read IOs */
FS_GDATA_READ_IO, /* data read IOs from background gc */
FS_CDATA_READ_IO, /* compressed data read IOs */
@@ -1150,7 +1297,9 @@ enum iostat_type {
FS_META_READ_IO, /* meta read IOs */
/* other */
- FS_DISCARD, /* discard */
+ FS_DISCARD_IO, /* discard */
+ FS_FLUSH_IO, /* flush */
+ FS_ZONE_RESET_IO, /* zone reset */
NR_IO_TYPE,
};
@@ -1159,26 +1308,29 @@ struct f2fs_io_info {
nid_t ino; /* inode number */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
enum temp_type temp; /* contains HOT/WARM/COLD */
- int op; /* contains REQ_OP_ */
- int op_flags; /* req_flag_bits */
+ enum req_op op; /* contains REQ_OP_ */
+ blk_opf_t op_flags; /* req_flag_bits */
block_t new_blkaddr; /* new block address to be written */
block_t old_blkaddr; /* old block address before Cow */
- struct page *page; /* page to be written */
+ union {
+ struct page *page; /* page to be written */
+ struct folio *folio;
+ };
struct page *encrypted_page; /* encrypted page */
struct page *compressed_page; /* compressed page */
struct list_head list; /* serialize IOs */
- bool submitted; /* indicate IO submission */
- int need_lock; /* indicate we need to lock cp_rwsem */
- bool in_list; /* indicate fio is in io_list */
- bool is_por; /* indicate IO is from recovery or not */
- bool retry; /* need to reallocate block address */
- int compr_blocks; /* # of compressed block addresses */
- bool encrypted; /* indicate file is encrypted */
+ unsigned int compr_blocks; /* # of compressed block addresses */
+ unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */
+ unsigned int version:8; /* version of the node */
+ unsigned int submitted:1; /* indicate IO submission */
+ unsigned int in_list:1; /* indicate fio is in io_list */
+ unsigned int is_por:1; /* indicate IO is from recovery or not */
+ unsigned int encrypted:1; /* indicate file is encrypted */
+ unsigned int meta_gc:1; /* require meta inode GC */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
sector_t *last_block; /* last block number in bio */
- unsigned char version; /* version of the node */
};
struct bio_entry {
@@ -1192,25 +1344,30 @@ struct f2fs_bio_info {
struct bio *bio; /* bios to merge */
sector_t last_block_in_bio; /* last block number */
struct f2fs_io_info fio; /* store buffered io info. */
- struct rw_semaphore io_rwsem; /* blocking op for bio */
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct completion zone_wait; /* condition value for the previous open zone to close */
+ struct bio *zone_pending_bio; /* pending bio for the previous zone */
+ void *bi_private; /* previous bi_private for pending bio */
+#endif
+ struct f2fs_rwsem io_rwsem; /* blocking op for bio */
spinlock_t io_lock; /* serialize DATA/NODE IOs */
struct list_head io_list; /* track fios */
struct list_head bio_list; /* bio entry list head */
- struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */
+ struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */
};
#define FDEV(i) (sbi->devs[i])
#define RDEV(i) (raw_super->devs[i])
struct f2fs_dev_info {
+ struct file *bdev_file;
struct block_device *bdev;
- char path[MAX_PATH_LEN];
+ char path[MAX_PATH_LEN + 1];
unsigned int total_segments;
block_t start_blk;
block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_blkz; /* Total number of zones */
unsigned long *blkz_seq; /* Bitmap indicating sequential zones */
- block_t *zone_capacity_blocks; /* Array of zone capacity in blks */
#endif
};
@@ -1218,7 +1375,7 @@ enum inode_type {
DIR_INODE, /* for dirty dir inode */
FILE_INODE, /* for dirty regular/symlink inode */
DIRTY_META, /* for all dirtied inode metadata */
- ATOMIC_FILE, /* for all atomic files */
+ DONATE_INODE, /* for all inode to donate pages */
NR_INODE_TYPE,
};
@@ -1242,7 +1399,20 @@ struct atgc_management {
unsigned long long age_threshold; /* age threshold */
};
-/* For s_flag in struct f2fs_sb_info */
+struct f2fs_gc_control {
+ unsigned int victim_segno; /* target victim segment number */
+ int init_gc_type; /* FG_GC or BG_GC */
+ bool no_bg_gc; /* check the space and stop bg_gc */
+ bool should_migrate_blocks; /* should migrate blocks */
+ bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ bool one_time; /* require one time GC in one migration unit */
+ unsigned int nr_free_secs; /* # of free sections to do GC */
+};
+
+/*
+ * For s_flag in struct f2fs_sb_info
+ * Modifications to this enum should be kept in sync with the s_flag array
+ */
enum {
SBI_IS_DIRTY, /* dirty flag for checkpoint */
SBI_IS_CLOSE, /* specify unmounting */
@@ -1258,6 +1428,9 @@ enum {
SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
SBI_IS_RESIZEFS, /* resizefs is in process */
+ SBI_IS_FREEZING, /* freezefs is in process */
+ SBI_IS_WRITABLE, /* remove ro mount option transiently */
+ MAX_SBI_FLAG,
};
enum {
@@ -1266,10 +1439,12 @@ enum {
DISCARD_TIME,
GC_TIME,
DISABLE_TIME,
+ ENABLE_TIME,
UMOUNT_DISCARD_TIMEOUT,
MAX_TIME,
};
+/* Note that this must be kept in sync with the gc_mode_names array */
enum {
GC_NORMAL,
GC_IDLE_CB,
@@ -1277,6 +1452,7 @@ enum {
GC_IDLE_AT,
GC_URGENT_HIGH,
GC_URGENT_LOW,
+ GC_URGENT_MID,
MAX_GC_MODE,
};
@@ -1297,12 +1473,6 @@ enum {
};
enum {
- WHINT_MODE_OFF, /* not pass down write hints */
- WHINT_MODE_USER, /* try to pass down hints given by users */
- WHINT_MODE_FS, /* pass down hints with F2FS policy */
-};
-
-enum {
ALLOC_MODE_DEFAULT, /* stay default */
ALLOC_MODE_REUSE, /* reuse segments as much as possible */
};
@@ -1331,6 +1501,30 @@ enum {
DISCARD_UNIT_SECTION, /* basic discard unit is section */
};
+enum {
+ MEMORY_MODE_NORMAL, /* memory mode for normal devices */
+ MEMORY_MODE_LOW, /* memory mode for low memory devices */
+};
+
+enum errors_option {
+ MOUNT_ERRORS_READONLY, /* remount fs ro on errors */
+ MOUNT_ERRORS_CONTINUE, /* continue on errors */
+ MOUNT_ERRORS_PANIC, /* panic on errors */
+};
+
+enum {
+ BACKGROUND,
+ FOREGROUND,
+ MAX_CALL_TYPE,
+ TOTAL_CALL = FOREGROUND,
+};
+
+enum f2fs_lookup_mode {
+ LOOKUP_PERF,
+ LOOKUP_COMPAT,
+ LOOKUP_AUTO,
+};
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1341,110 +1535,24 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
* Layout A: lowest bit should be 1
* | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
* bit 0 PAGE_PRIVATE_NOT_POINTER
- * bit 1 PAGE_PRIVATE_ATOMIC_WRITE
- * bit 2 PAGE_PRIVATE_DUMMY_WRITE
- * bit 3 PAGE_PRIVATE_ONGOING_MIGRATION
- * bit 4 PAGE_PRIVATE_INLINE_INODE
- * bit 5 PAGE_PRIVATE_REF_RESOURCE
- * bit 6- f2fs private data
+ * bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
+ * bit 2 PAGE_PRIVATE_INLINE_INODE
+ * bit 3 PAGE_PRIVATE_REF_RESOURCE
+ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 5- f2fs private data
*
* Layout B: lowest bit should be 0
* page.private is a wrapped pointer.
*/
enum {
PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
- PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
- PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
+ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
PAGE_PRIVATE_MAX
};
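/*
 * Editor's sketch (not part of the patch): in Layout A the low bits up to
 * PAGE_PRIVATE_MAX are flags and everything above them is payload, with bit 0
 * always set so the value can never look like an (at least 2-byte aligned)
 * pointer. For example, private data 3 plus the REF_RESOURCE flag encodes as:
 *
 *	priv = BIT(PAGE_PRIVATE_NOT_POINTER) |
 *	       BIT(PAGE_PRIVATE_REF_RESOURCE) |
 *	       (3UL << PAGE_PRIVATE_MAX);
 *
 * Once every flag except NOT_POINTER is cleared and no payload remains, the
 * private field is detached entirely (Layout B then means a wrapped pointer).
 */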
-#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
-static inline bool page_private_##name(struct page *page) \
-{ \
- return PagePrivate(page) && \
- test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
- test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
-}
-
-#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
-static inline void set_page_private_##name(struct page *page) \
-{ \
- if (!PagePrivate(page)) { \
- get_page(page); \
- SetPagePrivate(page); \
- set_page_private(page, 0); \
- } \
- set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
- set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
-}
-
-#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
-static inline void clear_page_private_##name(struct page *page) \
-{ \
- clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
- if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
- set_page_private(page, 0); \
- if (PagePrivate(page)) { \
- ClearPagePrivate(page); \
- put_page(page); \
- }\
- } \
-}
-
-PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
-PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
-PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
-PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
-PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
-PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
-
-PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
-PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
-PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
-PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
-PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
-
-PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
-PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
-PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
-PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
-PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
-
-static inline unsigned long get_page_private_data(struct page *page)
-{
- unsigned long data = page_private(page);
-
- if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
- return 0;
- return data >> PAGE_PRIVATE_MAX;
-}
-
-static inline void set_page_private_data(struct page *page, unsigned long data)
-{
- if (!PagePrivate(page)) {
- get_page(page);
- SetPagePrivate(page);
- set_page_private(page, 0);
- }
- set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
- page_private(page) |= data << PAGE_PRIVATE_MAX;
-}
-
-static inline void clear_page_private_data(struct page *page)
-{
- page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
- if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
- set_page_private(page, 0);
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
- put_page(page);
- }
- }
-}
-
/* For compression */
enum compress_algorithm_type {
COMPRESS_LZO,
@@ -1465,7 +1573,7 @@ enum compress_flag {
#define COMPRESS_DATA_RESERVED_SIZE 4
struct compress_data {
__le32 clen; /* compressed data size */
- __le32 chksum; /* compressed data chksum */
+ __le32 chksum; /* compressed data checksum */
__le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
u8 cdata[]; /* compressed data */
};
@@ -1474,6 +1582,8 @@ struct compress_data {
#define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
+#define F2FS_ZSTD_DEFAULT_CLEVEL 1
+
#define COMPRESS_LEVEL_OFFSET 8
/* compress context */
@@ -1486,6 +1596,7 @@ struct compress_ctx {
unsigned int nr_rpages; /* total page number in rpages */
struct page **cpages; /* pages store compressed data in cluster */
unsigned int nr_cpages; /* total page number in cpages */
+ unsigned int valid_nr_cpages; /* valid page number in cpages */
void *rbuf; /* virtual mapped address on rpages */
struct compress_data *cbuf; /* virtual mapped address on cpages */
size_t rlen; /* valid data length in rbuf */
@@ -1507,6 +1618,7 @@ struct compress_io_ctx {
struct decompress_io_ctx {
u32 magic; /* magic number to indicate page is compressed */
struct inode *inode; /* inode the context belong to */
+ struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
pgoff_t cluster_idx; /* cluster index number */
unsigned int cluster_size; /* page count in cluster */
unsigned int log_cluster_size; /* log of cluster size */
@@ -1547,9 +1659,11 @@ struct decompress_io_ctx {
bool failed; /* IO error occurred before decompression? */
bool need_verity; /* need fs-verity verification after decompression? */
+ unsigned char compress_algorithm; /* backup algorithm type */
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */
+ struct work_struct free_work; /* work to free this structure later */
};
#define NULL_CLUSTER ((unsigned int)(~0))
@@ -1561,14 +1675,17 @@ struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
struct f2fs_super_block *raw_super; /* raw super block pointer */
- struct rw_semaphore sb_lock; /* lock for raw super block */
+ struct f2fs_rwsem sb_lock; /* lock for raw super block */
int valid_super_block; /* valid super block no */
unsigned long s_flag; /* flags for sbi */
struct mutex writepages; /* mutex for writepages() */
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
- unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
+ unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
+ unsigned int max_open_zones; /* max open zone resources of the zoned device */
+ /* For adjusting the priority of the data write position on zoned UFS */
+ unsigned int blkzone_alloc_policy;
#endif
/* for node-related operations */
@@ -1581,22 +1698,25 @@ struct f2fs_sb_info {
/* for bio operations */
struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
/* keep migration IO order for LFS mode */
- struct rw_semaphore io_order_lock;
- mempool_t *write_io_dummy; /* Dummy pages */
+ struct f2fs_rwsem io_order_lock;
+ pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
+ int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
int cur_cp_pack; /* remain current cp pack */
spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
- struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */
- struct rw_semaphore cp_rwsem; /* blocking FS operations */
- struct rw_semaphore node_write; /* locking node writes */
- struct rw_semaphore node_change; /* locking node change */
+ struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
+ struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
+ struct f2fs_rwsem node_write; /* locking node writes */
+ struct f2fs_rwsem node_change; /* locking node change */
wait_queue_head_t cp_wait;
unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
long interval_time[MAX_TIME]; /* to store thresholds */
struct ckpt_req_control cprc_info; /* for checkpoint request control */
+ struct cp_stats cp_stats; /* for time stat of checkpoint */
+ struct f2fs_rwsem cp_enable_rwsem; /* block cache/dio write */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
@@ -1614,14 +1734,17 @@ struct f2fs_sb_info {
struct mutex flush_lock; /* for flush exclusion */
/* for extent tree cache */
- struct radix_tree_root extent_tree_root;/* cache extent cache entries */
- struct mutex extent_tree_lock; /* locking extent radix tree */
- struct list_head extent_list; /* lru list for shrinker */
- spinlock_t extent_lock; /* locking extent lru list */
- atomic_t total_ext_tree; /* extent tree count */
- struct list_head zombie_list; /* extent zombie tree list */
- atomic_t total_zombie_tree; /* extent zombie tree count */
- atomic_t total_ext_node; /* extent info count */
+ struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
+ atomic64_t allocated_data_blocks; /* for block age extent_cache */
+ unsigned int max_read_extent_count; /* max read extent count per inode */
+
+ /* The threshold used for hot and warm data separation */
+ unsigned int hot_data_age_threshold;
+ unsigned int warm_data_age_threshold;
+ unsigned int last_age_weight;
+
+ /* control donate caches */
+ unsigned int donate_files;
/* basic filesystem units */
unsigned int log_sectors_per_block; /* log2 sectors per block */
@@ -1638,7 +1761,7 @@ struct f2fs_sb_info {
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
int dir_level; /* directory level */
- int readdir_ra; /* readahead inode in readdir */
+ bool readdir_ra; /* readahead inode in readdir */
u64 max_io_bytes; /* max io bytes to merge IOs */
block_t user_block_count; /* # of user blocks */
@@ -1652,12 +1775,15 @@ struct f2fs_sb_info {
block_t unusable_block_count; /* # of blocks saved by last cp */
unsigned int nquota_files; /* # of quota sysfile */
- struct rw_semaphore quota_sem; /* blocking cp for flags */
+ struct f2fs_rwsem quota_sem; /* blocking cp for flags */
+ struct task_struct *umount_lock_holder; /* s_umount lock holder */
/* # of pages, see count_type */
atomic_t nr_pages[NR_COUNT_TYPE];
/* # of allocated blocks */
struct percpu_counter alloc_valid_block_count;
+ /* # of node block writes as roll forward recovery */
+ struct percpu_counter rf_node_block_count;
/* writeback control */
atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */
@@ -1668,7 +1794,7 @@ struct f2fs_sb_info {
struct f2fs_mount_info mount_opt; /* mount options */
/* for cleaning operations */
- struct rw_semaphore gc_lock; /*
+ struct f2fs_rwsem gc_lock; /*
* semaphore for GC, avoid
* race between GC and GC or CP
*/
@@ -1677,20 +1803,26 @@ struct f2fs_sb_info {
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
unsigned int next_victim_seg[2]; /* next segment in victim section */
+ spinlock_t gc_remaining_trials_lock;
+ /* remaining trial count for GC_URGENT_* and GC_IDLE_* */
+ unsigned int gc_remaining_trials;
/* for skip statistic */
- unsigned int atomic_files; /* # of opened atomic file */
- unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
+ /* free sections reserved for pinned file */
+ unsigned int reserved_pin_section;
+
/* threshold for gc trials on pinned files */
- u64 gc_pin_file_threshold;
- struct rw_semaphore pin_sem;
+ unsigned short gc_pin_file_threshold;
+ struct f2fs_rwsem pin_sem;
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
/* migration granularity of garbage collection, unit: segment */
unsigned int migration_granularity;
+ /* migration window granularity of garbage collection, unit: segment */
+ unsigned int migration_window_granularity;
/*
* for stat information.
@@ -1702,21 +1834,26 @@ struct f2fs_sb_info {
unsigned int segment_count[2]; /* # of allocated segments */
unsigned int block_count[2]; /* # of allocated blocks */
atomic_t inplace_count; /* # of inplace update */
- atomic64_t total_hit_ext; /* # of lookup extent cache */
- atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */
- atomic64_t read_hit_largest; /* # of hit largest extent node */
- atomic64_t read_hit_cached; /* # of hit cached extent node */
+ /* # of lookup extent cache */
+ atomic64_t total_hit_ext[NR_EXTENT_CACHES];
+ /* # of hit rbtree extent node */
+ atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
+ /* # of hit cached extent node */
+ atomic64_t read_hit_cached[NR_EXTENT_CACHES];
+ /* # of hit largest extent node in read extent cache */
+ atomic64_t read_hit_largest;
atomic_t inline_xattr; /* # of inline_xattr inodes */
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
atomic_t compr_inode; /* # of compressed inodes */
atomic64_t compr_blocks; /* # of compressed blocks */
- atomic_t vw_cnt; /* # of volatile writes */
+ atomic_t swapfile_inode; /* # of swapfile inodes */
+ atomic_t atomic_files; /* # of opened atomic file */
atomic_t max_aw_cnt; /* max # of atomic writes */
- atomic_t max_vw_cnt; /* max # of volatile writes */
unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
+ atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp call */
#endif
spinlock_t stat_lock; /* lock for stat operations */
@@ -1724,7 +1861,7 @@ struct f2fs_sb_info {
unsigned int data_io_flag;
unsigned int node_io_flag;
- /* For sysfs suppport */
+ /* For sysfs support */
struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
struct completion s_kobj_unregister;
@@ -1745,21 +1882,29 @@ struct f2fs_sb_info {
unsigned int dirty_device; /* for checkpoint data flush */
spinlock_t dev_lock; /* protect dirty_device */
bool aligned_blksize; /* all devices have the same logical blksize */
+ unsigned int first_seq_zone_segno; /* first segno in sequential zone */
+ unsigned int bggc_io_aware; /* For adjusting the BG_GC priority when IO is pending */
+ unsigned int allocate_section_hint; /* the boundary position between devices */
+ unsigned int allocate_section_policy; /* determine the section writing priority */
/* For write statistics */
u64 sectors_written_start;
u64 kbytes_written;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
-
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_chksum_seed;
struct workqueue_struct *post_read_wq; /* post read workqueue */
- struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
- unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
+ /*
+ * If we are in irq context, let's update the error information in the
+ * on-disk superblock from this work item.
+ */
+ struct work_struct s_error_work;
+ unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
+ unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */
+ spinlock_t error_lock; /* protect errors/stop_reason array */
+ bool error_dirty; /* sb error info is dirty */
/* For reclaimed segs statistics per each GC mode */
unsigned int gc_segment_mode; /* GC state for reclaimed segments */
@@ -1770,6 +1915,15 @@ struct f2fs_sb_info {
int max_fragment_chunk; /* max chunk size for block fragmentation mode */
int max_fragment_hole; /* max hole size for block fragmentation mode */
+ /* For atomic write statistics */
+ atomic64_t current_atomic_write;
+ s64 peak_atomic_write;
+ u64 committed_atomic_block;
+ u64 revoked_atomic_block;
+
+ /* carve out reserved_blocks from total blocks */
+ bool carve_out;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
@@ -1789,8 +1943,9 @@ struct f2fs_sb_info {
#ifdef CONFIG_F2FS_IOSTAT
/* For app/fs IO statistics */
spinlock_t iostat_lock;
- unsigned long long rw_iostat[NR_IO_TYPE];
- unsigned long long prev_rw_iostat[NR_IO_TYPE];
+ unsigned long long iostat_count[NR_IO_TYPE];
+ unsigned long long iostat_bytes[NR_IO_TYPE];
+ unsigned long long prev_iostat_bytes[NR_IO_TYPE];
bool iostat_enable;
unsigned long iostat_next_period;
unsigned int iostat_period_ms;
@@ -1801,20 +1956,42 @@ struct f2fs_sb_info {
#endif
};
-struct f2fs_private_dio {
- struct inode *inode;
- void *orig_private;
- bio_end_io_t *orig_end_io;
- bool write;
-};
+/* Definitions to access f2fs_sb_info */
+#define SEGS_TO_BLKS(sbi, segs) \
+ ((segs) << (sbi)->log_blocks_per_seg)
+#define BLKS_TO_SEGS(sbi, blks) \
+ ((blks) >> (sbi)->log_blocks_per_seg)
+
+#define BLKS_PER_SEG(sbi) ((sbi)->blocks_per_seg)
+#define BLKS_PER_SEC(sbi) (SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec))
+#define SEGS_PER_SEC(sbi) ((sbi)->segs_per_sec)
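/*
 * Editor's sketch (not part of the patch): with the default 2MB segment of
 * 512 4KB blocks (log_blocks_per_seg == 9) the converters above behave as:
 *
 *	SEGS_TO_BLKS(sbi, 4)	== 4 << 9    == 2048 blocks
 *	BLKS_TO_SEGS(sbi, 2048)	== 2048 >> 9 == 4 segments
 *	BLKS_PER_SEC(sbi)	== segs_per_sec << 9
 */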
+
+__printf(3, 4)
+void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
+
+#define f2fs_err(sbi, fmt, ...) \
+ f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
+#define f2fs_warn(sbi, fmt, ...) \
+ f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
+#define f2fs_notice(sbi, fmt, ...) \
+ f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
+#define f2fs_info(sbi, fmt, ...) \
+ f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
+#define f2fs_debug(sbi, fmt, ...) \
+ f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
+
+#define f2fs_err_ratelimited(sbi, fmt, ...) \
+ f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
+#define f2fs_warn_ratelimited(sbi, fmt, ...) \
+ f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
+#define f2fs_info_ratelimited(sbi, fmt, ...) \
+ f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
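/*
 * Editor's sketch (not part of the patch): the wrappers prepend the KERN_*
 * level string to fmt, so callers pass only the message; the *_ratelimited
 * variants are meant for paths that can fire repeatedly, e.g.:
 *
 *	f2fs_warn_ratelimited(sbi, "invalid blkaddr %u, ino %lu",
 *				blkaddr, inode->i_ino);
 */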
#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define f2fs_show_injection_info(sbi, type) \
- printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
- KERN_INFO, sbi->sb->s_id, \
- f2fs_fault_name[type], \
- __func__, __builtin_return_address(0))
-static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+#define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \
+ __builtin_return_address(0))
+static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
+ const char *func, const char *parent_func)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
@@ -1827,12 +2004,14 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
atomic_inc(&ffi->inject_ops);
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_count[type]++;
+ f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
+ f2fs_fault_name[type], func, parent_func);
return true;
}
return false;
}
#else
-#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
return false;
@@ -1887,42 +2066,20 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
/*
* Inline functions
*/
-static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
+static inline u32 __f2fs_crc32(u32 crc, const void *address,
+ unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
-}
-
-static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
- unsigned int length)
-{
- return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
+ return crc32(crc, address, length);
}
-static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
- void *buf, size_t buf_size)
+static inline u32 f2fs_crc32(const void *address, unsigned int length)
{
- return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+ return __f2fs_crc32(F2FS_SUPER_MAGIC, address, length);
}
-static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
- const void *address, unsigned int length)
+static inline u32 f2fs_chksum(u32 crc, const void *address, unsigned int length)
{
- return __f2fs_crc32(sbi, crc, address, length);
+ return __f2fs_crc32(crc, address, length);
}
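/*
 * Editor's sketch (not part of the patch): after the switch to plain crc32(),
 * a checkpoint-style verification looks like the following (field names are
 * illustrative only):
 *
 *	u32 crc = f2fs_crc32(ckpt, crc_offset);
 *
 *	if (crc != le32_to_cpu(*(__le32 *)((char *)ckpt + crc_offset)))
 *		return -EFSCORRUPTED;
 *
 * f2fs_chksum() continues an existing crc value, which is how checksums
 * chained over several fields are built up.
 */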
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
@@ -1945,9 +2102,9 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
return F2FS_I_SB(mapping->host);
}
-static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
+static inline struct f2fs_sb_info *F2FS_F_SB(const struct folio *folio)
{
- return F2FS_M_SB(page_file_mapping(page));
+ return F2FS_M_SB(folio->mapping);
}
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -1955,19 +2112,29 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
return (struct f2fs_super_block *)(sbi->raw_super);
}
+static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
+ pgoff_t index)
+{
+ pgoff_t idx_in_folio = index % folio_nr_pages(folio);
+
+ return (struct f2fs_super_block *)
+ (page_address(folio_page(folio, idx_in_folio)) +
+ F2FS_SUPER_OFFSET);
+}
+
static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
return (struct f2fs_checkpoint *)(sbi->ckpt);
}
-static inline struct f2fs_node *F2FS_NODE(struct page *page)
+static inline struct f2fs_node *F2FS_NODE(const struct folio *folio)
{
- return (struct f2fs_node *)page_address(page);
+ return (struct f2fs_node *)folio_address(folio);
}
-static inline struct f2fs_inode *F2FS_INODE(struct page *page)
+static inline struct f2fs_inode *F2FS_INODE(const struct folio *folio)
{
- return &((struct f2fs_node *)page_address(page))->i;
+ return &((struct f2fs_node *)folio_address(folio))->i;
}
static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
@@ -2005,6 +2172,16 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
return sbi->node_inode->i_mapping;
}
+static inline bool is_meta_folio(struct folio *folio)
+{
+ return folio->mapping == META_MAPPING(F2FS_F_SB(folio));
+}
+
+static inline bool is_node_folio(struct folio *folio)
+{
+ return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio));
+}
+
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
return test_bit(type, &sbi->s_flag);
@@ -2086,29 +2263,139 @@ static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
+#define init_f2fs_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_f2fs_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
+ const char *sem_name, struct lock_class_key *key)
+{
+ __init_rwsem(&sem->internal_rwsem, sem_name, key);
+#ifdef CONFIG_F2FS_UNFAIR_RWSEM
+ init_waitqueue_head(&sem->read_waiters);
+#endif
+}
+
+static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
+{
+ return rwsem_is_locked(&sem->internal_rwsem);
+}
+
+static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
+{
+ return rwsem_is_contended(&sem->internal_rwsem);
+}
+
+static inline void f2fs_down_read(struct f2fs_rwsem *sem)
+{
+#ifdef CONFIG_F2FS_UNFAIR_RWSEM
+ wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
+#else
+ down_read(&sem->internal_rwsem);
+#endif
+}
+
+static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
+{
+ return down_read_trylock(&sem->internal_rwsem);
+}
+
+static inline void f2fs_up_read(struct f2fs_rwsem *sem)
+{
+ up_read(&sem->internal_rwsem);
+}
+
+static inline void f2fs_down_write(struct f2fs_rwsem *sem)
+{
+ down_write(&sem->internal_rwsem);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_read_nested(&sem->internal_rwsem, subclass);
+}
+
+static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_write_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
+#endif
+
+static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
+{
+ return down_write_trylock(&sem->internal_rwsem);
+}
+
+static inline void f2fs_up_write(struct f2fs_rwsem *sem)
+{
+ up_write(&sem->internal_rwsem);
+#ifdef CONFIG_F2FS_UNFAIR_RWSEM
+ wake_up_all(&sem->read_waiters);
+#endif
+}
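/*
 * Editor's sketch (not part of the patch): with CONFIG_F2FS_UNFAIR_RWSEM,
 * f2fs_down_read() loops on down_read_trylock() via read_waiters instead of
 * queueing in the rwsem, and f2fs_up_write() wakes all waiting readers; the
 * locking pattern at call sites stays the same as with a plain rwsem:
 *
 *	f2fs_down_write(&sbi->cp_rwsem);
 *	... checkpoint-critical update ...
 *	f2fs_up_write(&sbi->cp_rwsem);
 */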
+
+static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
+{
+ unsigned long flags;
+ unsigned char *nat_bits;
+
+ /*
+ * In order to re-enable nat_bits we would need to trigger fsck.f2fs via
+ * set_sbi_flag(sbi, SBI_NEED_FSCK), but that may incur a huge cost,
+ * so let's rely on a regular fsck or an unclean shutdown.
+ */
+
+ if (lock)
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
+ nat_bits = NM_I(sbi)->nat_bits;
+ NM_I(sbi)->nat_bits = NULL;
+ if (lock)
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+
+ kvfree(nat_bits);
+}
+
+static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc)
+{
+ bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+
+ return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
+}
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
- down_read(&sbi->cp_rwsem);
+ f2fs_down_read(&sbi->cp_rwsem);
}
static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
- return down_read_trylock(&sbi->cp_rwsem);
+ if (time_to_inject(sbi, FAULT_LOCK_OP))
+ return 0;
+ return f2fs_down_read_trylock(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
- up_read(&sbi->cp_rwsem);
+ f2fs_up_read(&sbi->cp_rwsem);
}
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- down_write(&sbi->cp_rwsem);
+ f2fs_down_write(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
- up_write(&sbi->cp_rwsem);
+ f2fs_up_write(&sbi->cp_rwsem);
}
static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
@@ -2148,13 +2435,11 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
-static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+static inline bool __allow_reserved_root(struct f2fs_sb_info *sbi,
struct inode *inode, bool cap)
{
if (!inode)
return true;
- if (!test_opt(sbi, RESERVE_ROOT))
- return false;
if (IS_NOQUOTA(inode))
return true;
if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
@@ -2167,11 +2452,32 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
return false;
}
+static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, bool cap)
+{
+ block_t avail_user_block_count;
+
+ avail_user_block_count = sbi->user_block_count -
+ sbi->current_reserved_blocks;
+
+ if (test_opt(sbi, RESERVE_ROOT) && !__allow_reserved_root(sbi, inode, cap))
+ avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (avail_user_block_count > sbi->unusable_block_count)
+ avail_user_block_count -= sbi->unusable_block_count;
+ else
+ avail_user_block_count = 0;
+ }
+
+ return avail_user_block_count;
+}
+
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
- struct inode *inode, blkcnt_t *count)
+ struct inode *inode, blkcnt_t *count, bool partial)
{
- blkcnt_t diff = 0, release = 0;
+ long long diff = 0, release = 0;
block_t avail_user_block_count;
int ret;
@@ -2180,7 +2486,6 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
return ret;
if (time_to_inject(sbi, FAULT_BLOCK)) {
- f2fs_show_injection_info(sbi, FAULT_BLOCK);
release = *count;
goto release_quota;
}
@@ -2192,30 +2497,27 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
spin_lock(&sbi->stat_lock);
- sbi->total_valid_block_count += (block_t)(*count);
- avail_user_block_count = sbi->user_block_count -
- sbi->current_reserved_blocks;
- if (!__allow_reserved_blocks(sbi, inode, true))
- avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
- if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
- if (avail_user_block_count > sbi->unusable_block_count)
- avail_user_block_count -= sbi->unusable_block_count;
- else
- avail_user_block_count = 0;
- }
- if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
- diff = sbi->total_valid_block_count - avail_user_block_count;
+ avail_user_block_count = get_available_block_count(sbi, inode, true);
+ diff = (long long)sbi->total_valid_block_count + *count -
+ avail_user_block_count;
+ if (unlikely(diff > 0)) {
+ if (!partial) {
+ spin_unlock(&sbi->stat_lock);
+ release = *count;
+ goto enospc;
+ }
if (diff > *count)
diff = *count;
*count -= diff;
release = diff;
- sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
}
+ sbi->total_valid_block_count += (block_t)(*count);
+
spin_unlock(&sbi->stat_lock);
if (unlikely(release)) {
@@ -2232,19 +2534,92 @@ release_quota:
return -ENOSPC;
}
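/*
 * Editor's sketch (not part of the patch): the new @partial argument to
 * inc_valid_block_count() controls what happens when the request would exceed
 * the available space. With partial == false the whole request fails; with
 * partial == true *count is trimmed to what fits and only a zero grant fails:
 *
 *	blkcnt_t count = want;
 *
 *	if (inc_valid_block_count(sbi, inode, &count, true))
 *		return -ENOSPC;		// not even one block available
 *	// count may now be smaller than want
 */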
-__printf(2, 3)
-void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
+#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
+static inline bool folio_test_f2fs_##name(const struct folio *folio) \
+{ \
+ unsigned long priv = (unsigned long)folio->private; \
+ unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
+ (1UL << PAGE_PRIVATE_##flagname); \
+ return (priv & v) == v; \
+} \
+static inline bool page_private_##name(struct page *page) \
+{ \
+ return PagePrivate(page) && \
+ test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
+ test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+}
-#define f2fs_err(sbi, fmt, ...) \
- f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
-#define f2fs_warn(sbi, fmt, ...) \
- f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
-#define f2fs_notice(sbi, fmt, ...) \
- f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
-#define f2fs_info(sbi, fmt, ...) \
- f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
-#define f2fs_debug(sbi, fmt, ...) \
- f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
+#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
+static inline void folio_set_f2fs_##name(struct folio *folio) \
+{ \
+ unsigned long v = (1UL << PAGE_PRIVATE_NOT_POINTER) | \
+ (1UL << PAGE_PRIVATE_##flagname); \
+ if (!folio->private) \
+ folio_attach_private(folio, (void *)v); \
+ else { \
+ v |= (unsigned long)folio->private; \
+ folio->private = (void *)v; \
+ } \
+} \
+static inline void set_page_private_##name(struct page *page) \
+{ \
+ if (!PagePrivate(page)) \
+ attach_page_private(page, (void *)0); \
+ set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
+ set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+}
+
+#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
+static inline void folio_clear_f2fs_##name(struct folio *folio) \
+{ \
+ unsigned long v = (unsigned long)folio->private; \
+ \
+ v &= ~(1UL << PAGE_PRIVATE_##flagname); \
+ if (v == (1UL << PAGE_PRIVATE_NOT_POINTER)) \
+ folio_detach_private(folio); \
+ else \
+ folio->private = (void *)v; \
+} \
+static inline void clear_page_private_##name(struct page *page) \
+{ \
+ clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+ if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
+ detach_page_private(page); \
+}
+
+PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
+PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
+PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
+
+PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
+PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
+PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
+
+PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
+PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
+PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
+
+static inline unsigned long folio_get_f2fs_data(struct folio *folio)
+{
+ unsigned long data = (unsigned long)folio->private;
+
+ if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
+ return 0;
+ return data >> PAGE_PRIVATE_MAX;
+}
+
+static inline void folio_set_f2fs_data(struct folio *folio, unsigned long data)
+{
+ data = (1UL << PAGE_PRIVATE_NOT_POINTER) | (data << PAGE_PRIVATE_MAX);
+
+ if (!folio_test_private(folio))
+ folio_attach_private(folio, (void *)data);
+ else
+ folio->private = (void *)((unsigned long)folio->private | data);
+}
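/*
 * Editor's sketch (not part of the patch): the folio_* helpers mirror the page
 * ones but operate on folio->private directly, e.g. marking a folio as under
 * GC migration and attaching payload data:
 *
 *	folio_set_f2fs_gcing(folio);
 *	if (folio_test_f2fs_gcing(folio))
 *		...;
 *	folio_set_f2fs_data(folio, nr_blocks);	// stored above PAGE_PRIVATE_MAX
 *	nr = folio_get_f2fs_data(folio);	// 0 if private holds a pointer
 */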
static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode,
@@ -2253,8 +2628,14 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
- sbi->total_valid_block_count -= (block_t)count;
+ if (unlikely(sbi->total_valid_block_count < count)) {
+ f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u",
+ sbi->total_valid_block_count, inode->i_ino, count);
+ sbi->total_valid_block_count = 0;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ } else {
+ sbi->total_valid_block_count -= count;
+ }
if (sbi->reserved_blocks &&
sbi->current_reserved_blocks < sbi->reserved_blocks)
sbi->current_reserved_blocks = min(sbi->reserved_blocks,
@@ -2310,6 +2691,28 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}
+static inline void inc_atomic_write_cnt(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ u64 current_write;
+
+ fi->atomic_write_cnt++;
+ atomic64_inc(&sbi->current_atomic_write);
+ current_write = atomic64_read(&sbi->current_atomic_write);
+ if (current_write > sbi->peak_atomic_write)
+ sbi->peak_atomic_write = current_write;
+}
+
+static inline void release_atomic_write_cnt(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
+ fi->atomic_write_cnt = 0;
+}
+
static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
return atomic_read(&sbi->nr_pages[count_type]);
@@ -2322,11 +2725,8 @@ static inline int get_dirty_pages(struct inode *inode)
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
- unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
- unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
- sbi->log_blocks_per_seg;
-
- return segs / sbi->segs_per_sec;
+ return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
+ BLKS_PER_SEC(sbi));
}
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
@@ -2375,7 +2775,7 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
if (__cp_payload(sbi) > 0) {
if (flag == NAT_BITMAP)
- return &ckpt->sit_nat_version_bitmap;
+ return tmp_ptr;
else
return (unsigned char *)ckpt + F2FS_BLKSIZE;
} else {
@@ -2390,7 +2790,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
if (sbi->cur_cp_pack == 2)
- start_addr += sbi->blocks_per_seg;
+ start_addr += BLKS_PER_SEG(sbi);
return start_addr;
}
@@ -2399,7 +2799,7 @@ static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
if (sbi->cur_cp_pack == 1)
- start_addr += sbi->blocks_per_seg;
+ start_addr += BLKS_PER_SEG(sbi);
return start_addr;
}
@@ -2413,11 +2813,13 @@ static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
+extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
struct inode *inode, bool is_inode)
{
block_t valid_block_count;
- unsigned int valid_node_count, user_block_count;
+ unsigned int valid_node_count, avail_user_node_count;
+ unsigned int avail_user_block_count;
int err;
if (is_inode) {
@@ -2432,29 +2834,26 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
return err;
}
- if (time_to_inject(sbi, FAULT_BLOCK)) {
- f2fs_show_injection_info(sbi, FAULT_BLOCK);
+ if (time_to_inject(sbi, FAULT_BLOCK))
goto enospc;
- }
spin_lock(&sbi->stat_lock);
- valid_block_count = sbi->total_valid_block_count +
- sbi->current_reserved_blocks + 1;
+ valid_block_count = sbi->total_valid_block_count + 1;
+ avail_user_block_count = get_available_block_count(sbi, inode,
+ test_opt(sbi, RESERVE_NODE));
- if (!__allow_reserved_blocks(sbi, inode, false))
- valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
- user_block_count = sbi->user_block_count;
- if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
- user_block_count -= sbi->unusable_block_count;
-
- if (unlikely(valid_block_count > user_block_count)) {
+ if (unlikely(valid_block_count > avail_user_block_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
+ avail_user_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+ if (test_opt(sbi, RESERVE_NODE) &&
+ !__allow_reserved_root(sbi, inode, true))
+ avail_user_node_count -= F2FS_OPTION(sbi).root_reserved_nodes;
valid_node_count = sbi->total_valid_node_count + 1;
- if (unlikely(valid_node_count > sbi->total_node_count)) {
+ if (unlikely(valid_node_count > avail_user_node_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
@@ -2488,11 +2887,17 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
{
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, !sbi->total_valid_block_count);
- f2fs_bug_on(sbi, !sbi->total_valid_node_count);
+ if (unlikely(!sbi->total_valid_block_count ||
+ !sbi->total_valid_node_count)) {
+ f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
+ sbi->total_valid_block_count,
+ sbi->total_valid_node_count);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ } else {
+ sbi->total_valid_block_count--;
+ sbi->total_valid_node_count--;
+ }
- sbi->total_valid_node_count--;
- sbi->total_valid_block_count--;
if (sbi->reserved_blocks &&
sbi->current_reserved_blocks < sbi->reserved_blocks)
sbi->current_reserved_blocks++;
@@ -2533,74 +2938,75 @@ static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
-static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
- pgoff_t index, bool for_write)
+static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping,
+ pgoff_t index, bool for_write)
{
- struct page *page;
+ struct folio *folio;
+ unsigned int flags;
if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+ fgf_t fgf_flags;
+
if (!for_write)
- page = find_get_page_flags(mapping, index,
- FGP_LOCK | FGP_ACCESSED);
+ fgf_flags = FGP_LOCK | FGP_ACCESSED;
else
- page = find_lock_page(mapping, index);
- if (page)
- return page;
-
- if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
- f2fs_show_injection_info(F2FS_M_SB(mapping),
- FAULT_PAGE_ALLOC);
- return NULL;
- }
+ fgf_flags = FGP_LOCK;
+ folio = __filemap_get_folio(mapping, index, fgf_flags, 0);
+ if (!IS_ERR(folio))
+ return folio;
+
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
+ return ERR_PTR(-ENOMEM);
}
if (!for_write)
- return grab_cache_page(mapping, index);
- return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
+ return filemap_grab_folio(mapping, index);
+
+ flags = memalloc_nofs_save();
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ memalloc_nofs_restore(flags);
+
+ return folio;
}
-static inline struct page *f2fs_pagecache_get_page(
+static inline struct folio *f2fs_filemap_get_folio(
struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp_mask)
+ fgf_t fgp_flags, gfp_t gfp_mask)
{
- if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
- f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
- return NULL;
- }
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
+ return ERR_PTR(-ENOMEM);
- return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
+ return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask);
}
-static inline void f2fs_copy_page(struct page *src, struct page *dst)
+static inline void f2fs_folio_put(struct folio *folio, bool unlock)
{
- char *src_kaddr = kmap(src);
- char *dst_kaddr = kmap(dst);
+ if (IS_ERR_OR_NULL(folio))
+ return;
- memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
- kunmap(dst);
- kunmap(src);
+ if (unlock) {
+ f2fs_bug_on(F2FS_F_SB(folio), !folio_test_locked(folio));
+ folio_unlock(folio);
+ }
+ folio_put(folio);
}
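/*
 * Editor's sketch (not part of the patch): the folio lookup helpers above
 * return ERR_PTR() rather than NULL on failure, and f2fs_folio_put() accepts
 * both NULL and error pointers, so a typical caller pairs them as:
 *
 *	struct folio *folio;
 *
 *	folio = f2fs_grab_cache_folio(mapping, index, true);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... modify folio contents ...
 *	f2fs_folio_put(folio, true);	// unlock and drop the reference
 */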
-static inline void f2fs_put_page(struct page *page, int unlock)
+static inline void f2fs_put_page(struct page *page, bool unlock)
{
if (!page)
return;
-
- if (unlock) {
- f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
- unlock_page(page);
- }
- put_page(page);
+ f2fs_folio_put(page_folio(page), unlock);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
- if (dn->node_page)
- f2fs_put_page(dn->node_page, 1);
- if (dn->inode_page && dn->node_page != dn->inode_page)
- f2fs_put_page(dn->inode_page, 0);
- dn->node_page = NULL;
- dn->inode_page = NULL;
+ if (dn->node_folio)
+ f2fs_folio_put(dn->node_folio, true);
+ if (dn->inode_folio && dn->node_folio != dn->inode_folio)
+ f2fs_folio_put(dn->inode_folio, false);
+ dn->node_folio = NULL;
+ dn->inode_folio = NULL;
}
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
@@ -2626,10 +3032,8 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
if (nofail)
return f2fs_kmem_cache_alloc_nofail(cachep, flags);
- if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
- f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
+ if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
return NULL;
- }
return kmem_cache_alloc(cachep, flags);
}
@@ -2653,18 +3057,34 @@ static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
return false;
}
+static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
+}
+
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
+ bool zoned_gc = (type == GC_TIME &&
+ F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
+
if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
- if (is_inflight_io(sbi, type))
+ if (sbi->bggc_io_aware == AWARE_READ_IO && is_inflight_read_io(sbi))
return false;
+ if (sbi->bggc_io_aware == AWARE_ALL_IO && is_inflight_io(sbi, type))
+ return false;
+
+ if (sbi->gc_mode == GC_URGENT_MID)
+ return true;
if (sbi->gc_mode == GC_URGENT_LOW &&
(type == DISCARD_TIME || type == GC_TIME))
return true;
+ if (zoned_gc)
+ return true;
+
return f2fs_time_over(sbi, type);
}
@@ -2677,9 +3097,9 @@ static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
-static inline bool IS_INODE(struct page *page)
+static inline bool IS_INODE(const struct folio *folio)
{
- struct f2fs_node *p = F2FS_NODE(page);
+ struct f2fs_node *p = F2FS_NODE(folio);
return RAW_IS_INODE(p);
}
@@ -2696,31 +3116,32 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}
static inline int f2fs_has_extra_attr(struct inode *inode);
-static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+static inline unsigned int get_dnode_base(struct inode *inode,
+ struct folio *node_folio)
{
- struct f2fs_node *raw_node;
- __le32 *addr_array;
- int base = 0;
- bool is_inode = IS_INODE(node_page);
+ if (!IS_INODE(node_folio))
+ return 0;
- raw_node = F2FS_NODE(node_page);
+ return inode ? get_extra_isize(inode) :
+ offset_in_addr(&F2FS_NODE(node_folio)->i);
+}
- if (is_inode) {
- if (!inode)
- /* from GC path only */
- base = offset_in_addr(&raw_node->i);
- else if (f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
- }
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct folio *node_folio)
+{
+ return blkaddr_in_node(F2FS_NODE(node_folio)) +
+ get_dnode_base(inode, node_folio);
+}
- addr_array = blkaddr_in_node(raw_node);
- return le32_to_cpu(addr_array[base + offset]);
+static inline block_t data_blkaddr(struct inode *inode,
+ struct folio *node_folio, unsigned int offset)
+{
+ return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
- return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
+ return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node);
}
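/*
 * Illustrative sketch: with the helpers above, fetching the block address at
 * a given slot behind a dnode is plain pointer arithmetic over the node
 * folio, e.g.
 *
 *	__le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);
 *	block_t blkaddr = le32_to_cpu(addr[dn->ofs_in_node]);
 *
 * which is exactly what f2fs_data_blkaddr() expands to via data_blkaddr().
 */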
static inline int f2fs_test_bit(unsigned int nr, char *addr)
@@ -2728,7 +3149,7 @@ static inline int f2fs_test_bit(unsigned int nr, char *addr)
int mask;
addr += (nr >> 3);
- mask = 1 << (7 - (nr & 0x07));
+ mask = BIT(7 - (nr & 0x07));
return mask & *addr;
}
@@ -2737,7 +3158,7 @@ static inline void f2fs_set_bit(unsigned int nr, char *addr)
int mask;
addr += (nr >> 3);
- mask = 1 << (7 - (nr & 0x07));
+ mask = BIT(7 - (nr & 0x07));
*addr |= mask;
}
@@ -2746,7 +3167,7 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr)
int mask;
addr += (nr >> 3);
- mask = 1 << (7 - (nr & 0x07));
+ mask = BIT(7 - (nr & 0x07));
*addr &= ~mask;
}
@@ -2756,7 +3177,7 @@ static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
int ret;
addr += (nr >> 3);
- mask = 1 << (7 - (nr & 0x07));
+ mask = BIT(7 - (nr & 0x07));
ret = mask & *addr;
*addr |= mask;
return ret;
@@ -2768,7 +3189,7 @@ static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
int ret;
addr += (nr >> 3);
- mask = 1 << (7 - (nr & 0x07));
+ mask = BIT(7 - (nr & 0x07));
ret = mask & *addr;
*addr &= ~mask;
return ret;
@@ -2779,7 +3200,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
int mask;
addr += (nr >> 3);
- mask = 1 << (7 - (nr & 0x07));
+ mask = BIT(7 - (nr & 0x07));
*addr ^= mask;
}
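/*
 * Illustrative note: the bit helpers above address bits MSB-first within each
 * byte. For nr = 9:
 *
 *	addr += 9 >> 3;			// advance to byte 1
 *	mask  = BIT(7 - (9 & 0x07));	// BIT(6) == 0x40
 *
 * so bit 9 maps to the second-most-significant bit of the second byte.
 */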
@@ -2797,11 +3218,14 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
#define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
+#define F2FS_DEVICE_ALIAS_FL 0x80000000 /* File for aliasing a device */
+
+#define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
- F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
+ F2FS_CASEFOLD_FL)
/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
@@ -2810,6 +3234,8 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
+#define IS_DEVICE_ALIASING(inode) (F2FS_I(inode)->i_flags & F2FS_DEVICE_ALIAS_FL)
+
static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
if (S_ISDIR(mode))
@@ -2832,7 +3258,6 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
return;
fallthrough;
case FI_DATA_EXIST:
- case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
f2fs_mark_inode_dirty_sync(inode, true);
@@ -2899,6 +3324,8 @@ static inline void f2fs_i_blocks_write(struct inode *inode,
set_inode_flag(inode, FI_AUTO_RECOVER);
}
+static inline bool f2fs_is_atomic_file(struct inode *inode);
+
static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
@@ -2908,6 +3335,10 @@ static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
return;
i_size_write(inode, i_size);
+
+ if (f2fs_is_atomic_file(inode))
+ return;
+
f2fs_mark_inode_dirty_sync(inode, true);
if (clean || recover)
set_inode_flag(inode, FI_AUTO_RECOVER);
@@ -2922,7 +3353,7 @@ static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
static inline void f2fs_i_gc_failures_write(struct inode *inode,
unsigned int count)
{
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
+ F2FS_I(inode)->i_gc_failures = count;
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -2950,8 +3381,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DENTRY, fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
set_bit(FI_DATA_EXIST, fi->flags);
- if (ri->i_inline & F2FS_INLINE_DOTS)
- set_bit(FI_INLINE_DOTS, fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
@@ -2972,8 +3401,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DENTRY;
if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(inode, FI_INLINE_DOTS))
- ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
@@ -3014,26 +3441,21 @@ static inline bool f2fs_need_compress_data(struct inode *inode)
return false;
}
-static inline unsigned int addrs_per_inode(struct inode *inode)
+static inline unsigned int addrs_per_page(struct inode *inode,
+ bool is_inode)
{
- unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
- get_inline_xattr_addrs(inode);
+ unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) -
+ get_inline_xattr_addrs(inode)) : DEF_ADDRS_PER_BLOCK;
- if (!f2fs_compressed_file(inode))
- return addrs;
- return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
-}
-
-static inline unsigned int addrs_per_block(struct inode *inode)
-{
- if (!f2fs_compressed_file(inode))
- return DEF_ADDRS_PER_BLOCK;
- return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
+ if (f2fs_compressed_file(inode))
+ return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
+ return addrs;
}
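/*
 * Illustrative example (assuming the usual 4KiB block size, where
 * DEF_ADDRS_PER_BLOCK is 1018): for a compressed file with
 * i_cluster_size == 16, addrs_per_page(inode, false) returns
 * ALIGN_DOWN(1018, 16) == 1008, so a direct node block only maps whole
 * clusters.
 */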
-static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
+static inline
+void *inline_xattr_addr(struct inode *inode, const struct folio *folio)
{
- struct f2fs_inode *ri = F2FS_INODE(page);
+ struct f2fs_inode *ri = F2FS_INODE(folio);
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
get_inline_xattr_addrs(inode)]);
@@ -3046,6 +3468,10 @@ static inline int inline_xattr_size(struct inode *inode)
return 0;
}
+/*
+ * Notice: checking the inline_data flag without the inode page lock is
+ * unsafe; it can change at any time via f2fs_convert_inline_folio().
+ */
static inline int f2fs_has_inline_data(struct inode *inode)
{
return is_inode_flag_set(inode, FI_INLINE_DATA);
@@ -3056,11 +3482,6 @@ static inline int f2fs_exist_data(struct inode *inode)
return is_inode_flag_set(inode, FI_DATA_EXIST);
}
-static inline int f2fs_has_inline_dots(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_INLINE_DOTS);
-}
-
static inline int f2fs_is_mmap_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_MMAP_FILE);
@@ -3076,32 +3497,16 @@ static inline bool f2fs_is_atomic_file(struct inode *inode)
return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
-static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
+static inline bool f2fs_is_cow_file(struct inode *inode)
{
- return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
+ return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline bool f2fs_is_volatile_file(struct inode *inode)
+static inline void *inline_data_addr(struct inode *inode, struct folio *folio)
{
- return is_inode_flag_set(inode, FI_VOLATILE_FILE);
-}
-
-static inline bool f2fs_is_first_block_written(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
-}
-
-static inline bool f2fs_is_drop_cache(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_DROP_CACHE);
-}
+ __le32 *addr = get_dnode_addr(inode, folio);
-static inline void *inline_data_addr(struct inode *inode, struct page *page)
-{
- struct f2fs_inode *ri = F2FS_INODE(page);
- int extra_size = get_extra_isize(inode);
-
- return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
+ return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
}
static inline int f2fs_has_inline_dentry(struct inode *inode)
@@ -3116,26 +3521,31 @@ static inline int is_file(struct inode *inode, int type)
static inline void set_file(struct inode *inode, int type)
{
+ if (is_file(inode, type))
+ return;
F2FS_I(inode)->i_advise |= type;
f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void clear_file(struct inode *inode, int type)
{
+ if (!is_file(inode, type))
+ return;
F2FS_I(inode)->i_advise &= ~type;
f2fs_mark_inode_dirty_sync(inode, true);
}
static inline bool f2fs_is_time_consistent(struct inode *inode)
{
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+ struct timespec64 ts = inode_get_atime(inode);
+
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
- &F2FS_I(inode)->i_crtime))
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
return false;
return true;
}
@@ -3177,26 +3587,26 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
-static inline bool is_dot_dotdot(const u8 *name, size_t len)
+static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
{
- if (len == 1 && name[0] == '.')
- return true;
-
- if (len == 2 && name[0] == '.' && name[1] == '.')
- return true;
+ if (time_to_inject(sbi, FAULT_KMALLOC))
+ return NULL;
- return false;
+ return kmalloc(size, flags);
}
-static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
- size_t size, gfp_t flags)
+static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
{
- if (time_to_inject(sbi, FAULT_KMALLOC)) {
- f2fs_show_injection_info(sbi, FAULT_KMALLOC);
+ if (time_to_inject(sbi, FAULT_KMALLOC))
return NULL;
- }
- return kmalloc(size, flags);
+ return __getname();
+}
+
+static inline void f2fs_putname(char *buf)
+{
+ __putname(buf);
}
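/*
 * Illustrative sketch: f2fs_getname()/f2fs_putname() wrap __getname() and
 * __putname() so name-buffer allocations honor FAULT_KMALLOC injection.
 * A typical caller pairs them as
 *
 *	char *buf = f2fs_getname(sbi);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... build the name in buf ...
 *	f2fs_putname(buf);
 */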
static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
@@ -3208,10 +3618,8 @@ static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
- if (time_to_inject(sbi, FAULT_KVMALLOC)) {
- f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
+ if (time_to_inject(sbi, FAULT_KVMALLOC))
return NULL;
- }
return kvmalloc(size, flags);
}
@@ -3222,6 +3630,14 @@ static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}
+static inline void *f2fs_vmalloc(struct f2fs_sb_info *sbi, size_t size)
+{
+ if (time_to_inject(sbi, FAULT_VMALLOC))
+ return NULL;
+
+ return vmalloc(size);
+}
+
static inline int get_extra_isize(struct inode *inode)
{
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
@@ -3236,6 +3652,8 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
+#define F2FS_MIN_EXTRA_ATTR_SIZE (sizeof(__le32))
+
#define F2FS_TOTAL_EXTRA_ATTR_SIZE \
(offsetof(struct f2fs_inode, i_extra_end) - \
offsetof(struct f2fs_inode, i_extra_isize)) \
@@ -3246,7 +3664,7 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
sizeof((f2fs_inode)->field)) \
<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
-#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
+#define __is_large_section(sbi) (SEGS_PER_SEC(sbi) > 1)
#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
@@ -3255,11 +3673,9 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
- if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
blkaddr, type);
- f2fs_bug_on(sbi, 1);
- }
}
static inline bool __is_valid_data_blkaddr(block_t blkaddr)
@@ -3274,20 +3690,21 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
* file.c
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
-void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
-int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags);
-int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
+ bool readonly, bool need_lock);
int f2fs_precache_extents(struct inode *inode);
-int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-int f2fs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
+int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int f2fs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
@@ -3297,14 +3714,15 @@ int f2fs_pin_file_control(struct inode *inode, bool inc);
* inode.c
*/
void f2fs_set_inode_flags(struct inode *inode);
-bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio);
+void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_update_inode(struct inode *inode, struct page *node_page);
+void f2fs_update_inode(struct inode *inode, struct folio *node_folio);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
+void f2fs_remove_donate_inode(struct inode *inode);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);
@@ -3314,41 +3732,56 @@ void f2fs_handle_failed_inode(struct inode *inode);
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
+int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct inode **new_inode);
/*
* dir.c
*/
-unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
+#if IS_ENABLED(CONFIG_UNICODE)
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname);
+void f2fs_free_casefolded_name(struct f2fs_filename *fname);
+#else
+static inline int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname)
+{
+ return 0;
+}
+
+static inline void f2fs_free_casefolded_name(struct f2fs_filename *fname)
+{
+}
+#endif /* CONFIG_UNICODE */
+
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots);
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d);
-struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct f2fs_filename *fname, struct page *dpage);
+struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct f2fs_filename *fname, struct folio *dfolio);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- const struct f2fs_filename *fname,
- struct page **res_page);
+ const struct f2fs_filename *fname, struct folio **res_folio);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- const struct qstr *child, struct page **res_page);
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
+ const struct qstr *child, struct folio **res_folio);
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
- struct page **page);
+ struct folio **folio);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
- struct page *page, struct inode *inode);
-bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+ struct folio *folio, struct inode *inode);
+bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio,
const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct fscrypt_str *name, f2fs_hash_t name_hash,
@@ -3359,9 +3792,10 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode);
-void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio,
struct inode *dir, struct inode *inode);
-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
+ struct f2fs_filename *fname);
bool f2fs_empty_dir(struct inode *dir);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
@@ -3379,9 +3813,12 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
-int f2fs_quota_sync(struct super_block *sb, int type);
+int f2fs_do_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
+void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason);
+void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
@@ -3395,32 +3832,34 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
* node.c
*/
struct node_info;
+enum node_type;
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
- struct node_info *ni);
+ struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
unsigned int seq_id);
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
-struct page *f2fs_new_inode_page(struct inode *inode);
-struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
+struct folio *f2fs_new_inode_folio(struct inode *inode);
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
-struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
-struct page *f2fs_get_node_page_ra(struct page *parent, int start);
-int f2fs_move_node_page(struct page *node_page, int gc_type);
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ enum node_type node_type);
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino);
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid);
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
@@ -3433,12 +3872,11 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
-int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
-int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
-int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio);
+int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio);
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
@@ -3449,18 +3887,16 @@ void f2fs_destroy_node_manager_caches(void);
* segment.c
*/
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
-void f2fs_register_inmem_page(struct inode *inode, struct page *page);
-void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
-void f2fs_drop_inmem_pages(struct inode *inode);
-void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
-int f2fs_commit_inmem_pages(struct inode *inode);
+int f2fs_commit_atomic_write(struct inode *inode);
+void f2fs_abort_atomic_write(struct inode *inode, bool clean);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
@@ -3474,22 +3910,22 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
-void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
-void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
- unsigned int *newseg, bool new_sec, int dir);
-void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
unsigned int start, unsigned int end);
-void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
+int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
+int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
-struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
+struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
block_t blk_addr);
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
@@ -3503,14 +3939,18 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
block_t old_addr, block_t new_addr,
unsigned char version, bool recover_curseg,
bool recover_newaddr);
-void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
+ enum log_type seg_type);
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
block_t blkaddr, unsigned int blkcnt);
-void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered, bool locked);
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+ bool ordered, bool locked);
+#define f2fs_wait_on_page_writeback(page, type, ordered, locked) \
+ f2fs_folio_wait_writeback(page_folio(page), type, ordered, locked)
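/*
 * Illustrative sketch: the wrapper macro above keeps existing page-based
 * callers compiling during the folio conversion, e.g.
 *
 *	f2fs_wait_on_page_writeback(page, NODE, true, true);
 *
 * expands to
 *
 *	f2fs_folio_wait_writeback(page_folio(page), NODE, true, true);
 */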
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len);
@@ -3519,19 +3959,24 @@ void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
-int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
+int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
-int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
+int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp);
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno);
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
+unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
+ unsigned int segno);
+
+static inline struct inode *fio_inode(struct f2fs_io_info *fio)
+{
+ return fio->folio->mapping->host;
+}
#define DEF_FRAGMENT_SIZE 4
#define MIN_FRAGMENT_SIZE 1
@@ -3546,16 +3991,21 @@ static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
/*
* checkpoint.c
*/
-void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
-struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
+ unsigned char reason);
+void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
+struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index);
+struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync);
-void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
+void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
+ unsigned int ra_blocks);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
@@ -3566,16 +4016,16 @@ void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
unsigned int devidx, int type);
-int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
-void f2fs_update_dirty_page(struct inode *inode, struct page *page);
+void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
void f2fs_remove_dirty_inode(struct inode *inode);
-int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
+ bool from_cp);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
@@ -3592,64 +4042,61 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
*/
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
+bool f2fs_is_cp_guaranteed(const struct folio *folio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
-void f2fs_submit_bio(struct f2fs_sb_info *sbi,
- struct bio *bio, enum page_type type);
+void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ enum page_type type);
+int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, struct page *page,
+ struct inode *inode, struct folio *folio,
nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
- struct bio **bio, struct page *page);
+ struct bio **bio, struct folio *folio);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
- block_t blk_addr, struct bio *bio);
+ block_t blk_addr, sector_t *sector);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
-int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
-int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
+int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
-struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
- int op_flags, bool for_write);
-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
-struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
+ blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
+struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
+ pgoff_t *next_pgofs);
+struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
bool for_write);
-struct page *f2fs_get_new_data_page(struct inode *inode,
- struct page *ipage, pgoff_t index, bool new_i_size);
+struct folio *f2fs_get_new_data_folio(struct inode *inode,
+ struct folio *ifolio, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
-void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
-int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
- int create, int flag);
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio, sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
int compr_blocks, bool allow_balance);
-void f2fs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length);
-int f2fs_release_page(struct page *page, gfp_t wait);
-#ifdef CONFIG_MIGRATION
-int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode);
-#endif
+void f2fs_write_failed(struct inode *inode, loff_t to);
+void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
-void f2fs_clear_page_cache_dirty_tag(struct page *page);
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
+extern const struct iomap_ops f2fs_iomap_ops;
/*
* gc.c
@@ -3657,12 +4104,18 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
- unsigned int segno);
+int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
-int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
+int f2fs_gc_range(struct f2fs_sb_info *sbi,
+ unsigned int start_seg, unsigned int end_seg,
+ bool dry_run, unsigned int dry_run_sections);
+int f2fs_resize_fs(struct file *filp, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);
+/* victim selection function for cleaning and SSR */
+int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
+ int gc_type, int type, char alloc_mode,
+ unsigned long long age, bool one_time);
/*
* recovery.c
@@ -3676,22 +4129,45 @@ void f2fs_destroy_recovery_cache(void);
* debug.c
*/
#ifdef CONFIG_F2FS_STAT_FS
+enum {
+ DEVSTAT_INUSE,
+ DEVSTAT_DIRTY,
+ DEVSTAT_FULL,
+ DEVSTAT_FREE,
+ DEVSTAT_PREFREE,
+ DEVSTAT_MAX,
+};
+
+struct f2fs_dev_stats {
+ unsigned int devstats[2][DEVSTAT_MAX]; /* 0: segs, 1: secs */
+};
+
struct f2fs_stat_info {
struct list_head stat_list;
struct f2fs_sb_info *sbi;
int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
int main_area_segs, main_area_sections, main_area_zones;
- unsigned long long hit_largest, hit_cached, hit_rbtree;
- unsigned long long hit_total, total_ext;
- int ext_tree, zombie_tree, ext_node;
+ unsigned long long hit_cached[NR_EXTENT_CACHES];
+ unsigned long long hit_rbtree[NR_EXTENT_CACHES];
+ unsigned long long total_ext[NR_EXTENT_CACHES];
+ unsigned long long hit_total[NR_EXTENT_CACHES];
+ int ext_tree[NR_EXTENT_CACHES];
+ int zombie_tree[NR_EXTENT_CACHES];
+ int ext_node[NR_EXTENT_CACHES];
+ /* to count memory footprint */
+ unsigned long long ext_mem[NR_EXTENT_CACHES];
+ /* for read extent cache */
+ unsigned long long hit_largest;
+ /* for block age extent cache */
+ unsigned long long allocated_data_blocks;
int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
int ndirty_data, ndirty_qdata;
- int inmem_pages;
- unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ unsigned int nquota_files, ndonate_files;
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_wb_cp_data, nr_wb_data;
int nr_rd_data, nr_rd_node, nr_rd_meta;
int nr_dio_read, nr_dio_write;
unsigned int io_skip_bggc, other_skip_bggc;
@@ -3702,21 +4178,23 @@ struct f2fs_stat_info {
int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
unsigned int cur_ckpt_time, peak_ckpt_time;
int inline_xattr, inline_inode, inline_dir, append, update, orphans;
- int compr_inode;
+ int compr_inode, swapfile_inode;
unsigned long long compr_blocks;
- int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
+ int aw_cnt, max_aw_cnt;
unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs;
int dirty_count, node_pages, meta_pages, compress_pages;
int compress_page_hit;
- int prefree_count, call_count, cp_count, bg_cp_count;
- int tot_segs, node_segs, data_segs, free_segs, free_secs;
- int bg_node_segs, bg_data_segs;
+ int prefree_count, free_segs, free_secs;
+ int cp_call_count[MAX_CALL_TYPE], cp_count;
+ int gc_call_count[MAX_CALL_TYPE];
+ int gc_segs[2][2];
+ int gc_secs[2][2];
int tot_blks, data_blks, node_blks;
int bg_data_blks, bg_node_blks;
- unsigned long long skipped_atomic_files[2];
+ int blkoff[NR_CURSEG_TYPE];
int curseg[NR_CURSEG_TYPE];
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
@@ -3729,6 +4207,7 @@ struct f2fs_stat_info {
unsigned int block_count[2];
unsigned int inplace_count;
unsigned long long base_mem, cache_mem, page_mem;
+ struct f2fs_dev_stats *dev_stats;
};
static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
@@ -3736,18 +4215,17 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
return (struct f2fs_stat_info *)sbi->stat_info;
}
-#define stat_inc_cp_count(si) ((si)->cp_count++)
-#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
-#define stat_inc_call_count(si) ((si)->call_count++)
-#define stat_inc_bggc_count(si) ((si)->bg_gc++)
+#define stat_inc_cp_call_count(sbi, foreground) \
+ atomic_inc(&sbi->cp_call_count[(foreground)])
+#define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
-#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
-#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type]))
+#define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type]))
#define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
-#define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached))
+#define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type]))
#define stat_inc_inline_xattr(inode) \
do { \
if (f2fs_has_inline_xattr(inode)) \
@@ -3792,6 +4270,14 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks) \
(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
+#define stat_inc_swapfile_inode(inode) \
+ (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
+#define stat_dec_swapfile_inode(inode) \
+ (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
+#define stat_inc_atomic_inode(inode) \
+ (atomic_inc(&F2FS_I_SB(inode)->atomic_files))
+#define stat_dec_atomic_inode(inode) \
+ (atomic_dec(&F2FS_I_SB(inode)->atomic_files))
#define stat_inc_meta_count(sbi, blkaddr) \
do { \
if (blkaddr < SIT_I(sbi)->sit_base_addr) \
@@ -3811,34 +4297,17 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode) \
do { \
- int cur = F2FS_I_SB(inode)->atomic_files; \
+ int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
if (cur > max) \
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
} while (0)
-#define stat_inc_volatile_write(inode) \
- (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
-#define stat_dec_volatile_write(inode) \
- (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
-#define stat_update_max_volatile_write(inode) \
- do { \
- int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
- int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
- if (cur > max) \
- atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
- } while (0)
-#define stat_inc_seg_count(sbi, type, gc_type) \
- do { \
- struct f2fs_stat_info *si = F2FS_STAT(sbi); \
- si->tot_segs++; \
- if ((type) == SUM_TYPE_DATA) { \
- si->data_segs++; \
- si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
- } else { \
- si->node_segs++; \
- si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
- } \
- } while (0)
+#define stat_inc_gc_call_count(sbi, foreground) \
+ (F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
+#define stat_inc_gc_sec_count(sbi, type, gc_type) \
+ (F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
+#define stat_inc_gc_seg_count(sbi, type, gc_type) \
+ (F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
#define stat_inc_tot_blk_count(si, blks) \
((si)->tot_blks += (blks))
@@ -3865,18 +4334,16 @@ void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
-#define stat_inc_cp_count(si) do { } while (0)
-#define stat_inc_bg_cp_count(si) do { } while (0)
-#define stat_inc_call_count(si) do { } while (0)
-#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_inc_cp_call_count(sbi, foreground) do { } while (0)
+#define stat_inc_cp_count(sbi) do { } while (0)
#define stat_io_skip_bggc_count(sbi) do { } while (0)
#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
#define stat_dec_dirty_inode(sbi, type) do { } while (0)
-#define stat_inc_total_hit(sbi) do { } while (0)
-#define stat_inc_rbtree_node_hit(sbi) do { } while (0)
+#define stat_inc_total_hit(sbi, type) do { } while (0)
+#define stat_inc_rbtree_node_hit(sbi, type) do { } while (0)
#define stat_inc_largest_node_hit(sbi) do { } while (0)
-#define stat_inc_cached_node_hit(sbi) do { } while (0)
+#define stat_inc_cached_node_hit(sbi, type) do { } while (0)
#define stat_inc_inline_xattr(inode) do { } while (0)
#define stat_dec_inline_xattr(inode) do { } while (0)
#define stat_inc_inline_inode(inode) do { } while (0)
@@ -3887,15 +4354,18 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_dec_compr_inode(inode) do { } while (0)
#define stat_add_compr_blocks(inode, blocks) do { } while (0)
#define stat_sub_compr_blocks(inode, blocks) do { } while (0)
+#define stat_inc_swapfile_inode(inode) do { } while (0)
+#define stat_dec_swapfile_inode(inode) do { } while (0)
+#define stat_inc_atomic_inode(inode) do { } while (0)
+#define stat_dec_atomic_inode(inode) do { } while (0)
#define stat_update_max_atomic_write(inode) do { } while (0)
-#define stat_inc_volatile_write(inode) do { } while (0)
-#define stat_dec_volatile_write(inode) do { } while (0)
-#define stat_update_max_volatile_write(inode) do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
#define stat_inc_inplace_blocks(sbi) do { } while (0)
-#define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_gc_call_count(sbi, foreground) do { } while (0)
+#define stat_inc_gc_sec_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_gc_seg_count(sbi, type, gc_type) do { } while (0)
#define stat_inc_tot_blk_count(si, blks) do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
@@ -3923,26 +4393,26 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
* inline.c
*/
bool f2fs_may_inline_data(struct inode *inode);
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio);
bool f2fs_may_inline_dentry(struct inode *inode);
-void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
-void f2fs_truncate_inline_inode(struct inode *inode,
- struct page *ipage, u64 from);
-int f2fs_read_inline_data(struct inode *inode, struct page *page);
-int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
+void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio);
+void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
+ u64 from);
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
+int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
-int f2fs_write_inline_data(struct inode *inode, struct page *page);
-int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
+int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- const struct f2fs_filename *fname,
- struct page **res_page);
+ const struct f2fs_filename *fname, struct folio **res_folio,
+ bool use_hash);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
- struct page *ipage);
+ struct folio *ifolio);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
- struct page *page, struct inode *dir,
- struct inode *inode);
+ struct folio *folio, struct inode *dir, struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct fscrypt_str *fstr);
@@ -3957,43 +4427,45 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc);
+unsigned int f2fs_donate_files(void);
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
- struct rb_entry *cached_re, unsigned int ofs);
-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
- struct rb_root_cached *root,
- struct rb_node **parent,
- unsigned long long key, bool *left_most);
-struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
- struct rb_root_cached *root,
- struct rb_node **parent,
- unsigned int ofs, bool *leftmost);
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
- struct rb_entry *cached_re, unsigned int ofs,
- struct rb_entry **prev_entry, struct rb_entry **next_entry,
- struct rb_node ***insert_p, struct rb_node **insert_parent,
- bool force, bool *leftmost);
-bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
- struct rb_root_cached *root, bool check_key);
-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
+bool sanity_check_extent_cache(struct inode *inode, struct folio *ifolio);
+void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
-unsigned int f2fs_destroy_extent_node(struct inode *inode);
+void f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
- struct extent_info *ei);
-void f2fs_update_extent_cache(struct dnode_of_data *dn);
-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
- pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
+/* read extent cache ops */
+void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio);
+bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei);
+bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
+ block_t *blkaddr);
+void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
+void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
+ pgoff_t fofs, block_t blkaddr, unsigned int len);
+unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
+ int nr_shrink);
+
+/* block age extent cache ops */
+void f2fs_init_age_extent_tree(struct inode *inode);
+bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei);
+void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
+void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
+ pgoff_t fofs, unsigned int len);
+unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
+ int nr_shrink);
+
/*
* sysfs.c
*/
@@ -4034,45 +4506,64 @@ static inline bool f2fs_post_read_required(struct inode *inode)
f2fs_compressed_file(inode);
}
+static inline bool f2fs_used_in_atomic_write(struct inode *inode)
+{
+ return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
+}
+
+static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
+{
+ return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
+}
+
/*
* compress.c
*/
#ifdef CONFIG_F2FS_FS_COMPRESSION
-bool f2fs_is_compressed_page(struct page *page);
-struct page *f2fs_compress_control_page(struct page *page);
+enum cluster_check_type {
+ CLUSTER_IS_COMPR, /* check only whether the cluster is compressed */
+ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
+ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
+};
+bool f2fs_is_compressed_page(struct folio *folio);
+struct folio *f2fs_compress_control_folio(struct folio *folio);
int f2fs_prepare_compress_overwrite(struct inode *inode,
struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
-void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
+void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio);
bool f2fs_is_compress_backend_ready(struct inode *inode);
-int f2fs_init_compress_mempool(void);
+bool f2fs_is_compress_level_valid(int alg, int lvl);
+int __init f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
-void f2fs_end_read_compressed_page(struct page *page, bool failed,
- block_t blkaddr);
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
+void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
+ block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
-bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
- int index, int nr_pages);
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
+ int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
int f2fs_write_multi_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
- pgoff_t fofs, block_t blkaddr, unsigned int llen,
- unsigned int c_len);
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
+void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+ pgoff_t fofs, block_t blkaddr,
+ unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead, bool for_write);
+ struct readahead_control *rac, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
-void f2fs_put_page_dic(struct page *page);
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ bool in_task);
+void f2fs_put_folio_dic(struct folio *folio, bool in_task);
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+ unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -4083,10 +4574,9 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
-void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
- nid_t ino, block_t blkaddr);
-bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len);
+bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
@@ -4102,7 +4592,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
sbi->compr_saved_block += diff; \
} while (0)
#else
-static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
+static inline bool f2fs_is_compressed_page(struct folio *folio) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
if (!f2fs_compressed_file(inode))
@@ -4110,24 +4600,27 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
/* not support compression */
return false;
}
-static inline struct page *f2fs_compress_control_page(struct page *page)
+static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
+static inline struct folio *f2fs_compress_control_folio(struct folio *folio)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
-static inline int f2fs_init_compress_mempool(void) { return 0; }
+static inline int __init f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
-static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
-static inline void f2fs_end_read_compressed_page(struct page *page,
- bool failed, block_t blkaddr)
+static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
+ bool in_task) { }
+static inline void f2fs_end_read_compressed_page(struct folio *folio,
+ bool failed, block_t blkaddr, bool in_task)
{
WARN_ON_ONCE(1);
}
-static inline void f2fs_put_page_dic(struct page *page)
+static inline void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
WARN_ON_ONCE(1);
}
-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(
+ struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
@@ -4135,64 +4628,78 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
-static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
- block_t blkaddr) { }
-static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
- struct page *page, nid_t ino, block_t blkaddr) { }
-static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
- struct page *page, block_t blkaddr) { return false; }
+static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len) { }
+static inline bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi,
+ struct folio *folio, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
-static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
- pgoff_t fofs, block_t blkaddr, unsigned int llen,
- unsigned int c_len) { }
+static inline int f2fs_is_compressed_cluster(
+ struct inode *inode,
+ pgoff_t index) { return 0; }
+static inline bool f2fs_is_sparse_cluster(
+ struct inode *inode,
+ pgoff_t index) { return true; }
+static inline void f2fs_update_read_extent_tree_range_compressed(
+ struct inode *inode,
+ pgoff_t fofs, block_t blkaddr,
+ unsigned int llen, unsigned int c_len) { }
#endif
-static inline void set_compress_context(struct inode *inode)
+static inline int set_compress_context(struct inode *inode)
{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
- F2FS_I(inode)->i_compress_algorithm =
- F2FS_OPTION(sbi).compress_algorithm;
- F2FS_I(inode)->i_log_cluster_size =
- F2FS_OPTION(sbi).compress_log_size;
- F2FS_I(inode)->i_compress_flag =
- F2FS_OPTION(sbi).compress_chksum ?
- 1 << COMPRESS_CHKSUM : 0;
- F2FS_I(inode)->i_cluster_size =
- 1 << F2FS_I(inode)->i_log_cluster_size;
- if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
- F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
+ fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
+ fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
+ fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
+ BIT(COMPRESS_CHKSUM) : 0;
+ fi->i_cluster_size = BIT(fi->i_log_cluster_size);
+ if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
+ fi->i_compress_algorithm == COMPRESS_ZSTD) &&
F2FS_OPTION(sbi).compress_level)
- F2FS_I(inode)->i_compress_flag |=
- F2FS_OPTION(sbi).compress_level <<
- COMPRESS_LEVEL_OFFSET;
- F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+ fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ fi->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
stat_inc_compr_inode(inode);
inc_compr_inode_stat(inode);
f2fs_mark_inode_dirty_sync(inode, true);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
}
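
The rewrite above replaces the open-coded shifts with BIT(): the per-inode cluster size is 2^compress_log_size blocks and the checksum mount option becomes a single flag bit. A minimal userspace sketch of the same arithmetic, assuming a BIT() macro equivalent to the kernel's and an illustrative (not the kernel's) bit position for the checksum flag:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)          (1U << (n))
#define COMPRESS_CHKSUM 0	/* illustrative bit position, not the kernel enum */

int main(void)
{
	unsigned int log_cluster_size = 2;	/* e.g. compress_log_size=2 */
	bool want_chksum = true;

	unsigned int cluster_size = BIT(log_cluster_size);	/* 4 blocks per cluster */
	unsigned int compress_flag = want_chksum ? BIT(COMPRESS_CHKSUM) : 0;

	printf("cluster_size=%u compress_flag=%#x\n", cluster_size, compress_flag);
	return 0;
}
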
static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- if (!f2fs_compressed_file(inode))
+ f2fs_down_write(&fi->i_sem);
+
+ if (!f2fs_compressed_file(inode)) {
+ f2fs_up_write(&fi->i_sem);
return true;
- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+ }
+ if (f2fs_is_mmap_file(inode) || atomic_read(&fi->writeback) ||
+ (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
+ f2fs_up_write(&fi->i_sem);
return false;
+ }
fi->i_flags &= ~F2FS_COMPR_FL;
stat_dec_compr_inode(inode);
clear_inode_flag(inode, FI_COMPRESSED_FILE);
f2fs_mark_inode_dirty_sync(inode, true);
+
+ f2fs_up_write(&fi->i_sem);
return true;
}
#define F2FS_FEATURE_FUNCS(name, flagname) \
-static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
+static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}
@@ -4211,37 +4718,39 @@ F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
+F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS);
+F2FS_FEATURE_FUNCS(packed_ssa, PACKED_SSA);
-static inline bool f2fs_may_extent_tree(struct inode *inode)
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
+ unsigned int zone)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
- if (!test_opt(sbi, EXTENT_CACHE) ||
- is_inode_flag_set(inode, FI_NO_EXTENT) ||
- (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
- !f2fs_sb_has_readonly(sbi)))
- return false;
-
- /*
- * for recovered files during mount do not create extents
- * if shrinker is not registered.
- */
- if (list_empty(&sbi->s_list))
- return false;
-
- return S_ISREG(inode->i_mode);
+ return test_bit(zone, FDEV(devi).blkz_seq);
}
-#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
- block_t blkaddr)
+ block_t blkaddr)
{
- unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
-
- return test_bit(zno, FDEV(devi).blkz_seq);
+ return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz);
}
#endif
+static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
+ struct block_device *bdev)
+{
+ int i;
+
+ if (!f2fs_is_multi_device(sbi))
+ return 0;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return i;
+
+ WARN_ON(1);
+ return -1;
+}
+
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
return f2fs_sb_has_blkzoned(sbi);
@@ -4249,8 +4758,7 @@ static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
- return blk_queue_discard(bdev_get_queue(bdev)) ||
- bdev_is_zoned(bdev);
+ return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
}
static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
@@ -4266,6 +4774,18 @@ static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
return false;
}
+static inline unsigned int f2fs_hw_discard_granularity(struct f2fs_sb_info *sbi)
+{
+ int i = 1;
+ unsigned int discard_granularity = bdev_discard_granularity(sbi->sb->s_bdev);
+
+ if (f2fs_is_multi_device(sbi))
+ for (; i < sbi->s_ndevs && !bdev_is_zoned(FDEV(i).bdev); i++)
+ discard_granularity = max_t(unsigned int, discard_granularity,
+ bdev_discard_granularity(FDEV(i).bdev));
+ return discard_granularity;
+}
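
f2fs_hw_discard_granularity() above reports the coarsest discard granularity among the data devices, stopping at the first zoned device. A minimal userspace sketch of that max-reduction, with an illustrative granularity array standing in for FDEV() and without the zoned-device early exit:

#include <stdio.h>

/* One entry per device; index 0 plays the role of sbi->sb->s_bdev. */
static unsigned int max_granularity(const unsigned int *gran, int ndevs)
{
	unsigned int g = gran[0];

	for (int i = 1; i < ndevs; i++)
		if (gran[i] > g)
			g = gran[i];
	return g;
}

int main(void)
{
	unsigned int gran[] = { 4096, 65536, 16384 };

	printf("granularity=%u\n", max_granularity(gran, 3));	/* 65536 */
	return 0;
}
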
+
static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
@@ -4285,16 +4805,53 @@ static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
return false;
}
+static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
+{
+ return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
+}
+
static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
+static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ if (f2fs_sb_has_blkzoned(sbi)) {
+#ifdef CONFIG_BLK_DEV_ZONED
+ int devi = f2fs_target_device_index(sbi, blkaddr);
+
+ if (!bdev_is_zoned(FDEV(devi).bdev))
+ return false;
+
+ if (f2fs_is_multi_device(sbi)) {
+ if (blkaddr < FDEV(devi).start_blk ||
+ blkaddr > FDEV(devi).end_blk) {
+ f2fs_err(sbi, "Invalid block %x", blkaddr);
+ return false;
+ }
+ blkaddr -= FDEV(devi).start_blk;
+ }
+
+ return f2fs_blkz_is_seq(sbi, devi, blkaddr);
+#else
+ return false;
+#endif
+ }
+ return false;
+}
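
For multi-device zoned setups, the helper above first rebases the global block address onto the owning device (subtracting FDEV(devi).start_blk) and then derives the zone index by dividing by the zone size in blocks. A small worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int blkaddr = 270000;		/* global block address */
	unsigned int dev_start_blk = 262144;	/* first block of the owning device */
	unsigned int blocks_per_blkz = 65536;	/* zone size in blocks */

	unsigned int rel = blkaddr - dev_start_blk;	/* 7856 */

	printf("device-relative block %u, zone %u\n", rel, rel / blocks_per_blkz);
	return 0;
}
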
+
+static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+{
+ return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
+}
+
static inline bool f2fs_may_compress(struct inode *inode)
{
if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
- f2fs_is_atomic_file(inode) ||
- f2fs_is_volatile_file(inode))
+ f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
+ f2fs_is_mmap_file(inode))
return false;
return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
@@ -4302,8 +4859,8 @@ static inline bool f2fs_may_compress(struct inode *inode)
static inline void f2fs_i_compr_blocks_update(struct inode *inode,
u64 blocks, bool add)
{
- int diff = F2FS_I(inode)->i_cluster_size - blocks;
struct f2fs_inode_info *fi = F2FS_I(inode);
+ int diff = fi->i_cluster_size - blocks;
/* don't update i_compr_blocks if saved blocks were released */
if (!add && !atomic_read(&fi->i_compr_blocks))
@@ -4319,17 +4876,6 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode,
f2fs_mark_inode_dirty_sync(inode, true);
}
-static inline int block_unaligned_IO(struct inode *inode,
- struct kiocb *iocb, struct iov_iter *iter)
-{
- unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
- unsigned int blocksize_mask = (1 << i_blkbits) - 1;
- loff_t offset = iocb->ki_pos;
- unsigned long align = offset | iov_iter_alignment(iter);
-
- return align & blocksize_mask;
-}
-
static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
int flag)
{
@@ -4340,36 +4886,6 @@ static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
return sbi->aligned_blksize;
}
-static inline bool f2fs_force_buffered_io(struct inode *inode,
- struct kiocb *iocb, struct iov_iter *iter)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int rw = iov_iter_rw(iter);
-
- if (f2fs_post_read_required(inode))
- return true;
-
- /* disallow direct IO if any of devices has unaligned blksize */
- if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
- return true;
- /*
- * for blkzoned device, fallback direct IO to buffered IO, so
- * all IOs can be serialized by log-structured write.
- */
- if (f2fs_sb_has_blkzoned(sbi))
- return true;
- if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
- if (block_unaligned_IO(inode, iocb, iter))
- return true;
- if (F2FS_IO_ALIGNED(sbi))
- return true;
- }
- if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
- return true;
-
- return false;
-}
-
static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
return fsverity_active(inode) &&
@@ -4377,10 +4893,15 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
}
#ifdef CONFIG_F2FS_FAULT_INJECTION
-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
- unsigned int type);
+extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
+ unsigned long type, enum fault_option fo);
#else
-#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
+static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+ unsigned long rate, unsigned long type,
+ enum fault_option fo)
+{
+ return 0;
+}
#endif
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
@@ -4401,6 +4922,88 @@ static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}
+static inline void __f2fs_schedule_timeout(long timeout, bool io)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (io)
+ io_schedule_timeout(timeout);
+ else
+ schedule_timeout(timeout);
+}
+
+#define f2fs_io_schedule_timeout(timeout) \
+ __f2fs_schedule_timeout(timeout, true)
+#define f2fs_schedule_timeout(timeout) \
+ __f2fs_schedule_timeout(timeout, false)
+
+static inline void f2fs_io_schedule_timeout_killable(long timeout)
+{
+ while (timeout) {
+ if (fatal_signal_pending(current))
+ return;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
+ if (timeout <= DEFAULT_SCHEDULE_TIMEOUT)
+ return;
+ timeout -= DEFAULT_SCHEDULE_TIMEOUT;
+ }
+}
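
f2fs_io_schedule_timeout_killable() sleeps in DEFAULT_SCHEDULE_TIMEOUT slices so a fatal signal can cut a long wait short between slices. A userspace analog of the same pattern, assuming a cancellation flag set elsewhere (for example from a signal handler); all names below are illustrative:

#include <stdbool.h>
#include <time.h>

#define SLICE_MS 100L

static volatile bool cancelled;	/* set asynchronously, e.g. by a signal handler */

static void sleep_killable_ms(long timeout_ms)
{
	while (timeout_ms > 0) {
		struct timespec ts = { 0, SLICE_MS * 1000000L };

		if (cancelled)
			return;		/* bail out between slices */
		nanosleep(&ts, NULL);
		timeout_ms -= SLICE_MS;
	}
}
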
+
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
+ struct folio *folio, enum page_type type)
+{
+ pgoff_t ofs = folio->index;
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ return;
+
+ if (ofs == sbi->page_eio_ofs[type]) {
+ if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ } else {
+ sbi->page_eio_ofs[type] = ofs;
+ sbi->page_eio_cnt[type] = 0;
+ }
+}
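
f2fs_handle_page_eio() only escalates to a checkpoint error after the same page offset of a given page type has failed MAX_RETRY_PAGE_EIO times in a row; a failure at a different offset resets the counter. A self-contained sketch of that counter, with an illustrative MAX_RETRY and a boolean return standing in for set_ckpt_flags(..., CP_ERROR_FLAG):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRY 3

struct eio_state {
	unsigned long last_ofs;
	unsigned int cnt;
};

static bool record_eio(struct eio_state *s, unsigned long ofs)
{
	if (ofs == s->last_ofs) {
		if (s->cnt++ == MAX_RETRY)
			return true;	/* caller would escalate here */
	} else {
		s->last_ofs = ofs;
		s->cnt = 0;
	}
	return false;
}

int main(void)
{
	struct eio_state s = { .last_ofs = -1UL, .cnt = 0 };

	for (int i = 0; i <= MAX_RETRY + 1; i++)
		if (record_eio(&s, 42))
			printf("escalated on failure %d\n", i);
	return 0;
}
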
+
+static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
+{
+ return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
+}
+
+static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int cnt)
+{
+ bool need_submit = false;
+ int i = 0;
+
+ do {
+ struct folio *folio;
+
+ folio = filemap_get_folio(META_MAPPING(sbi), blkaddr + i);
+ if (!IS_ERR(folio)) {
+ if (folio_test_writeback(folio))
+ need_submit = true;
+ f2fs_folio_put(folio, false);
+ }
+ } while (++i < cnt && !need_submit);
+
+ if (need_submit)
+ f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
+ NULL, 0, DATA);
+
+ truncate_inode_pages_range(META_MAPPING(sbi),
+ F2FS_BLK_TO_BYTES((loff_t)blkaddr),
+ F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
+}
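
The truncate call above works on a byte range derived from block numbers: F2FS_BLK_TO_BYTES(blkaddr) up to F2FS_BLK_END_BYTES(blkaddr + cnt - 1), i.e. the last byte of the final block. A quick sketch of that conversion, assuming 4KiB blocks:

#include <stdio.h>

#define BLKSIZE_BITS      12	/* 4KiB blocks assumed */
#define BLK_TO_BYTES(b)   ((long long)(b) << BLKSIZE_BITS)
#define BLK_END_BYTES(b)  (BLK_TO_BYTES((b) + 1) - 1)

int main(void)
{
	unsigned int blkaddr = 100, cnt = 3;

	/* bytes 409600..421887 cover blocks 100, 101 and 102 */
	printf("[%lld, %lld]\n", BLK_TO_BYTES(blkaddr),
	       BLK_END_BYTES(blkaddr + cnt - 1));
	return 0;
}
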
+
+static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len)
+{
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
+ f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
+}
+
#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 92ec2699bc85..d7047ca6b98d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
@@ -24,6 +23,7 @@
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
+#include <linux/iomap.h>
#include "f2fs.h"
#include "node.h"
@@ -35,57 +35,80 @@
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
+static void f2fs_zero_post_eof_page(struct inode *inode,
+ loff_t new_size, bool lock)
+{
+ loff_t old_size = i_size_read(inode);
+
+ if (old_size >= new_size)
+ return;
+
+ if (mapping_empty(inode->i_mapping))
+ return;
+
+ if (lock)
+ filemap_invalidate_lock(inode->i_mapping);
+ /* zero or drop pages only in range of [old_size, new_size] */
+ truncate_inode_pages_range(inode->i_mapping, old_size, new_size);
+ if (lock)
+ filemap_invalidate_unlock(inode->i_mapping);
+}
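
f2fs_zero_post_eof_page() drops cached pages in [old_size, new_size] before i_size grows, so stale data cached beyond the old EOF cannot resurface after the extension. A small worked example of which page indexes such a range touches, assuming 4KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	long long old_size = 5000, new_size = 9000;

	/* bytes 5000..9000 span page indexes 1 and 2 */
	printf("pages %lld..%lld\n",
	       old_size >> PAGE_SHIFT, new_size >> PAGE_SHIFT);
	return 0;
}
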
+
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
+ vm_flags_t flags = vmf->vma->vm_flags;
vm_fault_t ret;
ret = filemap_fault(vmf);
- if (!ret)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
- F2FS_BLKSIZE);
+ if (ret & VM_FAULT_LOCKED)
+ f2fs_update_iostat(F2FS_I_SB(inode), inode,
+ APP_MAPPED_READ_IO, F2FS_BLKSIZE);
- trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
+ trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret);
return ret;
}
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- bool need_alloc = true;
+ bool need_alloc = !f2fs_is_pinned_file(inode);
int err = 0;
+ vm_fault_t ret;
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
- return VM_FAULT_SIGBUS;
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ err = -EIO;
+ goto out;
+ }
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
- goto err;
+ goto out;
}
if (!f2fs_is_checkpoint_ready(sbi)) {
err = -ENOSPC;
- goto err;
+ goto out;
}
err = f2fs_convert_inline_inode(inode);
if (err)
- goto err;
+ goto out;
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- int ret = f2fs_is_compressed_cluster(inode, page->index);
+ int ret = f2fs_is_compressed_cluster(inode, folio->index);
if (ret < 0) {
err = ret;
- goto err;
+ goto out;
} else if (ret) {
need_alloc = false;
}
@@ -99,38 +122,38 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
+ f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT, true);
+
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(inode->i_mapping);
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping ||
- page_offset(page) > i_size_read(inode) ||
- !PageUptodate(page))) {
- unlock_page(page);
+
+ folio_lock(folio);
+ if (unlikely(folio->mapping != inode->i_mapping ||
+ folio_pos(folio) > i_size_read(inode) ||
+ !folio_test_uptodate(folio))) {
+ folio_unlock(folio);
err = -EFAULT;
goto out_sem;
}
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_alloc) {
/* block allocation */
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_block(&dn, page->index);
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
- }
-
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (!need_alloc) {
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_block_locked(&dn, folio->index);
+ } else {
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
f2fs_put_dnode(&dn);
+ if (f2fs_is_pinned_file(inode) &&
+ !__is_valid_data_blkaddr(dn.data_blkaddr))
+ err = -EIO;
}
-#endif
+
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto out_sem;
}
- f2fs_wait_on_page_writeback(page, DATA, false, true);
+ f2fs_folio_wait_writeback(folio, DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -138,31 +161,31 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page))
+ if (folio_test_mappedtodisk(folio))
goto out_sem;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
+ if (((loff_t)(folio->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
loff_t offset;
offset = i_size_read(inode) & ~PAGE_MASK;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
}
- set_page_dirty(page);
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ folio_mark_dirty(folio);
- f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
f2fs_update_time(sbi, REQ_TIME);
- trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb);
-err:
- return block_page_mkwrite_return(err);
+out:
+ ret = vmf_fs_error(err);
+
+ trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret);
+ return ret;
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
@@ -183,7 +206,7 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
if (!dentry)
return 0;
- *pino = parent_ino(dentry);
+ *pino = d_parent_ino(dentry);
dput(dentry);
return 1;
}
@@ -216,18 +239,22 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
TRANS_DIR_INO))
cp_reason = CP_RECOVER_DIR;
+ else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
+ XATTR_DIR_INO))
+ cp_reason = CP_XATTR_DIR;
return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
- struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
+ struct folio *i = filemap_get_folio(NODE_MAPPING(sbi), ino);
bool ret = false;
/* But we need to avoid that there are some inode updates */
- if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
+ if ((!IS_ERR(i) && folio_test_dirty(i)) ||
+ f2fs_need_inode_block_update(sbi, ino))
ret = true;
- f2fs_put_page(i, 0);
+ f2fs_folio_put(i, false);
return ret;
}
@@ -236,13 +263,13 @@ static void try_to_fix_pino(struct inode *inode)
struct f2fs_inode_info *fi = F2FS_I(inode);
nid_t pino;
- down_write(&fi->i_sem);
+ f2fs_down_write(&fi->i_sem);
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
get_parent_ino(inode, &pino)) {
f2fs_i_pino_write(inode, pino);
file_got_pino(inode);
}
- up_write(&fi->i_sem);
+ f2fs_up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
@@ -256,7 +283,6 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
- .for_reclaim = 0,
};
unsigned int seq_id = 0;
@@ -304,7 +330,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
* for OPU case, during fsync(), node can be persisted before
* data when lower device doesn't support write barrier, result
* in data corruption after SPO.
- * So for strict fsync mode, force to use atomic write sematics
+ * So for strict fsync mode, force to use atomic write semantics
* to keep write order in between data/node and last node to
* avoid potential data corruption.
*/
@@ -317,9 +343,9 @@ go_write:
* Both of fdatasync() and fsync() are able to be recovered from
* sudden-power-off.
*/
- down_read(&F2FS_I(inode)->i_sem);
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
cp_reason = need_do_checkpoint(inode);
- up_read(&F2FS_I(inode)->i_sem);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
if (cp_reason) {
/* all the dirty node pages should be flushed for POR */
@@ -391,9 +417,20 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
return f2fs_do_sync_file(file, start, end, datasync, false);
}
-static bool __found_offset(struct address_space *mapping, block_t blkaddr,
- pgoff_t index, int whence)
+static bool __found_offset(struct address_space *mapping,
+ struct dnode_of_data *dn, pgoff_t index, int whence)
{
+ block_t blkaddr = f2fs_data_blkaddr(dn);
+ struct inode *inode = mapping->host;
+ bool compressed_cluster = false;
+
+ if (f2fs_compressed_file(inode)) {
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));
+
+ compressed_cluster = first_blkaddr == COMPRESS_ADDR;
+ }
+
switch (whence) {
case SEEK_DATA:
if (__is_valid_data_blkaddr(blkaddr))
@@ -401,8 +438,12 @@ static bool __found_offset(struct address_space *mapping, block_t blkaddr,
if (blkaddr == NEW_ADDR &&
xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
return true;
+ if (compressed_cluster)
+ return true;
break;
case SEEK_HOLE:
+ if (compressed_cluster)
+ return false;
if (blkaddr == NULL_ADDR)
return true;
break;
@@ -413,14 +454,14 @@ static bool __found_offset(struct address_space *mapping, block_t blkaddr,
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
struct dnode_of_data dn;
pgoff_t pgofs, end_offset;
loff_t data_ofs = offset;
loff_t isize;
int err = 0;
- inode_lock(inode);
+ inode_lock_shared(inode);
isize = i_size_read(inode);
if (offset >= isize)
@@ -454,7 +495,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
}
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
@@ -471,7 +512,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
goto fail;
}
- if (__found_offset(file->f_mapping, blkaddr,
+ if (__found_offset(file->f_mapping, &dn,
pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
@@ -485,20 +526,17 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
found:
if (whence == SEEK_HOLE && data_ofs > isize)
data_ofs = isize;
- inode_unlock(inode);
+ inode_unlock_shared(inode);
return vfs_setpos(file, data_ofs, maxbytes);
fail:
- inode_unlock(inode);
+ inode_unlock_shared(inode);
return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
-
- if (f2fs_compressed_file(inode))
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
switch (whence) {
case SEEK_SET:
@@ -516,8 +554,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
-static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int f2fs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
@@ -527,11 +566,54 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
return -EOPNOTSUPP;
file_accessed(file);
- vma->vm_ops = &f2fs_file_vm_ops;
+ desc->vm_ops = &f2fs_file_vm_ops;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
set_inode_flag(inode, FI_MMAP_FILE);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+
return 0;
}
+static int finish_preallocate_blocks(struct inode *inode)
+{
+ int ret = 0;
+ bool opened;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
+ opened = is_inode_flag_set(inode, FI_OPENED_FILE);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+ if (opened)
+ return 0;
+
+ inode_lock(inode);
+ if (is_inode_flag_set(inode, FI_OPENED_FILE))
+ goto out_unlock;
+
+ if (!file_should_truncate(inode))
+ goto out_update;
+
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+
+ truncate_setsize(inode, i_size_read(inode));
+ ret = f2fs_truncate(inode);
+
+ filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (ret)
+ goto out_unlock;
+
+ file_dont_truncate(inode);
+out_update:
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ set_inode_flag(inode, FI_OPENED_FILE);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
+out_unlock:
+ inode_unlock(inode);
+ return ret;
+}
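
finish_preallocate_blocks() uses a check/recheck pattern: a cheap test of FI_OPENED_FILE under the shared i_sem first, then a re-test under inode_lock before the one-time truncation, and the flag is only set once the work succeeded. A userspace sketch of the same shape using pthreads; the locks and the one_time_work() hook are illustrative:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static bool opened;

static int open_once(int (*one_time_work)(void))
{
	int ret = 0;
	bool seen;

	pthread_rwlock_rdlock(&sem);	/* cheap first check */
	seen = opened;
	pthread_rwlock_unlock(&sem);
	if (seen)
		return 0;

	pthread_mutex_lock(&big_lock);	/* stands in for inode_lock() */
	if (!opened) {
		ret = one_time_work();
		if (!ret) {
			pthread_rwlock_wrlock(&sem);
			opened = true;
			pthread_rwlock_unlock(&sem);
		}
	}
	pthread_mutex_unlock(&big_lock);
	return ret;
}
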
+
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
int err = fscrypt_file_open(inode, filp);
@@ -547,29 +629,34 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
return err;
filp->f_mode |= FMODE_NOWAIT;
+ filp->f_mode |= FMODE_CAN_ODIRECT;
+
+ err = dquot_file_open(inode, filp);
+ if (err)
+ return err;
- return dquot_file_open(inode, filp);
+ err = finish_preallocate_blocks(inode);
+ if (!err)
+ atomic_inc(&F2FS_I(inode)->open_count);
+ return err;
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct f2fs_node *raw_node;
int nr_free = 0, ofs = dn->ofs_in_node, len = count;
__le32 *addr;
- int base = 0;
bool compressed_cluster = false;
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+ block_t blkstart;
+ int blklen = 0;
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
- base = get_extra_isize(dn->inode);
-
- raw_node = F2FS_NODE(dn->node_page);
- addr = blkaddr_in_node(raw_node) + base + ofs;
+ addr = get_dnode_addr(dn->inode, dn->node_folio) + ofs;
+ blkstart = le32_to_cpu(*addr);
- /* Assumption: truncateion starts with cluster */
+ /* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
block_t blkaddr = le32_to_cpu(*addr);
@@ -583,28 +670,44 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
}
if (blkaddr == NULL_ADDR)
- continue;
+ goto next;
- dn->data_blkaddr = NULL_ADDR;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, NULL_ADDR);
if (__is_valid_data_blkaddr(blkaddr)) {
- if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
- DATA_GENERIC_ENHANCE))
- continue;
+ if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
+ goto next;
+ if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE))
+ goto next;
if (compressed_cluster)
valid_blocks++;
}
- if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
- clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
-
- f2fs_invalidate_blocks(sbi, blkaddr);
+ if (blkstart + blklen == blkaddr) {
+ blklen++;
+ } else {
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+ blkstart = blkaddr;
+ blklen = 1;
+ }
if (!released || blkaddr != COMPRESS_ADDR)
nr_free++;
+
+ continue;
+
+next:
+ if (blklen)
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+
+ blkstart = le32_to_cpu(*(addr + 1));
+ blklen = 0;
}
+ if (blklen)
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+
if (compressed_cluster)
f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
@@ -614,9 +717,10 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
* once we invalidate valid blkaddr in range [ofs, ofs + count],
* we will invalidate all blkaddr in the whole range.
*/
- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
+ fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio),
dn->inode) + ofs;
- f2fs_update_extent_cache_range(dn, fofs, 0, len);
+ f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+ f2fs_update_age_extent_cache_range(dn, fofs, len);
dec_valid_block_count(sbi, dn->inode, nr_free);
}
dn->ofs_in_node = ofs;
@@ -626,42 +730,39 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
dn->ofs_in_node, nr_free);
}
-void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
-{
- f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
-}
-
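
The blkstart/blklen logic added to f2fs_truncate_data_blocks_range() batches consecutive block addresses into runs, so f2fs_invalidate_blocks() is issued once per contiguous run rather than once per block (the NULL_ADDR skip path is omitted here). A self-contained sketch of that coalescing; invalidate() is an illustrative stand-in:

#include <stdio.h>

typedef unsigned int block_t;

static void invalidate(block_t start, int len)
{
	if (len)
		printf("invalidate [%u, +%d)\n", start, len);
}

static void invalidate_blocks(const block_t *addr, int count)
{
	block_t start = addr[0];
	int len = 0;

	for (int i = 0; i < count; i++) {
		if (start + len == addr[i]) {
			len++;		/* extend the current run */
		} else {
			invalidate(start, len);
			start = addr[i];
			len = 1;
		}
	}
	invalidate(start, len);		/* flush the final run */
}

int main(void)
{
	block_t addr[] = { 100, 101, 102, 200, 201, 500 };

	invalidate_blocks(addr, 6);	/* three runs: 100+3, 200+2, 500+1 */
	return 0;
}
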
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool cache_only)
{
loff_t offset = from & (PAGE_SIZE - 1);
pgoff_t index = from >> PAGE_SHIFT;
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
if (!offset && !cache_only)
return 0;
if (cache_only) {
- page = find_lock_page(mapping, index);
- if (page && PageUptodate(page))
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR(folio))
+ return 0;
+ if (folio_test_uptodate(folio))
goto truncate_out;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return 0;
}
- page = f2fs_get_lock_data_page(inode, index, true);
- if (IS_ERR(page))
- return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, index, true);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio) == -ENOENT ? 0 : PTR_ERR(folio);
truncate_out:
- f2fs_wait_on_page_writeback(page, DATA, true, true);
- zero_user(page, offset, PAGE_SIZE - offset);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ folio_zero_segment(folio, offset, folio_size(folio));
/* An encrypted inode should have a key and truncate the last page. */
f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
if (!cache_only)
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -671,11 +772,16 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
struct dnode_of_data dn;
pgoff_t free_from;
int count = 0, err = 0;
- struct page *ipage;
+ struct folio *ifolio;
bool truncate_page = false;
trace_f2fs_truncate_blocks_enter(inode, from);
+ if (IS_DEVICE_ALIASING(inode) && from) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
if (free_from >= max_file_blocks(inode))
@@ -684,20 +790,33 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
if (lock)
f2fs_lock_op(sbi);
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
+ goto out;
+ }
+
+ if (IS_DEVICE_ALIASING(inode)) {
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+ struct extent_info ei = et->largest;
+
+ f2fs_invalidate_blocks(sbi, ei.blk, ei.len);
+
+ dec_valid_block_count(sbi, inode, ei.len);
+ f2fs_update_time(sbi, REQ_TIME);
+
+ f2fs_folio_put(ifolio, true);
goto out;
}
if (f2fs_has_inline_data(inode)) {
- f2fs_truncate_inline_inode(inode, ipage, from);
- f2fs_put_page(ipage, 1);
+ f2fs_truncate_inline_inode(inode, ifolio, from);
+ f2fs_folio_put(ifolio, true);
truncate_page = true;
goto out;
}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
+ set_new_dnode(&dn, inode, ifolio, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
if (err) {
if (err == -ENOENT)
@@ -705,12 +824,12 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
goto out;
}
- count = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = ADDRS_PER_PAGE(dn.node_folio, inode);
count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
- if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
+ if (dn.ofs_in_node || IS_INODE(dn.node_folio)) {
f2fs_truncate_data_blocks_range(&dn, count);
free_from += count;
}
@@ -725,7 +844,7 @@ free_partial:
/* lastly zero out the first data page */
if (!err)
err = truncate_partial_data_page(inode, from, truncate_page);
-
+out_err:
trace_f2fs_truncate_blocks_exit(inode, err);
return err;
}
@@ -781,10 +900,8 @@ int f2fs_truncate(struct inode *inode)
trace_f2fs_truncate(inode);
- if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
- f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
+ if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))
return -EIO;
- }
err = f2fs_dquot_initialize(inode);
if (err)
@@ -793,25 +910,66 @@ int f2fs_truncate(struct inode *inode)
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
- if (err)
+ if (err) {
+ /*
+ * Always truncate page #0 to avoid page cache
+ * leak in evict() path.
+ */
+ truncate_inode_pages_range(inode->i_mapping,
+ F2FS_BLK_TO_BYTES(0),
+ F2FS_BLK_END_BYTES(0));
return err;
+ }
}
err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
if (err)
return err;
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
-int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+static bool f2fs_force_buffered_io(struct inode *inode, int rw)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (!fscrypt_dio_supported(inode))
+ return true;
+ if (fsverity_active(inode))
+ return true;
+ if (f2fs_compressed_file(inode))
+ return true;
+ /*
+ * only force direct read to use buffered IO, for direct write,
+ * it expects inline data conversion before committing IO.
+ */
+ if (f2fs_has_inline_data(inode) && rw == READ)
+ return true;
+
+ /* disallow direct IO if any of devices has unaligned blksize */
+ if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
+ return true;
+ /*
+ * for blkzoned device, fallback direct IO to buffered IO, so
+ * all IOs can be serialized by log-structured write.
+ */
+ if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE) &&
+ !f2fs_is_pinned_file(inode))
+ return true;
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+ return true;
+
+ return false;
+}
+
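
With FMODE_CAN_ODIRECT set at open time and f2fs_force_buffered_io() deciding per I/O, an O_DIRECT open can succeed even when the filesystem later services the request through the page cache (e.g. compressed or fsverity files). A userspace sketch of an aligned O_DIRECT read; 4096 is a common but not guaranteed alignment (see the STATX_DIOALIGN query further below):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	void *buf;
	ssize_t n;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	n = read(fd, buf, 4096);	/* may be served buffered if f2fs forces a fallback */
	printf("read %zd bytes\n", n);
	free(buf);
	close(fd);
	return 0;
}
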
+int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_inode *ri;
+ struct f2fs_inode *ri = NULL;
unsigned int flags;
if (f2fs_has_extra_attr(inode) &&
@@ -822,6 +980,24 @@ int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
}
+ /*
+ * Return the DIO alignment restrictions if requested. We only return
+ * this information when requested, since on encrypted files it might
+ * take a fair bit of work to get if the file wasn't opened recently.
+ *
+ * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN
+ * cannot represent that, so in that case we report no DIO support.
+ */
+ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
+ unsigned int bsize = i_blocksize(inode);
+
+ stat->result_mask |= STATX_DIOALIGN;
+ if (!f2fs_force_buffered_io(inode, WRITE)) {
+ stat->dio_mem_align = bsize;
+ stat->dio_offset_align = bsize;
+ }
+ }
+
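
The STATX_DIOALIGN handling above is what a statx() caller sees: when the mask bit comes back set, stx_dio_mem_align and stx_dio_offset_align carry the required buffer and file-offset alignment, and both are reported as 0 when direct I/O is not supported for the file. A userspace query (needs Linux 6.1+ and matching libc headers):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (statx(AT_FDCWD, argv[1], 0, STATX_DIOALIGN, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align %u, offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	return 0;
}
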
flags = fi->i_flags;
if (flags & F2FS_COMPR_FL)
stat->attributes |= STATX_ATTR_COMPRESSED;
@@ -843,7 +1019,7 @@ int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
/* we need to show initial sectors used for inline_data/dentries */
if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
@@ -854,26 +1030,23 @@ int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
-static void __setattr_copy(struct user_namespace *mnt_userns,
+static void __setattr_copy(struct mnt_idmap *idmap,
struct inode *inode, const struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
- if (ia_valid & ATTR_UID)
- inode->i_uid = attr->ia_uid;
- if (ia_valid & ATTR_GID)
- inode->i_gid = attr->ia_gid;
+ i_uid_update(idmap, attr, inode);
+ i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
- kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
- if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
set_acl_inode(inode, mode);
}
@@ -882,28 +1055,18 @@ static void __setattr_copy(struct user_namespace *mnt_userns,
#define __setattr_copy setattr_copy
#endif
-int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if (unlikely(IS_IMMUTABLE(inode)))
- return -EPERM;
-
- if (unlikely(IS_APPEND(inode) &&
- (attr->ia_valid & (ATTR_MODE | ATTR_UID |
- ATTR_GID | ATTR_TIMES_SET))))
- return -EPERM;
-
- if ((attr->ia_valid & ATTR_SIZE) &&
- !f2fs_is_compress_backend_ready(inode))
- return -EOPNOTSUPP;
-
- err = setattr_prepare(&init_user_ns, dentry, attr);
+ err = setattr_prepare(idmap, dentry, attr);
if (err)
return err;
@@ -915,33 +1078,57 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (err)
return err;
- if (is_quota_modification(inode, attr)) {
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (unlikely(IS_APPEND(inode) &&
+ (attr->ia_valid & (ATTR_MODE | ATTR_UID |
+ ATTR_GID | ATTR_TIMES_SET))))
+ return -EPERM;
+
+ if ((attr->ia_valid & ATTR_SIZE)) {
+ if (!f2fs_is_compress_backend_ready(inode) ||
+ IS_DEVICE_ALIASING(inode))
+ return -EOPNOTSUPP;
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
+ !IS_ALIGNED(attr->ia_size,
+ F2FS_BLK_TO_BYTES(fi->i_cluster_size)))
+ return -EINVAL;
+ /*
+ * To prevent scattered pin block generation, we don't allow
+ * smaller/equal size unaligned truncation for pinned file.
+ * We only support overwrite IO to pinned file, so don't
+ * care about larger size truncation.
+ */
+ if (f2fs_is_pinned_file(inode) &&
+ attr->ia_size <= i_size_read(inode) &&
+ !IS_ALIGNED(attr->ia_size,
+ F2FS_BLK_TO_BYTES(CAP_BLKS_PER_SEC(sbi))))
+ return -EINVAL;
+ }
+
+ if (is_quota_modification(idmap, inode, attr)) {
err = f2fs_dquot_initialize(inode);
if (err)
return err;
}
- if ((attr->ia_valid & ATTR_UID &&
- !uid_eq(attr->ia_uid, inode->i_uid)) ||
- (attr->ia_valid & ATTR_GID &&
- !gid_eq(attr->ia_gid, inode->i_gid))) {
- f2fs_lock_op(F2FS_I_SB(inode));
- err = dquot_transfer(inode, attr);
+ if (i_uid_needs_update(idmap, attr, inode) ||
+ i_gid_needs_update(idmap, attr, inode)) {
+ f2fs_lock_op(sbi);
+ err = dquot_transfer(idmap, inode, attr);
if (err) {
- set_sbi_flag(F2FS_I_SB(inode),
- SBI_QUOTA_NEED_REPAIR);
- f2fs_unlock_op(F2FS_I_SB(inode));
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ f2fs_unlock_op(sbi);
return err;
}
/*
* update uid/gid under lock_op(), so that dquot and inode can
* be updated atomically.
*/
- if (attr->ia_valid & ATTR_UID)
- inode->i_uid = attr->ia_uid;
- if (attr->ia_valid & ATTR_GID)
- inode->i_gid = attr->ia_gid;
+ i_uid_update(idmap, attr, inode);
+ i_gid_update(idmap, attr, inode);
f2fs_mark_inode_dirty_sync(inode, true);
- f2fs_unlock_op(F2FS_I_SB(inode));
+ f2fs_unlock_op(sbi);
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -957,9 +1144,18 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
return err;
}
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ /*
+ * wait for inflight dio, blocks should be removed after
+ * IO completion.
+ */
+ if (attr->ia_size < old_size)
+ inode_dio_wait(inode);
+
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
+ if (attr->ia_size > old_size)
+ f2fs_zero_post_eof_page(inode, attr->ia_size, false);
truncate_setsize(inode, attr->ia_size);
if (attr->ia_size <= old_size)
@@ -969,24 +1165,24 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
* larger than i_size.
*/
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
if (err)
return err;
- spin_lock(&F2FS_I(inode)->i_size_lock);
- inode->i_mtime = inode->i_ctime = current_time(inode);
- F2FS_I(inode)->last_disk_size = i_size_read(inode);
- spin_unlock(&F2FS_I(inode)->i_size_lock);
+ spin_lock(&fi->i_size_lock);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ fi->last_disk_size = i_size_read(inode);
+ spin_unlock(&fi->i_size_lock);
}
- __setattr_copy(&init_user_ns, inode, attr);
+ __setattr_copy(idmap, inode, attr);
if (attr->ia_valid & ATTR_MODE) {
- err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));
+ err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
if (!err)
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ inode->i_mode = fi->i_acl_mode;
clear_inode_flag(inode, FI_ACL_MODE);
}
}
@@ -995,7 +1191,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
f2fs_mark_inode_dirty_sync(inode, true);
/* inode change will produce dirty node pages flushed by checkpoint */
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
return err;
}
@@ -1003,7 +1199,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
const struct inode_operations f2fs_file_inode_operations = {
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
- .get_acl = f2fs_get_acl,
+ .get_inode_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
.listxattr = f2fs_listxattr,
.fiemap = f2fs_fiemap,
@@ -1015,7 +1211,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
loff_t start, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page;
+ struct folio *folio;
if (!len)
return 0;
@@ -1023,16 +1219,16 @@ static int fill_zero(struct inode *inode, pgoff_t index,
f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
- page = f2fs_get_new_data_page(inode, NULL, index, false);
+ folio = f2fs_get_new_data_folio(inode, NULL, index, false);
f2fs_unlock_op(sbi);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- f2fs_wait_on_page_writeback(page, DATA, true, true);
- zero_user(page, start, len);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+ folio_zero_range(folio, start, len);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -1055,7 +1251,7 @@ int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
return err;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -1068,7 +1264,7 @@ int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
return 0;
}
-static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
pgoff_t pg_start, pg_end;
loff_t off_start, off_end;
@@ -1078,6 +1274,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
+ f2fs_zero_post_eof_page(inode, offset + len, true);
+
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1111,7 +1309,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
blk_start = (loff_t)pg_start << PAGE_SHIFT;
blk_end = (loff_t)pg_end << PAGE_SHIFT;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
truncate_pagecache_range(inode, blk_start, blk_end - 1);
@@ -1121,7 +1319,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
f2fs_unlock_op(sbi);
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
@@ -1150,7 +1348,7 @@ next_dnode:
goto next;
}
- done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, inode) -
dn.ofs_in_node, len);
for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
*blkaddr = f2fs_data_blkaddr(&dn);
@@ -1198,7 +1396,7 @@ static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
if (ret) {
dec_valid_block_count(sbi, inode, 1);
- f2fs_invalidate_blocks(sbi, *blkaddr);
+ f2fs_invalidate_blocks(sbi, *blkaddr, 1);
} else {
f2fs_update_data_blkaddr(&dn, *blkaddr);
}
@@ -1232,14 +1430,14 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
if (ret)
return ret;
- ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (ret) {
f2fs_put_dnode(&dn);
return ret;
}
ilen = min((pgoff_t)
- ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ ADDRS_PER_PAGE(dn.node_folio, dst_inode) -
dn.ofs_in_node, len - i);
do {
dn.data_blkaddr = f2fs_data_blkaddr(&dn);
@@ -1264,22 +1462,26 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
f2fs_put_dnode(&dn);
} else {
- struct page *psrc, *pdst;
+ struct folio *fsrc, *fdst;
- psrc = f2fs_get_lock_data_page(src_inode,
+ fsrc = f2fs_get_lock_data_folio(src_inode,
src + i, true);
- if (IS_ERR(psrc))
- return PTR_ERR(psrc);
- pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
+ if (IS_ERR(fsrc))
+ return PTR_ERR(fsrc);
+ fdst = f2fs_get_new_data_folio(dst_inode, NULL, dst + i,
true);
- if (IS_ERR(pdst)) {
- f2fs_put_page(psrc, 1);
- return PTR_ERR(pdst);
+ if (IS_ERR(fdst)) {
+ f2fs_folio_put(fsrc, true);
+ return PTR_ERR(fdst);
}
- f2fs_copy_page(psrc, pdst);
- set_page_dirty(pdst);
- f2fs_put_page(pdst, 1);
- f2fs_put_page(psrc, 1);
+
+ f2fs_folio_wait_writeback(fdst, DATA, true, true);
+
+ memcpy_folio(fdst, 0, fsrc, 0, PAGE_SIZE);
+ folio_mark_dirty(fdst);
+ folio_set_f2fs_gcing(fdst);
+ f2fs_folio_put(fdst, true);
+ f2fs_folio_put(fsrc, true);
ret = f2fs_truncate_hole(src_inode,
src + i, src + i + 1);
@@ -1354,9 +1556,11 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
/* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
+ f2fs_zero_post_eof_page(inode, offset + len, false);
+
f2fs_lock_op(sbi);
f2fs_drop_extent_tree(inode);
truncate_pagecache(inode, offset);
@@ -1364,7 +1568,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
f2fs_unlock_op(sbi);
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -1436,14 +1640,25 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
ret = -ENOSPC;
break;
}
- if (dn->data_blkaddr != NEW_ADDR) {
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
- dn->data_blkaddr = NEW_ADDR;
- f2fs_set_data_blkaddr(dn);
+
+ if (dn->data_blkaddr == NEW_ADDR)
+ continue;
+
+ if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ ret = -EFSCORRUPTED;
+ break;
}
+
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1);
+ f2fs_set_data_blkaddr(dn, NEW_ADDR);
}
- f2fs_update_extent_cache_range(dn, start, 0, index - start);
+ if (index > start) {
+ f2fs_update_read_extent_cache_range(dn, start, 0,
+ index - start);
+ f2fs_update_age_extent_cache_range(dn, start, index - start);
+ }
return ret;
}
@@ -1470,6 +1685,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
+ f2fs_zero_post_eof_page(inode, offset + len, true);
+
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1499,7 +1716,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
unsigned int end_offset;
pgoff_t end;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(mapping);
truncate_pagecache_range(inode,
@@ -1513,11 +1730,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret) {
f2fs_unlock_op(sbi);
filemap_invalidate_unlock(mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
end = min(pg_end, end_offset - dn.ofs_in_node + index);
ret = f2fs_do_zero_range(&dn, index, end);
@@ -1525,7 +1742,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
f2fs_unlock_op(sbi);
filemap_invalidate_unlock(mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_balance_fs(sbi, dn.node_changed);
@@ -1599,8 +1816,10 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
/* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(mapping);
+
+ f2fs_zero_post_eof_page(inode, offset + len, false);
truncate_pagecache(inode, offset);
while (!ret && idx > pg_start) {
@@ -1617,11 +1836,13 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_unlock_op(sbi);
}
filemap_invalidate_unlock(mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (ret)
+ return ret;
/* write out all moved pages, if possible */
filemap_invalidate_lock(mapping);
- filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
+ ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
filemap_invalidate_unlock(mapping);
@@ -1630,15 +1851,20 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
-static int expand_inode_data(struct inode *inode, loff_t offset,
+static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
loff_t len, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
.m_may_create = true };
+ struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
+ .init_gc_type = FG_GC,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = true,
+ .nr_free_secs = 0 };
pgoff_t pg_start, pg_end;
- loff_t new_size = i_size_read(inode);
+ loff_t new_size;
loff_t off_end;
block_t expanded = 0;
int err;
@@ -1651,6 +1877,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
if (err)
return err;
+ f2fs_zero_post_eof_page(inode, offset + len, true);
+
f2fs_balance_fs(sbi, true);
pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
@@ -1666,29 +1894,47 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
return 0;
if (f2fs_is_pinned_file(inode)) {
- block_t sec_blks = BLKS_PER_SEC(sbi);
+ block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
block_t sec_len = roundup(map.m_len, sec_blks);
map.m_len = sec_blks;
next_alloc:
- if (has_not_enough_free_secs(sbi, 0,
- GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
- down_write(&sbi->gc_lock);
- err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
- if (err && err != -ENODATA && err != -EAGAIN)
+ f2fs_down_write(&sbi->pin_sem);
+
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
+ f2fs_up_write(&sbi->pin_sem);
+ err = -ENOSPC;
+ f2fs_warn_ratelimited(sbi,
+ "ino:%lu, start:%lu, end:%lu, need to trigger GC to "
+ "reclaim enough free segment when checkpoint is enabled",
+ inode->i_ino, pg_start, pg_end);
goto out_err;
+ }
}
- down_write(&sbi->pin_sem);
+ if (has_not_enough_free_secs(sbi, 0,
+ sbi->reserved_pin_section)) {
+ f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
+ err = f2fs_gc(sbi, &gc_control);
+ if (err && err != -ENODATA) {
+ f2fs_up_write(&sbi->pin_sem);
+ goto out_err;
+ }
+ }
- f2fs_lock_op(sbi);
- f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
- f2fs_unlock_op(sbi);
+ err = f2fs_allocate_pinning_section(sbi);
+ if (err) {
+ f2fs_up_write(&sbi->pin_sem);
+ goto out_err;
+ }
map.m_seg_type = CURSEG_COLD_DATA_PINNED;
- err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
+ file_dont_truncate(inode);
- up_write(&sbi->pin_sem);
+ f2fs_up_write(&sbi->pin_sem);
expanded += map.m_len;
sec_len -= map.m_len;
@@ -1698,7 +1944,7 @@ next_alloc:
map.m_len = expanded;
} else {
- err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
expanded = map.m_len;
}
out_err:
@@ -1737,7 +1983,7 @@ static long f2fs_fallocate(struct file *file, int mode,
return -EIO;
if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
return -ENOSPC;
- if (!f2fs_is_compress_backend_ready(inode))
+ if (!f2fs_is_compress_backend_ready(inode) || IS_DEVICE_ALIASING(inode))
return -EOPNOTSUPP;
/* f2fs only support ->fallocate for regular file */
@@ -1748,11 +1994,6 @@ static long f2fs_fallocate(struct file *file, int mode,
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
- if (f2fs_compressed_file(inode) &&
- (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
- FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
- return -EOPNOTSUPP;
-
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
FALLOC_FL_INSERT_RANGE))
@@ -1760,11 +2001,32 @@ static long f2fs_fallocate(struct file *file, int mode,
inode_lock(inode);
+ /*
+	 * Pinned file should not support partial truncation since its blocks
+	 * may be in use by applications.
+ */
+ if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
+ (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = file_modified(file);
+ if (ret)
+ goto out;
+
+ /*
+ * wait for inflight dio, blocks should be removed after IO
+ * completion.
+ */
+ inode_dio_wait(inode);
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
goto out;
- ret = punch_hole(inode, offset, len);
+ ret = f2fs_punch_hole(inode, offset, len);
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
ret = f2fs_collapse_range(inode, offset, len);
} else if (mode & FALLOC_FL_ZERO_RANGE) {
@@ -1772,11 +2034,11 @@ static long f2fs_fallocate(struct file *file, int mode,
} else if (mode & FALLOC_FL_INSERT_RANGE) {
ret = f2fs_insert_range(inode, offset, len);
} else {
- ret = expand_inode_data(inode, offset, len, mode);
+ ret = f2fs_expand_inode_data(inode, offset, len, mode);
}
if (!ret) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -1790,24 +2052,21 @@ out:
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
+ if (atomic_dec_and_test(&F2FS_I(inode)->open_count))
+ f2fs_remove_donate_inode(inode);
+
/*
- * f2fs_relase_file is called at every close calls. So we should
+	 * f2fs_release_file is called at every close call. So we should
* not drop any inmemory pages by close called by other process.
*/
if (!(filp->f_mode & FMODE_WRITE) ||
atomic_read(&inode->i_writecount) != 1)
return 0;
- /* some remained atomic pages should discarded */
- if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
- if (f2fs_is_volatile_file(inode)) {
- set_inode_flag(inode, FI_DROP_CACHE);
- filemap_fdatawrite(inode->i_mapping);
- clear_inode_flag(inode, FI_DROP_CACHE);
- clear_inode_flag(inode, FI_VOLATILE_FILE);
- stat_dec_volatile_write(inode);
- }
+ inode_lock(inode);
+ f2fs_abort_atomic_write(inode, true);
+ inode_unlock(inode);
+
return 0;
}
@@ -1821,9 +2080,13 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
* until all the writers close its file. Since this should be done
* before dropping file lock, it needs to do in ->flush.
*/
- if (f2fs_is_atomic_file(inode) &&
- F2FS_I(inode)->inmem_task == current)
- f2fs_drop_inmem_pages(inode);
+ if (F2FS_I(inode)->atomic_write_task == current &&
+ (current->flags & PF_EXITING)) {
+ inode_lock(inode);
+ f2fs_abort_atomic_write(inode, true);
+ inode_unlock(inode);
+ }
+
return 0;
}
@@ -1857,22 +2120,27 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
if (masked_flags & F2FS_COMPR_FL) {
if (!f2fs_disable_compressed_file(inode))
return -EINVAL;
- }
- if (iflags & F2FS_NOCOMP_FL)
- return -EINVAL;
- if (iflags & F2FS_COMPR_FL) {
- if (!f2fs_may_compress(inode))
- return -EINVAL;
- if (S_ISREG(inode->i_mode) && inode->i_size)
+ } else {
+ /* try to convert inline_data to support compression */
+ int err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+
+ f2fs_down_write(&fi->i_sem);
+ if (!f2fs_may_compress(inode) ||
+ atomic_read(&fi->writeback) ||
+ (S_ISREG(inode->i_mode) &&
+ F2FS_HAS_BLOCKS(inode))) {
+ f2fs_up_write(&fi->i_sem);
return -EINVAL;
+ }
+ err = set_compress_context(inode);
+ f2fs_up_write(&fi->i_sem);
- set_compress_context(inode);
+ if (err)
+ return err;
}
}
- if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
- if (masked_flags & F2FS_COMPR_FL)
- return -EINVAL;
- }
fi->i_flags = iflags | (fi->i_flags & ~mask);
f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
@@ -1883,7 +2151,7 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
else
clear_inode_flag(inode, FI_PROJ_INHERIT);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_set_inode_flags(inode);
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
@@ -1980,14 +2248,19 @@ static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
return put_user(inode->i_generation, (int __user *)arg);
}
-static int f2fs_ioc_start_atomic_write(struct file *filp)
+static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
{
struct inode *inode = file_inode(filp);
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ loff_t isize;
int ret;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
if (!S_ISREG(inode->i_mode))
@@ -2002,165 +2275,136 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
inode_lock(inode);
- f2fs_disable_compressed_file(inode);
-
- if (f2fs_is_atomic_file(inode)) {
- if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
- ret = -EINVAL;
+ if (!f2fs_disable_compressed_file(inode) ||
+ f2fs_is_pinned_file(inode)) {
+ ret = -EINVAL;
goto out;
}
+ if (f2fs_is_atomic_file(inode))
+ goto out;
+
ret = f2fs_convert_inline_inode(inode);
if (ret)
goto out;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[READ]);
/*
* Should wait end_io to count F2FS_WB_CP_DATA correctly by
* f2fs_is_atomic_file.
*/
if (get_dirty_pages(inode))
- f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+ f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret) {
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- goto out;
- }
-
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (list_empty(&fi->inmem_ilist))
- list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
- sbi->atomic_files++;
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
-
- /* add inode in inmem_list first and set atomic_file */
- set_inode_flag(inode, FI_ATOMIC_FILE);
- clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- F2FS_I(inode)->inmem_task = current;
- stat_update_max_atomic_write(inode);
-out:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
- return ret;
-}
-
-static int f2fs_ioc_commit_atomic_write(struct file *filp)
-{
- struct inode *inode = file_inode(filp);
- int ret;
-
- if (!inode_owner_or_capable(&init_user_ns, inode))
- return -EACCES;
-
- ret = mnt_want_write_file(filp);
if (ret)
- return ret;
-
- f2fs_balance_fs(F2FS_I_SB(inode), true);
-
- inode_lock(inode);
+ goto out_unlock;
- if (f2fs_is_volatile_file(inode)) {
- ret = -EINVAL;
- goto err_out;
- }
+ /* Check if the inode already has a COW inode */
+ if (fi->cow_inode == NULL) {
+ /* Create a COW inode for atomic write */
+ struct dentry *dentry = file_dentry(filp);
+ struct inode *dir = d_inode(dentry->d_parent);
- if (f2fs_is_atomic_file(inode)) {
- ret = f2fs_commit_inmem_pages(inode);
+ ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode);
if (ret)
- goto err_out;
+ goto out_unlock;
- ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
- if (!ret)
- f2fs_drop_inmem_pages(inode);
+ set_inode_flag(fi->cow_inode, FI_COW_FILE);
+ clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+
+ /* Set the COW inode's atomic_inode to the atomic inode */
+ F2FS_I(fi->cow_inode)->atomic_inode = inode;
} else {
- ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
- }
-err_out:
- if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
- clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- ret = -EINVAL;
- }
- inode_unlock(inode);
- mnt_drop_write_file(filp);
- return ret;
-}
+ /* Reuse the already created COW inode */
+ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode));
-static int f2fs_ioc_start_volatile_write(struct file *filp)
-{
- struct inode *inode = file_inode(filp);
- int ret;
+ invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1);
- if (!inode_owner_or_capable(&init_user_ns, inode))
- return -EACCES;
+ ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
+ if (ret)
+ goto out_unlock;
+ }
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
+ f2fs_write_inode(inode, NULL);
- ret = mnt_want_write_file(filp);
- if (ret)
- return ret;
+ stat_inc_atomic_inode(inode);
- inode_lock(inode);
+ set_inode_flag(inode, FI_ATOMIC_FILE);
- if (f2fs_is_volatile_file(inode))
- goto out;
+ isize = i_size_read(inode);
+ fi->original_i_size = isize;
+ if (truncate) {
+ set_inode_flag(inode, FI_ATOMIC_REPLACE);
+ truncate_inode_pages_final(inode->i_mapping);
+ f2fs_i_size_write(inode, 0);
+ isize = 0;
+ }
+ f2fs_i_size_write(fi->cow_inode, isize);
- ret = f2fs_convert_inline_inode(inode);
+out_unlock:
+ f2fs_up_write(&fi->i_gc_rwsem[READ]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
if (ret)
goto out;
- stat_inc_volatile_write(inode);
- stat_update_max_volatile_write(inode);
-
- set_inode_flag(inode, FI_VOLATILE_FILE);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
+ fi->atomic_write_task = current;
+ stat_update_max_atomic_write(inode);
+ fi->atomic_write_cnt = 0;
out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
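/*
 * Illustrative userspace sketch (not part of the patch): how the atomic-write
 * ioctls above are typically driven. Assumes the F2FS_IOC_START_ATOMIC_WRITE
 * and F2FS_IOC_COMMIT_ATOMIC_WRITE definitions from the f2fs uapi header
 * <linux/f2fs.h>; the file path below is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h>

int atomic_update_example(void)
{
	int fd = open("/mnt/f2fs/db.file", O_RDWR);	/* hypothetical path */

	if (fd < 0)
		return -1;

	/* Stage all following writes in the COW inode created above. */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0) {
		perror("F2FS_IOC_START_ATOMIC_WRITE");
		close(fd);
		return -1;
	}

	if (pwrite(fd, "payload", 7, 0) != 7)
		perror("pwrite");

	/* Publish the staged blocks and sync; otherwise the staged data is
	 * dropped on abort or on the last close, per the release path above. */
	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)
		perror("F2FS_IOC_COMMIT_ATOMIC_WRITE");

	close(fd);
	return 0;
}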
-static int f2fs_ioc_release_volatile_write(struct file *filp)
+static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
inode_lock(inode);
- if (!f2fs_is_volatile_file(inode))
- goto out;
+ if (f2fs_is_atomic_file(inode)) {
+ ret = f2fs_commit_atomic_write(inode);
+ if (!ret)
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
- if (!f2fs_is_first_block_written(inode)) {
- ret = truncate_partial_data_page(inode, 0, true);
- goto out;
+ f2fs_abort_atomic_write(inode, ret);
+ } else {
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
- ret = punch_hole(inode, 0, F2FS_BLKSIZE);
-out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
-static int f2fs_ioc_abort_volatile_write(struct file *filp)
+static int f2fs_ioc_abort_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
@@ -2169,15 +2413,7 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
inode_lock(inode);
- if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
- if (f2fs_is_volatile_file(inode)) {
- clear_inode_flag(inode, FI_VOLATILE_FILE);
- stat_dec_volatile_write(inode);
- ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
- }
-
- clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+ f2fs_abort_atomic_write(inode, true);
inode_unlock(inode);
@@ -2186,58 +2422,36 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
return ret;
}
-static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
+int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
+ bool readonly, bool need_lock)
{
- struct inode *inode = file_inode(filp);
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- __u32 in;
int ret = 0;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (get_user(in, (__u32 __user *)arg))
- return -EFAULT;
-
- if (in != F2FS_GOING_DOWN_FULLSYNC) {
- ret = mnt_want_write_file(filp);
- if (ret) {
- if (ret == -EROFS) {
- ret = 0;
- f2fs_stop_checkpoint(sbi, false);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
- trace_f2fs_shutdown(sbi, in, ret);
- }
- return ret;
- }
- }
-
- switch (in) {
+ switch (flag) {
case F2FS_GOING_DOWN_FULLSYNC:
- ret = freeze_bdev(sb->s_bdev);
+ ret = bdev_freeze(sb->s_bdev);
if (ret)
goto out;
- f2fs_stop_checkpoint(sbi, false);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
- thaw_bdev(sb->s_bdev);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
+ bdev_thaw(sb->s_bdev);
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
ret = f2fs_sync_fs(sb, 1);
- if (ret)
+ if (ret) {
+ if (ret == -EIO)
+ ret = 0;
goto out;
- f2fs_stop_checkpoint(sbi, false);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ }
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
- f2fs_stop_checkpoint(sbi, false);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
- f2fs_stop_checkpoint(sbi, false);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NEED_FSCK:
set_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -2245,24 +2459,128 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
set_sbi_flag(sbi, SBI_IS_DIRTY);
/* do checkpoint only */
ret = f2fs_sync_fs(sb, 1);
+ if (ret == -EIO)
+ ret = 0;
goto out;
default:
ret = -EINVAL;
goto out;
}
+ if (readonly)
+ goto out;
+
+ /*
+ * grab sb->s_umount to avoid racing w/ remount() and other shutdown
+ * paths.
+ */
+ if (need_lock)
+ down_write(&sbi->sb->s_umount);
+
f2fs_stop_gc_thread(sbi);
f2fs_stop_discard_thread(sbi);
f2fs_drop_discard_cmd(sbi);
clear_opt(sbi, DISCARD);
+ if (need_lock)
+ up_write(&sbi->sb->s_umount);
+
f2fs_update_time(sbi, REQ_TIME);
out:
- if (in != F2FS_GOING_DOWN_FULLSYNC)
+
+ trace_f2fs_shutdown(sbi, flag, ret);
+
+ return ret;
+}
+
+static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ __u32 in;
+ int ret;
+ bool need_drop = false, readonly = false;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(in, (__u32 __user *)arg))
+ return -EFAULT;
+
+ if (in != F2FS_GOING_DOWN_FULLSYNC) {
+ ret = mnt_want_write_file(filp);
+ if (ret) {
+ if (ret != -EROFS)
+ return ret;
+
+ /* fallback to nosync shutdown for readonly fs */
+ in = F2FS_GOING_DOWN_NOSYNC;
+ readonly = true;
+ } else {
+ need_drop = true;
+ }
+ }
+
+ ret = f2fs_do_shutdown(sbi, in, readonly, true);
+
+ if (need_drop)
mnt_drop_write_file(filp);
- trace_f2fs_shutdown(sbi, in, ret);
+ return ret;
+}
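/*
 * Illustrative sketch (not part of the patch): invoking the shutdown ioctl
 * handled by f2fs_do_shutdown() above. Assumes F2FS_IOC_SHUTDOWN and the
 * F2FS_GOING_DOWN_* flags from <linux/f2fs.h>; requires CAP_SYS_ADMIN.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h>

int shutdown_example(const char *any_path_on_fs)
{
	__u32 flag = F2FS_GOING_DOWN_METASYNC;	/* checkpoint, then stop checkpoints */
	int fd = open(any_path_on_fs, O_RDONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	if (ioctl(fd, F2FS_IOC_SHUTDOWN, &flag) < 0) {
		perror("F2FS_IOC_SHUTDOWN");
		ret = -1;
	}
	close(fd);
	return ret;
}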
+
+static int f2fs_keep_noreuse_range(struct inode *inode,
+ loff_t offset, loff_t len)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
+ u64 start, end;
+ int ret = 0;
+
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+
+ if (offset >= max_bytes || len > max_bytes ||
+ (offset + len) > max_bytes)
+ return 0;
+
+ start = offset >> PAGE_SHIFT;
+ end = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+ inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) {
+ inode_unlock(inode);
+ return 0;
+ }
+
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ /* let's remove the range, if len = 0 */
+ if (!len) {
+ if (!list_empty(&F2FS_I(inode)->gdonate_list)) {
+ list_del_init(&F2FS_I(inode)->gdonate_list);
+ sbi->donate_files--;
+ if (is_inode_flag_set(inode, FI_DONATE_FINISHED))
+ ret = -EALREADY;
+ else
+ set_inode_flag(inode, FI_DONATE_FINISHED);
+ } else
+ ret = -ENOENT;
+ } else {
+ if (list_empty(&F2FS_I(inode)->gdonate_list)) {
+ list_add_tail(&F2FS_I(inode)->gdonate_list,
+ &sbi->inode_list[DONATE_INODE]);
+ sbi->donate_files++;
+ } else {
+ list_move_tail(&F2FS_I(inode)->gdonate_list,
+ &sbi->inode_list[DONATE_INODE]);
+ }
+ F2FS_I(inode)->donate_start = start;
+ F2FS_I(inode)->donate_end = end - 1;
+ clear_inode_flag(inode, FI_DONATE_FINISHED);
+ }
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+ inode_unlock(inode);
return ret;
}
@@ -2270,15 +2588,14 @@ out:
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct super_block *sb = inode->i_sb;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fstrim_range range;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!f2fs_hw_support_discard(F2FS_SB(sb)))
+ if (!f2fs_hw_support_discard(sbi))
return -EOPNOTSUPP;
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
@@ -2289,9 +2606,9 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
if (ret)
return ret;
- range.minlen = max((unsigned int)range.minlen,
- q->limits.discard_granularity);
- ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+ range.minlen = max_t(unsigned int, range.minlen,
+ f2fs_hw_discard_granularity(sbi));
+ ret = f2fs_trim_fs(sbi, &range);
mnt_drop_write_file(filp);
if (ret < 0)
return ret;
@@ -2299,7 +2616,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
}
@@ -2316,13 +2633,14 @@ static bool uuid_is_nonzero(__u8 u[16])
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ int ret;
if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
return -EOPNOTSUPP;
+ ret = fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-
- return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
+ return ret;
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
@@ -2336,6 +2654,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ u8 encrypt_pw_salt[16];
int err;
if (!f2fs_sb_has_encrypt(sbi))
@@ -2345,7 +2664,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
if (err)
return err;
- down_write(&sbi->sb_lock);
+ f2fs_down_write(&sbi->sb_lock);
if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
goto got_it;
@@ -2360,12 +2679,14 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
goto out_err;
}
got_it:
- if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
- 16))
- err = -EFAULT;
+ memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
out_err:
- up_write(&sbi->sb_lock);
+ f2fs_up_write(&sbi->sb_lock);
mnt_drop_write_file(filp);
+
+ if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
+ err = -EFAULT;
+
return err;
}
@@ -2424,6 +2745,10 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
+ .no_bg_gc = false,
+ .should_migrate_blocks = false,
+ .nr_free_secs = 0 };
__u32 sync;
int ret;
@@ -2441,15 +2766,18 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
return ret;
if (!sync) {
- if (!down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
} else {
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
}
- ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
+ gc_control.init_gc_type = sync ? FG_GC : BG_GC;
+ gc_control.err_gc_skipped = sync;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
+ ret = f2fs_gc(sbi, &gc_control);
out:
mnt_drop_write_file(filp);
return ret;
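/*
 * Illustrative sketch (not part of the patch): requesting garbage collection
 * through the ioctl wired to f2fs_ioc_gc() above. Assumes
 * F2FS_IOC_GARBAGE_COLLECT from <linux/f2fs.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h>

int trigger_gc_example(const char *any_path_on_fs)
{
	__u32 sync = 1;	/* 1: foreground GC and wait; 0: background GC */
	int fd = open(any_path_on_fs, O_RDONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	/* Requires CAP_SYS_ADMIN; with sync == 0 the call may return -EBUSY
	 * when the GC lock is already held, per the trylock path above. */
	if (ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync) < 0) {
		perror("F2FS_IOC_GARBAGE_COLLECT");
		ret = -1;
	}
	close(fd);
	return ret;
}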
@@ -2458,6 +2786,12 @@ out:
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
+ struct f2fs_gc_control gc_control = {
+ .init_gc_type = range->sync ? FG_GC : BG_GC,
+ .no_bg_gc = false,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = range->sync,
+ .nr_free_secs = 0 };
u64 end;
int ret;
@@ -2477,22 +2811,23 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
do_more:
if (!range->sync) {
- if (!down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
} else {
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
}
- ret = f2fs_gc(sbi, range->sync, true, false,
- GET_SEGNO(sbi, range->start));
+ gc_control.victim_segno = GET_SEGNO(sbi, range->start);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
+ ret = f2fs_gc(sbi, &gc_control);
if (ret) {
if (ret == -EBUSY)
ret = -EAGAIN;
goto out;
}
- range->start += BLKS_PER_SEC(sbi);
+ range->start += CAP_BLKS_PER_SEC(sbi);
if (range->start <= end)
goto do_more;
out:
@@ -2510,7 +2845,7 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
return __f2fs_ioc_gc_range(filp, &range);
}
-static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
+static int f2fs_ioc_write_checkpoint(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2545,28 +2880,38 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
struct f2fs_map_blocks map = { .m_next_extent = NULL,
.m_seg_type = NO_CHECK_TYPE,
.m_may_create = false };
- struct extent_info ei = {0, 0, 0};
+ struct extent_info ei = {};
pgoff_t pg_start, pg_end, next_pgofs;
- unsigned int blk_per_seg = sbi->blocks_per_seg;
unsigned int total = 0, sec_num;
block_t blk_end = 0;
bool fragmented = false;
int err;
- /* if in-place-update policy is enabled, don't waste time here */
- if (f2fs_should_update_inplace(inode, NULL))
- return -EINVAL;
+ f2fs_balance_fs(sbi, true);
+ inode_lock(inode);
pg_start = range->start >> PAGE_SHIFT;
- pg_end = (range->start + range->len) >> PAGE_SHIFT;
+ pg_end = min_t(pgoff_t,
+ (range->start + range->len) >> PAGE_SHIFT,
+ DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
- f2fs_balance_fs(sbi, true);
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) ||
+ f2fs_is_atomic_file(inode)) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
- inode_lock(inode);
+ /* if in-place-update policy is enabled, don't waste time here */
+ set_inode_flag(inode, FI_OPU_WRITE);
+ if (f2fs_should_update_inplace(inode, NULL)) {
+ err = -EINVAL;
+ goto out;
+ }
/* writeback all dirty pages in the range */
- err = filemap_write_and_wait_range(inode->i_mapping, range->start,
- range->start + range->len - 1);
+ err = filemap_write_and_wait_range(inode->i_mapping,
+ pg_start << PAGE_SHIFT,
+ (pg_end << PAGE_SHIFT) - 1);
if (err)
goto out;
@@ -2574,8 +2919,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
* lookup mapping info in extent cache, skip defragmenting if physical
* block addresses are continuous.
*/
- if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
- if (ei.fofs + ei.len >= pg_end)
+ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
+ if ((pgoff_t)ei.fofs + ei.len >= pg_end)
goto out;
}
@@ -2589,7 +2934,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
*/
while (map.m_lblk < pg_end) {
map.m_len = pg_end - map.m_lblk;
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
if (err)
goto out;
@@ -2614,7 +2959,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto out;
}
- sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
+ sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
/*
* make sure there are enough free section for LFS allocation, this can
@@ -2636,7 +2981,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
do_map:
map.m_len = pg_end - map.m_lblk;
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
if (err)
goto clear_out;
@@ -2645,20 +2990,24 @@ do_map:
goto check;
}
- set_inode_flag(inode, FI_DO_DEFRAG);
+ set_inode_flag(inode, FI_SKIP_WRITES);
idx = map.m_lblk;
- while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
- struct page *page;
+ while (idx < map.m_lblk + map.m_len &&
+ cnt < BLKS_PER_SEG(sbi)) {
+ struct folio *folio;
- page = f2fs_get_lock_data_page(inode, idx, true);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, idx, true);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto clear_out;
}
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+
+ folio_mark_dirty(folio);
+ folio_set_f2fs_gcing(folio);
+ f2fs_folio_put(folio, true);
idx++;
cnt++;
@@ -2667,18 +3016,20 @@ do_map:
map.m_lblk = idx;
check:
- if (map.m_lblk < pg_end && cnt < blk_per_seg)
+ if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
goto do_map;
- clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_SKIP_WRITES);
err = filemap_fdatawrite(inode->i_mapping);
if (err)
goto out;
}
clear_out:
- clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_SKIP_WRITES);
out:
+ clear_inode_flag(inode, FI_OPU_WRITE);
+unlock_out:
inode_unlock(inode);
if (!err)
range->len = (u64)total << PAGE_SHIFT;
@@ -2695,7 +3046,7 @@ static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
+ if (!S_ISREG(inode->i_mode))
return -EINVAL;
if (f2fs_readonly(sbi->sb))
@@ -2720,7 +3071,8 @@ static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
err = f2fs_defragment_range(sbi, filp, &range);
mnt_drop_write_file(filp);
- f2fs_update_time(sbi, REQ_TIME);
+ if (range.len)
+ f2fs_update_time(sbi, REQ_TIME);
if (err < 0)
return err;
@@ -2771,6 +3123,17 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out;
}
+ if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
+ f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = -EINVAL;
if (pos_in + len > src->i_size || pos_in + len < pos_in)
goto out_unlock;
@@ -2814,17 +3177,17 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
f2fs_balance_fs(sbi, true);
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
if (src != dst) {
ret = -EBUSY;
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+ if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
goto out_src;
}
f2fs_lock_op(sbi);
- ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
- pos_out >> F2FS_BLKSIZE_BITS,
- len >> F2FS_BLKSIZE_BITS, false);
+ ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in),
+ F2FS_BYTES_TO_BLK(pos_out),
+ F2FS_BYTES_TO_BLK(len), false);
if (!ret) {
if (dst_max_i_size)
@@ -2835,9 +3198,20 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
f2fs_unlock_op(sbi);
if (src != dst)
- up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
- up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ if (ret)
+ goto out_unlock;
+
+ inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
+ f2fs_mark_inode_dirty_sync(src, false);
+ if (src != dst) {
+ inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
+ f2fs_mark_inode_dirty_sync(dst, false);
+ }
+ f2fs_update_time(sbi, REQ_TIME);
+
out_unlock:
if (src != dst)
inode_unlock(dst);
@@ -2849,32 +3223,27 @@ out:
static int __f2fs_ioc_move_range(struct file *filp,
struct f2fs_move_range *range)
{
- struct fd dst;
int err;
if (!(filp->f_mode & FMODE_READ) ||
!(filp->f_mode & FMODE_WRITE))
return -EBADF;
- dst = fdget(range->dst_fd);
- if (!dst.file)
+ CLASS(fd, dst)(range->dst_fd);
+ if (fd_empty(dst))
return -EBADF;
- if (!(dst.file->f_mode & FMODE_WRITE)) {
- err = -EBADF;
- goto err_out;
- }
+ if (!(fd_file(dst)->f_mode & FMODE_WRITE))
+ return -EBADF;
err = mnt_want_write_file(filp);
if (err)
- goto err_out;
+ return err;
- err = f2fs_move_file_range(filp, range->pos_in, dst.file,
+ err = f2fs_move_file_range(filp, range->pos_in, fd_file(dst),
range->pos_out, range->len);
mnt_drop_write_file(filp);
-err_out:
- fdput(dst);
return err;
}
@@ -2896,6 +3265,11 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
unsigned int start_segno = 0, end_segno = 0;
unsigned int dev_start_segno = 0, dev_end_segno = 0;
struct f2fs_flush_device range;
+ struct f2fs_gc_control gc_control = {
+ .init_gc_type = FG_GC,
+ .should_migrate_blocks = true,
+ .err_gc_skipped = true,
+ .nr_free_secs = 0 };
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -2913,8 +3287,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
__is_large_section(sbi)) {
- f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
- range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
+ f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1",
+ range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
return -EINVAL;
}
@@ -2932,14 +3306,17 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
end_segno = min(start_segno + range.segments, dev_end_segno);
while (start_segno < end_segno) {
- if (!down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
sm->last_victim[GC_CB] = end_segno + 1;
sm->last_victim[GC_GREEDY] = end_segno + 1;
sm->last_victim[ALLOC_NEXT] = end_segno + 1;
- ret = f2fs_gc(sbi, true, true, true, start_segno);
+
+ gc_control.victim_segno = start_segno;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
+ ret = f2fs_gc(sbi, &gc_control);
if (ret == -EAGAIN)
ret = 0;
else if (ret < 0)
@@ -2968,15 +3345,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
struct dquot *transfer_to[MAXQUOTAS] = {};
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- int err = 0;
+ int err;
transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- if (err)
- set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- dqput(transfer_to[PRJQUOTA]);
- }
+ if (IS_ERR(transfer_to[PRJQUOTA]))
+ return PTR_ERR(transfer_to[PRJQUOTA]);
+
+ err = __dquot_transfer(inode, transfer_to);
+ if (err)
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ dqput(transfer_to[PRJQUOTA]);
return err;
}
@@ -2984,7 +3362,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *ipage;
+ struct f2fs_inode *ri = NULL;
kprojid_t kprojid;
int err;
@@ -3000,7 +3378,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
- if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
+ if (projid_eq(kprojid, fi->i_projid))
return 0;
err = -EPERM;
@@ -3008,17 +3386,8 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
if (IS_NOQUOTA(inode))
return err;
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
-
- if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
- i_projid)) {
- err = -EOVERFLOW;
- f2fs_put_page(ipage, 1);
- return err;
- }
- f2fs_put_page(ipage, 1);
+ if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
+ return -EOVERFLOW;
err = f2fs_dquot_initialize(inode);
if (err)
@@ -3029,8 +3398,8 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
if (err)
goto out_unlock;
- F2FS_I(inode)->i_projid = kprojid;
- inode->i_ctime = current_time(inode);
+ fi->i_projid = kprojid;
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
f2fs_unlock_op(sbi);
@@ -3050,7 +3419,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
}
#endif
-int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -3073,8 +3442,8 @@ int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
return 0;
}
-int f2fs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int f2fs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
@@ -3107,24 +3476,27 @@ int f2fs_pin_file_control(struct inode *inode, bool inc)
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- /* Use i_gc_failures for normal file as a risk signal. */
- if (inc)
- f2fs_i_gc_failures_write(inode,
- fi->i_gc_failures[GC_FAILURE_PIN] + 1);
+ if (IS_DEVICE_ALIASING(inode))
+ return -EINVAL;
- if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
+ if (fi->i_gc_failures >= sbi->gc_pin_file_threshold) {
f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
- __func__, inode->i_ino,
- fi->i_gc_failures[GC_FAILURE_PIN]);
+ __func__, inode->i_ino, fi->i_gc_failures);
clear_inode_flag(inode, FI_PIN_FILE);
return -EAGAIN;
}
+
+ /* Use i_gc_failures for normal file as a risk signal. */
+ if (inc)
+ f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);
+
return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
__u32 pin;
int ret = 0;
@@ -3134,16 +3506,19 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
if (!S_ISREG(inode->i_mode))
return -EINVAL;
- if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ if (f2fs_readonly(sbi->sb))
return -EROFS;
+ if (!pin && IS_DEVICE_ALIASING(inode))
+ return -EOPNOTSUPP;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
inode_lock(inode);
- if (f2fs_should_update_outplace(inode, NULL)) {
+ if (f2fs_is_atomic_file(inode)) {
ret = -EINVAL;
goto out;
}
@@ -3152,6 +3527,20 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
clear_inode_flag(inode, FI_PIN_FILE);
f2fs_i_gc_failures_write(inode, 0);
goto done;
+ } else if (f2fs_is_pinned_file(inode)) {
+ goto done;
+ }
+
+ if (F2FS_HAS_BLOCKS(inode)) {
+ ret = -EFBIG;
+ goto out;
+ }
+
+ /* Let's allow file pinning on zoned device. */
+ if (!f2fs_sb_has_blkzoned(sbi) &&
+ f2fs_should_update_outplace(inode, NULL)) {
+ ret = -EINVAL;
+ goto out;
}
if (f2fs_pin_file_control(inode, false)) {
@@ -3169,9 +3558,9 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
}
set_inode_flag(inode, FI_PIN_FILE);
- ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
+ ret = F2FS_I(inode)->i_gc_failures;
done:
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
out:
inode_unlock(inode);
mnt_drop_write_file(filp);
@@ -3184,10 +3573,33 @@ static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
__u32 pin = 0;
if (is_inode_flag_set(inode, FI_PIN_FILE))
- pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
+ pin = F2FS_I(inode)->i_gc_failures;
return put_user(pin, (u32 __user *)arg);
}
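/*
 * Illustrative sketch (not part of the patch): pinning a file against GC
 * relocation through the ioctls above. Assumes F2FS_IOC_SET_PIN_FILE and
 * F2FS_IOC_GET_PIN_FILE from <linux/f2fs.h>; the path is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h>

int pin_file_example(void)
{
	__u32 pin = 1, gc_failures = 0;
	int fd = open("/mnt/f2fs/swapfile", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return -1;
	/* Per the patch, pinning is refused with -EFBIG once the file already
	 * has blocks, so pin right after creation and before writing data. */
	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
		perror("F2FS_IOC_SET_PIN_FILE");
	if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &gc_failures) == 0)
		printf("GC failures while pinned: %u\n", gc_failures);
	close(fd);
	return 0;
}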
+static int f2fs_ioc_get_dev_alias_file(struct file *filp, unsigned long arg)
+{
+ return put_user(IS_DEVICE_ALIASING(file_inode(filp)) ? 1 : 0,
+ (u32 __user *)arg);
+}
+
+static int f2fs_ioc_io_prio(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u32 level;
+
+ if (get_user(level, (__u32 __user *)arg))
+ return -EFAULT;
+
+ if (!S_ISREG(inode->i_mode) || level >= F2FS_IOPRIO_MAX)
+ return -EINVAL;
+
+ inode_lock(inode);
+ F2FS_I(inode)->ioprio_hint = level;
+ inode_unlock(inode);
+ return 0;
+}
+
int f2fs_precache_extents(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -3200,19 +3612,20 @@ int f2fs_precache_extents(struct inode *inode)
return -EOPNOTSUPP;
map.m_lblk = 0;
+ map.m_pblk = 0;
map.m_next_pgofs = NULL;
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- end = max_file_blocks(inode);
+ end = F2FS_BLK_ALIGN(i_size_read(inode));
while (map.m_lblk < end) {
map.m_len = end - map.m_lblk;
- down_write(&fi->i_gc_rwsem[WRITE]);
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
- up_write(&fi->i_gc_rwsem[WRITE]);
- if (err)
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ if (err || !map.m_len)
return err;
map.m_lblk = m_next_extent;
@@ -3221,7 +3634,7 @@ int f2fs_precache_extents(struct inode *inode)
return 0;
}
-static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
+static int f2fs_ioc_precache_extents(struct file *filp)
{
return f2fs_precache_extents(file_inode(filp));
}
@@ -3241,7 +3654,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
sizeof(block_count)))
return -EFAULT;
- return f2fs_resize_fs(sbi, block_count);
+ return f2fs_resize_fs(filp, block_count);
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
@@ -3288,11 +3701,11 @@ static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
if (!vbuf)
return -ENOMEM;
- down_read(&sbi->sb_lock);
+ f2fs_down_read(&sbi->sb_lock);
count = utf16s_to_utf8s(sbi->raw_super->volume_name,
ARRAY_SIZE(sbi->raw_super->volume_name),
UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
- up_read(&sbi->sb_lock);
+ f2fs_up_read(&sbi->sb_lock);
if (copy_to_user((char __user *)arg, vbuf,
min(FSLABEL_MAX, count)))
@@ -3320,7 +3733,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
if (err)
goto out;
- down_write(&sbi->sb_lock);
+ f2fs_down_write(&sbi->sb_lock);
memset(sbi->raw_super->volume_name, 0,
sizeof(sbi->raw_super->volume_name));
@@ -3330,7 +3743,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
err = f2fs_commit_super(sbi, false);
- up_write(&sbi->sb_lock);
+ f2fs_up_write(&sbi->sb_lock);
mnt_drop_write_file(filp);
out:
@@ -3338,18 +3751,29 @@ out:
return err;
}
-static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
+static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
{
- struct inode *inode = file_inode(filp);
- __u64 blocks;
-
if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
return -EOPNOTSUPP;
if (!f2fs_compressed_file(inode))
return -EINVAL;
- blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
+ *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
+
+ return 0;
+}
+
+static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u64 blocks;
+ int ret;
+
+ ret = f2fs_get_compress_blocks(inode, &blocks);
+ if (ret < 0)
+ return ret;
+
return put_user(blocks, (u64 __user *)arg);
}
@@ -3362,7 +3786,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
int i;
for (i = 0; i < count; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -3391,8 +3815,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
if (blkaddr != NEW_ADDR)
continue;
- dn->data_blkaddr = NULL_ADDR;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, NULL_ADDR);
}
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
@@ -3410,18 +3833,16 @@ next:
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t page_idx = 0, last_idx;
unsigned int released_blocks = 0;
int ret;
int writecount;
- if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
if (f2fs_readonly(sbi->sb))
return -EROFS;
@@ -3429,7 +3850,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
return ret;
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
inode_lock(inode);
@@ -3440,7 +3861,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
goto out;
}
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (!f2fs_compressed_file(inode) ||
+ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
@@ -3449,14 +3871,16 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
goto out;
+ if (!atomic_read(&fi->i_compr_blocks)) {
+ ret = -EPERM;
+ goto out;
+ }
+
set_inode_flag(inode, FI_COMPRESS_RELEASED);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
- if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
- goto out;
-
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3465,9 +3889,12 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
struct dnode_of_data dn;
pgoff_t end_offset, count;
+ f2fs_lock_op(sbi);
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
if (ret) {
+ f2fs_unlock_op(sbi);
if (ret == -ENOENT) {
page_idx = f2fs_get_next_page_offset(&dn,
page_idx);
@@ -3477,14 +3904,16 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
break;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
- count = round_up(count, F2FS_I(inode)->i_cluster_size);
+ count = round_up(count, fi->i_cluster_size);
ret = release_compress_blocks(&dn, count);
f2fs_put_dnode(&dn);
+ f2fs_unlock_op(sbi);
+
if (ret < 0)
break;
@@ -3493,8 +3922,10 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
}
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
out:
+ if (released_blocks)
+ f2fs_update_time(sbi, REQ_TIME);
inode_unlock(inode);
mnt_drop_write_file(filp);
@@ -3502,29 +3933,29 @@ out:
if (ret >= 0) {
ret = put_user(released_blocks, (u64 __user *)arg);
} else if (released_blocks &&
- atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+ atomic_read(&fi->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
"iblocks=%llu, released=%u, compr_blocks=%u, "
"run fsck to fix.",
__func__, inode->i_ino, inode->i_blocks,
released_blocks,
- atomic_read(&F2FS_I(inode)->i_compr_blocks));
+ atomic_read(&fi->i_compr_blocks));
}
return ret;
}
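/*
 * Illustrative sketch (not part of the patch): releasing the blocks saved by
 * compression and reserving them again, matching the release path above and
 * the reserve path that follows. Assumes F2FS_IOC_RELEASE_COMPRESS_BLOCKS and
 * F2FS_IOC_RESERVE_COMPRESS_BLOCKS from <linux/f2fs.h>; the path is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h>

int compress_blocks_example(void)
{
	__u64 blocks = 0;
	int fd = open("/mnt/f2fs/compressed.bin", O_RDONLY);

	if (fd < 0)
		return -1;
	/* Give the saved blocks back to the filesystem; the file is then
	 * marked FI_COMPRESS_RELEASED and cannot be written until reserved. */
	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks) == 0)
		printf("released %llu blocks\n", (unsigned long long)blocks);
	/* Reserve the blocks again so the file becomes writable. */
	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks) == 0)
		printf("reserved %llu blocks\n", (unsigned long long)blocks);
	close(fd);
	return 0;
}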
-static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ unsigned int *reserved_blocks)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- unsigned int reserved_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
block_t blkaddr;
int i;
for (i = 0; i < count; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -3536,60 +3967,77 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
while (count) {
int compr_blocks = 0;
- blkcnt_t reserved;
+ blkcnt_t reserved = 0;
+ blkcnt_t to_reserved;
int ret;
- for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
- blkaddr = f2fs_data_blkaddr(dn);
+ for (i = 0; i < cluster_size; i++) {
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ dn->ofs_in_node + i);
if (i == 0) {
- if (blkaddr == COMPRESS_ADDR)
- continue;
- dn->ofs_in_node += cluster_size;
- goto next;
+ if (blkaddr != COMPRESS_ADDR) {
+ dn->ofs_in_node += cluster_size;
+ goto next;
+ }
+ continue;
}
+ /*
+			 * compressed cluster was not released because it
+			 * failed in release_compress_blocks(), so NEW_ADDR
+			 * is a possible case.
+ */
+ if (blkaddr == NEW_ADDR) {
+ reserved++;
+ continue;
+ }
if (__is_valid_data_blkaddr(blkaddr)) {
compr_blocks++;
continue;
}
+ }
+
+ to_reserved = cluster_size - compr_blocks - reserved;
- dn->data_blkaddr = NEW_ADDR;
- f2fs_set_data_blkaddr(dn);
+ /* for the case all blocks in cluster were reserved */
+ if (reserved && to_reserved == 1) {
+ dn->ofs_in_node += cluster_size;
+ goto next;
}
- reserved = cluster_size - compr_blocks;
- ret = inc_valid_block_count(sbi, dn->inode, &reserved);
- if (ret)
+ ret = inc_valid_block_count(sbi, dn->inode,
+ &to_reserved, false);
+ if (unlikely(ret))
return ret;
- if (reserved != cluster_size - compr_blocks)
- return -ENOSPC;
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+ if (f2fs_data_blkaddr(dn) == NULL_ADDR)
+ f2fs_set_data_blkaddr(dn, NEW_ADDR);
+ }
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
- reserved_blocks += reserved;
+ *reserved_blocks += to_reserved;
next:
count -= cluster_size;
}
- return reserved_blocks;
+ return 0;
}
static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t page_idx = 0, last_idx;
unsigned int reserved_blocks = 0;
int ret;
- if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
if (f2fs_readonly(sbi->sb))
return -EROFS;
@@ -3597,19 +4045,20 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
return ret;
- if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
- goto out;
-
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
inode_lock(inode);
- if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (!f2fs_compressed_file(inode) ||
+ !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto unlock_inode;
}
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (atomic_read(&fi->i_compr_blocks))
+ goto unlock_inode;
+
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3618,9 +4067,12 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
struct dnode_of_data dn;
pgoff_t end_offset, count;
+ f2fs_lock_op(sbi);
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
if (ret) {
+ f2fs_unlock_op(sbi);
if (ret == -ENOENT) {
page_idx = f2fs_get_next_page_offset(&dn,
page_idx);
@@ -3630,45 +4082,47 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
break;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
- count = round_up(count, F2FS_I(inode)->i_cluster_size);
+ count = round_up(count, fi->i_cluster_size);
- ret = reserve_compress_blocks(&dn, count);
+ ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
f2fs_put_dnode(&dn);
+ f2fs_unlock_op(sbi);
+
if (ret < 0)
break;
page_idx += count;
- reserved_blocks += ret;
}
filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- if (ret >= 0) {
+ if (!ret) {
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
unlock_inode:
+ if (reserved_blocks)
+ f2fs_update_time(sbi, REQ_TIME);
inode_unlock(inode);
-out:
mnt_drop_write_file(filp);
- if (ret >= 0) {
+ if (!ret) {
ret = put_user(reserved_blocks, (u64 __user *)arg);
} else if (reserved_blocks &&
- atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+ atomic_read(&fi->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
+ f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
"iblocks=%llu, reserved=%u, compr_blocks=%u, "
"run fsck to fix.",
__func__, inode->i_ino, inode->i_blocks,
reserved_blocks,
- atomic_read(&F2FS_I(inode)->i_compr_blocks));
+ atomic_read(&fi->i_compr_blocks));
}
return ret;
@@ -3677,18 +4131,18 @@ out:
static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
pgoff_t off, block_t block, block_t len, u32 flags)
{
- struct request_queue *q = bdev_get_queue(bdev);
sector_t sector = SECTOR_FROM_BLOCK(block);
sector_t nr_sects = SECTOR_FROM_BLOCK(len);
int ret = 0;
- if (!q)
- return -ENXIO;
-
- if (flags & F2FS_TRIM_FILE_DISCARD)
- ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
- blk_queue_secure_erase(q) ?
- BLKDEV_DISCARD_SECURE : 0);
+ if (flags & F2FS_TRIM_FILE_DISCARD) {
+ if (bdev_max_secure_erase_sectors(bdev))
+ ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
+ GFP_NOFS);
+ else
+ ret = blkdev_issue_discard(bdev, sector, nr_sects,
+ GFP_NOFS);
+ }
if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
if (IS_ENCRYPTED(inode))
@@ -3731,7 +4185,9 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
return -EOPNOTSUPP;
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
@@ -3764,7 +4220,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
if (ret)
goto err;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(mapping);
ret = filemap_write_and_wait_range(mapping, range.start,
@@ -3790,7 +4246,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
goto out;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - index);
for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
struct block_device *cur_bdev;
@@ -3851,12 +4307,13 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
if (len)
ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
prev_block, len, range.flags);
+ f2fs_update_time(sbi, REQ_TIME);
out:
filemap_invalidate_unlock(mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -3891,6 +4348,7 @@ static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_comp_option option;
int ret = 0;
@@ -3905,36 +4363,53 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
sizeof(option)))
return -EFAULT;
- if (!f2fs_compressed_file(inode) ||
- option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
- option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
- option.algorithm >= COMPRESS_MAX)
+ if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
+ option.algorithm >= COMPRESS_MAX)
return -EINVAL;
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ if (!f2fs_compressed_file(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
ret = -EBUSY;
goto out;
}
- if (inode->i_size != 0) {
+ if (F2FS_HAS_BLOCKS(inode)) {
ret = -EFBIG;
goto out;
}
- F2FS_I(inode)->i_compress_algorithm = option.algorithm;
- F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
- F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
+ fi->i_compress_algorithm = option.algorithm;
+ fi->i_log_cluster_size = option.log_cluster_size;
+ fi->i_cluster_size = BIT(option.log_cluster_size);
+ /* Set default level */
+ if (fi->i_compress_algorithm == COMPRESS_ZSTD)
+ fi->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ else
+ fi->i_compress_level = 0;
+ /* Adjust mount option level */
+ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
+ F2FS_OPTION(sbi).compress_level)
+ fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
f2fs_mark_inode_dirty_sync(inode, true);
if (!f2fs_is_compress_backend_ready(inode))
f2fs_warn(sbi, "compression algorithm is successfully set, "
"but current kernel doesn't support this algorithm.");
out:
+ f2fs_up_write(&fi->i_sem);
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -3943,44 +4418,47 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t redirty_idx = page_idx;
- int i, page_len = 0, ret = 0;
+ int page_len = 0, ret = 0;
page_cache_ra_unbounded(&ractl, len, 0);
- for (i = 0; i < len; i++, page_idx++) {
- page = read_cache_page(mapping, page_idx, NULL, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
+ do {
+ folio = read_cache_folio(mapping, page_idx, NULL, NULL);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
break;
}
- page_len++;
- }
+ page_len += folio_nr_pages(folio) - (page_idx - folio->index);
+ page_idx = folio_next_index(folio);
+ } while (page_len < len);
- for (i = 0; i < page_len; i++, redirty_idx++) {
- page = find_lock_page(mapping, redirty_idx);
- if (!page) {
- ret = -ENOMEM;
- break;
- }
- set_page_dirty(page);
- f2fs_put_page(page, 1);
- f2fs_put_page(page, 0);
- }
+ do {
+ folio = filemap_lock_folio(mapping, redirty_idx);
+
+		/* It will never fail, since the folio was pinned above */
+ f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(folio));
+
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
+
+ folio_mark_dirty(folio);
+ folio_set_f2fs_gcing(folio);
+ redirty_idx = folio_next_index(folio);
+ folio_unlock(folio);
+ folio_put_refs(folio, 2);
+ } while (redirty_idx < page_idx);
return ret;
}
-static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
+static int f2fs_ioc_decompress_file(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- pgoff_t page_idx = 0, last_idx;
- unsigned int blk_per_seg = sbi->blocks_per_seg;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int count, ret;
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -3989,12 +4467,11 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
if (!(filp->f_mode & FMODE_WRITE))
return -EBADF;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
if (!f2fs_is_compress_backend_ready(inode)) {
@@ -4002,8 +4479,9 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
goto out;
}
- if (f2fs_is_mmap_file(inode)) {
- ret = -EBUSY;
+ if (!f2fs_compressed_file(inode) ||
+ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
goto out;
}
@@ -4015,20 +4493,29 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
- count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
- ret = redirty_blocks(inode, page_idx, len);
+ if (!f2fs_is_compressed_cluster(inode, page_idx))
+ continue;
+
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
- if (get_dirty_pages(inode) >= blk_per_seg)
- filemap_fdatawrite(inode->i_mapping);
+ if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
+ ret = filemap_fdatawrite(inode->i_mapping);
+ if (ret < 0)
+ break;
+ }
- count -= len;
- page_idx += len;
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4038,21 +4525,21 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
if (ret)
f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
__func__, ret);
+ f2fs_update_time(sbi, REQ_TIME);
out:
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
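/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * rewritten loop above walks whole compression clusters rather than a
 * raw page count.  Assuming 4 KiB pages and log_cluster_size == 2
 * (4-page clusters), the arithmetic below mirrors the conversion from
 * file size to page index to cluster index and back.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long i_size = 70000;		/* bytes */
	unsigned int log_cluster_size = 2;	/* 1 << 2 = 4 pages/cluster */
	unsigned long last_page = DIV_ROUND_UP(i_size, PAGE_SIZE);
	unsigned long last_cluster = last_page >> log_cluster_size;

	printf("pages: %lu, full clusters: %lu\n", last_page, last_cluster);

	for (unsigned long c = 0; c < last_cluster; c++) {
		unsigned long page_idx = c << log_cluster_size;

		printf("cluster %lu starts at page index %lu\n", c, page_idx);
	}
	return 0;
}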
-static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
+static int f2fs_ioc_compress_file(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t page_idx = 0, last_idx;
- unsigned int blk_per_seg = sbi->blocks_per_seg;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int count, ret;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4061,12 +4548,11 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
if (!(filp->f_mode & FMODE_WRITE))
return -EBADF;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
- file_start_write(filp);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
inode_lock(inode);
if (!f2fs_is_compress_backend_ready(inode)) {
@@ -4074,8 +4560,9 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
goto out;
}
- if (f2fs_is_mmap_file(inode)) {
- ret = -EBUSY;
+ if (!f2fs_compressed_file(inode) ||
+ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
goto out;
}
@@ -4086,20 +4573,29 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
- count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
- ret = redirty_blocks(inode, page_idx, len);
+ if (f2fs_is_sparse_cluster(inode, page_idx))
+ continue;
+
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
- if (get_dirty_pages(inode) >= blk_per_seg)
- filemap_fdatawrite(inode->i_mapping);
+ if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
+ ret = filemap_fdatawrite(inode->i_mapping);
+ if (ret < 0)
+ break;
+ }
- count -= len;
- page_idx += len;
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4111,9 +4607,10 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
if (ret)
f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
__func__, ret);
+ f2fs_update_time(sbi, REQ_TIME);
out:
inode_unlock(inode);
- file_end_write(filp);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -4124,15 +4621,16 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC_GETVERSION:
return f2fs_ioc_getversion(filp, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
- return f2fs_ioc_start_atomic_write(filp);
+ return f2fs_ioc_start_atomic_write(filp, false);
+ case F2FS_IOC_START_ATOMIC_REPLACE:
+ return f2fs_ioc_start_atomic_write(filp, true);
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
return f2fs_ioc_commit_atomic_write(filp);
+ case F2FS_IOC_ABORT_ATOMIC_WRITE:
+ return f2fs_ioc_abort_atomic_write(filp);
case F2FS_IOC_START_VOLATILE_WRITE:
- return f2fs_ioc_start_volatile_write(filp);
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
- return f2fs_ioc_release_volatile_write(filp);
- case F2FS_IOC_ABORT_VOLATILE_WRITE:
- return f2fs_ioc_abort_volatile_write(filp);
+ return -EOPNOTSUPP;
case F2FS_IOC_SHUTDOWN:
return f2fs_ioc_shutdown(filp, arg);
case FITRIM:
@@ -4160,7 +4658,7 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
return f2fs_ioc_gc_range(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
- return f2fs_ioc_write_checkpoint(filp, arg);
+ return f2fs_ioc_write_checkpoint(filp);
case F2FS_IOC_DEFRAGMENT:
return f2fs_ioc_defragment(filp, arg);
case F2FS_IOC_MOVE_RANGE:
@@ -4174,7 +4672,7 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_SET_PIN_FILE:
return f2fs_ioc_set_pin_file(filp, arg);
case F2FS_IOC_PRECACHE_EXTENTS:
- return f2fs_ioc_precache_extents(filp, arg);
+ return f2fs_ioc_precache_extents(filp);
case F2FS_IOC_RESIZE_FS:
return f2fs_ioc_resize_fs(filp, arg);
case FS_IOC_ENABLE_VERITY:
@@ -4188,7 +4686,7 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC_SETFSLABEL:
return f2fs_ioc_setfslabel(filp, arg);
case F2FS_IOC_GET_COMPRESS_BLOCKS:
- return f2fs_get_compress_blocks(filp, arg);
+ return f2fs_ioc_get_compress_blocks(filp, arg);
case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
return f2fs_release_compress_blocks(filp, arg);
case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
@@ -4200,9 +4698,13 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_SET_COMPRESS_OPTION:
return f2fs_ioc_set_compress_option(filp, arg);
case F2FS_IOC_DECOMPRESS_FILE:
- return f2fs_ioc_decompress_file(filp, arg);
+ return f2fs_ioc_decompress_file(filp);
case F2FS_IOC_COMPRESS_FILE:
- return f2fs_ioc_compress_file(filp, arg);
+ return f2fs_ioc_compress_file(filp);
+ case F2FS_IOC_GET_DEV_ALIAS_FILE:
+ return f2fs_ioc_get_dev_alias_file(filp, arg);
+ case F2FS_IOC_IO_PRIO:
+ return f2fs_ioc_io_prio(filp, arg);
default:
return -ENOTTY;
}
@@ -4218,27 +4720,476 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return __f2fs_ioctl(filp, cmd, arg);
}
-static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+/*
+ * Return %true if the given read or write request should use direct I/O, or
+ * %false if it should use buffered I/O.
+ */
+static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ unsigned int align;
+
+ if (!(iocb->ki_flags & IOCB_DIRECT))
+ return false;
+
+ if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
+ return false;
+
+ /*
+ * Direct I/O not aligned to the disk's logical_block_size will be
+ * attempted, but will fail with -EINVAL.
+ *
+ * f2fs additionally requires that direct I/O be aligned to the
+ * filesystem block size, which is often a stricter requirement.
+ * However, f2fs traditionally falls back to buffered I/O on requests
+ * that are logical_block_size-aligned but not fs-block aligned.
+ *
+ * The below logic implements this behavior.
+ */
+ align = iocb->ki_pos | iov_iter_alignment(iter);
+ if (!IS_ALIGNED(align, i_blocksize(inode)) &&
+ IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
+ return false;
+
+ return true;
+}
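/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * helper above ORs the file position with the iov alignment so a
 * single IS_ALIGNED() test covers both.  The standalone check below
 * uses the same trick with assumed sizes (4 KiB fs block, 512-byte
 * logical block); the middle case is the one f2fs quietly sends back
 * to buffered I/O.
 */
#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1ULL)) == 0)

static bool use_dio(unsigned long long pos, unsigned long long iov_align,
		    unsigned int fs_block, unsigned int lbs)
{
	unsigned long long align = pos | iov_align;

	/* fs-block aligned: real direct I/O */
	if (IS_ALIGNED(align, fs_block))
		return true;
	/* only logical-block aligned: silently fall back to buffered I/O */
	if (IS_ALIGNED(align, lbs))
		return false;
	/* neither: iomap will fail the request with -EINVAL */
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       use_dio(8192, 4096, 4096, 512),	/* 1: fully aligned	*/
	       use_dio(512,  512,  4096, 512),	/* 0: buffered fallback	*/
	       use_dio(100,  100,  4096, 512));	/* 1: will get -EINVAL	*/
	return 0;
}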
+
+static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
+ unsigned int flags)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
+
+ dec_page_count(sbi, F2FS_DIO_READ);
+ if (error)
+ return error;
+ f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
+ return 0;
+}
+
+static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
+ .end_io = f2fs_dio_read_end_io,
+};
+
+static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- int ret;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ const loff_t pos = iocb->ki_pos;
+ const size_t count = iov_iter_count(to);
+ struct iomap_dio *dio;
+ ssize_t ret;
+
+ if (count == 0)
+ return 0; /* skip atime update */
+
+ trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+ f2fs_down_read(&fi->i_gc_rwsem[READ]);
+ }
+
+ /* dio is not compatible w/ atomic file */
+ if (f2fs_is_atomic_file(inode)) {
+ f2fs_up_read(&fi->i_gc_rwsem[READ]);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /*
+ * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
+ * the higher-level function iomap_dio_rw() in order to ensure that the
+ * F2FS_DIO_READ counter will be decremented correctly in all cases.
+ */
+ inc_page_count(sbi, F2FS_DIO_READ);
+ dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
+ &f2fs_iomap_dio_read_ops, 0, NULL, 0);
+ if (IS_ERR_OR_NULL(dio)) {
+ ret = PTR_ERR_OR_ZERO(dio);
+ if (ret != -EIOCBQUEUED)
+ dec_page_count(sbi, F2FS_DIO_READ);
+ } else {
+ ret = iomap_dio_complete(dio);
+ }
+
+ f2fs_up_read(&fi->i_gc_rwsem[READ]);
+
+ file_accessed(file);
+out:
+ trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
+ return ret;
+}
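/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * read path above bumps F2FS_DIO_READ before submitting and relies on
 * the end_io callback to drop it, so a synchronous failure must undo
 * the increment itself -- except for -EIOCBQUEUED, where the async
 * completion will still run.  The toy model below (stubbed submit
 * function, hypothetical outcome codes) walks the three cases.
 */
#include <stdio.h>

#define EIOCBQUEUED	529	/* same numeric value as the kernel's */

static int inflight;

static void end_io(void)	/* what f2fs_dio_read_end_io() would do */
{
	inflight--;
}

/* outcome: 0 = completed, -5 = failed synchronously, -EIOCBQUEUED = async */
static int submit(int outcome)
{
	/* inline completion for the synchronous-success case */
	if (outcome == 0)
		end_io();
	/* -EIOCBQUEUED: completion (and end_io) will run later */
	return outcome;
}

static void issue(int outcome)
{
	int ret;

	inflight++;
	ret = submit(outcome);
	if (ret < 0 && ret != -EIOCBQUEUED)
		inflight--;	/* sync failure: nobody else will drop it */
	printf("outcome %d -> inflight %d\n", outcome, inflight);
}

int main(void)
{
	issue(0);		/* inflight back to 0 */
	issue(-5);		/* inflight back to 0 */
	issue(-EIOCBQUEUED);	/* stays 1 until async completion */
	end_io();		/* async completion arrives */
	printf("final inflight %d\n", inflight);
	return 0;
}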
+
+static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count,
+ int rw)
+{
+ struct inode *inode = file_inode(file);
+ char *buf, *path;
+
+ buf = f2fs_getname(F2FS_I_SB(inode));
+ if (!buf)
+ return;
+ path = dentry_path_raw(file_dentry(file), buf, PATH_MAX);
+ if (IS_ERR(path))
+ goto free_buf;
+ if (rw == WRITE)
+ trace_f2fs_datawrite_start(inode, pos, count,
+ current->pid, path, current->comm);
+ else
+ trace_f2fs_dataread_start(inode, pos, count,
+ current->pid, path, current->comm);
+free_buf:
+ f2fs_putname(buf);
+}
+
+static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ const loff_t pos = iocb->ki_pos;
+ ssize_t ret;
+ bool dio;
+
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+
+ if (trace_f2fs_dataread_start_enabled())
+ f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
+ iov_iter_count(to), READ);
+
+ dio = f2fs_should_use_dio(inode, iocb, to);
+
+ /* In LFS mode, if there is inflight dio, wait for its completion */
+ if (f2fs_lfs_mode(F2FS_I_SB(inode)) &&
+ get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE) &&
+ (!f2fs_is_pinned_file(inode) || !dio))
+ inode_dio_wait(inode);
+
+ if (dio) {
+ ret = f2fs_dio_read_iter(iocb, to);
+ } else {
+ ret = filemap_read(iocb, to, 0);
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), inode,
+ APP_BUFFERED_READ_IO, ret);
+ }
+ trace_f2fs_dataread_end(inode, pos, ret);
+ return ret;
+}
+
+static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct inode *inode = file_inode(in);
+ const loff_t pos = *ppos;
+ ssize_t ret;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- ret = generic_file_read_iter(iocb, iter);
+ if (trace_f2fs_dataread_start_enabled())
+ f2fs_trace_rw_file_path(in, pos, len, READ);
+ ret = filemap_splice_read(in, ppos, pipe, len, flags);
if (ret > 0)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
+ f2fs_update_iostat(F2FS_I_SB(inode), inode,
+ APP_BUFFERED_READ_IO, ret);
+ trace_f2fs_dataread_end(inode, pos, ret);
return ret;
}
-static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ ssize_t count;
+ int err;
+
+ if (IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+ return -EPERM;
+
+ count = generic_write_checks(iocb, from);
+ if (count <= 0)
+ return count;
+
+ err = file_modified(file);
+ if (err)
+ return err;
+
+ f2fs_zero_post_eof_page(inode,
+ iocb->ki_pos + iov_iter_count(from), true);
+ return count;
+}
+
+/*
+ * Preallocate blocks for a write request, if it is possible and helpful to do
+ * so. Returns a positive number if blocks may have been preallocated, 0 if no
+ * blocks were preallocated, or a negative errno value if something went
+ * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
+ * requested blocks (not just some of them) have been allocated.
+ */
+static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
+ bool dio)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ const loff_t pos = iocb->ki_pos;
+ const size_t count = iov_iter_count(iter);
+ struct f2fs_map_blocks map = {};
+ int flag;
+ int ret;
+
+ /* If it will be an out-of-place direct write, don't bother. */
+ if (dio && f2fs_lfs_mode(sbi))
+ return 0;
+ /*
+ * Don't preallocate holes aligned to DIO_SKIP_HOLES, which turns into
+ * buffered IO if DIO meets any holes.
+ */
+ if (dio && i_size_read(inode) &&
+ (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
+ return 0;
+
+ /* No-wait I/O can't allocate blocks. */
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return 0;
+
+ /* If it will be a short write, don't bother. */
+ if (fault_in_iov_iter_readable(iter, count))
+ return 0;
+
+ if (f2fs_has_inline_data(inode)) {
+ /* If the data will fit inline, don't bother. */
+ if (pos + count <= MAX_INLINE_DATA(inode))
+ return 0;
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
+ }
+
+ /* Do not preallocate blocks that will be written partially in 4KB. */
+ map.m_lblk = F2FS_BLK_ALIGN(pos);
+ map.m_len = F2FS_BYTES_TO_BLK(pos + count);
+ if (map.m_len > map.m_lblk)
+ map.m_len -= map.m_lblk;
+ else
+ return 0;
+
+ if (!IS_DEVICE_ALIASING(inode))
+ map.m_may_create = true;
+ if (dio) {
+ map.m_seg_type = f2fs_rw_hint_to_seg_type(sbi,
+ inode->i_write_hint);
+ flag = F2FS_GET_BLOCK_PRE_DIO;
+ } else {
+ map.m_seg_type = NO_CHECK_TYPE;
+ flag = F2FS_GET_BLOCK_PRE_AIO;
+ }
+
+ ret = f2fs_map_blocks(inode, &map, flag);
+ /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
+ if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
+ return ret;
+ if (ret == 0)
+ set_inode_flag(inode, FI_PREALLOCATED_ALL);
+ return map.m_len;
+}
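/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * map setup above preallocates only blocks the write covers
 * completely: the start is rounded *up* to a block boundary and the
 * end rounded *down*.  With the usual 4 KiB f2fs block size, the
 * arithmetic looks like this (macro names mirror, but are not, the
 * kernel ones).
 */
#include <stdio.h>

#define BLKSIZE		4096ULL
#define BYTES_TO_BLK(b)	((b) / BLKSIZE)			/* round down */
#define BLK_ALIGN(b)	BYTES_TO_BLK((b) + BLKSIZE - 1)	/* round up   */

int main(void)
{
	unsigned long long pos = 5000, count = 20000;
	unsigned long long first = BLK_ALIGN(pos);		/* 2 */
	unsigned long long end = BYTES_TO_BLK(pos + count);	/* 6 */
	unsigned long long len = end > first ? end - first : 0;

	printf("preallocate %llu block(s), indexes %llu..%llu\n",
	       len, first, end - 1);
	return 0;
}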
+
+static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ ssize_t ret;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EOPNOTSUPP;
+
+ ret = generic_perform_write(iocb, from);
+
+ if (ret > 0) {
+ f2fs_update_iostat(F2FS_I_SB(inode), inode,
+ APP_BUFFERED_IO, ret);
+ }
+ return ret;
+}
+
+static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
+ unsigned int flags)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
+
+ dec_page_count(sbi, F2FS_DIO_WRITE);
+ if (error)
+ return error;
+ f2fs_update_time(sbi, REQ_TIME);
+ f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
+ return 0;
+}
+
+static void f2fs_dio_write_submit_io(const struct iomap_iter *iter,
+ struct bio *bio, loff_t file_offset)
+{
+ struct inode *inode = iter->inode;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ enum log_type type = f2fs_rw_hint_to_seg_type(sbi, inode->i_write_hint);
+ enum temp_type temp = f2fs_get_segment_temp(sbi, type);
+
+ bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp);
+ submit_bio(bio);
+}
+
+static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
+ .end_io = f2fs_dio_write_end_io,
+ .submit_io = f2fs_dio_write_submit_io,
+};
+
+static void f2fs_flush_buffered_write(struct address_space *mapping,
+ loff_t start_pos, loff_t end_pos)
+{
+ int ret;
+
+ ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
+ if (ret < 0)
+ return;
+ invalidate_mapping_pages(mapping,
+ start_pos >> PAGE_SHIFT,
+ end_pos >> PAGE_SHIFT);
+}
+
+static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
+ bool *may_need_sync)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ const bool do_opu = f2fs_lfs_mode(sbi);
+ const loff_t pos = iocb->ki_pos;
+ const ssize_t count = iov_iter_count(from);
+ unsigned int dio_flags;
+ struct iomap_dio *dio;
+ ssize_t ret;
+
+ trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ /* f2fs_convert_inline_inode() and block allocation can block */
+ if (f2fs_has_inline_data(inode) ||
+ !f2fs_overwrite_io(inode, pos, count)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
+ f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ goto out;
+
+ f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
+ if (do_opu)
+ f2fs_down_read(&fi->i_gc_rwsem[READ]);
+ }
+
+ /*
+ * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
+ * the higher-level function iomap_dio_rw() in order to ensure that the
+ * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
+ */
+ inc_page_count(sbi, F2FS_DIO_WRITE);
+ dio_flags = 0;
+ if (pos + count > inode->i_size)
+ dio_flags |= IOMAP_DIO_FORCE_WAIT;
+ dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
+ &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
+ if (IS_ERR_OR_NULL(dio)) {
+ ret = PTR_ERR_OR_ZERO(dio);
+ if (ret == -ENOTBLK)
+ ret = 0;
+ if (ret != -EIOCBQUEUED)
+ dec_page_count(sbi, F2FS_DIO_WRITE);
+ } else {
+ ret = iomap_dio_complete(dio);
+ }
+
+ if (do_opu)
+ f2fs_up_read(&fi->i_gc_rwsem[READ]);
+ f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
+
+ if (ret < 0)
+ goto out;
+ if (pos + ret > inode->i_size)
+ f2fs_i_size_write(inode, pos + ret);
+ if (!do_opu)
+ set_inode_flag(inode, FI_UPDATE_WRITE);
+
+ if (iov_iter_count(from)) {
+ ssize_t ret2;
+ loff_t bufio_start_pos = iocb->ki_pos;
+
+ /*
+ * The direct write was partial, so we need to fall back to a
+ * buffered write for the remainder.
+ */
+
+ ret2 = f2fs_buffered_write_iter(iocb, from);
+ if (iov_iter_count(from))
+ f2fs_write_failed(inode, iocb->ki_pos);
+ if (ret2 < 0)
+ goto out;
+
+ /*
+ * Ensure that the pagecache pages are written to disk and
+ * invalidated to preserve the expected O_DIRECT semantics.
+ */
+ if (ret2 > 0) {
+ loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
+
+ ret += ret2;
+
+ f2fs_flush_buffered_write(file->f_mapping,
+ bufio_start_pos,
+ bufio_end_pos);
+ }
+ } else {
+ /* iomap_dio_rw() already handled the generic_write_sync(). */
+ *may_need_sync = false;
+ }
+out:
+ trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
+ return ret;
+}
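/*
 * Editor's note -- illustrative sketch, not part of the patch.  When
 * the direct write above completes only partially, the remainder is
 * retried as a buffered write and that byte range is then written back
 * and dropped from the page cache to keep O_DIRECT semantics.  The toy
 * numbers below show how the two return values and the flush range
 * combine (all values hypothetical).
 */
#include <stdio.h>

int main(void)
{
	long long pos = 1 << 20;	/* original write offset	*/
	long long requested = 65536;	/* bytes asked for		*/
	long long dio_ret = 16384;	/* bytes the DIO part wrote	*/
	long long bufio_start = pos + dio_ret;	/* iocb->ki_pos after DIO */
	long long bufio_ret = requested - dio_ret;  /* buffered remainder */
	long long total = dio_ret + bufio_ret;

	printf("flush and invalidate [%lld, %lld], report %lld bytes\n",
	       bufio_start, bufio_start + bufio_ret - 1, total);
	return 0;
}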
+
+static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ const loff_t orig_pos = iocb->ki_pos;
+ const size_t orig_count = iov_iter_count(from);
+ loff_t target_size;
+ bool dio;
+ bool may_need_sync = true;
+ int preallocated;
+ const loff_t pos = iocb->ki_pos;
+ const ssize_t count = iov_iter_count(from);
ssize_t ret;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
@@ -4260,104 +5211,86 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
}
- if (unlikely(IS_IMMUTABLE(inode))) {
- ret = -EPERM;
- goto unlock;
+ if (f2fs_is_pinned_file(inode) &&
+ !f2fs_overwrite_io(inode, pos, count)) {
+ ret = -EIO;
+ goto out_unlock;
}
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
- ret = -EPERM;
- goto unlock;
- }
+ ret = f2fs_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out_unlock;
- ret = generic_write_checks(iocb, from);
- if (ret > 0) {
- bool preallocated = false;
- size_t target_size = 0;
- int err;
-
- if (fault_in_iov_iter_readable(from, iov_iter_count(from)))
- set_inode_flag(inode, FI_NO_PREALLOC);
-
- if ((iocb->ki_flags & IOCB_NOWAIT)) {
- if (!f2fs_overwrite_io(inode, iocb->ki_pos,
- iov_iter_count(from)) ||
- f2fs_has_inline_data(inode) ||
- f2fs_force_buffered_io(inode, iocb, from)) {
- clear_inode_flag(inode, FI_NO_PREALLOC);
- inode_unlock(inode);
- ret = -EAGAIN;
- goto out;
- }
- goto write;
- }
+ /* Determine whether we will do a direct write or a buffered write. */
+ dio = f2fs_should_use_dio(inode, iocb, from);
- if (is_inode_flag_set(inode, FI_NO_PREALLOC))
- goto write;
+ /* dio is not compatible w/ atomic write */
+ if (dio && f2fs_is_atomic_file(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
- if (iocb->ki_flags & IOCB_DIRECT) {
- /*
- * Convert inline data for Direct I/O before entering
- * f2fs_direct_IO().
- */
- err = f2fs_convert_inline_inode(inode);
- if (err)
- goto out_err;
- /*
- * If force_buffere_io() is true, we have to allocate
- * blocks all the time, since f2fs_direct_IO will fall
- * back to buffered IO.
- */
- if (!f2fs_force_buffered_io(inode, iocb, from) &&
- f2fs_lfs_mode(F2FS_I_SB(inode)))
- goto write;
- }
- preallocated = true;
- target_size = iocb->ki_pos + iov_iter_count(from);
+ /* Possibly preallocate the blocks for the write. */
+ target_size = iocb->ki_pos + iov_iter_count(from);
+ preallocated = f2fs_preallocate_blocks(iocb, from, dio);
+ if (preallocated < 0) {
+ ret = preallocated;
+ } else {
+ if (trace_f2fs_datawrite_start_enabled())
+ f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
+ orig_count, WRITE);
- err = f2fs_preallocate_blocks(iocb, from);
- if (err) {
-out_err:
- clear_inode_flag(inode, FI_NO_PREALLOC);
- inode_unlock(inode);
- ret = err;
- goto out;
- }
-write:
- ret = __generic_file_write_iter(iocb, from);
- clear_inode_flag(inode, FI_NO_PREALLOC);
+ /* Do the actual write. */
+ ret = dio ?
+ f2fs_dio_write_iter(iocb, from, &may_need_sync) :
+ f2fs_buffered_write_iter(iocb, from);
- /* if we couldn't write data, we should deallocate blocks. */
- if (preallocated && i_size_read(inode) < target_size) {
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- filemap_invalidate_lock(inode->i_mapping);
- f2fs_truncate(inode);
- filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- }
+ trace_f2fs_datawrite_end(inode, orig_pos, ret);
+ }
- if (ret > 0)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
+ /* Don't leave any preallocated blocks around past i_size. */
+ if (preallocated && i_size_read(inode) < target_size) {
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+ if (!f2fs_truncate(inode))
+ file_dont_truncate(inode);
+ filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ } else {
+ file_dont_truncate(inode);
}
-unlock:
+
+ clear_inode_flag(inode, FI_PREALLOCATED_ALL);
+out_unlock:
inode_unlock(inode);
out:
- trace_f2fs_file_write_iter(inode, iocb->ki_pos,
- iov_iter_count(from), ret);
- if (ret > 0)
+ trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
+
+ if (ret > 0 && may_need_sync)
ret = generic_write_sync(iocb, ret);
+
+ /* If buffered IO was forced, flush and drop the data from
+ * the page cache to preserve O_DIRECT semantics
+ */
+ if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))
+ f2fs_flush_buffered_write(iocb->ki_filp->f_mapping,
+ orig_pos,
+ orig_pos + ret - 1);
+
return ret;
}
static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
int advice)
{
- struct inode *inode;
struct address_space *mapping;
struct backing_dev_info *bdi;
+ struct inode *inode = file_inode(filp);
+ int err;
+
+ trace_f2fs_fadvise(inode, offset, len, advice);
if (advice == POSIX_FADV_SEQUENTIAL) {
- inode = file_inode(filp);
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
@@ -4372,9 +5305,22 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
filp->f_mode &= ~FMODE_RANDOM;
spin_unlock(&filp->f_lock);
return 0;
+ } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
+ /* Load extent cache at the first readahead. */
+ f2fs_precache_extents(inode);
}
- return generic_fadvise(filp, offset, len, advice);
+ err = generic_fadvise(filp, offset, len, advice);
+ if (err)
+ return err;
+
+ if (advice == POSIX_FADV_DONTNEED &&
+ (test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
+ f2fs_compressed_file(inode)))
+ f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
+ else if (advice == POSIX_FADV_NOREUSE)
+ err = f2fs_keep_noreuse_range(inode, offset, len);
+ return err;
}
#ifdef CONFIG_COMPAT
@@ -4444,10 +5390,11 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC32_MOVE_RANGE:
return f2fs_compat_ioc_move_range(file, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
+ case F2FS_IOC_START_ATOMIC_REPLACE:
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
case F2FS_IOC_START_VOLATILE_WRITE:
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
- case F2FS_IOC_ABORT_VOLATILE_WRITE:
+ case F2FS_IOC_ABORT_ATOMIC_WRITE:
case F2FS_IOC_SHUTDOWN:
case FITRIM:
case FS_IOC_SET_ENCRYPTION_POLICY:
@@ -4481,6 +5428,8 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_SET_COMPRESS_OPTION:
case F2FS_IOC_DECOMPRESS_FILE:
case F2FS_IOC_COMPRESS_FILE:
+ case F2FS_IOC_GET_DEV_ALIAS_FILE:
+ case F2FS_IOC_IO_PRIO:
break;
default:
return -ENOIOCTLCMD;
@@ -4493,9 +5442,10 @@ const struct file_operations f2fs_file_operations = {
.llseek = f2fs_llseek,
.read_iter = f2fs_file_read_iter,
.write_iter = f2fs_file_write_iter,
+ .iopoll = iocb_bio_iopoll,
.open = f2fs_file_open,
.release = f2fs_release_file,
- .mmap = f2fs_file_mmap,
+ .mmap_prepare = f2fs_file_mmap_prepare,
.flush = f2fs_file_flush,
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
@@ -4503,7 +5453,8 @@ const struct file_operations f2fs_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
#endif
- .splice_read = generic_file_splice_read,
+ .splice_read = f2fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fadvise = f2fs_file_fadvise,
+ .fop_flags = FOP_BUFFER_RASYNC,
};
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a946ce0ead34..384fa7e2085b 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
@@ -15,6 +14,7 @@
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
+#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
@@ -35,27 +35,36 @@ static int gc_thread_func(void *data)
wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
unsigned int wait_ms;
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = false,
+ .one_time = false };
wait_ms = gc_th->min_sleep_time;
set_freezable();
do {
- bool sync_mode, foreground = false;
+ bool sync_mode, foreground = false, gc_boost = false;
- wait_event_interruptible_timeout(*wq,
- kthread_should_stop() || freezing(current) ||
+ wait_event_freezable_timeout(*wq,
+ kthread_should_stop() ||
waitqueue_active(fggc_wq) ||
gc_th->gc_wake,
msecs_to_jiffies(wait_ms));
- if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
+ if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) {
foreground = true;
+ gc_control.one_time = false;
+ } else if (f2fs_sb_has_blkzoned(sbi)) {
+ gc_control.one_time = true;
+ }
/* give it a try one time */
if (gc_th->gc_wake)
- gc_th->gc_wake = 0;
+ gc_th->gc_wake = false;
- if (try_to_freeze()) {
+ if (f2fs_readonly(sbi->sb)) {
stat_other_skip_bggc_count(sbi);
continue;
}
@@ -68,10 +77,9 @@ static int gc_thread_func(void *data)
continue;
}
- if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
- f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
- f2fs_stop_checkpoint(sbi, false);
- }
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_FAULT_INJECT);
if (!sb_start_write_trylock(sbi->sb)) {
stat_other_skip_bggc_count(sbi);
@@ -91,44 +99,71 @@ static int gc_thread_func(void *data)
* invalidated soon after by user update or deletion.
* So, I'd like to wait some time to collect dirty segments.
*/
- if (sbi->gc_mode == GC_URGENT_HIGH) {
+ if (sbi->gc_mode == GC_URGENT_HIGH ||
+ sbi->gc_mode == GC_URGENT_MID) {
wait_ms = gc_th->urgent_sleep_time;
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
goto do_gc;
}
if (foreground) {
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
goto do_gc;
- } else if (!down_write_trylock(&sbi->gc_lock)) {
+ } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
stat_other_skip_bggc_count(sbi);
goto next;
}
if (!is_idle(sbi, GC_TIME)) {
increase_sleep_time(gc_th, &wait_ms);
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
stat_io_skip_bggc_count(sbi);
goto next;
}
- if (has_enough_invalid_blocks(sbi))
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ if (has_enough_free_blocks(sbi,
+ gc_th->no_zoned_gc_percent)) {
+ wait_ms = gc_th->no_gc_sleep_time;
+ f2fs_up_write(&sbi->gc_lock);
+ goto next;
+ }
+ if (wait_ms == gc_th->no_gc_sleep_time)
+ wait_ms = gc_th->max_sleep_time;
+ }
+
+ if (need_to_boost_gc(sbi)) {
decrease_sleep_time(gc_th, &wait_ms);
- else
+ if (f2fs_sb_has_blkzoned(sbi))
+ gc_boost = true;
+ } else {
increase_sleep_time(gc_th, &wait_ms);
+ }
do_gc:
- if (!foreground)
- stat_inc_bggc_count(sbi->stat_info);
+ stat_inc_gc_call_count(sbi, foreground ?
+ FOREGROUND : BACKGROUND);
- sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+ (gc_boost && gc_th->boost_gc_greedy);
/* foreground GC was been triggered via f2fs_balance_fs() */
- if (foreground)
+ if (foreground && !f2fs_sb_has_blkzoned(sbi))
sync_mode = false;
+ gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
+ gc_control.no_bg_gc = foreground;
+ gc_control.nr_free_secs = foreground ? 1 : 0;
+
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
- wait_ms = gc_th->no_gc_sleep_time;
+ if (f2fs_gc(sbi, &gc_control)) {
+ /* don't bother wait_ms by foreground gc */
+ if (!foreground)
+ wait_ms = gc_th->no_gc_sleep_time;
+ } else {
+ /* reset wait_ms to default sleep time */
+ if (wait_ms == gc_th->no_gc_sleep_time)
+ wait_ms = gc_th->min_sleep_time;
+ }
if (foreground)
wake_up_all(&gc_th->fggc_wq);
@@ -139,6 +174,15 @@ do_gc:
/* balancing f2fs's metadata periodically */
f2fs_balance_fs_bg(sbi, true);
next:
+ if (sbi->gc_mode != GC_NORMAL) {
+ spin_lock(&sbi->gc_remaining_trials_lock);
+ if (sbi->gc_remaining_trials) {
+ sbi->gc_remaining_trials--;
+ if (!sbi->gc_remaining_trials)
+ sbi->gc_mode = GC_NORMAL;
+ }
+ spin_unlock(&sbi->gc_remaining_trials_lock);
+ }
sb_end_write(sbi->sb);
} while (!kthread_should_stop());
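/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * background GC loop above stretches its sleep time while the
 * filesystem looks clean and shrinks it again when GC finds work,
 * staying inside [min, max] and parking at the long "no victim"
 * interval when nothing is selected.  This is a simplified userspace
 * model of that back-off; the step size and exact policy here are
 * assumptions, not the kernel's helpers.
 */
#include <stdio.h>

#define MIN_MS		30000u
#define NO_GC_MS	300000u
#define STEP_MS		10000u

static unsigned int adjust(unsigned int wait_ms, int found_victim)
{
	if (!found_victim)
		return NO_GC_MS;		/* back off hard */
	if (wait_ms == NO_GC_MS)
		return MIN_MS;			/* work reappeared: reset */
	return wait_ms > MIN_MS + STEP_MS ? wait_ms - STEP_MS : MIN_MS;
}

int main(void)
{
	unsigned int wait_ms = MIN_MS;
	int victims[] = { 1, 1, 0, 0, 1, 1 };

	for (unsigned i = 0; i < sizeof(victims) / sizeof(victims[0]); i++) {
		wait_ms = adjust(wait_ms, victims[i]);
		printf("round %u: sleep %u ms\n", i, wait_ms);
	}
	return 0;
}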
@@ -149,20 +193,31 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
struct f2fs_gc_kthread *gc_th;
dev_t dev = sbi->sb->s_bdev->bd_dev;
- int err = 0;
gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
- if (!gc_th) {
- err = -ENOMEM;
- goto out;
- }
+ if (!gc_th)
+ return -ENOMEM;
gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
- gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
- gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
- gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
+ gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE;
+ gc_th->boost_gc_greedy = GC_GREEDY;
+
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
+ gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
+ gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
+ } else {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->no_zoned_gc_percent = 0;
+ gc_th->boost_zoned_gc_percent = 0;
+ }
- gc_th->gc_wake = 0;
+ gc_th->gc_wake = false;
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
@@ -170,12 +225,14 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(gc_th->f2fs_gc_task)) {
- err = PTR_ERR(gc_th->f2fs_gc_task);
+ int err = PTR_ERR(gc_th->f2fs_gc_task);
+
kfree(gc_th);
sbi->gc_thread = NULL;
+ return err;
}
-out:
- return err;
+
+ return 0;
}
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
@@ -205,6 +262,8 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
switch (sbi->gc_mode) {
case GC_IDLE_CB:
+ case GC_URGENT_LOW:
+ case GC_URGENT_MID:
gc_mode = GC_CB;
break;
case GC_IDLE_GREEDY:
@@ -224,19 +283,14 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- if (p->alloc_mode == SSR) {
- p->gc_mode = GC_GREEDY;
- p->dirty_bitmap = dirty_i->dirty_segmap[type];
- p->max_search = dirty_i->nr_dirty[type];
- p->ofs_unit = 1;
- } else if (p->alloc_mode == AT_SSR) {
+ if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) {
p->gc_mode = GC_GREEDY;
p->dirty_bitmap = dirty_i->dirty_segmap[type];
p->max_search = dirty_i->nr_dirty[type];
p->ofs_unit = 1;
} else {
p->gc_mode = select_gc_type(sbi, gc_type);
- p->ofs_unit = sbi->segs_per_sec;
+ p->ofs_unit = SEGS_PER_SEC(sbi);
if (__is_large_section(sbi)) {
p->dirty_bitmap = dirty_i->dirty_secmap;
p->max_search = count_bits(p->dirty_bitmap,
@@ -257,11 +311,11 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
p->max_search > sbi->max_victim_search)
p->max_search = sbi->max_victim_search;
- /* let's select beginning hot/small space first in no_heap mode*/
+ /* let's select beginning hot/small space first. */
if (f2fs_need_rand_seg(sbi))
- p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
- else if (test_opt(sbi, NOHEAP) &&
- (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
+ p->offset = get_random_u32_below(MAIN_SECS(sbi) *
+ SEGS_PER_SEC(sbi));
+ else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
p->offset = 0;
else
p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
@@ -272,13 +326,13 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
{
/* SSR allocates in a segment unit */
if (p->alloc_mode == SSR)
- return sbi->blocks_per_seg;
+ return BLKS_PER_SEG(sbi);
else if (p->alloc_mode == AT_SSR)
return UINT_MAX;
/* LFS */
if (p->gc_mode == GC_GREEDY)
- return 2 * sbi->blocks_per_seg * p->ofs_unit;
+ return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
else if (p->gc_mode == GC_CB)
return UINT_MAX;
else if (p->gc_mode == GC_AT)
@@ -309,23 +363,18 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
- unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
unsigned long long mtime = 0;
unsigned int vblocks;
unsigned char age = 0;
unsigned char u;
- unsigned int i;
- unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
- for (i = 0; i < usable_segs_per_sec; i++)
- mtime += get_seg_entry(sbi, start + i)->mtime;
+ mtime = f2fs_get_section_mtime(sbi, segno);
+ f2fs_bug_on(sbi, mtime == INVALID_MTIME);
vblocks = get_valid_blocks(sbi, segno, true);
-
- mtime = div_u64(mtime, usable_segs_per_sec);
vblocks = div_u64(vblocks, usable_segs_per_sec);
- u = (vblocks * 100) >> sbi->log_blocks_per_seg;
+ u = BLKS_TO_SEGS(sbi, vblocks * 100);
/* Handle if the system time has changed by the user */
if (mtime < sit_i->min_mtime)
@@ -340,11 +389,17 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
}
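/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * cost-benefit policy scores a section by combining its valid-block
 * ratio u (percent) with its normalized age, roughly
 * benefit ~ (100 - u) * age / (100 + u) as in the classic
 * log-structured cleaning literature; the exact return expression is
 * outside this hunk, so treat the formula below as an approximation.
 */
#include <stdio.h>

static unsigned long long cb_score(unsigned int u, unsigned int age)
{
	return (100ULL * (100 - u) * age) / (100 + u);
}

int main(void)
{
	/* a mostly-empty old section beats a mostly-full young one */
	printf("u=20%% age=90: %llu\n", cb_score(20, 90));	/* 6000 */
	printf("u=80%% age=90: %llu\n", cb_score(80, 90));	/* 1000 */
	printf("u=80%% age=10: %llu\n", cb_score(80, 10));	/* 111  */
	return 0;
}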
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
- unsigned int segno, struct victim_sel_policy *p)
+ unsigned int segno, struct victim_sel_policy *p,
+ unsigned int valid_thresh_ratio)
{
if (p->alloc_mode == SSR)
return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ if (p->one_time_gc && (valid_thresh_ratio < 100) &&
+ (get_valid_blocks(sbi, segno, true) >=
+ CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100))
+ return UINT_MAX;
+
/* alloc_mode == LFS */
if (p->gc_mode == GC_GREEDY)
return get_valid_blocks(sbi, segno, true);
@@ -367,50 +422,102 @@ static unsigned int count_bits(const unsigned long *addr,
return sum;
}
-static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
- unsigned long long mtime, unsigned int segno,
- struct rb_node *parent, struct rb_node **p,
- bool left_most)
+static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
+ struct rb_root_cached *root)
+{
+#ifdef CONFIG_F2FS_CHECK_FS
+ struct rb_node *cur = rb_first_cached(root), *next;
+ struct victim_entry *cur_ve, *next_ve;
+
+ while (cur) {
+ next = rb_next(cur);
+ if (!next)
+ return true;
+
+ cur_ve = rb_entry(cur, struct victim_entry, rb_node);
+ next_ve = rb_entry(next, struct victim_entry, rb_node);
+
+ if (cur_ve->mtime > next_ve->mtime) {
+ f2fs_info(sbi, "broken victim_rbtree, "
+ "cur_mtime(%llu) next_mtime(%llu)",
+ cur_ve->mtime, next_ve->mtime);
+ return false;
+ }
+ cur = next;
+ }
+#endif
+ return true;
+}
+
+static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
+ unsigned long long mtime)
+{
+ struct atgc_management *am = &sbi->am;
+ struct rb_node *node = am->root.rb_root.rb_node;
+ struct victim_entry *ve = NULL;
+
+ while (node) {
+ ve = rb_entry(node, struct victim_entry, rb_node);
+
+ if (mtime < ve->mtime)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
+ }
+ return ve;
+}
+
+static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
+ unsigned long long mtime, unsigned int segno)
{
struct atgc_management *am = &sbi->am;
struct victim_entry *ve;
- ve = f2fs_kmem_cache_alloc(victim_entry_slab,
- GFP_NOFS, true, NULL);
+ ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
ve->mtime = mtime;
ve->segno = segno;
- rb_link_node(&ve->rb_node, parent, p);
- rb_insert_color_cached(&ve->rb_node, &am->root, left_most);
-
list_add_tail(&ve->list, &am->victim_list);
-
am->victim_count++;
return ve;
}
-static void insert_victim_entry(struct f2fs_sb_info *sbi,
+static void __insert_victim_entry(struct f2fs_sb_info *sbi,
unsigned long long mtime, unsigned int segno)
{
struct atgc_management *am = &sbi->am;
- struct rb_node **p;
+ struct rb_root_cached *root = &am->root;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
+ struct victim_entry *ve;
bool left_most = true;
- p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
- attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
+ /* look up rb tree to find parent node */
+ while (*p) {
+ parent = *p;
+ ve = rb_entry(parent, struct victim_entry, rb_node);
+
+ if (mtime < ve->mtime) {
+ p = &(*p)->rb_left;
+ } else {
+ p = &(*p)->rb_right;
+ left_most = false;
+ }
+ }
+
+ ve = __create_victim_entry(sbi, mtime, segno);
+
+ rb_link_node(&ve->rb_node, parent, p);
+ rb_insert_color_cached(&ve->rb_node, root, left_most);
}
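/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * open-coded walk above descends the rbtree by mtime, remembering the
 * parent and whether the new node stays on the leftmost spine (so the
 * cached-leftmost pointer can be updated).  The standalone sketch
 * below uses a plain, unbalanced BST just to show the same
 * descend-then-link pattern; rebalancing and colouring are what
 * rb_insert_color_cached() adds on top.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long long mtime;
	struct node *left, *right;
};

static void insert(struct node **root, unsigned long long mtime,
		   int *left_most)
{
	struct node **p = root;
	struct node *n = calloc(1, sizeof(*n));

	n->mtime = mtime;
	*left_most = 1;
	while (*p) {
		if (mtime < (*p)->mtime) {
			p = &(*p)->left;
		} else {
			p = &(*p)->right;
			*left_most = 0;	/* went right at least once */
		}
	}
	*p = n;	/* rb_link_node() equivalent for a plain BST */
}

int main(void)
{
	struct node *root = NULL;
	unsigned long long mtimes[] = { 50, 70, 30, 60, 20 };

	for (unsigned i = 0; i < sizeof(mtimes) / sizeof(mtimes[0]); i++) {
		int left_most;

		insert(&root, mtimes[i], &left_most);
		printf("insert %llu, new leftmost: %s\n", mtimes[i],
		       left_most ? "yes" : "no");
	}
	return 0;
}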
static void add_victim_entry(struct f2fs_sb_info *sbi,
struct victim_sel_policy *p, unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
- unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
unsigned long long mtime = 0;
- unsigned int i;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
if (p->gc_mode == GC_AT &&
@@ -418,9 +525,8 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
return;
}
- for (i = 0; i < sbi->segs_per_sec; i++)
- mtime += get_seg_entry(sbi, start + i)->mtime;
- mtime = div_u64(mtime, sbi->segs_per_sec);
+ mtime = f2fs_get_section_mtime(sbi, segno);
+ f2fs_bug_on(sbi, mtime == INVALID_MTIME);
/* Handle if the system time has changed by the user */
if (mtime < sit_i->min_mtime)
@@ -436,19 +542,7 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
return;
- insert_victim_entry(sbi, mtime, segno);
-}
-
-static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
- struct victim_sel_policy *p)
-{
- struct atgc_management *am = &sbi->am;
- struct rb_node *parent = NULL;
- bool left_most;
-
- f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
-
- return parent;
+ __insert_victim_entry(sbi, mtime, segno);
}
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
@@ -458,13 +552,12 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
struct atgc_management *am = &sbi->am;
struct rb_root_cached *root = &am->root;
struct rb_node *node;
- struct rb_entry *re;
struct victim_entry *ve;
unsigned long long total_time;
unsigned long long age, u, accu;
unsigned long long max_mtime = sit_i->dirty_max_mtime;
unsigned long long min_mtime = sit_i->dirty_min_mtime;
- unsigned int sec_blocks = BLKS_PER_SEC(sbi);
+ unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
unsigned int vblocks;
unsigned int dirty_threshold = max(am->max_candidate_count,
am->candidate_ratio *
@@ -485,12 +578,10 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
node = rb_first_cached(root);
next:
- re = rb_entry_safe(node, struct rb_entry, rb_node);
- if (!re)
+ ve = rb_entry_safe(node, struct victim_entry, rb_node);
+ if (!ve)
return;
- ve = (struct victim_entry *)re;
-
if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
goto skip;
@@ -532,36 +623,30 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
{
struct sit_info *sit_i = SIT_I(sbi);
struct atgc_management *am = &sbi->am;
- struct rb_node *node;
- struct rb_entry *re;
struct victim_entry *ve;
unsigned long long age;
unsigned long long max_mtime = sit_i->dirty_max_mtime;
unsigned long long min_mtime = sit_i->dirty_min_mtime;
- unsigned int seg_blocks = sbi->blocks_per_seg;
unsigned int vblocks;
unsigned int dirty_threshold = max(am->max_candidate_count,
am->candidate_ratio *
am->victim_count / 100);
- unsigned int cost;
- unsigned int iter = 0;
+ unsigned int cost, iter;
int stage = 0;
if (max_mtime < min_mtime)
return;
max_mtime += 1;
next_stage:
- node = lookup_central_victim(sbi, p);
+ iter = 0;
+ ve = __lookup_victim_entry(sbi, p->age);
next_node:
- re = rb_entry_safe(node, struct rb_entry, rb_node);
- if (!re) {
- if (stage == 0)
- goto skip_stage;
+ if (!ve) {
+ if (stage++ == 0)
+ goto next_stage;
return;
}
- ve = (struct victim_entry *)re;
-
if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
goto skip_node;
@@ -571,7 +656,7 @@ next_node:
f2fs_bug_on(sbi, !vblocks);
/* rare case */
- if (vblocks == seg_blocks)
+ if (vblocks == BLKS_PER_SEG(sbi))
goto skip_node;
iter++;
@@ -587,24 +672,20 @@ next_node:
}
skip_node:
if (iter < dirty_threshold) {
- if (stage == 0)
- node = rb_prev(node);
- else if (stage == 1)
- node = rb_next(node);
+ ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
+ rb_next(&ve->rb_node),
+ struct victim_entry, rb_node);
goto next_node;
}
-skip_stage:
- if (stage < 1) {
- stage++;
- iter = 0;
+
+ if (stage++ == 0)
goto next_stage;
- }
}
+
static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
struct victim_sel_policy *p)
{
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
- &sbi->am.root, true));
+ f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
if (p->gc_mode == GC_AT)
atgc_lookup_victim(sbi, p);
@@ -631,6 +712,54 @@ static void release_victim_entry(struct f2fs_sb_info *sbi)
f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}
+static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
+ if (!dirty_i->enable_pin_section)
+ return false;
+ if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
+ dirty_i->pinned_secmap_cnt++;
+ return true;
+}
+
+static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
+{
+ return dirty_i->pinned_secmap_cnt;
+}
+
+static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
+ unsigned int secno)
+{
+ return dirty_i->enable_pin_section &&
+ f2fs_pinned_section_exists(dirty_i) &&
+ test_bit(secno, dirty_i->pinned_secmap);
+}
+
+static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
+{
+ unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
+
+ if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
+ memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
+ DIRTY_I(sbi)->pinned_secmap_cnt = 0;
+ }
+ DIRTY_I(sbi)->enable_pin_section = enable;
+}
+
+static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
+ unsigned int segno)
+{
+ if (!f2fs_is_pinned_file(inode))
+ return 0;
+ if (gc_type != FG_GC)
+ return -EBUSY;
+ if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
+ f2fs_pin_file_control(inode, true);
+ return -EAGAIN;
+}
+
/*
* This function is called from two paths.
* One is garbage collection and the other is SSR segment selection.
@@ -639,25 +768,31 @@ static void release_victim_entry(struct f2fs_sb_info *sbi)
* When it is called from SSR segment selection, it finds a segment
* which has minimum valid blocks and removes it from dirty seglist.
*/
-static int get_victim_by_default(struct f2fs_sb_info *sbi,
- unsigned int *result, int gc_type, int type,
- char alloc_mode, unsigned long long age)
+int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
+ int gc_type, int type, char alloc_mode,
+ unsigned long long age, bool one_time)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct sit_info *sm = SIT_I(sbi);
- struct victim_sel_policy p;
+ struct victim_sel_policy p = {0};
unsigned int secno, last_victim;
unsigned int last_segment;
unsigned int nsearched;
+ unsigned int valid_thresh_ratio = 100;
bool is_atgc;
int ret = 0;
mutex_lock(&dirty_i->seglist_lock);
- last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
+ last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
p.alloc_mode = alloc_mode;
p.age = age;
p.age_threshold = sbi->am.age_threshold;
+ if (one_time) {
+ p.one_time_gc = one_time;
+ if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG))
+ valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio;
+ }
retry:
select_policy(sbi, gc_type, type, &p);
@@ -677,11 +812,14 @@ retry:
goto out;
}
- if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+ if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
ret = -EBUSY;
- else
- p.min_segno = *result;
- goto out;
+ goto out;
+ }
+ if (gc_type == FG_GC)
+ clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
+ p.min_segno = *result;
+ goto got_result;
}
ret = -ENODATA;
@@ -772,12 +910,15 @@ retry:
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
goto next;
+ if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
+ goto next;
+
if (is_atgc) {
add_victim_entry(sbi, &p, segno);
goto next;
}
- cost = get_gc_cost(sbi, segno, &p);
+ cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio);
if (p.min_cost > cost) {
p.min_segno = segno;
@@ -791,7 +932,7 @@ next:
else
sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
sm->last_victim[p.gc_mode] %=
- (MAIN_SECS(sbi) * sbi->segs_per_sec);
+ (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
break;
}
}
@@ -832,10 +973,6 @@ out:
return ret;
}
-static const struct victim_selection default_v_ops = {
- .get_victim = get_victim_by_default,
-};
-
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
struct inode_entry *ie;
@@ -914,7 +1051,7 @@ next_step:
for (off = 0; off < usable_blks_in_seg; off++, entry++) {
nid_t nid = le32_to_cpu(entry->nid);
- struct page *node_page;
+ struct folio *node_folio;
struct node_info ni;
int err;
@@ -937,27 +1074,27 @@ next_step:
}
/* phase == 2 */
- node_page = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(node_page))
+ node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
+ if (IS_ERR(node_folio))
continue;
- /* block may become invalid during f2fs_get_node_page */
+ /* block may become invalid during f2fs_get_node_folio */
if (check_valid_map(sbi, segno, off) == 0) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
continue;
}
- if (f2fs_get_node_info(sbi, nid, &ni)) {
- f2fs_put_page(node_page, 1);
+ if (f2fs_get_node_info(sbi, nid, &ni, false)) {
+ f2fs_folio_put(node_folio, true);
continue;
}
if (ni.blk_addr != start_addr + off) {
- f2fs_put_page(node_page, 1);
+ f2fs_folio_put(node_folio, true);
continue;
}
- err = f2fs_move_node_page(node_page, gc_type);
+ err = f2fs_move_node_folio(node_folio, gc_type);
if (!err && gc_type == FG_GC)
submitted++;
stat_inc_node_blk_count(sbi, 1, gc_type);
@@ -1003,20 +1140,20 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
- struct page *node_page;
+ struct folio *node_folio;
nid_t nid;
- unsigned int ofs_in_node;
+ unsigned int ofs_in_node, max_addrs, base;
block_t source_blkaddr;
nid = le32_to_cpu(sum->nid);
ofs_in_node = le16_to_cpu(sum->ofs_in_node);
- node_page = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(node_page))
+ node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
+ if (IS_ERR(node_folio))
return false;
- if (f2fs_get_node_info(sbi, nid, dni)) {
- f2fs_put_page(node_page, 1);
+ if (f2fs_get_node_info(sbi, nid, dni, false)) {
+ f2fs_folio_put(node_folio, true);
return false;
}
@@ -1026,9 +1163,29 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
set_sbi_flag(sbi, SBI_NEED_FSCK);
}
- *nofs = ofs_of_node(node_page);
- source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
- f2fs_put_page(node_page, 1);
+ if (f2fs_check_nid_range(sbi, dni->ino)) {
+ f2fs_folio_put(node_folio, true);
+ return false;
+ }
+
+ if (IS_INODE(node_folio)) {
+ base = offset_in_addr(F2FS_INODE(node_folio));
+ max_addrs = DEF_ADDRS_PER_INODE;
+ } else {
+ base = 0;
+ max_addrs = DEF_ADDRS_PER_BLOCK;
+ }
+
+ if (base + ofs_in_node >= max_addrs) {
+ f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
+ base, ofs_in_node, max_addrs, dni->ino, dni->nid);
+ f2fs_folio_put(node_folio, true);
+ return false;
+ }
+
+ *nofs = ofs_of_node(node_folio);
+ source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
+ f2fs_folio_put(node_folio, true);
if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
@@ -1039,7 +1196,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
blkaddr, source_blkaddr, segno);
- f2fs_bug_on(sbi, 1);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
}
#endif
@@ -1051,10 +1208,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
static int ra_data_block(struct inode *inode, pgoff_t index)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct address_space *mapping = inode->i_mapping;
+ struct address_space *mapping = f2fs_is_cow_file(inode) ?
+ F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
struct dnode_of_data dn;
- struct page *page;
- struct extent_info ei = {0, 0, 0};
+ struct folio *folio, *efolio;
struct f2fs_io_info fio = {
.sbi = sbi,
.ino = inode->i_ino,
@@ -1063,21 +1220,20 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
.op = REQ_OP_READ,
.op_flags = 0,
.encrypted_page = NULL,
- .in_list = false,
- .retry = false,
+ .in_list = 0,
};
int err;
- page = f2fs_grab_cache_page(mapping, index, true);
- if (!page)
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(mapping, index, true);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
- dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
DATA_GENERIC_ENHANCE_READ))) {
err = -EFSCORRUPTED;
- goto put_page;
+ goto put_folio;
}
goto got_it;
}
@@ -1085,53 +1241,54 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err)
- goto put_page;
+ goto put_folio;
f2fs_put_dnode(&dn);
if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
err = -ENOENT;
- goto put_page;
+ goto put_folio;
}
if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
DATA_GENERIC_ENHANCE))) {
err = -EFSCORRUPTED;
- goto put_page;
+ goto put_folio;
}
got_it:
- /* read page */
- fio.page = page;
+ /* read folio */
+ fio.folio = folio;
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
/*
* don't cache encrypted data into meta inode until previous dirty
* data were writebacked to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
- fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
- dn.data_blkaddr,
+ efolio = f2fs_filemap_get_folio(META_MAPPING(sbi), dn.data_blkaddr,
FGP_LOCK | FGP_CREAT, GFP_NOFS);
- if (!fio.encrypted_page) {
- err = -ENOMEM;
- goto put_page;
+ if (IS_ERR(efolio)) {
+ err = PTR_ERR(efolio);
+ goto put_folio;
}
+ fio.encrypted_page = &efolio->page;
+
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_encrypted_page;
- f2fs_put_page(fio.encrypted_page, 0);
- f2fs_put_page(page, 1);
+ f2fs_put_page(fio.encrypted_page, false);
+ f2fs_folio_put(folio, true);
- f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
- f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
return 0;
put_encrypted_page:
- f2fs_put_page(fio.encrypted_page, 1);
-put_page:
- f2fs_put_page(page, 1);
+ f2fs_put_page(fio.encrypted_page, true);
+put_folio:
+ f2fs_folio_put(folio, true);
return err;
}
@@ -1142,6 +1299,8 @@ put_page:
static int move_data_block(struct inode *inode, block_t bidx,
int gc_type, unsigned int segno, int off)
{
+ struct address_space *mapping = f2fs_is_cow_file(inode) ?
+ F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.ino = inode->i_ino,
@@ -1150,13 +1309,12 @@ static int move_data_block(struct inode *inode, block_t bidx,
.op = REQ_OP_READ,
.op_flags = 0,
.encrypted_page = NULL,
- .in_list = false,
- .retry = false,
+ .in_list = 0,
};
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
- struct page *page, *mpage;
+ struct folio *folio, *mfolio, *efolio;
block_t newaddr;
int err = 0;
bool lfs_mode = f2fs_lfs_mode(fio.sbi);
@@ -1165,27 +1323,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
/* do not read out */
- page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
- if (!page)
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(mapping, bidx, false);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
err = -ENOENT;
goto out;
}
- if (f2fs_is_atomic_file(inode)) {
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
- F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
- err = -EAGAIN;
- goto out;
- }
-
- if (f2fs_is_pinned_file(inode)) {
- f2fs_pin_file_control(inode, true);
- err = -EAGAIN;
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err)
goto out;
- }
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
@@ -1193,7 +1342,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
goto out;
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
err = -ENOENT;
goto put_out;
}
@@ -1202,46 +1351,48 @@ static int move_data_block(struct inode *inode, block_t bidx,
* don't cache encrypted data into meta inode until previous dirty
* data were writebacked to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
- err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
goto put_out;
/* read page */
- fio.page = page;
+ fio.folio = folio;
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
if (lfs_mode)
- down_write(&fio.sbi->io_order_lock);
+ f2fs_down_write(&fio.sbi->io_order_lock);
- mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+ mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
fio.old_blkaddr, false);
- if (!mpage) {
- err = -ENOMEM;
+ if (IS_ERR(mfolio)) {
+ err = PTR_ERR(mfolio);
goto up_out;
}
- fio.encrypted_page = mpage;
+ fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);
- /* read source block in mpage */
- if (!PageUptodate(mpage)) {
+ /* read source block in mfolio */
+ if (!folio_test_uptodate(mfolio)) {
err = f2fs_submit_page_bio(&fio);
if (err) {
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
goto up_out;
}
- f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
- f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
+ F2FS_BLKSIZE);
+ f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
+ F2FS_BLKSIZE);
- lock_page(mpage);
- if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
- !PageUptodate(mpage))) {
+ folio_lock(mfolio);
+ if (unlikely(!is_meta_folio(mfolio) ||
+ !folio_test_uptodate(mfolio))) {
err = -EIO;
- f2fs_put_page(mpage, 1);
+ f2fs_folio_put(mfolio, true);
goto up_out;
}
}
@@ -1249,101 +1400,89 @@ static int move_data_block(struct inode *inode, block_t bidx,
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* allocate block address */
- f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+ err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, type, NULL);
+ if (err) {
+ f2fs_folio_put(mfolio, true);
+ /* filesystem should shut down, no need to recover the block */
+ goto up_out;
+ }
- fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
- newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
- if (!fio.encrypted_page) {
- err = -ENOMEM;
- f2fs_put_page(mpage, 1);
+ efolio = f2fs_filemap_get_folio(META_MAPPING(fio.sbi), newaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(efolio)) {
+ err = PTR_ERR(efolio);
+ f2fs_folio_put(mfolio, true);
goto recover_block;
}
+ fio.encrypted_page = &efolio->page;
+
/* write target block */
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
memcpy(page_address(fio.encrypted_page),
- page_address(mpage), PAGE_SIZE);
- f2fs_put_page(mpage, 1);
- invalidate_mapping_pages(META_MAPPING(fio.sbi),
- fio.old_blkaddr, fio.old_blkaddr);
- f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
+ folio_address(mfolio), PAGE_SIZE);
+ f2fs_folio_put(mfolio, true);
+
+ f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
set_page_writeback(fio.encrypted_page);
- ClearPageError(page);
fio.op = REQ_OP_WRITE;
fio.op_flags = REQ_SYNC;
fio.new_blkaddr = newaddr;
f2fs_submit_page_write(&fio);
- if (fio.retry) {
- err = -EAGAIN;
- if (PageWriteback(fio.encrypted_page))
- end_page_writeback(fio.encrypted_page);
- goto put_page_out;
- }
- f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
f2fs_update_data_blkaddr(&dn, newaddr);
set_inode_flag(inode, FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
-put_page_out:
- f2fs_put_page(fio.encrypted_page, 1);
+
+ f2fs_put_page(fio.encrypted_page, true);
recover_block:
if (err)
f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
true, true, true);
up_out:
if (lfs_mode)
- up_write(&fio.sbi->io_order_lock);
+ f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
f2fs_put_dnode(&dn);
out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
- unsigned int segno, int off)
+ unsigned int segno, int off)
{
- struct page *page;
+ struct folio *folio;
int err = 0;
- page = f2fs_get_lock_data_page(inode, bidx, true);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_lock_data_folio(inode, bidx, true);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
err = -ENOENT;
goto out;
}
- if (f2fs_is_atomic_file(inode)) {
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
- F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
- err = -EAGAIN;
- goto out;
- }
- if (f2fs_is_pinned_file(inode)) {
- if (gc_type == FG_GC)
- f2fs_pin_file_control(inode, true);
- err = -EAGAIN;
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err)
goto out;
- }
if (gc_type == BG_GC) {
- if (PageWriteback(page)) {
+ if (folio_test_writeback(folio)) {
err = -EAGAIN;
goto out;
}
- set_page_dirty(page);
- set_page_private_gcing(page);
+ folio_mark_dirty(folio);
+ folio_set_f2fs_gcing(folio);
} else {
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
@@ -1353,38 +1492,37 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC,
.old_blkaddr = NULL_ADDR,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.need_lock = LOCK_REQ,
.io_type = FS_GC_DATA_IO,
};
- bool is_dirty = PageDirty(page);
+ bool is_dirty = folio_test_dirty(folio);
retry:
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
- set_page_dirty(page);
- if (clear_page_dirty_for_io(page)) {
+ folio_mark_dirty(folio);
+ if (folio_clear_dirty_for_io(folio)) {
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
}
- set_page_private_gcing(page);
+ folio_set_f2fs_gcing(folio);
err = f2fs_do_write_data_page(&fio);
if (err) {
- clear_page_private_gcing(page);
+ folio_clear_f2fs_gcing(folio);
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
if (is_dirty)
- set_page_dirty(page);
+ folio_mark_dirty(folio);
}
}
out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
@@ -1413,7 +1551,6 @@ next_step:
entry = sum;
for (off = 0; off < usable_blks_in_seg; off++, entry++) {
- struct page *data_page;
struct inode *inode;
struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs;
@@ -1427,7 +1564,7 @@ next_step:
*/
if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
(!force_migrate && get_valid_blocks(sbi, segno, true) ==
- BLKS_PER_SEC(sbi)))
+ CAP_BLKS_PER_SEC(sbi)))
return submitted;
if (check_valid_map(sbi, segno, off) == 0)
@@ -1456,11 +1593,36 @@ next_step:
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 3) {
+ struct folio *data_folio;
+ int err;
+
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode) || is_bad_inode(inode))
+ if (IS_ERR(inode))
continue;
- if (!down_write_trylock(
+ if (is_bad_inode(inode) ||
+ special_file(inode->i_mode)) {
+ iput(inode);
+ continue;
+ }
+
+ if (f2fs_has_inline_data(inode)) {
+ iput(inode);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_err_ratelimited(sbi,
+ "inode %lx has both inline_data flag and "
+ "data block, nid=%u, ofs_in_node=%u",
+ inode->i_ino, dni.nid, ofs_in_node);
+ continue;
+ }
+
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err == -EAGAIN) {
+ iput(inode);
+ return submitted;
+ }
+
+ if (!f2fs_down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
iput(inode);
sbi->skipped_gc_rwsem++;
@@ -1470,10 +1632,10 @@ next_step:
start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
ofs_in_node;
- if (f2fs_post_read_required(inode)) {
+ if (f2fs_meta_inode_gc_required(inode)) {
int err = ra_data_block(inode, start_bidx);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (err) {
iput(inode);
continue;
@@ -1482,15 +1644,15 @@ next_step:
continue;
}
- data_page = f2fs_get_read_data_page(inode,
- start_bidx, REQ_RAHEAD, true);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- if (IS_ERR(data_page)) {
+ data_folio = f2fs_get_read_data_folio(inode, start_bidx,
+ REQ_RAHEAD, true, NULL);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (IS_ERR(data_folio)) {
iput(inode);
continue;
}
- f2fs_put_page(data_page, 0);
+ f2fs_folio_put(data_folio, false);
add_gc_inode(gc_list, inode);
continue;
}
@@ -1503,14 +1665,14 @@ next_step:
int err;
if (S_ISREG(inode->i_mode)) {
- if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
+ if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
sbi->skipped_gc_rwsem++;
continue;
}
- if (!down_write_trylock(
- &fi->i_gc_rwsem[WRITE])) {
+ if (!f2fs_down_write_trylock(
+ &fi->i_gc_rwsem[READ])) {
sbi->skipped_gc_rwsem++;
- up_write(&fi->i_gc_rwsem[READ]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
continue;
}
locked = true;
@@ -1521,7 +1683,7 @@ next_step:
start_bidx = f2fs_start_bidx_of_node(nofs, inode)
+ ofs_in_node;
- if (f2fs_post_read_required(inode))
+ if (f2fs_meta_inode_gc_required(inode))
err = move_data_block(inode, start_bidx,
gc_type, segno, off);
else
@@ -1529,12 +1691,12 @@ next_step:
segno, off);
if (!err && (gc_type == FG_GC ||
- f2fs_post_read_required(inode)))
+ f2fs_meta_inode_gc_required(inode)))
submitted++;
if (locked) {
- up_write(&fi->i_gc_rwsem[WRITE]);
- up_write(&fi->i_gc_rwsem[READ]);
+ f2fs_up_write(&fi->i_gc_rwsem[READ]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
}
stat_inc_data_blk_count(sbi, 1, gc_type);
@@ -1548,14 +1710,14 @@ next_step:
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
- int gc_type)
+ int gc_type, bool one_time)
{
struct sit_info *sit_i = SIT_I(sbi);
int ret;
down_write(&sit_i->sentry_lock);
- ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
- NO_CHECK_TYPE, LFS, 0);
+ ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
+ LFS, 0, one_time);
up_write(&sit_i->sentry_lock);
return ret;
}
@@ -1563,139 +1725,186 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
static int do_garbage_collect(struct f2fs_sb_info *sbi,
unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type,
- bool force_migrate)
+ bool force_migrate, bool one_time)
{
- struct page *sum_page;
- struct f2fs_summary_block *sum;
struct blk_plug plug;
unsigned int segno = start_segno;
- unsigned int end_segno = start_segno + sbi->segs_per_sec;
+ unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
+ unsigned int sec_end_segno;
int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
- int submitted = 0;
+ unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
+ int submitted = 0, sum_blk_cnt;
- if (__is_large_section(sbi))
- end_segno = rounddown(end_segno, sbi->segs_per_sec);
+ if (__is_large_section(sbi)) {
+ sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
- /*
- * zone-capacity can be less than zone-size in zoned devices,
- * resulting in less than expected usable segments in the zone,
- * calculate the end segno in the zone which can be garbage collected
- */
- if (f2fs_sb_has_blkzoned(sbi))
- end_segno -= sbi->segs_per_sec -
- f2fs_usable_segs_in_sec(sbi, segno);
+ /*
+ * zone-capacity can be less than zone-size in zoned devices,
+ * resulting in less than expected usable segments in the zone,
+ * calculate the end segno in the zone which can be garbage
+ * collected
+ */
+ if (f2fs_sb_has_blkzoned(sbi))
+ sec_end_segno -= SEGS_PER_SEC(sbi) -
+ f2fs_usable_segs_in_sec(sbi);
+
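+ /* when a zoned device runs low on free space, widen the migration
+ * window by boost_gc_multiple to reclaim sections faster */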
+ if (gc_type == BG_GC || one_time) {
+ unsigned int window_granularity =
+ sbi->migration_window_granularity;
+
+ if (f2fs_sb_has_blkzoned(sbi) &&
+ !has_enough_free_blocks(sbi,
+ sbi->gc_thread->boost_zoned_gc_percent))
+ window_granularity *=
+ sbi->gc_thread->boost_gc_multiple;
+
+ end_segno = start_segno + window_granularity;
+ }
+
+ if (end_segno > sec_end_segno)
+ end_segno = sec_end_segno;
+ }
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
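+ /* summaries are grouped SUMS_PER_BLOCK segments per SSA block, so
+ * round down to a block boundary and count whole summary blocks */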
+ segno = rounddown(segno, SUMS_PER_BLOCK);
+ sum_blk_cnt = DIV_ROUND_UP(end_segno - segno, SUMS_PER_BLOCK);
/* readahead multi ssa blocks those have contiguous address */
if (__is_large_section(sbi))
f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
- end_segno - segno, META_SSA, true);
+ sum_blk_cnt, META_SSA, true);
/* reference all summary page */
while (segno < end_segno) {
- sum_page = f2fs_get_sum_page(sbi, segno++);
- if (IS_ERR(sum_page)) {
- int err = PTR_ERR(sum_page);
+ struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno);
- end_segno = segno - 1;
- for (segno = start_segno; segno < end_segno; segno++) {
- sum_page = find_get_page(META_MAPPING(sbi),
+ segno += SUMS_PER_BLOCK;
+ if (IS_ERR(sum_folio)) {
+ int err = PTR_ERR(sum_folio);
+
+ end_segno = segno - SUMS_PER_BLOCK;
+ segno = rounddown(start_segno, SUMS_PER_BLOCK);
+ while (segno < end_segno) {
+ sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
- f2fs_put_page(sum_page, 0);
- f2fs_put_page(sum_page, 0);
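+ /* drop both the lookup reference and the one held since f2fs_get_sum_folio() */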
+ folio_put_refs(sum_folio, 2);
+ segno += SUMS_PER_BLOCK;
}
return err;
}
- unlock_page(sum_page);
+ folio_unlock(sum_folio);
}
blk_start_plug(&plug);
- for (segno = start_segno; segno < end_segno; segno++) {
+ segno = start_segno;
+ while (segno < end_segno) {
+ unsigned int cur_segno;
/* find segment summary of victim */
- sum_page = find_get_page(META_MAPPING(sbi),
+ struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
- f2fs_put_page(sum_page, 0);
-
- if (get_valid_blocks(sbi, segno, false) == 0)
- goto freed;
- if (gc_type == BG_GC && __is_large_section(sbi) &&
- migrated >= sbi->migration_granularity)
- goto skip;
- if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
- goto skip;
-
- sum = page_address(sum_page);
- if (type != GET_SUM_TYPE((&sum->footer))) {
- f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
- segno, type, GET_SUM_TYPE((&sum->footer)));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_stop_checkpoint(sbi, false);
- goto skip;
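+ /* GC all victim segments whose summaries live in this SSA block */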
+ unsigned int block_end_segno = rounddown(segno, SUMS_PER_BLOCK)
+ + SUMS_PER_BLOCK;
+
+ if (block_end_segno > end_segno)
+ block_end_segno = end_segno;
+
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
+ f2fs_err(sbi, "%s: segment %u is used by log",
+ __func__, segno);
+ f2fs_bug_on(sbi, 1);
+ goto next_block;
}
- /*
- * this is to avoid deadlock:
- * - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - down_write(sentry_lock)
- * - down_read(sentry_lock) - change_curseg()
- * - lock_page(sum_page)
- */
- if (type == SUM_TYPE_NODE)
- submitted += gc_node_segment(sbi, sum->entries, segno,
- gc_type);
- else
- submitted += gc_data_segment(sbi, sum->entries, gc_list,
- segno, gc_type,
- force_migrate);
+ if (!folio_test_uptodate(sum_folio) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next_block;
- stat_inc_seg_count(sbi, type, gc_type);
- sbi->gc_reclaimed_segs[sbi->gc_mode]++;
- migrated++;
+ for (cur_segno = segno; cur_segno < block_end_segno;
+ cur_segno++) {
+ struct f2fs_summary_block *sum;
-freed:
- if (gc_type == FG_GC &&
- get_valid_blocks(sbi, segno, false) == 0)
- seg_freed++;
+ if (get_valid_blocks(sbi, cur_segno, false) == 0)
+ goto freed;
+ if (gc_type == BG_GC && __is_large_section(sbi) &&
+ migrated >= sbi->migration_granularity)
+ continue;
- if (__is_large_section(sbi) && segno + 1 < end_segno)
- sbi->next_victim_seg[gc_type] = segno + 1;
-skip:
- f2fs_put_page(sum_page, 0);
+ sum = SUM_BLK_PAGE_ADDR(sum_folio, cur_segno);
+ if (type != GET_SUM_TYPE((&sum->footer))) {
+ f2fs_err(sbi, "Inconsistent segment (%u) type "
+ "[%d, %d] in SSA and SIT",
+ cur_segno, type,
+ GET_SUM_TYPE((&sum->footer)));
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_CORRUPTED_SUMMARY);
+ continue;
+ }
+
+ /*
+ * this is to avoid deadlock:
+ * - lock_page(sum_page) - f2fs_replace_block
+ * - check_valid_map() - down_write(sentry_lock)
+ * - down_read(sentry_lock) - change_curseg()
+ * - lock_page(sum_page)
+ */
+ if (type == SUM_TYPE_NODE)
+ submitted += gc_node_segment(sbi, sum->entries,
+ cur_segno, gc_type);
+ else
+ submitted += gc_data_segment(sbi, sum->entries,
+ gc_list, cur_segno,
+ gc_type, force_migrate);
+
+ stat_inc_gc_seg_count(sbi, data_type, gc_type);
+ sbi->gc_reclaimed_segs[sbi->gc_mode]++;
+ migrated++;
+
+freed:
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, cur_segno, false) == 0)
+ seg_freed++;
+
+ if (__is_large_section(sbi))
+ sbi->next_victim_seg[gc_type] =
+ (cur_segno + 1 < sec_end_segno) ?
+ cur_segno + 1 : NULL_SEGNO;
+ }
+next_block:
+ folio_put_refs(sum_folio, 2);
+ segno = block_end_segno;
}
if (submitted)
- f2fs_submit_merged_write(sbi,
- (type == SUM_TYPE_NODE) ? NODE : DATA);
+ f2fs_submit_merged_write(sbi, data_type);
blk_finish_plug(&plug);
- stat_inc_call_count(sbi->stat_info);
+ if (migrated)
+ stat_inc_gc_sec_count(sbi, data_type, gc_type);
return seg_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
- bool background, bool force, unsigned int segno)
+int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
- int gc_type = sync ? FG_GC : BG_GC;
- int sec_freed = 0, seg_freed = 0, total_freed = 0;
+ int gc_type = gc_control->init_gc_type;
+ unsigned int segno = gc_control->victim_segno;
+ int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
int ret = 0;
struct cp_control cpc;
- unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
- unsigned long long first_skipped;
unsigned int skipped_round = 0, round = 0;
+ unsigned int upper_secs;
- trace_f2fs_gc_begin(sbi->sb, sync, background,
+ trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
+ gc_control->nr_free_secs,
get_pages(sbi, F2FS_DIRTY_NODES),
get_pages(sbi, F2FS_DIRTY_DENTS),
get_pages(sbi, F2FS_DIRTY_IMETA),
@@ -1705,9 +1914,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
prefree_segments(sbi));
cpc.reason = __get_cp_reason(sbi);
- sbi->skipped_gc_rwsem = 0;
- first_skipped = last_skipped;
gc_more:
+ sbi->skipped_gc_rwsem = 0;
if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
ret = -EINVAL;
goto stop;
@@ -1717,73 +1925,108 @@ gc_more:
goto stop;
}
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
+ /* Let's run FG_GC, if we don't have enough space. */
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
+ gc_type = FG_GC;
+ gc_control->one_time = false;
+
/*
* For example, if there are many prefree_segments below the given
* threshold, we can free them by writing a checkpoint. Then, we
* secure free segments which don't need fggc any more.
*/
- if (prefree_segments(sbi) &&
- !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+ if (prefree_segments(sbi)) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
+ /* Reset due to checkpoint */
+ sec_freed = 0;
}
- if (has_not_enough_free_secs(sbi, 0, 0))
- gc_type = FG_GC;
}
/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
- if (gc_type == BG_GC && !background) {
+ if (gc_type == BG_GC && gc_control->no_bg_gc) {
ret = -EINVAL;
goto stop;
}
- ret = __get_victim(sbi, &segno, gc_type);
- if (ret)
+retry:
+ ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
+ if (ret) {
+ /* allow searching for a victim in sections that have pinned data */
+ if (ret == -ENODATA && gc_type == FG_GC &&
+ f2fs_pinned_section_exists(DIRTY_I(sbi))) {
+ f2fs_unpin_all_sections(sbi, false);
+ goto retry;
+ }
+ goto stop;
+ }
+
+ seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
+ gc_control->should_migrate_blocks,
+ gc_control->one_time);
+ if (seg_freed < 0)
goto stop;
- seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
- if (gc_type == FG_GC &&
- seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
- sec_freed++;
total_freed += seg_freed;
- if (gc_type == FG_GC) {
- if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
- sbi->skipped_gc_rwsem)
- skipped_round++;
- last_skipped = sbi->skipped_atomic_files[FG_GC];
- round++;
+ if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
+ sec_freed++;
+ total_sec_freed++;
}
- if (gc_type == FG_GC)
- sbi->cur_victim_sec = NULL_SEGNO;
-
- if (sync)
+ if (gc_control->one_time)
goto stop;
- if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
- if (skipped_round <= MAX_SKIP_GC_COUNT ||
- skipped_round * 2 < round) {
- segno = NULL_SEGNO;
- goto gc_more;
- }
+ if (gc_type == FG_GC) {
+ sbi->cur_victim_sec = NULL_SEGNO;
- if (first_skipped < last_skipped &&
- (last_skipped - first_skipped) >
- sbi->skipped_gc_rwsem) {
- f2fs_drop_inmem_pages_all(sbi, true);
- segno = NULL_SEGNO;
- goto gc_more;
+ if (has_enough_free_secs(sbi, sec_freed, 0)) {
+ if (!gc_control->no_bg_gc &&
+ total_sec_freed < gc_control->nr_free_secs)
+ goto go_gc_more;
+ goto stop;
}
- if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+ if (sbi->skipped_gc_rwsem)
+ skipped_round++;
+ round++;
+ if (skipped_round > MAX_SKIP_GC_COUNT &&
+ skipped_round * 2 >= round) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
+ goto stop;
+ }
+ } else if (has_enough_free_secs(sbi, 0, 0)) {
+ goto stop;
}
+
+ __get_secs_required(sbi, NULL, &upper_secs, NULL);
+
+ /*
+ * Write checkpoint to reclaim prefree segments.
+ * We need three extra sections for the writer's data/node/dentry.
+ */
+ if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
+ prefree_segments(sbi)) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ ret = f2fs_write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ /* Reset due to checkpoint */
+ sec_freed = 0;
+ }
+go_gc_more:
+ segno = NULL_SEGNO;
+ goto gc_more;
+
stop:
SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
- SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
+ SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
+
+ if (gc_type == FG_GC)
+ f2fs_unpin_all_sections(sbi, true);
- trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
+ trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
get_pages(sbi, F2FS_DIRTY_NODES),
get_pages(sbi, F2FS_DIRTY_DENTS),
get_pages(sbi, F2FS_DIRTY_IMETA),
@@ -1792,12 +2035,12 @@ stop:
reserved_segments(sbi),
prefree_segments(sbi));
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
put_gc_inode(&gc_list);
- if (sync && !ret)
- ret = sec_freed ? 0 : -EAGAIN;
+ if (gc_control->err_gc_skipped && !ret)
+ ret = total_sec_freed ? 0 : -EAGAIN;
return ret;
}
@@ -1805,9 +2048,7 @@ int __init f2fs_create_garbage_collection_cache(void)
{
victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
sizeof(struct victim_entry));
- if (!victim_entry_slab)
- return -ENOMEM;
- return 0;
+ return victim_entry_slab ? 0 : -ENOMEM;
}
void f2fs_destroy_garbage_collection_cache(void)
@@ -1835,8 +2076,6 @@ static void init_atgc_management(struct f2fs_sb_info *sbi)
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
- DIRTY_I(sbi)->v_ops = &default_v_ops;
-
sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
/* give warm/cold data area from slower device */
@@ -1847,10 +2086,52 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
init_atgc_management(sbi);
}
+int f2fs_gc_range(struct f2fs_sb_info *sbi,
+ unsigned int start_seg, unsigned int end_seg,
+ bool dry_run, unsigned int dry_run_sections)
+{
+ unsigned int segno;
+ unsigned int gc_secs = dry_run_sections;
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
+ for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
+ struct gc_inode_list gc_list = {
+ .ilist = LIST_HEAD_INIT(gc_list.ilist),
+ .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ };
+
+ /*
+ * avoid migrating an empty section, as it can be allocated by an
+ * active log in parallel.
+ */
+ if (!get_valid_blocks(sbi, segno, true))
+ continue;
+
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
+ continue;
+
+ do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
+ put_gc_inode(&gc_list);
+
+ if (!dry_run && get_valid_blocks(sbi, segno, true))
+ return -EAGAIN;
+ if (dry_run && dry_run_sections &&
+ !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
+ break;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTSYS;
+ }
+
+ return 0;
+}
+
static int free_segment_range(struct f2fs_sb_info *sbi,
- unsigned int secs, bool gc_only)
+ unsigned int secs, bool dry_run)
{
- unsigned int segno, next_inuse, start, end;
+ unsigned int next_inuse, start, end;
struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
int gc_mode, gc_type;
int err = 0;
@@ -1858,7 +2139,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
/* Force block allocation for GC */
MAIN_SECS(sbi) -= secs;
- start = MAIN_SECS(sbi) * sbi->segs_per_sec;
+ start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
end = MAIN_SEGS(sbi) - 1;
mutex_lock(&DIRTY_I(sbi)->seglist_lock);
@@ -1872,31 +2153,18 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
/* Move out cursegs from the target range */
- for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
- f2fs_allocate_segment_for_resize(sbi, type, start, end);
-
- /* do GC to move out valid blocks in the range */
- for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
- struct gc_inode_list gc_list = {
- .ilist = LIST_HEAD_INIT(gc_list.ilist),
- .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
- };
-
- do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
- put_gc_inode(&gc_list);
-
- if (!gc_only && get_valid_blocks(sbi, segno, true)) {
- err = -EAGAIN;
- goto out;
- }
- if (fatal_signal_pending(current)) {
- err = -ERESTARTSYS;
+ for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
+ err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
+ if (err)
goto out;
- }
}
- if (gc_only)
+
+ /* do GC to move out valid blocks in the range */
+ err = f2fs_gc_range(sbi, start, end, dry_run, 0);
+ if (err || dry_run)
goto out;
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
goto out;
@@ -1919,9 +2187,9 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
int segment_count;
int segment_count_main;
long long block_count;
- int segs = secs * sbi->segs_per_sec;
+ int segs = secs * SEGS_PER_SEC(sbi);
- down_write(&sbi->sb_lock);
+ f2fs_down_write(&sbi->sb_lock);
section_count = le32_to_cpu(raw_sb->section_count);
segment_count = le32_to_cpu(raw_sb->segment_count);
@@ -1932,7 +2200,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
raw_sb->segment_count = cpu_to_le32(segment_count + segs);
raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
raw_sb->block_count = cpu_to_le64(block_count +
- (long long)segs * sbi->blocks_per_seg);
+ (long long)SEGS_TO_BLKS(sbi, segs));
if (f2fs_is_multi_device(sbi)) {
int last_dev = sbi->s_ndevs - 1;
int dev_segs =
@@ -1942,19 +2210,21 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
cpu_to_le32(dev_segs + segs);
}
- up_write(&sbi->sb_lock);
+ f2fs_up_write(&sbi->sb_lock);
}
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
- int segs = secs * sbi->segs_per_sec;
- long long blks = (long long)segs * sbi->blocks_per_seg;
+ int segs = secs * SEGS_PER_SEC(sbi);
+ long long blks = SEGS_TO_BLKS(sbi, segs);
long long user_block_count =
le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
MAIN_SECS(sbi) += secs;
+ if (sbi->allocate_section_hint > MAIN_SECS(sbi))
+ sbi->allocate_section_hint = MAIN_SECS(sbi);
FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
@@ -1962,19 +2232,23 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
if (f2fs_is_multi_device(sbi)) {
int last_dev = sbi->s_ndevs - 1;
+ sbi->allocate_section_hint = FDEV(0).total_segments /
+ SEGS_PER_SEC(sbi);
+
FDEV(last_dev).total_segments =
(int)FDEV(last_dev).total_segments + segs;
FDEV(last_dev).end_blk =
(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
- FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
- (int)(blks >> sbi->log_blocks_per_blkz);
+ FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
+ div_u64(blks, sbi->blocks_per_blkz);
#endif
}
}
-int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
__u64 old_block_count, shrunk_blocks;
struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
unsigned int secs;
@@ -1989,7 +2263,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
int last_dev = sbi->s_ndevs - 1;
__u64 last_segs = FDEV(last_dev).total_segments;
- if (block_count + last_segs * sbi->blocks_per_seg <=
+ if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
old_block_count)
return -EINVAL;
}
@@ -2012,12 +2286,18 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
return -EINVAL;
}
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
shrunk_blocks = old_block_count - block_count;
secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
/* stop other GC */
- if (!down_write_trylock(&sbi->gc_lock))
- return -EAGAIN;
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ err = -EAGAIN;
+ goto out_drop_write;
+ }
/* stop CP to protect MAIN_SEC in free_segment_range */
f2fs_lock_op(sbi);
@@ -2036,15 +2316,25 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
out_unlock:
f2fs_unlock_op(sbi);
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
+out_drop_write:
+ mnt_drop_write_file(filp);
if (err)
return err;
- set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
+ if (err)
+ return err;
+
+ if (f2fs_readonly(sbi->sb)) {
+ err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
+ if (err)
+ return err;
+ return -EROFS;
+ }
- freeze_super(sbi->sb);
- down_write(&sbi->gc_lock);
- down_write(&sbi->cp_global_sem);
+ f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->cp_global_sem);
spin_lock(&sbi->stat_lock);
if (shrunk_blocks + valid_user_blocks(sbi) +
@@ -2057,6 +2347,7 @@ out_unlock:
if (err)
goto out_err;
+ set_sbi_flag(sbi, SBI_IS_RESIZEFS);
err = free_segment_range(sbi, secs, false);
if (err)
goto recover_out;
@@ -2073,6 +2364,7 @@ out_unlock:
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
set_sbi_flag(sbi, SBI_IS_DIRTY);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err) {
update_fs_metadata(sbi, secs);
@@ -2080,6 +2372,7 @@ out_unlock:
f2fs_commit_super(sbi, false);
}
recover_out:
+ clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
@@ -2089,9 +2382,8 @@ recover_out:
spin_unlock(&sbi->stat_lock);
}
out_err:
- up_write(&sbi->cp_global_sem);
- up_write(&sbi->gc_lock);
- thaw_super(sbi->sb);
- clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ f2fs_up_write(&sbi->cp_global_sem);
+ f2fs_up_write(&sbi->gc_lock);
+ thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
return err;
}
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 3fe145e8e594..6c4d4567571e 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -15,21 +15,36 @@
#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
+/* GC sleep parameters for zoned devices */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED 10
+#define DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED 20
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED 60000
+
/* choose candidates from sections which have an age of more than 7 days */
#define DEF_GC_THREAD_AGE_THRESHOLD (60 * 60 * 24 * 7)
#define DEF_GC_THREAD_CANDIDATE_RATIO 20 /* select 20% oldest sections as candidates */
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT 10 /* select at most 10 sections as candidates */
#define DEF_GC_THREAD_AGE_WEIGHT 60 /* age weight */
+#define DEF_GC_THREAD_VALID_THRESH_RATIO 80 /* do not GC sections with more than 80% valid blocks during one-time GC */
#define DEFAULT_ACCURACY_CLASS 10000 /* accuracy class */
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+#define LIMIT_NO_ZONED_GC 60 /* no GC on zoned devices while free space stays above this percentage of total user space */
+#define LIMIT_BOOST_ZONED_GC 25 /* boost GC on zoned devices once free space falls below this percentage of total user space */
+#define DEF_MIGRATION_WINDOW_GRANULARITY_ZONED 3
+#define BOOST_GC_MULTIPLE 5
+#define ZONED_PIN_SEC_REQUIRED_COUNT 1
+
#define DEF_GC_FAILED_PINNED_FILES 2048
+#define MAX_GC_FAILED_PINNED_FILES USHRT_MAX
/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
+#define NR_GC_CHECKPOINT_SECS (3) /* data/node/dentry sections */
+
struct f2fs_gc_kthread {
struct task_struct *f2fs_gc_task;
wait_queue_head_t gc_wait_queue_head;
@@ -41,13 +56,20 @@ struct f2fs_gc_kthread {
unsigned int no_gc_sleep_time;
/* for changing gc mode */
- unsigned int gc_wake;
+ bool gc_wake;
/* for GC_MERGE mount option */
wait_queue_head_t fggc_wq; /*
* caller of f2fs_balance_fs()
* will wait on this wait queue.
*/
+
+ /* for gc control for zoned devices */
+ unsigned int no_zoned_gc_percent;
+ unsigned int boost_zoned_gc_percent;
+ unsigned int valid_thresh_ratio;
+ unsigned int boost_gc_multiple;
+ unsigned int boost_gc_greedy;
};
struct gc_inode_list {
@@ -55,20 +77,10 @@ struct gc_inode_list {
struct radix_tree_root iroot;
};
-struct victim_info {
- unsigned long long mtime; /* mtime of section */
- unsigned int segno; /* section No. */
-};
-
struct victim_entry {
struct rb_node rb_node; /* rb node located in rb-tree */
- union {
- struct {
- unsigned long long mtime; /* mtime of section */
- unsigned int segno; /* segment No. */
- };
- struct victim_info vi; /* victim info */
- };
+ unsigned long long mtime; /* mtime of section */
+ unsigned int segno; /* segment No. */
struct list_head list;
};
@@ -104,7 +116,7 @@ static inline block_t free_segs_blk_count(struct f2fs_sb_info *sbi)
if (f2fs_sb_has_blkzoned(sbi))
return free_segs_blk_count_zoned(sbi);
- return free_segments(sbi) << sbi->log_blocks_per_seg;
+ return SEGS_TO_BLKS(sbi, free_segments(sbi));
}
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
@@ -112,7 +124,7 @@ static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
block_t free_blks, ovp_blks;
free_blks = free_segs_blk_count(sbi);
- ovp_blks = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
+ ovp_blks = SEGS_TO_BLKS(sbi, overprovision_segments(sbi));
if (free_blks < ovp_blks)
return 0;
@@ -120,15 +132,13 @@ static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
return free_blks - ovp_blks;
}
-static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
+static inline block_t limit_invalid_user_blocks(block_t user_block_count)
{
- return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
+ return (long)(user_block_count * LIMIT_INVALID_BLOCK) / 100;
}
-static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
+static inline block_t limit_free_user_blocks(block_t reclaimable_user_blocks)
{
- block_t reclaimable_user_blocks = sbi->user_block_count -
- written_block_count(sbi);
return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
@@ -161,17 +171,32 @@ static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
*wait -= min_time;
}
+static inline bool has_enough_free_blocks(struct f2fs_sb_info *sbi,
+ unsigned int limit_perc)
+{
+ return free_sections(sbi) > ((sbi->total_sections * limit_perc) / 100);
+}
+
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
- block_t invalid_user_blocks = sbi->user_block_count -
- written_block_count(sbi);
+ block_t user_block_count = sbi->user_block_count;
+ block_t invalid_user_blocks = user_block_count -
+ written_block_count(sbi);
/*
* Background GC is triggered with the following conditions.
* 1. There are a number of invalid blocks.
* 2. There is not enough free space.
*/
- if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
- free_user_blocks(sbi) < limit_free_user_blocks(sbi))
- return true;
- return false;
+ return (invalid_user_blocks >
+ limit_invalid_user_blocks(user_block_count) &&
+ free_user_blocks(sbi) <
+ limit_free_user_blocks(invalid_user_blocks));
+}
+
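+/* boost GC on zoned devices once free sections drop below the boost
+ * threshold; otherwise fall back to the invalid-block heuristic above */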
+static inline bool need_to_boost_gc(struct f2fs_sb_info *sbi)
+{
+ if (f2fs_sb_has_blkzoned(sbi))
+ return !has_enough_free_blocks(sbi,
+ sbi->gc_thread->boost_zoned_gc_percent);
+ return has_enough_invalid_blocks(sbi);
}
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index e3beac546c63..049ce50cec9b 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -91,7 +91,7 @@ static u32 TEA_hash_name(const u8 *p, size_t len)
/*
* Compute @fname->hash. For all directories, @fname->disk_name must be set.
* For casefolded directories, @fname->usr_fname must be set, and also
- * @fname->cf_name if the filename is valid Unicode.
+ * @fname->cf_name if the filename is valid Unicode and is not "." or "..".
*/
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
{
@@ -105,15 +105,16 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
return;
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (IS_CASEFOLDED(dir)) {
/*
* If the casefolded name is provided, hash it instead of the
* on-disk name. If the casefolded name is *not* provided, that
- * should only be because the name wasn't valid Unicode, so fall
- * back to treating the name as an opaque byte sequence. Note
- * that to handle encrypted directories, the fallback must use
- * usr_fname (plaintext) rather than disk_name (ciphertext).
+ * should only be because the name wasn't valid Unicode or was
+ * "." or "..", so fall back to treating the name as an opaque
+ * byte sequence. Note that to handle encrypted directories,
+ * the fallback must use usr_fname (plaintext) rather than
+ * disk_name (ciphertext).
*/
WARN_ON_ONCE(!fname->usr_fname->name);
if (fname->cf_name.name) {
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index ea08f0dfa1bd..e5c6a08b7e4f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -14,21 +14,58 @@
#include "node.h"
#include <trace/events/f2fs.h>
-bool f2fs_may_inline_data(struct inode *inode)
+static bool support_inline_data(struct inode *inode)
{
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_used_in_atomic_write(inode))
return false;
-
if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
return false;
-
if (i_size_read(inode) > MAX_INLINE_DATA(inode))
return false;
+ return true;
+}
- if (f2fs_post_read_required(inode))
+bool f2fs_may_inline_data(struct inode *inode)
+{
+ if (!support_inline_data(inode))
return false;
- return true;
+ return !f2fs_post_read_required(inode);
+}
+
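+/* check whether the inode still owns data blocks or any node (i_nid) entries */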
+static bool inode_has_blocks(struct inode *inode, struct folio *ifolio)
+{
+ struct f2fs_inode *ri = F2FS_INODE(ifolio);
+ int i;
+
+ if (F2FS_HAS_BLOCKS(inode))
+ return true;
+
+ for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
+ if (ri->i_nid[i])
+ return true;
+ }
+ return false;
+}
+
+bool f2fs_sanity_check_inline_data(struct inode *inode, struct folio *ifolio)
+{
+ if (!f2fs_has_inline_data(inode))
+ return false;
+
+ if (inode_has_blocks(inode, ifolio))
+ return false;
+
+ if (!support_inline_data(inode))
+ return true;
+
+ /*
+ * used by sanity_check_inode(), when disk layout fields have not
+ * been synchronized to in-memory fields.
+ */
+ return (S_ISREG(inode->i_mode) &&
+ (file_is_encrypt(inode) || file_is_verity(inode) ||
+ (F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
}
bool f2fs_may_inline_dentry(struct inode *inode)
@@ -42,74 +79,70 @@ bool f2fs_may_inline_dentry(struct inode *inode)
return true;
}
-void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
+void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio)
{
- struct inode *inode = page->mapping->host;
- void *src_addr, *dst_addr;
+ struct inode *inode = folio->mapping->host;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return;
- f2fs_bug_on(F2FS_P_SB(page), page->index);
+ f2fs_bug_on(F2FS_I_SB(inode), folio->index);
- zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
+ folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));
/* Copy the whole inline data block */
- src_addr = inline_data_addr(inode, ipage);
- dst_addr = kmap_atomic(page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
- flush_dcache_page(page);
- kunmap_atomic(dst_addr);
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ memcpy_to_folio(folio, 0, inline_data_addr(inode, ifolio),
+ MAX_INLINE_DATA(inode));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
}
-void f2fs_truncate_inline_inode(struct inode *inode,
- struct page *ipage, u64 from)
+void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
+ u64 from)
{
void *addr;
if (from >= MAX_INLINE_DATA(inode))
return;
- addr = inline_data_addr(inode, ipage);
+ addr = inline_data_addr(inode, ifolio);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
if (from == 0)
clear_inode_flag(inode, FI_DATA_EXIST);
}
-int f2fs_read_inline_data(struct inode *inode, struct page *page)
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
{
- struct page *ipage;
+ struct folio *ifolio;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage)) {
- unlock_page(page);
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ folio_unlock(folio);
+ return PTR_ERR(ifolio);
}
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return -EAGAIN;
}
- if (page->index)
- zero_user_segment(page, 0, PAGE_SIZE);
+ if (folio->index)
+ folio_zero_segment(folio, 0, folio_size(folio));
else
- f2fs_do_read_inline_data(page, ipage);
+ f2fs_do_read_inline_data(folio, ifolio);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- f2fs_put_page(ipage, 1);
- unlock_page(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ f2fs_folio_put(ifolio, true);
+ folio_unlock(folio);
return 0;
}
-int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
+int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
@@ -117,7 +150,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_PRIO,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
.io_type = FS_DATA_IO,
};
@@ -131,7 +164,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
- err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
if (err) {
f2fs_truncate_data_blocks_range(dn, 1);
f2fs_put_dnode(dn);
@@ -145,24 +178,24 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
__func__, dn->inode->i_ino, dn->data_blkaddr);
+ f2fs_handle_error(fio.sbi, ERROR_INVALID_BLKADDR);
return -EFSCORRUPTED;
}
- f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
+ f2fs_bug_on(F2FS_F_SB(folio), folio_test_writeback(folio));
- f2fs_do_read_inline_data(page, dn->inode_page);
- set_page_dirty(page);
+ f2fs_do_read_inline_data(folio, dn->inode_folio);
+ folio_mark_dirty(folio);
/* clear dirty state */
- dirty = clear_page_dirty_for_io(page);
+ dirty = folio_clear_dirty_for_io(folio);
/* write data page to try to make data consistent */
- set_page_writeback(page);
- ClearPageError(page);
+ folio_start_writeback(folio);
fio.old_blkaddr = dn->data_blkaddr;
set_inode_flag(dn->inode, FI_HOT_DATA);
f2fs_outplace_write_data(dn, &fio);
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
if (dirty) {
inode_dec_dirty_pages(dn->inode);
f2fs_remove_dirty_inode(dn->inode);
@@ -172,8 +205,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
- f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
- clear_page_private_inline(dn->inode_page);
+ f2fs_truncate_inline_inode(dn->inode, dn->inode_folio, 0);
+ folio_clear_f2fs_inline(dn->inode_folio);
clear_out:
stat_dec_inline_inode(dn->inode);
clear_inode_flag(dn->inode, FI_INLINE_DATA);
@@ -185,39 +218,41 @@ int f2fs_convert_inline_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- struct page *ipage, *page;
+ struct folio *ifolio, *folio;
int err = 0;
- if (!f2fs_has_inline_data(inode) ||
- f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
+ if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ if (!f2fs_has_inline_data(inode))
return 0;
err = f2fs_dquot_initialize(inode);
if (err)
return err;
- page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
- if (!page)
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(inode->i_mapping, 0, false);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
f2fs_lock_op(sbi);
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto out;
}
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ set_new_dnode(&dn, inode, ifolio, ifolio, 0);
if (f2fs_has_inline_data(inode))
- err = f2fs_convert_inline_page(&dn, page);
+ err = f2fs_convert_inline_folio(&dn, folio);
f2fs_put_dnode(&dn);
out:
f2fs_unlock_op(sbi);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
if (!err)
f2fs_balance_fs(sbi, dn.node_changed);
@@ -225,47 +260,42 @@ out:
return err;
}
-int f2fs_write_inline_data(struct inode *inode, struct page *page)
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
- void *src_addr, *dst_addr;
- struct dnode_of_data dn;
- int err;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct folio *ifolio;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
- if (err)
- return err;
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_dnode(&dn);
+ f2fs_folio_put(ifolio, true);
return -EAGAIN;
}
- f2fs_bug_on(F2FS_I_SB(inode), page->index);
+ f2fs_bug_on(F2FS_I_SB(inode), folio->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
- src_addr = kmap_atomic(page);
- dst_addr = inline_data_addr(inode, dn.inode_page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
- kunmap_atomic(src_addr);
- set_page_dirty(dn.inode_page);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
+ memcpy_from_folio(inline_data_addr(inode, ifolio),
+ folio, 0, MAX_INLINE_DATA(inode));
+ folio_mark_dirty(ifolio);
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(folio);
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
- clear_page_private_inline(dn.inode_page);
- f2fs_put_dnode(&dn);
+ folio_clear_f2fs_inline(ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
-int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode *ri = NULL;
void *src_addr, *dst_addr;
- struct page *ipage;
/*
* The inline_data recovery policy is as follows.
@@ -275,38 +305,39 @@ int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
* x o -> remove data blocks, and then recover inline_data
* x x -> recover data blocks
*/
- if (IS_INODE(npage))
- ri = F2FS_INODE(npage);
+ if (IS_INODE(nfolio))
+ ri = F2FS_INODE(nfolio);
if (f2fs_has_inline_data(inode) &&
ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+ struct folio *ifolio;
process_inline:
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
- src_addr = inline_data_addr(inode, npage);
- dst_addr = inline_data_addr(inode, ipage);
+ src_addr = inline_data_addr(inode, nfolio);
+ dst_addr = inline_data_addr(inode, ifolio);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
set_inode_flag(inode, FI_INLINE_DATA);
set_inode_flag(inode, FI_DATA_EXIST);
- set_page_dirty(ipage);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
return 1;
}
if (f2fs_has_inline_data(inode)) {
- ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
- f2fs_truncate_inline_inode(inode, ipage, 0);
+ struct folio *ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
+ f2fs_truncate_inline_inode(inode, ifolio, 0);
stat_dec_inline_inode(inode);
clear_inode_flag(inode, FI_INLINE_DATA);
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
int ret;
@@ -321,49 +352,50 @@ process_inline:
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct folio **res_folio,
+ bool use_hash)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_dir_entry *de;
struct f2fs_dentry_ptr d;
- struct page *ipage;
+ struct folio *ifolio;
void *inline_dentry;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage)) {
- *res_page = ipage;
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio)) {
+ *res_folio = ifolio;
return NULL;
}
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
- de = f2fs_find_target_dentry(&d, fname, NULL);
- unlock_page(ipage);
+ de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
+ folio_unlock(ifolio);
if (IS_ERR(de)) {
- *res_page = ERR_CAST(de);
+ *res_folio = ERR_CAST(de);
de = NULL;
}
if (de)
- *res_page = ipage;
+ *res_folio = ifolio;
else
- f2fs_put_page(ipage, 0);
+ f2fs_folio_put(ifolio, false);
return de;
}
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
- struct page *ipage)
+ struct folio *ifolio)
{
struct f2fs_dentry_ptr d;
void *inline_dentry;
- inline_dentry = inline_data_addr(inode, ipage);
+ inline_dentry = inline_data_addr(inode, ifolio);
make_dentry_ptr_inline(inode, &d, inline_dentry);
f2fs_do_make_empty_dir(inode, parent, &d);
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
/* update i_size to MAX_INLINE_DATA */
if (i_size_read(inode) < MAX_INLINE_DATA(inode))
@@ -375,60 +407,60 @@ int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
* NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function.
*/
-static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+static int f2fs_move_inline_dirents(struct inode *dir, struct folio *ifolio,
void *inline_dentry)
{
- struct page *page;
+ struct folio *folio;
struct dnode_of_data dn;
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr src, dst;
int err;
- page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
- if (!page) {
- f2fs_put_page(ipage, 1);
- return -ENOMEM;
+ folio = f2fs_grab_cache_folio(dir->i_mapping, 0, true);
+ if (IS_ERR(folio)) {
+ f2fs_folio_put(ifolio, true);
+ return PTR_ERR(folio);
}
- set_new_dnode(&dn, dir, ipage, NULL, 0);
+ set_new_dnode(&dn, dir, ifolio, NULL, 0);
err = f2fs_reserve_block(&dn, 0);
if (err)
goto out;
if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
f2fs_put_dnode(&dn);
- set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
- f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
+ set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
+ f2fs_warn(F2FS_F_SB(folio), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
__func__, dir->i_ino, dn.data_blkaddr);
+ f2fs_handle_error(F2FS_F_SB(folio), ERROR_INVALID_BLKADDR);
err = -EFSCORRUPTED;
goto out;
}
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ f2fs_folio_wait_writeback(folio, DATA, true, true);
- dentry_blk = page_address(page);
+ dentry_blk = folio_address(folio);
+
+ /*
+ * Start by zeroing the full block, to ensure that all unused space is
+ * zeroed and no uninitialized memory is leaked to disk.
+ */
+ memset(dentry_blk, 0, F2FS_BLKSIZE);
make_dentry_ptr_inline(dir, &src, inline_dentry);
make_dentry_ptr_block(dir, &dst, dentry_blk);
/* copy data from inline dentry block to new dentry block */
memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
- memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
- /*
- * we do not need to zero out remainder part of dentry and filename
- * field, since we have used bitmap for marking the usage status of
- * them, besides, we can also ignore copying/zeroing reserved space
- * of dentry block, because them haven't been used so far.
- */
memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- set_page_dirty(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_mark_dirty(folio);
/* clear inline dir and flag after data writeback */
- f2fs_truncate_inline_inode(dir, ipage, 0);
+ f2fs_truncate_inline_inode(dir, ifolio, 0);
stat_dec_inline_dir(dir);
clear_inode_flag(dir, FI_INLINE_DENTRY);
@@ -445,7 +477,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
if (i_size_read(dir) < PAGE_SIZE)
f2fs_i_size_write(dir, PAGE_SIZE);
out:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return err;
}
@@ -485,7 +517,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
fname.hash = de->hash_code;
ino = le32_to_cpu(de->ino);
- fake_mode = f2fs_get_de_type(de) << S_SHIFT;
+ fake_mode = fs_ftype_to_dtype(de->file_type) << S_DT_SHIFT;
err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
if (err)
@@ -501,7 +533,7 @@ punch_dentry_pages:
return err;
}
-static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct folio *ifolio,
void *inline_dentry)
{
void *backup_dentry;
@@ -510,20 +542,20 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
if (!backup_dentry) {
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return -ENOMEM;
}
memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
- f2fs_truncate_inline_inode(dir, ipage, 0);
+ f2fs_truncate_inline_inode(dir, ifolio, 0);
- unlock_page(ipage);
+ folio_unlock(ifolio);
err = f2fs_add_inline_entries(dir, backup_dentry);
if (err)
goto recover;
- lock_page(ipage);
+ folio_lock(ifolio);
stat_dec_inline_dir(dir);
clear_inode_flag(dir, FI_INLINE_DENTRY);
@@ -539,31 +571,31 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
kfree(backup_dentry);
return 0;
recover:
- lock_page(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ folio_lock(ifolio);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
- set_page_dirty(ipage);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
kfree(backup_dentry);
return err;
}
-static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int do_convert_inline_dir(struct inode *dir, struct folio *ifolio,
void *inline_dentry)
{
if (!F2FS_I(dir)->i_dir_level)
- return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+ return f2fs_move_inline_dirents(dir, ifolio, inline_dentry);
else
- return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+ return f2fs_move_rehashed_dirents(dir, ifolio, inline_dentry);
}
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
+ struct folio *ifolio;
struct f2fs_filename fname;
void *inline_dentry = NULL;
int err = 0;
@@ -577,22 +609,22 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio)) {
+ err = PTR_ERR(ifolio);
goto out_fname;
}
- if (f2fs_has_enough_room(dir, ipage, &fname)) {
- f2fs_put_page(ipage, 1);
+ if (f2fs_has_enough_room(dir, ifolio, &fname)) {
+ f2fs_folio_put(ifolio, true);
goto out_fname;
}
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
- err = do_convert_inline_dir(dir, ipage, inline_dentry);
+ err = do_convert_inline_dir(dir, ifolio, inline_dentry);
if (!err)
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
out_fname:
f2fs_free_filename(&fname);
out:
@@ -604,24 +636,24 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
+ struct folio *ifolio;
unsigned int bit_pos;
void *inline_dentry = NULL;
struct f2fs_dentry_ptr d;
int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
- struct page *page = NULL;
+ struct folio *folio = NULL;
int err = 0;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
if (bit_pos >= d.max) {
- err = do_convert_inline_dir(dir, ipage, inline_dentry);
+ err = do_convert_inline_dir(dir, ifolio, inline_dentry);
if (err)
return err;
err = -EAGAIN;
@@ -629,20 +661,21 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
}
if (inode) {
- down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
+ SINGLE_DEPTH_NESTING);
+ folio = f2fs_init_inode_metadata(inode, dir, fname, ifolio);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
}
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
bit_pos);
- set_page_dirty(ipage);
+ folio_mark_dirty(ifolio);
/* we don't need to mark_inode_dirty now */
if (inode) {
@@ -650,22 +683,22 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
/* synchronize inode page's data from inode cache */
if (is_inode_flag_set(inode, FI_NEW_INODE))
- f2fs_update_inode(inode, page);
+ f2fs_update_inode(inode, folio);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
f2fs_update_parent_metadata(dir, inode, 0);
fail:
if (inode)
- up_write(&F2FS_I(inode)->i_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
out:
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return err;
}
-void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
- struct inode *dir, struct inode *inode)
+void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
+ struct folio *folio, struct inode *dir, struct inode *inode)
{
struct f2fs_dentry_ptr d;
void *inline_dentry;
@@ -673,20 +706,20 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
unsigned int bit_pos;
int i;
- lock_page(page);
- f2fs_wait_on_page_writeback(page, NODE, true, true);
+ folio_lock(folio);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
- inline_dentry = inline_data_addr(dir, page);
+ inline_dentry = inline_data_addr(dir, folio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
bit_pos = dentry - d.dentry;
for (i = 0; i < slots; i++)
__clear_bit_le(bit_pos + i, d.bitmap);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
@@ -696,21 +729,21 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
bool f2fs_empty_inline_dir(struct inode *dir)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
+ struct folio *ifolio;
unsigned int bit_pos = 2;
void *inline_dentry;
struct f2fs_dentry_ptr d;
- ipage = f2fs_get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
+ ifolio = f2fs_get_inode_folio(sbi, dir->i_ino);
+ if (IS_ERR(ifolio))
return false;
- inline_dentry = inline_data_addr(dir, ipage);
+ inline_dentry = inline_data_addr(dir, ifolio);
make_dentry_ptr_inline(dir, &d, inline_dentry);
bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
if (bit_pos < d.max)
return false;
@@ -722,7 +755,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct fscrypt_str *fstr)
{
struct inode *inode = file_inode(file);
- struct page *ipage = NULL;
+ struct folio *ifolio = NULL;
struct f2fs_dentry_ptr d;
void *inline_dentry = NULL;
int err;
@@ -732,17 +765,17 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
if (ctx->pos == d.max)
return 0;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
/*
* f2fs_readdir() is protected by inode->i_rwsem, so it is safe to access
* the inode folio without holding its lock.
*/
- unlock_page(ipage);
+ folio_unlock(ifolio);
- inline_dentry = inline_data_addr(inode, ipage);
+ inline_dentry = inline_data_addr(inode, ifolio);
make_dentry_ptr_inline(inode, &d, inline_dentry);
@@ -750,7 +783,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
if (!err)
ctx->pos = d.max;
- f2fs_put_page(ipage, 0);
+ f2fs_folio_put(ifolio, false);
return err < 0 ? err : 0;
}
@@ -761,12 +794,12 @@ int f2fs_inline_data_fiemap(struct inode *inode,
__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
FIEMAP_EXTENT_LAST;
struct node_info ni;
- struct page *ipage;
+ struct folio *ifolio;
int err = 0;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!f2fs_has_inline_data(inode)) {
@@ -786,16 +819,16 @@ int f2fs_inline_data_fiemap(struct inode *inode,
ilen = start + len;
ilen -= start;
- err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
if (err)
goto out;
byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
- byteaddr += (char *)inline_data_addr(inode, ipage) -
- (char *)F2FS_INODE(ipage);
+ byteaddr += (char *)inline_data_addr(inode, ifolio) -
+ (char *)F2FS_INODE(ifolio);
err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
out:
- f2fs_put_page(ipage, 1);
+ f2fs_folio_put(ifolio, true);
return err;
}
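/*
 * Illustrative sketch, not part of the patch above: the inline.c hunks all
 * follow the same mechanical page-to-folio conversion.  Assuming the folio
 * helpers already used in the diff (f2fs_get_inode_folio(),
 * f2fs_folio_wait_writeback(), f2fs_folio_put()), a converted accessor has
 * roughly this shape; the function name is hypothetical.
 */
static int example_dirty_inode_folio(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct folio *ifolio;

	/* replaces f2fs_get_node_page() */
	ifolio = f2fs_get_inode_folio(sbi, ino);
	if (IS_ERR(ifolio))
		return PTR_ERR(ifolio);

	f2fs_folio_wait_writeback(ifolio, NODE, true, true);
	folio_mark_dirty(ifolio);	/* replaces set_page_dirty() */
	f2fs_folio_put(ifolio, true);	/* replaces f2fs_put_page(page, 1) */
	return 0;
}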
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 0f8b2df3e1e0..38b8994bc1b2 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -7,9 +7,10 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
-#include <linux/backing-dev.h>
#include <linux/writeback.h>
+#include <linux/sched/mm.h>
+#include <linux/lz4.h>
+#include <linux/zstd.h>
#include "f2fs.h"
#include "node.h"
@@ -27,9 +28,17 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
if (is_inode_flag_set(inode, FI_NEW_INODE))
return;
+ if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ return;
+
if (f2fs_inode_dirtied(inode, sync))
return;
+ /* only an atomic file with FI_ATOMIC_COMMITTED set can be marked VFS dirty */
+ if (f2fs_is_atomic_file(inode) &&
+ !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
+ return;
+
mark_inode_dirty_sync(inode);
}
@@ -59,78 +68,63 @@ void f2fs_set_inode_flags(struct inode *inode)
S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}
-static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+static void __get_inode_rdev(struct inode *inode, struct folio *node_folio)
{
- int extra_size = get_extra_isize(inode);
+ __le32 *addr = get_dnode_addr(inode, node_folio);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- if (ri->i_addr[extra_size])
- inode->i_rdev = old_decode_dev(
- le32_to_cpu(ri->i_addr[extra_size]));
+ if (addr[0])
+ inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
else
- inode->i_rdev = new_decode_dev(
- le32_to_cpu(ri->i_addr[extra_size + 1]));
+ inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
}
}
-static int __written_first_block(struct f2fs_sb_info *sbi,
- struct f2fs_inode *ri)
-{
- block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
-
- if (!__is_valid_data_blkaddr(addr))
- return 1;
- if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
- return -EFSCORRUPTED;
- return 0;
-}
-
-static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+static void __set_inode_rdev(struct inode *inode, struct folio *node_folio)
{
- int extra_size = get_extra_isize(inode);
+ __le32 *addr = get_dnode_addr(inode, node_folio);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
- ri->i_addr[extra_size] =
- cpu_to_le32(old_encode_dev(inode->i_rdev));
- ri->i_addr[extra_size + 1] = 0;
+ addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+ addr[1] = 0;
} else {
- ri->i_addr[extra_size] = 0;
- ri->i_addr[extra_size + 1] =
- cpu_to_le32(new_encode_dev(inode->i_rdev));
- ri->i_addr[extra_size + 2] = 0;
+ addr[0] = 0;
+ addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+ addr[2] = 0;
}
}
}
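/*
 * Layout assumed by the two rdev helpers above (illustrative summary):
 * get_dnode_addr() yields the first raw block-address slot regardless of
 * i_extra_isize, so an old-format device number is stored in addr[0], and a
 * new-format one is signalled by addr[0] == 0 with the value in addr[1]
 * (and addr[2] cleared), mirroring old_encode_dev()/new_encode_dev().
 */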
-static void __recover_inline_status(struct inode *inode, struct page *ipage)
+static void __recover_inline_status(struct inode *inode, struct folio *ifolio)
{
- void *inline_data = inline_data_addr(inode, ipage);
+ void *inline_data = inline_data_addr(inode, ifolio);
__le32 *start = inline_data;
__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);
while (start < end) {
if (*start++) {
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
set_inode_flag(inode, FI_DATA_EXIST);
- set_raw_inline(inode, F2FS_INODE(ipage));
- set_page_dirty(ipage);
+ set_raw_inline(inode, F2FS_INODE(ifolio));
+ folio_mark_dirty(ifolio);
return;
}
}
return;
}
-static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
+static
+bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct folio *folio)
{
- struct f2fs_inode *ri = &F2FS_NODE(page)->i;
+ struct f2fs_inode *ri = &F2FS_NODE(folio)->i;
if (!f2fs_sb_has_inode_chksum(sbi))
return false;
- if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ if (!IS_INODE(folio) || !(ri->i_inline & F2FS_EXTRA_ATTR))
return false;
if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
@@ -140,9 +134,9 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page
return true;
}
-static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
+static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct folio *folio)
{
- struct f2fs_node *node = F2FS_NODE(page);
+ struct f2fs_node *node = F2FS_NODE(folio);
struct f2fs_inode *ri = &node->i;
__le32 ino = node->footer.ino;
__le32 gen = ri->i_generation;
@@ -151,19 +145,18 @@ static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
unsigned int cs_size = sizeof(dummy_cs);
- chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
- sizeof(ino));
- chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));
+ chksum = f2fs_chksum(sbi->s_chksum_seed, (__u8 *)&ino, sizeof(ino));
+ chksum_seed = f2fs_chksum(chksum, (__u8 *)&gen, sizeof(gen));
- chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
- chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
+ chksum = f2fs_chksum(chksum_seed, (__u8 *)ri, offset);
+ chksum = f2fs_chksum(chksum, (__u8 *)&dummy_cs, cs_size);
offset += cs_size;
- chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
- F2FS_BLKSIZE - offset);
+ chksum = f2fs_chksum(chksum, (__u8 *)ri + offset,
+ F2FS_BLKSIZE - offset);
return chksum;
}
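/*
 * Sketch of the chaining above (illustrative): with the seed argument now
 * passed directly instead of being looked up through sbi, the inode
 * checksum is a chained CRC over the ino, the generation, the inode up to
 * the checksum field, a zeroed placeholder for the field itself, and the
 * rest of the block:
 *
 *	crc = f2fs_chksum(seed, &ino, sizeof(ino));
 *	crc = f2fs_chksum(crc, &gen, sizeof(gen));
 *	crc = f2fs_chksum(crc, ri, offset);          // up to i_inode_checksum
 *	crc = f2fs_chksum(crc, &dummy_cs, cs_size);  // the field, as zero
 *	crc = f2fs_chksum(crc, (u8 *)ri + offset + cs_size,
 *			  F2FS_BLKSIZE - offset - cs_size);
 */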
-bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio)
{
struct f2fs_inode *ri;
__u32 provided, calculated;
@@ -172,161 +165,235 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
return true;
#ifdef CONFIG_F2FS_CHECK_FS
- if (!f2fs_enable_inode_chksum(sbi, page))
+ if (!f2fs_enable_inode_chksum(sbi, folio))
#else
- if (!f2fs_enable_inode_chksum(sbi, page) ||
- PageDirty(page) || PageWriteback(page))
+ if (!f2fs_enable_inode_chksum(sbi, folio) ||
+ folio_test_dirty(folio) ||
+ folio_test_writeback(folio))
#endif
return true;
- ri = &F2FS_NODE(page)->i;
+ ri = &F2FS_NODE(folio)->i;
provided = le32_to_cpu(ri->i_inode_checksum);
- calculated = f2fs_inode_chksum(sbi, page);
+ calculated = f2fs_inode_chksum(sbi, folio);
if (provided != calculated)
f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
- page->index, ino_of_node(page), provided, calculated);
+ folio->index, ino_of_node(folio),
+ provided, calculated);
return provided == calculated;
}
-void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
+void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct folio *folio)
{
- struct f2fs_inode *ri = &F2FS_NODE(page)->i;
+ struct f2fs_inode *ri = &F2FS_NODE(folio)->i;
- if (!f2fs_enable_inode_chksum(sbi, page))
+ if (!f2fs_enable_inode_chksum(sbi, folio))
return;
- ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
+ ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, folio));
+}
+
+static bool sanity_check_compress_inode(struct inode *inode,
+ struct f2fs_inode *ri)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned char clevel;
+
+ if (ri->i_compress_algorithm >= COMPRESS_MAX) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
+ __func__, inode->i_ino, ri->i_compress_algorithm);
+ return false;
+ }
+ if (le64_to_cpu(ri->i_compr_blocks) >
+ SECTOR_TO_BLOCK(inode->i_blocks)) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
+ __func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
+ SECTOR_TO_BLOCK(inode->i_blocks));
+ return false;
+ }
+ if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
+ __func__, inode->i_ino, ri->i_log_cluster_size);
+ return false;
+ }
+
+ clevel = le16_to_cpu(ri->i_compress_flag) >>
+ COMPRESS_LEVEL_OFFSET;
+ switch (ri->i_compress_algorithm) {
+ case COMPRESS_LZO:
+#ifdef CONFIG_F2FS_FS_LZO
+ if (clevel)
+ goto err_level;
+#endif
+ break;
+ case COMPRESS_LZORLE:
+#ifdef CONFIG_F2FS_FS_LZORLE
+ if (clevel)
+ goto err_level;
+#endif
+ break;
+ case COMPRESS_LZ4:
+#ifdef CONFIG_F2FS_FS_LZ4
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ if (clevel &&
+ (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
+ goto err_level;
+#else
+ if (clevel)
+ goto err_level;
+#endif
+#endif
+ break;
+ case COMPRESS_ZSTD:
+#ifdef CONFIG_F2FS_FS_ZSTD
+ if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
+ goto err_level;
+#endif
+ break;
+ default:
+ goto err_level;
+ }
+
+ return true;
+err_level:
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
+ __func__, inode->i_ino, clevel);
+ return false;
}
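/*
 * Worked example of the level check above (illustrative): on a kernel built
 * with CONFIG_F2FS_FS_LZ4 but without CONFIG_F2FS_FS_LZ4HC, any non-zero
 * level stored on an LZ4-compressed inode jumps to err_level and fails the
 * sanity check; with LZ4HC enabled, only a zero level or one inside
 * [LZ4HC_MIN_CLEVEL, LZ4HC_MAX_CLEVEL] is accepted, and ZSTD levels are
 * bounded by zstd_min_clevel()/zstd_max_clevel().
 */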
-static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+static bool sanity_check_inode(struct inode *inode, struct folio *node_folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_inode *ri = F2FS_INODE(node_page);
+ struct f2fs_inode *ri = F2FS_INODE(node_folio);
unsigned long long iblocks;
- iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+ iblocks = le64_to_cpu(F2FS_INODE(node_folio)->i_blocks);
if (!iblocks) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
__func__, inode->i_ino, iblocks);
return false;
}
- if (ino_of_node(node_page) != nid_of_node(node_page)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
+ if (ino_of_node(node_folio) != nid_of_node(node_folio)) {
f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
__func__, inode->i_ino,
- ino_of_node(node_page), nid_of_node(node_page));
+ ino_of_node(node_folio), nid_of_node(node_folio));
return false;
}
- if (f2fs_sb_has_flexible_inline_xattr(sbi)
- && !f2fs_has_extra_attr(inode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
- __func__, inode->i_ino);
+ if (ino_of_node(node_folio) == fi->i_xattr_nid) {
+ f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.",
+ __func__, inode->i_ino, fi->i_xattr_nid);
return false;
}
- if (f2fs_has_extra_attr(inode) &&
- !f2fs_sb_has_extra_attr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
+ if (S_ISDIR(inode->i_mode) && unlikely(inode->i_nlink == 1)) {
+ f2fs_warn(sbi, "%s: directory inode (ino=%lx) has a single i_nlink",
__func__, inode->i_ino);
return false;
}
- if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+ if (f2fs_has_extra_attr(inode)) {
+ if (!f2fs_sb_has_extra_attr(sbi)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
+ __func__, inode->i_ino);
+ return false;
+ }
+ if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+ fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
fi->i_extra_isize % sizeof(__le32)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
- __func__, inode->i_ino, fi->i_extra_isize,
- F2FS_TOTAL_EXTRA_ATTR_SIZE);
- return false;
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
+ __func__, inode->i_ino, fi->i_extra_isize,
+ F2FS_TOTAL_EXTRA_ATTR_SIZE);
+ return false;
+ }
+ if (f2fs_sb_has_compression(sbi) &&
+ fi->i_flags & F2FS_COMPR_FL &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ i_compress_flag)) {
+ if (!sanity_check_compress_inode(inode, ri))
+ return false;
+ }
}
- if (f2fs_has_extra_attr(inode) &&
- f2fs_sb_has_flexible_inline_xattr(sbi) &&
+ if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
f2fs_has_inline_xattr(inode) &&
- (!fi->i_inline_xattr_size ||
+ (fi->i_inline_xattr_size < MIN_INLINE_XATTR_SIZE ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, min: %zu, max: %lu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
- MAX_INLINE_XATTR_SIZE);
+ MIN_INLINE_XATTR_SIZE, MAX_INLINE_XATTR_SIZE);
return false;
}
- if (F2FS_I(inode)->extent_tree) {
- struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
-
- if (ei->len &&
- (!f2fs_is_valid_blkaddr(sbi, ei->blk,
- DATA_GENERIC_ENHANCE) ||
- !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
- DATA_GENERIC_ENHANCE))) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
- __func__, inode->i_ino,
- ei->blk, ei->fofs, ei->len);
+ if (!f2fs_sb_has_extra_attr(sbi)) {
+ if (f2fs_sb_has_project_quota(sbi)) {
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
+ return false;
+ }
+ if (f2fs_sb_has_inode_chksum(sbi)) {
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
+ return false;
+ }
+ if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
+ return false;
+ }
+ if (f2fs_sb_has_inode_crtime(sbi)) {
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
+ return false;
+ }
+ if (f2fs_sb_has_compression(sbi)) {
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
return false;
}
}
- if (f2fs_has_inline_data(inode) &&
- (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
+ if (f2fs_sanity_check_inline_data(inode, node_folio)) {
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
}
if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
}
if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
__func__, inode->i_ino);
return false;
}
- if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
- fi->i_flags & F2FS_COMPR_FL &&
- F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
- i_log_cluster_size)) {
- if (ri->i_compress_algorithm >= COMPRESS_MAX) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
- "compress algorithm: %u, run fsck to fix",
- __func__, inode->i_ino,
- ri->i_compress_algorithm);
- return false;
- }
- if (le64_to_cpu(ri->i_compr_blocks) >
- SECTOR_TO_BLOCK(inode->i_blocks)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
- "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
- __func__, inode->i_ino,
- le64_to_cpu(ri->i_compr_blocks),
- SECTOR_TO_BLOCK(inode->i_blocks));
+ if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
+ __func__, inode->i_ino, fi->i_xattr_nid);
+ return false;
+ }
+
+ if (IS_DEVICE_ALIASING(inode)) {
+ if (!f2fs_sb_has_device_alias(sbi)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has device alias flag, but the feature is off",
+ __func__, inode->i_ino);
return false;
}
- if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
- ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
- "log cluster size: %u, run fsck to fix",
- __func__, inode->i_ino,
- ri->i_log_cluster_size);
+ if (!f2fs_is_pinned_file(inode)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has device alias flag, but is not pinned",
+ __func__, inode->i_ino);
return false;
}
}
@@ -334,24 +401,32 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return true;
}
+static void init_idisk_time(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ fi->i_disk_time[0] = inode_get_atime(inode);
+ fi->i_disk_time[1] = inode_get_ctime(inode);
+ fi->i_disk_time[2] = inode_get_mtime(inode);
+}
+
static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct page *node_page;
+ struct folio *node_folio;
struct f2fs_inode *ri;
projid_t i_projid;
- int err;
/* Check if ino is within scope */
if (f2fs_check_nid_range(sbi, inode->i_ino))
return -EINVAL;
- node_page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(node_page))
- return PTR_ERR(node_page);
+ node_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(node_folio))
+ return PTR_ERR(node_folio);
- ri = F2FS_INODE(node_page);
+ ri = F2FS_INODE(node_folio);
inode->i_mode = le16_to_cpu(ri->i_mode);
i_uid_write(inode, le32_to_cpu(ri->i_uid));
@@ -360,18 +435,17 @@ static int do_read_inode(struct inode *inode)
inode->i_size = le64_to_cpu(ri->i_size);
inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
- inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
- inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
- inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
- inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
+ inode_set_atime(inode, le64_to_cpu(ri->i_atime),
+ le32_to_cpu(ri->i_atime_nsec));
+ inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
+ le32_to_cpu(ri->i_ctime_nsec));
+ inode_set_mtime(inode, le64_to_cpu(ri->i_mtime),
+ le32_to_cpu(ri->i_mtime_nsec));
inode->i_generation = le32_to_cpu(ri->i_generation);
if (S_ISDIR(inode->i_mode))
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
else if (S_ISREG(inode->i_mode))
- fi->i_gc_failures[GC_FAILURE_PIN] =
- le16_to_cpu(ri->i_gc_failures);
+ fi->i_gc_failures = le16_to_cpu(ri->i_gc_failures);
fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
fi->i_flags = le32_to_cpu(ri->i_flags);
if (S_ISREG(inode->i_mode))
@@ -381,8 +455,6 @@ static int do_read_inode(struct inode *inode)
fi->i_pino = le32_to_cpu(ri->i_pino);
fi->i_dir_level = ri->i_dir_level;
- f2fs_init_extent_tree(inode, node_page);
-
get_inline_info(inode, ri);
fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
@@ -404,34 +476,26 @@ static int do_read_inode(struct inode *inode)
fi->i_inline_xattr_size = 0;
}
- if (!sanity_check_inode(inode, node_page)) {
- f2fs_put_page(node_page, 1);
+ if (!sanity_check_inode(inode, node_folio)) {
+ f2fs_folio_put(node_folio, true);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
return -EFSCORRUPTED;
}
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
- __recover_inline_status(inode, node_page);
+ __recover_inline_status(inode, node_folio);
/* try to recover cold bit for non-dir inode */
- if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
- f2fs_wait_on_page_writeback(node_page, NODE, true, true);
- set_cold_node(node_page, false);
- set_page_dirty(node_page);
+ if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_folio)) {
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
+ set_cold_node(node_folio, false);
+ folio_mark_dirty(node_folio);
}
/* get rdev by using inline_info */
- __get_inode_rdev(inode, ri);
-
- if (S_ISREG(inode->i_mode)) {
- err = __written_first_block(sbi, ri);
- if (err < 0) {
- f2fs_put_page(node_page, 1);
- return err;
- }
- if (!err)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
- }
+ __get_inode_rdev(inode, node_folio);
if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
@@ -455,22 +519,36 @@ static int do_read_inode(struct inode *inode)
if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
(fi->i_flags & F2FS_COMPR_FL)) {
if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
- i_log_cluster_size)) {
+ i_compress_flag)) {
+ unsigned short compress_flag;
+
atomic_set(&fi->i_compr_blocks,
le64_to_cpu(ri->i_compr_blocks));
fi->i_compress_algorithm = ri->i_compress_algorithm;
fi->i_log_cluster_size = ri->i_log_cluster_size;
- fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
- fi->i_cluster_size = 1 << fi->i_log_cluster_size;
+ compress_flag = le16_to_cpu(ri->i_compress_flag);
+ fi->i_compress_level = compress_flag >>
+ COMPRESS_LEVEL_OFFSET;
+ fi->i_compress_flag = compress_flag &
+ GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
+ fi->i_cluster_size = BIT(fi->i_log_cluster_size);
set_inode_flag(inode, FI_COMPRESSED_FILE);
}
}
- F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
- F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
- F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
- F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
- f2fs_put_page(node_page, 1);
+ init_idisk_time(inode);
+
+ if (!sanity_check_extent_cache(inode, node_folio)) {
+ f2fs_folio_put(node_folio, true);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+ return -EFSCORRUPTED;
+ }
+
+ /* Need all the flag bits */
+ f2fs_init_read_extent_tree(inode, node_folio);
+ f2fs_init_age_extent_tree(inode);
+
+ f2fs_folio_put(node_folio, true);
stat_inc_inline_xattr(inode);
stat_inc_inline_inode(inode);
@@ -481,6 +559,12 @@ static int do_read_inode(struct inode *inode)
return 0;
}
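/*
 * Sketch of the i_compress_flag packing assumed above (illustrative; the
 * helper names are hypothetical): the on-disk 16-bit field keeps the
 * compression level in the bits at and above COMPRESS_LEVEL_OFFSET and the
 * per-file flags below it, which is how do_read_inode() splits it and how
 * f2fs_update_inode() later reassembles it.
 */
static inline unsigned char example_compress_level(unsigned short disk_flag)
{
	return disk_flag >> COMPRESS_LEVEL_OFFSET;
}

static inline unsigned short example_compress_flags(unsigned short disk_flag)
{
	return disk_flag & GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
}

static inline unsigned short example_pack_compress(unsigned short flags,
						   unsigned char level)
{
	return flags | (level << COMPRESS_LEVEL_OFFSET);
}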
+static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
+{
+ return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
+ ino == F2FS_COMPRESS_INO(sbi);
+}
+
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -491,17 +575,23 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW)) {
+ if (!(inode_state_read_once(inode) & I_NEW)) {
+ if (is_meta_ino(sbi, ino)) {
+ f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ ret = -EFSCORRUPTED;
+ trace_f2fs_iget_exit(inode, ret);
+ iput(inode);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+ return ERR_PTR(ret);
+ }
+
trace_f2fs_iget(inode);
return inode;
}
- if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
- goto make_now;
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (ino == F2FS_COMPRESS_INO(sbi))
+ if (is_meta_ino(sbi, ino))
goto make_now;
-#endif
ret = do_read_inode(inode);
if (ret)
@@ -516,6 +606,11 @@ make_now:
} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
inode->i_mapping->a_ops = &f2fs_compress_aops;
+ /*
+ * generic_error_remove_folio() only truncates pages of a regular
+ * inode
+ */
+ inode->i_mode |= S_IFREG;
#endif
mapping_set_gfp_mask(inode->i_mapping,
GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
@@ -544,6 +639,7 @@ make_now:
goto bad_inode;
}
f2fs_set_inode_flags(inode);
+
unlock_new_inode(inode);
trace_f2fs_iget(inode);
return inode;
@@ -562,136 +658,136 @@ retry:
inode = f2fs_iget(sb, ino);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
}
return inode;
}
-void f2fs_update_inode(struct inode *inode, struct page *node_page)
+void f2fs_update_inode(struct inode *inode, struct folio *node_folio)
{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_inode *ri;
- struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ struct extent_tree *et = fi->extent_tree[EX_READ];
- f2fs_wait_on_page_writeback(node_page, NODE, true, true);
- set_page_dirty(node_page);
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
+ folio_mark_dirty(node_folio);
f2fs_inode_synced(inode);
- ri = F2FS_INODE(node_page);
+ ri = F2FS_INODE(node_folio);
ri->i_mode = cpu_to_le16(inode->i_mode);
- ri->i_advise = F2FS_I(inode)->i_advise;
+ ri->i_advise = fi->i_advise;
ri->i_uid = cpu_to_le32(i_uid_read(inode));
ri->i_gid = cpu_to_le32(i_gid_read(inode));
ri->i_links = cpu_to_le32(inode->i_nlink);
- ri->i_size = cpu_to_le64(i_size_read(inode));
ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
+ if (!f2fs_is_atomic_file(inode) ||
+ is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
+ ri->i_size = cpu_to_le64(i_size_read(inode));
+
if (et) {
read_lock(&et->lock);
- set_raw_extent(&et->largest, &ri->i_ext);
+ set_raw_read_extent(&et->largest, &ri->i_ext);
read_unlock(&et->lock);
} else {
memset(&ri->i_ext, 0, sizeof(ri->i_ext));
}
set_raw_inline(inode, ri);
- ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
- ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ ri->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ ri->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ ri->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ ri->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
if (S_ISDIR(inode->i_mode))
- ri->i_current_depth =
- cpu_to_le32(F2FS_I(inode)->i_current_depth);
+ ri->i_current_depth = cpu_to_le32(fi->i_current_depth);
else if (S_ISREG(inode->i_mode))
- ri->i_gc_failures =
- cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
- ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
- ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
- ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
+ ri->i_gc_failures = cpu_to_le16(fi->i_gc_failures);
+ ri->i_xattr_nid = cpu_to_le32(fi->i_xattr_nid);
+ ri->i_flags = cpu_to_le32(fi->i_flags);
+ ri->i_pino = cpu_to_le32(fi->i_pino);
ri->i_generation = cpu_to_le32(inode->i_generation);
- ri->i_dir_level = F2FS_I(inode)->i_dir_level;
+ ri->i_dir_level = fi->i_dir_level;
if (f2fs_has_extra_attr(inode)) {
- ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
+ ri->i_extra_isize = cpu_to_le16(fi->i_extra_isize);
if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
ri->i_inline_xattr_size =
- cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
+ cpu_to_le16(fi->i_inline_xattr_size);
if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
- F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
- i_projid)) {
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) {
projid_t i_projid;
- i_projid = from_kprojid(&init_user_ns,
- F2FS_I(inode)->i_projid);
+ i_projid = from_kprojid(&init_user_ns, fi->i_projid);
ri->i_projid = cpu_to_le32(i_projid);
}
if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
- F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
- i_crtime)) {
- ri->i_crtime =
- cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
- ri->i_crtime_nsec =
- cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+ ri->i_crtime = cpu_to_le64(fi->i_crtime.tv_sec);
+ ri->i_crtime_nsec = cpu_to_le32(fi->i_crtime.tv_nsec);
}
if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
- F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
- i_log_cluster_size)) {
- ri->i_compr_blocks =
- cpu_to_le64(atomic_read(
- &F2FS_I(inode)->i_compr_blocks));
- ri->i_compress_algorithm =
- F2FS_I(inode)->i_compress_algorithm;
- ri->i_compress_flag =
- cpu_to_le16(F2FS_I(inode)->i_compress_flag);
- ri->i_log_cluster_size =
- F2FS_I(inode)->i_log_cluster_size;
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ i_compress_flag)) {
+ unsigned short compress_flag;
+
+ ri->i_compr_blocks = cpu_to_le64(
+ atomic_read(&fi->i_compr_blocks));
+ ri->i_compress_algorithm = fi->i_compress_algorithm;
+ compress_flag = fi->i_compress_flag |
+ fi->i_compress_level <<
+ COMPRESS_LEVEL_OFFSET;
+ ri->i_compress_flag = cpu_to_le16(compress_flag);
+ ri->i_log_cluster_size = fi->i_log_cluster_size;
}
}
- __set_inode_rdev(inode, ri);
+ __set_inode_rdev(inode, node_folio);
/* deleted inode */
if (inode->i_nlink == 0)
- clear_page_private_inline(node_page);
-
- F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
- F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
- F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
- F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+ folio_clear_f2fs_inline(node_folio);
+ init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+ f2fs_inode_chksum_set(F2FS_I_SB(inode), node_folio);
#endif
}
void f2fs_update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *node_page;
+ struct folio *node_folio;
+ int count = 0;
retry:
- node_page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(node_page)) {
- int err = PTR_ERR(node_page);
+ node_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(node_folio)) {
+ int err = PTR_ERR(node_folio);
+
+ /* The node block was truncated. */
+ if (err == -ENOENT)
+ return;
+
+ if (err == -EFSCORRUPTED)
+ goto stop_checkpoint;
- if (err == -ENOMEM) {
- cond_resched();
+ if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
goto retry;
- } else if (err != -ENOENT) {
- f2fs_stop_checkpoint(sbi, false);
- }
+stop_checkpoint:
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
return;
}
- f2fs_update_inode(inode, node_page);
- f2fs_put_page(node_page, 1);
+ f2fs_update_inode(inode, node_folio);
+ f2fs_folio_put(node_folio, true);
}
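/*
 * Summary of the retry policy above (illustrative): -ENOENT means the node
 * block was truncated and the update is silently skipped; -EFSCORRUPTED
 * stops checkpointing immediately; -ENOMEM retries without limit; any other
 * error is retried up to DEFAULT_RETRY_IO_COUNT times before checkpointing
 * is stopped with STOP_CP_REASON_UPDATE_INODE.
 */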
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -709,8 +805,17 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
- if (!f2fs_is_checkpoint_ready(sbi))
+ /*
+ * No need to update the inode page; f2fs_evict_inode() will ultimately
+ * clear the inode's dirty status.
+ */
+ if (f2fs_cp_error(sbi))
+ return -EIO;
+
+ if (!f2fs_is_checkpoint_ready(sbi)) {
+ f2fs_mark_inode_dirty_sync(inode, true);
return -ENOSPC;
+ }
/*
* We need to balance fs here to prevent from producing dirty node pages
@@ -722,23 +827,44 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
return 0;
}
+void f2fs_remove_donate_inode(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (list_empty(&F2FS_I(inode)->gdonate_list))
+ return;
+
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ list_del_init(&F2FS_I(inode)->gdonate_list);
+ sbi->donate_files--;
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+}
+
/*
* Called at the last iput() if i_nlink is zero
*/
void f2fs_evict_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ nid_t xnid = fi->i_xattr_nid;
int err = 0;
+ bool freeze_protected = false;
- /* some remained atomic pages should discarded */
- if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
+ f2fs_abort_atomic_write(inode, true);
+
+ if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
+ clear_inode_flag(fi->cow_inode, FI_COW_FILE);
+ F2FS_I(fi->cow_inode)->atomic_inode = NULL;
+ iput(fi->cow_inode);
+ fi->cow_inode = NULL;
+ }
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
- if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
+ if ((inode->i_nlink || is_bad_inode(inode)) &&
+ test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
f2fs_invalidate_compress_pages(sbi, inode->i_ino);
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
@@ -748,8 +874,10 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_bug_on(sbi, get_dirty_pages(inode));
f2fs_remove_dirty_inode(inode);
+ f2fs_remove_donate_inode(inode);
- f2fs_destroy_extent_tree(inode);
+ if (!IS_DEVICE_ALIASING(inode))
+ f2fs_destroy_extent_tree(inode);
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
@@ -764,24 +892,39 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
- sb_start_intwrite(inode->i_sb);
+ if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING)) {
+ sb_start_intwrite(inode->i_sb);
+ freeze_protected = true;
+ }
set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
retry:
if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode);
- if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
- f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
+ if (time_to_inject(sbi, FAULT_EVICT_INODE))
err = -EIO;
- }
if (!err) {
f2fs_lock_op(sbi);
err = f2fs_remove_inode_page(inode);
f2fs_unlock_op(sbi);
- if (err == -ENOENT)
+ if (err == -ENOENT) {
err = 0;
+
+ /*
+ * In a fuzzed image, another node may have the same block
+ * address as the inode's; if that node was truncated earlier,
+ * truncating the inode node will fail.
+ */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ f2fs_warn(F2FS_I_SB(inode),
+ "f2fs_evict_inode: inconsistent node id, ino:%lu",
+ inode->i_ino);
+ f2fs_inode_synced(inode);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
+ }
}
/* give more chances, if ENOMEM case */
@@ -790,12 +933,29 @@ retry:
goto retry;
}
+ if (IS_DEVICE_ALIASING(inode))
+ f2fs_destroy_extent_tree(inode);
+
if (err) {
f2fs_update_inode_page(inode);
if (dquot_initialize_needed(inode))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
+ /*
+ * If both f2fs_truncate() and f2fs_update_inode_page() failed
+ * due to a fuzzed, corrupted inode, call f2fs_inode_synced() to
+ * avoid triggering a later f2fs_bug_on().
+ */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ f2fs_warn(sbi,
+ "f2fs_evict_inode: inode is dirty, ino:%lu",
+ inode->i_ino);
+ f2fs_inode_synced(inode);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
}
- sb_end_intwrite(inode->i_sb);
+ if (freeze_protected)
+ sb_end_intwrite(inode->i_sb);
no_delete:
dquot_drop(inode);
@@ -804,13 +964,17 @@ no_delete:
stat_dec_inline_inode(inode);
stat_dec_compr_inode(inode);
stat_sub_compr_blocks(inode,
- atomic_read(&F2FS_I(inode)->i_compr_blocks));
+ atomic_read(&fi->i_compr_blocks));
if (likely(!f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
- else
- f2fs_inode_synced(inode);
+
+ /*
+ * In any case, the inode must be removed from the
+ * sbi->inode_list[DIRTY_META] list to avoid a UAF in
+ * f2fs_sync_inode_meta() during checkpoint.
+ */
+ f2fs_inode_synced(inode);
/* for the case where f2fs_new_inode() failed, .i_ino is zero, so skip it */
if (inode->i_ino)
@@ -868,9 +1032,10 @@ void f2fs_handle_failed_inode(struct inode *inode)
* so we can prevent losing this orphan when encountering a checkpoint
* followed by a sudden power-off.
*/
- err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
+ set_inode_flag(inode, FI_FREE_NID);
f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
goto out;
}
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index cdcf54ae0db8..f8703038e1d8 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -14,99 +14,98 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define NUM_PREALLOC_IOSTAT_CTXS 128
static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;
+static inline unsigned long long iostat_get_avg_bytes(struct f2fs_sb_info *sbi,
+ enum iostat_type type)
+{
+ return sbi->iostat_count[type] ? div64_u64(sbi->iostat_bytes[type],
+ sbi->iostat_count[type]) : 0;
+}
+
+#define IOSTAT_INFO_SHOW(name, type) \
+ seq_printf(seq, "%-23s %-16llu %-16llu %-16llu\n", \
+ name":", sbi->iostat_bytes[type], \
+ sbi->iostat_count[type], \
+ iostat_get_avg_bytes(sbi, type))
+
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- time64_t now = ktime_get_real_seconds();
if (!sbi->iostat_enable)
return 0;
- seq_printf(seq, "time: %-16llu\n", now);
+ seq_printf(seq, "time: %-16llu\n", ktime_get_real_seconds());
+ seq_printf(seq, "\t\t\t%-16s %-16s %-16s\n",
+ "io_bytes", "count", "avg_bytes");
/* print app write IOs */
seq_puts(seq, "[WRITE]\n");
- seq_printf(seq, "app buffered: %-16llu\n",
- sbi->rw_iostat[APP_BUFFERED_IO]);
- seq_printf(seq, "app direct: %-16llu\n",
- sbi->rw_iostat[APP_DIRECT_IO]);
- seq_printf(seq, "app mapped: %-16llu\n",
- sbi->rw_iostat[APP_MAPPED_IO]);
+ IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_IO);
+ IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_IO);
+ IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_IO);
+ IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_IO);
+ IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_IO);
/* print fs write IOs */
- seq_printf(seq, "fs data: %-16llu\n",
- sbi->rw_iostat[FS_DATA_IO]);
- seq_printf(seq, "fs node: %-16llu\n",
- sbi->rw_iostat[FS_NODE_IO]);
- seq_printf(seq, "fs meta: %-16llu\n",
- sbi->rw_iostat[FS_META_IO]);
- seq_printf(seq, "fs gc data: %-16llu\n",
- sbi->rw_iostat[FS_GC_DATA_IO]);
- seq_printf(seq, "fs gc node: %-16llu\n",
- sbi->rw_iostat[FS_GC_NODE_IO]);
- seq_printf(seq, "fs cp data: %-16llu\n",
- sbi->rw_iostat[FS_CP_DATA_IO]);
- seq_printf(seq, "fs cp node: %-16llu\n",
- sbi->rw_iostat[FS_CP_NODE_IO]);
- seq_printf(seq, "fs cp meta: %-16llu\n",
- sbi->rw_iostat[FS_CP_META_IO]);
+ IOSTAT_INFO_SHOW("fs data", FS_DATA_IO);
+ IOSTAT_INFO_SHOW("fs cdata", FS_CDATA_IO);
+ IOSTAT_INFO_SHOW("fs node", FS_NODE_IO);
+ IOSTAT_INFO_SHOW("fs meta", FS_META_IO);
+ IOSTAT_INFO_SHOW("fs gc data", FS_GC_DATA_IO);
+ IOSTAT_INFO_SHOW("fs gc node", FS_GC_NODE_IO);
+ IOSTAT_INFO_SHOW("fs cp data", FS_CP_DATA_IO);
+ IOSTAT_INFO_SHOW("fs cp node", FS_CP_NODE_IO);
+ IOSTAT_INFO_SHOW("fs cp meta", FS_CP_META_IO);
/* print app read IOs */
seq_puts(seq, "[READ]\n");
- seq_printf(seq, "app buffered: %-16llu\n",
- sbi->rw_iostat[APP_BUFFERED_READ_IO]);
- seq_printf(seq, "app direct: %-16llu\n",
- sbi->rw_iostat[APP_DIRECT_READ_IO]);
- seq_printf(seq, "app mapped: %-16llu\n",
- sbi->rw_iostat[APP_MAPPED_READ_IO]);
+ IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_READ_IO);
+ IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_READ_IO);
+ IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_READ_IO);
+ IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_READ_IO);
+ IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_READ_IO);
/* print fs read IOs */
- seq_printf(seq, "fs data: %-16llu\n",
- sbi->rw_iostat[FS_DATA_READ_IO]);
- seq_printf(seq, "fs gc data: %-16llu\n",
- sbi->rw_iostat[FS_GDATA_READ_IO]);
- seq_printf(seq, "fs compr_data: %-16llu\n",
- sbi->rw_iostat[FS_CDATA_READ_IO]);
- seq_printf(seq, "fs node: %-16llu\n",
- sbi->rw_iostat[FS_NODE_READ_IO]);
- seq_printf(seq, "fs meta: %-16llu\n",
- sbi->rw_iostat[FS_META_READ_IO]);
+ IOSTAT_INFO_SHOW("fs data", FS_DATA_READ_IO);
+ IOSTAT_INFO_SHOW("fs gc data", FS_GDATA_READ_IO);
+ IOSTAT_INFO_SHOW("fs cdata", FS_CDATA_READ_IO);
+ IOSTAT_INFO_SHOW("fs node", FS_NODE_READ_IO);
+ IOSTAT_INFO_SHOW("fs meta", FS_META_READ_IO);
/* print other IOs */
seq_puts(seq, "[OTHER]\n");
- seq_printf(seq, "fs discard: %-16llu\n",
- sbi->rw_iostat[FS_DISCARD]);
+ IOSTAT_INFO_SHOW("fs discard", FS_DISCARD_IO);
+ IOSTAT_INFO_SHOW("fs flush", FS_FLUSH_IO);
+ IOSTAT_INFO_SHOW("fs zone reset", FS_ZONE_RESET_IO);
return 0;
}
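/*
 * Note on the output format above (illustrative): each IOSTAT_INFO_SHOW()
 * row prints the accumulated byte count, the number of updates, and their
 * ratio; iostat_get_avg_bytes() uses div64_u64() and returns 0 when the
 * counter has never been updated, so an idle counter shows "0 0 0" rather
 * than dividing by zero.
 */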
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
- int io, idx = 0;
- unsigned int cnt;
+ int io, idx;
struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
+ unsigned long flags;
- spin_lock_irq(&sbi->iostat_lat_lock);
+ spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
for (idx = 0; idx < MAX_IO_TYPE; idx++) {
for (io = 0; io < NR_PAGE_TYPE; io++) {
- cnt = io_lat->bio_cnt[idx][io];
iostat_lat[idx][io].peak_lat =
jiffies_to_msecs(io_lat->peak_lat[idx][io]);
- iostat_lat[idx][io].cnt = cnt;
- iostat_lat[idx][io].avg_lat = cnt ?
- jiffies_to_msecs(io_lat->sum_lat[idx][io]) / cnt : 0;
+ iostat_lat[idx][io].cnt = io_lat->bio_cnt[idx][io];
+ iostat_lat[idx][io].avg_lat = iostat_lat[idx][io].cnt ?
+ jiffies_to_msecs(io_lat->sum_lat[idx][io]) / iostat_lat[idx][io].cnt : 0;
io_lat->sum_lat[idx][io] = 0;
io_lat->peak_lat[idx][io] = 0;
io_lat->bio_cnt[idx][io] = 0;
}
}
- spin_unlock_irq(&sbi->iostat_lat_lock);
+ spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
trace_f2fs_iostat_latency(sbi, iostat_lat);
}
@@ -115,25 +114,26 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
unsigned long long iostat_diff[NR_IO_TYPE];
int i;
+ unsigned long flags;
if (time_is_after_jiffies(sbi->iostat_next_period))
return;
/* Need double check under the lock */
- spin_lock(&sbi->iostat_lock);
+ spin_lock_irqsave(&sbi->iostat_lock, flags);
if (time_is_after_jiffies(sbi->iostat_next_period)) {
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_irqrestore(&sbi->iostat_lock, flags);
return;
}
sbi->iostat_next_period = jiffies +
msecs_to_jiffies(sbi->iostat_period_ms);
for (i = 0; i < NR_IO_TYPE; i++) {
- iostat_diff[i] = sbi->rw_iostat[i] -
- sbi->prev_rw_iostat[i];
- sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
+ iostat_diff[i] = sbi->iostat_bytes[i] -
+ sbi->prev_iostat_bytes[i];
+ sbi->prev_iostat_bytes[i] = sbi->iostat_bytes[i];
}
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_irqrestore(&sbi->iostat_lock, flags);
trace_f2fs_iostat(sbi, iostat_diff);
@@ -145,85 +145,113 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
int i;
- spin_lock(&sbi->iostat_lock);
+ spin_lock_irq(&sbi->iostat_lock);
for (i = 0; i < NR_IO_TYPE; i++) {
- sbi->rw_iostat[i] = 0;
- sbi->prev_rw_iostat[i] = 0;
+ sbi->iostat_count[i] = 0;
+ sbi->iostat_bytes[i] = 0;
+ sbi->prev_iostat_bytes[i] = 0;
}
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_irq(&sbi->iostat_lock);
spin_lock_irq(&sbi->iostat_lat_lock);
memset(io_lat, 0, sizeof(struct iostat_lat_info));
spin_unlock_irq(&sbi->iostat_lat_lock);
}
-void f2fs_update_iostat(struct f2fs_sb_info *sbi,
+static inline void __f2fs_update_iostat(struct f2fs_sb_info *sbi,
+ enum iostat_type type, unsigned long long io_bytes)
+{
+ sbi->iostat_bytes[type] += io_bytes;
+ sbi->iostat_count[type]++;
+}
+
+void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
enum iostat_type type, unsigned long long io_bytes)
{
+ unsigned long flags;
+
if (!sbi->iostat_enable)
return;
- spin_lock(&sbi->iostat_lock);
- sbi->rw_iostat[type] += io_bytes;
+ spin_lock_irqsave(&sbi->iostat_lock, flags);
+ __f2fs_update_iostat(sbi, type, io_bytes);
+
+ if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
+ __f2fs_update_iostat(sbi, APP_WRITE_IO, io_bytes);
- if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
- sbi->rw_iostat[APP_BUFFERED_IO] =
- sbi->rw_iostat[APP_WRITE_IO] -
- sbi->rw_iostat[APP_DIRECT_IO];
+ if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
+ __f2fs_update_iostat(sbi, APP_READ_IO, io_bytes);
- if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
- sbi->rw_iostat[APP_BUFFERED_READ_IO] =
- sbi->rw_iostat[APP_READ_IO] -
- sbi->rw_iostat[APP_DIRECT_READ_IO];
- spin_unlock(&sbi->iostat_lock);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (inode && f2fs_compressed_file(inode)) {
+ if (type == APP_BUFFERED_IO)
+ __f2fs_update_iostat(sbi, APP_BUFFERED_CDATA_IO, io_bytes);
+
+ if (type == APP_BUFFERED_READ_IO)
+ __f2fs_update_iostat(sbi, APP_BUFFERED_CDATA_READ_IO, io_bytes);
+
+ if (type == APP_MAPPED_READ_IO)
+ __f2fs_update_iostat(sbi, APP_MAPPED_CDATA_READ_IO, io_bytes);
+
+ if (type == APP_MAPPED_IO)
+ __f2fs_update_iostat(sbi, APP_MAPPED_CDATA_IO, io_bytes);
+
+ if (type == FS_DATA_READ_IO)
+ __f2fs_update_iostat(sbi, FS_CDATA_READ_IO, io_bytes);
+
+ if (type == FS_DATA_IO)
+ __f2fs_update_iostat(sbi, FS_CDATA_IO, io_bytes);
+ }
+#endif
+
+ spin_unlock_irqrestore(&sbi->iostat_lock, flags);
f2fs_record_iostat(sbi);
}
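/*
 * Note on the aggregation change above (illustrative): the old code derived
 * APP_BUFFERED_IO by subtracting APP_DIRECT_IO from APP_WRITE_IO; the new
 * code records buffered and direct traffic directly and folds both into the
 * APP_WRITE_IO/APP_READ_IO totals, and, when the inode is a compressed
 * file, mirrors the update into the matching *_CDATA_* counter under
 * CONFIG_F2FS_FS_COMPRESSION.
 */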
static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
- int rw, bool is_sync)
+ enum iostat_lat_type lat_type)
{
unsigned long ts_diff;
- unsigned int iotype = iostat_ctx->type;
- unsigned long flags;
+ unsigned int page_type = iostat_ctx->type;
struct f2fs_sb_info *sbi = iostat_ctx->sbi;
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
- int idx;
+ unsigned long flags;
if (!sbi->iostat_enable)
return;
ts_diff = jiffies - iostat_ctx->submit_ts;
- if (iotype >= META_FLUSH)
- iotype = META;
-
- if (rw == 0) {
- idx = READ_IO;
- } else {
- if (is_sync)
- idx = WRITE_SYNC_IO;
- else
- idx = WRITE_ASYNC_IO;
+ if (page_type == META_FLUSH) {
+ page_type = META;
+ } else if (page_type >= NR_PAGE_TYPE) {
+ f2fs_warn(sbi, "%s: %d over NR_PAGE_TYPE", __func__, page_type);
+ return;
}
spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
- io_lat->sum_lat[idx][iotype] += ts_diff;
- io_lat->bio_cnt[idx][iotype]++;
- if (ts_diff > io_lat->peak_lat[idx][iotype])
- io_lat->peak_lat[idx][iotype] = ts_diff;
+ io_lat->sum_lat[lat_type][page_type] += ts_diff;
+ io_lat->bio_cnt[lat_type][page_type]++;
+ if (ts_diff > io_lat->peak_lat[lat_type][page_type])
+ io_lat->peak_lat[lat_type][page_type] = ts_diff;
spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}
-void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
+void iostat_update_and_unbind_ctx(struct bio *bio)
{
struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
- bool is_sync = bio->bi_opf & REQ_SYNC;
+ enum iostat_lat_type lat_type;
- if (rw == 0)
- bio->bi_private = iostat_ctx->post_read_ctx;
- else
+ if (op_is_write(bio_op(bio))) {
+ lat_type = bio->bi_opf & REQ_SYNC ?
+ WRITE_SYNC_IO : WRITE_ASYNC_IO;
bio->bi_private = iostat_ctx->sbi;
- __update_iostat_latency(iostat_ctx, rw, is_sync);
+ } else {
+ lat_type = READ_IO;
+ bio->bi_private = iostat_ctx->post_read_ctx;
+ }
+
+ __update_iostat_latency(iostat_ctx, lat_type);
mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}
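/*
 * Sketch of the latency bucketing above (illustrative; the helper name is
 * hypothetical): completion latency is accumulated per page type into one
 * of three buckets chosen from the bio direction and its REQ_SYNC flag.
 */
static inline enum iostat_lat_type example_bio_lat_type(struct bio *bio)
{
	if (!op_is_write(bio_op(bio)))
		return READ_IO;
	return (bio->bi_opf & REQ_SYNC) ? WRITE_SYNC_IO : WRITE_ASYNC_IO;
}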
diff --git a/fs/f2fs/iostat.h b/fs/f2fs/iostat.h
index 22a2d01f57ef..eb99d05cf272 100644
--- a/fs/f2fs/iostat.h
+++ b/fs/f2fs/iostat.h
@@ -8,20 +8,21 @@
struct bio_post_read_ctx;
+enum iostat_lat_type {
+ READ_IO = 0,
+ WRITE_SYNC_IO,
+ WRITE_ASYNC_IO,
+ MAX_IO_TYPE,
+};
+
#ifdef CONFIG_F2FS_IOSTAT
+#define NUM_PREALLOC_IOSTAT_CTXS 128
#define DEFAULT_IOSTAT_PERIOD_MS 3000
#define MIN_IOSTAT_PERIOD_MS 100
/* maximum period of iostat tracing is 1 day */
#define MAX_IOSTAT_PERIOD_MS 8640000
-enum {
- READ_IO,
- WRITE_SYNC_IO,
- WRITE_ASYNC_IO,
- MAX_IO_TYPE,
-};
-
struct iostat_lat_info {
unsigned long sum_lat[MAX_IO_TYPE][NR_PAGE_TYPE]; /* sum of io latencies */
unsigned long peak_lat[MAX_IO_TYPE][NR_PAGE_TYPE]; /* peak io latency */
@@ -31,7 +32,7 @@ struct iostat_lat_info {
extern int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
void *offset);
extern void f2fs_reset_iostat(struct f2fs_sb_info *sbi);
-extern void f2fs_update_iostat(struct f2fs_sb_info *sbi,
+extern void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
enum iostat_type type, unsigned long long io_bytes);
struct bio_iostat_ctx {
@@ -57,7 +58,7 @@ static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)
return iostat_ctx->post_read_ctx;
}
-extern void iostat_update_and_unbind_ctx(struct bio *bio, int rw);
+extern void iostat_update_and_unbind_ctx(struct bio *bio);
extern void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
struct bio *bio, struct bio_post_read_ctx *ctx);
extern int f2fs_init_iostat_processing(void);
@@ -65,9 +66,9 @@ extern void f2fs_destroy_iostat_processing(void);
extern int f2fs_init_iostat(struct f2fs_sb_info *sbi);
extern void f2fs_destroy_iostat(struct f2fs_sb_info *sbi);
#else
-static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
+static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
enum iostat_type type, unsigned long long io_bytes) {}
-static inline void iostat_update_and_unbind_ctx(struct bio *bio, int rw) {}
+static inline void iostat_update_and_unbind_ctx(struct bio *bio) {}
static inline void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
struct bio *bio, struct bio_post_read_ctx *ctx) {}
static inline void iostat_update_submit_ctx(struct bio *bio,
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a728a0af9ce0..043d20516a21 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -22,199 +22,52 @@
#include "acl.h"
#include <trace/events/f2fs.h>
-static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- nid_t ino;
- struct inode *inode;
- bool nid_free = false;
- bool encrypt = false;
- int xattr_size = 0;
- int err;
-
- inode = new_inode(dir->i_sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- f2fs_lock_op(sbi);
- if (!f2fs_alloc_nid(sbi, &ino)) {
- f2fs_unlock_op(sbi);
- err = -ENOSPC;
- goto fail;
- }
- f2fs_unlock_op(sbi);
-
- nid_free = true;
-
- inode_init_owner(&init_user_ns, inode, dir, mode);
-
- inode->i_ino = ino;
- inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- F2FS_I(inode)->i_crtime = inode->i_mtime;
- inode->i_generation = prandom_u32();
-
- if (S_ISDIR(inode->i_mode))
- F2FS_I(inode)->i_current_depth = 1;
-
- err = insert_inode_locked(inode);
- if (err) {
- err = -EINVAL;
- goto fail;
- }
-
- if (f2fs_sb_has_project_quota(sbi) &&
- (F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
- F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
- else
- F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns,
- F2FS_DEF_PROJID);
-
- err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
- if (err)
- goto fail_drop;
-
- err = f2fs_dquot_initialize(inode);
- if (err)
- goto fail_drop;
-
- set_inode_flag(inode, FI_NEW_INODE);
-
- if (encrypt)
- f2fs_set_encrypted_inode(inode);
-
- if (f2fs_sb_has_extra_attr(sbi)) {
- set_inode_flag(inode, FI_EXTRA_ATTR);
- F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
- }
-
- if (test_opt(sbi, INLINE_XATTR))
- set_inode_flag(inode, FI_INLINE_XATTR);
-
- if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
- set_inode_flag(inode, FI_INLINE_DATA);
- if (f2fs_may_inline_dentry(inode))
- set_inode_flag(inode, FI_INLINE_DENTRY);
-
- if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
- f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
- if (f2fs_has_inline_xattr(inode))
- xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
- /* Otherwise, will be 0 */
- } else if (f2fs_has_inline_xattr(inode) ||
- f2fs_has_inline_dentry(inode)) {
- xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
- }
- F2FS_I(inode)->i_inline_xattr_size = xattr_size;
-
- f2fs_init_extent_tree(inode, NULL);
-
- stat_inc_inline_xattr(inode);
- stat_inc_inline_inode(inode);
- stat_inc_inline_dir(inode);
-
- F2FS_I(inode)->i_flags =
- f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
-
- if (S_ISDIR(inode->i_mode))
- F2FS_I(inode)->i_flags |= F2FS_INDEX_FL;
-
- if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
- set_inode_flag(inode, FI_PROJ_INHERIT);
-
- if (f2fs_sb_has_compression(sbi)) {
- /* Inherit the compression flag in directory */
- if ((F2FS_I(dir)->i_flags & F2FS_COMPR_FL) &&
- f2fs_may_compress(inode))
- set_compress_context(inode);
- }
-
- f2fs_set_inode_flags(inode);
-
- trace_f2fs_new_inode(inode, 0);
- return inode;
-
-fail:
- trace_f2fs_new_inode(inode, err);
- make_bad_inode(inode);
- if (nid_free)
- set_inode_flag(inode, FI_FREE_NID);
- iput(inode);
- return ERR_PTR(err);
-fail_drop:
- trace_f2fs_new_inode(inode, err);
- dquot_drop(inode);
- inode->i_flags |= S_NOQUOTA;
- if (nid_free)
- set_inode_flag(inode, FI_FREE_NID);
- clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
- return ERR_PTR(err);
-}
-
-static inline int is_extension_exist(const unsigned char *s, const char *sub,
- bool tmp_ext)
+static inline bool is_extension_exist(const unsigned char *s, const char *sub,
+ bool tmp_ext, bool tmp_dot)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
int i;
if (sublen == 1 && *sub == '*')
- return 1;
+ return true;
/*
* filename format of multimedia file should be defined as:
* "filename + '.' + extension + (optional: '.' + temp extension)".
*/
if (slen < sublen + 2)
- return 0;
+ return false;
if (!tmp_ext) {
/* file has no temp extension */
if (s[slen - sublen - 1] != '.')
- return 0;
+ return false;
return !strncasecmp(s + slen - sublen, sub, sublen);
}
for (i = 1; i < slen - sublen; i++) {
if (s[i] != '.')
continue;
- if (!strncasecmp(s + i + 1, sub, sublen))
- return 1;
+ if (!strncasecmp(s + i + 1, sub, sublen)) {
+ if (!tmp_dot)
+ return true;
+ if (i == slen - sublen - 1 || s[i + 1 + sublen] == '.')
+ return true;
+ }
}
- return 0;
+ return false;
}
-/*
- * Set file's temperature for hot/cold data separation
- */
-static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
- const unsigned char *name)
+static inline bool is_temperature_extension(const unsigned char *s, const char *sub)
{
- __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
- int i, cold_count, hot_count;
-
- down_read(&sbi->sb_lock);
-
- cold_count = le32_to_cpu(sbi->raw_super->extension_count);
- hot_count = sbi->raw_super->hot_ext_count;
-
- for (i = 0; i < cold_count + hot_count; i++) {
- if (is_extension_exist(name, extlist[i], true))
- break;
- }
-
- up_read(&sbi->sb_lock);
-
- if (i == cold_count + hot_count)
- return;
+ return is_extension_exist(s, sub, true, false);
+}
- if (i < cold_count)
- file_set_cold(inode);
- else
- file_set_hot(inode);
+static inline bool is_compress_extension(const unsigned char *s, const char *sub)
+{
+ return is_extension_exist(s, sub, true, true);
}
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
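For intuition, the matching rule of is_extension_exist() above can be restated as a stand-alone user-space program (hypothetical name ext_match(); semantics copied from the hunk as written). It shows how the new tmp_dot flag restricts temp-extension matches to whole dot-delimited components:

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>

static bool ext_match(const char *s, const char *sub, bool tmp_ext, bool tmp_dot)
{
	size_t slen = strlen(s), sublen = strlen(sub);
	size_t i;

	if (sublen == 1 && *sub == '*')
		return true;
	/* "filename + '.' + extension" needs at least sublen + 2 characters */
	if (slen < sublen + 2)
		return false;
	if (!tmp_ext)	/* no temp extension allowed: "sub" must end the name */
		return s[slen - sublen - 1] == '.' &&
		       !strncasecmp(s + slen - sublen, sub, sublen);
	for (i = 1; i < slen - sublen; i++) {
		if (s[i] != '.' || strncasecmp(s + i + 1, sub, sublen))
			continue;
		if (!tmp_dot)
			return true;	/* "name.sub<anything>" is accepted */
		/* tmp_dot: "sub" must be a full dot-delimited component */
		if (i == slen - sublen - 1 || s[i + 1 + sublen] == '.')
			return true;
	}
	return false;
}

int main(void)
{
	assert(ext_match("song.mp3", "mp3", true, true));	/* exact extension */
	assert(ext_match("song.mp3.tmp", "mp3", true, true));	/* temp extension after it */
	assert(!ext_match("song.mp3x", "mp3", true, true));	/* "mp3x" is not the component "mp3" */
	assert(ext_match("song.mp3x", "mp3", true, false));	/* the looser pre-tmp_dot rule */
	return 0;
}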
@@ -283,56 +136,221 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
return 0;
}
-static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
- const unsigned char *name)
+static void set_compress_new_inode(struct f2fs_sb_info *sbi, struct inode *dir,
+ struct inode *inode, const unsigned char *name)
{
__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
- unsigned char (*noext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).noextensions;
+ unsigned char (*noext)[F2FS_EXTENSION_LEN] =
+ F2FS_OPTION(sbi).noextensions;
unsigned char (*ext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).extensions;
unsigned char ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
unsigned char noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
int i, cold_count, hot_count;
- if (!f2fs_sb_has_compression(sbi) ||
- F2FS_I(inode)->i_flags & F2FS_NOCOMP_FL ||
- !f2fs_may_compress(inode) ||
- (!ext_cnt && !noext_cnt))
+ if (!f2fs_sb_has_compression(sbi))
return;
- down_read(&sbi->sb_lock);
+ if (S_ISDIR(inode->i_mode))
+ goto inherit_comp;
+ /* This name comes only from normal files. */
+ if (!name)
+ return;
+
+ /* Don't compress hot files. */
+ f2fs_down_read(&sbi->sb_lock);
cold_count = le32_to_cpu(sbi->raw_super->extension_count);
hot_count = sbi->raw_super->hot_ext_count;
+ for (i = cold_count; i < cold_count + hot_count; i++)
+ if (is_temperature_extension(name, extlist[i]))
+ break;
+ f2fs_up_read(&sbi->sb_lock);
+ if (i < (cold_count + hot_count))
+ return;
- for (i = cold_count; i < cold_count + hot_count; i++) {
- if (is_extension_exist(name, extlist[i], false)) {
- up_read(&sbi->sb_lock);
+ /* Don't compress unallowed extension. */
+ for (i = 0; i < noext_cnt; i++)
+ if (is_compress_extension(name, noext[i]))
return;
- }
- }
-
- up_read(&sbi->sb_lock);
- for (i = 0; i < noext_cnt; i++) {
- if (is_extension_exist(name, noext[i], false)) {
- f2fs_disable_compressed_file(inode);
+ /* Compress wanting extension. */
+ for (i = 0; i < ext_cnt; i++) {
+ if (is_compress_extension(name, ext[i])) {
+ set_compress_context(inode);
return;
}
}
+inherit_comp:
+ /* Inherit the {no-}compression flag in directory */
+ if (F2FS_I(dir)->i_flags & F2FS_NOCOMP_FL) {
+ F2FS_I(inode)->i_flags |= F2FS_NOCOMP_FL;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ } else if (F2FS_I(dir)->i_flags & F2FS_COMPR_FL) {
+ set_compress_context(inode);
+ }
+}
- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
- return;
+/*
+ * Set file's temperature for hot/cold data separation
+ */
+static void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
+ const unsigned char *name)
+{
+ __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+ int i, cold_count, hot_count;
- for (i = 0; i < ext_cnt; i++) {
- if (!is_extension_exist(name, ext[i], false))
- continue;
+ f2fs_down_read(&sbi->sb_lock);
+ cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+ hot_count = sbi->raw_super->hot_ext_count;
+ for (i = 0; i < cold_count + hot_count; i++)
+ if (is_temperature_extension(name, extlist[i]))
+ break;
+ f2fs_up_read(&sbi->sb_lock);
- set_compress_context(inode);
+ if (i == cold_count + hot_count)
return;
+
+ if (i < cold_count)
+ file_set_cold(inode);
+ else
+ file_set_hot(inode);
+}
+
+static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
+ struct inode *dir, umode_t mode,
+ const char *name)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct f2fs_inode_info *fi;
+ nid_t ino;
+ struct inode *inode;
+ bool nid_free = false;
+ bool encrypt = false;
+ int xattr_size = 0;
+ int err;
+
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (!f2fs_alloc_nid(sbi, &ino)) {
+ err = -ENOSPC;
+ goto fail;
+ }
+
+ nid_free = true;
+
+ inode_init_owner(idmap, inode, dir, mode);
+
+ fi = F2FS_I(inode);
+ inode->i_ino = ino;
+ inode->i_blocks = 0;
+ simple_inode_init_ts(inode);
+ fi->i_crtime = inode_get_mtime(inode);
+ inode->i_generation = get_random_u32();
+
+ if (S_ISDIR(inode->i_mode))
+ fi->i_current_depth = 1;
+
+ err = insert_inode_locked(inode);
+ if (err) {
+ err = -EINVAL;
+ goto fail;
}
+
+ if (f2fs_sb_has_project_quota(sbi) &&
+ (F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
+ fi->i_projid = F2FS_I(dir)->i_projid;
+ else
+ fi->i_projid = make_kprojid(&init_user_ns,
+ F2FS_DEF_PROJID);
+
+ err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
+ if (err)
+ goto fail_drop;
+
+ err = f2fs_dquot_initialize(inode);
+ if (err)
+ goto fail_drop;
+
+ set_inode_flag(inode, FI_NEW_INODE);
+
+ if (encrypt)
+ f2fs_set_encrypted_inode(inode);
+
+ if (f2fs_sb_has_extra_attr(sbi)) {
+ set_inode_flag(inode, FI_EXTRA_ATTR);
+ fi->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
+ }
+
+ if (test_opt(sbi, INLINE_XATTR))
+ set_inode_flag(inode, FI_INLINE_XATTR);
+
+ if (f2fs_may_inline_dentry(inode))
+ set_inode_flag(inode, FI_INLINE_DENTRY);
+
+ if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
+ f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
+ if (f2fs_has_inline_xattr(inode))
+ xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
+ /* Otherwise, will be 0 */
+ } else if (f2fs_has_inline_xattr(inode) ||
+ f2fs_has_inline_dentry(inode)) {
+ xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ }
+ fi->i_inline_xattr_size = xattr_size;
+
+ fi->i_flags =
+ f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
+
+ if (S_ISDIR(inode->i_mode))
+ fi->i_flags |= F2FS_INDEX_FL;
+
+ if (fi->i_flags & F2FS_PROJINHERIT_FL)
+ set_inode_flag(inode, FI_PROJ_INHERIT);
+
+ /* Check compression first. */
+ set_compress_new_inode(sbi, dir, inode, name);
+
+ /* Should enable inline_data after compression set */
+ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+ set_inode_flag(inode, FI_INLINE_DATA);
+
+ if (name && !test_opt(sbi, DISABLE_EXT_IDENTIFY))
+ set_file_temperature(sbi, inode, name);
+
+ stat_inc_inline_xattr(inode);
+ stat_inc_inline_inode(inode);
+ stat_inc_inline_dir(inode);
+
+ f2fs_set_inode_flags(inode);
+
+ f2fs_init_extent_tree(inode);
+
+ trace_f2fs_new_inode(inode, 0);
+ return inode;
+
+fail:
+ trace_f2fs_new_inode(inode, err);
+ make_bad_inode(inode);
+ if (nid_free)
+ set_inode_flag(inode, FI_FREE_NID);
+ iput(inode);
+ return ERR_PTR(err);
+fail_drop:
+ trace_f2fs_new_inode(inode, err);
+ dquot_drop(inode);
+ inode->i_flags |= S_NOQUOTA;
+ make_bad_inode(inode);
+ if (nid_free)
+ set_inode_flag(inode, FI_FREE_NID);
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(err);
}
-static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int f2fs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
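The ordering enforced in the f2fs_new_inode() hunk above ("Check compression first", then FI_INLINE_DATA) matters presumably because a compressed inode must not also get inline data, so the inline-data check has to observe the final compression state. A toy user-space model of that dependency (hypothetical names, not the kernel's actual checks):

#include <assert.h>
#include <stdbool.h>

struct toy_inode { bool compressed; bool inline_data; };

static bool toy_may_inline_data(const struct toy_inode *inode)
{
	/* assumption: compression and inline data are mutually exclusive */
	return !inode->compressed;
}

int main(void)
{
	struct toy_inode inode = { 0 };

	inode.compressed = true;		/* outcome of set_compress_new_inode() */
	if (toy_may_inline_data(&inode))	/* FI_INLINE_DATA decided only afterwards */
		inode.inline_data = true;
	assert(!inode.inline_data);		/* reversing the order could set both */
	return 0;
}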
@@ -349,15 +367,10 @@ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
return err;
- inode = f2fs_new_inode(dir, mode);
+ inode = f2fs_new_inode(idmap, dir, mode, dentry->d_name.name);
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
- set_file_temperature(sbi, inode, dentry->d_name.name);
-
- set_compress_inode(sbi, inode, dentry->d_name.name);
-
inode->i_op = &f2fs_file_inode_operations;
inode->i_fop = &f2fs_file_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
@@ -401,7 +414,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (is_inode_flag_set(dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(dir)->i_projid,
- F2FS_I(old_dentry->d_inode)->i_projid)))
+ F2FS_I(inode)->i_projid)))
return -EXDEV;
err = f2fs_dquot_initialize(dir);
@@ -410,7 +423,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
f2fs_balance_fs(sbi, true);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ihold(inode);
set_inode_flag(inode, FI_INC_LINK);
@@ -434,77 +447,26 @@ out:
struct dentry *f2fs_get_parent(struct dentry *child)
{
- struct page *page;
- unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &page);
+ struct folio *folio;
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &folio);
if (!ino) {
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
return ERR_PTR(-ENOENT);
}
return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
-static int __recover_dot_dentries(struct inode *dir, nid_t pino)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct qstr dot = QSTR_INIT(".", 1);
- struct qstr dotdot = QSTR_INIT("..", 2);
- struct f2fs_dir_entry *de;
- struct page *page;
- int err = 0;
-
- if (f2fs_readonly(sbi->sb)) {
- f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
- dir->i_ino, pino);
- return 0;
- }
-
- err = f2fs_dquot_initialize(dir);
- if (err)
- return err;
-
- f2fs_balance_fs(sbi, true);
-
- f2fs_lock_op(sbi);
-
- de = f2fs_find_entry(dir, &dot, &page);
- if (de) {
- f2fs_put_page(page, 0);
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out;
- } else {
- err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
- if (err)
- goto out;
- }
-
- de = f2fs_find_entry(dir, &dotdot, &page);
- if (de)
- f2fs_put_page(page, 0);
- else if (IS_ERR(page))
- err = PTR_ERR(page);
- else
- err = f2fs_do_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
-out:
- if (!err)
- clear_inode_flag(dir, FI_INLINE_DOTS);
-
- f2fs_unlock_op(sbi);
- return err;
-}
-
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = NULL;
struct f2fs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
struct dentry *new;
nid_t ino = -1;
int err = 0;
- unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags);
@@ -515,17 +477,16 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
}
err = f2fs_prepare_lookup(dir, dentry, &fname);
- generic_set_encrypted_ci_d_ops(dentry);
if (err == -ENOENT)
goto out_splice;
if (err)
goto out;
- de = __f2fs_find_entry(dir, &fname, &page);
+ de = __f2fs_find_entry(dir, &fname, &folio);
f2fs_free_filename(&fname);
if (!de) {
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto out;
}
err = -ENOENT;
@@ -533,7 +494,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
}
ino = le32_to_cpu(de->ino);
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
inode = f2fs_iget(dir->i_sb, ino);
if (IS_ERR(inode)) {
@@ -541,17 +502,14 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
- err = __recover_dot_dentries(dir, root_ino);
- if (err)
- goto out_iput;
+ if (inode->i_nlink == 0) {
+ f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink",
+ __func__, inode->i_ino);
+ err = -EFSCORRUPTED;
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ goto out_iput;
}
- if (f2fs_has_inline_dots(inode)) {
- err = __recover_dot_dentries(inode, dir->i_ino);
- if (err)
- goto out_iput;
- }
if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
@@ -561,8 +519,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out_iput;
}
out_splice:
-#ifdef CONFIG_UNICODE
- if (!inode && IS_CASEFOLDED(dir)) {
+ if (IS_ENABLED(CONFIG_UNICODE) && !inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
* well. For now, prevent the negative dentry
@@ -571,10 +528,10 @@ out_splice:
trace_f2fs_lookup_end(dir, dentry, ino, err);
return NULL;
}
-#endif
+
new = d_splice_alias(inode, dentry);
- err = PTR_ERR_OR_ZERO(new);
- trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err);
+ trace_f2fs_lookup_end(dir, !IS_ERR_OR_NULL(new) ? new : dentry,
+ ino, IS_ERR(new) ? PTR_ERR(new) : err);
return new;
out_iput:
iput(inode);
@@ -588,28 +545,38 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode = d_inode(dentry);
struct f2fs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
int err;
trace_f2fs_unlink_enter(dir, dentry);
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
- goto fail;
+ goto out;
}
err = f2fs_dquot_initialize(dir);
if (err)
- goto fail;
+ goto out;
err = f2fs_dquot_initialize(inode);
if (err)
- goto fail;
+ goto out;
- de = f2fs_find_entry(dir, &dentry->d_name, &page);
+ de = f2fs_find_entry(dir, &dentry->d_name, &folio);
if (!de) {
- if (IS_ERR(page))
- err = PTR_ERR(page);
- goto fail;
+ if (IS_ERR(folio))
+ err = PTR_ERR(folio);
+ goto out;
+ }
+
+ if (unlikely(inode->i_nlink == 0)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has zero i_nlink",
+ __func__, inode->i_ino);
+ goto corrupted;
+ } else if (S_ISDIR(inode->i_mode) && unlikely(inode->i_nlink == 1)) {
+ f2fs_warn(sbi, "%s: directory inode (ino=%lx) has a single i_nlink",
+ __func__, inode->i_ino);
+ goto corrupted;
}
f2fs_balance_fs(sbi, true);
@@ -618,25 +585,30 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
err = f2fs_acquire_orphan_inode(sbi);
if (err) {
f2fs_unlock_op(sbi);
- f2fs_put_page(page, 0);
- goto fail;
+ f2fs_folio_put(folio, false);
+ goto out;
}
- f2fs_delete_entry(de, page, dir, inode);
-#ifdef CONFIG_UNICODE
+ f2fs_delete_entry(de, folio, dir, inode);
+ f2fs_unlock_op(sbi);
+
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
* negative dentries at f2fs_lookup(), when it is better
* supported by the VFS for the CI case.
*/
- if (IS_CASEFOLDED(dir))
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
d_invalidate(dentry);
-#endif
- f2fs_unlock_op(sbi);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
-fail:
+
+ goto out;
+corrupted:
+ err = -EFSCORRUPTED;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_folio_put(folio, false);
+out:
trace_f2fs_unlink_exit(inode, err);
return err;
}
@@ -656,7 +628,7 @@ static const char *f2fs_get_link(struct dentry *dentry,
return link;
}
-static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int f2fs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
@@ -679,7 +651,7 @@ static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
return err;
- inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+ inode = f2fs_new_inode(idmap, dir, S_IFLNK | S_IRWXUGO, NULL);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -736,23 +708,23 @@ out_free_encrypted_link:
return err;
}
-static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
int err;
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ return ERR_PTR(-EIO);
err = f2fs_dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
- inode = f2fs_new_inode(dir, S_IFDIR | mode);
+ inode = f2fs_new_inode(idmap, dir, S_IFDIR | mode, NULL);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
@@ -774,12 +746,12 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
f2fs_sync_fs(sbi->sb, 1);
f2fs_balance_fs(sbi, true);
- return 0;
+ return NULL;
out_fail:
clear_inode_flag(inode, FI_INC_LINK);
f2fs_handle_failed_inode(inode);
- return err;
+ return ERR_PTR(err);
}
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -791,7 +763,7 @@ static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
return -ENOTEMPTY;
}
-static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int f2fs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
@@ -807,7 +779,7 @@ static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
return err;
- inode = f2fs_new_inode(dir, mode);
+ inode = f2fs_new_inode(idmap, dir, mode, NULL);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -834,8 +806,9 @@ out:
return err;
}
-static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
- umode_t mode, struct inode **whiteout)
+static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode, bool is_whiteout,
+ struct inode **new_inode, struct f2fs_filename *fname)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -845,11 +818,11 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
if (err)
return err;
- inode = f2fs_new_inode(dir, mode);
+ inode = f2fs_new_inode(idmap, dir, mode, NULL);
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (whiteout) {
+ if (is_whiteout) {
init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
inode->i_op = &f2fs_special_inode_operations;
} else {
@@ -863,7 +836,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
if (err)
goto out;
- err = f2fs_do_tmpfile(inode, dir);
+ err = f2fs_do_tmpfile(inode, dir, fname);
if (err)
goto release_out;
@@ -874,21 +847,25 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
f2fs_add_orphan_inode(inode);
f2fs_alloc_nid_done(sbi, inode->i_ino);
- if (whiteout) {
+ if (is_whiteout) {
f2fs_i_links_write(inode, false);
spin_lock(&inode->i_lock);
- inode->i_state |= I_LINKABLE;
+ inode_state_set(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
-
- *whiteout = inode;
} else {
- d_tmpfile(dentry, inode);
+ if (file)
+ d_tmpfile(file, inode);
+ else
+ f2fs_i_links_write(inode, false);
}
/* link_count was changed by d_tmpfile as well. */
f2fs_unlock_op(sbi);
unlock_new_inode(inode);
+ if (new_inode)
+ *new_inode = inode;
+
f2fs_balance_fs(sbi, true);
return 0;
@@ -899,40 +876,51 @@ out:
return err;
}
-static int f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ int err;
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- return __f2fs_tmpfile(dir, dentry, mode, NULL);
+ err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL, NULL);
+
+ return finish_open_simple(file, err);
}
-static int f2fs_create_whiteout(struct inode *dir, struct inode **whiteout)
+static int f2fs_create_whiteout(struct mnt_idmap *idmap,
+ struct inode *dir, struct inode **whiteout,
+ struct f2fs_filename *fname)
{
- if (unlikely(f2fs_cp_error(F2FS_I_SB(dir))))
- return -EIO;
+ return __f2fs_tmpfile(idmap, dir, NULL, S_IFCHR | WHITEOUT_MODE,
+ true, whiteout, fname);
+}
- return __f2fs_tmpfile(dir, NULL, S_IFCHR | WHITEOUT_MODE, whiteout);
+int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct inode **new_inode)
+{
+ return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG,
+ false, new_inode, NULL);
}
-static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
struct inode *whiteout = NULL;
- struct page *old_dir_page = NULL;
- struct page *old_page, *new_page = NULL;
+ struct folio *old_dir_folio = NULL;
+ struct folio *old_folio, *new_folio = NULL;
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
+ bool old_is_dir = S_ISDIR(old_inode->i_mode);
int err;
if (unlikely(f2fs_cp_error(sbi)))
@@ -942,12 +930,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
- F2FS_I(old_dentry->d_inode)->i_projid)))
+ F2FS_I(old_inode)->i_projid)))
return -EXDEV;
/*
* If new_inode is null, the below renaming flow will
- * add a link in old_dir which can conver inline_dir.
+ * add a link in old_dir which can convert inline_dir.
* After then, if we failed to get the entry due to other
* reasons like ENOMEM, we had to remove the new entry.
* Instead of adding such the error handling routine, let's
@@ -960,7 +948,14 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
if (flags & RENAME_WHITEOUT) {
- err = f2fs_create_whiteout(old_dir, &whiteout);
+ struct f2fs_filename fname;
+
+ err = f2fs_setup_filename(old_dir, &old_dentry->d_name,
+ 0, &fname);
+ if (err)
+ return err;
+
+ err = f2fs_create_whiteout(idmap, old_dir, &whiteout, &fname);
if (err)
return err;
}
@@ -980,18 +975,18 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
err = -ENOENT;
- old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_entry) {
- if (IS_ERR(old_page))
- err = PTR_ERR(old_page);
+ if (IS_ERR(old_folio))
+ err = PTR_ERR(old_folio);
goto out;
}
- if (S_ISDIR(old_inode->i_mode)) {
- old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
+ if (old_is_dir && old_dir != new_dir) {
+ old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_folio);
if (!old_dir_entry) {
- if (IS_ERR(old_dir_page))
- err = PTR_ERR(old_dir_page);
+ if (IS_ERR(old_dir_folio))
+ err = PTR_ERR(old_dir_folio);
goto out_old;
}
}
@@ -999,15 +994,15 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new_inode) {
err = -ENOTEMPTY;
- if (old_dir_entry && !f2fs_empty_dir(new_inode))
+ if (old_is_dir && !f2fs_empty_dir(new_inode))
goto out_dir;
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
- &new_page);
+ &new_folio);
if (!new_entry) {
- if (IS_ERR(new_page))
- err = PTR_ERR(new_page);
+ if (IS_ERR(new_folio))
+ err = PTR_ERR(new_folio);
goto out_dir;
}
@@ -1019,15 +1014,15 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto put_out_dir;
- f2fs_set_link(new_dir, new_entry, new_page, old_inode);
- new_page = NULL;
+ f2fs_set_link(new_dir, new_entry, new_folio, old_inode);
+ new_folio = NULL;
- new_inode->i_ctime = current_time(new_inode);
- down_write(&F2FS_I(new_inode)->i_sem);
- if (old_dir_entry)
+ inode_set_ctime_current(new_inode);
+ f2fs_down_write(&F2FS_I(new_inode)->i_sem);
+ if (old_is_dir)
f2fs_i_links_write(new_inode, false);
f2fs_i_links_write(new_inode, false);
- up_write(&F2FS_I(new_inode)->i_sem);
+ f2fs_up_write(&F2FS_I(new_inode)->i_sem);
if (!new_inode->i_nlink)
f2fs_add_orphan_inode(new_inode);
@@ -1044,45 +1039,44 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_dir;
}
- if (old_dir_entry)
+ if (old_is_dir)
f2fs_i_links_write(new_dir, true);
}
- down_write(&F2FS_I(old_inode)->i_sem);
- if (!old_dir_entry || whiteout)
+ f2fs_down_write(&F2FS_I(old_inode)->i_sem);
+ if (!old_is_dir || whiteout)
file_lost_pino(old_inode);
else
/* adjust dir's i_pino to pass fsck check */
f2fs_i_pino_write(old_inode, new_dir->i_ino);
- up_write(&F2FS_I(old_inode)->i_sem);
+ f2fs_up_write(&F2FS_I(old_inode)->i_sem);
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
f2fs_mark_inode_dirty_sync(old_inode, false);
- f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
- old_page = NULL;
+ f2fs_delete_entry(old_entry, old_folio, old_dir, NULL);
+ old_folio = NULL;
if (whiteout) {
set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
- if (err)
+ if (err) {
+ d_invalidate(old_dentry);
+ d_invalidate(new_dentry);
goto put_out_dir;
-
+ }
spin_lock(&whiteout->i_lock);
- whiteout->i_state &= ~I_LINKABLE;
+ inode_state_clear(whiteout, I_LINKABLE);
spin_unlock(&whiteout->i_lock);
iput(whiteout);
}
- if (old_dir_entry) {
- if (old_dir != new_dir && !whiteout)
- f2fs_set_link(old_inode, old_dir_entry,
- old_dir_page, new_dir);
- else
- f2fs_put_page(old_dir_page, 0);
+ if (old_dir_entry)
+ f2fs_set_link(old_inode, old_dir_entry, old_dir_folio, new_dir);
+ if (old_is_dir)
f2fs_i_links_write(old_dir, false);
- }
+
if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) {
f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
if (S_ISDIR(old_inode->i_mode))
@@ -1100,15 +1094,14 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
put_out_dir:
f2fs_unlock_op(sbi);
- f2fs_put_page(new_page, 0);
+ f2fs_folio_put(new_folio, false);
out_dir:
if (old_dir_entry)
- f2fs_put_page(old_dir_page, 0);
+ f2fs_folio_put(old_dir_folio, false);
out_old:
- f2fs_put_page(old_page, 0);
+ f2fs_folio_put(old_folio, false);
out:
- if (whiteout)
- iput(whiteout);
+ iput(whiteout);
return err;
}
@@ -1118,8 +1111,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct page *old_dir_page, *new_dir_page;
- struct page *old_page, *new_page;
+ struct folio *old_dir_folio, *new_dir_folio;
+ struct folio *old_folio, *new_folio;
struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
struct f2fs_dir_entry *old_entry, *new_entry;
int old_nlink = 0, new_nlink = 0;
@@ -1132,10 +1125,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
- F2FS_I(old_dentry->d_inode)->i_projid)) ||
- (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ F2FS_I(old_inode)->i_projid)) ||
+ (is_inode_flag_set(old_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(old_dir)->i_projid,
- F2FS_I(new_dentry->d_inode)->i_projid)))
+ F2FS_I(new_inode)->i_projid)))
return -EXDEV;
err = f2fs_dquot_initialize(old_dir);
@@ -1147,17 +1140,17 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
err = -ENOENT;
- old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_entry) {
- if (IS_ERR(old_page))
- err = PTR_ERR(old_page);
+ if (IS_ERR(old_folio))
+ err = PTR_ERR(old_folio);
goto out;
}
- new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
+ new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
if (!new_entry) {
- if (IS_ERR(new_page))
- err = PTR_ERR(new_page);
+ if (IS_ERR(new_folio))
+ err = PTR_ERR(new_folio);
goto out_old;
}
@@ -1165,20 +1158,20 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dir != new_dir) {
if (S_ISDIR(old_inode->i_mode)) {
old_dir_entry = f2fs_parent_dir(old_inode,
- &old_dir_page);
+ &old_dir_folio);
if (!old_dir_entry) {
- if (IS_ERR(old_dir_page))
- err = PTR_ERR(old_dir_page);
+ if (IS_ERR(old_dir_folio))
+ err = PTR_ERR(old_dir_folio);
goto out_new;
}
}
if (S_ISDIR(new_inode->i_mode)) {
new_dir_entry = f2fs_parent_dir(new_inode,
- &new_dir_page);
+ &new_dir_folio);
if (!new_dir_entry) {
- if (IS_ERR(new_dir_page))
- err = PTR_ERR(new_dir_page);
+ if (IS_ERR(new_dir_folio))
+ err = PTR_ERR(new_dir_folio);
goto out_old_dir;
}
}
@@ -1205,47 +1198,47 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
/* update ".." directory entry info of old dentry */
if (old_dir_entry)
- f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
+ f2fs_set_link(old_inode, old_dir_entry, old_dir_folio, new_dir);
/* update ".." directory entry info of new dentry */
if (new_dir_entry)
- f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir);
+ f2fs_set_link(new_inode, new_dir_entry, new_dir_folio, old_dir);
/* update directory entry info of old dir inode */
- f2fs_set_link(old_dir, old_entry, old_page, new_inode);
+ f2fs_set_link(old_dir, old_entry, old_folio, new_inode);
- down_write(&F2FS_I(old_inode)->i_sem);
+ f2fs_down_write(&F2FS_I(old_inode)->i_sem);
if (!old_dir_entry)
file_lost_pino(old_inode);
else
/* adjust dir's i_pino to pass fsck check */
f2fs_i_pino_write(old_inode, new_dir->i_ino);
- up_write(&F2FS_I(old_inode)->i_sem);
+ f2fs_up_write(&F2FS_I(old_inode)->i_sem);
- old_dir->i_ctime = current_time(old_dir);
+ inode_set_ctime_current(old_dir);
if (old_nlink) {
- down_write(&F2FS_I(old_dir)->i_sem);
+ f2fs_down_write(&F2FS_I(old_dir)->i_sem);
f2fs_i_links_write(old_dir, old_nlink > 0);
- up_write(&F2FS_I(old_dir)->i_sem);
+ f2fs_up_write(&F2FS_I(old_dir)->i_sem);
}
f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */
- f2fs_set_link(new_dir, new_entry, new_page, old_inode);
+ f2fs_set_link(new_dir, new_entry, new_folio, old_inode);
- down_write(&F2FS_I(new_inode)->i_sem);
+ f2fs_down_write(&F2FS_I(new_inode)->i_sem);
if (!new_dir_entry)
file_lost_pino(new_inode);
else
/* adjust dir's i_pino to pass fsck check */
f2fs_i_pino_write(new_inode, old_dir->i_ino);
- up_write(&F2FS_I(new_inode)->i_sem);
+ f2fs_up_write(&F2FS_I(new_inode)->i_sem);
- new_dir->i_ctime = current_time(new_dir);
+ inode_set_ctime_current(new_dir);
if (new_nlink) {
- down_write(&F2FS_I(new_dir)->i_sem);
+ f2fs_down_write(&F2FS_I(new_dir)->i_sem);
f2fs_i_links_write(new_dir, new_nlink > 0);
- up_write(&F2FS_I(new_dir)->i_sem);
+ f2fs_up_write(&F2FS_I(new_dir)->i_sem);
}
f2fs_mark_inode_dirty_sync(new_dir, false);
@@ -1263,21 +1256,21 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
return 0;
out_new_dir:
if (new_dir_entry) {
- f2fs_put_page(new_dir_page, 0);
+ f2fs_folio_put(new_dir_folio, false);
}
out_old_dir:
if (old_dir_entry) {
- f2fs_put_page(old_dir_page, 0);
+ f2fs_folio_put(old_dir_folio, false);
}
out_new:
- f2fs_put_page(new_page, 0);
+ f2fs_folio_put(new_folio, false);
out_old:
- f2fs_put_page(old_page, 0);
+ f2fs_folio_put(old_folio, false);
out:
return err;
}
-static int f2fs_rename2(struct user_namespace *mnt_userns,
+static int f2fs_rename2(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -1287,48 +1280,55 @@ static int f2fs_rename2(struct user_namespace *mnt_userns,
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
+ trace_f2fs_rename_start(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+
err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (err)
return err;
- if (flags & RENAME_EXCHANGE) {
- return f2fs_cross_rename(old_dir, old_dentry,
- new_dir, new_dentry);
- }
+ if (flags & RENAME_EXCHANGE)
+ err = f2fs_cross_rename(old_dir, old_dentry,
+ new_dir, new_dentry);
+ else
/*
* VFS has already handled the new dentry existence case,
* here, we just deal with "RENAME_NOREPLACE" as regular rename.
*/
- return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+ err = f2fs_rename(idmap, old_dir, old_dentry,
+ new_dir, new_dentry, flags);
+
+ trace_f2fs_rename_end(old_dentry, new_dentry, flags, err);
+ return err;
}
static const char *f2fs_encrypted_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct page *page;
+ struct folio *folio;
const char *target;
if (!dentry)
return ERR_PTR(-ECHILD);
- page = read_mapping_page(inode->i_mapping, 0, NULL);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ folio = read_mapping_folio(inode->i_mapping, 0, NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
- target = fscrypt_get_symlink(inode, page_address(page),
+ target = fscrypt_get_symlink(inode, folio_address(folio),
inode->i_sb->s_blocksize, done);
- put_page(page);
+ folio_put(folio);
return target;
}
-static int f2fs_encrypted_symlink_getattr(struct user_namespace *mnt_userns,
+static int f2fs_encrypted_symlink_getattr(struct mnt_idmap *idmap,
const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
- f2fs_getattr(mnt_userns, path, stat, request_mask, query_flags);
+ f2fs_getattr(idmap, path, stat, request_mask, query_flags);
return fscrypt_symlink_getattr(path, stat);
}
@@ -1353,7 +1353,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
.tmpfile = f2fs_tmpfile,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
- .get_acl = f2fs_get_acl,
+ .get_inode_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
.listxattr = f2fs_listxattr,
.fiemap = f2fs_fiemap,
@@ -1371,7 +1371,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
const struct inode_operations f2fs_special_inode_operations = {
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
- .get_acl = f2fs_get_acl,
+ .get_inode_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
.listxattr = f2fs_listxattr,
};
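The lookup and unlink hunks above open-code very similar i_nlink sanity checks; a shared helper along these lines (hypothetical, not part of the patch, and slightly stricter than the lookup-side check, which only rejects a zero link count) would capture the pattern:

static int f2fs_check_nlink(struct f2fs_sb_info *sbi, struct inode *inode)
{
	if (inode->i_nlink == 0 ||
	    (S_ISDIR(inode->i_mode) && inode->i_nlink == 1)) {
		f2fs_warn(sbi, "inode (ino=%lx) has a corrupted i_nlink (%u)",
			  inode->i_ino, inode->i_nlink);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}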
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 556fcd8457f3..482a362f2625 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -8,7 +8,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
@@ -20,22 +20,28 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
+#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;
+static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;
+}
+
/*
* Check whether the given nid is within node id range.
*/
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
- if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
+ if (unlikely(is_invalid_nid(sbi, nid))) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
__func__, nid);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
return -EFSCORRUPTED;
}
return 0;
@@ -59,7 +65,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
avail_ram = val.totalram - val.totalhigh;
/*
- * give 25%, 25%, 50%, 50%, 50% memory for each components respectively
+ * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component respectively
*/
if (type == FREE_NIDS) {
mem_size = (nm_i->nid_cnt[FREE_NID] *
@@ -84,16 +90,16 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
sizeof(struct ino_entry);
mem_size >>= PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
- } else if (type == EXTENT_CACHE) {
- mem_size = (atomic_read(&sbi->total_ext_tree) *
+ } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
+ enum extent_type etype = type == READ_EXTENT_CACHE ?
+ EX_READ : EX_BLOCK_AGE;
+ struct extent_tree_info *eti = &sbi->extent_tree[etype];
+
+ mem_size = (atomic_read(&eti->total_ext_tree) *
sizeof(struct extent_tree) +
- atomic_read(&sbi->total_ext_node) *
+ atomic_read(&eti->total_ext_node) *
sizeof(struct extent_node)) >> PAGE_SHIFT;
- res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
- } else if (type == INMEM_PAGES) {
- /* it allows 20% / total_ram for inmemory pages */
- mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
- res = mem_size < (val.totalram / 5);
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == DISCARD_CACHE) {
mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
sizeof(struct discard_cmd)) >> PAGE_SHIFT;
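The percentages in the comment above are fractions of avail_ram * nm_i->ram_thresh / 100, expressed as right shifts. A worked user-space example of the quota (numbers purely illustrative); note that the two extent caches now fall into the >> 2 (25%) class instead of >> 1:

#include <assert.h>

static unsigned long cache_quota_pages(unsigned long avail_ram,
				       unsigned int ram_thresh, int shift)
{
	return (avail_ram * ram_thresh / 100) >> shift;
}

int main(void)
{
	/* e.g. 1,000,000 available pages and ram_thresh = 10 */
	assert(cache_quota_pages(1000000, 10, 1) == 50000);	/* 50% classes */
	assert(cache_quota_pages(1000000, 10, 2) == 25000);	/* 25% classes */
	return 0;
}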
@@ -119,25 +125,25 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
return res;
}
-static void clear_node_page_dirty(struct page *page)
+static void clear_node_folio_dirty(struct folio *folio)
{
- if (PageDirty(page)) {
- f2fs_clear_page_cache_dirty_tag(page);
- clear_page_dirty_for_io(page);
- dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
+ if (folio_test_dirty(folio)) {
+ f2fs_clear_page_cache_dirty_tag(folio);
+ folio_clear_dirty_for_io(folio);
+ dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
}
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
}
-static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
- return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
+ return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
}
-static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct page *src_page;
- struct page *dst_page;
+ struct folio *src_folio;
+ struct folio *dst_folio;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
@@ -146,21 +152,21 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
/* get current nat block page with lock */
- src_page = get_current_nat_page(sbi, nid);
- if (IS_ERR(src_page))
- return src_page;
- dst_page = f2fs_grab_meta_page(sbi, dst_off);
- f2fs_bug_on(sbi, PageDirty(src_page));
-
- src_addr = page_address(src_page);
- dst_addr = page_address(dst_page);
+ src_folio = get_current_nat_folio(sbi, nid);
+ if (IS_ERR(src_folio))
+ return src_folio;
+ dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
+ f2fs_bug_on(sbi, folio_test_dirty(src_folio));
+
+ src_addr = folio_address(src_folio);
+ dst_addr = folio_address(dst_folio);
memcpy(dst_addr, src_addr, PAGE_SIZE);
- set_page_dirty(dst_page);
- f2fs_put_page(src_page, 1);
+ folio_mark_dirty(dst_folio);
+ f2fs_folio_put(src_folio, true);
set_to_next_nat(nm_i, nid);
- return dst_page;
+ return dst_folio;
}
static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
@@ -184,7 +190,7 @@ static void __free_nat_entry(struct nat_entry *e)
/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
+ struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail, bool init_dirty)
{
if (no_fail)
f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
@@ -194,6 +200,12 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
if (raw_ne)
node_info_from_raw_nat(&ne->ni, raw_ne);
+ if (init_dirty) {
+ INIT_LIST_HEAD(&ne->list);
+ nm_i->nat_cnt[TOTAL_NAT]++;
+ return ne;
+ }
+
spin_lock(&nm_i->nat_list_lock);
list_add_tail(&ne->list, &nm_i->nat_entries);
spin_unlock(&nm_i->nat_list_lock);
@@ -203,14 +215,17 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
return ne;
}
-static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
+static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n, bool for_dirty)
{
struct nat_entry *ne;
ne = radix_tree_lookup(&nm_i->nat_root, n);
- /* for recent accessed nat entry, move it to tail of lru list */
- if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+ /*
+ * for a recently accessed nat entry which will not be dirtied soon,
+ * move it to the tail of the lru list.
+ */
+ if (ne && !get_nat_flag(ne, IS_DIRTY) && !for_dirty) {
spin_lock(&nm_i->nat_list_lock);
if (!list_empty(&ne->list))
list_move_tail(&ne->list, &nm_i->nat_entries);
@@ -255,7 +270,7 @@ static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne)
+ struct nat_entry *ne, bool init_dirty)
{
struct nat_entry_set *head;
bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
@@ -278,7 +293,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
goto refresh_list;
nm_i->nat_cnt[DIRTY_NAT]++;
- nm_i->nat_cnt[RECLAIMABLE_NAT]--;
+ if (!init_dirty)
+ nm_i->nat_cnt[RECLAIMABLE_NAT]--;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
spin_lock(&nm_i->nat_list_lock);
@@ -309,10 +325,9 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
start, nr);
}
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
{
- return NODE_MAPPING(sbi) == page->mapping &&
- IS_DNODE(page) && is_cold_node(page);
+ return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
}
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
@@ -324,7 +339,7 @@ void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
}
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
- struct page *page)
+ struct folio *folio)
{
struct fsync_node_entry *fn;
unsigned long flags;
@@ -333,8 +348,8 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
GFP_NOFS, true, NULL);
- get_page(page);
- fn->page = page;
+ folio_get(folio);
+ fn->folio = folio;
INIT_LIST_HEAD(&fn->list);
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
@@ -347,19 +362,19 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
return seq_id;
}
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
{
struct fsync_node_entry *fn;
unsigned long flags;
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
list_for_each_entry(fn, &sbi->fsync_node_list, list) {
- if (fn->page == page) {
+ if (fn->folio == folio) {
list_del(&fn->list);
sbi->fsync_node_num--;
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
kmem_cache_free(fsync_node_entry_slab, fn);
- put_page(page);
+ folio_put(folio);
return;
}
}
@@ -382,14 +397,14 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e;
bool need = false;
- down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE))
need = true;
}
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
return need;
}
@@ -399,11 +414,11 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e;
bool is_cp = true;
- down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
return is_cp;
}
@@ -413,13 +428,13 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
struct nat_entry *e;
bool need_update = true;
- down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, ino);
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, ino, false);
if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
(get_nat_flag(e, IS_CHECKPOINTED) ||
get_nat_flag(e, HAS_FSYNCED_INODE)))
need_update = false;
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
return need_update;
}
@@ -430,20 +445,24 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *new, *e;
+ /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
+ if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
+ return;
+
new = __alloc_nat_entry(sbi, nid, false);
if (!new)
return;
- down_write(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ f2fs_down_write(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (!e)
- e = __init_nat_entry(nm_i, new, ne, false);
+ e = __init_nat_entry(nm_i, new, ne, false, false);
else
f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
nat_get_blkaddr(e) !=
le32_to_cpu(ne->block_addr) ||
nat_get_version(e) != ne->version);
- up_write(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
if (e != new)
__free_nat_entry(new);
}
@@ -454,11 +473,13 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
+ bool init_dirty = false;
- down_write(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, ni->nid);
+ f2fs_down_write(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, ni->nid, true);
if (!e) {
- e = __init_nat_entry(nm_i, new, NULL, true);
+ init_dirty = true;
+ e = __init_nat_entry(nm_i, new, NULL, true, true);
copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
@@ -494,17 +515,17 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
nat_set_blkaddr(e, new_blkaddr);
if (!__is_valid_data_blkaddr(new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
- __set_nat_cache_dirty(nm_i, e);
+ __set_nat_cache_dirty(nm_i, e, init_dirty);
/* update fsync_mark if its inode nat entry is still alive */
if (ni->nid != ni->ino)
- e = __lookup_nat_cache(nm_i, ni->ino);
+ e = __lookup_nat_cache(nm_i, ni->ino, false);
if (e) {
if (fsync_done && ni->nid == ni->ino)
set_nat_flag(e, HAS_FSYNCED_INODE, true);
set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
}
- up_write(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
}
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -512,7 +533,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink;
- if (!down_write_trylock(&nm_i->nat_tree_lock))
+ if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
return 0;
spin_lock(&nm_i->nat_list_lock);
@@ -534,35 +555,40 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
}
spin_unlock(&nm_i->nat_list_lock);
- up_write(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
- struct node_info *ni)
+ struct node_info *ni, bool checkpoint_context)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
pgoff_t index;
- block_t blkaddr;
int i;
+ bool need_cache = true;
+ ni->flag = 0;
ni->nid = nid;
retry:
/* Check nat cache */
- down_read(&nm_i->nat_tree_lock);
- e = __lookup_nat_cache(nm_i, nid);
+ f2fs_down_read(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid, false);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ if (IS_ENABLED(CONFIG_F2FS_CHECK_FS)) {
+ need_cache = false;
+ goto sanity_check;
+ }
return 0;
}
@@ -572,10 +598,11 @@ retry:
* nat_tree_lock. Therefore, we should retry, if we failed to grab here
* while not bothering checkpoint.
*/
- if (!rwsem_is_locked(&sbi->cp_global_sem)) {
+ if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
down_read(&curseg->journal_rwsem);
- } else if (!down_read_trylock(&curseg->journal_rwsem)) {
- up_read(&nm_i->nat_tree_lock);
+ } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
+ !down_read_trylock(&curseg->journal_rwsem)) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
goto retry;
}
@@ -586,39 +613,48 @@ retry:
}
up_read(&curseg->journal_rwsem);
if (i >= 0) {
- up_read(&nm_i->nat_tree_lock);
- goto cache;
+ f2fs_up_read(&nm_i->nat_tree_lock);
+ goto sanity_check;
}
/* Fill node_info from nat page */
index = current_nat_addr(sbi, nid);
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
- page = f2fs_get_meta_page(sbi, index);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_meta_folio(sbi, index);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- nat_blk = (struct f2fs_nat_block *)page_address(page);
+ nat_blk = folio_address(folio);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
- f2fs_put_page(page, 1);
-cache:
- blkaddr = le32_to_cpu(ne.block_addr);
- if (__is_valid_data_blkaddr(blkaddr) &&
- !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
- return -EFAULT;
+ f2fs_folio_put(folio, true);
+sanity_check:
+ if (__is_valid_data_blkaddr(ni->blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni->blk_addr,
+ DATA_GENERIC_ENHANCE)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_err_ratelimited(sbi,
+ "f2fs_get_node_info of %pS: inconsistent nat entry, "
+ "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
+ __builtin_return_address(0),
+ ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ return -EFSCORRUPTED;
+ }
/* cache nat entry */
- cache_nat_entry(sbi, nid, &ne);
+ if (need_cache)
+ cache_nat_entry(sbi, nid, &ne);
return 0;
}
/*
* readahead MAX_RA_NODE number of node pages.
*/
-static void f2fs_ra_node_pages(struct page *parent, int start, int n)
+static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
struct blk_plug plug;
int i, end;
nid_t nid;
@@ -627,7 +663,7 @@ static void f2fs_ra_node_pages(struct page *parent, int start, int n)
/* Then, try readahead for siblings of the desired node */
end = start + n;
- end = min(end, NIDS_PER_BLOCK);
+ end = min(end, (int)NIDS_PER_BLOCK);
for (i = start; i < end; i++) {
nid = get_nid(parent, i, false);
f2fs_ra_node_page(sbi, nid);
@@ -747,6 +783,8 @@ got:
return level;
}
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);
+
/*
* Caller should call f2fs_put_dnode(dn).
* Also, it should grab and release a rwsem by calling f2fs_lock_op() and
@@ -755,8 +793,8 @@ got:
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct page *npage[4];
- struct page *parent = NULL;
+ struct folio *nfolio[4];
+ struct folio *parent = NULL;
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
@@ -768,31 +806,42 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
return level;
nids[0] = dn->inode->i_ino;
- npage[0] = dn->inode_page;
- if (!npage[0]) {
- npage[0] = f2fs_get_node_page(sbi, nids[0]);
- if (IS_ERR(npage[0]))
- return PTR_ERR(npage[0]);
+ if (!dn->inode_folio) {
+ nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
+ if (IS_ERR(nfolio[0]))
+ return PTR_ERR(nfolio[0]);
+ } else {
+ nfolio[0] = dn->inode_folio;
}
/* if inline_data is set, should not report any block indices */
if (f2fs_has_inline_data(dn->inode) && index) {
err = -ENOENT;
- f2fs_put_page(npage[0], 1);
+ f2fs_folio_put(nfolio[0], true);
goto release_out;
}
- parent = npage[0];
+ parent = nfolio[0];
if (level != 0)
nids[1] = get_nid(parent, offset[0], true);
- dn->inode_page = npage[0];
- dn->inode_page_locked = true;
+ dn->inode_folio = nfolio[0];
+ dn->inode_folio_locked = true;
/* get indirect or direct nodes */
for (i = 1; i <= level; i++) {
bool done = false;
+ if (nids[i] && nids[i] == dn->inode->i_ino) {
+ err = -EFSCORRUPTED;
+ f2fs_err_ratelimited(sbi,
+ "inode mapping table is corrupted, run fsck to fix it, "
+ "ino:%lu, nid:%u, level:%d, offset:%d",
+ dn->inode->i_ino, nids[i], level, offset[level]);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto release_pages;
+ }
+
if (!nids[i] && mode == ALLOC_NODE) {
/* alloc new node */
if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
@@ -801,10 +850,10 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
}
dn->nid = nids[i];
- npage[i] = f2fs_new_node_page(dn, noffset[i]);
- if (IS_ERR(npage[i])) {
+ nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
+ if (IS_ERR(nfolio[i])) {
f2fs_alloc_nid_failed(sbi, nids[i]);
- err = PTR_ERR(npage[i]);
+ err = PTR_ERR(nfolio[i]);
goto release_pages;
}
@@ -812,66 +861,75 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
f2fs_alloc_nid_done(sbi, nids[i]);
done = true;
} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
- npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
- if (IS_ERR(npage[i])) {
- err = PTR_ERR(npage[i]);
+ nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
+ if (IS_ERR(nfolio[i])) {
+ err = PTR_ERR(nfolio[i]);
goto release_pages;
}
done = true;
}
if (i == 1) {
- dn->inode_page_locked = false;
- unlock_page(parent);
+ dn->inode_folio_locked = false;
+ folio_unlock(parent);
} else {
- f2fs_put_page(parent, 1);
+ f2fs_folio_put(parent, true);
}
if (!done) {
- npage[i] = f2fs_get_node_page(sbi, nids[i]);
- if (IS_ERR(npage[i])) {
- err = PTR_ERR(npage[i]);
- f2fs_put_page(npage[0], 0);
+ nfolio[i] = f2fs_get_node_folio(sbi, nids[i],
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(nfolio[i])) {
+ err = PTR_ERR(nfolio[i]);
+ f2fs_folio_put(nfolio[0], false);
goto release_out;
}
}
if (i < level) {
- parent = npage[i];
+ parent = nfolio[i];
nids[i + 1] = get_nid(parent, offset[i], false);
}
}
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
- dn->node_page = npage[level];
+ dn->node_folio = nfolio[level];
dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
f2fs_sb_has_readonly(sbi)) {
- unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
+ unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ pgoff_t fofs = index;
+ unsigned int c_len;
block_t blkaddr;
+ /* should align fofs and ofs_in_node to cluster_size */
+ if (fofs % cluster_size) {
+ fofs = round_down(fofs, cluster_size);
+ ofs_in_node = round_down(ofs_in_node, cluster_size);
+ }
+
+ c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
if (!c_len)
goto out;
- blkaddr = f2fs_data_blkaddr(dn);
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
if (blkaddr == COMPRESS_ADDR)
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
- dn->ofs_in_node + 1);
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
+ ofs_in_node + 1);
- f2fs_update_extent_tree_range_compressed(dn->inode,
- index, blkaddr,
- F2FS_I(dn->inode)->i_cluster_size,
- c_len);
+ f2fs_update_read_extent_tree_range_compressed(dn->inode,
+ fofs, blkaddr, cluster_size, c_len);
}
out:
return 0;
release_pages:
- f2fs_put_page(parent, 1);
+ f2fs_folio_put(parent, true);
if (i > 1)
- f2fs_put_page(npage[0], 0);
+ f2fs_folio_put(nfolio[0], false);
release_out:
- dn->inode_page = NULL;
- dn->node_page = NULL;
+ dn->inode_folio = NULL;
+ dn->node_folio = NULL;
if (err == -ENOENT) {
dn->cur_level = i;
dn->max_level = level;
@@ -887,12 +945,22 @@ static int truncate_node(struct dnode_of_data *dn)
int err;
pgoff_t index;
- err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
if (err)
return err;
+ if (ni.blk_addr != NEW_ADDR &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
+ f2fs_err_ratelimited(sbi,
+ "nat entry is corrupted, run fsck to fix it, ino:%u, "
+ "nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ return -EFSCORRUPTED;
+ }
+
/* Deallocate node address */
- f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -902,16 +970,16 @@ static int truncate_node(struct dnode_of_data *dn)
f2fs_inode_synced(dn->inode);
}
- clear_node_page_dirty(dn->node_page);
+ clear_node_folio_dirty(dn->node_folio);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- index = dn->node_page->index;
- f2fs_put_page(dn->node_page, 1);
+ index = dn->node_folio->index;
+ f2fs_folio_put(dn->node_folio, true);
invalidate_mapping_pages(NODE_MAPPING(sbi),
index, index);
- dn->node_page = NULL;
+ dn->node_folio = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
return 0;
@@ -919,26 +987,38 @@ static int truncate_node(struct dnode_of_data *dn)
static int truncate_dnode(struct dnode_of_data *dn)
{
- struct page *page;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ struct folio *folio;
int err;
if (dn->nid == 0)
return 1;
/* get direct node */
- page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
- if (PTR_ERR(page) == -ENOENT)
+ folio = f2fs_get_node_folio(sbi, dn->nid, NODE_TYPE_NON_INODE);
+ if (PTR_ERR(folio) == -ENOENT)
return 1;
- else if (IS_ERR(page))
- return PTR_ERR(page);
+ else if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
+ f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
+ dn->inode->i_ino, dn->nid, ino_of_node(folio));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
+ f2fs_folio_put(folio, true);
+ return -EFSCORRUPTED;
+ }
/* Make dnode_of_data for parameter */
- dn->node_page = page;
+ dn->node_folio = folio;
dn->ofs_in_node = 0;
- f2fs_truncate_data_blocks(dn);
+ f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
err = truncate_node(dn);
- if (err)
+ if (err) {
+ f2fs_folio_put(folio, true);
return err;
+ }
return 1;
}
@@ -947,7 +1027,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
int ofs, int depth)
{
struct dnode_of_data rdn = *dn;
- struct page *page;
+ struct folio *folio;
struct f2fs_node *rn;
nid_t child_nid;
unsigned int child_nofs;
@@ -959,15 +1039,16 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
- page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
- if (IS_ERR(page)) {
- trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
- return PTR_ERR(page);
+ folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid,
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(folio)) {
+ trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
+ return PTR_ERR(folio);
}
- f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+ f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);
- rn = F2FS_NODE(page);
+ rn = F2FS_NODE(folio);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -977,7 +1058,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
- if (set_nid(page, i, 0, false))
+ if (set_nid(folio, i, 0, false))
dn->node_changed = true;
}
} else {
@@ -991,7 +1072,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
- if (set_nid(page, i, 0, false))
+ if (set_nid(folio, i, 0, false))
dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
@@ -1003,19 +1084,19 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
if (!ofs) {
/* remove current indirect node */
- dn->node_page = page;
+ dn->node_folio = folio;
ret = truncate_node(dn);
if (ret)
goto out_err;
freed++;
} else {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
trace_f2fs_truncate_nodes_exit(dn->inode, freed);
return freed;
out_err:
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
trace_f2fs_truncate_nodes_exit(dn->inode, ret);
return ret;
}
@@ -1023,59 +1104,60 @@ out_err:
static int truncate_partial_nodes(struct dnode_of_data *dn,
struct f2fs_inode *ri, int *offset, int depth)
{
- struct page *pages[2];
+ struct folio *folios[2];
nid_t nid[3];
nid_t child_nid;
int err = 0;
int i;
int idx = depth - 2;
- nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
+ nid[0] = get_nid(dn->inode_folio, offset[0], true);
if (!nid[0])
return 0;
/* get indirect nodes in the path */
for (i = 0; i < idx + 1; i++) {
/* reference count will be increased */
- pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
- if (IS_ERR(pages[i])) {
- err = PTR_ERR(pages[i]);
+ folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i],
+ NODE_TYPE_NON_INODE);
+ if (IS_ERR(folios[i])) {
+ err = PTR_ERR(folios[i]);
idx = i - 1;
goto fail;
}
- nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
+ nid[i + 1] = get_nid(folios[i], offset[i + 1], false);
}
- f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+ f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);
/* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
- child_nid = get_nid(pages[idx], i, false);
+ child_nid = get_nid(folios[idx], i, false);
if (!child_nid)
continue;
dn->nid = child_nid;
err = truncate_dnode(dn);
if (err < 0)
goto fail;
- if (set_nid(pages[idx], i, 0, false))
+ if (set_nid(folios[idx], i, 0, false))
dn->node_changed = true;
}
if (offset[idx + 1] == 0) {
- dn->node_page = pages[idx];
+ dn->node_folio = folios[idx];
dn->nid = nid[idx];
err = truncate_node(dn);
if (err)
goto fail;
} else {
- f2fs_put_page(pages[idx], 1);
+ f2fs_folio_put(folios[idx], true);
}
offset[idx]++;
offset[idx + 1] = 0;
idx--;
fail:
for (i = idx; i >= 0; i--)
- f2fs_put_page(pages[i], 1);
+ f2fs_folio_put(folios[i], true);
trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
@@ -1093,26 +1175,33 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
unsigned int nofs = 0;
struct f2fs_inode *ri;
struct dnode_of_data dn;
- struct page *page;
+ struct folio *folio;
trace_f2fs_truncate_inode_blocks_enter(inode, from);
level = get_node_path(inode, from, offset, noffset);
- if (level < 0) {
+ if (level <= 0) {
+ if (!level) {
+ level = -EFSCORRUPTED;
+ f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
+ __func__, inode->i_ino,
+ from, ADDRS_PER_INODE(inode));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
trace_f2fs_truncate_inode_blocks_exit(inode, level);
return level;
}
- page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page)) {
- trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
- return PTR_ERR(page);
+ folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(folio)) {
+ trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
+ return PTR_ERR(folio);
}
- set_new_dnode(&dn, inode, page, NULL, 0);
- unlock_page(page);
+ set_new_dnode(&dn, inode, folio, NULL, 0);
+ folio_unlock(folio);
- ri = F2FS_INODE(page);
+ ri = F2FS_INODE(folio);
switch (level) {
case 0:
case 1:
@@ -1141,7 +1230,7 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
skip_partial:
while (cont) {
- dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
+ dn.nid = get_nid(folio, offset[0], true);
switch (offset[0]) {
case NODE_DIR1_BLOCK:
case NODE_DIR2_BLOCK:
@@ -1161,23 +1250,30 @@ skip_partial:
default:
BUG();
}
- if (err < 0 && err != -ENOENT)
+ if (err == -ENOENT) {
+ set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+ f2fs_err_ratelimited(sbi,
+ "truncate node fail, ino:%lu, nid:%u, "
+ "offset[0]:%d, offset[1]:%d, nofs:%d",
+ inode->i_ino, dn.nid, offset[0],
+ offset[1], nofs);
+ err = 0;
+ }
+ if (err < 0)
goto fail;
- if (offset[1] == 0 &&
- ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
- lock_page(page);
- BUG_ON(page->mapping != NODE_MAPPING(sbi));
- f2fs_wait_on_page_writeback(page, NODE, true, true);
- ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
- set_page_dirty(page);
- unlock_page(page);
+ if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
+ folio_lock(folio);
+ BUG_ON(!is_node_folio(folio));
+ set_nid(folio, offset[0], 0, true);
+ folio_unlock(folio);
}
offset[1] = 0;
offset[0]++;
nofs += err;
}
fail:
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
trace_f2fs_truncate_inode_blocks_exit(inode, err);
return err > 0 ? 0 : err;
}
@@ -1188,20 +1284,20 @@ int f2fs_truncate_xattr_node(struct inode *inode)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t nid = F2FS_I(inode)->i_xattr_nid;
struct dnode_of_data dn;
- struct page *npage;
+ struct folio *nfolio;
int err;
if (!nid)
return 0;
- npage = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(npage))
- return PTR_ERR(npage);
+ nfolio = f2fs_get_xnode_folio(sbi, nid);
+ if (IS_ERR(nfolio))
+ return PTR_ERR(nfolio);
- set_new_dnode(&dn, inode, NULL, npage, nid);
+ set_new_dnode(&dn, inode, NULL, nfolio, nid);
err = truncate_node(&dn);
if (err) {
- f2fs_put_page(npage, 1);
+ f2fs_folio_put(nfolio, true);
return err;
}
@@ -1231,8 +1327,9 @@ int f2fs_remove_inode_page(struct inode *inode)
}
/* remove potential inline_data blocks */
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
+ if (!IS_DEVICE_ALIASING(inode) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
@@ -1257,41 +1354,52 @@ int f2fs_remove_inode_page(struct inode *inode)
return 0;
}
-struct page *f2fs_new_inode_page(struct inode *inode)
+struct folio *f2fs_new_inode_folio(struct inode *inode)
{
struct dnode_of_data dn;
/* allocate inode page for new inode */
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
- /* caller should f2fs_put_page(page, 1); */
- return f2fs_new_node_page(&dn, 0);
+ /* caller should f2fs_folio_put(folio, true); */
+ return f2fs_new_node_folio(&dn, 0);
}
-struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info new_ni;
- struct page *page;
+ struct folio *folio;
int err;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
+ if (IS_ERR(folio))
+ return folio;
if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
goto fail;
#ifdef CONFIG_F2FS_CHECK_FS
- err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
if (err) {
dec_valid_node_count(sbi, dn->inode, !ofs);
goto fail;
}
- f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
+ if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ err = -EFSCORRUPTED;
+ dec_valid_node_count(sbi, dn->inode, !ofs);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn_ratelimited(sbi,
+ "f2fs_new_node_folio: inconsistent nat entry, "
+ "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
+ new_ni.ino, new_ni.nid, new_ni.blk_addr,
+ new_ni.version, new_ni.flag);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ goto fail;
+ }
#endif
new_ni.nid = dn->nid;
new_ni.ino = dn->inode->i_ino;
@@ -1300,12 +1408,12 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
new_ni.version = 0;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
- f2fs_wait_on_page_writeback(page, NODE, true, true);
- fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
- set_cold_node(page, S_ISDIR(dn->inode->i_mode));
- if (!PageUptodate(page))
- SetPageUptodate(page);
- if (set_page_dirty(page))
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
+ fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
+ set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ if (folio_mark_dirty(folio))
dn->node_changed = true;
if (f2fs_has_xattr_block(ofs))
@@ -1313,49 +1421,47 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
if (ofs == 0)
inc_valid_inode_count(sbi);
- return page;
-
+ return folio;
fail:
- clear_node_page_dirty(page);
- f2fs_put_page(page, 1);
+ clear_node_folio_dirty(folio);
+ f2fs_folio_put(folio, true);
return ERR_PTR(err);
}
/*
* Caller must release the folio as follows, depending on the return value:
- * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE or error: f2fs_put_page(page, 1)
+ * 0: f2fs_folio_put(folio, false)
+ * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
*/
-static int read_node_page(struct page *page, int op_flags)
+static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
.op = REQ_OP_READ,
.op_flags = op_flags,
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
};
int err;
- if (PageUptodate(page)) {
- if (!f2fs_inode_chksum_verify(sbi, page)) {
- ClearPageUptodate(page);
+ if (folio_test_uptodate(folio)) {
+ if (!f2fs_inode_chksum_verify(sbi, folio)) {
+ folio_clear_uptodate(folio);
return -EFSBADCRC;
}
return LOCKED_PAGE;
}
- err = f2fs_get_node_info(sbi, page->index, &ni);
+ err = f2fs_get_node_info(sbi, folio->index, &ni, false);
if (err)
return err;
/* NEW_ADDR can be seen after cp_error drops some dirty node pages */
- if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
- is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
- ClearPageUptodate(page);
+ if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
+ folio_clear_uptodate(folio);
return -ENOENT;
}
@@ -1364,7 +1470,7 @@ static int read_node_page(struct page *page, int op_flags)
err = f2fs_submit_page_bio(&fio);
if (!err)
- f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);
return err;
}
@@ -1374,7 +1480,7 @@ static int read_node_page(struct page *page, int op_flags)
*/
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
- struct page *apage;
+ struct folio *afolio;
int err;
if (!nid)
@@ -1382,22 +1488,59 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
if (f2fs_check_nid_range(sbi, nid))
return;
- apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
- if (apage)
+ afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
+ if (afolio)
return;
- apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
- if (!apage)
+ afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+ if (IS_ERR(afolio))
return;
- err = read_node_page(apage, REQ_RAHEAD);
- f2fs_put_page(apage, err ? 1 : 0);
+ err = read_node_folio(afolio, REQ_RAHEAD);
+ f2fs_folio_put(afolio, err ? true : false);
}
-static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
- struct page *parent, int start)
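+/*
+ * Check that the node footer matches both the nid we asked for and the
+ * node type the caller expects (inode, xattr or non-inode); any mismatch
+ * is treated as filesystem corruption.
+ */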
+static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
+ struct folio *folio, pgoff_t nid,
+ enum node_type ntype)
{
- struct page *page;
+ if (unlikely(nid != nid_of_node(folio)))
+ goto out_err;
+
+ switch (ntype) {
+ case NODE_TYPE_INODE:
+ if (!IS_INODE(folio))
+ goto out_err;
+ break;
+ case NODE_TYPE_XATTR:
+ if (!f2fs_has_xattr_block(ofs_of_node(folio)))
+ goto out_err;
+ break;
+ case NODE_TYPE_NON_INODE:
+ if (IS_INODE(folio))
+ goto out_err;
+ break;
+ default:
+ break;
+ }
+ if (time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))
+ goto out_err;
+ return 0;
+out_err:
+ f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
+ "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+ ntype, nid, nid_of_node(folio), ino_of_node(folio),
+ ofs_of_node(folio), cpver_of_node(folio),
+ next_blkaddr_of_node(folio));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
+ return -EFSCORRUPTED;
+}
+
+static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ struct folio *parent, int start, enum node_type ntype)
+{
+ struct folio *folio;
int err;
if (!nid)
@@ -1405,71 +1548,77 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
if (f2fs_check_nid_range(sbi, nid))
return ERR_PTR(-EINVAL);
repeat:
- page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- err = read_node_page(page, 0);
- if (err < 0) {
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- } else if (err == LOCKED_PAGE) {
- err = 0;
+ folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
+ if (IS_ERR(folio))
+ return folio;
+
+ err = read_node_folio(folio, 0);
+ if (err < 0)
+ goto out_put_err;
+ if (err == LOCKED_PAGE)
goto page_hit;
- }
if (parent)
f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
+ if (unlikely(!is_node_folio(folio))) {
+ f2fs_folio_put(folio, true);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(!folio_test_uptodate(folio))) {
err = -EIO;
- goto out_err;
+ goto out_put_err;
}
- if (!f2fs_inode_chksum_verify(sbi, page)) {
+ if (!f2fs_inode_chksum_verify(sbi, folio)) {
err = -EFSBADCRC;
goto out_err;
}
page_hit:
- if (unlikely(nid != nid_of_node(page))) {
- f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
- nid, nid_of_node(page), ino_of_node(page),
- ofs_of_node(page), cpver_of_node(page),
- next_blkaddr_of_node(page));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = -EINVAL;
+ err = sanity_check_node_footer(sbi, folio, nid, ntype);
+ if (!err)
+ return folio;
out_err:
- ClearPageUptodate(page);
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- }
- return page;
+ folio_clear_uptodate(folio);
+out_put_err:
+ /* -ENOENT from read_node_folio is not an error, so skip EIO handling for it. */
+ if (err != -ENOENT)
+ f2fs_handle_page_eio(sbi, folio, NODE);
+ f2fs_folio_put(folio, true);
+ return ERR_PTR(err);
+}
+
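+/*
+ * Wrappers around __get_node_folio(); each one tells the footer sanity
+ * check which node type the caller expects.
+ */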
+struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
+ enum node_type node_type)
+{
+ return __get_node_folio(sbi, nid, NULL, 0, node_type);
}
-struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
{
- return __get_node_page(sbi, nid, NULL, 0);
+ return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
}
-struct page *f2fs_get_node_page_ra(struct page *parent, int start)
+struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
+}
+
+static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
+{
+ struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
nid_t nid = get_nid(parent, start, false);
- return __get_node_page(sbi, nid, parent, start);
+ return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
}
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
- struct page *page;
+ struct folio *folio;
int ret;
/* should flush inline_data before evict_inode */
@@ -1477,115 +1626,119 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!inode)
return;
- page = f2fs_pagecache_get_page(inode->i_mapping, 0,
+ folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
FGP_LOCK|FGP_NOWAIT, 0);
- if (!page)
+ if (IS_ERR(folio))
goto iput_out;
- if (!PageUptodate(page))
- goto page_out;
+ if (!folio_test_uptodate(folio))
+ goto folio_out;
- if (!PageDirty(page))
- goto page_out;
+ if (!folio_test_dirty(folio))
+ goto folio_out;
- if (!clear_page_dirty_for_io(page))
- goto page_out;
+ if (!folio_clear_dirty_for_io(folio))
+ goto folio_out;
- ret = f2fs_write_inline_data(inode, page);
+ ret = f2fs_write_inline_data(inode, folio);
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
if (ret)
- set_page_dirty(page);
-page_out:
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+folio_out:
+ f2fs_folio_put(folio, true);
iput_out:
iput(inode);
}
-static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
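+/*
+ * Scan the dirty node pages of this inode and return (with an extra
+ * reference) the last cold dnode found; fsync uses it to decide which
+ * folio must carry the fsync mark.
+ */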
+static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
pgoff_t index;
- struct pagevec pvec;
- struct page *last_page = NULL;
- int nr_pages;
+ struct folio_batch fbatch;
+ struct folio *last_folio = NULL;
+ int nr_folios;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
index = 0;
- while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY))) {
+ while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+ (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_put_page(last_page, 0);
- pagevec_release(&pvec);
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
return ERR_PTR(-EIO);
}
- if (!IS_DNODE(page) || !is_cold_node(page))
+ if (!IS_DNODE(folio) || !is_cold_node(folio))
continue;
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
continue;
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
goto continue_unlock;
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- if (last_page)
- f2fs_put_page(last_page, 0);
+ if (last_folio)
+ f2fs_folio_put(last_folio, false);
- get_page(page);
- last_page = page;
- unlock_page(page);
+ folio_get(folio);
+ last_folio = folio;
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
- return last_page;
+ return last_folio;
}
-static int __write_node_page(struct page *page, bool atomic, bool *submitted,
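+/*
+ * Returns true if the folio was written out (or safely dropped), false if
+ * it was redirtied and the caller should treat the writeback as failed.
+ */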
+static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
struct writeback_control *wbc, bool do_balance,
enum iostat_type io_type, unsigned int *seq_id)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
nid_t nid;
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
- .ino = ino_of_node(page),
+ .ino = ino_of_node(folio),
.type = NODE,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
- .page = page,
+ .folio = folio,
.encrypted_page = NULL,
- .submitted = false,
+ .submitted = 0,
.io_type = io_type,
.io_wbc = wbc,
};
unsigned int seq;
- trace_f2fs_writepage(page, NODE);
+ trace_f2fs_writepage(folio, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
- ClearPageUptodate(page);
+ /* keep node pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ goto redirty_out;
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
- return 0;
+ folio_unlock(folio);
+ return true;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -1593,36 +1746,31 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
wbc->sync_mode == WB_SYNC_NONE &&
- IS_DNODE(page) && is_cold_node(page))
+ IS_DNODE(folio) && is_cold_node(folio))
goto redirty_out;
/* get old block addr of this node page */
- nid = nid_of_node(page);
- f2fs_bug_on(sbi, page->index != nid);
+ nid = nid_of_node(folio);
+ f2fs_bug_on(sbi, folio->index != nid);
- if (f2fs_get_node_info(sbi, nid, &ni))
+ if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
goto redirty_out;
- if (wbc->for_reclaim) {
- if (!down_read_trylock(&sbi->node_write))
- goto redirty_out;
- } else {
- down_read(&sbi->node_write);
- }
+ f2fs_down_read(&sbi->node_write);
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
- up_read(&sbi->node_write);
- unlock_page(page);
- return 0;
+ f2fs_up_read(&sbi->node_write);
+ folio_unlock(folio);
+ return true;
}
if (__is_valid_data_blkaddr(ni.blk_addr) &&
!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
DATA_GENERIC_ENHANCE)) {
- up_read(&sbi->node_write);
+ f2fs_up_read(&sbi->node_write);
goto redirty_out;
}
@@ -1630,27 +1778,21 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
- if (f2fs_in_warm_node_list(sbi, page)) {
- seq = f2fs_add_fsync_node_entry(sbi, page);
+ if (f2fs_in_warm_node_list(sbi, folio)) {
+ seq = f2fs_add_fsync_node_entry(sbi, folio);
if (seq_id)
*seq_id = seq;
}
- set_page_writeback(page);
- ClearPageError(page);
+ folio_start_writeback(folio);
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
- set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
+ set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
dec_page_count(sbi, F2FS_DIRTY_NODES);
- up_read(&sbi->node_write);
+ f2fs_up_read(&sbi->node_write);
- if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
- submitted = NULL;
- }
-
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_write(sbi, NODE);
@@ -1661,14 +1803,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (do_balance)
f2fs_balance_fs(sbi, false);
- return 0;
+ return true;
redirty_out:
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ return false;
}
-int f2fs_move_node_page(struct page *node_page, int gc_type)
+int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
{
int err = 0;
@@ -1676,159 +1819,149 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 1,
- .for_reclaim = 0,
};
- f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(node_folio, NODE, true, true);
- set_page_dirty(node_page);
+ folio_mark_dirty(node_folio);
- if (!clear_page_dirty_for_io(node_page)) {
+ if (!folio_clear_dirty_for_io(node_folio)) {
err = -EAGAIN;
goto out_page;
}
- if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO, NULL)) {
+ if (!__write_node_folio(node_folio, false, NULL,
+ &wbc, false, FS_GC_NODE_IO, NULL))
err = -EAGAIN;
- unlock_page(node_page);
- }
goto release_page;
} else {
/* set page dirty and write it */
- if (!PageWriteback(node_page))
- set_page_dirty(node_page);
+ if (!folio_test_writeback(node_folio))
+ folio_mark_dirty(node_folio);
}
out_page:
- unlock_page(node_page);
+ folio_unlock(node_folio);
release_page:
- f2fs_put_page(node_page, 0);
+ f2fs_folio_put(node_folio, false);
return err;
}
-static int f2fs_write_node_page(struct page *page,
- struct writeback_control *wbc)
-{
- return __write_node_page(page, false, NULL, wbc, false,
- FS_NODE_IO, NULL);
-}
-
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id)
{
pgoff_t index;
- struct pagevec pvec;
+ struct folio_batch fbatch;
int ret = 0;
- struct page *last_page = NULL;
+ struct folio *last_folio = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
- int nr_pages;
+ int nr_folios;
int nwritten = 0;
if (atomic) {
- last_page = last_fsync_dnode(sbi, ino);
- if (IS_ERR_OR_NULL(last_page))
- return PTR_ERR_OR_ZERO(last_page);
+ last_folio = last_fsync_dnode(sbi, ino);
+ if (IS_ERR_OR_NULL(last_folio))
+ return PTR_ERR_OR_ZERO(last_folio);
}
retry:
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
index = 0;
- while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY))) {
+ while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+ (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
bool submitted = false;
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_put_page(last_page, 0);
- pagevec_release(&pvec);
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
ret = -EIO;
goto out;
}
- if (!IS_DNODE(page) || !is_cold_node(page))
+ if (!IS_DNODE(folio) || !is_cold_node(folio))
continue;
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
continue;
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (ino_of_node(page) != ino)
+ if (ino_of_node(folio) != ino)
goto continue_unlock;
- if (!PageDirty(page) && page != last_page) {
+ if (!folio_test_dirty(folio) && folio != last_folio) {
/* someone wrote it for us */
goto continue_unlock;
}
- f2fs_wait_on_page_writeback(page, NODE, true, true);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
+ set_fsync_mark(folio, 0);
+ set_dentry_mark(folio, 0);
- if (!atomic || page == last_page) {
- set_fsync_mark(page, 1);
- if (IS_INODE(page)) {
+ if (!atomic || folio == last_folio) {
+ set_fsync_mark(folio, 1);
+ percpu_counter_inc(&sbi->rf_node_block_count);
+ if (IS_INODE(folio)) {
if (is_inode_flag_set(inode,
FI_DIRTY_INODE))
- f2fs_update_inode(inode, page);
- set_dentry_mark(page,
+ f2fs_update_inode(inode, folio);
+ set_dentry_mark(folio,
f2fs_need_dentry_mark(sbi, ino));
}
/* may be written by another thread */
- if (!PageDirty(page))
- set_page_dirty(page);
+ if (!folio_test_dirty(folio))
+ folio_mark_dirty(folio);
}
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- ret = __write_node_page(page, atomic &&
- page == last_page,
+ if (!__write_node_folio(folio, atomic &&
+ folio == last_folio,
&submitted, wbc, true,
- FS_NODE_IO, seq_id);
- if (ret) {
- unlock_page(page);
- f2fs_put_page(last_page, 0);
- break;
- } else if (submitted) {
- nwritten++;
+ FS_NODE_IO, seq_id)) {
+ f2fs_folio_put(last_folio, false);
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
}
+ if (submitted)
+ nwritten++;
- if (page == last_page) {
- f2fs_put_page(page, 0);
+ if (folio == last_folio) {
+ f2fs_folio_put(folio, false);
+ folio_batch_release(&fbatch);
marked = true;
- break;
+ goto out;
}
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
-
- if (ret || marked)
- break;
}
- if (!ret && atomic && !marked) {
+ if (atomic && !marked) {
f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
- ino, last_page->index);
- lock_page(last_page);
- f2fs_wait_on_page_writeback(last_page, NODE, true, true);
- set_page_dirty(last_page);
- unlock_page(last_page);
+ ino, last_folio->index);
+ folio_lock(last_folio);
+ f2fs_folio_wait_writeback(last_folio, NODE, true, true);
+ folio_mark_dirty(last_folio);
+ folio_unlock(last_folio);
goto retry;
}
out:
if (nwritten)
f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
- return ret ? -EIO : 0;
+ return ret;
}
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
@@ -1855,18 +1988,18 @@ static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
return 1;
}
-static bool flush_dirty_inode(struct page *page)
+static bool flush_dirty_inode(struct folio *folio)
{
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
struct inode *inode;
- nid_t ino = ino_of_node(page);
+ nid_t ino = ino_of_node(folio);
inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
if (!inode)
return false;
- f2fs_update_inode(inode, page);
- unlock_page(page);
+ f2fs_update_inode(inode, folio);
+ folio_unlock(folio);
iput(inode);
return true;
@@ -1875,44 +2008,40 @@ static bool flush_dirty_inode(struct page *page)
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
pgoff_t index = 0;
- struct pagevec pvec;
- int nr_pages;
+ struct folio_batch fbatch;
+ int nr_folios;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
- while ((nr_pages = pagevec_lookup_tag(&pvec,
- NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+ while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+ (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
- if (!IS_DNODE(page))
+ if (!IS_INODE(folio))
continue;
- lock_page(page);
-
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
-continue_unlock:
- unlock_page(page);
- continue;
- }
+ folio_lock(folio);
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
+ if (unlikely(!is_node_folio(folio)))
+ goto unlock;
+ if (!folio_test_dirty(folio))
+ goto unlock;
/* flush inline_data, if it's async context. */
- if (page_private_inline(page)) {
- clear_page_private_inline(page);
- unlock_page(page);
- flush_inline_data(sbi, ino_of_node(page));
+ if (folio_test_f2fs_inline(folio)) {
+ folio_clear_f2fs_inline(folio);
+ folio_unlock(folio);
+ flush_inline_data(sbi, ino_of_node(folio));
continue;
}
- unlock_page(page);
+unlock:
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
}
@@ -1922,25 +2051,25 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
bool do_balance, enum iostat_type io_type)
{
pgoff_t index;
- struct pagevec pvec;
+ struct folio_batch fbatch;
int step = 0;
int nwritten = 0;
int ret = 0;
- int nr_pages, done = 0;
+ int nr_folios, done = 0;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
next_step:
index = 0;
- while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
- NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+ while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
+ &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+ &fbatch))) {
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
bool submitted = false;
- bool may_dirty = true;
/* give a priority to WB_SYNC threads */
if (atomic_read(&sbi->wb_sync_req[NODE]) &&
@@ -1955,27 +2084,27 @@ next_step:
* 1. dentry dnodes
* 2. file dnodes
*/
- if (step == 0 && IS_DNODE(page))
+ if (step == 0 && IS_DNODE(folio))
continue;
- if (step == 1 && (!IS_DNODE(page) ||
- is_cold_node(page)))
+ if (step == 1 && (!IS_DNODE(folio) ||
+ is_cold_node(folio)))
continue;
- if (step == 2 && (!IS_DNODE(page) ||
- !is_cold_node(page)))
+ if (step == 2 && (!IS_DNODE(folio) ||
+ !is_cold_node(folio)))
continue;
lock_node:
if (wbc->sync_mode == WB_SYNC_ALL)
- lock_page(page);
- else if (!trylock_page(page))
+ folio_lock(folio);
+ else if (!folio_trylock(folio))
continue;
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+ if (unlikely(!is_node_folio(folio))) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
@@ -1985,39 +2114,38 @@ continue_unlock:
goto write_node;
/* flush inline_data */
- if (page_private_inline(page)) {
- clear_page_private_inline(page);
- unlock_page(page);
- flush_inline_data(sbi, ino_of_node(page));
+ if (folio_test_f2fs_inline(folio)) {
+ folio_clear_f2fs_inline(folio);
+ folio_unlock(folio);
+ flush_inline_data(sbi, ino_of_node(folio));
goto lock_node;
}
/* flush dirty inode */
- if (IS_INODE(page) && may_dirty) {
- may_dirty = false;
- if (flush_dirty_inode(page))
- goto lock_node;
- }
+ if (IS_INODE(folio) && flush_dirty_inode(folio))
+ goto lock_node;
write_node:
- f2fs_wait_on_page_writeback(page, NODE, true, true);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
+ set_fsync_mark(folio, 0);
+ set_dentry_mark(folio, 0);
- ret = __write_node_page(page, false, &submitted,
- wbc, do_balance, io_type, NULL);
- if (ret)
- unlock_page(page);
- else if (submitted)
+ if (!__write_node_folio(folio, false, &submitted,
+ wbc, do_balance, io_type, NULL)) {
+ folio_batch_release(&fbatch);
+ ret = -EIO;
+ goto out;
+ }
+ if (submitted)
nwritten++;
if (--wbc->nr_to_write == 0)
break;
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
if (wbc->nr_to_write == 0) {
@@ -2046,13 +2174,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
unsigned int seq_id)
{
struct fsync_node_entry *fn;
- struct page *page;
struct list_head *head = &sbi->fsync_node_list;
unsigned long flags;
unsigned int cur_seq_id = 0;
- int ret2, ret = 0;
while (seq_id && cur_seq_id < seq_id) {
+ struct folio *folio;
+
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
if (list_empty(head)) {
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
@@ -2064,25 +2192,16 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
break;
}
cur_seq_id = fn->seq_id;
- page = fn->page;
- get_page(page);
+ folio = fn->folio;
+ folio_get(folio);
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
- f2fs_wait_on_page_writeback(page, NODE, true, false);
- if (TestClearPageError(page))
- ret = -EIO;
+ f2fs_folio_wait_writeback(folio, NODE, true, false);
- put_page(page);
-
- if (ret)
- break;
+ folio_put(folio);
}
- ret2 = filemap_check_errors(NODE_MAPPING(sbi));
- if (!ret)
- ret = ret2;
-
- return ret;
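+ /* writeback errors are recorded in the mapping; report them here */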
+ return filemap_check_errors(NODE_MAPPING(sbi));
}
static int f2fs_write_node_pages(struct address_space *mapping,
@@ -2106,8 +2225,12 @@ static int f2fs_write_node_pages(struct address_space *mapping,
if (wbc->sync_mode == WB_SYNC_ALL)
atomic_inc(&sbi->wb_sync_req[NODE]);
- else if (atomic_read(&sbi->wb_sync_req[NODE]))
+ else if (atomic_read(&sbi->wb_sync_req[NODE])) {
+ /* to avoid potential deadlock */
+ if (current->plug)
+ blk_finish_plug(current->plug);
goto skip_write;
+ }
trace_f2fs_writepages(mapping->host, wbc, NODE);
@@ -2127,37 +2250,34 @@ skip_write:
return 0;
}
-static int f2fs_set_node_page_dirty(struct page *page)
+static bool f2fs_dirty_node_folio(struct address_space *mapping,
+ struct folio *folio)
{
- trace_f2fs_set_page_dirty(page, NODE);
+ trace_f2fs_set_page_dirty(folio, NODE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
- if (IS_INODE(page))
- f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+ if (IS_INODE(folio))
+ f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
#endif
- if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
- inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
- set_page_private_reference(page);
- return 1;
+ if (filemap_dirty_folio(mapping, folio)) {
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
+ folio_set_f2fs_reference(folio);
+ return true;
}
- return 0;
+ return false;
}
/*
* Structure of the f2fs node operations
*/
const struct address_space_operations f2fs_node_aops = {
- .writepage = f2fs_write_node_page,
.writepages = f2fs_write_node_pages,
- .set_page_dirty = f2fs_set_node_page_dirty,
- .invalidatepage = f2fs_invalidate_page,
- .releasepage = f2fs_release_page,
-#ifdef CONFIG_MIGRATION
- .migratepage = f2fs_migrate_page,
-#endif
+ .dirty_folio = f2fs_dirty_node_folio,
+ .invalidate_folio = f2fs_invalidate_folio,
+ .release_folio = f2fs_release_folio,
+ .migrate_folio = filemap_migrate_folio,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
@@ -2214,24 +2334,6 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
}
}
-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned int i;
- bool ret = true;
-
- down_read(&nm_i->nat_tree_lock);
- for (i = 0; i < nm_i->nat_blocks; i++) {
- if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
- ret = false;
- break;
- }
- }
- up_read(&nm_i->nat_tree_lock);
-
- return ret;
-}
-
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
bool set, bool build)
{
@@ -2263,7 +2365,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *e;
struct nat_entry *ne;
- int err = -EINVAL;
+ int err;
bool ret = false;
/* 0 nid should not be used */
@@ -2277,7 +2379,10 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
i->nid = nid;
i->state = FREE_NID;
- radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, err);
+
+ err = -EINVAL;
spin_lock(&nm_i->nid_list_lock);
@@ -2296,14 +2401,14 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
* - __lookup_nat_cache
* - f2fs_add_link
* - f2fs_init_inode_metadata
- * - f2fs_new_inode_page
- * - f2fs_new_node_page
+ * - f2fs_new_inode_folio
+ * - f2fs_new_node_folio
* - set_node_addr
* - f2fs_alloc_nid_done
* - __remove_nid_from_list(PREALLOC_NID)
* - __insert_nid_to_list(FREE_NID)
*/
- ne = __lookup_nat_cache(nm_i, nid);
+ ne = __lookup_nat_cache(nm_i, nid, false);
if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR))
goto err_out;
@@ -2350,10 +2455,9 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
}
static int scan_nat_page(struct f2fs_sb_info *sbi,
- struct page *nat_page, nid_t start_nid)
+ struct f2fs_nat_block *nat_blk, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct f2fs_nat_block *nat_blk = page_address(nat_page);
block_t blk_addr;
unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
@@ -2369,7 +2473,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
if (blk_addr == NEW_ADDR)
- return -EINVAL;
+ return -EFSCORRUPTED;
if (blk_addr == NULL_ADDR) {
add_free_nid(sbi, start_nid, true, true);
@@ -2410,7 +2514,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
unsigned int i, idx;
nid_t nid;
- down_read(&nm_i->nat_tree_lock);
+ f2fs_down_read(&nm_i->nat_tree_lock);
for (i = 0; i < nm_i->nat_blocks; i++) {
if (!test_bit_le(i, nm_i->nat_block_bitmap))
@@ -2433,7 +2537,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
out:
scan_curseg_cache(sbi);
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
}
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
@@ -2468,23 +2572,31 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
- down_read(&nm_i->nat_tree_lock);
+ f2fs_down_read(&nm_i->nat_tree_lock);
while (1) {
if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
nm_i->nat_block_bitmap)) {
- struct page *page = get_current_nat_page(sbi, nid);
+ struct folio *folio = get_current_nat_folio(sbi, nid);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
} else {
- ret = scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
+ ret = scan_nat_page(sbi, folio_address(folio),
+ nid);
+ f2fs_folio_put(folio, true);
}
if (ret) {
- up_read(&nm_i->nat_tree_lock);
- f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+ f2fs_up_read(&nm_i->nat_tree_lock);
+
+ if (ret == -EFSCORRUPTED) {
+ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi,
+ ERROR_INCONSISTENT_NAT);
+ }
+
return ret;
}
}
@@ -2503,7 +2615,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
/* find free nids from current sum_pages */
scan_curseg_cache(sbi);
- up_read(&nm_i->nat_tree_lock);
+ f2fs_up_read(&nm_i->nat_tree_lock);
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
@@ -2532,10 +2644,8 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
- if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
- f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
+ if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false;
- }
spin_lock(&nm_i->nid_list_lock);
@@ -2549,6 +2659,16 @@ retry:
f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
i = list_first_entry(&nm_i->free_nid_list,
struct free_nid, list);
+
+ if (unlikely(is_invalid_nid(sbi, i->nid))) {
+ spin_unlock(&nm_i->nid_list_lock);
+ f2fs_err(sbi, "Corrupted nid %u in free_nid_list",
+ i->nid);
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_CORRUPTED_NID);
+ return false;
+ }
+
*nid = i->nid;
__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
@@ -2650,18 +2770,18 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
return nr - nr_shrink;
}
-int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
{
void *src_addr, *dst_addr;
size_t inline_size;
- struct page *ipage;
+ struct folio *ifolio;
struct f2fs_inode *ri;
- ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(ipage))
- return PTR_ERR(ipage);
+ ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ifolio))
+ return PTR_ERR(ifolio);
- ri = F2FS_INODE(page);
+ ri = F2FS_INODE(folio);
if (ri->i_inline & F2FS_INLINE_XATTR) {
if (!f2fs_has_inline_xattr(inode)) {
set_inode_flag(inode, FI_INLINE_XATTR);
@@ -2675,37 +2795,37 @@ int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
goto update_inode;
}
- dst_addr = inline_xattr_addr(inode, ipage);
- src_addr = inline_xattr_addr(inode, page);
+ dst_addr = inline_xattr_addr(inode, ifolio);
+ src_addr = inline_xattr_addr(inode, folio);
inline_size = inline_xattr_size(inode);
- f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ f2fs_folio_wait_writeback(ifolio, NODE, true, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
- f2fs_update_inode(inode, ipage);
- f2fs_put_page(ipage, 1);
+ f2fs_update_inode(inode, ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
-int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
+int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
nid_t new_xnid;
struct dnode_of_data dn;
struct node_info ni;
- struct page *xpage;
+ struct folio *xfolio;
int err;
if (!prev_xnid)
goto recover_xnid;
/* 1: invalidate the previous xattr nid */
- err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+ err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
if (err)
return err;
- f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2715,55 +2835,57 @@ recover_xnid:
return -ENOSPC;
set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
- xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
- if (IS_ERR(xpage)) {
+ xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(xfolio)) {
f2fs_alloc_nid_failed(sbi, new_xnid);
- return PTR_ERR(xpage);
+ return PTR_ERR(xfolio);
}
f2fs_alloc_nid_done(sbi, new_xnid);
f2fs_update_inode_page(inode);
/* 3: update and set xattr node page dirty */
- memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
-
- set_page_dirty(xpage);
- f2fs_put_page(xpage, 1);
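+ /* only copy and dirty the xattr block when the caller supplied one */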
+ if (folio) {
+ memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio),
+ VALID_XATTR_BLOCK_SIZE);
+ folio_mark_dirty(xfolio);
+ }
+ f2fs_folio_put(xfolio, true);
return 0;
}
-int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio)
{
struct f2fs_inode *src, *dst;
- nid_t ino = ino_of_node(page);
+ nid_t ino = ino_of_node(folio);
struct node_info old_ni, new_ni;
- struct page *ipage;
+ struct folio *ifolio;
int err;
- err = f2fs_get_node_info(sbi, ino, &old_ni);
+ err = f2fs_get_node_info(sbi, ino, &old_ni, false);
if (err)
return err;
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
retry:
- ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
- if (!ipage) {
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
+ if (IS_ERR(ifolio)) {
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
/* Should not use this inode from free nid list */
remove_free_nid(sbi, ino);
- if (!PageUptodate(ipage))
- SetPageUptodate(ipage);
- fill_node_footer(ipage, ino, ino, 0, true);
- set_cold_node(ipage, false);
+ if (!folio_test_uptodate(ifolio))
+ folio_mark_uptodate(ifolio);
+ fill_node_footer(ifolio, ino, ino, 0, true);
+ set_cold_node(ifolio, false);
- src = F2FS_INODE(page);
- dst = F2FS_INODE(ipage);
+ src = F2FS_INODE(folio);
+ dst = F2FS_INODE(ifolio);
memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
dst->i_size = 0;
@@ -2799,8 +2921,8 @@ retry:
WARN_ON(1);
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
inc_valid_inode_count(sbi);
- set_page_dirty(ipage);
- f2fs_put_page(ipage, 1);
+ folio_mark_dirty(ifolio);
+ f2fs_folio_put(ifolio, true);
return 0;
}
@@ -2813,7 +2935,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
int i, idx, last_offset, nrpages;
/* scan the node segment */
- last_offset = sbi->blocks_per_seg;
+ last_offset = BLKS_PER_SEG(sbi);
addr = START_BLOCK(sbi, segno);
sum_entry = &sum->entries[0];
@@ -2824,17 +2946,17 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
for (idx = addr; idx < addr + nrpages; idx++) {
- struct page *page = f2fs_get_tmp_page(sbi, idx);
+ struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- rn = F2FS_NODE(page);
+ rn = F2FS_NODE(folio);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
sum_entry->ofs_in_node = 0;
sum_entry++;
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
}
invalidate_mapping_pages(META_MAPPING(sbi), addr,
@@ -2849,6 +2971,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
int i;
+ bool init_dirty;
down_write(&curseg->journal_rwsem);
for (i = 0; i < nats_in_cursum(journal); i++) {
@@ -2859,12 +2982,15 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
if (f2fs_check_nid_range(sbi, nid))
continue;
+ init_dirty = false;
+
raw_ne = nat_in_journal(journal, i);
- ne = __lookup_nat_cache(nm_i, nid);
+ ne = __lookup_nat_cache(nm_i, nid, true);
if (!ne) {
+ init_dirty = true;
ne = __alloc_nat_entry(sbi, nid, true);
- __init_nat_entry(nm_i, ne, &raw_ne, true);
+ __init_nat_entry(nm_i, ne, &raw_ne, true, true);
}
/*
@@ -2879,7 +3005,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
spin_unlock(&nm_i->nid_list_lock);
}
- __set_nat_cache_dirty(nm_i, ne);
+ __set_nat_cache_dirty(nm_i, ne, init_dirty);
}
update_nats_in_cursum(journal, -i);
up_write(&curseg->journal_rwsem);
@@ -2903,32 +3029,15 @@ add_out:
list_add_tail(&nes->set_list, head);
}
-static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
- unsigned int valid)
-{
- if (valid == 0) {
- __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
- __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
- return;
- }
-
- __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
- if (valid == NAT_ENTRY_PER_BLOCK)
- __set_bit_le(nat_ofs, nm_i->full_nat_bits);
- else
- __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
-}
-
-static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
- struct page *page)
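+/*
+ * Count the valid entries in this NAT block and mark it as full or empty
+ * in the nat_bits bitmaps; a no-op unless nat_bits is enabled.
+ */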
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ const struct f2fs_nat_block *nat_blk)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
- struct f2fs_nat_block *nat_blk = page_address(page);
int valid = 0;
int i = 0;
- if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+ if (!enabled_nat_bits(sbi, NULL))
return;
if (nat_index == 0) {
@@ -2939,36 +3048,17 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
valid++;
}
-
- __update_nat_bits(nm_i, nat_index, valid);
-}
-
-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned int nat_ofs;
-
- down_read(&nm_i->nat_tree_lock);
-
- for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
- unsigned int valid = 0, nid_ofs = 0;
-
- /* handle nid zero due to it should never be used */
- if (unlikely(nat_ofs == 0)) {
- valid = 1;
- nid_ofs = 1;
- }
-
- for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
- if (!test_bit_le(nid_ofs,
- nm_i->free_nid_bitmap[nat_ofs]))
- valid++;
- }
-
- __update_nat_bits(nm_i, nat_ofs, valid);
+ if (valid == 0) {
+ __set_bit_le(nat_index, nm_i->empty_nat_bits);
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
+ return;
}
- up_read(&nm_i->nat_tree_lock);
+ __clear_bit_le(nat_index, nm_i->empty_nat_bits);
+ if (valid == NAT_ENTRY_PER_BLOCK)
+ __set_bit_le(nat_index, nm_i->full_nat_bits);
+ else
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
}
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -2980,25 +3070,25 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
bool to_journal = true;
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
- struct page *page = NULL;
+ struct folio *folio = NULL;
/*
* there are two steps to flush nat entries:
* #1, flush nat entries to journal in current hot data summary block.
* #2, flush nat entries to nat page.
*/
- if ((cpc->reason & CP_UMOUNT) ||
+ if (enabled_nat_bits(sbi, cpc) ||
!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
to_journal = false;
if (to_journal) {
down_write(&curseg->journal_rwsem);
} else {
- page = get_next_nat_page(sbi, start_nid);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = get_next_nat_folio(sbi, start_nid);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- nat_blk = page_address(page);
+ nat_blk = folio_address(folio);
f2fs_bug_on(sbi, !nat_blk);
}
@@ -3034,8 +3124,8 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
if (to_journal) {
up_write(&curseg->journal_rwsem);
} else {
- update_nat_bits(sbi, start_nid, page);
- f2fs_put_page(page, 1);
+ __update_nat_bits(sbi, start_nid, nat_blk);
+ f2fs_folio_put(folio, true);
}
/* Allow dirty nats by node block allocation in write_begin */
@@ -3054,7 +3144,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
- struct nat_entry_set *setvec[SETVEC_SIZE];
+ struct nat_entry_set *setvec[NAT_VEC_SIZE];
struct nat_entry_set *set, *tmp;
unsigned int found;
nid_t set_idx = 0;
@@ -3065,29 +3155,29 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* during unmount, let's flush nat_bits before checking
* nat_cnt[DIRTY_NAT].
*/
- if (cpc->reason & CP_UMOUNT) {
- down_write(&nm_i->nat_tree_lock);
+ if (enabled_nat_bits(sbi, cpc)) {
+ f2fs_down_write(&nm_i->nat_tree_lock);
remove_nats_in_journal(sbi);
- up_write(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
}
if (!nm_i->nat_cnt[DIRTY_NAT])
return 0;
- down_write(&nm_i->nat_tree_lock);
+ f2fs_down_write(&nm_i->nat_tree_lock);
/*
* if there is not enough space in journal to store dirty nat
* entries, remove all entries from journal and merge them
* into nat entry set.
*/
- if (cpc->reason & CP_UMOUNT ||
+ if (enabled_nat_bits(sbi, cpc) ||
!__has_cursum_space(journal,
nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
remove_nats_in_journal(sbi);
while ((found = __gang_lookup_nat_set(nm_i,
- set_idx, SETVEC_SIZE, setvec))) {
+ set_idx, NAT_VEC_SIZE, setvec))) {
unsigned idx;
set_idx = setvec[found - 1]->set + 1;
@@ -3103,7 +3193,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break;
}
- up_write(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
/* Allow dirty nats by node block allocation in write_begin */
return err;
@@ -3118,40 +3208,38 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
__u64 cp_ver = cur_cp_version(ckpt);
block_t nat_bits_addr;
+ if (!enabled_nat_bits(sbi, NULL))
+ return 0;
+
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i->nat_bits = f2fs_kvzalloc(sbi,
- nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
+ F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
- nm_i->full_nat_bits = nm_i->nat_bits + 8;
- nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
-
- if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
- return 0;
-
- nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
+ nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) {
- struct page *page;
+ struct folio *folio;
- page = f2fs_get_meta_page(sbi, nat_bits_addr++);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
- page_address(page), F2FS_BLKSIZE);
- f2fs_put_page(page, 1);
+ memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
+ folio_address(folio), F2FS_BLKSIZE);
+ f2fs_folio_put(folio, true);
}
cp_ver |= (cur_cp_crc(ckpt) << 32);
if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
- clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
- cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
+ disable_nat_bits(sbi, true);
return 0;
}
+ nm_i->full_nat_bits = nm_i->nat_bits + 8;
+ nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
+
f2fs_notice(sbi, "Found nat_bits in checkpoint");
return 0;
}
@@ -3162,7 +3250,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
unsigned int i = 0;
nid_t nid, last_nid;
- if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+ if (!enabled_nat_bits(sbi, NULL))
return;
for (i = 0; i < nm_i->nat_blocks; i++) {
@@ -3213,6 +3301,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
+ nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
INIT_LIST_HEAD(&nm_i->free_nid_list);
@@ -3223,7 +3312,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->nid_list_lock);
- init_rwsem(&nm_i->nat_tree_lock);
+ init_f2fs_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -3233,6 +3322,9 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
if (!nm_i->nat_bitmap)
return -ENOMEM;
+ if (!test_opt(sbi, NAT_BITS))
+ disable_nat_bits(sbi, true);
+
err = __get_nat_bitmaps(sbi);
if (err)
return err;
@@ -3307,8 +3399,9 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *next_i;
- struct nat_entry *natvec[NATVEC_SIZE];
- struct nat_entry_set *setvec[SETVEC_SIZE];
+ void *vec[NAT_VEC_SIZE];
+ struct nat_entry **natvec = (struct nat_entry **)vec;
+ struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
nid_t nid = 0;
unsigned int found;
@@ -3329,9 +3422,9 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
- down_write(&nm_i->nat_tree_lock);
+ f2fs_down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i,
- nid, NATVEC_SIZE, natvec))) {
+ nid, NAT_VEC_SIZE, natvec))) {
unsigned idx;
nid = nat_get_nid(natvec[found - 1]) + 1;
@@ -3347,8 +3440,9 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
/* destroy nat set cache */
nid = 0;
+ memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
while ((found = __gang_lookup_nat_set(nm_i,
- nid, SETVEC_SIZE, setvec))) {
+ nid, NAT_VEC_SIZE, setvec))) {
unsigned idx;
nid = setvec[found - 1]->set + 1;
@@ -3359,7 +3453,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
kmem_cache_free(nat_entry_set_slab, setvec[idx]);
}
}
- up_write(&nm_i->nat_tree_lock);
+ f2fs_up_write(&nm_i->nat_tree_lock);
kvfree(nm_i->nat_block_bitmap);
if (nm_i->free_nid_bitmap) {
@@ -3371,10 +3465,10 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
}
kvfree(nm_i->free_nid_count);
- kvfree(nm_i->nat_bitmap);
+ kfree(nm_i->nat_bitmap);
kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
- kvfree(nm_i->nat_bitmap_mir);
+ kfree(nm_i->nat_bitmap_mir);
#endif
sbi->nm_info = NULL;
kfree(nm_i);
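The reworked __get_nat_bitmaps() above only wires up the full/empty bitmap pointers after the checkpoint version embedded in the buffer has been verified. A minimal standalone sketch of that assumed layout and check (plain C with stand-in names, not f2fs code):

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/*
 * Assumed buffer layout, mirroring the offsets used above:
 *   bytes [0, 8)                           little-endian checkpoint version
 *   bytes [8, 8 + nat_bits_bytes)          full_nat_bits
 *   bytes [8 + nat_bits_bytes, 8 + 2 * nat_bits_bytes)  empty_nat_bits
 */
struct nat_bits_view {
	const uint8_t *full;
	const uint8_t *empty;
};

static bool nat_bits_attach(const uint8_t *buf, size_t nat_bits_bytes,
			    uint64_t expected_cp_ver_le,
			    struct nat_bits_view *view)
{
	uint64_t stored;

	memcpy(&stored, buf, sizeof(stored));	/* first 8 bytes */
	if (stored != expected_cp_ver_le)
		return false;			/* stale nat_bits, ignore them */

	view->full = buf + 8;
	view->empty = view->full + nat_bits_bytes;
	return true;
}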
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 18b98cf0465b..9cb8dcf8d417 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -31,9 +31,11 @@
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD 100000
+/* control total # of node writes used for roll-forward recovery */
+#define DEF_RF_NODE_BLOCKS 0
+
/* vector size for gang look-up from nat cache that consists of radix tree */
-#define NATVEC_SIZE 64
-#define SETVEC_SIZE 32
+#define NAT_VEC_SIZE 32
/* return value for read_node_page */
#define LOCKED_PAGE 1
@@ -50,6 +52,14 @@ enum {
IS_PREALLOC, /* nat entry is preallocated */
};
+/* For node type in __get_node_folio() */
+enum node_type {
+ NODE_TYPE_REGULAR,
+ NODE_TYPE_INODE,
+ NODE_TYPE_XATTR,
+ NODE_TYPE_NON_INODE,
+};
+
/*
* For node information
*/
@@ -90,17 +100,15 @@ static inline void copy_node_info(struct node_info *dst,
static inline void set_nat_flag(struct nat_entry *ne,
unsigned int type, bool set)
{
- unsigned char mask = 0x01 << type;
if (set)
- ne->ni.flag |= mask;
+ ne->ni.flag |= BIT(type);
else
- ne->ni.flag &= ~mask;
+ ne->ni.flag &= ~BIT(type);
}
static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
- unsigned char mask = 0x01 << type;
- return ne->ni.flag & mask;
+ return ne->ni.flag & BIT(type);
}
static inline void nat_reset_flag(struct nat_entry *ne)
@@ -143,8 +151,8 @@ enum mem_type {
NAT_ENTRIES, /* indicates the cached nat entry */
DIRTY_DENTS, /* indicates dirty dentry pages */
INO_ENTRIES, /* indicates inode entries */
- EXTENT_CACHE, /* indicates extent cache */
- INMEM_PAGES, /* indicates inmemory pages */
+ READ_EXTENT_CACHE, /* indicates read extent cache */
+ AGE_EXTENT_CACHE, /* indicates age extent cache */
DISCARD_CACHE, /* indicates memory of cached discard cmds */
COMPRESS_PAGE, /* indicates memory of cached compressed pages */
BASE_CHECK, /* check kernel status */
@@ -208,10 +216,10 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
block_addr = (pgoff_t)(nm_i->nat_blkaddr +
(block_off << 1) -
- (block_off & (sbi->blocks_per_seg - 1)));
+ (block_off & (BLKS_PER_SEG(sbi) - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
- block_addr += sbi->blocks_per_seg;
+ block_addr += BLKS_PER_SEG(sbi);
return block_addr;
}
@@ -222,7 +230,7 @@ static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
struct f2fs_nm_info *nm_i = NM_I(sbi);
block_addr -= nm_i->nat_blkaddr;
- block_addr ^= 1 << sbi->log_blocks_per_seg;
+ block_addr ^= BIT(sbi->log_blocks_per_seg);
return block_addr + nm_i->nat_blkaddr;
}
@@ -236,41 +244,41 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
#endif
}
-static inline nid_t ino_of_node(struct page *node_page)
+static inline nid_t ino_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le32_to_cpu(rn->footer.ino);
}
-static inline nid_t nid_of_node(struct page *node_page)
+static inline nid_t nid_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le32_to_cpu(rn->footer.nid);
}
-static inline unsigned int ofs_of_node(struct page *node_page)
+static inline unsigned int ofs_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
unsigned flag = le32_to_cpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
-static inline __u64 cpver_of_node(struct page *node_page)
+static inline __u64 cpver_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le64_to_cpu(rn->footer.cp_ver);
}
-static inline block_t next_blkaddr_of_node(struct page *node_page)
+static inline block_t next_blkaddr_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(node_folio);
return le32_to_cpu(rn->footer.next_blkaddr);
}
-static inline void fill_node_footer(struct page *page, nid_t nid,
+static inline void fill_node_footer(const struct folio *folio, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
unsigned int old_flag = 0;
if (reset)
@@ -286,17 +294,18 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
(old_flag & OFFSET_BIT_MASK));
}
-static inline void copy_node_footer(struct page *dst, struct page *src)
+static inline void copy_node_footer(const struct folio *dst,
+ const struct folio *src)
{
struct f2fs_node *src_rn = F2FS_NODE(src);
struct f2fs_node *dst_rn = F2FS_NODE(dst);
memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}
-static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
+static inline void fill_node_footer_blkaddr(struct folio *folio, block_t blkaddr)
{
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
+ struct f2fs_node *rn = F2FS_NODE(folio);
__u64 cp_ver = cur_cp_version(ckpt);
if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
@@ -306,19 +315,19 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline bool is_recoverable_dnode(struct page *page)
+static inline bool is_recoverable_dnode(const struct folio *folio)
{
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_F_SB(folio));
__u64 cp_ver = cur_cp_version(ckpt);
/* Don't care crc part, if fsck.f2fs sets it. */
if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
- return (cp_ver << 32) == (cpver_of_node(page) << 32);
+ return (cp_ver << 32) == (cpver_of_node(folio) << 32);
if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
cp_ver |= (cur_cp_crc(ckpt) << 32);
- return cp_ver == cpver_of_node(page);
+ return cp_ver == cpver_of_node(folio);
}
/*
@@ -342,9 +351,9 @@ static inline bool is_recoverable_dnode(struct page *page)
* `- indirect node ((6 + 2N) + (N - 1)(N + 1))
* `- direct node
*/
-static inline bool IS_DNODE(struct page *node_page)
+static inline bool IS_DNODE(const struct folio *node_folio)
{
- unsigned int ofs = ofs_of_node(node_page);
+ unsigned int ofs = ofs_of_node(node_folio);
if (f2fs_has_xattr_block(ofs))
return true;
@@ -360,22 +369,22 @@ static inline bool IS_DNODE(struct page *node_page)
return true;
}
-static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct folio *folio, int off, nid_t nid, bool i)
{
- struct f2fs_node *rn = F2FS_NODE(p);
+ struct f2fs_node *rn = F2FS_NODE(folio);
- f2fs_wait_on_page_writeback(p, NODE, true, true);
+ f2fs_folio_wait_writeback(folio, NODE, true, true);
if (i)
rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
else
rn->in.nid[off] = cpu_to_le32(nid);
- return set_page_dirty(p);
+ return folio_mark_dirty(folio);
}
-static inline nid_t get_nid(struct page *p, int off, bool i)
+static inline nid_t get_nid(const struct folio *folio, int off, bool i)
{
- struct f2fs_node *rn = F2FS_NODE(p);
+ struct f2fs_node *rn = F2FS_NODE(folio);
if (i)
return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
@@ -389,41 +398,41 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
* - Mark cold data pages in page cache
*/
-static inline int is_node(struct page *page, int type)
+static inline int is_node(const struct folio *folio, int type)
{
- struct f2fs_node *rn = F2FS_NODE(page);
- return le32_to_cpu(rn->footer.flag) & (1 << type);
+ struct f2fs_node *rn = F2FS_NODE(folio);
+ return le32_to_cpu(rn->footer.flag) & BIT(type);
}
-#define is_cold_node(page) is_node(page, COLD_BIT_SHIFT)
-#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
-#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
+#define is_cold_node(folio) is_node(folio, COLD_BIT_SHIFT)
+#define is_fsync_dnode(folio) is_node(folio, FSYNC_BIT_SHIFT)
+#define is_dent_dnode(folio) is_node(folio, DENT_BIT_SHIFT)
-static inline void set_cold_node(struct page *page, bool is_dir)
+static inline void set_cold_node(const struct folio *folio, bool is_dir)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (is_dir)
- flag &= ~(0x1 << COLD_BIT_SHIFT);
+ flag &= ~BIT(COLD_BIT_SHIFT);
else
- flag |= (0x1 << COLD_BIT_SHIFT);
+ flag |= BIT(COLD_BIT_SHIFT);
rn->footer.flag = cpu_to_le32(flag);
}
-static inline void set_mark(struct page *page, int mark, int type)
+static inline void set_mark(struct folio *folio, int mark, int type)
{
- struct f2fs_node *rn = F2FS_NODE(page);
+ struct f2fs_node *rn = F2FS_NODE(folio);
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (mark)
- flag |= (0x1 << type);
+ flag |= BIT(type);
else
- flag &= ~(0x1 << type);
+ flag &= ~BIT(type);
rn->footer.flag = cpu_to_le32(flag);
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+ f2fs_inode_chksum_set(F2FS_F_SB(folio), folio);
#endif
}
-#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
-#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
+#define set_dentry_mark(folio, mark) set_mark(folio, mark, DENT_BIT_SHIFT)
+#define set_fsync_mark(folio, mark) set_mark(folio, mark, FSYNC_BIT_SHIFT)
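current_nat_addr() and next_nat_addr() above rely on every NAT block having two copies one segment apart, with a bitmap selecting the live copy; switching to the shadow copy is a single-bit XOR of the offset. A minimal sketch of that addressing trick (the segment size below is an assumed value, not taken from f2fs):

#include <stdint.h>

#define LOG_BLKS_PER_SEG 9	/* assumption: 512 blocks per segment */

/* Return the block address of the other copy of the same NAT block. */
static uint64_t other_nat_copy(uint64_t nat_blkaddr, uint64_t blk_addr)
{
	uint64_t off = blk_addr - nat_blkaddr;

	/* the two copies differ only in the "one segment" bit */
	return nat_blkaddr + (off ^ (1ULL << LOG_BLKS_PER_SEG));
}

The XOR works because the copies sit exactly one segment apart, so toggling that single bit of the block offset flips between them without any table lookup.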
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 6a1b4668d933..c3415ebb9f50 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -5,9 +5,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
+#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
@@ -45,16 +46,16 @@
static struct kmem_cache *fsync_entry_slab;
-#ifdef CONFIG_UNICODE
-extern struct kmem_cache *f2fs_cf_name_slab;
-#endif
-
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false;
+ if (NM_I(sbi)->max_rf_node_blocks &&
+ percpu_counter_sum_positive(&sbi->rf_node_block_count) >=
+ NM_I(sbi)->max_rf_node_blocks)
+ return false;
return true;
}
@@ -148,26 +149,23 @@ static int init_recovered_filename(const struct inode *dir,
if (err)
return err;
f2fs_hash_filename(dir, fname);
-#ifdef CONFIG_UNICODE
/* Case-sensitive match is fine for recovery */
- kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
- fname->cf_name.name = NULL;
-#endif
+ f2fs_free_casefolded_name(fname);
} else {
f2fs_hash_filename(dir, fname);
}
return 0;
}
-static int recover_dentry(struct inode *inode, struct page *ipage,
+static int recover_dentry(struct inode *inode, struct folio *ifolio,
struct list_head *dir_list)
{
- struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
+ struct f2fs_inode *raw_inode = F2FS_INODE(ifolio);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
struct f2fs_filename fname;
struct qstr usr_fname;
- struct page *page;
+ struct folio *folio;
struct inode *dir, *einode;
struct fsync_inode_entry *entry;
int err = 0;
@@ -189,7 +187,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
if (err)
goto out;
retry:
- de = __f2fs_find_entry(dir, &fname, &page);
+ de = __f2fs_find_entry(dir, &fname, &folio);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_put;
@@ -214,11 +212,11 @@ retry:
iput(einode);
goto out_put;
}
- f2fs_delete_entry(de, page, dir, einode);
+ f2fs_delete_entry(de, folio, dir, einode);
iput(einode);
goto retry;
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ } else if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
} else {
err = f2fs_add_dentry(dir, &fname, inode,
inode->i_ino, inode->i_mode);
@@ -228,21 +226,21 @@ retry:
goto out;
out_put:
- f2fs_put_page(page, 0);
+ f2fs_folio_put(folio, false);
out:
if (file_enc_name(inode))
name = "<encrypted>";
else
name = raw_inode->i_name;
f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
- __func__, ino_of_node(ipage), name,
+ __func__, ino_of_node(ifolio), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
-static int recover_quota_data(struct inode *inode, struct page *page)
+static int recover_quota_data(struct inode *inode, struct folio *folio)
{
- struct f2fs_inode *raw = F2FS_INODE(page);
+ struct f2fs_inode *raw = F2FS_INODE(folio);
struct iattr attr;
uid_t i_uid = le32_to_cpu(raw->i_uid);
gid_t i_gid = le32_to_cpu(raw->i_gid);
@@ -250,18 +248,18 @@ static int recover_quota_data(struct inode *inode, struct page *page)
memset(&attr, 0, sizeof(attr));
- attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
- attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);
+ attr.ia_vfsuid = VFSUIDT_INIT(make_kuid(inode->i_sb->s_user_ns, i_uid));
+ attr.ia_vfsgid = VFSGIDT_INIT(make_kgid(inode->i_sb->s_user_ns, i_gid));
- if (!uid_eq(attr.ia_uid, inode->i_uid))
+ if (!vfsuid_eq(attr.ia_vfsuid, i_uid_into_vfsuid(&nop_mnt_idmap, inode)))
attr.ia_valid |= ATTR_UID;
- if (!gid_eq(attr.ia_gid, inode->i_gid))
+ if (!vfsgid_eq(attr.ia_vfsgid, i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
attr.ia_valid |= ATTR_GID;
if (!attr.ia_valid)
return 0;
- err = dquot_transfer(inode, &attr);
+ err = dquot_transfer(&nop_mnt_idmap, inode, &attr);
if (err)
set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
return err;
@@ -279,15 +277,16 @@ static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
clear_inode_flag(inode, FI_DATA_EXIST);
}
-static int recover_inode(struct inode *inode, struct page *page)
+static int recover_inode(struct inode *inode, struct folio *folio)
{
- struct f2fs_inode *raw = F2FS_INODE(page);
+ struct f2fs_inode *raw = F2FS_INODE(folio);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
char *name;
int err;
inode->i_mode = le16_to_cpu(raw->i_mode);
- err = recover_quota_data(inode, page);
+ err = recover_quota_data(inode, folio);
if (err)
return err;
@@ -304,29 +303,28 @@ static int recover_inode(struct inode *inode, struct page *page)
i_projid = (projid_t)le32_to_cpu(raw->i_projid);
kprojid = make_kprojid(&init_user_ns, i_projid);
- if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
+ if (!projid_eq(kprojid, fi->i_projid)) {
err = f2fs_transfer_project_quota(inode,
kprojid);
if (err)
return err;
- F2FS_I(inode)->i_projid = kprojid;
+ fi->i_projid = kprojid;
}
}
}
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
- inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
- inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
- inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
-
- F2FS_I(inode)->i_advise = raw->i_advise;
- F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
+ inode_set_atime(inode, le64_to_cpu(raw->i_atime),
+ le32_to_cpu(raw->i_atime_nsec));
+ inode_set_ctime(inode, le64_to_cpu(raw->i_ctime),
+ le32_to_cpu(raw->i_ctime_nsec));
+ inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
+ le32_to_cpu(raw->i_mtime_nsec));
+
+ fi->i_advise = raw->i_advise;
+ fi->i_flags = le32_to_cpu(raw->i_flags);
f2fs_set_inode_flags(inode);
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
- le16_to_cpu(raw->i_gc_failures);
+ fi->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
recover_inline_flags(inode, raw);
@@ -335,99 +333,150 @@ static int recover_inode(struct inode *inode, struct page *page)
if (file_enc_name(inode))
name = "<encrypted>";
else
- name = F2FS_INODE(page)->i_name;
+ name = F2FS_INODE(folio)->i_name;
f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
- ino_of_node(page), name, raw->i_inline);
+ ino_of_node(folio), name, raw->i_inline);
+ return 0;
+}
+
+static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
+ unsigned int ra_blocks, unsigned int blkaddr,
+ unsigned int next_blkaddr)
+{
+ if (blkaddr + 1 == next_blkaddr)
+ ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
+ ra_blocks * 2);
+ else if (next_blkaddr % BLKS_PER_SEG(sbi))
+ ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS,
+ ra_blocks / 2);
+ return ra_blocks;
+}
+
+/* Detect looped node chain with Floyd's cycle detection algorithm. */
+static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr,
+ block_t *blkaddr_fast, bool *is_detecting)
+{
+ unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
+ int i;
+
+ if (!*is_detecting)
+ return 0;
+
+ for (i = 0; i < 2; i++) {
+ struct folio *folio;
+
+ if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) {
+ *is_detecting = false;
+ return 0;
+ }
+
+ folio = f2fs_get_tmp_folio(sbi, *blkaddr_fast);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (!is_recoverable_dnode(folio)) {
+ f2fs_folio_put(folio, true);
+ *is_detecting = false;
+ return 0;
+ }
+
+ ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast,
+ next_blkaddr_of_node(folio));
+
+ *blkaddr_fast = next_blkaddr_of_node(folio);
+ f2fs_folio_put(folio, true);
+
+ f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks);
+ }
+
+ if (*blkaddr_fast == blkaddr) {
+ f2fs_notice(sbi, "%s: Detect looped node chain on blkaddr:%u."
+ " Run fsck to fix it.", __func__, blkaddr);
+ return -EINVAL;
+ }
return 0;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
- bool check_only)
+ bool check_only, bool *new_inode)
{
struct curseg_info *curseg;
- struct page *page = NULL;
- block_t blkaddr;
- unsigned int loop_cnt = 0;
- unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
- valid_user_blocks(sbi);
+ block_t blkaddr, blkaddr_fast;
+ bool is_detecting = true;
int err = 0;
/* get node pages in the current segment */
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+ blkaddr_fast = blkaddr;
while (1) {
struct fsync_inode_entry *entry;
+ struct folio *folio;
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
- page = f2fs_get_tmp_page(sbi, blkaddr);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_tmp_folio(sbi, blkaddr);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
break;
}
- if (!is_recoverable_dnode(page)) {
- f2fs_put_page(page, 1);
+ if (!is_recoverable_dnode(folio)) {
+ f2fs_folio_put(folio, true);
break;
}
- if (!is_fsync_dnode(page))
+ if (!is_fsync_dnode(folio))
goto next;
- entry = get_fsync_inode(head, ino_of_node(page));
+ entry = get_fsync_inode(head, ino_of_node(folio));
if (!entry) {
bool quota_inode = false;
if (!check_only &&
- IS_INODE(page) && is_dent_dnode(page)) {
- err = f2fs_recover_inode_page(sbi, page);
+ IS_INODE(folio) &&
+ is_dent_dnode(folio)) {
+ err = f2fs_recover_inode_page(sbi, folio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
quota_inode = true;
}
- /*
- * CP | dnode(F) | inode(DF)
- * For this case, we should not give up now.
- */
- entry = add_fsync_inode(sbi, head, ino_of_node(page),
+ entry = add_fsync_inode(sbi, head, ino_of_node(folio),
quota_inode);
if (IS_ERR(entry)) {
err = PTR_ERR(entry);
+ /*
+ * CP | dnode(F) | inode(DF)
+ * For this case, we should not give up now.
+ */
if (err == -ENOENT) {
- err = 0;
+ if (check_only)
+ *new_inode = true;
goto next;
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
}
entry->blkaddr = blkaddr;
- if (IS_INODE(page) && is_dent_dnode(page))
+ if (IS_INODE(folio) && is_dent_dnode(folio))
entry->last_dentry = blkaddr;
next:
- /* sanity check in order to detect looped node chain */
- if (++loop_cnt >= free_blocks ||
- blkaddr == next_blkaddr_of_node(page)) {
- f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
- __func__, blkaddr,
- next_blkaddr_of_node(page));
- f2fs_put_page(page, 1);
- err = -EINVAL;
- break;
- }
-
/* check next segment */
- blkaddr = next_blkaddr_of_node(page);
- f2fs_put_page(page, 1);
+ blkaddr = next_blkaddr_of_node(folio);
+ f2fs_folio_put(folio, true);
- f2fs_ra_meta_pages_cond(sbi, blkaddr);
+ err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast,
+ &is_detecting);
+ if (err)
+ break;
}
return err;
}
@@ -448,11 +497,11 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
struct f2fs_summary_block *sum_node;
struct f2fs_summary sum;
- struct page *sum_page, *node_page;
+ struct folio *sum_folio, *node_folio;
struct dnode_of_data tdn = *dn;
nid_t ino, nid;
struct inode *inode;
- unsigned int offset;
+ unsigned int offset, ofs_in_node, max_addrs;
block_t bidx;
int i;
@@ -470,35 +519,45 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
}
}
- sum_page = f2fs_get_sum_page(sbi, segno);
- if (IS_ERR(sum_page))
- return PTR_ERR(sum_page);
- sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ sum_folio = f2fs_get_sum_folio(sbi, segno);
+ if (IS_ERR(sum_folio))
+ return PTR_ERR(sum_folio);
+ sum_node = SUM_BLK_PAGE_ADDR(sum_folio, segno);
sum = sum_node->entries[blkoff];
- f2fs_put_page(sum_page, 1);
+ f2fs_folio_put(sum_folio, true);
got_it:
/* Use the locked dnode page and inode */
nid = le32_to_cpu(sum.nid);
+ ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+
+ max_addrs = ADDRS_PER_PAGE(dn->node_folio, dn->inode);
+ if (ofs_in_node >= max_addrs) {
+ f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
+ ofs_in_node, dn->inode->i_ino, nid, max_addrs);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUMMARY);
+ return -EFSCORRUPTED;
+ }
+
if (dn->inode->i_ino == nid) {
tdn.nid = nid;
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- tdn.node_page = dn->inode_page;
- tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+ if (!dn->inode_folio_locked)
+ folio_lock(dn->inode_folio);
+ tdn.node_folio = dn->inode_folio;
+ tdn.ofs_in_node = ofs_in_node;
goto truncate_out;
} else if (dn->nid == nid) {
- tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+ tdn.ofs_in_node = ofs_in_node;
goto truncate_out;
}
/* Get the node page */
- node_page = f2fs_get_node_page(sbi, nid);
- if (IS_ERR(node_page))
- return PTR_ERR(node_page);
+ node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
+ if (IS_ERR(node_folio))
+ return PTR_ERR(node_folio);
- offset = ofs_of_node(node_page);
- ino = ino_of_node(node_page);
- f2fs_put_page(node_page, 1);
+ offset = ofs_of_node(node_folio);
+ ino = ino_of_node(node_folio);
+ f2fs_folio_put(node_folio, true);
if (ino != dn->inode->i_ino) {
int ret;
@@ -524,8 +583,8 @@ got_it:
* if inode page is locked, unlock temporarily, but its reference
* count keeps alive.
*/
- if (ino == dn->inode->i_ino && dn->inode_page_locked)
- unlock_page(dn->inode_page);
+ if (ino == dn->inode->i_ino && dn->inode_folio_locked)
+ folio_unlock(dn->inode_folio);
set_new_dnode(&tdn, inode, NULL, NULL, 0);
if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
@@ -538,40 +597,53 @@ got_it:
out:
if (ino != dn->inode->i_ino)
iput(inode);
- else if (dn->inode_page_locked)
- lock_page(dn->inode_page);
+ else if (dn->inode_folio_locked)
+ folio_lock(dn->inode_folio);
return 0;
truncate_out:
if (f2fs_data_blkaddr(&tdn) == blkaddr)
f2fs_truncate_data_blocks_range(&tdn, 1);
- if (dn->inode->i_ino == nid && !dn->inode_page_locked)
- unlock_page(dn->inode_page);
+ if (dn->inode->i_ino == nid && !dn->inode_folio_locked)
+ folio_unlock(dn->inode_folio);
return 0;
}
+static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
+{
+ int i, err = 0;
+
+ for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
+ err = f2fs_reserve_new_block(dn);
+ if (!err)
+ break;
+ }
+
+ return err;
+}
+
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
- struct page *page)
+ struct folio *folio)
{
struct dnode_of_data dn;
struct node_info ni;
- unsigned int start, end;
+ unsigned int start = 0, end = 0, index;
int err = 0, recovered = 0;
/* step 1: recover xattr */
- if (IS_INODE(page)) {
- err = f2fs_recover_inline_xattr(inode, page);
+ if (IS_INODE(folio)) {
+ err = f2fs_recover_inline_xattr(inode, folio);
if (err)
goto out;
- } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
- err = f2fs_recover_xattr_data(inode, page);
+ } else if (f2fs_has_xattr_block(ofs_of_node(folio))) {
+ err = f2fs_recover_xattr_data(inode, folio);
if (!err)
recovered++;
goto out;
}
/* step 2: recover inline data */
- err = f2fs_recover_inline_data(inode, page);
+ err = f2fs_recover_inline_data(inode, folio);
if (err) {
if (err == 1)
err = 0;
@@ -579,41 +651,42 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
/* step 3: recover data indices */
- start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
- end = start + ADDRS_PER_PAGE(page, inode);
+ start = f2fs_start_bidx_of_node(ofs_of_node(folio), inode);
+ end = start + ADDRS_PER_PAGE(folio, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry_dn;
}
goto out;
}
- f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn.node_folio, NODE, true, true);
- err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (err)
goto err;
- f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
+ f2fs_bug_on(sbi, ni.ino != ino_of_node(folio));
- if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
+ if (ofs_of_node(dn.node_folio) != ofs_of_node(folio)) {
f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
- inode->i_ino, ofs_of_node(dn.node_page),
- ofs_of_node(page));
+ inode->i_ino, ofs_of_node(dn.node_folio),
+ ofs_of_node(folio));
err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
goto err;
}
- for (; start < end; start++, dn.ofs_in_node++) {
+ for (index = start; index < end; index++, dn.ofs_in_node++) {
block_t src, dest;
src = f2fs_data_blkaddr(&dn);
- dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);
+ dest = data_blkaddr(dn.inode, folio, dn.ofs_in_node);
if (__is_valid_data_blkaddr(src) &&
!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
@@ -638,9 +711,9 @@ retry_dn:
}
if (!file_keep_isize(inode) &&
- (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
+ (i_size_read(inode) <= ((loff_t)index << PAGE_SHIFT)))
f2fs_i_size_write(inode,
- (loff_t)(start + 1) << PAGE_SHIFT);
+ (loff_t)(index + 1) << PAGE_SHIFT);
/*
* dest is reserved block, invalidate src block
@@ -648,20 +721,17 @@ retry_dn:
*/
if (dest == NEW_ADDR) {
f2fs_truncate_data_blocks_range(&dn, 1);
- f2fs_reserve_new_block(&dn);
+
+ err = f2fs_reserve_new_block_retry(&dn);
+ if (err)
+ goto err;
continue;
}
/* dest is valid block, try to recover from src to dest */
if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
-
if (src == NULL_ADDR) {
- err = f2fs_reserve_new_block(&dn);
- while (err &&
- IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
- err = f2fs_reserve_new_block(&dn);
- /* We should not get -ENOSPC */
- f2fs_bug_on(sbi, err);
+ err = f2fs_reserve_new_block_retry(&dn);
if (err)
goto err;
}
@@ -670,13 +740,20 @@ retry_prev:
err = check_index_in_prev_nodes(sbi, dest, &dn);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry_prev;
}
goto err;
}
+ if (f2fs_is_valid_blkaddr(sbi, dest,
+ DATA_GENERIC_ENHANCE_UPDATE)) {
+ f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
+ dest, inode->i_ino, dn.ofs_in_node);
+ err = -EFSCORRUPTED;
+ goto err;
+ }
+
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
ni.version, false, false);
@@ -684,16 +761,18 @@ retry_prev:
}
}
- copy_node_footer(dn.node_page, page);
- fill_node_footer(dn.node_page, dn.nid, ni.ino,
- ofs_of_node(page), false);
- set_page_dirty(dn.node_page);
+ copy_node_footer(dn.node_folio, folio);
+ fill_node_footer(dn.node_folio, dn.nid, ni.ino,
+ ofs_of_node(folio), false);
+ folio_mark_dirty(dn.node_folio);
err:
f2fs_put_dnode(&dn);
out:
- f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
- inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
- recovered, err);
+ f2fs_notice(sbi, "recover_data: ino = %lx, nid = %x (i_size: %s), "
+ "range (%u, %u), recovered = %d, err = %d",
+ inode->i_ino, nid_of_node(folio),
+ file_keep_isize(inode) ? "keep" : "recover",
+ start, end, recovered, err);
return err;
}
@@ -701,9 +780,17 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
struct list_head *tmp_inode_list, struct list_head *dir_list)
{
struct curseg_info *curseg;
- struct page *page = NULL;
int err = 0;
block_t blkaddr;
+ unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
+ unsigned int recoverable_dnode = 0;
+ unsigned int fsynced_dnode = 0;
+ unsigned int total_dnode = 0;
+ unsigned int recovered_inode = 0;
+ unsigned int recovered_dentry = 0;
+ unsigned int recovered_dnode = 0;
+
+ f2fs_notice(sbi, "do_recover_data: start to recover dnode");
/* get node pages in the current segment */
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
@@ -711,96 +798,101 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
while (1) {
struct fsync_inode_entry *entry;
+ struct folio *folio;
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
- f2fs_ra_meta_pages_cond(sbi, blkaddr);
-
- page = f2fs_get_tmp_page(sbi, blkaddr);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
+ folio = f2fs_get_tmp_folio(sbi, blkaddr);
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
break;
}
- if (!is_recoverable_dnode(page)) {
- f2fs_put_page(page, 1);
+ if (!is_recoverable_dnode(folio)) {
+ f2fs_folio_put(folio, true);
break;
}
+ recoverable_dnode++;
- entry = get_fsync_inode(inode_list, ino_of_node(page));
+ entry = get_fsync_inode(inode_list, ino_of_node(folio));
if (!entry)
goto next;
+ fsynced_dnode++;
/*
* inode(x) | CP | inode(x) | dnode(F)
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (IS_INODE(page)) {
- err = recover_inode(entry->inode, page);
+ if (IS_INODE(folio)) {
+ err = recover_inode(entry->inode, folio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
+ recovered_inode++;
}
if (entry->last_dentry == blkaddr) {
- err = recover_dentry(entry->inode, page, dir_list);
+ err = recover_dentry(entry->inode, folio, dir_list);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
+ recovered_dentry++;
}
- err = do_recover_data(sbi, entry->inode, page);
+ err = do_recover_data(sbi, entry->inode, folio);
if (err) {
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
break;
}
+ recovered_dnode++;
if (entry->blkaddr == blkaddr)
list_move_tail(&entry->list, tmp_inode_list);
next:
+ ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
+ next_blkaddr_of_node(folio));
+
/* check next segment */
- blkaddr = next_blkaddr_of_node(page);
- f2fs_put_page(page, 1);
+ blkaddr = next_blkaddr_of_node(folio);
+ f2fs_folio_put(folio, true);
+
+ f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
+ total_dnode++;
}
if (!err)
- f2fs_allocate_new_segments(sbi);
+ err = f2fs_allocate_new_segments(sbi);
+
+ f2fs_notice(sbi, "do_recover_data: dnode: (recoverable: %u, fsynced: %u, "
+ "total: %u), recovered: (inode: %u, dentry: %u, dnode: %u), err: %d",
+ recoverable_dnode, fsynced_dnode, total_dnode, recovered_inode,
+ recovered_dentry, recovered_dnode, err);
return err;
}
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
- struct list_head inode_list, tmp_inode_list;
- struct list_head dir_list;
+ LIST_HEAD(inode_list);
+ LIST_HEAD(tmp_inode_list);
+ LIST_HEAD(dir_list);
int err;
int ret = 0;
unsigned long s_flags = sbi->sb->s_flags;
bool need_writecp = false;
- bool fix_curseg_write_pointer = false;
-#ifdef CONFIG_QUOTA
- int quota_enabled;
-#endif
+ bool new_inode = false;
- if (s_flags & SB_RDONLY) {
- f2fs_info(sbi, "recover fsync data on readonly fs");
- sbi->sb->s_flags &= ~SB_RDONLY;
- }
-
-#ifdef CONFIG_QUOTA
- /* Turn on quotas so that they are updated correctly */
- quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
-#endif
+ f2fs_notice(sbi, "f2fs_recover_fsync_data: recovery fsync data, "
+ "check_only: %d", check_only);
- INIT_LIST_HEAD(&inode_list);
- INIT_LIST_HEAD(&tmp_inode_list);
- INIT_LIST_HEAD(&dir_list);
+ if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
+ f2fs_info(sbi, "recover fsync data on readonly fs");
/* prevent checkpoint */
- down_write(&sbi->cp_global_sem);
+ f2fs_down_write(&sbi->cp_global_sem);
/* step #1: find fsynced inode numbers */
- err = find_fsync_dnodes(sbi, &inode_list, check_only);
- if (err || list_empty(&inode_list))
+ err = find_fsync_dnodes(sbi, &inode_list, check_only, &new_inode);
+ if (err < 0 || (list_empty(&inode_list) && (!check_only || !new_inode)))
goto skip;
if (check_only) {
@@ -817,8 +909,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
else
f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
- fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
-
destroy_fsync_dnodes(&inode_list, err);
destroy_fsync_dnodes(&tmp_inode_list, err);
@@ -836,16 +926,13 @@ skip:
* and the f2fs is not read only, check and fix zoned block devices'
* write pointer consistency.
*/
- if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
- f2fs_sb_has_blkzoned(sbi)) {
- err = f2fs_fix_curseg_write_pointer(sbi);
- ret = err;
- }
+ if (!err)
+ err = f2fs_check_and_fix_write_pointer(sbi);
if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING);
- up_write(&sbi->cp_global_sem);
+ f2fs_up_write(&sbi->cp_global_sem);
/* let's drop all the directory inodes for clean checkpoint */
destroy_fsync_dnodes(&dir_list, err);
@@ -857,15 +944,11 @@ skip:
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
}
-#ifdef CONFIG_QUOTA
- /* Turn quotas off */
- if (quota_enabled)
- f2fs_quota_off_umount(sbi->sb);
-#endif
sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return ret ? ret : err;
@@ -875,9 +958,7 @@ int __init f2fs_create_recovery_cache(void)
{
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
sizeof(struct fsync_inode_entry));
- if (!fsync_entry_slab)
- return -ENOMEM;
- return 0;
+ return fsync_entry_slab ? 0 : -ENOMEM;
}
void f2fs_destroy_recovery_cache(void)
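sanity_check_node_chain() above is the fast half of a Floyd (tortoise/hare) loop detector: the caller advances through the warm-node chain one block per iteration while the helper advances two, so the two positions can only coincide if the chain loops back on itself. The same idea on an ordinary linked list, as a minimal sketch (the list type is a stand-in, not an f2fs structure):

#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };

static bool chain_has_loop(const struct node *head)
{
	const struct node *slow = head, *fast = head;

	while (fast && fast->next) {
		slow = slow->next;		/* tortoise: one step */
		fast = fast->next->next;	/* hare: two steps */
		if (slow == fast)
			return true;		/* cursors met: looped chain */
	}
	return false;				/* reached the end: no loop */
}

Advancing the hare two links per caller step bounds the extra reads at roughly twice the chain length and needs no per-block state, unlike the free-block counter heuristic the patch removes from find_fsync_dnodes().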
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index df9ed75f0b7a..c26424f47686 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -9,6 +9,7 @@
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
@@ -29,7 +30,7 @@
static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
-static struct kmem_cache *inmem_entry_slab;
+static struct kmem_cache *revoke_entry_slab;
static unsigned long __reverse_ulong(unsigned char *str)
{
@@ -184,309 +185,234 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
-void f2fs_register_inmem_page(struct inode *inode, struct page *page)
+void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
- struct inmem_pages *new;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
- set_page_private_atomic(page);
+ if (!f2fs_is_atomic_file(inode))
+ return;
- new = f2fs_kmem_cache_alloc(inmem_entry_slab,
- GFP_NOFS, true, NULL);
+ if (clean)
+ truncate_inode_pages_final(inode->i_mapping);
- /* add atomic page indices to the list */
- new->page = page;
- INIT_LIST_HEAD(&new->list);
+ release_atomic_write_cnt(inode);
+ clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ clear_inode_flag(inode, FI_ATOMIC_REPLACE);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ /*
+ * The vfs inode keeps clean during commit, but the f2fs inode
+ * doesn't. So clear the dirty state after commit and let
+ * f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
+ */
+ f2fs_inode_synced(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+ stat_dec_atomic_inode(inode);
- /* increase reference count with clean state */
- get_page(page);
- mutex_lock(&F2FS_I(inode)->inmem_lock);
- list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
- inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
- mutex_unlock(&F2FS_I(inode)->inmem_lock);
+ F2FS_I(inode)->atomic_write_task = NULL;
- trace_f2fs_register_inmem_page(page, INMEM);
+ if (clean) {
+ f2fs_i_size_write(inode, fi->original_i_size);
+ fi->original_i_size = 0;
+ }
+ /* avoid stale dirty inode during eviction */
+ sync_inode_metadata(inode, 0);
}
-static int __revoke_inmem_pages(struct inode *inode,
- struct list_head *head, bool drop, bool recover,
- bool trylock)
+static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+ block_t new_addr, block_t *old_addr, bool recover)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inmem_pages *cur, *tmp;
- int err = 0;
+ struct dnode_of_data dn;
+ struct node_info ni;
+ int err;
- list_for_each_entry_safe(cur, tmp, head, list) {
- struct page *page = cur->page;
+retry:
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
+ if (err) {
+ if (err == -ENOMEM) {
+ memalloc_retry_wait(GFP_NOFS);
+ goto retry;
+ }
+ return err;
+ }
- if (drop)
- trace_f2fs_commit_inmem_page(page, INMEM_DROP);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
- if (trylock) {
- /*
- * to avoid deadlock in between page lock and
- * inmem_lock.
- */
- if (!trylock_page(page))
- continue;
+ if (recover) {
+ /* dn.data_blkaddr is always valid */
+ if (!__is_valid_data_blkaddr(new_addr)) {
+ if (new_addr == NULL_ADDR)
+ dec_valid_block_count(sbi, inode, 1);
+ f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
+ f2fs_update_data_blkaddr(&dn, new_addr);
} else {
- lock_page(page);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ new_addr, ni.version, true, true);
}
+ } else {
+ blkcnt_t count = 1;
- f2fs_wait_on_page_writeback(page, DATA, true, true);
-
- if (recover) {
- struct dnode_of_data dn;
- struct node_info ni;
-
- trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
-retry:
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, page->index,
- LOOKUP_NODE);
- if (err) {
- if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
- cond_resched();
- goto retry;
- }
- err = -EAGAIN;
- goto next;
- }
-
- err = f2fs_get_node_info(sbi, dn.nid, &ni);
- if (err) {
- f2fs_put_dnode(&dn);
- return err;
- }
-
- if (cur->old_addr == NEW_ADDR) {
- f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
- f2fs_update_data_blkaddr(&dn, NEW_ADDR);
- } else
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
- cur->old_addr, ni.version, true, true);
+ err = inc_valid_block_count(sbi, inode, &count, true);
+ if (err) {
f2fs_put_dnode(&dn);
+ return err;
}
-next:
- /* we don't need to invalidate this in the sccessful status */
- if (drop || recover) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
- }
- detach_page_private(page);
- set_page_private(page, 0);
- f2fs_put_page(page, 1);
- list_del(&cur->list);
- kmem_cache_free(inmem_entry_slab, cur);
- dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
- }
- return err;
-}
+ *old_addr = dn.data_blkaddr;
+ f2fs_truncate_data_blocks_range(&dn, 1);
+ dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
-void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
-{
- struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
- struct inode *inode;
- struct f2fs_inode_info *fi;
- unsigned int count = sbi->atomic_files;
- unsigned int looped = 0;
-next:
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (list_empty(head)) {
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- return;
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
+ ni.version, true, false);
}
- fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
- inode = igrab(&fi->vfs_inode);
- if (inode)
- list_move_tail(&fi->inmem_ilist, head);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- if (inode) {
- if (gc_failure) {
- if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
- goto skip;
- }
- set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- f2fs_drop_inmem_pages(inode);
-skip:
- iput(inode);
- }
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
- cond_resched();
- if (gc_failure) {
- if (++looped >= count)
- return;
- }
- goto next;
-}
-
-void f2fs_drop_inmem_pages(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
-
- do {
- mutex_lock(&fi->inmem_lock);
- if (list_empty(&fi->inmem_pages)) {
- fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
-
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(inode, FI_ATOMIC_FILE);
- sbi->atomic_files--;
- }
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ f2fs_put_dnode(&dn);
- mutex_unlock(&fi->inmem_lock);
- break;
- }
- __revoke_inmem_pages(inode, &fi->inmem_pages,
- true, false, true);
- mutex_unlock(&fi->inmem_lock);
- } while (1);
+ trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
+ index, old_addr ? *old_addr : 0, new_addr, recover);
+ return 0;
}
-void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
+static void __complete_revoke_list(struct inode *inode, struct list_head *head,
+ bool revoke)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct list_head *head = &fi->inmem_pages;
- struct inmem_pages *cur = NULL;
+ struct revoke_entry *cur, *tmp;
+ pgoff_t start_index = 0;
+ bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
- f2fs_bug_on(sbi, !page_private_atomic(page));
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ if (revoke) {
+ __replace_atomic_write_block(inode, cur->index,
+ cur->old_addr, NULL, true);
+ } else if (truncate) {
+ f2fs_truncate_hole(inode, start_index, cur->index);
+ start_index = cur->index + 1;
+ }
- mutex_lock(&fi->inmem_lock);
- list_for_each_entry(cur, head, list) {
- if (cur->page == page)
- break;
+ list_del(&cur->list);
+ kmem_cache_free(revoke_entry_slab, cur);
}
- f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
- list_del(&cur->list);
- mutex_unlock(&fi->inmem_lock);
-
- dec_page_count(sbi, F2FS_INMEM_PAGES);
- kmem_cache_free(inmem_entry_slab, cur);
-
- ClearPageUptodate(page);
- clear_page_private_atomic(page);
- f2fs_put_page(page, 0);
-
- detach_page_private(page);
- set_page_private(page, 0);
-
- trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
+ if (!revoke && truncate)
+ f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
}
-static int __f2fs_commit_inmem_pages(struct inode *inode)
+static int __f2fs_commit_atomic_write(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct inmem_pages *cur, *tmp;
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .ino = inode->i_ino,
- .type = DATA,
- .op = REQ_OP_WRITE,
- .op_flags = REQ_SYNC | REQ_PRIO,
- .io_type = FS_DATA_IO,
- };
+ struct inode *cow_inode = fi->cow_inode;
+ struct revoke_entry *new;
struct list_head revoke_list;
- bool submit_bio = false;
- int err = 0;
+ block_t blkaddr;
+ struct dnode_of_data dn;
+ pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ pgoff_t off = 0, blen, index;
+ int ret = 0, i;
INIT_LIST_HEAD(&revoke_list);
- list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
- struct page *page = cur->page;
+ while (len) {
+ blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
- lock_page(page);
- if (page->mapping == inode->i_mapping) {
- trace_f2fs_commit_inmem_page(page, INMEM);
+ set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
+ if (ret && ret != -ENOENT) {
+ goto out;
+ } else if (ret == -ENOENT) {
+ ret = 0;
+ if (dn.max_level == 0)
+ goto out;
+ goto next;
+ }
- f2fs_wait_on_page_writeback(page, DATA, true, true);
+ blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, cow_inode),
+ len);
+ index = off;
+ for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
+ blkaddr = f2fs_data_blkaddr(&dn);
- set_page_dirty(page);
- if (clear_page_dirty_for_io(page)) {
- inode_dec_dirty_pages(inode);
- f2fs_remove_dirty_inode(inode);
+ if (!__is_valid_data_blkaddr(blkaddr)) {
+ continue;
+ } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ f2fs_put_dnode(&dn);
+ ret = -EFSCORRUPTED;
+ goto out;
}
-retry:
- fio.page = page;
- fio.old_blkaddr = NULL_ADDR;
- fio.encrypted_page = NULL;
- fio.need_lock = LOCK_DONE;
- err = f2fs_do_write_data_page(&fio);
- if (err) {
- if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
- cond_resched();
- goto retry;
- }
- unlock_page(page);
- break;
+
+ new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
+ true, NULL);
+
+ ret = __replace_atomic_write_block(inode, index, blkaddr,
+ &new->old_addr, false);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ kmem_cache_free(revoke_entry_slab, new);
+ goto out;
}
- /* record old blkaddr for revoking */
- cur->old_addr = fio.old_blkaddr;
- submit_bio = true;
+
+ f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+ new->index = index;
+ list_add_tail(&new->list, &revoke_list);
}
- unlock_page(page);
- list_move_tail(&cur->list, &revoke_list);
+ f2fs_put_dnode(&dn);
+next:
+ off += blen;
+ len -= blen;
}
- if (submit_bio)
- f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
+out:
+ if (time_to_inject(sbi, FAULT_TIMEOUT))
+ f2fs_io_schedule_timeout_killable(DEFAULT_FAULT_TIMEOUT);
+
+ if (ret) {
+ sbi->revoked_atomic_block += fi->atomic_write_cnt;
+ } else {
+ sbi->committed_atomic_block += fi->atomic_write_cnt;
+ set_inode_flag(inode, FI_ATOMIC_COMMITTED);
- if (err) {
/*
- * try to revoke all committed pages, but still we could fail
- * due to no memory or other reason, if that happened, EAGAIN
- * will be returned, which means in such case, transaction is
- * already not integrity, caller should use journal to do the
- * recovery or rewrite & commit last transaction. For other
- * error number, revoking was done by filesystem itself.
+ * inode may have no FI_ATOMIC_DIRTIED flag due to no write
+ * before commit.
*/
- err = __revoke_inmem_pages(inode, &revoke_list,
- false, true, false);
-
- /* drop all uncommitted pages */
- __revoke_inmem_pages(inode, &fi->inmem_pages,
- true, false, false);
- } else {
- __revoke_inmem_pages(inode, &revoke_list,
- false, false, false);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ /* clear atomic dirty status and set vfs dirty status */
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
- return err;
+ __complete_revoke_list(inode, &revoke_list, ret ? true : false);
+
+ return ret;
}
-int f2fs_commit_inmem_pages(struct inode *inode)
+int f2fs_commit_atomic_write(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
- f2fs_balance_fs(sbi, true);
-
- down_write(&fi->i_gc_rwsem[WRITE]);
+ err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (err)
+ return err;
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
f2fs_lock_op(sbi);
- set_inode_flag(inode, FI_ATOMIC_COMMIT);
- mutex_lock(&fi->inmem_lock);
- err = __f2fs_commit_inmem_pages(inode);
- mutex_unlock(&fi->inmem_lock);
-
- clear_inode_flag(inode, FI_ATOMIC_COMMIT);
+ err = __f2fs_commit_atomic_write(inode);
f2fs_unlock_op(sbi);
- up_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
return err;
}
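__f2fs_commit_atomic_write() above moves each written block from the COW inode into the original file while recording the address it displaces, so that __complete_revoke_list() can undo a failed commit entry by entry. A hedged sketch of that record-then-rollback pattern (types, names, and the restore callback are illustrative, not the kernel's):

#include <stdlib.h>

struct revoke_rec {
	unsigned long index;		/* page index in the original file */
	unsigned long old_addr;		/* block address it held before commit */
	struct revoke_rec *next;
};

/* Remember one replaced block so a failed commit can be rolled back. */
static int remember_old_block(struct revoke_rec **head,
			      unsigned long index, unsigned long old_addr)
{
	struct revoke_rec *rec = malloc(sizeof(*rec));

	if (!rec)
		return -1;
	rec->index = index;
	rec->old_addr = old_addr;
	rec->next = *head;
	*head = rec;
	return 0;
}

/* On error, put every old address back; either way, free the records. */
static void complete_revoke_list(struct revoke_rec **head, int err,
				 void (*restore)(unsigned long idx,
						 unsigned long addr))
{
	while (*head) {
		struct revoke_rec *rec = *head;

		*head = rec->next;
		if (err)
			restore(rec->index, rec->old_addr);
		free(rec);
	}
}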
@@ -497,49 +423,60 @@ int f2fs_commit_inmem_pages(struct inode *inode)
*/
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
- if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
- f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
- f2fs_stop_checkpoint(sbi, false);
- }
+ if (f2fs_cp_error(sbi))
+ return;
+
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
/* balance_fs_bg is able to be pending */
if (need && excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi, false);
- if (!f2fs_is_checkpoint_ready(sbi))
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return;
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi, 0, 0)) {
- if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
- sbi->gc_thread->f2fs_gc_task) {
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
- TASK_UNINTERRUPTIBLE);
- wake_up(&sbi->gc_thread->gc_wait_queue_head);
- io_schedule();
- finish_wait(&sbi->gc_thread->fggc_wq, &wait);
- } else {
- down_write(&sbi->gc_lock);
- f2fs_gc(sbi, false, false, false, NULL_SEGNO);
- }
+ if (has_enough_free_secs(sbi, 0, 0))
+ return;
+
+ if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
+ sbi->gc_thread->f2fs_gc_task) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ wake_up(&sbi->gc_thread->gc_wait_queue_head);
+ io_schedule();
+ finish_wait(&sbi->gc_thread->fggc_wq, &wait);
+ } else {
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .init_gc_type = f2fs_sb_has_blkzoned(sbi) ?
+ FG_GC : BG_GC,
+ .no_bg_gc = true,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = false,
+ .nr_free_secs = 1 };
+ f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
+ f2fs_gc(sbi, &gc_control);
}
}
static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{
- int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
+ int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
- unsigned int threshold = sbi->blocks_per_seg * factor *
- DEFAULT_DIRTY_THRESHOLD;
+ unsigned int threshold =
+ SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
unsigned int global_threshold = threshold * 3 / 2;
if (dents >= threshold || qdata >= threshold ||
@@ -555,8 +492,14 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
return;
	/* try to shrink read extent cache when there is not enough memory */
- if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
- f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+ if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
+ f2fs_shrink_read_extent_tree(sbi,
+ READ_EXTENT_CACHE_SHRINK_NUMBER);
+
+	/* try to shrink age extent cache when there is not enough memory */
+ if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
+ f2fs_shrink_age_extent_tree(sbi,
+ AGE_EXTENT_CACHE_SHRINK_NUMBER);
/* check the # of cached NAT entries */
if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
@@ -573,7 +516,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
/* there is background inflight IO or foreground operation recently */
if (is_inflight_io(sbi, REQ_TIME) ||
- (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
+ (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
return;
/* exceed periodical checkpoint timeout threshold */
@@ -592,13 +535,13 @@ do_sync:
mutex_lock(&sbi->flush_lock);
blk_start_plug(&plug);
- f2fs_sync_dirty_inodes(sbi, FILE_INODE);
+ f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
blk_finish_plug(&plug);
mutex_unlock(&sbi->flush_lock);
}
- f2fs_sync_fs(sbi->sb, true);
- stat_inc_bg_cp_count(sbi->stat_info);
+ stat_inc_cp_call_count(sbi, BACKGROUND);
+ f2fs_sync_fs(sbi->sb, 1);
}
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
@@ -608,6 +551,8 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi,
trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
test_opt(sbi, FLUSH_MERGE), ret);
+ if (!ret)
+ f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
return ret;
}
@@ -738,12 +683,11 @@ int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
struct flush_cmd_control *fcc;
- int err = 0;
if (SM_I(sbi)->fcc_info) {
fcc = SM_I(sbi)->fcc_info;
if (fcc->f2fs_issue_flush)
- return err;
+ return 0;
goto init_thread;
}
@@ -756,19 +700,19 @@ int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
init_llist_head(&fcc->issue_list);
SM_I(sbi)->fcc_info = fcc;
if (!test_opt(sbi, FLUSH_MERGE))
- return err;
+ return 0;
init_thread:
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
- err = PTR_ERR(fcc->f2fs_issue_flush);
- kfree(fcc);
- SM_I(sbi)->fcc_info = NULL;
+ int err = PTR_ERR(fcc->f2fs_issue_flush);
+
+ fcc->f2fs_issue_flush = NULL;
return err;
}
- return err;
+ return 0;
}
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
@@ -806,12 +750,12 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
do {
ret = __submit_flush_wait(sbi, FDEV(i).bdev);
if (ret)
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
} while (ret && --count);
if (ret) {
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_FLUSH_FAIL);
break;
}
@@ -829,7 +773,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
/* need not be added */
- if (IS_CURSEG(sbi, segno))
+ if (is_curseg(sbi, segno))
return;
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
@@ -851,10 +795,12 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
block_t valid_blocks =
get_valid_blocks(sbi, segno, true);
- f2fs_bug_on(sbi, unlikely(!valid_blocks ||
- valid_blocks == BLKS_PER_SEC(sbi)));
+ f2fs_bug_on(sbi,
+ (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ !valid_blocks) ||
+ valid_blocks == CAP_BLKS_PER_SEC(sbi));
- if (!IS_CURSEC(sbi, secno))
+ if (!is_cursec(sbi, secno))
set_bit(secno, dirty_i->dirty_secmap);
}
}
@@ -888,12 +834,12 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
if (!valid_blocks ||
- valid_blocks == BLKS_PER_SEC(sbi)) {
+ valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
clear_bit(secno, dirty_i->dirty_secmap);
return;
}
- if (!IS_CURSEC(sbi, secno))
+ if (!is_cursec(sbi, secno))
set_bit(secno, dirty_i->dirty_secmap);
}
}
@@ -910,7 +856,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned short valid_blocks, ckpt_valid_blocks;
unsigned int usable_blocks;
- if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
+ if (segno == NULL_SEGNO || is_curseg(sbi, segno))
return;
usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
@@ -943,7 +889,7 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
if (get_valid_blocks(sbi, segno, false))
continue;
- if (IS_CURSEG(sbi, segno))
+ if (is_curseg(sbi, segno))
continue;
__locate_dirty_segment(sbi, segno, PRE);
__remove_dirty_segment(sbi, segno, DIRTY);
@@ -955,7 +901,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
int ovp_hole_segs =
(overprovision_segments(sbi) - reserved_segments(sbi));
- block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
+ block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
block_t holes[2] = {0, 0}; /* DATA and NODE */
block_t unusable;
@@ -974,7 +920,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
}
mutex_unlock(&dirty_i->seglist_lock);
- unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
+ unusable = max(holes[DATA], holes[NODE]);
if (unusable > ovp_holes)
return unusable - ovp_holes;
return 0;
@@ -984,11 +930,16 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
int ovp_hole_segs =
(overprovision_segments(sbi) - reserved_segments(sbi));
+
+ if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
+ return 0;
if (unusable > F2FS_OPTION(sbi).unusable_cap)
return -EAGAIN;
if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
dirty_segments(sbi) > ovp_hole_segs)
return -EAGAIN;
+ if (has_not_enough_free_secs(sbi, 0, 0))
+ return -EAGAIN;
return 0;
}
@@ -1026,9 +977,9 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
INIT_LIST_HEAD(&dc->list);
dc->bdev = bdev;
- dc->lstart = lstart;
- dc->start = start;
- dc->len = len;
+ dc->di.lstart = lstart;
+ dc->di.start = start;
+ dc->di.len = len;
dc->ref = 0;
dc->state = D_PREP;
dc->queued = 0;
@@ -1043,20 +994,108 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
return dc;
}
-static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
- struct block_device *bdev, block_t lstart,
- block_t start, block_t len,
- struct rb_node *parent, struct rb_node **p,
- bool leftmost)
+static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
{
+#ifdef CONFIG_F2FS_CHECK_FS
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct rb_node *cur = rb_first_cached(&dcc->root), *next;
+ struct discard_cmd *cur_dc, *next_dc;
+
+ while (cur) {
+ next = rb_next(cur);
+ if (!next)
+ return true;
+
+ cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
+ next_dc = rb_entry(next, struct discard_cmd, rb_node);
+
+ if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
+ f2fs_info(sbi, "broken discard_rbtree, "
+ "cur(%u, %u) next(%u, %u)",
+ cur_dc->di.lstart, cur_dc->di.len,
+ next_dc->di.lstart, next_dc->di.len);
+ return false;
+ }
+ cur = next;
+ }
+#endif
+ return true;
+}
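
The check above enforces a simple interval invariant: with discard commands kept sorted by lstart, each entry must end at or before the next one begins. A minimal standalone C sketch of the same invariant over a sorted array (the struct and function names here are illustrative only, not f2fs APIs):

#include <stdbool.h>
#include <stdio.h>

struct drange { unsigned int lstart, len; };

/* true iff no entry overlaps its successor: prev.lstart + prev.len <= next.lstart */
static bool ranges_are_disjoint(const struct drange *r, int nr)
{
	for (int i = 0; i + 1 < nr; i++) {
		if (r[i].lstart + r[i].len > r[i + 1].lstart) {
			printf("overlap: cur(%u, %u) next(%u, %u)\n",
			       r[i].lstart, r[i].len,
			       r[i + 1].lstart, r[i + 1].len);
			return false;
		}
	}
	return true;
}

int main(void)
{
	struct drange ok[]  = { { 0, 8 }, { 8, 4 }, { 16, 2 } };
	struct drange bad[] = { { 0, 8 }, { 6, 4 } };	/* 0..7 overlaps 6..9 */

	printf("ok:  %d\n", ranges_are_disjoint(ok, 3));	/* prints 1 */
	printf("bad: %d\n", ranges_are_disjoint(bad, 2));	/* prints 0 */
	return 0;
}
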
+
+static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct rb_node *node = dcc->root.rb_root.rb_node;
struct discard_cmd *dc;
- dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
+ while (node) {
+ dc = rb_entry(node, struct discard_cmd, rb_node);
- rb_link_node(&dc->rb_node, parent, p);
- rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
+ if (blkaddr < dc->di.lstart)
+ node = node->rb_left;
+ else if (blkaddr >= dc->di.lstart + dc->di.len)
+ node = node->rb_right;
+ else
+ return dc;
+ }
+ return NULL;
+}
+
+static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
+ block_t blkaddr,
+ struct discard_cmd **prev_entry,
+ struct discard_cmd **next_entry,
+ struct rb_node ***insert_p,
+ struct rb_node **insert_parent)
+{
+ struct rb_node **pnode = &root->rb_root.rb_node;
+ struct rb_node *parent = NULL, *tmp_node;
+ struct discard_cmd *dc;
+ *insert_p = NULL;
+ *insert_parent = NULL;
+ *prev_entry = NULL;
+ *next_entry = NULL;
+
+ if (RB_EMPTY_ROOT(&root->rb_root))
+ return NULL;
+
+ while (*pnode) {
+ parent = *pnode;
+ dc = rb_entry(*pnode, struct discard_cmd, rb_node);
+
+ if (blkaddr < dc->di.lstart)
+ pnode = &(*pnode)->rb_left;
+ else if (blkaddr >= dc->di.lstart + dc->di.len)
+ pnode = &(*pnode)->rb_right;
+ else
+ goto lookup_neighbors;
+ }
+
+ *insert_p = pnode;
+ *insert_parent = parent;
+
+ dc = rb_entry(parent, struct discard_cmd, rb_node);
+ tmp_node = parent;
+ if (parent && blkaddr > dc->di.lstart)
+ tmp_node = rb_next(parent);
+ *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
+
+ tmp_node = parent;
+ if (parent && blkaddr < dc->di.lstart)
+ tmp_node = rb_prev(parent);
+ *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
+ return NULL;
+
+lookup_neighbors:
+ /* lookup prev node for merging backward later */
+ tmp_node = rb_prev(&dc->rb_node);
+ *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
+
+	/* lookup next node for merging forward later */
+ tmp_node = rb_next(&dc->rb_node);
+ *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
return dc;
}
@@ -1068,7 +1107,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
list_del(&dc->list);
rb_erase_cached(&dc->rb_node, &dcc->root);
- dcc->undiscard_blks -= dc->len;
+ dcc->undiscard_blks -= dc->di.len;
kmem_cache_free(discard_cmd_slab, dc);
@@ -1081,7 +1120,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
unsigned long flags;
- trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
+ trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);
spin_lock_irqsave(&dc->lock, flags);
if (dc->bio_ref) {
@@ -1096,10 +1135,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
dc->error = 0;
if (dc->error)
- printk_ratelimited(
- "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
- KERN_INFO, sbi->sb->s_id,
- dc->lstart, dc->start, dc->len, dc->error);
+ f2fs_info_ratelimited(sbi,
+ "Issue discard(%u, %u, %u) failed, ret: %d",
+ dc->di.lstart, dc->di.start, dc->di.len, dc->error);
__detach_discard_cmd(dcc, dc);
}
@@ -1127,8 +1165,7 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
struct seg_entry *sentry;
unsigned int segno;
block_t blk = start;
- unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
- unsigned long *map;
+ unsigned long offset, size, *map;
while (blk < end) {
segno = GET_SEGNO(sbi, blk);
@@ -1138,7 +1175,7 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
if (end < START_BLOCK(sbi, segno + 1))
size = GET_BLKOFF_FROM_SEG0(sbi, end);
else
- size = max_blocks;
+ size = BLKS_PER_SEG(sbi);
map = (unsigned long *)(sentry->cur_valid_map);
offset = __find_rev_next_bit(map, size, offset);
f2fs_bug_on(sbi, offset != size);
@@ -1159,34 +1196,37 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
dpolicy->ordered = false;
dpolicy->granularity = granularity;
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
+ dpolicy->max_requests = dcc->max_discard_request;
+ dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
dpolicy->timeout = false;
if (discard_type == DPOLICY_BG) {
- dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
- dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
- dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
- dpolicy->io_aware = true;
+ dpolicy->min_interval = dcc->min_discard_issue_time;
+ dpolicy->mid_interval = dcc->mid_discard_issue_time;
+ dpolicy->max_interval = dcc->max_discard_issue_time;
+ if (dcc->discard_io_aware == DPOLICY_IO_AWARE_ENABLE)
+ dpolicy->io_aware = true;
+ else if (dcc->discard_io_aware == DPOLICY_IO_AWARE_DISABLE)
+ dpolicy->io_aware = false;
dpolicy->sync = false;
dpolicy->ordered = true;
- if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
- dpolicy->granularity = 1;
+ if (utilization(sbi) > dcc->discard_urgent_util) {
+ dpolicy->granularity = MIN_DISCARD_GRANULARITY;
if (atomic_read(&dcc->discard_cmd_cnt))
dpolicy->max_interval =
- DEF_MIN_DISCARD_ISSUE_TIME;
+ dcc->min_discard_issue_time;
}
} else if (discard_type == DPOLICY_FORCE) {
- dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
- dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
- dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+ dpolicy->min_interval = dcc->min_discard_issue_time;
+ dpolicy->mid_interval = dcc->mid_discard_issue_time;
+ dpolicy->max_interval = dcc->max_discard_issue_time;
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_FSTRIM) {
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
dpolicy->io_aware = false;
/* we need to issue all to keep CP_TRIMMED_FLAG */
- dpolicy->granularity = 1;
+ dpolicy->granularity = MIN_DISCARD_GRANULARITY;
dpolicy->timeout = true;
}
}
@@ -1194,20 +1234,57 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc, blk_opf_t flag,
+ struct list_head *wait_list,
+ unsigned int *issued)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct block_device *bdev = dc->bdev;
+ struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
+ unsigned long flags;
+
+ trace_f2fs_issue_reset_zone(bdev, dc->di.start);
+
+ spin_lock_irqsave(&dc->lock, flags);
+ dc->state = D_SUBMIT;
+ dc->bio_ref++;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ if (issued)
+ (*issued)++;
+
+ atomic_inc(&dcc->queued_discard);
+ dc->queued++;
+ list_move_tail(&dc->list, wait_list);
+
+ /* sanity check on discard range */
+ __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
+
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ submit_bio(bio);
+
+ atomic_inc(&dcc->issued_discard);
+ f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
+}
+#endif
+
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
- struct discard_policy *dpolicy,
- struct discard_cmd *dc,
- unsigned int *issued)
+ struct discard_policy *dpolicy,
+ struct discard_cmd *dc, int *issued)
{
struct block_device *bdev = dc->bdev;
- struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_discard_blocks =
- SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
+ SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
&(dcc->fstrim_list) : &(dcc->wait_list);
- int flag = dpolicy->sync ? REQ_SYNC : 0;
+ blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
block_t lstart, start, len, total_len;
int err = 0;
@@ -1217,14 +1294,38 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
return 0;
- trace_f2fs_issue_discard(bdev, dc->start, dc->len);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
+ int devi = f2fs_bdev_index(sbi, bdev);
+
+ if (devi < 0)
+ return -EINVAL;
+
+ if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
+ __submit_zone_reset_cmd(sbi, dc, flag,
+ wait_list, issued);
+ return 0;
+ }
+ }
+#endif
+
+ /*
+	 * stop issuing discard in any of the cases below:
+	 * 1. the device is a conventional zone, but it doesn't support discard.
+	 * 2. the device is a regular device that no longer supports discard
+	 *    after a snapshot.
+ */
+ if (!bdev_max_discard_sectors(bdev))
+ return -EOPNOTSUPP;
+
+ trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
- lstart = dc->lstart;
- start = dc->start;
- len = dc->len;
+ lstart = dc->di.lstart;
+ start = dc->di.start;
+ len = dc->di.len;
total_len = len;
- dc->len = 0;
+ dc->di.len = 0;
while (total_len && *issued < dpolicy->max_requests && !err) {
struct bio *bio = NULL;
@@ -1240,19 +1341,11 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
if (*issued == dpolicy->max_requests)
last = true;
- dc->len += len;
+ dc->di.len += len;
+ err = 0;
if (time_to_inject(sbi, FAULT_DISCARD)) {
- f2fs_show_injection_info(sbi, FAULT_DISCARD);
err = -EIO;
- goto submit;
- }
- err = __blkdev_issue_discard(bdev,
- SECTOR_FROM_BLOCK(start),
- SECTOR_FROM_BLOCK(len),
- GFP_NOFS, 0, &bio);
-submit:
- if (err) {
spin_lock_irqsave(&dc->lock, flags);
if (dc->state == D_PARTIAL)
dc->state = D_SUBMIT;
@@ -1261,6 +1354,8 @@ submit:
break;
}
+ __blkdev_issue_discard(bdev, SECTOR_FROM_BLOCK(start),
+ SECTOR_FROM_BLOCK(len), GFP_NOFS, &bio);
f2fs_bug_on(sbi, !bio);
/*
@@ -1289,7 +1384,7 @@ submit:
atomic_inc(&dcc->issued_discard);
- f2fs_update_iostat(sbi, FS_DISCARD, 1);
+ f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
lstart += len;
start += len;
@@ -1304,34 +1399,42 @@ submit:
return err;
}
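
The loop above boils down to a chunking pass: a pending discard of total_len blocks is carved into pieces of at most max_discard_blocks, and submission stops once the policy's per-round request budget is spent, leaving the remainder queued for a later round. A standalone sketch of just that arithmetic, with hypothetical names and no block-layer calls:

#include <stdio.h>

/* Returns how many blocks were covered this round. */
static unsigned int split_discard(unsigned int total_len,
				  unsigned int max_discard_blocks,
				  unsigned int max_requests)
{
	unsigned int issued = 0, submitted = 0;

	while (total_len && issued < max_requests) {
		unsigned int len = total_len < max_discard_blocks ?
					total_len : max_discard_blocks;

		issued++;
		submitted += len;
		total_len -= len;
		printf("request %u: %u blocks\n", issued, len);
	}
	return submitted;
}

int main(void)
{
	/* e.g. 10000 blocks, a 4096-block discard limit, budget of 2 requests */
	unsigned int done = split_discard(10000, 4096, 2);

	printf("covered %u of 10000 blocks this round\n", done);	/* 8192 */
	return 0;
}
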
-static void __insert_discard_tree(struct f2fs_sb_info *sbi,
+static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
- block_t start, block_t len,
- struct rb_node **insert_p,
- struct rb_node *insert_parent)
+ block_t start, block_t len)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct rb_node **p;
+ struct rb_node **p = &dcc->root.rb_root.rb_node;
struct rb_node *parent = NULL;
+ struct discard_cmd *dc;
bool leftmost = true;
- if (insert_p && insert_parent) {
- parent = insert_parent;
- p = insert_p;
- goto do_insert;
+ /* look up rb tree to find parent node */
+ while (*p) {
+ parent = *p;
+ dc = rb_entry(parent, struct discard_cmd, rb_node);
+
+ if (lstart < dc->di.lstart) {
+ p = &(*p)->rb_left;
+ } else if (lstart >= dc->di.lstart + dc->di.len) {
+ p = &(*p)->rb_right;
+ leftmost = false;
+ } else {
+			/* the range already exists, skip the insertion */
+ return;
+ }
}
- p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
- lstart, &leftmost);
-do_insert:
- __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
- p, leftmost);
+ dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
+
+ rb_link_node(&dc->rb_node, parent, p);
+ rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
}
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
struct discard_cmd *dc)
{
- list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
+ list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
}
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1341,7 +1444,7 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_info di = dc->di;
bool modified = false;
- if (dc->state == D_DONE || dc->len == 1) {
+ if (dc->state == D_DONE || dc->di.len == 1) {
__remove_discard_cmd(sbi, dc);
return;
}
@@ -1349,23 +1452,22 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
dcc->undiscard_blks -= di.len;
if (blkaddr > di.lstart) {
- dc->len = blkaddr - dc->lstart;
- dcc->undiscard_blks += dc->len;
+ dc->di.len = blkaddr - dc->di.lstart;
+ dcc->undiscard_blks += dc->di.len;
__relocate_discard_cmd(dcc, dc);
modified = true;
}
if (blkaddr < di.lstart + di.len - 1) {
if (modified) {
- __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
+ __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
di.start + blkaddr + 1 - di.lstart,
- di.lstart + di.len - 1 - blkaddr,
- NULL, NULL);
+ di.lstart + di.len - 1 - blkaddr);
} else {
- dc->lstart++;
- dc->len--;
- dc->start++;
- dcc->undiscard_blks += dc->len;
+ dc->di.lstart++;
+ dc->di.len--;
+ dc->di.start++;
+ dcc->undiscard_blks += dc->di.len;
__relocate_discard_cmd(dcc, dc);
}
}
@@ -1380,22 +1482,18 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct discard_cmd *dc;
struct discard_info di = {0};
struct rb_node **insert_p = NULL, *insert_parent = NULL;
- struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_discard_blocks =
- SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
+ SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
block_t end = lstart + len;
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
- NULL, lstart,
- (struct rb_entry **)&prev_dc,
- (struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true, NULL);
+ dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
+ &prev_dc, &next_dc, &insert_p, &insert_parent);
if (dc)
prev_dc = dc;
if (!prev_dc) {
di.lstart = lstart;
- di.len = next_dc ? next_dc->lstart - lstart : len;
+ di.len = next_dc ? next_dc->di.lstart - lstart : len;
di.len = min(di.len, len);
di.start = start;
}
@@ -1406,16 +1504,16 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct discard_cmd *tdc = NULL;
if (prev_dc) {
- di.lstart = prev_dc->lstart + prev_dc->len;
+ di.lstart = prev_dc->di.lstart + prev_dc->di.len;
if (di.lstart < lstart)
di.lstart = lstart;
if (di.lstart >= end)
break;
- if (!next_dc || next_dc->lstart > end)
+ if (!next_dc || next_dc->di.lstart > end)
di.len = end - di.lstart;
else
- di.len = next_dc->lstart - di.lstart;
+ di.len = next_dc->di.lstart - di.lstart;
di.start = start + di.lstart - lstart;
}
@@ -1448,10 +1546,9 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
merged = true;
}
- if (!merged) {
- __insert_discard_tree(sbi, bdev, di.lstart, di.start,
- di.len, NULL, NULL);
- }
+ if (!merged)
+ __insert_discard_cmd(sbi, bdev,
+ di.lstart, di.start, di.len);
next:
prev_dc = next_dc;
if (!prev_dc)
@@ -1462,13 +1559,26 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
}
}
-static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
+#ifdef CONFIG_BLK_DEV_ZONED
+static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t lblkstart,
+ block_t blklen)
+{
+ trace_f2fs_queue_reset_zone(bdev, blkstart);
+
+ mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
+ __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
+ mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
+}
+#endif
+
+static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
block_t lblkstart = blkstart;
if (!f2fs_bdev_support_discard(bdev))
- return 0;
+ return;
trace_f2fs_queue_discard(bdev, blkstart, blklen);
@@ -1480,27 +1590,21 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
- return 0;
}
-static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
- struct discard_policy *dpolicy)
+static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy, int *issued)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
struct rb_node **insert_p = NULL, *insert_parent = NULL;
struct discard_cmd *dc;
struct blk_plug plug;
- unsigned int pos = dcc->next_pos;
- unsigned int issued = 0;
bool io_interrupted = false;
mutex_lock(&dcc->cmd_lock);
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
- NULL, pos,
- (struct rb_entry **)&prev_dc,
- (struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true, NULL);
+ dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
+ &prev_dc, &next_dc, &insert_p, &insert_parent);
if (!dc)
dc = next_dc;
@@ -1518,10 +1622,10 @@ static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
break;
}
- dcc->next_pos = dc->lstart + dc->len;
- err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+ dcc->next_pos = dc->di.lstart + dc->di.len;
+ err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
- if (issued >= dpolicy->max_requests)
+ if (*issued >= dpolicy->max_requests)
break;
next:
node = rb_next(&dc->rb_node);
@@ -1537,10 +1641,8 @@ next:
mutex_unlock(&dcc->cmd_lock);
- if (!issued && io_interrupted)
- issued = -1;
-
- return issued;
+ if (!(*issued) && io_interrupted)
+ *issued = -1;
}
static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy);
@@ -1568,8 +1670,10 @@ retry:
if (i + 1 < dpolicy->granularity)
break;
- if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
- return __issue_discard_cmd_orderly(sbi, dpolicy);
+ if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
+ __issue_discard_cmd_orderly(sbi, dpolicy, &issued);
+ return issued;
+ }
pend_list = &dcc->pend_list[i];
@@ -1577,8 +1681,7 @@ retry:
if (list_empty(pend_list))
goto next;
if (unlikely(dcc->rbtree_check))
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
- &dcc->root, false));
+ f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1656,7 +1759,7 @@ static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
dc->ref--;
if (!dc->ref) {
if (!dc->error)
- len = dc->len;
+ len = dc->di.len;
__remove_discard_cmd(sbi, dc);
}
mutex_unlock(&dcc->cmd_lock);
@@ -1671,33 +1774,33 @@ static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
&(dcc->fstrim_list) : &(dcc->wait_list);
- struct discard_cmd *dc, *tmp;
- bool need_wait;
+ struct discard_cmd *dc = NULL, *iter, *tmp;
unsigned int trimmed = 0;
next:
- need_wait = false;
+ dc = NULL;
mutex_lock(&dcc->cmd_lock);
- list_for_each_entry_safe(dc, tmp, wait_list, list) {
- if (dc->lstart + dc->len <= start || end <= dc->lstart)
+ list_for_each_entry_safe(iter, tmp, wait_list, list) {
+ if (iter->di.lstart + iter->di.len <= start ||
+ end <= iter->di.lstart)
continue;
- if (dc->len < dpolicy->granularity)
+ if (iter->di.len < dpolicy->granularity)
continue;
- if (dc->state == D_DONE && !dc->ref) {
- wait_for_completion_io(&dc->wait);
- if (!dc->error)
- trimmed += dc->len;
- __remove_discard_cmd(sbi, dc);
+ if (iter->state == D_DONE && !iter->ref) {
+ wait_for_completion_io(&iter->wait);
+ if (!iter->error)
+ trimmed += iter->di.len;
+ __remove_discard_cmd(sbi, iter);
} else {
- dc->ref++;
- need_wait = true;
+ iter->ref++;
+ dc = iter;
break;
}
}
mutex_unlock(&dcc->cmd_lock);
- if (need_wait) {
+ if (dc) {
trimmed += __wait_one_discard_bio(sbi, dc);
goto next;
}
@@ -1715,9 +1818,9 @@ static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
/* wait all */
- __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
+ __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
- __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
+ __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
return discard_blks;
@@ -1731,8 +1834,29 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
bool need_wait = false;
mutex_lock(&dcc->cmd_lock);
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
- NULL, blkaddr);
+ dc = __lookup_discard_cmd(sbi, blkaddr);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
+ int devi = f2fs_bdev_index(sbi, dc->bdev);
+
+ if (devi < 0) {
+ mutex_unlock(&dcc->cmd_lock);
+ return;
+ }
+
+ if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
+ /* force submit zone reset */
+ if (dc->state == D_PREP)
+ __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
+ &dcc->wait_list, NULL);
+ dc->ref++;
+ mutex_unlock(&dcc->cmd_lock);
+ /* wait zone reset */
+ __wait_one_discard_bio(sbi, dc);
+ return;
+ }
+ }
+#endif
if (dc) {
if (dc->state == D_PREP) {
__punch_discard_cmd(sbi, dc, blkaddr);
@@ -1759,13 +1883,23 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
}
}
-/* This comes from f2fs_put_super */
+/**
+ * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT
+ * @sbi: the f2fs_sb_info data for discard cmd to issue
+ *
+ * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands
+ * will be dropped.
+ *
+ * Return: true if all discard commands were issued or there were none to
+ * issue, otherwise false.
+ */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_policy dpolicy;
bool dropped;
+ if (!atomic_read(&dcc->discard_cmd_cnt))
+ return true;
+
__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
dcc->discard_granularity);
__issue_discard_cmd(sbi, &dpolicy);
@@ -1775,7 +1909,7 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
__wait_all_discard_cmd(sbi, NULL);
f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
- return dropped;
+ return !dropped;
}
static int issue_discard_thread(void *data)
@@ -1784,46 +1918,40 @@ static int issue_discard_thread(void *data)
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
wait_queue_head_t *q = &dcc->discard_wait_queue;
struct discard_policy dpolicy;
- unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
+ unsigned int wait_ms = dcc->min_discard_issue_time;
int issued;
set_freezable();
do {
+ wait_event_freezable_timeout(*q,
+ kthread_should_stop() || dcc->discard_wake,
+ msecs_to_jiffies(wait_ms));
+
if (sbi->gc_mode == GC_URGENT_HIGH ||
!f2fs_available_free_memory(sbi, DISCARD_CACHE))
- __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
+ __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
+ MIN_DISCARD_GRANULARITY);
else
__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
dcc->discard_granularity);
- if (!atomic_read(&dcc->discard_cmd_cnt))
- wait_ms = dpolicy.max_interval;
-
- wait_event_interruptible_timeout(*q,
- kthread_should_stop() || freezing(current) ||
- dcc->discard_wake,
- msecs_to_jiffies(wait_ms));
-
if (dcc->discard_wake)
- dcc->discard_wake = 0;
+ dcc->discard_wake = false;
/* clean up pending candidates before going to sleep */
if (atomic_read(&dcc->queued_discard))
__wait_all_discard_cmd(sbi, NULL);
- if (try_to_freeze())
- continue;
if (f2fs_readonly(sbi->sb))
continue;
if (kthread_should_stop())
return 0;
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
+ !atomic_read(&dcc->discard_cmd_cnt)) {
wait_ms = dpolicy.max_interval;
continue;
}
- if (!atomic_read(&dcc->discard_cmd_cnt))
- continue;
sb_start_intwrite(sbi->sb);
@@ -1838,6 +1966,8 @@ static int issue_discard_thread(void *data)
} else {
wait_ms = dpolicy.max_interval;
}
+ if (!atomic_read(&dcc->discard_cmd_cnt))
+ wait_ms = dpolicy.max_interval;
sb_end_intwrite(sbi->sb);
@@ -1852,6 +1982,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
sector_t sector, nr_sects;
block_t lblkstart = blkstart;
int devi = 0;
+ u64 remainder = 0;
if (f2fs_is_multi_device(sbi)) {
devi = f2fs_target_device_index(sbi, blkstart);
@@ -1867,21 +1998,34 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
sector = SECTOR_FROM_BLOCK(blkstart);
nr_sects = SECTOR_FROM_BLOCK(blklen);
+ div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
- if (sector & (bdev_zone_sectors(bdev) - 1) ||
- nr_sects != bdev_zone_sectors(bdev)) {
+ if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
devi, sbi->s_ndevs ? FDEV(devi).path : "",
blkstart, blklen);
return -EIO;
}
- trace_f2fs_issue_reset_zone(bdev, blkstart);
- return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- sector, nr_sects, GFP_NOFS);
+
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
+ unsigned int nofs_flags;
+ int ret;
+
+ trace_f2fs_issue_reset_zone(bdev, blkstart);
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ sector, nr_sects);
+ memalloc_nofs_restore(nofs_flags);
+ return ret;
+ }
+
+ __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
+ return 0;
}
/* For conventional zones, use regular discard if supported */
- return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
+ __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
+ return 0;
}
#endif
@@ -1892,7 +2036,8 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
- return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
+ __queue_discard_cmd(sbi, bdev, blkstart, blklen);
+ return 0;
}
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
@@ -1940,7 +2085,6 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
bool check_only)
{
int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
- int max_blocks = sbi->blocks_per_seg;
struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
@@ -1952,12 +2096,15 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
int i;
- if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
- !f2fs_block_unit_discard(sbi))
+ if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
+ !f2fs_hw_support_discard(sbi) ||
+ !f2fs_block_unit_discard(sbi))
return false;
if (!force) {
- if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
+ if (!f2fs_realtime_discard_enable(sbi) ||
+ (!se->valid_blocks &&
+ !is_curseg(sbi, cpc->trim_start)) ||
SM_I(sbi)->dcc_info->nr_discards >=
SM_I(sbi)->dcc_info->max_discards)
return false;
@@ -1970,13 +2117,14 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
while (force || SM_I(sbi)->dcc_info->nr_discards <=
SM_I(sbi)->dcc_info->max_discards) {
- start = __find_rev_next_bit(dmap, max_blocks, end + 1);
- if (start >= max_blocks)
+ start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
+ if (start >= BLKS_PER_SEG(sbi))
break;
- end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
- if (force && start && end != max_blocks
- && (end - start) < cpc->trim_minlen)
+ end = __find_rev_next_zero_bit(dmap,
+ BLKS_PER_SEG(sbi), start + 1);
+ if (force && start && end != BLKS_PER_SEG(sbi) &&
+ (end - start) < cpc->trim_minlen)
continue;
if (check_only)
@@ -2058,8 +2206,8 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
start + 1);
if (section_alignment) {
- start = rounddown(start, sbi->segs_per_sec);
- end = roundup(end, sbi->segs_per_sec);
+ start = rounddown(start, SEGS_PER_SEC(sbi));
+ end = roundup(end, SEGS_PER_SEC(sbi));
}
for (i = start; i < end; i++) {
@@ -2072,22 +2220,24 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
if (force && start >= cpc->trim_start &&
(end - 1) <= cpc->trim_end)
- continue;
+ continue;
- if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
+ /* Should cover 2MB zoned device for zone-based reset */
+ if (!f2fs_sb_has_blkzoned(sbi) &&
+ (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
- (end - start) << sbi->log_blocks_per_seg);
+ SEGS_TO_BLKS(sbi, end - start));
continue;
}
next:
secno = GET_SEC_FROM_SEG(sbi, start);
start_segno = GET_SEG_FROM_SEC(sbi, secno);
- if (!IS_CURSEC(sbi, secno) &&
+ if (!is_cursec(sbi, secno) &&
!get_valid_blocks(sbi, start, true))
f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
- sbi->segs_per_sec << sbi->log_blocks_per_seg);
+ BLKS_PER_SEC(sbi));
- start = start_segno + sbi->segs_per_sec;
+ start = start_segno + SEGS_PER_SEC(sbi);
if (start < end)
goto next;
else
@@ -2106,7 +2256,7 @@ next:
find_next:
if (is_valid) {
next_pos = find_next_zero_bit_le(entry->discard_map,
- sbi->blocks_per_seg, cur_pos);
+ BLKS_PER_SEG(sbi), cur_pos);
len = next_pos - cur_pos;
if (f2fs_sb_has_blkzoned(sbi) ||
@@ -2118,13 +2268,13 @@ find_next:
total_len += len;
} else {
next_pos = find_next_bit_le(entry->discard_map,
- sbi->blocks_per_seg, cur_pos);
+ BLKS_PER_SEG(sbi), cur_pos);
}
skip:
cur_pos = next_pos;
is_valid = !is_valid;
- if (cur_pos < sbi->blocks_per_seg)
+ if (cur_pos < BLKS_PER_SEG(sbi))
goto find_next;
release_discard_addr(entry);
@@ -2141,13 +2291,21 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
int err = 0;
+ if (f2fs_sb_has_readonly(sbi)) {
+ f2fs_info(sbi,
+ "Skip to start discard thread for readonly image");
+ return 0;
+ }
+
if (!f2fs_realtime_discard_enable(sbi))
return 0;
dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
- if (IS_ERR(dcc->f2fs_issue_discard))
+ if (IS_ERR(dcc->f2fs_issue_discard)) {
err = PTR_ERR(dcc->f2fs_issue_discard);
+ dcc->f2fs_issue_discard = NULL;
+ }
return err;
}
@@ -2166,11 +2324,13 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
if (!dcc)
return -ENOMEM;
+ dcc->discard_io_aware_gran = MAX_PLIST_NUM;
dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
- if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
- dcc->discard_granularity = sbi->blocks_per_seg;
- else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
- dcc->discard_granularity = BLKS_PER_SEC(sbi);
+ dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
+ dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
+ if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT ||
+ F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
+ dcc->discard_granularity = BLKS_PER_SEG(sbi);
INIT_LIST_HEAD(&dcc->entry_list);
for (i = 0; i < MAX_PLIST_NUM; i++)
@@ -2182,7 +2342,12 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
atomic_set(&dcc->queued_discard, 0);
atomic_set(&dcc->discard_cmd_cnt, 0);
dcc->nr_discards = 0;
- dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
+ dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
+ dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
+ dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
+ dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
+ dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
+ dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
dcc->undiscard_blks = 0;
dcc->next_pos = 0;
dcc->root = RB_ROOT_CACHED;
@@ -2213,8 +2378,7 @@ static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
* Recovery can cache discard commands, so in error path of
* fill_super(), it needs to give a chance to handle them.
*/
- if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
- f2fs_issue_discard_timeout(sbi);
+ f2fs_issue_discard_timeout(sbi);
kfree(dcc);
SM_I(sbi)->dcc_info = NULL;
@@ -2275,76 +2439,38 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
SIT_I(sbi)->max_mtime = ctime;
}
-static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+/*
+ * NOTE: when updating multiple blocks at the same time, please ensure
+ * that the consecutive input blocks belong to the same segment.
+ */
+static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
+ unsigned int segno, block_t blkaddr, unsigned int offset, int del)
{
- struct seg_entry *se;
- unsigned int segno, offset;
- long int new_vblocks;
bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
bool mir_exist;
#endif
+ int i;
+ int del_count = -del;
- segno = GET_SEGNO(sbi, blkaddr);
-
- se = get_seg_entry(sbi, segno);
- new_vblocks = se->valid_blocks + del;
- offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
-
- f2fs_bug_on(sbi, (new_vblocks < 0 ||
- (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
-
- se->valid_blocks = new_vblocks;
-
- /* Update valid block bitmap */
- if (del > 0) {
- exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
-#ifdef CONFIG_F2FS_CHECK_FS
- mir_exist = f2fs_test_and_set_bit(offset,
- se->cur_valid_map_mir);
- if (unlikely(exist != mir_exist)) {
- f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
- blkaddr, exist);
- f2fs_bug_on(sbi, 1);
- }
-#endif
- if (unlikely(exist)) {
- f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
- blkaddr);
- f2fs_bug_on(sbi, 1);
- se->valid_blocks--;
- del = 0;
- }
-
- if (f2fs_block_unit_discard(sbi) &&
- !f2fs_test_and_set_bit(offset, se->discard_map))
- sbi->discard_blks--;
+ f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
- /*
- * SSR should never reuse block which is checkpointed
- * or newly invalidated.
- */
- if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
- if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
- se->ckpt_valid_blocks++;
- }
- } else {
- exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
+ for (i = 0; i < del_count; i++) {
+ exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
- mir_exist = f2fs_test_and_clear_bit(offset,
+ mir_exist = f2fs_test_and_clear_bit(offset + i,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
- blkaddr, exist);
+ blkaddr + i, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(!exist)) {
- f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
- blkaddr);
+ f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
- del = 0;
+ del += 1;
} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
/*
* If checkpoints are off, we must not reuse data that
@@ -2352,7 +2478,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
* before, we must track that to know how much space we
* really have.
*/
- if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+ if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
spin_lock(&sbi->stat_lock);
sbi->unusable_block_count++;
spin_unlock(&sbi->stat_lock);
@@ -2360,11 +2486,105 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
}
if (f2fs_block_unit_discard(sbi) &&
- f2fs_test_and_clear_bit(offset, se->discard_map))
+ f2fs_test_and_clear_bit(offset + i, se->discard_map))
sbi->discard_blks++;
+
+ if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
+ se->ckpt_valid_blocks -= 1;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
+ }
+ }
+
+ if (__is_large_section(sbi))
+ sanity_check_valid_blocks(sbi, segno);
+
+ return del;
+}
+
+static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
+ unsigned int segno, block_t blkaddr, unsigned int offset, int del)
+{
+ bool exist;
+#ifdef CONFIG_F2FS_CHECK_FS
+ bool mir_exist;
+#endif
+
+ exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+ mir_exist = f2fs_test_and_set_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+ f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
+ blkaddr, exist);
+ f2fs_bug_on(sbi, 1);
}
- if (!f2fs_test_bit(offset, se->ckpt_valid_map))
+#endif
+ if (unlikely(exist)) {
+ f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks--;
+ del = 0;
+ }
+
+ if (f2fs_block_unit_discard(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
+ sbi->discard_blks--;
+
+ /*
+ * SSR should never reuse block which is checkpointed
+ * or newly invalidated.
+ */
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+ if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) {
+ se->ckpt_valid_blocks++;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
+ }
+ }
+
+ if (!f2fs_test_bit(offset, se->ckpt_valid_map)) {
se->ckpt_valid_blocks += del;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks += del;
+ }
+
+ if (__is_large_section(sbi))
+ sanity_check_valid_blocks(sbi, segno);
+
+ return del;
+}
+
+/*
+ * If releasing blocks, this function supports updating multiple consecutive blocks
+ * at one time, but please note that these consecutive blocks need to belong to the
+ * same segment.
+ */
+static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+{
+ struct seg_entry *se;
+ unsigned int segno, offset;
+ long int new_vblocks;
+
+ segno = GET_SEGNO(sbi, blkaddr);
+ if (segno == NULL_SEGNO)
+ return;
+
+ se = get_seg_entry(sbi, segno);
+ new_vblocks = se->valid_blocks + del;
+ offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+
+ f2fs_bug_on(sbi, (new_vblocks < 0 ||
+ (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
+
+ se->valid_blocks = new_vblocks;
+
+ /* Update valid block bitmap */
+ if (del > 0) {
+ del = update_sit_entry_for_alloc(sbi, se, segno, blkaddr, offset, del);
+ } else {
+ del = update_sit_entry_for_release(sbi, se, segno, blkaddr, offset, del);
+ }
__mark_sit_entry_dirty(sbi, segno);
@@ -2375,26 +2595,43 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
get_sec_entry(sbi, segno)->valid_blocks += del;
}
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len)
{
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
+ block_t addr_start = addr, addr_end = addr + len - 1;
+ unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
+ unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
f2fs_bug_on(sbi, addr == NULL_ADDR);
if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
return;
- invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
- f2fs_invalidate_compress_page(sbi, addr);
+ f2fs_invalidate_internal_cache(sbi, addr, len);
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
- update_segment_mtime(sbi, addr, 0);
- update_sit_entry(sbi, addr, -1);
+ if (seg_num == 1)
+ cnt = len;
+ else
+ cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
+
+ do {
+ update_segment_mtime(sbi, addr_start, 0);
+ update_sit_entry(sbi, addr_start, -cnt);
- /* add it into dirty seglist */
- locate_dirty_segment(sbi, segno);
+ /* add it into dirty seglist */
+ locate_dirty_segment(sbi, segno);
+
+ /* update @addr_start and @cnt and @segno */
+ addr_start = START_BLOCK(sbi, ++segno);
+ if (++i == seg_num)
+ cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
+ else
+ cnt = max_blocks;
+ } while (i <= seg_num);
up_write(&sit_i->sentry_lock);
}
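
The loop above splits the invalidated range so that each update_sit_entry() call stays within a single segment: the first chunk runs from addr to the end of its segment, the middle chunks are whole segments, and the last chunk ends at addr_end. A standalone sketch of that split (blks_per_seg stands in for sbi->blocks_per_seg; the names are illustrative, not f2fs APIs):

#include <stdio.h>

static void split_by_segment(unsigned int addr, unsigned int len,
			     unsigned int blks_per_seg)
{
	unsigned int end = addr + len - 1;
	unsigned int segno = addr / blks_per_seg;
	unsigned int last_segno = end / blks_per_seg;

	while (segno <= last_segno) {
		unsigned int seg_start = segno * blks_per_seg;
		unsigned int seg_end = seg_start + blks_per_seg - 1;
		unsigned int lo = addr > seg_start ? addr : seg_start;
		unsigned int hi = end < seg_end ? end : seg_end;

		printf("segment %u: blocks %u..%u (%u blocks)\n",
		       segno, lo, hi, hi - lo + 1);
		segno++;
	}
}

int main(void)
{
	/* e.g. 512-block segments, a 1000-block range starting at block 300 */
	split_by_segment(300, 1000, 512);
	/*
	 * segment 0: blocks 300..511   (212 blocks)
	 * segment 1: blocks 512..1023  (512 blocks)
	 * segment 2: blocks 1024..1299 (276 blocks)
	 */
	return 0;
}
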
@@ -2423,17 +2660,13 @@ bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
return is_cp;
}
-/*
- * This function should be resided under the curseg_mutex lock
- */
-static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
- struct f2fs_summary *sum)
+static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- void *addr = curseg->sum_blk;
- addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
- memcpy(addr, sum, sizeof(struct f2fs_summary));
+ if (sbi->ckpt->alloc_type[type] == SSR)
+ return BLKS_PER_SEG(sbi);
+ return curseg->next_blkoff;
}
/*
@@ -2445,15 +2678,11 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
int i, sum_in_page;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
- if (sbi->ckpt->alloc_type[i] == SSR)
- valid_sum_count += sbi->blocks_per_seg;
- else {
- if (for_ra)
- valid_sum_count += le16_to_cpu(
- F2FS_CKPT(sbi)->cur_data_blkoff[i]);
- else
- valid_sum_count += curseg_blkoff(sbi, i);
- }
+ if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
+ valid_sum_count +=
+ le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
+ else
+ valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
}
sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
@@ -2467,40 +2696,60 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
}
/*
- * Caller should put this summary page
+ * Caller should put this summary folio
*/
-struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno)
{
if (unlikely(f2fs_cp_error(sbi)))
return ERR_PTR(-EIO);
- return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
+ return f2fs_get_meta_folio_retry(sbi, GET_SUM_BLOCK(sbi, segno));
}
void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
void *src, block_t blk_addr)
{
- struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
+ struct folio *folio;
+
+ if (SUMS_PER_BLOCK == 1)
+ folio = f2fs_grab_meta_folio(sbi, blk_addr);
+ else
+ folio = f2fs_get_meta_folio_retry(sbi, blk_addr);
+
+ if (IS_ERR(folio))
+ return;
- memcpy(page_address(page), src, PAGE_SIZE);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ memcpy(folio_address(folio), src, PAGE_SIZE);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static void write_sum_page(struct f2fs_sb_info *sbi,
- struct f2fs_summary_block *sum_blk, block_t blk_addr)
+ struct f2fs_summary_block *sum_blk, unsigned int segno)
{
- f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
+ struct folio *folio;
+
+ if (SUMS_PER_BLOCK == 1)
+ return f2fs_update_meta_page(sbi, (void *)sum_blk,
+ GET_SUM_BLOCK(sbi, segno));
+
+ folio = f2fs_get_sum_folio(sbi, segno);
+ if (IS_ERR(folio))
+ return;
+
+ memcpy(SUM_BLK_PAGE_ADDR(folio, segno), sum_blk, sizeof(*sum_blk));
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static void write_current_sum_page(struct f2fs_sb_info *sbi,
int type, block_t blk_addr)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
+ struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
struct f2fs_summary_block *src = curseg->sum_blk;
struct f2fs_summary_block *dst;
- dst = (struct f2fs_summary_block *)page_address(page);
+ dst = folio_address(folio);
memset(dst, 0, PAGE_SIZE);
mutex_lock(&curseg->curseg_mutex);
@@ -2514,17 +2763,17 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
mutex_unlock(&curseg->curseg_mutex);
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static int is_next_segment_free(struct f2fs_sb_info *sbi,
- struct curseg_info *curseg, int type)
+ struct curseg_info *curseg)
{
unsigned int segno = curseg->segno + 1;
struct free_segmap_info *free_i = FREE_I(sbi);
- if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
+ if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
return !test_bit(segno, free_i->free_segmap);
return 0;
}
@@ -2533,54 +2782,93 @@ static int is_next_segment_free(struct f2fs_sb_info *sbi,
* Find a new segment from the free segments bitmap to right order
* This function should be returned with success, otherwise BUG
*/
-static void get_new_segment(struct f2fs_sb_info *sbi,
- unsigned int *newseg, bool new_sec, int dir)
+static int get_new_segment(struct f2fs_sb_info *sbi,
+ unsigned int *newseg, bool new_sec, bool pinning)
{
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno, secno, zoneno;
unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
- unsigned int left_start = hint;
+ unsigned int alloc_policy = sbi->allocate_section_policy;
+ unsigned int alloc_hint = sbi->allocate_section_hint;
bool init = true;
- int go_left = 0;
int i;
+ int ret = 0;
spin_lock(&free_i->segmap_lock);
- if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
+ if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
segno = find_next_zero_bit(free_i->free_segmap,
GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
goto got_it;
}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ /*
+ * If we format f2fs on zoned storage, let's try to get pinned sections
+ * from beginning of the storage, which should be a conventional one.
+ */
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ /* Prioritize writing to conventional zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
+ segno = 0;
+ else
+ segno = max(sbi->first_seq_zone_segno, *newseg);
+ hint = GET_SEC_FROM_SEG(sbi, segno);
+ }
+#endif
+
+ /*
+ * Prevent allocate_section_hint from exceeding MAIN_SECS()
+ * due to desynchronization.
+ */
+ if (alloc_policy != ALLOCATE_FORWARD_NOHINT &&
+ alloc_hint > MAIN_SECS(sbi))
+ alloc_hint = MAIN_SECS(sbi);
+
+ if (alloc_policy == ALLOCATE_FORWARD_FROM_HINT &&
+ hint < alloc_hint)
+ hint = alloc_hint;
+ else if (alloc_policy == ALLOCATE_FORWARD_WITHIN_HINT &&
+ hint >= alloc_hint)
+ hint = 0;
+
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
- if (secno >= MAIN_SECS(sbi)) {
- if (dir == ALLOC_RIGHT) {
- secno = find_next_zero_bit(free_i->free_secmap,
- MAIN_SECS(sbi), 0);
- f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
- } else {
- go_left = 1;
- left_start = hint - 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
+ /* Write only to sequential zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
+ hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno);
+ secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+ } else
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+ if (secno >= MAIN_SECS(sbi)) {
+ ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
+ goto out_unlock;
}
}
- if (go_left == 0)
- goto skip_left;
+#endif
- while (test_bit(left_start, free_i->free_secmap)) {
- if (left_start > 0) {
- left_start--;
- continue;
+ if (secno >= MAIN_SECS(sbi)) {
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+ if (secno >= MAIN_SECS(sbi)) {
+ ret = -ENOSPC;
+ f2fs_bug_on(sbi, !pinning);
+ goto out_unlock;
}
- left_start = find_next_zero_bit(free_i->free_secmap,
- MAIN_SECS(sbi), 0);
- f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
- break;
}
- secno = left_start;
-skip_left:
segno = GET_SEG_FROM_SEC(sbi, secno);
zoneno = GET_ZONE_FROM_SEC(sbi, secno);
@@ -2591,21 +2879,13 @@ skip_left:
goto got_it;
if (zoneno == old_zoneno)
goto got_it;
- if (dir == ALLOC_LEFT) {
- if (!go_left && zoneno + 1 >= total_zones)
- goto got_it;
- if (go_left && zoneno == 0)
- goto got_it;
- }
for (i = 0; i < NR_CURSEG_TYPE; i++)
if (CURSEG_I(sbi, i)->zone == zoneno)
break;
if (i < NR_CURSEG_TYPE) {
/* zone is in user, try another */
- if (go_left)
- hint = zoneno * sbi->secs_per_zone - 1;
- else if (zoneno + 1 >= total_zones)
+ if (zoneno + 1 >= total_zones)
hint = 0;
else
hint = (zoneno + 1) * sbi->secs_per_zone;
@@ -2614,10 +2894,26 @@ skip_left:
}
got_it:
/* set it as dirty segment in free segmap */
- f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
+ if (test_bit(segno, free_i->free_segmap)) {
+ ret = -EFSCORRUPTED;
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP);
+ goto out_unlock;
+ }
+
+ /* no free section in conventional device or conventional zone */
+ if (new_sec && pinning &&
+ f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
__set_inuse(sbi, segno);
*newseg = segno;
+out_unlock:
spin_unlock(&free_i->segmap_lock);
+
+ if (ret == -ENOSPC && !pinning)
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
+ return ret;
}
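
The hint handling at the top of get_new_segment() reduces to a small amount of bookkeeping: the configured section hint is first clamped to the number of main sections, then the search start is either pushed forward to the hint (FROM_HINT) or wrapped back to section 0 once it crosses the hint (WITHIN_HINT). A standalone sketch of just that step; the enum mirrors the names used in the patch, but the values and the helper are illustrative only:

#include <stdio.h>

enum alloc_policy {
	ALLOCATE_FORWARD_NOHINT,
	ALLOCATE_FORWARD_FROM_HINT,
	ALLOCATE_FORWARD_WITHIN_HINT,
};

static unsigned int apply_section_hint(unsigned int hint,
				       unsigned int alloc_hint,
				       unsigned int main_secs,
				       enum alloc_policy policy)
{
	/* never let a stale hint point past the last section */
	if (policy != ALLOCATE_FORWARD_NOHINT && alloc_hint > main_secs)
		alloc_hint = main_secs;

	if (policy == ALLOCATE_FORWARD_FROM_HINT && hint < alloc_hint)
		hint = alloc_hint;	/* start searching at the configured hint */
	else if (policy == ALLOCATE_FORWARD_WITHIN_HINT && hint >= alloc_hint)
		hint = 0;		/* stay below the hint: wrap to section 0 */

	return hint;
}

int main(void)
{
	printf("%u\n", apply_section_hint(3, 10, 100, ALLOCATE_FORWARD_FROM_HINT));	/* 10 */
	printf("%u\n", apply_section_hint(12, 10, 100, ALLOCATE_FORWARD_WITHIN_HINT));	/* 0 */
	printf("%u\n", apply_section_hint(12, 10, 100, ALLOCATE_FORWARD_NOHINT));	/* 12 */
	return 0;
}
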
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
@@ -2626,6 +2922,10 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
struct summary_footer *sum_footer;
unsigned short seg_type = curseg->seg_type;
+ /* only happen when get_new_segment() fails */
+ if (curseg->next_segno == NULL_SEGNO)
+ return;
+
curseg->inited = true;
curseg->segno = curseg->next_segno;
curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
@@ -2650,12 +2950,19 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
unsigned short seg_type = curseg->seg_type;
sanity_check_seg_type(sbi, seg_type);
- if (f2fs_need_rand_seg(sbi))
- return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+ if (__is_large_section(sbi)) {
+ if (f2fs_need_rand_seg(sbi)) {
+ unsigned int hint = GET_SEC_FROM_SEG(sbi, curseg->segno);
- /* if segs_per_sec is large than 1, we need to keep original policy. */
- if (__is_large_section(sbi))
+ if (GET_SEC_FROM_SEG(sbi, curseg->segno + 1) != hint)
+ return curseg->segno;
+ return get_random_u32_inclusive(curseg->segno + 1,
+ GET_SEG_FROM_SEC(sbi, hint + 1) - 1);
+ }
return curseg->segno;
+ } else if (f2fs_need_rand_seg(sbi)) {
+ return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
+ }
/* inmem log may not locate on any segment after mount */
if (!curseg->inited)
@@ -2664,8 +2971,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return 0;
- if (test_opt(sbi, NOHEAP) &&
- (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
+ if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
return 0;
if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
@@ -2678,34 +2984,42 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
return curseg->segno;
}
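+/*
+ * Put a log back into the "no segment allocated" state; callers detect
+ * this via segno == NULL_SEGNO after a failed get_new_segment().
+ */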
+static void reset_curseg_fields(struct curseg_info *curseg)
+{
+ curseg->inited = false;
+ curseg->segno = NULL_SEGNO;
+ curseg->next_segno = 0;
+}
+
/*
* Allocate a current working segment.
* This function always allocates a free segment in LFS manner.
*/
-static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
+static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned short seg_type = curseg->seg_type;
unsigned int segno = curseg->segno;
- int dir = ALLOC_LEFT;
+ bool pinning = type == CURSEG_COLD_DATA_PINNED;
+ int ret;
if (curseg->inited)
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, segno));
- if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
- dir = ALLOC_RIGHT;
-
- if (test_opt(sbi, NOHEAP))
- dir = ALLOC_RIGHT;
+ write_sum_page(sbi, curseg->sum_blk, segno);
segno = __get_next_segno(sbi, type);
- get_new_segment(sbi, &segno, new_sec, dir);
+ ret = get_new_segment(sbi, &segno, new_sec, pinning);
+ if (ret) {
+ if (ret == -ENOSPC)
+ reset_curseg_fields(curseg);
+ return ret;
+ }
+
curseg->next_segno = segno;
reset_curseg(sbi, type, 1);
curseg->alloc_type = LFS;
if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
curseg->fragment_remained_chunk =
- prandom_u32() % sbi->max_fragment_chunk + 1;
+ get_random_u32_inclusive(1, sbi->max_fragment_chunk);
+ return 0;
}
static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2721,55 +3035,34 @@ static int __next_free_blkoff(struct f2fs_sb_info *sbi,
for (i = 0; i < entries; i++)
target_map[i] = ckpt_map[i] | cur_map[i];
- return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
+ return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
}
-/*
- * If a segment is written by LFS manner, next block offset is just obtained
- * by increasing the current block offset. However, if a segment is written by
- * SSR manner, next block offset obtained by calling __next_free_blkoff
- */
-static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
- struct curseg_info *seg)
+static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
+ struct curseg_info *seg)
{
- if (seg->alloc_type == SSR) {
- seg->next_blkoff =
- __next_free_blkoff(sbi, seg->segno,
- seg->next_blkoff + 1);
- } else {
- seg->next_blkoff++;
- if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
- /* To allocate block chunks in different sizes, use random number */
- if (--seg->fragment_remained_chunk <= 0) {
- seg->fragment_remained_chunk =
- prandom_u32() % sbi->max_fragment_chunk + 1;
- seg->next_blkoff +=
- prandom_u32() % sbi->max_fragment_hole + 1;
- }
- }
- }
+ return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
}
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
{
- return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
+ return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
}
/*
* This function always allocates a used segment(from dirty seglist) by SSR
* manner, so it should recover the existing segment information of valid blocks
*/
-static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
+static int change_curseg(struct f2fs_sb_info *sbi, int type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int new_segno = curseg->next_segno;
struct f2fs_summary_block *sum_node;
- struct page *sum_page;
+ struct folio *sum_folio;
- if (flush)
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, curseg->segno));
+ if (curseg->inited)
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
__set_test_and_inuse(sbi, new_segno);
@@ -2782,25 +3075,27 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
curseg->alloc_type = SSR;
curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
- sum_page = f2fs_get_sum_page(sbi, new_segno);
- if (IS_ERR(sum_page)) {
+ sum_folio = f2fs_get_sum_folio(sbi, new_segno);
+ if (IS_ERR(sum_folio)) {
/* GC won't be able to use stale summary pages by cp_error */
memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
- return;
+ return PTR_ERR(sum_folio);
}
- sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ sum_node = SUM_BLK_PAGE_ADDR(sum_folio, new_segno);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
- f2fs_put_page(sum_page, 1);
+ f2fs_folio_put(sum_folio, true);
+ return 0;
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
int alloc_mode, unsigned long long age);
-static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
+static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
int target_type, int alloc_mode,
unsigned long long age)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
+ int ret = 0;
curseg->seg_type = target_type;
@@ -2808,38 +3103,62 @@ static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
curseg->seg_type = se->type;
- change_curseg(sbi, type, true);
+ ret = change_curseg(sbi, type);
} else {
/* allocate cold segment by default */
curseg->seg_type = CURSEG_COLD_DATA;
- new_curseg(sbi, type, true);
+ ret = new_curseg(sbi, type, true);
}
stat_inc_seg_type(sbi, curseg);
+ return ret;
}
-static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
+static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi, bool force)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
+ int ret = 0;
- if (!sbi->am.atgc_enabled)
- return;
+ if (!sbi->am.atgc_enabled && !force)
+ return 0;
- down_read(&SM_I(sbi)->curseg_lock);
+ f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
down_write(&SIT_I(sbi)->sentry_lock);
- get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
+ ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
+ CURSEG_COLD_DATA, SSR, 0);
up_write(&SIT_I(sbi)->sentry_lock);
mutex_unlock(&curseg->curseg_mutex);
- up_read(&SM_I(sbi)->curseg_lock);
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
+ return ret;
+}
+int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
+{
+ return __f2fs_init_atgc_curseg(sbi, false);
}
-void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
+
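+/*
+ * Re-enable ATGC after mount once the checkpoint's elapsed_time reaches
+ * the configured age threshold, forcing allocation of the ATGC log.
+ */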
+int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi)
{
- __f2fs_init_atgc_curseg(sbi);
+ int ret;
+
+ if (!test_opt(sbi, ATGC))
+ return 0;
+ if (sbi->am.atgc_enabled)
+ return 0;
+ if (le64_to_cpu(F2FS_CKPT(sbi)->elapsed_time) <
+ sbi->am.age_threshold)
+ return 0;
+
+ ret = __f2fs_init_atgc_curseg(sbi, true);
+ if (!ret) {
+ sbi->am.atgc_enabled = true;
+ f2fs_info(sbi, "reenabled age threshold GC");
+ }
+ return ret;
}
static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
@@ -2851,8 +3170,7 @@ static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
goto out;
if (get_valid_blocks(sbi, curseg->segno, false)) {
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
} else {
mutex_lock(&DIRTY_I(sbi)->seglist_lock);
__set_test_and_free(sbi, curseg->segno, true);
@@ -2899,7 +3217,6 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
int alloc_mode, unsigned long long age)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
unsigned segno = NULL_SEGNO;
unsigned short seg_type = curseg->seg_type;
int i, cnt;
@@ -2908,7 +3225,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
sanity_check_seg_type(sbi, seg_type);
/* f2fs_need_SSR() already forces to do this */
- if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -2935,7 +3253,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == seg_type)
continue;
- if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -2952,40 +3271,29 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
return 0;
}
-/*
- * flush out current segment and replace it with new segment
- * This function should be returned with success, otherwise BUG
- */
-static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
- int type, bool force)
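+/*
+ * Decide whether the next allocation must open a fresh segment in LFS
+ * manner; returns false only when get_ssr_segment() found an SSR victim
+ * and recorded it in curseg->next_segno.
+ */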
+static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- if (force)
- new_curseg(sbi, type, true);
- else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
- curseg->seg_type == CURSEG_WARM_NODE)
- new_curseg(sbi, type, false);
- else if (curseg->alloc_type == LFS &&
- is_next_segment_free(sbi, curseg, type) &&
- likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
- new_curseg(sbi, type, false);
- else if (f2fs_need_SSR(sbi) &&
- get_ssr_segment(sbi, type, SSR, 0))
- change_curseg(sbi, type, true);
- else
- new_curseg(sbi, type, false);
-
- stat_inc_seg_type(sbi, curseg);
+ if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
+ curseg->seg_type == CURSEG_WARM_NODE)
+ return true;
+ if (curseg->alloc_type == LFS && is_next_segment_free(sbi, curseg) &&
+ likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return true;
+ if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
+ return true;
+ return false;
}
-void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
unsigned int start, unsigned int end)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int segno;
+ int ret = 0;
- down_read(&SM_I(sbi)->curseg_lock);
+ f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
down_write(&SIT_I(sbi)->sentry_lock);
@@ -2994,9 +3302,9 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
goto unlock;
if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
- change_curseg(sbi, type, true);
+ ret = change_curseg(sbi, type);
else
- new_curseg(sbi, type, true);
+ ret = new_curseg(sbi, type, true);
stat_inc_seg_type(sbi, curseg);
@@ -3009,60 +3317,87 @@ unlock:
type, segno, curseg->segno);
mutex_unlock(&curseg->curseg_mutex);
- up_read(&SM_I(sbi)->curseg_lock);
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
+ return ret;
}
-static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
+static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
bool new_sec, bool force)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int old_segno;
+ int err = 0;
- if (!curseg->inited)
- goto alloc;
+ if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
+ goto allocate;
- if (force || curseg->next_blkoff ||
- get_valid_blocks(sbi, curseg->segno, new_sec))
- goto alloc;
+ if (!force && curseg->inited &&
+ !curseg->next_blkoff &&
+ !get_valid_blocks(sbi, curseg->segno, new_sec) &&
+ !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
+ return 0;
- if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
- return;
-alloc:
+allocate:
old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+ err = new_curseg(sbi, type, true);
+ if (err)
+ return err;
+ stat_inc_seg_type(sbi, curseg);
locate_dirty_segment(sbi, old_segno);
+ return 0;
}
-static void __allocate_new_section(struct f2fs_sb_info *sbi,
- int type, bool force)
+int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
{
- __allocate_new_segment(sbi, type, true, force);
-}
+ int ret;
-void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
-{
- down_read(&SM_I(sbi)->curseg_lock);
+ f2fs_down_read(&SM_I(sbi)->curseg_lock);
down_write(&SIT_I(sbi)->sentry_lock);
- __allocate_new_section(sbi, type, force);
+ ret = __allocate_new_segment(sbi, type, true, force);
up_write(&SIT_I(sbi)->sentry_lock);
- up_read(&SM_I(sbi)->curseg_lock);
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
+
+ return ret;
+}
+
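+/*
+ * Reserve a section for pinned (immovable) data. On zoned devices,
+ * -EAGAIN means no conventional-zone section was available, so garbage
+ * collect the conventional area once and retry.
+ */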
+int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
+{
+ int err;
+ bool gc_required = true;
+
+retry:
+ f2fs_lock_op(sbi);
+ err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
+ f2fs_unlock_op(sbi);
+
+ if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
+ f2fs_down_write(&sbi->gc_lock);
+ err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1,
+ true, ZONED_PIN_SEC_REQUIRED_COUNT);
+ f2fs_up_write(&sbi->gc_lock);
+
+ gc_required = false;
+ if (!err)
+ goto retry;
+ }
+
+ return err;
}
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
{
int i;
+ int err = 0;
- down_read(&SM_I(sbi)->curseg_lock);
+ f2fs_down_read(&SM_I(sbi)->curseg_lock);
down_write(&SIT_I(sbi)->sentry_lock);
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
- __allocate_new_segment(sbi, i, false, false);
+ err += __allocate_new_segment(sbi, i, false, false);
up_write(&SIT_I(sbi)->sentry_lock);
- up_read(&SM_I(sbi)->curseg_lock);
-}
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
-static const struct segment_allocation default_salloc_ops = {
- .allocate_segment = allocate_segment_by_default,
-};
+ return err;
+}
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct cp_control *cpc)
@@ -3100,24 +3435,20 @@ next:
mutex_lock(&dcc->cmd_lock);
if (unlikely(dcc->rbtree_check))
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
- &dcc->root, false));
-
- dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
- NULL, start,
- (struct rb_entry **)&prev_dc,
- (struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true, NULL);
+ f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
+
+ dc = __lookup_discard_cmd_ret(&dcc->root, start,
+ &prev_dc, &next_dc, &insert_p, &insert_parent);
if (!dc)
dc = next_dc;
blk_start_plug(&plug);
- while (dc && dc->lstart <= end) {
+ while (dc && dc->di.lstart <= end) {
struct rb_node *node;
int err = 0;
- if (dc->len < dpolicy->granularity)
+ if (dc->di.len < dpolicy->granularity)
goto skip;
if (dc->state != D_PREP) {
@@ -3128,7 +3459,7 @@ next:
err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
if (issued >= dpolicy->max_requests) {
- start = dc->lstart + dc->len;
+ start = dc->di.lstart + dc->di.len;
if (err)
__remove_discard_cmd(sbi, dc);
@@ -3136,7 +3467,7 @@ next:
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
trimmed += __wait_all_discard_cmd(sbi, NULL);
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ f2fs_schedule_timeout(DEFAULT_DISCARD_INTERVAL);
goto next;
}
skip:
@@ -3183,8 +3514,8 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
if (need_align) {
- start_segno = rounddown(start_segno, sbi->segs_per_sec);
- end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+ start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
+ end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
}
cpc.reason = CP_DISCARD;
@@ -3195,9 +3526,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (sbi->discard_blks == 0)
goto out;
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
if (err)
goto out;
@@ -3225,15 +3557,14 @@ out:
return err;
}
-static bool __has_curseg_space(struct f2fs_sb_info *sbi,
- struct curseg_info *curseg)
+int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint)
{
- return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
- curseg->segno);
-}
+ if (F2FS_OPTION(sbi).active_logs == 2)
+ return CURSEG_HOT_DATA;
+ else if (F2FS_OPTION(sbi).active_logs == 4)
+ return CURSEG_COLD_DATA;
-int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
-{
+ /* active_logs == 6 */
switch (hint) {
case WRITE_LIFE_SHORT:
return CURSEG_HOT_DATA;
@@ -3244,57 +3575,23 @@ int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
}
}
-/* This returns write hints for each segment type. This hints will be
- * passed down to block layer. There are mapping tables which depend on
- * the mount option 'whint_mode'.
- *
- * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
- *
- * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
- *
- * User F2FS Block
- * ---- ---- -----
- * META WRITE_LIFE_NOT_SET
- * HOT_NODE "
- * WARM_NODE "
- * COLD_NODE "
- * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
- * extension list " "
- *
- * -- buffered io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE " "
- * WRITE_LIFE_MEDIUM " "
- * WRITE_LIFE_LONG " "
- *
- * -- direct io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE " WRITE_LIFE_NONE
- * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
- * WRITE_LIFE_LONG " WRITE_LIFE_LONG
- *
- * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
+/*
+ * This returns write hints for each segment type. These hints will be
+ * passed down to the block layer as below by default.
*
* User F2FS Block
* ---- ---- -----
- * META WRITE_LIFE_MEDIUM;
- * HOT_NODE WRITE_LIFE_NOT_SET
- * WARM_NODE "
- * COLD_NODE WRITE_LIFE_NONE
+ * META WRITE_LIFE_NONE|REQ_META
+ * HOT_NODE WRITE_LIFE_NONE
+ * WARM_NODE WRITE_LIFE_MEDIUM
+ * COLD_NODE WRITE_LIFE_LONG
* ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
* extension list " "
*
* -- buffered io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
- * WRITE_LIFE_NONE " "
- * WRITE_LIFE_MEDIUM " "
- * WRITE_LIFE_LONG " "
+ * COLD_DATA WRITE_LIFE_EXTREME
+ * HOT_DATA WRITE_LIFE_SHORT
+ * WARM_DATA WRITE_LIFE_NOT_SET
*
* -- direct io
* WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
@@ -3304,39 +3601,37 @@ int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
* WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
* WRITE_LIFE_LONG " WRITE_LIFE_LONG
*/
-
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp)
{
- if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
- if (type == DATA) {
- if (temp == WARM)
- return WRITE_LIFE_NOT_SET;
- else if (temp == HOT)
- return WRITE_LIFE_SHORT;
- else if (temp == COLD)
- return WRITE_LIFE_EXTREME;
- } else {
+ switch (type) {
+ case DATA:
+ switch (temp) {
+ case WARM:
return WRITE_LIFE_NOT_SET;
+ case HOT:
+ return WRITE_LIFE_SHORT;
+ case COLD:
+ return WRITE_LIFE_EXTREME;
+ default:
+ return WRITE_LIFE_NONE;
}
- } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
- if (type == DATA) {
- if (temp == WARM)
- return WRITE_LIFE_LONG;
- else if (temp == HOT)
- return WRITE_LIFE_SHORT;
- else if (temp == COLD)
- return WRITE_LIFE_EXTREME;
- } else if (type == NODE) {
- if (temp == WARM || temp == HOT)
- return WRITE_LIFE_NOT_SET;
- else if (temp == COLD)
- return WRITE_LIFE_NONE;
- } else if (type == META) {
+ case NODE:
+ switch (temp) {
+ case WARM:
return WRITE_LIFE_MEDIUM;
+ case HOT:
+ return WRITE_LIFE_NONE;
+ case COLD:
+ return WRITE_LIFE_LONG;
+ default:
+ return WRITE_LIFE_NONE;
}
+ case META:
+ return WRITE_LIFE_NONE;
+ default:
+ return WRITE_LIFE_NONE;
}
- return WRITE_LIFE_NOT_SET;
}
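+/*
+ * Illustrative sketch only (not part of this change): roughly how the
+ * hint computed above is typically consumed when building a write bio.
+ * The helper below is hypothetical and assumes a kernel where struct bio
+ * still carries a bi_write_hint member.
+ */
+#if 0
+static void example_set_bio_write_hint(struct f2fs_io_info *fio, struct bio *bio)
+{
+ /* fio->temp was chosen by __get_segment_type() for this I/O */
+ bio->bi_write_hint = f2fs_io_type_to_rw_hint(fio->sbi, fio->type, fio->temp);
+}
+#endif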
static int __get_segment_type_2(struct f2fs_io_info *fio)
@@ -3350,24 +3645,42 @@ static int __get_segment_type_2(struct f2fs_io_info *fio)
static int __get_segment_type_4(struct f2fs_io_info *fio)
{
if (fio->type == DATA) {
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
if (S_ISDIR(inode->i_mode))
return CURSEG_HOT_DATA;
else
return CURSEG_COLD_DATA;
} else {
- if (IS_DNODE(fio->page) && is_cold_node(fio->page))
+ if (IS_DNODE(fio->folio) && is_cold_node(fio->folio))
return CURSEG_WARM_NODE;
else
return CURSEG_COLD_NODE;
}
}
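+/*
+ * Map a data block's age from the age extent cache to a temperature:
+ * up to hot_data_age_threshold -> hot, up to warm_data_age_threshold ->
+ * warm, older -> cold. NO_CHECK_TYPE means no usable age info was found
+ * and the caller should fall back to its other heuristics.
+ */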
+static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_info ei = {};
+
+ if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
+ if (!ei.age)
+ return NO_CHECK_TYPE;
+ if (ei.age <= sbi->hot_data_age_threshold)
+ return CURSEG_HOT_DATA;
+ if (ei.age <= sbi->warm_data_age_threshold)
+ return CURSEG_WARM_DATA;
+ return CURSEG_COLD_DATA;
+ }
+ return NO_CHECK_TYPE;
+}
+
static int __get_segment_type_6(struct f2fs_io_info *fio)
{
if (fio->type == DATA) {
- struct inode *inode = fio->page->mapping->host;
+ struct inode *inode = fio_inode(fio);
+ int type;
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
return CURSEG_COLD_DATA_PINNED;
@@ -3375,30 +3688,64 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (page_private_gcing(fio->page)) {
if (fio->sbi->am.atgc_enabled &&
(fio->io_type == FS_DATA_IO) &&
- (fio->sbi->gc_mode != GC_URGENT_HIGH))
+ (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
+ __is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !is_inode_flag_set(inode, FI_OPU_WRITE))
return CURSEG_ALL_DATA_ATGC;
else
return CURSEG_COLD_DATA;
}
if (file_is_cold(inode) || f2fs_need_compress_data(inode))
return CURSEG_COLD_DATA;
+
+ type = __get_age_segment_type(inode, fio->folio->index);
+ if (type != NO_CHECK_TYPE)
+ return type;
+
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- f2fs_is_atomic_file(inode) ||
- f2fs_is_volatile_file(inode))
+ f2fs_is_cow_file(inode) ||
+ is_inode_flag_set(inode, FI_NEED_IPU))
return CURSEG_HOT_DATA;
- return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
+ return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
+ inode->i_write_hint);
} else {
- if (IS_DNODE(fio->page))
- return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
+ if (IS_DNODE(fio->folio))
+ return is_cold_node(fio->folio) ? CURSEG_WARM_NODE :
CURSEG_HOT_NODE;
return CURSEG_COLD_NODE;
}
}
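+/*
+ * Translate a log's current seg_type into the HOT/WARM/COLD temperature
+ * used to pick the matching write_io queue.
+ */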
+enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi,
+ enum log_type type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ enum temp_type temp = COLD;
+
+ switch (curseg->seg_type) {
+ case CURSEG_HOT_NODE:
+ case CURSEG_HOT_DATA:
+ temp = HOT;
+ break;
+ case CURSEG_WARM_NODE:
+ case CURSEG_WARM_DATA:
+ temp = WARM;
+ break;
+ case CURSEG_COLD_NODE:
+ case CURSEG_COLD_DATA:
+ temp = COLD;
+ break;
+ default:
+ f2fs_bug_on(sbi, 1);
+ }
+
+ return temp;
+}
+
static int __get_segment_type(struct f2fs_io_info *fio)
{
- int type = 0;
+ enum log_type type = CURSEG_HOT_DATA;
switch (F2FS_OPTION(fio->sbi).active_logs) {
case 2:
@@ -3414,16 +3761,25 @@ static int __get_segment_type(struct f2fs_io_info *fio)
f2fs_bug_on(fio->sbi, true);
}
- if (IS_HOT(type))
- fio->temp = HOT;
- else if (IS_WARM(type))
- fio->temp = WARM;
- else
- fio->temp = COLD;
+ fio->temp = f2fs_get_segment_temp(fio->sbi, type);
+
return type;
}
-void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
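+/*
+ * For FS_MODE_FRAGMENT_BLK: once the current random-sized chunk is used
+ * up, pick a new chunk length and skip a random-sized hole.
+ */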
+static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
+ struct curseg_info *seg)
+{
+ /* To allocate block chunks in different sizes, use random number */
+ if (--seg->fragment_remained_chunk > 0)
+ return;
+
+ seg->fragment_remained_chunk =
+ get_random_u32_inclusive(1, sbi->max_fragment_chunk);
+ seg->next_blkoff +=
+ get_random_u32_inclusive(1, sbi->max_fragment_hole);
+}
+
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct folio *folio,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio)
@@ -3433,12 +3789,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
unsigned long long old_mtime;
bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
struct seg_entry *se = NULL;
+ bool segment_full = false;
+ int ret = 0;
- down_read(&SM_I(sbi)->curseg_lock);
+ f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
down_write(&sit_i->sentry_lock);
+ if (curseg->segno == NULL_SEGNO) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+
if (from_gc) {
f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
@@ -3447,19 +3810,20 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
}
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
- f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
+ f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
f2fs_wait_discard_bio(sbi, *new_blkaddr);
- /*
- * __add_sum_entry should be resided under the curseg_mutex
- * because, this function updates a summary entry in the
- * current summary block.
- */
- __add_sum_entry(sbi, type, sum);
-
- __refresh_next_blkoff(sbi, curseg);
-
+ curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
+ if (curseg->alloc_type == SSR) {
+ curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
+ } else {
+ curseg->next_blkoff++;
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
+ f2fs_randomize_chunk(sbi, curseg);
+ }
+ if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
+ segment_full = true;
stat_inc_block_count(sbi, curseg);
if (from_gc) {
@@ -3475,16 +3839,36 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
* since SSR needs latest valid block information.
*/
update_sit_entry(sbi, *new_blkaddr, 1);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
- update_sit_entry(sbi, old_blkaddr, -1);
+ update_sit_entry(sbi, old_blkaddr, -1);
+
+ /*
+ * If the current segment is full, flush it out and replace it with a
+ * new segment.
+ */
+ if (segment_full) {
+ if (type == CURSEG_COLD_DATA_PINNED &&
+ !((curseg->segno + 1) % sbi->segs_per_sec)) {
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
+ reset_curseg_fields(curseg);
+ goto skip_new_segment;
+ }
- if (!__has_curseg_space(sbi, curseg)) {
- if (from_gc)
- get_atssr_segment(sbi, type, se->type,
+ if (from_gc) {
+ ret = get_atssr_segment(sbi, type, se->type,
AT_SSR, se->mtime);
- else
- sit_i->s_ops->allocate_segment(sbi, type, false);
+ } else {
+ if (need_new_seg(sbi, type))
+ ret = new_curseg(sbi, type, false);
+ else
+ ret = change_curseg(sbi, type);
+ stat_inc_seg_type(sbi, curseg);
+ }
+
+ if (ret)
+ goto out_err;
}
+
+skip_new_segment:
/*
* segment dirty status should be updated after segment allocation,
* so we just need to update status only one time after previous
@@ -3493,22 +3877,27 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
+ if (IS_DATASEG(curseg->seg_type)) {
+ unsigned long long new_val;
+
+ new_val = atomic64_inc_return(&sbi->allocated_data_blocks);
+ if (unlikely(new_val == ULLONG_MAX))
+ atomic64_set(&sbi->allocated_data_blocks, 0);
+ }
+
up_write(&sit_i->sentry_lock);
- if (page && IS_NODESEG(type)) {
- fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
+ if (folio && IS_NODESEG(curseg->seg_type)) {
+ fill_node_footer_blkaddr(folio, NEXT_FREE_BLKADDR(sbi, curseg));
- f2fs_inode_chksum_set(sbi, page);
+ f2fs_inode_chksum_set(sbi, folio);
}
if (fio) {
struct f2fs_bio_info *io;
- if (F2FS_IO_ALIGNED(sbi))
- fio->retry = false;
-
INIT_LIST_HEAD(&fio->list);
- fio->in_list = true;
+ fio->in_list = 1;
io = sbi->write_io[fio->type] + fio->temp;
spin_lock(&io->io_lock);
list_add_tail(&fio->list, &io->io_list);
@@ -3516,8 +3905,15 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
}
mutex_unlock(&curseg->curseg_mutex);
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
+ return 0;
- up_read(&SM_I(sbi)->curseg_lock);
+out_err:
+ *new_blkaddr = NULL_ADDR;
+ up_write(&sit_i->sentry_lock);
+ mutex_unlock(&curseg->curseg_mutex);
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
+ return ret;
}
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
@@ -3547,36 +3943,74 @@ void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
}
}
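+/*
+ * Persistent logs map 1:1 to their segment type; the in-memory pinned and
+ * ATGC logs both write cold data segments.
+ */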
+static int log_type_to_seg_type(enum log_type type)
+{
+ int seg_type = CURSEG_COLD_DATA;
+
+ switch (type) {
+ case CURSEG_HOT_DATA:
+ case CURSEG_WARM_DATA:
+ case CURSEG_COLD_DATA:
+ case CURSEG_HOT_NODE:
+ case CURSEG_WARM_NODE:
+ case CURSEG_COLD_NODE:
+ seg_type = (int)type;
+ break;
+ case CURSEG_COLD_DATA_PINNED:
+ case CURSEG_ALL_DATA_ATGC:
+ seg_type = CURSEG_COLD_DATA;
+ break;
+ default:
+ break;
+ }
+ return seg_type;
+}
+
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
- int type = __get_segment_type(fio);
- bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
+ struct folio *folio = fio->folio;
+ enum log_type type = __get_segment_type(fio);
+ int seg_type = log_type_to_seg_type(type);
+ bool keep_order = (f2fs_lfs_mode(fio->sbi) &&
+ seg_type == CURSEG_COLD_DATA);
+ int err;
if (keep_order)
- down_read(&fio->sbi->io_order_lock);
-reallocate:
- f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+ f2fs_down_read(&fio->sbi->io_order_lock);
+
+ err = f2fs_allocate_data_block(fio->sbi, folio, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio);
- if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(fio->sbi),
- fio->old_blkaddr, fio->old_blkaddr);
- f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
+ if (unlikely(err)) {
+ f2fs_err_ratelimited(fio->sbi,
+ "%s Failed to allocate data block, ino:%u, index:%lu, type:%d, old_blkaddr:0x%x, new_blkaddr:0x%x, err:%d",
+ __func__, fio->ino, folio->index, type,
+ fio->old_blkaddr, fio->new_blkaddr, err);
+ if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
+ fscrypt_finalize_bounce_page(&fio->encrypted_page);
+ folio_end_writeback(folio);
+ if (f2fs_in_warm_node_list(fio->sbi, folio))
+ f2fs_del_fsync_node_entry(fio->sbi, folio);
+ f2fs_bug_on(fio->sbi, !is_set_ckpt_flags(fio->sbi,
+ CP_ERROR_FLAG));
+ goto out;
}
+ f2fs_bug_on(fio->sbi, !f2fs_is_valid_blkaddr_raw(fio->sbi,
+ fio->new_blkaddr, DATA_GENERIC_ENHANCE));
+
+ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+ f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
+
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
- if (fio->retry) {
- fio->old_blkaddr = fio->new_blkaddr;
- goto reallocate;
- }
f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
-
+out:
if (keep_order)
- up_read(&fio->sbi->io_order_lock);
+ f2fs_up_read(&fio->sbi->io_order_lock);
}
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type)
{
struct f2fs_io_info fio = {
@@ -3585,22 +4019,21 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
.temp = HOT,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
- .old_blkaddr = page->index,
- .new_blkaddr = page->index,
- .page = page,
+ .old_blkaddr = folio->index,
+ .new_blkaddr = folio->index,
+ .folio = folio,
.encrypted_page = NULL,
- .in_list = false,
+ .in_list = 0,
};
- if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+ if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
fio.op_flags &= ~REQ_META;
- set_page_writeback(page);
- ClearPageError(page);
+ folio_start_writeback(folio);
f2fs_submit_page_write(&fio);
- stat_inc_meta_count(sbi, page->index);
- f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
+ stat_inc_meta_count(sbi, folio->index);
+ f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
}
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
@@ -3610,7 +4043,7 @@ void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
set_summary(&sum, nid, 0, 0);
do_write_page(&sum, fio);
- f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
}
void f2fs_outplace_write_data(struct dnode_of_data *dn,
@@ -3620,11 +4053,13 @@ void f2fs_outplace_write_data(struct dnode_of_data *dn,
struct f2fs_summary sum;
f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
+ if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
+ f2fs_update_age_extent_cache(dn);
set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
do_write_page(&sum, fio);
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
- f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
}
int f2fs_inplace_write_data(struct f2fs_io_info *fio)
@@ -3644,6 +4079,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
__func__, segno);
err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
goto drop_bio;
}
@@ -3652,19 +4088,20 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
- invalidate_mapping_pages(META_MAPPING(sbi),
- fio->new_blkaddr, fio->new_blkaddr);
+ if (fio->meta_gc)
+ f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
stat_inc_inplace_blocks(fio->sbi);
- if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
+ if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
err = f2fs_merge_page_bio(fio);
else
err = f2fs_submit_page_bio(fio);
if (!err) {
f2fs_update_device_state(fio->sbi, fio->ino,
fio->new_blkaddr, 1);
- f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio->sbi, fio_inode(fio),
+ fio->io_type, F2FS_BLKSIZE);
}
return err;
@@ -3708,18 +4145,18 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
se = get_seg_entry(sbi, segno);
type = se->type;
- down_write(&SM_I(sbi)->curseg_lock);
+ f2fs_down_write(&SM_I(sbi)->curseg_lock);
if (!recover_curseg) {
/* for recovery flow */
- if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
+ if (se->valid_blocks == 0 && !is_curseg(sbi, segno)) {
if (old_blkaddr == NULL_ADDR)
type = CURSEG_COLD_DATA;
else
type = CURSEG_WARM_DATA;
}
} else {
- if (IS_CURSEG(sbi, segno)) {
+ if (is_curseg(sbi, segno)) {
/* se->type is volatile as SSR allocation */
type = __f2fs_get_curseg(sbi, segno);
f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
@@ -3728,8 +4165,8 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
}
- f2fs_bug_on(sbi, !IS_DATASEG(type));
curseg = CURSEG_I(sbi, type);
+ f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type));
mutex_lock(&curseg->curseg_mutex);
down_write(&sit_i->sentry_lock);
@@ -3741,11 +4178,12 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
- change_curseg(sbi, type, true);
+ if (change_curseg(sbi, type))
+ goto out_unlock;
}
curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
- __add_sum_entry(sbi, type, sum);
+ curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
if (!recover_curseg || recover_newaddr) {
if (!from_gc)
@@ -3753,9 +4191,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
update_sit_entry(sbi, new_blkaddr, 1);
}
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(sbi),
- old_blkaddr, old_blkaddr);
- f2fs_invalidate_compress_page(sbi, old_blkaddr);
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
if (!from_gc)
update_segment_mtime(sbi, old_blkaddr, 0);
update_sit_entry(sbi, old_blkaddr, -1);
@@ -3769,15 +4205,17 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (recover_curseg) {
if (old_cursegno != curseg->segno) {
curseg->next_segno = old_cursegno;
- change_curseg(sbi, type, true);
+ if (change_curseg(sbi, type))
+ goto out_unlock;
}
curseg->next_blkoff = old_blkoff;
curseg->alloc_type = old_alloc_type;
}
+out_unlock:
up_write(&sit_i->sentry_lock);
mutex_unlock(&curseg->curseg_mutex);
- up_write(&SM_I(sbi)->curseg_lock);
+ f2fs_up_write(&SM_I(sbi)->curseg_lock);
}
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
@@ -3795,21 +4233,21 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
f2fs_update_data_blkaddr(dn, new_addr);
}
-void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered, bool locked)
+void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type,
+ bool ordered, bool locked)
{
- if (PageWriteback(page)) {
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ if (folio_test_writeback(folio)) {
+ struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
/* submit cached LFS IO */
- f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
- /* sbumit cached IPU IO */
- f2fs_submit_merged_ipu_write(sbi, NULL, page);
+ f2fs_submit_merged_write_cond(sbi, NULL, folio, 0, type);
+ /* submit cached IPU IO */
+ f2fs_submit_merged_ipu_write(sbi, NULL, folio);
if (ordered) {
- wait_on_page_writeback(page);
- f2fs_bug_on(sbi, locked && PageWriteback(page));
+ folio_wait_writeback(folio);
+ f2fs_bug_on(sbi, locked && folio_test_writeback(folio));
} else {
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
}
}
}
@@ -3817,28 +4255,34 @@ void f2fs_wait_on_page_writeback(struct page *page,
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *cpage;
+ struct folio *cfolio;
- if (!f2fs_post_read_required(inode))
+ if (!f2fs_meta_inode_gc_required(inode))
return;
if (!__is_valid_data_blkaddr(blkaddr))
return;
- cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
- if (cpage) {
- f2fs_wait_on_page_writeback(cpage, DATA, true, true);
- f2fs_put_page(cpage, 1);
+ cfolio = filemap_lock_folio(META_MAPPING(sbi), blkaddr);
+ if (!IS_ERR(cfolio)) {
+ f2fs_folio_wait_writeback(cfolio, DATA, true, true);
+ f2fs_folio_put(cfolio, true);
}
}
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
block_t i;
+ if (!f2fs_meta_inode_gc_required(inode))
+ return;
+
for (i = 0; i < len; i++)
f2fs_wait_on_block_writeback(inode, blkaddr + i);
+
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
@@ -3846,16 +4290,16 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
unsigned char *kaddr;
- struct page *page;
+ struct folio *folio;
block_t start;
int i, j, offset;
start = start_sum_block(sbi);
- page = f2fs_get_meta_page(sbi, start++);
- if (IS_ERR(page))
- return PTR_ERR(page);
- kaddr = (unsigned char *)page_address(page);
+ folio = f2fs_get_meta_folio(sbi, start++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ kaddr = folio_address(folio);
/* Step 1: restore nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -3880,7 +4324,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
seg_i->next_blkoff = blk_off;
if (seg_i->alloc_type == SSR)
- blk_off = sbi->blocks_per_seg;
+ blk_off = BLKS_PER_SEG(sbi);
for (j = 0; j < blk_off; j++) {
struct f2fs_summary *s;
@@ -3892,17 +4336,16 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
SUM_FOOTER_SIZE)
continue;
- f2fs_put_page(page, 1);
- page = NULL;
+ f2fs_folio_put(folio, true);
- page = f2fs_get_meta_page(sbi, start++);
- if (IS_ERR(page))
- return PTR_ERR(page);
- kaddr = (unsigned char *)page_address(page);
+ folio = f2fs_get_meta_folio(sbi, start++);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ kaddr = folio_address(folio);
offset = 0;
}
}
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -3911,7 +4354,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct f2fs_summary_block *sum;
struct curseg_info *curseg;
- struct page *new;
+ struct folio *new;
unsigned short blk_off;
unsigned int segno = 0;
block_t blk_addr = 0;
@@ -3938,17 +4381,17 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
blk_addr = GET_SUM_BLOCK(sbi, segno);
}
- new = f2fs_get_meta_page(sbi, blk_addr);
+ new = f2fs_get_meta_folio(sbi, blk_addr);
if (IS_ERR(new))
return PTR_ERR(new);
- sum = (struct f2fs_summary_block *)page_address(new);
+ sum = folio_address(new);
if (IS_NODESEG(type)) {
if (__exist_node_summaries(sbi)) {
struct f2fs_summary *ns = &sum->entries[0];
int i;
- for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
+ for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
ns->version = 0;
ns->ofs_in_node = 0;
}
@@ -3976,7 +4419,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
curseg->next_blkoff = blk_off;
mutex_unlock(&curseg->curseg_mutex);
out:
- f2fs_put_page(new, 1);
+ f2fs_folio_put(new, true);
return err;
}
@@ -4025,15 +4468,15 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
- struct page *page;
+ struct folio *folio;
unsigned char *kaddr;
struct f2fs_summary *summary;
struct curseg_info *seg_i;
int written_size = 0;
int i, j;
- page = f2fs_grab_meta_page(sbi, blkaddr++);
- kaddr = (unsigned char *)page_address(page);
+ folio = f2fs_grab_meta_folio(sbi, blkaddr++);
+ kaddr = folio_address(folio);
memset(kaddr, 0, PAGE_SIZE);
/* Step 1: write nat cache */
@@ -4048,18 +4491,11 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
/* Step 3: write summary entries */
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
- unsigned short blkoff;
-
seg_i = CURSEG_I(sbi, i);
- if (sbi->ckpt->alloc_type[i] == SSR)
- blkoff = sbi->blocks_per_seg;
- else
- blkoff = curseg_blkoff(sbi, i);
-
- for (j = 0; j < blkoff; j++) {
- if (!page) {
- page = f2fs_grab_meta_page(sbi, blkaddr++);
- kaddr = (unsigned char *)page_address(page);
+ for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
+ if (!folio) {
+ folio = f2fs_grab_meta_folio(sbi, blkaddr++);
+ kaddr = folio_address(folio);
memset(kaddr, 0, PAGE_SIZE);
written_size = 0;
}
@@ -4071,14 +4507,14 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
SUM_FOOTER_SIZE)
continue;
- set_page_dirty(page);
- f2fs_put_page(page, 1);
- page = NULL;
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
+ folio = NULL;
}
}
- if (page) {
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ if (folio) {
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
}
@@ -4131,29 +4567,29 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
return -1;
}
-static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
+static struct folio *get_current_sit_folio(struct f2fs_sb_info *sbi,
unsigned int segno)
{
- return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ return f2fs_get_meta_folio(sbi, current_sit_addr(sbi, segno));
}
-static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
+static struct folio *get_next_sit_folio(struct f2fs_sb_info *sbi,
unsigned int start)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct page *page;
+ struct folio *folio;
pgoff_t src_off, dst_off;
src_off = current_sit_addr(sbi, start);
dst_off = next_sit_addr(sbi, src_off);
- page = f2fs_grab_meta_page(sbi, dst_off);
- seg_info_to_sit_page(sbi, page, start);
+ folio = f2fs_grab_meta_folio(sbi, dst_off);
+ seg_info_to_sit_folio(sbi, folio, start);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
set_to_next_sit(sit_i, start);
- return page;
+ return folio;
}
static struct sit_entry_set *grab_sit_entry_set(void)
@@ -4182,10 +4618,12 @@ static void adjust_sit_entry_set(struct sit_entry_set *ses,
return;
list_for_each_entry_continue(next, head, set_list)
- if (ses->entry_cnt <= next->entry_cnt)
- break;
+ if (ses->entry_cnt <= next->entry_cnt) {
+ list_move_tail(&ses->set_list, &next->set_list);
+ return;
+ }
- list_move_tail(&ses->set_list, &next->set_list);
+ list_move_tail(&ses->set_list, head);
}
static void add_sit_entry(unsigned int segno, struct list_head *head)
@@ -4281,7 +4719,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* #2, flush sit entries to sit page.
*/
list_for_each_entry_safe(ses, tmp, head, set_list) {
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct f2fs_sit_block *raw_sit = NULL;
unsigned int start_segno = ses->start_segno;
unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
@@ -4295,8 +4733,8 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (to_journal) {
down_write(&curseg->journal_rwsem);
} else {
- page = get_next_sit_page(sbi, start_segno);
- raw_sit = page_address(page);
+ folio = get_next_sit_folio(sbi, start_segno);
+ raw_sit = folio_address(folio);
}
/* flush dirty sit entries in region of current sit set */
@@ -4334,6 +4772,12 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
&raw_sit->entries[sit_offset]);
}
+ /* update ckpt_valid_block */
+ if (__is_large_section(sbi)) {
+ set_ckpt_valid_blocks(sbi, segno);
+ sanity_check_valid_blocks(sbi, segno);
+ }
+
__clear_bit(segno, bitmap);
sit_i->dirty_sentries--;
ses->entry_cnt--;
@@ -4342,7 +4786,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (to_journal)
up_write(&curseg->journal_rwsem);
else
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
f2fs_bug_on(sbi, ses->entry_cnt);
release_sit_entry_set(ses);
@@ -4458,11 +4902,8 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
return -ENOMEM;
#endif
- /* init SIT information */
- sit_i->s_ops = &default_salloc_ops;
-
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
- sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
+ sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
sit_i->written_valid_blocks = 0;
sit_i->bitmap_size = sit_bitmap_size;
sit_i->dirty_sentries = 0;
@@ -4529,15 +4970,8 @@ static int build_curseg(struct f2fs_sb_info *sbi)
sizeof(struct f2fs_journal), GFP_KERNEL);
if (!array[i].journal)
return -ENOMEM;
- if (i < NR_PERSISTENT_LOG)
- array[i].seg_type = CURSEG_HOT_DATA + i;
- else if (i == CURSEG_COLD_DATA_PINNED)
- array[i].seg_type = CURSEG_COLD_DATA;
- else if (i == CURSEG_ALL_DATA_ATGC)
- array[i].seg_type = CURSEG_COLD_DATA;
- array[i].segno = NULL_SEGNO;
- array[i].next_blkoff = 0;
- array[i].inited = false;
+ array[i].seg_type = log_type_to_seg_type(i);
+ reset_curseg_fields(&array[i]);
}
return restore_curseg_summaries(sbi);
}
@@ -4553,7 +4987,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
int err = 0;
- block_t total_node_blocks = 0;
+ block_t sit_valid_blocks[2] = {0, 0};
do {
readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
@@ -4564,38 +4998,45 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
for (; start < end && start < MAIN_SEGS(sbi); start++) {
struct f2fs_sit_block *sit_blk;
- struct page *page;
+ struct folio *folio;
se = &sit_i->sentries[start];
- page = get_current_sit_page(sbi, start);
- if (IS_ERR(page))
- return PTR_ERR(page);
- sit_blk = (struct f2fs_sit_block *)page_address(page);
+ folio = get_current_sit_folio(sbi, start);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ sit_blk = folio_address(folio);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
err = check_block_count(sbi, start, &sit);
if (err)
return err;
seg_info_from_raw_sit(se, &sit);
- if (IS_NODESEG(se->type))
- total_node_blocks += se->valid_blocks;
- if (f2fs_block_unit_discard(sbi)) {
- /* build discard map only one time */
- if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
- memset(se->discard_map, 0xff,
+ if (se->type >= NR_PERSISTENT_LOG) {
+ f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
+ se->type, start);
+ f2fs_handle_error(sbi,
+ ERROR_INCONSISTENT_SUM_TYPE);
+ return -EFSCORRUPTED;
+ }
+
+ sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
+
+ if (!f2fs_block_unit_discard(sbi))
+ goto init_discard_map_done;
+
+ /* build discard map only one time */
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
SIT_VBLOCK_MAP_SIZE);
- } else {
- memcpy(se->discard_map,
- se->cur_valid_map,
+ goto init_discard_map_done;
+ }
+ memcpy(se->discard_map, se->cur_valid_map,
SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks +=
- sbi->blocks_per_seg -
+ sbi->discard_blks += BLKS_PER_SEG(sbi) -
se->valid_blocks;
- }
- }
-
+init_discard_map_done:
if (__is_large_section(sbi))
get_sec_entry(sbi, start)->valid_blocks +=
se->valid_blocks;
@@ -4612,6 +5053,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
f2fs_err(sbi, "Wrong journal entry on segno %u",
start);
err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
break;
}
@@ -4619,15 +5061,23 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
sit = sit_in_journal(journal, i);
old_valid_blocks = se->valid_blocks;
- if (IS_NODESEG(se->type))
- total_node_blocks -= old_valid_blocks;
+
+ sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
err = check_block_count(sbi, start, &sit);
if (err)
break;
seg_info_from_raw_sit(se, &sit);
- if (IS_NODESEG(se->type))
- total_node_blocks += se->valid_blocks;
+
+ if (se->type >= NR_PERSISTENT_LOG) {
+ f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
+ se->type, start);
+ err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
+ break;
+ }
+
+ sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
if (f2fs_block_unit_discard(sbi)) {
if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
@@ -4649,13 +5099,36 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
}
up_read(&curseg->journal_rwsem);
- if (!err && total_node_blocks != valid_node_count(sbi)) {
+ /* update ckpt_valid_block */
+ if (__is_large_section(sbi)) {
+ unsigned int segno;
+
+ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
+ set_ckpt_valid_blocks(sbi, segno);
+ sanity_check_valid_blocks(sbi, segno);
+ }
+ }
+
+ if (err)
+ return err;
+
+ if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
- total_node_blocks, valid_node_count(sbi));
- err = -EFSCORRUPTED;
+ sit_valid_blocks[NODE], valid_node_count(sbi));
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
+ return -EFSCORRUPTED;
}
- return err;
+ if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
+ valid_user_blocks(sbi)) {
+ f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
+ sit_valid_blocks[DATA], sit_valid_blocks[NODE],
+ valid_user_blocks(sbi));
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -4689,7 +5162,6 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno = 0, offset = 0, secno;
block_t valid_blocks, usable_blks_in_seg;
- block_t blks_per_sec = BLKS_PER_SEC(sbi);
while (1) {
/* find dirty segment based on free segmap */
@@ -4714,13 +5186,13 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
return;
mutex_lock(&dirty_i->seglist_lock);
- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
+ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
valid_blocks = get_valid_blocks(sbi, segno, true);
secno = GET_SEC_FROM_SEG(sbi, segno);
- if (!valid_blocks || valid_blocks == blks_per_sec)
+ if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
continue;
- if (IS_CURSEC(sbi, secno))
+ if (is_cursec(sbi, secno))
continue;
set_bit(secno, dirty_i->dirty_secmap);
}
@@ -4735,6 +5207,13 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
+
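+ /* sections holding pinned files, skipped by GC victim selection */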
+ dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
+ if (!dirty_i->pinned_secmap)
+ return -ENOMEM;
+
+ dirty_i->pinned_secmap_cnt = 0;
+ dirty_i->enable_pin_section = true;
return 0;
}
@@ -4792,13 +5271,21 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
sanity_check_seg_type(sbi, curseg->seg_type);
+ if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
+ f2fs_err(sbi,
+ "Current segment has invalid alloc_type:%d",
+ curseg->alloc_type);
+ f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
+ return -EFSCORRUPTED;
+ }
+
if (f2fs_test_bit(blkofs, se->cur_valid_map))
goto out;
if (curseg->alloc_type == SSR)
continue;
- for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
+ for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
if (!f2fs_test_bit(blkofs, se->cur_valid_map))
continue;
out:
@@ -4806,6 +5293,7 @@ out:
"Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
i, curseg->segno, curseg->alloc_type,
curseg->next_blkoff, blkofs);
+ f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
return -EFSCORRUPTED;
}
}
@@ -4813,89 +5301,84 @@ out:
}
#ifdef CONFIG_BLK_DEV_ZONED
-
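+/*
+ * Cross-check a sequential zone's condition against the number of valid
+ * f2fs blocks in it: empty zones with no valid blocks and full zones with
+ * valid blocks are consistent; a mismatched zone with no valid blocks is
+ * reset via discard, otherwise the zone is finished (or zero-filled) to
+ * close it.
+ */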
static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
struct f2fs_dev_info *fdev,
struct blk_zone *zone)
{
- unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
- block_t zone_block, wp_block, last_valid_block;
+ unsigned int zone_segno;
+ block_t zone_block, valid_block_cnt;
unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
- int i, s, b, ret;
- struct seg_entry *se;
+ int ret;
+ unsigned int nofs_flags;
if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
- wp_segno = GET_SEGNO(sbi, wp_block);
- wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
zone_segno = GET_SEGNO(sbi, zone_block);
- zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
-
- if (zone_segno >= MAIN_SEGS(sbi))
- return 0;
/*
* Skip check of zones cursegs point to, since
* fix_curseg_write_pointer() checks them.
*/
- for (i = 0; i < NO_CHECK_TYPE; i++)
- if (zone_secno == GET_SEC_FROM_SEG(sbi,
- CURSEG_I(sbi, i)->segno))
- return 0;
+ if (zone_segno >= MAIN_SEGS(sbi))
+ return 0;
/*
- * Get last valid block of the zone.
+ * Get # of valid blocks in the zone.
*/
- last_valid_block = zone_block - 1;
- for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
- segno = zone_segno + s;
- se = get_seg_entry(sbi, segno);
- for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
- if (f2fs_test_bit(b, se->cur_valid_map)) {
- last_valid_block = START_BLOCK(sbi, segno) + b;
- break;
- }
- if (last_valid_block >= zone_block)
- break;
+ valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
+ f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
+ zone_segno, valid_block_cnt,
+ blk_zone_cond_str(zone->cond));
+ return 0;
}
- /*
- * If last valid block is beyond the write pointer, report the
- * inconsistency. This inconsistency does not cause write error
- * because the zone will not be selected for write operation until
- * it get discarded. Just report it.
- */
- if (last_valid_block >= wp_block) {
- f2fs_notice(sbi, "Valid block beyond write pointer: "
- "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
- GET_SEGNO(sbi, last_valid_block),
- GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
- wp_segno, wp_blkoff);
+ if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
+ (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
return 0;
- }
- /*
- * If there is no valid block in the zone and if write pointer is
- * not at zone start, reset the write pointer.
- */
- if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
- f2fs_notice(sbi,
- "Zone without valid block has non-zero write "
- "pointer. Reset the write pointer: wp[0x%x,0x%x]",
- wp_segno, wp_blkoff);
+ if (!valid_block_cnt) {
+ f2fs_notice(sbi, "Zone without valid block has non-zero write "
+ "pointer. Reset the write pointer: cond[%s]",
+ blk_zone_cond_str(zone->cond));
ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
zone->len >> log_sectors_per_block);
- if (ret) {
+ if (ret)
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
fdev->path, ret);
- return ret;
- }
+ return ret;
}
- return 0;
+ /*
+ * If there are valid blocks and the write pointer doesn't match
+ * them, report the inconsistency and fill the zone to its end to
+ * close it. The inconsistency does not cause a write error because
+ * the zone will not be selected for writes until it gets discarded.
+ */
+ f2fs_notice(sbi, "Valid blocks are not aligned with write "
+ "pointer: valid block[0x%x,0x%x] cond[%s]",
+ zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond));
+
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
+ zone->start, zone->len);
+ memalloc_nofs_restore(nofs_flags);
+ if (ret == -EOPNOTSUPP) {
+ ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
+ zone->len - (zone->wp - zone->start),
+ GFP_NOFS, 0);
+ if (ret)
+ f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ } else if (ret) {
+ f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ }
+
+ return ret;
}
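The rewritten check reduces to a small decision on (valid block count, zone condition): empty with no valid blocks or full with valid blocks is consistent; no valid blocks otherwise means reset (discard) the zone; anything else means finish the zone, falling back to zero-filling when REQ_OP_ZONE_FINISH is unsupported. A standalone user-space sketch of just that decision, using stand-in enums rather than the real blk_zone types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the zone condition and the chosen repair action. */
enum zone_cond { ZONE_EMPTY, ZONE_OPEN, ZONE_FULL };
enum zone_fix  { FIX_NONE, FIX_RESET, FIX_FINISH };

static enum zone_fix classify_zone(unsigned int valid_blocks, enum zone_cond cond)
{
	/* Consistent: an empty zone with no data, or a full zone with data. */
	if ((!valid_blocks && cond == ZONE_EMPTY) ||
	    (valid_blocks && cond == ZONE_FULL))
		return FIX_NONE;
	/* No valid blocks but a non-empty zone: reset (discard) the zone. */
	if (!valid_blocks)
		return FIX_RESET;
	/* Valid blocks not matching the write pointer: finish the zone. */
	return FIX_FINISH;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_zone(0, ZONE_EMPTY),	/* 0: FIX_NONE   */
	       classify_zone(0, ZONE_OPEN),	/* 1: FIX_RESET  */
	       classify_zone(128, ZONE_OPEN));	/* 2: FIX_FINISH */
	return 0;
}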
static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
@@ -4921,7 +5404,7 @@ static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
return 0;
}
-static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+static int do_fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *cs = CURSEG_I(sbi, type);
struct f2fs_dev_info *zbd;
@@ -4953,23 +5436,36 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
- wp_segno = GET_SEGNO(sbi, wp_block);
- wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
-
- if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
- wp_sector_off == 0)
- return 0;
+ /*
+ * If the previous mount was safely unmounted, we can reuse the
+ * current segments. Otherwise, allocate new sections.
+ */
+ if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+ wp_segno = GET_SEGNO(sbi, wp_block);
+ wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+ wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+
+ if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
+ wp_sector_off == 0)
+ return 0;
- f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
- "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
- type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
+ f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
+ "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
+ cs->next_blkoff, wp_segno, wp_blkoff);
+ }
- f2fs_notice(sbi, "Assign new section to curseg[%d]: "
- "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
+ /* Allocate a new section if the current one is not fresh. */
+ if (cs->next_blkoff ||
+ cs->segno != GET_SEG_FROM_SEC(sbi, GET_ZONE_FROM_SEC(sbi, cs_section))) {
+ unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;
- f2fs_allocate_new_section(sbi, type, true);
+ f2fs_allocate_new_section(sbi, type, true);
+ f2fs_notice(sbi, "Assign new section to curseg[%d]: "
+ "[0x%x,0x%x] -> [0x%x,0x%x]",
+ type, old_segno, old_blkoff,
+ cs->segno, cs->next_blkoff);
+ }
/* check consistency of the zone curseg pointed to */
if (check_zone_write_pointer(sbi, zbd, &zone))
@@ -5001,9 +5497,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
"New zone for curseg[%d] is not yet discarded. "
"Reset the zone: curseg[0x%x,0x%x]",
type, cs->segno, cs->next_blkoff);
- err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
- zone_sector >> log_sectors_per_block,
- zone.len >> log_sectors_per_block);
+ err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
+ zone.len >> log_sectors_per_block);
if (err) {
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
zbd->path, err);
@@ -5014,12 +5509,12 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
return 0;
}
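The alignment test kept for the clean-unmount path derives a segment number, an in-segment block offset, and a sub-block sector offset from the zone write pointer and compares them against the current segment. A simplified user-space sketch with invented geometry (8 sectors per 4 KiB block, 512 blocks per segment) and the device start block omitted:

#include <stdbool.h>
#include <stdio.h>

#define LOG_SECTORS_PER_BLOCK 3U	/* 512 B sectors, 4 KiB blocks */
#define BLOCKS_PER_SEG        512U

struct curseg { unsigned int segno, next_blkoff; };

static bool curseg_matches_wp(const struct curseg *cs, unsigned long long wp_sector)
{
	unsigned long long wp_block = wp_sector >> LOG_SECTORS_PER_BLOCK;
	unsigned int wp_segno  = (unsigned int)(wp_block / BLOCKS_PER_SEG);
	unsigned int wp_blkoff = (unsigned int)(wp_block % BLOCKS_PER_SEG);
	unsigned int wp_sector_off = wp_sector & ((1U << LOG_SECTORS_PER_BLOCK) - 1);

	/* Aligned only if segment, block offset and sector offset all match. */
	return cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
	       wp_sector_off == 0;
}

int main(void)
{
	struct curseg cs = { .segno = 3, .next_blkoff = 100 };

	/* segment 3, block offset 100 -> sector (3*512 + 100) * 8 */
	printf("%d\n", curseg_matches_wp(&cs, (3ULL * 512 + 100) * 8));	/* 1 */
	printf("%d\n", curseg_matches_wp(&cs, (3ULL * 512 + 101) * 8));	/* 0 */
	return 0;
}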
-int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
+static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
int i, ret;
for (i = 0; i < NR_PERSISTENT_LOG; i++) {
- ret = fix_curseg_write_pointer(sbi, i);
+ ret = do_fix_curseg_write_pointer(sbi, i);
if (ret)
return ret;
}
@@ -5042,7 +5537,7 @@ static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
return check_zone_write_pointer(args->sbi, args->fdev, zone);
}
-int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+static int check_write_pointer(struct f2fs_sb_info *sbi)
{
int i, ret;
struct check_zone_write_pointer_args args;
@@ -5062,52 +5557,19 @@ int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
return 0;
}
-static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
- unsigned int dev_idx)
-{
- if (!bdev_is_zoned(FDEV(dev_idx).bdev))
- return true;
- return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
-}
-
-/* Return the zone index in the given device */
-static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
- int dev_idx)
+int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
- block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
-
- return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
- sbi->log_blocks_per_blkz;
-}
-
-/*
- * Return the usable segments in a section based on the zone's
- * corresponding zone capacity. Zone is equal to a section.
- */
-static inline unsigned int f2fs_usable_zone_segs_in_sec(
- struct f2fs_sb_info *sbi, unsigned int segno)
-{
- unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
-
- dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
- zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
-
- /* Conventional zone's capacity is always equal to zone size */
- if (is_conv_zone(sbi, zone_idx, dev_idx))
- return sbi->segs_per_sec;
+ int ret;
- /*
- * If the zone_capacity_blocks array is NULL, then zone capacity
- * is equal to the zone size for all zones
- */
- if (!FDEV(dev_idx).zone_capacity_blocks)
- return sbi->segs_per_sec;
+ if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
+ f2fs_hw_is_readonly(sbi))
+ return 0;
- /* Get the segment count beyond zone capacity block */
- unusable_segs_in_sec = (sbi->blocks_per_blkz -
- FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
- sbi->log_blocks_per_seg;
- return sbi->segs_per_sec - unusable_segs_in_sec;
+ f2fs_notice(sbi, "Checking entire write pointers");
+ ret = fix_curseg_write_pointer(sbi);
+ if (!ret)
+ ret = check_write_pointer(sbi);
+ return ret;
}
/*
@@ -5122,26 +5584,15 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
struct f2fs_sb_info *sbi, unsigned int segno)
{
block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
- unsigned int zone_idx, dev_idx, secno;
+ unsigned int secno;
+
+ if (!sbi->unusable_blocks_per_sec)
+ return BLKS_PER_SEG(sbi);
secno = GET_SEC_FROM_SEG(sbi, segno);
seg_start = START_BLOCK(sbi, segno);
- dev_idx = f2fs_target_device_index(sbi, seg_start);
- zone_idx = get_zone_idx(sbi, secno, dev_idx);
-
- /*
- * Conventional zone's capacity is always equal to zone size,
- * so, blocks per segment is unchanged.
- */
- if (is_conv_zone(sbi, zone_idx, dev_idx))
- return sbi->blocks_per_seg;
-
- if (!FDEV(dev_idx).zone_capacity_blocks)
- return sbi->blocks_per_seg;
-
sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
- sec_cap_blkaddr = sec_start_blkaddr +
- FDEV(dev_idx).zone_capacity_blocks[zone_idx];
+ sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
/*
* If segment starts before zone capacity and spans beyond
@@ -5151,18 +5602,13 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
*/
if (seg_start >= sec_cap_blkaddr)
return 0;
- if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
+ if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
return sec_cap_blkaddr - seg_start;
- return sbi->blocks_per_seg;
+ return BLKS_PER_SEG(sbi);
}
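With the per-device capacity array gone, f2fs_usable_zone_blks_in_seg() clamps a segment against the section's capacity end: a segment entirely beyond capacity has no usable blocks, a segment straddling the boundary keeps only the part below it, and everything else keeps the full segment. A toy model of that clamp with invented geometry:

#include <stdio.h>

#define BLKS_PER_SEG 512U	/* blocks per segment (example value) */

static unsigned int usable_blks(unsigned int seg_start, unsigned int sec_cap_end)
{
	if (seg_start >= sec_cap_end)
		return 0;				/* fully beyond capacity */
	if (seg_start + BLKS_PER_SEG > sec_cap_end)
		return sec_cap_end - seg_start;		/* straddles the boundary */
	return BLKS_PER_SEG;				/* fully usable */
}

int main(void)
{
	unsigned int cap_end = 499712;			/* section capacity end block */

	printf("%u %u %u\n",
	       usable_blks(499200, cap_end),		/* 512: fully inside  */
	       usable_blks(499456, cap_end),		/* 256: straddles     */
	       usable_blks(499712, cap_end));		/* 0: beyond capacity */
	return 0;
}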
#else
-int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
-{
- return 0;
-}
-
-int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
return 0;
}
@@ -5173,11 +5619,6 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi
return 0;
}
-static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
-{
- return 0;
-}
#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno)
@@ -5185,16 +5626,50 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
if (f2fs_sb_has_blkzoned(sbi))
return f2fs_usable_zone_blks_in_seg(sbi, segno);
- return sbi->blocks_per_seg;
+ return BLKS_PER_SEG(sbi);
}
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
{
if (f2fs_sb_has_blkzoned(sbi))
- return f2fs_usable_zone_segs_in_sec(sbi, segno);
+ return CAP_SEGS_PER_SEC(sbi);
- return sbi->segs_per_sec;
+ return SEGS_PER_SEC(sbi);
+}
+
+unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
+ unsigned int secno = 0, start = 0;
+ unsigned int total_valid_blocks = 0;
+ unsigned long long mtime = 0;
+ unsigned int i = 0;
+
+ secno = GET_SEC_FROM_SEG(sbi, segno);
+ start = GET_SEG_FROM_SEC(sbi, secno);
+
+ if (!__is_large_section(sbi)) {
+ mtime = get_seg_entry(sbi, start + i)->mtime;
+ goto out;
+ }
+
+ for (i = 0; i < usable_segs_per_sec; i++) {
+ /* for a large section, weight each segment's mtime by its valid blocks */
+ struct seg_entry *se = get_seg_entry(sbi, start+i);
+
+ mtime += se->mtime * se->valid_blocks;
+ total_valid_blocks += se->valid_blocks;
+ }
+
+ if (total_valid_blocks == 0)
+ return INVALID_MTIME;
+
+ mtime = div_u64(mtime, total_valid_blocks);
+out:
+ if (unlikely(mtime == INVALID_MTIME))
+ mtime -= 1;
+ return mtime;
}
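For a non-large section the first segment's mtime is used directly; for a large section the result is the valid-block-weighted average over its segments, with INVALID_MTIME (ULLONG_MAX) reserved as the "no valid blocks" sentinel. A standalone model of that arithmetic with toy values:

#include <stdio.h>
#include <limits.h>

#define INVALID_MTIME ULLONG_MAX

struct seg { unsigned long long mtime; unsigned int valid_blocks; };

static unsigned long long section_mtime(const struct seg *segs, unsigned int nr)
{
	unsigned long long sum = 0;
	unsigned long long valid = 0;

	for (unsigned int i = 0; i < nr; i++) {
		sum   += segs[i].mtime * segs[i].valid_blocks;
		valid += segs[i].valid_blocks;
	}
	if (!valid)
		return INVALID_MTIME;	/* no valid blocks in the section */
	sum /= valid;
	if (sum == INVALID_MTIME)
		sum -= 1;		/* keep the sentinel value reserved */
	return sum;
}

int main(void)
{
	struct seg sec[] = { { 100, 300 }, { 200, 100 }, { 0, 0 } };

	/* (100*300 + 200*100) / 400 = 125 */
	printf("%llu\n", section_mtime(sec, 3));
	return 0;
}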
/*
@@ -5209,14 +5684,10 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
sit_i->min_mtime = ULLONG_MAX;
- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
- unsigned int i;
+ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
unsigned long long mtime = 0;
- for (i = 0; i < sbi->segs_per_sec; i++)
- mtime += get_seg_entry(sbi, segno + i)->mtime;
-
- mtime = div_u64(mtime, sbi->segs_per_sec);
+ mtime = f2fs_get_section_mtime(sbi, segno);
if (sit_i->min_mtime > mtime)
sit_i->min_mtime = mtime;
@@ -5252,22 +5723,20 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
if (!f2fs_lfs_mode(sbi))
- sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+ sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
- sm_info->min_seq_blocks = sbi->blocks_per_seg;
+ sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->min_ssr_sections = reserved_sections(sbi);
INIT_LIST_HEAD(&sm_info->sit_entry_set);
- init_rwsem(&sm_info->curseg_lock);
+ init_f2fs_rwsem(&sm_info->curseg_lock);
- if (!f2fs_readonly(sbi->sb)) {
- err = f2fs_create_flush_cmd_control(sbi);
- if (err)
- return err;
- }
+ err = f2fs_create_flush_cmd_control(sbi);
+ if (err)
+ return err;
err = create_discard_cmd_control(sbi);
if (err)
@@ -5316,6 +5785,7 @@ static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ kvfree(dirty_i->pinned_secmap);
kvfree(dirty_i->victim_secmap);
}
@@ -5385,9 +5855,9 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
kvfree(sit_i->dirty_sentries_bitmap);
SM_I(sbi)->sit_info = NULL;
- kvfree(sit_i->sit_bitmap);
+ kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
- kvfree(sit_i->sit_bitmap_mir);
+ kfree(sit_i->sit_bitmap_mir);
kvfree(sit_i->invalid_segmap);
#endif
kfree(sit_i);
@@ -5426,9 +5896,9 @@ int __init f2fs_create_segment_manager_caches(void)
if (!sit_entry_set_slab)
goto destroy_discard_cmd;
- inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
- sizeof(struct inmem_pages));
- if (!inmem_entry_slab)
+ revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
+ sizeof(struct revoke_entry));
+ if (!revoke_entry_slab)
goto destroy_sit_entry_set;
return 0;
@@ -5447,5 +5917,5 @@ void f2fs_destroy_segment_manager_caches(void)
kmem_cache_destroy(sit_entry_set_slab);
kmem_cache_destroy(discard_cmd_slab);
kmem_cache_destroy(discard_entry_slab);
- kmem_cache_destroy(inmem_entry_slab);
+ kmem_cache_destroy(revoke_entry_slab);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 46fde9f3f28e..07dcbcbeb7c6 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -18,12 +18,15 @@
#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
#define F2FS_MIN_META_SEGMENTS 8 /* SB + 2 (CP + SIT + NAT) + SSA */
+#define INVALID_MTIME ULLONG_MAX /* no valid blocks in a segment/section */
+
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)
#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
+#define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA))
static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
unsigned short seg_type)
@@ -31,38 +34,6 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}
-#define IS_HOT(t) ((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
-#define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
-#define IS_COLD(t) ((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
-
-#define IS_CURSEG(sbi, seg) \
- (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \
- ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
-
-#define IS_CURSEC(sbi, secno) \
- (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
- (sbi)->segs_per_sec) || \
- ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
- (sbi)->segs_per_sec))
-
#define MAIN_BLKADDR(sbi) \
(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
@@ -76,41 +47,55 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define TOTAL_SEGS(sbi) \
(SM_I(sbi) ? SM_I(sbi)->segment_count : \
le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
-#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
+#define TOTAL_BLKS(sbi) (SEGS_TO_BLKS(sbi, TOTAL_SEGS(sbi)))
#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
(sbi)->log_blocks_per_seg))
#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
- (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
+ (SEGS_TO_BLKS(sbi, GET_R2L_SEGNO(FREE_I(sbi), segno))))
#define NEXT_FREE_BLKADDR(sbi, curseg) \
(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
+ (BLKS_TO_SEGS(sbi, GET_SEGOFF_FROM_SEG0(sbi, blk_addr)))
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))
#define GET_SEGNO(sbi, blk_addr) \
((!__is_valid_data_blkaddr(blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
-#define BLKS_PER_SEC(sbi) \
- ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+#ifdef CONFIG_BLK_DEV_ZONED
+#define CAP_BLKS_PER_SEC(sbi) \
+ (BLKS_PER_SEC(sbi) - (sbi)->unusable_blocks_per_sec)
+#define CAP_SEGS_PER_SEC(sbi) \
+ (SEGS_PER_SEC(sbi) - \
+ BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec))
+#else
+#define CAP_BLKS_PER_SEC(sbi) BLKS_PER_SEC(sbi)
+#define CAP_SEGS_PER_SEC(sbi) SEGS_PER_SEC(sbi)
+#endif
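The zone-capacity bookkeeping moves from the removed per-device zone_capacity_blocks array to a single unusable_blocks_per_sec value, which caps both the usable blocks and the usable segments of a section. A toy calculation mirroring CAP_BLKS_PER_SEC()/CAP_SEGS_PER_SEC() with an invented geometry:

#include <stdio.h>

#define BLKS_PER_SEG 512U			/* 4 KiB blocks, 2 MiB segment */
#define SEGS_PER_SEC 1024U
#define BLKS_PER_SEC (BLKS_PER_SEG * SEGS_PER_SEC)

int main(void)
{
	unsigned int unusable_blks = 24576;	/* zone size - zone capacity */
	unsigned int cap_blks = BLKS_PER_SEC - unusable_blks;
	unsigned int cap_segs = SEGS_PER_SEC - unusable_blks / BLKS_PER_SEG;

	printf("usable: %u blocks, %u segments\n", cap_blks, cap_segs);
	return 0;
}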
+#define GET_START_SEG_FROM_SEC(sbi, segno) \
+ (rounddown(segno, SEGS_PER_SEC(sbi)))
#define GET_SEC_FROM_SEG(sbi, segno) \
- (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
+ (((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
#define GET_SEG_FROM_SEC(sbi, secno) \
- ((secno) * (sbi)->segs_per_sec)
+ ((secno) * SEGS_PER_SEC(sbi))
#define GET_ZONE_FROM_SEC(sbi, secno) \
- (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
+ (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
-#define GET_SUM_BLOCK(sbi, segno) \
- ((sbi)->sm_info->ssa_blkaddr + (segno))
+#define SUMS_PER_BLOCK (F2FS_BLKSIZE / F2FS_SUM_BLKSIZE)
+#define GET_SUM_BLOCK(sbi, segno) \
+ (SM_I(sbi)->ssa_blkaddr + (segno / SUMS_PER_BLOCK))
+#define GET_SUM_BLKOFF(segno) (segno % SUMS_PER_BLOCK)
+#define SUM_BLK_PAGE_ADDR(folio, segno) \
+ (folio_address(folio) + GET_SUM_BLKOFF(segno) * F2FS_SUM_BLKSIZE)
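With SUMS_PER_BLOCK summary blocks packed into one filesystem block, a segment's summary now lives at block ssa_blkaddr + segno / SUMS_PER_BLOCK, at byte offset (segno % SUMS_PER_BLOCK) * F2FS_SUM_BLKSIZE within it. A toy calculation, assuming a 16 KiB filesystem block and 4 KiB summary blocks (sizes invented for the example):

#include <stdio.h>

#define BLKSIZE        16384U	/* filesystem block size (example value) */
#define SUM_BLKSIZE     4096U	/* on-disk summary block size */
#define SUMS_PER_BLOCK (BLKSIZE / SUM_BLKSIZE)

int main(void)
{
	unsigned int ssa_blkaddr = 1000, segno = 10;

	printf("block %u, byte offset %u\n",
	       ssa_blkaddr + segno / SUMS_PER_BLOCK,	/* 1002 */
	       (segno % SUMS_PER_BLOCK) * SUM_BLKSIZE);	/* 8192 */
	return 0;
}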
#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
@@ -132,16 +117,6 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
/*
- * indicate a block allocation direction: RIGHT and LEFT.
- * RIGHT means allocating new sections towards the end of volume.
- * LEFT means the opposite direction.
- */
-enum {
- ALLOC_RIGHT = 0,
- ALLOC_LEFT
-};
-
-/*
* In the victim_sel_policy->alloc_mode, there are three block allocation modes.
* LFS writes data sequentially with cleaning operations.
* SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
@@ -194,6 +169,7 @@ struct victim_sel_policy {
unsigned int min_segno; /* segment # having min. cost */
unsigned long long age; /* mtime of GCed section*/
unsigned long long age_threshold;/* age threshold */
+ bool one_time_gc; /* one time GC */
};
struct seg_entry {
@@ -216,23 +192,18 @@ struct seg_entry {
struct sec_entry {
unsigned int valid_blocks; /* # of valid blocks in a section */
-};
-
-struct segment_allocation {
- void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
+ unsigned int ckpt_valid_blocks; /* # of valid blocks in a section at the last checkpoint */
};
#define MAX_SKIP_GC_COUNT 16
-struct inmem_pages {
+struct revoke_entry {
struct list_head list;
- struct page *page;
block_t old_addr; /* for revoking when fail to commit */
+ pgoff_t index;
};
struct sit_info {
- const struct segment_allocation *s_ops;
-
block_t sit_base_addr; /* start block address of SIT area */
block_t sit_blocks; /* # of blocks used by SIT area */
block_t written_valid_blocks; /* # of valid blocks in main area */
@@ -288,18 +259,14 @@ enum dirty_type {
};
struct dirty_seglist_info {
- const struct victim_selection *v_ops; /* victim selction operation */
unsigned long *dirty_segmap[NR_DIRTY_TYPE];
unsigned long *dirty_secmap;
struct mutex seglist_lock; /* lock for segment bitmaps */
int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
unsigned long *victim_secmap; /* background GC victims */
-};
-
-/* victim selection function for cleaning and SSR */
-struct victim_selection {
- int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
- int, int, char, unsigned long long);
+ unsigned long *pinned_secmap; /* pinned victims from foreground GC */
+ unsigned int pinned_secmap_cnt; /* count of victims which has pinned data */
+ bool enable_pin_section; /* enable pinning section */
};
/* for active log information */
@@ -332,6 +299,28 @@ static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}
+static inline bool is_curseg(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ int i;
+
+ for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
+ if (segno == CURSEG_I(sbi, i)->segno)
+ return true;
+ }
+ return false;
+}
+
+static inline bool is_cursec(struct f2fs_sb_info *sbi, unsigned int secno)
+{
+ int i;
+
+ for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
+ if (secno == GET_SEC_FROM_SEG(sbi, CURSEG_I(sbi, i)->segno))
+ return true;
+ }
+ return false;
+}
+
static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
unsigned int segno)
{
@@ -362,21 +351,57 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
unsigned int segno, bool use_section)
{
- if (use_section && __is_large_section(sbi)) {
- unsigned int start_segno = START_SEGNO(segno);
- unsigned int blocks = 0;
- int i;
+ if (use_section && __is_large_section(sbi))
+ return get_sec_entry(sbi, segno)->ckpt_valid_blocks;
+ else
+ return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+}
+
+static inline void set_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ unsigned int blocks = 0;
+ int i;
- for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
- struct seg_entry *se = get_seg_entry(sbi, start_segno);
+ for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
+ struct seg_entry *se = get_seg_entry(sbi, start_segno);
- blocks += se->ckpt_valid_blocks;
- }
- return blocks;
+ blocks += se->ckpt_valid_blocks;
}
- return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks = blocks;
}
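For large sections, get_ckpt_valid_blocks() now reads a cached per-section counter instead of summing every segment on each query; set_ckpt_valid_blocks() rebuilds that cache by walking the section's segments. A minimal model of the cache update:

#include <stdio.h>

#define SEGS_PER_SEC 4U	/* segments per section (example value) */

struct seg_entry { unsigned int ckpt_valid_blocks; };
struct sec_entry { unsigned int ckpt_valid_blocks; };

static void set_ckpt_valid_blocks(struct sec_entry *sec,
				  const struct seg_entry *segs)
{
	unsigned int blocks = 0;

	for (unsigned int i = 0; i < SEGS_PER_SEC; i++)
		blocks += segs[i].ckpt_valid_blocks;
	sec->ckpt_valid_blocks = blocks;	/* cache the per-section sum */
}

int main(void)
{
	struct seg_entry segs[SEGS_PER_SEC] = { {10}, {0}, {7}, {512} };
	struct sec_entry sec;

	set_ckpt_valid_blocks(&sec, segs);
	printf("%u\n", sec.ckpt_valid_blocks);	/* 529 */
	return 0;
}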
+#ifdef CONFIG_F2FS_CHECK_FS
+static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ unsigned int blocks = 0;
+ int i;
+
+ for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
+ struct seg_entry *se = get_seg_entry(sbi, start_segno);
+
+ blocks += se->ckpt_valid_blocks;
+ }
+
+ if (blocks != get_sec_entry(sbi, segno)->ckpt_valid_blocks) {
+ f2fs_err(sbi,
+ "Inconsistent ckpt valid blocks: "
+ "seg entry(%d) vs sec entry(%d) at secno %d",
+ blocks, get_sec_entry(sbi, segno)->ckpt_valid_blocks, secno);
+ f2fs_bug_on(sbi, 1);
+ }
+}
+#else
+static inline void sanity_check_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+}
+#endif
static inline void seg_info_from_raw_sit(struct seg_entry *se,
struct f2fs_sit_entry *rs)
{
@@ -401,8 +426,8 @@ static inline void __seg_info_to_raw_sit(struct seg_entry *se,
rs->mtime = cpu_to_le64(se->mtime);
}
-static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
- struct page *page, unsigned int start)
+static inline void seg_info_to_sit_folio(struct f2fs_sb_info *sbi,
+ struct folio *folio, unsigned int start)
{
struct f2fs_sit_block *raw_sit;
struct seg_entry *se;
@@ -411,7 +436,7 @@ static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
(unsigned long)MAIN_SEGS(sbi));
int i;
- raw_sit = (struct f2fs_sit_block *)page_address(page);
+ raw_sit = folio_address(folio);
memset(raw_sit, 0, PAGE_SIZE);
for (i = 0; i < end - start; i++) {
rs = &raw_sit->entries[i];
@@ -445,15 +470,14 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
next = find_next_bit(free_i->free_segmap,
- start_segno + sbi->segs_per_sec, start_segno);
- if (next >= start_segno + usable_segs) {
+ start_segno + SEGS_PER_SEC(sbi), start_segno);
+ if (next >= start_segno + f2fs_usable_segs_in_sec(sbi)) {
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
}
@@ -479,22 +503,36 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ bool ret;
spin_lock(&free_i->segmap_lock);
- if (test_and_clear_bit(segno, free_i->free_segmap)) {
- free_i->free_segments++;
-
- if (!inmem && IS_CURSEC(sbi, secno))
- goto skip_free;
- next = find_next_bit(free_i->free_segmap,
- start_segno + sbi->segs_per_sec, start_segno);
- if (next >= start_segno + usable_segs) {
- if (test_and_clear_bit(secno, free_i->free_secmap))
- free_i->free_sections++;
- }
- }
-skip_free:
+ ret = test_and_clear_bit(segno, free_i->free_segmap);
+ if (!ret)
+ goto unlock_out;
+
+ free_i->free_segments++;
+
+ if (!inmem && is_cursec(sbi, secno))
+ goto unlock_out;
+
+ /* check whether the whole section has become free */
+ next = find_next_bit(free_i->free_segmap,
+ start_segno + SEGS_PER_SEC(sbi), start_segno);
+ if (next < start_segno + f2fs_usable_segs_in_sec(sbi))
+ goto unlock_out;
+
+ ret = test_and_clear_bit(secno, free_i->free_secmap);
+ if (!ret)
+ goto unlock_out;
+
+ free_i->free_sections++;
+
+ if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[BG_GC]) == secno)
+ sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+ if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[FG_GC]) == secno)
+ sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+
+unlock_out:
spin_unlock(&free_i->segmap_lock);
}
@@ -571,56 +609,140 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}
-static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
+static inline unsigned int get_left_section_blocks(struct f2fs_sb_info *sbi,
+ enum log_type type, unsigned int segno)
{
- unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
- get_pages(sbi, F2FS_DIRTY_DENTS);
- unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
- unsigned int segno, left_blocks;
+ if (f2fs_lfs_mode(sbi)) {
+ unsigned int used_blocks = __is_large_section(sbi) ? SEGS_TO_BLKS(sbi,
+ (segno - GET_START_SEG_FROM_SEC(sbi, segno))) : 0;
+ return CAP_BLKS_PER_SEC(sbi) - used_blocks -
+ CURSEG_I(sbi, type)->next_blkoff;
+ }
+ return CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true);
+}
+
+static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ unsigned int node_blocks, unsigned int data_blocks,
+ unsigned int dent_blocks)
+{
+ unsigned int segno, left_blocks, blocks;
int i;
- /* check current node segment */
- for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
+ /* check current data/node sections in the worst case. */
+ for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
segno = CURSEG_I(sbi, i)->segno;
- left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
- get_seg_entry(sbi, segno)->ckpt_valid_blocks;
- if (node_blocks > left_blocks)
+ if (unlikely(segno == NULL_SEGNO))
+ return false;
+
+ left_blocks = get_left_section_blocks(sbi, i, segno);
+
+ blocks = i <= CURSEG_COLD_DATA ? data_blocks : node_blocks;
+ if (blocks > left_blocks)
return false;
}
- /* check current data segment */
+ /* check current data section for dentry blocks. */
segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
- left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
- get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+
+ if (unlikely(segno == NULL_SEGNO))
+ return false;
+
+ left_blocks = get_left_section_blocks(sbi, CURSEG_HOT_DATA, segno);
+
if (dent_blocks > left_blocks)
return false;
return true;
}
+/*
+ * Calculate the sections needed for dirty node/dentry blocks and call
+ * has_curseg_enough_space(). Note that dirty data must also be
+ * accounted for in LFS mode when checkpoint is disabled.
+ */
+static inline void __get_secs_required(struct f2fs_sb_info *sbi,
+ unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
+{
+ unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+ get_pages(sbi, F2FS_DIRTY_DENTS) +
+ get_pages(sbi, F2FS_DIRTY_IMETA);
+ unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+ unsigned int total_data_blocks = 0;
+ unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
+ unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
+ unsigned int data_secs = 0;
+ unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
+ unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
+ unsigned int data_blocks = 0;
+
+ if (f2fs_lfs_mode(sbi)) {
+ total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);
+ data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi);
+ data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi);
+ }
+
+ if (lower_p)
+ *lower_p = node_secs + dent_secs + data_secs;
+ if (upper_p)
+ *upper_p = node_secs + dent_secs + data_secs +
+ (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) +
+ (data_blocks ? 1 : 0);
+ if (curseg_p)
+ *curseg_p = has_curseg_enough_space(sbi,
+ node_blocks, data_blocks, dent_blocks);
+}
+
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
int freed, int needed)
{
- int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
- int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
- int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+ unsigned int free_secs, lower_secs, upper_secs;
+ bool curseg_space;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
- if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
- has_curseg_enough_space(sbi))
+ __get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);
+
+ free_secs = free_sections(sbi) + freed;
+ lower_secs += needed + reserved_sections(sbi);
+ upper_secs += needed + reserved_sections(sbi);
+
+ if (free_secs > upper_secs)
return false;
- return (free_sections(sbi) + freed) <=
- (node_secs + 2 * dent_secs + imeta_secs +
- reserved_sections(sbi) + needed);
+ if (free_secs <= lower_secs)
+ return true;
+ return !curseg_space;
+}
+
+static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
+ int freed, int needed)
+{
+ return !has_not_enough_free_secs(sbi, freed, needed);
+}
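The free-space check now works in three bands: lower counts only the sections that the dirty blocks fill completely, upper adds one section per non-empty remainder, and the borderline band in between defers to whether the open current sections can absorb those remainders. A standalone sketch of that decision; it drops the dirty imeta and LFS-mode data accounting of the real __get_secs_required() and takes curseg_space as an input instead of deriving it:

#include <stdbool.h>
#include <stdio.h>

#define CAP_BLKS 2048U	/* usable blocks per section (invented) */

static bool not_enough_free_secs(unsigned int free_secs, unsigned int reserved,
				 unsigned int node_blks, unsigned int dent_blks,
				 bool curseg_space)
{
	unsigned int lower = node_blks / CAP_BLKS + dent_blks / CAP_BLKS;
	unsigned int upper = lower + !!(node_blks % CAP_BLKS) + !!(dent_blks % CAP_BLKS);

	lower += reserved;
	upper += reserved;

	if (free_secs > upper)		/* comfortably enough sections */
		return false;
	if (free_secs <= lower)		/* definitely not enough */
		return true;
	return !curseg_space;		/* borderline: rely on open sections */
}

int main(void)
{
	printf("%d\n", not_enough_free_secs(10, 6, 4096, 100, false));	/* 0: 10 > 9 */
	printf("%d\n", not_enough_free_secs(9, 6, 4096, 100, true));	/* 0: curseg absorbs */
	printf("%d\n", not_enough_free_secs(8, 6, 4096, 100, true));	/* 1: 8 <= 8 */
	return 0;
}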
+
+static inline bool has_enough_free_blks(struct f2fs_sb_info *sbi)
+{
+ unsigned int total_free_blocks = 0;
+ unsigned int avail_user_block_count;
+
+ spin_lock(&sbi->stat_lock);
+
+ avail_user_block_count = get_available_block_count(sbi, NULL, true);
+ total_free_blocks = avail_user_block_count - (unsigned int)valid_user_blocks(sbi);
+
+ spin_unlock(&sbi->stat_lock);
+
+ return total_free_blocks > 0;
}
static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return true;
- if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
+ if (likely(has_enough_free_secs(sbi, 0, 0)))
+ return true;
+ if (!f2fs_lfs_mode(sbi) &&
+ likely(has_enough_free_blks(sbi)))
return true;
return false;
}
@@ -650,7 +772,9 @@ static inline int utilization(struct f2fs_sb_info *sbi)
* pages over min_fsync_blocks. (=default option)
* F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
* F2FS_IPU_NOCACHE - disable IPU bio cache.
- * F2FS_IPUT_DISABLE - disable IPU. (=default option in LFS mode)
+ * F2FS_IPU_HONOR_OPU_WRITE - prefer OPU write over IPU write if the inode
+ * has the FI_OPU_WRITE flag.
+ * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
*/
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
@@ -658,6 +782,9 @@ static inline int utilization(struct f2fs_sb_info *sbi)
#define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */
+#define F2FS_IPU_DISABLE 0
+
+/* Modifications to this enum must be kept in sync with the ipu_mode_names array */
enum {
F2FS_IPU_FORCE,
F2FS_IPU_SSR,
@@ -666,8 +793,30 @@ enum {
F2FS_IPU_FSYNC,
F2FS_IPU_ASYNC,
F2FS_IPU_NOCACHE,
+ F2FS_IPU_HONOR_OPU_WRITE,
+ F2FS_IPU_MAX,
};
+static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
+{
+ return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
+}
+
+#define F2FS_IPU_POLICY(name) \
+static inline bool IS_##name(struct f2fs_sb_info *sbi) \
+{ \
+ return SM_I(sbi)->ipu_policy & BIT(name); \
+}
+
+F2FS_IPU_POLICY(F2FS_IPU_FORCE);
+F2FS_IPU_POLICY(F2FS_IPU_SSR);
+F2FS_IPU_POLICY(F2FS_IPU_UTIL);
+F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
+F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
+F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
+F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
+F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
+
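ipu_policy is now a bitmask of BIT(F2FS_IPU_*) values, with F2FS_IPU_DISABLE (0) meaning no bit is set, and the F2FS_IPU_POLICY() macro stamps out one IS_* predicate per policy bit. A standalone sketch of the generated predicates, with an invented enum and structure:

#include <stdbool.h>
#include <stdio.h>

#define BIT(nr) (1U << (nr))

enum { IPU_FORCE, IPU_SSR, IPU_UTIL, IPU_FSYNC, IPU_MAX };

struct sm_info { unsigned int ipu_policy; };

#define IPU_POLICY(name)					\
static bool IS_##name(const struct sm_info *sm)			\
{								\
	return sm->ipu_policy & BIT(name);			\
}

IPU_POLICY(IPU_FORCE)
IPU_POLICY(IPU_FSYNC)

int main(void)
{
	struct sm_info sm = { .ipu_policy = BIT(IPU_FSYNC) };

	printf("%d %d\n", IS_IPU_FORCE(&sm), IS_IPU_FSYNC(&sm));	/* 0 1 */
	return 0;
}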
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
int type)
{
@@ -682,15 +831,10 @@ static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
return curseg->alloc_type;
}
-static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
-{
- struct curseg_info *curseg = CURSEG_I(sbi, type);
- return curseg->next_blkoff;
-}
-
-static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
+static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
+ unsigned int segno)
{
- f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
+ return segno <= (MAIN_SEGS(sbi) - 1);
}
static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
@@ -734,20 +878,22 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
GET_SIT_VBLOCKS(raw_sit), valid_blocks);
set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
return -EFSCORRUPTED;
}
- if (usable_blks_per_seg < sbi->blocks_per_seg)
+ if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
- sbi->blocks_per_seg,
- usable_blks_per_seg) != sbi->blocks_per_seg);
+ BLKS_PER_SEG(sbi),
+ usable_blks_per_seg) != BLKS_PER_SEG(sbi));
/* check segment usage, and check boundary of a given segment number */
if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
- || segno > TOTAL_SEGS(sbi) - 1)) {
+ || !valid_main_segno(sbi, segno))) {
f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
GET_SIT_VBLOCKS(raw_sit), segno);
set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
return -EFSCORRUPTED;
}
return 0;
@@ -760,7 +906,7 @@ static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
unsigned int offset = SIT_BLOCK_OFFSET(start);
block_t blk_addr = sit_i->sit_base_addr + offset;
- check_seg_range(sbi, start);
+ f2fs_bug_on(sbi, !valid_main_segno(sbi, start));
#ifdef CONFIG_F2FS_CHECK_FS
if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
@@ -840,7 +986,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
- if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
+ if (is_cursec(sbi, secno) || (sbi->cur_victim_sec == secno))
return true;
return false;
}
@@ -858,9 +1004,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
return 0;
if (type == DATA)
- return sbi->blocks_per_seg;
+ return BLKS_PER_SEG(sbi);
else if (type == NODE)
- return 8 * sbi->blocks_per_seg;
+ return SEGS_TO_BLKS(sbi, 8);
else if (type == META)
return 8 * BIO_MAX_VECS;
else
@@ -909,6 +1055,6 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
if (!wakeup || !is_idle(sbi, DISCARD_TIME))
return;
wake_up:
- dcc->discard_wake = 1;
+ dcc->discard_wake = true;
wake_up_interruptible_all(&dcc->discard_wait_queue);
}
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index dd3c3c7a90ec..b88babcf6ab4 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -28,10 +28,13 @@ static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
return count > 0 ? count : 0;
}
-static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
+static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
+ enum extent_type type)
{
- return atomic_read(&sbi->total_zombie_tree) +
- atomic_read(&sbi->total_ext_node);
+ struct extent_tree_info *eti = &sbi->extent_tree[type];
+
+ return atomic_read(&eti->total_zombie_tree) +
+ atomic_read(&eti->total_ext_node);
}
unsigned long f2fs_shrink_count(struct shrinker *shrink,
@@ -53,8 +56,11 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
}
spin_unlock(&f2fs_list_lock);
- /* count extent cache entries */
- count += __count_extent_cache(sbi);
+ /* count read extent cache entries */
+ count += __count_extent_cache(sbi, EX_READ);
+
+ /* count block age extent cache entries */
+ count += __count_extent_cache(sbi, EX_BLOCK_AGE);
/* count clean nat cache entries */
count += __count_nat_entries(sbi);
@@ -67,7 +73,7 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
mutex_unlock(&sbi->umount_mutex);
}
spin_unlock(&f2fs_list_lock);
- return count;
+ return count ?: SHRINK_EMPTY;
}
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
@@ -100,7 +106,10 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
sbi->shrinker_run_no = run_no;
/* shrink extent cache entries */
- freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
+ freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);
+
+ /* shrink read extent cache entries */
+ freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);
/* shrink clean nat cache entries */
if (freed < nr)
@@ -121,6 +130,103 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
return freed;
}
+unsigned int f2fs_donate_files(void)
+{
+ struct f2fs_sb_info *sbi;
+ struct list_head *p;
+ unsigned int donate_files = 0;
+
+ spin_lock(&f2fs_list_lock);
+ p = f2fs_list.next;
+ while (p != &f2fs_list) {
+ sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+ /* stop f2fs_put_super */
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+ spin_unlock(&f2fs_list_lock);
+
+ donate_files += sbi->donate_files;
+
+ spin_lock(&f2fs_list_lock);
+ p = p->next;
+ mutex_unlock(&sbi->umount_mutex);
+ }
+ spin_unlock(&f2fs_list_lock);
+
+ return donate_files;
+}
+
+static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi,
+ unsigned int reclaim_caches_kb)
+{
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ unsigned int nfiles = sbi->donate_files;
+ pgoff_t npages = reclaim_caches_kb >> (PAGE_SHIFT - 10);
+
+ while (npages && nfiles--) {
+ pgoff_t len;
+
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ if (list_empty(&sbi->inode_list[DONATE_INODE])) {
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+ break;
+ }
+ fi = list_first_entry(&sbi->inode_list[DONATE_INODE],
+ struct f2fs_inode_info, gdonate_list);
+ list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+
+ if (!inode)
+ continue;
+
+ inode_lock(inode);
+ if (!is_inode_flag_set(inode, FI_DONATE_FINISHED)) {
+ len = fi->donate_end - fi->donate_start + 1;
+ npages = npages < len ? 0 : npages - len;
+
+ invalidate_inode_pages2_range(inode->i_mapping,
+ fi->donate_start, fi->donate_end);
+ set_inode_flag(inode, FI_DONATE_FINISHED);
+ }
+ inode_unlock(inode);
+
+ iput(inode);
+ cond_resched();
+ }
+ return npages << (PAGE_SHIFT - 10);
+}
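The reclaim budget arrives in KiB, is converted to pages with >> (PAGE_SHIFT - 10), has each donated inode's page range subtracted until it is exhausted, and the remainder is converted back to KiB on return. A toy model of that bookkeeping, assuming 4 KiB pages and invented range sizes:

#include <stdio.h>

#define PAGE_SHIFT 12U	/* 4 KiB pages assumed */

int main(void)
{
	unsigned int budget_kb = 1024;				/* 1 MiB to drop */
	unsigned long npages = budget_kb >> (PAGE_SHIFT - 10);	/* 256 pages */
	unsigned long donated_ranges[] = { 100, 200, 50 };	/* pages per inode */

	for (unsigned int i = 0; i < 3 && npages; i++) {
		unsigned long len = donated_ranges[i];

		npages = npages < len ? 0 : npages - len;
	}
	printf("left: %lu KiB\n", npages << (PAGE_SHIFT - 10));	/* 0 */
	return 0;
}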
+
+void f2fs_reclaim_caches(unsigned int reclaim_caches_kb)
+{
+ struct f2fs_sb_info *sbi;
+ struct list_head *p;
+
+ spin_lock(&f2fs_list_lock);
+ p = f2fs_list.next;
+ while (p != &f2fs_list && reclaim_caches_kb) {
+ sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+ /* stop f2fs_put_super */
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+ spin_unlock(&f2fs_list_lock);
+
+ reclaim_caches_kb = do_reclaim_caches(sbi, reclaim_caches_kb);
+
+ spin_lock(&f2fs_list_lock);
+ p = p->next;
+ mutex_unlock(&sbi->umount_mutex);
+ }
+ spin_unlock(&f2fs_list_lock);
+}
+
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
spin_lock(&f2fs_list_lock);
@@ -130,7 +236,9 @@ void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
- f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+ f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));
+ f2fs_shrink_age_extent_tree(sbi,
+ __count_extent_cache(sbi, EX_BLOCK_AGE));
spin_lock(&f2fs_list_lock);
list_del_init(&sbi->s_list);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7960ce066c1b..c4c225e09dc4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -8,9 +8,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/sched/mm.h>
#include <linux/statfs.h>
-#include <linux/buffer_head.h>
-#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
@@ -27,6 +27,8 @@
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>
+#include <linux/ctype.h>
+#include <linux/fs_parser.h>
#include "f2fs.h"
#include "node.h"
@@ -43,101 +45,118 @@ static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION
const char *f2fs_fault_name[FAULT_MAX] = {
- [FAULT_KMALLOC] = "kmalloc",
- [FAULT_KVMALLOC] = "kvmalloc",
- [FAULT_PAGE_ALLOC] = "page alloc",
- [FAULT_PAGE_GET] = "page get",
- [FAULT_ALLOC_NID] = "alloc nid",
- [FAULT_ORPHAN] = "orphan",
- [FAULT_BLOCK] = "no more block",
- [FAULT_DIR_DEPTH] = "too big dir depth",
- [FAULT_EVICT_INODE] = "evict_inode fail",
- [FAULT_TRUNCATE] = "truncate fail",
- [FAULT_READ_IO] = "read IO error",
- [FAULT_CHECKPOINT] = "checkpoint error",
- [FAULT_DISCARD] = "discard error",
- [FAULT_WRITE_IO] = "write IO error",
- [FAULT_SLAB_ALLOC] = "slab alloc",
- [FAULT_DQUOT_INIT] = "dquot initialize",
+ [FAULT_KMALLOC] = "kmalloc",
+ [FAULT_KVMALLOC] = "kvmalloc",
+ [FAULT_PAGE_ALLOC] = "page alloc",
+ [FAULT_PAGE_GET] = "page get",
+ [FAULT_ALLOC_BIO] = "alloc bio(obsolete)",
+ [FAULT_ALLOC_NID] = "alloc nid",
+ [FAULT_ORPHAN] = "orphan",
+ [FAULT_BLOCK] = "no more block",
+ [FAULT_DIR_DEPTH] = "too big dir depth",
+ [FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_TRUNCATE] = "truncate fail",
+ [FAULT_READ_IO] = "read IO error",
+ [FAULT_CHECKPOINT] = "checkpoint error",
+ [FAULT_DISCARD] = "discard error",
+ [FAULT_WRITE_IO] = "write IO error",
+ [FAULT_SLAB_ALLOC] = "slab alloc",
+ [FAULT_DQUOT_INIT] = "dquot initialize",
+ [FAULT_LOCK_OP] = "lock_op",
+ [FAULT_BLKADDR_VALIDITY] = "invalid blkaddr",
+ [FAULT_BLKADDR_CONSISTENCE] = "inconsistent blkaddr",
+ [FAULT_NO_SEGMENT] = "no free segment",
+ [FAULT_INCONSISTENT_FOOTER] = "inconsistent footer",
+ [FAULT_TIMEOUT] = "timeout",
+ [FAULT_VMALLOC] = "vmalloc",
};
-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
- unsigned int type)
+int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
+ unsigned long type, enum fault_option fo)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
- if (rate) {
+ if (fo & FAULT_ALL) {
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
+ return 0;
+ }
+
+ if (fo & FAULT_RATE) {
+ if (rate > INT_MAX)
+ return -EINVAL;
atomic_set(&ffi->inject_ops, 0);
- ffi->inject_rate = rate;
+ ffi->inject_rate = (int)rate;
+ f2fs_info(sbi, "build fault injection rate: %lu", rate);
}
- if (type)
- ffi->inject_type = type;
+ if (fo & FAULT_TYPE) {
+ if (type >= BIT(FAULT_MAX))
+ return -EINVAL;
+ ffi->inject_type = (unsigned int)type;
+ f2fs_info(sbi, "build fault injection type: 0x%lx", type);
+ }
- if (!rate && !type)
- memset(ffi, 0, sizeof(struct f2fs_fault_info));
+ return 0;
}
#endif
/* f2fs-wide shrinker description */
-static struct shrinker f2fs_shrinker_info = {
- .scan_objects = f2fs_shrink_scan,
- .count_objects = f2fs_shrink_count,
- .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *f2fs_shrinker_info;
+
+static int __init f2fs_init_shrinker(void)
+{
+ f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
+ if (!f2fs_shrinker_info)
+ return -ENOMEM;
+
+ f2fs_shrinker_info->count_objects = f2fs_shrink_count;
+ f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
+
+ shrinker_register(f2fs_shrinker_info);
+
+ return 0;
+}
+
+static void f2fs_exit_shrinker(void)
+{
+ shrinker_free(f2fs_shrinker_info);
+}
enum {
Opt_gc_background,
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
- Opt_nodiscard,
Opt_noheap,
Opt_heap,
Opt_user_xattr,
- Opt_nouser_xattr,
Opt_acl,
- Opt_noacl,
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
- Opt_noinline_xattr,
Opt_inline_xattr_size,
Opt_inline_data,
Opt_inline_dentry,
- Opt_noinline_dentry,
Opt_flush_merge,
- Opt_noflush_merge,
- Opt_nobarrier,
+ Opt_barrier,
Opt_fastboot,
Opt_extent_cache,
- Opt_noextent_cache,
- Opt_noinline_data,
Opt_data_flush,
Opt_reserve_root,
+ Opt_reserve_node,
Opt_resgid,
Opt_resuid,
Opt_mode,
- Opt_io_size_bits,
Opt_fault_injection,
Opt_fault_type,
Opt_lazytime,
- Opt_nolazytime,
Opt_quota,
- Opt_noquota,
Opt_usrquota,
Opt_grpquota,
Opt_prjquota,
Opt_usrjquota,
Opt_grpjquota,
Opt_prjjquota,
- Opt_offusrjquota,
- Opt_offgrpjquota,
- Opt_offprjjquota,
- Opt_jqfmt_vfsold,
- Opt_jqfmt_vfsv0,
- Opt_jqfmt_vfsv1,
- Opt_whint,
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
@@ -147,99 +166,221 @@ enum {
Opt_checkpoint_disable_cap_perc,
Opt_checkpoint_enable,
Opt_checkpoint_merge,
- Opt_nocheckpoint_merge,
Opt_compress_algorithm,
Opt_compress_log_size,
- Opt_compress_extension,
Opt_nocompress_extension,
+ Opt_compress_extension,
Opt_compress_chksum,
Opt_compress_mode,
Opt_compress_cache,
Opt_atgc,
Opt_gc_merge,
- Opt_nogc_merge,
Opt_discard_unit,
+ Opt_memory_mode,
+ Opt_age_extent_cache,
+ Opt_errors,
+ Opt_nat_bits,
+ Opt_jqfmt,
+ Opt_checkpoint,
+ Opt_lookup_mode,
Opt_err,
};
-static match_table_t f2fs_tokens = {
- {Opt_gc_background, "background_gc=%s"},
- {Opt_disable_roll_forward, "disable_roll_forward"},
- {Opt_norecovery, "norecovery"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_noheap, "no_heap"},
- {Opt_heap, "heap"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_active_logs, "active_logs=%u"},
- {Opt_disable_ext_identify, "disable_ext_identify"},
- {Opt_inline_xattr, "inline_xattr"},
- {Opt_noinline_xattr, "noinline_xattr"},
- {Opt_inline_xattr_size, "inline_xattr_size=%u"},
- {Opt_inline_data, "inline_data"},
- {Opt_inline_dentry, "inline_dentry"},
- {Opt_noinline_dentry, "noinline_dentry"},
- {Opt_flush_merge, "flush_merge"},
- {Opt_noflush_merge, "noflush_merge"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_fastboot, "fastboot"},
- {Opt_extent_cache, "extent_cache"},
- {Opt_noextent_cache, "noextent_cache"},
- {Opt_noinline_data, "noinline_data"},
- {Opt_data_flush, "data_flush"},
- {Opt_reserve_root, "reserve_root=%u"},
- {Opt_resgid, "resgid=%u"},
- {Opt_resuid, "resuid=%u"},
- {Opt_mode, "mode=%s"},
- {Opt_io_size_bits, "io_bits=%u"},
- {Opt_fault_injection, "fault_injection=%u"},
- {Opt_fault_type, "fault_type=%u"},
- {Opt_lazytime, "lazytime"},
- {Opt_nolazytime, "nolazytime"},
- {Opt_quota, "quota"},
- {Opt_noquota, "noquota"},
- {Opt_usrquota, "usrquota"},
- {Opt_grpquota, "grpquota"},
- {Opt_prjquota, "prjquota"},
- {Opt_usrjquota, "usrjquota=%s"},
- {Opt_grpjquota, "grpjquota=%s"},
- {Opt_prjjquota, "prjjquota=%s"},
- {Opt_offusrjquota, "usrjquota="},
- {Opt_offgrpjquota, "grpjquota="},
- {Opt_offprjjquota, "prjjquota="},
- {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
- {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
- {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
- {Opt_whint, "whint_mode=%s"},
- {Opt_alloc, "alloc_mode=%s"},
- {Opt_fsync, "fsync_mode=%s"},
- {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
- {Opt_test_dummy_encryption, "test_dummy_encryption"},
- {Opt_inlinecrypt, "inlinecrypt"},
- {Opt_checkpoint_disable, "checkpoint=disable"},
- {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
- {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
- {Opt_checkpoint_enable, "checkpoint=enable"},
- {Opt_checkpoint_merge, "checkpoint_merge"},
- {Opt_nocheckpoint_merge, "nocheckpoint_merge"},
- {Opt_compress_algorithm, "compress_algorithm=%s"},
- {Opt_compress_log_size, "compress_log_size=%u"},
- {Opt_compress_extension, "compress_extension=%s"},
- {Opt_nocompress_extension, "nocompress_extension=%s"},
- {Opt_compress_chksum, "compress_chksum"},
- {Opt_compress_mode, "compress_mode=%s"},
- {Opt_compress_cache, "compress_cache"},
- {Opt_atgc, "atgc"},
- {Opt_gc_merge, "gc_merge"},
- {Opt_nogc_merge, "nogc_merge"},
- {Opt_discard_unit, "discard_unit=%s"},
+static const struct constant_table f2fs_param_background_gc[] = {
+ {"on", BGGC_MODE_ON},
+ {"off", BGGC_MODE_OFF},
+ {"sync", BGGC_MODE_SYNC},
+ {}
+};
+
+static const struct constant_table f2fs_param_mode[] = {
+ {"adaptive", FS_MODE_ADAPTIVE},
+ {"lfs", FS_MODE_LFS},
+ {"fragment:segment", FS_MODE_FRAGMENT_SEG},
+ {"fragment:block", FS_MODE_FRAGMENT_BLK},
+ {}
+};
+
+static const struct constant_table f2fs_param_jqfmt[] = {
+ {"vfsold", QFMT_VFS_OLD},
+ {"vfsv0", QFMT_VFS_V0},
+ {"vfsv1", QFMT_VFS_V1},
+ {}
+};
+
+static const struct constant_table f2fs_param_alloc_mode[] = {
+ {"default", ALLOC_MODE_DEFAULT},
+ {"reuse", ALLOC_MODE_REUSE},
+ {}
+};
+static const struct constant_table f2fs_param_fsync_mode[] = {
+ {"posix", FSYNC_MODE_POSIX},
+ {"strict", FSYNC_MODE_STRICT},
+ {"nobarrier", FSYNC_MODE_NOBARRIER},
+ {}
+};
+
+static const struct constant_table f2fs_param_compress_mode[] = {
+ {"fs", COMPR_MODE_FS},
+ {"user", COMPR_MODE_USER},
+ {}
+};
+
+static const struct constant_table f2fs_param_discard_unit[] = {
+ {"block", DISCARD_UNIT_BLOCK},
+ {"segment", DISCARD_UNIT_SEGMENT},
+ {"section", DISCARD_UNIT_SECTION},
+ {}
+};
+
+static const struct constant_table f2fs_param_memory_mode[] = {
+ {"normal", MEMORY_MODE_NORMAL},
+ {"low", MEMORY_MODE_LOW},
+ {}
+};
+
+static const struct constant_table f2fs_param_errors[] = {
+ {"remount-ro", MOUNT_ERRORS_READONLY},
+ {"continue", MOUNT_ERRORS_CONTINUE},
+ {"panic", MOUNT_ERRORS_PANIC},
+ {}
+};
+
+static const struct constant_table f2fs_param_lookup_mode[] = {
+ {"perf", LOOKUP_PERF},
+ {"compat", LOOKUP_COMPAT},
+ {"auto", LOOKUP_AUTO},
+ {}
+};
+
+static const struct fs_parameter_spec f2fs_param_specs[] = {
+ fsparam_enum("background_gc", Opt_gc_background, f2fs_param_background_gc),
+ fsparam_flag("disable_roll_forward", Opt_disable_roll_forward),
+ fsparam_flag("norecovery", Opt_norecovery),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_flag("no_heap", Opt_noheap),
+ fsparam_flag("heap", Opt_heap),
+ fsparam_flag_no("user_xattr", Opt_user_xattr),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_s32("active_logs", Opt_active_logs),
+ fsparam_flag("disable_ext_identify", Opt_disable_ext_identify),
+ fsparam_flag_no("inline_xattr", Opt_inline_xattr),
+ fsparam_s32("inline_xattr_size", Opt_inline_xattr_size),
+ fsparam_flag_no("inline_data", Opt_inline_data),
+ fsparam_flag_no("inline_dentry", Opt_inline_dentry),
+ fsparam_flag_no("flush_merge", Opt_flush_merge),
+ fsparam_flag_no("barrier", Opt_barrier),
+ fsparam_flag("fastboot", Opt_fastboot),
+ fsparam_flag_no("extent_cache", Opt_extent_cache),
+ fsparam_flag("data_flush", Opt_data_flush),
+ fsparam_u32("reserve_root", Opt_reserve_root),
+ fsparam_u32("reserve_node", Opt_reserve_node),
+ fsparam_gid("resgid", Opt_resgid),
+ fsparam_uid("resuid", Opt_resuid),
+ fsparam_enum("mode", Opt_mode, f2fs_param_mode),
+ fsparam_s32("fault_injection", Opt_fault_injection),
+ fsparam_u32("fault_type", Opt_fault_type),
+ fsparam_flag_no("lazytime", Opt_lazytime),
+ fsparam_flag_no("quota", Opt_quota),
+ fsparam_flag("usrquota", Opt_usrquota),
+ fsparam_flag("grpquota", Opt_grpquota),
+ fsparam_flag("prjquota", Opt_prjquota),
+ fsparam_string_empty("usrjquota", Opt_usrjquota),
+ fsparam_string_empty("grpjquota", Opt_grpjquota),
+ fsparam_string_empty("prjjquota", Opt_prjjquota),
+ fsparam_flag("nat_bits", Opt_nat_bits),
+ fsparam_enum("jqfmt", Opt_jqfmt, f2fs_param_jqfmt),
+ fsparam_enum("alloc_mode", Opt_alloc, f2fs_param_alloc_mode),
+ fsparam_enum("fsync_mode", Opt_fsync, f2fs_param_fsync_mode),
+ fsparam_string("test_dummy_encryption", Opt_test_dummy_encryption),
+ fsparam_flag("test_dummy_encryption", Opt_test_dummy_encryption),
+ fsparam_flag("inlinecrypt", Opt_inlinecrypt),
+ fsparam_string("checkpoint", Opt_checkpoint),
+ fsparam_flag_no("checkpoint_merge", Opt_checkpoint_merge),
+ fsparam_string("compress_algorithm", Opt_compress_algorithm),
+ fsparam_u32("compress_log_size", Opt_compress_log_size),
+ fsparam_string("compress_extension", Opt_compress_extension),
+ fsparam_string("nocompress_extension", Opt_nocompress_extension),
+ fsparam_flag("compress_chksum", Opt_compress_chksum),
+ fsparam_enum("compress_mode", Opt_compress_mode, f2fs_param_compress_mode),
+ fsparam_flag("compress_cache", Opt_compress_cache),
+ fsparam_flag("atgc", Opt_atgc),
+ fsparam_flag_no("gc_merge", Opt_gc_merge),
+ fsparam_enum("discard_unit", Opt_discard_unit, f2fs_param_discard_unit),
+ fsparam_enum("memory", Opt_memory_mode, f2fs_param_memory_mode),
+ fsparam_flag("age_extent_cache", Opt_age_extent_cache),
+ fsparam_enum("errors", Opt_errors, f2fs_param_errors),
+ fsparam_enum("lookup_mode", Opt_lookup_mode, f2fs_param_lookup_mode),
+ {}
+};
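The old match_table gives way to fs_parameter_spec entries whose enum-valued options resolve through the constant tables above. A toy string-to-constant lookup in the same spirit; lookup_constant() here is a local stand-in and the numeric values are invented for the sketch:

#include <stdio.h>
#include <string.h>

struct constant_table { const char *name; int value; };

static const struct constant_table background_gc[] = {
	{ "on",   0 },	/* BGGC_MODE_ON   (values invented for the sketch) */
	{ "off",  1 },	/* BGGC_MODE_OFF  */
	{ "sync", 2 },	/* BGGC_MODE_SYNC */
	{ NULL,   0 },
};

static int lookup_constant(const struct constant_table *tbl, const char *s,
			   int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(tbl->name, s))
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("%d %d\n",
	       lookup_constant(background_gc, "sync", -1),	/*  2 */
	       lookup_constant(background_gc, "bogus", -1));	/* -1 */
	return 0;
}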
+
+/* Resort to a match_table for this interestingly formatted option */
+static match_table_t f2fs_checkpoint_tokens = {
+ {Opt_checkpoint_disable, "disable"},
+ {Opt_checkpoint_disable_cap, "disable:%u"},
+ {Opt_checkpoint_disable_cap_perc, "disable:%u%%"},
+ {Opt_checkpoint_enable, "enable"},
{Opt_err, NULL},
};
-void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
+#define F2FS_SPEC_background_gc (1 << 0)
+#define F2FS_SPEC_inline_xattr_size (1 << 1)
+#define F2FS_SPEC_active_logs (1 << 2)
+#define F2FS_SPEC_reserve_root (1 << 3)
+#define F2FS_SPEC_resgid (1 << 4)
+#define F2FS_SPEC_resuid (1 << 5)
+#define F2FS_SPEC_mode (1 << 6)
+#define F2FS_SPEC_fault_injection (1 << 7)
+#define F2FS_SPEC_fault_type (1 << 8)
+#define F2FS_SPEC_jqfmt (1 << 9)
+#define F2FS_SPEC_alloc_mode (1 << 10)
+#define F2FS_SPEC_fsync_mode (1 << 11)
+#define F2FS_SPEC_checkpoint_disable_cap (1 << 12)
+#define F2FS_SPEC_checkpoint_disable_cap_perc (1 << 13)
+#define F2FS_SPEC_compress_level (1 << 14)
+#define F2FS_SPEC_compress_algorithm (1 << 15)
+#define F2FS_SPEC_compress_log_size (1 << 16)
+#define F2FS_SPEC_compress_extension (1 << 17)
+#define F2FS_SPEC_nocompress_extension (1 << 18)
+#define F2FS_SPEC_compress_chksum (1 << 19)
+#define F2FS_SPEC_compress_mode (1 << 20)
+#define F2FS_SPEC_discard_unit (1 << 21)
+#define F2FS_SPEC_memory_mode (1 << 22)
+#define F2FS_SPEC_errors (1 << 23)
+#define F2FS_SPEC_lookup_mode (1 << 24)
+#define F2FS_SPEC_reserve_node (1 << 25)
+
+struct f2fs_fs_context {
+ struct f2fs_mount_info info;
+ unsigned long long opt_mask; /* Bits changed */
+ unsigned int spec_mask;
+ unsigned short qname_mask;
+};
+
+#define F2FS_CTX_INFO(ctx) ((ctx)->info)
+
+static inline void ctx_set_opt(struct f2fs_fs_context *ctx,
+ enum f2fs_mount_opt flag)
+{
+ ctx->info.opt |= BIT(flag);
+ ctx->opt_mask |= BIT(flag);
+}
+
+static inline void ctx_clear_opt(struct f2fs_fs_context *ctx,
+ enum f2fs_mount_opt flag)
+{
+ ctx->info.opt &= ~BIT(flag);
+ ctx->opt_mask |= BIT(flag);
+}
+
+static inline bool ctx_test_opt(struct f2fs_fs_context *ctx,
+ enum f2fs_mount_opt flag)
+{
+ return ctx->info.opt & BIT(flag);
+}
+
+void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -250,49 +391,52 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
level = printk_get_level(fmt);
vaf.fmt = printk_skip_level(fmt);
vaf.va = &args;
- printk("%c%cF2FS-fs (%s): %pV\n",
- KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ if (limit_rate) {
+ if (sbi)
+ printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
+ KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ else
+ printk_ratelimited("%c%cF2FS-fs: %pV\n",
+ KERN_SOH_ASCII, level, &vaf);
+ } else {
+ if (sbi)
+ printk("%c%cF2FS-fs (%s): %pV\n",
+ KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+ else
+ printk("%c%cF2FS-fs: %pV\n",
+ KERN_SOH_ASCII, level, &vaf);
+ }
va_end(args);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
__u16 magic;
char *name;
- char *version;
+ unsigned int version;
} f2fs_sb_encoding_map[] = {
- {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
+ {F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};
-static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
- const struct f2fs_sb_encodings **encoding,
- __u16 *flags)
+static const struct f2fs_sb_encodings *
+f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
{
__u16 magic = le16_to_cpu(sb->s_encoding);
int i;
for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
if (magic == f2fs_sb_encoding_map[i].magic)
- break;
-
- if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
- return -EINVAL;
+ return &f2fs_sb_encoding_map[i];
- *encoding = &f2fs_sb_encoding_map[i];
- *flags = le16_to_cpu(sb->s_encoding_flags);
-
- return 0;
+ return NULL;
}
struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
- F2FS_NAME_LEN);
- if (!f2fs_cf_name_slab)
- return -ENOMEM;
- return 0;
+ F2FS_NAME_LEN);
+ return f2fs_cf_name_slab ? 0 : -ENOMEM;
}
static void f2fs_destroy_casefold_cache(void)
@@ -306,22 +450,30 @@ static void f2fs_destroy_casefold_cache(void) { }
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
- block_t limit = min((sbi->user_block_count << 1) / 1000,
+ block_t block_limit = min((sbi->user_block_count >> 3),
sbi->user_block_count - sbi->reserved_blocks);
+ block_t node_limit = sbi->total_node_count >> 3;
- /* limit is 0.2% */
+ /* limit is 12.5% */
if (test_opt(sbi, RESERVE_ROOT) &&
- F2FS_OPTION(sbi).root_reserved_blocks > limit) {
- F2FS_OPTION(sbi).root_reserved_blocks = limit;
+ F2FS_OPTION(sbi).root_reserved_blocks > block_limit) {
+ F2FS_OPTION(sbi).root_reserved_blocks = block_limit;
f2fs_info(sbi, "Reduce reserved blocks for root = %u",
F2FS_OPTION(sbi).root_reserved_blocks);
}
- if (!test_opt(sbi, RESERVE_ROOT) &&
+ if (test_opt(sbi, RESERVE_NODE) &&
+ F2FS_OPTION(sbi).root_reserved_nodes > node_limit) {
+ F2FS_OPTION(sbi).root_reserved_nodes = node_limit;
+ f2fs_info(sbi, "Reduce reserved nodes for root = %u",
+ F2FS_OPTION(sbi).root_reserved_nodes);
+ }
+ if (!test_opt(sbi, RESERVE_ROOT) && !test_opt(sbi, RESERVE_NODE) &&
(!uid_eq(F2FS_OPTION(sbi).s_resuid,
make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
!gid_eq(F2FS_OPTION(sbi).s_resgid,
make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
- f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
+ f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root"
+ " and reserve_node",
from_kuid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resuid),
from_kgid_munged(&init_user_ns,
@@ -349,157 +501,123 @@ static void init_once(void *foo)
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
inode_init_once(&fi->vfs_inode);
+#ifdef CONFIG_FS_ENCRYPTION
+ fi->i_crypt_info = NULL;
+#endif
+#ifdef CONFIG_FS_VERITY
+ fi->i_verity_info = NULL;
+#endif
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
-static int f2fs_set_qf_name(struct super_block *sb, int qtype,
- substring_t *args)
+/*
+ * Note the name of the specified quota file.
+ */
+static int f2fs_note_qf_name(struct fs_context *fc, int qtype,
+ struct fs_parameter *param)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct f2fs_fs_context *ctx = fc->fs_private;
char *qname;
- int ret = -EINVAL;
- if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
- f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
+ if (param->size < 1) {
+ f2fs_err(NULL, "Missing quota name");
return -EINVAL;
}
- if (f2fs_sb_has_quota_ino(sbi)) {
- f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
+ if (strchr(param->string, '/')) {
+ f2fs_err(NULL, "quotafile must be on filesystem root");
+ return -EINVAL;
+ }
+ if (ctx->info.s_qf_names[qtype]) {
+ if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) {
+ f2fs_err(NULL, "Quota file already specified");
+ return -EINVAL;
+ }
return 0;
}
- qname = match_strdup(args);
+ qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
if (!qname) {
- f2fs_err(sbi, "Not enough memory for storing quotafile name");
+ f2fs_err(NULL, "Not enough memory for storing quotafile name");
return -ENOMEM;
}
- if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
- if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
- ret = 0;
- else
- f2fs_err(sbi, "%s quota file already specified",
- QTYPE2NAME(qtype));
- goto errout;
- }
- if (strchr(qname, '/')) {
- f2fs_err(sbi, "quotafile must be on filesystem root");
- goto errout;
- }
- F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
- set_opt(sbi, QUOTA);
+ F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname;
+ ctx->qname_mask |= 1 << qtype;
return 0;
-errout:
- kfree(qname);
- return ret;
}
-static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
+/*
+ * Clear the name of the specified quota file.
+ */
+static int f2fs_unnote_qf_name(struct fs_context *fc, int qtype)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct f2fs_fs_context *ctx = fc->fs_private;
- if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
- f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
- return -EINVAL;
- }
- kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
- F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
+ kfree(ctx->info.s_qf_names[qtype]);
+ ctx->info.s_qf_names[qtype] = NULL;
+ ctx->qname_mask |= 1 << qtype;
return 0;
}
-static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
+static void f2fs_unnote_qf_name_all(struct fs_context *fc)
{
- /*
- * We do the test below only for project quotas. 'usrquota' and
- * 'grpquota' mount options are allowed even without quota feature
- * to support legacy quotas in quota files.
- */
- if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
- f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
- return -1;
- }
- if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
- F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
- F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
- if (test_opt(sbi, USRQUOTA) &&
- F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
- clear_opt(sbi, USRQUOTA);
-
- if (test_opt(sbi, GRPQUOTA) &&
- F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
- clear_opt(sbi, GRPQUOTA);
-
- if (test_opt(sbi, PRJQUOTA) &&
- F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
- clear_opt(sbi, PRJQUOTA);
-
- if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
- test_opt(sbi, PRJQUOTA)) {
- f2fs_err(sbi, "old and new quota format mixing");
- return -1;
- }
-
- if (!F2FS_OPTION(sbi).s_jquota_fmt) {
- f2fs_err(sbi, "journaled quota format not specified");
- return -1;
- }
- }
+ int i;
- if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
- f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
- F2FS_OPTION(sbi).s_jquota_fmt = 0;
- }
- return 0;
+ for (i = 0; i < MAXQUOTAS; i++)
+ f2fs_unnote_qf_name(fc, i);
}
#endif
-static int f2fs_set_test_dummy_encryption(struct super_block *sb,
- const char *opt,
- const substring_t *arg,
- bool is_remount)
+static int f2fs_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct f2fs_fs_context *ctx)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
-#ifdef CONFIG_FS_ENCRYPTION
int err;
- if (!f2fs_sb_has_encrypt(sbi)) {
- f2fs_err(sbi, "Encrypt feature is off");
- return -EINVAL;
- }
-
- /*
- * This mount option is just for testing, and it's not worthwhile to
- * implement the extra complexity (e.g. RCU protection) that would be
- * needed to allow it to be set or changed during remount. We do allow
- * it to be specified during remount, but only if there is no change.
- */
- if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
- f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
+ if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
+ f2fs_warn(NULL, "test_dummy_encryption option not supported");
return -EINVAL;
}
- err = fscrypt_set_test_dummy_encryption(
- sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
+ err = fscrypt_parse_test_dummy_encryption(param,
+ &ctx->info.dummy_enc_policy);
if (err) {
- if (err == -EEXIST)
- f2fs_warn(sbi,
- "Can't change test_dummy_encryption on remount");
- else if (err == -EINVAL)
- f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
- opt);
+ if (err == -EINVAL)
+ f2fs_warn(NULL, "Value of option \"%s\" is unrecognized",
+ param->key);
+ else if (err == -EEXIST)
+ f2fs_warn(NULL, "Conflicting test_dummy_encryption options");
else
- f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
- opt, err);
+ f2fs_warn(NULL, "Error processing option \"%s\" [%d]",
+ param->key, err);
return -EINVAL;
}
- f2fs_warn(sbi, "Test dummy encryption mode enabled");
-#else
- f2fs_warn(sbi, "Test dummy encryption mount option ignored");
-#endif
return 0;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
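+/*
+ * Return true if @new_ext is already present (case-insensitive) in the
+ * compress (@is_ext) or nocompress extension list of @info.
+ */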
+static bool is_compress_extension_exist(struct f2fs_mount_info *info,
+ const char *new_ext, bool is_ext)
+{
+ unsigned char (*ext)[F2FS_EXTENSION_LEN];
+ int ext_cnt;
+ int i;
+
+ if (is_ext) {
+ ext = info->extensions;
+ ext_cnt = info->compress_ext_cnt;
+ } else {
+ ext = info->noextensions;
+ ext_cnt = info->nocompress_ext_cnt;
+ }
+
+ for (i = 0; i < ext_cnt; i++) {
+ if (!strcasecmp(new_ext, ext[i]))
+ return true;
+ }
+
+ return false;
+}
+
/*
 * 1. The same extension name cannot appear in both the compress and non-compress extension
* at the same time.
@@ -507,28 +625,28 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
* extension will be treated as special cases and will not be compressed.
 * 3. Don't allow the non-compress extension to specify all files.
*/
-static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
+static int f2fs_test_compress_extension(unsigned char (*noext)[F2FS_EXTENSION_LEN],
+ int noext_cnt,
+ unsigned char (*ext)[F2FS_EXTENSION_LEN],
+ int ext_cnt)
{
- unsigned char (*ext)[F2FS_EXTENSION_LEN];
- unsigned char (*noext)[F2FS_EXTENSION_LEN];
- int ext_cnt, noext_cnt, index = 0, no_index = 0;
-
- ext = F2FS_OPTION(sbi).extensions;
- ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
- noext = F2FS_OPTION(sbi).noextensions;
- noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ int index = 0, no_index = 0;
if (!noext_cnt)
return 0;
for (no_index = 0; no_index < noext_cnt; no_index++) {
+ if (strlen(noext[no_index]) == 0)
+ continue;
if (!strcasecmp("*", noext[no_index])) {
- f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
+ f2fs_info(NULL, "Don't allow the nocompress extension specifies all files");
return -EINVAL;
}
for (index = 0; index < ext_cnt; index++) {
+ if (strlen(ext[index]) == 0)
+ continue;
if (!strcasecmp(ext[index], noext[no_index])) {
- f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
+ f2fs_info(NULL, "Don't allow the same extension %s appear in both compress and nocompress extension",
ext[index]);
return -EINVAL;
}
@@ -538,767 +656,895 @@ static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_F2FS_FS_LZ4
-static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+static int f2fs_set_lz4hc_level(struct f2fs_fs_context *ctx, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
unsigned int level;
-#endif
if (strlen(str) == 3) {
- F2FS_OPTION(sbi).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
-#ifdef CONFIG_F2FS_FS_LZ4HC
str += 3;
if (str[0] != ':') {
- f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
return -EINVAL;
}
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
- if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
- f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+ if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
+ f2fs_info(NULL, "invalid lz4hc compress level: %d", level);
return -EINVAL;
}
- F2FS_OPTION(sbi).compress_level = level;
+ F2FS_CTX_INFO(ctx).compress_level = level;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
#else
- f2fs_info(sbi, "kernel doesn't support lz4hc compression");
+ if (strlen(str) == 3) {
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
+ return 0;
+ }
+ f2fs_info(NULL, "kernel doesn't support lz4hc compression");
return -EINVAL;
#endif
}
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
-static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+static int f2fs_set_zstd_level(struct f2fs_fs_context *ctx, const char *str)
{
- unsigned int level;
+ int level;
int len = 4;
if (strlen(str) == len) {
- F2FS_OPTION(sbi).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
str += len;
if (str[0] != ':') {
- f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
return -EINVAL;
}
- if (kstrtouint(str + 1, 10, &level))
+ if (kstrtoint(str + 1, 10, &level))
return -EINVAL;
- if (!level || level > ZSTD_maxCLevel()) {
- f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ /* f2fs does not support negative compress level now */
+ if (level < 0) {
+ f2fs_info(NULL, "do not support negative compress level: %d", level);
+ return -ERANGE;
+ }
+
+ if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
+ f2fs_info(NULL, "invalid zstd compress level: %d", level);
return -EINVAL;
}
- F2FS_OPTION(sbi).compress_level = level;
+ F2FS_CTX_INFO(ctx).compress_level = level;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
#endif
#endif
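+/*
+ * fs_context parameter callback: record each recognised option in the private
+ * f2fs_fs_context; checks that need the superblock are deferred to the
+ * consistency checks below.
+ */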
-static int parse_options(struct super_block *sb, char *options, bool is_remount)
+static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- substring_t args[MAX_OPT_ARGS];
+ struct f2fs_fs_context *ctx = fc->fs_private;
#ifdef CONFIG_F2FS_FS_COMPRESSION
unsigned char (*ext)[F2FS_EXTENSION_LEN];
unsigned char (*noext)[F2FS_EXTENSION_LEN];
int ext_cnt, noext_cnt;
+ char *name;
#endif
- char *p, *name;
- int arg = 0;
- kuid_t uid;
- kgid_t gid;
- int ret;
-
- if (!options)
- goto default_check;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
- /*
- * Initialize args struct so we know whether arg was
- * found; some options take optional arguments.
- */
- args[0].to = args[0].from = NULL;
- token = match_token(p, f2fs_tokens, args);
+ substring_t args[MAX_OPT_ARGS];
+ struct fs_parse_result result;
+ int token, ret, arg;
- switch (token) {
- case Opt_gc_background:
- name = match_strdup(&args[0]);
+ token = fs_parse(fc, f2fs_param_specs, param, &result);
+ if (token < 0)
+ return token;
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "on")) {
- F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
- } else if (!strcmp(name, "off")) {
- F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
- } else if (!strcmp(name, "sync")) {
- F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_disable_roll_forward:
- set_opt(sbi, DISABLE_ROLL_FORWARD);
- break;
- case Opt_norecovery:
- /* this option mounts f2fs with ro */
- set_opt(sbi, NORECOVERY);
- if (!f2fs_readonly(sb))
- return -EINVAL;
- break;
- case Opt_discard:
- if (!f2fs_hw_support_discard(sbi)) {
- f2fs_warn(sbi, "device does not support discard");
- break;
- }
- set_opt(sbi, DISCARD);
- break;
- case Opt_nodiscard:
- if (f2fs_hw_should_discard(sbi)) {
- f2fs_warn(sbi, "discard is required for zoned block devices");
- return -EINVAL;
- }
- clear_opt(sbi, DISCARD);
- break;
- case Opt_noheap:
- set_opt(sbi, NOHEAP);
- break;
- case Opt_heap:
- clear_opt(sbi, NOHEAP);
- break;
+ switch (token) {
+ case Opt_gc_background:
+ F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_background_gc;
+ break;
+ case Opt_disable_roll_forward:
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD);
+ break;
+ case Opt_norecovery:
+ /* requires ro mount, checked in f2fs_validate_options */
+ ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY);
+ break;
+ case Opt_discard:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_DISCARD);
+ break;
+ case Opt_noheap:
+ case Opt_heap:
+ f2fs_warn(NULL, "heap/no_heap options were deprecated");
+ break;
#ifdef CONFIG_F2FS_FS_XATTR
- case Opt_user_xattr:
- set_opt(sbi, XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt(sbi, XATTR_USER);
- break;
- case Opt_inline_xattr:
- set_opt(sbi, INLINE_XATTR);
- break;
- case Opt_noinline_xattr:
- clear_opt(sbi, INLINE_XATTR);
- break;
- case Opt_inline_xattr_size:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- set_opt(sbi, INLINE_XATTR_SIZE);
- F2FS_OPTION(sbi).inline_xattr_size = arg;
- break;
+ case Opt_user_xattr:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER);
+ break;
+ case Opt_inline_xattr:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
+ break;
+ case Opt_inline_xattr_size:
+ if (result.int_32 < MIN_INLINE_XATTR_SIZE ||
+ result.int_32 > MAX_INLINE_XATTR_SIZE) {
+ f2fs_err(NULL, "inline xattr size is out of range: %u ~ %u",
+ (u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE);
+ return -EINVAL;
+ }
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE);
+ F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32;
+ ctx->spec_mask |= F2FS_SPEC_inline_xattr_size;
+ break;
#else
- case Opt_user_xattr:
- f2fs_info(sbi, "user_xattr options not supported");
- break;
- case Opt_nouser_xattr:
- f2fs_info(sbi, "nouser_xattr options not supported");
- break;
- case Opt_inline_xattr:
- f2fs_info(sbi, "inline_xattr options not supported");
- break;
- case Opt_noinline_xattr:
- f2fs_info(sbi, "noinline_xattr options not supported");
- break;
+ case Opt_user_xattr:
+ case Opt_inline_xattr:
+ case Opt_inline_xattr_size:
+ f2fs_info(NULL, "%s options not supported", param->key);
+ break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
- case Opt_acl:
- set_opt(sbi, POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(sbi, POSIX_ACL);
- break;
+ case Opt_acl:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL);
+ break;
#else
- case Opt_acl:
- f2fs_info(sbi, "acl options not supported");
- break;
- case Opt_noacl:
- f2fs_info(sbi, "noacl options not supported");
- break;
+ case Opt_acl:
+ f2fs_info(NULL, "%s options not supported", param->key);
+ break;
#endif
- case Opt_active_logs:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (arg != 2 && arg != 4 &&
- arg != NR_CURSEG_PERSIST_TYPE)
- return -EINVAL;
- F2FS_OPTION(sbi).active_logs = arg;
- break;
- case Opt_disable_ext_identify:
- set_opt(sbi, DISABLE_EXT_IDENTIFY);
- break;
- case Opt_inline_data:
- set_opt(sbi, INLINE_DATA);
- break;
- case Opt_inline_dentry:
- set_opt(sbi, INLINE_DENTRY);
- break;
- case Opt_noinline_dentry:
- clear_opt(sbi, INLINE_DENTRY);
- break;
- case Opt_flush_merge:
- set_opt(sbi, FLUSH_MERGE);
- break;
- case Opt_noflush_merge:
- clear_opt(sbi, FLUSH_MERGE);
- break;
- case Opt_nobarrier:
- set_opt(sbi, NOBARRIER);
- break;
- case Opt_fastboot:
- set_opt(sbi, FASTBOOT);
- break;
- case Opt_extent_cache:
- set_opt(sbi, EXTENT_CACHE);
- break;
- case Opt_noextent_cache:
- clear_opt(sbi, EXTENT_CACHE);
- break;
- case Opt_noinline_data:
- clear_opt(sbi, INLINE_DATA);
- break;
- case Opt_data_flush:
- set_opt(sbi, DATA_FLUSH);
- break;
- case Opt_reserve_root:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (test_opt(sbi, RESERVE_ROOT)) {
- f2fs_info(sbi, "Preserve previous reserve_root=%u",
- F2FS_OPTION(sbi).root_reserved_blocks);
- } else {
- F2FS_OPTION(sbi).root_reserved_blocks = arg;
- set_opt(sbi, RESERVE_ROOT);
- }
- break;
- case Opt_resuid:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), arg);
- if (!uid_valid(uid)) {
- f2fs_err(sbi, "Invalid uid value %d", arg);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).s_resuid = uid;
- break;
- case Opt_resgid:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), arg);
- if (!gid_valid(gid)) {
- f2fs_err(sbi, "Invalid gid value %d", arg);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).s_resgid = gid;
- break;
- case Opt_mode:
- name = match_strdup(&args[0]);
-
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "adaptive")) {
- if (f2fs_sb_has_blkzoned(sbi)) {
- f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
- kfree(name);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
- } else if (!strcmp(name, "lfs")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
- } else if (!strcmp(name, "fragment:segment")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
- } else if (!strcmp(name, "fragment:block")) {
- F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_io_size_bits:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
- f2fs_warn(sbi, "Not support %d, larger than %d",
- 1 << arg, BIO_MAX_VECS);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).write_io_size_bits = arg;
- break;
+ case Opt_active_logs:
+ if (result.int_32 != 2 && result.int_32 != 4 &&
+ result.int_32 != NR_CURSEG_PERSIST_TYPE)
+ return -EINVAL;
+ ctx->spec_mask |= F2FS_SPEC_active_logs;
+ F2FS_CTX_INFO(ctx).active_logs = result.int_32;
+ break;
+ case Opt_disable_ext_identify:
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY);
+ break;
+ case Opt_inline_data:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA);
+ break;
+ case Opt_inline_dentry:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
+ break;
+ case Opt_flush_merge:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
+ break;
+ case Opt_barrier:
+ if (result.negated)
+ ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER);
+ else
+ ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER);
+ break;
+ case Opt_fastboot:
+ ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT);
+ break;
+ case Opt_extent_cache:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
+ break;
+ case Opt_data_flush:
+ ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH);
+ break;
+ case Opt_reserve_root:
+ ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
+ F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_reserve_root;
+ break;
+ case Opt_reserve_node:
+ ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
+ F2FS_CTX_INFO(ctx).root_reserved_nodes = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_reserve_node;
+ break;
+ case Opt_resuid:
+ F2FS_CTX_INFO(ctx).s_resuid = result.uid;
+ ctx->spec_mask |= F2FS_SPEC_resuid;
+ break;
+ case Opt_resgid:
+ F2FS_CTX_INFO(ctx).s_resgid = result.gid;
+ ctx->spec_mask |= F2FS_SPEC_resgid;
+ break;
+ case Opt_mode:
+ F2FS_CTX_INFO(ctx).fs_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_mode;
+ break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- case Opt_fault_injection:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
- set_opt(sbi, FAULT_INJECTION);
- break;
+ case Opt_fault_injection:
+ F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32;
+ ctx->spec_mask |= F2FS_SPEC_fault_injection;
+ ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
+ break;
- case Opt_fault_type:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- f2fs_build_fault_attr(sbi, 0, arg);
- set_opt(sbi, FAULT_INJECTION);
- break;
+ case Opt_fault_type:
+ if (result.uint_32 > BIT(FAULT_MAX))
+ return -EINVAL;
+ F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_fault_type;
+ ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
+ break;
#else
- case Opt_fault_injection:
- f2fs_info(sbi, "fault_injection options not supported");
- break;
-
- case Opt_fault_type:
- f2fs_info(sbi, "fault_type options not supported");
- break;
+ case Opt_fault_injection:
+ case Opt_fault_type:
+ f2fs_info(NULL, "%s options not supported", param->key);
+ break;
#endif
- case Opt_lazytime:
- sb->s_flags |= SB_LAZYTIME;
- break;
- case Opt_nolazytime:
- sb->s_flags &= ~SB_LAZYTIME;
- break;
+ case Opt_lazytime:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME);
+ break;
#ifdef CONFIG_QUOTA
- case Opt_quota:
- case Opt_usrquota:
- set_opt(sbi, USRQUOTA);
- break;
- case Opt_grpquota:
- set_opt(sbi, GRPQUOTA);
- break;
- case Opt_prjquota:
- set_opt(sbi, PRJQUOTA);
- break;
- case Opt_usrjquota:
- ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
- if (ret)
- return ret;
- break;
- case Opt_grpjquota:
- ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
- if (ret)
- return ret;
- break;
- case Opt_prjjquota:
- ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
- if (ret)
- return ret;
- break;
- case Opt_offusrjquota:
- ret = f2fs_clear_qf_name(sb, USRQUOTA);
- if (ret)
- return ret;
- break;
- case Opt_offgrpjquota:
- ret = f2fs_clear_qf_name(sb, GRPQUOTA);
- if (ret)
- return ret;
- break;
- case Opt_offprjjquota:
- ret = f2fs_clear_qf_name(sb, PRJQUOTA);
- if (ret)
- return ret;
- break;
- case Opt_jqfmt_vfsold:
- F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
- break;
- case Opt_jqfmt_vfsv0:
- F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
- break;
- case Opt_jqfmt_vfsv1:
- F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
- break;
- case Opt_noquota:
- clear_opt(sbi, QUOTA);
- clear_opt(sbi, USRQUOTA);
- clear_opt(sbi, GRPQUOTA);
- clear_opt(sbi, PRJQUOTA);
- break;
+ case Opt_quota:
+ if (result.negated) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA);
+ ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+ } else
+ ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ break;
+ case Opt_usrquota:
+ ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ break;
+ case Opt_grpquota:
+ ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ break;
+ case Opt_prjquota:
+ ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+ break;
+ case Opt_usrjquota:
+ if (!*param->string)
+ ret = f2fs_unnote_qf_name(fc, USRQUOTA);
+ else
+ ret = f2fs_note_qf_name(fc, USRQUOTA, param);
+ if (ret)
+ return ret;
+ break;
+ case Opt_grpjquota:
+ if (!*param->string)
+ ret = f2fs_unnote_qf_name(fc, GRPQUOTA);
+ else
+ ret = f2fs_note_qf_name(fc, GRPQUOTA, param);
+ if (ret)
+ return ret;
+ break;
+ case Opt_prjjquota:
+ if (!*param->string)
+ ret = f2fs_unnote_qf_name(fc, PRJQUOTA);
+ else
+ ret = f2fs_note_qf_name(fc, PRJQUOTA, param);
+ if (ret)
+ return ret;
+ break;
+ case Opt_jqfmt:
+ F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32;
+ ctx->spec_mask |= F2FS_SPEC_jqfmt;
+ break;
#else
- case Opt_quota:
- case Opt_usrquota:
- case Opt_grpquota:
- case Opt_prjquota:
- case Opt_usrjquota:
- case Opt_grpjquota:
- case Opt_prjjquota:
- case Opt_offusrjquota:
- case Opt_offgrpjquota:
- case Opt_offprjjquota:
- case Opt_jqfmt_vfsold:
- case Opt_jqfmt_vfsv0:
- case Opt_jqfmt_vfsv1:
- case Opt_noquota:
- f2fs_info(sbi, "quota operations not supported");
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ case Opt_prjquota:
+ case Opt_usrjquota:
+ case Opt_grpjquota:
+ case Opt_prjjquota:
+ f2fs_info(NULL, "quota operations not supported");
+ break;
#endif
- case Opt_whint:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "user-based")) {
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
- } else if (!strcmp(name, "off")) {
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
- } else if (!strcmp(name, "fs-based")) {
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_alloc:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
-
- if (!strcmp(name, "default")) {
- F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
- } else if (!strcmp(name, "reuse")) {
- F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_fsync:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "posix")) {
- F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
- } else if (!strcmp(name, "strict")) {
- F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
- } else if (!strcmp(name, "nobarrier")) {
- F2FS_OPTION(sbi).fsync_mode =
- FSYNC_MODE_NOBARRIER;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_test_dummy_encryption:
- ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
- is_remount);
- if (ret)
- return ret;
- break;
- case Opt_inlinecrypt:
+ case Opt_alloc:
+ F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_alloc_mode;
+ break;
+ case Opt_fsync:
+ F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_fsync_mode;
+ break;
+ case Opt_test_dummy_encryption:
+ ret = f2fs_parse_test_dummy_encryption(param, ctx);
+ if (ret)
+ return ret;
+ break;
+ case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
- sb->s_flags |= SB_INLINECRYPT;
+ ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT);
#else
- f2fs_info(sbi, "inline encryption not supported");
+ f2fs_info(NULL, "inline encryption not supported");
#endif
- break;
+ break;
+ case Opt_checkpoint:
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].from = args[0].to = NULL;
+ arg = 0;
+
+ /* revert to match_table for checkpoint= options */
+ token = match_token(param->string, f2fs_checkpoint_tokens, args);
+ switch (token) {
case Opt_checkpoint_disable_cap_perc:
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg < 0 || arg > 100)
return -EINVAL;
- F2FS_OPTION(sbi).unusable_cap_perc = arg;
- set_opt(sbi, DISABLE_CHECKPOINT);
+ F2FS_CTX_INFO(ctx).unusable_cap_perc = arg;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable_cap:
if (args->from && match_int(args, &arg))
return -EINVAL;
- F2FS_OPTION(sbi).unusable_cap = arg;
- set_opt(sbi, DISABLE_CHECKPOINT);
+ F2FS_CTX_INFO(ctx).unusable_cap = arg;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable:
- set_opt(sbi, DISABLE_CHECKPOINT);
+ ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_enable:
- clear_opt(sbi, DISABLE_CHECKPOINT);
- break;
- case Opt_checkpoint_merge:
- set_opt(sbi, MERGE_CHECKPOINT);
- break;
- case Opt_nocheckpoint_merge:
- clear_opt(sbi, MERGE_CHECKPOINT);
+ F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
+ F2FS_CTX_INFO(ctx).unusable_cap = 0;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
+ ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case Opt_checkpoint_merge:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
+ break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
- case Opt_compress_algorithm:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "lzo")) {
+ case Opt_compress_algorithm:
+ name = param->string;
+ if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
- F2FS_OPTION(sbi).compress_level = 0;
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_LZO;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support lzo compression");
+ f2fs_info(NULL, "kernel doesn't support lzo compression");
#endif
- } else if (!strncmp(name, "lz4", 3)) {
+ } else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
- ret = f2fs_set_lz4hc_level(sbi, name);
- if (ret) {
- kfree(name);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_LZ4;
+ ret = f2fs_set_lz4hc_level(ctx, name);
+ if (ret)
+ return -EINVAL;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support lz4 compression");
+ f2fs_info(NULL, "kernel doesn't support lz4 compression");
#endif
- } else if (!strncmp(name, "zstd", 4)) {
+ } else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
- ret = f2fs_set_zstd_level(sbi, name);
- if (ret) {
- kfree(name);
- return -EINVAL;
- }
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_ZSTD;
+ ret = f2fs_set_zstd_level(ctx, name);
+ if (ret)
+ return -EINVAL;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support zstd compression");
+ f2fs_info(NULL, "kernel doesn't support zstd compression");
#endif
- } else if (!strcmp(name, "lzo-rle")) {
+ } else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
- F2FS_OPTION(sbi).compress_level = 0;
- F2FS_OPTION(sbi).compress_algorithm =
- COMPRESS_LZORLE;
+ F2FS_CTX_INFO(ctx).compress_level = 0;
+ F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE;
+ ctx->spec_mask |= F2FS_SPEC_compress_level;
+ ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
- f2fs_info(sbi, "kernel doesn't support lzorle compression");
+ f2fs_info(NULL, "kernel doesn't support lzorle compression");
#endif
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case Opt_compress_log_size:
+ if (result.uint_32 < MIN_COMPRESS_LOG_SIZE ||
+ result.uint_32 > MAX_COMPRESS_LOG_SIZE) {
+ f2fs_err(NULL,
+ "Compress cluster log size is out of range");
+ return -EINVAL;
+ }
+ F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_compress_log_size;
+ break;
+ case Opt_compress_extension:
+ name = param->string;
+ ext = F2FS_CTX_INFO(ctx).extensions;
+ ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN ||
+ ext_cnt >= COMPRESS_EXT_NUM) {
+ f2fs_err(NULL, "invalid extension length/number");
+ return -EINVAL;
+ }
+
+ if (is_compress_extension_exist(&ctx->info, name, true))
break;
- case Opt_compress_log_size:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- if (args->from && match_int(args, &arg))
- return -EINVAL;
- if (arg < MIN_COMPRESS_LOG_SIZE ||
- arg > MAX_COMPRESS_LOG_SIZE) {
- f2fs_err(sbi,
- "Compress cluster log size is out of range");
- return -EINVAL;
- }
- F2FS_OPTION(sbi).compress_log_size = arg;
+
+ ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN);
+ if (ret < 0)
+ return ret;
+ F2FS_CTX_INFO(ctx).compress_ext_cnt++;
+ ctx->spec_mask |= F2FS_SPEC_compress_extension;
+ break;
+ case Opt_nocompress_extension:
+ name = param->string;
+ noext = F2FS_CTX_INFO(ctx).noextensions;
+ noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN ||
+ noext_cnt >= COMPRESS_EXT_NUM) {
+ f2fs_err(NULL, "invalid extension length/number");
+ return -EINVAL;
+ }
+
+ if (is_compress_extension_exist(&ctx->info, name, false))
break;
- case Opt_compress_extension:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
- }
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- ext = F2FS_OPTION(sbi).extensions;
- ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN);
+ if (ret < 0)
+ return ret;
+ F2FS_CTX_INFO(ctx).nocompress_ext_cnt++;
+ ctx->spec_mask |= F2FS_SPEC_nocompress_extension;
+ break;
+ case Opt_compress_chksum:
+ F2FS_CTX_INFO(ctx).compress_chksum = true;
+ ctx->spec_mask |= F2FS_SPEC_compress_chksum;
+ break;
+ case Opt_compress_mode:
+ F2FS_CTX_INFO(ctx).compress_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_compress_mode;
+ break;
+ case Opt_compress_cache:
+ ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE);
+ break;
+#else
+ case Opt_compress_algorithm:
+ case Opt_compress_log_size:
+ case Opt_compress_extension:
+ case Opt_nocompress_extension:
+ case Opt_compress_chksum:
+ case Opt_compress_mode:
+ case Opt_compress_cache:
+ f2fs_info(NULL, "compression options not supported");
+ break;
+#endif
+ case Opt_atgc:
+ ctx_set_opt(ctx, F2FS_MOUNT_ATGC);
+ break;
+ case Opt_gc_merge:
+ if (result.negated)
+ ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE);
+ else
+ ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE);
+ break;
+ case Opt_discard_unit:
+ F2FS_CTX_INFO(ctx).discard_unit = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_discard_unit;
+ break;
+ case Opt_memory_mode:
+ F2FS_CTX_INFO(ctx).memory_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_memory_mode;
+ break;
+ case Opt_age_extent_cache:
+ ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE);
+ break;
+ case Opt_errors:
+ F2FS_CTX_INFO(ctx).errors = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_errors;
+ break;
+ case Opt_nat_bits:
+ ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS);
+ break;
+ case Opt_lookup_mode:
+ F2FS_CTX_INFO(ctx).lookup_mode = result.uint_32;
+ ctx->spec_mask |= F2FS_SPEC_lookup_mode;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Check quota settings consistency.
+ */
+static int f2fs_check_quota_consistency(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ #ifdef CONFIG_QUOTA
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ bool quota_feature = f2fs_sb_has_quota_ino(sbi);
+ bool quota_turnon = sb_any_quota_loaded(sb);
+ char *old_qname, *new_qname;
+ bool usr_qf_name, grp_qf_name, prj_qf_name, usrquota, grpquota, prjquota;
+ int i;
- if (strlen(name) >= F2FS_EXTENSION_LEN ||
- ext_cnt >= COMPRESS_EXT_NUM) {
- f2fs_err(sbi,
- "invalid extension length/number");
- kfree(name);
- return -EINVAL;
+ /*
+ * We do the test below only for project quotas. 'usrquota' and
+ * 'grpquota' mount options are allowed even without quota feature
+ * to support legacy quotas in quota files.
+ */
+ if (ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA) &&
+ !f2fs_sb_has_project_quota(sbi)) {
+ f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
+ return -EINVAL;
+ }
+
+ if (ctx->qname_mask) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (!(ctx->qname_mask & (1 << i)))
+ continue;
+
+ old_qname = F2FS_OPTION(sbi).s_qf_names[i];
+ new_qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
+ if (quota_turnon &&
+ !!old_qname != !!new_qname)
+ goto err_jquota_change;
+
+ if (old_qname) {
+ if (!new_qname) {
+ f2fs_info(sbi, "remove qf_name %s",
+ old_qname);
+ continue;
+ } else if (strcmp(old_qname, new_qname) == 0) {
+ ctx->qname_mask &= ~(1 << i);
+ continue;
+ }
+ goto err_jquota_specified;
}
- strcpy(ext[ext_cnt], name);
- F2FS_OPTION(sbi).compress_ext_cnt++;
- kfree(name);
- break;
- case Opt_nocompress_extension:
- if (!f2fs_sb_has_compression(sbi)) {
- f2fs_info(sbi, "Image doesn't support compression");
- break;
+ if (quota_feature) {
+ f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
+ ctx->qname_mask &= ~(1 << i);
+ kfree(F2FS_CTX_INFO(ctx).s_qf_names[i]);
+ F2FS_CTX_INFO(ctx).s_qf_names[i] = NULL;
}
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
+ }
+ }
- noext = F2FS_OPTION(sbi).noextensions;
- noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ /* Make sure we don't mix old and new quota format */
+ usr_qf_name = F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+ F2FS_CTX_INFO(ctx).s_qf_names[USRQUOTA];
+ grp_qf_name = F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+ F2FS_CTX_INFO(ctx).s_qf_names[GRPQUOTA];
+ prj_qf_name = F2FS_OPTION(sbi).s_qf_names[PRJQUOTA] ||
+ F2FS_CTX_INFO(ctx).s_qf_names[PRJQUOTA];
+ usrquota = test_opt(sbi, USRQUOTA) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ grpquota = test_opt(sbi, GRPQUOTA) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ prjquota = test_opt(sbi, PRJQUOTA) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+
+ if (usr_qf_name) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
+ usrquota = false;
+ }
+ if (grp_qf_name) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
+ grpquota = false;
+ }
+ if (prj_qf_name) {
+ ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
+ prjquota = false;
+ }
+ if (usr_qf_name || grp_qf_name || prj_qf_name) {
+ if (grpquota || usrquota || prjquota) {
+ f2fs_err(sbi, "old and new quota format mixing");
+ return -EINVAL;
+ }
+ if (!(ctx->spec_mask & F2FS_SPEC_jqfmt ||
+ F2FS_OPTION(sbi).s_jquota_fmt)) {
+ f2fs_err(sbi, "journaled quota format not specified");
+ return -EINVAL;
+ }
+ }
+ return 0;
- if (strlen(name) >= F2FS_EXTENSION_LEN ||
- noext_cnt >= COMPRESS_EXT_NUM) {
- f2fs_err(sbi,
- "invalid extension length/number");
- kfree(name);
- return -EINVAL;
- }
+err_jquota_change:
+ f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
+ return -EINVAL;
+err_jquota_specified:
+ f2fs_err(sbi, "%s quota file already specified",
+ QTYPE2NAME(i));
+ return -EINVAL;
- strcpy(noext[noext_cnt], name);
- F2FS_OPTION(sbi).nocompress_ext_cnt++;
- kfree(name);
- break;
- case Opt_compress_chksum:
- F2FS_OPTION(sbi).compress_chksum = true;
- break;
- case Opt_compress_mode:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "fs")) {
- F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
- } else if (!strcmp(name, "user")) {
- F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
- case Opt_compress_cache:
- set_opt(sbi, COMPRESS_CACHE);
- break;
#else
- case Opt_compress_algorithm:
- case Opt_compress_log_size:
- case Opt_compress_extension:
- case Opt_nocompress_extension:
- case Opt_compress_chksum:
- case Opt_compress_mode:
- case Opt_compress_cache:
- f2fs_info(sbi, "compression options not supported");
- break;
+ if (f2fs_readonly(sbi->sb))
+ return 0;
+ if (f2fs_sb_has_quota_ino(sbi)) {
+ f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+ if (f2fs_sb_has_project_quota(sbi)) {
+ f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+
+ return 0;
#endif
- case Opt_atgc:
- set_opt(sbi, ATGC);
- break;
- case Opt_gc_merge:
- set_opt(sbi, GC_MERGE);
- break;
- case Opt_nogc_merge:
- clear_opt(sbi, GC_MERGE);
- break;
- case Opt_discard_unit:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "block")) {
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_BLOCK;
- } else if (!strcmp(name, "segment")) {
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_SEGMENT;
- } else if (!strcmp(name, "section")) {
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_SECTION;
- } else {
- kfree(name);
- return -EINVAL;
+}
+
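+/* test_dummy_encryption needs the encrypt feature and must not change on remount. */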
+static int f2fs_check_test_dummy_encryption(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy))
+ return 0;
+
+ if (!f2fs_sb_has_encrypt(sbi)) {
+ f2fs_err(sbi, "Encrypt feature is off");
+ return -EINVAL;
+ }
+
+ /*
+ * This mount option is just for testing, and it's not worthwhile to
+ * implement the extra complexity (e.g. RCU protection) that would be
+ * needed to allow it to be set or changed during remount. We do allow
+ * it to be specified during remount, but only if there is no change.
+ */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy,
+ &F2FS_CTX_INFO(ctx).dummy_enc_policy))
+ return 0;
+ f2fs_warn(sbi, "Can't set or change test_dummy_encryption on remount");
+ return -EINVAL;
+ }
+ return 0;
+}
+
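+/* Test/clear every compression-related "specified" bit in one go. */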
+static inline bool test_compression_spec(unsigned int mask)
+{
+ return mask & (F2FS_SPEC_compress_algorithm
+ | F2FS_SPEC_compress_log_size
+ | F2FS_SPEC_compress_extension
+ | F2FS_SPEC_nocompress_extension
+ | F2FS_SPEC_compress_chksum
+ | F2FS_SPEC_compress_mode);
+}
+
+static inline void clear_compression_spec(struct f2fs_fs_context *ctx)
+{
+ ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm
+ | F2FS_SPEC_compress_log_size
+ | F2FS_SPEC_compress_extension
+ | F2FS_SPEC_nocompress_extension
+ | F2FS_SPEC_compress_chksum
+ | F2FS_SPEC_compress_mode);
+}
+
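+/*
+ * Validate compression options against the superblock: drop them when the
+ * image lacks the compression feature, and reject extension lists that would
+ * overflow COMPRESS_EXT_NUM or conflict with each other or the existing lists.
+ */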
+static int f2fs_check_compression(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i, cnt;
+
+ if (!f2fs_sb_has_compression(sbi)) {
+ if (test_compression_spec(ctx->spec_mask) ||
+ ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE))
+ f2fs_info(sbi, "Image doesn't support compression");
+ clear_compression_spec(ctx);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_COMPRESS_CACHE);
+ return 0;
+ }
+ if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
+ cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
+ for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) {
+ if (is_compress_extension_exist(&F2FS_OPTION(sbi),
+ F2FS_CTX_INFO(ctx).extensions[i], true)) {
+ F2FS_CTX_INFO(ctx).extensions[i][0] = '\0';
+ cnt--;
}
- kfree(name);
- break;
- default:
- f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
- p);
+ }
+ if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
+ f2fs_err(sbi, "invalid extension length/number");
return -EINVAL;
}
}
-default_check:
-#ifdef CONFIG_QUOTA
- if (f2fs_check_quota_options(sbi))
+ if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
+ cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
+ for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) {
+ if (is_compress_extension_exist(&F2FS_OPTION(sbi),
+ F2FS_CTX_INFO(ctx).noextensions[i], false)) {
+ F2FS_CTX_INFO(ctx).noextensions[i][0] = '\0';
+ cnt--;
+ }
+ }
+ if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
+ f2fs_err(sbi, "invalid noextension length/number");
+ return -EINVAL;
+ }
+ }
+
+ if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
+ F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
+ F2FS_CTX_INFO(ctx).extensions,
+ F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
+ f2fs_err(sbi, "new noextensions conflicts with new extensions");
return -EINVAL;
-#else
- if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
- f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ }
+ if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
+ F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
+ F2FS_OPTION(sbi).extensions,
+ F2FS_OPTION(sbi).compress_ext_cnt)) {
+ f2fs_err(sbi, "new noextensions conflicts with old extensions");
return -EINVAL;
}
- if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
- f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
+ if (f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions,
+ F2FS_OPTION(sbi).nocompress_ext_cnt,
+ F2FS_CTX_INFO(ctx).extensions,
+ F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
+ f2fs_err(sbi, "new extensions conflicts with old noextensions");
return -EINVAL;
}
#endif
-#ifndef CONFIG_UNICODE
- if (f2fs_sb_has_casefold(sbi)) {
+ return 0;
+}
+
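+/*
+ * Cross-check the freshly parsed options against the current superblock state
+ * before any of them are applied.
+ */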
+static int f2fs_check_opt_consistency(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err;
+
+ if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb))
+ return -EINVAL;
+
+ if (f2fs_hw_should_discard(sbi) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
+ !ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
+ f2fs_warn(sbi, "discard is required for zoned block devices");
+ return -EINVAL;
+ }
+
+ if (!f2fs_hw_support_discard(sbi) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
+ ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
+ f2fs_warn(sbi, "device does not support discard");
+ ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_DISCARD);
+ }
+
+ if (f2fs_sb_has_device_alias(sbi) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_READ_EXTENT_CACHE)) &&
+ !ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) {
+ f2fs_err(sbi, "device aliasing requires extent cache");
+ return -EINVAL;
+ }
+
+ if (test_opt(sbi, RESERVE_ROOT) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_ROOT)) &&
+ ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) {
+ f2fs_info(sbi, "Preserve previous reserve_root=%u",
+ F2FS_OPTION(sbi).root_reserved_blocks);
+ ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_ROOT);
+ }
+ if (test_opt(sbi, RESERVE_NODE) &&
+ (ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_NODE)) &&
+ ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_NODE)) {
+ f2fs_info(sbi, "Preserve previous reserve_node=%u",
+ F2FS_OPTION(sbi).root_reserved_nodes);
+ ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
+ ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_NODE);
+ }
+
+ err = f2fs_check_test_dummy_encryption(fc, sb);
+ if (err)
+ return err;
+
+ err = f2fs_check_compression(fc, sb);
+ if (err)
+ return err;
+
+ err = f2fs_check_quota_consistency(fc, sb);
+ if (err)
+ return err;
+
+ if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
f2fs_err(sbi,
"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
return -EINVAL;
}
-#endif
+
/*
* The BLKZONED feature indicates that the drive was formatted with
* zone alignment optimization. This is optional for host-aware
* devices, but mandatory for host-managed zoned block devices.
*/
-#ifndef CONFIG_BLK_DEV_ZONED
- if (f2fs_sb_has_blkzoned(sbi)) {
- f2fs_err(sbi, "Zoned block device support is not enabled");
- return -EINVAL;
- }
-#endif
if (f2fs_sb_has_blkzoned(sbi)) {
- if (F2FS_OPTION(sbi).discard_unit !=
- DISCARD_UNIT_SECTION) {
+ if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) {
+ f2fs_warn(sbi, "zoned devices need bggc");
+ return -EINVAL;
+ }
+#ifdef CONFIG_BLK_DEV_ZONED
+ if ((ctx->spec_mask & F2FS_SPEC_discard_unit) &&
+ F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) {
f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
- F2FS_OPTION(sbi).discard_unit =
- DISCARD_UNIT_SECTION;
+ F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION;
}
- }
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_test_compress_extension(sbi)) {
- f2fs_err(sbi, "invalid compress or nocompress extension");
+ if ((ctx->spec_mask & F2FS_SPEC_mode) &&
+ F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) {
+ f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
+ return -EINVAL;
+ }
+#else
+ f2fs_err(sbi, "Zoned block device support is not enabled");
return -EINVAL;
- }
#endif
-
- if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
- f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
- F2FS_IO_SIZE_KB(sbi));
- return -EINVAL;
}
- if (test_opt(sbi, INLINE_XATTR_SIZE)) {
- int min_size, max_size;
-
+ if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) {
if (!f2fs_sb_has_extra_attr(sbi) ||
!f2fs_sb_has_flexible_inline_xattr(sbi)) {
f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
return -EINVAL;
}
- if (!test_opt(sbi, INLINE_XATTR)) {
+ if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) {
f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
return -EINVAL;
}
-
- min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
- max_size = MAX_INLINE_XATTR_SIZE;
-
- if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
- F2FS_OPTION(sbi).inline_xattr_size > max_size) {
- f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
- min_size, max_size);
- return -EINVAL;
- }
}
- if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
- f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
+ if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) &&
+ F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) {
+ f2fs_err(sbi, "LFS is not compatible with ATGC");
return -EINVAL;
}
- /* Not pass down write hints if the number of active logs is lesser
- * than NR_CURSEG_PERSIST_TYPE.
- */
- if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+ if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) {
+ f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
+ return -EINVAL;
+ }
if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
f2fs_err(sbi, "Allow to mount readonly mode only");
@@ -1307,12 +1553,203 @@ default_check:
return 0;
}
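+/* Copy validated journaled quota names and format from the context into the sbi. */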
+static void f2fs_apply_quota_options(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_QUOTA
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ bool quota_feature = f2fs_sb_has_quota_ino(sbi);
+ char *qname;
+ int i;
+
+ if (quota_feature)
+ return;
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (!(ctx->qname_mask & (1 << i)))
+ continue;
+
+ qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
+ if (qname) {
+ qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i],
+ GFP_KERNEL | __GFP_NOFAIL);
+ set_opt(sbi, QUOTA);
+ }
+ F2FS_OPTION(sbi).s_qf_names[i] = qname;
+ }
+
+ if (ctx->spec_mask & F2FS_SPEC_jqfmt)
+ F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt;
+
+ if (quota_feature && F2FS_OPTION(sbi).s_jquota_fmt) {
+ f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
+ F2FS_OPTION(sbi).s_jquota_fmt = 0;
+ }
+#endif
+}
+
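+/* Hand the parsed dummy encryption policy over to the sbi (no-op if one is already set). */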
+static void f2fs_apply_test_dummy_encryption(struct fs_context *fc,
+ struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) ||
+ /* if already set, it was already verified to be the same */
+ fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy))
+ return;
+ swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy);
+ f2fs_warn(sbi, "Test dummy encryption mode enabled");
+}
+
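+/* Merge compression settings and any new (no)compress extensions into the sbi. */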
+static void f2fs_apply_compression(struct fs_context *fc,
+ struct super_block *sb)
+{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned char (*ctx_ext)[F2FS_EXTENSION_LEN];
+ unsigned char (*sbi_ext)[F2FS_EXTENSION_LEN];
+ int ctx_cnt, sbi_cnt, i;
+
+ if (ctx->spec_mask & F2FS_SPEC_compress_level)
+ F2FS_OPTION(sbi).compress_level =
+ F2FS_CTX_INFO(ctx).compress_level;
+ if (ctx->spec_mask & F2FS_SPEC_compress_algorithm)
+ F2FS_OPTION(sbi).compress_algorithm =
+ F2FS_CTX_INFO(ctx).compress_algorithm;
+ if (ctx->spec_mask & F2FS_SPEC_compress_log_size)
+ F2FS_OPTION(sbi).compress_log_size =
+ F2FS_CTX_INFO(ctx).compress_log_size;
+ if (ctx->spec_mask & F2FS_SPEC_compress_chksum)
+ F2FS_OPTION(sbi).compress_chksum =
+ F2FS_CTX_INFO(ctx).compress_chksum;
+ if (ctx->spec_mask & F2FS_SPEC_compress_mode)
+ F2FS_OPTION(sbi).compress_mode =
+ F2FS_CTX_INFO(ctx).compress_mode;
+ if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
+ ctx_ext = F2FS_CTX_INFO(ctx).extensions;
+ ctx_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
+ sbi_ext = F2FS_OPTION(sbi).extensions;
+ sbi_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ for (i = 0; i < ctx_cnt; i++) {
+ if (strlen(ctx_ext[i]) == 0)
+ continue;
+ strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
+ sbi_cnt++;
+ }
+ F2FS_OPTION(sbi).compress_ext_cnt = sbi_cnt;
+ }
+ if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
+ ctx_ext = F2FS_CTX_INFO(ctx).noextensions;
+ ctx_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
+ sbi_ext = F2FS_OPTION(sbi).noextensions;
+ sbi_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+ for (i = 0; i < ctx_cnt; i++) {
+ if (strlen(ctx_ext[i]) == 0)
+ continue;
+ strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
+ sbi_cnt++;
+ }
+ F2FS_OPTION(sbi).nocompress_ext_cnt = sbi_cnt;
+ }
+#endif
+}
+
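+/*
+ * Apply everything recorded in the context: option bits masked by opt_mask
+ * first, then individual values guarded by spec_mask, then the compound
+ * compression/encryption/quota state.
+ */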
+static void f2fs_apply_options(struct fs_context *fc, struct super_block *sb)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
+ F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;
+
+ if (ctx->spec_mask & F2FS_SPEC_background_gc)
+ F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode;
+ if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size)
+ F2FS_OPTION(sbi).inline_xattr_size =
+ F2FS_CTX_INFO(ctx).inline_xattr_size;
+ if (ctx->spec_mask & F2FS_SPEC_active_logs)
+ F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs;
+ if (ctx->spec_mask & F2FS_SPEC_reserve_root)
+ F2FS_OPTION(sbi).root_reserved_blocks =
+ F2FS_CTX_INFO(ctx).root_reserved_blocks;
+ if (ctx->spec_mask & F2FS_SPEC_reserve_node)
+ F2FS_OPTION(sbi).root_reserved_nodes =
+ F2FS_CTX_INFO(ctx).root_reserved_nodes;
+ if (ctx->spec_mask & F2FS_SPEC_resgid)
+ F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid;
+ if (ctx->spec_mask & F2FS_SPEC_resuid)
+ F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid;
+ if (ctx->spec_mask & F2FS_SPEC_mode)
+ F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (ctx->spec_mask & F2FS_SPEC_fault_injection)
+ (void)f2fs_build_fault_attr(sbi,
+ F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, FAULT_RATE);
+ if (ctx->spec_mask & F2FS_SPEC_fault_type)
+ (void)f2fs_build_fault_attr(sbi, 0,
+ F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE);
+#endif
+ if (ctx->spec_mask & F2FS_SPEC_alloc_mode)
+ F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode;
+ if (ctx->spec_mask & F2FS_SPEC_fsync_mode)
+ F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode;
+ if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap)
+ F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap;
+ if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc)
+ F2FS_OPTION(sbi).unusable_cap_perc =
+ F2FS_CTX_INFO(ctx).unusable_cap_perc;
+ if (ctx->spec_mask & F2FS_SPEC_discard_unit)
+ F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit;
+ if (ctx->spec_mask & F2FS_SPEC_memory_mode)
+ F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode;
+ if (ctx->spec_mask & F2FS_SPEC_errors)
+ F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors;
+ if (ctx->spec_mask & F2FS_SPEC_lookup_mode)
+ F2FS_OPTION(sbi).lookup_mode = F2FS_CTX_INFO(ctx).lookup_mode;
+
+ f2fs_apply_compression(fc, sb);
+ f2fs_apply_test_dummy_encryption(fc, sb);
+ f2fs_apply_quota_options(fc, sb);
+}
+
+static int f2fs_sanity_check_options(struct f2fs_sb_info *sbi, bool remount)
+{
+ if (f2fs_sb_has_device_alias(sbi) &&
+ !test_opt(sbi, READ_EXTENT_CACHE)) {
+ f2fs_err(sbi, "device aliasing requires extent cache");
+ return -EINVAL;
+ }
+
+ if (!remount)
+ return 0;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) &&
+ sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
+ f2fs_err(sbi,
+ "zoned: max open zones %u is too small, need at least %u open zones",
+ sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
+ return -EINVAL;
+ }
+#endif
+ if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
+ f2fs_warn(sbi, "LFS is not compatible with IPU");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
struct f2fs_inode_info *fi;
- fi = f2fs_kmem_cache_alloc(f2fs_inode_cachep,
- GFP_F2FS_ZERO, false, F2FS_SB(sb));
+ if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
+ return NULL;
+
+ fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
if (!fi)
return NULL;
@@ -1321,16 +1758,16 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0);
- init_rwsem(&fi->i_sem);
+ atomic_set(&fi->open_count, 0);
+ atomic_set(&fi->writeback, 0);
+ init_f2fs_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
- INIT_LIST_HEAD(&fi->inmem_ilist);
- INIT_LIST_HEAD(&fi->inmem_pages);
- mutex_init(&fi->inmem_lock);
- init_rwsem(&fi->i_gc_rwsem[READ]);
- init_rwsem(&fi->i_gc_rwsem[WRITE]);
- init_rwsem(&fi->i_xattr_sem);
+ INIT_LIST_HEAD(&fi->gdonate_list);
+ init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
+ init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
+ init_f2fs_rwsem(&fi->i_xattr_sem);
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
@@ -1362,16 +1799,12 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
- if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
+ if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
- atomic_inc(&inode->i_count);
+ __iget(inode);
spin_unlock(&inode->i_lock);
- /* some remained atomic pages should discarded */
- if (f2fs_is_atomic_file(inode))
- f2fs_drop_inmem_pages(inode);
-
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
@@ -1393,7 +1826,7 @@ static int f2fs_drop_inode(struct inode *inode)
trace_f2fs_drop_inode(inode, 0);
return 0;
}
- ret = generic_drop_inode(inode);
+ ret = inode_generic_drop(inode);
if (!ret)
ret = fscrypt_drop_inode(inode);
trace_f2fs_drop_inode(inode, ret);
@@ -1418,6 +1851,12 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync)
inc_page_count(sbi, F2FS_DIRTY_IMETA);
}
spin_unlock(&sbi->inode_lock[DIRTY_META]);
+
+ /* if atomic write is not committed, set inode w/ atomic dirty */
+ if (!ret && f2fs_is_atomic_file(inode) &&
+ !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
+ set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+
return ret;
}
@@ -1467,8 +1906,9 @@ static void f2fs_free_inode(struct inode *inode)
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
- percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count);
+ percpu_counter_destroy(&sbi->rf_node_block_count);
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
@@ -1476,10 +1916,10 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
- blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+ if (i > 0)
+ bdev_fput(FDEV(i).bdev_file);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
- kfree(FDEV(i).zone_capacity_blocks);
#endif
}
kvfree(sbi->devs);
@@ -1489,7 +1929,8 @@ static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
- bool dropped;
+ int err = 0;
+ bool done;
/* unregister procfs/sysfs entries in advance to avoid race case */
f2fs_unregister_sysfs(sbi);
@@ -1515,18 +1956,18 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
- f2fs_write_checkpoint(sbi, &cpc);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ err = f2fs_write_checkpoint(sbi, &cpc);
}
/* be sure to wait for any on-going discard commands */
- dropped = f2fs_issue_discard_timeout(sbi);
-
- if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
- !sbi->discard_blks && !dropped) {
+ done = f2fs_issue_discard_timeout(sbi);
+ if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
- f2fs_write_checkpoint(sbi, &cpc);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ err = f2fs_write_checkpoint(sbi, &cpc);
}
/*
@@ -1543,6 +1984,11 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ if (err || f2fs_cp_error(sbi)) {
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
+ truncate_inode_pages_final(META_MAPPING(sbi));
+ }
+
f2fs_bug_on(sbi, sbi->fsync_node_num);
f2fs_destroy_compress_inode(sbi);
@@ -1553,6 +1999,15 @@ static void f2fs_put_super(struct super_block *sb)
iput(sbi->meta_inode);
sbi->meta_inode = NULL;
+ /* Should check the page counts after dropping all node/meta pages */
+ for (i = 0; i < NR_COUNT_TYPE; i++) {
+ if (!get_pages(sbi, i))
+ continue;
+ f2fs_err(sbi, "detect filesystem reference count leak during "
+ "umount, type: %d, count: %lld", i, get_pages(sbi, i));
+ f2fs_bug_on(sbi, 1);
+ }
+
/*
* iput() can update stat information, if f2fs_write_checkpoint()
* above failed with error.
@@ -1563,19 +2018,16 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_node_manager(sbi);
f2fs_destroy_segment_manager(sbi);
+ /* flush s_error_work before sbi destroy */
+ flush_work(&sbi->s_error_work);
+
f2fs_destroy_post_read_wq(sbi);
kvfree(sbi->ckpt);
- sb->s_fs_info = NULL;
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super);
- destroy_device_list(sbi);
f2fs_destroy_page_array_cache(sbi);
- f2fs_destroy_xattr_caches(sbi);
- mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(F2FS_OPTION(sbi).s_qf_names[i]);
@@ -1584,11 +2036,10 @@ static void f2fs_put_super(struct super_block *sb)
destroy_percpu_info(sbi);
f2fs_destroy_iostat(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
- kvfree(sbi->write_io[i]);
-#ifdef CONFIG_UNICODE
+ kfree(sbi->write_io[i]);
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
- kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
@@ -1606,33 +2057,56 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return -EAGAIN;
- if (sync)
+ if (sync) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_issue_checkpoint(sbi);
+ }
return err;
}
static int f2fs_freeze(struct super_block *sb)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
if (f2fs_readonly(sb))
return 0;
/* IO error happened before */
- if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
/* must be clean, since sync_filesystem() was already called */
- if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY))
return -EINVAL;
- /* ensure no checkpoint required */
- if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
- return -EINVAL;
+ sbi->umount_lock_holder = current;
+
+ /* Let's flush checkpoints and stop the thread. */
+ f2fs_flush_ckpt_thread(sbi);
+
+ sbi->umount_lock_holder = NULL;
+
+ /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
+ set_sbi_flag(sbi, SBI_IS_FREEZING);
return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ /*
+	 * Creating a snapshot on this LVM device will set discard_max_bytes
+	 * of the mounted device to zero, so let's drop all remaining
+	 * discards here.
+ * We don't need to disable real-time discard because discard_max_bytes
+ * will recover after removal of snapshot.
+ */
+ if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
+ f2fs_issue_discard_timeout(sbi);
+
+ clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
@@ -1653,26 +2127,32 @@ static int f2fs_statfs_project(struct super_block *sb,
limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
dquot->dq_dqb.dqb_bhardlimit);
- if (limit)
- limit >>= sb->s_blocksize_bits;
+ limit >>= sb->s_blocksize_bits;
+
+ if (limit) {
+ uint64_t remaining = 0;
- if (limit && buf->f_blocks > limit) {
curblock = (dquot->dq_dqb.dqb_curspace +
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
- buf->f_blocks = limit;
- buf->f_bfree = buf->f_bavail =
- (buf->f_blocks > curblock) ?
- (buf->f_blocks - curblock) : 0;
+ if (limit > curblock)
+ remaining = limit - curblock;
+
+ buf->f_blocks = min(buf->f_blocks, limit);
+ buf->f_bfree = min(buf->f_bfree, remaining);
+ buf->f_bavail = min(buf->f_bavail, remaining);
}
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
dquot->dq_dqb.dqb_ihardlimit);
- if (limit && buf->f_files > limit) {
- buf->f_files = limit;
- buf->f_ffree =
- (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
- (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+ if (limit) {
+ uint64_t remaining = 0;
+
+ if (limit > dquot->dq_dqb.dqb_curinodes)
+ remaining = limit - dquot->dq_dqb.dqb_curinodes;
+
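+		/*
+		 * Example: with an inode hard limit of 1000 and 400 inodes in
+		 * use, remaining = 600; f_files is capped at 1000 and f_ffree
+		 * at 600, so statfs never reports more than the quota permits.
+		 */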
+ buf->f_files = min(buf->f_files, limit);
+ buf->f_ffree = min(buf->f_ffree, remaining);
}
spin_unlock(&dquot->dq_dqb_lock);
@@ -1688,18 +2168,24 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
block_t total_count, user_block_count, start_count;
u64 avail_node_count;
+ unsigned int total_valid_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
- user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
+
+ spin_lock(&sbi->stat_lock);
+ if (sbi->carve_out)
+ buf->f_blocks -= sbi->current_reserved_blocks;
+ user_block_count = sbi->user_block_count;
+ total_valid_node_count = valid_node_count(sbi);
+ avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
- spin_lock(&sbi->stat_lock);
if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
buf->f_bfree = 0;
else
@@ -1712,14 +2198,12 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
else
buf->f_bavail = 0;
- avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
-
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
buf->f_ffree = buf->f_bavail;
} else {
buf->f_files = avail_node_count;
- buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
+ buf->f_ffree = min(avail_node_count - total_valid_node_count,
buf->f_bavail);
}
@@ -1727,9 +2211,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid = u64_to_fsid(id);
#ifdef CONFIG_QUOTA
- if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
+ if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
- f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
+ f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
}
#endif
return 0;
@@ -1841,19 +2325,24 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
if (test_opt(sbi, GC_MERGE))
seq_puts(seq, ",gc_merge");
+ else
+ seq_puts(seq, ",nogc_merge");
if (test_opt(sbi, DISABLE_ROLL_FORWARD))
seq_puts(seq, ",disable_roll_forward");
if (test_opt(sbi, NORECOVERY))
seq_puts(seq, ",norecovery");
- if (test_opt(sbi, DISCARD))
+ if (test_opt(sbi, DISCARD)) {
seq_puts(seq, ",discard");
- else
+ if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
+ seq_printf(seq, ",discard_unit=%s", "block");
+ else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
+ seq_printf(seq, ",discard_unit=%s", "segment");
+ else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
+ seq_printf(seq, ",discard_unit=%s", "section");
+ } else {
seq_puts(seq, ",nodiscard");
- if (test_opt(sbi, NOHEAP))
- seq_puts(seq, ",no_heap");
- else
- seq_puts(seq, ",heap");
+ }
#ifdef CONFIG_F2FS_FS_XATTR
if (test_opt(sbi, XATTR_USER))
seq_puts(seq, ",user_xattr");
@@ -1883,16 +2372,22 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",inline_dentry");
else
seq_puts(seq, ",noinline_dentry");
- if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
+ if (test_opt(sbi, FLUSH_MERGE))
seq_puts(seq, ",flush_merge");
+ else
+ seq_puts(seq, ",noflush_merge");
if (test_opt(sbi, NOBARRIER))
seq_puts(seq, ",nobarrier");
+ else
+ seq_puts(seq, ",barrier");
if (test_opt(sbi, FASTBOOT))
seq_puts(seq, ",fastboot");
- if (test_opt(sbi, EXTENT_CACHE))
+ if (test_opt(sbi, READ_EXTENT_CACHE))
seq_puts(seq, ",extent_cache");
else
seq_puts(seq, ",noextent_cache");
+ if (test_opt(sbi, AGE_EXTENT_CACHE))
+ seq_puts(seq, ",age_extent_cache");
if (test_opt(sbi, DATA_FLUSH))
seq_puts(seq, ",data_flush");
@@ -1906,16 +2401,15 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
seq_puts(seq, "fragment:block");
seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
- if (test_opt(sbi, RESERVE_ROOT))
- seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
+ if (test_opt(sbi, RESERVE_ROOT) || test_opt(sbi, RESERVE_NODE))
+ seq_printf(seq, ",reserve_root=%u,reserve_node=%u,resuid=%u,"
+ "resgid=%u",
F2FS_OPTION(sbi).root_reserved_blocks,
+ F2FS_OPTION(sbi).root_reserved_nodes,
from_kuid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resuid),
from_kgid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resgid));
- if (F2FS_IO_SIZE_BITS(sbi))
- seq_printf(seq, ",io_bits=%u",
- F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (test_opt(sbi, FAULT_INJECTION)) {
seq_printf(seq, ",fault_injection=%u",
@@ -1935,10 +2429,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",prjquota");
#endif
f2fs_show_quota_options(seq, sbi->sb);
- if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
- seq_printf(seq, ",whint_mode=%s", "user-based");
- else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
- seq_printf(seq, ",whint_mode=%s", "fs-based");
fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
@@ -1971,57 +2461,83 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
if (test_opt(sbi, ATGC))
seq_puts(seq, ",atgc");
- if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
- seq_printf(seq, ",discard_unit=%s", "block");
- else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
- seq_printf(seq, ",discard_unit=%s", "segment");
- else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
- seq_printf(seq, ",discard_unit=%s", "section");
+ if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
+ seq_printf(seq, ",memory=%s", "normal");
+ else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
+ seq_printf(seq, ",memory=%s", "low");
+
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ seq_printf(seq, ",errors=%s", "remount-ro");
+ else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
+ seq_printf(seq, ",errors=%s", "continue");
+ else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
+ seq_printf(seq, ",errors=%s", "panic");
+
+ if (test_opt(sbi, NAT_BITS))
+ seq_puts(seq, ",nat_bits");
+
+ if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_PERF)
+ seq_show_option(seq, "lookup_mode", "perf");
+ else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_COMPAT)
+ seq_show_option(seq, "lookup_mode", "compat");
+ else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_AUTO)
+ seq_show_option(seq, "lookup_mode", "auto");
return 0;
}
-static void default_options(struct f2fs_sb_info *sbi)
+static void default_options(struct f2fs_sb_info *sbi, bool remount)
{
/* init some FS parameters */
+ if (!remount) {
+ set_opt(sbi, READ_EXTENT_CACHE);
+ clear_opt(sbi, DISABLE_CHECKPOINT);
+
+ if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
+ set_opt(sbi, DISCARD);
+
+ if (f2fs_sb_has_blkzoned(sbi))
+ F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
+ else
+ F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
+ }
+
if (f2fs_sb_has_readonly(sbi))
F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
else
F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
- F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+ if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
+ SMALL_VOLUME_SEGMENTS)
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+ else
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
- F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
- F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
- F2FS_OPTION(sbi).compress_ext_cnt = 0;
- F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
+ if (f2fs_sb_has_compression(sbi)) {
+ F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
+ F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
+ F2FS_OPTION(sbi).compress_ext_cnt = 0;
+ F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
+ }
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
-
- sbi->sb->s_flags &= ~SB_INLINECRYPT;
+ F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
+ F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
- set_opt(sbi, EXTENT_CACHE);
- set_opt(sbi, NOHEAP);
- clear_opt(sbi, DISABLE_CHECKPOINT);
set_opt(sbi, MERGE_CHECKPOINT);
+ set_opt(sbi, LAZYTIME);
F2FS_OPTION(sbi).unusable_cap = 0;
- sbi->sb->s_flags |= SB_LAZYTIME;
- set_opt(sbi, FLUSH_MERGE);
- if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
- set_opt(sbi, DISCARD);
- if (f2fs_sb_has_blkzoned(sbi)) {
+ if (!f2fs_is_readonly(sbi))
+ set_opt(sbi, FLUSH_MERGE);
+ if (f2fs_sb_has_blkzoned(sbi))
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
- F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
- } else {
+ else
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
- F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
- }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -2030,7 +2546,9 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, POSIX_ACL);
#endif
- f2fs_build_fault_attr(sbi, 0, 0);
+ f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL);
+
+ F2FS_OPTION(sbi).lookup_mode = LOOKUP_PERF;
}
#ifdef CONFIG_QUOTA
@@ -2041,6 +2559,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
unsigned int s_flags = sbi->sb->s_flags;
struct cp_control cpc;
+ unsigned int gc_mode = sbi->gc_mode;
int err = 0;
int ret;
block_t unusable;
@@ -2051,11 +2570,27 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
}
sbi->sb->s_flags |= SB_ACTIVE;
+ /* check if we need more GC first */
+ unusable = f2fs_get_unusable_blocks(sbi);
+ if (!f2fs_disable_cp_again(sbi, unusable))
+ goto skip_gc;
+
f2fs_update_time(sbi, DISABLE_TIME);
+ sbi->gc_mode = GC_URGENT_HIGH;
+
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
- down_write(&sbi->gc_lock);
- err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+ struct f2fs_gc_control gc_control = {
+ .victim_segno = NULL_SEGNO,
+ .init_gc_type = FG_GC,
+ .should_migrate_blocks = false,
+ .err_gc_skipped = true,
+ .no_bg_gc = true,
+ .nr_free_secs = 1 };
+
+ f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
+ err = f2fs_gc(sbi, &gc_control);
if (err == -ENODATA) {
err = 0;
break;
@@ -2076,9 +2611,11 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
goto restore_flag;
}
- down_write(&sbi->gc_lock);
+skip_gc:
+ f2fs_down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
goto out_unlock;
@@ -2088,54 +2625,107 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
spin_unlock(&sbi->stat_lock);
out_unlock:
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
restore_flag:
+ sbi->gc_mode = gc_mode;
sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
+ f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err);
return err;
}
-static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
- int retry = DEFAULT_RETRY_IO_COUNT;
+ unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
+ long long start, writeback, lock, sync_inode, end;
+ int ret;
+
+ f2fs_info(sbi, "%s start, meta: %lld, node: %lld, data: %lld",
+ __func__,
+ get_pages(sbi, F2FS_DIRTY_META),
+ get_pages(sbi, F2FS_DIRTY_NODES),
+ get_pages(sbi, F2FS_DIRTY_DATA));
+
+ f2fs_update_time(sbi, ENABLE_TIME);
+
+ start = ktime_get();
/* we should flush all the data to keep data consistency */
- do {
+ while (get_pages(sbi, F2FS_DIRTY_DATA)) {
+ writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC);
+ f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
+
+ if (f2fs_time_over(sbi, ENABLE_TIME))
+ break;
+ }
+ writeback = ktime_get();
+
+ f2fs_down_write(&sbi->cp_enable_rwsem);
+
+ lock = ktime_get();
+
+ if (get_pages(sbi, F2FS_DIRTY_DATA))
sync_inodes_sb(sbi->sb);
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
- } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
- if (unlikely(retry < 0))
- f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
+ if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA)))
+ f2fs_warn(sbi, "%s: has some unwritten data: %lld",
+ __func__, get_pages(sbi, F2FS_DIRTY_DATA));
- down_write(&sbi->gc_lock);
+ sync_inode = ktime_get();
+
+ f2fs_down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi);
clear_sbi_flag(sbi, SBI_CP_DISABLED);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- up_write(&sbi->gc_lock);
+ f2fs_up_write(&sbi->gc_lock);
+
+ f2fs_info(sbi, "%s sync_fs, meta: %lld, imeta: %lld, node: %lld, dents: %lld, qdata: %lld",
+ __func__,
+ get_pages(sbi, F2FS_DIRTY_META),
+ get_pages(sbi, F2FS_DIRTY_IMETA),
+ get_pages(sbi, F2FS_DIRTY_NODES),
+ get_pages(sbi, F2FS_DIRTY_DENTS),
+ get_pages(sbi, F2FS_DIRTY_QDATA));
+ ret = f2fs_sync_fs(sbi->sb, 1);
+ if (ret)
+ f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);
- f2fs_sync_fs(sbi->sb, 1);
+ /* Let's ensure there's no pending checkpoint anymore */
+ f2fs_flush_ckpt_thread(sbi);
+
+ f2fs_up_write(&sbi->cp_enable_rwsem);
+
+ end = ktime_get();
+
+ f2fs_info(sbi, "%s end, writeback:%llu, "
+ "lock:%llu, sync_inode:%llu, sync_fs:%llu",
+ __func__,
+ ktime_ms_delta(writeback, start),
+ ktime_ms_delta(lock, writeback),
+ ktime_ms_delta(sync_inode, lock),
+ ktime_ms_delta(end, sync_inode));
+ return ret;
}
-static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+static int __f2fs_remount(struct fs_context *fc, struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct f2fs_mount_info org_mount_opt;
unsigned long old_sb_flags;
+ unsigned int flags = fc->sb_flags;
int err;
bool need_restart_gc = false, need_stop_gc = false;
- bool need_restart_ckpt = false, need_stop_ckpt = false;
bool need_restart_flush = false, need_stop_flush = false;
bool need_restart_discard = false, need_stop_discard = false;
- bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+ bool need_enable_checkpoint = false, need_disable_checkpoint = false;
+ bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
+ bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
- bool no_io_align = !F2FS_IO_ALIGNED(sbi);
bool no_atgc = !test_opt(sbi, ATGC);
bool no_discard = !test_opt(sbi, DISCARD);
bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
bool block_unit_discard = f2fs_block_unit_discard(sbi);
- struct discard_cmd_control *dcc;
+ bool no_nat_bits = !test_opt(sbi, NAT_BITS);
#ifdef CONFIG_QUOTA
int i, j;
#endif
@@ -2147,6 +2737,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
org_mount_opt = sbi->mount_opt;
old_sb_flags = sb->s_flags;
+ sbi->umount_lock_holder = current;
+
#ifdef CONFIG_QUOTA
org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
@@ -2166,7 +2758,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
#endif
/* recover superblocks we couldn't write due to previous RO mount */
- if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ if (!(flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
err = f2fs_commit_super(sbi, false);
f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
err);
@@ -2174,31 +2766,39 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
}
- default_options(sbi);
+ default_options(sbi, true);
+
+ err = f2fs_check_opt_consistency(fc, sb);
+ if (err)
+ goto restore_opts;
+
+ f2fs_apply_options(fc, sb);
- /* parse mount options */
- err = parse_options(sb, data, true);
+ err = f2fs_sanity_check_options(sbi, true);
if (err)
goto restore_opts;
+ /* flush outstanding errors before changing fs state */
+ flush_work(&sbi->s_error_work);
+
/*
* Previous and new state of filesystem is RO,
* so skip checking GC and FLUSH_MERGE conditions.
*/
- if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
+ if (f2fs_readonly(sb) && (flags & SB_RDONLY))
goto skip;
- if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
+ if (f2fs_dev_is_readonly(sbi) && !(flags & SB_RDONLY)) {
err = -EROFS;
goto restore_opts;
}
#ifdef CONFIG_QUOTA
- if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
+ if (!f2fs_readonly(sb) && (flags & SB_RDONLY)) {
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
- } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
+ } else if (f2fs_readonly(sb) && !(flags & SB_RDONLY)) {
/* dquot_resume needs RW */
sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
@@ -2218,15 +2818,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
}
/* disallow enable/disable extent_cache dynamically */
- if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
+ if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
err = -EINVAL;
f2fs_warn(sbi, "switch extent_cache option is not allowed");
goto restore_opts;
}
-
- if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
+ /* disallow enable/disable age extent_cache dynamically */
+ if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
err = -EINVAL;
- f2fs_warn(sbi, "switch io_bits option is not allowed");
+ f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
goto restore_opts;
}
@@ -2242,7 +2842,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
- if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
+ if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) {
+ err = -EINVAL;
+ f2fs_warn(sbi, "switch nat_bits option is not allowed");
+ goto restore_opts;
+ }
+
+ if ((flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
err = -EINVAL;
f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
goto restore_opts;
@@ -2253,7 +2859,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* or if background_gc = off is passed in mount
* option. Also sync the filesystem.
*/
- if ((*flags & SB_RDONLY) ||
+ if ((flags & SB_RDONLY) ||
(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
!test_opt(sbi, GC_MERGE))) {
if (sbi->gc_thread) {
@@ -2267,8 +2873,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
- if (*flags & SB_RDONLY ||
- F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
+ if (flags & SB_RDONLY) {
sync_inodes_sb(sb);
set_sbi_flag(sbi, SBI_IS_DIRTY);
@@ -2277,33 +2882,18 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
clear_sbi_flag(sbi, SBI_IS_CLOSE);
}
- if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
- !test_opt(sbi, MERGE_CHECKPOINT)) {
- f2fs_stop_ckpt_thread(sbi);
- need_restart_ckpt = true;
- } else {
- err = f2fs_start_ckpt_thread(sbi);
- if (err) {
- f2fs_err(sbi,
- "Failed to start F2FS issue_checkpoint_thread (%d)",
- err);
- goto restore_gc;
- }
- need_stop_ckpt = true;
- }
-
/*
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
*/
- if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+ if ((flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
clear_opt(sbi, FLUSH_MERGE);
f2fs_destroy_flush_cmd_control(sbi, false);
need_restart_flush = true;
} else {
err = f2fs_create_flush_cmd_control(sbi);
if (err)
- goto restore_ckpt;
+ goto restore_gc;
need_stop_flush = true;
}
@@ -2314,21 +2904,45 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_flush;
need_stop_discard = true;
} else {
- dcc = SM_I(sbi)->dcc_info;
f2fs_stop_discard_thread(sbi);
- if (atomic_read(&dcc->discard_cmd_cnt))
- f2fs_issue_discard_timeout(sbi);
+ f2fs_issue_discard_timeout(sbi);
need_restart_discard = true;
}
}
+ adjust_unusable_cap_perc(sbi);
if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
if (test_opt(sbi, DISABLE_CHECKPOINT)) {
err = f2fs_disable_checkpoint(sbi);
if (err)
goto restore_discard;
+ need_enable_checkpoint = true;
} else {
- f2fs_enable_checkpoint(sbi);
+ err = f2fs_enable_checkpoint(sbi);
+ if (err)
+ goto restore_discard;
+ need_disable_checkpoint = true;
+ }
+ }
+
+ /*
+ * Place this routine at the end, since a new checkpoint would be
+	 * triggered during remount, and we need to take care of it before
+ * returning from remount.
+ */
+ if ((flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
+ !test_opt(sbi, MERGE_CHECKPOINT)) {
+ f2fs_stop_ckpt_thread(sbi);
+ } else {
+		/* Flush the previous checkpoint, if one exists. */
+ f2fs_flush_ckpt_thread(sbi);
+
+ err = f2fs_start_ckpt_thread(sbi);
+ if (err) {
+ f2fs_err(sbi,
+ "Failed to start F2FS issue_checkpoint_thread (%d)",
+ err);
+ goto restore_checkpoint;
}
}
@@ -2343,9 +2957,18 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
- adjust_unusable_cap_perc(sbi);
- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+ fc->sb_flags = (flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+
+ sbi->umount_lock_holder = NULL;
return 0;
+restore_checkpoint:
+ if (need_enable_checkpoint) {
+ if (f2fs_enable_checkpoint(sbi))
+ f2fs_warn(sbi, "checkpoint has not been enabled");
+ } else if (need_disable_checkpoint) {
+ if (f2fs_disable_checkpoint(sbi))
+ f2fs_warn(sbi, "checkpoint has not been disabled");
+ }
restore_discard:
if (need_restart_discard) {
if (f2fs_start_discard_thread(sbi))
@@ -2361,13 +2984,6 @@ restore_flush:
clear_opt(sbi, FLUSH_MERGE);
f2fs_destroy_flush_cmd_control(sbi, false);
}
-restore_ckpt:
- if (need_restart_ckpt) {
- if (f2fs_start_ckpt_thread(sbi))
- f2fs_warn(sbi, "background ckpt thread has stopped");
- } else if (need_stop_ckpt) {
- f2fs_stop_ckpt_thread(sbi);
- }
restore_gc:
if (need_restart_gc) {
if (f2fs_start_gc_thread(sbi))
@@ -2385,23 +3001,74 @@ restore_opts:
#endif
sbi->mount_opt = org_mount_opt;
sb->s_flags = old_sb_flags;
+
+ sbi->umount_lock_holder = NULL;
return err;
}
+static void f2fs_shutdown(struct super_block *sb)
+{
+ f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
+}
+
#ifdef CONFIG_QUOTA
+static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
+{
+	/* need to recover orphans */
+ if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
+ return true;
+	/* need to recover data */
+ if (test_opt(sbi, DISABLE_ROLL_FORWARD))
+ return false;
+ if (test_opt(sbi, NORECOVERY))
+ return false;
+ return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
+}
+
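+/*
+ * If recovery is needed on a read-only mount, temporarily clear SB_RDONLY
+ * and mark the sb writable so quota files can be enabled and updated during
+ * recovery; f2fs_recover_quota_end() turns the quotas back off and restores
+ * the read-only flag once recovery is done.
+ */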
+static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
+{
+ bool readonly = f2fs_readonly(sbi->sb);
+
+ if (!f2fs_need_recovery(sbi))
+ return false;
+
+	/* no need to check f2fs_sb_has_readonly() here */
+ if (f2fs_hw_is_readonly(sbi))
+ return false;
+
+ if (readonly) {
+ sbi->sb->s_flags &= ~SB_RDONLY;
+ set_sbi_flag(sbi, SBI_IS_WRITABLE);
+ }
+
+ /*
+	 * Turn on quotas that were not enabled for the read-only mount if the
+	 * filesystem has the quota feature, so that they are updated correctly.
+ */
+ return f2fs_enable_quota_files(sbi, readonly);
+}
+
+static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
+ bool quota_enabled)
+{
+ if (quota_enabled)
+ f2fs_quota_off_umount(sbi->sb);
+
+ if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
+ clear_sbi_flag(sbi, SBI_IS_WRITABLE);
+ sbi->sb->s_flags |= SB_RDONLY;
+ }
+}
+
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
struct address_space *mapping = inode->i_mapping;
- block_t blkidx = F2FS_BYTES_TO_BLK(off);
- int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t toread;
loff_t i_size = i_size_read(inode);
- struct page *page;
- char *kaddr;
if (off > i_size)
return 0;
@@ -2410,40 +3077,42 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
len = i_size - off;
toread = len;
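+	/*
+	 * Read via the page cache one folio at a time: look the folio up (or
+	 * read it in) with mapping_read_folio_gfp(), copy up to
+	 * folio_size() - offset bytes, then advance 'off' directly instead of
+	 * tracking a separate block index.
+	 */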
while (toread > 0) {
- tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
+ struct folio *folio;
+ size_t offset;
+
repeat:
- page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
- if (IS_ERR(page)) {
- if (PTR_ERR(page) == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT,
+ GFP_NOFS);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -ENOMEM) {
+ memalloc_retry_wait(GFP_NOFS);
goto repeat;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
+ offset = offset_in_folio(folio, off);
+ tocopy = min(folio_size(folio) - offset, toread);
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ if (unlikely(folio->mapping != mapping)) {
+ f2fs_folio_put(folio, true);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
- return -EIO;
- }
- kaddr = kmap_atomic(page);
- memcpy(data, kaddr + offset, tocopy);
- kunmap_atomic(kaddr);
- f2fs_put_page(page, 1);
+ /*
+ * should never happen, just leave f2fs_bug_on() here to catch
+ * any potential bug.
+ */
+ f2fs_bug_on(F2FS_SB(sb), !folio_test_uptodate(folio));
+
+ memcpy_from_folio(data, folio, offset, tocopy);
+ f2fs_folio_put(folio, true);
- offset = 0;
toread -= tocopy;
data += tocopy;
- blkidx++;
+ off += tocopy;
}
return len;
}
@@ -2457,9 +3126,8 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
const struct address_space_operations *a_ops = mapping->a_ops;
int offset = off & (sb->s_blocksize - 1);
size_t towrite = len;
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
- char *kaddr;
int err = 0;
int tocopy;
@@ -2467,25 +3135,21 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
tocopy = min_t(unsigned long, sb->s_blocksize - offset,
towrite);
retry:
- err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
- &page, &fsdata);
+ err = a_ops->write_begin(NULL, mapping, off, tocopy,
+ &folio, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
break;
}
- kaddr = kmap_atomic(page);
- memcpy(kaddr + offset, data, tocopy);
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
- page, fsdata);
+ folio, fsdata);
offset = 0;
towrite -= tocopy;
off += tocopy;
@@ -2495,22 +3159,20 @@ retry:
if (len == towrite)
return err;
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return len - towrite;
}
int f2fs_dquot_initialize(struct inode *inode)
{
- if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT)) {
- f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_DQUOT_INIT);
+ if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
return -ESRCH;
- }
return dquot_initialize(inode);
}
-static struct dquot **f2fs_get_dquots(struct inode *inode)
+static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
{
return F2FS_I(inode)->i_dquot;
}
@@ -2564,6 +3226,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
{
struct inode *qf_inode;
unsigned long qf_inum;
+ unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
int err;
BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
@@ -2579,7 +3242,15 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
}
/* Don't account quota for quota files to avoid recursion */
+ inode_lock(qf_inode);
qf_inode->i_flags |= S_NOQUOTA;
+
+ if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
+ F2FS_I(qf_inode)->i_flags |= qf_flag;
+ f2fs_set_inode_flags(qf_inode);
+ }
+ inode_unlock(qf_inode);
+
err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
iput(qf_inode);
return err;
@@ -2650,12 +3321,12 @@ out:
return ret;
}
-int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_do_quota_sync(struct super_block *sb, int type)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
- int ret;
+ int ret = 0;
/*
* Now when everything is written we can discard the pagecache so
@@ -2666,29 +3337,31 @@ int f2fs_quota_sync(struct super_block *sb, int type)
if (type != -1 && cnt != type)
continue;
- if (!sb_has_quota_active(sb, type))
- return 0;
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
- inode_lock(dqopt->files[cnt]);
+ if (!f2fs_sb_has_quota_ino(sbi))
+ inode_lock(dqopt->files[cnt]);
/*
* do_quotactl
* f2fs_quota_sync
- * down_read(quota_sem)
+ * f2fs_down_read(quota_sem)
* dquot_writeback_dquots()
* f2fs_dquot_commit
* block_operation
- * down_read(quota_sem)
+ * f2fs_down_read(quota_sem)
*/
f2fs_lock_op(sbi);
- down_read(&sbi->quota_sem);
+ f2fs_down_read(&sbi->quota_sem);
ret = f2fs_quota_sync_file(sbi, cnt);
- up_read(&sbi->quota_sem);
+ f2fs_up_read(&sbi->quota_sem);
f2fs_unlock_op(sbi);
- inode_unlock(dqopt->files[cnt]);
+ if (!f2fs_sb_has_quota_ino(sbi))
+ inode_unlock(dqopt->files[cnt]);
if (ret)
break;
@@ -2696,11 +3369,21 @@ int f2fs_quota_sync(struct super_block *sb, int type)
return ret;
}
+static int f2fs_quota_sync(struct super_block *sb, int type)
+{
+ int ret;
+
+ F2FS_SB(sb)->umount_lock_holder = current;
+ ret = f2fs_do_quota_sync(sb, type);
+ F2FS_SB(sb)->umount_lock_holder = NULL;
+ return ret;
+}
+
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path)
{
struct inode *inode;
- int err;
+ int err = 0;
/* if quota sysfile exists, deny enabling quota with specific file */
if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
@@ -2708,23 +3391,37 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
return -EBUSY;
}
- err = f2fs_quota_sync(sb, type);
- if (err)
- return err;
+ if (path->dentry->d_sb != sb)
+ return -EXDEV;
- err = dquot_quota_on(sb, type, format_id, path);
+ F2FS_SB(sb)->umount_lock_holder = current;
+
+ err = f2fs_do_quota_sync(sb, type);
if (err)
- return err;
+ goto out;
inode = d_inode(path->dentry);
+ err = filemap_fdatawrite(inode->i_mapping);
+ if (err)
+ goto out;
+
+ err = filemap_fdatawait(inode->i_mapping);
+ if (err)
+ goto out;
+
+ err = dquot_quota_on(sb, type, format_id, path);
+ if (err)
+ goto out;
+
inode_lock(inode);
- F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
+ F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
-
- return 0;
+out:
+ F2FS_SB(sb)->umount_lock_holder = NULL;
+ return err;
}
static int __f2fs_quota_off(struct super_block *sb, int type)
@@ -2735,7 +3432,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type)
if (!inode || !igrab(inode))
return dquot_quota_off(sb, type);
- err = f2fs_quota_sync(sb, type);
+ err = f2fs_do_quota_sync(sb, type);
if (err)
goto out_put;
@@ -2744,7 +3441,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type)
goto out_put;
inode_lock(inode);
- F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
+ F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
@@ -2758,6 +3455,8 @@ static int f2fs_quota_off(struct super_block *sb, int type)
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err;
+ F2FS_SB(sb)->umount_lock_holder = current;
+
err = __f2fs_quota_off(sb, type);
/*
@@ -2767,6 +3466,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
*/
if (is_journalled_quota(sbi))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
+ F2FS_SB(sb)->umount_lock_holder = NULL;
+
return err;
}
@@ -2810,11 +3512,11 @@ static int f2fs_dquot_commit(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret;
- down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
+ f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
ret = dquot_commit(dquot);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- up_read(&sbi->quota_sem);
+ f2fs_up_read(&sbi->quota_sem);
return ret;
}
@@ -2823,11 +3525,11 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret;
- down_read(&sbi->quota_sem);
+ f2fs_down_read(&sbi->quota_sem);
ret = dquot_acquire(dquot);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- up_read(&sbi->quota_sem);
+ f2fs_up_read(&sbi->quota_sem);
return ret;
}
@@ -2899,7 +3601,7 @@ int f2fs_dquot_initialize(struct inode *inode)
return 0;
}
-int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_do_quota_sync(struct super_block *sb, int type)
{
return 0;
}
@@ -2927,7 +3629,7 @@ static const struct super_operations f2fs_sops = {
.freeze_fs = f2fs_freeze,
.unfreeze_fs = f2fs_unfreeze,
.statfs = f2fs_statfs,
- .remount_fs = f2fs_remount,
+ .shutdown = f2fs_shutdown,
};
#ifdef CONFIG_FS_ENCRYPTION
@@ -2968,44 +3670,41 @@ static bool f2fs_has_stable_inodes(struct super_block *sb)
return true;
}
-static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret)
-{
- *ino_bits_ret = 8 * sizeof(nid_t);
- *lblk_bits_ret = 8 * sizeof(block_t);
-}
-
-static int f2fs_get_num_devices(struct super_block *sb)
+static struct block_device **f2fs_get_devices(struct super_block *sb,
+ unsigned int *num_devs)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct block_device **devs;
+ int i;
- if (f2fs_is_multi_device(sbi))
- return sbi->s_ndevs;
- return 1;
-}
+ if (!f2fs_is_multi_device(sbi))
+ return NULL;
-static void f2fs_get_devices(struct super_block *sb,
- struct request_queue **devs)
-{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- int i;
+ devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
+ if (!devs)
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < sbi->s_ndevs; i++)
- devs[i] = bdev_get_queue(FDEV(i).bdev);
+ devs[i] = FDEV(i).bdev;
+ *num_devs = sbi->s_ndevs;
+ return devs;
}
static const struct fscrypt_operations f2fs_cryptops = {
- .key_prefix = "f2fs:",
+ .inode_info_offs = (int)offsetof(struct f2fs_inode_info, i_crypt_info) -
+ (int)offsetof(struct f2fs_inode_info, vfs_inode),
+ .needs_bounce_pages = 1,
+ .has_32bit_inodes = 1,
+ .supports_subblock_data_units = 1,
+ .legacy_key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
.get_dummy_policy = f2fs_get_dummy_policy,
.empty_dir = f2fs_empty_dir,
.has_stable_inodes = f2fs_has_stable_inodes,
- .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
- .get_num_devices = f2fs_get_num_devices,
.get_devices = f2fs_get_devices,
};
-#endif
+#endif /* CONFIG_FS_ENCRYPTION */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
@@ -3047,6 +3746,7 @@ static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
}
static const struct export_operations f2fs_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = f2fs_fh_to_dentry,
.fh_to_parent = f2fs_fh_to_parent,
.get_parent = f2fs_get_parent,
@@ -3080,27 +3780,54 @@ loff_t max_file_blocks(struct inode *inode)
leaf_count *= NIDS_PER_BLOCK;
result += leaf_count;
+ /*
+ * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
+ * a 4K crypto data unit, we must restrict the max filesize to what can
+ * fit within U32_MAX + 1 data units.
+ */
+
+ result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
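+	/*
+	 * (U32_MAX + 1) * 4096 = 2^32 * 2^12 bytes = 16 TiB; F2FS_BYTES_TO_BLK()
+	 * converts that byte count back into filesystem blocks.
+	 */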
+
return result;
}
-static int __f2fs_commit_super(struct buffer_head *bh,
- struct f2fs_super_block *super)
+static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
+ pgoff_t index, bool update)
{
- lock_buffer(bh);
- if (super)
- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_dirty(bh);
- unlock_buffer(bh);
-
+ struct bio *bio;
/* it's rare case, we can do fua all the time */
- return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
+ int ret;
+
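+	/*
+	 * Write the superblock folio synchronously with preflush + FUA via a
+	 * single-segment bio, bypassing the regular writeback path so the
+	 * update is durable before we return.
+	 */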
+ folio_lock(folio);
+ folio_wait_writeback(folio);
+ if (update)
+ memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
+ sizeof(struct f2fs_super_block));
+ folio_mark_dirty(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+ bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
+
+	/* no need to set a crypto context for the superblock update */
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index);
+
+ if (!bio_add_folio(bio, folio, folio_size(folio), 0))
+ f2fs_bug_on(sbi, 1);
+
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ folio_end_writeback(folio);
+
+ return ret;
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
@@ -3116,9 +3843,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
u64 main_end_blkaddr = main_blkaddr +
- (segment_count_main << log_blocks_per_seg);
+ ((u64)segment_count_main << log_blocks_per_seg);
u64 seg_end_blkaddr = segment0_blkaddr +
- (segment_count << log_blocks_per_seg);
+ ((u64)segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
@@ -3171,11 +3898,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
segment0_blkaddr) >> log_blocks_per_seg);
- if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+ if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
res = "internally";
} else {
- err = __f2fs_commit_super(bh, NULL);
+ err = __f2fs_commit_super(sbi, folio, index, false);
res = err ? "failed" : "done";
}
f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
@@ -3188,12 +3915,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
block_t total_sections, blocks_per_seg;
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
size_t crc_offset = 0;
__u32 crc = 0;
@@ -3213,13 +3939,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED;
}
crc = le32_to_cpu(raw_super->crc);
- if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
+ if (crc != f2fs_crc32(raw_super, crc_offset)) {
f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
return -EFSCORRUPTED;
}
}
- /* Currently, support only 4KB block size */
+ /* only support block_size equals to PAGE_SIZE */
if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
le32_to_cpu(raw_super->log_blocksize),
@@ -3234,7 +3960,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED;
}
- /* Currently, support 512/1024/2048/4096 bytes sector size */
+ /* Currently, support 512/1024/2048/4096/16K bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
le32_to_cpu(raw_super->log_sectorsize) <
@@ -3259,7 +3985,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
total_sections = le32_to_cpu(raw_super->section_count);
/* blocks_per_seg should be 512, given the above check */
- blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
+ blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));
if (segment_count > F2FS_MAX_SEGMENT ||
segment_count < F2FS_MIN_SEGMENTS) {
@@ -3351,9 +4077,23 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sbi, bh))
+ if (sanity_check_area_boundary(sbi, folio, index))
return -EFSCORRUPTED;
+ /*
+ * Check for legacy summary layout on 16KB+ block devices.
+ * Modern f2fs-tools packs multiple 4KB summary areas into one block,
+ * whereas legacy versions used one block per summary, leading
+ * to a much larger SSA.
+ */
+ if (SUMS_PER_BLOCK > 1 &&
+ !(__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_PACKED_SSA))) {
+		f2fs_info(sbi, "Error: Device formatted with a legacy version. "
+			"Please reformat with a tool supporting the packed SSA "
+			"feature for block sizes larger than 4KB.");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -3372,6 +4112,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
block_t user_block_count, valid_user_blocks;
block_t avail_node_count, valid_node_count;
unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
+ unsigned int sit_blk_cnt;
int i, j;
total = le32_to_cpu(raw_super->segment_count);
@@ -3422,7 +4163,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
}
main_segs = le32_to_cpu(raw_super->segment_count_main);
- blocks_per_seg = sbi->blocks_per_seg;
+ blocks_per_seg = BLKS_PER_SEG(sbi);
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
@@ -3483,6 +4224,13 @@ skip_cross:
return 1;
}
+ sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK);
+ if (sit_bitmap_size * 8 < sit_blk_cnt) {
+ f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u",
+ sit_bitmap_size, sit_blk_cnt);
+ return 1;
+ }
+
cp_pack_start_sum = __start_sum_addr(sbi);
cp_payload = __cp_payload(sbi);
if (cp_pack_start_sum < cp_payload + 1 ||
@@ -3528,26 +4276,33 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block);
sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
- sbi->blocksize = 1 << sbi->log_blocksize;
+ sbi->blocksize = BIT(sbi->log_blocksize);
sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
- sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
+ sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
sbi->total_sections = le32_to_cpu(raw_super->section_count);
- sbi->total_node_count =
- (le32_to_cpu(raw_super->segment_count_nat) / 2)
- * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
+ sbi->total_node_count = SEGS_TO_BLKS(sbi,
+ ((le32_to_cpu(raw_super->segment_count_nat) / 2) *
+ NAT_ENTRY_PER_BLOCK));
+ sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count);
+ sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT;
F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
sbi->cur_victim_sec = NULL_SECNO;
+ sbi->gc_mode = GC_NORMAL;
sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
- sbi->migration_granularity = sbi->segs_per_sec;
+ sbi->migration_granularity = SEGS_PER_SEC(sbi);
+ sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
+ DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
+ spin_lock_init(&sbi->gc_remaining_trials_lock);
+ atomic64_set(&sbi->current_atomic_write, 0);
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -3555,6 +4310,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
+ sbi->interval_time[ENABLE_TIME] = DEF_ENABLE_INTERVAL;
sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
DEF_UMOUNT_DISCARD_TIMEOUT;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -3567,14 +4323,14 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
- init_rwsem(&sbi->io_order_lock);
+ init_f2fs_rwsem(&sbi->io_order_lock);
spin_lock_init(&sbi->cp_lock);
sbi->dirty_device = 0;
spin_lock_init(&sbi->dev_lock);
- init_rwsem(&sbi->sb_lock);
- init_rwsem(&sbi->pin_sem);
+ init_f2fs_rwsem(&sbi->sb_lock);
+ init_f2fs_rwsem(&sbi->pin_sem);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -3585,35 +4341,49 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
if (err)
return err;
+ err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
+ if (err)
+ goto err_valid_block;
+
err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
GFP_KERNEL);
if (err)
- percpu_counter_destroy(&sbi->alloc_valid_block_count);
+ goto err_node_block;
+ return 0;
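+ /* unwind the percpu counters in reverse order of initialization */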
+err_node_block:
+ percpu_counter_destroy(&sbi->rf_node_block_count);
+err_valid_block:
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
return err;
}
#ifdef CONFIG_BLK_DEV_ZONED
struct f2fs_report_zones_args {
+ struct f2fs_sb_info *sbi;
struct f2fs_dev_info *dev;
- bool zone_cap_mismatch;
};
static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct f2fs_report_zones_args *rz_args = data;
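+ /* blocks beyond the zone capacity, up to the zone size, are unusable */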
+ block_t unusable_blocks = (zone->len - zone->capacity) >>
+ F2FS_LOG_SECTORS_PER_BLOCK;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return 0;
set_bit(idx, rz_args->dev->blkz_seq);
- rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
- F2FS_LOG_SECTORS_PER_BLOCK;
- if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
- rz_args->zone_cap_mismatch = true;
-
+ if (!rz_args->sbi->unusable_blocks_per_sec) {
+ rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
+ return 0;
+ }
+ if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
+ f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -3622,22 +4392,33 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
struct block_device *bdev = FDEV(devi).bdev;
sector_t nr_sectors = bdev_nr_sectors(bdev);
struct f2fs_report_zones_args rep_zone_arg;
+ u64 zone_sectors;
+ unsigned int max_open_zones;
int ret;
if (!f2fs_sb_has_blkzoned(sbi))
return 0;
+ if (bdev_is_zoned(FDEV(devi).bdev)) {
+ max_open_zones = bdev_max_open_zones(bdev);
+ if (max_open_zones && (max_open_zones < sbi->max_open_zones))
+ sbi->max_open_zones = max_open_zones;
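+ /* each active log needs an open zone of its own to write to */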
+ if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
+ f2fs_err(sbi,
+ "zoned: max open zones %u is too small, need at least %u open zones",
+ sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
+ return -EINVAL;
+ }
+ }
+
+ zone_sectors = bdev_zone_sectors(bdev);
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
- SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
+ SECTOR_TO_BLOCK(zone_sectors))
return -EINVAL;
- sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
- if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
- __ilog2_u32(sbi->blocks_per_blkz))
- return -EINVAL;
- sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
- FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
- sbi->log_blocks_per_blkz;
- if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
+ FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
+ sbi->blocks_per_blkz);
+ if (nr_sectors & (zone_sectors - 1))
FDEV(devi).nr_blkz++;
FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
@@ -3647,26 +4428,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (!FDEV(devi).blkz_seq)
return -ENOMEM;
- /* Get block zones type and zone-capacity */
- FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
- FDEV(devi).nr_blkz * sizeof(block_t),
- GFP_KERNEL);
- if (!FDEV(devi).zone_capacity_blocks)
- return -ENOMEM;
-
+ rep_zone_arg.sbi = sbi;
rep_zone_arg.dev = &FDEV(devi);
- rep_zone_arg.zone_cap_mismatch = false;
ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
&rep_zone_arg);
if (ret < 0)
return ret;
-
- if (!rep_zone_arg.zone_cap_mismatch) {
- kfree(FDEV(devi).zone_capacity_blocks);
- FDEV(devi).zone_capacity_blocks = NULL;
- }
-
return 0;
}
#endif
@@ -3683,7 +4451,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
{
struct super_block *sb = sbi->sb;
int block;
- struct buffer_head *bh;
+ struct folio *folio;
struct f2fs_super_block *super;
int err = 0;
@@ -3692,32 +4460,32 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
return -ENOMEM;
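/* read both superblock copies and keep the first valid one */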
for (block = 0; block < 2; block++) {
- bh = sb_bread(sb, block);
- if (!bh) {
+ folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
+ if (IS_ERR(folio)) {
f2fs_err(sbi, "Unable to read %dth superblock",
block + 1);
- err = -EIO;
+ err = PTR_ERR(folio);
*recovery = 1;
continue;
}
/* sanity checking of raw super */
- err = sanity_check_raw_super(sbi, bh);
+ err = sanity_check_raw_super(sbi, folio, block);
if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- brelse(bh);
+ folio_put(folio);
*recovery = 1;
continue;
}
if (!*raw_super) {
- memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ memcpy(super, F2FS_SUPER_BLOCK(folio, block),
sizeof(*super));
*valid_super_block = block;
*raw_super = super;
}
- brelse(bh);
+ folio_put(folio);
}
/* No valid superblock */
@@ -3731,48 +4499,213 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *bh;
+ struct folio *folio;
+ pgoff_t index;
__u32 crc = 0;
int err;
if ((recover && f2fs_readonly(sbi->sb)) ||
- bdev_read_only(sbi->sb->s_bdev)) {
+ f2fs_hw_is_readonly(sbi)) {
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
return -EROFS;
}
/* we should update superblock crc here */
if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
- crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
+ crc = f2fs_crc32(F2FS_RAW_SUPER(sbi),
offsetof(struct f2fs_super_block, crc));
F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
}
/* write back-up superblock first */
- bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block ? 0 : 1;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
return err;
/* write current valid superblock */
- bh = sb_bread(sbi->sb, sbi->valid_super_block);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
return err;
}
+static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
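+ /* saturate the per-reason counter at 0xff so it never wraps */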
+ if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
+ sbi->stop_reason[reason]++;
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
+}
+
+static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ unsigned long flags;
+ int err;
+
+ f2fs_down_write(&sbi->sb_lock);
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
+ if (sbi->error_dirty) {
+ memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
+ MAX_F2FS_ERRORS);
+ sbi->error_dirty = false;
+ }
+ memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
+
+ err = f2fs_commit_super(sbi, false);
+
+ f2fs_up_write(&sbi->sb_lock);
+ if (err)
+ f2fs_err_ratelimited(sbi,
+ "f2fs_commit_super fails to record stop_reason, err:%d",
+ err);
+}
+
+void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
+ if (!test_bit(flag, (unsigned long *)sbi->errors)) {
+ set_bit(flag, (unsigned long *)sbi->errors);
+ sbi->error_dirty = true;
+ }
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
+}
+
+void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
+{
+ f2fs_save_errors(sbi, error);
+
+ if (!sbi->error_dirty)
+ return;
+ if (!test_bit(error, (unsigned long *)sbi->errors))
+ return;
+ schedule_work(&sbi->s_error_work);
+}
+
+static bool system_going_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
+{
+ struct super_block *sb = sbi->sb;
+ bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
+ bool continue_fs = !shutdown &&
+ F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;
+
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+
+ if (!f2fs_hw_is_readonly(sbi)) {
+ save_stop_reason(sbi, reason);
+
+ /*
+ * Always create an asynchronous task to record the stop_reason,
+ * to avoid a potential deadlock that could occur if
+ * f2fs_record_stop_reason() were called synchronously.
+ */
+ schedule_work(&sbi->s_error_work);
+ }
+
+ /*
+ * We force ERRORS_RO behavior when the system is rebooting. Otherwise we
+ * could panic during 'reboot -f' as the underlying device may already have
+ * been disabled.
+ */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
+ !shutdown && !system_going_down() &&
+ !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
+ panic("F2FS-fs (device %s): panic forced after error\n",
+ sb->s_id);
+
+ if (shutdown)
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ else
+ dump_stack();
+
+ /*
+ * Continue filesystem operations if errors=continue. Do not set
+ * RO on shutdown, since RO bypasses thaw_super, which can hang the
+ * system.
+ */
+ if (continue_fs || f2fs_readonly(sb) || shutdown) {
+ f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
+ return;
+ }
+
+ f2fs_warn(sbi, "Remounting filesystem read-only");
+
+ /*
+ * We have already set CP_ERROR_FLAG to stop all updates to the
+ * filesystem, so there is no need to set SB_RDONLY here: that flag
+ * must be set under the sb->s_umount semaphore via the remount
+ * procedure, otherwise it will confuse code like freeze_super(),
+ * which will lead to deadlocks and other problems.
+ */
+}
+
+static void f2fs_record_error_work(struct work_struct *work)
+{
+ struct f2fs_sb_info *sbi = container_of(work,
+ struct f2fs_sb_info, s_error_work);
+
+ f2fs_record_stop_reason(sbi);
+}
+
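+/* Find the start segno of the first sequential zone across devices; NULL_SEGNO if none. */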
+static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int zoneno, total_zones;
+ int devi;
+
+ if (!f2fs_sb_has_blkzoned(sbi))
+ return NULL_SEGNO;
+
+ for (devi = 0; devi < sbi->s_ndevs; devi++) {
+ if (!bdev_is_zoned(FDEV(devi).bdev))
+ continue;
+
+ total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments);
+
+ for (zoneno = 0; zoneno < total_zones; zoneno++) {
+ unsigned int segs, blks;
+
+ if (!f2fs_zone_is_seq(sbi, devi, zoneno))
+ continue;
+
+ segs = GET_SEG_FROM_SEC(sbi,
+ zoneno * sbi->secs_per_zone);
+ blks = SEGS_TO_BLKS(sbi, segs);
+ return GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
+ }
+ }
+#endif
+ return NULL_SEGNO;
+}
+
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
unsigned int max_devices = MAX_DEVICES;
unsigned int logical_blksize;
+ blk_mode_t mode = sb_open_mode(sbi->sb->s_flags);
int i;
/* Initialize single device information */
@@ -3795,18 +4728,28 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
sbi->aligned_blksize = true;
+ sbi->bggc_io_aware = AWARE_ALL_IO;
+#ifdef CONFIG_BLK_DEV_ZONED
+ sbi->max_open_zones = UINT_MAX;
+ sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
+ sbi->bggc_io_aware = AWARE_READ_IO;
+#endif
for (i = 0; i < max_devices; i++) {
+ if (max_devices == 1) {
+ FDEV(i).total_segments =
+ le32_to_cpu(raw_super->segment_count_main);
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).total_segments *
+ BLKS_PER_SEG(sbi);
+ }
- if (i > 0 && !RDEV(i).path[0])
+ if (i == 0)
+ FDEV(0).bdev_file = sbi->sb->s_bdev_file;
+ else if (!RDEV(i).path[0])
break;
- if (max_devices == 1) {
- /* Single zoned block device mount */
- FDEV(0).bdev =
- blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
- sbi->sb->s_mode, sbi->sb->s_type);
- } else {
+ if (max_devices > 1) {
/* Multi-device mount */
memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
FDEV(i).total_segments =
@@ -3814,21 +4757,24 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
if (i == 0) {
FDEV(i).start_blk = 0;
FDEV(i).end_blk = FDEV(i).start_blk +
- (FDEV(i).total_segments <<
- sbi->log_blocks_per_seg) - 1 +
- le32_to_cpu(raw_super->segment0_blkaddr);
+ SEGS_TO_BLKS(sbi,
+ FDEV(i).total_segments) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ sbi->allocate_section_hint = FDEV(i).total_segments /
+ SEGS_PER_SEC(sbi);
} else {
FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
FDEV(i).end_blk = FDEV(i).start_blk +
- (FDEV(i).total_segments <<
- sbi->log_blocks_per_seg) - 1;
+ SEGS_TO_BLKS(sbi,
+ FDEV(i).total_segments) - 1;
+ FDEV(i).bdev_file = bdev_file_open_by_path(
+ FDEV(i).path, mode, sbi->sb, NULL);
}
- FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
- sbi->sb->s_mode, sbi->sb->s_type);
}
- if (IS_ERR(FDEV(i).bdev))
- return PTR_ERR(FDEV(i).bdev);
+ if (IS_ERR(FDEV(i).bdev_file))
+ return PTR_ERR(FDEV(i).bdev_file);
+ FDEV(i).bdev = file_bdev(FDEV(i).bdev_file);
/* to release errored devices */
sbi->s_ndevs = i + 1;
@@ -3836,24 +4782,21 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->aligned_blksize = false;
#ifdef CONFIG_BLK_DEV_ZONED
- if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
- !f2fs_sb_has_blkzoned(sbi)) {
- f2fs_err(sbi, "Zoned block device feature not enabled");
- return -EINVAL;
- }
- if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (bdev_is_zoned(FDEV(i).bdev)) {
+ if (!f2fs_sb_has_blkzoned(sbi)) {
+ f2fs_err(sbi, "Zoned block device feature not enabled");
+ return -EINVAL;
+ }
if (init_blkz_info(sbi, i)) {
f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
return -EINVAL;
}
if (max_devices == 1)
break;
- f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
i, FDEV(i).path,
FDEV(i).total_segments,
- FDEV(i).start_blk, FDEV(i).end_blk,
- bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
- "Host-aware" : "Host-managed");
+ FDEV(i).start_blk, FDEV(i).end_blk);
continue;
}
#endif
@@ -3862,38 +4805,43 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
FDEV(i).total_segments,
FDEV(i).start_blk, FDEV(i).end_blk);
}
- f2fs_info(sbi,
- "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
return 0;
}
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
const struct f2fs_sb_encodings *encoding_info;
struct unicode_map *encoding;
__u16 encoding_flags;
- if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
- &encoding_flags)) {
+ encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
+ if (!encoding_info) {
f2fs_err(sbi,
"Encoding requested by superblock is unknown");
return -EINVAL;
}
+ encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
encoding = utf8_load(encoding_info->version);
if (IS_ERR(encoding)) {
f2fs_err(sbi,
- "can't mount with superblock charset: %s-%s "
+ "can't mount with superblock charset: %s-%u.%u.%u "
"not supported by the kernel. flags: 0x%x.",
- encoding_info->name, encoding_info->version,
+ encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
encoding_flags);
return PTR_ERR(encoding);
}
f2fs_info(sbi, "Using encoding defined by superblock: "
- "%s-%s with flags 0x%hx", encoding_info->name,
- encoding_info->version?:"\b", encoding_flags);
+ "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
+ encoding_flags);
sbi->sb->s_encoding = encoding;
sbi->sb->s_encoding_flags = encoding_flags;
@@ -3909,30 +4857,33 @@ static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
- struct f2fs_sm_info *sm_i = SM_I(sbi);
-
/* adjust parameters according to the volume size */
- if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
- F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+ if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
if (f2fs_block_unit_discard(sbi))
- sm_i->dcc_info->discard_granularity = 1;
- sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
+ SM_I(sbi)->dcc_info->discard_granularity =
+ MIN_DISCARD_GRANULARITY;
+ if (!f2fs_lfs_mode(sbi))
+ SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
+ BIT(F2FS_IPU_HONOR_OPU_WRITE);
}
- sbi->readdir_ra = 1;
+ sbi->readdir_ra = true;
}
-static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+static int f2fs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
struct inode *root;
int err;
bool skip_recovery = false, need_fsck = false;
- char *options = NULL;
int recovery, i, valid_super_block;
struct curseg_info *seg_i;
int retry_cnt = 1;
+#ifdef CONFIG_QUOTA
+ bool quota_enabled = false;
+#endif
try_onemore:
err = -EINVAL;
@@ -3947,14 +4898,24 @@ try_onemore:
sbi->sb = sb;
- /* Load the checksum driver */
- sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(sbi->s_chksum_driver)) {
- f2fs_err(sbi, "Cannot load crc32 driver.");
- err = PTR_ERR(sbi->s_chksum_driver);
- sbi->s_chksum_driver = NULL;
- goto free_sbi;
+ /* initialize locks within allocated memory */
+ init_f2fs_rwsem(&sbi->gc_lock);
+ mutex_init(&sbi->writepages);
+ init_f2fs_rwsem(&sbi->cp_global_sem);
+ init_f2fs_rwsem(&sbi->node_write);
+ init_f2fs_rwsem(&sbi->node_change);
+ spin_lock_init(&sbi->stat_lock);
+ init_f2fs_rwsem(&sbi->cp_rwsem);
+ init_f2fs_rwsem(&sbi->cp_enable_rwsem);
+ init_f2fs_rwsem(&sbi->quota_sem);
+ init_waitqueue_head(&sbi->cp_wait);
+ spin_lock_init(&sbi->error_lock);
+
+ for (i = 0; i < NR_INODE_TYPE; i++) {
+ INIT_LIST_HEAD(&sbi->inode_list[i]);
+ spin_lock_init(&sbi->inode_lock[i]);
}
+ mutex_init(&sbi->flush_lock);
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
@@ -3970,20 +4931,24 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
+ INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
+ memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
+ memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);
+
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sbi))
- sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
- sizeof(raw_super->uuid));
+ sbi->s_chksum_seed = f2fs_chksum(~0, raw_super->uuid,
+ sizeof(raw_super->uuid));
- default_options(sbi);
- /* parse mount options */
- options = kstrdup((const char *)data, GFP_KERNEL);
- if (data && !options) {
- err = -ENOMEM;
+ default_options(sbi, false);
+
+ err = f2fs_check_opt_consistency(fc, sb);
+ if (err)
goto free_sb_buf;
- }
- err = parse_options(sb, options, false);
+ f2fs_apply_options(fc, sb);
+
+ err = f2fs_sanity_check_options(sbi, false);
if (err)
goto free_options;
@@ -4021,49 +4986,28 @@ try_onemore:
sb->s_time_gran = 1;
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
- memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+ if (test_opt(sbi, INLINECRYPT))
+ sb->s_flags |= SB_INLINECRYPT;
+
+ if (test_opt(sbi, LAZYTIME))
+ sb->s_flags |= SB_LAZYTIME;
+ else
+ sb->s_flags &= ~SB_LAZYTIME;
+
+ super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
+ super_set_sysfs_name_bdev(sb);
sb->s_iflags |= SB_I_CGROUPWB;
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
- init_rwsem(&sbi->gc_lock);
- mutex_init(&sbi->writepages);
- init_rwsem(&sbi->cp_global_sem);
- init_rwsem(&sbi->node_write);
- init_rwsem(&sbi->node_change);
/* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING);
- spin_lock_init(&sbi->stat_lock);
-
- for (i = 0; i < NR_PAGE_TYPE; i++) {
- int n = (i == META) ? 1 : NR_TEMP_TYPE;
- int j;
-
- sbi->write_io[i] =
- f2fs_kmalloc(sbi,
- array_size(n,
- sizeof(struct f2fs_bio_info)),
- GFP_KERNEL);
- if (!sbi->write_io[i]) {
- err = -ENOMEM;
- goto free_bio_info;
- }
- for (j = HOT; j < n; j++) {
- init_rwsem(&sbi->write_io[i][j].io_rwsem);
- sbi->write_io[i][j].sbi = sbi;
- sbi->write_io[i][j].bio = NULL;
- spin_lock_init(&sbi->write_io[i][j].io_lock);
- INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
- INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
- init_rwsem(&sbi->write_io[i][j].bio_list_lock);
- }
- }
+ err = f2fs_init_write_merge_io(sbi);
+ if (err)
+ goto free_bio_info;
- init_rwsem(&sbi->cp_rwsem);
- init_rwsem(&sbi->quota_sem);
- init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
err = f2fs_init_iostat(sbi);
@@ -4074,22 +5018,9 @@ try_onemore:
if (err)
goto free_iostat;
- if (F2FS_IO_ALIGNED(sbi)) {
- sbi->write_io_dummy =
- mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
- if (!sbi->write_io_dummy) {
- err = -ENOMEM;
- goto free_percpu;
- }
- }
-
- /* init per sbi slab cache */
- err = f2fs_init_xattr_caches(sbi);
- if (err)
- goto free_io_dummy;
err = f2fs_init_page_array_cache(sbi);
if (err)
- goto free_xattr_cache;
+ goto free_percpu;
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
@@ -4141,12 +5072,6 @@ try_onemore:
limit_reserve_root(sbi);
adjust_unusable_cap_perc(sbi);
- for (i = 0; i < NR_INODE_TYPE; i++) {
- INIT_LIST_HEAD(&sbi->inode_list[i]);
- spin_lock_init(&sbi->inode_lock[i]);
- }
- mutex_init(&sbi->flush_lock);
-
f2fs_init_extent_cache_info(sbi);
f2fs_init_ino_entry_info(sbi);
@@ -4183,6 +5108,13 @@ try_onemore:
/* For write statistics */
sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
+ /* get segno of first zoned block device */
+ sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi);
+
+ sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ?
+ ZONED_PIN_SEC_REQUIRED_COUNT :
+ GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi));
+
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
if (__exist_node_summaries(sbi))
@@ -4217,6 +5149,7 @@ try_onemore:
goto free_node_inode;
}
+ generic_set_sb_d_ops(sb);
sb->s_root = d_make_root(root); /* allocate root dentry */
if (!sb->s_root) {
err = -ENOMEM;
@@ -4231,6 +5164,7 @@ try_onemore:
if (err)
goto free_compress_inode;
+ sbi->umount_lock_holder = current;
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
@@ -4238,14 +5172,18 @@ try_onemore:
if (err)
f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
}
+
+ quota_enabled = f2fs_recover_quota_begin(sbi);
#endif
/* if there are any orphan inodes, free them */
err = f2fs_recover_orphan_inodes(sbi);
if (err)
goto free_meta;
- if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
+ if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) {
+ skip_recovery = true;
goto reset_checkpoint;
+ }
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
@@ -4287,37 +5225,47 @@ try_onemore:
}
} else {
err = f2fs_recover_fsync_data(sbi, true);
-
- if (!f2fs_readonly(sb) && err > 0) {
- err = -EINVAL;
- f2fs_err(sbi, "Need to recover fsync data");
- goto free_meta;
+ if (err > 0) {
+ if (!f2fs_readonly(sb)) {
+ f2fs_err(sbi, "Need to recover fsync data");
+ err = -EINVAL;
+ goto free_meta;
+ } else {
+ f2fs_info(sbi, "drop all fsynced data");
+ err = 0;
+ }
}
}
+reset_checkpoint:
+#ifdef CONFIG_QUOTA
+ f2fs_recover_quota_end(sbi, quota_enabled);
+#endif
/*
* If f2fs is not read-only and fsync data recovery succeeds,
- * check zoned block devices' write pointer consistency.
+ * write pointer consistency of cursegs and other zones is already
+ * checked and fixed during recovery. However, if recovery fails,
+ * write pointers are left untouched, and retry-mount should check
+ * them here.
*/
- if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
- err = f2fs_check_write_pointer(sbi);
- if (err)
- goto free_meta;
- }
-
-reset_checkpoint:
- f2fs_init_inmem_curseg(sbi);
+ if (skip_recovery)
+ err = f2fs_check_and_fix_write_pointer(sbi);
+ if (err)
+ goto free_meta;
/* f2fs_recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = f2fs_init_inmem_curseg(sbi);
+ if (err)
+ goto sync_free_meta;
+
+ if (test_opt(sbi, DISABLE_CHECKPOINT))
err = f2fs_disable_checkpoint(sbi);
- if (err)
- goto sync_free_meta;
- } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
- f2fs_enable_checkpoint(sbi);
- }
+ else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))
+ err = f2fs_enable_checkpoint(sbi);
+ if (err)
+ goto sync_free_meta;
/*
* If filesystem is not mounted as read-only then
@@ -4330,7 +5278,6 @@ reset_checkpoint:
if (err)
goto sync_free_meta;
}
- kvfree(options);
/* recover broken superblock */
if (recovery) {
@@ -4348,6 +5295,8 @@ reset_checkpoint:
f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+
+ sbi->umount_lock_holder = NULL;
return 0;
sync_free_meta:
@@ -4389,9 +5338,11 @@ free_nm:
f2fs_destroy_node_manager(sbi);
free_sm:
f2fs_destroy_segment_manager(sbi);
- f2fs_destroy_post_read_wq(sbi);
stop_ckpt_thread:
f2fs_stop_ckpt_thread(sbi);
+ /* flush s_error_work before sbi destroy */
+ flush_work(&sbi->s_error_work);
+ f2fs_destroy_post_read_wq(sbi);
free_devices:
destroy_device_list(sbi);
kvfree(sbi->ckpt);
@@ -4401,19 +5352,15 @@ free_meta_inode:
sbi->meta_inode = NULL;
free_page_array_cache:
f2fs_destroy_page_array_cache(sbi);
-free_xattr_cache:
- f2fs_destroy_xattr_caches(sbi);
-free_io_dummy:
- mempool_destroy(sbi->write_io_dummy);
free_percpu:
destroy_percpu_info(sbi);
free_iostat:
f2fs_destroy_iostat(sbi);
free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
- kvfree(sbi->write_io[i]);
+ kfree(sbi->write_io[i]);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
sb->s_encoding = NULL;
#endif
@@ -4422,14 +5369,13 @@ free_options:
for (i = 0; i < MAXQUOTAS; i++)
kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
- fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
- kvfree(options);
+ /* no need to free dummy_enc_policy here; move it back into ctx so f2fs_fc_free() releases it on failure */
+ swap(F2FS_CTX_INFO(ctx).dummy_enc_policy, F2FS_OPTION(sbi).dummy_enc_policy);
free_sb_buf:
kfree(raw_super);
free_sbi:
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
+ sb->s_fs_info = NULL;
/* give only one another chance */
if (retry_cnt > 0 && skip_recovery) {
@@ -4440,16 +5386,45 @@ free_sbi:
return err;
}
-static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int f2fs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
+ return get_tree_bdev(fc, f2fs_fill_super);
}
+static int f2fs_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+
+ return __f2fs_remount(fc, sb);
+}
+
+static void f2fs_fc_free(struct fs_context *fc)
+{
+ struct f2fs_fs_context *ctx = fc->fs_private;
+
+ if (!ctx)
+ return;
+
+#ifdef CONFIG_QUOTA
+ f2fs_unnote_qf_name_all(fc);
+#endif
+ fscrypt_free_dummy_policy(&F2FS_CTX_INFO(ctx).dummy_enc_policy);
+ kfree(ctx);
+}
+
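+/* fs_context hooks wiring f2fs into the new mount API */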
+static const struct fs_context_operations f2fs_context_ops = {
+ .parse_param = f2fs_parse_param,
+ .get_tree = f2fs_get_tree,
+ .reconfigure = f2fs_reconfigure,
+ .free = f2fs_fc_free,
+};
+
static void kill_f2fs_super(struct super_block *sb)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
if (sb->s_root) {
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ sbi->umount_lock_holder = current;
set_sbi_flag(sbi, SBI_IS_CLOSE);
f2fs_stop_gc_thread(sbi);
@@ -4469,6 +5444,7 @@ static void kill_f2fs_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
f2fs_write_checkpoint(sbi, &cpc);
}
@@ -4476,14 +5452,34 @@ static void kill_f2fs_super(struct super_block *sb)
sb->s_flags &= ~SB_RDONLY;
}
kill_block_super(sb);
+ /* Release block devices last, after fscrypt_destroy_keyring(). */
+ if (sbi) {
+ destroy_device_list(sbi);
+ kfree(sbi);
+ sb->s_fs_info = NULL;
+ }
+}
+
+static int f2fs_init_fs_context(struct fs_context *fc)
+{
+ struct f2fs_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct f2fs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ fc->fs_private = ctx;
+ fc->ops = &f2fs_context_ops;
+
+ return 0;
}
static struct file_system_type f2fs_fs_type = {
.owner = THIS_MODULE,
.name = "f2fs",
- .mount = f2fs_mount,
+ .init_fs_context = f2fs_init_fs_context,
.kill_sb = kill_f2fs_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");
@@ -4492,9 +5488,7 @@ static int __init init_inodecache(void)
f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
sizeof(struct f2fs_inode_info), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
- if (!f2fs_inode_cachep)
- return -ENOMEM;
- return 0;
+ return f2fs_inode_cachep ? 0 : -ENOMEM;
}
static void destroy_inodecache(void)
@@ -4511,12 +5505,6 @@ static int __init init_f2fs_fs(void)
{
int err;
- if (PAGE_SIZE != F2FS_BLKSIZE) {
- printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
- PAGE_SIZE, F2FS_BLKSIZE);
- return -EINVAL;
- }
-
err = init_inodecache();
if (err)
goto fail;
@@ -4541,12 +5529,9 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_sysfs();
if (err)
goto free_garbage_collection_cache;
- err = register_shrinker(&f2fs_shrinker_info);
+ err = f2fs_init_shrinker();
if (err)
goto free_sysfs;
- err = register_filesystem(&f2fs_fs_type);
- if (err)
- goto free_shrinker;
f2fs_create_root_stats();
err = f2fs_init_post_read_processing();
if (err)
@@ -4559,7 +5544,7 @@ static int __init init_f2fs_fs(void)
goto free_iostat;
err = f2fs_init_bioset();
if (err)
- goto free_bio_enrty_cache;
+ goto free_bio_entry_cache;
err = f2fs_init_compress_mempool();
if (err)
goto free_bioset;
@@ -4569,14 +5554,24 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_casefold_cache();
if (err)
goto free_compress_cache;
+ err = f2fs_init_xattr_cache();
+ if (err)
+ goto free_casefold_cache;
+ err = register_filesystem(&f2fs_fs_type);
+ if (err)
+ goto free_xattr_cache;
return 0;
+free_xattr_cache:
+ f2fs_destroy_xattr_cache();
+free_casefold_cache:
+ f2fs_destroy_casefold_cache();
free_compress_cache:
f2fs_destroy_compress_cache();
free_compress_mempool:
f2fs_destroy_compress_mempool();
free_bioset:
f2fs_destroy_bioset();
-free_bio_enrty_cache:
+free_bio_entry_cache:
f2fs_destroy_bio_entry_cache();
free_iostat:
f2fs_destroy_iostat_processing();
@@ -4584,9 +5579,7 @@ free_post_read:
f2fs_destroy_post_read_processing();
free_root_stats:
f2fs_destroy_root_stats();
- unregister_filesystem(&f2fs_fs_type);
-free_shrinker:
- unregister_shrinker(&f2fs_shrinker_info);
+ f2fs_exit_shrinker();
free_sysfs:
f2fs_exit_sysfs();
free_garbage_collection_cache:
@@ -4609,6 +5602,8 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ unregister_filesystem(&f2fs_fs_type);
+ f2fs_destroy_xattr_cache();
f2fs_destroy_casefold_cache();
f2fs_destroy_compress_cache();
f2fs_destroy_compress_mempool();
@@ -4617,8 +5612,7 @@ static void __exit exit_f2fs_fs(void)
f2fs_destroy_iostat_processing();
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
- unregister_filesystem(&f2fs_fs_type);
- unregister_shrinker(&f2fs_shrinker_info);
+ f2fs_exit_shrinker();
f2fs_exit_sysfs();
f2fs_destroy_garbage_collection_cache();
f2fs_destroy_extent_cache();
@@ -4635,5 +5629,3 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32");
-
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 7d289249cd7e..c42f4f979d13 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -41,16 +41,32 @@ enum {
ATGC_INFO, /* struct atgc_management */
};
+static const char *gc_mode_names[MAX_GC_MODE] = {
+ "GC_NORMAL",
+ "GC_IDLE_CB",
+ "GC_IDLE_GREEDY",
+ "GC_IDLE_AT",
+ "GC_URGENT_HIGH",
+ "GC_URGENT_LOW",
+ "GC_URGENT_MID"
+};
+
struct f2fs_attr {
struct attribute attr;
- ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
- ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
- const char *, size_t);
+ ssize_t (*show)(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf);
+ ssize_t (*store)(struct f2fs_attr *a, struct f2fs_sb_info *sbi,
+ const char *buf, size_t len);
int struct_type;
int offset;
int id;
};
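+/* attributes that are not tied to a specific sbi (global feature and tuning entries) */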
+struct f2fs_base_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct f2fs_base_attr *a, char *buf);
+ ssize_t (*store)(struct f2fs_base_attr *a, const char *buf, size_t len);
+};
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf);
@@ -85,28 +101,28 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
static ssize_t dirty_segments_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(dirty_segments(sbi)));
}
static ssize_t free_segments_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(free_segments(sbi)));
}
static ssize_t ovp_segments_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(overprovision_segments(sbi)));
}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(sbi->kbytes_written +
((f2fs_get_sectors_written(sbi) -
sbi->sectors_written_start) >> 1)));
@@ -115,7 +131,61 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
static ssize_t sb_status_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%lx\n", sbi->s_flag);
+ return sysfs_emit(buf, "%lx\n", sbi->s_flag);
+}
+
+static ssize_t cp_status_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%x\n", le32_to_cpu(F2FS_CKPT(sbi)->ckpt_flags));
+}
+
+static ssize_t pending_discard_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ if (!SM_I(sbi)->dcc_info)
+ return -EINVAL;
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)atomic_read(
+ &SM_I(sbi)->dcc_info->discard_cmd_cnt));
+}
+
+static ssize_t issued_discard_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ if (!SM_I(sbi)->dcc_info)
+ return -EINVAL;
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)atomic_read(
+ &SM_I(sbi)->dcc_info->issued_discard));
+}
+
+static ssize_t queued_discard_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ if (!SM_I(sbi)->dcc_info)
+ return -EINVAL;
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)atomic_read(
+ &SM_I(sbi)->dcc_info->queued_discard));
+}
+
+static ssize_t undiscard_blks_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ if (!SM_I(sbi)->dcc_info)
+ return -EINVAL;
+ return sysfs_emit(buf, "%u\n",
+ SM_I(sbi)->dcc_info->undiscard_blks);
+}
+
+static ssize_t atgc_enabled_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", sbi->am.atgc_enabled ? 1 : 0);
+}
+
+static ssize_t gc_mode_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", gc_mode_names[sbi->gc_mode]);
}
static ssize_t features_show(struct f2fs_attr *a,
@@ -124,57 +194,60 @@ static ssize_t features_show(struct f2fs_attr *a,
int len = 0;
if (f2fs_sb_has_encrypt(sbi))
- len += scnprintf(buf, PAGE_SIZE - len, "%s",
+ len += sysfs_emit_at(buf, len, "%s",
"encryption");
if (f2fs_sb_has_blkzoned(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "blkzoned");
if (f2fs_sb_has_extra_attr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "extra_attr");
if (f2fs_sb_has_project_quota(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "projquota");
if (f2fs_sb_has_inode_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_checksum");
if (f2fs_sb_has_flexible_inline_xattr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "flexible_inline_xattr");
if (f2fs_sb_has_quota_ino(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "quota_ino");
if (f2fs_sb_has_inode_crtime(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_crtime");
if (f2fs_sb_has_lost_found(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "lost_found");
if (f2fs_sb_has_verity(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "verity");
if (f2fs_sb_has_sb_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "sb_checksum");
if (f2fs_sb_has_casefold(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "casefold");
if (f2fs_sb_has_readonly(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "readonly");
if (f2fs_sb_has_compression(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "compression");
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ if (f2fs_sb_has_packed_ssa(sbi))
+ len += sysfs_emit_at(buf, len, "%s%s",
+ len ? ", " : "", "packed_ssa");
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "pin_file");
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
static ssize_t current_reserved_blocks_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%u\n", sbi->current_reserved_blocks);
+ return sysfs_emit(buf, "%u\n", sbi->current_reserved_blocks);
}
static ssize_t unusable_show(struct f2fs_attr *a,
@@ -186,29 +259,51 @@ static ssize_t unusable_show(struct f2fs_attr *a,
unusable = sbi->unusable_block_count;
else
unusable = f2fs_get_unusable_blocks(sbi);
- return sprintf(buf, "%llu\n", (unsigned long long)unusable);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)unusable);
}
static ssize_t encoding_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = sbi->sb;
if (f2fs_sb_has_casefold(sbi))
- return sysfs_emit(buf, "%s (%d.%d.%d)\n",
- sb->s_encoding->charset,
+ return sysfs_emit(buf, "UTF-8 (%d.%d.%d)\n",
(sb->s_encoding->version >> 16) & 0xff,
(sb->s_encoding->version >> 8) & 0xff,
sb->s_encoding->version & 0xff);
#endif
- return sprintf(buf, "(none)");
+ return sysfs_emit(buf, "(none)\n");
+}
+
+static ssize_t encoding_flags_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%x\n",
+ le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags));
+}
+
+static ssize_t effective_lookup_mode_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ switch (F2FS_OPTION(sbi).lookup_mode) {
+ case LOOKUP_PERF:
+ return sysfs_emit(buf, "perf\n");
+ case LOOKUP_COMPAT:
+ return sysfs_emit(buf, "compat\n");
+ case LOOKUP_AUTO:
+ if (sb_no_casefold_compat_fallback(sbi->sb))
+ return sysfs_emit(buf, "auto:perf\n");
+ return sysfs_emit(buf, "auto:compat\n");
+ }
+ return 0;
}
static ssize_t mounted_time_sec_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return sprintf(buf, "%llu", SIT_I(sbi)->mounted_time);
+ return sysfs_emit(buf, "%llu\n", SIT_I(sbi)->mounted_time);
}
#ifdef CONFIG_F2FS_STAT_FS
@@ -217,7 +312,7 @@ static ssize_t moved_blocks_foreground_show(struct f2fs_attr *a,
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(si->tot_blks -
(si->bg_data_blks + si->bg_node_blks)));
}
@@ -227,7 +322,7 @@ static ssize_t moved_blocks_background_show(struct f2fs_attr *a,
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)(si->bg_data_blks + si->bg_node_blks));
}
@@ -238,7 +333,7 @@ static ssize_t avg_vblocks_show(struct f2fs_attr *a,
si->dirty_count = dirty_segments(sbi);
f2fs_update_sit_info(sbi);
- return sprintf(buf, "%llu\n", (unsigned long long)(si->avg_vblocks));
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)(si->avg_vblocks));
}
#endif
@@ -266,35 +361,27 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
int hot_count = sbi->raw_super->hot_ext_count;
int len = 0, i;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "cold file extension:\n");
+ len += sysfs_emit_at(buf, len, "cold file extension:\n");
for (i = 0; i < cold_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "hot file extension:\n");
+ len += sysfs_emit_at(buf, len, "hot file extension:\n");
for (i = cold_count; i < cold_count + hot_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
+
return len;
}
if (!strcmp(a->attr.name, "ckpt_thread_ioprio")) {
struct ckpt_req_control *cprc = &sbi->cprc_info;
- int len = 0;
int class = IOPRIO_PRIO_CLASS(cprc->ckpt_thread_ioprio);
- int data = IOPRIO_PRIO_DATA(cprc->ckpt_thread_ioprio);
+ int level = IOPRIO_PRIO_LEVEL(cprc->ckpt_thread_ioprio);
- if (class == IOPRIO_CLASS_RT)
- len += scnprintf(buf + len, PAGE_SIZE - len, "rt,");
- else if (class == IOPRIO_CLASS_BE)
- len += scnprintf(buf + len, PAGE_SIZE - len, "be,");
- else
+ if (class != IOPRIO_CLASS_RT && class != IOPRIO_CLASS_BE)
return -EINVAL;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d\n", data);
- return len;
+ return sysfs_emit(buf, "%s,%d\n",
+ class == IOPRIO_CLASS_RT ? "rt" : "be", level);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -316,9 +403,34 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
sbi->gc_reclaimed_segs[sbi->gc_segment_mode]);
}
+ if (!strcmp(a->attr.name, "current_atomic_write")) {
+ s64 current_write = atomic64_read(&sbi->current_atomic_write);
+
+ return sysfs_emit(buf, "%lld\n", current_write);
+ }
+
+ if (!strcmp(a->attr.name, "peak_atomic_write"))
+ return sysfs_emit(buf, "%lld\n", sbi->peak_atomic_write);
+
+ if (!strcmp(a->attr.name, "committed_atomic_block"))
+ return sysfs_emit(buf, "%llu\n", sbi->committed_atomic_block);
+
+ if (!strcmp(a->attr.name, "revoked_atomic_block"))
+ return sysfs_emit(buf, "%llu\n", sbi->revoked_atomic_block);
+
+#ifdef CONFIG_F2FS_STAT_FS
+ if (!strcmp(a->attr.name, "cp_foreground_calls"))
+ return sysfs_emit(buf, "%d\n",
+ atomic_read(&sbi->cp_call_count[TOTAL_CALL]) -
+ atomic_read(&sbi->cp_call_count[BACKGROUND]));
+ if (!strcmp(a->attr.name, "cp_background_calls"))
+ return sysfs_emit(buf, "%d\n",
+ atomic_read(&sbi->cp_call_count[BACKGROUND]));
+#endif
+
ui = (unsigned int *)(ptr + a->offset);
- return sprintf(buf, "%u\n", *ui);
+ return sysfs_emit(buf, "%u\n", *ui);
}
static ssize_t __sbi_store(struct f2fs_attr *a,
@@ -355,7 +467,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (!strlen(name) || strlen(name) >= F2FS_EXTENSION_LEN)
return -EINVAL;
- down_write(&sbi->sb_lock);
+ f2fs_down_write(&sbi->sb_lock);
ret = f2fs_update_extension_list(sbi, name, hot, set);
if (ret)
@@ -365,7 +477,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (ret)
f2fs_update_extension_list(sbi, name, hot, !set);
out:
- up_write(&sbi->sb_lock);
+ f2fs_up_write(&sbi->sb_lock);
return ret ? ret : count;
}
@@ -373,7 +485,7 @@ out:
const char *name = strim((char *)buf);
struct ckpt_req_control *cprc = &sbi->cprc_info;
int class;
- long data;
+ long level;
int ret;
if (!strncmp(name, "rt,", 3))
@@ -384,13 +496,13 @@ out:
return -EINVAL;
name += 3;
- ret = kstrtol(name, 10, &data);
+ ret = kstrtol(name, 10, &level);
if (ret)
return ret;
- if (data >= IOPRIO_NR_LEVELS || data < 0)
+ if (level >= IOPRIO_NR_LEVELS || level < 0)
return -EINVAL;
- cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
+ cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, level);
if (test_opt(sbi, MERGE_CHECKPOINT)) {
ret = set_task_ioprio(cprc->f2fs_issue_ckpt,
cprc->ckpt_thread_ioprio);
@@ -407,10 +519,16 @@ out:
if (ret < 0)
return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
- return -EINVAL;
- if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
- return -EINVAL;
+ if (a->struct_type == FAULT_INFO_TYPE) {
+ if (f2fs_build_fault_attr(sbi, 0, t, FAULT_TYPE))
+ return -EINVAL;
+ return count;
+ }
+ if (a->struct_type == FAULT_INFO_RATE) {
+ if (f2fs_build_fault_attr(sbi, t, 0, FAULT_RATE))
+ return -EINVAL;
+ return count;
+ }
#endif
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
@@ -426,6 +544,17 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "discard_io_aware_gran")) {
+ if (t > MAX_PLIST_NUM)
+ return -EINVAL;
+ if (!f2fs_block_unit_discard(sbi))
+ return -EINVAL;
+ if (t == *ui)
+ return count;
+ *ui = t;
+ return count;
+ }
+
if (!strcmp(a->attr.name, "discard_granularity")) {
if (t == 0 || t > MAX_PLIST_NUM)
return -EINVAL;
@@ -437,13 +566,38 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "max_ordered_discard")) {
+ if (t == 0 || t > MAX_PLIST_NUM)
+ return -EINVAL;
+ if (!f2fs_block_unit_discard(sbi))
+ return -EINVAL;
+ *ui = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "discard_urgent_util")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "discard_io_aware")) {
+ if (t >= DPOLICY_IO_AWARE_MAX)
+ return -EINVAL;
+ *ui = t;
+ return count;
+ }
+
if (!strcmp(a->attr.name, "migration_granularity")) {
- if (t == 0 || t > sbi->segs_per_sec)
+ if (t == 0 || t > SEGS_PER_SEC(sbi))
return -EINVAL;
}
- if (!strcmp(a->attr.name, "trim_sections"))
- return -EINVAL;
+ if (!strcmp(a->attr.name, "migration_window_granularity")) {
+ if (t == 0 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ }
if (!strcmp(a->attr.name, "gc_urgent")) {
if (t == 0) {
@@ -451,13 +605,20 @@ out:
} else if (t == 1) {
sbi->gc_mode = GC_URGENT_HIGH;
if (sbi->gc_thread) {
- sbi->gc_thread->gc_wake = 1;
+ sbi->gc_thread->gc_wake = true;
wake_up_interruptible_all(
&sbi->gc_thread->gc_wait_queue_head);
wake_up_discard_thread(sbi, true);
}
} else if (t == 2) {
sbi->gc_mode = GC_URGENT_LOW;
+ } else if (t == 3) {
+ sbi->gc_mode = GC_URGENT_MID;
+ if (sbi->gc_thread) {
+ sbi->gc_thread->gc_wake = true;
+ wake_up_interruptible_all(
+ &sbi->gc_thread->gc_wait_queue_head);
+ }
} else {
return -EINVAL;
}
@@ -471,13 +632,42 @@ out:
} else if (t == GC_IDLE_AT) {
if (!sbi->am.atgc_enabled)
return -EINVAL;
- sbi->gc_mode = GC_AT;
+ sbi->gc_mode = GC_IDLE_AT;
} else {
sbi->gc_mode = GC_NORMAL;
}
return count;
}
+ if (!strcmp(a->attr.name, "gc_remaining_trials")) {
+ spin_lock(&sbi->gc_remaining_trials_lock);
+ sbi->gc_remaining_trials = t;
+ spin_unlock(&sbi->gc_remaining_trials_lock);
+
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_no_zoned_gc_percent")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_boost_zoned_gc_percent")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_valid_thresh_ratio")) {
+ if (t > 100)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
#ifdef CONFIG_F2FS_IOSTAT
if (!strcmp(a->attr.name, "iostat_enable")) {
sbi->iostat_enable = !!t;
@@ -489,9 +679,18 @@ out:
if (!strcmp(a->attr.name, "iostat_period_ms")) {
if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
return -EINVAL;
- spin_lock(&sbi->iostat_lock);
+ spin_lock_irq(&sbi->iostat_lock);
sbi->iostat_period_ms = (unsigned int)t;
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_irq(&sbi->iostat_lock);
+ return count;
+ }
+#endif
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (!strcmp(a->attr.name, "blkzone_alloc_policy")) {
+ if (t < BLKZONE_ALLOC_PRIOR_SEQ || t > BLKZONE_ALLOC_PRIOR_CONV)
+ return -EINVAL;
+ sbi->blkzone_alloc_policy = t;
return count;
}
#endif
@@ -512,6 +711,20 @@ out:
sbi->compr_new_inode = 0;
return count;
}
+
+ if (!strcmp(a->attr.name, "compress_percent")) {
+ if (t == 0 || t > 100)
+ return -EINVAL;
+ *ui = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "compress_watermark")) {
+ if (t == 0 || t > 100)
+ return -EINVAL;
+ *ui = t;
+ return count;
+ }
#endif
if (!strcmp(a->attr.name, "atgc_candidate_ratio")) {
@@ -536,6 +749,13 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "gc_pin_file_threshold")) {
+ if (t > MAX_GC_FAILED_PINNED_FILES)
+ return -EINVAL;
+ sbi->gc_pin_file_threshold = t;
+ return count;
+ }
+
if (!strcmp(a->attr.name, "gc_reclaimed_segments")) {
if (t != 0)
return -EINVAL;
@@ -567,6 +787,125 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "peak_atomic_write")) {
+ if (t != 0)
+ return -EINVAL;
+ sbi->peak_atomic_write = 0;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "committed_atomic_block")) {
+ if (t != 0)
+ return -EINVAL;
+ sbi->committed_atomic_block = 0;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "revoked_atomic_block")) {
+ if (t != 0)
+ return -EINVAL;
+ sbi->revoked_atomic_block = 0;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "readdir_ra")) {
+ sbi->readdir_ra = !!t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "hot_data_age_threshold")) {
+ if (t == 0 || t >= sbi->warm_data_age_threshold)
+ return -EINVAL;
+ if (t == *ui)
+ return count;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "warm_data_age_threshold")) {
+ if (t <= sbi->hot_data_age_threshold)
+ return -EINVAL;
+ if (t == *ui)
+ return count;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "last_age_weight")) {
+ if (t > 100)
+ return -EINVAL;
+ if (t == *ui)
+ return count;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "max_read_extent_count")) {
+ if (t > UINT_MAX)
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "ipu_policy")) {
+ if (t >= BIT(F2FS_IPU_MAX))
+ return -EINVAL;
+ /* in LFS mode, allow only F2FS_IPU_NOCACHE (used for IPU on pinned files) */
+ if (f2fs_lfs_mode(sbi) && (t & ~BIT(F2FS_IPU_NOCACHE)))
+ return -EINVAL;
+ SM_I(sbi)->ipu_policy = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "dir_level")) {
+ if (t > MAX_DIR_HASH_DEPTH)
+ return -EINVAL;
+ sbi->dir_level = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "reserved_pin_section")) {
+ if (t > GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))
+ return -EINVAL;
+ *ui = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_boost_gc_multiple")) {
+ if (t < 1 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ sbi->gc_thread->boost_gc_multiple = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "gc_boost_gc_greedy")) {
+ if (t > GC_GREEDY)
+ return -EINVAL;
+ sbi->gc_thread->boost_gc_greedy = (unsigned int)t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "bggc_io_aware")) {
+ if (t < AWARE_ALL_IO || t > AWARE_NONE)
+ return -EINVAL;
+ sbi->bggc_io_aware = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "allocate_section_hint")) {
+ if (t < 0 || t > MAIN_SECS(sbi))
+ return -EINVAL;
+ sbi->allocate_section_hint = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "allocate_section_policy")) {
+ if (t < ALLOCATE_FORWARD_NOHINT || t > ALLOCATE_FORWARD_FROM_HINT)
+ return -EINVAL;
+ sbi->allocate_section_policy = t;
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -618,6 +957,25 @@ static void f2fs_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
+static ssize_t f2fs_base_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct f2fs_base_attr *a = container_of(attr,
+ struct f2fs_base_attr, attr);
+
+ return a->show ? a->show(a, buf) : 0;
+}
+
+static ssize_t f2fs_base_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct f2fs_base_attr *a = container_of(attr,
+ struct f2fs_base_attr, attr);
+
+ return a->store ? a->store(a, buf, len) : 0;
+}
+
/*
* Note that there are three feature list entries:
* 1) /sys/fs/f2fs/features
@@ -636,24 +994,56 @@ static void f2fs_sb_release(struct kobject *kobj)
* please add new on-disk feature in this list only.
* - ref. F2FS_SB_FEATURE_RO_ATTR()
*/
-static ssize_t f2fs_feature_show(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi, char *buf)
+static ssize_t f2fs_feature_show(struct f2fs_base_attr *a, char *buf)
{
- return sprintf(buf, "supported\n");
+ return sysfs_emit(buf, "supported\n");
}
#define F2FS_FEATURE_RO_ATTR(_name) \
-static struct f2fs_attr f2fs_attr_##_name = { \
+static struct f2fs_base_attr f2fs_base_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0444 }, \
.show = f2fs_feature_show, \
}
+static ssize_t f2fs_tune_show(struct f2fs_base_attr *a, char *buf)
+{
+ unsigned int res = 0;
+
+ if (!strcmp(a->attr.name, "reclaim_caches_kb"))
+ res = f2fs_donate_files();
+
+ return sysfs_emit(buf, "%u\n", res);
+}
+
+static ssize_t f2fs_tune_store(struct f2fs_base_attr *a,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int ret;
+
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+
+ if (!strcmp(a->attr.name, "reclaim_caches_kb"))
+ f2fs_reclaim_caches(t);
+
+ return count;
+}
+
+#define F2FS_TUNE_RW_ATTR(_name) \
+static struct f2fs_base_attr f2fs_base_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0644 }, \
+ .show = f2fs_tune_show, \
+ .store = f2fs_tune_store, \
+}
+
static ssize_t f2fs_sb_feature_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
if (F2FS_HAS_FEATURE(sbi, a->id))
- return sprintf(buf, "supported\n");
- return sprintf(buf, "unsupported\n");
+ return sysfs_emit(buf, "supported\n");
+ return sysfs_emit(buf, "unsupported\n");
}
#define F2FS_SB_FEATURE_RO_ATTR(_name, _feat) \
@@ -672,6 +1062,11 @@ static struct f2fs_attr f2fs_attr_##_name = { \
.offset = _offset \
}
+#define F2FS_RO_ATTR(struct_type, struct_name, name, elname) \
+ F2FS_ATTR_OFFSET(struct_type, name, 0444, \
+ f2fs_sbi_show, NULL, \
+ offsetof(struct struct_name, elname))
+
#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \
F2FS_ATTR_OFFSET(struct_type, name, 0644, \
f2fs_sbi_show, f2fs_sbi_store, \
@@ -680,60 +1075,177 @@ static struct f2fs_attr f2fs_attr_##_name = { \
#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
-#define F2FS_STAT_ATTR(_struct_type, _struct_name, _name, _elname) \
-static struct f2fs_attr f2fs_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = 0444 }, \
- .show = f2fs_sbi_show, \
- .struct_type = _struct_type, \
- .offset = offsetof(struct _struct_name, _elname), \
-}
+#ifdef CONFIG_F2FS_STAT_FS
+#define STAT_INFO_RO_ATTR(name, elname) \
+ F2FS_RO_ATTR(STAT_INFO, f2fs_stat_info, name, elname)
+#endif
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent_sleep_time,
- urgent_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
-F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
- interval_time[DISCARD_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
- umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
-#ifdef CONFIG_F2FS_IOSTAT
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_period_ms, iostat_period_ms);
+#define GC_THREAD_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, name, elname)
+
+#define SM_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, name, elname)
+
+#define SM_INFO_GENERAL_RW_ATTR(elname) \
+ SM_INFO_RW_ATTR(elname, elname)
+
+#define DCC_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, name, elname)
+
+#define DCC_INFO_GENERAL_RW_ATTR(elname) \
+ DCC_INFO_RW_ATTR(elname, elname)
+
+#define NM_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, name, elname)
+
+#define NM_INFO_GENERAL_RW_ATTR(elname) \
+ NM_INFO_RW_ATTR(elname, elname)
+
+#define F2FS_SBI_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, name, elname)
+
+#define F2FS_SBI_GENERAL_RW_ATTR(elname) \
+ F2FS_SBI_RW_ATTR(elname, elname)
+
+#define F2FS_SBI_GENERAL_RO_ATTR(elname) \
+ F2FS_RO_ATTR(F2FS_SBI, f2fs_sb_info, elname, elname)
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define FAULT_INFO_GENERAL_RW_ATTR(type, elname) \
+ F2FS_RW_ATTR(type, f2fs_fault_info, elname, elname)
#endif
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_io_bytes, max_io_bytes);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
+
+#define RESERVED_BLOCKS_GENERAL_RW_ATTR(elname) \
+ F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, elname, elname)
+
+#define CPRC_INFO_GENERAL_RW_ATTR(elname) \
+ F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, elname, elname)
+
+#define ATGC_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(ATGC_INFO, atgc_management, name, elname)
+
+/* GC_THREAD ATTR */
+GC_THREAD_RW_ATTR(gc_urgent_sleep_time, urgent_sleep_time);
+GC_THREAD_RW_ATTR(gc_min_sleep_time, min_sleep_time);
+GC_THREAD_RW_ATTR(gc_max_sleep_time, max_sleep_time);
+GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+GC_THREAD_RW_ATTR(gc_no_zoned_gc_percent, no_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_boost_zoned_gc_percent, boost_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_valid_thresh_ratio, valid_thresh_ratio);
+GC_THREAD_RW_ATTR(gc_boost_gc_multiple, boost_gc_multiple);
+GC_THREAD_RW_ATTR(gc_boost_gc_greedy, boost_gc_greedy);
+
+/* SM_INFO ATTR */
+SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments);
+SM_INFO_GENERAL_RW_ATTR(ipu_policy);
+SM_INFO_GENERAL_RW_ATTR(min_ipu_util);
+SM_INFO_GENERAL_RW_ATTR(min_fsync_blocks);
+SM_INFO_GENERAL_RW_ATTR(min_seq_blocks);
+SM_INFO_GENERAL_RW_ATTR(min_hot_blocks);
+SM_INFO_GENERAL_RW_ATTR(min_ssr_sections);
+SM_INFO_GENERAL_RW_ATTR(reserved_segments);
+
+/* DCC_INFO ATTR */
+DCC_INFO_RW_ATTR(max_small_discards, max_discards);
+DCC_INFO_GENERAL_RW_ATTR(max_discard_request);
+DCC_INFO_GENERAL_RW_ATTR(min_discard_issue_time);
+DCC_INFO_GENERAL_RW_ATTR(mid_discard_issue_time);
+DCC_INFO_GENERAL_RW_ATTR(max_discard_issue_time);
+DCC_INFO_GENERAL_RW_ATTR(discard_io_aware_gran);
+DCC_INFO_GENERAL_RW_ATTR(discard_urgent_util);
+DCC_INFO_GENERAL_RW_ATTR(discard_granularity);
+DCC_INFO_GENERAL_RW_ATTR(max_ordered_discard);
+DCC_INFO_GENERAL_RW_ATTR(discard_io_aware);
+
+/* NM_INFO ATTR */
+NM_INFO_RW_ATTR(max_roll_forward_node_blocks, max_rf_node_blocks);
+NM_INFO_GENERAL_RW_ATTR(ram_thresh);
+NM_INFO_GENERAL_RW_ATTR(ra_nid_pages);
+NM_INFO_GENERAL_RW_ATTR(dirty_nats_ratio);
+
+/* F2FS_SBI ATTR */
F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
+F2FS_SBI_RW_ATTR(gc_idle, gc_mode);
+F2FS_SBI_RW_ATTR(gc_urgent, gc_mode);
+F2FS_SBI_RW_ATTR(cp_interval, interval_time[CP_TIME]);
+F2FS_SBI_RW_ATTR(idle_interval, interval_time[REQ_TIME]);
+F2FS_SBI_RW_ATTR(discard_idle_interval, interval_time[DISCARD_TIME]);
+F2FS_SBI_RW_ATTR(gc_idle_interval, interval_time[GC_TIME]);
+F2FS_SBI_RW_ATTR(umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
+F2FS_SBI_RW_ATTR(gc_pin_file_thresh, gc_pin_file_threshold);
+F2FS_SBI_RW_ATTR(gc_reclaimed_segments, gc_reclaimed_segs);
+F2FS_SBI_GENERAL_RW_ATTR(max_victim_search);
+F2FS_SBI_GENERAL_RW_ATTR(migration_granularity);
+F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
+F2FS_SBI_GENERAL_RW_ATTR(dir_level);
+F2FS_SBI_GENERAL_RW_ATTR(allocate_section_hint);
+F2FS_SBI_GENERAL_RW_ATTR(allocate_section_policy);
+#ifdef CONFIG_F2FS_IOSTAT
+F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
+F2FS_SBI_GENERAL_RW_ATTR(iostat_period_ms);
+#endif
+F2FS_SBI_GENERAL_RW_ATTR(readdir_ra);
+F2FS_SBI_GENERAL_RW_ATTR(max_io_bytes);
+F2FS_SBI_GENERAL_RW_ATTR(data_io_flag);
+F2FS_SBI_GENERAL_RW_ATTR(node_io_flag);
+F2FS_SBI_GENERAL_RW_ATTR(gc_remaining_trials);
+F2FS_SBI_GENERAL_RW_ATTR(seq_file_ra_mul);
+F2FS_SBI_GENERAL_RW_ATTR(gc_segment_mode);
+F2FS_SBI_GENERAL_RW_ATTR(max_fragment_chunk);
+F2FS_SBI_GENERAL_RW_ATTR(max_fragment_hole);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+F2FS_SBI_GENERAL_RW_ATTR(compr_written_block);
+F2FS_SBI_GENERAL_RW_ATTR(compr_saved_block);
+F2FS_SBI_GENERAL_RW_ATTR(compr_new_inode);
+F2FS_SBI_GENERAL_RW_ATTR(compress_percent);
+F2FS_SBI_GENERAL_RW_ATTR(compress_watermark);
+#endif
+/* atomic write */
+F2FS_SBI_GENERAL_RO_ATTR(current_atomic_write);
+F2FS_SBI_GENERAL_RW_ATTR(peak_atomic_write);
+F2FS_SBI_GENERAL_RW_ATTR(committed_atomic_block);
+F2FS_SBI_GENERAL_RW_ATTR(revoked_atomic_block);
+/* block age extent cache */
+F2FS_SBI_GENERAL_RW_ATTR(hot_data_age_threshold);
+F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
+F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
+/* read extent cache */
+F2FS_SBI_GENERAL_RW_ATTR(max_read_extent_count);
+#ifdef CONFIG_BLK_DEV_ZONED
+F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+F2FS_SBI_GENERAL_RO_ATTR(max_open_zones);
+F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
+#endif
+F2FS_SBI_GENERAL_RW_ATTR(carve_out);
+F2FS_SBI_GENERAL_RW_ATTR(reserved_pin_section);
+F2FS_SBI_GENERAL_RW_ATTR(bggc_io_aware);
+
+/* STAT_INFO ATTR */
+#ifdef CONFIG_F2FS_STAT_FS
+STAT_INFO_RO_ATTR(cp_foreground_calls, cp_call_count[FOREGROUND]);
+STAT_INFO_RO_ATTR(cp_background_calls, cp_call_count[BACKGROUND]);
+STAT_INFO_RO_ATTR(gc_foreground_calls, gc_call_count[FOREGROUND]);
+STAT_INFO_RO_ATTR(gc_background_calls, gc_call_count[BACKGROUND]);
+#endif
+
+/* FAULT_INFO ATTR */
#ifdef CONFIG_F2FS_FAULT_INJECTION
-F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
-F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
+FAULT_INFO_GENERAL_RW_ATTR(FAULT_INFO_RATE, inject_rate);
+FAULT_INFO_GENERAL_RW_ATTR(FAULT_INFO_TYPE, inject_type);
#endif
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
-F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio);
+
+/* RESERVED_BLOCKS ATTR */
+RESERVED_BLOCKS_GENERAL_RW_ATTR(reserved_blocks);
+
+/* CPRC_INFO ATTR */
+CPRC_INFO_GENERAL_RW_ATTR(ckpt_thread_ioprio);
+
+/* ATGC_INFO ATTR */
+ATGC_INFO_RW_ATTR(atgc_candidate_ratio, candidate_ratio);
+ATGC_INFO_RW_ATTR(atgc_candidate_count, max_candidate_count);
+ATGC_INFO_RW_ATTR(atgc_age_weight, age_weight);
+ATGC_INFO_RW_ATTR(atgc_age_threshold, age_threshold);
+
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
F2FS_GENERAL_RO_ATTR(ovp_segments);
@@ -742,13 +1254,14 @@ F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
F2FS_GENERAL_RO_ATTR(unusable);
F2FS_GENERAL_RO_ATTR(encoding);
+F2FS_GENERAL_RO_ATTR(encoding_flags);
+F2FS_GENERAL_RO_ATTR(effective_lookup_mode);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
+F2FS_GENERAL_RO_ATTR(pending_discard);
+F2FS_GENERAL_RO_ATTR(atgc_enabled);
+F2FS_GENERAL_RO_ATTR(gc_mode);
#ifdef CONFIG_F2FS_STAT_FS
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_foreground_calls, call_count);
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_background_calls, bg_gc);
F2FS_GENERAL_RO_ATTR(moved_blocks_background);
F2FS_GENERAL_RO_ATTR(moved_blocks_foreground);
F2FS_GENERAL_RO_ATTR(avg_vblocks);
@@ -757,7 +1270,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption);
F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
F2FS_FEATURE_RO_ATTR(encrypted_casefold);
#endif
#endif /* CONFIG_FS_ENCRYPTION */
@@ -776,29 +1289,18 @@ F2FS_FEATURE_RO_ATTR(lost_found);
F2FS_FEATURE_RO_ATTR(verity);
#endif
F2FS_FEATURE_RO_ATTR(sb_checksum);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
F2FS_FEATURE_RO_ATTR(casefold);
#endif
F2FS_FEATURE_RO_ATTR(readonly);
#ifdef CONFIG_F2FS_FS_COMPRESSION
F2FS_FEATURE_RO_ATTR(compression);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_written_block, compr_written_block);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_saved_block, compr_saved_block);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_new_inode, compr_new_inode);
#endif
F2FS_FEATURE_RO_ATTR(pin_file);
-
-/* For ATGC */
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_ratio, candidate_ratio);
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_count, max_candidate_count);
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_weight, age_weight);
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_threshold, age_threshold);
-
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, seq_file_ra_mul, seq_file_ra_mul);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_segment_mode, gc_segment_mode);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_chunk, max_fragment_chunk);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_hole, max_fragment_hole);
+#ifdef CONFIG_UNICODE
+F2FS_FEATURE_RO_ATTR(linear_lookup);
+#endif
+F2FS_FEATURE_RO_ATTR(packed_ssa);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -806,30 +1308,48 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_min_sleep_time),
ATTR_LIST(gc_max_sleep_time),
ATTR_LIST(gc_no_gc_sleep_time),
+ ATTR_LIST(gc_no_zoned_gc_percent),
+ ATTR_LIST(gc_boost_zoned_gc_percent),
+ ATTR_LIST(gc_valid_thresh_ratio),
+ ATTR_LIST(gc_boost_gc_multiple),
+ ATTR_LIST(gc_boost_gc_greedy),
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
ATTR_LIST(main_blkaddr),
ATTR_LIST(max_small_discards),
+ ATTR_LIST(max_discard_request),
+ ATTR_LIST(min_discard_issue_time),
+ ATTR_LIST(mid_discard_issue_time),
+ ATTR_LIST(max_discard_issue_time),
+ ATTR_LIST(discard_io_aware_gran),
+ ATTR_LIST(discard_urgent_util),
ATTR_LIST(discard_granularity),
- ATTR_LIST(batched_trim_sections),
+ ATTR_LIST(max_ordered_discard),
+ ATTR_LIST(discard_io_aware),
+ ATTR_LIST(pending_discard),
+ ATTR_LIST(gc_mode),
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
ATTR_LIST(min_seq_blocks),
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
+ ATTR_LIST(reserved_segments),
ATTR_LIST(max_victim_search),
ATTR_LIST(migration_granularity),
+ ATTR_LIST(migration_window_granularity),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
ATTR_LIST(dirty_nats_ratio),
+ ATTR_LIST(max_roll_forward_node_blocks),
ATTR_LIST(cp_interval),
ATTR_LIST(idle_interval),
ATTR_LIST(discard_idle_interval),
ATTR_LIST(gc_idle_interval),
ATTR_LIST(umount_discard_timeout),
+ ATTR_LIST(bggc_io_aware),
#ifdef CONFIG_F2FS_IOSTAT
ATTR_LIST(iostat_enable),
ATTR_LIST(iostat_period_ms),
@@ -844,6 +1364,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
ATTR_LIST(data_io_flag),
ATTR_LIST(node_io_flag),
+ ATTR_LIST(gc_remaining_trials),
ATTR_LIST(ckpt_thread_ioprio),
ATTR_LIST(dirty_segments),
ATTR_LIST(free_segments),
@@ -854,6 +1375,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(reserved_blocks),
ATTR_LIST(current_reserved_blocks),
ATTR_LIST(encoding),
+ ATTR_LIST(encoding_flags),
+ ATTR_LIST(effective_lookup_mode),
ATTR_LIST(mounted_time_sec),
#ifdef CONFIG_F2FS_STAT_FS
ATTR_LIST(cp_foreground_calls),
@@ -864,63 +1387,97 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(moved_blocks_background),
ATTR_LIST(avg_vblocks),
#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ ATTR_LIST(unusable_blocks_per_sec),
+ ATTR_LIST(max_open_zones),
+ ATTR_LIST(blkzone_alloc_policy),
+#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compr_written_block),
ATTR_LIST(compr_saved_block),
ATTR_LIST(compr_new_inode),
+ ATTR_LIST(compress_percent),
+ ATTR_LIST(compress_watermark),
#endif
/* For ATGC */
ATTR_LIST(atgc_candidate_ratio),
ATTR_LIST(atgc_candidate_count),
ATTR_LIST(atgc_age_weight),
ATTR_LIST(atgc_age_threshold),
+ ATTR_LIST(atgc_enabled),
ATTR_LIST(seq_file_ra_mul),
ATTR_LIST(gc_segment_mode),
ATTR_LIST(gc_reclaimed_segments),
ATTR_LIST(max_fragment_chunk),
ATTR_LIST(max_fragment_hole),
+ ATTR_LIST(current_atomic_write),
+ ATTR_LIST(peak_atomic_write),
+ ATTR_LIST(committed_atomic_block),
+ ATTR_LIST(revoked_atomic_block),
+ ATTR_LIST(hot_data_age_threshold),
+ ATTR_LIST(warm_data_age_threshold),
+ ATTR_LIST(last_age_weight),
+ ATTR_LIST(max_read_extent_count),
+ ATTR_LIST(carve_out),
+ ATTR_LIST(reserved_pin_section),
+ ATTR_LIST(allocate_section_hint),
+ ATTR_LIST(allocate_section_policy),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
+#define BASE_ATTR_LIST(name) (&f2fs_base_attr_##name.attr)
static struct attribute *f2fs_feat_attrs[] = {
#ifdef CONFIG_FS_ENCRYPTION
- ATTR_LIST(encryption),
- ATTR_LIST(test_dummy_encryption_v2),
-#ifdef CONFIG_UNICODE
- ATTR_LIST(encrypted_casefold),
+ BASE_ATTR_LIST(encryption),
+ BASE_ATTR_LIST(test_dummy_encryption_v2),
+#if IS_ENABLED(CONFIG_UNICODE)
+ BASE_ATTR_LIST(encrypted_casefold),
#endif
#endif /* CONFIG_FS_ENCRYPTION */
#ifdef CONFIG_BLK_DEV_ZONED
- ATTR_LIST(block_zoned),
+ BASE_ATTR_LIST(block_zoned),
#endif
- ATTR_LIST(atomic_write),
- ATTR_LIST(extra_attr),
- ATTR_LIST(project_quota),
- ATTR_LIST(inode_checksum),
- ATTR_LIST(flexible_inline_xattr),
- ATTR_LIST(quota_ino),
- ATTR_LIST(inode_crtime),
- ATTR_LIST(lost_found),
+ BASE_ATTR_LIST(atomic_write),
+ BASE_ATTR_LIST(extra_attr),
+ BASE_ATTR_LIST(project_quota),
+ BASE_ATTR_LIST(inode_checksum),
+ BASE_ATTR_LIST(flexible_inline_xattr),
+ BASE_ATTR_LIST(quota_ino),
+ BASE_ATTR_LIST(inode_crtime),
+ BASE_ATTR_LIST(lost_found),
#ifdef CONFIG_FS_VERITY
- ATTR_LIST(verity),
+ BASE_ATTR_LIST(verity),
#endif
- ATTR_LIST(sb_checksum),
-#ifdef CONFIG_UNICODE
- ATTR_LIST(casefold),
+ BASE_ATTR_LIST(sb_checksum),
+#if IS_ENABLED(CONFIG_UNICODE)
+ BASE_ATTR_LIST(casefold),
#endif
- ATTR_LIST(readonly),
+ BASE_ATTR_LIST(readonly),
#ifdef CONFIG_F2FS_FS_COMPRESSION
- ATTR_LIST(compression),
+ BASE_ATTR_LIST(compression),
+#endif
+ BASE_ATTR_LIST(pin_file),
+#ifdef CONFIG_UNICODE
+ BASE_ATTR_LIST(linear_lookup),
#endif
- ATTR_LIST(pin_file),
+ BASE_ATTR_LIST(packed_ssa),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
F2FS_GENERAL_RO_ATTR(sb_status);
+F2FS_GENERAL_RO_ATTR(cp_status);
+F2FS_GENERAL_RO_ATTR(issued_discard);
+F2FS_GENERAL_RO_ATTR(queued_discard);
+F2FS_GENERAL_RO_ATTR(undiscard_blks);
+
static struct attribute *f2fs_stat_attrs[] = {
ATTR_LIST(sb_status),
+ ATTR_LIST(cp_status),
+ ATTR_LIST(issued_discard),
+ ATTR_LIST(queued_discard),
+ ATTR_LIST(undiscard_blks),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_stat);
@@ -939,6 +1496,8 @@ F2FS_SB_FEATURE_RO_ATTR(sb_checksum, SB_CHKSUM);
F2FS_SB_FEATURE_RO_ATTR(casefold, CASEFOLD);
F2FS_SB_FEATURE_RO_ATTR(compression, COMPRESSION);
F2FS_SB_FEATURE_RO_ATTR(readonly, RO);
+F2FS_SB_FEATURE_RO_ATTR(device_alias, DEVICE_ALIAS);
+F2FS_SB_FEATURE_RO_ATTR(packed_ssa, PACKED_SSA);
static struct attribute *f2fs_sb_feat_attrs[] = {
ATTR_LIST(sb_encryption),
@@ -955,22 +1514,32 @@ static struct attribute *f2fs_sb_feat_attrs[] = {
ATTR_LIST(sb_casefold),
ATTR_LIST(sb_compression),
ATTR_LIST(sb_readonly),
+ ATTR_LIST(sb_device_alias),
+ ATTR_LIST(sb_packed_ssa),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_sb_feat);
+F2FS_TUNE_RW_ATTR(reclaim_caches_kb);
+
+static struct attribute *f2fs_tune_attrs[] = {
+ BASE_ATTR_LIST(reclaim_caches_kb),
+ NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_tune);
+
static const struct sysfs_ops f2fs_attr_ops = {
.show = f2fs_attr_show,
.store = f2fs_attr_store,
};
-static struct kobj_type f2fs_sb_ktype = {
+static const struct kobj_type f2fs_sb_ktype = {
.default_groups = f2fs_groups,
.sysfs_ops = &f2fs_attr_ops,
.release = f2fs_sb_release,
};
-static struct kobj_type f2fs_ktype = {
+static const struct kobj_type f2fs_ktype = {
.sysfs_ops = &f2fs_attr_ops,
};
@@ -978,15 +1547,34 @@ static struct kset f2fs_kset = {
.kobj = {.ktype = &f2fs_ktype},
};
-static struct kobj_type f2fs_feat_ktype = {
+static const struct sysfs_ops f2fs_feat_attr_ops = {
+ .show = f2fs_base_attr_show,
+ .store = f2fs_base_attr_store,
+};
+
+static const struct kobj_type f2fs_feat_ktype = {
.default_groups = f2fs_feat_groups,
- .sysfs_ops = &f2fs_attr_ops,
+ .sysfs_ops = &f2fs_feat_attr_ops,
};
static struct kobject f2fs_feat = {
.kset = &f2fs_kset,
};
+static const struct sysfs_ops f2fs_tune_attr_ops = {
+ .show = f2fs_base_attr_show,
+ .store = f2fs_base_attr_store,
+};
+
+static const struct kobj_type f2fs_tune_ktype = {
+ .default_groups = f2fs_tune_groups,
+ .sysfs_ops = &f2fs_tune_attr_ops,
+};
+
+static struct kobject f2fs_tune = {
+ .kset = &f2fs_kset,
+};
+
static ssize_t f2fs_stat_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -1019,7 +1607,7 @@ static const struct sysfs_ops f2fs_stat_attr_ops = {
.store = f2fs_stat_attr_store,
};
-static struct kobj_type f2fs_stat_ktype = {
+static const struct kobj_type f2fs_stat_ktype = {
.default_groups = f2fs_stat_groups,
.sysfs_ops = &f2fs_stat_attr_ops,
.release = f2fs_stat_kobj_release,
@@ -1046,7 +1634,7 @@ static const struct sysfs_ops f2fs_feature_list_attr_ops = {
.show = f2fs_sb_feat_attr_show,
};
-static struct kobj_type f2fs_feature_list_ktype = {
+static const struct kobj_type f2fs_feature_list_ktype = {
.default_groups = f2fs_sb_feat_groups,
.sysfs_ops = &f2fs_feature_list_attr_ops,
.release = f2fs_feature_list_kobj_release,
@@ -1088,7 +1676,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
le32_to_cpu(sbi->raw_super->segment_count_main);
int i, j;
- seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+ seq_puts(seq, "format: segment_type|valid_blocks|bitmaps|mtime\n"
"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
for (i = 0; i < total_segs; i++) {
@@ -1098,6 +1686,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+ seq_printf(seq, "| %llx", se->mtime);
seq_putc(seq, '\n');
}
return 0;
@@ -1125,6 +1714,172 @@ static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
return 0;
}
+static int __maybe_unused discard_plist_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ int i, count;
+
+ seq_puts(seq, "Discard pend list(Show diacrd_cmd count on each entry, .:not exist):\n");
+ if (!f2fs_realtime_discard_enable(sbi))
+ return 0;
+
+ if (dcc) {
+ mutex_lock(&dcc->cmd_lock);
+ for (i = 0; i < MAX_PLIST_NUM; i++) {
+ struct list_head *pend_list;
+ struct discard_cmd *dc, *tmp;
+
+ if (i % 8 == 0)
+ seq_printf(seq, " %-3d", i);
+ count = 0;
+ pend_list = &dcc->pend_list[i];
+ list_for_each_entry_safe(dc, tmp, pend_list, list)
+ count++;
+ if (count)
+ seq_printf(seq, " %7d", count);
+ else
+ seq_puts(seq, " .");
+ if (i % 8 == 7)
+ seq_putc(seq, '\n');
+ }
+ seq_putc(seq, '\n');
+ mutex_unlock(&dcc->cmd_lock);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused disk_map_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i;
+
+ seq_printf(seq, "Address Layout : %5luB Block address (# of Segments)\n",
+ F2FS_BLKSIZE);
+ seq_printf(seq, " SB : %12s\n", "0/1024B");
+ seq_printf(seq, " seg0_blkaddr : 0x%010x\n", SEG0_BLKADDR(sbi));
+ seq_printf(seq, " Checkpoint : 0x%010x (%10d)\n",
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr), 2);
+ seq_printf(seq, " SIT : 0x%010x (%10d)\n",
+ SIT_I(sbi)->sit_base_addr,
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_sit));
+ seq_printf(seq, " NAT : 0x%010x (%10d)\n",
+ NM_I(sbi)->nat_blkaddr,
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_nat));
+ seq_printf(seq, " SSA : 0x%010x (%10d)\n",
+ SM_I(sbi)->ssa_blkaddr,
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_ssa));
+ seq_printf(seq, " Main : 0x%010x (%10d)\n",
+ SM_I(sbi)->main_blkaddr,
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main));
+ seq_printf(seq, " Block size : %12lu KB\n", F2FS_BLKSIZE >> 10);
+ seq_printf(seq, " Segment size : %12d MB\n",
+ (BLKS_PER_SEG(sbi) << (F2FS_BLKSIZE_BITS - 10)) >> 10);
+ seq_printf(seq, " Segs/Sections : %12d\n",
+ SEGS_PER_SEC(sbi));
+ seq_printf(seq, " Section size : %12d MB\n",
+ (BLKS_PER_SEC(sbi) << (F2FS_BLKSIZE_BITS - 10)) >> 10);
+ seq_printf(seq, " # of Sections : %12d\n",
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->section_count));
+
+ if (!f2fs_is_multi_device(sbi))
+ return 0;
+
+ seq_puts(seq, "\nDisk Map for multi devices:\n");
+ for (i = 0; i < sbi->s_ndevs; i++)
+ seq_printf(seq, "Disk:%2d (zoned=%d): 0x%010x - 0x%010x on %s\n",
+ i, bdev_is_zoned(FDEV(i).bdev),
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ FDEV(i).path);
+ return 0;
+}
+
+static int __maybe_unused donation_list_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ struct dentry *dentry;
+ char *buf, *path;
+ int i;
+
+ buf = f2fs_getname(sbi);
+ if (!buf)
+ return 0;
+
+ seq_printf(seq, "Donation List\n");
+ seq_printf(seq, " # of files : %u\n", sbi->donate_files);
+ seq_printf(seq, " %-50s %10s %20s %20s %22s\n",
+ "File path", "Status", "Donation offset (kb)",
+ "Donation size (kb)", "File cached size (kb)");
+ seq_printf(seq, "---\n");
+
+ for (i = 0; i < sbi->donate_files; i++) {
+ spin_lock(&sbi->inode_lock[DONATE_INODE]);
+ if (list_empty(&sbi->inode_list[DONATE_INODE])) {
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+ break;
+ }
+ fi = list_first_entry(&sbi->inode_list[DONATE_INODE],
+ struct f2fs_inode_info, gdonate_list);
+ list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DONATE_INODE]);
+
+ if (!inode)
+ continue;
+
+ inode_lock_shared(inode);
+
+ dentry = d_find_alias(inode);
+ if (!dentry) {
+ path = NULL;
+ } else {
+ path = dentry_path_raw(dentry, buf, PATH_MAX);
+ if (IS_ERR(path))
+ goto next;
+ }
+ seq_printf(seq, " %-50s %10s %20llu %20llu %22llu\n",
+ path ? path : "<unlinked>",
+ is_inode_flag_set(inode, FI_DONATE_FINISHED) ?
+ "Evicted" : "Donated",
+ (loff_t)fi->donate_start << (PAGE_SHIFT - 10),
+ (loff_t)(fi->donate_end + 1) << (PAGE_SHIFT - 10),
+ (loff_t)inode->i_mapping->nrpages << (PAGE_SHIFT - 10));
+next:
+ dput(dentry);
+ inode_unlock_shared(inode);
+ iput(inode);
+ }
+ f2fs_putname(buf);
+ return 0;
+}
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+static int __maybe_unused inject_stats_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+ int i;
+
+ seq_puts(seq, "fault_type injected_count\n");
+
+ for (i = 0; i < FAULT_MAX; i++)
+ seq_printf(seq, "%-24s%-10u\n", f2fs_fault_name[i],
+ ffi->inject_count[i]);
+ return 0;
+}
+#endif
+
int __init f2fs_init_sysfs(void)
{
int ret;
@@ -1137,17 +1892,32 @@ int __init f2fs_init_sysfs(void)
ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
NULL, "features");
- if (ret) {
- kobject_put(&f2fs_feat);
- kset_unregister(&f2fs_kset);
- } else {
- f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ if (ret)
+ goto put_kobject;
+
+ ret = kobject_init_and_add(&f2fs_tune, &f2fs_tune_ktype,
+ NULL, "tuning");
+ if (ret)
+ goto put_kobject;
+
+ f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ if (!f2fs_proc_root) {
+ ret = -ENOMEM;
+ goto put_kobject;
}
+
+ return 0;
+
+put_kobject:
+ kobject_put(&f2fs_tune);
+ kobject_put(&f2fs_feat);
+ kset_unregister(&f2fs_kset);
return ret;
}
void f2fs_exit_sysfs(void)
{
+ kobject_put(&f2fs_tune);
kobject_put(&f2fs_feat);
kset_unregister(&f2fs_kset);
remove_proc_entry("fs/f2fs", NULL);
@@ -1181,21 +1951,32 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
if (err)
goto put_feature_list_kobj;
- if (f2fs_proc_root)
- sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+ sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+ if (!sbi->s_proc) {
+ err = -ENOMEM;
+ goto put_feature_list_kobj;
+ }
- if (sbi->s_proc) {
- proc_create_single_data("segment_info", 0444, sbi->s_proc,
+ proc_create_single_data("segment_info", 0444, sbi->s_proc,
segment_info_seq_show, sb);
- proc_create_single_data("segment_bits", 0444, sbi->s_proc,
+ proc_create_single_data("segment_bits", 0444, sbi->s_proc,
segment_bits_seq_show, sb);
#ifdef CONFIG_F2FS_IOSTAT
- proc_create_single_data("iostat_info", 0444, sbi->s_proc,
+ proc_create_single_data("iostat_info", 0444, sbi->s_proc,
iostat_info_seq_show, sb);
#endif
- proc_create_single_data("victim_bits", 0444, sbi->s_proc,
+ proc_create_single_data("victim_bits", 0444, sbi->s_proc,
victim_bits_seq_show, sb);
- }
+ proc_create_single_data("discard_plist_info", 0444, sbi->s_proc,
+ discard_plist_seq_show, sb);
+ proc_create_single_data("disk_map", 0444, sbi->s_proc,
+ disk_map_seq_show, sb);
+ proc_create_single_data("donation_list", 0444, sbi->s_proc,
+ donation_list_seq_show, sb);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ proc_create_single_data("inject_stats", 0444, sbi->s_proc,
+ inject_stats_seq_show, sb);
+#endif
return 0;
put_feature_list_kobj:
kobject_put(&sbi->s_feature_list_kobj);
@@ -1211,24 +1992,13 @@ put_sb_kobj:
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
{
- if (sbi->s_proc) {
-#ifdef CONFIG_F2FS_IOSTAT
- remove_proc_entry("iostat_info", sbi->s_proc);
-#endif
- remove_proc_entry("segment_info", sbi->s_proc);
- remove_proc_entry("segment_bits", sbi->s_proc);
- remove_proc_entry("victim_bits", sbi->s_proc);
- remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
- }
+ remove_proc_subtree(sbi->sb->s_id, f2fs_proc_root);
- kobject_del(&sbi->s_stat_kobj);
kobject_put(&sbi->s_stat_kobj);
wait_for_completion(&sbi->s_stat_kobj_unregister);
- kobject_del(&sbi->s_feature_list_kobj);
kobject_put(&sbi->s_feature_list_kobj);
wait_for_completion(&sbi->s_feature_list_kobj_unregister);
- kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
}
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index fe5acdccaae1..05b935b55216 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -47,16 +47,13 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
struct page *page;
- void *addr;
page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
NULL);
if (IS_ERR(page))
return PTR_ERR(page);
- addr = kmap_atomic(page);
- memcpy(buf, addr + offset_in_page(pos), n);
- kunmap_atomic(addr);
+ memcpy_from_page(buf, page, offset_in_page(pos), n);
put_page(page);
@@ -74,28 +71,26 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
loff_t pos)
{
- if (pos + count > inode->i_sb->s_maxbytes)
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+
+ if (pos + count > F2FS_BLK_TO_BYTES(max_file_blocks(inode)))
return -EFBIG;
while (count) {
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
- struct page *page;
- void *fsdata;
- void *addr;
+ struct folio *folio;
+ void *fsdata = NULL;
int res;
- res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
- &page, &fsdata);
+ res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
if (res)
return res;
- addr = kmap_atomic(page);
- memcpy(addr + offset_in_page(pos), buf, n);
- kunmap_atomic(addr);
+ memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);
- res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
- page, fsdata);
+ res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
if (res < 0)
return res;
if (res != n)
@@ -128,7 +123,7 @@ static int f2fs_begin_enable_verity(struct file *filp)
if (f2fs_verity_in_progress(inode))
return -EBUSY;
- if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+ if (f2fs_is_atomic_file(inode))
return -EOPNOTSUPP;
/*
@@ -208,7 +203,7 @@ cleanup:
* from re-instantiating cached pages we are truncating (since unlike
* normal file accesses, garbage collection isn't limited by i_size).
*/
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
truncate_inode_pages(inode->i_mapping, inode->i_size);
err2 = f2fs_truncate(inode);
if (err2) {
@@ -216,7 +211,7 @@ cleanup:
err2);
set_sbi_flag(sbi, SBI_NEED_FSCK);
}
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
return err ?: err2;
}
@@ -242,9 +237,12 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
pos = le64_to_cpu(dloc.pos);
/* Get the descriptor */
- if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes ||
+ if (pos + size < pos ||
+ pos + size > F2FS_BLK_TO_BYTES(max_file_blocks(inode)) ||
pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_VERITY_XATTR);
return -EFSCORRUPTED;
}
if (buf_size) {
@@ -261,31 +259,36 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
{
- DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
- struct page *page;
+ struct folio *folio;
index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
- page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
- if (!page || !PageUptodate(page)) {
- if (page)
- put_page(page);
+ folio = f2fs_filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+ DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+
+ if (!IS_ERR(folio))
+ folio_put(folio);
else if (num_ra_pages > 1)
page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
- page = read_mapping_page(inode->i_mapping, index, NULL);
+ folio = read_mapping_folio(inode->i_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
}
- return page;
+ return folio_file_page(folio, index);
}
static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,
- u64 index, int log_blocksize)
+ u64 pos, unsigned int size)
{
- loff_t pos = f2fs_verity_metadata_pos(inode) + (index << log_blocksize);
+ pos += f2fs_verity_metadata_pos(inode);
- return pagecache_write(inode, buf, 1 << log_blocksize, pos);
+ return pagecache_write(inode, buf, size, pos);
}
const struct fsverity_operations f2fs_verityops = {
+ .inode_info_offs = (int)offsetof(struct f2fs_inode_info, i_verity_info) -
+ (int)offsetof(struct f2fs_inode_info, vfs_inode),
.begin_enable_verity = f2fs_begin_enable_verity,
.end_enable_verity = f2fs_end_enable_verity,
.get_verity_descriptor = f2fs_get_verity_descriptor,
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e348f33bcb2b..b4e5c406632f 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -23,11 +23,12 @@
#include "xattr.h"
#include "segment.h"
+static struct kmem_cache *inline_xattr_slab;
static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
{
- if (likely(size == sbi->inline_xattr_slab_size)) {
+ if (likely(size == DEFAULT_XATTR_SLAB_SIZE)) {
*is_inline = true;
- return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab,
+ return f2fs_kmem_cache_alloc(inline_xattr_slab,
GFP_F2FS_ZERO, false, sbi);
}
*is_inline = false;
@@ -38,7 +39,7 @@ static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr,
bool is_inline)
{
if (is_inline)
- kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
+ kmem_cache_free(inline_xattr_slab, xattr_addr);
else
kfree(xattr_addr);
}
@@ -65,7 +66,7 @@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
}
static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -109,7 +110,7 @@ static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
}
static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -117,7 +118,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
unsigned char old_advise = F2FS_I(inode)->i_advise;
unsigned char new_advise;
- if (!inode_owner_or_capable(&init_user_ns, inode))
+ if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
return -EPERM;
if (value == NULL)
return -EINVAL;
@@ -136,7 +137,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
#ifdef CONFIG_F2FS_FS_SECURITY
static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *page)
+ void *folio)
{
const struct xattr *xattr;
int err = 0;
@@ -144,7 +145,7 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
xattr->name, xattr->value,
- xattr->value_len, (struct page *)page, 0);
+ xattr->value_len, folio, 0);
if (err < 0)
break;
}
@@ -152,10 +153,10 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
}
int f2fs_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, struct page *ipage)
+ const struct qstr *qstr, struct folio *ifolio)
{
return security_inode_init_security(inode, dir, qstr,
- &f2fs_initxattrs, ipage);
+ f2fs_initxattrs, ifolio);
}
#endif
@@ -189,11 +190,11 @@ const struct xattr_handler f2fs_xattr_security_handler = {
.set = f2fs_xattr_generic_set,
};
-static const struct xattr_handler *f2fs_xattr_handler_map[] = {
+static const struct xattr_handler * const f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
- [F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
+ [F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
+ [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
#endif
[F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
@@ -202,12 +203,8 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};
-const struct xattr_handler *f2fs_xattr_handlers[] = {
+const struct xattr_handler * const f2fs_xattr_handlers[] = {
&f2fs_xattr_user_handler,
-#ifdef CONFIG_F2FS_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
&f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
&f2fs_xattr_security_handler,
@@ -216,25 +213,33 @@ const struct xattr_handler *f2fs_xattr_handlers[] = {
NULL,
};
-static inline const struct xattr_handler *f2fs_xattr_handler(int index)
+static inline const char *f2fs_xattr_prefix(int index,
+ struct dentry *dentry)
{
const struct xattr_handler *handler = NULL;
if (index > 0 && index < ARRAY_SIZE(f2fs_xattr_handler_map))
handler = f2fs_xattr_handler_map[index];
- return handler;
+
+ if (!xattr_handler_can_list(handler, dentry))
+ return NULL;
+
+ return xattr_prefix(handler);
}
static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
- void *last_base_addr, int index,
- size_t len, const char *name)
+ void *last_base_addr, void **last_addr,
+ int index, size_t len, const char *name)
{
struct f2fs_xattr_entry *entry;
list_for_each_xattr(entry, base_addr) {
if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
- (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+ if (last_addr)
+ *last_addr = entry;
return NULL;
+ }
if (entry->e_name_index != index)
continue;
@@ -254,19 +259,9 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
unsigned int inline_size = inline_xattr_size(inode);
void *max_addr = base_addr + inline_size;
- list_for_each_xattr(entry, base_addr) {
- if ((void *)entry + sizeof(__u32) > max_addr ||
- (void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
- *last_addr = entry;
- return NULL;
- }
- if (entry->e_name_index != index)
- continue;
- if (entry->e_name_len != len)
- continue;
- if (!memcmp(entry->e_name, name, len))
- break;
- }
+ entry = __find_xattr(base_addr, max_addr, last_addr, index, len, name);
+ if (!entry)
+ return NULL;
/* inline xattr header or entry across max inline xattr size */
if (IS_XATTR_LAST_ENTRY(entry) &&
@@ -277,25 +272,25 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
return entry;
}
-static int read_inline_xattr(struct inode *inode, struct page *ipage,
+static int read_inline_xattr(struct inode *inode, struct folio *ifolio,
void *txattr_addr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int inline_size = inline_xattr_size(inode);
- struct page *page = NULL;
+ struct folio *folio = NULL;
void *inline_addr;
- if (ipage) {
- inline_addr = inline_xattr_addr(inode, ipage);
+ if (ifolio) {
+ inline_addr = inline_xattr_addr(inode, ifolio);
} else {
- page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- inline_addr = inline_xattr_addr(inode, page);
+ inline_addr = inline_xattr_addr(inode, folio);
}
memcpy(txattr_addr, inline_addr, inline_size);
- f2fs_put_page(page, 1);
+ f2fs_folio_put(folio, true);
return 0;
}
@@ -305,22 +300,22 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int inline_size = inline_xattr_size(inode);
- struct page *xpage;
+ struct folio *xfolio;
void *xattr_addr;
/* The inode already has an extended attribute block. */
- xpage = f2fs_get_node_page(sbi, xnid);
- if (IS_ERR(xpage))
- return PTR_ERR(xpage);
+ xfolio = f2fs_get_xnode_folio(sbi, xnid);
+ if (IS_ERR(xfolio))
+ return PTR_ERR(xfolio);
- xattr_addr = page_address(xpage);
+ xattr_addr = folio_address(xfolio);
memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
- f2fs_put_page(xpage, 1);
+ f2fs_folio_put(xfolio, true);
return 0;
}
-static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+static int lookup_all_xattrs(struct inode *inode, struct folio *ifolio,
unsigned int index, unsigned int len,
const char *name, struct f2fs_xattr_entry **xe,
void **base_addr, int *base_size,
@@ -344,7 +339,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
/* read from inline xattr */
if (inline_size) {
- err = read_inline_xattr(inode, ipage, txattr_addr);
+ err = read_inline_xattr(inode, ifolio, txattr_addr);
if (err)
goto out;
@@ -368,12 +363,14 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
else
cur_addr = txattr_addr;
- *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+ *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
if (!*xe) {
- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
- err = -EFSCORRUPTED;
+ err = -ENODATA;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
goto out;
}
check:
@@ -389,7 +386,7 @@ out:
return err;
}
-static int read_all_xattrs(struct inode *inode, struct page *ipage,
+static int read_all_xattrs(struct inode *inode, struct folio *ifolio,
void **base_addr)
{
struct f2fs_xattr_header *header;
@@ -406,7 +403,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
/* read from inline xattr */
if (inline_size) {
- err = read_inline_xattr(inode, ipage, txattr_addr);
+ err = read_inline_xattr(inode, ifolio, txattr_addr);
if (err)
goto fail;
}
@@ -433,14 +430,14 @@ fail:
}
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
- void *txattr_addr, struct page *ipage)
+ void *txattr_addr, struct folio *ifolio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
size_t inline_size = inline_xattr_size(inode);
- struct page *in_page = NULL;
+ struct folio *in_folio = NULL;
void *xattr_addr;
void *inline_addr = NULL;
- struct page *xpage;
+ struct folio *xfolio;
nid_t new_nid = 0;
int err = 0;
@@ -450,73 +447,73 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
/* write to inline xattr */
if (inline_size) {
- if (ipage) {
- inline_addr = inline_xattr_addr(inode, ipage);
+ if (ifolio) {
+ inline_addr = inline_xattr_addr(inode, ifolio);
} else {
- in_page = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(in_page)) {
+ in_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
+ if (IS_ERR(in_folio)) {
f2fs_alloc_nid_failed(sbi, new_nid);
- return PTR_ERR(in_page);
+ return PTR_ERR(in_folio);
}
- inline_addr = inline_xattr_addr(inode, in_page);
+ inline_addr = inline_xattr_addr(inode, in_folio);
}
- f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
+ f2fs_folio_wait_writeback(ifolio ? ifolio : in_folio,
NODE, true, true);
/* no need to use xattr node block */
if (hsize <= inline_size) {
err = f2fs_truncate_xattr_node(inode);
f2fs_alloc_nid_failed(sbi, new_nid);
if (err) {
- f2fs_put_page(in_page, 1);
+ f2fs_folio_put(in_folio, true);
return err;
}
memcpy(inline_addr, txattr_addr, inline_size);
- set_page_dirty(ipage ? ipage : in_page);
+ folio_mark_dirty(ifolio ? ifolio : in_folio);
goto in_page_out;
}
}
/* write to xattr node block */
if (F2FS_I(inode)->i_xattr_nid) {
- xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
- if (IS_ERR(xpage)) {
- err = PTR_ERR(xpage);
+ xfolio = f2fs_get_xnode_folio(sbi, F2FS_I(inode)->i_xattr_nid);
+ if (IS_ERR(xfolio)) {
+ err = PTR_ERR(xfolio);
f2fs_alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
f2fs_bug_on(sbi, new_nid);
- f2fs_wait_on_page_writeback(xpage, NODE, true, true);
+ f2fs_folio_wait_writeback(xfolio, NODE, true, true);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
- xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
- if (IS_ERR(xpage)) {
- err = PTR_ERR(xpage);
+ xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(xfolio)) {
+ err = PTR_ERR(xfolio);
f2fs_alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
f2fs_alloc_nid_done(sbi, new_nid);
}
- xattr_addr = page_address(xpage);
+ xattr_addr = folio_address(xfolio);
if (inline_size)
memcpy(inline_addr, txattr_addr, inline_size);
memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);
if (inline_size)
- set_page_dirty(ipage ? ipage : in_page);
- set_page_dirty(xpage);
+ folio_mark_dirty(ifolio ? ifolio : in_folio);
+ folio_mark_dirty(xfolio);
- f2fs_put_page(xpage, 1);
+ f2fs_folio_put(xfolio, true);
in_page_out:
- f2fs_put_page(in_page, 1);
+ f2fs_folio_put(in_folio, true);
return err;
}
int f2fs_getxattr(struct inode *inode, int index, const char *name,
- void *buffer, size_t buffer_size, struct page *ipage)
+ void *buffer, size_t buffer_size, struct folio *ifolio)
{
struct f2fs_xattr_entry *entry = NULL;
int error;
@@ -532,10 +529,12 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
- down_read(&F2FS_I(inode)->i_xattr_sem);
- error = lookup_all_xattrs(inode, ipage, index, len, name,
+ if (!ifolio)
+ f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
+ error = lookup_all_xattrs(inode, ifolio, index, len, name,
&entry, &base_addr, &base_size, &is_inline);
- up_read(&F2FS_I(inode)->i_xattr_sem);
+ if (!ifolio)
+ f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
@@ -569,34 +568,34 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
int error;
size_t rest = buffer_size;
- down_read(&F2FS_I(inode)->i_xattr_sem);
+ f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = read_all_xattrs(inode, NULL, &base_addr);
- up_read(&F2FS_I(inode)->i_xattr_sem);
+ f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
list_for_each_xattr(entry, base_addr) {
- const struct xattr_handler *handler =
- f2fs_xattr_handler(entry->e_name_index);
const char *prefix;
size_t prefix_len;
size_t size;
+ prefix = f2fs_xattr_prefix(entry->e_name_index, dentry);
+
if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
- error = -EFSCORRUPTED;
- goto cleanup;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+ break;
}
- if (!handler || (handler->list && !handler->list(dentry)))
+ if (!prefix)
continue;
- prefix = xattr_prefix(handler);
prefix_len = strlen(prefix);
size = prefix_len + entry->e_name_len + 1;
if (buffer) {
@@ -629,8 +628,9 @@ static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry,
static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
- struct page *ipage, int flags)
+ struct folio *ifolio, int flags)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr, *last_base_addr;
int found, newsize;
@@ -651,20 +651,32 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
-
- error = read_all_xattrs(inode, ipage, &base_addr);
+retry:
+ error = read_all_xattrs(inode, ifolio, &base_addr);
if (error)
return error;
last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
/* find entry with wanted name. */
- here = __find_xattr(base_addr, last_base_addr, index, len, name);
+ here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
if (!here) {
- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ if (!F2FS_I(inode)->i_xattr_nid) {
+ error = f2fs_recover_xattr_data(inode, NULL);
+ f2fs_notice(F2FS_I_SB(inode),
+ "recover xattr in inode (%lu), error(%d)",
+ inode->i_ino, error);
+ if (!error) {
+ kfree(base_addr);
+ goto retry;
+ }
+ }
+ f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
error = -EFSCORRUPTED;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
goto exit;
}
@@ -684,8 +696,19 @@ static int __f2fs_setxattr(struct inode *inode, int index,
}
last = here;
- while (!IS_XATTR_LAST_ENTRY(last))
+ while (!IS_XATTR_LAST_ENTRY(last)) {
+ if ((void *)(last) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(last) > last_base_addr) {
+ f2fs_err(F2FS_I_SB(inode), "inode (%lu) has invalid last xattr entry, entry_size: %zu",
+ inode->i_ino, ENTRY_SIZE(last));
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+ goto exit;
+ }
last = XATTR_NEXT_ENTRY(last);
+ }
newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);
@@ -736,26 +759,41 @@ static int __f2fs_setxattr(struct inode *inode, int index,
memcpy(pval, value, size);
last->e_value_size = cpu_to_le16(size);
new_hsize += newsize;
+ /*
+ * Explicitly add the null terminator. The unused xattr space
+ * is supposed to always be zeroed, which would make this
+ * unnecessary, but don't depend on that.
+ */
+ *(u32 *)((u8 *)last + newsize) = 0;
}
- error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
+ error = write_all_xattrs(inode, new_hsize, base_addr, ifolio);
if (error)
goto exit;
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- f2fs_mark_inode_dirty_sync(inode, true);
- if (!error && S_ISDIR(inode->i_mode))
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+ if (!S_ISDIR(inode->i_mode))
+ goto same;
+ /*
+ * In strict mode, fsync() always tries to trigger a checkpoint for
+ * full metadata consistency; in other modes, it triggers a checkpoint
+ * when the parent's xattr metadata was updated.
+ */
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ set_sbi_flag(sbi, SBI_NEED_CP);
+ else
+ f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
- inode->i_ctime = current_time(inode);
clear_inode_flag(inode, FI_ACL_MODE);
}
+ inode_set_ctime_current(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
exit:
kfree(base_addr);
return error;
@@ -763,7 +801,7 @@ exit:
int f2fs_setxattr(struct inode *inode, int index, const char *name,
const void *value, size_t size,
- struct page *ipage, int flags)
+ struct folio *ifolio, int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
@@ -778,40 +816,29 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
return err;
/* this case is only from f2fs_init_inode_metadata */
- if (ipage)
+ if (ifolio)
return __f2fs_setxattr(inode, index, name, value,
- size, ipage, flags);
+ size, ifolio, flags);
f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
- down_write(&F2FS_I(inode)->i_xattr_sem);
- err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
- up_write(&F2FS_I(inode)->i_xattr_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
+ err = __f2fs_setxattr(inode, index, name, value, size, NULL, flags);
+ f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
f2fs_unlock_op(sbi);
f2fs_update_time(sbi, REQ_TIME);
return err;
}
-int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
+int __init f2fs_init_xattr_cache(void)
{
- dev_t dev = sbi->sb->s_bdev->bd_dev;
- char slab_name[32];
-
- sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));
-
- sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
- sizeof(__le32) + XATTR_PADDING_SIZE;
-
- sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
- sbi->inline_xattr_slab_size);
- if (!sbi->inline_xattr_slab)
- return -ENOMEM;
-
- return 0;
+ inline_xattr_slab = f2fs_kmem_cache_create("f2fs_xattr_entry",
+ DEFAULT_XATTR_SLAB_SIZE);
+ return inline_xattr_slab ? 0 : -ENOMEM;
}
-void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
+void f2fs_destroy_xattr_cache(void)
{
- kmem_cache_destroy(sbi->inline_xattr_slab);
-}
+ kmem_cache_destroy(inline_xattr_slab);
+}
\ No newline at end of file
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 416d652774a3..bce3d93e4755 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -83,11 +83,14 @@ struct f2fs_xattr_entry {
sizeof(struct f2fs_xattr_header) - \
sizeof(struct f2fs_xattr_entry))
+#define MIN_INLINE_XATTR_SIZE (sizeof(struct f2fs_xattr_header) / sizeof(__le32))
#define MAX_INLINE_XATTR_SIZE \
(DEF_ADDRS_PER_INODE - \
F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) - \
DEF_INLINE_RESERVED_SIZE - \
MIN_INLINE_DENTRY_SIZE / sizeof(__le32))
+#define DEFAULT_XATTR_SLAB_SIZE (DEFAULT_INLINE_XATTR_ADDRS * \
+ sizeof(__le32) + XATTR_PADDING_SIZE)
/*
* On-disk structure of f2fs_xattr
@@ -124,41 +127,41 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler;
extern const struct xattr_handler f2fs_xattr_advise_handler;
extern const struct xattr_handler f2fs_xattr_security_handler;
-extern const struct xattr_handler *f2fs_xattr_handlers[];
+extern const struct xattr_handler * const f2fs_xattr_handlers[];
-extern int f2fs_setxattr(struct inode *, int, const char *,
- const void *, size_t, struct page *, int);
-extern int f2fs_getxattr(struct inode *, int, const char *, void *,
- size_t, struct page *);
-extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
-extern int f2fs_init_xattr_caches(struct f2fs_sb_info *);
-extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
+int f2fs_setxattr(struct inode *, int, const char *, const void *,
+ size_t, struct folio *, int);
+int f2fs_getxattr(struct inode *, int, const char *, void *,
+ size_t, struct folio *);
+ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+int __init f2fs_init_xattr_cache(void);
+void f2fs_destroy_xattr_cache(void);
#else
#define f2fs_xattr_handlers NULL
#define f2fs_listxattr NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
- struct page *page, int flags)
+ struct folio *folio, int flags)
{
return -EOPNOTSUPP;
}
static inline int f2fs_getxattr(struct inode *inode, int index,
const char *name, void *buffer,
- size_t buffer_size, struct page *dpage)
+ size_t buffer_size, struct folio *dfolio)
{
return -EOPNOTSUPP;
}
-static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
-static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
+static inline int __init f2fs_init_xattr_cache(void) { return 0; }
+static inline void f2fs_destroy_xattr_cache(void) { }
#endif
#ifdef CONFIG_F2FS_FS_SECURITY
-extern int f2fs_init_security(struct inode *, struct inode *,
- const struct qstr *, struct page *);
+int f2fs_init_security(struct inode *, struct inode *,
+ const struct qstr *, struct folio *);
#else
static inline int f2fs_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, struct page *ipage)
+ const struct qstr *qstr, struct folio *ifolio)
{
return 0;
}
diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig
index 238cc55f84c4..25fae1c83725 100644
--- a/fs/fat/Kconfig
+++ b/fs/fat/Kconfig
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config FAT_FS
tristate
+ select BUFFER_HEAD
select NLS
+ select LEGACY_DIRECT_IO
help
If you want to use one of the FAT-based file systems (the MS-DOS and
VFAT (Windows 95) file systems), then you must say Y or M here
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 738e427e2d21..2af424e200b3 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -47,7 +47,7 @@ int __init fat_cache_init(void)
{
fat_cache_cachep = kmem_cache_create("fat_cache",
sizeof(struct fat_cache),
- 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+ 0, SLAB_RECLAIM_ACCOUNT,
init_once);
if (fat_cache_cachep == NULL)
return -ENOMEM;
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index c4a274285858..92b091783966 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -269,6 +269,18 @@ enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, };
/**
* fat_parse_long - Parse extended directory entry.
*
+ * @dir: Pointer to the inode that represents the directory.
+ * @pos: On input, contains the starting position to read from.
+ * On output, updated with the new position.
+ * @bh: Pointer to the buffer head that may be used for reading directory
+ * entries. May be updated.
+ * @de: On input, points to the current directory entry.
+ * On output, points to the next directory entry.
+ * @unicode: Pointer to a buffer where the parsed Unicode long filename will be
+ * stored.
+ * @nr_slots: Pointer to a variable that will store the number of longname
+ * slots found.
+ *
* This function returns zero on success, negative value on error, or one of
* the following:
*
@@ -705,7 +717,7 @@ static int fat_readdir(struct file *file, struct dir_context *ctx)
}
#define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \
-static int func(struct dir_context *ctx, const char *name, int name_len, \
+static bool func(struct dir_context *ctx, const char *name, int name_len, \
loff_t offset, u64 ino, unsigned int d_type) \
{ \
struct fat_ioctl_filldir_callback *buf = \
@@ -714,7 +726,7 @@ static int func(struct dir_context *ctx, const char *name, int name_len, \
struct dirent_type __user *d2 = d1 + 1; \
\
if (buf->result) \
- return -EINVAL; \
+ return false; \
buf->result++; \
\
if (name != NULL) { \
@@ -722,7 +734,7 @@ static int func(struct dir_context *ctx, const char *name, int name_len, \
if (name_len >= sizeof(d1->d_name)) \
name_len = sizeof(d1->d_name) - 1; \
\
- if (put_user(0, d2->d_name) || \
+ if (put_user(0, &d2->d_name[0]) || \
put_user(0, &d2->d_reclen) || \
copy_to_user(d1->d_name, name, name_len) || \
put_user(0, d1->d_name + name_len) || \
@@ -750,10 +762,10 @@ static int func(struct dir_context *ctx, const char *name, int name_len, \
put_user(short_len, &d1->d_reclen)) \
goto efault; \
} \
- return 0; \
+ return true; \
efault: \
buf->result = -EFAULT; \
- return -EFAULT; \
+ return false; \
}
FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent)
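The hunk above tracks the VFS change that made dir_context actors return bool (true to keep iterating, false to stop) instead of an errno-style int; the per-call error now lives in the callback's own state (buf->result here). A minimal sketch of that convention, for illustration only — count_ctx, count_actor and the field names are invented and are not part of this patch:

#include <linux/fs.h>

struct count_ctx {
	struct dir_context ctx;		/* embedded VFS iteration context */
	unsigned long seen;		/* entries visited so far */
	unsigned long limit;		/* stop once this many are seen */
};

static bool count_actor(struct dir_context *ctx, const char *name,
			int name_len, loff_t offset, u64 ino,
			unsigned int d_type)
{
	struct count_ctx *c = container_of(ctx, struct count_ctx, ctx);

	c->seen++;
	/* returning false asks iterate_dir() to stop early */
	return c->seen < c->limit;
}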
@@ -1197,7 +1209,7 @@ EXPORT_SYMBOL_GPL(fat_alloc_new_dir);
static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
int *nr_cluster, struct msdos_dir_entry **de,
- struct buffer_head **bh, loff_t *i_pos)
+ struct buffer_head **bh)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -1257,7 +1269,6 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
get_bh(bhs[n]);
*bh = bhs[n];
*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);
- *i_pos = fat_make_i_pos(sb, *bh, *de);
/* Second stage: clear the rest of cluster, and write outs */
err = fat_zeroed_cluster(dir, start_blknr, ++n, bhs, MAX_BUF_PER_PAGE);
@@ -1286,7 +1297,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */
struct msdos_dir_entry *de;
int err, free_slots, i, nr_bhs;
- loff_t pos, i_pos;
+ loff_t pos;
sinfo->nr_slots = nr_slots;
@@ -1374,7 +1385,7 @@ found:
* add the cluster to dir.
*/
cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster,
- &de, &bh, &i_pos);
+ &de, &bh);
if (cluster < 0) {
err = cluster;
goto error_remove;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 02d4d4234956..d3e426de5f01 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -7,6 +7,8 @@
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/msdos_fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
/*
* vfat shortname flags
@@ -51,7 +53,8 @@ struct fat_mount_options {
tz_set:1, /* Filesystem timestamps' offset set */
rodir:1, /* allow ATTR_RO for directory */
discard:1, /* Issue discard requests on deletions */
- dos1xfloppy:1; /* Assume default BPB for DOS 1.x floppies */
+ dos1xfloppy:1, /* Assume default BPB for DOS 1.x floppies */
+ debug:1; /* Not currently used */
};
#define FAT_HASH_BITS 8
@@ -126,6 +129,7 @@ struct msdos_inode_info {
struct hlist_node i_fat_hash; /* hash by i_location */
struct hlist_node i_dir_hash; /* hash by i_logstart */
struct rw_semaphore truncate_lock; /* protect bmap against truncate */
+ struct timespec64 i_crtime; /* File creation (birth) time */
struct inode vfs_inode;
};
@@ -397,10 +401,10 @@ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern const struct file_operations fat_file_operations;
extern const struct inode_operations fat_file_inode_operations;
-extern int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+extern int fat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
extern void fat_truncate_blocks(struct inode *inode, loff_t offset);
-extern int fat_getattr(struct user_namespace *mnt_userns,
+extern int fat_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
extern int fat_file_fsync(struct file *file, loff_t start, loff_t end,
@@ -414,12 +418,21 @@ extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
extern struct inode *fat_build_inode(struct super_block *sb,
struct msdos_dir_entry *de, loff_t i_pos);
extern int fat_sync_inode(struct inode *inode);
-extern int fat_fill_super(struct super_block *sb, void *data, int silent,
- int isvfat, void (*setup)(struct super_block *));
+extern int fat_fill_super(struct super_block *sb, struct fs_context *fc,
+ void (*setup)(struct super_block *));
extern int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de);
extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
struct inode *i2);
+
+extern const struct fs_parameter_spec fat_param_spec[];
+int fat_init_fs_context(struct fs_context *fc, bool is_vfat);
+void fat_free_fc(struct fs_context *fc);
+
+int fat_parse_param(struct fs_context *fc, struct fs_parameter *param,
+ bool is_vfat);
+int fat_reconfigure(struct fs_context *fc);
+
static inline unsigned long fat_dir_hash(int logstart)
{
return hash_32(logstart, FAT_HASH_BITS);
@@ -433,8 +446,15 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
__fat_fs_error(sb, 1, fmt , ## args)
#define fat_fs_error_ratelimit(sb, fmt, args...) \
__fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
+
+#define FAT_PRINTK_PREFIX "%sFAT-fs (%s): "
+#define fat_msg(sb, level, fmt, args...) \
+do { \
+ printk_index_subsys_emit(FAT_PRINTK_PREFIX, level, fmt, ##args);\
+ _fat_msg(sb, level, fmt, ##args); \
+} while (0)
__printf(3, 4) __cold
-void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
#define fat_msg_ratelimit(sb, level, fmt, args...) \
do { \
if (__ratelimit(&MSDOS_SB(sb)->ratelimit)) \
@@ -446,10 +466,13 @@ extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs);
extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs);
+extern struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts);
+extern struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts);
extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
int flags);
-extern int fat_update_time(struct inode *inode, struct timespec64 *now,
- int flags);
+extern int fat_update_time(struct inode *inode, int flags);
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
int fat_cache_init(void);
diff --git a/fs/fat/fat_test.c b/fs/fat/fat_test.c
index 2dab4ca1d0d8..1f0062659067 100644
--- a/fs/fat/fat_test.c
+++ b/fs/fat/fat_test.c
@@ -193,4 +193,5 @@ static struct kunit_suite fat_test_suite = {
kunit_test_suites(&fat_test_suite);
+MODULE_DESCRIPTION("KUnit tests for FAT filesystems");
MODULE_LICENSE("GPL v2");
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 978ac6751aeb..a7061c2ad8e4 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -94,7 +94,8 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
err_brelse:
brelse(bhs[0]);
err:
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
+ (llu)blocknr);
return -EIO;
}
@@ -107,8 +108,8 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
fatent->bhs[0] = sb_bread(sb, blocknr);
if (!fatent->bhs[0]) {
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
- (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
+ (llu)blocknr);
return -EIO;
}
fatent->nr_bhs = 1;
@@ -355,7 +356,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
if (!fat_valid_entry(sbi, entry)) {
fatent_brelse(fatent);
- fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
+ fat_fs_error_ratelimit(sb, "invalid access to FAT (entry 0x%08x)", entry);
return -EIO;
}
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 13855ba49cd9..4fc49a614fb8 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -90,12 +90,13 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
* out the RO attribute for checking by the security
* module, just because it maps to a file mode.
*/
- err = security_inode_setattr(file->f_path.dentry, &ia);
+ err = security_inode_setattr(file_mnt_idmap(file),
+ file->f_path.dentry, &ia);
if (err)
goto out_unlock_inode;
/* This MUST be done before doing anything irreversible... */
- err = fat_setattr(file_mnt_user_ns(file), file->f_path.dentry, &ia);
+ err = fat_setattr(file_mnt_idmap(file), file->f_path.dentry, &ia);
if (err)
goto out_unlock_inode;
@@ -127,13 +128,12 @@ static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
struct super_block *sb = inode->i_sb;
struct fstrim_range __user *user_range;
struct fstrim_range range;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(sb->s_bdev))
return -EOPNOTSUPP;
user_range = (struct fstrim_range __user *)arg;
@@ -141,7 +141,7 @@ static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
return -EFAULT;
range.minlen = max_t(unsigned int, range.minlen,
- q->limits.discard_granularity);
+ bdev_discard_granularity(sb->s_bdev));
err = fat_trim_fs(inode, &range);
if (err < 0)
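This hunk drops the request_queue probing (bdev_get_queue() plus blk_queue_discard()) in favour of block-layer helpers that take the block device directly. A rough sketch of the same check, assuming only bdev_max_discard_sectors() and bdev_discard_granularity() from <linux/blkdev.h>; the function name example_fitrim_check is invented for illustration:

#include <linux/blkdev.h>
#include <linux/fs.h>

static int example_fitrim_check(struct block_device *bdev,
				struct fstrim_range *range)
{
	/* zero means the device cannot discard at all */
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* never trim in units smaller than the device granularity */
	range->minlen = max_t(unsigned int, range->minlen,
			      bdev_discard_granularity(bdev));
	return 0;
}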
@@ -175,9 +175,10 @@ long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fat_file_release(struct inode *inode, struct file *filp)
{
if ((filp->f_mode & FMODE_WRITE) &&
- MSDOS_SB(inode->i_sb)->options.flush) {
+ MSDOS_SB(inode->i_sb)->options.flush) {
fat_flush_inodes(inode->i_sb, inode, NULL);
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ io_schedule_timeout(HZ/10);
}
return 0;
}
@@ -203,12 +204,12 @@ const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fsync = fat_file_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = fat_fallocate,
};
@@ -394,17 +395,25 @@ void fat_truncate_blocks(struct inode *inode, loff_t offset)
fat_flush_inodes(inode->i_sb, inode, NULL);
}
-int fat_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int fat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(mnt_userns, inode, stat);
- stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size;
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+
+ generic_fillattr(idmap, request_mask, inode, stat);
+ stat->blksize = sbi->cluster_size;
- if (MSDOS_SB(inode->i_sb)->options.nfs == FAT_NFS_NOSTALE_RO) {
+ if (sbi->options.nfs == FAT_NFS_NOSTALE_RO) {
/* Use i_pos for ino. This is used as fileid of nfs. */
- stat->ino = fat_i_pos_read(MSDOS_SB(inode->i_sb), inode);
+ stat->ino = fat_i_pos_read(sbi, inode);
}
+
+ if (sbi->options.isvfat && request_mask & STATX_BTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime = MSDOS_I(inode)->i_crtime;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(fat_getattr);
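With the STATX_BTIME handling added above, vfat reports the on-disk creation time through statx(2). A small userspace sketch that reads it back — illustration only, assuming a glibc that provides the statx() wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec,
		       stx.stx_btime.tv_nsec);
	else
		printf("btime not reported by this filesystem\n");
	return 0;
}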
@@ -447,13 +456,14 @@ static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
return 0;
}
-static int fat_allow_set_time(struct user_namespace *mnt_userns,
+static int fat_allow_set_time(struct mnt_idmap *idmap,
struct msdos_sb_info *sbi, struct inode *inode)
{
umode_t allow_utime = sbi->options.allow_utime;
- if (!uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode))) {
- if (in_group_p(i_gid_into_mnt(mnt_userns, inode)))
+ if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode),
+ current_fsuid())) {
+ if (vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
allow_utime >>= 3;
if (allow_utime & MAY_WRITE)
return 1;
@@ -467,7 +477,7 @@ static int fat_allow_set_time(struct user_namespace *mnt_userns,
/* valid file mode bits */
#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
-int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int fat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
@@ -478,11 +488,11 @@ int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if (ia_valid & TIMES_SET_FLAGS) {
- if (fat_allow_set_time(mnt_userns, sbi, inode))
+ if (fat_allow_set_time(idmap, sbi, inode))
attr->ia_valid &= ~TIMES_SET_FLAGS;
}
- error = setattr_prepare(mnt_userns, dentry, attr);
+ error = setattr_prepare(idmap, dentry, attr);
attr->ia_valid = ia_valid;
if (error) {
if (sbi->options.quiet)
@@ -508,9 +518,11 @@ int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
if (((attr->ia_valid & ATTR_UID) &&
- (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
+ (!uid_eq(from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid),
+ sbi->options.fs_uid))) ||
((attr->ia_valid & ATTR_GID) &&
- (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
+ (!gid_eq(from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid),
+ sbi->options.fs_gid))) ||
((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode & ~FAT_VALID_MODE)))
error = -EPERM;
@@ -552,7 +564,7 @@ int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
fat_truncate_time(inode, &attr->ia_mtime, S_MTIME);
attr->ia_valid &= ~(ATTR_ATIME|ATTR_CTIME|ATTR_MTIME);
- setattr_copy(mnt_userns, inode, attr);
+ setattr_copy(idmap, inode, attr);
mark_inode_dirty(inode);
out:
return error;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a6f1c6d426d1..0b6009cd1844 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -16,13 +16,13 @@
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
-#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/random.h>
#include <linux/iversion.h>
+#include <linux/fs_struct.h>
#include "fat.h"
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
@@ -194,20 +194,15 @@ static int fat_get_block(struct inode *inode, sector_t iblock,
return 0;
}
-static int fat_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, fat_get_block, wbc);
-}
-
static int fat_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, fat_get_block);
}
-static int fat_readpage(struct file *file, struct page *page)
+static int fat_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, fat_get_block);
+ return mpage_read_folio(folio, fat_get_block);
}
static void fat_readahead(struct readahead_control *rac)
@@ -225,28 +220,29 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int fat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int fat_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int err;
- *pagep = NULL;
- err = cont_write_begin(file, mapping, pos, len, flags,
- pagep, fsdata, fat_get_block,
+ err = cont_write_begin(iocb, mapping, pos, len,
+ foliop, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
fat_write_failed(mapping, pos + len);
return err;
}
-static int fat_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *pagep, void *fsdata)
+static int fat_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
@@ -342,15 +338,16 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
}
static const struct address_space_operations fat_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = fat_readpage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = fat_read_folio,
.readahead = fat_readahead,
- .writepage = fat_writepage,
.writepages = fat_writepages,
.write_begin = fat_write_begin,
.write_end = fat_write_end,
.direct_IO = fat_direct_IO,
- .bmap = _fat_bmap
+ .bmap = _fat_bmap,
+ .migrate_folio = buffer_migrate_folio,
};
/*
@@ -516,13 +513,14 @@ static int fat_validate_dir(struct inode *dir)
int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ struct timespec64 mtime;
int error;
MSDOS_I(inode)->i_pos = 0;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
inode->i_generation &= ~1;
@@ -565,13 +563,18 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
- fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
+ fat_time_fat2unix(sbi, &mtime, de->time, de->date, 0);
+ inode_set_mtime_to_ts(inode, mtime);
+ inode_set_ctime_to_ts(inode, mtime);
if (sbi->options.isvfat) {
- fat_time_fat2unix(sbi, &inode->i_ctime, de->ctime,
+ struct timespec64 atime;
+
+ fat_time_fat2unix(sbi, &atime, 0, de->adate, 0);
+ inode_set_atime_to_ts(inode, atime);
+ fat_time_fat2unix(sbi, &MSDOS_I(inode)->i_crtime, de->ctime,
de->cdate, de->ctime_cs);
- fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
} else
- fat_truncate_time(inode, &inode->i_mtime, S_ATIME|S_CTIME);
+ inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, &mtime));
return 0;
}
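The fat_fill_inode changes above follow the VFS move away from touching i_mtime/i_atime/i_ctime directly and toward the timestamp accessors in <linux/fs.h>. A minimal sketch of that pattern, assuming only current_time() and the inode_set_*_to_ts() helpers; example_stamp_inode is an invented name:

#include <linux/fs.h>

static void example_stamp_inode(struct inode *inode)
{
	struct timespec64 now = current_time(inode);

	/* the setters return the value they stored, so they can be chained */
	inode_set_mtime_to_ts(inode, inode_set_ctime_to_ts(inode, now));
	inode_set_atime_to_ts(inode, now);
}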
@@ -745,7 +748,7 @@ static struct kmem_cache *fat_inode_cachep;
static struct inode *fat_alloc_inode(struct super_block *sb)
{
struct msdos_inode_info *ei;
- ei = kmem_cache_alloc(fat_inode_cachep, GFP_NOFS);
+ ei = alloc_inode_sb(sb, fat_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
@@ -756,6 +759,8 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
ei->i_logstart = 0;
ei->i_attrs = 0;
ei->i_pos = 0;
+ ei->i_crtime.tv_sec = 0;
+ ei->i_crtime.tv_nsec = 0;
return &ei->vfs_inode;
}
@@ -783,7 +788,7 @@ static int __init fat_init_inodecache(void)
fat_inode_cachep = kmem_cache_create("fat_inode_cache",
sizeof(struct msdos_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (fat_inode_cachep == NULL)
return -ENOMEM;
@@ -800,16 +805,17 @@ static void __exit fat_destroy_inodecache(void)
kmem_cache_destroy(fat_inode_cachep);
}
-static int fat_remount(struct super_block *sb, int *flags, char *data)
+int fat_reconfigure(struct fs_context *fc)
{
bool new_rdonly;
+ struct super_block *sb = fc->root->d_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
+ fc->sb_flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
sync_filesystem(sb);
/* make sure we update state on remount. */
- new_rdonly = *flags & SB_RDONLY;
+ new_rdonly = fc->sb_flags & SB_RDONLY;
if (new_rdonly != sb_rdonly(sb)) {
if (new_rdonly)
fat_set_state(sb, 0, 0);
@@ -818,6 +824,7 @@ static int fat_remount(struct super_block *sb, int *flags, char *data)
}
return 0;
}
+EXPORT_SYMBOL_GPL(fat_reconfigure);
static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -850,6 +857,7 @@ static int __fat_write_inode(struct inode *inode, int wait)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct msdos_dir_entry *raw_entry;
+ struct timespec64 mtime;
loff_t i_pos;
sector_t blocknr;
int err, offset;
@@ -883,14 +891,16 @@ retry:
raw_entry->size = cpu_to_le32(inode->i_size);
raw_entry->attr = fat_make_attrs(inode);
fat_set_start(raw_entry, MSDOS_I(inode)->i_logstart);
- fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
+ mtime = inode_get_mtime(inode);
+ fat_time_unix2fat(sbi, &mtime, &raw_entry->time,
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
+ struct timespec64 ts = inode_get_atime(inode);
__le16 atime;
- fat_time_unix2fat(sbi, &inode->i_ctime, &raw_entry->ctime,
+
+ fat_time_unix2fat(sbi, &ts, &atime, &raw_entry->adate, NULL);
+ fat_time_unix2fat(sbi, &MSDOS_I(inode)->i_crtime, &raw_entry->ctime,
&raw_entry->cdate, &raw_entry->ctime_cs);
- fat_time_unix2fat(sbi, &inode->i_atime, &atime,
- &raw_entry->adate, NULL);
}
spin_unlock(&sbi->inode_hash_lock);
mark_buffer_dirty(bh);
@@ -932,8 +942,6 @@ static const struct super_operations fat_sops = {
.evict_inode = fat_evict_inode,
.put_super = fat_put_super,
.statfs = fat_statfs,
- .remount_fs = fat_remount,
-
.show_options = fat_show_options,
};
@@ -1030,355 +1038,282 @@ static int fat_show_options(struct seq_file *m, struct dentry *root)
}
enum {
- Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid,
- Opt_umask, Opt_dmask, Opt_fmask, Opt_allow_utime, Opt_codepage,
- Opt_usefree, Opt_nocase, Opt_quiet, Opt_showexec, Opt_debug,
- Opt_immutable, Opt_dots, Opt_nodots,
- Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
- Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
- Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
- Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
- Opt_err_panic, Opt_err_ro, Opt_discard, Opt_nfs, Opt_time_offset,
- Opt_nfs_stale_rw, Opt_nfs_nostale_ro, Opt_err, Opt_dos1xfloppy,
+ Opt_check, Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask,
+ Opt_allow_utime, Opt_codepage, Opt_usefree, Opt_nocase, Opt_quiet,
+ Opt_showexec, Opt_debug, Opt_immutable, Opt_dots, Opt_dotsOK,
+ Opt_charset, Opt_shortname, Opt_utf8, Opt_utf8_bool,
+ Opt_uni_xl, Opt_uni_xl_bool, Opt_nonumtail, Opt_nonumtail_bool,
+ Opt_obsolete, Opt_flush, Opt_tz, Opt_rodir, Opt_errors, Opt_discard,
+ Opt_nfs, Opt_nfs_enum, Opt_time_offset, Opt_dos1xfloppy,
};
-static const match_table_t fat_tokens = {
- {Opt_check_r, "check=relaxed"},
- {Opt_check_s, "check=strict"},
- {Opt_check_n, "check=normal"},
- {Opt_check_r, "check=r"},
- {Opt_check_s, "check=s"},
- {Opt_check_n, "check=n"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_umask, "umask=%o"},
- {Opt_dmask, "dmask=%o"},
- {Opt_fmask, "fmask=%o"},
- {Opt_allow_utime, "allow_utime=%o"},
- {Opt_codepage, "codepage=%u"},
- {Opt_usefree, "usefree"},
- {Opt_nocase, "nocase"},
- {Opt_quiet, "quiet"},
- {Opt_showexec, "showexec"},
- {Opt_debug, "debug"},
- {Opt_immutable, "sys_immutable"},
- {Opt_flush, "flush"},
- {Opt_tz_utc, "tz=UTC"},
- {Opt_time_offset, "time_offset=%d"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_discard, "discard"},
- {Opt_nfs_stale_rw, "nfs"},
- {Opt_nfs_stale_rw, "nfs=stale_rw"},
- {Opt_nfs_nostale_ro, "nfs=nostale_ro"},
- {Opt_dos1xfloppy, "dos1xfloppy"},
- {Opt_obsolete, "conv=binary"},
- {Opt_obsolete, "conv=text"},
- {Opt_obsolete, "conv=auto"},
- {Opt_obsolete, "conv=b"},
- {Opt_obsolete, "conv=t"},
- {Opt_obsolete, "conv=a"},
- {Opt_obsolete, "fat=%u"},
- {Opt_obsolete, "blocksize=%u"},
- {Opt_obsolete, "cvf_format=%20s"},
- {Opt_obsolete, "cvf_options=%100s"},
- {Opt_obsolete, "posix"},
- {Opt_err, NULL},
+static const struct constant_table fat_param_check[] = {
+ {"relaxed", 'r'},
+ {"r", 'r'},
+ {"strict", 's'},
+ {"s", 's'},
+ {"normal", 'n'},
+ {"n", 'n'},
+ {}
};
-static const match_table_t msdos_tokens = {
- {Opt_nodots, "nodots"},
- {Opt_nodots, "dotsOK=no"},
- {Opt_dots, "dots"},
- {Opt_dots, "dotsOK=yes"},
- {Opt_err, NULL}
-};
-static const match_table_t vfat_tokens = {
- {Opt_charset, "iocharset=%s"},
- {Opt_shortname_lower, "shortname=lower"},
- {Opt_shortname_win95, "shortname=win95"},
- {Opt_shortname_winnt, "shortname=winnt"},
- {Opt_shortname_mixed, "shortname=mixed"},
- {Opt_utf8_no, "utf8=0"}, /* 0 or no or false */
- {Opt_utf8_no, "utf8=no"},
- {Opt_utf8_no, "utf8=false"},
- {Opt_utf8_yes, "utf8=1"}, /* empty or 1 or yes or true */
- {Opt_utf8_yes, "utf8=yes"},
- {Opt_utf8_yes, "utf8=true"},
- {Opt_utf8_yes, "utf8"},
- {Opt_uni_xl_no, "uni_xlate=0"}, /* 0 or no or false */
- {Opt_uni_xl_no, "uni_xlate=no"},
- {Opt_uni_xl_no, "uni_xlate=false"},
- {Opt_uni_xl_yes, "uni_xlate=1"}, /* empty or 1 or yes or true */
- {Opt_uni_xl_yes, "uni_xlate=yes"},
- {Opt_uni_xl_yes, "uni_xlate=true"},
- {Opt_uni_xl_yes, "uni_xlate"},
- {Opt_nonumtail_no, "nonumtail=0"}, /* 0 or no or false */
- {Opt_nonumtail_no, "nonumtail=no"},
- {Opt_nonumtail_no, "nonumtail=false"},
- {Opt_nonumtail_yes, "nonumtail=1"}, /* empty or 1 or yes or true */
- {Opt_nonumtail_yes, "nonumtail=yes"},
- {Opt_nonumtail_yes, "nonumtail=true"},
- {Opt_nonumtail_yes, "nonumtail"},
- {Opt_rodir, "rodir"},
- {Opt_err, NULL}
+
+static const struct constant_table fat_param_tz[] = {
+ {"UTC", 0},
+ {}
};
-static int parse_options(struct super_block *sb, char *options, int is_vfat,
- int silent, int *debug, struct fat_mount_options *opts)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- char *iocharset;
+static const struct constant_table fat_param_errors[] = {
+ {"continue", FAT_ERRORS_CONT},
+ {"panic", FAT_ERRORS_PANIC},
+ {"remount-ro", FAT_ERRORS_RO},
+ {}
+};
- opts->isvfat = is_vfat;
- opts->fs_uid = current_uid();
- opts->fs_gid = current_gid();
- opts->fs_fmask = opts->fs_dmask = current_umask();
- opts->allow_utime = -1;
- opts->codepage = fat_default_codepage;
- fat_reset_iocharset(opts);
- if (is_vfat) {
- opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
- opts->rodir = 0;
- } else {
- opts->shortname = 0;
- opts->rodir = 1;
- }
- opts->name_check = 'n';
- opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0;
- opts->unicode_xlate = 0;
- opts->numtail = 1;
- opts->usefree = opts->nocase = 0;
- opts->tz_set = 0;
- opts->nfs = 0;
- opts->errors = FAT_ERRORS_RO;
- *debug = 0;
+static const struct constant_table fat_param_nfs[] = {
+ {"stale_rw", FAT_NFS_STALE_RW},
+ {"nostale_ro", FAT_NFS_NOSTALE_RO},
+ {}
+};
- opts->utf8 = IS_ENABLED(CONFIG_FAT_DEFAULT_UTF8) && is_vfat;
+/*
+ * These are all obsolete but we still reject invalid options.
+ * The corresponding values are therefore meaningless.
+ */
+static const struct constant_table fat_param_conv[] = {
+ {"binary", 0},
+ {"text", 0},
+ {"auto", 0},
+ {"b", 0},
+ {"t", 0},
+ {"a", 0},
+ {}
+};
- if (!options)
- goto out;
+/* Core options. See below for vfat and msdos extras */
+const struct fs_parameter_spec fat_param_spec[] = {
+ fsparam_enum ("check", Opt_check, fat_param_check),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_u32oct ("umask", Opt_umask),
+ fsparam_u32oct ("dmask", Opt_dmask),
+ fsparam_u32oct ("fmask", Opt_fmask),
+ fsparam_u32oct ("allow_utime", Opt_allow_utime),
+ fsparam_u32 ("codepage", Opt_codepage),
+ fsparam_flag ("usefree", Opt_usefree),
+ fsparam_flag ("nocase", Opt_nocase),
+ fsparam_flag ("quiet", Opt_quiet),
+ fsparam_flag ("showexec", Opt_showexec),
+ fsparam_flag ("debug", Opt_debug),
+ fsparam_flag ("sys_immutable", Opt_immutable),
+ fsparam_flag ("flush", Opt_flush),
+ fsparam_enum ("tz", Opt_tz, fat_param_tz),
+ fsparam_s32 ("time_offset", Opt_time_offset),
+ fsparam_enum ("errors", Opt_errors, fat_param_errors),
+ fsparam_flag ("discard", Opt_discard),
+ fsparam_flag ("nfs", Opt_nfs),
+ fsparam_enum ("nfs", Opt_nfs_enum, fat_param_nfs),
+ fsparam_flag ("dos1xfloppy", Opt_dos1xfloppy),
+ __fsparam(fs_param_is_enum, "conv",
+ Opt_obsolete, fs_param_deprecated, fat_param_conv),
+ __fsparam(fs_param_is_u32, "fat",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(fs_param_is_u32, "blocksize",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(fs_param_is_string, "cvf_format",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(fs_param_is_string, "cvf_options",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(NULL, "posix",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ {}
+};
+EXPORT_SYMBOL_GPL(fat_param_spec);
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
+static const struct fs_parameter_spec msdos_param_spec[] = {
+ fsparam_flag_no ("dots", Opt_dots),
+ fsparam_bool ("dotsOK", Opt_dotsOK),
+ {}
+};
- token = match_token(p, fat_tokens, args);
- if (token == Opt_err) {
- if (is_vfat)
- token = match_token(p, vfat_tokens, args);
- else
- token = match_token(p, msdos_tokens, args);
- }
- switch (token) {
- case Opt_check_s:
- opts->name_check = 's';
- break;
- case Opt_check_r:
- opts->name_check = 'r';
- break;
- case Opt_check_n:
- opts->name_check = 'n';
- break;
- case Opt_usefree:
- opts->usefree = 1;
- break;
- case Opt_nocase:
- if (!is_vfat)
- opts->nocase = 1;
- else {
- /* for backward compatibility */
- opts->shortname = VFAT_SFN_DISPLAY_WIN95
- | VFAT_SFN_CREATE_WIN95;
- }
- break;
- case Opt_quiet:
- opts->quiet = 1;
- break;
- case Opt_showexec:
- opts->showexec = 1;
- break;
- case Opt_debug:
- *debug = 1;
- break;
- case Opt_immutable:
- opts->sys_immutable = 1;
- break;
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->fs_uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(opts->fs_uid))
- return -EINVAL;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->fs_gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(opts->fs_gid))
- return -EINVAL;
- break;
- case Opt_umask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_fmask = opts->fs_dmask = option;
- break;
- case Opt_dmask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_dmask = option;
- break;
- case Opt_fmask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_fmask = option;
- break;
- case Opt_allow_utime:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->allow_utime = option & (S_IWGRP | S_IWOTH);
- break;
- case Opt_codepage:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->codepage = option;
- break;
- case Opt_flush:
- opts->flush = 1;
- break;
- case Opt_time_offset:
- if (match_int(&args[0], &option))
- return -EINVAL;
- /*
- * GMT+-12 zones may have DST corrections so at least
- * 13 hours difference is needed. Make the limit 24
- * just in case someone invents something unusual.
- */
- if (option < -24 * 60 || option > 24 * 60)
- return -EINVAL;
- opts->tz_set = 1;
- opts->time_offset = option;
- break;
- case Opt_tz_utc:
- opts->tz_set = 1;
- opts->time_offset = 0;
- break;
- case Opt_err_cont:
- opts->errors = FAT_ERRORS_CONT;
- break;
- case Opt_err_panic:
- opts->errors = FAT_ERRORS_PANIC;
- break;
- case Opt_err_ro:
- opts->errors = FAT_ERRORS_RO;
- break;
- case Opt_nfs_stale_rw:
- opts->nfs = FAT_NFS_STALE_RW;
- break;
- case Opt_nfs_nostale_ro:
- opts->nfs = FAT_NFS_NOSTALE_RO;
- break;
- case Opt_dos1xfloppy:
- opts->dos1xfloppy = 1;
- break;
+static const struct constant_table fat_param_shortname[] = {
+ {"lower", VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95},
+ {"win95", VFAT_SFN_DISPLAY_WIN95 | VFAT_SFN_CREATE_WIN95},
+ {"winnt", VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WINNT},
+ {"mixed", VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WIN95},
+ {}
+};
- /* msdos specific */
- case Opt_dots:
- opts->dotsOK = 1;
- break;
- case Opt_nodots:
- opts->dotsOK = 0;
- break;
+static const struct fs_parameter_spec vfat_param_spec[] = {
+ fsparam_string ("iocharset", Opt_charset),
+ fsparam_enum ("shortname", Opt_shortname, fat_param_shortname),
+ fsparam_flag ("utf8", Opt_utf8),
+ fsparam_bool ("utf8", Opt_utf8_bool),
+ fsparam_flag ("uni_xlate", Opt_uni_xl),
+ fsparam_bool ("uni_xlate", Opt_uni_xl_bool),
+ fsparam_flag ("nonumtail", Opt_nonumtail),
+ fsparam_bool ("nonumtail", Opt_nonumtail_bool),
+ fsparam_flag ("rodir", Opt_rodir),
+ {}
+};
- /* vfat specific */
- case Opt_charset:
- fat_reset_iocharset(opts);
- iocharset = match_strdup(&args[0]);
- if (!iocharset)
- return -ENOMEM;
- opts->iocharset = iocharset;
- break;
- case Opt_shortname_lower:
- opts->shortname = VFAT_SFN_DISPLAY_LOWER
- | VFAT_SFN_CREATE_WIN95;
- break;
- case Opt_shortname_win95:
- opts->shortname = VFAT_SFN_DISPLAY_WIN95
- | VFAT_SFN_CREATE_WIN95;
- break;
- case Opt_shortname_winnt:
- opts->shortname = VFAT_SFN_DISPLAY_WINNT
- | VFAT_SFN_CREATE_WINNT;
- break;
- case Opt_shortname_mixed:
- opts->shortname = VFAT_SFN_DISPLAY_WINNT
- | VFAT_SFN_CREATE_WIN95;
- break;
- case Opt_utf8_no: /* 0 or no or false */
- opts->utf8 = 0;
- break;
- case Opt_utf8_yes: /* empty or 1 or yes or true */
- opts->utf8 = 1;
- break;
- case Opt_uni_xl_no: /* 0 or no or false */
- opts->unicode_xlate = 0;
- break;
- case Opt_uni_xl_yes: /* empty or 1 or yes or true */
- opts->unicode_xlate = 1;
- break;
- case Opt_nonumtail_no: /* 0 or no or false */
- opts->numtail = 1; /* negated option */
- break;
- case Opt_nonumtail_yes: /* empty or 1 or yes or true */
- opts->numtail = 0; /* negated option */
- break;
- case Opt_rodir:
- opts->rodir = 1;
- break;
- case Opt_discard:
- opts->discard = 1;
- break;
+int fat_parse_param(struct fs_context *fc, struct fs_parameter *param,
+ bool is_vfat)
+{
+ struct fat_mount_options *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- /* obsolete mount options */
- case Opt_obsolete:
- fat_msg(sb, KERN_INFO, "\"%s\" option is obsolete, "
- "not supported now", p);
- break;
- /* unknown option */
- default:
- if (!silent) {
- fat_msg(sb, KERN_ERR,
- "Unrecognized mount option \"%s\" "
- "or missing value", p);
- }
- return -EINVAL;
- }
- }
+ /* remount options have traditionally been ignored */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+ return 0;
-out:
- /* UTF-8 doesn't provide FAT semantics */
- if (!strcmp(opts->iocharset, "utf8")) {
- fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset"
- " for FAT filesystems, filesystem will be "
- "case sensitive!");
+ opt = fs_parse(fc, fat_param_spec, param, &result);
+ /* If option not found in fat_param_spec, try vfat/msdos options */
+ if (opt == -ENOPARAM) {
+ if (is_vfat)
+ opt = fs_parse(fc, vfat_param_spec, param, &result);
+ else
+ opt = fs_parse(fc, msdos_param_spec, param, &result);
}
- /* If user doesn't specify allow_utime, it's initialized from dmask. */
- if (opts->allow_utime == (unsigned short)-1)
- opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
- if (opts->unicode_xlate)
- opts->utf8 = 0;
- if (opts->nfs == FAT_NFS_NOSTALE_RO) {
- sb->s_flags |= SB_RDONLY;
- sb->s_export_op = &fat_export_ops_nostale;
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_check:
+ opts->name_check = result.uint_32;
+ break;
+ case Opt_usefree:
+ opts->usefree = 1;
+ break;
+ case Opt_nocase:
+ if (!is_vfat)
+ opts->nocase = 1;
+ else {
+ /* for backward compatibility */
+ opts->shortname = VFAT_SFN_DISPLAY_WIN95
+ | VFAT_SFN_CREATE_WIN95;
+ }
+ break;
+ case Opt_quiet:
+ opts->quiet = 1;
+ break;
+ case Opt_showexec:
+ opts->showexec = 1;
+ break;
+ case Opt_debug:
+ opts->debug = 1;
+ break;
+ case Opt_immutable:
+ opts->sys_immutable = 1;
+ break;
+ case Opt_uid:
+ opts->fs_uid = result.uid;
+ break;
+ case Opt_gid:
+ opts->fs_gid = result.gid;
+ break;
+ case Opt_umask:
+ opts->fs_fmask = opts->fs_dmask = result.uint_32;
+ break;
+ case Opt_dmask:
+ opts->fs_dmask = result.uint_32;
+ break;
+ case Opt_fmask:
+ opts->fs_fmask = result.uint_32;
+ break;
+ case Opt_allow_utime:
+ opts->allow_utime = result.uint_32 & (S_IWGRP | S_IWOTH);
+ break;
+ case Opt_codepage:
+ opts->codepage = result.uint_32;
+ break;
+ case Opt_flush:
+ opts->flush = 1;
+ break;
+ case Opt_time_offset:
+ /*
+ * GMT+-12 zones may have DST corrections so at least
+ * 13 hours difference is needed. Make the limit 24
+ * just in case someone invents something unusual.
+ */
+ if (result.int_32 < -24 * 60 || result.int_32 > 24 * 60)
+ return -EINVAL;
+ opts->tz_set = 1;
+ opts->time_offset = result.int_32;
+ break;
+ case Opt_tz:
+ opts->tz_set = 1;
+ opts->time_offset = result.uint_32;
+ break;
+ case Opt_errors:
+ opts->errors = result.uint_32;
+ break;
+ case Opt_nfs:
+ opts->nfs = FAT_NFS_STALE_RW;
+ break;
+ case Opt_nfs_enum:
+ opts->nfs = result.uint_32;
+ break;
+ case Opt_dos1xfloppy:
+ opts->dos1xfloppy = 1;
+ break;
+
+ /* msdos specific */
+ case Opt_dots: /* dots / nodots */
+ opts->dotsOK = !result.negated;
+ break;
+ case Opt_dotsOK: /* dotsOK = yes/no */
+ opts->dotsOK = result.boolean;
+ break;
+
+ /* vfat specific */
+ case Opt_charset:
+ fat_reset_iocharset(opts);
+ opts->iocharset = param->string;
+ param->string = NULL; /* Steal string */
+ break;
+ case Opt_shortname:
+ opts->shortname = result.uint_32;
+ break;
+ case Opt_utf8:
+ opts->utf8 = 1;
+ break;
+ case Opt_utf8_bool:
+ opts->utf8 = result.boolean;
+ break;
+ case Opt_uni_xl:
+ opts->unicode_xlate = 1;
+ break;
+ case Opt_uni_xl_bool:
+ opts->unicode_xlate = result.boolean;
+ break;
+ case Opt_nonumtail:
+ opts->numtail = 0; /* negated option */
+ break;
+ case Opt_nonumtail_bool:
+ opts->numtail = !result.boolean; /* negated option */
+ break;
+ case Opt_rodir:
+ opts->rodir = 1;
+ break;
+ case Opt_discard:
+ opts->discard = 1;
+ break;
+
+ /* obsolete mount options */
+ case Opt_obsolete:
+ printk(KERN_INFO "FAT-fs: \"%s\" option is obsolete, "
+ "not supported now", param->key);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL_GPL(fat_parse_param);
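fat_param_spec and fat_parse_param above form the standard new-mount-API pair: a table of fs_parameter_spec entries plus a .parse_param hook that funnels each option through fs_parse(). A stripped-down sketch of that shape — example_param_spec, example_parse_param and Opt_verbose are invented names used purely for illustration:

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_verbose };

static const struct fs_parameter_spec example_param_spec[] = {
	fsparam_flag("verbose", Opt_verbose),
	{}
};

static int example_parse_param(struct fs_context *fc,
			       struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, example_param_spec, param, &result);
	if (opt < 0)
		return opt;	/* -ENOPARAM, -EINVAL, ... */

	switch (opt) {
	case Opt_verbose:
		/* record the flag in fc->fs_private here */
		break;
	}
	return 0;
}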
static int fat_read_root(struct inode *inode)
{
@@ -1408,8 +1343,8 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->mmu_private = inode->i_size;
fat_save_attrs(inode, ATTR_DIR);
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = 0;
- inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, 0, 0)));
set_nlink(inode, fat_subdirs(inode)+2);
return 0;
@@ -1597,9 +1532,11 @@ out:
/*
* Read the super block of an MS-DOS FS.
*/
-int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+int fat_fill_super(struct super_block *sb, struct fs_context *fc,
void (*setup)(struct super_block *))
{
+ struct fat_mount_options *opts = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct inode *root_inode = NULL, *fat_inode = NULL;
struct inode *fsinfo_inode = NULL;
struct buffer_head *bh;
@@ -1607,7 +1544,6 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
- int debug;
long error;
char buf[50];
struct timespec64 ts;
@@ -1636,14 +1572,36 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- error = parse_options(sb, data, isvfat, silent, &debug, &sbi->options);
- if (error)
- goto out_fail;
+ /* UTF-8 doesn't provide FAT semantics */
+ if (!strcmp(opts->iocharset, "utf8")) {
+ fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset"
+ " for FAT filesystems, filesystem will be"
+ " case sensitive!");
+ }
+
+ /* If user doesn't specify allow_utime, it's initialized from dmask. */
+ if (opts->allow_utime == (unsigned short)-1)
+ opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
+ if (opts->unicode_xlate)
+ opts->utf8 = 0;
+ if (opts->nfs == FAT_NFS_NOSTALE_RO) {
+ sb->s_flags |= SB_RDONLY;
+ sb->s_export_op = &fat_export_ops_nostale;
+ }
+
+ /* Apply parsed options to sbi (structure copy) */
+ sbi->options = *opts;
+ /* Transfer ownership of iocharset to sbi->options */
+ opts->iocharset = NULL;
setup(sb); /* flavour-specific stuff that needs options */
+ error = -EINVAL;
+ if (!sb_min_blocksize(sb, 512)) {
+ fat_msg(sb, KERN_ERR, "unable to set blocksize");
+ goto out_fail;
+ }
error = -EIO;
- sb_min_blocksize(sb, 512);
bh = sb_bread(sb, 0);
if (bh == NULL) {
fat_msg(sb, KERN_ERR, "unable to read boot sector");
@@ -1755,6 +1713,9 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
else /* fat 16 or 12 */
sbi->vol_id = bpb.fat16_vol_id;
+ __le32 vol_id_le = cpu_to_le32(sbi->vol_id);
+ super_set_uuid(sb, (void *) &vol_id_le, sizeof(vol_id_le));
+
sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
@@ -1871,13 +1832,9 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
goto out_fail;
}
- if (sbi->options.discard) {
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- if (!blk_queue_discard(q))
- fat_msg(sb, KERN_WARNING,
- "mounting with \"discard\" option, but "
- "the device does not support discard");
- }
+ if (sbi->options.discard && !bdev_max_discard_sectors(sb->s_bdev))
+ fat_msg(sb, KERN_WARNING,
+ "mounting with \"discard\" option, but the device does not support discard");
fat_set_state(sb, 1, 0);
return 0;
@@ -1888,10 +1845,8 @@ out_invalid:
fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
out_fail:
- if (fsinfo_inode)
- iput(fsinfo_inode);
- if (fat_inode)
- iput(fat_inode);
+ iput(fsinfo_inode);
+ iput(fat_inode);
unload_nls(sbi->nls_io);
unload_nls(sbi->nls_disk);
fat_reset_iocharset(&sbi->options);
@@ -1946,6 +1901,57 @@ int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2)
}
EXPORT_SYMBOL_GPL(fat_flush_inodes);
+int fat_init_fs_context(struct fs_context *fc, bool is_vfat)
+{
+ struct fat_mount_options *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ opts->isvfat = is_vfat;
+ opts->fs_uid = current_uid();
+ opts->fs_gid = current_gid();
+ opts->fs_fmask = opts->fs_dmask = current_umask();
+ opts->allow_utime = -1;
+ opts->codepage = fat_default_codepage;
+ fat_reset_iocharset(opts);
+ if (is_vfat) {
+ opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
+ opts->rodir = 0;
+ } else {
+ opts->shortname = 0;
+ opts->rodir = 1;
+ }
+ opts->name_check = 'n';
+ opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0;
+ opts->unicode_xlate = 0;
+ opts->numtail = 1;
+ opts->usefree = opts->nocase = 0;
+ opts->tz_set = 0;
+ opts->nfs = 0;
+ opts->errors = FAT_ERRORS_RO;
+ opts->debug = 0;
+
+ opts->utf8 = IS_ENABLED(CONFIG_FAT_DEFAULT_UTF8) && is_vfat;
+
+ fc->fs_private = opts;
+ /* fc->ops assigned by caller */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_init_fs_context);
+
+void fat_free_fc(struct fs_context *fc)
+{
+ struct fat_mount_options *opts = fc->fs_private;
+
+ if (opts->iocharset != fat_default_iocharset)
+ kfree(opts->iocharset);
+ kfree(fc->fs_private);
+}
+EXPORT_SYMBOL_GPL(fat_free_fc);
+
static int __init init_fat_fs(void)
{
int err;
@@ -1974,4 +1980,5 @@ static void __exit exit_fat_fs(void)
module_init(init_fat_fs)
module_exit(exit_fat_fs)
+MODULE_DESCRIPTION("Core FAT filesystem support");
MODULE_LICENSE("GPL");
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 91ca3c304211..950da09f0961 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -42,10 +42,16 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
EXPORT_SYMBOL_GPL(__fat_fs_error);
/**
- * fat_msg() - print preformated FAT specific messages. Every thing what is
- * not fat_fs_error() should be fat_msg().
+ * _fat_msg() - Print a preformatted FAT message based on a superblock.
+ * @sb: A pointer to a &struct super_block
+ * @level: A Kernel printk level constant
+ * @fmt: The printf-style format string to print.
+ *
+ * Everything that is not fat_fs_error() should be fat_msg().
+ *
+ * fat_msg() wraps _fat_msg() for printk indexing.
*/
-void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -53,7 +59,7 @@ void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sFAT-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ _printk(FAT_PRINTK_PREFIX "%pV\n", level, sb->s_id, &vaf);
va_end(args);
}
@@ -152,9 +158,9 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
mark_inode_dirty(inode);
}
if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
- fat_fs_error(sb, "clusters badly computed (%d != %llu)",
- new_fclus,
- (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
+ fat_fs_error_ratelimit(
+ sb, "clusters badly computed (%d != %llu)", new_fclus,
+ (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
fat_cache_inval_inode(inode);
}
inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);
@@ -187,7 +193,7 @@ static long days_in_year[] = {
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
-static inline int fat_tz_offset(struct msdos_sb_info *sbi)
+static inline int fat_tz_offset(const struct msdos_sb_info *sbi)
{
return (sbi->options.tz_set ?
-sbi->options.time_offset :
@@ -275,23 +281,35 @@ static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
}
-static inline struct timespec64 fat_timespec64_trunc_10ms(struct timespec64 ts)
+/*
+ * truncate atime to 24 hour granularity (00:00:00 in local timezone)
+ */
+struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts)
{
- if (ts.tv_nsec)
- ts.tv_nsec -= ts.tv_nsec % 10000000UL;
- return ts;
+ /* to localtime */
+ time64_t seconds = ts->tv_sec - fat_tz_offset(sbi);
+ s32 remainder;
+
+ div_s64_rem(seconds, SECS_PER_DAY, &remainder);
+ /* to day boundary, and back to unix time */
+ seconds = seconds + fat_tz_offset(sbi) - remainder;
+
+ return (struct timespec64){ seconds, 0 };
+}
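As a worked example of the arithmetic above (illustration only, assuming a zero timezone offset): for an atime of 1700000000 (Tue 14 Nov 2023 22:13:20 UTC), div_s64_rem() leaves a remainder of 1700000000 % 86400 = 80000, so the stored atime becomes 1700000000 - 80000 = 1699920000, i.e. 00:00:00 of the same day; with a non-zero offset the same rounding is done after shifting into local time, and the offset is added back afterwards.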
+
+/*
+ * truncate mtime to 2 second granularity
+ */
+struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi,
+ const struct timespec64 *ts)
+{
+ return fat_timespec64_trunc_2secs(*ts);
}
/*
* truncate the various times with appropriate granularity:
- * root inode:
- * all times always 0
- * all other inodes:
- * mtime - 2 seconds
- * ctime
- * msdos - 2 seconds
- * vfat - 10 milliseconds
- * atime - 24 hours (00:00:00 in local timezone)
+ * all times in root node are always 0
*/
int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
{
@@ -306,31 +324,22 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
ts = current_time(inode);
}
- if (flags & S_ATIME) {
- /* to localtime */
- time64_t seconds = now->tv_sec - fat_tz_offset(sbi);
- s32 remainder;
-
- div_s64_rem(seconds, SECS_PER_DAY, &remainder);
- /* to day boundary, and back to unix time */
- seconds = seconds + fat_tz_offset(sbi) - remainder;
-
- inode->i_atime = (struct timespec64){ seconds, 0 };
- }
- if (flags & S_CTIME) {
- if (sbi->options.isvfat)
- inode->i_ctime = fat_timespec64_trunc_10ms(*now);
- else
- inode->i_ctime = fat_timespec64_trunc_2secs(*now);
- }
+ if (flags & S_ATIME)
+ inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, now));
+ /*
+ * ctime and mtime share the same on-disk field, and should be
+ * identical in memory. all mtime updates will be applied to ctime,
+ * but ctime updates are ignored.
+ */
if (flags & S_MTIME)
- inode->i_mtime = fat_timespec64_trunc_2secs(*now);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, fat_truncate_mtime(sbi, now)));
return 0;
}
EXPORT_SYMBOL_GPL(fat_truncate_time);
-int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
+int fat_update_time(struct inode *inode, int flags)
{
int dirty_flags = 0;
@@ -338,16 +347,13 @@ int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
return 0;
if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
- fat_truncate_time(inode, now, flags);
+ fat_truncate_time(inode, NULL, flags);
if (inode->i_sb->s_flags & SB_LAZYTIME)
dirty_flags |= I_DIRTY_TIME;
else
dirty_flags |= I_DIRTY_SYNC;
}
- if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
- dirty_flags |= I_DIRTY_SYNC;
-
__mark_inode_dirty(inode, dirty_flags);
return 0;
}
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index efba301d68ae..0b920ee40a7f 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -261,7 +261,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
}
/***** Create a file */
-static int msdos_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int msdos_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
@@ -339,8 +339,8 @@ out:
}
/***** Make a directory */
-static int msdos_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
@@ -389,13 +389,13 @@ static int msdos_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
mutex_unlock(&MSDOS_SB(sb)->s_lock);
fat_flush_inodes(sb, dir, inode);
- return 0;
+ return NULL;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
/***** Unlink a file */
@@ -594,7 +594,7 @@ error_inode:
}
/***** Rename, a wrapper for rename_same_dir & rename_diff_dir */
-static int msdos_rename(struct user_namespace *mnt_userns,
+static int msdos_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -646,28 +646,52 @@ static const struct inode_operations msdos_dir_inode_operations = {
static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
- sb->s_d_op = &msdos_dentry_operations;
+ set_default_d_op(sb, &msdos_dentry_operations);
sb->s_flags |= SB_NOATIME;
}
-static int msdos_fill_super(struct super_block *sb, void *data, int silent)
+static int msdos_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ return fat_fill_super(sb, fc, setup);
+}
+
+static int msdos_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, msdos_fill_super);
+}
+
+static int msdos_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- return fat_fill_super(sb, data, silent, 0, setup);
+ return fat_parse_param(fc, param, false);
}
-static struct dentry *msdos_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static const struct fs_context_operations msdos_context_ops = {
+ .parse_param = msdos_parse_param,
+ .get_tree = msdos_get_tree,
+ .reconfigure = fat_reconfigure,
+ .free = fat_free_fc,
+};
+
+static int msdos_init_fs_context(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, msdos_fill_super);
+ int err;
+
+ /* Initialize with is_vfat == false */
+ err = fat_init_fs_context(fc, false);
+ if (err)
+ return err;
+
+ fc->ops = &msdos_context_ops;
+ return 0;
}
static struct file_system_type msdos_fs_type = {
.owner = THIS_MODULE,
.name = "msdos",
- .mount = msdos_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .init_fs_context = msdos_init_fs_context,
+ .parameters = fat_param_spec,
};
MODULE_ALIAS_FS("msdos");
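With init_fs_context and the parameters table wired up as above, a legacy mount(2) call keeps working: the VFS roughly splits the comma-separated option string and feeds each option to msdos_parse_param() through the fs_context it builds internally. Illustration only — the device and mountpoint paths are made up, and the options are ones declared in fat_param_spec/msdos_param_spec:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/sdb1", "/mnt/msdos", "msdos", MS_NOATIME,
		  "uid=1000,gid=1000,check=relaxed,errors=remount-ro,nodots") != 0)
		perror("mount");
	return 0;
}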
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 5369d82e0bfb..5dbc4cbb8fce 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,17 +43,13 @@ static inline void vfat_d_version_set(struct dentry *dentry,
* If it happened, the negative dentry isn't actually negative
* anymore. So, drop it.
*/
-static int vfat_revalidate_shortname(struct dentry *dentry)
+static bool vfat_revalidate_shortname(struct dentry *dentry, struct inode *dir)
{
- int ret = 1;
- spin_lock(&dentry->d_lock);
- if (!inode_eq_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry)))
- ret = 0;
- spin_unlock(&dentry->d_lock);
- return ret;
+ return inode_eq_iversion(dir, vfat_d_version(dentry));
}
-static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
+static int vfat_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -61,10 +57,11 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
/* This is not negative dentry. Always valid. */
if (d_really_is_positive(dentry))
return 1;
- return vfat_revalidate_shortname(dentry);
+ return vfat_revalidate_shortname(dentry, dir);
}
-static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+static int vfat_revalidate_ci(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -97,7 +94,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- return vfat_revalidate_shortname(dentry);
+ return vfat_revalidate_shortname(dentry, dir);
}
/* returns the length of a struct qstr, ignoring trailing dots */
@@ -200,7 +197,7 @@ static const struct dentry_operations vfat_dentry_ops = {
/* Characters that are undesirable in an MS-DOS file name */
-static inline wchar_t vfat_bad_char(wchar_t w)
+static inline bool vfat_bad_char(wchar_t w)
{
return (w < 0x0020)
|| (w == '*') || (w == '?') || (w == '<') || (w == '>')
@@ -208,7 +205,7 @@ static inline wchar_t vfat_bad_char(wchar_t w)
|| (w == '\\');
}
-static inline wchar_t vfat_replace_char(wchar_t w)
+static inline bool vfat_replace_char(wchar_t w)
{
return (w == '[') || (w == ']') || (w == ';') || (w == ',')
|| (w == '+') || (w == '=');
@@ -756,7 +753,7 @@ error:
return ERR_PTR(err);
}
-static int vfat_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int vfat_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
@@ -780,8 +777,6 @@ static int vfat_create(struct user_namespace *mnt_userns, struct inode *dir,
goto out;
}
inode_inc_iversion(inode);
- fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
out:
@@ -846,8 +841,8 @@ out:
return err;
}
-static int vfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -878,37 +873,70 @@ static int vfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
inode_inc_iversion(inode);
set_nlink(inode, 2);
- fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return 0;
+ return NULL;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
+}
+
+static int vfat_get_dotdot_de(struct inode *inode, struct buffer_head **bh,
+ struct msdos_dir_entry **de)
+{
+ if (S_ISDIR(inode->i_mode)) {
+ if (fat_get_dotdot_entry(inode, bh, de))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int vfat_sync_ipos(struct inode *dir, struct inode *inode)
+{
+ if (IS_DIRSYNC(dir))
+ return fat_sync_inode(inode);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+static int vfat_update_dotdot_de(struct inode *dir, struct inode *inode,
+ struct buffer_head *dotdot_bh,
+ struct msdos_dir_entry *dotdot_de)
+{
+ fat_set_start(dotdot_de, MSDOS_I(dir)->i_logstart);
+ mark_buffer_dirty_inode(dotdot_bh, inode);
+ if (IS_DIRSYNC(dir))
+ return sync_dirty_buffer(dotdot_bh);
+ return 0;
+}
+
+static void vfat_update_dir_metadata(struct inode *dir, struct timespec64 *ts)
+{
+ inode_inc_iversion(dir);
+ fat_truncate_time(dir, ts, S_CTIME | S_MTIME);
+ if (IS_DIRSYNC(dir))
+ (void)fat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
}
-static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
- struct dentry *old_dentry, struct inode *new_dir,
- struct dentry *new_dentry, unsigned int flags)
+static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *dotdot_bh;
- struct msdos_dir_entry *dotdot_de;
+ struct msdos_dir_entry *dotdot_de = NULL;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec64 ts;
loff_t new_i_pos;
- int err, is_dir, update_dotdot, corrupt = 0;
+ int err, is_dir, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
@@ -917,15 +945,13 @@ static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (err)
goto out;
- is_dir = S_ISDIR(old_inode->i_mode);
- update_dotdot = (is_dir && old_dir != new_dir);
- if (update_dotdot) {
- if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) {
- err = -EIO;
+ if (old_dir != new_dir) {
+ err = vfat_get_dotdot_de(old_inode, &dotdot_bh, &dotdot_de);
+ if (err)
goto out;
- }
}
+ is_dir = S_ISDIR(old_inode->i_mode);
ts = current_time(old_dir);
if (new_inode) {
if (is_dir) {
@@ -946,21 +972,15 @@ static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
- if (IS_DIRSYNC(new_dir)) {
- err = fat_sync_inode(old_inode);
- if (err)
- goto error_inode;
- } else
- mark_inode_dirty(old_inode);
+ err = vfat_sync_ipos(new_dir, old_inode);
+ if (err)
+ goto error_inode;
- if (update_dotdot) {
- fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart);
- mark_buffer_dirty_inode(dotdot_bh, old_inode);
- if (IS_DIRSYNC(new_dir)) {
- err = sync_dirty_buffer(dotdot_bh);
- if (err)
- goto error_dotdot;
- }
+ if (dotdot_de) {
+ err = vfat_update_dotdot_de(new_dir, old_inode, dotdot_bh,
+ dotdot_de);
+ if (err)
+ goto error_dotdot;
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
@@ -970,12 +990,7 @@ static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
- inode_inc_iversion(old_dir);
- fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
- if (IS_DIRSYNC(old_dir))
- (void)fat_sync_inode(old_dir);
- else
- mark_inode_dirty(old_dir);
+ vfat_update_dir_metadata(old_dir, &ts);
if (new_inode) {
drop_nlink(new_inode);
@@ -995,10 +1010,9 @@ error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
- if (update_dotdot) {
- fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart);
- mark_buffer_dirty_inode(dotdot_bh, old_inode);
- corrupt |= sync_dirty_buffer(dotdot_bh);
+ if (dotdot_de) {
+ corrupt |= vfat_update_dotdot_de(old_dir, old_inode, dotdot_bh,
+ dotdot_de);
}
error_inode:
fat_detach(old_inode);
@@ -1020,18 +1034,150 @@ error_inode:
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
- __func__, sinfo.i_pos);
+ __func__, new_i_pos);
+ }
+ goto out;
+}
+
+static void vfat_exchange_ipos(struct inode *old_inode, struct inode *new_inode,
+ loff_t old_i_pos, loff_t new_i_pos)
+{
+ fat_detach(old_inode);
+ fat_detach(new_inode);
+ fat_attach(old_inode, new_i_pos);
+ fat_attach(new_inode, old_i_pos);
+}
+
+static void vfat_move_nlink(struct inode *src, struct inode *dst)
+{
+ drop_nlink(src);
+ inc_nlink(dst);
+}
+
+static int vfat_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct buffer_head *old_dotdot_bh = NULL, *new_dotdot_bh = NULL;
+ struct msdos_dir_entry *old_dotdot_de = NULL, *new_dotdot_de = NULL;
+ struct inode *old_inode, *new_inode;
+ struct timespec64 ts = current_time(old_dir);
+ loff_t old_i_pos, new_i_pos;
+ int err, corrupt = 0;
+ struct super_block *sb = old_dir->i_sb;
+
+ old_inode = d_inode(old_dentry);
+ new_inode = d_inode(new_dentry);
+
+ /* Acquire super block lock for the operation to be atomic */
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
+
+ /* if directories are not the same, get ".." info to update */
+ if (old_dir != new_dir) {
+ err = vfat_get_dotdot_de(old_inode, &old_dotdot_bh,
+ &old_dotdot_de);
+ if (err)
+ goto out;
+
+ err = vfat_get_dotdot_de(new_inode, &new_dotdot_bh,
+ &new_dotdot_de);
+ if (err)
+ goto out;
+ }
+
+ old_i_pos = MSDOS_I(old_inode)->i_pos;
+ new_i_pos = MSDOS_I(new_inode)->i_pos;
+
+ vfat_exchange_ipos(old_inode, new_inode, old_i_pos, new_i_pos);
+
+ err = vfat_sync_ipos(old_dir, new_inode);
+ if (err)
+ goto error_exchange;
+ err = vfat_sync_ipos(new_dir, old_inode);
+ if (err)
+ goto error_exchange;
+
+ /* update ".." directory entry info */
+ if (old_dotdot_de) {
+ err = vfat_update_dotdot_de(new_dir, old_inode, old_dotdot_bh,
+ old_dotdot_de);
+ if (err)
+ goto error_old_dotdot;
+ }
+ if (new_dotdot_de) {
+ err = vfat_update_dotdot_de(old_dir, new_inode, new_dotdot_bh,
+ new_dotdot_de);
+ if (err)
+ goto error_new_dotdot;
+ }
+
+	/* cross-directory exchange, and only one of the two is a directory: adjust nlink */
+ if (!old_dotdot_de != !new_dotdot_de) {
+ if (old_dotdot_de)
+ vfat_move_nlink(old_dir, new_dir);
+ else
+ vfat_move_nlink(new_dir, old_dir);
+ }
+
+ vfat_update_dir_metadata(old_dir, &ts);
+ /* if directories are not the same, update new_dir as well */
+ if (old_dir != new_dir)
+ vfat_update_dir_metadata(new_dir, &ts);
+
+out:
+ brelse(old_dotdot_bh);
+ brelse(new_dotdot_bh);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+
+ return err;
+
+error_new_dotdot:
+ if (new_dotdot_de) {
+ corrupt |= vfat_update_dotdot_de(new_dir, new_inode,
+ new_dotdot_bh, new_dotdot_de);
+ }
+
+error_old_dotdot:
+ if (old_dotdot_de) {
+ corrupt |= vfat_update_dotdot_de(old_dir, old_inode,
+ old_dotdot_bh, old_dotdot_de);
+ }
+
+error_exchange:
+ vfat_exchange_ipos(old_inode, new_inode, new_i_pos, old_i_pos);
+ corrupt |= vfat_sync_ipos(new_dir, new_inode);
+ corrupt |= vfat_sync_ipos(old_dir, old_inode);
+
+ if (corrupt < 0) {
+ fat_fs_error(new_dir->i_sb,
+ "%s: Filesystem corrupted (i_pos %lld, %lld)",
+ __func__, old_i_pos, new_i_pos);
}
goto out;
}
+static int vfat_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
+{
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ return -EINVAL;
+
+ if (flags & RENAME_EXCHANGE) {
+ return vfat_rename_exchange(old_dir, old_dentry,
+ new_dir, new_dentry);
+ }
+
+ /* VFS already handled RENAME_NOREPLACE, handle it as a normal rename */
+ return vfat_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
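
Illustration (not part of the patch): with vfat_rename2() wired up, userspace can
atomically swap two entries on a vfat mount via renameat2(). A minimal sketch, assuming
glibc >= 2.28 (which exposes renameat2() and RENAME_EXCHANGE under _GNU_SOURCE) and
hypothetical paths:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>

    int main(void)
    {
            /* Both names must already exist; the swap is atomic. */
            if (renameat2(AT_FDCWD, "/mnt/vfat/a.txt",
                          AT_FDCWD, "/mnt/vfat/b.txt", RENAME_EXCHANGE) < 0) {
                    perror("renameat2(RENAME_EXCHANGE)");
                    return EXIT_FAILURE;
            }
            return 0;
    }

On kernels without this change the same call fails on vfat with EINVAL, since the old
vfat_rename() rejected every flag except RENAME_NOREPLACE.
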
+
static const struct inode_operations vfat_dir_inode_operations = {
.create = vfat_create,
.lookup = vfat_lookup,
.unlink = vfat_unlink,
.mkdir = vfat_mkdir,
.rmdir = vfat_rmdir,
- .rename = vfat_rename,
+ .rename = vfat_rename2,
.setattr = fat_setattr,
.getattr = fat_getattr,
.update_time = fat_update_time,
@@ -1041,29 +1187,53 @@ static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
if (MSDOS_SB(sb)->options.name_check != 's')
- sb->s_d_op = &vfat_ci_dentry_ops;
+ set_default_d_op(sb, &vfat_ci_dentry_ops);
else
- sb->s_d_op = &vfat_dentry_ops;
+ set_default_d_op(sb, &vfat_dentry_ops);
+}
+
+static int vfat_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ return fat_fill_super(sb, fc, setup);
+}
+
+static int vfat_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, vfat_fill_super);
}
-static int vfat_fill_super(struct super_block *sb, void *data, int silent)
+static int vfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- return fat_fill_super(sb, data, silent, 1, setup);
+ return fat_parse_param(fc, param, true);
}
-static struct dentry *vfat_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static const struct fs_context_operations vfat_context_ops = {
+ .parse_param = vfat_parse_param,
+ .get_tree = vfat_get_tree,
+ .reconfigure = fat_reconfigure,
+ .free = fat_free_fc,
+};
+
+static int vfat_init_fs_context(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
+ int err;
+
+ /* Initialize with is_vfat == true */
+ err = fat_init_fs_context(fc, true);
+ if (err)
+ return err;
+
+ fc->ops = &vfat_context_ops;
+ return 0;
}
static struct file_system_type vfat_fs_type = {
.owner = THIS_MODULE,
.name = "vfat",
- .mount = vfat_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .init_fs_context = vfat_init_fs_context,
+ .parameters = fat_param_spec,
};
MODULE_ALIAS_FS("vfat");
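
Illustration (not part of the patch): with the fs_context conversion above, vfat can also
be mounted through the new mount API. A rough sketch using raw syscalls (fsopen() and
friends have no glibc wrappers, so this assumes reasonably new kernel headers); the device
node, mount point and the "uid" option are hypothetical, and error handling is trimmed:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/mount.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            int fsfd = syscall(SYS_fsopen, "vfat", 0);
            if (fsfd < 0) { perror("fsopen"); return 1; }

            /* Each option is routed through vfat_parse_param() -> fat_parse_param(). */
            syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
            syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "uid", "1000", 0);
            syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

            int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
            if (mfd < 0) { perror("fsmount"); return 1; }

            if (syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt/vfat",
                        MOVE_MOUNT_F_EMPTY_PATH) < 0)
                    perror("move_mount");
            return 0;
    }

Classic mount(2) keeps working as well: the .init_fs_context plus .parameters pair
replaces the old .mount callback for both the new and the legacy path.
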
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index af191371c352..509eea96a457 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -17,7 +17,7 @@ struct fat_fid {
#define FAT_FID_SIZE_WITHOUT_PARENT 3
#define FAT_FID_SIZE_WITH_PARENT (sizeof(struct fat_fid)/sizeof(u32))
-/**
+/*
* Look up a directory inode given its starting cluster.
*/
static struct inode *fat_dget(struct super_block *sb, int i_logstart)
@@ -130,12 +130,18 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
fid->parent_i_gen = parent->i_generation;
type = FILEID_FAT_WITH_PARENT;
*lenp = FAT_FID_SIZE_WITH_PARENT;
+ } else {
+ /*
+ * We need to initialize this field because the fh is actually
+ * 12 bytes long
+ */
+ fid->parent_i_pos_hi = 0;
}
return type;
}
-/**
+/*
* Map a NFS file handle to a corresponding dentry.
* The dentry may or may not be connected to the filesystem root.
*/
@@ -279,6 +285,7 @@ static struct dentry *fat_get_parent(struct dentry *child_dir)
}
const struct export_operations fat_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = fat_fh_to_dentry,
.fh_to_parent = fat_fh_to_parent,
.get_parent = fat_get_parent,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 9c6c6a3e2de5..f93dbca08435 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -10,8 +10,8 @@
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/file.h>
-#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
@@ -26,14 +26,17 @@
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>
+#include <linux/rw_hint.h>
#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
-static int setfl(int fd, struct file * filp, unsigned long arg)
+static int setfl(int fd, struct file * filp, unsigned int arg)
{
struct inode * inode = file_inode(filp);
int error = 0;
@@ -47,7 +50,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
- if (!inode_owner_or_capable(file_mnt_user_ns(filp), inode))
+ if (!inode_owner_or_capable(file_mnt_idmap(filp), inode))
return -EPERM;
/* required for strict SunOS emulation */
@@ -56,11 +59,10 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
arg |= O_NONBLOCK;
/* Pipe packetized mode is controlled by O_DIRECT flag */
- if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
- if (!filp->f_mapping || !filp->f_mapping->a_ops ||
- !filp->f_mapping->a_ops->direct_IO)
- return -EINVAL;
- }
+ if (!S_ISFIFO(inode->i_mode) &&
+ (arg & O_DIRECT) &&
+ !(filp->f_mode & FMODE_CAN_ODIRECT))
+ return -EINVAL;
if (filp->f_op->check_flags)
error = filp->f_op->check_flags(arg);
@@ -79,43 +81,82 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
}
spin_lock(&filp->f_lock);
filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
+ filp->f_iocb_flags = iocb_flags(filp);
spin_unlock(&filp->f_lock);
out:
return error;
}
-static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
- int force)
+/*
+ * Allocate a file->f_owner struct if it doesn't exist, handling racing
+ * allocations correctly.
+ */
+int file_f_owner_allocate(struct file *file)
{
- write_lock_irq(&filp->f_owner.lock);
- if (force || !filp->f_owner.pid) {
- put_pid(filp->f_owner.pid);
- filp->f_owner.pid = get_pid(pid);
- filp->f_owner.pid_type = type;
+ struct fown_struct *f_owner;
- if (pid) {
- const struct cred *cred = current_cred();
- filp->f_owner.uid = cred->uid;
- filp->f_owner.euid = cred->euid;
- }
+ f_owner = file_f_owner(file);
+ if (f_owner)
+ return 0;
+
+ f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
+ if (!f_owner)
+ return -ENOMEM;
+
+ rwlock_init(&f_owner->lock);
+ f_owner->file = file;
+ /* If someone else raced us, drop our allocation. */
+ if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner)))
+ kfree(f_owner);
+ return 0;
+}
+EXPORT_SYMBOL(file_f_owner_allocate);
+
+void file_f_owner_release(struct file *file)
+{
+ struct fown_struct *f_owner;
+
+ f_owner = file_f_owner(file);
+ if (f_owner) {
+ put_pid(f_owner->pid);
+ kfree(f_owner);
}
- write_unlock_irq(&filp->f_owner.lock);
}
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
{
- security_file_set_fowner(filp);
- f_modown(filp, pid, type, force);
+ struct fown_struct *f_owner;
+
+ f_owner = file_f_owner(filp);
+ if (WARN_ON_ONCE(!f_owner))
+ return;
+
+ write_lock_irq(&f_owner->lock);
+ if (force || !f_owner->pid) {
+ put_pid(f_owner->pid);
+ f_owner->pid = get_pid(pid);
+ f_owner->pid_type = type;
+
+ if (pid) {
+ const struct cred *cred = current_cred();
+ security_file_set_fowner(filp);
+ f_owner->uid = cred->uid;
+ f_owner->euid = cred->euid;
+ }
+ }
+ write_unlock_irq(&f_owner->lock);
}
EXPORT_SYMBOL(__f_setown);
-int f_setown(struct file *filp, unsigned long arg, int force)
+int f_setown(struct file *filp, int who, int force)
{
enum pid_type type;
struct pid *pid = NULL;
- int who = arg, ret = 0;
+ int ret = 0;
+
+ might_sleep();
type = PIDTYPE_TGID;
if (who < 0) {
@@ -127,6 +168,10 @@ int f_setown(struct file *filp, unsigned long arg, int force)
who = -who;
}
+ ret = file_f_owner_allocate(filp);
+ if (ret)
+ return ret;
+
rcu_read_lock();
if (who) {
pid = find_vpid(who);
@@ -144,22 +189,27 @@ EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
- f_modown(filp, NULL, PIDTYPE_TGID, 1);
+ __f_setown(filp, NULL, PIDTYPE_TGID, 1);
}
pid_t f_getown(struct file *filp)
{
pid_t pid = 0;
+ struct fown_struct *f_owner;
- read_lock_irq(&filp->f_owner.lock);
+ f_owner = file_f_owner(filp);
+ if (!f_owner)
+ return pid;
+
+ read_lock_irq(&f_owner->lock);
rcu_read_lock();
- if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
- pid = pid_vnr(filp->f_owner.pid);
- if (filp->f_owner.pid_type == PIDTYPE_PGID)
+ if (pid_task(f_owner->pid, f_owner->pid_type)) {
+ pid = pid_vnr(f_owner->pid);
+ if (f_owner->pid_type == PIDTYPE_PGID)
pid = -pid;
}
rcu_read_unlock();
- read_unlock_irq(&filp->f_owner.lock);
+ read_unlock_irq(&f_owner->lock);
return pid;
}
@@ -192,6 +242,10 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
return -EINVAL;
}
+ ret = file_f_owner_allocate(filp);
+ if (ret)
+ return ret;
+
rcu_read_lock();
pid = find_vpid(owner.pid);
if (owner.pid && !pid)
@@ -208,13 +262,20 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
struct f_owner_ex __user *owner_p = (void __user *)arg;
struct f_owner_ex owner = {};
int ret = 0;
+ struct fown_struct *f_owner;
+ enum pid_type pid_type = PIDTYPE_PID;
- read_lock_irq(&filp->f_owner.lock);
- rcu_read_lock();
- if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
- owner.pid = pid_vnr(filp->f_owner.pid);
- rcu_read_unlock();
- switch (filp->f_owner.pid_type) {
+ f_owner = file_f_owner(filp);
+ if (f_owner) {
+ read_lock_irq(&f_owner->lock);
+ rcu_read_lock();
+ if (pid_task(f_owner->pid, f_owner->pid_type))
+ owner.pid = pid_vnr(f_owner->pid);
+ rcu_read_unlock();
+ pid_type = f_owner->pid_type;
+ }
+
+ switch (pid_type) {
case PIDTYPE_PID:
owner.type = F_OWNER_TID;
break;
@@ -232,7 +293,8 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
ret = -EINVAL;
break;
}
- read_unlock_irq(&filp->f_owner.lock);
+ if (f_owner)
+ read_unlock_irq(&f_owner->lock);
if (!ret) {
ret = copy_to_user(owner_p, &owner, sizeof(owner));
@@ -246,14 +308,18 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
struct user_namespace *user_ns = current_user_ns();
+ struct fown_struct *f_owner;
uid_t __user *dst = (void __user *)arg;
- uid_t src[2];
+ uid_t src[2] = {0, 0};
int err;
- read_lock_irq(&filp->f_owner.lock);
- src[0] = from_kuid(user_ns, filp->f_owner.uid);
- src[1] = from_kuid(user_ns, filp->f_owner.euid);
- read_unlock_irq(&filp->f_owner.lock);
+ f_owner = file_f_owner(filp);
+ if (f_owner) {
+ read_lock_irq(&f_owner->lock);
+ src[0] = from_kuid(user_ns, f_owner->uid);
+ src[1] = from_kuid(user_ns, f_owner->euid);
+ read_unlock_irq(&f_owner->lock);
+ }
err = put_user(src[0], &dst[0]);
err |= put_user(src[1], &dst[1]);
@@ -267,8 +333,15 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
}
#endif
-static bool rw_hint_valid(enum rw_hint hint)
+static bool rw_hint_valid(u64 hint)
{
+ BUILD_BUG_ON(WRITE_LIFE_NOT_SET != RWH_WRITE_LIFE_NOT_SET);
+ BUILD_BUG_ON(WRITE_LIFE_NONE != RWH_WRITE_LIFE_NONE);
+ BUILD_BUG_ON(WRITE_LIFE_SHORT != RWH_WRITE_LIFE_SHORT);
+ BUILD_BUG_ON(WRITE_LIFE_MEDIUM != RWH_WRITE_LIFE_MEDIUM);
+ BUILD_BUG_ON(WRITE_LIFE_LONG != RWH_WRITE_LIFE_LONG);
+ BUILD_BUG_ON(WRITE_LIFE_EXTREME != RWH_WRITE_LIFE_EXTREME);
+
switch (hint) {
case RWH_WRITE_LIFE_NOT_SET:
case RWH_WRITE_LIFE_NONE:
@@ -282,78 +355,126 @@ static bool rw_hint_valid(enum rw_hint hint)
}
}
-static long fcntl_rw_hint(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long fcntl_get_rw_hint(struct file *file, unsigned long arg)
{
struct inode *inode = file_inode(file);
u64 __user *argp = (u64 __user *)arg;
- enum rw_hint hint;
- u64 h;
+ u64 hint = READ_ONCE(inode->i_write_hint);
- switch (cmd) {
- case F_GET_FILE_RW_HINT:
- h = file_write_hint(file);
- if (copy_to_user(argp, &h, sizeof(*argp)))
- return -EFAULT;
- return 0;
- case F_SET_FILE_RW_HINT:
- if (copy_from_user(&h, argp, sizeof(h)))
- return -EFAULT;
- hint = (enum rw_hint) h;
- if (!rw_hint_valid(hint))
- return -EINVAL;
+ if (copy_to_user(argp, &hint, sizeof(*argp)))
+ return -EFAULT;
+ return 0;
+}
- spin_lock(&file->f_lock);
- file->f_write_hint = hint;
- spin_unlock(&file->f_lock);
- return 0;
- case F_GET_RW_HINT:
- h = inode->i_write_hint;
- if (copy_to_user(argp, &h, sizeof(*argp)))
- return -EFAULT;
- return 0;
- case F_SET_RW_HINT:
- if (copy_from_user(&h, argp, sizeof(h)))
- return -EFAULT;
- hint = (enum rw_hint) h;
- if (!rw_hint_valid(hint))
- return -EINVAL;
+static long fcntl_set_rw_hint(struct file *file, unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ u64 __user *argp = (u64 __user *)arg;
+ u64 hint;
- inode_lock(inode);
- inode->i_write_hint = hint;
- inode_unlock(inode);
- return 0;
- default:
+ if (!inode_owner_or_capable(file_mnt_idmap(file), inode))
+ return -EPERM;
+
+ if (copy_from_user(&hint, argp, sizeof(hint)))
+ return -EFAULT;
+ if (!rw_hint_valid(hint))
return -EINVAL;
+
+ WRITE_ONCE(inode->i_write_hint, hint);
+
+ /*
+ * file->f_mapping->host may differ from inode. As an example,
+ * blkdev_open() modifies file->f_mapping.
+ */
+ if (file->f_mapping->host != inode)
+ WRITE_ONCE(file->f_mapping->host->i_write_hint, hint);
+
+ return 0;
+}
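
Illustration (not part of the patch): after this split, F_GET_RW_HINT/F_SET_RW_HINT
operate only on the inode hint (the per-file F_*_FILE_RW_HINT variants are dropped
further down in do_fcntl()), and setting it now requires inode ownership or CAP_FOWNER.
A userspace sketch; the fcntl commands and the RWH_* value come from linux/fcntl.h and
are guarded here with assumed numeric fallbacks:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    #ifndef F_GET_RW_HINT
    #define F_GET_RW_HINT 1035              /* assumed: F_LINUX_SPECIFIC_BASE + 11 */
    #define F_SET_RW_HINT 1036              /* assumed: F_LINUX_SPECIFIC_BASE + 12 */
    #endif
    #ifndef RWH_WRITE_LIFE_SHORT
    #define RWH_WRITE_LIFE_SHORT 2          /* assumed value from linux/fcntl.h */
    #endif

    int main(void)
    {
            int fd = open("/tmp/scratch.log", O_WRONLY | O_CREAT, 0644);
            uint64_t hint = RWH_WRITE_LIFE_SHORT;   /* data expected to be rewritten soon */

            if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
                    perror("F_SET_RW_HINT");        /* EPERM if we do not own the inode */

            hint = 0;
            if (fcntl(fd, F_GET_RW_HINT, &hint) == 0)
                    printf("inode write hint: %llu\n", (unsigned long long)hint);
            return 0;
    }
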
+
+/* Is the file descriptor a dup of the file? */
+static long f_dupfd_query(int fd, struct file *filp)
+{
+ CLASS(fd_raw, f)(fd);
+
+ if (fd_empty(f))
+ return -EBADF;
+
+ /*
+ * We can do the 'fdput()' immediately, as the only thing that
+ * matters is the pointer value which isn't changed by the fdput.
+ *
+ * Technically we didn't need a ref at all, and 'fdget()' was
+ * overkill, but given our lockless file pointer lookup, the
+ * alternatives are complicated.
+ */
+ return fd_file(f) == filp;
+}
+
+/* Let the caller figure out whether a given file was just created. */
+static long f_created_query(const struct file *filp)
+{
+ return !!(filp->f_mode & FMODE_CREATED);
+}
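
Illustration (not part of the patch): what the two query helpers above give userspace.
F_DUPFD_QUERY reports whether another descriptor refers to the same struct file, and
F_CREATED_QUERY reports whether this open() actually created the file (FMODE_CREATED).
The fallback values below are assumptions based on their F_LINUX_SPECIFIC_BASE offsets:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef F_DUPFD_QUERY
    #define F_DUPFD_QUERY 1027              /* assumed: F_LINUX_SPECIFIC_BASE + 3 */
    #endif
    #ifndef F_CREATED_QUERY
    #define F_CREATED_QUERY 1028            /* assumed: F_LINUX_SPECIFIC_BASE + 4 */
    #endif

    int main(void)
    {
            int fd = open("/tmp/demo-file", O_RDWR | O_CREAT, 0600);
            int other = dup(fd);

            /* 1: 'other' is a dup of 'fd' (same struct file). */
            printf("same file: %ld\n", (long)fcntl(fd, F_DUPFD_QUERY, other));

            /* 1: this open() created /tmp/demo-file, 0 if it already existed. */
            printf("created:   %ld\n", (long)fcntl(fd, F_CREATED_QUERY, 0));

            close(other);
            close(fd);
            return 0;
    }
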
+
+static int f_owner_sig(struct file *filp, int signum, bool setsig)
+{
+ int ret = 0;
+ struct fown_struct *f_owner;
+
+ might_sleep();
+
+ if (setsig) {
+ if (!valid_signal(signum))
+ return -EINVAL;
+
+ ret = file_f_owner_allocate(filp);
+ if (ret)
+ return ret;
}
+
+ f_owner = file_f_owner(filp);
+ if (setsig)
+ f_owner->signum = signum;
+ else if (f_owner)
+ ret = f_owner->signum;
+ return ret;
}
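
Illustration (not part of the patch): the classic signal-driven I/O setup that exercises
these paths. With f_owner now allocated lazily, the first F_SETOWN, F_SETOWN_EX or
F_SETSIG on a file is what triggers file_f_owner_allocate(). The device path below is
hypothetical:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_ready(int sig, siginfo_t *si, void *ctx)
    {
            /* si->si_fd identifies the descriptor that became ready. */
            (void)sig; (void)si; (void)ctx;
    }

    int main(void)
    {
            int fd = open("/dev/input/event0", O_RDONLY | O_NONBLOCK);
            struct sigaction sa = { .sa_sigaction = on_ready, .sa_flags = SA_SIGINFO };

            sigaction(SIGRTMIN, &sa, NULL);

            fcntl(fd, F_SETOWN, getpid());          /* deliver to this process */
            fcntl(fd, F_SETSIG, SIGRTMIN);          /* queue a realtime signal with siginfo */
            fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

            pause();
            return 0;
    }
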
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
void __user *argp = (void __user *)arg;
+ struct delegation deleg;
+ int argi = (int)arg;
struct flock flock;
long err = -EINVAL;
switch (cmd) {
+ case F_CREATED_QUERY:
+ err = f_created_query(filp);
+ break;
case F_DUPFD:
- err = f_dupfd(arg, filp, 0);
+ err = f_dupfd(argi, filp, 0);
break;
case F_DUPFD_CLOEXEC:
- err = f_dupfd(arg, filp, O_CLOEXEC);
+ err = f_dupfd(argi, filp, O_CLOEXEC);
+ break;
+ case F_DUPFD_QUERY:
+ err = f_dupfd_query(argi, filp);
break;
case F_GETFD:
err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
break;
case F_SETFD:
err = 0;
- set_close_on_exec(fd, arg & FD_CLOEXEC);
+ set_close_on_exec(fd, argi & FD_CLOEXEC);
break;
case F_GETFL:
err = filp->f_flags;
break;
case F_SETFL:
- err = setfl(fd, filp, arg);
+ err = setfl(fd, filp, argi);
break;
#if BITS_PER_LONG != 32
/* 32-bit arches must use fcntl64() */
@@ -390,7 +511,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
force_successful_syscall_return();
break;
case F_SETOWN:
- err = f_setown(filp, arg, 1);
+ err = f_setown(filp, argi, 1);
break;
case F_GETOWN_EX:
err = f_getown_ex(filp, arg);
@@ -402,38 +523,45 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
err = f_getowner_uids(filp, arg);
break;
case F_GETSIG:
- err = filp->f_owner.signum;
+ err = f_owner_sig(filp, 0, false);
break;
case F_SETSIG:
- /* arg == 0 restores default behaviour. */
- if (!valid_signal(arg)) {
- break;
- }
- err = 0;
- filp->f_owner.signum = arg;
+ err = f_owner_sig(filp, argi, true);
break;
case F_GETLEASE:
err = fcntl_getlease(filp);
break;
case F_SETLEASE:
- err = fcntl_setlease(fd, filp, arg);
+ err = fcntl_setlease(fd, filp, argi);
break;
case F_NOTIFY:
- err = fcntl_dirnotify(fd, filp, arg);
+ err = fcntl_dirnotify(fd, filp, argi);
break;
case F_SETPIPE_SZ:
case F_GETPIPE_SZ:
- err = pipe_fcntl(filp, cmd, arg);
+ err = pipe_fcntl(filp, cmd, argi);
break;
case F_ADD_SEALS:
case F_GET_SEALS:
- err = memfd_fcntl(filp, cmd, arg);
+ err = memfd_fcntl(filp, cmd, argi);
break;
case F_GET_RW_HINT:
+ err = fcntl_get_rw_hint(filp, arg);
+ break;
case F_SET_RW_HINT:
- case F_GET_FILE_RW_HINT:
- case F_SET_FILE_RW_HINT:
- err = fcntl_rw_hint(filp, cmd, arg);
+ err = fcntl_set_rw_hint(filp, arg);
+ break;
+ case F_GETDELEG:
+ if (copy_from_user(&deleg, argp, sizeof(deleg)))
+ return -EFAULT;
+ err = fcntl_getdeleg(filp, &deleg);
+ if (!err && copy_to_user(argp, &deleg, sizeof(deleg)))
+ return -EFAULT;
+ break;
+ case F_SETDELEG:
+ if (copy_from_user(&deleg, argp, sizeof(deleg)))
+ return -EFAULT;
+ err = fcntl_setdeleg(fd, filp, &deleg);
break;
default:
break;
@@ -444,8 +572,10 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
static int check_fcntl_cmd(unsigned cmd)
{
switch (cmd) {
+ case F_CREATED_QUERY:
case F_DUPFD:
case F_DUPFD_CLOEXEC:
+ case F_DUPFD_QUERY:
case F_GETFD:
case F_SETFD:
case F_GETFL:
@@ -456,24 +586,21 @@ static int check_fcntl_cmd(unsigned cmd)
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
- struct fd f = fdget_raw(fd);
- long err = -EBADF;
+ CLASS(fd_raw, f)(fd);
+ long err;
- if (!f.file)
- goto out;
+ if (fd_empty(f))
+ return -EBADF;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
- goto out1;
+ return -EBADF;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (!err)
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
-out1:
- fdput(f);
-out:
return err;
}
@@ -482,21 +609,21 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
unsigned long, arg)
{
void __user *argp = (void __user *)arg;
- struct fd f = fdget_raw(fd);
+ CLASS(fd_raw, f)(fd);
struct flock64 flock;
- long err = -EBADF;
+ long err;
- if (!f.file)
- goto out;
+ if (fd_empty(f))
+ return -EBADF;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
- goto out1;
+ return -EBADF;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
- goto out1;
+ return err;
switch (cmd) {
case F_GETLK64:
@@ -504,7 +631,7 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
err = -EFAULT;
if (copy_from_user(&flock, argp, sizeof(flock)))
break;
- err = fcntl_getlk64(f.file, cmd, &flock);
+ err = fcntl_getlk64(fd_file(f), cmd, &flock);
if (!err && copy_to_user(argp, &flock, sizeof(flock)))
err = -EFAULT;
break;
@@ -515,15 +642,12 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
err = -EFAULT;
if (copy_from_user(&flock, argp, sizeof(flock)))
break;
- err = fcntl_setlk64(fd, f.file, cmd, &flock);
+ err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
break;
default:
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
break;
}
-out1:
- fdput(f);
-out:
return err;
}
#endif
@@ -619,28 +743,28 @@ static int fixup_compat_flock(struct flock *flock)
static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
compat_ulong_t arg)
{
- struct fd f = fdget_raw(fd);
+ CLASS(fd_raw, f)(fd);
struct flock flock;
- long err = -EBADF;
+ long err;
- if (!f.file)
- return err;
+ if (fd_empty(f))
+ return -EBADF;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
- goto out_put;
+ return -EBADF;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
- goto out_put;
+ return err;
switch (cmd) {
case F_GETLK:
err = get_compat_flock(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
if (err)
break;
err = fixup_compat_flock(&flock);
@@ -652,7 +776,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock64(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
if (!err)
err = put_compat_flock64(&flock, compat_ptr(arg));
break;
@@ -661,7 +785,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
break;
case F_SETLK64:
case F_SETLKW64:
@@ -670,14 +794,12 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock64(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
break;
default:
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
break;
}
-out_put:
- fdput(f);
return err;
}
@@ -824,14 +946,19 @@ static void send_sigurg_to_task(struct task_struct *p,
do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}
-int send_sigurg(struct fown_struct *fown)
+int send_sigurg(struct file *file)
{
+ struct fown_struct *fown;
struct task_struct *p;
enum pid_type type;
struct pid *pid;
unsigned long flags;
int ret = 0;
+ fown = file_f_owner(file);
+ if (!fown)
+ return 0;
+
read_lock_irqsave(&fown->lock, flags);
type = fown->pid_type;
@@ -860,13 +987,7 @@ int send_sigurg(struct fown_struct *fown)
}
static DEFINE_SPINLOCK(fasync_lock);
-static struct kmem_cache *fasync_cache __read_mostly;
-
-static void fasync_free_rcu(struct rcu_head *head)
-{
- kmem_cache_free(fasync_cache,
- container_of(head, struct fasync_struct, fa_rcu));
-}
+static struct kmem_cache *fasync_cache __ro_after_init;
/*
* Remove a fasync entry. If successfully removed, return
@@ -893,7 +1014,7 @@ int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
write_unlock_irq(&fa->fa_lock);
*fp = fa->fa_next;
- call_rcu(&fa->fa_rcu, fasync_free_rcu);
+ kfree_rcu(fa, fa_rcu);
filp->f_flags &= ~FASYNC;
result = 1;
break;
@@ -1013,13 +1134,16 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
}
read_lock_irqsave(&fa->fa_lock, flags);
if (fa->fa_file) {
- fown = &fa->fa_file->f_owner;
+ fown = file_f_owner(fa->fa_file);
+ if (!fown)
+ goto next;
/* Don't send SIGURG to processes which have not set a
queued signum: SIGURG has its own default signalling
mechanism. */
if (!(sig == SIGURG && fown->signum == 0))
send_sigio(fown, fa->fa_fd, band);
}
+next:
read_unlock_irqrestore(&fa->fa_lock, flags);
fa = rcu_dereference(fa->fa_next);
}
@@ -1045,10 +1169,10 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ !=
HWEIGHT32(
(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
- __FMODE_EXEC | __FMODE_NONOTIFY));
+ __FMODE_EXEC));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0,
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 6630c69c23a2..3de1547ec9d4 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -11,12 +11,14 @@
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
+#include <linux/nsfs.h>
#include "internal.h"
#include "mount.h"
-static long do_sys_name_to_handle(struct path *path,
+static long do_sys_name_to_handle(const struct path *path,
struct file_handle __user *ufh,
- int __user *mnt_id)
+ void __user *mnt_id, bool unique_mntid,
+ int fh_flags)
{
long retval;
struct file_handle f_handle;
@@ -24,20 +26,27 @@ static long do_sys_name_to_handle(struct path *path,
struct file_handle *handle = NULL;
/*
- * We need to make sure whether the file system
- * support decoding of the file handle
+ * We need to check whether the file system supports decoding of
- * support decoding of the file handle
+ * the file handle if a decodable file handle was requested.
- if (!path->dentry->d_sb->s_export_op ||
- !path->dentry->d_sb->s_export_op->fh_to_dentry)
+ if (!exportfs_can_encode_fh(path->dentry->d_sb->s_export_op, fh_flags))
return -EOPNOTSUPP;
+ /*
+ * A request to encode a connectable handle for a disconnected dentry
+ * is unexpected since AT_EMPTY_PATH is not allowed.
+ */
+ if (fh_flags & EXPORT_FH_CONNECTABLE &&
+ WARN_ON(path->dentry->d_flags & DCACHE_DISCONNECTED))
+ return -EINVAL;
+
if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
return -EFAULT;
if (f_handle.handle_bytes > MAX_HANDLE_SZ)
return -EINVAL;
- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ handle = kzalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -45,33 +54,59 @@ static long do_sys_name_to_handle(struct path *path,
/* convert handle size to multiple of sizeof(u32) */
handle_dwords = f_handle.handle_bytes >> 2;
- /* we ask for a non connected handle */
+ /* Encode a possibly decodeable/connectable file handle */
retval = exportfs_encode_fh(path->dentry,
(struct fid *)handle->f_handle,
- &handle_dwords, 0);
+ &handle_dwords, fh_flags);
handle->handle_type = retval;
/* convert handle size to bytes */
handle_bytes = handle_dwords * sizeof(u32);
handle->handle_bytes = handle_bytes;
if ((handle->handle_bytes > f_handle.handle_bytes) ||
- (retval == FILEID_INVALID) || (retval == -ENOSPC)) {
+ (retval == FILEID_INVALID) || (retval < 0)) {
/* As per old exportfs_encode_fh documentation
* we could return ENOSPC to indicate overflow
* But file system returned 255 always. So handle
* both the values
*/
+ if (retval == FILEID_INVALID || retval == -ENOSPC)
+ retval = -EOVERFLOW;
/*
* set the handle size to zero so we copy only
* non variable part of the file_handle
*/
handle_bytes = 0;
- retval = -EOVERFLOW;
- } else
+ } else {
+ /*
+ * When asked to encode a connectable file handle, encode this
+ * property in the file handle itself, so that we later know
+ * how to decode it.
+ * For sanity, also encode in the file handle if the encoded
+ * object is a directory and verify this during decode, because
+ * decoding directory file handles is quite different than
+ * decoding connectable non-directory file handles.
+ */
+ if (fh_flags & EXPORT_FH_CONNECTABLE) {
+ handle->handle_type |= FILEID_IS_CONNECTABLE;
+ if (d_is_dir(path->dentry))
+ handle->handle_type |= FILEID_IS_DIR;
+ }
retval = 0;
+ }
/* copy the mount id */
- if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
- copy_to_user(ufh, handle,
- sizeof(struct file_handle) + handle_bytes))
+ if (unique_mntid) {
+ if (put_user(real_mount(path->mnt)->mnt_id_unique,
+ (u64 __user *) mnt_id))
+ retval = -EFAULT;
+ } else {
+ if (put_user(real_mount(path->mnt)->mnt_id,
+ (int __user *) mnt_id))
+ retval = -EFAULT;
+ }
+ /* copy the handle */
+ if (retval != -EFAULT &&
+ copy_to_user(ufh, handle,
+ struct_size(handle, f_handle, handle_bytes)))
retval = -EFAULT;
kfree(handle);
return retval;
@@ -83,7 +118,9 @@ static long do_sys_name_to_handle(struct path *path,
* @name: name that should be converted to handle.
* @handle: resulting file handle
* @mnt_id: mount id of the file system containing the file
+ * (u64 if AT_HANDLE_MNT_ID_UNIQUE, otherwise int)
* @flag: flag value to indicate whether to follow symlink or not
+ * and whether a decodable file handle is required.
*
* @handle->handle_size indicate the space available to store the
* variable part of the file handle in bytes. If there is not
@@ -91,109 +128,252 @@ static long do_sys_name_to_handle(struct path *path,
* value required.
*/
SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
- struct file_handle __user *, handle, int __user *, mnt_id,
+ struct file_handle __user *, handle, void __user *, mnt_id,
int, flag)
{
struct path path;
int lookup_flags;
+ int fh_flags = 0;
int err;
- if ((flag & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
+ if (flag & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH | AT_HANDLE_FID |
+ AT_HANDLE_MNT_ID_UNIQUE | AT_HANDLE_CONNECTABLE))
+ return -EINVAL;
+
+ /*
+ * AT_HANDLE_FID means there is no intention to decode file handle
+ * AT_HANDLE_CONNECTABLE means there is an intention to decode a
+ * connected fd (with known path), so these flags are conflicting.
+ * AT_EMPTY_PATH could be used along with a dfd that refers to a
+ * disconnected non-directory, which cannot be used to encode a
+ * connectable file handle, because its parent is unknown.
+ */
+ if (flag & AT_HANDLE_CONNECTABLE &&
+ flag & (AT_HANDLE_FID | AT_EMPTY_PATH))
return -EINVAL;
+ else if (flag & AT_HANDLE_FID)
+ fh_flags |= EXPORT_FH_FID;
+ else if (flag & AT_HANDLE_CONNECTABLE)
+ fh_flags |= EXPORT_FH_CONNECTABLE;
lookup_flags = (flag & AT_SYMLINK_FOLLOW) ? LOOKUP_FOLLOW : 0;
if (flag & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
err = user_path_at(dfd, name, lookup_flags, &path);
if (!err) {
- err = do_sys_name_to_handle(&path, handle, mnt_id);
+ err = do_sys_name_to_handle(&path, handle, mnt_id,
+ flag & AT_HANDLE_MNT_ID_UNIQUE,
+ fh_flags);
path_put(&path);
}
return err;
}
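
Illustration (not part of the patch): requesting an FID-only handle together with the
64-bit unique mount id. A sketch; glibc exposes name_to_handle_at() and struct file_handle
under _GNU_SOURCE, and the two AT_HANDLE_* fallback values below are assumptions taken
from the flag handling above:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #ifndef AT_HANDLE_FID
    #define AT_HANDLE_FID 0x200             /* assumed: object identity only, no decoding */
    #endif
    #ifndef AT_HANDLE_MNT_ID_UNIQUE
    #define AT_HANDLE_MNT_ID_UNIQUE 0x001   /* assumed: mnt_id is written as a u64 */
    #endif

    int main(void)
    {
            struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
            uint64_t mnt_id = 0;

            fh->handle_bytes = MAX_HANDLE_SZ;
            if (name_to_handle_at(AT_FDCWD, "/etc/hostname", fh, (int *)&mnt_id,
                                  AT_HANDLE_FID | AT_HANDLE_MNT_ID_UNIQUE) < 0) {
                    perror("name_to_handle_at");
                    return 1;
            }
            printf("type=%d bytes=%u unique mnt_id=%llu\n", fh->handle_type,
                   fh->handle_bytes, (unsigned long long)mnt_id);
            free(fh);
            return 0;
    }
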
-static struct vfsmount *get_vfsmount_from_fd(int fd)
+static int get_path_anchor(int fd, struct path *root)
{
- struct vfsmount *mnt;
+ if (fd >= 0) {
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
+ return -EBADF;
+ *root = fd_file(f)->f_path;
+ path_get(root);
+ return 0;
+ }
if (fd == AT_FDCWD) {
- struct fs_struct *fs = current->fs;
- spin_lock(&fs->lock);
- mnt = mntget(fs->pwd.mnt);
- spin_unlock(&fs->lock);
- } else {
- struct fd f = fdget(fd);
- if (!f.file)
- return ERR_PTR(-EBADF);
- mnt = mntget(f.file->f_path.mnt);
- fdput(f);
+ get_fs_pwd(current->fs, root);
+ return 0;
}
- return mnt;
+
+ if (fd == FD_PIDFS_ROOT) {
+ pidfs_get_root(root);
+ return 0;
+ }
+
+ if (fd == FD_NSFS_ROOT) {
+ nsfs_get_root(root);
+ return 0;
+ }
+
+ return -EBADF;
}
static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
{
- return 1;
+ struct handle_to_path_ctx *ctx = context;
+ struct user_namespace *user_ns = current_user_ns();
+ struct dentry *d, *root = ctx->root.dentry;
+ struct mnt_idmap *idmap = mnt_idmap(ctx->root.mnt);
+ int retval = 0;
+
+ if (!root)
+ return 1;
+
+ /* Old permission model with global CAP_DAC_READ_SEARCH. */
+ if (!ctx->flags)
+ return 1;
+
+ /*
+ * Verify that the decoded dentry itself has a valid id mapping.
+ * In case the decoded dentry is the mountfd root itself, this
+ * verifies that the mountfd inode itself has a valid id mapping.
+ */
+ if (!privileged_wrt_inode_uidgid(user_ns, idmap, d_inode(dentry)))
+ return 0;
+
+ /*
+ * It's racy as we're not taking rename_lock but we're able to ignore
+ * permissions and we just need an approximation whether we were able
+ * to follow a path to the file.
+ *
+ * It's also potentially expensive on some filesystems especially if
+ * there is a deep path.
+ */
+ d = dget(dentry);
+ while (d != root && !IS_ROOT(d)) {
+ struct dentry *parent = dget_parent(d);
+
+ /*
+ * We know that we have the ability to override DAC permissions
+ * as we've verified this earlier via CAP_DAC_READ_SEARCH. But
+ * we also need to make sure that there aren't any unmapped
+ * inodes in the path that would prevent us from reaching the
+ * file.
+ */
+ if (!privileged_wrt_inode_uidgid(user_ns, idmap,
+ d_inode(parent))) {
+ dput(d);
+ dput(parent);
+ return retval;
+ }
+
+ dput(d);
+ d = parent;
+ }
+
+ if (!(ctx->flags & HANDLE_CHECK_SUBTREE) || d == root)
+ retval = 1;
+ /*
+ * exportfs_decode_fh_raw() does not call acceptable() callback with
+ * a disconnected directory dentry, so we should have reached either
+ * mount fd directory or sb root.
+ */
+ if (ctx->fh_flags & EXPORT_FH_DIR_ONLY)
+ WARN_ON_ONCE(d != root && d != root->d_sb->s_root);
+ dput(d);
+ return retval;
}
-static int do_handle_to_path(int mountdirfd, struct file_handle *handle,
- struct path *path)
+static int do_handle_to_path(struct file_handle *handle, struct path *path,
+ struct handle_to_path_ctx *ctx)
{
- int retval = 0;
int handle_dwords;
+ struct vfsmount *mnt = ctx->root.mnt;
+ struct dentry *dentry;
- path->mnt = get_vfsmount_from_fd(mountdirfd);
- if (IS_ERR(path->mnt)) {
- retval = PTR_ERR(path->mnt);
- goto out_err;
- }
/* change the handle size to multiple of sizeof(u32) */
handle_dwords = handle->handle_bytes >> 2;
- path->dentry = exportfs_decode_fh(path->mnt,
- (struct fid *)handle->f_handle,
- handle_dwords, handle->handle_type,
- vfs_dentry_acceptable, NULL);
- if (IS_ERR(path->dentry)) {
- retval = PTR_ERR(path->dentry);
- goto out_mnt;
+ dentry = exportfs_decode_fh_raw(mnt, (struct fid *)handle->f_handle,
+ handle_dwords, handle->handle_type,
+ ctx->fh_flags, vfs_dentry_acceptable,
+ ctx);
+ if (IS_ERR_OR_NULL(dentry)) {
+ if (dentry == ERR_PTR(-ENOMEM))
+ return -ENOMEM;
+ return -ESTALE;
}
+ path->dentry = dentry;
+ path->mnt = mntget(mnt);
+ return 0;
+}
+
+static inline int may_decode_fh(struct handle_to_path_ctx *ctx,
+ unsigned int o_flags)
+{
+ struct path *root = &ctx->root;
+
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
+
+ /*
+ * Allow relaxed permissions of file handles if the caller has
+ * the ability to mount the filesystem or create a bind-mount of
+ * the provided @mountdirfd.
+ *
+ * In both cases the caller may be able to get an unobstructed
+ * way to the encoded file handle. If the caller is only able to
+ * create a bind-mount we need to verify that there are no
+ * locked mounts on top of it that could prevent us from getting
+ * to the encoded file.
+ *
+ * In principle, locked mounts can prevent the caller from
+ * mounting the filesystem but that only applies to procfs and
+ * sysfs neither of which support decoding file handles.
+ *
+ * Restrict to O_DIRECTORY to provide a deterministic API that
+ * avoids a confusing api in the face of disconnected non-dir
+ * dentries.
+ *
+ * There's only one dentry for each directory inode (VFS rule)...
+ */
+ if (!(o_flags & O_DIRECTORY))
+ return -EPERM;
+
+ if (ns_capable(root->mnt->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
+ ctx->flags = HANDLE_CHECK_PERMS;
+ else if (is_mounted(root->mnt) &&
+ ns_capable(real_mount(root->mnt)->mnt_ns->user_ns,
+ CAP_SYS_ADMIN) &&
+ !has_locked_children(real_mount(root->mnt), root->dentry))
+ ctx->flags = HANDLE_CHECK_PERMS | HANDLE_CHECK_SUBTREE;
+ else
+ return -EPERM;
+
+ /* Are we able to override DAC permissions? */
+ if (!ns_capable(current_user_ns(), CAP_DAC_READ_SEARCH))
+ return -EPERM;
+
+ ctx->fh_flags = EXPORT_FH_DIR_ONLY;
return 0;
-out_mnt:
- mntput(path->mnt);
-out_err:
- return retval;
}
static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
- struct path *path)
+ struct path *path, unsigned int o_flags)
{
int retval = 0;
struct file_handle f_handle;
- struct file_handle *handle = NULL;
+ struct file_handle *handle __free(kfree) = NULL;
+ struct handle_to_path_ctx ctx = {};
+ const struct export_operations *eops;
+
+ if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
+ return -EFAULT;
- /*
- * With handle we don't look at the execute bit on the
- * directory. Ideally we would like CAP_DAC_SEARCH.
- * But we don't have that
- */
- if (!capable(CAP_DAC_READ_SEARCH)) {
- retval = -EPERM;
- goto out_err;
- }
- if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) {
- retval = -EFAULT;
- goto out_err;
- }
if ((f_handle.handle_bytes > MAX_HANDLE_SZ) ||
- (f_handle.handle_bytes == 0)) {
- retval = -EINVAL;
- goto out_err;
- }
- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ (f_handle.handle_bytes == 0))
+ return -EINVAL;
+
+ if (f_handle.handle_type < 0 ||
+ FILEID_USER_FLAGS(f_handle.handle_type) & ~FILEID_VALID_USER_FLAGS)
+ return -EINVAL;
+
+ retval = get_path_anchor(mountdirfd, &ctx.root);
+ if (retval)
+ return retval;
+
+ eops = ctx.root.mnt->mnt_sb->s_export_op;
+ if (eops && eops->permission)
+ retval = eops->permission(&ctx, o_flags);
+ else
+ retval = may_decode_fh(&ctx, o_flags);
+ if (retval)
+ goto out_path;
+
+ handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
if (!handle) {
retval = -ENOMEM;
- goto out_err;
+ goto out_path;
}
/* copy the full handle */
*handle = f_handle;
@@ -201,45 +381,51 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
&ufh->f_handle,
f_handle.handle_bytes)) {
retval = -EFAULT;
- goto out_handle;
+ goto out_path;
}
- retval = do_handle_to_path(mountdirfd, handle, path);
+ /*
+ * If handle was encoded with AT_HANDLE_CONNECTABLE, verify that we
+ * are decoding an fd with connected path, which is accessible from
+ * the mount fd path.
+ */
+ if (f_handle.handle_type & FILEID_IS_CONNECTABLE) {
+ ctx.fh_flags |= EXPORT_FH_CONNECTABLE;
+ ctx.flags |= HANDLE_CHECK_SUBTREE;
+ }
+ if (f_handle.handle_type & FILEID_IS_DIR)
+ ctx.fh_flags |= EXPORT_FH_DIR_ONLY;
+ /* Filesystem code should not be exposed to user flags */
+ handle->handle_type &= ~FILEID_USER_FLAGS_MASK;
+ retval = do_handle_to_path(handle, path, &ctx);
-out_handle:
- kfree(handle);
-out_err:
+out_path:
+ path_put(&ctx.root);
return retval;
}
+static struct file *file_open_handle(struct path *path, int open_flag)
+{
+ const struct export_operations *eops;
+
+ eops = path->mnt->mnt_sb->s_export_op;
+ if (eops->open)
+ return eops->open(path, open_flag);
+
+ return file_open_root(path, "", open_flag, 0);
+}
+
static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
int open_flag)
{
- long retval = 0;
- struct path path;
- struct file *file;
- int fd;
+ long retval;
+ struct path path __free(path_put) = {};
- retval = handle_to_path(mountdirfd, ufh, &path);
+ retval = handle_to_path(mountdirfd, ufh, &path, open_flag);
if (retval)
return retval;
- fd = get_unused_fd_flags(open_flag);
- if (fd < 0) {
- path_put(&path);
- return fd;
- }
- file = file_open_root(&path, "", open_flag, 0);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- retval = PTR_ERR(file);
- } else {
- retval = fd;
- fsnotify_open(file);
- fd_install(fd, file);
- }
- path_put(&path);
- return retval;
+ return FD_ADD(open_flag, file_open_handle(&path, open_flag));
}
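
Illustration (not part of the patch): the consumer side. open_by_handle_at() keeps working
as before for CAP_DAC_READ_SEARCH holders; the new may_decode_fh() path additionally lets
a caller with sufficient privilege over the mount fd's namespace decode handles, but only
when opening with O_DIRECTORY. A sketch, reusing a handle previously encoded for a
directory (e.g. /etc):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    /* mount_fd: e.g. open("/etc", O_PATH | O_DIRECTORY) on the same filesystem. */
    static int reopen_dir_from_handle(int mount_fd, struct file_handle *fh)
    {
            int fd = open_by_handle_at(mount_fd, fh, O_RDONLY | O_DIRECTORY);

            if (fd < 0)
                    perror("open_by_handle_at");    /* EPERM without sufficient privilege */
            return fd;
    }
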
/**
diff --git a/fs/file.c b/fs/file.c
index 8627dacfc424..0a4f3bdb2dec 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -20,10 +20,79 @@
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
+#include <linux/file_ref.h>
#include <net/sock.h>
+#include <linux/init_task.h>
#include "internal.h"
+static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
+{
+ /*
+ * If the reference count was already in the dead zone, then this
+ * put() operation is imbalanced. Warn, put the reference count back to
+ * DEAD and tell the caller to not deconstruct the object.
+ */
+ if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
+ atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
+ return false;
+ }
+
+ /*
+ * This is a put() operation on a saturated refcount. Restore the
+ * mean saturation value and tell the caller to not deconstruct the
+ * object.
+ */
+ if (cnt > FILE_REF_MAXREF)
+ atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
+ return false;
+}
+
+/**
+ * __file_ref_put - Slowpath of file_ref_put()
+ * @ref: Pointer to the reference count
+ * @cnt: Current reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
+{
+ /* Did this drop the last reference? */
+ if (likely(cnt == FILE_REF_NOREF)) {
+ /*
+ * Carefully try to set the reference count to FILE_REF_DEAD.
+ *
+ * This can fail if a concurrent get() operation has
+ * elevated it again or the corresponding put() even marked
+ * it dead already. Both are valid situations and do not
+ * require a retry. If this fails the caller is not
+ * allowed to deconstruct the object.
+ */
+ if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD))
+ return false;
+
+ /*
+ * The caller can safely schedule the object for
+ * deconstruction. Provide acquire ordering.
+ */
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ return __file_ref_put_badval(ref, cnt);
+}
+EXPORT_SYMBOL_GPL(__file_ref_put);
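
Illustration (not part of the patch): the expected caller pattern around this slowpath,
assuming the file_ref_get()/file_ref_put() fastpath helpers that linux/file_ref.h adds in
the same series. Only the final put returns true, and only then may the object be freed:

    struct cached_thing {
            file_ref_t ref;
            /* ... payload ... */
    };

    static void cached_thing_put(struct cached_thing *c)
    {
            /* file_ref_put() falls back to __file_ref_put() outside the valid zone. */
            if (file_ref_put(&c->ref))
                    kfree(c);       /* last reference: safe to free (or queue for RCU) */
    }

    static bool cached_thing_tryget(struct cached_thing *c)
    {
            /* Fails once the count has been marked FILE_REF_DEAD by the last put. */
            return file_ref_get(&c->ref);
    }
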
+
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
@@ -46,27 +115,23 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
#define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
+#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
* Copy 'count' fd bits from the old table to the new table and clear the extra
* space if any. This does not copy the file pointers. Called with the files
* spinlock held for write.
*/
-static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
- unsigned int count)
+static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+ unsigned int copy_words)
{
- unsigned int cpy, set;
-
- cpy = count / BITS_PER_BYTE;
- set = (nfdt->max_fds - count) / BITS_PER_BYTE;
- memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
- memset((char *)nfdt->open_fds + cpy, 0, set);
- memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
- memset((char *)nfdt->close_on_exec + cpy, 0, set);
-
- cpy = BITBIT_SIZE(count);
- set = BITBIT_SIZE(nfdt->max_fds) - cpy;
- memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
- memset((char *)nfdt->full_fds_bits + cpy, 0, set);
+ unsigned int nwords = fdt_words(nfdt);
+
+ bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
+ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
+ bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
+ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
+ bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
+ copy_words, nwords);
}
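
Illustration (not part of the patch): what the three memcpy()/memset() pairs collapse
into. Assuming the usual semantics of bitmap_copy_and_extend() (copy the low bits, then
zero-fill the remainder of the destination), the helper behaves roughly like:

    /* Rough equivalent, for illustration only. */
    static inline void copy_and_extend(unsigned long *dst, const unsigned long *src,
                                       unsigned int count, unsigned int size)
    {
            bitmap_copy(dst, src, count);                   /* first 'count' bits      */
            if (size > count)
                    bitmap_clear(dst, count, size - count); /* zero bits count..size-1 */
    }

Working in whole words via fdt_words() keeps both arguments multiples of BITS_PER_LONG,
matching how the fd bitmaps are sized.
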
/*
@@ -84,12 +149,20 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
memcpy(nfdt->fd, ofdt->fd, cpy);
memset((char *)nfdt->fd + cpy, 0, set);
- copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
+ copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}
-static struct fdtable * alloc_fdtable(unsigned int nr)
+/*
+ * Note how the fdtable bitmap allocations very much have to be a multiple of
+ * BITS_PER_LONG. This is not only because we walk those things in chunks of
+ * 'unsigned long' in some places, but simply because that is how the Linux
+ * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
+ * they are very much "bits in an array of unsigned long".
+ */
+static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
{
struct fdtable *fdt;
+ unsigned int nr;
void *data;
/*
@@ -97,21 +170,47 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
* Allocation steps are keyed to the size of the fdarray, since it
* grows far faster than any of the other dynamic data. We try to fit
* the fdarray into comfortable page-tuned chunks: starting at 1024B
- * and growing in powers of two from there on.
+	 * and growing in powers of two from there on. Since we are called only
+ * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab
+ * already gives BITS_PER_LONG slots), the above boils down to
+ * 1. use the smallest power of two large enough to give us that many
+ * slots.
+ * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is
+ * 256 slots (i.e. 1Kb fd array).
+ * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there
+ * and we are never going to be asked for 64 or less.
*/
- nr /= (1024 / sizeof(struct file *));
- nr = roundup_pow_of_two(nr + 1);
- nr *= (1024 / sizeof(struct file *));
+ if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
+ nr = 256;
+ else
+ nr = roundup_pow_of_two(slots_wanted);
/*
* Note that this can drive nr *below* what we had passed if sysctl_nr_open
- * had been set lower between the check in expand_files() and here. Deal
- * with that in caller, it's cheaper that way.
+ * had been set lower between the check in expand_files() and here.
*
* We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
* bitmaps handling below becomes unpleasant, to put it mildly...
*/
- if (unlikely(nr > sysctl_nr_open))
- nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
+ if (unlikely(nr > sysctl_nr_open)) {
+ nr = round_down(sysctl_nr_open, BITS_PER_LONG);
+ if (nr < slots_wanted)
+ return ERR_PTR(-EMFILE);
+ }
+
+ /*
+ * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
+ * and kvmalloc() will warn if the allocation size is greater than
+ * INT_MAX, as filp_cachep objects are not __GFP_NOWARN.
+ *
+ * This can happen when sysctl_nr_open is set to a very high value and
+ * a process tries to use a file descriptor near that limit. For example,
+ * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what
+ * systemd typically sets it to - then trying to use a file descriptor
+ * close to that value will require allocating a file descriptor table
+ * that exceeds 8GB in size.
+ */
+ if (unlikely(nr > INT_MAX / sizeof(struct file *)))
+ return ERR_PTR(-EMFILE);
fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
if (!fdt)
@@ -140,14 +239,14 @@ out_arr:
out_fdt:
kfree(fdt);
out:
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
/*
* Expand the file descriptor table.
* This function will allocate a new fdtable and both fd array and fdset, of
* the given size.
- * Return <0 error code on error; 1 on successful completion.
+ * Return <0 error code on error; 0 on successful completion.
* The files->file_lock should be held on entry, and will be held on exit.
*/
static int expand_fdtable(struct files_struct *files, unsigned int nr)
@@ -157,7 +256,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
struct fdtable *new_fdt, *cur_fdt;
spin_unlock(&files->file_lock);
- new_fdt = alloc_fdtable(nr);
+ new_fdt = alloc_fdtable(nr + 1);
/* make sure all fd_install() have seen resize_in_progress
* or have finished their rcu_read_lock_sched() section.
@@ -166,16 +265,8 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
synchronize_rcu();
spin_lock(&files->file_lock);
- if (!new_fdt)
- return -ENOMEM;
- /*
- * extremely unlikely race - sysctl_nr_open decreased between the check in
- * caller and alloc_fdtable(). Cheaper to catch it here...
- */
- if (unlikely(new_fdt->max_fds <= nr)) {
- __free_fdtable(new_fdt);
- return -EMFILE;
- }
+ if (IS_ERR(new_fdt))
+ return PTR_ERR(new_fdt);
cur_fdt = files_fdtable(files);
BUG_ON(nr < cur_fdt->max_fds);
copy_fdtable(new_fdt, cur_fdt);
@@ -184,15 +275,14 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
/* coupled with smp_rmb() in fd_install() */
smp_wmb();
- return 1;
+ return 0;
}
/*
* Expand files.
* This function will expand the file structures, if the requested size exceeds
* the current capacity and there is room for expansion.
- * Return <0 error code on error; 0 when nothing done; 1 when files were
- * expanded and execution may have blocked.
+ * Return <0 error code on error; 0 on success.
* The files->file_lock should be held on entry, and will be held on exit.
*/
static int expand_files(struct files_struct *files, unsigned int nr)
@@ -200,50 +290,50 @@ static int expand_files(struct files_struct *files, unsigned int nr)
__acquires(files->file_lock)
{
struct fdtable *fdt;
- int expanded = 0;
+ int error;
repeat:
fdt = files_fdtable(files);
/* Do we need to expand? */
if (nr < fdt->max_fds)
- return expanded;
-
- /* Can we expand? */
- if (nr >= sysctl_nr_open)
- return -EMFILE;
+ return 0;
if (unlikely(files->resize_in_progress)) {
spin_unlock(&files->file_lock);
- expanded = 1;
wait_event(files->resize_wait, !files->resize_in_progress);
spin_lock(&files->file_lock);
goto repeat;
}
+ /* Can we expand? */
+ if (unlikely(nr >= sysctl_nr_open))
+ return -EMFILE;
+
/* All good, so we try */
files->resize_in_progress = true;
- expanded = expand_fdtable(files, nr);
+ error = expand_fdtable(files, nr);
files->resize_in_progress = false;
wake_up_all(&files->resize_wait);
- return expanded;
-}
-
-static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
-{
- __set_bit(fd, fdt->close_on_exec);
+ return error;
}
-static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
+static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt,
+ bool set)
{
- if (test_bit(fd, fdt->close_on_exec))
- __clear_bit(fd, fdt->close_on_exec);
+ if (set) {
+ __set_bit(fd, fdt->close_on_exec);
+ } else {
+ if (test_bit(fd, fdt->close_on_exec))
+ __clear_bit(fd, fdt->close_on_exec);
+ }
}
-static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
+static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set)
{
__set_bit(fd, fdt->open_fds);
+ __set_close_on_exec(fd, fdt, set);
fd /= BITS_PER_LONG;
if (!~fdt->open_fds[fd])
__set_bit(fd, fdt->full_fds_bits);
@@ -252,49 +342,54 @@ static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
__clear_bit(fd, fdt->open_fds);
- __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
+ fd /= BITS_PER_LONG;
+ if (test_bit(fd, fdt->full_fds_bits))
+ __clear_bit(fd, fdt->full_fds_bits);
}
-static unsigned int count_open_files(struct fdtable *fdt)
+static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
- unsigned int size = fdt->max_fds;
- unsigned int i;
-
- /* Find the last open fd */
- for (i = size / BITS_PER_LONG; i > 0; ) {
- if (fdt->open_fds[--i])
- break;
- }
- i = (i + 1) * BITS_PER_LONG;
- return i;
+ return test_bit(fd, fdt->open_fds);
}
-static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
+/*
+ * Note that a sane fdtable size always has to be a multiple of
+ * BITS_PER_LONG, since we have bitmaps that are sized by this.
+ *
+ * punch_hole is optional - when close_range() is asked to unshare
+ * and close, we don't need to copy descriptors in that range, so
+ * a smaller cloned descriptor table might suffice if the last
+ * currently opened descriptor falls into that range.
+ */
+static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
- unsigned int count;
-
- count = count_open_files(fdt);
- if (max_fds < NR_OPEN_DEFAULT)
- max_fds = NR_OPEN_DEFAULT;
- return min(count, max_fds);
+ unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);
+
+ if (last == fdt->max_fds)
+ return NR_OPEN_DEFAULT;
+ if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
+ last = find_last_bit(fdt->open_fds, punch_hole->from);
+ if (last == punch_hole->from)
+ return NR_OPEN_DEFAULT;
+ }
+ return ALIGN(last + 1, BITS_PER_LONG);
}
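
To make the sizing rule above concrete, here is a minimal userspace sketch (illustrative only; sane_size() is a made-up stand-in for sane_fdtable_size() without the punch_hole handling): the clone is sized just past the highest open descriptor, rounded up to a whole bitmap word.

#include <stdio.h>

#define BITS_PER_LONG	64
#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

/* Hypothetical mirror of the rule: round up past the last open fd. */
static unsigned int sane_size(unsigned int last_open_fd)
{
	return ALIGN(last_open_fd + 1, BITS_PER_LONG);
}

int main(void)
{
	printf("%u\n", sane_size(70));	/* 128: two bitmap words */
	printf("%u\n", sane_size(500));	/* 512 */
	return 0;
}
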
/*
- * Allocate a new files structure and copy contents from the
- * passed in files structure.
- * errorp will be valid only when the returned files_struct is NULL.
+ * Allocate a new descriptor table and copy contents from the passed in
+ * instance. Returns a pointer to cloned table on success, ERR_PTR()
+ * on failure. For 'punch_hole' see sane_fdtable_size().
*/
-struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
+struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
struct files_struct *newf;
struct file **old_fds, **new_fds;
unsigned int open_files, i;
struct fdtable *old_fdt, *new_fdt;
- *errorp = -ENOMEM;
newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
if (!newf)
- goto out;
+ return ERR_PTR(-ENOMEM);
atomic_set(&newf->count, 1);
@@ -311,7 +406,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
spin_lock(&oldf->file_lock);
old_fdt = files_fdtable(oldf);
- open_files = sane_fdtable_size(old_fdt, max_fds);
+ open_files = sane_fdtable_size(old_fdt, punch_hole);
/*
* Check whether we need to allocate a larger fd array and fd set.
@@ -322,17 +417,10 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
if (new_fdt != &newf->fdtab)
__free_fdtable(new_fdt);
- new_fdt = alloc_fdtable(open_files - 1);
- if (!new_fdt) {
- *errorp = -ENOMEM;
- goto out_release;
- }
-
- /* beyond sysctl_nr_open; nothing to do */
- if (unlikely(new_fdt->max_fds < open_files)) {
- __free_fdtable(new_fdt);
- *errorp = -EMFILE;
- goto out_release;
+ new_fdt = alloc_fdtable(open_files);
+ if (IS_ERR(new_fdt)) {
+ kmem_cache_free(files_cachep, newf);
+ return ERR_CAST(new_fdt);
}
/*
@@ -342,25 +430,33 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
*/
spin_lock(&oldf->file_lock);
old_fdt = files_fdtable(oldf);
- open_files = sane_fdtable_size(old_fdt, max_fds);
+ open_files = sane_fdtable_size(old_fdt, punch_hole);
}
- copy_fd_bitmaps(new_fdt, old_fdt, open_files);
+ copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
+ /*
+ * We may be racing against fd allocation from other threads using this
+ * files_struct, despite holding ->file_lock.
+ *
+ * alloc_fd() might have already claimed a slot, while fd_install()
+ * did not populate it yet. Note the latter operates locklessly, so
+ * the file can show up as we are walking the array below.
+ *
+ * At the same time we know no files will disappear as all other
+ * operations take the lock.
+ *
+ * Instead of trying to placate userspace racing with itself, we
+ * ref the file if we see it and mark the fd slot as unused otherwise.
+ */
for (i = open_files; i != 0; i--) {
- struct file *f = *old_fds++;
+ struct file *f = rcu_dereference_raw(*old_fds++);
if (f) {
get_file(f);
} else {
- /*
- * The fd may be claimed in the fd bitmap but not yet
- * instantiated in the files array if a sibling thread
- * is partway through open(). So make sure that this
- * fd is available to the new process.
- */
__clear_open_fd(open_files - i, new_fdt);
}
rcu_assign_pointer(*new_fds++, f);
@@ -373,11 +469,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
rcu_assign_pointer(newf->fdt, new_fdt);
return newf;
-
-out_release:
- kmem_cache_free(files_cachep, newf);
-out:
- return NULL;
}
static struct fdtable *close_files(struct files_struct * files)
@@ -398,7 +489,7 @@ static struct fdtable *close_files(struct files_struct * files)
set = fdt->open_fds[j++];
while (set) {
if (set & 1) {
- struct file * file = xchg(&fdt->fd[i], NULL);
+ struct file *file = fdt->fd[i];
if (file) {
filp_close(file, files);
cond_resched();
@@ -452,12 +543,21 @@ struct files_struct init_files = {
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
- unsigned int maxfd = fdt->max_fds;
+ unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
unsigned int maxbit = maxfd / BITS_PER_LONG;
unsigned int bitbit = start / BITS_PER_LONG;
+ unsigned int bit;
+
+ /*
+ * Try to avoid looking at the second level bitmap
+ */
+ bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG,
+ start & (BITS_PER_LONG - 1));
+ if (bit < BITS_PER_LONG)
+ return bit + bitbit * BITS_PER_LONG;
bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
- if (bitbit > maxfd)
+ if (bitbit >= maxfd)
return maxfd;
if (bitbit > start)
start = bitbit;
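
The fast path above only works because full_fds_bits is a summary bitmap: one bit per word of open_fds, set only when that word has no free slot left. A small userspace sketch of the same two-level idea (fixed-size table; all names invented for illustration):

#include <stdio.h>

#define WORD_BITS	(8 * (unsigned int)sizeof(unsigned long))
#define WORDS		4

static unsigned long open_fds[WORDS];	/* one bit per descriptor */
static unsigned long full_words;	/* one bit per completely full word */

static unsigned int find_free(void)
{
	for (unsigned int w = 0; w < WORDS; w++) {
		if (full_words & (1UL << w))
			continue;	/* summary bit says: skip this word */
		for (unsigned int b = 0; b < WORD_BITS; b++)
			if (!(open_fds[w] & (1UL << b)))
				return w * WORD_BITS + b;
	}
	return WORDS * WORD_BITS;	/* table full */
}

static void mark_used(unsigned int fd)
{
	unsigned int w = fd / WORD_BITS;

	open_fds[w] |= 1UL << (fd % WORD_BITS);
	if (!~open_fds[w])
		full_words |= 1UL << w;	/* word just became full */
}

int main(void)
{
	for (unsigned int fd = 0; fd < 70; fd++)
		mark_used(fd);
	printf("next free fd: %u\n", find_free());	/* prints 70 */
	return 0;
}
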
@@ -481,7 +581,7 @@ repeat:
if (fd < files->next_fd)
fd = files->next_fd;
- if (fd < fdt->max_fds)
+ if (likely(fd < fdt->max_fds))
fd = find_next_fd(fdt, fd);
/*
@@ -489,36 +589,23 @@ repeat:
* will limit the total number of files that can be opened.
*/
error = -EMFILE;
- if (fd >= end)
+ if (unlikely(fd >= end))
goto out;
- error = expand_files(files, fd);
- if (error < 0)
- goto out;
+ if (unlikely(fd >= fdt->max_fds)) {
+ error = expand_files(files, fd);
+ if (error < 0)
+ goto out;
- /*
- * If we needed to expand the fs array we
- * might have blocked - try again.
- */
- if (error)
goto repeat;
+ }
if (start <= files->next_fd)
files->next_fd = fd + 1;
- __set_open_fd(fd, fdt);
- if (flags & O_CLOEXEC)
- __set_close_on_exec(fd, fdt);
- else
- __clear_close_on_exec(fd, fdt);
+ __set_open_fd(fd, fdt, flags & O_CLOEXEC);
error = fd;
-#if 1
- /* Sanity check */
- if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
- printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
- rcu_assign_pointer(fdt->fd[fd], NULL);
- }
-#endif
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
out:
spin_unlock(&files->file_lock);
@@ -555,41 +642,59 @@ void put_unused_fd(unsigned int fd)
EXPORT_SYMBOL(put_unused_fd);
/*
- * Install a file pointer in the fd array.
+ * Install a file pointer in the fd array while it is being resized.
+ *
+ * We need to make sure our update to the array does not get lost as the resizing
+ * thread can be copying the content as we modify it.
*
- * The VFS is full of places where we drop the files lock between
- * setting the open_fds bitmap and installing the file in the file
- * array. At any such point, we are vulnerable to a dup2() race
- * installing a file in the array before us. We need to detect this and
- * fput() the struct file we are about to overwrite in this case.
+ * We have two ways to do it:
+ * - go off CPU waiting for resize_in_progress to clear
+ * - take the spin lock
*
- * It should never happen - if we allow dup2() do it, _really_ bad things
- * will follow.
+ * The latter is trivial to implement and saves us from having to might_sleep()
+ * for debugging purposes.
+ *
+ * This is moved out of line from fd_install() to convince gcc to optimize that
+ * routine better.
+ */
+static void noinline fd_install_slowpath(unsigned int fd, struct file *file)
+{
+ struct files_struct *files = current->files;
+ struct fdtable *fdt;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+}
+
+/**
+ * fd_install - install a file pointer in the fd array
+ * @fd: file descriptor to install the file in
+ * @file: the file to install
*
* This consumes the "file" refcount, so callers should treat it
* as if they had called fput(file).
*/
-
void fd_install(unsigned int fd, struct file *file)
{
struct files_struct *files = current->files;
struct fdtable *fdt;
- rcu_read_lock_sched();
+ if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
+ return;
+ rcu_read_lock_sched();
if (unlikely(files->resize_in_progress)) {
rcu_read_unlock_sched();
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- BUG_ON(fdt->fd[fd] != NULL);
- rcu_assign_pointer(fdt->fd[fd], file);
- spin_unlock(&files->file_lock);
+ fd_install_slowpath(fd, file);
return;
}
/* coupled with smp_wmb() in expand_fdtable() */
smp_rmb();
fdt = rcu_dereference_sched(files->fdt);
- BUG_ON(fdt->fd[fd] != NULL);
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
rcu_read_unlock_sched();
}
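
For reference, the canonical in-kernel usage pattern is: reserve the descriptor, create the file, and publish it last, so the NULL-until-fd_install() invariant discussed around dup2() below holds. A hedged sketch (example_install() and its arguments are made up; anon_inode_getfile() is just one possible file source):

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static int example_install(void *priv, const struct file_operations *fops)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[example]", fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* slot was only reserved, never published */
		return PTR_ERR(file);
	}

	fd_install(fd, file);		/* consumes the file reference */
	return fd;
}
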
@@ -597,36 +702,32 @@ void fd_install(unsigned int fd, struct file *file)
EXPORT_SYMBOL(fd_install);
/**
- * pick_file - return file associatd with fd
+ * file_close_fd_locked - return file associated with fd
* @files: file struct to retrieve file from
* @fd: file descriptor to retrieve file for
*
- * If this functions returns an EINVAL error pointer the fd was beyond the
- * current maximum number of file descriptors for that fdtable.
+ * Doesn't take a separate reference count.
+ *
+ * Context: files_lock must be held.
*
- * Returns: The file associated with @fd, on error returns an error pointer.
+ * Returns: The file associated with @fd (NULL if @fd is not open)
*/
-static struct file *pick_file(struct files_struct *files, unsigned fd)
+struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
+ struct fdtable *fdt = files_fdtable(files);
struct file *file;
- struct fdtable *fdt;
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (fd >= fdt->max_fds) {
- file = ERR_PTR(-EINVAL);
- goto out_unlock;
- }
- file = fdt->fd[fd];
- if (!file) {
- file = ERR_PTR(-EBADF);
- goto out_unlock;
- }
- rcu_assign_pointer(fdt->fd[fd], NULL);
- __put_unused_fd(files, fd);
+ lockdep_assert_held(&files->file_lock);
-out_unlock:
- spin_unlock(&files->file_lock);
+ if (fd >= fdt->max_fds)
+ return NULL;
+
+ fd = array_index_nospec(fd, fdt->max_fds);
+ file = rcu_dereference_raw(fdt->fd[fd]);
+ if (file) {
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ __put_unused_fd(files, fd);
+ }
return file;
}
@@ -635,17 +736,19 @@ int close_fd(unsigned fd)
struct files_struct *files = current->files;
struct file *file;
- file = pick_file(files, fd);
- if (IS_ERR(file))
+ spin_lock(&files->file_lock);
+ file = file_close_fd_locked(files, fd);
+ spin_unlock(&files->file_lock);
+ if (!file)
return -EBADF;
return filp_close(file, files);
}
-EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+EXPORT_SYMBOL(close_fd);
/**
* last_fd - return last valid index into fd table
- * @cur_fds: files struct
+ * @fdt: File descriptor table.
*
* Context: Either rcu read lock or files_lock must be held.
*
@@ -670,36 +773,45 @@ static inline void __range_cloexec(struct files_struct *cur_fds,
spin_unlock(&cur_fds->file_lock);
}
-static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
+static inline void __range_close(struct files_struct *files, unsigned int fd,
unsigned int max_fd)
{
- while (fd <= max_fd) {
- struct file *file;
+ struct file *file;
+ unsigned n;
+
+ spin_lock(&files->file_lock);
+ n = last_fd(files_fdtable(files));
+ max_fd = min(max_fd, n);
- file = pick_file(cur_fds, fd++);
- if (!IS_ERR(file)) {
- /* found a valid file to close */
- filp_close(file, cur_fds);
+ for (; fd <= max_fd; fd++) {
+ file = file_close_fd_locked(files, fd);
+ if (file) {
+ spin_unlock(&files->file_lock);
+ filp_close(file, files);
cond_resched();
- continue;
+ spin_lock(&files->file_lock);
+ } else if (need_resched()) {
+ spin_unlock(&files->file_lock);
+ cond_resched();
+ spin_lock(&files->file_lock);
}
-
- /* beyond the last fd in that table */
- if (PTR_ERR(file) == -EINVAL)
- return;
}
+ spin_unlock(&files->file_lock);
}
/**
- * __close_range() - Close all file descriptors in a given range.
+ * sys_close_range() - Close all file descriptors in a given range.
*
* @fd: starting file descriptor to close
* @max_fd: last file descriptor to close
+ * @flags: CLOSE_RANGE flags.
*
* This closes a range of file descriptors. All file descriptors
* from @fd up to and including @max_fd are closed.
+ * Currently, errors from closing a given file descriptor are ignored.
*/
-int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
+SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd,
+ unsigned int, flags)
{
struct task_struct *me = current;
struct files_struct *cur_fds = me->files, *fds = NULL;
@@ -710,37 +822,25 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
if (fd > max_fd)
return -EINVAL;
- if (flags & CLOSE_RANGE_UNSHARE) {
- int ret;
- unsigned int max_unshare_fds = NR_OPEN_MAX;
+ if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
+ struct fd_range range = {fd, max_fd}, *punch_hole = &range;
/*
* If the caller requested all fds to be made cloexec we always
* copy all of the file descriptors since they still want to
* use them.
*/
- if (!(flags & CLOSE_RANGE_CLOEXEC)) {
- /*
- * If the requested range is greater than the current
- * maximum, we're closing everything so only copy all
- * file descriptors beneath the lowest file descriptor.
- */
- rcu_read_lock();
- if (max_fd >= last_fd(files_fdtable(cur_fds)))
- max_unshare_fds = fd;
- rcu_read_unlock();
- }
-
- ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
- if (ret)
- return ret;
+ if (flags & CLOSE_RANGE_CLOEXEC)
+ punch_hole = NULL;
+ fds = dup_fd(cur_fds, punch_hole);
+ if (IS_ERR(fds))
+ return PTR_ERR(fds);
/*
* We used to share our file descriptor table, and have now
* created a private one, make sure we're using it below.
*/
- if (fds)
- swap(cur_fds, fds);
+ swap(cur_fds, fds);
}
if (flags & CLOSE_RANGE_CLOEXEC)
@@ -762,47 +862,24 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
return 0;
}
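
From userspace the same functionality is reached through the close_range(2) wrapper (available in glibc 2.34+; otherwise syscall(SYS_close_range, ...) would be needed). A minimal sketch of both flag modes:

#define _GNU_SOURCE
#include <unistd.h>
#include <linux/close_range.h>
#include <stdio.h>

int main(void)
{
	/* Mark every fd from 3 upward close-on-exec before an exec(). */
	if (close_range(3, ~0U, CLOSE_RANGE_CLOEXEC) < 0)
		perror("close_range cloexec");

	/* Close fds 3 and up outright, via a private copy of the fd table. */
	if (close_range(3, ~0U, CLOSE_RANGE_UNSHARE) < 0)
		perror("close_range unshare");

	return 0;
}
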
-/*
- * See close_fd_get_file() below, this variant assumes current->files->file_lock
- * is held.
+/**
+ * file_close_fd - return file associated with fd
+ * @fd: file descriptor to retrieve file for
+ *
+ * Doesn't take a separate reference count.
+ *
+ * Returns: The file associated with @fd (NULL if @fd is not open)
*/
-int __close_fd_get_file(unsigned int fd, struct file **res)
+struct file *file_close_fd(unsigned int fd)
{
struct files_struct *files = current->files;
struct file *file;
- struct fdtable *fdt;
-
- fdt = files_fdtable(files);
- if (fd >= fdt->max_fds)
- goto out_err;
- file = fdt->fd[fd];
- if (!file)
- goto out_err;
- rcu_assign_pointer(fdt->fd[fd], NULL);
- __put_unused_fd(files, fd);
- get_file(file);
- *res = file;
- return 0;
-out_err:
- *res = NULL;
- return -ENOENT;
-}
-
-/*
- * variant of close_fd that gets a ref on the file for later fput.
- * The caller must ensure that filp_close() called on the file, and then
- * an fput().
- */
-int close_fd_get_file(unsigned int fd, struct file **res)
-{
- struct files_struct *files = current->files;
- int ret;
spin_lock(&files->file_lock);
- ret = __close_fd_get_file(fd, res);
+ file = file_close_fd_locked(files, fd);
spin_unlock(&files->file_lock);
- return ret;
+ return file;
}
void do_close_on_exec(struct files_struct *files)
@@ -841,49 +918,199 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
-static struct file *__fget_files(struct files_struct *files, unsigned int fd,
- fmode_t mask, unsigned int refs)
+static struct file *__get_file_rcu(struct file __rcu **f)
{
- struct file *file;
+ struct file __rcu *file;
+ struct file __rcu *file_reloaded;
+ struct file __rcu *file_reloaded_cmp;
- rcu_read_lock();
-loop:
- file = files_lookup_fd_rcu(files, fd);
- if (file) {
- /* File object ref couldn't be taken.
- * dup2() atomicity guarantee is the reason
- * we loop to catch the new file (or NULL pointer)
- */
- if (file->f_mode & mask)
- file = NULL;
- else if (!get_file_rcu_many(file, refs))
- goto loop;
+ file = rcu_dereference_raw(*f);
+ if (!file)
+ return NULL;
+
+ if (unlikely(!file_ref_get(&file->f_ref)))
+ return ERR_PTR(-EAGAIN);
+
+ file_reloaded = rcu_dereference_raw(*f);
+
+ /*
+ * Ensure that all accesses have a dependency on the load from
+ * rcu_dereference_raw() above so we get correct ordering
+ * between reuse/allocation and the pointer check below.
+ */
+ file_reloaded_cmp = file_reloaded;
+ OPTIMIZER_HIDE_VAR(file_reloaded_cmp);
+
+ /*
+ * file_ref_get() above provided a full memory barrier when we
+ * acquired a reference.
+ *
+ * This is paired with the write barrier from assigning to the
+ * __rcu protected file pointer so that if that pointer still
+ * matches the current file, we know we have successfully
+ * acquired a reference to the right file.
+ *
+ * If the pointers don't match the file has been reallocated by
+ * SLAB_TYPESAFE_BY_RCU.
+ */
+ if (file == file_reloaded_cmp)
+ return file_reloaded;
+
+ fput(file);
+ return ERR_PTR(-EAGAIN);
+}
+
+/**
+ * get_file_rcu - try to get a reference to a file under rcu
+ * @f: the file to get a reference on
+ *
+ * This function tries to get a reference on @f carefully verifying that
+ * @f hasn't been reused.
+ *
+ * This function should rarely have to be used and only by users who
+ * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
+ *
+ * Return: Returns @f with the reference count increased or NULL.
+ */
+struct file *get_file_rcu(struct file __rcu **f)
+{
+ for (;;) {
+ struct file __rcu *file;
+
+ file = __get_file_rcu(f);
+ if (!IS_ERR(file))
+ return file;
}
- rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(get_file_rcu);
+
+/**
+ * get_file_active - try to get a reference to a file
+ * @f: the file to get a reference on
+ *
+ * In contrast to get_file_rcu(), the pointer itself isn't part of the
+ * reference counting.
+ *
+ * This function should rarely have to be used and only by users who
+ * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
+ *
+ * Return: Returns @f with the reference count increased or NULL.
+ */
+struct file *get_file_active(struct file **f)
+{
+ struct file __rcu *file;
+ rcu_read_lock();
+ file = __get_file_rcu(f);
+ rcu_read_unlock();
+ if (IS_ERR(file))
+ file = NULL;
return file;
}
+EXPORT_SYMBOL_GPL(get_file_active);
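
A hedged sketch of a lockless consumer (the example_ctx structure and its field are made up; the only requirement is that the pointer passed in is the published location whose reuse the SLAB_TYPESAFE_BY_RCU rules cover):

#include <linux/file.h>
#include <linux/fs.h>

struct example_ctx {
	struct file *filp;	/* published pointer, may be cleared concurrently */
};

static void example_use(struct example_ctx *ctx)
{
	struct file *file;

	file = get_file_active(&ctx->filp);	/* NULL if it is gone */
	if (!file)
		return;
	/* ... use file, a reference is now held ... */
	fput(file);
}
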
-static inline struct file *__fget(unsigned int fd, fmode_t mask,
- unsigned int refs)
+static inline struct file *__fget_files_rcu(struct files_struct *files,
+ unsigned int fd, fmode_t mask)
{
- return __fget_files(current->files, fd, mask, refs);
+ for (;;) {
+ struct file *file;
+ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+ struct file __rcu **fdentry;
+ unsigned long nospec_mask;
+
+ /* Mask is 0 for an invalid fd, ~0 for a valid one */
+ nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);
+
+ /*
+ * fdentry points to the 'fd' offset, or fdt->fd[0].
+ * Loading from fdt->fd[0] is always safe, because the
+ * array always exists.
+ */
+ fdentry = fdt->fd + (fd & nospec_mask);
+
+ /* Do the load, then mask any invalid result */
+ file = rcu_dereference_raw(*fdentry);
+ file = (void *)(nospec_mask & (unsigned long)file);
+ if (unlikely(!file))
+ return NULL;
+
+ /*
+ * Ok, we have a file pointer that was valid at
+ * some point, but it might have become stale since.
+ *
+ * We need to confirm it by incrementing the refcount
+ * and then check the lookup again.
+ *
+ * file_ref_get() gives us a full memory barrier. We
+ * only really need an 'acquire' one to protect the
+ * loads below, but we don't have that.
+ */
+ if (unlikely(!file_ref_get(&file->f_ref)))
+ continue;
+
+ /*
+ * Such a race can take two forms:
+ *
+ * (a) the file ref already went down to zero and the
+ * file hasn't been reused yet or the file count
+ * isn't zero but the file has already been reused.
+ *
+ * (b) the file table entry has changed under us.
+ * Note that we don't need to re-check the 'fdt->fd'
+ * pointer having changed, because it always goes
+ * hand-in-hand with 'fdt'.
+ *
+ * If so, we need to put our ref and try again.
+ */
+ if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
+ unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
+ fput(file);
+ continue;
+ }
+
+ /*
+ * This isn't the file we're looking for or we're not
+ * allowed to get a reference to it.
+ */
+ if (unlikely(file->f_mode & mask)) {
+ fput(file);
+ return NULL;
+ }
+
+ /*
+ * Ok, we have a ref to the file, and checked that it
+ * still exists.
+ */
+ return file;
+ }
+}
+
+static struct file *__fget_files(struct files_struct *files, unsigned int fd,
+ fmode_t mask)
+{
+ struct file *file;
+
+ rcu_read_lock();
+ file = __fget_files_rcu(files, fd, mask);
+ rcu_read_unlock();
+
+ return file;
}
-struct file *fget_many(unsigned int fd, unsigned int refs)
+static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
- return __fget(fd, FMODE_PATH, refs);
+ return __fget_files(current->files, fd, mask);
}
struct file *fget(unsigned int fd)
{
- return __fget(fd, FMODE_PATH, 1);
+ return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);
struct file *fget_raw(unsigned int fd)
{
- return __fget(fd, 0, 1);
+ return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
@@ -893,28 +1120,13 @@ struct file *fget_task(struct task_struct *task, unsigned int fd)
task_lock(task);
if (task->files)
- file = __fget_files(task->files, fd, 0, 1);
- task_unlock(task);
-
- return file;
-}
-
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
-{
- /* Must be called with rcu_read_lock held */
- struct files_struct *files;
- struct file *file = NULL;
-
- task_lock(task);
- files = task->files;
- if (files)
- file = files_lookup_fd_rcu(files, fd);
+ file = __fget_files(task->files, fd, 0);
task_unlock(task);
return file;
}
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
+struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd)
{
/* Must be called with rcu_read_lock held */
struct files_struct *files;
@@ -924,16 +1136,19 @@ struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret
task_lock(task);
files = task->files;
if (files) {
+ rcu_read_lock();
for (; fd < files_fdtable(files)->max_fds; fd++) {
- file = files_lookup_fd_rcu(files, fd);
+ file = __fget_files_rcu(files, fd, 0);
if (file)
break;
}
+ rcu_read_unlock();
}
task_unlock(task);
*ret_fd = fd;
return file;
}
+EXPORT_SYMBOL(fget_task_next);
/*
* Lightweight file lookup - no refcnt increment if fd table isn't shared.
@@ -950,47 +1165,96 @@ struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret
*
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
+ *
+ * (As an exception to rule 2, you can call filp_close between fget_light and
+ * fput_light provided that you capture a real refcount with get_file before
+ * the call to filp_close, and ensure that this real refcount is fput *after*
+ * the fput_light call.)
+ *
+ * See also the documentation in rust/kernel/file.rs.
*/
-static unsigned long __fget_light(unsigned int fd, fmode_t mask)
+static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
- if (atomic_read(&files->count) == 1) {
+ /*
+ * If another thread is concurrently calling close_fd() followed
+ * by put_files_struct(), we must not observe the old table
+ * entry combined with the new refcount - otherwise we could
+ * return a file that is concurrently being freed.
+ *
+ * atomic_read_acquire() pairs with atomic_dec_and_test() in
+ * put_files_struct().
+ */
+ if (likely(atomic_read_acquire(&files->count) == 1)) {
file = files_lookup_fd_raw(files, fd);
if (!file || unlikely(file->f_mode & mask))
- return 0;
- return (unsigned long)file;
+ return EMPTY_FD;
+ return BORROWED_FD(file);
} else {
- file = __fget(fd, mask, 1);
+ file = __fget_files(files, fd, mask);
if (!file)
- return 0;
- return FDPUT_FPUT | (unsigned long)file;
+ return EMPTY_FD;
+ return CLONED_FD(file);
}
}
-unsigned long __fdget(unsigned int fd)
+struct fd fdget(unsigned int fd)
{
return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(__fdget);
+EXPORT_SYMBOL(fdget);
-unsigned long __fdget_raw(unsigned int fd)
+struct fd fdget_raw(unsigned int fd)
{
return __fget_light(fd, 0);
}
-unsigned long __fdget_pos(unsigned int fd)
+/*
+ * Try to avoid f_pos locking. We only need it if the
+ * file is marked for FMODE_ATOMIC_POS, and it can be
+ * accessed multiple ways.
+ *
+ * Always do it for directories, because pidfd_getfd()
+ * can make a file accessible even if it otherwise would
+ * not be, and for directories this is a correctness
+ * issue, not a "POSIX requirement".
+ */
+static inline bool file_needs_f_pos_lock(struct file *file)
+{
+ if (!(file->f_mode & FMODE_ATOMIC_POS))
+ return false;
+ if (__file_ref_read_raw(&file->f_ref) != FILE_REF_ONEREF)
+ return true;
+ if (file->f_op->iterate_shared)
+ return true;
+ return false;
+}
+
+bool file_seek_cur_needs_f_lock(struct file *file)
{
- unsigned long v = __fdget(fd);
- struct file *file = (struct file *)(v & ~3);
+ if (!(file->f_mode & FMODE_ATOMIC_POS) && !file->f_op->iterate_shared)
+ return false;
- if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
- if (file_count(file) > 1) {
- v |= FDPUT_POS_UNLOCK;
- mutex_lock(&file->f_pos_lock);
- }
+ /*
+ * Note that we are not guaranteed to be called after fdget_pos() on
+ * this file obj, in which case the caller is expected to provide the
+ * appropriate locking.
+ */
+
+ return true;
+}
+
+struct fd fdget_pos(unsigned int fd)
+{
+ struct fd f = fdget(fd);
+ struct file *file = fd_file(f);
+
+ if (likely(file) && file_needs_f_pos_lock(file)) {
+ f.word |= FDPUT_POS_UNLOCK;
+ mutex_lock(&file->f_pos_lock);
}
- return v;
+ return f;
}
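
A typical consumer is a positional read/write style syscall: fdget_pos() transparently takes f_pos_lock only when file_needs_f_pos_lock() says so, and fdput_pos() undoes both the lock and the reference. A hedged sketch (ksys_example() is invented; fd_file() and fdput_pos() are assumed to be the matching accessors from <linux/file.h>):

#include <linux/file.h>

static long ksys_example(unsigned int fd)
{
	struct fd f = fdget_pos(fd);
	long ret = -EBADF;

	if (fd_file(f)) {
		/* fd_file(f)->f_pos may be read and updated here; the lock, if any, is held. */
		ret = fd_file(f)->f_pos;
		fdput_pos(f);	/* drops f_pos_lock (if taken) and the reference */
	}
	return ret;
}
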
void __f_unlock_pos(struct file *f)
@@ -1007,24 +1271,16 @@ void __f_unlock_pos(struct file *f)
void set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
- struct fdtable *fdt;
spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (flag)
- __set_close_on_exec(fd, fdt);
- else
- __clear_close_on_exec(fd, fdt);
+ __set_close_on_exec(fd, files_fdtable(files), flag);
spin_unlock(&files->file_lock);
}
bool get_close_on_exec(unsigned int fd)
{
- struct files_struct *files = current->files;
- struct fdtable *fdt;
bool res;
rcu_read_lock();
- fdt = files_fdtable(files);
- res = close_on_exec(fd, fdt);
+ res = close_on_exec(fd, current->files);
rcu_read_unlock();
return res;
}
@@ -1037,30 +1293,39 @@ __releases(&files->file_lock)
struct fdtable *fdt;
/*
- * We need to detect attempts to do dup2() over allocated but still
- * not finished descriptor. NB: OpenBSD avoids that at the price of
- * extra work in their equivalent of fget() - they insert struct
- * file immediately after grabbing descriptor, mark it larval if
- * more work (e.g. actual opening) is needed and make sure that
- * fget() treats larval files as absent. Potentially interesting,
- * but while extra work in fget() is trivial, locking implications
- * and amount of surgery on open()-related paths in VFS are not.
- * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
- * deadlocks in rather amusing ways, AFAICS. All of that is out of
- * scope of POSIX or SUS, since neither considers shared descriptor
- * tables and this condition does not arise without those.
+ * dup2() is expected to close the file installed in the target fd slot
+ * (if any). However, userspace hand-picking a fd may be racing against
+ * its own threads which happened to allocate it in open() et al but did
+ * not populate it yet.
+ *
+ * Broadly speaking we may be racing against the following:
+ * fd = get_unused_fd_flags(); // fd slot reserved, ->fd[fd] == NULL
+ * file = hard_work_goes_here();
+ * fd_install(fd, file); // only now ->fd[fd] == file
+ *
+ * It is an invariant that a successfully allocated fd has a NULL entry
+ * in the array until the matching fd_install().
+ *
+ * If we fit the window, we have the fd to populate, yet no target file
+ * to close. Trying to ignore it and install our new file would violate
+ * the invariant and make fd_install() overwrite our file.
+ *
+ * Things can be done(tm) to handle this. However, the issue does not
+ * concern legitimate programs and we only need to make sure the kernel
+ * does not trip over it.
+ *
+ * The simplest way out is to return an error if we find ourselves here.
+ *
+ * POSIX is silent on the issue; we return -EBUSY.
*/
fdt = files_fdtable(files);
- tofree = fdt->fd[fd];
+ fd = array_index_nospec(fd, fdt->max_fds);
+ tofree = rcu_dereference_raw(fdt->fd[fd]);
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
get_file(file);
rcu_assign_pointer(fdt->fd[fd], file);
- __set_open_fd(fd, fdt);
- if (flags & O_CLOEXEC)
- __set_close_on_exec(fd, fdt);
- else
- __clear_close_on_exec(fd, fdt);
+ __set_open_fd(fd, fdt, flags & O_CLOEXEC);
spin_unlock(&files->file_lock);
if (tofree)
@@ -1088,7 +1353,10 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
err = expand_files(files, fd);
if (unlikely(err < 0))
goto out_unlock;
- return do_dup2(files, file, fd, flags);
+ err = do_dup2(files, file, fd, flags);
+ if (err < 0)
+ return err;
+ return 0;
out_unlock:
spin_unlock(&files->file_lock);
@@ -1096,7 +1364,7 @@ out_unlock:
}
/**
- * __receive_fd() - Install received file into file descriptor table
+ * receive_fd() - Install received file into file descriptor table
* @file: struct file that was received from another process
* @ufd: __user pointer to write new fd number to
* @o_flags: the O_* flags to apply to the new fd entry
@@ -1110,31 +1378,29 @@ out_unlock:
*
* Returns newly installed fd or -ve on error.
*/
-int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
- int new_fd;
int error;
error = security_file_receive(file);
if (error)
return error;
- new_fd = get_unused_fd_flags(o_flags);
- if (new_fd < 0)
- return new_fd;
+ FD_PREPARE(fdf, o_flags, file);
+ if (fdf.err)
+ return fdf.err;
+ get_file(file);
if (ufd) {
- error = put_user(new_fd, ufd);
- if (error) {
- put_unused_fd(new_fd);
+ error = put_user(fd_prepare_fd(fdf), ufd);
+ if (error)
return error;
- }
}
- fd_install(new_fd, get_file(file));
- __receive_sock(file);
- return new_fd;
+ __receive_sock(fd_prepare_file(fdf));
+ return fd_publish(fdf);
}
+EXPORT_SYMBOL_GPL(receive_fd);
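
Callers (SCM_RIGHTS-style fd passing, seccomp addfd, pidfd_getfd) hand in a file they already hold a reference to; receive_fd() takes its own reference before publishing, so the caller's reference is untouched either way. A hedged sketch:

#include <linux/fcntl.h>
#include <linux/file.h>

/* Sketch: install 'file' into current's table; caller keeps its reference. */
static int example_receive(struct file *file)
{
	int fd;

	fd = receive_fd(file, NULL, O_CLOEXEC);
	if (fd < 0)
		return fd;	/* nothing was installed */

	/* 'fd' in current->files now refers to 'file'. */
	return fd;
}
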
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
@@ -1150,12 +1416,6 @@ int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
return new_fd;
}
-int receive_fd(struct file *file, unsigned int o_flags)
-{
- return __receive_fd(file, NULL, o_flags);
-}
-EXPORT_SYMBOL_GPL(receive_fd);
-
static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
int err = -EBADF;
@@ -1199,12 +1459,16 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
if (unlikely(newfd == oldfd)) { /* corner case */
struct files_struct *files = current->files;
+ struct file *f;
int retval = oldfd;
rcu_read_lock();
- if (!files_lookup_fd_rcu(files, oldfd))
+ f = __fget_files_rcu(files, oldfd, 0);
+ if (!f)
retval = -EBADF;
rcu_read_unlock();
+ if (f)
+ fput(f);
return retval;
}
return ksys_dup3(oldfd, newfd, 0);
diff --git a/fs/file_attr.c b/fs/file_attr.c
new file mode 100644
index 000000000000..4c4916632f11
--- /dev/null
+++ b/fs/file_attr.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/fscrypt.h>
+#include <linux/fileattr.h>
+#include <linux/export.h>
+#include <linux/syscalls.h>
+#include <linux/namei.h>
+
+#include "internal.h"
+
+/**
+ * fileattr_fill_xflags - initialize fileattr with xflags
+ * @fa: fileattr pointer
+ * @xflags: FS_XFLAG_* flags
+ *
+ * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
+ * other fields are zeroed.
+ */
+void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags)
+{
+ memset(fa, 0, sizeof(*fa));
+ fa->fsx_valid = true;
+ fa->fsx_xflags = xflags;
+ if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE)
+ fa->flags |= FS_IMMUTABLE_FL;
+ if (fa->fsx_xflags & FS_XFLAG_APPEND)
+ fa->flags |= FS_APPEND_FL;
+ if (fa->fsx_xflags & FS_XFLAG_SYNC)
+ fa->flags |= FS_SYNC_FL;
+ if (fa->fsx_xflags & FS_XFLAG_NOATIME)
+ fa->flags |= FS_NOATIME_FL;
+ if (fa->fsx_xflags & FS_XFLAG_NODUMP)
+ fa->flags |= FS_NODUMP_FL;
+ if (fa->fsx_xflags & FS_XFLAG_DAX)
+ fa->flags |= FS_DAX_FL;
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ fa->flags |= FS_PROJINHERIT_FL;
+}
+EXPORT_SYMBOL(fileattr_fill_xflags);
+
+/**
+ * fileattr_fill_flags - initialize fileattr with flags
+ * @fa: fileattr pointer
+ * @flags: FS_*_FL flags
+ *
+ * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
+ * All other fields are zeroed.
+ */
+void fileattr_fill_flags(struct file_kattr *fa, u32 flags)
+{
+ memset(fa, 0, sizeof(*fa));
+ fa->flags_valid = true;
+ fa->flags = flags;
+ if (fa->flags & FS_SYNC_FL)
+ fa->fsx_xflags |= FS_XFLAG_SYNC;
+ if (fa->flags & FS_IMMUTABLE_FL)
+ fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
+ if (fa->flags & FS_APPEND_FL)
+ fa->fsx_xflags |= FS_XFLAG_APPEND;
+ if (fa->flags & FS_NODUMP_FL)
+ fa->fsx_xflags |= FS_XFLAG_NODUMP;
+ if (fa->flags & FS_NOATIME_FL)
+ fa->fsx_xflags |= FS_XFLAG_NOATIME;
+ if (fa->flags & FS_DAX_FL)
+ fa->fsx_xflags |= FS_XFLAG_DAX;
+ if (fa->flags & FS_PROJINHERIT_FL)
+ fa->fsx_xflags |= FS_XFLAG_PROJINHERIT;
+}
+EXPORT_SYMBOL(fileattr_fill_flags);
+
+/**
+ * vfs_fileattr_get - retrieve miscellaneous file attributes
+ * @dentry: the object to retrieve from
+ * @fa: fileattr pointer
+ *
+ * Call i_op->fileattr_get() callback, if exists.
+ *
+ * Return: 0 on success, or a negative error on failure.
+ */
+int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
+{
+ struct inode *inode = d_inode(dentry);
+ int error;
+
+ if (!inode->i_op->fileattr_get)
+ return -ENOIOCTLCMD;
+
+ error = security_inode_file_getattr(dentry, fa);
+ if (error)
+ return error;
+
+ return inode->i_op->fileattr_get(dentry, fa);
+}
+EXPORT_SYMBOL(vfs_fileattr_get);
+
+static void fileattr_to_file_attr(const struct file_kattr *fa,
+ struct file_attr *fattr)
+{
+ __u32 mask = FS_XFLAGS_MASK;
+
+ memset(fattr, 0, sizeof(struct file_attr));
+ fattr->fa_xflags = fa->fsx_xflags & mask;
+ fattr->fa_extsize = fa->fsx_extsize;
+ fattr->fa_nextents = fa->fsx_nextents;
+ fattr->fa_projid = fa->fsx_projid;
+ fattr->fa_cowextsize = fa->fsx_cowextsize;
+}
+
+/**
+ * copy_fsxattr_to_user - copy fsxattr to userspace.
+ * @fa: fileattr pointer
+ * @ufa: fsxattr user pointer
+ *
+ * Return: 0 on success, or -EFAULT on failure.
+ */
+int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa)
+{
+ struct fsxattr xfa;
+ __u32 mask = FS_XFLAGS_MASK;
+
+ memset(&xfa, 0, sizeof(xfa));
+ xfa.fsx_xflags = fa->fsx_xflags & mask;
+ xfa.fsx_extsize = fa->fsx_extsize;
+ xfa.fsx_nextents = fa->fsx_nextents;
+ xfa.fsx_projid = fa->fsx_projid;
+ xfa.fsx_cowextsize = fa->fsx_cowextsize;
+
+ if (copy_to_user(ufa, &xfa, sizeof(xfa)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(copy_fsxattr_to_user);
+
+static int file_attr_to_fileattr(const struct file_attr *fattr,
+ struct file_kattr *fa)
+{
+ __u64 mask = FS_XFLAGS_MASK;
+
+ if (fattr->fa_xflags & ~mask)
+ return -EINVAL;
+
+ fileattr_fill_xflags(fa, fattr->fa_xflags);
+ fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK;
+ fa->fsx_extsize = fattr->fa_extsize;
+ fa->fsx_projid = fattr->fa_projid;
+ fa->fsx_cowextsize = fattr->fa_cowextsize;
+
+ return 0;
+}
+
+static int copy_fsxattr_from_user(struct file_kattr *fa,
+ struct fsxattr __user *ufa)
+{
+ struct fsxattr xfa;
+ __u32 mask = FS_XFLAGS_MASK;
+
+ if (copy_from_user(&xfa, ufa, sizeof(xfa)))
+ return -EFAULT;
+
+ if (xfa.fsx_xflags & ~mask)
+ return -EOPNOTSUPP;
+
+ fileattr_fill_xflags(fa, xfa.fsx_xflags);
+ fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK;
+ fa->fsx_extsize = xfa.fsx_extsize;
+ fa->fsx_nextents = xfa.fsx_nextents;
+ fa->fsx_projid = xfa.fsx_projid;
+ fa->fsx_cowextsize = xfa.fsx_cowextsize;
+
+ return 0;
+}
+
+/*
+ * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject
+ * any invalid configurations.
+ *
+ * Note: must be called with inode lock held.
+ */
+static int fileattr_set_prepare(struct inode *inode,
+ const struct file_kattr *old_ma,
+ struct file_kattr *fa)
+{
+ int err;
+
+ /*
+ * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+ * the relevant capability.
+ */
+ if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags);
+ if (err)
+ return err;
+
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() != &init_user_ns) {
+ if (old_ma->fsx_projid != fa->fsx_projid)
+ return -EINVAL;
+ if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
+ FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ } else {
+ /*
+ * Caller is allowed to change the project ID. If it is being
+ * changed, make sure that the new value is valid.
+ */
+ if (old_ma->fsx_projid != fa->fsx_projid &&
+ !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
+ return -EINVAL;
+ }
+
+ /* Check extent size hints. */
+ if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
+ !S_ISDIR(inode->i_mode))
+ return -EINVAL;
+
+ if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
+ !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ return -EINVAL;
+
+ /*
+ * It is only valid to set the DAX flag on regular files and
+ * directories on filesystems.
+ */
+ if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
+ !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
+ return -EINVAL;
+
+ /* Extent size hints of zero turn off the flags. */
+ if (fa->fsx_extsize == 0)
+ fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
+ if (fa->fsx_cowextsize == 0)
+ fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
+
+ return 0;
+}
+
+/**
+ * vfs_fileattr_set - change miscellaneous file attributes
+ * @idmap: idmap of the mount
+ * @dentry: the object to change
+ * @fa: fileattr pointer
+ *
+ * After verifying permissions, call i_op->fileattr_set() callback, if
+ * exists.
+ *
+ * Verifying attributes involves retrieving current attributes with
+ * i_op->fileattr_get(), this also allows initializing attributes that have
+ * not been set by the caller to current values. Inode lock is held
+ * throughout to prevent racing with another instance.
+ *
+ * Return: 0 on success, or a negative error on failure.
+ */
+int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ struct inode *inode = d_inode(dentry);
+ struct file_kattr old_ma = {};
+ int err;
+
+ if (!inode->i_op->fileattr_set)
+ return -ENOIOCTLCMD;
+
+ if (!inode_owner_or_capable(idmap, inode))
+ return -EPERM;
+
+ inode_lock(inode);
+ err = vfs_fileattr_get(dentry, &old_ma);
+ if (!err) {
+ /* initialize missing bits from old_ma */
+ if (fa->flags_valid) {
+ fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON;
+ fa->fsx_extsize = old_ma.fsx_extsize;
+ fa->fsx_nextents = old_ma.fsx_nextents;
+ fa->fsx_projid = old_ma.fsx_projid;
+ fa->fsx_cowextsize = old_ma.fsx_cowextsize;
+ } else {
+ fa->flags |= old_ma.flags & ~FS_COMMON_FL;
+ }
+
+ err = fileattr_set_prepare(inode, &old_ma, fa);
+ if (err)
+ goto out;
+ err = security_inode_file_setattr(dentry, fa);
+ if (err)
+ goto out;
+ err = inode->i_op->fileattr_set(idmap, dentry, fa);
+ if (err)
+ goto out;
+ }
+
+out:
+ inode_unlock(inode);
+ return err;
+}
+EXPORT_SYMBOL(vfs_fileattr_set);
+
+int ioctl_getflags(struct file *file, unsigned int __user *argp)
+{
+ struct file_kattr fa = { .flags_valid = true }; /* hint only */
+ int err;
+
+ err = vfs_fileattr_get(file->f_path.dentry, &fa);
+ if (!err)
+ err = put_user(fa.flags, argp);
+ return err;
+}
+
+int ioctl_setflags(struct file *file, unsigned int __user *argp)
+{
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ struct dentry *dentry = file->f_path.dentry;
+ struct file_kattr fa;
+ unsigned int flags;
+ int err;
+
+ err = get_user(flags, argp);
+ if (!err) {
+ err = mnt_want_write_file(file);
+ if (!err) {
+ fileattr_fill_flags(&fa, flags);
+ err = vfs_fileattr_set(idmap, dentry, &fa);
+ mnt_drop_write_file(file);
+ }
+ }
+ return err;
+}
+
+int ioctl_fsgetxattr(struct file *file, void __user *argp)
+{
+ struct file_kattr fa = { .fsx_valid = true }; /* hint only */
+ int err;
+
+ err = vfs_fileattr_get(file->f_path.dentry, &fa);
+ if (!err)
+ err = copy_fsxattr_to_user(&fa, argp);
+
+ return err;
+}
+
+int ioctl_fssetxattr(struct file *file, void __user *argp)
+{
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
+ struct dentry *dentry = file->f_path.dentry;
+ struct file_kattr fa;
+ int err;
+
+ err = copy_fsxattr_from_user(&fa, argp);
+ if (!err) {
+ err = mnt_want_write_file(file);
+ if (!err) {
+ err = vfs_fileattr_set(idmap, dentry, &fa);
+ mnt_drop_write_file(file);
+ }
+ }
+ return err;
+}
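+
A minimal userspace sketch exercising the long-standing FS_IOC_GETFLAGS/FS_IOC_SETFLAGS and FS_IOC_FSGETXATTR ioctls that these helpers back (error handling trimmed; FS_NOATIME_FL chosen arbitrarily):

#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned int flags;
	struct fsxattr xfa;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags |= FS_NOATIME_FL;			/* request no atime updates */
		ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}

	if (ioctl(fd, FS_IOC_FSGETXATTR, &xfa) == 0)
		printf("projid=%u xflags=%#x\n", xfa.fsx_projid, xfa.fsx_xflags);

	close(fd);
	return 0;
}
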
+
+SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename,
+ struct file_attr __user *, ufattr, size_t, usize,
+ unsigned int, at_flags)
+{
+ struct path filepath __free(path_put) = {};
+ struct filename *name __free(putname) = NULL;
+ unsigned int lookup_flags = 0;
+ struct file_attr fattr;
+ struct file_kattr fa;
+ int error;
+
+ BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
+ BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);
+
+ if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ return -EINVAL;
+
+ if (!(at_flags & AT_SYMLINK_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+
+ if (usize > PAGE_SIZE)
+ return -E2BIG;
+
+ if (usize < FILE_ATTR_SIZE_VER0)
+ return -EINVAL;
+
+ name = getname_maybe_null(filename, at_flags);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ if (!name && dfd >= 0) {
+ CLASS(fd, f)(dfd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ filepath = fd_file(f)->f_path;
+ path_get(&filepath);
+ } else {
+ error = filename_lookup(dfd, name, lookup_flags, &filepath,
+ NULL);
+ if (error)
+ return error;
+ }
+
+ error = vfs_fileattr_get(filepath.dentry, &fa);
+ if (error == -ENOIOCTLCMD || error == -ENOTTY)
+ error = -EOPNOTSUPP;
+ if (error)
+ return error;
+
+ fileattr_to_file_attr(&fa, &fattr);
+ error = copy_struct_to_user(ufattr, usize, &fattr,
+ sizeof(struct file_attr), NULL);
+
+ return error;
+}
+
+SYSCALL_DEFINE5(file_setattr, int, dfd, const char __user *, filename,
+ struct file_attr __user *, ufattr, size_t, usize,
+ unsigned int, at_flags)
+{
+ struct path filepath __free(path_put) = {};
+ struct filename *name __free(putname) = NULL;
+ unsigned int lookup_flags = 0;
+ struct file_attr fattr;
+ struct file_kattr fa;
+ int error;
+
+ BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
+ BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);
+
+ if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ return -EINVAL;
+
+ if (!(at_flags & AT_SYMLINK_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+
+ if (usize > PAGE_SIZE)
+ return -E2BIG;
+
+ if (usize < FILE_ATTR_SIZE_VER0)
+ return -EINVAL;
+
+ error = copy_struct_from_user(&fattr, sizeof(struct file_attr), ufattr,
+ usize);
+ if (error)
+ return error;
+
+ error = file_attr_to_fileattr(&fattr, &fa);
+ if (error)
+ return error;
+
+ name = getname_maybe_null(filename, at_flags);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ if (!name && dfd >= 0) {
+ CLASS(fd, f)(dfd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ filepath = fd_file(f)->f_path;
+ path_get(&filepath);
+ } else {
+ error = filename_lookup(dfd, name, lookup_flags, &filepath,
+ NULL);
+ if (error)
+ return error;
+ }
+
+ error = mnt_want_write(filepath.mnt);
+ if (!error) {
+ error = vfs_fileattr_set(mnt_idmap(filepath.mnt),
+ filepath.dentry, &fa);
+ if (error == -ENOIOCTLCMD || error == -ENOTTY)
+ error = -EOPNOTSUPP;
+ mnt_drop_write(filepath.mnt);
+ }
+
+ return error;
+}
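
Until a libc wrapper exists, a userspace caller would reach these syscalls via syscall(2). A heavily hedged sketch: the __NR_file_getattr number below is a placeholder and struct file_attr is reconstructed from the fields copied in fileattr_to_file_attr(), so the real uapi header should be used instead once available.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>

/* Assumed uapi layout; FILE_ATTR_SIZE_VER0 would then be 24 bytes. */
struct file_attr {
	uint64_t fa_xflags;
	uint32_t fa_extsize;
	uint32_t fa_nextents;
	uint32_t fa_projid;
	uint32_t fa_cowextsize;
};

#ifndef __NR_file_getattr
#define __NR_file_getattr 468	/* placeholder, not an allocated number */
#endif

int main(int argc, char **argv)
{
	struct file_attr fa = { 0 };

	if (argc < 2)
		return 1;

	if (syscall(__NR_file_getattr, AT_FDCWD, argv[1],
		    &fa, sizeof(fa), 0) != 0) {
		perror("file_getattr");
		return 1;
	}

	printf("xflags=%#llx projid=%u\n",
	       (unsigned long long)fa.fa_xflags, fa.fa_projid);
	return 0;
}
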
diff --git a/fs/file_table.c b/fs/file_table.c
index 45437f8e1003..cd4a3db4659a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -9,10 +9,10 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
-#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
@@ -25,37 +25,59 @@
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
-#include <linux/ima.h>
#include <linux/swap.h>
+#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include "internal.h"
/* sysctl tunables... */
-struct files_stat_struct files_stat = {
+static struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
/* SLAB cache for file structures */
-static struct kmem_cache *filp_cachep __read_mostly;
+static struct kmem_cache *filp_cachep __ro_after_init;
+static struct kmem_cache *bfilp_cachep __ro_after_init;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-static void file_free_rcu(struct rcu_head *head)
+/* Container for backing file with optional user path */
+struct backing_file {
+ struct file file;
+ union {
+ struct path user_path;
+ freeptr_t bf_freeptr;
+ };
+};
+
+#define backing_file(f) container_of(f, struct backing_file, file)
+
+const struct path *backing_file_user_path(const struct file *f)
{
- struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+ return &backing_file(f)->user_path;
+}
+EXPORT_SYMBOL_GPL(backing_file_user_path);
- put_cred(f->f_cred);
- kmem_cache_free(filp_cachep, f);
+void backing_file_set_user_path(struct file *f, const struct path *path)
+{
+ backing_file(f)->user_path = *path;
}
+EXPORT_SYMBOL_GPL(backing_file_set_user_path);
static inline void file_free(struct file *f)
{
security_file_free(f);
- if (!(f->f_mode & FMODE_NOACCOUNT))
+ if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
percpu_counter_dec(&nr_files);
- call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+ put_cred(f->f_cred);
+ if (unlikely(f->f_mode & FMODE_BACKING)) {
+ path_put(backing_file_user_path(f));
+ kmem_cache_free(bfilp_cachep, backing_file(f));
+ } else {
+ kmem_cache_free(filp_cachep, f);
+ }
}
/*
@@ -75,49 +97,112 @@ unsigned long get_max_files(void)
}
EXPORT_SYMBOL_GPL(get_max_files);
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
/*
* Handle nr_files sysctl
*/
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
- files_stat.nr_files = get_nr_files();
+ files_stat.nr_files = percpu_counter_sum_positive(&nr_files);
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-#else
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+
+static const struct ctl_table fs_stat_sysctls[] = {
+ {
+ .procname = "file-nr",
+ .data = &files_stat,
+ .maxlen = sizeof(files_stat),
+ .mode = 0444,
+ .proc_handler = proc_nr_files,
+ },
+ {
+ .procname = "file-max",
+ .data = &files_stat.max_files,
+ .maxlen = sizeof(files_stat.max_files),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = SYSCTL_LONG_ZERO,
+ .extra2 = SYSCTL_LONG_MAX,
+ },
+ {
+ .procname = "nr_open",
+ .data = &sysctl_nr_open,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = &sysctl_nr_open_min,
+ .extra2 = &sysctl_nr_open_max,
+ },
+};
+
+static int __init init_fs_stat_sysctls(void)
{
- return -ENOSYS;
+ register_sysctl_init("fs", fs_stat_sysctls);
+ if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+ struct ctl_table_header *hdr;
+
+ hdr = register_sysctl_mount_point("fs/binfmt_misc");
+ kmemleak_not_leak(hdr);
+ }
+ return 0;
}
+fs_initcall(init_fs_stat_sysctls);
#endif
-static struct file *__alloc_file(int flags, const struct cred *cred)
+static int init_file(struct file *f, int flags, const struct cred *cred)
{
- struct file *f;
int error;
- f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
- if (unlikely(!f))
- return ERR_PTR(-ENOMEM);
-
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
- file_free_rcu(&f->f_u.fu_rcuhead);
- return ERR_PTR(error);
+ put_cred(f->f_cred);
+ return error;
}
- atomic_long_set(&f->f_count, 1);
- rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
+ /*
+ * Note that f_pos_lock is only used for files raising
+ * FMODE_ATOMIC_POS and directories. Other files such as pipes
+ * don't need it and, since f_pos_lock is in a union, may reuse
+ * the space for other purposes. They are expected to initialize
+ * the respective member when opening the file.
+ */
mutex_init(&f->f_pos_lock);
- f->f_flags = flags;
- f->f_mode = OPEN_FMODE(flags);
- /* f->f_version: 0 */
+ memset(&f->__f_path, 0, sizeof(f->f_path));
+ memset(&f->f_ra, 0, sizeof(f->f_ra));
+
+ f->f_flags = flags;
+ f->f_mode = OPEN_FMODE(flags);
+
+ f->f_op = NULL;
+ f->f_mapping = NULL;
+ f->private_data = NULL;
+ f->f_inode = NULL;
+ f->f_owner = NULL;
+#ifdef CONFIG_EPOLL
+ f->f_ep = NULL;
+#endif
- return f;
+ f->f_iocb_flags = 0;
+ f->f_pos = 0;
+ f->f_wb_err = 0;
+ f->f_sb_err = 0;
+
+ /*
+ * We're SLAB_TYPESAFE_BY_RCU so initialize f_ref last. While
+ * fget-rcu pattern users need to be able to handle spurious
+ * refcount bumps we should reinitialize the reused file first.
+ */
+ file_ref_init(&f->f_ref, 1);
+ /*
+ * Disable permission and pre-content events for all files by default.
+ * They may be enabled later by fsnotify_open_perm_and_set_mode().
+ */
+ file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
+ return 0;
}
/* Find an unused file structure and return a pointer to it.
@@ -134,11 +219,13 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
{
static long old_max;
struct file *f;
+ int error;
/*
* Privileged users can go above max_files
*/
- if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+ if (unlikely(get_nr_files() >= files_stat.max_files) &&
+ !capable(CAP_SYS_ADMIN)) {
/*
* percpu_counters are inaccurate. Do an expensive check before
* we go and fail.
@@ -147,9 +234,17 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
goto over;
}
- f = __alloc_file(flags, cred);
- if (!IS_ERR(f))
- percpu_counter_inc(&nr_files);
+ f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+ if (unlikely(!f))
+ return ERR_PTR(-ENOMEM);
+
+ error = init_file(f, flags, cred);
+ if (unlikely(error)) {
+ kmem_cache_free(filp_cachep, f);
+ return ERR_PTR(error);
+ }
+
+ percpu_counter_inc(&nr_files);
return f;
@@ -165,83 +260,176 @@ over:
/*
* Variant of alloc_empty_file() that doesn't check and modify nr_files.
*
- * Should not be used unless there's a very good reason to do so.
+ * This is only for kernel internal use, and the allocated file must not be
+ * installed into file tables or such.
*/
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
- struct file *f = __alloc_file(flags, cred);
+ struct file *f;
+ int error;
+
+ f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+ if (unlikely(!f))
+ return ERR_PTR(-ENOMEM);
+
+ error = init_file(f, flags, cred);
+ if (unlikely(error)) {
+ kmem_cache_free(filp_cachep, f);
+ return ERR_PTR(error);
+ }
- if (!IS_ERR(f))
- f->f_mode |= FMODE_NOACCOUNT;
+ f->f_mode |= FMODE_NOACCOUNT;
return f;
}
+/*
+ * Variant of alloc_empty_file() that allocates a backing_file container
+ * and doesn't check and modify nr_files.
+ *
+ * This is only for kernel internal use, and the allocated file must not be
+ * installed into file tables or such.
+ */
+struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
+{
+ struct backing_file *ff;
+ int error;
+
+ ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
+ if (unlikely(!ff))
+ return ERR_PTR(-ENOMEM);
+
+ error = init_file(&ff->file, flags, cred);
+ if (unlikely(error)) {
+ kmem_cache_free(bfilp_cachep, ff);
+ return ERR_PTR(error);
+ }
+
+ ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
+ return &ff->file;
+}
+
/**
- * alloc_file - allocate and initialize a 'struct file'
+ * file_init_path - initialize a 'struct file' based on path
*
+ * @file: the file to set up
* @path: the (dentry, vfsmount) pair for the new file
- * @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
-static struct file *alloc_file(const struct path *path, int flags,
- const struct file_operations *fop)
+static void file_init_path(struct file *file, const struct path *path,
+ const struct file_operations *fop)
{
- struct file *file;
-
- file = alloc_empty_file(flags, current_cred());
- if (IS_ERR(file))
- return file;
-
- file->f_path = *path;
+ file->__f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
file->f_sb_err = file_sample_sb_err(file);
+ if (fop->llseek)
+ file->f_mode |= FMODE_LSEEK;
if ((file->f_mode & FMODE_READ) &&
likely(fop->read || fop->read_iter))
file->f_mode |= FMODE_CAN_READ;
if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
file->f_mode |= FMODE_CAN_WRITE;
+ file->f_iocb_flags = iocb_flags(file);
file->f_mode |= FMODE_OPENED;
file->f_op = fop;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
+}
+
+/**
+ * alloc_file - allocate and initialize a 'struct file'
+ *
+ * @path: the (dentry, vfsmount) pair for the new file
+ * @flags: O_... flags with which the new file will be opened
+ * @fop: the 'struct file_operations' for the new file
+ */
+static struct file *alloc_file(const struct path *path, int flags,
+ const struct file_operations *fop)
+{
+ struct file *file;
+
+ file = alloc_empty_file(flags, current_cred());
+ if (!IS_ERR(file))
+ file_init_path(file, path, fop);
return file;
}
+static inline int alloc_path_pseudo(const char *name, struct inode *inode,
+ struct vfsmount *mnt, struct path *path)
+{
+ path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
+ if (!path->dentry)
+ return -ENOMEM;
+ path->mnt = mntget(mnt);
+ d_instantiate(path->dentry, inode);
+ return 0;
+}
+
struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
- const char *name, int flags,
- const struct file_operations *fops)
+ const char *name, int flags,
+ const struct file_operations *fops)
{
- static const struct dentry_operations anon_ops = {
- .d_dname = simple_dname
- };
- struct qstr this = QSTR_INIT(name, strlen(name));
+ int ret;
struct path path;
struct file *file;
- path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
- if (!path.dentry)
- return ERR_PTR(-ENOMEM);
- if (!mnt->mnt_sb->s_d_op)
- d_set_d_op(path.dentry, &anon_ops);
- path.mnt = mntget(mnt);
- d_instantiate(path.dentry, inode);
+ ret = alloc_path_pseudo(name, inode, mnt, &path);
+ if (ret)
+ return ERR_PTR(ret);
+
file = alloc_file(&path, flags, fops);
if (IS_ERR(file)) {
ihold(inode);
path_put(&path);
+ return file;
}
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
+struct file *alloc_file_pseudo_noaccount(struct inode *inode,
+ struct vfsmount *mnt, const char *name,
+ int flags,
+ const struct file_operations *fops)
+{
+ int ret;
+ struct path path;
+ struct file *file;
+
+ ret = alloc_path_pseudo(name, inode, mnt, &path);
+ if (ret)
+ return ERR_PTR(ret);
+
+ file = alloc_empty_file_noaccount(flags, current_cred());
+ if (IS_ERR(file)) {
+ ihold(inode);
+ path_put(&path);
+ return file;
+ }
+ file_init_path(file, &path, fops);
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
+ return file;
+}
+EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
+
struct file *alloc_file_clone(struct file *base, int flags,
const struct file_operations *fops)
{
- struct file *f = alloc_file(&base->f_path, flags, fops);
+ struct file *f;
+
+ f = alloc_file(&base->f_path, flags, fops);
if (!IS_ERR(f)) {
path_get(&f->f_path);
f->f_mapping = base->f_mapping;
@@ -271,7 +459,7 @@ static void __fput(struct file *file)
eventpoll_release(file);
locks_remove_file(file);
- ima_file_free(file);
+ security_file_release(file);
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
@@ -283,13 +471,8 @@ static void __fput(struct file *file)
cdev_put(inode->i_cdev);
}
fops_put(file->f_op);
- put_pid(file->f_owner.pid);
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- i_readcount_dec(inode);
- if (mode & FMODE_WRITER) {
- put_write_access(inode);
- __mnt_drop_write(mnt);
- }
+ file_f_owner_release(file);
+ put_file_access(file);
dput(dentry);
if (unlikely(mode & FMODE_NEED_UNMOUNT))
dissolve_on_fput(mnt);
@@ -304,15 +487,17 @@ static void delayed_fput(struct work_struct *unused)
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct file *f, *t;
- llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
+ llist_for_each_entry_safe(f, t, node, f_llist)
__fput(f);
}
static void ____fput(struct callback_head *work)
{
- __fput(container_of(work, struct file, f_u.fu_rcuhead));
+ __fput(container_of(work, struct file, f_task_work));
}
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
+
/*
* If kernel thread really needs to have the final fput() it has done
* to complete, call this. The only user right now is the boot - we
@@ -326,36 +511,40 @@ static void ____fput(struct callback_head *work)
void flush_delayed_fput(void)
{
delayed_fput(NULL);
+ flush_delayed_work(&delayed_fput_work);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);
-static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
-
-void fput_many(struct file *file, unsigned int refs)
+static void __fput_deferred(struct file *file)
{
- if (atomic_long_sub_and_test(refs, &file->f_count)) {
- struct task_struct *task = current;
-
- if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_u.fu_rcuhead, ____fput);
- if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME))
- return;
- /*
- * After this task has run exit_task_work(),
- * task_work_add() will fail. Fall through to delayed
- * fput to avoid leaking *file.
- */
- }
-
- if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
- schedule_delayed_work(&delayed_fput_work, 1);
+ struct task_struct *task = current;
+
+ if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
+ file_free(file);
+ return;
}
+
+ if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ init_task_work(&file->f_task_work, ____fput);
+ if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
+ return;
+ /*
+ * After this task has run exit_task_work(),
+ * task_work_add() will fail. Fall through to delayed
+ * fput to avoid leaking *file.
+ */
+ }
+
+ if (llist_add(&file->f_llist, &delayed_fput_list))
+ schedule_delayed_work(&delayed_fput_work, 1);
}
void fput(struct file *file)
{
- fput_many(file, 1);
+ if (unlikely(file_ref_put(&file->f_ref)))
+ __fput_deferred(file);
}
+EXPORT_SYMBOL(fput);
/*
* synchronous analog of fput(); for kernel threads that might be needed
@@ -367,19 +556,50 @@ void fput(struct file *file)
*/
void __fput_sync(struct file *file)
{
- if (atomic_long_dec_and_test(&file->f_count)) {
- struct task_struct *task = current;
- BUG_ON(!(task->flags & PF_KTHREAD));
+ if (file_ref_put(&file->f_ref))
__fput(file);
- }
}
+EXPORT_SYMBOL(__fput_sync);
-EXPORT_SYMBOL(fput);
+/*
+ * Equivalent to __fput_sync(), but optimized for being called with the last
+ * reference.
+ *
+ * See file_ref_put_close() for details.
+ */
+void fput_close_sync(struct file *file)
+{
+ if (likely(file_ref_put_close(&file->f_ref)))
+ __fput(file);
+}
+
+/*
+ * Equivalent to fput(), but optimized for being called with the last
+ * reference.
+ *
+ * See file_ref_put_close() for details.
+ */
+void fput_close(struct file *file)
+{
+ if (file_ref_put_close(&file->f_ref))
+ __fput_deferred(file);
+}
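The split between fput() and __fput_deferred() keeps the common path down to a single file_ref_put(); the expensive teardown in __fput() then runs from task_work, or from the delayed work item for kernel threads and interrupt context. A sketch of the usual pairing as seen from a driver, with example_use_and_release() as a hypothetical caller:

#include <linux/file.h>

static void example_use_and_release(struct file *file)
{
	get_file(file);		/* take an extra reference on file->f_ref */
	/* ... use the file: vfs_read()/vfs_write(), f_mapping, etc. ... */
	fput(file);		/* drop it; the final __fput() is deferred to task_work */
}

fput_close() and fput_close_sync() are only worth using on paths that are likely dropping the last reference, such as close(2).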
void __init files_init(void)
{
- filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
+ struct kmem_cache_args args = {
+ .use_freeptr_offset = true,
+ .freeptr_offset = offsetof(struct file, f_freeptr),
+ };
+
+ filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
+
+ args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
+ bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
+ &args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
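SLAB_TYPESAFE_BY_RCU means a freed struct file can be reused for another file while an RCU reader still holds a stale pointer (the memory stays type-stable, not reference-stable), which is also why the cache now keeps its free pointer in the dedicated f_freeptr field instead of overlaying it on fields a reader may inspect. Roughly the lookup idea this enables (a simplified sketch, not the literal fget() fast path; fdt and fd are assumed to come from the caller's descriptor table):

rcu_read_lock();
file = rcu_dereference(fdt->fd[fd]);
if (file && file_ref_get(&file->f_ref)) {
	/* The object may have been recycled for a different file;
	 * re-check the descriptor slot before trusting what was pinned. */
	if (rcu_dereference(fdt->fd[fd]) != file) {
		fput(file);
		file = NULL;	/* raced with close(); the caller retries */
	}
} else {
	file = NULL;
}
rcu_read_unlock();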
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 58b9067b2391..95e5256821a5 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -156,15 +156,19 @@ static int fs_index(const char __user * __name)
static int fs_name(unsigned int index, char __user * buf)
{
struct file_system_type * tmp;
- int len, res;
+ int len, res = -EINVAL;
read_lock(&file_systems_lock);
- for (tmp = file_systems; tmp; tmp = tmp->next, index--)
- if (index <= 0 && try_module_get(tmp->owner))
+ for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
+ if (index == 0) {
+ if (try_module_get(tmp->owner))
+ res = 0;
break;
+ }
+ }
read_unlock(&file_systems_lock);
- if (!tmp)
- return -EINVAL;
+ if (res)
+ return res;
/* OK, we got the reference, so we can safely block */
len = strlen(tmp->name) + 1;
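fs_name() implements option 2 of the legacy sysfs(2) syscall (translate a filesystem-type index into a name); the reworked loop returns -EINVAL unless the index matches exactly and the owning module could be pinned. A userspace illustration, assuming the kernel was built with CONFIG_SYSFS_SYSCALL (there is no glibc wrapper, so raw syscall(2) is used):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char name[64];
	long i, n = syscall(SYS_sysfs, 3);	/* option 3: number of registered fs types */

	for (i = 0; i < n; i++)
		if (syscall(SYS_sysfs, 2, i, name) == 0)	/* option 2: name by index */
			printf("%ld: %s\n", i, name);
	return 0;
}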
diff --git a/fs/freevxfs/Kconfig b/fs/freevxfs/Kconfig
index c05c71d57291..912107ebea6f 100644
--- a/fs/freevxfs/Kconfig
+++ b/fs/freevxfs/Kconfig
@@ -2,13 +2,14 @@
config VXFS_FS
tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
depends on BLOCK
+ select BUFFER_HEAD
help
FreeVxFS is a file system driver that supports the VERITAS VxFS(TM)
file system format. VERITAS VxFS(TM) is the standard file system
of SCO UnixWare (and possibly others) and optionally available
for Sunsoft Solaris, HP-UX and many other operating systems. However
these particular OS implementations of vxfs may differ in on-disk
- data endianess and/or superblock offset. The vxfs module has been
+ data endianness and/or superblock offset. The vxfs module has been
tested with SCO UnixWare and HP-UX B.10.20 (pa-risc 1.1 arch.)
Currently only read-only access is supported, for VxFS versions
2, 3 and 4. Tests were performed with HP-UX VxFS version 3.
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
index a41ea0ba6943..bffd156d6434 100644
--- a/fs/freevxfs/vxfs.h
+++ b/fs/freevxfs/vxfs.h
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_SUPER_H_
#define _VXFS_SUPER_H_
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index 1fd41cf98b9f..26d367e3668d 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
@@ -53,7 +29,7 @@ vxfs_typdump(struct vxfs_typed *typ)
/**
* vxfs_bmap_ext4 - do bmap for ext4 extents
* @ip: pointer to the inode we do bmap for
- * @iblock: logical block.
+ * @bn: logical block.
*
* Description:
* vxfs_bmap_ext4 performs the bmap operation for inodes with
@@ -121,7 +97,7 @@ fail_buf:
* vxfs_bmap_indir reads a &struct vxfs_typed at @indir
* and performs the type-defined action.
*
- * Return Value:
+ * Returns:
* The physical block number on success, else Zero.
*
* Note:
@@ -203,7 +179,7 @@ out:
* Description:
* Performs the bmap operation for typed extents.
*
- * Return Value:
+ * Returns:
* The physical block number on success, else Zero.
*/
static daddr_t
@@ -267,7 +243,7 @@ vxfs_bmap_typed(struct inode *ip, long iblock)
 * vxfs_bmap1 performs a logical to physical block mapping
* for vxfs-internal purposes.
*
- * Return Value:
+ * Returns:
* The physical block number on success, else Zero.
*/
daddr_t
diff --git a/fs/freevxfs/vxfs_dir.h b/fs/freevxfs/vxfs_dir.h
index acc5477b3f23..8c67627f2a3d 100644
--- a/fs/freevxfs/vxfs_dir.h
+++ b/fs/freevxfs/vxfs_dir.h
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_DIR_H_
#define _VXFS_DIR_H_
@@ -50,7 +25,7 @@
struct vxfs_dirblk {
__fs16 d_free; /* free space in dirblock */
__fs16 d_nhash; /* no of hash chains */
- __fs16 d_hash[1]; /* hash chain */
+ __fs16 d_hash[]; /* hash chain */
};
/*
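With d_hash converted to a C99 flexible array member, sizeof(struct vxfs_dirblk) no longer counts a phantom first hash entry; any size calculation over the hash chains is then expressed with struct_size(). A small illustrative sketch (dirblk and nhash are assumed locals, not code from this change):

#include <linux/overflow.h>

size_t bytes = struct_size(dirblk, d_hash, nhash);
	/* == sizeof(*dirblk) + nhash * sizeof(dirblk->d_hash[0]) */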
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index f5c428e21024..3a2180c5e208 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_EXTERN_H_
#define _VXFS_EXTERN_H_
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index a4610a77649e..c1174a3f8990 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_fshead.h b/fs/freevxfs/vxfs_fshead.h
index e026f0c49159..dfd2147599c4 100644
--- a/fs/freevxfs/vxfs_fshead.h
+++ b/fs/freevxfs/vxfs_fshead.h
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_FSHEAD_H_
#define _VXFS_FSHEAD_H_
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index bfc780c682fb..ed51fcd34757 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
@@ -37,45 +13,41 @@
#include "vxfs_extern.h"
#include "vxfs_inode.h"
-
-static int vxfs_immed_readpage(struct file *, struct page *);
-
-/*
- * Address space operations for immed files and directories.
- */
-const struct address_space_operations vxfs_immed_aops = {
- .readpage = vxfs_immed_readpage,
-};
-
/**
- * vxfs_immed_readpage - read part of an immed inode into pagecache
- * @file: file context (unused)
- * @page: page frame to fill in.
+ * vxfs_immed_read_folio - read part of an immed inode into pagecache
+ * @fp: file context (unused)
+ * @folio: folio to fill in.
*
* Description:
- * vxfs_immed_readpage reads a part of the immed area of the
- * file that hosts @pp into the pagecache.
+ * vxfs_immed_read_folio reads a part of the immed area of the
+ * file that hosts @folio into the pagecache.
*
* Returns:
* Zero on success, else a negative error code.
*
* Locking status:
- * @page is locked and will be unlocked.
+ * @folio is locked and will be unlocked.
*/
-static int
-vxfs_immed_readpage(struct file *fp, struct page *pp)
+static int vxfs_immed_read_folio(struct file *fp, struct folio *folio)
{
- struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
- u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT;
- caddr_t kaddr;
+ struct vxfs_inode_info *vip = VXFS_INO(folio->mapping->host);
+ void *src = vip->vii_immed.vi_immed + folio_pos(folio);
+ unsigned long i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ memcpy_to_page(folio_page(folio, i), 0, src, PAGE_SIZE);
+ src += PAGE_SIZE;
+ }
- kaddr = kmap(pp);
- memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
- kunmap(pp);
-
- flush_dcache_page(pp);
- SetPageUptodate(pp);
- unlock_page(pp);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
}
+
+/*
+ * Address space operations for immed files and directories.
+ */
+const struct address_space_operations vxfs_immed_aops = {
+ .read_folio = vxfs_immed_read_folio,
+};
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 1f41b25ef38b..21fc94b98209 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
@@ -133,12 +109,9 @@ static inline void dip2vip_cpy(struct vxfs_sb_info *sbi,
set_nlink(inode, vip->vii_nlink);
inode->i_size = vip->vii_size;
- inode->i_atime.tv_sec = vip->vii_atime;
- inode->i_ctime.tv_sec = vip->vii_ctime;
- inode->i_mtime.tv_sec = vip->vii_mtime;
- inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
+ inode_set_atime(inode, vip->vii_atime, 0);
+ inode_set_ctime(inode, vip->vii_ctime, 0);
+ inode_set_mtime(inode, vip->vii_mtime, 0);
inode->i_blocks = vip->vii_blocks;
inode->i_generation = vip->vii_gen;
@@ -285,7 +258,7 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
ip = iget_locked(sbp, ino);
if (!ip)
return ERR_PTR(-ENOMEM);
- if (!(ip->i_state & I_NEW))
+ if (!(inode_state_read_once(ip) & I_NEW))
return ip;
vip = VXFS_INO(ip);
diff --git a/fs/freevxfs/vxfs_inode.h b/fs/freevxfs/vxfs_inode.h
index f012abed125d..1e9e138d2b33 100644
--- a/fs/freevxfs/vxfs_inode.h
+++ b/fs/freevxfs/vxfs_inode.h
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_INODE_H_
#define _VXFS_INODE_H_
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index a51425634f65..1b0bca8b4cc6 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
@@ -201,8 +177,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
/**
* vxfs_readdir - read a directory
* @fp: the directory to read
- * @retp: return buffer
- * @filler: filldir callback
+ * @ctx: dir_context for filldir/readdir
*
* Description:
 * vxfs_readdir fills @ctx with directory entries from @fp
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 813da6685151..23f35187c289 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
index 0c0b0c9fa557..53afba08d617 100644
--- a/fs/freevxfs/vxfs_olt.h
+++ b/fs/freevxfs/vxfs_olt.h
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
*/
#ifndef _VXFS_OLT_H_
#define _VXFS_OLT_H_
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index e806694d4145..f439877ea6e8 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
@@ -38,11 +14,11 @@
#include "vxfs_extern.h"
-static int vxfs_readpage(struct file *, struct page *);
+static int vxfs_read_folio(struct file *, struct folio *);
static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = {
- .readpage = vxfs_readpage,
+ .read_folio = vxfs_read_folio,
.bmap = vxfs_bmap,
};
@@ -55,7 +31,7 @@ vxfs_put_page(struct page *pp)
/**
* vxfs_get_page - read a page into memory.
- * @ip: inode to read from
+ * @mapping: mapping to read from
* @n: page number
*
* Description:
@@ -75,15 +51,9 @@ vxfs_get_page(struct address_space *mapping, u_long n)
kmap(pp);
/** if (!PageChecked(pp)) **/
/** vxfs_check_page(pp); **/
- if (PageError(pp))
- goto fail;
}
return (pp);
-
-fail:
- vxfs_put_page(pp);
- return ERR_PTR(-EIO);
}
/**
@@ -111,14 +81,14 @@ vxfs_bread(struct inode *ip, int block)
}
/**
- * vxfs_get_block - locate buffer for given inode,block tuple
+ * vxfs_getblk - locate buffer for given inode,block tuple
* @ip: inode
* @iblock: logical block
* @bp: buffer skeleton
* @create: %TRUE if blocks may be newly allocated.
*
* Description:
- * The vxfs_get_block function fills @bp with the right physical
+ * The vxfs_getblk function fills @bp with the right physical
* block and device number to perform a lowlevel read/write on
* it.
*
@@ -141,24 +111,23 @@ vxfs_getblk(struct inode *ip, sector_t iblock,
}
/**
- * vxfs_readpage - read one page synchronously into the pagecache
+ * vxfs_read_folio - read one page synchronously into the pagecache
* @file: file context (unused)
- * @page: page frame to fill in.
+ * @folio: folio to fill in.
*
* Description:
- * The vxfs_readpage routine reads @page synchronously into the
+ * The vxfs_read_folio routine reads @folio synchronously into the
* pagecache.
*
* Returns:
* Zero on success, else a negative error code.
*
* Locking status:
- * @page is locked and will be unlocked.
+ * @folio is locked and will be unlocked.
*/
-static int
-vxfs_readpage(struct file *file, struct page *page)
+static int vxfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, vxfs_getblk);
+ return block_read_full_folio(folio, vxfs_getblk);
}
/**
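block_read_full_folio() does the folio locking, buffer mapping and I/O itself; the filesystem only supplies a get_block_t callback such as vxfs_getblk that maps a logical block to a physical one in the passed buffer_head. The read-only shape of such a callback looks roughly like this (example_get_block() and example_bmap() are illustrative stand-ins, not the actual VxFS code):

#include <linux/buffer_head.h>

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh, int create)
{
	daddr_t pblock = example_bmap(inode, iblock);	/* fs-specific block lookup */

	if (!pblock)
		return 0;			/* hole: leave the buffer unmapped */
	map_bh(bh, inode->i_sb, pblock);	/* fills b_bdev/b_blocknr, marks mapped */
	return 0;
}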
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 578a5062706e..fabe60778658 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
* Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
/*
@@ -41,7 +17,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/vfs.h>
-#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "vxfs.h"
#include "vxfs_extern.h"
@@ -100,6 +76,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
{
struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb);
struct vxfs_sb *raw_sb = infp->vsi_raw;
+ u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev);
bufp->f_type = VXFS_SUPER_MAGIC;
bufp->f_bsize = dentry->d_sb->s_blocksize;
@@ -108,15 +85,16 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
bufp->f_bavail = 0;
bufp->f_files = 0;
bufp->f_ffree = fs32_to_cpu(infp, raw_sb->vs_ifree);
+ bufp->f_fsid = u64_to_fsid(id);
bufp->f_namelen = VXFS_NAMELEN;
return 0;
}
-static int vxfs_remount(struct super_block *sb, int *flags, char *data)
+static int vxfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_RDONLY;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
return 0;
}
@@ -124,7 +102,7 @@ static struct inode *vxfs_alloc_inode(struct super_block *sb)
{
struct vxfs_inode_info *vi;
- vi = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL);
+ vi = alloc_inode_sb(sb, vxfs_inode_cachep, GFP_KERNEL);
if (!vi)
return NULL;
inode_init_once(&vi->vfs_inode);
@@ -142,24 +120,24 @@ static const struct super_operations vxfs_super_ops = {
.evict_inode = vxfs_evict_inode,
.put_super = vxfs_put_super,
.statfs = vxfs_statfs,
- .remount_fs = vxfs_remount,
};
-static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
+static int vxfs_try_sb_magic(struct super_block *sbp, struct fs_context *fc,
unsigned blk, __fs32 magic)
{
struct buffer_head *bp;
struct vxfs_sb *rsbp;
struct vxfs_sb_info *infp = VXFS_SBI(sbp);
+ int silent = fc->sb_flags & SB_SILENT;
int rc = -ENOMEM;
bp = sb_bread(sbp, blk);
do {
if (!bp || !buffer_mapped(bp)) {
if (!silent) {
- printk(KERN_WARNING
- "vxfs: unable to read disk superblock at %u\n",
- blk);
+ warnf(fc,
+ "vxfs: unable to read disk superblock at %u",
+ blk);
}
break;
}
@@ -168,9 +146,9 @@ static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
rsbp = (struct vxfs_sb *)bp->b_data;
if (rsbp->vs_magic != magic) {
if (!silent)
- printk(KERN_NOTICE
- "vxfs: WRONG superblock magic %08x at %u\n",
- rsbp->vs_magic, blk);
+ infof(fc,
+ "vxfs: WRONG superblock magic %08x at %u",
+ rsbp->vs_magic, blk);
break;
}
@@ -189,10 +167,9 @@ static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
}
/**
- * vxfs_read_super - read superblock into memory and initialize filesystem
+ * vxfs_fill_super - read superblock into memory and initialize filesystem
* @sbp: VFS superblock (to fill)
- * @dp: fs private mount data
- * @silent: do not complain loudly when sth is wrong
+ * @fc: filesystem context
*
* Description:
* We are called on the first mount of a filesystem to read the
@@ -204,26 +181,27 @@ static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
* Locking:
* We are under @sbp->s_lock.
*/
-static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
+static int vxfs_fill_super(struct super_block *sbp, struct fs_context *fc)
{
struct vxfs_sb_info *infp;
struct vxfs_sb *rsbp;
u_long bsize;
struct inode *root;
int ret = -EINVAL;
+ int silent = fc->sb_flags & SB_SILENT;
u32 j;
sbp->s_flags |= SB_RDONLY;
infp = kzalloc(sizeof(*infp), GFP_KERNEL);
if (!infp) {
- printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n");
+ warnf(fc, "vxfs: unable to allocate incore superblock");
return -ENOMEM;
}
bsize = sb_min_blocksize(sbp, BLOCK_SIZE);
if (!bsize) {
- printk(KERN_WARNING "vxfs: unable to set blocksize\n");
+ warnf(fc, "vxfs: unable to set blocksize");
goto out;
}
@@ -232,24 +210,24 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
sbp->s_time_min = 0;
sbp->s_time_max = U32_MAX;
- if (!vxfs_try_sb_magic(sbp, silent, 1,
+ if (!vxfs_try_sb_magic(sbp, fc, 1,
(__force __fs32)cpu_to_le32(VXFS_SUPER_MAGIC))) {
/* Unixware, x86 */
infp->byte_order = VXFS_BO_LE;
- } else if (!vxfs_try_sb_magic(sbp, silent, 8,
+ } else if (!vxfs_try_sb_magic(sbp, fc, 8,
(__force __fs32)cpu_to_be32(VXFS_SUPER_MAGIC))) {
/* HP-UX, parisc */
infp->byte_order = VXFS_BO_BE;
} else {
if (!silent)
- printk(KERN_NOTICE "vxfs: can't find superblock.\n");
+ infof(fc, "vxfs: can't find superblock.");
goto out;
}
rsbp = infp->vsi_raw;
j = fs32_to_cpu(infp, rsbp->vs_version);
if ((j < 2 || j > 4) && !silent) {
- printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n", j);
+ infof(fc, "vxfs: unsupported VxFS version (%d)", j);
goto out;
}
@@ -266,17 +244,17 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
j = fs32_to_cpu(infp, rsbp->vs_bsize);
if (!sb_set_blocksize(sbp, j)) {
- printk(KERN_WARNING "vxfs: unable to set final block size\n");
+ warnf(fc, "vxfs: unable to set final block size");
goto out;
}
if (vxfs_read_olt(sbp, bsize)) {
- printk(KERN_WARNING "vxfs: unable to read olt\n");
+ warnf(fc, "vxfs: unable to read olt");
goto out;
}
if (vxfs_read_fshead(sbp)) {
- printk(KERN_WARNING "vxfs: unable to read fshead\n");
+ warnf(fc, "vxfs: unable to read fshead");
goto out;
}
@@ -287,7 +265,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
}
sbp->s_root = d_make_root(root);
if (!sbp->s_root) {
- printk(KERN_WARNING "vxfs: unable to get root dentry.\n");
+ warnf(fc, "vxfs: unable to get root dentry.");
goto out_free_ilist;
}
@@ -306,18 +284,29 @@ out:
/*
* The usual module blurb.
*/
-static struct dentry *vxfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int vxfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, vxfs_fill_super);
+}
+
+static const struct fs_context_operations vxfs_context_ops = {
+ .get_tree = vxfs_get_tree,
+ .reconfigure = vxfs_reconfigure,
+};
+
+static int vxfs_init_fs_context(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, vxfs_fill_super);
+ fc->ops = &vxfs_context_ops;
+
+ return 0;
}
static struct file_system_type vxfs_fs_type = {
.owner = THIS_MODULE,
.name = "vxfs",
- .mount = vxfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = vxfs_init_fs_context,
};
MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */
MODULE_ALIAS("vxfs");
@@ -329,7 +318,7 @@ vxfs_init(void)
vxfs_inode_cachep = kmem_cache_create_usercopy("vxfs_inode",
sizeof(struct vxfs_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+ SLAB_RECLAIM_ACCOUNT,
offsetof(struct vxfs_inode_info, vii_immed.vi_immed),
sizeof_field(struct vxfs_inode_info,
vii_immed.vi_immed),
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 67f0e88eed01..6800886c4d10 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -14,6 +14,7 @@
* Additions for address_space-based writeback
*/
+#include <linux/sched/sysctl.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -32,11 +33,6 @@
#include "internal.h"
/*
- * 4MB minimal write chunk size
- */
-#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
-
-/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
struct wb_writeback_work {
@@ -65,7 +61,7 @@ struct wb_writeback_work {
* timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
*/
-unsigned int dirtytime_expire_interval = 12 * 60 * 60;
+static unsigned int dirtytime_expire_interval = 12 * 60 * 60;
static inline struct inode *wb_inode(struct list_head *head)
{
@@ -120,6 +116,8 @@ static bool inode_io_list_move_locked(struct inode *inode,
struct list_head *head)
{
assert_spin_locked(&wb->list_lock);
+ assert_spin_locked(&inode->i_lock);
+ WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
list_move(&inode->i_io_list, head);
@@ -133,14 +131,38 @@ static bool inode_io_list_move_locked(struct inode *inode,
static void wb_wakeup(struct bdi_writeback *wb)
{
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state))
mod_delayed_work(bdi_wq, &wb->dwork, 0);
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
}
-static void finish_writeback_work(struct bdi_writeback *wb,
- struct wb_writeback_work *work)
+/*
+ * This function is used when the first inode for this wb is marked dirty. It
+ * wakes up the corresponding bdi thread which should then take care of the
+ * periodic background write-out of dirty inodes. Since the write-out would
+ * start only 'dirty_writeback_interval' centisecs from now anyway, we just
+ * set up a timer which wakes the bdi thread up later.
+ *
+ * Note: we wouldn't bother setting up the timer, but this function is on the
+ * fast path (used by '__mark_inode_dirty()'), so we save a few context
+ * switches by delaying the wake-up.
+ *
+ * We have to be careful not to postpone flush work if it is scheduled for
+ * earlier. Thus we use queue_delayed_work().
+ */
+static void wb_wakeup_delayed(struct bdi_writeback *wb)
+{
+ unsigned long timeout;
+
+ timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
+ spin_lock_irq(&wb->work_lock);
+ if (test_bit(WB_registered, &wb->state))
+ queue_delayed_work(bdi_wq, &wb->dwork, timeout);
+ spin_unlock_irq(&wb->work_lock);
+}
+
+static void finish_writeback_work(struct wb_writeback_work *work)
{
struct wb_completion *done = work->done;
@@ -163,15 +185,28 @@ static void wb_queue_work(struct bdi_writeback *wb,
if (work->done)
atomic_inc(&work->done->cnt);
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state)) {
list_add_tail(&work->list, &wb->work_list);
mod_delayed_work(bdi_wq, &wb->dwork, 0);
} else
- finish_writeback_work(wb, work);
+ finish_writeback_work(work);
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
+}
+
+static bool wb_wait_for_completion_cb(struct wb_completion *done)
+{
+ unsigned long waited_secs = (jiffies - done->wait_start) / HZ;
+
+ done->progress_stamp = jiffies;
+ if (waited_secs > sysctl_hung_task_timeout_secs)
+ pr_info("INFO: The task %s:%d has been waiting for writeback "
+ "completion for more than %lu seconds.",
+ current->comm, current->pid, waited_secs);
+
+ return !atomic_read(&done->cnt);
}
/**
@@ -186,8 +221,9 @@ static void wb_queue_work(struct bdi_writeback *wb,
*/
void wb_wait_for_completion(struct wb_completion *done)
{
+ done->wait_start = jiffies;
atomic_dec(&done->cnt); /* put down the initial count */
- wait_event(*done->waitq, !atomic_read(&done->cnt));
+ wait_event(*done->waitq, wb_wait_for_completion_cb(done));
}
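wb_wait_for_completion() pairs with wb_queue_work(): the submitter initializes a wb_completion, points one or more queued works at it, and sleeps until their count drops to zero, now logging an informational message when a wakeup finds it has been waiting longer than sysctl_hung_task_timeout_secs. Roughly how a submitter in this file uses it (simplified from the sb-wide writeback helpers; bdi, wb and sb are assumed to be in scope):

DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
	.sb		= sb,
	.nr_pages	= LONG_MAX,
	.sync_mode	= WB_SYNC_ALL,
	.reason		= WB_REASON_SYNC,
	.done		= &done,
};

wb_queue_work(wb, &work);
wb_wait_for_completion(&done);	/* returns once done.cnt has dropped to zero */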
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -235,7 +271,7 @@ void wb_wait_for_completion(struct wb_completion *done)
static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;
-void __inode_attach_wb(struct inode *inode, struct page *page)
+void __inode_attach_wb(struct inode *inode, struct folio *folio)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
struct bdi_writeback *wb = NULL;
@@ -243,8 +279,8 @@ void __inode_attach_wb(struct inode *inode, struct page *page)
if (inode_cgwb_enabled(inode)) {
struct cgroup_subsys_state *memcg_css;
- if (page) {
- memcg_css = mem_cgroup_css_from_page(page);
+ if (folio) {
+ memcg_css = mem_cgroup_css_from_folio(folio);
wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
} else {
/* must pin memcg_css, see wb_get_create() */
@@ -264,7 +300,6 @@ void __inode_attach_wb(struct inode *inode, struct page *page)
if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
wb_put(wb);
}
-EXPORT_SYMBOL_GPL(__inode_attach_wb);
/**
* inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
@@ -279,8 +314,9 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
+ WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
if (wb != &wb->bdi->wb)
list_move(&inode->i_io_list, &wb->b_attached);
else
@@ -342,7 +378,8 @@ static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
}
struct inode_switch_wbs_context {
- struct rcu_work work;
+ /* List of queued switching contexts for the wb */
+ struct llist_node list;
/*
* Multiple inodes can be switched at once. The switching procedure
@@ -352,7 +389,6 @@ struct inode_switch_wbs_context {
* array embedded into struct inode_switch_wbs_context. Otherwise
* an inode could be left in a non-consistent state.
*/
- struct bdi_writeback *new_wb;
struct inode *inodes[];
};
@@ -372,7 +408,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
{
struct address_space *mapping = inode->i_mapping;
XA_STATE(xas, &mapping->i_pages, 0);
- struct page *page;
+ struct folio *folio;
bool switched = false;
spin_lock(&inode->i_lock);
@@ -382,28 +418,30 @@ static bool inode_do_switch_wbs(struct inode *inode,
* Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
* path owns the inode and we shouldn't modify ->i_io_list.
*/
- if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
+ if (unlikely(inode_state_read(inode) & (I_FREEING | I_WILL_FREE)))
goto skip_switch;
trace_inode_switch_wbs(inode, old_wb, new_wb);
/*
* Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
- * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
- * pages actually under writeback.
+ * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
+ * folios actually under writeback.
*/
- xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
- if (PageDirty(page)) {
- dec_wb_stat(old_wb, WB_RECLAIMABLE);
- inc_wb_stat(new_wb, WB_RECLAIMABLE);
+ xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+ if (folio_test_dirty(folio)) {
+ long nr = folio_nr_pages(folio);
+ wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
+ wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
}
}
xas_set(&xas, 0);
- xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
- WARN_ON_ONCE(!PageWriteback(page));
- dec_wb_stat(old_wb, WB_WRITEBACK);
- inc_wb_stat(new_wb, WB_WRITEBACK);
+ xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+ long nr = folio_nr_pages(folio);
+ WARN_ON_ONCE(!folio_test_writeback(folio));
+ wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
+ wb_stat_mod(new_wb, WB_WRITEBACK, nr);
}
if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
@@ -417,22 +455,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
* Transfer to @new_wb's IO list if necessary. If the @inode is dirty,
* the specific list @inode was on is ignored and the @inode is put on
* ->b_dirty which is always correct including from ->b_dirty_time.
- * The transfer preserves @inode->dirtied_when ordering. If the @inode
- * was clean, it means it was on the b_attached list, so move it onto
- * the b_attached list of @new_wb.
+ * If the @inode was clean, it means it was on the b_attached list, so
+ * move it onto the b_attached list of @new_wb.
*/
if (!list_empty(&inode->i_io_list)) {
inode->i_wb = new_wb;
- if (inode->i_state & I_DIRTY_ALL) {
- struct inode *pos;
-
- list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
- if (time_after_eq(inode->dirtied_when,
- pos->dirtied_when))
- break;
+ if (inode_state_read(inode) & I_DIRTY_ALL) {
+ /*
+ * We need to keep b_dirty list sorted by
+ * dirtied_time_when. However properly sorting the
+ * inode in the list gets too expensive when switching
+ * many inodes. So just attach inode at the end of the
+ * dirty list and clobber the dirtied_time_when.
+ */
+ inode->dirtied_time_when = jiffies;
inode_io_list_move_locked(inode, new_wb,
- pos->i_io_list.prev);
+ &new_wb->b_dirty);
} else {
inode_cgwb_move_to_attached(inode, new_wb);
}
@@ -447,10 +486,11 @@ static bool inode_do_switch_wbs(struct inode *inode,
switched = true;
skip_switch:
/*
- * Paired with load_acquire in unlocked_inode_to_wb_begin() and
+ * Paired with an acquire fence in unlocked_inode_to_wb_begin() and
* ensures that the new wb is visible if they see !I_WB_SWITCH.
*/
- smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
+ smp_wmb();
+ inode_state_clear(inode, I_WB_SWITCH);
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&inode->i_lock);
@@ -458,13 +498,11 @@ skip_switch:
return switched;
}
-static void inode_switch_wbs_work_fn(struct work_struct *work)
+static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
+ struct inode_switch_wbs_context *isw)
{
- struct inode_switch_wbs_context *isw =
- container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
- struct bdi_writeback *new_wb = isw->new_wb;
unsigned long nr_switched = 0;
struct inode **inodep;
@@ -474,6 +512,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
*/
down_read(&bdi->wb_switch_rwsem);
+ inodep = isw->inodes;
/*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
@@ -484,6 +523,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
* gives us exclusion against all wb related operations on @inode
* including IO list manipulations and stat updates.
*/
+relock:
if (old_wb < new_wb) {
spin_lock(&old_wb->list_lock);
spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -492,10 +532,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
}
- for (inodep = isw->inodes; *inodep; inodep++) {
+ while (*inodep) {
WARN_ON_ONCE((*inodep)->i_wb != old_wb);
if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
nr_switched++;
+ inodep++;
+ if (*inodep && need_resched()) {
+ spin_unlock(&new_wb->list_lock);
+ spin_unlock(&old_wb->list_lock);
+ cond_resched();
+ goto relock;
+ }
}
spin_unlock(&new_wb->list_lock);
@@ -515,6 +562,38 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
atomic_dec(&isw_nr_in_flight);
}
+void inode_switch_wbs_work_fn(struct work_struct *work)
+{
+ struct bdi_writeback *new_wb = container_of(work, struct bdi_writeback,
+ switch_work);
+ struct inode_switch_wbs_context *isw, *next_isw;
+ struct llist_node *list;
+
+ /*
+ * Grab our reference to wb so that it cannot get freed under us
+ * after we process all the isw items.
+ */
+ wb_get(new_wb);
+ while (1) {
+ list = llist_del_all(&new_wb->switch_wbs_ctxs);
+ /* Nothing to do? */
+ if (!list)
+ break;
+ /*
+ * In addition to synchronizing among switchers, I_WB_SWITCH
+ * tells the RCU protected stat update paths to grab the i_page
+ * lock so that stat transfer can synchronize against them.
+ * Let's continue after I_WB_SWITCH is guaranteed to be
+ * visible.
+ */
+ synchronize_rcu();
+
+ llist_for_each_entry_safe(isw, next_isw, list, list)
+ process_inode_switch_wbs(new_wb, isw);
+ }
+ wb_put(new_wb);
+}
+
static bool inode_prepare_wbs_switch(struct inode *inode,
struct bdi_writeback *new_wb)
{
@@ -532,18 +611,25 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
- inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
+ inode_state_read(inode) & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
inode_to_wb(inode) == new_wb) {
spin_unlock(&inode->i_lock);
return false;
}
- inode->i_state |= I_WB_SWITCH;
+ inode_state_set(inode, I_WB_SWITCH);
__iget(inode);
spin_unlock(&inode->i_lock);
return true;
}
+static void wb_queue_isw(struct bdi_writeback *wb,
+ struct inode_switch_wbs_context *isw)
+{
+ if (llist_add(&isw->list, &wb->switch_wbs_ctxs))
+ queue_work(isw_wq, &wb->switch_work);
+}
+
/**
* inode_switch_wbs - change the wb association of an inode
* @inode: target inode
@@ -557,9 +643,10 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
struct backing_dev_info *bdi = inode_to_bdi(inode);
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
+ struct bdi_writeback *new_wb = NULL;
/* noop if seems to be already in progress */
- if (inode->i_state & I_WB_SWITCH)
+ if (inode_state_read_once(inode) & I_WB_SWITCH)
return;
/* avoid queueing a new switch if too many are already in flight */
@@ -581,33 +668,46 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (!memcg_css)
goto out_free;
- isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
+ new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
css_put(memcg_css);
- if (!isw->new_wb)
+ if (!new_wb)
goto out_free;
- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+ if (!inode_prepare_wbs_switch(inode, new_wb))
goto out_free;
isw->inodes[0] = inode;
- /*
- * In addition to synchronizing among switchers, I_WB_SWITCH tells
- * the RCU protected stat update paths to grab the i_page
- * lock so that stat transfer can synchronize against them.
- * Let's continue after I_WB_SWITCH is guaranteed to be visible.
- */
- INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
- queue_rcu_work(isw_wq, &isw->work);
+ trace_inode_switch_wbs_queue(inode->i_wb, new_wb, 1);
+ wb_queue_isw(new_wb, isw);
return;
out_free:
atomic_dec(&isw_nr_in_flight);
- if (isw->new_wb)
- wb_put(isw->new_wb);
+ if (new_wb)
+ wb_put(new_wb);
kfree(isw);
}
+static bool isw_prepare_wbs_switch(struct bdi_writeback *new_wb,
+ struct inode_switch_wbs_context *isw,
+ struct list_head *list, int *nr)
+{
+ struct inode *inode;
+
+ list_for_each_entry(inode, list, i_io_list) {
+ if (!inode_prepare_wbs_switch(inode, new_wb))
+ continue;
+
+ isw->inodes[*nr] = inode;
+ (*nr)++;
+
+ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
+ return true;
+ }
+ return false;
+}
+
/**
* cleanup_offline_cgwb - detach associated inodes
* @wb: target wb
@@ -620,7 +720,7 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
{
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
- struct inode *inode;
+ struct bdi_writeback *new_wb;
int nr;
bool restart = false;
@@ -633,44 +733,39 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
for (memcg_css = wb->memcg_css->parent; memcg_css;
memcg_css = memcg_css->parent) {
- isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
- if (isw->new_wb)
+ new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
+ if (new_wb)
break;
}
- if (unlikely(!isw->new_wb))
- isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
+ if (unlikely(!new_wb))
+ new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
nr = 0;
spin_lock(&wb->list_lock);
- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
- continue;
-
- isw->inodes[nr++] = inode;
-
- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
- restart = true;
- break;
- }
- }
+ /*
+ * In addition to the inodes that have completed writeback, also switch
+ * cgwbs for inodes that only have dirty timestamps. Otherwise, when
+ * lazytime is enabled, those inodes won't be written back for a long
+ * time, which keeps pinning the dying cgwbs. This doesn't break the
+ * bandwidth restrictions, as writeback of inode metadata is not
+ * accounted for.
+ */
+ restart = isw_prepare_wbs_switch(new_wb, isw, &wb->b_attached, &nr);
+ if (!restart)
+ restart = isw_prepare_wbs_switch(new_wb, isw, &wb->b_dirty_time,
+ &nr);
spin_unlock(&wb->list_lock);
/* no attached inodes? bail out */
if (nr == 0) {
atomic_dec(&isw_nr_in_flight);
- wb_put(isw->new_wb);
+ wb_put(new_wb);
kfree(isw);
return restart;
}
- /*
- * In addition to synchronizing among switchers, I_WB_SWITCH tells
- * the RCU protected stat update paths to grab the i_page
- * lock so that stat transfer can synchronize against them.
- * Let's continue after I_WB_SWITCH is guaranteed to be visible.
- */
- INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
- queue_rcu_work(isw_wq, &isw->work);
+ trace_inode_switch_wbs_queue(wb, new_wb, nr);
+ wb_queue_isw(new_wb, isw);
return restart;
}
@@ -685,8 +780,9 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
* writeback completion, wbc_detach_inode() should be called. This is used
* to track the cgroup writeback context.
*/
-void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
+static void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock)
{
if (!inode_cgwb_enabled(inode)) {
spin_unlock(&inode->i_lock);
@@ -716,7 +812,24 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
inode_switch_wbs(inode, wbc->wb_id);
}
-EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
+
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by filemap_writeback(), which is an alternative
+ * entry point into writeback code. It first ensures @inode is associated with
+ * a bdi_writeback and then attaches it to @wbc.
+ */
+void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ inode_attach_wb(inode, NULL);
+ wbc_attach_and_unlock_inode(wbc, inode);
+}
+EXPORT_SYMBOL_GPL(wbc_attach_fdatawrite_inode);
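/*
 * Illustrative sketch (not part of this patch): how a writeback entry
 * point might pair the attach/detach helpers around writing a folio,
 * charging the bytes for foreign-inode detection in between.
 * my_write_folio() is a hypothetical helper.
 */
static int my_writeback(struct inode *inode, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= LONG_MAX,
	};
	int ret;

	wbc_attach_fdatawrite_inode(&wbc, inode);
	/* Charge the bytes to the folio's memcg for foreign detection. */
	wbc_account_cgroup_owner(&wbc, folio, folio_size(folio));
	ret = my_write_folio(folio, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}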
/**
* wbc_detach_inode - disassociate wbc from inode and perform foreign detection
@@ -736,7 +849,7 @@ EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
* incorrectly attributed).
*
* To resolve this issue, cgroup writeback detects the majority dirtier of
- * an inode and transfers the ownership to it. To avoid unnnecessary
+ * an inode and transfers the ownership to it. To avoid unnecessary
* oscillation, the detection mechanism keeps track of history and gives
* out the switch verdict only if the foreign usage pattern is stable over
* a certain amount of time and/or writeback attempts.
@@ -824,7 +937,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
* is okay. The main goal is avoiding keeping an inode on
* the wrong wb for an extended period of time.
*/
- if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
+ if (hweight16(history) > WB_FRN_HIST_THR_SLOTS)
inode_switch_wbs(inode, max_id);
}
@@ -844,14 +957,14 @@ EXPORT_SYMBOL_GPL(wbc_detach_inode);
/**
* wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
* @wbc: writeback_control of the writeback in progress
- * @page: page being written out
+ * @folio: folio being written out
* @bytes: number of bytes being written out
*
- * @bytes from @page are about to written out during the writeback
+ * @bytes from @folio are about to be written out during the writeback
* controlled by @wbc. Keep the book for foreign inode detection. See
* wbc_detach_inode().
*/
-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
size_t bytes)
{
struct cgroup_subsys_state *css;
@@ -866,7 +979,7 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
if (!wbc->wb || wbc->no_cgroup_owner)
return;
- css = mem_cgroup_css_from_page(page);
+ css = mem_cgroup_css_from_folio(folio);
/* dead cgroups shouldn't contribute to inode ownership arbitration */
if (!(css->flags & CSS_ONLINE))
return;
@@ -892,43 +1005,6 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
/**
- * inode_congested - test whether an inode is congested
- * @inode: inode to test for congestion (may be NULL)
- * @cong_bits: mask of WB_[a]sync_congested bits to test
- *
- * Tests whether @inode is congested. @cong_bits is the mask of congestion
- * bits to test and the return value is the mask of set bits.
- *
- * If cgroup writeback is enabled for @inode, the congestion state is
- * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
- * associated with @inode is congested; otherwise, the root wb's congestion
- * state is used.
- *
- * @inode is allowed to be NULL as this function is often called on
- * mapping->host which is NULL for the swapper space.
- */
-int inode_congested(struct inode *inode, int cong_bits)
-{
- /*
- * Once set, ->i_wb never becomes NULL while the inode is alive.
- * Start transaction iff ->i_wb is visible.
- */
- if (inode && inode_to_wb_is_valid(inode)) {
- struct bdi_writeback *wb;
- struct wb_lock_cookie lock_cookie = {};
- bool congested;
-
- wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
- congested = wb_congested(wb, cong_bits);
- unlocked_inode_to_wb_end(inode, &lock_cookie);
- return congested;
- }
-
- return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
-}
-EXPORT_SYMBOL_GPL(inode_congested);
-
-/**
* wb_split_bdi_pages - split nr_pages to write according to bandwidth
* @wb: target bdi_writeback to split @nr_pages to
* @nr_pages: number of pages to write for the whole bdi
@@ -1008,6 +1084,16 @@ restart:
continue;
}
+ /*
+ * If wb_tryget() fails, the wb has been shut down, so skip it.
+ *
+ * Pin @wb so that it stays on @bdi->wb_list. This allows
+ * continuing iteration from @wb after dropping and
+ * regrabbing rcu read lock.
+ */
+ if (!wb_tryget(wb))
+ continue;
+
/* alloc failed, execute synchronously using on-stack fallback */
work = &fallback_work;
*work = *base_work;
@@ -1016,13 +1102,6 @@ restart:
work->done = &fallback_work_done;
wb_queue_work(wb, work);
-
- /*
- * Pin @wb so that it stays on @bdi->wb_list. This allows
- * continuing iteration from @wb after dropping and
- * regrabbing rcu read lock.
- */
- wb_get(wb);
last_wb = wb;
rcu_read_unlock();
@@ -1094,7 +1173,7 @@ int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
dirty = dirty * 10 / 8;
/* issue the writeback work */
- work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
+ work = kzalloc(sizeof(*work), GFP_NOWAIT);
if (work) {
work->nr_pages = dirty;
work->sync_mode = WB_SYNC_NONE;
@@ -1118,6 +1197,7 @@ out_bdi_put:
/**
* cgroup_writeback_umount - flush inode wb switches for umount
+ * @sb: target super_block
*
* This function is called when a super_block is about to be destroyed and
* flushes in-flight inode wb switches. An inode wb switch goes through
@@ -1126,8 +1206,12 @@ out_bdi_put:
* rare occurrences and synchronize_rcu() can take a while, perform
* flushing iff wb switches are in flight.
*/
-void cgroup_writeback_umount(void)
+void cgroup_writeback_umount(struct super_block *sb)
{
+
+ if (!(sb->s_bdi->capabilities & BDI_CAP_WRITEBACK))
+ return;
+
/*
* SB_ACTIVE should be reliably cleared before checking
* isw_nr_in_flight, see generic_shutdown_super().
@@ -1146,7 +1230,7 @@ void cgroup_writeback_umount(void)
static int __init cgroup_writeback_init(void)
{
- isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
+ isw_wq = alloc_workqueue("inode_switch_wbs", WQ_PERCPU, 0);
if (!isw_wq)
return -ENOMEM;
return 0;
@@ -1163,8 +1247,9 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
+ WARN_ON_ONCE(inode_state_read(inode) & I_FREEING);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
@@ -1207,6 +1292,13 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
}
}
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock)
+{
+ spin_unlock(&inode->i_lock);
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
/*
@@ -1267,10 +1359,17 @@ void inode_io_list_del(struct inode *inode)
{
struct bdi_writeback *wb;
+ /*
+ * FIXME: ext4 can call here from ext4_evict_inode() after evict() already
+ * unlinked the inode.
+ */
+ if (list_empty_careful(&inode->i_io_list))
+ return;
+
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
@@ -1328,6 +1427,17 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
assert_spin_locked(&inode->i_lock);
+ inode_state_clear(inode, I_SYNC_QUEUED);
+ /*
+ * When the inode is being freed, just don't bother with dirty list
+ * tracking. The flush worker will ignore this inode anyway, and keeping
+ * it on the dirty lists would only trigger assertions in
+ * inode_io_list_move_locked().
+ */
+ if (inode_state_read(inode) & I_FREEING) {
+ list_del_init(&inode->i_io_list);
+ wb_io_lists_depopulated(wb);
+ return;
+ }
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
@@ -1336,7 +1446,6 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
inode->dirtied_when = jiffies;
}
inode_io_list_move_locked(inode, wb, &wb->b_dirty);
- inode->i_state &= ~I_SYNC_QUEUED;
}
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
@@ -1356,12 +1465,13 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
static void inode_sync_complete(struct inode *inode)
{
- inode->i_state &= ~I_SYNC;
+ assert_spin_locked(&inode->i_lock);
+
+ inode_state_clear(inode, I_SYNC);
/* If inode is clean and unused, put it into LRU now... */
- inode_add_lru(inode);
- /* Waiters must see I_SYNC cleared before being woken up */
- smp_mb();
- wake_up_bit(&inode->i_state, __I_SYNC);
+ inode_lru_list_add(inode);
+ /* Called with inode->i_lock which ensures memory ordering. */
+ inode_wake_up_bit(inode, __I_SYNC);
}
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
@@ -1379,8 +1489,6 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
return ret;
}
-#define EXPIRE_DIRTY_ATIME 0x0001
-
/*
* Move expired (dirtied before dirtied_before) dirty inodes from
* @delaying_queue to @dispatch_queue.
@@ -1400,10 +1508,10 @@ static int move_expired_inodes(struct list_head *delaying_queue,
inode = wb_inode(delaying_queue->prev);
if (inode_dirtied_after(inode, dirtied_before))
break;
+ spin_lock(&inode->i_lock);
list_move(&inode->i_io_list, &tmp);
moved++;
- spin_lock(&inode->i_lock);
- inode->i_state |= I_SYNC_QUEUED;
+ inode_state_set(inode, I_SYNC_QUEUED);
spin_unlock(&inode->i_lock);
if (sb_is_blkdev_sb(inode->i_sb))
continue;
@@ -1418,7 +1526,12 @@ static int move_expired_inodes(struct list_head *delaying_queue,
goto out;
}
- /* Move inodes from one superblock together */
+ /*
+ * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
+ * we don't take inode->i_lock here because it is just a pointless overhead.
+ * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
+ * fully under our control.
+ */
while (!list_empty(&tmp)) {
sb = wb_inode(tmp.prev)->i_sb;
list_for_each_prev_safe(pos, node, &tmp) {
@@ -1477,30 +1590,27 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
* Wait for writeback on an inode to complete. Called with i_lock held.
* Caller must make sure inode cannot go away when we drop i_lock.
*/
-static void __inode_wait_for_writeback(struct inode *inode)
- __releases(inode->i_lock)
- __acquires(inode->i_lock)
+void inode_wait_for_writeback(struct inode *inode)
{
- DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
- wait_queue_head_t *wqh;
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+
+ assert_spin_locked(&inode->i_lock);
- wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- while (inode->i_state & I_SYNC) {
+ if (!(inode_state_read(inode) & I_SYNC))
+ return;
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
+ for (;;) {
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
+ if (!(inode_state_read(inode) & I_SYNC))
+ break;
spin_unlock(&inode->i_lock);
- __wait_on_bit(wqh, &wq, bit_wait,
- TASK_UNINTERRUPTIBLE);
+ schedule();
spin_lock(&inode->i_lock);
}
-}
-
-/*
- * Wait for writeback on an inode to complete. Caller must have inode pinned.
- */
-void inode_wait_for_writeback(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- __inode_wait_for_writeback(inode);
- spin_unlock(&inode->i_lock);
+ finish_wait(wq_head, &wqe.wq_entry);
}
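/*
 * Illustrative sketch (not part of this patch): inode_wait_for_writeback()
 * now asserts that the caller holds inode->i_lock and may drop and
 * re-acquire it while sleeping on __I_SYNC. A caller that only has the
 * inode pinned would wrap it like this (my_wait_until_not_syncing is a
 * hypothetical name, mirroring the old exported wrapper removed above).
 */
static void my_wait_until_not_syncing(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_wait_for_writeback(inode);	/* returns with i_lock held */
	spin_unlock(&inode->i_lock);
}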
/*
@@ -1511,16 +1621,20 @@ void inode_wait_for_writeback(struct inode *inode)
static void inode_sleep_on_writeback(struct inode *inode)
__releases(inode->i_lock)
{
- DEFINE_WAIT(wait);
- wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- int sleep;
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+ bool sleep;
+
+ assert_spin_locked(&inode->i_lock);
- prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
- sleep = inode->i_state & I_SYNC;
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
+ sleep = !!(inode_state_read(inode) & I_SYNC);
spin_unlock(&inode->i_lock);
if (sleep)
schedule();
- finish_wait(wqh, &wait);
+ finish_wait(wq_head, &wqe.wq_entry);
}
/*
@@ -1532,9 +1646,10 @@ static void inode_sleep_on_writeback(struct inode *inode)
* thread's back can have unexpected consequences.
*/
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+ struct writeback_control *wbc,
+ unsigned long dirtied_before)
{
- if (inode->i_state & I_FREEING)
+ if (inode_state_read(inode) & I_FREEING)
return;
/*
@@ -1542,16 +1657,21 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* shot. If still dirty, it will be redirty_tail()'ed below. Update
* the dirty time to prevent enqueue and sync it again.
*/
- if ((inode->i_state & I_DIRTY) &&
+ if ((inode_state_read(inode) & I_DIRTY) &&
(wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
inode->dirtied_when = jiffies;
if (wbc->pages_skipped) {
/*
- * writeback is not making progress due to locked
- * buffers. Skip this inode for now.
+ * Writeback is not making progress due to locked buffers.
+ * Skip this inode for now. Although having skipped pages
+ * is odd for clean inodes, it can happen for some
+ * filesystems so handle that gracefully.
*/
- redirty_tail_locked(inode, wb);
+ if (inode_state_read(inode) & I_DIRTY_ALL)
+ redirty_tail_locked(inode, wb);
+ else
+ inode_cgwb_move_to_attached(inode, wb);
return;
}
@@ -1560,7 +1680,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* We didn't write back all the pages. nfs_writepages()
* sometimes bales out without doing anything.
*/
- if (wbc->nr_to_write <= 0) {
+ if (wbc->nr_to_write <= 0 &&
+ !inode_dirtied_after(inode, dirtied_before)) {
/* Slice used up. Queue for next turn. */
requeue_io(inode, wb);
} else {
@@ -1573,17 +1694,17 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
*/
redirty_tail_locked(inode, wb);
}
- } else if (inode->i_state & I_DIRTY) {
+ } else if (inode_state_read(inode) & I_DIRTY) {
/*
* Filesystems can dirty the inode during writeback operations,
* such as delayed allocation during submission or metadata
* updates after data IO completion.
*/
redirty_tail_locked(inode, wb);
- } else if (inode->i_state & I_DIRTY_TIME) {
+ } else if (inode_state_read(inode) & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
- inode->i_state &= ~I_SYNC_QUEUED;
+ inode_state_clear(inode, I_SYNC_QUEUED);
} else {
/* The inode is clean. Remove from writeback lists. */
inode_cgwb_move_to_attached(inode, wb);
@@ -1609,7 +1730,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
unsigned dirty;
int ret;
- WARN_ON(!(inode->i_state & I_SYNC));
+ WARN_ON(!(inode_state_read_once(inode) & I_SYNC));
trace_writeback_single_inode_start(inode, wbc, nr_to_write);
@@ -1633,7 +1754,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* mark_inode_dirty_sync() to notify the filesystem about it and to
* change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
- if ((inode->i_state & I_DIRTY_TIME) &&
+ if ((inode_state_read_once(inode) & I_DIRTY_TIME) &&
(wbc->sync_mode == WB_SYNC_ALL ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
@@ -1648,8 +1769,8 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* after handling timestamp expiration, as that may dirty the inode too.
*/
spin_lock(&inode->i_lock);
- dirty = inode->i_state & I_DIRTY;
- inode->i_state &= ~dirty;
+ dirty = inode_state_read(inode) & I_DIRTY;
+ inode_state_clear(inode, dirty);
/*
* Paired with smp_mb() in __mark_inode_dirty(). This allows
@@ -1665,7 +1786,14 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
smp_mb();
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
- inode->i_state |= I_DIRTY_PAGES;
+ inode_state_set(inode, I_DIRTY_PAGES);
+ else if (unlikely(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
+ if (!(inode_state_read(inode) & I_DIRTY_PAGES)) {
+ inode_state_clear(inode, I_PINNING_NETFS_WB);
+ wbc->unpinned_netfs_wb = true;
+ dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */
+ }
+ }
spin_unlock(&inode->i_lock);
@@ -1675,6 +1803,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (ret == 0)
ret = err;
}
+ wbc->unpinned_netfs_wb = false;
trace_writeback_single_inode(inode, wbc, nr_to_write);
return ret;
}
@@ -1695,12 +1824,12 @@ static int writeback_single_inode(struct inode *inode,
int ret = 0;
spin_lock(&inode->i_lock);
- if (!atomic_read(&inode->i_count))
- WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+ if (!icount_read(inode))
+ WARN_ON(!(inode_state_read(inode) & (I_WILL_FREE | I_FREEING)));
else
- WARN_ON(inode->i_state & I_WILL_FREE);
+ WARN_ON(inode_state_read(inode) & I_WILL_FREE);
- if (inode->i_state & I_SYNC) {
+ if (inode_state_read(inode) & I_SYNC) {
/*
* Writeback is already running on the inode. For WB_SYNC_NONE,
* that's enough and we can just return. For WB_SYNC_ALL, we
@@ -1709,9 +1838,9 @@ static int writeback_single_inode(struct inode *inode,
*/
if (wbc->sync_mode != WB_SYNC_ALL)
goto out;
- __inode_wait_for_writeback(inode);
+ inode_wait_for_writeback(inode);
}
- WARN_ON(inode->i_state & I_SYNC);
+ WARN_ON(inode_state_read(inode) & I_SYNC);
/*
* If the inode is already fully clean, then there's nothing to do.
*
@@ -1719,11 +1848,11 @@ static int writeback_single_inode(struct inode *inode,
* still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If
* there are any such pages, we'll need to wait for them.
*/
- if (!(inode->i_state & I_DIRTY_ALL) &&
+ if (!(inode_state_read(inode) & I_DIRTY_ALL) &&
(wbc->sync_mode != WB_SYNC_ALL ||
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
- inode->i_state |= I_SYNC;
+ inode_state_set(inode, I_SYNC);
wbc_attach_and_unlock_inode(wbc, inode);
ret = __writeback_single_inode(inode, wbc);
@@ -1733,12 +1862,29 @@ static int writeback_single_inode(struct inode *inode,
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
/*
- * If the inode is now fully clean, then it can be safely removed from
- * its writeback list (if any). Otherwise the flusher threads are
- * responsible for the writeback lists.
+ * If the inode is being freed, its i_io_list shouldn't be updated
+ * as the inode may be deleted at any moment.
*/
- if (!(inode->i_state & I_DIRTY_ALL))
- inode_cgwb_move_to_attached(inode, wb);
+ if (!(inode_state_read(inode) & I_FREEING)) {
+ /*
+ * If the inode is now fully clean, then it can be safely
+ * removed from its writeback list (if any). Otherwise the
+ * flusher threads are responsible for the writeback lists.
+ */
+ if (!(inode_state_read(inode) & I_DIRTY_ALL))
+ inode_cgwb_move_to_attached(inode, wb);
+ else if (!(inode_state_read(inode) & I_SYNC_QUEUED)) {
+ if ((inode_state_read(inode) & I_DIRTY))
+ redirty_tail_locked(inode, wb);
+ else if (inode_state_read(inode) & I_DIRTY_TIME) {
+ inode->dirtied_when = jiffies;
+ inode_io_list_move_locked(inode,
+ wb,
+ &wb->b_dirty_time);
+ }
+ }
+ }
+
spin_unlock(&wb->list_lock);
inode_sync_complete(inode);
out:
@@ -1746,8 +1892,8 @@ out:
return ret;
}
-static long writeback_chunk_size(struct bdi_writeback *wb,
- struct wb_writeback_work *work)
+static long writeback_chunk_size(struct super_block *sb,
+ struct bdi_writeback *wb, struct wb_writeback_work *work)
{
long pages;
@@ -1765,16 +1911,13 @@ static long writeback_chunk_size(struct bdi_writeback *wb,
* (maybe slowly) sync all tagged pages
*/
if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
- pages = LONG_MAX;
- else {
- pages = min(wb->avg_write_bandwidth / 2,
- global_wb_domain.dirty_limit / DIRTY_SCOPE);
- pages = min(pages, work->nr_pages);
- pages = round_down(pages + MIN_WRITEBACK_PAGES,
- MIN_WRITEBACK_PAGES);
- }
+ return LONG_MAX;
- return pages;
+ pages = min(wb->avg_write_bandwidth / 2,
+ global_wb_domain.dirty_limit / DIRTY_SCOPE);
+ pages = min(pages, work->nr_pages);
+ return round_down(pages + sb->s_min_writeback_pages,
+ sb->s_min_writeback_pages);
}
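/*
 * Worked example (hypothetical numbers, not part of this patch): with
 * avg_write_bandwidth = 24000 pages/s, dirty_limit / DIRTY_SCOPE = 8192,
 * work->nr_pages = 50000 and sb->s_min_writeback_pages = 4096:
 *
 *	pages = min(24000 / 2, 8192)         = 8192
 *	pages = min(8192, 50000)             = 8192
 *	return round_down(8192 + 4096, 4096) = 12288
 *
 * i.e. the chunk is padded and aligned to the superblock's minimum
 * writeback granule instead of the old global MIN_WRITEBACK_PAGES.
 */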
/*
@@ -1802,11 +1945,17 @@ static long writeback_sb_inodes(struct super_block *sb,
};
unsigned long start_time = jiffies;
long write_chunk;
- long wrote = 0; /* count both pages and inodes */
+ long total_wrote = 0; /* count both pages and inodes */
+ unsigned long dirtied_before = jiffies;
+
+ if (work->for_kupdate)
+ dirtied_before = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
struct bdi_writeback *tmp_wb;
+ long wrote;
if (inode->i_sb != sb) {
if (work->sb) {
@@ -1833,12 +1982,12 @@ static long writeback_sb_inodes(struct super_block *sb,
* kind writeout is handled by the freer.
*/
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) {
redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
continue;
}
- if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+ if ((inode_state_read(inode) & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
/*
* If this inode is locked for writeback and we are not
* doing writeback-for-data-integrity, move it to
@@ -1848,8 +1997,8 @@ static long writeback_sb_inodes(struct super_block *sb,
* We'll have another go at writing back this inode
* when we completed a full scan of b_io.
*/
- spin_unlock(&inode->i_lock);
requeue_io(inode, wb);
+ spin_unlock(&inode->i_lock);
trace_writeback_sb_inodes_requeue(inode);
continue;
}
@@ -1860,17 +2009,17 @@ static long writeback_sb_inodes(struct super_block *sb,
* are doing WB_SYNC_NONE writeback. So this catches only the
* WB_SYNC_ALL case.
*/
- if (inode->i_state & I_SYNC) {
+ if (inode_state_read(inode) & I_SYNC) {
/* Wait for I_SYNC. This function drops i_lock... */
inode_sleep_on_writeback(inode);
/* Inode may be gone, start again */
spin_lock(&wb->list_lock);
continue;
}
- inode->i_state |= I_SYNC;
+ inode_state_set(inode, I_SYNC);
wbc_attach_and_unlock_inode(&wbc, inode);
- write_chunk = writeback_chunk_size(wb, work);
+ write_chunk = writeback_chunk_size(inode->i_sb, wb, work);
wbc.nr_to_write = write_chunk;
wbc.pages_skipped = 0;
@@ -1880,9 +2029,17 @@ static long writeback_sb_inodes(struct super_block *sb,
*/
__writeback_single_inode(inode, &wbc);
+ /* Periodically wake up waiters so the hung task detector sees progress. */
+ if (work->done && work->done->progress_stamp &&
+ (jiffies - work->done->progress_stamp) > HZ *
+ sysctl_hung_task_timeout_secs / 2)
+ wake_up_all(work->done->waitq);
+
wbc_detach_inode(&wbc);
work->nr_pages -= write_chunk - wbc.nr_to_write;
- wrote += write_chunk - wbc.nr_to_write;
+ wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped;
+ wrote = wrote < 0 ? 0 : wrote;
+ total_wrote += wrote;
if (need_resched()) {
/*
@@ -1893,8 +2050,7 @@ static long writeback_sb_inodes(struct super_block *sb,
* unplug, so get our IOs out the door before we
* give up the CPU.
*/
- if (current->plug)
- blk_flush_plug(current->plug, false);
+ blk_flush_plug(current->plug, false);
cond_resched();
}
@@ -1904,9 +2060,9 @@ static long writeback_sb_inodes(struct super_block *sb,
*/
tmp_wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
- if (!(inode->i_state & I_DIRTY_ALL))
- wrote++;
- requeue_inode(inode, tmp_wb, &wbc);
+ if (!(inode_state_read(inode) & I_DIRTY_ALL))
+ total_wrote++;
+ requeue_inode(inode, tmp_wb, &wbc, dirtied_before);
inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
@@ -1919,14 +2075,14 @@ static long writeback_sb_inodes(struct super_block *sb,
* bail out to wb_writeback() often enough to check
* background threshold and other termination conditions.
*/
- if (wrote) {
+ if (total_wrote) {
if (time_is_before_jiffies(start_time + HZ / 10UL))
break;
if (work->nr_pages <= 0)
break;
}
}
- return wrote;
+ return total_wrote;
}
static long __writeback_inodes_wb(struct bdi_writeback *wb,
@@ -1939,9 +2095,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
struct inode *inode = wb_inode(wb->b_io.prev);
struct super_block *sb = inode->i_sb;
- if (!trylock_super(sb)) {
+ if (!super_trylock_shared(sb)) {
/*
- * trylock_super() may fail consistently due to
+ * super_trylock_shared() may fail consistently due to
* s_umount being grabbed by someone else. Don't use
* requeue_io() to avoid busy retrying the inode/sb.
*/
@@ -2008,9 +2164,9 @@ static long wb_writeback(struct bdi_writeback *wb,
struct inode *inode;
long progress;
struct blk_plug plug;
+ bool queued = false;
blk_start_plug(&plug);
- spin_lock(&wb->list_lock);
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
@@ -2035,21 +2191,27 @@ static long wb_writeback(struct bdi_writeback *wb,
if (work->for_background && !wb_over_bg_thresh(wb))
break;
- /*
- * Kupdate and background works are special and we want to
- * include all inodes that need writing. Livelock avoidance is
- * handled by these works yielding to any other work so we are
- * safe.
- */
- if (work->for_kupdate) {
- dirtied_before = jiffies -
- msecs_to_jiffies(dirty_expire_interval * 10);
- } else if (work->for_background)
- dirtied_before = jiffies;
+
+ spin_lock(&wb->list_lock);
trace_writeback_start(wb, work);
- if (list_empty(&wb->b_io))
+ if (list_empty(&wb->b_io)) {
+ /*
+ * Kupdate and background works are special and we want
+ * to include all inodes that need writing. Livelock
+ * avoidance is handled by these works yielding to any
+ * other work so we are safe.
+ */
+ if (work->for_kupdate) {
+ dirtied_before = jiffies -
+ msecs_to_jiffies(dirty_expire_interval *
+ 10);
+ } else if (work->for_background)
+ dirtied_before = jiffies;
+
queue_io(wb, work, dirtied_before);
+ queued = true;
+ }
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
@@ -2064,13 +2226,19 @@ static long wb_writeback(struct bdi_writeback *wb,
* mean the overall work is done. So we keep looping as long
* as made some progress on cleaning pages or inodes.
*/
- if (progress)
+ if (progress || !queued) {
+ spin_unlock(&wb->list_lock);
continue;
+ }
+
/*
* No more inodes for IO, bail
*/
- if (list_empty(&wb->b_more_io))
+ if (list_empty(&wb->b_more_io)) {
+ spin_unlock(&wb->list_lock);
break;
+ }
+
/*
* Nothing written. Wait for some inode to
* become available for writeback. Otherwise
@@ -2082,9 +2250,7 @@ static long wb_writeback(struct bdi_writeback *wb,
spin_unlock(&wb->list_lock);
/* This function drops i_lock... */
inode_sleep_on_writeback(inode);
- spin_lock(&wb->list_lock);
}
- spin_unlock(&wb->list_lock);
blk_finish_plug(&plug);
return nr_pages - work->nr_pages;
@@ -2097,13 +2263,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
struct wb_writeback_work *work = NULL;
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (!list_empty(&wb->work_list)) {
work = list_entry(wb->work_list.next,
struct wb_writeback_work, list);
list_del_init(&work->list);
}
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
return work;
}
@@ -2195,7 +2361,7 @@ static long wb_do_writeback(struct bdi_writeback *wb)
while ((work = get_next_work_item(wb)) != NULL) {
trace_writeback_exec(wb, work);
wrote += wb_writeback(wb, work);
- finish_writeback_work(wb, work);
+ finish_writeback_work(work);
}
/*
@@ -2224,7 +2390,6 @@ void wb_workfn(struct work_struct *work)
long pages_written;
set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
- current->flags |= PF_SWAPWRITE;
if (likely(!current_is_workqueue_rescuer() ||
!test_bit(WB_registered, &wb->state))) {
@@ -2253,13 +2418,10 @@ void wb_workfn(struct work_struct *work)
wb_wakeup(wb);
else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
wb_wakeup_delayed(wb);
-
- current->flags &= ~PF_SWAPWRITE;
}
/*
- * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
- * write back the whole world.
+ * Start writeback of all dirty pages on this bdi.
*/
static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason)
@@ -2291,8 +2453,7 @@ void wakeup_flusher_threads(enum wb_reason reason)
/*
* If we are expecting writeback progress we must submit plugged IO.
*/
- if (blk_needs_flush_plug(current))
- blk_flush_plug(current->plug, true);
+ blk_flush_plug(current->plug, true);
rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
@@ -2334,24 +2495,36 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}
-static int __init start_dirtytime_writeback(void)
-{
- schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
- return 0;
-}
-__initcall(start_dirtytime_writeback);
-
-int dirtytime_interval_handler(struct ctl_table *table, int write,
+static int dirtytime_interval_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
- mod_delayed_work(system_wq, &dirtytime_work, 0);
+ mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
return ret;
}
+static const struct ctl_table vm_fs_writeback_table[] = {
+ {
+ .procname = "dirtytime_expire_seconds",
+ .data = &dirtytime_expire_interval,
+ .maxlen = sizeof(dirtytime_expire_interval),
+ .mode = 0644,
+ .proc_handler = dirtytime_interval_handler,
+ .extra1 = SYSCTL_ZERO,
+ },
+};
+
+static int __init start_dirtytime_writeback(void)
+{
+ schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+ register_sysctl_init("vm", vm_fs_writeback_table);
+ return 0;
+}
+__initcall(start_dirtytime_writeback);
+
/**
* __mark_inode_dirty - internal function to mark an inode dirty
*
@@ -2383,11 +2556,26 @@ void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
int dirtytime = 0;
+ struct bdi_writeback *wb = NULL;
trace_writeback_mark_inode_dirty(inode, flags);
if (flags & I_DIRTY_INODE) {
/*
+ * Inode timestamp update will piggyback on this dirtying.
+ * We tell the ->dirty_inode callback that timestamps need to
+ * be updated by setting I_DIRTY_TIME in flags.
+ */
+ if (inode_state_read_once(inode) & I_DIRTY_TIME) {
+ spin_lock(&inode->i_lock);
+ if (inode_state_read(inode) & I_DIRTY_TIME) {
+ inode_state_clear(inode, I_DIRTY_TIME);
+ flags |= I_DIRTY_TIME;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+
+ /*
* Notify the filesystem about the inode being dirtied, so that
* (if needed) it can update on-disk fields and journal the
* inode. This is only needed when the inode itself is being
@@ -2396,7 +2584,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
trace_writeback_dirty_inode_start(inode, flags);
if (sb->s_op->dirty_inode)
- sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
+ sb->s_op->dirty_inode(inode,
+ flags & (I_DIRTY_INODE | I_DIRTY_TIME));
trace_writeback_dirty_inode(inode, flags);
/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
@@ -2417,22 +2606,27 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
smp_mb();
- if (((inode->i_state & flags) == flags) ||
- (dirtytime && (inode->i_state & I_DIRTY_INODE)))
+ if ((inode_state_read_once(inode) & flags) == flags)
return;
spin_lock(&inode->i_lock);
- if (dirtytime && (inode->i_state & I_DIRTY_INODE))
- goto out_unlock_inode;
- if ((inode->i_state & flags) != flags) {
- const int was_dirty = inode->i_state & I_DIRTY;
+ if ((inode_state_read(inode) & flags) != flags) {
+ const int was_dirty = inode_state_read(inode) & I_DIRTY;
inode_attach_wb(inode, NULL);
- /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
- if (flags & I_DIRTY_INODE)
- inode->i_state &= ~I_DIRTY_TIME;
- inode->i_state |= flags;
+ inode_state_set(inode, flags);
+
+ /*
+ * Grab the inode's wb early because doing so requires dropping
+ * i_lock, and we need the following checks to happen atomically
+ * with the dirty list handling so that we don't move inodes out
+ * from under the flush worker's hands.
+ */
+ if (!was_dirty) {
+ wb = locked_inode_to_wb_and_lock_list(inode);
+ spin_lock(&inode->i_lock);
+ }
/*
* If the inode is queued for writeback by flush worker, just
@@ -2440,8 +2634,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* the inode it will place it on the appropriate superblock
* list, based upon its state.
*/
- if (inode->i_state & I_SYNC_QUEUED)
- goto out_unlock_inode;
+ if (inode_state_read(inode) & I_SYNC_QUEUED)
+ goto out_unlock;
/*
* Only add valid (hashed) inodes to the superblock's
@@ -2449,27 +2643,24 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
if (!S_ISBLK(inode->i_mode)) {
if (inode_unhashed(inode))
- goto out_unlock_inode;
+ goto out_unlock;
}
- if (inode->i_state & I_FREEING)
- goto out_unlock_inode;
+ if (inode_state_read(inode) & I_FREEING)
+ goto out_unlock;
/*
* If the inode was already on b_dirty/b_io/b_more_io, don't
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
- struct bdi_writeback *wb;
struct list_head *dirty_list;
bool wakeup_bdi = false;
- wb = locked_inode_to_wb_and_lock_list(inode);
-
inode->dirtied_when = jiffies;
if (dirtytime)
inode->dirtied_time_when = jiffies;
- if (inode->i_state & I_DIRTY)
+ if (inode_state_read(inode) & I_DIRTY)
dirty_list = &wb->b_dirty;
else
dirty_list = &wb->b_dirty_time;
@@ -2477,9 +2668,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
wakeup_bdi = inode_io_list_move_locked(inode, wb,
dirty_list);
- spin_unlock(&wb->list_lock);
- trace_writeback_dirty_inode_enqueue(inode);
-
/*
* If this is the first dirty inode for this bdi,
* we have to wake-up the corresponding bdi thread
@@ -2489,10 +2677,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (wakeup_bdi &&
(wb->bdi->capabilities & BDI_CAP_WRITEBACK))
wb_wakeup_delayed(wb);
+
+ spin_unlock(&wb->list_lock);
+ spin_unlock(&inode->i_lock);
+ trace_writeback_dirty_inode_enqueue(inode);
+
return;
}
}
-out_unlock_inode:
+out_unlock:
+ if (wb)
+ spin_unlock(&wb->list_lock);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
@@ -2562,7 +2757,7 @@ static void wait_sb_inodes(struct super_block *sb)
spin_unlock_irq(&sb->s_inode_wblist_lock);
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
spin_lock_irq(&sb->s_inode_wblist_lock);
@@ -2642,7 +2837,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
*/
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
- return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
+ writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
diff --git a/fs/fs_context.c b/fs/fs_context.c
index b7e43a780a62..93b7ebf8d927 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -161,21 +161,24 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
EXPORT_SYMBOL(vfs_parse_fs_param);
/**
- * vfs_parse_fs_string - Convenience function to just parse a string.
+ * vfs_parse_fs_qstr - Convenience function to just parse a string.
+ * @fc: Filesystem context.
+ * @key: Parameter name.
+ * @value: Parameter value (may be NULL for a flag).
*/
-int vfs_parse_fs_string(struct fs_context *fc, const char *key,
- const char *value, size_t v_size)
+int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
+ const struct qstr *value)
{
int ret;
struct fs_parameter param = {
.key = key,
.type = fs_value_is_flag,
- .size = v_size,
+ .size = value ? value->len : 0,
};
if (value) {
- param.string = kmemdup_nul(value, v_size, GFP_KERNEL);
+ param.string = kmemdup_nul(value->name, value->len, GFP_KERNEL);
if (!param.string)
return -ENOMEM;
param.type = fs_value_is_string;
@@ -185,20 +188,22 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
kfree(param.string);
return ret;
}
-EXPORT_SYMBOL(vfs_parse_fs_string);
+EXPORT_SYMBOL(vfs_parse_fs_qstr);
/**
- * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
- * @ctx: The superblock configuration to fill in.
+ * vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
+ * @fc: The superblock configuration to fill in.
* @data: The data to parse
+ * @sep: callback for separating next option
*
- * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
- * called from the ->monolithic_mount_data() fs_context operation.
+ * Parse a blob of data that's in key[=val][,key[=val]]* form with a custom
+ * option separator callback.
*
* Returns 0 on success or the error returned by the ->parse_option() fs_context
* operation on failure.
*/
-int generic_parse_monolithic(struct fs_context *fc, void *data)
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **))
{
char *options = data, *key;
int ret = 0;
@@ -210,18 +215,16 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
if (ret)
return ret;
- while ((key = strsep(&options, ",")) != NULL) {
+ while ((key = sep(&options)) != NULL) {
if (*key) {
- size_t v_len = 0;
char *value = strchr(key, '=');
if (value) {
- if (value == key)
+ if (unlikely(value == key))
continue;
*value++ = 0;
- v_len = strlen(value);
}
- ret = vfs_parse_fs_string(fc, key, value, v_len);
+ ret = vfs_parse_fs_string(fc, key, value);
if (ret < 0)
break;
}
@@ -229,6 +232,28 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
return ret;
}
+EXPORT_SYMBOL(vfs_parse_monolithic_sep);
+
+static char *vfs_parse_comma_sep(char **s)
+{
+ return strsep(s, ",");
+}
+
+/**
+ * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * @fc: The superblock configuration to fill in.
+ * @data: The data to parse
+ *
+ * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
+ * called from the ->monolithic_mount_data() fs_context operation.
+ *
+ * Returns 0 on success or the error returned by the ->parse_option() fs_context
+ * operation on failure.
+ */
+int generic_parse_monolithic(struct fs_context *fc, void *data)
+{
+ return vfs_parse_monolithic_sep(fc, data, vfs_parse_comma_sep);
+}
EXPORT_SYMBOL(generic_parse_monolithic);
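/*
 * Illustrative sketch (not part of this patch): a filesystem whose mount
 * data is ';'-separated could plug its own separator callback into
 * vfs_parse_monolithic_sep(), exactly as generic_parse_monolithic() does
 * with the comma separator above. my_fs_parse_monolithic() and
 * my_semicolon_sep() are hypothetical names.
 */
static char *my_semicolon_sep(char **s)
{
	return strsep(s, ";");
}

static int my_fs_parse_monolithic(struct fs_context *fc, void *data)
{
	return vfs_parse_monolithic_sep(fc, data, my_semicolon_sep);
}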
/**
@@ -315,10 +340,31 @@ struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
}
EXPORT_SYMBOL(fs_context_for_reconfigure);
+/**
+ * fs_context_for_submount: allocate a new fs_context for a submount
+ * @type: file_system_type of the new context
+ * @reference: reference dentry from which to copy relevant info
+ *
+ * Allocate a new fs_context suitable for a submount. This also ensures that
+ * the fc->security object is inherited from @reference (if needed).
+ */
struct fs_context *fs_context_for_submount(struct file_system_type *type,
struct dentry *reference)
{
- return alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
+ struct fs_context *fc;
+ int ret;
+
+ fc = alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
+ if (IS_ERR(fc))
+ return fc;
+
+ ret = security_fs_context_submount(fc, reference->d_sb);
+ if (ret) {
+ put_fs_context(fc);
+ return ERR_PTR(ret);
+ }
+
+ return fc;
}
EXPORT_SYMBOL(fs_context_for_submount);
@@ -333,7 +379,7 @@ void fc_drop_locked(struct fs_context *fc)
static void legacy_fs_context_free(struct fs_context *fc);
/**
- * vfs_dup_fc_config: Duplicate a filesystem context.
+ * vfs_dup_fs_context - Duplicate a filesystem context.
* @src_fc: The context to copy.
*/
struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
@@ -379,7 +425,9 @@ EXPORT_SYMBOL(vfs_dup_fs_context);
/**
* logfc - Log a message to a filesystem context
- * @fc: The filesystem context to log to.
+ * @log: The filesystem context to log to, or NULL to use printk.
+ * @prefix: A string to prefix the output with, or NULL.
+ * @level: 'w' for a warning, 'e' for an error, 'i' for info. Anything else is a notice.
* @fmt: The format of the buffer.
*/
void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...)
@@ -398,6 +446,10 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
printk(KERN_ERR "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
break;
+ case 'i':
+ printk(KERN_INFO "%s%s%pV\n", prefix ? prefix : "",
+ prefix ? ": " : "", &vaf);
+ break;
default:
printk(KERN_NOTICE "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
@@ -442,7 +494,7 @@ static void put_fc_log(struct fs_context *fc)
if (log) {
if (refcount_dec_and_test(&log->usage)) {
fc->log.log = NULL;
- for (i = 0; i <= 7; i++)
+ for (i = 0; i < ARRAY_SIZE(log->buffer); i++)
if (log->need_free & (1 << i))
kfree(log->buffer[i]);
kfree(log);
@@ -548,7 +600,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
param->key);
}
- if (len > PAGE_SIZE - 2 - size)
+ if (size + len + 2 > PAGE_SIZE)
return invalf(fc, "VFS: Legacy: Cumulative options too large");
if (strchr(param->key, ',') ||
(param->type == fs_value_is_string &&
@@ -561,7 +613,8 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
return -ENOMEM;
}
- ctx->legacy_data[size++] = ',';
+ if (size)
+ ctx->legacy_data[size++] = ',';
len = strlen(param->key);
memcpy(ctx->legacy_data + size, param->key, len);
size += len;
@@ -691,6 +744,7 @@ void vfs_clean_context(struct fs_context *fc)
security_free_mnt_opts(&fc->security);
kfree(fc->source);
fc->source = NULL;
+ fc->exclusive = false;
fc->purpose = FS_CONTEXT_FOR_RECONFIGURE;
fc->phase = FS_CONTEXT_AWAITING_RECONF;
diff --git a/fs/fs_types.c b/fs/fs_dirent.c
index 78365e5dc08c..e5e08f213816 100644
--- a/fs/fs_types.c
+++ b/fs/fs_dirent.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/fs.h>
+#include <linux/fs_dirent.h>
#include <linux/export.h>
/*
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 3df07c0e32b3..c092a9f79e32 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -13,7 +13,7 @@
#include <linux/namei.h>
#include "internal.h"
-static const struct constant_table bool_names[] = {
+const struct constant_table bool_names[] = {
{ "0", false },
{ "1", true },
{ "false", false },
@@ -22,6 +22,7 @@ static const struct constant_table bool_names[] = {
{ "yes", true },
{ },
};
+EXPORT_SYMBOL(bool_names);
static const struct constant_table *
__lookup_constant(const struct constant_table *tbl, const char *name)
@@ -83,8 +84,8 @@ static const struct fs_parameter_spec *fs_lookup_key(
}
/*
- * fs_parse - Parse a filesystem configuration parameter
- * @fc: The filesystem context to log errors through.
+ * __fs_parse - Parse a filesystem configuration parameter
+ * @log: The filesystem context to log errors through.
* @desc: The parameter description to use.
* @param: The parameter.
* @result: Where to place the result of the parse
@@ -138,15 +139,16 @@ EXPORT_SYMBOL(__fs_parse);
* @fc: The filesystem context to log errors through.
* @param: The parameter.
* @want_bdev: T if want a blockdev
+ * @flags: Pathwalk flags passed to filename_lookup()
* @_path: The result of the lookup
*/
int fs_lookup_param(struct fs_context *fc,
struct fs_parameter *param,
bool want_bdev,
+ unsigned int flags,
struct path *_path)
{
struct filename *f;
- unsigned int flags = 0;
bool put_f;
int ret;
@@ -155,6 +157,7 @@ int fs_lookup_param(struct fs_context *fc,
f = getname_kernel(param->string);
if (IS_ERR(f))
return PTR_ERR(f);
+ param->dirfd = AT_FDCWD;
put_f = true;
break;
case fs_value_is_filename:
@@ -199,6 +202,8 @@ int fs_param_is_bool(struct p_log *log, const struct fs_parameter_spec *p,
int b;
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
+ if (!*param->string && (p->flags & fs_param_can_be_empty))
+ return 0;
b = lookup_constant(bool_names, param->string, -1);
if (b == -1)
return fs_param_bad_value(log, param);
@@ -211,8 +216,11 @@ int fs_param_is_u32(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
int base = (unsigned long)p->data;
- if (param->type != fs_value_is_string ||
- kstrtouint(param->string, base, &result->uint_32) < 0)
+ if (param->type != fs_value_is_string)
+ return fs_param_bad_value(log, param);
+ if (!*param->string && (p->flags & fs_param_can_be_empty))
+ return 0;
+ if (kstrtouint(param->string, base, &result->uint_32) < 0)
return fs_param_bad_value(log, param);
return 0;
}
@@ -221,8 +229,11 @@ EXPORT_SYMBOL(fs_param_is_u32);
int fs_param_is_s32(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
- if (param->type != fs_value_is_string ||
- kstrtoint(param->string, 0, &result->int_32) < 0)
+ if (param->type != fs_value_is_string)
+ return fs_param_bad_value(log, param);
+ if (!*param->string && (p->flags & fs_param_can_be_empty))
+ return 0;
+ if (kstrtoint(param->string, 0, &result->int_32) < 0)
return fs_param_bad_value(log, param);
return 0;
}
@@ -231,8 +242,11 @@ EXPORT_SYMBOL(fs_param_is_s32);
int fs_param_is_u64(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
- if (param->type != fs_value_is_string ||
- kstrtoull(param->string, 0, &result->uint_64) < 0)
+ if (param->type != fs_value_is_string)
+ return fs_param_bad_value(log, param);
+ if (!*param->string && (p->flags & fs_param_can_be_empty))
+ return 0;
+ if (kstrtoull(param->string, 0, &result->uint_64) < 0)
return fs_param_bad_value(log, param);
return 0;
}
@@ -244,6 +258,8 @@ int fs_param_is_enum(struct p_log *log, const struct fs_parameter_spec *p,
const struct constant_table *c;
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
+ if (!*param->string && (p->flags & fs_param_can_be_empty))
+ return 0;
c = __lookup_constant(p->data, param->string);
if (!c)
return fs_param_bad_value(log, param);
@@ -255,7 +271,8 @@ EXPORT_SYMBOL(fs_param_is_enum);
int fs_param_is_string(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
- if (param->type != fs_value_is_string || !*param->string)
+ if (param->type != fs_value_is_string ||
+ (!*param->string && !(p->flags & fs_param_can_be_empty)))
return fs_param_bad_value(log, param);
return 0;
}
@@ -275,7 +292,8 @@ int fs_param_is_fd(struct p_log *log, const struct fs_parameter_spec *p,
{
switch (param->type) {
case fs_value_is_string:
- if (kstrtouint(param->string, 0, &result->uint_32) < 0)
+ if ((!*param->string && !(p->flags & fs_param_can_be_empty)) ||
+ kstrtouint(param->string, 0, &result->uint_32) < 0)
break;
if (result->uint_32 <= INT_MAX)
return 0;
@@ -292,74 +310,79 @@ int fs_param_is_fd(struct p_log *log, const struct fs_parameter_spec *p,
}
EXPORT_SYMBOL(fs_param_is_fd);
-int fs_param_is_blockdev(struct p_log *log, const struct fs_parameter_spec *p,
- struct fs_parameter *param, struct fs_parse_result *result)
+int fs_param_is_file_or_string(struct p_log *log,
+ const struct fs_parameter_spec *p,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
{
- return 0;
+ switch (param->type) {
+ case fs_value_is_string:
+ return fs_param_is_string(log, p, param, result);
+ case fs_value_is_file:
+ result->uint_32 = param->dirfd;
+ if (result->uint_32 <= INT_MAX)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return fs_param_bad_value(log, param);
}
-EXPORT_SYMBOL(fs_param_is_blockdev);
+EXPORT_SYMBOL(fs_param_is_file_or_string);
-int fs_param_is_path(struct p_log *log, const struct fs_parameter_spec *p,
- struct fs_parameter *param, struct fs_parse_result *result)
+int fs_param_is_uid(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
{
+ kuid_t uid;
+
+ if (fs_param_is_u32(log, p, param, result) != 0)
+ return fs_param_bad_value(log, param);
+
+ uid = make_kuid(current_user_ns(), result->uint_32);
+ if (!uid_valid(uid))
+ return inval_plog(log, "Invalid uid '%s'", param->string);
+
+ result->uid = uid;
return 0;
}
-EXPORT_SYMBOL(fs_param_is_path);
+EXPORT_SYMBOL(fs_param_is_uid);
-#ifdef CONFIG_VALIDATE_FS_PARSER
-/**
- * validate_constant_table - Validate a constant table
- * @tbl: The constant table to validate.
- * @tbl_size: The size of the table.
- * @low: The lowest permissible value.
- * @high: The highest permissible value.
- * @special: One special permissible value outside of the range.
- */
-bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special)
+int fs_param_is_gid(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
{
- size_t i;
- bool good = true;
+ kgid_t gid;
- if (tbl_size == 0) {
- pr_warn("VALIDATE C-TBL: Empty\n");
- return true;
- }
+ if (fs_param_is_u32(log, p, param, result) != 0)
+ return fs_param_bad_value(log, param);
- for (i = 0; i < tbl_size; i++) {
- if (!tbl[i].name) {
- pr_err("VALIDATE C-TBL[%zu]: Null\n", i);
- good = false;
- } else if (i > 0 && tbl[i - 1].name) {
- int c = strcmp(tbl[i-1].name, tbl[i].name);
+ gid = make_kgid(current_user_ns(), result->uint_32);
+ if (!gid_valid(gid))
+ return inval_plog(log, "Invalid gid '%s'", param->string);
- if (c == 0) {
- pr_err("VALIDATE C-TBL[%zu]: Duplicate %s\n",
- i, tbl[i].name);
- good = false;
- }
- if (c > 0) {
- pr_err("VALIDATE C-TBL[%zu]: Missorted %s>=%s\n",
- i, tbl[i-1].name, tbl[i].name);
- good = false;
- }
- }
+ result->gid = gid;
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_gid);
- if (tbl[i].value != special &&
- (tbl[i].value < low || tbl[i].value > high)) {
- pr_err("VALIDATE C-TBL[%zu]: %s->%d const out of range (%d-%d)\n",
- i, tbl[i].name, tbl[i].value, low, high);
- good = false;
- }
- }
+int fs_param_is_blockdev(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_blockdev);
- return good;
+int fs_param_is_path(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ return 0;
}
+EXPORT_SYMBOL(fs_param_is_path);
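/*
 * Illustrative sketch (not part of this patch): a parameter table using
 * the new uid/gid parsers and the fs_param_can_be_empty flag, built with
 * the generic __fsparam() macro. The table name and the Opt_* constants
 * are hypothetical.
 */
enum { Opt_uid, Opt_gid, Opt_label };

static const struct fs_parameter_spec my_fs_parameters[] = {
	__fsparam(fs_param_is_uid,    "uid",   Opt_uid,   0, NULL),
	__fsparam(fs_param_is_gid,    "gid",   Opt_gid,   0, NULL),
	/* an empty "label=" is accepted because of fs_param_can_be_empty */
	__fsparam(fs_param_is_string, "label", Opt_label,
		  fs_param_can_be_empty, NULL),
	{}
};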
+#ifdef CONFIG_VALIDATE_FS_PARSER
/**
- * fs_validate_description - Validate a parameter description
- * @name: The parameter name to search for.
- * @desc: The parameter description to validate.
+ * fs_validate_description - Validate a parameter specification array
+ * @name: Owner name of the parameter specification array
+ * @desc: The parameter specification array to validate.
*/
bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc)
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 04b3f5b9c629..b8c46c5a38a0 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -17,12 +17,10 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
struct path old_root;
path_get(path);
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->seq);
old_root = fs->root;
fs->root = *path;
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->seq);
if (old_root.dentry)
path_put(&old_root);
}
@@ -36,12 +34,10 @@ void set_fs_pwd(struct fs_struct *fs, const struct path *path)
struct path old_pwd;
path_get(path);
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->seq);
old_pwd = fs->pwd;
fs->pwd = *path;
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->seq);
if (old_pwd.dentry)
path_put(&old_pwd);
@@ -62,24 +58,22 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
int count = 0;
read_lock(&tasklist_lock);
- do_each_thread(g, p) {
+ for_each_process_thread(g, p) {
task_lock(p);
fs = p->fs;
if (fs) {
int hits = 0;
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->seq);
hits += replace_path(&fs->root, old_root, new_root);
hits += replace_path(&fs->pwd, old_root, new_root);
- write_seqcount_end(&fs->seq);
while (hits--) {
count++;
path_get(new_root);
}
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->seq);
}
task_unlock(p);
- } while_each_thread(g, p);
+ }
read_unlock(&tasklist_lock);
while (count--)
path_put(old_root);
@@ -99,10 +93,10 @@ void exit_fs(struct task_struct *tsk)
if (fs) {
int kill;
task_lock(tsk);
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
tsk->fs = NULL;
kill = !--fs->users;
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
task_unlock(tsk);
if (kill)
free_fs_struct(fs);
@@ -116,16 +110,15 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
if (fs) {
fs->users = 1;
fs->in_exec = 0;
- spin_lock_init(&fs->lock);
- seqcount_spinlock_init(&fs->seq, &fs->lock);
+ seqlock_init(&fs->seq);
fs->umask = old->umask;
- spin_lock(&old->lock);
+ read_seqlock_excl(&old->seq);
fs->root = old->root;
path_get(&fs->root);
fs->pwd = old->pwd;
path_get(&fs->pwd);
- spin_unlock(&old->lock);
+ read_sequnlock_excl(&old->seq);
}
return fs;
}
@@ -140,10 +133,10 @@ int unshare_fs_struct(void)
return -ENOMEM;
task_lock(current);
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
kill = !--fs->users;
current->fs = new_fs;
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
task_unlock(current);
if (kill)
@@ -153,16 +146,9 @@ int unshare_fs_struct(void)
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);
-int current_umask(void)
-{
- return current->fs->umask;
-}
-EXPORT_SYMBOL(current_umask);
-
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
.users = 1,
- .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
- .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
+ .seq = __SEQLOCK_UNLOCKED(init_fs.seq),
.umask = 0022,
};
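A hedged note on what the conversion above buys: with fs->seq promoted from a seqcount_t guarded by an external spinlock to a full seqlock_t, writers serialize through write_seqlock()/write_sequnlock() as in the hunks above, exclusive readers use read_seqlock_excl(), and lockless readers can use the standard seqlock retry loop. The helper below is only a sketch of that read-side pattern; it copies the path values consistently but takes no reference on them, which a real helper would still need before dereferencing the result.

#include <linux/fs_struct.h>

/* Sketch only: sample fs->pwd via the seqlock read-side retry loop made
 * possible once fs->seq is a seqlock_t.  The copy is consistent, but the
 * path is not pinned by this function. */
static void sample_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&fs->seq);	/* open a read section */
		*pwd = fs->pwd;			/* may race with a writer */
	} while (read_seqretry(&fs->seq, seq));	/* retry if a write intervened */
}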
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
deleted file mode 100644
index b313a978ae0a..000000000000
--- a/fs/fscache/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config FSCACHE
- tristate "General filesystem local caching manager"
- select NETFS_SUPPORT
- help
- This option enables a generic filesystem caching manager that can be
- used by various network and other filesystems to cache data locally.
- Different sorts of caches can be plugged in, depending on the
- resources available.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_STATS
- bool "Gather statistical information on local caching"
- depends on FSCACHE && PROC_FS
- select NETFS_STATS
- help
- This option causes statistical information to be gathered on local
- caching and exported through file:
-
- /proc/fs/fscache/stats
-
- The gathering of statistics adds a certain amount of overhead to
- execution as there are a quite a few stats gathered, and on a
- multi-CPU system these may be on cachelines that keep bouncing
- between CPUs. On the other hand, the stats are very useful for
- debugging purposes. Saying 'Y' here is recommended.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_DEBUG
- bool "Debug FS-Cache"
- depends on FSCACHE
- help
- This permits debugging to be dynamically enabled in the local caching
- management module. If this is set, the debugging output may be
- enabled by setting bits in /sys/modules/fscache/parameter/debug.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
deleted file mode 100644
index 03a871d689bb..000000000000
--- a/fs/fscache/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for general filesystem caching code
-#
-
-fscache-y := \
- cache.o \
- cookie.o \
- fsdef.o \
- io.o \
- main.o \
- netfs.o \
- object.o \
- operation.o \
- page.o
-
-fscache-$(CONFIG_PROC_FS) += proc.o
-fscache-$(CONFIG_FSCACHE_STATS) += stats.o
-
-obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
deleted file mode 100644
index bd4f44c1cce0..000000000000
--- a/fs/fscache/cache.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache cache handling
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-LIST_HEAD(fscache_cache_list);
-DECLARE_RWSEM(fscache_addremove_sem);
-DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq);
-EXPORT_SYMBOL(fscache_cache_cleared_wq);
-
-static LIST_HEAD(fscache_cache_tag_list);
-
-/*
- * look up a cache tag
- */
-struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name)
-{
- struct fscache_cache_tag *tag, *xtag;
-
- /* firstly check for the existence of the tag under read lock */
- down_read(&fscache_addremove_sem);
-
- list_for_each_entry(tag, &fscache_cache_tag_list, link) {
- if (strcmp(tag->name, name) == 0) {
- atomic_inc(&tag->usage);
- up_read(&fscache_addremove_sem);
- return tag;
- }
- }
-
- up_read(&fscache_addremove_sem);
-
- /* the tag does not exist - create a candidate */
- xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL);
- if (!xtag)
- /* return a dummy tag if out of memory */
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&xtag->usage, 1);
- strcpy(xtag->name, name);
-
- /* write lock, search again and add if still not present */
- down_write(&fscache_addremove_sem);
-
- list_for_each_entry(tag, &fscache_cache_tag_list, link) {
- if (strcmp(tag->name, name) == 0) {
- atomic_inc(&tag->usage);
- up_write(&fscache_addremove_sem);
- kfree(xtag);
- return tag;
- }
- }
-
- list_add_tail(&xtag->link, &fscache_cache_tag_list);
- up_write(&fscache_addremove_sem);
- return xtag;
-}
-
-/*
- * release a reference to a cache tag
- */
-void __fscache_release_cache_tag(struct fscache_cache_tag *tag)
-{
- if (tag != ERR_PTR(-ENOMEM)) {
- down_write(&fscache_addremove_sem);
-
- if (atomic_dec_and_test(&tag->usage))
- list_del_init(&tag->link);
- else
- tag = NULL;
-
- up_write(&fscache_addremove_sem);
-
- kfree(tag);
- }
-}
-
-/*
- * select a cache in which to store an object
- * - the cache addremove semaphore must be at least read-locked by the caller
- * - the object will never be an index
- */
-struct fscache_cache *fscache_select_cache_for_object(
- struct fscache_cookie *cookie)
-{
- struct fscache_cache_tag *tag;
- struct fscache_object *object;
- struct fscache_cache *cache;
-
- _enter("");
-
- if (list_empty(&fscache_cache_list)) {
- _leave(" = NULL [no cache]");
- return NULL;
- }
-
- /* we check the parent to determine the cache to use */
- spin_lock(&cookie->lock);
-
- /* the first in the parent's backing list should be the preferred
- * cache */
- if (!hlist_empty(&cookie->backing_objects)) {
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- cache = object->cache;
- if (fscache_object_is_dying(object) ||
- test_bit(FSCACHE_IOERROR, &cache->flags))
- cache = NULL;
-
- spin_unlock(&cookie->lock);
- _leave(" = %s [parent]", cache ? cache->tag->name : "NULL");
- return cache;
- }
-
- /* the parent is unbacked */
- if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
- /* cookie not an index and is unbacked */
- spin_unlock(&cookie->lock);
- _leave(" = NULL [cookie ub,ni]");
- return NULL;
- }
-
- spin_unlock(&cookie->lock);
-
- if (!cookie->def->select_cache)
- goto no_preference;
-
- /* ask the netfs for its preference */
- tag = cookie->def->select_cache(cookie->parent->netfs_data,
- cookie->netfs_data);
- if (!tag)
- goto no_preference;
-
- if (tag == ERR_PTR(-ENOMEM)) {
- _leave(" = NULL [nomem tag]");
- return NULL;
- }
-
- if (!tag->cache) {
- _leave(" = NULL [unbacked tag]");
- return NULL;
- }
-
- if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
- return NULL;
-
- _leave(" = %s [specific]", tag->name);
- return tag->cache;
-
-no_preference:
- /* netfs has no preference - just select first cache */
- cache = list_entry(fscache_cache_list.next,
- struct fscache_cache, link);
- _leave(" = %s [first]", cache->tag->name);
- return cache;
-}
-
-/**
- * fscache_init_cache - Initialise a cache record
- * @cache: The cache record to be initialised
- * @ops: The cache operations to be installed in that record
- * @idfmt: Format string to define identifier
- * @...: sprintf-style arguments
- *
- * Initialise a record of a cache and fill in the name.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt,
- ...)
-{
- va_list va;
-
- memset(cache, 0, sizeof(*cache));
-
- cache->ops = ops;
-
- va_start(va, idfmt);
- vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
- va_end(va);
-
- INIT_WORK(&cache->op_gc, fscache_operation_gc);
- INIT_LIST_HEAD(&cache->link);
- INIT_LIST_HEAD(&cache->object_list);
- INIT_LIST_HEAD(&cache->op_gc_list);
- spin_lock_init(&cache->object_list_lock);
- spin_lock_init(&cache->op_gc_list_lock);
-}
-EXPORT_SYMBOL(fscache_init_cache);
-
-/**
- * fscache_add_cache - Declare a cache as being open for business
- * @cache: The record describing the cache
- * @ifsdef: The record of the cache object describing the top-level index
- * @tagname: The tag describing this cache
- *
- * Add a cache to the system, making it available for netfs's to use.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *ifsdef,
- const char *tagname)
-{
- struct fscache_cache_tag *tag;
-
- ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
- BUG_ON(!cache->ops);
- BUG_ON(!ifsdef);
-
- cache->flags = 0;
- ifsdef->event_mask =
- ((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
- ~(1 << FSCACHE_OBJECT_EV_CLEARED);
- __set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);
-
- if (!tagname)
- tagname = cache->identifier;
-
- BUG_ON(!tagname[0]);
-
- _enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);
-
- /* we use the cache tag to uniquely identify caches */
- tag = __fscache_lookup_cache_tag(tagname);
- if (IS_ERR(tag))
- goto nomem;
-
- if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags))
- goto tag_in_use;
-
- cache->kobj = kobject_create_and_add(tagname, fscache_root);
- if (!cache->kobj)
- goto error;
-
- ifsdef->cache = cache;
- cache->fsdef = ifsdef;
-
- down_write(&fscache_addremove_sem);
-
- tag->cache = cache;
- cache->tag = tag;
-
- /* add the cache to the list */
- list_add(&cache->link, &fscache_cache_list);
-
- /* add the cache's netfs definition index object to the cache's
- * list */
- spin_lock(&cache->object_list_lock);
- list_add_tail(&ifsdef->cache_link, &cache->object_list);
- spin_unlock(&cache->object_list_lock);
-
- /* add the cache's netfs definition index object to the top level index
- * cookie as a known backing object */
- spin_lock(&fscache_fsdef_index.lock);
-
- hlist_add_head(&ifsdef->cookie_link,
- &fscache_fsdef_index.backing_objects);
-
- refcount_inc(&fscache_fsdef_index.ref);
-
- /* done */
- spin_unlock(&fscache_fsdef_index.lock);
- up_write(&fscache_addremove_sem);
-
- pr_notice("Cache \"%s\" added (type %s)\n",
- cache->tag->name, cache->ops->name);
- kobject_uevent(cache->kobj, KOBJ_ADD);
-
- _leave(" = 0 [%s]", cache->identifier);
- return 0;
-
-tag_in_use:
- pr_err("Cache tag '%s' already in use\n", tagname);
- __fscache_release_cache_tag(tag);
- _leave(" = -EXIST");
- return -EEXIST;
-
-error:
- __fscache_release_cache_tag(tag);
- _leave(" = -EINVAL");
- return -EINVAL;
-
-nomem:
- _leave(" = -ENOMEM");
- return -ENOMEM;
-}
-EXPORT_SYMBOL(fscache_add_cache);
-
-/**
- * fscache_io_error - Note a cache I/O error
- * @cache: The record describing the cache
- *
- * Note that an I/O error occurred in a cache and that it should no longer be
- * used for anything. This also reports the error into the kernel log.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-void fscache_io_error(struct fscache_cache *cache)
-{
- if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags))
- pr_err("Cache '%s' stopped due to I/O error\n",
- cache->ops->name);
-}
-EXPORT_SYMBOL(fscache_io_error);
-
-/*
- * request withdrawal of all the objects in a cache
- * - all the objects being withdrawn are moved onto the supplied list
- */
-static void fscache_withdraw_all_objects(struct fscache_cache *cache,
- struct list_head *dying_objects)
-{
- struct fscache_object *object;
-
- while (!list_empty(&cache->object_list)) {
- spin_lock(&cache->object_list_lock);
-
- if (!list_empty(&cache->object_list)) {
- object = list_entry(cache->object_list.next,
- struct fscache_object, cache_link);
- list_move_tail(&object->cache_link, dying_objects);
-
- _debug("withdraw %x", object->cookie->debug_id);
-
- /* This must be done under object_list_lock to prevent
- * a race with fscache_drop_object().
- */
- fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
- }
-
- spin_unlock(&cache->object_list_lock);
- cond_resched();
- }
-}
-
-/**
- * fscache_withdraw_cache - Withdraw a cache from the active service
- * @cache: The record describing the cache
- *
- * Withdraw a cache from service, unbinding all its cache objects from the
- * netfs cookies they're currently representing.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-void fscache_withdraw_cache(struct fscache_cache *cache)
-{
- LIST_HEAD(dying_objects);
-
- _enter("");
-
- pr_notice("Withdrawing cache \"%s\"\n",
- cache->tag->name);
-
- /* make the cache unavailable for cookie acquisition */
- if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
- BUG();
-
- down_write(&fscache_addremove_sem);
- list_del_init(&cache->link);
- cache->tag->cache = NULL;
- up_write(&fscache_addremove_sem);
-
- /* make sure all pages pinned by operations on behalf of the netfs are
- * written to disk */
- fscache_stat(&fscache_n_cop_sync_cache);
- cache->ops->sync_cache(cache);
- fscache_stat_d(&fscache_n_cop_sync_cache);
-
- /* dissociate all the netfs pages backed by this cache from the block
- * mappings in the cache */
- fscache_stat(&fscache_n_cop_dissociate_pages);
- cache->ops->dissociate_pages(cache);
- fscache_stat_d(&fscache_n_cop_dissociate_pages);
-
- /* we now have to destroy all the active objects pertaining to this
- * cache - which we do by passing them off to thread pool to be
- * disposed of */
- _debug("destroy");
-
- fscache_withdraw_all_objects(cache, &dying_objects);
-
- /* wait for all extant objects to finish their outstanding operations
- * and go away */
- _debug("wait for finish");
- wait_event(fscache_cache_cleared_wq,
- atomic_read(&cache->object_count) == 0);
- _debug("wait for clearance");
- wait_event(fscache_cache_cleared_wq,
- list_empty(&cache->object_list));
- _debug("cleared");
- ASSERT(list_empty(&dying_objects));
-
- kobject_put(cache->kobj);
-
- clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
- fscache_release_cache_tag(cache->tag);
- cache->tag = NULL;
-
- _leave("");
-}
-EXPORT_SYMBOL(fscache_withdraw_cache);
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
deleted file mode 100644
index cd42be646ed3..000000000000
--- a/fs/fscache/cookie.c
+++ /dev/null
@@ -1,1071 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* netfs cookie management
- *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * See Documentation/filesystems/caching/netfs-api.rst for more information on
- * the netfs API.
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-struct kmem_cache *fscache_cookie_jar;
-
-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
-
-#define fscache_cookie_hash_shift 15
-static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
-static LIST_HEAD(fscache_cookies);
-static DEFINE_RWLOCK(fscache_cookies_lock);
-
-static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
- loff_t object_size);
-static int fscache_alloc_object(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-static int fscache_attach_object(struct fscache_cookie *cookie,
- struct fscache_object *object);
-
-static void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
-{
- struct fscache_object *object;
- struct hlist_node *o;
- const u8 *k;
- unsigned loop;
-
- pr_err("%c-cookie c=%08x [p=%08x fl=%lx nc=%u na=%u]\n",
- prefix,
- cookie->debug_id,
- cookie->parent ? cookie->parent->debug_id : 0,
- cookie->flags,
- atomic_read(&cookie->n_children),
- atomic_read(&cookie->n_active));
- pr_err("%c-cookie d=%p{%s} n=%p\n",
- prefix,
- cookie->def,
- cookie->def ? cookie->def->name : "?",
- cookie->netfs_data);
-
- o = READ_ONCE(cookie->backing_objects.first);
- if (o) {
- object = hlist_entry(o, struct fscache_object, cookie_link);
- pr_err("%c-cookie o=%u\n", prefix, object->debug_id);
- }
-
- pr_err("%c-key=[%u] '", prefix, cookie->key_len);
- k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
- cookie->inline_key : cookie->key;
- for (loop = 0; loop < cookie->key_len; loop++)
- pr_cont("%02x", k[loop]);
- pr_cont("'\n");
-}
-
-void fscache_free_cookie(struct fscache_cookie *cookie)
-{
- if (cookie) {
- BUG_ON(!hlist_empty(&cookie->backing_objects));
- write_lock(&fscache_cookies_lock);
- list_del(&cookie->proc_link);
- write_unlock(&fscache_cookies_lock);
- if (cookie->aux_len > sizeof(cookie->inline_aux))
- kfree(cookie->aux);
- if (cookie->key_len > sizeof(cookie->inline_key))
- kfree(cookie->key);
- kmem_cache_free(fscache_cookie_jar, cookie);
- }
-}
-
-/*
- * Set the index key in a cookie. The cookie struct has space for a 16-byte
- * key plus length and hash, but if that's not big enough, it's instead a
- * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
- * the key data.
- */
-static int fscache_set_key(struct fscache_cookie *cookie,
- const void *index_key, size_t index_key_len)
-{
- u32 *buf;
- int bufs;
-
- bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
-
- if (index_key_len > sizeof(cookie->inline_key)) {
- buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- cookie->key = buf;
- } else {
- buf = (u32 *)cookie->inline_key;
- }
-
- memcpy(buf, index_key, index_key_len);
- cookie->key_hash = fscache_hash(0, buf, bufs);
- return 0;
-}
-
-static long fscache_compare_cookie(const struct fscache_cookie *a,
- const struct fscache_cookie *b)
-{
- const void *ka, *kb;
-
- if (a->key_hash != b->key_hash)
- return (long)a->key_hash - (long)b->key_hash;
- if (a->parent != b->parent)
- return (long)a->parent - (long)b->parent;
- if (a->key_len != b->key_len)
- return (long)a->key_len - (long)b->key_len;
- if (a->type != b->type)
- return (long)a->type - (long)b->type;
-
- if (a->key_len <= sizeof(a->inline_key)) {
- ka = &a->inline_key;
- kb = &b->inline_key;
- } else {
- ka = a->key;
- kb = b->key;
- }
- return memcmp(ka, kb, a->key_len);
-}
-
-static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
-
-/*
- * Allocate a cookie.
- */
-struct fscache_cookie *fscache_alloc_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- const void *index_key, size_t index_key_len,
- const void *aux_data, size_t aux_data_len,
- void *netfs_data,
- loff_t object_size)
-{
- struct fscache_cookie *cookie;
-
- /* allocate and initialise a cookie */
- cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
- if (!cookie)
- return NULL;
-
- cookie->key_len = index_key_len;
- cookie->aux_len = aux_data_len;
-
- if (fscache_set_key(cookie, index_key, index_key_len) < 0)
- goto nomem;
-
- if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
- memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
- } else {
- cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
- if (!cookie->aux)
- goto nomem;
- }
-
- refcount_set(&cookie->ref, 1);
- atomic_set(&cookie->n_children, 0);
- cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
-
- /* We keep the active count elevated until relinquishment to prevent an
- * attempt to wake up every time the object operations queue quiesces.
- */
- atomic_set(&cookie->n_active, 1);
-
- cookie->def = def;
- cookie->parent = parent;
- cookie->netfs_data = netfs_data;
- cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);
- cookie->type = def->type;
- spin_lock_init(&cookie->lock);
- spin_lock_init(&cookie->stores_lock);
- INIT_HLIST_HEAD(&cookie->backing_objects);
-
- /* radix tree insertion won't use the preallocation pool unless it's
- * told it may not wait */
- INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
-
- write_lock(&fscache_cookies_lock);
- list_add_tail(&cookie->proc_link, &fscache_cookies);
- write_unlock(&fscache_cookies_lock);
- return cookie;
-
-nomem:
- fscache_free_cookie(cookie);
- return NULL;
-}
-
-/*
- * Attempt to insert the new cookie into the hash. If there's a collision, we
- * return the old cookie if it's not in use and an error otherwise.
- */
-struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate)
-{
- struct fscache_cookie *cursor;
- struct hlist_bl_head *h;
- struct hlist_bl_node *p;
- unsigned int bucket;
-
- bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
- h = &fscache_cookie_hash[bucket];
-
- hlist_bl_lock(h);
- hlist_bl_for_each_entry(cursor, p, h, hash_link) {
- if (fscache_compare_cookie(candidate, cursor) == 0)
- goto collision;
- }
-
- __set_bit(FSCACHE_COOKIE_ACQUIRED, &candidate->flags);
- fscache_cookie_get(candidate->parent, fscache_cookie_get_acquire_parent);
- atomic_inc(&candidate->parent->n_children);
- hlist_bl_add_head(&candidate->hash_link, h);
- hlist_bl_unlock(h);
- return candidate;
-
-collision:
- if (test_and_set_bit(FSCACHE_COOKIE_ACQUIRED, &cursor->flags)) {
- trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
- fscache_cookie_collision);
- pr_err("Duplicate cookie detected\n");
- fscache_print_cookie(cursor, 'O');
- fscache_print_cookie(candidate, 'N');
- hlist_bl_unlock(h);
- return NULL;
- }
-
- fscache_cookie_get(cursor, fscache_cookie_get_reacquire);
- hlist_bl_unlock(h);
- return cursor;
-}
-
-/*
- * request a cookie to represent an object (index, datafile, xattr, etc)
- * - parent specifies the parent object
- * - the top level index cookie for each netfs is stored in the fscache_netfs
- * struct upon registration
- * - def points to the definition
- * - the netfs_data will be passed to the functions pointed to in *def
- * - all attached caches will be searched to see if they contain this object
- * - index objects aren't stored on disk until there's a dependent file that
- * needs storing
- * - other objects are stored in a selected cache immediately, and all the
- * indices forming the path to it are instantiated if necessary
- * - we never let on to the netfs about errors
- * - we may set a negative cookie pointer, but that's okay
- */
-struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- const void *index_key, size_t index_key_len,
- const void *aux_data, size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable)
-{
- struct fscache_cookie *candidate, *cookie;
-
- BUG_ON(!def);
-
- _enter("{%s},{%s},%p,%u",
- parent ? (char *) parent->def->name : "<no-parent>",
- def->name, netfs_data, enable);
-
- if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
- return NULL;
- if (!aux_data || !aux_data_len) {
- aux_data = NULL;
- aux_data_len = 0;
- }
-
- fscache_stat(&fscache_n_acquires);
-
- /* if there's no parent cookie, then we don't create one here either */
- if (!parent) {
- fscache_stat(&fscache_n_acquires_null);
- _leave(" [no parent]");
- return NULL;
- }
-
- /* validate the definition */
- BUG_ON(!def->name[0]);
-
- BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
- parent->type != FSCACHE_COOKIE_TYPE_INDEX);
-
- candidate = fscache_alloc_cookie(parent, def,
- index_key, index_key_len,
- aux_data, aux_data_len,
- netfs_data, object_size);
- if (!candidate) {
- fscache_stat(&fscache_n_acquires_oom);
- _leave(" [ENOMEM]");
- return NULL;
- }
-
- cookie = fscache_hash_cookie(candidate);
- if (!cookie) {
- trace_fscache_cookie(candidate->debug_id, 1,
- fscache_cookie_discard);
- goto out;
- }
-
- if (cookie == candidate)
- candidate = NULL;
-
- switch (cookie->type) {
- case FSCACHE_COOKIE_TYPE_INDEX:
- fscache_stat(&fscache_n_cookie_index);
- break;
- case FSCACHE_COOKIE_TYPE_DATAFILE:
- fscache_stat(&fscache_n_cookie_data);
- break;
- default:
- fscache_stat(&fscache_n_cookie_special);
- break;
- }
-
- trace_fscache_acquire(cookie);
-
- if (enable) {
- /* if the object is an index then we need do nothing more here
- * - we create indices on disk when we need them as an index
- * may exist in multiple caches */
- if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
- if (fscache_acquire_non_index_cookie(cookie, object_size) == 0) {
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- } else {
- atomic_dec(&parent->n_children);
- fscache_cookie_put(cookie,
- fscache_cookie_put_acquire_nobufs);
- fscache_stat(&fscache_n_acquires_nobufs);
- _leave(" = NULL");
- return NULL;
- }
- } else {
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- }
- }
-
- fscache_stat(&fscache_n_acquires_ok);
-
-out:
- fscache_free_cookie(candidate);
- return cookie;
-}
-EXPORT_SYMBOL(__fscache_acquire_cookie);
-
-/*
- * Enable a cookie to permit it to accept new operations.
- */
-void __fscache_enable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
-{
- _enter("%x", cookie->debug_id);
-
- trace_fscache_enable(cookie);
-
- wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
- TASK_UNINTERRUPTIBLE);
-
- fscache_update_aux(cookie, aux_data);
-
- if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
- goto out_unlock;
-
- if (can_enable && !can_enable(data)) {
- /* The netfs decided it didn't want to enable after all */
- } else if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
- /* Wait for outstanding disablement to complete */
- __fscache_wait_on_invalidate(cookie);
-
- if (fscache_acquire_non_index_cookie(cookie, object_size) == 0)
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- } else {
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- }
-
-out_unlock:
- clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
-}
-EXPORT_SYMBOL(__fscache_enable_cookie);
-
-/*
- * acquire a non-index cookie
- * - this must make sure the index chain is instantiated and instantiate the
- * object representation too
- */
-static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
- loff_t object_size)
-{
- struct fscache_object *object;
- struct fscache_cache *cache;
- int ret;
-
- _enter("");
-
- set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- /* now we need to see whether the backing objects for this cookie yet
- * exist, if not there'll be nothing to search */
- down_read(&fscache_addremove_sem);
-
- if (list_empty(&fscache_cache_list)) {
- up_read(&fscache_addremove_sem);
- _leave(" = 0 [no caches]");
- return 0;
- }
-
- /* select a cache in which to store the object */
- cache = fscache_select_cache_for_object(cookie->parent);
- if (!cache) {
- up_read(&fscache_addremove_sem);
- fscache_stat(&fscache_n_acquires_no_cache);
- _leave(" = -ENOMEDIUM [no cache]");
- return -ENOMEDIUM;
- }
-
- _debug("cache %s", cache->tag->name);
-
- set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
-
- /* ask the cache to allocate objects for this cookie and its parent
- * chain */
- ret = fscache_alloc_object(cache, cookie);
- if (ret < 0) {
- up_read(&fscache_addremove_sem);
- _leave(" = %d", ret);
- return ret;
- }
-
- spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects)) {
- spin_unlock(&cookie->lock);
- goto unavailable;
- }
-
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- fscache_set_store_limit(object, object_size);
-
- /* initiate the process of looking up all the objects in the chain
- * (done by fscache_initialise_object()) */
- fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);
-
- spin_unlock(&cookie->lock);
-
- /* we may be required to wait for lookup to complete at this point */
- if (!fscache_defer_lookup) {
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
- TASK_UNINTERRUPTIBLE);
- if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
- goto unavailable;
- }
-
- up_read(&fscache_addremove_sem);
- _leave(" = 0 [deferred]");
- return 0;
-
-unavailable:
- up_read(&fscache_addremove_sem);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-
-/*
- * recursively allocate cache object records for a cookie/cache combination
- * - caller must be holding the addremove sem
- */
-static int fscache_alloc_object(struct fscache_cache *cache,
- struct fscache_cookie *cookie)
-{
- struct fscache_object *object;
- int ret;
-
- _enter("%s,%x{%s}", cache->tag->name, cookie->debug_id, cookie->def->name);
-
- spin_lock(&cookie->lock);
- hlist_for_each_entry(object, &cookie->backing_objects,
- cookie_link) {
- if (object->cache == cache)
- goto object_already_extant;
- }
- spin_unlock(&cookie->lock);
-
- /* ask the cache to allocate an object (we may end up with duplicate
- * objects at this stage, but we sort that out later) */
- fscache_stat(&fscache_n_cop_alloc_object);
- object = cache->ops->alloc_object(cache, cookie);
- fscache_stat_d(&fscache_n_cop_alloc_object);
- if (IS_ERR(object)) {
- fscache_stat(&fscache_n_object_no_alloc);
- ret = PTR_ERR(object);
- goto error;
- }
-
- ASSERTCMP(object->cookie, ==, cookie);
- fscache_stat(&fscache_n_object_alloc);
-
- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
-
- _debug("ALLOC OBJ%x: %s {%lx}",
- object->debug_id, cookie->def->name, object->events);
-
- ret = fscache_alloc_object(cache, cookie->parent);
- if (ret < 0)
- goto error_put;
-
- /* only attach if we managed to allocate all we needed, otherwise
- * discard the object we just allocated and instead use the one
- * attached to the cookie */
- if (fscache_attach_object(cookie, object) < 0) {
- fscache_stat(&fscache_n_cop_put_object);
- cache->ops->put_object(object, fscache_obj_put_attach_fail);
- fscache_stat_d(&fscache_n_cop_put_object);
- }
-
- _leave(" = 0");
- return 0;
-
-object_already_extant:
- ret = -ENOBUFS;
- if (fscache_object_is_dying(object) ||
- fscache_cache_is_broken(object)) {
- spin_unlock(&cookie->lock);
- goto error;
- }
- spin_unlock(&cookie->lock);
- _leave(" = 0 [found]");
- return 0;
-
-error_put:
- fscache_stat(&fscache_n_cop_put_object);
- cache->ops->put_object(object, fscache_obj_put_alloc_fail);
- fscache_stat_d(&fscache_n_cop_put_object);
-error:
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * attach a cache object to a cookie
- */
-static int fscache_attach_object(struct fscache_cookie *cookie,
- struct fscache_object *object)
-{
- struct fscache_object *p;
- struct fscache_cache *cache = object->cache;
- int ret;
-
- _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
-
- ASSERTCMP(object->cookie, ==, cookie);
-
- spin_lock(&cookie->lock);
-
- /* there may be multiple initial creations of this object, but we only
- * want one */
- ret = -EEXIST;
- hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
- if (p->cache == object->cache) {
- if (fscache_object_is_dying(p))
- ret = -ENOBUFS;
- goto cant_attach_object;
- }
- }
-
- /* pin the parent object */
- spin_lock_nested(&cookie->parent->lock, 1);
- hlist_for_each_entry(p, &cookie->parent->backing_objects,
- cookie_link) {
- if (p->cache == object->cache) {
- if (fscache_object_is_dying(p)) {
- ret = -ENOBUFS;
- spin_unlock(&cookie->parent->lock);
- goto cant_attach_object;
- }
- object->parent = p;
- spin_lock(&p->lock);
- p->n_children++;
- spin_unlock(&p->lock);
- break;
- }
- }
- spin_unlock(&cookie->parent->lock);
-
- /* attach to the cache's object list */
- if (list_empty(&object->cache_link)) {
- spin_lock(&cache->object_list_lock);
- list_add(&object->cache_link, &cache->object_list);
- spin_unlock(&cache->object_list_lock);
- }
-
- /* Attach to the cookie. The object already has a ref on it. */
- hlist_add_head(&object->cookie_link, &cookie->backing_objects);
- ret = 0;
-
-cant_attach_object:
- spin_unlock(&cookie->lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * Invalidate an object. Callable with spinlocks held.
- */
-void __fscache_invalidate(struct fscache_cookie *cookie)
-{
- struct fscache_object *object;
-
- _enter("{%s}", cookie->def->name);
-
- fscache_stat(&fscache_n_invalidates);
-
- /* Only permit invalidation of data files. Invalidating an index will
- * require the caller to release all its attachments to the tree rooted
- * there, and if it's doing that, it may as well just retire the
- * cookie.
- */
- ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);
-
- /* If there's an object, we tell the object state machine to handle the
- * invalidation on our behalf, otherwise there's nothing to do.
- */
- if (!hlist_empty(&cookie->backing_objects)) {
- spin_lock(&cookie->lock);
-
- if (fscache_cookie_enabled(cookie) &&
- !hlist_empty(&cookie->backing_objects) &&
- !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
- &cookie->flags)) {
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object,
- cookie_link);
- if (fscache_object_is_live(object))
- fscache_raise_event(
- object, FSCACHE_OBJECT_EV_INVALIDATE);
- }
-
- spin_unlock(&cookie->lock);
- }
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_invalidate);
-
-/*
- * Wait for object invalidation to complete.
- */
-void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
-{
- _enter("%x", cookie->debug_id);
-
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
- TASK_UNINTERRUPTIBLE);
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_wait_on_invalidate);
-
-/*
- * update the index entries backing a cookie
- */
-void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
-{
- struct fscache_object *object;
-
- fscache_stat(&fscache_n_updates);
-
- if (!cookie) {
- fscache_stat(&fscache_n_updates_null);
- _leave(" [no cookie]");
- return;
- }
-
- _enter("{%s}", cookie->def->name);
-
- spin_lock(&cookie->lock);
-
- fscache_update_aux(cookie, aux_data);
-
- if (fscache_cookie_enabled(cookie)) {
- /* update the index entry on disk in each cache backing this
- * cookie.
- */
- hlist_for_each_entry(object,
- &cookie->backing_objects, cookie_link) {
- fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
- }
- }
-
- spin_unlock(&cookie->lock);
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_update_cookie);
-
-/*
- * Disable a cookie to stop it from accepting new requests from the netfs.
- */
-void __fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate)
-{
- struct fscache_object *object;
- bool awaken = false;
-
- _enter("%x,%u", cookie->debug_id, invalidate);
-
- trace_fscache_disable(cookie);
-
- ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
-
- if (atomic_read(&cookie->n_children) != 0) {
- pr_err("Cookie '%s' still has children\n",
- cookie->def->name);
- BUG();
- }
-
- wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
- TASK_UNINTERRUPTIBLE);
-
- fscache_update_aux(cookie, aux_data);
-
- if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
- goto out_unlock_enable;
-
- /* If the cookie is being invalidated, wait for that to complete first
- * so that we can reuse the flag.
- */
- __fscache_wait_on_invalidate(cookie);
-
- /* Dispose of the backing objects */
- set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);
-
- spin_lock(&cookie->lock);
- if (!hlist_empty(&cookie->backing_objects)) {
- hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
- if (invalidate)
- set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
- }
- } else {
- if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- awaken = true;
- }
- spin_unlock(&cookie->lock);
- if (awaken)
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
-
- /* Wait for cessation of activity requiring access to the netfs (when
- * n_active reaches 0). This makes sure outstanding reads and writes
- * have completed.
- */
- if (!atomic_dec_and_test(&cookie->n_active)) {
- wait_var_event(&cookie->n_active,
- !atomic_read(&cookie->n_active));
- }
-
- /* Make sure any pending writes are cancelled. */
- if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX)
- fscache_invalidate_writes(cookie);
-
- /* Reset the cookie state if it wasn't relinquished */
- if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
- atomic_inc(&cookie->n_active);
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
- }
-
-out_unlock_enable:
- clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_disable_cookie);
-
-/*
- * release a cookie back to the cache
- * - the object will be marked as recyclable on disk if retire is true
- * - all dependents of this cookie must have already been unregistered
- * (indices/files/pages)
- */
-void __fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire)
-{
- fscache_stat(&fscache_n_relinquishes);
- if (retire)
- fscache_stat(&fscache_n_relinquishes_retire);
-
- if (!cookie) {
- fscache_stat(&fscache_n_relinquishes_null);
- _leave(" [no cookie]");
- return;
- }
-
- _enter("%x{%s,%d},%d",
- cookie->debug_id, cookie->def->name,
- atomic_read(&cookie->n_active), retire);
-
- trace_fscache_relinquish(cookie, retire);
-
- /* No further netfs-accessing operations on this cookie permitted */
- if (test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags))
- BUG();
-
- __fscache_disable_cookie(cookie, aux_data, retire);
-
- /* Clear pointers back to the netfs */
- cookie->netfs_data = NULL;
- cookie->def = NULL;
- BUG_ON(!radix_tree_empty(&cookie->stores));
-
- if (cookie->parent) {
- ASSERTCMP(refcount_read(&cookie->parent->ref), >, 0);
- ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
- atomic_dec(&cookie->parent->n_children);
- }
-
- /* Dispose of the netfs's link to the cookie */
- fscache_cookie_put(cookie, fscache_cookie_put_relinquish);
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_relinquish_cookie);
-
-/*
- * Remove a cookie from the hash table.
- */
-static void fscache_unhash_cookie(struct fscache_cookie *cookie)
-{
- struct hlist_bl_head *h;
- unsigned int bucket;
-
- bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
- h = &fscache_cookie_hash[bucket];
-
- hlist_bl_lock(h);
- hlist_bl_del(&cookie->hash_link);
- hlist_bl_unlock(h);
-}
-
-/*
- * Drop a reference to a cookie.
- */
-void fscache_cookie_put(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- struct fscache_cookie *parent;
- int ref;
-
- _enter("%x", cookie->debug_id);
-
- do {
- unsigned int cookie_debug_id = cookie->debug_id;
- bool zero = __refcount_dec_and_test(&cookie->ref, &ref);
-
- trace_fscache_cookie(cookie_debug_id, ref - 1, where);
- if (!zero)
- return;
-
- parent = cookie->parent;
- fscache_unhash_cookie(cookie);
- fscache_free_cookie(cookie);
-
- cookie = parent;
- where = fscache_cookie_put_parent;
- } while (cookie);
-
- _leave("");
-}
-
-/*
- * Get a reference to a cookie.
- */
-struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- int ref;
-
- __refcount_inc(&cookie->ref, &ref);
- trace_fscache_cookie(cookie->debug_id, ref + 1, where);
- return cookie;
-}
-
-/*
- * check the consistency between the netfs inode and the backing cache
- *
- * NOTE: it only serves no-index type
- */
-int __fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data)
-{
- struct fscache_operation *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,", cookie);
-
- ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- if (hlist_empty(&cookie->backing_objects))
- return 0;
-
- op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!op)
- return -ENOMEM;
-
- fscache_operation_init(cookie, op, NULL, NULL, NULL);
- op->flags = FSCACHE_OP_MYTHREAD |
- (1 << FSCACHE_OP_WAITING) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_check_consistency);
-
- spin_lock(&cookie->lock);
-
- fscache_update_aux(cookie, aux_data);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto inconsistent;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
- if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
- goto inconsistent;
-
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_op(object, op) < 0)
- goto submit_failed;
-
- /* the work queue now carries its own ref on the object */
- spin_unlock(&cookie->lock);
-
- ret = fscache_wait_for_operation_activation(object, op, NULL, NULL);
- if (ret == 0) {
- /* ask the cache to honour the operation */
- ret = object->cache->ops->check_consistency(op);
- fscache_op_complete(op, false);
- } else if (ret == -ENOBUFS) {
- ret = 0;
- }
-
- fscache_put_operation(op);
- _leave(" = %d", ret);
- return ret;
-
-submit_failed:
- wake_cookie = __fscache_unuse_cookie(cookie);
-inconsistent:
- spin_unlock(&cookie->lock);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- kfree(op);
- _leave(" = -ESTALE");
- return -ESTALE;
-}
-EXPORT_SYMBOL(__fscache_check_consistency);
-
-/*
- * Generate a list of extant cookies in /proc/fs/fscache/cookies
- */
-static int fscache_cookies_seq_show(struct seq_file *m, void *v)
-{
- struct fscache_cookie *cookie;
- unsigned int keylen = 0, auxlen = 0;
- char _type[3], *type;
- u8 *p;
-
- if (v == &fscache_cookies) {
- seq_puts(m,
- "COOKIE PARENT USAGE CHILD ACT TY FL DEF NETFS_DATA\n"
- "======== ======== ===== ===== === == === ================ ==========\n"
- );
- return 0;
- }
-
- cookie = list_entry(v, struct fscache_cookie, proc_link);
-
- switch (cookie->type) {
- case 0:
- type = "IX";
- break;
- case 1:
- type = "DT";
- break;
- default:
- snprintf(_type, sizeof(_type), "%02u",
- cookie->type);
- type = _type;
- break;
- }
-
- seq_printf(m,
- "%08x %08x %5u %5u %3u %s %03lx %-16s %px",
- cookie->debug_id,
- cookie->parent ? cookie->parent->debug_id : 0,
- refcount_read(&cookie->ref),
- atomic_read(&cookie->n_children),
- atomic_read(&cookie->n_active),
- type,
- cookie->flags,
- cookie->def->name,
- cookie->netfs_data);
-
- keylen = cookie->key_len;
- auxlen = cookie->aux_len;
-
- if (keylen > 0 || auxlen > 0) {
- seq_puts(m, " ");
- p = keylen <= sizeof(cookie->inline_key) ?
- cookie->inline_key : cookie->key;
- for (; keylen > 0; keylen--)
- seq_printf(m, "%02x", *p++);
- if (auxlen > 0) {
- seq_puts(m, ", ");
- p = auxlen <= sizeof(cookie->inline_aux) ?
- cookie->inline_aux : cookie->aux;
- for (; auxlen > 0; auxlen--)
- seq_printf(m, "%02x", *p++);
- }
- }
-
- seq_puts(m, "\n");
- return 0;
-}
-
-static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
- __acquires(fscache_cookies_lock)
-{
- read_lock(&fscache_cookies_lock);
- return seq_list_start_head(&fscache_cookies, *_pos);
-}
-
-static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- return seq_list_next(v, &fscache_cookies, _pos);
-}
-
-static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
- __releases(rcu)
-{
- read_unlock(&fscache_cookies_lock);
-}
-
-
-const struct seq_operations fscache_cookies_seq_ops = {
- .start = fscache_cookies_seq_start,
- .next = fscache_cookies_seq_next,
- .stop = fscache_cookies_seq_stop,
- .show = fscache_cookies_seq_show,
-};
diff --git a/fs/fscache/fsdef.c b/fs/fscache/fsdef.c
deleted file mode 100644
index 0402673c680e..000000000000
--- a/fs/fscache/fsdef.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Filesystem index definition
- *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
-#include "internal.h"
-
-static
-enum fscache_checkaux fscache_fsdef_netfs_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
-/*
- * The root index is owned by FS-Cache itself.
- *
- * When a netfs requests caching facilities, FS-Cache will, if one doesn't
- * already exist, create an entry in the root index with the key being the name
- * of the netfs ("AFS" for example), and the auxiliary data holding the index
- * structure version supplied by the netfs:
- *
- * FSDEF
- * |
- * +-----------+
- * | |
- * NFS AFS
- * [v=1] [v=1]
- *
- * If an entry with the appropriate name does already exist, the version is
- * compared. If the version is different, the entire subtree from that entry
- * will be discarded and a new entry created.
- *
- * The new entry will be an index, and a cookie referring to it will be passed
- * to the netfs. This is then the root handle by which the netfs accesses the
- * cache. It can create whatever objects it likes in that index, including
- * further indices.
- */
-static struct fscache_cookie_def fscache_fsdef_index_def = {
- .name = ".FS-Cache",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-struct fscache_cookie fscache_fsdef_index = {
- .debug_id = 1,
- .ref = REFCOUNT_INIT(1),
- .n_active = ATOMIC_INIT(1),
- .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
- .backing_objects = HLIST_HEAD_INIT,
- .def = &fscache_fsdef_index_def,
- .flags = 1 << FSCACHE_COOKIE_ENABLED,
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-EXPORT_SYMBOL(fscache_fsdef_index);
-
-/*
- * Definition of an entry in the root index. Each entry is an index, keyed to
- * a specific netfs and only applicable to a particular version of the index
- * structure used by that netfs.
- */
-struct fscache_cookie_def fscache_fsdef_netfs_def = {
- .name = "FSDEF.netfs",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
- .check_aux = fscache_fsdef_netfs_check_aux,
-};
-
-/*
- * check that the index structure version number stored in the auxiliary data
- * matches the one the netfs gave us
- */
-static enum fscache_checkaux fscache_fsdef_netfs_check_aux(
- void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct fscache_netfs *netfs = cookie_netfs_data;
- uint32_t version;
-
- _enter("{%s},,%hu", netfs->name, datalen);
-
- if (datalen != sizeof(version)) {
- _leave(" = OBSOLETE [dl=%d v=%zu]", datalen, sizeof(version));
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- memcpy(&version, data, sizeof(version));
- if (version != netfs->version) {
- _leave(" = OBSOLETE [ver=%x net=%x]", version, netfs->version);
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- _leave(" = OKAY");
- return FSCACHE_CHECKAUX_OKAY;
-}
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
deleted file mode 100644
index c3e4804b8fcb..000000000000
--- a/fs/fscache/internal.h
+++ /dev/null
@@ -1,461 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Internal definitions for FS-Cache
- *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-/*
- * Lock order, in the order in which multiple locks should be obtained:
- * - fscache_addremove_sem
- * - cookie->lock
- * - cookie->parent->lock
- * - cache->object_list_lock
- * - object->lock
- * - object->parent->lock
- * - cookie->stores_lock
- * - fscache_thread_lock
- *
- */
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) "FS-Cache: " fmt
-
-#include <linux/fscache-cache.h>
-#include <trace/events/fscache.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-
-#define FSCACHE_MIN_THREADS 4
-#define FSCACHE_MAX_THREADS 32
-
-/*
- * cache.c
- */
-extern struct list_head fscache_cache_list;
-extern struct rw_semaphore fscache_addremove_sem;
-
-extern struct fscache_cache *fscache_select_cache_for_object(
- struct fscache_cookie *);
-
-/*
- * cookie.c
- */
-extern struct kmem_cache *fscache_cookie_jar;
-extern const struct seq_operations fscache_cookies_seq_ops;
-
-extern void fscache_free_cookie(struct fscache_cookie *);
-extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
- const struct fscache_cookie_def *,
- const void *, size_t,
- const void *, size_t,
- void *, loff_t);
-extern struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *);
-extern struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *,
- enum fscache_cookie_trace);
-extern void fscache_cookie_put(struct fscache_cookie *,
- enum fscache_cookie_trace);
-
-static inline void fscache_cookie_see(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
- where);
-}
-
-/*
- * fsdef.c
- */
-extern struct fscache_cookie fscache_fsdef_index;
-extern struct fscache_cookie_def fscache_fsdef_netfs_def;
-
-/*
- * main.c
- */
-extern unsigned fscache_defer_lookup;
-extern unsigned fscache_defer_create;
-extern unsigned fscache_debug;
-extern struct kobject *fscache_root;
-extern struct workqueue_struct *fscache_object_wq;
-extern struct workqueue_struct *fscache_op_wq;
-DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
-
-extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
-
-static inline bool fscache_object_congested(void)
-{
- return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
-}
-
-/*
- * object.c
- */
-extern void fscache_enqueue_object(struct fscache_object *);
-
-/*
- * operation.c
- */
-extern int fscache_submit_exclusive_op(struct fscache_object *,
- struct fscache_operation *);
-extern int fscache_submit_op(struct fscache_object *,
- struct fscache_operation *);
-extern int fscache_cancel_op(struct fscache_operation *, bool);
-extern void fscache_cancel_all_ops(struct fscache_object *);
-extern void fscache_abort_object(struct fscache_object *);
-extern void fscache_start_operations(struct fscache_object *);
-extern void fscache_operation_gc(struct work_struct *);
-
-/*
- * page.c
- */
-extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
-extern int fscache_wait_for_operation_activation(struct fscache_object *,
- struct fscache_operation *,
- atomic_t *,
- atomic_t *);
-extern void fscache_invalidate_writes(struct fscache_cookie *);
-struct fscache_retrieval *fscache_alloc_retrieval(struct fscache_cookie *cookie,
- struct address_space *mapping,
- fscache_rw_complete_t end_io_func,
- void *context);
-
-/*
- * proc.c
- */
-#ifdef CONFIG_PROC_FS
-extern int __init fscache_proc_init(void);
-extern void fscache_proc_cleanup(void);
-#else
-#define fscache_proc_init() (0)
-#define fscache_proc_cleanup() do {} while (0)
-#endif
-
-/*
- * stats.c
- */
-#ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
-
-extern atomic_t fscache_n_op_pend;
-extern atomic_t fscache_n_op_run;
-extern atomic_t fscache_n_op_enqueue;
-extern atomic_t fscache_n_op_deferred_release;
-extern atomic_t fscache_n_op_initialised;
-extern atomic_t fscache_n_op_release;
-extern atomic_t fscache_n_op_gc;
-extern atomic_t fscache_n_op_cancelled;
-extern atomic_t fscache_n_op_rejected;
-
-extern atomic_t fscache_n_attr_changed;
-extern atomic_t fscache_n_attr_changed_ok;
-extern atomic_t fscache_n_attr_changed_nobufs;
-extern atomic_t fscache_n_attr_changed_nomem;
-extern atomic_t fscache_n_attr_changed_calls;
-
-extern atomic_t fscache_n_allocs;
-extern atomic_t fscache_n_allocs_ok;
-extern atomic_t fscache_n_allocs_wait;
-extern atomic_t fscache_n_allocs_nobufs;
-extern atomic_t fscache_n_allocs_intr;
-extern atomic_t fscache_n_allocs_object_dead;
-extern atomic_t fscache_n_alloc_ops;
-extern atomic_t fscache_n_alloc_op_waits;
-
-extern atomic_t fscache_n_retrievals;
-extern atomic_t fscache_n_retrievals_ok;
-extern atomic_t fscache_n_retrievals_wait;
-extern atomic_t fscache_n_retrievals_nodata;
-extern atomic_t fscache_n_retrievals_nobufs;
-extern atomic_t fscache_n_retrievals_intr;
-extern atomic_t fscache_n_retrievals_nomem;
-extern atomic_t fscache_n_retrievals_object_dead;
-extern atomic_t fscache_n_retrieval_ops;
-extern atomic_t fscache_n_retrieval_op_waits;
-
-extern atomic_t fscache_n_stores;
-extern atomic_t fscache_n_stores_ok;
-extern atomic_t fscache_n_stores_again;
-extern atomic_t fscache_n_stores_nobufs;
-extern atomic_t fscache_n_stores_oom;
-extern atomic_t fscache_n_store_ops;
-extern atomic_t fscache_n_store_calls;
-extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
-extern atomic_t fscache_n_store_pages_over_limit;
-
-extern atomic_t fscache_n_store_vmscan_not_storing;
-extern atomic_t fscache_n_store_vmscan_gone;
-extern atomic_t fscache_n_store_vmscan_busy;
-extern atomic_t fscache_n_store_vmscan_cancelled;
-extern atomic_t fscache_n_store_vmscan_wait;
-
-extern atomic_t fscache_n_marks;
-extern atomic_t fscache_n_uncaches;
-
-extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_null;
-extern atomic_t fscache_n_acquires_no_cache;
-extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_nobufs;
-extern atomic_t fscache_n_acquires_oom;
-
-extern atomic_t fscache_n_invalidates;
-extern atomic_t fscache_n_invalidates_run;
-
-extern atomic_t fscache_n_updates;
-extern atomic_t fscache_n_updates_null;
-extern atomic_t fscache_n_updates_run;
-
-extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_null;
-extern atomic_t fscache_n_relinquishes_waitcrt;
-extern atomic_t fscache_n_relinquishes_retire;
-
-extern atomic_t fscache_n_cookie_index;
-extern atomic_t fscache_n_cookie_data;
-extern atomic_t fscache_n_cookie_special;
-
-extern atomic_t fscache_n_object_alloc;
-extern atomic_t fscache_n_object_no_alloc;
-extern atomic_t fscache_n_object_lookups;
-extern atomic_t fscache_n_object_lookups_negative;
-extern atomic_t fscache_n_object_lookups_positive;
-extern atomic_t fscache_n_object_lookups_timed_out;
-extern atomic_t fscache_n_object_created;
-extern atomic_t fscache_n_object_avail;
-extern atomic_t fscache_n_object_dead;
-
-extern atomic_t fscache_n_checkaux_none;
-extern atomic_t fscache_n_checkaux_okay;
-extern atomic_t fscache_n_checkaux_update;
-extern atomic_t fscache_n_checkaux_obsolete;
-
-extern atomic_t fscache_n_cop_alloc_object;
-extern atomic_t fscache_n_cop_lookup_object;
-extern atomic_t fscache_n_cop_lookup_complete;
-extern atomic_t fscache_n_cop_grab_object;
-extern atomic_t fscache_n_cop_invalidate_object;
-extern atomic_t fscache_n_cop_update_object;
-extern atomic_t fscache_n_cop_drop_object;
-extern atomic_t fscache_n_cop_put_object;
-extern atomic_t fscache_n_cop_sync_cache;
-extern atomic_t fscache_n_cop_attr_changed;
-extern atomic_t fscache_n_cop_read_or_alloc_page;
-extern atomic_t fscache_n_cop_read_or_alloc_pages;
-extern atomic_t fscache_n_cop_allocate_page;
-extern atomic_t fscache_n_cop_allocate_pages;
-extern atomic_t fscache_n_cop_write_page;
-extern atomic_t fscache_n_cop_uncache_page;
-extern atomic_t fscache_n_cop_dissociate_pages;
-
-extern atomic_t fscache_n_cache_no_space_reject;
-extern atomic_t fscache_n_cache_stale_objects;
-extern atomic_t fscache_n_cache_retired_objects;
-extern atomic_t fscache_n_cache_culled_objects;
-
-static inline void fscache_stat(atomic_t *stat)
-{
- atomic_inc(stat);
-}
-
-static inline void fscache_stat_d(atomic_t *stat)
-{
- atomic_dec(stat);
-}
-
-#define __fscache_stat(stat) (stat)
-
-int fscache_stats_show(struct seq_file *m, void *v);
-#else
-
-#define __fscache_stat(stat) (NULL)
-#define fscache_stat(stat) do {} while (0)
-#define fscache_stat_d(stat) do {} while (0)
-#endif
-
-/*
- * raise an event on an object
- * - if the event is not masked for that object, then the object is
- * queued for attention by the thread pool.
- */
-static inline void fscache_raise_event(struct fscache_object *object,
- unsigned event)
-{
- BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS);
-#if 0
- printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n",
- object->debug_id, object->event_mask, (1 << event));
-#endif
- if (!test_and_set_bit(event, &object->events) &&
- test_bit(event, &object->event_mask))
- fscache_enqueue_object(object);
-}
-
-/*
- * get an extra reference to a netfs retrieval context
- */
-static inline
-void *fscache_get_context(struct fscache_cookie *cookie, void *context)
-{
- if (cookie->def->get_context)
- cookie->def->get_context(cookie->netfs_data, context);
- return context;
-}
-
-/*
- * release a reference to a netfs retrieval context
- */
-static inline
-void fscache_put_context(struct fscache_cookie *cookie, void *context)
-{
- if (cookie->def->put_context)
- cookie->def->put_context(cookie->netfs_data, context);
-}
-
-/*
- * Update the auxiliary data on a cookie.
- */
-static inline
-void fscache_update_aux(struct fscache_cookie *cookie, const void *aux_data)
-{
- void *p;
-
- if (!aux_data)
- return;
- if (cookie->aux_len <= sizeof(cookie->inline_aux))
- p = cookie->inline_aux;
- else
- p = cookie->aux;
-
- if (memcmp(p, aux_data, cookie->aux_len) != 0) {
- memcpy(p, aux_data, cookie->aux_len);
- set_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags);
- }
-}
-
-/*****************************************************************************/
-/*
- * debug tracing
- */
-#define dbgprintk(FMT, ...) \
- printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
-
-#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
-
-#define kjournal(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-
-#ifdef __KDEBUG
-#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
-#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
-#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
-
-#elif defined(CONFIG_FSCACHE_DEBUG)
-#define _enter(FMT, ...) \
-do { \
- if (__do_kdebug(ENTER)) \
- kenter(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _leave(FMT, ...) \
-do { \
- if (__do_kdebug(LEAVE)) \
- kleave(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _debug(FMT, ...) \
-do { \
- if (__do_kdebug(DEBUG)) \
- kdebug(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#else
-#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-#endif
-
-/*
- * determine whether a particular optional debugging point should be logged
- * - we need to go through three steps to persuade cpp to correctly join the
- * shorthand in FSCACHE_DEBUG_LEVEL with its prefix
- */
-#define ____do_kdebug(LEVEL, POINT) \
- unlikely((fscache_debug & \
- (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3))))
-#define ___do_kdebug(LEVEL, POINT) \
- ____do_kdebug(LEVEL, POINT)
-#define __do_kdebug(POINT) \
- ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT)
-
-#define FSCACHE_DEBUG_CACHE 0
-#define FSCACHE_DEBUG_COOKIE 1
-#define FSCACHE_DEBUG_PAGE 2
-#define FSCACHE_DEBUG_OPERATION 3
-
-#define FSCACHE_POINT_ENTER 1
-#define FSCACHE_POINT_LEAVE 2
-#define FSCACHE_POINT_DEBUG 4
-
-#ifndef FSCACHE_DEBUG_LEVEL
-#define FSCACHE_DEBUG_LEVEL CACHE
-#endif
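
As an illustration of the three-step pasting described above (not part of the patch): in a file compiled with FSCACHE_DEBUG_LEVEL defined to COOKIE, the _enter() test resolves to a single bit of the debugging mask.

/* Illustrative expansion only:
 *
 *	__do_kdebug(ENTER)
 *	  -> ___do_kdebug(COOKIE, ENTER)
 *	  -> ____do_kdebug(COOKIE, ENTER)
 *	  -> unlikely(fscache_debug &
 *		      (FSCACHE_POINT_ENTER << (FSCACHE_DEBUG_COOKIE * 3)))
 *	  == unlikely(fscache_debug & 0x8)
 *
 * so setting bit 3 via the module's "debug" parameter would enable _enter()
 * tracing in cookie-level code.
 */
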
-
-/*
- * assertions
- */
-#if 1 /* defined(__KDEBUGALL) */
-
-#define ASSERT(X) \
-do { \
- if (unlikely(!(X))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTCMP(X, OP, Y) \
-do { \
- if (unlikely(!((X) OP (Y)))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- pr_err("%lx " #OP " %lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTIF(C, X) \
-do { \
- if (unlikely((C) && !(X))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTIFCMP(C, X, OP, Y) \
-do { \
- if (unlikely((C) && !((X) OP (Y)))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- pr_err("%lx " #OP " %lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- BUG(); \
- } \
-} while (0)
-
-#else
-
-#define ASSERT(X) do {} while (0)
-#define ASSERTCMP(X, OP, Y) do {} while (0)
-#define ASSERTIF(C, X) do {} while (0)
-#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
-
-#endif /* assert or not */
diff --git a/fs/fscache/io.c b/fs/fscache/io.c
deleted file mode 100644
index 8ecc1141802f..000000000000
--- a/fs/fscache/io.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Cache data I/O routines
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL PAGE
-#include <linux/module.h>
-#define FSCACHE_USE_NEW_IO_API
-#include <linux/fscache-cache.h>
-#include <linux/slab.h>
-#include <linux/netfs.h>
-#include "internal.h"
-
-/*
- * Start a cache read operation.
- * - we return:
- * -ENOMEM - out of memory, some pages may be being read
- * -ERESTARTSYS - interrupted, some pages may be being read
- * -ENOBUFS - no backing object or space available in which to cache any
- * pages not being read
- * -ENODATA - no data available in the backing object for some or all of
- * the pages
- * 0 - dispatched a read on all pages
- */
-int __fscache_begin_read_operation(struct netfs_read_request *rreq,
- struct fscache_cookie *cookie)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("rr=%08x", rreq->debug_id);
-
- fscache_stat(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, NULL, NULL, NULL);
- if (!op)
- return -ENOMEM;
- trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- atomic_inc(&object->n_reads);
- __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
-
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_retrieval_ops);
-
- /* we wait for the operation to become active, and then process it
- * *here*, in this thread, and not in the thread pool */
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_retrieval_op_waits),
- __fscache_stat(&fscache_n_retrievals_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- ret = object->cache->ops->begin_read_operation(rreq, op);
-
-error:
- if (ret == -ENOMEM)
- fscache_stat(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
- fscache_stat(&fscache_n_retrievals_nodata);
- else if (ret < 0)
- fscache_stat(&fscache_n_retrievals_nobufs);
- else
- fscache_stat(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- atomic_dec(&object->n_reads);
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- fscache_put_retrieval(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
-nobufs:
- fscache_stat(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_begin_read_operation);
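
A hedged sketch of how a netfs-side caller might act on the return codes listed at the top of this function (example_read_from_server() is an assumed helper; the in-tree entry point is the fscache_begin_read_operation() wrapper in the header):

static int example_begin_read(struct netfs_read_request *rreq,
			      struct fscache_cookie *cookie)
{
	int ret = fscache_begin_read_operation(rreq, cookie);

	switch (ret) {
	case 0:
		return 0;		/* the cache will service the read */
	case -ENOBUFS:
	case -ENODATA:
		return example_read_from_server(rreq);	/* assumed helper */
	default:
		return ret;		/* -ENOMEM, -ERESTARTSYS, ... */
	}
}
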
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
deleted file mode 100644
index 4207f98e405f..000000000000
--- a/fs/fscache/main.c
+++ /dev/null
@@ -1,230 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* General filesystem local caching manager
- *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#define CREATE_TRACE_POINTS
-#include "internal.h"
-
-MODULE_DESCRIPTION("FS Cache Manager");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned fscache_defer_lookup = 1;
-module_param_named(defer_lookup, fscache_defer_lookup, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_defer_lookup,
- "Defer cookie lookup to background thread");
-
-unsigned fscache_defer_create = 1;
-module_param_named(defer_create, fscache_defer_create, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_defer_create,
- "Defer cookie creation to background thread");
-
-unsigned fscache_debug;
-module_param_named(debug, fscache_debug, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_debug,
- "FS-Cache debugging mask");
-
-struct kobject *fscache_root;
-struct workqueue_struct *fscache_object_wq;
-struct workqueue_struct *fscache_op_wq;
-
-DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
-
-/* these values serve as lower bounds, will be adjusted in fscache_init() */
-static unsigned fscache_object_max_active = 4;
-static unsigned fscache_op_max_active = 2;
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *fscache_sysctl_header;
-
-static int fscache_max_active_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct workqueue_struct **wqp = table->extra1;
- unsigned int *datap = table->data;
- int ret;
-
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret == 0)
- workqueue_set_max_active(*wqp, *datap);
- return ret;
-}
-
-static struct ctl_table fscache_sysctls[] = {
- {
- .procname = "object_max_active",
- .data = &fscache_object_max_active,
- .maxlen = sizeof(unsigned),
- .mode = 0644,
- .proc_handler = fscache_max_active_sysctl,
- .extra1 = &fscache_object_wq,
- },
- {
- .procname = "operation_max_active",
- .data = &fscache_op_max_active,
- .maxlen = sizeof(unsigned),
- .mode = 0644,
- .proc_handler = fscache_max_active_sysctl,
- .extra1 = &fscache_op_wq,
- },
- {}
-};
-
-static struct ctl_table fscache_sysctls_root[] = {
- {
- .procname = "fscache",
- .mode = 0555,
- .child = fscache_sysctls,
- },
- {}
-};
-#endif
-
-/*
- * Mixing scores (in bits) for (7,20):
- * Input delta: 1-bit 2-bit
- * 1 round: 330.3 9201.6
- * 2 rounds: 1246.4 25475.4
- * 3 rounds: 1907.1 31295.1
- * 4 rounds: 2042.3 31718.6
- * Perfect: 2048 31744
- * (32*64) (32*31/2 * 64)
- */
-#define HASH_MIX(x, y, a) \
- ( x ^= (a), \
- y ^= x, x = rol32(x, 7),\
- x += y, y = rol32(y,20),\
- y *= 9 )
-
-static inline unsigned int fold_hash(unsigned long x, unsigned long y)
-{
- /* Use arch-optimized multiply if one exists */
- return __hash_32(y ^ __hash_32(x));
-}
-
-/*
- * Generate a hash. This is derived from full_name_hash(), but we want to be
- * sure it is arch independent and that it doesn't change as bits of the
- * computed hash value might appear on disk. The caller also guarantees that
- * the hashed data will be a series of aligned 32-bit words.
- */
-unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
-{
- unsigned int a, x = 0, y = salt;
-
- for (; n; n--) {
- a = *data++;
- HASH_MIX(x, y, a);
- }
- return fold_hash(x, y);
-}
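
A brief usage sketch (hypothetical names): the contract above means any key must be padded to a whole number of 32-bit words before being hashed, and the salt parameter lets a previous hash be folded in.

/* Hypothetical caller: key_words must be 32-bit aligned, n_words whole. */
static unsigned int example_hash_key(unsigned int *key_words,
				     unsigned int n_words)
{
	return fscache_hash(0, key_words, n_words);	/* salt of 0 here */
}
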
-
-/*
- * initialise the fs caching module
- */
-static int __init fscache_init(void)
-{
- unsigned int nr_cpus = num_possible_cpus();
- unsigned int cpu;
- int ret;
-
- fscache_object_max_active =
- clamp_val(nr_cpus,
- fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE);
-
- ret = -ENOMEM;
- fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND,
- fscache_object_max_active);
- if (!fscache_object_wq)
- goto error_object_wq;
-
- fscache_op_max_active =
- clamp_val(fscache_object_max_active / 2,
- fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);
-
- ret = -ENOMEM;
- fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
- fscache_op_max_active);
- if (!fscache_op_wq)
- goto error_op_wq;
-
- for_each_possible_cpu(cpu)
- init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
-
- ret = fscache_proc_init();
- if (ret < 0)
- goto error_proc;
-
-#ifdef CONFIG_SYSCTL
- ret = -ENOMEM;
- fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root);
- if (!fscache_sysctl_header)
- goto error_sysctl;
-#endif
-
- fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
- sizeof(struct fscache_cookie),
- 0, 0, NULL);
- if (!fscache_cookie_jar) {
- pr_notice("Failed to allocate a cookie jar\n");
- ret = -ENOMEM;
- goto error_cookie_jar;
- }
-
- fscache_root = kobject_create_and_add("fscache", kernel_kobj);
- if (!fscache_root)
- goto error_kobj;
-
- pr_notice("Loaded\n");
- return 0;
-
-error_kobj:
- kmem_cache_destroy(fscache_cookie_jar);
-error_cookie_jar:
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(fscache_sysctl_header);
-error_sysctl:
-#endif
- fscache_proc_cleanup();
-error_proc:
- destroy_workqueue(fscache_op_wq);
-error_op_wq:
- destroy_workqueue(fscache_object_wq);
-error_object_wq:
- return ret;
-}
-
-fs_initcall(fscache_init);
-
-/*
- * clean up on module removal
- */
-static void __exit fscache_exit(void)
-{
- _enter("");
-
- kobject_put(fscache_root);
- kmem_cache_destroy(fscache_cookie_jar);
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(fscache_sysctl_header);
-#endif
- fscache_proc_cleanup();
- destroy_workqueue(fscache_op_wq);
- destroy_workqueue(fscache_object_wq);
- pr_notice("Unloaded\n");
-}
-
-module_exit(fscache_exit);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
deleted file mode 100644
index d6bdb7b5e723..000000000000
--- a/fs/fscache/netfs.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache netfs (client) registration
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-/*
- * register a network filesystem for caching
- */
-int __fscache_register_netfs(struct fscache_netfs *netfs)
-{
- struct fscache_cookie *candidate, *cookie;
-
- _enter("{%s}", netfs->name);
-
- /* allocate a cookie for the primary index */
- candidate = fscache_alloc_cookie(&fscache_fsdef_index,
- &fscache_fsdef_netfs_def,
- netfs->name, strlen(netfs->name),
- &netfs->version, sizeof(netfs->version),
- netfs, 0);
- if (!candidate) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-
- candidate->flags = 1 << FSCACHE_COOKIE_ENABLED;
-
- /* check the netfs type is not already present */
- cookie = fscache_hash_cookie(candidate);
- if (!cookie)
- goto already_registered;
- if (cookie != candidate) {
- trace_fscache_cookie(candidate->debug_id, 1, fscache_cookie_discard);
- fscache_free_cookie(candidate);
- }
-
- fscache_cookie_get(cookie->parent, fscache_cookie_get_register_netfs);
- atomic_inc(&cookie->parent->n_children);
-
- netfs->primary_index = cookie;
-
- pr_notice("Netfs '%s' registered for caching\n", netfs->name);
- trace_fscache_netfs(netfs);
- _leave(" = 0");
- return 0;
-
-already_registered:
- fscache_cookie_put(candidate, fscache_cookie_put_dup_netfs);
- _leave(" = -EEXIST");
- return -EEXIST;
-}
-EXPORT_SYMBOL(__fscache_register_netfs);
-
-/*
- * unregister a network filesystem from the cache
- * - all cookies must have been released first
- */
-void __fscache_unregister_netfs(struct fscache_netfs *netfs)
-{
- _enter("{%s.%u}", netfs->name, netfs->version);
-
- fscache_relinquish_cookie(netfs->primary_index, NULL, false);
- pr_notice("Netfs '%s' unregistered from caching\n", netfs->name);
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_unregister_netfs);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
deleted file mode 100644
index 6a675652129b..000000000000
--- a/fs/fscache/object.c
+++ /dev/null
@@ -1,1125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache object state machine handler
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * See Documentation/filesystems/caching/object.rst for a description of the
- * object state machine and the in-kernel representations.
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-#include "internal.h"
-
-static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
-static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
-static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
-static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
-static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
-static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
-static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
-
-#define __STATE_NAME(n) fscache_osm_##n
-#define STATE(n) (&__STATE_NAME(n))
-
-/*
- * Define a work state. Work states are execution states. No event processing
- * is performed by them. The function attached to a work state returns a
- * pointer indicating the next state to which the state machine should
- * transition. Returning NO_TRANSIT repeats the current state, but goes back
- * to the scheduler first.
- */
-#define WORK_STATE(n, sn, f) \
- const struct fscache_state __STATE_NAME(n) = { \
- .name = #n, \
- .short_name = sn, \
- .work = f \
- }
-
-/*
- * Returns from work states.
- */
-#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
-
-#define NO_TRANSIT ((struct fscache_state *)NULL)
-
-/*
- * Define a wait state. Wait states are event processing states. No execution
- * is performed by them. Wait states are just tables of "if event X occurs,
- * clear it and transition to state Y". The dispatcher returns to the
- * scheduler if none of the events in which the wait state has an interest are
- * currently pending.
- */
-#define WAIT_STATE(n, sn, ...) \
- const struct fscache_state __STATE_NAME(n) = { \
- .name = #n, \
- .short_name = sn, \
- .work = NULL, \
- .transitions = { __VA_ARGS__, { 0, NULL } } \
- }
-
-#define TRANSIT_TO(state, emask) \
- { .events = (emask), .transit_to = STATE(state) }
-
-/*
- * The object state machine.
- */
-static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
-static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
-static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
-static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
-static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
-static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
-
-static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
-static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);
-
-static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
-static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
-static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
-static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
-
-static WAIT_STATE(WAIT_FOR_INIT, "?INI",
- TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
-
-static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
- TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));
-
-static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
- TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
- TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
- TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
-
-static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
- TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
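
For illustration (not part of the patch), a wait state defined with the macros above is just a transition table; WAIT_FOR_PARENT, for instance, is equivalent to writing out roughly:

static const struct fscache_state fscache_osm_WAIT_FOR_PARENT = {
	.name		= "WAIT_FOR_PARENT",
	.short_name	= "?PRN",
	.work		= NULL,
	.transitions	= {
		{ .events	= 1 << FSCACHE_OBJECT_EV_PARENT_READY,
		  .transit_to	= &fscache_osm_PARENT_READY },
		{ 0, NULL }
	},
};
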
-
-/*
- * Out-of-band event transition tables. These are for handling unexpected
- * events, such as an I/O error. If an OOB event occurs, the state machine
- * clears and disables the event and forces a transition to the nominated work
- * state (any currently executing work state will complete first).
- *
- * In such a situation, object->state remembers the state the machine should
- * have been in or moved to, and returning NO_TRANSIT resumes from there.
- */
-static const struct fscache_transition fscache_osm_init_oob[] = {
- TRANSIT_TO(ABORT_INIT,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static const struct fscache_transition fscache_osm_lookup_oob[] = {
- TRANSIT_TO(LOOKUP_FAILURE,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static const struct fscache_transition fscache_osm_run_oob[] = {
- TRANSIT_TO(KILL_OBJECT,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static int fscache_get_object(struct fscache_object *,
- enum fscache_obj_ref_trace);
-static void fscache_put_object(struct fscache_object *,
- enum fscache_obj_ref_trace);
-static bool fscache_enqueue_dependents(struct fscache_object *, int);
-static void fscache_dequeue_object(struct fscache_object *);
-static void fscache_update_aux_data(struct fscache_object *);
-
-/*
- * we need to notify the parent when an op completes that we had outstanding
- * upon it
- */
-static inline void fscache_done_parent_op(struct fscache_object *object)
-{
- struct fscache_object *parent = object->parent;
-
- _enter("OBJ%x {OBJ%x,%x}",
- object->debug_id, parent->debug_id, parent->n_ops);
-
- spin_lock_nested(&parent->lock, 1);
- parent->n_obj_ops--;
- parent->n_ops--;
- if (parent->n_ops == 0)
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
- spin_unlock(&parent->lock);
-}
-
-/*
- * Object state machine dispatcher.
- */
-static void fscache_object_sm_dispatcher(struct fscache_object *object)
-{
- const struct fscache_transition *t;
- const struct fscache_state *state, *new_state;
- unsigned long events, event_mask;
- bool oob;
- int event = -1;
-
- ASSERT(object != NULL);
-
- _enter("{OBJ%x,%s,%lx}",
- object->debug_id, object->state->name, object->events);
-
- event_mask = object->event_mask;
-restart:
- object->event_mask = 0; /* Mask normal event handling */
- state = object->state;
-restart_masked:
- events = object->events;
-
- /* Handle any out-of-band events (typically an error) */
- if (events & object->oob_event_mask) {
- _debug("{OBJ%x} oob %lx",
- object->debug_id, events & object->oob_event_mask);
- oob = true;
- for (t = object->oob_table; t->events; t++) {
- if (events & t->events) {
- state = t->transit_to;
- ASSERT(state->work != NULL);
- event = fls(events & t->events) - 1;
- __clear_bit(event, &object->oob_event_mask);
- clear_bit(event, &object->events);
- goto execute_work_state;
- }
- }
- }
- oob = false;
-
- /* Wait states are just transition tables */
- if (!state->work) {
- if (events & event_mask) {
- for (t = state->transitions; t->events; t++) {
- if (events & t->events) {
- new_state = t->transit_to;
- event = fls(events & t->events) - 1;
- trace_fscache_osm(object, state,
- true, false, event);
- clear_bit(event, &object->events);
- _debug("{OBJ%x} ev %d: %s -> %s",
- object->debug_id, event,
- state->name, new_state->name);
- object->state = state = new_state;
- goto execute_work_state;
- }
- }
-
- /* The event mask didn't include all the tabled bits */
- BUG();
- }
- /* Randomly woke up */
- goto unmask_events;
- }
-
-execute_work_state:
- _debug("{OBJ%x} exec %s", object->debug_id, state->name);
-
- trace_fscache_osm(object, state, false, oob, event);
- new_state = state->work(object, event);
- event = -1;
- if (new_state == NO_TRANSIT) {
- _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
- if (unlikely(state == STATE(OBJECT_DEAD))) {
- _leave(" [dead]");
- return;
- }
- fscache_enqueue_object(object);
- event_mask = object->oob_event_mask;
- goto unmask_events;
- }
-
- _debug("{OBJ%x} %s -> %s",
- object->debug_id, state->name, new_state->name);
- object->state = state = new_state;
-
- if (state->work) {
- if (unlikely(state == STATE(OBJECT_DEAD))) {
- _leave(" [dead]");
- return;
- }
- goto restart_masked;
- }
-
- /* Transited to wait state */
- event_mask = object->oob_event_mask;
- for (t = state->transitions; t->events; t++)
- event_mask |= t->events;
-
-unmask_events:
- object->event_mask = event_mask;
- smp_mb();
- events = object->events;
- if (events & event_mask)
- goto restart;
- _leave(" [msk %lx]", event_mask);
-}
-
-/*
- * execute an object
- */
-static void fscache_object_work_func(struct work_struct *work)
-{
- struct fscache_object *object =
- container_of(work, struct fscache_object, work);
-
- _enter("{OBJ%x}", object->debug_id);
-
- fscache_object_sm_dispatcher(object);
- fscache_put_object(object, fscache_obj_put_work);
-}
-
-/**
- * fscache_object_init - Initialise a cache object description
- * @object: Object description
- * @cookie: Cookie object will be attached to
- * @cache: Cache in which backing object will be found
- *
- * Initialise a cache object description to its basic values.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-void fscache_object_init(struct fscache_object *object,
- struct fscache_cookie *cookie,
- struct fscache_cache *cache)
-{
- const struct fscache_transition *t;
-
- atomic_inc(&cache->object_count);
-
- object->state = STATE(WAIT_FOR_INIT);
- object->oob_table = fscache_osm_init_oob;
- object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
- spin_lock_init(&object->lock);
- INIT_LIST_HEAD(&object->cache_link);
- INIT_HLIST_NODE(&object->cookie_link);
- INIT_WORK(&object->work, fscache_object_work_func);
- INIT_LIST_HEAD(&object->dependents);
- INIT_LIST_HEAD(&object->dep_link);
- INIT_LIST_HEAD(&object->pending_ops);
- object->n_children = 0;
- object->n_ops = object->n_in_progress = object->n_exclusive = 0;
- object->events = 0;
- object->store_limit = 0;
- object->store_limit_l = 0;
- object->cache = cache;
- object->cookie = cookie;
- fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
- object->parent = NULL;
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- RB_CLEAR_NODE(&object->objlist_link);
-#endif
-
- object->oob_event_mask = 0;
- for (t = object->oob_table; t->events; t++)
- object->oob_event_mask |= t->events;
- object->event_mask = object->oob_event_mask;
- for (t = object->state->transitions; t->events; t++)
- object->event_mask |= t->events;
-}
-EXPORT_SYMBOL(fscache_object_init);
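
For context, a hedged sketch of the backend side of this call (struct and function names hypothetical): a cache's ->alloc_object() typically embeds the generic object in a backend-private record and initialises the generic part here.

struct example_object {
	struct fscache_object	fscache;
	/* ... backend-private state ... */
};

static struct fscache_object *example_alloc_object(struct fscache_cache *cache,
						   struct fscache_cookie *cookie)
{
	struct example_object *xobj;

	xobj = kzalloc(sizeof(*xobj), GFP_KERNEL);
	if (!xobj)
		return NULL;

	fscache_object_init(&xobj->fscache, cookie, cache);
	return &xobj->fscache;
}
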
-
-/*
- * Mark the object as no longer being live, making sure that we synchronise
- * against op submission.
- */
-static inline void fscache_mark_object_dead(struct fscache_object *object)
-{
- spin_lock(&object->lock);
- clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
- spin_unlock(&object->lock);
-}
-
-/*
- * Abort object initialisation before we start it.
- */
-static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_event_mask = 0;
- fscache_dequeue_object(object);
- return transit_to(KILL_OBJECT);
-}
-
-/*
- * initialise an object
- * - check the specified object's parent to see if we can make use of it
- * immediately to do a creation
- * - we may need to start the process of creating a parent and we need to wait
- * for the parent's lookup and creation to complete if it's not there yet
- */
-static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent;
- bool success;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- ASSERT(list_empty(&object->dep_link));
-
- parent = object->parent;
- if (!parent) {
- _leave(" [no parent]");
- return transit_to(DROP_OBJECT);
- }
-
- _debug("parent: %s of:%lx", parent->state->name, parent->flags);
-
- if (fscache_object_is_dying(parent)) {
- _leave(" [bad parent]");
- return transit_to(DROP_OBJECT);
- }
-
- if (fscache_object_is_available(parent)) {
- _leave(" [ready]");
- return transit_to(PARENT_READY);
- }
-
- _debug("wait");
-
- spin_lock(&parent->lock);
- fscache_stat(&fscache_n_cop_grab_object);
- success = false;
- if (fscache_object_is_live(parent) &&
- object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
- list_add(&object->dep_link, &parent->dependents);
- success = true;
- }
- fscache_stat_d(&fscache_n_cop_grab_object);
- spin_unlock(&parent->lock);
- if (!success) {
- _leave(" [grab failed]");
- return transit_to(DROP_OBJECT);
- }
-
- /* fscache_acquire_non_index_cookie() uses this
- * to wake the chain up */
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
- _leave(" [wait]");
- return transit_to(WAIT_FOR_PARENT);
-}
-
-/*
- * Once the parent object is ready, we should kick off our lookup op.
- */
-static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent = object->parent;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- ASSERT(parent != NULL);
-
- spin_lock(&parent->lock);
- parent->n_ops++;
- parent->n_obj_ops++;
- spin_unlock(&parent->lock);
-
- _leave("");
- return transit_to(LOOK_UP_OBJECT);
-}
-
-/*
- * look an object up in the cache from which it was allocated
- * - we hold an "access lock" on the parent object, so the parent object cannot
- * be withdrawn by either party till we've finished
- */
-static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
- int event)
-{
- struct fscache_cookie *cookie = object->cookie;
- struct fscache_object *parent = object->parent;
- int ret;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_table = fscache_osm_lookup_oob;
-
- ASSERT(parent != NULL);
- ASSERTCMP(parent->n_ops, >, 0);
- ASSERTCMP(parent->n_obj_ops, >, 0);
-
- /* make sure the parent is still available */
- ASSERT(fscache_object_is_available(parent));
-
- if (fscache_object_is_dying(parent) ||
- test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
- !fscache_use_cookie(object)) {
- _leave(" [unavailable]");
- return transit_to(LOOKUP_FAILURE);
- }
-
- _debug("LOOKUP \"%s\" in \"%s\"",
- cookie->def->name, object->cache->tag->name);
-
- fscache_stat(&fscache_n_object_lookups);
- fscache_stat(&fscache_n_cop_lookup_object);
- ret = object->cache->ops->lookup_object(object);
- fscache_stat_d(&fscache_n_cop_lookup_object);
-
- fscache_unuse_cookie(object);
-
- if (ret == -ETIMEDOUT) {
- /* probably stuck behind another object, so move this one to
- * the back of the queue */
- fscache_stat(&fscache_n_object_lookups_timed_out);
- _leave(" [timeout]");
- return NO_TRANSIT;
- }
-
- if (ret < 0) {
- _leave(" [error]");
- return transit_to(LOOKUP_FAILURE);
- }
-
- _leave(" [ok]");
- return transit_to(OBJECT_AVAILABLE);
-}
-
-/**
- * fscache_object_lookup_negative - Note negative cookie lookup
- * @object: Object pointing to cookie to mark
- *
- * Note negative lookup, permitting those waiting to read data from an already
- * existing backing object to continue as there's no data for them to read.
- */
-void fscache_object_lookup_negative(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
-
- if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
- fscache_stat(&fscache_n_object_lookups_negative);
-
- /* Allow write requests to begin stacking up and read requests to begin
- * returning ENODATA.
- */
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
- }
- _leave("");
-}
-EXPORT_SYMBOL(fscache_object_lookup_negative);
-
-/**
- * fscache_obtained_object - Note successful object lookup or creation
- * @object: Object pointing to cookie to mark
- *
- * Note successful lookup and/or creation, permitting those waiting to write
- * data to a backing object to continue.
- *
- * Note that after calling this, an object's cookie may be relinquished by the
- * netfs, and so must be accessed with object lock held.
- */
-void fscache_obtained_object(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
-
- /* if we were still looking up, then we must have a positive lookup
- * result, in which case there may be data available */
- if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
- fscache_stat(&fscache_n_object_lookups_positive);
-
- /* We do (presumably) have data */
- clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- /* Allow write requests to begin stacking up and read requests
- * to begin shovelling data.
- */
- clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
- } else {
- fscache_stat(&fscache_n_object_created);
- }
-
- set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
- _leave("");
-}
-EXPORT_SYMBOL(fscache_obtained_object);
-
-/*
- * handle an object that has just become available
- */
-static const struct fscache_state *fscache_object_available(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_table = fscache_osm_run_oob;
-
- spin_lock(&object->lock);
-
- fscache_done_parent_op(object);
- if (object->n_in_progress == 0) {
- if (object->n_ops > 0) {
- ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
- fscache_start_operations(object);
- } else {
- ASSERT(list_empty(&object->pending_ops));
- }
- }
- spin_unlock(&object->lock);
-
- fscache_stat(&fscache_n_cop_lookup_complete);
- object->cache->ops->lookup_complete(object);
- fscache_stat_d(&fscache_n_cop_lookup_complete);
-
- fscache_stat(&fscache_n_object_avail);
-
- _leave("");
- return transit_to(JUMPSTART_DEPS);
-}
-
-/*
- * Wake up this object's dependent objects now that we've become available.
- */
-static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
- return NO_TRANSIT; /* Not finished; requeue */
- return transit_to(WAIT_FOR_CMD);
-}
-
-/*
- * Handle lookup or creation failure.
- */
-static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
- int event)
-{
- struct fscache_cookie *cookie;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_event_mask = 0;
-
- fscache_stat(&fscache_n_cop_lookup_complete);
- object->cache->ops->lookup_complete(object);
- fscache_stat_d(&fscache_n_cop_lookup_complete);
-
- set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
-
- cookie = object->cookie;
- set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
- if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-
- fscache_done_parent_op(object);
- return transit_to(KILL_OBJECT);
-}
-
-/*
- * Wait for completion of all active operations on this object and the death of
- * all child objects of this object.
- */
-static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x,%d,%d},%d",
- object->debug_id, object->n_ops, object->n_children, event);
-
- fscache_mark_object_dead(object);
- object->oob_event_mask = 0;
-
- if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
- /* Reject any new read/write ops and abort any that are pending. */
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_cancel_all_ops(object);
- }
-
- if (list_empty(&object->dependents) &&
- object->n_ops == 0 &&
- object->n_children == 0)
- return transit_to(DROP_OBJECT);
-
- if (object->n_in_progress == 0) {
- spin_lock(&object->lock);
- if (object->n_ops > 0 && object->n_in_progress == 0)
- fscache_start_operations(object);
- spin_unlock(&object->lock);
- }
-
- if (!list_empty(&object->dependents))
- return transit_to(KILL_DEPENDENTS);
-
- return transit_to(WAIT_FOR_CLEARANCE);
-}
-
-/*
- * Kill dependent objects.
- */
-static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
- return NO_TRANSIT; /* Not finished */
- return transit_to(WAIT_FOR_CLEARANCE);
-}
-
-/*
- * Drop an object's attachments
- */
-static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent = object->parent;
- struct fscache_cookie *cookie = object->cookie;
- struct fscache_cache *cache = object->cache;
- bool awaken = false;
-
- _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);
-
- ASSERT(cookie != NULL);
- ASSERT(!hlist_unhashed(&object->cookie_link));
-
- if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
- _debug("final update");
- fscache_update_aux_data(object);
- }
-
- /* Make sure the cookie no longer points here and that the netfs isn't
- * waiting for us.
- */
- spin_lock(&cookie->lock);
- hlist_del_init(&object->cookie_link);
- if (hlist_empty(&cookie->backing_objects) &&
- test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- awaken = true;
- spin_unlock(&cookie->lock);
-
- if (awaken)
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-
- /* Prevent a race with our last child, which has to signal EV_CLEARED
- * before dropping our spinlock.
- */
- spin_lock(&object->lock);
- spin_unlock(&object->lock);
-
- /* Discard from the cache's collection of objects */
- spin_lock(&cache->object_list_lock);
- list_del_init(&object->cache_link);
- spin_unlock(&cache->object_list_lock);
-
- fscache_stat(&fscache_n_cop_drop_object);
- cache->ops->drop_object(object);
- fscache_stat_d(&fscache_n_cop_drop_object);
-
- /* The parent object wants to know when all its dependents have gone */
- if (parent) {
- _debug("release parent OBJ%x {%d}",
- parent->debug_id, parent->n_children);
-
- spin_lock(&parent->lock);
- parent->n_children--;
- if (parent->n_children == 0)
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
- spin_unlock(&parent->lock);
- object->parent = NULL;
- }
-
- /* this just shifts the object release to the work processor */
- fscache_put_object(object, fscache_obj_put_drop_obj);
- fscache_stat(&fscache_n_object_dead);
-
- _leave("");
- return transit_to(OBJECT_DEAD);
-}
-
-/*
- * get a ref on an object
- */
-static int fscache_get_object(struct fscache_object *object,
- enum fscache_obj_ref_trace why)
-{
- int ret;
-
- fscache_stat(&fscache_n_cop_grab_object);
- ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
- fscache_stat_d(&fscache_n_cop_grab_object);
- return ret;
-}
-
-/*
- * Discard a ref on an object
- */
-static void fscache_put_object(struct fscache_object *object,
- enum fscache_obj_ref_trace why)
-{
- fscache_stat(&fscache_n_cop_put_object);
- object->cache->ops->put_object(object, why);
- fscache_stat_d(&fscache_n_cop_put_object);
-}
-
-/**
- * fscache_object_destroy - Note that a cache object is about to be destroyed
- * @object: The object to be destroyed
- *
- * Note the imminent destruction and deallocation of a cache object record.
- */
-void fscache_object_destroy(struct fscache_object *object)
-{
- /* We can get rid of the cookie now */
- fscache_cookie_put(object->cookie, fscache_cookie_put_object);
- object->cookie = NULL;
-}
-EXPORT_SYMBOL(fscache_object_destroy);
-
-/*
- * enqueue an object for metadata-type processing
- */
-void fscache_enqueue_object(struct fscache_object *object)
-{
- _enter("{OBJ%x}", object->debug_id);
-
- if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
- wait_queue_head_t *cong_wq =
- &get_cpu_var(fscache_object_cong_wait);
-
- if (queue_work(fscache_object_wq, &object->work)) {
- if (fscache_object_congested())
- wake_up(cong_wq);
- } else
- fscache_put_object(object, fscache_obj_put_queue);
-
- put_cpu_var(fscache_object_cong_wait);
- }
-}
-
-/**
- * fscache_object_sleep_till_congested - Sleep until object wq is congested
- * @timeoutp: Scheduler sleep timeout
- *
- * Allow an object handler to sleep until the object workqueue is congested.
- *
- * The caller must set up a wake up event before calling this and must have set
- * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
- * condition before calling this function as no test is made here.
- *
- * %true is returned if the object wq is congested, %false otherwise.
- */
-bool fscache_object_sleep_till_congested(signed long *timeoutp)
-{
- wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
- DEFINE_WAIT(wait);
-
- if (fscache_object_congested())
- return true;
-
- add_wait_queue_exclusive(cong_wq, &wait);
- if (!fscache_object_congested())
- *timeoutp = schedule_timeout(*timeoutp);
- finish_wait(cong_wq, &wait);
-
- return fscache_object_congested();
-}
-EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
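
A hedged sketch of the calling pattern the kernel-doc above requires (names hypothetical); the caller is assumed to have armed its own wake-up source for example_done() before entering the loop:

static bool example_wait_or_congested(struct example_state *st,
				      signed long timeout)
{
	while (timeout > 0 && !example_done(st)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (example_done(st))
			break;
		if (fscache_object_sleep_till_congested(&timeout))
			break;	/* back off: the object wq is congested */
	}
	__set_current_state(TASK_RUNNING);
	return example_done(st);
}
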
-
-/*
- * Enqueue the dependents of an object for metadata-type processing.
- *
- * If we don't manage to finish the list before the scheduler wants to run
- * again then return false immediately. We return true if the list was
- * cleared.
- */
-static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
-{
- struct fscache_object *dep;
- bool ret = true;
-
- _enter("{OBJ%x}", object->debug_id);
-
- if (list_empty(&object->dependents))
- return true;
-
- spin_lock(&object->lock);
-
- while (!list_empty(&object->dependents)) {
- dep = list_entry(object->dependents.next,
- struct fscache_object, dep_link);
- list_del_init(&dep->dep_link);
-
- fscache_raise_event(dep, event);
- fscache_put_object(dep, fscache_obj_put_enq_dep);
-
- if (!list_empty(&object->dependents) && need_resched()) {
- ret = false;
- break;
- }
- }
-
- spin_unlock(&object->lock);
- return ret;
-}
-
-/*
- * remove an object from whatever queue it's waiting on
- */
-static void fscache_dequeue_object(struct fscache_object *object)
-{
- _enter("{OBJ%x}", object->debug_id);
-
- if (!list_empty(&object->dep_link)) {
- spin_lock(&object->parent->lock);
- list_del_init(&object->dep_link);
- spin_unlock(&object->parent->lock);
- }
-
- _leave("");
-}
-
-/**
- * fscache_check_aux - Ask the netfs whether an object on disk is still valid
- * @object: The object to ask about
- * @data: The auxiliary data for the object
- * @datalen: The size of the auxiliary data
- * @object_size: The size of the object according to the server.
- *
- * This function consults the netfs about the coherency state of an object.
- * The caller must be holding a ref on cookie->n_active (held by
- * fscache_look_up_object() on behalf of the cache backend during object lookup
- * and creation).
- */
-enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data, uint16_t datalen,
- loff_t object_size)
-{
- enum fscache_checkaux result;
-
- if (!object->cookie->def->check_aux) {
- fscache_stat(&fscache_n_checkaux_none);
- return FSCACHE_CHECKAUX_OKAY;
- }
-
- result = object->cookie->def->check_aux(object->cookie->netfs_data,
- data, datalen, object_size);
- switch (result) {
- /* entry okay as is */
- case FSCACHE_CHECKAUX_OKAY:
- fscache_stat(&fscache_n_checkaux_okay);
- break;
-
- /* entry requires update */
- case FSCACHE_CHECKAUX_NEEDS_UPDATE:
- fscache_stat(&fscache_n_checkaux_update);
- break;
-
- /* entry requires deletion */
- case FSCACHE_CHECKAUX_OBSOLETE:
- fscache_stat(&fscache_n_checkaux_obsolete);
- break;
-
- default:
- BUG();
- }
-
- return result;
-}
-EXPORT_SYMBOL(fscache_check_aux);
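
For context, a hedged sketch of the netfs side of this call (struct, field and function names hypothetical): a ->check_aux() implementation compares the auxiliary blob stored with the object against the inode's current coherency data.

struct example_aux {
	__le64	mtime_sec;
	__le64	size;
};

static enum fscache_checkaux example_check_aux(void *cookie_netfs_data,
					       const void *data,
					       uint16_t datalen,
					       loff_t object_size)
{
	struct example_inode *ei = cookie_netfs_data;	/* assumed type */
	const struct example_aux *aux = data;

	if (datalen != sizeof(*aux))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (le64_to_cpu(aux->mtime_sec) != ei->mtime_sec ||
	    le64_to_cpu(aux->size) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}
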
-
-/*
- * Asynchronously invalidate an object.
- */
-static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
- int event)
-{
- struct fscache_operation *op;
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- /* We're going to need the cookie. If the cookie is not available then
- * retire the object instead.
- */
- if (!fscache_use_cookie(object)) {
- ASSERT(radix_tree_empty(&object->cookie->stores));
- set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
- _leave(" [no cookie]");
- return transit_to(KILL_OBJECT);
- }
-
- /* Reject any new read/write ops and abort any that are pending. */
- fscache_invalidate_writes(cookie);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_cancel_all_ops(object);
-
- /* Now we have to wait for in-progress reads and writes */
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op)
- goto nomem;
-
- fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
- NULL, NULL);
- op->flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_EXCLUSIVE) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);
-
- spin_lock(&cookie->lock);
- if (fscache_submit_exclusive_op(object, op) < 0)
- goto submit_op_failed;
- spin_unlock(&cookie->lock);
- fscache_put_operation(op);
-
- /* Once we've completed the invalidation, we know there will be no data
- * stored in the cache and thus we can reinstate the data-check-skip
- * optimisation.
- */
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
-
- /* We can allow read and write requests to come in once again. They'll
- * queue up behind our exclusive invalidation operation.
- */
- if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- _leave(" [ok]");
- return transit_to(UPDATE_OBJECT);
-
-nomem:
- fscache_mark_object_dead(object);
- fscache_unuse_cookie(object);
- _leave(" [ENOMEM]");
- return transit_to(KILL_OBJECT);
-
-submit_op_failed:
- fscache_mark_object_dead(object);
- spin_unlock(&cookie->lock);
- fscache_unuse_cookie(object);
- kfree(op);
- _leave(" [EIO]");
- return transit_to(KILL_OBJECT);
-}
-
-static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
- int event)
-{
- const struct fscache_state *s;
-
- fscache_stat(&fscache_n_invalidates_run);
- fscache_stat(&fscache_n_cop_invalidate_object);
- s = _fscache_invalidate_object(object, event);
- fscache_stat_d(&fscache_n_cop_invalidate_object);
- return s;
-}
-
-/*
- * Update auxiliary data.
- */
-static void fscache_update_aux_data(struct fscache_object *object)
-{
- fscache_stat(&fscache_n_updates_run);
- fscache_stat(&fscache_n_cop_update_object);
- object->cache->ops->update_object(object);
- fscache_stat_d(&fscache_n_cop_update_object);
-}
-
-/*
- * Asynchronously update an object.
- */
-static const struct fscache_state *fscache_update_object(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- fscache_update_aux_data(object);
-
- _leave("");
- return transit_to(WAIT_FOR_CMD);
-}
-
-/**
- * fscache_object_retrying_stale - Note retrying stale object
- * @object: The object that will be retried
- *
- * Note that an object lookup found an on-disk object that was adjudged to be
- * stale and has been deleted. The lookup will be retried.
- */
-void fscache_object_retrying_stale(struct fscache_object *object)
-{
- fscache_stat(&fscache_n_cache_stale_objects);
-}
-EXPORT_SYMBOL(fscache_object_retrying_stale);
-
-/**
- * fscache_object_mark_killed - Note that an object was killed
- * @object: The object that was killed
- * @why: The reason the object was killed.
- *
- * Note that an object was killed and account for the reason in the relevant
- * statistic.  If the object had already been marked as killed by the cache,
- * an error is logged instead.
- */
-void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why)
-{
- if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
- pr_err("Error: Object already killed by cache [%s]\n",
- object->cache->identifier);
- return;
- }
-
- switch (why) {
- case FSCACHE_OBJECT_NO_SPACE:
- fscache_stat(&fscache_n_cache_no_space_reject);
- break;
- case FSCACHE_OBJECT_IS_STALE:
- fscache_stat(&fscache_n_cache_stale_objects);
- break;
- case FSCACHE_OBJECT_WAS_RETIRED:
- fscache_stat(&fscache_n_cache_retired_objects);
- break;
- case FSCACHE_OBJECT_WAS_CULLED:
- fscache_stat(&fscache_n_cache_culled_objects);
- break;
- }
-}
-EXPORT_SYMBOL(fscache_object_mark_killed);
-
-/*
- * The object is dead. We can get here if an object gets queued by an event
- * that would lead to its death (such as EV_KILL) when the dispatcher is
- * already running (and so can be requeued) but hasn't yet cleared the event
- * mask.
- */
-static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
- int event)
-{
- if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
- &object->flags))
- return NO_TRANSIT;
-
- WARN(true, "FS-Cache object redispatched after death");
- return NO_TRANSIT;
-}
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
deleted file mode 100644
index e002cdfaf3cc..000000000000
--- a/fs/fscache/operation.c
+++ /dev/null
@@ -1,633 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache worker operation management routines
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * See Documentation/filesystems/caching/operations.rst
- */
-
-#define FSCACHE_DEBUG_LEVEL OPERATION
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-atomic_t fscache_op_debug_id;
-EXPORT_SYMBOL(fscache_op_debug_id);
-
-static void fscache_operation_dummy_cancel(struct fscache_operation *op)
-{
-}
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @cookie: The cookie to operate on
- * @op: The operation to initialise
- * @processor: The function to perform the operation
- * @cancel: A function to handle operation cancellation
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation. The caller must still set flags,
- * object and processor if needed.
- */
-void fscache_operation_init(struct fscache_cookie *cookie,
- struct fscache_operation *op,
- fscache_operation_processor_t processor,
- fscache_operation_cancel_t cancel,
- fscache_operation_release_t release)
-{
- INIT_WORK(&op->work, fscache_op_work_func);
- atomic_set(&op->usage, 1);
- op->state = FSCACHE_OP_ST_INITIALISED;
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
- op->processor = processor;
- op->cancel = cancel ?: fscache_operation_dummy_cancel;
- op->release = release;
- INIT_LIST_HEAD(&op->pend_link);
- fscache_stat(&fscache_n_op_initialised);
- trace_fscache_op(cookie, op, fscache_op_init);
-}
-EXPORT_SYMBOL(fscache_operation_init);
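
Distilled from the invalidation path in object.c above, a hedged sketch of the set-up the kernel-doc leaves to the caller (example_op_processor is an assumed callback):

static int example_queue_exclusive_op(struct fscache_object *object,
				      struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	int ret;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	fscache_operation_init(cookie, op, example_op_processor, NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

	spin_lock(&cookie->lock);
	ret = fscache_submit_exclusive_op(object, op);
	spin_unlock(&cookie->lock);

	fscache_put_operation(op);	/* drop the initial ref in either case */
	return ret;
}
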
-
-/**
- * fscache_enqueue_operation - Enqueue an operation for processing
- * @op: The operation to enqueue
- *
- * Enqueue an operation for processing by the FS-Cache thread pool.
- *
- * This will get its own ref on the object.
- */
-void fscache_enqueue_operation(struct fscache_operation *op)
-{
- struct fscache_cookie *cookie = op->object->cookie;
-
- _enter("{OBJ%x OP%x,%u}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
-
- ASSERT(list_empty(&op->pend_link));
- ASSERT(op->processor != NULL);
- ASSERT(fscache_object_is_available(op->object));
- ASSERTCMP(atomic_read(&op->usage), >, 0);
- ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
- op->state, ==, FSCACHE_OP_ST_CANCELLED);
-
- fscache_stat(&fscache_n_op_enqueue);
- switch (op->flags & FSCACHE_OP_TYPE) {
- case FSCACHE_OP_ASYNC:
- trace_fscache_op(cookie, op, fscache_op_enqueue_async);
- _debug("queue async");
- atomic_inc(&op->usage);
- if (!queue_work(fscache_op_wq, &op->work))
- fscache_put_operation(op);
- break;
- case FSCACHE_OP_MYTHREAD:
- trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
- _debug("queue for caller's attention");
- break;
- default:
- pr_err("Unexpected op type %lx\n", op->flags);
- BUG();
- break;
- }
-}
-EXPORT_SYMBOL(fscache_enqueue_operation);
-
-/*
- * start an op running
- */
-static void fscache_run_op(struct fscache_object *object,
- struct fscache_operation *op)
-{
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
-
- op->state = FSCACHE_OP_ST_IN_PROGRESS;
- object->n_in_progress++;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- if (op->processor)
- fscache_enqueue_operation(op);
- else
- trace_fscache_op(object->cookie, op, fscache_op_run);
- fscache_stat(&fscache_n_op_run);
-}
-
-/*
- * report an unexpected submission
- */
-static void fscache_report_unexpected_submission(struct fscache_object *object,
- struct fscache_operation *op,
- const struct fscache_state *ostate)
-{
- static bool once_only;
- struct fscache_operation *p;
- unsigned n;
-
- if (once_only)
- return;
- once_only = true;
-
- kdebug("unexpected submission OP%x [OBJ%x %s]",
- op->debug_id, object->debug_id, object->state->name);
- kdebug("objstate=%s [%s]", object->state->name, ostate->name);
- kdebug("objflags=%lx", object->flags);
- kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
- kdebug("ops=%u inp=%u exc=%u",
- object->n_ops, object->n_in_progress, object->n_exclusive);
-
- if (!list_empty(&object->pending_ops)) {
- n = 0;
- list_for_each_entry(p, &object->pending_ops, pend_link) {
- ASSERTCMP(p->object, ==, object);
- kdebug("%p %p", p->processor, p->release);
- n++;
- }
-
- kdebug("n=%u", n);
- }
-
- dump_stack();
-}
-
-/*
- * submit an exclusive operation for an object
- * - other ops are excluded from running simultaneously with this one
- * - this gets any extra refs it needs on an op
- */
-int fscache_submit_exclusive_op(struct fscache_object *object,
- struct fscache_operation *op)
-{
- const struct fscache_state *ostate;
- unsigned long flags;
- int ret;
-
- _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
-
- trace_fscache_op(object->cookie, op, fscache_op_submit_ex);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- spin_lock(&object->lock);
- ASSERTCMP(object->n_ops, >=, object->n_in_progress);
- ASSERTCMP(object->n_ops, >=, object->n_exclusive);
- ASSERT(list_empty(&op->pend_link));
-
- ostate = object->state;
- smp_rmb();
-
- op->state = FSCACHE_OP_ST_PENDING;
- flags = READ_ONCE(object->flags);
- if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
- fscache_stat(&fscache_n_op_rejected);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else if (unlikely(fscache_cache_is_broken(object))) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -EIO;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
- op->object = object;
- object->n_ops++;
- object->n_exclusive++; /* reads and writes must wait */
-
- if (object->n_in_progress > 0) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- } else if (!list_empty(&object->pending_ops)) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- fscache_start_operations(object);
- } else {
- ASSERTCMP(object->n_in_progress, ==, 0);
- fscache_run_op(object, op);
- }
-
- /* need to issue a new write op after this */
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
- op->object = object;
- object->n_ops++;
- object->n_exclusive++; /* reads and writes must wait */
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else {
- fscache_report_unexpected_submission(object, op, ostate);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- }
-
- spin_unlock(&object->lock);
- return ret;
-}
-
-/*
- * submit an operation for an object
- * - objects may be submitted only in the following states:
- * - during object creation (write ops may be submitted)
- * - whilst the object is active
- * - after an I/O error incurred in one of the two above states (op rejected)
- * - this gets any extra refs it needs on an op
- */
-int fscache_submit_op(struct fscache_object *object,
- struct fscache_operation *op)
-{
- const struct fscache_state *ostate;
- unsigned long flags;
- int ret;
-
- _enter("{OBJ%x OP%x},{%u}",
- object->debug_id, op->debug_id, atomic_read(&op->usage));
-
- trace_fscache_op(object->cookie, op, fscache_op_submit);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- spin_lock(&object->lock);
- ASSERTCMP(object->n_ops, >=, object->n_in_progress);
- ASSERTCMP(object->n_ops, >=, object->n_exclusive);
- ASSERT(list_empty(&op->pend_link));
-
- ostate = object->state;
- smp_rmb();
-
- op->state = FSCACHE_OP_ST_PENDING;
- flags = READ_ONCE(object->flags);
- if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
- fscache_stat(&fscache_n_op_rejected);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else if (unlikely(fscache_cache_is_broken(object))) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -EIO;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
- op->object = object;
- object->n_ops++;
-
- if (object->n_exclusive > 0) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- } else if (!list_empty(&object->pending_ops)) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- fscache_start_operations(object);
- } else {
- ASSERTCMP(object->n_exclusive, ==, 0);
- fscache_run_op(object, op);
- }
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
- op->object = object;
- object->n_ops++;
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else {
- fscache_report_unexpected_submission(object, op, ostate);
- ASSERT(!fscache_object_is_active(object));
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- }
-
- spin_unlock(&object->lock);
- return ret;
-}
-
-/*
- * queue an object for withdrawal on error, aborting all following asynchronous
- * operations
- */
-void fscache_abort_object(struct fscache_object *object)
-{
- _enter("{OBJ%x}", object->debug_id);
-
- fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
-}
-
-/*
- * Jump start the operation processing on an object. The caller must hold
- * object->lock.
- */
-void fscache_start_operations(struct fscache_object *object)
-{
- struct fscache_operation *op;
- bool stop = false;
-
- while (!list_empty(&object->pending_ops) && !stop) {
- op = list_entry(object->pending_ops.next,
- struct fscache_operation, pend_link);
-
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
- if (object->n_in_progress > 0)
- break;
- stop = true;
- }
- list_del_init(&op->pend_link);
- fscache_run_op(object, op);
-
- /* the pending queue was holding a ref on the object */
- fscache_put_operation(op);
- }
-
- ASSERTCMP(object->n_in_progress, <=, object->n_ops);
-
- _debug("woke %d ops on OBJ%x",
- object->n_in_progress, object->debug_id);
-}
-
-/*
- * cancel an operation that's pending on an object
- */
-int fscache_cancel_op(struct fscache_operation *op,
- bool cancel_in_progress_op)
-{
- struct fscache_object *object = op->object;
- bool put = false;
- int ret;
-
- _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
-
- trace_fscache_op(object->cookie, op, fscache_op_cancel);
-
- ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
- ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- spin_lock(&object->lock);
-
- ret = -EBUSY;
- if (op->state == FSCACHE_OP_ST_PENDING) {
- ASSERT(!list_empty(&op->pend_link));
- list_del_init(&op->pend_link);
- put = true;
-
- fscache_stat(&fscache_n_op_cancelled);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- ret = 0;
- } else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
- ASSERTCMP(object->n_in_progress, >, 0);
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- object->n_in_progress--;
- if (object->n_in_progress == 0)
- fscache_start_operations(object);
-
- fscache_stat(&fscache_n_op_cancelled);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- ret = 0;
- }
-
- if (put)
- fscache_put_operation(op);
- spin_unlock(&object->lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * Cancel all pending operations on an object
- */
-void fscache_cancel_all_ops(struct fscache_object *object)
-{
- struct fscache_operation *op;
-
- _enter("OBJ%x", object->debug_id);
-
- spin_lock(&object->lock);
-
- while (!list_empty(&object->pending_ops)) {
- op = list_entry(object->pending_ops.next,
- struct fscache_operation, pend_link);
- fscache_stat(&fscache_n_op_cancelled);
- list_del_init(&op->pend_link);
-
- trace_fscache_op(object->cookie, op, fscache_op_cancel_all);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
-
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- fscache_put_operation(op);
- cond_resched_lock(&object->lock);
- }
-
- spin_unlock(&object->lock);
- _leave("");
-}
-
-/*
- * Record the completion or cancellation of an in-progress operation.
- */
-void fscache_op_complete(struct fscache_operation *op, bool cancelled)
-{
- struct fscache_object *object = op->object;
-
- _enter("OBJ%x", object->debug_id);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
- ASSERTCMP(object->n_in_progress, >, 0);
- ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
- object->n_exclusive, >, 0);
- ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
- object->n_in_progress, ==, 1);
-
- spin_lock(&object->lock);
-
- if (!cancelled) {
- trace_fscache_op(object->cookie, op, fscache_op_completed);
- op->state = FSCACHE_OP_ST_COMPLETE;
- } else {
- op->cancel(op);
- trace_fscache_op(object->cookie, op, fscache_op_cancelled);
- op->state = FSCACHE_OP_ST_CANCELLED;
- }
-
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- object->n_in_progress--;
- if (object->n_in_progress == 0)
- fscache_start_operations(object);
-
- spin_unlock(&object->lock);
- _leave("");
-}
-EXPORT_SYMBOL(fscache_op_complete);
-
-/*
- * release an operation
- * - queues pending ops if this is the last in-progress op
- */
-void fscache_put_operation(struct fscache_operation *op)
-{
- struct fscache_object *object;
- struct fscache_cache *cache;
-
- _enter("{OBJ%x OP%x,%d}",
- op->object ? op->object->debug_id : 0,
- op->debug_id, atomic_read(&op->usage));
-
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- if (!atomic_dec_and_test(&op->usage))
- return;
-
- trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);
-
- _debug("PUT OP");
- ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
- op->state != FSCACHE_OP_ST_COMPLETE,
- op->state, ==, FSCACHE_OP_ST_CANCELLED);
-
- fscache_stat(&fscache_n_op_release);
-
- if (op->release) {
- op->release(op);
- op->release = NULL;
- }
- op->state = FSCACHE_OP_ST_DEAD;
-
- object = op->object;
- if (likely(object)) {
- if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
- atomic_dec(&object->n_reads);
- if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
- fscache_unuse_cookie(object);
-
- /* now... we may get called with the object spinlock held, so we
- * complete the cleanup here only if we can immediately acquire the
- * lock, and defer it otherwise */
- if (!spin_trylock(&object->lock)) {
- _debug("defer put");
- fscache_stat(&fscache_n_op_deferred_release);
-
- cache = object->cache;
- spin_lock(&cache->op_gc_list_lock);
- list_add_tail(&op->pend_link, &cache->op_gc_list);
- spin_unlock(&cache->op_gc_list_lock);
- schedule_work(&cache->op_gc);
- _leave(" [defer]");
- return;
- }
-
- ASSERTCMP(object->n_ops, >, 0);
- object->n_ops--;
- if (object->n_ops == 0)
- fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
- spin_unlock(&object->lock);
- }
-
- kfree(op);
- _leave(" [done]");
-}
-EXPORT_SYMBOL(fscache_put_operation);
-
-/*
- * garbage collect operations that have had their release deferred
- */
-void fscache_operation_gc(struct work_struct *work)
-{
- struct fscache_operation *op;
- struct fscache_object *object;
- struct fscache_cache *cache =
- container_of(work, struct fscache_cache, op_gc);
- int count = 0;
-
- _enter("");
-
- do {
- spin_lock(&cache->op_gc_list_lock);
- if (list_empty(&cache->op_gc_list)) {
- spin_unlock(&cache->op_gc_list_lock);
- break;
- }
-
- op = list_entry(cache->op_gc_list.next,
- struct fscache_operation, pend_link);
- list_del(&op->pend_link);
- spin_unlock(&cache->op_gc_list_lock);
-
- object = op->object;
- trace_fscache_op(object->cookie, op, fscache_op_gc);
-
- spin_lock(&object->lock);
-
- _debug("GC DEFERRED REL OBJ%x OP%x",
- object->debug_id, op->debug_id);
- fscache_stat(&fscache_n_op_gc);
-
- ASSERTCMP(atomic_read(&op->usage), ==, 0);
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
-
- ASSERTCMP(object->n_ops, >, 0);
- object->n_ops--;
- if (object->n_ops == 0)
- fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
- spin_unlock(&object->lock);
- kfree(op);
-
- } while (count++ < 20);
-
- if (!list_empty(&cache->op_gc_list))
- schedule_work(&cache->op_gc);
-
- _leave("");
-}
-
-/*
- * execute an operation using fs_op_wq to provide processing context -
- * the caller holds a ref to this object, so we don't need to hold one
- */
-void fscache_op_work_func(struct work_struct *work)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, work);
-
- _enter("{OBJ%x OP%x,%d}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
-
- trace_fscache_op(op->object->cookie, op, fscache_op_work);
-
- ASSERT(op->processor != NULL);
- op->processor(op);
- fscache_put_operation(op);
-
- _leave("");
-}
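For reference, the call sites elsewhere in this patch (e.g. __fscache_attr_changed()) all drove operation.c's submit/run/complete/put lifecycle in the same way. The sketch below is a hedged illustration of that pattern against the removed API; example_submit() and example_op_processor() are hypothetical names, not functions from the tree, and the cookie->lock that real call sites held around submission is elided as noted in the comments.

/*
 * Illustrative only: a hypothetical caller of the operation API removed
 * above, mirroring the shape of __fscache_attr_changed().  Real call sites
 * ran the submission under cookie->lock with @object taken from
 * cookie->backing_objects.
 */
static void example_op_processor(struct fscache_operation *op)
{
        /* do the work in the workqueue, then mark the op complete */
        fscache_op_complete(op, false);
}

static int example_submit(struct fscache_cookie *cookie,
                          struct fscache_object *object)
{
        struct fscache_operation *op;

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                return -ENOMEM;

        fscache_operation_init(cookie, op, example_op_processor, NULL, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_UNUSE_COOKIE);

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, op) < 0) {
                /* the op was cancelled under the object lock and never took
                 * over the cookie ref, so drop both here */
                if (__fscache_unuse_cookie(cookie))
                        __fscache_wake_unused_cookie(cookie);
                fscache_put_operation(op);
                return -ENOBUFS;
        }

        /* the work queue now holds its own ref on the op; drop ours */
        fscache_put_operation(op);
        return 0;
}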
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
deleted file mode 100644
index 27df94ef0e0b..000000000000
--- a/fs/fscache/page.c
+++ /dev/null
@@ -1,1242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Cache page management and data I/O routines
- *
- * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL PAGE
-#include <linux/module.h>
-#include <linux/fscache-cache.h>
-#include <linux/buffer_head.h>
-#include <linux/pagevec.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-/*
- * check to see if a page is being written to the cache
- */
-bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
-{
- void *val;
-
- rcu_read_lock();
- val = radix_tree_lookup(&cookie->stores, page->index);
- rcu_read_unlock();
- trace_fscache_check_page(cookie, page, val, 0);
-
- return val != NULL;
-}
-EXPORT_SYMBOL(__fscache_check_page_write);
-
-/*
- * wait for a page to finish being written to the cache
- */
-void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
-{
- wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
-
- trace_fscache_page(cookie, page, fscache_page_write_wait);
-
- wait_event(*wq, !__fscache_check_page_write(cookie, page));
-}
-EXPORT_SYMBOL(__fscache_wait_on_page_write);
-
-/*
- * wait for a page to finish being written to the cache. Put a timeout here
- * since we might be called recursively via parent fs.
- */
-static
-bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
-{
- wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
-
- return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
- HZ);
-}
-
-/*
- * decide whether a page can be released, possibly by cancelling a store to it
- * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
- */
-bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- struct page *xpage;
- void *val;
-
- _enter("%p,%p,%x", cookie, page, gfp);
-
- trace_fscache_page(cookie, page, fscache_page_maybe_release);
-
-try_again:
- rcu_read_lock();
- val = radix_tree_lookup(&cookie->stores, page->index);
- if (!val) {
- rcu_read_unlock();
- fscache_stat(&fscache_n_store_vmscan_not_storing);
- __fscache_uncache_page(cookie, page);
- return true;
- }
-
- /* see if the page is actually undergoing storage - if so we can't get
- * rid of it till the cache has finished with it */
- if (radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG)) {
- rcu_read_unlock();
- goto page_busy;
- }
-
- /* the page is pending storage, so we attempt to cancel the store and
- * discard the store request so that the page can be reclaimed */
- spin_lock(&cookie->stores_lock);
- rcu_read_unlock();
-
- if (radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG)) {
- /* the page started to undergo storage whilst we were looking,
- * so now we can only wait or return */
- spin_unlock(&cookie->stores_lock);
- goto page_busy;
- }
-
- xpage = radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- spin_unlock(&cookie->stores_lock);
-
- if (xpage) {
- fscache_stat(&fscache_n_store_vmscan_cancelled);
- fscache_stat(&fscache_n_store_radix_deletes);
- ASSERTCMP(xpage, ==, page);
- } else {
- fscache_stat(&fscache_n_store_vmscan_gone);
- }
-
- wake_up_bit(&cookie->flags, 0);
- trace_fscache_wake_cookie(cookie);
- if (xpage)
- put_page(xpage);
- __fscache_uncache_page(cookie, page);
- return true;
-
-page_busy:
- /* We will wait here if we're allowed to, but that could deadlock the
- * allocator as the work threads writing to the cache may all end up
- * sleeping on memory allocation, so we may need to impose a timeout
- * too. */
- if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
- fscache_stat(&fscache_n_store_vmscan_busy);
- return false;
- }
-
- fscache_stat(&fscache_n_store_vmscan_wait);
- if (!release_page_wait_timeout(cookie, page))
- _debug("fscache writeout timeout page: %p{%lx}",
- page, page->index);
-
- gfp &= ~__GFP_DIRECT_RECLAIM;
- goto try_again;
-}
-EXPORT_SYMBOL(__fscache_maybe_release_page);
-
-/*
- * note that a page has finished being written to the cache
- */
-static void fscache_end_page_write(struct fscache_object *object,
- struct page *page)
-{
- struct fscache_cookie *cookie;
- struct page *xpage = NULL, *val;
-
- spin_lock(&object->lock);
- cookie = object->cookie;
- if (cookie) {
- /* delete the page from the tree if it is now no longer
- * pending */
- spin_lock(&cookie->stores_lock);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
- trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
- if (!radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG)) {
- fscache_stat(&fscache_n_store_radix_deletes);
- xpage = radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- trace_fscache_page(cookie, page, fscache_page_write_end);
-
- val = radix_tree_lookup(&cookie->stores, page->index);
- trace_fscache_check_page(cookie, page, val, 1);
- } else {
- trace_fscache_page(cookie, page, fscache_page_write_end_pend);
- }
- spin_unlock(&cookie->stores_lock);
- wake_up_bit(&cookie->flags, 0);
- trace_fscache_wake_cookie(cookie);
- } else {
- trace_fscache_page(cookie, page, fscache_page_write_end_noc);
- }
- spin_unlock(&object->lock);
- if (xpage)
- put_page(xpage);
-}
-
-/*
- * actually apply the changed attributes to a cache object
- */
-static void fscache_attr_changed_op(struct fscache_operation *op)
-{
- struct fscache_object *object = op->object;
- int ret;
-
- _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
-
- fscache_stat(&fscache_n_attr_changed_calls);
-
- if (fscache_object_is_active(object)) {
- fscache_stat(&fscache_n_cop_attr_changed);
- ret = object->cache->ops->attr_changed(object);
- fscache_stat_d(&fscache_n_cop_attr_changed);
- if (ret < 0)
- fscache_abort_object(object);
- fscache_op_complete(op, ret < 0);
- } else {
- fscache_op_complete(op, true);
- }
-
- _leave("");
-}
-
-/*
- * notification that the attributes on an object have changed
- */
-int __fscache_attr_changed(struct fscache_cookie *cookie)
-{
- struct fscache_operation *op;
- struct fscache_object *object;
- bool wake_cookie = false;
-
- _enter("%p", cookie);
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
-
- fscache_stat(&fscache_n_attr_changed);
-
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op) {
- fscache_stat(&fscache_n_attr_changed_nomem);
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-
- fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
- op->flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_EXCLUSIVE) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_exclusive_op(object, op) < 0)
- goto nobufs_dec;
- spin_unlock(&cookie->lock);
- fscache_stat(&fscache_n_attr_changed_ok);
- fscache_put_operation(op);
- _leave(" = 0");
- return 0;
-
-nobufs_dec:
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs:
- spin_unlock(&cookie->lock);
- fscache_put_operation(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- fscache_stat(&fscache_n_attr_changed_nobufs);
- _leave(" = %d", -ENOBUFS);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_attr_changed);
-
-/*
- * Handle cancellation of a pending retrieval op
- */
-static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
-{
- struct fscache_retrieval *op =
- container_of(_op, struct fscache_retrieval, op);
-
- atomic_set(&op->n_pages, 0);
-}
-
-/*
- * release a retrieval op reference
- */
-static void fscache_release_retrieval_op(struct fscache_operation *_op)
-{
- struct fscache_retrieval *op =
- container_of(_op, struct fscache_retrieval, op);
-
- _enter("{OP%x}", op->op.debug_id);
-
- ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
- atomic_read(&op->n_pages), ==, 0);
-
- if (op->context)
- fscache_put_context(op->cookie, op->context);
-
- _leave("");
-}
-
-/*
- * allocate a retrieval op
- */
-struct fscache_retrieval *fscache_alloc_retrieval(
- struct fscache_cookie *cookie,
- struct address_space *mapping,
- fscache_rw_complete_t end_io_func,
- void *context)
-{
- struct fscache_retrieval *op;
-
- /* allocate a retrieval operation and attempt to submit it */
- op = kzalloc(sizeof(*op), GFP_NOIO);
- if (!op) {
- fscache_stat(&fscache_n_retrievals_nomem);
- return NULL;
- }
-
- fscache_operation_init(cookie, &op->op, NULL,
- fscache_do_cancel_retrieval,
- fscache_release_retrieval_op);
- op->op.flags = FSCACHE_OP_MYTHREAD |
- (1UL << FSCACHE_OP_WAITING) |
- (1UL << FSCACHE_OP_UNUSE_COOKIE);
- op->cookie = cookie;
- op->mapping = mapping;
- op->end_io_func = end_io_func;
- op->context = context;
- INIT_LIST_HEAD(&op->to_do);
-
- /* Pin the netfs read context in case we need to do the actual netfs
- * read because we've encountered a cache read failure.
- */
- if (context)
- fscache_get_context(op->cookie, context);
- return op;
-}
-
-/*
- * wait for a deferred lookup to complete
- */
-int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
-{
- _enter("");
-
- if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
- _leave(" = 0 [imm]");
- return 0;
- }
-
- fscache_stat(&fscache_n_retrievals_wait);
-
- if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
- TASK_INTERRUPTIBLE) != 0) {
- fscache_stat(&fscache_n_retrievals_intr);
- _leave(" = -ERESTARTSYS");
- return -ERESTARTSYS;
- }
-
- ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
-
- smp_rmb();
- _leave(" = 0 [dly]");
- return 0;
-}
-
-/*
- * wait for an object to become active (or dead)
- */
-int fscache_wait_for_operation_activation(struct fscache_object *object,
- struct fscache_operation *op,
- atomic_t *stat_op_waits,
- atomic_t *stat_object_dead)
-{
- int ret;
-
- if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
- goto check_if_dead;
-
- _debug(">>> WT");
- if (stat_op_waits)
- fscache_stat(stat_op_waits);
- if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
- TASK_INTERRUPTIBLE) != 0) {
- trace_fscache_op(object->cookie, op, fscache_op_signal);
- ret = fscache_cancel_op(op, false);
- if (ret == 0)
- return -ERESTARTSYS;
-
- /* it's been removed from the pending queue by another party,
- * so we should get to run shortly */
- wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
- TASK_UNINTERRUPTIBLE);
- }
- _debug("<<< GO");
-
-check_if_dead:
- if (op->state == FSCACHE_OP_ST_CANCELLED) {
- if (stat_object_dead)
- fscache_stat(stat_object_dead);
- _leave(" = -ENOBUFS [cancelled]");
- return -ENOBUFS;
- }
- if (unlikely(fscache_object_is_dying(object) ||
- fscache_cache_is_broken(object))) {
- enum fscache_operation_state state = op->state;
- trace_fscache_op(object->cookie, op, fscache_op_signal);
- fscache_cancel_op(op, true);
- if (stat_object_dead)
- fscache_stat(stat_object_dead);
- _leave(" = -ENOBUFS [obj dead %d]", state);
- return -ENOBUFS;
- }
- return 0;
-}
-
-/*
- * read a page from the cache or allocate a block in which to store it
- * - we return:
- * -ENOMEM - out of memory, nothing done
- * -ERESTARTSYS - interrupted
- * -ENOBUFS - no backing object available in which to cache the block
- * -ENODATA - no data available in the backing object for this block
- * 0 - dispatched a read - it'll call end_io_func() when finished
- */
-int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,%p,,,", cookie, page);
-
- fscache_stat(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, page->mapping,
- end_io_func, context);
- if (!op) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
- atomic_set(&op->n_pages, 1);
- trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
-
- __fscache_use_cookie(cookie);
- atomic_inc(&object->n_reads);
- __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
-
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_retrieval_ops);
-
- /* we wait for the operation to become active, and then process it
- * *here*, in this thread, and not in the thread pool */
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_retrieval_op_waits),
- __fscache_stat(&fscache_n_retrievals_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
- fscache_stat(&fscache_n_cop_allocate_page);
- ret = object->cache->ops->allocate_page(op, page, gfp);
- fscache_stat_d(&fscache_n_cop_allocate_page);
- if (ret == 0)
- ret = -ENODATA;
- } else {
- fscache_stat(&fscache_n_cop_read_or_alloc_page);
- ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
- fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
- }
-
-error:
- if (ret == -ENOMEM)
- fscache_stat(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
- fscache_stat(&fscache_n_retrievals_nodata);
- else if (ret < 0)
- fscache_stat(&fscache_n_retrievals_nobufs);
- else
- fscache_stat(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- atomic_dec(&object->n_reads);
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- fscache_put_retrieval(op);
-nobufs:
- fscache_stat(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_read_or_alloc_page);
-
-/*
- * read a list of pages from the cache or allocate blocks in which to store
- * them
- * - we return:
- * -ENOMEM - out of memory, some pages may be being read
- * -ERESTARTSYS - interrupted, some pages may be being read
- * -ENOBUFS - no backing object or space available in which to cache any
- * pages not being read
- * -ENODATA - no data available in the backing object for some or all of
- * the pages
- * 0 - dispatched a read on all pages
- *
- * end_io_func() will be called for each page read from the cache as it
- * finishes being read
- *
- * any pages for which a read is dispatched will be removed from pages and
- * nr_pages
- */
-int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,,%d,,,", cookie, *nr_pages);
-
- fscache_stat(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(*nr_pages, >, 0);
- ASSERT(!list_empty(pages));
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
- if (!op)
- return -ENOMEM;
- atomic_set(&op->n_pages, *nr_pages);
- trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- atomic_inc(&object->n_reads);
- __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
-
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_retrieval_ops);
-
- /* we wait for the operation to become active, and then process it
- * *here*, in this thread, and not in the thread pool */
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_retrieval_op_waits),
- __fscache_stat(&fscache_n_retrievals_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
- fscache_stat(&fscache_n_cop_allocate_pages);
- ret = object->cache->ops->allocate_pages(
- op, pages, nr_pages, gfp);
- fscache_stat_d(&fscache_n_cop_allocate_pages);
- } else {
- fscache_stat(&fscache_n_cop_read_or_alloc_pages);
- ret = object->cache->ops->read_or_alloc_pages(
- op, pages, nr_pages, gfp);
- fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
- }
-
-error:
- if (ret == -ENOMEM)
- fscache_stat(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
- fscache_stat(&fscache_n_retrievals_nodata);
- else if (ret < 0)
- fscache_stat(&fscache_n_retrievals_nobufs);
- else
- fscache_stat(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- atomic_dec(&object->n_reads);
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- fscache_put_retrieval(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
-nobufs:
- fscache_stat(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
-
-/*
- * allocate a block in the cache on which to store a page
- * - we return:
- * -ENOMEM - out of memory, nothing done
- * -ERESTARTSYS - interrupted
- * -ENOBUFS - no backing object available in which to cache the block
- * 0 - block allocated
- */
-int __fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,%p,,,", cookie, page);
-
- fscache_stat(&fscache_n_allocs);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
- if (!op)
- return -ENOMEM;
- atomic_set(&op->n_pages, 1);
- trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_alloc_ops);
-
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_alloc_op_waits),
- __fscache_stat(&fscache_n_allocs_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- fscache_stat(&fscache_n_cop_allocate_page);
- ret = object->cache->ops->allocate_page(op, page, gfp);
- fscache_stat_d(&fscache_n_cop_allocate_page);
-
-error:
- if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_allocs_intr);
- else if (ret < 0)
- fscache_stat(&fscache_n_allocs_nobufs);
- else
- fscache_stat(&fscache_n_allocs_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- fscache_put_retrieval(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
-nobufs:
- fscache_stat(&fscache_n_allocs_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_alloc_page);
-
-/*
- * Unmark pages allocated in the readahead code path (via:
- * fscache_readpages_or_alloc) after delegating to the base filesystem
- */
-void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
-{
- struct page *page;
-
- list_for_each_entry(page, pages, lru) {
- if (PageFsCache(page))
- __fscache_uncache_page(cookie, page);
- }
-}
-EXPORT_SYMBOL(__fscache_readpages_cancel);
-
-/*
- * release a write op reference
- */
-static void fscache_release_write_op(struct fscache_operation *_op)
-{
- _enter("{OP%x}", _op->debug_id);
-}
-
-/*
- * perform the background storage of a page into the cache
- */
-static void fscache_write_op(struct fscache_operation *_op)
-{
- struct fscache_storage *op =
- container_of(_op, struct fscache_storage, op);
- struct fscache_object *object = op->op.object;
- struct fscache_cookie *cookie;
- struct page *page;
- unsigned n;
- void *results[1];
- int ret;
-
- _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
-
-again:
- spin_lock(&object->lock);
- cookie = object->cookie;
-
- if (!fscache_object_is_active(object)) {
- /* If we get here, then the on-disk cache object likely no
- * longer exists, so we should just cancel this write
- * operation.
- */
- spin_unlock(&object->lock);
- fscache_op_complete(&op->op, true);
- _leave(" [inactive]");
- return;
- }
-
- if (!cookie) {
- /* If we get here, then the cookie belonging to the object was
- * detached, probably by the cookie being withdrawn due to
- * memory pressure, which means that the pages we might write
- * to the cache from no longer exist - therefore, we can just
- * cancel this write operation.
- */
- spin_unlock(&object->lock);
- fscache_op_complete(&op->op, true);
- _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
- _op->flags, _op->state, object->state->short_name,
- object->flags);
- return;
- }
-
- spin_lock(&cookie->stores_lock);
-
- fscache_stat(&fscache_n_store_calls);
-
- /* find a page to store */
- results[0] = NULL;
- page = NULL;
- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
- if (n != 1)
- goto superseded;
- page = results[0];
- _debug("gang %d [%lx]", n, page->index);
-
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
-
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-
- if (page->index >= op->store_limit)
- goto discard_page;
-
- fscache_stat(&fscache_n_store_pages);
- fscache_stat(&fscache_n_cop_write_page);
- ret = object->cache->ops->write_page(op, page);
- fscache_stat_d(&fscache_n_cop_write_page);
- trace_fscache_wrote_page(cookie, page, &op->op, ret);
- fscache_end_page_write(object, page);
- if (ret < 0) {
- fscache_abort_object(object);
- fscache_op_complete(&op->op, true);
- } else {
- fscache_enqueue_operation(&op->op);
- }
-
- _leave("");
- return;
-
-discard_page:
- fscache_stat(&fscache_n_store_pages_over_limit);
- trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
- fscache_end_page_write(object, page);
- goto again;
-
-superseded:
- /* this writer is going away and there aren't any more things to
- * write */
- _debug("cease");
- spin_unlock(&cookie->stores_lock);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- spin_unlock(&object->lock);
- fscache_op_complete(&op->op, false);
- _leave("");
-}
-
-/*
- * Clear the pages pending writing for invalidation
- */
-void fscache_invalidate_writes(struct fscache_cookie *cookie)
-{
- struct page *page;
- void *results[16];
- int n, i;
-
- _enter("");
-
- for (;;) {
- spin_lock(&cookie->stores_lock);
- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
- ARRAY_SIZE(results),
- FSCACHE_COOKIE_PENDING_TAG);
- if (n == 0) {
- spin_unlock(&cookie->stores_lock);
- break;
- }
-
- for (i = n - 1; i >= 0; i--) {
- page = results[i];
- radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- trace_fscache_page(cookie, page, fscache_page_inval);
- }
-
- spin_unlock(&cookie->stores_lock);
-
- for (i = n - 1; i >= 0; i--)
- put_page(results[i]);
- }
-
- wake_up_bit(&cookie->flags, 0);
- trace_fscache_wake_cookie(cookie);
-
- _leave("");
-}
-
-/*
- * request a page be stored in the cache
- * - returns:
- * -ENOMEM - out of memory, nothing done
- * -ENOBUFS - no backing object available in which to cache the page
- * 0 - dispatched a write - it'll call end_io_func() when finished
- *
- * if the cookie still has a backing object at this point, that object can be
- * in one of a few states with respect to storage processing:
- *
- * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
- * set)
- *
- * (a) no writes yet
- *
- * (b) writes deferred till post-creation (mark page for writing and
- * return immediately)
- *
- * (2) negative lookup, object created, initial fill being made from netfs
- *
- * (a) fill point not yet reached this page (mark page for writing and
- * return)
- *
- * (b) fill point passed this page (queue op to store this page)
- *
- * (3) object extant (queue op to store this page)
- *
- * any other state is invalid
- */
-int __fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp)
-{
- struct fscache_storage *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,%x,", cookie, (u32) page->flags);
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERT(PageFsCache(page));
-
- fscache_stat(&fscache_n_stores);
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!op)
- goto nomem;
-
- fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
- fscache_release_write_op);
- op->op.flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_WAITING) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
-
- ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
- if (ret < 0)
- goto nomem_free;
-
- trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
-
- ret = -ENOBUFS;
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
- if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
- goto nobufs;
-
- trace_fscache_page(cookie, page, fscache_page_write);
-
- /* add the page to the pending-storage radix tree on the backing
- * object */
- spin_lock(&object->lock);
-
- if (object->store_limit_l != object_size)
- fscache_set_store_limit(object, object_size);
-
- spin_lock(&cookie->stores_lock);
-
- _debug("store limit %llx", (unsigned long long) object->store_limit);
-
- ret = radix_tree_insert(&cookie->stores, page->index, page);
- if (ret < 0) {
- if (ret == -EEXIST)
- goto already_queued;
- _debug("insert failed %d", ret);
- goto nobufs_unlock_obj;
- }
-
- trace_fscache_page(cookie, page, fscache_page_radix_insert);
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
- get_page(page);
-
- /* we only want one writer at a time, but we do need to queue new
- * writers after exclusive ops */
- if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
- goto already_pending;
-
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-
- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
- op->store_limit = object->store_limit;
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_op(object, &op->op) < 0)
- goto submit_failed;
-
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- fscache_stat(&fscache_n_store_ops);
- fscache_stat(&fscache_n_stores_ok);
-
- /* the work queue now carries its own ref on the object */
- fscache_put_operation(&op->op);
- _leave(" = 0");
- return 0;
-
-already_queued:
- fscache_stat(&fscache_n_stores_again);
-already_pending:
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- fscache_put_operation(&op->op);
- fscache_stat(&fscache_n_stores_ok);
- _leave(" = 0");
- return 0;
-
-submit_failed:
- spin_lock(&cookie->stores_lock);
- radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- spin_unlock(&cookie->stores_lock);
- wake_cookie = __fscache_unuse_cookie(cookie);
- put_page(page);
- ret = -ENOBUFS;
- goto nobufs;
-
-nobufs_unlock_obj:
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-nobufs:
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- fscache_put_operation(&op->op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- fscache_stat(&fscache_n_stores_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-
-nomem_free:
- fscache_put_operation(&op->op);
-nomem:
- fscache_stat(&fscache_n_stores_oom);
- _leave(" = -ENOMEM");
- return -ENOMEM;
-}
-EXPORT_SYMBOL(__fscache_write_page);
-
-/*
- * remove a page from the cache
- */
-void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
-{
- struct fscache_object *object;
-
- _enter(",%p", page);
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
- fscache_stat(&fscache_n_uncaches);
-
- /* cache withdrawal may beat us to it */
- if (!PageFsCache(page))
- goto done;
-
- trace_fscache_page(cookie, page, fscache_page_uncache);
-
- /* get the object */
- spin_lock(&cookie->lock);
-
- if (hlist_empty(&cookie->backing_objects)) {
- ClearPageFsCache(page);
- goto done_unlock;
- }
-
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- /* there might now be stuff on disk we could read */
- clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
-
- /* only invoke the cache backend if we managed to mark the page
- * uncached here; this deals with synchronisation vs withdrawal */
- if (TestClearPageFsCache(page) &&
- object->cache->ops->uncache_page) {
- /* the cache backend releases the cookie lock */
- fscache_stat(&fscache_n_cop_uncache_page);
- object->cache->ops->uncache_page(object, page);
- fscache_stat_d(&fscache_n_cop_uncache_page);
- goto done;
- }
-
-done_unlock:
- spin_unlock(&cookie->lock);
-done:
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_uncache_page);
-
-/**
- * fscache_mark_page_cached - Mark a page as being cached
- * @op: The retrieval op pages are being marked for
- * @page: The page to be marked
- *
- * Mark a netfs page as being cached. After this is called, the netfs
- * must call fscache_uncache_page() to remove the mark.
- */
-void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
-{
- struct fscache_cookie *cookie = op->op.object->cookie;
-
-#ifdef CONFIG_FSCACHE_STATS
- atomic_inc(&fscache_n_marks);
-#endif
-
- trace_fscache_page(cookie, page, fscache_page_cached);
-
- _debug("- mark %p{%lx}", page, page->index);
- if (TestSetPageFsCache(page)) {
- static bool once_only;
- if (!once_only) {
- once_only = true;
- pr_warn("Cookie type %s marked page %lx multiple times\n",
- cookie->def->name, page->index);
- }
- }
-
- if (cookie->def->mark_page_cached)
- cookie->def->mark_page_cached(cookie->netfs_data,
- op->mapping, page);
-}
-EXPORT_SYMBOL(fscache_mark_page_cached);
-
-/**
- * fscache_mark_pages_cached - Mark pages as being cached
- * @op: The retrieval op pages are being marked for
- * @pagevec: The pages to be marked
- *
- * Mark a bunch of netfs pages as being cached. After this is called,
- * the netfs must call fscache_uncache_page() to remove the mark.
- */
-void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec)
-{
- unsigned long loop;
-
- for (loop = 0; loop < pagevec->nr; loop++)
- fscache_mark_page_cached(op, pagevec->pages[loop]);
-
- pagevec_reinit(pagevec);
-}
-EXPORT_SYMBOL(fscache_mark_pages_cached);
-
-/*
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
- */
-void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
-{
- struct address_space *mapping = inode->i_mapping;
- struct pagevec pvec;
- pgoff_t next;
- int i;
-
- _enter("%p,%p", cookie, inode);
-
- if (!mapping || mapping->nrpages == 0) {
- _leave(" [no pages]");
- return;
- }
-
- pagevec_init(&pvec);
- next = 0;
- do {
- if (!pagevec_lookup(&pvec, mapping, &next))
- break;
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- if (PageFsCache(page)) {
- __fscache_wait_on_page_write(cookie, page);
- __fscache_uncache_page(cookie, page);
- }
- }
- pagevec_release(&pvec);
- cond_resched();
- } while (next);
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
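The __fscache_*() exports deleted above were normally reached through inline wrappers in include/linux/fscache.h (fscache_read_or_alloc_page(), fscache_write_page(), fscache_uncache_page()), which check cookie validity before calling in. A rough sketch of how a netfs used them under the old API follows; the example_*() helpers are hypothetical and the error handling is pared down.

/*
 * Illustrative only: how a netfs typically drove the entry points deleted
 * above, via the old inline wrappers from include/linux/fscache.h.
 */
#include <linux/fscache.h>
#include <linux/pagemap.h>

/* completion callback run for each page the cache finishes reading */
static void example_read_done(struct page *page, void *context, int error)
{
        if (error == 0) {
                SetPageUptodate(page);
                unlock_page(page);
        } else {
                /* a real netfs would fall back to reading from the server */
                unlock_page(page);
        }
}

static int example_readpage(struct fscache_cookie *cookie, struct page *page)
{
        int ret;

        ret = fscache_read_or_alloc_page(cookie, page, example_read_done,
                                         NULL, GFP_KERNEL);
        if (ret == 0)
                return 0;       /* read dispatched; callback will run */

        /* -ENOBUFS, -ENODATA etc.: read from the server instead */
        return ret;
}

static void example_write_and_uncache(struct fscache_cookie *cookie,
                                      struct page *page, loff_t i_size)
{
        /* on failure the netfs was expected to drop the PG_fscache mark */
        if (fscache_write_page(cookie, page, i_size, GFP_KERNEL) != 0)
                fscache_uncache_page(cookie, page);
}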
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
deleted file mode 100644
index 061df8f61ffc..000000000000
--- a/fs/fscache/proc.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache statistics viewing interface
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL OPERATION
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-
-/*
- * initialise the /proc/fs/fscache/ directory
- */
-int __init fscache_proc_init(void)
-{
- _enter("");
-
- if (!proc_mkdir("fs/fscache", NULL))
- goto error_dir;
-
- if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
- &fscache_cookies_seq_ops))
- goto error_cookies;
-
-#ifdef CONFIG_FSCACHE_STATS
- if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
- fscache_stats_show))
- goto error_stats;
-#endif
-
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
- &fscache_objlist_proc_ops))
- goto error_objects;
-#endif
-
- _leave(" = 0");
- return 0;
-
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
-error_objects:
-#endif
-#ifdef CONFIG_FSCACHE_STATS
- remove_proc_entry("fs/fscache/stats", NULL);
-error_stats:
-#endif
- remove_proc_entry("fs/fscache/cookies", NULL);
-error_cookies:
- remove_proc_entry("fs/fscache", NULL);
-error_dir:
- _leave(" = -ENOMEM");
- return -ENOMEM;
-}
-
-/*
- * clean up the /proc/fs/fscache/ directory
- */
-void fscache_proc_cleanup(void)
-{
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- remove_proc_entry("fs/fscache/objects", NULL);
-#endif
-#ifdef CONFIG_FSCACHE_STATS
- remove_proc_entry("fs/fscache/stats", NULL);
-#endif
- remove_proc_entry("fs/fscache/cookies", NULL);
- remove_proc_entry("fs/fscache", NULL);
-}
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
deleted file mode 100644
index a7c3ed89a3e0..000000000000
--- a/fs/fscache/stats.c
+++ /dev/null
@@ -1,283 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache statistics
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL THREAD
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-
-/*
- * operation counters
- */
-atomic_t fscache_n_op_pend;
-atomic_t fscache_n_op_run;
-atomic_t fscache_n_op_enqueue;
-atomic_t fscache_n_op_deferred_release;
-atomic_t fscache_n_op_initialised;
-atomic_t fscache_n_op_release;
-atomic_t fscache_n_op_gc;
-atomic_t fscache_n_op_cancelled;
-atomic_t fscache_n_op_rejected;
-
-atomic_t fscache_n_attr_changed;
-atomic_t fscache_n_attr_changed_ok;
-atomic_t fscache_n_attr_changed_nobufs;
-atomic_t fscache_n_attr_changed_nomem;
-atomic_t fscache_n_attr_changed_calls;
-
-atomic_t fscache_n_allocs;
-atomic_t fscache_n_allocs_ok;
-atomic_t fscache_n_allocs_wait;
-atomic_t fscache_n_allocs_nobufs;
-atomic_t fscache_n_allocs_intr;
-atomic_t fscache_n_allocs_object_dead;
-atomic_t fscache_n_alloc_ops;
-atomic_t fscache_n_alloc_op_waits;
-
-atomic_t fscache_n_retrievals;
-atomic_t fscache_n_retrievals_ok;
-atomic_t fscache_n_retrievals_wait;
-atomic_t fscache_n_retrievals_nodata;
-atomic_t fscache_n_retrievals_nobufs;
-atomic_t fscache_n_retrievals_intr;
-atomic_t fscache_n_retrievals_nomem;
-atomic_t fscache_n_retrievals_object_dead;
-atomic_t fscache_n_retrieval_ops;
-atomic_t fscache_n_retrieval_op_waits;
-
-atomic_t fscache_n_stores;
-atomic_t fscache_n_stores_ok;
-atomic_t fscache_n_stores_again;
-atomic_t fscache_n_stores_nobufs;
-atomic_t fscache_n_stores_oom;
-atomic_t fscache_n_store_ops;
-atomic_t fscache_n_store_calls;
-atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
-atomic_t fscache_n_store_pages_over_limit;
-
-atomic_t fscache_n_store_vmscan_not_storing;
-atomic_t fscache_n_store_vmscan_gone;
-atomic_t fscache_n_store_vmscan_busy;
-atomic_t fscache_n_store_vmscan_cancelled;
-atomic_t fscache_n_store_vmscan_wait;
-
-atomic_t fscache_n_marks;
-atomic_t fscache_n_uncaches;
-
-atomic_t fscache_n_acquires;
-atomic_t fscache_n_acquires_null;
-atomic_t fscache_n_acquires_no_cache;
-atomic_t fscache_n_acquires_ok;
-atomic_t fscache_n_acquires_nobufs;
-atomic_t fscache_n_acquires_oom;
-
-atomic_t fscache_n_invalidates;
-atomic_t fscache_n_invalidates_run;
-
-atomic_t fscache_n_updates;
-atomic_t fscache_n_updates_null;
-atomic_t fscache_n_updates_run;
-
-atomic_t fscache_n_relinquishes;
-atomic_t fscache_n_relinquishes_null;
-atomic_t fscache_n_relinquishes_waitcrt;
-atomic_t fscache_n_relinquishes_retire;
-
-atomic_t fscache_n_cookie_index;
-atomic_t fscache_n_cookie_data;
-atomic_t fscache_n_cookie_special;
-
-atomic_t fscache_n_object_alloc;
-atomic_t fscache_n_object_no_alloc;
-atomic_t fscache_n_object_lookups;
-atomic_t fscache_n_object_lookups_negative;
-atomic_t fscache_n_object_lookups_positive;
-atomic_t fscache_n_object_lookups_timed_out;
-atomic_t fscache_n_object_created;
-atomic_t fscache_n_object_avail;
-atomic_t fscache_n_object_dead;
-
-atomic_t fscache_n_checkaux_none;
-atomic_t fscache_n_checkaux_okay;
-atomic_t fscache_n_checkaux_update;
-atomic_t fscache_n_checkaux_obsolete;
-
-atomic_t fscache_n_cop_alloc_object;
-atomic_t fscache_n_cop_lookup_object;
-atomic_t fscache_n_cop_lookup_complete;
-atomic_t fscache_n_cop_grab_object;
-atomic_t fscache_n_cop_invalidate_object;
-atomic_t fscache_n_cop_update_object;
-atomic_t fscache_n_cop_drop_object;
-atomic_t fscache_n_cop_put_object;
-atomic_t fscache_n_cop_sync_cache;
-atomic_t fscache_n_cop_attr_changed;
-atomic_t fscache_n_cop_read_or_alloc_page;
-atomic_t fscache_n_cop_read_or_alloc_pages;
-atomic_t fscache_n_cop_allocate_page;
-atomic_t fscache_n_cop_allocate_pages;
-atomic_t fscache_n_cop_write_page;
-atomic_t fscache_n_cop_uncache_page;
-atomic_t fscache_n_cop_dissociate_pages;
-
-atomic_t fscache_n_cache_no_space_reject;
-atomic_t fscache_n_cache_stale_objects;
-atomic_t fscache_n_cache_retired_objects;
-atomic_t fscache_n_cache_culled_objects;
-
-/*
- * display the general statistics
- */
-int fscache_stats_show(struct seq_file *m, void *v)
-{
- seq_puts(m, "FS-Cache statistics\n");
-
- seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
- atomic_read(&fscache_n_cookie_index),
- atomic_read(&fscache_n_cookie_data),
- atomic_read(&fscache_n_cookie_special));
-
- seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
- atomic_read(&fscache_n_object_alloc),
- atomic_read(&fscache_n_object_no_alloc),
- atomic_read(&fscache_n_object_avail),
- atomic_read(&fscache_n_object_dead));
- seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
- atomic_read(&fscache_n_checkaux_none),
- atomic_read(&fscache_n_checkaux_okay),
- atomic_read(&fscache_n_checkaux_update),
- atomic_read(&fscache_n_checkaux_obsolete));
-
- seq_printf(m, "Pages : mrk=%u unc=%u\n",
- atomic_read(&fscache_n_marks),
- atomic_read(&fscache_n_uncaches));
-
- seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
- " oom=%u\n",
- atomic_read(&fscache_n_acquires),
- atomic_read(&fscache_n_acquires_null),
- atomic_read(&fscache_n_acquires_no_cache),
- atomic_read(&fscache_n_acquires_ok),
- atomic_read(&fscache_n_acquires_nobufs),
- atomic_read(&fscache_n_acquires_oom));
-
- seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
- atomic_read(&fscache_n_object_lookups),
- atomic_read(&fscache_n_object_lookups_negative),
- atomic_read(&fscache_n_object_lookups_positive),
- atomic_read(&fscache_n_object_created),
- atomic_read(&fscache_n_object_lookups_timed_out));
-
- seq_printf(m, "Invals : n=%u run=%u\n",
- atomic_read(&fscache_n_invalidates),
- atomic_read(&fscache_n_invalidates_run));
-
- seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
- atomic_read(&fscache_n_updates),
- atomic_read(&fscache_n_updates_null),
- atomic_read(&fscache_n_updates_run));
-
- seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
- atomic_read(&fscache_n_relinquishes),
- atomic_read(&fscache_n_relinquishes_null),
- atomic_read(&fscache_n_relinquishes_waitcrt),
- atomic_read(&fscache_n_relinquishes_retire));
-
- seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
- atomic_read(&fscache_n_attr_changed),
- atomic_read(&fscache_n_attr_changed_ok),
- atomic_read(&fscache_n_attr_changed_nobufs),
- atomic_read(&fscache_n_attr_changed_nomem),
- atomic_read(&fscache_n_attr_changed_calls));
-
- seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
- atomic_read(&fscache_n_allocs),
- atomic_read(&fscache_n_allocs_ok),
- atomic_read(&fscache_n_allocs_wait),
- atomic_read(&fscache_n_allocs_nobufs),
- atomic_read(&fscache_n_allocs_intr));
- seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
- atomic_read(&fscache_n_alloc_ops),
- atomic_read(&fscache_n_alloc_op_waits),
- atomic_read(&fscache_n_allocs_object_dead));
-
- seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
- " int=%u oom=%u\n",
- atomic_read(&fscache_n_retrievals),
- atomic_read(&fscache_n_retrievals_ok),
- atomic_read(&fscache_n_retrievals_wait),
- atomic_read(&fscache_n_retrievals_nodata),
- atomic_read(&fscache_n_retrievals_nobufs),
- atomic_read(&fscache_n_retrievals_intr),
- atomic_read(&fscache_n_retrievals_nomem));
- seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
- atomic_read(&fscache_n_retrieval_ops),
- atomic_read(&fscache_n_retrieval_op_waits),
- atomic_read(&fscache_n_retrievals_object_dead));
-
- seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
- atomic_read(&fscache_n_stores),
- atomic_read(&fscache_n_stores_ok),
- atomic_read(&fscache_n_stores_again),
- atomic_read(&fscache_n_stores_nobufs),
- atomic_read(&fscache_n_stores_oom));
- seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
- atomic_read(&fscache_n_store_ops),
- atomic_read(&fscache_n_store_calls),
- atomic_read(&fscache_n_store_pages),
- atomic_read(&fscache_n_store_radix_deletes),
- atomic_read(&fscache_n_store_pages_over_limit));
-
- seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
- atomic_read(&fscache_n_store_vmscan_not_storing),
- atomic_read(&fscache_n_store_vmscan_gone),
- atomic_read(&fscache_n_store_vmscan_busy),
- atomic_read(&fscache_n_store_vmscan_cancelled),
- atomic_read(&fscache_n_store_vmscan_wait));
-
- seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
- atomic_read(&fscache_n_op_pend),
- atomic_read(&fscache_n_op_run),
- atomic_read(&fscache_n_op_enqueue),
- atomic_read(&fscache_n_op_cancelled),
- atomic_read(&fscache_n_op_rejected));
- seq_printf(m, "Ops : ini=%u dfr=%u rel=%u gc=%u\n",
- atomic_read(&fscache_n_op_initialised),
- atomic_read(&fscache_n_op_deferred_release),
- atomic_read(&fscache_n_op_release),
- atomic_read(&fscache_n_op_gc));
-
- seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
- atomic_read(&fscache_n_cop_alloc_object),
- atomic_read(&fscache_n_cop_lookup_object),
- atomic_read(&fscache_n_cop_lookup_complete),
- atomic_read(&fscache_n_cop_grab_object));
- seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n",
- atomic_read(&fscache_n_cop_invalidate_object),
- atomic_read(&fscache_n_cop_update_object),
- atomic_read(&fscache_n_cop_drop_object),
- atomic_read(&fscache_n_cop_put_object),
- atomic_read(&fscache_n_cop_attr_changed),
- atomic_read(&fscache_n_cop_sync_cache));
- seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
- atomic_read(&fscache_n_cop_read_or_alloc_page),
- atomic_read(&fscache_n_cop_read_or_alloc_pages),
- atomic_read(&fscache_n_cop_allocate_page),
- atomic_read(&fscache_n_cop_allocate_pages),
- atomic_read(&fscache_n_cop_write_page),
- atomic_read(&fscache_n_cop_uncache_page),
- atomic_read(&fscache_n_cop_dissociate_pages));
- seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n",
- atomic_read(&fscache_n_cache_no_space_reject),
- atomic_read(&fscache_n_cache_stale_objects),
- atomic_read(&fscache_n_cache_retired_objects),
- atomic_read(&fscache_n_cache_culled_objects));
- netfs_stats_show(m);
- return 0;
-}
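All of the above are plain atomic_t counters. fscache_stat() and fscache_stat_d() come from fs/fscache/internal.h and, roughly speaking, compile to atomic_inc()/atomic_dec() when CONFIG_FSCACHE_STATS is enabled and to no-ops otherwise (the exact definitions may differ in detail). The _d variant exists so that paired calls bracket a backend operation, which is why the CacheOp lines in fscache_stats_show() effectively report how many backend calls are in flight at sampling time. page.c used the pattern like this:

        fscache_stat(&fscache_n_cop_write_page);        /* in-flight count++ */
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);      /* in-flight count-- */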
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 27a890aa493a..f645c99204eb 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -18,50 +18,56 @@
#include "internal.h"
#include "mount.h"
+static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
+ bool *need_free)
+{
+ const char *p;
+ int index;
+
+ if (unlikely(log->head == log->tail))
+ return ERR_PTR(-ENODATA);
+
+ index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
+ p = log->buffer[index];
+ if (unlikely(strlen(p) > len))
+ return ERR_PTR(-EMSGSIZE);
+
+ log->buffer[index] = NULL;
+ *need_free = log->need_free & (1 << index);
+ log->need_free &= ~(1 << index);
+ log->tail++;
+
+ return p;
+}
+
/*
* Allow the user to read back any error, warning or informational messages.
+ * Only one message is returned for each read(2) call.
*/
static ssize_t fscontext_read(struct file *file,
char __user *_buf, size_t len, loff_t *pos)
{
struct fs_context *fc = file->private_data;
- struct fc_log *log = fc->log.log;
- unsigned int logsize = ARRAY_SIZE(log->buffer);
- ssize_t ret;
- char *p;
+ ssize_t err;
+ const char *p __free(kfree) = NULL, *message;
bool need_free;
- int index, n;
-
- ret = mutex_lock_interruptible(&fc->uapi_mutex);
- if (ret < 0)
- return ret;
-
- if (log->head == log->tail) {
- mutex_unlock(&fc->uapi_mutex);
- return -ENODATA;
- }
+ int n;
- index = log->tail & (logsize - 1);
- p = log->buffer[index];
- need_free = log->need_free & (1 << index);
- log->buffer[index] = NULL;
- log->need_free &= ~(1 << index);
- log->tail++;
+ err = mutex_lock_interruptible(&fc->uapi_mutex);
+ if (err < 0)
+ return err;
+ message = fetch_message_locked(fc->log.log, len, &need_free);
mutex_unlock(&fc->uapi_mutex);
+ if (IS_ERR(message))
+ return PTR_ERR(message);
- ret = -EMSGSIZE;
- n = strlen(p);
- if (n > len)
- goto err_free;
- ret = -EFAULT;
- if (copy_to_user(_buf, p, n) != 0)
- goto err_free;
- ret = n;
-
-err_free:
if (need_free)
- kfree(p);
- return ret;
+ p = message;
+
+ n = strlen(message);
+ if (copy_to_user(_buf, message, n))
+ return -EFAULT;
+ return n;
}
static int fscontext_release(struct inode *inode, struct file *file)
@@ -78,7 +84,6 @@ static int fscontext_release(struct inode *inode, struct file *file)
const struct file_operations fscontext_fops = {
.read = fscontext_read,
.release = fscontext_release,
- .llseek = no_llseek,
};
/*
@@ -119,7 +124,7 @@ SYSCALL_DEFINE2(fsopen, const char __user *, _fs_name, unsigned int, flags)
const char *fs_name;
int ret;
- if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ if (!may_mount())
return -EPERM;
if (flags & ~FSOPEN_CLOEXEC)
@@ -162,7 +167,7 @@ SYSCALL_DEFINE3(fspick, int, dfd, const char __user *, path, unsigned int, flags
unsigned int lookup_flags;
int ret;
- if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ if (!may_mount())
return -EPERM;
if ((flags & ~(FSPICK_CLOEXEC |
@@ -209,6 +214,68 @@ err:
return ret;
}
+static int vfs_cmd_create(struct fs_context *fc, bool exclusive)
+{
+ struct super_block *sb;
+ int ret;
+
+ if (fc->phase != FS_CONTEXT_CREATE_PARAMS)
+ return -EBUSY;
+
+ if (!mount_capable(fc))
+ return -EPERM;
+
+ fc->phase = FS_CONTEXT_CREATING;
+ fc->exclusive = exclusive;
+
+ ret = vfs_get_tree(fc);
+ if (ret) {
+ fc->phase = FS_CONTEXT_FAILED;
+ return ret;
+ }
+
+ sb = fc->root->d_sb;
+ ret = security_sb_kern_mount(sb);
+ if (unlikely(ret)) {
+ fc_drop_locked(fc);
+ fc->phase = FS_CONTEXT_FAILED;
+ return ret;
+ }
+
+ /* vfs_get_tree() callchains will have grabbed @s_umount */
+ up_write(&sb->s_umount);
+ fc->phase = FS_CONTEXT_AWAITING_MOUNT;
+ return 0;
+}
+
+static int vfs_cmd_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb;
+ int ret;
+
+ if (fc->phase != FS_CONTEXT_RECONF_PARAMS)
+ return -EBUSY;
+
+ fc->phase = FS_CONTEXT_RECONFIGURING;
+
+ sb = fc->root->d_sb;
+ if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
+ fc->phase = FS_CONTEXT_FAILED;
+ return -EPERM;
+ }
+
+ down_write(&sb->s_umount);
+ ret = reconfigure_super(fc);
+ up_write(&sb->s_umount);
+ if (ret) {
+ fc->phase = FS_CONTEXT_FAILED;
+ return ret;
+ }
+
+ vfs_clean_context(fc);
+ return 0;
+}
+
/*
* Check the state and apply the configuration. Note that this function is
* allowed to 'steal' the value by setting param->xxx to NULL before returning.
@@ -216,7 +283,6 @@ err:
static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
struct fs_parameter *param)
{
- struct super_block *sb;
int ret;
ret = finish_clean_context(fc);
@@ -224,39 +290,11 @@ static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
return ret;
switch (cmd) {
case FSCONFIG_CMD_CREATE:
- if (fc->phase != FS_CONTEXT_CREATE_PARAMS)
- return -EBUSY;
- if (!mount_capable(fc))
- return -EPERM;
- fc->phase = FS_CONTEXT_CREATING;
- ret = vfs_get_tree(fc);
- if (ret)
- break;
- sb = fc->root->d_sb;
- ret = security_sb_kern_mount(sb);
- if (unlikely(ret)) {
- fc_drop_locked(fc);
- break;
- }
- up_write(&sb->s_umount);
- fc->phase = FS_CONTEXT_AWAITING_MOUNT;
- return 0;
+ return vfs_cmd_create(fc, false);
+ case FSCONFIG_CMD_CREATE_EXCL:
+ return vfs_cmd_create(fc, true);
case FSCONFIG_CMD_RECONFIGURE:
- if (fc->phase != FS_CONTEXT_RECONF_PARAMS)
- return -EBUSY;
- fc->phase = FS_CONTEXT_RECONFIGURING;
- sb = fc->root->d_sb;
- if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
- ret = -EPERM;
- break;
- }
- down_write(&sb->s_umount);
- ret = reconfigure_super(fc);
- up_write(&sb->s_umount);
- if (ret)
- break;
- vfs_clean_context(fc);
- return 0;
+ return vfs_cmd_reconfigure(fc);
default:
if (fc->phase != FS_CONTEXT_CREATE_PARAMS &&
fc->phase != FS_CONTEXT_RECONF_PARAMS)
@@ -264,8 +302,6 @@ static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
return vfs_parse_fs_param(fc, param);
}
- fc->phase = FS_CONTEXT_FAILED;
- return ret;
}
/**
@@ -319,7 +355,6 @@ SYSCALL_DEFINE5(fsconfig,
int, aux)
{
struct fs_context *fc;
- struct fd f;
int ret;
int lookup_flags = 0;
@@ -353,6 +388,7 @@ SYSCALL_DEFINE5(fsconfig,
return -EINVAL;
break;
case FSCONFIG_CMD_CREATE:
+ case FSCONFIG_CMD_CREATE_EXCL:
case FSCONFIG_CMD_RECONFIGURE:
if (_key || _value || aux)
return -EINVAL;
@@ -361,31 +397,28 @@ SYSCALL_DEFINE5(fsconfig,
return -EOPNOTSUPP;
}
- f = fdget(fd);
- if (!f.file)
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
return -EBADF;
- ret = -EINVAL;
- if (f.file->f_op != &fscontext_fops)
- goto out_f;
+ if (fd_file(f)->f_op != &fscontext_fops)
+ return -EINVAL;
- fc = f.file->private_data;
+ fc = fd_file(f)->private_data;
if (fc->ops == &legacy_fs_context_ops) {
switch (cmd) {
case FSCONFIG_SET_BINARY:
case FSCONFIG_SET_PATH:
case FSCONFIG_SET_PATH_EMPTY:
case FSCONFIG_SET_FD:
- ret = -EOPNOTSUPP;
- goto out_f;
+ case FSCONFIG_CMD_CREATE_EXCL:
+ return -EOPNOTSUPP;
}
}
if (_key) {
param.key = strndup_user(_key, 256);
- if (IS_ERR(param.key)) {
- ret = PTR_ERR(param.key);
- goto out_f;
- }
+ if (IS_ERR(param.key))
+ return PTR_ERR(param.key);
}
switch (cmd) {
@@ -415,7 +448,7 @@ SYSCALL_DEFINE5(fsconfig,
fallthrough;
case FSCONFIG_SET_PATH:
param.type = fs_value_is_filename;
- param.name = getname_flags(_value, lookup_flags, NULL);
+ param.name = getname_flags(_value, lookup_flags);
if (IS_ERR(param.name)) {
ret = PTR_ERR(param.name);
goto out_key;
@@ -426,9 +459,10 @@ SYSCALL_DEFINE5(fsconfig,
case FSCONFIG_SET_FD:
param.type = fs_value_is_file;
ret = -EBADF;
- param.file = fget(aux);
+ param.file = fget_raw(aux);
if (!param.file)
goto out_key;
+ param.dirfd = aux;
break;
default:
break;
@@ -463,7 +497,5 @@ SYSCALL_DEFINE5(fsconfig,
}
out_key:
kfree(param.key);
-out_f:
- fdput(f);
return ret;
}
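The fsopen.c changes above add FSCONFIG_CMD_CREATE_EXCL and make each read(2) on the context fd return exactly one queued log message. A minimal userspace sketch of the resulting flow follows; it is illustrative only and not part of the patch. The syscall numbers come from <sys/syscall.h>, and older copies of <linux/mount.h> may lack FSCONFIG_CMD_CREATE_EXCL, hence the fallback define.

/*
 * Hedged userspace sketch: create a tmpfs superblock exclusively and, on
 * failure, read back one kernel log message from the context fd.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

#ifndef FSCONFIG_CMD_CREATE_EXCL
#define FSCONFIG_CMD_CREATE_EXCL 8	/* fallback for older uapi headers */
#endif

int main(void)
{
	char msg[512];
	ssize_t n;
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "tmpfs", FSOPEN_CLOEXEC);
	if (fsfd < 0)
		return 1;

	/* Fails with EBUSY instead of silently reusing an existing superblock. */
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE_EXCL, NULL, NULL, 0) < 0) {
		n = read(fsfd, msg, sizeof(msg) - 1);	/* one message per read() */
		if (n > 0) {
			msg[n] = '\0';
			fprintf(stderr, "fsconfig: %s\n", msg);
		}
		close(fsfd);
		return 1;
	}

	mntfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
	close(fsfd);
	return mntfd < 0;
}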
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 40ce9a1c12e5..3a4ae632c94a 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -2,6 +2,7 @@
config FUSE_FS
tristate "FUSE (Filesystem in Userspace) support"
select FS_POSIX_ACL
+ select FS_IOMAP
help
With FUSE it is possible to implement a fully functional filesystem
in a userspace program.
@@ -12,7 +13,7 @@ config FUSE_FS
although chances are your distribution already has that library
installed if you've installed the "fuse" package itself.
- See <file:Documentation/filesystems/fuse.rst> for more information.
+ See <file:Documentation/filesystems/fuse/fuse.rst> for more information.
See <file:Documentation/Changes> for needed library/utility version.
If you want to develop a userspace FS, or if you want to use
@@ -45,10 +46,33 @@ config FUSE_DAX
select INTERVAL_TREE
depends on VIRTIO_FS
depends on FS_DAX
- depends on DAX_DRIVER
+ depends on DAX
help
This allows bypassing guest page cache and allows mapping host page
cache directly in guest address space.
If you want to allow mounting a Virtio Filesystem with the "dax"
option, answer Y.
+
+config FUSE_PASSTHROUGH
+ bool "FUSE passthrough operations support"
+ default y
+ depends on FUSE_FS
+ select FS_STACK
+ help
+ This allows bypassing FUSE server by mapping specific FUSE operations
+ to be performed directly on a backing file.
+
+ If you want to allow passthrough operations, answer Y.
+
+config FUSE_IO_URING
+ bool "FUSE communication over io-uring"
+ default y
+ depends on FUSE_FS
+ depends on IO_URING
+ help
+ This allows sending FUSE requests over the io-uring interface and
+ also adds request core affinity.
+
+ If you want to allow FUSE server/client communication through io-uring,
+ answer Y.
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 0c48b35c058d..22ad9538dfc4 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -3,11 +3,19 @@
# Makefile for the FUSE filesystem.
#
+# Needed for trace events
+ccflags-y = -I$(src)
+
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
-fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
+fuse-y := trace.o # put trace.o first so we see ftrace errors sooner
+fuse-y += dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
+fuse-y += iomode.o
fuse-$(CONFIG_FUSE_DAX) += dax.o
+fuse-$(CONFIG_FUSE_PASSTHROUGH) += passthrough.o backing.o
+fuse-$(CONFIG_SYSCTL) += sysctl.o
+fuse-$(CONFIG_FUSE_IO_URING) += dev_uring.o
virtiofs-y := virtio_fs.o
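For context, the -I$(src) flag and the trace.o object exist so that the tracepoints declared in fuse_trace.h (included by dev.c later in this series) get a single definition site. A conventional sketch of what such a trace.c contains is below; the real file may differ, this is just the standard CREATE_TRACE_POINTS pattern and an assumption about its contents.

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch of the usual tracepoint definition unit: defining
 * CREATE_TRACE_POINTS before including the trace header emits the
 * tracepoint bodies exactly once, which is also why trace.o is listed
 * first so ftrace build errors surface early.
 */
#define CREATE_TRACE_POINTS
#include "fuse_trace.h"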
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
index 337cb29a8dd5..8f484b105f13 100644
--- a/fs/fuse/acl.c
+++ b/fs/fuse/acl.c
@@ -11,9 +11,9 @@
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
+static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
+ struct inode *inode, int type, bool rcu)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
int size;
const char *name;
void *value = NULL;
@@ -25,7 +25,7 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
if (fuse_is_bad(inode))
return ERR_PTR(-EIO);
- if (!fc->posix_acl || fc->no_getxattr)
+ if (fc->no_getxattr)
return NULL;
if (type == ACL_TYPE_ACCESS)
@@ -53,9 +53,49 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
return acl;
}
-int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+static inline bool fuse_no_acl(const struct fuse_conn *fc,
+ const struct inode *inode)
+{
+ /*
+ * Refuse interacting with POSIX ACLs for daemons that
+ * don't support FUSE_POSIX_ACL and are not mounted on
+ * the host to retain backwards compatibility.
+ */
+ return !fc->posix_acl && (i_user_ns(inode) != &init_user_ns);
+}
+
+struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type)
+{
+ struct inode *inode = d_inode(dentry);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ if (fuse_no_acl(fc, inode))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __fuse_get_acl(fc, inode, type, false);
+}
+
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ /*
+ * FUSE daemons before FUSE_POSIX_ACL was introduced could get and set
+ * POSIX ACLs without them being used for permission checking by the
+ * vfs. Retain that behavior for backwards compatibility as there are
+ * filesystems that do all permission checking for acls in the daemon
+ * and not in the kernel.
+ */
+ if (!fc->posix_acl)
+ return NULL;
+ return __fuse_get_acl(fc, inode, type, rcu);
+}
+
+int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
+ struct inode *inode = d_inode(dentry);
struct fuse_conn *fc = get_fuse_conn(inode);
const char *name;
int ret;
@@ -63,7 +103,7 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
if (fuse_is_bad(inode))
return -EIO;
- if (!fc->posix_acl || fc->no_setxattr)
+ if (fc->no_setxattr || fuse_no_acl(fc, inode))
return -EOPNOTSUPP;
if (type == ACL_TYPE_ACCESS)
@@ -98,8 +138,14 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
return ret;
}
- if (!in_group_p(i_gid_into_mnt(&init_user_ns, inode)) &&
- !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID))
+ /*
+ * Fuse daemons without FUSE_POSIX_ACL never changed the passed
+ * through POSIX ACLs. Such daemons don't expect setgid bits to
+ * be stripped.
+ */
+ if (fc->posix_acl &&
+ !in_group_or_capable(idmap, inode,
+ i_gid_into_vfsgid(idmap, inode)))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);
@@ -107,8 +153,15 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
} else {
ret = fuse_removexattr(inode, name);
}
- forget_all_cached_acls(inode);
- fuse_invalidate_attr(inode);
+
+ if (fc->posix_acl) {
+ /*
+ * Fuse daemons without FUSE_POSIX_ACL never cached POSIX ACLs
+ * and didn't invalidate attributes. Retain that behavior.
+ */
+ forget_all_cached_acls(inode);
+ fuse_invalidate_attr(inode);
+ }
return ret;
}
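The split above separates the dentry-based ->get_acl path, which can refuse un-namespaced daemons via fuse_no_acl(), from the ->get_inode_acl path used for in-kernel permission checking. A hedged sketch of how the helpers would be wired into the inode operations (the actual wiring lives in fuse's dir.c, not in this hunk) looks like this:

/*
 * Illustrative only: where the two helpers plug into struct
 * inode_operations; member names follow the current VFS ACL API.
 */
static const struct inode_operations example_fuse_dir_inode_operations = {
	.get_inode_acl	= fuse_get_inode_acl,	/* RCU-capable, permission checks */
	.get_acl	= fuse_get_acl,		/* getxattr()-style lookups */
	.set_acl	= fuse_set_acl,
	/* ... lookup, create, setattr, etc. elided ... */
};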
diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c
new file mode 100644
index 000000000000..4afda419dd14
--- /dev/null
+++ b/fs/fuse/backing.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE passthrough to backing file.
+ *
+ * Copyright (c) 2023 CTERA Networks.
+ */
+
+#include "fuse_i.h"
+
+#include <linux/file.h>
+
+struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
+{
+ if (fb && refcount_inc_not_zero(&fb->count))
+ return fb;
+ return NULL;
+}
+
+static void fuse_backing_free(struct fuse_backing *fb)
+{
+ pr_debug("%s: fb=0x%p\n", __func__, fb);
+
+ if (fb->file)
+ fput(fb->file);
+ put_cred(fb->cred);
+ kfree_rcu(fb, rcu);
+}
+
+void fuse_backing_put(struct fuse_backing *fb)
+{
+ if (fb && refcount_dec_and_test(&fb->count))
+ fuse_backing_free(fb);
+}
+
+void fuse_backing_files_init(struct fuse_conn *fc)
+{
+ idr_init(&fc->backing_files_map);
+}
+
+static int fuse_backing_id_alloc(struct fuse_conn *fc, struct fuse_backing *fb)
+{
+ int id;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&fc->lock);
+ /* FIXME: xarray might be space inefficient */
+ id = idr_alloc_cyclic(&fc->backing_files_map, fb, 1, 0, GFP_ATOMIC);
+ spin_unlock(&fc->lock);
+ idr_preload_end();
+
+ WARN_ON_ONCE(id == 0);
+ return id;
+}
+
+static struct fuse_backing *fuse_backing_id_remove(struct fuse_conn *fc,
+ int id)
+{
+ struct fuse_backing *fb;
+
+ spin_lock(&fc->lock);
+ fb = idr_remove(&fc->backing_files_map, id);
+ spin_unlock(&fc->lock);
+
+ return fb;
+}
+
+static int fuse_backing_id_free(int id, void *p, void *data)
+{
+ struct fuse_backing *fb = p;
+
+ WARN_ON_ONCE(refcount_read(&fb->count) != 1);
+ fuse_backing_free(fb);
+ return 0;
+}
+
+void fuse_backing_files_free(struct fuse_conn *fc)
+{
+ idr_for_each(&fc->backing_files_map, fuse_backing_id_free, NULL);
+ idr_destroy(&fc->backing_files_map);
+}
+
+int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
+{
+ struct file *file;
+ struct super_block *backing_sb;
+ struct fuse_backing *fb = NULL;
+ int res;
+
+ pr_debug("%s: fd=%d flags=0x%x\n", __func__, map->fd, map->flags);
+
+ /* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
+ res = -EPERM;
+ if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
+ goto out;
+
+ res = -EINVAL;
+ if (map->flags || map->padding)
+ goto out;
+
+ file = fget_raw(map->fd);
+ res = -EBADF;
+ if (!file)
+ goto out;
+
+ /* read/write/splice/mmap passthrough only relevant for regular files */
+ res = d_is_dir(file->f_path.dentry) ? -EISDIR : -EINVAL;
+ if (!d_is_reg(file->f_path.dentry))
+ goto out_fput;
+
+ backing_sb = file_inode(file)->i_sb;
+ res = -ELOOP;
+ if (backing_sb->s_stack_depth >= fc->max_stack_depth)
+ goto out_fput;
+
+ fb = kmalloc(sizeof(struct fuse_backing), GFP_KERNEL);
+ res = -ENOMEM;
+ if (!fb)
+ goto out_fput;
+
+ fb->file = file;
+ fb->cred = prepare_creds();
+ refcount_set(&fb->count, 1);
+
+ res = fuse_backing_id_alloc(fc, fb);
+ if (res < 0) {
+ fuse_backing_free(fb);
+ fb = NULL;
+ }
+
+out:
+ pr_debug("%s: fb=0x%p, ret=%i\n", __func__, fb, res);
+
+ return res;
+
+out_fput:
+ fput(file);
+ goto out;
+}
+
+int fuse_backing_close(struct fuse_conn *fc, int backing_id)
+{
+ struct fuse_backing *fb = NULL;
+ int err;
+
+ pr_debug("%s: backing_id=%d\n", __func__, backing_id);
+
+ /* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
+ err = -EPERM;
+ if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
+ goto out;
+
+ err = -EINVAL;
+ if (backing_id <= 0)
+ goto out;
+
+ err = -ENOENT;
+ fb = fuse_backing_id_remove(fc, backing_id);
+ if (!fb)
+ goto out;
+
+ fuse_backing_put(fb);
+ err = 0;
+out:
+ pr_debug("%s: fb=0x%p, err=%i\n", __func__, fb, err);
+
+ return err;
+}
+
+struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc, int backing_id)
+{
+ struct fuse_backing *fb;
+
+ rcu_read_lock();
+ fb = idr_find(&fc->backing_files_map, backing_id);
+ fb = fuse_backing_get(fb);
+ rcu_read_unlock();
+
+ return fb;
+}
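To make the lifetime rules of the new backing-file registry concrete, a hypothetical in-kernel user would pair fuse_backing_lookup() with fuse_backing_put() as below. The real consumers live in passthrough.c, which is not part of this hunk, so treat this as a sketch of the reference-counting pattern only.

/*
 * Hypothetical caller sketch: resolve a backing id sent by the server,
 * use the file while holding a reference, then drop it.
 */
static int example_use_backing(struct fuse_conn *fc, int backing_id)
{
	struct fuse_backing *fb;
	int err = 0;

	fb = fuse_backing_lookup(fc, backing_id);	/* takes a reference */
	if (!fb)
		return -ENOENT;

	if (!fb->file)
		err = -EBADF;
	/* ... perform I/O through fb->file under fb->cred ... */

	fuse_backing_put(fb);				/* drops the reference */
	return err;
}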
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 000d2e5627e9..140bd5730d99 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs_context.h>
+#include <linux/namei.h>
#define FUSE_CTL_SUPER_MAGIC 0x65735543
@@ -164,7 +165,6 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
{
unsigned val;
struct fuse_conn *fc;
- struct fuse_mount *fm;
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
@@ -175,27 +175,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
if (!fc)
goto out;
- down_read(&fc->killsb);
- spin_lock(&fc->bg_lock);
- fc->congestion_threshold = val;
-
- /*
- * Get any fuse_mount belonging to this fuse_conn; s_bdi is
- * shared between all of them
- */
-
- if (!list_empty(&fc->mounts)) {
- fm = list_first_entry(&fc->mounts, struct fuse_mount, fc_entry);
- if (fc->num_background < fc->congestion_threshold) {
- clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
- clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
- } else {
- set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
- set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
- }
- }
- spin_unlock(&fc->bg_lock);
- up_read(&fc->killsb);
+ WRITE_ONCE(fc->congestion_threshold, val);
fuse_conn_put(fc);
out:
return ret;
@@ -204,40 +184,34 @@ out:
static const struct file_operations fuse_ctl_abort_ops = {
.open = nonseekable_open,
.write = fuse_conn_abort_write,
- .llseek = no_llseek,
};
static const struct file_operations fuse_ctl_waiting_ops = {
.open = nonseekable_open,
.read = fuse_conn_waiting_read,
- .llseek = no_llseek,
};
static const struct file_operations fuse_conn_max_background_ops = {
.open = nonseekable_open,
.read = fuse_conn_max_background_read,
.write = fuse_conn_max_background_write,
- .llseek = no_llseek,
};
static const struct file_operations fuse_conn_congestion_threshold_ops = {
.open = nonseekable_open,
.read = fuse_conn_congestion_threshold_read,
.write = fuse_conn_congestion_threshold_write,
- .llseek = no_llseek,
};
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
struct fuse_conn *fc,
- const char *name,
- int mode, int nlink,
+ const char *name, int mode,
const struct inode_operations *iop,
const struct file_operations *fop)
{
struct dentry *dentry;
struct inode *inode;
- BUG_ON(fc->ctl_ndents >= FUSE_CTL_NUM_DENTRIES);
dentry = d_alloc_name(parent, name);
if (!dentry)
return NULL;
@@ -252,17 +226,24 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
inode->i_mode = mode;
inode->i_uid = fc->user_id;
inode->i_gid = fc->group_id;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
/* setting ->i_op to NULL is not allowed */
if (iop)
inode->i_op = iop;
inode->i_fop = fop;
- set_nlink(inode, nlink);
+ if (S_ISDIR(mode)) {
+ inc_nlink(d_inode(parent));
+ inc_nlink(inode);
+ }
inode->i_private = fc;
- d_add(dentry, inode);
-
- fc->ctl_dentry[fc->ctl_ndents++] = dentry;
+ d_make_persistent(dentry, inode);
+ dput(dentry);
+ /*
+ * We are returning a borrowed reference here - it's only good while
+ * fuse_mutex is held. Actually it's d_make_persistent() return
+ * value...
+ */
return dentry;
}
@@ -275,26 +256,25 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
struct dentry *parent;
char name[32];
- if (!fuse_control_sb)
+ if (!fuse_control_sb || fc->no_control)
return 0;
parent = fuse_control_sb->s_root;
- inc_nlink(d_inode(parent));
sprintf(name, "%u", fc->dev);
- parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
+ parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500,
&simple_dir_inode_operations,
&simple_dir_operations);
if (!parent)
goto err;
- if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1,
+ if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400,
NULL, &fuse_ctl_waiting_ops) ||
- !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
+ !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200,
NULL, &fuse_ctl_abort_ops) ||
!fuse_ctl_add_dentry(parent, fc, "max_background", S_IFREG | 0600,
- 1, NULL, &fuse_conn_max_background_ops) ||
+ NULL, &fuse_conn_max_background_ops) ||
!fuse_ctl_add_dentry(parent, fc, "congestion_threshold",
- S_IFREG | 0600, 1, NULL,
+ S_IFREG | 0600, NULL,
&fuse_conn_congestion_threshold_ops))
goto err;
@@ -305,27 +285,24 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
return -ENOMEM;
}
+static void remove_one(struct dentry *dentry)
+{
+ d_inode(dentry)->i_private = NULL;
+}
+
/*
* Remove a connection from the control filesystem (if it exists).
* Caller must hold fuse_mutex
*/
void fuse_ctl_remove_conn(struct fuse_conn *fc)
{
- int i;
+ char name[32];
- if (!fuse_control_sb)
+ if (!fuse_control_sb || fc->no_control)
return;
- for (i = fc->ctl_ndents - 1; i >= 0; i--) {
- struct dentry *dentry = fc->ctl_dentry[i];
- d_inode(dentry)->i_private = NULL;
- if (!i) {
- /* Get rid of submounts: */
- d_invalidate(dentry);
- }
- dput(dentry);
- }
- drop_nlink(d_inode(fuse_control_sb->s_root));
+ sprintf(name, "%u", fc->dev);
+ simple_remove_by_name(fuse_control_sb->s_root, name, remove_one);
}
static int fuse_ctl_fill_super(struct super_block *sb, struct fs_context *fsc)
@@ -371,15 +348,11 @@ static int fuse_ctl_init_fs_context(struct fs_context *fsc)
static void fuse_ctl_kill_sb(struct super_block *sb)
{
- struct fuse_conn *fc;
-
mutex_lock(&fuse_mutex);
fuse_control_sb = NULL;
- list_for_each_entry(fc, &fuse_conn_list, entry)
- fc->ctl_ndents = 0;
mutex_unlock(&fuse_mutex);
- kill_litter_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type fuse_ctl_fs_type = {
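Since congestion_threshold updates are now a plain WRITE_ONCE() with no bdi side effects, the knob is still driven the same way from userspace through the fuse control filesystem. A small sketch follows, assuming fusectl is mounted at the conventional /sys/fs/fuse/connections and <dev> is the connection's device number; the path layout is an assumption, not something this hunk defines.

/*
 * Userspace sketch, illustrative only: raise congestion_threshold for one
 * connection via fusectl.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_congestion_threshold(unsigned int dev, unsigned int val)
{
	char path[64], buf[16];
	int fd, len, ret;

	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%u/congestion_threshold", dev);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%u", val);
	ret = (write(fd, buf, len) == len) ? 0 : -1;
	close(fd);
	return ret;
}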
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index c7d882a9fe33..28c96961e85d 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -52,6 +52,7 @@
#include <linux/user_namespace.h>
#include "fuse_i.h"
+#include "fuse_dev_i.h"
#define CUSE_CONNTBL_LEN 64
@@ -256,7 +257,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
}
/**
- * cuse_parse_dev_info - parse device info
+ * cuse_parse_devinfo - parse device info
* @p: device info string
* @len: length of device info string
* @devinfo: out parameter for parsed device info
@@ -303,13 +304,17 @@ struct cuse_init_args {
struct fuse_args_pages ap;
struct cuse_init_in in;
struct cuse_init_out out;
- struct page *page;
- struct fuse_page_desc desc;
+ struct folio *folio;
+ struct fuse_folio_desc desc;
};
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
+ * @fm: The fuse mount information containing the CUSE connection.
+ * @args: The arguments passed to the init reply.
+ * @error: The error code returned for the init request, if any.
+ *
* This function creates the character device and sets up all the
* required data structures for it. Please read the comment at the
* top of this file for high level overview.
@@ -322,7 +327,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm,
struct fuse_args_pages *ap = &ia->ap;
struct cuse_conn *cc = fc_to_cc(fc), *pos;
struct cuse_init_out *arg = &ia->out;
- struct page *page = ap->pages[0];
+ struct folio *folio = ap->folios[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
struct cdev *cdev;
@@ -339,7 +344,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm,
/* parse init reply */
cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL;
- rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size,
+ rc = cuse_parse_devinfo(folio_address(folio), ap->args.out_args[1].size,
&devinfo);
if (rc)
goto err;
@@ -407,7 +412,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm,
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
kfree(ia);
- __free_page(page);
+ folio_put(folio);
return;
err_cdev:
@@ -425,7 +430,7 @@ err:
static int cuse_send_init(struct cuse_conn *cc)
{
int rc;
- struct page *page;
+ struct folio *folio;
struct fuse_mount *fm = &cc->fm;
struct cuse_init_args *ia;
struct fuse_args_pages *ap;
@@ -433,13 +438,14 @@ static int cuse_send_init(struct cuse_conn *cc)
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
rc = -ENOMEM;
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+
+ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!folio)
goto err;
ia = kzalloc(sizeof(*ia), GFP_KERNEL);
if (!ia)
- goto err_free_page;
+ goto err_free_folio;
ap = &ia->ap;
ia->in.major = FUSE_KERNEL_VERSION;
@@ -455,18 +461,18 @@ static int cuse_send_init(struct cuse_conn *cc)
ap->args.out_args[1].size = CUSE_INIT_INFO_MAX;
ap->args.out_argvar = true;
ap->args.out_pages = true;
- ap->num_pages = 1;
- ap->pages = &ia->page;
+ ap->num_folios = 1;
+ ap->folios = &ia->folio;
ap->descs = &ia->desc;
- ia->page = page;
+ ia->folio = folio;
ia->desc.length = ap->args.out_args[1].size;
ap->args.end = cuse_process_init_reply;
rc = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
if (rc) {
kfree(ia);
-err_free_page:
- __free_page(page);
+err_free_folio:
+ folio_put(folio);
}
err:
return rc;
@@ -474,8 +480,7 @@ err:
static void cuse_fc_release(struct fuse_conn *fc)
{
- struct cuse_conn *cc = fc_to_cc(fc);
- kfree_rcu(cc, fc.rcu);
+ kfree(fc_to_cc(fc));
}
/**
@@ -543,9 +548,8 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
*/
static int cuse_channel_release(struct inode *inode, struct file *file)
{
- struct fuse_dev *fud = file->private_data;
+ struct fuse_dev *fud = __fuse_get_dev(file);
struct cuse_conn *cc = fc_to_cc(fud->fc);
- int rc;
/* remove from the conntbl, no more access from this point on */
mutex_lock(&cuse_lock);
@@ -560,9 +564,7 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
cdev_del(cc->cdev);
}
- rc = fuse_dev_release(inode, file); /* puts the base reference */
-
- return rc;
+ return fuse_dev_release(inode, file);
}
static struct file_operations cuse_channel_fops; /* initialized during init */
@@ -626,7 +628,7 @@ static int __init cuse_init(void)
/* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
cuse_channel_fops.unlocked_ioctl = NULL;
- cuse_class = class_create(THIS_MODULE, "cuse");
+ cuse_class = class_create("cuse");
if (IS_ERR(cuse_class))
return PTR_ERR(cuse_class);
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 713818d74de6..ac6d4c1064cc 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -10,7 +10,6 @@
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
-#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>
@@ -138,9 +137,9 @@ static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
WARN_ON(fcd->nr_free_ranges <= 0);
fcd->nr_free_ranges--;
}
+ __kick_dmap_free_worker(fcd, 0);
spin_unlock(&fcd->lock);
- kick_dmap_free_worker(fcd, 0);
return dmap;
}
@@ -240,11 +239,12 @@ static int fuse_send_removemapping(struct inode *inode,
args.opcode = FUSE_REMOVEMAPPING;
args.nodeid = fi->nodeid;
- args.in_numargs = 2;
- args.in_args[0].size = sizeof(*inargp);
- args.in_args[0].value = inargp;
- args.in_args[1].size = inargp->count * sizeof(*remove_one);
- args.in_args[1].value = remove_one;
+ args.in_numargs = 3;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = sizeof(*inargp);
+ args.in_args[1].value = inargp;
+ args.in_args[2].size = inargp->count * sizeof(*remove_one);
+ args.in_args[2].value = remove_one;
return fuse_simple_request(fm, &args);
}
@@ -665,36 +665,12 @@ static void fuse_wait_dax_page(struct inode *inode)
filemap_invalidate_lock(inode->i_mapping);
}
-/* Should be called with mapping->invalidate_lock held exclusively */
-static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
- loff_t start, loff_t end)
-{
- struct page *page;
-
- page = dax_layout_busy_page_range(inode->i_mapping, start, end);
- if (!page)
- return 0;
-
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, fuse_wait_dax_page(inode));
-}
-
-/* dmap_end == 0 leads to unmapping of whole file */
+/* Should be called with mapping->invalidate_lock held exclusively. */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
u64 dmap_end)
{
- bool retry;
- int ret;
-
- do {
- retry = false;
- ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
- dmap_end);
- } while (ret == 0 && retry);
-
- return ret;
+ return dax_break_layout(inode, dmap_start, dmap_end,
+ fuse_wait_dax_page);
}
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -774,23 +750,13 @@ out:
return ret;
}
-static int fuse_dax_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
-
- struct inode *inode = mapping->host;
- struct fuse_conn *fc = get_fuse_conn(inode);
-
- return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
-}
-
-static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, bool write)
+static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
+ bool write)
{
vm_fault_t ret;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct super_block *sb = inode->i_sb;
- pfn_t pfn;
+ unsigned long pfn;
int error = 0;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_conn_dax *fcd = fc->dax;
@@ -809,7 +775,7 @@ retry:
* to populate page cache or access memory we are trying to free.
*/
filemap_invalidate_lock_shared(inode->i_mapping);
- ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
+ ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
error = 0;
retry = true;
@@ -818,7 +784,7 @@ retry:
}
if (ret & VM_FAULT_NEEDDSYNC)
- ret = dax_finish_sync_fault(vmf, pe_size, pfn);
+ ret = dax_finish_sync_fault(vmf, order, pfn);
filemap_invalidate_unlock_shared(inode->i_mapping);
if (write)
@@ -829,24 +795,22 @@ retry:
static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
{
- return __fuse_dax_fault(vmf, PE_SIZE_PTE,
- vmf->flags & FAULT_FLAG_WRITE);
+ return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
}
-static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
+static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
- return __fuse_dax_fault(vmf, pe_size, vmf->flags & FAULT_FLAG_WRITE);
+ return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
}
static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
{
- return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
+ return __fuse_dax_fault(vmf, 0, true);
}
static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
{
- return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
+ return __fuse_dax_fault(vmf, 0, true);
}
static const struct vm_operations_struct fuse_dax_vm_ops = {
@@ -860,7 +824,7 @@ int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &fuse_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
return 0;
}
@@ -1224,6 +1188,7 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
if (fc->dax) {
fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
kfree(fc->dax);
+ fc->dax = NULL;
}
}
@@ -1241,8 +1206,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
id = dax_read_lock();
- nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
- NULL);
+ nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
+ DAX_ACCESS, NULL, NULL);
dax_read_unlock(id);
if (nr_pages < 0) {
pr_debug("dax_direct_access() returned %ld\n", nr_pages);
@@ -1279,11 +1244,14 @@ out_err:
return ret;
}
-int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
+int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
+ struct dax_device *dax_dev)
{
struct fuse_conn_dax *fcd;
int err;
+ fc->dax_mode = dax_mode;
+
if (!dax_dev)
return 0;
@@ -1321,23 +1289,50 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
}
static const struct address_space_operations fuse_dax_file_aops = {
- .writepages = fuse_dax_writepages,
.direct_IO = noop_direct_IO,
- .set_page_dirty = __set_page_dirty_no_writeback,
- .invalidatepage = noop_invalidatepage,
+ .dirty_folio = noop_dirty_folio,
};
-void fuse_dax_inode_init(struct inode *inode)
+static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
{
struct fuse_conn *fc = get_fuse_conn(inode);
+ enum fuse_dax_mode dax_mode = fc->dax_mode;
+ if (dax_mode == FUSE_DAX_NEVER)
+ return false;
+
+ /*
+ * fc->dax may be NULL in 'inode' mode when filesystem device doesn't
+ * support DAX, in which case it will silently fallback to 'never' mode.
+ */
if (!fc->dax)
+ return false;
+
+ if (dax_mode == FUSE_DAX_ALWAYS)
+ return true;
+
+ /* dax_mode is FUSE_DAX_INODE* */
+ return fc->inode_dax && (flags & FUSE_ATTR_DAX);
+}
+
+void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
+{
+ if (!fuse_should_enable_dax(inode, flags))
return;
inode->i_flags |= S_DAX;
inode->i_data.a_ops = &fuse_dax_file_aops;
}
+void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ if (fuse_is_inode_dax_mode(fc->dax_mode) &&
+ ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
+ d_mark_dontcache(inode);
+}
+
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
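fuse_dax_break_layouts() is now a thin wrapper around dax_break_layout(), but its locking contract is unchanged. A hedged sketch of the expected caller pattern follows; the real callers sit in fuse's file and inode paths outside this hunk, so the helper name and range choice here are assumptions for illustration.

/*
 * Hypothetical truncate-preparation helper: hold the invalidate lock
 * exclusively, then wait out any DAX-busy pages in the affected range.
 */
static int example_dax_truncate_prep(struct inode *inode, loff_t newsize)
{
	int err;

	filemap_invalidate_lock(inode->i_mapping);
	err = fuse_dax_break_layouts(inode, newsize, LLONG_MAX);
	if (err)
		filemap_invalidate_unlock(inode->i_mapping);
	/* on success the caller keeps the lock across the size change */
	return err;
}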
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 79f7eda49e06..6d59cbc877c6 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -6,7 +6,9 @@
See the file COPYING.
*/
+#include "dev_uring_i.h"
#include "fuse_i.h"
+#include "fuse_dev_i.h"
#include <linux/init.h>
#include <linux/module.h>
@@ -21,23 +23,107 @@
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
+
+#include "fuse_trace.h"
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
-/* Ordinary requests have even IDs, while interrupts IDs are odd */
-#define FUSE_INT_REQ_BIT (1ULL << 0)
-#define FUSE_REQ_ID_STEP (1ULL << 1)
-
static struct kmem_cache *fuse_req_cachep;
-static struct fuse_dev *fuse_get_dev(struct file *file)
+const unsigned long fuse_timeout_timer_freq =
+ secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
{
- /*
- * Lockless access is OK, because file->private data is set
- * once during mount and is valid until the file is released.
- */
- return READ_ONCE(file->private_data);
+ struct fuse_req *req;
+
+ req = list_first_entry_or_null(list, struct fuse_req, list);
+ if (!req)
+ return false;
+ return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
+}
+
+static bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+{
+ int i;
+
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ if (fuse_request_expired(fc, &processing[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if any requests aren't being completed by the time the request timeout
+ * elapses. To do so, we:
+ * - check the fiq pending list
+ * - check the bg queue
+ * - check the fpq io and processing lists
+ *
+ * To make this fast, we only check against the head request on each list since
+ * these are generally queued in order of creation time (eg newer requests get
+ * queued to the tail). We might miss a few edge cases (eg requests transitioning
+ * between lists, re-sent requests at the head of the pending list having a
+ * later creation time than other requests on that list, etc.) but that is fine
+ * since if the request never gets fulfilled, it will eventually be caught.
+ */
+void fuse_check_timeout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
+ timeout.work);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_dev *fud;
+ struct fuse_pqueue *fpq;
+ bool expired = false;
+
+ if (!atomic_read(&fc->num_waiting))
+ goto out;
+
+ spin_lock(&fiq->lock);
+ expired = fuse_request_expired(fc, &fiq->pending);
+ spin_unlock(&fiq->lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->bg_lock);
+ expired = fuse_request_expired(fc, &fc->bg_queue);
+ spin_unlock(&fc->bg_lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+ list_for_each_entry(fud, &fc->devices, entry) {
+ fpq = &fud->pq;
+ spin_lock(&fpq->lock);
+ if (fuse_request_expired(fc, &fpq->io) ||
+ fuse_fpq_processing_expired(fc, fpq->processing)) {
+ spin_unlock(&fpq->lock);
+ spin_unlock(&fc->lock);
+ goto abort_conn;
+ }
+
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
+ if (fuse_uring_request_expired(fc))
+ goto abort_conn;
+
+out:
+ queue_delayed_work(system_percpu_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+ return;
+
+abort_conn:
+ fuse_abort_conn(fc);
}
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
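The checker above only inspects list heads, so it stays cheap even with many in-flight requests. How it gets armed is outside this hunk; a hedged sketch of the expected setup, using only symbols visible in this patch, might look like the following (the real arming code lives in fuse's init path):

/*
 * Hypothetical arming helper: convert the configured timeout to jiffies
 * and start the periodic checker defined above.
 */
static void example_arm_request_timeout(struct fuse_conn *fc, unsigned int secs)
{
	if (!secs)
		return;		/* request timeouts disabled */

	fc->timeout.req_timeout = secs_to_jiffies(secs);
	INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
	queue_delayed_work(system_percpu_wq, &fc->timeout.work,
			   fuse_timeout_timer_freq);
}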
@@ -48,6 +134,7 @@ static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
refcount_set(&req->count, 1);
__set_bit(FR_PENDING, &req->flags);
req->fm = fm;
+ req->create_time = jiffies;
}
static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
@@ -84,7 +171,8 @@ void fuse_set_initialized(struct fuse_conn *fc)
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
- return !fc->initialized || (for_background && fc->blocked);
+ return !fc->initialized || (for_background && fc->blocked) ||
+ (fc->io_uring && fc->connected && !fuse_uring_ready(fc));
}
static void fuse_drop_waiting(struct fuse_conn *fc)
@@ -103,17 +191,24 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
static void fuse_put_request(struct fuse_req *req);
-static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
+static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ bool for_background)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
+ bool no_idmap = !fm->sb || (fm->sb->s_iflags & SB_I_NOIDMAP);
+ kuid_t fsuid;
+ kgid_t fsgid;
int err;
+
atomic_inc(&fc->num_waiting);
if (fuse_block_alloc(fc, for_background)) {
err = -EINTR;
- if (wait_event_killable_exclusive(fc->blocked_waitq,
- !fuse_block_alloc(fc, for_background)))
+ if (wait_event_state_exclusive(fc->blocked_waitq,
+ !fuse_block_alloc(fc, for_background),
+ (TASK_KILLABLE | TASK_FREEZABLE)))
goto out;
}
/* Matches smp_wmb() in fuse_set_initialized() */
@@ -135,19 +230,32 @@ static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
goto out;
}
- req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
__set_bit(FR_WAITING, &req->flags);
if (for_background)
__set_bit(FR_BACKGROUND, &req->flags);
- if (unlikely(req->in.h.uid == ((uid_t)-1) ||
- req->in.h.gid == ((gid_t)-1))) {
+ /*
+ * Keep the old behavior when idmappings support was not
+ * declared by a FUSE server.
+ *
+ * For those FUSE servers who support idmapped mounts,
+ * we send UID/GID only along with "inode creation"
+ * fuse requests, otherwise idmap == &invalid_mnt_idmap and
+ * req->in.h.{u,g}id will be equal to FUSE_INVALID_UIDGID.
+ */
+ fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns);
+ fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns);
+ req->in.h.uid = from_kuid(fc->user_ns, fsuid);
+ req->in.h.gid = from_kgid(fc->user_ns, fsgid);
+
+ if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) ||
+ req->in.h.gid == ((gid_t)-1))) {
fuse_put_request(req);
return ERR_PTR(-EOVERFLOW);
}
+
return req;
out:
@@ -192,19 +300,31 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
}
EXPORT_SYMBOL_GPL(fuse_len_args);
-u64 fuse_get_unique(struct fuse_iqueue *fiq)
+static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
+{
+ u64 ret;
+
+ spin_lock(&fiq->lock);
+ ret = fuse_get_unique_locked(fiq);
+ spin_unlock(&fiq->lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(fuse_get_unique);
-static unsigned int fuse_req_hash(u64 unique)
+unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
+EXPORT_SYMBOL_GPL(fuse_req_hash);
-/**
+/*
* A new request is available, wake fiq->waitq
*/
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
@@ -215,22 +335,90 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fiq->forget_list_tail->next = forget;
+ fiq->forget_list_tail = forget;
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ kfree(forget);
+ spin_unlock(&fiq->lock);
+ }
+}
+
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (list_empty(&req->intr_entry)) {
+ list_add_tail(&req->intr_entry, &fiq->interrupts);
+ /*
+ * Pairs with smp_mb() implied by test_and_set_bit()
+ * from fuse_request_end().
+ */
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->lock);
+ } else {
+ fuse_dev_wake_and_unlock(fiq);
+ }
+ } else {
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static inline void fuse_request_assign_unique_locked(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+{
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique_locked(fiq);
+
+ /* tracepoint captures in.h.unique and in.h.len */
+ trace_fuse_request_send(req);
+}
+
+inline void fuse_request_assign_unique(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+{
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
+ /* tracepoint captures in.h.unique and in.h.len */
+ trace_fuse_request_send(req);
+}
+EXPORT_SYMBOL_GPL(fuse_request_assign_unique);
+
+static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fuse_request_assign_unique_locked(fiq, req);
+ list_add_tail(&req->list, &fiq->pending);
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
+ req->out.h.error = -ENOTCONN;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+ }
+}
+
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
- .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
- .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
- .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+ .send_forget = fuse_dev_queue_forget,
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_dev_queue_req,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
-static void queue_request_and_unlock(struct fuse_iqueue *fiq,
- struct fuse_req *req)
-__releases(fiq->lock)
+static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
- list_add_tail(&req->list, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ fiq->ops->send_req(fiq, req);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -241,15 +429,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- fiq->forget_list_tail->next = forget;
- fiq->forget_list_tail = forget;
- fiq->ops->wake_forget_and_unlock(fiq);
- } else {
- kfree(forget);
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_forget(fiq, forget);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -263,9 +443,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->lock);
- req->in.h.unique = fuse_get_unique(fiq);
- queue_request_and_unlock(fiq, req);
+ fuse_send_one(fiq, req);
}
}
@@ -286,6 +464,7 @@ void fuse_request_end(struct fuse_req *req)
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
+ trace_fuse_request_end(req);
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below FR_INTERRUPTED check. Pairs with
@@ -315,10 +494,6 @@ void fuse_request_end(struct fuse_req *req)
wake_up(&fc->blocked_waitq);
}
- if (fc->num_background == fc->congestion_threshold && fm->sb) {
- clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
- clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
- }
fc->num_background--;
fc->active_background--;
flush_bg_queue(fc);
@@ -339,30 +514,31 @@ static int queue_interrupt(struct fuse_req *req)
{
struct fuse_iqueue *fiq = &req->fm->fc->iq;
- spin_lock(&fiq->lock);
/* Check for we've sent request to interrupt this req */
- if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->lock);
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
return -EINVAL;
- }
- if (list_empty(&req->intr_entry)) {
- list_add_tail(&req->intr_entry, &fiq->interrupts);
+ fiq->ops->send_interrupt(fiq, req);
+
+ return 0;
+}
+
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
+{
+ spin_lock(lock);
+ if (test_bit(FR_PENDING, &req->flags)) {
/*
- * Pairs with smp_mb() implied by test_and_set_bit()
- * from fuse_request_end().
+ * FR_PENDING does not get cleared as the request will end
+ * up in destruction anyway.
*/
- smp_mb();
- if (test_bit(FR_FINISHED, &req->flags)) {
- list_del_init(&req->intr_entry);
- spin_unlock(&fiq->lock);
- return 0;
- }
- fiq->ops->wake_interrupt_and_unlock(fiq);
- } else {
- spin_unlock(&fiq->lock);
+ list_del(&req->list);
+ spin_unlock(lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return true;
}
- return 0;
+ spin_unlock(lock);
+ return false;
}
static void request_wait_answer(struct fuse_req *req)
@@ -386,22 +562,20 @@ static void request_wait_answer(struct fuse_req *req)
}
if (!test_bit(FR_FORCE, &req->flags)) {
+ bool removed;
+
/* Only fatal signals may interrupt this */
err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
- spin_lock(&fiq->lock);
- /* Request is not yet in userspace, bail out */
- if (test_bit(FR_PENDING, &req->flags)) {
- list_del(&req->list);
- spin_unlock(&fiq->lock);
- __fuse_put_request(req);
- req->out.h.error = -EINTR;
+ if (test_bit(FR_URING, &req->flags))
+ removed = fuse_uring_remove_pending_req(req);
+ else
+ removed = fuse_remove_pending_req(req, &fiq->lock);
+ if (removed)
return;
- }
- spin_unlock(&fiq->lock);
}
/*
@@ -416,21 +590,15 @@ static void __fuse_request_send(struct fuse_req *req)
struct fuse_iqueue *fiq = &req->fm->fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->lock);
- if (!fiq->connected) {
- spin_unlock(&fiq->lock);
- req->out.h.error = -ENOTCONN;
- } else {
- req->in.h.unique = fuse_get_unique(fiq);
- /* acquire extra reference, since request is still needed
- after fuse_request_end() */
- __fuse_get_request(req);
- queue_request_and_unlock(fiq, req);
- request_wait_answer(req);
- /* Pairs with smp_wmb() in fuse_request_end() */
- smp_rmb();
- }
+ /* acquire extra reference, since request is still needed after
+ fuse_request_end() */
+ __fuse_get_request(req);
+ fuse_send_one(fiq, req);
+
+ request_wait_answer(req);
+ /* Pairs with smp_wmb() in fuse_request_end() */
+ smp_rmb();
}
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -470,8 +638,14 @@ static void fuse_force_creds(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) {
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ } else {
+ req->in.h.uid = FUSE_INVALID_UIDGID;
+ req->in.h.gid = FUSE_INVALID_UIDGID;
+ }
+
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}
@@ -480,11 +654,15 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
req->in.h.opcode = args->opcode;
req->in.h.nodeid = args->nodeid;
req->args = args;
+ if (args->is_ext)
+ req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
if (args->end)
__set_bit(FR_ASYNC, &req->flags);
}
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
@@ -501,7 +679,7 @@ ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
__set_bit(FR_FORCE, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -523,7 +701,25 @@ ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
return ret;
}
-static bool fuse_request_queue_background(struct fuse_req *req)
+#ifdef CONFIG_FUSE_IO_URING
+static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
+ struct fuse_req *req)
+{
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ fuse_len_args(req->args->in_numargs,
+ (struct fuse_arg *) req->args->in_args);
+ fuse_request_assign_unique(fiq, req);
+
+ return fuse_uring_queue_bq_req(req);
+}
+#endif
+
+/*
+ * @return true if queued
+ */
+static int fuse_request_queue_background(struct fuse_req *req)
{
struct fuse_mount *fm = req->fm;
struct fuse_conn *fc = fm->fc;
@@ -535,15 +731,17 @@ static bool fuse_request_queue_background(struct fuse_req *req)
atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
+
+#ifdef CONFIG_FUSE_IO_URING
+ if (fuse_uring_ready(fc))
+ return fuse_request_queue_background_uring(fc, req);
+#endif
+
spin_lock(&fc->bg_lock);
if (likely(fc->connected)) {
fc->num_background++;
if (fc->num_background == fc->max_background)
fc->blocked = 1;
- if (fc->num_background == fc->congestion_threshold && fm->sb) {
- set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
- set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
- }
list_add_tail(&req->list, &fc->bg_queue);
flush_bg_queue(fc);
queued = true;
@@ -566,7 +764,7 @@ int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
__set_bit(FR_BACKGROUND, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, true);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, true);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -587,9 +785,8 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
{
struct fuse_req *req;
struct fuse_iqueue *fiq = &fm->fc->iq;
- int err = 0;
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -598,16 +795,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
fuse_args_to_req(req, args);
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- queue_request_and_unlock(fiq, req);
- } else {
- err = -ENODEV;
- spin_unlock(&fiq->lock);
- fuse_put_request(req);
- }
+ fuse_send_one(fiq, req);
- return err;
+ return 0;
}
/*
@@ -647,22 +837,8 @@ static int unlock_request(struct fuse_req *req)
return err;
}
-struct fuse_copy_state {
- int write;
- struct fuse_req *req;
- struct iov_iter *iter;
- struct pipe_buffer *pipebufs;
- struct pipe_buffer *currbuf;
- struct pipe_inode_info *pipe;
- unsigned long nr_segs;
- struct page *pg;
- unsigned len;
- unsigned offset;
- unsigned move_pages:1;
-};
-
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
- struct iov_iter *iter)
+void fuse_copy_init(struct fuse_copy_state *cs, bool write,
+ struct iov_iter *iter)
{
memset(cs, 0, sizeof(*cs));
cs->write = write;
@@ -670,7 +846,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write,
}
/* Unmap and put previous page of userspace buffer */
-static void fuse_copy_finish(struct fuse_copy_state *cs)
+void fuse_copy_finish(struct fuse_copy_state *cs)
{
if (cs->currbuf) {
struct pipe_buffer *buf = cs->currbuf;
@@ -738,14 +914,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
}
} else {
size_t off;
- err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
+ err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
if (err < 0)
return err;
BUG_ON(!err);
cs->len = err;
cs->offset = off;
cs->pg = page;
- iov_iter_advance(cs->iter, err);
}
return lock_request(cs->req);
@@ -770,36 +945,45 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
*size -= ncpy;
cs->len -= ncpy;
cs->offset += ncpy;
+ if (cs->is_uring)
+ cs->ring.copied_sz += ncpy;
+
return ncpy;
}
-static int fuse_check_page(struct page *page)
+static int fuse_check_folio(struct folio *folio)
{
- if (page_mapcount(page) ||
- page->mapping != NULL ||
- (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
+ if (folio_mapped(folio) ||
+ folio->mapping != NULL ||
+ (folio->flags.f & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
- 1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
1 << PG_workingset |
1 << PG_reclaim |
- 1 << PG_waiters))) {
- dump_page(page, "fuse: trying to steal weird page");
+ 1 << PG_waiters |
+ LRU_GEN_MASK | LRU_REFS_MASK))) {
+ dump_page(&folio->page, "fuse: trying to steal weird page");
return 1;
}
return 0;
}
-static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+/*
+ * Attempt to steal a page from the splice() pipe and move it into the
+ * pagecache. If successful, the pointer in @pagep will be updated. The
+ * folio that was originally in @pagep will lose a reference and the new
+ * folio returned in @pagep will carry a reference.
+ */
+static int fuse_try_move_folio(struct fuse_copy_state *cs, struct folio **foliop)
{
int err;
- struct page *oldpage = *pagep;
- struct page *newpage;
+ struct folio *oldfolio = *foliop;
+ struct folio *newfolio;
struct pipe_buffer *buf = cs->pipebufs;
- get_page(oldpage);
+ folio_get(oldfolio);
err = unlock_request(cs->req);
if (err)
goto out_put_old;
@@ -816,36 +1000,40 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
cs->pipebufs++;
cs->nr_segs--;
- if (cs->len != PAGE_SIZE)
+ if (cs->len != folio_size(oldfolio))
goto out_fallback;
if (!pipe_buf_try_steal(cs->pipe, buf))
goto out_fallback;
- newpage = buf->page;
+ newfolio = page_folio(buf->page);
- if (!PageUptodate(newpage))
- SetPageUptodate(newpage);
+ folio_clear_uptodate(newfolio);
+ folio_clear_mappedtodisk(newfolio);
- ClearPageMappedToDisk(newpage);
-
- if (fuse_check_page(newpage) != 0)
+ if (fuse_check_folio(newfolio) != 0)
goto out_fallback_unlock;
/*
* This is a new and locked page, it shouldn't be mapped or
* have any special flags on it
*/
- if (WARN_ON(page_mapped(oldpage)))
+ if (WARN_ON(folio_mapped(oldfolio)))
goto out_fallback_unlock;
- if (WARN_ON(page_has_private(oldpage)))
+ if (WARN_ON(folio_has_private(oldfolio)))
goto out_fallback_unlock;
- if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
+ if (WARN_ON(folio_test_dirty(oldfolio) ||
+ folio_test_writeback(oldfolio)))
goto out_fallback_unlock;
- if (WARN_ON(PageMlocked(oldpage)))
+ if (WARN_ON(folio_test_mlocked(oldfolio)))
goto out_fallback_unlock;
- replace_page_cache_page(oldpage, newpage);
+ replace_page_cache_folio(oldfolio, newfolio);
+
+ folio_get(newfolio);
+
+ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+ folio_add_lru(newfolio);
/*
* Release while we have extra ref on stolen page. Otherwise
@@ -853,38 +1041,33 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
*/
pipe_buf_release(cs->pipe, buf);
- get_page(newpage);
-
- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
- lru_cache_add(newpage);
-
err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
err = -ENOENT;
else
- *pagep = newpage;
+ *foliop = newfolio;
spin_unlock(&cs->req->waitq.lock);
if (err) {
- unlock_page(newpage);
- put_page(newpage);
+ folio_unlock(newfolio);
+ folio_put(newfolio);
goto out_put_old;
}
- unlock_page(oldpage);
+ folio_unlock(oldfolio);
/* Drop ref for ap->pages[] array */
- put_page(oldpage);
+ folio_put(oldfolio);
cs->len = 0;
err = 0;
out_put_old:
/* Drop ref obtained in this function */
- put_page(oldpage);
+ folio_put(oldfolio);
return err;
out_fallback_unlock:
- unlock_page(newpage);
+ folio_unlock(newfolio);
out_fallback:
cs->pg = buf->page;
cs->offset = buf->offset;
@@ -896,8 +1079,8 @@ out_fallback:
goto out_put_old;
}
-static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
- unsigned offset, unsigned count)
+static int fuse_ref_folio(struct fuse_copy_state *cs, struct folio *folio,
+ unsigned offset, unsigned count)
{
struct pipe_buffer *buf;
int err;
@@ -905,17 +1088,17 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
if (cs->nr_segs >= cs->pipe->max_usage)
return -EIO;
- get_page(page);
+ folio_get(folio);
err = unlock_request(cs->req);
if (err) {
- put_page(page);
+ folio_put(folio);
return err;
}
fuse_copy_finish(cs);
buf = cs->pipebufs;
- buf->page = page;
+ buf->page = &folio->page;
buf->offset = offset;
buf->len = count;
@@ -927,25 +1110,39 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
}
/*
- * Copy a page in the request to/from the userspace buffer. Must be
+ * Copy a folio in the request to/from the userspace buffer. Must be
* done atomically
*/
-static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
- unsigned offset, unsigned count, int zeroing)
+static int fuse_copy_folio(struct fuse_copy_state *cs, struct folio **foliop,
+ unsigned offset, unsigned count, int zeroing)
{
int err;
- struct page *page = *pagep;
+ struct folio *folio = *foliop;
+ size_t size;
- if (page && zeroing && count < PAGE_SIZE)
- clear_highpage(page);
+ if (folio) {
+ size = folio_size(folio);
+ if (zeroing && count < size)
+ folio_zero_range(folio, 0, size);
+ }
while (count) {
- if (cs->write && cs->pipebufs && page) {
- return fuse_ref_page(cs, page, offset, count);
+ if (cs->write && cs->pipebufs && folio) {
+ /*
+ * Can't control lifetime of pipe buffers, so always
+ * copy user pages.
+ */
+ if (cs->req->args->user_pages) {
+ err = fuse_copy_fill(cs);
+ if (err)
+ return err;
+ } else {
+ return fuse_ref_folio(cs, folio, offset, count);
+ }
} else if (!cs->len) {
- if (cs->move_pages && page &&
- offset == 0 && count == PAGE_SIZE) {
- err = fuse_try_move_page(cs, pagep);
+ if (cs->move_folios && folio &&
+ offset == 0 && count == size) {
+ err = fuse_try_move_folio(cs, foliop);
if (err <= 0)
return err;
} else {
@@ -954,34 +1151,41 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
return err;
}
}
- if (page) {
- void *mapaddr = kmap_local_page(page);
- void *buf = mapaddr + offset;
- offset += fuse_copy_do(cs, &buf, &count);
+ if (folio) {
+ void *mapaddr = kmap_local_folio(folio, offset);
+ void *buf = mapaddr;
+ unsigned int copy = count;
+ unsigned int bytes_copied;
+
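+ /*
+ * kmap_local_folio() maps only a single page of a highmem folio,
+ * so cap the copy at the current page boundary.
+ */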
+ if (folio_test_highmem(folio) && count > PAGE_SIZE - offset_in_page(offset))
+ copy = PAGE_SIZE - offset_in_page(offset);
+
+ bytes_copied = fuse_copy_do(cs, &buf, &copy);
kunmap_local(mapaddr);
+ offset += bytes_copied;
+ count -= bytes_copied;
} else
offset += fuse_copy_do(cs, NULL, &count);
}
- if (page && !cs->write)
- flush_dcache_page(page);
+ if (folio && !cs->write)
+ flush_dcache_folio(folio);
return 0;
}
-/* Copy pages in the request to/from userspace buffer */
-static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
- int zeroing)
+/* Copy folios in the request to/from userspace buffer */
+static int fuse_copy_folios(struct fuse_copy_state *cs, unsigned nbytes,
+ int zeroing)
{
unsigned i;
struct fuse_req *req = cs->req;
struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
-
- for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
+ for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) {
int err;
unsigned int offset = ap->descs[i].offset;
unsigned int count = min(nbytes, ap->descs[i].length);
- err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
+ err = fuse_copy_folio(cs, &ap->folios[i], offset, count, zeroing);
if (err)
return err;
@@ -1005,9 +1209,9 @@ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
}
/* Copy request arguments to/from userspace buffer */
-static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
- unsigned argpages, struct fuse_arg *args,
- int zeroing)
+int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
+ unsigned argpages, struct fuse_arg *args,
+ int zeroing)
{
int err = 0;
unsigned i;
@@ -1015,7 +1219,7 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
for (i = 0; !err && i < numargs; i++) {
struct fuse_arg *arg = &args[i];
if (i == numargs - 1 && argpages)
- err = fuse_copy_pages(cs, arg->size, zeroing);
+ err = fuse_copy_folios(cs, arg->size, zeroing);
else
err = fuse_copy_one(cs, arg->value, arg->size);
}
@@ -1071,9 +1275,9 @@ __releases(fiq->lock)
return err ? err : reqsize;
}
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp)
+static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1092,7 +1296,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
-EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
@@ -1107,7 +1310,7 @@ __releases(fiq->lock)
struct fuse_in_header ih = {
.opcode = FUSE_FORGET,
.nodeid = forget->forget_one.nodeid,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1138,7 +1341,7 @@ __releases(fiq->lock)
struct fuse_batch_forget_in arg = { .count = 0 };
struct fuse_in_header ih = {
.opcode = FUSE_BATCH_FORGET,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1345,19 +1548,39 @@ static int fuse_dev_open(struct inode *inode, struct file *file)
return 0;
}
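+/*
+ * Return the fuse_dev of a /dev/fuse file. If the file is still being
+ * set up synchronously (FUSE_DEV_SYNC_INIT), wait until the fuse_dev has
+ * been installed.
+ */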
+struct fuse_dev *fuse_get_dev(struct file *file)
+{
+ struct fuse_dev *fud = __fuse_get_dev(file);
+ int err;
+
+ if (likely(fud))
+ return fud;
+
+ err = wait_event_interruptible(fuse_dev_waitq,
+ READ_ONCE(file->private_data) != FUSE_DEV_SYNC_INIT);
+ if (err)
+ return ERR_PTR(err);
+
+ fud = __fuse_get_dev(file);
+ if (!fud)
+ return ERR_PTR(-EPERM);
+
+ return fud;
+}
+
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
struct fuse_copy_state cs;
struct file *file = iocb->ki_filp;
struct fuse_dev *fud = fuse_get_dev(file);
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
- if (!iter_is_iovec(to))
+ if (!user_backed_iter(to))
return -EINVAL;
- fuse_copy_init(&cs, 1, to);
+ fuse_copy_init(&cs, true, to);
return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}
@@ -1372,22 +1595,22 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
struct fuse_copy_state cs;
struct fuse_dev *fud = fuse_get_dev(in);
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs)
return -ENOMEM;
- fuse_copy_init(&cs, 1, NULL);
+ fuse_copy_init(&cs, true, NULL);
cs.pipebufs = bufs;
cs.pipe = pipe;
ret = fuse_dev_do_read(fud, in, &cs, len);
if (ret < 0)
goto out;
- if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
+ if (pipe_buf_usage(pipe) + cs.nr_segs > pipe->max_usage) {
ret = -EIO;
goto out;
}
@@ -1417,35 +1640,31 @@ static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_poll_wakeup_out outarg;
- int err = -EINVAL;
+ int err;
if (size != sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
fuse_copy_finish(cs);
return fuse_notify_poll_wakeup(fc, &outarg);
-
-err:
- fuse_copy_finish(cs);
- return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_inode_out outarg;
- int err = -EINVAL;
+ int err;
if (size != sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
fuse_copy_finish(cs);
down_read(&fc->killsb);
@@ -1453,39 +1672,33 @@ static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
outarg.off, outarg.len);
up_read(&fc->killsb);
return err;
-
-err:
- fuse_copy_finish(cs);
- return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -ENOMEM;
+ int err;
char *buf;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
- err = -EINVAL;
if (size < sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
- err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
- goto err;
+ if (outarg.namelen > fc->name_max)
+ return -ENAMETOOLONG;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
- goto err;
+ return -EINVAL;
+
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
name.name = buf;
name.len = outarg.namelen;
@@ -1496,14 +1709,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
buf[outarg.namelen] = 0;
down_read(&fc->killsb);
- err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
+ err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
up_read(&fc->killsb);
- kfree(buf);
- return err;
-
err:
kfree(buf);
- fuse_copy_finish(cs);
return err;
}
@@ -1511,29 +1720,26 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_delete_out outarg;
- int err = -ENOMEM;
+ int err;
char *buf;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
- err = -EINVAL;
if (size < sizeof(outarg))
- goto err;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto err;
+ return err;
- err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
- goto err;
+ if (outarg.namelen > fc->name_max)
+ return -ENAMETOOLONG;
- err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
- goto err;
+ return -EINVAL;
+
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
name.name = buf;
name.len = outarg.namelen;
@@ -1544,14 +1750,10 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
buf[outarg.namelen] = 0;
down_read(&fc->killsb);
- err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
+ err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
up_read(&fc->killsb);
- kfree(buf);
- return err;
-
err:
kfree(buf);
- fuse_copy_finish(cs);
return err;
}
@@ -1569,17 +1771,15 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
loff_t file_size;
loff_t end;
- err = -EINVAL;
if (size < sizeof(outarg))
- goto out_finish;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto out_finish;
+ return err;
- err = -EINVAL;
if (size - sizeof(outarg) != outarg.size)
- goto out_finish;
+ return -EINVAL;
nodeid = outarg.nodeid;
@@ -1602,29 +1802,35 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
num = outarg.size;
while (num) {
- struct page *page;
- unsigned int this_num;
-
- err = -ENOMEM;
- page = find_or_create_page(mapping, index,
- mapping_gfp_mask(mapping));
- if (!page)
+ struct folio *folio;
+ unsigned int folio_offset;
+ unsigned int nr_bytes;
+ unsigned int nr_pages;
+
+ folio = filemap_grab_folio(mapping, index);
+ err = PTR_ERR(folio);
+ if (IS_ERR(folio))
goto out_iput;
- this_num = min_t(unsigned, num, PAGE_SIZE - offset);
- err = fuse_copy_page(cs, &page, offset, this_num, 0);
- if (!err && offset == 0 &&
- (this_num == PAGE_SIZE || file_size == end))
- SetPageUptodate(page);
- unlock_page(page);
- put_page(page);
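+ /* a large folio may span several pages: in-folio offset and pages consumed */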
+ folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ nr_bytes = min_t(unsigned, num, folio_size(folio) - folio_offset);
+ nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ err = fuse_copy_folio(cs, &folio, folio_offset, nr_bytes, 0);
+ if (!folio_test_uptodate(folio) && !err && offset == 0 &&
+ (nr_bytes == folio_size(folio) || file_size == end)) {
+ folio_zero_segment(folio, nr_bytes, folio_size(folio));
+ folio_mark_uptodate(folio);
+ }
+ folio_unlock(folio);
+ folio_put(folio);
if (err)
goto out_iput;
- num -= this_num;
+ num -= nr_bytes;
offset = 0;
- index++;
+ index += nr_pages;
}
err = 0;
@@ -1633,8 +1839,6 @@ out_iput:
iput(inode);
out_up_killsb:
up_read(&fc->killsb);
-out_finish:
- fuse_copy_finish(cs);
return err;
}
@@ -1649,7 +1853,7 @@ static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
struct fuse_retrieve_args *ra =
container_of(args, typeof(*ra), ap.args);
- release_pages(ra->ap.pages, ra->ap.num_pages);
+ release_pages(ra->ap.folios, ra->ap.num_folios);
kfree(ra);
}
@@ -1681,50 +1885,57 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages = min(num_pages, fc->max_pages);
+ num = min(num, num_pages << PAGE_SHIFT);
- args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
+ args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->descs[0]));
ra = kzalloc(args_size, GFP_KERNEL);
if (!ra)
return -ENOMEM;
ap = &ra->ap;
- ap->pages = (void *) (ra + 1);
- ap->descs = (void *) (ap->pages + num_pages);
+ ap->folios = (void *) (ra + 1);
+ ap->descs = (void *) (ap->folios + num_pages);
args = &ap->args;
args->nodeid = outarg->nodeid;
args->opcode = FUSE_NOTIFY_REPLY;
- args->in_numargs = 2;
+ args->in_numargs = 3;
args->in_pages = true;
args->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
- while (num && ap->num_pages < num_pages) {
- struct page *page;
- unsigned int this_num;
+ while (num && ap->num_folios < num_pages) {
+ struct folio *folio;
+ unsigned int folio_offset;
+ unsigned int nr_bytes;
+ unsigned int nr_pages;
- page = find_get_page(mapping, index);
- if (!page)
+ folio = filemap_get_folio(mapping, index);
+ if (IS_ERR(folio))
break;
- this_num = min_t(unsigned, num, PAGE_SIZE - offset);
- ap->pages[ap->num_pages] = page;
- ap->descs[ap->num_pages].offset = offset;
- ap->descs[ap->num_pages].length = this_num;
- ap->num_pages++;
+ folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ nr_bytes = min(folio_size(folio) - folio_offset, num);
+ nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ap->folios[ap->num_folios] = folio;
+ ap->descs[ap->num_folios].offset = folio_offset;
+ ap->descs[ap->num_folios].length = nr_bytes;
+ ap->num_folios++;
offset = 0;
- num -= this_num;
- total_len += this_num;
- index++;
+ num -= nr_bytes;
+ total_len += nr_bytes;
+ index += nr_pages;
}
ra->inarg.offset = outarg->offset;
ra->inarg.size = total_len;
- args->in_args[0].size = sizeof(ra->inarg);
- args->in_args[0].value = &ra->inarg;
- args->in_args[1].size = total_len;
+ fuse_set_zero_arg0(args);
+ args->in_args[1].size = sizeof(ra->inarg);
+ args->in_args[1].value = &ra->inarg;
+ args->in_args[2].size = total_len;
err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
if (err)
@@ -1742,13 +1953,12 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
u64 nodeid;
int err;
- err = -EINVAL;
if (size != sizeof(outarg))
- goto copy_finish;
+ return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- goto copy_finish;
+ return err;
fuse_copy_finish(cs);
@@ -1764,17 +1974,126 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
up_read(&fc->killsb);
return err;
+}
-copy_finish:
- fuse_copy_finish(cs);
- return err;
+/*
+ * Resend all requests on the processing queues.
+ *
+ * During a FUSE daemon panic and failover, it is possible for some inflight
+ * requests to be lost and never returned. As a result, applications awaiting
+ * replies would become stuck forever. To address this, we can use notification
+ * to trigger resending of these pending requests to the FUSE daemon, ensuring
+ * they are properly processed again.
+ *
+ * Please note that this strategy is applicable only to idempotent requests or
+ * if the FUSE daemon takes careful measures to avoid processing duplicated
+ * non-idempotent requests.
+ */
+static void fuse_resend(struct fuse_conn *fc)
+{
+ struct fuse_dev *fud;
+ struct fuse_req *req, *next;
+ struct fuse_iqueue *fiq = &fc->iq;
+ LIST_HEAD(to_queue);
+ unsigned int i;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+
+ list_for_each_entry(fud, &fc->devices, entry) {
+ struct fuse_pqueue *fpq = &fud->pq;
+
+ spin_lock(&fpq->lock);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ list_splice_tail_init(&fpq->processing[i], &to_queue);
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
+ list_for_each_entry_safe(req, next, &to_queue, list) {
+ set_bit(FR_PENDING, &req->flags);
+ clear_bit(FR_SENT, &req->flags);
+ /* mark the request as resend request */
+ req->in.h.unique |= FUSE_UNIQUE_RESEND;
+ }
+
+ spin_lock(&fiq->lock);
+ if (!fiq->connected) {
+ spin_unlock(&fiq->lock);
+ list_for_each_entry(req, &to_queue, list)
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_dev_end_requests(&to_queue);
+ return;
+ }
+ /* iq and pq requests are both oldest to newest */
+ list_splice(&to_queue, &fiq->pending);
+ fuse_dev_wake_and_unlock(fiq);
+}
+
+static int fuse_notify_resend(struct fuse_conn *fc)
+{
+ fuse_resend(fc);
+ return 0;
+}
+
+/*
+ * Increments the fuse connection epoch. This causes dentries from
+ * previous epochs to be invalidated. Additionally, if inval_wq is set, a work
+ * queue is scheduled to trigger the invalidation.
+ */
+static int fuse_notify_inc_epoch(struct fuse_conn *fc)
+{
+ atomic_inc(&fc->epoch);
+ if (inval_wq)
+ schedule_work(&fc->epoch_work);
+
+ return 0;
+}
+
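+/*
+ * FUSE_NOTIFY_PRUNE: the server asks the kernel to try to evict the
+ * inodes identified by the given node ids, processed in batches.
+ */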
+static int fuse_notify_prune(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_prune_out outarg;
+ const unsigned int batch = 512;
+ u64 *nodeids __free(kfree) = kmalloc(sizeof(u64) * batch, GFP_KERNEL);
+ unsigned int num, i;
+ int err;
+
+ if (!nodeids)
+ return -ENOMEM;
+
+ if (size < sizeof(outarg))
+ return -EINVAL;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ return err;
+
+ if (size - sizeof(outarg) != outarg.count * sizeof(u64))
+ return -EINVAL;
+
+ for (; outarg.count; outarg.count -= num) {
+ num = min(batch, outarg.count);
+ err = fuse_copy_one(cs, nodeids, num * sizeof(u64));
+ if (err)
+ return err;
+
+ scoped_guard(rwsem_read, &fc->killsb) {
+ for (i = 0; i < num; i++)
+ fuse_try_prune_one_inode(fc, nodeids[i]);
+ }
+ }
+ return 0;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
- /* Don't try to move pages (yet) */
- cs->move_pages = 0;
+ /* Don't try to move folios (yet) */
+ cs->move_folios = false;
switch (code) {
case FUSE_NOTIFY_POLL:
@@ -1795,14 +2114,22 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
case FUSE_NOTIFY_DELETE:
return fuse_notify_delete(fc, size, cs);
+ case FUSE_NOTIFY_RESEND:
+ return fuse_notify_resend(fc);
+
+ case FUSE_NOTIFY_INC_EPOCH:
+ return fuse_notify_inc_epoch(fc);
+
+ case FUSE_NOTIFY_PRUNE:
+ return fuse_notify_prune(fc, size, cs);
+
default:
- fuse_copy_finish(cs);
return -EINVAL;
}
}
/* Look up request on processing list by unique ID */
-static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
+struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique)
{
unsigned int hash = fuse_req_hash(unique);
struct fuse_req *req;
@@ -1814,10 +2141,17 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
return NULL;
}
-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
- unsigned nbytes)
+int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
+ unsigned nbytes)
{
- unsigned reqsize = sizeof(struct fuse_out_header);
+ unsigned int reqsize = 0;
+
+
+ /*
+ * Uring has all headers separated from args - args is payload only
+ */
+ if (!cs->is_uring)
+ reqsize = sizeof(struct fuse_out_header);
reqsize += fuse_len_args(args->out_numargs, args->out_args);
@@ -1869,7 +2203,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
- goto out;
+ goto copy_finish;
}
err = -EINVAL;
@@ -1879,7 +2213,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_lock(&fpq->lock);
req = NULL;
if (fpq->connected)
- req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
+ req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
err = -ENOENT;
if (!req) {
@@ -1912,12 +2246,12 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_unlock(&fpq->lock);
cs->req = req;
if (!req->args->page_replace)
- cs->move_pages = 0;
+ cs->move_folios = false;
if (oh.error)
err = nbytes != sizeof(oh) ? -EINVAL : 0;
else
- err = copy_out_args(cs, req->args, nbytes);
+ err = fuse_copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
@@ -1942,15 +2276,15 @@ copy_finish:
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
struct fuse_copy_state cs;
- struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
+ struct fuse_dev *fud = __fuse_get_dev(iocb->ki_filp);
if (!fud)
return -EPERM;
- if (!iter_is_iovec(from))
+ if (!user_backed_iter(from))
return -EINVAL;
- fuse_copy_init(&cs, 0, from);
+ fuse_copy_init(&cs, false, from);
return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
@@ -1959,16 +2293,15 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
- unsigned int head, tail, mask, count;
+ unsigned int head, tail, count;
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
- struct fuse_dev *fud;
+ struct fuse_dev *fud = __fuse_get_dev(out);
size_t rem;
ssize_t ret;
- fud = fuse_get_dev(out);
if (!fud)
return -EPERM;
@@ -1976,8 +2309,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
head = pipe->head;
tail = pipe->tail;
- mask = pipe->ring_size - 1;
- count = head - tail;
+ count = pipe_occupancy(head, tail);
bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
if (!bufs) {
@@ -1987,8 +2319,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
nbuf = 0;
rem = 0;
- for (idx = tail; idx != head && rem < len; idx++)
- rem += pipe->bufs[idx & mask].len;
+ for (idx = tail; !pipe_empty(head, idx) && rem < len; idx++)
+ rem += pipe_buf(pipe, idx)->len;
ret = -EINVAL;
if (rem < len)
@@ -1999,10 +2331,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- if (WARN_ON(nbuf >= count || tail == head))
+ if (WARN_ON(nbuf >= count || pipe_empty(head, tail)))
goto out_free;
- ibuf = &pipe->bufs[tail & mask];
+ ibuf = pipe_buf(pipe, tail);
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
@@ -2025,13 +2357,13 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
}
pipe_unlock(pipe);
- fuse_copy_init(&cs, 0, NULL);
+ fuse_copy_init(&cs, false, NULL);
cs.pipebufs = bufs;
cs.nr_segs = nbuf;
cs.pipe = pipe;
if (flags & SPLICE_F_MOVE)
- cs.move_pages = 1;
+ cs.move_folios = true;
ret = fuse_dev_do_write(fud, &cs, len);
@@ -2055,7 +2387,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
struct fuse_iqueue *fiq;
struct fuse_dev *fud = fuse_get_dev(file);
- if (!fud)
+ if (IS_ERR(fud))
return EPOLLERR;
fiq = &fud->fc->iq;
@@ -2072,7 +2404,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
}
/* Abort all requests on the given list (pending or processing) */
-static void end_requests(struct list_head *head)
+void fuse_dev_end_requests(struct list_head *head)
{
while (!list_empty(head)) {
struct fuse_req *req;
@@ -2108,7 +2440,7 @@ static void end_polls(struct fuse_conn *fc)
* The same effect is usually achievable through killing the filesystem daemon
* and all users of the filesystem. The exception is the combination of an
* asynchronous request and the tricky deadlock (see
- * Documentation/filesystems/fuse.rst).
+ * Documentation/filesystems/fuse/fuse.rst).
*
* Aborting requests under I/O goes as follows: 1: Separate out unlocked
* requests, they should be finished off immediately. Locked requests will be
@@ -2128,6 +2460,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
LIST_HEAD(to_end);
unsigned int i;
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work(&fc->timeout.work);
+
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
@@ -2175,7 +2510,13 @@ void fuse_abort_conn(struct fuse_conn *fc)
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
- end_requests(&to_end);
+ fuse_dev_end_requests(&to_end);
+
+ /*
+ * fc->lock must not be taken to avoid conflicts with io-uring
+ * locks
+ */
+ fuse_uring_abort(fc);
} else {
spin_unlock(&fc->lock);
}
@@ -2187,11 +2528,13 @@ void fuse_wait_aborted(struct fuse_conn *fc)
/* matches implicit memory barrier in fuse_drop_waiting() */
smp_mb();
wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+
+ fuse_uring_wait_stopped_queues(fc);
}
int fuse_dev_release(struct inode *inode, struct file *file)
{
- struct fuse_dev *fud = fuse_get_dev(file);
+ struct fuse_dev *fud = __fuse_get_dev(file);
if (fud) {
struct fuse_conn *fc = fud->fc;
@@ -2205,7 +2548,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
list_splice_init(&fpq->processing[i], &to_end);
spin_unlock(&fpq->lock);
- end_requests(&to_end);
+ fuse_dev_end_requests(&to_end);
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
@@ -2222,8 +2565,8 @@ static int fuse_dev_fasync(int fd, struct file *file, int on)
{
struct fuse_dev *fud = fuse_get_dev(file);
- if (!fud)
- return -EPERM;
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
/* No locking - fasync_helper does its own locking */
return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
@@ -2233,7 +2576,7 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
struct fuse_dev *fud;
- if (new->private_data)
+ if (__fuse_get_dev(new))
return -EINVAL;
fud = fuse_dev_alloc_install(fc);
@@ -2246,49 +2589,121 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
return 0;
}
-static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
{
int res;
int oldfd;
struct fuse_dev *fud = NULL;
+ if (get_user(oldfd, argp))
+ return -EFAULT;
+
+ CLASS(fd, f)(oldfd);
+ if (fd_empty(f))
+ return -EINVAL;
+
+ /*
+ * Check against file->f_op because CUSE
+ * uses the same ioctl handler.
+ */
+ if (fd_file(f)->f_op == file->f_op)
+ fud = __fuse_get_dev(fd_file(f));
+
+ res = -EINVAL;
+ if (fud) {
+ mutex_lock(&fuse_mutex);
+ res = fuse_device_clone(fud->fc, file);
+ mutex_unlock(&fuse_mutex);
+ }
+
+ return res;
+}
+
+static long fuse_dev_ioctl_backing_open(struct file *file,
+ struct fuse_backing_map __user *argp)
+{
+ struct fuse_dev *fud = fuse_get_dev(file);
+ struct fuse_backing_map map;
+
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
+
+ if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&map, argp, sizeof(map)))
+ return -EFAULT;
+
+ return fuse_backing_open(fud->fc, &map);
+}
+
+static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
+{
+ struct fuse_dev *fud = fuse_get_dev(file);
+ int backing_id;
+
+ if (IS_ERR(fud))
+ return PTR_ERR(fud);
+
+ if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ return -EOPNOTSUPP;
+
+ if (get_user(backing_id, argp))
+ return -EFAULT;
+
+ return fuse_backing_close(fud->fc, backing_id);
+}
+
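+/*
+ * Mark the device file as being initialized synchronously: callers of
+ * fuse_get_dev() will wait until a fuse_dev is installed instead of
+ * failing immediately.
+ */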
+static long fuse_dev_ioctl_sync_init(struct file *file)
+{
+ int err = -EINVAL;
+
+ mutex_lock(&fuse_mutex);
+ if (!__fuse_get_dev(file)) {
+ WRITE_ONCE(file->private_data, FUSE_DEV_SYNC_INIT);
+ err = 0;
+ }
+ mutex_unlock(&fuse_mutex);
+ return err;
+}
+
+static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
switch (cmd) {
case FUSE_DEV_IOC_CLONE:
- res = -EFAULT;
- if (!get_user(oldfd, (__u32 __user *)arg)) {
- struct file *old = fget(oldfd);
-
- res = -EINVAL;
- if (old) {
- /*
- * Check against file->f_op because CUSE
- * uses the same ioctl handler.
- */
- if (old->f_op == file->f_op &&
- old->f_cred->user_ns == file->f_cred->user_ns)
- fud = fuse_get_dev(old);
-
- if (fud) {
- mutex_lock(&fuse_mutex);
- res = fuse_device_clone(fud->fc, file);
- mutex_unlock(&fuse_mutex);
- }
- fput(old);
- }
- }
- break;
+ return fuse_dev_ioctl_clone(file, argp);
+
+ case FUSE_DEV_IOC_BACKING_OPEN:
+ return fuse_dev_ioctl_backing_open(file, argp);
+
+ case FUSE_DEV_IOC_BACKING_CLOSE:
+ return fuse_dev_ioctl_backing_close(file, argp);
+
+ case FUSE_DEV_IOC_SYNC_INIT:
+ return fuse_dev_ioctl_sync_init(file);
+
default:
- res = -ENOTTY;
- break;
+ return -ENOTTY;
}
- return res;
}
+#ifdef CONFIG_PROC_FS
+static void fuse_dev_show_fdinfo(struct seq_file *seq, struct file *file)
+{
+ struct fuse_dev *fud = __fuse_get_dev(file);
+
+ if (!fud)
+ return;
+
+ seq_printf(seq, "fuse_connection:\t%u\n", fud->fc->dev);
+}
+#endif
+
const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.open = fuse_dev_open,
- .llseek = no_llseek,
.read_iter = fuse_dev_read,
.splice_read = fuse_dev_splice_read,
.write_iter = fuse_dev_write,
@@ -2298,6 +2713,12 @@ const struct file_operations fuse_dev_operations = {
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+#ifdef CONFIG_FUSE_IO_URING
+ .uring_cmd = fuse_uring_cmd,
+#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = fuse_dev_show_fdinfo,
+#endif
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
new file mode 100644
index 000000000000..5ceb217ced1b
--- /dev/null
+++ b/fs/fuse/dev_uring.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE: Filesystem in Userspace
+ * Copyright (c) 2023-2024 DataDirect Networks.
+ */
+
+#include "fuse_i.h"
+#include "dev_uring_i.h"
+#include "fuse_dev_i.h"
+#include "fuse_trace.h"
+
+#include <linux/fs.h>
+#include <linux/io_uring/cmd.h>
+
+static bool __read_mostly enable_uring;
+module_param(enable_uring, bool, 0644);
+MODULE_PARM_DESC(enable_uring,
+ "Enable userspace communication through io-uring");
+
+#define FUSE_URING_IOV_SEGS 2 /* header and payload */
+
+bool fuse_uring_enabled(void)
+{
+ return enable_uring;
+}
+
+struct fuse_uring_pdu {
+ struct fuse_ring_ent *ent;
+};
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops;
+
+static void uring_cmd_set_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_ent *ring_ent)
+{
+ struct fuse_uring_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+ pdu->ent = ring_ent;
+}
+
+static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
+{
+ struct fuse_uring_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+ return pdu->ent;
+}
+
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+
+ lockdep_assert_held(&queue->lock);
+ lockdep_assert_held(&fc->bg_lock);
+
+ /*
+ * Allow one bg request per queue, ignoring global fc limits.
+ * This prevents a single queue from consuming all resources and
+ * eliminates the need for remote queue wake-ups when global
+ * limits are met but this queue has no more waiting requests.
+ */
+ while ((fc->active_background < fc->max_background ||
+ !queue->active_background) &&
+ (!list_empty(&queue->fuse_req_bg_queue))) {
+ struct fuse_req *req;
+
+ req = list_first_entry(&queue->fuse_req_bg_queue,
+ struct fuse_req, list);
+ fc->active_background++;
+ queue->active_background++;
+
+ list_move_tail(&req->list, &queue->fuse_req_queue);
+ }
+}
+
+static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
+ int error)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+
+ lockdep_assert_not_held(&queue->lock);
+ spin_lock(&queue->lock);
+ ent->fuse_req = NULL;
+ list_del_init(&req->list);
+ if (test_bit(FR_BACKGROUND, &req->flags)) {
+ queue->active_background--;
+ spin_lock(&fc->bg_lock);
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+ }
+
+ spin_unlock(&queue->lock);
+
+ if (error)
+ req->out.h.error = error;
+
+ clear_bit(FR_SENT, &req->flags);
+ fuse_request_end(req);
+}
+
+/* Abort all list queued request on the given ring queue */
+static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue)
+{
+ struct fuse_req *req;
+ LIST_HEAD(req_list);
+
+ spin_lock(&queue->lock);
+ list_for_each_entry(req, &queue->fuse_req_queue, list)
+ clear_bit(FR_PENDING, &req->flags);
+ list_splice_init(&queue->fuse_req_queue, &req_list);
+ spin_unlock(&queue->lock);
+
+ /* must not hold queue lock to avoid order issues with fi->lock */
+ fuse_dev_end_requests(&req_list);
+}
+
+void fuse_uring_abort_end_requests(struct fuse_ring *ring)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ struct fuse_conn *fc = ring->fc;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ queue->stopped = true;
+
+ WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+ spin_lock(&queue->lock);
+ spin_lock(&fc->bg_lock);
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+ spin_unlock(&queue->lock);
+ fuse_uring_abort_end_queue_requests(queue);
+ }
+}
+
+static bool ent_list_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+ struct fuse_ring_ent *ent;
+ struct fuse_req *req;
+
+ ent = list_first_entry_or_null(list, struct fuse_ring_ent, list);
+ if (!ent)
+ return false;
+
+ req = ent->fuse_req;
+
+ return time_is_before_jiffies(req->create_time +
+ fc->timeout.req_timeout);
+}
+
+bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ int qid;
+
+ if (!ring)
+ return false;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
+ fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
+ ent_list_request_expired(fc, &queue->ent_w_req_queue) ||
+ ent_list_request_expired(fc, &queue->ent_in_userspace)) {
+ spin_unlock(&queue->lock);
+ return true;
+ }
+ spin_unlock(&queue->lock);
+ }
+
+ return false;
+}
+
+void fuse_uring_destruct(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ int qid;
+
+ if (!ring)
+ return;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = ring->queues[qid];
+ struct fuse_ring_ent *ent, *next;
+
+ if (!queue)
+ continue;
+
+ WARN_ON(!list_empty(&queue->ent_avail_queue));
+ WARN_ON(!list_empty(&queue->ent_w_req_queue));
+ WARN_ON(!list_empty(&queue->ent_commit_queue));
+ WARN_ON(!list_empty(&queue->ent_in_userspace));
+
+ list_for_each_entry_safe(ent, next, &queue->ent_released,
+ list) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+
+ kfree(queue->fpq.processing);
+ kfree(queue);
+ ring->queues[qid] = NULL;
+ }
+
+ kfree(ring->queues);
+ kfree(ring);
+ fc->ring = NULL;
+}
+
+/*
+ * Basic ring setup for this connection based on the provided configuration
+ */
+static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring;
+ size_t nr_queues = num_possible_cpus();
+ struct fuse_ring *res = NULL;
+ size_t max_payload_size;
+
+ ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT);
+ if (!ring)
+ return NULL;
+
+ ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *),
+ GFP_KERNEL_ACCOUNT);
+ if (!ring->queues)
+ goto out_err;
+
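+ /* the per-entry payload buffer must hold the largest possible request payload */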
+ max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write);
+ max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE);
+
+ spin_lock(&fc->lock);
+ if (fc->ring) {
+ /* race, another thread created the ring in the meantime */
+ spin_unlock(&fc->lock);
+ res = fc->ring;
+ goto out_err;
+ }
+
+ init_waitqueue_head(&ring->stop_waitq);
+
+ ring->nr_queues = nr_queues;
+ ring->fc = fc;
+ ring->max_payload_sz = max_payload_size;
+ smp_store_release(&fc->ring, ring);
+
+ spin_unlock(&fc->lock);
+ return ring;
+
+out_err:
+ kfree(ring->queues);
+ kfree(ring);
+ return res;
+}
+
+static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
+ int qid)
+{
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_ring_queue *queue;
+ struct list_head *pq;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT);
+ if (!queue)
+ return NULL;
+ pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
+ if (!pq) {
+ kfree(queue);
+ return NULL;
+ }
+
+ queue->qid = qid;
+ queue->ring = ring;
+ spin_lock_init(&queue->lock);
+
+ INIT_LIST_HEAD(&queue->ent_avail_queue);
+ INIT_LIST_HEAD(&queue->ent_commit_queue);
+ INIT_LIST_HEAD(&queue->ent_w_req_queue);
+ INIT_LIST_HEAD(&queue->ent_in_userspace);
+ INIT_LIST_HEAD(&queue->fuse_req_queue);
+ INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
+ INIT_LIST_HEAD(&queue->ent_released);
+
+ queue->fpq.processing = pq;
+ fuse_pqueue_init(&queue->fpq);
+
+ spin_lock(&fc->lock);
+ if (ring->queues[qid]) {
+ spin_unlock(&fc->lock);
+ kfree(queue->fpq.processing);
+ kfree(queue);
+ return ring->queues[qid];
+ }
+
+ /*
+ * write_once and lock as the caller mostly doesn't take the lock at all
+ */
+ WRITE_ONCE(ring->queues[qid], queue);
+ spin_unlock(&fc->lock);
+
+ return queue;
+}
+
+static void fuse_uring_stop_fuse_req_end(struct fuse_req *req)
+{
+ clear_bit(FR_SENT, &req->flags);
+ req->out.h.error = -ECONNABORTED;
+ fuse_request_end(req);
+}
+
+/*
+ * Release a request/entry on connection tear down
+ */
+static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
+{
+ struct fuse_req *req;
+ struct io_uring_cmd *cmd;
+
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+ cmd = ent->cmd;
+ ent->cmd = NULL;
+ req = ent->fuse_req;
+ ent->fuse_req = NULL;
+ if (req) {
+ /* remove entry from queue->fpq->processing */
+ list_del_init(&req->list);
+ }
+
+ /*
+ * The entry must not be freed immediately, because IO_URING_F_CANCEL
+ * accesses entries through direct pointers - there is a risk of a race
+ * between daemon termination (which triggers IO_URING_F_CANCEL) and
+ * access to an entry without checking its list state first.
+ */
+ list_move(&ent->list, &queue->ent_released);
+ ent->state = FRRS_RELEASED;
+ spin_unlock(&queue->lock);
+
+ if (cmd)
+ io_uring_cmd_done(cmd, -ENOTCONN, IO_URING_F_UNLOCKED);
+
+ if (req)
+ fuse_uring_stop_fuse_req_end(req);
+}
+
+static void fuse_uring_stop_list_entries(struct list_head *head,
+ struct fuse_ring_queue *queue,
+ enum fuse_ring_req_state exp_state)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_ring_ent *ent, *next;
+ ssize_t queue_refs = SSIZE_MAX;
+ LIST_HEAD(to_teardown);
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(ent, next, head, list) {
+ if (ent->state != exp_state) {
+ pr_warn("entry teardown qid=%d state=%d expected=%d",
+ queue->qid, ent->state, exp_state);
+ continue;
+ }
+
+ ent->state = FRRS_TEARDOWN;
+ list_move(&ent->list, &to_teardown);
+ }
+ spin_unlock(&queue->lock);
+
+ /* no queue lock to avoid lock order issues */
+ list_for_each_entry_safe(ent, next, &to_teardown, list) {
+ fuse_uring_entry_teardown(ent);
+ queue_refs = atomic_dec_return(&ring->queue_refs);
+ WARN_ON_ONCE(queue_refs < 0);
+ }
+}
+
+static void fuse_uring_teardown_entries(struct fuse_ring_queue *queue)
+{
+ fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue,
+ FRRS_USERSPACE);
+ fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue,
+ FRRS_AVAILABLE);
+}
+
+/*
+ * Log state debug info
+ */
+static void fuse_uring_log_ent_state(struct fuse_ring *ring)
+{
+ int qid;
+ struct fuse_ring_ent *ent;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = ring->queues[qid];
+
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ /*
+ * Log entries from the intermediate queue, the other queues
+ * should be empty
+ */
+ list_for_each_entry(ent, &queue->ent_w_req_queue, list) {
+ pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n",
+ ring, qid, ent, ent->state);
+ }
+ list_for_each_entry(ent, &queue->ent_commit_queue, list) {
+ pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n",
+ ring, qid, ent, ent->state);
+ }
+ spin_unlock(&queue->lock);
+ }
+ ring->stop_debug_log = 1;
+}
+
+static void fuse_uring_async_stop_queues(struct work_struct *work)
+{
+ int qid;
+ struct fuse_ring *ring =
+ container_of(work, struct fuse_ring, async_teardown_work.work);
+
+ /* XXX code dup */
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+
+ /*
+ * Some ring entries might be in the middle of IO operations,
+ * i.e. in the process of being handled by file_operations::uring_cmd
+ * or on the way to userspace - we could handle that with conditions in
+ * run-time code, but it is easier/cleaner to have an async tear-down
+ * handler if there are still queue references left.
+ */
+ if (atomic_read(&ring->queue_refs) > 0) {
+ if (time_after(jiffies,
+ ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT))
+ fuse_uring_log_ent_state(ring);
+
+ schedule_delayed_work(&ring->async_teardown_work,
+ FUSE_URING_TEARDOWN_INTERVAL);
+ } else {
+ wake_up_all(&ring->stop_waitq);
+ }
+}
+
+/*
+ * Stop the ring queues
+ */
+void fuse_uring_stop_queues(struct fuse_ring *ring)
+{
+ int qid;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+
+ if (atomic_read(&ring->queue_refs) > 0) {
+ ring->teardown_time = jiffies;
+ INIT_DELAYED_WORK(&ring->async_teardown_work,
+ fuse_uring_async_stop_queues);
+ schedule_delayed_work(&ring->async_teardown_work,
+ FUSE_URING_TEARDOWN_INTERVAL);
+ } else {
+ wake_up_all(&ring->stop_waitq);
+ }
+}
+
+/*
+ * Handle IO_URING_F_CANCEL, which typically comes on daemon termination.
+ *
+ * Releasing the last entry should trigger fuse_dev_release() if
+ * the daemon was terminated
+ */
+static void fuse_uring_cancel(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue;
+ bool need_cmd_done = false;
+
+ /*
+ * ent is accessed directly here - it must not be destructed as long as
+ * IO_URING_F_CANCEL might come up.
+ */
+ queue = ent->queue;
+ spin_lock(&queue->lock);
+ if (ent->state == FRRS_AVAILABLE) {
+ ent->state = FRRS_USERSPACE;
+ list_move_tail(&ent->list, &queue->ent_in_userspace);
+ need_cmd_done = true;
+ ent->cmd = NULL;
+ }
+ spin_unlock(&queue->lock);
+
+ if (need_cmd_done) {
+ /* no queue lock to avoid lock order issues */
+ io_uring_cmd_done(cmd, -ENOTCONN, issue_flags);
+ }
+}
+
+static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags,
+ struct fuse_ring_ent *ring_ent)
+{
+ uring_cmd_set_ring_ent(cmd, ring_ent);
+ io_uring_cmd_mark_cancelable(cmd, issue_flags);
+}
+
+/*
+ * Checks for errors and stores it into the request
+ */
+static int fuse_uring_out_header_has_err(struct fuse_out_header *oh,
+ struct fuse_req *req,
+ struct fuse_conn *fc)
+{
+ int err;
+
+ err = -EINVAL;
+ if (oh->unique == 0) {
+ /* Not supported through io-uring yet */
+ pr_warn_once("notify through fuse-io-uring not supported\n");
+ goto err;
+ }
+
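+ /* must be a negative errno and not one of the kernel-internal restart codes */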
+ if (oh->error <= -ERESTARTSYS || oh->error > 0)
+ goto err;
+
+ if (oh->error) {
+ err = oh->error;
+ goto err;
+ }
+
+ err = -ENOENT;
+ if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) {
+ pr_warn_ratelimited("unique mismatch, expected: %llu got %llu\n",
+ req->in.h.unique,
+ oh->unique & ~FUSE_INT_REQ_BIT);
+ goto err;
+ }
+
+ /*
+ * Is it an interrupt reply ID?
+ * XXX: Not supported through fuse-io-uring yet, it should not even
+ * find the request - should not happen.
+ */
+ WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT);
+
+ err = 0;
+err:
+ return err;
+}
+
+static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
+ struct fuse_req *req,
+ struct fuse_ring_ent *ent)
+{
+ struct fuse_copy_state cs;
+ struct fuse_args *args = req->args;
+ struct iov_iter iter;
+ int err;
+ struct fuse_uring_ent_in_out ring_in_out;
+
+ err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out,
+ sizeof(ring_in_out));
+ if (err)
+ return -EFAULT;
+
+ err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz,
+ &iter);
+ if (err)
+ return err;
+
+ fuse_copy_init(&cs, false, &iter);
+ cs.is_uring = true;
+ cs.req = req;
+
+ err = fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
+ fuse_copy_finish(&cs);
+ return err;
+}
+
+/*
+ * Copy data from the req to the ring buffer
+ */
+static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
+ struct fuse_ring_ent *ent)
+{
+ struct fuse_copy_state cs;
+ struct fuse_args *args = req->args;
+ struct fuse_in_arg *in_args = args->in_args;
+ int num_args = args->in_numargs;
+ int err;
+ struct iov_iter iter;
+ struct fuse_uring_ent_in_out ent_in_out = {
+ .flags = 0,
+ .commit_id = req->in.h.unique,
+ };
+
+ err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter);
+ if (err) {
+ pr_info_ratelimited("fuse: Import of user buffer failed\n");
+ return err;
+ }
+
+ fuse_copy_init(&cs, true, &iter);
+ cs.is_uring = true;
+ cs.req = req;
+
+ if (num_args > 0) {
+ /*
+ * Expectation is that the first argument is the per op header.
+ * Some op codes have that as zero size.
+ */
+ if (args->in_args[0].size > 0) {
+ err = copy_to_user(&ent->headers->op_in, in_args->value,
+ in_args->size);
+ if (err) {
+ pr_info_ratelimited(
+ "Copying the header failed.\n");
+ return -EFAULT;
+ }
+ }
+ in_args++;
+ num_args--;
+ }
+
+ /* copy the payload */
+ err = fuse_copy_args(&cs, num_args, args->in_pages,
+ (struct fuse_arg *)in_args, 0);
+ fuse_copy_finish(&cs);
+ if (err) {
+ pr_info_ratelimited("%s fuse_copy_args failed\n", __func__);
+ return err;
+ }
+
+ ent_in_out.payload_sz = cs.ring.copied_sz;
+ err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out,
+ sizeof(ent_in_out));
+ return err ? -EFAULT : 0;
+}
+
+static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ int err;
+
+ err = -EIO;
+ if (WARN_ON(ent->state != FRRS_FUSE_REQ)) {
+ pr_err("qid=%d ring-req=%p invalid state %d on send\n",
+ queue->qid, ent, ent->state);
+ return err;
+ }
+
+ err = -EINVAL;
+ if (WARN_ON(req->in.h.unique == 0))
+ return err;
+
+ /* copy the request */
+ err = fuse_uring_args_to_ring(ring, req, ent);
+ if (unlikely(err)) {
+ pr_info_ratelimited("Copy to ring failed: %d\n", err);
+ return err;
+ }
+
+ /* copy fuse_in_header */
+ err = copy_to_user(&ent->headers->in_out, &req->in.h,
+ sizeof(req->in.h));
+ if (err) {
+ err = -EFAULT;
+ return err;
+ }
+
+ return 0;
+}
+
+static int fuse_uring_prepare_send(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ int err;
+
+ err = fuse_uring_copy_to_ring(ent, req);
+ if (!err)
+ set_bit(FR_SENT, &req->flags);
+ else
+ fuse_uring_req_end(ent, req, err);
+
+ return err;
+}
+
+/*
+ * Write data to the ring buffer and send the request to userspace,
+ * userspace will read it.
+ * This is comparable to a classical read(/dev/fuse).
+ */
+static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent,
+ struct fuse_req *req,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+ struct io_uring_cmd *cmd;
+
+ err = fuse_uring_prepare_send(ent, req);
+ if (err)
+ return err;
+
+ spin_lock(&queue->lock);
+ cmd = ent->cmd;
+ ent->cmd = NULL;
+ ent->state = FRRS_USERSPACE;
+ list_move_tail(&ent->list, &queue->ent_in_userspace);
+ spin_unlock(&queue->lock);
+
+ io_uring_cmd_done(cmd, 0, issue_flags);
+ return 0;
+}
+
+/*
+ * Make a ring entry available for fuse_req assignment
+ */
+static void fuse_uring_ent_avail(struct fuse_ring_ent *ent,
+ struct fuse_ring_queue *queue)
+{
+ WARN_ON_ONCE(!ent->cmd);
+ list_move(&ent->list, &queue->ent_avail_queue);
+ ent->state = FRRS_AVAILABLE;
+}
+
+/* Used to find the request on SQE commit */
+static void fuse_uring_add_to_pq(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_pqueue *fpq = &queue->fpq;
+ unsigned int hash;
+
+ req->ring_entry = ent;
+ hash = fuse_req_hash(req->in.h.unique);
+ list_move_tail(&req->list, &fpq->processing[hash]);
+}
+
+/*
+ * Assign a fuse queue entry to the given entry
+ */
+static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE &&
+ ent->state != FRRS_COMMIT)) {
+ pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
+ ent->state);
+ }
+
+ clear_bit(FR_PENDING, &req->flags);
+ ent->fuse_req = req;
+ ent->state = FRRS_FUSE_REQ;
+ list_move_tail(&ent->list, &queue->ent_w_req_queue);
+ fuse_uring_add_to_pq(ent, req);
+}
+
+/* Fetch the next fuse request if available */
+static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
+ __must_hold(&queue->lock)
+{
+ struct fuse_req *req;
+ struct fuse_ring_queue *queue = ent->queue;
+ struct list_head *req_queue = &queue->fuse_req_queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ /* get and assign the next entry while it is still holding the lock */
+ req = list_first_entry_or_null(req_queue, struct fuse_req, list);
+ if (req)
+ fuse_uring_add_req_to_ring_ent(ent, req);
+
+ return req;
+}
+
+/*
+ * Read data from the ring buffer, which user space has written to
+ * This is comparable to handling of a classical write(/dev/fuse).
+ * Also make the ring request available again for new fuse requests.
+ */
+static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req,
+ unsigned int issue_flags)
+{
+ struct fuse_ring *ring = ent->queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ ssize_t err = 0;
+
+ err = copy_from_user(&req->out.h, &ent->headers->in_out,
+ sizeof(req->out.h));
+ if (err) {
+ req->out.h.error = -EFAULT;
+ goto out;
+ }
+
+ err = fuse_uring_out_header_has_err(&req->out.h, req, fc);
+ if (err) {
+ /* req->out.h.error already set */
+ goto out;
+ }
+
+ err = fuse_uring_copy_from_ring(ring, req, ent);
+out:
+ fuse_uring_req_end(ent, req, err);
+}
+
+/*
+ * Get the next fuse req and send it
+ */
+static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent,
+ struct fuse_ring_queue *queue,
+ unsigned int issue_flags)
+{
+ int err;
+ struct fuse_req *req;
+
+retry:
+ spin_lock(&queue->lock);
+ fuse_uring_ent_avail(ent, queue);
+ req = fuse_uring_ent_assign_req(ent);
+ spin_unlock(&queue->lock);
+
+ if (req) {
+ err = fuse_uring_send_next_to_ring(ent, req, issue_flags);
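+ /* on failure the request was already ended; retry with the next queued request */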
+ if (err)
+ goto retry;
+ }
+}
+
+static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE))
+ return -EIO;
+
+ ent->state = FRRS_COMMIT;
+ list_move(&ent->list, &queue->ent_commit_queue);
+
+ return 0;
+}
+
+/* FUSE_URING_CMD_COMMIT_AND_FETCH handler */
+static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
+ struct fuse_conn *fc)
+{
+ const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ struct fuse_ring_ent *ent;
+ int err;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ uint64_t commit_id = READ_ONCE(cmd_req->commit_id);
+ unsigned int qid = READ_ONCE(cmd_req->qid);
+ struct fuse_pqueue *fpq;
+ struct fuse_req *req;
+
+ err = -ENOTCONN;
+ if (!ring)
+ return err;
+
+ if (qid >= ring->nr_queues)
+ return -EINVAL;
+
+ queue = ring->queues[qid];
+ if (!queue)
+ return err;
+ fpq = &queue->fpq;
+
+ if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped))
+ return err;
+
+ spin_lock(&queue->lock);
+ /* Find a request based on the unique ID of the fuse request
+ * This should get revised, as it needs a hash calculation and list
+ * search. And full struct fuse_pqueue is needed (memory overhead).
+ * As well as the link from req to ring_ent.
+ */
+ req = fuse_request_find(fpq, commit_id);
+ err = -ENOENT;
+ if (!req) {
+ pr_info("qid=%d commit_id %llu not found\n", queue->qid,
+ commit_id);
+ spin_unlock(&queue->lock);
+ return err;
+ }
+ list_del_init(&req->list);
+ ent = req->ring_entry;
+ req->ring_entry = NULL;
+
+ err = fuse_ring_ent_set_commit(ent);
+ if (err != 0) {
+ pr_info_ratelimited("qid=%d commit_id %llu state %d",
+ queue->qid, commit_id, ent->state);
+ spin_unlock(&queue->lock);
+ req->out.h.error = err;
+ clear_bit(FR_SENT, &req->flags);
+ fuse_request_end(req);
+ return err;
+ }
+
+ ent->cmd = cmd;
+ spin_unlock(&queue->lock);
+
+ /* without the queue lock, as other locks are taken */
+ fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+ fuse_uring_commit(ent, req, issue_flags);
+
+ /*
+ * Fetching the next request is absolutely required as queued
+ * fuse requests would otherwise not get processed - committing
+ * and fetching is done in one step vs legacy fuse, which has separated
+ * read (fetch request) and write (commit result).
+ */
+ fuse_uring_next_fuse_req(ent, queue, issue_flags);
+ return 0;
+}
+
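+/*
+ * The ring is considered ready once every queue has at least one
+ * available (registered) entry.
+ */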
+static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ bool ready = true;
+
+ for (qid = 0; qid < ring->nr_queues && ready; qid++) {
+ if (current_qid == qid)
+ continue;
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ ready = false;
+ break;
+ }
+
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->ent_avail_queue))
+ ready = false;
+ spin_unlock(&queue->lock);
+ }
+
+ return ready;
+}
+
+/*
+ * fuse_uring_req_fetch command handling
+ */
+static void fuse_uring_do_register(struct fuse_ring_ent *ent,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+
+ spin_lock(&queue->lock);
+ ent->cmd = cmd;
+ fuse_uring_ent_avail(ent, queue);
+ spin_unlock(&queue->lock);
+
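+ /*
+ * Once all queues have registered entries, switch the input queue
+ * over to the io-uring ops and wake up any waiters.
+ */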
+ if (!ring->ready) {
+ bool ready = is_ring_ready(ring, queue->qid);
+
+ if (ready) {
+ WRITE_ONCE(fiq->ops, &fuse_io_uring_ops);
+ WRITE_ONCE(ring->ready, true);
+ wake_up_all(&fc->blocked_waitq);
+ }
+ }
+}
+
+/*
+ * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
+ * the payload
+ */
+static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe,
+ struct iovec iov[FUSE_URING_IOV_SEGS])
+{
+ struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ struct iov_iter iter;
+ ssize_t ret;
+
+ if (sqe->len != FUSE_URING_IOV_SEGS)
+ return -EINVAL;
+
+ /*
+ * Direction for buffer access will actually be READ and WRITE,
+ * using WRITE for the import should include READ access as well.
+ */
+ ret = import_iovec(WRITE, uiov, FUSE_URING_IOV_SEGS,
+ FUSE_URING_IOV_SEGS, &iov, &iter);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct fuse_ring_ent *
+fuse_uring_create_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_queue *queue)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_ring_ent *ent;
+ size_t payload_size;
+ struct iovec iov[FUSE_URING_IOV_SEGS];
+ int err;
+
+ err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov);
+ if (err) {
+ pr_info_ratelimited("Failed to get iovec from sqe, err=%d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ err = -EINVAL;
+ if (iov[0].iov_len < sizeof(struct fuse_uring_req_header)) {
+ pr_info_ratelimited("Invalid header len %zu\n", iov[0].iov_len);
+ return ERR_PTR(err);
+ }
+
+ payload_size = iov[1].iov_len;
+ if (payload_size < ring->max_payload_sz) {
+ pr_info_ratelimited("Invalid req payload len %zu\n",
+ payload_size);
+ return ERR_PTR(err);
+ }
+
+ err = -ENOMEM;
+ ent = kzalloc(sizeof(*ent), GFP_KERNEL_ACCOUNT);
+ if (!ent)
+ return ERR_PTR(err);
+
+ INIT_LIST_HEAD(&ent->list);
+
+ ent->queue = queue;
+ ent->headers = iov[0].iov_base;
+ ent->payload = iov[1].iov_base;
+
+ atomic_inc(&ring->queue_refs);
+ return ent;
+}
+
+/*
+ * Registers the header and payload buffers with the kernel and puts the
+ * entry onto the queue as "ready to get fuse requests".
+ */
+static int fuse_uring_register(struct io_uring_cmd *cmd,
+ unsigned int issue_flags, struct fuse_conn *fc)
+{
+ const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ struct fuse_ring *ring = smp_load_acquire(&fc->ring);
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent;
+ int err;
+ unsigned int qid = READ_ONCE(cmd_req->qid);
+
+ err = -ENOMEM;
+ if (!ring) {
+ ring = fuse_uring_create(fc);
+ if (!ring)
+ return err;
+ }
+
+ if (qid >= ring->nr_queues) {
+ pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid);
+ return -EINVAL;
+ }
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ queue = fuse_uring_create_queue(ring, qid);
+ if (!queue)
+ return err;
+ }
+
+ /*
+ * The queue created above does not need to be destructed if the
+ * entry setup below fails; that is done at ring destruction time.
+ */
+
+ ent = fuse_uring_create_ring_ent(cmd, queue);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ fuse_uring_do_register(ent, cmd, issue_flags);
+
+ return 0;
+}
+
+/*
+ * Entry function from io_uring to handle the given passthrough command
+ * (op code IORING_OP_URING_CMD)
+ */
+int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct fuse_dev *fud;
+ struct fuse_conn *fc;
+ u32 cmd_op = cmd->cmd_op;
+ int err;
+
+ if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
+ fuse_uring_cancel(cmd, issue_flags);
+ return 0;
+ }
+
+ /* This extra SQE size holds struct fuse_uring_cmd_req */
+ if (!(issue_flags & IO_URING_F_SQE128))
+ return -EINVAL;
+
+ fud = fuse_get_dev(cmd->file);
+ if (IS_ERR(fud)) {
+ pr_info_ratelimited("No fuse device found\n");
+ return PTR_ERR(fud);
+ }
+ fc = fud->fc;
+
+ /* Once a connection has io-uring enabled on it, it can't be disabled */
+ if (!enable_uring && !fc->io_uring) {
+ pr_info_ratelimited("fuse-io-uring is disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (fc->aborted)
+ return -ECONNABORTED;
+ if (!fc->connected)
+ return -ENOTCONN;
+
+ /*
+ * fuse_uring_register() needs the connection to be initialized,
+ * as we need to know the max payload size.
+ */
+ if (!fc->initialized)
+ return -EAGAIN;
+
+ switch (cmd_op) {
+ case FUSE_IO_URING_CMD_REGISTER:
+ err = fuse_uring_register(cmd, issue_flags, fc);
+ if (err) {
+ pr_info_once("FUSE_IO_URING_CMD_REGISTER failed err=%d\n",
+ err);
+ fc->io_uring = 0;
+ wake_up_all(&fc->blocked_waitq);
+ return err;
+ }
+ break;
+ case FUSE_IO_URING_CMD_COMMIT_AND_FETCH:
+ err = fuse_uring_commit_fetch(cmd, issue_flags, fc);
+ if (err) {
+ pr_info_once("FUSE_IO_URING_COMMIT_AND_FETCH failed err=%d\n",
+ err);
+ return err;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EIOCBQUEUED;
+}
+
+static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd,
+ ssize_t ret, unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+ ent->state = FRRS_USERSPACE;
+ list_move_tail(&ent->list, &queue->ent_in_userspace);
+ ent->cmd = NULL;
+ spin_unlock(&queue->lock);
+
+ io_uring_cmd_done(cmd, ret, issue_flags);
+}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have permission
+ * to write to them - so this has to be executed in the ring task context.
+ */
+ */
+static void fuse_uring_send_in_task(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+
+ if (!tw.cancel) {
+ err = fuse_uring_prepare_send(ent, ent->fuse_req);
+ if (err) {
+ fuse_uring_next_fuse_req(ent, queue, issue_flags);
+ return;
+ }
+ } else {
+ err = -ECANCELED;
+ }
+
+ fuse_uring_send(ent, cmd, err, issue_flags);
+}
+
+static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
+{
+ unsigned int qid;
+ struct fuse_ring_queue *queue;
+
+ qid = task_cpu(current);
+
+ if (WARN_ONCE(qid >= ring->nr_queues,
+ "Core number (%u) exceeds nr queues (%zu)\n", qid,
+ ring->nr_queues))
+ qid = 0;
+
+ queue = ring->queues[qid];
+ WARN_ONCE(!queue, "Missing queue for qid %d\n", qid);
+
+ return queue;
+}
+
+static void fuse_uring_dispatch_ent(struct fuse_ring_ent *ent)
+{
+ struct io_uring_cmd *cmd = ent->cmd;
+
+ uring_cmd_set_ring_ent(cmd, ent);
+ io_uring_cmd_complete_in_task(cmd, fuse_uring_send_in_task);
+}
+
+/* queue a fuse request and send it if a ring entry is available */
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent = NULL;
+ int err;
+
+ err = -EINVAL;
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ goto err;
+
+ fuse_request_assign_unique(fiq, req);
+
+ spin_lock(&queue->lock);
+ err = -ENOTCONN;
+ if (unlikely(queue->stopped))
+ goto err_unlock;
+
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
+ ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ if (ent)
+ fuse_uring_add_req_to_ring_ent(ent, req);
+ else
+ list_add_tail(&req->list, &queue->fuse_req_queue);
+ spin_unlock(&queue->lock);
+
+ if (ent)
+ fuse_uring_dispatch_ent(ent);
+
+ return;
+
+err_unlock:
+ spin_unlock(&queue->lock);
+err:
+ req->out.h.error = err;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+}
+
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent = NULL;
+
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ return false;
+
+ spin_lock(&queue->lock);
+ if (unlikely(queue->stopped)) {
+ spin_unlock(&queue->lock);
+ return false;
+ }
+
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
+ list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+ ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ spin_lock(&fc->bg_lock);
+ fc->num_background++;
+ if (fc->num_background == fc->max_background)
+ fc->blocked = 1;
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+
+ /*
+ * Due to bg_queue flush limits there might be other bg requests
+ * in the queue that need to be handled first, or no further request
+ * might be available at all.
+ */
+ req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+ list);
+ if (ent && req) {
+ fuse_uring_add_req_to_ring_ent(ent, req);
+ spin_unlock(&queue->lock);
+
+ fuse_uring_dispatch_ent(ent);
+ } else {
+ spin_unlock(&queue->lock);
+ }
+
+ return true;
+}
+
+bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = req->ring_queue;
+
+ return fuse_remove_pending_req(req, &queue->lock);
+}
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops = {
+ /* should be sent over io-uring as an enhancement */
+ .send_forget = fuse_dev_queue_forget,
+
+ /*
+ * could be sent over io-uring, but interrupts should be rare,
+ * so there is no need to make the code complex
+ */
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_uring_queue_fuse_req,
+};
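For orientation, here is a rough userspace sketch (not part of this patch) of how a server could drive the interface implemented above: each queue registers its entries with FUSE_IO_URING_CMD_REGISTER, passing an iovec pair (headers, payload) in sqe->addr, and later answers requests with FUSE_IO_URING_CMD_COMMIT_AND_FETCH, which commits the result and fetches the next request in one step. The liburing usage and the struct fuse_uring_cmd_req field names below are assumptions, not taken from this patch.

/*
 * Illustrative userspace sketch - assumes an io_uring created with
 * IORING_SETUP_SQE128 and the uapi names referenced above; the field names
 * of struct fuse_uring_cmd_req (qid, commit_id) are assumed.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <liburing.h>
#include <linux/fuse.h>

static int fuse_uring_submit_cmd(struct io_uring *ring, int fuse_dev_fd,
				 unsigned int qid, uint64_t commit_id,
				 uint32_t cmd_op, struct iovec iov[2])
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct fuse_uring_cmd_req *req;

	if (!sqe)
		return -EAGAIN;

	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128: the SQE spans two slots */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fuse_dev_fd;			/* fuse device fd for this queue */
	sqe->cmd_op = cmd_op;			/* REGISTER or COMMIT_AND_FETCH */
	sqe->addr = (uint64_t)(uintptr_t)iov;	/* iov[0] = headers, iov[1] = payload */
	sqe->len = 2;				/* FUSE_URING_IOV_SEGS */

	req = (struct fuse_uring_cmd_req *)sqe->cmd;	/* extra SQE128 space */
	req->qid = qid;					/* assumed field name */
	req->commit_id = commit_id;			/* assumed field name */

	return io_uring_submit(ring);
}

A completion on the register or commit-and-fetch command means a new fuse request has been written into the headers/payload buffers; the server handles it, fills in the reply, and submits the next FUSE_IO_URING_CMD_COMMIT_AND_FETCH using the commit id taken from the request header.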
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
new file mode 100644
index 000000000000..51a563922ce1
--- /dev/null
+++ b/fs/fuse/dev_uring_i.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * FUSE: Filesystem in Userspace
+ * Copyright (c) 2023-2024 DataDirect Networks.
+ */
+
+#ifndef _FS_FUSE_DEV_URING_I_H
+#define _FS_FUSE_DEV_URING_I_H
+
+#include "fuse_i.h"
+
+#ifdef CONFIG_FUSE_IO_URING
+
+#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ)
+#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20)
+
+enum fuse_ring_req_state {
+ FRRS_INVALID = 0,
+
+ /* The ring entry was received from userspace and is being processed */
+ FRRS_COMMIT,
+
+ /* The ring entry is waiting for new fuse requests */
+ FRRS_AVAILABLE,
+
+ /* The ring entry got assigned a fuse req */
+ FRRS_FUSE_REQ,
+
+ /* The ring entry is in or on the way to user space */
+ FRRS_USERSPACE,
+
+ /* The ring entry is in teardown */
+ FRRS_TEARDOWN,
+
+ /* The ring entry is released, but not freed yet */
+ FRRS_RELEASED,
+};
+
+/** A fuse ring entry, part of the ring queue */
+struct fuse_ring_ent {
+ /* userspace buffers */
+ struct fuse_uring_req_header __user *headers;
+ void __user *payload;
+
+ /* the ring queue that owns the request */
+ struct fuse_ring_queue *queue;
+
+ /* fields below are protected by queue->lock */
+
+ struct io_uring_cmd *cmd;
+
+ struct list_head list;
+
+ enum fuse_ring_req_state state;
+
+ struct fuse_req *fuse_req;
+};
+
+struct fuse_ring_queue {
+ /*
+ * back pointer to the main fuse uring structure that holds this
+ * queue
+ */
+ struct fuse_ring *ring;
+
+ /* queue id, corresponds to the cpu core */
+ unsigned int qid;
+
+ /*
+ * queue lock, taken when any value in the queue changes _and_ also
+ * when a ring entry state changes.
+ */
+ spinlock_t lock;
+
+ /* available ring entries (struct fuse_ring_ent) */
+ struct list_head ent_avail_queue;
+
+ /*
+ * entries in the process of being committed or in the process
+ * of being sent to userspace
+ */
+ struct list_head ent_w_req_queue;
+ struct list_head ent_commit_queue;
+
+ /* entries in userspace */
+ struct list_head ent_in_userspace;
+
+ /* entries that are released */
+ struct list_head ent_released;
+
+ /* fuse requests waiting for an entry slot */
+ struct list_head fuse_req_queue;
+
+ /* background fuse requests */
+ struct list_head fuse_req_bg_queue;
+
+ struct fuse_pqueue fpq;
+
+ unsigned int active_background;
+
+ bool stopped;
+};
+
+/**
+ * Describes if io-uring is used for communication and holds all the data
+ * needed for io-uring communication
+ */
+struct fuse_ring {
+ /* back pointer */
+ struct fuse_conn *fc;
+
+ /* number of ring queues */
+ size_t nr_queues;
+
+ /* maximum payload/arg size */
+ size_t max_payload_sz;
+
+ struct fuse_ring_queue **queues;
+
+ /*
+ * Log ring entry states on stop when entries cannot be released
+ */
+ unsigned int stop_debug_log : 1;
+
+ wait_queue_head_t stop_waitq;
+
+ /* async tear down */
+ struct delayed_work async_teardown_work;
+
+ /* log */
+ unsigned long teardown_time;
+
+ atomic_t queue_refs;
+
+ bool ready;
+};
+
+bool fuse_uring_enabled(void);
+void fuse_uring_destruct(struct fuse_conn *fc);
+void fuse_uring_stop_queues(struct fuse_ring *ring);
+void fuse_uring_abort_end_requests(struct fuse_ring *ring);
+int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_uring_queue_bq_req(struct fuse_req *req);
+bool fuse_uring_remove_pending_req(struct fuse_req *req);
+bool fuse_uring_request_expired(struct fuse_conn *fc);
+
+static inline void fuse_uring_abort(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+
+ if (ring == NULL)
+ return;
+
+ if (atomic_read(&ring->queue_refs) > 0) {
+ fuse_uring_abort_end_requests(ring);
+ fuse_uring_stop_queues(ring);
+ }
+}
+
+static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+
+ if (ring)
+ wait_event(ring->stop_waitq,
+ atomic_read(&ring->queue_refs) == 0);
+}
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return fc->ring && fc->ring->ready;
+}
+
+#else /* CONFIG_FUSE_IO_URING */
+
+static inline void fuse_uring_destruct(struct fuse_conn *fc)
+{
+}
+
+static inline bool fuse_uring_enabled(void)
+{
+ return false;
+}
+
+static inline void fuse_uring_abort(struct fuse_conn *fc)
+{
+}
+
+static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
+{
+}
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return false;
+}
+
+static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ return false;
+}
+
+static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ return false;
+}
+
+#endif /* CONFIG_FUSE_IO_URING */
+
+#endif /* _FS_FUSE_DEV_URING_I_H */
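The stub half of the header above exists so that call sites outside dev_uring.c stay free of CONFIG_FUSE_IO_URING #ifdefs. A minimal sketch of how a caller might use the teardown helpers (the surrounding connection-abort code is not part of this hunk, so the function name is illustrative):

/* Illustrative caller only - a fuse_abort_conn()-style teardown path is assumed. */
static void example_teardown(struct fuse_conn *fc)
{
	/* With CONFIG_FUSE_IO_URING=n both helpers compile to no-ops. */
	fuse_uring_abort(fc);			/* end ring requests, stop queues */

	/* ... abort the classic /dev/fuse queues, wake waiters ... */

	fuse_uring_wait_stopped_queues(fc);	/* block until queue_refs drops to 0 */
}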
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0654bfedcbb0..4b6b3d2758ff 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -11,12 +11,82 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fs_context.h>
+#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/iversion.h>
#include <linux/posix_acl.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+static bool __read_mostly allow_sys_admin_access;
+module_param(allow_sys_admin_access, bool, 0644);
+MODULE_PARM_DESC(allow_sys_admin_access,
+ "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
+
+struct dentry_bucket {
+ struct rb_root tree;
+ spinlock_t lock;
+};
+
+#define HASH_BITS 5
+#define HASH_SIZE (1 << HASH_BITS)
+static struct dentry_bucket dentry_hash[HASH_SIZE];
+struct delayed_work dentry_tree_work;
+
+/* Minimum invalidation work queue period (seconds) */
+#define FUSE_DENTRY_INVAL_FREQ_MIN 5
+
+unsigned __read_mostly inval_wq;
+static int inval_wq_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned int num;
+ unsigned int old = inval_wq;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtouint(val, 0, &num);
+ if (ret)
+ return ret;
+
+ if ((num < FUSE_DENTRY_INVAL_FREQ_MIN) && (num != 0))
+ return -EINVAL;
+
+ /* This should prevent overflow in secs_to_jiffies() */
+ if (num > USHRT_MAX)
+ return -EINVAL;
+
+ *((unsigned int *)kp->arg) = num;
+
+ if (num && !old)
+ schedule_delayed_work(&dentry_tree_work,
+ secs_to_jiffies(num));
+ else if (!num && old)
+ cancel_delayed_work_sync(&dentry_tree_work);
+
+ return 0;
+}
+static const struct kernel_param_ops inval_wq_ops = {
+ .set = inval_wq_set,
+ .get = param_get_uint,
+};
+module_param_cb(inval_wq, &inval_wq_ops, &inval_wq, 0644);
+__MODULE_PARM_TYPE(inval_wq, "uint");
+MODULE_PARM_DESC(inval_wq,
+ "Dentries invalidation work queue period in secs (>= "
+ __stringify(FUSE_DENTRY_INVAL_FREQ_MIN) ").");
+
+static inline struct dentry_bucket *get_dentry_bucket(struct dentry *dentry)
+{
+ int i = hash_ptr(dentry, HASH_BITS);
+
+ return &dentry_hash[i];
+}
static void fuse_advise_use_readdirplus(struct inode *dir)
{
@@ -25,33 +95,151 @@ static void fuse_advise_use_readdirplus(struct inode *dir)
set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
}
-#if BITS_PER_LONG >= 64
-static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
+struct fuse_dentry {
+ u64 time;
+ union {
+ struct rcu_head rcu;
+ struct rb_node node;
+ };
+ struct dentry *dentry;
+};
+
+static void __fuse_dentry_tree_del_node(struct fuse_dentry *fd,
+ struct dentry_bucket *bucket)
{
- entry->d_fsdata = (void *) time;
+ if (!RB_EMPTY_NODE(&fd->node)) {
+ rb_erase(&fd->node, &bucket->tree);
+ RB_CLEAR_NODE(&fd->node);
+ }
}
-static inline u64 fuse_dentry_time(const struct dentry *entry)
+static void fuse_dentry_tree_del_node(struct dentry *dentry)
{
- return (u64)entry->d_fsdata;
+ struct fuse_dentry *fd = dentry->d_fsdata;
+ struct dentry_bucket *bucket = get_dentry_bucket(dentry);
+
+ spin_lock(&bucket->lock);
+ __fuse_dentry_tree_del_node(fd, bucket);
+ spin_unlock(&bucket->lock);
}
-#else
-union fuse_dentry {
- u64 time;
- struct rcu_head rcu;
-};
+static void fuse_dentry_tree_add_node(struct dentry *dentry)
+{
+ struct fuse_dentry *fd = dentry->d_fsdata;
+ struct dentry_bucket *bucket;
+ struct fuse_dentry *cur;
+ struct rb_node **p, *parent = NULL;
+
+ if (!inval_wq)
+ return;
+
+ bucket = get_dentry_bucket(dentry);
+
+ spin_lock(&bucket->lock);
+
+ __fuse_dentry_tree_del_node(fd, bucket);
+
+ p = &bucket->tree.rb_node;
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(*p, struct fuse_dentry, node);
+ if (fd->time < cur->time)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&fd->node, parent, p);
+ rb_insert_color(&fd->node, &bucket->tree);
+ spin_unlock(&bucket->lock);
+}
+
+/*
+ * Work queue which, when enabled, periodically checks for expired dentries
+ * in the dentry tree.
+ */
+static void fuse_dentry_tree_work(struct work_struct *work)
+{
+ LIST_HEAD(dispose);
+ struct fuse_dentry *fd;
+ struct rb_node *node;
+ int i;
+
+ for (i = 0; i < HASH_SIZE; i++) {
+ spin_lock(&dentry_hash[i].lock);
+ node = rb_first(&dentry_hash[i].tree);
+ while (node) {
+ fd = rb_entry(node, struct fuse_dentry, node);
+ if (time_after64(get_jiffies_64(), fd->time)) {
+ rb_erase(&fd->node, &dentry_hash[i].tree);
+ RB_CLEAR_NODE(&fd->node);
+ spin_unlock(&dentry_hash[i].lock);
+ d_dispose_if_unused(fd->dentry, &dispose);
+ cond_resched();
+ spin_lock(&dentry_hash[i].lock);
+ } else
+ break;
+ node = rb_first(&dentry_hash[i].tree);
+ }
+ spin_unlock(&dentry_hash[i].lock);
+ shrink_dentry_list(&dispose);
+ }
+
+ if (inval_wq)
+ schedule_delayed_work(&dentry_tree_work,
+ secs_to_jiffies(inval_wq));
+}
+
+void fuse_epoch_work(struct work_struct *work)
+{
+ struct fuse_conn *fc = container_of(work, struct fuse_conn,
+ epoch_work);
+ struct fuse_mount *fm;
+ struct inode *inode;
+
+ down_read(&fc->killsb);
+
+ inode = fuse_ilookup(fc, FUSE_ROOT_ID, &fm);
+ if (inode) {
+ iput(inode);
+ /* Remove all possible active references to cached inodes */
+ shrink_dcache_sb(fm->sb);
+ } else {
+ pr_warn("Failed to get root inode\n");
+ }
+
+ up_read(&fc->killsb);
+}
+
+void fuse_dentry_tree_init(void)
+{
+ int i;
+
+ for (i = 0; i < HASH_SIZE; i++) {
+ spin_lock_init(&dentry_hash[i].lock);
+ dentry_hash[i].tree = RB_ROOT;
+ }
+ INIT_DELAYED_WORK(&dentry_tree_work, fuse_dentry_tree_work);
+}
+
+void fuse_dentry_tree_cleanup(void)
+{
+ int i;
+
+ inval_wq = 0;
+ cancel_delayed_work_sync(&dentry_tree_work);
+
+ for (i = 0; i < HASH_SIZE; i++)
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&dentry_hash[i].tree));
+}
static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
{
- ((union fuse_dentry *) dentry->d_fsdata)->time = time;
+ ((struct fuse_dentry *) dentry->d_fsdata)->time = time;
}
static inline u64 fuse_dentry_time(const struct dentry *entry)
{
- return ((union fuse_dentry *) entry->d_fsdata)->time;
+ return ((struct fuse_dentry *) entry->d_fsdata)->time;
}
-#endif
static void fuse_dentry_settime(struct dentry *dentry, u64 time)
{
@@ -72,6 +260,7 @@ static void fuse_dentry_settime(struct dentry *dentry, u64 time)
}
__fuse_dentry_settime(dentry, time);
+ fuse_dentry_tree_add_node(dentry);
}
/*
@@ -83,7 +272,7 @@ static void fuse_dentry_settime(struct dentry *dentry, u64 time)
/*
* Calculate the time in jiffies until a dentry/attributes are valid
*/
-static u64 time_to_jiffies(u64 sec, u32 nsec)
+u64 fuse_time_to_jiffies(u64 sec, u32 nsec)
{
if (sec || nsec) {
struct timespec64 ts = {
@@ -103,17 +292,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
{
fuse_dentry_settime(entry,
- time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
-}
-
-static u64 attr_timeout(struct fuse_attr_out *o)
-{
- return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
-}
-
-u64 entry_attr_timeout(struct fuse_entry_out *o)
-{
- return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
+ fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
}
void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
@@ -136,7 +315,7 @@ static void fuse_dir_changed(struct inode *dir)
inode_maybe_inc_iversion(dir, false);
}
-/**
+/*
* Mark the attributes as stale due to an atime change. Avoid the invalidate if
* atime is not used.
*/
@@ -176,9 +355,12 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
memset(outarg, 0, sizeof(struct fuse_entry_out));
args->opcode = FUSE_LOOKUP;
args->nodeid = nodeid;
- args->in_numargs = 1;
- args->in_args[0].size = name->len + 1;
- args->in_args[0].value = name->name;
+ args->in_numargs = 3;
+ fuse_set_zero_arg0(args);
+ args->in_args[1].size = name->len;
+ args->in_args[1].value = name->name;
+ args->in_args[2].size = 1;
+ args->in_args[2].value = "";
args->out_numargs = 1;
args->out_args[0].size = sizeof(struct fuse_entry_out);
args->out_args[0].value = outarg;
@@ -193,19 +375,24 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
* the lookup once more. If the lookup results in the same inode,
* then refresh the attributes, timeouts and mark the dentry valid.
*/
-static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+static int fuse_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *entry, unsigned int flags)
{
struct inode *inode;
- struct dentry *parent;
struct fuse_mount *fm;
+ struct fuse_conn *fc;
struct fuse_inode *fi;
int ret;
+ fc = get_fuse_conn_super(dir->i_sb);
+ if (entry->d_time < atomic_read(&fc->epoch))
+ goto invalid;
+
inode = d_inode_rcu(entry);
if (inode && fuse_is_bad(inode))
goto invalid;
else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
- (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) {
+ (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
struct fuse_entry_out outarg;
FUSE_ARGS(args);
struct fuse_forget_link *forget;
@@ -228,11 +415,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
attr_version = fuse_get_attr_version(fm->fc);
- parent = dget_parent(entry);
- fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
- &entry->d_name, &outarg);
+ fuse_lookup_init(fm->fc, &args, get_node_id(dir),
+ name, &outarg);
ret = fuse_simple_request(fm, &args);
- dput(parent);
/* Zero nodeid is same as -ENOENT */
if (!ret && !outarg.nodeid)
ret = -ENOENT;
@@ -249,15 +434,15 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
spin_unlock(&fi->lock);
}
kfree(forget);
- if (ret == -ENOMEM)
+ if (ret == -ENOMEM || ret == -EINTR)
goto out;
if (ret || fuse_invalid_attr(&outarg.attr) ||
fuse_stale_inode(inode, outarg.generation, &outarg.attr))
goto invalid;
forget_all_cached_acls(inode);
- fuse_change_attributes(inode, &outarg.attr,
- entry_attr_timeout(&outarg),
+ fuse_change_attributes(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
attr_version);
fuse_change_entry_timeout(entry, &outarg);
} else if (inode) {
@@ -266,9 +451,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
return -ECHILD;
} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
- parent = dget_parent(entry);
- fuse_advise_use_readdirplus(d_inode(parent));
- dput(parent);
+ fuse_advise_use_readdirplus(dir);
}
}
ret = 1;
@@ -280,21 +463,36 @@ invalid:
goto out;
}
-#if BITS_PER_LONG < 64
static int fuse_dentry_init(struct dentry *dentry)
{
- dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
- GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
+ struct fuse_dentry *fd;
- return dentry->d_fsdata ? 0 : -ENOMEM;
+ fd = kzalloc(sizeof(struct fuse_dentry),
+ GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
+ if (!fd)
+ return -ENOMEM;
+
+ fd->dentry = dentry;
+ RB_CLEAR_NODE(&fd->node);
+ dentry->d_fsdata = fd;
+
+ return 0;
+}
+
+static void fuse_dentry_prune(struct dentry *dentry)
+{
+ struct fuse_dentry *fd = dentry->d_fsdata;
+
+ if (!RB_EMPTY_NODE(&fd->node))
+ fuse_dentry_tree_del_node(dentry);
}
+
static void fuse_dentry_release(struct dentry *dentry)
{
- union fuse_dentry *fd = dentry->d_fsdata;
+ struct fuse_dentry *fd = dentry->d_fsdata;
kfree_rcu(fd, rcu);
}
-#endif
static int fuse_dentry_delete(const struct dentry *dentry)
{
@@ -321,9 +519,6 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
/* Create the submount */
mnt = fc_mount(fsc);
- if (!IS_ERR(mnt))
- mntget(mnt);
-
put_fs_context(fsc);
return mnt;
}
@@ -331,30 +526,26 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
.d_delete = fuse_dentry_delete,
-#if BITS_PER_LONG < 64
.d_init = fuse_dentry_init,
+ .d_prune = fuse_dentry_prune,
.d_release = fuse_dentry_release,
-#endif
.d_automount = fuse_dentry_automount,
};
-const struct dentry_operations fuse_root_dentry_operations = {
-#if BITS_PER_LONG < 64
- .d_init = fuse_dentry_init,
- .d_release = fuse_dentry_release,
-#endif
-};
-
int fuse_valid_type(int m)
{
return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}
+static bool fuse_valid_size(u64 size)
+{
+ return size <= LLONG_MAX;
+}
+
bool fuse_invalid_attr(struct fuse_attr *attr)
{
- return !fuse_valid_type(attr->mode) ||
- attr->size > LLONG_MAX;
+ return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size);
}
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
@@ -363,12 +554,12 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
struct fuse_mount *fm = get_fuse_mount_super(sb);
FUSE_ARGS(args);
struct fuse_forget_link *forget;
- u64 attr_version;
+ u64 attr_version, evict_ctr;
int err;
*inode = NULL;
err = -ENAMETOOLONG;
- if (name->len > FUSE_NAME_MAX)
+ if (name->len > fm->fc->name_max)
goto out;
@@ -378,6 +569,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
goto out;
attr_version = fuse_get_attr_version(fm->fc);
+ evict_ctr = fuse_get_evict_ctr(fm->fc);
fuse_lookup_init(fm->fc, &args, nodeid, name, outarg);
err = fuse_simple_request(fm, &args);
@@ -386,14 +578,16 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
goto out_put_forget;
err = -EIO;
- if (!outarg->nodeid)
- goto out_put_forget;
if (fuse_invalid_attr(&outarg->attr))
goto out_put_forget;
+ if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
+ pr_warn_once("root generation should be zero\n");
+ outarg->generation = 0;
+ }
*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
- &outarg->attr, entry_attr_timeout(outarg),
- attr_version);
+ &outarg->attr, ATTR_TIMEOUT(outarg),
+ attr_version, evict_ctr);
err = -ENOMEM;
if (!*inode) {
fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
@@ -410,16 +604,20 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
unsigned int flags)
{
- int err;
struct fuse_entry_out outarg;
+ struct fuse_conn *fc;
struct inode *inode;
struct dentry *newent;
+ int err, epoch;
bool outarg_valid = true;
bool locked;
if (fuse_is_bad(dir))
return ERR_PTR(-EIO);
+ fc = get_fuse_conn_super(dir->i_sb);
+ epoch = atomic_read(&fc->epoch);
+
locked = fuse_lock_inode(dir);
err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
&outarg, &inode);
@@ -441,6 +639,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
goto out_err;
entry = newent ? newent : entry;
+ entry->d_time = epoch;
if (outarg_valid)
fuse_change_entry_timeout(entry, &outarg);
else
@@ -456,37 +655,187 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
return ERR_PTR(err);
}
+static int get_security_context(struct dentry *entry, umode_t mode,
+ struct fuse_in_arg *ext)
+{
+ struct fuse_secctx *fctx;
+ struct fuse_secctx_header *header;
+ struct lsm_context lsmctx = { };
+ void *ptr;
+ u32 total_len = sizeof(*header);
+ int err, nr_ctx = 0;
+ const char *name = NULL;
+ size_t namesize;
+
+ err = security_dentry_init_security(entry, mode, &entry->d_name,
+ &name, &lsmctx);
+
+ /* If no LSM supports this security hook, ignore the error */
+ if (err && err != -EOPNOTSUPP)
+ goto out_err;
+
+ if (lsmctx.len) {
+ nr_ctx = 1;
+ namesize = strlen(name) + 1;
+ err = -EIO;
+ if (WARN_ON(namesize > XATTR_NAME_MAX + 1 ||
+ lsmctx.len > S32_MAX))
+ goto out_err;
+ total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namesize +
+ lsmctx.len);
+ }
+
+ err = -ENOMEM;
+ header = ptr = kzalloc(total_len, GFP_KERNEL);
+ if (!ptr)
+ goto out_err;
+
+ header->nr_secctx = nr_ctx;
+ header->size = total_len;
+ ptr += sizeof(*header);
+ if (nr_ctx) {
+ fctx = ptr;
+ fctx->size = lsmctx.len;
+ ptr += sizeof(*fctx);
+
+ strscpy(ptr, name, namesize);
+ ptr += namesize;
+
+ memcpy(ptr, lsmctx.context, lsmctx.len);
+ }
+ ext->size = total_len;
+ ext->value = header;
+ err = 0;
+out_err:
+ if (nr_ctx)
+ security_release_secctx(&lsmctx);
+ return err;
+}
+
+static void *extend_arg(struct fuse_in_arg *buf, u32 bytes)
+{
+ void *p;
+ u32 newlen = buf->size + bytes;
+
+ p = krealloc(buf->value, newlen, GFP_KERNEL);
+ if (!p) {
+ kfree(buf->value);
+ buf->size = 0;
+ buf->value = NULL;
+ return NULL;
+ }
+
+ memset(p + buf->size, 0, bytes);
+ buf->value = p;
+ buf->size = newlen;
+
+ return p + newlen - bytes;
+}
+
+static u32 fuse_ext_size(size_t size)
+{
+ return FUSE_REC_ALIGN(sizeof(struct fuse_ext_header) + size);
+}
+
+/*
+ * This adds just a single supplementary group that matches the parent's group.
+ */
+static int get_create_supp_group(struct mnt_idmap *idmap,
+ struct inode *dir,
+ struct fuse_in_arg *ext)
+{
+ struct fuse_conn *fc = get_fuse_conn(dir);
+ struct fuse_ext_header *xh;
+ struct fuse_supp_groups *sg;
+ kgid_t kgid = dir->i_gid;
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
+ gid_t parent_gid = from_kgid(fc->user_ns, kgid);
+
+ u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
+
+ if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
+ !vfsgid_in_group_p(vfsgid))
+ return 0;
+
+ xh = extend_arg(ext, sg_len);
+ if (!xh)
+ return -ENOMEM;
+
+ xh->size = sg_len;
+ xh->type = FUSE_EXT_GROUPS;
+
+ sg = (struct fuse_supp_groups *) &xh[1];
+ sg->nr_groups = 1;
+ sg->groups[0] = parent_gid;
+
+ return 0;
+}
+
+static int get_create_ext(struct mnt_idmap *idmap,
+ struct fuse_args *args,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
+ struct fuse_in_arg ext = { .size = 0, .value = NULL };
+ int err = 0;
+
+ if (fc->init_security)
+ err = get_security_context(dentry, mode, &ext);
+ if (!err && fc->create_supp_group)
+ err = get_create_supp_group(idmap, dir, &ext);
+
+ if (!err && ext.size) {
+ WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
+ args->is_ext = true;
+ args->ext_idx = args->in_numargs++;
+ args->in_args[args->ext_idx] = ext;
+ } else {
+ kfree(ext.value);
+ }
+
+ return err;
+}
+
+static void free_ext_value(struct fuse_args *args)
+{
+ if (args->is_ext)
+ kfree(args->in_args[args->ext_idx].value);
+}
+
/*
* Atomic create+open operation
*
* If the filesystem doesn't support this, then fall back to separate
* 'mknod' + 'open' requests.
*/
-static int fuse_create_open(struct inode *dir, struct dentry *entry,
- struct file *file, unsigned int flags,
- umode_t mode)
+static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, struct file *file,
+ unsigned int flags, umode_t mode, u32 opcode)
{
- int err;
struct inode *inode;
struct fuse_mount *fm = get_fuse_mount(dir);
FUSE_ARGS(args);
struct fuse_forget_link *forget;
struct fuse_create_in inarg;
- struct fuse_open_out outopen;
+ struct fuse_open_out *outopenp;
struct fuse_entry_out outentry;
struct fuse_inode *fi;
struct fuse_file *ff;
+ int epoch, err;
+ bool trunc = flags & O_TRUNC;
/* Userspace expects S_IFREG in create mode */
BUG_ON((mode & S_IFMT) != S_IFREG);
+ epoch = atomic_read(&fm->fc->epoch);
forget = fuse_alloc_forget();
err = -ENOMEM;
if (!forget)
goto out_err;
err = -ENOMEM;
- ff = fuse_file_alloc(fm);
+ ff = fuse_file_alloc(fm, true);
if (!ff)
goto out_put_forget_req;
@@ -500,12 +849,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
inarg.mode = mode;
inarg.umask = current_umask();
- if (fm->fc->handle_killpriv_v2 && (flags & O_TRUNC) &&
+ if (fm->fc->handle_killpriv_v2 && trunc &&
!(flags & O_EXCL) && !capable(CAP_FSETID)) {
inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
}
- args.opcode = FUSE_CREATE;
+ args.opcode = opcode;
args.nodeid = get_node_id(dir);
args.in_numargs = 2;
args.in_args[0].size = sizeof(inarg);
@@ -515,9 +864,17 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.out_numargs = 2;
args.out_args[0].size = sizeof(outentry);
args.out_args[0].value = &outentry;
- args.out_args[1].size = sizeof(outopen);
- args.out_args[1].value = &outopen;
- err = fuse_simple_request(fm, &args);
+ /* Store outarg for fuse_finish_open() */
+ outopenp = &ff->args->open_outarg;
+ args.out_args[1].size = sizeof(*outopenp);
+ args.out_args[1].value = outopenp;
+
+ err = get_create_ext(idmap, &args, dir, entry, mode);
+ if (err)
+ goto out_free_ff;
+
+ err = fuse_simple_idmap_request(idmap, fm, &args);
+ free_ext_value(&args);
if (err)
goto out_free_ff;
@@ -526,11 +883,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
fuse_invalid_attr(&outentry.attr))
goto out_free_ff;
- ff->fh = outopen.fh;
+ ff->fh = outopenp->fh;
ff->nodeid = outentry.nodeid;
- ff->open_flags = outopen.open_flags;
+ ff->open_flags = outopenp->open_flags;
inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
- &outentry.attr, entry_attr_timeout(&outentry), 0);
+ &outentry.attr, ATTR_TIMEOUT(&outentry), 0, 0);
if (!inode) {
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
fuse_sync_release(NULL, ff, flags);
@@ -540,15 +897,22 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
}
kfree(forget);
d_instantiate(entry, inode);
+ entry->d_time = epoch;
fuse_change_entry_timeout(entry, &outentry);
fuse_dir_changed(dir);
- err = finish_open(file, entry, generic_file_open);
+ err = generic_file_open(inode, file);
+ if (!err) {
+ file->private_data = ff;
+ err = finish_open(file, entry, fuse_finish_open);
+ }
if (err) {
fi = get_fuse_inode(inode);
fuse_sync_release(fi, ff, flags);
} else {
- file->private_data = ff;
- fuse_finish_open(inode, file);
+ if (fm->fc->atomic_o_trunc && trunc)
+ truncate_pagecache(inode, 0);
+ else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
}
return err;
@@ -560,30 +924,27 @@ out_err:
return err;
}
-static int fuse_mknod(struct user_namespace *, struct inode *, struct dentry *,
+static int fuse_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, dev_t);
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
umode_t mode)
{
int err;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct fuse_conn *fc = get_fuse_conn(dir);
- struct dentry *res = NULL;
if (fuse_is_bad(dir))
return -EIO;
if (d_in_lookup(entry)) {
- res = fuse_lookup(dir, entry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- entry = res;
+ struct dentry *res = fuse_lookup(dir, entry, 0);
+ if (res || d_really_is_positive(entry))
+ return finish_no_open(file, res);
}
- if (!(flags & O_CREAT) || d_really_is_positive(entry))
- goto no_open;
+ if (!(flags & O_CREAT))
+ return finish_no_open(file, NULL);
/* Only creates */
file->f_mode |= FMODE_CREATED;
@@ -591,49 +952,57 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode);
+ err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
- }
-out_dput:
- dput(res);
+ } else if (err == -EEXIST)
+ fuse_invalidate_entry(entry);
return err;
mknod:
- err = fuse_mknod(&init_user_ns, dir, entry, mode, 0);
+ err = fuse_mknod(idmap, dir, entry, mode, 0);
if (err)
- goto out_dput;
-no_open:
- return finish_no_open(file, res);
+ return err;
+ return finish_no_open(file, NULL);
}
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
- struct inode *dir, struct dentry *entry,
- umode_t mode)
+static struct dentry *create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
struct dentry *d;
- int err;
struct fuse_forget_link *forget;
+ int epoch, err;
if (fuse_is_bad(dir))
- return -EIO;
+ return ERR_PTR(-EIO);
+
+ epoch = atomic_read(&fm->fc->epoch);
forget = fuse_alloc_forget();
if (!forget)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
memset(&outarg, 0, sizeof(outarg));
args->nodeid = get_node_id(dir);
args->out_numargs = 1;
args->out_args[0].size = sizeof(outarg);
args->out_args[0].value = &outarg;
- err = fuse_simple_request(fm, args);
+
+ if (args->opcode != FUSE_LINK) {
+ err = get_create_ext(idmap, args, dir, entry, mode);
+ if (err)
+ goto out_put_forget_req;
+ }
+
+ err = fuse_simple_idmap_request(idmap, fm, args);
+ free_ext_value(args);
if (err)
goto out_put_forget_req;
@@ -645,33 +1014,52 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
goto out_put_forget_req;
inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
- &outarg.attr, entry_attr_timeout(&outarg), 0);
+ &outarg.attr, ATTR_TIMEOUT(&outarg), 0, 0);
if (!inode) {
fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
kfree(forget);
d_drop(entry);
d = d_splice_alias(inode, entry);
if (IS_ERR(d))
- return PTR_ERR(d);
+ return d;
if (d) {
+ d->d_time = epoch;
fuse_change_entry_timeout(d, &outarg);
- dput(d);
} else {
+ entry->d_time = epoch;
fuse_change_entry_timeout(entry, &outarg);
}
fuse_dir_changed(dir);
- return 0;
+ return d;
out_put_forget_req:
+ if (err == -EEXIST)
+ fuse_invalidate_entry(entry);
kfree(forget);
- return err;
+ return ERR_PTR(err);
+}
+
+static int create_new_nondir(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
+{
+ /*
+ * Note that when creating anything other than a directory we
+ * can be sure create_new_entry() will NOT return an alternate
+ * dentry as d_splice_alias() only returns an alternate dentry
+ * for directories. So we don't need to check for that case
+ * when passing back the result.
+ */
+ WARN_ON_ONCE(S_ISDIR(mode));
+
+ return PTR_ERR(create_new_entry(idmap, fm, args, dir, entry, mode));
}
-static int fuse_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, umode_t mode, dev_t rdev)
{
struct fuse_mknod_in inarg;
@@ -691,17 +1079,35 @@ static int fuse_mknod(struct user_namespace *mnt_userns, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, mode);
+ return create_new_nondir(idmap, fm, &args, dir, entry, mode);
}
-static int fuse_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, umode_t mode, bool excl)
{
- return fuse_mknod(&init_user_ns, dir, entry, mode, 0);
+ return fuse_mknod(idmap, dir, entry, mode, 0);
+}
+
+static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode)
+{
+ struct fuse_conn *fc = get_fuse_conn(dir);
+ int err;
+
+ if (fc->no_tmpfile)
+ return -EOPNOTSUPP;
+
+ err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
+ file->f_flags, mode, FUSE_TMPFILE);
+ if (err == -ENOSYS) {
+ fc->no_tmpfile = 1;
+ err = -EOPNOTSUPP;
+ }
+ return err;
}
-static int fuse_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *entry, umode_t mode)
+static struct dentry *fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_mount *fm = get_fuse_mount(dir);
@@ -719,10 +1125,10 @@ static int fuse_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, S_IFDIR);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
}
-static int fuse_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, const char *link)
{
struct fuse_mount *fm = get_fuse_mount(dir);
@@ -730,12 +1136,13 @@ static int fuse_symlink(struct user_namespace *mnt_userns, struct inode *dir,
FUSE_ARGS(args);
args.opcode = FUSE_SYMLINK;
- args.in_numargs = 2;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
- args.in_args[1].size = len;
- args.in_args[1].value = link;
- return create_new_entry(fm, &args, dir, entry, S_IFLNK);
+ args.in_numargs = 3;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
+ args.in_args[2].size = len;
+ args.in_args[2].value = link;
+ return create_new_nondir(idmap, fm, &args, dir, entry, S_IFLNK);
}
void fuse_flush_time_update(struct inode *inode)
@@ -748,7 +1155,7 @@ void fuse_flush_time_update(struct inode *inode)
static void fuse_update_ctime_in_cache(struct inode *inode)
{
if (!IS_NOCMTIME(inode)) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty_sync(inode);
fuse_flush_time_update(inode);
}
@@ -794,14 +1201,15 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
args.opcode = FUSE_UNLINK;
args.nodeid = get_node_id(dir);
- args.in_numargs = 1;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
err = fuse_simple_request(fm, &args);
if (!err) {
fuse_dir_changed(dir);
fuse_entry_unlinked(entry);
- } else if (err == -EINTR)
+ } else if (err == -EINTR || err == -ENOENT)
fuse_invalidate_entry(entry);
return err;
}
@@ -817,19 +1225,20 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
args.opcode = FUSE_RMDIR;
args.nodeid = get_node_id(dir);
- args.in_numargs = 1;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
err = fuse_simple_request(fm, &args);
if (!err) {
fuse_dir_changed(dir);
fuse_entry_unlinked(entry);
- } else if (err == -EINTR)
+ } else if (err == -EINTR || err == -ENOENT)
fuse_invalidate_entry(entry);
return err;
}
-static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
+static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
struct inode *newdir, struct dentry *newent,
unsigned int flags, int opcode, size_t argsize)
{
@@ -850,7 +1259,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
args.in_args[1].value = oldent->d_name.name;
args.in_args[2].size = newent->d_name.len + 1;
args.in_args[2].value = newent->d_name.name;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
if (!err) {
/* ctime changes */
fuse_update_ctime(d_inode(oldent));
@@ -865,7 +1274,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
/* newent will end up negative */
if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent))
fuse_entry_unlinked(newent);
- } else if (err == -EINTR) {
+ } else if (err == -EINTR || err == -ENOENT) {
/* If request was interrupted, DEITY only knows if the
rename actually took place. If the invalidation
fails (e.g. some process has CWD under the renamed
@@ -879,7 +1288,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
return err;
}
-static int fuse_rename2(struct user_namespace *mnt_userns, struct inode *olddir,
+static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
struct dentry *oldent, struct inode *newdir,
struct dentry *newent, unsigned int flags)
{
@@ -896,7 +1305,8 @@ static int fuse_rename2(struct user_namespace *mnt_userns, struct inode *olddir,
if (fc->no_rename2 || fc->minor < 23)
return -EINVAL;
- err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
+ olddir, oldent, newdir, newent, flags,
FUSE_RENAME2,
sizeof(struct fuse_rename2_in));
if (err == -ENOSYS) {
@@ -904,7 +1314,7 @@ static int fuse_rename2(struct user_namespace *mnt_userns, struct inode *olddir,
err = -EINVAL;
}
} else {
- err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
FUSE_RENAME,
sizeof(struct fuse_rename_in));
}
@@ -921,6 +1331,9 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_mount *fm = get_fuse_mount(inode);
FUSE_ARGS(args);
+ if (fm->fc->no_link)
+ goto out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
args.opcode = FUSE_LINK;
@@ -929,27 +1342,37 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
args.in_args[0].value = &inarg;
args.in_args[1].size = newent->d_name.len + 1;
args.in_args[1].value = newent->d_name.name;
- err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
+ err = create_new_nondir(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
if (!err)
fuse_update_ctime_in_cache(inode);
else if (err == -EINTR)
fuse_invalidate_attr(inode);
+ if (err == -ENOSYS)
+ fm->fc->no_link = 1;
+out:
+ if (fm->fc->no_link)
+ return -EPERM;
+
return err;
}
-static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
- struct kstat *stat)
+static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct fuse_attr *attr, struct kstat *stat)
{
unsigned int blkbits;
struct fuse_conn *fc = get_fuse_conn(inode);
+ vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
+ make_kuid(fc->user_ns, attr->uid));
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
+ make_kgid(fc->user_ns, attr->gid));
stat->dev = inode->i_sb->s_dev;
stat->ino = attr->ino;
stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
stat->nlink = attr->nlink;
- stat->uid = make_kuid(fc->user_ns, attr->uid);
- stat->gid = make_kgid(fc->user_ns, attr->gid);
+ stat->uid = vfsuid_into_kuid(vfsuid);
+ stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->i_rdev;
stat->atime.tv_sec = attr->atime;
stat->atime.tv_nsec = attr->atimensec;
@@ -968,8 +1391,89 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
stat->blksize = 1 << blkbits;
}
-static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
- struct file *file)
+static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
+{
+ memset(attr, 0, sizeof(*attr));
+ attr->ino = sx->ino;
+ attr->size = sx->size;
+ attr->blocks = sx->blocks;
+ attr->atime = sx->atime.tv_sec;
+ attr->mtime = sx->mtime.tv_sec;
+ attr->ctime = sx->ctime.tv_sec;
+ attr->atimensec = sx->atime.tv_nsec;
+ attr->mtimensec = sx->mtime.tv_nsec;
+ attr->ctimensec = sx->ctime.tv_nsec;
+ attr->mode = sx->mode;
+ attr->nlink = sx->nlink;
+ attr->uid = sx->uid;
+ attr->gid = sx->gid;
+ attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor));
+ attr->blksize = sx->blksize;
+}
+
+static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat)
+{
+ int err;
+ struct fuse_attr attr;
+ struct fuse_statx *sx;
+ struct fuse_statx_in inarg;
+ struct fuse_statx_out outarg;
+ struct fuse_mount *fm = get_fuse_mount(inode);
+ u64 attr_version = fuse_get_attr_version(fm->fc);
+ FUSE_ARGS(args);
+
+ memset(&inarg, 0, sizeof(inarg));
+ memset(&outarg, 0, sizeof(outarg));
+ /* Directories have separate file-handle space */
+ if (file && S_ISREG(inode->i_mode)) {
+ struct fuse_file *ff = file->private_data;
+
+ inarg.getattr_flags |= FUSE_GETATTR_FH;
+ inarg.fh = ff->fh;
+ }
+ /* For now leave sync hints as the default, request all stats. */
+ inarg.sx_flags = 0;
+ inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME;
+ args.opcode = FUSE_STATX;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
+ err = fuse_simple_request(fm, &args);
+ if (err)
+ return err;
+
+ sx = &outarg.stat;
+ if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
+ ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
+ inode_wrong_type(inode, sx->mode)))) {
+ fuse_make_bad(inode);
+ return -EIO;
+ }
+
+ fuse_statx_to_attr(&outarg.stat, &attr);
+ if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) {
+ fuse_change_attributes(inode, &attr, &outarg.stat,
+ ATTR_TIMEOUT(&outarg), attr_version);
+ }
+
+ if (stat) {
+ stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
+ stat->btime.tv_sec = sx->btime.tv_sec;
+ stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
+ fuse_fillattr(idmap, inode, &attr, stat);
+ stat->result_mask |= STATX_TYPE;
+ }
+
+ return 0;
+}
+
+static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct kstat *stat, struct file *file)
{
int err;
struct fuse_getattr_in inarg;
@@ -1004,27 +1508,37 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
fuse_make_bad(inode);
err = -EIO;
} else {
- fuse_change_attributes(inode, &outarg.attr,
- attr_timeout(&outarg),
+ fuse_change_attributes(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
attr_version);
if (stat)
- fuse_fillattr(inode, &outarg.attr, stat);
+ fuse_fillattr(idmap, inode, &outarg.attr, stat);
}
}
return err;
}
-static int fuse_update_get_attr(struct inode *inode, struct file *file,
- struct kstat *stat, u32 request_mask,
- unsigned int flags)
+static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_conn *fc = get_fuse_conn(inode);
int err = 0;
bool sync;
u32 inval_mask = READ_ONCE(fi->inval_mask);
u32 cache_mask = fuse_get_cache_mask(inode);
- if (flags & AT_STATX_FORCE_SYNC)
+
+ /* FUSE only supports basic stats and possibly btime */
+ request_mask &= STATX_BASIC_STATS | STATX_BTIME;
+retry:
+ if (fc->no_statx)
+ request_mask &= STATX_BASIC_STATS;
+
+ if (!request_mask)
+ sync = false;
+ else if (flags & AT_STATX_FORCE_SYNC)
sync = true;
else if (flags & AT_STATX_DONT_SYNC)
sync = false;
@@ -1035,11 +1549,26 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
if (sync) {
forget_all_cached_acls(inode);
- err = fuse_do_getattr(inode, stat, file);
+ /* Try statx if BTIME is requested */
+ if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
+ err = fuse_do_statx(idmap, inode, file, stat);
+ if (err == -ENOSYS) {
+ fc->no_statx = 1;
+ err = 0;
+ goto retry;
+ }
+ } else {
+ err = fuse_do_getattr(idmap, inode, stat, file);
+ }
} else if (stat) {
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
+ stat->blksize = 1 << fi->cached_i_blkbits;
+ if (test_bit(FUSE_I_BTIME, &fi->state)) {
+ stat->btime = fi->i_btime;
+ stat->result_mask |= STATX_BTIME;
+ }
}
return err;
@@ -1047,11 +1576,11 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
{
- return fuse_update_get_attr(inode, file, NULL, mask, 0);
+ return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0);
}
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
- u64 child_nodeid, struct qstr *name)
+ u64 child_nodeid, struct qstr *name, u32 flags)
{
int err = -ENOTDIR;
struct inode *parent;
@@ -1062,25 +1591,25 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
if (!parent)
return -ENOENT;
- inode_lock_nested(parent, I_MUTEX_PARENT);
if (!S_ISDIR(parent->i_mode))
- goto unlock;
+ goto put_parent;
err = -ENOENT;
dir = d_find_alias(parent);
if (!dir)
- goto unlock;
+ goto put_parent;
- name->hash = full_name_hash(dir, name->name, name->len);
- entry = d_lookup(dir, name);
+ entry = start_removing_noperm(dir, name);
dput(dir);
- if (!entry)
- goto unlock;
+ if (IS_ERR(entry))
+ goto put_parent;
fuse_dir_changed(parent);
- fuse_invalidate_entry(entry);
+ if (!(flags & FUSE_EXPIRE_ONLY))
+ d_invalidate(entry);
+ fuse_invalidate_entry_cache(entry);
- if (child_nodeid != 0 && d_really_is_positive(entry)) {
+ if (child_nodeid != 0) {
inode_lock(d_inode(entry));
if (get_node_id(d_inode(entry)) != child_nodeid) {
err = -ENOENT;
@@ -1108,14 +1637,25 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
} else {
err = 0;
}
- dput(entry);
- unlock:
- inode_unlock(parent);
+ end_removing(entry);
+ put_parent:
iput(parent);
return err;
}
+static inline bool fuse_permissible_uidgid(struct fuse_conn *fc)
+{
+ const struct cred *cred = current_cred();
+
+ return (uid_eq(cred->euid, fc->user_id) &&
+ uid_eq(cred->suid, fc->user_id) &&
+ uid_eq(cred->uid, fc->user_id) &&
+ gid_eq(cred->egid, fc->group_id) &&
+ gid_eq(cred->sgid, fc->group_id) &&
+ gid_eq(cred->gid, fc->group_id));
+}
+
/*
* Calling into a user-controlled filesystem gives the filesystem
* daemon ptrace-like capabilities over the current process. This
@@ -1129,23 +1669,19 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
* for which the owner of the mount has ptrace privilege. This
* excludes processes started by other users, suid or sgid processes.
*/
-int fuse_allow_current_process(struct fuse_conn *fc)
+bool fuse_allow_current_process(struct fuse_conn *fc)
{
- const struct cred *cred;
+ bool allow;
if (fc->allow_other)
- return current_in_userns(fc->user_ns);
+ allow = current_in_userns(fc->user_ns);
+ else
+ allow = fuse_permissible_uidgid(fc);
- cred = current_cred();
- if (uid_eq(cred->euid, fc->user_id) &&
- uid_eq(cred->suid, fc->user_id) &&
- uid_eq(cred->uid, fc->user_id) &&
- gid_eq(cred->egid, fc->group_id) &&
- gid_eq(cred->sgid, fc->group_id) &&
- gid_eq(cred->gid, fc->group_id))
- return 1;
+ if (!allow && allow_sys_admin_access && capable(CAP_SYS_ADMIN))
+ allow = true;
- return 0;
+ return allow;
}
static int fuse_access(struct inode *inode, int mask)
@@ -1157,6 +1693,14 @@ static int fuse_access(struct inode *inode, int mask)
BUG_ON(mask & MAY_NOT_BLOCK);
+ /*
+ * We should not send FUSE_ACCESS to the userspace
+ * when idmapped mounts are enabled as for this case
+ * we have fc->default_permissions = 1 and access
+ * permission checks are done on the kernel side.
+ */
+ WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));
+
if (fm->fc->no_access)
return 0;
@@ -1181,7 +1725,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
return -ECHILD;
forget_all_cached_acls(inode);
- return fuse_do_getattr(inode, NULL, NULL);
+ return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
}
/*
@@ -1189,7 +1733,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
*
* 1) Local access checking ('default_permissions' mount option) based
* on file mode. This is the plain old disk filesystem permission
- * modell.
+ * model.
*
* 2) "Remote" access checking, where server is responsible for
* checking permission in each inode operation. An exception to this
@@ -1197,7 +1741,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
* access request is sent. Execute permission is still checked
* locally based on file mode.
*/
-static int fuse_permission(struct user_namespace *mnt_userns,
+static int fuse_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1229,7 +1773,7 @@ static int fuse_permission(struct user_namespace *mnt_userns,
}
if (fc->default_permissions) {
- err = generic_permission(&init_user_ns, inode, mask);
+ err = generic_permission(idmap, inode, mask);
/* If permission is denied, try to refresh file
attributes. This is also needed, because the root
@@ -1237,7 +1781,7 @@ static int fuse_permission(struct user_namespace *mnt_userns,
if (err == -EACCES && !refreshed) {
err = fuse_perm_getattr(inode, mask);
if (!err)
- err = generic_permission(&init_user_ns,
+ err = generic_permission(idmap,
inode, mask);
}
@@ -1260,13 +1804,13 @@ static int fuse_permission(struct user_namespace *mnt_userns,
return err;
}
-static int fuse_readlink_page(struct inode *inode, struct page *page)
+static int fuse_readlink_folio(struct inode *inode, struct folio *folio)
{
struct fuse_mount *fm = get_fuse_mount(inode);
- struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
+ struct fuse_folio_desc desc = { .length = folio_size(folio) - 1 };
struct fuse_args_pages ap = {
- .num_pages = 1,
- .pages = &page,
+ .num_folios = 1,
+ .folios = &folio,
.descs = &desc,
};
char *link;
@@ -1289,7 +1833,7 @@ static int fuse_readlink_page(struct inode *inode, struct page *page)
if (WARN_ON(res >= PAGE_SIZE))
return -EIO;
- link = page_address(page);
+ link = folio_address(folio);
link[res] = '\0';
return 0;
@@ -1299,7 +1843,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct page *page;
+ struct folio *folio;
int err;
err = -EIO;
@@ -1307,26 +1851,26 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
goto out_err;
if (fc->cache_symlinks)
- return page_get_link(dentry, inode, callback);
+ return page_get_link_raw(dentry, inode, callback);
err = -ECHILD;
if (!dentry)
goto out_err;
- page = alloc_page(GFP_KERNEL);
+ folio = folio_alloc(GFP_KERNEL, 0);
err = -ENOMEM;
- if (!page)
+ if (!folio)
goto out_err;
- err = fuse_readlink_page(inode, page);
+ err = fuse_readlink_folio(inode, folio);
if (err) {
- __free_page(page);
+ folio_put(folio);
goto out_err;
}
- set_delayed_call(callback, page_put_link, page);
+ set_delayed_call(callback, page_put_link, folio);
- return page_address(page);
+ return folio_address(folio);
out_err:
return ERR_PTR(err);
@@ -1334,7 +1878,32 @@ out_err:
static int fuse_dir_open(struct inode *inode, struct file *file)
{
- return fuse_open_common(inode, file, true);
+ struct fuse_mount *fm = get_fuse_mount(inode);
+ int err;
+
+ if (fuse_is_bad(inode))
+ return -EIO;
+
+ err = generic_file_open(inode, file);
+ if (err)
+ return err;
+
+ err = fuse_do_open(fm, get_node_id(inode), file, true);
+ if (!err) {
+ struct fuse_file *ff = file->private_data;
+
+ /*
+ * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for
+ * directories for backward compatibility, though it's unlikely
+ * to be useful.
+ */
+ if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
+ nonseekable_open(inode, file);
+ if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
+ }
+
+ return err;
}
static int fuse_dir_release(struct inode *inode, struct file *file)
@@ -1410,17 +1979,29 @@ static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
return true;
}
-static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
- struct fuse_setattr_in *arg, bool trust_local_cmtime)
+static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc,
+ struct iattr *iattr, struct fuse_setattr_in *arg,
+ bool trust_local_cmtime)
{
unsigned ivalid = iattr->ia_valid;
if (ivalid & ATTR_MODE)
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
- if (ivalid & ATTR_UID)
- arg->valid |= FATTR_UID, arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
- if (ivalid & ATTR_GID)
- arg->valid |= FATTR_GID, arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
+
+ if (ivalid & ATTR_UID) {
+ kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid);
+
+ arg->valid |= FATTR_UID;
+ arg->uid = from_kuid(fc->user_ns, fsuid);
+ }
+
+ if (ivalid & ATTR_GID) {
+ kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid);
+
+ arg->valid |= FATTR_GID;
+ arg->gid = from_kgid(fc->user_ns, fsgid);
+ }
+
if (ivalid & ATTR_SIZE)
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
if (ivalid & ATTR_ATIME) {
@@ -1516,12 +2097,12 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
memset(&outarg, 0, sizeof(outarg));
inarg.valid = FATTR_MTIME;
- inarg.mtime = inode->i_mtime.tv_sec;
- inarg.mtimensec = inode->i_mtime.tv_nsec;
+ inarg.mtime = inode_get_mtime_sec(inode);
+ inarg.mtimensec = inode_get_mtime_nsec(inode);
if (fm->fc->minor >= 23) {
inarg.valid |= FATTR_CTIME;
- inarg.ctime = inode->i_ctime.tv_sec;
- inarg.ctimensec = inode->i_ctime.tv_nsec;
+ inarg.ctime = inode_get_ctime_sec(inode);
+ inarg.ctimensec = inode_get_ctime_nsec(inode);
}
if (ff) {
inarg.valid |= FATTR_FH;
@@ -1540,8 +2121,8 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
* vmtruncate() doesn't allow for this case, so do the rlimit checking
* and the actual truncation by hand.
*/
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file)
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file)
{
struct inode *inode = d_inode(dentry);
struct fuse_mount *fm = get_fuse_mount(inode);
@@ -1557,11 +2138,12 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
int err;
bool trust_local_cmtime = is_wb;
bool fault_blocked = false;
+ u64 attr_version;
if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
- err = setattr_prepare(&init_user_ns, dentry, attr);
+ err = setattr_prepare(idmap, dentry, attr);
if (err)
return err;
@@ -1574,7 +2156,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (FUSE_IS_DAX(inode) && is_truncate) {
filemap_invalidate_lock(mapping);
fault_blocked = true;
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err) {
filemap_invalidate_unlock(mapping);
return err;
@@ -1620,7 +2202,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
- iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
+ iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
if (file) {
struct fuse_file *ff = file->private_data;
inarg.valid |= FATTR_FH;
@@ -1641,6 +2223,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
inarg.valid |= FATTR_KILL_SUIDGID;
}
+
+ attr_version = fuse_get_attr_version(fm->fc);
fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
err = fuse_simple_request(fm, &args);
if (err) {
@@ -1660,15 +2244,23 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
/* the kernel maintains i_mtime locally */
if (trust_local_cmtime) {
if (attr->ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (attr->ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
/* FIXME: clear I_DIRTY_SYNC? */
}
- fuse_change_attributes_common(inode, &outarg.attr,
- attr_timeout(&outarg),
- fuse_get_cache_mask(inode));
+ if (fi->attr_version > attr_version) {
+ /*
+ * Apply attributes, for example for fsnotify_change(), but set
+ * attribute timeout to zero.
+ */
+ outarg.attr_valid = outarg.attr_valid_nsec = 0;
+ }
+
+ fuse_change_attributes_common(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
+ fuse_get_cache_mask(inode), 0);
oldsize = inode->i_size;
/* see the comment in fuse_change_attributes() */
if (!is_wb || is_truncate)
@@ -1682,7 +2274,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
/*
* Only call invalidate_inode_pages2() after removing
- * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
+ * FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock.
*/
if ((is_truncate || !is_wb) &&
S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
@@ -1708,7 +2300,7 @@ error:
return err;
}
-static int fuse_setattr(struct user_namespace *mnt_userns, struct dentry *entry,
+static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
struct iattr *attr)
{
struct inode *inode = d_inode(entry);
@@ -1737,7 +2329,7 @@ static int fuse_setattr(struct user_namespace *mnt_userns, struct dentry *entry,
* ia_mode calculation may have used stale i_mode.
* Refresh and recalculate.
*/
- ret = fuse_do_getattr(inode, NULL, file);
+ ret = fuse_do_getattr(idmap, inode, NULL, file);
if (ret)
return ret;
@@ -1755,7 +2347,7 @@ static int fuse_setattr(struct user_namespace *mnt_userns, struct dentry *entry,
if (!attr->ia_valid)
return 0;
- ret = fuse_do_setattr(entry, attr, file);
+ ret = fuse_do_setattr(idmap, entry, attr, file);
if (!ret) {
/*
* If filesystem supports acls it may have updated acl xattrs in
@@ -1771,7 +2363,7 @@ static int fuse_setattr(struct user_namespace *mnt_userns, struct dentry *entry,
return ret;
}
-static int fuse_getattr(struct user_namespace *mnt_userns,
+static int fuse_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
@@ -1794,7 +2386,7 @@ static int fuse_getattr(struct user_namespace *mnt_userns,
return -EACCES;
}
- return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
+ return fuse_update_get_attr(idmap, inode, NULL, stat, request_mask, flags);
}
static const struct inode_operations fuse_dir_inode_operations = {
@@ -1808,10 +2400,12 @@ static const struct inode_operations fuse_dir_inode_operations = {
.setattr = fuse_setattr,
.create = fuse_create,
.atomic_open = fuse_atomic_open,
+ .tmpfile = fuse_tmpfile,
.mknod = fuse_mknod,
.permission = fuse_permission,
.getattr = fuse_getattr,
.listxattr = fuse_listxattr,
+ .get_inode_acl = fuse_get_inode_acl,
.get_acl = fuse_get_acl,
.set_acl = fuse_set_acl,
.fileattr_get = fuse_fileattr_get,
@@ -1827,6 +2421,7 @@ static const struct file_operations fuse_dir_operations = {
.fsync = fuse_dir_fsync,
.unlocked_ioctl = fuse_dir_ioctl,
.compat_ioctl = fuse_dir_compat_ioctl,
+ .setlease = simple_nosetlease,
};
static const struct inode_operations fuse_common_inode_operations = {
@@ -1834,6 +2429,7 @@ static const struct inode_operations fuse_common_inode_operations = {
.permission = fuse_permission,
.getattr = fuse_getattr,
.listxattr = fuse_listxattr,
+ .get_inode_acl = fuse_get_inode_acl,
.get_acl = fuse_get_acl,
.set_acl = fuse_set_acl,
.fileattr_get = fuse_fileattr_get,
@@ -1866,20 +2462,20 @@ void fuse_init_dir(struct inode *inode)
fi->rdc.version = 0;
}
-static int fuse_symlink_readpage(struct file *null, struct page *page)
+static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
{
- int err = fuse_readlink_page(page->mapping->host, page);
+ int err = fuse_readlink_folio(folio->mapping->host, folio);
if (!err)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
static const struct address_space_operations fuse_symlink_aops = {
- .readpage = fuse_symlink_readpage,
+ .read_folio = fuse_symlink_read_folio,
};
void fuse_init_symlink(struct inode *inode)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9d6c5f6361f7..01bc894e9c2b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -18,6 +18,10 @@
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
+#include <linux/splice.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/iomap.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
unsigned int open_flags, int opcode,
@@ -48,13 +52,7 @@ static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
return fuse_simple_request(fm, &args);
}
-struct fuse_release_args {
- struct fuse_args args;
- struct fuse_release_in inarg;
- struct inode *inode;
-};
-
-struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
+struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
{
struct fuse_file *ff;
@@ -63,15 +61,15 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
return NULL;
ff->fm = fm;
- ff->release_args = kzalloc(sizeof(*ff->release_args),
- GFP_KERNEL_ACCOUNT);
- if (!ff->release_args) {
- kfree(ff);
- return NULL;
+ if (release) {
+ ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
+ if (!ff->args) {
+ kfree(ff);
+ return NULL;
+ }
}
INIT_LIST_HEAD(&ff->write_entry);
- mutex_init(&ff->readdir.lock);
refcount_set(&ff->count, 1);
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
@@ -83,8 +81,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
void fuse_file_free(struct fuse_file *ff)
{
- kfree(ff->release_args);
- mutex_destroy(&ff->readdir.lock);
+ kfree(ff->args);
kfree(ff);
}
@@ -103,13 +100,18 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
kfree(ra);
}
-static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
+static void fuse_file_put(struct fuse_file *ff, bool sync)
{
if (refcount_dec_and_test(&ff->count)) {
- struct fuse_args *args = &ff->release_args->args;
+ struct fuse_release_args *ra = &ff->args->release_args;
+ struct fuse_args *args = (ra ? &ra->args : NULL);
+
+ if (ra && ra->inode)
+ fuse_file_io_release(ff, ra->inode);
- if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
- /* Do nothing when client does not implement 'open' */
+ if (!args) {
+ /* Do nothing when server does not implement 'opendir' */
+ } else if (args->opcode == FUSE_RELEASE && ff->fm->fc->no_open) {
fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
@@ -130,31 +132,45 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
struct fuse_conn *fc = fm->fc;
struct fuse_file *ff;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
+ bool open = isdir ? !fc->no_opendir : !fc->no_open;
+ bool release = !isdir || open;
- ff = fuse_file_alloc(fm);
+ /*
+ * ff->args->release_args still needs to be allocated even if
+ * fc->no_open is set, so that an inode reference can be held while
+ * there are pending in-flight file operations when ->release() is
+ * called (see fuse_prepare_release()). Otherwise reclaim could
+ * deadlock: while servicing a readahead request the server may
+ * trigger reclaim, and reclaim may evict the inode of the file being
+ * read ahead.
+ */
+ ff = fuse_file_alloc(fm, release);
if (!ff)
return ERR_PTR(-ENOMEM);
ff->fh = 0;
/* Default for no-open */
ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
- if (isdir ? !fc->no_opendir : !fc->no_open) {
- struct fuse_open_out outarg;
+ if (open) {
+ /* Store outarg for fuse_finish_open() */
+ struct fuse_open_out *outargp = &ff->args->open_outarg;
int err;
- err = fuse_send_open(fm, nodeid, open_flags, opcode, &outarg);
+ err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
if (!err) {
- ff->fh = outarg.fh;
- ff->open_flags = outarg.open_flags;
-
+ ff->fh = outargp->fh;
+ ff->open_flags = outargp->open_flags;
} else if (err != -ENOSYS) {
fuse_file_free(ff);
return ERR_PTR(err);
} else {
- if (isdir)
+ if (isdir) {
+ /* No release needed */
+ kfree(ff->args);
+ ff->args = NULL;
fc->no_opendir = 1;
- else
+ } else {
fc->no_open = 1;
+ }
}
}
@@ -193,44 +209,50 @@ static void fuse_link_write_file(struct file *file)
spin_unlock(&fi->lock);
}
-void fuse_finish_open(struct inode *inode, struct file *file)
+int fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
+ int err;
+
+ err = fuse_file_io_open(file, inode);
+ if (err)
+ return err;
if (ff->open_flags & FOPEN_STREAM)
stream_open(inode, file);
else if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
- if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
- struct fuse_inode *fi = get_fuse_inode(inode);
-
- spin_lock(&fi->lock);
- fi->attr_version = atomic64_inc_return(&fc->attr_version);
- i_size_write(inode, 0);
- spin_unlock(&fi->lock);
- truncate_pagecache(inode, 0);
- file_update_time(file);
- fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
- } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
- invalidate_inode_pages2(inode->i_mapping);
- }
-
if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
fuse_link_write_file(file);
+
+ return 0;
+}
+
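+/*
+ * Handle the truncation part of an O_TRUNC open: zero i_size and bump the
+ * attribute version under fi->lock, then update the file times and
+ * invalidate the cached size/mtime attributes.
+ */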
+static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
+ i_size_write(inode, 0);
+ spin_unlock(&fi->lock);
+ file_update_time(file);
+ fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
}
-int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
+static int fuse_open(struct inode *inode, struct file *file)
{
struct fuse_mount *fm = get_fuse_mount(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = fm->fc;
+ struct fuse_file *ff;
int err;
- bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
- fc->atomic_o_trunc &&
- fc->writeback_cache;
- bool dax_truncate = (file->f_flags & O_TRUNC) &&
- fc->atomic_o_trunc && FUSE_IS_DAX(inode);
+ bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
+ bool is_wb_truncate = is_truncate && fc->writeback_cache;
+ bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);
if (fuse_is_bad(inode))
return -EIO;
@@ -239,39 +261,54 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (err)
return err;
- if (is_wb_truncate || dax_truncate) {
+ if (is_wb_truncate || dax_truncate)
inode_lock(inode);
- fuse_set_nowrite(inode);
- }
if (dax_truncate) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
- goto out;
+ goto out_inode_unlock;
}
- err = fuse_do_open(fm, get_node_id(inode), file, isdir);
- if (!err)
- fuse_finish_open(inode, file);
+ if (is_wb_truncate || dax_truncate)
+ fuse_set_nowrite(inode);
-out:
- if (dax_truncate)
- filemap_invalidate_unlock(inode->i_mapping);
+ err = fuse_do_open(fm, get_node_id(inode), file, false);
+ if (!err) {
+ ff = file->private_data;
+ err = fuse_finish_open(inode, file);
+ if (err)
+ fuse_sync_release(fi, ff, file->f_flags);
+ else if (is_truncate)
+ fuse_truncate_update_attr(inode, file);
+ }
- if (is_wb_truncate | dax_truncate) {
+ if (is_wb_truncate || dax_truncate)
fuse_release_nowrite(inode);
- inode_unlock(inode);
+ if (!err) {
+ if (is_truncate)
+ truncate_pagecache(inode, 0);
+ else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
}
+ if (dax_truncate)
+ filemap_invalidate_unlock(inode->i_mapping);
+out_inode_unlock:
+ if (is_wb_truncate || dax_truncate)
+ inode_unlock(inode);
return err;
}
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
- unsigned int flags, int opcode)
+ unsigned int flags, int opcode, bool sync)
{
struct fuse_conn *fc = ff->fm->fc;
- struct fuse_release_args *ra = ff->release_args;
+ struct fuse_release_args *ra = &ff->args->release_args;
+
+ if (fuse_file_passthrough(ff))
+ fuse_passthrough_release(ff, fuse_inode_backing(fi));
/* Inode is NULL on error path of fuse_create_open() */
if (likely(fi)) {
@@ -286,6 +323,11 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
wake_up_interruptible_all(&ff->poll_wait);
+ if (!ra)
+ return;
+
+ /* ff->args was used for open outarg */
+ memset(ff->args, 0, sizeof(*ff->args));
ra->inarg.fh = ff->fh;
ra->inarg.flags = flags;
ra->args.in_numargs = 1;
@@ -295,23 +337,28 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
ra->args.nodeid = ff->nodeid;
ra->args.force = true;
ra->args.nocreds = true;
+
+ /*
+ * Hold inode until release is finished.
+ * From fuse_sync_release() the refcount is 1 and everything's
+ * synchronous, so we are fine with not doing igrab() here.
+ */
+ ra->inode = sync ? NULL : igrab(&fi->inode);
}
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
unsigned int open_flags, fl_owner_t id, bool isdir)
{
struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_release_args *ra = ff->release_args;
+ struct fuse_release_args *ra = &ff->args->release_args;
int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
- fuse_prepare_release(fi, ff, open_flags, opcode);
+ fuse_prepare_release(fi, ff, open_flags, opcode, false);
- if (ff->flock) {
+ if (ra && ff->flock) {
ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
}
- /* Hold inode until release is finished */
- ra->inode = igrab(inode);
/*
* Normally this will send the RELEASE request, however if
@@ -321,8 +368,14 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
* Make the release synchronous if this is a fuseblk mount,
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
+ *
+ * Always use the asynchronous file put because the current thread
+ * might be the fuse server. This can happen if a process starts some
+ * aio and closes the fd before the aio completes. Since aio takes its
+ * own ref to the file, the IO completion has to drop the ref, which is
+ * how the fuse server can end up closing its clients' files.
*/
- fuse_file_put(ff, ff->fm->fc->destroy, isdir);
+ fuse_file_put(ff, false);
}
void fuse_release_common(struct file *file, bool isdir)
@@ -331,13 +384,17 @@ void fuse_release_common(struct file *file, bool isdir)
(fl_owner_t) file, isdir);
}
-static int fuse_open(struct inode *inode, struct file *file)
-{
- return fuse_open_common(inode, file, false);
-}
-
static int fuse_release(struct inode *inode, struct file *file)
{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ /*
+ * Dirty pages might remain despite the write_inode_now() call from
+ * fuse_flush() due to writes racing with the close.
+ */
+ if (fc->writeback_cache)
+ write_inode_now(inode, 1);
+
fuse_release_common(file, false);
/* return value is ignored by VFS */
@@ -348,12 +405,8 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
unsigned int flags)
{
WARN_ON(refcount_read(&ff->count) > 1);
- fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
- /*
- * iput(NULL) is a no-op and since the refcount is 1 and everything's
- * synchronous, we are fine with not doing igrab() here"
- */
- fuse_file_put(ff, true, false);
+ fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
+ fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
@@ -381,74 +434,11 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
struct fuse_writepage_args {
struct fuse_io_args ia;
- struct rb_node writepages_entry;
struct list_head queue_entry;
- struct fuse_writepage_args *next;
struct inode *inode;
struct fuse_sync_bucket *bucket;
};
-static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
- pgoff_t idx_from, pgoff_t idx_to)
-{
- struct rb_node *n;
-
- n = fi->writepages.rb_node;
-
- while (n) {
- struct fuse_writepage_args *wpa;
- pgoff_t curr_index;
-
- wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
- WARN_ON(get_fuse_inode(wpa->inode) != fi);
- curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
- if (idx_from >= curr_index + wpa->ia.ap.num_pages)
- n = n->rb_right;
- else if (idx_to < curr_index)
- n = n->rb_left;
- else
- return wpa;
- }
- return NULL;
-}
-
-/*
- * Check if any page in a range is under writeback
- *
- * This is currently done by walking the list of writepage requests
- * for the inode, which can be pretty inefficient.
- */
-static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
- pgoff_t idx_to)
-{
- struct fuse_inode *fi = get_fuse_inode(inode);
- bool found;
-
- spin_lock(&fi->lock);
- found = fuse_find_writeback(fi, idx_from, idx_to);
- spin_unlock(&fi->lock);
-
- return found;
-}
-
-static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
-{
- return fuse_range_is_writeback(inode, index, index);
-}
-
-/*
- * Wait for page writeback to be completed.
- *
- * Since fuse doesn't rely on the VM writeback tracking, this has to
- * use some other means.
- */
-static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
-{
- struct fuse_inode *fi = get_fuse_inode(inode);
-
- wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
-}
-
/*
* Wait for all pending writepages on the inode to finish.
*
@@ -483,10 +473,6 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (err)
return err;
- inode_lock(inode);
- fuse_sync_writes(inode);
- inode_unlock(inode);
-
err = filemap_check_errors(file->f_mapping);
if (err)
return err;
@@ -611,16 +597,20 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
args->out_args[0].size = count;
}
-static void fuse_release_user_pages(struct fuse_args_pages *ap,
+static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres,
bool should_dirty)
{
unsigned int i;
- for (i = 0; i < ap->num_pages; i++) {
+ for (i = 0; i < ap->num_folios; i++) {
if (should_dirty)
- set_page_dirty_lock(ap->pages[i]);
- put_page(ap->pages[i]);
+ folio_mark_dirty_lock(ap->folios[i]);
+ if (ap->args.is_pinned)
+ unpin_folio(ap->folios[i]);
}
+
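+ /* A read into a vmalloc buffer needs its vmap alias invalidated. */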
+ if (nres > 0 && ap->args.invalidate_vmap)
+ invalidate_kernel_vmap_range(ap->args.vmap_base, nres);
}
static void fuse_io_release(struct kref *kref)
@@ -639,7 +629,7 @@ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
return io->bytes < 0 ? io->size : io->bytes;
}
-/**
+/*
* In case of short read, the caller sets 'pos' to the position of
* actual end of fuse request in IO request. Otherwise, if bytes_requested
* == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
@@ -690,16 +680,16 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
}
static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
- unsigned int npages)
+ unsigned int nfolios)
{
struct fuse_io_args *ia;
ia = kzalloc(sizeof(*ia), GFP_KERNEL);
if (ia) {
ia->io = io;
- ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
- &ia->ap.descs);
- if (!ia->ap.pages) {
+ ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,
+ &ia->ap.descs);
+ if (!ia->ap.folios) {
kfree(ia);
ia = NULL;
}
@@ -709,7 +699,7 @@ static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
static void fuse_io_free(struct fuse_io_args *ia)
{
- kfree(ia->ap.pages);
+ kfree(ia->ap.folios);
kfree(ia);
}
@@ -719,25 +709,29 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
struct fuse_io_priv *io = ia->io;
ssize_t pos = -1;
-
- fuse_release_user_pages(&ia->ap, io->should_dirty);
+ size_t nres;
if (err) {
/* Nothing */
} else if (io->write) {
if (ia->write.out.size > ia->write.in.size) {
err = -EIO;
- } else if (ia->write.in.size != ia->write.out.size) {
- pos = ia->write.in.offset - io->offset +
- ia->write.out.size;
+ } else {
+ nres = ia->write.out.size;
+ if (ia->write.in.size != ia->write.out.size)
+ pos = ia->write.in.offset - io->offset +
+ ia->write.out.size;
}
} else {
u32 outsize = args->out_args[0].size;
+ nres = outsize;
if (ia->read.in.size != outsize)
pos = ia->read.in.offset - io->offset + outsize;
}
+ fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty);
+
fuse_aio_complete(io, err, pos);
fuse_io_free(ia);
}
@@ -808,34 +802,31 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
* reached the client fs yet. So the hole is not present there.
*/
if (!fc->writeback_cache) {
- loff_t pos = page_offset(ap->pages[0]) + num_read;
+ loff_t pos = folio_pos(ap->folios[0]) + num_read;
fuse_read_update_size(inode, pos, attr_ver);
}
}
-static int fuse_do_readpage(struct file *file, struct page *page)
+static int fuse_do_readfolio(struct file *file, struct folio *folio,
+ size_t off, size_t len)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct fuse_mount *fm = get_fuse_mount(inode);
- loff_t pos = page_offset(page);
- struct fuse_page_desc desc = { .length = PAGE_SIZE };
+ loff_t pos = folio_pos(folio) + off;
+ struct fuse_folio_desc desc = {
+ .offset = off,
+ .length = len,
+ };
struct fuse_io_args ia = {
.ap.args.page_zeroing = true,
.ap.args.out_pages = true,
- .ap.num_pages = 1,
- .ap.pages = &page,
+ .ap.num_folios = 1,
+ .ap.folios = &folio,
.ap.descs = &desc,
};
ssize_t res;
u64 attr_ver;
- /*
- * Page writeback can extend beyond the lifetime of the
- * page-cache page, so make sure we read a properly synced
- * page.
- */
- fuse_wait_on_page_writeback(inode, page->index);
-
attr_ver = fuse_get_attr_version(fm->fc);
/* Don't overflow end offset */
@@ -852,25 +843,155 @@ static int fuse_do_readpage(struct file *file, struct page *page)
if (res < desc.length)
fuse_short_read(inode, attr_ver, res, &ia.ap);
- SetPageUptodate(page);
+ return 0;
+}
+
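+/*
+ * Minimal iomap_begin: report the entire requested range as a single mapped
+ * extent; the folio read/write callbacks issue the actual FUSE requests.
+ */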
+static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ iomap->type = IOMAP_MAPPED;
+ iomap->length = length;
+ iomap->offset = offset;
+ return 0;
+}
+
+static const struct iomap_ops fuse_iomap_ops = {
+ .iomap_begin = fuse_iomap_begin,
+};
+
+struct fuse_fill_read_data {
+ struct file *file;
+
+ /* Fields below are used if sending the read request asynchronously */
+ struct fuse_conn *fc;
+ struct fuse_io_args *ia;
+ unsigned int nr_bytes;
+};
+
+/* forward declarations */
+static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos,
+ unsigned len, struct fuse_args_pages *ap,
+ unsigned cur_bytes, bool write);
+static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
+ unsigned int count, bool async);
+
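+/*
+ * Add @folio to the readahead request being batched in @data, sending the
+ * current batch first if it cannot take this range and allocating a new
+ * fuse_io_args on demand. Returns -EAGAIN if the connection is congested
+ * and only asynchronous readahead pages remain.
+ */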
+static int fuse_handle_readahead(struct folio *folio,
+ struct readahead_control *rac,
+ struct fuse_fill_read_data *data, loff_t pos,
+ size_t len)
+{
+ struct fuse_io_args *ia = data->ia;
+ size_t off = offset_in_folio(folio, pos);
+ struct fuse_conn *fc = data->fc;
+ struct fuse_args_pages *ap;
+ unsigned int nr_pages;
+
+ if (ia && fuse_folios_need_send(fc, pos, len, &ia->ap, data->nr_bytes,
+ false)) {
+ fuse_send_readpages(ia, data->file, data->nr_bytes,
+ fc->async_read);
+ data->nr_bytes = 0;
+ data->ia = NULL;
+ ia = NULL;
+ }
+ if (!ia) {
+ if (fc->num_background >= fc->congestion_threshold &&
+ rac->ra->async_size >= readahead_count(rac))
+ /*
+ * Congested and only async pages left, so skip the
+ * rest.
+ */
+ return -EAGAIN;
+
+ nr_pages = min(fc->max_pages, readahead_count(rac));
+ data->ia = fuse_io_alloc(NULL, nr_pages);
+ if (!data->ia)
+ return -ENOMEM;
+ ia = data->ia;
+ }
+ folio_get(folio);
+ ap = &ia->ap;
+ ap->folios[ap->num_folios] = folio;
+ ap->descs[ap->num_folios].offset = off;
+ ap->descs[ap->num_folios].length = len;
+ data->nr_bytes += len;
+ ap->num_folios++;
return 0;
}
-static int fuse_readpage(struct file *file, struct page *page)
+static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx,
+ size_t len)
{
- struct inode *inode = page->mapping->host;
- int err;
+ struct fuse_fill_read_data *data = ctx->read_ctx;
+ struct folio *folio = ctx->cur_folio;
+ loff_t pos = iter->pos;
+ size_t off = offset_in_folio(folio, pos);
+ struct file *file = data->file;
+ int ret;
- err = -EIO;
- if (fuse_is_bad(inode))
- goto out;
+ if (ctx->rac) {
+ ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len);
+ } else {
+ /*
+ * For non-readahead read requests, do the read synchronously
+ * since it's not guaranteed that the server can handle
+ * out-of-order reads.
+ */
+ ret = fuse_do_readfolio(file, folio, off, len);
+ if (!ret)
+ iomap_finish_folio_read(folio, off, len, ret);
+ }
+ return ret;
+}
- err = fuse_do_readpage(file, page);
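+/* Send any readahead batch that is still pending when the iomap read ends. */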
+static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx)
+{
+ struct fuse_fill_read_data *data = ctx->read_ctx;
+
+ if (data->ia)
+ fuse_send_readpages(data->ia, data->file, data->nr_bytes,
+ data->fc->async_read);
+}
+
+static const struct iomap_read_ops fuse_iomap_read_ops = {
+ .read_folio_range = fuse_iomap_read_folio_range_async,
+ .submit_read = fuse_iomap_read_submit,
+};
+
+static int fuse_read_folio(struct file *file, struct folio *folio)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_fill_read_data data = {
+ .file = file,
+ };
+ struct iomap_read_folio_ctx ctx = {
+ .cur_folio = folio,
+ .ops = &fuse_iomap_read_ops,
+ .read_ctx = &data,
+
+ };
+
+ if (fuse_is_bad(inode)) {
+ folio_unlock(folio);
+ return -EIO;
+ }
+
+ iomap_read_folio(&fuse_iomap_ops, &ctx);
fuse_invalidate_atime(inode);
- out:
- unlock_page(page);
- return err;
+ return 0;
+}
+
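+/*
+ * Synchronous folio-range read, wired up as the ->read_folio_range op of
+ * fuse_iomap_write_ops for the buffered write path.
+ */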
+static int fuse_iomap_read_folio_range(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos,
+ size_t len)
+{
+ struct file *file = iter->private;
+ size_t off = offset_in_folio(folio, pos);
+
+ return fuse_do_readfolio(file, folio, off, len);
}
static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
@@ -881,46 +1002,39 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
struct fuse_args_pages *ap = &ia->ap;
size_t count = ia->read.in.size;
size_t num_read = args->out_args[0].size;
- struct address_space *mapping = NULL;
-
- for (i = 0; mapping == NULL && i < ap->num_pages; i++)
- mapping = ap->pages[i]->mapping;
+ struct address_space *mapping;
+ struct inode *inode;
- if (mapping) {
- struct inode *inode = mapping->host;
+ WARN_ON_ONCE(!ap->num_folios);
+ mapping = ap->folios[0]->mapping;
+ inode = mapping->host;
- /*
- * Short read means EOF. If file size is larger, truncate it
- */
- if (!err && num_read < count)
- fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
-
- fuse_invalidate_atime(inode);
- }
+ /*
+ * Short read means EOF. If file size is larger, truncate it
+ */
+ if (!err && num_read < count)
+ fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
- for (i = 0; i < ap->num_pages; i++) {
- struct page *page = ap->pages[i];
+ fuse_invalidate_atime(inode);
- if (!err)
- SetPageUptodate(page);
- else
- SetPageError(page);
- unlock_page(page);
- put_page(page);
+ for (i = 0; i < ap->num_folios; i++) {
+ iomap_finish_folio_read(ap->folios[i], ap->descs[i].offset,
+ ap->descs[i].length, err);
+ folio_put(ap->folios[i]);
}
if (ia->ff)
- fuse_file_put(ia->ff, false, false);
+ fuse_file_put(ia->ff, false);
fuse_io_free(ia);
}
-static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
+static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
+ unsigned int count, bool async)
{
struct fuse_file *ff = file->private_data;
struct fuse_mount *fm = ff->fm;
struct fuse_args_pages *ap = &ia->ap;
- loff_t pos = page_offset(ap->pages[0]);
- size_t count = ap->num_pages << PAGE_SHIFT;
+ loff_t pos = folio_pos(ap->folios[0]);
ssize_t res;
int err;
@@ -931,13 +1045,13 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
/* Don't overflow end offset */
if (pos + (count - 1) == LLONG_MAX) {
count--;
- ap->descs[ap->num_pages - 1].length--;
+ ap->descs[ap->num_folios - 1].length--;
}
WARN_ON((loff_t) (pos + count) < 0);
fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
ia->read.attr_ver = fuse_get_attr_version(fm->fc);
- if (fm->fc->async_read) {
+ if (async) {
ia->ff = fuse_file_get(ff);
ap->args.end = fuse_readpages_end;
err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
@@ -954,36 +1068,20 @@ static void fuse_readahead(struct readahead_control *rac)
{
struct inode *inode = rac->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- unsigned int i, max_pages, nr_pages = 0;
+ struct fuse_fill_read_data data = {
+ .file = rac->file,
+ .fc = fc,
+ };
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &fuse_iomap_read_ops,
+ .rac = rac,
+ .read_ctx = &data
+ };
if (fuse_is_bad(inode))
return;
- max_pages = min_t(unsigned int, fc->max_pages,
- fc->max_read / PAGE_SIZE);
-
- for (;;) {
- struct fuse_io_args *ia;
- struct fuse_args_pages *ap;
-
- nr_pages = readahead_count(rac) - nr_pages;
- if (nr_pages > max_pages)
- nr_pages = max_pages;
- if (nr_pages == 0)
- break;
- ia = fuse_io_alloc(NULL, nr_pages);
- if (!ia)
- return;
- ap = &ia->ap;
- nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
- for (i = 0; i < nr_pages; i++) {
- fuse_wait_on_page_writeback(inode,
- readahead_index(rac) + i);
- ap->descs[i].length = PAGE_SIZE;
- }
- ap->num_pages = nr_pages;
- fuse_send_readpages(ia, rac->file);
- }
+ iomap_readahead(&fuse_iomap_ops, &ctx);
}
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -1033,7 +1131,7 @@ static unsigned int fuse_write_flags(struct kiocb *iocb)
{
unsigned int flags = iocb->ki_filp->f_flags;
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb))
flags |= O_DSYNC;
if (iocb->ki_flags & IOCB_SYNC)
flags |= O_SYNC;
@@ -1099,8 +1197,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
bool short_write;
int err;
- for (i = 0; i < ap->num_pages; i++)
- fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
+ for (i = 0; i < ap->num_folios; i++)
+ folio_wait_writeback(ap->folios[i]);
fuse_write_args_fill(ia, ff, pos, count);
ia->write.in.flags = fuse_write_flags(iocb);
@@ -1114,24 +1212,24 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
short_write = ia->write.out.size < count;
offset = ap->descs[0].offset;
count = ia->write.out.size;
- for (i = 0; i < ap->num_pages; i++) {
- struct page *page = ap->pages[i];
+ for (i = 0; i < ap->num_folios; i++) {
+ struct folio *folio = ap->folios[i];
if (err) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
} else {
- if (count >= PAGE_SIZE - offset)
- count -= PAGE_SIZE - offset;
+ if (count >= folio_size(folio) - offset)
+ count -= folio_size(folio) - offset;
else {
if (short_write)
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
count = 0;
}
offset = 0;
}
- if (ia->write.page_locked && (i == ap->num_pages - 1))
- unlock_page(page);
- put_page(page);
+ if (ia->write.folio_locked && (i == ap->num_folios - 1))
+ folio_unlock(folio);
+ folio_put(folio);
}
return err;
@@ -1140,73 +1238,84 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
struct address_space *mapping,
struct iov_iter *ii, loff_t pos,
- unsigned int max_pages)
+ unsigned int max_folios)
{
struct fuse_args_pages *ap = &ia->ap;
struct fuse_conn *fc = get_fuse_conn(mapping->host);
unsigned offset = pos & (PAGE_SIZE - 1);
size_t count = 0;
- int err;
+ unsigned int num;
+ int err = 0;
+
+ num = min(iov_iter_count(ii), fc->max_write);
ap->args.in_pages = true;
- ap->descs[0].offset = offset;
- do {
+ while (num && ap->num_folios < max_folios) {
size_t tmp;
- struct page *page;
+ struct folio *folio;
pgoff_t index = pos >> PAGE_SHIFT;
- size_t bytes = min_t(size_t, PAGE_SIZE - offset,
- iov_iter_count(ii));
-
- bytes = min_t(size_t, bytes, fc->max_write - count);
+ unsigned int bytes;
+ unsigned int folio_offset;
again:
- err = -EFAULT;
- if (fault_in_iov_iter_readable(ii, bytes))
- break;
-
- err = -ENOMEM;
- page = grab_cache_page_write_begin(mapping, index, 0);
- if (!page)
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
break;
+ }
if (mapping_writably_mapped(mapping))
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
+
+ folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ bytes = min(folio_size(folio) - folio_offset, num);
- tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
- flush_dcache_page(page);
+ tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
+ flush_dcache_folio(folio);
if (!tmp) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
+
+ /*
+ * Ensure forward progress by faulting in
+ * while not holding the folio lock:
+ */
+ if (fault_in_iov_iter_readable(ii, bytes)) {
+ err = -EFAULT;
+ break;
+ }
+
goto again;
}
- err = 0;
- ap->pages[ap->num_pages] = page;
- ap->descs[ap->num_pages].length = tmp;
- ap->num_pages++;
+ ap->folios[ap->num_folios] = folio;
+ ap->descs[ap->num_folios].offset = folio_offset;
+ ap->descs[ap->num_folios].length = tmp;
+ ap->num_folios++;
count += tmp;
pos += tmp;
+ num -= tmp;
offset += tmp;
- if (offset == PAGE_SIZE)
+ if (offset == folio_size(folio))
offset = 0;
- /* If we copied full page, mark it uptodate */
- if (tmp == PAGE_SIZE)
- SetPageUptodate(page);
+ /* If we copied full folio, mark it uptodate */
+ if (tmp == folio_size(folio))
+ folio_mark_uptodate(folio);
- if (PageUptodate(page)) {
- unlock_page(page);
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
} else {
- ia->write.page_locked = true;
+ ia->write.folio_locked = true;
break;
}
- if (!fc->big_writes)
+ if (!fc->big_writes || offset != 0)
break;
- } while (iov_iter_count(ii) && count < fc->max_write &&
- ap->num_pages < max_pages && offset == 0);
+ }
return count > 0 ? count : err;
}
@@ -1220,13 +1329,13 @@ static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
max_pages);
}
-static ssize_t fuse_perform_write(struct kiocb *iocb,
- struct address_space *mapping,
- struct iov_iter *ii, loff_t pos)
+static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
+ loff_t pos = iocb->ki_pos;
int err = 0;
ssize_t res = 0;
@@ -1240,8 +1349,8 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
fc->max_pages);
- ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
- if (!ap->pages) {
+ ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs);
+ if (!ap->folios) {
err = -ENOMEM;
break;
}
@@ -1263,25 +1372,112 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
err = -EIO;
}
}
- kfree(ap->pages);
+ kfree(ap->folios);
} while (!err && iov_iter_count(ii));
fuse_write_update_attr(inode, pos, res);
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
- return res > 0 ? res : err;
+ if (!res)
+ return err;
+ iocb->ki_pos += res;
+ return res;
}
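+/* True if the I/O request would extend past the current end of file. */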
+static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
+}
+
+/*
+ * @return true if an exclusive lock for direct IO writes is needed
+ */
+static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ /* Server side has to advise that it supports parallel dio writes. */
+ if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
+ return true;
+
+ /*
+ * Append will need to know the eventual EOF - always needs an
+ * exclusive lock.
+ */
+ if (iocb->ki_flags & IOCB_APPEND)
+ return true;
+
+ /* shared locks are not allowed with parallel page cache IO */
+ if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
+ return true;
+
+ /* Parallel dio beyond EOF is not supported, at least for now. */
+ if (fuse_io_past_eof(iocb, from))
+ return true;
+
+ return false;
+}
+
+static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
+ bool *exclusive)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
+ if (*exclusive) {
+ inode_lock(inode);
+ } else {
+ inode_lock_shared(inode);
+ /*
+ * New parallel dio is allowed only if the inode is not in
+ * caching mode; starting it also denies new opens in caching
+ * mode. This check must be performed only after taking the
+ * shared inode lock. The earlier past-EOF check was done
+ * without the inode lock and might have raced, so check it again.
+ */
+ if (fuse_io_past_eof(iocb, from) ||
+ fuse_inode_uncached_io_start(fi, NULL) != 0) {
+ inode_unlock_shared(inode);
+ inode_lock(inode);
+ *exclusive = true;
+ }
+ }
+}
+
+static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ if (exclusive) {
+ inode_unlock(inode);
+ } else {
+ /* Allow opens in caching mode after last parallel dio end */
+ fuse_inode_uncached_io_end(fi);
+ inode_unlock_shared(inode);
+ }
+}
+
+static const struct iomap_write_ops fuse_iomap_write_ops = {
+ .read_folio_range = fuse_iomap_read_folio_range,
+};
+
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct address_space *mapping = file->f_mapping;
ssize_t written = 0;
- ssize_t written_buffered = 0;
struct inode *inode = mapping->host;
- ssize_t err;
+ ssize_t err, count;
struct fuse_conn *fc = get_fuse_conn(inode);
- loff_t endbyte = 0;
+ bool writeback = false;
if (fc->writeback_cache) {
/* Update size (EOF optimization) and mode (SUID clearing) */
@@ -1290,65 +1486,42 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
return err;
- if (fc->handle_killpriv_v2 &&
- should_remove_suid(file_dentry(file))) {
- goto writethrough;
- }
-
- return generic_file_write_iter(iocb, from);
+ if (!fc->handle_killpriv_v2 ||
+ !setattr_should_drop_suidgid(idmap, file_inode(file)))
+ writeback = true;
}
-writethrough:
inode_lock(inode);
- /* We can write back this queue in page reclaim */
- current->backing_dev_info = inode_to_bdi(inode);
-
- err = generic_write_checks(iocb, from);
+ err = count = generic_write_checks(iocb, from);
if (err <= 0)
goto out;
- err = file_remove_privs(file);
- if (err)
- goto out;
+ task_io_account_write(count);
- err = file_update_time(file);
+ err = kiocb_modified(iocb);
if (err)
goto out;
if (iocb->ki_flags & IOCB_DIRECT) {
- loff_t pos = iocb->ki_pos;
written = generic_file_direct_write(iocb, from);
if (written < 0 || !iov_iter_count(from))
goto out;
-
- pos += written;
-
- written_buffered = fuse_perform_write(iocb, mapping, from, pos);
- if (written_buffered < 0) {
- err = written_buffered;
- goto out;
- }
- endbyte = pos + written_buffered - 1;
-
- err = filemap_write_and_wait_range(file->f_mapping, pos,
- endbyte);
- if (err)
- goto out;
-
- invalidate_mapping_pages(file->f_mapping,
- pos >> PAGE_SHIFT,
- endbyte >> PAGE_SHIFT);
-
- written += written_buffered;
- iocb->ki_pos = pos + written_buffered;
+ written = direct_write_fallback(iocb, from, written,
+ fuse_perform_write(iocb, from));
+ } else if (writeback) {
+ /*
+ * Use iomap so that we can do granular uptodate reads
+ * and granular dirty tracking for large folios.
+ */
+ written = iomap_file_buffered_write(iocb, from,
+ &fuse_iomap_ops,
+ &fuse_iomap_write_ops,
+ file);
} else {
- written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
- if (written >= 0)
- iocb->ki_pos += written;
+ written = fuse_perform_write(iocb, from);
}
out:
- current->backing_dev_info = NULL;
inode_unlock(inode);
if (written > 0)
written = generic_write_sync(iocb, written);
@@ -1358,7 +1531,7 @@ out:
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
- return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+ return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}
static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
@@ -1369,55 +1542,97 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
size_t *nbytesp, int write,
- unsigned int max_pages)
+ unsigned int max_pages,
+ bool use_pages_for_kvec_io)
{
+ bool flush_or_invalidate = false;
+ unsigned int nr_pages = 0;
size_t nbytes = 0; /* # bytes already packed in req */
ssize_t ret = 0;
- /* Special case for kernel I/O: can copy directly into the buffer */
+ /* Special case for kernel I/O: can copy directly into the buffer.
+ * However, if the fuse_conn implementation requires pages instead of a
+ * pointer (e.g. virtio-fs), use iov_iter_extract_pages() instead.
+ */
if (iov_iter_is_kvec(ii)) {
- unsigned long user_addr = fuse_get_user_addr(ii);
- size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
+ void *user_addr = (void *)fuse_get_user_addr(ii);
- if (write)
- ap->args.in_args[1].value = (void *) user_addr;
- else
- ap->args.out_args[0].value = (void *) user_addr;
+ if (!use_pages_for_kvec_io) {
+ size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
- iov_iter_advance(ii, frag_size);
- *nbytesp = frag_size;
- return 0;
+ if (write)
+ ap->args.in_args[1].value = user_addr;
+ else
+ ap->args.out_args[0].value = user_addr;
+
+ iov_iter_advance(ii, frag_size);
+ *nbytesp = frag_size;
+ return 0;
+ }
+
+ if (is_vmalloc_addr(user_addr)) {
+ ap->args.vmap_base = user_addr;
+ flush_or_invalidate = true;
+ }
}
- while (nbytes < *nbytesp && ap->num_pages < max_pages) {
- unsigned npages;
+ /*
+ * Until there is support for iov_iter_extract_folios(), we have to
+ * manually extract pages using iov_iter_extract_pages() and then
+ * copy them into a folio array.
+ */
+ struct page **pages = kzalloc(max_pages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ while (nbytes < *nbytesp && nr_pages < max_pages) {
+ unsigned nfolios, i;
size_t start;
- ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
- *nbytesp - nbytes,
- max_pages - ap->num_pages,
- &start);
+
+ ret = iov_iter_extract_pages(ii, &pages,
+ *nbytesp - nbytes,
+ max_pages - nr_pages,
+ 0, &start);
if (ret < 0)
break;
- iov_iter_advance(ii, ret);
nbytes += ret;
- ret += start;
- npages = DIV_ROUND_UP(ret, PAGE_SIZE);
+ nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
+
+ for (i = 0; i < nfolios; i++) {
+ struct folio *folio = page_folio(pages[i]);
+ unsigned int offset = start +
+ (folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
+ unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);
- ap->descs[ap->num_pages].offset = start;
- fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
+ ap->descs[ap->num_folios].offset = offset;
+ ap->descs[ap->num_folios].length = len;
+ ap->folios[ap->num_folios] = folio;
+ start = 0;
+ ret -= len;
+ ap->num_folios++;
+ }
- ap->num_pages += npages;
- ap->descs[ap->num_pages - 1].length -=
- (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
+ nr_pages += nfolios;
}
+ kfree(pages);
+
+ if (write && flush_or_invalidate)
+ flush_kernel_vmap_range(ap->args.vmap_base, nbytes);
+ ap->args.invalidate_vmap = !write && flush_or_invalidate;
+ ap->args.is_pinned = iov_iter_extract_will_pin(ii);
+ ap->args.user_pages = true;
if (write)
ap->args.in_pages = true;
else
ap->args.out_pages = true;
+out:
*nbytesp = nbytes;
return ret < 0 ? ret : 0;
@@ -1429,7 +1644,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
int write = flags & FUSE_DIO_WRITE;
int cuse = flags & FUSE_DIO_CUSE;
struct file *file = io->iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fm->fc;
size_t nmax = write ? fc->max_write : fc->max_read;
@@ -1441,13 +1657,21 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
int err = 0;
struct fuse_io_args *ia;
unsigned int max_pages;
+ bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
max_pages = iov_iter_npages(iter, fc->max_pages);
ia = fuse_io_alloc(io, max_pages);
if (!ia)
return -ENOMEM;
- if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
+ if (fopen_direct_io) {
+ res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
+ if (res) {
+ fuse_io_free(ia);
+ return res;
+ }
+ }
+ if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) {
if (!write)
inode_lock(inode);
fuse_sync_writes(inode);
@@ -1455,14 +1679,22 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
inode_unlock(inode);
}
- io->should_dirty = !write && iter_is_iovec(iter);
+ if (fopen_direct_io && write) {
+ res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
+ if (res) {
+ fuse_io_free(ia);
+ return res;
+ }
+ }
+
+ io->should_dirty = !write && user_backed_iter(iter);
while (count) {
ssize_t nres;
fl_owner_t owner = current->files;
size_t nbytes = min(count, nmax);
err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
- max_pages);
+ max_pages, fc->use_pages_for_kvec_io);
if (err && !nbytes)
break;
@@ -1476,7 +1708,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
}
if (!io->async || nres < 0) {
- fuse_release_user_pages(&ia->ap, io->should_dirty);
+ fuse_release_user_pages(&ia->ap, nres, io->should_dirty);
fuse_io_free(ia);
}
ia = NULL;
@@ -1506,6 +1738,15 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
if (res > 0)
*ppos = pos;
+ if (res > 0 && write && fopen_direct_io) {
+ /*
+ * As in generic_file_direct_write(), invalidate the range after
+ * the write to drop any read-ahead pages that may have competed
+ * with the write.
+ */
+ invalidate_inode_pages2_range(mapping, idx_from, idx_to);
+ }
+
return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
@@ -1530,7 +1771,7 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
ssize_t res;
- if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+ if (!is_sync_kiocb(iocb)) {
res = fuse_direct_IO(iocb, to);
} else {
struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
@@ -1544,22 +1785,24 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
ssize_t res;
+ bool exclusive;
- /* Don't allow parallel writes to the same file */
- inode_lock(inode);
+ fuse_dio_lock(iocb, from, &exclusive);
res = generic_write_checks(iocb, from);
if (res > 0) {
- if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+ task_io_account_write(res);
+ if (!is_sync_kiocb(iocb)) {
res = fuse_direct_IO(iocb, from);
} else {
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+
res = fuse_direct_io(&io, from, &iocb->ki_pos,
FUSE_DIO_WRITE);
fuse_write_update_attr(inode, iocb->ki_pos, res);
}
}
- inode_unlock(inode);
+ fuse_dio_unlock(iocb, exclusive);
return res;
}
@@ -1576,10 +1819,13 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (FUSE_IS_DAX(inode))
return fuse_dax_read_iter(iocb, to);
- if (!(ff->open_flags & FOPEN_DIRECT_IO))
- return fuse_cache_read_iter(iocb, to);
- else
+ /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
+ if (ff->open_flags & FOPEN_DIRECT_IO)
return fuse_direct_read_iter(iocb, to);
+ else if (fuse_file_passthrough(ff))
+ return fuse_passthrough_read_iter(iocb, to);
+ else
+ return fuse_cache_read_iter(iocb, to);
}
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -1594,44 +1840,69 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (FUSE_IS_DAX(inode))
return fuse_dax_write_iter(iocb, from);
- if (!(ff->open_flags & FOPEN_DIRECT_IO))
+ /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
+ if (ff->open_flags & FOPEN_DIRECT_IO)
+ return fuse_direct_write_iter(iocb, from);
+ else if (fuse_file_passthrough(ff))
+ return fuse_passthrough_write_iter(iocb, from);
+ else
return fuse_cache_write_iter(iocb, from);
+}
+
+static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ struct fuse_file *ff = in->private_data;
+
+ /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
+ if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
+ return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
else
- return fuse_direct_write_iter(iocb, from);
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
+static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ struct fuse_file *ff = out->private_data;
+
+ /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
+ if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
+ return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
+ else
+ return iter_file_splice_write(pipe, out, ppos, len, flags);
}
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
- int i;
if (wpa->bucket)
fuse_sync_bucket_dec(wpa->bucket);
- for (i = 0; i < ap->num_pages; i++)
- __free_page(ap->pages[i]);
-
- if (wpa->ia.ff)
- fuse_file_put(wpa->ia.ff, false, false);
+ fuse_file_put(wpa->ia.ff, false);
- kfree(ap->pages);
+ kfree(ap->folios);
kfree(wpa);
}
-static void fuse_writepage_finish(struct fuse_mount *fm,
- struct fuse_writepage_args *wpa)
+static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- for (i = 0; i < ap->num_pages; i++) {
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
- }
+ for (i = 0; i < ap->num_folios; i++)
+ /*
+ * Benchmarks showed that ending writeback within the
+ * scope of the fi->lock alleviates xarray lock
+ * contention and noticeably improves performance.
+ */
+ iomap_finish_folio_write(inode, ap->folios[i],
+ ap->descs[i].length);
+
wake_up(&fi->page_waitq);
}
@@ -1641,12 +1912,15 @@ static void fuse_send_writepage(struct fuse_mount *fm,
__releases(fi->lock)
__acquires(fi->lock)
{
- struct fuse_writepage_args *aux, *next;
struct fuse_inode *fi = get_fuse_inode(wpa->inode);
+ struct fuse_args_pages *ap = &wpa->ia.ap;
struct fuse_write_in *inarg = &wpa->ia.write.in;
- struct fuse_args *args = &wpa->ia.ap.args;
- __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
- int err;
+ struct fuse_args *args = &ap->args;
+ __u64 data_size = 0;
+ int err, i;
+
+ for (i = 0; i < ap->num_folios; i++)
+ data_size += ap->descs[i].length;
fi->writectr++;
if (inarg->offset + data_size <= size) {
@@ -1677,17 +1951,8 @@ __acquires(fi->lock)
out_free:
fi->writectr--;
- rb_erase(&wpa->writepages_entry, &fi->writepages);
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
-
- /* After fuse_writepage_finish() aux request list is private */
- for (aux = wpa->next; aux; aux = next) {
- next = aux->next;
- aux->next = NULL;
- fuse_writepage_free(aux);
- }
-
fuse_writepage_free(wpa);
spin_lock(&fi->lock);
}
@@ -1715,43 +1980,6 @@ __acquires(fi->lock)
}
}
-static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
- struct fuse_writepage_args *wpa)
-{
- pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
- pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
-
- WARN_ON(!wpa->ia.ap.num_pages);
- while (*p) {
- struct fuse_writepage_args *curr;
- pgoff_t curr_index;
-
- parent = *p;
- curr = rb_entry(parent, struct fuse_writepage_args,
- writepages_entry);
- WARN_ON(curr->inode != wpa->inode);
- curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
-
- if (idx_from >= curr_index + curr->ia.ap.num_pages)
- p = &(*p)->rb_right;
- else if (idx_to < curr_index)
- p = &(*p)->rb_left;
- else
- return curr;
- }
-
- rb_link_node(&wpa->writepages_entry, parent, p);
- rb_insert_color(&wpa->writepages_entry, root);
- return NULL;
-}
-
-static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
-{
- WARN_ON(fuse_insert_writeback(root, wpa));
-}
-
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
int error)
{
@@ -1771,44 +1999,8 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
if (!fc->writeback_cache)
fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
spin_lock(&fi->lock);
- rb_erase(&wpa->writepages_entry, &fi->writepages);
- while (wpa->next) {
- struct fuse_mount *fm = get_fuse_mount(inode);
- struct fuse_write_in *inarg = &wpa->ia.write.in;
- struct fuse_writepage_args *next = wpa->next;
-
- wpa->next = next->next;
- next->next = NULL;
- next->ia.ff = fuse_file_get(wpa->ia.ff);
- tree_insert(&fi->writepages, next);
-
- /*
- * Skip fuse_flush_writepages() to make it easy to crop requests
- * based on primary request size.
- *
- * 1st case (trivial): there are no concurrent activities using
- * fuse_set/release_nowrite. Then we're on safe side because
- * fuse_flush_writepages() would call fuse_send_writepage()
- * anyway.
- *
- * 2nd case: someone called fuse_set_nowrite and it is waiting
- * now for completion of all in-flight requests. This happens
- * rarely and no more than once per page, so this should be
- * okay.
- *
- * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
- * of fuse_set_nowrite..fuse_release_nowrite section. The fact
- * that fuse_set_nowrite returned implies that all in-flight
- * requests were completed along with all of their secondary
- * requests. Further primary requests are blocked by negative
- * writectr. Hence there cannot be any in-flight requests and
- * no invocations of fuse_writepage_end() while we're in
- * fuse_set_nowrite..fuse_release_nowrite section.
- */
- fuse_send_writepage(fm, next, inarg->offset + inarg->size);
- }
fi->writectr--;
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
fuse_writepage_free(wpa);
}
@@ -1840,21 +2032,10 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
struct fuse_file *ff;
int err;
- /*
- * Inode is always written before the last reference is dropped and
- * hence this should not be reached from reclaim.
- *
- * Writing back the inode from reclaim can deadlock if the request
- * processing itself needs an allocation. Allocations triggering
- * reclaim while serving a request can't be prevented, because it can
- * involve any number of unrelated userspace processes.
- */
- WARN_ON(wbc->for_reclaim);
-
ff = __fuse_write_file_get(fi);
err = fuse_flush_times(inode, ff);
if (ff)
- fuse_file_put(ff, false, false);
+ fuse_file_put(ff, false);
return err;
}
@@ -1867,9 +2048,9 @@ static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
if (wpa) {
ap = &wpa->ia.ap;
- ap->num_pages = 0;
- ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
- if (!ap->pages) {
+ ap->num_folios = 0;
+ ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs);
+ if (!ap->folios) {
kfree(wpa);
wpa = NULL;
}
@@ -1892,455 +2073,244 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
rcu_read_unlock();
}
-static int fuse_writepage_locked(struct page *page)
+static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
+ uint32_t folio_index, loff_t offset, unsigned len)
{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
+
+ ap->folios[folio_index] = folio;
+ ap->descs[folio_index].offset = offset;
+ ap->descs[folio_index].length = len;
+}
+
+static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
+ size_t offset,
+ struct fuse_file *ff)
+{
+ struct inode *inode = folio->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_writepage_args *wpa;
struct fuse_args_pages *ap;
- struct page *tmp_page;
- int error = -ENOMEM;
-
- set_page_writeback(page);
wpa = fuse_writepage_args_alloc();
if (!wpa)
- goto err;
- ap = &wpa->ia.ap;
-
- tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
- if (!tmp_page)
- goto err_free;
-
- error = -EIO;
- wpa->ia.ff = fuse_write_file_get(fi);
- if (!wpa->ia.ff)
- goto err_nofile;
+ return NULL;
fuse_writepage_add_to_bucket(fc, wpa);
- fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
-
- copy_highpage(tmp_page, page);
+ fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0);
wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
- ap->num_pages = 1;
- ap->pages[0] = tmp_page;
- ap->descs[0].offset = 0;
- ap->descs[0].length = PAGE_SIZE;
- ap->args.end = fuse_writepage_end;
wpa->inode = inode;
+ wpa->ia.ff = ff;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
-
- spin_lock(&fi->lock);
- tree_insert(&fi->writepages, wpa);
- list_add_tail(&wpa->queue_entry, &fi->queued_writes);
- fuse_flush_writepages(inode);
- spin_unlock(&fi->lock);
-
- end_page_writeback(page);
-
- return 0;
-
-err_nofile:
- __free_page(tmp_page);
-err_free:
- kfree(wpa);
-err:
- mapping_set_error(page->mapping, error);
- end_page_writeback(page);
- return error;
-}
-
-static int fuse_writepage(struct page *page, struct writeback_control *wbc)
-{
- int err;
-
- if (fuse_page_is_writeback(page->mapping->host, page->index)) {
- /*
- * ->writepages() should be called for sync() and friends. We
- * should only get here on direct reclaim and then we are
- * allowed to skip a page which is already in flight
- */
- WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
-
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
- }
-
- err = fuse_writepage_locked(page);
- unlock_page(page);
+ ap = &wpa->ia.ap;
+ ap->args.in_pages = true;
+ ap->args.end = fuse_writepage_end;
- return err;
+ return wpa;
}
struct fuse_fill_wb_data {
struct fuse_writepage_args *wpa;
struct fuse_file *ff;
- struct inode *inode;
- struct page **orig_pages;
- unsigned int max_pages;
+ unsigned int max_folios;
+ /*
+ * nr_bytes won't overflow since fuse_folios_need_send() caps
+ * wb requests to never exceed fc->max_pages (which has an upper bound
+ * of U16_MAX).
+ */
+ unsigned int nr_bytes;
};
-static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
+static bool fuse_pages_realloc(struct fuse_fill_wb_data *data,
+ unsigned int max_pages)
{
struct fuse_args_pages *ap = &data->wpa->ia.ap;
- struct fuse_conn *fc = get_fuse_conn(data->inode);
- struct page **pages;
- struct fuse_page_desc *descs;
- unsigned int npages = min_t(unsigned int,
- max_t(unsigned int, data->max_pages * 2,
- FUSE_DEFAULT_MAX_PAGES_PER_REQ),
- fc->max_pages);
- WARN_ON(npages <= data->max_pages);
-
- pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
- if (!pages)
+ struct folio **folios;
+ struct fuse_folio_desc *descs;
+ unsigned int nfolios = min_t(unsigned int,
+ max_t(unsigned int, data->max_folios * 2,
+ FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+ max_pages);
+ WARN_ON(nfolios <= data->max_folios);
+
+ folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs);
+ if (!folios)
return false;
- memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
- memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
- kfree(ap->pages);
- ap->pages = pages;
+ memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios);
+ memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios);
+ kfree(ap->folios);
+ ap->folios = folios;
ap->descs = descs;
- data->max_pages = npages;
+ data->max_folios = nfolios;
return true;
}
-static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+static void fuse_writepages_send(struct inode *inode,
+ struct fuse_fill_wb_data *data)
{
struct fuse_writepage_args *wpa = data->wpa;
- struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- int num_pages = wpa->ia.ap.num_pages;
- int i;
- wpa->ia.ff = fuse_file_get(data->ff);
spin_lock(&fi->lock);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
-
- for (i = 0; i < num_pages; i++)
- end_page_writeback(data->orig_pages[i]);
}
-/*
- * Check under fi->lock if the page is under writeback, and insert it onto the
- * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
- * one already added for a page at this offset. If there's none, then insert
- * this new request onto the auxiliary list, otherwise reuse the existing one by
- * swapping the new temp page with the old one.
- */
-static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
- struct page *page)
+static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos,
+ unsigned len, struct fuse_args_pages *ap,
+ unsigned cur_bytes, bool write)
{
- struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
- struct fuse_writepage_args *tmp;
- struct fuse_writepage_args *old_wpa;
- struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
+ struct folio *prev_folio;
+ struct fuse_folio_desc prev_desc;
+ unsigned bytes = cur_bytes + len;
+ loff_t prev_pos;
+ size_t max_bytes = write ? fc->max_write : fc->max_read;
- WARN_ON(new_ap->num_pages != 0);
- new_ap->num_pages = 1;
-
- spin_lock(&fi->lock);
- old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
- if (!old_wpa) {
- spin_unlock(&fi->lock);
- return true;
- }
-
- for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
- pgoff_t curr_index;
-
- WARN_ON(tmp->inode != new_wpa->inode);
- curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
- if (curr_index == page->index) {
- WARN_ON(tmp->ia.ap.num_pages != 1);
- swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
- break;
- }
- }
-
- if (!tmp) {
- new_wpa->next = old_wpa->next;
- old_wpa->next = new_wpa;
- }
-
- spin_unlock(&fi->lock);
-
- if (tmp) {
- struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
- fuse_writepage_free(new_wpa);
- }
-
- return false;
-}
-
-static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
- struct fuse_args_pages *ap,
- struct fuse_fill_wb_data *data)
-{
- WARN_ON(!ap->num_pages);
-
- /*
- * Being under writeback is unlikely but possible. For example direct
- * read to an mmaped fuse file will set the page dirty twice; once when
- * the pages are faulted with get_user_pages(), and then after the read
- * completed.
- */
- if (fuse_page_is_writeback(data->inode, page->index))
- return true;
+ WARN_ON(!ap->num_folios);
/* Reached max pages */
- if (ap->num_pages == fc->max_pages)
+ if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages)
return true;
- /* Reached max write bytes */
- if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
+ if (bytes > max_bytes)
return true;
/* Discontinuity */
- if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
- return true;
-
- /* Need to grow the pages array? If so, did the expansion fail? */
- if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
+ prev_folio = ap->folios[ap->num_folios - 1];
+ prev_desc = ap->descs[ap->num_folios - 1];
+ prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length;
+ if (prev_pos != pos)
return true;
return false;
}
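A concrete reading of the checks above, with illustrative numbers (assuming 4 KiB folios; this sketch is not part of the patch):

/*
 * Example: the request built so far covers one folio at file position
 * 4096 with offset 0 and length 4096, so prev_pos == 8192.  A new range
 * can only be appended if pos == 8192 and the accumulated byte count
 * stays within fc->max_write (or fc->max_read) and fc->max_pages; any
 * gap or size overflow forces the pending request to be sent first.
 */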
-static int fuse_writepages_fill(struct page *page,
- struct writeback_control *wbc, void *_data)
+static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos,
+ unsigned len, u64 end_pos)
{
- struct fuse_fill_wb_data *data = _data;
+ struct fuse_fill_wb_data *data = wpc->wb_ctx;
struct fuse_writepage_args *wpa = data->wpa;
struct fuse_args_pages *ap = &wpa->ia.ap;
- struct inode *inode = data->inode;
+ struct inode *inode = wpc->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct page *tmp_page;
- int err;
+ loff_t offset = offset_in_folio(folio, pos);
+
+ WARN_ON_ONCE(!data);
if (!data->ff) {
- err = -EIO;
data->ff = fuse_write_file_get(fi);
if (!data->ff)
- goto out_unlock;
- }
-
- if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
- fuse_writepages_send(data);
- data->wpa = NULL;
+ return -EIO;
}
- err = -ENOMEM;
- tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
- if (!tmp_page)
- goto out_unlock;
-
- /*
- * The page must not be redirtied until the writeout is completed
- * (i.e. userspace has sent a reply to the write request). Otherwise
- * there could be more than one temporary page instance for each real
- * page.
- *
- * This is ensured by holding the page lock in page_mkwrite() while
- * checking fuse_page_is_writeback(). We already hold the page lock
- * since clear_page_dirty_for_io() and keep it held until we add the
- * request to the fi->writepages list and increment ap->num_pages.
- * After this fuse_page_is_writeback() will indicate that the page is
- * under writeback, so we can release the page lock.
- */
- if (data->wpa == NULL) {
- err = -ENOMEM;
- wpa = fuse_writepage_args_alloc();
- if (!wpa) {
- __free_page(tmp_page);
- goto out_unlock;
+ if (wpa) {
+ bool send = fuse_folios_need_send(fc, pos, len, ap,
+ data->nr_bytes, true);
+
+ if (!send) {
+ /*
+ * Need to grow the pages array? If so, did the
+ * expansion fail?
+ */
+ send = (ap->num_folios == data->max_folios) &&
+ !fuse_pages_realloc(data, fc->max_pages);
}
- fuse_writepage_add_to_bucket(fc, wpa);
- data->max_pages = 1;
+ if (send) {
+ fuse_writepages_send(inode, data);
+ data->wpa = NULL;
+ data->nr_bytes = 0;
+ }
+ }
+ if (data->wpa == NULL) {
+ wpa = fuse_writepage_args_setup(folio, offset, data->ff);
+ if (!wpa)
+ return -ENOMEM;
+ fuse_file_get(wpa->ia.ff);
+ data->max_folios = 1;
ap = &wpa->ia.ap;
- fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
- ap->args.end = fuse_writepage_end;
- ap->num_pages = 0;
- wpa->inode = inode;
}
- set_page_writeback(page);
-
- copy_highpage(tmp_page, page);
- ap->pages[ap->num_pages] = tmp_page;
- ap->descs[ap->num_pages].offset = 0;
- ap->descs[ap->num_pages].length = PAGE_SIZE;
- data->orig_pages[ap->num_pages] = page;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
+ fuse_writepage_args_page_fill(wpa, folio, ap->num_folios,
+ offset, len);
+ data->nr_bytes += len;
- err = 0;
- if (data->wpa) {
- /*
- * Protected by fi->lock against concurrent access by
- * fuse_page_is_writeback().
- */
- spin_lock(&fi->lock);
- ap->num_pages++;
- spin_unlock(&fi->lock);
- } else if (fuse_writepage_add(wpa, page)) {
+ ap->num_folios++;
+ if (!data->wpa)
data->wpa = wpa;
- } else {
- end_page_writeback(page);
- }
-out_unlock:
- unlock_page(page);
- return err;
+ return len;
}
-static int fuse_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc,
+ int error)
{
- struct inode *inode = mapping->host;
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_fill_wb_data data;
- int err;
-
- err = -EIO;
- if (fuse_is_bad(inode))
- goto out;
+ struct fuse_fill_wb_data *data = wpc->wb_ctx;
- data.inode = inode;
- data.wpa = NULL;
- data.ff = NULL;
+ WARN_ON_ONCE(!data);
- err = -ENOMEM;
- data.orig_pages = kcalloc(fc->max_pages,
- sizeof(struct page *),
- GFP_NOFS);
- if (!data.orig_pages)
- goto out;
-
- err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
- if (data.wpa) {
- WARN_ON(!data.wpa->ia.ap.num_pages);
- fuse_writepages_send(&data);
+ if (data->wpa) {
+ WARN_ON(!data->wpa->ia.ap.num_folios);
+ fuse_writepages_send(wpc->inode, data);
}
- if (data.ff)
- fuse_file_put(data.ff, false, false);
- kfree(data.orig_pages);
-out:
- return err;
-}
+ if (data->ff)
+ fuse_file_put(data->ff, false);
-/*
- * It's worthy to make sure that space is reserved on disk for the write,
- * but how to implement it without killing performance need more thinking.
- */
-static int fuse_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- pgoff_t index = pos >> PAGE_SHIFT;
- struct fuse_conn *fc = get_fuse_conn(file_inode(file));
- struct page *page;
- loff_t fsize;
- int err = -ENOMEM;
-
- WARN_ON(!fc->writeback_cache);
-
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- goto error;
-
- fuse_wait_on_page_writeback(mapping->host, page->index);
-
- if (PageUptodate(page) || len == PAGE_SIZE)
- goto success;
- /*
- * Check if the start this page comes after the end of file, in which
- * case the readpage can be optimized away.
- */
- fsize = i_size_read(mapping->host);
- if (fsize <= (pos & PAGE_MASK)) {
- size_t off = pos & ~PAGE_MASK;
- if (off)
- zero_user_segment(page, 0, off);
- goto success;
- }
- err = fuse_do_readpage(file, page);
- if (err)
- goto cleanup;
-success:
- *pagep = page;
- return 0;
-
-cleanup:
- unlock_page(page);
- put_page(page);
-error:
- return err;
+ return error;
}
-static int fuse_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = page->mapping->host;
-
- /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
- if (!copied)
- goto unlock;
-
- pos += copied;
- if (!PageUptodate(page)) {
- /* Zero any unwritten bytes at the end of the page */
- size_t endoff = pos & ~PAGE_MASK;
- if (endoff)
- zero_user_segment(page, endoff, PAGE_SIZE);
- SetPageUptodate(page);
- }
+static const struct iomap_writeback_ops fuse_writeback_ops = {
+ .writeback_range = fuse_iomap_writeback_range,
+ .writeback_submit = fuse_iomap_writeback_submit,
+};
- if (pos > inode->i_size)
- i_size_write(inode, pos);
+static int fuse_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_fill_wb_data data = {};
+ struct iomap_writepage_ctx wpc = {
+ .inode = inode,
+ .iomap.type = IOMAP_MAPPED,
+ .wbc = wbc,
+ .ops = &fuse_writeback_ops,
+ .wb_ctx = &data,
+ };
- set_page_dirty(page);
+ if (fuse_is_bad(inode))
+ return -EIO;
-unlock:
- unlock_page(page);
- put_page(page);
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+ fc->num_background >= fc->congestion_threshold)
+ return 0;
- return copied;
+ return iomap_writepages(&wpc);
}
-static int fuse_launder_page(struct page *page)
+static int fuse_launder_folio(struct folio *folio)
{
int err = 0;
- if (clear_page_dirty_for_io(page)) {
- struct inode *inode = page->mapping->host;
+ struct fuse_fill_wb_data data = {};
+ struct iomap_writepage_ctx wpc = {
+ .inode = folio->mapping->host,
+ .iomap.type = IOMAP_MAPPED,
+ .ops = &fuse_writeback_ops,
+ .wb_ctx = &data,
+ };
- /* Serialize with pending writeback for the same page */
- fuse_wait_on_page_writeback(inode, page->index);
- err = fuse_writepage_locked(page);
+ if (folio_clear_dirty_for_io(folio)) {
+ err = iomap_writeback_folio(&wpc, folio);
+ err = fuse_iomap_writeback_submit(&wpc, err);
if (!err)
- fuse_wait_on_page_writeback(inode, page->index);
+ folio_wait_writeback(folio);
}
return err;
}
@@ -2374,17 +2344,17 @@ static void fuse_vma_close(struct vm_area_struct *vma)
*/
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
file_update_time(vmf->vma->vm_file);
- lock_page(page);
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
+ folio_lock(folio);
+ if (folio->mapping != inode->i_mapping) {
+ folio_unlock(folio);
return VM_FAULT_NOPAGE;
}
- fuse_wait_on_page_writeback(inode, page->index);
+ folio_wait_writeback(folio);
return VM_FAULT_LOCKED;
}
@@ -2398,19 +2368,53 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = ff->fm->fc;
+ struct inode *inode = file_inode(file);
+ int rc;
/* DAX mmap is superior to direct_io mmap */
- if (FUSE_IS_DAX(file_inode(file)))
+ if (FUSE_IS_DAX(inode))
return fuse_dax_mmap(file, vma);
+ /*
+ * If the inode is in passthrough io mode (because some of its files are
+ * open in passthrough mode), either mmap the backing file or fail the
+ * mmap, because mixing cached mmap with passthrough io mode is not allowed.
+ */
+ if (fuse_file_passthrough(ff))
+ return fuse_passthrough_mmap(file, vma);
+ else if (fuse_inode_backing(get_fuse_inode(inode)))
+ return -ENODEV;
+
+ /*
+ * FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
+ * as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
+ */
if (ff->open_flags & FOPEN_DIRECT_IO) {
- /* Can't provide the coherency needed for MAP_SHARED */
- if (vma->vm_flags & VM_MAYSHARE)
+ /*
+ * Can't provide the coherency needed for MAP_SHARED
+ * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
+ */
+ if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
return -ENODEV;
invalidate_inode_pages2(file->f_mapping);
- return generic_file_mmap(file, vma);
+ if (!(vma->vm_flags & VM_MAYSHARE)) {
+ /* MAP_PRIVATE */
+ return generic_file_mmap(file, vma);
+ }
+
+ /*
+ * The first mmap of a direct_io file enters caching inode io mode.
+ * It also waits for parallel dio writers to go into serial mode
+ * (exclusive instead of shared lock).
+ * After the first mmap, the inode stays in caching io mode until
+ * the direct_io file is released.
+ */
+ rc = fuse_file_cached_io_open(inode, ff);
+ if (rc)
+ return rc;
}
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
@@ -2443,14 +2447,14 @@ static int convert_fuse_file_lock(struct fuse_conn *fc,
* translate it into the caller's pid namespace.
*/
rcu_read_lock();
- fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
+ fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
rcu_read_unlock();
break;
default:
return -EIO;
}
- fl->fl_type = ffl->type;
+ fl->c.flc_type = ffl->type;
return 0;
}
@@ -2464,10 +2468,10 @@ static void fuse_lk_fill(struct fuse_args *args, struct file *file,
memset(inarg, 0, sizeof(*inarg));
inarg->fh = ff->fh;
- inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
+ inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner);
inarg->lk.start = fl->fl_start;
inarg->lk.end = fl->fl_end;
- inarg->lk.type = fl->fl_type;
+ inarg->lk.type = fl->c.flc_type;
inarg->lk.pid = pid;
if (flock)
inarg->lk_flags |= FUSE_LK_FLOCK;
@@ -2504,8 +2508,8 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
struct fuse_mount *fm = get_fuse_mount(inode);
FUSE_ARGS(args);
struct fuse_lk_in inarg;
- int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
- struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
+ int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
+ struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL;
pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
int err;
@@ -2514,10 +2518,6 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
return -ENOLCK;
}
- /* Unlock on close is handled by the flush method */
- if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
- return 0;
-
fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
err = fuse_simple_request(fm, &args);
@@ -2805,7 +2805,7 @@ static void fuse_do_truncate(struct file *file)
attr.ia_file = file;
attr.ia_valid |= ATTR_FILE;
- fuse_do_setattr(file_dentry(file), &attr, file);
+ fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
@@ -2901,6 +2901,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (iov_iter_rw(iter) == WRITE) {
fuse_write_update_attr(inode, pos, ret);
+ /* For extending writes we already hold exclusive lock */
if (ret < 0 && offset + count > i_size)
fuse_do_truncate(file);
}
@@ -2910,7 +2911,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
- int err = filemap_write_and_wait_range(inode->i_mapping, start, -1);
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
if (!err)
fuse_sync_writes(inode);
@@ -2933,11 +2934,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
.mode = mode
};
int err;
- bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
- (mode & (FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_ZERO_RANGE));
-
- bool block_faults = FUSE_IS_DAX(inode) && lock_inode;
+ bool block_faults = FUSE_IS_DAX(inode) &&
+ (!(mode & FALLOC_FL_KEEP_SIZE) ||
+ (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_ZERO_RANGE))
@@ -2946,22 +2945,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (fm->fc->no_fallocate)
return -EOPNOTSUPP;
- if (lock_inode) {
- inode_lock(inode);
- if (block_faults) {
- filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
- if (err)
- goto out;
- }
+ inode_lock(inode);
+ if (block_faults) {
+ filemap_invalidate_lock(inode->i_mapping);
+ err = fuse_dax_break_layouts(inode, 0, -1);
+ if (err)
+ goto out;
+ }
- if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
- loff_t endbyte = offset + length - 1;
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
+ loff_t endbyte = offset + length - 1;
- err = fuse_writeback_range(inode, offset, endbyte);
- if (err)
- goto out;
- }
+ err = fuse_writeback_range(inode, offset, endbyte);
+ if (err)
+ goto out;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -2971,6 +2968,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
goto out;
}
+ err = file_modified(file);
+ if (err)
+ goto out;
+
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -3005,8 +3006,7 @@ out:
if (block_faults)
filemap_invalidate_unlock(inode->i_mapping);
- if (lock_inode)
- inode_unlock(inode);
+ inode_unlock(inode);
fuse_flush_time_update(inode);
@@ -3035,6 +3035,8 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
.flags = flags
};
struct fuse_write_out outarg;
+ struct fuse_copy_file_range_out outarg_64;
+ u64 bytes_copied;
ssize_t err;
/* mark unstable when write-back is not used, and file_out gets
* extended */
@@ -3084,30 +3086,51 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (is_unstable)
set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
- args.opcode = FUSE_COPY_FILE_RANGE;
+ args.opcode = FUSE_COPY_FILE_RANGE_64;
args.nodeid = ff_in->nodeid;
args.in_numargs = 1;
args.in_args[0].size = sizeof(inarg);
args.in_args[0].value = &inarg;
args.out_numargs = 1;
- args.out_args[0].size = sizeof(outarg);
- args.out_args[0].value = &outarg;
+ args.out_args[0].size = sizeof(outarg_64);
+ args.out_args[0].value = &outarg_64;
+ if (fc->no_copy_file_range_64) {
+fallback:
+ /* Fall back to old op that can't handle large copy length */
+ args.opcode = FUSE_COPY_FILE_RANGE;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
+ inarg.len = len = min_t(size_t, len, UINT_MAX & PAGE_MASK);
+ }
err = fuse_simple_request(fm, &args);
if (err == -ENOSYS) {
- fc->no_copy_file_range = 1;
- err = -EOPNOTSUPP;
+ if (fc->no_copy_file_range_64) {
+ fc->no_copy_file_range = 1;
+ err = -EOPNOTSUPP;
+ } else {
+ fc->no_copy_file_range_64 = 1;
+ goto fallback;
+ }
}
if (err)
goto out;
+ bytes_copied = fc->no_copy_file_range_64 ?
+ outarg.size : outarg_64.bytes_copied;
+
+ if (bytes_copied > len) {
+ err = -EIO;
+ goto out;
+ }
+
truncate_inode_pages_range(inode_out->i_mapping,
ALIGN_DOWN(pos_out, PAGE_SIZE),
- ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
+ ALIGN(pos_out + bytes_copied, PAGE_SIZE) - 1);
file_update_time(file_out);
- fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
+ fuse_write_update_attr(inode_out, pos_out + bytes_copied, bytes_copied);
- err = outarg.size;
+ err = bytes_copied;
out:
if (is_unstable)
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
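As a worked example of the fallback clamp above (assuming 4 KiB pages; the numbers are illustrative, not lines from the patch), the legacy FUSE_COPY_FILE_RANGE reply only carries a 32-bit size, so the requested length is capped to the largest page-aligned value that fits:

/* PAGE_MASK == ~0xFFFUL, so UINT_MAX & PAGE_MASK == 0xFFFFF000,
 * i.e. 4 GiB - 4 KiB, the largest page-aligned 32-bit length. */
size_t want = 5ULL << 30;					/* 5 GiB requested */
size_t sent = min_t(size_t, want, UINT_MAX & PAGE_MASK);	/* ~4 GiB per call */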
@@ -3130,8 +3153,8 @@ static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
len, flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src_file, src_off, dst_file,
- dst_off, len, flags);
+ ret = splice_copy_file_range(src_file, src_off, dst_file,
+ dst_off, len);
return ret;
}
@@ -3147,8 +3170,8 @@ static const struct file_operations fuse_file_operations = {
.lock = fuse_file_lock,
.get_unmapped_area = thp_get_unmapped_area,
.flock = fuse_file_flock,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
+ .splice_read = fuse_splice_read,
+ .splice_write = fuse_splice_write,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
@@ -3157,31 +3180,36 @@ static const struct file_operations fuse_file_operations = {
};
static const struct address_space_operations fuse_file_aops = {
- .readpage = fuse_readpage,
+ .read_folio = fuse_read_folio,
.readahead = fuse_readahead,
- .writepage = fuse_writepage,
.writepages = fuse_writepages,
- .launder_page = fuse_launder_page,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .launder_folio = fuse_launder_folio,
+ .dirty_folio = iomap_dirty_folio,
+ .release_folio = iomap_release_folio,
+ .invalidate_folio = iomap_invalidate_folio,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
+ .migrate_folio = filemap_migrate_folio,
.bmap = fuse_bmap,
.direct_IO = fuse_direct_IO,
- .write_begin = fuse_write_begin,
- .write_end = fuse_write_end,
};
-void fuse_init_file_inode(struct inode *inode)
+void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_conn *fc = get_fuse_conn(inode);
inode->i_fop = &fuse_file_operations;
inode->i_data.a_ops = &fuse_file_aops;
+ if (fc->writeback_cache)
+ mapping_set_writeback_may_deadlock_on_reclaim(&inode->i_data);
INIT_LIST_HEAD(&fi->write_files);
INIT_LIST_HEAD(&fi->queued_writes);
fi->writectr = 0;
+ fi->iocachectr = 0;
init_waitqueue_head(&fi->page_waitq);
- fi->writepages = RB_ROOT;
+ init_waitqueue_head(&fi->direct_io_waitq);
if (IS_ENABLED(CONFIG_FUSE_DAX))
- fuse_dax_inode_init(inode);
+ fuse_dax_inode_init(inode, flags);
}
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
new file mode 100644
index 000000000000..134bf44aff0d
--- /dev/null
+++ b/fs/fuse/fuse_dev_i.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * FUSE: Filesystem in Userspace
+ * Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
+ */
+#ifndef _FS_FUSE_DEV_I_H
+#define _FS_FUSE_DEV_I_H
+
+#include <linux/types.h>
+
+/* Ordinary requests have even IDs, while interrupt IDs are odd */
+#define FUSE_INT_REQ_BIT (1ULL << 0)
+#define FUSE_REQ_ID_STEP (1ULL << 1)
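A minimal sketch of how this even/odd split can be used (the helper name is hypothetical; these lines are not part of the patch):

static inline u64 fuse_interrupt_req_id(u64 req_id)
{
	/* an interrupt reuses its request's even ID with the low bit set */
	return req_id | FUSE_INT_REQ_BIT;	/* 2 -> 3, 4 -> 5, ... */
}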
+
+extern struct wait_queue_head fuse_dev_waitq;
+
+struct fuse_arg;
+struct fuse_args;
+struct fuse_pqueue;
+struct fuse_req;
+struct fuse_iqueue;
+struct fuse_forget_link;
+
+struct fuse_copy_state {
+ struct fuse_req *req;
+ struct iov_iter *iter;
+ struct pipe_buffer *pipebufs;
+ struct pipe_buffer *currbuf;
+ struct pipe_inode_info *pipe;
+ unsigned long nr_segs;
+ struct page *pg;
+ unsigned int len;
+ unsigned int offset;
+ bool write:1;
+ bool move_folios:1;
+ bool is_uring:1;
+ struct {
+ unsigned int copied_sz; /* copied size into the user buffer */
+ } ring;
+};
+
+#define FUSE_DEV_SYNC_INIT ((struct fuse_dev *) 1)
+#define FUSE_DEV_PTR_MASK (~1UL)
+
+static inline struct fuse_dev *__fuse_get_dev(struct file *file)
+{
+ /*
+ * Lockless access is OK, because file->private_data is set
+ * once during mount and is valid until the file is released.
+ */
+ struct fuse_dev *fud = READ_ONCE(file->private_data);
+
+ return (typeof(fud)) ((unsigned long) fud & FUSE_DEV_PTR_MASK);
+}
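One hedged reading of the pointer tagging above (an assumption based on the names, not stated by the patch): during synchronous INIT, file->private_data holds the FUSE_DEV_SYNC_INIT sentinel rather than a real fuse_dev, and masking with FUSE_DEV_PTR_MASK makes __fuse_get_dev() yield NULL until a real device is installed.

/* illustration only: check for the sync-init sentinel */
bool init_pending = READ_ONCE(file->private_data) == (void *) FUSE_DEV_SYNC_INIT;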
+
+struct fuse_dev *fuse_get_dev(struct file *file);
+
+unsigned int fuse_req_hash(u64 unique);
+struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique);
+
+void fuse_dev_end_requests(struct list_head *head);
+
+void fuse_copy_init(struct fuse_copy_state *cs, bool write,
+ struct iov_iter *iter);
+void fuse_copy_finish(struct fuse_copy_state *cs);
+int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs,
+ unsigned int argpages, struct fuse_arg *args,
+ int zeroing);
+int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
+ unsigned int nbytes);
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget);
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list);
+
+#endif
+
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 198637b41e19..7f16049387d1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -35,18 +35,45 @@
/** Default max number of pages that can be used in a single read request */
#define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32
-/** Maximum of max_pages received in init_out */
-#define FUSE_MAX_MAX_PAGES 256
-
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
-/** It could be as large as PATH_MAX, but would that have any uses? */
-#define FUSE_NAME_MAX 1024
+/** Maximum length of a filename, not including terminating null */
+
+/* maximum, small enough for FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_LOW_MAX 1024
+/* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_MAX (PATH_MAX - 1)
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
+/* Frequency (in seconds) of request timeout checks, if opted into */
+#define FUSE_TIMEOUT_TIMER_FREQ 15
+
+/** Frequency (in jiffies) of request timeout checks, if opted into */
+extern const unsigned long fuse_timeout_timer_freq;
+
+/*
+ * Dentry invalidation workqueue period, in seconds. The value of this
+ * parameter shall be >= FUSE_DENTRY_INVAL_FREQ_MIN seconds, or 0 (zero), in
+ * which case no workqueue will be created.
+ */
+extern unsigned inval_wq __read_mostly;
+
+/** Maximum of max_pages received in init_out */
+extern unsigned int fuse_max_pages_limit;
+/*
+ * Default timeout (in seconds) for the server to reply to a request
+ * before the connection is aborted, if no timeout was specified on mount.
+ */
+extern unsigned int fuse_default_req_timeout;
+/*
+ * Max timeout (in seconds) for the server to reply to a request before
+ * the connection is aborted.
+ */
+extern unsigned int fuse_max_req_timeout;
+
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -54,8 +81,8 @@ extern struct list_head fuse_conn_list;
extern struct mutex fuse_mutex;
/** Module parameters */
-extern unsigned max_user_bgreq;
-extern unsigned max_user_congthresh;
+extern unsigned int max_user_bgreq;
+extern unsigned int max_user_congthresh;
/* One forget request */
struct fuse_forget_link {
@@ -63,6 +90,29 @@ struct fuse_forget_link {
struct fuse_forget_link *next;
};
+/* Submount lookup tracking */
+struct fuse_submount_lookup {
+ /** Refcount */
+ refcount_t count;
+
+ /** Unique ID, which identifies the inode between userspace
+ * and kernel */
+ u64 nodeid;
+
+ /** The request used for sending the FORGET message */
+ struct fuse_forget_link *forget;
+};
+
+/** Container for data related to mapping to backing file */
+struct fuse_backing {
+ struct file *file;
+ struct cred *cred;
+
+ /** refcount */
+ refcount_t count;
+ struct rcu_head rcu;
+};
+
/** FUSE inode */
struct fuse_inode {
/** Inode data */
@@ -88,6 +138,9 @@ struct fuse_inode {
preserve the original mode */
umode_t orig_i_mode;
+ /* Cache birthtime */
+ struct timespec64 i_btime;
+
/** 64 bit inode number */
u64 orig_ino;
@@ -95,7 +148,7 @@ struct fuse_inode {
u64 attr_version;
union {
- /* Write related fields (regular file only) */
+ /* read/write io cache (regular file only) */
struct {
/* Files usable in writepage. Protected by fi->lock */
struct list_head write_files;
@@ -107,11 +160,14 @@ struct fuse_inode {
* (FUSE_NOWRITE) means more writes are blocked */
int writectr;
+ /** Number of files/maps using page cache */
+ int iocachectr;
+
/* Waitq for writepage completion */
wait_queue_head_t page_waitq;
- /* List of writepage requestst (pending or sent) */
- struct rb_root writepages;
+ /* waitq for direct-io completion */
+ wait_queue_head_t direct_io_waitq;
};
/* readdir cache (directory only) */
@@ -155,6 +211,18 @@ struct fuse_inode {
*/
struct fuse_inode_dax *dax;
#endif
+ /** Submount specific lookup tracking */
+ struct fuse_submount_lookup *submount_lookup;
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ /** Reference to backing file in passthrough mode */
+ struct fuse_backing *fb;
+#endif
+
+ /*
+ * The underlying inode->i_blkbits value will not be modified,
+ * so preserve the blocksize specified by the server.
+ */
+ u8 cached_i_blkbits;
};
/** FUSE inode state bits */
@@ -167,19 +235,28 @@ enum {
FUSE_I_SIZE_UNSTABLE,
/* Bad inode */
FUSE_I_BAD,
+ /* Has btime */
+ FUSE_I_BTIME,
+ /* Wants or already has page cache IO */
+ FUSE_I_CACHE_IO_MODE,
+ /*
+ * Client has exclusive access to the inode, either because fs is local
+ * or the fuse server has an exclusive "lease" on distributed fs
+ */
+ FUSE_I_EXCLUSIVE,
};
struct fuse_conn;
struct fuse_mount;
-struct fuse_release_args;
+union fuse_file_args;
/** FUSE specific file data */
struct fuse_file {
/** Fuse connection for this file */
struct fuse_mount *fm;
- /* Argument space reserved for release */
- struct fuse_release_args *release_args;
+ /* Argument space reserved for open/release */
+ union fuse_file_args *args;
/** Kernel file handle guaranteed to be unique */
u64 kh;
@@ -201,12 +278,6 @@ struct fuse_file {
/* Readdir related */
struct {
- /*
- * Protects below fields against (crazy) parallel readdir on
- * same open file. Uncontended in the normal case.
- */
- struct mutex lock;
-
/* Dir stream position */
loff_t pos;
@@ -224,6 +295,15 @@ struct fuse_file {
/** Wait queue head for poll */
wait_queue_head_t poll_wait;
+ /** Does file hold a fi->iocachectr refcount? */
+ enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode;
+
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ /** Reference to backing file in passthrough mode */
+ struct file *passthrough;
+ const struct cred *cred;
+#endif
+
/** Has flock been performed on this file? */
bool flock:1;
};
@@ -240,8 +320,8 @@ struct fuse_arg {
void *value;
};
-/** FUSE page descriptor */
-struct fuse_page_desc {
+/** FUSE folio descriptor */
+struct fuse_folio_desc {
unsigned int length;
unsigned int offset;
};
@@ -249,27 +329,47 @@ struct fuse_page_desc {
struct fuse_args {
uint64_t nodeid;
uint32_t opcode;
- unsigned short in_numargs;
- unsigned short out_numargs;
+ uint8_t in_numargs;
+ uint8_t out_numargs;
+ uint8_t ext_idx;
bool force:1;
bool noreply:1;
bool nocreds:1;
bool in_pages:1;
bool out_pages:1;
+ bool user_pages:1;
bool out_argvar:1;
bool page_zeroing:1;
bool page_replace:1;
bool may_block:1;
- struct fuse_in_arg in_args[3];
+ bool is_ext:1;
+ bool is_pinned:1;
+ bool invalidate_vmap:1;
+ struct fuse_in_arg in_args[4];
struct fuse_arg out_args[2];
void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
+ /* Used for kvec iter backed by vmalloc address */
+ void *vmap_base;
};
struct fuse_args_pages {
struct fuse_args args;
- struct page **pages;
- struct fuse_page_desc *descs;
- unsigned int num_pages;
+ struct folio **folios;
+ struct fuse_folio_desc *descs;
+ unsigned int num_folios;
+};
+
+struct fuse_release_args {
+ struct fuse_args args;
+ struct fuse_release_in inarg;
+ struct inode *inode;
+};
+
+union fuse_file_args {
+ /* Used during open() */
+ struct fuse_open_out open_outarg;
+ /* Used during release() */
+ struct fuse_release_args release_args;
};
#define FUSE_ARGS(args) struct fuse_args args = {}
@@ -313,6 +413,7 @@ struct fuse_io_priv {
* FR_FINISHED: request is finished
* FR_PRIVATE: request is on private list
* FR_ASYNC: request is asynchronous
+ * FR_URING: request is handled through fuse-io-uring
*/
enum fuse_req_flag {
FR_ISREPLY,
@@ -327,6 +428,7 @@ enum fuse_req_flag {
FR_FINISHED,
FR_PRIVATE,
FR_ASYNC,
+ FR_URING,
};
/**
@@ -373,6 +475,13 @@ struct fuse_req {
/** fuse_mount this request belongs to */
struct fuse_mount *fm;
+
+#ifdef CONFIG_FUSE_IO_URING
+ void *ring_entry;
+ void *ring_queue;
+#endif
+ /** When (in jiffies) the request was created */
+ unsigned long create_time;
};
struct fuse_iqueue;
@@ -387,22 +496,19 @@ struct fuse_iqueue;
*/
struct fuse_iqueue_ops {
/**
- * Signal that a forget has been queued
+ * Send one forget
*/
- void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
/**
- * Signal that an INTERRUPT request has been queued
+ * Send interrupt for request
*/
- void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
- * Signal that a request has been queued
+ * Send one request
*/
- void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
* Clean up when fuse_iqueue is destroyed
@@ -480,6 +586,18 @@ struct fuse_dev {
struct list_head entry;
};
+enum fuse_dax_mode {
+ FUSE_DAX_INODE_DEFAULT, /* default */
+ FUSE_DAX_ALWAYS, /* "-o dax=always" */
+ FUSE_DAX_NEVER, /* "-o dax=never" */
+ FUSE_DAX_INODE_USER, /* "-o dax=inode" */
+};
+
+static inline bool fuse_is_inode_dax_mode(enum fuse_dax_mode mode)
+{
+ return mode == FUSE_DAX_INODE_DEFAULT || mode == FUSE_DAX_INODE_USER;
+}
+
struct fuse_fs_context {
int fd;
struct file *file;
@@ -497,7 +615,7 @@ struct fuse_fs_context {
bool no_control:1;
bool no_force_umount:1;
bool legacy_opts_show:1;
- bool dax:1;
+ enum fuse_dax_mode dax_mode;
unsigned int max_read;
unsigned int blksize;
const char *subtype;
@@ -533,6 +651,11 @@ struct fuse_conn {
/** Number of fuse_dev's */
atomic_t dev_count;
+ /** Current epoch for up-to-date dentries */
+ atomic_t epoch;
+
+ struct work_struct epoch_work;
+
struct rcu_head rcu;
/** The user id for this mount */
@@ -614,7 +737,7 @@ struct fuse_conn {
/** Connection successful. Only set in INIT */
unsigned conn_init:1;
- /** Do readpages asynchronously? Only set in INIT */
+ /** Do readahead asynchronously? Only set in INIT */
unsigned async_read:1;
/** Return an unique read error after abort. Only set in INIT */
@@ -747,6 +870,9 @@ struct fuse_conn {
/** Does the filesystem support copy_file_range? */
unsigned no_copy_file_range:1;
+ /** Does the filesystem support copy_file_range_64? */
+ unsigned no_copy_file_range_64:1;
+
/* Send DESTROY request */
unsigned int destroy:1;
@@ -765,30 +891,66 @@ struct fuse_conn {
/* Propagate syncfs() to server */
unsigned int sync_fs:1;
+ /* Initialize security xattrs when creating a new inode */
+ unsigned int init_security:1;
+
+ /* Add supplementary group info when creating a new inode */
+ unsigned int create_supp_group:1;
+
+ /* Does the filesystem support per inode DAX? */
+ unsigned int inode_dax:1;
+
+ /* Is tmpfile not implemented by fs? */
+ unsigned int no_tmpfile:1;
+
+ /* Relax restrictions to allow shared mmap in FOPEN_DIRECT_IO mode */
+ unsigned int direct_io_allow_mmap:1;
+
+ /* Is statx not implemented by fs? */
+ unsigned int no_statx:1;
+
+ /** Passthrough support for read/write IO */
+ unsigned int passthrough:1;
+
+ /* Use pages instead of pointer for kernel I/O */
+ unsigned int use_pages_for_kvec_io:1;
+
+ /* Is link not implemented by fs? */
+ unsigned int no_link:1;
+
+ /* Is synchronous FUSE_INIT allowed? */
+ unsigned int sync_init:1;
+
+ /* Use io_uring for communication */
+ unsigned int io_uring;
+
+ /** Maximum stack depth for passthrough backing files */
+ int max_stack_depth;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
/** Negotiated minor version */
unsigned minor;
- /** Entry on the fuse_mount_list */
+ /** Entry on the fuse_conn_list */
struct list_head entry;
/** Device ID from the root super block */
dev_t dev;
- /** Dentries in the control filesystem */
- struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES];
-
- /** number of dentries used in the above array */
- int ctl_ndents;
-
/** Key for lock owner ID scrambling */
u32 scramble_key[4];
/** Version counter for attribute changes */
atomic64_t attr_version;
+ /** Version counter for evict inode */
+ atomic64_t evict_ctr;
+
+ /* maximum file name length */
+ u32 name_max;
+
/** Called on final put */
void (*release)(struct fuse_conn *);
@@ -802,6 +964,9 @@ struct fuse_conn {
struct list_head devices;
#ifdef CONFIG_FUSE_DAX
+ /* Dax mode */
+ enum fuse_dax_mode dax_mode;
+
/* Dax specific conn data, non-NULL if DAX is enabled */
struct fuse_conn_dax *dax;
#endif
@@ -811,6 +976,25 @@ struct fuse_conn {
/* New writepages go into this bucket */
struct fuse_sync_bucket __rcu *curr_bucket;
+
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ /** IDR for backing files ids */
+ struct idr backing_files_map;
+#endif
+
+#ifdef CONFIG_FUSE_IO_URING
+ /** uring connection information*/
+ struct fuse_ring *ring;
+#endif
+
+ /** Only used if the connection opts into request timeouts */
+ struct {
+ /* Worker for checking if any requests have timed out */
+ struct delayed_work work;
+
+ /* Request timeout (in jiffies). 0 = no timeout */
+ unsigned int req_timeout;
+ } timeout;
};
/*
@@ -832,8 +1016,22 @@ struct fuse_mount {
/* Entry on fc->mounts */
struct list_head fc_entry;
+ struct rcu_head rcu;
};
+/*
+ * Empty header for FUSE opcodes without specific header needs.
+ * Used as a placeholder in args->in_args[0] for consistency
+ * across all FUSE operations, simplifying request handling.
+ */
+struct fuse_zero_header {};
+
+static inline void fuse_set_zero_arg0(struct fuse_args *args)
+{
+ args->in_args[0].size = sizeof(struct fuse_zero_header);
+ args->in_args[0].value = NULL;
+}
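A hedged usage sketch of the zero-arg helper (the surrounding variables are assumed for illustration; this is not part of the patch): an opcode with no request-specific header keeps slot 0 as the zero-sized placeholder and puts its real payload in slot 1.

FUSE_ARGS(args);

fuse_set_zero_arg0(&args);
args.opcode = FUSE_REMOVEXATTR;			/* example: no dedicated in-header */
args.in_numargs = 2;
args.in_args[1].size = strlen(name) + 1;	/* 'name' assumed in scope */
args.in_args[1].value = name;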
+
static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
{
return sb->s_fs_info;
@@ -854,7 +1052,7 @@ static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
return get_fuse_mount_super(inode->i_sb)->fc;
}
-static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
+static inline struct fuse_inode *get_fuse_inode(const struct inode *inode)
{
return container_of(inode, struct fuse_inode, inode);
}
@@ -874,6 +1072,11 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
return atomic64_read(&fc->attr_version);
}
+static inline u64 fuse_get_evict_ctr(struct fuse_conn *fc)
+{
+ return atomic64_read(&fc->evict_ctr);
+}
+
static inline bool fuse_stale_inode(const struct inode *inode, int generation,
struct fuse_attr *attr)
{
@@ -883,7 +1086,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation,
static inline void fuse_make_bad(struct inode *inode)
{
- remove_inode_hash(inode);
set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}
@@ -892,25 +1094,32 @@ static inline bool fuse_is_bad(struct inode *inode)
return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
}
-static inline struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
- struct fuse_page_desc **desc)
+static inline bool fuse_inode_is_exclusive(const struct inode *inode)
{
- struct page **pages;
+ const struct fuse_inode *fi = get_fuse_inode(inode);
- pages = kzalloc(npages * (sizeof(struct page *) +
- sizeof(struct fuse_page_desc)), flags);
- *desc = (void *) (pages + npages);
+ return test_bit(FUSE_I_EXCLUSIVE, &fi->state);
+}
- return pages;
+static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags,
+ struct fuse_folio_desc **desc)
+{
+ struct folio **folios;
+
+ folios = kzalloc(nfolios * (sizeof(struct folio *) +
+ sizeof(struct fuse_folio_desc)), flags);
+ *desc = (void *) (folios + nfolios);
+
+ return folios;
}
-static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
- unsigned int index,
- unsigned int nr_pages)
+static inline void fuse_folio_descs_length_init(struct fuse_folio_desc *descs,
+ unsigned int index,
+ unsigned int nr_folios)
{
int i;
- for (i = index; i < index + nr_pages; i++)
+ for (i = index; i < index + nr_folios; i++)
descs[i].length = PAGE_SIZE - descs[i].offset;
}
@@ -927,14 +1136,14 @@ static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket)
extern const struct file_operations fuse_dev_operations;
extern const struct dentry_operations fuse_dentry_operations;
-extern const struct dentry_operations fuse_root_dentry_operations;
/**
* Get a filled in inode
*/
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int generation, struct fuse_attr *attr,
- u64 attr_valid, u64 attr_version);
+ u64 attr_valid, u64 attr_version,
+ u64 evict_ctr);
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode);
@@ -947,10 +1156,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp);
-
/*
* Initialize READ or READDIR request
*/
@@ -963,7 +1168,7 @@ struct fuse_io_args {
struct {
struct fuse_write_in in;
struct fuse_write_out out;
- bool page_locked;
+ bool folio_locked;
} write;
};
struct fuse_args_pages ap;
@@ -975,14 +1180,9 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
size_t count, int opcode);
-/**
- * Send OPEN or OPENDIR request
- */
-int fuse_open_common(struct inode *inode, struct file *file, bool isdir);
-
-struct fuse_file *fuse_file_alloc(struct fuse_mount *fm);
+struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release);
void fuse_file_free(struct fuse_file *ff);
-void fuse_finish_open(struct inode *inode, struct file *file);
+int fuse_finish_open(struct inode *inode, struct file *file);
void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
unsigned int flags);
@@ -1007,7 +1207,7 @@ int fuse_notify_poll_wakeup(struct fuse_conn *fc,
/**
* Initialize file operations on a regular file
*/
-void fuse_init_file_inode(struct inode *inode);
+void fuse_init_file_inode(struct inode *inode, unsigned int flags);
/**
* Initialize inode operations on regular files and special files
@@ -1028,10 +1228,13 @@ void fuse_init_symlink(struct inode *inode);
* Change attributes of an inode
*/
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx,
u64 attr_valid, u64 attr_version);
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
- u64 attr_valid, u32 cache_mask);
+ struct fuse_statx *sx,
+ u64 attr_valid, u32 cache_mask,
+ u64 evict_ctr);
u32 fuse_get_cache_mask(struct inode *inode);
@@ -1051,11 +1254,31 @@ void __exit fuse_ctl_cleanup(void);
/**
* Simple request sending that does request allocation and freeing
*/
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args);
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args);
+
+static inline ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+{
+ return __fuse_simple_request(&invalid_mnt_idmap, fm, args);
+}
+
+static inline ssize_t fuse_simple_idmap_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
+{
+ return __fuse_simple_request(idmap, fm, args);
+}
+
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
gfp_t gfp_flags);
/**
+ * Assign a unique id to a fuse request
+ */
+void fuse_request_assign_unique(struct fuse_iqueue *fiq, struct fuse_req *req);
+
+/**
* End a finished request
*/
void fuse_request_end(struct fuse_req *req);
@@ -1064,6 +1287,14 @@ void fuse_request_end(struct fuse_req *req);
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
+/* Check if any requests timed out */
+void fuse_check_timeout(struct work_struct *work);
+
+void fuse_dentry_tree_init(void);
+void fuse_dentry_tree_cleanup(void);
+
+void fuse_epoch_work(struct work_struct *work);
+
/**
* Invalidate inode attributes
*/
@@ -1081,7 +1312,10 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
void fuse_invalidate_atime(struct inode *inode);
-u64 entry_attr_timeout(struct fuse_entry_out *o);
+u64 fuse_time_to_jiffies(u64 sec, u32 nsec);
+#define ATTR_TIMEOUT(o) \
+ fuse_time_to_jiffies((o)->attr_valid, (o)->attr_valid_nsec)
+
void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
/**
@@ -1090,6 +1324,11 @@ void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
/**
+ * Initialize the fuse processing queue
+ */
+void fuse_pqueue_init(struct fuse_pqueue *fpq);
+
+/**
* Initialize fuse_conn
*/
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
@@ -1105,7 +1344,7 @@ struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc);
struct fuse_dev *fuse_dev_alloc(void);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc);
void fuse_dev_free(struct fuse_dev *fud);
-void fuse_send_init(struct fuse_mount *fm);
+int fuse_send_init(struct fuse_mount *fm);
/**
* Fill in superblock and initialize fuse connection
@@ -1154,7 +1393,7 @@ bool fuse_invalid_attr(struct fuse_attr *attr);
/**
* Is current process allowed to perform filesystem operation?
*/
-int fuse_allow_current_process(struct fuse_conn *fc);
+bool fuse_allow_current_process(struct fuse_conn *fc);
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id);
@@ -1195,7 +1434,13 @@ int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
* then the dentry is unhashed (d_delete()).
*/
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
- u64 child_nodeid, struct qstr *name);
+ u64 child_nodeid, struct qstr *name, u32 flags);
+
+/*
+ * Try to prune this inode. If neither the inode itself nor dentries associated
+ * with this inode have any external reference, then the inode can be freed.
+ */
+void fuse_try_prune_one_inode(struct fuse_conn *fc, u64 nodeid);
int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
bool isdir);
@@ -1224,8 +1469,8 @@ bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written);
int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file);
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file);
void fuse_set_initialized(struct fuse_conn *fc);
@@ -1238,13 +1483,13 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
size_t size);
ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
int fuse_removexattr(struct inode *inode, const char *name);
-extern const struct xattr_handler *fuse_xattr_handlers[];
-extern const struct xattr_handler *fuse_acl_xattr_handlers[];
-extern const struct xattr_handler *fuse_no_acl_xattr_handlers[];
+extern const struct xattr_handler * const fuse_xattr_handlers[];
struct posix_acl;
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu);
-int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type);
+int fuse_set_acl(struct mnt_idmap *, struct dentry *dentry,
struct posix_acl *acl, int type);
/* readdir.c */
@@ -1269,11 +1514,13 @@ ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to);
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from);
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end);
-int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev);
+int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode mode,
+ struct dax_device *dax_dev);
void fuse_dax_conn_free(struct fuse_conn *fc);
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi);
-void fuse_dax_inode_init(struct inode *inode);
+void fuse_dax_inode_init(struct inode *inode, unsigned int flags);
void fuse_dax_inode_cleanup(struct inode *inode);
+void fuse_dax_dontcache(struct inode *inode, unsigned int flags);
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);
void fuse_dax_cancel_work(struct fuse_conn *fc);
@@ -1281,15 +1528,100 @@ void fuse_dax_cancel_work(struct fuse_conn *fc);
long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
-int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-int fuse_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
+int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int fuse_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
-/* file.c */
+/* iomode.c */
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff);
+int fuse_inode_uncached_io_start(struct fuse_inode *fi,
+ struct fuse_backing *fb);
+void fuse_inode_uncached_io_end(struct fuse_inode *fi);
+int fuse_file_io_open(struct file *file, struct inode *inode);
+void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);
+
+/* file.c */
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
unsigned int open_flags, bool isdir);
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
unsigned int open_flags, fl_owner_t id, bool isdir);
+/* backing.c */
+#ifdef CONFIG_FUSE_PASSTHROUGH
+struct fuse_backing *fuse_backing_get(struct fuse_backing *fb);
+void fuse_backing_put(struct fuse_backing *fb);
+struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc, int backing_id);
+#else
+
+static inline struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
+{
+ return NULL;
+}
+
+static inline void fuse_backing_put(struct fuse_backing *fb)
+{
+}
+static inline struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc,
+ int backing_id)
+{
+ return NULL;
+}
+#endif
+
+void fuse_backing_files_init(struct fuse_conn *fc);
+void fuse_backing_files_free(struct fuse_conn *fc);
+int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map);
+int fuse_backing_close(struct fuse_conn *fc, int backing_id);
+
+/* passthrough.c */
+static inline struct fuse_backing *fuse_inode_backing(struct fuse_inode *fi)
+{
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ return READ_ONCE(fi->fb);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct fuse_backing *fuse_inode_backing_set(struct fuse_inode *fi,
+ struct fuse_backing *fb)
+{
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ return xchg(&fi->fb, fb);
+#else
+ return NULL;
+#endif
+}
+
+struct fuse_backing *fuse_passthrough_open(struct file *file, int backing_id);
+void fuse_passthrough_release(struct fuse_file *ff, struct fuse_backing *fb);
+
+static inline struct file *fuse_file_passthrough(struct fuse_file *ff)
+{
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ return ff->passthrough;
+#else
+ return NULL;
+#endif
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t fuse_passthrough_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+ssize_t fuse_passthrough_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos,
+ size_t len, unsigned int flags);
+ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma);
+
+#ifdef CONFIG_SYSCTL
+extern int fuse_sysctl_register(void);
+extern void fuse_sysctl_unregister(void);
+#else
+#define fuse_sysctl_register() (0)
+#define fuse_sysctl_unregister() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
#endif /* _FS_FUSE_I_H */
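An illustrative aside on the CONFIG_FUSE_PASSTHROUGH handling above (not part of the patch): the !CONFIG_FUSE_PASSTHROUGH stubs and in-function #ifdefs let callers use the backing-file helpers unconditionally, with the compiler folding the feature away when passthrough is disabled. The helper below is hypothetical and only sketches that pattern.

/* Hypothetical caller: compiles with or without CONFIG_FUSE_PASSTHROUGH. */
static inline bool fuse_example_has_backing(struct fuse_inode *fi)
{
	/* fuse_inode_backing() simply returns NULL when passthrough is compiled out. */
	return fuse_inode_backing(fi) != NULL;
}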
diff --git a/fs/fuse/fuse_trace.h b/fs/fuse/fuse_trace.h
new file mode 100644
index 000000000000..bbe9ddd8c716
--- /dev/null
+++ b/fs/fuse/fuse_trace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fuse
+
+#if !defined(_TRACE_FUSE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUSE_H
+
+#include <linux/tracepoint.h>
+
+#define OPCODES \
+ EM( FUSE_LOOKUP, "FUSE_LOOKUP") \
+ EM( FUSE_FORGET, "FUSE_FORGET") \
+ EM( FUSE_GETATTR, "FUSE_GETATTR") \
+ EM( FUSE_SETATTR, "FUSE_SETATTR") \
+ EM( FUSE_READLINK, "FUSE_READLINK") \
+ EM( FUSE_SYMLINK, "FUSE_SYMLINK") \
+ EM( FUSE_MKNOD, "FUSE_MKNOD") \
+ EM( FUSE_MKDIR, "FUSE_MKDIR") \
+ EM( FUSE_UNLINK, "FUSE_UNLINK") \
+ EM( FUSE_RMDIR, "FUSE_RMDIR") \
+ EM( FUSE_RENAME, "FUSE_RENAME") \
+ EM( FUSE_LINK, "FUSE_LINK") \
+ EM( FUSE_OPEN, "FUSE_OPEN") \
+ EM( FUSE_READ, "FUSE_READ") \
+ EM( FUSE_WRITE, "FUSE_WRITE") \
+ EM( FUSE_STATFS, "FUSE_STATFS") \
+ EM( FUSE_RELEASE, "FUSE_RELEASE") \
+ EM( FUSE_FSYNC, "FUSE_FSYNC") \
+ EM( FUSE_SETXATTR, "FUSE_SETXATTR") \
+ EM( FUSE_GETXATTR, "FUSE_GETXATTR") \
+ EM( FUSE_LISTXATTR, "FUSE_LISTXATTR") \
+ EM( FUSE_REMOVEXATTR, "FUSE_REMOVEXATTR") \
+ EM( FUSE_FLUSH, "FUSE_FLUSH") \
+ EM( FUSE_INIT, "FUSE_INIT") \
+ EM( FUSE_OPENDIR, "FUSE_OPENDIR") \
+ EM( FUSE_READDIR, "FUSE_READDIR") \
+ EM( FUSE_RELEASEDIR, "FUSE_RELEASEDIR") \
+ EM( FUSE_FSYNCDIR, "FUSE_FSYNCDIR") \
+ EM( FUSE_GETLK, "FUSE_GETLK") \
+ EM( FUSE_SETLK, "FUSE_SETLK") \
+ EM( FUSE_SETLKW, "FUSE_SETLKW") \
+ EM( FUSE_ACCESS, "FUSE_ACCESS") \
+ EM( FUSE_CREATE, "FUSE_CREATE") \
+ EM( FUSE_INTERRUPT, "FUSE_INTERRUPT") \
+ EM( FUSE_BMAP, "FUSE_BMAP") \
+ EM( FUSE_DESTROY, "FUSE_DESTROY") \
+ EM( FUSE_IOCTL, "FUSE_IOCTL") \
+ EM( FUSE_POLL, "FUSE_POLL") \
+ EM( FUSE_NOTIFY_REPLY, "FUSE_NOTIFY_REPLY") \
+ EM( FUSE_BATCH_FORGET, "FUSE_BATCH_FORGET") \
+ EM( FUSE_FALLOCATE, "FUSE_FALLOCATE") \
+ EM( FUSE_READDIRPLUS, "FUSE_READDIRPLUS") \
+ EM( FUSE_RENAME2, "FUSE_RENAME2") \
+ EM( FUSE_LSEEK, "FUSE_LSEEK") \
+ EM( FUSE_COPY_FILE_RANGE, "FUSE_COPY_FILE_RANGE") \
+ EM( FUSE_SETUPMAPPING, "FUSE_SETUPMAPPING") \
+ EM( FUSE_REMOVEMAPPING, "FUSE_REMOVEMAPPING") \
+ EM( FUSE_SYNCFS, "FUSE_SYNCFS") \
+ EM( FUSE_TMPFILE, "FUSE_TMPFILE") \
+ EM( FUSE_STATX, "FUSE_STATX") \
+ EMe(CUSE_INIT, "CUSE_INIT")
+
+/*
+ * This will turn the above table into TRACE_DEFINE_ENUM() for each of the
+ * entries.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+OPCODES
+
+/* Now we redefine it with the table that __print_symbolic needs. */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+TRACE_EVENT(fuse_request_send,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(enum fuse_opcode, opcode)
+ __field(uint32_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->opcode = req->in.h.opcode;
+ __entry->len = req->in.h.len;
+ ),
+
+ TP_printk("connection %u req %llu opcode %u (%s) len %u ",
+ __entry->connection, __entry->unique, __entry->opcode,
+ __print_symbolic(__entry->opcode, OPCODES), __entry->len)
+);
+
+TRACE_EVENT(fuse_request_end,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(uint32_t, len)
+ __field(int32_t, error)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->len = req->out.h.len;
+ __entry->error = req->out.h.error;
+ ),
+
+ TP_printk("connection %u req %llu len %u error %d", __entry->connection,
+ __entry->unique, __entry->len, __entry->error)
+);
+
+#endif /* _TRACE_FUSE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE fuse_trace
+#include <trace/define_trace.h>
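A brief illustration of the EM()/EMe() X-macro mechanism used in fuse_trace.h above (not part of the patch): the single OPCODES table is expanded twice, once per definition of the macros.

/* First expansion: one enum registration per opcode, for tracefs. */
TRACE_DEFINE_ENUM(FUSE_LOOKUP);
TRACE_DEFINE_ENUM(FUSE_FORGET);
/* ...and so on for every entry... */

/*
 * Second expansion: the same table becomes the {value, name} map consumed
 * by __print_symbolic() in TP_printk().
 */
{FUSE_LOOKUP, "FUSE_LOOKUP"}, {FUSE_FORGET, "FUSE_FORGET"}, /* ... */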
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8b89e3ba7df3..819e50d66622 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -7,7 +7,10 @@
*/
#include "fuse_i.h"
+#include "fuse_dev_i.h"
+#include "dev_uring_i.h"
+#include <linux/dax.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
@@ -23,6 +26,7 @@
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>
+#include <uapi/linux/magic.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
@@ -31,10 +35,16 @@ MODULE_LICENSE("GPL");
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);
+DECLARE_WAIT_QUEUE_HEAD(fuse_dev_waitq);
static int set_global_limit(const char *val, const struct kernel_param *kp);
-unsigned max_user_bgreq;
+unsigned int fuse_max_pages_limit = 256;
+/* default is no timeout */
+unsigned int fuse_default_req_timeout;
+unsigned int fuse_max_req_timeout;
+
+unsigned int max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
&max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
@@ -42,7 +52,7 @@ MODULE_PARM_DESC(max_user_bgreq,
"Global limit for the maximum number of backgrounded requests an "
"unprivileged user can set");
-unsigned max_user_congthresh;
+unsigned int max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
&max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
@@ -50,8 +60,6 @@ MODULE_PARM_DESC(max_user_congthresh,
"Global limit for the maximum congestion threshold an "
"unprivileged user can set");
-#define FUSE_SUPER_MAGIC 0x65735546
-
#define FUSE_DEFAULT_BLKSIZE 512
/** Maximum number of outstanding background requests */
@@ -69,21 +77,37 @@ struct fuse_forget_link *fuse_alloc_forget(void)
return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}
+static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
+{
+ struct fuse_submount_lookup *sl;
+
+ sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
+ if (!sl)
+ return NULL;
+ sl->forget = fuse_alloc_forget();
+ if (!sl->forget)
+ goto out_free;
+
+ return sl;
+
+out_free:
+ kfree(sl);
+ return NULL;
+}
+
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
struct fuse_inode *fi;
- fi = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
+ fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
if (!fi)
return NULL;
- fi->i_time = 0;
- fi->inval_mask = 0;
- fi->nodeid = 0;
- fi->nlookup = 0;
- fi->attr_version = 0;
- fi->orig_ino = 0;
- fi->state = 0;
+ /* Initialize private data (i.e. everything except fi->inode) */
+ BUILD_BUG_ON(offsetof(struct fuse_inode, inode) != 0);
+ memset((void *) fi + sizeof(fi->inode), 0, sizeof(*fi) - sizeof(fi->inode));
+
+ fi->inval_mask = ~0;
mutex_init(&fi->mutex);
spin_lock_init(&fi->lock);
fi->forget = fuse_alloc_forget();
@@ -93,6 +117,9 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
goto out_free_forget;
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ fuse_inode_backing_set(fi, NULL);
+
return &fi->inode;
out_free_forget:
@@ -111,15 +138,32 @@ static void fuse_free_inode(struct inode *inode)
#ifdef CONFIG_FUSE_DAX
kfree(fi->dax);
#endif
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ fuse_backing_put(fuse_inode_backing(fi));
+
kmem_cache_free(fuse_inode_cachep, fi);
}
+static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
+ struct fuse_submount_lookup *sl)
+{
+ if (!refcount_dec_and_test(&sl->count))
+ return;
+
+ fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
+ sl->forget = NULL;
+ kfree(sl);
+}
+
static void fuse_evict_inode(struct inode *inode)
{
struct fuse_inode *fi = get_fuse_inode(inode);
/* Will write inode on close/munmap and in all other dirtiers */
- WARN_ON(inode->i_state & I_DIRTY_INODE);
+ WARN_ON(inode_state_read_once(inode) & I_DIRTY_INODE);
+
+ if (FUSE_IS_DAX(inode))
+ dax_break_layout_final(inode);
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
@@ -133,8 +177,22 @@ static void fuse_evict_inode(struct inode *inode)
fi->nlookup);
fi->forget = NULL;
}
+
+ if (fi->submount_lookup) {
+ fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
+ fi->submount_lookup = NULL;
+ }
+ /*
+ * Evict of non-deleted inode may race with outstanding
+ * LOOKUP/READDIRPLUS requests and result in inconsistency when
+ * the request finishes. Deal with that here by bumping a
+ * counter that can be compared to the starting value.
+ */
+ if (inode->i_nlink > 0)
+ atomic64_inc(&fc->evict_ctr);
}
if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+ WARN_ON(fi->iocachectr != 0);
WARN_ON(!list_empty(&fi->write_files));
WARN_ON(!list_empty(&fi->queued_writes));
}
@@ -164,16 +222,31 @@ static ino_t fuse_squash_ino(u64 ino64)
}
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
- u64 attr_valid, u32 cache_mask)
+ struct fuse_statx *sx,
+ u64 attr_valid, u32 cache_mask,
+ u64 evict_ctr)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
lockdep_assert_held(&fi->lock);
+ /*
+ * Clear basic stats from invalid mask.
+ *
+ * Don't do this if this is coming from a fuse_iget() call and there
+ * might have been a racing evict which would've invalidated the result
+ * if the attr_version would've been preserved.
+ *
+ * !evict_ctr -> this is create
+ * fi->attr_version != 0 -> this is not a new inode
+ * evict_ctr == fuse_get_evict_ctr() -> no evicts occurred during the request
+ */
+ if (!evict_ctr || fi->attr_version || evict_ctr == fuse_get_evict_ctr(fc))
+ set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);
+
fi->attr_version = atomic64_inc_return(&fc->attr_version);
fi->i_time = attr_valid;
- WRITE_ONCE(fi->inval_mask, 0);
inode->i_ino = fuse_squash_ino(attr->ino);
inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -181,22 +254,44 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
inode->i_uid = make_kuid(fc->user_ns, attr->uid);
inode->i_gid = make_kgid(fc->user_ns, attr->gid);
inode->i_blocks = attr->blocks;
- inode->i_atime.tv_sec = attr->atime;
- inode->i_atime.tv_nsec = attr->atimensec;
+
+ /* Sanitize nsecs */
+ attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
+ attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
+ attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);
+
+ inode_set_atime(inode, attr->atime, attr->atimensec);
/* mtime from server may be stale due to local buffered write */
if (!(cache_mask & STATX_MTIME)) {
- inode->i_mtime.tv_sec = attr->mtime;
- inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode_set_mtime(inode, attr->mtime, attr->mtimensec);
}
if (!(cache_mask & STATX_CTIME)) {
- inode->i_ctime.tv_sec = attr->ctime;
- inode->i_ctime.tv_nsec = attr->ctimensec;
+ inode_set_ctime(inode, attr->ctime, attr->ctimensec);
}
+ if (sx) {
+ /* Sanitize nsecs */
+ sx->btime.tv_nsec =
+ min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
- if (attr->blksize != 0)
- inode->i_blkbits = ilog2(attr->blksize);
+ /*
+ * Btime has been queried and the cache is valid (whether or not btime
+ * is available), so clear STATX_BTIME from inval_mask.
+ *
+ * Availability of the btime attribute is indicated in
+ * FUSE_I_BTIME
+ */
+ set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
+ if (sx->mask & STATX_BTIME) {
+ set_bit(FUSE_I_BTIME, &fi->state);
+ fi->i_btime.tv_sec = sx->btime.tv_sec;
+ fi->i_btime.tv_nsec = sx->btime.tv_nsec;
+ }
+ }
+
+ if (attr->blksize)
+ fi->cached_i_blkbits = ilog2(attr->blksize);
else
- inode->i_blkbits = inode->i_sb->s_blocksize_bits;
+ fi->cached_i_blkbits = inode->i_sb->s_blocksize_bits;
/*
* Don't set the sticky bit in i_mode, unless we want the VFS
@@ -230,8 +325,9 @@ u32 fuse_get_cache_mask(struct inode *inode)
return STATX_MTIME | STATX_CTIME | STATX_SIZE;
}
-void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
- u64 attr_valid, u64 attr_version)
+static void fuse_change_attributes_i(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx, u64 attr_valid,
+ u64 attr_version, u64 evict_ctr)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -250,12 +346,12 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
attr->size = i_size_read(inode);
if (cache_mask & STATX_MTIME) {
- attr->mtime = inode->i_mtime.tv_sec;
- attr->mtimensec = inode->i_mtime.tv_nsec;
+ attr->mtime = inode_get_mtime_sec(inode);
+ attr->mtimensec = inode_get_mtime_nsec(inode);
}
if (cache_mask & STATX_CTIME) {
- attr->ctime = inode->i_ctime.tv_sec;
- attr->ctimensec = inode->i_ctime.tv_nsec;
+ attr->ctime = inode_get_ctime_sec(inode);
+ attr->ctimensec = inode_get_ctime_nsec(inode);
}
if ((attr_version != 0 && fi->attr_version > attr_version) ||
@@ -264,8 +360,9 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
- old_mtime = inode->i_mtime;
- fuse_change_attributes_common(inode, attr, attr_valid, cache_mask);
+ old_mtime = inode_get_mtime(inode);
+ fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask,
+ evict_ctr);
oldsize = inode->i_size;
/*
@@ -301,19 +398,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
if (inval)
invalidate_inode_pages2(inode->i_mapping);
}
+
+ if (IS_ENABLED(CONFIG_FUSE_DAX))
+ fuse_dax_dontcache(inode, attr->flags);
+}
+
+void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx, u64 attr_valid,
+ u64 attr_version)
+{
+ fuse_change_attributes_i(inode, attr, sx, attr_valid, attr_version, 0);
}
-static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
+static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
+ u64 nodeid)
+{
+ sl->nodeid = nodeid;
+ refcount_set(&sl->count, 1);
+}
+
+static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_conn *fc)
{
inode->i_mode = attr->mode & S_IFMT;
inode->i_size = attr->size;
- inode->i_mtime.tv_sec = attr->mtime;
- inode->i_mtime.tv_nsec = attr->mtimensec;
- inode->i_ctime.tv_sec = attr->ctime;
- inode->i_ctime.tv_nsec = attr->ctimensec;
+ inode_set_mtime(inode, attr->mtime, attr->mtimensec);
+ inode_set_ctime(inode, attr->ctime, attr->ctimensec);
if (S_ISREG(inode->i_mode)) {
fuse_init_common(inode);
- fuse_init_file_inode(inode);
+ fuse_init_file_inode(inode, attr->flags);
} else if (S_ISDIR(inode->i_mode))
fuse_init_dir(inode);
else if (S_ISLNK(inode->i_mode))
@@ -325,6 +438,12 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
new_decode_dev(attr->rdev));
} else
BUG();
+ /*
+ * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
+ * so they see the exact same behavior as before.
+ */
+ if (!fc->posix_acl)
+ inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
@@ -345,7 +464,8 @@ static int fuse_inode_set(struct inode *inode, void *_nodeidp)
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int generation, struct fuse_attr *attr,
- u64 attr_valid, u64 attr_version)
+ u64 attr_valid, u64 attr_version,
+ u64 evict_ctr)
{
struct inode *inode;
struct fuse_inode *fi;
@@ -360,12 +480,22 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
*/
if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
S_ISDIR(attr->mode)) {
+ struct fuse_inode *fi;
+
inode = new_inode(sb);
if (!inode)
return NULL;
- fuse_init_inode(inode, attr);
- get_fuse_inode(inode)->nodeid = nodeid;
+ fuse_init_inode(inode, attr, fc);
+ fi = get_fuse_inode(inode);
+ fi->nodeid = nodeid;
+ fi->submount_lookup = fuse_alloc_submount_lookup();
+ if (!fi->submount_lookup) {
+ iput(inode);
+ return NULL;
+ }
+ /* Sets nlookup = 1 on fi->submount_lookup->nlookup */
+ fuse_init_submount_lookup(fi->submount_lookup, nodeid);
inode->i_flags |= S_AUTOMOUNT;
goto done;
}
@@ -375,26 +505,29 @@ retry:
if (!inode)
return NULL;
- if ((inode->i_state & I_NEW)) {
+ if ((inode_state_read_once(inode) & I_NEW)) {
inode->i_flags |= S_NOATIME;
if (!fc->writeback_cache || !S_ISREG(attr->mode))
inode->i_flags |= S_NOCMTIME;
inode->i_generation = generation;
- fuse_init_inode(inode, attr);
+ fuse_init_inode(inode, attr, fc);
unlock_new_inode(inode);
} else if (fuse_stale_inode(inode, generation, attr)) {
/* nodeid was reused, any I/O on the old inode should fail */
fuse_make_bad(inode);
- iput(inode);
- goto retry;
+ if (inode != d_inode(sb->s_root)) {
+ remove_inode_hash(inode);
+ iput(inode);
+ goto retry;
+ }
}
-done:
fi = get_fuse_inode(inode);
spin_lock(&fi->lock);
fi->nlookup++;
spin_unlock(&fi->lock);
- fuse_change_attributes(inode, attr, attr_valid, attr_version);
-
+done:
+ fuse_change_attributes_i(inode, attr, NULL, attr_valid, attr_version,
+ evict_ctr);
return inode;
}
@@ -452,6 +585,17 @@ int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
return 0;
}
+void fuse_try_prune_one_inode(struct fuse_conn *fc, u64 nodeid)
+{
+ struct inode *inode;
+
+ inode = fuse_ilookup(fc, nodeid, NULL);
+ if (!inode)
+ return;
+ d_prune_aliases(inode);
+ iput(inode);
+}
+
bool fuse_lock_inode(struct inode *inode)
{
bool locked = false;
@@ -474,8 +618,14 @@ static void fuse_umount_begin(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- if (!fc->no_force_umount)
- fuse_abort_conn(fc);
+ if (fc->no_force_umount)
+ return;
+
+ fuse_abort_conn(fc);
+
+ // Only retire block-device-based superblocks.
+ if (sb->s_bdev != NULL)
+ retire_super(sb);
}
static void fuse_send_destroy(struct fuse_mount *fm)
@@ -640,8 +790,8 @@ static const struct fs_parameter_spec fuse_fs_parameters[] = {
fsparam_string ("source", OPT_SOURCE),
fsparam_u32 ("fd", OPT_FD),
fsparam_u32oct ("rootmode", OPT_ROOTMODE),
- fsparam_u32 ("user_id", OPT_USER_ID),
- fsparam_u32 ("group_id", OPT_GROUP_ID),
+ fsparam_uid ("user_id", OPT_USER_ID),
+ fsparam_gid ("group_id", OPT_GROUP_ID),
fsparam_flag ("default_permissions", OPT_DEFAULT_PERMISSIONS),
fsparam_flag ("allow_other", OPT_ALLOW_OTHER),
fsparam_u32 ("max_read", OPT_MAX_READ),
@@ -655,6 +805,8 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
struct fs_parse_result result;
struct fuse_fs_context *ctx = fsc->fs_private;
int opt;
+ kuid_t kuid;
+ kgid_t kgid;
if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
/*
@@ -699,16 +851,26 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
break;
case OPT_USER_ID:
- ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
- if (!uid_valid(ctx->user_id))
+ kuid = result.uid;
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fsc->user_ns, kuid))
return invalfc(fsc, "Invalid user_id");
+ ctx->user_id = kuid;
ctx->user_id_present = true;
break;
case OPT_GROUP_ID:
- ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
- if (!gid_valid(ctx->group_id))
+ kgid = result.gid;
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fsc->user_ns, kgid))
return invalfc(fsc, "Invalid group_id");
+ ctx->group_id = kgid;
ctx->group_id_present = true;
break;
@@ -767,8 +929,12 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",blksize=%lu", sb->s_blocksize);
}
#ifdef CONFIG_FUSE_DAX
- if (fc->dax)
- seq_puts(m, ",dax");
+ if (fc->dax_mode == FUSE_DAX_ALWAYS)
+ seq_puts(m, ",dax=always");
+ else if (fc->dax_mode == FUSE_DAX_NEVER)
+ seq_puts(m, ",dax=never");
+ else if (fc->dax_mode == FUSE_DAX_INODE_USER)
+ seq_puts(m, ",dax=inode");
#endif
return 0;
@@ -789,7 +955,7 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq,
fiq->priv = priv;
}
-static void fuse_pqueue_init(struct fuse_pqueue *fpq)
+void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
unsigned int i;
@@ -810,6 +976,8 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
init_rwsem(&fc->killsb);
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
+ atomic_set(&fc->epoch, 1);
+ INIT_WORK(&fc->epoch_work, fuse_epoch_work);
init_waitqueue_head(&fc->blocked_waitq);
fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
INIT_LIST_HEAD(&fc->bg_queue);
@@ -824,11 +992,17 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
fc->initialized = 0;
fc->connected = 1;
atomic64_set(&fc->attr_version, 1);
+ atomic64_set(&fc->evict_ctr, 1);
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
fc->user_ns = get_user_ns(user_ns);
fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
- fc->max_pages_limit = FUSE_MAX_MAX_PAGES;
+ fc->max_pages_limit = fuse_max_pages_limit;
+ fc->name_max = FUSE_NAME_LOW_MAX;
+ fc->timeout.req_timeout = 0;
+
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ fuse_backing_files_init(fc);
INIT_LIST_HEAD(&fc->mounts);
list_add(&fm->fc_entry, &fc->mounts);
@@ -836,25 +1010,40 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
+static void delayed_release(struct rcu_head *p)
+{
+ struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
+
+ fuse_uring_destruct(fc);
+
+ put_user_ns(fc->user_ns);
+ fc->release(fc);
+}
+
void fuse_conn_put(struct fuse_conn *fc)
{
- if (refcount_dec_and_test(&fc->count)) {
- struct fuse_iqueue *fiq = &fc->iq;
- struct fuse_sync_bucket *bucket;
-
- if (IS_ENABLED(CONFIG_FUSE_DAX))
- fuse_dax_conn_free(fc);
- if (fiq->ops->release)
- fiq->ops->release(fiq);
- put_pid_ns(fc->pid_ns);
- put_user_ns(fc->user_ns);
- bucket = rcu_dereference_protected(fc->curr_bucket, 1);
- if (bucket) {
- WARN_ON(atomic_read(&bucket->count) != 1);
- kfree(bucket);
- }
- fc->release(fc);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_sync_bucket *bucket;
+
+ if (!refcount_dec_and_test(&fc->count))
+ return;
+
+ if (IS_ENABLED(CONFIG_FUSE_DAX))
+ fuse_dax_conn_free(fc);
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work_sync(&fc->timeout.work);
+ cancel_work_sync(&fc->epoch_work);
+ if (fiq->ops->release)
+ fiq->ops->release(fiq);
+ put_pid_ns(fc->pid_ns);
+ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
+ if (bucket) {
+ WARN_ON(atomic_read(&bucket->count) != 1);
+ kfree(bucket);
}
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ fuse_backing_files_free(fc);
+ call_rcu(&fc->rcu, delayed_release);
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
@@ -865,7 +1054,7 @@ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
-static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
+static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned int mode)
{
struct fuse_attr attr;
memset(&attr, 0, sizeof(attr));
@@ -873,7 +1062,7 @@ static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
attr.mode = mode;
attr.ino = FUSE_ROOT_ID;
attr.nlink = 1;
- return fuse_iget(sb, 1, 0, &attr, 0, 0);
+ return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0, 0);
}
struct fuse_inode_handle {
@@ -957,7 +1146,7 @@ static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
}
*max_len = len;
- return parent ? 0x82 : 0x81;
+ return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
@@ -965,7 +1154,8 @@ static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
{
struct fuse_inode_handle handle;
- if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
+ if ((fh_type != FILEID_INO64_GEN &&
+ fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
return NULL;
handle.nodeid = (u64) fid->raw[0] << 32;
@@ -979,7 +1169,7 @@ static struct dentry *fuse_fh_to_parent(struct super_block *sb,
{
struct fuse_inode_handle parent;
- if (fh_type != 0x82 || fh_len < 6)
+ if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
return NULL;
parent.nodeid = (u64) fid->raw[3] << 32;
@@ -1015,6 +1205,11 @@ static struct dentry *fuse_get_parent(struct dentry *child)
return parent;
}
+/* only for fid encoding; no support for file handle */
+static const struct export_operations fuse_export_fid_operations = {
+ .encode_fh = fuse_encode_fh,
+};
+
static const struct export_operations fuse_export_operations = {
.fh_to_dentry = fuse_fh_to_dentry,
.fh_to_parent = fuse_fh_to_parent,
@@ -1027,14 +1222,14 @@ static const struct super_operations fuse_super_operations = {
.free_inode = fuse_free_inode,
.evict_inode = fuse_evict_inode,
.write_inode = fuse_write_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
.umount_begin = fuse_umount_begin,
.statfs = fuse_statfs,
.sync_fs = fuse_sync_fs,
.show_options = fuse_show_options,
};
-static void sanitize_global_limit(unsigned *limit)
+static void sanitize_global_limit(unsigned int *limit)
{
/*
* The default maximum number of async requests is calculated to consume
@@ -1055,7 +1250,7 @@ static int set_global_limit(const char *val, const struct kernel_param *kp)
if (rv)
return rv;
- sanitize_global_limit((unsigned *)kp->arg);
+ sanitize_global_limit((unsigned int *)kp->arg);
return 0;
}
@@ -1087,6 +1282,34 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
+static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ fc->timeout.req_timeout = secs_to_jiffies(timeout);
+ INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
+ queue_delayed_work(system_percpu_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+}
+
+static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout)
+ return;
+
+ if (!timeout)
+ timeout = fuse_default_req_timeout;
+
+ if (fuse_max_req_timeout) {
+ if (timeout)
+ timeout = min(fuse_max_req_timeout, timeout);
+ else
+ timeout = fuse_max_req_timeout;
+ }
+
+ timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout);
+
+ set_request_timeout(fc, timeout);
+}
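/*
 * Worked example (illustrative values, not part of the patch): with
 * fuse_default_req_timeout = 60 and fuse_max_req_timeout = 600, a server
 * that requests no timeout gets 60s, a request for 30s is kept at 30s,
 * and a request for 3600s is clamped down to 600s.  Every result is then
 * raised to at least FUSE_TIMEOUT_TIMER_FREQ before set_request_timeout()
 * converts it to jiffies.
 */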
+
struct fuse_init_args {
struct fuse_args args;
struct fuse_init_in in;
@@ -1105,83 +1328,142 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
ok = false;
else {
unsigned long ra_pages;
+ unsigned int timeout = 0;
process_init_limits(fc, arg);
if (arg->minor >= 6) {
+ u64 flags = arg->flags;
+
+ if (flags & FUSE_INIT_EXT)
+ flags |= (u64) arg->flags2 << 32;
+
ra_pages = arg->max_readahead / PAGE_SIZE;
- if (arg->flags & FUSE_ASYNC_READ)
+ if (flags & FUSE_ASYNC_READ)
fc->async_read = 1;
- if (!(arg->flags & FUSE_POSIX_LOCKS))
+ if (!(flags & FUSE_POSIX_LOCKS))
fc->no_lock = 1;
if (arg->minor >= 17) {
- if (!(arg->flags & FUSE_FLOCK_LOCKS))
+ if (!(flags & FUSE_FLOCK_LOCKS))
fc->no_flock = 1;
} else {
- if (!(arg->flags & FUSE_POSIX_LOCKS))
+ if (!(flags & FUSE_POSIX_LOCKS))
fc->no_flock = 1;
}
- if (arg->flags & FUSE_ATOMIC_O_TRUNC)
+ if (flags & FUSE_ATOMIC_O_TRUNC)
fc->atomic_o_trunc = 1;
if (arg->minor >= 9) {
/* LOOKUP has dependency on proto version */
- if (arg->flags & FUSE_EXPORT_SUPPORT)
+ if (flags & FUSE_EXPORT_SUPPORT)
fc->export_support = 1;
}
- if (arg->flags & FUSE_BIG_WRITES)
+ if (flags & FUSE_BIG_WRITES)
fc->big_writes = 1;
- if (arg->flags & FUSE_DONT_MASK)
+ if (flags & FUSE_DONT_MASK)
fc->dont_mask = 1;
- if (arg->flags & FUSE_AUTO_INVAL_DATA)
+ if (flags & FUSE_AUTO_INVAL_DATA)
fc->auto_inval_data = 1;
- else if (arg->flags & FUSE_EXPLICIT_INVAL_DATA)
+ else if (flags & FUSE_EXPLICIT_INVAL_DATA)
fc->explicit_inval_data = 1;
- if (arg->flags & FUSE_DO_READDIRPLUS) {
+ if (flags & FUSE_DO_READDIRPLUS) {
fc->do_readdirplus = 1;
- if (arg->flags & FUSE_READDIRPLUS_AUTO)
+ if (flags & FUSE_READDIRPLUS_AUTO)
fc->readdirplus_auto = 1;
}
- if (arg->flags & FUSE_ASYNC_DIO)
+ if (flags & FUSE_ASYNC_DIO)
fc->async_dio = 1;
- if (arg->flags & FUSE_WRITEBACK_CACHE)
+ if (flags & FUSE_WRITEBACK_CACHE)
fc->writeback_cache = 1;
- if (arg->flags & FUSE_PARALLEL_DIROPS)
+ if (flags & FUSE_PARALLEL_DIROPS)
fc->parallel_dirops = 1;
- if (arg->flags & FUSE_HANDLE_KILLPRIV)
+ if (flags & FUSE_HANDLE_KILLPRIV)
fc->handle_killpriv = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fm->sb->s_time_gran = arg->time_gran;
- if ((arg->flags & FUSE_POSIX_ACL)) {
+ if ((flags & FUSE_POSIX_ACL)) {
fc->default_permissions = 1;
fc->posix_acl = 1;
- fm->sb->s_xattr = fuse_acl_xattr_handlers;
}
- if (arg->flags & FUSE_CACHE_SYMLINKS)
+ if (flags & FUSE_CACHE_SYMLINKS)
fc->cache_symlinks = 1;
- if (arg->flags & FUSE_ABORT_ERROR)
+ if (flags & FUSE_ABORT_ERROR)
fc->abort_err = 1;
- if (arg->flags & FUSE_MAX_PAGES) {
+ if (flags & FUSE_MAX_PAGES) {
fc->max_pages =
min_t(unsigned int, fc->max_pages_limit,
max_t(unsigned int, arg->max_pages, 1));
+
+ /*
+ * PATH_MAX file names might need two pages for
+ * ops like rename
+ */
+ if (fc->max_pages > 1)
+ fc->name_max = FUSE_NAME_MAX;
}
- if (IS_ENABLED(CONFIG_FUSE_DAX) &&
- arg->flags & FUSE_MAP_ALIGNMENT &&
- !fuse_dax_check_alignment(fc, arg->map_alignment)) {
- ok = false;
+ if (IS_ENABLED(CONFIG_FUSE_DAX)) {
+ if (flags & FUSE_MAP_ALIGNMENT &&
+ !fuse_dax_check_alignment(fc, arg->map_alignment)) {
+ ok = false;
+ }
+ if (flags & FUSE_HAS_INODE_DAX)
+ fc->inode_dax = 1;
}
- if (arg->flags & FUSE_HANDLE_KILLPRIV_V2) {
+ if (flags & FUSE_HANDLE_KILLPRIV_V2) {
fc->handle_killpriv_v2 = 1;
fm->sb->s_flags |= SB_NOSEC;
}
- if (arg->flags & FUSE_SETXATTR_EXT)
+ if (flags & FUSE_SETXATTR_EXT)
fc->setxattr_ext = 1;
+ if (flags & FUSE_SECURITY_CTX)
+ fc->init_security = 1;
+ if (flags & FUSE_CREATE_SUPP_GROUP)
+ fc->create_supp_group = 1;
+ if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
+ fc->direct_io_allow_mmap = 1;
+ /*
+ * max_stack_depth is the max stack depth of FUSE fs,
+ * so it has to be at least 1 to support passthrough
+ * to backing files.
+ *
+ * with max_stack_depth > 1, the backing files can be
+ * on a stacked fs (e.g. overlayfs) themselves and with
+ * max_stack_depth == 1, FUSE fs can be stacked as the
+ * underlying fs of a stacked fs (e.g. overlayfs).
+ *
+ * Also don't allow the combination of FUSE_PASSTHROUGH
+ * and FUSE_WRITEBACK_CACHE, current design doesn't handle
+ * them together.
+ */
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) &&
+ (flags & FUSE_PASSTHROUGH) &&
+ arg->max_stack_depth > 0 &&
+ arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH &&
+ !(flags & FUSE_WRITEBACK_CACHE)) {
+ fc->passthrough = 1;
+ fc->max_stack_depth = arg->max_stack_depth;
+ fm->sb->s_stack_depth = arg->max_stack_depth;
+ }
+ if (flags & FUSE_NO_EXPORT_SUPPORT)
+ fm->sb->s_export_op = &fuse_export_fid_operations;
+ if (flags & FUSE_ALLOW_IDMAP) {
+ if (fc->default_permissions)
+ fm->sb->s_iflags &= ~SB_I_NOIDMAP;
+ else
+ ok = false;
+ }
+ if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
+ fc->io_uring = 1;
+
+ if (flags & FUSE_REQUEST_TIMEOUT)
+ timeout = arg->request_timeout;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
fc->no_flock = 1;
}
+ init_server_timeout(fc, timeout);
+
fm->sb->s_bdi->ra_pages =
min(fm->sb->s_bdi->ra_pages, ra_pages);
fc->minor = arg->minor;
@@ -1200,16 +1482,17 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
wake_up_all(&fc->blocked_waitq);
}
-void fuse_send_init(struct fuse_mount *fm)
+static struct fuse_init_args *fuse_new_init(struct fuse_mount *fm)
{
struct fuse_init_args *ia;
+ u64 flags;
ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);
ia->in.major = FUSE_KERNEL_VERSION;
ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
- ia->in.flags |=
+ flags =
FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -1219,13 +1502,31 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
- FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT;
+ FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
+ FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
+ FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP |
+ FUSE_REQUEST_TIMEOUT;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
- ia->in.flags |= FUSE_MAP_ALIGNMENT;
+ flags |= FUSE_MAP_ALIGNMENT;
+ if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
+ flags |= FUSE_HAS_INODE_DAX;
#endif
if (fm->fc->auto_submounts)
- ia->in.flags |= FUSE_SUBMOUNTS;
+ flags |= FUSE_SUBMOUNTS;
+ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
+ flags |= FUSE_PASSTHROUGH;
+
+ /*
+ * This is just an information flag for fuse server. No need to check
+ * the reply - server is either sending IORING_OP_URING_CMD or not.
+ */
+ if (fuse_uring_enabled())
+ flags |= FUSE_OVER_IO_URING;
+
+ ia->in.flags = flags;
+ ia->in.flags2 = flags >> 32;
ia->args.opcode = FUSE_INIT;
ia->args.in_numargs = 1;
@@ -1240,17 +1541,37 @@ void fuse_send_init(struct fuse_mount *fm)
ia->args.out_args[0].value = &ia->out;
ia->args.force = true;
ia->args.nocreds = true;
- ia->args.end = process_init_reply;
- if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
- process_init_reply(fm, &ia->args, -ENOTCONN);
+ return ia;
+}
+
+int fuse_send_init(struct fuse_mount *fm)
+{
+ struct fuse_init_args *ia = fuse_new_init(fm);
+ int err;
+
+ if (fm->fc->sync_init) {
+ err = fuse_simple_request(fm, &ia->args);
+ /* Ignore size of init reply */
+ if (err > 0)
+ err = 0;
+ } else {
+ ia->args.end = process_init_reply;
+ err = fuse_simple_background(fm, &ia->args, GFP_KERNEL);
+ if (!err)
+ return 0;
+ }
+ process_init_reply(fm, &ia->args, err);
+ if (fm->fc->conn_error)
+ return -ENOTCONN;
+ return 0;
}
EXPORT_SYMBOL_GPL(fuse_send_init);
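/*
 * Illustrative note (not part of the patch): the negotiated feature flags
 * form a single 64-bit value carried over the wire as two 32-bit fields.
 * fuse_new_init() splits them with
 *
 *	ia->in.flags = flags;
 *	ia->in.flags2 = flags >> 32;
 *
 * and process_init_reply() folds arg->flags2 back into the upper 32 bits
 * only when the server acknowledges FUSE_INIT_EXT.
 */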
void fuse_free_conn(struct fuse_conn *fc)
{
WARN_ON(!list_empty(&fc->devices));
- kfree_rcu(fc, rcu);
+ kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);
@@ -1273,8 +1594,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
if (err)
return err;
- /* fuse does it's own writeback accounting */
- sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;
/*
@@ -1357,20 +1676,24 @@ EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
const struct fuse_inode *fi)
{
+ struct timespec64 atime = inode_get_atime(&fi->inode);
+ struct timespec64 mtime = inode_get_mtime(&fi->inode);
+ struct timespec64 ctime = inode_get_ctime(&fi->inode);
+
*attr = (struct fuse_attr){
.ino = fi->inode.i_ino,
.size = fi->inode.i_size,
.blocks = fi->inode.i_blocks,
- .atime = fi->inode.i_atime.tv_sec,
- .mtime = fi->inode.i_mtime.tv_sec,
- .ctime = fi->inode.i_ctime.tv_sec,
- .atimensec = fi->inode.i_atime.tv_nsec,
- .mtimensec = fi->inode.i_mtime.tv_nsec,
- .ctimensec = fi->inode.i_ctime.tv_nsec,
+ .atime = atime.tv_sec,
+ .mtime = mtime.tv_sec,
+ .ctime = ctime.tv_sec,
+ .atimensec = atime.tv_nsec,
+ .mtimensec = mtime.tv_nsec,
+ .ctimensec = ctime.tv_nsec,
.mode = fi->inode.i_mode,
.nlink = fi->inode.i_nlink,
- .uid = fi->inode.i_uid.val,
- .gid = fi->inode.i_gid.val,
+ .uid = __kuid_val(fi->inode.i_uid),
+ .gid = __kgid_val(fi->inode.i_gid),
.rdev = fi->inode.i_rdev,
.blksize = 1u << fi->inode.i_blkbits,
};
@@ -1385,16 +1708,10 @@ static void fuse_sb_defaults(struct super_block *sb)
sb->s_time_gran = 1;
sb->s_export_op = &fuse_export_operations;
sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ sb->s_iflags |= SB_I_NOIDMAP;
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
-
- /*
- * If we are not in the initial user namespace posix
- * acls must be translated.
- */
- if (sb->s_user_ns != &init_user_ns)
- sb->s_xattr = fuse_no_acl_xattr_handlers;
}
static int fuse_fill_super_submount(struct super_block *sb,
@@ -1404,6 +1721,8 @@ static int fuse_fill_super_submount(struct super_block *sb,
struct super_block *parent_sb = parent_fi->inode.i_sb;
struct fuse_attr root_attr;
struct inode *root;
+ struct fuse_submount_lookup *sl;
+ struct fuse_inode *fi;
fuse_sb_defaults(sb);
fm->sb = sb;
@@ -1412,6 +1731,7 @@ static int fuse_fill_super_submount(struct super_block *sb,
sb->s_bdi = bdi_get(parent_sb->s_bdi);
sb->s_xattr = parent_sb->s_xattr;
+ sb->s_export_op = parent_sb->s_export_op;
sb->s_time_gran = parent_sb->s_time_gran;
sb->s_blocksize = parent_sb->s_blocksize;
sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
@@ -1420,18 +1740,34 @@ static int fuse_fill_super_submount(struct super_block *sb,
return -ENOMEM;
fuse_fill_attr_from_inode(&root_attr, parent_fi);
- root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
+ root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0,
+ fuse_get_evict_ctr(fm->fc));
/*
* This inode is just a duplicate, so it is not looked up and
* its nlookup should not be incremented. fuse_iget() does
* that, though, so undo it here.
*/
- get_fuse_inode(root)->nlookup--;
- sb->s_d_op = &fuse_dentry_operations;
+ fi = get_fuse_inode(root);
+ fi->nlookup--;
+
+ set_default_d_op(sb, &fuse_dentry_operations);
sb->s_root = d_make_root(root);
if (!sb->s_root)
return -ENOMEM;
+ /*
+ * Grab the parent's submount_lookup pointer and take a
+ * reference on the shared nlookup from the parent. This is to
+ * prevent the last forget for this nodeid from getting
+ * triggered until all users have finished with it.
+ */
+ sl = parent_fi->submount_lookup;
+ WARN_ON(!sl);
+ if (sl) {
+ refcount_inc(&sl->count);
+ fi->submount_lookup = sl;
+ }
+
return 0;
}
@@ -1506,6 +1842,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
if (!sb_set_blocksize(sb, ctx->blksize))
goto err;
#endif
+ fc->sync_fs = 1;
} else {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -1514,7 +1851,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
sb->s_subtype = ctx->subtype;
ctx->subtype = NULL;
if (IS_ENABLED(CONFIG_FUSE_DAX)) {
- err = fuse_dax_conn_alloc(fc, ctx->dax_dev);
+ err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
if (err)
goto err;
}
@@ -1549,17 +1886,19 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
err = -ENOMEM;
root = fuse_get_root_inode(sb, ctx->rootmode);
- sb->s_d_op = &fuse_root_dentry_operations;
+ set_default_d_op(sb, &fuse_dentry_operations);
root_dentry = d_make_root(root);
if (!root_dentry)
goto err_dev_free;
- /* Root dentry doesn't have .d_revalidate */
- sb->s_d_op = &fuse_dentry_operations;
mutex_lock(&fuse_mutex);
err = -EINVAL;
- if (ctx->fudptr && *ctx->fudptr)
- goto err_unlock;
+ if (ctx->fudptr && *ctx->fudptr) {
+ if (*ctx->fudptr == FUSE_DEV_SYNC_INIT)
+ fc->sync_init = 1;
+ else
+ goto err_unlock;
+ }
err = fuse_ctl_add_conn(fc);
if (err)
@@ -1567,8 +1906,10 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
list_add_tail(&fc->entry, &fuse_conn_list);
sb->s_root = root_dentry;
- if (ctx->fudptr)
+ if (ctx->fudptr) {
*ctx->fudptr = fud;
+ wake_up_all(&fuse_dev_waitq);
+ }
mutex_unlock(&fuse_mutex);
return 0;
@@ -1589,6 +1930,7 @@ EXPORT_SYMBOL_GPL(fuse_fill_super_common);
static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
struct fuse_fs_context *ctx = fsc->fs_private;
+ struct fuse_mount *fm;
int err;
if (!ctx->file || !ctx->rootmode_present ||
@@ -1609,8 +1951,10 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
return err;
/* file->private_data shall be visible on all CPUs after this */
smp_mb();
- fuse_send_init(get_fuse_mount_super(sb));
- return 0;
+
+ fm = get_fuse_mount_super(sb);
+
+ return fuse_send_init(fm);
}
/*
@@ -1671,7 +2015,7 @@ static int fuse_get_tree(struct fs_context *fsc)
* Allow creating a fuse mount with an already initialized fuse
* connection
*/
- fud = READ_ONCE(ctx->file->private_data);
+ fud = __fuse_get_dev(ctx->file);
if (ctx->file->f_op == &fuse_dev_operations && fud) {
fsc->sget_key = fud->fc;
sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
@@ -1772,7 +2116,7 @@ static void fuse_sb_destroy(struct super_block *sb)
void fuse_mount_destroy(struct fuse_mount *fm)
{
fuse_conn_put(fm->fc);
- kfree(fm);
+ kfree_rcu(fm, rcu);
}
EXPORT_SYMBOL(fuse_mount_destroy);
@@ -1786,7 +2130,7 @@ static void fuse_kill_sb_anon(struct super_block *sb)
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
- .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
+ .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
@@ -1807,7 +2151,7 @@ static struct file_system_type fuseblk_fs_type = {
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
- .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
+ .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");
@@ -1858,8 +2202,14 @@ static int __init fuse_fs_init(void)
if (err)
goto out3;
+ err = fuse_sysctl_register();
+ if (err)
+ goto out4;
+
return 0;
+ out4:
+ unregister_filesystem(&fuse_fs_type);
out3:
unregister_fuseblk();
out2:
@@ -1870,6 +2220,7 @@ static int __init fuse_fs_init(void)
static void fuse_fs_cleanup(void)
{
+ fuse_sysctl_unregister();
unregister_filesystem(&fuse_fs_type);
unregister_fuseblk();
@@ -1935,6 +2286,8 @@ static int __init fuse_init(void)
if (res)
goto err_sysfs_cleanup;
+ fuse_dentry_tree_init();
+
sanitize_global_limit(&max_user_bgreq);
sanitize_global_limit(&max_user_congthresh);
@@ -1954,6 +2307,7 @@ static void __exit fuse_exit(void)
{
pr_debug("exit\n");
+ fuse_dentry_tree_cleanup();
fuse_ctl_cleanup();
fuse_sysfs_cleanup();
fuse_fs_cleanup();
diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
index fbc09dab1f85..fdc175e93f74 100644
--- a/fs/fuse/ioctl.c
+++ b/fs/fuse/ioctl.c
@@ -8,6 +8,29 @@
#include <linux/uio.h>
#include <linux/compat.h>
#include <linux/fileattr.h>
+#include <linux/fsverity.h>
+
+#define FUSE_VERITY_ENABLE_ARG_MAX_PAGES 256
+
+static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
+ struct fuse_ioctl_out *outarg)
+{
+ ssize_t ret;
+
+ args->out_args[0].size = sizeof(*outarg);
+ args->out_args[0].value = outarg;
+
+ ret = fuse_simple_request(fm, args);
+
+ /* Translate ENOSYS, which shouldn't be returned from fs */
+ if (ret == -ENOSYS)
+ ret = -ENOTTY;
+
+ if (ret >= 0 && outarg->result == -ENOSYS)
+ outarg->result = -ENOTTY;
+
+ return ret;
+}
/*
* CUSE servers compiled on 32bit broke on 64bit kernels because the
@@ -97,6 +120,53 @@ static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
return 0;
}
+/* For fs-verity, determine iov lengths from input */
+static int fuse_setup_measure_verity(unsigned long arg, struct iovec *iov)
+{
+ __u16 digest_size;
+ struct fsverity_digest __user *uarg = (void __user *)arg;
+
+ if (copy_from_user(&digest_size, &uarg->digest_size, sizeof(digest_size)))
+ return -EFAULT;
+
+ if (digest_size > SIZE_MAX - sizeof(struct fsverity_digest))
+ return -EINVAL;
+
+ iov->iov_len = sizeof(struct fsverity_digest) + digest_size;
+
+ return 0;
+}
+
+static int fuse_setup_enable_verity(unsigned long arg, struct iovec *iov,
+ unsigned int *in_iovs)
+{
+ struct fsverity_enable_arg enable;
+ struct fsverity_enable_arg __user *uarg = (void __user *)arg;
+ const __u32 max_buffer_len = FUSE_VERITY_ENABLE_ARG_MAX_PAGES * PAGE_SIZE;
+
+ if (copy_from_user(&enable, uarg, sizeof(enable)))
+ return -EFAULT;
+
+ if (enable.salt_size > max_buffer_len || enable.sig_size > max_buffer_len)
+ return -ENOMEM;
+
+ if (enable.salt_size > 0) {
+ iov++;
+ (*in_iovs)++;
+
+ iov->iov_base = u64_to_user_ptr(enable.salt_ptr);
+ iov->iov_len = enable.salt_size;
+ }
+
+ if (enable.sig_size > 0) {
+ iov++;
+ (*in_iovs)++;
+
+ iov->iov_base = u64_to_user_ptr(enable.sig_ptr);
+ iov->iov_len = enable.sig_size;
+ }
+ return 0;
+}
/*
* For ioctls, there is no generic way to determine how much memory
@@ -170,7 +240,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
#else
if (flags & FUSE_IOCTL_COMPAT) {
inarg.flags |= FUSE_IOCTL_32BIT;
-#ifdef CONFIG_X86_X32
+#ifdef CONFIG_X86_X32_ABI
if (in_x32_syscall())
inarg.flags |= FUSE_IOCTL_COMPAT_X32;
#endif
@@ -181,12 +251,12 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
- ap.pages = fuse_pages_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs);
+ ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs);
iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
- if (!ap.pages || !iov_page)
+ if (!ap.folios || !iov_page)
goto out;
- fuse_page_descs_length_init(ap.descs, 0, fm->fc->max_pages);
+ fuse_folio_descs_length_init(ap.descs, 0, fm->fc->max_pages);
/*
* If restricted, initialize IO parameters as encoded in @cmd.
@@ -207,6 +277,18 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
out_iov = iov;
out_iovs = 1;
}
+
+ err = 0;
+ switch (cmd) {
+ case FS_IOC_MEASURE_VERITY:
+ err = fuse_setup_measure_verity(arg, iov);
+ break;
+ case FS_IOC_ENABLE_VERITY:
+ err = fuse_setup_enable_verity(arg, iov, &in_iovs);
+ break;
+ }
+ if (err)
+ goto out;
}
retry:
@@ -224,14 +306,13 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
err = -ENOMEM;
if (max_pages > fm->fc->max_pages)
goto out;
- while (ap.num_pages < max_pages) {
- ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!ap.pages[ap.num_pages])
+ while (ap.num_folios < max_pages) {
+ ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0);
+ if (!ap.folios[ap.num_folios])
goto out;
- ap.num_pages++;
+ ap.num_folios++;
}
-
/* okay, let's send it to the client */
ap.args.opcode = FUSE_IOCTL;
ap.args.nodeid = ff->nodeid;
@@ -244,22 +325,20 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
ap.args.in_pages = true;
err = -EFAULT;
- iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
- for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
- c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
+ iov_iter_init(&ii, ITER_SOURCE, in_iov, in_iovs, in_size);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) {
+ c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii);
if (c != PAGE_SIZE && iov_iter_count(&ii))
goto out;
}
}
ap.args.out_numargs = 2;
- ap.args.out_args[0].size = sizeof(outarg);
- ap.args.out_args[0].value = &outarg;
ap.args.out_args[1].size = out_size;
ap.args.out_pages = true;
ap.args.out_argvar = true;
- transferred = fuse_simple_request(fm, &ap.args);
+ transferred = fuse_send_ioctl(fm, &ap.args, &outarg);
err = transferred;
if (transferred < 0)
goto out;
@@ -286,7 +365,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
- vaddr = kmap_local_page(ap.pages[0]);
+ vaddr = kmap_local_folio(ap.folios[0], 0);
err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr,
transferred, in_iovs + out_iovs,
(flags & FUSE_IOCTL_COMPAT) != 0);
@@ -313,18 +392,18 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
goto out;
err = -EFAULT;
- iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
- for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
- c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
+ iov_iter_init(&ii, ITER_DEST, out_iov, out_iovs, transferred);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) {
+ c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii);
if (c != PAGE_SIZE && iov_iter_count(&ii))
goto out;
}
err = 0;
out:
free_page((unsigned long) iov_page);
- while (ap.num_pages)
- __free_page(ap.pages[--ap.num_pages]);
- kfree(ap.pages);
+ while (ap.num_folios)
+ folio_put(ap.folios[--ap.num_folios]);
+ kfree(ap.folios);
return err ? err : outarg.result;
}
@@ -388,15 +467,16 @@ static int fuse_priv_ioctl(struct inode *inode, struct fuse_file *ff,
args.in_args[1].size = inarg.in_size;
args.in_args[1].value = ptr;
args.out_numargs = 2;
- args.out_args[0].size = sizeof(outarg);
- args.out_args[0].value = &outarg;
args.out_args[1].size = inarg.out_size;
args.out_args[1].value = ptr;
- err = fuse_simple_request(fm, &args);
- if (!err && outarg.flags & FUSE_IOCTL_RETRY)
- err = -EIO;
-
+ err = fuse_send_ioctl(fm, &args, &outarg);
+ if (!err) {
+ if (outarg.result < 0)
+ err = outarg.result;
+ else if (outarg.flags & FUSE_IOCTL_RETRY)
+ err = -EIO;
+ }
return err;
}
@@ -405,6 +485,12 @@ static struct fuse_file *fuse_priv_ioctl_prepare(struct inode *inode)
struct fuse_mount *fm = get_fuse_mount(inode);
bool isdir = S_ISDIR(inode->i_mode);
+ if (!fuse_allow_current_process(fm->fc))
+ return ERR_PTR(-EACCES);
+
+ if (fuse_is_bad(inode))
+ return ERR_PTR(-EIO);
+
if (!S_ISREG(inode->i_mode) && !isdir)
return ERR_PTR(-ENOTTY);
@@ -416,7 +502,7 @@ static void fuse_priv_ioctl_cleanup(struct inode *inode, struct fuse_file *ff)
fuse_file_release(inode, ff, O_RDONLY, NULL, S_ISDIR(inode->i_mode));
}
-int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct fuse_file *ff;
@@ -453,8 +539,8 @@ cleanup:
return err;
}
-int fuse_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int fuse_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct fuse_file *ff;
diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c
new file mode 100644
index 000000000000..3728933188f3
--- /dev/null
+++ b/fs/fuse/iomode.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE inode io modes.
+ *
+ * Copyright (c) 2024 CTERA Networks.
+ */
+
+#include "fuse_i.h"
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+
+/*
+ * Return true if need to wait for new opens in caching mode.
+ */
+static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
+{
+ return READ_ONCE(fi->iocachectr) < 0 && !fuse_inode_backing(fi);
+}
+
+/*
+ * Called on cached file open() and on first mmap() of direct_io file.
+ * Takes cached_io inode mode reference to be dropped on file release.
+ *
+ * Blocks new parallel dio writes and waits for the in-progress parallel dio
+ * writes to complete.
+ */
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ /* There are no io modes if server does not implement open */
+ if (!ff->args)
+ return 0;
+
+ spin_lock(&fi->lock);
+ /*
+ * Setting the bit advises new direct-io writes to use an exclusive
+ * lock - without it the wait below might be forever.
+ */
+ while (fuse_is_io_cache_wait(fi)) {
+ set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+ spin_unlock(&fi->lock);
+ wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
+ spin_lock(&fi->lock);
+ }
+
+ /*
+ * Check if inode entered passthrough io mode while waiting for parallel
+ * dio write completion.
+ */
+ if (fuse_inode_backing(fi)) {
+ clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+ spin_unlock(&fi->lock);
+ return -ETXTBSY;
+ }
+
+ WARN_ON(ff->iomode == IOM_UNCACHED);
+ if (ff->iomode == IOM_NONE) {
+ ff->iomode = IOM_CACHED;
+ if (fi->iocachectr == 0)
+ set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+ fi->iocachectr++;
+ }
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+static void fuse_file_cached_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
+{
+ spin_lock(&fi->lock);
+ WARN_ON(fi->iocachectr <= 0);
+ WARN_ON(ff->iomode != IOM_CACHED);
+ ff->iomode = IOM_NONE;
+ fi->iocachectr--;
+ if (fi->iocachectr == 0)
+ clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+ spin_unlock(&fi->lock);
+}
+
+/* Start strictly uncached io mode where cache access is not allowed */
+int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
+{
+ struct fuse_backing *oldfb;
+ int err = 0;
+
+ spin_lock(&fi->lock);
+ /* deny conflicting backing files on same fuse inode */
+ oldfb = fuse_inode_backing(fi);
+ if (fb && oldfb && oldfb != fb) {
+ err = -EBUSY;
+ goto unlock;
+ }
+ if (fi->iocachectr > 0) {
+ err = -ETXTBSY;
+ goto unlock;
+ }
+ fi->iocachectr--;
+
+ /* fuse inode holds a single refcount of backing file */
+ if (fb && !oldfb) {
+ oldfb = fuse_inode_backing_set(fi, fb);
+ WARN_ON_ONCE(oldfb != NULL);
+ } else {
+ fuse_backing_put(fb);
+ }
+unlock:
+ spin_unlock(&fi->lock);
+ return err;
+}
+
+/* Takes uncached_io inode mode reference to be dropped on file release */
+static int fuse_file_uncached_io_open(struct inode *inode,
+ struct fuse_file *ff,
+ struct fuse_backing *fb)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ int err;
+
+ err = fuse_inode_uncached_io_start(fi, fb);
+ if (err)
+ return err;
+
+ WARN_ON(ff->iomode != IOM_NONE);
+ ff->iomode = IOM_UNCACHED;
+ return 0;
+}
+
+void fuse_inode_uncached_io_end(struct fuse_inode *fi)
+{
+ struct fuse_backing *oldfb = NULL;
+
+ spin_lock(&fi->lock);
+ WARN_ON(fi->iocachectr >= 0);
+ fi->iocachectr++;
+ if (!fi->iocachectr) {
+ wake_up(&fi->direct_io_waitq);
+ oldfb = fuse_inode_backing_set(fi, NULL);
+ }
+ spin_unlock(&fi->lock);
+ if (oldfb)
+ fuse_backing_put(oldfb);
+}
+
+/* Drop uncached_io reference from passthrough open */
+static void fuse_file_uncached_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
+{
+ WARN_ON(ff->iomode != IOM_UNCACHED);
+ ff->iomode = IOM_NONE;
+ fuse_inode_uncached_io_end(fi);
+}
+
+/*
+ * Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
+ * A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
+ * operations go directly to the server, but mmap is done on the backing file.
+ * FOPEN_PASSTHROUGH mode should not co-exist with any users of the fuse inode
+ * page cache, so FOPEN_KEEP_CACHE is a strange and undesired combination.
+ */
+#define FOPEN_PASSTHROUGH_MASK \
+ (FOPEN_PASSTHROUGH | FOPEN_DIRECT_IO | FOPEN_PARALLEL_DIRECT_WRITES | \
+ FOPEN_NOFLUSH)
+
+static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
+{
+ struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_backing *fb;
+ int err;
+
+ /* Check allowed conditions for file open in passthrough mode */
+ if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) || !fc->passthrough ||
+ (ff->open_flags & ~FOPEN_PASSTHROUGH_MASK))
+ return -EINVAL;
+
+ fb = fuse_passthrough_open(file, ff->args->open_outarg.backing_id);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ /* First passthrough file open denies caching inode io mode */
+ err = fuse_file_uncached_io_open(inode, ff, fb);
+ if (!err)
+ return 0;
+
+ fuse_passthrough_release(ff, fb);
+ fuse_backing_put(fb);
+
+ return err;
+}
+
+/* Request access to submit new io to inode via open file */
+int fuse_file_io_open(struct file *file, struct inode *inode)
+{
+ struct fuse_file *ff = file->private_data;
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ int err;
+
+ /*
+ * io modes are not relevant with DAX and with server that does not
+ * implement open.
+ */
+ if (FUSE_IS_DAX(inode) || !ff->args)
+ return 0;
+
+ /*
+ * Server is expected to use FOPEN_PASSTHROUGH for all opens of an inode
+ * which is already open for passthrough.
+ */
+ err = -EINVAL;
+ if (fuse_inode_backing(fi) && !(ff->open_flags & FOPEN_PASSTHROUGH))
+ goto fail;
+
+ /*
+ * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
+ */
+ if (!(ff->open_flags & FOPEN_DIRECT_IO))
+ ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;
+
+ /*
+ * First passthrough file open denies caching inode io mode.
+ * First caching file open enters caching inode io mode.
+ *
+ * Note that if the user opens a file with O_DIRECT, but the server did
+ * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
+ * so we put the inode in caching mode to prevent parallel dio.
+ */
+ if ((ff->open_flags & FOPEN_DIRECT_IO) &&
+ !(ff->open_flags & FOPEN_PASSTHROUGH))
+ return 0;
+
+ if (ff->open_flags & FOPEN_PASSTHROUGH)
+ err = fuse_file_passthrough_open(inode, file);
+ else
+ err = fuse_file_cached_io_open(inode, ff);
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
+ ff->open_flags, err);
+ /*
+ * The file open mode determines the inode io mode.
+ * Using an incorrect open mode is a server mistake, which results in
+ * a user-visible failure of open() with an EIO error.
+ */
+ return -EIO;
+}
+
+/* No more pending io and no new io possible to inode via open/mmapped file */
+void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ /*
+ * Last passthrough file close allows caching inode io mode.
+ * Last caching file close exits caching inode io mode.
+ */
+ switch (ff->iomode) {
+ case IOM_NONE:
+ /* Nothing to do */
+ break;
+ case IOM_UNCACHED:
+ fuse_file_uncached_io_release(ff, fi);
+ break;
+ case IOM_CACHED:
+ fuse_file_cached_io_release(ff, fi);
+ break;
+ }
+}
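
Editor's note: the counter protocol implemented by this file is easy to lose in the locking detail. fi->iocachectr counts cached-io users when positive and uncached/passthrough users when negative, and the two populations must never coexist. The minimal user-space model below captures just that invariant (hypothetical helper names, no locking or waitqueue).

/*
 * Toy model of the iocachectr exclusion above; the kernel uses fi->lock
 * and a waitqueue where this sketch uses plain asserts.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

static int iocachectr;	/* models fi->iocachectr */

static int cached_open(void)
{
	if (iocachectr < 0)
		return -ETXTBSY;	/* passthrough/uncached io already active */
	iocachectr++;
	return 0;
}

static int uncached_open(void)
{
	if (iocachectr > 0)
		return -ETXTBSY;	/* cached io already active */
	iocachectr--;
	return 0;
}

static void cached_release(void)   { assert(iocachectr > 0); iocachectr--; }
static void uncached_release(void) { assert(iocachectr < 0); iocachectr++; }

int main(void)
{
	assert(cached_open() == 0);		/* first cached open: ctr = 1 */
	assert(uncached_open() == -ETXTBSY);	/* passthrough open must fail */
	cached_release();			/* ctr back to 0 */
	assert(uncached_open() == 0);		/* passthrough wins: ctr = -1 */
	assert(cached_open() == -ETXTBSY);	/* cached open must fail now */
	uncached_release();
	printf("io mode exclusion holds\n");
	return 0;
}
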
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
new file mode 100644
index 000000000000..72de97c03d0e
--- /dev/null
+++ b/fs/fuse/passthrough.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE passthrough to backing file.
+ *
+ * Copyright (c) 2023 CTERA Networks.
+ */
+
+#include "fuse_i.h"
+
+#include <linux/file.h>
+#include <linux/backing-file.h>
+#include <linux/splice.h>
+
+static void fuse_file_accessed(struct file *file)
+{
+ struct inode *inode = file_inode(file);
+
+ fuse_invalidate_atime(inode);
+}
+
+static void fuse_passthrough_end_write(struct kiocb *iocb, ssize_t ret)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ fuse_write_update_attr(inode, iocb->ki_pos, ret);
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+ struct file *backing_file = fuse_file_passthrough(ff);
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
+ struct backing_file_ctx ctx = {
+ .cred = ff->cred,
+ .accessed = fuse_file_accessed,
+ };
+
+ pr_debug("%s: backing_file=0x%p, pos=%lld, len=%zu\n", __func__,
+ backing_file, iocb->ki_pos, count);
+
+ if (!count)
+ return 0;
+
+ ret = backing_file_read_iter(backing_file, iter, iocb, iocb->ki_flags,
+ &ctx);
+
+ return ret;
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct fuse_file *ff = file->private_data;
+ struct file *backing_file = fuse_file_passthrough(ff);
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
+ struct backing_file_ctx ctx = {
+ .cred = ff->cred,
+ .end_write = fuse_passthrough_end_write,
+ };
+
+ pr_debug("%s: backing_file=0x%p, pos=%lld, len=%zu\n", __func__,
+ backing_file, iocb->ki_pos, count);
+
+ if (!count)
+ return 0;
+
+ inode_lock(inode);
+ ret = backing_file_write_iter(backing_file, iter, iocb, iocb->ki_flags,
+ &ctx);
+ inode_unlock(inode);
+
+ return ret;
+}
+
+ssize_t fuse_passthrough_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct fuse_file *ff = in->private_data;
+ struct file *backing_file = fuse_file_passthrough(ff);
+ struct backing_file_ctx ctx = {
+ .cred = ff->cred,
+ .accessed = fuse_file_accessed,
+ };
+ struct kiocb iocb;
+ ssize_t ret;
+
+ pr_debug("%s: backing_file=0x%p, pos=%lld, len=%zu, flags=0x%x\n", __func__,
+ backing_file, *ppos, len, flags);
+
+ init_sync_kiocb(&iocb, in);
+ iocb.ki_pos = *ppos;
+ ret = backing_file_splice_read(backing_file, &iocb, pipe, len, flags, &ctx);
+ *ppos = iocb.ki_pos;
+
+ return ret;
+}
+
+ssize_t fuse_passthrough_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ struct fuse_file *ff = out->private_data;
+ struct file *backing_file = fuse_file_passthrough(ff);
+ struct inode *inode = file_inode(out);
+ ssize_t ret;
+ struct backing_file_ctx ctx = {
+ .cred = ff->cred,
+ .end_write = fuse_passthrough_end_write,
+ };
+ struct kiocb iocb;
+
+ pr_debug("%s: backing_file=0x%p, pos=%lld, len=%zu, flags=0x%x\n", __func__,
+ backing_file, *ppos, len, flags);
+
+ inode_lock(inode);
+ init_sync_kiocb(&iocb, out);
+ iocb.ki_pos = *ppos;
+ ret = backing_file_splice_write(pipe, backing_file, &iocb, len, flags, &ctx);
+ *ppos = iocb.ki_pos;
+ inode_unlock(inode);
+
+ return ret;
+}
+
+ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fuse_file *ff = file->private_data;
+ struct file *backing_file = fuse_file_passthrough(ff);
+ struct backing_file_ctx ctx = {
+ .cred = ff->cred,
+ .accessed = fuse_file_accessed,
+ };
+
+ pr_debug("%s: backing_file=0x%p, start=%lu, end=%lu\n", __func__,
+ backing_file, vma->vm_start, vma->vm_end);
+
+ return backing_file_mmap(backing_file, vma, &ctx);
+}
+
+/*
+ * Setup passthrough to a backing file.
+ *
+ * Returns an fb object with elevated refcount to be stored in fuse inode.
+ */
+struct fuse_backing *fuse_passthrough_open(struct file *file, int backing_id)
+{
+ struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = ff->fm->fc;
+ struct fuse_backing *fb = NULL;
+ struct file *backing_file;
+ int err;
+
+ err = -EINVAL;
+ if (backing_id <= 0)
+ goto out;
+
+ err = -ENOENT;
+ fb = fuse_backing_lookup(fc, backing_id);
+ if (!fb)
+ goto out;
+
+ /* Allocate backing file per fuse file to store fuse path */
+ backing_file = backing_file_open(&file->f_path, file->f_flags,
+ &fb->file->f_path, fb->cred);
+ err = PTR_ERR(backing_file);
+ if (IS_ERR(backing_file)) {
+ fuse_backing_put(fb);
+ goto out;
+ }
+
+ err = 0;
+ ff->passthrough = backing_file;
+ ff->cred = get_cred(fb->cred);
+out:
+ pr_debug("%s: backing_id=%d, fb=0x%p, backing_file=0x%p, err=%i\n", __func__,
+ backing_id, fb, ff->passthrough, err);
+
+ return err ? ERR_PTR(err) : fb;
+}
+
+void fuse_passthrough_release(struct fuse_file *ff, struct fuse_backing *fb)
+{
+ pr_debug("%s: fb=0x%p, backing_file=0x%p\n", __func__,
+ fb, ff->passthrough);
+
+ fput(ff->passthrough);
+ ff->passthrough = NULL;
+ put_cred(ff->cred);
+ ff->cred = NULL;
+}
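
Editor's note: fuse_passthrough_open() looks up a backing id that the server must have registered beforehand; the registration path is not part of this file. The sketch below shows how a FUSE server might obtain a backing id over its /dev/fuse fd. It assumes the FUSE_DEV_IOC_BACKING_OPEN ioctl and struct fuse_backing_map added elsewhere in this series; check the names against your <linux/fuse.h> before relying on them.

/*
 * Hypothetical server-side sketch: register a backing file, then put the
 * returned id plus FOPEN_PASSTHROUGH into the FUSE_OPEN reply so the kernel
 * can resolve it in fuse_passthrough_open().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fuse.h>

static int register_backing(int fuse_dev_fd, const char *path)
{
	struct fuse_backing_map map = { 0 };
	int backing_id;

	map.fd = open(path, O_RDONLY);
	if (map.fd < 0)
		return -1;

	/* On success the ioctl is expected to return the new backing id (> 0) */
	backing_id = ioctl(fuse_dev_fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
	close(map.fd);	/* the kernel holds its own reference now */
	if (backing_id < 0)
		perror("FUSE_DEV_IOC_BACKING_OPEN");
	return backing_id;
}
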
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index b4e565711045..c2aae2eef086 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -77,8 +77,10 @@ static void fuse_add_dirent_to_cache(struct file *file,
goto unlock;
addr = kmap_local_page(page);
- if (!offset)
+ if (!offset) {
clear_page(addr);
+ SetPageUptodate(page);
+ }
memcpy(addr + offset, dirent, reclen);
kunmap_local(addr);
fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen;
@@ -118,7 +120,7 @@ static bool fuse_emit(struct file *file, struct dir_context *ctx,
fuse_add_dirent_to_cache(file, dirent, ctx->pos);
return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino,
- dirent->type);
+ dirent->type | FILLDIR_FLAG_NOINTR);
}
static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
@@ -147,7 +149,7 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
static int fuse_direntplus_link(struct file *file,
struct fuse_direntplus *direntplus,
- u64 attr_version)
+ u64 attr_version, u64 evict_ctr)
{
struct fuse_entry_out *o = &direntplus->entry_out;
struct fuse_dirent *dirent = &direntplus->dirent;
@@ -159,6 +161,7 @@ static int fuse_direntplus_link(struct file *file,
struct fuse_conn *fc;
struct inode *inode;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ int epoch;
if (!o->nodeid) {
/*
@@ -188,6 +191,7 @@ static int fuse_direntplus_link(struct file *file,
return -EIO;
fc = get_fuse_conn(dir);
+ epoch = atomic_read(&fc->epoch);
name.hash = full_name_hash(parent, name.name, name.len);
dentry = d_lookup(parent, &name);
@@ -221,8 +225,8 @@ retry:
spin_unlock(&fi->lock);
forget_all_cached_acls(inode);
- fuse_change_attributes(inode, &o->attr,
- entry_attr_timeout(o),
+ fuse_change_attributes(inode, &o->attr, NULL,
+ ATTR_TIMEOUT(o),
attr_version);
/*
* The other branch comes via fuse_iget()
@@ -230,8 +234,8 @@ retry:
*/
} else {
inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
- &o->attr, entry_attr_timeout(o),
- attr_version);
+ &o->attr, ATTR_TIMEOUT(o),
+ attr_version, evict_ctr);
if (!inode)
inode = ERR_PTR(-ENOMEM);
@@ -241,11 +245,20 @@ retry:
dput(dentry);
dentry = alias;
}
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ if (!IS_ERR(inode)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fi->lock);
+ fi->nlookup--;
+ spin_unlock(&fi->lock);
+ }
return PTR_ERR(dentry);
+ }
}
if (fc->readdirplus_auto)
set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
+ dentry->d_time = epoch;
fuse_change_entry_timeout(dentry, o);
dput(dentry);
@@ -274,7 +287,8 @@ static void fuse_force_forget(struct file *file, u64 nodeid)
}
static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx, u64 attr_version)
+ struct dir_context *ctx, u64 attr_version,
+ u64 evict_ctr)
{
struct fuse_direntplus *direntplus;
struct fuse_dirent *dirent;
@@ -309,7 +323,7 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
buf += reclen;
nbytes -= reclen;
- ret = fuse_direntplus_link(file, direntplus, attr_version);
+ ret = fuse_direntplus_link(file, direntplus, attr_version, evict_ctr);
if (ret)
fuse_force_forget(file, direntplus->entry_out.nodeid);
}
@@ -321,34 +335,32 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
{
int plus;
ssize_t res;
- struct page *page;
struct inode *inode = file_inode(file);
struct fuse_mount *fm = get_fuse_mount(inode);
+ struct fuse_conn *fc = fm->fc;
struct fuse_io_args ia = {};
- struct fuse_args_pages *ap = &ia.ap;
- struct fuse_page_desc desc = { .length = PAGE_SIZE };
- u64 attr_version = 0;
+ struct fuse_args *args = &ia.ap.args;
+ void *buf;
+ size_t bufsize = clamp((unsigned int) ctx->count, PAGE_SIZE, fc->max_pages << PAGE_SHIFT);
+ u64 attr_version = 0, evict_ctr = 0;
bool locked;
- page = alloc_page(GFP_KERNEL);
- if (!page)
+ buf = kvmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
+ args->out_args[0].value = buf;
+
plus = fuse_use_readdirplus(inode, ctx);
- ap->args.out_pages = true;
- ap->num_pages = 1;
- ap->pages = &page;
- ap->descs = &desc;
if (plus) {
attr_version = fuse_get_attr_version(fm->fc);
- fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIRPLUS);
+ evict_ctr = fuse_get_evict_ctr(fm->fc);
+ fuse_read_args_fill(&ia, file, ctx->pos, bufsize, FUSE_READDIRPLUS);
} else {
- fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIR);
+ fuse_read_args_fill(&ia, file, ctx->pos, bufsize, FUSE_READDIR);
}
locked = fuse_lock_inode(inode);
- res = fuse_simple_request(fm, &ap->args);
+ res = fuse_simple_request(fm, args);
fuse_unlock_inode(inode, locked);
if (res >= 0) {
if (!res) {
@@ -357,15 +369,14 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
if (ff->open_flags & FOPEN_CACHE_DIR)
fuse_readdir_cache_end(file, ctx->pos);
} else if (plus) {
- res = parse_dirplusfile(page_address(page), res,
- file, ctx, attr_version);
+ res = parse_dirplusfile(buf, res, file, ctx, attr_version,
+ evict_ctr);
} else {
- res = parse_dirfile(page_address(page), res, file,
- ctx);
+ res = parse_dirfile(buf, res, file, ctx);
}
}
- __free_page(page);
+ kvfree(buf);
fuse_invalidate_atime(inode);
return res;
}
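
Editor's note: the readdir buffer is now sized from the caller's getdents buffer rather than a single page, clamped between PAGE_SIZE and the connection's max_pages window. A toy illustration of that clamp with made-up numbers:

/* Illustration of the bufsize clamp above: PAGE_SIZE = 4096, max_pages = 32. */
#include <stdio.h>

static size_t readdir_bufsize(size_t count, size_t page_size, unsigned int max_pages)
{
	size_t lo = page_size, hi = (size_t)max_pages * page_size;

	if (count < lo)
		return lo;
	if (count > hi)
		return hi;
	return count;
}

int main(void)
{
	printf("%zu\n", readdir_bufsize(512,     4096, 32));	/* -> 4096   */
	printf("%zu\n", readdir_bufsize(65536,   4096, 32));	/* -> 65536  */
	printf("%zu\n", readdir_bufsize(1 << 20, 4096, 32));	/* -> 131072 */
	return 0;
}
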
@@ -406,7 +417,7 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
if (ff->readdir.pos == ctx->pos) {
res = FOUND_SOME;
if (!dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type))
+ dirent->ino, dirent->type | FILLDIR_FLAG_NOINTR))
return FOUND_ALL;
ctx->pos = dirent->off;
}
@@ -466,7 +477,7 @@ retry_locked:
if (!fi->rdc.cached) {
/* Starting cache? Set cache mtime. */
if (!ctx->pos && !fi->rdc.size) {
- fi->rdc.mtime = inode->i_mtime;
+ fi->rdc.mtime = inode_get_mtime(inode);
fi->rdc.iversion = inode_query_iversion(inode);
}
spin_unlock(&fi->rdc.lock);
@@ -478,8 +489,10 @@ retry_locked:
* changed, and reset the cache if so.
*/
if (!ctx->pos) {
+ struct timespec64 mtime = inode_get_mtime(inode);
+
if (inode_peek_iversion(inode) != fi->rdc.iversion ||
- !timespec64_equal(&fi->rdc.mtime, &inode->i_mtime)) {
+ !timespec64_equal(&fi->rdc.mtime, &mtime)) {
fuse_rdc_reset(inode);
goto retry_locked;
}
@@ -516,6 +529,12 @@ retry_locked:
page = find_get_page_flags(file->f_mapping, index,
FGP_ACCESSED | FGP_LOCK);
+ /* Page gone missing, then re-added to cache, but not initialized? */
+ if (page && !PageUptodate(page)) {
+ unlock_page(page);
+ put_page(page);
+ page = NULL;
+ }
spin_lock(&fi->rdc.lock);
if (!page) {
/*
@@ -539,9 +558,9 @@ retry_locked:
* Contents of the page are now protected against changing by holding
* the page lock.
*/
- addr = kmap(page);
+ addr = kmap_local_page(page);
res = fuse_parse_cache(ff, addr, size, ctx);
- kunmap(page);
+ kunmap_local(addr);
unlock_page(page);
put_page(page);
@@ -574,15 +593,11 @@ int fuse_readdir(struct file *file, struct dir_context *ctx)
if (fuse_is_bad(inode))
return -EIO;
- mutex_lock(&ff->readdir.lock);
-
err = UNCACHED;
if (ff->open_flags & FOPEN_CACHE_DIR)
err = fuse_readdir_cached(file, ctx);
if (err == UNCACHED)
err = fuse_readdir_uncached(file, ctx);
- mutex_unlock(&ff->readdir.lock);
-
return err;
}
diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c
new file mode 100644
index 000000000000..e2d921abcb88
--- /dev/null
+++ b/fs/fuse/sysctl.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/fs/fuse/sysctl.c
+ *
+ * Sysctl interface to fuse parameters
+ */
+#include <linux/sysctl.h>
+
+#include "fuse_i.h"
+
+static struct ctl_table_header *fuse_table_header;
+
+/* Bound by fuse_init_out max_pages, which is a u16 */
+static unsigned int sysctl_fuse_max_pages_limit = 65535;
+
+/*
+ * fuse_init_out request timeouts are u16.
+ * This goes up to ~18 hours, which is plenty for a timeout.
+ */
+static unsigned int sysctl_fuse_req_timeout_limit = 65535;
+
+static const struct ctl_table fuse_sysctl_table[] = {
+ {
+ .procname = "max_pages_limit",
+ .data = &fuse_max_pages_limit,
+ .maxlen = sizeof(fuse_max_pages_limit),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &sysctl_fuse_max_pages_limit,
+ },
+ {
+ .procname = "default_request_timeout",
+ .data = &fuse_default_req_timeout,
+ .maxlen = sizeof(fuse_default_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
+ {
+ .procname = "max_request_timeout",
+ .data = &fuse_max_req_timeout,
+ .maxlen = sizeof(fuse_max_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
+};
+
+int fuse_sysctl_register(void)
+{
+ fuse_table_header = register_sysctl("fs/fuse", fuse_sysctl_table);
+ if (!fuse_table_header)
+ return -ENOMEM;
+ return 0;
+}
+
+void fuse_sysctl_unregister(void)
+{
+ unregister_sysctl_table(fuse_table_header);
+ fuse_table_header = NULL;
+}
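
Editor's note: the table registers under "fs/fuse", so the knobs appear as /proc/sys/fs/fuse/max_pages_limit, default_request_timeout and max_request_timeout. A small user-space sketch that reads and optionally raises the first of them (writing requires privileges):

/* Read and optionally set fs.fuse.max_pages_limit; purely illustrative. */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = "/proc/sys/fs/fuse/max_pages_limit";
	FILE *f = fopen(path, argc > 1 ? "r+" : "r");
	unsigned int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &val) == 1)
		printf("max_pages_limit = %u\n", val);
	if (argc > 1) {			/* new value, 1..65535 per the table above */
		rewind(f);
		fprintf(f, "%s\n", argv[1]);
	}
	fclose(f);
	return 0;
}
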
diff --git a/fs/fuse/trace.c b/fs/fuse/trace.c
new file mode 100644
index 000000000000..93bd72efc98c
--- /dev/null
+++ b/fs/fuse/trace.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "dev_uring_i.h"
+#include "fuse_i.h"
+#include "fuse_dev_i.h"
+
+#include <linux/pagemap.h>
+
+#define CREATE_TRACE_POINTS
+#include "fuse_trace.h"
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 4cfa4bc1f579..b2f6486fe1d5 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -7,7 +7,9 @@
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
-#include <linux/pfn_t.h>
+#include <linux/interrupt.h>
+#include <linux/group_cpus.h>
+#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
@@ -15,8 +17,10 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
+#include <linux/cleanup.h>
#include <linux/uio.h>
#include "fuse_i.h"
+#include "fuse_dev_i.h"
/* Used to help calculate the FUSE connection's max_pages limit for a request's
* size. Parts of the struct fuse_req are sliced into scattergather lists in
@@ -30,6 +34,9 @@
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);
+/* The /sys/fs/virtio_fs/ kset */
+static struct kset *virtio_fs_kset;
+
enum {
VQ_HIPRIO,
VQ_REQUEST
@@ -44,17 +51,19 @@ struct virtio_fs_vq {
struct work_struct done_work;
struct list_head queued_reqs;
struct list_head end_reqs; /* End these requests */
- struct delayed_work dispatch_work;
+ struct work_struct dispatch_work;
struct fuse_dev *fud;
bool connected;
long in_flight;
struct completion in_flight_zero; /* No inflight requests */
+ struct kobject *kobj;
char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
/* A virtio-fs device instance */
struct virtio_fs {
- struct kref refcount;
+ struct kobject kobj;
+ struct kobject *mqs_kobj;
struct list_head list; /* on virtio_fs_instances */
char *tag;
struct virtio_fs_vq *vqs;
@@ -62,6 +71,8 @@ struct virtio_fs {
unsigned int num_request_queues; /* number of request queues */
struct dax_device *dax_dev;
+ unsigned int *mq_map; /* index = cpu id, value = request vq id */
+
/* DAX memory window where file contents are mapped */
void *window_kaddr;
phys_addr_t window_phys_addr;
@@ -86,14 +97,24 @@ struct virtio_fs_req_work {
};
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
- struct fuse_req *req, bool in_flight);
+ struct fuse_req *req, bool in_flight,
+ gfp_t gfp);
+
+static const struct constant_table dax_param_enums[] = {
+ {"always", FUSE_DAX_ALWAYS },
+ {"never", FUSE_DAX_NEVER },
+ {"inode", FUSE_DAX_INODE_USER },
+ {}
+};
enum {
OPT_DAX,
+ OPT_DAX_ENUM,
};
static const struct fs_parameter_spec virtio_fs_parameters[] = {
fsparam_flag("dax", OPT_DAX),
+ fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
{}
};
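
Editor's note: with the enum entry added above, virtiofs accepts dax, dax=always, dax=never and dax=inode at mount time. A hedged mount(2) sketch; the tag "myfs" and the mountpoint are placeholders, and the call needs CAP_SYS_ADMIN plus a DAX-capable device:

/* Mount a virtiofs tag with per-inode DAX policy (placeholder names). */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("myfs", "/mnt/virtiofs", "virtiofs", 0, "dax=inode")) {
		perror("mount");
		return 1;
	}
	return 0;
}
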
@@ -110,7 +131,10 @@ static int virtio_fs_parse_param(struct fs_context *fsc,
switch (opt) {
case OPT_DAX:
- ctx->dax = 1;
+ ctx->dax_mode = FUSE_DAX_ALWAYS;
+ break;
+ case OPT_DAX_ENUM:
+ ctx->dax_mode = result.uint_32;
break;
default:
return -EINVAL;
@@ -148,27 +172,125 @@ static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
complete(&fsvq->in_flight_zero);
}
-static void release_virtio_fs_obj(struct kref *ref)
+static ssize_t tag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);
+
+ return sysfs_emit(buf, "%s\n", fs->tag);
+}
+
+static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag);
+
+static struct attribute *virtio_fs_attrs[] = {
+ &virtio_fs_tag_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(virtio_fs);
+
+static void virtio_fs_ktype_release(struct kobject *kobj)
{
- struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
+ struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);
+ kfree(vfs->mq_map);
kfree(vfs->vqs);
kfree(vfs);
}
+static const struct kobj_type virtio_fs_ktype = {
+ .release = virtio_fs_ktype_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = virtio_fs_groups,
+};
+
+static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
+ struct kobject *kobj)
+{
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ if (kobj == fs->vqs[i].kobj)
+ return &fs->vqs[i];
+ }
+ return NULL;
+}
+
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+
+ if (!fsvq)
+ return -EINVAL;
+ return sysfs_emit(buf, "%s\n", fsvq->name);
+}
+
+static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);
+
+static ssize_t cpu_list_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+ unsigned int cpu, qid;
+ const size_t size = PAGE_SIZE - 1;
+ bool first = true;
+ int ret = 0, pos = 0;
+
+ if (!fsvq)
+ return -EINVAL;
+
+ qid = fsvq->vq->index;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {
+ if (first)
+ ret = snprintf(buf + pos, size - pos, "%u", cpu);
+ else
+ ret = snprintf(buf + pos, size - pos, ", %u", cpu);
+
+ if (ret >= size - pos)
+ break;
+ first = false;
+ pos += ret;
+ }
+ }
+ ret = snprintf(buf + pos, size + 1 - pos, "\n");
+ return pos + ret;
+}
+
+static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);
+
+static struct attribute *virtio_fs_vq_attrs[] = {
+ &virtio_fs_vq_name_attr.attr,
+ &virtio_fs_vq_cpu_list_attr.attr,
+ NULL
+};
+
+static struct attribute_group virtio_fs_vq_attr_group = {
+ .attrs = virtio_fs_vq_attrs,
+};
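
Editor's note: together these attributes surface each device instance as /sys/fs/virtiofs/<index>/ with a tag file and one mqs/<qid>/ directory per virtqueue exposing name and cpu_list. A small reader sketch; the instance index and queue id below are placeholders:

/* Dump the per-queue CPU mapping exported by the attributes above. */
#include <stdio.h>

static void show(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	show("/sys/fs/virtiofs/0/tag");
	show("/sys/fs/virtiofs/0/mqs/1/name");
	show("/sys/fs/virtiofs/0/mqs/1/cpu_list");
	return 0;
}
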
+
/* Make sure virtiofs_mutex is held */
+static void virtio_fs_put_locked(struct virtio_fs *fs)
+{
+ lockdep_assert_held(&virtio_fs_mutex);
+
+ kobject_put(&fs->kobj);
+}
+
static void virtio_fs_put(struct virtio_fs *fs)
{
- kref_put(&fs->refcount, release_virtio_fs_obj);
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_put_locked(fs);
+ mutex_unlock(&virtio_fs_mutex);
}
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
struct virtio_fs *vfs = fiq->priv;
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(vfs);
- mutex_unlock(&virtio_fs_mutex);
}
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
@@ -189,7 +311,7 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
}
flush_work(&fsvq->done_work);
- flush_delayed_work(&fsvq->dispatch_work);
+ flush_work(&fsvq->dispatch_work);
}
static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
@@ -229,27 +351,107 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs)
}
}
+static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ kobject_put(fsvq->kobj);
+ }
+}
+
+static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ char buff[12];
+ int i, j, ret;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+
+ sprintf(buff, "%d", i);
+ fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
+ if (!fsvq->kobj) {
+ ret = -ENOMEM;
+ goto out_del;
+ }
+
+ ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
+ if (ret) {
+ kobject_put(fsvq->kobj);
+ goto out_del;
+ }
+ }
+
+ return 0;
+
+out_del:
+ for (j = 0; j < i; j++) {
+ fsvq = &fs->vqs[j];
+ kobject_put(fsvq->kobj);
+ }
+ return ret;
+}
+
/* Add a new instance to the list or return -EEXIST if tag name exists */
-static int virtio_fs_add_instance(struct virtio_fs *fs)
+static int virtio_fs_add_instance(struct virtio_device *vdev,
+ struct virtio_fs *fs)
{
struct virtio_fs *fs2;
- bool duplicate = false;
+ int ret;
mutex_lock(&virtio_fs_mutex);
list_for_each_entry(fs2, &virtio_fs_instances, list) {
- if (strcmp(fs->tag, fs2->tag) == 0)
- duplicate = true;
+ if (strcmp(fs->tag, fs2->tag) == 0) {
+ mutex_unlock(&virtio_fs_mutex);
+ return -EEXIST;
+ }
+ }
+
+ /* Use the virtio_device's index as a unique identifier; there is no
+ * need to allocate our own identifiers because the virtio_fs instance
+ * is only visible to userspace as long as the underlying virtio_device
+ * exists.
+ */
+ fs->kobj.kset = virtio_fs_kset;
+ ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
+ if (ret < 0)
+ goto out_unlock;
+
+ fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
+ if (!fs->mqs_kobj) {
+ ret = -ENOMEM;
+ goto out_del;
}
- if (!duplicate)
- list_add_tail(&fs->list, &virtio_fs_instances);
+ ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
+ if (ret < 0)
+ goto out_put;
+
+ ret = virtio_fs_add_queues_sysfs(fs);
+ if (ret)
+ goto out_remove;
+
+ list_add_tail(&fs->list, &virtio_fs_instances);
mutex_unlock(&virtio_fs_mutex);
- if (duplicate)
- return -EEXIST;
+ kobject_uevent(&fs->kobj, KOBJ_ADD);
+
return 0;
+
+out_remove:
+ sysfs_remove_link(&fs->kobj, "device");
+out_put:
+ kobject_put(fs->mqs_kobj);
+out_del:
+ kobject_del(&fs->kobj);
+out_unlock:
+ mutex_unlock(&virtio_fs_mutex);
+ return ret;
}
/* Return the virtio_fs with a given tag, or NULL */
@@ -261,7 +463,7 @@ static struct virtio_fs *virtio_fs_find_instance(const char *tag)
list_for_each_entry(fs, &virtio_fs_instances, list) {
if (strcmp(fs->tag, tag) == 0) {
- kref_get(&fs->refcount);
+ kobject_get(&fs->kobj);
goto found;
}
}
@@ -310,6 +512,17 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
return -ENOMEM;
memcpy(fs->tag, tag_buf, len);
fs->tag[len] = '\0';
+
+ /* While the VIRTIO specification allows any character, newlines are
+ * awkward on mount(8) command-lines and cause problems in the sysfs
+ * "tag" attr and uevent TAG= properties. Forbid them.
+ */
+ if (strchr(fs->tag, '\n')) {
+ dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
+ return -EINVAL;
+ }
+
+ dev_info(&vdev->dev, "discovered new tag: %s\n", fs->tag);
return 0;
}
@@ -332,7 +545,11 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
kfree(req);
dec_in_flight_req(fsvq);
}
- } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
+ } while (!virtqueue_enable_cb(vq));
+
+ if (!list_empty(&fsvq->queued_reqs))
+ schedule_work(&fsvq->dispatch_work);
+
spin_unlock(&fsvq->lock);
}
@@ -340,7 +557,7 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
struct fuse_req *req;
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
- dispatch_work.work);
+ dispatch_work);
int ret;
pr_debug("virtio-fs: worker %s called.\n", __func__);
@@ -360,6 +577,8 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
/* Dispatch pending requests */
while (1) {
+ unsigned int flags;
+
spin_lock(&fsvq->lock);
req = list_first_entry_or_null(&fsvq->queued_reqs,
struct fuse_req, list);
@@ -370,13 +589,13 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
list_del_init(&req->list);
spin_unlock(&fsvq->lock);
- ret = virtio_fs_enqueue_req(fsvq, req, true);
+ flags = memalloc_nofs_save();
+ ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL);
+ memalloc_nofs_restore(flags);
if (ret < 0) {
- if (ret == -ENOMEM || ret == -ENOSPC) {
+ if (ret == -ENOSPC) {
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->queued_reqs);
- schedule_delayed_work(&fsvq->dispatch_work,
- msecs_to_jiffies(1));
spin_unlock(&fsvq->lock);
return;
}
@@ -419,12 +638,10 @@ static int send_forget_request(struct virtio_fs_vq *fsvq,
ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
if (ret < 0) {
- if (ret == -ENOMEM || ret == -ENOSPC) {
+ if (ret == -ENOSPC) {
pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
ret);
list_add_tail(&forget->list, &fsvq->queued_reqs);
- schedule_delayed_work(&fsvq->dispatch_work,
- msecs_to_jiffies(1));
if (!in_flight)
inc_in_flight_req(fsvq);
/* Queue is full */
@@ -456,7 +673,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
struct virtio_fs_forget *forget;
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
- dispatch_work.work);
+ dispatch_work);
pr_debug("virtio-fs: worker %s called.\n", __func__);
while (1) {
spin_lock(&fsvq->lock);
@@ -475,7 +692,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
}
/* Allocate and copy args into req->argbuf */
-static int copy_args_to_argbuf(struct fuse_req *req)
+static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp)
{
struct fuse_args *args = req->args;
unsigned int offset = 0;
@@ -489,7 +706,7 @@ static int copy_args_to_argbuf(struct fuse_req *req)
len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
fuse_len_args(num_out, args->out_args);
- req->argbuf = kmalloc(len, GFP_ATOMIC);
+ req->argbuf = kmalloc(len, gfp);
if (!req->argbuf)
return -ENOMEM;
@@ -545,11 +762,10 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
static void virtio_fs_request_complete(struct fuse_req *req,
struct virtio_fs_vq *fsvq)
{
- struct fuse_pqueue *fpq = &fsvq->fud->pq;
struct fuse_args *args;
struct fuse_args_pages *ap;
unsigned int len, i, thislen;
- struct page *page;
+ struct folio *folio;
/*
* TODO verify that server properly follows FUSE protocol
@@ -561,12 +777,12 @@ static void virtio_fs_request_complete(struct fuse_req *req,
if (args->out_pages && args->page_zeroing) {
len = args->out_args[args->out_numargs - 1].size;
ap = container_of(args, typeof(*ap), args);
- for (i = 0; i < ap->num_pages; i++) {
+ for (i = 0; i < ap->num_folios; i++) {
thislen = ap->descs[i].length;
if (len < thislen) {
WARN_ON(ap->descs[i].offset);
- page = ap->pages[i];
- zero_user_segment(page, len, thislen);
+ folio = ap->folios[i];
+ folio_zero_segment(folio, len, thislen);
len = 0;
} else {
len -= thislen;
@@ -574,9 +790,7 @@ static void virtio_fs_request_complete(struct fuse_req *req,
}
}
- spin_lock(&fpq->lock);
clear_bit(FR_SENT, &req->flags);
- spin_unlock(&fpq->lock);
fuse_request_end(req);
spin_lock(&fsvq->lock);
@@ -614,7 +828,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
list_move_tail(&req->list, &reqs);
spin_unlock(&fpq->lock);
}
- } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
+ } while (!virtqueue_enable_cb(vq));
spin_unlock(&fsvq->lock);
/* End requests */
@@ -634,6 +848,50 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
virtio_fs_request_complete(req, fsvq);
}
}
+
+ /* Try to push previously queued requests, as the queue might no longer be full */
+ spin_lock(&fsvq->lock);
+ if (!list_empty(&fsvq->queued_reqs))
+ schedule_work(&fsvq->dispatch_work);
+ spin_unlock(&fsvq->lock);
+}
+
+static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
+{
+ const struct cpumask *mask, *masks;
+ unsigned int q, cpu, nr_masks;
+
+ /* First attempt to map using existing transport layer affinities
+ * e.g. PCIe MSI-X
+ */
+ if (!vdev->config->get_vq_affinity)
+ goto fallback;
+
+ for (q = 0; q < fs->num_request_queues; q++) {
+ mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ fs->mq_map[cpu] = q + VQ_REQUEST;
+ }
+
+ return;
+fallback:
+ /* Attempt to map evenly in groups over the CPUs */
+ masks = group_cpus_evenly(fs->num_request_queues, &nr_masks);
+ /* If even this fails, fall back to mapping all CPUs to the first request queue */
+ if (!masks) {
+ for_each_possible_cpu(cpu)
+ fs->mq_map[cpu] = VQ_REQUEST;
+ return;
+ }
+
+ for (q = 0; q < fs->num_request_queues; q++) {
+ for_each_cpu(cpu, &masks[q % nr_masks])
+ fs->mq_map[cpu] = q + VQ_REQUEST;
+ }
+ kfree(masks);
}
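
Editor's note: if the transport exposes no per-queue affinity, the mapping falls back to group_cpus_evenly(), and if even that fails every CPU is pointed at the first request queue. The toy model below only illustrates that every CPU ends up mapped to some request queue; it uses a simple modulo spread rather than the contiguous groups group_cpus_evenly() would produce, and the sizes are made up.

/* Toy model of the mq_map fallback: queue ids start at VQ_REQUEST. */
#include <stdio.h>

#define VQ_REQUEST 1

int main(void)
{
	unsigned int nr_cpus = 8, nr_queues = 3;	/* made-up sizes */
	unsigned int mq_map[8];

	for (unsigned int cpu = 0; cpu < nr_cpus; cpu++)
		mq_map[cpu] = VQ_REQUEST + cpu % nr_queues;	/* even spread */

	for (unsigned int cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu%u -> requests.%u\n", cpu, mq_map[cpu] - VQ_REQUEST);
	return 0;
}
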
/* Virtqueue interrupt handler */
@@ -657,12 +915,12 @@ static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
if (vq_type == VQ_REQUEST) {
INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
- INIT_DELAYED_WORK(&fsvq->dispatch_work,
- virtio_fs_request_dispatch_work);
+ INIT_WORK(&fsvq->dispatch_work,
+ virtio_fs_request_dispatch_work);
} else {
INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
- INIT_DELAYED_WORK(&fsvq->dispatch_work,
- virtio_fs_hiprio_dispatch_work);
+ INIT_WORK(&fsvq->dispatch_work,
+ virtio_fs_hiprio_dispatch_work);
}
}
@@ -670,9 +928,13 @@ static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
struct virtio_fs *fs)
{
+ struct virtqueue_info *vqs_info;
struct virtqueue **vqs;
- vq_callback_t **callbacks;
- const char **names;
+ /* Specify pre_vectors to ensure that the queues before the
+ * request queues (e.g. hiprio) don't claim any of the CPUs in
+ * the multi-queue mapping and interrupt affinities
+ */
+ struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
unsigned int i;
int ret = 0;
@@ -681,24 +943,27 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
if (fs->num_request_queues == 0)
return -EINVAL;
+ /* Truncate the number of request queues to nr_cpu_ids */
+ fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
+ nr_cpu_ids);
fs->nvqs = VQ_REQUEST + fs->num_request_queues;
fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
if (!fs->vqs)
return -ENOMEM;
vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
- callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
- GFP_KERNEL);
- names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
- if (!vqs || !callbacks || !names) {
+ fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
+ dev_to_node(&vdev->dev));
+ vqs_info = kcalloc(fs->nvqs, sizeof(*vqs_info), GFP_KERNEL);
+ if (!vqs || !vqs_info || !fs->mq_map) {
ret = -ENOMEM;
goto out;
}
/* Initialize the hiprio/forget request virtqueue */
- callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
+ vqs_info[VQ_HIPRIO].callback = virtio_fs_vq_done;
virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
- names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
+ vqs_info[VQ_HIPRIO].name = fs->vqs[VQ_HIPRIO].name;
/* Initialize the requests virtqueues */
for (i = VQ_REQUEST; i < fs->nvqs; i++) {
@@ -706,11 +971,11 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
- callbacks[i] = virtio_fs_vq_done;
- names[i] = fs->vqs[i].name;
+ vqs_info[i].callback = virtio_fs_vq_done;
+ vqs_info[i].name = fs->vqs[i].name;
}
- ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+ ret = virtio_find_vqs(vdev, fs->nvqs, vqs, vqs_info, &desc);
if (ret < 0)
goto out;
@@ -719,17 +984,17 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
virtio_fs_start_all_queues(fs);
out:
- kfree(names);
- kfree(callbacks);
+ kfree(vqs_info);
kfree(vqs);
- if (ret)
+ if (ret) {
kfree(fs->vqs);
+ kfree(fs->mq_map);
+ }
return ret;
}
/* Free virtqueues (device must already be reset) */
-static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
- struct virtio_fs *fs)
+static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{
vdev->config->del_vqs(vdev);
}
@@ -739,43 +1004,31 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
* offset.
*/
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode,
+ void **kaddr, unsigned long *pfn)
{
struct virtio_fs *fs = dax_get_private(dax_dev);
phys_addr_t offset = PFN_PHYS(pgoff);
- size_t max_nr_pages = fs->window_len/PAGE_SIZE - pgoff;
+ size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;
if (kaddr)
*kaddr = fs->window_kaddr + offset;
if (pfn)
- *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
- PFN_DEV | PFN_MAP);
+ *pfn = PHYS_PFN(fs->window_phys_addr + offset);
return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}
-static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter(addr, bytes, i);
-}
-
-static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
-{
- return copy_to_iter(addr, bytes, i);
-}
-
static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
pgoff_t pgoff, size_t nr_pages)
{
long rc;
void *kaddr;
- rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
+ NULL);
if (rc < 0)
- return rc;
+ return dax_mem2blk_err(rc);
+
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
return 0;
@@ -783,8 +1036,6 @@ static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
static const struct dax_operations virtio_fs_dax_ops = {
.direct_access = virtio_fs_direct_access,
- .copy_from_iter = virtio_fs_copy_from_iter,
- .copy_to_iter = virtio_fs_copy_to_iter,
.zero_page_range = virtio_fs_zero_page_range,
};
@@ -796,8 +1047,11 @@ static void virtio_fs_cleanup_dax(void *data)
put_dax(dax_dev);
}
+DEFINE_FREE(cleanup_dax, struct dax_device *, if (!IS_ERR_OR_NULL(_T)) virtio_fs_cleanup_dax(_T))
+
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
+ struct dax_device *dax_dev __free(cleanup_dax) = NULL;
struct virtio_shm_region cache_reg;
struct dev_pagemap *pgmap;
bool have_cache;
@@ -805,6 +1059,12 @@ static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
if (!IS_ENABLED(CONFIG_FUSE_DAX))
return 0;
+ dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
+ if (IS_ERR(dax_dev)) {
+ int rc = PTR_ERR(dax_dev);
+ return rc == -EOPNOTSUPP ? 0 : rc;
+ }
+
/* Get cache region */
have_cache = virtio_get_shm_region(vdev, &cache_reg,
(u8)VIRTIO_FS_SHMCAP_ID_CACHE);
@@ -850,10 +1110,7 @@ static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);
- fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
- if (IS_ERR(fs->dax_dev))
- return PTR_ERR(fs->dax_dev);
-
+ fs->dax_dev = no_free_ptr(dax_dev);
return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
fs->dax_dev);
}
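
Editor's note: the DAX setup now relies on the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_FREE() binds a destructor to the dax_dev local, __free() arms it at declaration, and no_free_ptr() transfers ownership to fs->dax_dev on the success path so the destructor does not fire. Below is a user-space analog of the same pattern built on the compiler's cleanup attribute; the names are illustrative, not the kernel macros.

/* User-space analog of the __free()/no_free_ptr() pattern (GCC/Clang). */
#include <stdio.h>
#include <stdlib.h>

static void free_bufp(char **p)
{
	if (*p) {
		free(*p);
		printf("auto-freed\n");
	}
}
#define __auto_free __attribute__((cleanup(free_bufp)))

static char *make_buf(int fail)
{
	__auto_free char *buf = malloc(64);	/* freed on every early return */

	if (!buf || fail)
		return NULL;			/* cleanup fires here */

	/* Success: steal the pointer so cleanup sees NULL (no_free_ptr analog) */
	char *ret = buf;
	buf = NULL;
	return ret;
}

int main(void)
{
	free(make_buf(1));	/* failure path: buffer auto-freed, returns NULL */
	free(make_buf(0));	/* success path: ownership handed to the caller */
	return 0;
}
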
@@ -866,7 +1123,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
fs = kzalloc(sizeof(*fs), GFP_KERNEL);
if (!fs)
return -ENOMEM;
- kref_init(&fs->refcount);
+ kobject_init(&fs->kobj, &virtio_fs_ktype);
vdev->priv = fs;
ret = virtio_fs_read_tag(vdev, fs);
@@ -877,7 +1134,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
if (ret < 0)
goto out;
- /* TODO vq affinity */
+ virtio_fs_map_queues(vdev, fs);
ret = virtio_fs_setup_dax(vdev, fs);
if (ret < 0)
@@ -888,20 +1145,19 @@ static int virtio_fs_probe(struct virtio_device *vdev)
*/
virtio_device_ready(vdev);
- ret = virtio_fs_add_instance(fs);
+ ret = virtio_fs_add_instance(vdev, fs);
if (ret < 0)
goto out_vqs;
return 0;
out_vqs:
- vdev->config->reset(vdev);
- virtio_fs_cleanup_vqs(vdev, fs);
- kfree(fs->vqs);
+ virtio_reset_device(vdev);
+ virtio_fs_cleanup_vqs(vdev);
out:
vdev->priv = NULL;
- kfree(fs);
+ kobject_put(&fs->kobj);
return ret;
}
@@ -925,14 +1181,18 @@ static void virtio_fs_remove(struct virtio_device *vdev)
mutex_lock(&virtio_fs_mutex);
/* This device is going away. No one should get new reference */
list_del_init(&fs->list);
+ virtio_fs_delete_queues_sysfs(fs);
+ sysfs_remove_link(&fs->kobj, "device");
+ kobject_put(fs->mqs_kobj);
+ kobject_del(&fs->kobj);
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
- vdev->config->reset(vdev);
- virtio_fs_cleanup_vqs(vdev, fs);
+ virtio_reset_device(vdev);
+ virtio_fs_cleanup_vqs(vdev);
vdev->priv = NULL;
/* Put device reference on virtio_fs object */
- virtio_fs_put(fs);
+ virtio_fs_put_locked(fs);
mutex_unlock(&virtio_fs_mutex);
}
@@ -960,7 +1220,6 @@ static const unsigned int feature_table[] = {};
static struct virtio_driver virtio_fs_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.feature_table = feature_table,
.feature_table_size = ARRAY_SIZE(feature_table),
@@ -972,22 +1231,13 @@ static struct virtio_driver virtio_fs_driver = {
#endif
};
-static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
- struct fuse_forget_link *link;
struct virtio_fs_forget *forget;
struct virtio_fs_forget_req *req;
- struct virtio_fs *fs;
- struct virtio_fs_vq *fsvq;
- u64 unique;
-
- link = fuse_dequeue_forget(fiq, 1, NULL);
- unique = fuse_get_unique(fiq);
-
- fs = fiq->priv;
- fsvq = &fs->vqs[VQ_HIPRIO];
- spin_unlock(&fiq->lock);
+ struct virtio_fs *fs = fiq->priv;
+ struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
+ u64 unique = fuse_get_unique(fiq);
/* Allocate a buffer for the request */
forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1007,8 +1257,7 @@ __releases(fiq->lock)
kfree(link);
}
-static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
/*
* TODO interrupts.
@@ -1017,19 +1266,18 @@ __releases(fiq->lock)
* Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
* with shared lock between host and guest.
*/
- spin_unlock(&fiq->lock);
}
/* Count number of scatter-gather elements required */
-static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
- unsigned int num_pages,
- unsigned int total_len)
+static unsigned int sg_count_fuse_folios(struct fuse_folio_desc *folio_descs,
+ unsigned int num_folios,
+ unsigned int total_len)
{
unsigned int i;
unsigned int this_len;
- for (i = 0; i < num_pages && total_len; i++) {
- this_len = min(page_descs[i].length, total_len);
+ for (i = 0; i < num_folios && total_len; i++) {
+ this_len = min(folio_descs[i].length, total_len);
total_len -= this_len;
}
@@ -1048,8 +1296,8 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req)
if (args->in_pages) {
size = args->in_args[args->in_numargs - 1].size;
- total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
- size);
+ total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
+ size);
}
if (!test_bit(FR_ISREPLY, &req->flags))
@@ -1062,27 +1310,27 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req)
if (args->out_pages) {
size = args->out_args[args->out_numargs - 1].size;
- total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
- size);
+ total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
+ size);
}
return total_sgs;
}
-/* Add pages to scatter-gather list and return number of elements used */
-static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
- struct page **pages,
- struct fuse_page_desc *page_descs,
- unsigned int num_pages,
- unsigned int total_len)
+/* Add folios to scatter-gather list and return number of elements used */
+static unsigned int sg_init_fuse_folios(struct scatterlist *sg,
+ struct folio **folios,
+ struct fuse_folio_desc *folio_descs,
+ unsigned int num_folios,
+ unsigned int total_len)
{
unsigned int i;
unsigned int this_len;
- for (i = 0; i < num_pages && total_len; i++) {
+ for (i = 0; i < num_folios && total_len; i++) {
sg_init_table(&sg[i], 1);
- this_len = min(page_descs[i].length, total_len);
- sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
+ this_len = min(folio_descs[i].length, total_len);
+ sg_set_folio(&sg[i], folios[i], this_len, folio_descs[i].offset);
total_len -= this_len;
}
@@ -1107,10 +1355,10 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
sg_init_one(&sg[total_sgs++], argbuf, len);
if (argpages)
- total_sgs += sg_init_fuse_pages(&sg[total_sgs],
- ap->pages, ap->descs,
- ap->num_pages,
- args[numargs - 1].size);
+ total_sgs += sg_init_fuse_folios(&sg[total_sgs],
+ ap->folios, ap->descs,
+ ap->num_folios,
+ args[numargs - 1].size);
if (len_used)
*len_used = len;
@@ -1120,7 +1368,8 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
- struct fuse_req *req, bool in_flight)
+ struct fuse_req *req, bool in_flight,
+ gfp_t gfp)
{
/* requests need at least 4 elements */
struct scatterlist *stack_sgs[6];
@@ -1133,7 +1382,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
unsigned int out_sgs = 0;
unsigned int in_sgs = 0;
unsigned int total_sgs;
- unsigned int i;
+ unsigned int i, hash;
int ret;
bool notify;
struct fuse_pqueue *fpq;
@@ -1141,8 +1390,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
/* Does the sglist fit on the stack? */
total_sgs = sg_count_fuse_req(req);
if (total_sgs > ARRAY_SIZE(stack_sgs)) {
- sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
- sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
+ sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp);
+ sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp);
if (!sgs || !sg) {
ret = -ENOMEM;
goto out;
@@ -1150,7 +1399,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
}
/* Use a bounce buffer since stack args cannot be mapped */
- ret = copy_args_to_argbuf(req);
+ ret = copy_args_to_argbuf(req, gfp);
if (ret < 0)
goto out;
@@ -1193,8 +1442,9 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
/* Request successfully sent. */
fpq = &fsvq->fud->pq;
+ hash = fuse_req_hash(req->in.h.unique);
spin_lock(&fpq->lock);
- list_add_tail(&req->list, fpq->processing);
+ list_add_tail(&req->list, &fpq->processing[hash]);
spin_unlock(&fpq->lock);
set_bit(FR_SENT, &req->flags);
/* matches barrier in request_wait_answer() */
@@ -1222,33 +1472,30 @@ out:
return ret;
}
-static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
- unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
+ unsigned int queue_id;
struct virtio_fs *fs;
- struct fuse_req *req;
struct virtio_fs_vq *fsvq;
int ret;
- WARN_ON(list_empty(&fiq->pending));
- req = list_last_entry(&fiq->pending, struct fuse_req, list);
+ fuse_request_assign_unique(fiq, req);
+
clear_bit(FR_PENDING, &req->flags);
- list_del_init(&req->list);
- WARN_ON(!list_empty(&fiq->pending));
- spin_unlock(&fiq->lock);
fs = fiq->priv;
+ queue_id = fs->mq_map[raw_smp_processor_id()];
- pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
- __func__, req->in.h.opcode, req->in.h.unique,
+ pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
+ __func__, req->in.h.opcode, req->in.h.unique,
req->in.h.nodeid, req->in.h.len,
- fuse_len_args(req->args->out_numargs, req->args->out_args));
+ fuse_len_args(req->args->out_numargs, req->args->out_args),
+ queue_id);
fsvq = &fs->vqs[queue_id];
- ret = virtio_fs_enqueue_req(fsvq, req, false);
+ ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC);
if (ret < 0) {
- if (ret == -ENOMEM || ret == -ENOSPC) {
+ if (ret == -ENOSPC) {
/*
* Virtqueue full. Retry submission from worker
* context as we might be holding fc->bg_lock.
@@ -1256,8 +1503,6 @@ __releases(fiq->lock)
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->queued_reqs);
inc_in_flight_req(fsvq);
- schedule_delayed_work(&fsvq->dispatch_work,
- msecs_to_jiffies(1));
spin_unlock(&fsvq->lock);
return;
}
@@ -1267,17 +1512,17 @@ __releases(fiq->lock)
/* Can't end request in submission context. Use a worker */
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->end_reqs);
- schedule_delayed_work(&fsvq->dispatch_work, 0);
+ schedule_work(&fsvq->dispatch_work);
spin_unlock(&fsvq->lock);
return;
}
}
static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
- .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
- .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
- .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
- .release = virtio_fs_fiq_release,
+ .send_forget = virtio_fs_send_forget,
+ .send_interrupt = virtio_fs_send_interrupt,
+ .send_req = virtio_fs_send_req,
+ .release = virtio_fs_fiq_release,
};
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
@@ -1326,8 +1571,8 @@ static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
/* virtiofs allocates and installs its own fuse devices */
ctx->fudptr = NULL;
- if (ctx->dax) {
- if (!fs->dax_dev) {
+ if (ctx->dax_mode != FUSE_DAX_NEVER) {
+ if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
err = -EINVAL;
pr_err("virtio-fs: dax can't be enabled as filesystem"
" device does not support it.\n");
@@ -1421,6 +1666,9 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
unsigned int virtqueue_size;
int err = -EIO;
+ if (!fsc->source)
+ return invalf(fsc, "No source specified");
+
/* This gets a reference on virtio_fs object. This ptr gets installed
* in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
* to drop the reference to this object.
@@ -1449,6 +1697,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
fc->delete_stale = true;
fc->auto_submounts = true;
fc->sync_fs = true;
+ fc->use_pages_for_kvec_io = true;
/* Tell FUSE to split requests that exceed the virtqueue's size */
fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
@@ -1477,9 +1726,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
out_err:
kfree(fc);
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(fs);
- mutex_unlock(&virtio_fs_mutex);
return err;
}
@@ -1509,23 +1756,59 @@ static struct file_system_type virtio_fs_type = {
.name = "virtiofs",
.init_fs_context = virtio_fs_init_fs_context,
.kill_sb = virtio_kill_sb,
+ .fs_flags = FS_ALLOW_IDMAP,
};
+static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+{
+ const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);
+
+ add_uevent_var(env, "TAG=%s", fs->tag);
+ return 0;
+}
+
+static const struct kset_uevent_ops virtio_fs_uevent_ops = {
+ .uevent = virtio_fs_uevent,
+};
+
+static int __init virtio_fs_sysfs_init(void)
+{
+ virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops,
+ fs_kobj);
+ if (!virtio_fs_kset)
+ return -ENOMEM;
+ return 0;
+}
+
+static void virtio_fs_sysfs_exit(void)
+{
+ kset_unregister(virtio_fs_kset);
+ virtio_fs_kset = NULL;
+}
+
static int __init virtio_fs_init(void)
{
int ret;
- ret = register_virtio_driver(&virtio_fs_driver);
+ ret = virtio_fs_sysfs_init();
if (ret < 0)
return ret;
+ ret = register_virtio_driver(&virtio_fs_driver);
+ if (ret < 0)
+ goto sysfs_exit;
+
ret = register_filesystem(&virtio_fs_type);
- if (ret < 0) {
- unregister_virtio_driver(&virtio_fs_driver);
- return ret;
- }
+ if (ret < 0)
+ goto unregister_virtio_driver;
return 0;
+
+unregister_virtio_driver:
+ unregister_virtio_driver(&virtio_fs_driver);
+sysfs_exit:
+ virtio_fs_sysfs_exit();
+ return ret;
}
module_init(virtio_fs_init);
@@ -1533,6 +1816,7 @@ static void __exit virtio_fs_exit(void)
{
unregister_filesystem(&virtio_fs_type);
unregister_virtio_driver(&virtio_fs_driver);
+ virtio_fs_sysfs_exit();
}
module_exit(virtio_fs_exit);
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 0d3e7177fce0..93dfb06b6cea 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -81,7 +81,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
}
ret = fuse_simple_request(fm, &args);
if (!ret && !size)
- ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
+ ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
if (ret == -ENOSYS) {
fm->fc->no_getxattr = 1;
ret = -EOPNOTSUPP;
@@ -143,7 +143,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
}
ret = fuse_simple_request(fm, &args);
if (!ret && !size)
- ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
+ ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
if (ret > 0 && size)
ret = fuse_verify_xattr_list(list, ret);
if (ret == -ENOSYS) {
@@ -164,9 +164,10 @@ int fuse_removexattr(struct inode *inode, const char *name)
args.opcode = FUSE_REMOVEXATTR;
args.nodeid = get_node_id(inode);
- args.in_numargs = 1;
- args.in_args[0].size = strlen(name) + 1;
- args.in_args[0].value = name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
err = fuse_simple_request(fm, &args);
if (err == -ENOSYS) {
fm->fc->no_removexattr = 1;
@@ -189,7 +190,7 @@ static int fuse_xattr_get(const struct xattr_handler *handler,
}
static int fuse_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
@@ -203,64 +204,13 @@ static int fuse_xattr_set(const struct xattr_handler *handler,
return fuse_setxattr(inode, name, value, size, flags, 0);
}
-static bool no_xattr_list(struct dentry *dentry)
-{
- return false;
-}
-
-static int no_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *value, size_t size)
-{
- return -EOPNOTSUPP;
-}
-
-static int no_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
- struct dentry *dentry, struct inode *nodee,
- const char *name, const void *value,
- size_t size, int flags)
-{
- return -EOPNOTSUPP;
-}
-
static const struct xattr_handler fuse_xattr_handler = {
.prefix = "",
.get = fuse_xattr_get,
.set = fuse_xattr_set,
};
-const struct xattr_handler *fuse_xattr_handlers[] = {
- &fuse_xattr_handler,
- NULL
-};
-
-const struct xattr_handler *fuse_acl_xattr_handlers[] = {
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
- &fuse_xattr_handler,
- NULL
-};
-
-static const struct xattr_handler fuse_no_acl_access_xattr_handler = {
- .name = XATTR_NAME_POSIX_ACL_ACCESS,
- .flags = ACL_TYPE_ACCESS,
- .list = no_xattr_list,
- .get = no_xattr_get,
- .set = no_xattr_set,
-};
-
-static const struct xattr_handler fuse_no_acl_default_xattr_handler = {
- .name = XATTR_NAME_POSIX_ACL_DEFAULT,
- .flags = ACL_TYPE_ACCESS,
- .list = no_xattr_list,
- .get = no_xattr_get,
- .set = no_xattr_set,
-};
-
-const struct xattr_handler *fuse_no_acl_xattr_handlers[] = {
- &fuse_no_acl_access_xattr_handler,
- &fuse_no_acl_default_xattr_handler,
+const struct xattr_handler * const fuse_xattr_handlers[] = {
&fuse_xattr_handler,
NULL
};
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 03c966840422..7bd231d16d4a 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config GFS2_FS
tristate "GFS2 file system support"
+ select BUFFER_HEAD
select FS_POSIX_ACL
select CRC32
- select LIBCRC32C
select QUOTACTL
select FS_IOMAP
help
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 734d1f05d823..443640e6fb9c 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -109,9 +109,10 @@ out:
return error;
}
-int gfs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
bool need_unlock = false;
@@ -134,14 +135,14 @@ int gfs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
- ret = posix_acl_update_mode(&init_user_ns, inode, &mode, &acl);
+ ret = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode, &acl);
if (ret)
goto unlock;
}
ret = __gfs2_set_acl(inode, acl, type);
if (!ret && mode != inode->i_mode) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode->i_mode = mode;
mark_inode_dirty(inode);
}
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index cd180ca7c959..82f5b09c04e6 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -11,9 +11,9 @@
#define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12)
-extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
-extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int gfs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
- struct posix_acl *acl, int type);
+struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
+int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct posix_acl *acl, int type);
#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 005e920f5d4a..e79ad087512a 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -37,27 +37,6 @@
#include "aops.h"
-void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int len)
-{
- struct buffer_head *head = page_buffers(page);
- unsigned int bsize = head->b_size;
- struct buffer_head *bh;
- unsigned int to = from + len;
- unsigned int start, end;
-
- for (bh = head, start = 0; bh != head || !start;
- bh = bh->b_this_page, start = end) {
- end = start + bsize;
- if (end <= from)
- continue;
- if (start >= to)
- break;
- set_buffer_uptodate(bh);
- gfs2_trans_add_data(ip->i_gl, bh);
- }
-}
-
/**
* gfs2_get_block_noalloc - Fills in a buffer head with details about a block
* @inode: The inode
@@ -82,115 +61,90 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
}
/**
- * gfs2_writepage - Write page for writeback mappings
- * @page: The page
- * @wbc: The writeback control
- */
-static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct iomap_writepage_ctx wpc = { };
-
- if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
- goto out;
- if (current->journal_info)
- goto redirty;
- return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
-out:
- unlock_page(page);
- return 0;
-}
-
-/**
- * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
- * @page: The page to write
+ * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
+ * @folio: The folio to write
* @wbc: The writeback control
*
- * This is the same as calling block_write_full_page, but it also
+ * This is the same as calling block_write_full_folio, but it also
* writes pages outside of i_size
*/
-static int gfs2_write_jdata_page(struct page *page,
+static int gfs2_write_jdata_folio(struct folio *folio,
struct writeback_control *wbc)
{
- struct inode * const inode = page->mapping->host;
+ struct inode * const inode = folio->mapping->host;
loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = i_size >> PAGE_SHIFT;
- unsigned offset;
/*
- * The page straddles i_size. It must be zeroed out on each and every
+ * The folio straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when mapped, and
+ * the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- offset = i_size & (PAGE_SIZE - 1);
- if (page->index == end_index && offset)
- zero_user_segment(page, offset, PAGE_SIZE);
+ if (folio_pos(folio) < i_size && i_size < folio_next_pos(folio))
+ folio_zero_segment(folio, offset_in_folio(folio, i_size),
+ folio_size(folio));
- return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
- end_buffer_async_write);
+ return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
+ wbc);
}
/**
- * __gfs2_jdata_writepage - The core of jdata writepage
- * @page: The page to write
+ * __gfs2_jdata_write_folio - The core of jdata writepage
+ * @folio: The folio to write
* @wbc: The writeback control
*
- * This is shared between writepage and writepages and implements the
- * core of the writepage operation. If a transaction is required then
- * PageChecked will have been set and the transaction will have
+ * Implements the core of writeback. If a transaction is required then
+ * the checked flag will have been set and the transaction will have
* already been started before this is called.
*/
-
-static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
+static int __gfs2_jdata_write_folio(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (PageChecked(page)) {
- ClearPageChecked(page);
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, inode->i_sb->s_blocksize,
- BIT(BH_Dirty)|BIT(BH_Uptodate));
+ if (folio_test_checked(folio)) {
+ folio_clear_checked(folio);
+ if (!folio_buffers(folio)) {
+ create_empty_buffers(folio,
+ inode->i_sb->s_blocksize,
+ BIT(BH_Dirty)|BIT(BH_Uptodate));
}
- gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
+ gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
}
- return gfs2_write_jdata_page(page, wbc);
+ return gfs2_write_jdata_folio(folio, wbc);
}
/**
- * gfs2_jdata_writepage - Write complete page
- * @page: Page to write
+ * gfs2_jdata_writeback - Write jdata folios to the log
+ * @mapping: The mapping to write
* @wbc: The writeback control
*
* Returns: errno
- *
*/
-
-static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
+int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
+ struct folio *folio = NULL;
+ int error;
- if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
- goto out;
- if (PageChecked(page) || current->journal_info)
- goto out_ignore;
- return __gfs2_jdata_writepage(page, wbc);
+ BUG_ON(current->journal_info);
+ if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
+ return 0;
-out_ignore:
- redirty_page_for_writepage(wbc, page);
-out:
- unlock_page(page);
- return 0;
+ while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
+ if (folio_test_checked(folio)) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ continue;
+ }
+ error = __gfs2_jdata_write_folio(folio, wbc);
+ }
+
+ return error;
}
/**
@@ -204,99 +158,99 @@ static int gfs2_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
- struct iomap_writepage_ctx wpc = { };
+ struct iomap_writepage_ctx wpc = {
+ .inode = mapping->host,
+ .wbc = wbc,
+ .ops = &gfs2_writeback_ops,
+ };
int ret;
/*
- * Even if we didn't write any pages here, we might still be holding
+ * Even if we didn't write enough pages here, we might still be holding
* dirty pages in the ail. We forcibly flush the ail because we don't
* want balance_dirty_pages() to loop indefinitely trying to write out
* pages held in the ail that it can't find.
*/
- ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
- if (ret == 0)
+ ret = iomap_writepages(&wpc);
+ if (ret == 0 && wbc->nr_to_write > 0)
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
return ret;
}
/**
- * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
+ * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
* @mapping: The mapping
* @wbc: The writeback control
- * @pvec: The vector of pages
- * @nr_pages: The number of pages to write
+ * @fbatch: The batch of folios
* @done_index: Page index
*
* Returns: non-zero if loop should terminate, zero otherwise
*/
-static int gfs2_write_jdata_pagevec(struct address_space *mapping,
+static int gfs2_write_jdata_batch(struct address_space *mapping,
struct writeback_control *wbc,
- struct pagevec *pvec,
- int nr_pages,
+ struct folio_batch *fbatch,
pgoff_t *done_index)
{
struct inode *inode = mapping->host;
struct gfs2_sbd *sdp = GFS2_SB(inode);
- unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
+ unsigned nrblocks;
int i;
int ret;
+ size_t size = 0;
+ int nr_folios = folio_batch_count(fbatch);
+
+ for (i = 0; i < nr_folios; i++)
+ size += folio_size(fbatch->folios[i]);
+ nrblocks = size >> inode->i_blkbits;
ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
if (ret < 0)
return ret;
- for(i = 0; i < nr_pages; i++) {
- struct page *page = pvec->pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch->folios[i];
- *done_index = page->index;
+ *done_index = folio->index;
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(folio->mapping != mapping)) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- if (PageWriteback(page)) {
+ if (folio_test_writeback(folio)) {
if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
else
goto continue_unlock;
}
- BUG_ON(PageWriteback(page));
- if (!clear_page_dirty_for_io(page))
+ BUG_ON(folio_test_writeback(folio));
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
trace_wbc_writepage(wbc, inode_to_bdi(inode));
- ret = __gfs2_jdata_writepage(page, wbc);
+ ret = __gfs2_jdata_write_folio(folio, wbc);
if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
-
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
- *done_index = page->index + 1;
- ret = 1;
- break;
- }
+ /*
+ * done_index is set past this page, so media errors
+ * will not choke background writeout for the entire
+ * file. This has consequences for range_cyclic
+ * semantics (ie. it may not be suitable for data
+ * integrity writeout).
+ */
+ *done_index = folio_next_index(folio);
+ ret = 1;
+ break;
}
/*
@@ -330,8 +284,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
{
int ret = 0;
int done = 0;
- struct pagevec pvec;
- int nr_pages;
+ struct folio_batch fbatch;
+ int nr_folios;
pgoff_t writeback_index;
pgoff_t index;
pgoff_t end;
@@ -340,7 +294,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
int range_whole = 0;
xa_mark_t tag;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -356,27 +310,25 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
+ tag = wbc_to_tag(wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && (index <= end)) {
- nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
- tag);
- if (nr_pages == 0)
+ nr_folios = filemap_get_folios_tag(mapping, &index, end,
+ tag, &fbatch);
+ if (nr_folios == 0)
break;
- ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
+ ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
+ &done_index);
if (ret)
done = 1;
if (ret > 0)
ret = 0;
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
@@ -423,83 +375,69 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
}
/**
- * stuffed_readpage - Fill in a Linux page with stuffed file data
+ * stuffed_read_folio - Fill in a Linux folio with stuffed file data
* @ip: the inode
- * @page: the page
+ * @folio: the folio
*
* Returns: errno
*/
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
- struct buffer_head *dibh;
- u64 dsize = i_size_read(&ip->i_inode);
- void *kaddr;
- int error;
+ struct buffer_head *dibh = NULL;
+ size_t dsize = i_size_read(&ip->i_inode);
+ void *from = NULL;
+ int error = 0;
/*
* Due to the order of unstuffing files and ->fault(), we can be
- * asked for a zero page in the case of a stuffed file being extended,
+ * asked for a zero folio in the case of a stuffed file being extended,
* so we need to supply one here. It doesn't happen often.
*/
- if (unlikely(page->index)) {
- zero_user(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
- return 0;
+ if (unlikely(folio->index)) {
+ dsize = 0;
+ } else {
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error)
+ goto out;
+ from = dibh->b_data + sizeof(struct gfs2_dinode);
}
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (error)
- return error;
-
- kaddr = kmap_atomic(page);
- if (dsize > gfs2_max_stuffed_size(ip))
- dsize = gfs2_max_stuffed_size(ip);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
- memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ folio_fill_tail(folio, 0, from, dsize);
brelse(dibh);
- SetPageUptodate(page);
+out:
+ folio_end_read(folio, error == 0);
- return 0;
+ return error;
}
-
-static int __gfs2_readpage(void *file, struct page *page)
+/**
+ * gfs2_read_folio - read a folio from a file
+ * @file: The file to read
+ * @folio: The folio in the file
+ */
+static int gfs2_read_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- int error;
+ int error = 0;
if (!gfs2_is_jdata(ip) ||
- (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
- error = iomap_readpage(page, &gfs2_iomap_ops);
+ (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
+ iomap_bio_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
- error = stuffed_readpage(ip, page);
- unlock_page(page);
+ error = stuffed_read_folio(ip, folio);
} else {
- error = mpage_readpage(page, gfs2_block_map);
+ error = mpage_read_folio(folio, gfs2_block_map);
}
- if (unlikely(gfs2_withdrawn(sdp)))
+ if (gfs2_withdrawn(sdp))
return -EIO;
return error;
}
/**
- * gfs2_readpage - read a page of a file
- * @file: The file to read
- * @page: The page of the file
- */
-
-static int gfs2_readpage(struct file *file, struct page *page)
-{
- return __gfs2_readpage(file, page);
-}
-
-/**
* gfs2_internal_read - read an internal file
* @ip: The gfs2 inode
* @buf: The buffer to fill
@@ -508,31 +446,29 @@ static int gfs2_readpage(struct file *file, struct page *page)
*
*/
-int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
- unsigned size)
+ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+ size_t size)
{
struct address_space *mapping = ip->i_inode.i_mapping;
unsigned long index = *pos >> PAGE_SHIFT;
- unsigned offset = *pos & (PAGE_SIZE - 1);
- unsigned copied = 0;
- unsigned amt;
- struct page *page;
- void *p;
+ size_t copied = 0;
do {
- amt = size - copied;
- if (offset + size > PAGE_SIZE)
- amt = PAGE_SIZE - offset;
- page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
- if (IS_ERR(page))
- return PTR_ERR(page);
- p = kmap_atomic(page);
- memcpy(buf + copied, p + offset, amt);
- kunmap_atomic(p);
- put_page(page);
- copied += amt;
- index++;
- offset = 0;
+ size_t offset, chunk;
+ struct folio *folio;
+
+ folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -EINTR)
+ continue;
+ return PTR_ERR(folio);
+ }
+ offset = *pos + copied - folio_pos(folio);
+ chunk = min(size - copied, folio_size(folio) - offset);
+ memcpy_from_folio(buf + copied, folio, offset, chunk);
+ index = folio_next_index(folio);
+ folio_put(folio);
+ copied += chunk;
} while(copied < size);
(*pos) += size;
return size;
@@ -563,7 +499,7 @@ static void gfs2_readahead(struct readahead_control *rac)
else if (gfs2_is_jdata(ip))
mpage_readahead(rac, gfs2_block_map);
else
- iomap_readahead(rac, &gfs2_iomap_ops);
+ iomap_bio_readahead(rac, &gfs2_iomap_ops);
}
/**
@@ -606,18 +542,12 @@ out:
gfs2_trans_end(sdp);
}
-/**
- * jdata_set_page_dirty - Page dirtying function
- * @page: The page to dirty
- *
- * Returns: 1 if it dirtyed the page, or 0 otherwise
- */
-
-static int jdata_set_page_dirty(struct page *page)
+static bool gfs2_jdata_dirty_folio(struct address_space *mapping,
+ struct folio *folio)
{
if (current->journal_info)
- SetPageChecked(page);
- return __set_page_dirty_buffers(page);
+ folio_set_checked(folio);
+ return block_dirty_folio(mapping, folio);
}
/**
@@ -672,22 +602,23 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
unlock_buffer(bh);
}
-static void gfs2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
{
- struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
- unsigned int stop = offset + length;
- int partial_page = (offset || length < PAGE_SIZE);
+ struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
+ size_t stop = offset + length;
+ int partial_page = (offset || length < folio_size(folio));
struct buffer_head *bh, *head;
unsigned long pos = 0;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
if (!partial_page)
- ClearPageChecked(page);
- if (!page_has_buffers(page))
+ folio_clear_checked(folio);
+ head = folio_buffers(folio);
+ if (!head)
goto out;
- bh = head = page_buffers(page);
+ bh = head;
do {
if (pos + bh->b_size > stop)
return;
@@ -699,42 +630,44 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
} while (bh != head);
out:
if (!partial_page)
- try_to_release_page(page, 0);
+ filemap_release_folio(folio, 0);
}
/**
- * gfs2_releasepage - free the metadata associated with a page
- * @page: the page that's being released
+ * gfs2_release_folio - free the metadata associated with a folio
+ * @folio: the folio that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
*
- * Calls try_to_free_buffers() to free the buffers and put the page if the
+ * Calls try_to_free_buffers() to free the buffers and put the folio if the
* buffers can be released.
*
- * Returns: 1 if the page was put or else 0
+ * Returns: true if the folio was put or else false
*/
-int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
+bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
- if (!page_has_buffers(page))
- return 0;
+ head = folio_buffers(folio);
+ if (!head)
+ return false;
/*
- * From xfs_vm_releasepage: mm accommodates an old ext3 case where
- * clean pages might not have had the dirty bit cleared. Thus, it can
- * send actual dirty pages to ->releasepage() via shrink_active_list().
+ * mm accommodates an old ext3 case where clean folios might
+ * not have had the dirty bit cleared. Thus, it can send actual
+ * dirty folios to ->release_folio() via shrink_active_list().
*
- * As a workaround, we skip pages that contain dirty buffers below.
- * Once ->releasepage isn't called on dirty pages anymore, we can warn
- * on dirty buffers like we used to here again.
+ * As a workaround, we skip folios that contain dirty buffers
+ * below. Once ->release_folio isn't called on dirty folios
+ * anymore, we can warn on dirty buffers like we used to here
+ * again.
*/
gfs2_log_lock(sdp);
- head = bh = page_buffers(page);
+ bh = head;
do {
if (atomic_read(&bh->b_count))
goto cannot_release;
@@ -744,9 +677,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
goto cannot_release;
bh = bh->b_this_page;
- } while(bh != head);
+ } while (bh != head);
- head = bh = page_buffers(page);
+ bh = head;
do {
bd = bh->b_private;
if (bd) {
@@ -767,39 +700,37 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
} while (bh != head);
gfs2_log_unlock(sdp);
- return try_to_free_buffers(page);
+ return try_to_free_buffers(folio);
cannot_release:
gfs2_log_unlock(sdp);
- return 0;
+ return false;
}
static const struct address_space_operations gfs2_aops = {
- .writepage = gfs2_writepage,
.writepages = gfs2_writepages,
- .readpage = gfs2_readpage,
+ .read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .releasepage = iomap_releasepage,
- .invalidatepage = iomap_invalidatepage,
+ .dirty_folio = iomap_dirty_folio,
+ .release_folio = iomap_release_folio,
+ .invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
- .direct_IO = noop_direct_IO,
- .migratepage = iomap_migrate_page,
+ .migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
static const struct address_space_operations gfs2_jdata_aops = {
- .writepage = gfs2_jdata_writepage,
.writepages = gfs2_jdata_writepages,
- .readpage = gfs2_readpage,
+ .read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
- .set_page_dirty = jdata_set_page_dirty,
+ .dirty_folio = gfs2_jdata_dirty_folio,
.bmap = gfs2_bmap,
- .invalidatepage = gfs2_invalidatepage,
- .releasepage = gfs2_releasepage,
+ .migrate_folio = buffer_migrate_folio,
+ .invalidate_folio = gfs2_invalidate_folio,
+ .release_folio = gfs2_release_folio,
.is_partially_uptodate = block_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
};
void gfs2_set_aops(struct inode *inode)
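gfs2_write_jdata_batch() above now sizes the transaction from the folios actually present in the batch rather than assuming every entry is PAGE_SIZE. A small standalone sketch of that calculation follows; the folio sizes and block-size shift are made-up sample values, not taken from the patch.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* Hypothetical batch: folios of 4 KiB, 16 KiB and 64 KiB. */
	size_t folio_sizes[] = { 4096, 16384, 65536 };
	unsigned int blkbits = 12;	/* 4 KiB filesystem blocks */
	size_t size = 0;
	unsigned int nrblocks;

	for (size_t i = 0; i < sizeof(folio_sizes) / sizeof(folio_sizes[0]); i++)
		size += folio_sizes[i];

	/* Same shift as "nrblocks = size >> inode->i_blkbits" in the patch. */
	nrblocks = size >> blkbits;
	printf("reserve %u blocks for %zu bytes of dirty folios\n",
	       nrblocks, size);
	return 0;
}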
diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
index ff9877a68780..bf002522a782 100644
--- a/fs/gfs2/aops.h
+++ b/fs/gfs2/aops.h
@@ -8,8 +8,7 @@
#include "incore.h"
-extern void adjust_fs_space(struct inode *inode);
-extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int len);
+void adjust_fs_space(struct inode *inode);
+int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc);
#endif /* __AOPS_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7235d539e969..131091520de6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -43,56 +43,51 @@ struct metapath {
static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
/**
- * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
+ * gfs2_unstuffer_folio - unstuff a stuffed inode into a block cached by a folio
* @ip: the inode
* @dibh: the dinode buffer
* @block: the block number that was allocated
- * @page: The (optional) page. This is looked up if @page is NULL
+ * @folio: The folio.
*
* Returns: errno
*/
-
-static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
- u64 block, struct page *page)
+static int gfs2_unstuffer_folio(struct gfs2_inode *ip, struct buffer_head *dibh,
+ u64 block, struct folio *folio)
{
struct inode *inode = &ip->i_inode;
- if (!PageUptodate(page)) {
- void *kaddr = kmap(page);
+ if (!folio_test_uptodate(folio)) {
+ void *kaddr = kmap_local_folio(folio, 0);
u64 dsize = i_size_read(inode);
- if (dsize > gfs2_max_stuffed_size(ip))
- dsize = gfs2_max_stuffed_size(ip);
-
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
- memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
- kunmap(page);
+ memset(kaddr + dsize, 0, folio_size(folio) - dsize);
+ kunmap_local(kaddr);
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
if (gfs2_is_jdata(ip)) {
- struct buffer_head *bh;
+ struct buffer_head *bh = folio_buffers(folio);
- if (!page_has_buffers(page))
- create_empty_buffers(page, BIT(inode->i_blkbits),
- BIT(BH_Uptodate));
+ if (!bh)
+ bh = create_empty_buffers(folio,
+ BIT(inode->i_blkbits), BIT(BH_Uptodate));
- bh = page_buffers(page);
if (!buffer_mapped(bh))
map_bh(bh, inode->i_sb, block);
set_buffer_uptodate(bh);
gfs2_trans_add_data(ip->i_gl, bh);
} else {
- set_page_dirty(page);
+ folio_mark_dirty(folio);
gfs2_ordered_add_inode(ip);
}
return 0;
}
-static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
+static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct folio *folio)
{
struct buffer_head *bh, *dibh;
struct gfs2_dinode *di;
@@ -109,7 +104,7 @@ static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
and write it out to disk */
unsigned int n = 1;
- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0);
if (error)
goto out_brelse;
if (isdir) {
@@ -121,7 +116,7 @@ static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
dibh, sizeof(struct gfs2_dinode));
brelse(bh);
} else {
- error = gfs2_unstuffer_page(ip, dibh, block, page);
+ error = gfs2_unstuffer_folio(ip, dibh, block, folio);
if (error)
goto out_brelse;
}
@@ -160,17 +155,17 @@ out_brelse:
int gfs2_unstuff_dinode(struct gfs2_inode *ip)
{
struct inode *inode = &ip->i_inode;
- struct page *page;
+ struct folio *folio;
int error;
down_write(&ip->i_rw_mutex);
- page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
- error = -ENOMEM;
- if (!page)
+ folio = filemap_grab_folio(inode->i_mapping, 0);
+ error = PTR_ERR(folio);
+ if (IS_ERR(folio))
goto out;
- error = __gfs2_unstuff_inode(ip, page);
- unlock_page(page);
- put_page(page);
+ error = __gfs2_unstuff_inode(ip, folio);
+ folio_unlock(folio);
+ folio_put(folio);
out:
up_write(&ip->i_rw_mutex);
return error;
@@ -310,9 +305,8 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
if (trylock_buffer(rabh)) {
if (!buffer_uptodate(rabh)) {
rabh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ,
- REQ_RAHEAD | REQ_META | REQ_PRIO,
- rabh);
+ submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+ REQ_PRIO, rabh);
continue;
}
unlock_buffer(rabh);
@@ -321,6 +315,12 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
}
}
+static inline struct buffer_head *
+metapath_dibh(struct metapath *mp)
+{
+ return mp->mp_bh[0];
+}
+
static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
unsigned int x, unsigned int h)
{
@@ -419,13 +419,12 @@ static void release_metapath(struct metapath *mp)
* gfs2_extent_length - Returns length of an extent of blocks
* @bh: The metadata block
* @ptr: Current position in @bh
- * @limit: Max extent length to return
* @eob: Set to 1 if we hit "end of block"
*
* Returns: The length of the extent (minimum of one block)
*/
-static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
+static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, int *eob)
{
const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
const __be64 *first = ptr;
@@ -606,9 +605,9 @@ out:
return ret;
}
-static inline __be64 *gfs2_indirect_init(struct metapath *mp,
- struct gfs2_glock *gl, unsigned int i,
- unsigned offset, u64 bn)
+static inline void gfs2_indirect_init(struct metapath *mp,
+ struct gfs2_glock *gl, unsigned int i,
+ unsigned offset, u64 bn)
{
__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
((i > 1) ? sizeof(struct gfs2_meta_header) :
@@ -621,7 +620,6 @@ static inline __be64 *gfs2_indirect_init(struct metapath *mp,
gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
ptr += offset;
*ptr = cpu_to_be64(bn);
- return ptr;
}
enum alloc_state {
@@ -665,7 +663,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct buffer_head *dibh = mp->mp_bh[0];
+ struct buffer_head *dibh = metapath_dibh(mp);
u64 bn;
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
size_t dblks = iomap->length >> inode->i_blkbits;
@@ -707,7 +705,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
i = mp->mp_aheight;
do {
n = blks - alloced;
- ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
+ ret = gfs2_alloc_blocks(ip, &bn, &n, 0);
if (ret)
goto out;
alloced += n;
@@ -918,7 +916,7 @@ unstuff:
goto do_alloc;
bh = mp->mp_bh[ip->i_height - 1];
- len = gfs2_extent_length(bh, ptr, len, &eob);
+ len = gfs2_extent_length(bh, ptr, &eob);
iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
iomap->length = len << inode->i_blkbits;
@@ -940,7 +938,7 @@ do_alloc:
else if (height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
else
- iomap->length = size - pos;
+ iomap->length = size - iomap->offset;
} else if (flags & IOMAP_WRITE) {
u64 alloc_size;
@@ -961,36 +959,56 @@ hole_found:
goto out;
}
-static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
- unsigned len)
+static struct folio *
+gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
{
+ struct inode *inode = iter->inode;
+ struct gfs2_inode *ip = GFS2_I(inode);
unsigned int blockmask = i_blocksize(inode) - 1;
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int blocks;
+ struct folio *folio;
+ int status;
+
+ if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+ return iomap_get_folio(iter, pos, len);
blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
- return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+ status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+ if (status)
+ return ERR_PTR(status);
+
+ folio = iomap_get_folio(iter, pos, len);
+ if (IS_ERR(folio))
+ gfs2_trans_end(sdp);
+ return folio;
}
-static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
- unsigned copied, struct page *page)
+static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
+ unsigned copied, struct folio *folio)
{
struct gfs2_trans *tr = current->journal_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (page && !gfs2_is_stuffed(ip))
- gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+ if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+ gfs2_trans_add_databufs(ip->i_gl, folio,
+ offset_in_folio(folio, pos),
+ copied);
- if (tr->tr_num_buf_new)
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ folio_unlock(folio);
+ folio_put(folio);
- gfs2_trans_end(sdp);
+ if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) {
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ gfs2_trans_end(sdp);
+ }
}
-static const struct iomap_page_ops gfs2_iomap_page_ops = {
- .page_prepare = gfs2_iomap_page_prepare,
- .page_done = gfs2_iomap_page_done,
+const struct iomap_write_ops gfs2_iomap_write_ops = {
+ .get_folio = gfs2_iomap_get_folio,
+ .put_folio = gfs2_iomap_put_folio,
};
static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
@@ -1065,8 +1083,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
gfs2_trans_end(sdp);
}
- if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
- iomap->page_ops = &gfs2_iomap_page_ops;
return 0;
out_trans_end:
@@ -1154,13 +1170,12 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (length != written && (iomap->flags & IOMAP_F_NEW)) {
/* Deallocate blocks that were just allocated. */
- loff_t blockmask = i_blocksize(inode) - 1;
- loff_t end = (pos + length) & ~blockmask;
+ loff_t hstart = round_up(pos + written, i_blocksize(inode));
+ loff_t hend = iomap->offset + iomap->length;
- pos = (pos + written + blockmask) & ~blockmask;
- if (pos < end) {
- truncate_pagecache_range(inode, pos, end - 1);
- punch_hole(ip, pos, end - pos);
+ if (hstart < hend) {
+ truncate_pagecache_range(inode, hstart, hend - 1);
+ punch_hole(ip, hstart, hend - hstart);
}
}
@@ -1283,13 +1298,16 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
/*
* NOTE: Never call gfs2_block_zero_range with an open transaction because it
* uses iomap write to perform its actions, which begin their own transactions
- * (iomap_begin, page_prepare, etc.)
+ * (iomap_begin, get_folio, etc.)
*/
-static int gfs2_block_zero_range(struct inode *inode, loff_t from,
- unsigned int length)
+static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length)
{
BUG_ON(current->journal_info);
- return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
+ if (from >= inode->i_size)
+ return 0;
+ length = min(length, inode->i_size - from);
+ return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops,
+ &gfs2_iomap_write_ops, NULL);
}
#define GFS2_JTRUNC_REVOKES 8192
@@ -1378,7 +1396,7 @@ static int trunc_start(struct inode *inode, u64 newsize)
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
i_size_write(inode, newsize);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_dinode_out(ip, dibh->b_data);
if (journaled)
@@ -1575,8 +1593,7 @@ out_unlock:
/* Every transaction boundary, we rewrite the dinode
to keep its di_blocks current in case of failure. */
- ip->i_inode.i_mtime = ip->i_inode.i_ctime =
- current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -1708,7 +1725,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
struct buffer_head *dibh, *bh;
struct gfs2_holder rd_gh;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
- u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
+ unsigned int bsize = 1 << bsize_shift;
+ u64 lblock = (offset + bsize - 1) >> bsize_shift;
__u16 start_list[GFS2_MAX_META_HEIGHT];
__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
unsigned int start_aligned, end_aligned;
@@ -1719,10 +1737,10 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
u64 prev_bnr = 0;
__be64 *start, *end;
- if (offset >= maxsize) {
+ if (offset + bsize - 1 >= maxsize) {
/*
- * The starting point lies beyond the allocated meta-data;
- * there are no blocks do deallocate.
+ * The starting point lies beyond the allocated metadata;
+ * there are no blocks to deallocate.
*/
return 0;
}
@@ -1816,7 +1834,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
gfs2_assert_withdraw(sdp, bh);
if (gfs2_assert_withdraw(sdp,
prev_bnr != bh->b_blocknr)) {
- fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
+ fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
"s_h:%u, mp_h:%u\n",
(unsigned long long)ip->i_no_addr,
prev_bnr, ip->i_height, strip_h, mp_h);
@@ -1942,7 +1960,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
gfs2_statfs_change(sdp, 0, +btotal, 0);
gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
ip->i_inode.i_gid);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
up_write(&ip->i_rw_mutex);
@@ -1985,7 +2003,7 @@ static int trunc_end(struct gfs2_inode *ip)
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
gfs2_ordered_del_inode(ip);
}
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -2027,14 +2045,6 @@ static int do_shrink(struct inode *inode, u64 newsize)
return error;
}
-void gfs2_trim_blocks(struct inode *inode)
-{
- int ret;
-
- ret = do_shrink(inode, inode->i_size);
- WARN_ON(ret != 0);
-}
-
/**
* do_grow - Touch and update inode size
* @inode: The inode
@@ -2094,7 +2104,7 @@ static int do_grow(struct inode *inode, u64 size)
goto do_end_trans;
truncate_setsize(inode, size);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -2146,7 +2156,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
ret = do_shrink(inode, newsize);
out:
- gfs2_rs_delete(ip, NULL);
+ gfs2_rs_delete(ip);
gfs2_qa_put(ip);
return ret;
}
@@ -2462,23 +2472,26 @@ out:
return error;
}
-static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
- loff_t offset)
+static ssize_t gfs2_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
- int ret;
-
- if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
+ if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(wpc->inode))))
return -EIO;
- if (offset >= wpc->iomap.offset &&
- offset < wpc->iomap.offset + wpc->iomap.length)
- return 0;
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length) {
+ int ret;
- memset(&wpc->iomap, 0, sizeof(wpc->iomap));
- ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
- return ret;
+ memset(&wpc->iomap, 0, sizeof(wpc->iomap));
+ ret = gfs2_iomap_get(wpc->inode, offset, INT_MAX, &wpc->iomap);
+ if (ret)
+ return ret;
+ }
+
+ return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}
const struct iomap_writeback_ops gfs2_writeback_ops = {
- .map_blocks = gfs2_map_blocks,
+ .writeback_range = gfs2_writeback_range,
+ .writeback_submit = iomap_ioend_writeback_submit,
};
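The gfs2_iomap_end() hunk above replaces open-coded mask arithmetic with round_up() when computing where to start punching out unused blocks. For a power-of-two block size the two forms give the same result, as this small standalone check illustrates; the values are arbitrary and the macro is one common round-up formulation, not the kernel's exact definition.

#include <stdio.h>
#include <assert.h>

typedef long long demo_loff_t;

/* One common power-of-two round-up formula; the kernel's round_up()
 * computes the same result for power-of-two alignments. */
#define DEMO_ROUND_UP(x, a) (((x) + (a) - 1) & ~((demo_loff_t)(a) - 1))

int main(void)
{
	demo_loff_t bsize = 4096;	/* power-of-two block size */
	demo_loff_t blockmask = bsize - 1;

	for (demo_loff_t x = 0; x < 3 * bsize; x += 517) {
		demo_loff_t masked  = (x + blockmask) & ~blockmask;	/* old style  */
		demo_loff_t rounded = DEMO_ROUND_UP(x, bsize);		/* round_up() */
		assert(masked == rounded);
	}
	puts("mask arithmetic and round_up() agree for power-of-two sizes");
	return 0;
}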
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 53cce6c08e81..6cdc72dd55a3 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -44,27 +44,27 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
}
extern const struct iomap_ops gfs2_iomap_ops;
+extern const struct iomap_write_ops gfs2_iomap_write_ops;
extern const struct iomap_writeback_ops gfs2_writeback_ops;
-extern int gfs2_unstuff_dinode(struct gfs2_inode *ip);
-extern int gfs2_block_map(struct inode *inode, sector_t lblock,
- struct buffer_head *bh, int create);
-extern int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
- struct iomap *iomap);
-extern int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
- struct iomap *iomap);
-extern int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
- unsigned int *extlen);
-extern int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
- unsigned *extlen, bool *new);
-extern int gfs2_setattr_size(struct inode *inode, u64 size);
-extern void gfs2_trim_blocks(struct inode *inode);
-extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
-extern int gfs2_file_dealloc(struct gfs2_inode *ip);
-extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
- unsigned int len);
-extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
-extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
-extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
+int gfs2_unstuff_dinode(struct gfs2_inode *ip);
+int gfs2_block_map(struct inode *inode, sector_t lblock,
+ struct buffer_head *bh, int create);
+int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap *iomap);
+int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap *iomap);
+int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
+ unsigned int *extlen);
+int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
+ unsigned *extlen, bool *new);
+int gfs2_setattr_size(struct inode *inode, u64 size);
+int gfs2_truncatei_resume(struct gfs2_inode *ip);
+int gfs2_file_dealloc(struct gfs2_inode *ip);
+int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
+ unsigned int len);
+int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
+void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
+int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 2e215e8c3c88..95050e719233 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -21,7 +21,9 @@
/**
* gfs2_drevalidate - Check directory lookup consistency
- * @dentry: the mapping to check
+ * @dir: expected parent directory inode
+ * @name: expected name
+ * @dentry: dentry to check
* @flags: lookup flags
*
* Check to make sure the lookup necessary to arrive at this inode from its
@@ -30,50 +32,43 @@
* Returns: 1 if the dentry is ok, 0 if it isn't
*/
-static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
+static int gfs2_drevalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent;
- struct gfs2_sbd *sdp;
- struct gfs2_inode *dip;
+ struct gfs2_sbd *sdp = GFS2_SB(dir);
+ struct gfs2_inode *dip = GFS2_I(dir);
struct inode *inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip = NULL;
- int error, valid = 0;
+ int error, valid;
int had_lock = 0;
if (flags & LOOKUP_RCU)
return -ECHILD;
- parent = dget_parent(dentry);
- sdp = GFS2_SB(d_inode(parent));
- dip = GFS2_I(d_inode(parent));
inode = d_inode(dentry);
if (inode) {
if (is_bad_inode(inode))
- goto out;
+ return 0;
ip = GFS2_I(inode);
}
- if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) {
- valid = 1;
- goto out;
- }
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+ return 1;
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
if (!had_lock) {
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
- goto out;
+ return 0;
}
- error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip);
+ error = gfs2_dir_check(dir, name, ip);
valid = inode ? !error : (error == -ENOENT);
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
-out:
- dput(parent);
return valid;
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 42b7dfffb5e7..509e2f0d97e7 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -60,6 +60,7 @@
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
+#include <linux/log2.h>
#include "gfs2.h"
#include "incore.h"
@@ -130,7 +131,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
if (ip->i_inode.i_size < offset + size)
i_size_write(&ip->i_inode, offset + size);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -227,7 +228,7 @@ out:
if (ip->i_inode.i_size < offset + copied)
i_size_write(&ip->i_inode, offset + copied);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
@@ -562,15 +563,18 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
int ret = 0;
ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
- if (ret < 0)
- goto consist_inode;
-
+ if (ret < 0) {
+ gfs2_consist_inode(GFS2_I(inode));
+ return ERR_PTR(-EIO);
+ }
offset = ret;
prev = NULL;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
- if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
- goto consist_inode;
+ if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) {
+ gfs2_consist_inode(GFS2_I(inode));
+ return ERR_PTR(-EIO);
+ }
do {
ret = scan(dent, name, opaque);
if (ret)
@@ -582,8 +586,10 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
- len, 0))
- goto consist_inode;
+ len, 0)) {
+ gfs2_consist_inode(GFS2_I(inode));
+ return ERR_PTR(-EIO);
+ }
} while(1);
switch(ret) {
@@ -597,10 +603,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
BUG_ON(ret > 0);
return ERR_PTR(ret);
}
-
-consist_inode:
- gfs2_consist_inode(GFS2_I(inode));
- return ERR_PTR(-EIO);
}
static int dirent_check_reclen(struct gfs2_inode *dip,
@@ -609,14 +611,16 @@ static int dirent_check_reclen(struct gfs2_inode *dip,
const void *ptr = d;
u16 rec_len = be16_to_cpu(d->de_rec_len);
- if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
- goto broken;
+ if (unlikely(rec_len < sizeof(struct gfs2_dirent))) {
+ gfs2_consist_inode(dip);
+ return -EIO;
+ }
ptr += rec_len;
if (ptr < end_p)
return rec_len;
if (ptr == end_p)
return -ENOENT;
-broken:
+
gfs2_consist_inode(dip);
return -EIO;
}
@@ -868,7 +872,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct gfs2_dirent *dent;
struct timespec64 tv = current_time(inode);
- error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &bn, &n, 0);
if (error)
return NULL;
bh = gfs2_meta_new(ip->i_gl, bn);
@@ -909,7 +913,6 @@ static int dir_make_exhash(struct inode *inode)
struct qstr args;
struct buffer_head *bh, *dibh;
struct gfs2_leaf *leaf;
- int y;
u32 x;
__be64 *lp;
u64 bn;
@@ -976,9 +979,7 @@ static int dir_make_exhash(struct inode *inode)
i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
gfs2_add_inode_blocks(&dip->i_inode, 1);
dip->i_diskflags |= GFS2_DIF_EXHASH;
-
- for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
- dip->i_depth = y;
+ dip->i_depth = ilog2(sdp->sd_hash_ptrs);
gfs2_dinode_out(dip, dibh->b_data);
@@ -1508,9 +1509,8 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
continue;
}
bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ,
- REQ_RAHEAD | REQ_META | REQ_PRIO,
- bh);
+ submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+ REQ_PRIO, bh);
continue;
}
brelse(bh);
@@ -1815,7 +1815,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
gfs2_inum_out(nip, dent);
dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip));
- tv = current_time(&ip->i_inode);
+ tv = inode_set_ctime_current(&ip->i_inode);
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
leaf = (struct gfs2_leaf *)bh->b_data;
be16_add_cpu(&leaf->lf_entries, 1);
@@ -1826,7 +1826,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
da->bh = NULL;
brelse(bh);
ip->i_entries++;
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv;
+ inode_set_mtime_to_ts(&ip->i_inode, tv);
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
mark_inode_dirty(inode);
@@ -1877,7 +1877,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
const struct qstr *name = &dentry->d_name;
struct gfs2_dirent *dent, *prev = NULL;
struct buffer_head *bh;
- struct timespec64 tv = current_time(&dip->i_inode);
+ struct timespec64 tv;
/* Returns _either_ the entry (if its first in block) or the
previous entry otherwise */
@@ -1897,6 +1897,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
}
dirent_del(dip, bh, prev, dent);
+ tv = inode_set_ctime_current(&dip->i_inode);
if (dip->i_diskflags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
u16 entries = be16_to_cpu(leaf->lf_entries);
@@ -1911,7 +1912,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
if (!dip->i_entries)
gfs2_consist_inode(dip);
dip->i_entries--;
- dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
+ inode_set_mtime_to_ts(&dip->i_inode, tv);
if (d_is_dir(dentry))
drop_nlink(&dip->i_inode);
mark_inode_dirty(&dip->i_inode);
@@ -1952,7 +1953,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
dent->de_type = cpu_to_be16(new_type);
brelse(bh);
- dip->i_inode.i_mtime = dip->i_inode.i_ctime = current_time(&dip->i_inode);
+ inode_set_mtime_to_ts(&dip->i_inode, inode_set_ctime_current(&dip->i_inode));
mark_inode_dirty_sync(&dip->i_inode);
return 0;
}
@@ -2017,7 +2018,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
l_blocks++;
}
- gfs2_rlist_alloc(&rlist);
+ gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
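In dir_make_exhash() above, the hand-rolled shift loop that derived the hash depth is replaced by ilog2(). Because the hash-pointer count is a power of two, both forms agree; here is a tiny standalone comparison with a hypothetical sd_hash_ptrs value and a portable ilog2 stand-in.

#include <stdio.h>
#include <assert.h>

/* Portable stand-in for the kernel's ilog2() on a non-zero value. */
static unsigned int demo_ilog2(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long hash_ptrs = 512;	/* hypothetical sd_hash_ptrs, power of two */
	unsigned long x;
	int y;

	/* The removed loop: counts how often hash_ptrs can be shifted right. */
	for (x = hash_ptrs, y = -1; x; x >>= 1, y++)
		;

	assert((unsigned int)y == demo_ilog2(hash_ptrs));
	printf("depth = %d\n", y);	/* prints 9 for 512 */
	return 0;
}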
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 5b76480c17c9..25a857c78b53 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -23,32 +23,32 @@ struct gfs2_diradd {
int save_loc;
};
-extern struct inode *gfs2_dir_search(struct inode *dir,
- const struct qstr *filename,
- bool fail_on_exist);
-extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
- const struct gfs2_inode *ip);
-extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
- const struct gfs2_inode *ip, struct gfs2_diradd *da);
+struct inode *gfs2_dir_search(struct inode *dir,
+ const struct qstr *filename,
+ bool fail_on_exist);
+int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
+ const struct gfs2_inode *ip);
+int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
+ const struct gfs2_inode *ip, struct gfs2_diradd *da);
static inline void gfs2_dir_no_add(struct gfs2_diradd *da)
{
brelse(da->bh);
da->bh = NULL;
}
-extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
-extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
- struct file_ra_state *f_ra);
-extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
- const struct gfs2_inode *nip, unsigned int new_type);
+int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
+int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
+ struct file_ra_state *f_ra);
+int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
+ const struct gfs2_inode *nip, unsigned int new_type);
-extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
+int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
-extern int gfs2_diradd_alloc_required(struct inode *dir,
- const struct qstr *filename,
- struct gfs2_diradd *da);
-extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
- struct buffer_head **bhp);
-extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
+int gfs2_diradd_alloc_required(struct inode *dir,
+ const struct qstr *filename,
+ struct gfs2_diradd *da);
+int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
+ struct buffer_head **bhp);
+void gfs2_dir_hash_inval(struct gfs2_inode *ip);
static inline u32 gfs2_disk_hash(const char *data, int len)
{
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 756d05779200..3334c394ce9c 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -66,7 +66,7 @@ struct get_name_filldir {
char *name;
};
-static int get_name_filldir(struct dir_context *ctx, const char *name,
+static bool get_name_filldir(struct dir_context *ctx, const char *name,
int length, loff_t offset, u64 inum,
unsigned int type)
{
@@ -74,12 +74,12 @@ static int get_name_filldir(struct dir_context *ctx, const char *name,
container_of(ctx, struct get_name_filldir, ctx);
if (inum != gnfd->inum.no_addr)
- return 0;
+ return true;
memcpy(gnfd->name, name, length);
gnfd->name[length] = 0;
- return 1;
+ return false;
}
static int gfs2_get_name(struct dentry *parent, char *name,
@@ -138,8 +138,6 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
return ERR_PTR(-ESTALE);
inode = gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino,
GFS2_BLKST_DINODE);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
return d_obtain_alias(inode);
}
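get_name_filldir() above now returns bool with the meaning inverted: true keeps the directory scan going, false stops it once the target inode number is matched. A minimal standalone sketch of that callback contract follows; the entry list and actor name are made up for illustration and are not FUSE/GFS2 APIs.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct entry { const char *name; unsigned long inum; };

/* Returns true to continue the scan, false to stop, matching the
 * bool-returning filldir convention used by get_name_filldir(). */
static bool match_actor(const struct entry *e, unsigned long wanted,
			char *out, size_t outlen)
{
	if (e->inum != wanted)
		return true;		/* not it, keep iterating */
	snprintf(out, outlen, "%s", e->name);
	return false;			/* found it, stop iterating */
}

int main(void)
{
	const struct entry dir[] = {
		{ "foo", 11 }, { "bar", 42 }, { "baz", 99 },
	};
	char name[64] = "";

	for (size_t i = 0; i < sizeof(dir) / sizeof(dir[0]); i++)
		if (!match_actor(&dir[i], 42, name, sizeof(name)))
			break;

	printf("found: %s\n", name);	/* prints "bar" */
	return 0;
}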
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index adafaaf7d24d..b2d23c98c996 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
@@ -154,7 +155,7 @@ static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
return fsflags;
}
-int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
@@ -235,7 +236,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
goto out;
if (!IS_IMMUTABLE(inode)) {
- error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
+ error = gfs2_permission(&nop_mnt_idmap, inode, MAY_WRITE);
if (error)
goto out;
}
@@ -250,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = filemap_fdatawait(inode->i_mapping);
if (error)
goto out;
+ truncate_inode_pages(inode->i_mapping, 0);
if (new_flags & GFS2_DIF_JDATA)
gfs2_ordered_del_inode(ip);
}
@@ -259,7 +261,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
goto out_trans_end;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
gfs2_trans_add_meta(ip->i_gl, bh);
ip->i_diskflags = new_flags;
gfs2_dinode_out(ip, bh->b_data);
@@ -273,8 +275,8 @@ out:
return error;
}
-int gfs2_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int gfs2_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 fsflags = fa->flags, gfsflags = 0;
@@ -375,23 +377,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
}
/**
- * gfs2_allocate_page_backing - Allocate blocks for a write fault
- * @page: The (locked) page to allocate backing for
+ * gfs2_allocate_folio_backing - Allocate blocks for a write fault
+ * @folio: The (locked) folio to allocate backing for
* @length: Size of the allocation
*
- * We try to allocate all the blocks required for the page in one go. This
+ * We try to allocate all the blocks required for the folio in one go. This
* might fail for various reasons, so we keep trying until all the blocks to
- * back this page are allocated. If some of the blocks are already allocated,
+ * back this folio are allocated. If some of the blocks are already allocated,
* that is ok too.
*/
-static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
+static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
{
- u64 pos = page_offset(page);
+ u64 pos = folio_pos(folio);
do {
struct iomap iomap = { };
- if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
+ if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
return -EIO;
if (length < iomap.length)
@@ -413,16 +415,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
- u64 offset = page_offset(page);
+ struct gfs2_alloc_parms ap = {};
+ u64 pos = folio_pos(folio);
unsigned int data_blocks, ind_blocks, rblocks;
vm_fault_t ret = VM_FAULT_LOCKED;
struct gfs2_holder gh;
- unsigned int length;
+ size_t length;
loff_t size;
int err;
@@ -431,27 +433,27 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
err = gfs2_glock_nq(&gh);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_uninit;
}
- /* Check page index against inode size */
+ /* Check folio index against inode size */
size = i_size_read(inode);
- if (offset >= size) {
+ if (pos >= size) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
- /* Update file times before taking page lock */
+ /* Update file times before taking folio lock */
file_update_time(vmf->vma->vm_file);
- /* page is wholly or partially inside EOF */
- if (size - offset < PAGE_SIZE)
- length = size - offset;
+ /* folio is wholly or partially inside EOF */
+ if (size - pos < folio_size(folio))
+ length = size - pos;
else
- length = PAGE_SIZE;
+ length = folio_size(folio);
- gfs2_size_hint(vmf->vma->vm_file, offset, length);
+ gfs2_size_hint(vmf->vma->vm_file, pos, length);
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -462,18 +464,19 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
*/
if (!gfs2_is_stuffed(ip) &&
- !gfs2_write_alloc_required(ip, offset, length)) {
- lock_page(page);
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ !gfs2_write_alloc_required(ip, pos, length)) {
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio) ||
+ folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
goto out_unlock;
}
err = gfs2_rindex_update(sdp);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_unlock;
}
@@ -481,12 +484,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
ap.target = data_blocks + ind_blocks;
err = gfs2_quota_lock_check(ip, &ap);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_unlock;
}
err = gfs2_inplace_reserve(ip, &ap);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_quota_unlock;
}
@@ -499,35 +502,35 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
}
err = gfs2_trans_begin(sdp, rblocks, 0);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_trans_fail;
}
- /* Unstuff, if required, and allocate backing blocks for page */
+ /* Unstuff, if required, and allocate backing blocks for folio */
if (gfs2_is_stuffed(ip)) {
err = gfs2_unstuff_dinode(ip);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_trans_end;
}
}
- lock_page(page);
+ folio_lock(folio);
/* If truncated, we must retry the operation, we may have raced
* with the glock demotion code.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
goto out_page_locked;
}
- err = gfs2_allocate_page_backing(page, length);
+ err = gfs2_allocate_folio_backing(folio, length);
if (err)
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
out_page_locked:
if (ret != VM_FAULT_LOCKED)
- unlock_page(page);
+ folio_unlock(folio);
out_trans_end:
gfs2_trans_end(sdp);
out_trans_fail:
@@ -539,8 +542,8 @@ out_unlock:
out_uninit:
gfs2_holder_uninit(&gh);
if (ret == VM_FAULT_LOCKED) {
- set_page_dirty(page);
- wait_for_stable_page(page);
+ folio_mark_dirty(folio);
+ folio_wait_stable(folio);
}
sb_end_pagefault(inode->i_sb);
return ret;
@@ -557,7 +560,7 @@ static vm_fault_t gfs2_fault(struct vm_fault *vmf)
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
err = gfs2_glock_nq(&gh);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_uninit;
}
ret = filemap_fault(vmf);
@@ -629,6 +632,9 @@ int gfs2_open_common(struct inode *inode, struct file *file)
ret = generic_file_open(inode, file);
if (ret)
return ret;
+
+ if (!gfs2_is_jdata(GFS2_I(inode)))
+ file->f_mode |= FMODE_CAN_ODIRECT;
}
fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
@@ -704,10 +710,11 @@ static int gfs2_release(struct inode *inode, struct file *file)
kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_rs_active(&ip->i_res))
- gfs2_rs_delete(ip, &inode->i_writecount);
- if (file->f_mode & FMODE_WRITE)
+ if (file->f_mode & FMODE_WRITE) {
+ if (gfs2_rs_active(&ip->i_res))
+ gfs2_rs_delete(ip);
gfs2_qa_put(ip);
+ }
return 0;
}
@@ -737,7 +744,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- int sync_state = inode->i_state & I_DIRTY;
+ int sync_state = inode_state_read_once(inode) & I_DIRTY;
struct gfs2_inode *ip = GFS2_I(inode);
int ret = 0, ret1 = 0;
@@ -769,33 +776,35 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
return ret ? ret : ret1;
}
-static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
+static inline bool should_fault_in_pages(struct iov_iter *i,
+ struct kiocb *iocb,
size_t *prev_count,
size_t *window_size)
{
- char __user *p = i->iov[0].iov_base + i->iov_offset;
size_t count = iov_iter_count(i);
- int pages = 1;
+ size_t size, offs;
- if (likely(!count))
+ if (!count)
return false;
- if (ret <= 0 && ret != -EFAULT)
- return false;
- if (!iter_is_iovec(i))
+ if (!user_backed_iter(i))
return false;
- if (*prev_count != count || !*window_size) {
- int pages, nr_dirtied;
+ /*
+ * Try to fault in multiple pages initially. When that doesn't result
+ * in any progress, fall back to a single page.
+ */
+ size = PAGE_SIZE;
+ offs = offset_in_page(iocb->ki_pos);
+ if (*prev_count != count) {
+ size_t nr_dirtied;
- pages = min_t(int, BIO_MAX_VECS,
- DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE));
nr_dirtied = max(current->nr_dirtied_pause -
- current->nr_dirtied, 1);
- pages = min(pages, nr_dirtied);
+ current->nr_dirtied, 8);
+ size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
}
*prev_count = count;
- *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
+ *window_size = size - offs;
return true;
}
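
To make the new sizing concrete: when the residual byte count has changed since the last attempt, should_fault_in_pages() now derives the fault-in window from the task's remaining dirty-page budget (at least 8 pages), caps it at 1 MiB, and subtracts the offset within the first page; on repeated attempts with no progress it falls back to a single page. A standalone sketch of that arithmetic, where the PAGE_SIZE/SZ_1M values and sample numbers are assumptions for illustration and the single-page fallback is omitted:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SZ_1M		(1UL << 20)

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

/* Window size derived from the remaining dirty-page budget. */
static size_t fault_in_window(long nr_dirtied_pause, long nr_dirtied,
			      unsigned long long pos)
{
	long nr = nr_dirtied_pause - nr_dirtied;
	size_t size;

	if (nr < 8)
		nr = 8;					/* lower bound, as in the patch */
	size = min_size(SZ_1M, (size_t)nr << PAGE_SHIFT);	/* 1 MiB cap */
	return size - (size_t)(pos & (PAGE_SIZE - 1));	/* minus offset_in_page() */
}

int main(void)
{
	/* e.g. 32 pages of dirty budget left, I/O starting at offset 0x1234 */
	printf("window = %zu bytes\n", fault_in_window(40, 8, 0x1234));
	return 0;
}
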
@@ -805,13 +814,13 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
struct file *file = iocb->ki_filp;
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
size_t prev_count = 0, window_size = 0;
- size_t written = 0;
+ size_t read = 0;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
@@ -833,35 +842,33 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
pagefault_disable();
to->nofault = true;
ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
- IOMAP_DIO_PARTIAL, written);
+ IOMAP_DIO_PARTIAL, NULL, read);
to->nofault = false;
pagefault_enable();
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
+ /* No increment (+=) because iomap_dio_rw returns a cumulative value. */
if (ret > 0)
- written = ret;
-
- if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
- size_t leftover;
+ read = ret;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_writeable(to, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(gh))
- goto retry;
- goto retry_under_glock;
- }
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ window_size -= fault_in_iov_iter_writeable(to, window_size);
+ if (window_size)
+ goto retry;
}
+out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
+ /* User space doesn't expect partial success. */
if (ret < 0)
return ret;
- return written;
+ return read;
}
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
@@ -871,13 +878,14 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
struct inode *inode = file->f_mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
size_t prev_count = 0, window_size = 0;
- size_t read = 0;
+ size_t written = 0;
+ bool enough_retries;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* For writes, iomap_dio_rw only triggers manual page faults, so we
@@ -897,41 +905,45 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
/* Silently fall back to buffered I/O when writing beyond EOF */
if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
- goto out;
+ goto out_unlock;
from->nofault = true;
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
- IOMAP_DIO_PARTIAL, read);
+ IOMAP_DIO_PARTIAL, NULL, written);
from->nofault = false;
-
- if (ret == -ENOTBLK)
- ret = 0;
+ if (ret <= 0) {
+ if (ret == -ENOTBLK)
+ ret = 0;
+ if (ret != -EFAULT)
+ goto out_unlock;
+ }
+ /* No increment (+=) because iomap_dio_rw returns a cumulative value. */
if (ret > 0)
- read = ret;
-
- if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
- size_t leftover;
+ written = ret;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_readable(from, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(gh))
+ enough_retries = prev_count == iov_iter_count(from) &&
+ window_size <= PAGE_SIZE;
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ window_size -= fault_in_iov_iter_readable(from, window_size);
+ if (window_size) {
+ if (!enough_retries)
goto retry;
- goto retry_under_glock;
+ /* fall back to buffered I/O */
+ ret = 0;
}
}
-out:
+out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
+ /* User space doesn't expect partial success. */
if (ret < 0)
return ret;
- return read;
+ return written;
}
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -939,30 +951,29 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct gfs2_inode *ip;
struct gfs2_holder gh;
size_t prev_count = 0, window_size = 0;
- size_t written = 0;
+ size_t read = 0;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
- if (iocb->ki_flags & IOCB_DIRECT) {
- ret = gfs2_file_direct_read(iocb, to, &gh);
- if (likely(ret != -ENOTBLK))
- return ret;
- iocb->ki_flags &= ~IOCB_DIRECT;
- }
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return gfs2_file_direct_read(iocb, to, &gh);
+
+ pagefault_disable();
iocb->ki_flags |= IOCB_NOIO;
ret = generic_file_read_iter(iocb, to);
iocb->ki_flags &= ~IOCB_NOIO;
+ pagefault_enable();
if (ret >= 0) {
if (!iov_iter_count(to))
return ret;
- written = ret;
- } else {
+ read = ret;
+ } else if (ret != -EFAULT) {
if (ret != -EAGAIN)
return ret;
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -974,33 +985,26 @@ retry:
ret = gfs2_glock_nq(&gh);
if (ret)
goto out_uninit;
-retry_under_glock:
pagefault_disable();
ret = generic_file_read_iter(iocb, to);
pagefault_enable();
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
if (ret > 0)
- written += ret;
-
- if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
- size_t leftover;
+ read += ret;
- gfs2_holder_allow_demote(&gh);
- leftover = fault_in_iov_iter_writeable(to, window_size);
- gfs2_holder_disallow_demote(&gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(&gh)) {
- if (written)
- goto out_uninit;
- goto retry;
- }
- goto retry_under_glock;
- }
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(&gh);
+ window_size -= fault_in_iov_iter_writeable(to, window_size);
+ if (window_size)
+ goto retry;
}
+out_unlock:
if (gfs2_holder_queued(&gh))
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
- return written ? written : ret;
+ return read ? read : ret;
}
static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
@@ -1013,13 +1017,14 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_holder *statfs_gh = NULL;
size_t prev_count = 0, window_size = 0;
- size_t read = 0;
+ size_t orig_count = iov_iter_count(from);
+ size_t written = 0;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
@@ -1030,11 +1035,19 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
}
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry:
+ window_size -= fault_in_iov_iter_readable(from, window_size);
+ if (!window_size) {
+ ret = -EFAULT;
+ goto out_uninit;
+ }
+ from->count = min(from->count, window_size);
+ }
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
+
if (inode == sdp->sd_rindex) {
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -1044,42 +1057,32 @@ retry_under_glock:
goto out_unlock;
}
- current->backing_dev_info = inode_to_bdi(inode);
pagefault_disable();
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
+ &gfs2_iomap_write_ops, NULL);
pagefault_enable();
- current->backing_dev_info = NULL;
- if (ret > 0) {
- iocb->ki_pos += ret;
- read += ret;
- }
+ if (ret > 0)
+ written += ret;
if (inode == sdp->sd_rindex)
gfs2_glock_dq_uninit(statfs_gh);
- if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
- size_t leftover;
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_readable(from, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(gh)) {
- if (read)
- goto out_uninit;
- goto retry;
- }
- goto retry_under_glock;
- }
+ from->count = orig_count - written;
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ goto retry;
}
out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
- if (statfs_gh)
- kfree(statfs_gh);
- return read ? read : ret;
+ kfree(statfs_gh);
+ from->count = orig_count - written;
+ return written ? written : ret;
}
/**
@@ -1120,14 +1123,16 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret)
goto out_unlock;
- ret = file_update_time(file);
- if (ret)
- goto out_unlock;
-
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
ssize_t buffered, ret2;
+ /*
+ * Note that under direct I/O, we don't allow any inode
+ * timestamp updates, so we're not calling file_update_time()
+ * here.
+ */
+
ret = gfs2_file_direct_write(iocb, from, &gh);
if (ret < 0 || !iov_iter_count(from))
goto out_unlock;
@@ -1154,6 +1159,10 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!ret || ret2 > 0)
ret += ret2;
} else {
+ ret = file_update_time(file);
+ if (ret)
+ goto out_unlock;
+
ret = gfs2_file_buffered_write(iocb, from, &gh);
if (likely(ret > 0))
ret = generic_write_sync(iocb, ret);
@@ -1245,7 +1254,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes, max_blks;
int error;
@@ -1433,25 +1442,44 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int ret;
- if (!(fl->fl_flags & FL_POSIX))
+ if (!(fl->c.flc_flags & FL_POSIX))
return -ENOLCK;
- if (cmd == F_CANCELLK) {
- /* Hack: */
- cmd = F_SETLK;
- fl->fl_type = F_UNLCK;
- }
- if (unlikely(gfs2_withdrawn(sdp))) {
- if (fl->fl_type == F_UNLCK)
+ if (gfs2_withdrawn(sdp)) {
+ if (lock_is_unlock(fl))
locks_lock_file_wait(file, fl);
return -EIO;
}
- if (IS_GETLK(cmd))
- return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
- else if (fl->fl_type == F_UNLCK)
- return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
- else
- return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ down_read(&ls->ls_sem);
+ ret = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ if (cmd == F_CANCELLK)
+ ret = dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (IS_GETLK(cmd))
+ ret = dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (lock_is_unlock(fl))
+ ret = dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else
+ ret = dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ }
+ up_read(&ls->ls_sem);
+ return ret;
+}
+
+static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
+{
+ struct gfs2_glock *gl = gfs2_glock_hold(fl_gh->gh_gl);
+
+ /*
+ * Make sure gfs2_glock_put() won't sleep under the file->f_lock
+ * spinlock.
+ */
+
+ spin_lock(&file->f_lock);
+ gfs2_holder_uninit(fl_gh);
+ spin_unlock(&file->f_lock);
+ gfs2_glock_put(gl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
@@ -1465,8 +1493,10 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
int error = 0;
int sleeptime;
- state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
+ state = lock_is_write(fl) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
+ flags = GL_EXACT | GL_NOPID;
+ if (!IS_SETLKW(cmd))
+ flags |= LM_FLAG_TRY_1CB;
mutex_lock(&fp->f_fl_mutex);
@@ -1475,8 +1505,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
if (fl_gh->gh_state == state)
goto out;
locks_init_lock(&request);
- request.fl_type = F_UNLCK;
- request.fl_flags = FL_FLOCK;
+ request.c.flc_type = F_UNLCK;
+ request.c.flc_flags = FL_FLOCK;
locks_lock_file_wait(file, &request);
gfs2_glock_dq(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
@@ -1485,19 +1515,21 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
&gfs2_flock_glops, CREATE, &gl);
if (error)
goto out;
+ spin_lock(&file->f_lock);
gfs2_holder_init(gl, state, flags, fl_gh);
+ spin_unlock(&file->f_lock);
gfs2_glock_put(gl);
}
for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
error = gfs2_glock_nq(fl_gh);
if (error != GLR_TRYFAILED)
break;
- fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
- fl_gh->gh_error = 0;
+ fl_gh->gh_flags &= ~LM_FLAG_TRY_1CB;
+ fl_gh->gh_flags |= LM_FLAG_TRY;
msleep(sleeptime);
}
if (error) {
- gfs2_holder_uninit(fl_gh);
+ __flock_holder_uninit(file, fl_gh);
if (error == GLR_TRYFAILED)
error = -EAGAIN;
} else {
@@ -1519,7 +1551,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
locks_lock_file_wait(file, fl);
if (gfs2_holder_initialized(fl_gh)) {
gfs2_glock_dq(fl_gh);
- gfs2_holder_uninit(fl_gh);
+ __flock_holder_uninit(file, fl_gh);
}
mutex_unlock(&fp->f_fl_mutex);
}
@@ -1535,10 +1567,10 @@ static void do_unflock(struct file *file, struct file_lock *fl)
static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
- if (!(fl->fl_flags & FL_FLOCK))
+ if (!(fl->c.flc_flags & FL_FLOCK))
return -ENOLCK;
- if (fl->fl_type == F_UNLCK) {
+ if (lock_is_unlock(fl)) {
do_unflock(file, fl);
return 0;
} else {
@@ -1559,10 +1591,11 @@ const struct file_operations gfs2_file_fops = {
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = simple_nosetlease,
.fallocate = gfs2_fallocate,
+ .fop_flags = FOP_ASYNC_LOCK,
};
const struct file_operations gfs2_dir_fops = {
@@ -1575,6 +1608,7 @@ const struct file_operations gfs2_dir_fops = {
.lock = gfs2_lock,
.flock = gfs2_flock,
.llseek = default_llseek,
+ .fop_flags = FOP_ASYNC_LOCK,
};
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
@@ -1590,7 +1624,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = generic_setlease,
.fallocate = gfs2_fallocate,
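
do_flock() above keeps its try-lock loop: a non-blocking attempt (LM_FLAG_TRY_1CB, later LM_FLAG_TRY) followed by sleeps of 1, 2 and 4 ms whenever the attempt comes back GLR_TRYFAILED, after which the caller sees -EAGAIN. A small userspace analogue of that try-then-back-off pattern using pthreads; the helper name is invented for the sketch and this is not GFS2 code (compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Try the lock up to three times, sleeping 1, 2 and 4 ms between attempts. */
static int lock_with_backoff(pthread_mutex_t *m)
{
	int err = -EAGAIN;

	for (int sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		if (pthread_mutex_trylock(m) == 0) {
			err = 0;
			break;
		}
		usleep(sleeptime * 1000);	/* msleep(sleeptime) analogue */
	}
	return err;
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	int err = lock_with_backoff(&m);

	printf("lock_with_backoff: %d\n", err);
	if (!err)
		pthread_mutex_unlock(&m);
	return 0;
}
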
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 19f38aee1b61..92e029104d8a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -33,6 +33,9 @@
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
+#include <linux/pid_namespace.h>
+#include <linux/file.h>
+#include <linux/random.h>
#include "gfs2.h"
#include "incore.h"
@@ -58,11 +61,10 @@ struct gfs2_glock_iter {
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static void __gfs2_glock_dq(struct gfs2_holder *gh);
+static void request_demote(struct gfs2_glock *gl, unsigned int state,
+ unsigned long delay, bool remote);
static struct dentry *gfs2_root;
-static struct workqueue_struct *glock_workqueue;
-struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
@@ -127,50 +129,51 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
kfree(gl->gl_lksb.sb_lvbptr);
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
- kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- else
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ struct gfs2_glock_aspace *gla =
+ container_of(gl, struct gfs2_glock_aspace, glock);
+ kmem_cache_free(gfs2_glock_aspace_cachep, gla);
+ } else
kmem_cache_free(gfs2_glock_cachep, gl);
}
-/**
- * glock_blocked_by_withdraw - determine if we can still use a glock
- * @gl: the glock
- *
- * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
- * when we're withdrawn. For example, to maintain metadata integrity, we should
- * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
- * iopen or the transaction glocks may be safely used because none of their
- * metadata goes through the journal. So in general, we should disallow all
- * glocks that are journaled, and allow all the others. One exception is:
- * we need to allow our active journal to be promoted and demoted so others
- * may recover it and we can reacquire it when they're done.
- */
-static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+static void __gfs2_glock_free(struct gfs2_glock *gl)
{
+ rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
+ smp_mb();
+ wake_up_glock(gl);
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl) {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- if (likely(!gfs2_withdrawn(sdp)))
- return false;
- if (gl->gl_ops->go_flags & GLOF_NONDISK)
- return false;
- if (!sdp->sd_jdesc ||
- gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
- return false;
- return true;
+ __gfs2_glock_free(gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
}
-void gfs2_glock_free(struct gfs2_glock *gl)
-{
+void gfs2_glock_free_later(struct gfs2_glock *gl) {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
- rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
- smp_mb();
- wake_up_glock(gl);
- call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+ spin_lock(&lru_lock);
+ list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
+ spin_unlock(&lru_lock);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ wake_up(&sdp->sd_kill_wait);
+}
+
+static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
+{
+ struct list_head *list = &sdp->sd_dead_glocks;
+
+ while(!list_empty(list)) {
+ struct gfs2_glock *gl;
+
+ gl = list_first_entry(list, struct gfs2_glock, gl_lru);
+ list_del_init(&gl->gl_lru);
+ __gfs2_glock_free(gl);
+ }
}
/**
@@ -179,46 +182,16 @@ void gfs2_glock_free(struct gfs2_glock *gl)
*
*/
-void gfs2_glock_hold(struct gfs2_glock *gl)
+struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
{
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
lockref_get(&gl->gl_lockref);
+ return gl;
}
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
- const struct gfs2_glock_operations *glops = gl->gl_ops;
-
- if (gl->gl_state == LM_ST_UNLOCKED)
- return 0;
- /*
- * Note that demote_ok is used for the lru process of disposing of
- * glocks. For this purpose, we don't care if the glock's holders
- * have the HIF_MAY_DEMOTE flag set or not. If someone is using
- * them, don't demote.
- */
- if (!list_empty(&gl->gl_holders))
- return 0;
- if (glops->go_demote_ok)
- return glops->go_demote_ok(gl);
- return 1;
-}
-
-
-void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
- if (!(gl->gl_ops->go_flags & GLOF_LRU))
- return;
-
spin_lock(&lru_lock);
-
list_move_tail(&gl->gl_lru, &lru_list);
if (!test_bit(GLF_LRU, &gl->gl_flags)) {
@@ -231,9 +204,6 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
- if (!(gl->gl_ops->go_flags & GLOF_LRU))
- return;
-
spin_lock(&lru_lock);
if (test_bit(GLF_LRU, &gl->gl_flags)) {
list_del_init(&gl->gl_lru);
@@ -247,8 +217,10 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
* Enqueue the glock on the work queue. Passes one glock reference on to the
* work queue.
*/
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
- if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
/*
* We are holding the lockref spinlock, and the work was still
* queued above. The queued work (glock_work_func) takes that
@@ -260,21 +232,14 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
}
}
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
- spin_lock(&gl->gl_lockref.lock);
- __gfs2_glock_queue_work(gl, delay);
- spin_unlock(&gl->gl_lockref.lock);
-}
-
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);
lockref_mark_dead(&gl->gl_lockref);
-
- gfs2_glock_remove_from_lru(gl);
spin_unlock(&gl->gl_lockref.lock);
+ gfs2_glock_remove_from_lru(gl);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
if (mapping) {
truncate_inode_pages_final(mapping);
@@ -285,12 +250,18 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
-/*
- * Cause the glock to be put in work queue context.
- */
-void gfs2_glock_queue_put(struct gfs2_glock *gl)
+static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
{
- gfs2_glock_queue_work(gl, 0);
+ if (lockref_put_or_lock(&gl->gl_lockref))
+ return true;
+ GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+ if (gl->gl_state != LM_ST_UNLOCKED) {
+ gl->gl_lockref.count--;
+ gfs2_glock_add_to_lru(gl);
+ spin_unlock(&gl->gl_lockref.lock);
+ return true;
+ }
+ return false;
}
/**
@@ -301,15 +272,28 @@ void gfs2_glock_queue_put(struct gfs2_glock *gl)
void gfs2_glock_put(struct gfs2_glock *gl)
{
- /* last put could call sleepable dlm api */
- might_sleep();
-
- if (lockref_put_or_lock(&gl->gl_lockref))
+ if (__gfs2_glock_put_or_lock(gl))
return;
__gfs2_glock_put(gl);
}
+/*
+ * gfs2_glock_put_async - Decrement reference count without sleeping
+ * @gl: The glock to put
+ *
+ * Decrement the reference count on the glock immediately unless it is the last
+ * reference. Defer putting the last reference to work queue context.
+ */
+void gfs2_glock_put_async(struct gfs2_glock *gl)
+{
+ if (__gfs2_glock_put_or_lock(gl))
+ return;
+
+ gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
+}
+
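
gfs2_glock_put() and gfs2_glock_put_async() above are both built on the same "put or lock" idea: drop a reference with a plain decrement whenever it cannot be the last one, and only take the spinlock when the count might hit zero, so the release work runs under the lock (or, in the async variant, gets handed to the glock workqueue). A simplified userspace rendering of that pattern, with a mutex standing in for the lockref spinlock; this is a sketch of the idea, not the kernel lockref API (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref_obj {
	atomic_int count;
	pthread_mutex_t lock;
};

/*
 * Drop one reference. Returns true if the fast path handled it; returns
 * false with obj->lock held when the caller must release the object.
 */
static bool put_or_lock(struct ref_obj *obj)
{
	int old = atomic_load(&obj->count);

	/* Fast path: decrement as long as we are not the last reference. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(&obj->count, &old, old - 1))
			return true;
	}
	pthread_mutex_lock(&obj->lock);
	return false;
}

static void put(struct ref_obj *obj)
{
	if (put_or_lock(obj))
		return;
	/* Slow path: we hold the lock and own the final reference. */
	atomic_store(&obj->count, 0);
	pthread_mutex_unlock(&obj->lock);
	printf("final reference dropped, freeing object\n");
}

int main(void)
{
	struct ref_obj obj = { .count = 2, .lock = PTHREAD_MUTEX_INITIALIZER };

	put(&obj);	/* fast path: 2 -> 1 */
	put(&obj);	/* slow path: releases the object */
	return 0;
}
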
/**
* may_grant - check if it's ok to grant a new lock
* @gl: The glock
@@ -389,7 +373,7 @@ static void do_error(struct gfs2_glock *gl, const int ret)
struct gfs2_holder *gh, *tmp;
list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
- if (!test_bit(HIF_WAIT, &gh->gh_iflags))
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
if (ret & LM_OUT_ERROR)
gh->gh_error = -EIO;
@@ -404,40 +388,6 @@ static void do_error(struct gfs2_glock *gl, const int ret)
}
/**
- * demote_incompat_holders - demote incompatible demoteable holders
- * @gl: the glock we want to promote
- * @new_gh: the new holder to be promoted
- */
-static void demote_incompat_holders(struct gfs2_glock *gl,
- struct gfs2_holder *new_gh)
-{
- struct gfs2_holder *gh;
-
- /*
- * Demote incompatible holders before we make ourselves eligible.
- * (This holder may or may not allow auto-demoting, but we don't want
- * to demote the new holder before it's even granted.)
- */
- list_for_each_entry(gh, &gl->gl_holders, gh_list) {
- /*
- * Since holders are at the front of the list, we stop when we
- * find the first non-holder.
- */
- if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
- return;
- if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) &&
- !may_grant(gl, new_gh, gh)) {
- /*
- * We should not recurse into do_promote because
- * __gfs2_glock_dq only calls handle_callback,
- * gfs2_glock_add_to_lru and __gfs2_glock_queue_work.
- */
- __gfs2_glock_dq(gh);
- }
- }
-}
-
-/**
* find_first_holder - find the first "holder" gh
* @gl: the glock
*/
@@ -455,32 +405,11 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
return NULL;
}
-/**
- * find_first_strong_holder - find the first non-demoteable holder
- * @gl: the glock
- *
- * Find the first holder that doesn't have the HIF_MAY_DEMOTE flag set.
- */
-static inline struct gfs2_holder *
-find_first_strong_holder(struct gfs2_glock *gl)
-{
- struct gfs2_holder *gh;
-
- list_for_each_entry(gh, &gl->gl_holders, gh_list) {
- if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
- return NULL;
- if (!test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
- return gh;
- }
- return NULL;
-}
-
/*
* gfs2_instantiate - Call the glops instantiate function
- * @gl: The glock
+ * @gh: The glock holder
*
- * Returns: 0 if instantiate was successful, 2 if type specific operation is
- * underway, or error.
+ * Returns: 0 if instantiate was successful, or error.
*/
int gfs2_instantiate(struct gfs2_holder *gh)
{
@@ -490,13 +419,13 @@ int gfs2_instantiate(struct gfs2_holder *gh)
again:
if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
- return 0;
+ goto done;
/*
* Since we unlock the lockref lock, we set a flag to indicate
* instantiate is in progress.
*/
- if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
+ if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
TASK_UNINTERRUPTIBLE);
/*
@@ -509,84 +438,53 @@ again:
goto again;
}
- set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-
- ret = glops->go_instantiate(gh);
+ ret = glops->go_instantiate(gl);
if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
- clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
- smp_mb__after_atomic();
- wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG);
- return ret;
+ clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
+ if (ret)
+ return ret;
+
+done:
+ if (glops->go_held)
+ return glops->go_held(gh);
+ return 0;
}
/**
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
- *
- * Returns: 1 if there is a blocked holder at the head of the list, or 2
- * if a type specific operation is underway.
*/
-static int do_promote(struct gfs2_glock *gl)
-__releases(&gl->gl_lockref.lock)
-__acquires(&gl->gl_lockref.lock)
+static void do_promote(struct gfs2_glock *gl)
{
- struct gfs2_holder *gh, *tmp, *first_gh;
- bool incompat_holders_demoted = false;
- bool lock_released;
- int ret;
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_holder *gh, *current_gh;
-restart:
- first_gh = find_first_strong_holder(gl);
- list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
- lock_released = false;
+ if (gfs2_withdrawn(sdp)) {
+ do_error(gl, LM_OUT_ERROR);
+ return;
+ }
+
+ current_gh = find_first_holder(gl);
+ list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
- if (!may_grant(gl, first_gh, gh)) {
+ if (!may_grant(gl, current_gh, gh)) {
/*
- * If we get here, it means we may not grant this holder for
- * some reason. If this holder is the head of the list, it
- * means we have a blocked holder at the head, so return 1.
+ * If we get here, it means we may not grant this
+ * holder for some reason.
*/
- if (gh->gh_list.prev == &gl->gl_holders)
- return 1;
- do_error(gl, 0);
+ if (current_gh)
+ do_error(gl, 0); /* Fail queued try locks */
break;
}
- if (!incompat_holders_demoted) {
- demote_incompat_holders(gl, first_gh);
- incompat_holders_demoted = true;
- first_gh = gh;
- }
- if (test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags) &&
- !(gh->gh_flags & GL_SKIP) && gl->gl_ops->go_instantiate) {
- lock_released = true;
- spin_unlock(&gl->gl_lockref.lock);
- ret = gfs2_instantiate(gh);
- spin_lock(&gl->gl_lockref.lock);
- if (ret) {
- if (ret == 1)
- return 2;
- gh->gh_error = ret;
- list_del_init(&gh->gh_list);
- trace_gfs2_glock_queue(gh, 0);
- gfs2_holder_wake(gh);
- goto restart;
- }
- }
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh);
gfs2_holder_wake(gh);
- /*
- * If we released the gl_lockref.lock the holders list may have
- * changed. For that reason, we start again at the start of
- * the holders queue.
- */
- if (lock_released)
- goto restart;
+ if (!current_gh)
+ current_gh = gh;
}
- return 0;
}
/**
@@ -606,6 +504,23 @@ static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
}
/**
+ * find_last_waiter - find the last gh that's waiting for the glock
+ * @gl: the glock
+ *
+ * This also is a fast way of finding out if there are any waiters.
+ */
+
+static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
+{
+ struct gfs2_holder *gh;
+
+ if (list_empty(&gl->gl_holders))
+ return NULL;
+ gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
+ return test_bit(HIF_HOLDER, &gh->gh_iflags) ? NULL : gh;
+}
+
+/**
* state_change - record that the glock is now in a different state
* @gl: the glock
* @new_state: the new state
@@ -613,18 +528,6 @@ static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
- int held1, held2;
-
- held1 = (gl->gl_state != LM_ST_UNLOCKED);
- held2 = (new_state != LM_ST_UNLOCKED);
-
- if (held1 != held2) {
- GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
- if (held2)
- gl->gl_lockref.count++;
- else
- gl->gl_lockref.count--;
- }
if (new_state != gl->gl_target)
/* shorten our minimum hold time */
gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
@@ -633,11 +536,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
gl->gl_tchange = jiffies;
}
-static void gfs2_set_demote(struct gfs2_glock *gl)
+static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- set_bit(GLF_DEMOTE, &gl->gl_flags);
+ set_bit(nr, &gl->gl_flags);
smp_mb();
wake_up(&sdp->sd_async_glock_wait);
}
@@ -660,30 +563,31 @@ static void gfs2_demote_wake(struct gfs2_glock *gl)
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
- struct gfs2_holder *gh;
- unsigned state = ret & LM_OUT_ST_MASK;
- int rv;
- spin_lock(&gl->gl_lockref.lock);
- trace_gfs2_glock_state_change(gl, state);
- state_change(gl, state);
- gh = find_first_waiter(gl);
+ if (!(ret & ~LM_OUT_ST_MASK)) {
+ unsigned state = ret & LM_OUT_ST_MASK;
+
+ trace_gfs2_glock_state_change(gl, state);
+ state_change(gl, state);
+ }
/* Demote to UN request arrived during demote to SH or DF */
if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
- state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
+ gl->gl_state != LM_ST_UNLOCKED &&
+ gl->gl_demote_state == LM_ST_UNLOCKED)
gl->gl_target = LM_ST_UNLOCKED;
/* Check for state != intended state */
- if (unlikely(state != gl->gl_target)) {
+ if (unlikely(gl->gl_state != gl->gl_target)) {
+ struct gfs2_holder *gh = find_first_waiter(gl);
+
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
- /* move to back of queue and try next entry */
if (ret & LM_OUT_CANCELED) {
- if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
- list_move_tail(&gh->gh_list, &gl->gl_holders);
- gh = find_first_waiter(gl);
- gl->gl_target = gh->gh_state;
- goto retry;
+ list_del_init(&gh->gh_list);
+ trace_gfs2_glock_queue(gh, 0);
+ gfs2_holder_wake(gh);
+ gl->gl_target = gl->gl_state;
+ goto out;
}
/* Some error or failed "try lock" - report it */
if ((ret & LM_OUT_ERROR) ||
@@ -693,10 +597,9 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
goto out;
}
}
- switch(state) {
+ switch(gl->gl_state) {
/* Unlocked due to conversion deadlock, try again */
case LM_ST_UNLOCKED:
-retry:
do_xmote(gl, gh, gl->gl_target);
break;
/* Conversion fails, unlock and try again */
@@ -705,19 +608,24 @@ retry:
do_xmote(gl, gh, LM_ST_UNLOCKED);
break;
default: /* Everything else */
- fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
- gl->gl_target, state);
+ fs_err(gl->gl_name.ln_sbd,
+ "glock %u:%llu requested=%u ret=%u\n",
+ gl->gl_name.ln_type, gl->gl_name.ln_number,
+ gl->gl_req, ret);
GLOCK_BUG_ON(gl, 1);
}
- spin_unlock(&gl->gl_lockref.lock);
return;
}
/* Fast path - we got what we asked for */
- if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+ clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
gfs2_demote_wake(gl);
- if (state != LM_ST_UNLOCKED) {
+ }
+ if (gl->gl_state != LM_ST_UNLOCKED) {
if (glops->go_xmote_bh) {
+ int rv;
+
spin_unlock(&gl->gl_lockref.lock);
rv = glops->go_xmote_bh(gl);
spin_lock(&gl->gl_lockref.lock);
@@ -726,24 +634,11 @@ retry:
goto out;
}
}
- rv = do_promote(gl);
- if (rv == 2)
- goto out_locked;
+ do_promote(gl);
}
out:
- clear_bit(GLF_LOCK, &gl->gl_flags);
-out_locked:
- spin_unlock(&gl->gl_lockref.lock);
-}
-
-static bool is_system_glock(struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
-
- if (gl == m_ip->i_gl)
- return true;
- return false;
+ if (!test_bit(GLF_CANCELING, &gl->gl_flags))
+ clear_bit(GLF_LOCK, &gl->gl_flags);
}
/**
@@ -754,127 +649,93 @@ static bool is_system_glock(struct gfs2_glock *gl)
*
*/
-static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
+ unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int ret;
- if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
- gh && !(gh->gh_flags & LM_FLAG_NOEXP))
- return;
- lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
- LM_FLAG_PRIORITY);
+ /*
+ * When a filesystem is withdrawing, the remaining cluster nodes will
+ * take care of recovering the withdrawing node's journal. We only
+ * need to make sure that once we trigger remote recovery, we won't
+ * write to the shared block device anymore. This means that here,
+ *
+ * - no new writes to the filesystem must be triggered (->go_sync()).
+ *
+ * - any cached data should be discarded by calling ->go_inval(), dirty
+ * or not and journaled or unjournaled.
+ *
+ * - no more dlm locking operations should be issued (->lm_lock()).
+ */
+
GLOCK_BUG_ON(gl, gl->gl_state == target);
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
- if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
- glops->go_inval) {
- /*
- * If another process is already doing the invalidate, let that
- * finish first. The glock state machine will get back to this
- * holder again later.
- */
- if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
- &gl->gl_flags))
- return;
- do_error(gl, 0); /* Fail queued try locks */
- }
- gl->gl_req = target;
- set_bit(GLF_BLOCKING, &gl->gl_flags);
- if ((gl->gl_req == LM_ST_UNLOCKED) ||
- (gl->gl_state == LM_ST_EXCLUSIVE) ||
- (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
- clear_bit(GLF_BLOCKING, &gl->gl_flags);
+
+ if (!glops->go_inval || !glops->go_sync)
+ goto skip_inval;
+
spin_unlock(&gl->gl_lockref.lock);
- if (glops->go_sync) {
+ if (!gfs2_withdrawn(sdp)) {
ret = glops->go_sync(gl);
- /* If we had a problem syncing (due to io errors or whatever,
- * we should not invalidate the metadata or tell dlm to
- * release the glock to other nodes.
- */
if (ret) {
if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
- fs_err(sdp, "Error %d syncing glock \n", ret);
+ fs_err(sdp, "Error %d syncing glock\n", ret);
gfs2_dump_glock(NULL, gl, true);
+ gfs2_withdraw(sdp);
}
- goto skip_inval;
}
}
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
- /*
- * The call to go_sync should have cleared out the ail list.
- * If there are still items, we have a problem. We ought to
- * withdraw, but we can't because the withdraw code also uses
- * glocks. Warn about the error, dump the glock, then fall
- * through and wait for logd to do the withdraw for us.
- */
- if ((atomic_read(&gl->gl_ail_count) != 0) &&
- (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
- gfs2_glock_assert_warn(gl,
- !atomic_read(&gl->gl_ail_count));
- gfs2_dump_glock(NULL, gl, true);
- }
+
+ if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED)
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
- clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
- }
+ spin_lock(&gl->gl_lockref.lock);
skip_inval:
- gfs2_glock_hold(gl);
- /*
- * Check for an error encountered since we called go_sync and go_inval.
- * If so, we can't withdraw from the glock code because the withdraw
- * code itself uses glocks (see function signal_our_withdraw) to
- * change the mount to read-only. Most importantly, we must not call
- * dlm to unlock the glock until the journal is in a known good state
- * (after journal replay) otherwise other nodes may use the object
- * (rgrp or dinode) and then later, journal replay will corrupt the
- * file system. The best we can do here is wait for the logd daemon
- * to see sd_log_error and withdraw, and in the meantime, requeue the
- * work for later.
- *
- * We make a special exception for some system glocks, such as the
- * system statfs inode glock, which needs to be granted before the
- * gfs2_quotad daemon can exit, and that exit needs to finish before
- * we can unmount the withdrawn file system.
- *
- * However, if we're just unlocking the lock (say, for unmount, when
- * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
- * then it's okay to tell dlm to unlock it.
- */
- if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
- gfs2_withdraw_delayed(sdp);
- if (glock_blocked_by_withdraw(gl) &&
- (target != LM_ST_UNLOCKED ||
- test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
- if (!is_system_glock(gl)) {
- gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
- goto out;
- } else {
- clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
- }
+ if (gfs2_withdrawn(sdp)) {
+ if (target != LM_ST_UNLOCKED)
+ target = LM_OUT_ERROR;
+ goto out;
}
- if (sdp->sd_lockstruct.ls_ops->lm_lock) {
- /* lock_dlm */
- ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
- if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
- target == LM_ST_UNLOCKED &&
- test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
- } else if (ret) {
+ if (ls->ls_ops->lm_lock) {
+ set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+ ret = ls->ls_ops->lm_lock(gl, target, gh ? gh->gh_flags : 0);
+ spin_lock(&gl->gl_lockref.lock);
+
+ if (!ret) {
+ /* The operation will be completed asynchronously. */
+ gl->gl_lockref.count++;
+ return;
+ }
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+
+ if (ret == -ENODEV) {
+ /*
+ * The lockspace has been released and the lock has
+ * been unlocked implicitly.
+ */
+ if (target != LM_ST_UNLOCKED) {
+ target = LM_OUT_ERROR;
+ goto out;
+ }
+ } else {
fs_err(sdp, "lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
+ return;
}
- } else { /* lock_nolock */
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
}
+
out:
- spin_lock(&gl->gl_lockref.lock);
+ /* Complete the operation now. */
+ finish_xmote(gl, target);
+ gl->gl_lockref.count++;
+ gfs2_glock_queue_work(gl, 0);
}
/**
@@ -888,16 +749,26 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
- struct gfs2_holder *gh = NULL;
- int ret;
+ struct gfs2_holder *gh;
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+ if (test_bit(GLF_LOCK, &gl->gl_flags))
return;
+ set_bit(GLF_LOCK, &gl->gl_flags);
+ /*
+ * The GLF_DEMOTE_IN_PROGRESS flag is only set intermittently during
+ * locking operations. We have just started a locking operation by
+ * setting the GLF_LOCK flag, so the GLF_DEMOTE_IN_PROGRESS flag must
+ * be cleared.
+ */
GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
- if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
- gl->gl_demote_state != gl->gl_state) {
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+ if (gl->gl_demote_state == gl->gl_state) {
+ gfs2_demote_wake(gl);
+ goto promote;
+ }
+
if (find_first_holder(gl))
goto out_unlock;
if (nonblock)
@@ -905,34 +776,67 @@ __acquires(&gl->gl_lockref.lock)
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
- } else {
- if (test_bit(GLF_DEMOTE, &gl->gl_flags))
- gfs2_demote_wake(gl);
- ret = do_promote(gl);
- if (ret == 0)
- goto out_unlock;
- if (ret == 2)
- goto out;
- gh = find_first_waiter(gl);
- gl->gl_target = gh->gh_state;
- if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
- do_error(gl, 0); /* Fail queued try locks */
+ do_xmote(gl, NULL, gl->gl_target);
+ return;
}
+
+promote:
+ do_promote(gl);
+ if (find_first_holder(gl))
+ goto out_unlock;
+ gh = find_first_waiter(gl);
+ if (!gh)
+ goto out_unlock;
+ if (nonblock)
+ goto out_sched;
+ gl->gl_target = gh->gh_state;
+ if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ do_error(gl, 0); /* Fail queued try locks */
do_xmote(gl, gh, gl->gl_target);
-out:
return;
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
- smp_mb__after_atomic();
gl->gl_lockref.count++;
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
return;
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
- smp_mb__after_atomic();
- return;
+}
+
+/**
+ * glock_set_object - set the gl_object field of a glock
+ * @gl: the glock
+ * @object: the object
+ */
+void glock_set_object(struct gfs2_glock *gl, void *object)
+{
+ void *prev_object;
+
+ spin_lock(&gl->gl_lockref.lock);
+ prev_object = gl->gl_object;
+ gl->gl_object = object;
+ spin_unlock(&gl->gl_lockref.lock);
+ if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
+ gfs2_dump_glock(NULL, gl, true);
+}
+
+/**
+ * glock_clear_object - clear the gl_object field of a glock
+ * @gl: the glock
+ * @object: object the glock currently points at
+ */
+void glock_clear_object(struct gfs2_glock *gl, void *object)
+{
+ void *prev_object;
+
+ spin_lock(&gl->gl_lockref.lock);
+ prev_object = gl->gl_object;
+ gl->gl_object = NULL;
+ spin_unlock(&gl->gl_lockref.lock);
+ if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
+ gfs2_dump_glock(NULL, gl, true);
}
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
@@ -967,50 +871,76 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
gfs2_holder_uninit(&gh);
}
-static bool gfs2_try_evict(struct gfs2_glock *gl)
+static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
{
struct gfs2_inode *ip;
- bool evicted = false;
- /*
- * If there is contention on the iopen glock and we have an inode, try
- * to grab and release the inode so that it can be evicted. This will
- * allow the remote node to go ahead and delete the inode without us
- * having to do it, which will avoid rgrp glock thrashing.
- *
- * The remote node is likely still holding the corresponding inode
- * glock, so it will run before we get to verify that the delete has
- * happened below.
- */
spin_lock(&gl->gl_lockref.lock);
ip = gl->gl_object;
if (ip && !igrab(&ip->i_inode))
ip = NULL;
spin_unlock(&gl->gl_lockref.lock);
if (ip) {
- struct gfs2_glock *inode_gl = NULL;
+ wait_on_new_inode(&ip->i_inode);
+ if (is_bad_inode(&ip->i_inode)) {
+ iput(&ip->i_inode);
+ ip = NULL;
+ }
+ }
+ return ip;
+}
- gl->gl_no_formal_ino = ip->i_no_formal_ino;
- set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+static void gfs2_try_to_evict(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip;
+
+ /*
+ * If there is contention on the iopen glock and we have an inode, try
+ * to grab and release the inode so that it can be evicted. The
+ * GLF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
+ * should not be deleted locally. This will allow the remote node to
+ * go ahead and delete the inode without us having to do it, which will
+ * avoid rgrp glock thrashing.
+ *
+ * The remote node is likely still holding the corresponding inode
+ * glock, so it will run before we get to verify that the delete has
+ * happened below. (Verification is triggered by the call to
+ * gfs2_queue_verify_delete() in gfs2_evict_inode().)
+ */
+ ip = gfs2_grab_existing_inode(gl);
+ if (ip) {
+ set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
d_prune_aliases(&ip->i_inode);
iput(&ip->i_inode);
+ clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);
/* If the inode was evicted, gl->gl_object will now be NULL. */
- spin_lock(&gl->gl_lockref.lock);
- ip = gl->gl_object;
+ ip = gfs2_grab_existing_inode(gl);
if (ip) {
- inode_gl = ip->i_gl;
- lockref_get(&inode_gl->gl_lockref);
- clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ gfs2_glock_poke(ip->i_gl);
+ iput(&ip->i_inode);
}
- spin_unlock(&gl->gl_lockref.lock);
- if (inode_gl) {
- gfs2_glock_poke(inode_gl);
- gfs2_glock_put(inode_gl);
- }
- evicted = !ip;
}
- return evicted;
+}
+
+bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
+ return false;
+ return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
+}
+
+bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ unsigned long delay;
+
+ if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
+ return false;
+ delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
+ return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
}
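
gfs2_queue_verify_delete() above spreads the verification work out: with later set, the delay is one second plus up to nine seconds of random jitter, so several nodes do not re-check the unlinked inode at the same moment. A userspace sketch of that delay computation; HZ and the helper are stand-ins for illustration, not the kernel definitions:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define HZ 250	/* assumed jiffies-per-second value for the sketch */

/* Delay in jiffies: 0 for "now", otherwise 1s plus up to 9s of jitter. */
static unsigned long verify_delete_delay(int later)
{
	if (!later)
		return 0;
	return HZ + (unsigned long)rand() % (HZ * 9UL);
}

int main(void)
{
	unsigned long d;

	srand((unsigned)time(NULL));
	d = verify_delete_delay(1);
	printf("immediate: %lu jiffies\n", verify_delete_delay(0));
	printf("deferred:  %lu jiffies (~%.1f s)\n", d, (double)d / HZ);
	return 0;
}
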
static void delete_work_func(struct work_struct *work)
@@ -1018,45 +948,34 @@ static void delete_work_func(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct inode *inode;
- u64 no_addr = gl->gl_name.ln_number;
+ bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
- spin_lock(&gl->gl_lockref.lock);
- clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
+ /*
+ * Check for the GLF_VERIFY_DELETE above: this ensures that we won't
+ * immediately process GLF_VERIFY_DELETE work that the below call to
+ * gfs2_try_to_evict() queues.
+ */
- if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
- /*
- * If we can evict the inode, give the remote node trying to
- * delete the inode some time before verifying that the delete
- * has happened. Otherwise, if we cause contention on the inode glock
- * immediately, the remote node will think that we still have
- * the inode in use, and so it will give up waiting.
- *
- * If we can't evict the inode, signal to the remote node that
- * the inode is still in use. We'll later try to delete the
- * inode locally in gfs2_evict_inode.
- *
- * FIXME: We only need to verify that the remote node has
- * deleted the inode because nodes before this remote delete
- * rework won't cooperate. At a later time, when we no longer
- * care about compatibility with such nodes, we can skip this
- * step entirely.
- */
- if (gfs2_try_evict(gl)) {
- if (gfs2_queue_delete_work(gl, 5 * HZ))
+ if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
+ gfs2_try_to_evict(gl);
+
+ if (verify_delete) {
+ u64 no_addr = gl->gl_name.ln_number;
+ struct inode *inode;
+
+ inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+ GFS2_BLKST_UNLINKED);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) == -EAGAIN &&
+ !test_bit(SDF_KILL, &sdp->sd_flags) &&
+ gfs2_queue_verify_delete(gl, true))
return;
+ } else {
+ d_prune_aliases(inode);
+ iput(inode);
}
- goto out;
}
- inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
- GFS2_BLKST_UNLINKED);
- if (!IS_ERR_OR_NULL(inode)) {
- d_prune_aliases(inode);
- iput(inode);
- }
-out:
gfs2_glock_put(gl);
}
@@ -1066,43 +985,44 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) {
+ clear_bit(GLF_HAVE_REPLY, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
- spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
- unsigned long holdtime, now = jiffies;
+ if (gl->gl_name.ln_type == LM_TYPE_INODE) {
+ unsigned long holdtime, now = jiffies;
- holdtime = gl->gl_tchange + gl->gl_hold_time;
- if (time_before(now, holdtime))
- delay = holdtime - now;
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
+ if (time_before(now, holdtime))
+ delay = holdtime - now;
+ }
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
- gfs2_set_demote(gl);
+ gfs2_set_demote(GLF_DEMOTE, gl);
}
}
run_queue(gl, 0);
if (delay) {
/* Keep one glock reference for the work we requeue. */
drop_refs--;
- if (gl->gl_name.ln_type != LM_TYPE_INODE)
- delay = 0;
- __gfs2_glock_queue_work(gl, delay);
+ gfs2_glock_queue_work(gl, delay);
}
- /*
- * Drop the remaining glock references manually here. (Mind that
- * __gfs2_glock_queue_work depends on the lockref spinlock begin held
- * here as well.)
- */
+ /* Drop the remaining glock references manually. */
+ GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
gl->gl_lockref.count -= drop_refs;
if (!gl->gl_lockref.count) {
- __gfs2_glock_put(gl);
- return;
+ if (gl->gl_state == LM_ST_UNLOCKED) {
+ __gfs2_glock_put(gl);
+ return;
+ }
+ gfs2_glock_add_to_lru(gl);
}
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1138,6 +1058,8 @@ again:
out:
rcu_read_unlock();
finish_wait(wq, &wait.wait);
+ if (gl)
+ gfs2_glock_remove_from_lru(gl);
return gl;
}
@@ -1158,51 +1080,51 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
- struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number,
.ln_type = glops->go_type,
.ln_sbd = sdp };
struct gfs2_glock *gl, *tmp;
struct address_space *mapping;
- struct kmem_cache *cachep;
- int ret = 0;
gl = find_insert_glock(&name, NULL);
- if (gl) {
- *glp = gl;
- return 0;
- }
+ if (gl)
+ goto found;
if (!create)
return -ENOENT;
- if (glops->go_flags & GLOF_ASPACE)
- cachep = gfs2_glock_aspace_cachep;
- else
- cachep = gfs2_glock_cachep;
- gl = kmem_cache_alloc(cachep, GFP_NOFS);
- if (!gl)
- return -ENOMEM;
-
+ if (glops->go_flags & GLOF_ASPACE) {
+ struct gfs2_glock_aspace *gla =
+ kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
+ if (!gla)
+ return -ENOMEM;
+ gl = &gla->glock;
+ } else {
+ gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
+ if (!gl)
+ return -ENOMEM;
+ }
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+ gl->gl_ops = glops;
if (glops->go_flags & GLOF_LVB) {
gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) {
- kmem_cache_free(cachep, gl);
+ gfs2_glock_dealloc(&gl->gl_rcu);
return -ENOMEM;
}
}
atomic_inc(&sdp->sd_glock_disposal);
gl->gl_node.next = NULL;
- gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0;
+ gl->gl_flags = BIT(GLF_INITIAL);
+ if (glops->go_instantiate)
+ gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
gl->gl_name = name;
+ lockref_init(&gl->gl_lockref);
lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
- gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
- gl->gl_ops = glops;
gl->gl_dstamp = 0;
preempt_disable();
/* We use the global stats to estimate the initial per-glock stats */
@@ -1219,56 +1141,52 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping = gfs2_glock2aspace(gl);
if (mapping) {
+ gfp_t gfp_mask;
+
mapping->a_ops = &gfs2_meta_aops;
- mapping->host = s->s_bdev->bd_inode;
+ mapping->host = sdp->sd_inode;
mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->private_data = NULL;
+ gfp_mask = mapping_gfp_mask(sdp->sd_inode->i_mapping);
+ mapping_set_gfp_mask(mapping, gfp_mask);
+ mapping->i_private_data = NULL;
mapping->writeback_index = 0;
}
tmp = find_insert_glock(&name, gl);
- if (!tmp) {
- *glp = gl;
- goto out;
- }
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto out_free;
- }
- *glp = tmp;
+ if (tmp) {
+ gfs2_glock_dealloc(&gl->gl_rcu);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
-out_free:
- kfree(gl->gl_lksb.sb_lvbptr);
- kmem_cache_free(cachep, gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ gl = tmp;
+ }
-out:
- return ret;
+found:
+ *glp = gl;
+ return 0;
}
/**
- * gfs2_holder_init - initialize a struct gfs2_holder in the default way
+ * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
* @gl: the glock
* @state: the state we're requesting
* @flags: the modifier flags
* @gh: the holder structure
- *
+ * @ip: caller's return address for debugging
*/
void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
struct gfs2_holder *gh, unsigned long ip)
{
INIT_LIST_HEAD(&gh->gh_list);
- gh->gh_gl = gl;
+ gh->gh_gl = gfs2_glock_hold(gl);
gh->gh_ip = ip;
gh->gh_owner_pid = get_pid(task_pid(current));
gh->gh_state = state;
gh->gh_flags = flags;
- gh->gh_error = 0;
gh->gh_iflags = 0;
- gfs2_glock_hold(gl);
}
/**
@@ -1317,6 +1235,25 @@ static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
}
/**
+ * gfs2_glock_holder_ready - holder is ready and its error code can be collected
+ * @gh: the glock holder
+ *
+ * Called when a glock holder no longer needs to be waited for because it is
+ * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
+ * failed (gh_error != 0).
+ */
+
+int gfs2_glock_holder_ready(struct gfs2_holder *gh)
+{
+ if (gh->gh_error || (gh->gh_flags & GL_SKIP))
+ return gh->gh_error;
+ gh->gh_error = gfs2_instantiate(gh);
+ if (gh->gh_error)
+ gfs2_glock_dq(gh);
+ return gh->gh_error;
+}
+
+/**
* gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
*
@@ -1330,7 +1267,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
might_sleep();
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
gfs2_glock_update_hold_time(gh->gh_gl, start_time);
- return gh->gh_error;
+ return gfs2_glock_holder_ready(gh);
}
static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
@@ -1358,7 +1295,6 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
int i, ret = 0, timeout = 0;
unsigned long start_time = jiffies;
- bool keep_waiting;
might_sleep();
/*
@@ -1368,58 +1304,38 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
for (i = 0; i < num_gh; i++)
timeout += ghs[i].gh_gl->gl_hold_time << 1;
-wait_for_dlm:
if (!wait_event_timeout(sdp->sd_async_glock_wait,
- !glocks_pending(num_gh, ghs), timeout))
+ !glocks_pending(num_gh, ghs), timeout)) {
ret = -ESTALE; /* request timed out. */
+ goto out;
+ }
- /*
- * If dlm granted all our requests, we need to adjust the glock
- * minimum hold time values according to how long we waited.
- *
- * If our request timed out, we need to repeatedly release any held
- * glocks we acquired thus far to allow dlm to acquire the remaining
- * glocks without deadlocking. We cannot currently cancel outstanding
- * glock acquisitions.
- *
- * The HIF_WAIT bit tells us which requests still need a response from
- * dlm.
- *
- * If dlm sent us any errors, we return the first error we find.
- */
- keep_waiting = false;
for (i = 0; i < num_gh; i++) {
- /* Skip holders we have already dequeued below. */
- if (!gfs2_holder_queued(&ghs[i]))
- continue;
- /* Skip holders with a pending DLM response. */
- if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
- keep_waiting = true;
- continue;
- }
+ struct gfs2_holder *gh = &ghs[i];
+ int ret2;
- if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
- if (ret == -ESTALE)
- gfs2_glock_dq(&ghs[i]);
- else
- gfs2_glock_update_hold_time(ghs[i].gh_gl,
- start_time);
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+ gfs2_glock_update_hold_time(gh->gh_gl,
+ start_time);
}
+ ret2 = gfs2_glock_holder_ready(gh);
if (!ret)
- ret = ghs[i].gh_error;
+ ret = ret2;
}
- if (keep_waiting)
- goto wait_for_dlm;
+out:
+ if (ret) {
+ for (i = 0; i < num_gh; i++) {
+ struct gfs2_holder *gh = &ghs[i];
- /*
- * At this point, we've either acquired all locks or released them all.
- */
+ gfs2_glock_dq(gh);
+ }
+ }
return ret;
}
/**
- * handle_callback - process a demote request
+ * request_demote - process a demote request
* @gl: the glock
* @state: the state the caller wants us to change to
* @delay: zero to demote immediately; otherwise pending demote
@@ -1429,13 +1345,10 @@ wait_for_dlm:
* practise: LM_ST_SHARED and LM_ST_UNLOCKED
*/
-static void handle_callback(struct gfs2_glock *gl, unsigned int state,
- unsigned long delay, bool remote)
+static void request_demote(struct gfs2_glock *gl, unsigned int state,
+ unsigned long delay, bool remote)
{
- if (delay)
- set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
- else
- gfs2_set_demote(gl);
+ gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
@@ -1467,6 +1380,31 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
va_end(args);
}
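/*
 * Descriptive summary (derived from the checks below): a trylock request is
 * queued only if it does not conflict with the current holder, if any, and
 * every other queued waiter is itself a trylock request.
 */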
+static bool gfs2_should_queue_trylock(struct gfs2_glock *gl,
+ struct gfs2_holder *gh)
+{
+ struct gfs2_holder *current_gh, *gh2;
+
+ current_gh = find_first_holder(gl);
+ if (current_gh && !may_grant(gl, current_gh, gh))
+ return false;
+
+ list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+ continue;
+ if (!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ return false;
+ }
+ return true;
+}
+
+static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
+{
+ if (!(gh->gh_flags & GL_NOPID))
+ return true;
+ return !test_bit(HIF_HOLDER, &gh->gh_iflags);
+}
+
/**
* add_to_queue - Add a holder to the wait queue (but look for recursion)
* @gh: the holder structure to add
@@ -1478,65 +1416,35 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
*/
static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_lockref.lock)
-__acquires(&gl->gl_lockref.lock)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct list_head *insert_pt = NULL;
struct gfs2_holder *gh2;
- int try_futile = 0;
GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
GLOCK_BUG_ON(gl, true);
- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
- if (test_bit(GLF_LOCK, &gl->gl_flags)) {
- struct gfs2_holder *first_gh;
-
- first_gh = find_first_strong_holder(gl);
- try_futile = !may_grant(gl, first_gh, gh);
- }
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
- goto fail;
+ if ((gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+ !gfs2_should_queue_trylock(gl, gh)) {
+ gh->gh_error = GLR_TRYFAILED;
+ gfs2_holder_wake(gh);
+ return;
}
list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
- if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
- (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK) &&
- !test_bit(HIF_MAY_DEMOTE, &gh2->gh_iflags)))
- goto trap_recursive;
- if (try_futile &&
- !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
-fail:
- gh->gh_error = GLR_TRYFAILED;
- gfs2_holder_wake(gh);
- return;
- }
- if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+ if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
+ continue;
+ if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
+ continue;
+ if (!pid_is_meaningful(gh2))
continue;
- if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
- insert_pt = &gh2->gh_list;
+ goto trap_recursive;
}
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
- if (likely(insert_pt == NULL)) {
- list_add_tail(&gh->gh_list, &gl->gl_holders);
- if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
- goto do_cancel;
- return;
- }
- list_add_tail(&gh->gh_list, insert_pt);
-do_cancel:
- gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
- if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
- spin_unlock(&gl->gl_lockref.lock);
- if (sdp->sd_lockstruct.ls_ops->lm_cancel)
- sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
- spin_lock(&gl->gl_lockref.lock);
- }
+ list_add_tail(&gh->gh_list, &gl->gl_holders);
return;
trap_recursive:
@@ -1564,25 +1472,44 @@ trap_recursive:
int gfs2_glock_nq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- int error = 0;
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ int error;
- if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
+ if (gfs2_withdrawn(sdp))
return -EIO;
- if (test_bit(GLF_LRU, &gl->gl_flags))
- gfs2_glock_remove_from_lru(gl);
+ if (gh->gh_flags & GL_NOBLOCK) {
+ struct gfs2_holder *current_gh;
+ error = -ECHILD;
+ spin_lock(&gl->gl_lockref.lock);
+ if (find_last_waiter(gl))
+ goto unlock;
+ current_gh = find_first_holder(gl);
+ if (!may_grant(gl, current_gh, gh))
+ goto unlock;
+ set_bit(HIF_HOLDER, &gh->gh_iflags);
+ list_add_tail(&gh->gh_list, &gl->gl_holders);
+ trace_gfs2_promote(gh);
+ error = 0;
+unlock:
+ spin_unlock(&gl->gl_lockref.lock);
+ return error;
+ }
+
+ gh->gh_error = 0;
spin_lock(&gl->gl_lockref.lock);
add_to_queue(gh);
- if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
- test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ if (unlikely((LM_FLAG_RECOVER & gh->gh_flags) &&
+ test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
+ set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
gl->gl_lockref.count++;
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
}
run_queue(gl, 1);
spin_unlock(&gl->gl_lockref.lock);
+ error = 0;
if (!(gh->gh_flags & GL_ASYNC))
error = gfs2_glock_wait(gh);
@@ -1601,90 +1528,40 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
-static inline bool needs_demote(struct gfs2_glock *gl)
-{
- return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
- test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
-}
-
static void __gfs2_glock_dq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
unsigned delay = 0;
int fast_path = 0;
/*
- * This while loop is similar to function demote_incompat_holders:
- * If the glock is due to be demoted (which may be from another node
- * or even if this holder is GL_NOCACHE), the weak holders are
- * demoted as well, allowing the glock to be demoted.
+ * This holder should not be cached, so mark it for demote.
+ * Note: this should be done before the glock_needs_demote
+ * check below.
*/
- while (gh) {
- /*
- * If we're in the process of file system withdraw, we cannot
- * just dequeue any glocks until our journal is recovered, lest
- * we introduce file system corruption. We need two exceptions
- * to this rule: We need to allow unlocking of nondisk glocks
- * and the glock for our own journal that needs recovery.
- */
- if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
- glock_blocked_by_withdraw(gl) &&
- gh->gh_gl != sdp->sd_jinode_gl) {
- sdp->sd_glock_dqs_held++;
- spin_unlock(&gl->gl_lockref.lock);
- might_sleep();
- wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
- TASK_UNINTERRUPTIBLE);
- spin_lock(&gl->gl_lockref.lock);
- }
-
- /*
- * This holder should not be cached, so mark it for demote.
- * Note: this should be done before the check for needs_demote
- * below.
- */
- if (gh->gh_flags & GL_NOCACHE)
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ if (gh->gh_flags & GL_NOCACHE)
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
- list_del_init(&gh->gh_list);
- clear_bit(HIF_HOLDER, &gh->gh_iflags);
- trace_gfs2_glock_queue(gh, 0);
+ list_del_init(&gh->gh_list);
+ clear_bit(HIF_HOLDER, &gh->gh_iflags);
+ trace_gfs2_glock_queue(gh, 0);
- /*
- * If there hasn't been a demote request we are done.
- * (Let the remaining holders, if any, keep holding it.)
- */
- if (!needs_demote(gl)) {
- if (list_empty(&gl->gl_holders))
- fast_path = 1;
- break;
- }
- /*
- * If we have another strong holder (we cannot auto-demote)
- * we are done. It keeps holding it until it is done.
- */
- if (find_first_strong_holder(gl))
- break;
-
- /*
- * If we have a weak holder at the head of the list, it
- * (and all others like it) must be auto-demoted. If there
- * are no more weak holders, we exit the while loop.
- */
- gh = find_first_holder(gl);
+ /*
+ * If there hasn't been a demote request we are done.
+ * (Let the remaining holders, if any, keep holding it.)
+ */
+ if (!glock_needs_demote(gl)) {
+ if (list_empty(&gl->gl_holders))
+ fast_path = 1;
}
- if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
- gfs2_glock_add_to_lru(gl);
-
if (unlikely(!fast_path)) {
gl->gl_lockref.count++;
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl->gl_name.ln_type == LM_TYPE_INODE)
delay = gl->gl_hold_time;
- __gfs2_glock_queue_work(gl, delay);
+ gfs2_glock_queue_work(gl, delay);
}
}
@@ -1698,7 +1575,32 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
struct gfs2_glock *gl = gh->gh_gl;
spin_lock(&gl->gl_lockref.lock);
+ if (!gfs2_holder_queued(gh)) {
+ /*
+ * May have already been dequeued because the locking request
+ * was GL_ASYNC and it has failed in the meantime.
+ */
+ goto out;
+ }
+
+ if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
+ !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
+ test_bit(GLF_LOCK, &gl->gl_flags) &&
+ !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+ !test_bit(GLF_CANCELING, &gl->gl_flags)) {
+ set_bit(GLF_CANCELING, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+ gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
+ wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
+ spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_CANCELING, &gl->gl_flags);
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ if (!gfs2_holder_queued(gh))
+ goto out;
+ }
+
__gfs2_glock_dq(gh);
+out:
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1773,7 +1675,7 @@ static int glock_compare(const void *arg_a, const void *arg_b)
}
/**
- * nq_m_sync - synchonously acquire more than one glock in deadlock free order
+ * nq_m_sync - synchronously acquire more than one glock in deadlock free order
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
* @p: placeholder for the holder structure to pass back
@@ -1794,8 +1696,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
for (x = 0; x < num_gh; x++) {
- p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
-
error = gfs2_glock_nq(p[x]);
if (error) {
while (x--)
@@ -1812,7 +1712,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
*
- *
* Returns: 0 on success (all glocks acquired),
* errno on failure (no glocks acquired)
*/
@@ -1827,7 +1726,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
case 0:
return 0;
case 1:
- ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
return gfs2_glock_nq(ghs);
default:
if (num_gh <= 4)
@@ -1861,45 +1759,24 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
- struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
unsigned long delay = 0;
- unsigned long holdtime;
- unsigned long now = jiffies;
gfs2_glock_hold(gl);
spin_lock(&gl->gl_lockref.lock);
- holdtime = gl->gl_tchange + gl->gl_hold_time;
if (!list_empty(&gl->gl_holders) &&
gl->gl_name.ln_type == LM_TYPE_INODE) {
+ unsigned long now = jiffies;
+ unsigned long holdtime;
+
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
+
if (time_before(now, holdtime))
delay = holdtime - now;
- if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+ if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
delay = gl->gl_hold_time;
}
- /*
- * Note 1: We cannot call demote_incompat_holders from handle_callback
- * or gfs2_set_demote due to recursion problems like: gfs2_glock_dq ->
- * handle_callback -> demote_incompat_holders -> gfs2_glock_dq
- * Plus, we only want to demote the holders if the request comes from
- * a remote cluster node because local holder conflicts are resolved
- * elsewhere.
- *
- * Note 2: if a remote node wants this glock in EX mode, lock_dlm will
- * request that we set our state to UNLOCKED. Here we mock up a holder
- * to make it look like someone wants the lock EX locally. Any SH
- * and DF requests should be able to share the lock without demoting.
- *
- * Note 3: We only want to demote the demoteable holders when there
- * are no more strong holders. The demoteable holders might as well
- * keep the glock until the last strong holder is done with it.
- */
- if (!find_first_strong_holder(gl)) {
- if (state == LM_ST_UNLOCKED)
- mock_gh.gh_state = LM_ST_EXCLUSIVE;
- demote_incompat_holders(gl, &mock_gh);
- }
- handle_callback(gl, state, delay, true);
- __gfs2_glock_queue_work(gl, delay);
+ request_demote(gl, state, delay, true);
+ gfs2_glock_queue_work(gl, delay);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1909,7 +1786,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
*
* Glocks are not frozen if (a) the result of the dlm operation is
* an error, (b) the locking operation was an unlock operation or
- * (c) if there is a "noexp" flagged request anywhere in the queue
+ * (c) if there is a "recover" flagged request anywhere in the queue
*
* Returns: 1 if freezing should occur, 0 otherwise
*/
@@ -1926,7 +1803,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
- if (LM_FLAG_NOEXP & gh->gh_flags)
+ if (LM_FLAG_RECOVER & gh->gh_flags)
return 0;
}
@@ -1947,19 +1824,20 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
gl->gl_reply = ret;
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
if (gfs2_should_freeze(gl)) {
- set_bit(GLF_FROZEN, &gl->gl_flags);
+ set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
return;
}
}
gl->gl_lockref.count++;
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- __gfs2_glock_queue_work(gl, 0);
+ set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
+ gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1979,6 +1857,16 @@ static int glock_cmp(void *priv, const struct list_head *a,
return 0;
}
+static bool can_free_glock(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ return !test_bit(GLF_LOCK, &gl->gl_flags) &&
+ !gl->gl_lockref.count &&
+ (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
+ test_bit(SDF_KILL, &sdp->sd_flags));
+}
+
/**
* gfs2_dispose_glock_lru - Demote a list of glocks
* @list: The list to dispose of
@@ -1993,37 +1881,38 @@ static int glock_cmp(void *priv, const struct list_head *a,
* private)
*/
-static void gfs2_dispose_glock_lru(struct list_head *list)
+static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
struct gfs2_glock *gl;
+ unsigned long freed = 0;
list_sort(NULL, list, glock_cmp);
while(!list_empty(list)) {
gl = list_first_entry(list, struct gfs2_glock, gl_lru);
- list_del_init(&gl->gl_lru);
- clear_bit(GLF_LRU, &gl->gl_flags);
if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
- list_add(&gl->gl_lru, &lru_list);
- set_bit(GLF_LRU, &gl->gl_flags);
- atomic_inc(&lru_count);
+ list_move(&gl->gl_lru, &lru_list);
continue;
}
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+ if (!can_free_glock(gl)) {
spin_unlock(&gl->gl_lockref.lock);
goto add_back_to_lru;
}
+ list_del_init(&gl->gl_lru);
+ atomic_dec(&lru_count);
+ clear_bit(GLF_LRU, &gl->gl_flags);
+ freed++;
gl->gl_lockref.count++;
- if (demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
- WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
- __gfs2_glock_queue_work(gl, 0);
+ if (gl->gl_state != LM_ST_UNLOCKED)
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
+ gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
cond_resched_lock(&lru_lock);
}
+ return freed;
}
/**
@@ -2035,30 +1924,21 @@ add_back_to_lru:
* gfs2_dispose_glock_lru() above.
*/
-static long gfs2_scan_glock_lru(int nr)
+static unsigned long gfs2_scan_glock_lru(unsigned long nr)
{
- struct gfs2_glock *gl;
- LIST_HEAD(skipped);
+ struct gfs2_glock *gl, *next;
LIST_HEAD(dispose);
- long freed = 0;
+ unsigned long freed = 0;
spin_lock(&lru_lock);
- while ((nr-- >= 0) && !list_empty(&lru_list)) {
- gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru);
-
- /* Test for being demotable */
- if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
+ list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
+ if (!nr--)
+ break;
+ if (can_free_glock(gl))
list_move(&gl->gl_lru, &dispose);
- atomic_dec(&lru_count);
- freed++;
- continue;
- }
-
- list_move(&gl->gl_lru, &skipped);
}
- list_splice(&skipped, &lru_list);
if (!list_empty(&dispose))
- gfs2_dispose_glock_lru(&dispose);
+ freed = gfs2_dispose_glock_lru(&dispose);
spin_unlock(&lru_lock);
return freed;
@@ -2078,11 +1958,7 @@ static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
return vfs_pressure_ratio(atomic_read(&lru_count));
}
-static struct shrinker glock_shrinker = {
- .seeks = DEFAULT_SEEKS,
- .count_objects = gfs2_glock_shrink_count,
- .scan_objects = gfs2_glock_shrink_scan,
-};
+static struct shrinker *glock_shrinker;
/**
* glock_hash_walk - Call a function for glock in a hash bucket
@@ -2115,37 +1991,21 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
rhashtable_walk_exit(&iter);
}
-bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
-{
- bool queued;
-
- spin_lock(&gl->gl_lockref.lock);
- queued = queue_delayed_work(gfs2_delete_workqueue,
- &gl->gl_delete, delay);
- if (queued)
- set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
- return queued;
-}
-
void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
- if (cancel_delayed_work(&gl->gl_delete)) {
- clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
+ clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
+ if (cancel_delayed_work(&gl->gl_delete))
gfs2_glock_put(gl);
- }
-}
-
-bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
-{
- return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
}
static void flush_delete_work(struct gfs2_glock *gl)
{
if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
if (cancel_delayed_work(&gl->gl_delete)) {
- queue_delayed_work(gfs2_delete_workqueue,
+ queue_delayed_work(sdp->sd_delete_wq,
&gl->gl_delete, 0);
}
}
@@ -2154,7 +2014,7 @@ static void flush_delete_work(struct gfs2_glock *gl)
void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{
glock_hash_walk(flush_delete_work, sdp);
- flush_workqueue(gfs2_delete_workqueue);
+ flush_workqueue(sdp->sd_delete_wq);
}
/**
@@ -2165,12 +2025,16 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
static void thaw_glock(struct gfs2_glock *gl)
{
- if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+ if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+
+ gfs2_glock_remove_from_lru(gl);
+ spin_lock(&gl->gl_lockref.lock);
+ set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
/**
@@ -2187,8 +2051,8 @@ static void clear_glock(struct gfs2_glock *gl)
if (!__lockref_is_dead(&gl->gl_lockref)) {
gl->gl_lockref.count++;
if (gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
- __gfs2_glock_queue_work(gl, 0);
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
+ gfs2_glock_queue_work(gl, 0);
}
spin_unlock(&gl->gl_lockref.lock);
}
@@ -2216,6 +2080,28 @@ static void dump_glock_func(struct gfs2_glock *gl)
dump_glock(NULL, gl, true);
}
+static void withdraw_glock(struct gfs2_glock *gl)
+{
+ spin_lock(&gl->gl_lockref.lock);
+ if (!__lockref_is_dead(&gl->gl_lockref)) {
+ /*
+ * We don't want to write back any more dirty data. Unlock the
+ * remaining inode and resource group glocks; this will cause
+ * their ->go_inval() hooks to toss out all the remaining
+ * cached data, dirty or not.
+ */
+ if (gl->gl_ops->go_inval && gl->gl_state != LM_ST_UNLOCKED)
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
+ do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
+ }
+ spin_unlock(&gl->gl_lockref.lock);
+}
+
+void gfs2_withdraw_glocks(struct gfs2_sbd *sdp)
+{
+ glock_hash_walk(withdraw_glock, sdp);
+}
+
/**
* gfs2_gl_hash_clear - Empty out the glock hash table
* @sdp: the filesystem
@@ -2225,28 +2111,31 @@ static void dump_glock_func(struct gfs2_glock *gl)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
+ unsigned long start = jiffies;
+ bool timed_out = false;
+
set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
- flush_workqueue(glock_workqueue);
+ flush_workqueue(sdp->sd_glock_wq);
glock_hash_walk(clear_glock, sdp);
- flush_workqueue(glock_workqueue);
- wait_event_timeout(sdp->sd_glock_wait,
- atomic_read(&sdp->sd_glock_disposal) == 0,
- HZ * 600);
- glock_hash_walk(dump_glock_func, sdp);
-}
-
-void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
-{
- struct gfs2_glock *gl = ip->i_gl;
- int ret;
-
- ret = gfs2_truncatei_resume(ip);
- gfs2_glock_assert_withdraw(gl, ret == 0);
+ flush_workqueue(sdp->sd_glock_wq);
- spin_lock(&gl->gl_lockref.lock);
- clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl, 1);
- spin_unlock(&gl->gl_lockref.lock);
+ while (!timed_out) {
+ wait_event_timeout(sdp->sd_kill_wait,
+ !atomic_read(&sdp->sd_glock_disposal),
+ HZ * 60);
+ if (!atomic_read(&sdp->sd_glock_disposal))
+ break;
+ timed_out = time_after(jiffies, start + (HZ * 600));
+ fs_warn(sdp, "%u glocks left after %u seconds%s\n",
+ atomic_read(&sdp->sd_glock_disposal),
+ jiffies_to_msecs(jiffies - start) / 1000,
+ timed_out ? ":" : "; still waiting");
+ }
+ gfs2_lm_unmount(sdp);
+ gfs2_free_dead_glocks(sdp);
+ glock_hash_walk(dump_glock_func, sdp);
+ destroy_workqueue(sdp->sd_glock_wq);
+ sdp->sd_glock_wq = NULL;
}
static const char *state2str(unsigned state)
@@ -2271,12 +2160,10 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
*p++ = 't';
if (flags & LM_FLAG_TRY_1CB)
*p++ = 'T';
- if (flags & LM_FLAG_NOEXP)
+ if (flags & LM_FLAG_RECOVER)
*p++ = 'e';
if (flags & LM_FLAG_ANY)
*p++ = 'A';
- if (flags & LM_FLAG_PRIORITY)
- *p++ = 'p';
if (flags & LM_FLAG_NODE_SCOPE)
*p++ = 'n';
if (flags & GL_ASYNC)
@@ -2289,8 +2176,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
*p++ = 'H';
if (test_bit(HIF_WAIT, &iflags))
*p++ = 'W';
- if (test_bit(HIF_MAY_DEMOTE, &iflags))
- *p++ = 'D';
if (flags & GL_SKIP)
*p++ = 's';
*p = 0;
@@ -2308,19 +2193,24 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
const char *fs_id_buf)
{
- struct task_struct *gh_owner = NULL;
+ const char *comm = "(none)";
+ pid_t owner_pid = 0;
char flags_buf[32];
rcu_read_lock();
- if (gh->gh_owner_pid)
+ if (pid_is_meaningful(gh)) {
+ struct task_struct *gh_owner;
+
+ comm = "(ended)";
+ owner_pid = pid_nr(gh->gh_owner_pid);
gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
+ if (gh_owner)
+ comm = gh_owner->comm;
+ }
gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
fs_id_buf, state2str(gh->gh_state),
hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
- gh->gh_error,
- gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
- gh_owner ? gh_owner->comm : "(ended)",
- (void *)gh->gh_ip);
+ gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
rcu_read_unlock();
}
@@ -2341,13 +2231,13 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'y';
if (test_bit(GLF_LFLUSH, gflags))
*p++ = 'f';
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
- *p++ = 'i';
- if (test_bit(GLF_REPLY_PENDING, gflags))
+ if (test_bit(GLF_PENDING_REPLY, gflags))
+ *p++ = 'R';
+ if (test_bit(GLF_HAVE_REPLY, gflags))
*p++ = 'r';
if (test_bit(GLF_INITIAL, gflags))
- *p++ = 'I';
- if (test_bit(GLF_FROZEN, gflags))
+ *p++ = 'a';
+ if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
*p++ = 'F';
if (!list_empty(&gl->gl_holders))
*p++ = 'q';
@@ -2357,14 +2247,18 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'o';
if (test_bit(GLF_BLOCKING, gflags))
*p++ = 'b';
- if (test_bit(GLF_PENDING_DELETE, gflags))
- *p++ = 'P';
- if (test_bit(GLF_FREEING, gflags))
- *p++ = 'x';
if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
*p++ = 'n';
if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
*p++ = 'N';
+ if (test_bit(GLF_TRY_TO_EVICT, gflags))
+ *p++ = 'e';
+ if (test_bit(GLF_VERIFY_DELETE, gflags))
+ *p++ = 'E';
+ if (test_bit(GLF_DEFER_DELETE, gflags))
+ *p++ = 's';
+ if (test_bit(GLF_CANCELING, gflags))
+ *p++ = 'C';
*p = 0;
return buf;
}
@@ -2508,28 +2402,16 @@ int __init gfs2_glock_init(void)
if (ret < 0)
return ret;
- glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
- WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (!glock_workqueue) {
- rhashtable_destroy(&gl_hash_table);
- return -ENOMEM;
- }
- gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
- WQ_MEM_RECLAIM | WQ_FREEZABLE,
- 0);
- if (!gfs2_delete_workqueue) {
- destroy_workqueue(glock_workqueue);
+ glock_shrinker = shrinker_alloc(0, "gfs2-glock");
+ if (!glock_shrinker) {
rhashtable_destroy(&gl_hash_table);
return -ENOMEM;
}
- ret = register_shrinker(&glock_shrinker);
- if (ret) {
- destroy_workqueue(gfs2_delete_workqueue);
- destroy_workqueue(glock_workqueue);
- rhashtable_destroy(&gl_hash_table);
- return ret;
- }
+ glock_shrinker->count_objects = gfs2_glock_shrink_count;
+ glock_shrinker->scan_objects = gfs2_glock_shrink_scan;
+
+ shrinker_register(glock_shrinker);
for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
init_waitqueue_head(glock_wait_table + i);
@@ -2539,10 +2421,8 @@ int __init gfs2_glock_init(void)
void gfs2_glock_exit(void)
{
- unregister_shrinker(&glock_shrinker);
+ shrinker_free(glock_shrinker);
rhashtable_destroy(&gl_hash_table);
- destroy_workqueue(glock_workqueue);
- destroy_workqueue(gfs2_delete_workqueue);
}
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
@@ -2552,8 +2432,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
if (gl) {
if (n == 0)
return;
- if (!lockref_put_not_zero(&gl->gl_lockref))
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
}
for (;;) {
gl = rhashtable_walk_next(&gi->hti);
@@ -2735,6 +2614,168 @@ static const struct file_operations gfs2_glstats_fops = {
.release = gfs2_glocks_release,
};
+struct gfs2_glockfd_iter {
+ struct super_block *sb;
+ unsigned int tgid;
+ struct task_struct *task;
+ unsigned int fd;
+ struct file *file;
+};
+
+static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
+{
+ struct pid_namespace *ns = task_active_pid_ns(current);
+ struct pid *pid;
+
+ if (i->task)
+ put_task_struct(i->task);
+
+ rcu_read_lock();
+retry:
+ i->task = NULL;
+ pid = find_ge_pid(i->tgid, ns);
+ if (pid) {
+ i->tgid = pid_nr_ns(pid, ns);
+ i->task = pid_task(pid, PIDTYPE_TGID);
+ if (!i->task) {
+ i->tgid++;
+ goto retry;
+ }
+ get_task_struct(i->task);
+ }
+ rcu_read_unlock();
+ return i->task;
+}
+
+static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
+{
+ if (i->file) {
+ fput(i->file);
+ i->file = NULL;
+ }
+
+ for(;; i->fd++) {
+ i->file = fget_task_next(i->task, &i->fd);
+ if (!i->file) {
+ i->fd = 0;
+ break;
+ }
+
+ if (file_inode(i->file)->i_sb == i->sb)
+ break;
+
+ fput(i->file);
+ }
+ return i->file;
+}
+
+static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct gfs2_glockfd_iter *i = seq->private;
+
+ if (*pos)
+ return NULL;
+ while (gfs2_glockfd_next_task(i)) {
+ if (gfs2_glockfd_next_file(i))
+ return i;
+ i->tgid++;
+ }
+ return NULL;
+}
+
+static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
+ loff_t *pos)
+{
+ struct gfs2_glockfd_iter *i = seq->private;
+
+ (*pos)++;
+ i->fd++;
+ do {
+ if (gfs2_glockfd_next_file(i))
+ return i;
+ i->tgid++;
+ } while (gfs2_glockfd_next_task(i));
+ return NULL;
+}
+
+static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
+{
+ struct gfs2_glockfd_iter *i = seq->private;
+
+ if (i->file)
+ fput(i->file);
+ if (i->task)
+ put_task_struct(i->task);
+}
+
+static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
+ struct gfs2_glockfd_iter *i)
+{
+ struct gfs2_file *fp = i->file->private_data;
+ struct gfs2_holder *fl_gh = &fp->f_fl_gh;
+ struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED };
+
+ if (!READ_ONCE(fl_gh->gh_gl))
+ return;
+
+ spin_lock(&i->file->f_lock);
+ if (gfs2_holder_initialized(fl_gh))
+ gl_name = fl_gh->gh_gl->gl_name;
+ spin_unlock(&i->file->f_lock);
+
+ if (gl_name.ln_type != LM_TYPE_RESERVED) {
+ seq_printf(seq, "%d %u %u/%llx\n",
+ i->tgid, i->fd, gl_name.ln_type,
+ (unsigned long long)gl_name.ln_number);
+ }
+}
+
+static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
+{
+ struct gfs2_glockfd_iter *i = seq->private;
+ struct inode *inode = file_inode(i->file);
+ struct gfs2_glock *gl;
+
+ inode_lock_shared(inode);
+ gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
+ if (gl) {
+ seq_printf(seq, "%d %u %u/%llx\n",
+ i->tgid, i->fd, gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number);
+ }
+ gfs2_glockfd_seq_show_flock(seq, i);
+ inode_unlock_shared(inode);
+ return 0;
+}
+
+static const struct seq_operations gfs2_glockfd_seq_ops = {
+ .start = gfs2_glockfd_seq_start,
+ .next = gfs2_glockfd_seq_next,
+ .stop = gfs2_glockfd_seq_stop,
+ .show = gfs2_glockfd_seq_show,
+};
+
+static int gfs2_glockfd_open(struct inode *inode, struct file *file)
+{
+ struct gfs2_glockfd_iter *i;
+ struct gfs2_sbd *sdp = inode->i_private;
+
+ i = __seq_open_private(file, &gfs2_glockfd_seq_ops,
+ sizeof(struct gfs2_glockfd_iter));
+ if (!i)
+ return -ENOMEM;
+ i->sb = sdp->sd_vfs;
+ return 0;
+}
+
+static const struct file_operations gfs2_glockfd_fops = {
+ .owner = THIS_MODULE,
+ .open = gfs2_glockfd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
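+/*
+ * Descriptive note: each line of the "glockfd" debugfs file produced by the
+ * seq_printf() calls above has the form
+ * "<tgid> <fd> <glock type>/<glock number in hex>"; a file descriptor that
+ * also holds a flock glock gets an additional line for that glock.
+ */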
DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
@@ -2744,6 +2785,9 @@ void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
&gfs2_glocks_fops);
+ debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
+ &gfs2_glockfd_fops);
+
debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
&gfs2_glstats_fops);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 4f8642301801..55d5985f32a0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -58,23 +58,19 @@ enum {
* LM_FLAG_TRY_1CB
* Send one blocking callback if TRY is set and the lock is not granted.
*
- * LM_FLAG_NOEXP
+ * LM_FLAG_RECOVER
* GFS sets this flag on lock requests it makes while doing journal recovery.
- * These special requests should not be blocked due to the recovery like
- * ordinary locks would be.
+ * While ordinary requests are blocked until the end of recovery, requests
+ * with this flag set do proceed.
*
* LM_FLAG_ANY
* A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
* also be granted in SHARED. The preferred state is whichever is compatible
* with other granted locks, or the specified state if no other locks exist.
*
- * LM_FLAG_PRIORITY
- * Override fairness considerations. Suppose a lock is held in a shared state
- * and there is a pending request for the deferred state. A shared lock
- * request with the priority flag would be allowed to bypass the deferred
- * request and directly join the other shared lock. A shared lock request
- * without the priority flag might be forced to wait until the deferred
- * requested had acquired and released the lock.
+ * In addition, when a lock is already held in EX mode locally, a SHARED or
+ * DEFERRED mode request with the LM_FLAG_ANY flag set will be granted.
+ * (The LM_FLAG_ANY flag is currently only used for SHARED mode requests.)
*
* LM_FLAG_NODE_SCOPE
* This holder agrees to share the lock within this node. In other words,
@@ -84,14 +80,15 @@ enum {
#define LM_FLAG_TRY 0x0001
#define LM_FLAG_TRY_1CB 0x0002
-#define LM_FLAG_NOEXP 0x0004
+#define LM_FLAG_RECOVER 0x0004
#define LM_FLAG_ANY 0x0008
-#define LM_FLAG_PRIORITY 0x0010
#define LM_FLAG_NODE_SCOPE 0x0020
#define GL_ASYNC 0x0040
#define GL_EXACT 0x0080
#define GL_SKIP 0x0100
+#define GL_NOPID 0x0200
#define GL_NOCACHE 0x0400
+#define GL_NOBLOCK 0x0800
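/*
 * A minimal illustrative sketch of how these flags are typically combined
 * (assumed usage for illustration; actual call sites throughout gfs2 vary):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error == GLR_TRYFAILED) {
 *		(a conflicting holder exists; back off and retry later)
 *	} else if (!error) {
 *		(access the data protected by the shared lock)
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */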
/*
* lm_async_cb return flags
@@ -99,12 +96,22 @@ enum {
* LM_OUT_ST_MASK
* Masks the lower two bits of lock state in the returned value.
*
+ * LM_OUT_TRY_AGAIN
+ * The trylock request failed.
+ *
+ * LM_OUT_DEADLOCK
+ * The lock request failed because it would deadlock.
+ *
* LM_OUT_CANCELED
* The lock request was canceled.
*
+ * LM_OUT_ERROR
+ * The lock request timed out or failed.
*/
#define LM_OUT_ST_MASK 0x00000003
+#define LM_OUT_TRY_AGAIN 0x00000020
+#define LM_OUT_DEADLOCK 0x00000010
#define LM_OUT_CANCELED 0x00000008
#define LM_OUT_ERROR 0x00000004
@@ -129,7 +136,7 @@ struct lm_lockops {
void (*lm_first_done) (struct gfs2_sbd *sdp);
void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
unsigned int result);
- void (*lm_unmount) (struct gfs2_sbd *sdp);
+ void (*lm_unmount) (struct gfs2_sbd *sdp, bool clean);
void (*lm_withdraw) (struct gfs2_sbd *sdp);
void (*lm_put_lock) (struct gfs2_glock *gl);
int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
@@ -138,7 +145,11 @@ struct lm_lockops {
const match_table_t *lm_tokens;
};
-extern struct workqueue_struct *gfs2_delete_workqueue;
+struct gfs2_glock_aspace {
+ struct gfs2_glock glock;
+ struct address_space mapping;
+};
+
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
struct gfs2_holder *gh;
@@ -150,8 +161,6 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
break;
- if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
- continue;
if (gh->gh_owner_pid == pid)
goto out;
}
@@ -162,61 +171,50 @@ out:
return gh;
}
-static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
-{
- return gl->gl_state == LM_ST_EXCLUSIVE;
-}
-
-static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
-{
- return gl->gl_state == LM_ST_DEFERRED;
-}
-
-static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
-{
- return gl->gl_state == LM_ST_SHARED;
-}
-
static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
- return (struct address_space *)(gl + 1);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ struct gfs2_glock_aspace *gla =
+ container_of(gl, struct gfs2_glock_aspace, glock);
+ return &gla->mapping;
+ }
return NULL;
}
-extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
- const struct gfs2_glock_operations *glops,
- int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put(struct gfs2_glock *gl);
-extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
+int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ const struct gfs2_glock_operations *glops,
+ int create, struct gfs2_glock **glp);
+struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put(struct gfs2_glock *gl);
+void gfs2_glock_put_async(struct gfs2_glock *gl);
-extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
- u16 flags, struct gfs2_holder *gh,
- unsigned long ip);
+void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+ u16 flags, struct gfs2_holder *gh,
+ unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh) {
__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}
-extern void gfs2_holder_reinit(unsigned int state, u16 flags,
- struct gfs2_holder *gh);
-extern void gfs2_holder_uninit(struct gfs2_holder *gh);
-extern int gfs2_glock_nq(struct gfs2_holder *gh);
-extern int gfs2_glock_poll(struct gfs2_holder *gh);
-extern int gfs2_instantiate(struct gfs2_holder *gh);
-extern int gfs2_glock_wait(struct gfs2_holder *gh);
-extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq(struct gfs2_holder *gh);
-extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
-extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
-extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
- const struct gfs2_glock_operations *glops,
- unsigned int state, u16 flags,
- struct gfs2_holder *gh);
-extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
+void gfs2_holder_reinit(unsigned int state, u16 flags,
+ struct gfs2_holder *gh);
+void gfs2_holder_uninit(struct gfs2_holder *gh);
+int gfs2_glock_nq(struct gfs2_holder *gh);
+int gfs2_glock_poll(struct gfs2_holder *gh);
+int gfs2_instantiate(struct gfs2_holder *gh);
+int gfs2_glock_holder_ready(struct gfs2_holder *gh);
+int gfs2_glock_wait(struct gfs2_holder *gh);
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_glock_dq(struct gfs2_holder *gh);
+void gfs2_glock_dq_wait(struct gfs2_holder *gh);
+void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
+int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
+ const struct gfs2_glock_operations *glops,
+ unsigned int state, u16 flags,
+ struct gfs2_holder *gh);
+int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
gfs2_dump_glock(NULL, gl, true); \
@@ -230,7 +228,7 @@ extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
while (0)
-extern __printf(2, 3)
+__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
/**
@@ -258,25 +256,28 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
return error;
}
-extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
-extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
-extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
-extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
-extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
-extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
-extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
-extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
-extern void gfs2_glock_free(struct gfs2_glock *gl);
-
-extern int __init gfs2_glock_init(void);
-extern void gfs2_glock_exit(void);
-
-extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
-extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
-extern void gfs2_register_debugfs(void);
-extern void gfs2_unregister_debugfs(void);
+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
+bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
+void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+void gfs2_withdraw_glocks(struct gfs2_sbd *sdp);
+void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_free_later(struct gfs2_glock *gl);
+
+int __init gfs2_glock_init(void);
+void gfs2_glock_exit(void);
+
+void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
+void gfs2_register_debugfs(void);
+void gfs2_unregister_debugfs(void);
+
+void glock_set_object(struct gfs2_glock *gl, void *object);
+void glock_clear_object(struct gfs2_glock *gl, void *object);
extern const struct lm_lockops gfs2_dlm_ops;
@@ -295,65 +296,13 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
return !list_empty(&gh->gh_list);
}
-/**
- * glock_set_object - set the gl_object field of a glock
- * @gl: the glock
- * @object: the object
- */
-static inline void glock_set_object(struct gfs2_glock *gl, void *object)
-{
- spin_lock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
- gfs2_dump_glock(NULL, gl, true);
- gl->gl_object = object;
- spin_unlock(&gl->gl_lockref.lock);
-}
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
-/**
- * glock_clear_object - clear the gl_object field of a glock
- * @gl: the glock
- * @object: the object
- *
- * I'd love to similarly add this:
- * else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
- * gfs2_dump_glock(NULL, gl, true);
- * Unfortunately, that's not possible because as soon as gfs2_delete_inode
- * frees the block in the rgrp, another process can reassign it for an I_NEW
- * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
- * That means gfs2_delete_inode may subsequently try to call this function
- * for a glock that's already pointing to a brand new inode. If we clear the
- * new inode's gl_object, we'll introduce metadata corruption. Function
- * gfs2_delete_inode calls clear_inode which calls gfs2_clear_inode which also
- * tries to clear gl_object, so it's more than just gfs2_delete_inode.
- *
- */
-static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
-{
- spin_lock(&gl->gl_lockref.lock);
- if (gl->gl_object == object)
- gl->gl_object = NULL;
- spin_unlock(&gl->gl_lockref.lock);
-}
-
-static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
+static inline bool glock_needs_demote(struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = gh->gh_gl;
-
- spin_lock(&gl->gl_lockref.lock);
- set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
- spin_unlock(&gl->gl_lockref.lock);
-}
-
-static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
-{
- struct gfs2_glock *gl = gh->gh_gl;
-
- spin_lock(&gl->gl_lockref.lock);
- clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
- spin_unlock(&gl->gl_lockref.lock);
+ return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
+ test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
}
-extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
-extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
-
#endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 650ad77c4d0b..2173ccf5034b 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -11,6 +11,7 @@
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
+#include <linux/log2.h>
#include "gfs2.h"
#include "incore.h"
@@ -29,8 +30,6 @@
struct workqueue_struct *gfs2_freeze_wq;
-extern struct workqueue_struct *gfs2_control_wq;
-
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -39,12 +38,12 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
"AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
"state 0x%lx\n",
bh, (unsigned long long)bh->b_blocknr, bh->b_state,
- bh->b_page->mapping, bh->b_page->flags);
+ bh->b_folio->mapping, bh->b_folio->flags.f);
fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
gl->gl_name.ln_type, gl->gl_name.ln_number,
gfs2_glock2aspace(gl));
gfs2_lm(sdp, "AIL error\n");
- gfs2_withdraw_delayed(sdp);
+ gfs2_withdraw(sdp);
}
/**
@@ -90,7 +89,7 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_trans tr;
unsigned int revokes;
- int ret;
+ int ret = 0;
revokes = atomic_read(&gl->gl_ail_count);
@@ -124,15 +123,18 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
memset(&tr, 0, sizeof(tr));
set_bit(TR_ONSTACK, &tr.tr_flags);
ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
- if (ret)
+ if (ret) {
+ fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
goto flush;
+ }
__gfs2_ail_flush(gl, 0, revokes);
gfs2_trans_end(sdp);
flush:
- gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
- GFS2_LFC_AIL_EMPTY_GL);
- return 0;
+ if (!ret)
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_AIL_EMPTY_GL);
+ return ret;
}
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
@@ -162,7 +164,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct address_space *metamapping = &sdp->sd_aspace;
+ struct address_space *metamapping = gfs2_aspace(sdp);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
const unsigned bsize = sdp->sd_sb.sb_bsize;
loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
@@ -193,7 +195,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl)
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
int error;
- if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
+ if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return 0;
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
@@ -219,19 +221,22 @@ static int rgrp_go_sync(struct gfs2_glock *gl)
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct address_space *mapping = &sdp->sd_aspace;
+ struct address_space *mapping = gfs2_aspace(sdp);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
const unsigned bsize = sdp->sd_sb.sb_bsize;
- loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
- loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
+ loff_t start, end;
+ if (!rgd)
+ return;
+ start = (rgd->rd_addr * bsize) & PAGE_MASK;
+ end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
gfs2_rgrp_brelse(rgd);
WARN_ON_ONCE(!(flags & DIO_METADATA));
+ gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
truncate_inode_pages_range(mapping, start, end);
- set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
}
-static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf)
{
struct gfs2_rgrpd *rgd = gl->gl_object;
@@ -324,7 +329,9 @@ static int inode_go_sync(struct gfs2_glock *gl)
ret = gfs2_inode_metasync(gl);
if (!error)
error = ret;
- gfs2_ail_empty_gl(gl);
+ ret = gfs2_ail_empty_gl(gl);
+ if (!error)
+ error = ret;
/*
* Writeback of the data mapping may cause the dirty flag to be set
* so we have to clear it again here.
@@ -352,6 +359,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
struct gfs2_inode *ip = gfs2_glock2inode(gl);
+ gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
+
if (flags & DIO_METADATA) {
struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
@@ -375,61 +384,51 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
gfs2_clear_glop_pending(ip);
}
-/**
- * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int inode_go_demote_ok(const struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
- return 0;
-
- return 1;
-}
-
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
const struct gfs2_dinode *str = buf;
- struct timespec64 atime;
+ struct timespec64 atime, iatime;
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
- bool is_new = ip->i_inode.i_state & I_NEW;
+ struct inode *inode = &ip->i_inode;
+ bool is_new = inode_state_read_once(inode) & I_NEW;
- if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
- goto corrupt;
- if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
- goto corrupt;
+ if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+ if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
- ip->i_inode.i_mode = mode;
+ inode->i_mode = mode;
if (is_new) {
- ip->i_inode.i_rdev = 0;
+ inode->i_rdev = 0;
switch (mode & S_IFMT) {
case S_IFBLK:
case S_IFCHR:
- ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
- be32_to_cpu(str->di_minor));
+ inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
+ be32_to_cpu(str->di_minor));
break;
}
}
- i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
- i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
- set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
- i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
- gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
+ i_uid_write(inode, be32_to_cpu(str->di_uid));
+ i_gid_write(inode, be32_to_cpu(str->di_gid));
+ set_nlink(inode, be32_to_cpu(str->di_nlink));
+ i_size_write(inode, be64_to_cpu(str->di_size));
+ gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
atime.tv_sec = be64_to_cpu(str->di_atime);
atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
- if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
- ip->i_inode.i_atime = atime;
- ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
- ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
- ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
- ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
+ iatime = inode_get_atime(inode);
+ if (timespec64_compare(&iatime, &atime) < 0)
+ inode_set_atime_to_ts(inode, atime);
+ inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
+ be32_to_cpu(str->di_mtime_nsec));
+ inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
+ be32_to_cpu(str->di_ctime_nsec));
ip->i_goal = be64_to_cpu(str->di_goal_meta);
ip->i_generation = be64_to_cpu(str->di_generation);
@@ -437,25 +436,35 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
ip->i_diskflags = be32_to_cpu(str->di_flags);
ip->i_eattr = be64_to_cpu(str->di_eattr);
/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
- gfs2_set_inode_flags(&ip->i_inode);
+ gfs2_set_inode_flags(inode);
height = be16_to_cpu(str->di_height);
- if (unlikely(height > GFS2_MAX_META_HEIGHT))
- goto corrupt;
+ if (unlikely(height > sdp->sd_max_height)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_height = (u8)height;
depth = be16_to_cpu(str->di_depth);
- if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
- goto corrupt;
+ if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+ if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
+ depth < ilog2(sdp->sd_hash_ptrs)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_depth = (u8)depth;
ip->i_entries = be32_to_cpu(str->di_entries);
- if (S_ISREG(ip->i_inode.i_mode))
- gfs2_set_aops(&ip->i_inode);
+ if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+ if (S_ISREG(inode->i_mode))
+ gfs2_set_aops(inode);
return 0;
-corrupt:
- gfs2_consist_inode(ip);
- return -EIO;
}
/**
@@ -465,7 +474,7 @@ corrupt:
* Returns: errno
*/
-int gfs2_inode_refresh(struct gfs2_inode *ip)
+static int gfs2_inode_refresh(struct gfs2_inode *ip)
{
struct buffer_head *dibh;
int error;
@@ -481,40 +490,45 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
/**
* inode_go_instantiate - read in an inode if necessary
- * @gh: The glock holder
+ * @gl: The glock
*
* Returns: errno
*/
-static int inode_go_instantiate(struct gfs2_holder *gh)
+static int inode_go_instantiate(struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_inode *ip = gl->gl_object;
- int error = 0;
+ struct gfs2_glock *io_gl;
+ int error;
if (!ip) /* no inode to populate - read it in later */
- goto out;
+ return 0;
error = gfs2_inode_refresh(ip);
if (error)
- goto out;
+ return error;
+ io_gl = ip->i_iopen_gh.gh_gl;
+ io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
+ return 0;
+}
+
+static int inode_go_held(struct gfs2_holder *gh)
+{
+ struct gfs2_glock *gl = gh->gh_gl;
+ struct gfs2_inode *ip = gl->gl_object;
+ int error = 0;
+
+ if (!ip) /* no inode to populate - read it in later */
+ return 0;
if (gh->gh_state != LM_ST_DEFERRED)
inode_dio_wait(&ip->i_inode);
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) &&
- (gh->gh_state == LM_ST_EXCLUSIVE)) {
- spin_lock(&sdp->sd_trunc_lock);
- if (list_empty(&ip->i_trunc_list))
- list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
- spin_unlock(&sdp->sd_trunc_lock);
- wake_up(&sdp->sd_quota_wait);
- error = 1;
- }
+ (gh->gh_state == LM_ST_EXCLUSIVE))
+ error = gfs2_truncatei_resume(ip);
-out:
return error;
}
@@ -526,71 +540,54 @@ out:
*
*/
-static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf)
{
struct gfs2_inode *ip = gl->gl_object;
- struct inode *inode = &ip->i_inode;
- unsigned long nrpages;
+ const struct inode *inode = &ip->i_inode;
if (ip == NULL)
return;
- xa_lock_irq(&inode->i_data.i_pages);
- nrpages = inode->i_data.nrpages;
- xa_unlock_irq(&inode->i_data.i_pages);
-
gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
"p:%lu\n", fs_id_buf,
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
- IF2DT(ip->i_inode.i_mode), ip->i_flags,
+ IF2DT(inode->i_mode), ip->i_flags,
(unsigned int)ip->i_diskflags,
- (unsigned long long)i_size_read(inode), nrpages);
+ (unsigned long long)i_size_read(inode),
+ inode->i_data.nrpages);
}
/**
- * freeze_go_sync - promote/demote the freeze glock
+ * freeze_go_callback - A cluster node is requesting a freeze
* @gl: the glock
+ * @remote: true if this came from a different cluster node
*/
-static int freeze_go_sync(struct gfs2_glock *gl)
+static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
- int error = 0;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct super_block *sb = sdp->sd_vfs;
+
+ if (!remote ||
+ (gl->gl_state != LM_ST_SHARED &&
+ gl->gl_state != LM_ST_UNLOCKED) ||
+ gl->gl_demote_state != LM_ST_UNLOCKED)
+ return;
/*
- * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
- * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
- * all the nodes should have the freeze glock in SH mode and they all
- * call do_xmote: One for EX and the others for UN. They ALL must
- * freeze locally, and they ALL must queue freeze work. The freeze_work
- * calls freeze_func, which tries to reacquire the freeze glock in SH,
- * effectively waiting for the thaw on the node who holds it in EX.
- * Once thawed, the work func acquires the freeze glock in
- * SH and everybody goes back to thawed.
+ * Try to get an active super block reference to prevent racing with
+ * unmount (see super_trylock_shared()). But note that unmount isn't
+ * the only place where a write lock on s_umount is taken, and we can
+ * fail here because of things like remount as well.
*/
- if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
- !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
- atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
- error = freeze_super(sdp->sd_vfs);
- if (error) {
- fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
- error);
- if (gfs2_withdrawn(sdp)) {
- atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
- return 0;
- }
- gfs2_assert_withdraw(sdp, 0);
- }
- queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
- if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
- gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
- GFS2_LFC_FREEZE_GO_SYNC);
- else /* read-only mounts */
- atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ if (down_read_trylock(&sb->s_umount)) {
+ atomic_inc(&sb->s_active);
+ up_read(&sb->s_umount);
+ if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
+ deactivate_super(sb);
}
- return 0;
}
/**
@@ -608,31 +605,18 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
- error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
- if (gfs2_assert_withdraw_delayed(sdp, !error))
+ error = gfs2_find_jhead(sdp->sd_jdesc, &head);
+ if (gfs2_assert_withdraw(sdp, !error))
return error;
- if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
- GFS2_LOG_HEAD_UNMOUNT))
+ if (gfs2_assert_withdraw(sdp, head.lh_flags &
+ GFS2_LOG_HEAD_UNMOUNT))
return -EIO;
- sdp->sd_log_sequence = head.lh_sequence + 1;
- gfs2_log_pointers_init(sdp, head.lh_blkno);
+ gfs2_log_pointers_init(sdp, &head);
}
return 0;
}
/**
- * freeze_go_demote_ok
- * @gl: the glock
- *
- * Always returns 0
- */
-
-static int freeze_go_demote_ok(const struct gfs2_glock *gl)
-{
- return 0;
-}
-
-/**
* iopen_go_callback - schedule the dcache entry for the inode to be deleted
* @gl: the glock
* @remote: true if this came from a different cluster node
@@ -644,104 +628,29 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
struct gfs2_inode *ip = gl->gl_object;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- if (!remote || sb_rdonly(sdp->sd_vfs))
+ if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
gl->gl_lockref.count++;
- if (!queue_delayed_work(gfs2_delete_workqueue,
- &gl->gl_delete, 0))
+ if (!gfs2_queue_try_to_evict(gl))
gl->gl_lockref.count--;
}
}
-static int iopen_go_demote_ok(const struct gfs2_glock *gl)
-{
- return !gfs2_delete_work_queued(gl);
-}
-
-/**
- * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
- * @gl: glock being freed
- *
- * For now, this is only used for the journal inode glock. In withdraw
- * situations, we need to wait for the glock to be freed so that we know
- * other nodes may proceed with recovery / journal replay.
- */
-static void inode_go_free(struct gfs2_glock *gl)
-{
- /* Note that we cannot reference gl_object because it's already set
- * to NULL by this point in its lifecycle. */
- if (!test_bit(GLF_FREEING, &gl->gl_flags))
- return;
- clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
- wake_up_bit(&gl->gl_flags, GLF_FREEING);
-}
-
-/**
- * nondisk_go_callback - used to signal when a node did a withdraw
- * @gl: the nondisk glock
- * @remote: true if this came from a different cluster node
- *
- */
-static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- /* Ignore the callback unless it's from another node, and it's the
- live lock. */
- if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
- return;
-
- /* First order of business is to cancel the demote request. We don't
- * really want to demote a nondisk glock. At best it's just to inform
- * us of another node's withdraw. We'll keep it in SH mode. */
- clear_bit(GLF_DEMOTE, &gl->gl_flags);
- clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-
- /* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
- if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
- test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
- test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
- return;
-
- /* We only care when a node wants us to unlock, because that means
- * they want a journal recovered. */
- if (gl->gl_demote_state != LM_ST_UNLOCKED)
- return;
-
- if (sdp->sd_args.ar_spectator) {
- fs_warn(sdp, "Spectator node cannot recover journals.\n");
- return;
- }
-
- fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
- set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
- /*
- * We can't call remote_withdraw directly here or gfs2_recover_journal
- * because this is called from the glock unlock function and the
- * remote_withdraw needs to enqueue and dequeue the same "live" glock
- * we were called from. So we queue it to the control work queue in
- * lock_dlm.
- */
- queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
-}
-
const struct gfs2_glock_operations gfs2_meta_glops = {
.go_type = LM_TYPE_META,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_inode_glops = {
.go_sync = inode_go_sync,
.go_inval = inode_go_inval,
- .go_demote_ok = inode_go_demote_ok,
.go_instantiate = inode_go_instantiate,
+ .go_held = inode_go_held,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
- .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
- .go_free = inode_go_free,
+ .go_flags = GLOF_ASPACE | GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -754,40 +663,33 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
};
const struct gfs2_glock_operations gfs2_freeze_glops = {
- .go_sync = freeze_go_sync,
.go_xmote_bh = freeze_go_xmote_bh,
- .go_demote_ok = freeze_go_demote_ok,
+ .go_callback = freeze_go_callback,
.go_type = LM_TYPE_NONDISK,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN,
.go_callback = iopen_go_callback,
- .go_demote_ok = iopen_go_demote_ok,
- .go_flags = GLOF_LRU | GLOF_NONDISK,
+ .go_dump = inode_go_dump,
.go_subclass = 1,
};
const struct gfs2_glock_operations gfs2_flock_glops = {
.go_type = LM_TYPE_FLOCK,
- .go_flags = GLOF_LRU | GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_nondisk_glops = {
.go_type = LM_TYPE_NONDISK,
- .go_flags = GLOF_NONDISK,
- .go_callback = nondisk_go_callback,
};
const struct gfs2_glock_operations gfs2_quota_glops = {
.go_type = LM_TYPE_QUOTA,
- .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
+ .go_flags = GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_journal_glops = {
.go_type = LM_TYPE_JOURNAL,
- .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations *gfs2_glops_list[] = {
diff --git a/fs/gfs2/glops.h b/fs/gfs2/glops.h
index 695898afcaf1..9341423798df 100644
--- a/fs/gfs2/glops.h
+++ b/fs/gfs2/glops.h
@@ -22,7 +22,7 @@ extern const struct gfs2_glock_operations gfs2_quota_glops;
extern const struct gfs2_glock_operations gfs2_journal_glops;
extern const struct gfs2_glock_operations *gfs2_glops_list[];
-extern int gfs2_inode_metasync(struct gfs2_glock *gl);
-extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
+int gfs2_inode_metasync(struct gfs2_glock *gl);
+void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
#endif /* __GLOPS_DOT_H__ */
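/*
 * The new freeze_go_callback() in glops.c above relies on a small idiom worth
 * spelling out: before queuing sd_freeze_work it pins the superblock with an
 * active reference taken under s_umount, so the queued work cannot race with
 * unmount, and it drops that reference again if the work was already pending.
 * The sketch below isolates that idiom; queue_frozen_work() is a hypothetical
 * helper, not part of gfs2, and the worker it queues is expected to call
 * deactivate_super() when it finishes.
 */
#include <linux/fs.h>
#include <linux/workqueue.h>

static void queue_frozen_work(struct super_block *sb,
                              struct workqueue_struct *wq,
                              struct work_struct *work)
{
        if (!down_read_trylock(&sb->s_umount))
                return;                 /* unmount or remount in progress */
        atomic_inc(&sb->s_active);      /* pin the superblock for the worker */
        up_read(&sb->s_umount);
        if (!queue_work(wq, work))
                deactivate_super(sb);   /* already queued: drop our reference */
}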
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8c00fb389ae5..d05d8fe4e456 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -218,19 +218,16 @@ struct gfs2_glock_operations {
int (*go_sync) (struct gfs2_glock *gl);
int (*go_xmote_bh)(struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags);
- int (*go_demote_ok) (const struct gfs2_glock *gl);
- int (*go_instantiate) (struct gfs2_holder *gh);
- void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
+ int (*go_instantiate) (struct gfs2_glock *gl);
+ int (*go_held)(struct gfs2_holder *gh);
+ void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
- void (*go_free)(struct gfs2_glock *gl);
const int go_subclass;
const int go_type;
const unsigned long go_flags;
#define GLOF_ASPACE 1 /* address space attached */
#define GLOF_LVB 2 /* Lock Value Block attached */
-#define GLOF_LRU 4 /* LRU managed */
-#define GLOF_NONDISK 8 /* not I/O related */
};
enum {
@@ -251,7 +248,6 @@ struct gfs2_lkstats {
enum {
/* States */
- HIF_MAY_DEMOTE = 1,
HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
HIF_WAIT = 10,
};
@@ -321,16 +317,18 @@ enum {
GLF_DEMOTE_IN_PROGRESS = 5,
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
- GLF_INVALIDATE_IN_PROGRESS = 8,
- GLF_REPLY_PENDING = 9,
+ GLF_HAVE_REPLY = 9,
GLF_INITIAL = 10,
- GLF_FROZEN = 11,
+ GLF_HAVE_FROZEN_REPLY = 11,
GLF_INSTANTIATE_IN_PROG = 12, /* instantiate happening now */
GLF_LRU = 13,
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
- GLF_PENDING_DELETE = 17,
- GLF_FREEING = 18, /* Wait for glock to be freed */
+ GLF_TRY_TO_EVICT = 17, /* iopen glocks only */
+ GLF_VERIFY_DELETE = 18, /* iopen glocks only */
+ GLF_PENDING_REPLY = 19,
+ GLF_DEFER_DELETE = 20, /* iopen glocks only */
+ GLF_CANCELING = 21,
};
struct gfs2_glock {
@@ -373,11 +371,8 @@ struct gfs2_glock {
enum {
GIF_QD_LOCKED = 1,
- GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
- GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
- GIF_DEFERRED_DELETE = 7,
};
struct gfs2_inode {
@@ -396,7 +391,6 @@ struct gfs2_inode {
atomic_t i_sizehint; /* hint of the write size */
struct rw_semaphore i_rw_mutex;
struct list_head i_ordered;
- struct list_head i_trunc_list;
__be64 *i_hash_cache;
u32 i_entries;
u32 i_diskflags;
@@ -452,7 +446,7 @@ struct gfs2_quota_data {
s64 qd_change_sync;
unsigned int qd_slot;
- unsigned int qd_slot_count;
+ unsigned int qd_slot_ref;
struct buffer_head *qd_bh;
struct gfs2_quota_change *qd_bh_qc;
@@ -523,8 +517,6 @@ struct gfs2_jdesc {
struct list_head jd_revoke_list;
unsigned int jd_replay_tail;
-
- u64 jd_no_addr;
};
struct gfs2_statfs_change_host {
@@ -537,6 +529,7 @@ struct gfs2_statfs_change_host {
#define GFS2_QUOTA_OFF 0
#define GFS2_QUOTA_ACCOUNT 1
#define GFS2_QUOTA_ON 2
+#define GFS2_QUOTA_QUIET 3 /* on but not complaining */
#define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK 1
@@ -544,8 +537,7 @@ struct gfs2_statfs_change_host {
#define GFS2_ERRORS_DEFAULT GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW 0
-#define GFS2_ERRORS_CONTINUE 1 /* place holder for future feature */
-#define GFS2_ERRORS_RO 2 /* place holder for future feature */
+#define GFS2_ERRORS_DEACTIVATE 1
#define GFS2_ERRORS_PANIC 3
struct gfs2_args {
@@ -561,7 +553,7 @@ struct gfs2_args {
unsigned int ar_data:2; /* ordered/writeback */
unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */
- unsigned int ar_errors:2; /* errors=withdraw | panic */
+ unsigned int ar_errors:2; /* errors=withdraw | deactivate | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
unsigned int ar_got_rgrplvb:1; /* Was the rgrplvb opt given? */
@@ -587,6 +579,7 @@ struct gfs2_tune {
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
+ unsigned int gt_withdraw_helper_timeout;
};
enum {
@@ -600,18 +593,10 @@ enum {
SDF_RORECOVERY = 7, /* read only recovery */
SDF_SKIP_DLM_UNLOCK = 8,
SDF_FORCE_AIL_FLUSH = 9,
- SDF_FS_FROZEN = 10,
- SDF_WITHDRAWING = 11, /* Will withdraw eventually */
- SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */
- SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
- SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are
- withdrawing */
-};
-
-enum gfs2_freeze_state {
- SFS_UNFROZEN = 0,
- SFS_STARTING_FREEZE = 1,
- SFS_FROZEN = 2,
+ SDF_FREEZE_INITIATOR = 10,
+ SDF_KILL = 15,
+ SDF_EVICTING = 16,
+ SDF_FROZEN = 17,
};
#define GFS2_FSNAME_LEN 256
@@ -661,6 +646,8 @@ struct lm_lockstruct {
struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
char *ls_lvb_bits;
+ struct rw_semaphore ls_sem;
+
spinlock_t ls_recover_spin; /* protects following fields */
unsigned long ls_recover_flags; /* DFL_ */
uint32_t ls_recover_mount; /* gen in first recover_done cb */
@@ -719,11 +706,13 @@ struct gfs2_sbd {
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_freeze_gl;
struct work_struct sd_freeze_work;
- wait_queue_head_t sd_glock_wait;
+ struct work_struct sd_withdraw_work;
+ wait_queue_head_t sd_kill_wait;
wait_queue_head_t sd_async_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
- struct completion sd_wdack;
+ struct completion sd_withdraw_helper;
+ int sd_withdraw_helper_status;
struct delayed_work sd_control_work;
/* Inode Stuff */
@@ -764,7 +753,6 @@ struct gfs2_sbd {
struct gfs2_jdesc *sd_jdesc;
struct gfs2_holder sd_journal_gh;
struct gfs2_holder sd_jinode_gh;
- struct gfs2_glock *sd_jinode_gl;
struct gfs2_holder sd_sc_gh;
struct buffer_head *sd_sc_bh;
@@ -772,6 +760,11 @@ struct gfs2_sbd {
struct completion sd_journal_ready;
+ /* Workqueue stuff */
+
+ struct workqueue_struct *sd_glock_wq;
+ struct workqueue_struct *sd_delete_wq;
+
/* Daemon stuff */
struct task_struct *sd_logd_process;
@@ -781,11 +774,8 @@ struct gfs2_sbd {
struct list_head sd_quota_list;
atomic_t sd_quota_count;
- struct mutex sd_quota_mutex;
struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait;
- struct list_head sd_trunc_list;
- spinlock_t sd_trunc_lock;
unsigned int sd_quota_slots;
unsigned long *sd_quota_bitmap;
@@ -795,7 +785,7 @@ struct gfs2_sbd {
/* Log stuff */
- struct address_space sd_aspace;
+ struct inode *sd_inode;
spinlock_t sd_log_lock;
@@ -824,7 +814,6 @@ struct gfs2_sbd {
atomic_t sd_log_in_flight;
wait_queue_head_t sd_log_flush_wait;
int sd_log_error; /* First log error */
- wait_queue_head_t sd_withdraw_wait;
unsigned int sd_log_tail;
unsigned int sd_log_flush_tail;
@@ -837,8 +826,8 @@ struct gfs2_sbd {
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
- atomic_t sd_freeze_state;
struct mutex sd_freeze_mutex;
+ struct list_head sd_dead_glocks;
char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
char sd_table_name[GFS2_FSNAME_LEN];
@@ -848,9 +837,15 @@ struct gfs2_sbd {
unsigned long sd_last_warning;
struct dentry *debugfs_dir; /* debugfs directory */
- unsigned long sd_glock_dqs_held;
};
+#define GFS2_BAD_INO 1
+
+static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp)
+{
+ return sdp->sd_inode->i_mapping;
+}
+
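/*
 * How callers pick up the metadata address space after this change: the
 * embedded sd_aspace is gone and the mapping now comes from sd_inode via the
 * gfs2_aspace() helper defined above, as the glops.c hunks earlier show.
 * gfs2_meta_writeback_range() below is a hypothetical illustration, not a
 * real gfs2 function.
 */
static int gfs2_meta_writeback_range(struct gfs2_sbd *sdp, loff_t start,
                                     loff_t end)
{
        struct address_space *mapping = gfs2_aspace(sdp);
        int error;

        filemap_fdatawrite_range(mapping, start, end);
        error = filemap_fdatawait_range(mapping, start, end);
        if (error)
                mapping_set_error(mapping, error);
        return error;
}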
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
gl->gl_stats.stats[which]++;
@@ -864,7 +859,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
preempt_enable();
}
-extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
{
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6424b903e885..36618e353199 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
static const struct inode_operations gfs2_dir_iops;
static const struct inode_operations gfs2_symlink_iops;
-static int iget_test(struct inode *inode, void *opaque)
-{
- u64 no_addr = *(u64 *)opaque;
-
- return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
- u64 no_addr = *(u64 *)opaque;
-
- GFS2_I(inode)->i_no_addr = no_addr;
- inode->i_ino = no_addr;
- return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
- struct inode *inode;
-
-repeat:
- inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
- if (!inode)
- return inode;
- if (is_bad_inode(inode)) {
- iput(inode);
- goto repeat;
- }
- return inode;
-}
-
/**
* gfs2_set_iop - Sets inode operations
* @inode: The inode with correct i_mode filled in
@@ -104,6 +73,35 @@ static void gfs2_set_iop(struct inode *inode)
}
}
+static int iget_test(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ GFS2_I(inode)->i_no_addr = no_addr;
+ inode->i_ino = no_addr;
+ return 0;
+}
+
+void gfs2_setup_inode(struct inode *inode)
+{
+ gfp_t gfp_mask;
+
+ /*
+ * Ensure all page cache allocations are done from GFP_NOFS context to
+ * prevent direct reclaim recursion back into the filesystem and blowing
+ * stacks or deadlocking.
+ */
+ gfp_mask = mapping_gfp_mask(inode->i_mapping);
+ mapping_set_gfp_mask(inode->i_mapping, gfp_mask & ~__GFP_FS);
+}
+
/**
* gfs2_inode_lookup - Lookup an inode
* @sb: The super block
@@ -132,35 +130,52 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
{
struct inode *inode;
struct gfs2_inode *ip;
- struct gfs2_glock *io_gl = NULL;
struct gfs2_holder i_gh;
int error;
gfs2_holder_mark_uninitialized(&i_gh);
- inode = gfs2_iget(sb, no_addr);
+ inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
if (!inode)
return ERR_PTR(-ENOMEM);
ip = GFS2_I(inode);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_glock *io_gl;
+ int extra_flags = 0;
- error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+ gfs2_setup_inode(inode);
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
+ &ip->i_gl);
if (unlikely(error))
goto fail;
- flush_delayed_work(&ip->i_gl->gl_work);
- error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
+ &io_gl);
if (unlikely(error))
goto fail;
- if (blktype != GFS2_BLKST_UNLINKED)
+
+ /*
+ * The only caller that sets @blktype to GFS2_BLKST_UNLINKED is
+ * delete_work_func(). Make sure not to cancel the delete work
+ * from within itself here.
+ */
+ if (blktype == GFS2_BLKST_UNLINKED)
+ extra_flags |= LM_FLAG_TRY;
+ else
gfs2_cancel_delete_work(io_gl);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED,
+ GL_EXACT | GL_NOPID | extra_flags,
+ &ip->i_iopen_gh);
+ gfs2_glock_put(io_gl);
+ if (unlikely(error))
+ goto fail;
if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
/*
* The GL_SKIP flag indicates to skip reading the inode
- * block. We read the inode with gfs2_inode_refresh
+ * block. We read the inode when instantiating it
* after possibly checking the block type.
*/
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +196,22 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
}
}
- glock_set_object(ip->i_gl, ip);
set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
- error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
- if (unlikely(error))
- goto fail;
- glock_set_object(ip->i_iopen_gh.gh_gl, ip);
- gfs2_glock_put(io_gl);
- io_gl = NULL;
/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
- inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
- inode->i_atime.tv_nsec = 0;
+ inode_set_atime(inode,
+ 1LL << (8 * sizeof(inode_get_atime_sec(inode)) - 1),
+ 0);
+
+ glock_set_object(ip->i_gl, ip);
if (type == DT_UNKNOWN) {
/* Inode glock must be locked already */
error = gfs2_instantiate(&i_gh);
- if (error)
+ if (error) {
+ glock_clear_object(ip->i_gl, ip);
goto fail;
+ }
} else {
ip->i_no_formal_ino = no_formal_ino;
inode->i_mode = DT2IF(type);
@@ -206,33 +219,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_set_iop(inode);
+ unlock_new_inode(inode);
}
if (no_formal_ino && ip->i_no_formal_ino &&
no_formal_ino != ip->i_no_formal_ino) {
- error = -ESTALE;
- if (inode->i_state & I_NEW)
- goto fail;
iput(inode);
- return ERR_PTR(error);
+ return ERR_PTR(-ESTALE);
}
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
-
return inode;
fail:
- if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
- glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+ if (error == GLR_TRYFAILED)
+ error = -EAGAIN;
+ if (gfs2_holder_initialized(&ip->i_iopen_gh))
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
- if (io_gl)
- gfs2_glock_put(io_gl);
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
+ if (ip->i_gl) {
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
+ }
iget_failed(inode);
return ERR_PTR(error);
}
@@ -269,21 +280,28 @@ fail_iput:
}
-struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
+/**
+ * gfs2_lookup_meta - Look up an inode in a metadata directory
+ * @dip: The directory
+ * @name: The name of the inode
+ */
+struct inode *gfs2_lookup_meta(struct inode *dip, const char *name)
{
struct qstr qstr;
struct inode *inode;
+
gfs2_str2qstr(&qstr, name);
inode = gfs2_lookupi(dip, &qstr, 1);
- /* gfs2_lookupi has inconsistent callers: vfs
- * related routines expect NULL for no entry found,
- * gfs2_lookup_simple callers expect ENOENT
- * and do not check for NULL.
+ if (IS_ERR_OR_NULL(inode))
+ return inode ? inode : ERR_PTR(-ENOENT);
+
+ /*
+ * Must not call back into the filesystem when allocating
+ * pages in the metadata inode's address space.
*/
- if (inode == NULL)
- return ERR_PTR(-ENOENT);
- else
- return inode;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+
+ return inode;
}
@@ -328,7 +346,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
}
if (!is_root) {
- error = gfs2_permission(&init_user_ns, dir, MAY_EXEC);
+ error = gfs2_permission(&nop_mnt_idmap, dir, MAY_EXEC);
if (error)
goto out;
}
@@ -358,7 +376,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
{
int error;
- error = gfs2_permission(&init_user_ns, &dip->i_inode,
+ error = gfs2_permission(&nop_mnt_idmap, &dip->i_inode,
MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -415,13 +433,18 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
if (error)
goto out_ipreserv;
- error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
+ error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1);
+ if (error)
+ goto out_trans_end;
+
ip->i_no_formal_ino = ip->i_generation;
ip->i_inode.i_ino = ip->i_no_addr;
ip->i_goal = ip->i_no_addr;
+ if (*dblocks > 1)
+ ip->i_eattr = ip->i_no_addr + 1;
+out_trans_end:
gfs2_trans_end(sdp);
-
out_ipreserv:
gfs2_inplace_release(ip);
out_quota:
@@ -430,6 +453,72 @@ out:
return error;
}
+static void gfs2_final_release_pages(struct gfs2_inode *ip)
+{
+ struct inode *inode = &ip->i_inode;
+ struct gfs2_glock *gl = ip->i_gl;
+
+ /* This can only happen during incomplete inode creation. */
+ if (unlikely(!gl))
+ return;
+
+ truncate_inode_pages(gfs2_glock2aspace(gl), 0);
+ truncate_inode_pages(&inode->i_data, 0);
+
+ if (atomic_read(&gl->gl_revokes) == 0) {
+ clear_bit(GLF_LFLUSH, &gl->gl_flags);
+ clear_bit(GLF_DIRTY, &gl->gl_flags);
+ }
+}
+
+int gfs2_dinode_dealloc(struct gfs2_inode *ip)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct gfs2_rgrpd *rgd;
+ struct gfs2_holder gh;
+ int error;
+
+ if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+
+ gfs2_rindex_update(sdp);
+
+ error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
+ if (error)
+ return error;
+
+ rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
+ if (!rgd) {
+ gfs2_consist_inode(ip);
+ error = -EIO;
+ goto out_qs;
+ }
+
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
+ if (error)
+ goto out_qs;
+
+ error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
+ sdp->sd_jdesc->jd_blocks);
+ if (error)
+ goto out_rg_gunlock;
+
+ gfs2_free_di(rgd, ip);
+
+ gfs2_final_release_pages(ip);
+
+ gfs2_trans_end(sdp);
+
+out_rg_gunlock:
+ gfs2_glock_dq_uninit(&gh);
+out_qs:
+ gfs2_quota_unhold(ip);
+ return error;
+}
+
static void gfs2_init_dir(struct buffer_head *dibh,
const struct gfs2_inode *parent)
{
@@ -599,6 +688,12 @@ static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
* @size: The initial size of the inode (ignored for directories)
* @excl: Force fail if inode exists
*
+ * FIXME: Change to allocate the disk blocks and write them out in the same
+ * transaction. That way, we can no longer end up in a situation in which an
+ * inode is allocated, the node crashes, and the block looks like a valid
+ * inode. (With atomic creates in place, we will also no longer need to zero
+ * the link count and dirty the inode here on failure.)
+ *
* Returns: 0 on success, or error code
*/
@@ -609,15 +704,16 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
{
const struct qstr *name = &dentry->d_name;
struct posix_acl *default_acl, *acl;
- struct gfs2_holder ghs[2];
+ struct gfs2_holder d_gh, gh;
struct inode *inode = NULL;
struct gfs2_inode *dip = GFS2_I(dir), *ip;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_glock *io_gl;
- int error, free_vfs_inode = 1;
+ int error, dealloc_error;
u32 aflags = 0;
unsigned blocks = 1;
struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
+ bool xattr_initialized = false;
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
@@ -630,10 +726,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail;
- error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
+ error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, &d_gh);
if (error)
goto fail;
- gfs2_holder_mark_uninitialized(ghs + 1);
+ gfs2_holder_mark_uninitialized(&gh);
error = create_ok(dip, name, mode);
if (error)
@@ -644,7 +740,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!IS_ERR(inode)) {
if (S_ISDIR(inode->i_mode)) {
iput(inode);
- inode = ERR_PTR(-EISDIR);
+ inode = NULL;
+ error = -EISDIR;
goto fail_gunlock;
}
d_instantiate(dentry, inode);
@@ -655,7 +752,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
else
error = finish_no_open(file, NULL);
}
- gfs2_glock_dq_uninit(ghs);
+ gfs2_glock_dq_uninit(&d_gh);
goto fail;
} else if (error != -ENOENT) {
goto fail_gunlock;
@@ -669,12 +766,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = -ENOMEM;
if (!inode)
goto fail_gunlock;
+ gfs2_setup_inode(inode);
+ ip = GFS2_I(inode);
error = posix_acl_create(dir, &mode, &default_acl, &acl);
if (error)
goto fail_gunlock;
- ip = GFS2_I(inode);
error = gfs2_qa_get(ip);
if (error)
goto fail_free_acls;
@@ -683,7 +781,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
inode->i_rdev = dev;
inode->i_size = size;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
munge_mode_uid_gid(dip, inode);
check_and_update_goal(dip);
ip->i_goal = dip->i_goal;
@@ -729,52 +827,57 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (error)
- goto fail_free_inode;
- flush_delayed_work(&ip->i_gl->gl_work);
+ goto fail_dealloc_inode;
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
- goto fail_free_inode;
+ goto fail_dealloc_inode;
gfs2_cancel_delete_work(io_gl);
+ io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+retry:
+ error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+ if (error == -EBUSY)
+ goto retry;
if (error)
goto fail_gunlock2;
- glock_set_object(ip->i_gl, ip);
- error = gfs2_trans_begin(sdp, blocks, 0);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT | GL_NOPID,
+ &ip->i_iopen_gh);
if (error)
goto fail_gunlock2;
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
+ if (error)
+ goto fail_gunlock3;
+ clear_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
+
+ error = gfs2_trans_begin(sdp, blocks, 0);
+ if (error)
+ goto fail_gunlock3;
+
if (blocks > 1) {
- ip->i_eattr = ip->i_no_addr + 1;
gfs2_init_xattr(ip);
+ xattr_initialized = true;
}
init_dinode(dip, ip, symname);
gfs2_trans_end(sdp);
- error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
- if (error)
- goto fail_gunlock2;
-
+ glock_set_object(ip->i_gl, ip);
glock_set_object(io_gl, ip);
gfs2_set_iop(inode);
- insert_inode_hash(inode);
- free_vfs_inode = 0; /* After this point, the inode is no longer
- considered free. Any failures need to undo
- the gfs2 structures. */
if (default_acl) {
error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
if (error)
- goto fail_gunlock3;
+ goto fail_gunlock4;
posix_acl_release(default_acl);
default_acl = NULL;
}
if (acl) {
error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
if (error)
- goto fail_gunlock3;
+ goto fail_gunlock4;
posix_acl_release(acl);
acl = NULL;
}
@@ -782,11 +885,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
&gfs2_initxattrs, NULL);
if (error)
- goto fail_gunlock3;
+ goto fail_gunlock4;
error = link_dinode(dip, name, ip, &da);
if (error)
- goto fail_gunlock3;
+ goto fail_gunlock4;
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
@@ -796,43 +899,53 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
file->f_mode |= FMODE_CREATED;
error = finish_open(file, dentry, gfs2_open_common);
}
- gfs2_glock_dq_uninit(ghs);
+ gfs2_glock_dq_uninit(&d_gh);
gfs2_qa_put(ip);
- gfs2_glock_dq_uninit(ghs + 1);
+ gfs2_glock_dq_uninit(&gh);
gfs2_glock_put(io_gl);
gfs2_qa_put(dip);
+ unlock_new_inode(inode);
return error;
-fail_gunlock3:
+fail_gunlock4:
+ glock_clear_object(ip->i_gl, ip);
glock_clear_object(io_gl, ip);
+fail_gunlock3:
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_gunlock2:
- glock_clear_object(io_gl, ip);
gfs2_glock_put(io_gl);
+fail_dealloc_inode:
+ dealloc_error = 0;
+ if (ip->i_eattr)
+ dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized);
+ clear_nlink(inode);
+ mark_inode_dirty(inode);
+ if (!dealloc_error)
+ dealloc_error = gfs2_dinode_dealloc(ip);
+ if (dealloc_error)
+ fs_warn(sdp, "%s: %d\n", __func__, dealloc_error);
+ ip->i_no_addr = 0;
fail_free_inode:
if (ip->i_gl) {
- glock_clear_object(ip->i_gl, ip);
- if (free_vfs_inode) /* else evict will do the put for us */
- gfs2_glock_put(ip->i_gl);
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
}
- gfs2_rs_delete(ip, NULL);
+ gfs2_rs_deltree(&ip->i_res);
gfs2_qa_put(ip);
fail_free_acls:
posix_acl_release(default_acl);
posix_acl_release(acl);
fail_gunlock:
gfs2_dir_no_add(&da);
- gfs2_glock_dq_uninit(ghs);
+ gfs2_glock_dq_uninit(&d_gh);
if (!IS_ERR_OR_NULL(inode)) {
- clear_nlink(inode);
- if (!free_vfs_inode)
- mark_inode_dirty(inode);
- set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
- &GFS2_I(inode)->i_flags);
- iput(inode);
+ if (inode_state_read_once(inode) & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
}
- if (gfs2_holder_initialized(ghs + 1))
- gfs2_glock_dq_uninit(ghs + 1);
+ if (gfs2_holder_initialized(&gh))
+ gfs2_glock_dq_uninit(&gh);
fail:
gfs2_qa_put(dip);
return error;
@@ -840,7 +953,7 @@ fail:
/**
* gfs2_create - Create a file
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dir: The directory in which to create the file
* @dentry: The dentry of the new file
* @mode: The mode of the new file
@@ -849,7 +962,7 @@ fail:
* Returns: errno
*/
-static int gfs2_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int gfs2_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
@@ -930,7 +1043,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
struct gfs2_sbd *sdp = GFS2_SB(dir);
struct inode *inode = d_inode(old_dentry);
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_holder ghs[2];
+ struct gfs2_holder d_gh, gh;
struct buffer_head *dibh;
struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
int error;
@@ -942,14 +1055,14 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
return error;
- gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
- gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
+ gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, &d_gh);
+ gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- error = gfs2_glock_nq(ghs); /* parent */
+ error = gfs2_glock_nq(&d_gh);
if (error)
goto out_parent;
- error = gfs2_glock_nq(ghs + 1); /* child */
+ error = gfs2_glock_nq(&gh);
if (error)
goto out_child;
@@ -957,7 +1070,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (inode->i_nlink == 0)
goto out_gunlock;
- error = gfs2_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
+ error = gfs2_permission(&nop_mnt_idmap, dir, MAY_WRITE | MAY_EXEC);
if (error)
goto out_gunlock;
@@ -981,9 +1094,6 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out_gunlock;
- error = -EINVAL;
- if (!ip->i_inode.i_nlink)
- goto out_gunlock;
error = -EMLINK;
if (ip->i_inode.i_nlink == (u32)-1)
goto out_gunlock;
@@ -1021,7 +1131,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
gfs2_trans_add_meta(ip->i_gl, dibh);
inc_nlink(&ip->i_inode);
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
ihold(inode);
d_instantiate(dentry, inode);
mark_inode_dirty(inode);
@@ -1038,13 +1148,13 @@ out_gunlock_q:
gfs2_quota_unlock(dip);
out_gunlock:
gfs2_dir_no_add(&da);
- gfs2_glock_dq(ghs + 1);
+ gfs2_glock_dq(&gh);
out_child:
- gfs2_glock_dq(ghs);
+ gfs2_glock_dq(&d_gh);
out_parent:
gfs2_qa_put(dip);
- gfs2_holder_uninit(ghs);
- gfs2_holder_uninit(ghs + 1);
+ gfs2_holder_uninit(&d_gh);
+ gfs2_holder_uninit(&gh);
return error;
}
@@ -1075,7 +1185,7 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
if (IS_APPEND(&dip->i_inode))
return -EPERM;
- error = gfs2_permission(&init_user_ns, &dip->i_inode,
+ error = gfs2_permission(&nop_mnt_idmap, &dip->i_inode,
MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -1106,7 +1216,7 @@ static int gfs2_unlink_inode(struct gfs2_inode *dip,
return error;
ip->i_entries = 0;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (S_ISDIR(inode->i_mode))
clear_nlink(inode);
else
@@ -1135,7 +1245,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
struct gfs2_sbd *sdp = GFS2_SB(dir);
struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_holder ghs[3];
+ struct gfs2_holder d_gh, r_gh, gh;
struct gfs2_rgrpd *rgd;
int error;
@@ -1145,21 +1255,21 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
error = -EROFS;
- gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
- gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
+ gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, &d_gh);
+ gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
if (!rgd)
goto out_inodes;
- gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
+ gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, &r_gh);
- error = gfs2_glock_nq(ghs); /* parent */
+ error = gfs2_glock_nq(&d_gh);
if (error)
goto out_parent;
- error = gfs2_glock_nq(ghs + 1); /* child */
+ error = gfs2_glock_nq(&gh);
if (error)
goto out_child;
@@ -1173,7 +1283,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
goto out_rgrp;
}
- error = gfs2_glock_nq(ghs + 2); /* rgrp */
+ error = gfs2_glock_nq(&r_gh); /* rgrp */
if (error)
goto out_rgrp;
@@ -1189,22 +1299,22 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
gfs2_trans_end(sdp);
out_gunlock:
- gfs2_glock_dq(ghs + 2);
+ gfs2_glock_dq(&r_gh);
out_rgrp:
- gfs2_glock_dq(ghs + 1);
+ gfs2_glock_dq(&gh);
out_child:
- gfs2_glock_dq(ghs);
+ gfs2_glock_dq(&d_gh);
out_parent:
- gfs2_holder_uninit(ghs + 2);
+ gfs2_holder_uninit(&r_gh);
out_inodes:
- gfs2_holder_uninit(ghs + 1);
- gfs2_holder_uninit(ghs);
+ gfs2_holder_uninit(&gh);
+ gfs2_holder_uninit(&d_gh);
return error;
}
/**
* gfs2_symlink - Create a symlink
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dir: The directory to create the symlink in
* @dentry: The dentry to put the symlink in
* @symname: The thing which the link points to
@@ -1212,7 +1322,7 @@ out_inodes:
* Returns: errno
*/
-static int gfs2_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int gfs2_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
unsigned int size;
@@ -1226,24 +1336,25 @@ static int gfs2_symlink(struct user_namespace *mnt_userns, struct inode *dir,
/**
* gfs2_mkdir - Make a directory
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dir: The parent directory of the new one
* @dentry: The dentry of the new directory
* @mode: The mode of the new directory
*
- * Returns: errno
+ * Returns: the dentry, or ERR_PTR(errno)
*/
-static int gfs2_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
+
+ return ERR_PTR(gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0));
}
/**
* gfs2_mknod - Make a special file
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dir: The directory in which the special file will reside
* @dentry: The dentry of the special file
* @mode: The mode of the special file
@@ -1251,7 +1362,7 @@ static int gfs2_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
*
*/
-static int gfs2_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int gfs2_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
@@ -1272,27 +1383,19 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags,
umode_t mode)
{
- struct dentry *d;
bool excl = !!(flags & O_EXCL);
- if (!d_in_lookup(dentry))
- goto skip_lookup;
-
- d = __gfs2_lookup(dir, dentry, file);
- if (IS_ERR(d))
- return PTR_ERR(d);
- if (d != NULL)
- dentry = d;
- if (d_really_is_positive(dentry)) {
- if (!(file->f_mode & FMODE_OPENED))
+ if (d_in_lookup(dentry)) {
+ struct dentry *d = __gfs2_lookup(dir, dentry, file);
+ if (file->f_mode & FMODE_OPENED) {
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ dput(d);
+ return excl && (flags & O_CREAT) ? -EEXIST : 0;
+ }
+ if (d || d_really_is_positive(dentry))
return finish_no_open(file, d);
- dput(d);
- return excl && (flags & O_CREAT) ? -EEXIST : 0;
}
-
- BUG_ON(d != NULL);
-
-skip_lookup:
if (!(flags & O_CREAT))
return -ENOENT;
@@ -1363,7 +1466,7 @@ static int update_moved_ino(struct gfs2_inode *ip, struct gfs2_inode *ndip,
if (dir_rename)
return gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
mark_inode_dirty_sync(&ip->i_inode);
return 0;
}
@@ -1501,7 +1604,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
}
}
} else {
- error = gfs2_permission(&init_user_ns, ndir,
+ error = gfs2_permission(&nop_mnt_idmap, ndir,
MAY_WRITE | MAY_EXEC);
if (error)
goto out_gunlock;
@@ -1538,7 +1641,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
/* Check out the dir to be renamed */
if (dir_rename) {
- error = gfs2_permission(&init_user_ns, d_inode(odentry),
+ error = gfs2_permission(&nop_mnt_idmap, d_inode(odentry),
MAY_WRITE);
if (error)
goto out_gunlock;
@@ -1702,13 +1805,13 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
if (S_ISDIR(old_mode)) {
- error = gfs2_permission(&init_user_ns, odentry->d_inode,
+ error = gfs2_permission(&nop_mnt_idmap, odentry->d_inode,
MAY_WRITE);
if (error)
goto out_gunlock;
}
if (S_ISDIR(new_mode)) {
- error = gfs2_permission(&init_user_ns, ndentry->d_inode,
+ error = gfs2_permission(&nop_mnt_idmap, ndentry->d_inode,
MAY_WRITE);
if (error)
goto out_gunlock;
@@ -1763,7 +1866,7 @@ out:
return error;
}
-static int gfs2_rename2(struct user_namespace *mnt_userns, struct inode *odir,
+static int gfs2_rename2(struct mnt_idmap *idmap, struct inode *odir,
struct dentry *odentry, struct inode *ndir,
struct dentry *ndentry, unsigned int flags)
{
@@ -1838,7 +1941,7 @@ out:
/**
* gfs2_permission
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: The inode
* @mask: The mask to be tested
*
@@ -1849,19 +1952,27 @@ out:
* Returns: errno
*/
-int gfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
+int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
+ int may_not_block = mask & MAY_NOT_BLOCK;
struct gfs2_inode *ip;
struct gfs2_holder i_gh;
+ struct gfs2_glock *gl;
int error;
gfs2_holder_mark_uninitialized(&i_gh);
ip = GFS2_I(inode);
- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
- if (mask & MAY_NOT_BLOCK)
+ gl = rcu_dereference_check(ip->i_gl, !may_not_block);
+ if (unlikely(!gl)) {
+ /* inode is getting torn down, must be RCU mode */
+ WARN_ON_ONCE(!may_not_block);
+ return -ECHILD;
+ }
+ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
+ if (may_not_block)
return -ECHILD;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return error;
}
@@ -1869,7 +1980,7 @@ int gfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
error = -EPERM;
else
- error = generic_permission(&init_user_ns, inode, mask);
+ error = generic_permission(&nop_mnt_idmap, inode, mask);
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
@@ -1878,7 +1989,7 @@ int gfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
static int __gfs2_setattr_simple(struct inode *inode, struct iattr *attr)
{
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -1906,7 +2017,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
kuid_t ouid, nuid;
kgid_t ogid, ngid;
int error;
- struct gfs2_alloc_parms ap;
+ struct gfs2_alloc_parms ap = {};
ouid = inode->i_uid;
ogid = inode->i_gid;
@@ -1963,7 +2074,7 @@ out:
/**
* gfs2_setattr - Change attributes on an inode
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dentry: The dentry which is changing
* @attr: The structure describing the change
*
@@ -1973,7 +2084,7 @@ out:
* Returns: errno
*/
-static int gfs2_setattr(struct user_namespace *mnt_userns,
+static int gfs2_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -1989,11 +2100,11 @@ static int gfs2_setattr(struct user_namespace *mnt_userns,
if (error)
goto out;
- error = may_setattr(&init_user_ns, inode, attr->ia_valid);
+ error = may_setattr(&nop_mnt_idmap, inode, attr->ia_valid);
if (error)
goto error;
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
goto error;
@@ -2004,7 +2115,7 @@ static int gfs2_setattr(struct user_namespace *mnt_userns,
else {
error = gfs2_setattr_simple(inode, attr);
if (!error && attr->ia_valid & ATTR_MODE)
- error = posix_acl_chmod(&init_user_ns, inode,
+ error = posix_acl_chmod(&nop_mnt_idmap, dentry,
inode->i_mode);
}
@@ -2019,7 +2130,7 @@ out:
/**
* gfs2_getattr - Read out an inode's attributes
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @path: Object to query
* @stat: The inode's stats
* @request_mask: Mask of STATX_xxx flags indicating the caller's interests
@@ -2034,7 +2145,7 @@ out:
* Returns: errno
*/
-static int gfs2_getattr(struct user_namespace *mnt_userns,
+static int gfs2_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
@@ -2063,7 +2174,7 @@ static int gfs2_getattr(struct user_namespace *mnt_userns,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
@@ -2131,8 +2242,7 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset)
return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
}
-static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
- int flags)
+static int gfs2_update_time(struct inode *inode, int flags)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *gl = ip->i_gl;
@@ -2140,14 +2250,15 @@ static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
int error;
gh = gfs2_glock_is_locked_by_me(gl);
- if (gh && !gfs2_glock_is_held_excl(gl)) {
+ if (gh && gl->gl_state != LM_ST_EXCLUSIVE) {
gfs2_glock_dq(gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
error = gfs2_glock_nq(gh);
if (error)
return error;
}
- return generic_update_time(inode, time, flags);
+ generic_update_time(inode, flags);
+ return 0;
}
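/*
 * The timestamp updates in this file now go through the VFS accessor helpers
 * instead of writing inode->i_atime / i_mtime / i_ctime directly, matching
 * the gfs2_dinode_in() changes earlier.  A minimal sketch of the pattern,
 * assuming a kernel that already provides these helpers;
 * example_update_times() is hypothetical.
 */
static void example_update_times(struct inode *inode,
                                 const struct timespec64 *atime)
{
        struct timespec64 cur = inode_get_atime(inode);

        /* only move atime forward, as gfs2_dinode_in() does */
        if (timespec64_compare(&cur, atime) < 0)
                inode_set_atime_to_ts(inode, *atime);

        /* was: inode->i_ctime = current_time(inode); */
        inode_set_ctime_current(inode);
}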
static const struct inode_operations gfs2_file_iops = {
@@ -2156,7 +2267,7 @@ static const struct inode_operations gfs2_file_iops = {
.getattr = gfs2_getattr,
.listxattr = gfs2_listxattr,
.fiemap = gfs2_fiemap,
- .get_acl = gfs2_get_acl,
+ .get_inode_acl = gfs2_get_acl,
.set_acl = gfs2_set_acl,
.update_time = gfs2_update_time,
.fileattr_get = gfs2_fileattr_get,
@@ -2178,7 +2289,7 @@ static const struct inode_operations gfs2_dir_iops = {
.getattr = gfs2_getattr,
.listxattr = gfs2_listxattr,
.fiemap = gfs2_fiemap,
- .get_acl = gfs2_get_acl,
+ .get_inode_acl = gfs2_get_acl,
.set_acl = gfs2_set_acl,
.update_time = gfs2_update_time,
.atomic_open = gfs2_atomic_open,
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 7b2c1f390db7..2fcd96dd1361 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -12,10 +12,10 @@
#include <linux/mm.h>
#include "util.h"
-extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
-extern int gfs2_internal_read(struct gfs2_inode *ip,
- char *buf, loff_t *pos, unsigned size);
-extern void gfs2_set_aops(struct inode *inode);
+bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask);
+ssize_t gfs2_internal_read(struct gfs2_inode *ip,
+ char *buf, loff_t *pos, size_t size);
+void gfs2_set_aops(struct inode *inode);
static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
{
@@ -44,19 +44,17 @@ static inline int gfs2_is_dir(const struct gfs2_inode *ip)
static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
{
- inode->i_blocks = blocks <<
- (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+ inode->i_blocks = blocks << (inode->i_blkbits - SECTOR_SHIFT);
}
static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
{
- return inode->i_blocks >>
- (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+ return inode->i_blocks >> (inode->i_blkbits - SECTOR_SHIFT);
}
static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
{
- change <<= inode->i_blkbits - GFS2_BASIC_BLOCK_SHIFT;
+ change <<= inode->i_blkbits - SECTOR_SHIFT;
gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks >= -change));
inode->i_blocks += change;
}
@@ -88,33 +86,33 @@ err:
return -EIO;
}
-extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino,
- unsigned int blktype);
-extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
- u64 no_formal_ino,
- unsigned int blktype);
-
-extern int gfs2_inode_refresh(struct gfs2_inode *ip);
-
-extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
- int is_root);
-extern int gfs2_permission(struct user_namespace *mnt_userns,
- struct inode *inode, int mask);
-extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
-extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
-extern int gfs2_open_common(struct inode *inode, struct file *file);
-extern loff_t gfs2_seek_data(struct file *file, loff_t offset);
-extern loff_t gfs2_seek_hole(struct file *file, loff_t offset);
+void gfs2_setup_inode(struct inode *inode);
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
+ u64 no_addr, u64 no_formal_ino,
+ unsigned int blktype);
+struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
+ u64 no_formal_ino,
+ unsigned int blktype);
+int gfs2_dinode_dealloc(struct gfs2_inode *ip);
+
+struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
+ int is_root);
+int gfs2_permission(struct mnt_idmap *idmap,
+ struct inode *inode, int mask);
+struct inode *gfs2_lookup_meta(struct inode *dip, const char *name);
+void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
+int gfs2_open_common(struct inode *inode, struct file *file);
+loff_t gfs2_seek_data(struct file *file, loff_t offset);
+loff_t gfs2_seek_hole(struct file *file, loff_t offset);
extern const struct file_operations gfs2_file_fops_nolock;
extern const struct file_operations gfs2_dir_fops_nolock;
-extern int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-extern int gfs2_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
-extern void gfs2_set_inode_flags(struct inode *inode);
-
+int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int gfs2_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
+void gfs2_set_inode_flags(struct inode *inode);
+
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
extern const struct file_operations gfs2_file_fops;
extern const struct file_operations gfs2_dir_fops;
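The inode.h hunk above replaces the superblock-derived shift with inode->i_blkbits - SECTOR_SHIFT when converting between filesystem blocks and the 512-byte units stored in inode->i_blocks. A minimal userspace sketch of the same arithmetic, illustrative only and not part of the patch series (the 4 KiB block size is an assumption):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9			/* i_blocks counts 512-byte sectors */

int main(void)
{
	unsigned int i_blkbits = 12;	/* assumed 4 KiB filesystem blocks */
	uint64_t fs_blocks = 3;		/* blocks allocated to the inode */

	/* blocks -> sectors, as in gfs2_set_inode_blocks() */
	uint64_t i_blocks = fs_blocks << (i_blkbits - SECTOR_SHIFT);

	/* sectors -> blocks, as in gfs2_get_inode_blocks() */
	uint64_t back = i_blocks >> (i_blkbits - SECTOR_SHIFT);

	printf("%llu blocks = %llu sectors (round-trips to %llu)\n",
	       (unsigned long long)fs_blocks,
	       (unsigned long long)i_blocks,
	       (unsigned long long)back);
	return 0;
}

With 4 KiB blocks the shift is 3, so 3 blocks map to 24 sectors, matching what the old sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT expression computed.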
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 50578f881e6d..b8d249925395 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -15,9 +15,6 @@
#include <linux/sched/signal.h>
#include "incore.h"
-#include "glock.h"
-#include "glops.h"
-#include "recovery.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"
@@ -58,6 +55,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
/**
* gfs2_update_reply_times - Update locking statistics
* @gl: The glock to update
+ * @blocking: The operation may have been blocking
*
* This assumes that gl->gl_dstamp has been set earlier.
*
@@ -72,12 +70,12 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
* TRY_1CB flags are set are classified as non-blocking. All
* other DLM requests are counted as (potentially) blocking.
*/
-static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
+static inline void gfs2_update_reply_times(struct gfs2_glock *gl,
+ bool blocking)
{
struct gfs2_pcpu_lkstats *lks;
const unsigned gltype = gl->gl_name.ln_type;
- unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
- GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
+ unsigned index = blocking ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
s64 rtt;
preempt_disable();
@@ -119,9 +117,18 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl)
static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
- unsigned ret = gl->gl_state;
+ bool blocking;
+ unsigned ret;
+
+ blocking = test_bit(GLF_BLOCKING, &gl->gl_flags);
+ gfs2_update_reply_times(gl, blocking);
+ clear_bit(GLF_BLOCKING, &gl->gl_flags);
+
+ /* If the glock is dead, we only react to a dlm_unlock() reply. */
+ if (__lockref_is_dead(&gl->gl_lockref) &&
+ gl->gl_lksb.sb_status != -DLM_EUNLOCK)
+ return;
- gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
@@ -129,18 +136,19 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- if (gl->gl_ops->go_free)
- gl->gl_ops->go_free(gl);
gfs2_glock_free(gl);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
- ret |= LM_OUT_CANCELED;
+ ret = LM_OUT_CANCELED;
goto out;
case -EAGAIN: /* Try lock fails */
+ ret = LM_OUT_TRY_AGAIN;
+ goto out;
case -EDEADLK: /* Deadlock detected */
+ ret = LM_OUT_DEADLOCK;
goto out;
case -ETIMEDOUT: /* Canceled due to timeout */
- ret |= LM_OUT_ERROR;
+ ret = LM_OUT_ERROR;
goto out;
case 0: /* Success */
break;
@@ -149,20 +157,22 @@ static void gdlm_ast(void *arg)
}
ret = gl->gl_req;
- if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
- if (gl->gl_req == LM_ST_SHARED)
- ret = LM_ST_DEFERRED;
- else if (gl->gl_req == LM_ST_DEFERRED)
- ret = LM_ST_SHARED;
- else
- BUG();
- }
- set_bit(GLF_INITIAL, &gl->gl_flags);
+ /*
+ * The GLF_INITIAL flag is initially set for new glocks. Upon the
+ * first successful new (non-conversion) request, we clear this flag to
+ * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the
+ * identifier to use for identifying it.
+ *
+ * Any failed initial requests do not create a DLM lock, so we ignore
+ * the gl->gl_lksb.sb_lkid values that come with such requests.
+ */
+
+ clear_bit(GLF_INITIAL, &gl->gl_flags);
gfs2_glock_complete(gl, ret);
return;
out:
- if (!test_bit(GLF_INITIAL, &gl->gl_flags))
+ if (test_bit(GLF_INITIAL, &gl->gl_flags))
gl->gl_lksb.sb_lkid = 0;
gfs2_glock_complete(gl, ret);
}
@@ -171,6 +181,9 @@ static void gdlm_bast(void *arg, int mode)
{
struct gfs2_glock *gl = arg;
+ if (__lockref_is_dead(&gl->gl_lockref))
+ return;
+
switch (mode) {
case DLM_LOCK_EX:
gfs2_glock_cb(gl, LM_ST_UNLOCKED);
@@ -206,8 +219,21 @@ static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
return -1;
}
+/* Taken from fs/dlm/lock.c. */
+
+static bool middle_conversion(int cur, int req)
+{
+ return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
+ (cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
+}
+
+static bool down_conversion(int cur, int req)
+{
+ return !middle_conversion(cur, req) && req < cur;
+}
+
static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
- const int req)
+ const int req, bool blocking)
{
u32 lkf = 0;
@@ -222,23 +248,16 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
lkf |= DLM_LKF_NOQUEUEBAST;
}
- if (gfs_flags & LM_FLAG_PRIORITY) {
- lkf |= DLM_LKF_NOORDER;
- lkf |= DLM_LKF_HEADQUE;
- }
-
- if (gfs_flags & LM_FLAG_ANY) {
- if (req == DLM_LOCK_PR)
- lkf |= DLM_LKF_ALTCW;
- else if (req == DLM_LOCK_CW)
- lkf |= DLM_LKF_ALTPR;
- else
- BUG();
- }
-
- if (gl->gl_lksb.sb_lkid != 0) {
+ if (!test_bit(GLF_INITIAL, &gl->gl_flags)) {
lkf |= DLM_LKF_CONVERT;
- if (test_bit(GLF_BLOCKING, &gl->gl_flags))
+
+ /*
+ * The DLM_LKF_QUECVT flag needs to be set for "first come,
+ * first served" semantics, but it must only be set for
+ * "upward" lock conversions or else DLM will reject the
+ * request as invalid.
+ */
+ if (blocking)
lkf |= DLM_LKF_QUECVT;
}
@@ -258,80 +277,123 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
unsigned int flags)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
- int req;
+ bool blocking;
+ int cur, req;
u32 lkf;
char strname[GDLM_STRNAME_BYTES] = "";
+ int error;
+ gl->gl_req = req_state;
+ cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
req = make_mode(gl->gl_name.ln_sbd, req_state);
- lkf = make_flags(gl, flags, req);
+ blocking = !down_conversion(cur, req) &&
+ !(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB));
+ lkf = make_flags(gl, flags, req, blocking);
+ if (blocking)
+ set_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
- if (gl->gl_lksb.sb_lkid) {
- gfs2_update_request_times(gl);
- } else {
+ if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
strname[GDLM_STRNAME_BYTES - 1] = '\0';
gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
gl->gl_dstamp = ktime_get_real();
+ } else {
+ gfs2_update_request_times(gl);
}
/*
* Submit the actual lock request.
*/
- return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
- GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+again:
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+ GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+ }
+ up_read(&ls->ls_sem);
+ if (error == -EBUSY) {
+ msleep(20);
+ goto again;
+ }
+ return error;
}
static void gdlm_put_lock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ uint32_t flags = 0;
int error;
- if (gl->gl_lksb.sb_lkid == 0) {
+ BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
+
+ if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
gfs2_glock_free(gl);
return;
}
- clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_update_request_times(gl);
- /* don't want to call dlm if we've unmounted the lock protocol */
- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
- gfs2_glock_free(gl);
+ /*
+ * When the lockspace is released, all remaining glocks will be
+ * unlocked automatically. This is more efficient than unlocking them
+ * individually, but when the lock is held in DLM_LOCK_EX or
+ * DLM_LOCK_PW mode, the lock value block (LVB) would be lost.
+ */
+
+ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+ (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) {
+ gfs2_glock_free_later(gl);
return;
}
- /* don't want to skip dlm_unlock writing the lvb when lock has one */
- if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !gl->gl_lksb.sb_lvbptr) {
+ if (gl->gl_lksb.sb_lvbptr)
+ flags |= DLM_LKF_VALBLK;
+
+again:
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags,
+ NULL, gl);
+ }
+ up_read(&ls->ls_sem);
+ if (error == -EBUSY) {
+ msleep(20);
+ goto again;
+ }
+
+ if (error == -ENODEV) {
gfs2_glock_free(gl);
return;
}
- error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
- NULL, gl);
if (error) {
fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
- return;
}
}
static void gdlm_cancel(struct gfs2_glock *gl)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
- dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+
+ down_read(&ls->ls_sem);
+ if (likely(ls->ls_dlm != NULL)) {
+ dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+ }
+ up_read(&ls->ls_sem);
}
/*
* dlm/gfs2 recovery coordination using dlm_recover callbacks
*
- * 0. gfs2 checks for another cluster node withdraw, needing journal replay
* 1. dlm_controld sees lockspace members change
* 2. dlm_controld blocks dlm-kernel locking activity
* 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
@@ -506,7 +568,11 @@ static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL))
+ error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+ up_read(&ls->ls_sem);
if (error) {
fs_err(sdp, "%s lkid %x error %d\n",
name, lksb->sb_lkid, error);
@@ -533,9 +599,14 @@ static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
memset(strname, 0, GDLM_STRNAME_BYTES);
snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);
- error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
- strname, GDLM_STRNAME_BYTES - 1,
- 0, sync_wait_cb, ls, NULL);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
+ strname, GDLM_STRNAME_BYTES - 1,
+ 0, sync_wait_cb, ls, NULL);
+ }
+ up_read(&ls->ls_sem);
if (error) {
fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
name, lksb->sb_lkid, flags, mode, error);
@@ -580,28 +651,6 @@ static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
&ls->ls_control_lksb, "control_lock");
}
-/**
- * remote_withdraw - react to a node withdrawing from the file system
- * @sdp: The superblock
- */
-static void remote_withdraw(struct gfs2_sbd *sdp)
-{
- struct gfs2_jdesc *jd;
- int ret = 0, count = 0;
-
- list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
- if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
- continue;
- ret = gfs2_recover_journal(jd, true);
- if (ret)
- break;
- count++;
- }
-
- /* Now drop the additional reference we acquired */
- fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
-}
-
static void gfs2_control_func(struct work_struct *work)
{
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
@@ -612,13 +661,6 @@ static void gfs2_control_func(struct work_struct *work)
int recover_size;
int i, error;
- /* First check for other nodes that may have done a withdraw. */
- if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
- remote_withdraw(sdp);
- clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
- return;
- }
-
spin_lock(&ls->ls_recover_spin);
/*
* No MOUNT_DONE means we're still mounting; control_mount()
@@ -942,14 +984,15 @@ locks_done:
if (sdp->sd_args.ar_spectator) {
fs_info(sdp, "Recovery is required. Waiting for a "
"non-spectator to mount.\n");
+ spin_unlock(&ls->ls_recover_spin);
msleep_interruptible(1000);
} else {
fs_info(sdp, "control_mount wait1 block %u start %u "
"mount %u lvb %u flags %lx\n", block_gen,
start_gen, mount_gen, lvb_gen,
ls->ls_recover_flags);
+ spin_unlock(&ls->ls_recover_spin);
}
- spin_unlock(&ls->ls_recover_spin);
goto restart;
}
@@ -1045,7 +1088,7 @@ restart:
/*
* Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
- * to accomodate the largest slot number. (NB dlm slot numbers start at 1,
+ * to accommodate the largest slot number. (NB dlm slot numbers start at 1,
* gfs2 jids start at 0, so jid = slot - 1)
*/
@@ -1261,6 +1304,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
*/
INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
+ ls->ls_dlm = NULL;
spin_lock_init(&ls->ls_recover_spin);
ls->ls_recover_flags = 0;
ls->ls_recover_mount = 0;
@@ -1289,12 +1333,13 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
memcpy(cluster, table, strlen(table) - strlen(fsname));
fsname++;
- flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;
+ flags = DLM_LSFL_NEWEXCL;
/*
* create/join lockspace
*/
+ init_rwsem(&ls->ls_sem);
error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
&gdlm_lockspace_ops, sdp, &ops_result,
&ls->ls_dlm);
@@ -1338,7 +1383,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
return 0;
fail_release:
- dlm_release_lockspace(ls->ls_dlm, 2);
+ dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
fail_free:
free_recover_size(ls);
fail:
@@ -1358,7 +1403,15 @@ static void gdlm_first_done(struct gfs2_sbd *sdp)
fs_err(sdp, "mount first_done error %d\n", error);
}
-static void gdlm_unmount(struct gfs2_sbd *sdp)
+/*
+ * gdlm_unmount - release our lockspace
+ * @sdp: the superblock
+ * @clean: Indicates whether or not the remaining nodes in the cluster should
+ * perform recovery. Recovery is necessary when a node withdraws and
+ * its journal remains dirty. Recovery isn't necessary when a node
+ * cleanly unmounts a filesystem.
+ */
+static void gdlm_unmount(struct gfs2_sbd *sdp, bool clean)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -1374,10 +1427,14 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
/* mounted_lock and control_lock will be purged in dlm recovery */
release:
+ down_write(&ls->ls_sem);
if (ls->ls_dlm) {
- dlm_release_lockspace(ls->ls_dlm, 2);
+ dlm_release_lockspace(ls->ls_dlm,
+ clean ? DLM_RELEASE_NORMAL :
+ DLM_RELEASE_RECOVER);
ls->ls_dlm = NULL;
}
+ up_write(&ls->ls_sem);
free_recover_size(ls);
}
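Several hunks above wrap every dlm_lock()/dlm_unlock() call site in the same pattern: take ls->ls_sem for reading so gdlm_unmount() cannot release the lockspace concurrently, fall back to -ENODEV once ls_dlm has been cleared, and retry -EBUSY after a short sleep. A condensed sketch of that pattern as it would look factored into one place; gdlm_do_lock() is a hypothetical helper, not something the patch adds:

/* Sketch only: mirrors the guard/retry pattern used around dlm_lock()
 * and dlm_unlock() in lock_dlm.c; gdlm_do_lock() is not a real helper. */
static int gdlm_do_lock(struct lm_lockstruct *ls, int req,
			struct dlm_lksb *lksb, u32 lkf,
			const char *name, unsigned int namelen, void *astarg)
{
	int error;

again:
	down_read(&ls->ls_sem);
	error = -ENODEV;
	if (likely(ls->ls_dlm != NULL))
		error = dlm_lock(ls->ls_dlm, req, lksb, lkf, name, namelen,
				 0, gdlm_ast, astarg, gdlm_bast);
	up_read(&ls->ls_sem);
	if (error == -EBUSY) {
		msleep(20);
		goto again;
	}
	return error;
}

gdlm_unmount() takes ls->ls_sem for writing around dlm_release_lockspace() and clears ls_dlm, which is what makes the read-side NULL check above safe.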
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f0ee3ff6f9a8..8312cd2cdae4 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -31,6 +31,7 @@
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"
+#include "aops.h"
static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
@@ -111,10 +112,8 @@ __acquires(&sdp->sd_ail_lock)
&tr->tr_ail2_list);
continue;
}
- if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
+ if (!cmpxchg(&sdp->sd_log_error, 0, -EIO))
gfs2_io_error_bh(sdp, bh);
- gfs2_withdraw_delayed(sdp);
- }
}
if (gfs2_withdrawn(sdp)) {
@@ -127,11 +126,15 @@ __acquires(&sdp->sd_ail_lock)
continue;
gl = bd->bd_gl;
list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
- mapping = bh->b_page->mapping;
+ mapping = bh->b_folio->mapping;
if (!mapping)
continue;
spin_unlock(&sdp->sd_ail_lock);
- ret = generic_writepages(mapping, wbc);
+ BUG_ON(GFS2_SB(mapping->host) != sdp);
+ if (gfs2_is_jdata(GFS2_I(mapping->host)))
+ ret = gfs2_jdata_writeback(mapping, wbc);
+ else
+ ret = mapping->a_ops->writepages(mapping, wbc);
if (need_resched()) {
blk_finish_plug(plug);
cond_resched();
@@ -140,6 +143,7 @@ __acquires(&sdp->sd_ail_lock)
spin_lock(&sdp->sd_ail_lock);
if (ret == -ENODATA) /* if a jdata write into a new hole */
ret = 0; /* ignore it */
+ mapping_set_error(mapping, ret);
if (ret || wbc->nr_to_write <= 0)
break;
return -EBUSY;
@@ -222,8 +226,7 @@ out:
spin_unlock(&sdp->sd_ail_lock);
blk_finish_plug(&plug);
if (ret) {
- gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
- "returned: %d\n", ret);
+ gfs2_lm(sdp, "gfs2_ail1_start_one returned: %d\n", ret);
gfs2_withdraw(sdp);
}
trace_gfs2_ail_flush(sdp, wbc, 0);
@@ -319,10 +322,8 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
continue;
}
if (!buffer_uptodate(bh) &&
- !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
+ !cmpxchg(&sdp->sd_log_error, 0, -EIO))
gfs2_io_error_bh(sdp, bh);
- gfs2_withdraw_delayed(sdp);
- }
/*
* If we have space for revokes and the bd is no longer on any
* buf list, we can just add a revoke for it immediately and
@@ -344,14 +345,15 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
* @sdp: The superblock
* @max_revokes: If non-zero, add revokes where appropriate
*
- * Tries to empty the ail1 lists, starting with the oldest first
+ * Tries to empty the ail1 lists, starting with the oldest first.
+ * Returns %true if the ail1 list is now empty.
*/
-static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
+static bool gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
struct gfs2_trans *tr, *s;
int oldest_tr = 1;
- int ret;
+ bool empty;
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
@@ -361,15 +363,10 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
oldest_tr = 0;
}
gfs2_log_update_flush_tail(sdp);
- ret = list_empty(&sdp->sd_ail1_list);
+ empty = list_empty(&sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
- if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
- gfs2_lm(sdp, "fatal: I/O error(s)\n");
- gfs2_withdraw(sdp);
- }
-
- return ret;
+ return empty;
}
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
@@ -782,7 +779,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
if (atomic_dec_return(&gl->gl_revokes) == 0) {
clear_bit(GLF_LFLUSH, &gl->gl_flags);
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
}
}
@@ -823,7 +820,7 @@ void gfs2_flush_revokes(struct gfs2_sbd *sdp)
void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
u64 seq, u32 tail, u32 lblock, u32 flags,
- int op_flags)
+ blk_opf_t op_flags)
{
struct gfs2_log_header *lh;
u32 hash, crc;
@@ -905,10 +902,10 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
- int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
+ struct super_block *sb = sdp->sd_vfs;
- gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
+ gfs2_assert_withdraw(sdp, sb->s_writers.frozen != SB_FREEZE_COMPLETE);
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
@@ -967,8 +964,9 @@ void gfs2_ail_drain(struct gfs2_sbd *sdp)
static void empty_ail1_list(struct gfs2_sbd *sdp)
{
unsigned long start = jiffies;
+ bool empty = false;
- for (;;) {
+ while (!empty) {
if (time_after(jiffies, start + (HZ * 600))) {
fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
__func__, current->journal_info ? 1 : 0);
@@ -977,8 +975,10 @@ static void empty_ail1_list(struct gfs2_sbd *sdp)
}
gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp);
- if (gfs2_ail1_empty(sdp, 0))
- return;
+ empty = gfs2_ail1_empty(sdp, 0);
+
+ if (gfs2_withdrawn(sdp))
+ break;
}
}
@@ -1028,7 +1028,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
struct gfs2_trans *tr = NULL;
unsigned int reserved_blocks = 0, used_blocks = 0;
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ bool frozen = test_bit(SDF_FROZEN, &sdp->sd_flags);
unsigned int first_log_head;
unsigned int reserved_revokes = 0;
@@ -1040,7 +1040,8 @@ repeat:
* Do this check while holding the log_flush_lock to prevent new
* buffers from being added to the ail via gfs2_pin()
*/
- if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ if (gfs2_withdrawn(sdp) ||
+ !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
goto out;
/* Log might have been flushed while we waited for the flush lock */
@@ -1059,8 +1060,8 @@ repeat:
if (tr) {
sdp->sd_log_tr = NULL;
tr->tr_first = first_log_head;
- if (unlikely (state == SFS_FROZEN)) {
- if (gfs2_assert_withdraw_delayed(sdp,
+ if (unlikely(frozen)) {
+ if (gfs2_assert_withdraw(sdp,
!tr->tr_num_buf_new && !tr->tr_num_databuf_new))
goto out_withdraw;
}
@@ -1084,8 +1085,8 @@ repeat:
if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (unlikely(state == SFS_FROZEN))
- if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
+ if (unlikely(frozen))
+ if (gfs2_assert_withdraw(sdp, !reserved_revokes))
goto out_withdraw;
gfs2_ordered_write(sdp);
@@ -1094,7 +1095,8 @@ repeat:
lops_before_commit(sdp, tr);
if (gfs2_withdrawn(sdp))
goto out_withdraw;
- gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
+ if (sdp->sd_jdesc)
+ gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
if (gfs2_withdrawn(sdp))
goto out_withdraw;
@@ -1128,8 +1130,6 @@ repeat:
if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LOG_HEAD_FLUSH_FREEZE))
gfs2_log_shutdown(sdp);
- if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
- atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
}
out_end:
@@ -1141,13 +1141,11 @@ out_end:
reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
out:
if (used_blocks != reserved_blocks) {
- gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
+ gfs2_assert_withdraw(sdp, used_blocks < reserved_blocks);
gfs2_log_release(sdp, reserved_blocks - used_blocks);
}
up_write(&sdp->sd_log_flush_lock);
gfs2_trans_free(sdp, tr);
- if (gfs2_withdrawing(sdp))
- gfs2_withdraw(sdp);
trace_gfs2_log_flush(sdp, 0, flags);
return;
@@ -1222,6 +1220,21 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_unlock(sdp);
}
+static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
+{
+ return atomic_read(&sdp->sd_log_pinned) +
+ atomic_read(&sdp->sd_log_blks_needed) >=
+ atomic_read(&sdp->sd_log_thresh1);
+}
+
+static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+{
+ return sdp->sd_jdesc->jd_blocks -
+ atomic_read(&sdp->sd_log_blks_free) +
+ atomic_read(&sdp->sd_log_blks_needed) >=
+ atomic_read(&sdp->sd_log_thresh2);
+}
+
/**
* gfs2_log_commit - Commit a transaction to the log
* @sdp: the filesystem
@@ -1241,9 +1254,7 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
log_refund(sdp, tr);
- if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
- ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
- atomic_read(&sdp->sd_log_thresh2)))
+ if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp))
wake_up(&sdp->sd_logd_waitq);
}
@@ -1266,24 +1277,6 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
}
-static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
-{
- return (atomic_read(&sdp->sd_log_pinned) +
- atomic_read(&sdp->sd_log_blks_needed) >=
- atomic_read(&sdp->sd_log_thresh1));
-}
-
-static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
-{
- unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-
- if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
- return 1;
-
- return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
- atomic_read(&sdp->sd_log_thresh2);
-}
-
/**
* gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
* @data: Pointer to GFS2 superblock
@@ -1296,24 +1289,11 @@ int gfs2_logd(void *data)
{
struct gfs2_sbd *sdp = data;
unsigned long t = 1;
- DEFINE_WAIT(wait);
+ set_freezable();
while (!kthread_should_stop()) {
-
- if (gfs2_withdrawn(sdp)) {
- msleep_interruptible(HZ);
- continue;
- }
- /* Check for errors writing to the journal */
- if (sdp->sd_log_error) {
- gfs2_lm(sdp,
- "GFS2: fsid=%s: error %d: "
- "withdrawing the file system to "
- "prevent further damage.\n",
- sdp->sd_fsname, sdp->sd_log_error);
- gfs2_withdraw(sdp);
- continue;
- }
+ if (gfs2_withdrawn(sdp))
+ break;
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
gfs2_ail1_empty(sdp, 0);
@@ -1321,7 +1301,9 @@ int gfs2_logd(void *data)
GFS2_LFC_LOGD_JFLUSH_REQD);
}
- if (gfs2_ail_flush_reqd(sdp)) {
+ if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+ gfs2_ail_flush_reqd(sdp)) {
+ clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp, 0);
@@ -1331,19 +1313,13 @@ int gfs2_logd(void *data)
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
- try_to_freeze();
-
- do {
- prepare_to_wait(&sdp->sd_logd_waitq, &wait,
- TASK_INTERRUPTIBLE);
- if (!gfs2_ail_flush_reqd(sdp) &&
- !gfs2_jrnl_flush_reqd(sdp) &&
- !kthread_should_stop())
- t = schedule_timeout(t);
- } while(t && !gfs2_ail_flush_reqd(sdp) &&
- !gfs2_jrnl_flush_reqd(sdp) &&
- !kthread_should_stop());
- finish_wait(&sdp->sd_logd_waitq, &wait);
+ t = wait_event_freezable_timeout(sdp->sd_logd_waitq,
+ test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+ gfs2_ail_flush_reqd(sdp) ||
+ gfs2_jrnl_flush_reqd(sdp) ||
+ gfs2_withdrawn(sdp) ||
+ kthread_should_stop(),
+ t);
}
return 0;
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index fc905c2af53c..fc30ebdad83a 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -44,17 +44,6 @@ __releases(&sdp->sd_log_lock)
spin_unlock(&sdp->sd_log_lock);
}
-static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
- unsigned int value)
-{
- if (++value == sdp->sd_jdesc->jd_blocks) {
- value = 0;
- }
- sdp->sd_log_tail = value;
- sdp->sd_log_flush_tail = value;
- sdp->sd_log_head = value;
-}
-
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -70,29 +59,29 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
}
}
-extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
-extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
-extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
-extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
-extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
-extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
- unsigned int *extra_revokes);
-extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
- unsigned int *extra_revokes);
-extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
- u64 seq, u32 tail, u32 lblock, u32 flags,
- int op_flags);
-extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
- u32 type);
-extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
-extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
-extern void log_flush_wait(struct gfs2_sbd *sdp);
+void gfs2_ordered_del_inode(struct gfs2_inode *ip);
+unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
+void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
+void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes);
+void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes);
+void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ u64 seq, u32 tail, u32 lblock, u32 flags,
+ blk_opf_t op_flags);
+void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
+ u32 type);
+void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
+void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
+void log_flush_wait(struct gfs2_sbd *sdp);
-extern int gfs2_logd(void *data);
-extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
-extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
+int gfs2_logd(void *data);
+void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
+void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+void gfs2_ail_drain(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
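The log.c changes above move gfs2_jrnl_flush_reqd() and gfs2_ail_flush_reqd() ahead of gfs2_log_commit() and fold SDF_FORCE_AIL_FLUSH handling into the logd wait condition. As a rough userspace illustration of the two thresholds that wake logd (all values below are made up; the real thresholds are tunables derived from the journal size):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative state only, standing in for the sdp counters. */
static unsigned int jd_blocks = 8192;		/* journal size in blocks */
static unsigned int log_blks_free = 1000;	/* free journal blocks */
static unsigned int log_blks_needed = 200;	/* blocks reserved by transactions */
static unsigned int log_pinned = 500;		/* pinned buffers awaiting flush */
static unsigned int thresh1 = 512, thresh2 = 6144;

/* Mirrors gfs2_jrnl_flush_reqd(): enough pinned + needed blocks to flush. */
static bool jrnl_flush_reqd(void)
{
	return log_pinned + log_blks_needed >= thresh1;
}

/* Mirrors gfs2_ail_flush_reqd(): journal usage has crossed thresh2. */
static bool ail_flush_reqd(void)
{
	return jd_blocks - log_blks_free + log_blks_needed >= thresh2;
}

int main(void)
{
	printf("journal flush required: %d\n", jrnl_flush_reqd());
	printf("ail flush required: %d\n", ail_flush_reqd());
	return 0;
}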
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index ca0bb3a73912..97ebe457c00a 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -49,7 +49,7 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (test_set_buffer_pinned(bh))
gfs2_assert_withdraw(sdp, 0);
if (!buffer_uptodate(bh))
- gfs2_io_error_bh_wd(sdp, bh);
+ gfs2_io_error_bh(sdp, bh);
bd = bh->b_private;
/* If this buffer is in the AIL and it has already been written
* to in-place disk block, remove it from the AIL.
@@ -157,7 +157,9 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
/**
* gfs2_end_log_write_bh - end log write of pagecache data with buffers
* @sdp: The superblock
- * @bvec: The bio_vec
+ * @folio: The folio
+ * @offset: The first byte within the folio that completed
+ * @size: The number of bytes that completed
* @error: The i/o status
*
* This finds the relevant buffers and unlocks them and sets the
@@ -166,17 +168,13 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
* that is pinned in the pagecache.
*/
-static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
- struct bio_vec *bvec,
- blk_status_t error)
+static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio,
+ size_t offset, size_t size, blk_status_t error)
{
struct buffer_head *bh, *next;
- struct page *page = bvec->bv_page;
- unsigned size;
- bh = page_buffers(page);
- size = bvec->bv_len;
- while (bh_offset(bh) < bvec->bv_offset)
+ bh = folio_buffers(folio);
+ while (bh_offset(bh) < offset)
bh = bh->b_this_page;
do {
if (error)
@@ -186,7 +184,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
size -= bh->b_size;
brelse(bh);
bh = next;
- } while(bh && size);
+ } while (bh && size);
}
/**
@@ -203,23 +201,24 @@ static void gfs2_end_log_write(struct bio *bio)
{
struct gfs2_sbd *sdp = bio->bi_private;
struct bio_vec *bvec;
- struct page *page;
struct bvec_iter_all iter_all;
if (bio->bi_status) {
- if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
+ int err = blk_status_to_errno(bio->bi_status);
+
+ if (!cmpxchg(&sdp->sd_log_error, 0, err))
fs_err(sdp, "Error %d writing to journal, jid=%u\n",
- bio->bi_status, sdp->sd_jdesc->jd_jid);
- gfs2_withdraw_delayed(sdp);
- /* prevent more writes to the journal */
- clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- wake_up(&sdp->sd_logd_waitq);
+ err, sdp->sd_jdesc->jd_jid);
+ gfs2_withdraw(sdp);
}
bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
- if (page_has_buffers(page))
- gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
+ struct page *page = bvec->bv_page;
+ struct folio *folio = page_folio(page);
+
+ if (folio && folio_buffers(folio))
+ gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset,
+ bvec->bv_len, bio->bi_status);
else
mempool_free(page, gfs2_page_pool);
}
@@ -238,7 +237,7 @@ static void gfs2_end_log_write(struct bio *bio)
* there is no pending bio, then this is a no-op.
*/
-void gfs2_log_submit_bio(struct bio **biop, int opf)
+void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
{
struct bio *bio = *biop;
if (bio) {
@@ -265,10 +264,9 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
- struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+ struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
- bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
@@ -293,7 +291,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
*/
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
- struct bio **biop, int op,
+ struct bio **biop, enum req_op op,
bio_end_io_t *end_io, bool flush)
{
struct bio *bio = *biop;
@@ -360,8 +358,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
gfs2_log_incr_head(sdp);
- gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
- bh_offset(bh), dblock);
+ gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
+ bh->b_size, bh_offset(bh), dblock);
}
/**
@@ -392,46 +390,40 @@ static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
* Simply unlock the pages in the bio. The main thread will wait on them and
* process them in order as necessary.
*/
-
static void gfs2_end_log_read(struct bio *bio)
{
- struct page *page;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
- if (bio->bi_status) {
- int err = blk_status_to_errno(bio->bi_status);
+ int error = blk_status_to_errno(bio->bi_status);
+ struct folio_iter fi;
- SetPageError(page);
- mapping_set_error(page->mapping, err);
- }
- unlock_page(page);
+ bio_for_each_folio_all(fi, bio) {
+ /* We're abusing wb_err to get the error to gfs2_find_jhead */
+ filemap_set_wb_err(fi.folio->mapping, error);
+ folio_end_read(fi.folio, !error);
}
bio_put(bio);
}
/**
- * gfs2_jhead_pg_srch - Look for the journal head in a given page.
+ * gfs2_jhead_folio_search - Look for the journal head in a given folio.
* @jd: The journal descriptor
* @head: The journal head to start from
- * @page: The page to look in
+ * @folio: The folio to look in
*
* Returns: 1 if found, 0 otherwise.
*/
-
-static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head,
- struct page *page)
+static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head,
+ struct folio *folio)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host lh;
- void *kaddr = kmap_atomic(page);
+ void *kaddr;
unsigned int offset;
bool ret = false;
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+ kaddr = kmap_local_folio(folio, 0);
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
if (lh.lh_sequence >= head->lh_sequence)
@@ -442,7 +434,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
}
}
}
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
return ret;
}
@@ -453,48 +445,46 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
* @head: The journal head to start from
* @done: If set, perform only cleanup, else search and set if found.
*
- * Find the page with 'index' in the journal's mapping. Search the page for
+ * Find the folio with 'index' in the journal's mapping. Search the folio for
* the journal head if requested (cleanup == false). Release refs on the
- * page so the page cache can reclaim it (put_page() twice). We grabbed a
- * reference on this page two times, first when we did a find_or_create_page()
- * to obtain the page to add it to the bio and second when we do a
- * find_get_page() here to get the page to wait on while I/O on it is being
+ * folio so the page cache can reclaim it. We grabbed a
+ * reference on this folio twice, first when we did a filemap_grab_folio()
+ * to obtain the folio to add it to the bio and second when we do a
+ * filemap_get_folio() here to get the folio to wait on while I/O on it is being
* completed.
- * This function is also used to free up a page we might've grabbed but not
+ * This function is also used to free up a folio we might've grabbed but not
* used. Maybe we added it to a bio, but not submitted it for I/O. Or we
* submitted the I/O, but we already found the jhead so we only need to drop
- * our references to the page.
+ * our references to the folio.
*/
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
struct gfs2_log_header_host *head,
bool *done)
{
- struct page *page;
+ struct folio *folio;
- page = find_get_page(jd->jd_inode->i_mapping, index);
- wait_on_page_locked(page);
+ folio = filemap_get_folio(jd->jd_inode->i_mapping, index);
- if (PageError(page))
+ folio_wait_locked(folio);
+ if (!folio_test_uptodate(folio))
*done = true;
if (!*done)
- *done = gfs2_jhead_pg_srch(jd, head, page);
+ *done = gfs2_jhead_folio_search(jd, head, folio);
- put_page(page); /* Once for find_get_page */
- put_page(page); /* Once more for find_or_create_page */
+ /* filemap_get_folio() and the earlier filemap_grab_folio() */
+ folio_put_refs(folio, 2);
}
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
struct bio *new;
- new = bio_alloc(GFP_NOIO, nr_iovecs);
- bio_copy_dev(new, prev);
+ new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
+ bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
- new->bi_write_hint = prev->bi_write_hint;
- bio_chain(new, prev);
+ bio_chain(prev, new);
submit_bio(prev);
return new;
}
@@ -503,15 +493,13 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
* gfs2_find_jhead - find the head of a log
* @jd: The journal descriptor
* @head: The log descriptor for the head of the log is returned here
- * @keep_cache: If set inode pages will not be truncated
*
* Do a search of a journal by reading it in large chunks using bios and find
* the valid log entry with the highest sequence number. (i.e. the log head)
*
* Returns: 0 on success, errno otherwise
*/
-int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
- bool keep_cache)
+int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct address_space *mapping = jd->jd_inode->i_mapping;
@@ -521,9 +509,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
struct gfs2_journal_extent *je;
- int sz, ret = 0;
+ int ret = 0;
struct bio *bio = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
bool done = false;
errseq_t since;
@@ -536,11 +524,11 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
u64 dblock = je->dblock;
for (; block < je->lblock + je->blocks; block++, dblock++) {
- if (!page) {
- page = find_or_create_page(mapping,
- block >> shift, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
+ if (!folio) {
+ folio = filemap_grab_folio(mapping,
+ block >> shift);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
done = true;
goto out;
}
@@ -551,8 +539,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
- sz = bio_add_page(bio, page, bsize, off);
- if (sz == bsize)
+ if (bio_add_folio(bio, folio, bsize, off))
goto block_added;
}
if (off) {
@@ -572,12 +559,11 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
- sz = bio_add_page(bio, page, bsize, off);
- BUG_ON(sz != bsize);
+ bio_add_folio_nofail(bio, folio, bsize, off);
block_added:
off += bsize;
- if (off == PAGE_SIZE)
- page = NULL;
+ if (off == folio_size(folio))
+ folio = NULL;
if (blocks_submitted <= blocks_read + max_blocks) {
/* Keep at least one bio in flight */
continue;
@@ -601,8 +587,7 @@ out:
if (!ret)
ret = filemap_check_wb_err(mapping, since);
- if (!keep_cache)
- truncate_inode_pages(mapping, 0);
+ truncate_inode_pages(mapping, 0);
return ret;
}
@@ -625,15 +610,13 @@ static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
static void gfs2_check_magic(struct buffer_head *bh)
{
- void *kaddr;
__be32 *ptr;
clear_buffer_escaped(bh);
- kaddr = kmap_atomic(bh->b_page);
- ptr = kaddr + bh_offset(bh);
+ ptr = kmap_local_folio(bh->b_folio, bh_offset(bh));
if (*ptr == cpu_to_be32(GFS2_MAGIC))
set_buffer_escaped(bh);
- kunmap_atomic(kaddr);
+ kunmap_local(ptr);
}
static int blocknr_cmp(void *priv, const struct list_head *a,
@@ -699,14 +682,12 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
lock_buffer(bd2->bd_bh);
if (buffer_escaped(bd2->bd_bh)) {
- void *kaddr;
+ void *p;
+
page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
- ptr = page_address(page);
- kaddr = kmap_atomic(bd2->bd_bh->b_page);
- memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
- bd2->bd_bh->b_size);
- kunmap_atomic(kaddr);
- *(__be32 *)ptr = 0;
+ p = page_address(page);
+ memcpy_from_page(p, bd2->bd_bh->b_page, bh_offset(bd2->bd_bh), bd2->bd_bh->b_size);
+ *(__be32 *)p = 0;
clear_buffer_escaped(bd2->bd_bh);
unlock_buffer(bd2->bd_bh);
brelse(bd2->bd_bh);
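gfs2_check_magic() and the escaped-buffer branch in gfs2_before_commit() above implement block "escaping": when a journaled data block happens to begin with GFS2_MAGIC, the copy written to the log gets its first 32 bits zeroed so the log scanner cannot mistake it for log metadata (the escaped flag presumably lets replay restore the magic; that step is not shown in this hunk). A standalone userspace sketch of the escape step, with made-up buffer sizes:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(): GFS2 stores the magic big-endian */

#define GFS2_MAGIC 0x01161970

/* Copy one block into a log buffer, escaping a leading magic number. */
static int log_copy_escaped(void *dst, const void *src, size_t bsize)
{
	uint32_t first;

	memcpy(dst, src, bsize);
	memcpy(&first, src, sizeof(first));
	if (first == htonl(GFS2_MAGIC)) {
		memset(dst, 0, sizeof(uint32_t));	/* like *(__be32 *)p = 0 */
		return 1;				/* buffer is "escaped" */
	}
	return 0;
}

int main(void)
{
	unsigned char blk[16] = { 0x01, 0x16, 0x19, 0x70 }, copy[16];

	printf("escaped=%d, first byte of log copy=%02x\n",
	       log_copy_escaped(copy, blk, sizeof(blk)), copy[0]);
	return 0;
}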
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index f707601597dc..be740bf33666 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -11,16 +11,18 @@
#include "incore.h"
extern const struct gfs2_log_operations *gfs2_log_ops[];
-extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
-extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
- struct page *page, unsigned size, unsigned offset,
- u64 blkno);
-extern void gfs2_log_submit_bio(struct bio **biop, int opf);
-extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head, bool keep_cache);
-extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+
+void gfs2_log_incr_head(struct gfs2_sbd *sdp);
+u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ struct page *page, unsigned size, unsigned offset,
+ u64 blkno);
+void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
+void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
+int gfs2_find_jhead(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head);
+void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
return sdp->sd_ldptrs;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 28d0eb23e18e..9d65719353fa 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -38,7 +38,6 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode);
atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex);
- INIT_LIST_HEAD(&ip->i_trunc_list);
INIT_LIST_HEAD(&ip->i_ordered);
ip->i_qadata = NULL;
gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
@@ -52,7 +51,6 @@ static void gfs2_init_glock_once(void *foo)
{
struct gfs2_glock *gl = foo;
- spin_lock_init(&gl->gl_lockref.lock);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_lru);
INIT_LIST_HEAD(&gl->gl_ail_list);
@@ -62,11 +60,10 @@ static void gfs2_init_glock_once(void *foo)
static void gfs2_init_gl_aspace_once(void *foo)
{
- struct gfs2_glock *gl = foo;
- struct address_space *mapping = (struct address_space *)(gl + 1);
+ struct gfs2_glock_aspace *gla = foo;
- gfs2_init_glock_once(gl);
- address_space_init_once(mapping);
+ gfs2_init_glock_once(&gla->glock);
+ address_space_init_once(&gla->mapping);
}
/**
@@ -104,8 +101,7 @@ static int __init init_gfs2_fs(void)
goto fail_cachep1;
gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
- sizeof(struct gfs2_glock) +
- sizeof(struct address_space),
+ sizeof(struct gfs2_glock_aspace),
0, 0, gfs2_init_gl_aspace_once);
if (!gfs2_glock_aspace_cachep)
@@ -114,7 +110,6 @@ static int __init init_gfs2_fs(void)
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|
SLAB_ACCOUNT,
gfs2_init_inode_once);
if (!gfs2_inode_cachep)
@@ -150,22 +145,15 @@ static int __init init_gfs2_fs(void)
if (!gfs2_trans_cachep)
goto fail_cachep8;
- error = register_shrinker(&gfs2_qd_shrinker);
+ error = gfs2_qd_shrinker_init();
if (error)
goto fail_shrinker;
- error = register_filesystem(&gfs2_fs_type);
- if (error)
- goto fail_fs1;
-
- error = register_filesystem(&gfs2meta_fs_type);
- if (error)
- goto fail_fs2;
-
error = -ENOMEM;
- gfs_recovery_wq = alloc_workqueue("gfs_recovery",
- WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
- if (!gfs_recovery_wq)
+ gfs2_recovery_wq = alloc_workqueue("gfs2_recovery",
+ WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU,
+ 0);
+ if (!gfs2_recovery_wq)
goto fail_wq1;
gfs2_control_wq = alloc_workqueue("gfs2_control",
@@ -173,7 +161,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_control_wq)
goto fail_wq2;
- gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
+ gfs2_freeze_wq = alloc_workqueue("gfs2_freeze", WQ_PERCPU, 0);
if (!gfs2_freeze_wq)
goto fail_wq3;
@@ -183,23 +171,31 @@ static int __init init_gfs2_fs(void)
goto fail_mempool;
gfs2_register_debugfs();
+ error = register_filesystem(&gfs2_fs_type);
+ if (error)
+ goto fail_fs1;
+
+ error = register_filesystem(&gfs2meta_fs_type);
+ if (error)
+ goto fail_fs2;
+
pr_info("GFS2 installed\n");
return 0;
+fail_fs2:
+ unregister_filesystem(&gfs2_fs_type);
+fail_fs1:
+ mempool_destroy(gfs2_page_pool);
fail_mempool:
destroy_workqueue(gfs2_freeze_wq);
fail_wq3:
destroy_workqueue(gfs2_control_wq);
fail_wq2:
- destroy_workqueue(gfs_recovery_wq);
+ destroy_workqueue(gfs2_recovery_wq);
fail_wq1:
- unregister_filesystem(&gfs2meta_fs_type);
-fail_fs2:
- unregister_filesystem(&gfs2_fs_type);
-fail_fs1:
- unregister_shrinker(&gfs2_qd_shrinker);
+ gfs2_qd_shrinker_exit();
fail_shrinker:
kmem_cache_destroy(gfs2_trans_cachep);
fail_cachep8:
@@ -232,12 +228,12 @@ fail_lru:
static void __exit exit_gfs2_fs(void)
{
- unregister_shrinker(&gfs2_qd_shrinker);
+ gfs2_qd_shrinker_exit();
gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
- destroy_workqueue(gfs_recovery_wq);
+ destroy_workqueue(gfs2_recovery_wq);
destroy_workqueue(gfs2_control_wq);
destroy_workqueue(gfs2_freeze_wq);
list_lru_destroy(&gfs2_qd_lru);
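The slab changes above, together with the gfs2_mapping2sbd() hunk in meta_io.h further below, replace the old "glock immediately followed by an address_space" allocation trick with an explicit struct gfs2_glock_aspace, so the enclosing object can be recovered with container_of() instead of pointer arithmetic. A minimal userspace illustration of that container_of() pattern; the struct definitions here are stand-ins, not the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mapping { int dummy; };		/* stand-in for struct address_space */
struct glock { int id; };		/* stand-in for struct gfs2_glock */

/* Stand-in for struct gfs2_glock_aspace: glock and mapping in one object. */
struct glock_aspace {
	struct glock glock;
	struct mapping mapping;
};

int main(void)
{
	struct glock_aspace gla = { .glock = { .id = 42 } };
	struct mapping *m = &gla.mapping;

	/* Recover the enclosing object from the mapping pointer, the way
	 * gfs2_mapping2sbd() now uses container_of(). */
	struct glock_aspace *gla2 = container_of(m, struct glock_aspace, mapping);

	printf("glock id = %d\n", gla2->glock.id);
	return 0;
}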
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 72d30a682ece..e4356198d8d8 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -30,16 +30,16 @@
#include "util.h"
#include "trace_gfs2.h"
-static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
+static void gfs2_aspace_write_folio(struct folio *folio,
+ struct writeback_control *wbc)
{
struct buffer_head *bh, *head;
int nr_underway = 0;
- int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
+ blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
- BUG_ON(!PageLocked(page));
- BUG_ON(!page_has_buffers(page));
+ BUG_ON(!folio_test_locked(folio));
- head = page_buffers(page);
+ head = folio_buffers(folio);
bh = head;
do {
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
continue;
}
if (test_clear_buffer_dirty(bh)) {
@@ -66,38 +66,52 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
} while ((bh = bh->b_this_page) != head);
/*
- * The page and its buffers are protected by PageWriteback(), so we can
- * drop the bh refcounts early.
+ * The folio and its buffers are protected from truncation by
+ * the writeback flag, so we can drop the bh refcounts early.
*/
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh(REQ_OP_WRITE, write_flags, bh);
+ submit_bh(REQ_OP_WRITE | write_flags, bh);
nr_underway++;
}
bh = next;
} while (bh != head);
- unlock_page(page);
+ folio_unlock(folio);
if (nr_underway == 0)
- end_page_writeback(page);
+ folio_end_writeback(folio);
+}
- return 0;
+static int gfs2_aspace_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct folio *folio = NULL;
+ int error;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ gfs2_aspace_write_folio(folio, wbc);
+
+ return error;
}
const struct address_space_operations gfs2_meta_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .writepage = gfs2_aspace_writepage,
- .releasepage = gfs2_releasepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .writepages = gfs2_aspace_writepages,
+ .release_folio = gfs2_release_folio,
+ .migrate_folio = buffer_migrate_folio_norefs,
};
const struct address_space_operations gfs2_rgrp_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .writepage = gfs2_aspace_writepage,
- .releasepage = gfs2_releasepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .writepages = gfs2_aspace_writepages,
+ .release_folio = gfs2_release_folio,
+ .migrate_folio = buffer_migrate_folio_norefs,
};
/**
@@ -113,50 +127,45 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
struct address_space *mapping = gfs2_glock2aspace(gl);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct page *page;
+ struct folio *folio;
struct buffer_head *bh;
unsigned int shift;
unsigned long index;
unsigned int bufnum;
if (mapping == NULL)
- mapping = &sdp->sd_aspace;
+ mapping = gfs2_aspace(sdp);
shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
index = blkno >> shift; /* convert block to page */
bufnum = blkno - (index << shift); /* block buf index within page */
if (create) {
- for (;;) {
- page = grab_cache_page(mapping, index);
- if (page)
- break;
- yield();
- }
- if (!page_has_buffers(page))
- create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(mapping) | __GFP_NOFAIL);
+ bh = folio_buffers(folio);
+ if (!bh)
+ bh = create_empty_buffers(folio,
+ sdp->sd_sb.sb_bsize, 0);
} else {
- page = find_get_page_flags(mapping, index,
- FGP_LOCK|FGP_ACCESSED);
- if (!page)
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED, 0);
+ if (IS_ERR(folio))
return NULL;
- if (!page_has_buffers(page)) {
- bh = NULL;
- goto out_unlock;
- }
+ bh = folio_buffers(folio);
}
- /* Locate header for our buffer within our page */
- for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
- /* Do nothing */;
- get_bh(bh);
+ if (!bh)
+ goto out_unlock;
+ bh = get_nth_bh(bh, bufnum);
if (!buffer_mapped(bh))
map_bh(bh, sdp->sd_vfs, blkno);
out_unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return bh;
}
@@ -191,15 +200,14 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
static void gfs2_meta_read_endio(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- struct buffer_head *bh = page_buffers(page);
- unsigned int len = bvec->bv_len;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
+ struct buffer_head *bh = folio_buffers(folio);
+ size_t len = fi.length;
- while (bh_offset(bh) < bvec->bv_offset)
+ while (bh_offset(bh) < fi.offset)
bh = bh->b_this_page;
do {
struct buffer_head *next = bh->b_this_page;
@@ -215,19 +223,17 @@ static void gfs2_meta_read_endio(struct bio *bio)
* Submit several consecutive buffer head I/O requests as a single bio I/O
* request. (See submit_bh_wbc.)
*/
-static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
- int num)
+static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
{
while (num > 0) {
struct buffer_head *bh = *bhs;
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, num);
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
+ bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> SECTOR_SHIFT);
while (num > 0) {
bh = *bhs;
- if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
+ if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) {
BUG_ON(bio->bi_iter.bi_size == 0);
break;
}
@@ -235,7 +241,6 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
num--;
}
bio->bi_end_io = gfs2_meta_read_endio;
- bio_set_op_attrs(bio, op, op_flags);
submit_bio(bio);
}
}
@@ -258,7 +263,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct buffer_head *bh, *bhs[2];
int num = 0;
- if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp)) {
+ if (gfs2_withdrawn(sdp)) {
*bhp = NULL;
return -EIO;
}
@@ -288,7 +293,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
}
}
- gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
+ gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
if (!(flags & DIO_WAIT))
return 0;
@@ -297,7 +302,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
if (unlikely(!buffer_uptodate(bh))) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh_wd(sdp, bh);
+ gfs2_io_error_bh(sdp, bh);
brelse(bh);
*bhp = NULL;
return -EIO;
@@ -316,7 +321,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
- if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
wait_on_buffer(bh);
@@ -324,10 +329,10 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (!buffer_uptodate(bh)) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh_wd(sdp, bh);
+ gfs2_io_error_bh(sdp, bh);
return -EIO;
}
- if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
+ if (gfs2_withdrawn(sdp))
return -EIO;
return 0;
@@ -335,7 +340,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
- struct address_space *mapping = bh->b_page->mapping;
+ struct address_space *mapping = bh->b_folio->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
struct gfs2_trans *tr = current->journal_info;
@@ -406,26 +411,20 @@ static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
struct address_space *mapping = ip->i_inode.i_mapping;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct page *page;
+ struct folio *folio;
struct buffer_head *bh;
unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
unsigned long index = blkno >> shift; /* convert block to page */
unsigned int bufnum = blkno - (index << shift);
- page = find_get_page_flags(mapping, index, FGP_LOCK|FGP_ACCESSED);
- if (!page)
- return NULL;
- if (!page_has_buffers(page)) {
- unlock_page(page);
- put_page(page);
+ folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
+ if (IS_ERR(folio))
return NULL;
- }
- /* Locate header for our buffer within our page */
- for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
- /* Do nothing */;
- get_bh(bh);
- unlock_page(page);
- put_page(page);
+ bh = folio_buffers(folio);
+ if (bh)
+ bh = get_nth_bh(bh, bufnum);
+ folio_unlock(folio);
+ folio_put(folio);
return bh;
}
@@ -443,6 +442,10 @@ void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
struct buffer_head *bh;
int ty;
+ /* This can only happen during incomplete inode creation. */
+ if (!ip->i_gl)
+ return;
+
gfs2_ail1_wipe(sdp, bstart, blen);
while (blen) {
ty = REMOVE_META;
@@ -526,8 +529,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
- if (!buffer_locked(first_bh))
- ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);
+ bh_read_nowait(first_bh, REQ_META | REQ_PRIO);
dblock++;
extlen--;
@@ -535,10 +537,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
while (extlen) {
bh = gfs2_getbuf(gl, dblock, CREATE);
- if (!buffer_uptodate(bh) && !buffer_locked(bh))
- ll_rw_block(REQ_OP_READ,
- REQ_RAHEAD | REQ_META | REQ_PRIO,
- 1, &bh);
+ bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
brelse(bh);
dblock++;
extlen--;
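The ll_rw_block() calls above become bh_read_nowait() and bh_readahead(), which fold the old "only if not uptodate and not already locked" checks into the helpers themselves. A rough sketch of the readahead variant, assuming the usual buffer-lock semantics (illustrative, not the literal implementation):

static inline void example_bh_readahead(struct buffer_head *bh,
					blk_opf_t op_flags)
{
	/* Only submit a read if nobody else is working on this buffer. */
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);	/* async, no wait */
		else
			unlock_buffer(bh);
	}
}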
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 21880d72081a..b7c8a6684d02 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -40,29 +40,29 @@ extern const struct address_space_operations gfs2_rgrp_aops;
static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
{
struct inode *inode = mapping->host;
- if (mapping->a_ops == &gfs2_meta_aops)
- return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
- else if (mapping->a_ops == &gfs2_rgrp_aops)
- return container_of(mapping, struct gfs2_sbd, sd_aspace);
- else
+ if (mapping->a_ops == &gfs2_meta_aops) {
+ struct gfs2_glock_aspace *gla =
+ container_of(mapping, struct gfs2_glock_aspace, mapping);
+ return gla->glock.gl_name.ln_sbd;
+ } else
return inode->i_sb->s_fs_info;
}
-extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
-extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
- int rahead, struct buffer_head **bhp);
-extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
- int create);
+struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
+int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+ int rahead, struct buffer_head **bhp);
+int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
+ int create);
enum {
REMOVE_JDATA = 0,
REMOVE_META = 1,
};
-extern void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
-extern void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
-extern int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
- struct buffer_head **bhp);
+void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
+void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
+int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
+ struct buffer_head **bhp);
static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
struct buffer_head **bhp)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 7f8410d8fdc1..e7a88b717991 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -60,19 +60,21 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = BIT(18);
gt->gt_complain_secs = 10;
+ gt->gt_withdraw_helper_timeout = 5;
}
void free_sbd(struct gfs2_sbd *sdp)
{
- if (sdp->sd_lkstats)
- free_percpu(sdp->sd_lkstats);
+ struct super_block *sb = sdp->sd_vfs;
+
+ free_percpu(sdp->sd_lkstats);
+ sb->s_fs_info = NULL;
kfree(sdp);
}
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
- struct address_space *mapping;
sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
if (!sdp)
@@ -87,11 +89,11 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
- init_waitqueue_head(&sdp->sd_glock_wait);
+ init_waitqueue_head(&sdp->sd_kill_wait);
init_waitqueue_head(&sdp->sd_async_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
init_completion(&sdp->sd_locking_init);
- init_completion(&sdp->sd_wdack);
+ init_completion(&sdp->sd_withdraw_helper);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
@@ -103,25 +105,12 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_completion(&sdp->sd_journal_ready);
INIT_LIST_HEAD(&sdp->sd_quota_list);
- mutex_init(&sdp->sd_quota_mutex);
mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
- INIT_LIST_HEAD(&sdp->sd_trunc_list);
- spin_lock_init(&sdp->sd_trunc_lock);
spin_lock_init(&sdp->sd_bitmap_lock);
INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
- mapping = &sdp->sd_aspace;
-
- address_space_init_once(mapping);
- mapping->a_ops = &gfs2_rgrp_aops;
- mapping->host = sb->s_bdev->bd_inode;
- mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->private_data = NULL;
- mapping->writeback_index = 0;
-
spin_lock_init(&sdp->sd_log_lock);
atomic_set(&sdp->sd_log_pinned, 0);
INIT_LIST_HEAD(&sdp->sd_log_revokes);
@@ -137,8 +126,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_rwsem(&sdp->sd_log_flush_lock);
atomic_set(&sdp->sd_log_in_flight, 0);
init_waitqueue_head(&sdp->sd_log_flush_wait);
- atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
mutex_init(&sdp->sd_freeze_mutex);
+ INIT_LIST_HEAD(&sdp->sd_dead_glocks);
return sdp;
@@ -175,31 +164,22 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
return -EINVAL;
}
- if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
+ if (sb->sb_bsize < SECTOR_SIZE || sb->sb_bsize > PAGE_SIZE ||
(sb->sb_bsize & (sb->sb_bsize - 1))) {
pr_warn("Invalid block size\n");
return -EINVAL;
}
-
+ if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) {
+ pr_warn("Invalid block size shift\n");
+ return -EINVAL;
+ }
return 0;
}
-static void end_bio_io_page(struct bio *bio)
-{
- struct page *page = bio->bi_private;
-
- if (!bio->bi_status)
- SetPageUptodate(page);
- else
- pr_warn("error %d reading superblock\n", bio->bi_status);
- unlock_page(page);
-}
-
-static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
+static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str)
{
struct gfs2_sb_host *sb = &sdp->sd_sb;
struct super_block *s = sdp->sd_vfs;
- const struct gfs2_sb *str = buf;
sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
@@ -214,7 +194,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
- memcpy(&s->s_uuid, str->sb_uuid, 16);
+ super_set_uuid(s, str->sb_uuid, 16);
}
/**
@@ -238,38 +218,22 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
- struct super_block *sb = sdp->sd_vfs;
- struct gfs2_sb *p;
- struct page *page;
- struct bio *bio;
+ struct gfs2_sb *sb;
+ int err;
- page = alloc_page(GFP_NOFS);
- if (unlikely(!page))
+ sb = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (unlikely(!sb))
return -ENOMEM;
-
- ClearPageUptodate(page);
- ClearPageDirty(page);
- lock_page(page);
-
- bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
- bio_set_dev(bio, sb->s_bdev);
- bio_add_page(bio, page, PAGE_SIZE, 0);
-
- bio->bi_end_io = end_bio_io_page;
- bio->bi_private = page;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
- submit_bio(bio);
- wait_on_page_locked(page);
- bio_put(bio);
- if (!PageUptodate(page)) {
- __free_page(page);
- return -EIO;
- }
- p = kmap(page);
- gfs2_sb_in(sdp, p);
- kunmap(page);
- __free_page(page);
+ err = bdev_rw_virt(sdp->sd_vfs->s_bdev,
+ sector << (sdp->sd_vfs->s_blocksize_bits - SECTOR_SHIFT),
+ sb, PAGE_SIZE, REQ_OP_READ | REQ_META);
+ if (err) {
+ pr_warn("error %d reading superblock\n", err);
+ kfree(sb);
+ return err;
+ }
+ gfs2_sb_in(sdp, sb);
+ kfree(sb);
return gfs2_check_sb(sdp, silent);
}
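bdev_rw_virt() expects a 512-byte sector index, hence the shift by (s_blocksize_bits - SECTOR_SHIFT) above. A small illustration of that conversion:

/* Example: with 4096-byte filesystem blocks, blocksize_bits = 12 and
 * SECTOR_SHIFT = 9, so filesystem block N maps to 512-byte sector N << 3. */
static inline sector_t example_fsblock_to_sector(sector_t block,
						 unsigned int blocksize_bits)
{
	return block << (blocksize_bits - SECTOR_SHIFT);
}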
@@ -294,8 +258,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
return error;
}
- sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
- GFS2_BASIC_BLOCK_SHIFT;
+ sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - SECTOR_SHIFT;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode)) / sizeof(u64);
@@ -385,8 +348,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
if (!table[0])
table = sdp->sd_vfs->s_id;
- strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
- strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
+ BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN);
+
+ strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN);
+ strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN);
table = sdp->sd_table_name;
while ((table = strchr(table, '/')))
@@ -405,7 +370,8 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
error = gfs2_glock_nq_num(sdp,
GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
- LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
+ LM_ST_EXCLUSIVE,
+ LM_FLAG_RECOVER | GL_NOCACHE | GL_NOPID,
mount_gh);
if (error) {
fs_err(sdp, "can't acquire mount glock: %d\n", error);
@@ -415,7 +381,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
error = gfs2_glock_nq_num(sdp,
GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
+ LM_FLAG_RECOVER | GL_EXACT | GL_NOPID,
&sdp->sd_live_gh);
if (error) {
fs_err(sdp, "can't acquire live glock: %d\n", error);
@@ -432,7 +398,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
CREATE, &sdp->sd_freeze_gl);
if (error) {
- fs_err(sdp, "can't create transaction glock: %d\n", error);
+ fs_err(sdp, "can't create freeze glock: %d\n", error);
goto fail_rename;
}
@@ -520,7 +486,9 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
goto out;
}
- sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
+ ret = -EINVAL;
+ if (!sb_set_blocksize(sb, sdp->sd_sb.sb_bsize))
+ goto out;
/* Get the root inode */
no_addr = sdp->sd_sb.sb_root_dir.no_addr;
@@ -575,8 +543,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
mutex_lock(&sdp->sd_jindex_mutex);
for (;;) {
- struct gfs2_inode *jip;
-
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
if (error)
break;
@@ -617,8 +583,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
d_mark_dontcache(jd->jd_inode);
spin_lock(&sdp->sd_jindex_spin);
jd->jd_jid = sdp->sd_journals++;
- jip = GFS2_I(jd->jd_inode);
- jd->jd_no_addr = jip->i_no_addr;
list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
spin_unlock(&sdp->sd_jindex_spin);
}
@@ -647,7 +611,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
struct gfs2_jdesc *jd;
struct gfs2_inode *ip;
- sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
+ sdp->sd_statfs_inode = gfs2_lookup_meta(master, "statfs");
if (IS_ERR(sdp->sd_statfs_inode)) {
error = PTR_ERR(sdp->sd_statfs_inode);
fs_err(sdp, "can't read in statfs inode: %d\n", error);
@@ -656,7 +620,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
if (sdp->sd_args.ar_spectator)
goto out;
- pn = gfs2_lookup_simple(master, "per_node");
+ pn = gfs2_lookup_meta(master, "per_node");
if (IS_ERR(pn)) {
error = PTR_ERR(pn);
fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -673,7 +637,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
goto free_local;
}
sprintf(buf, "statfs_change%u", jd->jd_jid);
- lsi->si_sc_inode = gfs2_lookup_simple(pn, buf);
+ lsi->si_sc_inode = gfs2_lookup_meta(pn, buf);
if (IS_ERR(lsi->si_sc_inode)) {
error = PTR_ERR(lsi->si_sc_inode);
fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
@@ -691,7 +655,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
iput(pn);
pn = NULL;
ip = GFS2_I(sdp->sd_sc_inode);
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID,
&sdp->sd_sc_gh);
if (error) {
fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
@@ -732,15 +696,13 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
struct inode *master = d_inode(sdp->sd_master_dir);
struct gfs2_holder ji_gh;
struct gfs2_inode *ip;
- int jindex = 1;
int error = 0;
- if (undo) {
- jindex = 0;
+ gfs2_holder_mark_uninitialized(&ji_gh);
+ if (undo)
goto fail_statfs;
- }
- sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
+ sdp->sd_jindex = gfs2_lookup_meta(master, "jindex");
if (IS_ERR(sdp->sd_jindex)) {
fs_err(sdp, "can't lookup journal index: %d\n", error);
return PTR_ERR(sdp->sd_jindex);
@@ -780,7 +742,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
&gfs2_journal_glops,
LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP | GL_NOCACHE,
+ LM_FLAG_RECOVER | GL_NOPID,
&sdp->sd_journal_gh);
if (error) {
fs_err(sdp, "can't acquire journal glock: %d\n", error);
@@ -788,9 +750,9 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
}
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
- sdp->sd_jinode_gl = ip->i_gl;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
+ LM_FLAG_RECOVER | GL_EXACT |
+ GL_NOCACHE | GL_NOPID,
&sdp->sd_jinode_gh);
if (error) {
fs_err(sdp, "can't acquire journal inode glock: %d\n",
@@ -849,24 +811,20 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
sdp->sd_log_idle = 1;
set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
gfs2_glock_dq_uninit(&ji_gh);
- jindex = 0;
INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
return 0;
fail_statfs:
uninit_statfs(sdp);
fail_jinode_gh:
- /* A withdraw may have done dq/uninit so now we need to check it */
- if (!sdp->sd_args.ar_spectator &&
- gfs2_holder_initialized(&sdp->sd_jinode_gh))
+ if (!sdp->sd_args.ar_spectator)
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
- if (!sdp->sd_args.ar_spectator &&
- gfs2_holder_initialized(&sdp->sd_journal_gh))
+ if (!sdp->sd_args.ar_spectator)
gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
gfs2_jindex_free(sdp);
- if (jindex)
+ if (gfs2_holder_initialized(&ji_gh))
gfs2_glock_dq_uninit(&ji_gh);
fail:
iput(sdp->sd_jindex);
@@ -889,7 +847,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
goto fail;
/* Read in the resource index inode */
- sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
+ sdp->sd_rindex = gfs2_lookup_meta(master, "rindex");
if (IS_ERR(sdp->sd_rindex)) {
error = PTR_ERR(sdp->sd_rindex);
fs_err(sdp, "can't get resource index inode: %d\n", error);
@@ -898,7 +856,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
sdp->sd_rindex_uptodate = 0;
/* Read in the quota inode */
- sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
+ sdp->sd_quota_inode = gfs2_lookup_meta(master, "quota");
if (IS_ERR(sdp->sd_quota_inode)) {
error = PTR_ERR(sdp->sd_quota_inode);
fs_err(sdp, "can't get quota file inode: %d\n", error);
@@ -942,7 +900,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
if (undo)
goto fail_qc_gh;
- pn = gfs2_lookup_simple(master, "per_node");
+ pn = gfs2_lookup_meta(master, "per_node");
if (IS_ERR(pn)) {
error = PTR_ERR(pn);
fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -950,7 +908,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
}
sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
- sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
+ sdp->sd_qc_inode = gfs2_lookup_meta(pn, buf);
if (IS_ERR(sdp->sd_qc_inode)) {
error = PTR_ERR(sdp->sd_qc_inode);
fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
@@ -961,7 +919,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
pn = NULL;
ip = GFS2_I(sdp->sd_qc_inode);
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID,
&sdp->sd_qc_gh);
if (error) {
fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
@@ -1075,8 +1033,8 @@ hostdata_error:
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
- if (likely(!gfs2_withdrawn(sdp)) && lm->lm_unmount)
- lm->lm_unmount(sdp);
+ if (!gfs2_withdrawn(sdp) && lm->lm_unmount)
+ lm->lm_unmount(sdp, true);
}
static int wait_on_journal(struct gfs2_sbd *sdp)
@@ -1104,29 +1062,46 @@ static int init_threads(struct gfs2_sbd *sdp)
struct task_struct *p;
int error = 0;
- p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
+ p = kthread_create(gfs2_logd, sdp, "gfs2_logd/%s", sdp->sd_fsname);
if (IS_ERR(p)) {
error = PTR_ERR(p);
- fs_err(sdp, "can't start logd thread: %d\n", error);
+ fs_err(sdp, "can't create logd thread: %d\n", error);
return error;
}
+ get_task_struct(p);
sdp->sd_logd_process = p;
- p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
+ p = kthread_create(gfs2_quotad, sdp, "gfs2_quotad/%s", sdp->sd_fsname);
if (IS_ERR(p)) {
error = PTR_ERR(p);
- fs_err(sdp, "can't start quotad thread: %d\n", error);
+ fs_err(sdp, "can't create quotad thread: %d\n", error);
goto fail;
}
+ get_task_struct(p);
sdp->sd_quotad_process = p;
+
+ wake_up_process(sdp->sd_logd_process);
+ wake_up_process(sdp->sd_quotad_process);
return 0;
fail:
- kthread_stop(sdp->sd_logd_process);
+ kthread_stop_put(sdp->sd_logd_process);
sdp->sd_logd_process = NULL;
return error;
}
+void gfs2_destroy_threads(struct gfs2_sbd *sdp)
+{
+ if (sdp->sd_logd_process) {
+ kthread_stop_put(sdp->sd_logd_process);
+ sdp->sd_logd_process = NULL;
+ }
+ if (sdp->sd_quotad_process) {
+ kthread_stop_put(sdp->sd_quotad_process);
+ sdp->sd_quotad_process = NULL;
+ }
+}
+
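init_threads() now pairs kthread_create() with get_task_struct(), so the task pointers stay valid until gfs2_destroy_threads() runs; kthread_stop_put() then drops both. A sketch of what that helper combines (behavior as assumed from kernel/kthread.c):

/* Sketch: stop the kthread, then drop the reference taken at creation. */
static inline void example_kthread_stop_put(struct task_struct *k)
{
	kthread_stop(k);	/* wait for the thread function to return */
	put_task_struct(k);	/* release the get_task_struct() reference */
}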
/**
* gfs2_fill_super - Read in superblock
* @sb: The VFS superblock
@@ -1140,7 +1115,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
int silent = fc->sb_flags & SB_SILENT;
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
- struct gfs2_holder freeze_gh;
+ struct address_space *mapping;
int error;
sdp = init_sbd(sb);
@@ -1162,7 +1137,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_NOSEC;
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
- sb->s_d_op = &gfs2_dops;
+
+ set_default_d_op(sb, &gfs2_dops);
sb->s_export_op = &gfs2_export_ops;
sb->s_qcop = &gfs2_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
@@ -1172,10 +1148,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
/* Set up the buffer cache and fill in some fake block size values
to allow us to read-in the on-disk superblock. */
- sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
+ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, SECTOR_SIZE);
+ error = -EINVAL;
+ if (!sdp->sd_sb.sb_bsize)
+ goto fail_free;
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
- sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
- GFS2_BASIC_BLOCK_SHIFT;
+ sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - SECTOR_SHIFT;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
@@ -1188,15 +1166,41 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
sdp->sd_tune.gt_statfs_quantum = 30;
}
+ /* Set up an address space for metadata writes */
+ sdp->sd_inode = new_inode(sb);
+ error = -ENOMEM;
+ if (!sdp->sd_inode)
+ goto fail_free;
+ sdp->sd_inode->i_ino = GFS2_BAD_INO;
+ sdp->sd_inode->i_size = OFFSET_MAX;
+
+ mapping = gfs2_aspace(sdp);
+ mapping->a_ops = &gfs2_rgrp_aops;
+ gfs2_setup_inode(sdp->sd_inode);
+
error = init_names(sdp, silent);
if (error)
- goto fail_free;
+ goto fail_iput;
snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
+ error = -ENOMEM;
+ sdp->sd_glock_wq = alloc_workqueue("gfs2-glock/%s",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE | WQ_PERCPU,
+ 0,
+ sdp->sd_fsname);
+ if (!sdp->sd_glock_wq)
+ goto fail_iput;
+
+ sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
+ WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU, 0,
+ sdp->sd_fsname);
+ if (!sdp->sd_delete_wq)
+ goto fail_glock_wq;
+
error = gfs2_sys_fs_add(sdp);
if (error)
- goto fail_free;
+ goto fail_delete_wq;
gfs2_create_debugfs_file(sdp);
@@ -1204,6 +1208,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
if (error)
goto fail_debug;
+ INIT_WORK(&sdp->sd_withdraw_work, gfs2_withdraw_func);
+
error = init_locking(sdp, &mount_gh, DO);
if (error)
goto fail_lm;
@@ -1257,27 +1263,20 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
if (!sb_rdonly(sb)) {
error = init_threads(sdp);
- if (error) {
- gfs2_withdraw_delayed(sdp);
+ if (error)
goto fail_per_node;
- }
}
- error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+ error = gfs2_freeze_lock_shared(sdp);
if (error)
goto fail_per_node;
if (!sb_rdonly(sb))
error = gfs2_make_fs_rw(sdp);
- gfs2_freeze_unlock(&freeze_gh);
if (error) {
- if (sdp->sd_quotad_process)
- kthread_stop(sdp->sd_quotad_process);
- sdp->sd_quotad_process = NULL;
- if (sdp->sd_logd_process)
- kthread_stop(sdp->sd_logd_process);
- sdp->sd_logd_process = NULL;
+ gfs2_freeze_unlock(sdp);
+ gfs2_destroy_threads(sdp);
fs_err(sdp, "can't make FS RW: %d\n", error);
goto fail_per_node;
}
@@ -1306,9 +1305,15 @@ fail_lm:
fail_debug:
gfs2_delete_debugfs_file(sdp);
gfs2_sys_fs_del(sdp);
+fail_delete_wq:
+ destroy_workqueue(sdp->sd_delete_wq);
+fail_glock_wq:
+ if (sdp->sd_glock_wq)
+ destroy_workqueue(sdp->sd_glock_wq);
+fail_iput:
+ iput(sdp->sd_inode);
fail_free:
free_sbd(sdp);
- sb->s_fs_info = NULL;
return error;
}
@@ -1375,6 +1380,7 @@ static const struct constant_table gfs2_param_quota[] = {
{"off", GFS2_QUOTA_OFF},
{"account", GFS2_QUOTA_ACCOUNT},
{"on", GFS2_QUOTA_ON},
+ {"quiet", GFS2_QUOTA_QUIET},
{}
};
@@ -1390,12 +1396,14 @@ static const struct constant_table gfs2_param_data[] = {
};
enum opt_errors {
- Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
- Opt_errors_panic = GFS2_ERRORS_PANIC,
+ Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
+ Opt_errors_deactivate = GFS2_ERRORS_DEACTIVATE,
+ Opt_errors_panic = GFS2_ERRORS_PANIC,
};
static const struct constant_table gfs2_param_errors[] = {
{"withdraw", Opt_errors_withdraw },
+ {"deactivate", Opt_errors_deactivate },
{"panic", Opt_errors_panic },
{}
};
@@ -1443,13 +1451,13 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
switch (o) {
case Opt_lockproto:
- strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
+ strscpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
break;
case Opt_locktable:
- strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
+ strscpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
break;
case Opt_hostdata:
- strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
+ strscpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
break;
case Opt_spectator:
args->ar_spectator = 1;
@@ -1582,12 +1590,6 @@ static int gfs2_reconfigure(struct fs_context *fc)
fc->sb_flags |= SB_RDONLY;
if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
- struct gfs2_holder freeze_gh;
-
- error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
- if (error)
- return -EINVAL;
-
if (fc->sb_flags & SB_RDONLY) {
gfs2_make_fs_ro(sdp);
} else {
@@ -1595,7 +1597,6 @@ static int gfs2_reconfigure(struct fs_context *fc)
if (error)
errorfc(fc, "unable to remount read-write");
}
- gfs2_freeze_unlock(&freeze_gh);
}
sdp->sd_args = *newargs;
@@ -1717,6 +1718,55 @@ static int gfs2_meta_init_fs_context(struct fs_context *fc)
return 0;
}
+/**
+ * gfs2_evict_inodes - evict inodes cooperatively
+ * @sb: the superblock
+ *
+ * When evicting an inode with a zero link count, we are trying to upgrade the
+ * inode's iopen glock from SH to EX mode in order to determine if we can
+ * delete the inode. The other nodes are supposed to evict the inode from
+ * their caches if they can, and to poke the inode's inode glock if they cannot
+ * do so. Either behavior allows gfs2_upgrade_iopen_glock() to proceed
+ * quickly, but if the other nodes are not cooperating, the lock upgrading
+ * attempt will time out. Since inodes are evicted sequentially, this can add
+ * up quickly.
+ *
+ * Function evict_inodes() tries to keep s_inode_list_lock held for a long
+ * time, which prevents other inodes from being evicted concurrently.
+ * This precludes the cooperative behavior we are looking for. This special
+ * version of evict_inodes() avoids that.
+ *
+ * Modeled after drop_pagecache_sb().
+ */
+static void gfs2_evict_inodes(struct super_block *sb)
+{
+ struct inode *inode, *toput_inode = NULL;
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+
+ set_bit(SDF_EVICTING, &sdp->sd_flags);
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ spin_lock(&inode->i_lock);
+ if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) &&
+ !need_resched()) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&sb->s_inode_list_lock);
+
+ iput(toput_inode);
+ toput_inode = inode;
+
+ cond_resched();
+ spin_lock(&sb->s_inode_list_lock);
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+ iput(toput_inode);
+}
+
static void gfs2_kill_sb(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
@@ -1732,6 +1782,18 @@ static void gfs2_kill_sb(struct super_block *sb)
sdp->sd_root_dir = NULL;
sdp->sd_master_dir = NULL;
shrink_dcache_sb(sb);
+
+ gfs2_evict_inodes(sb);
+
+ /*
+ * Flush and then drain the delete workqueue here (via
+ * destroy_workqueue()) to ensure that any delete work that
+ * may be running will also see the SDF_KILL flag.
+ */
+ set_bit(SDF_KILL, &sdp->sd_flags);
+ gfs2_flush_delete_work(sdp);
+ destroy_workqueue(sdp->sd_delete_wq);
+
kill_block_super(sb);
}
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index be0997e24d60..b1692f12a602 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -106,58 +106,68 @@ static inline void spin_unlock_bucket(unsigned int hash)
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+
kmem_cache_free(gfs2_quotad_cachep, qd);
+ if (atomic_dec_and_test(&sdp->sd_quota_count))
+ wake_up(&sdp->sd_kill_wait);
}
-static void gfs2_qd_dispose(struct list_head *list)
+static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
{
- struct gfs2_quota_data *qd;
- struct gfs2_sbd *sdp;
-
- while (!list_empty(list)) {
- qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
- sdp = qd->qd_gl->gl_name.ln_sbd;
-
- list_del(&qd->qd_lru);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- /* Free from the filesystem-specific list */
- spin_lock(&qd_lock);
- list_del(&qd->qd_list);
- spin_unlock(&qd_lock);
+ spin_lock(&qd_lock);
+ list_del(&qd->qd_list);
+ spin_unlock(&qd_lock);
- spin_lock_bucket(qd->qd_hash);
- hlist_bl_del_rcu(&qd->qd_hlist);
- spin_unlock_bucket(qd->qd_hash);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+ if (!gfs2_withdrawn(sdp)) {
gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
+ gfs2_assert_warn(sdp, !qd->qd_slot_ref);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
+ }
- gfs2_glock_put(qd->qd_gl);
- atomic_dec(&sdp->sd_quota_count);
+ gfs2_glock_put(qd->qd_gl);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+}
- /* Delete it from the common reclaim list */
- call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+static void gfs2_qd_list_dispose(struct list_head *list)
+{
+ struct gfs2_quota_data *qd;
+
+ while (!list_empty(list)) {
+ qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
+ list_del(&qd->qd_lru);
+
+ gfs2_qd_dispose(qd);
}
}
static enum lru_status gfs2_qd_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *dispose = arg;
- struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+ struct gfs2_quota_data *qd =
+ list_entry(item, struct gfs2_quota_data, qd_lru);
+ enum lru_status status;
if (!spin_trylock(&qd->qd_lockref.lock))
return LRU_SKIP;
+ status = LRU_SKIP;
if (qd->qd_lockref.count == 0) {
lockref_mark_dead(&qd->qd_lockref);
list_lru_isolate_move(lru, &qd->qd_lru, dispose);
+ status = LRU_REMOVED;
}
spin_unlock(&qd->qd_lockref.lock);
- return LRU_REMOVED;
+ return status;
}
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
@@ -172,7 +182,7 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
gfs2_qd_isolate, &dispose);
- gfs2_qd_dispose(&dispose);
+ gfs2_qd_list_dispose(&dispose);
return freed;
}
@@ -183,13 +193,26 @@ static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}
-struct shrinker gfs2_qd_shrinker = {
- .count_objects = gfs2_qd_shrink_count,
- .scan_objects = gfs2_qd_shrink_scan,
- .seeks = DEFAULT_SEEKS,
- .flags = SHRINKER_NUMA_AWARE,
-};
+static struct shrinker *gfs2_qd_shrinker;
+int __init gfs2_qd_shrinker_init(void)
+{
+ gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
+ if (!gfs2_qd_shrinker)
+ return -ENOMEM;
+
+ gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
+ gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
+
+ shrinker_register(gfs2_qd_shrinker);
+
+ return 0;
+}
+
+void gfs2_qd_shrinker_exit(void)
+{
+ shrinker_free(gfs2_qd_shrinker);
+}
static u64 qd2index(struct gfs2_quota_data *qd)
{
@@ -200,12 +223,7 @@ static u64 qd2index(struct gfs2_quota_data *qd)
static u64 qd2offset(struct gfs2_quota_data *qd)
{
- u64 offset;
-
- offset = qd2index(qd);
- offset *= sizeof(struct gfs2_quota);
-
- return offset;
+ return qd2index(qd) * sizeof(struct gfs2_quota);
}
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
@@ -218,8 +236,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
return NULL;
qd->qd_sbd = sdp;
- qd->qd_lockref.count = 1;
- spin_lock_init(&qd->qd_lockref.lock);
+ lockref_init(&qd->qd_lockref);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
@@ -250,7 +267,7 @@ static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
if (qd->qd_sbd != sdp)
continue;
if (lockref_get_not_dead(&qd->qd_lockref)) {
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
return qd;
}
}
@@ -297,22 +314,33 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
}
-static void qd_hold(struct gfs2_quota_data *qd)
+static void __qd_hold(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
- gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
- lockref_get(&qd->qd_lockref);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ gfs2_assert(sdp, qd->qd_lockref.count > 0);
+ qd->qd_lockref.count++;
}
static void qd_put(struct gfs2_quota_data *qd)
{
+ struct gfs2_sbd *sdp;
+
if (lockref_put_or_lock(&qd->qd_lockref))
return;
+ BUG_ON(__lockref_is_dead(&qd->qd_lockref));
+ sdp = qd->qd_sbd;
+ if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+ lockref_mark_dead(&qd->qd_lockref);
+ spin_unlock(&qd->qd_lockref.lock);
+
+ gfs2_qd_dispose(qd);
+ return;
+ }
+
qd->qd_lockref.count = 0;
- list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
+ list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
spin_unlock(&qd->qd_lockref.lock);
-
}
static int slot_get(struct gfs2_quota_data *qd)
@@ -322,20 +350,19 @@ static int slot_get(struct gfs2_quota_data *qd)
int error = 0;
spin_lock(&sdp->sd_bitmap_lock);
- if (qd->qd_slot_count != 0)
- goto out;
-
- error = -ENOSPC;
- bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
- if (bit < sdp->sd_quota_slots) {
+ if (qd->qd_slot_ref == 0) {
+ bit = find_first_zero_bit(sdp->sd_quota_bitmap,
+ sdp->sd_quota_slots);
+ if (bit >= sdp->sd_quota_slots) {
+ error = -ENOSPC;
+ goto out;
+ }
set_bit(bit, sdp->sd_quota_bitmap);
qd->qd_slot = bit;
- error = 0;
-out:
- qd->qd_slot_count++;
}
+ qd->qd_slot_ref++;
+out:
spin_unlock(&sdp->sd_bitmap_lock);
-
return error;
}
@@ -344,8 +371,8 @@ static void slot_hold(struct gfs2_quota_data *qd)
struct gfs2_sbd *sdp = qd->qd_sbd;
spin_lock(&sdp->sd_bitmap_lock);
- gfs2_assert(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
+ gfs2_assert(sdp, qd->qd_slot_ref);
+ qd->qd_slot_ref++;
spin_unlock(&sdp->sd_bitmap_lock);
}
@@ -354,8 +381,8 @@ static void slot_put(struct gfs2_quota_data *qd)
struct gfs2_sbd *sdp = qd->qd_sbd;
spin_lock(&sdp->sd_bitmap_lock);
- gfs2_assert(sdp, qd->qd_slot_count);
- if (!--qd->qd_slot_count) {
+ gfs2_assert(sdp, qd->qd_slot_ref);
+ if (!--qd->qd_slot_ref) {
BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
qd->qd_slot = -1;
}
@@ -364,133 +391,124 @@ static void slot_put(struct gfs2_quota_data *qd)
static int bh_get(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
- struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ struct inode *inode = sdp->sd_qc_inode;
+ struct gfs2_inode *ip = GFS2_I(inode);
unsigned int block, offset;
- struct buffer_head *bh;
+ struct buffer_head *bh = NULL;
+ struct iomap iomap = { };
int error;
- struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
- mutex_lock(&sdp->sd_quota_mutex);
-
- if (qd->qd_bh_count++) {
- mutex_unlock(&sdp->sd_quota_mutex);
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_bh_count) {
+ qd->qd_bh_count++;
+ spin_unlock(&qd->qd_lockref.lock);
return 0;
}
+ spin_unlock(&qd->qd_lockref.lock);
block = qd->qd_slot / sdp->sd_qc_per_block;
offset = qd->qd_slot % sdp->sd_qc_per_block;
- bh_map.b_size = BIT(ip->i_inode.i_blkbits);
- error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
+ error = gfs2_iomap_get(inode,
+ (loff_t)block << inode->i_blkbits,
+ i_blocksize(inode), &iomap);
if (error)
- goto fail;
- error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
+ return error;
+ error = -ENOENT;
+ if (iomap.type != IOMAP_MAPPED)
+ return error;
+
+ error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
+ DIO_WAIT, 0, &bh);
if (error)
- goto fail;
+ return error;
error = -EIO;
if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
- goto fail_brelse;
-
- qd->qd_bh = bh;
- qd->qd_bh_qc = (struct gfs2_quota_change *)
- (bh->b_data + sizeof(struct gfs2_meta_header) +
- offset * sizeof(struct gfs2_quota_change));
-
- mutex_unlock(&sdp->sd_quota_mutex);
+ goto out;
- return 0;
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_bh == NULL) {
+ qd->qd_bh = bh;
+ qd->qd_bh_qc = (struct gfs2_quota_change *)
+ (bh->b_data + sizeof(struct gfs2_meta_header) +
+ offset * sizeof(struct gfs2_quota_change));
+ bh = NULL;
+ }
+ qd->qd_bh_count++;
+ spin_unlock(&qd->qd_lockref.lock);
+ error = 0;
-fail_brelse:
+out:
brelse(bh);
-fail:
- qd->qd_bh_count--;
- mutex_unlock(&sdp->sd_quota_mutex);
return error;
}
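bh_get() locates a quota-change entry by splitting the slot number into a quota-change-file block and an in-block index before mapping that block with gfs2_iomap_get(). A small illustration with an assumed entries-per-block value:

/* Illustration only: split a slot into (block, offset). The real
 * sd_qc_per_block is derived from the block size and entry size. */
static void example_slot_to_location(unsigned int slot,
				     unsigned int qc_per_block,
				     unsigned int *block, unsigned int *offset)
{
	*block  = slot / qc_per_block;	/* e.g. 300 / 128 = 2  */
	*offset = slot % qc_per_block;	/* e.g. 300 % 128 = 44 */
}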
static void bh_put(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ struct buffer_head *bh = NULL;
- mutex_lock(&sdp->sd_quota_mutex);
+ spin_lock(&qd->qd_lockref.lock);
gfs2_assert(sdp, qd->qd_bh_count);
if (!--qd->qd_bh_count) {
- brelse(qd->qd_bh);
+ bh = qd->qd_bh;
qd->qd_bh = NULL;
qd->qd_bh_qc = NULL;
}
- mutex_unlock(&sdp->sd_quota_mutex);
+ spin_unlock(&qd->qd_lockref.lock);
+ brelse(bh);
}
-static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
- u64 *sync_gen)
+static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ u64 sync_gen)
{
+ bool ret = false;
+
+ spin_lock(&qd->qd_lockref.lock);
if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
!test_bit(QDF_CHANGE, &qd->qd_flags) ||
- (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
- return 0;
+ qd->qd_sync_gen >= sync_gen)
+ goto out;
- if (!lockref_get_not_dead(&qd->qd_lockref))
- return 0;
+ if (__lockref_is_dead(&qd->qd_lockref))
+ goto out;
+ qd->qd_lockref.count++;
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
qd->qd_change_sync = qd->qd_change;
slot_hold(qd);
- return 1;
+ ret = true;
+
+out:
+ spin_unlock(&qd->qd_lockref.lock);
+ return ret;
}
-static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
+static void qd_ungrab_sync(struct gfs2_quota_data *qd)
{
- struct gfs2_quota_data *qd = NULL;
- int error;
- int found = 0;
-
- *qdp = NULL;
-
- if (sb_rdonly(sdp->sd_vfs))
- return 0;
-
- spin_lock(&qd_lock);
-
- list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
- if (found)
- break;
- }
-
- if (!found)
- qd = NULL;
-
- spin_unlock(&qd_lock);
-
- if (qd) {
- gfs2_assert_warn(sdp, qd->qd_change_sync);
- error = bh_get(qd);
- if (error) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
- return error;
- }
- }
-
- *qdp = qd;
-
- return 0;
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ slot_put(qd);
+ qd_put(qd);
}
-static void qd_unlock(struct gfs2_quota_data *qd)
+static void qdsb_put(struct gfs2_quota_data *qd)
{
- gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
- test_bit(QDF_LOCKED, &qd->qd_flags));
- clear_bit(QDF_LOCKED, &qd->qd_flags);
bh_put(qd);
slot_put(qd);
qd_put(qd);
}
+static void qd_unlock(struct gfs2_quota_data *qd)
+{
+ spin_lock(&qd->qd_lockref.lock);
+ gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ spin_unlock(&qd->qd_lockref.lock);
+ qdsb_put(qd);
+}
+
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
@@ -517,13 +535,6 @@ fail:
return error;
}
-static void qdsb_put(struct gfs2_quota_data *qd)
-{
- bh_put(qd);
- slot_put(qd);
- qd_put(qd);
-}
-
/**
* gfs2_qa_get - make sure we have a quota allocations data structure,
* if necessary
@@ -531,34 +542,42 @@ static void qdsb_put(struct gfs2_quota_data *qd)
*/
int gfs2_qa_get(struct gfs2_inode *ip)
{
- int error = 0;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct inode *inode = &ip->i_inode;
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
- down_write(&ip->i_rw_mutex);
+ spin_lock(&inode->i_lock);
if (ip->i_qadata == NULL) {
- ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
- if (!ip->i_qadata) {
- error = -ENOMEM;
- goto out;
- }
+ struct gfs2_qadata *tmp;
+
+ spin_unlock(&inode->i_lock);
+ tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
+ if (!tmp)
+ return -ENOMEM;
+
+ spin_lock(&inode->i_lock);
+ if (ip->i_qadata == NULL)
+ ip->i_qadata = tmp;
+ else
+ kmem_cache_free(gfs2_qadata_cachep, tmp);
}
ip->i_qadata->qa_ref++;
-out:
- up_write(&ip->i_rw_mutex);
- return error;
+ spin_unlock(&inode->i_lock);
+ return 0;
}
void gfs2_qa_put(struct gfs2_inode *ip)
{
- down_write(&ip->i_rw_mutex);
+ struct inode *inode = &ip->i_inode;
+
+ spin_lock(&inode->i_lock);
if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
ip->i_qadata = NULL;
}
- up_write(&ip->i_rw_mutex);
+ spin_unlock(&inode->i_lock);
}
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
@@ -579,6 +598,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
error = -EIO;
+ gfs2_qa_put(ip);
goto out;
}
@@ -651,72 +671,81 @@ static int sort_qd(const void *a, const void *b)
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
struct gfs2_quota_change *qc = qd->qd_bh_qc;
+ bool needs_put = false;
s64 x;
- mutex_lock(&sdp->sd_quota_mutex);
gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
- if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
- qc->qc_change = 0;
+ /*
+ * The QDF_CHANGE flag indicates that the slot in the quota change file
+ * is used. Here, we use the value of qc->qc_change when the slot is
+ * used, and we assume a value of 0 otherwise.
+ */
+
+ spin_lock(&qd->qd_lockref.lock);
+
+ x = 0;
+ if (test_bit(QDF_CHANGE, &qd->qd_flags))
+ x = be64_to_cpu(qc->qc_change);
+ x += change;
+ qd->qd_change += change;
+
+ if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) {
+ /* The slot in the quota change file becomes unused. */
+ clear_bit(QDF_CHANGE, &qd->qd_flags);
+ qc->qc_flags = 0;
+ qc->qc_id = 0;
+ needs_put = true;
+ } else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) {
+ /* The slot in the quota change file becomes used. */
+ set_bit(QDF_CHANGE, &qd->qd_flags);
+ __qd_hold(qd);
+ slot_hold(qd);
+
qc->qc_flags = 0;
if (qd->qd_id.type == USRQUOTA)
qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
}
-
- x = be64_to_cpu(qc->qc_change) + change;
qc->qc_change = cpu_to_be64(x);
- spin_lock(&qd_lock);
- qd->qd_change = x;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd->qd_lockref.lock);
- if (!x) {
- gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
- clear_bit(QDF_CHANGE, &qd->qd_flags);
- qc->qc_flags = 0;
- qc->qc_id = 0;
+ if (needs_put) {
slot_put(qd);
qd_put(qd);
- } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
- qd_hold(qd);
- slot_hold(qd);
}
-
if (change < 0) /* Reset quiet flag if we freed some blocks */
clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
- mutex_unlock(&sdp->sd_quota_mutex);
}
-static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
+static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
unsigned off, void *buf, unsigned bytes)
{
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct inode *inode = &ip->i_inode;
- struct gfs2_sbd *sdp = GFS2_SB(inode);
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
struct buffer_head *bh;
- void *kaddr;
u64 blk;
unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
unsigned to_write = bytes, pg_off = off;
- int done = 0;
blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
boff = off % bsize;
- page = find_or_create_page(mapping, index, GFP_NOFS);
- if (!page)
- return -ENOMEM;
- if (!page_has_buffers(page))
- create_empty_buffers(page, bsize, 0);
+ folio = filemap_grab_folio(mapping, index);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ bh = folio_buffers(folio);
+ if (!bh)
+ bh = create_empty_buffers(folio, bsize, 0);
- bh = page_buffers(page);
- while (!done) {
- /* Find the beginning block within the page */
+ for (;;) {
+ /* Find the beginning block within the folio */
if (pg_off >= ((bnum * bsize) + bsize)) {
bh = bh->b_this_page;
bnum++;
@@ -729,20 +758,14 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
goto unlock_out;
/* If it's a newly allocated disk block, zero it */
if (buffer_new(bh))
- zero_user(page, bnum * bsize, bh->b_size);
+ folio_zero_range(folio, bnum * bsize,
+ bh->b_size);
}
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh)) {
- ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- goto unlock_out;
- }
- if (gfs2_is_jdata(ip))
- gfs2_trans_add_data(ip->i_gl, bh);
- else
- gfs2_ordered_add_inode(ip);
+ if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
+ goto unlock_out;
+ gfs2_trans_add_data(ip->i_gl, bh);
/* If we need to write to the next block as well */
if (to_write > (bsize - boff)) {
@@ -751,31 +774,29 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
boff = pg_off % bsize;
continue;
}
- done = 1;
+ break;
}
- /* Write to the page, now that we have setup the buffer(s) */
- kaddr = kmap_atomic(page);
- memcpy(kaddr + off, buf, bytes);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
- unlock_page(page);
- put_page(page);
+ /* Write to the folio, now that we have setup the buffer(s) */
+ memcpy_to_folio(folio, off, buf, bytes);
+ flush_dcache_folio(folio);
+ folio_unlock(folio);
+ folio_put(folio);
return 0;
unlock_out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return -EIO;
}
-static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
+static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
loff_t loc)
{
unsigned long pg_beg;
unsigned pg_off, nbytes, overflow = 0;
- int pg_oflow = 0, error;
+ int error;
void *ptr;
nbytes = sizeof(struct gfs2_quota);
@@ -784,17 +805,15 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
pg_off = offset_in_page(loc);
/* If the quota straddles a page boundary, split the write in two */
- if ((pg_off + nbytes) > PAGE_SIZE) {
- pg_oflow = 1;
+ if ((pg_off + nbytes) > PAGE_SIZE)
overflow = (pg_off + nbytes) - PAGE_SIZE;
- }
ptr = qp;
- error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
+ error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
nbytes - overflow);
/* If there's an overflow, write the remaining bytes to the next page */
- if (!error && pg_oflow)
- error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
+ if (!error && overflow)
+ error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
ptr + nbytes - overflow,
overflow);
return error;
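The quota record is only written to two pages when it actually crosses a page boundary; a worked example of the overflow arithmetic, assuming 4096-byte pages and a deliberately small record size:

/* Worked example (PAGE_SIZE = 4096 assumed):
 *   loc = 4090, nbytes = 16 (chosen for illustration; the real record is larger)
 *   pg_beg   = 4090 >> PAGE_SHIFT   = 0
 *   pg_off   = offset_in_page(4090) = 4090
 *   overflow = (4090 + 16) - 4096   = 10
 * so 6 bytes go to page pg_beg and the remaining 10 to page pg_beg + 1.
 */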
@@ -802,7 +821,7 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
/**
* gfs2_adjust_quota - adjust record of current block usage
- * @ip: The quota inode
+ * @sdp: The superblock
* @loc: Offset of the entry in the quota file
* @change: The amount of usage change to record
* @qd: The quota data
@@ -814,12 +833,12 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
* Returns: 0 or -ve on error
*/
-static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
s64 change, struct gfs2_quota_data *qd,
struct qc_dqblk *fdq)
{
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct inode *inode = &ip->i_inode;
- struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_quota q;
int err;
u64 size;
@@ -836,10 +855,10 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
return err;
loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
- err = -EIO;
be64_add_cpu(&q.qu_value, change);
if (((s64)be64_to_cpu(q.qu_value)) < 0)
q.qu_value = 0; /* Never go negative on quota usage */
+ spin_lock(&qd->qd_lockref.lock);
qd->qd_qb.qb_value = q.qu_value;
if (fdq) {
if (fdq->d_fieldmask & QC_SPC_SOFT) {
@@ -855,13 +874,14 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
qd->qd_qb.qb_value = q.qu_value;
}
}
+ spin_unlock(&qd->qd_lockref.lock);
- err = gfs2_write_disk_quota(ip, &q, loc);
+ err = gfs2_write_disk_quota(sdp, &q, loc);
if (!err) {
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size)
i_size_write(inode, size);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
set_bit(QDF_REFRESH, &qd->qd_flags);
}
@@ -869,11 +889,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
return err;
}
-static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
+static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda,
+ u64 sync_gen)
{
- struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = (*qda)->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
unsigned int data_blocks, ind_blocks;
struct gfs2_holder *ghs, i_gh;
unsigned int qx, x;
@@ -883,18 +904,12 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
unsigned int nalloc = 0, blocks;
int error;
- error = gfs2_qa_get(ip);
- if (error)
- return error;
-
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
- if (!ghs) {
- error = -ENOMEM;
- goto out;
- }
+ if (!ghs)
+ return -ENOMEM;
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
inode_lock(&ip->i_inode);
@@ -943,7 +958,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
for (x = 0; x < num_qd; x++) {
qd = qda[x];
offset = qd2offset(qd);
- error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
+ error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
+ NULL);
if (error)
goto out_end_trans;
@@ -951,8 +967,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
set_bit(QDF_REFRESH, &qd->qd_flags);
}
- error = 0;
-
out_end_trans:
gfs2_trans_end(sdp);
out_ipres:
@@ -966,8 +980,15 @@ out_dq:
kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
-out:
- gfs2_qa_put(ip);
+ if (!error) {
+ for (x = 0; x < num_qd; x++) {
+ qd = qda[x];
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_sync_gen < sync_gen)
+ qd->qd_sync_gen = sync_gen;
+ spin_unlock(&qd->qd_lockref.lock);
+ }
+ }
return error;
}
@@ -991,7 +1012,9 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
qlvb->qb_limit = q.qu_limit;
qlvb->qb_warn = q.qu_warn;
qlvb->qb_value = q.qu_value;
+ spin_lock(&qd->qd_lockref.lock);
qd->qd_qb = *qlvb;
+ spin_unlock(&qd->qd_lockref.lock);
return 0;
}
@@ -999,11 +1022,12 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
struct gfs2_holder *q_gh)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh;
int error;
+ gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
restart:
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
if (error)
@@ -1012,7 +1036,9 @@ restart:
if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
force_refresh = FORCE;
+ spin_lock(&qd->qd_lockref.lock);
qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
+ spin_unlock(&qd->qd_lockref.lock);
if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
gfs2_glock_dq_uninit(q_gh);
@@ -1049,9 +1075,9 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qd;
u32 x;
- int error = 0;
+ int error;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
error = gfs2_quota_hold(ip, uid, gid);
@@ -1079,56 +1105,56 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
return error;
}
-static int need_sync(struct gfs2_quota_data *qd)
+static bool need_sync(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_tune *gt = &sdp->sd_tune;
- s64 value;
+ s64 value, change, limit;
unsigned int num, den;
- int do_sync = 1;
+ int ret = false;
+ spin_lock(&qd->qd_lockref.lock);
if (!qd->qd_qb.qb_limit)
- return 0;
+ goto out;
- spin_lock(&qd_lock);
- value = qd->qd_change;
- spin_unlock(&qd_lock);
+ change = qd->qd_change;
+ if (change <= 0)
+ goto out;
+ value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
+ limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
+ if (value >= limit)
+ goto out;
spin_lock(&gt->gt_spin);
num = gt->gt_quota_scale_num;
den = gt->gt_quota_scale_den;
spin_unlock(&gt->gt_spin);
- if (value < 0)
- do_sync = 0;
- else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
- (s64)be64_to_cpu(qd->qd_qb.qb_limit))
- do_sync = 0;
- else {
- value *= gfs2_jindex_size(sdp) * num;
- value = div_s64(value, den);
- value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
- if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
- do_sync = 0;
- }
+ change *= gfs2_jindex_size(sdp) * num;
+ change = div_s64(change, den);
+ if (value + change < limit)
+ goto out;
- return do_sync;
+ ret = true;
+out:
+ spin_unlock(&qd->qd_lockref.lock);
+ return ret;
}
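need_sync() only asks for a sync when the locally accumulated change, scaled by the journal count and the quota_scale tunable, would push the published value past the limit. A worked example with assumed numbers:

/* Worked example of the need_sync() heuristic (assumed values):
 *   limit = 1000, value = 900, quota_scale = 1/1, gfs2_jindex_size() = 3
 *   change = 30:  900 + 30 * 3 * 1 / 1 =  990 < 1000  -> no sync yet
 *   change = 40:  900 + 40 * 3 * 1 / 1 = 1020 >= 1000 -> sync now
 * Changes <= 0, and values already at or over the limit, return false early.
 */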
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_quota_data *qda[4];
+ struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
unsigned int count = 0;
u32 x;
- int found;
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
return;
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
- int sync;
+ bool sync;
+ int error;
qd = ip->i_qadata->qa_qd[x];
sync = need_sync(qd);
@@ -1138,17 +1164,16 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
continue;
spin_lock(&qd_lock);
- found = qd_check_sync(sdp, qd, NULL);
+ sync = qd_grab_sync(sdp, qd, U64_MAX);
spin_unlock(&qd_lock);
- if (!found)
+ if (!sync)
continue;
gfs2_assert_warn(sdp, qd->qd_change_sync);
- if (bh_get(qd)) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
+ error = bh_get(qd);
+ if (error) {
+ qd_ungrab_sync(qd);
continue;
}
@@ -1156,7 +1181,9 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
}
if (count) {
- do_sync(count, qda);
+ u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen);
+
+ do_sync(count, qda, sync_gen);
for (x = 0; x < count; x++)
qd_unlock(qda[x]);
}
@@ -1166,16 +1193,16 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
#define MAX_LINE 256
-static int print_message(struct gfs2_quota_data *qd, char *type)
+static void print_message(struct gfs2_quota_data *qd, char *type)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
-
- fs_info(sdp, "quota %s for %s %u\n",
- type,
- (qd->qd_id.type == USRQUOTA) ? "user" : "group",
- from_kqid(&init_user_ns, qd->qd_id));
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- return 0;
+ if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
+ fs_info(sdp, "quota %s for %s %u\n",
+ type,
+ (qd->qd_id.type == USRQUOTA) ? "user" : "group",
+ from_kqid(&init_user_ns, qd->qd_id));
+ }
}
/**
@@ -1215,12 +1242,12 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
qid_eq(qd->qd_id, make_kqid_gid(gid))))
continue;
+ spin_lock(&qd->qd_lockref.lock);
warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
- spin_lock(&qd_lock);
value += qd->qd_change;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd->qd_lockref.lock);
if (limit > 0 && (limit - value) < ap->allowed)
ap->allowed = limit - value;
@@ -1245,7 +1272,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
* HZ)) {
quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
- error = print_message(qd, "warning");
+ print_message(qd, "warning");
+ error = 0;
qd->qd_last_warn = jiffies;
}
}
@@ -1259,7 +1287,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 x;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
gfs2_assert_warn(sdp, change))
return;
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
@@ -1283,40 +1311,55 @@ int gfs2_quota_sync(struct super_block *sb, int type)
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
- unsigned int num_qd;
- unsigned int x;
+ u64 sync_gen;
int error = 0;
+ if (sb_rdonly(sdp->sd_vfs))
+ return 0;
+
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
if (!qda)
return -ENOMEM;
mutex_lock(&sdp->sd_quota_sync_mutex);
- sdp->sd_quota_sync_gen++;
+ sync_gen = sdp->sd_quota_sync_gen + 1;
do {
- num_qd = 0;
+ struct gfs2_quota_data *iter;
+ unsigned int num_qd = 0;
+ unsigned int x;
- for (;;) {
- error = qd_fish(sdp, qda + num_qd);
- if (error || !qda[num_qd])
- break;
- if (++num_qd == max_qd)
- break;
+ spin_lock(&qd_lock);
+ list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
+ if (qd_grab_sync(sdp, iter, sync_gen)) {
+ qda[num_qd++] = iter;
+ if (num_qd == max_qd)
+ break;
+ }
}
+ spin_unlock(&qd_lock);
- if (num_qd) {
- if (!error)
- error = do_sync(num_qd, qda);
+ if (!num_qd)
+ break;
+
+ for (x = 0; x < num_qd; x++) {
+ error = bh_get(qda[x]);
if (!error)
- for (x = 0; x < num_qd; x++)
- qda[x]->qd_sync_gen =
- sdp->sd_quota_sync_gen;
+ continue;
- for (x = 0; x < num_qd; x++)
- qd_unlock(qda[x]);
+ while (x < num_qd)
+ qd_ungrab_sync(qda[--num_qd]);
+ break;
+ }
+
+ if (!error) {
+ WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen);
+ error = do_sync(num_qd, qda, sync_gen);
}
- } while (!error && num_qd == max_qd);
+
+ for (x = 0; x < num_qd; x++)
+ qd_unlock(qda[x]);
+ } while (!error);
mutex_unlock(&sdp->sd_quota_sync_mutex);
kfree(qda);
@@ -1351,6 +1394,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
unsigned int found = 0;
unsigned int hash;
unsigned int bm_size;
+ struct buffer_head *bh;
u64 dblock;
u32 extlen = 0;
int error;
@@ -1370,8 +1414,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
return error;
for (x = 0; x < blocks; x++) {
- struct buffer_head *bh;
- const struct gfs2_quota_change *qc;
+ struct gfs2_quota_change *qc;
unsigned int y;
if (!extlen) {
@@ -1384,15 +1427,13 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
if (!bh)
goto fail;
- if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
- brelse(bh);
- goto fail;
- }
+ if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
+ goto fail_brelse;
- qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
+ qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
y++, slot++) {
- struct gfs2_quota_data *qd;
+ struct gfs2_quota_data *old_qd, *qd;
s64 qc_change = be64_to_cpu(qc->qc_change);
u32 qc_flags = be32_to_cpu(qc->qc_flags);
enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
@@ -1405,29 +1446,51 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
hash = gfs2_qd_hash(sdp, qc_id);
qd = qd_alloc(hash, sdp, qc_id);
- if (qd == NULL) {
- brelse(bh);
- goto fail;
- }
+ if (qd == NULL)
+ goto fail_brelse;
+ qd->qd_lockref.count = 0;
set_bit(QDF_CHANGE, &qd->qd_flags);
qd->qd_change = qc_change;
qd->qd_slot = slot;
- qd->qd_slot_count = 1;
+ qd->qd_slot_ref = 1;
spin_lock(&qd_lock);
+ spin_lock_bucket(hash);
+ old_qd = gfs2_qd_search_bucket(hash, sdp, qc_id);
+ if (old_qd) {
+ fs_err(sdp, "Corruption found in quota_change%u"
+ "file: duplicate identifier in "
+ "slot %u\n",
+ sdp->sd_jdesc->jd_jid, slot);
+
+ spin_unlock_bucket(hash);
+ spin_unlock(&qd_lock);
+ qd_put(old_qd);
+
+ gfs2_glock_put(qd->qd_gl);
+ kmem_cache_free(gfs2_quotad_cachep, qd);
+
+ /* zero out the duplicate slot */
+ lock_buffer(bh);
+ memset(qc, 0, sizeof(*qc));
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+
+ continue;
+ }
BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
-
- spin_lock_bucket(hash);
hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
spin_unlock_bucket(hash);
+ spin_unlock(&qd_lock);
found++;
}
+ if (buffer_dirty(bh))
+ sync_dirty_buffer(bh);
brelse(bh);
dblock++;
extlen--;
@@ -1438,6 +1501,10 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
return 0;
+fail_brelse:
+ if (buffer_dirty(bh))
+ sync_dirty_buffer(bh);
+ brelse(bh);
fail:
gfs2_quota_cleanup(sdp);
return error;
@@ -1445,36 +1512,36 @@ fail:
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
- struct list_head *head = &sdp->sd_quota_list;
struct gfs2_quota_data *qd;
+ LIST_HEAD(dispose);
+ int count;
- spin_lock(&qd_lock);
- while (!list_empty(head)) {
- qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
-
- list_del(&qd->qd_list);
-
- /* Also remove if this qd exists in the reclaim list */
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
- atomic_dec(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
-
- spin_lock_bucket(qd->qd_hash);
- hlist_bl_del_rcu(&qd->qd_hlist);
- spin_unlock_bucket(qd->qd_hash);
-
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- gfs2_assert_warn(sdp, !qd->qd_bh_count);
+ BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
+ test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
- gfs2_glock_put(qd->qd_gl);
- call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+ spin_lock(&qd_lock);
+ list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_lockref.count != 0) {
+ spin_unlock(&qd->qd_lockref.lock);
+ continue;
+ }
+ lockref_mark_dead(&qd->qd_lockref);
+ spin_unlock(&qd->qd_lockref.lock);
- spin_lock(&qd_lock);
+ list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
+ list_add(&qd->qd_lru, &dispose);
}
spin_unlock(&qd_lock);
- gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
+ gfs2_qd_list_dispose(&dispose);
+
+ wait_event_timeout(sdp->sd_kill_wait,
+ (count = atomic_read(&sdp->sd_quota_count)) == 0,
+ HZ * 60);
+
+ if (count != 0)
+ fs_err(sdp, "%d left-over quota data objects\n", count);
kvfree(sdp->sd_quota_bitmap);
sdp->sd_quota_bitmap = NULL;
@@ -1491,39 +1558,6 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
}
}
-static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
- int (*fxn)(struct super_block *sb, int type),
- unsigned long t, unsigned long *timeo,
- unsigned int *new_timeo)
-{
- if (t >= *timeo) {
- int error = fxn(sdp->sd_vfs, 0);
- quotad_error(sdp, msg, error);
- *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
- } else {
- *timeo -= t;
- }
-}
-
-static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
-{
- struct gfs2_inode *ip;
-
- while(1) {
- ip = NULL;
- spin_lock(&sdp->sd_trunc_lock);
- if (!list_empty(&sdp->sd_trunc_list)) {
- ip = list_first_entry(&sdp->sd_trunc_list,
- struct gfs2_inode, i_trunc_list);
- list_del_init(&ip->i_trunc_list);
- }
- spin_unlock(&sdp->sd_trunc_lock);
- if (ip == NULL)
- return;
- gfs2_glock_finish_truncate(ip);
- }
-}
-
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
if (!sdp->sd_statfs_force_sync) {
sdp->sd_statfs_force_sync = 1;
@@ -1541,49 +1575,51 @@ void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
int gfs2_quotad(void *data)
{
struct gfs2_sbd *sdp = data;
- struct gfs2_tune *tune = &sdp->sd_tune;
- unsigned long statfs_timeo = 0;
- unsigned long quotad_timeo = 0;
- unsigned long t = 0;
- DEFINE_WAIT(wait);
- int empty;
+ unsigned long now = jiffies;
+ unsigned long statfs_deadline = now;
+ unsigned long quotad_deadline = now;
+ set_freezable();
while (!kthread_should_stop()) {
+ unsigned long t;
if (gfs2_withdrawn(sdp))
- goto bypass;
- /* Update the master statfs file */
- if (sdp->sd_statfs_force_sync) {
- int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
+ break;
+
+ now = jiffies;
+ if (sdp->sd_statfs_force_sync ||
+ time_after(now, statfs_deadline)) {
+ unsigned int quantum;
+ int error;
+
+ /* Update the master statfs file */
+ error = gfs2_statfs_sync(sdp->sd_vfs, 0);
quotad_error(sdp, "statfs", error);
- statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
- }
- else
- quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
- &statfs_timeo,
- &tune->gt_statfs_quantum);
- /* Update quota file */
- quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
- &quotad_timeo, &tune->gt_quota_quantum);
+ quantum = gfs2_tune_get(sdp, gt_statfs_quantum);
+ statfs_deadline = now + quantum * HZ;
+ }
+ if (time_after(now, quotad_deadline)) {
+ unsigned int quantum;
+ int error;
- /* Check for & recover partially truncated inodes */
- quotad_check_trunc_list(sdp);
+ /* Update the quota file */
+ error = gfs2_quota_sync(sdp->sd_vfs, 0);
+ quotad_error(sdp, "sync", error);
- try_to_freeze();
+ quantum = gfs2_tune_get(sdp, gt_quota_quantum);
+ quotad_deadline = now + quantum * HZ;
+ }
-bypass:
- t = min(quotad_timeo, statfs_timeo);
+ t = min(statfs_deadline - now, quotad_deadline - now);
+ wait_event_freezable_timeout(sdp->sd_quota_wait,
+ sdp->sd_statfs_force_sync ||
+ gfs2_withdrawn(sdp) ||
+ kthread_should_stop(),
+ t);
- prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
- spin_lock(&sdp->sd_trunc_lock);
- empty = list_empty(&sdp->sd_trunc_list);
- spin_unlock(&sdp->sd_trunc_lock);
- if (empty && !sdp->sd_statfs_force_sync)
- t -= schedule_timeout(t);
- else
+ if (sdp->sd_statfs_force_sync)
t = 0;
- finish_wait(&sdp->sd_quota_wait, &wait);
}
return 0;
@@ -1596,6 +1632,8 @@ static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
memset(state, 0, sizeof(*state));
switch (sdp->sd_args.ar_quota) {
+ case GFS2_QUOTA_QUIET:
+ fallthrough;
case GFS2_QUOTA_ON:
state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
@@ -1724,7 +1762,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = 1 + data_blocks + ind_blocks;
@@ -1742,7 +1780,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
goto out_release;
/* Apply changes */
- error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
+ error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
if (!error)
clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 21ada332d555..988f38dc5b2c 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -15,27 +15,27 @@ struct gfs2_sbd;
#define NO_UID_QUOTA_CHANGE INVALID_UID
#define NO_GID_QUOTA_CHANGE INVALID_GID
-extern int gfs2_qa_get(struct gfs2_inode *ip);
-extern void gfs2_qa_put(struct gfs2_inode *ip);
-extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
-extern void gfs2_quota_unhold(struct gfs2_inode *ip);
+int gfs2_qa_get(struct gfs2_inode *ip);
+void gfs2_qa_put(struct gfs2_inode *ip);
+int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+void gfs2_quota_unhold(struct gfs2_inode *ip);
-extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
-extern void gfs2_quota_unlock(struct gfs2_inode *ip);
+int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+void gfs2_quota_unlock(struct gfs2_inode *ip);
-extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
- struct gfs2_alloc_parms *ap);
-extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
- kuid_t uid, kgid_t gid);
+int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
+ struct gfs2_alloc_parms *ap);
+void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
+ kuid_t uid, kgid_t gid);
-extern int gfs2_quota_sync(struct super_block *sb, int type);
-extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
+int gfs2_quota_sync(struct super_block *sb, int type);
+int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
-extern int gfs2_quota_init(struct gfs2_sbd *sdp);
-extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
-extern int gfs2_quotad(void *data);
+int gfs2_quota_init(struct gfs2_sbd *sdp);
+void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
+int gfs2_quotad(void *data);
-extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
+void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
struct gfs2_alloc_parms *ap)
@@ -44,13 +44,13 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
int ret;
ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
- if (capable(CAP_SYS_RESOURCE) ||
- sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
+ capable(CAP_SYS_RESOURCE))
return 0;
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
return ret;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
return 0;
ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);
if (ret)
@@ -59,8 +59,10 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
}
extern const struct quotactl_ops gfs2_quotactl_ops;
-extern struct shrinker gfs2_qd_shrinker;
+int __init gfs2_qd_shrinker_init(void);
+void gfs2_qd_shrinker_exit(void);
extern struct list_lru gfs2_qd_lru;
-extern void __init gfs2_quota_hash_init(void);
+
+void __init gfs2_quota_hash_init(void);
#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 016ed1b2ca1d..8c8202c68b64 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -27,7 +27,7 @@
#include "util.h"
#include "dir.h"
-struct workqueue_struct *gfs_recovery_wq;
+struct workqueue_struct *gfs2_recovery_wq;
int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
struct buffer_head **bh)
@@ -55,17 +55,16 @@ int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
{
struct list_head *head = &jd->jd_revoke_list;
- struct gfs2_revoke_replay *rr;
- int found = 0;
+ struct gfs2_revoke_replay *rr = NULL, *iter;
- list_for_each_entry(rr, head, rr_list) {
- if (rr->rr_blkno == blkno) {
- found = 1;
+ list_for_each_entry(iter, head, rr_list) {
+ if (iter->rr_blkno == blkno) {
+ rr = iter;
break;
}
}
- if (found) {
+ if (rr) {
rr->rr_where = where;
return 0;
}
@@ -83,18 +82,17 @@ int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
{
- struct gfs2_revoke_replay *rr;
+ struct gfs2_revoke_replay *rr = NULL, *iter;
int wrap, a, b, revoke;
- int found = 0;
- list_for_each_entry(rr, &jd->jd_revoke_list, rr_list) {
- if (rr->rr_blkno == blkno) {
- found = 1;
+ list_for_each_entry(iter, &jd->jd_revoke_list, rr_list) {
+ if (iter->rr_blkno == blkno) {
+ rr = iter;
break;
}
}
- if (!found)
+ if (!rr)
return 0;
wrap = (rr->rr_where < jd->jd_replay_tail);
@@ -120,6 +118,7 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
unsigned int blkno, struct gfs2_log_header_host *head)
{
+ const u32 zero = 0;
u32 hash, crc;
if (lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
@@ -128,7 +127,7 @@ int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
return 1;
hash = crc32(~0, lh, LH_V1_SIZE - 4);
- hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */
+ hash = ~crc32(hash, &zero, 4); /* assume lh_hash is zero */
if (be32_to_cpu(lh->lh_hash) != hash)
return 1;
@@ -265,16 +264,12 @@ static void clean_journal(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- u32 lblock = head->lh_blkno;
- gfs2_replay_incr_blk(jd, &lblock);
- gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock,
+ gfs2_replay_incr_blk(jd, &head->lh_blkno);
+ head->lh_sequence++;
+ gfs2_write_log_header(sdp, jd, head->lh_sequence, 0, head->lh_blkno,
GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
- if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) {
- sdp->sd_log_flush_head = lblock;
- gfs2_log_incr_head(sdp);
- }
}
@@ -406,7 +401,7 @@ void gfs2_recover_func(struct work_struct *work)
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host head;
- struct gfs2_holder j_gh, ji_gh, thaw_gh;
+ struct gfs2_holder j_gh, ji_gh;
ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
int ro = 0;
unsigned int pass;
@@ -422,14 +417,15 @@ void gfs2_recover_func(struct work_struct *work)
if (sdp->sd_args.ar_spectator)
goto fail;
if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
- fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
+ fs_info(sdp, "jid=%u: Trying to acquire journal glock...\n",
jd->jd_jid);
jlocked = 1;
- /* Acquire the journal lock so we can do recovery */
+ /* Acquire the journal glock so we can do recovery */
error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
+ LM_FLAG_RECOVER | LM_FLAG_TRY |
+ GL_NOCACHE,
&j_gh);
switch (error) {
case 0:
@@ -445,7 +441,8 @@ void gfs2_recover_func(struct work_struct *work)
}
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
+ LM_FLAG_RECOVER | GL_NOCACHE,
+ &ji_gh);
if (error)
goto fail_gunlock_j;
} else {
@@ -459,7 +456,7 @@ void gfs2_recover_func(struct work_struct *work)
if (error)
goto fail_gunlock_ji;
- error = gfs2_find_jhead(jd, &head, true);
+ error = gfs2_find_jhead(jd, &head);
if (error)
goto fail_gunlock_ji;
t_jhd = ktime_get();
@@ -467,14 +464,14 @@ void gfs2_recover_func(struct work_struct *work)
ktime_ms_delta(t_jhd, t_jlck));
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
- fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
- jd->jd_jid);
-
- /* Acquire a shared hold on the freeze lock */
+ mutex_lock(&sdp->sd_freeze_mutex);
- error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
- if (error)
+ if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
+ mutex_unlock(&sdp->sd_freeze_mutex);
+ fs_warn(sdp, "jid=%u: Can't replay: filesystem "
+ "is frozen\n", jd->jd_jid);
goto fail_gunlock_ji;
+ }
if (test_bit(SDF_RORECOVERY, &sdp->sd_flags)) {
ro = 1;
@@ -498,7 +495,7 @@ void gfs2_recover_func(struct work_struct *work)
fs_warn(sdp, "jid=%u: Can't replay: read-only block "
"device\n", jd->jd_jid);
error = -EROFS;
- goto fail_gunlock_thaw;
+ goto fail_gunlock_nofreeze;
}
t_tlck = ktime_get();
@@ -516,7 +513,7 @@ void gfs2_recover_func(struct work_struct *work)
lops_after_scan(jd, error, pass);
if (error) {
up_read(&sdp->sd_log_flush_lock);
- goto fail_gunlock_thaw;
+ goto fail_gunlock_nofreeze;
}
}
@@ -524,7 +521,7 @@ void gfs2_recover_func(struct work_struct *work)
clean_journal(jd, &head);
up_read(&sdp->sd_log_flush_lock);
- gfs2_freeze_unlock(&thaw_gh);
+ mutex_unlock(&sdp->sd_freeze_mutex);
t_rep = ktime_get();
fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
@@ -535,6 +532,9 @@ void gfs2_recover_func(struct work_struct *work)
ktime_ms_delta(t_rep, t_tlck));
}
+ if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
+ gfs2_log_pointers_init(sdp, &head);
+
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
if (jlocked) {
@@ -545,8 +545,8 @@ void gfs2_recover_func(struct work_struct *work)
fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
goto done;
-fail_gunlock_thaw:
- gfs2_freeze_unlock(&thaw_gh);
+fail_gunlock_nofreeze:
+ mutex_unlock(&sdp->sd_freeze_mutex);
fail_gunlock_ji:
if (jlocked) {
gfs2_glock_dq_uninit(&ji_gh);
@@ -572,7 +572,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
return -EBUSY;
/* we have JDF_RECOVERY, queue should always succeed */
- rv = queue_work(gfs_recovery_wq, &jd->jd_work);
+ rv = queue_work(gfs2_recovery_wq, &jd->jd_work);
BUG_ON(!rv);
if (wait)
@@ -582,3 +582,13 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
return wait ? jd->jd_recover_error : 0;
}
+void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
+ struct gfs2_log_header_host *head)
+{
+ sdp->sd_log_sequence = head->lh_sequence + 1;
+ gfs2_replay_incr_blk(sdp->sd_jdesc, &head->lh_blkno);
+ sdp->sd_log_tail = head->lh_blkno;
+ sdp->sd_log_flush_head = head->lh_blkno;
+ sdp->sd_log_flush_tail = head->lh_blkno;
+ sdp->sd_log_head = head->lh_blkno;
+}
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 0d30f8e804f4..5a5ba72ecd75 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -9,7 +9,7 @@
#include "incore.h"
-extern struct workqueue_struct *gfs_recovery_wq;
+extern struct workqueue_struct *gfs2_recovery_wq;
static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
{
@@ -17,17 +17,19 @@ static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
*blk = 0;
}
-extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
+int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
struct buffer_head **bh);
-extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
-extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
-extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
+int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+void gfs2_revoke_clean(struct gfs2_jdesc *jd);
-extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
-extern void gfs2_recover_func(struct work_struct *work);
-extern int __get_log_header(struct gfs2_sbd *sdp,
- const struct gfs2_log_header *lh, unsigned int blkno,
+int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
+void gfs2_recover_func(struct work_struct *work);
+int __get_log_header(struct gfs2_sbd *sdp,
+ const struct gfs2_log_header *lh, unsigned int blkno,
+ struct gfs2_log_header_host *head);
+void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
struct gfs2_log_header_host *head);
#endif /* __RECOVERY_DOT_H__ */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 0fb3c01bc557..b14e54b38ee8 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -159,13 +159,13 @@ static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
}
/**
- * gfs2_bit_search
+ * gfs2_bit_search - search bitmap for a state
* @ptr: Pointer to bitmap data
* @mask: Mask to use (normally 0x55555.... but adjusted for search start)
* @state: The state we are searching for
*
- * We xor the bitmap data with a patter which is the bitwise opposite
- * of what we are looking for, this gives rise to a pattern of ones
+ * We xor the bitmap data with a pattern which is the bitwise opposite
+ * of what we are looking for. This gives rise to a pattern of ones
* wherever there is a match. Since we have two bits per entry, we
* take this pattern, shift it down by one place and then and it with
* the original. All the even bit positions (0,2,4, etc) then represent
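The search trick described in the comment above can be illustrated with a minimal, standalone user-space sketch. This is not part of the patch: the helper names (search_pattern, bit_search) and the example word are made up for illustration, and the in-kernel gfs2_bit_search() differs in detail (it works on little-endian __le64 words and receives the 0x5555... mask as an argument).

#include <stdint.h>
#include <stdio.h>

/* Bitwise opposite of a 2-bit state replicated across a 64-bit word. */
static uint64_t search_pattern(uint8_t state)
{
	static const uint64_t replicate[4] = {
		0x0000000000000000ULL,	/* state 0 */
		0x5555555555555555ULL,	/* state 1 */
		0xaaaaaaaaaaaaaaaaULL,	/* state 2 */
		0xffffffffffffffffULL,	/* state 3 */
	};
	return ~replicate[state];
}

/* Return a word with the low bit of every matching 2-bit entry set. */
static uint64_t bit_search(uint64_t word, uint8_t state)
{
	uint64_t tmp = word ^ search_pattern(state);	/* 11 where entry == state */
	tmp &= tmp >> 1;				/* keep pairs where both bits are set */
	return tmp & 0x5555555555555555ULL;		/* keep only the even bit positions */
}

int main(void)
{
	/* Entries, lowest first: 2, 1, 3, 1, then all zeroes. */
	uint64_t word = 0x76;

	/* Entries 1 and 3 hold state 1, so bits 2 and 6 are set: prints 0x44. */
	printf("matches for state 1: %#llx\n",
	       (unsigned long long)bit_search(word, 1));
	return 0;
}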
@@ -680,13 +680,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
/**
* gfs2_rs_delete - delete a multi-block reservation
* @ip: The inode for this reservation
- * @wcount: The inode's write count, or NULL
*
*/
-void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
+void gfs2_rs_delete(struct gfs2_inode *ip)
{
+ struct inode *inode = &ip->i_inode;
+
down_write(&ip->i_rw_mutex);
- if ((wcount == NULL) || (atomic_read(wcount) <= 1))
+ if (atomic_read(&inode->i_writecount) <= 1)
gfs2_rs_deltree(&ip->i_res);
up_write(&ip->i_rw_mutex);
}
@@ -813,11 +814,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bi = rgd->rd_bits + (length - 1);
if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
gfs2_lm(sdp,
- "ri_addr = %llu\n"
- "ri_length = %u\n"
- "ri_data0 = %llu\n"
- "ri_data = %u\n"
- "ri_bitbytes = %u\n"
+ "ri_addr=%llu "
+ "ri_length=%u "
+ "ri_data0=%llu "
+ "ri_data=%u "
+ "ri_bitbytes=%u "
"start=%u len=%u offset=%u\n",
(unsigned long long)rgd->rd_addr,
rgd->rd_length,
@@ -922,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
spin_lock_init(&rgd->rd_rsspin);
mutex_init(&rgd->rd_mutex);
- error = compute_bitstructs(rgd);
- if (error)
- goto fail;
-
error = gfs2_glock_get(sdp, rgd->rd_addr,
&gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
if (error)
goto fail;
+ error = compute_bitstructs(rgd);
+ if (error)
+ goto fail_glock;
+
rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -944,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
}
error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
gfs2_glock_put(rgd->rd_gl);
fail:
@@ -1186,7 +1188,7 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
/**
* gfs2_rgrp_go_instantiate - Read in a RG's header and bitmaps
- * @gh: the glock holder representing the rgrpd to read in
+ * @gl: the glock representing the rgrpd to read in
*
* Read in all of a Resource Group's header and bitmap blocks.
* Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
@@ -1194,9 +1196,8 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
* Returns: errno
*/
-int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh)
+int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_rgrpd *rgd = gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd;
unsigned int length = rgd->rd_length;
@@ -1313,7 +1314,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
u64 blk;
sector_t start = 0;
sector_t nr_blks = 0;
- int rv;
+ int rv = -EIO;
unsigned int x;
u32 trimmed = 0;
u8 diff;
@@ -1369,7 +1370,7 @@ fail:
if (sdp->sd_args.ar_discard)
fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
sdp->sd_args.ar_discard = 0;
- return -EIO;
+ return rv;
}
/**
@@ -1384,7 +1385,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
{
struct inode *inode = file_inode(filp);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
+ struct block_device *bdev = sdp->sd_vfs->s_bdev;
struct buffer_head *bh;
struct gfs2_rgrpd *rgd;
struct gfs2_rgrpd *rgd_end;
@@ -1403,7 +1404,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
return -EROFS;
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(bdev))
return -EOPNOTSUPP;
if (copy_from_user(&r, argp, sizeof(r)))
@@ -1415,8 +1416,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
start = r.start >> bs_shift;
end = start + (r.len >> bs_shift);
- minlen = max_t(u64, r.minlen,
- q->limits.discard_granularity) >> bs_shift;
+ minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
+ minlen = max_t(u64, minlen, bdev_discard_granularity(bdev)) >> bs_shift;
if (end <= start || minlen > sdp->sd_max_rg_data)
return -EINVAL;
@@ -1878,7 +1879,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
*/
ip = gl->gl_object;
- if (ip || !gfs2_queue_delete_work(gl, 0))
+ if (ip || !gfs2_queue_verify_delete(gl, false))
gfs2_glock_put(gl);
else
found++;
@@ -1966,7 +1967,7 @@ static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
}
/**
- * gfs2_rgrp_used_recently
+ * gfs2_rgrp_used_recently - test if an rgrp has been used recently
* @rs: The block reservation with the rgrp to test
* @msecs: The time limit in milliseconds
*
@@ -1986,10 +1987,8 @@ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
{
const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- u32 skip;
- get_random_bytes(&skip, sizeof(skip));
- return skip % sdp->sd_rgrps;
+ return get_random_u32() % sdp->sd_rgrps;
}
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
@@ -2305,7 +2304,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
- if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
+ if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
@@ -2410,13 +2409,12 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
* @bn: Used to return the starting block number
* @nblocks: requested number of blocks/extent length (value/result)
* @dinode: 1 if we're allocating a dinode block, else 0
- * @generation: the generation number of the inode
*
* Returns: 0 or error
*/
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
- bool dinode, u64 *generation)
+ bool dinode)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
@@ -2476,10 +2474,13 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
rbm.rgd->rd_free -= *nblocks;
spin_unlock(&rbm.rgd->rd_rsspin);
if (dinode) {
+ u64 generation;
+
rbm.rgd->rd_dinodes++;
- *generation = rbm.rgd->rd_igeneration++;
- if (*generation == 0)
- *generation = rbm.rgd->rd_igeneration++;
+ generation = rbm.rgd->rd_igeneration++;
+ if (generation == 0)
+ generation = rbm.rgd->rd_igeneration++;
+ ip->i_generation = generation;
}
gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
@@ -2583,8 +2584,8 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- rgrp_unlock_local(rgd);
be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
+ rgrp_unlock_local(rgd);
gfs2_statfs_change(sdp, 0, +1, -1);
trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
@@ -2718,12 +2719,15 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
* gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
* and initialize an array of glock holders for them
* @rlist: the list of resource groups
+ * @state: the state we're requesting
+ * @flags: the modifier flags
*
* FIXME: Don't use NOFAIL
*
*/
-void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+ unsigned int state, u16 flags)
{
unsigned int x;
@@ -2731,8 +2735,8 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
sizeof(struct gfs2_holder),
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
- gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]);
+ gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, flags,
+ &rlist->rl_ghs[x]);
}
/**
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 3e2ca1fb4305..8d20e99385db 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -22,38 +22,38 @@ struct gfs2_rgrpd;
struct gfs2_sbd;
struct gfs2_holder;
-extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
+void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
-extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
-extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
-extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
+struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
+struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
+struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
-extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
-extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
-extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
-extern int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh);
-extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
+void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
+int gfs2_rindex_update(struct gfs2_sbd *sdp);
+void gfs2_free_clones(struct gfs2_rgrpd *rgd);
+int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
+void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
-extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
+struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
- struct gfs2_alloc_parms *ap);
-extern void gfs2_inplace_release(struct gfs2_inode *ip);
-
-extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
- bool dinode, u64 *generation);
-
-extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
-extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
- u64 bstart, u32 blen, int meta);
-extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
- u64 bstart, u32 blen);
-extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
-extern void gfs2_unlink_di(struct inode *inode);
-extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
- unsigned int type);
+int gfs2_inplace_reserve(struct gfs2_inode *ip,
+ struct gfs2_alloc_parms *ap);
+void gfs2_inplace_release(struct gfs2_inode *ip);
+
+int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
+ bool dinode);
+
+void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
+void gfs2_rs_delete(struct gfs2_inode *ip);
+void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, int meta);
+void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen);
+void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
+void gfs2_unlink_di(struct inode *inode);
+int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
+ unsigned int type);
struct gfs2_rgrp_list {
unsigned int rl_rgrps;
@@ -62,17 +62,19 @@ struct gfs2_rgrp_list {
struct gfs2_holder *rl_ghs;
};
-extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
- u64 block);
-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
-extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
-extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
-extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
- const char *fs_id_buf);
-extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
- struct buffer_head *bh,
- const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
-extern int gfs2_fitrim(struct file *filp, void __user *argp);
+void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
+ u64 block);
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+ unsigned int state, u16 flags);
+void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+u64 gfs2_ri_total(struct gfs2_sbd *sdp);
+void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ const char *fs_id_buf);
+int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+ struct buffer_head *bh,
+ const struct gfs2_bitmap *bi, unsigned minlen,
+ u64 *ptrimmed);
+int gfs2_fitrim(struct file *filp, void __user *argp);
/* This is how to tell if a reservation is in the rgrp tree: */
static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
@@ -87,9 +89,9 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
return first <= block && block < last;
}
-extern void check_and_update_goal(struct gfs2_inode *ip);
+void check_and_update_goal(struct gfs2_inode *ip);
-extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
-extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
+void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 5b121371508a..f6cd907b3ec6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -44,10 +44,10 @@
#include "xattr.h"
#include "lops.h"
-enum dinode_demise {
- SHOULD_DELETE_DINODE,
- SHOULD_NOT_DELETE_DINODE,
- SHOULD_DEFER_EVICTION,
+enum evict_behavior {
+ EVICT_SHOULD_DELETE,
+ EVICT_SHOULD_SKIP_DELETE,
+ EVICT_SHOULD_DEFER_DELETE,
};
/**
@@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
sdp->sd_journals = 0;
spin_unlock(&sdp->sd_jindex_spin);
+ down_write(&sdp->sd_log_flush_lock);
sdp->sd_jdesc = NULL;
+ up_write(&sdp->sd_log_flush_lock);
+
while (!list_empty(&list)) {
jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
+ BUG_ON(jd->jd_log_bio);
gfs2_free_journal_extents(jd);
list_del(&jd->jd_list);
iput(jd->jd_inode);
@@ -130,28 +134,22 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
struct gfs2_glock *j_gl = ip->i_gl;
- struct gfs2_log_header_host head;
int error;
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
if (gfs2_withdrawn(sdp))
return -EIO;
- error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
- if (error || gfs2_withdrawn(sdp))
- return error;
-
- if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
- gfs2_consist(sdp);
+ if (sdp->sd_log_sequence == 0) {
+ fs_err(sdp, "unknown status of our own journal jid %d",
+ sdp->sd_lockstruct.ls_jid);
return -EIO;
}
- /* Initialize some head of the log stuff */
- sdp->sd_log_sequence = head.lh_sequence + 1;
- gfs2_log_pointers_init(sdp, head.lh_blkno);
-
error = gfs2_quota_init(sdp);
- if (!error && !gfs2_withdrawn(sdp))
+ if (!error && gfs2_withdrawn(sdp))
+ error = -EIO;
+ if (!error)
set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
return error;
}
@@ -328,7 +326,12 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
struct lfcc *lfcc;
LIST_HEAD(list);
struct gfs2_log_header_host lh;
- int error;
+ int error, error2;
+
+ /*
+ * Grab all the journal glocks in SH mode. We are *probably* doing
+ * that to prevent recovery.
+ */
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
@@ -345,16 +348,19 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
list_add(&lfcc->list, &list);
}
+ gfs2_freeze_unlock(sdp);
+
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
+ LM_FLAG_RECOVER | GL_NOPID,
+ &sdp->sd_freeze_gh);
if (error)
- goto out;
+ goto relock_shared;
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
error = gfs2_jdesc_check(jd);
if (error)
break;
- error = gfs2_find_jhead(jd, &lh, false);
+ error = gfs2_find_jhead(jd, &lh);
if (error)
break;
if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
@@ -363,8 +369,14 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
}
}
- if (error)
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ if (!error)
+ goto out; /* success */
+
+ gfs2_freeze_unlock(sdp);
+
+relock_shared:
+ error2 = gfs2_freeze_lock_shared(sdp);
+ gfs2_assert_withdraw(sdp, !error2);
out:
while (!list_empty(&list)) {
@@ -378,6 +390,7 @@ out:
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
+ const struct inode *inode = &ip->i_inode;
struct gfs2_dinode *str = buf;
str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
@@ -385,15 +398,15 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
- str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
- str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
- str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
- str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
- str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
- str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
- str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
- str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
- str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
+ str->di_mode = cpu_to_be32(inode->i_mode);
+ str->di_uid = cpu_to_be32(i_uid_read(inode));
+ str->di_gid = cpu_to_be32(i_gid_read(inode));
+ str->di_nlink = cpu_to_be32(inode->i_nlink);
+ str->di_size = cpu_to_be64(i_size_read(inode));
+ str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
+ str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
+ str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
+ str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));
str->di_goal_meta = cpu_to_be64(ip->i_goal);
str->di_goal_data = cpu_to_be64(ip->i_goal);
@@ -401,16 +414,16 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_flags = cpu_to_be32(ip->i_diskflags);
str->di_height = cpu_to_be16(ip->i_height);
- str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
+ str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
!(ip->i_diskflags & GFS2_DIF_EXHASH) ?
GFS2_FORMAT_DE : 0);
str->di_depth = cpu_to_be16(ip->i_depth);
str->di_entries = cpu_to_be32(ip->i_entries);
str->di_eattr = cpu_to_be64(ip->i_eattr);
- str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
- str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
- str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
+ str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
+ str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
+ str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
}
/**
@@ -457,7 +470,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
* @flags: The type of dirty
*
* Unfortunately it can be called under any combination of inode
- * glock and transaction lock, so we have to check carefully.
+ * glock and freeze glock, so we have to check carefully.
*
* At the moment this deals only with atime - it should be possible
* to expand that role in future, once a review of the locking has
@@ -474,7 +487,11 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
int need_endtrans = 0;
int ret;
- if (unlikely(gfs2_withdrawn(sdp)))
+ /* This can only happen during incomplete inode creation. */
+ if (unlikely(!ip->i_gl))
+ return;
+
+ if (gfs2_withdrawn(sdp))
return;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -521,38 +538,32 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- gfs2_flush_delete_work(sdp);
- if (!log_write_allowed && current == sdp->sd_quotad_process)
- fs_warn(sdp, "The quotad daemon is withdrawing.\n");
- else if (sdp->sd_quotad_process)
- kthread_stop(sdp->sd_quotad_process);
- sdp->sd_quotad_process = NULL;
+ if (!test_bit(SDF_KILL, &sdp->sd_flags))
+ gfs2_flush_delete_work(sdp);
- if (!log_write_allowed && current == sdp->sd_logd_process)
- fs_warn(sdp, "The logd daemon is withdrawing.\n");
- else if (sdp->sd_logd_process)
- kthread_stop(sdp->sd_logd_process);
- sdp->sd_logd_process = NULL;
+ gfs2_destroy_threads(sdp);
if (log_write_allowed) {
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
+ /* We do two log flushes here. The first one commits dirty inodes
+ * and rgrps to the journal, but queues up revokes to the ail list.
+ * The second flush writes out and removes the revokes.
+ *
+ * The first must be done before the FLUSH_SHUTDOWN code
+ * clears the LIVE flag, otherwise it will not be able to start
+ * a transaction to write its revokes, and the error will cause
+ * a withdraw of the file system. */
+ gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LFC_MAKE_FS_RO);
wait_event_timeout(sdp->sd_log_waitq,
gfs2_log_is_empty(sdp),
HZ * 5);
gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
- } else {
- wait_event_timeout(sdp->sd_log_waitq,
- gfs2_log_is_empty(sdp),
- HZ * 5);
}
gfs2_quota_cleanup(sdp);
-
- if (!log_write_allowed)
- sdp->sd_vfs->s_flags |= SB_RDONLY;
}
/**
@@ -583,15 +594,23 @@ restart:
}
spin_unlock(&sdp->sd_jindex_spin);
- if (!sb_rdonly(sb)) {
+ if (!sb_rdonly(sb))
gfs2_make_fs_ro(sdp);
+ else {
+ if (gfs2_withdrawn(sdp))
+ gfs2_destroy_threads(sdp);
+
+ gfs2_quota_cleanup(sdp);
}
- WARN_ON(gfs2_withdrawing(sdp));
+
+ flush_work(&sdp->sd_withdraw_work);
/* At this point, we're through modifying the disk */
/* Release stuff */
+ gfs2_freeze_unlock(sdp);
+
iput(sdp->sd_jindex);
iput(sdp->sd_statfs_inode);
iput(sdp->sd_rindex);
@@ -617,12 +636,9 @@ restart:
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
gfs2_gl_hash_clear(sdp);
- truncate_inode_pages_final(&sdp->sd_aspace);
+ iput(sdp->sd_inode);
gfs2_delete_debugfs_file(sdp);
- /* Unmount the locking protocol */
- gfs2_lm_unmount(sdp);
- /* At this point, we're through participating in the lockspace */
gfs2_sys_fs_del(sdp);
free_sbd(sdp);
}
@@ -646,59 +662,94 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
return sdp->sd_log_error;
}
-void gfs2_freeze_func(struct work_struct *work)
+static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner)
{
+ struct super_block *sb = sdp->sd_vfs;
int error;
- struct gfs2_holder freeze_gh;
+
+ error = gfs2_freeze_lock_shared(sdp);
+ if (error)
+ goto fail;
+ error = thaw_super(sb, who, freeze_owner);
+ if (!error)
+ return 0;
+
+fail:
+ fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
+ gfs2_assert_withdraw(sdp, 0);
+ return error;
+}
+
+void gfs2_freeze_func(struct work_struct *work)
+{
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
struct super_block *sb = sdp->sd_vfs;
+ int error;
- atomic_inc(&sb->s_active);
- error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
- if (error) {
- gfs2_assert_withdraw(sdp, 0);
- } else {
- atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
- error = thaw_super(sb);
- if (error) {
- fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
- error);
- gfs2_assert_withdraw(sdp, 0);
- }
- gfs2_freeze_unlock(&freeze_gh);
- }
+ mutex_lock(&sdp->sd_freeze_mutex);
+ error = -EBUSY;
+ if (test_bit(SDF_FROZEN, &sdp->sd_flags))
+ goto freeze_failed;
+
+ error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
+ if (error)
+ goto freeze_failed;
+
+ gfs2_freeze_unlock(sdp);
+ set_bit(SDF_FROZEN, &sdp->sd_flags);
+
+ error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL);
+ if (error)
+ goto out;
+
+ clear_bit(SDF_FROZEN, &sdp->sd_flags);
+ goto out;
+
+freeze_failed:
+ fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
+
+out:
+ mutex_unlock(&sdp->sd_freeze_mutex);
deactivate_super(sb);
- clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
- wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
- return;
}
/**
- * gfs2_freeze - prevent further writes to the filesystem
+ * gfs2_freeze_super - prevent further writes to the filesystem
* @sb: the VFS structure for the filesystem
+ * @who: freeze flags
+ * @freeze_owner: owner of the freeze
*
*/
-static int gfs2_freeze(struct super_block *sb)
+static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who,
+ const void *freeze_owner)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
- mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
- error = -EBUSY;
- goto out;
+ if (!mutex_trylock(&sdp->sd_freeze_mutex))
+ return -EBUSY;
+ if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
+ mutex_unlock(&sdp->sd_freeze_mutex);
+ return -EBUSY;
}
for (;;) {
- if (gfs2_withdrawn(sdp)) {
- error = -EINVAL;
+ error = freeze_super(sb, who, freeze_owner);
+ if (error) {
+ fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+ error);
goto out;
}
error = gfs2_lock_fs_check_clean(sdp);
- if (!error)
+ if (!error) {
+ set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+ set_bit(SDF_FROZEN, &sdp->sd_flags);
break;
+ }
+
+ (void)gfs2_do_thaw(sdp, who, freeze_owner);
if (error == -EBUSY)
fs_err(sdp, "waiting for recovery before freeze\n");
@@ -712,32 +763,58 @@ static int gfs2_freeze(struct super_block *sb)
fs_err(sdp, "retrying...\n");
msleep(1000);
}
- set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
+
out:
mutex_unlock(&sdp->sd_freeze_mutex);
return error;
}
+static int gfs2_freeze_fs(struct super_block *sb)
+{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
+ if (gfs2_withdrawn(sdp))
+ return -EIO;
+ }
+ return 0;
+}
+
/**
- * gfs2_unfreeze - reallow writes to the filesystem
+ * gfs2_thaw_super - reallow writes to the filesystem
* @sb: the VFS structure for the filesystem
+ * @who: freeze flags
+ * @freeze_owner: owner of the freeze
*
*/
-static int gfs2_unfreeze(struct super_block *sb)
+static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who,
+ const void *freeze_owner)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
+ int error;
- mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
- !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
+ if (!mutex_trylock(&sdp->sd_freeze_mutex))
+ return -EBUSY;
+ if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) {
mutex_unlock(&sdp->sd_freeze_mutex);
return -EINVAL;
}
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ atomic_inc(&sb->s_active);
+ gfs2_freeze_unlock(sdp);
+
+ error = gfs2_do_thaw(sdp, who, freeze_owner);
+
+ if (!error) {
+ clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+ clear_bit(SDF_FROZEN, &sdp->sd_flags);
+ }
mutex_unlock(&sdp->sd_freeze_mutex);
- return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
+ deactivate_super(sb);
+ return error;
}
/**
@@ -903,6 +980,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_files = sc.sc_dinodes + sc.sc_free;
buf->f_ffree = sc.sc_free;
buf->f_namelen = GFS2_FNAMESIZE;
+ buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
return 0;
}
@@ -925,12 +1003,12 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
static int gfs2_drop_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
- inode->i_nlink &&
+ if (inode->i_nlink &&
gfs2_holder_initialized(&ip->i_iopen_gh)) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
- if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+ if (glock_needs_demote(gl))
clear_nlink(inode);
}
@@ -945,22 +1023,18 @@ static int gfs2_drop_inode(struct inode *inode)
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
gfs2_glock_hold(gl);
- if (!gfs2_queue_delete_work(gl, 0))
- gfs2_glock_queue_put(gl);
+ if (!gfs2_queue_verify_delete(gl, true))
+ gfs2_glock_put_async(gl);
return 0;
}
- return generic_drop_inode(inode);
-}
+ /*
+ * No longer cache inodes when trying to evict them all.
+ */
+ if (test_bit(SDF_EVICTING, &sdp->sd_flags))
+ return 1;
-static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
-{
- do {
- if (d1 == d2)
- return 1;
- d1 = d1->d_parent;
- } while (!IS_ROOT(d1));
- return 0;
+ return inode_generic_drop(inode);
}
/**
@@ -975,9 +1049,16 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args;
- int val;
+ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
+
+ spin_lock(&sdp->sd_tune.gt_spin);
+ logd_secs = sdp->sd_tune.gt_logd_secs;
+ quota_quantum = sdp->sd_tune.gt_quota_quantum;
+ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
+ statfs_slow = sdp->sd_tune.gt_statfs_slow;
+ spin_unlock(&sdp->sd_tune.gt_spin);
- if (is_ancestor(root, sdp->sd_master_dir))
+ if (is_subdir(root, sdp->sd_master_dir))
seq_puts(s, ",meta");
if (args->ar_lockproto[0])
seq_show_option(s, "lockproto", args->ar_lockproto);
@@ -1005,6 +1086,9 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
case GFS2_QUOTA_ON:
state = "on";
break;
+ case GFS2_QUOTA_QUIET:
+ state = "quiet";
+ break;
default:
state = "unknown";
break;
@@ -1030,17 +1114,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
}
if (args->ar_discard)
seq_puts(s, ",discard");
- val = sdp->sd_tune.gt_logd_secs;
- if (val != 30)
- seq_printf(s, ",commit=%d", val);
- val = sdp->sd_tune.gt_statfs_quantum;
- if (val != 30)
- seq_printf(s, ",statfs_quantum=%d", val);
- else if (sdp->sd_tune.gt_statfs_slow)
+ if (logd_secs != 30)
+ seq_printf(s, ",commit=%d", logd_secs);
+ if (statfs_quantum != 30)
+ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
+ else if (statfs_slow)
seq_puts(s, ",statfs_quantum=0");
- val = sdp->sd_tune.gt_quota_quantum;
- if (val != 60)
- seq_printf(s, ",quota_quantum=%d", val);
+ if (quota_quantum != 60)
+ seq_printf(s, ",quota_quantum=%d", quota_quantum);
if (args->ar_statfs_percent)
seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
@@ -1050,6 +1131,9 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
case GFS2_ERRORS_WITHDRAW:
state = "withdraw";
break;
+ case GFS2_ERRORS_DEACTIVATE:
+ state = "deactivate";
+ break;
case GFS2_ERRORS_PANIC:
state = "panic";
break;
@@ -1070,70 +1154,6 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
return 0;
}
-static void gfs2_final_release_pages(struct gfs2_inode *ip)
-{
- struct inode *inode = &ip->i_inode;
- struct gfs2_glock *gl = ip->i_gl;
-
- truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
- truncate_inode_pages(&inode->i_data, 0);
-
- if (atomic_read(&gl->gl_revokes) == 0) {
- clear_bit(GLF_LFLUSH, &gl->gl_flags);
- clear_bit(GLF_DIRTY, &gl->gl_flags);
- }
-}
-
-static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrpd *rgd;
- struct gfs2_holder gh;
- int error;
-
- if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
- gfs2_consist_inode(ip);
- return -EIO;
- }
-
- error = gfs2_rindex_update(sdp);
- if (error)
- return error;
-
- error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
- if (error)
- return error;
-
- rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
- if (!rgd) {
- gfs2_consist_inode(ip);
- error = -EIO;
- goto out_qs;
- }
-
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_NODE_SCOPE, &gh);
- if (error)
- goto out_qs;
-
- error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
- sdp->sd_jdesc->jd_blocks);
- if (error)
- goto out_rg_gunlock;
-
- gfs2_free_di(rgd, ip);
-
- gfs2_final_release_pages(ip);
-
- gfs2_trans_end(sdp);
-
-out_rg_gunlock:
- gfs2_glock_dq_uninit(&gh);
-out_qs:
- gfs2_quota_unhold(ip);
- return error;
-}
-
/**
* gfs2_glock_put_eventually
* @gl: The glock to put
@@ -1145,58 +1165,60 @@ out_qs:
static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
if (current->flags & PF_MEMALLOC)
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
else
gfs2_glock_put(gl);
}
-static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_holder *gh = &ip->i_iopen_gh;
- long timeout = 5 * HZ;
int error;
gh->gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(gh);
/*
- * If there are no other lock holders, we'll get the lock immediately.
+ * If there are no other lock holders, we will immediately get
+ * exclusive access to the iopen glock here.
+ *
* Otherwise, the other nodes holding the lock will be notified about
- * our locking request. If they don't have the inode open, they'll
- * evict the cached inode and release the lock. Otherwise, if they
- * poke the inode glock, we'll take this as an indication that they
- * still need the iopen glock and that they'll take care of deleting
- * the inode when they're done. As a last resort, if another node
- * keeps holding the iopen glock without showing any activity on the
- * inode glock, we'll eventually time out.
+ * our locking request (see iopen_go_callback()). If they do not have
+ * the inode open, they are expected to evict the cached inode and
+ * release the lock, allowing us to proceed.
*
- * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
- * locking request as an optimization to notify lock holders as soon as
- * possible. Without that flag, they'd be notified implicitly by the
- * second locking request.
+ * Otherwise, if they cannot evict the inode, they are expected to poke
+ * the inode glock (note: not the iopen glock). We will notice that
+ * and stop waiting for the iopen glock immediately. The other node(s)
+ * are then expected to take care of deleting the inode when they no
+ * longer use it.
+ *
+ * As a last resort, if another node keeps holding the iopen glock
+ * without showing any activity on the inode glock, we will eventually
+ * time out and fail the iopen glock upgrade.
*/
- gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
- error = gfs2_glock_nq(gh);
- if (error != GLR_TRYFAILED)
- return !error;
-
gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
error = gfs2_glock_nq(gh);
if (error)
- return false;
+ return EVICT_SHOULD_SKIP_DELETE;
- timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+ wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
!test_bit(HIF_WAIT, &gh->gh_iflags) ||
- test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
- timeout);
+ glock_needs_demote(ip->i_gl),
+ 5 * HZ);
if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
gfs2_glock_dq(gh);
- return false;
+ if (glock_needs_demote(ip->i_gl))
+ return EVICT_SHOULD_SKIP_DELETE;
+ return EVICT_SHOULD_DEFER_DELETE;
}
- return true;
+ error = gfs2_glock_holder_ready(gh);
+ if (error)
+ return EVICT_SHOULD_SKIP_DELETE;
+ return EVICT_SHOULD_DELETE;
}
/**
@@ -1209,62 +1231,47 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
*
* Returns: the fate of the dinode
*/
-static enum dinode_demise evict_should_delete(struct inode *inode,
- struct gfs2_holder *gh)
+static enum evict_behavior evict_should_delete(struct inode *inode,
+ struct gfs2_holder *gh)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct super_block *sb = inode->i_sb;
struct gfs2_sbd *sdp = sb->s_fs_info;
int ret;
- if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
- BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
- goto should_delete;
- }
-
- if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
- return SHOULD_DEFER_EVICTION;
+ if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
+ test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
+ return EVICT_SHOULD_DEFER_DELETE;
/* Deletes should never happen under memory pressure anymore. */
if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
- return SHOULD_DEFER_EVICTION;
+ return EVICT_SHOULD_DEFER_DELETE;
/* Must not read inode block until block type has been verified */
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
- if (unlikely(ret)) {
- glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
- ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- return SHOULD_DEFER_EVICTION;
- }
+ if (unlikely(ret))
+ return EVICT_SHOULD_SKIP_DELETE;
if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
- return SHOULD_NOT_DELETE_DINODE;
+ return EVICT_SHOULD_SKIP_DELETE;
ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
if (ret)
- return SHOULD_NOT_DELETE_DINODE;
+ return EVICT_SHOULD_SKIP_DELETE;
- if (test_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags)) {
- ret = gfs2_instantiate(gh);
- if (ret)
- return SHOULD_NOT_DELETE_DINODE;
- }
+ ret = gfs2_instantiate(gh);
+ if (ret)
+ return EVICT_SHOULD_SKIP_DELETE;
/*
* The inode may have been recreated in the meantime.
*/
if (inode->i_nlink)
- return SHOULD_NOT_DELETE_DINODE;
+ return EVICT_SHOULD_SKIP_DELETE;
-should_delete:
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
- test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
- if (!gfs2_upgrade_iopen_glock(inode)) {
- gfs2_holder_uninit(&ip->i_iopen_gh);
- return SHOULD_NOT_DELETE_DINODE;
- }
- }
- return SHOULD_DELETE_DINODE;
+ test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
+ return gfs2_upgrade_iopen_glock(inode);
+ return EVICT_SHOULD_DELETE;
}
/**
@@ -1284,7 +1291,7 @@ static int evict_unlinked_inode(struct inode *inode)
}
if (ip->i_eattr) {
- ret = gfs2_ea_dealloc(ip);
+ ret = gfs2_ea_dealloc(ip, true);
if (ret)
goto out;
}
@@ -1295,13 +1302,22 @@ static int evict_unlinked_inode(struct inode *inode)
goto out;
}
- /* We're about to clear the bitmap for the dinode, but as soon as we
- do, gfs2_create_inode can create another inode at the same block
- location and try to set gl_object again. We clear gl_object here so
- that subsequent inode creates don't see an old gl_object. */
- glock_clear_object(ip->i_gl, ip);
+ /*
+ * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
+ * can get called to recreate it, or even gfs2_inode_lookup() if the
+ * inode was recreated on another node in the meantime.
+ *
+ * However, inserting the new inode into the inode hash table will not
+ * succeed until the old inode is removed, and that only happens after
+ * ->evict_inode() returns. The new inode is attached to its inode and
+ * iopen glocks after inserting it into the inode hash table, so at
+ * that point we can be sure that both glocks are unused.
+ */
+
ret = gfs2_dinode_dealloc(ip);
- gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
+ if (!ret && ip->i_gl)
+ gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
+
out:
return ret;
}
@@ -1366,21 +1382,35 @@ static void gfs2_evict_inode(struct inode *inode)
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
+ enum evict_behavior behavior;
int ret;
- if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
- clear_inode(inode);
- return;
- }
-
- if (inode->i_nlink || sb_rdonly(sb))
+ gfs2_holder_mark_uninitialized(&gh);
+ if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
goto out;
- gfs2_holder_mark_uninitialized(&gh);
- ret = evict_should_delete(inode, &gh);
- if (ret == SHOULD_DEFER_EVICTION)
+ /*
+ * In case of an incomplete mount, gfs2_evict_inode() may be called for
+ * system files without having an active journal to write to. In that
+ * case, skip the filesystem evict.
+ */
+ if (!sdp->sd_jdesc)
goto out;
- if (ret == SHOULD_DELETE_DINODE)
+
+ behavior = evict_should_delete(inode, &gh);
+ if (behavior == EVICT_SHOULD_DEFER_DELETE &&
+ !test_bit(SDF_KILL, &sdp->sd_flags)) {
+ struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
+
+ if (io_gl) {
+ gfs2_glock_hold(io_gl);
+ if (!gfs2_queue_verify_delete(io_gl, true))
+ gfs2_glock_put(io_gl);
+ goto out;
+ }
+ behavior = EVICT_SHOULD_SKIP_DELETE;
+ }
+ if (behavior == EVICT_SHOULD_DELETE)
ret = evict_unlinked_inode(inode);
else
ret = evict_linked_inode(inode);
@@ -1388,53 +1418,50 @@ static void gfs2_evict_inode(struct inode *inode)
if (gfs2_rs_active(&ip->i_res))
gfs2_rs_deltree(&ip->i_res);
- if (gfs2_holder_initialized(&gh)) {
- glock_clear_object(ip->i_gl, ip);
- gfs2_glock_dq_uninit(&gh);
- }
if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
+ if (gfs2_holder_initialized(&gh))
+ gfs2_glock_dq_uninit(&gh);
truncate_inode_pages_final(&inode->i_data);
if (ip->i_qadata)
gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
- gfs2_rs_delete(ip, NULL);
+ gfs2_rs_deltree(&ip->i_res);
gfs2_ordered_del_inode(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
- if (ip->i_gl) {
- glock_clear_object(ip->i_gl, ip);
- wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
- gfs2_glock_add_to_lru(ip->i_gl);
- gfs2_glock_put_eventually(ip->i_gl);
- ip->i_gl = NULL;
- }
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
glock_clear_object(gl, ip);
- if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
- ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq(&ip->i_iopen_gh);
- }
gfs2_glock_hold(gl);
- gfs2_holder_uninit(&ip->i_iopen_gh);
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
gfs2_glock_put_eventually(gl);
}
+ if (ip->i_gl) {
+ glock_clear_object(ip->i_gl, ip);
+ wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ gfs2_glock_put_eventually(ip->i_gl);
+ rcu_assign_pointer(ip->i_gl, NULL);
+ }
}
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
struct gfs2_inode *ip;
- ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
+ ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
if (!ip)
return NULL;
+ ip->i_no_addr = 0;
+ ip->i_no_formal_ino = 0;
ip->i_flags = 0;
ip->i_gl = NULL;
gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
+ ip->i_diskflags = 0;
ip->i_rahead = 0;
return &ip->i_inode;
}
@@ -1444,7 +1471,7 @@ static void gfs2_free_inode(struct inode *inode)
kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}
-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
+void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
struct local_statfs_inode *lsi, *safe;
@@ -1459,8 +1486,8 @@ extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
}
}
-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
- unsigned int index)
+struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+ unsigned int index)
{
struct local_statfs_inode *lsi;
@@ -1481,8 +1508,9 @@ const struct super_operations gfs2_super_ops = {
.evict_inode = gfs2_evict_inode,
.put_super = gfs2_put_super,
.sync_fs = gfs2_sync_fs,
- .freeze_super = gfs2_freeze,
- .thaw_super = gfs2_unfreeze,
+ .freeze_super = gfs2_freeze_super,
+ .freeze_fs = gfs2_freeze_fs,
+ .thaw_super = gfs2_thaw_super,
.statfs = gfs2_statfs,
.drop_inode = gfs2_drop_inode,
.show_options = gfs2_show_options,
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 58d13fd77aed..173f1e74c2a9 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -15,7 +15,7 @@
#define GFS2_FS_FORMAT_MIN (1801)
#define GFS2_FS_FORMAT_MAX (1802)
-extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
+void gfs2_lm_unmount(struct gfs2_sbd *sdp);
static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
{
@@ -26,31 +26,32 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
return x;
}
-extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
+void gfs2_jindex_free(struct gfs2_sbd *sdp);
-extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
-extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
-extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
- struct gfs2_inode **ipp);
+struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
+int gfs2_jdesc_check(struct gfs2_jdesc *jd);
+int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
+ struct gfs2_inode **ipp);
-extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
-extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
-extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
-extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
-extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
- s64 dinodes);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
- const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
- void *buf);
-extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
-extern int gfs2_statfs_sync(struct super_block *sb, int type);
-extern void gfs2_freeze_func(struct work_struct *work);
+int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
+void gfs2_online_uevent(struct gfs2_sbd *sdp);
+void gfs2_destroy_threads(struct gfs2_sbd *sdp);
+int gfs2_statfs_init(struct gfs2_sbd *sdp);
+void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
+ s64 dinodes);
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+ const void *buf);
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
+ void *buf);
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
+int gfs2_statfs_sync(struct super_block *sb, int type);
+void gfs2_freeze_func(struct work_struct *work);
-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
- unsigned int index);
-extern void free_sbd(struct gfs2_sbd *sdp);
+void free_local_statfs_inodes(struct gfs2_sbd *sdp);
+struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+ unsigned int index);
+void free_sbd(struct gfs2_sbd *sdp);
extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;
@@ -58,8 +59,8 @@ extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
extern const struct dentry_operations gfs2_dops;
-extern const struct xattr_handler *gfs2_xattr_handlers_max[];
-extern const struct xattr_handler **gfs2_xattr_handlers_min;
+extern const struct xattr_handler * const gfs2_xattr_handlers_max[];
+extern const struct xattr_handler * const *gfs2_xattr_handlers_min;
#endif /* __SUPER_DOT_H__ */
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c0a34d9ddee4..7051db9dbea0 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -15,7 +15,7 @@
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include "gfs2.h"
#include "incore.h"
@@ -59,7 +59,7 @@ static struct kset *gfs2_kset;
static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u:%u\n",
+ return sysfs_emit(buf, "%u:%u\n",
MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}
@@ -68,7 +68,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
unsigned long f = sdp->sd_flags;
ssize_t s;
- s = snprintf(buf, PAGE_SIZE,
+ s = sysfs_emit(buf,
"Journal Checked: %d\n"
"Journal Live: %d\n"
"Journal ID: %d\n"
@@ -82,11 +82,9 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"RO Recovery: %d\n"
"Skip DLM Unlock: %d\n"
"Force AIL Flush: %d\n"
+ "FS Freeze Initiator: %d\n"
"FS Frozen: %d\n"
- "Withdrawing: %d\n"
- "Withdraw In Prog: %d\n"
- "Remote Withdraw: %d\n"
- "Withdraw Recovery: %d\n"
+ "Killing: %d\n"
"sd_log_error: %d\n"
"sd_log_flush_lock: %d\n"
"sd_log_num_revoke: %u\n"
@@ -96,7 +94,10 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"sd_log_flush_head: %d\n"
"sd_log_flush_tail: %d\n"
"sd_log_blks_reserved: %d\n"
- "sd_log_revokes_available: %d\n",
+ "sd_log_revokes_available: %d\n"
+ "sd_log_pinned: %d\n"
+ "sd_log_thresh1: %d\n"
+ "sd_log_thresh2: %d\n",
test_bit(SDF_JOURNAL_CHECKED, &f),
test_bit(SDF_JOURNAL_LIVE, &f),
(sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
@@ -110,11 +111,9 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
test_bit(SDF_RORECOVERY, &f),
test_bit(SDF_SKIP_DLM_UNLOCK, &f),
test_bit(SDF_FORCE_AIL_FLUSH, &f),
- test_bit(SDF_FS_FROZEN, &f),
- test_bit(SDF_WITHDRAWING, &f),
- test_bit(SDF_WITHDRAW_IN_PROG, &f),
- test_bit(SDF_REMOTE_WITHDRAW, &f),
- test_bit(SDF_WITHDRAW_RECOVERY, &f),
+ test_bit(SDF_FREEZE_INITIATOR, &f),
+ test_bit(SDF_FROZEN, &f),
+ test_bit(SDF_KILL, &f),
sdp->sd_log_error,
rwsem_is_locked(&sdp->sd_log_flush_lock),
sdp->sd_log_num_revoke,
@@ -124,13 +123,16 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
sdp->sd_log_flush_head,
sdp->sd_log_flush_tail,
sdp->sd_log_blks_reserved,
- atomic_read(&sdp->sd_log_revokes_available));
+ atomic_read(&sdp->sd_log_revokes_available),
+ atomic_read(&sdp->sd_log_pinned),
+ atomic_read(&sdp->sd_log_thresh1),
+ atomic_read(&sdp->sd_log_thresh2));
return s;
}
static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
+ return sysfs_emit(buf, "%s\n", sdp->sd_fsname);
}
static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
@@ -140,7 +142,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
buf[0] = '\0';
if (uuid_is_null(&s->s_uuid))
return 0;
- return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
+ return sysfs_emit(buf, "%pUB\n", &s->s_uuid);
}
static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -148,7 +150,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
struct super_block *sb = sdp->sd_vfs;
int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
- return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
+ return sysfs_emit(buf, "%d\n", frozen);
}
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -164,10 +166,10 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
switch (n) {
case 0:
- error = thaw_super(sdp->sd_vfs);
+ error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL);
break;
case 1:
- error = freeze_super(sdp->sd_vfs);
+ error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL);
break;
default:
return -EINVAL;
@@ -184,7 +186,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
unsigned int b = gfs2_withdrawn(sdp);
- return snprintf(buf, PAGE_SIZE, "%u\n", b);
+ return sysfs_emit(buf, "%u\n", b);
}
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -326,7 +328,7 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
return -EINVAL;
if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
fs_info(sdp, "demote interface used\n");
- rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
+ rv = gfs2_glock_get(sdp, glnum, glops, NO_CREATE, &gl);
if (rv)
return rv;
gfs2_glock_cb(gl, glmode);
@@ -387,7 +389,7 @@ static struct kobj_type gfs2_ktype = {
static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
- return sprintf(buf, "%s\n", ops->lm_proto_name);
+ return sysfs_emit(buf, "%s\n", ops->lm_proto_name);
}
static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
@@ -398,7 +400,7 @@ static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
val = 1;
- ret = sprintf(buf, "%d\n", val);
+ ret = sysfs_emit(buf, "%d\n", val);
return ret;
}
@@ -423,33 +425,27 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
return len;
}
-static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
-{
- int val = completion_done(&sdp->sd_wdack) ? 1 : 0;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+static ssize_t withdraw_helper_status_store(struct gfs2_sbd *sdp,
+ const char *buf,
+ size_t len)
{
int ret, val;
ret = kstrtoint(buf, 0, &val);
if (ret)
return ret;
-
- if ((val == 1) &&
- !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
- complete(&sdp->sd_wdack);
- else
+ if (val < 0 || val > 1)
return -EINVAL;
+
+ sdp->sd_withdraw_helper_status = val;
+ complete(&sdp->sd_withdraw_helper);
return len;
}
static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", ls->ls_first);
+ return sysfs_emit(buf, "%d\n", ls->ls_first);
}
static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -482,7 +478,7 @@ out:
static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
+ return sysfs_emit(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}
int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
@@ -540,18 +536,18 @@ out:
static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
+ return sysfs_emit(buf, "%d\n", ls->ls_recover_jid_done);
}
static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
+ return sysfs_emit(buf, "%d\n", ls->ls_recover_jid_status);
}
static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
- return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
+ return sysfs_emit(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}
static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -589,7 +585,7 @@ static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
-GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
+GDLM_ATTR(withdraw, 0200, NULL, withdraw_helper_status_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
@@ -616,7 +612,7 @@ static struct attribute *lock_module_attrs[] = {
static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u %u\n",
+ return sysfs_emit(buf, "%u %u\n",
sdp->sd_tune.gt_quota_scale_num,
sdp->sd_tune.gt_quota_scale_den);
}
@@ -669,7 +665,7 @@ static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)
#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
- return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
+ return sysfs_emit(buf, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)
@@ -688,6 +684,7 @@ TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
+TUNE_ATTR(withdraw_helper_timeout, 1);
static struct attribute *tune_attrs[] = {
&tune_attr_quota_warn_period.attr,
@@ -698,6 +695,7 @@ static struct attribute *tune_attrs[] = {
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
+ &tune_attr_withdraw_helper_timeout.attr,
NULL,
};
@@ -754,7 +752,6 @@ fail_reg:
fs_err(sdp, "error %d adding sysfs files\n", error);
kobject_put(&sdp->sd_kobj);
wait_for_completion(&sdp->sd_kobj_unregister);
- sb->s_fs_info = NULL;
return error;
}
@@ -767,11 +764,10 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
wait_for_completion(&sdp->sd_kobj_unregister);
}
-static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int gfs2_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
- struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
- struct super_block *s = sdp->sd_vfs;
+ const struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
+ const struct super_block *s = sdp->sd_vfs;
add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index a5deb9f86831..fcfbf68ec725 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -52,13 +52,19 @@
{(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
{(1UL << GLF_DIRTY), "y" }, \
{(1UL << GLF_LFLUSH), "f" }, \
- {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
- {(1UL << GLF_REPLY_PENDING), "r" }, \
- {(1UL << GLF_INITIAL), "I" }, \
- {(1UL << GLF_FROZEN), "F" }, \
+ {(1UL << GLF_PENDING_REPLY), "R" }, \
+ {(1UL << GLF_HAVE_REPLY), "r" }, \
+ {(1UL << GLF_INITIAL), "a" }, \
+ {(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \
{(1UL << GLF_LRU), "L" }, \
{(1UL << GLF_OBJECT), "o" }, \
- {(1UL << GLF_BLOCKING), "b" })
+ {(1UL << GLF_BLOCKING), "b" }, \
+ {(1UL << GLF_INSTANTIATE_NEEDED), "n" }, \
+ {(1UL << GLF_INSTANTIATE_IN_PROG), "N" }, \
+ {(1UL << GLF_TRY_TO_EVICT), "e" }, \
+ {(1UL << GLF_VERIFY_DELETE), "E" }, \
+ {(1UL << GLF_DEFER_DELETE), "s" }, \
+ {(1UL << GLF_CANCELING), "C" })
#ifndef NUMPTY
#define NUMPTY
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 63fec11ef2ce..6df65540e13d 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -49,7 +49,7 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
}
BUG_ON(blocks == 0 && revokes == 0);
- if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ if (gfs2_withdrawn(sdp))
return -EROFS;
tr->tr_ip = ip;
@@ -85,25 +85,30 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
*/
down_read(&sdp->sd_log_flush_lock);
+ if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)))
+ goto out_not_live;
if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
goto reserved;
+
up_read(&sdp->sd_log_flush_lock);
gfs2_log_reserve(sdp, tr, &extra_revokes);
down_read(&sdp->sd_log_flush_lock);
-
-reserved:
- gfs2_log_release_revokes(sdp, extra_revokes);
if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
- gfs2_log_release_revokes(sdp, tr->tr_revokes);
- up_read(&sdp->sd_log_flush_lock);
+ revokes = tr->tr_revokes + extra_revokes;
+ gfs2_log_release_revokes(sdp, revokes);
gfs2_log_release(sdp, tr->tr_reserved);
- sb_end_intwrite(sdp->sd_vfs);
- return -EROFS;
+ goto out_not_live;
}
+reserved:
+ gfs2_log_release_revokes(sdp, extra_revokes);
current->journal_info = tr;
-
return 0;
+
+out_not_live:
+ up_read(&sdp->sd_log_flush_lock);
+ sb_end_intwrite(sdp->sd_vfs);
+ return -EROFS;
}
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
@@ -226,14 +231,35 @@ out:
unlock_buffer(bh);
}
+void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
+ size_t from, size_t len)
+{
+ struct buffer_head *head = folio_buffers(folio);
+ unsigned int bsize = head->b_size;
+ struct buffer_head *bh;
+ size_t to = from + len;
+ size_t start, end;
+
+ for (bh = head, start = 0; bh != head || !start;
+ bh = bh->b_this_page, start = end) {
+ end = start + bsize;
+ if (end <= from)
+ continue;
+ if (start >= to)
+ break;
+ set_buffer_uptodate(bh);
+ gfs2_trans_add_data(gl, bh);
+ }
+}
+
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct super_block *sb = sdp->sd_vfs;
struct gfs2_bufdata *bd;
struct gfs2_meta_header *mh;
struct gfs2_trans *tr = current->journal_info;
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
lock_buffer(bh);
if (buffer_pinned(bh)) {
@@ -245,12 +271,12 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
if (bd == NULL) {
gfs2_log_unlock(sdp);
unlock_buffer(bh);
- lock_page(bh->b_page);
+ folio_lock(bh->b_folio);
if (bh->b_private == NULL)
bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
- unlock_page(bh->b_page);
+ folio_unlock(bh->b_folio);
lock_buffer(bh);
gfs2_log_lock(sdp);
}
@@ -267,13 +293,15 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
(unsigned long long)bd->bd_bh->b_blocknr);
BUG();
}
- if (unlikely(state == SFS_FROZEN)) {
- fs_info(sdp, "GFS2:adding buf while frozen\n");
- gfs2_assert_withdraw(sdp, 0);
- }
- if (unlikely(gfs2_withdrawn(sdp))) {
+ if (gfs2_withdrawn(sdp)) {
fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
(unsigned long long)bd->bd_bh->b_blocknr);
+ goto out_unlock;
+ }
+ if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) {
+ fs_info(sdp, "GFS2:adding buf while frozen\n");
+ gfs2_withdraw(sdp);
+ goto out_unlock;
}
gfs2_pin(sdp, bd->bd_bh);
mh->__pad0 = cpu_to_be64(0);
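
The new gfs2_trans_add_databufs() above walks a folio's buffer heads and only adds the buffers that overlap the byte range [from, from + len) to the transaction. A small userspace sketch of the same interval walk, with a fixed block size standing in for the buffer heads and a printf standing in for gfs2_trans_add_data() (names and sizes are illustrative, not kernel code):

#include <stdio.h>
#include <stddef.h>

static void mark_overlapping_blocks(size_t page_size, size_t bsize,
				    size_t from, size_t len)
{
	size_t to = from + len;
	size_t start, end;

	for (start = 0; start < page_size; start = end) {
		end = start + bsize;
		if (end <= from)
			continue;	/* block ends before the range */
		if (start >= to)
			break;		/* block starts after the range */
		printf("block [%zu, %zu) overlaps [%zu, %zu)\n",
		       start, end, from, to);
	}
}

int main(void)
{
	/* e.g. 4 KiB page, 1 KiB blocks, dirty range of 1500 bytes at 1000 */
	mark_overlapping_blocks(4096, 1024, 1000, 1500);
	return 0;
}
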
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index c76ad9a4c75a..790c55f59e61 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -34,17 +34,19 @@ static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned
return rgd->rd_length;
}
-extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
- unsigned int blocks, unsigned int revokes,
- unsigned long ip);
-extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
- unsigned int revokes);
-
-extern void gfs2_trans_end(struct gfs2_sbd *sdp);
-extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
-extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
-extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
-extern void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
+int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+ unsigned int blocks, unsigned int revokes,
+ unsigned long ip);
+int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+ unsigned int revokes);
+
+void gfs2_trans_end(struct gfs2_sbd *sdp);
+void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
+ size_t from, size_t len);
+void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
#endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 8241029a2a5d..02603200846d 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -9,6 +9,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
+#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
#include <linux/delay.h>
@@ -57,7 +58,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
struct gfs2_inode *ip;
ip = GFS2_I(jd->jd_inode);
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_RECOVER |
GL_EXACT | GL_NOCACHE, &j_gh);
if (error) {
if (verbose)
@@ -72,7 +73,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
"mount.\n");
goto out_unlock;
}
- error = gfs2_find_jhead(jd, &head, false);
+ error = gfs2_find_jhead(jd, &head);
if (error) {
if (verbose)
fs_err(sdp, "Error parsing journal for spectator "
@@ -93,214 +94,53 @@ out_unlock:
}
/**
- * gfs2_freeze_lock - hold the freeze glock
+ * gfs2_freeze_lock_shared - hold the freeze glock
* @sdp: the superblock
- * @freeze_gh: pointer to the requested holder
- * @caller_flags: any additional flags needed by the caller
*/
-int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
- int caller_flags)
+int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
{
- int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
+ int flags = LM_FLAG_RECOVER | GL_EXACT;
int error;
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
- freeze_gh);
+ &sdp->sd_freeze_gh);
if (error && error != GLR_TRYFAILED)
- fs_err(sdp, "can't lock the freeze lock: %d\n", error);
+ fs_err(sdp, "can't lock the freeze glock: %d\n", error);
return error;
}
-void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
+void gfs2_freeze_unlock(struct gfs2_sbd *sdp)
{
- if (gfs2_holder_initialized(freeze_gh))
- gfs2_glock_dq_uninit(freeze_gh);
+ if (gfs2_holder_initialized(&sdp->sd_freeze_gh))
+ gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
}
-static void signal_our_withdraw(struct gfs2_sbd *sdp)
+static void do_withdraw(struct gfs2_sbd *sdp)
{
- struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
- struct inode *inode;
- struct gfs2_inode *ip;
- struct gfs2_glock *i_gl;
- u64 no_formal_ino;
- int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- int ret = 0;
- int tries;
-
- if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
+ down_write(&sdp->sd_log_flush_lock);
+ if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ up_write(&sdp->sd_log_flush_lock);
return;
-
- gfs2_ail_drain(sdp); /* frees all transactions */
- inode = sdp->sd_jdesc->jd_inode;
- ip = GFS2_I(inode);
- i_gl = ip->i_gl;
- no_formal_ino = ip->i_no_formal_ino;
-
- /* Prevent any glock dq until withdraw recovery is complete */
- set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
- /*
- * Don't tell dlm we're bailing until we have no more buffers in the
- * wind. If journal had an IO error, the log code should just purge
- * the outstanding buffers rather than submitting new IO. Making the
- * file system read-only will flush the journal, etc.
- *
- * During a normal unmount, gfs2_make_fs_ro calls gfs2_log_shutdown
- * which clears SDF_JOURNAL_LIVE. In a withdraw, we must not write
- * any UNMOUNT log header, so we can't call gfs2_log_shutdown, and
- * therefore we need to clear SDF_JOURNAL_LIVE manually.
- */
- clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (!sb_rdonly(sdp->sd_vfs)) {
- struct gfs2_holder freeze_gh;
-
- gfs2_holder_mark_uninitialized(&freeze_gh);
- if (sdp->sd_freeze_gl &&
- !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
- ret = gfs2_freeze_lock(sdp, &freeze_gh,
- log_write_allowed ? 0 : LM_FLAG_TRY);
- if (ret == GLR_TRYFAILED)
- ret = 0;
- }
- if (!ret)
- gfs2_make_fs_ro(sdp);
- gfs2_freeze_unlock(&freeze_gh);
- }
-
- if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
- if (!ret)
- ret = -EIO;
- clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
- goto skip_recovery;
- }
- /*
- * Drop the glock for our journal so another node can recover it.
- */
- if (gfs2_holder_initialized(&sdp->sd_journal_gh)) {
- gfs2_glock_dq_wait(&sdp->sd_journal_gh);
- gfs2_holder_uninit(&sdp->sd_journal_gh);
- }
- sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq(&sdp->sd_jinode_gh);
- if (test_bit(SDF_FS_FROZEN, &sdp->sd_flags)) {
- /* Make sure gfs2_unfreeze works if partially-frozen */
- flush_work(&sdp->sd_freeze_work);
- atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
- thaw_super(sdp->sd_vfs);
- } else {
- wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
- TASK_UNINTERRUPTIBLE);
- }
-
- /*
- * holder_uninit to force glock_put, to force dlm to let go
- */
- gfs2_holder_uninit(&sdp->sd_jinode_gh);
-
- /*
- * Note: We need to be careful here:
- * Our iput of jd_inode will evict it. The evict will dequeue its
- * glock, but the glock dq will wait for the withdraw unless we have
- * exception code in glock_dq.
- */
- iput(inode);
- /*
- * Wait until the journal inode's glock is freed. This allows try locks
- * on other nodes to be successful, otherwise we remain the owner of
- * the glock as far as dlm is concerned.
- */
- if (i_gl->gl_ops->go_free) {
- set_bit(GLF_FREEING, &i_gl->gl_flags);
- wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
}
+ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ up_write(&sdp->sd_log_flush_lock);
- /*
- * Dequeue the "live" glock, but keep a reference so it's never freed.
- */
- gfs2_glock_hold(live_gl);
- gfs2_glock_dq_wait(&sdp->sd_live_gh);
- /*
- * We enqueue the "live" glock in EX so that all other nodes
- * get a demote request and act on it. We don't really want the
- * lock in EX, so we send a "try" lock with 1CB to produce a callback.
- */
- fs_warn(sdp, "Requesting recovery of jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | LM_FLAG_NOEXP,
- &sdp->sd_live_gh);
- msleep(GL_GLOCK_MAX_HOLD);
- /*
- * This will likely fail in a cluster, but succeed standalone:
- */
- ret = gfs2_glock_nq(&sdp->sd_live_gh);
+ gfs2_ail_drain(sdp); /* frees all transactions */
- /*
- * If we actually got the "live" lock in EX mode, there are no other
- * nodes available to replay our journal. So we try to replay it
- * ourselves. We hold the "live" glock to prevent other mounters
- * during recovery, then just dequeue it and reacquire it in our
- * normal SH mode. Just in case the problem that caused us to
- * withdraw prevents us from recovering our journal (e.g. io errors
- * and such) we still check if the journal is clean before proceeding
- * but we may wait forever until another mounter does the recovery.
- */
- if (ret == 0) {
- fs_warn(sdp, "No other mounters found. Trying to recover our "
- "own journal jid %d.\n", sdp->sd_lockstruct.ls_jid);
- if (gfs2_recover_journal(sdp->sd_jdesc, 1))
- fs_warn(sdp, "Unable to recover our journal jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- gfs2_glock_dq_wait(&sdp->sd_live_gh);
- gfs2_holder_reinit(LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT,
- &sdp->sd_live_gh);
- gfs2_glock_nq(&sdp->sd_live_gh);
- }
+ wake_up(&sdp->sd_logd_waitq);
+ wake_up(&sdp->sd_quota_wait);
- gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
- clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+ wait_event_timeout(sdp->sd_log_waitq,
+ gfs2_log_is_empty(sdp),
+ HZ * 5);
- /*
- * At this point our journal is evicted, so we need to get a new inode
- * for it. Once done, we need to call gfs2_find_jhead which
- * calls gfs2_map_journal_extents to map it for us again.
- *
- * Note that we don't really want it to look up a FREE block. The
- * GFS2_BLKST_FREE simply overrides a block check in gfs2_inode_lookup
- * which would otherwise fail because it requires grabbing an rgrp
- * glock, which would fail with -EIO because we're withdrawing.
- */
- inode = gfs2_inode_lookup(sdp->sd_vfs, DT_UNKNOWN,
- sdp->sd_jdesc->jd_no_addr, no_formal_ino,
- GFS2_BLKST_FREE);
- if (IS_ERR(inode)) {
- fs_warn(sdp, "Reprocessing of jid %d failed with %ld.\n",
- sdp->sd_lockstruct.ls_jid, PTR_ERR(inode));
- goto skip_recovery;
- }
- sdp->sd_jdesc->jd_inode = inode;
- d_mark_dontcache(inode);
+ sdp->sd_vfs->s_flags |= SB_RDONLY;
/*
- * Now wait until recovery is complete.
+ * Dequeue any pending non-system glock holders that can no
+ * longer be granted because the file system is withdrawn.
*/
- for (tries = 0; tries < 10; tries++) {
- ret = check_journal_clean(sdp, sdp->sd_jdesc, false);
- if (!ret)
- break;
- msleep(HZ);
- fs_warn(sdp, "Waiting for journal recovery jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- }
-skip_recovery:
- if (!ret)
- fs_warn(sdp, "Journal recovery complete for jid %d.\n",
- sdp->sd_lockstruct.ls_jid);
- else
- fs_warn(sdp, "Journal recovery skipped for jid %d until next "
- "mount.\n", sdp->sd_lockstruct.ls_jid);
- fs_warn(sdp, "Glock dequeues delayed: %lu\n", sdp->sd_glock_dqs_held);
- sdp->sd_glock_dqs_held = 0;
- wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY);
+ gfs2_withdraw_glocks(sdp);
}
void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...)
@@ -319,50 +159,108 @@ void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...)
va_end(args);
}
-int gfs2_withdraw(struct gfs2_sbd *sdp)
+/**
+ * gfs2_offline_uevent - run gfs2_withdraw_helper
+ * @sdp: The GFS2 superblock
+ */
+static bool gfs2_offline_uevent(struct gfs2_sbd *sdp)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- const struct lm_lockops *lm = ls->ls_ops;
+ long timeout;
- if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW &&
- test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
- if (!test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags))
- return -1;
+ /* Skip protocol "lock_nolock" which doesn't require shared storage. */
+ if (!ls->ls_ops->lm_lock)
+ return false;
+
+ /*
+ * The gfs2_withdraw_helper replies by writing one of the following
+ * status codes to "/sys$DEVPATH/lock_module/withdraw":
+ *
+ * 0 - The shared block device has been marked inactive. Future write
+ * operations will fail.
+ *
+ * 1 - The shared block device may still be active and carry out
+ * write operations.
+ *
+ * If the "offline" uevent isn't reacted upon in time, the event
+ * handler is assumed to have failed.
+ */
- wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG,
- TASK_UNINTERRUPTIBLE);
- return -1;
+ sdp->sd_withdraw_helper_status = -1;
+ kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
+ timeout = gfs2_tune_get(sdp, gt_withdraw_helper_timeout) * HZ;
+ wait_for_completion_timeout(&sdp->sd_withdraw_helper, timeout);
+ if (sdp->sd_withdraw_helper_status == -1) {
+ fs_err(sdp, "%s timed out\n", "gfs2_withdraw_helper");
+ } else {
+ fs_err(sdp, "%s %s with status %d\n",
+ "gfs2_withdraw_helper",
+ sdp->sd_withdraw_helper_status == 0 ?
+ "succeeded" : "failed",
+ sdp->sd_withdraw_helper_status);
}
+ return sdp->sd_withdraw_helper_status == 0;
+}
- set_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
+void gfs2_withdraw_func(struct work_struct *work)
+{
+ struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_withdraw_work);
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ const struct lm_lockops *lm = ls->ls_ops;
+ bool device_inactive;
- if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
- fs_err(sdp, "about to withdraw this file system\n");
- BUG_ON(sdp->sd_args.ar_debug);
+ if (test_bit(SDF_KILL, &sdp->sd_flags))
+ return;
- signal_our_withdraw(sdp);
+ BUG_ON(sdp->sd_args.ar_debug);
- kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
+ /*
+ * Try to deactivate the shared block device so that no more I/O will
+ * go through. If successful, we can immediately trigger remote
+ * recovery. Otherwise, we must first empty out all our local caches.
+ */
- if (!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
- wait_for_completion(&sdp->sd_wdack);
+ device_inactive = gfs2_offline_uevent(sdp);
- if (lm->lm_unmount) {
- fs_err(sdp, "telling LM to unmount\n");
- lm->lm_unmount(sdp);
+ if (sdp->sd_args.ar_errors == GFS2_ERRORS_DEACTIVATE && !device_inactive)
+ panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname);
+
+ if (lm->lm_unmount) {
+ if (device_inactive) {
+ lm->lm_unmount(sdp, false);
+ do_withdraw(sdp);
+ } else {
+ do_withdraw(sdp);
+ lm->lm_unmount(sdp, false);
}
- set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
- fs_err(sdp, "File system withdrawn\n");
+ } else {
+ do_withdraw(sdp);
+ }
+
+ fs_err(sdp, "file system withdrawn\n");
+}
+
+void gfs2_withdraw(struct gfs2_sbd *sdp)
+{
+ if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW ||
+ sdp->sd_args.ar_errors == GFS2_ERRORS_DEACTIVATE) {
+ if (test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags))
+ return;
+
dump_stack();
- clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
- smp_mb__after_atomic();
- wake_up_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG);
+ /*
+ * There is no need to withdraw when the superblock hasn't been
+ * fully initialized, yet.
+ */
+ if (!(sdp->sd_vfs->s_flags & SB_BORN))
+ return;
+ fs_err(sdp, "about to withdraw this file system\n");
+ schedule_work(&sdp->sd_withdraw_work);
+ return;
}
if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname);
-
- return -1;
}
/*
@@ -370,28 +268,17 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
*/
void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
- const char *function, char *file, unsigned int line,
- bool delayed)
+ const char *function, char *file, unsigned int line)
{
if (gfs2_withdrawn(sdp))
return;
fs_err(sdp,
- "fatal: assertion \"%s\" failed\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: assertion \"%s\" failed - "
+ "function = %s, file = %s, line = %u\n",
assertion, function, file, line);
- /*
- * If errors=panic was specified on mount, it won't help to delay the
- * withdraw.
- */
- if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
- delayed = false;
-
- if (delayed)
- gfs2_withdraw_delayed(sdp);
- else
- gfs2_withdraw(sdp);
+ gfs2_withdraw(sdp);
dump_stack();
}
@@ -408,7 +295,8 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
return;
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW)
- fs_warn(sdp, "warning: assertion \"%s\" failed at function = %s, file = %s, line = %u\n",
+ fs_warn(sdp, "warning: assertion \"%s\" failed - "
+ "function = %s, file = %s, line = %u\n",
assertion, function, file, line);
if (sdp->sd_args.ar_debug)
@@ -417,10 +305,10 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
dump_stack();
if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
- panic("GFS2: fsid=%s: warning: assertion \"%s\" failed\n"
- "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
+ panic("GFS2: fsid=%s: warning: assertion \"%s\" failed - "
+ "function = %s, file = %s, line = %u\n",
sdp->sd_fsname, assertion,
- sdp->sd_fsname, function, file, line);
+ function, file, line);
sdp->sd_last_warning = jiffies;
}
@@ -433,7 +321,8 @@ void gfs2_consist_i(struct gfs2_sbd *sdp, const char *function,
char *file, unsigned int line)
{
gfs2_lm(sdp,
- "fatal: filesystem consistency error - function = %s, file = %s, line = %u\n",
+ "fatal: filesystem consistency error - "
+ "function = %s, file = %s, line = %u\n",
function, file, line);
gfs2_withdraw(sdp);
}
@@ -448,9 +337,9 @@ void gfs2_consist_inode_i(struct gfs2_inode *ip,
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
gfs2_lm(sdp,
- "fatal: filesystem consistency error\n"
- " inode = %llu %llu\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: filesystem consistency error - "
+ "inode = %llu %llu, "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
function, file, line);
@@ -471,9 +360,9 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
gfs2_lm(sdp,
- "fatal: filesystem consistency error\n"
- " RG = %llu\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: filesystem consistency error - "
+ "RG = %llu, "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)rgd->rd_addr,
function, file, line);
gfs2_dump_glock(NULL, rgd->rd_gl, 1);
@@ -482,46 +371,36 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
/*
* gfs2_meta_check_ii - Flag a magic number consistency error and withdraw
- * Returns: -1 if this call withdrew the machine,
- * -2 if it was already withdrawn
*/
-int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *type, const char *function, char *file,
- unsigned int line)
+void gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file,
+ unsigned int line)
{
- int me;
-
gfs2_lm(sdp,
- "fatal: invalid metadata block\n"
- " bh = %llu (%s)\n"
- " function = %s, file = %s, line = %u\n",
- (unsigned long long)bh->b_blocknr, type,
+ "fatal: invalid metadata block - "
+ "bh = %llu (bad magic number), "
+ "function = %s, file = %s, line = %u\n",
+ (unsigned long long)bh->b_blocknr,
function, file, line);
- me = gfs2_withdraw(sdp);
- return (me) ? -1 : -2;
+ gfs2_withdraw(sdp);
}
/*
* gfs2_metatype_check_ii - Flag a metadata type consistency error and withdraw
- * Returns: -1 if this call withdrew the machine,
- * -2 if it was already withdrawn
*/
-int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- u16 type, u16 t, const char *function,
- char *file, unsigned int line)
+void gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ u16 type, u16 t, const char *function,
+ char *file, unsigned int line)
{
- int me;
-
gfs2_lm(sdp,
- "fatal: invalid metadata block\n"
- " bh = %llu (type: exp=%u, found=%u)\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: invalid metadata block - "
+ "bh = %llu (type: exp=%u, found=%u), "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, type, t,
function, file, line);
- me = gfs2_withdraw(sdp);
- return (me) ? -1 : -2;
+ gfs2_withdraw(sdp);
}
/*
@@ -530,33 +409,29 @@ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
* 0 if it was already withdrawn
*/
-int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
- unsigned int line)
+void gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
+ unsigned int line)
{
gfs2_lm(sdp,
- "fatal: I/O error\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: I/O error - "
+ "function = %s, file = %s, line = %u\n",
function, file, line);
- return gfs2_withdraw(sdp);
+ gfs2_withdraw(sdp);
}
/*
- * gfs2_io_error_bh_i - Flag a buffer I/O error
- * @withdraw: withdraw the filesystem
+ * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
*/
void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line,
- bool withdraw)
+ const char *function, char *file, unsigned int line)
{
if (gfs2_withdrawn(sdp))
return;
- fs_err(sdp, "fatal: I/O error\n"
- " block = %llu\n"
- " function = %s, file = %s, line = %u\n",
+ fs_err(sdp, "fatal: I/O error - "
+ "block = %llu, "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, function, file, line);
- if (withdraw)
- gfs2_withdraw(sdp);
+ gfs2_withdraw(sdp);
}
-
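
For reference, gfs2_offline_uevent() above expects a userspace withdraw helper to answer the "offline" uevent by writing a status code to /sys$DEVPATH/lock_module/withdraw (0: the shared device was deactivated, 1: it may still be active). A hedged sketch of such a helper; deactivate_device() is a placeholder, and the real helper is userspace tooling outside this patch:

#include <stdio.h>
#include <stdlib.h>

static int deactivate_device(const char *devpath)
{
	/* Placeholder: e.g. tell the storage layer to fence further I/O. */
	(void)devpath;
	return -1;	/* pretend deactivation failed */
}

int main(void)
{
	const char *devpath = getenv("DEVPATH");
	char attr[4096];
	FILE *f;
	int status;

	if (!devpath)
		return 1;

	status = deactivate_device(devpath) == 0 ? 0 : 1;

	snprintf(attr, sizeof(attr), "/sys%s/lock_module/withdraw", devpath);
	f = fopen(attr, "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", status);
	fclose(f);
	return 0;
}
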
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 78ec190f4155..ffcc47d6b0b4 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -37,24 +37,14 @@ do { \
void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
- const char *function, char *file, unsigned int line,
- bool delayed);
+ const char *function, char *file, unsigned int line);
#define gfs2_assert_withdraw(sdp, assertion) \
({ \
bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_withdraw_i((sdp), #assertion, \
- __func__, __FILE__, __LINE__, false); \
- !_bool; \
- })
-
-#define gfs2_assert_withdraw_delayed(sdp, assertion) \
- ({ \
- bool _bool = (assertion); \
- if (unlikely(!_bool)) \
- gfs2_assert_withdraw_i((sdp), #assertion, \
- __func__, __FILE__, __LINE__, true); \
+ __func__, __FILE__, __LINE__); \
!_bool; \
})
@@ -91,9 +81,9 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
gfs2_consist_rgrpd_i((rgd), __func__, __FILE__, __LINE__)
-int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *type, const char *function,
- char *file, unsigned int line);
+void gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function,
+ char *file, unsigned int line);
static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
struct buffer_head *bh)
@@ -108,10 +98,10 @@ static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
return 0;
}
-int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- u16 type, u16 t,
- const char *function,
- char *file, unsigned int line);
+void gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ u16 type, u16 t,
+ const char *function,
+ char *file, unsigned int line);
static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
struct buffer_head *bh,
@@ -122,12 +112,16 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
u32 magic = be32_to_cpu(mh->mh_magic);
u16 t = be32_to_cpu(mh->mh_type);
- if (unlikely(magic != GFS2_MAGIC))
- return gfs2_meta_check_ii(sdp, bh, "magic number", function,
- file, line);
- if (unlikely(t != type))
- return gfs2_metatype_check_ii(sdp, bh, type, t, function,
- file, line);
+ if (unlikely(magic != GFS2_MAGIC)) {
+ gfs2_meta_check_ii(sdp, bh, function,
+ file, line);
+ return -EIO;
+ }
+ if (unlikely(t != type)) {
+ gfs2_metatype_check_ii(sdp, bh, type, t, function,
+ file, line);
+ return -EIO;
+ }
return 0;
}
@@ -144,28 +138,23 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
}
-int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
- char *file, unsigned int line);
+void gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
+ char *file, unsigned int line);
-extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
- bool verbose);
-extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
- struct gfs2_holder *freeze_gh, int caller_flags);
-extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ bool verbose);
+int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
+void gfs2_freeze_unlock(struct gfs2_sbd *sdp);
#define gfs2_io_error(sdp) \
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line,
- bool withdraw);
-
-#define gfs2_io_error_bh_wd(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true)
+ const char *function, char *file, unsigned int line);
#define gfs2_io_error_bh(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false)
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__)
extern struct kmem_cache *gfs2_glock_cachep;
@@ -190,37 +179,12 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
}
/**
- * gfs2_withdraw_delayed - withdraw as soon as possible without deadlocks
- * @sdp: the superblock
- */
-static inline void gfs2_withdraw_delayed(struct gfs2_sbd *sdp)
-{
- set_bit(SDF_WITHDRAWING, &sdp->sd_flags);
-}
-
-/**
- * gfs2_withdrawn - test whether the file system is withdrawing or withdrawn
+ * gfs2_withdrawn - test whether the file system is withdrawn
* @sdp: the superblock
*/
static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
{
- return test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
- test_bit(SDF_WITHDRAWING, &sdp->sd_flags);
-}
-
-/**
- * gfs2_withdrawing - check if a withdraw is pending
- * @sdp: the superblock
- */
-static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
-{
- return test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
- !test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
-}
-
-static inline bool gfs2_withdraw_in_prog(struct gfs2_sbd *sdp)
-{
- return test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
+ return unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags));
}
#define gfs2_tune_get(sdp, field) \
@@ -228,6 +192,8 @@ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
__printf(2, 3)
void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...);
-int gfs2_withdraw(struct gfs2_sbd *sdp);
+
+void gfs2_withdraw_func(struct work_struct *work);
+void gfs2_withdraw(struct gfs2_sbd *sdp);
#endif /* __UTIL_DOT_H__ */
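
gfs2_metatype_check_i() above now reports a failure through gfs2_meta_check_ii()/gfs2_metatype_check_ii() and returns -EIO itself. A userspace sketch of the underlying check, big-endian magic first and block type second; the header layout and magic constant are simplified stand-ins, not the on-disk definitions from gfs2_ondisk.h:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() for big-endian conversion */

#define SKETCH_MAGIC 0x01161970u	/* stand-in for GFS2_MAGIC */

struct meta_header {
	uint32_t mh_magic;	/* big-endian on disk */
	uint32_t mh_type;	/* big-endian on disk */
};

static int metatype_check(const struct meta_header *mh, uint32_t want_type)
{
	if (ntohl(mh->mh_magic) != SKETCH_MAGIC) {
		fprintf(stderr, "bad magic 0x%08x\n", ntohl(mh->mh_magic));
		return -1;
	}
	if (ntohl(mh->mh_type) != want_type) {
		fprintf(stderr, "type: exp=%u, found=%u\n",
			want_type, ntohl(mh->mh_type));
		return -1;
	}
	return 0;
}

int main(void)
{
	struct meta_header mh = {
		.mh_magic = htonl(SKETCH_MAGIC),
		.mh_type = htonl(4),
	};

	return metatype_check(&mh, 4) ? 1 : 0;
}
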
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 0c5650fe1fd1..df9c93de94c7 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -96,30 +96,34 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
return -EIO;
for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
- if (!GFS2_EA_REC_LEN(ea))
- goto fail;
+ if (!GFS2_EA_REC_LEN(ea)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
- bh->b_data + bh->b_size))
- goto fail;
- if (!gfs2_eatype_valid(sdp, ea->ea_type))
- goto fail;
+ bh->b_data + bh->b_size)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+ if (!gfs2_eatype_valid(sdp, ea->ea_type)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
error = ea_call(ip, bh, ea, prev, data);
if (error)
return error;
if (GFS2_EA_IS_LAST(ea)) {
if ((char *)GFS2_EA2NEXT(ea) !=
- bh->b_data + bh->b_size)
- goto fail;
+ bh->b_data + bh->b_size) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
break;
}
}
return error;
-
-fail:
- gfs2_consist_inode(ip);
- return -EIO;
}
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
@@ -311,7 +315,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
ea->ea_num_ptrs = 0;
}
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
gfs2_trans_end(sdp);
@@ -639,7 +643,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
u64 block;
int error;
- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0);
if (error)
return error;
gfs2_trans_remove_revoke(sdp, block, 1);
@@ -701,7 +705,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0);
if (error)
return error;
gfs2_trans_remove_revoke(sdp, block, 1);
@@ -763,7 +767,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
goto out_end_trans;
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
out_end_trans:
@@ -888,7 +892,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
if (es->es_el)
ea_set_remove_stuffed(ip, es->es_el);
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -1002,7 +1006,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
} else {
u64 blk;
unsigned int n = 1;
- error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
+ error = gfs2_alloc_blocks(ip, &blk, &n, 0);
if (error)
return error;
gfs2_trans_remove_revoke(sdp, blk, 1);
@@ -1106,7 +1110,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
ea->ea_type = GFS2_EATYPE_UNUSED;
}
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -1225,7 +1229,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
}
static int gfs2_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -1313,7 +1317,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
else
goto out;
- gfs2_rlist_alloc(&rlist);
+ gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
for (x = 0; x < rlist.rl_rgrps; x++) {
rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
@@ -1379,7 +1383,7 @@ out:
return error;
}
-static int ea_dealloc_block(struct gfs2_inode *ip)
+static int ea_dealloc_block(struct gfs2_inode *ip, bool initialized)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
@@ -1412,11 +1416,13 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
ip->i_eattr = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- gfs2_trans_add_meta(ip->i_gl, dibh);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
+ if (initialized) {
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (!error) {
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
+ }
}
gfs2_trans_end(sdp);
@@ -1429,11 +1435,12 @@ out_gunlock:
/**
* gfs2_ea_dealloc - deallocate the extended attribute fork
* @ip: the inode
+ * @initialized: xattrs have been initialized
*
* Returns: errno
*/
-int gfs2_ea_dealloc(struct gfs2_inode *ip)
+int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized)
{
int error;
@@ -1445,17 +1452,19 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
if (error)
return error;
- error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
- if (error)
- goto out_quota;
-
- if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
- error = ea_dealloc_indirect(ip);
+ if (initialized) {
+ error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
if (error)
goto out_quota;
+
+ if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
+ error = ea_dealloc_indirect(ip);
+ if (error)
+ goto out_quota;
+ }
}
- error = ea_dealloc_block(ip);
+ error = ea_dealloc_block(ip, initialized);
out_quota:
gfs2_quota_unhold(ip);
@@ -1490,16 +1499,14 @@ static const struct xattr_handler gfs2_xattr_trusted_handler = {
.set = gfs2_xattr_set,
};
-const struct xattr_handler *gfs2_xattr_handlers_max[] = {
+const struct xattr_handler * const gfs2_xattr_handlers_max[] = {
/* GFS2_FS_FORMAT_MAX */
&gfs2_xattr_trusted_handler,
/* GFS2_FS_FORMAT_MIN */
&gfs2_xattr_user_handler,
&gfs2_xattr_security_handler,
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
NULL,
};
-const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
+const struct xattr_handler * const *gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index 2aed9d7d483d..3c9788e0e137 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -50,14 +50,14 @@ struct gfs2_ea_location {
struct gfs2_ea_header *el_prev;
};
-extern int __gfs2_xattr_set(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags, int type);
-extern ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
+int __gfs2_xattr_set(struct inode *inode, const char *name,
+ const void *value, size_t size,
+ int flags, int type);
+ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
+int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized);
/* Exported to acl.c */
-extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
+int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
#endif /* __EATTR_DOT_H__ */
diff --git a/fs/hfs/.kunitconfig b/fs/hfs/.kunitconfig
new file mode 100644
index 000000000000..5caa9af1e3bb
--- /dev/null
+++ b/fs/hfs/.kunitconfig
@@ -0,0 +1,7 @@
+CONFIG_KUNIT=y
+CONFIG_HFS_FS=y
+CONFIG_HFS_KUNIT_TEST=y
+CONFIG_BLOCK=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_NLS=y
+CONFIG_LEGACY_DIRECT_IO=y
diff --git a/fs/hfs/Kconfig b/fs/hfs/Kconfig
index 129926b5142d..7f3cbe43b4b7 100644
--- a/fs/hfs/Kconfig
+++ b/fs/hfs/Kconfig
@@ -2,7 +2,9 @@
config HFS_FS
tristate "Apple Macintosh file system support"
depends on BLOCK
+ select BUFFER_HEAD
select NLS
+ select LEGACY_DIRECT_IO
help
If you say Y here, you will be able to mount Macintosh-formatted
floppy disks and hard drive partitions with full read-write access.
@@ -11,3 +13,18 @@ config HFS_FS
To compile this file system support as a module, choose M here: the
module will be called hfs.
+
+config HFS_KUNIT_TEST
+ tristate "KUnit tests for HFS filesystem" if !KUNIT_ALL_TESTS
+ depends on HFS_FS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds KUnit tests for the HFS filesystem.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). Only useful for
+ kernel developers running the KUnit test harness; they are not intended
+ for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general please
+ refer to the KUnit documentation in Documentation/dev-tools/kunit/.
diff --git a/fs/hfs/Makefile b/fs/hfs/Makefile
index b65459bf3dc4..a7c9ce6b4609 100644
--- a/fs/hfs/Makefile
+++ b/fs/hfs/Makefile
@@ -9,3 +9,5 @@ hfs-objs := bitmap.o bfind.o bnode.o brec.o btree.o \
catalog.o dir.o extent.o inode.o attr.o mdb.o \
part_tbl.o string.o super.o sysdep.o trans.o
+# KUnit tests
+obj-$(CONFIG_HFS_KUNIT_TEST) += string_test.o
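As a rough editorial sketch only (the fs/hfs/string_test.c object wired up above is not shown in this excerpt and may well look different), a KUnit suite exercising the existing hfs_strcmp() helper could be structured along these lines:

/* Editorial sketch; the real string_test.c in this series may differ. */
#include <kunit/test.h>
#include <linux/module.h>

#include "hfs_fs.h"

static void hfs_strcmp_test(struct kunit *test)
{
	/* HFS name comparison is case-insensitive (MacOS Roman ordering). */
	KUNIT_EXPECT_EQ(test, 0,
			hfs_strcmp((const unsigned char *)"README", 6,
				   (const unsigned char *)"readme", 6));
	/* Distinct names must not compare equal. */
	KUNIT_EXPECT_NE(test, 0,
			hfs_strcmp((const unsigned char *)"foo", 3,
				   (const unsigned char *)"bar", 3));
}

static struct kunit_case hfs_string_test_cases[] = {
	KUNIT_CASE(hfs_strcmp_test),
	{}
};

static struct kunit_suite hfs_string_test_suite = {
	.name = "hfs_string",
	.test_cases = hfs_string_test_cases,
};

kunit_test_suites(&hfs_string_test_suite);

MODULE_LICENSE("GPL");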
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 2bd54efaf416..f8395cdd1adf 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -121,7 +121,7 @@ static int hfs_xattr_get(const struct xattr_handler *handler,
}
static int hfs_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
@@ -146,7 +146,7 @@ static const struct xattr_handler hfs_type_handler = {
.set = hfs_xattr_set,
};
-const struct xattr_handler *hfs_xattr_handlers[] = {
+const struct xattr_handler * const hfs_xattr_handlers[] = {
&hfs_creator_handler,
&hfs_type_handler,
NULL
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index ef9498a6e88a..d56e47bdc517 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -16,14 +16,17 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
{
void *ptr;
+ if (!tree || !fd)
+ return -EINVAL;
+
fd->tree = tree;
fd->bnode = NULL;
- ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+ ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
- hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
tree->cnid, __builtin_return_address(0));
switch (tree->cnid) {
case HFS_CAT_CNID:
@@ -45,7 +48,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
{
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
- hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
fd->tree->cnid, __builtin_return_address(0));
mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
@@ -112,6 +115,12 @@ int hfs_brec_find(struct hfs_find_data *fd)
__be32 data;
int height, res;
+ fd->record = -1;
+ fd->keyoffset = -1;
+ fd->keylength = -1;
+ fd->entryoffset = -1;
+ fd->entrylength = -1;
+
tree = fd->tree;
if (fd->bnode)
hfs_bnode_put(fd->bnode);
@@ -158,7 +167,7 @@ release:
return res;
}
-int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len)
{
int res;
diff --git a/fs/hfs/bitmap.c b/fs/hfs/bitmap.c
index 28307bc9ec1e..5e84833a4743 100644
--- a/fs/hfs/bitmap.c
+++ b/fs/hfs/bitmap.c
@@ -158,7 +158,7 @@ u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
}
}
- hfs_dbg(BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits);
+ hfs_dbg("pos %u, num_bits %u\n", pos, *num_bits);
HFS_SB(sb)->free_ablocks -= *num_bits;
hfs_bitmap_dirty(sb);
out:
@@ -200,7 +200,7 @@ int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
if (!count)
return 0;
- hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count);
+ hfs_dbg("start %u, count %u\n", start, count);
/* are all of the bits in range? */
if ((start + count) > HFS_SB(sb)->fs_ablocks)
return -2;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index c0a73a6ffb28..13d58c51fc46 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,13 +15,68 @@
#include "btree.h"
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+static inline
+bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
+{
+ bool is_valid = off < node->tree->node_size;
+
+ if (!is_valid) {
+ pr_err("requested invalid offset: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off);
+ }
+
+ return is_valid;
+}
+
+static inline
+u32 check_and_correct_requested_length(struct hfs_bnode *node, u32 off, u32 len)
+{
+ unsigned int node_size;
+
+ if (!is_bnode_offset_valid(node, off))
+ return 0;
+
+ node_size = node->tree->node_size;
+
+ if ((off + len) > node_size) {
+ u32 new_len = node_size - off;
+
+ pr_err("requested length has been corrected: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, "
+ "requested_len %u, corrected_len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len, new_len);
+
+ return new_len;
+ }
+
+ return len;
+}
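A quick worked illustration of the clamping above (editorial, not part of the patch): with a 512-byte node, a 40-byte access at offset 500 is trimmed to 12 bytes, and any offset at or past 512 yields a zero length.

/* Standalone sketch of the clamping rule, with plain integers. */
#include <stdio.h>

static unsigned int clamp_len(unsigned int node_size, unsigned int off,
			      unsigned int len)
{
	if (off >= node_size)
		return 0;		/* invalid offset: nothing to copy */
	if (off + len > node_size)
		return node_size - off;	/* trim to the end of the node */
	return len;
}

int main(void)
{
	printf("%u\n", clamp_len(512, 500, 40));	/* 12 */
	printf("%u\n", clamp_len(512, 512, 8));		/* 0  */
	printf("%u\n", clamp_len(512, 100, 8));		/* 8  */
	return 0;
}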
+
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page *page;
- int pagenum;
- int bytes_read;
- int bytes_to_read;
- void *vaddr;
+ u32 pagenum;
+ u32 bytes_read;
+ u32 bytes_to_read;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagenum = off >> PAGE_SHIFT;
@@ -31,18 +86,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
if (pagenum >= node->tree->pages_per_bnode)
break;
page = node->page[pagenum];
- bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+ bytes_to_read = min_t(u32, len - bytes_read, PAGE_SIZE - off);
- vaddr = kmap_atomic(page);
- memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
- kunmap_atomic(vaddr);
+ memcpy_from_page(buf + bytes_read, page, off, bytes_to_read);
pagenum++;
off = 0; /* page offset only applies to the first page */
}
}
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
{
__be16 data;
// optimize later...
@@ -50,7 +103,7 @@ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
return be16_to_cpu(data);
}
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
{
u8 data;
// optimize later...
@@ -58,10 +111,10 @@ u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
return data;
}
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
{
struct hfs_btree *tree;
- int key_len;
+ u32 key_len;
tree = node->tree;
if (node->type == HFS_NODE_LEAF ||
@@ -70,79 +123,117 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
else
key_len = tree->max_key_len + 1;
+ if (key_len > sizeof(hfs_btree_key) || key_len < 1) {
+ memset(key, 0, sizeof(hfs_btree_key));
+ pr_err("hfs: Invalid key length: %u\n", key_len);
+ return;
+ }
+
hfs_bnode_read(node, key, off, key_len);
}
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page *page;
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
+
off += node->page_offset;
page = node->page[0];
- memcpy(kmap(page) + off, buf, len);
- kunmap(page);
+ memcpy_to_page(page, off, buf, len);
set_page_dirty(page);
}
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
{
__be16 v = cpu_to_be16(data);
// optimize later...
hfs_bnode_write(node, &v, off, 2);
}
-void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
+void hfs_bnode_write_u8(struct hfs_bnode *node, u32 off, u8 data)
{
// optimize later...
hfs_bnode_write(node, &data, off, 1);
}
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
{
struct page *page;
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
+
off += node->page_offset;
page = node->page[0];
- memset(kmap(page) + off, 0, len);
- kunmap(page);
+ memzero_page(page, off, len);
set_page_dirty(page);
}
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
- struct hfs_bnode *src_node, int src, int len)
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len)
{
struct page *src_page, *dst_page;
- hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(src_node, src, len);
+ len = check_and_correct_requested_length(dst_node, dst, len);
+
src += src_node->page_offset;
dst += dst_node->page_offset;
src_page = src_node->page[0];
dst_page = dst_node->page[0];
- memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
- kunmap(src_page);
- kunmap(dst_page);
+ memcpy_page(dst_page, dst, src_page, src, len);
set_page_dirty(dst_page);
}
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
{
struct page *page;
void *ptr;
- hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(node, src, len);
+ len = check_and_correct_requested_length(node, dst, len);
+
src += node->page_offset;
dst += node->page_offset;
page = node->page[0];
- ptr = kmap(page);
+ ptr = kmap_local_page(page);
memmove(ptr + dst, ptr + src, len);
- kunmap(page);
+ kunmap_local(ptr);
set_page_dirty(page);
}
@@ -152,16 +243,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
__be32 cnid;
int i, off, key_off;
- hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
+ hfs_dbg("node %d\n", node->this);
hfs_bnode_read(node, &desc, 0, sizeof(desc));
- hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
+ hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
desc.type, desc.height, be16_to_cpu(desc.num_recs));
off = node->tree->node_size - 2;
for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
key_off = hfs_bnode_read_u16(node, off);
- hfs_dbg_cont(BNODE_MOD, " %d", key_off);
+ hfs_dbg(" key_off %d", key_off);
if (i && node->type == HFS_NODE_INDEX) {
int tmp;
@@ -169,18 +260,18 @@ void hfs_bnode_dump(struct hfs_bnode *node)
tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
else
tmp = node->tree->max_key_len + 1;
- hfs_dbg_cont(BNODE_MOD, " (%d,%d",
- tmp, hfs_bnode_read_u8(node, key_off));
+ hfs_dbg(" (%d,%d",
+ tmp, hfs_bnode_read_u8(node, key_off));
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
- hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+ hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
} else if (i && node->type == HFS_NODE_LEAF) {
int tmp;
tmp = hfs_bnode_read_u8(node, key_off);
- hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
+ hfs_dbg(" (%d)", tmp);
}
}
- hfs_dbg_cont(BNODE_MOD, "\n");
+ hfs_dbg("\n");
}
void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -270,7 +361,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
atomic_set(&node->refcnt, 1);
- hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
+ hfs_dbg("cnid %d, node %d, refcnt 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
spin_lock(&tree->hash_lock);
@@ -281,6 +372,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
tree->node_hash[hash] = node;
tree->node_hash_cnt++;
} else {
+ hfs_bnode_get(node2);
spin_unlock(&tree->hash_lock);
kfree(node);
wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
@@ -296,10 +388,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
page = read_mapping_page(mapping, block++, NULL);
if (IS_ERR(page))
goto fail;
- if (PageError(page)) {
- put_page(page);
- goto fail;
- }
node->page[i] = page;
}
@@ -313,7 +401,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
{
struct hfs_bnode **p;
- hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
@@ -350,13 +438,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
if (!test_bit(HFS_BNODE_NEW, &node->flags))
return node;
- desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+ desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
+ node->page_offset);
node->prev = be32_to_cpu(desc->prev);
node->next = be32_to_cpu(desc->next);
node->num_recs = be16_to_cpu(desc->num_recs);
node->type = desc->type;
node->height = desc->height;
- kunmap(node->page[0]);
+ kunmap_local(desc);
switch (node->type) {
case HFS_NODE_HEADER:
@@ -440,14 +529,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
}
pagep = node->page;
- memset(kmap(*pagep) + node->page_offset, 0,
- min((int)PAGE_SIZE, (int)tree->node_size));
+ memzero_page(*pagep, node->page_offset,
+ min((int)PAGE_SIZE, (int)tree->node_size));
set_page_dirty(*pagep);
- kunmap(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
- memset(kmap(*++pagep), 0, PAGE_SIZE);
+ memzero_page(*++pagep, 0, PAGE_SIZE);
set_page_dirty(*pagep);
- kunmap(*pagep);
}
clear_bit(HFS_BNODE_NEW, &node->flags);
wake_up(&node->lock_wq);
@@ -459,7 +546,7 @@ void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
atomic_inc(&node->refcnt);
- hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
}
@@ -472,7 +559,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
struct hfs_btree *tree = node->tree;
int i;
- hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
BUG_ON(!atomic_read(&node->refcnt));
@@ -487,6 +574,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
hfs_bnode_unhash(node);
spin_unlock(&tree->hash_lock);
+ hfs_bnode_clear(node, 0, tree->node_size);
hfs_bmap_free(node);
hfs_bnode_free(node);
return;
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 896396554bcc..5a2f740ddefd 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -62,7 +62,7 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
return retval;
}
-int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node;
@@ -94,7 +94,7 @@ again:
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
end_off = hfs_bnode_read_u16(node, end_rec_off);
end_rec_off -= 2;
- hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
+ hfs_dbg("rec %d, size %d, end_off %d, end_rec_off %d\n",
rec, size, end_off, end_rec_off);
if (size > end_rec_off - end_off) {
if (new_node)
@@ -179,6 +179,7 @@ int hfs_brec_remove(struct hfs_find_data *fd)
struct hfs_btree *tree;
struct hfs_bnode *node, *parent;
int end_off, rec_off, data_off, size;
+ int src, dst, len;
tree = fd->tree;
node = fd->bnode;
@@ -191,7 +192,7 @@ again:
mark_inode_dirty(tree->inode);
}
hfs_bnode_dump(node);
- hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
+ hfs_dbg("rec %d, len %d\n",
fd->record, fd->keylength + fd->entrylength);
if (!--node->num_recs) {
hfs_bnode_unlink(node);
@@ -208,10 +209,14 @@ again:
}
hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
- if (rec_off == end_off)
- goto skip;
size = fd->keylength + fd->entrylength;
+ if (rec_off == end_off) {
+ src = fd->keyoffset;
+ hfs_bnode_clear(node, src, size);
+ goto skip;
+ }
+
do {
data_off = hfs_bnode_read_u16(node, rec_off);
hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
@@ -219,9 +224,23 @@ again:
} while (rec_off >= end_off);
/* fill hole */
- hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
- data_off - fd->keyoffset - size);
+ dst = fd->keyoffset;
+ src = fd->keyoffset + size;
+ len = data_off - src;
+
+ hfs_bnode_move(node, dst, src, len);
+
+ src = dst + len;
+ len = data_off - src;
+
+ hfs_bnode_clear(node, src, len);
+
skip:
+ /*
+ * Remove the obsolete offset to free space.
+ */
+ hfs_bnode_write_u16(node, end_off, 0);
+
hfs_bnode_dump(node);
if (!fd->record)
hfs_brec_update_parent(fd);
@@ -242,7 +261,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
if (IS_ERR(new_node))
return new_node;
hfs_bnode_get(node);
- hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
+ hfs_dbg("this %d, new %d, next %d\n",
node->this, new_node->this, node->next);
new_node->next = node->next;
new_node->prev = node->this;
@@ -378,7 +397,7 @@ again:
newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1;
else
fd->keylength = newkeylen = tree->max_key_len + 1;
- hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
+ hfs_dbg("rec %d, keylength %d, newkeylen %d\n",
rec, fd->keylength, newkeylen);
rec_off = tree->node_size - (rec + 2) * 2;
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 19017d296173..7bc425283d49 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
struct hfs_btree *tree;
struct hfs_btree_header_rec *head;
struct address_space *mapping;
- struct page *page;
+ struct folio *folio;
+ struct buffer_head *bh;
unsigned int size;
+ u16 dblock;
+ sector_t start_block;
+ loff_t offset;
tree = kzalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
@@ -38,7 +42,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
tree->inode = iget_locked(sb, id);
if (!tree->inode)
goto free_tree;
- BUG_ON(!(tree->inode->i_state & I_NEW));
+ BUG_ON(!(inode_state_read_once(tree->inode) & I_NEW));
{
struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
HFS_I(tree->inode)->flags = 0;
@@ -75,12 +79,41 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
unlock_new_inode(tree->inode);
mapping = tree->inode->i_mapping;
- page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
+ folio = filemap_grab_folio(mapping, 0);
+ if (IS_ERR(folio))
goto free_inode;
+ folio_zero_range(folio, 0, folio_size(folio));
+
+ dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0);
+ start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div);
+
+ size = folio_size(folio);
+ offset = 0;
+ while (size > 0) {
+ size_t len;
+
+ bh = sb_bread(sb, start_block);
+ if (!bh) {
+ pr_err("unable to read tree header\n");
+ goto put_folio;
+ }
+
+ len = min_t(size_t, folio_size(folio), sb->s_blocksize);
+ memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize);
+
+ brelse(bh);
+
+ start_block++;
+ offset += len;
+ size -= len;
+ }
+
+ folio_mark_uptodate(folio);
+
/* Load the header */
- head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+ head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) +
+ sizeof(struct hfs_bnode_desc));
tree->root = be32_to_cpu(head->root);
tree->leaf_count = be32_to_cpu(head->leaf_count);
tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -94,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
size = tree->node_size;
if (!is_power_of_2(size))
- goto fail_page;
+ goto fail_folio;
if (!tree->node_count)
- goto fail_page;
+ goto fail_folio;
switch (id) {
case HFS_EXT_CNID:
if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
pr_err("invalid extent max_key_len %d\n",
tree->max_key_len);
- goto fail_page;
+ goto fail_folio;
}
break;
case HFS_CAT_CNID:
if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
pr_err("invalid catalog max_key_len %d\n",
tree->max_key_len);
- goto fail_page;
+ goto fail_folio;
}
break;
default:
@@ -119,12 +152,16 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- kunmap(page);
- put_page(page);
+ kunmap_local(head);
+ folio_unlock(folio);
+ folio_put(folio);
return tree;
-fail_page:
- put_page(page);
+fail_folio:
+ kunmap_local(head);
+put_folio:
+ folio_unlock(folio);
+ folio_put(folio);
free_inode:
tree->inode->i_mapping->a_ops = &hfs_aops;
iput(tree->inode);
@@ -169,7 +206,8 @@ void hfs_btree_write(struct hfs_btree *tree)
return;
/* Load the header */
page = node->page[0];
- head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+ head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+ sizeof(struct hfs_bnode_desc));
head->root = cpu_to_be32(tree->root);
head->leaf_count = cpu_to_be32(tree->leaf_count);
@@ -180,7 +218,7 @@ void hfs_btree_write(struct hfs_btree *tree)
head->attributes = cpu_to_be32(tree->attributes);
head->depth = cpu_to_be16(tree->depth);
- kunmap(page);
+ kunmap_local(head);
set_page_dirty(page);
hfs_bnode_put(node);
}
@@ -221,7 +259,7 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
}
/* Make sure @tree has enough space for the @rsvd_nodes */
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
{
struct inode *inode = tree->inode;
u32 count;
@@ -268,7 +306,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
idx = 0;
@@ -281,7 +319,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
idx += i;
data[off] |= m;
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_local(data);
tree->free_nodes--;
mark_inode_dirty(tree->inode);
hfs_bnode_put(node);
@@ -290,14 +328,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
}
}
if (++off >= PAGE_SIZE) {
- kunmap(*pagep);
- data = kmap(*++pagep);
+ kunmap_local(data);
+ data = kmap_local_page(*++pagep);
off = 0;
}
idx += 8;
len--;
}
- kunmap(*pagep);
+ kunmap_local(data);
nidx = node->next;
if (!nidx) {
printk(KERN_DEBUG "create new bmap node...\n");
@@ -313,7 +351,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off = off16;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
}
}
@@ -326,7 +364,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
u32 nidx;
u8 *data, byte, m;
- hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
+ hfs_dbg("node %u\n", node->this);
tree = node->tree;
nidx = node->this;
node = hfs_bnode_find(tree, 0);
@@ -360,20 +398,20 @@ void hfs_bmap_free(struct hfs_bnode *node)
}
off += node->page_offset + nidx / 8;
page = node->page[off >> PAGE_SHIFT];
- data = kmap(page);
+ data = kmap_local_page(page);
off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
if (!(byte & m)) {
pr_crit("trying to free free bnode %u(%d)\n",
node->this, node->type);
- kunmap(page);
+ kunmap_local(data);
hfs_bnode_put(node);
return;
}
data[off] = byte & ~m;
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(data);
hfs_bnode_put(node);
tree->free_nodes++;
mark_inode_dirty(tree->inode);
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 0e6baee93245..99be858b2446 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -86,87 +86,46 @@ struct hfs_find_data {
/* btree.c */
-extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
-extern void hfs_btree_close(struct hfs_btree *);
-extern void hfs_btree_write(struct hfs_btree *);
-extern int hfs_bmap_reserve(struct hfs_btree *, int);
-extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
+extern struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id,
+ btree_keycmp keycmp);
+extern void hfs_btree_close(struct hfs_btree *tree);
+extern void hfs_btree_write(struct hfs_btree *tree);
+extern int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes);
+extern struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
extern void hfs_bmap_free(struct hfs_bnode *node);
/* bnode.c */
-extern void hfs_bnode_read(struct hfs_bnode *, void *, int, int);
-extern u16 hfs_bnode_read_u16(struct hfs_bnode *, int);
-extern u8 hfs_bnode_read_u8(struct hfs_bnode *, int);
-extern void hfs_bnode_read_key(struct hfs_bnode *, void *, int);
-extern void hfs_bnode_write(struct hfs_bnode *, void *, int, int);
-extern void hfs_bnode_write_u16(struct hfs_bnode *, int, u16);
-extern void hfs_bnode_write_u8(struct hfs_bnode *, int, u8);
-extern void hfs_bnode_clear(struct hfs_bnode *, int, int);
-extern void hfs_bnode_copy(struct hfs_bnode *, int,
- struct hfs_bnode *, int, int);
-extern void hfs_bnode_move(struct hfs_bnode *, int, int, int);
-extern void hfs_bnode_dump(struct hfs_bnode *);
-extern void hfs_bnode_unlink(struct hfs_bnode *);
-extern struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *, u32);
-extern struct hfs_bnode *hfs_bnode_find(struct hfs_btree *, u32);
-extern void hfs_bnode_unhash(struct hfs_bnode *);
-extern void hfs_bnode_free(struct hfs_bnode *);
-extern struct hfs_bnode *hfs_bnode_create(struct hfs_btree *, u32);
-extern void hfs_bnode_get(struct hfs_bnode *);
-extern void hfs_bnode_put(struct hfs_bnode *);
+extern void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+extern u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off);
+extern u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off);
+extern void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off);
+extern void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+extern void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data);
+extern void hfs_bnode_write_u8(struct hfs_bnode *node, u32 off, u8 data);
+extern void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len);
+extern void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len);
+extern void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len);
+extern void hfs_bnode_dump(struct hfs_bnode *node);
+extern void hfs_bnode_unlink(struct hfs_bnode *node);
+extern struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid);
+extern struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num);
+extern void hfs_bnode_unhash(struct hfs_bnode *node);
+extern void hfs_bnode_free(struct hfs_bnode *node);
+extern struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num);
+extern void hfs_bnode_get(struct hfs_bnode *node);
+extern void hfs_bnode_put(struct hfs_bnode *node);
/* brec.c */
-extern u16 hfs_brec_lenoff(struct hfs_bnode *, u16, u16 *);
-extern u16 hfs_brec_keylen(struct hfs_bnode *, u16);
-extern int hfs_brec_insert(struct hfs_find_data *, void *, int);
-extern int hfs_brec_remove(struct hfs_find_data *);
+extern u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off);
+extern u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec);
+extern int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len);
+extern int hfs_brec_remove(struct hfs_find_data *fd);
/* bfind.c */
-extern int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
-extern void hfs_find_exit(struct hfs_find_data *);
-extern int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *);
-extern int hfs_brec_find(struct hfs_find_data *);
-extern int hfs_brec_read(struct hfs_find_data *, void *, int);
-extern int hfs_brec_goto(struct hfs_find_data *, int);
-
-
-struct hfs_bnode_desc {
- __be32 next; /* (V) Number of the next node at this level */
- __be32 prev; /* (V) Number of the prev node at this level */
- u8 type; /* (F) The type of node */
- u8 height; /* (F) The level of this node (leaves=1) */
- __be16 num_recs; /* (V) The number of records in this node */
- u16 reserved;
-} __packed;
-
-#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
-#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
-#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
-#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
-
-struct hfs_btree_header_rec {
- __be16 depth; /* (V) The number of levels in this B-tree */
- __be32 root; /* (V) The node number of the root node */
- __be32 leaf_count; /* (V) The number of leaf records */
- __be32 leaf_head; /* (V) The number of the first leaf node */
- __be32 leaf_tail; /* (V) The number of the last leaf node */
- __be16 node_size; /* (F) The number of bytes in a node (=512) */
- __be16 max_key_len; /* (F) The length of a key in an index node */
- __be32 node_count; /* (V) The total number of nodes */
- __be32 free_nodes; /* (V) The number of unused nodes */
- u16 reserved1;
- __be32 clump_size; /* (F) clump size. not usually used. */
- u8 btree_type; /* (F) BTree type */
- u8 reserved2;
- __be32 attributes; /* (F) attributes */
- u32 reserved3[16];
-} __packed;
-
-#define BTREE_ATTR_BADCLOSE 0x00000001 /* b-tree not closed properly. not
- used by hfsplus. */
-#define HFS_TREE_BIGKEYS 0x00000002 /* key length is u16 instead of u8.
- used by hfsplus. */
-#define HFS_TREE_VARIDXKEYS 0x00000004 /* variable key length instead of
- max key length. use din catalog
- b-tree but not in extents
- b-tree (hfsplus). */
+extern int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd);
+extern void hfs_find_exit(struct hfs_find_data *fd);
+extern int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd);
+extern int hfs_brec_find(struct hfs_find_data *fd);
+extern int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len);
+extern int hfs_brec_goto(struct hfs_find_data *fd, int cnt);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index d365bf0b8c77..b80ba40e3877 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -87,7 +87,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
int entry_size;
int err;
- hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
+ hfs_dbg("name %s, cnid %u, i_nlink %d\n",
str->name, cnid, inode->i_nlink);
if (dir->i_size >= HFS_MAX_VALENCE)
return -ENOSPC;
@@ -133,7 +133,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
goto err1;
dir->i_size++;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
hfs_find_exit(&fd);
return 0;
@@ -211,6 +211,124 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
return hfs_brec_find(fd);
}
+static inline
+void hfs_set_next_unused_CNID(struct super_block *sb,
+ u32 deleted_cnid, u32 found_cnid)
+{
+ if (found_cnid < HFS_FIRSTUSER_CNID) {
+ atomic64_cmpxchg(&HFS_SB(sb)->next_id,
+ deleted_cnid + 1, HFS_FIRSTUSER_CNID);
+ } else {
+ atomic64_cmpxchg(&HFS_SB(sb)->next_id,
+ deleted_cnid + 1, found_cnid + 1);
+ }
+}
+
+/*
+ * hfs_correct_next_unused_CNID()
+ *
+ * Correct the next unused CNID of the catalog tree.
+ */
+static
+int hfs_correct_next_unused_CNID(struct super_block *sb, u32 cnid)
+{
+ struct hfs_btree *cat_tree;
+ struct hfs_bnode *node;
+ s64 leaf_head;
+ s64 leaf_tail;
+ s64 node_id;
+
+ hfs_dbg("cnid %u, next_id %lld\n",
+ cnid, atomic64_read(&HFS_SB(sb)->next_id));
+
+ if ((cnid + 1) < atomic64_read(&HFS_SB(sb)->next_id)) {
+ /* next ID should be unchanged */
+ return 0;
+ }
+
+ cat_tree = HFS_SB(sb)->cat_tree;
+ leaf_head = cat_tree->leaf_head;
+ leaf_tail = cat_tree->leaf_tail;
+
+ if (leaf_head > leaf_tail) {
+ pr_err("node is corrupted: leaf_head %lld, leaf_tail %lld\n",
+ leaf_head, leaf_tail);
+ return -ERANGE;
+ }
+
+ node = hfs_bnode_find(cat_tree, leaf_tail);
+ if (IS_ERR(node)) {
+ pr_err("fail to find leaf node: node ID %lld\n",
+ leaf_tail);
+ return -ENOENT;
+ }
+
+ node_id = leaf_tail;
+
+ do {
+ int i;
+
+ if (node_id != leaf_tail) {
+ node = hfs_bnode_find(cat_tree, node_id);
+ if (IS_ERR(node))
+ return -ENOENT;
+ }
+
+ hfs_dbg("node %lld, leaf_tail %lld, leaf_head %lld\n",
+ node_id, leaf_tail, leaf_head);
+
+ hfs_bnode_dump(node);
+
+ for (i = node->num_recs - 1; i >= 0; i--) {
+ hfs_cat_rec rec;
+ u16 off, len, keylen;
+ int entryoffset;
+ int entrylength;
+ u32 found_cnid;
+
+ len = hfs_brec_lenoff(node, i, &off);
+ keylen = hfs_brec_keylen(node, i);
+ if (keylen == 0) {
+ pr_err("fail to get the keylen: "
+ "node_id %lld, record index %d\n",
+ node_id, i);
+ return -EINVAL;
+ }
+
+ entryoffset = off + keylen;
+ entrylength = len - keylen;
+
+ if (entrylength > sizeof(rec)) {
+ pr_err("unexpected record length: "
+ "entrylength %d\n",
+ entrylength);
+ return -EINVAL;
+ }
+
+ hfs_bnode_read(node, &rec, entryoffset, entrylength);
+
+ if (rec.type == HFS_CDR_DIR) {
+ found_cnid = be32_to_cpu(rec.dir.DirID);
+ hfs_dbg("found_cnid %u\n", found_cnid);
+ hfs_set_next_unused_CNID(sb, cnid, found_cnid);
+ hfs_bnode_put(node);
+ return 0;
+ } else if (rec.type == HFS_CDR_FIL) {
+ found_cnid = be32_to_cpu(rec.file.FlNum);
+ hfs_dbg("found_cnid %u\n", found_cnid);
+ hfs_set_next_unused_CNID(sb, cnid, found_cnid);
+ hfs_bnode_put(node);
+ return 0;
+ }
+ }
+
+ node_id = node->prev;
+ hfs_bnode_put(node);
+
+ } while (node_id >= leaf_head);
+
+ return -ENOENT;
+}
/*
* hfs_cat_delete()
@@ -225,7 +343,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
struct hfs_readdir_data *rd;
int res, type;
- hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+ hfs_dbg("name %s, cnid %u\n", str ? str->name : NULL, cnid);
sb = dir->i_sb;
res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
if (res)
@@ -269,8 +387,13 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
}
dir->i_size--;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
+
+ res = hfs_correct_next_unused_CNID(sb, cnid);
+ if (res)
+ goto out;
+
res = 0;
out:
hfs_find_exit(&fd);
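As an editorial illustration only (not part of the patch), the policy implemented by the hfs_set_next_unused_CNID()/hfs_correct_next_unused_CNID() helpers added above reduces to the following standalone sketch, where found_cnid stands for the CNID of the last file or directory record still present in the catalog leaves:

/* Standalone sketch of the next-CNID correction policy, with plain integers. */
#include <stdio.h>

#define HFS_FIRSTUSER_CNID	16

static unsigned long long correct_next_id(unsigned long long next_id,
					   unsigned int deleted_cnid,
					   unsigned int found_cnid)
{
	/* Only act when the deleted object held the most recently issued CNID. */
	if (next_id != (unsigned long long)deleted_cnid + 1)
		return next_id;
	/* Never hand out reserved CNIDs below HFS_FIRSTUSER_CNID. */
	if (found_cnid < HFS_FIRSTUSER_CNID)
		return HFS_FIRSTUSER_CNID;
	return (unsigned long long)found_cnid + 1;
}

int main(void)
{
	/* CNID 42 was just deleted; CNID 30 is the newest survivor. */
	printf("%llu\n", correct_next_id(43, 42, 30));	/* 31 */
	/* next_id already moved on, so the delete changes nothing. */
	printf("%llu\n", correct_next_id(50, 42, 30));	/* 50 */
	return 0;
}

The atomic64_cmpxchg() calls in hfs_set_next_unused_CNID() enforce the same rule lock-free: next_id is rewritten only if it still equals deleted_cnid + 1, so a concurrent allocation that has already advanced it is left untouched.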
@@ -294,7 +417,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
int entry_size, type;
int err;
- hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
+ hfs_dbg("cnid %u - (ino %lu, name %s) - (ino %lu, name %s)\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
sb = src_dir->i_sb;
@@ -337,7 +460,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
dst_dir->i_size++;
- dst_dir->i_mtime = dst_dir->i_ctime = current_time(dst_dir);
+ inode_set_mtime_to_ts(dst_dir, inode_set_ctime_current(dst_dir));
mark_inode_dirty(dst_dir);
/* finally remove the old entry */
@@ -349,7 +472,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
src_dir->i_size--;
- src_dir->i_mtime = src_dir->i_ctime = current_time(src_dir);
+ inode_set_mtime_to_ts(src_dir, inode_set_ctime_current(src_dir));
mark_inode_dirty(src_dir);
type = entry.type;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 527f6e46cbe8..86a6b317b474 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -189,7 +189,7 @@ static int hfs_dir_release(struct inode *inode, struct file *file)
* a directory and return a corresponding inode, given the inode for
* the directory and the name (and its length) of the new file.
*/
-static int hfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int hfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
@@ -219,26 +219,26 @@ static int hfs_create(struct user_namespace *mnt_userns, struct inode *dir,
* in a directory, given the inode for the parent directory and the
* name (and its length) of the new directory.
*/
-static int hfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int res;
inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
clear_nlink(inode);
hfs_delete_inode(inode);
iput(inode);
- return res;
+ return ERR_PTR(res);
}
d_instantiate(dentry, inode);
mark_inode_dirty(inode);
- return 0;
+ return NULL;
}
/*
@@ -263,7 +263,7 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
if (res)
return res;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
hfs_delete_inode(inode);
mark_inode_dirty(inode);
return 0;
@@ -280,7 +280,7 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
* new file/directory.
* XXX: how do you handle must_be dir?
*/
-static int hfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int hfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 263d5028d9d1..a097908b269d 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -71,7 +71,7 @@ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
*
* Find a block within an extent record
*/
-static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
+u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
int i;
u16 count;
@@ -209,12 +209,12 @@ static void hfs_dump_extent(struct hfs_extent *extent)
{
int i;
- hfs_dbg(EXTENT, " ");
+ hfs_dbg("extent: ");
for (i = 0; i < 3; i++)
- hfs_dbg_cont(EXTENT, " %u:%u",
- be16_to_cpu(extent[i].block),
- be16_to_cpu(extent[i].count));
- hfs_dbg_cont(EXTENT, "\n");
+ hfs_dbg(" block %u, count %u",
+ be16_to_cpu(extent[i].block),
+ be16_to_cpu(extent[i].count));
+ hfs_dbg("\n");
}
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
@@ -411,10 +411,11 @@ int hfs_extend_file(struct inode *inode)
goto out;
}
- hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
+ hfs_dbg("ino %lu, start %u, len %u\n", inode->i_ino, start, len);
if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
if (!HFS_I(inode)->first_blocks) {
- hfs_dbg(EXTENT, "first extents\n");
+ hfs_dbg("first_extent: start %u, len %u\n",
+ start, len);
/* no extents yet */
HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
@@ -456,7 +457,7 @@ out:
return res;
insert_extent:
- hfs_dbg(EXTENT, "insert new extent\n");
+ hfs_dbg("insert new extent\n");
res = hfs_ext_write_extent(inode);
if (res)
goto out;
@@ -481,21 +482,21 @@ void hfs_file_truncate(struct inode *inode)
u32 size;
int res;
- hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
+ hfs_dbg("ino %lu, phys_size %llu -> i_size %llu\n",
inode->i_ino, (long long)HFS_I(inode)->phys_size,
inode->i_size);
if (inode->i_size > HFS_I(inode)->phys_size) {
struct address_space *mapping = inode->i_mapping;
- void *fsdata;
- struct page *page;
+ void *fsdata = NULL;
+ struct folio *folio;
/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
- res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
- &page, &fsdata);
+ res = hfs_write_begin(NULL, mapping, size + 1, 0, &folio,
+ &fsdata);
if (!res) {
- res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
- page, fsdata);
+ res = generic_write_end(NULL, mapping, size + 1, 0, 0,
+ folio, fsdata);
}
if (res)
inode->i_size = HFS_I(inode)->phys_size;
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index 6f194d0768b6..3f2293ff6fdd 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -9,274 +9,7 @@
#ifndef _HFS_H
#define _HFS_H
-/* offsets to various blocks */
-#define HFS_DD_BLK 0 /* Driver Descriptor block */
-#define HFS_PMAP_BLK 1 /* First block of partition map */
-#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
-
-/* magic numbers for various disk blocks */
-#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
-#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
-#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
-#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
-#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
-
-/* various FIXED size parameters */
-#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
-#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
-#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
-#define HFS_MAX_NAMELEN 128
-#define HFS_MAX_VALENCE 32767U
-
-/* Meanings of the drAtrb field of the MDB,
- * Reference: _Inside Macintosh: Files_ p. 2-61
- */
-#define HFS_SB_ATTRIB_HLOCK (1 << 7)
-#define HFS_SB_ATTRIB_UNMNT (1 << 8)
-#define HFS_SB_ATTRIB_SPARED (1 << 9)
-#define HFS_SB_ATTRIB_INCNSTNT (1 << 11)
-#define HFS_SB_ATTRIB_SLOCK (1 << 15)
-
-/* Some special File ID numbers */
-#define HFS_POR_CNID 1 /* Parent Of the Root */
-#define HFS_ROOT_CNID 2 /* ROOT directory */
-#define HFS_EXT_CNID 3 /* EXTents B-tree */
-#define HFS_CAT_CNID 4 /* CATalog B-tree */
-#define HFS_BAD_CNID 5 /* BAD blocks file */
-#define HFS_ALLOC_CNID 6 /* ALLOCation file (HFS+) */
-#define HFS_START_CNID 7 /* STARTup file (HFS+) */
-#define HFS_ATTR_CNID 8 /* ATTRibutes file (HFS+) */
-#define HFS_EXCH_CNID 15 /* ExchangeFiles temp id */
-#define HFS_FIRSTUSER_CNID 16
-
-/* values for hfs_cat_rec.cdrType */
-#define HFS_CDR_DIR 0x01 /* folder (directory) */
-#define HFS_CDR_FIL 0x02 /* file */
-#define HFS_CDR_THD 0x03 /* folder (directory) thread */
-#define HFS_CDR_FTH 0x04 /* file thread */
-
-/* legal values for hfs_ext_key.FkType and hfs_file.fork */
-#define HFS_FK_DATA 0x00
-#define HFS_FK_RSRC 0xFF
-
-/* bits in hfs_fil_entry.Flags */
-#define HFS_FIL_LOCK 0x01 /* locked */
-#define HFS_FIL_THD 0x02 /* file thread */
-#define HFS_FIL_DOPEN 0x04 /* data fork open */
-#define HFS_FIL_ROPEN 0x08 /* resource fork open */
-#define HFS_FIL_DIR 0x10 /* directory (always clear) */
-#define HFS_FIL_NOCOPY 0x40 /* copy-protected file */
-#define HFS_FIL_USED 0x80 /* open */
-
-/* bits in hfs_dir_entry.Flags. dirflags is 16 bits. */
-#define HFS_DIR_LOCK 0x01 /* locked */
-#define HFS_DIR_THD 0x02 /* directory thread */
-#define HFS_DIR_INEXPFOLDER 0x04 /* in a shared area */
-#define HFS_DIR_MOUNTED 0x08 /* mounted */
-#define HFS_DIR_DIR 0x10 /* directory (always set) */
-#define HFS_DIR_EXPFOLDER 0x20 /* share point */
-
-/* bits hfs_finfo.fdFlags */
-#define HFS_FLG_INITED 0x0100
-#define HFS_FLG_LOCKED 0x1000
-#define HFS_FLG_INVISIBLE 0x4000
-
-/*======== HFS structures as they appear on the disk ========*/
-
-/* Pascal-style string of up to 31 characters */
-struct hfs_name {
- u8 len;
- u8 name[HFS_NAMELEN];
-} __packed;
-
-struct hfs_point {
- __be16 v;
- __be16 h;
-} __packed;
-
-struct hfs_rect {
- __be16 top;
- __be16 left;
- __be16 bottom;
- __be16 right;
-} __packed;
-
-struct hfs_finfo {
- __be32 fdType;
- __be32 fdCreator;
- __be16 fdFlags;
- struct hfs_point fdLocation;
- __be16 fdFldr;
-} __packed;
-
-struct hfs_fxinfo {
- __be16 fdIconID;
- u8 fdUnused[8];
- __be16 fdComment;
- __be32 fdPutAway;
-} __packed;
-
-struct hfs_dinfo {
- struct hfs_rect frRect;
- __be16 frFlags;
- struct hfs_point frLocation;
- __be16 frView;
-} __packed;
-
-struct hfs_dxinfo {
- struct hfs_point frScroll;
- __be32 frOpenChain;
- __be16 frUnused;
- __be16 frComment;
- __be32 frPutAway;
-} __packed;
-
-union hfs_finder_info {
- struct {
- struct hfs_finfo finfo;
- struct hfs_fxinfo fxinfo;
- } file;
- struct {
- struct hfs_dinfo dinfo;
- struct hfs_dxinfo dxinfo;
- } dir;
-} __packed;
-
-/* Cast to a pointer to a generic bkey */
-#define HFS_BKEY(X) (((void)((X)->KeyLen)), ((struct hfs_bkey *)(X)))
-
-/* The key used in the catalog b-tree: */
-struct hfs_cat_key {
- u8 key_len; /* number of bytes in the key */
- u8 reserved; /* padding */
- __be32 ParID; /* CNID of the parent dir */
- struct hfs_name CName; /* The filename of the entry */
-} __packed;
-
-/* The key used in the extents b-tree: */
-struct hfs_ext_key {
- u8 key_len; /* number of bytes in the key */
- u8 FkType; /* HFS_FK_{DATA,RSRC} */
- __be32 FNum; /* The File ID of the file */
- __be16 FABN; /* allocation blocks number*/
-} __packed;
-
-typedef union hfs_btree_key {
- u8 key_len; /* number of bytes in the key */
- struct hfs_cat_key cat;
- struct hfs_ext_key ext;
-} hfs_btree_key;
-
-#define HFS_MAX_CAT_KEYLEN (sizeof(struct hfs_cat_key) - sizeof(u8))
-#define HFS_MAX_EXT_KEYLEN (sizeof(struct hfs_ext_key) - sizeof(u8))
-
-typedef union hfs_btree_key btree_key;
-
-struct hfs_extent {
- __be16 block;
- __be16 count;
-};
-typedef struct hfs_extent hfs_extent_rec[3];
-
-/* The catalog record for a file */
-struct hfs_cat_file {
- s8 type; /* The type of entry */
- u8 reserved;
- u8 Flags; /* Flags such as read-only */
- s8 Typ; /* file version number = 0 */
- struct hfs_finfo UsrWds; /* data used by the Finder */
- __be32 FlNum; /* The CNID */
- __be16 StBlk; /* obsolete */
- __be32 LgLen; /* The logical EOF of the data fork*/
- __be32 PyLen; /* The physical EOF of the data fork */
- __be16 RStBlk; /* obsolete */
- __be32 RLgLen; /* The logical EOF of the rsrc fork */
- __be32 RPyLen; /* The physical EOF of the rsrc fork */
- __be32 CrDat; /* The creation date */
- __be32 MdDat; /* The modified date */
- __be32 BkDat; /* The last backup date */
- struct hfs_fxinfo FndrInfo; /* more data for the Finder */
- __be16 ClpSize; /* number of bytes to allocate
- when extending files */
- hfs_extent_rec ExtRec; /* first extent record
- for the data fork */
- hfs_extent_rec RExtRec; /* first extent record
- for the resource fork */
- u32 Resrv; /* reserved by Apple */
-} __packed;
-
-/* the catalog record for a directory */
-struct hfs_cat_dir {
- s8 type; /* The type of entry */
- u8 reserved;
- __be16 Flags; /* flags */
- __be16 Val; /* Valence: number of files and
- dirs in the directory */
- __be32 DirID; /* The CNID */
- __be32 CrDat; /* The creation date */
- __be32 MdDat; /* The modification date */
- __be32 BkDat; /* The last backup date */
- struct hfs_dinfo UsrInfo; /* data used by the Finder */
- struct hfs_dxinfo FndrInfo; /* more data used by Finder */
- u8 Resrv[16]; /* reserved by Apple */
-} __packed;
-
-/* the catalog record for a thread */
-struct hfs_cat_thread {
- s8 type; /* The type of entry */
- u8 reserved[9]; /* reserved by Apple */
- __be32 ParID; /* CNID of parent directory */
- struct hfs_name CName; /* The name of this entry */
-} __packed;
-
-/* A catalog tree record */
-typedef union hfs_cat_rec {
- s8 type; /* The type of entry */
- struct hfs_cat_file file;
- struct hfs_cat_dir dir;
- struct hfs_cat_thread thread;
-} hfs_cat_rec;
-
-struct hfs_mdb {
- __be16 drSigWord; /* Signature word indicating fs type */
- __be32 drCrDate; /* fs creation date/time */
- __be32 drLsMod; /* fs modification date/time */
- __be16 drAtrb; /* fs attributes */
- __be16 drNmFls; /* number of files in root directory */
- __be16 drVBMSt; /* location (in 512-byte blocks)
- of the volume bitmap */
- __be16 drAllocPtr; /* location (in allocation blocks)
- to begin next allocation search */
- __be16 drNmAlBlks; /* number of allocation blocks */
- __be32 drAlBlkSiz; /* bytes in an allocation block */
- __be32 drClpSiz; /* clumpsize, the number of bytes to
- allocate when extending a file */
- __be16 drAlBlSt; /* location (in 512-byte blocks)
- of the first allocation block */
- __be32 drNxtCNID; /* CNID to assign to the next
- file or directory created */
- __be16 drFreeBks; /* number of free allocation blocks */
- u8 drVN[28]; /* the volume label */
- __be32 drVolBkUp; /* fs backup date/time */
- __be16 drVSeqNum; /* backup sequence number */
- __be32 drWrCnt; /* fs write count */
- __be32 drXTClpSiz; /* clumpsize for the extents B-tree */
- __be32 drCTClpSiz; /* clumpsize for the catalog B-tree */
- __be16 drNmRtDirs; /* number of directories in
- the root directory */
- __be32 drFilCnt; /* number of files in the fs */
- __be32 drDirCnt; /* number of directories in the fs */
- u8 drFndrInfo[32]; /* data used by the Finder */
- __be16 drEmbedSigWord; /* embedded volume signature */
- __be32 drEmbedExtent; /* starting block number (xdrStABN)
- and number of allocation blocks
- (xdrNumABlks) occupied by embedded
- volume */
- __be32 drXTFlSize; /* bytes in the extents B-tree */
- hfs_extent_rec drXTExtRec; /* extents B-tree's first 3 extents */
- __be32 drCTFlSize; /* bytes in the catalog B-tree */
- hfs_extent_rec drCTExtRec; /* catalog B-tree's first 3 extents */
-} __packed;
+#include <linux/hfs_common.h>
/*======== Data structures kept in memory ========*/
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index b8eb0322a3e5..e94dbc04a1e4 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -9,12 +9,6 @@
#ifndef _LINUX_HFS_FS_H
#define _LINUX_HFS_FS_H
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mutex.h>
@@ -27,32 +21,6 @@
#include "hfs.h"
-#define DBG_BNODE_REFS 0x00000001
-#define DBG_BNODE_MOD 0x00000002
-#define DBG_CAT_MOD 0x00000004
-#define DBG_INODE 0x00000008
-#define DBG_SUPER 0x00000010
-#define DBG_EXTENT 0x00000020
-#define DBG_BITMAP 0x00000040
-
-//#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD|DBG_CAT_MOD|DBG_BITMAP)
-//#define DBG_MASK (DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
-//#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
-#define DBG_MASK (0)
-
-#define hfs_dbg(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-
-#define hfs_dbg_cont(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- pr_cont(fmt, ##__VA_ARGS__); \
-} while (0)
-
-
/*
* struct hfs_inode_info
*
@@ -112,13 +80,13 @@ struct hfs_sb_info {
the extents b-tree */
struct hfs_btree *cat_tree; /* Information about
the catalog b-tree */
- u32 file_count; /* The number of
+ atomic64_t file_count; /* The number of
regular files in
the filesystem */
- u32 folder_count; /* The number of
+ atomic64_t folder_count; /* The number of
directories in the
filesystem */
- u32 next_id; /* The next available
+ atomic64_t next_id; /* The next available
file id number */
u32 clumpablks; /* The number of allocation
blocks to try to add when
@@ -171,71 +139,90 @@ struct hfs_sb_info {
#define HFS_FLG_ALT_MDB_DIRTY 2
/* bitmap.c */
-extern u32 hfs_vbm_search_free(struct super_block *, u32, u32 *);
-extern int hfs_clear_vbm_bits(struct super_block *, u16, u16);
+extern u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits);
+extern int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count);
/* catalog.c */
-extern int hfs_cat_keycmp(const btree_key *, const btree_key *);
+extern int hfs_cat_keycmp(const btree_key *key1, const btree_key *key2);
struct hfs_find_data;
-extern int hfs_cat_find_brec(struct super_block *, u32, struct hfs_find_data *);
-extern int hfs_cat_create(u32, struct inode *, const struct qstr *, struct inode *);
-extern int hfs_cat_delete(u32, struct inode *, const struct qstr *);
-extern int hfs_cat_move(u32, struct inode *, const struct qstr *,
- struct inode *, const struct qstr *);
-extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, const struct qstr *);
+extern int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
+ struct hfs_find_data *fd);
+extern int hfs_cat_create(u32 cnid, struct inode *dir,
+ const struct qstr *str, struct inode *inode);
+extern int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str);
+extern int hfs_cat_move(u32 cnid, struct inode *src_dir,
+ const struct qstr *src_name,
+ struct inode *dst_dir,
+ const struct qstr *dst_name);
+extern void hfs_cat_build_key(struct super_block *sb, btree_key *key,
+ u32 parent, const struct qstr *name);
/* dir.c */
extern const struct file_operations hfs_dir_operations;
extern const struct inode_operations hfs_dir_inode_operations;
/* extent.c */
-extern int hfs_ext_keycmp(const btree_key *, const btree_key *);
-extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int);
-extern int hfs_ext_write_extent(struct inode *);
-extern int hfs_extend_file(struct inode *);
-extern void hfs_file_truncate(struct inode *);
+extern int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2);
+extern u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off);
+extern int hfs_free_fork(struct super_block *sb,
+ struct hfs_cat_file *file, int type);
+extern int hfs_ext_write_extent(struct inode *inode);
+extern int hfs_extend_file(struct inode *inode);
+extern void hfs_file_truncate(struct inode *inode);
-extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+extern int hfs_get_block(struct inode *inode, sector_t block,
+ struct buffer_head *bh_result, int create);
/* inode.c */
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
-extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
-extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
-extern int hfs_write_inode(struct inode *, struct writeback_control *);
-extern int hfs_inode_setattr(struct user_namespace *, struct dentry *,
- struct iattr *);
+int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned int len, struct folio **foliop,
+ void **fsdata);
+extern struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name,
+ umode_t mode);
+extern void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
+ __be32 *log_size, __be32 *phys_size);
+extern int hfs_write_inode(struct inode *inode, struct writeback_control *wbc);
+extern int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr);
extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
- __be32 log_size, __be32 phys_size, u32 clump_size);
-extern struct inode *hfs_iget(struct super_block *, struct hfs_cat_key *, hfs_cat_rec *);
-extern void hfs_evict_inode(struct inode *);
-extern void hfs_delete_inode(struct inode *);
+ __be32 __log_size, __be32 phys_size,
+ u32 clump_size);
+extern struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key,
+ hfs_cat_rec *rec);
+extern void hfs_evict_inode(struct inode *inode);
+extern void hfs_delete_inode(struct inode *inode);
/* attr.c */
-extern const struct xattr_handler *hfs_xattr_handlers[];
+extern const struct xattr_handler * const hfs_xattr_handlers[];
/* mdb.c */
-extern int hfs_mdb_get(struct super_block *);
-extern void hfs_mdb_commit(struct super_block *);
-extern void hfs_mdb_close(struct super_block *);
-extern void hfs_mdb_put(struct super_block *);
+extern int hfs_mdb_get(struct super_block *sb);
+extern void hfs_mdb_commit(struct super_block *sb);
+extern void hfs_mdb_close(struct super_block *sb);
+extern void hfs_mdb_put(struct super_block *sb);
/* part_tbl.c */
-extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
+extern int hfs_part_find(struct super_block *sb,
+ sector_t *part_start, sector_t *part_size);
/* string.c */
extern const struct dentry_operations hfs_dentry_operations;
-extern int hfs_hash_dentry(const struct dentry *, struct qstr *);
-extern int hfs_strcmp(const unsigned char *, unsigned int,
- const unsigned char *, unsigned int);
+extern int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this);
+extern int hfs_strcmp(const unsigned char *s1, unsigned int len1,
+ const unsigned char *s2, unsigned int len2);
extern int hfs_compare_dentry(const struct dentry *dentry,
- unsigned int len, const char *str, const struct qstr *name);
+ unsigned int len, const char *str,
+ const struct qstr *name);
/* trans.c */
-extern void hfs_asc2mac(struct super_block *, struct hfs_name *, const struct qstr *);
-extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *);
+extern void hfs_asc2mac(struct super_block *sb,
+ struct hfs_name *out, const struct qstr *in);
+extern int hfs_mac2asc(struct super_block *sb,
+ char *out, const struct hfs_name *in);
/* super.c */
extern void hfs_mark_mdb_dirty(struct super_block *sb);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 2a5143246282..524db1389737 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -29,14 +29,9 @@ static const struct inode_operations hfs_file_inode_operations;
#define HFS_VALID_MODE_BITS (S_IFREG | S_IFDIR | S_IRWXUGO)
-static int hfs_writepage(struct page *page, struct writeback_control *wbc)
+static int hfs_read_folio(struct file *file, struct folio *folio)
{
- return block_write_full_page(page, hfs_get_block, wbc);
-}
-
-static int hfs_readpage(struct file *file, struct page *page)
-{
- return block_read_full_page(page, hfs_get_block);
+ return block_read_full_folio(folio, hfs_get_block);
}
static void hfs_write_failed(struct address_space *mapping, loff_t to)
@@ -49,14 +44,13 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int hfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned int len, struct folio **foliop,
+ void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -70,14 +64,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfs_get_block);
}
-static int hfs_releasepage(struct page *page, gfp_t mask)
+static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
- int i, res = 1;
+ int i;
+ bool res = true;
switch (inode->i_ino) {
case HFS_EXT_CNID:
@@ -88,27 +83,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
break;
default:
BUG();
- return 0;
+ return false;
}
if (!tree)
- return 0;
+ return false;
if (tree->node_size >= PAGE_SIZE) {
- nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
+ nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
- res = 0;
+ res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
- nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+ nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
@@ -116,7 +111,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
- res = 0;
+ res = false;
break;
}
hfs_bnode_unhash(node);
@@ -124,7 +119,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
- return res ? try_to_free_buffers(page) : 0;
+ return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -159,24 +154,27 @@ static int hfs_writepages(struct address_space *mapping,
}
const struct address_space_operations hfs_btree_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = hfs_readpage,
- .writepage = hfs_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = hfs_read_folio,
+ .writepages = hfs_writepages,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = hfs_bmap,
- .releasepage = hfs_releasepage,
+ .release_folio = hfs_release_folio,
};
const struct address_space_operations hfs_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = hfs_readpage,
- .writepage = hfs_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = hfs_read_folio,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
.direct_IO = hfs_direct_IO,
.writepages = hfs_writepages,
+ .migrate_folio = buffer_migrate_folio,
};
/*
@@ -186,6 +184,10 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
{
struct super_block *sb = dir->i_sb;
struct inode *inode = new_inode(sb);
+ s64 next_id;
+ s64 file_count;
+ s64 folder_count;
+
if (!inode)
return NULL;
@@ -193,18 +195,22 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
spin_lock_init(&HFS_I(inode)->open_dir_lock);
hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
- inode->i_ino = HFS_SB(sb)->next_id++;
+ next_id = atomic64_inc_return(&HFS_SB(sb)->next_id);
+ BUG_ON(next_id > U32_MAX);
+ inode->i_ino = (u32)next_id;
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
+ HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
if (S_ISDIR(mode)) {
inode->i_size = 2;
- HFS_SB(sb)->folder_count++;
+ folder_count = atomic64_inc_return(&HFS_SB(sb)->folder_count);
+ BUG_ON(folder_count > U32_MAX);
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_dirs++;
inode->i_op = &hfs_dir_inode_operations;
@@ -213,7 +219,8 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
} else if (S_ISREG(mode)) {
HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
- HFS_SB(sb)->file_count++;
+ file_count = atomic64_inc_return(&HFS_SB(sb)->file_count);
+ BUG_ON(file_count > U32_MAX);
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_files++;
inode->i_op = &hfs_file_inode_operations;
@@ -243,16 +250,19 @@ void hfs_delete_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
- hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
if (S_ISDIR(inode->i_mode)) {
- HFS_SB(sb)->folder_count--;
+ BUG_ON(atomic64_read(&HFS_SB(sb)->folder_count) > U32_MAX);
+ atomic64_dec(&HFS_SB(sb)->folder_count);
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_dirs--;
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
hfs_mark_mdb_dirty(sb);
return;
}
- HFS_SB(sb)->file_count--;
+
+ BUG_ON(atomic64_read(&HFS_SB(sb)->file_count) > U32_MAX);
+ atomic64_dec(&HFS_SB(sb)->file_count);
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_files--;
if (S_ISREG(inode->i_mode)) {
@@ -277,6 +287,8 @@ void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
for (count = 0, i = 0; i < 3; i++)
count += be16_to_cpu(ext[i].count);
HFS_I(inode)->first_blocks = count;
+ HFS_I(inode)->cached_start = 0;
+ HFS_I(inode)->cached_blocks = 0;
inode->i_size = HFS_I(inode)->phys_size = log_size;
HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
@@ -353,8 +365,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_mode |= S_IWUGO;
inode->i_mode &= ~hsb->s_file_umask;
inode->i_mode |= S_IFREG;
- inode->i_ctime = inode->i_atime = inode->i_mtime =
- hfs_m_to_utime(rec->file.MdDat);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->file.MdDat))));
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
@@ -364,8 +376,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
HFS_I(inode)->fs_blocks = 0;
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
- inode->i_ctime = inode->i_atime = inode->i_mtime =
- hfs_m_to_utime(rec->dir.MdDat);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->dir.MdDat))));
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
break;
@@ -401,7 +413,7 @@ struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_
return NULL;
}
inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
- if (inode && (inode->i_state & I_NEW))
+ if (inode && (inode_state_read_once(inode) & I_NEW))
unlock_new_inode(inode);
return inode;
}
@@ -425,7 +437,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
hfs_cat_rec rec;
int res;
- hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
res = hfs_ext_write_extent(inode);
if (res)
return res;
@@ -456,25 +468,30 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
/* panic? */
return -EIO;
+ res = -EIO;
+ if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
+ goto out;
fd.search_key->cat = HFS_I(main_inode)->cat_key;
if (hfs_brec_find(&fd))
- /* panic? */
goto out;
if (S_ISDIR(main_inode->i_mode)) {
- WARN_ON(fd.entrylength < sizeof(struct hfs_cat_dir));
+ if (fd.entrylength < sizeof(struct hfs_cat_dir))
+ goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_dir));
if (rec.type != HFS_CDR_DIR ||
be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
}
- rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
+ rec.dir.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
rec.dir.Val = cpu_to_be16(inode->i_size - 2);
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_dir));
} else if (HFS_IS_RSRC(inode)) {
+ if (fd.entrylength < sizeof(struct hfs_cat_file))
+ goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
hfs_inode_write_fork(inode, rec.file.RExtRec,
@@ -482,7 +499,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
} else {
- WARN_ON(fd.entrylength < sizeof(struct hfs_cat_file));
+ if (fd.entrylength < sizeof(struct hfs_cat_file))
+ goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
if (rec.type != HFS_CDR_FIL ||
@@ -494,14 +512,15 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
else
rec.file.Flags |= HFS_FIL_LOCK;
hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
- rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);
+ rec.file.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
}
+ res = 0;
out:
hfs_find_exit(&fd);
- return 0;
+ return res;
}
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
@@ -602,14 +621,14 @@ static int hfs_file_release(struct inode *inode, struct file *file)
* correspond to the same HFS file.
*/
-int hfs_inode_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
int error;
- error = setattr_prepare(&init_user_ns, dentry,
+ error = setattr_prepare(&nop_mnt_idmap, dentry,
attr); /* basic permission checks */
if (error)
return error;
@@ -645,11 +664,10 @@ int hfs_inode_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ simple_inode_init_ts(inode);
}
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -684,8 +702,9 @@ static const struct file_operations hfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .splice_read = generic_file_splice_read,
+ .mmap_prepare = generic_file_mmap_prepare,
+ .splice_read = filemap_splice_read,
+ .splice_write = iter_file_splice_write,
.fsync = hfs_file_fsync,
.open = hfs_file_open,
.release = hfs_file_release,
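
Editorial aside on the nested timestamp calls in hfs_read_inode() above (illustrative, not part of the patch): inode_set_ctime_to_ts(), inode_set_atime_to_ts() and inode_set_mtime_to_ts() each return the timespec64 they stored, so nesting them assigns the same value to all three timestamps in a single expression. A minimal sketch, assuming only the in-tree accessor signatures:

	/* sketch only: set ctime, atime and mtime to the same value */
	static inline void set_all_times_example(struct inode *inode,
						 struct timespec64 ts)
	{
		inode_set_mtime_to_ts(inode,
			inode_set_atime_to_ts(inode,
				inode_set_ctime_to_ts(inode, ts)));
	}
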
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 5beb82652435..53f3fae60217 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -9,7 +9,7 @@
*/
#include <linux/cdrom.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/nls.h>
#include <linux/slab.h>
@@ -150,11 +150,11 @@ int hfs_mdb_get(struct super_block *sb)
/* These parameters are read from and written to the MDB */
HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks);
- HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID);
+ atomic64_set(&HFS_SB(sb)->next_id, be32_to_cpu(mdb->drNxtCNID));
HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls);
HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs);
- HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt);
- HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt);
+ atomic64_set(&HFS_SB(sb)->file_count, be32_to_cpu(mdb->drFilCnt));
+ atomic64_set(&HFS_SB(sb)->folder_count, be32_to_cpu(mdb->drDirCnt));
/* TRY to get the alternate (backup) MDB. */
sect = part_start + part_size - 2;
@@ -172,7 +172,7 @@ int hfs_mdb_get(struct super_block *sb)
pr_warn("continuing without an alternate MDB\n");
}
- HFS_SB(sb)->bitmap = kmalloc(8192, GFP_KERNEL);
+ HFS_SB(sb)->bitmap = kzalloc(8192, GFP_KERNEL);
if (!HFS_SB(sb)->bitmap)
goto out;
@@ -273,11 +273,17 @@ void hfs_mdb_commit(struct super_block *sb)
/* These parameters may have been modified, so write them back */
mdb->drLsMod = hfs_mtime();
mdb->drFreeBks = cpu_to_be16(HFS_SB(sb)->free_ablocks);
- mdb->drNxtCNID = cpu_to_be32(HFS_SB(sb)->next_id);
+ BUG_ON(atomic64_read(&HFS_SB(sb)->next_id) > U32_MAX);
+ mdb->drNxtCNID =
+ cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->next_id));
mdb->drNmFls = cpu_to_be16(HFS_SB(sb)->root_files);
mdb->drNmRtDirs = cpu_to_be16(HFS_SB(sb)->root_dirs);
- mdb->drFilCnt = cpu_to_be32(HFS_SB(sb)->file_count);
- mdb->drDirCnt = cpu_to_be32(HFS_SB(sb)->folder_count);
+ BUG_ON(atomic64_read(&HFS_SB(sb)->file_count) > U32_MAX);
+ mdb->drFilCnt =
+ cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->file_count));
+ BUG_ON(atomic64_read(&HFS_SB(sb)->folder_count) > U32_MAX);
+ mdb->drDirCnt =
+ cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->folder_count));
/* write MDB to disk */
mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
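
Editorial aside on the counter conversion above: drFilCnt, drDirCnt and drNxtCNID are 32-bit big-endian fields on disk, so the in-memory counters are widened to atomic64_t and every write-back path narrows them under a BUG_ON() guard. A sketch of that pattern with a hypothetical helper name (not in the patch):

	/* hypothetical helper showing the narrowing used in hfs_mdb_commit() */
	static inline __be32 hfs_counter_to_be32(atomic64_t *ctr)
	{
		s64 v = atomic64_read(ctr);

		BUG_ON(v < 0 || v > U32_MAX);
		return cpu_to_be32((u32)v);
	}
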
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
index 3912209153a8..0cfa35e82abc 100644
--- a/fs/hfs/string.c
+++ b/fs/hfs/string.c
@@ -16,6 +16,8 @@
#include "hfs_fs.h"
#include <linux/dcache.h>
+#include <kunit/visibility.h>
+
/*================ File-local variables ================*/
/*
@@ -65,6 +67,7 @@ int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this)
this->hash = end_name_hash(hash);
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfs_hash_dentry);
/*
* Compare two strings in the HFS filename character ordering
@@ -87,6 +90,7 @@ int hfs_strcmp(const unsigned char *s1, unsigned int len1,
}
return len1 - len2;
}
+EXPORT_SYMBOL_IF_KUNIT(hfs_strcmp);
/*
* Test for equality of two strings in the HFS filename character ordering.
@@ -112,3 +116,4 @@ int hfs_compare_dentry(const struct dentry *dentry,
}
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfs_compare_dentry);
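
Editorial note: the EXPORT_SYMBOL_IF_KUNIT() markers added above export hfs_hash_dentry(), hfs_strcmp() and hfs_compare_dentry() into the EXPORTED_FOR_KUNIT_TESTING symbol namespace only when CONFIG_KUNIT is enabled; the new string_test.c below pulls them in with the matching MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING") declaration, so the exports are a no-op on non-KUnit builds.
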
diff --git a/fs/hfs/string_test.c b/fs/hfs/string_test.c
new file mode 100644
index 000000000000..e1bf6f954312
--- /dev/null
+++ b/fs/hfs/string_test.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for HFS string operations
+ *
+ * Copyright (C) 2025 Viacheslav Dubeyko <slava@dubeyko.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/dcache.h>
+#include "hfs_fs.h"
+
+/* Test hfs_strcmp function */
+static void hfs_strcmp_test(struct kunit *test)
+{
+ /* Test equal strings */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("hello", 5, "hello", 5));
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("test", 4, "test", 4));
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("", 0, "", 0));
+
+ /* Test unequal strings */
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("hello", 5, "world", 5));
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("test", 4, "testing", 7));
+
+ /* Test different lengths */
+ KUNIT_EXPECT_LT(test, hfs_strcmp("test", 4, "testing", 7), 0);
+ KUNIT_EXPECT_GT(test, hfs_strcmp("testing", 7, "test", 4), 0);
+
+ /* Test case insensitive comparison (HFS should handle case) */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("Test", 4, "TEST", 4));
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("hello", 5, "HELLO", 5));
+
+ /* Test with special characters */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("file.txt", 8, "file.txt", 8));
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("file.txt", 8, "file.dat", 8));
+
+ /* Test boundary cases */
+ KUNIT_EXPECT_EQ(test, 0, hfs_strcmp("a", 1, "a", 1));
+ KUNIT_EXPECT_NE(test, 0, hfs_strcmp("a", 1, "b", 1));
+}
+
+/* Test hfs_hash_dentry function */
+static void hfs_hash_dentry_test(struct kunit *test)
+{
+ struct qstr test_name1, test_name2, test_name3;
+ struct dentry dentry = {};
+ char name1[] = "testfile";
+ char name2[] = "TestFile";
+ char name3[] = "different";
+
+ /* Initialize test strings */
+ test_name1.name = name1;
+ test_name1.len = strlen(name1);
+ test_name1.hash = 0;
+
+ test_name2.name = name2;
+ test_name2.len = strlen(name2);
+ test_name2.hash = 0;
+
+ test_name3.name = name3;
+ test_name3.len = strlen(name3);
+ test_name3.hash = 0;
+
+ /* Test hashing */
+ KUNIT_EXPECT_EQ(test, 0, hfs_hash_dentry(&dentry, &test_name1));
+ KUNIT_EXPECT_EQ(test, 0, hfs_hash_dentry(&dentry, &test_name2));
+ KUNIT_EXPECT_EQ(test, 0, hfs_hash_dentry(&dentry, &test_name3));
+
+ /* Case insensitive names should hash the same */
+ KUNIT_EXPECT_EQ(test, test_name1.hash, test_name2.hash);
+
+ /* Different names should have different hashes */
+ KUNIT_EXPECT_NE(test, test_name1.hash, test_name3.hash);
+}
+
+/* Test hfs_compare_dentry function */
+static void hfs_compare_dentry_test(struct kunit *test)
+{
+ struct qstr test_name;
+ struct dentry dentry = {};
+ char name[] = "TestFile";
+
+ test_name.name = name;
+ test_name.len = strlen(name);
+
+ /* Test exact match */
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 8,
+ "TestFile", &test_name));
+
+ /* Test case insensitive match */
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 8,
+ "testfile", &test_name));
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 8,
+ "TESTFILE", &test_name));
+
+ /* Test different names */
+ KUNIT_EXPECT_EQ(test, 1, hfs_compare_dentry(&dentry, 8,
+ "DiffFile", &test_name));
+
+ /* Test different lengths */
+ KUNIT_EXPECT_EQ(test, 1, hfs_compare_dentry(&dentry, 7,
+ "TestFil", &test_name));
+ KUNIT_EXPECT_EQ(test, 1, hfs_compare_dentry(&dentry, 9,
+ "TestFiles", &test_name));
+
+ /* Test empty string */
+ test_name.name = "";
+ test_name.len = 0;
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, 0, "", &test_name));
+
+ /* Test HFS_NAMELEN boundary */
+ test_name.name = "This_is_a_very_long_filename_that_exceeds_normal_limits";
+ test_name.len = strlen(test_name.name);
+ KUNIT_EXPECT_EQ(test, 0, hfs_compare_dentry(&dentry, HFS_NAMELEN,
+ "This_is_a_very_long_filename_th", &test_name));
+}
+
+static struct kunit_case hfs_string_test_cases[] = {
+ KUNIT_CASE(hfs_strcmp_test),
+ KUNIT_CASE(hfs_hash_dentry_test),
+ KUNIT_CASE(hfs_compare_dentry_test),
+ {}
+};
+
+static struct kunit_suite hfs_string_test_suite = {
+ .name = "hfs_string",
+ .test_cases = hfs_string_test_cases,
+};
+
+kunit_test_suite(hfs_string_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for HFS string operations");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 12d9bae39363..47f50fa555a4 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -15,10 +15,11 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/nls.h>
-#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vfs.h>
@@ -28,6 +29,7 @@
static struct kmem_cache *hfs_inode_cachep;
+MODULE_DESCRIPTION("Apple Macintosh file system support");
MODULE_LICENSE("GPL");
static int hfs_sync_fs(struct super_block *sb, int wait)
@@ -110,21 +112,24 @@ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int hfs_remount(struct super_block *sb, int *flags, char *data)
+static int hfs_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+
sync_filesystem(sb);
- *flags |= SB_NODIRATIME;
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ fc->sb_flags |= SB_NODIRATIME;
+ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (!(*flags & SB_RDONLY)) {
+
+ if (!(fc->sb_flags & SB_RDONLY)) {
if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
- *flags |= SB_RDONLY;
+ fc->sb_flags |= SB_RDONLY;
} else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
- *flags |= SB_RDONLY;
+ fc->sb_flags |= SB_RDONLY;
}
}
return 0;
@@ -162,7 +167,7 @@ static struct inode *hfs_alloc_inode(struct super_block *sb)
{
struct hfs_inode_info *i;
- i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
+ i = alloc_inode_sb(sb, hfs_inode_cachep, GFP_KERNEL);
return i ? &i->vfs_inode : NULL;
}
@@ -179,7 +184,6 @@ static const struct super_operations hfs_super_operations = {
.put_super = hfs_put_super,
.sync_fs = hfs_sync_fs,
.statfs = hfs_statfs,
- .remount_fs = hfs_remount,
.show_options = hfs_show_options,
};
@@ -187,181 +191,112 @@ enum {
opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask,
opt_part, opt_session, opt_type, opt_creator, opt_quiet,
opt_codepage, opt_iocharset,
- opt_err
};
-static const match_table_t tokens = {
- { opt_uid, "uid=%u" },
- { opt_gid, "gid=%u" },
- { opt_umask, "umask=%o" },
- { opt_file_umask, "file_umask=%o" },
- { opt_dir_umask, "dir_umask=%o" },
- { opt_part, "part=%u" },
- { opt_session, "session=%u" },
- { opt_type, "type=%s" },
- { opt_creator, "creator=%s" },
- { opt_quiet, "quiet" },
- { opt_codepage, "codepage=%s" },
- { opt_iocharset, "iocharset=%s" },
- { opt_err, NULL }
+static const struct fs_parameter_spec hfs_param_spec[] = {
+ fsparam_u32 ("uid", opt_uid),
+ fsparam_u32 ("gid", opt_gid),
+ fsparam_u32oct ("umask", opt_umask),
+ fsparam_u32oct ("file_umask", opt_file_umask),
+ fsparam_u32oct ("dir_umask", opt_dir_umask),
+ fsparam_u32 ("part", opt_part),
+ fsparam_u32 ("session", opt_session),
+ fsparam_string ("type", opt_type),
+ fsparam_string ("creator", opt_creator),
+ fsparam_flag ("quiet", opt_quiet),
+ fsparam_string ("codepage", opt_codepage),
+ fsparam_string ("iocharset", opt_iocharset),
+ {}
};
-static inline int match_fourchar(substring_t *arg, u32 *result)
-{
- if (arg->to - arg->from != 4)
- return -EINVAL;
- memcpy(result, arg->from, 4);
- return 0;
-}
-
/*
- * parse_options()
+ * hfs_parse_param()
*
- * adapted from linux/fs/msdos/inode.c written 1992,93 by Werner Almesberger
- * This function is called by hfs_read_super() to parse the mount options.
+ * This function is called by the vfs to parse the mount options.
*/
-static int parse_options(char *options, struct hfs_sb_info *hsb)
+static int hfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int tmp, token;
-
- /* initialize the sb with defaults */
- hsb->s_uid = current_uid();
- hsb->s_gid = current_gid();
- hsb->s_file_umask = 0133;
- hsb->s_dir_umask = 0022;
- hsb->s_type = hsb->s_creator = cpu_to_be32(0x3f3f3f3f); /* == '????' */
- hsb->s_quiet = 0;
- hsb->part = -1;
- hsb->session = -1;
-
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case opt_uid:
- if (match_int(&args[0], &tmp)) {
- pr_err("uid requires an argument\n");
- return 0;
- }
- hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp);
- if (!uid_valid(hsb->s_uid)) {
- pr_err("invalid uid %d\n", tmp);
- return 0;
- }
- break;
- case opt_gid:
- if (match_int(&args[0], &tmp)) {
- pr_err("gid requires an argument\n");
- return 0;
- }
- hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp);
- if (!gid_valid(hsb->s_gid)) {
- pr_err("invalid gid %d\n", tmp);
- return 0;
- }
- break;
- case opt_umask:
- if (match_octal(&args[0], &tmp)) {
- pr_err("umask requires a value\n");
- return 0;
- }
- hsb->s_file_umask = (umode_t)tmp;
- hsb->s_dir_umask = (umode_t)tmp;
- break;
- case opt_file_umask:
- if (match_octal(&args[0], &tmp)) {
- pr_err("file_umask requires a value\n");
- return 0;
- }
- hsb->s_file_umask = (umode_t)tmp;
- break;
- case opt_dir_umask:
- if (match_octal(&args[0], &tmp)) {
- pr_err("dir_umask requires a value\n");
- return 0;
- }
- hsb->s_dir_umask = (umode_t)tmp;
- break;
- case opt_part:
- if (match_int(&args[0], &hsb->part)) {
- pr_err("part requires an argument\n");
- return 0;
- }
- break;
- case opt_session:
- if (match_int(&args[0], &hsb->session)) {
- pr_err("session requires an argument\n");
- return 0;
- }
- break;
- case opt_type:
- if (match_fourchar(&args[0], &hsb->s_type)) {
- pr_err("type requires a 4 character value\n");
- return 0;
- }
- break;
- case opt_creator:
- if (match_fourchar(&args[0], &hsb->s_creator)) {
- pr_err("creator requires a 4 character value\n");
- return 0;
- }
- break;
- case opt_quiet:
- hsb->s_quiet = 1;
- break;
- case opt_codepage:
- if (hsb->nls_disk) {
- pr_err("unable to change codepage\n");
- return 0;
- }
- p = match_strdup(&args[0]);
- if (p)
- hsb->nls_disk = load_nls(p);
- if (!hsb->nls_disk) {
- pr_err("unable to load codepage \"%s\"\n", p);
- kfree(p);
- return 0;
- }
- kfree(p);
- break;
- case opt_iocharset:
- if (hsb->nls_io) {
- pr_err("unable to change iocharset\n");
- return 0;
- }
- p = match_strdup(&args[0]);
- if (p)
- hsb->nls_io = load_nls(p);
- if (!hsb->nls_io) {
- pr_err("unable to load iocharset \"%s\"\n", p);
- kfree(p);
- return 0;
- }
- kfree(p);
- break;
- default:
- return 0;
- }
- }
+ struct hfs_sb_info *hsb = fc->s_fs_info;
+ struct fs_parse_result result;
+ int opt;
+
+ /* hfs does not honor any fs-specific options on remount */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+ return 0;
- if (hsb->nls_disk && !hsb->nls_io) {
- hsb->nls_io = load_nls_default();
+ opt = fs_parse(fc, hfs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case opt_uid:
+ hsb->s_uid = result.uid;
+ break;
+ case opt_gid:
+ hsb->s_gid = result.gid;
+ break;
+ case opt_umask:
+ hsb->s_file_umask = (umode_t)result.uint_32;
+ hsb->s_dir_umask = (umode_t)result.uint_32;
+ break;
+ case opt_file_umask:
+ hsb->s_file_umask = (umode_t)result.uint_32;
+ break;
+ case opt_dir_umask:
+ hsb->s_dir_umask = (umode_t)result.uint_32;
+ break;
+ case opt_part:
+ hsb->part = result.uint_32;
+ break;
+ case opt_session:
+ hsb->session = result.uint_32;
+ break;
+ case opt_type:
+ if (strlen(param->string) != 4) {
+ pr_err("type requires a 4 character value\n");
+ return -EINVAL;
+ }
+ memcpy(&hsb->s_type, param->string, 4);
+ break;
+ case opt_creator:
+ if (strlen(param->string) != 4) {
+ pr_err("creator requires a 4 character value\n");
+ return -EINVAL;
+ }
+ memcpy(&hsb->s_creator, param->string, 4);
+ break;
+ case opt_quiet:
+ hsb->s_quiet = 1;
+ break;
+ case opt_codepage:
+ if (hsb->nls_disk) {
+ pr_err("unable to change codepage\n");
+ return -EINVAL;
+ }
+ hsb->nls_disk = load_nls(param->string);
+ if (!hsb->nls_disk) {
+ pr_err("unable to load codepage \"%s\"\n",
+ param->string);
+ return -EINVAL;
+ }
+ break;
+ case opt_iocharset:
+ if (hsb->nls_io) {
+ pr_err("unable to change iocharset\n");
+ return -EINVAL;
+ }
+ hsb->nls_io = load_nls(param->string);
if (!hsb->nls_io) {
- pr_err("unable to load default iocharset\n");
- return 0;
+ pr_err("unable to load iocharset \"%s\"\n",
+ param->string);
+ return -EINVAL;
}
+ break;
+ default:
+ return -EINVAL;
}
- hsb->s_dir_umask &= 0777;
- hsb->s_file_umask &= 0577;
- return 1;
+ return 0;
}
/*
@@ -375,29 +310,29 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
* hfs_btree_init() to get the necessary data about the extents and
* catalog B-trees and, finally, reading the root inode into memory.
*/
-static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
- struct hfs_sb_info *sbi;
+ struct hfs_sb_info *sbi = HFS_SB(sb);
struct hfs_find_data fd;
hfs_cat_rec rec;
struct inode *root_inode;
+ int silent = fc->sb_flags & SB_SILENT;
int res;
- sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
+ atomic64_set(&sbi->file_count, 0);
+ atomic64_set(&sbi->folder_count, 0);
+ atomic64_set(&sbi->next_id, 0);
+
+ /* load_nls_default does not fail */
+ if (sbi->nls_disk && !sbi->nls_io)
+ sbi->nls_io = load_nls_default();
+ sbi->s_dir_umask &= 0777;
+ sbi->s_file_umask &= 0577;
- sbi->sb = sb;
- sb->s_fs_info = sbi;
spin_lock_init(&sbi->work_lock);
INIT_DELAYED_WORK(&sbi->mdb_work, flush_mdb);
- res = -EINVAL;
- if (!parse_options((char *)data, sbi)) {
- pr_err("unable to parse mount options\n");
- goto bail;
- }
-
+ sbi->sb = sb;
sb->s_op = &hfs_super_operations;
sb->s_xattr = hfs_xattr_handlers;
sb->s_flags |= SB_NODIRATIME;
@@ -418,11 +353,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
goto bail_no_root;
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
if (!res) {
- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ if (fd.entrylength != sizeof(rec.dir)) {
res = -EIO;
goto bail_hfs_find;
}
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ if (rec.type != HFS_CDR_DIR)
+ res = -EIO;
}
if (res)
goto bail_hfs_find;
@@ -432,7 +369,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
if (!root_inode)
goto bail_no_root;
- sb->s_d_op = &hfs_dentry_operations;
+ set_default_d_op(sb, &hfs_dentry_operations);
res = -ENOMEM;
sb->s_root = d_make_root(root_inode);
if (!sb->s_root)
@@ -450,18 +387,56 @@ bail:
return res;
}
-static struct dentry *hfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hfs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, hfs_fill_super);
+ return get_tree_bdev(fc, hfs_fill_super);
+}
+
+static void hfs_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations hfs_context_ops = {
+ .parse_param = hfs_parse_param,
+ .get_tree = hfs_get_tree,
+ .reconfigure = hfs_reconfigure,
+ .free = hfs_free_fc,
+};
+
+static int hfs_init_fs_context(struct fs_context *fc)
+{
+ struct hfs_sb_info *hsb;
+
+ hsb = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
+ if (!hsb)
+ return -ENOMEM;
+
+ fc->s_fs_info = hsb;
+ fc->ops = &hfs_context_ops;
+
+ if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE) {
+ /* initialize options with defaults */
+ hsb->s_uid = current_uid();
+ hsb->s_gid = current_gid();
+ hsb->s_file_umask = 0133;
+ hsb->s_dir_umask = 0022;
+ hsb->s_type = cpu_to_be32(0x3f3f3f3f); /* == '????' */
+ hsb->s_creator = cpu_to_be32(0x3f3f3f3f); /* == '????' */
+ hsb->s_quiet = 0;
+ hsb->part = -1;
+ hsb->session = -1;
+ }
+
+ return 0;
}
static struct file_system_type hfs_fs_type = {
.owner = THIS_MODULE,
.name = "hfs",
- .mount = hfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = hfs_init_fs_context,
};
MODULE_ALIAS_FS("hfs");
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 2875961fdc10..ef54fc8093cf 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -13,7 +13,8 @@
/* dentry case-handling: just lowercase everything */
-static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
+static int hfs_revalidate_dentry(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int diff;
@@ -28,9 +29,13 @@ static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
/* fix up inode on a timezone change */
diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
if (diff) {
- inode->i_ctime.tv_sec += diff;
- inode->i_atime.tv_sec += diff;
- inode->i_mtime.tv_sec += diff;
+ struct timespec64 ts = inode_get_ctime(inode);
+
+ inode_set_ctime(inode, ts.tv_sec + diff, ts.tv_nsec);
+ ts = inode_get_atime(inode);
+ inode_set_atime(inode, ts.tv_sec + diff, ts.tv_nsec);
+ ts = inode_get_mtime(inode);
+ inode_set_mtime(inode, ts.tv_sec + diff, ts.tv_nsec);
HFS_I(inode)->tz_secondswest += diff;
}
return 1;
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index 39f5e343bf4d..fdb0edb8a607 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr
if (nls_io) {
wchar_t ch;
- while (srclen > 0) {
+ while (srclen > 0 && dstlen > 0) {
size = nls_io->char2uni(src, srclen, &ch);
if (size < 0) {
ch = '?';
diff --git a/fs/hfsplus/.kunitconfig b/fs/hfsplus/.kunitconfig
new file mode 100644
index 000000000000..6c96dc7e872c
--- /dev/null
+++ b/fs/hfsplus/.kunitconfig
@@ -0,0 +1,8 @@
+CONFIG_KUNIT=y
+CONFIG_HFSPLUS_FS=y
+CONFIG_HFSPLUS_KUNIT_TEST=y
+CONFIG_BLOCK=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_NLS=y
+CONFIG_NLS_UTF8=y
+CONFIG_LEGACY_DIRECT_IO=y
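
Editorial note: a .kunitconfig like the one above is normally consumed by the KUnit wrapper, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=fs/hfsplus (the flag accepts either a directory containing a .kunitconfig or the file itself). Whether an equivalent fragment is added for fs/hfs elsewhere in the series is not shown in this excerpt.
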
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
index 7d4229aecec0..ca8401cb6954 100644
--- a/fs/hfsplus/Kconfig
+++ b/fs/hfsplus/Kconfig
@@ -2,8 +2,10 @@
config HFSPLUS_FS
tristate "Apple Extended HFS file system support"
depends on BLOCK
+ select BUFFER_HEAD
select NLS
select NLS_UTF8
+ select LEGACY_DIRECT_IO
help
If you say Y here, you will be able to mount extended format
Macintosh-formatted hard drive partitions with full read-write access.
@@ -12,3 +14,18 @@ config HFSPLUS_FS
MacOS 8. It includes all Mac specific filesystem data such as
data forks and creator codes, but it also has several UNIX
style features such as file ownership and permissions.
+
+config HFSPLUS_KUNIT_TEST
+ tristate "KUnit tests for HFS+ filesystem" if !KUNIT_ALL_TESTS
+ depends on HFSPLUS_FS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds KUnit tests for the HFS+ filesystem.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). This is only useful
+ for kernel developers running the KUnit test harness; it is not
+ intended for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general please
+ refer to the KUnit documentation in Documentation/dev-tools/kunit/.
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index 9ed20e64b983..f2a9ae697e81 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
+
+# KUnit tests
+obj-$(CONFIG_HFSPLUS_KUNIT_TEST) += unicode_test.o
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index eeebe80c6be4..ba26980cc503 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -139,7 +139,7 @@ int hfsplus_find_attr(struct super_block *sb, u32 cnid,
{
int err = 0;
- hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid);
+ hfs_dbg("name %s, cnid %d\n", name ? name : NULL, cnid);
if (!HFSPLUS_SB(sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
@@ -201,7 +201,7 @@ int hfsplus_create_attr(struct inode *inode,
int entry_size;
int err;
- hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n",
+ hfs_dbg("name %s, ino %ld\n",
name ? name : NULL, inode->i_ino);
if (!HFSPLUS_SB(sb)->attr_tree) {
@@ -310,7 +310,7 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
struct super_block *sb = inode->i_sb;
struct hfs_find_data fd;
- hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n",
+ hfs_dbg("name %s, ino %ld\n",
name ? name : NULL, inode->i_ino);
if (!HFSPLUS_SB(sb)->attr_tree) {
@@ -356,7 +356,7 @@ int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
int err = 0;
struct hfs_find_data fd;
- hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid);
+ hfs_dbg("cnid %d\n", cnid);
if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
pr_err("attributes file doesn't exist\n");
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index ca2ba8c9f82e..336d654861c5 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -18,26 +18,15 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->tree = tree;
fd->bnode = NULL;
- ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+ ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
- hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
tree->cnid, __builtin_return_address(0));
- switch (tree->cnid) {
- case HFSPLUS_CAT_CNID:
- mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
- break;
- case HFSPLUS_EXT_CNID:
- mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
- break;
- case HFSPLUS_ATTR_CNID:
- mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
- break;
- default:
- BUG();
- }
+ mutex_lock_nested(&tree->tree_lock,
+ hfsplus_btree_lock_class(tree));
return 0;
}
@@ -45,7 +34,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
{
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
- hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n",
+ hfs_dbg("cnid %d, caller %ps\n",
fd->tree->cnid, __builtin_return_address(0));
mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
@@ -169,6 +158,12 @@ int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
__be32 data;
int height, res;
+ fd->record = -1;
+ fd->keyoffset = -1;
+ fd->keylength = -1;
+ fd->entryoffset = -1;
+ fd->entrylength = -1;
+
tree = fd->tree;
if (fd->bnode)
hfs_bnode_put(fd->bnode);
@@ -215,7 +210,7 @@ release:
return res;
}
-int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len)
{
int res;
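
Editorial aside: hfsplus_btree_lock_class() is introduced elsewhere in the series and its definition is not part of this excerpt; judging from the switch it replaces in hfs_find_init() above, it presumably maps the tree CNID to the lockdep subclass. A plausible sketch only (assumed, not the real definition):

	/* plausible sketch: fold the removed CNID switch into one lookup */
	static inline unsigned int hfsplus_btree_lock_class(const struct hfs_btree *tree)
	{
		switch (tree->cnid) {
		case HFSPLUS_CAT_CNID:
			return CATALOG_BTREE_MUTEX;
		case HFSPLUS_EXT_CNID:
			return EXTENTS_BTREE_MUTEX;
		case HFSPLUS_ATTR_CNID:
			return ATTR_BTREE_MUTEX;
		default:
			BUG();
		}
		return 0;	/* unreachable */
	}
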
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index cebce0cfe340..1b3af8c87cad 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -31,7 +31,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
if (!len)
return size;
- hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
+ hfs_dbg("size %u, offset %u, len %u\n", size, offset, len);
mutex_lock(&sbi->alloc_mutex);
mapping = sbi->alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
@@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
start = size;
goto out;
}
- pptr = kmap(page);
+ pptr = kmap_local_page(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
@@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
}
curr++;
}
- kunmap(page);
+ kunmap_local(pptr);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
@@ -84,20 +84,20 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
start = size;
goto out;
}
- curr = pptr = kmap(page);
+ curr = pptr = kmap_local_page(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
- hfs_dbg(BITMAP, "bitmap full\n");
+ hfs_dbg("bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
- hfs_dbg(BITMAP, "bitmap full\n");
+ hfs_dbg("bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
@@ -127,7 +127,7 @@ found:
len -= 32;
}
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(pptr);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
@@ -135,7 +135,7 @@ found:
start = size;
goto out;
}
- pptr = kmap(page);
+ pptr = kmap_local_page(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
@@ -151,11 +151,11 @@ last:
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(pptr);
*max = offset + (curr - pptr) * 32 + i - start;
sbi->free_blocks -= *max;
hfsplus_mark_mdb_dirty(sb);
- hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
+ hfs_dbg("start %u, max %u\n", start, *max);
out:
mutex_unlock(&sbi->alloc_mutex);
return start;
@@ -174,7 +174,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
if (!count)
return 0;
- hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
+ hfs_dbg("offset %u, count %u\n", offset, count);
/* are all of the bits in range? */
if ((offset + count) > sbi->total_blocks)
return -ENOENT;
@@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
page = read_mapping_page(mapping, pnr, NULL);
if (IS_ERR(page))
goto kaboom;
- pptr = kmap(page);
+ pptr = kmap_local_page(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
len = count;
@@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
if (!count)
break;
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(pptr);
page = read_mapping_page(mapping, ++pnr, NULL);
if (IS_ERR(page))
goto kaboom;
- pptr = kmap(page);
+ pptr = kmap_local_page(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
@@ -231,7 +231,7 @@ done:
}
out:
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(pptr);
sbi->free_blocks += len;
hfsplus_mark_mdb_dirty(sb);
mutex_unlock(&sbi->alloc_mutex);
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 177fae4e6581..191661af9677 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -18,29 +18,42 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
+
/* Copy a specified range of bytes from the raw data of a node */
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page **pagep;
- int l;
+ u32 l;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_SIZE - off);
- memcpy(buf, kmap(*pagep) + off, l);
- kunmap(*pagep);
+ l = min_t(u32, len, PAGE_SIZE - off);
+ memcpy_from_page(buf, *pagep, off, l);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_SIZE);
- memcpy(buf, kmap(*++pagep), l);
- kunmap(*pagep);
+ l = min_t(u32, len, PAGE_SIZE);
+ memcpy_from_page(buf, *++pagep, 0, l);
}
}
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
{
__be16 data;
/* TODO: optimize later... */
@@ -48,7 +61,7 @@ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
return be16_to_cpu(data);
}
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
{
u8 data;
/* TODO: optimize later... */
@@ -56,10 +69,10 @@ u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
return data;
}
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
{
struct hfs_btree *tree;
- int key_len;
+ u32 key_len;
tree = node->tree;
if (node->type == HFS_NODE_LEAF ||
@@ -69,70 +82,104 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
else
key_len = tree->max_key_len + 2;
+ if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
+ memset(key, 0, sizeof(hfsplus_btree_key));
+ pr_err("hfsplus: Invalid key length: %u\n", key_len);
+ return;
+ }
+
hfs_bnode_read(node, key, off, key_len);
}
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
struct page **pagep;
- int l;
+ u32 l;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_SIZE - off);
- memcpy(kmap(*pagep) + off, buf, l);
+ l = min_t(u32, len, PAGE_SIZE - off);
+ memcpy_to_page(*pagep, off, buf, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_SIZE);
- memcpy(kmap(*++pagep), buf, l);
+ l = min_t(u32, len, PAGE_SIZE);
+ memcpy_to_page(*++pagep, 0, buf, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
}
}
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
{
__be16 v = cpu_to_be16(data);
/* TODO: optimize later... */
hfs_bnode_write(node, &v, off, 2);
}
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
{
struct page **pagep;
- int l;
+ u32 l;
+
+ if (!is_bnode_offset_valid(node, off))
+ return;
+
+ if (len == 0) {
+ pr_err("requested zero length: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len);
+ return;
+ }
+
+ len = check_and_correct_requested_length(node, off, len);
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_SIZE - off);
- memset(kmap(*pagep) + off, 0, l);
+ l = min_t(u32, len, PAGE_SIZE - off);
+ memzero_page(*pagep, off, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_SIZE);
- memset(kmap(*++pagep), 0, l);
+ l = min_t(u32, len, PAGE_SIZE);
+ memzero_page(*++pagep, 0, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
}
}
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
- struct hfs_bnode *src_node, int src, int len)
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len)
{
struct page **src_page, **dst_page;
- int l;
+ u32 l;
- hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(src_node, src, len);
+ len = check_and_correct_requested_length(dst_node, dst, len);
+
src += src_node->page_offset;
dst += dst_node->page_offset;
src_page = src_node->page + (src >> PAGE_SHIFT);
@@ -141,25 +188,21 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_SIZE - src);
- memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
- kunmap(*src_page);
+ l = min_t(u32, len, PAGE_SIZE - src);
+ memcpy_page(*dst_page, src, *src_page, src, l);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_SIZE);
- memcpy(kmap(*++dst_page), kmap(*++src_page), l);
- kunmap(*src_page);
+ l = min_t(u32, len, PAGE_SIZE);
+ memcpy_page(*++dst_page, 0, *++src_page, 0, l);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
}
} else {
void *src_ptr, *dst_ptr;
do {
- src_ptr = kmap(*src_page) + src;
- dst_ptr = kmap(*dst_page) + dst;
+ dst_ptr = kmap_local_page(*dst_page) + dst;
+ src_ptr = kmap_local_page(*src_page) + src;
if (PAGE_SIZE - src < PAGE_SIZE - dst) {
l = PAGE_SIZE - src;
src = 0;
@@ -171,9 +214,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
}
l = min(len, l);
memcpy(dst_ptr, src_ptr, l);
- kunmap(*src_page);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
if (!dst)
dst_page++;
else
@@ -182,14 +225,19 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
}
}
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
{
struct page **src_page, **dst_page;
- int l;
+ void *src_ptr, *dst_ptr;
+ u32 l;
- hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+ hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
if (!len)
return;
+
+ len = check_and_correct_requested_length(node, src, len);
+ len = check_and_correct_requested_length(node, dst, len);
+
src += node->page_offset;
dst += node->page_offset;
if (dst > src) {
@@ -202,27 +250,28 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
if (src == dst) {
while (src < len) {
- memmove(kmap(*dst_page), kmap(*src_page), src);
- kunmap(*src_page);
+ dst_ptr = kmap_local_page(*dst_page);
+ src_ptr = kmap_local_page(*src_page);
+ memmove(dst_ptr, src_ptr, src);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
len -= src;
src = PAGE_SIZE;
src_page--;
dst_page--;
}
src -= len;
- memmove(kmap(*dst_page) + src,
- kmap(*src_page) + src, len);
- kunmap(*src_page);
+ dst_ptr = kmap_local_page(*dst_page);
+ src_ptr = kmap_local_page(*src_page);
+ memmove(dst_ptr + src, src_ptr + src, len);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
} else {
- void *src_ptr, *dst_ptr;
-
do {
- src_ptr = kmap(*src_page) + src;
- dst_ptr = kmap(*dst_page) + dst;
+ dst_ptr = kmap_local_page(*dst_page) + dst;
+ src_ptr = kmap_local_page(*src_page) + src;
if (src < dst) {
l = src;
src = PAGE_SIZE;
@@ -234,9 +283,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
}
l = min(len, l);
memmove(dst_ptr - l, src_ptr - l, l);
- kunmap(*src_page);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
if (dst == PAGE_SIZE)
dst_page--;
else
@@ -250,27 +299,28 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_SIZE - src);
- memmove(kmap(*dst_page) + src,
- kmap(*src_page) + src, l);
- kunmap(*src_page);
+ l = min_t(u32, len, PAGE_SIZE - src);
+
+ dst_ptr = kmap_local_page(*dst_page) + src;
+ src_ptr = kmap_local_page(*src_page) + src;
+ memmove(dst_ptr, src_ptr, l);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_SIZE);
- memmove(kmap(*++dst_page),
- kmap(*++src_page), l);
- kunmap(*src_page);
+ l = min_t(u32, len, PAGE_SIZE);
+ dst_ptr = kmap_local_page(*++dst_page);
+ src_ptr = kmap_local_page(*++src_page);
+ memmove(dst_ptr, src_ptr, l);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
}
} else {
- void *src_ptr, *dst_ptr;
-
do {
- src_ptr = kmap(*src_page) + src;
- dst_ptr = kmap(*dst_page) + dst;
+ dst_ptr = kmap_local_page(*dst_page) + dst;
+ src_ptr = kmap_local_page(*src_page) + src;
if (PAGE_SIZE - src <
PAGE_SIZE - dst) {
l = PAGE_SIZE - src;
@@ -283,9 +333,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
}
l = min(len, l);
memmove(dst_ptr, src_ptr, l);
- kunmap(*src_page);
+ kunmap_local(src_ptr);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_local(dst_ptr);
if (!dst)
dst_page++;
else
@@ -301,16 +351,16 @@ void hfs_bnode_dump(struct hfs_bnode *node)
__be32 cnid;
int i, off, key_off;
- hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
+ hfs_dbg("node %d\n", node->this);
hfs_bnode_read(node, &desc, 0, sizeof(desc));
- hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
+ hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
desc.type, desc.height, be16_to_cpu(desc.num_recs));
off = node->tree->node_size - 2;
for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
key_off = hfs_bnode_read_u16(node, off);
- hfs_dbg(BNODE_MOD, " %d", key_off);
+ hfs_dbg(" key_off %d", key_off);
if (i && node->type == HFS_NODE_INDEX) {
int tmp;
@@ -319,17 +369,17 @@ void hfs_bnode_dump(struct hfs_bnode *node)
tmp = hfs_bnode_read_u16(node, key_off) + 2;
else
tmp = node->tree->max_key_len + 2;
- hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
+ hfs_dbg(" (%d", tmp);
hfs_bnode_read(node, &cnid, key_off + tmp, 4);
- hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
+ hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
} else if (i && node->type == HFS_NODE_LEAF) {
int tmp;
tmp = hfs_bnode_read_u16(node, key_off);
- hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
+ hfs_dbg(" (%d)", tmp);
}
}
- hfs_dbg_cont(BNODE_MOD, "\n");
+ hfs_dbg("\n");
}
void hfs_bnode_unlink(struct hfs_bnode *node)
@@ -365,7 +415,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
/* move down? */
if (!node->prev && !node->next)
- hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
+ hfs_dbg("btree delete level\n");
if (!node->parent) {
tree->root = 0;
tree->depth = 0;
@@ -420,7 +470,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
atomic_set(&node->refcnt, 1);
- hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
+ hfs_dbg("cnid %d, node %d, refcnt 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
spin_lock(&tree->hash_lock);
@@ -431,6 +481,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
tree->node_hash[hash] = node;
tree->node_hash_cnt++;
} else {
+ hfs_bnode_get(node2);
spin_unlock(&tree->hash_lock);
kfree(node);
wait_event(node2->lock_wq,
@@ -447,10 +498,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
- if (PageError(page)) {
- put_page(page);
- goto fail;
- }
node->page[i] = page;
}
@@ -464,7 +511,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
{
struct hfs_bnode **p;
- hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
@@ -502,14 +549,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
if (!test_bit(HFS_BNODE_NEW, &node->flags))
return node;
- desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
- node->page_offset);
+ desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
+ node->page_offset);
node->prev = be32_to_cpu(desc->prev);
node->next = be32_to_cpu(desc->next);
node->num_recs = be16_to_cpu(desc->num_recs);
node->type = desc->type;
node->height = desc->height;
- kunmap(node->page[0]);
+ kunmap_local(desc);
switch (node->type) {
case HFS_NODE_HEADER:
@@ -593,14 +640,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
}
pagep = node->page;
- memset(kmap(*pagep) + node->page_offset, 0,
- min_t(int, PAGE_SIZE, tree->node_size));
+ memzero_page(*pagep, node->page_offset,
+ min_t(int, PAGE_SIZE, tree->node_size));
set_page_dirty(*pagep);
- kunmap(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
- memset(kmap(*++pagep), 0, PAGE_SIZE);
+ memzero_page(*++pagep, 0, PAGE_SIZE);
set_page_dirty(*pagep);
- kunmap(*pagep);
}
clear_bit(HFS_BNODE_NEW, &node->flags);
wake_up(&node->lock_wq);
@@ -612,7 +657,7 @@ void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
atomic_inc(&node->refcnt);
- hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
}
@@ -625,7 +670,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
struct hfs_btree *tree = node->tree;
int i;
- hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
+ hfs_dbg("cnid %d, node %d, refcnt %d\n",
node->tree->cnid, node->this,
atomic_read(&node->refcnt));
BUG_ON(!atomic_read(&node->refcnt));
@@ -660,6 +705,5 @@ bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);
- return tree->cnid == HFSPLUS_CAT_CNID &&
- volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
+ return volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}
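
Editorial aside on the highmem conversion above: kmap_local_page() mappings are strictly nested, which is why the copy/move loops now map dst_page before src_page and release them in the reverse order; memcpy_from_page(), memcpy_to_page() and memzero_page() bundle the map, copy and unmap for the single-page cases. Roughly, memcpy_from_page() expands to the following (paraphrasing the highmem helpers, not code from this patch):

	/* approximate expansion of memcpy_from_page(to, page, offset, len) */
	{
		char *from = kmap_local_page(page);

		memcpy(to, from + offset, len);
		kunmap_local(from);
	}
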
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 1918544a7871..6796c1a80e99 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -60,7 +60,7 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
return retval;
}
-int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len)
{
struct hfs_btree *tree;
struct hfs_bnode *node, *new_node;
@@ -92,7 +92,7 @@ again:
end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
end_off = hfs_bnode_read_u16(node, end_rec_off);
end_rec_off -= 2;
- hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
+ hfs_dbg("rec %d, size %d, end_off %d, end_rec_off %d\n",
rec, size, end_off, end_rec_off);
if (size > end_rec_off - end_off) {
if (new_node)
@@ -193,7 +193,7 @@ again:
mark_inode_dirty(tree->inode);
}
hfs_bnode_dump(node);
- hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n",
+ hfs_dbg("rec %d, len %d\n",
fd->record, fd->keylength + fd->entrylength);
if (!--node->num_recs) {
hfs_bnode_unlink(node);
@@ -246,7 +246,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
if (IS_ERR(new_node))
return new_node;
hfs_bnode_get(node);
- hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
+ hfs_dbg("this %d - new %d - next %d\n",
node->this, new_node->this, node->next);
new_node->next = node->next;
new_node->prev = node->this;
@@ -383,7 +383,7 @@ again:
newkeylen = hfs_bnode_read_u16(node, 14) + 2;
else
fd->keylength = newkeylen = tree->max_key_len + 2;
- hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n",
+ hfs_dbg("rec %d, keylength %d, newkeylen %d\n",
rec, fd->keylength, newkeylen);
rec_off = tree->node_size - (rec + 2) * 2;
@@ -395,7 +395,7 @@ again:
end_off = hfs_bnode_read_u16(parent, end_rec_off);
if (end_rec_off - end_off < diff) {
- hfs_dbg(BNODE_MOD, "splitting index node\n");
+ hfs_dbg("splitting index node\n");
fd->bnode = parent;
new_node = hfs_bnode_split(fd);
if (IS_ERR(new_node))
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 66774f4cb4fd..229f25dc7c49 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -163,7 +163,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
goto free_inode;
/* Load the header */
- head = (struct hfs_btree_header_rec *)(kmap(page) +
+ head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
sizeof(struct hfs_bnode_desc));
tree->root = be32_to_cpu(head->root);
tree->leaf_count = be32_to_cpu(head->leaf_count);
@@ -240,11 +240,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
(tree->node_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
- kunmap(page);
+ kunmap_local(head);
put_page(page);
return tree;
fail_page:
+ kunmap_local(head);
put_page(page);
free_inode:
tree->inode->i_mapping->a_ops = &hfsplus_aops;
@@ -291,7 +292,7 @@ int hfs_btree_write(struct hfs_btree *tree)
return -EIO;
/* Load the header */
page = node->page[0];
- head = (struct hfs_btree_header_rec *)(kmap(page) +
+ head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
sizeof(struct hfs_bnode_desc));
head->root = cpu_to_be32(tree->root);
@@ -303,7 +304,7 @@ int hfs_btree_write(struct hfs_btree *tree)
head->attributes = cpu_to_be32(tree->attributes);
head->depth = cpu_to_be16(tree->depth);
- kunmap(page);
+ kunmap_local(head);
set_page_dirty(page);
hfs_bnode_put(node);
return 0;
@@ -343,7 +344,7 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
}
/* Make sure @tree has enough space for the @rsvd_nodes */
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
{
struct inode *inode = tree->inode;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -392,9 +393,15 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
len = hfs_brec_lenoff(node, 2, &off16);
off = off16;
+ if (!is_bnode_offset_valid(node, off)) {
+ hfs_bnode_put(node);
+ return ERR_PTR(-EIO);
+ }
+ len = check_and_correct_requested_length(node, off, len);
+
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
idx = 0;
@@ -407,7 +414,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
idx += i;
data[off] |= m;
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_local(data);
tree->free_nodes--;
mark_inode_dirty(tree->inode);
hfs_bnode_put(node);
@@ -417,17 +424,17 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
}
}
if (++off >= PAGE_SIZE) {
- kunmap(*pagep);
- data = kmap(*++pagep);
+ kunmap_local(data);
+ data = kmap_local_page(*++pagep);
off = 0;
}
idx += 8;
len--;
}
- kunmap(*pagep);
+ kunmap_local(data);
nidx = node->next;
if (!nidx) {
- hfs_dbg(BNODE_MOD, "create new bmap node\n");
+ hfs_dbg("create new bmap node\n");
next_node = hfs_bmap_new_bmap(node, idx);
} else
next_node = hfs_bnode_find(tree, nidx);
@@ -440,7 +447,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off = off16;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
}
}
@@ -453,7 +460,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
u32 nidx;
u8 *data, byte, m;
- hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
+ hfs_dbg("node %u\n", node->this);
BUG_ON(!node->this);
tree = node->tree;
nidx = node->this;
@@ -490,7 +497,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
}
off += node->page_offset + nidx / 8;
page = node->page[off >> PAGE_SHIFT];
- data = kmap(page);
+ data = kmap_local_page(page);
off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
@@ -498,13 +505,13 @@ void hfs_bmap_free(struct hfs_bnode *node)
pr_crit("trying to free free bnode "
"%u(%d)\n",
node->this, node->type);
- kunmap(page);
+ kunmap_local(data);
hfs_bnode_put(node);
return;
}
data[off] = byte & ~m;
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(data);
hfs_bnode_put(node);
tree->free_nodes++;
mark_inode_dirty(tree->inode);
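The kmap()/kunmap() calls in btree.c are converted the same way as in bnode.c: kmap_local_page() returns a CPU-local mapping, and kunmap_local() takes that returned address rather than the page, with nested mappings released in reverse order. A short before/after sketch of the pattern (kernel context, illustrative only):

	u8 *data;

	/* before: the mapping is keyed by the page itself */
	data = kmap(page);
	data[off] |= m;
	set_page_dirty(page);
	kunmap(page);

	/* after: kunmap_local() takes the address kmap_local_page() returned;
	 * nested mappings must be released in reverse (LIFO) order
	 */
	data = kmap_local_page(page);
	data[off] |= m;
	set_page_dirty(page);
	kunmap_local(data);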
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 35472cba750e..02c1eee4a4b8 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -259,7 +259,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
int entry_size;
int err;
- hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n",
+ hfs_dbg("name %s, cnid %u, i_nlink %d\n",
str->name, cnid, inode->i_nlink);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
@@ -312,7 +312,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
dir->i_size++;
if (S_ISDIR(inode->i_mode))
hfsplus_subfolders_inc(dir);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
hfs_find_exit(&fd);
@@ -336,7 +336,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
int err, off;
u16 type;
- hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+ hfs_dbg("name %s, cnid %u\n", str ? str->name : NULL, cnid);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return err;
@@ -417,7 +417,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(dir);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
@@ -441,7 +441,7 @@ int hfsplus_rename_cat(u32 cnid,
int entry_size, type;
int err;
- hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
+ hfs_dbg("cnid %u - ino %lu, name %s - ino %lu, name %s\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
@@ -494,7 +494,7 @@ int hfsplus_rename_cat(u32 cnid,
dst_dir->i_size++;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_inc(dst_dir);
- dst_dir->i_mtime = dst_dir->i_ctime = current_time(dst_dir);
+ inode_set_mtime_to_ts(dst_dir, inode_set_ctime_current(dst_dir));
/* finally remove the old entry */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
@@ -511,7 +511,7 @@ int hfsplus_rename_cat(u32 cnid,
src_dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(src_dir);
- src_dir->i_mtime = src_dir->i_ctime = current_time(src_dir);
+ inode_set_mtime_to_ts(src_dir, inode_set_ctime_current(src_dir));
/* remove old thread entry */
hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 84714bbccc12..cadf0b5f9342 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -204,7 +204,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
fd.entrylength);
type = be16_to_cpu(entry.type);
len = NLS_MAX_CHARSET_SIZE * HFSPLUS_MAX_STRLEN;
- err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
+ err = hfsplus_uni2asc_str(sb, &fd.key->cat.name, strbuf, &len);
if (err)
goto out;
if (type == HFSPLUS_FOLDER) {
@@ -346,7 +346,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
inc_nlink(inode);
hfsplus_instantiate(dst_dentry, inode, cnid);
ihold(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
sbi->file_count++;
hfsplus_mark_mdb_dirty(dst_dir->i_sb);
@@ -405,7 +405,7 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
hfsplus_delete_inode(inode);
} else
sbi->file_count--;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
out:
mutex_unlock(&sbi->vh_mutex);
@@ -426,7 +426,7 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
if (res)
goto out;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
hfsplus_delete_inode(inode);
mark_inode_dirty(inode);
out:
@@ -434,7 +434,7 @@ out:
return res;
}
-static int hfsplus_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int hfsplus_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
@@ -476,7 +476,7 @@ out:
return res;
}
-static int hfsplus_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int hfsplus_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
@@ -517,19 +517,19 @@ out:
return res;
}
-static int hfsplus_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int hfsplus_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
- return hfsplus_mknod(&init_user_ns, dir, dentry, mode, 0);
+ return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode, 0);
}
-static int hfsplus_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hfsplus_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return hfsplus_mknod(&init_user_ns, dir, dentry, mode | S_IFDIR, 0);
+ return ERR_PTR(hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0));
}
-static int hfsplus_rename(struct user_namespace *mnt_userns,
+static int hfsplus_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -552,8 +552,13 @@ static int hfsplus_rename(struct user_namespace *mnt_userns,
res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name);
- if (!res)
+ if (!res) {
new_dentry->d_fsdata = old_dentry->d_fsdata;
+
+ res = hfsplus_cat_write_inode(old_dir);
+ if (!res)
+ res = hfsplus_cat_write_inode(new_dir);
+ }
return res;
}
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 7054a542689f..8e886514d27f 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -275,7 +275,7 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
mutex_unlock(&hip->extents_lock);
done:
- hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
+ hfs_dbg("ino %lu, iblock %llu - dblock %u\n",
inode->i_ino, (long long)iblock, dblock);
mask = (1 << sbi->fs_shift) - 1;
@@ -298,12 +298,12 @@ static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
int i;
- hfs_dbg(EXTENT, " ");
+ hfs_dbg("extent ");
for (i = 0; i < 8; i++)
- hfs_dbg_cont(EXTENT, " %u:%u",
- be32_to_cpu(extent[i].start_block),
- be32_to_cpu(extent[i].block_count));
- hfs_dbg_cont(EXTENT, "\n");
+ hfs_dbg(" start_block %u, block_count %u",
+ be32_to_cpu(extent[i].start_block),
+ be32_to_cpu(extent[i].block_count));
+ hfs_dbg("\n");
}
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
@@ -342,9 +342,6 @@ static int hfsplus_free_extents(struct super_block *sb,
int i;
int err = 0;
- /* Mapping the allocation file may lock the extent tree */
- WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));
-
hfsplus_dump_extent(extent);
for (i = 0; i < 8; extent++, i++) {
count = be32_to_cpu(extent->block_count);
@@ -362,8 +359,7 @@ found:
if (count <= block_nr) {
err = hfsplus_block_free(sb, start, count);
if (err) {
- pr_err("can't free extent\n");
- hfs_dbg(EXTENT, " start: %u count: %u\n",
+ pr_err("can't free extent: start %u, count %u\n",
start, count);
}
extent->block_count = 0;
@@ -373,8 +369,7 @@ found:
count -= block_nr;
err = hfsplus_block_free(sb, start + count, block_nr);
if (err) {
- pr_err("can't free extent\n");
- hfs_dbg(EXTENT, " start: %u count: %u\n",
+ pr_err("can't free extent: start %u, count %u\n",
start, count);
}
extent->block_count = cpu_to_be32(count);
@@ -430,7 +425,8 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
hfsplus_free_extents(sb, ext_entry, total_blocks - start,
total_blocks);
total_blocks = start;
- mutex_lock(&fd.tree->tree_lock);
+ mutex_lock_nested(&fd.tree->tree_lock,
+ hfsplus_btree_lock_class(fd.tree));
} while (total_blocks > blocks);
hfs_find_exit(&fd);
@@ -448,9 +444,9 @@ int hfsplus_file_extend(struct inode *inode, bool zeroout)
if (sbi->alloc_file->i_size * 8 <
sbi->total_blocks - sbi->free_blocks + 8) {
/* extend alloc file */
- pr_err("extend alloc file! (%llu,%u,%u)\n",
- sbi->alloc_file->i_size * 8,
- sbi->total_blocks, sbi->free_blocks);
+ pr_err_ratelimited("extend alloc file! (%llu,%u,%u)\n",
+ sbi->alloc_file->i_size * 8,
+ sbi->total_blocks, sbi->free_blocks);
return -ENOSPC;
}
@@ -480,11 +476,12 @@ int hfsplus_file_extend(struct inode *inode, bool zeroout)
goto out;
}
- hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
+ hfs_dbg("ino %lu, start %u, len %u\n", inode->i_ino, start, len);
if (hip->alloc_blocks <= hip->first_blocks) {
if (!hip->first_blocks) {
- hfs_dbg(EXTENT, "first extents\n");
+ hfs_dbg("first_extent: start %u, len %u\n",
+ start, len);
/* no extents yet */
hip->first_extents[0].start_block = cpu_to_be32(start);
hip->first_extents[0].block_count = cpu_to_be32(len);
@@ -523,7 +520,7 @@ out:
return res;
insert_extent:
- hfs_dbg(EXTENT, "insert new extent\n");
+ hfs_dbg("insert new extent\n");
res = hfsplus_ext_write_extent_locked(inode);
if (res)
goto out;
@@ -548,21 +545,21 @@ void hfsplus_file_truncate(struct inode *inode)
u32 alloc_cnt, blk_cnt, start;
int res;
- hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
+ hfs_dbg("ino %lu, phys_size %llu -> i_size %llu\n",
inode->i_ino, (long long)hip->phys_size, inode->i_size);
if (inode->i_size > hip->phys_size) {
struct address_space *mapping = inode->i_mapping;
- struct page *page;
- void *fsdata;
+ struct folio *folio;
+ void *fsdata = NULL;
loff_t size = inode->i_size;
- res = pagecache_write_begin(NULL, mapping, size, 0, 0,
- &page, &fsdata);
+ res = hfsplus_write_begin(NULL, mapping, size, 0,
+ &folio, &fsdata);
if (res)
return;
- res = pagecache_write_end(NULL, mapping, size,
- 0, 0, page, fsdata);
+ res = generic_write_end(NULL, mapping, size, 0, 0,
+ folio, fsdata);
if (res < 0)
return;
mark_inode_dirty(inode);
@@ -592,7 +589,8 @@ void hfsplus_file_truncate(struct inode *inode)
alloc_cnt, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->first_extents);
hip->first_blocks = blk_cnt;
- mutex_lock(&fd.tree->tree_lock);
+ mutex_lock_nested(&fd.tree->tree_lock,
+ hfsplus_btree_lock_class(fd.tree));
break;
}
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
@@ -606,7 +604,8 @@ void hfsplus_file_truncate(struct inode *inode)
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->cached_extents);
- mutex_lock(&fd.tree->tree_lock);
+ mutex_lock_nested(&fd.tree->tree_lock,
+ hfsplus_btree_lock_class(fd.tree));
if (blk_cnt > start) {
hip->extent_state |= HFSPLUS_EXT_DIRTY;
break;
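Both re-lock sites above now pass a lockdep subclass derived from the tree's CNID, so the catalog, extents and attributes tree_lock instances can nest without triggering false deadlock reports. The idiom, as used here (kernel context; hfsplus_btree_lock_class() is defined in the hfsplus_fs.h hunk below):

	/* re-acquire the per-tree lock with a CNID-derived lockdep subclass */
	mutex_lock_nested(&fd.tree->tree_lock,
			  hfsplus_btree_lock_class(fd.tree));
	/* ... continue walking/updating the extent records ... */
	mutex_unlock(&fd.tree->tree_lock);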
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 1798949f269b..45fe3a12ecba 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -11,46 +11,13 @@
#ifndef _LINUX_HFSPLUS_FS_H
#define _LINUX_HFSPLUS_FS_H
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
+#include <linux/fs_context.h>
#include "hfsplus_raw.h"
-#define DBG_BNODE_REFS 0x00000001
-#define DBG_BNODE_MOD 0x00000002
-#define DBG_CAT_MOD 0x00000004
-#define DBG_INODE 0x00000008
-#define DBG_SUPER 0x00000010
-#define DBG_EXTENT 0x00000020
-#define DBG_BITMAP 0x00000040
-#define DBG_ATTR_MOD 0x00000080
-
-#if 0
-#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
-#define DBG_MASK (DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
-#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
-#endif
-#define DBG_MASK (0)
-
-#define hfs_dbg(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-
-#define hfs_dbg_cont(flg, fmt, ...) \
-do { \
- if (DBG_##flg & DBG_MASK) \
- pr_cont(fmt, ##__VA_ARGS__); \
-} while (0)
-
/* Runtime config options */
#define HFSPLUS_DEF_CR_TYPE 0x3F3F3F3F /* '????' */
@@ -156,6 +123,7 @@ struct hfsplus_sb_info {
/* Runtime variables */
u32 blockoffset;
+ u32 min_io_size;
sector_t part_start;
sector_t sect_count;
int fs_shift;
@@ -190,6 +158,7 @@ struct hfsplus_sb_info {
int work_queued; /* non-zero delayed work is queued */
struct delayed_work sync_work; /* FS sync delayed work */
spinlock_t work_lock; /* protects sync_work and work_queued */
+ struct rcu_head rcu;
};
#define HFSPLUS_SB_WRITEBACKUP 0
@@ -198,6 +167,8 @@ struct hfsplus_sb_info {
#define HFSPLUS_SB_HFSX 3
#define HFSPLUS_SB_CASEFOLD 4
#define HFSPLUS_SB_NOBARRIER 5
+#define HFSPLUS_SB_UID 6
+#define HFSPLUS_SB_GID 7
static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
{
@@ -304,7 +275,7 @@ struct hfsplus_readdir_data {
*/
static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
{
- return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
+ return max_t(unsigned short, HFSPLUS_SB(sb)->min_io_size,
HFSPLUS_SECTOR_SIZE);
}
@@ -385,21 +356,21 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
void hfs_btree_close(struct hfs_btree *tree);
int hfs_btree_write(struct hfs_btree *tree);
-int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
+int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
void hfs_bmap_free(struct hfs_bnode *node);
/* bnode.c */
-void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len);
-u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off);
-u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off);
-void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off);
-void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len);
-void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data);
-void hfs_bnode_clear(struct hfs_bnode *node, int off, int len);
-void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
- struct hfs_bnode *src_node, int src, int len);
-void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len);
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off);
+u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off);
+void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off);
+void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len);
+void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data);
+void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len);
+void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
+ struct hfs_bnode *src_node, u32 src, u32 len);
+void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len);
void hfs_bnode_dump(struct hfs_bnode *node);
void hfs_bnode_unlink(struct hfs_bnode *node);
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid);
@@ -414,7 +385,7 @@ bool hfs_bnode_need_zeroout(struct hfs_btree *tree);
/* brec.c */
u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off);
u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec);
-int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len);
+int hfs_brec_insert(struct hfs_find_data *fd, void *entry, u32 entry_len);
int hfs_brec_remove(struct hfs_find_data *fd);
/* bfind.c */
@@ -427,7 +398,7 @@ int hfs_find_rec_by_key(struct hfs_bnode *bnode, struct hfs_find_data *fd,
int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
search_strategy_t rec_found);
int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare);
-int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len);
+int hfs_brec_read(struct hfs_find_data *fd, void *rec, u32 rec_len);
int hfs_brec_goto(struct hfs_find_data *fd, int cnt);
/* catalog.c */
@@ -468,6 +439,10 @@ extern const struct address_space_operations hfsplus_aops;
extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations;
+int hfsplus_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode);
void hfsplus_delete_inode(struct inode *inode);
@@ -477,22 +452,21 @@ void hfsplus_inode_write_fork(struct inode *inode,
struct hfsplus_fork_raw *fork);
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd);
int hfsplus_cat_write_inode(struct inode *inode);
-int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags);
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
-int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-int hfsplus_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
+int hfsplus_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int hfsplus_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
/* ioctl.c */
long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
/* options.c */
void hfsplus_fill_defaults(struct hfsplus_sb_info *opts);
-int hfsplus_parse_options_remount(char *input, int *force);
-int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi);
+int hfsplus_parse_param(struct fs_context *fc, struct fs_parameter *param);
int hfsplus_show_options(struct seq_file *seq, struct dentry *root);
/* part_tbl.c */
@@ -502,6 +476,8 @@ int hfs_part_find(struct super_block *sb, sector_t *part_start,
/* super.c */
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino);
void hfsplus_mark_mdb_dirty(struct super_block *sb);
+void hfsplus_prepare_volume_header_for_commit(struct hfsplus_vh *vhdr);
+int hfsplus_commit_superblock(struct super_block *sb);
/* tables.c */
extern u16 hfsplus_case_fold_table[];
@@ -513,8 +489,12 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2);
int hfsplus_strcmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2);
-int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr,
- char *astr, int *len_p);
+int hfsplus_uni2asc_str(struct super_block *sb,
+ const struct hfsplus_unistr *ustr, char *astr,
+ int *len_p);
+int hfsplus_uni2asc_xattr_str(struct super_block *sb,
+ const struct hfsplus_attr_unistr *ustr,
+ char *astr, int *len_p);
int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
int max_unistr_len, const char *astr, int len);
int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str);
@@ -523,7 +503,7 @@ int hfsplus_compare_dentry(const struct dentry *dentry, unsigned int len,
/* wrapper.c */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
- void **data, int op, int op_flags);
+ void **data, blk_opf_t opf);
int hfsplus_read_wrapper(struct super_block *sb);
/*
@@ -548,6 +528,69 @@ static inline __be32 __hfsp_ut2mt(time64_t ut)
return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
}
+static inline enum hfsplus_btree_mutex_classes
+hfsplus_btree_lock_class(struct hfs_btree *tree)
+{
+ enum hfsplus_btree_mutex_classes class;
+
+ switch (tree->cnid) {
+ case HFSPLUS_CAT_CNID:
+ class = CATALOG_BTREE_MUTEX;
+ break;
+ case HFSPLUS_EXT_CNID:
+ class = EXTENTS_BTREE_MUTEX;
+ break;
+ case HFSPLUS_ATTR_CNID:
+ class = ATTR_BTREE_MUTEX;
+ break;
+ default:
+ BUG();
+ }
+ return class;
+}
+
+static inline
+bool is_bnode_offset_valid(struct hfs_bnode *node, u32 off)
+{
+ bool is_valid = off < node->tree->node_size;
+
+ if (!is_valid) {
+ pr_err("requested invalid offset: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off);
+ }
+
+ return is_valid;
+}
+
+static inline
+u32 check_and_correct_requested_length(struct hfs_bnode *node, u32 off, u32 len)
+{
+ unsigned int node_size;
+
+ if (!is_bnode_offset_valid(node, off))
+ return 0;
+
+ node_size = node->tree->node_size;
+
+ if ((off + len) > node_size) {
+ u32 new_len = node_size - off;
+
+ pr_err("requested length has been corrected: "
+ "NODE: id %u, type %#x, height %u, "
+ "node_size %u, offset %u, "
+ "requested_len %u, corrected_len %u\n",
+ node->this, node->type, node->height,
+ node->tree->node_size, off, len, new_len);
+
+ return new_len;
+ }
+
+ return len;
+}
+
/* compatibility */
#define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
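The two helpers above only bound an (offset, length) request against the node size. A standalone userspace sketch of that arithmetic, with made-up node sizes instead of the kernel structures (illustrative only):

#include <stdio.h>

/* Mirrors the clamping done by is_bnode_offset_valid() and
 * check_and_correct_requested_length(): reject offsets beyond the node,
 * trim lengths that would run past its end.
 */
static unsigned int clamp_request(unsigned int node_size, unsigned int off,
				  unsigned int len)
{
	if (off >= node_size)
		return 0;			/* invalid offset: nothing to access */
	if (off + len > node_size)
		return node_size - off;		/* corrected length */
	return len;
}

int main(void)
{
	/* hypothetical 4096-byte b-tree node */
	printf("%u\n", clamp_request(4096, 4000, 200));	/* 96: trimmed */
	printf("%u\n", clamp_request(4096, 5000, 16));	/* 0: rejected */
	printf("%u\n", clamp_request(4096, 100, 16));	/* 16: unchanged */
	return 0;
}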
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 456e87aec7fd..83b5dbde924b 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -15,394 +15,6 @@
#define _LINUX_HFSPLUS_RAW_H
#include <linux/types.h>
-
-/* Some constants */
-#define HFSPLUS_SECTOR_SIZE 512
-#define HFSPLUS_SECTOR_SHIFT 9
-#define HFSPLUS_VOLHEAD_SECTOR 2
-#define HFSPLUS_VOLHEAD_SIG 0x482b
-#define HFSPLUS_VOLHEAD_SIGX 0x4858
-#define HFSPLUS_SUPER_MAGIC 0x482b
-#define HFSPLUS_MIN_VERSION 4
-#define HFSPLUS_CURRENT_VERSION 5
-
-#define HFSP_WRAP_MAGIC 0x4244
-#define HFSP_WRAP_ATTRIB_SLOCK 0x8000
-#define HFSP_WRAP_ATTRIB_SPARED 0x0200
-
-#define HFSP_WRAPOFF_SIG 0x00
-#define HFSP_WRAPOFF_ATTRIB 0x0A
-#define HFSP_WRAPOFF_ABLKSIZE 0x14
-#define HFSP_WRAPOFF_ABLKSTART 0x1C
-#define HFSP_WRAPOFF_EMBEDSIG 0x7C
-#define HFSP_WRAPOFF_EMBEDEXT 0x7E
-
-#define HFSP_HIDDENDIR_NAME \
- "\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
-
-#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */
-#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */
-
-#define HFSP_SYMLINK_TYPE 0x736c6e6b /* 'slnk' */
-#define HFSP_SYMLINK_CREATOR 0x72686170 /* 'rhap' */
-
-#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */
-
-/* Structures used on disk */
-
-typedef __be32 hfsplus_cnid;
-typedef __be16 hfsplus_unichr;
-
-#define HFSPLUS_MAX_STRLEN 255
-#define HFSPLUS_ATTR_MAX_STRLEN 127
-
-/* A "string" as used in filenames, etc. */
-struct hfsplus_unistr {
- __be16 length;
- hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
-} __packed;
-
-/*
- * A "string" is used in attributes file
- * for name of extended attribute
- */
-struct hfsplus_attr_unistr {
- __be16 length;
- hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
-} __packed;
-
-/* POSIX permissions */
-struct hfsplus_perm {
- __be32 owner;
- __be32 group;
- u8 rootflags;
- u8 userflags;
- __be16 mode;
- __be32 dev;
-} __packed;
-
-#define HFSPLUS_FLG_NODUMP 0x01
-#define HFSPLUS_FLG_IMMUTABLE 0x02
-#define HFSPLUS_FLG_APPEND 0x04
-
-/* A single contiguous area of a file */
-struct hfsplus_extent {
- __be32 start_block;
- __be32 block_count;
-} __packed;
-typedef struct hfsplus_extent hfsplus_extent_rec[8];
-
-/* Information for a "Fork" in a file */
-struct hfsplus_fork_raw {
- __be64 total_size;
- __be32 clump_size;
- __be32 total_blocks;
- hfsplus_extent_rec extents;
-} __packed;
-
-/* HFS+ Volume Header */
-struct hfsplus_vh {
- __be16 signature;
- __be16 version;
- __be32 attributes;
- __be32 last_mount_vers;
- u32 reserved;
-
- __be32 create_date;
- __be32 modify_date;
- __be32 backup_date;
- __be32 checked_date;
-
- __be32 file_count;
- __be32 folder_count;
-
- __be32 blocksize;
- __be32 total_blocks;
- __be32 free_blocks;
-
- __be32 next_alloc;
- __be32 rsrc_clump_sz;
- __be32 data_clump_sz;
- hfsplus_cnid next_cnid;
-
- __be32 write_count;
- __be64 encodings_bmp;
-
- u32 finder_info[8];
-
- struct hfsplus_fork_raw alloc_file;
- struct hfsplus_fork_raw ext_file;
- struct hfsplus_fork_raw cat_file;
- struct hfsplus_fork_raw attr_file;
- struct hfsplus_fork_raw start_file;
-} __packed;
-
-/* HFS+ volume attributes */
-#define HFSPLUS_VOL_UNMNT (1 << 8)
-#define HFSPLUS_VOL_SPARE_BLK (1 << 9)
-#define HFSPLUS_VOL_NOCACHE (1 << 10)
-#define HFSPLUS_VOL_INCNSTNT (1 << 11)
-#define HFSPLUS_VOL_NODEID_REUSED (1 << 12)
-#define HFSPLUS_VOL_JOURNALED (1 << 13)
-#define HFSPLUS_VOL_SOFTLOCK (1 << 15)
-#define HFSPLUS_VOL_UNUSED_NODE_FIX (1 << 31)
-
-/* HFS+ BTree node descriptor */
-struct hfs_bnode_desc {
- __be32 next;
- __be32 prev;
- s8 type;
- u8 height;
- __be16 num_recs;
- u16 reserved;
-} __packed;
-
-/* HFS+ BTree node types */
-#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
-#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
-#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
-#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
-
-/* HFS+ BTree header */
-struct hfs_btree_header_rec {
- __be16 depth;
- __be32 root;
- __be32 leaf_count;
- __be32 leaf_head;
- __be32 leaf_tail;
- __be16 node_size;
- __be16 max_key_len;
- __be32 node_count;
- __be32 free_nodes;
- u16 reserved1;
- __be32 clump_size;
- u8 btree_type;
- u8 key_type;
- __be32 attributes;
- u32 reserved3[16];
-} __packed;
-
-/* BTree attributes */
-#define HFS_TREE_BIGKEYS 2
-#define HFS_TREE_VARIDXKEYS 4
-
-/* HFS+ BTree misc info */
-#define HFSPLUS_TREE_HEAD 0
-#define HFSPLUS_NODE_MXSZ 32768
-#define HFSPLUS_ATTR_TREE_NODE_SIZE 8192
-#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT 3
-#define HFSPLUS_BTREE_HDR_USER_BYTES 128
-
-/* Some special File ID numbers (stolen from hfs.h) */
-#define HFSPLUS_POR_CNID 1 /* Parent Of the Root */
-#define HFSPLUS_ROOT_CNID 2 /* ROOT directory */
-#define HFSPLUS_EXT_CNID 3 /* EXTents B-tree */
-#define HFSPLUS_CAT_CNID 4 /* CATalog B-tree */
-#define HFSPLUS_BAD_CNID 5 /* BAD blocks file */
-#define HFSPLUS_ALLOC_CNID 6 /* ALLOCation file */
-#define HFSPLUS_START_CNID 7 /* STARTup file */
-#define HFSPLUS_ATTR_CNID 8 /* ATTRibutes file */
-#define HFSPLUS_EXCH_CNID 15 /* ExchangeFiles temp id */
-#define HFSPLUS_FIRSTUSER_CNID 16 /* first available user id */
-
-/* btree key type */
-#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
-#define HFSPLUS_KEY_BINARY 0xBC /* case-sensitive */
-
-/* HFS+ catalog entry key */
-struct hfsplus_cat_key {
- __be16 key_len;
- hfsplus_cnid parent;
- struct hfsplus_unistr name;
-} __packed;
-
-#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
-
-/* Structs from hfs.h */
-struct hfsp_point {
- __be16 v;
- __be16 h;
-} __packed;
-
-struct hfsp_rect {
- __be16 top;
- __be16 left;
- __be16 bottom;
- __be16 right;
-} __packed;
-
-
-/* HFS directory info (stolen from hfs.h */
-struct DInfo {
- struct hfsp_rect frRect;
- __be16 frFlags;
- struct hfsp_point frLocation;
- __be16 frView;
-} __packed;
-
-struct DXInfo {
- struct hfsp_point frScroll;
- __be32 frOpenChain;
- __be16 frUnused;
- __be16 frComment;
- __be32 frPutAway;
-} __packed;
-
-/* HFS+ folder data (part of an hfsplus_cat_entry) */
-struct hfsplus_cat_folder {
- __be16 type;
- __be16 flags;
- __be32 valence;
- hfsplus_cnid id;
- __be32 create_date;
- __be32 content_mod_date;
- __be32 attribute_mod_date;
- __be32 access_date;
- __be32 backup_date;
- struct hfsplus_perm permissions;
- struct DInfo user_info;
- struct DXInfo finder_info;
- __be32 text_encoding;
- __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
-} __packed;
-
-/* HFS file info (stolen from hfs.h) */
-struct FInfo {
- __be32 fdType;
- __be32 fdCreator;
- __be16 fdFlags;
- struct hfsp_point fdLocation;
- __be16 fdFldr;
-} __packed;
-
-struct FXInfo {
- __be16 fdIconID;
- u8 fdUnused[8];
- __be16 fdComment;
- __be32 fdPutAway;
-} __packed;
-
-/* HFS+ file data (part of a cat_entry) */
-struct hfsplus_cat_file {
- __be16 type;
- __be16 flags;
- u32 reserved1;
- hfsplus_cnid id;
- __be32 create_date;
- __be32 content_mod_date;
- __be32 attribute_mod_date;
- __be32 access_date;
- __be32 backup_date;
- struct hfsplus_perm permissions;
- struct FInfo user_info;
- struct FXInfo finder_info;
- __be32 text_encoding;
- u32 reserved2;
-
- struct hfsplus_fork_raw data_fork;
- struct hfsplus_fork_raw rsrc_fork;
-} __packed;
-
-/* File and folder flag bits */
-#define HFSPLUS_FILE_LOCKED 0x0001
-#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
-#define HFSPLUS_XATTR_EXISTS 0x0004
-#define HFSPLUS_ACL_EXISTS 0x0008
-#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
- * (HFSX only) */
-
-/* HFS+ catalog thread (part of a cat_entry) */
-struct hfsplus_cat_thread {
- __be16 type;
- s16 reserved;
- hfsplus_cnid parentID;
- struct hfsplus_unistr nodeName;
-} __packed;
-
-#define HFSPLUS_MIN_THREAD_SZ 10
-
-/* A data record in the catalog tree */
-typedef union {
- __be16 type;
- struct hfsplus_cat_folder folder;
- struct hfsplus_cat_file file;
- struct hfsplus_cat_thread thread;
-} __packed hfsplus_cat_entry;
-
-/* HFS+ catalog entry type */
-#define HFSPLUS_FOLDER 0x0001
-#define HFSPLUS_FILE 0x0002
-#define HFSPLUS_FOLDER_THREAD 0x0003
-#define HFSPLUS_FILE_THREAD 0x0004
-
-/* HFS+ extents tree key */
-struct hfsplus_ext_key {
- __be16 key_len;
- u8 fork_type;
- u8 pad;
- hfsplus_cnid cnid;
- __be32 start_block;
-} __packed;
-
-#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
-
-#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
-#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
-
-#define HFSPLUS_ATTR_INLINE_DATA 0x10
-#define HFSPLUS_ATTR_FORK_DATA 0x20
-#define HFSPLUS_ATTR_EXTENTS 0x30
-
-/* HFS+ attributes tree key */
-struct hfsplus_attr_key {
- __be16 key_len;
- __be16 pad;
- hfsplus_cnid cnid;
- __be32 start_block;
- struct hfsplus_attr_unistr key_name;
-} __packed;
-
-#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
-
-/* HFS+ fork data attribute */
-struct hfsplus_attr_fork_data {
- __be32 record_type;
- __be32 reserved;
- struct hfsplus_fork_raw the_fork;
-} __packed;
-
-/* HFS+ extension attribute */
-struct hfsplus_attr_extents {
- __be32 record_type;
- __be32 reserved;
- struct hfsplus_extent extents;
-} __packed;
-
-#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
-
-/* HFS+ attribute inline data */
-struct hfsplus_attr_inline_data {
- __be32 record_type;
- __be32 reserved1;
- u8 reserved2[6];
- __be16 length;
- u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
-} __packed;
-
-/* A data record in the attributes tree */
-typedef union {
- __be32 record_type;
- struct hfsplus_attr_fork_data fork_data;
- struct hfsplus_attr_extents extents;
- struct hfsplus_attr_inline_data inline_data;
-} __packed hfsplus_attr_entry;
-
-/* HFS+ generic BTree key */
-typedef union {
- __be16 key_len;
- struct hfsplus_cat_key cat;
- struct hfsplus_ext_key ext;
- struct hfsplus_attr_key attr;
-} __packed hfsplus_btree_key;
+#include <linux/hfs_common.h>
#endif
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index d08a8d1d40a4..7ae6745ca7ae 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -23,14 +23,9 @@
#include "hfsplus_raw.h"
#include "xattr.h"
-static int hfsplus_readpage(struct file *file, struct page *page)
+static int hfsplus_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, hfsplus_get_block);
-}
-
-static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, hfsplus_get_block, wbc);
+ return block_read_full_folio(folio, hfsplus_get_block);
}
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
@@ -43,14 +38,14 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int hfsplus_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping, loff_t pos,
+ unsigned len, struct folio **foliop,
+ void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -64,14 +59,15 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfsplus_get_block);
}
-static int hfsplus_releasepage(struct page *page, gfp_t mask)
+static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
- int i, res = 1;
+ int i;
+ bool res = true;
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
@@ -85,26 +81,26 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
break;
default:
BUG();
- return 0;
+ return false;
}
if (!tree)
- return 0;
+ return false;
if (tree->node_size >= PAGE_SIZE) {
- nidx = page->index >>
+ nidx = folio->index >>
(tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
- res = 0;
+ res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
- nidx = page->index <<
+ nidx = folio->index <<
(PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
@@ -113,7 +109,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
- res = 0;
+ res = false;
break;
}
hfs_bnode_unhash(node);
@@ -121,7 +117,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
- return res ? try_to_free_buffers(page) : 0;
+ return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -156,24 +152,27 @@ static int hfsplus_writepages(struct address_space *mapping,
}
const struct address_space_operations hfsplus_btree_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = hfsplus_readpage,
- .writepage = hfsplus_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = hfsplus_read_folio,
+ .writepages = hfsplus_writepages,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
+ .migrate_folio = buffer_migrate_folio,
.bmap = hfsplus_bmap,
- .releasepage = hfsplus_releasepage,
+ .release_folio = hfsplus_release_folio,
};
const struct address_space_operations hfsplus_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = hfsplus_readpage,
- .writepage = hfsplus_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = hfsplus_read_folio,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
.direct_IO = hfsplus_direct_IO,
.writepages = hfsplus_writepages,
+ .migrate_folio = buffer_migrate_folio,
};
const struct dentry_operations hfsplus_dentry_operations = {
@@ -181,20 +180,36 @@ const struct dentry_operations hfsplus_dentry_operations = {
.d_compare = hfsplus_compare_dentry,
};
-static void hfsplus_get_perms(struct inode *inode,
- struct hfsplus_perm *perms, int dir)
+static int hfsplus_get_perms(struct inode *inode,
+ struct hfsplus_perm *perms, int dir)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
u16 mode;
mode = be16_to_cpu(perms->mode);
+ if (dir) {
+ if (mode && !S_ISDIR(mode))
+ goto bad_type;
+ } else if (mode) {
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFLNK:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ break;
+ default:
+ goto bad_type;
+ }
+ }
i_uid_write(inode, be32_to_cpu(perms->owner));
- if (!i_uid_read(inode) && !mode)
+ if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
inode->i_uid = sbi->uid;
i_gid_write(inode, be32_to_cpu(perms->group));
- if (!i_gid_read(inode) && !mode)
+ if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
inode->i_gid = sbi->gid;
if (dir) {
@@ -213,6 +228,10 @@ static void hfsplus_get_perms(struct inode *inode,
inode->i_flags |= S_APPEND;
else
inode->i_flags &= ~S_APPEND;
+ return 0;
+bad_type:
+ pr_err("invalid file type 0%04o for inode %lu\n", mode, inode->i_ino);
+ return -EIO;
}
static int hfsplus_file_open(struct inode *inode, struct file *file)
@@ -244,13 +263,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
return 0;
}
-static int hfsplus_setattr(struct user_namespace *mnt_userns,
+static int hfsplus_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
return error;
@@ -265,16 +284,16 @@ static int hfsplus_setattr(struct user_namespace *mnt_userns,
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
-int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
@@ -296,7 +315,7 @@ int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP;
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
@@ -306,6 +325,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
struct inode *inode = file->f_mapping->host;
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+ struct hfsplus_vh *vhdr = sbi->s_vhdr;
int error = 0, error2;
error = file_write_and_wait_range(file, start, end);
@@ -349,6 +369,14 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
error = error2;
}
+ mutex_lock(&sbi->vh_mutex);
+ hfsplus_prepare_volume_header_for_commit(vhdr);
+ mutex_unlock(&sbi->vh_mutex);
+
+ error2 = hfsplus_commit_superblock(inode->i_sb);
+ if (!error)
+ error = error2;
+
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(inode->i_sb->s_bdev);
@@ -369,8 +397,9 @@ static const struct file_operations hfsplus_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .splice_read = generic_file_splice_read,
+ .mmap_prepare = generic_file_mmap_prepare,
+ .splice_read = filemap_splice_read,
+ .splice_write = iter_file_splice_write,
.fsync = hfsplus_file_fsync,
.open = hfsplus_file_open,
.release = hfsplus_file_release,
@@ -388,9 +417,9 @@ struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
return NULL;
inode->i_ino = sbi->next_cnid++;
- inode_init_owner(&init_user_ns, inode, dir, mode);
+ inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
hip = HFSPLUS_I(inode);
INIT_LIST_HEAD(&hip->open_dir_list);
@@ -509,15 +538,23 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
- WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
+ if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
+ pr_err("bad catalog folder entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_folder));
- hfsplus_get_perms(inode, &folder->permissions, 1);
+ res = hfsplus_get_perms(inode, &folder->permissions, 1);
+ if (res)
+ goto out;
set_nlink(inode, 1);
inode->i_size = 2 + be32_to_cpu(folder->valence);
- inode->i_atime = hfsp_mt2ut(folder->access_date);
- inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
- inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
+ inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date));
+ inode_set_mtime_to_ts(inode,
+ hfsp_mt2ut(folder->content_mod_date));
+ inode_set_ctime_to_ts(inode,
+ hfsp_mt2ut(folder->attribute_mod_date));
HFSPLUS_I(inode)->create_date = folder->create_date;
HFSPLUS_I(inode)->fs_blocks = 0;
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
@@ -529,13 +566,19 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
} else if (type == HFSPLUS_FILE) {
struct hfsplus_cat_file *file = &entry.file;
- WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
+ if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
+ pr_err("bad catalog file entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
&file->rsrc_fork : &file->data_fork);
- hfsplus_get_perms(inode, &file->permissions, 0);
+ res = hfsplus_get_perms(inode, &file->permissions, 0);
+ if (res)
+ goto out;
set_nlink(inode, 1);
if (S_ISREG(inode->i_mode)) {
if (file->permissions.dev)
@@ -552,14 +595,17 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
init_special_inode(inode, inode->i_mode,
be32_to_cpu(file->permissions.dev));
}
- inode->i_atime = hfsp_mt2ut(file->access_date);
- inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
- inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
+ inode_set_atime_to_ts(inode, hfsp_mt2ut(file->access_date));
+ inode_set_mtime_to_ts(inode,
+ hfsp_mt2ut(file->content_mod_date));
+ inode_set_ctime_to_ts(inode,
+ hfsp_mt2ut(file->attribute_mod_date));
HFSPLUS_I(inode)->create_date = file->create_date;
} else {
pr_err("bad catalog entry used to create inode\n");
res = -EIO;
}
+out:
return res;
}
@@ -568,6 +614,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
struct inode *main_inode = inode;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
+ int res = 0;
if (HFSPLUS_IS_RSRC(inode))
main_inode = HFSPLUS_I(inode)->rsrc_inode;
@@ -586,14 +633,18 @@ int hfsplus_cat_write_inode(struct inode *inode)
if (S_ISDIR(main_inode->i_mode)) {
struct hfsplus_cat_folder *folder = &entry.folder;
- WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
+ if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
+ pr_err("bad catalog folder entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
hfsplus_cat_set_perms(inode, &folder->permissions);
- folder->access_date = hfsp_ut2mt(inode->i_atime);
- folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
- folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
+ folder->access_date = hfsp_ut2mt(inode_get_atime(inode));
+ folder->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
+ folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
folder->valence = cpu_to_be32(inode->i_size - 2);
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
folder->subfolders =
@@ -611,7 +662,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
} else {
struct hfsplus_cat_file *file = &entry.file;
- WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
+ if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
+ pr_err("bad catalog file entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->data_fork);
@@ -622,9 +677,9 @@ int hfsplus_cat_write_inode(struct inode *inode)
file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
else
file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
- file->access_date = hfsp_ut2mt(inode->i_atime);
- file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
- file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
+ file->access_date = hfsp_ut2mt(inode_get_atime(inode));
+ file->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
+ file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
}
@@ -632,10 +687,10 @@ int hfsplus_cat_write_inode(struct inode *inode)
set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
hfs_find_exit(&fd);
- return 0;
+ return res;
}
-int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int hfsplus_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -653,8 +708,8 @@ int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
return 0;
}
-int hfsplus_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int hfsplus_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -680,7 +735,7 @@ int hfsplus_fileattr_set(struct user_namespace *mnt_userns,
else
hip->userflags &= ~HFSPLUS_FLG_NODUMP;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
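The timestamp updates in this file, like those in catalog.c and dir.c above, go through the VFS accessors instead of assigning i_mtime/i_ctime directly: inode_set_ctime_current() stamps ctime with the current time and returns the timespec64, which is then mirrored into mtime. The two recurring forms, for reference (kernel context):

	/* set ctime to "now" and copy the same timestamp into mtime */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));

	/* and when writing the catalog record back, read it out again */
	file->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));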
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 5661a2e24d03..40d04dba13ac 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -40,7 +40,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
/* Directory containing the bootable system */
vh->finder_info[0] = bvh->finder_info[0] =
- cpu_to_be32(parent_ino(dentry));
+ cpu_to_be32(d_parent_ino(dentry));
/*
* Bootloader. Just using the inode here breaks in the case of
@@ -51,7 +51,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
/* Per spec, the OS X system folder - same as finder_info[0] here */
vh->finder_info[5] = bvh->finder_info[5] =
- cpu_to_be32(parent_ino(dentry));
+ cpu_to_be32(d_parent_ino(dentry));
mutex_unlock(&sbi->vh_mutex);
return 0;
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 047e05c57560..9b377481f397 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -12,7 +12,9 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/parser.h>
+#include <linux/fs_struct.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/nls.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
@@ -23,26 +25,23 @@ enum {
opt_creator, opt_type,
opt_umask, opt_uid, opt_gid,
opt_part, opt_session, opt_nls,
- opt_nodecompose, opt_decompose,
- opt_barrier, opt_nobarrier,
- opt_force, opt_err
+ opt_decompose, opt_barrier,
+ opt_force,
};
-static const match_table_t tokens = {
- { opt_creator, "creator=%s" },
- { opt_type, "type=%s" },
- { opt_umask, "umask=%o" },
- { opt_uid, "uid=%u" },
- { opt_gid, "gid=%u" },
- { opt_part, "part=%u" },
- { opt_session, "session=%u" },
- { opt_nls, "nls=%s" },
- { opt_decompose, "decompose" },
- { opt_nodecompose, "nodecompose" },
- { opt_barrier, "barrier" },
- { opt_nobarrier, "nobarrier" },
- { opt_force, "force" },
- { opt_err, NULL }
+static const struct fs_parameter_spec hfs_param_spec[] = {
+ fsparam_string ("creator", opt_creator),
+ fsparam_string ("type", opt_type),
+ fsparam_u32oct ("umask", opt_umask),
+ fsparam_u32 ("uid", opt_uid),
+ fsparam_u32 ("gid", opt_gid),
+ fsparam_u32 ("part", opt_part),
+ fsparam_u32 ("session", opt_session),
+ fsparam_string ("nls", opt_nls),
+ fsparam_flag_no ("decompose", opt_decompose),
+ fsparam_flag_no ("barrier", opt_barrier),
+ fsparam_flag ("force", opt_force),
+ {}
};
/* Initialize an options object to reasonable defaults */
@@ -60,158 +59,89 @@ void hfsplus_fill_defaults(struct hfsplus_sb_info *opts)
opts->session = -1;
}
-/* convert a "four byte character" to a 32 bit int with error checks */
-static inline int match_fourchar(substring_t *arg, u32 *result)
+/* Parse one mount option. Returns 0 on success or a negative errno on failure. */
+int hfsplus_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- if (arg->to - arg->from != 4)
- return -EINVAL;
- memcpy(result, arg->from, 4);
- return 0;
-}
-
-int hfsplus_parse_options_remount(char *input, int *force)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int token;
-
- if (!input)
- return 1;
-
- while ((p = strsep(&input, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case opt_force:
- *force = 1;
- break;
- default:
- break;
+ struct hfsplus_sb_info *sbi = fc->s_fs_info;
+ struct fs_parse_result result;
+ int opt;
+
+ /*
+ * Only the force option is examined during remount; all others
+ * are ignored.
+ */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE &&
+ strncmp(param->key, "force", 5))
+ return 0;
+
+ opt = fs_parse(fc, hfs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case opt_creator:
+ if (strlen(param->string) != 4) {
+ pr_err("creator requires a 4 character value\n");
+ return -EINVAL;
}
- }
-
- return 1;
-}
-
-/* Parse options from mount. Returns 0 on failure */
-/* input is the options passed to mount() as a string */
-int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int tmp, token;
-
- if (!input)
- goto done;
-
- while ((p = strsep(&input, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case opt_creator:
- if (match_fourchar(&args[0], &sbi->creator)) {
- pr_err("creator requires a 4 character value\n");
- return 0;
- }
- break;
- case opt_type:
- if (match_fourchar(&args[0], &sbi->type)) {
- pr_err("type requires a 4 character value\n");
- return 0;
- }
- break;
- case opt_umask:
- if (match_octal(&args[0], &tmp)) {
- pr_err("umask requires a value\n");
- return 0;
- }
- sbi->umask = (umode_t)tmp;
- break;
- case opt_uid:
- if (match_int(&args[0], &tmp)) {
- pr_err("uid requires an argument\n");
- return 0;
- }
- sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp);
- if (!uid_valid(sbi->uid)) {
- pr_err("invalid uid specified\n");
- return 0;
- }
- break;
- case opt_gid:
- if (match_int(&args[0], &tmp)) {
- pr_err("gid requires an argument\n");
- return 0;
- }
- sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp);
- if (!gid_valid(sbi->gid)) {
- pr_err("invalid gid specified\n");
- return 0;
- }
- break;
- case opt_part:
- if (match_int(&args[0], &sbi->part)) {
- pr_err("part requires an argument\n");
- return 0;
- }
- break;
- case opt_session:
- if (match_int(&args[0], &sbi->session)) {
- pr_err("session requires an argument\n");
- return 0;
- }
- break;
- case opt_nls:
- if (sbi->nls) {
- pr_err("unable to change nls mapping\n");
- return 0;
- }
- p = match_strdup(&args[0]);
- if (p)
- sbi->nls = load_nls(p);
- if (!sbi->nls) {
- pr_err("unable to load nls mapping \"%s\"\n",
- p);
- kfree(p);
- return 0;
- }
- kfree(p);
- break;
- case opt_decompose:
- clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
- break;
- case opt_nodecompose:
+ memcpy(&sbi->creator, param->string, 4);
+ break;
+ case opt_type:
+ if (strlen(param->string) != 4) {
+ pr_err("type requires a 4 character value\n");
+ return -EINVAL;
+ }
+ memcpy(&sbi->type, param->string, 4);
+ break;
+ case opt_umask:
+ sbi->umask = (umode_t)result.uint_32;
+ break;
+ case opt_uid:
+ sbi->uid = result.uid;
+ set_bit(HFSPLUS_SB_UID, &sbi->flags);
+ break;
+ case opt_gid:
+ sbi->gid = result.gid;
+ set_bit(HFSPLUS_SB_GID, &sbi->flags);
+ break;
+ case opt_part:
+ sbi->part = result.uint_32;
+ break;
+ case opt_session:
+ sbi->session = result.uint_32;
+ break;
+ case opt_nls:
+ if (sbi->nls) {
+ pr_err("unable to change nls mapping\n");
+ return -EINVAL;
+ }
+ sbi->nls = load_nls(param->string);
+ if (!sbi->nls) {
+ pr_err("unable to load nls mapping \"%s\"\n",
+ param->string);
+ return -EINVAL;
+ }
+ break;
+ case opt_decompose:
+ if (result.negated)
set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
- break;
- case opt_barrier:
- clear_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
- break;
- case opt_nobarrier:
+ else
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
+ break;
+ case opt_barrier:
+ if (result.negated)
set_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
- break;
- case opt_force:
- set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
- break;
- default:
- return 0;
- }
- }
-
-done:
- if (!sbi->nls) {
- /* try utf8 first, as this is the old default behaviour */
- sbi->nls = load_nls("utf8");
- if (!sbi->nls)
- sbi->nls = load_nls_default();
- if (!sbi->nls)
- return 0;
+ else
+ clear_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
+ break;
+ case opt_force:
+ set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
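/*
 * For context: hfsplus_parse_param() above is driven by a fs_parameter_spec
 * table named hfs_param_spec, which is defined elsewhere in this patch and
 * not shown in this excerpt.  The sketch below is only an illustration of
 * what such a table could look like, assuming the standard helpers from
 * <linux/fs_parse.h> and the opt_* enum from options.c; the entries are an
 * assumption, not the patch's actual definition.
 */
static const struct fs_parameter_spec hfs_param_spec_example[] = {
	fsparam_string	("creator",	opt_creator),
	fsparam_string	("type",	opt_type),
	fsparam_u32oct	("umask",	opt_umask),
	fsparam_uid	("uid",		opt_uid),
	fsparam_gid	("gid",		opt_gid),
	fsparam_u32	("part",	opt_part),
	fsparam_u32	("session",	opt_session),
	fsparam_string	("nls",		opt_nls),
	fsparam_flag_no	("decompose",	opt_decompose),
	fsparam_flag_no	("barrier",	opt_barrier),
	fsparam_flag	("force",	opt_force),
	{}
};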
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index 63164ebc52fa..9ec21664eda6 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -112,8 +112,7 @@ static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
if ((u8 *)pm - (u8 *)buf >= buf_size) {
res = hfsplus_submit_bio(sb,
*part_start + HFS_PMAP_BLK + i,
- buf, (void **)&pm, REQ_OP_READ,
- 0);
+ buf, (void **)&pm, REQ_OP_READ);
if (res)
return res;
}
@@ -137,7 +136,7 @@ int hfs_part_find(struct super_block *sb,
return -ENOMEM;
res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
- buf, &data, REQ_OP_READ, 0);
+ buf, &data, REQ_OP_READ);
if (res)
goto out;
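/*
 * The part_tbl.c hunks above drop the trailing op_flags argument from
 * hfsplus_submit_bio().  The prototype lives in hfsplus_fs.h, outside this
 * excerpt; the assumption is that it now takes a single blk_opf_t request
 * operation, roughly as sketched below (illustrative, not the patch's
 * actual declaration):
 */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
		       void *buf, void **data, blk_opf_t opf);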
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index b9e3db3f855f..aaffa9e060a0 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -14,6 +14,7 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>
@@ -64,16 +65,29 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
- INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
- spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
- mutex_init(&HFSPLUS_I(inode)->extents_lock);
- HFSPLUS_I(inode)->flags = 0;
+ atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+ HFSPLUS_I(inode)->first_blocks = 0;
+ HFSPLUS_I(inode)->clump_blocks = 0;
+ HFSPLUS_I(inode)->alloc_blocks = 0;
+ HFSPLUS_I(inode)->cached_start = U32_MAX;
+ HFSPLUS_I(inode)->cached_blocks = 0;
+ memset(HFSPLUS_I(inode)->first_extents, 0, sizeof(hfsplus_extent_rec));
+ memset(HFSPLUS_I(inode)->cached_extents, 0, sizeof(hfsplus_extent_rec));
HFSPLUS_I(inode)->extent_state = 0;
+ mutex_init(&HFSPLUS_I(inode)->extents_lock);
HFSPLUS_I(inode)->rsrc_inode = NULL;
- atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+ HFSPLUS_I(inode)->create_date = 0;
+ HFSPLUS_I(inode)->linkid = 0;
+ HFSPLUS_I(inode)->flags = 0;
+ HFSPLUS_I(inode)->fs_blocks = 0;
+ HFSPLUS_I(inode)->userflags = 0;
+ HFSPLUS_I(inode)->subfolders = 0;
+ INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+ spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
+ HFSPLUS_I(inode)->phys_size = 0;
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
inode->i_ino == HFSPLUS_ROOT_CNID) {
@@ -149,7 +163,7 @@ static int hfsplus_write_inode(struct inode *inode,
{
int err;
- hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
err = hfsplus_ext_write_extent(inode);
if (err)
@@ -164,7 +178,7 @@ static int hfsplus_write_inode(struct inode *inode,
static void hfsplus_evict_inode(struct inode *inode)
{
- hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
+ hfs_dbg("ino %lu\n", inode->i_ino);
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (HFSPLUS_IS_RSRC(inode)) {
@@ -173,17 +187,62 @@ static void hfsplus_evict_inode(struct inode *inode)
}
}
-static int hfsplus_sync_fs(struct super_block *sb, int wait)
+int hfsplus_commit_superblock(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_vh *vhdr = sbi->s_vhdr;
int write_backup = 0;
+ int error = 0, error2;
+
+ hfs_dbg("starting...\n");
+
+ mutex_lock(&sbi->vh_mutex);
+ mutex_lock(&sbi->alloc_mutex);
+ vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
+ vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
+ vhdr->folder_count = cpu_to_be32(sbi->folder_count);
+ vhdr->file_count = cpu_to_be32(sbi->file_count);
+
+ hfs_dbg("free_blocks %u, next_cnid %u, folder_count %u, file_count %u\n",
+ sbi->free_blocks, sbi->next_cnid,
+ sbi->folder_count, sbi->file_count);
+
+ if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
+ memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
+ write_backup = 1;
+ }
+
+ error2 = hfsplus_submit_bio(sb,
+ sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
+ sbi->s_vhdr_buf, NULL, REQ_OP_WRITE);
+ if (!error)
+ error = error2;
+ if (!write_backup)
+ goto out;
+
+ error2 = hfsplus_submit_bio(sb,
+ sbi->part_start + sbi->sect_count - 2,
+ sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE);
+ if (!error)
+ error = error2;
+out:
+ mutex_unlock(&sbi->alloc_mutex);
+ mutex_unlock(&sbi->vh_mutex);
+
+ hfs_dbg("finished: err %d\n", error);
+
+ return error;
+}
+
+static int hfsplus_sync_fs(struct super_block *sb, int wait)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
int error, error2;
if (!wait)
return 0;
- hfs_dbg(SUPER, "hfsplus_sync_fs\n");
+ hfs_dbg("starting...\n");
/*
* Explicitly write out the special metadata inodes.
@@ -207,40 +266,15 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
if (!error)
error = error2;
- mutex_lock(&sbi->vh_mutex);
- mutex_lock(&sbi->alloc_mutex);
- vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
- vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
- vhdr->folder_count = cpu_to_be32(sbi->folder_count);
- vhdr->file_count = cpu_to_be32(sbi->file_count);
-
- if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
- memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
- write_backup = 1;
- }
-
- error2 = hfsplus_submit_bio(sb,
- sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
- sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
- REQ_SYNC);
+ error2 = hfsplus_commit_superblock(sb);
if (!error)
error = error2;
- if (!write_backup)
- goto out;
-
- error2 = hfsplus_submit_bio(sb,
- sbi->part_start + sbi->sect_count - 2,
- sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
- REQ_SYNC);
- if (!error)
- error2 = error;
-out:
- mutex_unlock(&sbi->alloc_mutex);
- mutex_unlock(&sbi->vh_mutex);
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
blkdev_issue_flush(sb->s_bdev);
+ hfs_dbg("finished: err %d\n", error);
+
return error;
}
@@ -277,11 +311,19 @@ void hfsplus_mark_mdb_dirty(struct super_block *sb)
spin_unlock(&sbi->work_lock);
}
+static void delayed_free(struct rcu_head *p)
+{
+ struct hfsplus_sb_info *sbi = container_of(p, struct hfsplus_sb_info, rcu);
+
+ unload_nls(sbi->nls);
+ kfree(sbi);
+}
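+/*
+ * Note on the RCU-deferred free above: hfsplus_put_super() now hands the
+ * sb_info to call_rcu() instead of freeing it inline, presumably because
+ * lockless paths (for example the dentry hash/compare operations, which
+ * consult sbi->nls under RCU-walk) may still dereference the structure
+ * while the superblock is being torn down.
+ */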
+
static void hfsplus_put_super(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
- hfs_dbg(SUPER, "hfsplus_put_super\n");
+ hfs_dbg("starting...\n");
cancel_delayed_work_sync(&sbi->sync_work);
@@ -295,16 +337,16 @@ static void hfsplus_put_super(struct super_block *sb)
hfsplus_sync_fs(sb, 1);
}
+ iput(sbi->alloc_file);
+ iput(sbi->hidden_dir);
hfs_btree_close(sbi->attr_tree);
hfs_btree_close(sbi->cat_tree);
hfs_btree_close(sbi->ext_tree);
- iput(sbi->alloc_file);
- iput(sbi->hidden_dir);
kfree(sbi->s_vhdr_buf);
kfree(sbi->s_backup_vhdr_buf);
- unload_nls(sbi->nls);
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
+ call_rcu(&sbi->rcu, delayed_free);
+
+ hfs_dbg("finished\n");
}
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -326,34 +368,33 @@ static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
+static int hfsplus_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+
sync_filesystem(sb);
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (!(*flags & SB_RDONLY)) {
- struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
- int force = 0;
-
- if (!hfsplus_parse_options_remount(data, &force))
- return -EINVAL;
+ if (!(fc->sb_flags & SB_RDONLY)) {
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ struct hfsplus_vh *vhdr = sbi->s_vhdr;
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
- *flags |= SB_RDONLY;
- } else if (force) {
+ fc->sb_flags |= SB_RDONLY;
+ } else if (test_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
/* nothing */
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
- *flags |= SB_RDONLY;
+ fc->sb_flags |= SB_RDONLY;
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
pr_warn("filesystem is marked journaled, leaving read-only.\n");
sb->s_flags |= SB_RDONLY;
- *flags |= SB_RDONLY;
+ fc->sb_flags |= SB_RDONLY;
}
}
return 0;
@@ -367,38 +408,42 @@ static const struct super_operations hfsplus_sops = {
.put_super = hfsplus_put_super,
.sync_fs = hfsplus_sync_fs,
.statfs = hfsplus_statfs,
- .remount_fs = hfsplus_remount,
.show_options = hfsplus_show_options,
};
-static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+void hfsplus_prepare_volume_header_for_commit(struct hfsplus_vh *vhdr)
+{
+ vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
+ vhdr->modify_date = hfsp_now2mt();
+ be32_add_cpu(&vhdr->write_count, 1);
+ vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
+ vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+}
+
+static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct hfsplus_vh *vhdr;
- struct hfsplus_sb_info *sbi;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
hfsplus_cat_entry entry;
struct hfs_find_data fd;
struct inode *root, *inode;
struct qstr str;
- struct nls_table *nls = NULL;
+ struct nls_table *nls;
u64 last_fs_block, last_fs_page;
+ int silent = fc->sb_flags & SB_SILENT;
int err;
- err = -ENOMEM;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- goto out;
-
- sb->s_fs_info = sbi;
mutex_init(&sbi->alloc_mutex);
mutex_init(&sbi->vh_mutex);
spin_lock_init(&sbi->work_lock);
INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
- hfsplus_fill_defaults(sbi);
err = -EINVAL;
- if (!hfsplus_parse_options(data, sbi)) {
- pr_err("unable to parse mount options\n");
- goto out_unload_nls;
+ if (!sbi->nls) {
+ /* try utf8 first, as this is the old default behaviour */
+ sbi->nls = load_nls("utf8");
+ if (!sbi->nls)
+ sbi->nls = load_nls_default();
}
/* temporarily use utf8 to correctly find the hidden dir below */
@@ -507,7 +552,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
goto out_put_alloc_file;
}
- sb->s_d_op = &hfsplus_dentry_operations;
+ set_default_d_op(sb, &hfsplus_dentry_operations);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
err = -ENOMEM;
@@ -525,7 +570,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
- err = -EINVAL;
+ err = -EIO;
goto out_put_root;
}
inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
@@ -542,11 +587,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
* all three are registered with Apple for our use
*/
- vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
- vhdr->modify_date = hfsp_now2mt();
- be32_add_cpu(&vhdr->write_count, 1);
- vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
- vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+ hfsplus_prepare_volume_header_for_commit(vhdr);
hfsplus_sync_fs(sb, 1);
if (!sbi->hidden_dir) {
@@ -610,7 +651,6 @@ out_unload_nls:
unload_nls(sbi->nls);
unload_nls(nls);
kfree(sbi);
-out:
return err;
}
@@ -624,7 +664,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
struct hfsplus_inode_info *i;
- i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
+ i = alloc_inode_sb(sb, hfsplus_inode_cachep, GFP_KERNEL);
return i ? &i->vfs_inode : NULL;
}
@@ -635,18 +675,46 @@ static void hfsplus_free_inode(struct inode *inode)
#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
-static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hfsplus_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, hfsplus_fill_super);
+}
+
+static void hfsplus_free_fc(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations hfsplus_context_ops = {
+ .parse_param = hfsplus_parse_param,
+ .get_tree = hfsplus_get_tree,
+ .reconfigure = hfsplus_reconfigure,
+ .free = hfsplus_free_fc,
+};
+
+static int hfsplus_init_fs_context(struct fs_context *fc)
+{
+ struct hfsplus_sb_info *sbi;
+
+ sbi = kzalloc(sizeof(struct hfsplus_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE)
+ hfsplus_fill_defaults(sbi);
+
+ fc->s_fs_info = sbi;
+ fc->ops = &hfsplus_context_ops;
+
+ return 0;
}
static struct file_system_type hfsplus_fs_type = {
.owner = THIS_MODULE,
.name = "hfsplus",
- .mount = hfsplus_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = hfsplus_init_fs_context,
};
MODULE_ALIAS_FS("hfsplus");
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 73342c925a4b..d3a142f4518b 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -11,6 +11,9 @@
#include <linux/types.h>
#include <linux/nls.h>
+
+#include <kunit/visibility.h>
+
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
@@ -40,6 +43,18 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
p1 = s1->unicode;
p2 = s2->unicode;
+ if (len1 > HFSPLUS_MAX_STRLEN) {
+ len1 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s1->length), len1);
+ }
+
+ if (len2 > HFSPLUS_MAX_STRLEN) {
+ len2 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s2->length), len2);
+ }
+
while (1) {
c1 = c2 = 0;
@@ -60,6 +75,7 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
return 0;
}
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_strcasecmp);
/* Compare names as a sequence of 16-bit unsigned integers */
int hfsplus_strcmp(const struct hfsplus_unistr *s1,
@@ -74,6 +90,18 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
p1 = s1->unicode;
p2 = s2->unicode;
+ if (len1 > HFSPLUS_MAX_STRLEN) {
+ len1 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s1->length), len1);
+ }
+
+ if (len2 > HFSPLUS_MAX_STRLEN) {
+ len2 = HFSPLUS_MAX_STRLEN;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(s2->length), len2);
+ }
+
for (len = min(len1, len2); len > 0; len--) {
c1 = be16_to_cpu(*p1);
c2 = be16_to_cpu(*p2);
@@ -86,7 +114,7 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
return len1 < len2 ? -1 :
len1 > len2 ? 1 : 0;
}
-
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_strcmp);
#define Hangul_SBase 0xac00
#define Hangul_LBase 0x1100
@@ -119,9 +147,9 @@ static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
return NULL;
}
-int hfsplus_uni2asc(struct super_block *sb,
- const struct hfsplus_unistr *ustr,
- char *astr, int *len_p)
+static int hfsplus_uni2asc(struct super_block *sb,
+ const struct hfsplus_unistr *ustr,
+ int max_len, char *astr, int *len_p)
{
const hfsplus_unichr *ip;
struct nls_table *nls = HFSPLUS_SB(sb)->nls;
@@ -132,7 +160,14 @@ int hfsplus_uni2asc(struct super_block *sb,
op = astr;
ip = ustr->unicode;
+
ustrlen = be16_to_cpu(ustr->length);
+ if (ustrlen > max_len) {
+ ustrlen = max_len;
+ pr_err("invalid length %u has been corrected to %d\n",
+ be16_to_cpu(ustr->length), ustrlen);
+ }
+
len = *len_p;
ce1 = NULL;
compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
@@ -249,6 +284,23 @@ out:
return res;
}
+inline int hfsplus_uni2asc_str(struct super_block *sb,
+ const struct hfsplus_unistr *ustr, char *astr,
+ int *len_p)
+{
+ return hfsplus_uni2asc(sb, ustr, HFSPLUS_MAX_STRLEN, astr, len_p);
+}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_uni2asc_str);
+
+inline int hfsplus_uni2asc_xattr_str(struct super_block *sb,
+ const struct hfsplus_attr_unistr *ustr,
+ char *astr, int *len_p)
+{
+ return hfsplus_uni2asc(sb, (const struct hfsplus_unistr *)ustr,
+ HFSPLUS_ATTR_MAX_STRLEN, astr, len_p);
+}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_uni2asc_xattr_str);
+
/*
* Convert one or more ASCII characters into a single unicode character.
* Returns the number of ASCII characters corresponding to the unicode char.
@@ -375,6 +427,7 @@ int hfsplus_asc2uni(struct super_block *sb,
return -ENAMETOOLONG;
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_asc2uni);
/*
* Hash a string to an integer as appropriate for the HFS+ filesystem.
@@ -427,6 +480,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_hash_dentry);
/*
* Compare strings with HFS+ filename ordering.
@@ -518,3 +572,4 @@ int hfsplus_compare_dentry(const struct dentry *dentry,
return 1;
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(hfsplus_compare_dentry);
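/*
 * EXPORT_SYMBOL_IF_KUNIT() comes from <kunit/visibility.h> and exports these
 * helpers (into the EXPORTED_FOR_KUNIT_TESTING symbol namespace) only when
 * CONFIG_KUNIT is enabled.  If the test below is built as a separate module,
 * it would be expected to import that namespace, e.g. with
 * MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING) (quoted as a string literal
 * on newer kernels); whether this patch does so is outside this excerpt.
 */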
diff --git a/fs/hfsplus/unicode_test.c b/fs/hfsplus/unicode_test.c
new file mode 100644
index 000000000000..5a7a6859efe3
--- /dev/null
+++ b/fs/hfsplus/unicode_test.c
@@ -0,0 +1,1579 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for HFS+ Unicode string operations
+ *
+ * Copyright (C) 2025 Viacheslav Dubeyko <slava@dubeyko.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/nls.h>
+#include <linux/dcache.h>
+#include <linux/stringhash.h>
+#include "hfsplus_fs.h"
+
+struct test_mock_string_env {
+ struct hfsplus_unistr str1;
+ struct hfsplus_unistr str2;
+ char *buf;
+ u32 buf_size;
+};
+
+static struct test_mock_string_env *setup_mock_str_env(u32 buf_size)
+{
+ struct test_mock_string_env *env;
+
+ env = kzalloc(sizeof(struct test_mock_string_env), GFP_KERNEL);
+ if (!env)
+ return NULL;
+
+ env->buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!env->buf) {
+ kfree(env);
+ return NULL;
+ }
+
+ env->buf_size = buf_size;
+
+ return env;
+}
+
+static void free_mock_str_env(struct test_mock_string_env *env)
+{
+ kfree(env->buf);
+ kfree(env);
+}
+
+/* Helper function to create hfsplus_unistr */
+static void create_unistr(struct hfsplus_unistr *ustr, const char *ascii_str)
+{
+ int len = strlen(ascii_str);
+ int i;
+
+ memset(ustr->unicode, 0, sizeof(ustr->unicode));
+
+ ustr->length = cpu_to_be16(len);
+ for (i = 0; i < len && i < HFSPLUS_MAX_STRLEN; i++)
+ ustr->unicode[i] = cpu_to_be16((u16)ascii_str[i]);
+}
+
+static void corrupt_unistr(struct hfsplus_unistr *ustr)
+{
+ ustr->length = cpu_to_be16(U16_MAX);
+}
+
+/* Test hfsplus_strcasecmp function */
+static void hfsplus_strcasecmp_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test identical strings */
+ create_unistr(&mock_env->str1, "hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test case insensitive comparison */
+ create_unistr(&mock_env->str1, "Hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "HELLO");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test different strings */
+ create_unistr(&mock_env->str1, "apple");
+ create_unistr(&mock_env->str2, "banana");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "zebra");
+ create_unistr(&mock_env->str2, "apple");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test different lengths */
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test empty strings */
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test single characters */
+ create_unistr(&mock_env->str1, "A");
+ create_unistr(&mock_env->str2, "a");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "A");
+ create_unistr(&mock_env->str2, "B");
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test maximum length strings */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN);
+ mock_env->buf[HFSPLUS_MAX_STRLEN] = '\0';
+ create_unistr(&mock_env->str1, mock_env->buf);
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Change one character in the middle */
+ mock_env->buf[HFSPLUS_MAX_STRLEN / 2] = 'b';
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test corrupted strings */
+ create_unistr(&mock_env->str1, "");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "test");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_LT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ free_mock_str_env(mock_env);
+}
+
+/* Test hfsplus_strcmp function (case-sensitive) */
+static void hfsplus_strcmp_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test identical strings */
+ create_unistr(&mock_env->str1, "hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test case sensitive comparison - should NOT be equal */
+ create_unistr(&mock_env->str1, "Hello");
+ create_unistr(&mock_env->str2, "hello");
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+ /* 'H' < 'h' in Unicode */
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test lexicographic ordering */
+ create_unistr(&mock_env->str1, "apple");
+ create_unistr(&mock_env->str2, "banana");
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "zebra");
+ create_unistr(&mock_env->str2, "apple");
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test different lengths with common prefix */
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test empty strings */
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test maximum length strings */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN);
+ mock_env->buf[HFSPLUS_MAX_STRLEN] = '\0';
+ create_unistr(&mock_env->str1, mock_env->buf);
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Change one character in the middle */
+ mock_env->buf[HFSPLUS_MAX_STRLEN / 2] = 'b';
+ create_unistr(&mock_env->str2, mock_env->buf);
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test corrupted strings */
+ create_unistr(&mock_env->str1, "");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "");
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "");
+ create_unistr(&mock_env->str2, "");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ create_unistr(&mock_env->str1, "test");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "testing");
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "test");
+ create_unistr(&mock_env->str2, "testing");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ corrupt_unistr(&mock_env->str1);
+ create_unistr(&mock_env->str2, "test");
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ create_unistr(&mock_env->str1, "testing");
+ create_unistr(&mock_env->str2, "test");
+ corrupt_unistr(&mock_env->str2);
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ free_mock_str_env(mock_env);
+}
+
+/* Test Unicode edge cases */
+static void hfsplus_unicode_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test with special characters */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16(0x00E9); /* é */
+ mock_env->str1.unicode[1] = cpu_to_be16(0x00F1); /* ñ */
+ mock_env->str1.unicode[2] = cpu_to_be16(0x00FC); /* ü */
+
+ mock_env->str2.length = cpu_to_be16(3);
+ mock_env->str2.unicode[0] = cpu_to_be16(0x00E9); /* é */
+ mock_env->str2.unicode[1] = cpu_to_be16(0x00F1); /* ñ */
+ mock_env->str2.unicode[2] = cpu_to_be16(0x00FC); /* ü */
+
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test with different special characters */
+ mock_env->str2.unicode[1] = cpu_to_be16(0x00F2); /* ò */
+ KUNIT_EXPECT_NE(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test null characters within string (should be handled correctly) */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16('a');
+ mock_env->str1.unicode[1] = cpu_to_be16(0x0000); /* null */
+ mock_env->str1.unicode[2] = cpu_to_be16('b');
+
+ mock_env->str2.length = cpu_to_be16(3);
+ mock_env->str2.unicode[0] = cpu_to_be16('a');
+ mock_env->str2.unicode[1] = cpu_to_be16(0x0000); /* null */
+ mock_env->str2.unicode[2] = cpu_to_be16('b');
+
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ free_mock_str_env(mock_env);
+}
+
+/* Test boundary conditions */
+static void hfsplus_unicode_boundary_test(struct kunit *test)
+{
+ struct test_mock_string_env *mock_env;
+ int i;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ /* Test maximum length boundary */
+ mock_env->str1.length = cpu_to_be16(HFSPLUS_MAX_STRLEN);
+ mock_env->str2.length = cpu_to_be16(HFSPLUS_MAX_STRLEN);
+
+ for (i = 0; i < HFSPLUS_MAX_STRLEN; i++) {
+ mock_env->str1.unicode[i] = cpu_to_be16('A');
+ mock_env->str2.unicode[i] = cpu_to_be16('A');
+ }
+
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Change last character */
+ mock_env->str2.unicode[HFSPLUS_MAX_STRLEN - 1] = cpu_to_be16('B');
+ KUNIT_EXPECT_LT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ /* Test zero length strings */
+ mock_env->str1.length = cpu_to_be16(0);
+ mock_env->str2.length = cpu_to_be16(0);
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2));
+ KUNIT_EXPECT_EQ(test, 0, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2));
+
+ /* Test one character vs empty */
+ mock_env->str1.length = cpu_to_be16(1);
+ mock_env->str1.unicode[0] = cpu_to_be16('A');
+ mock_env->str2.length = cpu_to_be16(0);
+ KUNIT_EXPECT_GT(test, hfsplus_strcmp(&mock_env->str1,
+ &mock_env->str2), 0);
+ KUNIT_EXPECT_GT(test, hfsplus_strcasecmp(&mock_env->str1,
+ &mock_env->str2), 0);
+
+ free_mock_str_env(mock_env);
+}
+
+/* Mock superblock and NLS table for testing hfsplus_uni2asc */
+struct test_mock_sb {
+ struct nls_table nls;
+ struct hfsplus_sb_info sb_info;
+ struct super_block sb;
+};
+
+static struct test_mock_sb *setup_mock_sb(void)
+{
+ struct test_mock_sb *ptr;
+
+ ptr = kzalloc(sizeof(struct test_mock_sb), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ptr->nls.charset = "utf8";
+ ptr->nls.uni2char = NULL; /* each test assigns its own helper before use */
+ ptr->sb_info.nls = &ptr->nls;
+ ptr->sb.s_fs_info = &ptr->sb_info;
+
+ /* Default flags: decomposition enabled (NODECOMPOSE clear), no case folding */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &ptr->sb_info.flags);
+ clear_bit(HFSPLUS_SB_CASEFOLD, &ptr->sb_info.flags);
+
+ return ptr;
+}
+
+static void free_mock_sb(struct test_mock_sb *ptr)
+{
+ kfree(ptr);
+}
+
+/* Simple uni2char implementation for testing */
+static int test_uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ if (uni < 0x80) {
+ *out = (unsigned char)uni;
+ return 1;
+ }
+
+ /* For non-ASCII, just use '?' as fallback */
+ *out = '?';
+ return 1;
+}
+
+/* Test hfsplus_uni2asc basic functionality */
+static void hfsplus_uni2asc_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test simple ASCII string conversion */
+ create_unistr(&mock_env->str1, "hello");
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 5, len);
+ KUNIT_EXPECT_STREQ(test, "hello", mock_env->buf);
+
+ /* Test empty string */
+ create_unistr(&mock_env->str1, "");
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 0, len);
+
+ /* Test single character */
+ create_unistr(&mock_env->str1, "A");
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, len);
+ KUNIT_EXPECT_EQ(test, 'A', mock_env->buf[0]);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling */
+static void hfsplus_uni2asc_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test null character conversion (should become 0x2400) */
+ mock_env->str1.length = cpu_to_be16(1);
+ mock_env->str1.unicode[0] = cpu_to_be16(0x0000);
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, len);
+ /* Our test implementation returns '?' for non-ASCII */
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[0]);
+
+ /* Test forward slash conversion (should become colon) */
+ mock_env->str1.length = cpu_to_be16(1);
+ mock_env->str1.unicode[0] = cpu_to_be16('/');
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, len);
+ KUNIT_EXPECT_EQ(test, ':', mock_env->buf[0]);
+
+ /* Test string with mixed special characters */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16('a');
+ mock_env->str1.unicode[1] = cpu_to_be16('/');
+ mock_env->str1.unicode[2] = cpu_to_be16('b');
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, len);
+ KUNIT_EXPECT_EQ(test, 'a', mock_env->buf[0]);
+ KUNIT_EXPECT_EQ(test, ':', mock_env->buf[1]);
+ KUNIT_EXPECT_EQ(test, 'b', mock_env->buf[2]);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test buffer length handling */
+static void hfsplus_uni2asc_buffer_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(10);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test insufficient buffer space */
+ create_unistr(&mock_env->str1, "toolongstring");
+ len = 5; /* Buffer too small */
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 5, len); /* Should be set to consumed length */
+
+ /* Test exact buffer size */
+ create_unistr(&mock_env->str1, "exact");
+ len = 5;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 5, len);
+
+ /* Test zero length buffer */
+ create_unistr(&mock_env->str1, "test");
+ len = 0;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 0, len);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test corrupted unicode string handling */
+static void hfsplus_uni2asc_corrupted_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test corrupted length (too large) */
+ create_unistr(&mock_env->str1, "test");
+ corrupt_unistr(&mock_env->str1); /* Sets length to U16_MAX */
+ len = mock_env->buf_size;
+
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ /* Should still work but with corrected length */
+ KUNIT_EXPECT_EQ(test, 0, result);
+ /*
+ * Length should be corrected to HFSPLUS_MAX_STRLEN
+ * and processed accordingly
+ */
+ KUNIT_EXPECT_GT(test, len, 0);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test edge cases and boundary conditions */
+static void hfsplus_uni2asc_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int len, result;
+ int i;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN * 2);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.uni2char = test_uni2char;
+
+ /* Test maximum length string */
+ mock_env->str1.length = cpu_to_be16(HFSPLUS_MAX_STRLEN);
+ for (i = 0; i < HFSPLUS_MAX_STRLEN; i++)
+ mock_env->str1.unicode[i] = cpu_to_be16('a');
+
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, HFSPLUS_MAX_STRLEN, len);
+
+ /* Verify all characters are 'a' */
+ for (i = 0; i < HFSPLUS_MAX_STRLEN; i++)
+ KUNIT_EXPECT_EQ(test, 'a', mock_env->buf[i]);
+
+ /* Test string with high Unicode values (non-ASCII) */
+ mock_env->str1.length = cpu_to_be16(3);
+ mock_env->str1.unicode[0] = cpu_to_be16(0x00E9); /* é */
+ mock_env->str1.unicode[1] = cpu_to_be16(0x00F1); /* ñ */
+ mock_env->str1.unicode[2] = cpu_to_be16(0x00FC); /* ü */
+ len = mock_env->buf_size;
+ result = hfsplus_uni2asc_str(&mock_sb->sb, &mock_env->str1,
+ mock_env->buf, &len);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, len);
+ /* Our test implementation converts non-ASCII to '?' */
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[0]);
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[1]);
+ KUNIT_EXPECT_EQ(test, '?', mock_env->buf[2]);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Simple char2uni implementation for testing */
+static int test_char2uni(const unsigned char *rawstring,
+ int boundlen, wchar_t *uni)
+{
+ if (boundlen <= 0)
+ return -EINVAL;
+
+ *uni = (wchar_t)*rawstring;
+ return 1;
+}
+
+/* Helper function to check unicode string contents */
+static void check_unistr_content(struct kunit *test,
+ struct hfsplus_unistr *ustr,
+ const char *expected_ascii)
+{
+ int expected_len = strlen(expected_ascii);
+ int actual_len = be16_to_cpu(ustr->length);
+ int i;
+
+ KUNIT_EXPECT_EQ(test, expected_len, actual_len);
+
+ for (i = 0; i < expected_len && i < actual_len; i++) {
+ u16 expected_char = (u16)expected_ascii[i];
+ u16 actual_char = be16_to_cpu(ustr->unicode[i]);
+
+ KUNIT_EXPECT_EQ(test, expected_char, actual_char);
+ }
+}
+
+/* Test hfsplus_asc2uni basic functionality */
+static void hfsplus_asc2uni_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test simple ASCII string conversion */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "hello", 5);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "hello");
+
+ /* Test empty string */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "", 0);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(mock_env->str1.length));
+
+ /* Test single character */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "A", 1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "A");
+
+ /* Test null-terminated string with explicit length */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "test\0extra", 4);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "test");
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling in asc2uni */
+static void hfsplus_asc2uni_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test colon conversion (should become forward slash) */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, ":", 1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 1, be16_to_cpu(mock_env->str1.length));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[0]));
+
+ /* Test string with mixed special characters */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "a:b", 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, be16_to_cpu(mock_env->str1.length));
+ KUNIT_EXPECT_EQ(test, 'a', be16_to_cpu(mock_env->str1.unicode[0]));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[1]));
+ KUNIT_EXPECT_EQ(test, 'b', be16_to_cpu(mock_env->str1.unicode[2]));
+
+ /* Test multiple special characters */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, ":::", 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, be16_to_cpu(mock_env->str1.length));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[0]));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[1]));
+ KUNIT_EXPECT_EQ(test, '/', be16_to_cpu(mock_env->str1.unicode[2]));
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test buffer length limits */
+static void hfsplus_asc2uni_buffer_limits_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 10);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test exact maximum length */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN);
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &mock_env->str1, HFSPLUS_MAX_STRLEN,
+ mock_env->buf, HFSPLUS_MAX_STRLEN);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, HFSPLUS_MAX_STRLEN,
+ be16_to_cpu(mock_env->str1.length));
+
+ /* Test exceeding maximum length */
+ memset(mock_env->buf, 'a', HFSPLUS_MAX_STRLEN + 5);
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &mock_env->str1, HFSPLUS_MAX_STRLEN,
+ mock_env->buf, HFSPLUS_MAX_STRLEN + 5);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, HFSPLUS_MAX_STRLEN,
+ be16_to_cpu(mock_env->str1.length));
+
+ /* Test with smaller max_unistr_len */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &mock_env->str1, 5, "toolongstring", 13);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 5, be16_to_cpu(mock_env->str1.length));
+
+ /* Test zero max length */
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1, 0, "test", 4);
+
+ KUNIT_EXPECT_EQ(test, -ENAMETOOLONG, result);
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(mock_env->str1.length));
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test error handling and edge cases */
+static void hfsplus_asc2uni_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct hfsplus_unistr ustr;
+ char test_str[] = {'a', '\0', 'b'};
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test zero length input */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, "test", 0);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(ustr.length));
+
+ /* Test input with length mismatch */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, "hello", 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &ustr, "hel");
+
+ /* Test with various printable ASCII characters */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, "ABC123!@#", 9);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &ustr, "ABC123!@#");
+
+ /* Test null character in the middle */
+ result = hfsplus_asc2uni(&mock_sb->sb,
+ &ustr, HFSPLUS_MAX_STRLEN, test_str, 3);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, 3, be16_to_cpu(ustr.length));
+ KUNIT_EXPECT_EQ(test, 'a', be16_to_cpu(ustr.unicode[0]));
+ KUNIT_EXPECT_EQ(test, 0, be16_to_cpu(ustr.unicode[1]));
+ KUNIT_EXPECT_EQ(test, 'b', be16_to_cpu(ustr.unicode[2]));
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test decomposition flag behavior */
+static void hfsplus_asc2uni_decompose_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with decomposition enabled (NODECOMPOSE clear, the default) */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str1,
+ HFSPLUS_MAX_STRLEN, "test", 4);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str1, "test");
+
+ /* Test with decomposition disabled (nodecompose) */
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+ result = hfsplus_asc2uni(&mock_sb->sb, &mock_env->str2,
+ HFSPLUS_MAX_STRLEN, "test", 4);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ check_unistr_content(test, &mock_env->str2, "test");
+
+ /* For simple ASCII, both should produce the same result */
+ KUNIT_EXPECT_EQ(test,
+ be16_to_cpu(mock_env->str1.length),
+ be16_to_cpu(mock_env->str2.length));
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Mock dentry for testing hfsplus_hash_dentry */
+static struct dentry test_dentry;
+
+static void setup_mock_dentry(struct super_block *sb)
+{
+ memset(&test_dentry, 0, sizeof(test_dentry));
+ test_dentry.d_sb = sb;
+}
+
+/* Helper function to create qstr */
+static void create_qstr(struct qstr *str, const char *name)
+{
+ str->name = name;
+ str->len = strlen(name);
+ str->hash = 0; /* Will be set by hash function */
+}
+
+/* Test hfsplus_hash_dentry basic functionality */
+static void hfsplus_hash_dentry_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test basic string hashing */
+ create_qstr(&str1, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str1.hash);
+
+ /* Test that identical strings produce identical hashes */
+ create_qstr(&str2, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ /* Test empty string */
+ create_qstr(&str1, "");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+
+ /* Empty string should still produce a hash */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test single character */
+ create_qstr(&str1, "A");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str1.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test case folding behavior in hash */
+static void hfsplus_hash_dentry_casefold_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with case folding disabled (default) */
+ clear_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&str1, "Hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /*
+ * Without case folding, different cases
+ * should produce different hashes
+ */
+ KUNIT_EXPECT_NE(test, str1.hash, str2.hash);
+
+ /* Test with case folding enabled */
+ set_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&str1, "Hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "hello");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* With case folding, different cases should produce same hash */
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ /* Test mixed case */
+ create_qstr(&str1, "HeLLo");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling in hash */
+static void hfsplus_hash_dentry_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test colon conversion (: becomes /) */
+ create_qstr(&str1, "file:name");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "file/name");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* After conversion, these should produce the same hash */
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ /* Test multiple special characters */
+ create_qstr(&str1, ":::");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "///");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_EQ(test, str1.hash, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test decomposition flag behavior in hash */
+static void hfsplus_hash_dentry_decompose_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with decomposition enabled (NODECOMPOSE clear, the default) */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&str1, "test");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with decomposition disabled (nodecompose) */
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&str2, "test");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /*
+ * For simple ASCII, toggling decomposition should not change the
+ * hash; either way the function must still produce a valid hash
+ */
+ KUNIT_EXPECT_NE(test, 0, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test hash consistency and distribution */
+static void hfsplus_hash_dentry_consistency_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr str1, str2, str3;
+ unsigned long hash1;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test that same string always produces same hash */
+ create_qstr(&str1, "consistent");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+ hash1 = str1.hash;
+
+ create_qstr(&str2, "consistent");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_EQ(test, hash1, str2.hash);
+
+ /* Test that different strings produce different hashes */
+ create_qstr(&str3, "different");
+ result = hfsplus_hash_dentry(&test_dentry, &str3);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_NE(test, str1.hash, str3.hash);
+
+ /* Test similar strings should have different hashes */
+ create_qstr(&str1, "file1");
+ result = hfsplus_hash_dentry(&test_dentry, &str1);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&str2, "file2");
+ result = hfsplus_hash_dentry(&test_dentry, &str2);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ KUNIT_EXPECT_NE(test, str1.hash, str2.hash);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test edge cases and boundary conditions */
+static void hfsplus_hash_dentry_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct test_mock_string_env *mock_env;
+ struct qstr str;
+ int result;
+
+ mock_env = setup_mock_str_env(HFSPLUS_MAX_STRLEN + 1);
+ KUNIT_ASSERT_NOT_NULL(test, mock_env);
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test very long filename */
+ memset(mock_env->buf, 'a', mock_env->buf_size - 1);
+ mock_env->buf[mock_env->buf_size - 1] = '\0';
+
+ create_qstr(&str, mock_env->buf);
+ result = hfsplus_hash_dentry(&test_dentry, &str);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str.hash);
+
+ /* Test filename with all printable ASCII characters */
+ create_qstr(&str, "!@#$%^&*()_+-=[]{}|;':\",./<>?");
+ result = hfsplus_hash_dentry(&test_dentry, &str);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str.hash);
+
+ /* Test with embedded null (though not typical for filenames) */
+ str.name = "file\0hidden";
+ str.len = 11; /* Include the null and text after it */
+ str.hash = 0;
+ result = hfsplus_hash_dentry(&test_dentry, &str);
+
+ KUNIT_EXPECT_EQ(test, 0, result);
+ KUNIT_EXPECT_NE(test, 0, str.hash);
+
+ free_mock_str_env(mock_env);
+ free_mock_sb(mock_sb);
+}
+
+/* Test hfsplus_compare_dentry basic functionality */
+static void hfsplus_compare_dentry_basic_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test identical strings */
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test different strings - lexicographic order */
+ create_qstr(&name, "world");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "hello" < "world" */
+
+ result = hfsplus_compare_dentry(&test_dentry, 5, "world", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "world", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* "world" > "hello" */
+
+ /* Test empty strings */
+ create_qstr(&name, "");
+ result = hfsplus_compare_dentry(&test_dentry, 0, "", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test one empty, one non-empty */
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 0, "", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "" < "test" */
+
+ create_qstr(&name, "");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* "test" > "" */
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test case folding behavior in comparison */
+static void hfsplus_compare_dentry_casefold_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with case folding disabled (default) */
+ clear_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "Hello", &name);
+ /* Case sensitive: "Hello" != "hello" */
+ KUNIT_EXPECT_NE(test, 0, result);
+
+ create_qstr(&name, "Hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ /* Case sensitive: "hello" != "Hello" */
+ KUNIT_EXPECT_NE(test, 0, result);
+
+ /* Test with case folding enabled */
+ set_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "Hello", &name);
+ /* Case insensitive: "Hello" == "hello" */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "Hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "hello", &name);
+ /* Case insensitive: "hello" == "Hello" */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test mixed case */
+ create_qstr(&name, "TeSt");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "TEST", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test special character handling in comparison */
+static void hfsplus_compare_dentry_special_chars_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test colon conversion (: becomes /) */
+ create_qstr(&name, "file/name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file:name", &name);
+ /* "file:name" == "file/name" after conversion */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ create_qstr(&name, "file:name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file/name", &name);
+ /* "file/name" == "file:name" after conversion */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test multiple special characters */
+ create_qstr(&name, "///");
+ result = hfsplus_compare_dentry(&test_dentry, 3, ":::", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test mixed special and regular characters */
+ create_qstr(&name, "a/b:c");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "a:b/c", &name);
+ /* Both become "a/b/c" after conversion */
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test length differences */
+static void hfsplus_compare_dentry_length_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test different lengths with common prefix */
+ create_qstr(&name, "testing");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "test" < "testing" */
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 7, "testing", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* "testing" > "test" */
+
+ /* Test exact length match */
+ create_qstr(&name, "exact");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "exact", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test length parameter vs actual string content */
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 3, "hel", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* "hel" < "hello" */
+
+ /* Test longer first string but shorter length parameter */
+ create_qstr(&name, "hi");
+ result = hfsplus_compare_dentry(&test_dentry, 2, "hello", &name);
+ /* "he" < "hi" (only first 2 chars compared) */
+ KUNIT_EXPECT_LT(test, result, 0);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test decomposition flag behavior */
+static void hfsplus_compare_dentry_decompose_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with decomposition enabled (HFSPLUS_SB_NODECOMPOSE clear, the mock's default) */
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with decomposition disabled (HFSPLUS_SB_NODECOMPOSE set) */
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "test");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* For simple ASCII, decomposition shouldn't affect the result */
+ create_qstr(&name, "different");
+ result = hfsplus_compare_dentry(&test_dentry, 4, "test", &name);
+ KUNIT_EXPECT_NE(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+/* Test edge cases and boundary conditions */
+static void hfsplus_compare_dentry_edge_cases_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ char *long_str;
+ char *long_str2;
+ u32 str_size = HFSPLUS_MAX_STRLEN + 1;
+ struct qstr null_name = {
+ .name = "a\0b",
+ .len = 3,
+ .hash = 0
+ };
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ long_str = kzalloc(str_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, long_str);
+
+ long_str2 = kzalloc(str_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, long_str2);
+
+ /* Test very long strings */
+ memset(long_str, 'a', str_size - 1);
+ long_str[str_size - 1] = '\0';
+
+ create_qstr(&name, long_str);
+ result = hfsplus_compare_dentry(&test_dentry, str_size - 1,
+ long_str, &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with difference at the end of long strings */
+ memset(long_str2, 'a', str_size - 1);
+ long_str2[str_size - 1] = '\0';
+ long_str2[str_size - 2] = 'b';
+ create_qstr(&name, long_str2);
+ result = hfsplus_compare_dentry(&test_dentry, str_size - 1,
+ long_str, &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* 'a' < 'b' */
+
+ /* Test single character differences */
+ create_qstr(&name, "b");
+ result = hfsplus_compare_dentry(&test_dentry, 1, "a", &name);
+ KUNIT_EXPECT_LT(test, result, 0); /* 'a' < 'b' */
+
+ create_qstr(&name, "a");
+ result = hfsplus_compare_dentry(&test_dentry, 1, "b", &name);
+ KUNIT_EXPECT_GT(test, result, 0); /* 'b' > 'a' */
+
+ /* Test with null characters in the middle */
+ result = hfsplus_compare_dentry(&test_dentry, 3, "a\0b", &null_name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test all printable ASCII characters */
+ create_qstr(&name, "!@#$%^&*()");
+ result = hfsplus_compare_dentry(&test_dentry, 10, "!@#$%^&*()", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ kfree(long_str);
+ kfree(long_str2);
+ free_mock_sb(mock_sb);
+}
+
+/* Test combined flag behaviors */
+static void hfsplus_compare_dentry_combined_flags_test(struct kunit *test)
+{
+ struct test_mock_sb *mock_sb;
+ struct qstr name;
+ int result;
+
+ mock_sb = setup_mock_sb();
+ KUNIT_ASSERT_NOT_NULL(test, mock_sb);
+
+ setup_mock_dentry(&mock_sb->sb);
+ mock_sb->nls.char2uni = test_char2uni;
+
+ /* Test with both the casefold and nodecompose flags set */
+ set_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "HELLO", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test special chars with case folding */
+ create_qstr(&name, "File/Name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file:name", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ /* Test with both flags disabled */
+ clear_bit(HFSPLUS_SB_CASEFOLD, &mock_sb->sb_info.flags);
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &mock_sb->sb_info.flags);
+
+ create_qstr(&name, "hello");
+ result = hfsplus_compare_dentry(&test_dentry, 5, "HELLO", &name);
+ KUNIT_EXPECT_NE(test, 0, result); /* Case sensitive */
+
+ /* But special chars should still be converted */
+ create_qstr(&name, "file/name");
+ result = hfsplus_compare_dentry(&test_dentry, 9, "file:name", &name);
+ KUNIT_EXPECT_EQ(test, 0, result);
+
+ free_mock_sb(mock_sb);
+}
+
+static struct kunit_case hfsplus_unicode_test_cases[] = {
+ KUNIT_CASE(hfsplus_strcasecmp_test),
+ KUNIT_CASE(hfsplus_strcmp_test),
+ KUNIT_CASE(hfsplus_unicode_edge_cases_test),
+ KUNIT_CASE(hfsplus_unicode_boundary_test),
+ KUNIT_CASE(hfsplus_uni2asc_basic_test),
+ KUNIT_CASE(hfsplus_uni2asc_special_chars_test),
+ KUNIT_CASE(hfsplus_uni2asc_buffer_test),
+ KUNIT_CASE(hfsplus_uni2asc_corrupted_test),
+ KUNIT_CASE(hfsplus_uni2asc_edge_cases_test),
+ KUNIT_CASE(hfsplus_asc2uni_basic_test),
+ KUNIT_CASE(hfsplus_asc2uni_special_chars_test),
+ KUNIT_CASE(hfsplus_asc2uni_buffer_limits_test),
+ KUNIT_CASE(hfsplus_asc2uni_edge_cases_test),
+ KUNIT_CASE(hfsplus_asc2uni_decompose_test),
+ KUNIT_CASE(hfsplus_hash_dentry_basic_test),
+ KUNIT_CASE(hfsplus_hash_dentry_casefold_test),
+ KUNIT_CASE(hfsplus_hash_dentry_special_chars_test),
+ KUNIT_CASE(hfsplus_hash_dentry_decompose_test),
+ KUNIT_CASE(hfsplus_hash_dentry_consistency_test),
+ KUNIT_CASE(hfsplus_hash_dentry_edge_cases_test),
+ KUNIT_CASE(hfsplus_compare_dentry_basic_test),
+ KUNIT_CASE(hfsplus_compare_dentry_casefold_test),
+ KUNIT_CASE(hfsplus_compare_dentry_special_chars_test),
+ KUNIT_CASE(hfsplus_compare_dentry_length_test),
+ KUNIT_CASE(hfsplus_compare_dentry_decompose_test),
+ KUNIT_CASE(hfsplus_compare_dentry_edge_cases_test),
+ KUNIT_CASE(hfsplus_compare_dentry_combined_flags_test),
+ {}
+};
+
+static struct kunit_suite hfsplus_unicode_test_suite = {
+ .name = "hfsplus_unicode",
+ .test_cases = hfsplus_unicode_test_cases,
+};
+
+kunit_test_suite(hfsplus_unicode_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for HFS+ Unicode string operations");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
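The special-character and casefold cases above rely on hfsplus hashing and comparing names only after mapping ':' and '/' to one and the same character and, when HFSPLUS_SB_CASEFOLD is set, after folding case. A minimal per-character sketch of that canonicalization, assuming ASCII-only input as the mock char2uni table supplies — the helper name is illustrative and not part of the driver:

#include <linux/types.h>

/* Illustrative sketch only; the real driver works on Unicode code points. */
static inline u16 test_canonical_char(u16 c, bool casefold)
{
        if (c == ':')
                c = '/';                /* ':' and '/' name the same on-disk character */
        if (casefold && c >= 'A' && c <= 'Z')
                c += 'a' - 'A';         /* ASCII-only folding, matching the mock table */
        return c;
}

Two names hash and compare equal exactly when their canonicalized character sequences match, which is what the expectations in the special-character and casefold tests encode.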
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 51ae6f1eb4a5..30cf4fe78b3d 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -12,8 +12,7 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
-#include <linux/genhd.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
@@ -31,8 +30,7 @@ struct hfsplus_wd {
* @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
* @buf: buffer for I/O
* @data: output pointer for location of requested data
- * @op: direction of I/O
- * @op_flags: request op flags
+ * @opf: I/O operation type and flags
*
* The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
* HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
@@ -44,52 +42,25 @@ struct hfsplus_wd {
* that starts at the rounded-down address. As long as the data was
* read using hfsplus_submit_bio() and the same buffer is used things
* will work correctly.
+ *
+ * Returns: %0 on success else -errno code
*/
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
- void *buf, void **data, int op, int op_flags)
+ void *buf, void **data, blk_opf_t opf)
{
- struct bio *bio;
- int ret = 0;
- u64 io_size;
- loff_t start;
- int offset;
+ u64 io_size = hfsplus_min_io_size(sb);
+ loff_t start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
+ int offset = start & (io_size - 1);
+
+ if ((opf & REQ_OP_MASK) != REQ_OP_WRITE && data)
+ *data = (u8 *)buf + offset;
/*
- * Align sector to hardware sector size and find offset. We
- * assume that io_size is a power of two, which _should_
- * be true.
+ * Align sector to hardware sector size and find offset. We assume that
+ * io_size is a power of two, which _should_ be true.
*/
- io_size = hfsplus_min_io_size(sb);
- start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
- offset = start & (io_size - 1);
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
-
- bio = bio_alloc(GFP_NOIO, 1);
- bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, sb->s_bdev);
- bio_set_op_attrs(bio, op, op_flags);
-
- if (op != WRITE && data)
- *data = (u8 *)buf + offset;
-
- while (io_size > 0) {
- unsigned int page_offset = offset_in_page(buf);
- unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
- io_size);
-
- ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
- if (ret != len) {
- ret = -EIO;
- goto out;
- }
- io_size -= len;
- buf = (u8 *)buf + len;
- }
-
- ret = submit_bio_wait(bio);
-out:
- bio_put(bio);
- return ret < 0 ? ret : 0;
+ return bdev_rw_virt(sb->s_bdev, sector, buf, io_size, opf);
}
static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
@@ -173,6 +144,8 @@ int hfsplus_read_wrapper(struct super_block *sb)
if (!blocksize)
goto out;
+ sbi->min_io_size = blocksize;
+
if (hfsplus_get_last_session(sb, &part_start, &part_size))
goto out;
@@ -187,7 +160,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
reread:
error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
- REQ_OP_READ, 0);
+ REQ_OP_READ);
if (error)
goto out_free_backup_vhdr;
@@ -219,8 +192,7 @@ reread:
error = hfsplus_submit_bio(sb, part_start + part_size - 2,
sbi->s_backup_vhdr_buf,
- (void **)&sbi->s_backup_vhdr, REQ_OP_READ,
- 0);
+ (void **)&sbi->s_backup_vhdr, REQ_OP_READ);
if (error)
goto out_free_backup_vhdr;
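For reference, a worked example of the rounding the rewritten hfsplus_submit_bio() performs before calling bdev_rw_virt(); the concrete sizes below are assumptions for illustration, not values taken from the patch:

/*
 * Assuming hfsplus_min_io_size(sb) == 4096 and HFSPLUS_SECTOR_SIZE == 512
 * (HFSPLUS_SECTOR_SHIFT == 9), a request for sector 5 works out to:
 *
 *      start  = 5 << 9               = 2560
 *      offset = 2560 & (4096 - 1)    = 2560
 *      sector &= ~((4096 >> 9) - 1)  ->  sector = 0
 *
 * so a single 4096-byte transfer starts at hardware block 0, and on reads
 * *data is set to buf + 2560, pointing at the sector the caller asked for.
 */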
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index e2855ceefd39..da95a9de9a65 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -13,7 +13,7 @@
static int hfsplus_removexattr(struct inode *inode, const char *name);
-const struct xattr_handler *hfsplus_xattr_handlers[] = {
+const struct xattr_handler * const hfsplus_xattr_handlers[] = {
&hfsplus_xattr_osx_handler,
&hfsplus_xattr_user_handler,
&hfsplus_xattr_trusted_handler,
@@ -64,7 +64,7 @@ static void hfsplus_init_header_node(struct inode *attr_file,
u32 used_bmp_bytes;
u64 tmp;
- hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
+ hfs_dbg("clump %u, node_size %u\n",
clump_size, node_size);
/* The end of the node contains list of record offsets */
@@ -132,7 +132,7 @@ static int hfsplus_create_attributes_file(struct super_block *sb)
struct page *page;
int old_state = HFSPLUS_EMPTY_ATTR_TREE;
- hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);
+ hfs_dbg("ino %d\n", HFSPLUS_ATTR_CNID);
check_attr_tree_state_again:
switch (atomic_read(&sbi->attr_tree_state)) {
@@ -172,7 +172,11 @@ check_attr_tree_state_again:
return PTR_ERR(attr_file);
}
- BUG_ON(i_size_read(attr_file) != 0);
+ if (i_size_read(attr_file) != 0) {
+ err = -EIO;
+ pr_err("detected inconsistent attributes file, running fsck.hfsplus is recommended.\n");
+ goto end_attr_file_creation;
+ }
hip = HFSPLUS_I(attr_file);
@@ -257,14 +261,12 @@ end_attr_file_creation:
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- int err = 0;
+ int err;
struct hfs_find_data cat_fd;
hfsplus_cat_entry entry;
u16 cat_entry_flags, cat_entry_type;
- u16 folder_finderinfo_len = sizeof(struct DInfo) +
- sizeof(struct DXInfo);
- u16 file_finderinfo_len = sizeof(struct FInfo) +
- sizeof(struct FXInfo);
+ u16 folder_finderinfo_len = sizeof(DInfo) + sizeof(DXInfo);
+ u16 file_finderinfo_len = sizeof(FInfo) + sizeof(FXInfo);
if ((!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) ||
@@ -296,7 +298,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
sizeof(hfsplus_cat_entry));
if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
if (size == folder_finderinfo_len) {
- memcpy(&entry.folder.user_info, value,
+ memcpy(&entry.folder.info, value,
folder_finderinfo_len);
hfs_bnode_write(cat_fd.bnode, &entry,
cat_fd.entryoffset,
@@ -309,7 +311,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
}
} else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
if (size == file_finderinfo_len) {
- memcpy(&entry.file.user_info, value,
+ memcpy(&entry.file.info, value,
file_finderinfo_len);
hfs_bnode_write(cat_fd.bnode, &entry,
cat_fd.entryoffset,
@@ -400,21 +402,19 @@ static int name_len(const char *xattr_name, int xattr_name_len)
return len;
}
-static int copy_name(char *buffer, const char *xattr_name, int name_len)
+static ssize_t copy_name(char *buffer, const char *xattr_name, int name_len)
{
- int len = name_len;
- int offset = 0;
-
- if (!is_known_namespace(xattr_name)) {
- memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
- offset += XATTR_MAC_OSX_PREFIX_LEN;
- len += XATTR_MAC_OSX_PREFIX_LEN;
- }
+ ssize_t len;
- strncpy(buffer + offset, xattr_name, name_len);
- memset(buffer + offset + name_len, 0, 1);
- len += 1;
+ if (!is_known_namespace(xattr_name))
+ len = scnprintf(buffer, name_len + XATTR_MAC_OSX_PREFIX_LEN,
+ "%s%s", XATTR_MAC_OSX_PREFIX, xattr_name);
+ else
+ len = strscpy(buffer, xattr_name, name_len + 1);
+ /* include NUL-byte in length for non-empty name */
+ if (len >= 0)
+ len++;
return len;
}
@@ -442,11 +442,11 @@ static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
ssize_t res = 0;
struct hfs_find_data fd;
u16 entry_type;
- u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
- u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+ u16 folder_rec_len = sizeof(DInfo) + sizeof(DXInfo);
+ u16 file_rec_len = sizeof(FInfo) + sizeof(FXInfo);
u16 record_len = max(folder_rec_len, file_rec_len);
- u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
- u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+ u8 folder_finder_info[sizeof(DInfo) + sizeof(DXInfo)];
+ u8 file_finder_info[sizeof(FInfo) + sizeof(FXInfo)];
if (size >= record_len) {
res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
@@ -494,7 +494,7 @@ ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
__be32 xattr_record_type;
u32 record_type;
u16 record_length = 0;
- ssize_t res = 0;
+ ssize_t res;
if ((!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) ||
@@ -606,12 +606,12 @@ static inline int can_list(const char *xattr_name)
static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
char *buffer, size_t size)
{
- ssize_t res = 0;
+ ssize_t res;
struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
u16 entry_type;
- u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
- u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+ u8 folder_finder_info[sizeof(DInfo) + sizeof(DXInfo)];
+ u8 file_finder_info[sizeof(FInfo) + sizeof(FXInfo)];
unsigned long len, found_bit;
int xattr_name_len, symbols_count;
@@ -627,14 +627,14 @@ static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
if (entry_type == HFSPLUS_FOLDER) {
- len = sizeof(struct DInfo) + sizeof(struct DXInfo);
+ len = sizeof(DInfo) + sizeof(DXInfo);
hfs_bnode_read(fd.bnode, folder_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_folder, user_info),
len);
found_bit = find_first_bit((void *)folder_finder_info, len*8);
} else if (entry_type == HFSPLUS_FILE) {
- len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+ len = sizeof(FInfo) + sizeof(FXInfo);
hfs_bnode_read(fd.bnode, file_finder_info,
fd.entryoffset +
offsetof(struct hfsplus_cat_file, user_info),
@@ -674,10 +674,9 @@ end_listxattr_finder_info:
ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
ssize_t err;
- ssize_t res = 0;
+ ssize_t res;
struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
- u16 key_len = 0;
struct hfsplus_attr_key attr_key;
char *strbuf;
int xattr_name_len;
@@ -699,7 +698,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
return err;
}
- strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
+ strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
if (!strbuf) {
res = -ENOMEM;
@@ -719,7 +718,8 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
for (;;) {
- key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
+ u16 key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
+
if (key_len == 0 || key_len > fd.tree->max_key_len) {
pr_err("invalid xattr key length: %d\n", key_len);
res = -EIO;
@@ -733,9 +733,9 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
goto end_listxattr;
xattr_name_len = NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN;
- if (hfsplus_uni2asc(inode->i_sb,
- (const struct hfsplus_unistr *)&fd.key->attr.key_name,
- strbuf, &xattr_name_len)) {
+ if (hfsplus_uni2asc_xattr_str(inode->i_sb,
+ &fd.key->attr.key_name, strbuf,
+ &xattr_name_len)) {
pr_err("unicode conversion failed\n");
res = -EIO;
goto end_listxattr;
@@ -766,12 +766,12 @@ out:
static int hfsplus_removexattr(struct inode *inode, const char *name)
{
- int err = 0;
+ int err;
struct hfs_find_data cat_fd;
u16 flags;
u16 cat_entry_type;
- int is_xattr_acl_deleted = 0;
- int is_all_xattrs_deleted = 0;
+ int is_xattr_acl_deleted;
+ int is_all_xattrs_deleted;
if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
return -EOPNOTSUPP;
@@ -857,7 +857,7 @@ static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index d14e362b3eba..15cc55e41410 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -17,7 +17,7 @@ extern const struct xattr_handler hfsplus_xattr_user_handler;
extern const struct xattr_handler hfsplus_xattr_trusted_handler;
extern const struct xattr_handler hfsplus_xattr_security_handler;
-extern const struct xattr_handler *hfsplus_xattr_handlers[];
+extern const struct xattr_handler * const hfsplus_xattr_handlers[];
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags);
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index c1c7a16cbf21..90f68ec119cd 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -23,7 +23,7 @@ static int hfsplus_security_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_security_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index e150372ec564..fdbaebc1c49a 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -22,7 +22,7 @@ static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_trusted_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index a6b60b153916..6464b6c3d58d 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -22,7 +22,7 @@ static int hfsplus_user_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_user_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hostfs/Makefile b/fs/hostfs/Makefile
index d5beaffad43b..16be592e8085 100644
--- a/fs/hostfs/Makefile
+++ b/fs/hostfs/Makefile
@@ -3,9 +3,11 @@
# Licensed under the GPL
#
-hostfs-objs := hostfs_kern.o hostfs_user.o
+hostfs-objs := hostfs_kern.o
-obj-y :=
+hostfs-builtin-$(CONFIG_HOSTFS) += hostfs_user.o hostfs_user_exp.o
+
+obj-y := $(hostfs-builtin-y) $(hostfs-builtin-m)
obj-$(CONFIG_HOSTFS) += hostfs.o
-include arch/um/scripts/Makefile.rules
+include $(srctree)/arch/um/scripts/Makefile.rules
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 69cb796f6270..aa02599b770f 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -3,40 +3,8 @@
#define __UM_FS_HOSTFS
#include <os.h>
+#include <generated/asm-offsets.h>
-/*
- * These are exactly the same definitions as in fs.h, but the names are
- * changed so that this file can be included in both kernel and user files.
- */
-
-#define HOSTFS_ATTR_MODE 1
-#define HOSTFS_ATTR_UID 2
-#define HOSTFS_ATTR_GID 4
-#define HOSTFS_ATTR_SIZE 8
-#define HOSTFS_ATTR_ATIME 16
-#define HOSTFS_ATTR_MTIME 32
-#define HOSTFS_ATTR_CTIME 64
-#define HOSTFS_ATTR_ATIME_SET 128
-#define HOSTFS_ATTR_MTIME_SET 256
-
-/* This one is unused by hostfs. */
-#define HOSTFS_ATTR_FORCE 512 /* Not a change, but a change it */
-#define HOSTFS_ATTR_ATTR_FLAG 1024
-
-/*
- * If you are very careful, you'll notice that these two are missing:
- *
- * #define ATTR_KILL_SUID 2048
- * #define ATTR_KILL_SGID 4096
- *
- * and this is because they were added in 2.5 development.
- * Actually, they are not needed by most ->setattr() methods - they are set by
- * callers of notify_change() to notify that the setuid/setgid bits must be
- * dropped.
- * notify_change() will delete those flags, make sure attr->ia_valid & ATTR_MODE
- * is on, and remove the appropriate bits from attr->ia_mode (attr is a
- * "struct iattr *"). -BlaisorBlade
- */
struct hostfs_timespec {
long long tv_sec;
long long tv_nsec;
@@ -60,11 +28,13 @@ struct hostfs_stat {
unsigned int uid;
unsigned int gid;
unsigned long long size;
- struct hostfs_timespec atime, mtime, ctime;
+ struct hostfs_timespec atime, mtime, ctime, btime;
unsigned int blksize;
unsigned long long blocks;
- unsigned int maj;
- unsigned int min;
+ struct {
+ unsigned int maj;
+ unsigned int min;
+ } rdev, dev;
};
extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d5c9d886cd9f..51d26aa2b93e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -14,17 +14,26 @@
#include <linux/statfs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/writeback.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/namei.h>
#include "hostfs.h"
#include <init.h>
#include <kern.h>
+struct hostfs_fs_info {
+ char *host_root_path;
+};
+
struct hostfs_inode_info {
int fd;
fmode_t mode;
struct inode vfs_inode;
struct mutex open_mutex;
+ dev_t dev;
+ struct hostfs_timespec btime;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@@ -49,6 +58,7 @@ static int __init hostfs_args(char *options, int *add)
{
char *ptr;
+ *add = 0;
ptr = strchr(options, ',');
if (ptr != NULL)
*ptr++ = '\0';
@@ -86,30 +96,17 @@ __uml_setup("hostfs=", hostfs_args,
static char *__dentry_name(struct dentry *dentry, char *name)
{
char *p = dentry_path_raw(dentry, name, PATH_MAX);
- char *root;
- size_t len;
-
- root = dentry->d_sb->s_fs_info;
- len = strlen(root);
- if (IS_ERR(p)) {
- __putname(name);
- return NULL;
- }
+ struct hostfs_fs_info *fsi = dentry->d_sb->s_fs_info;
+ char *root = fsi->host_root_path;
+ size_t len = strlen(root);
- /*
- * This function relies on the fact that dentry_path_raw() will place
- * the path name at the end of the provided buffer.
- */
- BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
-
- strlcpy(name, root, PATH_MAX);
- if (len > p - name) {
+ if (IS_ERR(p) || len > p - name) {
__putname(name);
return NULL;
}
- if (p > name + len)
- strcpy(name + len, p);
+ memcpy(name, root, len);
+ memmove(name + len, p, name + PATH_MAX - p);
return name;
}
@@ -181,14 +178,6 @@ static char *follow_link(char *link)
return ERR_PTR(n);
}
-static struct inode *hostfs_iget(struct super_block *sb)
-{
- struct inode *inode = new_inode(sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- return inode;
-}
-
static int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
/*
@@ -202,8 +191,10 @@ static int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
long long f_bavail;
long long f_files;
long long f_ffree;
+ struct hostfs_fs_info *fsi;
- err = do_statfs(dentry->d_sb->s_fs_info,
+ fsi = dentry->d_sb->s_fs_info;
+ err = do_statfs(fsi->host_root_path,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
&sf->f_namelen);
@@ -222,11 +213,12 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
{
struct hostfs_inode_info *hi;
- hi = kmem_cache_alloc(hostfs_inode_cache, GFP_KERNEL_ACCOUNT);
+ hi = alloc_inode_sb(sb, hostfs_inode_cache, GFP_KERNEL_ACCOUNT);
if (hi == NULL)
return NULL;
hi->fd = -1;
hi->mode = 0;
+ hi->dev = 0;
inode_init_once(&hi->vfs_inode);
mutex_init(&hi->open_mutex);
return &hi->vfs_inode;
@@ -239,6 +231,7 @@ static void hostfs_evict_inode(struct inode *inode)
if (HOSTFS_I(inode)->fd != -1) {
close_file(&HOSTFS_I(inode)->fd);
HOSTFS_I(inode)->fd = -1;
+ HOSTFS_I(inode)->dev = 0;
}
}
@@ -249,7 +242,11 @@ static void hostfs_free_inode(struct inode *inode)
static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
{
- const char *root_path = root->d_sb->s_fs_info;
+ struct hostfs_fs_info *fsi;
+ const char *root_path;
+
+ fsi = root->d_sb->s_fs_info;
+ root_path = fsi->host_root_path;
size_t offset = strlen(root_ino) + 1;
if (strlen(root_path) > offset)
@@ -264,6 +261,7 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
static const struct super_operations hostfs_sbops = {
.alloc_inode = hostfs_alloc_inode,
.free_inode = hostfs_free_inode,
+ .drop_inode = inode_just_drop,
.evict_inode = hostfs_evict_inode,
.statfs = hostfs_statfs,
.show_options = hostfs_show_options,
@@ -380,11 +378,11 @@ static int hostfs_fsync(struct file *file, loff_t start, loff_t end,
static const struct file_operations hostfs_file_fops = {
.llseek = generic_file_llseek,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.open = hostfs_open,
.release = hostfs_file_release,
.fsync = hostfs_fsync,
@@ -398,96 +396,85 @@ static const struct file_operations hostfs_dir_fops = {
.fsync = hostfs_fsync,
};
-static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
+static int hostfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
- char *buffer;
- loff_t base = page_offset(page);
- int count = PAGE_SIZE;
- int end_index = inode->i_size >> PAGE_SHIFT;
- int err;
-
- if (page->index >= end_index)
- count = inode->i_size & (PAGE_SIZE-1);
-
- buffer = kmap(page);
-
- err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
- if (err != count) {
- ClearPageUptodate(page);
- goto out;
+ struct folio *folio = NULL;
+ loff_t i_size = i_size_read(inode);
+ int err = 0;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
+ loff_t pos = folio_pos(folio);
+ size_t count = folio_size(folio);
+ char *buffer;
+ int ret;
+
+ if (count > i_size - pos)
+ count = i_size - pos;
+
+ buffer = kmap_local_folio(folio, 0);
+ ret = write_file(HOSTFS_I(inode)->fd, &pos, buffer, count);
+ kunmap_local(buffer);
+ folio_unlock(folio);
+ if (ret != count) {
+ err = ret < 0 ? ret : -EIO;
+ mapping_set_error(mapping, err);
+ }
}
- if (base > inode->i_size)
- inode->i_size = base;
-
- if (PageError(page))
- ClearPageError(page);
- err = 0;
-
- out:
- kunmap(page);
-
- unlock_page(page);
return err;
}
-static int hostfs_readpage(struct file *file, struct page *page)
+static int hostfs_read_folio(struct file *file, struct folio *folio)
{
char *buffer;
- loff_t start = page_offset(page);
+ loff_t start = folio_pos(folio);
int bytes_read, ret = 0;
- buffer = kmap(page);
+ buffer = kmap_local_folio(folio, 0);
bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
PAGE_SIZE);
- if (bytes_read < 0) {
- ClearPageUptodate(page);
- SetPageError(page);
+ if (bytes_read < 0)
ret = bytes_read;
- goto out;
- }
-
- memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
-
- ClearPageError(page);
- SetPageUptodate(page);
+ else
+ buffer = folio_zero_tail(folio, bytes_read, buffer + bytes_read);
+ kunmap_local(buffer);
- out:
- flush_dcache_page(page);
- kunmap(page);
- unlock_page(page);
+ folio_end_read(folio, ret == 0);
return ret;
}
-static int hostfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int hostfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
- *pagep = grab_cache_page_write_begin(mapping, index, flags);
- if (!*pagep)
- return -ENOMEM;
+ *foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(*foliop))
+ return PTR_ERR(*foliop);
return 0;
}
-static int hostfs_write_end(struct file *file, struct address_space *mapping,
+static int hostfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
void *buffer;
- unsigned from = pos & (PAGE_SIZE - 1);
+ size_t from = offset_in_folio(folio, pos);
int err;
- buffer = kmap(page);
- err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
- kunmap(page);
+ buffer = kmap_local_folio(folio, from);
+ err = write_file(FILE_HOSTFS_I(iocb->ki_filp)->fd, &pos, buffer, copied);
+ kunmap_local(buffer);
- if (!PageUptodate(page) && err == PAGE_SIZE)
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio) && err == folio_size(folio))
+ folio_mark_uptodate(folio);
/*
* If err > 0, write_file has added err to pos, so we are comparing
@@ -495,32 +482,50 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
*/
if (err > 0 && (pos > inode->i_size))
inode->i_size = pos;
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return err;
}
static const struct address_space_operations hostfs_aops = {
- .writepage = hostfs_writepage,
- .readpage = hostfs_readpage,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .writepages = hostfs_writepages,
+ .read_folio = hostfs_read_folio,
+ .dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,
+ .migrate_folio = filemap_migrate_folio,
};
-static int read_name(struct inode *ino, char *name)
+static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
{
- dev_t rdev;
- struct hostfs_stat st;
- int err = stat_file(name, &st, -1);
- if (err)
- return err;
+ set_nlink(ino, st->nlink);
+ i_uid_write(ino, st->uid);
+ i_gid_write(ino, st->gid);
+ inode_set_atime_to_ts(ino, (struct timespec64){
+ st->atime.tv_sec,
+ st->atime.tv_nsec,
+ });
+ inode_set_mtime_to_ts(ino, (struct timespec64){
+ st->mtime.tv_sec,
+ st->mtime.tv_nsec,
+ });
+ inode_set_ctime(ino, st->ctime.tv_sec, st->ctime.tv_nsec);
+ ino->i_size = st->size;
+ ino->i_blocks = st->blocks;
+ return 0;
+}
+
+static int hostfs_inode_set(struct inode *ino, void *data)
+{
+ struct hostfs_stat *st = data;
+ dev_t dev, rdev;
/* Reencode maj and min with the kernel encoding.*/
- rdev = MKDEV(st.maj, st.min);
+ rdev = MKDEV(st->rdev.maj, st->rdev.min);
+ dev = MKDEV(st->dev.maj, st->dev.min);
- switch (st.mode & S_IFMT) {
+ switch (st->mode & S_IFMT) {
case S_IFLNK:
ino->i_op = &hostfs_link_iops;
break;
@@ -532,7 +537,7 @@ static int read_name(struct inode *ino, char *name)
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
- init_special_inode(ino, st.mode & S_IFMT, rdev);
+ init_special_inode(ino, st->mode & S_IFMT, rdev);
ino->i_op = &hostfs_iops;
break;
case S_IFREG:
@@ -544,80 +549,92 @@ static int read_name(struct inode *ino, char *name)
return -EIO;
}
- ino->i_ino = st.ino;
- ino->i_mode = st.mode;
- set_nlink(ino, st.nlink);
- i_uid_write(ino, st.uid);
- i_gid_write(ino, st.gid);
- ino->i_atime = (struct timespec64){ st.atime.tv_sec, st.atime.tv_nsec };
- ino->i_mtime = (struct timespec64){ st.mtime.tv_sec, st.mtime.tv_nsec };
- ino->i_ctime = (struct timespec64){ st.ctime.tv_sec, st.ctime.tv_nsec };
- ino->i_size = st.size;
- ino->i_blocks = st.blocks;
- return 0;
+ HOSTFS_I(ino)->dev = dev;
+ HOSTFS_I(ino)->btime = st->btime;
+ ino->i_ino = st->ino;
+ ino->i_mode = st->mode;
+ return hostfs_inode_update(ino, st);
}
-static int hostfs_create(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool excl)
+static int hostfs_inode_test(struct inode *inode, void *data)
+{
+ const struct hostfs_stat *st = data;
+ dev_t dev = MKDEV(st->dev.maj, st->dev.min);
+
+ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev &&
+ (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) &&
+ HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec &&
+ HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec;
+}
+
+static struct inode *hostfs_iget(struct super_block *sb, char *name)
{
struct inode *inode;
- char *name;
- int error, fd;
+ struct hostfs_stat st;
+ int err = stat_file(name, &st, -1);
- inode = hostfs_iget(dir->i_sb);
- if (IS_ERR(inode)) {
- error = PTR_ERR(inode);
- goto out;
+ if (err)
+ return ERR_PTR(err);
+
+ inode = iget5_locked(sb, st.ino, hostfs_inode_test, hostfs_inode_set,
+ &st);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode_state_read_once(inode) & I_NEW) {
+ unlock_new_inode(inode);
+ } else {
+ spin_lock(&inode->i_lock);
+ hostfs_inode_update(inode, &st);
+ spin_unlock(&inode->i_lock);
}
- error = -ENOMEM;
+ return inode;
+}
+
+static int hostfs_create(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
+{
+ struct inode *inode;
+ char *name;
+ int fd;
+
name = dentry_name(dentry);
if (name == NULL)
- goto out_put;
+ return -ENOMEM;
fd = file_create(name, mode & 0777);
- if (fd < 0)
- error = fd;
- else
- error = read_name(inode, name);
+ if (fd < 0) {
+ __putname(name);
+ return fd;
+ }
+ inode = hostfs_iget(dir->i_sb, name);
__putname(name);
- if (error)
- goto out_put;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
HOSTFS_I(inode)->fd = fd;
HOSTFS_I(inode)->mode = FMODE_READ | FMODE_WRITE;
d_instantiate(dentry, inode);
return 0;
-
- out_put:
- iput(inode);
- out:
- return error;
}
static struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
unsigned int flags)
{
- struct inode *inode;
+ struct inode *inode = NULL;
char *name;
- int err;
- inode = hostfs_iget(ino->i_sb);
- if (IS_ERR(inode))
- goto out;
-
- err = -ENOMEM;
name = dentry_name(dentry);
- if (name) {
- err = read_name(inode, name);
- __putname(name);
- }
- if (err) {
- iput(inode);
- inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
- }
- out:
+ if (name == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ inode = hostfs_iget(ino->i_sb, name);
+ __putname(name);
+ if (inode == ERR_PTR(-ENOENT))
+ inode = NULL;
+
return d_splice_alias(inode, dentry);
}
@@ -656,7 +673,7 @@ static int hostfs_unlink(struct inode *ino, struct dentry *dentry)
return err;
}
-static int hostfs_symlink(struct user_namespace *mnt_userns, struct inode *ino,
+static int hostfs_symlink(struct mnt_idmap *idmap, struct inode *ino,
struct dentry *dentry, const char *to)
{
char *file;
@@ -669,17 +686,25 @@ static int hostfs_symlink(struct user_namespace *mnt_userns, struct inode *ino,
return err;
}
-static int hostfs_mkdir(struct user_namespace *mnt_userns, struct inode *ino,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hostfs_mkdir(struct mnt_idmap *idmap, struct inode *ino,
+ struct dentry *dentry, umode_t mode)
{
+ struct inode *inode;
char *file;
int err;
if ((file = dentry_name(dentry)) == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
err = do_mkdir(file, mode);
+ if (err) {
+ dentry = ERR_PTR(err);
+ } else {
+ inode = hostfs_iget(dentry->d_sb, file);
+ d_drop(dentry);
+ dentry = d_splice_alias(inode, dentry);
+ }
__putname(file);
- return err;
+ return dentry;
}
static int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
@@ -694,45 +719,33 @@ static int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
return err;
}
-static int hostfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int hostfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
char *name;
int err;
- inode = hostfs_iget(dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out;
- }
-
- err = -ENOMEM;
name = dentry_name(dentry);
if (name == NULL)
- goto out_put;
+ return -ENOMEM;
err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
- if (err)
- goto out_free;
+ if (err) {
+ __putname(name);
+ return err;
+ }
- err = read_name(inode, name);
+ inode = hostfs_iget(dir->i_sb, name);
__putname(name);
- if (err)
- goto out_put;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
d_instantiate(dentry, inode);
return 0;
-
- out_free:
- __putname(name);
- out_put:
- iput(inode);
- out:
- return err;
}
-static int hostfs_rename2(struct user_namespace *mnt_userns,
+static int hostfs_rename2(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -761,7 +774,7 @@ static int hostfs_rename2(struct user_namespace *mnt_userns,
return err;
}
-static int hostfs_permission(struct user_namespace *mnt_userns,
+static int hostfs_permission(struct mnt_idmap *idmap,
struct inode *ino, int desired)
{
char *name;
@@ -784,11 +797,11 @@ static int hostfs_permission(struct user_namespace *mnt_userns,
err = access_file(name, r, w, x);
__putname(name);
if (!err)
- err = generic_permission(&init_user_ns, ino, desired);
+ err = generic_permission(&nop_mnt_idmap, ino, desired);
return err;
}
-static int hostfs_setattr(struct user_namespace *mnt_userns,
+static int hostfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -798,7 +811,7 @@ static int hostfs_setattr(struct user_namespace *mnt_userns,
int fd = HOSTFS_I(inode)->fd;
- err = setattr_prepare(&init_user_ns, dentry, attr);
+ err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (err)
return err;
@@ -855,7 +868,7 @@ static int hostfs_setattr(struct user_namespace *mnt_userns,
attr->ia_size != i_size_read(inode))
truncate_setsize(inode, attr->ia_size);
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -912,67 +925,140 @@ static const struct inode_operations hostfs_link_iops = {
.get_link = hostfs_get_link,
};
-static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
+static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct hostfs_fs_info *fsi = sb->s_fs_info;
struct inode *root_inode;
- char *host_root_path, *req_root = d;
int err;
sb->s_blocksize = 1024;
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
- sb->s_d_op = &simple_dentry_operations;
+ sb->s_d_flags = DCACHE_DONTCACHE;
sb->s_maxbytes = MAX_LFS_FILESIZE;
+ err = super_setup_bdi(sb);
+ if (err)
+ return err;
- /* NULL is printed as '(null)' by printf(): avoid that. */
- if (req_root == NULL)
- req_root = "";
+ root_inode = hostfs_iget(sb, fsi->host_root_path);
+ if (IS_ERR(root_inode))
+ return PTR_ERR(root_inode);
- err = -ENOMEM;
- sb->s_fs_info = host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, req_root);
- if (host_root_path == NULL)
- goto out;
+ if (S_ISLNK(root_inode->i_mode)) {
+ char *name;
- root_inode = new_inode(sb);
- if (!root_inode)
- goto out;
+ iput(root_inode);
+ name = follow_link(fsi->host_root_path);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
- err = read_name(root_inode, host_root_path);
- if (err)
- goto out_put;
-
- if (S_ISLNK(root_inode->i_mode)) {
- char *name = follow_link(host_root_path);
- if (IS_ERR(name)) {
- err = PTR_ERR(name);
- goto out_put;
- }
- err = read_name(root_inode, name);
+ root_inode = hostfs_iget(sb, name);
kfree(name);
- if (err)
- goto out_put;
+ if (IS_ERR(root_inode))
+ return PTR_ERR(root_inode);
}
- err = -ENOMEM;
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL)
- goto out;
+ return -ENOMEM;
return 0;
+}
-out_put:
- iput(root_inode);
-out:
- return err;
+enum hostfs_parma {
+ Opt_hostfs,
+};
+
+static const struct fs_parameter_spec hostfs_param_specs[] = {
+ fsparam_string_empty("hostfs", Opt_hostfs),
+ {}
+};
+
+static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct hostfs_fs_info *fsi = fc->s_fs_info;
+ struct fs_parse_result result;
+ char *host_root, *tmp_root;
+ int opt;
+
+ opt = fs_parse(fc, hostfs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_hostfs:
+ host_root = param->string;
+ if (!*host_root)
+ break;
+ tmp_root = kasprintf(GFP_KERNEL, "%s%s",
+ fsi->host_root_path, host_root);
+ if (!tmp_root)
+ return -ENOMEM;
+ kfree(fsi->host_root_path);
+ fsi->host_root_path = tmp_root;
+ break;
+ }
+
+ return 0;
}
-static struct dentry *hostfs_read_sb(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
+static int hostfs_parse_monolithic(struct fs_context *fc, void *data)
{
- return mount_nodev(type, flags, data, hostfs_fill_sb_common);
+ struct hostfs_fs_info *fsi = fc->s_fs_info;
+ char *tmp_root, *host_root = (char *)data;
+
+ /* NULL is printed as '(null)' by printf(): avoid that. */
+ if (host_root == NULL)
+ return 0;
+
+ tmp_root = kasprintf(GFP_KERNEL, "%s%s", fsi->host_root_path, host_root);
+ if (!tmp_root)
+ return -ENOMEM;
+ kfree(fsi->host_root_path);
+ fsi->host_root_path = tmp_root;
+ return 0;
+}
+
+static int hostfs_fc_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, hostfs_fill_super);
+}
+
+static void hostfs_fc_free(struct fs_context *fc)
+{
+ struct hostfs_fs_info *fsi = fc->s_fs_info;
+
+ if (!fsi)
+ return;
+
+ kfree(fsi->host_root_path);
+ kfree(fsi);
+}
+
+static const struct fs_context_operations hostfs_context_ops = {
+ .parse_monolithic = hostfs_parse_monolithic,
+ .parse_param = hostfs_parse_param,
+ .get_tree = hostfs_fc_get_tree,
+ .free = hostfs_fc_free,
+};
+
+static int hostfs_init_fs_context(struct fs_context *fc)
+{
+ struct hostfs_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fsi->host_root_path = kasprintf(GFP_KERNEL, "%s/", root_ino);
+ if (!fsi->host_root_path) {
+ kfree(fsi);
+ return -ENOMEM;
+ }
+ fc->s_fs_info = fsi;
+ fc->ops = &hostfs_context_ops;
+ return 0;
}
static void hostfs_kill_sb(struct super_block *s)
@@ -982,11 +1068,11 @@ static void hostfs_kill_sb(struct super_block *s)
}
static struct file_system_type hostfs_type = {
- .owner = THIS_MODULE,
- .name = "hostfs",
- .mount = hostfs_read_sb,
- .kill_sb = hostfs_kill_sb,
- .fs_flags = 0,
+ .owner = THIS_MODULE,
+ .name = "hostfs",
+ .init_fs_context = hostfs_init_fs_context,
+ .kill_sb = hostfs_kill_sb,
+ .fs_flags = 0,
};
MODULE_ALIAS_FS("hostfs");
@@ -1006,4 +1092,5 @@ static void __exit exit_hostfs(void)
module_init(init_hostfs)
module_exit(exit_hostfs)
+MODULE_DESCRIPTION("User-Mode Linux Host filesystem");
MODULE_LICENSE("GPL");
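The __dentry_name() rework above depends on dentry_path_raw() writing the dentry path at the end of the PATH_MAX buffer; the host root is then prepended by shifting that tail into place. A sketch of the buffer states, with an assumed root of "/host" and dentry path "/dir/file":

/*
 * name[PATH_MAX], p as returned by dentry_path_raw():
 *
 *      after dentry_path_raw():  [ .....unused..... "/dir/file\0" ]
 *                                                    ^p
 *      after memcpy():           [ "/host" .unused. "/dir/file\0" ]
 *      after memmove():          [ "/host/dir/file\0" ........... ]
 *
 * The IS_ERR(p) || len > p - name check bails out when the root prefix
 * would not fit in front of the tail that dentry_path_raw() produced.
 */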
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 5ecc4706172b..3bcd9f35e70b 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -18,37 +18,48 @@
#include "hostfs.h"
#include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p)
{
- p->ino = buf->st_ino;
- p->mode = buf->st_mode;
- p->nlink = buf->st_nlink;
- p->uid = buf->st_uid;
- p->gid = buf->st_gid;
- p->size = buf->st_size;
- p->atime.tv_sec = buf->st_atime;
- p->atime.tv_nsec = 0;
- p->ctime.tv_sec = buf->st_ctime;
- p->ctime.tv_nsec = 0;
- p->mtime.tv_sec = buf->st_mtime;
- p->mtime.tv_nsec = 0;
- p->blksize = buf->st_blksize;
- p->blocks = buf->st_blocks;
- p->maj = os_major(buf->st_rdev);
- p->min = os_minor(buf->st_rdev);
+ p->ino = buf->stx_ino;
+ p->mode = buf->stx_mode;
+ p->nlink = buf->stx_nlink;
+ p->uid = buf->stx_uid;
+ p->gid = buf->stx_gid;
+ p->size = buf->stx_size;
+ p->atime.tv_sec = buf->stx_atime.tv_sec;
+ p->atime.tv_nsec = buf->stx_atime.tv_nsec;
+ p->ctime.tv_sec = buf->stx_ctime.tv_sec;
+ p->ctime.tv_nsec = buf->stx_ctime.tv_nsec;
+ p->mtime.tv_sec = buf->stx_mtime.tv_sec;
+ p->mtime.tv_nsec = buf->stx_mtime.tv_nsec;
+ if (buf->stx_mask & STATX_BTIME) {
+ p->btime.tv_sec = buf->stx_btime.tv_sec;
+ p->btime.tv_nsec = buf->stx_btime.tv_nsec;
+ } else {
+ memset(&p->btime, 0, sizeof(p->btime));
+ }
+ p->blksize = buf->stx_blksize;
+ p->blocks = buf->stx_blocks;
+ p->rdev.maj = buf->stx_rdev_major;
+ p->rdev.min = buf->stx_rdev_minor;
+ p->dev.maj = buf->stx_dev_major;
+ p->dev.min = buf->stx_dev_minor;
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
- struct stat64 buf;
+ struct statx buf;
+ int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) {
- if (fstat64(fd, &buf) < 0)
- return -errno;
- } else if (lstat64(path, &buf) < 0) {
- return -errno;
+ flags |= AT_EMPTY_PATH;
+ path = "";
}
- stat64_to_hostfs(&buf, p);
+
+ if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0)
+ return -errno;
+
+ statx_to_hostfs(&buf, p);
return 0;
}
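With the statx() conversion, stat_file() covers both lookup styles through one syscall: by host path, or by an already-open host fd using AT_EMPTY_PATH and an empty path string. A usage sketch — the path and the open_fd variable are illustrative, not taken from the patch:

struct hostfs_stat st;
int err;

err = stat_file("/host/root/dir/file", &st, -1);   /* stat by host path */
err = stat_file("", &st, open_fd);                 /* stat by an assumed, already-open host fd */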
diff --git a/fs/hostfs/hostfs_user_exp.c b/fs/hostfs/hostfs_user_exp.c
new file mode 100644
index 000000000000..250c91c55c46
--- /dev/null
+++ b/fs/hostfs/hostfs_user_exp.c
@@ -0,0 +1,28 @@
+#include <linux/module.h>
+#include "hostfs.h"
+
+EXPORT_SYMBOL_GPL(stat_file);
+EXPORT_SYMBOL_GPL(access_file);
+EXPORT_SYMBOL_GPL(open_file);
+EXPORT_SYMBOL_GPL(open_dir);
+EXPORT_SYMBOL_GPL(seek_dir);
+EXPORT_SYMBOL_GPL(read_dir);
+EXPORT_SYMBOL_GPL(read_file);
+EXPORT_SYMBOL_GPL(write_file);
+EXPORT_SYMBOL_GPL(lseek_file);
+EXPORT_SYMBOL_GPL(fsync_file);
+EXPORT_SYMBOL_GPL(replace_file);
+EXPORT_SYMBOL_GPL(close_file);
+EXPORT_SYMBOL_GPL(close_dir);
+EXPORT_SYMBOL_GPL(file_create);
+EXPORT_SYMBOL_GPL(set_attr);
+EXPORT_SYMBOL_GPL(make_symlink);
+EXPORT_SYMBOL_GPL(unlink_file);
+EXPORT_SYMBOL_GPL(do_mkdir);
+EXPORT_SYMBOL_GPL(hostfs_do_rmdir);
+EXPORT_SYMBOL_GPL(do_mknod);
+EXPORT_SYMBOL_GPL(link_file);
+EXPORT_SYMBOL_GPL(hostfs_do_readlink);
+EXPORT_SYMBOL_GPL(rename_file);
+EXPORT_SYMBOL_GPL(rename2_file);
+EXPORT_SYMBOL_GPL(do_statfs);
diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig
index ec975f466877..ac1e9318e65a 100644
--- a/fs/hpfs/Kconfig
+++ b/fs/hpfs/Kconfig
@@ -2,6 +2,7 @@
config HPFS_FS
tristate "OS/2 HPFS file system support"
depends on BLOCK
+ select BUFFER_HEAD
select FS_IOMAP
help
OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index c14c9a035ee0..a4f5321eafae 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -27,7 +27,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
a = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
goto go_down;
}
hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
@@ -69,12 +69,13 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
int n;
unsigned fs;
int c1, c2 = 0;
+
if (fnod) {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
} else {
if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
a = node;
go_down:
@@ -91,7 +92,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
goto go_down;
}
if (n >= 0) {
@@ -151,7 +152,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
brelse(bh);
bh = bh1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
btree->n_free_nodes--; n = btree->n_used_nodes++;
le16_add_cpu(&btree->first_free, 12);
@@ -168,10 +169,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
if (up != node || !fnod) {
if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
} else {
if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
}
if (btree->n_free_nodes) {
btree->n_free_nodes--; n = btree->n_used_nodes++;
@@ -206,8 +207,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
anode->btree.n_used_nodes = 1;
anode->btree.n_free_nodes = 59;
anode->btree.first_free = cpu_to_le16(16);
- anode->btree.u.internal[0].down = cpu_to_le32(a);
- anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
+ GET_BTREE_PTR(&anode->btree)->u.internal[0].down = cpu_to_le32(a);
+ GET_BTREE_PTR(&anode->btree)->u.internal[0].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if ((anode = hpfs_map_anode(s, a, &bh))) {
@@ -229,20 +230,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
brelse(bh2);
return -1;
}
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
} else {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
brelse(bh2);
return -1;
}
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
}
ranode->up = cpu_to_le32(node);
memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
if (fnod)
ranode->btree.flags |= BP_fnode_parent;
- ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
- if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+ GET_BTREE_PTR(&ranode->btree)->n_free_nodes = (bp_internal(GET_BTREE_PTR(&ranode->btree)) ? 60 : 40) - GET_BTREE_PTR(&ranode->btree)->n_used_nodes;
+ if (bp_internal(GET_BTREE_PTR(&ranode->btree))) for (n = 0; n < GET_BTREE_PTR(&ranode->btree)->n_used_nodes; n++) {
struct anode *unode;
if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
unode->up = cpu_to_le32(ra);
@@ -291,7 +292,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
return;
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
- btree1 = &anode->btree;
+ btree1 = GET_BTREE_PTR(&anode->btree);
level++;
pos = 0;
}
@@ -307,7 +308,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
ano = le32_to_cpu(anode->up);
if (--level) {
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
- btree1 = &anode->btree;
+ btree1 = GET_BTREE_PTR(&anode->btree);
} else btree1 = btree;
for (i = 0; i < btree1->n_used_nodes; i++) {
if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
@@ -332,7 +333,7 @@ static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
struct anode *anode;
struct buffer_head *bh;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
+ return hpfs_bplus_lookup(s, NULL, GET_BTREE_PTR(&anode->btree), sec, bh);
}
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
@@ -388,7 +389,7 @@ void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
struct buffer_head *bh;
if (ano) {
if (!(anode = hpfs_map_anode(s, a, &bh))) return;
- hpfs_remove_btree(s, &anode->btree);
+ hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
brelse(bh);
hpfs_free_sectors(s, a, 1);
} else hpfs_free_sectors(s, a, (len + 511) >> 9);
@@ -407,10 +408,10 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
int c1, c2 = 0;
if (fno) {
if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
} else {
if (!(anode = hpfs_map_anode(s, f, &bh))) return;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
if (!secs) {
hpfs_remove_btree(s, btree);
@@ -448,7 +449,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
return;
if (!(anode = hpfs_map_anode(s, node, &bh))) return;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
@@ -485,7 +486,7 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
struct extended_attribute *ea;
struct extended_attribute *ea_end;
if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
- if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
+ if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, GET_BTREE_PTR(&fnode->btree));
else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index f32f15669996..ceb50b2dc91a 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -247,7 +247,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
result = ERR_PTR(-ENOMEM);
goto bail1;
}
- if (result->i_state & I_NEW) {
+ if (inode_state_read_once(result) & I_NEW) {
hpfs_init_inode(result);
if (de->directory)
hpfs_read_inode(result);
@@ -277,14 +277,16 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
* inode.
*/
- if (!result->i_ctime.tv_sec) {
- if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date))))
- result->i_ctime.tv_sec = 1;
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
- result->i_atime.tv_nsec = 0;
+ if (!inode_get_ctime_sec(result)) {
+ time64_t csec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date));
+
+ inode_set_ctime(result, csec ? csec : 1, 0);
+ inode_set_mtime(result,
+ local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)),
+ 0);
+ inode_set_atime(result,
+ local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)),
+ 0);
hpfs_result->i_ea_size = le32_to_cpu(de->ea_size);
if (!hpfs_result->i_ea_mode && de->read_only)
result->i_mode &= ~0222;
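
The hunk above replaces direct i_ctime/i_mtime/i_atime field pokes with the inode_set_ctime()/inode_set_mtime()/inode_set_atime() accessors. These setters return the struct timespec64 value they stored, which is what lets the namei.c hunks later in this series nest them, e.g. inode_set_mtime_to_ts(dir, inode_set_ctime(dir, t, 0)). A minimal userspace sketch of that chaining idiom, using toy stand-ins rather than the kernel helpers:

/*
 * Hedged sketch (not kernel code): toy_set_ctime() returns the value it
 * stored, so one timestamp can be fanned out to several fields in a single
 * expression. The toy types below only illustrate the chaining idiom.
 */
#include <stdio.h>
#include <time.h>

struct toy_inode {
	struct timespec ctime, mtime, atime;
};

/* Setter returns the value it stored, enabling nested calls. */
static struct timespec toy_set_ctime(struct toy_inode *i, time_t sec, long nsec)
{
	i->ctime.tv_sec = sec;
	i->ctime.tv_nsec = nsec;
	return i->ctime;
}

static struct timespec toy_set_mtime_to_ts(struct toy_inode *i, struct timespec ts)
{
	i->mtime = ts;
	return i->mtime;
}

int main(void)
{
	struct toy_inode dir = { 0 };

	/* Mirrors the pattern used by hpfs_update_directory_times(). */
	toy_set_mtime_to_ts(&dir, toy_set_ctime(&dir, 1700000000, 0));
	printf("ctime=%lld mtime=%lld\n",
	       (long long)dir.ctime.tv_sec, (long long)dir.mtime.tv_sec);
	return 0;
}
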
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 102ba18e561f..2149d3ca530b 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -41,7 +41,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
struct buffer_head *bh;
struct anode *anode;
if ((anode = hpfs_map_anode(s, a, &bh))) {
- hpfs_remove_btree(s, &anode->btree);
+ hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
brelse(bh);
hpfs_free_sectors(s, a, 1);
}
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index fb37f57130aa..29e876705369 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -51,7 +51,9 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_sec
return hpfs_inode->i_disk_sec + n;
}
if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
- disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
+ disk_secno = hpfs_bplus_lookup(inode->i_sb, inode,
+ GET_BTREE_PTR(&fnode->btree),
+ file_secno, bh);
if (disk_secno == -1) return 0;
if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
n = file_secno - hpfs_inode->i_file_sec;
@@ -158,14 +160,9 @@ static const struct iomap_ops hpfs_iomap_ops = {
.iomap_begin = hpfs_iomap_begin,
};
-static int hpfs_readpage(struct file *file, struct page *page)
+static int hpfs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, hpfs_get_block);
-}
-
-static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, hpfs_get_block, wbc);
+ return mpage_read_folio(folio, hpfs_get_block);
}
static void hpfs_readahead(struct readahead_control *rac)
@@ -193,14 +190,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
hpfs_unlock(inode->i_sb);
}
-static int hpfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int hpfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -209,13 +206,14 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static int hpfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *pagep, void *fsdata)
+static int hpfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+ err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
if (err < len)
hpfs_write_failed(mapping, pos + len);
if (!(err < 0)) {
@@ -245,14 +243,15 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
const struct address_space_operations hpfs_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = hpfs_readpage,
- .writepage = hpfs_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = hpfs_read_folio,
.readahead = hpfs_readahead,
.writepages = hpfs_writepages,
.write_begin = hpfs_write_begin,
.write_end = hpfs_write_end,
- .bmap = _hpfs_bmap
+ .bmap = _hpfs_bmap,
+ .migrate_folio = buffer_migrate_folio,
};
const struct file_operations hpfs_file_ops =
@@ -260,10 +259,10 @@ const struct file_operations hpfs_file_ops =
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.release = hpfs_file_release,
.fsync = hpfs_file_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.unlocked_ioctl = hpfs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 281dec8f636b..353f73c914d9 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -394,27 +394,45 @@ enum {
BP_binary_search = 0x40,
BP_internal = 0x80
};
+
+/**
+ * GET_BTREE_PTR() - Get a pointer to struct bplus_header
+ * @ptr: Pointer to struct bplus_header_fixed.
+ *
+ * Wrapper around container_of() to retrieve a pointer to struct
+ * bplus_header from a pointer to struct bplus_header_fixed.
+ *
+ * Return: Pointer to the enclosing struct bplus_header.
+ */
+#define GET_BTREE_PTR(ptr) \
+ container_of(ptr, struct bplus_header, __hdr)
+
struct bplus_header
{
- u8 flags; /* bit 0 - high bit of first free entry offset
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(bplus_header_fixed, __hdr,
+ u8 flags; /* bit 0 - high bit of first free entry offset
bit 5 - we're pointed to by an fnode,
the data btree or some ea or the
main ea bootage pointer ea_secno
bit 6 - suggest binary search (unused)
bit 7 - 1 -> (internal) tree of anodes
0 -> (leaf) list of extents */
- u8 fill[3];
- u8 n_free_nodes; /* free nodes in following array */
- u8 n_used_nodes; /* used nodes in following array */
- __le16 first_free; /* offset from start of header to
+ u8 fill[3];
+ u8 n_free_nodes; /* free nodes in following array */
+ u8 n_used_nodes; /* used nodes in following array */
+ __le16 first_free; /* offset from start of header to
first free node in array */
- union {
- /* (internal) 2-word entries giving subtree pointers */
- DECLARE_FLEX_ARRAY(struct bplus_internal_node, internal);
- /* (external) 3-word entries giving sector runs */
- DECLARE_FLEX_ARRAY(struct bplus_leaf_node, external);
- } u;
+ );
+ union {
+ /* (internal) 2-word entries giving subtree pointers */
+ DECLARE_FLEX_ARRAY(struct bplus_internal_node, internal);
+ /* (external) 3-word entries giving sector runs */
+ DECLARE_FLEX_ARRAY(struct bplus_leaf_node, external);
+ } u;
};
+static_assert(offsetof(struct bplus_header, u.internal) == sizeof(struct bplus_header_fixed),
+ "struct member likely outside of struct_group_tagged()");
static inline bool bp_internal(struct bplus_header *bp)
{
@@ -453,7 +471,7 @@ struct fnode
__le16 flags; /* bit 1 set -> ea_secno is an anode */
/* bit 8 set -> directory. first & only extent
points to dnode. */
- struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */
+ struct bplus_header_fixed btree; /* b+ tree, 8 extents or 12 subtrees */
union {
struct bplus_leaf_node external[8];
struct bplus_internal_node internal[12];
@@ -495,7 +513,7 @@ struct anode
__le32 self; /* pointer to this anode */
__le32 up; /* parent anode or fnode */
- struct bplus_header btree; /* b+tree, 40 extents or 60 subtrees */
+ struct bplus_header_fixed btree; /* b+tree, 40 extents or 60 subtrees */
union {
struct bplus_leaf_node external[40];
struct bplus_internal_node internal[60];
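
GET_BTREE_PTR() only works because struct_group_tagged() gives the leading members of struct bplus_header a second identity as struct bplus_header_fixed embedded at offset zero; container_of() on that member then recovers the enclosing header, and the static_assert() guards that the flexible union still starts right after the fixed part. A hand-rolled userspace sketch of the same layout relationship (no kernel macros), for illustration only:

/*
 * Hedged sketch: hdr_fixed stands in for struct bplus_header_fixed and hdr
 * for struct bplus_header; my_container_of() is a toy re-creation of the
 * kernel macro. Names and values are illustrative.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hdr_fixed {		/* stands in for struct bplus_header_fixed */
	uint8_t flags;
	uint8_t fill[3];
	uint8_t n_free_nodes;
	uint8_t n_used_nodes;
	uint16_t first_free;
};

struct hdr {			/* stands in for struct bplus_header */
	struct hdr_fixed fixed;	/* the tagged group, at offset 0 */
	uint32_t entries[];	/* stands in for the internal/external union */
};

int main(void)
{
	static struct hdr h = { .fixed = { .n_used_nodes = 3 } };
	struct hdr_fixed *fp = &h.fixed;

	/* Flexible members must start right after the fixed part ... */
	static_assert(offsetof(struct hdr, entries) == sizeof(struct hdr_fixed),
		      "member outside the fixed group");
	/* ... and the fixed part sits at offset 0, so the cast recovers hdr. */
	struct hdr *hp = my_container_of(fp, struct hdr, fixed);

	printf("recovered n_used_nodes = %u\n", (unsigned)hp->fixed.n_used_nodes);
	return 0;
}
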
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 167ec6884642..237c1c23e855 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/blkdev.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "hpfs.h"
@@ -280,7 +280,7 @@ void hpfs_init_inode(struct inode *);
void hpfs_read_inode(struct inode *);
void hpfs_write_inode(struct inode *);
void hpfs_write_inode_nolock(struct inode *);
-int hpfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
+int hpfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
void hpfs_write_if_changed(struct inode *);
void hpfs_evict_inode(struct inode *);
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 82208cc28ebd..93d528f4f4f2 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -36,9 +36,9 @@ void hpfs_init_inode(struct inode *i)
hpfs_inode->i_rddir_off = NULL;
hpfs_inode->i_dirty = 0;
- i->i_ctime.tv_sec = i->i_ctime.tv_nsec = 0;
- i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
- i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
+ inode_set_ctime(i, 0, 0);
+ inode_set_mtime(i, 0, 0);
+ inode_set_atime(i, 0, 0);
}
void hpfs_read_inode(struct inode *i)
@@ -184,7 +184,7 @@ void hpfs_write_inode(struct inode *i)
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct inode *parent;
if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return;
- if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) {
+ if (hpfs_inode->i_rddir_off && !icount_read(i)) {
if (*hpfs_inode->i_rddir_off)
pr_err("write_inode: some position still there\n");
kfree(hpfs_inode->i_rddir_off);
@@ -196,7 +196,7 @@ void hpfs_write_inode(struct inode *i)
parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
if (parent) {
hpfs_inode->i_dirty = 0;
- if (parent->i_state & I_NEW) {
+ if (inode_state_read_once(parent) & I_NEW) {
hpfs_init_inode(parent);
hpfs_read_inode(parent);
unlock_new_inode(parent);
@@ -230,9 +230,9 @@ void hpfs_write_inode_nolock(struct inode *i)
}
hpfs_write_inode_ea(i, fnode);
if (de) {
- de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
- de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
+ de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_mtime_sec(i)));
+ de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_atime_sec(i)));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime_sec(i)));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
hpfs_mark_4buffers_dirty(&qbh);
@@ -240,9 +240,9 @@ void hpfs_write_inode_nolock(struct inode *i)
}
if (S_ISDIR(i->i_mode)) {
if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
- de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
- de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
+ de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_mtime_sec(i)));
+ de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_atime_sec(i)));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime_sec(i)));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
de->file_size = cpu_to_le32(0);
@@ -257,7 +257,7 @@ void hpfs_write_inode_nolock(struct inode *i)
brelse(bh);
}
-int hpfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int hpfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -275,7 +275,7 @@ int hpfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
goto out_unlock;
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
goto out_unlock;
@@ -289,7 +289,7 @@ int hpfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
hpfs_truncate(inode);
}
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
hpfs_write_inode(inode);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index ecd9fccd1663..be73233502f8 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -178,14 +178,14 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
}
if (!fnode_is_dir(fnode)) {
if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
- (bp_internal(&fnode->btree) ? 12 : 8)) {
+ (bp_internal(GET_BTREE_PTR(&fnode->btree)) ? 12 : 8)) {
hpfs_error(s,
"bad number of nodes in fnode %08lx",
(unsigned long)ino);
goto bail;
}
if (le16_to_cpu(fnode->btree.first_free) !=
- 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
+ 8 + fnode->btree.n_used_nodes * (bp_internal(GET_BTREE_PTR(&fnode->btree)) ? 8 : 12)) {
hpfs_error(s,
"bad first_free pointer in fnode %08lx",
(unsigned long)ino);
@@ -233,12 +233,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
goto bail;
}
if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
- (bp_internal(&anode->btree) ? 60 : 40)) {
+ (bp_internal(GET_BTREE_PTR(&anode->btree)) ? 60 : 40)) {
hpfs_error(s, "bad number of nodes in anode %08x", ano);
goto bail;
}
if (le16_to_cpu(anode->btree.first_free) !=
- 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
+ 8 + anode->btree.n_used_nodes * (bp_internal(GET_BTREE_PTR(&anode->btree)) ? 8 : 12)) {
hpfs_error(s, "bad first_free pointer in anode %08x", ano);
goto bail;
}
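
The first_free sanity checks above encode the on-disk layout: the b+tree header is 8 bytes, internal entries are 8 bytes (two __le32 words) and leaf entries are 12 bytes (three __le32 words), so first_free must point just past the used slots. A worked example of that arithmetic (values are illustrative, not taken from a real volume):

/*
 * Hedged sketch: restates the consistency check used by hpfs_map_fnode()
 * and hpfs_map_anode() with concrete numbers.
 */
#include <stdio.h>

static unsigned expected_first_free(int internal, unsigned n_used_nodes)
{
	return 8 + n_used_nodes * (internal ? 8 : 12);
}

int main(void)
{
	/* An internal node with 3 used subtree pointers: 8 + 3*8 = 32. */
	printf("internal, 3 used -> first_free %u\n", expected_first_free(1, 3));
	/* A leaf node with 2 used extents: 8 + 2*12 = 32. */
	printf("leaf, 2 used     -> first_free %u\n", expected_first_free(0, 2));
	return 0;
}
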
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index d73f8a67168e..353e13a615f5 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -12,16 +12,15 @@
static void hpfs_update_directory_times(struct inode *dir)
{
time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb));
- if (t == dir->i_mtime.tv_sec &&
- t == dir->i_ctime.tv_sec)
+ if (t == inode_get_mtime_sec(dir) &&
+ t == inode_get_ctime_sec(dir))
return;
- dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
- dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
+ inode_set_mtime_to_ts(dir, inode_set_ctime(dir, t, 0));
hpfs_write_inode_nolock(dir);
}
-static int hpfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -36,7 +35,7 @@ static int hpfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
int r;
struct hpfs_dirent dee;
int err;
- if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
+ if ((err = hpfs_chk_name(name, &len))) return ERR_PTR(err==-ENOENT ? -EINVAL : err);
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -53,16 +52,16 @@ static int hpfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail2;
+ }
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
hpfs_i(result)->i_dno = dno;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_mode |= S_IFDIR;
result->i_op = &hpfs_dir_iops;
@@ -115,7 +114,7 @@ static int hpfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
hpfs_update_directory_times(dir);
d_instantiate(dentry, result);
hpfs_unlock(dir->i_sb);
- return 0;
+ return NULL;
bail3:
iput(result);
bail2:
@@ -126,10 +125,10 @@ bail1:
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
- return err;
+ return ERR_PTR(err);
}
-static int hpfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
const unsigned char *name = dentry->d_name.name;
@@ -156,9 +155,10 @@ static int hpfs_create(struct user_namespace *mnt_userns, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
-
+ }
hpfs_init_inode(result);
result->i_ino = fno;
result->i_mode |= S_IFREG;
@@ -167,10 +167,8 @@ static int hpfs_create(struct user_namespace *mnt_userns, struct inode *dir,
result->i_fop = &hpfs_file_ops;
set_nlink(result, 1);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
if (dee.read_only)
result->i_mode &= ~0222;
@@ -217,7 +215,7 @@ bail:
return err;
}
-static int hpfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
const unsigned char *name = dentry->d_name.name;
@@ -244,16 +242,15 @@ static int hpfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
-
+ }
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
@@ -292,7 +289,7 @@ bail:
return err;
}
-static int hpfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symlink)
{
const unsigned char *name = dentry->d_name.name;
@@ -321,15 +318,15 @@ static int hpfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
+ }
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_mode = S_IFLNK | 0777;
result->i_uid = current_fsuid();
@@ -479,10 +476,10 @@ out:
return err;
}
-static int hpfs_symlink_readpage(struct file *file, struct page *page)
+static int hpfs_symlink_read_folio(struct file *file, struct folio *folio)
{
- char *link = page_address(page);
- struct inode *i = page->mapping->host;
+ char *link = folio_address(folio);
+ struct inode *i = folio->mapping->host;
struct fnode *fnode;
struct buffer_head *bh;
int err;
@@ -493,25 +490,17 @@ static int hpfs_symlink_readpage(struct file *file, struct page *page)
goto fail;
err = hpfs_read_ea(i->i_sb, fnode, "SYMLINK", link, PAGE_SIZE);
brelse(bh);
- if (err)
- goto fail;
- hpfs_unlock(i->i_sb);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
-
fail:
hpfs_unlock(i->i_sb);
- SetPageError(page);
- unlock_page(page);
+ folio_end_read(folio, err == 0);
return err;
}
const struct address_space_operations hpfs_symlink_aops = {
- .readpage = hpfs_symlink_readpage
+ .read_folio = hpfs_symlink_read_folio
};
-static int hpfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int hpfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index a7dbfc892022..371aa6de8075 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -9,7 +9,9 @@
#include "hpfs_fn.h"
#include <linux/module.h>
-#include <linux/parser.h>
+#include <linux/fs_struct.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/magic.h>
@@ -90,7 +92,7 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
hpfs_sb(s)->sb_was_error = 1;
}
-/*
+/*
* A little trick to detect cycles in many hpfs structures and don't let the
* kernel crash on corrupted filesystem. When first called, set c2 to 0.
*
@@ -232,7 +234,7 @@ static struct kmem_cache * hpfs_inode_cachep;
static struct inode *hpfs_alloc_inode(struct super_block *sb)
{
struct hpfs_inode_info *ei;
- ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
+ ei = alloc_inode_sb(sb, hpfs_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
return &ei->vfs_inode;
@@ -255,7 +257,7 @@ static int init_inodecache(void)
hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache",
sizeof(struct hpfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (hpfs_inode_cachep == NULL)
return -ENOMEM;
@@ -272,146 +274,70 @@ static void destroy_inodecache(void)
kmem_cache_destroy(hpfs_inode_cachep);
}
-/*
- * A tiny parser for option strings, stolen from dosfs.
- * Stolen again from read-only hpfs.
- * And updated for table-driven option parsing.
- */
-
enum {
- Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis,
- Opt_check_none, Opt_check_normal, Opt_check_strict,
- Opt_err_cont, Opt_err_ro, Opt_err_panic,
- Opt_eas_no, Opt_eas_ro, Opt_eas_rw,
- Opt_chkdsk_no, Opt_chkdsk_errors, Opt_chkdsk_always,
- Opt_timeshift, Opt_err,
+ Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case,
+ Opt_check, Opt_err, Opt_eas, Opt_chkdsk, Opt_timeshift,
};
-static const match_table_t tokens = {
- {Opt_help, "help"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_umask, "umask=%o"},
- {Opt_case_lower, "case=lower"},
- {Opt_case_asis, "case=asis"},
- {Opt_check_none, "check=none"},
- {Opt_check_normal, "check=normal"},
- {Opt_check_strict, "check=strict"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_err_panic, "errors=panic"},
- {Opt_eas_no, "eas=no"},
- {Opt_eas_ro, "eas=ro"},
- {Opt_eas_rw, "eas=rw"},
- {Opt_chkdsk_no, "chkdsk=no"},
- {Opt_chkdsk_errors, "chkdsk=errors"},
- {Opt_chkdsk_always, "chkdsk=always"},
- {Opt_timeshift, "timeshift=%d"},
- {Opt_err, NULL},
+static const struct constant_table hpfs_param_case[] = {
+ {"asis", 0},
+ {"lower", 1},
+ {}
};
-static int parse_opts(char *opts, kuid_t *uid, kgid_t *gid, umode_t *umask,
- int *lowercase, int *eas, int *chk, int *errs,
- int *chkdsk, int *timeshift)
-{
- char *p;
- int option;
+static const struct constant_table hpfs_param_check[] = {
+ {"none", 0},
+ {"normal", 1},
+ {"strict", 2},
+ {}
+};
- if (!opts)
- return 1;
+static const struct constant_table hpfs_param_err[] = {
+ {"continue", 0},
+ {"remount-ro", 1},
+ {"panic", 2},
+ {}
+};
- /*pr_info("Parsing opts: '%s'\n",opts);*/
-
- while ((p = strsep(&opts, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_help:
- return 2;
- case Opt_uid:
- if (match_int(args, &option))
- return 0;
- *uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(*uid))
- return 0;
- break;
- case Opt_gid:
- if (match_int(args, &option))
- return 0;
- *gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(*gid))
- return 0;
- break;
- case Opt_umask:
- if (match_octal(args, &option))
- return 0;
- *umask = option;
- break;
- case Opt_case_lower:
- *lowercase = 1;
- break;
- case Opt_case_asis:
- *lowercase = 0;
- break;
- case Opt_check_none:
- *chk = 0;
- break;
- case Opt_check_normal:
- *chk = 1;
- break;
- case Opt_check_strict:
- *chk = 2;
- break;
- case Opt_err_cont:
- *errs = 0;
- break;
- case Opt_err_ro:
- *errs = 1;
- break;
- case Opt_err_panic:
- *errs = 2;
- break;
- case Opt_eas_no:
- *eas = 0;
- break;
- case Opt_eas_ro:
- *eas = 1;
- break;
- case Opt_eas_rw:
- *eas = 2;
- break;
- case Opt_chkdsk_no:
- *chkdsk = 0;
- break;
- case Opt_chkdsk_errors:
- *chkdsk = 1;
- break;
- case Opt_chkdsk_always:
- *chkdsk = 2;
- break;
- case Opt_timeshift:
- {
- int m = 1;
- char *rhs = args[0].from;
- if (!rhs || !*rhs)
- return 0;
- if (*rhs == '-') m = -1;
- if (*rhs == '+' || *rhs == '-') rhs++;
- *timeshift = simple_strtoul(rhs, &rhs, 0) * m;
- if (*rhs)
- return 0;
- break;
- }
- default:
- return 0;
- }
- }
- return 1;
-}
+static const struct constant_table hpfs_param_eas[] = {
+ {"no", 0},
+ {"ro", 1},
+ {"rw", 2},
+ {}
+};
+
+static const struct constant_table hpfs_param_chkdsk[] = {
+ {"no", 0},
+ {"errors", 1},
+ {"always", 2},
+ {}
+};
+
+static const struct fs_parameter_spec hpfs_param_spec[] = {
+ fsparam_flag ("help", Opt_help),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_u32oct ("umask", Opt_umask),
+ fsparam_enum ("case", Opt_case, hpfs_param_case),
+ fsparam_enum ("check", Opt_check, hpfs_param_check),
+ fsparam_enum ("errors", Opt_err, hpfs_param_err),
+ fsparam_enum ("eas", Opt_eas, hpfs_param_eas),
+ fsparam_enum ("chkdsk", Opt_chkdsk, hpfs_param_chkdsk),
+ fsparam_s32 ("timeshift", Opt_timeshift),
+ {}
+};
+
+struct hpfs_fc_context {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t umask;
+ int lowercase;
+ int eas;
+ int chk;
+ int errs;
+ int chkdsk;
+ int timeshift;
+};
static inline void hpfs_help(void)
{
@@ -439,49 +365,88 @@ HPFS filesystem options:\n\
\n");
}
-static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+static int hpfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- kuid_t uid;
- kgid_t gid;
- umode_t umask;
- int lowercase, eas, chk, errs, chkdsk, timeshift;
- int o;
+ struct hpfs_fc_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, hpfs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_help:
+ hpfs_help();
+ return -EINVAL;
+ case Opt_uid:
+ ctx->uid = result.uid;
+ break;
+ case Opt_gid:
+ ctx->gid = result.gid;
+ break;
+ case Opt_umask:
+ ctx->umask = result.uint_32;
+ break;
+ case Opt_case:
+ ctx->lowercase = result.uint_32;
+ break;
+ case Opt_check:
+ ctx->chk = result.uint_32;
+ break;
+ case Opt_err:
+ ctx->errs = result.uint_32;
+ break;
+ case Opt_eas:
+ ctx->eas = result.uint_32;
+ break;
+ case Opt_chkdsk:
+ ctx->chkdsk = result.uint_32;
+ break;
+ case Opt_timeshift:
+ {
+ char *rhs = param->string;
+ int timeshift;
+
+ if (kstrtoint(rhs, 0, &timeshift))
+ return -EINVAL;
+ ctx->timeshift = timeshift;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hpfs_reconfigure(struct fs_context *fc)
+{
+ struct hpfs_fc_context *ctx = fc->fs_private;
+ struct super_block *s = fc->root->d_sb;
struct hpfs_sb_info *sbi = hpfs_sb(s);
sync_filesystem(s);
- *flags |= SB_NOATIME;
+ fc->sb_flags |= SB_NOATIME;
hpfs_lock(s);
- uid = sbi->sb_uid; gid = sbi->sb_gid;
- umask = 0777 & ~sbi->sb_mode;
- lowercase = sbi->sb_lowercase;
- eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk;
- errs = sbi->sb_err; timeshift = sbi->sb_timeshift;
-
- if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase,
- &eas, &chk, &errs, &chkdsk, &timeshift))) {
- pr_err("bad mount options.\n");
- goto out_err;
- }
- if (o == 2) {
- hpfs_help();
- goto out_err;
- }
- if (timeshift != sbi->sb_timeshift) {
+
+ if (ctx->timeshift != sbi->sb_timeshift) {
pr_err("timeshift can't be changed using remount.\n");
goto out_err;
}
unmark_dirty(s);
- sbi->sb_uid = uid; sbi->sb_gid = gid;
- sbi->sb_mode = 0777 & ~umask;
- sbi->sb_lowercase = lowercase;
- sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
- sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
+ sbi->sb_uid = ctx->uid; sbi->sb_gid = ctx->gid;
+ sbi->sb_mode = 0777 & ~ctx->umask;
+ sbi->sb_lowercase = ctx->lowercase;
+ sbi->sb_eas = ctx->eas; sbi->sb_chk = ctx->chk;
+ sbi->sb_chkdsk = ctx->chkdsk;
+ sbi->sb_err = ctx->errs; sbi->sb_timeshift = ctx->timeshift;
- if (!(*flags & SB_RDONLY)) mark_dirty(s, 1);
+ if (!(fc->sb_flags & SB_RDONLY)) mark_dirty(s, 1);
hpfs_unlock(s);
return 0;
@@ -530,30 +495,24 @@ static const struct super_operations hpfs_sops =
.evict_inode = hpfs_evict_inode,
.put_super = hpfs_put_super,
.statfs = hpfs_statfs,
- .remount_fs = hpfs_remount_fs,
.show_options = hpfs_show_options,
};
-static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+static int hpfs_fill_super(struct super_block *s, struct fs_context *fc)
{
+ struct hpfs_fc_context *ctx = fc->fs_private;
struct buffer_head *bh0, *bh1, *bh2;
struct hpfs_boot_block *bootblock;
struct hpfs_super_block *superblock;
struct hpfs_spare_block *spareblock;
struct hpfs_sb_info *sbi;
struct inode *root;
-
- kuid_t uid;
- kgid_t gid;
- umode_t umask;
- int lowercase, eas, chk, errs, chkdsk, timeshift;
+ int silent = fc->sb_flags & SB_SILENT;
dnode_secno root_dno;
struct hpfs_dirent *de = NULL;
struct quad_buffer_head qbh;
- int o;
-
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi) {
return -ENOMEM;
@@ -563,26 +522,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
mutex_init(&sbi->hpfs_mutex);
hpfs_lock(s);
- uid = current_uid();
- gid = current_gid();
- umask = current_umask();
- lowercase = 0;
- eas = 2;
- chk = 1;
- errs = 1;
- chkdsk = 1;
- timeshift = 0;
-
- if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase,
- &eas, &chk, &errs, &chkdsk, &timeshift))) {
- pr_err("bad mount options.\n");
- goto bail0;
- }
- if (o==2) {
- hpfs_help();
- goto bail0;
- }
-
/*sbi->sb_mounting = 1;*/
sb_set_blocksize(s, 512);
sbi->sb_fs_size = -1;
@@ -612,7 +551,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
/* Fill superblock stuff */
s->s_magic = HPFS_SUPER_MAGIC;
s->s_op = &hpfs_sops;
- s->s_d_op = &hpfs_dentry_operations;
+ set_default_d_op(s, &hpfs_dentry_operations);
s->s_time_min = local_to_gmt(s, 0);
s->s_time_max = local_to_gmt(s, U32_MAX);
@@ -622,17 +561,17 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start);
sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band);
sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap);
- sbi->sb_uid = uid;
- sbi->sb_gid = gid;
- sbi->sb_mode = 0777 & ~umask;
+ sbi->sb_uid = ctx->uid;
+ sbi->sb_gid = ctx->gid;
+ sbi->sb_mode = 0777 & ~ctx->umask;
sbi->sb_n_free = -1;
sbi->sb_n_free_dnodes = -1;
- sbi->sb_lowercase = lowercase;
- sbi->sb_eas = eas;
- sbi->sb_chk = chk;
- sbi->sb_chkdsk = chkdsk;
- sbi->sb_err = errs;
- sbi->sb_timeshift = timeshift;
+ sbi->sb_lowercase = ctx->lowercase;
+ sbi->sb_eas = ctx->eas;
+ sbi->sb_chk = ctx->chk;
+ sbi->sb_chkdsk = ctx->chkdsk;
+ sbi->sb_err = ctx->errs;
+ sbi->sb_timeshift = ctx->timeshift;
sbi->sb_was_error = 0;
sbi->sb_cp_table = NULL;
sbi->sb_c_bitmap = -1;
@@ -653,7 +592,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
/* Check for general fs errors*/
if (spareblock->dirty && !spareblock->old_wrote) {
- if (errs == 2) {
+ if (sbi->sb_err == 2) {
pr_err("Improperly stopped, not mounted\n");
goto bail4;
}
@@ -667,16 +606,16 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
}
if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
- if (errs >= 2) {
+ if (sbi->sb_err >= 2) {
pr_err("Spare dnodes used, try chkdsk\n");
mark_dirty(s, 0);
goto bail4;
}
hpfs_error(s, "warning: spare dnodes used, try chkdsk");
- if (errs == 0)
+ if (sbi->sb_err == 0)
pr_err("Proceeding, but your filesystem could be corrupted if you delete files or directories\n");
}
- if (chk) {
+ if (sbi->sb_chk) {
unsigned a;
if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) ||
le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) {
@@ -725,12 +664,15 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
if (!de)
hpfs_error(s, "unable to find root dir");
else {
- root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
- root->i_atime.tv_nsec = 0;
- root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
- root->i_mtime.tv_nsec = 0;
- root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
- root->i_ctime.tv_nsec = 0;
+ inode_set_atime(root,
+ local_to_gmt(s, le32_to_cpu(de->read_date)),
+ 0);
+ inode_set_mtime(root,
+ local_to_gmt(s, le32_to_cpu(de->write_date)),
+ 0);
+ inode_set_ctime(root,
+ local_to_gmt(s, le32_to_cpu(de->creation_date)),
+ 0);
hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
hpfs_i(root)->i_parent_dir = root->i_ino;
if (root->i_size == -1)
@@ -752,18 +694,70 @@ bail0:
return -EINVAL;
}
-static struct dentry *hpfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hpfs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, hpfs_fill_super);
+ return get_tree_bdev(fc, hpfs_fill_super);
}
+static void hpfs_free_fc(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations hpfs_fc_context_ops = {
+ .parse_param = hpfs_parse_param,
+ .get_tree = hpfs_get_tree,
+ .reconfigure = hpfs_reconfigure,
+ .free = hpfs_free_fc,
+};
+
+static int hpfs_init_fs_context(struct fs_context *fc)
+{
+ struct hpfs_fc_context *ctx;
+
+ ctx = kzalloc(sizeof(struct hpfs_fc_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+ struct hpfs_sb_info *sbi = hpfs_sb(sb);
+
+ ctx->uid = sbi->sb_uid;
+ ctx->gid = sbi->sb_gid;
+ ctx->umask = 0777 & ~sbi->sb_mode;
+ ctx->lowercase = sbi->sb_lowercase;
+ ctx->eas = sbi->sb_eas;
+ ctx->chk = sbi->sb_chk;
+ ctx->chkdsk = sbi->sb_chkdsk;
+ ctx->errs = sbi->sb_err;
+ ctx->timeshift = sbi->sb_timeshift;
+
+ } else {
+ ctx->uid = current_uid();
+ ctx->gid = current_gid();
+ ctx->umask = current_umask();
+ ctx->lowercase = 0;
+ ctx->eas = 2;
+ ctx->chk = 1;
+ ctx->errs = 1;
+ ctx->chkdsk = 1;
+ ctx->timeshift = 0;
+ }
+
+ fc->fs_private = ctx;
+ fc->ops = &hpfs_fc_context_ops;
+
+ return 0;
+};
+
static struct file_system_type hpfs_fs_type = {
.owner = THIS_MODULE,
.name = "hpfs",
- .mount = hpfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = hpfs_init_fs_context,
+ .parameters = hpfs_param_spec,
};
MODULE_ALIAS_FS("hpfs");
@@ -790,4 +784,5 @@ static void __exit exit_hpfs_fs(void)
module_init(init_hpfs_fs)
module_exit(exit_hpfs_fs)
+MODULE_DESCRIPTION("OS/2 HPFS file system support");
MODULE_LICENSE("GPL");
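
The super.c rework above moves hpfs from the old strsep()/match_token() parser to the new mount API: options are declared in an fs_parameter_spec table, enumerated values come from constant_table arrays, and fs_parse() hands the decoded value to hpfs_parse_param() in struct fs_parse_result. The toy lookup below mimics only the string-to-constant step performed for fsparam_enum() options; the types and function are userspace stand-ins, not the in-kernel API:

/*
 * Hedged sketch: toy_constant_table mirrors the shape of the hpfs_param_eas
 * table above, and toy_lookup_constant() roughly corresponds to what the
 * mount API does when resolving an enum option such as "eas=ro".
 */
#include <stdio.h>
#include <string.h>

struct toy_constant_table {
	const char *name;
	int value;
};

static const struct toy_constant_table toy_eas[] = {
	{ "no", 0 },
	{ "ro", 1 },
	{ "rw", 2 },
	{ NULL, 0 },
};

static int toy_lookup_constant(const struct toy_constant_table *tbl,
			       const char *name, int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(tbl->name, name))
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("eas=ro  -> %d\n", toy_lookup_constant(toy_eas, "ro", -1));
	printf("eas=bad -> %d\n", toy_lookup_constant(toy_eas, "bad", -1));
	return 0;
}
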
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 49d2e686be74..3b4c152c5c73 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -11,7 +11,6 @@
#include <linux/thread_info.h>
#include <asm/current.h>
-#include <linux/sched/signal.h> /* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -40,9 +39,11 @@
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
-static const struct super_operations hugetlbfs_ops;
+#define CREATE_TRACE_POINTS
+#include <trace/events/hugetlbfs.h>
+
static const struct address_space_operations hugetlbfs_aops;
-const struct file_operations hugetlbfs_file_operations;
+static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
@@ -75,49 +76,16 @@ enum hugetlb_param {
};
static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_string("min_size", Opt_min_size),
fsparam_u32oct("mode", Opt_mode),
fsparam_string("nr_inodes", Opt_nr_inodes),
fsparam_string("pagesize", Opt_pagesize),
fsparam_string("size", Opt_size),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
{}
};
-#ifdef CONFIG_NUMA
-static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
- struct inode *inode, pgoff_t index)
-{
- vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
- index);
-}
-
-static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
-{
- mpol_cond_put(vma->vm_policy);
-}
-#else
-static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
- struct inode *inode, pgoff_t index)
-{
-}
-
-static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
-{
-}
-#endif
-
-static void huge_pagevec_release(struct pagevec *pvec)
-{
- int i;
-
- for (i = 0; i < pagevec_count(pvec); ++i)
- put_page(pvec->pages[i]);
-
- pagevec_reinit(pvec);
-}
-
/*
* Mask used when checking the page offset value passed in via system
* calls. This value will be converted to a loff_t which is signed.
@@ -128,13 +96,20 @@ static void huge_pagevec_release(struct pagevec *pvec)
#define PGOFF_LOFFT_MAX \
(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
-static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int hugetlb_file_mmap_prepare_success(const struct vm_area_struct *vma)
+{
+ /* Unfortunately, we have to reassign vma->vm_private_data. */
+ return hugetlb_vma_lock_alloc((struct vm_area_struct *)vma);
+}
+
+static int hugetlbfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
- struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
+ vm_flags_t vm_flags;
/*
* vma address alignment (but not the pgoff alignment) has
@@ -144,12 +119,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* way when do_mmap unwinds (may be important on powerpc
* and ia64).
*/
- vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
- vma->vm_ops = &hugetlb_vm_ops;
-
- ret = seal_check_future_write(info->seals, vma);
- if (ret)
- return ret;
+ desc->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ desc->vm_ops = &hugetlb_vm_ops;
/*
* page based offset in vm_pgoff could be sufficiently large to
@@ -158,16 +129,16 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* sizeof(unsigned long). So, only check in those instances.
*/
if (sizeof(unsigned long) == sizeof(loff_t)) {
- if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+ if (desc->pgoff & PGOFF_LOFFT_MAX)
return -EINVAL;
}
/* must be huge page aligned */
- if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
+ if (desc->pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;
- vma_len = (loff_t)(vma->vm_end - vma->vm_start);
- len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ vma_len = (loff_t)vma_desc_size(desc);
+ len = vma_len + ((loff_t)desc->pgoff << PAGE_SHIFT);
/* check for overflow */
if (len < vma_len)
return -EINVAL;
@@ -176,18 +147,41 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
ret = -ENOMEM;
- if (!hugetlb_reserve_pages(inode,
- vma->vm_pgoff >> huge_page_order(h),
- len >> huge_page_shift(h), vma,
- vma->vm_flags))
+
+ vm_flags = desc->vm_flags;
+ /*
+ * For SHM_HUGETLB, the pages are reserved in the shmget() call, so skip
+ * reserving here. Note: the inode flag S_PRIVATE is set only for the
+ * SHM hugetlbfs file.
+ */
+ if (inode->i_flags & S_PRIVATE)
+ vm_flags |= VM_NORESERVE;
+
+ if (hugetlb_reserve_pages(inode,
+ desc->pgoff >> huge_page_order(h),
+ len >> huge_page_shift(h), desc,
+ vm_flags) < 0)
goto out;
ret = 0;
- if (vma->vm_flags & VM_WRITE && inode->i_size < len)
+ if ((desc->vm_flags & VM_WRITE) && inode->i_size < len)
i_size_write(inode, len);
out:
inode_unlock(inode);
+ if (!ret) {
+ /* Allocate the VMA lock after we set it up. */
+ desc->action.success_hook = hugetlb_file_mmap_prepare_success;
+ /*
+ * We cannot permit rmap to find this VMA in the time
+ * between the VMA being inserted into the VMA tree and the
+ * completion/success hook being invoked.
+ *
+ * This is because we establish a per-VMA hugetlb lock which can
+ * be raced by rmap.
+ */
+ desc->action.hide_from_rmap_until_complete = true;
+ }
return ret;
}
@@ -195,128 +189,52 @@ out:
* Called under mmap_write_lock(mm).
*/
-#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-static unsigned long
-hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
-
- info.flags = 0;
- info.length = len;
- info.low_limit = current->mm->mmap_base;
- info.high_limit = TASK_SIZE;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- return vm_unmapped_area(&info);
-}
-
-static unsigned long
-hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = current->mm->mmap_base;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- addr = vm_unmapped_area(&info);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- if (unlikely(offset_in_page(addr))) {
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = current->mm->mmap_base;
- info.high_limit = TASK_SIZE;
- addr = vm_unmapped_area(&info);
- }
-
- return addr;
-}
-
-static unsigned long
+unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ unsigned long addr0 = 0;
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
+ if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
+ return -EINVAL;
+ if (addr)
+ addr0 = ALIGN(addr, huge_page_size(h));
- /*
- * Use mm->get_unmapped_area value as a hint to use topdown routine.
- * If architectures have special needs, they should define their own
- * version of hugetlb_get_unmapped_area.
- */
- if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
- return hugetlb_get_unmapped_area_topdown(file, addr, len,
- pgoff, flags);
- return hugetlb_get_unmapped_area_bottomup(file, addr, len,
- pgoff, flags);
+ return mm_get_unmapped_area_vmflags(file, addr0, len, pgoff, flags, 0);
}
-#endif
-static size_t
-hugetlbfs_read_actor(struct page *page, unsigned long offset,
- struct iov_iter *to, unsigned long size)
+/*
+ * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
+ * Returns the maximum number of bytes one can read without touching the 1st raw
+ * HWPOISON page.
+ */
+static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
+ size_t bytes)
{
- size_t copied = 0;
- int i, chunksize;
-
- /* Find which 4k chunk and offset with in that chunk */
- i = offset >> PAGE_SHIFT;
- offset = offset & ~PAGE_MASK;
-
- while (size) {
- size_t n;
- chunksize = PAGE_SIZE;
- if (offset)
- chunksize -= offset;
- if (chunksize > size)
- chunksize = size;
- n = copy_page_to_iter(&page[i], offset, chunksize, to);
- copied += n;
- if (n != chunksize)
- return copied;
- offset = 0;
- size -= chunksize;
- i++;
- }
- return copied;
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ size_t safe_bytes;
+
+ if (is_raw_hwpoison_page_in_hugepage(page))
+ return 0;
+ /* Safe to read the remaining bytes in this page. */
+ safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
+ page++;
+
+ /* Check each remaining page as long as we are not done yet. */
+ for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
+ if (is_raw_hwpoison_page_in_hugepage(page))
+ break;
+
+ return min(safe_bytes, bytes);
}
/*
* Support for read() - Find the page attached to f_mapping and copy out the
- * data. Its *very* similar to generic_file_buffered_read(), we can't use that
- * since it has PAGE_SIZE assumptions.
+ * data. This provides functionality similar to filemap_read().
*/
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
@@ -331,8 +249,8 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
ssize_t retval = 0;
while (iov_iter_count(to)) {
- struct page *page;
- size_t nr, copied;
+ struct folio *folio;
+ size_t nr, copied, want;
/* nr is the maximum number of bytes to copy from this page */
nr = huge_page_size(h);
@@ -349,22 +267,38 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
nr = nr - offset;
- /* Find the page */
- page = find_lock_page(mapping, index);
- if (unlikely(page == NULL)) {
+ /* Find the folio */
+ folio = filemap_lock_hugetlb_folio(h, mapping, index);
+ if (IS_ERR(folio)) {
/*
* We have a HOLE, zero out the user-buffer for the
* length of the hole or request.
*/
copied = iov_iter_zero(nr, to);
} else {
- unlock_page(page);
+ folio_unlock(folio);
+
+ if (!folio_test_hwpoison(folio))
+ want = nr;
+ else {
+ /*
+ * Adjust how many bytes safe to read without
+ * touching the 1st raw HWPOISON page after
+ * offset.
+ */
+ want = adjust_range_hwpoison(folio, offset, nr);
+ if (want == 0) {
+ folio_put(folio);
+ retval = -EIO;
+ break;
+ }
+ }
/*
- * We have the page, copy it to user space buffer.
+ * We have the folio, copy it to user space buffer.
*/
- copied = hugetlbfs_read_actor(page, offset, to, nr);
- put_page(page);
+ copied = copy_folio_to_iter(folio, offset, want, to);
+ folio_put(folio);
}
offset += copied;
retval += copied;
@@ -380,65 +314,244 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
return retval;
}
-static int hugetlbfs_write_begin(struct file *file,
+static int hugetlbfs_write_begin(const struct kiocb *iocb,
struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
return -EINVAL;
}
-static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+static int hugetlbfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
BUG();
return -EINVAL;
}
-static void remove_huge_page(struct page *page)
+static void hugetlb_delete_from_page_cache(struct folio *folio)
{
- ClearPageDirty(page);
- ClearPageUptodate(page);
- delete_from_page_cache(page);
+ folio_clear_dirty(folio);
+ folio_clear_uptodate(folio);
+ filemap_remove_folio(folio);
+}
+
+/*
+ * Called with i_mmap_rwsem held for inode based vma maps. This makes
+ * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault
+ * mutex for the page in the mapping. So, we can not race with page being
+ * faulted into the vma.
+ */
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ pte_t *ptep, pte;
+
+ ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
+ if (!ptep)
+ return false;
+
+ pte = huge_ptep_get(vma->vm_mm, addr, ptep);
+ if (huge_pte_none(pte) || !pte_present(pte))
+ return false;
+
+ if (pte_pfn(pte) == pfn)
+ return true;
+
+ return false;
+}
+
+/*
+ * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
+ * No, because the interval tree returns us only those vmas
+ * which overlap the truncated area starting at pgoff,
+ * and no vma on a 32-bit arch can span beyond the 4GB.
+ */
+static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
+{
+ unsigned long offset = 0;
+
+ if (vma->vm_pgoff < start)
+ offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
+
+ return vma->vm_start + offset;
+}
+
+static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
+{
+ unsigned long t_end;
+
+ if (!end)
+ return vma->vm_end;
+
+ t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
+ if (t_end > vma->vm_end)
+ t_end = vma->vm_end;
+ return t_end;
+}
+
+/*
+ * Called with hugetlb fault mutex held. Therefore, no more mappings to
+ * this folio can be created while executing the routine.
+ */
+static void hugetlb_unmap_file_folio(struct hstate *h,
+ struct address_space *mapping,
+ struct folio *folio, pgoff_t index)
+{
+ struct rb_root_cached *root = &mapping->i_mmap;
+ struct hugetlb_vma_lock *vma_lock;
+ unsigned long pfn = folio_pfn(folio);
+ struct vm_area_struct *vma;
+ unsigned long v_start;
+ unsigned long v_end;
+ pgoff_t start, end;
+
+ start = index * pages_per_huge_page(h);
+ end = (index + 1) * pages_per_huge_page(h);
+
+ i_mmap_lock_write(mapping);
+retry:
+ vma_lock = NULL;
+ vma_interval_tree_foreach(vma, root, start, end - 1) {
+ v_start = vma_offset_start(vma, start);
+ v_end = vma_offset_end(vma, end);
+
+ if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
+ continue;
+
+ if (!hugetlb_vma_trylock_write(vma)) {
+ vma_lock = vma->vm_private_data;
+ /*
+ * If we can not get vma lock, we need to drop
+ * immap_sema and take locks in order. First,
+ * take a ref on the vma_lock structure so that
+ * we can be guaranteed it will not go away when
+ * dropping immap_sema.
+ */
+ kref_get(&vma_lock->refs);
+ break;
+ }
+
+ unmap_hugepage_range(vma, v_start, v_end, NULL,
+ ZAP_FLAG_DROP_MARKER);
+ hugetlb_vma_unlock_write(vma);
+ }
+
+ i_mmap_unlock_write(mapping);
+
+ if (vma_lock) {
+ /*
+ * Wait on vma_lock. We know it is still valid as we have
+ * a reference. We must 'open code' vma locking as we do
+ * not know if vma_lock is still attached to vma.
+ */
+ down_write(&vma_lock->rw_sema);
+ i_mmap_lock_write(mapping);
+
+ vma = vma_lock->vma;
+ if (!vma) {
+ /*
+ * If lock is no longer attached to vma, then just
+ * unlock, drop our reference and retry looking for
+ * other vmas.
+ */
+ up_write(&vma_lock->rw_sema);
+ kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+ goto retry;
+ }
+
+ /*
+ * vma_lock is still attached to vma. Check to see if vma
+ * still maps page and if so, unmap.
+ */
+ v_start = vma_offset_start(vma, start);
+ v_end = vma_offset_end(vma, end);
+ if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
+ unmap_hugepage_range(vma, v_start, v_end, NULL,
+ ZAP_FLAG_DROP_MARKER);
+
+ kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+ hugetlb_vma_unlock_write(vma);
+
+ goto retry;
+ }
}
static void
-hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
+hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
+ zap_flags_t zap_flags)
{
struct vm_area_struct *vma;
/*
- * end == 0 indicates that the entire range after
- * start should be unmapped.
+ * end == 0 indicates that the entire range after start should be
+ * unmapped. Note, end is exclusive, whereas the interval tree takes
+ * an inclusive "last".
*/
- vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
- unsigned long v_offset;
+ vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
+ unsigned long v_start;
unsigned long v_end;
+ if (!hugetlb_vma_trylock_write(vma))
+ continue;
+
+ v_start = vma_offset_start(vma, start);
+ v_end = vma_offset_end(vma, end);
+
+ unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);
+
/*
- * Can the expression below overflow on 32-bit arches?
- * No, because the interval tree returns us only those vmas
- * which overlap the truncated area starting at pgoff,
- * and no vma on a 32-bit arch can span beyond the 4GB.
+ * Note that vma lock only exists for shared/non-private
+ * vmas. Therefore, lock is not held when calling
+ * unmap_hugepage_range for private vmas.
*/
- if (vma->vm_pgoff < start)
- v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
- else
- v_offset = 0;
-
- if (!end)
- v_end = vma->vm_end;
- else {
- v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
- + vma->vm_start;
- if (v_end > vma->vm_end)
- v_end = vma->vm_end;
- }
+ hugetlb_vma_unlock_write(vma);
+ }
+}
+
+/*
+ * Called with hugetlb fault mutex held.
+ * Returns true if page was actually removed, false otherwise.
+ */
+static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
+ struct address_space *mapping,
+ struct folio *folio, pgoff_t index,
+ bool truncate_op)
+{
+ bool ret = false;
+
+ /*
+ * If folio is mapped, it was faulted in after being
+ * unmapped in caller or hugetlb_vmdelete_list() skips
+ * unmapping it due to fail to grab lock. Unmap (again)
+ * while holding the fault mutex. The mutex will prevent
+ * faults until we finish removing the folio. Hold folio
+ * lock to guarantee no concurrent migration.
+ */
+ folio_lock(folio);
+ if (unlikely(folio_mapped(folio)))
+ hugetlb_unmap_file_folio(h, mapping, folio, index);
- unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
- NULL);
+ /*
+ * We must remove the folio from page cache before removing
+ * the region/ reserve map (hugetlb_unreserve_pages). In
+ * rare out of memory conditions, removal of the region/reserve
+ * map could fail. Correspondingly, the subpool and global
+ * reserve usage count can need to be adjusted.
+ */
+ VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
+ hugetlb_delete_from_page_cache(folio);
+ ret = true;
+ if (!truncate_op) {
+ if (unlikely(hugetlb_unreserve_pages(inode, index,
+ index + 1, 1)))
+ hugetlb_fix_reserve_counts(inode);
}
+
+ folio_unlock(folio);
+ return ret;
}
/*
@@ -448,10 +561,10 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
* truncation is indicated by end of range being LLONG_MAX
* In this case, we first scan the range and release found pages.
* After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
- * maps and global counts. Page faults can not race with truncation
- * in this routine. hugetlb_no_page() holds i_mmap_rwsem and prevents
- * page faults in the truncated range by checking i_size. i_size is
- * modified while holding i_mmap_rwsem.
+ * maps and global counts. Page faults can race with truncation.
+ * During faults, hugetlb_no_page() checks i_size before page allocation,
+ * and again after obtaining the page table lock. It will 'back out'
+ * allocations in the truncated range.
* hole punch is indicated if end is not LLONG_MAX
* In the hole punch case we scan the range and release found pages.
* Only when releasing a page is the associated region/reserve map
@@ -466,94 +579,47 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
{
struct hstate *h = hstate_inode(inode);
struct address_space *mapping = &inode->i_data;
- const pgoff_t start = lstart >> huge_page_shift(h);
- const pgoff_t end = lend >> huge_page_shift(h);
- struct pagevec pvec;
+ const pgoff_t end = lend >> PAGE_SHIFT;
+ struct folio_batch fbatch;
pgoff_t next, index;
int i, freed = 0;
bool truncate_op = (lend == LLONG_MAX);
- pagevec_init(&pvec);
- next = start;
- while (next < end) {
- /*
- * When no more pages are found, we are done.
- */
- if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
- break;
-
- for (i = 0; i < pagevec_count(&pvec); ++i) {
- struct page *page = pvec.pages[i];
+ folio_batch_init(&fbatch);
+ next = lstart >> PAGE_SHIFT;
+ while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); ++i) {
+ struct folio *folio = fbatch.folios[i];
u32 hash = 0;
- index = page->index;
- if (!truncate_op) {
- /*
- * Only need to hold the fault mutex in the
- * hole punch case. This prevents races with
- * page faults. Races are not possible in the
- * case of truncation.
- */
- hash = hugetlb_fault_mutex_hash(mapping, index);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
- }
+ index = folio->index >> huge_page_order(h);
+ hash = hugetlb_fault_mutex_hash(mapping, index);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
- * If page is mapped, it was faulted in after being
- * unmapped in caller. Unmap (again) now after taking
- * the fault mutex. The mutex will prevent faults
- * until we finish removing the page.
- *
- * This race can only happen in the hole punch case.
- * Getting here in a truncate operation is a bug.
+ * Remove folio that was part of folio_batch.
*/
- if (unlikely(page_mapped(page))) {
- BUG_ON(truncate_op);
-
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_lock_write(mapping);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
- hugetlb_vmdelete_list(&mapping->i_mmap,
- index * pages_per_huge_page(h),
- (index + 1) * pages_per_huge_page(h));
- i_mmap_unlock_write(mapping);
- }
+ if (remove_inode_single_folio(h, inode, mapping, folio,
+ index, truncate_op))
+ freed++;
- lock_page(page);
- /*
- * We must free the huge page and remove from page
- * cache (remove_huge_page) BEFORE removing the
- * region/reserve map (hugetlb_unreserve_pages). In
- * rare out of memory conditions, removal of the
- * region/reserve map could fail. Correspondingly,
- * the subpool and global reserve usage count can need
- * to be adjusted.
- */
- VM_BUG_ON(HPageRestoreReserve(page));
- remove_huge_page(page);
- freed++;
- if (!truncate_op) {
- if (unlikely(hugetlb_unreserve_pages(inode,
- index, index + 1, 1)))
- hugetlb_fix_reserve_counts(inode);
- }
-
- unlock_page(page);
- if (!truncate_op)
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
- huge_pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
if (truncate_op)
- (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
+ (void)hugetlb_unreserve_pages(inode,
+ lstart >> huge_page_shift(h),
+ LONG_MAX, freed);
}
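For clarity on the index conversion used above, a worked example with assumed sizes (not part of the patch): filemap_get_folios() and folio->index are in PAGE_SIZE units, while the fault mutex hash and the reserve map are keyed by hstate-sized index, hence the shift by huge_page_order(h).

/* Illustrative numbers only: 4 KiB base pages and a 2 MiB hstate give
 * huge_page_order(h) == 9. A folio at byte offset 6 MiB has
 * folio->index == 1536 (PAGE_SIZE units), so its hugetlb index is
 * 1536 >> 9 == 3.
 */
static inline pgoff_t cache_index_to_hugetlb_index(struct folio *folio,
						   struct hstate *h)
{
	return folio->index >> huge_page_order(h);
}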
static void hugetlbfs_evict_inode(struct inode *inode)
{
struct resv_map *resv_map;
+ trace_hugetlbfs_evict_inode(inode);
remove_inode_hugepages(inode, 0, LLONG_MAX);
/*
@@ -562,7 +628,7 @@ static void hugetlbfs_evict_inode(struct inode *inode)
* at inode creation time. If this is a device special inode,
* i_mapping may not point to the original address space.
*/
- resv_map = (struct resv_map *)(&inode->i_data)->private_data;
+ resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
/* Only regular and link inodes have associated reserve maps */
if (resv_map)
resv_map_release(&resv_map->refs);
@@ -578,49 +644,88 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
BUG_ON(offset & ~huge_page_mask(h));
pgoff = offset >> PAGE_SHIFT;
- i_mmap_lock_write(mapping);
i_size_write(inode, offset);
+ i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
- hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
+ hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
+ ZAP_FLAG_DROP_MARKER);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, offset, LLONG_MAX);
}
+static void hugetlbfs_zero_partial_page(struct hstate *h,
+ struct address_space *mapping,
+ loff_t start,
+ loff_t end)
+{
+ pgoff_t idx = start >> huge_page_shift(h);
+ struct folio *folio;
+
+ folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+ if (IS_ERR(folio))
+ return;
+
+ start = start & ~huge_page_mask(h);
+ end = end & ~huge_page_mask(h);
+ if (!end)
+ end = huge_page_size(h);
+
+ folio_zero_segment(folio, (size_t)start, (size_t)end);
+
+ folio_unlock(folio);
+ folio_put(folio);
+}
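A hedged sketch of the offset math above (helper name hypothetical): masking with ~huge_page_mask(h) yields the byte offset within the huge page, and a masked end of 0 means the range runs to the end of that page.

/* Illustrative sketch only: with 2 MiB huge pages, start == 3 MiB masks
 * to 1 MiB within the folio; an end that falls exactly on a huge page
 * boundary masks to 0 and is widened to huge_page_size(h).
 */
static inline void partial_page_span(struct hstate *h, loff_t start,
				     loff_t end, size_t *from, size_t *to)
{
	*from = start & ~huge_page_mask(h);
	*to = end & ~huge_page_mask(h);
	if (!*to)
		*to = huge_page_size(h);
}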
+
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
+ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+ struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_inode(inode);
loff_t hpage_size = huge_page_size(h);
loff_t hole_start, hole_end;
/*
- * For hole punch round up the beginning offset of the hole and
- * round down the end.
+ * hole_start and hole_end indicate the full pages within the hole.
*/
hole_start = round_up(offset, hpage_size);
hole_end = round_down(offset + len, hpage_size);
- if (hole_end > hole_start) {
- struct address_space *mapping = inode->i_mapping;
- struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+ inode_lock(inode);
- inode_lock(inode);
+ /* protected by i_rwsem */
+ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+ inode_unlock(inode);
+ return -EPERM;
+ }
- /* protected by i_rwsem */
- if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
- inode_unlock(inode);
- return -EPERM;
- }
+ i_mmap_lock_write(mapping);
- i_mmap_lock_write(mapping);
+ /* If range starts before first full page, zero partial page. */
+ if (offset < hole_start)
+ hugetlbfs_zero_partial_page(h, mapping,
+ offset, min(offset + len, hole_start));
+
+ /* Unmap users of full pages in the hole. */
+ if (hole_end > hole_start) {
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap,
- hole_start >> PAGE_SHIFT,
- hole_end >> PAGE_SHIFT);
- i_mmap_unlock_write(mapping);
- remove_inode_hugepages(inode, hole_start, hole_end);
- inode_unlock(inode);
+ hole_start >> PAGE_SHIFT,
+ hole_end >> PAGE_SHIFT, 0);
}
+ /* If range extends beyond last full page, zero partial page. */
+ if ((offset + len) > hole_end && (offset + len) > hole_start)
+ hugetlbfs_zero_partial_page(h, mapping,
+ hole_end, offset + len);
+
+ i_mmap_unlock_write(mapping);
+
+ /* Remove full pages from the file. */
+ if (hole_end > hole_start)
+ remove_inode_hugepages(inode, hole_start, hole_end);
+
+ inode_unlock(inode);
+
return 0;
}
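To make the rounding in hugetlbfs_punch_hole() concrete, a worked example with an assumed 2 MiB huge page size:

/* Illustrative numbers only:
 *   offset = 1 MiB, len = 4 MiB
 *   hole_start = round_up(1 MiB, 2 MiB)   = 2 MiB
 *   hole_end   = round_down(5 MiB, 2 MiB) = 4 MiB
 * [2 MiB, 4 MiB) is unmapped and removed from the file, while
 * [1 MiB, 2 MiB) and [4 MiB, 5 MiB) are only zeroed in place.
 */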
@@ -642,8 +747,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
- if (mode & FALLOC_FL_PUNCH_HOLE)
- return hugetlbfs_punch_hole(inode, offset, len);
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ error = hugetlbfs_punch_hole(inode, offset, len);
+ goto out_nolock;
+ }
/*
* Default preallocate case.
@@ -667,11 +774,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
/*
* Initialize a pseudo vma as this is required by the huge page
- * allocation routines. If NUMA is configured, use page index
- * as input to create an allocation policy.
+ * allocation routines.
*/
vma_init(&pseudo_vma, mm);
- pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+ vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pseudo_vma.vm_file = file;
for (index = start; index < end; index++) {
@@ -679,7 +785,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* This is supposed to be the vaddr where the page is being
* faulted in, but we have no vaddr here.
*/
- struct page *page;
+ struct folio *folio;
unsigned long addr;
cond_resched();
@@ -693,74 +799,68 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
break;
}
- /* Set numa allocation policy based on index */
- hugetlb_set_vma_policy(&pseudo_vma, inode, index);
-
/* addr is the offset within the file (zero based) */
addr = index * hpage_size;
- /*
- * fault mutex taken here, protects against fault path
- * and hole punch. inode_lock previously taken protects
- * against truncation.
- */
+ /* fault mutex taken here; serializes against the fault path and hole punch */
hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/* See if already present in mapping to avoid alloc/free */
- page = find_get_page(mapping, index);
- if (page) {
- put_page(page);
+ folio = filemap_get_folio(mapping, index << huge_page_order(h));
+ if (!IS_ERR(folio)) {
+ folio_put(folio);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- hugetlb_drop_vma_policy(&pseudo_vma);
continue;
}
/*
- * Allocate page without setting the avoid_reserve argument.
+ * Allocate folio without setting the avoid_reserve argument.
* There certainly are no reserves associated with the
* pseudo_vma. However, there could be shared mappings with
* reserves for the file at the inode level. If we fallocate
- * pages in these areas, we need to consume the reserves
+ * folios in these areas, we need to consume the reserves
* to keep reservation accounting consistent.
*/
- page = alloc_huge_page(&pseudo_vma, addr, 0);
- hugetlb_drop_vma_policy(&pseudo_vma);
- if (IS_ERR(page)) {
+ folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
+ if (IS_ERR(folio)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- error = PTR_ERR(page);
+ error = PTR_ERR(folio);
goto out;
}
- clear_huge_page(page, addr, pages_per_huge_page(h));
- __SetPageUptodate(page);
- error = huge_add_to_page_cache(page, mapping, index);
+ folio_zero_user(folio, addr);
+ __folio_mark_uptodate(folio);
+ error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
- restore_reserve_on_error(h, &pseudo_vma, addr, page);
- put_page(page);
+ restore_reserve_on_error(h, &pseudo_vma, addr, folio);
+ folio_put(folio);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out;
}
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- SetHPageMigratable(page);
+ folio_set_hugetlb_migratable(folio);
/*
- * unlock_page because locked by add_to_page_cache()
- * put_page() due to reference from alloc_huge_page()
+ * folio_unlock because locked by hugetlb_add_to_page_cache()
+ * folio_put() due to reference from alloc_hugetlb_folio()
*/
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
i_size_write(inode, offset + len);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
out:
inode_unlock(inode);
+
+out_nolock:
+ trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
return error;
}
-static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
+static int hugetlbfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
@@ -769,10 +869,12 @@ static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
unsigned int ia_valid = attr->ia_valid;
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(idmap, dentry, attr);
if (error)
return error;
+ trace_hugetlbfs_setattr(inode, dentry, attr);
+
if (ia_valid & ATTR_SIZE) {
loff_t oldsize = inode->i_size;
loff_t newsize = attr->ia_size;
@@ -786,7 +888,7 @@ static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
hugetlb_vmtruncate(inode, newsize);
}
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -802,7 +904,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
inode->i_mode = S_IFDIR | ctx->mode;
inode->i_uid = ctx->uid;
inode->i_gid = ctx->gid;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -821,6 +923,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
+ struct mnt_idmap *idmap,
struct inode *dir,
umode_t mode, dev_t dev)
{
@@ -842,12 +945,12 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
inode->i_ino = get_next_ino();
- inode_init_owner(&init_user_ns, inode, dir, mode);
+ inode_init_owner(idmap, inode, dir, mode);
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- inode->i_mapping->private_data = resv_map;
+ simple_inode_init_ts(inode);
+ inode->i_mapping->i_private_data = resv_map;
info->seals = F_SEAL_SEAL;
switch (mode & S_IFMT) {
default:
@@ -870,6 +973,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
break;
}
lockdep_annotate_inode_mutex_key(inode);
+ trace_hugetlbfs_alloc_inode(inode, dir, mode);
} else {
if (resv_map)
kref_put(&resv_map->refs, resv_map_release);
@@ -881,114 +985,100 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
/*
* File creation. Allocate an inode, and we're done..
*/
-static int do_hugetlbfs_mknod(struct inode *dir,
- struct dentry *dentry,
- umode_t mode,
- dev_t dev,
- bool tmpfile)
+static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
- int error = -ENOSPC;
- inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
- if (inode) {
- dir->i_ctime = dir->i_mtime = current_time(dir);
- if (tmpfile) {
- d_tmpfile(dentry, inode);
- } else {
- d_instantiate(dentry, inode);
- dget(dentry);/* Extra count - pin the dentry in core */
- }
- error = 0;
- }
- return error;
-}
-
-static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t dev)
-{
- return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
+ inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
+ if (!inode)
+ return -ENOSPC;
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ d_make_persistent(dentry, inode);
+ return 0;
}
-static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
+ int retval = hugetlbfs_mknod(idmap, dir, dentry,
mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
- return retval;
+ return ERR_PTR(retval);
}
-static int hugetlbfs_create(struct user_namespace *mnt_userns,
+static int hugetlbfs_create(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
+ return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}
-static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
- struct inode *dir, struct dentry *dentry,
+static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
+ struct inode *dir, struct file *file,
umode_t mode)
{
- return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
+ struct inode *inode;
+
+ inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
+ if (!inode)
+ return -ENOSPC;
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ d_tmpfile(file, inode);
+ return finish_open_simple(file, 0);
}
-static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
+static int hugetlbfs_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname)
{
+ const umode_t mode = S_IFLNK|S_IRWXUGO;
struct inode *inode;
int error = -ENOSPC;
- inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
+ inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
if (inode) {
int l = strlen(symname)+1;
error = page_symlink(inode, symname, l);
- if (!error) {
- d_instantiate(dentry, inode);
- dget(dentry);
- } else
+ if (!error)
+ d_make_persistent(dentry, inode);
+ else
iput(inode);
}
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return error;
}
-static int hugetlbfs_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
+#ifdef CONFIG_MIGRATION
+static int hugetlbfs_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
enum migrate_mode mode)
{
int rc;
- rc = migrate_huge_page_move_mapping(mapping, newpage, page);
- if (rc != MIGRATEPAGE_SUCCESS)
+ rc = migrate_huge_page_move_mapping(mapping, dst, src);
+ if (rc)
return rc;
- if (hugetlb_page_subpool(page)) {
- hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
- hugetlb_set_page_subpool(page, NULL);
+ if (hugetlb_folio_subpool(src)) {
+ hugetlb_set_folio_subpool(dst,
+ hugetlb_folio_subpool(src));
+ hugetlb_set_folio_subpool(src, NULL);
}
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
+ folio_migrate_flags(dst, src);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
+#else
+#define hugetlbfs_migrate_folio NULL
+#endif
-static int hugetlbfs_error_remove_page(struct address_space *mapping,
- struct page *page)
+static int hugetlbfs_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
- struct inode *inode = mapping->host;
- pgoff_t index = page->index;
-
- remove_huge_page(page);
- if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
- hugetlb_fix_reserve_counts(inode);
-
return 0;
}
@@ -1036,22 +1126,24 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
struct hstate *h = hstate_inode(d_inode(dentry));
+ u64 id = huge_encode_dev(dentry->d_sb->s_dev);
+ buf->f_fsid = u64_to_fsid(id);
buf->f_type = HUGETLBFS_MAGIC;
buf->f_bsize = huge_page_size(h);
if (sbinfo) {
spin_lock(&sbinfo->stat_lock);
- /* If no limits set, just report 0 for max/free/used
+ /* If no limits set, just report 0 or -1 for max/free/used
* blocks, like simple_statfs() */
if (sbinfo->spool) {
long free_pages;
- spin_lock(&sbinfo->spool->lock);
+ spin_lock_irq(&sbinfo->spool->lock);
buf->f_blocks = sbinfo->spool->max_hpages;
free_pages = sbinfo->spool->max_hpages
- sbinfo->spool->used_hpages;
buf->f_bavail = buf->f_bfree = free_pages;
- spin_unlock(&sbinfo->spool->lock);
+ spin_unlock_irq(&sbinfo->spool->lock);
buf->f_files = sbinfo->max_inodes;
buf->f_ffree = sbinfo->free_inodes;
}
@@ -1109,60 +1201,49 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
return NULL;
- p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
+ p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
if (unlikely(!p)) {
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
-
- /*
- * Any time after allocation, hugetlbfs_destroy_inode can be called
- * for the inode. mpol_free_shared_policy is unconditionally called
- * as part of hugetlbfs_destroy_inode. So, initialize policy here
- * in case of a quick call to destroy.
- *
- * Note that the policy is initialized even if we are creating a
- * private inode. This simplifies hugetlbfs_destroy_inode.
- */
- mpol_shared_policy_init(&p->policy, NULL);
-
return &p->vfs_inode;
}
static void hugetlbfs_free_inode(struct inode *inode)
{
+ trace_hugetlbfs_free_inode(inode);
kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}
static void hugetlbfs_destroy_inode(struct inode *inode)
{
hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
- mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}
static const struct address_space_operations hugetlbfs_aops = {
.write_begin = hugetlbfs_write_begin,
.write_end = hugetlbfs_write_end,
- .set_page_dirty = __set_page_dirty_no_writeback,
- .migratepage = hugetlbfs_migrate_page,
- .error_remove_page = hugetlbfs_error_remove_page,
+ .dirty_folio = noop_dirty_folio,
+ .migrate_folio = hugetlbfs_migrate_folio,
+ .error_remove_folio = hugetlbfs_error_remove_folio,
};
static void init_once(void *foo)
{
- struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
+ struct hugetlbfs_inode_info *ei = foo;
inode_init_once(&ei->vfs_inode);
}
-const struct file_operations hugetlbfs_file_operations = {
+static const struct file_operations hugetlbfs_file_operations = {
.read_iter = hugetlbfs_read_iter,
- .mmap = hugetlbfs_file_mmap,
+ .mmap_prepare = hugetlbfs_file_mmap_prepare,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,
.llseek = default_llseek,
.fallocate = hugetlbfs_fallocate,
+ .fop_flags = FOP_HUGE_PAGES,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
@@ -1222,6 +1303,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
{
struct hugetlbfs_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
+ struct hstate *h;
char *rest;
unsigned long ps;
int opt;
@@ -1232,15 +1314,11 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
switch (opt) {
case Opt_uid:
- ctx->uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(ctx->uid))
- goto bad_val;
+ ctx->uid = result.uid;
return 0;
case Opt_gid:
- ctx->gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(ctx->gid))
- goto bad_val;
+ ctx->gid = result.gid;
return 0;
case Opt_mode:
@@ -1249,7 +1327,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
case Opt_size:
/* memparse() will accept a K/M/G without a digit */
- if (!isdigit(param->string[0]))
+ if (!param->string || !isdigit(param->string[0]))
goto bad_val;
ctx->max_size_opt = memparse(param->string, &rest);
ctx->max_val_type = SIZE_STD;
@@ -1259,23 +1337,24 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
case Opt_nr_inodes:
/* memparse() will accept a K/M/G without a digit */
- if (!isdigit(param->string[0]))
+ if (!param->string || !isdigit(param->string[0]))
goto bad_val;
ctx->nr_inodes = memparse(param->string, &rest);
return 0;
case Opt_pagesize:
ps = memparse(param->string, &rest);
- ctx->hstate = size_to_hstate(ps);
- if (!ctx->hstate) {
- pr_err("Unsupported page size %lu MB\n", ps >> 20);
+ h = size_to_hstate(ps);
+ if (!h) {
+ pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
return -EINVAL;
}
+ ctx->hstate = h;
return 0;
case Opt_min_size:
/* memparse() will accept a K/M/G without a digit */
- if (!isdigit(param->string[0]))
+ if (!param->string || !isdigit(param->string[0]))
goto bad_val;
ctx->min_size_opt = memparse(param->string, &rest);
ctx->min_val_type = SIZE_STD;
@@ -1344,7 +1423,7 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
/*
* Allocate and initialize subpool if maximum or minimum size is
* specified. Any needed reservations (for minimum size) are taken
- * taken when the subpool is created.
+ * when the subpool is created.
*/
if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
sbinfo->spool = hugepage_new_subpool(ctx->hstate,
@@ -1358,6 +1437,7 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
sb->s_magic = HUGETLBFS_MAGIC;
sb->s_op = &hugetlbfs_ops;
+ sb->s_d_flags = DCACHE_DONTCACHE;
sb->s_time_gran = 1;
/*
@@ -1420,7 +1500,8 @@ static struct file_system_type hugetlbfs_fs_type = {
.name = "hugetlbfs",
.init_fs_context = hugetlbfs_init_fs_context,
.parameters = hugetlb_fs_parameters,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
+ .fs_flags = FS_ALLOW_IDMAP,
};
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
@@ -1474,7 +1555,9 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
}
file = ERR_PTR(-ENOSPC);
- inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
+ /* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */
+ inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
+ S_IFREG | S_IRWXUGO, 0);
if (!inode)
goto out;
if (creat_flags == HUGETLB_SHMFS_INODE)
@@ -1483,9 +1566,9 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
inode->i_size = size;
clear_nlink(inode);
- if (!hugetlb_reserve_pages(inode, 0,
+ if (hugetlb_reserve_pages(inode, 0,
size >> huge_page_shift(hstate_inode(inode)), NULL,
- acctflag))
+ acctflag) < 0)
file = ERR_PTR(-ENOMEM);
else
file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
@@ -1509,12 +1592,12 @@ static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
} else {
struct hugetlbfs_fs_context *ctx = fc->fs_private;
ctx->hstate = h;
- mnt = fc_mount(fc);
+ mnt = fc_mount_longterm(fc);
put_fs_context(fc);
}
if (IS_ERR(mnt))
pr_err("Cannot mount internal hugetlbfs for page size %luK",
- huge_page_size(h) >> 10);
+ huge_page_size(h) / SZ_1K);
return mnt;
}
diff --git a/fs/init.c b/fs/init.c
index 5c36adaa9b44..e0f5429c0a49 100644
--- a/fs/init.c
+++ b/fs/init.c
@@ -149,17 +149,16 @@ int __init init_mknod(const char *filename, umode_t mode, unsigned int dev)
else if (!(S_ISBLK(mode) || S_ISCHR(mode)))
return -EINVAL;
- dentry = kern_path_create(AT_FDCWD, filename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, filename, &path, 0);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
+ mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mknod(&path, dentry, mode, dev);
if (!error)
- error = vfs_mknod(mnt_user_ns(path.mnt), path.dentry->d_inode,
- dentry, mode, new_decode_dev(dev));
- done_path_create(&path, dentry);
+ error = vfs_mknod(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, mode, new_decode_dev(dev), NULL);
+ end_creating_path(&path, dentry);
return error;
}
@@ -167,14 +166,14 @@ int __init init_link(const char *oldname, const char *newname)
{
struct dentry *new_dentry;
struct path old_path, new_path;
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *idmap;
int error;
error = kern_path(oldname, 0, &old_path);
if (error)
return error;
- new_dentry = kern_path_create(AT_FDCWD, newname, &new_path, 0);
+ new_dentry = start_creating_path(AT_FDCWD, newname, &new_path, 0);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
@@ -182,17 +181,17 @@ int __init init_link(const char *oldname, const char *newname)
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
- mnt_userns = mnt_user_ns(new_path.mnt);
- error = may_linkat(mnt_userns, &old_path);
+ idmap = mnt_idmap(new_path.mnt);
+ error = may_linkat(idmap, &old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
- error = vfs_link(old_path.dentry, mnt_userns, new_path.dentry->d_inode,
+ error = vfs_link(old_path.dentry, idmap, new_path.dentry->d_inode,
new_dentry, NULL);
out_dput:
- done_path_create(&new_path, new_dentry);
+ end_creating_path(&new_path, new_dentry);
out:
path_put(&old_path);
return error;
@@ -204,14 +203,14 @@ int __init init_symlink(const char *oldname, const char *newname)
struct path path;
int error;
- dentry = kern_path_create(AT_FDCWD, newname, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, newname, &path, 0);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
error = security_path_symlink(&path, dentry, oldname);
if (!error)
- error = vfs_symlink(mnt_user_ns(path.mnt), path.dentry->d_inode,
- dentry, oldname);
- done_path_create(&path, dentry);
+ error = vfs_symlink(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, oldname, NULL);
+ end_creating_path(&path, dentry);
return error;
}
@@ -226,16 +225,19 @@ int __init init_mkdir(const char *pathname, umode_t mode)
struct path path;
int error;
- dentry = kern_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, pathname, &path,
+ LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
+ mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mkdir(&path, dentry, mode);
- if (!error)
- error = vfs_mkdir(mnt_user_ns(path.mnt), path.dentry->d_inode,
- dentry, mode);
- done_path_create(&path, dentry);
+ if (!error) {
+ dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, mode, NULL);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
+ }
+ end_creating_path(&path, dentry);
return error;
}
diff --git a/fs/inode.c b/fs/inode.c
index 3eba0940ffcf..521383223d8a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -5,6 +5,7 @@
*/
#include <linux/export.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
@@ -15,19 +16,24 @@
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
-#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
+#include <linux/rw_hint.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include <trace/events/writeback.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/timestamp.h>
+
#include "internal.h"
/*
* Inode locking rules:
*
* inode->i_lock protects:
- * inode->i_state, inode->i_hash, __iget()
+ * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
* Inode LRU list locks protect:
* inode->i_sb->s_inode_lru, inode->i_lru
* inode->i_sb->s_inode_list_lock protects:
@@ -54,9 +60,9 @@
* inode_hash_lock
*/
-static unsigned int i_hash_mask __read_mostly;
-static unsigned int i_hash_shift __read_mostly;
-static struct hlist_head *inode_hashtable __read_mostly;
+static unsigned int i_hash_mask __ro_after_init;
+static unsigned int i_hash_shift __ro_after_init;
+static struct hlist_head *inode_hashtable __ro_after_init;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
/*
@@ -67,15 +73,10 @@ const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
-/*
- * Statistics gathering..
- */
-struct inodes_stat_t inodes_stat;
-
static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);
-static struct kmem_cache *inode_cachep __read_mostly;
+static struct kmem_cache *inode_cachep __ro_after_init;
static long get_nr_inodes(void)
{
@@ -102,17 +103,110 @@ long get_nr_dirty_inodes(void)
return nr_dirty > 0 ? nr_dirty : 0;
}
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_PER_CPU(long, mg_ctime_updates);
+static DEFINE_PER_CPU(long, mg_fine_stamps);
+static DEFINE_PER_CPU(long, mg_ctime_swaps);
+
+static unsigned long get_mg_ctime_updates(void)
+{
+ unsigned long sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += data_race(per_cpu(mg_ctime_updates, i));
+ return sum;
+}
+
+static unsigned long get_mg_fine_stamps(void)
+{
+ unsigned long sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += data_race(per_cpu(mg_fine_stamps, i));
+ return sum;
+}
+
+static unsigned long get_mg_ctime_swaps(void)
+{
+ unsigned long sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += data_race(per_cpu(mg_ctime_swaps, i));
+ return sum;
+}
+
+#define mgtime_counter_inc(__var) this_cpu_inc(__var)
+
+static int mgts_show(struct seq_file *s, void *p)
+{
+ unsigned long ctime_updates = get_mg_ctime_updates();
+ unsigned long ctime_swaps = get_mg_ctime_swaps();
+ unsigned long fine_stamps = get_mg_fine_stamps();
+ unsigned long floor_swaps = timekeeping_get_mg_floor_swaps();
+
+ seq_printf(s, "%lu %lu %lu %lu\n",
+ ctime_updates, ctime_swaps, fine_stamps, floor_swaps);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mgts);
+
+static int __init mg_debugfs_init(void)
+{
+ debugfs_create_file("multigrain_timestamps", S_IFREG | S_IRUGO, NULL, NULL, &mgts_fops);
+ return 0;
+}
+late_initcall(mg_debugfs_init);
+
+#else /* ! CONFIG_DEBUG_FS */
+
+#define mgtime_counter_inc(__var) do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
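For reference (derived from mgts_show() above, not from any new behaviour), the debugfs file prints the four counters space-separated on one line, in the order: ctime updates, ctime swaps, fine-grained stamps, floor swaps. The per-CPU sums use data_race() because the values are statistics only and torn reads are acceptable.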
+
/*
* Handle nr_inode sysctl
*/
#ifdef CONFIG_SYSCTL
-int proc_nr_inodes(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+/*
+ * Statistics gathering..
+ */
+static struct inodes_stat_t inodes_stat;
+
+static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
inodes_stat.nr_inodes = get_nr_inodes();
inodes_stat.nr_unused = get_nr_inodes_unused();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
+
+static const struct ctl_table inodes_sysctls[] = {
+ {
+ .procname = "inode-nr",
+ .data = &inodes_stat,
+ .maxlen = 2*sizeof(long),
+ .mode = 0444,
+ .proc_handler = proc_nr_inodes,
+ },
+ {
+ .procname = "inode-state",
+ .data = &inodes_stat,
+ .maxlen = 7*sizeof(long),
+ .mode = 0444,
+ .proc_handler = proc_nr_inodes,
+ },
+};
+
+static int __init init_fs_inode_sysctls(void)
+{
+ register_sysctl_init("fs", inodes_sysctls);
+ return 0;
+}
+early_initcall(init_fs_inode_sysctls);
#endif
static int no_open(struct inode *inode, struct file *file)
@@ -121,14 +215,16 @@ static int no_open(struct inode *inode, struct file *file)
}
/**
- * inode_init_always - perform inode structure initialisation
+ * inode_init_always_gfp - perform inode structure initialisation
* @sb: superblock inode belongs to
* @inode: inode to initialise
+ * @gfp: allocation flags
*
* These are initializations that need to be done on every inode
* allocation as the fields are not initialised by slab allocation.
+ * If there are additional allocations required @gfp is used.
*/
-int inode_init_always(struct super_block *sb, struct inode *inode)
+int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp)
{
static const struct inode_operations empty_iops;
static const struct file_operations no_open_fops = {.open = no_open};
@@ -137,6 +233,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
+ inode_state_assign_raw(inode, 0);
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
@@ -146,6 +243,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_opflags = 0;
if (sb->s_xattr)
inode->i_opflags |= IOP_XATTR;
+ if (sb->s_type->fs_flags & FS_MGTIME)
+ inode->i_opflags |= IOP_MGTIME;
i_uid_write(inode, 0);
i_gid_write(inode, 0);
atomic_set(&inode->i_writecount, 0);
@@ -167,8 +266,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_wb_frn_history = 0;
#endif
- if (security_inode_alloc(inode))
- goto out;
spin_lock_init(&inode->i_lock);
lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
@@ -180,20 +277,20 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping->a_ops = &empty_aops;
mapping->host = inode;
mapping->flags = 0;
- if (sb->s_type->fs_flags & FS_THP_SUPPORT)
- __set_bit(AS_THP_SUPPORT, &mapping->flags);
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
atomic_set(&mapping->nr_thps, 0);
#endif
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
- mapping->private_data = NULL;
+ mapping->i_private_data = NULL;
mapping->writeback_index = 0;
init_rwsem(&mapping->invalidate_lock);
lockdep_set_class_and_name(&mapping->invalidate_lock,
&sb->s_type->invalidate_lock_key,
"mapping.invalidate_lock");
+ if (sb->s_iflags & SB_I_STABLE_WRITES)
+ mapping_set_stable_writes(mapping);
inode->i_private = NULL;
inode->i_mapping = mapping;
INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
@@ -205,13 +302,15 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_fsnotify_mask = 0;
#endif
inode->i_flctx = NULL;
+
+ if (unlikely(security_inode_alloc(inode, gfp)))
+ return -ENOMEM;
+
this_cpu_inc(nr_inodes);
return 0;
-out:
- return -ENOMEM;
}
-EXPORT_SYMBOL(inode_init_always);
+EXPORT_SYMBOL(inode_init_always_gfp);
void free_inode_nonrcu(struct inode *inode)
{
@@ -228,7 +327,17 @@ static void i_callback(struct rcu_head *head)
free_inode_nonrcu(inode);
}
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * alloc_inode - obtain an inode
+ * @sb: superblock
+ *
+ * Allocates a new inode for the given superblock.
+ * The inode will not be chained into the superblock's s_inodes list.
+ * This means:
+ * - the fs can't be unmounted
+ * - quotas, fsnotify and writeback can't work
+ */
+struct inode *alloc_inode(struct super_block *sb)
{
const struct super_operations *ops = sb->s_op;
struct inode *inode;
@@ -236,7 +345,7 @@ static struct inode *alloc_inode(struct super_block *sb)
if (ops->alloc_inode)
inode = ops->alloc_inode(sb);
else
- inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+ inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);
if (!inode)
return NULL;
@@ -362,7 +471,7 @@ EXPORT_SYMBOL(set_nlink);
void inc_nlink(struct inode *inode)
{
if (unlikely(inode->i_nlink == 0)) {
- WARN_ON(!(inode->i_state & I_LINKABLE));
+ WARN_ON(!(inode_state_read_once(inode) & I_LINKABLE));
atomic_long_dec(&inode->i_sb->s_remove_count);
}
@@ -374,8 +483,8 @@ static void __address_space_init_once(struct address_space *mapping)
{
xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
init_rwsem(&mapping->i_mmap_rwsem);
- INIT_LIST_HEAD(&mapping->private_list);
- spin_lock_init(&mapping->private_lock);
+ INIT_LIST_HEAD(&mapping->i_private_list);
+ spin_lock_init(&mapping->i_private_lock);
mapping->i_mmap = RB_ROOT_CACHED;
}
@@ -399,6 +508,7 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->i_io_list);
INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
+ INIT_LIST_HEAD(&inode->i_sb_list);
__address_space_init_once(&inode->i_data);
i_size_ordered_init(inode);
}
@@ -412,14 +522,6 @@ static void init_once(void *foo)
}
/*
- * inode->i_lock must be held
- */
-void __iget(struct inode *inode)
-{
- atomic_inc(&inode->i_count);
-}
-
-/*
* get additional reference to inode; caller must already hold one.
*/
void ihold(struct inode *inode)
@@ -428,57 +530,144 @@ void ihold(struct inode *inode)
}
EXPORT_SYMBOL(ihold);
-static void __inode_add_lru(struct inode *inode, bool rotate)
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit)
{
- if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
+ void *bit_address;
+
+ bit_address = inode_state_wait_address(inode, bit);
+ init_wait_var_entry(wqe, bit_address, 0);
+ return __var_waitqueue(bit_address);
+}
+EXPORT_SYMBOL(inode_bit_waitqueue);
+
+void wait_on_new_inode(struct inode *inode)
+{
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+
+ spin_lock(&inode->i_lock);
+ if (!(inode_state_read(inode) & I_NEW)) {
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
+ for (;;) {
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ if (!(inode_state_read(inode) & I_NEW))
+ break;
+ spin_unlock(&inode->i_lock);
+ schedule();
+ spin_lock(&inode->i_lock);
+ }
+ finish_wait(wq_head, &wqe.wq_entry);
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(wait_on_new_inode);
+
+static void __inode_lru_list_add(struct inode *inode, bool rotate)
+{
+ lockdep_assert_held(&inode->i_lock);
+
+ if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
return;
- if (atomic_read(&inode->i_count))
+ if (icount_read(inode))
return;
if (!(inode->i_sb->s_flags & SB_ACTIVE))
return;
if (!mapping_shrinkable(&inode->i_data))
return;
- if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
+ if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
else if (rotate)
- inode->i_state |= I_REFERENCED;
+ inode_state_set(inode, I_REFERENCED);
}
/*
* Add inode to LRU if needed (inode is unused and clean).
- *
- * Needs inode->i_lock held.
*/
-void inode_add_lru(struct inode *inode)
+void inode_lru_list_add(struct inode *inode)
{
- __inode_add_lru(inode, false);
+ __inode_lru_list_add(inode, false);
}
static void inode_lru_list_del(struct inode *inode)
{
- if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
+ if (list_empty(&inode->i_lru))
+ return;
+
+ if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_dec(nr_unused);
}
+static void inode_pin_lru_isolating(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ WARN_ON(inode_state_read(inode) & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
+ inode_state_set(inode, I_LRU_ISOLATING);
+}
+
+static void inode_unpin_lru_isolating(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ WARN_ON(!(inode_state_read(inode) & I_LRU_ISOLATING));
+ inode_state_clear(inode, I_LRU_ISOLATING);
+ /* Called with inode->i_lock which ensures memory ordering. */
+ inode_wake_up_bit(inode, __I_LRU_ISOLATING);
+ spin_unlock(&inode->i_lock);
+}
+
+static void inode_wait_for_lru_isolating(struct inode *inode)
+{
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+
+ lockdep_assert_held(&inode->i_lock);
+ if (!(inode_state_read(inode) & I_LRU_ISOLATING))
+ return;
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
+ for (;;) {
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ /*
+ * Checking I_LRU_ISOLATING with inode->i_lock guarantees
+ * memory ordering.
+ */
+ if (!(inode_state_read(inode) & I_LRU_ISOLATING))
+ break;
+ spin_unlock(&inode->i_lock);
+ schedule();
+ spin_lock(&inode->i_lock);
+ }
+ finish_wait(wq_head, &wqe.wq_entry);
+ WARN_ON(inode_state_read(inode) & I_LRU_ISOLATING);
+}
+
/**
* inode_sb_list_add - add inode to the superblock list of inodes
* @inode: inode to add
*/
void inode_sb_list_add(struct inode *inode)
{
- spin_lock(&inode->i_sb->s_inode_list_lock);
- list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
- spin_unlock(&inode->i_sb->s_inode_list_lock);
+ struct super_block *sb = inode->i_sb;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
+ spin_unlock(&sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode)
{
+ struct super_block *sb = inode->i_sb;
+
if (!list_empty(&inode->i_sb_list)) {
- spin_lock(&inode->i_sb->s_inode_list_lock);
+ spin_lock(&sb->s_inode_list_lock);
list_del_init(&inode->i_sb_list);
- spin_unlock(&inode->i_sb->s_inode_list_lock);
+ spin_unlock(&sb->s_inode_list_lock);
}
}
@@ -528,11 +717,65 @@ void __remove_inode_hash(struct inode *inode)
}
EXPORT_SYMBOL(__remove_inode_hash);
+void dump_mapping(const struct address_space *mapping)
+{
+ struct inode *host;
+ const struct address_space_operations *a_ops;
+ struct hlist_node *dentry_first;
+ struct dentry *dentry_ptr;
+ struct dentry dentry;
+ char fname[64] = {};
+ unsigned long ino;
+
+ /*
+ * If mapping is an invalid pointer, we don't want to crash
+ * accessing it, so probe everything depending on it carefully.
+ */
+ if (get_kernel_nofault(host, &mapping->host) ||
+ get_kernel_nofault(a_ops, &mapping->a_ops)) {
+ pr_warn("invalid mapping:%px\n", mapping);
+ return;
+ }
+
+ if (!host) {
+ pr_warn("aops:%ps\n", a_ops);
+ return;
+ }
+
+ if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
+ get_kernel_nofault(ino, &host->i_ino)) {
+ pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
+ return;
+ }
+
+ if (!dentry_first) {
+ pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
+ return;
+ }
+
+ dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
+ if (get_kernel_nofault(dentry, dentry_ptr) ||
+ !dentry.d_parent || !dentry.d_name.name) {
+ pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
+ a_ops, ino, dentry_ptr);
+ return;
+ }
+
+ if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0)
+ strscpy(fname, "<invalid>");
+ /*
+ * Even if strncpy_from_kernel_nofault() succeeded,
+ * the fname could be unreliable
+ */
+ pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n",
+ a_ops, ino, fname);
+}
+
void clear_inode(struct inode *inode)
{
/*
* We have to cycle the i_pages lock here because reclaim can be in the
- * process of removing the last page (in __delete_from_page_cache())
+ * process of removing the last page (in __filemap_remove_folio())
* and we must not free the mapping under it.
*/
xa_lock_irq(&inode->i_data.i_pages);
@@ -546,12 +789,12 @@ void clear_inode(struct inode *inode)
* nor even WARN_ON(!mapping_empty).
*/
xa_unlock_irq(&inode->i_data.i_pages);
- BUG_ON(!list_empty(&inode->i_data.private_list));
- BUG_ON(!(inode->i_state & I_FREEING));
- BUG_ON(inode->i_state & I_CLEAR);
+ BUG_ON(!list_empty(&inode->i_data.i_private_list));
+ BUG_ON(!(inode_state_read_once(inode) & I_FREEING));
+ BUG_ON(inode_state_read_once(inode) & I_CLEAR);
BUG_ON(!list_empty(&inode->i_wb_list));
/* don't need i_lock here, no concurrent mods to i_state */
- inode->i_state = I_FREEING | I_CLEAR;
+ inode_state_assign_raw(inode, I_FREEING | I_CLEAR);
}
EXPORT_SYMBOL(clear_inode);
@@ -572,14 +815,15 @@ static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
- BUG_ON(!(inode->i_state & I_FREEING));
+ BUG_ON(!(inode_state_read_once(inode) & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
- if (!list_empty(&inode->i_io_list))
- inode_io_list_del(inode);
-
+ inode_io_list_del(inode);
inode_sb_list_del(inode);
+ spin_lock(&inode->i_lock);
+ inode_wait_for_lru_isolating(inode);
+
/*
* Wait for flusher thread to be done with the inode so that filesystem
* does not start destroying it while writeback is still running. Since
@@ -587,6 +831,7 @@ static void evict(struct inode *inode)
* the inode. We just have to wait for running writeback to finish.
*/
inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
if (op->evict_inode) {
op->evict_inode(inode);
@@ -599,10 +844,19 @@ static void evict(struct inode *inode)
remove_inode_hash(inode);
- spin_lock(&inode->i_lock);
- wake_up_bit(&inode->i_state, __I_NEW);
- BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
- spin_unlock(&inode->i_lock);
+ /*
+ * Wake up waiters in __wait_on_freeing_inode().
+ *
+ * It is an invariant that any thread we need to wake up is already
+ * accounted for before remove_inode_hash() acquires ->i_lock -- both
+ * sides take the lock and sleep is aborted if the inode is found
+ * unhashed. Thus either the sleeper wins and goes off CPU, or removal
+ * wins and the sleeper aborts after testing with the lock.
+ *
+ * This also means we don't need any fences for the call below.
+ */
+ inode_wake_up_bit(inode, __I_NEW);
+ BUG_ON(inode_state_read_once(inode) != (I_FREEING | I_CLEAR));
destroy_inode(inode);
}
@@ -638,22 +892,26 @@ static void dispose_list(struct list_head *head)
*/
void evict_inodes(struct super_block *sb)
{
- struct inode *inode, *next;
+ struct inode *inode;
LIST_HEAD(dispose);
again:
spin_lock(&sb->s_inode_list_lock);
- list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- if (atomic_read(&inode->i_count))
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ if (icount_read(inode))
continue;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ if (icount_read(inode)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ if (inode_state_read(inode) & (I_NEW | I_FREEING | I_WILL_FREE)) {
spin_unlock(&inode->i_lock);
continue;
}
- inode->i_state |= I_FREEING;
+ inode_state_set(inode, I_FREEING);
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose);
@@ -676,59 +934,6 @@ again:
}
EXPORT_SYMBOL_GPL(evict_inodes);
-/**
- * invalidate_inodes - attempt to free all inodes on a superblock
- * @sb: superblock to operate on
- * @kill_dirty: flag to guide handling of dirty inodes
- *
- * Attempts to free all inodes for a given superblock. If there were any
- * busy inodes return a non-zero value, else zero.
- * If @kill_dirty is set, discard dirty inodes too, otherwise treat
- * them as busy.
- */
-int invalidate_inodes(struct super_block *sb, bool kill_dirty)
-{
- int busy = 0;
- struct inode *inode, *next;
- LIST_HEAD(dispose);
-
-again:
- spin_lock(&sb->s_inode_list_lock);
- list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- spin_lock(&inode->i_lock);
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
- spin_unlock(&inode->i_lock);
- continue;
- }
- if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
- spin_unlock(&inode->i_lock);
- busy = 1;
- continue;
- }
- if (atomic_read(&inode->i_count)) {
- spin_unlock(&inode->i_lock);
- busy = 1;
- continue;
- }
-
- inode->i_state |= I_FREEING;
- inode_lru_list_del(inode);
- spin_unlock(&inode->i_lock);
- list_add(&inode->i_lru, &dispose);
- if (need_resched()) {
- spin_unlock(&sb->s_inode_list_lock);
- cond_resched();
- dispose_list(&dispose);
- goto again;
- }
- }
- spin_unlock(&sb->s_inode_list_lock);
-
- dispose_list(&dispose);
-
- return busy;
-}
-
/*
* Isolate the inode from the LRU in preparation for freeing it.
*
@@ -741,7 +946,7 @@ again:
* with this flag set because they are the inodes that are out of order.
*/
static enum lru_status inode_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct inode *inode = container_of(item, struct inode, i_lru);
@@ -759,8 +964,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
* unreclaimable for a while. Remove them lazily here; iput,
* sync, or the last page cache deletion will requeue them.
*/
- if (atomic_read(&inode->i_count) ||
- (inode->i_state & ~I_REFERENCED) ||
+ if (icount_read(inode) ||
+ (inode_state_read(inode) & ~I_REFERENCED) ||
!mapping_shrinkable(&inode->i_data)) {
list_lru_isolate(lru, &inode->i_lru);
spin_unlock(&inode->i_lock);
@@ -769,8 +974,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
}
/* Recently referenced inodes get one more pass */
- if (inode->i_state & I_REFERENCED) {
- inode->i_state &= ~I_REFERENCED;
+ if (inode_state_read(inode) & I_REFERENCED) {
+ inode_state_clear(inode, I_REFERENCED);
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
}
@@ -781,9 +986,9 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
* be under pressure before the cache inside the highmem zone.
*/
if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
- __iget(inode);
+ inode_pin_lru_isolating(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(lru_lock);
+ spin_unlock(&lru->lock);
if (remove_inode_buffers(inode)) {
unsigned long reap;
reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -791,16 +996,14 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
__count_vm_events(KSWAPD_INODESTEAL, reap);
else
__count_vm_events(PGINODESTEAL, reap);
- if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab += reap;
+ mm_account_reclaimed_pages(reap);
}
- iput(inode);
- spin_lock(lru_lock);
+ inode_unpin_lru_isolating(inode);
return LRU_RETRY;
}
- WARN_ON(inode->i_state & I_NEW);
- inode->i_state |= I_FREEING;
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ inode_state_set(inode, I_FREEING);
list_lru_isolate_move(lru, &inode->i_lru, freeable);
spin_unlock(&inode->i_lock);
@@ -825,36 +1028,47 @@ long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
return freed;
}
-static void __wait_on_freeing_inode(struct inode *inode);
+static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
/*
* Called with the inode lock held.
*/
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
int (*test)(struct inode *, void *),
- void *data)
+ void *data, bool is_inode_hash_locked,
+ bool *isnew)
{
struct inode *inode = NULL;
+ if (is_inode_hash_locked)
+ lockdep_assert_held(&inode_hash_lock);
+ else
+ lockdep_assert_not_held(&inode_hash_lock);
+
+ rcu_read_lock();
repeat:
- hlist_for_each_entry(inode, head, i_hash) {
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb != sb)
continue;
if (!test(inode, data))
continue;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
- __wait_on_freeing_inode(inode);
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
+ __wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
- if (unlikely(inode->i_state & I_CREATING)) {
+ if (unlikely(inode_state_read(inode) & I_CREATING)) {
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
+ *isnew = !!(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return inode;
}
+ rcu_read_unlock();
return NULL;
}
@@ -863,29 +1077,40 @@ repeat:
* iget_locked for details.
*/
static struct inode *find_inode_fast(struct super_block *sb,
- struct hlist_head *head, unsigned long ino)
+ struct hlist_head *head, unsigned long ino,
+ bool is_inode_hash_locked, bool *isnew)
{
struct inode *inode = NULL;
+ if (is_inode_hash_locked)
+ lockdep_assert_held(&inode_hash_lock);
+ else
+ lockdep_assert_not_held(&inode_hash_lock);
+
+ rcu_read_lock();
repeat:
- hlist_for_each_entry(inode, head, i_hash) {
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino != ino)
continue;
if (inode->i_sb != sb)
continue;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
- __wait_on_freeing_inode(inode);
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
+ __wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
- if (unlikely(inode->i_state & I_CREATING)) {
+ if (unlikely(inode_state_read(inode) & I_CREATING)) {
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
+ *isnew = !!(inode_state_read(inode) & I_NEW);
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return inode;
}
+ rcu_read_unlock();
return NULL;
}
@@ -932,29 +1157,6 @@ unsigned int get_next_ino(void)
EXPORT_SYMBOL(get_next_ino);
/**
- * new_inode_pseudo - obtain an inode
- * @sb: superblock
- *
- * Allocates a new inode for given superblock.
- * Inode wont be chained in superblock s_inodes list
- * This means :
- * - fs can't be unmount
- * - quotas, fsnotify, writeback can't work
- */
-struct inode *new_inode_pseudo(struct super_block *sb)
-{
- struct inode *inode = alloc_inode(sb);
-
- if (inode) {
- spin_lock(&inode->i_lock);
- inode->i_state = 0;
- spin_unlock(&inode->i_lock);
- INIT_LIST_HEAD(&inode->i_sb_list);
- }
- return inode;
-}
-
-/**
* new_inode - obtain an inode
* @sb: superblock
*
@@ -970,9 +1172,7 @@ struct inode *new_inode(struct super_block *sb)
{
struct inode *inode;
- spin_lock_prefetch(&sb->s_inode_list_lock);
-
- inode = new_inode_pseudo(sb);
+ inode = alloc_inode(sb);
if (inode)
inode_sb_list_add(inode);
return inode;
@@ -988,9 +1188,8 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
/* Set new key only if filesystem hasn't already changed it */
if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
/*
- * ensure nobody is actually holding i_mutex
+ * ensure nobody is actually holding i_rwsem
*/
- // mutex_destroy(&inode->i_mutex);
init_rwsem(&inode->i_rwsem);
lockdep_set_class(&inode->i_rwsem,
&type->i_mutex_dir_key);
@@ -1011,10 +1210,9 @@ void unlock_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW & ~I_CREATING;
- smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
+ WARN_ON(!(inode_state_read(inode) & I_NEW));
+ inode_state_clear(inode, I_NEW | I_CREATING);
+ inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
@@ -1023,10 +1221,9 @@ void discard_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
- smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
+ WARN_ON(!(inode_state_read(inode) & I_NEW));
+ inode_state_clear(inode, I_NEW);
+ inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
iput(inode);
}
@@ -1035,7 +1232,7 @@ EXPORT_SYMBOL(discard_new_inode);
/**
* lock_two_nondirectories - take two i_mutexes on non-directory objects
*
- * Lock any non-NULL argument that is not a directory.
+ * Lock any non-NULL argument. Passed objects must not be directories.
* Zero, one or two objects may be locked by this function.
*
* @inode1: first inode to lock
@@ -1043,12 +1240,15 @@ EXPORT_SYMBOL(discard_new_inode);
*/
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
+ if (inode1)
+ WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
+ if (inode2)
+ WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
if (inode1 > inode2)
swap(inode1, inode2);
-
- if (inode1 && !S_ISDIR(inode1->i_mode))
+ if (inode1)
inode_lock(inode1);
- if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
+ if (inode2 && inode2 != inode1)
inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
@@ -1060,10 +1260,14 @@ EXPORT_SYMBOL(lock_two_nondirectories);
*/
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
- if (inode1 && !S_ISDIR(inode1->i_mode))
+ if (inode1) {
+ WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
inode_unlock(inode1);
- if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
+ }
+ if (inode2 && inode2 != inode1) {
+ WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
inode_unlock(inode2);
+ }
}
EXPORT_SYMBOL(unlock_two_nondirectories);
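/*
 * Illustrative sketch (hypothetical helper): remap/dedupe style operations on
 * two regular files take both inodes with lock_two_nondirectories() and drop
 * them with the matching unlock helper; the WARN_ON_ONCE above now catches
 * directories slipping in.
 */
static int myfs_remap_prep(struct inode *src, struct inode *dst)
{
	lock_two_nondirectories(src, dst);
	/* ... validate ranges, flush and wait on both mappings here ... */
	unlock_two_nondirectories(src, dst);
	return 0;
}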
@@ -1074,18 +1278,18 @@ EXPORT_SYMBOL(unlock_two_nondirectories);
* @test: callback used for comparisons between inodes
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
+ * @isnew: pointer to a bool which will indicate whether I_NEW is set
*
* Search for the inode specified by @hashval and @data in the inode cache,
- * and if present it is return it with an increased reference count. This is
- * a variant of iget5_locked() for callers that don't want to fail on memory
- * allocation of inode.
+ * and if present return it with an increased reference count. This is a
+ * variant of iget5_locked() that doesn't allocate an inode.
*
- * If the inode is not in cache, insert the pre-allocated inode to cache and
+ * If the inode is not present in the cache, insert the pre-allocated inode and
* return it locked, hashed, and with the I_NEW flag set. The file system gets
* to fill it in before unlocking it via unlock_new_inode().
*
- * Note both @test and @set are called with the inode_hash_lock held, so can't
- * sleep.
+ * Note that both @test and @set are called with the inode_hash_lock held, so
+ * they can't sleep.
*/
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
@@ -1093,11 +1297,13 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
- bool creating = inode->i_state & I_CREATING;
+ bool isnew;
+
+ might_sleep();
again:
spin_lock(&inode_hash_lock);
- old = find_inode(inode->i_sb, head, test, data);
+ old = find_inode(inode->i_sb, head, test, data, true, &isnew);
if (unlikely(old)) {
/*
* Uhhuh, somebody else created the same inode under us.
@@ -1106,7 +1312,8 @@ again:
spin_unlock(&inode_hash_lock);
if (IS_ERR(old))
return NULL;
- wait_on_inode(old);
+ if (unlikely(isnew))
+ wait_on_new_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
goto again;
@@ -1115,8 +1322,8 @@ again:
}
if (set && unlikely(set(inode, data))) {
- inode = NULL;
- goto unlock;
+ spin_unlock(&inode_hash_lock);
+ return NULL;
}
/*
@@ -1124,14 +1331,19 @@ again:
* caller is responsible for filling in the contents
*/
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW;
+ inode_state_set(inode, I_NEW);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
- if (!creating)
- inode_sb_list_add(inode);
-unlock:
+
spin_unlock(&inode_hash_lock);
+ /*
+ * Add inode to the sb list if it's not already. It has I_NEW at this
+ * point, so it should be safe to test i_sb_list locklessly.
+ */
+ if (list_empty(&inode->i_sb_list))
+ inode_sb_list_add(inode);
+
return inode;
}
EXPORT_SYMBOL(inode_insert5);
@@ -1145,16 +1357,16 @@ EXPORT_SYMBOL(inode_insert5);
* @data: opaque data pointer to pass to @test and @set
*
* Search for the inode specified by @hashval and @data in the inode cache,
- * and if present it is return it with an increased reference count. This is
- * a generalized version of iget_locked() for file systems where the inode
+ * and if present return it with an increased reference count. This is a
+ * generalized version of iget_locked() for file systems where the inode
* number is not sufficient for unique identification of an inode.
*
- * If the inode is not in cache, allocate a new inode and return it locked,
- * hashed, and with the I_NEW flag set. The file system gets to fill it in
- * before unlocking it via unlock_new_inode().
+ * If the inode is not present in the cache, allocate and insert a new inode
+ * and return it locked, hashed, and with the I_NEW flag set. The file system
+ * gets to fill it in before unlocking it via unlock_new_inode().
*
- * Note both @test and @set are called with the inode_hash_lock held, so can't
- * sleep.
+ * Note that both @test and @set are called with the inode_hash_lock held, so
+ * they can't sleep.
*/
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *),
@@ -1166,7 +1378,6 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
struct inode *new = alloc_inode(sb);
if (new) {
- new->i_state = 0;
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
destroy_inode(new);
@@ -1177,6 +1388,51 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
EXPORT_SYMBOL(iget5_locked);
/**
+ * iget5_locked_rcu - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @hashval: hash value (usually inode number) to get
+ * @test: callback used for comparisons between inodes
+ * @set: callback used to initialize a new struct inode
+ * @data: opaque data pointer to pass to @test and @set
+ *
+ * This is equivalent to iget5_locked, except the @test callback must
+ * tolerate the inode not being stable, including being mid-teardown.
+ */
+struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode, *new;
+ bool isnew;
+
+ might_sleep();
+
+again:
+ inode = find_inode(sb, head, test, data, false, &isnew);
+ if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
+ if (unlikely(inode_unhashed(inode))) {
+ iput(inode);
+ goto again;
+ }
+ return inode;
+ }
+
+ new = alloc_inode(sb);
+ if (new) {
+ inode = inode_insert5(new, hashval, test, set, data);
+ if (unlikely(inode != new))
+ destroy_inode(new);
+ }
+ return inode;
+}
+EXPORT_SYMBOL_GPL(iget5_locked_rcu);
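/*
 * Illustrative sketch (hypothetical "myfs" callbacks): with iget5_locked_rcu()
 * the @test callback can run against an inode that is mid-teardown, so it must
 * only inspect fields that stay valid for an RCU grace period (here an object
 * number embedded in the filesystem inode), never pointers that
 * ->evict_inode() may already have freed.
 */
struct myfs_inode {
	struct inode vfs_inode;
	u64 objno;
};

static int myfs_iget_test(struct inode *inode, void *data)
{
	struct myfs_inode *mi = container_of(inode, struct myfs_inode, vfs_inode);

	return mi->objno == *(u64 *)data;
}

static int myfs_iget_set(struct inode *inode, void *data)
{
	container_of(inode, struct myfs_inode, vfs_inode)->objno = *(u64 *)data;
	return 0;
}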
+
+/**
* iget_locked - obtain an inode from a mounted file system
* @sb: super block of file system
* @ino: inode number to get
@@ -1193,14 +1449,17 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
+ bool isnew;
+
+ might_sleep();
+
again:
- spin_lock(&inode_hash_lock);
- inode = find_inode_fast(sb, head, ino);
- spin_unlock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1214,15 +1473,15 @@ again:
spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
- old = find_inode_fast(sb, head, ino);
+ old = find_inode_fast(sb, head, ino, true, &isnew);
if (!old) {
inode->i_ino = ino;
spin_lock(&inode->i_lock);
- inode->i_state = I_NEW;
+ inode_state_assign(inode, I_NEW);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
- inode_sb_list_add(inode);
spin_unlock(&inode_hash_lock);
+ inode_sb_list_add(inode);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -1240,7 +1499,8 @@ again:
if (IS_ERR(old))
return NULL;
inode = old;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1311,7 +1571,7 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode->i_lock);
- if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
+ if (!(inode_state_read(inode) & (I_FREEING | I_WILL_FREE))) {
__iget(inode);
spin_unlock(&inode->i_lock);
} else {
@@ -1344,13 +1604,13 @@ EXPORT_SYMBOL(igrab);
* Note2: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
- int (*test)(struct inode *, void *), void *data)
+ int (*test)(struct inode *, void *), void *data, bool *isnew)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
spin_lock(&inode_hash_lock);
- inode = find_inode(sb, head, test, data);
+ inode = find_inode(sb, head, test, data, true, isnew);
spin_unlock(&inode_hash_lock);
return IS_ERR(inode) ? NULL : inode;
@@ -1378,10 +1638,15 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct inode *inode;
+ bool isnew;
+
+ might_sleep();
+
again:
- inode = ilookup5_nowait(sb, hashval, test, data);
+ inode = ilookup5_nowait(sb, hashval, test, data, &isnew);
if (inode) {
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1403,15 +1668,18 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
+ bool isnew;
+
+ might_sleep();
+
again:
- spin_lock(&inode_hash_lock);
- inode = find_inode_fast(sb, head, ino);
- spin_unlock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino, false, &isnew);
if (inode) {
if (IS_ERR(inode))
return NULL;
- wait_on_inode(inode);
+ if (unlikely(isnew))
+ wait_on_new_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
@@ -1503,7 +1771,7 @@ struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb == sb &&
- !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
+ !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)) &&
test(inode, data))
return inode;
}
@@ -1542,7 +1810,7 @@ struct inode *find_inode_by_ino_rcu(struct super_block *sb,
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino == ino &&
inode->i_sb == sb &&
- !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
+ !(inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE)))
return inode;
}
return NULL;
@@ -1554,6 +1822,9 @@ int insert_inode_locked(struct inode *inode)
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
+ bool isnew;
+
+ might_sleep();
while (1) {
struct inode *old = NULL;
@@ -1564,7 +1835,7 @@ int insert_inode_locked(struct inode *inode)
if (old->i_sb != sb)
continue;
spin_lock(&old->i_lock);
- if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+ if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
spin_unlock(&old->i_lock);
continue;
}
@@ -1572,21 +1843,23 @@ int insert_inode_locked(struct inode *inode)
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW | I_CREATING;
+ inode_state_set(inode, I_NEW | I_CREATING);
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
- if (unlikely(old->i_state & I_CREATING)) {
+ if (unlikely(inode_state_read(old) & I_CREATING)) {
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
return -EBUSY;
}
__iget(old);
+ isnew = !!(inode_state_read(old) & I_NEW);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
- wait_on_inode(old);
+ if (isnew)
+ wait_on_new_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
return -EBUSY;
@@ -1601,7 +1874,9 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
{
struct inode *old;
- inode->i_state |= I_CREATING;
+ might_sleep();
+
+ inode_state_set_raw(inode, I_CREATING);
old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
@@ -1613,11 +1888,11 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
EXPORT_SYMBOL(insert_inode_locked4);
-int generic_delete_inode(struct inode *inode)
+int inode_just_drop(struct inode *inode)
{
return 1;
}
-EXPORT_SYMBOL(generic_delete_inode);
+EXPORT_SYMBOL(inode_just_drop);
/*
* Called when we're dropping the last reference
@@ -1633,40 +1908,44 @@ static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const struct super_operations *op = inode->i_sb->s_op;
- unsigned long state;
int drop;
- WARN_ON(inode->i_state & I_NEW);
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode);
if (op->drop_inode)
drop = op->drop_inode(inode);
else
- drop = generic_drop_inode(inode);
+ drop = inode_generic_drop(inode);
if (!drop &&
- !(inode->i_state & I_DONTCACHE) &&
+ !(inode_state_read(inode) & I_DONTCACHE) &&
(sb->s_flags & SB_ACTIVE)) {
- __inode_add_lru(inode, true);
+ __inode_lru_list_add(inode, true);
spin_unlock(&inode->i_lock);
return;
}
- state = inode->i_state;
- if (!drop) {
- WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
+ /*
+ * Re-check ->i_count in case the ->drop_inode() hooks played games.
+ * Note we only execute this if the verdict was to drop the inode.
+ */
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) != 0, inode);
+
+ if (drop) {
+ inode_state_set(inode, I_FREEING);
+ } else {
+ inode_state_set(inode, I_WILL_FREE);
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
spin_lock(&inode->i_lock);
- state = inode->i_state;
- WARN_ON(state & I_NEW);
- state &= ~I_WILL_FREE;
+ WARN_ON(inode_state_read(inode) & I_NEW);
+ inode_state_replace(inode, I_WILL_FREE, I_FREEING);
}
- WRITE_ONCE(inode->i_state, state | I_FREEING);
- if (!list_empty(&inode->i_lru))
- inode_lru_list_del(inode);
+ inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
evict(inode);
@@ -1683,23 +1962,61 @@ static void iput_final(struct inode *inode)
*/
void iput(struct inode *inode)
{
- if (!inode)
+ might_sleep();
+ if (unlikely(!inode))
return;
- BUG_ON(inode->i_state & I_CLEAR);
+
retry:
- if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
- if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
- atomic_inc(&inode->i_count);
- spin_unlock(&inode->i_lock);
- trace_writeback_lazytime_iput(inode);
- mark_inode_dirty_sync(inode);
- goto retry;
- }
- iput_final(inode);
+ lockdep_assert_not_held(&inode->i_lock);
+ VFS_BUG_ON_INODE(inode_state_read_once(inode) & (I_FREEING | I_CLEAR), inode);
+ /*
+ * Note this assert is technically racy as if the count is bogusly
+ * equal to one, then two CPUs racing to further drop it can both
+ * conclude it's fine.
+ */
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 1, inode);
+
+ if (atomic_add_unless(&inode->i_count, -1, 1))
+ return;
+
+ if ((inode_state_read_once(inode) & I_DIRTY_TIME) && inode->i_nlink) {
+ trace_writeback_lazytime_iput(inode);
+ mark_inode_dirty_sync(inode);
+ goto retry;
}
+
+ spin_lock(&inode->i_lock);
+ if (unlikely((inode_state_read(inode) & I_DIRTY_TIME) && inode->i_nlink)) {
+ spin_unlock(&inode->i_lock);
+ goto retry;
+ }
+
+ if (!atomic_dec_and_test(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ /*
+	 * iput_final() drops ->i_lock; we can't assert on it as the inode may
+	 * be deallocated by the time the call returns.
+ */
+ iput_final(inode);
}
EXPORT_SYMBOL(iput);
+/**
+ * iput_not_last - put an inode assuming this is not the last reference
+ * @inode: inode to put
+ */
+void iput_not_last(struct inode *inode)
+{
+ VFS_BUG_ON_INODE(inode_state_read_once(inode) & (I_FREEING | I_CLEAR), inode);
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 2, inode);
+
+ WARN_ON(atomic_sub_return(1, &inode->i_count) == 0);
+}
+EXPORT_SYMBOL(iput_not_last);
+
#ifdef CONFIG_BLOCK
/**
* bmap - find a block number in a file
@@ -1728,61 +2045,115 @@ EXPORT_SYMBOL(bmap);
/*
* With relative atime, only update atime if the previous atime is
- * earlier than either the ctime or mtime or if at least a day has
- * passed since the last atime update.
+ * earlier than or equal to either the ctime or mtime,
+ * or if at least a day has passed since the last atime update.
*/
-static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
+static bool relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
+ struct timespec64 atime, mtime, ctime;
if (!(mnt->mnt_flags & MNT_RELATIME))
- return 1;
+ return true;
/*
- * Is mtime younger than atime? If yes, update atime:
+ * Is mtime younger than or equal to atime? If yes, update atime:
*/
- if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
- return 1;
+ atime = inode_get_atime(inode);
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&mtime, &atime) >= 0)
+ return true;
/*
- * Is ctime younger than atime? If yes, update atime:
+ * Is ctime younger than or equal to atime? If yes, update atime:
*/
- if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
- return 1;
+ ctime = inode_get_ctime(inode);
+ if (timespec64_compare(&ctime, &atime) >= 0)
+ return true;
/*
* Is the previous atime value older than a day? If yes,
* update atime:
*/
- if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
- return 1;
+ if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
+ return true;
/*
* Good, we can skip the atime update:
*/
- return 0;
+ return false;
}
-int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
+/**
+ * inode_update_timestamps - update the timestamps on the inode
+ * @inode: inode to be updated
+ * @flags: S_* flags that need to be updated
+ *
+ * The update_time function is called when an inode's timestamps need to be
+ * updated for a read or write operation. This function handles updating the
+ * actual timestamps. It's up to the caller to ensure that the inode is marked
+ * dirty appropriately.
+ *
+ * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
+ * attempt to update all three of them. S_ATIME updates can be handled
+ * independently of the rest.
+ *
+ * Returns a set of S_* flags indicating which values changed.
+ */
+int inode_update_timestamps(struct inode *inode, int flags)
{
- int dirty_flags = 0;
+ int updated = 0;
+ struct timespec64 now;
- if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
- if (flags & S_ATIME)
- inode->i_atime = *time;
- if (flags & S_CTIME)
- inode->i_ctime = *time;
- if (flags & S_MTIME)
- inode->i_mtime = *time;
-
- if (inode->i_sb->s_flags & SB_LAZYTIME)
- dirty_flags |= I_DIRTY_TIME;
- else
- dirty_flags |= I_DIRTY_SYNC;
+ if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
+ struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
+
+ now = inode_set_ctime_current(inode);
+ if (!timespec64_equal(&now, &ctime))
+ updated |= S_CTIME;
+ if (!timespec64_equal(&now, &mtime)) {
+ inode_set_mtime_to_ts(inode, now);
+ updated |= S_MTIME;
+ }
+ if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
+ updated |= S_VERSION;
+ } else {
+ now = current_time(inode);
}
- if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
- dirty_flags |= I_DIRTY_SYNC;
+ if (flags & S_ATIME) {
+ struct timespec64 atime = inode_get_atime(inode);
+
+ if (!timespec64_equal(&now, &atime)) {
+ inode_set_atime_to_ts(inode, now);
+ updated |= S_ATIME;
+ }
+ }
+ return updated;
+}
+EXPORT_SYMBOL(inode_update_timestamps);
+/**
+ * generic_update_time - update the timestamps on the inode
+ * @inode: inode to be updated
+ * @flags: S_* flags that need to be updated
+ *
+ * The update_time function is called when an inode's timestamps need to be
+ * updated for a read or write operation. In the case where any of S_MTIME,
+ * S_CTIME, or S_VERSION need to be updated, we attempt to update all three of
+ * them. S_ATIME updates can be handled independently of the rest.
+ *
+ * Returns an S_* mask indicating which fields were updated.
+ */
+int generic_update_time(struct inode *inode, int flags)
+{
+ int updated = inode_update_timestamps(inode, flags);
+ int dirty_flags = 0;
+
+ if (updated & (S_ATIME|S_MTIME|S_CTIME))
+ dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
+ if (updated & S_VERSION)
+ dirty_flags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, dirty_flags);
- return 0;
+ return updated;
}
EXPORT_SYMBOL(generic_update_time);
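/*
 * Illustrative sketch (not from this patch): with the timespec argument gone
 * from the ->update_time() signature, a filesystem with no special needs can
 * simply defer to generic_update_time() and report success.
 */
static int myfs_update_time(struct inode *inode, int flags)
{
	generic_update_time(inode, flags);
	return 0;
}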
@@ -1790,11 +2161,12 @@ EXPORT_SYMBOL(generic_update_time);
* This does the actual work of updating an inodes time or version. Must have
* had called mnt_want_write() before calling this.
*/
-int inode_update_time(struct inode *inode, struct timespec64 *time, int flags)
+int inode_update_time(struct inode *inode, int flags)
{
if (inode->i_op->update_time)
- return inode->i_op->update_time(inode, time, flags);
- return generic_update_time(inode, time, flags);
+ return inode->i_op->update_time(inode, flags);
+ generic_update_time(inode, flags);
+ return 0;
}
EXPORT_SYMBOL(inode_update_time);
@@ -1810,7 +2182,7 @@ EXPORT_SYMBOL(inode_update_time);
bool atime_needs_update(const struct path *path, struct inode *inode)
{
struct vfsmount *mnt = path->mnt;
- struct timespec64 now;
+ struct timespec64 now, atime;
if (inode->i_flags & S_NOATIME)
return false;
@@ -1818,7 +2190,7 @@ bool atime_needs_update(const struct path *path, struct inode *inode)
/* Atime updates will likely cause i_uid and i_gid to be written
* back improperly if their true value is unknown to the vfs.
*/
- if (HAS_UNMAPPED_ID(mnt_user_ns(mnt), inode))
+ if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
return false;
if (IS_NOATIME(inode))
@@ -1836,7 +2208,8 @@ bool atime_needs_update(const struct path *path, struct inode *inode)
if (!relatime_need_update(mnt, inode, now))
return false;
- if (timespec64_equal(&inode->i_atime, &now))
+ atime = inode_get_atime(inode);
+ if (timespec64_equal(&atime, &now))
return false;
return true;
@@ -1846,7 +2219,6 @@ void touch_atime(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
struct inode *inode = d_inode(path->dentry);
- struct timespec64 now;
if (!atime_needs_update(path, inode))
return;
@@ -1854,7 +2226,7 @@ void touch_atime(const struct path *path)
if (!sb_start_write_trylock(inode->i_sb))
return;
- if (__mnt_want_write(mnt) != 0)
+ if (mnt_get_write_access(mnt) != 0)
goto skip_update;
/*
* File systems can error out when updating inodes if they need to
@@ -1865,49 +2237,20 @@ void touch_atime(const struct path *path)
* We may also fail on filesystems that have the ability to make parts
* of the fs read only, e.g. subvolumes in Btrfs.
*/
- now = current_time(inode);
- inode_update_time(inode, &now, S_ATIME);
- __mnt_drop_write(mnt);
+ inode_update_time(inode, S_ATIME);
+ mnt_put_write_access(mnt);
skip_update:
sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
/*
- * The logic we want is
- *
- * if suid or (sgid and xgrp)
- * remove privs
- */
-int should_remove_suid(struct dentry *dentry)
-{
- umode_t mode = d_inode(dentry)->i_mode;
- int kill = 0;
-
- /* suid always must be killed */
- if (unlikely(mode & S_ISUID))
- kill = ATTR_KILL_SUID;
-
- /*
- * sgid without any exec bits is just a mandatory locking mark; leave
- * it alone. If some exec bits are set, it's a real sgid; kill it.
- */
- if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
- kill |= ATTR_KILL_SGID;
-
- if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
- return kill;
-
- return 0;
-}
-EXPORT_SYMBOL(should_remove_suid);
-
-/*
* Return mask of changes for notify_change() that need to be done as a
* response to write or truncate. Return 0 if nothing has to be changed.
* Negative value on error (change should be denied).
*/
-int dentry_needs_remove_privs(struct dentry *dentry)
+int dentry_needs_remove_privs(struct mnt_idmap *idmap,
+ struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
int mask = 0;
@@ -1916,7 +2259,7 @@ int dentry_needs_remove_privs(struct dentry *dentry)
if (IS_NOSEC(inode))
return 0;
- mask = should_remove_suid(dentry);
+ mask = setattr_should_drop_suidgid(idmap, inode);
ret = security_inode_need_killpriv(dentry);
if (ret < 0)
return ret;
@@ -1925,7 +2268,7 @@ int dentry_needs_remove_privs(struct dentry *dentry)
return mask;
}
-static int __remove_privs(struct user_namespace *mnt_userns,
+static int __remove_privs(struct mnt_idmap *idmap,
struct dentry *dentry, int kill)
{
struct iattr newattrs;
@@ -1935,109 +2278,208 @@ static int __remove_privs(struct user_namespace *mnt_userns,
* Note we call this on write, so notify_change will not
* encounter any conflicting delegations:
*/
- return notify_change(mnt_userns, dentry, &newattrs, NULL);
+ return notify_change(idmap, dentry, &newattrs, NULL);
}
-/*
- * Remove special file priviledges (suid, capabilities) when file is written
- * to or truncated.
- */
-int file_remove_privs(struct file *file)
+static int file_remove_privs_flags(struct file *file, unsigned int flags)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = file_inode(file);
- int kill;
int error = 0;
+ int kill;
- /*
- * Fast path for nothing security related.
- * As well for non-regular files, e.g. blkdev inodes.
- * For example, blkdev_write_iter() might get here
- * trying to remove privs which it is not allowed to.
- */
if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
return 0;
- kill = dentry_needs_remove_privs(dentry);
+ kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
if (kill < 0)
return kill;
- if (kill)
- error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
+
+ if (kill) {
+ if (flags & IOCB_NOWAIT)
+ return -EAGAIN;
+
+ error = __remove_privs(file_mnt_idmap(file), dentry, kill);
+ }
+
if (!error)
inode_has_no_xattr(inode);
-
return error;
}
+
+/**
+ * file_remove_privs - remove special file privileges (suid, capabilities)
+ * @file: file to remove privileges from
+ *
+ * When a file is modified by a write or truncation, ensure that special
+ * file privileges are removed.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int file_remove_privs(struct file *file)
+{
+ return file_remove_privs_flags(file, 0);
+}
EXPORT_SYMBOL(file_remove_privs);
/**
- * file_update_time - update mtime and ctime time
- * @file: file accessed
+ * current_time - Return FS time (possibly fine-grained)
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs, as suitable for a ctime/mtime change. If the ctime is flagged
+ * as having been QUERIED, get a fine-grained timestamp, but don't update
+ * the floor.
*
- * Update the mtime and ctime members of an inode and mark the inode
- * for writeback. Note that this function is meant exclusively for
- * usage in the file write path of filesystems, and filesystems may
- * choose to explicitly ignore update via this function with the
- * S_NOCMTIME inode flag, e.g. for network filesystem where these
- * timestamps are handled by the server. This can return an error for
- * file systems who need to allocate space in order to update an inode.
+ * For a multigrain inode, this is effectively an estimate of the timestamp
+ * that a file would receive. An actual update must go through
+ * inode_set_ctime_current().
*/
+struct timespec64 current_time(struct inode *inode)
+{
+ struct timespec64 now;
+ u32 cns;
-int file_update_time(struct file *file)
+ ktime_get_coarse_real_ts64_mg(&now);
+
+ if (!is_mgtime(inode))
+ goto out;
+
+ /* If nothing has queried it, then coarse time is fine */
+ cns = smp_load_acquire(&inode->i_ctime_nsec);
+ if (cns & I_CTIME_QUERIED) {
+ /*
+ * If there is no apparent change, then get a fine-grained
+ * timestamp.
+ */
+ if (now.tv_nsec == (cns & ~I_CTIME_QUERIED))
+ ktime_get_real_ts64(&now);
+ }
+out:
+ return timestamp_truncate(now, inode);
+}
+EXPORT_SYMBOL(current_time);
+
+static int file_update_time_flags(struct file *file, unsigned int flags)
{
struct inode *inode = file_inode(file);
- struct timespec64 now;
- int sync_it = 0;
- int ret;
+ struct timespec64 now, ts;
+ int sync_mode = 0;
+ int ret = 0;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
+ if (unlikely(file->f_mode & FMODE_NOCMTIME))
+ return 0;
now = current_time(inode);
- if (!timespec64_equal(&inode->i_mtime, &now))
- sync_it = S_MTIME;
-
- if (!timespec64_equal(&inode->i_ctime, &now))
- sync_it |= S_CTIME;
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(&ts, &now))
+ sync_mode |= S_MTIME;
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(&ts, &now))
+ sync_mode |= S_CTIME;
if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
- sync_it |= S_VERSION;
-
- if (!sync_it)
- return 0;
+ sync_mode |= S_VERSION;
- /* Finally allowed to write? Takes lock. */
- if (__mnt_want_write_file(file))
+ if (!sync_mode)
return 0;
- ret = inode_update_time(inode, &now, sync_it);
- __mnt_drop_write_file(file);
+ if (flags & IOCB_NOWAIT)
+ return -EAGAIN;
+ if (mnt_get_write_access_file(file))
+ return 0;
+ ret = inode_update_time(inode, sync_mode);
+ mnt_put_write_access_file(file);
return ret;
}
+
+/**
+ * file_update_time - update mtime and ctime time
+ * @file: file accessed
+ *
+ * Update the mtime and ctime members of an inode and mark the inode for
+ * writeback. Note that this function is meant exclusively for usage in
+ * the file write path of filesystems, and filesystems may choose to
+ * explicitly ignore updates via this function with the S_NOCMTIME inode
+ * flag, e.g. for network filesystems where these timestamps are handled
+ * by the server. This can return an error for file systems that need to
+ * allocate space in order to update an inode.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int file_update_time(struct file *file)
+{
+ return file_update_time_flags(file, 0);
+}
EXPORT_SYMBOL(file_update_time);
-/* Caller must hold the file's inode lock */
-int file_modified(struct file *file)
+/**
+ * file_modified_flags - handle mandated vfs changes when modifying a file
+ * @file: file that was modified
+ * @flags: kiocb flags
+ *
+ * When a file has been modified, ensure that special
+ * file privileges are removed and time settings are updated.
+ *
+ * If IOCB_NOWAIT is set, special file privileges will not be removed and
+ * time settings will not be updated. It will return -EAGAIN.
+ *
+ * Context: Caller must hold the file's inode lock.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int file_modified_flags(struct file *file, int flags)
{
- int err;
+ int ret;
/*
* Clear the security bits if the process is not being run by root.
* This keeps people from modifying setuid and setgid binaries.
*/
- err = file_remove_privs(file);
- if (err)
- return err;
-
- if (unlikely(file->f_mode & FMODE_NOCMTIME))
- return 0;
+ ret = file_remove_privs_flags(file, flags);
+ if (ret)
+ return ret;
+ return file_update_time_flags(file, flags);
+}
- return file_update_time(file);
+/**
+ * file_modified - handle mandated vfs changes when modifying a file
+ * @file: file that was modified
+ *
+ * When a file has been modified, ensure that special
+ * file privileges are removed and time settings are updated.
+ *
+ * Context: Caller must hold the file's inode lock.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int file_modified(struct file *file)
+{
+ return file_modified_flags(file, 0);
}
EXPORT_SYMBOL(file_modified);
+/**
+ * kiocb_modified - handle mandated vfs changes when modifying a file
+ * @iocb: iocb that was modified
+ *
+ * When a file has been modified, ensure that special
+ * file privileges are removed and time settings are updated.
+ *
+ * Context: Caller must hold the file's inode lock.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kiocb_modified(struct kiocb *iocb)
+{
+ return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
+}
+EXPORT_SYMBOL_GPL(kiocb_modified);
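/*
 * Illustrative sketch (hypothetical write path): kiocb_modified() honours
 * IOCB_NOWAIT, returning -EAGAIN instead of blocking on privilege removal or
 * timestamp updates, so a RWF_NOWAIT-aware ->write_iter can call it directly.
 */
static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = kiocb_modified(iocb);
	if (!ret) {
		/* ... copy data from @from into the file, set ret here ... */
	}

	inode_unlock(inode);
	return ret;
}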
+
int inode_needs_sync(struct inode *inode)
{
if (IS_SYNC(inode))
@@ -2059,17 +2501,31 @@ EXPORT_SYMBOL(inode_needs_sync);
* wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
* will DTRT.
*/
-static void __wait_on_freeing_inode(struct inode *inode)
+static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
{
- wait_queue_head_t *wq;
- DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
- wq = bit_waitqueue(&inode->i_state, __I_NEW);
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+
+ /*
+ * Handle racing against evict(), see that routine for more details.
+ */
+ if (unlikely(inode_unhashed(inode))) {
+ WARN_ON(is_inode_hash_locked);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode->i_lock);
- spin_unlock(&inode_hash_lock);
+ rcu_read_unlock();
+ if (is_inode_hash_locked)
+ spin_unlock(&inode_hash_lock);
schedule();
- finish_wait(wq, &wait.wq_entry);
- spin_lock(&inode_hash_lock);
+ finish_wait(wq_head, &wqe.wq_entry);
+ if (is_inode_hash_locked)
+ spin_lock(&inode_hash_lock);
+ rcu_read_lock();
}
static __initdata unsigned long ihash_entries;
@@ -2112,7 +2568,7 @@ void __init inode_init(void)
sizeof(struct inode),
0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
/* Hash may have been set up in inode_init_early */
@@ -2134,82 +2590,86 @@ void __init inode_init(void)
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
inode->i_mode = mode;
- if (S_ISCHR(mode)) {
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFCHR:
inode->i_fop = &def_chr_fops;
inode->i_rdev = rdev;
- } else if (S_ISBLK(mode)) {
- inode->i_fop = &def_blk_fops;
+ break;
+ case S_IFBLK:
+ if (IS_ENABLED(CONFIG_BLOCK))
+ inode->i_fop = &def_blk_fops;
inode->i_rdev = rdev;
- } else if (S_ISFIFO(mode))
+ break;
+ case S_IFIFO:
inode->i_fop = &pipefifo_fops;
- else if (S_ISSOCK(mode))
- ; /* leave it no_open_fops */
- else
+ break;
+ case S_IFSOCK:
+ /* leave it no_open_fops */
+ break;
+ default:
printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
" inode %s:%lu\n", mode, inode->i_sb->s_id,
inode->i_ino);
+ break;
+ }
}
EXPORT_SYMBOL(init_special_inode);
/**
* inode_init_owner - Init uid,gid,mode for new inode according to posix standards
- * @mnt_userns: User namespace of the mount the inode was created from
+ * @idmap: idmap of the mount the inode was created from
* @inode: New inode
* @dir: Directory inode
* @mode: mode of the new inode
*
- * If the inode has been created through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions
+ * If the inode has been created through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions
* and initializing i_uid and i_gid. On non-idmapped mounts or if permission
- * checking is to be performed on the raw inode simply passs init_user_ns.
+ * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
*/
-void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
+void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
const struct inode *dir, umode_t mode)
{
- inode_fsuid_set(inode, mnt_userns);
+ inode_fsuid_set(inode, idmap);
if (dir && dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
/* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
- else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
- !in_group_p(i_gid_into_mnt(mnt_userns, dir)) &&
- !capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
- mode &= ~S_ISGID;
} else
- inode_fsgid_set(inode, mnt_userns);
+ inode_fsgid_set(inode, idmap);
inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
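/*
 * Illustrative sketch (hypothetical ->mknod): a typical creation path pairs
 * inode_init_owner() with init_special_inode() when the new inode is a device
 * node or FIFO.
 */
static int myfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		      struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;

	inode->i_ino = get_next_ino();
	inode_init_owner(idmap, inode, dir, mode);
	init_special_inode(inode, inode->i_mode, rdev);
	d_instantiate(dentry, inode);
	return 0;
}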
/**
* inode_owner_or_capable - check current task permissions to inode
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: inode being checked
*
* Return true if current either has CAP_FOWNER in a namespace with the
* inode owner uid mapped, or owns the file.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-bool inode_owner_or_capable(struct user_namespace *mnt_userns,
+bool inode_owner_or_capable(struct mnt_idmap *idmap,
const struct inode *inode)
{
- kuid_t i_uid;
+ vfsuid_t vfsuid;
struct user_namespace *ns;
- i_uid = i_uid_into_mnt(mnt_userns, inode);
- if (uid_eq(current_fsuid(), i_uid))
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
+ if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
return true;
ns = current_user_ns();
- if (kuid_has_mapping(ns, i_uid) && ns_capable(ns, CAP_FOWNER))
+ if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
return true;
return false;
}
@@ -2218,18 +2678,11 @@ EXPORT_SYMBOL(inode_owner_or_capable);
/*
* Direct i/o helper functions
*/
-static void __inode_dio_wait(struct inode *inode)
+bool inode_dio_finished(const struct inode *inode)
{
- wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
- DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
- do {
- prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&inode->i_dio_count))
- schedule();
- } while (atomic_read(&inode->i_dio_count));
- finish_wait(wq, &q.wq_entry);
+ return atomic_read(&inode->i_dio_count) == 0;
}
+EXPORT_SYMBOL(inode_dio_finished);
/**
* inode_dio_wait - wait for outstanding DIO requests to finish
@@ -2239,19 +2692,25 @@ static void __inode_dio_wait(struct inode *inode)
* proceed with a truncate or equivalent operation.
*
* Must be called under a lock that serializes taking new references
- * to i_dio_count, usually by inode->i_mutex.
+ * to i_dio_count, usually by inode->i_rwsem.
*/
void inode_dio_wait(struct inode *inode)
{
- if (atomic_read(&inode->i_dio_count))
- __inode_dio_wait(inode);
+ wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));
}
EXPORT_SYMBOL(inode_dio_wait);
+void inode_dio_wait_interruptible(struct inode *inode)
+{
+ wait_var_event_interruptible(&inode->i_dio_count,
+ inode_dio_finished(inode));
+}
+EXPORT_SYMBOL(inode_dio_wait_interruptible);
+
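/*
 * Illustrative sketch (not from this patch): the classic use of
 * inode_dio_wait() is to drain in-flight O_DIRECT I/O under i_rwsem before a
 * size-changing operation.
 */
static void myfs_shrink(struct inode *inode, loff_t newsize)
{
	inode_lock(inode);
	inode_dio_wait(inode);		/* no new DIO can start while locked */
	truncate_setsize(inode, newsize);
	inode_unlock(inode);
}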
/*
* inode_set_flags - atomically set some inode flags
*
- * Note: the caller should be holding i_mutex, or else be sure that
+ * Note: the caller should be holding i_rwsem exclusively, or else be sure that
* they have exclusive access to the inode structure (i.e., while the
* inode is being instantiated). The reason for the cmpxchg() loop
* --- which wouldn't be necessary if all code paths which modify
@@ -2259,7 +2718,7 @@ EXPORT_SYMBOL(inode_dio_wait);
* code path which doesn't today so we use cmpxchg() out of an abundance
* of caution.
*
- * In the long run, i_mutex is overkill, and we should probably look
+ * In the long run, i_rwsem is overkill, and we should probably look
* at using the i_lock spinlock to protect i_flags, and then make sure
* it is so documented in include/linux/fs.h and that all code follows
* the locking convention!!
@@ -2278,6 +2737,16 @@ void inode_nohighmem(struct inode *inode)
}
EXPORT_SYMBOL(inode_nohighmem);
+struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts)
+{
+ trace_inode_set_ctime_to_ts(inode, &ts);
+ set_normalized_timespec64(&ts, ts.tv_sec, ts.tv_nsec);
+ inode->i_ctime_sec = ts.tv_sec;
+ inode->i_ctime_nsec = ts.tv_nsec;
+ return ts;
+}
+EXPORT_SYMBOL(inode_set_ctime_to_ts);
+
/**
* timestamp_truncate - Truncate timespec to a granularity
* @t: Timespec
@@ -2310,26 +2779,229 @@ struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
EXPORT_SYMBOL(timestamp_truncate);
/**
- * current_time - Return FS time
- * @inode: inode.
+ * inode_set_ctime_current - set the ctime to current_time
+ * @inode: inode
*
- * Return the current time truncated to the time granularity supported by
- * the fs.
+ * Set the inode's ctime to the current value for the inode. Returns the
+ * current value that was assigned. If this is not a multigrain inode, then we
+ * set it to the later of the coarse time and floor value.
+ *
+ * If it is multigrain, then we first see if the coarse-grained timestamp is
+ * distinct from what is already there. If so, then use that. Otherwise, get a
+ * fine-grained timestamp.
*
- * Note that inode and inode->sb cannot be NULL.
- * Otherwise, the function warns and returns time without truncation.
+ * After that, try to swap the new value into i_ctime_nsec. Accept the
+ * resulting ctime, regardless of the outcome of the swap. If it has
+ * already been replaced, then that timestamp is later than the earlier
+ * unacceptable one, and is thus acceptable.
*/
-struct timespec64 current_time(struct inode *inode)
+struct timespec64 inode_set_ctime_current(struct inode *inode)
{
struct timespec64 now;
+ u32 cns, cur;
- ktime_get_coarse_real_ts64(&now);
+ ktime_get_coarse_real_ts64_mg(&now);
+ now = timestamp_truncate(now, inode);
- if (unlikely(!inode->i_sb)) {
- WARN(1, "current_time() called with uninitialized super_block in the inode");
- return now;
+ /* Just return that if this is not a multigrain fs */
+ if (!is_mgtime(inode)) {
+ inode_set_ctime_to_ts(inode, now);
+ goto out;
}
- return timestamp_truncate(now, inode);
+ /*
+ * A fine-grained time is only needed if someone has queried
+ * for timestamps, and the current coarse grained time isn't
+ * later than what's already there.
+ */
+ cns = smp_load_acquire(&inode->i_ctime_nsec);
+ if (cns & I_CTIME_QUERIED) {
+ struct timespec64 ctime = { .tv_sec = inode->i_ctime_sec,
+ .tv_nsec = cns & ~I_CTIME_QUERIED };
+
+ if (timespec64_compare(&now, &ctime) <= 0) {
+ ktime_get_real_ts64_mg(&now);
+ now = timestamp_truncate(now, inode);
+ mgtime_counter_inc(mg_fine_stamps);
+ }
+ }
+ mgtime_counter_inc(mg_ctime_updates);
+
+ /* No need to cmpxchg if it's exactly the same */
+ if (cns == now.tv_nsec && inode->i_ctime_sec == now.tv_sec) {
+ trace_ctime_xchg_skip(inode, &now);
+ goto out;
+ }
+ cur = cns;
+retry:
+ /* Try to swap the nsec value into place. */
+ if (try_cmpxchg(&inode->i_ctime_nsec, &cur, now.tv_nsec)) {
+ /* If swap occurred, then we're (mostly) done */
+ inode->i_ctime_sec = now.tv_sec;
+ trace_ctime_ns_xchg(inode, cns, now.tv_nsec, cur);
+ mgtime_counter_inc(mg_ctime_swaps);
+ } else {
+ /*
+ * Was the change due to someone marking the old ctime QUERIED?
+ * If so then retry the swap. This can only happen once since
+ * the only way to clear I_CTIME_QUERIED is to stamp the inode
+ * with a new ctime.
+ */
+ if (!(cns & I_CTIME_QUERIED) && (cns | I_CTIME_QUERIED) == cur) {
+ cns = cur;
+ goto retry;
+ }
+ /* Otherwise, keep the existing ctime */
+ now.tv_sec = inode->i_ctime_sec;
+ now.tv_nsec = cur & ~I_CTIME_QUERIED;
+ }
+out:
+ return now;
}
-EXPORT_SYMBOL(current_time);
+EXPORT_SYMBOL(inode_set_ctime_current);
+
+/**
+ * inode_set_ctime_deleg - try to update the ctime on a delegated inode
+ * @inode: inode to update
+ * @update: timespec64 to set the ctime
+ *
+ * Attempt to atomically update the ctime on behalf of a delegation holder.
+ *
+ * The nfs server can call back the holder of a delegation to get updated
+ * inode attributes, including the mtime. When updating the mtime, update
+ * the ctime to a value at least equal to that.
+ *
+ * This can race with concurrent updates to the inode, in which
+ * case the update is skipped.
+ *
+ * Note that this works even when multigrain timestamps are not enabled,
+ * so it is used in either case.
+ */
+struct timespec64 inode_set_ctime_deleg(struct inode *inode, struct timespec64 update)
+{
+ struct timespec64 now, cur_ts;
+ u32 cur, old;
+
+ /* pairs with try_cmpxchg below */
+ cur = smp_load_acquire(&inode->i_ctime_nsec);
+ cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
+ cur_ts.tv_sec = inode->i_ctime_sec;
+
+ /* If the update is older than the existing value, skip it. */
+ if (timespec64_compare(&update, &cur_ts) <= 0)
+ return cur_ts;
+
+ ktime_get_coarse_real_ts64_mg(&now);
+
+ /* Clamp the update to "now" if it's in the future */
+ if (timespec64_compare(&update, &now) > 0)
+ update = now;
+
+ update = timestamp_truncate(update, inode);
+
+ /* No need to update if the values are already the same */
+ if (timespec64_equal(&update, &cur_ts))
+ return cur_ts;
+
+ /*
+ * Try to swap the nsec value into place. If it fails, that means
+ * it raced with an update due to a write or similar activity. That
+ * stamp takes precedence, so just skip the update.
+ */
+retry:
+ old = cur;
+ if (try_cmpxchg(&inode->i_ctime_nsec, &cur, update.tv_nsec)) {
+ inode->i_ctime_sec = update.tv_sec;
+ mgtime_counter_inc(mg_ctime_swaps);
+ return update;
+ }
+
+ /*
+ * Was the change due to another task marking the old ctime QUERIED?
+ *
+ * If so, then retry the swap. This can only happen once since
+ * the only way to clear I_CTIME_QUERIED is to stamp the inode
+ * with a new ctime.
+ */
+ if (!(old & I_CTIME_QUERIED) && (cur == (old | I_CTIME_QUERIED)))
+ goto retry;
+
+ /* Otherwise, it was a new timestamp. */
+ cur_ts.tv_sec = inode->i_ctime_sec;
+ cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
+ return cur_ts;
+}
+EXPORT_SYMBOL(inode_set_ctime_deleg);
+
+/**
+ * in_group_or_capable - check whether caller is CAP_FSETID privileged
+ * @idmap: idmap of the mount @inode was found from
+ * @inode: inode to check
+ * @vfsgid: the new/current vfsgid of @inode
+ *
+ * Check whether @vfsgid is in the caller's group list or if the caller is
+ * privileged with CAP_FSETID over @inode. This can be used to determine
+ * whether the setgid bit can be kept or must be dropped.
+ *
+ * Return: true if the caller is sufficiently privileged, false if not.
+ */
+bool in_group_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t vfsgid)
+{
+ if (vfsgid_in_group_p(vfsgid))
+ return true;
+ if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(in_group_or_capable);
+
+/**
+ * mode_strip_sgid - handle the sgid bit for non-directories
+ * @idmap: idmap of the mount the inode was created from
+ * @dir: parent directory inode
+ * @mode: mode of the file to be created in @dir
+ *
+ * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
+ * raised and @dir has the S_ISGID bit raised ensure that the caller is
+ * either in the group of the parent directory or they have CAP_FSETID
+ * in their user namespace and are privileged over the parent directory.
+ * In all other cases, strip the S_ISGID bit from @mode.
+ *
+ * Return: the new mode to use for the file
+ */
+umode_t mode_strip_sgid(struct mnt_idmap *idmap,
+ const struct inode *dir, umode_t mode)
+{
+ if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
+ return mode;
+ if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
+ return mode;
+ if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
+ return mode;
+ return mode & ~S_ISGID;
+}
+EXPORT_SYMBOL(mode_strip_sgid);
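/*
 * Illustrative sketch (hypothetical helper): with the S_ISGID handling removed
 * from inode_init_owner() above, one way for a caller constructing the mode by
 * hand to keep the old behaviour is to run it through mode_strip_sgid() first.
 */
static void myfs_init_new_inode(struct mnt_idmap *idmap, struct inode *dir,
				struct inode *inode, umode_t mode)
{
	inode_init_owner(idmap, inode, dir, mode_strip_sgid(idmap, dir, mode));
}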
+
+#ifdef CONFIG_DEBUG_VFS
+/*
+ * Dump an inode.
+ *
+ * TODO: add a proper inode dumping routine; this is a stub to get debugging
+ * off the ground.
+ *
+ * TODO: handle getting to fs type with get_kernel_nofault()?
+ * See dump_mapping() above.
+ */
+void dump_inode(struct inode *inode, const char *reason)
+{
+ struct super_block *sb = inode->i_sb;
+
+ pr_warn("%s encountered for inode %px\n"
+ "fs %s mode %ho opflags 0x%hx flags 0x%x state 0x%x count %d\n",
+ reason, inode, sb->s_type->name, inode->i_mode, inode->i_opflags,
+ inode->i_flags, inode_state_read_once(inode), atomic_read(&inode->i_count));
+}
+
+EXPORT_SYMBOL(dump_inode);
+#endif
diff --git a/fs/internal.h b/fs/internal.h
index 7979ff8d168c..ab638d41ab81 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -14,30 +14,26 @@ struct path;
struct mount;
struct shrink_control;
struct fs_context;
-struct user_namespace;
struct pipe_inode_info;
+struct iov_iter;
+struct mnt_idmap;
+struct ns_common;
/*
* block/bdev.c
*/
#ifdef CONFIG_BLOCK
extern void __init bdev_cache_init(void);
-
-void emergency_thaw_bdev(struct super_block *sb);
#else
static inline void bdev_cache_init(void)
{
}
-static inline int emergency_thaw_bdev(struct super_block *sb)
-{
- return 0;
-}
#endif /* CONFIG_BLOCK */
/*
* buffer.c
*/
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap);
/*
@@ -57,37 +53,45 @@ extern int finish_clean_context(struct fs_context *fc);
* namei.c
*/
extern int filename_lookup(int dfd, struct filename *name, unsigned flags,
- struct path *path, struct path *root);
-extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
- const char *, unsigned int, struct path *);
+ struct path *path, const struct path *root);
int do_rmdir(int dfd, struct filename *name);
int do_unlinkat(int dfd, struct filename *name);
-int may_linkat(struct user_namespace *mnt_userns, struct path *link);
+int may_linkat(struct mnt_idmap *idmap, const struct path *link);
int do_renameat2(int olddfd, struct filename *oldname, int newdfd,
struct filename *newname, unsigned int flags);
int do_mkdirat(int dfd, struct filename *name, umode_t mode);
int do_symlinkat(struct filename *from, int newdfd, struct filename *to);
int do_linkat(int olddfd, struct filename *old, int newdfd,
struct filename *new, int flags);
+int vfs_tmpfile(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ struct file *file, umode_t mode);
+struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
+struct dentry *start_dirop(struct dentry *parent, struct qstr *name,
+ unsigned int lookup_flags);
+int lookup_noperm_common(struct qstr *qname, struct dentry *base);
/*
* namespace.c
*/
extern struct vfsmount *lookup_mnt(const struct path *);
-extern int finish_automount(struct vfsmount *, struct path *);
+extern int finish_automount(struct vfsmount *, const struct path *);
extern int sb_prepare_remount_readonly(struct super_block *);
extern void __init mnt_init(void);
-extern int __mnt_want_write_file(struct file *);
-extern void __mnt_drop_write_file(struct file *);
+int mnt_get_write_access_file(struct file *file);
+void mnt_put_write_access_file(struct file *file);
extern void dissolve_on_fput(struct vfsmount *);
+extern bool may_mount(void);
-int path_mount(const char *dev_name, struct path *path,
+int path_mount(const char *dev_name, const struct path *path,
const char *type_page, unsigned long flags, void *data_page);
-int path_umount(struct path *path, int flags);
+int path_umount(const struct path *path, int flags);
+
+int show_path(struct seq_file *m, struct dentry *root);
/*
* fs_struct.c
@@ -97,17 +101,81 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
/*
* file_table.c
*/
-extern struct file *alloc_empty_file(int, const struct cred *);
-extern struct file *alloc_empty_file_noaccount(int, const struct cred *);
+struct file *alloc_empty_file(int flags, const struct cred *cred);
+struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred);
+struct file *alloc_empty_backing_file(int flags, const struct cred *cred);
+void backing_file_set_user_path(struct file *f, const struct path *path);
+
+static inline void file_put_write_access(struct file *file)
+{
+ put_write_access(file->f_inode);
+ mnt_put_write_access(file->f_path.mnt);
+ if (unlikely(file->f_mode & FMODE_BACKING))
+ mnt_put_write_access(backing_file_user_path(file)->mnt);
+}
+
+static inline void put_file_access(struct file *file)
+{
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
+ i_readcount_dec(file->f_inode);
+ } else if (file->f_mode & FMODE_WRITER) {
+ file_put_write_access(file);
+ }
+}
+
+void fput_close_sync(struct file *);
+void fput_close(struct file *);
/*
* super.c
*/
extern int reconfigure_super(struct fs_context *);
-extern bool trylock_super(struct super_block *sb);
+extern bool super_trylock_shared(struct super_block *sb);
struct super_block *user_get_super(dev_t, bool excl);
void put_super(struct super_block *sb);
extern bool mount_capable(struct fs_context *);
+int sb_init_dio_done_wq(struct super_block *sb);
+
+/*
+ * Prepare superblock for changing its read-only state (i.e., either remount
+ * read-write superblock read-only or vice versa). After this function returns
+ * mnt_is_readonly() will return true for any mount of the superblock if its
+ * caller is able to observe any changes done by the remount. This holds until
+ * sb_end_ro_state_change() is called.
+ */
+static inline void sb_start_ro_state_change(struct super_block *sb)
+{
+ WRITE_ONCE(sb->s_readonly_remount, 1);
+ /*
+ * For RO->RW transition, the barrier pairs with the barrier in
+ * mnt_is_readonly() making sure if mnt_is_readonly() sees SB_RDONLY
+ * cleared, it will see s_readonly_remount set.
+ * For RW->RO transition, the barrier pairs with the barrier in
+ * mnt_get_write_access() before the mnt_is_readonly() check.
+ * The barrier makes sure if mnt_get_write_access() sees MNT_WRITE_HOLD
+ * already cleared, it will see s_readonly_remount set.
+ */
+ smp_wmb();
+}
+
+/*
+ * Ends section changing read-only state of the superblock. After this function
+ * returns if mnt_is_readonly() returns false, the caller will be able to
+ * observe all the changes remount did to the superblock.
+ */
+static inline void sb_end_ro_state_change(struct super_block *sb)
+{
+ /*
+ * This barrier provides release semantics that pairs with
+ * the smp_rmb() acquire semantics in mnt_is_readonly().
+ * This barrier pair ensures that when mnt_is_readonly() sees
+ * 0 for sb->s_readonly_remount, it will also see all the
+ * preceding flag changes that were made during the RO state
+ * change.
+ */
+ smp_wmb();
+ WRITE_ONCE(sb->s_readonly_remount, 0);
+}
/*
* open.c
@@ -125,9 +193,10 @@ extern struct file *do_file_open_root(const struct path *,
const char *, const struct open_flags *);
extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
-extern int __close_fd_get_file(unsigned int fd, struct file **res);
+struct file *file_close_fd_locked(struct files_struct *files, unsigned fd);
-long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+int do_ftruncate(struct file *file, loff_t length, int small);
+int do_sys_ftruncate(unsigned int fd, loff_t length, int small);
int chmod_common(const struct path *path, umode_t mode);
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag);
@@ -138,13 +207,14 @@ extern int vfs_open(const struct path *, struct file *);
* inode.c
*/
extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
-extern int dentry_needs_remove_privs(struct dentry *dentry);
+int dentry_needs_remove_privs(struct mnt_idmap *, struct dentry *dentry);
+bool in_group_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t vfsgid);
/*
* fs-writeback.c
*/
extern long get_nr_dirty_inodes(void);
-extern int invalidate_inodes(struct super_block *, bool);
/*
* dcache.c
@@ -156,11 +226,10 @@ extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *)
extern char *simple_dname(struct dentry *, char *, int);
extern void dput_to_list(struct dentry *, struct list_head *);
extern void shrink_dentry_list(struct list_head *);
-
-/*
- * read_write.c
- */
-extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
+extern void shrink_dcache_for_umount(struct super_block *);
+extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
+extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
+ const struct qstr *name, unsigned *seq);
/*
* pipe.c
@@ -177,20 +246,115 @@ extern void mnt_pin_kill(struct mount *m);
* fs/nsfs.c
*/
extern const struct dentry_operations ns_dentry_operations;
-
-/* direct-io.c: */
-int sb_init_dio_done_wq(struct super_block *sb);
+int open_namespace(struct ns_common *ns);
/*
* fs/stat.c:
*/
-int do_statx(int dfd, const char __user *filename, unsigned flags,
+
+int do_statx(int dfd, struct filename *filename, unsigned int flags,
unsigned int mask, struct statx __user *buffer);
+int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
+ struct statx __user *buffer);
/*
* fs/splice.c:
*/
-long splice_file_to_pipe(struct file *in,
- struct pipe_inode_info *opipe,
- loff_t *offset,
- size_t len, unsigned int flags);
+ssize_t splice_file_to_pipe(struct file *in,
+ struct pipe_inode_info *opipe,
+ loff_t *offset,
+ size_t len, unsigned int flags);
+
+/*
+ * fs/xattr.c:
+ */
+struct xattr_name {
+ char name[XATTR_NAME_MAX + 1];
+};
+
+struct kernel_xattr_ctx {
+ /* Value of attribute */
+ union {
+ const void __user *cvalue;
+ void __user *value;
+ };
+ void *kvalue;
+ size_t size;
+ /* Attribute name */
+ struct xattr_name *kname;
+ unsigned int flags;
+};
+
+ssize_t file_getxattr(struct file *file, struct kernel_xattr_ctx *ctx);
+ssize_t filename_getxattr(int dfd, struct filename *filename,
+ unsigned int lookup_flags, struct kernel_xattr_ctx *ctx);
+int file_setxattr(struct file *file, struct kernel_xattr_ctx *ctx);
+int filename_setxattr(int dfd, struct filename *filename,
+ unsigned int lookup_flags, struct kernel_xattr_ctx *ctx);
+int setxattr_copy(const char __user *name, struct kernel_xattr_ctx *ctx);
+int import_xattr_name(struct xattr_name *kname, const char __user *name);
+
+int may_write_xattr(struct mnt_idmap *idmap, struct inode *inode);
+
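/*
 * Editorial sketch, not part of the patch: one plausible fgetxattr()-style
 * caller of the helpers declared above. The function name is hypothetical
 * and error handling is trimmed to the essentials.
 */
static ssize_t example_fgetxattr(struct file *f, const char __user *name,
				 void __user *value, size_t size)
{
	struct xattr_name kname;
	struct kernel_xattr_ctx ctx = {
		.value = value,
		.kvalue = NULL,
		.size = size,
		.kname = &kname,
		.flags = 0,
	};
	int error;

	/* Copy and validate the attribute name from userspace. */
	error = import_xattr_name(&kname, name);
	if (error)
		return error;

	/* Fetch the attribute from the open file and copy it back to @value. */
	return file_getxattr(f, &ctx);
}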
+#ifdef CONFIG_FS_POSIX_ACL
+int do_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name, const void *kvalue, size_t size);
+ssize_t do_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name, void *kvalue, size_t size);
+#else
+static inline int do_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ const void *kvalue, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline ssize_t do_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ void *kvalue, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos);
+
+/*
+ * fs/attr.c
+ */
+struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns);
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
+void mnt_idmap_put(struct mnt_idmap *idmap);
+struct stashed_operations {
+ struct dentry *(*stash_dentry)(struct dentry **stashed,
+ struct dentry *dentry);
+ void (*put_data)(void *data);
+ int (*init_inode)(struct inode *inode, void *data);
+};
+int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
+ struct path *path);
+void stashed_dentry_prune(struct dentry *dentry);
+struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry);
+struct dentry *stashed_dentry_get(struct dentry **stashed);
+/**
+ * path_mounted - check whether path is mounted
+ * @path: path to check
+ *
+ * Determine whether @path refers to the root of a mount.
+ *
+ * Return: true if @path is the root of a mount, false if not.
+ */
+static inline bool path_mounted(const struct path *path)
+{
+ return path->mnt->mnt_root == path->dentry;
+}
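/*
 * Editorial sketch, not part of the patch: typical use of path_mounted(),
 * e.g. rejecting an operation that must target the root of a mount (the
 * function name is hypothetical).
 */
static int example_require_mount_root(const struct path *path)
{
	if (!path_mounted(path))
		return -EINVAL;	/* not the root of a mount */
	return 0;
}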
+void file_f_owner_release(struct file *file);
+bool file_seek_cur_needs_f_lock(struct file *file);
+int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map);
+struct dentry *find_next_child(struct dentry *parent, struct dentry *prev);
+int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags);
+int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr);
+void pidfs_get_root(struct path *path);
+void nsfs_get_root(struct path *path);
diff --git a/fs/io-wq.c b/fs/io-wq.c
deleted file mode 100644
index afd955d53db9..000000000000
--- a/fs/io-wq.c
+++ /dev/null
@@ -1,1362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Basic worker thread pool for io_uring
- *
- * Copyright (C) 2019 Jens Axboe
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched/signal.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/rculist_nulls.h>
-#include <linux/cpu.h>
-#include <linux/tracehook.h>
-#include <linux/audit.h>
-#include <uapi/linux/io_uring.h>
-
-#include "io-wq.h"
-
-#define WORKER_IDLE_TIMEOUT (5 * HZ)
-
-enum {
- IO_WORKER_F_UP = 1, /* up and active */
- IO_WORKER_F_RUNNING = 2, /* account as running */
- IO_WORKER_F_FREE = 4, /* worker on free list */
- IO_WORKER_F_BOUND = 8, /* is doing bounded work */
-};
-
-enum {
- IO_WQ_BIT_EXIT = 0, /* wq exiting */
-};
-
-enum {
- IO_ACCT_STALLED_BIT = 0, /* stalled on hash */
-};
-
-/*
- * One for each thread in a wqe pool
- */
-struct io_worker {
- refcount_t ref;
- unsigned flags;
- struct hlist_nulls_node nulls_node;
- struct list_head all_list;
- struct task_struct *task;
- struct io_wqe *wqe;
-
- struct io_wq_work *cur_work;
- spinlock_t lock;
-
- struct completion ref_done;
-
- unsigned long create_state;
- struct callback_head create_work;
- int create_index;
-
- union {
- struct rcu_head rcu;
- struct work_struct work;
- };
-};
-
-#if BITS_PER_LONG == 64
-#define IO_WQ_HASH_ORDER 6
-#else
-#define IO_WQ_HASH_ORDER 5
-#endif
-
-#define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER)
-
-struct io_wqe_acct {
- unsigned nr_workers;
- unsigned max_workers;
- int index;
- atomic_t nr_running;
- struct io_wq_work_list work_list;
- unsigned long flags;
-};
-
-enum {
- IO_WQ_ACCT_BOUND,
- IO_WQ_ACCT_UNBOUND,
- IO_WQ_ACCT_NR,
-};
-
-/*
- * Per-node worker thread pool
- */
-struct io_wqe {
- raw_spinlock_t lock;
- struct io_wqe_acct acct[2];
-
- int node;
-
- struct hlist_nulls_head free_list;
- struct list_head all_list;
-
- struct wait_queue_entry wait;
-
- struct io_wq *wq;
- struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
-
- cpumask_var_t cpu_mask;
-};
-
-/*
- * Per io_wq state
- */
-struct io_wq {
- unsigned long state;
-
- free_work_fn *free_work;
- io_wq_work_fn *do_work;
-
- struct io_wq_hash *hash;
-
- atomic_t worker_refs;
- struct completion worker_done;
-
- struct hlist_node cpuhp_node;
-
- struct task_struct *task;
-
- struct io_wqe *wqes[];
-};
-
-static enum cpuhp_state io_wq_online;
-
-struct io_cb_cancel_data {
- work_cancel_fn *fn;
- void *data;
- int nr_running;
- int nr_pending;
- bool cancel_all;
-};
-
-static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
-static void io_wqe_dec_running(struct io_worker *worker);
-static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
- struct io_wqe_acct *acct,
- struct io_cb_cancel_data *match);
-static void create_worker_cb(struct callback_head *cb);
-
-static bool io_worker_get(struct io_worker *worker)
-{
- return refcount_inc_not_zero(&worker->ref);
-}
-
-static void io_worker_release(struct io_worker *worker)
-{
- if (refcount_dec_and_test(&worker->ref))
- complete(&worker->ref_done);
-}
-
-static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
-{
- return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
-}
-
-static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
- struct io_wq_work *work)
-{
- return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
-}
-
-static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
-{
- return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
-}
-
-static void io_worker_ref_put(struct io_wq *wq)
-{
- if (atomic_dec_and_test(&wq->worker_refs))
- complete(&wq->worker_done);
-}
-
-static void io_worker_cancel_cb(struct io_worker *worker)
-{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
-
- atomic_dec(&acct->nr_running);
- raw_spin_lock(&worker->wqe->lock);
- acct->nr_workers--;
- raw_spin_unlock(&worker->wqe->lock);
- io_worker_ref_put(wq);
- clear_bit_unlock(0, &worker->create_state);
- io_worker_release(worker);
-}
-
-static bool io_task_worker_match(struct callback_head *cb, void *data)
-{
- struct io_worker *worker;
-
- if (cb->func != create_worker_cb)
- return false;
- worker = container_of(cb, struct io_worker, create_work);
- return worker == data;
-}
-
-static void io_worker_exit(struct io_worker *worker)
-{
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
-
- while (1) {
- struct callback_head *cb = task_work_cancel_match(wq->task,
- io_task_worker_match, worker);
-
- if (!cb)
- break;
- io_worker_cancel_cb(worker);
- }
-
- io_worker_release(worker);
- wait_for_completion(&worker->ref_done);
-
- raw_spin_lock(&wqe->lock);
- if (worker->flags & IO_WORKER_F_FREE)
- hlist_nulls_del_rcu(&worker->nulls_node);
- list_del_rcu(&worker->all_list);
- preempt_disable();
- io_wqe_dec_running(worker);
- worker->flags = 0;
- current->flags &= ~PF_IO_WORKER;
- preempt_enable();
- raw_spin_unlock(&wqe->lock);
-
- kfree_rcu(worker, rcu);
- io_worker_ref_put(wqe->wq);
- do_exit(0);
-}
-
-static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
-{
- if (!wq_list_empty(&acct->work_list) &&
- !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
- return true;
- return false;
-}
-
-/*
- * Check head of free list for an available worker. If one isn't available,
- * caller must create one.
- */
-static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
- struct io_wqe_acct *acct)
- __must_hold(RCU)
-{
- struct hlist_nulls_node *n;
- struct io_worker *worker;
-
- /*
- * Iterate free_list and see if we can find an idle worker to
- * activate. If a given worker is on the free_list but in the process
- * of exiting, keep trying.
- */
- hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
- if (!io_worker_get(worker))
- continue;
- if (io_wqe_get_acct(worker) != acct) {
- io_worker_release(worker);
- continue;
- }
- if (wake_up_process(worker->task)) {
- io_worker_release(worker);
- return true;
- }
- io_worker_release(worker);
- }
-
- return false;
-}
-
-/*
- * We need a worker. If we find a free one, we're good. If not, and we're
- * below the max number of workers, create one.
- */
-static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
-{
- /*
- * Most likely an attempt to queue unbounded work on an io_wq that
- * wasn't setup with any unbounded workers.
- */
- if (unlikely(!acct->max_workers))
- pr_warn_once("io-wq is not configured for unbound workers");
-
- raw_spin_lock(&wqe->lock);
- if (acct->nr_workers >= acct->max_workers) {
- raw_spin_unlock(&wqe->lock);
- return true;
- }
- acct->nr_workers++;
- raw_spin_unlock(&wqe->lock);
- atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- return create_io_worker(wqe->wq, wqe, acct->index);
-}
-
-static void io_wqe_inc_running(struct io_worker *worker)
-{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-
- atomic_inc(&acct->nr_running);
-}
-
-static void create_worker_cb(struct callback_head *cb)
-{
- struct io_worker *worker;
- struct io_wq *wq;
- struct io_wqe *wqe;
- struct io_wqe_acct *acct;
- bool do_create = false;
-
- worker = container_of(cb, struct io_worker, create_work);
- wqe = worker->wqe;
- wq = wqe->wq;
- acct = &wqe->acct[worker->create_index];
- raw_spin_lock(&wqe->lock);
- if (acct->nr_workers < acct->max_workers) {
- acct->nr_workers++;
- do_create = true;
- }
- raw_spin_unlock(&wqe->lock);
- if (do_create) {
- create_io_worker(wq, wqe, worker->create_index);
- } else {
- atomic_dec(&acct->nr_running);
- io_worker_ref_put(wq);
- }
- clear_bit_unlock(0, &worker->create_state);
- io_worker_release(worker);
-}
-
-static bool io_queue_worker_create(struct io_worker *worker,
- struct io_wqe_acct *acct,
- task_work_func_t func)
-{
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
-
- /* raced with exit, just ignore create call */
- if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
- goto fail;
- if (!io_worker_get(worker))
- goto fail;
- /*
- * create_state manages ownership of create_work/index. We should
- * only need one entry per worker, as the worker going to sleep
- * will trigger the condition, and waking will clear it once it
- * runs the task_work.
- */
- if (test_bit(0, &worker->create_state) ||
- test_and_set_bit_lock(0, &worker->create_state))
- goto fail_release;
-
- init_task_work(&worker->create_work, func);
- worker->create_index = acct->index;
- if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
- clear_bit_unlock(0, &worker->create_state);
- return true;
- }
- clear_bit_unlock(0, &worker->create_state);
-fail_release:
- io_worker_release(worker);
-fail:
- atomic_dec(&acct->nr_running);
- io_worker_ref_put(wq);
- return false;
-}
-
-static void io_wqe_dec_running(struct io_worker *worker)
- __must_hold(wqe->lock)
-{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
-
- if (!(worker->flags & IO_WORKER_F_UP))
- return;
-
- if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
- atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- io_queue_worker_create(worker, acct, create_worker_cb);
- }
-}
-
-/*
- * Worker will start processing some work. Move it to the busy list, if
- * it's currently on the freelist
- */
-static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
- struct io_wq_work *work)
- __must_hold(wqe->lock)
-{
- if (worker->flags & IO_WORKER_F_FREE) {
- worker->flags &= ~IO_WORKER_F_FREE;
- hlist_nulls_del_init_rcu(&worker->nulls_node);
- }
-}
-
-/*
- * No work, worker going to sleep. Move to freelist, and unuse mm if we
- * have one attached. Dropping the mm may potentially sleep, so we drop
- * the lock in that case and return success. Since the caller has to
- * retry the loop in that case (we changed task state), we don't regrab
- * the lock if we return success.
- */
-static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
- __must_hold(wqe->lock)
-{
- if (!(worker->flags & IO_WORKER_F_FREE)) {
- worker->flags |= IO_WORKER_F_FREE;
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
- }
-}
-
-static inline unsigned int io_get_work_hash(struct io_wq_work *work)
-{
- return work->flags >> IO_WQ_HASH_SHIFT;
-}
-
-static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
-{
- struct io_wq *wq = wqe->wq;
-
- spin_lock_irq(&wq->hash->wait.lock);
- if (list_empty(&wqe->wait.entry)) {
- __add_wait_queue(&wq->hash->wait, &wqe->wait);
- if (!test_bit(hash, &wq->hash->map)) {
- __set_current_state(TASK_RUNNING);
- list_del_init(&wqe->wait.entry);
- }
- }
- spin_unlock_irq(&wq->hash->wait.lock);
-}
-
-static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
- struct io_worker *worker)
- __must_hold(wqe->lock)
-{
- struct io_wq_work_node *node, *prev;
- struct io_wq_work *work, *tail;
- unsigned int stall_hash = -1U;
- struct io_wqe *wqe = worker->wqe;
-
- wq_list_for_each(node, prev, &acct->work_list) {
- unsigned int hash;
-
- work = container_of(node, struct io_wq_work, list);
-
- /* not hashed, can run anytime */
- if (!io_wq_is_hashed(work)) {
- wq_list_del(&acct->work_list, node, prev);
- return work;
- }
-
- hash = io_get_work_hash(work);
- /* all items with this hash lie in [work, tail] */
- tail = wqe->hash_tail[hash];
-
- /* hashed, can run if not already running */
- if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
- wqe->hash_tail[hash] = NULL;
- wq_list_cut(&acct->work_list, &tail->list, prev);
- return work;
- }
- if (stall_hash == -1U)
- stall_hash = hash;
- /* fast forward to a next hash, for-each will fix up @prev */
- node = &tail->list;
- }
-
- if (stall_hash != -1U) {
- /*
- * Set this before dropping the lock to avoid racing with new
- * work being added and clearing the stalled bit.
- */
- set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
- raw_spin_unlock(&wqe->lock);
- io_wait_on_hash(wqe, stall_hash);
- raw_spin_lock(&wqe->lock);
- }
-
- return NULL;
-}
-
-static bool io_flush_signals(void)
-{
- if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
- __set_current_state(TASK_RUNNING);
- tracehook_notify_signal();
- return true;
- }
- return false;
-}
-
-static void io_assign_current_work(struct io_worker *worker,
- struct io_wq_work *work)
-{
- if (work) {
- io_flush_signals();
- cond_resched();
- }
-
- spin_lock(&worker->lock);
- worker->cur_work = work;
- spin_unlock(&worker->lock);
-}
-
-static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
-
-static void io_worker_handle_work(struct io_worker *worker)
- __releases(wqe->lock)
-{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
- bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
-
- do {
- struct io_wq_work *work;
-get_next:
- /*
- * If we got some work, mark us as busy. If we didn't, but
- * the list isn't empty, it means we stalled on hashed work.
- * Mark us stalled so we don't keep looking for work when we
- * can't make progress, any work completion or insertion will
- * clear the stalled flag.
- */
- work = io_get_next_work(acct, worker);
- if (work)
- __io_worker_busy(wqe, worker, work);
-
- raw_spin_unlock(&wqe->lock);
- if (!work)
- break;
- io_assign_current_work(worker, work);
- __set_current_state(TASK_RUNNING);
-
- /* handle a whole dependent link */
- do {
- struct io_wq_work *next_hashed, *linked;
- unsigned int hash = io_get_work_hash(work);
-
- next_hashed = wq_next_work(work);
-
- if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
- work->flags |= IO_WQ_WORK_CANCEL;
- wq->do_work(work);
- io_assign_current_work(worker, NULL);
-
- linked = wq->free_work(work);
- work = next_hashed;
- if (!work && linked && !io_wq_is_hashed(linked)) {
- work = linked;
- linked = NULL;
- }
- io_assign_current_work(worker, work);
- if (linked)
- io_wqe_enqueue(wqe, linked);
-
- if (hash != -1U && !next_hashed) {
- clear_bit(hash, &wq->hash->map);
- clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
- if (wq_has_sleeper(&wq->hash->wait))
- wake_up(&wq->hash->wait);
- raw_spin_lock(&wqe->lock);
- /* skip unnecessary unlock-lock wqe->lock */
- if (!work)
- goto get_next;
- raw_spin_unlock(&wqe->lock);
- }
- } while (work);
-
- raw_spin_lock(&wqe->lock);
- } while (1);
-}
-
-static int io_wqe_worker(void *data)
-{
- struct io_worker *worker = data;
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
- bool last_timeout = false;
- char buf[TASK_COMM_LEN];
-
- worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
-
- snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
- set_task_comm(current, buf);
-
- audit_alloc_kernel(current);
-
- while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
- long ret;
-
- set_current_state(TASK_INTERRUPTIBLE);
-loop:
- raw_spin_lock(&wqe->lock);
- if (io_acct_run_queue(acct)) {
- io_worker_handle_work(worker);
- goto loop;
- }
- /* timed out, exit unless we're the last worker */
- if (last_timeout && acct->nr_workers > 1) {
- acct->nr_workers--;
- raw_spin_unlock(&wqe->lock);
- __set_current_state(TASK_RUNNING);
- break;
- }
- last_timeout = false;
- __io_worker_idle(wqe, worker);
- raw_spin_unlock(&wqe->lock);
- if (io_flush_signals())
- continue;
- ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
- if (signal_pending(current)) {
- struct ksignal ksig;
-
- if (!get_signal(&ksig))
- continue;
- break;
- }
- last_timeout = !ret;
- }
-
- if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
- raw_spin_lock(&wqe->lock);
- io_worker_handle_work(worker);
- }
-
- audit_free(current);
- io_worker_exit(worker);
- return 0;
-}
-
-/*
- * Called when a worker is scheduled in. Mark us as currently running.
- */
-void io_wq_worker_running(struct task_struct *tsk)
-{
- struct io_worker *worker = tsk->pf_io_worker;
-
- if (!worker)
- return;
- if (!(worker->flags & IO_WORKER_F_UP))
- return;
- if (worker->flags & IO_WORKER_F_RUNNING)
- return;
- worker->flags |= IO_WORKER_F_RUNNING;
- io_wqe_inc_running(worker);
-}
-
-/*
- * Called when worker is going to sleep. If there are no workers currently
- * running and we have work pending, wake up a free one or create a new one.
- */
-void io_wq_worker_sleeping(struct task_struct *tsk)
-{
- struct io_worker *worker = tsk->pf_io_worker;
-
- if (!worker)
- return;
- if (!(worker->flags & IO_WORKER_F_UP))
- return;
- if (!(worker->flags & IO_WORKER_F_RUNNING))
- return;
-
- worker->flags &= ~IO_WORKER_F_RUNNING;
-
- raw_spin_lock(&worker->wqe->lock);
- io_wqe_dec_running(worker);
- raw_spin_unlock(&worker->wqe->lock);
-}
-
-static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
- struct task_struct *tsk)
-{
- tsk->pf_io_worker = worker;
- worker->task = tsk;
- set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
- tsk->flags |= PF_NO_SETAFFINITY;
-
- raw_spin_lock(&wqe->lock);
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
- list_add_tail_rcu(&worker->all_list, &wqe->all_list);
- worker->flags |= IO_WORKER_F_FREE;
- raw_spin_unlock(&wqe->lock);
- wake_up_new_task(tsk);
-}
-
-static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
-{
- return true;
-}
-
-static inline bool io_should_retry_thread(long err)
-{
- switch (err) {
- case -EAGAIN:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- case -ERESTARTNOHAND:
- return true;
- default:
- return false;
- }
-}
-
-static void create_worker_cont(struct callback_head *cb)
-{
- struct io_worker *worker;
- struct task_struct *tsk;
- struct io_wqe *wqe;
-
- worker = container_of(cb, struct io_worker, create_work);
- clear_bit_unlock(0, &worker->create_state);
- wqe = worker->wqe;
- tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
- if (!IS_ERR(tsk)) {
- io_init_new_worker(wqe, worker, tsk);
- io_worker_release(worker);
- return;
- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-
- atomic_dec(&acct->nr_running);
- raw_spin_lock(&wqe->lock);
- acct->nr_workers--;
- if (!acct->nr_workers) {
- struct io_cb_cancel_data match = {
- .fn = io_wq_work_match_all,
- .cancel_all = true,
- };
-
- while (io_acct_cancel_pending_work(wqe, acct, &match))
- raw_spin_lock(&wqe->lock);
- }
- raw_spin_unlock(&wqe->lock);
- io_worker_ref_put(wqe->wq);
- kfree(worker);
- return;
- }
-
- /* re-create attempts grab a new worker ref, drop the existing one */
- io_worker_release(worker);
- schedule_work(&worker->work);
-}
-
-static void io_workqueue_create(struct work_struct *work)
-{
- struct io_worker *worker = container_of(work, struct io_worker, work);
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-
- if (!io_queue_worker_create(worker, acct, create_worker_cont))
- kfree(worker);
-}
-
-static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
-{
- struct io_wqe_acct *acct = &wqe->acct[index];
- struct io_worker *worker;
- struct task_struct *tsk;
-
- __set_current_state(TASK_RUNNING);
-
- worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
- if (!worker) {
-fail:
- atomic_dec(&acct->nr_running);
- raw_spin_lock(&wqe->lock);
- acct->nr_workers--;
- raw_spin_unlock(&wqe->lock);
- io_worker_ref_put(wq);
- return false;
- }
-
- refcount_set(&worker->ref, 1);
- worker->wqe = wqe;
- spin_lock_init(&worker->lock);
- init_completion(&worker->ref_done);
-
- if (index == IO_WQ_ACCT_BOUND)
- worker->flags |= IO_WORKER_F_BOUND;
-
- tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
- if (!IS_ERR(tsk)) {
- io_init_new_worker(wqe, worker, tsk);
- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
- kfree(worker);
- goto fail;
- } else {
- INIT_WORK(&worker->work, io_workqueue_create);
- schedule_work(&worker->work);
- }
-
- return true;
-}
-
-/*
- * Iterate the passed in list and call the specific function for each
- * worker that isn't exiting
- */
-static bool io_wq_for_each_worker(struct io_wqe *wqe,
- bool (*func)(struct io_worker *, void *),
- void *data)
-{
- struct io_worker *worker;
- bool ret = false;
-
- list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
- if (io_worker_get(worker)) {
- /* no task if node is/was offline */
- if (worker->task)
- ret = func(worker, data);
- io_worker_release(worker);
- if (ret)
- break;
- }
- }
-
- return ret;
-}
-
-static bool io_wq_worker_wake(struct io_worker *worker, void *data)
-{
- set_notify_signal(worker->task);
- wake_up_process(worker->task);
- return false;
-}
-
-static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
-{
- struct io_wq *wq = wqe->wq;
-
- do {
- work->flags |= IO_WQ_WORK_CANCEL;
- wq->do_work(work);
- work = wq->free_work(work);
- } while (work);
-}
-
-static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
-{
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
- unsigned int hash;
- struct io_wq_work *tail;
-
- if (!io_wq_is_hashed(work)) {
-append:
- wq_list_add_tail(&work->list, &acct->work_list);
- return;
- }
-
- hash = io_get_work_hash(work);
- tail = wqe->hash_tail[hash];
- wqe->hash_tail[hash] = work;
- if (!tail)
- goto append;
-
- wq_list_add_after(&work->list, &tail->list, &acct->work_list);
-}
-
-static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
-{
- return work == data;
-}
-
-static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
-{
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
- unsigned work_flags = work->flags;
- bool do_create;
-
- /*
- * If io-wq is exiting for this task, or if the request has explicitly
- * been marked as one that should not get executed, cancel it here.
- */
- if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
- (work->flags & IO_WQ_WORK_CANCEL)) {
- io_run_cancel(work, wqe);
- return;
- }
-
- raw_spin_lock(&wqe->lock);
- io_wqe_insert_work(wqe, work);
- clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
-
- rcu_read_lock();
- do_create = !io_wqe_activate_free_worker(wqe, acct);
- rcu_read_unlock();
-
- raw_spin_unlock(&wqe->lock);
-
- if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
- !atomic_read(&acct->nr_running))) {
- bool did_create;
-
- did_create = io_wqe_create_worker(wqe, acct);
- if (likely(did_create))
- return;
-
- raw_spin_lock(&wqe->lock);
- /* fatal condition, failed to create the first worker */
- if (!acct->nr_workers) {
- struct io_cb_cancel_data match = {
- .fn = io_wq_work_match_item,
- .data = work,
- .cancel_all = false,
- };
-
- if (io_acct_cancel_pending_work(wqe, acct, &match))
- raw_spin_lock(&wqe->lock);
- }
- raw_spin_unlock(&wqe->lock);
- }
-}
-
-void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
-{
- struct io_wqe *wqe = wq->wqes[numa_node_id()];
-
- io_wqe_enqueue(wqe, work);
-}
-
-/*
- * Work items that hash to the same value will not be done in parallel.
- * Used to limit concurrent writes, generally hashed by inode.
- */
-void io_wq_hash_work(struct io_wq_work *work, void *val)
-{
- unsigned int bit;
-
- bit = hash_ptr(val, IO_WQ_HASH_ORDER);
- work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
-}
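/*
 * Editorial sketch, not part of the removed file: how a caller serializes
 * buffered writes against the same inode, roughly as io_uring's request
 * setup did. The function name is illustrative.
 */
static void example_queue_buffered_write(struct io_wq *wq,
					 struct io_wq_work *work,
					 struct inode *inode)
{
	/* All work hashed to this inode runs sequentially, never in parallel. */
	io_wq_hash_work(work, inode);
	io_wq_enqueue(wq, work);
}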
-
-static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
-{
- struct io_cb_cancel_data *match = data;
-
- /*
- * Hold the lock to avoid ->cur_work going out of scope, caller
- * may dereference the passed in work.
- */
- spin_lock(&worker->lock);
- if (worker->cur_work &&
- match->fn(worker->cur_work, match->data)) {
- set_notify_signal(worker->task);
- match->nr_running++;
- }
- spin_unlock(&worker->lock);
-
- return match->nr_running && !match->cancel_all;
-}
-
-static inline void io_wqe_remove_pending(struct io_wqe *wqe,
- struct io_wq_work *work,
- struct io_wq_work_node *prev)
-{
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
- unsigned int hash = io_get_work_hash(work);
- struct io_wq_work *prev_work = NULL;
-
- if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
- if (prev)
- prev_work = container_of(prev, struct io_wq_work, list);
- if (prev_work && io_get_work_hash(prev_work) == hash)
- wqe->hash_tail[hash] = prev_work;
- else
- wqe->hash_tail[hash] = NULL;
- }
- wq_list_del(&acct->work_list, &work->list, prev);
-}
-
-static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
- struct io_wqe_acct *acct,
- struct io_cb_cancel_data *match)
- __releases(wqe->lock)
-{
- struct io_wq_work_node *node, *prev;
- struct io_wq_work *work;
-
- wq_list_for_each(node, prev, &acct->work_list) {
- work = container_of(node, struct io_wq_work, list);
- if (!match->fn(work, match->data))
- continue;
- io_wqe_remove_pending(wqe, work, prev);
- raw_spin_unlock(&wqe->lock);
- io_run_cancel(work, wqe);
- match->nr_pending++;
- /* not safe to continue after unlock */
- return true;
- }
-
- return false;
-}
-
-static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
- struct io_cb_cancel_data *match)
-{
- int i;
-retry:
- raw_spin_lock(&wqe->lock);
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
-
- if (io_acct_cancel_pending_work(wqe, acct, match)) {
- if (match->cancel_all)
- goto retry;
- return;
- }
- }
- raw_spin_unlock(&wqe->lock);
-}
-
-static void io_wqe_cancel_running_work(struct io_wqe *wqe,
- struct io_cb_cancel_data *match)
-{
- rcu_read_lock();
- io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
- rcu_read_unlock();
-}
-
-enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
- void *data, bool cancel_all)
-{
- struct io_cb_cancel_data match = {
- .fn = cancel,
- .data = data,
- .cancel_all = cancel_all,
- };
- int node;
-
- /*
- * First check pending list, if we're lucky we can just remove it
- * from there. CANCEL_OK means that the work is returned as-new,
- * no completion will be posted for it.
- */
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- io_wqe_cancel_pending_work(wqe, &match);
- if (match.nr_pending && !match.cancel_all)
- return IO_WQ_CANCEL_OK;
- }
-
- /*
- * Now check if a free (going busy) or busy worker has the work
- * currently running. If we find it there, we'll return CANCEL_RUNNING
- * as an indication that we attempt to signal cancellation. The
- * completion will run normally in this case.
- */
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- io_wqe_cancel_running_work(wqe, &match);
- if (match.nr_running && !match.cancel_all)
- return IO_WQ_CANCEL_RUNNING;
- }
-
- if (match.nr_running)
- return IO_WQ_CANCEL_RUNNING;
- if (match.nr_pending)
- return IO_WQ_CANCEL_OK;
- return IO_WQ_CANCEL_NOTFOUND;
-}
-
-static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
- int sync, void *key)
-{
- struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
- int i;
-
- list_del_init(&wait->entry);
-
- rcu_read_lock();
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = &wqe->acct[i];
-
- if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
- io_wqe_activate_free_worker(wqe, acct);
- }
- rcu_read_unlock();
- return 1;
-}
-
-struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
-{
- int ret, node, i;
- struct io_wq *wq;
-
- if (WARN_ON_ONCE(!data->free_work || !data->do_work))
- return ERR_PTR(-EINVAL);
- if (WARN_ON_ONCE(!bounded))
- return ERR_PTR(-EINVAL);
-
- wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
- if (!wq)
- return ERR_PTR(-ENOMEM);
- ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- if (ret)
- goto err_wq;
-
- refcount_inc(&data->hash->refs);
- wq->hash = data->hash;
- wq->free_work = data->free_work;
- wq->do_work = data->do_work;
-
- ret = -ENOMEM;
- for_each_node(node) {
- struct io_wqe *wqe;
- int alloc_node = node;
-
- if (!node_online(alloc_node))
- alloc_node = NUMA_NO_NODE;
- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
- if (!wqe)
- goto err;
- if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
- goto err;
- cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
- wq->wqes[node] = wqe;
- wqe->node = alloc_node;
- wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
- wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
- task_rlimit(current, RLIMIT_NPROC);
- INIT_LIST_HEAD(&wqe->wait.entry);
- wqe->wait.func = io_wqe_hash_wake;
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = &wqe->acct[i];
-
- acct->index = i;
- atomic_set(&acct->nr_running, 0);
- INIT_WQ_LIST(&acct->work_list);
- }
- wqe->wq = wq;
- raw_spin_lock_init(&wqe->lock);
- INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
- INIT_LIST_HEAD(&wqe->all_list);
- }
-
- wq->task = get_task_struct(data->task);
- atomic_set(&wq->worker_refs, 1);
- init_completion(&wq->worker_done);
- return wq;
-err:
- io_wq_put_hash(data->hash);
- cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- for_each_node(node) {
- if (!wq->wqes[node])
- continue;
- free_cpumask_var(wq->wqes[node]->cpu_mask);
- kfree(wq->wqes[node]);
- }
-err_wq:
- kfree(wq);
- return ERR_PTR(ret);
-}
-
-static bool io_task_work_match(struct callback_head *cb, void *data)
-{
- struct io_worker *worker;
-
- if (cb->func != create_worker_cb && cb->func != create_worker_cont)
- return false;
- worker = container_of(cb, struct io_worker, create_work);
- return worker->wqe->wq == data;
-}
-
-void io_wq_exit_start(struct io_wq *wq)
-{
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
-}
-
-static void io_wq_exit_workers(struct io_wq *wq)
-{
- struct callback_head *cb;
- int node;
-
- if (!wq->task)
- return;
-
- while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
- struct io_worker *worker;
-
- worker = container_of(cb, struct io_worker, create_work);
- io_worker_cancel_cb(worker);
- }
-
- rcu_read_lock();
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
- }
- rcu_read_unlock();
- io_worker_ref_put(wq);
- wait_for_completion(&wq->worker_done);
-
- for_each_node(node) {
- spin_lock_irq(&wq->hash->wait.lock);
- list_del_init(&wq->wqes[node]->wait.entry);
- spin_unlock_irq(&wq->hash->wait.lock);
- }
- put_task_struct(wq->task);
- wq->task = NULL;
-}
-
-static void io_wq_destroy(struct io_wq *wq)
-{
- int node;
-
- cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
-
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- struct io_cb_cancel_data match = {
- .fn = io_wq_work_match_all,
- .cancel_all = true,
- };
- io_wqe_cancel_pending_work(wqe, &match);
- free_cpumask_var(wqe->cpu_mask);
- kfree(wqe);
- }
- io_wq_put_hash(wq->hash);
- kfree(wq);
-}
-
-void io_wq_put_and_exit(struct io_wq *wq)
-{
- WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
-
- io_wq_exit_workers(wq);
- io_wq_destroy(wq);
-}
-
-struct online_data {
- unsigned int cpu;
- bool online;
-};
-
-static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
-{
- struct online_data *od = data;
-
- if (od->online)
- cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
- else
- cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
- return false;
-}
-
-static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
-{
- struct online_data od = {
- .cpu = cpu,
- .online = online
- };
- int i;
-
- rcu_read_lock();
- for_each_node(i)
- io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
- rcu_read_unlock();
- return 0;
-}
-
-static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
-
- return __io_wq_cpu_online(wq, cpu, true);
-}
-
-static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
- struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
-
- return __io_wq_cpu_online(wq, cpu, false);
-}
-
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
-{
- int i;
-
- rcu_read_lock();
- for_each_node(i) {
- struct io_wqe *wqe = wq->wqes[i];
-
- if (mask)
- cpumask_copy(wqe->cpu_mask, mask);
- else
- cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
- }
- rcu_read_unlock();
- return 0;
-}
-
-/*
- * Set max number of unbounded workers, returns old value. If new_count is 0,
- * then just return the old value.
- */
-int io_wq_max_workers(struct io_wq *wq, int *new_count)
-{
- int prev[IO_WQ_ACCT_NR];
- bool first_node = true;
- int i, node;
-
- BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
- BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
- BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
-
- for (i = 0; i < 2; i++) {
- if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
- new_count[i] = task_rlimit(current, RLIMIT_NPROC);
- }
-
- for (i = 0; i < IO_WQ_ACCT_NR; i++)
- prev[i] = 0;
-
- rcu_read_lock();
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- struct io_wqe_acct *acct;
-
- raw_spin_lock(&wqe->lock);
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- acct = &wqe->acct[i];
- if (first_node)
- prev[i] = max_t(int, acct->max_workers, prev[i]);
- if (new_count[i])
- acct->max_workers = new_count[i];
- }
- raw_spin_unlock(&wqe->lock);
- first_node = false;
- }
- rcu_read_unlock();
-
- for (i = 0; i < IO_WQ_ACCT_NR; i++)
- new_count[i] = prev[i];
-
- return 0;
-}
-
-static __init int io_wq_init(void)
-{
- int ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
- io_wq_cpu_online, io_wq_cpu_offline);
- if (ret < 0)
- return ret;
- io_wq_online = ret;
- return 0;
-}
-subsys_initcall(io_wq_init);
diff --git a/fs/io-wq.h b/fs/io-wq.h
deleted file mode 100644
index 41bf37674a49..000000000000
--- a/fs/io-wq.h
+++ /dev/null
@@ -1,205 +0,0 @@
-#ifndef INTERNAL_IO_WQ_H
-#define INTERNAL_IO_WQ_H
-
-#include <linux/refcount.h>
-
-struct io_wq;
-
-enum {
- IO_WQ_WORK_CANCEL = 1,
- IO_WQ_WORK_HASHED = 2,
- IO_WQ_WORK_UNBOUND = 4,
- IO_WQ_WORK_CONCURRENT = 16,
-
- IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
-};
-
-enum io_wq_cancel {
- IO_WQ_CANCEL_OK, /* cancelled before started */
- IO_WQ_CANCEL_RUNNING, /* found, running, and attempted cancelled */
- IO_WQ_CANCEL_NOTFOUND, /* work not found */
-};
-
-struct io_wq_work_node {
- struct io_wq_work_node *next;
-};
-
-struct io_wq_work_list {
- struct io_wq_work_node *first;
- struct io_wq_work_node *last;
-};
-
-#define wq_list_for_each(pos, prv, head) \
- for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
-
-#define wq_list_for_each_resume(pos, prv) \
- for (; pos; prv = pos, pos = (pos)->next)
-
-#define wq_list_empty(list) (READ_ONCE((list)->first) == NULL)
-#define INIT_WQ_LIST(list) do { \
- (list)->first = NULL; \
-} while (0)
-
-static inline void wq_list_add_after(struct io_wq_work_node *node,
- struct io_wq_work_node *pos,
- struct io_wq_work_list *list)
-{
- struct io_wq_work_node *next = pos->next;
-
- pos->next = node;
- node->next = next;
- if (!next)
- list->last = node;
-}
-
-static inline void wq_list_add_tail(struct io_wq_work_node *node,
- struct io_wq_work_list *list)
-{
- node->next = NULL;
- if (!list->first) {
- list->last = node;
- WRITE_ONCE(list->first, node);
- } else {
- list->last->next = node;
- list->last = node;
- }
-}
-
-static inline void wq_list_add_head(struct io_wq_work_node *node,
- struct io_wq_work_list *list)
-{
- node->next = list->first;
- if (!node->next)
- list->last = node;
- WRITE_ONCE(list->first, node);
-}
-
-static inline void wq_list_cut(struct io_wq_work_list *list,
- struct io_wq_work_node *last,
- struct io_wq_work_node *prev)
-{
- /* first in the list, if prev==NULL */
- if (!prev)
- WRITE_ONCE(list->first, last->next);
- else
- prev->next = last->next;
-
- if (last == list->last)
- list->last = prev;
- last->next = NULL;
-}
-
-static inline void __wq_list_splice(struct io_wq_work_list *list,
- struct io_wq_work_node *to)
-{
- list->last->next = to->next;
- to->next = list->first;
- INIT_WQ_LIST(list);
-}
-
-static inline bool wq_list_splice(struct io_wq_work_list *list,
- struct io_wq_work_node *to)
-{
- if (!wq_list_empty(list)) {
- __wq_list_splice(list, to);
- return true;
- }
- return false;
-}
-
-static inline void wq_stack_add_head(struct io_wq_work_node *node,
- struct io_wq_work_node *stack)
-{
- node->next = stack->next;
- stack->next = node;
-}
-
-static inline void wq_list_del(struct io_wq_work_list *list,
- struct io_wq_work_node *node,
- struct io_wq_work_node *prev)
-{
- wq_list_cut(list, node, prev);
-}
-
-static inline
-struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
-{
- struct io_wq_work_node *node = stack->next;
-
- stack->next = node->next;
- return node;
-}
-
-struct io_wq_work {
- struct io_wq_work_node list;
- unsigned flags;
-};
-
-static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
-{
- if (!work->list.next)
- return NULL;
-
- return container_of(work->list.next, struct io_wq_work, list);
-}
-
-typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
-typedef void (io_wq_work_fn)(struct io_wq_work *);
-
-struct io_wq_hash {
- refcount_t refs;
- unsigned long map;
- struct wait_queue_head wait;
-};
-
-static inline void io_wq_put_hash(struct io_wq_hash *hash)
-{
- if (refcount_dec_and_test(&hash->refs))
- kfree(hash);
-}
-
-struct io_wq_data {
- struct io_wq_hash *hash;
- struct task_struct *task;
- io_wq_work_fn *do_work;
- free_work_fn *free_work;
-};
-
-struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-void io_wq_exit_start(struct io_wq *wq);
-void io_wq_put_and_exit(struct io_wq *wq);
-
-void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
-void io_wq_hash_work(struct io_wq_work *work, void *val);
-
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
-int io_wq_max_workers(struct io_wq *wq, int *new_count);
-
-static inline bool io_wq_is_hashed(struct io_wq_work *work)
-{
- return work->flags & IO_WQ_WORK_HASHED;
-}
-
-typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
-
-enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
- void *data, bool cancel_all);
-
-#if defined(CONFIG_IO_WQ)
-extern void io_wq_worker_sleeping(struct task_struct *);
-extern void io_wq_worker_running(struct task_struct *);
-#else
-static inline void io_wq_worker_sleeping(struct task_struct *tsk)
-{
-}
-static inline void io_wq_worker_running(struct task_struct *tsk)
-{
-}
-#endif
-
-static inline bool io_wq_current_is_worker(void)
-{
- return in_task() && (current->flags & PF_IO_WORKER) &&
- current->pf_io_worker;
-}
-#endif
diff --git a/fs/io_uring.c b/fs/io_uring.c
deleted file mode 100644
index b07196b4511c..000000000000
--- a/fs/io_uring.c
+++ /dev/null
@@ -1,11127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Shared application/kernel submission and completion ring pairs, for
- * supporting fast/efficient IO.
- *
- * A note on the read/write ordering memory barriers that are matched between
- * the application and kernel side.
- *
- * After the application reads the CQ ring tail, it must use an
- * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
- * before writing the tail (using smp_load_acquire to read the tail will
- * do). It also needs a smp_mb() before updating CQ head (ordering the
- * entry load(s) with the head store), pairing with an implicit barrier
- * through a control-dependency in io_get_cqe (smp_store_release to
- * store head will do). Failure to do so could lead to reading invalid
- * CQ entries.
- *
- * Likewise, the application must use an appropriate smp_wmb() before
- * writing the SQ tail (ordering SQ entry stores with the tail store),
- * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
- * to store the tail will do). And it needs a barrier ordering the SQ
- * head load before writing new SQ entries (smp_load_acquire to read
- * head will do).
- *
- * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
- * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
- * updating the SQ tail; a full memory barrier smp_mb() is needed
- * between.
- *
- * Also see the examples in the liburing library:
- *
- * git://git.kernel.dk/liburing
- *
- * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
- * from data shared between the kernel and application. This is done both
- * for ordering purposes, but also to ensure that once a value is loaded from
- * data that the application could potentially modify, it remains stable.
- *
- * Copyright (C) 2018-2019 Jens Axboe
- * Copyright (c) 2018-2019 Christoph Hellwig
- */
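/*
 * Editorial sketch, not part of the removed file: the application side of
 * the CQ ring ordering described above, in the style of liburing. The ring
 * struct and the load-acquire/store-release helpers stand in for what
 * liburing provides; they are not defined by this file.
 */
struct example_cq_ring {
	unsigned *head;			/* written by the application */
	unsigned *tail;			/* written by the kernel */
	unsigned *ring_mask;
	struct io_uring_cqe *cqes;
};

static int example_reap_cqe(struct example_cq_ring *cq, struct io_uring_cqe *out)
{
	unsigned head = *cq->head;
	/* Acquire-load the tail: pairs with the kernel's release store. */
	unsigned tail = smp_load_acquire(cq->tail);

	if (head == tail)
		return -EAGAIN;		/* no completions pending */

	*out = cq->cqes[head & *cq->ring_mask];

	/*
	 * Release-store the new head so the CQE load above is ordered before
	 * the kernel sees the slot as free for reuse.
	 */
	smp_store_release(cq->head, head + 1);
	return 0;
}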
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/syscalls.h>
-#include <linux/compat.h>
-#include <net/compat.h>
-#include <linux/refcount.h>
-#include <linux/uio.h>
-#include <linux/bits.h>
-
-#include <linux/sched/signal.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/fdtable.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/bvec.h>
-#include <linux/net.h>
-#include <net/sock.h>
-#include <net/af_unix.h>
-#include <net/scm.h>
-#include <linux/anon_inodes.h>
-#include <linux/sched/mm.h>
-#include <linux/uaccess.h>
-#include <linux/nospec.h>
-#include <linux/sizes.h>
-#include <linux/hugetlb.h>
-#include <linux/highmem.h>
-#include <linux/namei.h>
-#include <linux/fsnotify.h>
-#include <linux/fadvise.h>
-#include <linux/eventpoll.h>
-#include <linux/splice.h>
-#include <linux/task_work.h>
-#include <linux/pagemap.h>
-#include <linux/io_uring.h>
-#include <linux/tracehook.h>
-#include <linux/audit.h>
-#include <linux/security.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/io_uring.h>
-
-#include <uapi/linux/io_uring.h>
-
-#include "internal.h"
-#include "io-wq.h"
-
-#define IORING_MAX_ENTRIES 32768
-#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
-#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
-
-/* only define max */
-#define IORING_MAX_FIXED_FILES (1U << 15)
-#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
- IORING_REGISTER_LAST + IORING_OP_LAST)
-
-#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
-#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
-#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
-
-#define IORING_MAX_REG_BUFFERS (1U << 14)
-
-#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
- IOSQE_IO_HARDLINK | IOSQE_ASYNC)
-
-#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN)
-
-#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
- REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
- REQ_F_ASYNC_DATA)
-
-#define IO_TCTX_REFS_CACHE_NR (1U << 10)
-
-struct io_uring {
- u32 head ____cacheline_aligned_in_smp;
- u32 tail ____cacheline_aligned_in_smp;
-};
-
-/*
- * This data is shared with the application through the mmap at offsets
- * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
- *
- * The offsets to the member fields are published through struct
- * io_sqring_offsets when calling io_uring_setup.
- */
-struct io_rings {
- /*
- * Head and tail offsets into the ring; the offsets need to be
- * masked to get valid indices.
- *
- * The kernel controls head of the sq ring and the tail of the cq ring,
- * and the application controls tail of the sq ring and the head of the
- * cq ring.
- */
- struct io_uring sq, cq;
- /*
- * Bitmasks to apply to head and tail offsets (constant, equals
- * ring_entries - 1)
- */
- u32 sq_ring_mask, cq_ring_mask;
- /* Ring sizes (constant, power of 2) */
- u32 sq_ring_entries, cq_ring_entries;
- /*
- * Number of invalid entries dropped by the kernel due to
- * invalid index stored in array
- *
- * Written by the kernel, shouldn't be modified by the
- * application (i.e. get number of "new events" by comparing to
- * cached value).
- *
- * After a new SQ head value was read by the application this
- * counter includes all submissions that were dropped reaching
- * the new SQ head (and possibly more).
- */
- u32 sq_dropped;
- /*
- * Runtime SQ flags
- *
- * Written by the kernel, shouldn't be modified by the
- * application.
- *
- * The application needs a full memory barrier before checking
- * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
- */
- u32 sq_flags;
- /*
- * Runtime CQ flags
- *
- * Written by the application, shouldn't be modified by the
- * kernel.
- */
- u32 cq_flags;
- /*
- * Number of completion events lost because the queue was full;
- * this should be avoided by the application by making sure
- * there are not more requests pending than there is space in
- * the completion queue.
- *
- * Written by the kernel, shouldn't be modified by the
- * application (i.e. get number of "new events" by comparing to
- * cached value).
- *
- * As completion events come in out of order this counter is not
- * ordered with any other data.
- */
- u32 cq_overflow;
- /*
- * Ring buffer of completion events.
- *
- * The kernel writes completion events fresh every time they are
- * produced, so the application is allowed to modify pending
- * entries.
- */
- struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
-};
-
-enum io_uring_cmd_flags {
- IO_URING_F_COMPLETE_DEFER = 1,
- IO_URING_F_UNLOCKED = 2,
- /* int's last bit, sign checks are usually faster than a bit test */
- IO_URING_F_NONBLOCK = INT_MIN,
-};
-
-struct io_mapped_ubuf {
- u64 ubuf;
- u64 ubuf_end;
- unsigned int nr_bvecs;
- unsigned long acct_pages;
- struct bio_vec bvec[];
-};
-
-struct io_ring_ctx;
-
-struct io_overflow_cqe {
- struct io_uring_cqe cqe;
- struct list_head list;
-};
-
-struct io_fixed_file {
- /* file * with additional FFS_* flags */
- unsigned long file_ptr;
-};
-
-struct io_rsrc_put {
- struct list_head list;
- u64 tag;
- union {
- void *rsrc;
- struct file *file;
- struct io_mapped_ubuf *buf;
- };
-};
-
-struct io_file_table {
- struct io_fixed_file *files;
-};
-
-struct io_rsrc_node {
- struct percpu_ref refs;
- struct list_head node;
- struct list_head rsrc_list;
- struct io_rsrc_data *rsrc_data;
- struct llist_node llist;
- bool done;
-};
-
-typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
-
-struct io_rsrc_data {
- struct io_ring_ctx *ctx;
-
- u64 **tags;
- unsigned int nr;
- rsrc_put_fn *do_put;
- atomic_t refs;
- struct completion done;
- bool quiesce;
-};
-
-struct io_buffer {
- struct list_head list;
- __u64 addr;
- __u32 len;
- __u16 bid;
-};
-
-struct io_restriction {
- DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
- DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
- u8 sqe_flags_allowed;
- u8 sqe_flags_required;
- bool registered;
-};
-
-enum {
- IO_SQ_THREAD_SHOULD_STOP = 0,
- IO_SQ_THREAD_SHOULD_PARK,
-};
-
-struct io_sq_data {
- refcount_t refs;
- atomic_t park_pending;
- struct mutex lock;
-
- /* ctx's that are using this sqd */
- struct list_head ctx_list;
-
- struct task_struct *thread;
- struct wait_queue_head wait;
-
- unsigned sq_thread_idle;
- int sq_cpu;
- pid_t task_pid;
- pid_t task_tgid;
-
- unsigned long state;
- struct completion exited;
-};
-
-#define IO_COMPL_BATCH 32
-#define IO_REQ_CACHE_SIZE 32
-#define IO_REQ_ALLOC_BATCH 8
-
-struct io_submit_link {
- struct io_kiocb *head;
- struct io_kiocb *last;
-};
-
-struct io_submit_state {
- /* inline/task_work completion list, under ->uring_lock */
- struct io_wq_work_node free_list;
- /* batch completion logic */
- struct io_wq_work_list compl_reqs;
- struct io_submit_link link;
-
- bool plug_started;
- bool need_plug;
- unsigned short submit_nr;
- struct blk_plug plug;
-};
-
-struct io_ring_ctx {
- /* const or read-mostly hot data */
- struct {
- struct percpu_ref refs;
-
- struct io_rings *rings;
- unsigned int flags;
- unsigned int compat: 1;
- unsigned int drain_next: 1;
- unsigned int eventfd_async: 1;
- unsigned int restricted: 1;
- unsigned int off_timeout_used: 1;
- unsigned int drain_active: 1;
- } ____cacheline_aligned_in_smp;
-
- /* submission data */
- struct {
- struct mutex uring_lock;
-
- /*
- * Ring buffer of indices into array of io_uring_sqe, which is
- * mmapped by the application using the IORING_OFF_SQES offset.
- *
- * This indirection could e.g. be used to assign fixed
- * io_uring_sqe entries to operations and only submit them to
- * the queue when needed.
- *
- * The kernel modifies neither the indices array nor the entries
- * array.
- */
- u32 *sq_array;
- struct io_uring_sqe *sq_sqes;
- unsigned cached_sq_head;
- unsigned sq_entries;
- struct list_head defer_list;
-
- /*
- * Fixed resources fast path, should be accessed only under
- * uring_lock, and updated through io_uring_register(2)
- */
- struct io_rsrc_node *rsrc_node;
- int rsrc_cached_refs;
- struct io_file_table file_table;
- unsigned nr_user_files;
- unsigned nr_user_bufs;
- struct io_mapped_ubuf **user_bufs;
-
- struct io_submit_state submit_state;
- struct list_head timeout_list;
- struct list_head ltimeout_list;
- struct list_head cq_overflow_list;
- struct xarray io_buffers;
- struct xarray personalities;
- u32 pers_next;
- unsigned sq_thread_idle;
- } ____cacheline_aligned_in_smp;
-
- /* IRQ completion list, under ->completion_lock */
- struct io_wq_work_list locked_free_list;
- unsigned int locked_free_nr;
-
- const struct cred *sq_creds; /* cred used for __io_sq_thread() */
- struct io_sq_data *sq_data; /* if using sq thread polling */
-
- struct wait_queue_head sqo_sq_wait;
- struct list_head sqd_list;
-
- unsigned long check_cq_overflow;
-
- struct {
- unsigned cached_cq_tail;
- unsigned cq_entries;
- struct eventfd_ctx *cq_ev_fd;
- struct wait_queue_head cq_wait;
- unsigned cq_extra;
- atomic_t cq_timeouts;
- unsigned cq_last_tm_flush;
- } ____cacheline_aligned_in_smp;
-
- struct {
- spinlock_t completion_lock;
-
- spinlock_t timeout_lock;
-
- /*
- * ->iopoll_list is protected by the ctx->uring_lock for
- * io_uring instances that don't use IORING_SETUP_SQPOLL.
- * For SQPOLL, only the single threaded io_sq_thread() will
- * manipulate the list, hence no extra locking is needed there.
- */
- struct io_wq_work_list iopoll_list;
- struct hlist_head *cancel_hash;
- unsigned cancel_hash_bits;
- bool poll_multi_queue;
- } ____cacheline_aligned_in_smp;
-
- struct io_restriction restrictions;
-
- /* slow path rsrc auxiliary data, used by update/register */
- struct {
- struct io_rsrc_node *rsrc_backup_node;
- struct io_mapped_ubuf *dummy_ubuf;
- struct io_rsrc_data *file_data;
- struct io_rsrc_data *buf_data;
-
- struct delayed_work rsrc_put_work;
- struct llist_head rsrc_put_llist;
- struct list_head rsrc_ref_list;
- spinlock_t rsrc_ref_lock;
- };
-
- /* Keep this last, we don't need it for the fast path */
- struct {
- #if defined(CONFIG_UNIX)
- struct socket *ring_sock;
- #endif
- /* hashed buffered write serialization */
- struct io_wq_hash *hash_map;
-
- /* Only used for accounting purposes */
- struct user_struct *user;
- struct mm_struct *mm_account;
-
- /* ctx exit and cancelation */
- struct llist_head fallback_llist;
- struct delayed_work fallback_work;
- struct work_struct exit_work;
- struct list_head tctx_list;
- struct completion ref_comp;
- u32 iowq_limits[2];
- bool iowq_limits_set;
- };
-};
-
-struct io_uring_task {
- /* submission side */
- int cached_refs;
- struct xarray xa;
- struct wait_queue_head wait;
- const struct io_ring_ctx *last;
- struct io_wq *io_wq;
- struct percpu_counter inflight;
- atomic_t inflight_tracked;
- atomic_t in_idle;
-
- spinlock_t task_lock;
- struct io_wq_work_list task_list;
- struct callback_head task_work;
- bool task_running;
-};
-
-/*
- * First field must be the file pointer in all the
- * iocb unions! See also 'struct kiocb' in <linux/fs.h>
- */
-struct io_poll_iocb {
- struct file *file;
- struct wait_queue_head *head;
- __poll_t events;
- bool done;
- bool canceled;
- struct wait_queue_entry wait;
-};
-
-struct io_poll_update {
- struct file *file;
- u64 old_user_data;
- u64 new_user_data;
- __poll_t events;
- bool update_events;
- bool update_user_data;
-};
-
-struct io_close {
- struct file *file;
- int fd;
- u32 file_slot;
-};
-
-struct io_timeout_data {
- struct io_kiocb *req;
- struct hrtimer timer;
- struct timespec64 ts;
- enum hrtimer_mode mode;
- u32 flags;
-};
-
-struct io_accept {
- struct file *file;
- struct sockaddr __user *addr;
- int __user *addr_len;
- int flags;
- u32 file_slot;
- unsigned long nofile;
-};
-
-struct io_sync {
- struct file *file;
- loff_t len;
- loff_t off;
- int flags;
- int mode;
-};
-
-struct io_cancel {
- struct file *file;
- u64 addr;
-};
-
-struct io_timeout {
- struct file *file;
- u32 off;
- u32 target_seq;
- struct list_head list;
- /* head of the link, used by linked timeouts only */
- struct io_kiocb *head;
- /* for linked completions */
- struct io_kiocb *prev;
-};
-
-struct io_timeout_rem {
- struct file *file;
- u64 addr;
-
- /* timeout update */
- struct timespec64 ts;
- u32 flags;
- bool ltimeout;
-};
-
-struct io_rw {
- /* NOTE: kiocb has the file as the first member, so don't do it here */
- struct kiocb kiocb;
- u64 addr;
- u64 len;
-};
-
-struct io_connect {
- struct file *file;
- struct sockaddr __user *addr;
- int addr_len;
-};
-
-struct io_sr_msg {
- struct file *file;
- union {
- struct compat_msghdr __user *umsg_compat;
- struct user_msghdr __user *umsg;
- void __user *buf;
- };
- int msg_flags;
- int bgid;
- size_t len;
-};
-
-struct io_open {
- struct file *file;
- int dfd;
- u32 file_slot;
- struct filename *filename;
- struct open_how how;
- unsigned long nofile;
-};
-
-struct io_rsrc_update {
- struct file *file;
- u64 arg;
- u32 nr_args;
- u32 offset;
-};
-
-struct io_fadvise {
- struct file *file;
- u64 offset;
- u32 len;
- u32 advice;
-};
-
-struct io_madvise {
- struct file *file;
- u64 addr;
- u32 len;
- u32 advice;
-};
-
-struct io_epoll {
- struct file *file;
- int epfd;
- int op;
- int fd;
- struct epoll_event event;
-};
-
-struct io_splice {
- struct file *file_out;
- struct file *file_in;
- loff_t off_out;
- loff_t off_in;
- u64 len;
- unsigned int flags;
-};
-
-struct io_provide_buf {
- struct file *file;
- __u64 addr;
- __u32 len;
- __u32 bgid;
- __u16 nbufs;
- __u16 bid;
-};
-
-struct io_statx {
- struct file *file;
- int dfd;
- unsigned int mask;
- unsigned int flags;
- const char __user *filename;
- struct statx __user *buffer;
-};
-
-struct io_shutdown {
- struct file *file;
- int how;
-};
-
-struct io_rename {
- struct file *file;
- int old_dfd;
- int new_dfd;
- struct filename *oldpath;
- struct filename *newpath;
- int flags;
-};
-
-struct io_unlink {
- struct file *file;
- int dfd;
- int flags;
- struct filename *filename;
-};
-
-struct io_mkdir {
- struct file *file;
- int dfd;
- umode_t mode;
- struct filename *filename;
-};
-
-struct io_symlink {
- struct file *file;
- int new_dfd;
- struct filename *oldpath;
- struct filename *newpath;
-};
-
-struct io_hardlink {
- struct file *file;
- int old_dfd;
- int new_dfd;
- struct filename *oldpath;
- struct filename *newpath;
- int flags;
-};
-
-struct io_async_connect {
- struct sockaddr_storage address;
-};
-
-struct io_async_msghdr {
- struct iovec fast_iov[UIO_FASTIOV];
- /* points to an allocated iov, if NULL we use fast_iov instead */
- struct iovec *free_iov;
- struct sockaddr __user *uaddr;
- struct msghdr msg;
- struct sockaddr_storage addr;
-};
-
-struct io_rw_state {
- struct iov_iter iter;
- struct iov_iter_state iter_state;
- struct iovec fast_iov[UIO_FASTIOV];
-};
-
-struct io_async_rw {
- struct io_rw_state s;
- const struct iovec *free_iovec;
- size_t bytes_done;
- struct wait_page_queue wpq;
-};
-
-enum {
- REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
- REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
- REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
- REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
- REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
- REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
-
- /* first byte is taken by user flags, shift it to not overlap */
- REQ_F_FAIL_BIT = 8,
- REQ_F_INFLIGHT_BIT,
- REQ_F_CUR_POS_BIT,
- REQ_F_NOWAIT_BIT,
- REQ_F_LINK_TIMEOUT_BIT,
- REQ_F_NEED_CLEANUP_BIT,
- REQ_F_POLLED_BIT,
- REQ_F_BUFFER_SELECTED_BIT,
- REQ_F_COMPLETE_INLINE_BIT,
- REQ_F_REISSUE_BIT,
- REQ_F_CREDS_BIT,
- REQ_F_REFCOUNT_BIT,
- REQ_F_ARM_LTIMEOUT_BIT,
- REQ_F_ASYNC_DATA_BIT,
- /* keep async read/write and isreg together and in order */
- REQ_F_SUPPORT_NOWAIT_BIT,
- REQ_F_ISREG_BIT,
-
- /* not a real bit, just to check we're not overflowing the space */
- __REQ_F_LAST_BIT,
-};
-
-enum {
- /* ctx owns file */
- REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
- /* drain existing IO first */
- REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
- /* linked sqes */
- REQ_F_LINK = BIT(REQ_F_LINK_BIT),
- /* doesn't sever on completion < 0 */
- REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
- /* IOSQE_ASYNC */
- REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
- /* IOSQE_BUFFER_SELECT */
- REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
-
- /* fail rest of links */
- REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
- /* on inflight list, should be cancelled and waited on exit reliably */
- REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
- /* read/write uses file position */
- REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
- /* must not punt to workers */
- REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
- /* has or had linked timeout */
- REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
- /* needs cleanup */
- REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
- /* already went through poll handler */
- REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
- /* buffer already selected */
- REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
- /* completion is deferred through io_comp_state */
- REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
- /* caller should reissue async */
- REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
- /* supports async reads/writes */
- REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
- /* regular file */
- REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
- /* has creds assigned */
- REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
- /* skip refcounting if not set */
- REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
- /* there is a linked timeout that has to be armed */
- REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
- /* ->async_data allocated */
- REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
-};
-
-struct async_poll {
- struct io_poll_iocb poll;
- struct io_poll_iocb *double_poll;
-};
-
-typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
-
-struct io_task_work {
- union {
- struct io_wq_work_node node;
- struct llist_node fallback_node;
- };
- io_req_tw_func_t func;
-};
-
-enum {
- IORING_RSRC_FILE = 0,
- IORING_RSRC_BUFFER = 1,
-};
-
-/*
- * NOTE! Each of the iocb union members has the file pointer
- * as the first entry in their struct definition. So you can
- * access the file pointer through any of the sub-structs,
- * or directly as just 'file' in this struct.
- */
-struct io_kiocb {
- union {
- struct file *file;
- struct io_rw rw;
- struct io_poll_iocb poll;
- struct io_poll_update poll_update;
- struct io_accept accept;
- struct io_sync sync;
- struct io_cancel cancel;
- struct io_timeout timeout;
- struct io_timeout_rem timeout_rem;
- struct io_connect connect;
- struct io_sr_msg sr_msg;
- struct io_open open;
- struct io_close close;
- struct io_rsrc_update rsrc_update;
- struct io_fadvise fadvise;
- struct io_madvise madvise;
- struct io_epoll epoll;
- struct io_splice splice;
- struct io_provide_buf pbuf;
- struct io_statx statx;
- struct io_shutdown shutdown;
- struct io_rename rename;
- struct io_unlink unlink;
- struct io_mkdir mkdir;
- struct io_symlink symlink;
- struct io_hardlink hardlink;
- };
-
- u8 opcode;
- /* polled IO has completed */
- u8 iopoll_completed;
- u16 buf_index;
- unsigned int flags;
-
- u64 user_data;
- u32 result;
- u32 cflags;
-
- struct io_ring_ctx *ctx;
- struct task_struct *task;
-
- struct percpu_ref *fixed_rsrc_refs;
- /* store used ubuf, so we can prevent reloading */
- struct io_mapped_ubuf *imu;
-
- /* used by request caches, completion batching and iopoll */
- struct io_wq_work_node comp_list;
- atomic_t refs;
- struct io_kiocb *link;
- struct io_task_work io_task_work;
- /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
- struct hlist_node hash_node;
- /* internal polling, see IORING_FEAT_FAST_POLL */
- struct async_poll *apoll;
- /* opcode allocated if it needs to store data for async defer */
- void *async_data;
- struct io_wq_work work;
- /* custom credentials, valid IFF REQ_F_CREDS is set */
- const struct cred *creds;
- /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
- struct io_buffer *kbuf;
-};
-
-struct io_tctx_node {
- struct list_head ctx_node;
- struct task_struct *task;
- struct io_ring_ctx *ctx;
-};
-
-struct io_defer_entry {
- struct list_head list;
- struct io_kiocb *req;
- u32 seq;
-};
-
-struct io_op_def {
- /* needs req->file assigned */
- unsigned needs_file : 1;
- /* should block plug */
- unsigned plug : 1;
- /* hash wq insertion if file is a regular file */
- unsigned hash_reg_file : 1;
- /* unbound wq insertion if file is a non-regular file */
- unsigned unbound_nonreg_file : 1;
- /* set if opcode supports polled "wait" */
- unsigned pollin : 1;
- unsigned pollout : 1;
- /* op supports buffer selection */
- unsigned buffer_select : 1;
- /* do async prep if the request is going to be punted */
- unsigned needs_async_setup : 1;
- /* opcode is not supported by this kernel */
- unsigned not_supported : 1;
- /* skip auditing */
- unsigned audit_skip : 1;
- /* size of async data needed, if any */
- unsigned short async_size;
-};
-
-static const struct io_op_def io_op_defs[] = {
- [IORING_OP_NOP] = {},
- [IORING_OP_READV] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollin = 1,
- .buffer_select = 1,
- .needs_async_setup = 1,
- .plug = 1,
- .audit_skip = 1,
- .async_size = sizeof(struct io_async_rw),
- },
- [IORING_OP_WRITEV] = {
- .needs_file = 1,
- .hash_reg_file = 1,
- .unbound_nonreg_file = 1,
- .pollout = 1,
- .needs_async_setup = 1,
- .plug = 1,
- .audit_skip = 1,
- .async_size = sizeof(struct io_async_rw),
- },
- [IORING_OP_FSYNC] = {
- .needs_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_READ_FIXED] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollin = 1,
- .plug = 1,
- .audit_skip = 1,
- .async_size = sizeof(struct io_async_rw),
- },
- [IORING_OP_WRITE_FIXED] = {
- .needs_file = 1,
- .hash_reg_file = 1,
- .unbound_nonreg_file = 1,
- .pollout = 1,
- .plug = 1,
- .audit_skip = 1,
- .async_size = sizeof(struct io_async_rw),
- },
- [IORING_OP_POLL_ADD] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_POLL_REMOVE] = {
- .audit_skip = 1,
- },
- [IORING_OP_SYNC_FILE_RANGE] = {
- .needs_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_SENDMSG] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollout = 1,
- .needs_async_setup = 1,
- .async_size = sizeof(struct io_async_msghdr),
- },
- [IORING_OP_RECVMSG] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollin = 1,
- .buffer_select = 1,
- .needs_async_setup = 1,
- .async_size = sizeof(struct io_async_msghdr),
- },
- [IORING_OP_TIMEOUT] = {
- .audit_skip = 1,
- .async_size = sizeof(struct io_timeout_data),
- },
- [IORING_OP_TIMEOUT_REMOVE] = {
- /* used by timeout updates' prep() */
- .audit_skip = 1,
- },
- [IORING_OP_ACCEPT] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollin = 1,
- },
- [IORING_OP_ASYNC_CANCEL] = {
- .audit_skip = 1,
- },
- [IORING_OP_LINK_TIMEOUT] = {
- .audit_skip = 1,
- .async_size = sizeof(struct io_timeout_data),
- },
- [IORING_OP_CONNECT] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollout = 1,
- .needs_async_setup = 1,
- .async_size = sizeof(struct io_async_connect),
- },
- [IORING_OP_FALLOCATE] = {
- .needs_file = 1,
- },
- [IORING_OP_OPENAT] = {},
- [IORING_OP_CLOSE] = {},
- [IORING_OP_FILES_UPDATE] = {
- .audit_skip = 1,
- },
- [IORING_OP_STATX] = {
- .audit_skip = 1,
- },
- [IORING_OP_READ] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollin = 1,
- .buffer_select = 1,
- .plug = 1,
- .audit_skip = 1,
- .async_size = sizeof(struct io_async_rw),
- },
- [IORING_OP_WRITE] = {
- .needs_file = 1,
- .hash_reg_file = 1,
- .unbound_nonreg_file = 1,
- .pollout = 1,
- .plug = 1,
- .audit_skip = 1,
- .async_size = sizeof(struct io_async_rw),
- },
- [IORING_OP_FADVISE] = {
- .needs_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_MADVISE] = {},
- [IORING_OP_SEND] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollout = 1,
- .audit_skip = 1,
- },
- [IORING_OP_RECV] = {
- .needs_file = 1,
- .unbound_nonreg_file = 1,
- .pollin = 1,
- .buffer_select = 1,
- .audit_skip = 1,
- },
- [IORING_OP_OPENAT2] = {
- },
- [IORING_OP_EPOLL_CTL] = {
- .unbound_nonreg_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_SPLICE] = {
- .needs_file = 1,
- .hash_reg_file = 1,
- .unbound_nonreg_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_PROVIDE_BUFFERS] = {
- .audit_skip = 1,
- },
- [IORING_OP_REMOVE_BUFFERS] = {
- .audit_skip = 1,
- },
- [IORING_OP_TEE] = {
- .needs_file = 1,
- .hash_reg_file = 1,
- .unbound_nonreg_file = 1,
- .audit_skip = 1,
- },
- [IORING_OP_SHUTDOWN] = {
- .needs_file = 1,
- },
- [IORING_OP_RENAMEAT] = {},
- [IORING_OP_UNLINKAT] = {},
- [IORING_OP_MKDIRAT] = {},
- [IORING_OP_SYMLINKAT] = {},
- [IORING_OP_LINKAT] = {},
-};
-
-/* requests with any of those set should undergo io_disarm_next() */
-#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
-
-static bool io_disarm_next(struct io_kiocb *req);
-static void io_uring_del_tctx_node(unsigned long index);
-static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
- struct task_struct *task,
- bool cancel_all);
-static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
-
-static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags);
-static void io_put_req(struct io_kiocb *req);
-static void io_put_req_deferred(struct io_kiocb *req);
-static void io_dismantle_req(struct io_kiocb *req);
-static void io_queue_linked_timeout(struct io_kiocb *req);
-static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
- struct io_uring_rsrc_update2 *up,
- unsigned nr_args);
-static void io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd, bool fixed);
-static void __io_queue_sqe(struct io_kiocb *req);
-static void io_rsrc_put_work(struct work_struct *work);
-
-static void io_req_task_queue(struct io_kiocb *req);
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
-static int io_req_prep_async(struct io_kiocb *req);
-
-static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
- unsigned int issue_flags, u32 slot_index);
-static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
-
-static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
-
-static struct kmem_cache *req_cachep;
-
-static const struct file_operations io_uring_fops;
-
-struct sock *io_uring_get_socket(struct file *file)
-{
-#if defined(CONFIG_UNIX)
- if (file->f_op == &io_uring_fops) {
- struct io_ring_ctx *ctx = file->private_data;
-
- return ctx->ring_sock->sk;
- }
-#endif
- return NULL;
-}
-EXPORT_SYMBOL(io_uring_get_socket);
-
-static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
-{
- if (!*locked) {
- mutex_lock(&ctx->uring_lock);
- *locked = true;
- }
-}
-
-#define io_for_each_link(pos, head) \
- for (pos = (head); pos; pos = pos->link)
-
-/*
- * Shamelessly stolen from the mm implementation of page reference checking,
- * see commit f958d7b528b1 for details.
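- *
- * Unsigned wrap makes this a single compare: the sum is <= 127 only when
- * refs is 0 or has gone "negative" (within 127 of wrapping past UINT_MAX),
- * which are exactly the buggy states this check is meant to catch.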
- */
-#define req_ref_zero_or_close_to_overflow(req) \
- ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
-
-static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
-{
- WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
- return atomic_inc_not_zero(&req->refs);
-}
-
-static inline bool req_ref_put_and_test(struct io_kiocb *req)
-{
- if (likely(!(req->flags & REQ_F_REFCOUNT)))
- return true;
-
- WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
- return atomic_dec_and_test(&req->refs);
-}
-
-static inline void req_ref_put(struct io_kiocb *req)
-{
- WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
- WARN_ON_ONCE(req_ref_put_and_test(req));
-}
-
-static inline void req_ref_get(struct io_kiocb *req)
-{
- WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
- WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
- atomic_inc(&req->refs);
-}
-
-static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
-{
- if (!wq_list_empty(&ctx->submit_state.compl_reqs))
- __io_submit_flush_completions(ctx);
-}
-
-static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
-{
- if (!(req->flags & REQ_F_REFCOUNT)) {
- req->flags |= REQ_F_REFCOUNT;
- atomic_set(&req->refs, nr);
- }
-}
-
-static inline void io_req_set_refcount(struct io_kiocb *req)
-{
- __io_req_set_refcount(req, 1);
-}
-
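- /*
- * rsrc node references are taken IO_RSRC_REF_BATCH at a time and handed out
- * from ctx->rsrc_cached_refs under uring_lock, so the percpu_ref itself is
- * only touched once per batch on the submission side.
- */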
-#define IO_RSRC_REF_BATCH 100
-
-static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
- struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- struct percpu_ref *ref = req->fixed_rsrc_refs;
-
- if (ref) {
- if (ref == &ctx->rsrc_node->refs)
- ctx->rsrc_cached_refs++;
- else
- percpu_ref_put(ref);
- }
-}
-
-static inline void io_req_put_rsrc(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
- if (req->fixed_rsrc_refs)
- percpu_ref_put(req->fixed_rsrc_refs);
-}
-
-static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- if (ctx->rsrc_cached_refs) {
- percpu_ref_put_many(&ctx->rsrc_node->refs, ctx->rsrc_cached_refs);
- ctx->rsrc_cached_refs = 0;
- }
-}
-
-static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
- percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
-}
-
-static inline void io_req_set_rsrc_node(struct io_kiocb *req,
- struct io_ring_ctx *ctx)
-{
- if (!req->fixed_rsrc_refs) {
- req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
- ctx->rsrc_cached_refs--;
- if (unlikely(ctx->rsrc_cached_refs < 0))
- io_rsrc_refs_refill(ctx);
- }
-}
-
-static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
-{
- bool got = percpu_ref_tryget(ref);
-
- /* already at zero, wait for ->release() */
- if (!got)
- wait_for_completion(compl);
- percpu_ref_resurrect(ref);
- if (got)
- percpu_ref_put(ref);
-}
-
-static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
- bool cancel_all)
-{
- struct io_kiocb *req;
-
- if (task && head->task != task)
- return false;
- if (cancel_all)
- return true;
-
- io_for_each_link(req, head) {
- if (req->flags & REQ_F_INFLIGHT)
- return true;
- }
- return false;
-}
-
-static inline bool req_has_async_data(struct io_kiocb *req)
-{
- return req->flags & REQ_F_ASYNC_DATA;
-}
-
-static inline void req_set_fail(struct io_kiocb *req)
-{
- req->flags |= REQ_F_FAIL;
-}
-
-static inline void req_fail_link_node(struct io_kiocb *req, int res)
-{
- req_set_fail(req);
- req->result = res;
-}
-
-static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
-{
- struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
-
- complete(&ctx->ref_comp);
-}
-
-static inline bool io_is_timeout_noseq(struct io_kiocb *req)
-{
- return !req->timeout.off;
-}
-
-static __cold void io_fallback_req_func(struct work_struct *work)
-{
- struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
- fallback_work.work);
- struct llist_node *node = llist_del_all(&ctx->fallback_llist);
- struct io_kiocb *req, *tmp;
- bool locked = false;
-
- percpu_ref_get(&ctx->refs);
- llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
- req->io_task_work.func(req, &locked);
-
- if (locked) {
- io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- }
- percpu_ref_put(&ctx->refs);
-}
-
-static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
-{
- struct io_ring_ctx *ctx;
- int hash_bits;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- /*
- * Use 5 bits less than the max cq entries, that should give us around
- * 32 entries per hash list if totally full and uniformly spread.
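- * E.g. cq_entries == 4096 gives ilog2() == 12, hash_bits == 7, i.e. 128
- * hash buckets.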
- */
- hash_bits = ilog2(p->cq_entries);
- hash_bits -= 5;
- if (hash_bits <= 0)
- hash_bits = 1;
- ctx->cancel_hash_bits = hash_bits;
- ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!ctx->cancel_hash)
- goto err;
- __hash_init(ctx->cancel_hash, 1U << hash_bits);
-
- ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
- if (!ctx->dummy_ubuf)
- goto err;
- /* set an invalid range, so io_import_fixed() fails when it hits this dummy ubuf */
- ctx->dummy_ubuf->ubuf = -1UL;
-
- if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
- goto err;
-
- ctx->flags = p->flags;
- init_waitqueue_head(&ctx->sqo_sq_wait);
- INIT_LIST_HEAD(&ctx->sqd_list);
- INIT_LIST_HEAD(&ctx->cq_overflow_list);
- init_completion(&ctx->ref_comp);
- xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
- xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
- mutex_init(&ctx->uring_lock);
- init_waitqueue_head(&ctx->cq_wait);
- spin_lock_init(&ctx->completion_lock);
- spin_lock_init(&ctx->timeout_lock);
- INIT_WQ_LIST(&ctx->iopoll_list);
- INIT_LIST_HEAD(&ctx->defer_list);
- INIT_LIST_HEAD(&ctx->timeout_list);
- INIT_LIST_HEAD(&ctx->ltimeout_list);
- spin_lock_init(&ctx->rsrc_ref_lock);
- INIT_LIST_HEAD(&ctx->rsrc_ref_list);
- INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
- init_llist_head(&ctx->rsrc_put_llist);
- INIT_LIST_HEAD(&ctx->tctx_list);
- ctx->submit_state.free_list.next = NULL;
- INIT_WQ_LIST(&ctx->locked_free_list);
- INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
- INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
- return ctx;
-err:
- kfree(ctx->dummy_ubuf);
- kfree(ctx->cancel_hash);
- kfree(ctx);
- return NULL;
-}
-
-static void io_account_cq_overflow(struct io_ring_ctx *ctx)
-{
- struct io_rings *r = ctx->rings;
-
- WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
- ctx->cq_extra--;
-}
-
-static bool req_need_defer(struct io_kiocb *req, u32 seq)
-{
- if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
- struct io_ring_ctx *ctx = req->ctx;
-
- return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
- }
-
- return false;
-}
-
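- /*
- * These flag bits live in the low bits of the file pointer stored in the
- * fixed file table; FFS_MASK recovers the actual struct file pointer.
- */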
-#define FFS_NOWAIT 0x1UL
-#define FFS_ISREG 0x2UL
-#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG)
-
-static inline bool io_req_ffs_set(struct io_kiocb *req)
-{
- return req->flags & REQ_F_FIXED_FILE;
-}
-
-static inline void io_req_track_inflight(struct io_kiocb *req)
-{
- if (!(req->flags & REQ_F_INFLIGHT)) {
- req->flags |= REQ_F_INFLIGHT;
- atomic_inc(&current->io_uring->inflight_tracked);
- }
-}
-
-static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
-{
- if (WARN_ON_ONCE(!req->link))
- return NULL;
-
- req->flags &= ~REQ_F_ARM_LTIMEOUT;
- req->flags |= REQ_F_LINK_TIMEOUT;
-
- /* linked timeouts should have two refs once prep'ed */
- io_req_set_refcount(req);
- __io_req_set_refcount(req->link, 2);
- return req->link;
-}
-
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
- if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
- return NULL;
- return __io_prep_linked_timeout(req);
-}
-
-static void io_prep_async_work(struct io_kiocb *req)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
- struct io_ring_ctx *ctx = req->ctx;
-
- if (!(req->flags & REQ_F_CREDS)) {
- req->flags |= REQ_F_CREDS;
- req->creds = get_current_cred();
- }
-
- req->work.list.next = NULL;
- req->work.flags = 0;
- if (req->flags & REQ_F_FORCE_ASYNC)
- req->work.flags |= IO_WQ_WORK_CONCURRENT;
-
- if (req->flags & REQ_F_ISREG) {
- if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
- io_wq_hash_work(&req->work, file_inode(req->file));
- } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
- if (def->unbound_nonreg_file)
- req->work.flags |= IO_WQ_WORK_UNBOUND;
- }
-
- switch (req->opcode) {
- case IORING_OP_SPLICE:
- case IORING_OP_TEE:
- if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
- req->work.flags |= IO_WQ_WORK_UNBOUND;
- break;
- }
-}
-
-static void io_prep_async_link(struct io_kiocb *req)
-{
- struct io_kiocb *cur;
-
- if (req->flags & REQ_F_LINK_TIMEOUT) {
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock(&ctx->completion_lock);
- io_for_each_link(cur, req)
- io_prep_async_work(cur);
- spin_unlock(&ctx->completion_lock);
- } else {
- io_for_each_link(cur, req)
- io_prep_async_work(cur);
- }
-}
-
-static inline void io_req_add_compl_list(struct io_kiocb *req)
-{
- struct io_submit_state *state = &req->ctx->submit_state;
-
- wq_list_add_tail(&req->comp_list, &state->compl_reqs);
-}
-
-static void io_queue_async_work(struct io_kiocb *req, bool *dont_use)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *link = io_prep_linked_timeout(req);
- struct io_uring_task *tctx = req->task->io_uring;
-
- BUG_ON(!tctx);
- BUG_ON(!tctx->io_wq);
-
- /* init ->work of the whole link before punting */
- io_prep_async_link(req);
-
- /*
- * Not expected to happen, but if we do have a bug where this _can_
- * happen, catch it here and ensure the request is marked as
- * canceled. That will make io-wq go through the usual work cancel
- * procedure rather than attempt to run this request (or create a new
- * worker for it).
- */
- if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
- req->work.flags |= IO_WQ_WORK_CANCEL;
-
- trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
- &req->work, req->flags);
- io_wq_enqueue(tctx->io_wq, &req->work);
- if (link)
- io_queue_linked_timeout(link);
-}
-
-static void io_kill_timeout(struct io_kiocb *req, int status)
- __must_hold(&req->ctx->completion_lock)
- __must_hold(&req->ctx->timeout_lock)
-{
- struct io_timeout_data *io = req->async_data;
-
- if (hrtimer_try_to_cancel(&io->timer) != -1) {
- if (status)
- req_set_fail(req);
- atomic_set(&req->ctx->cq_timeouts,
- atomic_read(&req->ctx->cq_timeouts) + 1);
- list_del_init(&req->timeout.list);
- io_cqring_fill_event(req->ctx, req->user_data, status, 0);
- io_put_req_deferred(req);
- }
-}
-
-static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
-{
- while (!list_empty(&ctx->defer_list)) {
- struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
- struct io_defer_entry, list);
-
- if (req_need_defer(de->req, de->seq))
- break;
- list_del_init(&de->list);
- io_req_task_queue(de->req);
- kfree(de);
- }
-}
-
-static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
- __must_hold(&ctx->completion_lock)
-{
- u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
-
- spin_lock_irq(&ctx->timeout_lock);
- while (!list_empty(&ctx->timeout_list)) {
- u32 events_needed, events_got;
- struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
- struct io_kiocb, timeout.list);
-
- if (io_is_timeout_noseq(req))
- break;
-
- /*
- * Since seq can easily wrap around over time, subtract
- * the last seq at which timeouts were flushed before comparing.
- * Assuming not more than 2^31-1 events have happened since,
- * these subtractions won't have wrapped, so we can check if
- * target is in [last_seq, current_seq] by comparing the two.
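- *
- * E.g. with cq_last_tm_flush == 10 and seq == 15, events_got == 5: a
- * timeout with target_seq 12 (events_needed == 2) is flushed, while one
- * with target_seq 20 (events_needed == 10) keeps waiting.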
- */
- events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
- events_got = seq - ctx->cq_last_tm_flush;
- if (events_got < events_needed)
- break;
-
- list_del_init(&req->timeout.list);
- io_kill_timeout(req, 0);
- }
- ctx->cq_last_tm_flush = seq;
- spin_unlock_irq(&ctx->timeout_lock);
-}
-
-static __cold void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
-{
- if (ctx->off_timeout_used)
- io_flush_timeouts(ctx);
- if (ctx->drain_active)
- io_queue_deferred(ctx);
-}
-
-static inline void io_commit_cqring(struct io_ring_ctx *ctx)
-{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active))
- __io_commit_cqring_flush(ctx);
- /* order cqe stores with ring update */
- smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
-}
-
-static inline bool io_sqring_full(struct io_ring_ctx *ctx)
-{
- struct io_rings *r = ctx->rings;
-
- return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
-}
-
-static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
-{
- return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
-}
-
-static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
-{
- struct io_rings *rings = ctx->rings;
- unsigned tail, mask = ctx->cq_entries - 1;
-
- /*
- * writes to the cq entry need to come after reading head; the
- * control dependency is enough as we're using WRITE_ONCE to
- * fill the cq entry
- */
- if (__io_cqring_events(ctx) == ctx->cq_entries)
- return NULL;
-
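- /* cq_entries is rounded to a power of two at setup, so '& mask' wraps the free-running tail */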
- tail = ctx->cached_cq_tail++;
- return &rings->cqes[tail & mask];
-}
-
-static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
-{
- if (likely(!ctx->cq_ev_fd))
- return false;
- if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
- return false;
- return !ctx->eventfd_async || io_wq_current_is_worker();
-}
-
-/*
- * This should only get called when at least one event has been posted.
- * Some applications rely on the eventfd notification count only changing
- * IFF a new CQE has been added to the CQ ring. There's no dependency on
- * a 1:1 relationship between how many times this function is called (and
- * hence the eventfd count) and the number of CQEs posted to the CQ ring.
- */
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
-{
- /*
- * wake_up_all() may seem excessive, but io_wake_function() and
- * io_should_wake() handle the termination of the loop and only
- * wake as many waiters as we need to.
- */
- if (wq_has_sleeper(&ctx->cq_wait))
- wake_up_all(&ctx->cq_wait);
- if (io_should_trigger_evfd(ctx))
- eventfd_signal(ctx->cq_ev_fd, 1);
-}
-
-static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
-{
- /* see waitqueue_active() comment */
- smp_mb();
-
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- if (waitqueue_active(&ctx->cq_wait))
- wake_up_all(&ctx->cq_wait);
- }
- if (io_should_trigger_evfd(ctx))
- eventfd_signal(ctx->cq_ev_fd, 1);
-}
-
-/* Returns true if there are no backlogged entries after the flush */
-static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
-{
- bool all_flushed, posted;
-
- if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
- return false;
-
- posted = false;
- spin_lock(&ctx->completion_lock);
- while (!list_empty(&ctx->cq_overflow_list)) {
- struct io_uring_cqe *cqe = io_get_cqe(ctx);
- struct io_overflow_cqe *ocqe;
-
- if (!cqe && !force)
- break;
- ocqe = list_first_entry(&ctx->cq_overflow_list,
- struct io_overflow_cqe, list);
- if (cqe)
- memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
- else
- io_account_cq_overflow(ctx);
-
- posted = true;
- list_del(&ocqe->list);
- kfree(ocqe);
- }
-
- all_flushed = list_empty(&ctx->cq_overflow_list);
- if (all_flushed) {
- clear_bit(0, &ctx->check_cq_overflow);
- WRITE_ONCE(ctx->rings->sq_flags,
- ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
- }
-
- if (posted)
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- if (posted)
- io_cqring_ev_posted(ctx);
- return all_flushed;
-}
-
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
-{
- bool ret = true;
-
- if (test_bit(0, &ctx->check_cq_overflow)) {
- /* iopoll syncs against uring_lock, not completion_lock */
- if (ctx->flags & IORING_SETUP_IOPOLL)
- mutex_lock(&ctx->uring_lock);
- ret = __io_cqring_overflow_flush(ctx, false);
- if (ctx->flags & IORING_SETUP_IOPOLL)
- mutex_unlock(&ctx->uring_lock);
- }
-
- return ret;
-}
-
- /* must be called fairly soon after putting a request */
-static inline void io_put_task(struct task_struct *task, int nr)
-{
- struct io_uring_task *tctx = task->io_uring;
-
- if (likely(task == current)) {
- tctx->cached_refs += nr;
- } else {
- percpu_counter_sub(&tctx->inflight, nr);
- if (unlikely(atomic_read(&tctx->in_idle)))
- wake_up(&tctx->wait);
- put_task_struct_many(task, nr);
- }
-}
-
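- /*
- * tctx->cached_refs holds references pre-acquired on both the inflight
- * percpu counter and the task's usage count: submission consumes them and
- * io_put_task() hands them back, so the shared counters are only touched
- * when the cache needs refilling.
- */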
-static void io_task_refs_refill(struct io_uring_task *tctx)
-{
- unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
-
- percpu_counter_add(&tctx->inflight, refill);
- refcount_add(refill, &current->usage);
- tctx->cached_refs += refill;
-}
-
-static inline void io_get_task_refs(int nr)
-{
- struct io_uring_task *tctx = current->io_uring;
-
- tctx->cached_refs -= nr;
- if (unlikely(tctx->cached_refs < 0))
- io_task_refs_refill(tctx);
-}
-
-static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
-{
- struct io_overflow_cqe *ocqe;
-
- ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
- if (!ocqe) {
- /*
- * If we're in ring overflow flush mode, or in task cancel mode,
- * or cannot allocate an overflow entry, then we need to drop it
- * on the floor.
- */
- io_account_cq_overflow(ctx);
- return false;
- }
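- /* first overflowed CQE: flag the ring so the overflow list gets flushed */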
- if (list_empty(&ctx->cq_overflow_list)) {
- set_bit(0, &ctx->check_cq_overflow);
- WRITE_ONCE(ctx->rings->sq_flags,
- ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
-
- }
- ocqe->cqe.user_data = user_data;
- ocqe->cqe.res = res;
- ocqe->cqe.flags = cflags;
- list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
- return true;
-}
-
-static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
-{
- struct io_uring_cqe *cqe;
-
- trace_io_uring_complete(ctx, user_data, res, cflags);
-
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- WRITE_ONCE(cqe->user_data, user_data);
- WRITE_ONCE(cqe->res, res);
- WRITE_ONCE(cqe->flags, cflags);
- return true;
- }
- return io_cqring_event_overflow(ctx, user_data, res, cflags);
-}
-
- /* not hot enough to justify bloating callers with inlining */
-static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
-{
- return __io_cqring_fill_event(ctx, user_data, res, cflags);
-}
-
-static void io_req_complete_post(struct io_kiocb *req, s32 res,
- u32 cflags)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock(&ctx->completion_lock);
- __io_cqring_fill_event(ctx, req->user_data, res, cflags);
- /*
- * If we're the last reference to this request, add to our locked
- * free_list cache.
- */
- if (req_ref_put_and_test(req)) {
- if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
- if (req->flags & IO_DISARM_MASK)
- io_disarm_next(req);
- if (req->link) {
- io_req_task_queue(req->link);
- req->link = NULL;
- }
- }
- io_req_put_rsrc(req, ctx);
- io_dismantle_req(req);
- io_put_task(req->task, 1);
- wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
- ctx->locked_free_nr++;
- }
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-}
-
-static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
- u32 cflags)
-{
- req->result = res;
- req->cflags = cflags;
- req->flags |= REQ_F_COMPLETE_INLINE;
-}
-
-static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
- s32 res, u32 cflags)
-{
- if (issue_flags & IO_URING_F_COMPLETE_DEFER)
- io_req_complete_state(req, res, cflags);
- else
- io_req_complete_post(req, res, cflags);
-}
-
-static inline void io_req_complete(struct io_kiocb *req, s32 res)
-{
- __io_req_complete(req, 0, res, 0);
-}
-
-static void io_req_complete_failed(struct io_kiocb *req, s32 res)
-{
- req_set_fail(req);
- io_req_complete_post(req, res, 0);
-}
-
-static void io_req_complete_fail_submit(struct io_kiocb *req)
-{
- /*
- * We don't submit; fail them all. For that, replace hardlinks with
- * normal links. An extra REQ_F_LINK is tolerated.
- */
- req->flags &= ~REQ_F_HARDLINK;
- req->flags |= REQ_F_LINK;
- io_req_complete_failed(req, req->result);
-}
-
-/*
- * Don't initialise the fields below on every allocation, but do that in
- * advance and keep them valid across allocations.
- */
-static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
- req->ctx = ctx;
- req->link = NULL;
- req->async_data = NULL;
- /* not necessary, but safer to zero */
- req->result = 0;
-}
-
-static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
- struct io_submit_state *state)
-{
- spin_lock(&ctx->completion_lock);
- wq_list_splice(&ctx->locked_free_list, &state->free_list);
- ctx->locked_free_nr = 0;
- spin_unlock(&ctx->completion_lock);
-}
-
-/* Returns true IFF there are requests in the cache */
-static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
-{
- struct io_submit_state *state = &ctx->submit_state;
-
- /*
- * If we have more than a batch's worth of requests in our IRQ side
- * locked cache, grab the lock and move them over to our submission
- * side cache.
- */
- if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
- io_flush_cached_locked_reqs(ctx, state);
- return !!state->free_list.next;
-}
-
-/*
- * A request might get retired back into the request caches even before opcode
- * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
- * Because of that, io_alloc_req() should be called only under ->uring_lock
- * and with extra caution to not get a request that is still worked on.
- */
-static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- struct io_submit_state *state = &ctx->submit_state;
- gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
- void *reqs[IO_REQ_ALLOC_BATCH];
- struct io_kiocb *req;
- int ret, i;
-
- if (likely(state->free_list.next || io_flush_cached_reqs(ctx)))
- return true;
-
- ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
-
- /*
- * Bulk alloc is all-or-nothing. If we fail to get a batch,
- * retry single alloc to be on the safe side.
- */
- if (unlikely(ret <= 0)) {
- reqs[0] = kmem_cache_alloc(req_cachep, gfp);
- if (!reqs[0])
- return false;
- ret = 1;
- }
-
- percpu_ref_get_many(&ctx->refs, ret);
- for (i = 0; i < ret; i++) {
- req = reqs[i];
-
- io_preinit_req(req, ctx);
- wq_stack_add_head(&req->comp_list, &state->free_list);
- }
- return true;
-}
-
-static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
-{
- if (unlikely(!ctx->submit_state.free_list.next))
- return __io_alloc_req_refill(ctx);
- return true;
-}
-
-static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
-{
- struct io_wq_work_node *node;
-
- node = wq_stack_extract(&ctx->submit_state.free_list);
- return container_of(node, struct io_kiocb, comp_list);
-}
-
-static inline void io_put_file(struct file *file)
-{
- if (file)
- fput(file);
-}
-
-static inline void io_dismantle_req(struct io_kiocb *req)
-{
- unsigned int flags = req->flags;
-
- if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
- io_clean_op(req);
- if (!(flags & REQ_F_FIXED_FILE))
- io_put_file(req->file);
-}
-
-static __cold void __io_free_req(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- io_req_put_rsrc(req, ctx);
- io_dismantle_req(req);
- io_put_task(req->task, 1);
-
- spin_lock(&ctx->completion_lock);
- wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
- ctx->locked_free_nr++;
- spin_unlock(&ctx->completion_lock);
-}
-
-static inline void io_remove_next_linked(struct io_kiocb *req)
-{
- struct io_kiocb *nxt = req->link;
-
- req->link = nxt->link;
- nxt->link = NULL;
-}
-
-static bool io_kill_linked_timeout(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
- __must_hold(&req->ctx->timeout_lock)
-{
- struct io_kiocb *link = req->link;
-
- if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
- struct io_timeout_data *io = link->async_data;
-
- io_remove_next_linked(req);
- link->timeout.head = NULL;
- if (hrtimer_try_to_cancel(&io->timer) != -1) {
- list_del(&link->timeout.list);
- io_cqring_fill_event(link->ctx, link->user_data,
- -ECANCELED, 0);
- io_put_req_deferred(link);
- return true;
- }
- }
- return false;
-}
-
-static void io_fail_links(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
-{
- struct io_kiocb *nxt, *link = req->link;
-
- req->link = NULL;
- while (link) {
- long res = -ECANCELED;
-
- if (link->flags & REQ_F_FAIL)
- res = link->result;
-
- nxt = link->link;
- link->link = NULL;
-
- trace_io_uring_fail_link(req, link);
- io_cqring_fill_event(link->ctx, link->user_data, res, 0);
- io_put_req_deferred(link);
- link = nxt;
- }
-}
-
-static bool io_disarm_next(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
-{
- bool posted = false;
-
- if (req->flags & REQ_F_ARM_LTIMEOUT) {
- struct io_kiocb *link = req->link;
-
- req->flags &= ~REQ_F_ARM_LTIMEOUT;
- if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
- io_remove_next_linked(req);
- io_cqring_fill_event(link->ctx, link->user_data,
- -ECANCELED, 0);
- io_put_req_deferred(link);
- posted = true;
- }
- } else if (req->flags & REQ_F_LINK_TIMEOUT) {
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->timeout_lock);
- posted = io_kill_linked_timeout(req);
- spin_unlock_irq(&ctx->timeout_lock);
- }
- if (unlikely((req->flags & REQ_F_FAIL) &&
- !(req->flags & REQ_F_HARDLINK))) {
- posted |= (req->link != NULL);
- io_fail_links(req);
- }
- return posted;
-}
-
-static void __io_req_find_next_prep(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- bool posted;
-
- spin_lock(&ctx->completion_lock);
- posted = io_disarm_next(req);
- if (posted)
- io_commit_cqring(req->ctx);
- spin_unlock(&ctx->completion_lock);
- if (posted)
- io_cqring_ev_posted(ctx);
-}
-
-static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
-{
- struct io_kiocb *nxt;
-
- if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
- return NULL;
- /*
- * If LINK is set, we have dependent requests in this chain. If we
- * didn't fail this request, queue the first one up, moving any other
- * dependencies to the next request. In case of failure, fail the rest
- * of the chain.
- */
- if (unlikely(req->flags & IO_DISARM_MASK))
- __io_req_find_next_prep(req);
- nxt = req->link;
- req->link = NULL;
- return nxt;
-}
-
-static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
-{
- if (!ctx)
- return;
- if (*locked) {
- io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- *locked = false;
- }
- percpu_ref_put(&ctx->refs);
-}
-
-static void tctx_task_work(struct callback_head *cb)
-{
- bool locked = false;
- struct io_ring_ctx *ctx = NULL;
- struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
- task_work);
-
- while (1) {
- struct io_wq_work_node *node;
-
- if (!tctx->task_list.first && locked)
- io_submit_flush_completions(ctx);
-
- spin_lock_irq(&tctx->task_lock);
- node = tctx->task_list.first;
- INIT_WQ_LIST(&tctx->task_list);
- if (!node)
- tctx->task_running = false;
- spin_unlock_irq(&tctx->task_lock);
- if (!node)
- break;
-
- do {
- struct io_wq_work_node *next = node->next;
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- io_task_work.node);
-
- if (req->ctx != ctx) {
- ctx_flush_and_put(ctx, &locked);
- ctx = req->ctx;
- /* if not contended, grab and improve batching */
- locked = mutex_trylock(&ctx->uring_lock);
- percpu_ref_get(&ctx->refs);
- }
- req->io_task_work.func(req, &locked);
- node = next;
- } while (node);
-
- cond_resched();
- }
-
- ctx_flush_and_put(ctx, &locked);
-}
-
-static void io_req_task_work_add(struct io_kiocb *req)
-{
- struct task_struct *tsk = req->task;
- struct io_uring_task *tctx = tsk->io_uring;
- enum task_work_notify_mode notify;
- struct io_wq_work_node *node;
- unsigned long flags;
- bool running;
-
- WARN_ON_ONCE(!tctx);
-
- spin_lock_irqsave(&tctx->task_lock, flags);
- wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
- running = tctx->task_running;
- if (!running)
- tctx->task_running = true;
- spin_unlock_irqrestore(&tctx->task_lock, flags);
-
- /* task_work already pending, we're done */
- if (running)
- return;
-
- /*
- * SQPOLL kernel thread doesn't need notification, just a wakeup. For
- * all other cases, use TWA_SIGNAL unconditionally to ensure we're
- * processing task_work. There's no reliable way to tell if TWA_RESUME
- * will do the job.
- */
- notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
- if (likely(!task_work_add(tsk, &tctx->task_work, notify))) {
- if (notify == TWA_NONE)
- wake_up_process(tsk);
- return;
- }
-
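- /*
- * task_work_add() failed, most likely because the task is exiting:
- * reclaim the queued entries and punt them to the per-ctx fallback
- * workqueue instead.
- */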
- spin_lock_irqsave(&tctx->task_lock, flags);
- tctx->task_running = false;
- node = tctx->task_list.first;
- INIT_WQ_LIST(&tctx->task_list);
- spin_unlock_irqrestore(&tctx->task_lock, flags);
-
- while (node) {
- req = container_of(node, struct io_kiocb, io_task_work.node);
- node = node->next;
- if (llist_add(&req->io_task_work.fallback_node,
- &req->ctx->fallback_llist))
- schedule_delayed_work(&req->ctx->fallback_work, 1);
- }
-}
-
-static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- /* not needed for normal modes, but SQPOLL depends on it */
- io_tw_lock(ctx, locked);
- io_req_complete_failed(req, req->result);
-}
-
-static void io_req_task_submit(struct io_kiocb *req, bool *locked)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- io_tw_lock(ctx, locked);
- /* req->task == current here, checking PF_EXITING is safe */
- if (likely(!(req->task->flags & PF_EXITING)))
- __io_queue_sqe(req);
- else
- io_req_complete_failed(req, -EFAULT);
-}
-
-static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
-{
- req->result = ret;
- req->io_task_work.func = io_req_task_cancel;
- io_req_task_work_add(req);
-}
-
-static void io_req_task_queue(struct io_kiocb *req)
-{
- req->io_task_work.func = io_req_task_submit;
- io_req_task_work_add(req);
-}
-
-static void io_req_task_queue_reissue(struct io_kiocb *req)
-{
- req->io_task_work.func = io_queue_async_work;
- io_req_task_work_add(req);
-}
-
-static inline void io_queue_next(struct io_kiocb *req)
-{
- struct io_kiocb *nxt = io_req_find_next(req);
-
- if (nxt)
- io_req_task_queue(nxt);
-}
-
-static void io_free_req(struct io_kiocb *req)
-{
- io_queue_next(req);
- __io_free_req(req);
-}
-
-static void io_free_req_work(struct io_kiocb *req, bool *locked)
-{
- io_free_req(req);
-}
-
-static void io_free_batch_list(struct io_ring_ctx *ctx,
- struct io_wq_work_node *node)
- __must_hold(&ctx->uring_lock)
-{
- struct task_struct *task = NULL;
- int task_refs = 0;
-
- do {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- comp_list);
-
- if (unlikely(req->flags & REQ_F_REFCOUNT)) {
- node = req->comp_list.next;
- if (!req_ref_put_and_test(req))
- continue;
- }
-
- io_req_put_rsrc_locked(req, ctx);
- io_queue_next(req);
- io_dismantle_req(req);
-
- if (req->task != task) {
- if (task)
- io_put_task(task, task_refs);
- task = req->task;
- task_refs = 0;
- }
- task_refs++;
- node = req->comp_list.next;
- wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
- } while (node);
-
- if (task)
- io_put_task(task, task_refs);
-}
-
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- struct io_wq_work_node *node, *prev;
- struct io_submit_state *state = &ctx->submit_state;
-
- spin_lock(&ctx->completion_lock);
- wq_list_for_each(node, prev, &state->compl_reqs) {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- comp_list);
-
- __io_cqring_fill_event(ctx, req->user_data, req->result,
- req->cflags);
- }
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-
- io_free_batch_list(ctx, state->compl_reqs.first);
- INIT_WQ_LIST(&state->compl_reqs);
-}
-
-/*
- * Drop reference to request, return next in chain (if there is one) if this
- * was the last reference to this request.
- */
-static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
-{
- struct io_kiocb *nxt = NULL;
-
- if (req_ref_put_and_test(req)) {
- nxt = io_req_find_next(req);
- __io_free_req(req);
- }
- return nxt;
-}
-
-static inline void io_put_req(struct io_kiocb *req)
-{
- if (req_ref_put_and_test(req))
- io_free_req(req);
-}
-
-static inline void io_put_req_deferred(struct io_kiocb *req)
-{
- if (req_ref_put_and_test(req)) {
- req->io_task_work.func = io_free_req_work;
- io_req_task_work_add(req);
- }
-}
-
-static unsigned io_cqring_events(struct io_ring_ctx *ctx)
-{
- /* See comment at the top of this file */
- smp_rmb();
- return __io_cqring_events(ctx);
-}
-
-static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
-{
- struct io_rings *rings = ctx->rings;
-
- /* make sure SQ entry isn't read before tail */
- return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
-}
-
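- /*
- * The consumed provided-buffer ID is reported back through the CQE flags:
- * the bid is shifted into the upper bits and IORING_CQE_F_BUFFER tells
- * userspace that a buffer was used.
- */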
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
-{
- unsigned int cflags;
-
- cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
- cflags |= IORING_CQE_F_BUFFER;
- req->flags &= ~REQ_F_BUFFER_SELECTED;
- kfree(kbuf);
- return cflags;
-}
-
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
-{
- if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
- return 0;
- return io_put_kbuf(req, req->kbuf);
-}
-
-static inline bool io_run_task_work(void)
-{
- if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
- __set_current_state(TASK_RUNNING);
- tracehook_notify_signal();
- return true;
- }
-
- return false;
-}
-
-static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
-{
- struct io_wq_work_node *pos, *start, *prev;
- unsigned int poll_flags = BLK_POLL_NOSLEEP;
- DEFINE_IO_COMP_BATCH(iob);
- int nr_events = 0;
-
- /*
- * Only spin for completions if we don't have multiple devices hanging
- * off our complete list.
- */
- if (ctx->poll_multi_queue || force_nonspin)
- poll_flags |= BLK_POLL_ONESHOT;
-
- wq_list_for_each(pos, start, &ctx->iopoll_list) {
- struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
- struct kiocb *kiocb = &req->rw.kiocb;
- int ret;
-
- /*
- * Move completed and retryable entries to our local lists.
- * If we find a request that requires polling, break out
- * and complete those lists first, if we have entries there.
- */
- if (READ_ONCE(req->iopoll_completed))
- break;
-
- ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
- if (unlikely(ret < 0))
- return ret;
- else if (ret)
- poll_flags |= BLK_POLL_ONESHOT;
-
- /* iopoll may have completed current req */
- if (!rq_list_empty(iob.req_list) ||
- READ_ONCE(req->iopoll_completed))
- break;
- }
-
- if (!rq_list_empty(iob.req_list))
- iob.complete(&iob);
- else if (!pos)
- return 0;
-
- prev = start;
- wq_list_for_each_resume(pos, prev) {
- struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
-
- /* order with io_complete_rw_iopoll(), e.g. ->result updates */
- if (!smp_load_acquire(&req->iopoll_completed))
- break;
- __io_cqring_fill_event(ctx, req->user_data, req->result,
- io_put_rw_kbuf(req));
- nr_events++;
- }
-
- if (unlikely(!nr_events))
- return 0;
-
- io_commit_cqring(ctx);
- io_cqring_ev_posted_iopoll(ctx);
- pos = start ? start->next : ctx->iopoll_list.first;
- wq_list_cut(&ctx->iopoll_list, prev, start);
- io_free_batch_list(ctx, pos);
- return nr_events;
-}
-
-/*
- * We can't just wait for polled events to come to us, we have to actively
- * find and complete them.
- */
-static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
-{
- if (!(ctx->flags & IORING_SETUP_IOPOLL))
- return;
-
- mutex_lock(&ctx->uring_lock);
- while (!wq_list_empty(&ctx->iopoll_list)) {
- /* let it sleep and repeat later if we can't complete a request */
- if (io_do_iopoll(ctx, true) == 0)
- break;
- /*
- * Ensure we allow local-to-the-cpu processing to take place; in this
- * case we need to ensure that we reap all events.
- * Also let task_work, etc. progress by releasing the mutex.
- */
- if (need_resched()) {
- mutex_unlock(&ctx->uring_lock);
- cond_resched();
- mutex_lock(&ctx->uring_lock);
- }
- }
- mutex_unlock(&ctx->uring_lock);
-}
-
-static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
-{
- unsigned int nr_events = 0;
- int ret = 0;
-
- /*
- * We disallow the app entering submit/complete with polling, but we
- * still need to lock the ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
- /*
- * Don't enter poll loop if we already have events pending.
- * If we do, we can potentially be spinning for commands that
- * already triggered a CQE (eg in error).
- */
- if (test_bit(0, &ctx->check_cq_overflow))
- __io_cqring_overflow_flush(ctx, false);
- if (io_cqring_events(ctx))
- goto out;
- do {
- /*
- * If a submit got punted to a workqueue, we can have the
- * application entering polling for a command before it gets
- * issued. That app will hold the uring_lock for the duration
- * of the poll right here, so we need to take a breather every
- * now and then to ensure that the issue has a chance to add
- * the poll to the issued list. Otherwise we can spin here
- * forever, while the workqueue is stuck trying to acquire the
- * very same mutex.
- */
- if (wq_list_empty(&ctx->iopoll_list)) {
- u32 tail = ctx->cached_cq_tail;
-
- mutex_unlock(&ctx->uring_lock);
- io_run_task_work();
- mutex_lock(&ctx->uring_lock);
-
- /* some requests don't go through iopoll_list */
- if (tail != ctx->cached_cq_tail ||
- wq_list_empty(&ctx->iopoll_list))
- break;
- }
- ret = io_do_iopoll(ctx, !min);
- if (ret < 0)
- break;
- nr_events += ret;
- ret = 0;
- } while (nr_events < min && !need_resched());
-out:
- mutex_unlock(&ctx->uring_lock);
- return ret;
-}
-
-static void kiocb_end_write(struct io_kiocb *req)
-{
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
- if (req->flags & REQ_F_ISREG) {
- struct super_block *sb = file_inode(req->file)->i_sb;
-
- __sb_writers_acquired(sb, SB_FREEZE_WRITE);
- sb_end_write(sb);
- }
-}
-
-#ifdef CONFIG_BLOCK
-static bool io_resubmit_prep(struct io_kiocb *req)
-{
- struct io_async_rw *rw = req->async_data;
-
- if (!req_has_async_data(req))
- return !io_req_prep_async(req);
- iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
- return true;
-}
-
-static bool io_rw_should_reissue(struct io_kiocb *req)
-{
- umode_t mode = file_inode(req->file)->i_mode;
- struct io_ring_ctx *ctx = req->ctx;
-
- if (!S_ISBLK(mode) && !S_ISREG(mode))
- return false;
- if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
- !(ctx->flags & IORING_SETUP_IOPOLL)))
- return false;
- /*
- * If ref is dying, we might be running poll reap from the exit work.
- * Don't attempt to reissue from that path, just let it fail with
- * -EAGAIN.
- */
- if (percpu_ref_is_dying(&ctx->refs))
- return false;
- /*
- * Play it safe and assume it's not safe to re-import and reissue if
- * we're not in the original thread group (or not in task context).
- */
- if (!same_thread_group(req->task, current) || !in_task())
- return false;
- return true;
-}
-#else
-static bool io_resubmit_prep(struct io_kiocb *req)
-{
- return false;
-}
-static bool io_rw_should_reissue(struct io_kiocb *req)
-{
- return false;
-}
-#endif
-
-static bool __io_complete_rw_common(struct io_kiocb *req, long res)
-{
- if (req->rw.kiocb.ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
- if (unlikely(res != req->result)) {
- if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
- io_rw_should_reissue(req)) {
- req->flags |= REQ_F_REISSUE;
- return true;
- }
- req_set_fail(req);
- req->result = res;
- }
- return false;
-}
-
-static void io_req_task_complete(struct io_kiocb *req, bool *locked)
-{
- unsigned int cflags = io_put_rw_kbuf(req);
- int res = req->result;
-
- if (*locked) {
- io_req_complete_state(req, res, cflags);
- io_req_add_compl_list(req);
- } else {
- io_req_complete_post(req, res, cflags);
- }
-}
-
-static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
- unsigned int issue_flags)
-{
- if (__io_complete_rw_common(req, res))
- return;
- __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
-}
-
-static void io_complete_rw(struct kiocb *kiocb, long res)
-{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
-
- if (__io_complete_rw_common(req, res))
- return;
- req->result = res;
- req->io_task_work.func = io_req_task_complete;
- io_req_task_work_add(req);
-}
-
-static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
-{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
-
- if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
- if (unlikely(res != req->result)) {
- if (res == -EAGAIN && io_rw_should_reissue(req)) {
- req->flags |= REQ_F_REISSUE;
- return;
- }
- req->result = res;
- }
-
- /* order with io_iopoll_complete() checking ->iopoll_completed */
- smp_store_release(&req->iopoll_completed, 1);
-}
-
-/*
- * After the iocb has been issued, it's safe to be found on the poll list.
- * Adding the kiocb to the list AFTER submission ensures that we don't
- * find it from an io_do_iopoll() thread before the issuer is done
- * accessing the kiocb cookie.
- */
-static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
- const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
- /* workqueue context doesn't hold uring_lock, grab it now */
- if (unlikely(needs_lock))
- mutex_lock(&ctx->uring_lock);
-
- /*
- * Track whether we have multiple files in our lists. This affects how
- * polling is eventually done: we don't spin if the requests may be on
- * different devices.
- */
- if (wq_list_empty(&ctx->iopoll_list)) {
- ctx->poll_multi_queue = false;
- } else if (!ctx->poll_multi_queue) {
- struct io_kiocb *list_req;
-
- list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
- comp_list);
- if (list_req->file != req->file)
- ctx->poll_multi_queue = true;
- }
-
- /*
- * For fast devices, IO may have already completed. If it has, add
- * it to the front so we find it first.
- */
- if (READ_ONCE(req->iopoll_completed))
- wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
- else
- wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
-
- if (unlikely(needs_lock)) {
- /*
- * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
- * in sq thread task context or in io worker task context. If
- * the current task context is the sq thread, we don't need to
- * check whether we should wake up the sq thread.
- */
- if ((ctx->flags & IORING_SETUP_SQPOLL) &&
- wq_has_sleeper(&ctx->sq_data->wait))
- wake_up(&ctx->sq_data->wait);
-
- mutex_unlock(&ctx->uring_lock);
- }
-}
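
Not part of this patch: the following is a rough userspace sketch (assuming liburing is available) of the polled-IO path that io_iopoll_req_issued() and io_complete_rw_iopoll() service. The prep code above rejects IOPOLL requests that aren't IOCB_DIRECT or whose file lacks ->iopoll, so the ring is created with IORING_SETUP_IOPOLL and the file is opened O_DIRECT.

#define _GNU_SOURCE
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int polled_read(const char *path)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return -1;
	fd = open(path, O_RDONLY | O_DIRECT);
	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096))
		return -1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* completion is reaped by polling the device, not by interrupt */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	free(buf);
	close(fd);
	io_uring_queue_exit(&ring);
	return ret;
}
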
-
-static bool io_bdev_nowait(struct block_device *bdev)
-{
- return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
-}
-
-/*
- * If we tracked the file through the SCM inflight mechanism, we could support
- * any file. For now, just ensure that anything potentially problematic is done
- * inline.
- */
-static bool __io_file_supports_nowait(struct file *file, umode_t mode)
-{
- if (S_ISBLK(mode)) {
- if (IS_ENABLED(CONFIG_BLOCK) &&
- io_bdev_nowait(I_BDEV(file->f_mapping->host)))
- return true;
- return false;
- }
- if (S_ISSOCK(mode))
- return true;
- if (S_ISREG(mode)) {
- if (IS_ENABLED(CONFIG_BLOCK) &&
- io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
- file->f_op != &io_uring_fops)
- return true;
- return false;
- }
-
- /* any ->read/write should understand O_NONBLOCK */
- if (file->f_flags & O_NONBLOCK)
- return true;
- return file->f_mode & FMODE_NOWAIT;
-}
-
-static unsigned int io_file_get_flags(struct file *file)
-{
- umode_t mode = file_inode(file)->i_mode;
- unsigned int res = 0;
-
- if (S_ISREG(mode))
- res |= FFS_ISREG;
- if (__io_file_supports_nowait(file, mode))
- res |= FFS_NOWAIT;
- return res;
-}
-
-static inline bool io_file_supports_nowait(struct io_kiocb *req)
-{
- return req->flags & REQ_F_SUPPORT_NOWAIT;
-}
-
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct kiocb *kiocb = &req->rw.kiocb;
- struct file *file = req->file;
- unsigned ioprio;
- int ret;
-
- if (!io_req_ffs_set(req))
- req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
-
- kiocb->ki_pos = READ_ONCE(sqe->off);
- if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
- req->flags |= REQ_F_CUR_POS;
- kiocb->ki_pos = file->f_pos;
- }
- kiocb->ki_flags = iocb_flags(file);
- ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
- if (unlikely(ret))
- return ret;
-
- /*
- * If the file is marked O_NONBLOCK, still allow retry for it if it
- * supports async. Otherwise it's impossible to use O_NONBLOCK files
- * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
- */
- if ((kiocb->ki_flags & IOCB_NOWAIT) ||
- ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
- req->flags |= REQ_F_NOWAIT;
-
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
- return -EOPNOTSUPP;
-
- kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
- kiocb->ki_complete = io_complete_rw_iopoll;
- req->iopoll_completed = 0;
- } else {
- if (kiocb->ki_flags & IOCB_HIPRI)
- return -EINVAL;
- kiocb->ki_complete = io_complete_rw;
- }
-
- ioprio = READ_ONCE(sqe->ioprio);
- if (ioprio) {
- ret = ioprio_check_cap(ioprio);
- if (ret)
- return ret;
-
- kiocb->ki_ioprio = ioprio;
- } else {
- kiocb->ki_ioprio = get_current_ioprio();
- }
-
- req->imu = NULL;
- req->rw.addr = READ_ONCE(sqe->addr);
- req->rw.len = READ_ONCE(sqe->len);
- req->buf_index = READ_ONCE(sqe->buf_index);
- return 0;
-}
-
-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
-{
- switch (ret) {
- case -EIOCBQUEUED:
- break;
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- case -ERESTARTNOHAND:
- case -ERESTART_RESTARTBLOCK:
- /*
- * We can't just restart the syscall, since previously
- * submitted sqes may already be in progress. Just fail this
- * IO with EINTR.
- */
- ret = -EINTR;
- fallthrough;
- default:
- kiocb->ki_complete(kiocb, ret);
- }
-}
-
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
- unsigned int issue_flags)
-{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- struct io_async_rw *io = req->async_data;
-
- /* add previously done IO, if any */
- if (req_has_async_data(req) && io->bytes_done > 0) {
- if (ret < 0)
- ret = io->bytes_done;
- else
- ret += io->bytes_done;
- }
-
- if (req->flags & REQ_F_CUR_POS)
- req->file->f_pos = kiocb->ki_pos;
- if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
- __io_complete_rw(req, ret, 0, issue_flags);
- else
- io_rw_done(kiocb, ret);
-
- if (req->flags & REQ_F_REISSUE) {
- req->flags &= ~REQ_F_REISSUE;
- if (io_resubmit_prep(req)) {
- io_req_task_queue_reissue(req);
- } else {
- unsigned int cflags = io_put_rw_kbuf(req);
- struct io_ring_ctx *ctx = req->ctx;
-
- req_set_fail(req);
- if (issue_flags & IO_URING_F_UNLOCKED) {
- mutex_lock(&ctx->uring_lock);
- __io_req_complete(req, issue_flags, ret, cflags);
- mutex_unlock(&ctx->uring_lock);
- } else {
- __io_req_complete(req, issue_flags, ret, cflags);
- }
- }
- }
-}
-
-static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
- struct io_mapped_ubuf *imu)
-{
- size_t len = req->rw.len;
- u64 buf_end, buf_addr = req->rw.addr;
- size_t offset;
-
- if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
- return -EFAULT;
- /* not inside the mapped region */
- if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
- return -EFAULT;
-
- /*
- * May not be the start of the buffer; set the size appropriately
- * and advance us to the beginning.
- */
- offset = buf_addr - imu->ubuf;
- iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
-
- if (offset) {
- /*
- * Don't use iov_iter_advance() here, as it's really slow for
- * using the latter parts of a big fixed buffer - it iterates
- * over each segment manually. We can cheat a bit here, because
- * we know that:
- *
- * 1) it's a BVEC iter, we set it up
- * 2) all bvecs are PAGE_SIZE in size, except potentially the
- * first and last bvec
- *
- * So just find our index, and adjust the iterator afterwards.
- * If the offset is within the first bvec (or the whole first
- * bvec), just use iov_iter_advance(). This makes it easier
- * since we can just skip the first segment, which may not
- * be PAGE_SIZE aligned.
- */
- const struct bio_vec *bvec = imu->bvec;
-
- if (offset <= bvec->bv_len) {
- iov_iter_advance(iter, offset);
- } else {
- unsigned long seg_skip;
-
- /* skip first vec */
- offset -= bvec->bv_len;
- seg_skip = 1 + (offset >> PAGE_SHIFT);
-
- iter->bvec = bvec + seg_skip;
- iter->nr_segs -= seg_skip;
- iter->count -= bvec->bv_len + offset;
- iter->iov_offset = offset & ~PAGE_MASK;
- }
- }
-
- return 0;
-}
-
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
-{
- struct io_mapped_ubuf *imu = req->imu;
- u16 index, buf_index = req->buf_index;
-
- if (likely(!imu)) {
- struct io_ring_ctx *ctx = req->ctx;
-
- if (unlikely(buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- io_req_set_rsrc_node(req, ctx);
- index = array_index_nospec(buf_index, ctx->nr_user_bufs);
- imu = READ_ONCE(ctx->user_bufs[index]);
- req->imu = imu;
- }
- return __io_import_fixed(req, rw, iter, imu);
-}
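
Not part of this patch: a sketch of the userspace side of fixed buffers (assuming liburing), which is what io_import_fixed()/__io_import_fixed() resolve in the kernel. An iovec table is registered once, and later reads reference a slot by buf_index via IORING_OP_READ_FIXED; the read may cover any sub-range of the registered buffer, which is why the kernel validates ubuf/ubuf_end above.

#include <liburing.h>

int read_fixed_example(struct io_uring *ring, int fd)
{
	static char buffer[8192];
	struct iovec iov = { .iov_base = buffer, .iov_len = sizeof(buffer) };
	struct io_uring_sqe *sqe;

	/* pins the pages and builds the bvec table walked by __io_import_fixed() */
	if (io_uring_register_buffers(ring, &iov, 1))
		return -1;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	/* buf_index 0 selects the iovec registered above */
	io_uring_prep_read_fixed(sqe, fd, buffer, sizeof(buffer), 0, 0);
	return io_uring_submit(ring);
}
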
-
-static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
-{
- if (needs_lock)
- mutex_unlock(&ctx->uring_lock);
-}
-
-static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
-{
- /*
- * "Normal" inline submissions always hold the uring_lock, since we
- * grab it from the system call. Same is true for the SQPOLL offload.
- * The only exception is when we've detached the request and issue it
- * from an async worker thread; grab the lock for that case.
- */
- if (needs_lock)
- mutex_lock(&ctx->uring_lock);
-}
-
-static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
- int bgid, unsigned int issue_flags)
-{
- struct io_buffer *kbuf = req->kbuf;
- struct io_buffer *head;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
- if (req->flags & REQ_F_BUFFER_SELECTED)
- return kbuf;
-
- io_ring_submit_lock(req->ctx, needs_lock);
-
- lockdep_assert_held(&req->ctx->uring_lock);
-
- head = xa_load(&req->ctx->io_buffers, bgid);
- if (head) {
- if (!list_empty(&head->list)) {
- kbuf = list_last_entry(&head->list, struct io_buffer,
- list);
- list_del(&kbuf->list);
- } else {
- kbuf = head;
- xa_erase(&req->ctx->io_buffers, bgid);
- }
- if (*len > kbuf->len)
- *len = kbuf->len;
- req->flags |= REQ_F_BUFFER_SELECTED;
- req->kbuf = kbuf;
- } else {
- kbuf = ERR_PTR(-ENOBUFS);
- }
-
- io_ring_submit_unlock(req->ctx, needs_lock);
- return kbuf;
-}
-
-static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
- unsigned int issue_flags)
-{
- struct io_buffer *kbuf;
- u16 bgid;
-
- bgid = req->buf_index;
- kbuf = io_buffer_select(req, len, bgid, issue_flags);
- if (IS_ERR(kbuf))
- return kbuf;
- return u64_to_user_ptr(kbuf->addr);
-}
-
-#ifdef CONFIG_COMPAT
-static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
- unsigned int issue_flags)
-{
- struct compat_iovec __user *uiov;
- compat_ssize_t clen;
- void __user *buf;
- ssize_t len;
-
- uiov = u64_to_user_ptr(req->rw.addr);
- if (!access_ok(uiov, sizeof(*uiov)))
- return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
- return -EFAULT;
- if (clen < 0)
- return -EINVAL;
-
- len = clen;
- buf = io_rw_buffer_select(req, &len, issue_flags);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
- iov[0].iov_base = buf;
- iov[0].iov_len = (compat_size_t) len;
- return 0;
-}
-#endif
-
-static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
- unsigned int issue_flags)
-{
- struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
- void __user *buf;
- ssize_t len;
-
- if (copy_from_user(iov, uiov, sizeof(*uiov)))
- return -EFAULT;
-
- len = iov[0].iov_len;
- if (len < 0)
- return -EINVAL;
- buf = io_rw_buffer_select(req, &len, issue_flags);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
- iov[0].iov_base = buf;
- iov[0].iov_len = len;
- return 0;
-}
-
-static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
- unsigned int issue_flags)
-{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- struct io_buffer *kbuf = req->kbuf;
-
- iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
- iov[0].iov_len = kbuf->len;
- return 0;
- }
- if (req->rw.len != 1)
- return -EINVAL;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- return io_compat_import(req, iov, issue_flags);
-#endif
-
- return __io_iov_buffer_select(req, iov, issue_flags);
-}
-
-static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
- struct io_rw_state *s,
- unsigned int issue_flags)
-{
- struct iov_iter *iter = &s->iter;
- u8 opcode = req->opcode;
- struct iovec *iovec;
- void __user *buf;
- size_t sqe_len;
- ssize_t ret;
-
- BUILD_BUG_ON(ERR_PTR(0) != NULL);
-
- if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED)
- return ERR_PTR(io_import_fixed(req, rw, iter));
-
- /* buffer index only valid with fixed read/write, or buffer select */
- if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)))
- return ERR_PTR(-EINVAL);
-
- buf = u64_to_user_ptr(req->rw.addr);
- sqe_len = req->rw.len;
-
- if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
- if (req->flags & REQ_F_BUFFER_SELECT) {
- buf = io_rw_buffer_select(req, &sqe_len, issue_flags);
- if (IS_ERR(buf))
- return ERR_CAST(buf);
- req->rw.len = sqe_len;
- }
-
- ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
- return ERR_PTR(ret);
- }
-
- iovec = s->fast_iov;
- if (req->flags & REQ_F_BUFFER_SELECT) {
- ret = io_iov_buffer_select(req, iovec, issue_flags);
- if (!ret)
- iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
- return ERR_PTR(ret);
- }
-
- ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
- req->ctx->compat);
- if (unlikely(ret < 0))
- return ERR_PTR(ret);
- return iovec;
-}
-
-static inline int io_import_iovec(int rw, struct io_kiocb *req,
- struct iovec **iovec, struct io_rw_state *s,
- unsigned int issue_flags)
-{
- *iovec = __io_import_iovec(rw, req, s, issue_flags);
- if (unlikely(IS_ERR(*iovec)))
- return PTR_ERR(*iovec);
-
- iov_iter_save_state(&s->iter, &s->iter_state);
- return 0;
-}
-
-static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
-{
- return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
-}
-
-/*
- * For files that don't have ->read_iter() and ->write_iter(), handle them
- * by looping over ->read() or ->write() manually.
- */
-static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
-{
- struct kiocb *kiocb = &req->rw.kiocb;
- struct file *file = req->file;
- ssize_t ret = 0;
-
- /*
- * Don't support polled IO through this interface, and we can't
- * support non-blocking either. For the latter, this just causes
- * the kiocb to be handled from an async context.
- */
- if (kiocb->ki_flags & IOCB_HIPRI)
- return -EOPNOTSUPP;
- if ((kiocb->ki_flags & IOCB_NOWAIT) &&
- !(kiocb->ki_filp->f_flags & O_NONBLOCK))
- return -EAGAIN;
-
- while (iov_iter_count(iter)) {
- struct iovec iovec;
- ssize_t nr;
-
- if (!iov_iter_is_bvec(iter)) {
- iovec = iov_iter_iovec(iter);
- } else {
- iovec.iov_base = u64_to_user_ptr(req->rw.addr);
- iovec.iov_len = req->rw.len;
- }
-
- if (rw == READ) {
- nr = file->f_op->read(file, iovec.iov_base,
- iovec.iov_len, io_kiocb_ppos(kiocb));
- } else {
- nr = file->f_op->write(file, iovec.iov_base,
- iovec.iov_len, io_kiocb_ppos(kiocb));
- }
-
- if (nr < 0) {
- if (!ret)
- ret = nr;
- break;
- }
- if (!iov_iter_is_bvec(iter)) {
- iov_iter_advance(iter, nr);
- } else {
- req->rw.len -= nr;
- req->rw.addr += nr;
- }
- ret += nr;
- if (nr != iovec.iov_len)
- break;
- }
-
- return ret;
-}
-
-static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
- const struct iovec *fast_iov, struct iov_iter *iter)
-{
- struct io_async_rw *rw = req->async_data;
-
- memcpy(&rw->s.iter, iter, sizeof(*iter));
- rw->free_iovec = iovec;
- rw->bytes_done = 0;
- /* can only be fixed buffers, no need to do anything */
- if (iov_iter_is_bvec(iter))
- return;
- if (!iovec) {
- unsigned iov_off = 0;
-
- rw->s.iter.iov = rw->s.fast_iov;
- if (iter->iov != fast_iov) {
- iov_off = iter->iov - fast_iov;
- rw->s.iter.iov += iov_off;
- }
- if (rw->s.fast_iov != fast_iov)
- memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
- sizeof(struct iovec) * iter->nr_segs);
- } else {
- req->flags |= REQ_F_NEED_CLEANUP;
- }
-}
-
-static inline bool io_alloc_async_data(struct io_kiocb *req)
-{
- WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
- req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
- if (req->async_data) {
- req->flags |= REQ_F_ASYNC_DATA;
- return false;
- }
- return true;
-}
-
-static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
- struct io_rw_state *s, bool force)
-{
- if (!force && !io_op_defs[req->opcode].needs_async_setup)
- return 0;
- if (!req_has_async_data(req)) {
- struct io_async_rw *iorw;
-
- if (io_alloc_async_data(req)) {
- kfree(iovec);
- return -ENOMEM;
- }
-
- io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
- iorw = req->async_data;
- /* we've copied and mapped the iter, ensure state is saved */
- iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
- }
- return 0;
-}
-
-static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
-{
- struct io_async_rw *iorw = req->async_data;
- struct iovec *iov;
- int ret;
-
- /* submission path, ->uring_lock should already be taken */
- ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
- if (unlikely(ret < 0))
- return ret;
-
- iorw->bytes_done = 0;
- iorw->free_iovec = iov;
- if (iov)
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (unlikely(!(req->file->f_mode & FMODE_READ)))
- return -EBADF;
- return io_prep_rw(req, sqe);
-}
-
-/*
- * This is our waitqueue callback handler, registered through __folio_lock_async()
- * when we initially tried to do the IO with our waitqueue armed in the iocb.
- * This gets called when the page is unlocked, and we generally expect that to
- * happen when the page IO is completed and the page is now uptodate. This will
- * queue a task_work based retry of the operation, attempting to copy the data
- * again. If the latter fails because the page was NOT uptodate, then we will
- * do a thread based blocking retry of the operation. That's the unexpected
- * slow path.
- */
-static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
- int sync, void *arg)
-{
- struct wait_page_queue *wpq;
- struct io_kiocb *req = wait->private;
- struct wait_page_key *key = arg;
-
- wpq = container_of(wait, struct wait_page_queue, wait);
-
- if (!wake_page_match(wpq, key))
- return 0;
-
- req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
- list_del_init(&wait->entry);
- io_req_task_queue(req);
- return 1;
-}
-
-/*
- * This controls whether a given IO request should be armed for async page
- * based retry. If we return false here, the request is handed to the async
- * worker threads for retry. If we're doing buffered reads on a regular file,
- * we prepare a private wait_page_queue entry and retry the operation. This
- * will either succeed because the page is now uptodate and unlocked, or it
- * will register a callback when the page is unlocked at IO completion. Through
- * that callback, io_uring uses task_work to set up a retry of the operation.
- * That retry will attempt the buffered read again. The retry will generally
- * succeed, or in rare cases where it fails, we then fall back to using the
- * async worker threads for a blocking retry.
- */
-static bool io_rw_should_retry(struct io_kiocb *req)
-{
- struct io_async_rw *rw = req->async_data;
- struct wait_page_queue *wait = &rw->wpq;
- struct kiocb *kiocb = &req->rw.kiocb;
-
- /* never retry for NOWAIT, we just complete with -EAGAIN */
- if (req->flags & REQ_F_NOWAIT)
- return false;
-
- /* Only for buffered IO */
- if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
- return false;
-
- /*
- * just use poll if we can, and don't attempt if the fs doesn't
- * support callback based unlocks
- */
- if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
- return false;
-
- wait->wait.func = io_async_buf_func;
- wait->wait.private = req;
- wait->wait.flags = 0;
- INIT_LIST_HEAD(&wait->wait.entry);
- kiocb->ki_flags |= IOCB_WAITQ;
- kiocb->ki_flags &= ~IOCB_NOWAIT;
- kiocb->ki_waitq = wait;
- return true;
-}
-
-static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
-{
- if (likely(req->file->f_op->read_iter))
- return call_read_iter(req->file, &req->rw.kiocb, iter);
- else if (req->file->f_op->read)
- return loop_rw_iter(READ, req, iter);
- else
- return -EINVAL;
-}
-
-static bool need_read_all(struct io_kiocb *req)
-{
- return req->flags & REQ_F_ISREG ||
- S_ISBLK(file_inode(req->file)->i_mode);
-}
-
-static int io_read(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rw_state __s, *s = &__s;
- struct iovec *iovec;
- struct kiocb *kiocb = &req->rw.kiocb;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- struct io_async_rw *rw;
- ssize_t ret, ret2;
-
- if (!req_has_async_data(req)) {
- ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
- if (unlikely(ret < 0))
- return ret;
- } else {
- rw = req->async_data;
- s = &rw->s;
- /*
- * We come here from an earlier attempt, restore our state to
- * match in case it doesn't. It's cheap enough that we don't
- * need to make this conditional.
- */
- iov_iter_restore(&s->iter, &s->iter_state);
- iovec = NULL;
- }
- req->result = iov_iter_count(&s->iter);
-
- if (force_nonblock) {
- /* If the file doesn't support async, just async punt */
- if (unlikely(!io_file_supports_nowait(req))) {
- ret = io_setup_async_rw(req, iovec, s, true);
- return ret ?: -EAGAIN;
- }
- kiocb->ki_flags |= IOCB_NOWAIT;
- } else {
- /* Ensure we clear previously set non-block flag */
- kiocb->ki_flags &= ~IOCB_NOWAIT;
- }
-
- ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
- if (unlikely(ret)) {
- kfree(iovec);
- return ret;
- }
-
- ret = io_iter_do_read(req, &s->iter);
-
- if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
- req->flags &= ~REQ_F_REISSUE;
- /* IOPOLL retry should happen for io-wq threads */
- if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
- goto done;
- /* no retry on NONBLOCK or RWF_NOWAIT */
- if (req->flags & REQ_F_NOWAIT)
- goto done;
- ret = 0;
- } else if (ret == -EIOCBQUEUED) {
- goto out_free;
- } else if (ret == req->result || ret <= 0 || !force_nonblock ||
- (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
- /* read all, failed, already did sync or don't want to retry */
- goto done;
- }
-
- /*
- * Don't depend on the iter state matching what was consumed, or being
- * untouched in case of error. Restore it and we'll advance it
- * manually if we need to.
- */
- iov_iter_restore(&s->iter, &s->iter_state);
-
- ret2 = io_setup_async_rw(req, iovec, s, true);
- if (ret2)
- return ret2;
-
- iovec = NULL;
- rw = req->async_data;
- s = &rw->s;
- /*
- * Now use our persistent iterator and state, if we aren't already.
- * We've restored and mapped the iter to match.
- */
-
- do {
- /*
- * We end up here because of a partial read, either from
- * above or inside this loop. Advance the iter by the bytes
- * that were consumed.
- */
- iov_iter_advance(&s->iter, ret);
- if (!iov_iter_count(&s->iter))
- break;
- rw->bytes_done += ret;
- iov_iter_save_state(&s->iter, &s->iter_state);
-
- /* if we can retry, do so with the callbacks armed */
- if (!io_rw_should_retry(req)) {
- kiocb->ki_flags &= ~IOCB_WAITQ;
- return -EAGAIN;
- }
-
- /*
- * Now retry read with the IOCB_WAITQ parts set in the iocb. If
- * we get -EIOCBQUEUED, then we'll get a notification when the
- * desired page gets unlocked. We can also get a partial read
- * here, and if we do, then just retry at the new offset.
- */
- ret = io_iter_do_read(req, &s->iter);
- if (ret == -EIOCBQUEUED)
- return 0;
- /* we got some bytes, but not all. retry. */
- kiocb->ki_flags &= ~IOCB_WAITQ;
- iov_iter_restore(&s->iter, &s->iter_state);
- } while (ret > 0);
-done:
- kiocb_done(kiocb, ret, issue_flags);
-out_free:
- /* it's faster to check here than to delegate to kfree */
- if (iovec)
- kfree(iovec);
- return 0;
-}
-
-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
- return -EBADF;
- req->rw.kiocb.ki_hint = ki_hint_validate(file_write_hint(req->file));
- return io_prep_rw(req, sqe);
-}
-
-static int io_write(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rw_state __s, *s = &__s;
- struct iovec *iovec;
- struct kiocb *kiocb = &req->rw.kiocb;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- ssize_t ret, ret2;
-
- if (!req_has_async_data(req)) {
- ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
- if (unlikely(ret < 0))
- return ret;
- } else {
- struct io_async_rw *rw = req->async_data;
-
- s = &rw->s;
- iov_iter_restore(&s->iter, &s->iter_state);
- iovec = NULL;
- }
- req->result = iov_iter_count(&s->iter);
-
- if (force_nonblock) {
- /* If the file doesn't support async, just async punt */
- if (unlikely(!io_file_supports_nowait(req)))
- goto copy_iov;
-
- /* the buffered (non-direct IO) file write path doesn't support NOWAIT */
- if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
- (req->flags & REQ_F_ISREG))
- goto copy_iov;
-
- kiocb->ki_flags |= IOCB_NOWAIT;
- } else {
- /* Ensure we clear previously set non-block flag */
- kiocb->ki_flags &= ~IOCB_NOWAIT;
- }
-
- ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
- if (unlikely(ret))
- goto out_free;
-
- /*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * io_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
- */
- if (req->flags & REQ_F_ISREG) {
- sb_start_write(file_inode(req->file)->i_sb);
- __sb_writers_release(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE);
- }
- kiocb->ki_flags |= IOCB_WRITE;
-
- if (likely(req->file->f_op->write_iter))
- ret2 = call_write_iter(req->file, kiocb, &s->iter);
- else if (req->file->f_op->write)
- ret2 = loop_rw_iter(WRITE, req, &s->iter);
- else
- ret2 = -EINVAL;
-
- if (req->flags & REQ_F_REISSUE) {
- req->flags &= ~REQ_F_REISSUE;
- ret2 = -EAGAIN;
- }
-
- /*
- * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
- * retry them without IOCB_NOWAIT.
- */
- if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
- ret2 = -EAGAIN;
- /* no retry on NONBLOCK or RWF_NOWAIT */
- if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
- goto done;
- if (!force_nonblock || ret2 != -EAGAIN) {
- /* IOPOLL retry should happen for io-wq threads */
- if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
- goto copy_iov;
-done:
- kiocb_done(kiocb, ret2, issue_flags);
- } else {
-copy_iov:
- iov_iter_restore(&s->iter, &s->iter_state);
- ret = io_setup_async_rw(req, iovec, s, false);
- return ret ?: -EAGAIN;
- }
-out_free:
- /* it's reportedly faster than delegating the null check to kfree() */
- if (iovec)
- kfree(iovec);
- return ret;
-}
-
-static int io_renameat_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_rename *ren = &req->rename;
- const char __user *oldf, *newf;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
- ren->old_dfd = READ_ONCE(sqe->fd);
- oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
- newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- ren->new_dfd = READ_ONCE(sqe->len);
- ren->flags = READ_ONCE(sqe->rename_flags);
-
- ren->oldpath = getname(oldf);
- if (IS_ERR(ren->oldpath))
- return PTR_ERR(ren->oldpath);
-
- ren->newpath = getname(newf);
- if (IS_ERR(ren->newpath)) {
- putname(ren->oldpath);
- return PTR_ERR(ren->newpath);
- }
-
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rename *ren = &req->rename;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
- ren->newpath, ren->flags);
-
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int io_unlinkat_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_unlink *un = &req->unlink;
- const char __user *fname;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
- sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
- un->dfd = READ_ONCE(sqe->fd);
-
- un->flags = READ_ONCE(sqe->unlink_flags);
- if (un->flags & ~AT_REMOVEDIR)
- return -EINVAL;
-
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
- un->filename = getname(fname);
- if (IS_ERR(un->filename))
- return PTR_ERR(un->filename);
-
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_unlink *un = &req->unlink;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- if (un->flags & AT_REMOVEDIR)
- ret = do_rmdir(un->dfd, un->filename);
- else
- ret = do_unlinkat(un->dfd, un->filename);
-
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int io_mkdirat_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_mkdir *mkd = &req->mkdir;
- const char __user *fname;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
- sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
- mkd->dfd = READ_ONCE(sqe->fd);
- mkd->mode = READ_ONCE(sqe->len);
-
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
- mkd->filename = getname(fname);
- if (IS_ERR(mkd->filename))
- return PTR_ERR(mkd->filename);
-
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_mkdir *mkd = &req->mkdir;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
-
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int io_symlinkat_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_symlink *sl = &req->symlink;
- const char __user *oldpath, *newpath;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
- sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
- sl->new_dfd = READ_ONCE(sqe->fd);
- oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
- newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
-
- sl->oldpath = getname(oldpath);
- if (IS_ERR(sl->oldpath))
- return PTR_ERR(sl->oldpath);
-
- sl->newpath = getname(newpath);
- if (IS_ERR(sl->newpath)) {
- putname(sl->oldpath);
- return PTR_ERR(sl->newpath);
- }
-
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_symlink *sl = &req->symlink;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
-
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int io_linkat_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_hardlink *lnk = &req->hardlink;
- const char __user *oldf, *newf;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
- lnk->old_dfd = READ_ONCE(sqe->fd);
- lnk->new_dfd = READ_ONCE(sqe->len);
- oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
- newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- lnk->flags = READ_ONCE(sqe->hardlink_flags);
-
- lnk->oldpath = getname(oldf);
- if (IS_ERR(lnk->oldpath))
- return PTR_ERR(lnk->oldpath);
-
- lnk->newpath = getname(newf);
- if (IS_ERR(lnk->newpath)) {
- putname(lnk->oldpath);
- return PTR_ERR(lnk->newpath);
- }
-
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_hardlink *lnk = &req->hardlink;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
- lnk->newpath, lnk->flags);
-
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
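
Not part of this patch: the prep/issue pairs above back the filesystem "*at" opcodes. A liburing caller (sketch, assuming liburing 2.1+ helpers) drives them as below; all of them punt to an io-wq worker when issued with IO_URING_F_NONBLOCK, as seen above, since the underlying VFS calls may block.

#include <liburing.h>
#include <fcntl.h>

void queue_fs_ops(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt", AT_FDCWD, "new.txt", 0);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_mkdirat(sqe, AT_FDCWD, "newdir", 0755);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_unlinkat(sqe, AT_FDCWD, "stale.txt", 0);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_linkat(sqe, AT_FDCWD, "new.txt", AT_FDCWD, "hardlink.txt", 0);

	io_uring_submit(ring);
}
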
-
-static int io_shutdown_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
-#if defined(CONFIG_NET)
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
- sqe->buf_index || sqe->splice_fd_in))
- return -EINVAL;
-
- req->shutdown.how = READ_ONCE(sqe->len);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
-{
-#if defined(CONFIG_NET)
- struct socket *sock;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- ret = __sys_shutdown_sock(sock, req->shutdown.how);
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int __io_splice_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_splice *sp = &req->splice;
- unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- sp->file_in = NULL;
- sp->len = READ_ONCE(sqe->len);
- sp->flags = READ_ONCE(sqe->splice_flags);
-
- if (unlikely(sp->flags & ~valid_flags))
- return -EINVAL;
-
- sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
- (sp->flags & SPLICE_F_FD_IN_FIXED));
- if (!sp->file_in)
- return -EBADF;
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_tee_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
- return -EINVAL;
- return __io_splice_prep(req, sqe);
-}
-
-static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_splice *sp = &req->splice;
- struct file *in = sp->file_in;
- struct file *out = sp->file_out;
- unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
- long ret = 0;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
- if (sp->len)
- ret = do_tee(in, out, sp->len, flags);
-
- if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
- io_put_file(in);
- req->flags &= ~REQ_F_NEED_CLEANUP;
-
- if (ret != sp->len)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_splice *sp = &req->splice;
-
- sp->off_in = READ_ONCE(sqe->splice_off_in);
- sp->off_out = READ_ONCE(sqe->off);
- return __io_splice_prep(req, sqe);
-}
-
-static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_splice *sp = &req->splice;
- struct file *in = sp->file_in;
- struct file *out = sp->file_out;
- unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
- loff_t *poff_in, *poff_out;
- long ret = 0;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
- poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
-
- if (sp->len)
- ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-
- if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
- io_put_file(in);
- req->flags &= ~REQ_F_NEED_CLEANUP;
-
- if (ret != sp->len)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
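
Not part of this patch: a hedged liburing sketch of the splice opcode handled above. Offsets of -1 mean "use the current file position", mirroring the poff_in/poff_out handling in io_splice().

#include <liburing.h>

int splice_example(struct io_uring *ring, int fd_in, int fd_out, unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* -1 offsets: read from and write to the current file positions */
	io_uring_prep_splice(sqe, fd_in, -1, fd_out, -1, len, 0);
	return io_uring_submit(ring);
}
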
-
-/*
- * IORING_OP_NOP just posts a completion event, nothing else.
- */
-static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- __io_req_complete(req, issue_flags, 0, 0);
- return 0;
-}
-
-static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- if (!req->file)
- return -EBADF;
-
- if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
- sqe->splice_fd_in))
- return -EINVAL;
-
- req->sync.flags = READ_ONCE(sqe->fsync_flags);
- if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
- return -EINVAL;
-
- req->sync.off = READ_ONCE(sqe->off);
- req->sync.len = READ_ONCE(sqe->len);
- return 0;
-}
-
-static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
-{
- loff_t end = req->sync.off + req->sync.len;
- int ret;
-
- /* fsync always requires a blocking context */
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = vfs_fsync_range(req->file, req->sync.off,
- end > 0 ? end : LLONG_MAX,
- req->sync.flags & IORING_FSYNC_DATASYNC);
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
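
Not part of this patch: as the code above shows, fsync always needs a blocking context, so a nonblocking issue returns -EAGAIN and the request is retried from a worker. A minimal liburing sketch of queueing a datasync:

#include <liburing.h>

int queue_fdatasync(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	return io_uring_submit(ring);
}
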
-
-static int io_fallocate_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
- sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- req->sync.off = READ_ONCE(sqe->off);
- req->sync.len = READ_ONCE(sqe->addr);
- req->sync.mode = READ_ONCE(sqe->len);
- return 0;
-}
-
-static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
-{
- int ret;
-
- /* fallocate always requires a blocking context */
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
- ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
- req->sync.len);
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- const char __user *fname;
- int ret;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (unlikely(sqe->ioprio || sqe->buf_index))
- return -EINVAL;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
- /* open.how should be already initialised */
- if (!(req->open.how.flags & O_PATH) && force_o_largefile())
- req->open.how.flags |= O_LARGEFILE;
-
- req->open.dfd = READ_ONCE(sqe->fd);
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
- req->open.filename = getname(fname);
- if (IS_ERR(req->open.filename)) {
- ret = PTR_ERR(req->open.filename);
- req->open.filename = NULL;
- return ret;
- }
-
- req->open.file_slot = READ_ONCE(sqe->file_index);
- if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
- return -EINVAL;
-
- req->open.nofile = rlimit(RLIMIT_NOFILE);
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- u64 mode = READ_ONCE(sqe->len);
- u64 flags = READ_ONCE(sqe->open_flags);
-
- req->open.how = build_open_how(flags, mode);
- return __io_openat_prep(req, sqe);
-}
-
-static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct open_how __user *how;
- size_t len;
- int ret;
-
- how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- len = READ_ONCE(sqe->len);
- if (len < OPEN_HOW_SIZE_VER0)
- return -EINVAL;
-
- ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
- len);
- if (ret)
- return ret;
-
- return __io_openat_prep(req, sqe);
-}
-
-static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct open_flags op;
- struct file *file;
- bool resolve_nonblock, nonblock_set;
- bool fixed = !!req->open.file_slot;
- int ret;
-
- ret = build_open_flags(&req->open.how, &op);
- if (ret)
- goto err;
- nonblock_set = op.open_flag & O_NONBLOCK;
- resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
- if (issue_flags & IO_URING_F_NONBLOCK) {
- /*
- * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
- * it'll always return -EAGAIN.
- */
- if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
- return -EAGAIN;
- op.lookup_flags |= LOOKUP_CACHED;
- op.open_flag |= O_NONBLOCK;
- }
-
- if (!fixed) {
- ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
- if (ret < 0)
- goto err;
- }
-
- file = do_filp_open(req->open.dfd, req->open.filename, &op);
- if (IS_ERR(file)) {
- /*
- * We could hang on to this 'fd' on retrying, but it seems like a
- * marginal gain for something that is now known to be a slower
- * path. So just put it, and we'll get a new one when we retry.
- */
- if (!fixed)
- put_unused_fd(ret);
-
- ret = PTR_ERR(file);
- /* only retry if RESOLVE_CACHED wasn't already set by application */
- if (ret == -EAGAIN &&
- (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
- return -EAGAIN;
- goto err;
- }
-
- if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
- file->f_flags &= ~O_NONBLOCK;
- fsnotify_open(file);
-
- if (!fixed)
- fd_install(ret, file);
- else
- ret = io_install_fixed_file(req, file, issue_flags,
- req->open.file_slot - 1);
-err:
- putname(req->open.filename);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
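
Not part of this patch: a userspace sketch of openat2 via io_uring (assuming liburing and linux/openat2.h). RESOLVE_CACHED is what the code above layers on automatically, as LOOKUP_CACHED, for the nonblocking attempt, falling back to a worker when the dcache can't satisfy the lookup.

#include <liburing.h>
#include <linux/openat2.h>
#include <fcntl.h>
#include <string.h>

int queue_openat2(struct io_uring *ring, const char *path)
{
	/* must stay valid until submit: io_openat2_prep() copies it then */
	static struct open_how how;
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY | O_CLOEXEC;
	io_uring_prep_openat2(sqe, AT_FDCWD, path, &how);
	return io_uring_submit(ring);
}
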
-
-static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
-{
- return io_openat2(req, issue_flags);
-}
-
-static int io_remove_buffers_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_provide_buf *p = &req->pbuf;
- u64 tmp;
-
- if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
- sqe->splice_fd_in)
- return -EINVAL;
-
- tmp = READ_ONCE(sqe->fd);
- if (!tmp || tmp > USHRT_MAX)
- return -EINVAL;
-
- memset(p, 0, sizeof(*p));
- p->nbufs = tmp;
- p->bgid = READ_ONCE(sqe->buf_group);
- return 0;
-}
-
-static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
- int bgid, unsigned nbufs)
-{
- unsigned i = 0;
-
- /* shouldn't happen */
- if (!nbufs)
- return 0;
-
- /* the head kbuf is the list itself */
- while (!list_empty(&buf->list)) {
- struct io_buffer *nxt;
-
- nxt = list_first_entry(&buf->list, struct io_buffer, list);
- list_del(&nxt->list);
- kfree(nxt);
- if (++i == nbufs)
- return i;
- }
- i++;
- kfree(buf);
- xa_erase(&ctx->io_buffers, bgid);
-
- return i;
-}
-
-static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_provide_buf *p = &req->pbuf;
- struct io_ring_ctx *ctx = req->ctx;
- struct io_buffer *head;
- int ret = 0;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
- io_ring_submit_lock(ctx, needs_lock);
-
- lockdep_assert_held(&ctx->uring_lock);
-
- ret = -ENOENT;
- head = xa_load(&ctx->io_buffers, p->bgid);
- if (head)
- ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
- if (ret < 0)
- req_set_fail(req);
-
- /* complete before unlock, IOPOLL may need the lock */
- __io_req_complete(req, issue_flags, ret, 0);
- io_ring_submit_unlock(ctx, needs_lock);
- return 0;
-}
-
-static int io_provide_buffers_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- unsigned long size, tmp_check;
- struct io_provide_buf *p = &req->pbuf;
- u64 tmp;
-
- if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
- return -EINVAL;
-
- tmp = READ_ONCE(sqe->fd);
- if (!tmp || tmp > USHRT_MAX)
- return -E2BIG;
- p->nbufs = tmp;
- p->addr = READ_ONCE(sqe->addr);
- p->len = READ_ONCE(sqe->len);
-
- if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
- &size))
- return -EOVERFLOW;
- if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
- return -EOVERFLOW;
-
- size = (unsigned long)p->len * p->nbufs;
- if (!access_ok(u64_to_user_ptr(p->addr), size))
- return -EFAULT;
-
- p->bgid = READ_ONCE(sqe->buf_group);
- tmp = READ_ONCE(sqe->off);
- if (tmp > USHRT_MAX)
- return -E2BIG;
- p->bid = tmp;
- return 0;
-}
-
-static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
-{
- struct io_buffer *buf;
- u64 addr = pbuf->addr;
- int i, bid = pbuf->bid;
-
- for (i = 0; i < pbuf->nbufs; i++) {
- buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
- if (!buf)
- break;
-
- buf->addr = addr;
- buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
- buf->bid = bid;
- addr += pbuf->len;
- bid++;
- if (!*head) {
- INIT_LIST_HEAD(&buf->list);
- *head = buf;
- } else {
- list_add_tail(&buf->list, &(*head)->list);
- }
- }
-
- return i ? i : -ENOMEM;
-}
-
-static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_provide_buf *p = &req->pbuf;
- struct io_ring_ctx *ctx = req->ctx;
- struct io_buffer *head, *list;
- int ret = 0;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
- io_ring_submit_lock(ctx, needs_lock);
-
- lockdep_assert_held(&ctx->uring_lock);
-
- list = head = xa_load(&ctx->io_buffers, p->bgid);
-
- ret = io_add_buffers(p, &head);
- if (ret >= 0 && !list) {
- ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
- if (ret < 0)
- __io_remove_buffers(ctx, head, p->bgid, -1U);
- }
- if (ret < 0)
- req_set_fail(req);
- /* complete before unlock, IOPOLL may need the lock */
- __io_req_complete(req, issue_flags, ret, 0);
- io_ring_submit_unlock(ctx, needs_lock);
- return 0;
-}
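
Not part of this patch: a sketch (assuming liburing) of provided buffers as managed by io_add_buffers() and consumed by io_buffer_select() above. Userspace publishes a group of buffers, then submits reads with IOSQE_BUFFER_SELECT and only a group id; the kernel picks a buffer and reports its id back in cqe->flags.

#include <liburing.h>

#define GROUP_ID	7
#define NR_BUFS		8
#define BUF_LEN		4096

int provided_buffer_read(struct io_uring *ring, int fd)
{
	static char pool[NR_BUFS][BUF_LEN];
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int bid = -1;

	/* IORING_OP_PROVIDE_BUFFERS: hand NR_BUFS buffers to group GROUP_ID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, pool, BUF_LEN, NR_BUFS, GROUP_ID, 0);
	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))	/* consume the provide-buffers cqe */
		return -1;
	io_uring_cqe_seen(ring, cqe);

	/* read with no buffer: the kernel selects one from GROUP_ID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, NULL, BUF_LEN, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
	sqe->buf_group = GROUP_ID;
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	if (cqe->flags & IORING_CQE_F_BUFFER)
		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;	/* which buffer was used */
	io_uring_cqe_seen(ring, cqe);
	return bid;
}
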
-
-static int io_epoll_ctl_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
-#if defined(CONFIG_EPOLL)
- if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- req->epoll.epfd = READ_ONCE(sqe->fd);
- req->epoll.op = READ_ONCE(sqe->len);
- req->epoll.fd = READ_ONCE(sqe->off);
-
- if (ep_op_has_event(req->epoll.op)) {
- struct epoll_event __user *ev;
-
- ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
- if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
- return -EFAULT;
- }
-
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
-{
-#if defined(CONFIG_EPOLL)
- struct io_epoll *ie = &req->epoll;
- int ret;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-
- ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
- if (force_nonblock && ret == -EAGAIN)
- return -EAGAIN;
-
- if (ret < 0)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
- if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- req->madvise.addr = READ_ONCE(sqe->addr);
- req->madvise.len = READ_ONCE(sqe->len);
- req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
-{
-#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
- struct io_madvise *ma = &req->madvise;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
-}
-
-static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
- return -EINVAL;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- req->fadvise.offset = READ_ONCE(sqe->off);
- req->fadvise.len = READ_ONCE(sqe->len);
- req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
- return 0;
-}
-
-static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_fadvise *fa = &req->fadvise;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK) {
- switch (fa->advice) {
- case POSIX_FADV_NORMAL:
- case POSIX_FADV_RANDOM:
- case POSIX_FADV_SEQUENTIAL:
- break;
- default:
- return -EAGAIN;
- }
- }
-
- ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
- if (ret < 0)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-
-static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
- return -EINVAL;
- if (req->flags & REQ_F_FIXED_FILE)
- return -EBADF;
-
- req->statx.dfd = READ_ONCE(sqe->fd);
- req->statx.mask = READ_ONCE(sqe->len);
- req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
- req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- req->statx.flags = READ_ONCE(sqe->statx_flags);
-
- return 0;
-}
-
-static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_statx *ctx = &req->statx;
- int ret;
-
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
- ctx->buffer);
-
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
- sqe->rw_flags || sqe->buf_index)
- return -EINVAL;
- if (req->flags & REQ_F_FIXED_FILE)
- return -EBADF;
-
- req->close.fd = READ_ONCE(sqe->fd);
- req->close.file_slot = READ_ONCE(sqe->file_index);
- if (req->close.file_slot && req->close.fd)
- return -EINVAL;
-
- return 0;
-}
-
-static int io_close(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct files_struct *files = current->files;
- struct io_close *close = &req->close;
- struct fdtable *fdt;
- struct file *file = NULL;
- int ret = -EBADF;
-
- if (req->close.file_slot) {
- ret = io_close_fixed(req, issue_flags);
- goto err;
- }
-
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (close->fd >= fdt->max_fds) {
- spin_unlock(&files->file_lock);
- goto err;
- }
- file = fdt->fd[close->fd];
- if (!file || file->f_op == &io_uring_fops) {
- spin_unlock(&files->file_lock);
- file = NULL;
- goto err;
- }
-
- /* if the file has a flush method, be safe and punt to async */
- if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
- spin_unlock(&files->file_lock);
- return -EAGAIN;
- }
-
- ret = __close_fd_get_file(close->fd, &file);
- spin_unlock(&files->file_lock);
- if (ret < 0) {
- if (ret == -ENOENT)
- ret = -EBADF;
- goto err;
- }
-
- /* No ->flush() or already async, safely close from here */
- ret = filp_close(file, current->files);
-err:
- if (ret < 0)
- req_set_fail(req);
- if (file)
- fput(file);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-
-static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
- sqe->splice_fd_in))
- return -EINVAL;
-
- req->sync.off = READ_ONCE(sqe->off);
- req->sync.len = READ_ONCE(sqe->len);
- req->sync.flags = READ_ONCE(sqe->sync_range_flags);
- return 0;
-}
-
-static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
-{
- int ret;
-
- /* sync_file_range always requires a blocking context */
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = sync_file_range(req->file, req->sync.off, req->sync.len,
- req->sync.flags);
- if (ret < 0)
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
-}
-
-#if defined(CONFIG_NET)
-static int io_setup_async_msg(struct io_kiocb *req,
- struct io_async_msghdr *kmsg)
-{
- struct io_async_msghdr *async_msg = req->async_data;
-
- if (async_msg)
- return -EAGAIN;
- if (io_alloc_async_data(req)) {
- kfree(kmsg->free_iov);
- return -ENOMEM;
- }
- async_msg = req->async_data;
- req->flags |= REQ_F_NEED_CLEANUP;
- memcpy(async_msg, kmsg, sizeof(*kmsg));
- async_msg->msg.msg_name = &async_msg->addr;
- /* if we were using fast_iov, set it to the new one */
- if (!async_msg->free_iov)
- async_msg->msg.msg_iter.iov = async_msg->fast_iov;
-
- return -EAGAIN;
-}
-
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- iomsg->msg.msg_name = &iomsg->addr;
- iomsg->free_iov = iomsg->fast_iov;
- return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
- req->sr_msg.msg_flags, &iomsg->free_iov);
-}
-
-static int io_sendmsg_prep_async(struct io_kiocb *req)
-{
- int ret;
-
- ret = io_sendmsg_copy_hdr(req, req->async_data);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
-}
-
-static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_sr_msg *sr = &req->sr_msg;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
- sr->len = READ_ONCE(sqe->len);
- sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
- if (sr->msg_flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
- return 0;
-}
-
-static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_async_msghdr iomsg, *kmsg;
- struct socket *sock;
- unsigned flags;
- int min_ret = 0;
- int ret;
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- if (req_has_async_data(req)) {
- kmsg = req->async_data;
- } else {
- ret = io_sendmsg_copy_hdr(req, &iomsg);
- if (ret)
- return ret;
- kmsg = &iomsg;
- }
-
- flags = req->sr_msg.msg_flags;
- if (issue_flags & IO_URING_F_NONBLOCK)
- flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&kmsg->msg.msg_iter);
-
- ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
- if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < min_ret)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-
-static int io_send(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_sr_msg *sr = &req->sr_msg;
- struct msghdr msg;
- struct iovec iov;
- struct socket *sock;
- unsigned flags;
- int min_ret = 0;
- int ret;
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
- return ret;
-
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
-
- flags = req->sr_msg.msg_flags;
- if (issue_flags & IO_URING_F_NONBLOCK)
- flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&msg.msg_iter);
-
- msg.msg_flags = flags;
- ret = sock_sendmsg(sock, &msg);
- if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-
- if (ret < min_ret)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = &req->sr_msg;
- struct iovec __user *uiov;
- size_t iov_len;
- int ret;
-
- ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
- &iomsg->uaddr, &uiov, &iov_len);
- if (ret)
- return ret;
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- if (iov_len > 1)
- return -EINVAL;
- if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
- return -EFAULT;
- sr->len = iomsg->fast_iov[0].iov_len;
- iomsg->free_iov = NULL;
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
- &iomsg->free_iov, &iomsg->msg.msg_iter,
- false);
- if (ret > 0)
- ret = 0;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = &req->sr_msg;
- struct compat_iovec __user *uiov;
- compat_uptr_t ptr;
- compat_size_t len;
- int ret;
-
- ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
- &ptr, &len);
- if (ret)
- return ret;
-
- uiov = compat_ptr(ptr);
- if (req->flags & REQ_F_BUFFER_SELECT) {
- compat_ssize_t clen;
-
- if (len > 1)
- return -EINVAL;
- if (!access_ok(uiov, sizeof(*uiov)))
- return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
- return -EFAULT;
- if (clen < 0)
- return -EINVAL;
- sr->len = clen;
- iomsg->free_iov = NULL;
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
- UIO_FASTIOV, &iomsg->free_iov,
- &iomsg->msg.msg_iter, true);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-#endif
-
-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- iomsg->msg.msg_name = &iomsg->addr;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- return __io_compat_recvmsg_copy_hdr(req, iomsg);
-#endif
-
- return __io_recvmsg_copy_hdr(req, iomsg);
-}
-
-static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
- unsigned int issue_flags)
-{
- struct io_sr_msg *sr = &req->sr_msg;
-
- return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
-}
-
-static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
-{
- return io_put_kbuf(req, req->kbuf);
-}
-
-static int io_recvmsg_prep_async(struct io_kiocb *req)
-{
- int ret;
-
- ret = io_recvmsg_copy_hdr(req, req->async_data);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
-}
-
-static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_sr_msg *sr = &req->sr_msg;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
- sr->len = READ_ONCE(sqe->len);
- sr->bgid = READ_ONCE(sqe->buf_group);
- sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
- if (sr->msg_flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
- return 0;
-}
-
-static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_async_msghdr iomsg, *kmsg;
- struct socket *sock;
- struct io_buffer *kbuf;
- unsigned flags;
- int min_ret = 0;
- int ret, cflags = 0;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- if (req_has_async_data(req)) {
- kmsg = req->async_data;
- } else {
- ret = io_recvmsg_copy_hdr(req, &iomsg);
- if (ret)
- return ret;
- kmsg = &iomsg;
- }
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- kbuf = io_recv_buffer_select(req, issue_flags);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
- kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
- kmsg->fast_iov[0].iov_len = req->sr_msg.len;
- iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
- 1, req->sr_msg.len);
- }
-
- flags = req->sr_msg.msg_flags;
- if (force_nonblock)
- flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&kmsg->msg.msg_iter);
-
- ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
- kmsg->uaddr, flags);
- if (force_nonblock && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_recv_kbuf(req);
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- req->flags &= ~REQ_F_NEED_CLEANUP;
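- /* fail on error, or on a short/truncated MSG_WAITALL receive */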
- if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, cflags);
- return 0;
-}
-
-static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_buffer *kbuf;
- struct io_sr_msg *sr = &req->sr_msg;
- struct msghdr msg;
- void __user *buf = sr->buf;
- struct socket *sock;
- struct iovec iov;
- unsigned flags;
- int min_ret = 0;
- int ret, cflags = 0;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- kbuf = io_recv_buffer_select(req, issue_flags);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
- buf = u64_to_user_ptr(kbuf->addr);
- }
-
- ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
- goto out_free;
-
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
- msg.msg_iocb = NULL;
- msg.msg_flags = 0;
-
- flags = req->sr_msg.msg_flags;
- if (force_nonblock)
- flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&msg.msg_iter);
-
- ret = sock_recvmsg(sock, &msg, flags);
- if (force_nonblock && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-out_free:
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_recv_kbuf(req);
- if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, cflags);
- return 0;
-}
-
-static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_accept *accept = &req->accept;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->len || sqe->buf_index)
- return -EINVAL;
-
- accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
- accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- accept->flags = READ_ONCE(sqe->accept_flags);
- accept->nofile = rlimit(RLIMIT_NOFILE);
-
- accept->file_slot = READ_ONCE(sqe->file_index);
- if (accept->file_slot && ((req->open.how.flags & O_CLOEXEC) ||
- (accept->flags & SOCK_CLOEXEC)))
- return -EINVAL;
- if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
- return -EINVAL;
- if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
- accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- return 0;
-}
-
-static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_accept *accept = &req->accept;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
- bool fixed = !!accept->file_slot;
- struct file *file;
- int ret, fd;
-
- if (req->file->f_flags & O_NONBLOCK)
- req->flags |= REQ_F_NOWAIT;
-
- if (!fixed) {
- fd = __get_unused_fd_flags(accept->flags, accept->nofile);
- if (unlikely(fd < 0))
- return fd;
- }
- file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
- accept->flags);
- if (IS_ERR(file)) {
- if (!fixed)
- put_unused_fd(fd);
- ret = PTR_ERR(file);
- if (ret == -EAGAIN && force_nonblock)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- req_set_fail(req);
- } else if (!fixed) {
- fd_install(fd, file);
- ret = fd;
- } else {
- ret = io_install_fixed_file(req, file, issue_flags,
- accept->file_slot - 1);
- }
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-
-static int io_connect_prep_async(struct io_kiocb *req)
-{
- struct io_async_connect *io = req->async_data;
- struct io_connect *conn = &req->connect;
-
- return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
-}
-
-static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_connect *conn = &req->connect;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
- sqe->splice_fd_in)
- return -EINVAL;
-
- conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
- conn->addr_len = READ_ONCE(sqe->addr2);
- return 0;
-}
-
-static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_async_connect __io, *io;
- unsigned file_flags;
- int ret;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-
- if (req_has_async_data(req)) {
- io = req->async_data;
- } else {
- ret = move_addr_to_kernel(req->connect.addr,
- req->connect.addr_len,
- &__io.address);
- if (ret)
- goto out;
- io = &__io;
- }
-
- file_flags = force_nonblock ? O_NONBLOCK : 0;
-
- ret = __sys_connect_file(req->file, &io->address,
- req->connect.addr_len, file_flags);
- if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
- if (req_has_async_data(req))
- return -EAGAIN;
- if (io_alloc_async_data(req)) {
- ret = -ENOMEM;
- goto out;
- }
- memcpy(req->async_data, &__io, sizeof(__io));
- return -EAGAIN;
- }
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-out:
- if (ret < 0)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-#else /* !CONFIG_NET */
-#define IO_NETOP_FN(op) \
-static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
-{ \
- return -EOPNOTSUPP; \
-}
-
-#define IO_NETOP_PREP(op) \
-IO_NETOP_FN(op) \
-static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
-{ \
- return -EOPNOTSUPP; \
-} \
-
-#define IO_NETOP_PREP_ASYNC(op) \
-IO_NETOP_PREP(op) \
-static int io_##op##_prep_async(struct io_kiocb *req) \
-{ \
- return -EOPNOTSUPP; \
-}
-
-IO_NETOP_PREP_ASYNC(sendmsg);
-IO_NETOP_PREP_ASYNC(recvmsg);
-IO_NETOP_PREP_ASYNC(connect);
-IO_NETOP_PREP(accept);
-IO_NETOP_FN(send);
-IO_NETOP_FN(recv);
-#endif /* CONFIG_NET */
-
-struct io_poll_table {
- struct poll_table_struct pt;
- struct io_kiocb *req;
- int nr_entries;
- int error;
-};
-
-static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
- __poll_t mask, io_req_tw_func_t func)
-{
- /* for instances that support it check for an event match first: */
- if (mask && !(mask & poll->events))
- return 0;
-
- trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
-
- list_del_init(&poll->wait.entry);
-
- req->result = mask;
- req->io_task_work.func = func;
-
- /*
- * If this fails, then the task is exiting. When a task exits, the
- * work gets canceled, so just cancel this request as well instead
- * of executing it. We can't safely execute it anyway, as we may not
- * have the state needed for it.
- */
- io_req_task_work_add(req);
- return 1;
-}
-
-static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
- __acquires(&req->ctx->completion_lock)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- /* req->task == current here, checking PF_EXITING is safe */
- if (unlikely(req->task->flags & PF_EXITING))
- WRITE_ONCE(poll->canceled, true);
-
- if (!req->result && !READ_ONCE(poll->canceled)) {
- struct poll_table_struct pt = { ._key = poll->events };
-
- req->result = vfs_poll(req->file, &pt) & poll->events;
- }
-
- spin_lock(&ctx->completion_lock);
- if (!req->result && !READ_ONCE(poll->canceled)) {
- add_wait_queue(poll->head, &poll->wait);
- return true;
- }
-
- return false;
-}
-
-static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
-{
- /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
- if (req->opcode == IORING_OP_POLL_ADD)
- return req->async_data;
- return req->apoll->double_poll;
-}
-
-static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
-{
- if (req->opcode == IORING_OP_POLL_ADD)
- return &req->poll;
- return &req->apoll->poll;
-}
-
-static void io_poll_remove_double(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
-{
- struct io_poll_iocb *poll = io_poll_get_double(req);
-
- lockdep_assert_held(&req->ctx->completion_lock);
-
- if (poll && poll->head) {
- struct wait_queue_head *head = poll->head;
-
- spin_lock_irq(&head->lock);
- list_del_init(&poll->wait.entry);
- if (poll->wait.private)
- req_ref_put(req);
- poll->head = NULL;
- spin_unlock_irq(&head->lock);
- }
-}
-
-static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
- __must_hold(&req->ctx->completion_lock)
-{
- struct io_ring_ctx *ctx = req->ctx;
- unsigned flags = IORING_CQE_F_MORE;
- int error;
-
- if (READ_ONCE(req->poll.canceled)) {
- error = -ECANCELED;
- req->poll.events |= EPOLLONESHOT;
- } else {
- error = mangle_poll(mask);
- }
- if (req->poll.events & EPOLLONESHOT)
- flags = 0;
- if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
- req->poll.events |= EPOLLONESHOT;
- flags = 0;
- }
- if (flags & IORING_CQE_F_MORE)
- ctx->cq_extra++;
-
- return !(flags & IORING_CQE_F_MORE);
-}
-
-static void io_poll_task_func(struct io_kiocb *req, bool *locked)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *nxt;
-
- if (io_poll_rewait(req, &req->poll)) {
- spin_unlock(&ctx->completion_lock);
- } else {
- bool done;
-
- if (req->poll.done) {
- spin_unlock(&ctx->completion_lock);
- return;
- }
- done = __io_poll_complete(req, req->result);
- if (done) {
- io_poll_remove_double(req);
- hash_del(&req->hash_node);
- req->poll.done = true;
- } else {
- req->result = 0;
- add_wait_queue(req->poll.head, &req->poll.wait);
- }
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-
- if (done) {
- nxt = io_put_req_find_next(req);
- if (nxt)
- io_req_task_submit(nxt, locked);
- }
- }
-}
-
-static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
- int sync, void *key)
-{
- struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = io_poll_get_single(req);
- __poll_t mask = key_to_poll(key);
- unsigned long flags;
-
- /* for instances that support it check for an event match first: */
- if (mask && !(mask & poll->events))
- return 0;
- if (!(poll->events & EPOLLONESHOT))
- return poll->wait.func(&poll->wait, mode, sync, key);
-
- list_del_init(&wait->entry);
-
- if (poll->head) {
- bool done;
-
- spin_lock_irqsave(&poll->head->lock, flags);
- done = list_empty(&poll->wait.entry);
- if (!done)
- list_del_init(&poll->wait.entry);
- /* make sure double remove sees this as being gone */
- wait->private = NULL;
- spin_unlock_irqrestore(&poll->head->lock, flags);
- if (!done) {
- /* use the wait func handler, so it matches the request type */
- poll->wait.func(&poll->wait, mode, sync, key);
- }
- }
- req_ref_put(req);
- return 1;
-}
-
-static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
- wait_queue_func_t wake_func)
-{
- poll->head = NULL;
- poll->done = false;
- poll->canceled = false;
-#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
- /* mask in events that we always want/need */
- poll->events = events | IO_POLL_UNMASK;
- INIT_LIST_HEAD(&poll->wait.entry);
- init_waitqueue_func_entry(&poll->wait, wake_func);
-}
-
-static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
- struct wait_queue_head *head,
- struct io_poll_iocb **poll_ptr)
-{
- struct io_kiocb *req = pt->req;
-
- /*
- * The file being polled uses multiple waitqueues for poll handling
- * (e.g. one for read, one for write). Setup a separate io_poll_iocb
- * if this happens.
- */
- if (unlikely(pt->nr_entries)) {
- struct io_poll_iocb *poll_one = poll;
-
- /* double add on the same waitqueue head, ignore */
- if (poll_one->head == head)
- return;
- /* already have a 2nd entry, fail a third attempt */
- if (*poll_ptr) {
- if ((*poll_ptr)->head == head)
- return;
- pt->error = -EINVAL;
- return;
- }
- /*
- * Can't handle multishot for double wait for now, turn it
- * into one-shot mode.
- */
- if (!(poll_one->events & EPOLLONESHOT))
- poll_one->events |= EPOLLONESHOT;
- poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
- if (!poll) {
- pt->error = -ENOMEM;
- return;
- }
- io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
- req_ref_get(req);
- poll->wait.private = req;
-
- *poll_ptr = poll;
- if (req->opcode == IORING_OP_POLL_ADD)
- req->flags |= REQ_F_ASYNC_DATA;
- }
-
- pt->nr_entries++;
- poll->head = head;
-
- if (poll->events & EPOLLEXCLUSIVE)
- add_wait_queue_exclusive(head, &poll->wait);
- else
- add_wait_queue(head, &poll->wait);
-}
-
-static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
- struct poll_table_struct *p)
-{
- struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
- struct async_poll *apoll = pt->req->apoll;
-
- __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
-}
-
-static void io_async_task_func(struct io_kiocb *req, bool *locked)
-{
- struct async_poll *apoll = req->apoll;
- struct io_ring_ctx *ctx = req->ctx;
-
- trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
-
- if (io_poll_rewait(req, &apoll->poll)) {
- spin_unlock(&ctx->completion_lock);
- return;
- }
-
- hash_del(&req->hash_node);
- io_poll_remove_double(req);
- apoll->poll.done = true;
- spin_unlock(&ctx->completion_lock);
-
- if (!READ_ONCE(apoll->poll.canceled))
- io_req_task_submit(req, locked);
- else
- io_req_complete_failed(req, -ECANCELED);
-}
-
-static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
- void *key)
-{
- struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = &req->apoll->poll;
-
- trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
- key_to_poll(key));
-
- return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
-}
-
-static void io_poll_req_insert(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct hlist_head *list;
-
- list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
- hlist_add_head(&req->hash_node, list);
-}
-
-static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
- struct io_poll_iocb *poll,
- struct io_poll_table *ipt, __poll_t mask,
- wait_queue_func_t wake_func)
- __acquires(&ctx->completion_lock)
-{
- struct io_ring_ctx *ctx = req->ctx;
- bool cancel = false;
-
- INIT_HLIST_NODE(&req->hash_node);
- io_init_poll_iocb(poll, mask, wake_func);
- poll->file = req->file;
- poll->wait.private = req;
-
- ipt->pt._key = mask;
- ipt->req = req;
- ipt->error = 0;
- ipt->nr_entries = 0;
-
- mask = vfs_poll(req->file, &ipt->pt) & poll->events;
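- /* no entries queued: the file's ->poll never armed a waitqueue, so polling can't work */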
- if (unlikely(!ipt->nr_entries) && !ipt->error)
- ipt->error = -EINVAL;
-
- spin_lock(&ctx->completion_lock);
- if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
- io_poll_remove_double(req);
- if (likely(poll->head)) {
- spin_lock_irq(&poll->head->lock);
- if (unlikely(list_empty(&poll->wait.entry))) {
- if (ipt->error)
- cancel = true;
- ipt->error = 0;
- mask = 0;
- }
- if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
- list_del_init(&poll->wait.entry);
- else if (cancel)
- WRITE_ONCE(poll->canceled, true);
- else if (!poll->done) /* actually waiting for an event */
- io_poll_req_insert(req);
- spin_unlock_irq(&poll->head->lock);
- }
-
- return mask;
-}
-
-enum {
- IO_APOLL_OK,
- IO_APOLL_ABORTED,
- IO_APOLL_READY
-};
-
-static int io_arm_poll_handler(struct io_kiocb *req)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
- struct io_ring_ctx *ctx = req->ctx;
- struct async_poll *apoll;
- struct io_poll_table ipt;
- __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
-
- if (!def->pollin && !def->pollout)
- return IO_APOLL_ABORTED;
- if (!file_can_poll(req->file) || (req->flags & REQ_F_POLLED))
- return IO_APOLL_ABORTED;
-
- if (def->pollin) {
- mask |= POLLIN | POLLRDNORM;
-
- /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
- if ((req->opcode == IORING_OP_RECVMSG) &&
- (req->sr_msg.msg_flags & MSG_ERRQUEUE))
- mask &= ~POLLIN;
- } else {
- mask |= POLLOUT | POLLWRNORM;
- }
-
- apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
- if (unlikely(!apoll))
- return IO_APOLL_ABORTED;
- apoll->double_poll = NULL;
- req->apoll = apoll;
- req->flags |= REQ_F_POLLED;
- ipt.pt._qproc = io_async_queue_proc;
- io_req_set_refcount(req);
-
- ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
- io_async_wake);
- spin_unlock(&ctx->completion_lock);
- if (ret || ipt.error)
- return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
-
- trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
- mask, apoll->poll.events);
- return IO_APOLL_OK;
-}
-
-static bool __io_poll_remove_one(struct io_kiocb *req,
- struct io_poll_iocb *poll, bool do_cancel)
- __must_hold(&req->ctx->completion_lock)
-{
- bool do_complete = false;
-
- if (!poll->head)
- return false;
- spin_lock_irq(&poll->head->lock);
- if (do_cancel)
- WRITE_ONCE(poll->canceled, true);
- if (!list_empty(&poll->wait.entry)) {
- list_del_init(&poll->wait.entry);
- do_complete = true;
- }
- spin_unlock_irq(&poll->head->lock);
- hash_del(&req->hash_node);
- return do_complete;
-}
-
-static bool io_poll_remove_one(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
-{
- bool do_complete;
-
- io_poll_remove_double(req);
- do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
-
- if (do_complete) {
- io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
- io_commit_cqring(req->ctx);
- req_set_fail(req);
- io_put_req_deferred(req);
- }
- return do_complete;
-}
-
-/*
- * Returns true if we found and killed one or more poll requests
- */
-static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
- struct task_struct *tsk, bool cancel_all)
-{
- struct hlist_node *tmp;
- struct io_kiocb *req;
- int posted = 0, i;
-
- spin_lock(&ctx->completion_lock);
- for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
- struct hlist_head *list;
-
- list = &ctx->cancel_hash[i];
- hlist_for_each_entry_safe(req, tmp, list, hash_node) {
- if (io_match_task(req, tsk, cancel_all))
- posted += io_poll_remove_one(req);
- }
- }
- spin_unlock(&ctx->completion_lock);
-
- if (posted)
- io_cqring_ev_posted(ctx);
-
- return posted != 0;
-}
-
-static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
- bool poll_only)
- __must_hold(&ctx->completion_lock)
-{
- struct hlist_head *list;
- struct io_kiocb *req;
-
- list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
- hlist_for_each_entry(req, list, hash_node) {
- if (sqe_addr != req->user_data)
- continue;
- if (poll_only && req->opcode != IORING_OP_POLL_ADD)
- continue;
- return req;
- }
- return NULL;
-}
-
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
- bool poll_only)
- __must_hold(&ctx->completion_lock)
-{
- struct io_kiocb *req;
-
- req = io_poll_find(ctx, sqe_addr, poll_only);
- if (!req)
- return -ENOENT;
- if (io_poll_remove_one(req))
- return 0;
-
- return -EALREADY;
-}
-
-static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
- unsigned int flags)
-{
- u32 events;
-
- events = READ_ONCE(sqe->poll32_events);
-#ifdef __BIG_ENDIAN
- events = swahw32(events);
-#endif
- if (!(flags & IORING_POLL_ADD_MULTI))
- events |= EPOLLONESHOT;
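- /* demangle_poll() only covers the low 16 poll bits; EPOLLEXCLUSIVE/EPOLLONESHOT sit above them, OR them back in */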
- return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
-}
-
-static int io_poll_update_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_poll_update *upd = &req->poll_update;
- u32 flags;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
- return -EINVAL;
- flags = READ_ONCE(sqe->len);
- if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
- IORING_POLL_ADD_MULTI))
- return -EINVAL;
- /* meaningless without update */
- if (flags == IORING_POLL_ADD_MULTI)
- return -EINVAL;
-
- upd->old_user_data = READ_ONCE(sqe->addr);
- upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
- upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
-
- upd->new_user_data = READ_ONCE(sqe->off);
- if (!upd->update_user_data && upd->new_user_data)
- return -EINVAL;
- if (upd->update_events)
- upd->events = io_poll_parse_events(sqe, flags);
- else if (sqe->poll32_events)
- return -EINVAL;
-
- return 0;
-}
-
-static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
- void *key)
-{
- struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = &req->poll;
-
- return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
-}
-
-static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
- struct poll_table_struct *p)
-{
- struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
-
- __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
-}
-
-static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_poll_iocb *poll = &req->poll;
- u32 flags;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
- return -EINVAL;
- flags = READ_ONCE(sqe->len);
- if (flags & ~IORING_POLL_ADD_MULTI)
- return -EINVAL;
-
- io_req_set_refcount(req);
- poll->events = io_poll_parse_events(sqe, flags);
- return 0;
-}
-
-static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_poll_iocb *poll = &req->poll;
- struct io_ring_ctx *ctx = req->ctx;
- struct io_poll_table ipt;
- __poll_t mask;
- bool done;
-
- ipt.pt._qproc = io_poll_queue_proc;
-
- mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
- io_poll_wake);
-
- if (mask) { /* no async, we'd stolen it */
- ipt.error = 0;
- done = __io_poll_complete(req, mask);
- io_commit_cqring(req->ctx);
- }
- spin_unlock(&ctx->completion_lock);
-
- if (mask) {
- io_cqring_ev_posted(ctx);
- if (done)
- io_put_req(req);
- }
- return ipt.error;
-}
-
-static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *preq;
- bool completing;
- int ret;
-
- spin_lock(&ctx->completion_lock);
- preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
- if (!preq) {
- ret = -ENOENT;
- goto err;
- }
-
- if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
- completing = true;
- ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
- goto err;
- }
-
- /*
- * Don't allow racy completion with singleshot, as we cannot safely
- * update those. For multishot, if we're racing with completion, just
- * let completion re-add it.
- */
- completing = !__io_poll_remove_one(preq, &preq->poll, false);
- if (completing && (preq->poll.events & EPOLLONESHOT)) {
- ret = -EALREADY;
- goto err;
- }
- /* we now have a detached poll request. reissue. */
- ret = 0;
-err:
- if (ret < 0) {
- spin_unlock(&ctx->completion_lock);
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
- }
- /* only replace the event flags, keep the behavior flags */
- if (req->poll_update.update_events) {
- preq->poll.events &= ~0xffff;
- preq->poll.events |= req->poll_update.events & 0xffff;
- preq->poll.events |= IO_POLL_UNMASK;
- }
- if (req->poll_update.update_user_data)
- preq->user_data = req->poll_update.new_user_data;
- spin_unlock(&ctx->completion_lock);
-
- /* complete update request, we're done with it */
- io_req_complete(req, ret);
-
- if (!completing) {
- ret = io_poll_add(preq, issue_flags);
- if (ret < 0) {
- req_set_fail(preq);
- io_req_complete(preq, ret);
- }
- }
- return 0;
-}
-
-static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
-{
- struct io_timeout_data *data = req->async_data;
-
- if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
- req_set_fail(req);
- io_req_complete_post(req, -ETIME, 0);
-}
-
-static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
-{
- struct io_timeout_data *data = container_of(timer,
- struct io_timeout_data, timer);
- struct io_kiocb *req = data->req;
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->timeout_lock, flags);
- list_del_init(&req->timeout.list);
- atomic_set(&req->ctx->cq_timeouts,
- atomic_read(&req->ctx->cq_timeouts) + 1);
- spin_unlock_irqrestore(&ctx->timeout_lock, flags);
-
- req->io_task_work.func = io_req_task_timeout;
- io_req_task_work_add(req);
- return HRTIMER_NORESTART;
-}
-
-static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
- __u64 user_data)
- __must_hold(&ctx->timeout_lock)
-{
- struct io_timeout_data *io;
- struct io_kiocb *req;
- bool found = false;
-
- list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
- found = user_data == req->user_data;
- if (found)
- break;
- }
- if (!found)
- return ERR_PTR(-ENOENT);
-
- io = req->async_data;
- if (hrtimer_try_to_cancel(&io->timer) == -1)
- return ERR_PTR(-EALREADY);
- list_del_init(&req->timeout.list);
- return req;
-}
-
-static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
- __must_hold(&ctx->completion_lock)
- __must_hold(&ctx->timeout_lock)
-{
- struct io_kiocb *req = io_timeout_extract(ctx, user_data);
-
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req_set_fail(req);
- io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
- io_put_req_deferred(req);
- return 0;
-}
-
-static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
-{
- switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
- case IORING_TIMEOUT_BOOTTIME:
- return CLOCK_BOOTTIME;
- case IORING_TIMEOUT_REALTIME:
- return CLOCK_REALTIME;
- default:
- /* can't happen, vetted at prep time */
- WARN_ON_ONCE(1);
- fallthrough;
- case 0:
- return CLOCK_MONOTONIC;
- }
-}
-
-static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
- struct timespec64 *ts, enum hrtimer_mode mode)
- __must_hold(&ctx->timeout_lock)
-{
- struct io_timeout_data *io;
- struct io_kiocb *req;
- bool found = false;
-
- list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
- found = user_data == req->user_data;
- if (found)
- break;
- }
- if (!found)
- return -ENOENT;
-
- io = req->async_data;
- if (hrtimer_try_to_cancel(&io->timer) == -1)
- return -EALREADY;
- hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
- io->timer.function = io_link_timeout_fn;
- hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
- return 0;
-}
-
-static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
- struct timespec64 *ts, enum hrtimer_mode mode)
- __must_hold(&ctx->timeout_lock)
-{
- struct io_kiocb *req = io_timeout_extract(ctx, user_data);
- struct io_timeout_data *data;
-
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->timeout.off = 0; /* noseq */
- data = req->async_data;
- list_add_tail(&req->timeout.list, &ctx->timeout_list);
- hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
- data->timer.function = io_timeout_fn;
- hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
- return 0;
-}
-
-static int io_timeout_remove_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- struct io_timeout_rem *tr = &req->timeout_rem;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
- return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
- return -EINVAL;
-
- tr->ltimeout = false;
- tr->addr = READ_ONCE(sqe->addr);
- tr->flags = READ_ONCE(sqe->timeout_flags);
- if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
- if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
- return -EINVAL;
- if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
- tr->ltimeout = true;
- if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
- return -EINVAL;
- if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
- return -EFAULT;
- } else if (tr->flags) {
- /* timeout removal doesn't support flags */
- return -EINVAL;
- }
-
- return 0;
-}
-
-static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
-{
- return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
- : HRTIMER_MODE_REL;
-}
-
-/*
- * Remove or update an existing timeout command
- */
-static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_timeout_rem *tr = &req->timeout_rem;
- struct io_ring_ctx *ctx = req->ctx;
- int ret;
-
- if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
- spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
- ret = io_timeout_cancel(ctx, tr->addr);
- spin_unlock_irq(&ctx->timeout_lock);
- spin_unlock(&ctx->completion_lock);
- } else {
- enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
-
- spin_lock_irq(&ctx->timeout_lock);
- if (tr->ltimeout)
- ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
- else
- ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
- spin_unlock_irq(&ctx->timeout_lock);
- }
-
- if (ret < 0)
- req_set_fail(req);
- io_req_complete_post(req, ret, 0);
- return 0;
-}
-
-static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool is_timeout_link)
-{
- struct io_timeout_data *data;
- unsigned flags;
- u32 off = READ_ONCE(sqe->off);
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
- sqe->splice_fd_in)
- return -EINVAL;
- if (off && is_timeout_link)
- return -EINVAL;
- flags = READ_ONCE(sqe->timeout_flags);
- if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
- IORING_TIMEOUT_ETIME_SUCCESS))
- return -EINVAL;
- /* more than one clock specified is invalid, obviously */
- if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
- return -EINVAL;
-
- INIT_LIST_HEAD(&req->timeout.list);
- req->timeout.off = off;
- if (unlikely(off && !req->ctx->off_timeout_used))
- req->ctx->off_timeout_used = true;
-
- if (WARN_ON_ONCE(req_has_async_data(req)))
- return -EFAULT;
- if (io_alloc_async_data(req))
- return -ENOMEM;
-
- data = req->async_data;
- data->req = req;
- data->flags = flags;
-
- if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
- return -EFAULT;
-
- data->mode = io_translate_timeout_mode(flags);
- hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
-
- if (is_timeout_link) {
- struct io_submit_link *link = &req->ctx->submit_state.link;
-
- if (!link->head)
- return -EINVAL;
- if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
- return -EINVAL;
- req->timeout.head = link->last;
- link->last->flags |= REQ_F_ARM_LTIMEOUT;
- }
- return 0;
-}
-
-static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_timeout_data *data = req->async_data;
- struct list_head *entry;
- u32 tail, off = req->timeout.off;
-
- spin_lock_irq(&ctx->timeout_lock);
-
- /*
- * sqe->off holds how many events need to occur for this
- * timeout event to be satisfied. If it isn't set, then this is
- * a pure timeout request, sequence isn't used.
- */
- if (io_is_timeout_noseq(req)) {
- entry = ctx->timeout_list.prev;
- goto add;
- }
-
- tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
- req->timeout.target_seq = tail + off;
-
- /*
- * Update the last seq here in case io_flush_timeouts() hasn't.
- * This is safe because ->completion_lock is held, and submissions
- * and completions are never mixed in the same ->completion_lock section.
- */
- ctx->cq_last_tm_flush = tail;
-
- /*
- * Insertion sort, ensuring the first entry in the list is always
- * the one we need first.
- */
- list_for_each_prev(entry, &ctx->timeout_list) {
- struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
- timeout.list);
-
- if (io_is_timeout_noseq(nxt))
- continue;
- /* nxt.seq is behind @tail, otherwise would've been completed */
- if (off >= nxt->timeout.target_seq - tail)
- break;
- }
-add:
- list_add(&req->timeout.list, entry);
- data->timer.function = io_timeout_fn;
- hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
- spin_unlock_irq(&ctx->timeout_lock);
- return 0;
-}
-
-struct io_cancel_data {
- struct io_ring_ctx *ctx;
- u64 user_data;
-};
-
-static bool io_cancel_cb(struct io_wq_work *work, void *data)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_cancel_data *cd = data;
-
- return req->ctx == cd->ctx && req->user_data == cd->user_data;
-}
-
-static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
- struct io_ring_ctx *ctx)
-{
- struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
- enum io_wq_cancel cancel_ret;
- int ret = 0;
-
- if (!tctx || !tctx->io_wq)
- return -ENOENT;
-
- cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
- switch (cancel_ret) {
- case IO_WQ_CANCEL_OK:
- ret = 0;
- break;
- case IO_WQ_CANCEL_RUNNING:
- ret = -EALREADY;
- break;
- case IO_WQ_CANCEL_NOTFOUND:
- ret = -ENOENT;
- break;
- }
-
- return ret;
-}
-
-static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
-{
- struct io_ring_ctx *ctx = req->ctx;
- int ret;
-
- WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
-
- ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
- if (ret != -ENOENT)
- return ret;
-
- spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
- ret = io_timeout_cancel(ctx, sqe_addr);
- spin_unlock_irq(&ctx->timeout_lock);
- if (ret != -ENOENT)
- goto out;
- ret = io_poll_cancel(ctx, sqe_addr, false);
-out:
- spin_unlock(&ctx->completion_lock);
- return ret;
-}
-
-static int io_async_cancel_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
- if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
- return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
- sqe->splice_fd_in)
- return -EINVAL;
-
- req->cancel.addr = READ_ONCE(sqe->addr);
- return 0;
-}
-
-static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
- u64 sqe_addr = req->cancel.addr;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
- struct io_tctx_node *node;
- int ret;
-
- ret = io_try_cancel_userdata(req, sqe_addr);
- if (ret != -ENOENT)
- goto done;
-
- /* slow path, try all io-wq's */
- io_ring_submit_lock(ctx, needs_lock);
- ret = -ENOENT;
- list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
- struct io_uring_task *tctx = node->task->io_uring;
-
- ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
- if (ret != -ENOENT)
- break;
- }
- io_ring_submit_unlock(ctx, needs_lock);
-done:
- if (ret < 0)
- req_set_fail(req);
- io_req_complete_post(req, ret, 0);
- return 0;
-}
-
-static int io_rsrc_update_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
- return -EINVAL;
- if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
- return -EINVAL;
-
- req->rsrc_update.offset = READ_ONCE(sqe->off);
- req->rsrc_update.nr_args = READ_ONCE(sqe->len);
- if (!req->rsrc_update.nr_args)
- return -EINVAL;
- req->rsrc_update.arg = READ_ONCE(sqe->addr);
- return 0;
-}
-
-static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
- struct io_uring_rsrc_update2 up;
- int ret;
-
- up.offset = req->rsrc_update.offset;
- up.data = req->rsrc_update.arg;
- up.nr = 0;
- up.tags = 0;
- up.resv = 0;
-
- io_ring_submit_lock(ctx, needs_lock);
- ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
- &up, req->rsrc_update.nr_args);
- io_ring_submit_unlock(ctx, needs_lock);
-
- if (ret < 0)
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, 0);
- return 0;
-}
-
-static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- switch (req->opcode) {
- case IORING_OP_NOP:
- return 0;
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
- return io_read_prep(req, sqe);
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- case IORING_OP_WRITE:
- return io_write_prep(req, sqe);
- case IORING_OP_POLL_ADD:
- return io_poll_add_prep(req, sqe);
- case IORING_OP_POLL_REMOVE:
- return io_poll_update_prep(req, sqe);
- case IORING_OP_FSYNC:
- return io_fsync_prep(req, sqe);
- case IORING_OP_SYNC_FILE_RANGE:
- return io_sfr_prep(req, sqe);
- case IORING_OP_SENDMSG:
- case IORING_OP_SEND:
- return io_sendmsg_prep(req, sqe);
- case IORING_OP_RECVMSG:
- case IORING_OP_RECV:
- return io_recvmsg_prep(req, sqe);
- case IORING_OP_CONNECT:
- return io_connect_prep(req, sqe);
- case IORING_OP_TIMEOUT:
- return io_timeout_prep(req, sqe, false);
- case IORING_OP_TIMEOUT_REMOVE:
- return io_timeout_remove_prep(req, sqe);
- case IORING_OP_ASYNC_CANCEL:
- return io_async_cancel_prep(req, sqe);
- case IORING_OP_LINK_TIMEOUT:
- return io_timeout_prep(req, sqe, true);
- case IORING_OP_ACCEPT:
- return io_accept_prep(req, sqe);
- case IORING_OP_FALLOCATE:
- return io_fallocate_prep(req, sqe);
- case IORING_OP_OPENAT:
- return io_openat_prep(req, sqe);
- case IORING_OP_CLOSE:
- return io_close_prep(req, sqe);
- case IORING_OP_FILES_UPDATE:
- return io_rsrc_update_prep(req, sqe);
- case IORING_OP_STATX:
- return io_statx_prep(req, sqe);
- case IORING_OP_FADVISE:
- return io_fadvise_prep(req, sqe);
- case IORING_OP_MADVISE:
- return io_madvise_prep(req, sqe);
- case IORING_OP_OPENAT2:
- return io_openat2_prep(req, sqe);
- case IORING_OP_EPOLL_CTL:
- return io_epoll_ctl_prep(req, sqe);
- case IORING_OP_SPLICE:
- return io_splice_prep(req, sqe);
- case IORING_OP_PROVIDE_BUFFERS:
- return io_provide_buffers_prep(req, sqe);
- case IORING_OP_REMOVE_BUFFERS:
- return io_remove_buffers_prep(req, sqe);
- case IORING_OP_TEE:
- return io_tee_prep(req, sqe);
- case IORING_OP_SHUTDOWN:
- return io_shutdown_prep(req, sqe);
- case IORING_OP_RENAMEAT:
- return io_renameat_prep(req, sqe);
- case IORING_OP_UNLINKAT:
- return io_unlinkat_prep(req, sqe);
- case IORING_OP_MKDIRAT:
- return io_mkdirat_prep(req, sqe);
- case IORING_OP_SYMLINKAT:
- return io_symlinkat_prep(req, sqe);
- case IORING_OP_LINKAT:
- return io_linkat_prep(req, sqe);
- }
-
- printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
- req->opcode);
- return -EINVAL;
-}
-
-static int io_req_prep_async(struct io_kiocb *req)
-{
- if (!io_op_defs[req->opcode].needs_async_setup)
- return 0;
- if (WARN_ON_ONCE(req_has_async_data(req)))
- return -EFAULT;
- if (io_alloc_async_data(req))
- return -EAGAIN;
-
- switch (req->opcode) {
- case IORING_OP_READV:
- return io_rw_prep_async(req, READ);
- case IORING_OP_WRITEV:
- return io_rw_prep_async(req, WRITE);
- case IORING_OP_SENDMSG:
- return io_sendmsg_prep_async(req);
- case IORING_OP_RECVMSG:
- return io_recvmsg_prep_async(req);
- case IORING_OP_CONNECT:
- return io_connect_prep_async(req);
- }
- printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
- req->opcode);
- return -EFAULT;
-}
-
-static u32 io_get_sequence(struct io_kiocb *req)
-{
- u32 seq = req->ctx->cached_sq_head;
-
- /* need original cached_sq_head, but it was increased for each req */
- io_for_each_link(req, req)
- seq--;
- return seq;
-}
-
-static __cold void io_drain_req(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_defer_entry *de;
- int ret;
- u32 seq = io_get_sequence(req);
-
- /* Still need defer if there is pending req in defer list. */
- if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
-queue:
- ctx->drain_active = false;
- io_req_task_queue(req);
- return;
- }
-
- ret = io_req_prep_async(req);
- if (ret) {
-fail:
- io_req_complete_failed(req, ret);
- return;
- }
- io_prep_async_link(req);
- de = kmalloc(sizeof(*de), GFP_KERNEL);
- if (!de) {
- ret = -ENOMEM;
- goto fail;
- }
-
- spin_lock(&ctx->completion_lock);
- if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
- spin_unlock(&ctx->completion_lock);
- kfree(de);
- goto queue;
- }
-
- trace_io_uring_defer(ctx, req, req->user_data);
- de->req = req;
- de->seq = seq;
- list_add_tail(&de->list, &ctx->defer_list);
- spin_unlock(&ctx->completion_lock);
-}
-
-static void io_clean_op(struct io_kiocb *req)
-{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- kfree(req->kbuf);
- req->kbuf = NULL;
- }
-
- if (req->flags & REQ_F_NEED_CLEANUP) {
- switch (req->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- case IORING_OP_WRITE: {
- struct io_async_rw *io = req->async_data;
-
- kfree(io->free_iovec);
- break;
- }
- case IORING_OP_RECVMSG:
- case IORING_OP_SENDMSG: {
- struct io_async_msghdr *io = req->async_data;
-
- kfree(io->free_iov);
- break;
- }
- case IORING_OP_SPLICE:
- case IORING_OP_TEE:
- if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
- io_put_file(req->splice.file_in);
- break;
- case IORING_OP_OPENAT:
- case IORING_OP_OPENAT2:
- if (req->open.filename)
- putname(req->open.filename);
- break;
- case IORING_OP_RENAMEAT:
- putname(req->rename.oldpath);
- putname(req->rename.newpath);
- break;
- case IORING_OP_UNLINKAT:
- putname(req->unlink.filename);
- break;
- case IORING_OP_MKDIRAT:
- putname(req->mkdir.filename);
- break;
- case IORING_OP_SYMLINKAT:
- putname(req->symlink.oldpath);
- putname(req->symlink.newpath);
- break;
- case IORING_OP_LINKAT:
- putname(req->hardlink.oldpath);
- putname(req->hardlink.newpath);
- break;
- }
- }
- if ((req->flags & REQ_F_POLLED) && req->apoll) {
- kfree(req->apoll->double_poll);
- kfree(req->apoll);
- req->apoll = NULL;
- }
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_uring_task *tctx = req->task->io_uring;
-
- atomic_dec(&tctx->inflight_tracked);
- }
- if (req->flags & REQ_F_CREDS)
- put_cred(req->creds);
- if (req->flags & REQ_F_ASYNC_DATA) {
- kfree(req->async_data);
- req->async_data = NULL;
- }
- req->flags &= ~IO_REQ_CLEAN_FLAGS;
-}
-
-static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
-{
- const struct cred *creds = NULL;
- int ret;
-
- if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
- creds = override_creds(req->creds);
-
- if (!io_op_defs[req->opcode].audit_skip)
- audit_uring_entry(req->opcode);
-
- switch (req->opcode) {
- case IORING_OP_NOP:
- ret = io_nop(req, issue_flags);
- break;
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
- ret = io_read(req, issue_flags);
- break;
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- case IORING_OP_WRITE:
- ret = io_write(req, issue_flags);
- break;
- case IORING_OP_FSYNC:
- ret = io_fsync(req, issue_flags);
- break;
- case IORING_OP_POLL_ADD:
- ret = io_poll_add(req, issue_flags);
- break;
- case IORING_OP_POLL_REMOVE:
- ret = io_poll_update(req, issue_flags);
- break;
- case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, issue_flags);
- break;
- case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, issue_flags);
- break;
- case IORING_OP_SEND:
- ret = io_send(req, issue_flags);
- break;
- case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, issue_flags);
- break;
- case IORING_OP_RECV:
- ret = io_recv(req, issue_flags);
- break;
- case IORING_OP_TIMEOUT:
- ret = io_timeout(req, issue_flags);
- break;
- case IORING_OP_TIMEOUT_REMOVE:
- ret = io_timeout_remove(req, issue_flags);
- break;
- case IORING_OP_ACCEPT:
- ret = io_accept(req, issue_flags);
- break;
- case IORING_OP_CONNECT:
- ret = io_connect(req, issue_flags);
- break;
- case IORING_OP_ASYNC_CANCEL:
- ret = io_async_cancel(req, issue_flags);
- break;
- case IORING_OP_FALLOCATE:
- ret = io_fallocate(req, issue_flags);
- break;
- case IORING_OP_OPENAT:
- ret = io_openat(req, issue_flags);
- break;
- case IORING_OP_CLOSE:
- ret = io_close(req, issue_flags);
- break;
- case IORING_OP_FILES_UPDATE:
- ret = io_files_update(req, issue_flags);
- break;
- case IORING_OP_STATX:
- ret = io_statx(req, issue_flags);
- break;
- case IORING_OP_FADVISE:
- ret = io_fadvise(req, issue_flags);
- break;
- case IORING_OP_MADVISE:
- ret = io_madvise(req, issue_flags);
- break;
- case IORING_OP_OPENAT2:
- ret = io_openat2(req, issue_flags);
- break;
- case IORING_OP_EPOLL_CTL:
- ret = io_epoll_ctl(req, issue_flags);
- break;
- case IORING_OP_SPLICE:
- ret = io_splice(req, issue_flags);
- break;
- case IORING_OP_PROVIDE_BUFFERS:
- ret = io_provide_buffers(req, issue_flags);
- break;
- case IORING_OP_REMOVE_BUFFERS:
- ret = io_remove_buffers(req, issue_flags);
- break;
- case IORING_OP_TEE:
- ret = io_tee(req, issue_flags);
- break;
- case IORING_OP_SHUTDOWN:
- ret = io_shutdown(req, issue_flags);
- break;
- case IORING_OP_RENAMEAT:
- ret = io_renameat(req, issue_flags);
- break;
- case IORING_OP_UNLINKAT:
- ret = io_unlinkat(req, issue_flags);
- break;
- case IORING_OP_MKDIRAT:
- ret = io_mkdirat(req, issue_flags);
- break;
- case IORING_OP_SYMLINKAT:
- ret = io_symlinkat(req, issue_flags);
- break;
- case IORING_OP_LINKAT:
- ret = io_linkat(req, issue_flags);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (!io_op_defs[req->opcode].audit_skip)
- audit_uring_exit(!ret, ret);
-
- if (creds)
- revert_creds(creds);
- if (ret)
- return ret;
- /* If the op doesn't have a file, we're not polling for it */
- if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
- io_iopoll_req_issued(req, issue_flags);
-
- return 0;
-}
-
-static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
- req = io_put_req_find_next(req);
- return req ? &req->work : NULL;
-}
-
-static void io_wq_submit_work(struct io_wq_work *work)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- unsigned int issue_flags = IO_URING_F_UNLOCKED;
- bool needs_poll = false;
- struct io_kiocb *timeout;
- int ret = 0;
-
- /* one will be dropped by ->io_free_work() after returning to io-wq */
- if (!(req->flags & REQ_F_REFCOUNT))
- __io_req_set_refcount(req, 2);
- else
- req_ref_get(req);
-
- timeout = io_prep_linked_timeout(req);
- if (timeout)
- io_queue_linked_timeout(timeout);
-
- /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
- if (work->flags & IO_WQ_WORK_CANCEL) {
- io_req_task_queue_fail(req, -ECANCELED);
- return;
- }
-
- if (req->flags & REQ_F_FORCE_ASYNC) {
- const struct io_op_def *def = &io_op_defs[req->opcode];
- bool opcode_poll = def->pollin || def->pollout;
-
- if (opcode_poll && file_can_poll(req->file)) {
- needs_poll = true;
- issue_flags |= IO_URING_F_NONBLOCK;
- }
- }
-
- do {
- ret = io_issue_sqe(req, issue_flags);
- if (ret != -EAGAIN)
- break;
- /*
- * We can get EAGAIN for iopolled IO even though we're
- * forcing a sync submission from here, since we can't
- * wait for request slots on the block side.
- */
- if (!needs_poll) {
- cond_resched();
- continue;
- }
-
- if (io_arm_poll_handler(req) == IO_APOLL_OK)
- return;
- /* aborted or ready, in either case retry blocking */
- needs_poll = false;
- issue_flags &= ~IO_URING_F_NONBLOCK;
- } while (1);
-
- /* avoid locking problems by failing it from a clean context */
- if (ret)
- io_req_task_queue_fail(req, ret);
-}
-
-static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
- unsigned i)
-{
- return &table->files[i];
-}
-
-static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
- int index)
-{
- struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
-
- return (struct file *) (slot->file_ptr & FFS_MASK);
-}
-
-static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
-{
- unsigned long file_ptr = (unsigned long) file;
-
- file_ptr |= io_file_get_flags(file);
- file_slot->file_ptr = file_ptr;
-}
-
-static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd)
-{
- struct file *file;
- unsigned long file_ptr;
-
- if (unlikely((unsigned int)fd >= ctx->nr_user_files))
- return NULL;
- fd = array_index_nospec(fd, ctx->nr_user_files);
- file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
- file = (struct file *) (file_ptr & FFS_MASK);
- file_ptr &= ~FFS_MASK;
- /* mask in overlapping REQ_F and FFS bits */
- req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
- io_req_set_rsrc_node(req, ctx);
- return file;
-}
-
-static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd)
-{
- struct file *file = fget(fd);
-
- trace_io_uring_file_get(ctx, fd);
-
- /* we don't allow fixed io_uring files */
- if (file && unlikely(file->f_op == &io_uring_fops))
- io_req_track_inflight(req);
- return file;
-}
-
-static inline struct file *io_file_get(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd, bool fixed)
-{
- if (fixed)
- return io_file_get_fixed(ctx, req, fd);
- else
- return io_file_get_normal(ctx, req, fd);
-}
-
-static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
-{
- struct io_kiocb *prev = req->timeout.prev;
- int ret;
-
- if (prev) {
- ret = io_try_cancel_userdata(req, prev->user_data);
- io_req_complete_post(req, ret ?: -ETIME, 0);
- io_put_req(prev);
- } else {
- io_req_complete_post(req, -ETIME, 0);
- }
-}
-
-static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
-{
- struct io_timeout_data *data = container_of(timer,
- struct io_timeout_data, timer);
- struct io_kiocb *prev, *req = data->req;
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->timeout_lock, flags);
- prev = req->timeout.head;
- req->timeout.head = NULL;
-
- /*
- * We don't expect the list to be empty, that will only happen if we
- * race with the completion of the linked work.
- */
- if (prev) {
- io_remove_next_linked(prev);
- if (!req_ref_inc_not_zero(prev))
- prev = NULL;
- }
- list_del(&req->timeout.list);
- req->timeout.prev = prev;
- spin_unlock_irqrestore(&ctx->timeout_lock, flags);
-
- req->io_task_work.func = io_req_task_link_timeout;
- io_req_task_work_add(req);
- return HRTIMER_NORESTART;
-}
-
-static void io_queue_linked_timeout(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->timeout_lock);
- /*
- * If the back reference is NULL, then our linked request finished
- * before we got a chance to setup the timer
- */
- if (req->timeout.head) {
- struct io_timeout_data *data = req->async_data;
-
- data->timer.function = io_link_timeout_fn;
- hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
- data->mode);
- list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
- }
- spin_unlock_irq(&ctx->timeout_lock);
- /* drop submission reference */
- io_put_req(req);
-}
-
-static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
- __must_hold(&req->ctx->uring_lock)
-{
- struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
-
- switch (io_arm_poll_handler(req)) {
- case IO_APOLL_READY:
- io_req_task_queue(req);
- break;
- case IO_APOLL_ABORTED:
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- io_queue_async_work(req, NULL);
- break;
- }
-
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
-}
-
-static inline void __io_queue_sqe(struct io_kiocb *req)
- __must_hold(&req->ctx->uring_lock)
-{
- struct io_kiocb *linked_timeout;
- int ret;
-
- ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
-
- if (req->flags & REQ_F_COMPLETE_INLINE) {
- io_req_add_compl_list(req);
- return;
- }
- /*
- * We async punt it if the file wasn't marked NOWAIT, or if the file
- * doesn't support non-blocking read/write attempts
- */
- if (likely(!ret)) {
- linked_timeout = io_prep_linked_timeout(req);
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
- } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
- io_queue_sqe_arm_apoll(req);
- } else {
- io_req_complete_failed(req, ret);
- }
-}
-
-static void io_queue_sqe_fallback(struct io_kiocb *req)
- __must_hold(&req->ctx->uring_lock)
-{
- if (req->flags & REQ_F_FAIL) {
- io_req_complete_fail_submit(req);
- } else if (unlikely(req->ctx->drain_active)) {
- io_drain_req(req);
- } else {
- int ret = io_req_prep_async(req);
-
- if (unlikely(ret))
- io_req_complete_failed(req, ret);
- else
- io_queue_async_work(req, NULL);
- }
-}
-
-static inline void io_queue_sqe(struct io_kiocb *req)
- __must_hold(&req->ctx->uring_lock)
-{
- if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))))
- __io_queue_sqe(req);
- else
- io_queue_sqe_fallback(req);
-}
-
-/*
- * Check SQE restrictions (opcode and flags).
- *
- * Returns 'true' if SQE is allowed, 'false' otherwise.
- */
-static inline bool io_check_restriction(struct io_ring_ctx *ctx,
- struct io_kiocb *req,
- unsigned int sqe_flags)
-{
- if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
- return false;
-
- if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
- ctx->restrictions.sqe_flags_required)
- return false;
-
- if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
- ctx->restrictions.sqe_flags_required))
- return false;
-
- return true;
-}
-
-static void io_init_req_drain(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *head = ctx->submit_state.link.head;
-
- ctx->drain_active = true;
- if (head) {
- /*
- * If we need to drain a request in the middle of a link, drain
- * the head request and the next request/link after the current
- * link. Considering sequential execution of links,
- * IOSQE_IO_DRAIN will be maintained for every request of our
- * link.
- */
- head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
- ctx->drain_next = true;
- }
-}
-
-static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
- __must_hold(&ctx->uring_lock)
-{
- unsigned int sqe_flags;
- int personality;
- u8 opcode;
-
- /* req is partially pre-initialised, see io_preinit_req() */
- req->opcode = opcode = READ_ONCE(sqe->opcode);
- /* same numerical values with corresponding REQ_F_*, safe to copy */
- req->flags = sqe_flags = READ_ONCE(sqe->flags);
- req->user_data = READ_ONCE(sqe->user_data);
- req->file = NULL;
- req->fixed_rsrc_refs = NULL;
- req->task = current;
-
- if (unlikely(opcode >= IORING_OP_LAST)) {
- req->opcode = 0;
- return -EINVAL;
- }
- if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
- /* enforce forwards compatibility on users */
- if (sqe_flags & ~SQE_VALID_FLAGS)
- return -EINVAL;
- if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
- !io_op_defs[opcode].buffer_select)
- return -EOPNOTSUPP;
- if (sqe_flags & IOSQE_IO_DRAIN)
- io_init_req_drain(req);
- }
- if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
- if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
- return -EACCES;
- /* knock it to the slow queue path, will be drained there */
- if (ctx->drain_active)
- req->flags |= REQ_F_FORCE_ASYNC;
- /* if there is no link, we're at "next" request and need to drain */
- if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
- ctx->drain_next = false;
- ctx->drain_active = true;
- req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
- }
- }
-
- if (io_op_defs[opcode].needs_file) {
- struct io_submit_state *state = &ctx->submit_state;
-
- /*
- * Plug now if we have more than 2 IO left after this, and the
- * target is potentially a read/write to block based storage.
- */
- if (state->need_plug && io_op_defs[opcode].plug) {
- state->plug_started = true;
- state->need_plug = false;
- blk_start_plug_nr_ios(&state->plug, state->submit_nr);
- }
-
- req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
- (sqe_flags & IOSQE_FIXED_FILE));
- if (unlikely(!req->file))
- return -EBADF;
- }
-
- personality = READ_ONCE(sqe->personality);
- if (personality) {
- int ret;
-
- req->creds = xa_load(&ctx->personalities, personality);
- if (!req->creds)
- return -EINVAL;
- get_cred(req->creds);
- ret = security_uring_override_creds(req->creds);
- if (ret) {
- put_cred(req->creds);
- return ret;
- }
- req->flags |= REQ_F_CREDS;
- }
-
- return io_req_prep(req, sqe);
-}
-
-static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
- __must_hold(&ctx->uring_lock)
-{
- struct io_submit_link *link = &ctx->submit_state.link;
- int ret;
-
- ret = io_init_req(ctx, req, sqe);
- if (unlikely(ret)) {
- trace_io_uring_req_failed(sqe, ret);
-
- /* fail even hard links since we don't submit */
- if (link->head) {
- /*
- * We can tell whether a link req failed or was cancelled by
- * checking REQ_F_FAIL, but the head is an exception: it may
- * have REQ_F_FAIL set because another req in the link failed.
- * Use req->result to distinguish a head that failed itself
- * from one marked failed because of another req's failure, so
- * that the correct ret code can be set for it. Init the result
- * here to avoid affecting the normal path.
- */
- if (!(link->head->flags & REQ_F_FAIL))
- req_fail_link_node(link->head, -ECANCELED);
- } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
- /*
- * the current req is a normal req, we should return an
- * error and thus break the submission loop.
- */
- io_req_complete_failed(req, ret);
- return ret;
- }
- req_fail_link_node(req, ret);
- }
-
- /* don't need @sqe from now on */
- trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
- req->flags, true,
- ctx->flags & IORING_SETUP_SQPOLL);
-
- /*
- * If we already have a head request, queue this one for async
- * submittal once the head completes. If we don't have a head but
- * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
- * submitted sync once the chain is complete. If none of those
- * conditions are true (normal request), then just queue it.
- */
- if (link->head) {
- struct io_kiocb *head = link->head;
-
- if (!(req->flags & REQ_F_FAIL)) {
- ret = io_req_prep_async(req);
- if (unlikely(ret)) {
- req_fail_link_node(req, ret);
- if (!(head->flags & REQ_F_FAIL))
- req_fail_link_node(head, -ECANCELED);
- }
- }
- trace_io_uring_link(ctx, req, head);
- link->last->link = req;
- link->last = req;
-
- if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
- return 0;
- /* last request of a link, enqueue the link */
- link->head = NULL;
- req = head;
- } else if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
- link->head = req;
- link->last = req;
- return 0;
- }
-
- io_queue_sqe(req);
- return 0;
-}
-
-/*
- * Batched submission is done, ensure local IO is flushed out.
- */
-static void io_submit_state_end(struct io_ring_ctx *ctx)
-{
- struct io_submit_state *state = &ctx->submit_state;
-
- if (state->link.head)
- io_queue_sqe(state->link.head);
- /* flush only after queuing links as they can generate completions */
- io_submit_flush_completions(ctx);
- if (state->plug_started)
- blk_finish_plug(&state->plug);
-}
-
-/*
- * Start submission side cache.
- */
-static void io_submit_state_start(struct io_submit_state *state,
- unsigned int max_ios)
-{
- state->plug_started = false;
- state->need_plug = max_ios > 2;
- state->submit_nr = max_ios;
- /* set only head, no need to init link_last in advance */
- state->link.head = NULL;
-}
-
-static void io_commit_sqring(struct io_ring_ctx *ctx)
-{
- struct io_rings *rings = ctx->rings;
-
- /*
- * Ensure any loads from the SQEs are done at this point,
- * since once we write the new head, the application could
- * write new data to them.
- */
- smp_store_release(&rings->sq.head, ctx->cached_sq_head);
-}
-
-/*
- * Fetch an sqe, if one is available. Note this returns a pointer to memory
- * that is mapped by userspace. This means that care needs to be taken to
- * ensure that reads are stable, as we cannot rely on userspace always
- * being a good citizen. If members of the sqe are validated and then later
- * used, it's important that those reads are done through READ_ONCE() to
- * prevent a re-load down the line.
- */
-static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
-{
- unsigned head, mask = ctx->sq_entries - 1;
- unsigned sq_idx = ctx->cached_sq_head++ & mask;
-
- /*
- * The cached sq head (or cq tail) serves two purposes:
- *
- * 1) allows us to batch the cost of updating the user visible
- * head.
- * 2) allows the kernel side to track the head on its own, even
- * though the application is the one updating it.
- */
- head = READ_ONCE(ctx->sq_array[sq_idx]);
- if (likely(head < ctx->sq_entries))
- return &ctx->sq_sqes[head];
-
- /* drop invalid entries */
- ctx->cq_extra--;
- WRITE_ONCE(ctx->rings->sq_dropped,
- READ_ONCE(ctx->rings->sq_dropped) + 1);
- return NULL;
-}
-
-static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
- __must_hold(&ctx->uring_lock)
-{
- unsigned int entries = io_sqring_entries(ctx);
- int submitted = 0;
-
- if (unlikely(!entries))
- return 0;
- /* make sure SQ entry isn't read before tail */
- nr = min3(nr, ctx->sq_entries, entries);
- io_get_task_refs(nr);
-
- io_submit_state_start(&ctx->submit_state, nr);
- do {
- const struct io_uring_sqe *sqe;
- struct io_kiocb *req;
-
- if (unlikely(!io_alloc_req_refill(ctx))) {
- if (!submitted)
- submitted = -EAGAIN;
- break;
- }
- req = io_alloc_req(ctx);
- sqe = io_get_sqe(ctx);
- if (unlikely(!sqe)) {
- wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
- break;
- }
- /* will complete beyond this point, count as submitted */
- submitted++;
- if (io_submit_sqe(ctx, req, sqe))
- break;
- } while (submitted < nr);
-
- if (unlikely(submitted != nr)) {
- int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
- int unused = nr - ref_used;
-
- current->io_uring->cached_refs += unused;
- }
-
- io_submit_state_end(ctx);
- /* Commit SQ ring head once we've consumed and submitted all SQEs */
- io_commit_sqring(ctx);
-
- return submitted;
-}
-
-static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
-{
- return READ_ONCE(sqd->state);
-}
-
-static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
-{
- /* Tell userspace we may need a wakeup call */
- spin_lock(&ctx->completion_lock);
- WRITE_ONCE(ctx->rings->sq_flags,
- ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
- spin_unlock(&ctx->completion_lock);
-}
-
-static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
-{
- spin_lock(&ctx->completion_lock);
- WRITE_ONCE(ctx->rings->sq_flags,
- ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
- spin_unlock(&ctx->completion_lock);
-}
-
-static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
-{
- unsigned int to_submit;
- int ret = 0;
-
- to_submit = io_sqring_entries(ctx);
- /* if we're handling multiple rings, cap submit size for fairness */
- if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
- to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
-
- if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
- const struct cred *creds = NULL;
-
- if (ctx->sq_creds != current_cred())
- creds = override_creds(ctx->sq_creds);
-
- mutex_lock(&ctx->uring_lock);
- if (!wq_list_empty(&ctx->iopoll_list))
- io_do_iopoll(ctx, true);
-
- /*
- * Don't submit if refs are dying. That matters for io_uring_register(),
- * and io_ring_exit_work() also relies on it.
- */
- if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
- !(ctx->flags & IORING_SETUP_R_DISABLED))
- ret = io_submit_sqes(ctx, to_submit);
- mutex_unlock(&ctx->uring_lock);
-
- if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
- wake_up(&ctx->sqo_sq_wait);
- if (creds)
- revert_creds(creds);
- }
-
- return ret;
-}
-
-static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
-{
- struct io_ring_ctx *ctx;
- unsigned sq_thread_idle = 0;
-
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
- sqd->sq_thread_idle = sq_thread_idle;
-}
-
-static bool io_sqd_handle_event(struct io_sq_data *sqd)
-{
- bool did_sig = false;
- struct ksignal ksig;
-
- if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
- signal_pending(current)) {
- mutex_unlock(&sqd->lock);
- if (signal_pending(current))
- did_sig = get_signal(&ksig);
- cond_resched();
- mutex_lock(&sqd->lock);
- }
- return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
-}
-
-static int io_sq_thread(void *data)
-{
- struct io_sq_data *sqd = data;
- struct io_ring_ctx *ctx;
- unsigned long timeout = 0;
- char buf[TASK_COMM_LEN];
- DEFINE_WAIT(wait);
-
- snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
- set_task_comm(current, buf);
-
- if (sqd->sq_cpu != -1)
- set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
- else
- set_cpus_allowed_ptr(current, cpu_online_mask);
- current->flags |= PF_NO_SETAFFINITY;
-
- audit_alloc_kernel(current);
-
- mutex_lock(&sqd->lock);
- while (1) {
- bool cap_entries, sqt_spin = false;
-
- if (io_sqd_events_pending(sqd) || signal_pending(current)) {
- if (io_sqd_handle_event(sqd))
- break;
- timeout = jiffies + sqd->sq_thread_idle;
- }
-
- cap_entries = !list_is_singular(&sqd->ctx_list);
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
- int ret = __io_sq_thread(ctx, cap_entries);
-
- if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
- sqt_spin = true;
- }
- if (io_run_task_work())
- sqt_spin = true;
-
- if (sqt_spin || !time_after(jiffies, timeout)) {
- cond_resched();
- if (sqt_spin)
- timeout = jiffies + sqd->sq_thread_idle;
- continue;
- }
-
- prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
- if (!io_sqd_events_pending(sqd) && !current->task_works) {
- bool needs_sched = true;
-
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
- io_ring_set_wakeup_flag(ctx);
-
- if ((ctx->flags & IORING_SETUP_IOPOLL) &&
- !wq_list_empty(&ctx->iopoll_list)) {
- needs_sched = false;
- break;
- }
- if (io_sqring_entries(ctx)) {
- needs_sched = false;
- break;
- }
- }
-
- if (needs_sched) {
- mutex_unlock(&sqd->lock);
- schedule();
- mutex_lock(&sqd->lock);
- }
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- io_ring_clear_wakeup_flag(ctx);
- }
-
- finish_wait(&sqd->wait, &wait);
- timeout = jiffies + sqd->sq_thread_idle;
- }
-
- io_uring_cancel_generic(true, sqd);
- sqd->thread = NULL;
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- io_ring_set_wakeup_flag(ctx);
- io_run_task_work();
- mutex_unlock(&sqd->lock);
-
- audit_free(current);
-
- complete(&sqd->exited);
- do_exit(0);
-}
-
-struct io_wait_queue {
- struct wait_queue_entry wq;
- struct io_ring_ctx *ctx;
- unsigned cq_tail;
- unsigned nr_timeouts;
-};
-
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
- struct io_ring_ctx *ctx = iowq->ctx;
- int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
-
- /*
- * Wake up if we have enough events, or if a timeout occurred since we
- * started waiting. For timeouts, we always want to return to userspace,
- * regardless of event count.
- */
- return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
-static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
- int wake_flags, void *key)
-{
- struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
- wq);
-
- /*
- * Cannot safely flush overflowed CQEs from here, ensure we wake up
- * the task, and the next invocation will do it.
- */
- if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
- return autoremove_wake_function(curr, mode, wake_flags, key);
- return -1;
-}
-
-static int io_run_task_work_sig(void)
-{
- if (io_run_task_work())
- return 1;
- if (!signal_pending(current))
- return 0;
- if (test_thread_flag(TIF_NOTIFY_SIGNAL))
- return -ERESTARTSYS;
- return -EINTR;
-}
-
-/* when this returns >0, the caller should retry */
-static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq,
- signed long *timeout)
-{
- int ret;
-
- /* make sure we run task_work before checking for signals */
- ret = io_run_task_work_sig();
- if (ret || io_should_wake(iowq))
- return ret;
- /* let the caller flush overflows, retry */
- if (test_bit(0, &ctx->check_cq_overflow))
- return 1;
-
- *timeout = schedule_timeout(*timeout);
- return !*timeout ? -ETIME : 1;
-}
-
-/*
- * Wait until events become available, if we don't already have some. The
- * application must reap them itself, as they reside on the shared cq ring.
- */
-static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
- const sigset_t __user *sig, size_t sigsz,
- struct __kernel_timespec __user *uts)
-{
- struct io_wait_queue iowq;
- struct io_rings *rings = ctx->rings;
- signed long timeout = MAX_SCHEDULE_TIMEOUT;
- int ret;
-
- do {
- io_cqring_overflow_flush(ctx);
- if (io_cqring_events(ctx) >= min_events)
- return 0;
- if (!io_run_task_work())
- break;
- } while (1);
-
- if (uts) {
- struct timespec64 ts;
-
- if (get_timespec64(&ts, uts))
- return -EFAULT;
- timeout = timespec64_to_jiffies(&ts);
- }
-
- if (sig) {
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall())
- ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
- sigsz);
- else
-#endif
- ret = set_user_sigmask(sig, sigsz);
-
- if (ret)
- return ret;
- }
-
- init_waitqueue_func_entry(&iowq.wq, io_wake_function);
- iowq.wq.private = current;
- INIT_LIST_HEAD(&iowq.wq.entry);
- iowq.ctx = ctx;
- iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
- iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
-
- trace_io_uring_cqring_wait(ctx, min_events);
- do {
- /* if we can't even flush overflow, don't wait for more */
- if (!io_cqring_overflow_flush(ctx)) {
- ret = -EBUSY;
- break;
- }
- prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
- TASK_INTERRUPTIBLE);
- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
- finish_wait(&ctx->cq_wait, &iowq.wq);
- cond_resched();
- } while (ret > 0);
-
- restore_saved_sigmask_unless(ret == -EINTR);
-
- return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
-}
-
-static void io_free_page_table(void **table, size_t size)
-{
- unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
-
- for (i = 0; i < nr_tables; i++)
- kfree(table[i]);
- kfree(table);
-}
-
-static __cold void **io_alloc_page_table(size_t size)
-{
- unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
- size_t init_size = size;
- void **table;
-
- table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
- if (!table)
- return NULL;
-
- for (i = 0; i < nr_tables; i++) {
- unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
-
- table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
- if (!table[i]) {
- io_free_page_table(table, init_size);
- return NULL;
- }
- size -= this_size;
- }
- return table;
-}
-
-static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
-{
- percpu_ref_exit(&ref_node->refs);
- kfree(ref_node);
-}
-
-static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
-{
- struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
- struct io_ring_ctx *ctx = node->rsrc_data->ctx;
- unsigned long flags;
- bool first_add = false;
-
- spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
- node->done = true;
-
- while (!list_empty(&ctx->rsrc_ref_list)) {
- node = list_first_entry(&ctx->rsrc_ref_list,
- struct io_rsrc_node, node);
- /* recycle ref nodes in order */
- if (!node->done)
- break;
- list_del(&node->node);
- first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
- }
- spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
-
- if (first_add)
- mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
-}
-
-static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
-{
- struct io_rsrc_node *ref_node;
-
- ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
- if (!ref_node)
- return NULL;
-
- if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
- 0, GFP_KERNEL)) {
- kfree(ref_node);
- return NULL;
- }
- INIT_LIST_HEAD(&ref_node->node);
- INIT_LIST_HEAD(&ref_node->rsrc_list);
- ref_node->done = false;
- return ref_node;
-}
-
-static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
- struct io_rsrc_data *data_to_kill)
- __must_hold(&ctx->uring_lock)
-{
- WARN_ON_ONCE(!ctx->rsrc_backup_node);
- WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
-
- io_rsrc_refs_drop(ctx);
-
- if (data_to_kill) {
- struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
-
- rsrc_node->rsrc_data = data_to_kill;
- spin_lock_irq(&ctx->rsrc_ref_lock);
- list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
- spin_unlock_irq(&ctx->rsrc_ref_lock);
-
- atomic_inc(&data_to_kill->refs);
- percpu_ref_kill(&rsrc_node->refs);
- ctx->rsrc_node = NULL;
- }
-
- if (!ctx->rsrc_node) {
- ctx->rsrc_node = ctx->rsrc_backup_node;
- ctx->rsrc_backup_node = NULL;
- }
-}
-
-static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
-{
- if (ctx->rsrc_backup_node)
- return 0;
- ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
- return ctx->rsrc_backup_node ? 0 : -ENOMEM;
-}
-
-static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
- struct io_ring_ctx *ctx)
-{
- int ret;
-
- /* As we may drop ->uring_lock, another task may have started quiescing */
- if (data->quiesce)
- return -ENXIO;
-
- data->quiesce = true;
- do {
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- break;
- io_rsrc_node_switch(ctx, data);
-
- /* kill initial ref, already quiesced if zero */
- if (atomic_dec_and_test(&data->refs))
- break;
- mutex_unlock(&ctx->uring_lock);
- flush_delayed_work(&ctx->rsrc_put_work);
- ret = wait_for_completion_interruptible(&data->done);
- if (!ret) {
- mutex_lock(&ctx->uring_lock);
- break;
- }
-
- atomic_inc(&data->refs);
- /* wait for any work that might be completing data->done */
- flush_delayed_work(&ctx->rsrc_put_work);
- reinit_completion(&data->done);
-
- ret = io_run_task_work_sig();
- mutex_lock(&ctx->uring_lock);
- } while (ret >= 0);
- data->quiesce = false;
-
- return ret;
-}
-
-static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
-{
- unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
- unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
-
- return &data->tags[table_idx][off];
-}
-
-static void io_rsrc_data_free(struct io_rsrc_data *data)
-{
- size_t size = data->nr * sizeof(data->tags[0][0]);
-
- if (data->tags)
- io_free_page_table((void **)data->tags, size);
- kfree(data);
-}
-
-static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
- u64 __user *utags, unsigned nr,
- struct io_rsrc_data **pdata)
-{
- struct io_rsrc_data *data;
- int ret = -ENOMEM;
- unsigned i;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
- if (!data->tags) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->nr = nr;
- data->ctx = ctx;
- data->do_put = do_put;
- if (utags) {
- ret = -EFAULT;
- for (i = 0; i < nr; i++) {
- u64 *tag_slot = io_get_tag_slot(data, i);
-
- if (copy_from_user(tag_slot, &utags[i],
- sizeof(*tag_slot)))
- goto fail;
- }
- }
-
- atomic_set(&data->refs, 1);
- init_completion(&data->done);
- *pdata = data;
- return 0;
-fail:
- io_rsrc_data_free(data);
- return ret;
-}
-
-static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
-{
- table->files = kvcalloc(nr_files, sizeof(table->files[0]),
- GFP_KERNEL_ACCOUNT);
- return !!table->files;
-}
-
-static void io_free_file_tables(struct io_file_table *table)
-{
- kvfree(table->files);
- table->files = NULL;
-}
-
-static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
-{
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
- kfree_skb(skb);
- }
-#else
- int i;
-
- for (i = 0; i < ctx->nr_user_files; i++) {
- struct file *file;
-
- file = io_file_from_index(ctx, i);
- if (file)
- fput(file);
- }
-#endif
- io_free_file_tables(&ctx->file_table);
- io_rsrc_data_free(ctx->file_data);
- ctx->file_data = NULL;
- ctx->nr_user_files = 0;
-}
-
-static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
-{
- int ret;
-
- if (!ctx->file_data)
- return -ENXIO;
- ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
- if (!ret)
- __io_sqe_files_unregister(ctx);
- return ret;
-}
-
-static void io_sq_thread_unpark(struct io_sq_data *sqd)
- __releases(&sqd->lock)
-{
- WARN_ON_ONCE(sqd->thread == current);
-
- /*
- * Do the dance, but don't do a conditional clear_bit() because it'd race
- * with other threads incrementing park_pending and setting the bit.
- */
- clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
- if (atomic_dec_return(&sqd->park_pending))
- set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
- mutex_unlock(&sqd->lock);
-}
-
-static void io_sq_thread_park(struct io_sq_data *sqd)
- __acquires(&sqd->lock)
-{
- WARN_ON_ONCE(sqd->thread == current);
-
- atomic_inc(&sqd->park_pending);
- set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
- mutex_lock(&sqd->lock);
- if (sqd->thread)
- wake_up_process(sqd->thread);
-}
-
-static void io_sq_thread_stop(struct io_sq_data *sqd)
-{
- WARN_ON_ONCE(sqd->thread == current);
- WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
-
- set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
- mutex_lock(&sqd->lock);
- if (sqd->thread)
- wake_up_process(sqd->thread);
- mutex_unlock(&sqd->lock);
- wait_for_completion(&sqd->exited);
-}
-
-static void io_put_sq_data(struct io_sq_data *sqd)
-{
- if (refcount_dec_and_test(&sqd->refs)) {
- WARN_ON_ONCE(atomic_read(&sqd->park_pending));
-
- io_sq_thread_stop(sqd);
- kfree(sqd);
- }
-}
-
-static void io_sq_thread_finish(struct io_ring_ctx *ctx)
-{
- struct io_sq_data *sqd = ctx->sq_data;
-
- if (sqd) {
- io_sq_thread_park(sqd);
- list_del_init(&ctx->sqd_list);
- io_sqd_update_thread_idle(sqd);
- io_sq_thread_unpark(sqd);
-
- io_put_sq_data(sqd);
- ctx->sq_data = NULL;
- }
-}
-
-static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
-{
- struct io_ring_ctx *ctx_attach;
- struct io_sq_data *sqd;
- struct fd f;
-
- f = fdget(p->wq_fd);
- if (!f.file)
- return ERR_PTR(-ENXIO);
- if (f.file->f_op != &io_uring_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- ctx_attach = f.file->private_data;
- sqd = ctx_attach->sq_data;
- if (!sqd) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
- if (sqd->task_tgid != current->tgid) {
- fdput(f);
- return ERR_PTR(-EPERM);
- }
-
- refcount_inc(&sqd->refs);
- fdput(f);
- return sqd;
-}
-
-static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
- bool *attached)
-{
- struct io_sq_data *sqd;
-
- *attached = false;
- if (p->flags & IORING_SETUP_ATTACH_WQ) {
- sqd = io_attach_sq_data(p);
- if (!IS_ERR(sqd)) {
- *attached = true;
- return sqd;
- }
- /* fall through for EPERM case, setup new sqd/task */
- if (PTR_ERR(sqd) != -EPERM)
- return sqd;
- }
-
- sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
- if (!sqd)
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&sqd->park_pending, 0);
- refcount_set(&sqd->refs, 1);
- INIT_LIST_HEAD(&sqd->ctx_list);
- mutex_init(&sqd->lock);
- init_waitqueue_head(&sqd->wait);
- init_completion(&sqd->exited);
- return sqd;
-}
-
-#if defined(CONFIG_UNIX)
-/*
- * Ensure the UNIX gc is aware of our file set, so we are certain that
- * the io_uring can be safely unregistered on process exit, even if we have
- * reference loops among the files.
- */
-static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
-{
- struct sock *sk = ctx->ring_sock->sk;
- struct scm_fp_list *fpl;
- struct sk_buff *skb;
- int i, nr_files;
-
- fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
- if (!fpl)
- return -ENOMEM;
-
- skb = alloc_skb(0, GFP_KERNEL);
- if (!skb) {
- kfree(fpl);
- return -ENOMEM;
- }
-
- skb->sk = sk;
-
- nr_files = 0;
- fpl->user = get_uid(current_user());
- for (i = 0; i < nr; i++) {
- struct file *file = io_file_from_index(ctx, i + offset);
-
- if (!file)
- continue;
- fpl->fp[nr_files] = get_file(file);
- unix_inflight(fpl->user, fpl->fp[nr_files]);
- nr_files++;
- }
-
- if (nr_files) {
- fpl->max = SCM_MAX_FD;
- fpl->count = nr_files;
- UNIXCB(skb).fp = fpl;
- skb->destructor = unix_destruct_scm;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_queue_head(&sk->sk_receive_queue, skb);
-
- for (i = 0; i < nr_files; i++)
- fput(fpl->fp[i]);
- } else {
- kfree_skb(skb);
- kfree(fpl);
- }
-
- return 0;
-}
-
-/*
- * If UNIX sockets are enabled, fd passing can cause a reference cycle which
- * causes regular reference counting to break down. We rely on the UNIX
- * garbage collector to take care of this problem for us.
- */
-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
-{
- unsigned left, total;
- int ret = 0;
-
- total = 0;
- left = ctx->nr_user_files;
- while (left) {
- unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
-
- ret = __io_sqe_files_scm(ctx, this_files, total);
- if (ret)
- break;
- left -= this_files;
- total += this_files;
- }
-
- if (!ret)
- return 0;
-
- while (total < ctx->nr_user_files) {
- struct file *file = io_file_from_index(ctx, total);
-
- if (file)
- fput(file);
- total++;
- }
-
- return ret;
-}
-#else
-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
-{
- return 0;
-}
-#endif
-
-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
-{
- struct file *file = prsrc->file;
-#if defined(CONFIG_UNIX)
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff_head list, *head = &sock->sk_receive_queue;
- struct sk_buff *skb;
- int i;
-
- __skb_queue_head_init(&list);
-
- /*
- * Find the skb that holds this file in its SCM_RIGHTS. When found,
- * remove this entry and rearrange the file array.
- */
- skb = skb_dequeue(head);
- while (skb) {
- struct scm_fp_list *fp;
-
- fp = UNIXCB(skb).fp;
- for (i = 0; i < fp->count; i++) {
- int left;
-
- if (fp->fp[i] != file)
- continue;
-
- unix_notinflight(fp->user, fp->fp[i]);
- left = fp->count - 1 - i;
- if (left) {
- memmove(&fp->fp[i], &fp->fp[i + 1],
- left * sizeof(struct file *));
- }
- fp->count--;
- if (!fp->count) {
- kfree_skb(skb);
- skb = NULL;
- } else {
- __skb_queue_tail(&list, skb);
- }
- fput(file);
- file = NULL;
- break;
- }
-
- if (!file)
- break;
-
- __skb_queue_tail(&list, skb);
-
- skb = skb_dequeue(head);
- }
-
- if (skb_peek(&list)) {
- spin_lock_irq(&head->lock);
- while ((skb = __skb_dequeue(&list)) != NULL)
- __skb_queue_tail(head, skb);
- spin_unlock_irq(&head->lock);
- }
-#else
- fput(file);
-#endif
-}
-
-static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
-{
- struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
- struct io_ring_ctx *ctx = rsrc_data->ctx;
- struct io_rsrc_put *prsrc, *tmp;
-
- list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
- list_del(&prsrc->list);
-
- if (prsrc->tag) {
- bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
-
- io_ring_submit_lock(ctx, lock_ring);
- spin_lock(&ctx->completion_lock);
- io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
- ctx->cq_extra++;
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
- io_ring_submit_unlock(ctx, lock_ring);
- }
-
- rsrc_data->do_put(ctx, prsrc);
- kfree(prsrc);
- }
-
- io_rsrc_node_destroy(ref_node);
- if (atomic_dec_and_test(&rsrc_data->refs))
- complete(&rsrc_data->done);
-}
-
-static void io_rsrc_put_work(struct work_struct *work)
-{
- struct io_ring_ctx *ctx;
- struct llist_node *node;
-
- ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
- node = llist_del_all(&ctx->rsrc_put_llist);
-
- while (node) {
- struct io_rsrc_node *ref_node;
- struct llist_node *next = node->next;
-
- ref_node = llist_entry(node, struct io_rsrc_node, llist);
- __io_rsrc_put_work(ref_node);
- node = next;
- }
-}
-
-static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
- unsigned nr_args, u64 __user *tags)
-{
- __s32 __user *fds = (__s32 __user *) arg;
- struct file *file;
- int fd, ret;
- unsigned i;
-
- if (ctx->file_data)
- return -EBUSY;
- if (!nr_args)
- return -EINVAL;
- if (nr_args > IORING_MAX_FIXED_FILES)
- return -EMFILE;
- if (nr_args > rlimit(RLIMIT_NOFILE))
- return -EMFILE;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- return ret;
- ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
- &ctx->file_data);
- if (ret)
- return ret;
-
- ret = -ENOMEM;
- if (!io_alloc_file_tables(&ctx->file_table, nr_args))
- goto out_free;
-
- for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
- if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
- ret = -EFAULT;
- goto out_fput;
- }
- /* allow sparse sets */
- if (fd == -1) {
- ret = -EINVAL;
- if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
- goto out_fput;
- continue;
- }
-
- file = fget(fd);
- ret = -EBADF;
- if (unlikely(!file))
- goto out_fput;
-
- /*
- * Don't allow io_uring instances to be registered. If UNIX
- * isn't enabled, then this causes a reference cycle and this
- * instance can never get freed. If UNIX is enabled we'll
- * handle it just fine, but there's still no point in allowing
- * a ring fd as it doesn't support regular read/write anyway.
- */
- if (file->f_op == &io_uring_fops) {
- fput(file);
- goto out_fput;
- }
- io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
- }
-
- ret = io_sqe_files_scm(ctx);
- if (ret) {
- __io_sqe_files_unregister(ctx);
- return ret;
- }
-
- io_rsrc_node_switch(ctx, NULL);
- return ret;
-out_fput:
- for (i = 0; i < ctx->nr_user_files; i++) {
- file = io_file_from_index(ctx, i);
- if (file)
- fput(file);
- }
- io_free_file_tables(&ctx->file_table);
- ctx->nr_user_files = 0;
-out_free:
- io_rsrc_data_free(ctx->file_data);
- ctx->file_data = NULL;
- return ret;
-}
-
-static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
- int index)
-{
-#if defined(CONFIG_UNIX)
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff_head *head = &sock->sk_receive_queue;
- struct sk_buff *skb;
-
- /*
- * See if we can merge this file into an existing skb SCM_RIGHTS
- * file set. If there's no room, fall back to allocating a new skb
- * and filling it in.
- */
- spin_lock_irq(&head->lock);
- skb = skb_peek(head);
- if (skb) {
- struct scm_fp_list *fpl = UNIXCB(skb).fp;
-
- if (fpl->count < SCM_MAX_FD) {
- __skb_unlink(skb, head);
- spin_unlock_irq(&head->lock);
- fpl->fp[fpl->count] = get_file(file);
- unix_inflight(fpl->user, fpl->fp[fpl->count]);
- fpl->count++;
- spin_lock_irq(&head->lock);
- __skb_queue_head(head, skb);
- } else {
- skb = NULL;
- }
- }
- spin_unlock_irq(&head->lock);
-
- if (skb) {
- fput(file);
- return 0;
- }
-
- return __io_sqe_files_scm(ctx, 1, index);
-#else
- return 0;
-#endif
-}
-
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
- struct io_rsrc_node *node, void *rsrc)
-{
- struct io_rsrc_put *prsrc;
-
- prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
- if (!prsrc)
- return -ENOMEM;
-
- prsrc->tag = *io_get_tag_slot(data, idx);
- prsrc->rsrc = rsrc;
- list_add(&prsrc->list, &node->rsrc_list);
- return 0;
-}
-
-static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
- unsigned int issue_flags, u32 slot_index)
-{
- struct io_ring_ctx *ctx = req->ctx;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
- bool needs_switch = false;
- struct io_fixed_file *file_slot;
- int ret = -EBADF;
-
- io_ring_submit_lock(ctx, needs_lock);
- if (file->f_op == &io_uring_fops)
- goto err;
- ret = -ENXIO;
- if (!ctx->file_data)
- goto err;
- ret = -EINVAL;
- if (slot_index >= ctx->nr_user_files)
- goto err;
-
- slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
-
- if (file_slot->file_ptr) {
- struct file *old_file;
-
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- goto err;
-
- old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
- ctx->rsrc_node, old_file);
- if (ret)
- goto err;
- file_slot->file_ptr = 0;
- needs_switch = true;
- }
-
- *io_get_tag_slot(ctx->file_data, slot_index) = 0;
- io_fixed_file_set(file_slot, file);
- ret = io_sqe_file_register(ctx, file, slot_index);
- if (ret) {
- file_slot->file_ptr = 0;
- goto err;
- }
-
- ret = 0;
-err:
- if (needs_switch)
- io_rsrc_node_switch(ctx, ctx->file_data);
- io_ring_submit_unlock(ctx, needs_lock);
- if (ret)
- fput(file);
- return ret;
-}
-
-static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
-{
- unsigned int offset = req->close.file_slot - 1;
- struct io_ring_ctx *ctx = req->ctx;
- bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
- struct io_fixed_file *file_slot;
- struct file *file;
- int ret, i;
-
- io_ring_submit_lock(ctx, needs_lock);
- ret = -ENXIO;
- if (unlikely(!ctx->file_data))
- goto out;
- ret = -EINVAL;
- if (offset >= ctx->nr_user_files)
- goto out;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- goto out;
-
- i = array_index_nospec(offset, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, i);
- ret = -EBADF;
- if (!file_slot->file_ptr)
- goto out;
-
- file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
- if (ret)
- goto out;
-
- file_slot->file_ptr = 0;
- io_rsrc_node_switch(ctx, ctx->file_data);
- ret = 0;
-out:
- io_ring_submit_unlock(ctx, needs_lock);
- return ret;
-}
-
-static int __io_sqe_files_update(struct io_ring_ctx *ctx,
- struct io_uring_rsrc_update2 *up,
- unsigned nr_args)
-{
- u64 __user *tags = u64_to_user_ptr(up->tags);
- __s32 __user *fds = u64_to_user_ptr(up->data);
- struct io_rsrc_data *data = ctx->file_data;
- struct io_fixed_file *file_slot;
- struct file *file;
- int fd, i, err = 0;
- unsigned int done;
- bool needs_switch = false;
-
- if (!ctx->file_data)
- return -ENXIO;
- if (up->offset + nr_args > ctx->nr_user_files)
- return -EINVAL;
-
- for (done = 0; done < nr_args; done++) {
- u64 tag = 0;
-
- if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
- copy_from_user(&fd, &fds[done], sizeof(fd))) {
- err = -EFAULT;
- break;
- }
- if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
- err = -EINVAL;
- break;
- }
- if (fd == IORING_REGISTER_FILES_SKIP)
- continue;
-
- i = array_index_nospec(up->offset + done, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, i);
-
- if (file_slot->file_ptr) {
- file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- err = io_queue_rsrc_removal(data, up->offset + done,
- ctx->rsrc_node, file);
- if (err)
- break;
- file_slot->file_ptr = 0;
- needs_switch = true;
- }
- if (fd != -1) {
- file = fget(fd);
- if (!file) {
- err = -EBADF;
- break;
- }
- /*
- * Don't allow io_uring instances to be registered. If
- * UNIX isn't enabled, then this causes a reference
- * cycle and this instance can never get freed. If UNIX
- * is enabled we'll handle it just fine, but there's
- * still no point in allowing a ring fd as it doesn't
- * support regular read/write anyway.
- */
- if (file->f_op == &io_uring_fops) {
- fput(file);
- err = -EBADF;
- break;
- }
- *io_get_tag_slot(data, up->offset + done) = tag;
- io_fixed_file_set(file_slot, file);
- err = io_sqe_file_register(ctx, file, i);
- if (err) {
- file_slot->file_ptr = 0;
- fput(file);
- break;
- }
- }
- }
-
- if (needs_switch)
- io_rsrc_node_switch(ctx, data);
- return done ? done : err;
-}
-
-static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
- struct task_struct *task)
-{
- struct io_wq_hash *hash;
- struct io_wq_data data;
- unsigned int concurrency;
-
- mutex_lock(&ctx->uring_lock);
- hash = ctx->hash_map;
- if (!hash) {
- hash = kzalloc(sizeof(*hash), GFP_KERNEL);
- if (!hash) {
- mutex_unlock(&ctx->uring_lock);
- return ERR_PTR(-ENOMEM);
- }
- refcount_set(&hash->refs, 1);
- init_waitqueue_head(&hash->wait);
- ctx->hash_map = hash;
- }
- mutex_unlock(&ctx->uring_lock);
-
- data.hash = hash;
- data.task = task;
- data.free_work = io_wq_free_work;
- data.do_work = io_wq_submit_work;
-
- /* Do QD, or 4 * CPUS, whatever is smallest */
- concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-
- return io_wq_create(concurrency, &data);
-}
-
-static __cold int io_uring_alloc_task_context(struct task_struct *task,
- struct io_ring_ctx *ctx)
-{
- struct io_uring_task *tctx;
- int ret;
-
- tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
- if (unlikely(!tctx))
- return -ENOMEM;
-
- ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
- if (unlikely(ret)) {
- kfree(tctx);
- return ret;
- }
-
- tctx->io_wq = io_init_wq_offload(ctx, task);
- if (IS_ERR(tctx->io_wq)) {
- ret = PTR_ERR(tctx->io_wq);
- percpu_counter_destroy(&tctx->inflight);
- kfree(tctx);
- return ret;
- }
-
- xa_init(&tctx->xa);
- init_waitqueue_head(&tctx->wait);
- atomic_set(&tctx->in_idle, 0);
- atomic_set(&tctx->inflight_tracked, 0);
- task->io_uring = tctx;
- spin_lock_init(&tctx->task_lock);
- INIT_WQ_LIST(&tctx->task_list);
- init_task_work(&tctx->task_work, tctx_task_work);
- return 0;
-}
-
-void __io_uring_free(struct task_struct *tsk)
-{
- struct io_uring_task *tctx = tsk->io_uring;
-
- WARN_ON_ONCE(!xa_empty(&tctx->xa));
- WARN_ON_ONCE(tctx->io_wq);
- WARN_ON_ONCE(tctx->cached_refs);
-
- percpu_counter_destroy(&tctx->inflight);
- kfree(tctx);
- tsk->io_uring = NULL;
-}
-
-static __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
- struct io_uring_params *p)
-{
- int ret;
-
- /* Retain compatibility with failing for an invalid attach attempt */
- if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
- IORING_SETUP_ATTACH_WQ) {
- struct fd f;
-
- f = fdget(p->wq_fd);
- if (!f.file)
- return -ENXIO;
- if (f.file->f_op != &io_uring_fops) {
- fdput(f);
- return -EINVAL;
- }
- fdput(f);
- }
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- struct task_struct *tsk;
- struct io_sq_data *sqd;
- bool attached;
-
- ret = security_uring_sqpoll();
- if (ret)
- return ret;
-
- sqd = io_get_sq_data(p, &attached);
- if (IS_ERR(sqd)) {
- ret = PTR_ERR(sqd);
- goto err;
- }
-
- ctx->sq_creds = get_current_cred();
- ctx->sq_data = sqd;
- ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
- if (!ctx->sq_thread_idle)
- ctx->sq_thread_idle = HZ;
-
- io_sq_thread_park(sqd);
- list_add(&ctx->sqd_list, &sqd->ctx_list);
- io_sqd_update_thread_idle(sqd);
- /* don't attach to a dying SQPOLL thread, would be racy */
- ret = (attached && !sqd->thread) ? -ENXIO : 0;
- io_sq_thread_unpark(sqd);
-
- if (ret < 0)
- goto err;
- if (attached)
- return 0;
-
- if (p->flags & IORING_SETUP_SQ_AFF) {
- int cpu = p->sq_thread_cpu;
-
- ret = -EINVAL;
- if (cpu >= nr_cpu_ids || !cpu_online(cpu))
- goto err_sqpoll;
- sqd->sq_cpu = cpu;
- } else {
- sqd->sq_cpu = -1;
- }
-
- sqd->task_pid = current->pid;
- sqd->task_tgid = current->tgid;
- tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
- if (IS_ERR(tsk)) {
- ret = PTR_ERR(tsk);
- goto err_sqpoll;
- }
-
- sqd->thread = tsk;
- ret = io_uring_alloc_task_context(tsk, ctx);
- wake_up_new_task(tsk);
- if (ret)
- goto err;
- } else if (p->flags & IORING_SETUP_SQ_AFF) {
- /* Can't have SQ_AFF without SQPOLL */
- ret = -EINVAL;
- goto err;
- }
-
- return 0;
-err_sqpoll:
- complete(&ctx->sq_data->exited);
-err:
- io_sq_thread_finish(ctx);
- return ret;
-}
-
-static inline void __io_unaccount_mem(struct user_struct *user,
- unsigned long nr_pages)
-{
- atomic_long_sub(nr_pages, &user->locked_vm);
-}
-
-static inline int __io_account_mem(struct user_struct *user,
- unsigned long nr_pages)
-{
- unsigned long page_limit, cur_pages, new_pages;
-
- /* Don't allow more pages than we can safely lock */
- page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- do {
- cur_pages = atomic_long_read(&user->locked_vm);
- new_pages = cur_pages + nr_pages;
- if (new_pages > page_limit)
- return -ENOMEM;
- } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
- new_pages) != cur_pages);
-
- return 0;
-}
-
-static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
-{
- if (ctx->user)
- __io_unaccount_mem(ctx->user, nr_pages);
-
- if (ctx->mm_account)
- atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
-}
-
-static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
-{
- int ret;
-
- if (ctx->user) {
- ret = __io_account_mem(ctx->user, nr_pages);
- if (ret)
- return ret;
- }
-
- if (ctx->mm_account)
- atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
-
- return 0;
-}
-
-static void io_mem_free(void *ptr)
-{
- struct page *page;
-
- if (!ptr)
- return;
-
- page = virt_to_head_page(ptr);
- if (put_page_testzero(page))
- free_compound_page(page);
-}
-
-static void *io_mem_alloc(size_t size)
-{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
- __GFP_NORETRY | __GFP_ACCOUNT;
-
- return (void *) __get_free_pages(gfp_flags, get_order(size));
-}
-
-static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
- size_t *sq_offset)
-{
- struct io_rings *rings;
- size_t off, sq_array_size;
-
- off = struct_size(rings, cqes, cq_entries);
- if (off == SIZE_MAX)
- return SIZE_MAX;
-
-#ifdef CONFIG_SMP
- off = ALIGN(off, SMP_CACHE_BYTES);
- if (off == 0)
- return SIZE_MAX;
-#endif
-
- if (sq_offset)
- *sq_offset = off;
-
- sq_array_size = array_size(sizeof(u32), sq_entries);
- if (sq_array_size == SIZE_MAX)
- return SIZE_MAX;
-
- if (check_add_overflow(off, sq_array_size, &off))
- return SIZE_MAX;
-
- return off;
-}
-
-static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
-{
- struct io_mapped_ubuf *imu = *slot;
- unsigned int i;
-
- if (imu != ctx->dummy_ubuf) {
- for (i = 0; i < imu->nr_bvecs; i++)
- unpin_user_page(imu->bvec[i].bv_page);
- if (imu->acct_pages)
- io_unaccount_mem(ctx, imu->acct_pages);
- kvfree(imu);
- }
- *slot = NULL;
-}
-
-static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
-{
- io_buffer_unmap(ctx, &prsrc->buf);
- prsrc->buf = NULL;
-}
-
-static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
-{
- unsigned int i;
-
- for (i = 0; i < ctx->nr_user_bufs; i++)
- io_buffer_unmap(ctx, &ctx->user_bufs[i]);
- kfree(ctx->user_bufs);
- io_rsrc_data_free(ctx->buf_data);
- ctx->user_bufs = NULL;
- ctx->buf_data = NULL;
- ctx->nr_user_bufs = 0;
-}
-
-static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
-{
- int ret;
-
- if (!ctx->buf_data)
- return -ENXIO;
-
- ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
- if (!ret)
- __io_sqe_buffers_unregister(ctx);
- return ret;
-}
-
-static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
- void __user *arg, unsigned index)
-{
- struct iovec __user *src;
-
-#ifdef CONFIG_COMPAT
- if (ctx->compat) {
- struct compat_iovec __user *ciovs;
- struct compat_iovec ciov;
-
- ciovs = (struct compat_iovec __user *) arg;
- if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
- return -EFAULT;
-
- dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
- dst->iov_len = ciov.iov_len;
- return 0;
- }
-#endif
- src = (struct iovec __user *) arg;
- if (copy_from_user(dst, &src[index], sizeof(*dst)))
- return -EFAULT;
- return 0;
-}
-
-/*
- * Not super efficient, but this only happens at registration time. And we do cache
- * the last compound head, so generally we'll only do a full search if we don't
- * match that one.
- *
- * We check if the given compound head page has already been accounted, to
- * avoid double accounting it. This allows us to account the full size of the
- * page, not just the constituent pages of a huge page.
- */
-static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
- int nr_pages, struct page *hpage)
-{
- int i, j;
-
- /* check current page array */
- for (i = 0; i < nr_pages; i++) {
- if (!PageCompound(pages[i]))
- continue;
- if (compound_head(pages[i]) == hpage)
- return true;
- }
-
- /* check previously registered pages */
- for (i = 0; i < ctx->nr_user_bufs; i++) {
- struct io_mapped_ubuf *imu = ctx->user_bufs[i];
-
- for (j = 0; j < imu->nr_bvecs; j++) {
- if (!PageCompound(imu->bvec[j].bv_page))
- continue;
- if (compound_head(imu->bvec[j].bv_page) == hpage)
- return true;
- }
- }
-
- return false;
-}
-
-static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
- int nr_pages, struct io_mapped_ubuf *imu,
- struct page **last_hpage)
-{
- int i, ret;
-
- imu->acct_pages = 0;
- for (i = 0; i < nr_pages; i++) {
- if (!PageCompound(pages[i])) {
- imu->acct_pages++;
- } else {
- struct page *hpage;
-
- hpage = compound_head(pages[i]);
- if (hpage == *last_hpage)
- continue;
- *last_hpage = hpage;
- if (headpage_already_acct(ctx, pages, i, hpage))
- continue;
- imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
- }
- }
-
- if (!imu->acct_pages)
- return 0;
-
- ret = io_account_mem(ctx, imu->acct_pages);
- if (ret)
- imu->acct_pages = 0;
- return ret;
-}
-
-static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
- struct io_mapped_ubuf **pimu,
- struct page **last_hpage)
-{
- struct io_mapped_ubuf *imu = NULL;
- struct vm_area_struct **vmas = NULL;
- struct page **pages = NULL;
- unsigned long off, start, end, ubuf;
- size_t size;
- int ret, pret, nr_pages, i;
-
- if (!iov->iov_base) {
- *pimu = ctx->dummy_ubuf;
- return 0;
- }
-
- ubuf = (unsigned long) iov->iov_base;
- end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- start = ubuf >> PAGE_SHIFT;
- nr_pages = end - start;
-
- *pimu = NULL;
- ret = -ENOMEM;
-
- pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- goto done;
-
- vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
- GFP_KERNEL);
- if (!vmas)
- goto done;
-
- imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
- if (!imu)
- goto done;
-
- ret = 0;
- mmap_read_lock(current->mm);
- pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
- pages, vmas);
- if (pret == nr_pages) {
- /* don't support file backed memory */
- for (i = 0; i < nr_pages; i++) {
- struct vm_area_struct *vma = vmas[i];
-
- if (vma_is_shmem(vma))
- continue;
- if (vma->vm_file &&
- !is_file_hugepages(vma->vm_file)) {
- ret = -EOPNOTSUPP;
- break;
- }
- }
- } else {
- ret = pret < 0 ? pret : -EFAULT;
- }
- mmap_read_unlock(current->mm);
- if (ret) {
- /*
- * if we did partial map, or found file backed vmas,
- * release any pages we did get
- */
- if (pret > 0)
- unpin_user_pages(pages, pret);
- goto done;
- }
-
- ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
- if (ret) {
- unpin_user_pages(pages, pret);
- goto done;
- }
-
- off = ubuf & ~PAGE_MASK;
- size = iov->iov_len;
- for (i = 0; i < nr_pages; i++) {
- size_t vec_len;
-
- vec_len = min_t(size_t, size, PAGE_SIZE - off);
- imu->bvec[i].bv_page = pages[i];
- imu->bvec[i].bv_len = vec_len;
- imu->bvec[i].bv_offset = off;
- off = 0;
- size -= vec_len;
- }
- /* store original address for later verification */
- imu->ubuf = ubuf;
- imu->ubuf_end = ubuf + iov->iov_len;
- imu->nr_bvecs = nr_pages;
- *pimu = imu;
- ret = 0;
-done:
- if (ret)
- kvfree(imu);
- kvfree(pages);
- kvfree(vmas);
- return ret;
-}
-
-static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
-{
- ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
- return ctx->user_bufs ? 0 : -ENOMEM;
-}
-
-static int io_buffer_validate(struct iovec *iov)
-{
- unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
-
- /*
- * Don't impose further limits on the size and buffer
- * constraints here, we'll -EINVAL later when IO is
- * submitted if they are wrong.
- */
- if (!iov->iov_base)
- return iov->iov_len ? -EFAULT : 0;
- if (!iov->iov_len)
- return -EFAULT;
-
- /* arbitrary limit, but we need something */
- if (iov->iov_len > SZ_1G)
- return -EFAULT;
-
- if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
- return -EOVERFLOW;
-
- return 0;
-}
-
-static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int nr_args, u64 __user *tags)
-{
- struct page *last_hpage = NULL;
- struct io_rsrc_data *data;
- int i, ret;
- struct iovec iov;
-
- if (ctx->user_bufs)
- return -EBUSY;
- if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
- return -EINVAL;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- return ret;
- ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
- if (ret)
- return ret;
- ret = io_buffers_map_alloc(ctx, nr_args);
- if (ret) {
- io_rsrc_data_free(data);
- return ret;
- }
-
- for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
- ret = io_copy_iov(ctx, &iov, arg, i);
- if (ret)
- break;
- ret = io_buffer_validate(&iov);
- if (ret)
- break;
- if (!iov.iov_base && *io_get_tag_slot(data, i)) {
- ret = -EINVAL;
- break;
- }
-
- ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
- &last_hpage);
- if (ret)
- break;
- }
-
- WARN_ON_ONCE(ctx->buf_data);
-
- ctx->buf_data = data;
- if (ret)
- __io_sqe_buffers_unregister(ctx);
- else
- io_rsrc_node_switch(ctx, NULL);
- return ret;
-}
-
-static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
- struct io_uring_rsrc_update2 *up,
- unsigned int nr_args)
-{
- u64 __user *tags = u64_to_user_ptr(up->tags);
- struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
- struct page *last_hpage = NULL;
- bool needs_switch = false;
- __u32 done;
- int i, err;
-
- if (!ctx->buf_data)
- return -ENXIO;
- if (up->offset + nr_args > ctx->nr_user_bufs)
- return -EINVAL;
-
- for (done = 0; done < nr_args; done++) {
- struct io_mapped_ubuf *imu;
- int offset = up->offset + done;
- u64 tag = 0;
-
- err = io_copy_iov(ctx, &iov, iovs, done);
- if (err)
- break;
- if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
- err = -EFAULT;
- break;
- }
- err = io_buffer_validate(&iov);
- if (err)
- break;
- if (!iov.iov_base && tag) {
- err = -EINVAL;
- break;
- }
- err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
- if (err)
- break;
-
- i = array_index_nospec(offset, ctx->nr_user_bufs);
- if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
- err = io_queue_rsrc_removal(ctx->buf_data, offset,
- ctx->rsrc_node, ctx->user_bufs[i]);
- if (unlikely(err)) {
- io_buffer_unmap(ctx, &imu);
- break;
- }
- ctx->user_bufs[i] = NULL;
- needs_switch = true;
- }
-
- ctx->user_bufs[i] = imu;
- *io_get_tag_slot(ctx->buf_data, offset) = tag;
- }
-
- if (needs_switch)
- io_rsrc_node_switch(ctx, ctx->buf_data);
- return done ? done : err;
-}
-
-static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
-{
- __s32 __user *fds = arg;
- int fd;
-
- if (ctx->cq_ev_fd)
- return -EBUSY;
-
- if (copy_from_user(&fd, fds, sizeof(*fds)))
- return -EFAULT;
-
- ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
- if (IS_ERR(ctx->cq_ev_fd)) {
- int ret = PTR_ERR(ctx->cq_ev_fd);
-
- ctx->cq_ev_fd = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int io_eventfd_unregister(struct io_ring_ctx *ctx)
-{
- if (ctx->cq_ev_fd) {
- eventfd_ctx_put(ctx->cq_ev_fd);
- ctx->cq_ev_fd = NULL;
- return 0;
- }
-
- return -ENXIO;
-}
-
-static void io_destroy_buffers(struct io_ring_ctx *ctx)
-{
- struct io_buffer *buf;
- unsigned long index;
-
- xa_for_each(&ctx->io_buffers, index, buf) {
- __io_remove_buffers(ctx, buf, index, -1U);
- cond_resched();
- }
-}
-
-static void io_req_caches_free(struct io_ring_ctx *ctx)
-{
- struct io_submit_state *state = &ctx->submit_state;
- int nr = 0;
-
- mutex_lock(&ctx->uring_lock);
- io_flush_cached_locked_reqs(ctx, state);
-
- while (state->free_list.next) {
- struct io_wq_work_node *node;
- struct io_kiocb *req;
-
- node = wq_stack_extract(&state->free_list);
- req = container_of(node, struct io_kiocb, comp_list);
- kmem_cache_free(req_cachep, req);
- nr++;
- }
- if (nr)
- percpu_ref_put_many(&ctx->refs, nr);
- mutex_unlock(&ctx->uring_lock);
-}
-
-static void io_wait_rsrc_data(struct io_rsrc_data *data)
-{
- if (data && !atomic_dec_and_test(&data->refs))
- wait_for_completion(&data->done);
-}
-
-static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
-{
- io_sq_thread_finish(ctx);
-
- if (ctx->mm_account) {
- mmdrop(ctx->mm_account);
- ctx->mm_account = NULL;
- }
-
- io_rsrc_refs_drop(ctx);
- /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
- io_wait_rsrc_data(ctx->buf_data);
- io_wait_rsrc_data(ctx->file_data);
-
- mutex_lock(&ctx->uring_lock);
- if (ctx->buf_data)
- __io_sqe_buffers_unregister(ctx);
- if (ctx->file_data)
- __io_sqe_files_unregister(ctx);
- if (ctx->rings)
- __io_cqring_overflow_flush(ctx, true);
- mutex_unlock(&ctx->uring_lock);
- io_eventfd_unregister(ctx);
- io_destroy_buffers(ctx);
- if (ctx->sq_creds)
- put_cred(ctx->sq_creds);
-
- /* there are no registered resources left, nobody uses it */
- if (ctx->rsrc_node)
- io_rsrc_node_destroy(ctx->rsrc_node);
- if (ctx->rsrc_backup_node)
- io_rsrc_node_destroy(ctx->rsrc_backup_node);
- flush_delayed_work(&ctx->rsrc_put_work);
- flush_delayed_work(&ctx->fallback_work);
-
- WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
- WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
-
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- ctx->ring_sock->file = NULL; /* so that iput() is called */
- sock_release(ctx->ring_sock);
- }
-#endif
- WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
-
- io_mem_free(ctx->rings);
- io_mem_free(ctx->sq_sqes);
-
- percpu_ref_exit(&ctx->refs);
- free_uid(ctx->user);
- io_req_caches_free(ctx);
- if (ctx->hash_map)
- io_wq_put_hash(ctx->hash_map);
- kfree(ctx->cancel_hash);
- kfree(ctx->dummy_ubuf);
- kfree(ctx);
-}
-
-static __poll_t io_uring_poll(struct file *file, poll_table *wait)
-{
- struct io_ring_ctx *ctx = file->private_data;
- __poll_t mask = 0;
-
- poll_wait(file, &ctx->cq_wait, wait);
- /*
- * synchronizes with barrier from wq_has_sleeper call in
- * io_commit_cqring
- */
- smp_rmb();
- if (!io_sqring_full(ctx))
- mask |= EPOLLOUT | EPOLLWRNORM;
-
- /*
- * Don't flush cqring overflow list here, just do a simple check.
- * Otherwise there could possibly be an ABBA deadlock:
- *      CPU0                    CPU1
- *      ----                    ----
- * lock(&ctx->uring_lock);
- *                              lock(&ep->mtx);
- *                              lock(&ctx->uring_lock);
- * lock(&ep->mtx);
- *
- * Users may get EPOLLIN meanwhile seeing nothing in the cqring, which
- * pushes them to do the flush.
- */
- if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
- mask |= EPOLLIN | EPOLLRDNORM;
-
- return mask;
-}
-
-static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
-{
- const struct cred *creds;
-
- creds = xa_erase(&ctx->personalities, id);
- if (creds) {
- put_cred(creds);
- return 0;
- }
-
- return -EINVAL;
-}
-
-struct io_tctx_exit {
- struct callback_head task_work;
- struct completion completion;
- struct io_ring_ctx *ctx;
-};
-
-static __cold void io_tctx_exit_cb(struct callback_head *cb)
-{
- struct io_uring_task *tctx = current->io_uring;
- struct io_tctx_exit *work;
-
- work = container_of(cb, struct io_tctx_exit, task_work);
- /*
- * When @in_idle, we're in cancellation and it's racy to remove the
- * node. It'll be removed by the end of cancellation, just ignore it.
- */
- if (!atomic_read(&tctx->in_idle))
- io_uring_del_tctx_node((unsigned long)work->ctx);
- complete(&work->completion);
-}
-
-static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
- return req->ctx == data;
-}
-
-static __cold void io_ring_exit_work(struct work_struct *work)
-{
- struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
- unsigned long timeout = jiffies + HZ * 60 * 5;
- unsigned long interval = HZ / 20;
- struct io_tctx_exit exit;
- struct io_tctx_node *node;
- int ret;
-
- /*
- * If we're doing polled IO and end up having requests being
- * submitted async (out-of-line), then completions can come in while
- * we're waiting for refs to drop. We need to reap these manually,
- * as nobody else will be looking for them.
- */
- do {
- io_uring_try_cancel_requests(ctx, NULL, true);
- if (ctx->sq_data) {
- struct io_sq_data *sqd = ctx->sq_data;
- struct task_struct *tsk;
-
- io_sq_thread_park(sqd);
- tsk = sqd->thread;
- if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
- io_wq_cancel_cb(tsk->io_uring->io_wq,
- io_cancel_ctx_cb, ctx, true);
- io_sq_thread_unpark(sqd);
- }
-
- io_req_caches_free(ctx);
-
- if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
- /* there is little hope left, don't run it too often */
- interval = HZ * 60;
- }
- } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
-
- init_completion(&exit.completion);
- init_task_work(&exit.task_work, io_tctx_exit_cb);
- exit.ctx = ctx;
- /*
- * Some may use the context even when all refs and requests have been put,
- * and they are free to do so while still holding uring_lock or
- * completion_lock, see io_req_task_submit(). Apart from other work,
- * this lock/unlock section also waits for them to finish.
- */
- mutex_lock(&ctx->uring_lock);
- while (!list_empty(&ctx->tctx_list)) {
- WARN_ON_ONCE(time_after(jiffies, timeout));
-
- node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
- ctx_node);
- /* don't spin on a single task if cancellation failed */
- list_rotate_left(&ctx->tctx_list);
- ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
- if (WARN_ON_ONCE(ret))
- continue;
-
- mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&exit.completion);
- mutex_lock(&ctx->uring_lock);
- }
- mutex_unlock(&ctx->uring_lock);
- spin_lock(&ctx->completion_lock);
- spin_unlock(&ctx->completion_lock);
-
- io_ring_ctx_free(ctx);
-}
-
-/* Returns true if we found and killed one or more timeouts */
-static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
- struct task_struct *tsk, bool cancel_all)
-{
- struct io_kiocb *req, *tmp;
- int canceled = 0;
-
- spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
- list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
- if (io_match_task(req, tsk, cancel_all)) {
- io_kill_timeout(req, -ECANCELED);
- canceled++;
- }
- }
- spin_unlock_irq(&ctx->timeout_lock);
- if (canceled != 0)
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- if (canceled != 0)
- io_cqring_ev_posted(ctx);
- return canceled != 0;
-}
-
-static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
-{
- unsigned long index;
- const struct cred *creds;
-
- mutex_lock(&ctx->uring_lock);
- percpu_ref_kill(&ctx->refs);
- if (ctx->rings)
- __io_cqring_overflow_flush(ctx, true);
- xa_for_each(&ctx->personalities, index, creds)
- io_unregister_personality(ctx, index);
- mutex_unlock(&ctx->uring_lock);
-
- io_kill_timeouts(ctx, NULL, true);
- io_poll_remove_all(ctx, NULL, true);
-
- /* if we failed setting up the ctx, we might not have any rings */
- io_iopoll_try_reap_events(ctx);
-
- INIT_WORK(&ctx->exit_work, io_ring_exit_work);
- /*
- * Use system_unbound_wq to avoid spawning tons of event kworkers
- * if we're exiting a ton of rings at the same time. It just adds
- * noise and overhead; there's no discernible change in runtime
- * over using system_wq.
- */
- queue_work(system_unbound_wq, &ctx->exit_work);
-}
-
-static int io_uring_release(struct inode *inode, struct file *file)
-{
- struct io_ring_ctx *ctx = file->private_data;
-
- file->private_data = NULL;
- io_ring_ctx_wait_and_kill(ctx);
- return 0;
-}
-
-struct io_task_cancel {
- struct task_struct *task;
- bool all;
-};
-
-static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_task_cancel *cancel = data;
- bool ret;
-
- if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
- struct io_ring_ctx *ctx = req->ctx;
-
- /* protect against races with linked timeouts */
- spin_lock(&ctx->completion_lock);
- ret = io_match_task(req, cancel->task, cancel->all);
- spin_unlock(&ctx->completion_lock);
- } else {
- ret = io_match_task(req, cancel->task, cancel->all);
- }
- return ret;
-}
-
-static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
- struct task_struct *task,
- bool cancel_all)
-{
- struct io_defer_entry *de;
- LIST_HEAD(list);
-
- spin_lock(&ctx->completion_lock);
- list_for_each_entry_reverse(de, &ctx->defer_list, list) {
- if (io_match_task(de->req, task, cancel_all)) {
- list_cut_position(&list, &ctx->defer_list, &de->list);
- break;
- }
- }
- spin_unlock(&ctx->completion_lock);
- if (list_empty(&list))
- return false;
-
- while (!list_empty(&list)) {
- de = list_first_entry(&list, struct io_defer_entry, list);
- list_del_init(&de->list);
- io_req_complete_failed(de->req, -ECANCELED);
- kfree(de);
- }
- return true;
-}
-
-static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
-{
- struct io_tctx_node *node;
- enum io_wq_cancel cret;
- bool ret = false;
-
- mutex_lock(&ctx->uring_lock);
- list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
- struct io_uring_task *tctx = node->task->io_uring;
-
- /*
- * io_wq will stay alive while we hold uring_lock, because it's
- * killed after the ctx nodes, which requires taking the lock.
- */
- if (!tctx || !tctx->io_wq)
- continue;
- cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
- ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
- }
- mutex_unlock(&ctx->uring_lock);
-
- return ret;
-}
-
-static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
- struct task_struct *task,
- bool cancel_all)
-{
- struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
- struct io_uring_task *tctx = task ? task->io_uring : NULL;
-
- while (1) {
- enum io_wq_cancel cret;
- bool ret = false;
-
- if (!task) {
- ret |= io_uring_try_cancel_iowq(ctx);
- } else if (tctx && tctx->io_wq) {
- /*
- * Cancels requests of all rings, not only @ctx, but
- * it's fine as the task is in exit/exec.
- */
- cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
- &cancel, true);
- ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
- }
-
- /* SQPOLL thread does its own polling */
- if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
- (ctx->sq_data && ctx->sq_data->thread == current)) {
- while (!wq_list_empty(&ctx->iopoll_list)) {
- io_iopoll_try_reap_events(ctx);
- ret = true;
- }
- }
-
- ret |= io_cancel_defer_files(ctx, task, cancel_all);
- ret |= io_poll_remove_all(ctx, task, cancel_all);
- ret |= io_kill_timeouts(ctx, task, cancel_all);
- if (task)
- ret |= io_run_task_work();
- if (!ret)
- break;
- cond_resched();
- }
-}
-
-static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
-{
- struct io_uring_task *tctx = current->io_uring;
- struct io_tctx_node *node;
- int ret;
-
- if (unlikely(!tctx)) {
- ret = io_uring_alloc_task_context(current, ctx);
- if (unlikely(ret))
- return ret;
-
- tctx = current->io_uring;
- if (ctx->iowq_limits_set) {
- unsigned int limits[2] = { ctx->iowq_limits[0],
- ctx->iowq_limits[1], };
-
- ret = io_wq_max_workers(tctx->io_wq, limits);
- if (ret)
- return ret;
- }
- }
- if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
- node->ctx = ctx;
- node->task = current;
-
- ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
- node, GFP_KERNEL));
- if (ret) {
- kfree(node);
- return ret;
- }
-
- mutex_lock(&ctx->uring_lock);
- list_add(&node->ctx_node, &ctx->tctx_list);
- mutex_unlock(&ctx->uring_lock);
- }
- tctx->last = ctx;
- return 0;
-}
-
-/*
- * Note that this task has used io_uring. We use it for cancellation purposes.
- */
-static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
-{
- struct io_uring_task *tctx = current->io_uring;
-
- if (likely(tctx && tctx->last == ctx))
- return 0;
- return __io_uring_add_tctx_node(ctx);
-}
-
-/*
- * Remove this io_uring_file -> task mapping.
- */
-static __cold void io_uring_del_tctx_node(unsigned long index)
-{
- struct io_uring_task *tctx = current->io_uring;
- struct io_tctx_node *node;
-
- if (!tctx)
- return;
- node = xa_erase(&tctx->xa, index);
- if (!node)
- return;
-
- WARN_ON_ONCE(current != node->task);
- WARN_ON_ONCE(list_empty(&node->ctx_node));
-
- mutex_lock(&node->ctx->uring_lock);
- list_del(&node->ctx_node);
- mutex_unlock(&node->ctx->uring_lock);
-
- if (tctx->last == node->ctx)
- tctx->last = NULL;
- kfree(node);
-}
-
-static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
-{
- struct io_wq *wq = tctx->io_wq;
- struct io_tctx_node *node;
- unsigned long index;
-
- xa_for_each(&tctx->xa, index, node) {
- io_uring_del_tctx_node(index);
- cond_resched();
- }
- if (wq) {
- /*
- * Must be after io_uring_del_tctx_node() (removes nodes under
- * uring_lock) to avoid a race with io_uring_try_cancel_iowq().
- */
- io_wq_put_and_exit(wq);
- tctx->io_wq = NULL;
- }
-}
-
-static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
-{
- if (tracked)
- return atomic_read(&tctx->inflight_tracked);
- return percpu_counter_sum(&tctx->inflight);
-}
-
-static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
-{
- struct io_uring_task *tctx = task->io_uring;
- unsigned int refs = tctx->cached_refs;
-
- if (refs) {
- tctx->cached_refs = 0;
- percpu_counter_sub(&tctx->inflight, refs);
- put_task_struct_many(task, refs);
- }
-}
-
-/*
- * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * its requests. @sqd must be non-NULL iff this is an SQPOLL thread cancellation.
- */
-static __cold void io_uring_cancel_generic(bool cancel_all,
- struct io_sq_data *sqd)
-{
- struct io_uring_task *tctx = current->io_uring;
- struct io_ring_ctx *ctx;
- s64 inflight;
- DEFINE_WAIT(wait);
-
- WARN_ON_ONCE(sqd && sqd->thread != current);
-
- if (!current->io_uring)
- return;
- if (tctx->io_wq)
- io_wq_exit_start(tctx->io_wq);
-
- atomic_inc(&tctx->in_idle);
- do {
- io_uring_drop_tctx_refs(current);
- /* read completions before cancelations */
- inflight = tctx_inflight(tctx, !cancel_all);
- if (!inflight)
- break;
-
- if (!sqd) {
- struct io_tctx_node *node;
- unsigned long index;
-
- xa_for_each(&tctx->xa, index, node) {
- /* sqpoll task will cancel all its requests */
- if (node->ctx->sq_data)
- continue;
- io_uring_try_cancel_requests(node->ctx, current,
- cancel_all);
- }
- } else {
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- io_uring_try_cancel_requests(ctx, current,
- cancel_all);
- }
-
- prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
- io_uring_drop_tctx_refs(current);
- /*
- * If we've seen completions, retry without waiting. This
- * avoids a race where a completion comes in before we did
- * prepare_to_wait().
- */
- if (inflight == tctx_inflight(tctx, !cancel_all))
- schedule();
- finish_wait(&tctx->wait, &wait);
- } while (1);
- atomic_dec(&tctx->in_idle);
-
- io_uring_clean_tctx(tctx);
- if (cancel_all) {
- /* for exec all current's requests should be gone, kill tctx */
- __io_uring_free(current);
- }
-}
-
-void __io_uring_cancel(bool cancel_all)
-{
- io_uring_cancel_generic(cancel_all, NULL);
-}
-
-static void *io_uring_validate_mmap_request(struct file *file,
- loff_t pgoff, size_t sz)
-{
- struct io_ring_ctx *ctx = file->private_data;
- loff_t offset = pgoff << PAGE_SHIFT;
- struct page *page;
- void *ptr;
-
- switch (offset) {
- case IORING_OFF_SQ_RING:
- case IORING_OFF_CQ_RING:
- ptr = ctx->rings;
- break;
- case IORING_OFF_SQES:
- ptr = ctx->sq_sqes;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
- page = virt_to_head_page(ptr);
- if (sz > page_size(page))
- return ERR_PTR(-EINVAL);
-
- return ptr;
-}
-
-#ifdef CONFIG_MMU
-
-static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
-{
- size_t sz = vma->vm_end - vma->vm_start;
- unsigned long pfn;
- void *ptr;
-
- ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
-
- pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
- return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
-}
-
-#else /* !CONFIG_MMU */
-
-static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
-}
-
-static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
-{
- return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
-}
-
-static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags)
-{
- void *ptr;
-
- ptr = io_uring_validate_mmap_request(file, pgoff, len);
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
-
- return (unsigned long) ptr;
-}
-
-#endif /* !CONFIG_MMU */
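/*
 * Illustrative userspace sketch (not part of this patch): mapping the rings
 * through the fixed offsets recognised by io_uring_validate_mmap_request()
 * above. map_rings() is a hypothetical helper; 'p' is assumed to have been
 * filled in by io_uring_setup(2), and the single-mmap feature
 * (IORING_FEAT_SINGLE_MMAP) is ignored here for simplicity.
 */
#include <sys/mman.h>
#include <linux/io_uring.h>

static int map_rings(int ring_fd, const struct io_uring_params *p,
		     void **sq_ring, void **sqes, void **cq_ring)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);

	/* SQ ring metadata plus the sq_array of SQE indices */
	*sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	/* the SQE array itself */
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		     ring_fd, IORING_OFF_SQES);
	/* CQ ring metadata plus the CQE array */
	*cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);

	if (*sq_ring == MAP_FAILED || *sqes == MAP_FAILED || *cq_ring == MAP_FAILED)
		return -1;
	return 0;
}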
-
-static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
-{
- DEFINE_WAIT(wait);
-
- do {
- if (!io_sqring_full(ctx))
- break;
- prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
-
- if (!io_sqring_full(ctx))
- break;
- schedule();
- } while (!signal_pending(current));
-
- finish_wait(&ctx->sqo_sq_wait, &wait);
- return 0;
-}
-
-static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
- struct __kernel_timespec __user **ts,
- const sigset_t __user **sig)
-{
- struct io_uring_getevents_arg arg;
-
- /*
- * If EXT_ARG isn't set, then we have no timespec and the argp pointer
- * is just a pointer to the sigset_t.
- */
- if (!(flags & IORING_ENTER_EXT_ARG)) {
- *sig = (const sigset_t __user *) argp;
- *ts = NULL;
- return 0;
- }
-
- /*
- * EXT_ARG is set - ensure we agree on the size of it, and copy in our
- * timespec and sigset_t pointers if the size matches.
- */
- if (*argsz != sizeof(arg))
- return -EINVAL;
- if (copy_from_user(&arg, argp, sizeof(arg)))
- return -EFAULT;
- *sig = u64_to_user_ptr(arg.sigmask);
- *argsz = arg.sigmask_sz;
- *ts = u64_to_user_ptr(arg.ts);
- return 0;
-}
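/*
 * Illustrative userspace sketch (not part of this patch): passing a timeout
 * and signal mask through struct io_uring_getevents_arg, as parsed by
 * io_get_ext_arg() above. wait_cqe_timeout() is a hypothetical helper and
 * assumes a uapi linux/io_uring.h new enough to define the struct and
 * IORING_ENTER_EXT_ARG, plus libc syscall numbers for io_uring_enter.
 */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Wait for at least one CQE, with a 100ms timeout. */
static int wait_cqe_timeout(int ring_fd, const sigset_t *mask)
{
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	struct io_uring_getevents_arg arg = {
		.sigmask	= (unsigned long)mask,
		.sigmask_sz	= _NSIG / 8,	/* kernel sigset_t size */
		.ts		= (unsigned long)&ts,
	};

	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}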
-
-SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
- u32, min_complete, u32, flags, const void __user *, argp,
- size_t, argsz)
-{
- struct io_ring_ctx *ctx;
- int submitted = 0;
- struct fd f;
- long ret;
-
- io_run_task_work();
-
- if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
- IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
- return -EINVAL;
-
- f = fdget(fd);
- if (unlikely(!f.file))
- return -EBADF;
-
- ret = -EOPNOTSUPP;
- if (unlikely(f.file->f_op != &io_uring_fops))
- goto out_fput;
-
- ret = -ENXIO;
- ctx = f.file->private_data;
- if (unlikely(!percpu_ref_tryget(&ctx->refs)))
- goto out_fput;
-
- ret = -EBADFD;
- if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
- goto out;
-
- /*
- * For SQ polling, the thread will do all submissions and completions.
- * Just return the requested submit count, and wake the thread if
- * we were asked to.
- */
- ret = 0;
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- io_cqring_overflow_flush(ctx);
-
- if (unlikely(ctx->sq_data->thread == NULL)) {
- ret = -EOWNERDEAD;
- goto out;
- }
- if (flags & IORING_ENTER_SQ_WAKEUP)
- wake_up(&ctx->sq_data->wait);
- if (flags & IORING_ENTER_SQ_WAIT) {
- ret = io_sqpoll_wait_sq(ctx);
- if (ret)
- goto out;
- }
- submitted = to_submit;
- } else if (to_submit) {
- ret = io_uring_add_tctx_node(ctx);
- if (unlikely(ret))
- goto out;
- mutex_lock(&ctx->uring_lock);
- submitted = io_submit_sqes(ctx, to_submit);
- mutex_unlock(&ctx->uring_lock);
-
- if (submitted != to_submit)
- goto out;
- }
- if (flags & IORING_ENTER_GETEVENTS) {
- const sigset_t __user *sig;
- struct __kernel_timespec __user *ts;
-
- ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
- if (unlikely(ret))
- goto out;
-
- min_complete = min(min_complete, ctx->cq_entries);
-
- /*
- * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
- * space applications don't need to poll for I/O completion events
- * themselves; they can rely on io_sq_thread to do the polling
- * work, which reduces CPU usage and uring_lock contention.
- */
- if (ctx->flags & IORING_SETUP_IOPOLL &&
- !(ctx->flags & IORING_SETUP_SQPOLL)) {
- ret = io_iopoll_check(ctx, min_complete);
- } else {
- ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
- }
- }
-
-out:
- percpu_ref_put(&ctx->refs);
-out_fput:
- fdput(f);
- return submitted ? submitted : ret;
-}
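/*
 * Illustrative userspace sketch (not part of this patch): the basic
 * submit-and-wait form of the io_uring_enter(2) call defined above.
 * submit_and_wait() is a hypothetical helper; without SQPOLL the return
 * value is the number of SQEs consumed from the SQ ring.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int submit_and_wait(int ring_fd, unsigned int to_submit)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}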
-
-#ifdef CONFIG_PROC_FS
-static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
- const struct cred *cred)
-{
- struct user_namespace *uns = seq_user_ns(m);
- struct group_info *gi;
- kernel_cap_t cap;
- unsigned __capi;
- int g;
-
- seq_printf(m, "%5d\n", id);
- seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
- seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
- seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
- seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
- seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
- seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
- seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
- seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
- seq_puts(m, "\n\tGroups:\t");
- gi = cred->group_info;
- for (g = 0; g < gi->ngroups; g++) {
- seq_put_decimal_ull(m, g ? " " : "",
- from_kgid_munged(uns, gi->gid[g]));
- }
- seq_puts(m, "\n\tCapEff:\t");
- cap = cred->cap_effective;
- CAP_FOR_EACH_U32(__capi)
- seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
- seq_putc(m, '\n');
- return 0;
-}
-
-static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
- struct seq_file *m)
-{
- struct io_sq_data *sq = NULL;
- struct io_overflow_cqe *ocqe;
- struct io_rings *r = ctx->rings;
- unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
- unsigned int sq_head = READ_ONCE(r->sq.head);
- unsigned int sq_tail = READ_ONCE(r->sq.tail);
- unsigned int cq_head = READ_ONCE(r->cq.head);
- unsigned int cq_tail = READ_ONCE(r->cq.tail);
- unsigned int sq_entries, cq_entries;
- bool has_lock;
- unsigned int i;
-
- /*
- * We may get imprecise sqe and cqe info if the ring is actively running,
- * since we read cached_sq_head and cached_cq_tail without uring_lock,
- * and sq_tail and cq_head are changed by userspace. That's OK, since
- * this info is usually only consulted when the ring is stuck.
- */
- seq_printf(m, "SqMask:\t\t0x%x\n", sq_mask);
- seq_printf(m, "SqHead:\t%u\n", sq_head);
- seq_printf(m, "SqTail:\t%u\n", sq_tail);
- seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
- seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
- seq_printf(m, "CqHead:\t%u\n", cq_head);
- seq_printf(m, "CqTail:\t%u\n", cq_tail);
- seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
- seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
- sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
- for (i = 0; i < sq_entries; i++) {
- unsigned int entry = i + sq_head;
- unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
- struct io_uring_sqe *sqe;
-
- if (sq_idx > sq_mask)
- continue;
- sqe = &ctx->sq_sqes[sq_idx];
- seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
- sq_idx, sqe->opcode, sqe->fd, sqe->flags,
- sqe->user_data);
- }
- seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
- cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
- for (i = 0; i < cq_entries; i++) {
- unsigned int entry = i + cq_head;
- struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask];
-
- seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
- entry & cq_mask, cqe->user_data, cqe->res,
- cqe->flags);
- }
-
- /*
- * Avoid an ABBA deadlock between the seq lock and the io_uring mutex,
- * since the fdinfo case grabs them in the opposite order from normal use
- * cases. If we fail to get the lock, we just don't iterate any
- * structures that could be going away outside the io_uring mutex.
- */
- has_lock = mutex_trylock(&ctx->uring_lock);
-
- if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- sq = ctx->sq_data;
- if (!sq->thread)
- sq = NULL;
- }
-
- seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
- seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
- seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
- for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
- struct file *f = io_file_from_index(ctx, i);
-
- if (f)
- seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
- else
- seq_printf(m, "%5u: <none>\n", i);
- }
- seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
- for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
- struct io_mapped_ubuf *buf = ctx->user_bufs[i];
- unsigned int len = buf->ubuf_end - buf->ubuf;
-
- seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
- }
- if (has_lock && !xa_empty(&ctx->personalities)) {
- unsigned long index;
- const struct cred *cred;
-
- seq_printf(m, "Personalities:\n");
- xa_for_each(&ctx->personalities, index, cred)
- io_uring_show_cred(m, index, cred);
- }
- if (has_lock)
- mutex_unlock(&ctx->uring_lock);
-
- seq_puts(m, "PollList:\n");
- spin_lock(&ctx->completion_lock);
- for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
- struct hlist_head *list = &ctx->cancel_hash[i];
- struct io_kiocb *req;
-
- hlist_for_each_entry(req, list, hash_node)
- seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
- req->task->task_works != NULL);
- }
-
- seq_puts(m, "CqOverflowList:\n");
- list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
- struct io_uring_cqe *cqe = &ocqe->cqe;
-
- seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
- cqe->user_data, cqe->res, cqe->flags);
-
- }
-
- spin_unlock(&ctx->completion_lock);
-}
-
-static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
-{
- struct io_ring_ctx *ctx = f->private_data;
-
- if (percpu_ref_tryget(&ctx->refs)) {
- __io_uring_show_fdinfo(ctx, m);
- percpu_ref_put(&ctx->refs);
- }
-}
-#endif
-
-static const struct file_operations io_uring_fops = {
- .release = io_uring_release,
- .mmap = io_uring_mmap,
-#ifndef CONFIG_MMU
- .get_unmapped_area = io_uring_nommu_get_unmapped_area,
- .mmap_capabilities = io_uring_nommu_mmap_capabilities,
-#endif
- .poll = io_uring_poll,
-#ifdef CONFIG_PROC_FS
- .show_fdinfo = io_uring_show_fdinfo,
-#endif
-};
-
-static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
- struct io_uring_params *p)
-{
- struct io_rings *rings;
- size_t size, sq_array_offset;
-
- /* make sure these are sane, as we already accounted them */
- ctx->sq_entries = p->sq_entries;
- ctx->cq_entries = p->cq_entries;
-
- size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
- if (size == SIZE_MAX)
- return -EOVERFLOW;
-
- rings = io_mem_alloc(size);
- if (!rings)
- return -ENOMEM;
-
- ctx->rings = rings;
- ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
- rings->sq_ring_mask = p->sq_entries - 1;
- rings->cq_ring_mask = p->cq_entries - 1;
- rings->sq_ring_entries = p->sq_entries;
- rings->cq_ring_entries = p->cq_entries;
-
- size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
- if (size == SIZE_MAX) {
- io_mem_free(ctx->rings);
- ctx->rings = NULL;
- return -EOVERFLOW;
- }
-
- ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes) {
- io_mem_free(ctx->rings);
- ctx->rings = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
-{
- int ret, fd;
-
- fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
- if (fd < 0)
- return fd;
-
- ret = io_uring_add_tctx_node(ctx);
- if (ret) {
- put_unused_fd(fd);
- return ret;
- }
- fd_install(fd, file);
- return fd;
-}
-
-/*
- * Allocate an anonymous fd; this is what constitutes the application-
- * visible backing of an io_uring instance. The application mmaps this
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
- * we have to tie this fd to a socket for file garbage collection purposes.
- */
-static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
-{
- struct file *file;
-#if defined(CONFIG_UNIX)
- int ret;
-
- ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
- &ctx->ring_sock);
- if (ret)
- return ERR_PTR(ret);
-#endif
-
- file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
- O_RDWR | O_CLOEXEC, NULL);
-#if defined(CONFIG_UNIX)
- if (IS_ERR(file)) {
- sock_release(ctx->ring_sock);
- ctx->ring_sock = NULL;
- } else {
- ctx->ring_sock->file = file;
- }
-#endif
- return file;
-}
-
-static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
- struct io_uring_params __user *params)
-{
- struct io_ring_ctx *ctx;
- struct file *file;
- int ret;
-
- if (!entries)
- return -EINVAL;
- if (entries > IORING_MAX_ENTRIES) {
- if (!(p->flags & IORING_SETUP_CLAMP))
- return -EINVAL;
- entries = IORING_MAX_ENTRIES;
- }
-
- /*
- * Use twice as many entries for the CQ ring. It's possible for the
- * application to drive a higher depth than the size of the SQ ring,
- * since the sqes are only used at submission time. This allows for
- * some flexibility in overcommitting a bit. If the application has
- * set IORING_SETUP_CQSIZE, it will have passed in the desired number
- * of CQ ring entries manually.
- */
- p->sq_entries = roundup_pow_of_two(entries);
- if (p->flags & IORING_SETUP_CQSIZE) {
- /*
- * If IORING_SETUP_CQSIZE is set, we do the same roundup
- * to a power-of-two, if it isn't already. We do NOT impose
- * any cq vs sq ring sizing.
- */
- if (!p->cq_entries)
- return -EINVAL;
- if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
- if (!(p->flags & IORING_SETUP_CLAMP))
- return -EINVAL;
- p->cq_entries = IORING_MAX_CQ_ENTRIES;
- }
- p->cq_entries = roundup_pow_of_two(p->cq_entries);
- if (p->cq_entries < p->sq_entries)
- return -EINVAL;
- } else {
- p->cq_entries = 2 * p->sq_entries;
- }
-
- ctx = io_ring_ctx_alloc(p);
- if (!ctx)
- return -ENOMEM;
- ctx->compat = in_compat_syscall();
- if (!capable(CAP_IPC_LOCK))
- ctx->user = get_uid(current_user());
-
- /*
- * This is just grabbed for accounting purposes. When a process exits,
- * the mm is exited and dropped before the files, hence we need to hang
- * on to this mm purely for the purposes of being able to unaccount
- * memory (locked/pinned vm). It's not used for anything else.
- */
- mmgrab(current->mm);
- ctx->mm_account = current->mm;
-
- ret = io_allocate_scq_urings(ctx, p);
- if (ret)
- goto err;
-
- ret = io_sq_offload_create(ctx, p);
- if (ret)
- goto err;
- /* always set a rsrc node */
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- goto err;
- io_rsrc_node_switch(ctx, NULL);
-
- memset(&p->sq_off, 0, sizeof(p->sq_off));
- p->sq_off.head = offsetof(struct io_rings, sq.head);
- p->sq_off.tail = offsetof(struct io_rings, sq.tail);
- p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
- p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
- p->sq_off.flags = offsetof(struct io_rings, sq_flags);
- p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
- p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
-
- memset(&p->cq_off, 0, sizeof(p->cq_off));
- p->cq_off.head = offsetof(struct io_rings, cq.head);
- p->cq_off.tail = offsetof(struct io_rings, cq.tail);
- p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
- p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
- p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
- p->cq_off.cqes = offsetof(struct io_rings, cqes);
- p->cq_off.flags = offsetof(struct io_rings, cq_flags);
-
- p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
- IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
- IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
- IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
- IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
- IORING_FEAT_RSRC_TAGS;
-
- if (copy_to_user(params, p, sizeof(*p))) {
- ret = -EFAULT;
- goto err;
- }
-
- file = io_uring_get_file(ctx);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto err;
- }
-
- /*
- * Install ring fd as the very last thing, so we don't risk someone
- * having closed it before we finish setup
- */
- ret = io_uring_install_fd(ctx, file);
- if (ret < 0) {
- /* fput will clean it up */
- fput(file);
- return ret;
- }
-
- trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
- return ret;
-err:
- io_ring_ctx_wait_and_kill(ctx);
- return ret;
-}
-
-/*
- * Sets up an io_uring context and returns the fd. Applications ask for a
- * ring size; we return the actual sq/cq ring sizes (among other things) in the
- * params structure passed in.
- */
-static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
-{
- struct io_uring_params p;
- int i;
-
- if (copy_from_user(&p, params, sizeof(p)))
- return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
- if (p.resv[i])
- return -EINVAL;
- }
-
- if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
- IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
- IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
- IORING_SETUP_R_DISABLED))
- return -EINVAL;
-
- return io_uring_create(entries, &p, params);
-}
-
-SYSCALL_DEFINE2(io_uring_setup, u32, entries,
- struct io_uring_params __user *, params)
-{
- return io_uring_setup(entries, params);
-}
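/*
 * Illustrative userspace sketch (not part of this patch): creating a ring
 * with an explicit CQ size, exercising the sizing rules in io_uring_create()
 * above (power-of-two rounding, CQSIZE must not be smaller than the SQ
 * ring). setup_ring() is a hypothetical helper and assumes libc exposes
 * __NR_io_uring_setup.
 */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int setup_ring(struct io_uring_params *p)
{
	memset(p, 0, sizeof(*p));
	p->flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p->cq_entries = 256;	/* >= sq_entries, rounded up by the kernel */

	/* 64 SQ entries requested; actual sizes are returned in *p */
	return syscall(__NR_io_uring_setup, 64, p);
}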
-
-static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
- unsigned nr_args)
-{
- struct io_uring_probe *p;
- size_t size;
- int i, ret;
-
- size = struct_size(p, ops, nr_args);
- if (size == SIZE_MAX)
- return -EOVERFLOW;
- p = kzalloc(size, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- ret = -EFAULT;
- if (copy_from_user(p, arg, size))
- goto out;
- ret = -EINVAL;
- if (memchr_inv(p, 0, size))
- goto out;
-
- p->last_op = IORING_OP_LAST - 1;
- if (nr_args > IORING_OP_LAST)
- nr_args = IORING_OP_LAST;
-
- for (i = 0; i < nr_args; i++) {
- p->ops[i].op = i;
- if (!io_op_defs[i].not_supported)
- p->ops[i].flags = IO_URING_OP_SUPPORTED;
- }
- p->ops_len = i;
-
- ret = 0;
- if (copy_to_user(arg, p, size))
- ret = -EFAULT;
-out:
- kfree(p);
- return ret;
-}
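/*
 * Illustrative userspace sketch (not part of this patch): querying opcode
 * support via IORING_REGISTER_PROBE, which is served by io_probe() above.
 * print_supported_ops() is a hypothetical helper name.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void print_supported_ops(int ring_fd)
{
	size_t len = sizeof(struct io_uring_probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *probe = calloc(1, len);
	int i;

	if (!probe)
		return;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    probe, IORING_OP_LAST) >= 0) {
		for (i = 0; i < probe->ops_len; i++)
			if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
				printf("opcode %u supported\n", probe->ops[i].op);
	}
	free(probe);
}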
-
-static int io_register_personality(struct io_ring_ctx *ctx)
-{
- const struct cred *creds;
- u32 id;
- int ret;
-
- creds = get_current_cred();
-
- ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
- XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
- if (ret < 0) {
- put_cred(creds);
- return ret;
- }
- return id;
-}
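/*
 * Illustrative userspace sketch (not part of this patch): snapshotting the
 * calling task's credentials via io_register_personality() above. The
 * returned id can later be placed in sqe->personality.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int register_personality(int ring_fd)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}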
-
-static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int nr_args)
-{
- struct io_uring_restriction *res;
- size_t size;
- int i, ret;
-
- /* Restrictions allowed only if rings started disabled */
- if (!(ctx->flags & IORING_SETUP_R_DISABLED))
- return -EBADFD;
-
- /* We allow only a single restrictions registration */
- if (ctx->restrictions.registered)
- return -EBUSY;
-
- if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
- return -EINVAL;
-
- size = array_size(nr_args, sizeof(*res));
- if (size == SIZE_MAX)
- return -EOVERFLOW;
-
- res = memdup_user(arg, size);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- ret = 0;
-
- for (i = 0; i < nr_args; i++) {
- switch (res[i].opcode) {
- case IORING_RESTRICTION_REGISTER_OP:
- if (res[i].register_op >= IORING_REGISTER_LAST) {
- ret = -EINVAL;
- goto out;
- }
-
- __set_bit(res[i].register_op,
- ctx->restrictions.register_op);
- break;
- case IORING_RESTRICTION_SQE_OP:
- if (res[i].sqe_op >= IORING_OP_LAST) {
- ret = -EINVAL;
- goto out;
- }
-
- __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
- break;
- case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
- ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
- break;
- case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
- ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- }
-
-out:
- /* Reset all restrictions if an error happened */
- if (ret != 0)
- memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
- else
- ctx->restrictions.registered = true;
-
- kfree(res);
- return ret;
-}
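/*
 * Illustrative userspace sketch (not part of this patch): on a ring created
 * with IORING_SETUP_R_DISABLED, restrict the allowed SQE opcodes and then
 * enable the ring, mirroring io_register_restrictions() above and
 * io_register_enable_rings() below. restrict_and_enable() is a hypothetical
 * helper name.
 */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int restrict_and_enable(int ring_fd)
{
	struct io_uring_restriction res[2];
	int ret;

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_NOP;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_READV;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RESTRICTIONS, res, 2);
	if (ret < 0)
		return ret;

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}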
-
-static int io_register_enable_rings(struct io_ring_ctx *ctx)
-{
- if (!(ctx->flags & IORING_SETUP_R_DISABLED))
- return -EBADFD;
-
- if (ctx->restrictions.registered)
- ctx->restricted = 1;
-
- ctx->flags &= ~IORING_SETUP_R_DISABLED;
- if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
- wake_up(&ctx->sq_data->wait);
- return 0;
-}
-
-static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
- struct io_uring_rsrc_update2 *up,
- unsigned nr_args)
-{
- __u32 tmp;
- int err;
-
- if (up->resv)
- return -EINVAL;
- if (check_add_overflow(up->offset, nr_args, &tmp))
- return -EOVERFLOW;
- err = io_rsrc_node_switch_start(ctx);
- if (err)
- return err;
-
- switch (type) {
- case IORING_RSRC_FILE:
- return __io_sqe_files_update(ctx, up, nr_args);
- case IORING_RSRC_BUFFER:
- return __io_sqe_buffers_update(ctx, up, nr_args);
- }
- return -EINVAL;
-}
-
-static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
- unsigned nr_args)
-{
- struct io_uring_rsrc_update2 up;
-
- if (!nr_args)
- return -EINVAL;
- memset(&up, 0, sizeof(up));
- if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
- return -EFAULT;
- return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
-}
-
-static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
- unsigned size, unsigned type)
-{
- struct io_uring_rsrc_update2 up;
-
- if (size != sizeof(up))
- return -EINVAL;
- if (copy_from_user(&up, arg, sizeof(up)))
- return -EFAULT;
- if (!up.nr || up.resv)
- return -EINVAL;
- return __io_register_rsrc_update(ctx, type, &up, up.nr);
-}
-
-static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int size, unsigned int type)
-{
- struct io_uring_rsrc_register rr;
-
- /* keep it extensible */
- if (size != sizeof(rr))
- return -EINVAL;
-
- memset(&rr, 0, sizeof(rr));
- if (copy_from_user(&rr, arg, size))
- return -EFAULT;
- if (!rr.nr || rr.resv || rr.resv2)
- return -EINVAL;
-
- switch (type) {
- case IORING_RSRC_FILE:
- return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
- rr.nr, u64_to_user_ptr(rr.tags));
- case IORING_RSRC_BUFFER:
- return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
- rr.nr, u64_to_user_ptr(rr.tags));
- }
- return -EINVAL;
-}
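/*
 * Illustrative userspace sketch (not part of this patch): registering a
 * fixed-file table through the extended interface handled by
 * io_register_rsrc() above. register_files2() is a hypothetical helper;
 * the fds array may contain -1 entries for sparse slots, and no update
 * tags are used here.
 */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int register_files2(int ring_fd, const int *fds, unsigned int nr)
{
	struct io_uring_rsrc_register rr;

	memset(&rr, 0, sizeof(rr));
	rr.nr = nr;
	rr.data = (unsigned long)fds;

	/* nr_args carries sizeof(rr) so the ABI stays extensible */
	return syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
		       &rr, sizeof(rr));
}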
-
-static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
- void __user *arg, unsigned len)
-{
- struct io_uring_task *tctx = current->io_uring;
- cpumask_var_t new_mask;
- int ret;
-
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_clear(new_mask);
- if (len > cpumask_size())
- len = cpumask_size();
-
- if (copy_from_user(new_mask, arg, len)) {
- free_cpumask_var(new_mask);
- return -EFAULT;
- }
-
- ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
- free_cpumask_var(new_mask);
- return ret;
-}
-
-static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
-{
- struct io_uring_task *tctx = current->io_uring;
-
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
- return io_wq_cpu_affinity(tctx->io_wq, NULL);
-}
-
-static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
- void __user *arg)
- __must_hold(&ctx->uring_lock)
-{
- struct io_tctx_node *node;
- struct io_uring_task *tctx = NULL;
- struct io_sq_data *sqd = NULL;
- __u32 new_count[2];
- int i, ret;
-
- if (copy_from_user(new_count, arg, sizeof(new_count)))
- return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(new_count); i++)
- if (new_count[i] > INT_MAX)
- return -EINVAL;
-
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- sqd = ctx->sq_data;
- if (sqd) {
- /*
- * Observe the correct sqd->lock -> ctx->uring_lock
- * ordering. Fine to drop uring_lock here, we hold
- * a ref to the ctx.
- */
- refcount_inc(&sqd->refs);
- mutex_unlock(&ctx->uring_lock);
- mutex_lock(&sqd->lock);
- mutex_lock(&ctx->uring_lock);
- if (sqd->thread)
- tctx = sqd->thread->io_uring;
- }
- } else {
- tctx = current->io_uring;
- }
-
- BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
-
- for (i = 0; i < ARRAY_SIZE(new_count); i++)
- if (new_count[i])
- ctx->iowq_limits[i] = new_count[i];
- ctx->iowq_limits_set = true;
-
- if (tctx && tctx->io_wq) {
- ret = io_wq_max_workers(tctx->io_wq, new_count);
- if (ret)
- goto err;
- } else {
- memset(new_count, 0, sizeof(new_count));
- }
-
- if (sqd) {
- mutex_unlock(&sqd->lock);
- io_put_sq_data(sqd);
- }
-
- if (copy_to_user(arg, new_count, sizeof(new_count)))
- return -EFAULT;
-
- /* that's it for SQPOLL, only the SQPOLL task creates requests */
- if (sqd)
- return 0;
-
- /* now propagate the restriction to all registered users */
- list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
- struct io_uring_task *tctx = node->task->io_uring;
-
- if (WARN_ON_ONCE(!tctx->io_wq))
- continue;
-
- for (i = 0; i < ARRAY_SIZE(new_count); i++)
- new_count[i] = ctx->iowq_limits[i];
- /* ignore errors, it always returns zero anyway */
- (void)io_wq_max_workers(tctx->io_wq, new_count);
- }
- return 0;
-err:
- if (sqd) {
- mutex_unlock(&sqd->lock);
- io_put_sq_data(sqd);
- }
- return ret;
-}
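/*
 * Illustrative userspace sketch (not part of this patch): capping the io-wq
 * worker pools via io_register_iowq_max_workers() above. new_count[0]
 * bounds the bounded (regular file) pool and new_count[1] the unbounded
 * pool; zero entries leave the current limit untouched, and the previous
 * values are written back into the array.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int set_iowq_limits(int ring_fd, unsigned int bounded, unsigned int unbounded)
{
	unsigned int new_count[2] = { bounded, unbounded };

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_MAX_WORKERS, new_count, 2);
}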
-
-static bool io_register_op_must_quiesce(int op)
-{
- switch (op) {
- case IORING_REGISTER_BUFFERS:
- case IORING_UNREGISTER_BUFFERS:
- case IORING_REGISTER_FILES:
- case IORING_UNREGISTER_FILES:
- case IORING_REGISTER_FILES_UPDATE:
- case IORING_REGISTER_PROBE:
- case IORING_REGISTER_PERSONALITY:
- case IORING_UNREGISTER_PERSONALITY:
- case IORING_REGISTER_FILES2:
- case IORING_REGISTER_FILES_UPDATE2:
- case IORING_REGISTER_BUFFERS2:
- case IORING_REGISTER_BUFFERS_UPDATE:
- case IORING_REGISTER_IOWQ_AFF:
- case IORING_UNREGISTER_IOWQ_AFF:
- case IORING_REGISTER_IOWQ_MAX_WORKERS:
- return false;
- default:
- return true;
- }
-}
-
-static __cold int io_ctx_quiesce(struct io_ring_ctx *ctx)
-{
- long ret;
-
- percpu_ref_kill(&ctx->refs);
-
- /*
- * Drop uring mutex before waiting for references to exit. If another
- * thread is currently inside io_uring_enter() it might need to grab the
- * uring_lock to make progress. If we hold it here across the drain
- * wait, then we can deadlock. It's safe to drop the mutex here, since
- * no new references will come in after we've killed the percpu ref.
- */
- mutex_unlock(&ctx->uring_lock);
- do {
- ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ);
- if (ret) {
- ret = min(0L, ret);
- break;
- }
-
- ret = io_run_task_work_sig();
- io_req_caches_free(ctx);
- } while (ret >= 0);
- mutex_lock(&ctx->uring_lock);
-
- if (ret)
- io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
- return ret;
-}
-
-static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
- void __user *arg, unsigned nr_args)
- __releases(ctx->uring_lock)
- __acquires(ctx->uring_lock)
-{
- int ret;
-
- /*
- * We're inside the ring mutex; if the ref is already dying, then
- * someone else killed the ctx or is already going through
- * io_uring_register().
- */
- if (percpu_ref_is_dying(&ctx->refs))
- return -ENXIO;
-
- if (ctx->restricted) {
- if (opcode >= IORING_REGISTER_LAST)
- return -EINVAL;
- opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
- if (!test_bit(opcode, ctx->restrictions.register_op))
- return -EACCES;
- }
-
- if (io_register_op_must_quiesce(opcode)) {
- ret = io_ctx_quiesce(ctx);
- if (ret)
- return ret;
- }
-
- switch (opcode) {
- case IORING_REGISTER_BUFFERS:
- ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
- break;
- case IORING_UNREGISTER_BUFFERS:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_sqe_buffers_unregister(ctx);
- break;
- case IORING_REGISTER_FILES:
- ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
- break;
- case IORING_UNREGISTER_FILES:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_sqe_files_unregister(ctx);
- break;
- case IORING_REGISTER_FILES_UPDATE:
- ret = io_register_files_update(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_EVENTFD:
- case IORING_REGISTER_EVENTFD_ASYNC:
- ret = -EINVAL;
- if (nr_args != 1)
- break;
- ret = io_eventfd_register(ctx, arg);
- if (ret)
- break;
- if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
- ctx->eventfd_async = 1;
- else
- ctx->eventfd_async = 0;
- break;
- case IORING_UNREGISTER_EVENTFD:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_eventfd_unregister(ctx);
- break;
- case IORING_REGISTER_PROBE:
- ret = -EINVAL;
- if (!arg || nr_args > 256)
- break;
- ret = io_probe(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_PERSONALITY:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_register_personality(ctx);
- break;
- case IORING_UNREGISTER_PERSONALITY:
- ret = -EINVAL;
- if (arg)
- break;
- ret = io_unregister_personality(ctx, nr_args);
- break;
- case IORING_REGISTER_ENABLE_RINGS:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_register_enable_rings(ctx);
- break;
- case IORING_REGISTER_RESTRICTIONS:
- ret = io_register_restrictions(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_FILES2:
- ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
- break;
- case IORING_REGISTER_FILES_UPDATE2:
- ret = io_register_rsrc_update(ctx, arg, nr_args,
- IORING_RSRC_FILE);
- break;
- case IORING_REGISTER_BUFFERS2:
- ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
- break;
- case IORING_REGISTER_BUFFERS_UPDATE:
- ret = io_register_rsrc_update(ctx, arg, nr_args,
- IORING_RSRC_BUFFER);
- break;
- case IORING_REGISTER_IOWQ_AFF:
- ret = -EINVAL;
- if (!arg || !nr_args)
- break;
- ret = io_register_iowq_aff(ctx, arg, nr_args);
- break;
- case IORING_UNREGISTER_IOWQ_AFF:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_unregister_iowq_aff(ctx);
- break;
- case IORING_REGISTER_IOWQ_MAX_WORKERS:
- ret = -EINVAL;
- if (!arg || nr_args != 2)
- break;
- ret = io_register_iowq_max_workers(ctx, arg);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (io_register_op_must_quiesce(opcode)) {
- /* bring the ctx back to life */
- percpu_ref_reinit(&ctx->refs);
- reinit_completion(&ctx->ref_comp);
- }
- return ret;
-}
-
-SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
- void __user *, arg, unsigned int, nr_args)
-{
- struct io_ring_ctx *ctx;
- long ret = -EBADF;
- struct fd f;
-
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
-
- ret = -EOPNOTSUPP;
- if (f.file->f_op != &io_uring_fops)
- goto out_fput;
-
- ctx = f.file->private_data;
-
- io_run_task_work();
-
- mutex_lock(&ctx->uring_lock);
- ret = __io_uring_register(ctx, opcode, arg, nr_args);
- mutex_unlock(&ctx->uring_lock);
- trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
- ctx->cq_ev_fd != NULL, ret);
-out_fput:
- fdput(f);
- return ret;
-}
-
-static int __init io_uring_init(void)
-{
-#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
- BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
- BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
-} while (0)
-
-#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
- __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
- BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
- BUILD_BUG_SQE_ELEM(0, __u8, opcode);
- BUILD_BUG_SQE_ELEM(1, __u8, flags);
- BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
- BUILD_BUG_SQE_ELEM(4, __s32, fd);
- BUILD_BUG_SQE_ELEM(8, __u64, off);
- BUILD_BUG_SQE_ELEM(8, __u64, addr2);
- BUILD_BUG_SQE_ELEM(16, __u64, addr);
- BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
- BUILD_BUG_SQE_ELEM(24, __u32, len);
- BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
- BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
- BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
- BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
- BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
- BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
- BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
- BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
- BUILD_BUG_SQE_ELEM(32, __u64, user_data);
- BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
- BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
- BUILD_BUG_SQE_ELEM(42, __u16, personality);
- BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
- BUILD_BUG_SQE_ELEM(44, __u32, file_index);
-
- BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
- sizeof(struct io_uring_rsrc_update));
- BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
- sizeof(struct io_uring_rsrc_update2));
-
- /* ->buf_index is u16 */
- BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
-
- /* should fit into one byte */
- BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
- BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
- BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
-
- BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
- BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
-
- req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
- SLAB_ACCOUNT);
- return 0;
-};
-__initcall(io_uring_init);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 504e69578112..1c152c2b1b67 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -41,7 +41,7 @@
*
* Returns 0 on success, -errno on error.
*/
-long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static int vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int error = -ENOTTY;
@@ -54,7 +54,6 @@ long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
out:
return error;
}
-EXPORT_SYMBOL(vfs_ioctl);
static int ioctl_fibmap(struct file *filp, int __user *p)
{
@@ -109,9 +108,6 @@ static int ioctl_fibmap(struct file *filp, int __user *p)
* Returns 0 on success, -errno on error, 1 if this was the last
* extent that will fit in user array.
*/
-#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
-#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
-#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
u64 phys, u64 len, u32 flags)
{
@@ -127,6 +123,10 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
return 1;
+#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
+#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
+#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
+
if (flags & SET_UNKNOWN_FLAGS)
flags |= FIEMAP_EXTENT_UNKNOWN;
if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
@@ -173,7 +173,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (*len == 0)
return -EINVAL;
- if (start > maxbytes)
+ if (start >= maxbytes)
return -EFBIG;
/*
@@ -227,19 +227,16 @@ static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
return error;
}
-static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
- u64 off, u64 olen, u64 destoff)
+static int ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
+ u64 off, u64 olen, u64 destoff)
{
- struct fd src_file = fdget(srcfd);
+ CLASS(fd, src_file)(srcfd);
loff_t cloned;
int ret;
- if (!src_file.file)
+ if (fd_empty(src_file))
return -EBADF;
- ret = -EXDEV;
- if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
- goto fdput;
- cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
+ cloned = vfs_clone_file_range(fd_file(src_file), off, dst_file, destoff,
olen, 0);
if (cloned < 0)
ret = cloned;
@@ -247,13 +244,11 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
ret = -EINVAL;
else
ret = 0;
-fdput:
- fdput(src_file);
return ret;
}
-static long ioctl_file_clone_range(struct file *file,
- struct file_clone_range __user *argp)
+static int ioctl_file_clone_range(struct file *file,
+ struct file_clone_range __user *argp)
{
struct file_clone_range args;
@@ -400,8 +395,8 @@ static int ioctl_fsfreeze(struct file *filp)
/* Freeze */
if (sb->s_op->freeze_super)
- return sb->s_op->freeze_super(sb);
- return freeze_super(sb);
+ return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
+ return freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
}
static int ioctl_fsthaw(struct file *filp)
@@ -413,8 +408,8 @@ static int ioctl_fsthaw(struct file *filp)
/* Thaw */
if (sb->s_op->thaw_super)
- return sb->s_op->thaw_super(sb);
- return thaw_super(sb);
+ return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
+ return thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
}
static int ioctl_file_dedupe_range(struct file *file,
@@ -430,7 +425,7 @@ static int ioctl_file_dedupe_range(struct file *file,
goto out;
}
- size = offsetof(struct file_dedupe_range __user, info[count]);
+ size = struct_size(same, info, count);
if (size > PAGE_SIZE) {
ret = -ENOMEM;
goto out;
@@ -457,313 +452,31 @@ out:
return ret;
}
-/**
- * fileattr_fill_xflags - initialize fileattr with xflags
- * @fa: fileattr pointer
- * @xflags: FS_XFLAG_* flags
- *
- * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
- * other fields are zeroed.
- */
-void fileattr_fill_xflags(struct fileattr *fa, u32 xflags)
-{
- memset(fa, 0, sizeof(*fa));
- fa->fsx_valid = true;
- fa->fsx_xflags = xflags;
- if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE)
- fa->flags |= FS_IMMUTABLE_FL;
- if (fa->fsx_xflags & FS_XFLAG_APPEND)
- fa->flags |= FS_APPEND_FL;
- if (fa->fsx_xflags & FS_XFLAG_SYNC)
- fa->flags |= FS_SYNC_FL;
- if (fa->fsx_xflags & FS_XFLAG_NOATIME)
- fa->flags |= FS_NOATIME_FL;
- if (fa->fsx_xflags & FS_XFLAG_NODUMP)
- fa->flags |= FS_NODUMP_FL;
- if (fa->fsx_xflags & FS_XFLAG_DAX)
- fa->flags |= FS_DAX_FL;
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
- fa->flags |= FS_PROJINHERIT_FL;
-}
-EXPORT_SYMBOL(fileattr_fill_xflags);
-
-/**
- * fileattr_fill_flags - initialize fileattr with flags
- * @fa: fileattr pointer
- * @flags: FS_*_FL flags
- *
- * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
- * All other fields are zeroed.
- */
-void fileattr_fill_flags(struct fileattr *fa, u32 flags)
-{
- memset(fa, 0, sizeof(*fa));
- fa->flags_valid = true;
- fa->flags = flags;
- if (fa->flags & FS_SYNC_FL)
- fa->fsx_xflags |= FS_XFLAG_SYNC;
- if (fa->flags & FS_IMMUTABLE_FL)
- fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
- if (fa->flags & FS_APPEND_FL)
- fa->fsx_xflags |= FS_XFLAG_APPEND;
- if (fa->flags & FS_NODUMP_FL)
- fa->fsx_xflags |= FS_XFLAG_NODUMP;
- if (fa->flags & FS_NOATIME_FL)
- fa->fsx_xflags |= FS_XFLAG_NOATIME;
- if (fa->flags & FS_DAX_FL)
- fa->fsx_xflags |= FS_XFLAG_DAX;
- if (fa->flags & FS_PROJINHERIT_FL)
- fa->fsx_xflags |= FS_XFLAG_PROJINHERIT;
-}
-EXPORT_SYMBOL(fileattr_fill_flags);
-
-/**
- * vfs_fileattr_get - retrieve miscellaneous file attributes
- * @dentry: the object to retrieve from
- * @fa: fileattr pointer
- *
- * Call i_op->fileattr_get() callback, if exists.
- *
- * Return: 0 on success, or a negative error on failure.
- */
-int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
-{
- struct inode *inode = d_inode(dentry);
-
- if (!inode->i_op->fileattr_get)
- return -ENOIOCTLCMD;
-
- return inode->i_op->fileattr_get(dentry, fa);
-}
-EXPORT_SYMBOL(vfs_fileattr_get);
-
-/**
- * copy_fsxattr_to_user - copy fsxattr to userspace.
- * @fa: fileattr pointer
- * @ufa: fsxattr user pointer
- *
- * Return: 0 on success, or -EFAULT on failure.
- */
-int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa)
-{
- struct fsxattr xfa;
-
- memset(&xfa, 0, sizeof(xfa));
- xfa.fsx_xflags = fa->fsx_xflags;
- xfa.fsx_extsize = fa->fsx_extsize;
- xfa.fsx_nextents = fa->fsx_nextents;
- xfa.fsx_projid = fa->fsx_projid;
- xfa.fsx_cowextsize = fa->fsx_cowextsize;
-
- if (copy_to_user(ufa, &xfa, sizeof(xfa)))
- return -EFAULT;
-
- return 0;
-}
-EXPORT_SYMBOL(copy_fsxattr_to_user);
-
-static int copy_fsxattr_from_user(struct fileattr *fa,
- struct fsxattr __user *ufa)
-{
- struct fsxattr xfa;
-
- if (copy_from_user(&xfa, ufa, sizeof(xfa)))
- return -EFAULT;
-
- fileattr_fill_xflags(fa, xfa.fsx_xflags);
- fa->fsx_extsize = xfa.fsx_extsize;
- fa->fsx_nextents = xfa.fsx_nextents;
- fa->fsx_projid = xfa.fsx_projid;
- fa->fsx_cowextsize = xfa.fsx_cowextsize;
-
- return 0;
-}
-
-/*
- * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject
- * any invalid configurations.
- *
- * Note: must be called with inode lock held.
- */
-static int fileattr_set_prepare(struct inode *inode,
- const struct fileattr *old_ma,
- struct fileattr *fa)
+static int ioctl_getfsuuid(struct file *file, void __user *argp)
{
- int err;
+ struct super_block *sb = file_inode(file)->i_sb;
+ struct fsuuid2 u = { .len = sb->s_uuid_len, };
- /*
- * The IMMUTABLE and APPEND_ONLY flags can only be changed by
- * the relevant capability.
- */
- if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
- !capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
-
- err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags);
- if (err)
- return err;
-
- /*
- * Project Quota ID state is only allowed to change from within the init
- * namespace. Enforce that restriction only if we are trying to change
- * the quota ID state. Everything else is allowed in user namespaces.
- */
- if (current_user_ns() != &init_user_ns) {
- if (old_ma->fsx_projid != fa->fsx_projid)
- return -EINVAL;
- if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
- FS_XFLAG_PROJINHERIT)
- return -EINVAL;
- } else {
- /*
- * Caller is allowed to change the project ID. If it is being
- * changed, make sure that the new value is valid.
- */
- if (old_ma->fsx_projid != fa->fsx_projid &&
- !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
- return -EINVAL;
- }
-
- /* Check extent size hints. */
- if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
- return -EINVAL;
-
- if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
- !S_ISDIR(inode->i_mode))
- return -EINVAL;
-
- if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
- !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -EINVAL;
-
- /*
- * It is only valid to set the DAX flag on regular files and
- * directories on filesystems.
- */
- if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
- !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
- return -EINVAL;
-
- /* Extent size hints of zero turn off the flags. */
- if (fa->fsx_extsize == 0)
- fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
- if (fa->fsx_cowextsize == 0)
- fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
-
- return 0;
-}
-
-/**
- * vfs_fileattr_set - change miscellaneous file attributes
- * @mnt_userns: user namespace of the mount
- * @dentry: the object to change
- * @fa: fileattr pointer
- *
- * After verifying permissions, call i_op->fileattr_set() callback, if
- * exists.
- *
- * Verifying attributes involves retrieving current attributes with
- * i_op->fileattr_get(), this also allows initializing attributes that have
- * not been set by the caller to current values. Inode lock is held
- * throughout to prevent racing with another instance.
- *
- * Return: 0 on success, or a negative error on failure.
- */
-int vfs_fileattr_set(struct user_namespace *mnt_userns, struct dentry *dentry,
- struct fileattr *fa)
-{
- struct inode *inode = d_inode(dentry);
- struct fileattr old_ma = {};
- int err;
-
- if (!inode->i_op->fileattr_set)
- return -ENOIOCTLCMD;
-
- if (!inode_owner_or_capable(mnt_userns, inode))
- return -EPERM;
+ if (!sb->s_uuid_len)
+ return -ENOTTY;
- inode_lock(inode);
- err = vfs_fileattr_get(dentry, &old_ma);
- if (!err) {
- /* initialize missing bits from old_ma */
- if (fa->flags_valid) {
- fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON;
- fa->fsx_extsize = old_ma.fsx_extsize;
- fa->fsx_nextents = old_ma.fsx_nextents;
- fa->fsx_projid = old_ma.fsx_projid;
- fa->fsx_cowextsize = old_ma.fsx_cowextsize;
- } else {
- fa->flags |= old_ma.flags & ~FS_COMMON_FL;
- }
- err = fileattr_set_prepare(inode, &old_ma, fa);
- if (!err)
- err = inode->i_op->fileattr_set(mnt_userns, dentry, fa);
- }
- inode_unlock(inode);
+ memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len);
- return err;
+ return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0;
}
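/*
 * Illustrative userspace sketch (not part of this patch): reading the
 * filesystem UUID through the new FS_IOC_GETFSUUID ioctl served by
 * ioctl_getfsuuid() above. print_fs_uuid() is a hypothetical helper and
 * assumes a uapi linux/fs.h that defines FS_IOC_GETFSUUID and struct fsuuid2.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int print_fs_uuid(int fd)
{
	struct fsuuid2 u;
	int i;

	if (ioctl(fd, FS_IOC_GETFSUUID, &u) < 0)
		return -1;
	for (i = 0; i < u.len; i++)
		printf("%02x", u.uuid[i]);
	printf("\n");
	return 0;
}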
-EXPORT_SYMBOL(vfs_fileattr_set);
-static int ioctl_getflags(struct file *file, unsigned int __user *argp)
+static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp)
{
- struct fileattr fa = { .flags_valid = true }; /* hint only */
- int err;
+ struct super_block *sb = file_inode(file)->i_sb;
- err = vfs_fileattr_get(file->f_path.dentry, &fa);
- if (!err)
- err = put_user(fa.flags, argp);
- return err;
-}
-
-static int ioctl_setflags(struct file *file, unsigned int __user *argp)
-{
- struct user_namespace *mnt_userns = file_mnt_user_ns(file);
- struct dentry *dentry = file->f_path.dentry;
- struct fileattr fa;
- unsigned int flags;
- int err;
-
- err = get_user(flags, argp);
- if (!err) {
- err = mnt_want_write_file(file);
- if (!err) {
- fileattr_fill_flags(&fa, flags);
- err = vfs_fileattr_set(mnt_userns, dentry, &fa);
- mnt_drop_write_file(file);
- }
- }
- return err;
-}
+ if (!strlen(sb->s_sysfs_name))
+ return -ENOTTY;
-static int ioctl_fsgetxattr(struct file *file, void __user *argp)
-{
- struct fileattr fa = { .fsx_valid = true }; /* hint only */
- int err;
+ struct fs_sysfs_path u = {};
- err = vfs_fileattr_get(file->f_path.dentry, &fa);
- if (!err)
- err = copy_fsxattr_to_user(&fa, argp);
+ u.len = scnprintf(u.name, sizeof(u.name), "%s/%s", sb->s_type->name, sb->s_sysfs_name);
- return err;
-}
-
-static int ioctl_fssetxattr(struct file *file, void __user *argp)
-{
- struct user_namespace *mnt_userns = file_mnt_user_ns(file);
- struct dentry *dentry = file->f_path.dentry;
- struct fileattr fa;
- int err;
-
- err = copy_fsxattr_from_user(&fa, argp);
- if (!err) {
- err = mnt_want_write_file(file);
- if (!err) {
- err = vfs_fileattr_set(mnt_userns, dentry, &fa);
- mnt_drop_write_file(file);
- }
- }
- return err;
+ return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0;
}
/*
@@ -772,6 +485,9 @@ static int ioctl_fssetxattr(struct file *file, void __user *argp)
*
* When you add any new common ioctls to the switches above and below,
* please ensure they have compatible arguments in compat mode.
+ *
+ * The LSM mailing list should also be notified of any command additions or
+ * changes, as specific LSMs may be affected.
*/
static int do_vfs_ioctl(struct file *filp, unsigned int fd,
unsigned int cmd, unsigned long arg)
@@ -795,7 +511,8 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
return ioctl_fioasync(fd, filp, argp);
case FIOQSIZE:
- if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
+ if (S_ISDIR(inode->i_mode) ||
+ (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode)) ||
S_ISLNK(inode->i_mode)) {
loff_t res = inode_get_bytes(inode);
return copy_to_user(argp, &res, sizeof(res)) ?
@@ -830,7 +547,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
return ioctl_file_dedupe_range(filp, argp);
case FIONREAD:
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) || IS_ANON_FILE(inode))
return vfs_ioctl(filp, cmd, arg);
return put_user(i_size_read(inode) - filp->f_pos,
@@ -848,8 +565,14 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
case FS_IOC_FSSETXATTR:
return ioctl_fssetxattr(filp, argp);
+ case FS_IOC_GETFSUUID:
+ return ioctl_getfsuuid(filp, argp);
+
+ case FS_IOC_GETFSSYSFSPATH:
+ return ioctl_get_fs_sysfs_path(filp, argp);
+
default:
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode))
return file_ioctl(filp, cmd, argp);
break;
}
@@ -859,28 +582,29 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd,
SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
int error;
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
- error = security_file_ioctl(f.file, cmd, arg);
+ error = security_file_ioctl(fd_file(f), cmd, arg);
if (error)
- goto out;
+ return error;
- error = do_vfs_ioctl(f.file, fd, cmd, arg);
+ error = do_vfs_ioctl(fd_file(f), fd, cmd, arg);
if (error == -ENOIOCTLCMD)
- error = vfs_ioctl(f.file, cmd, arg);
+ error = vfs_ioctl(fd_file(f), cmd, arg);
-out:
- fdput(f);
return error;
}
#ifdef CONFIG_COMPAT
/**
* compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
+ * @file: The file to operate on.
+ * @cmd: The ioctl command number.
+ * @arg: The argument to the ioctl.
*
* This is not normally called as a function, but instead set in struct
* file_operations as
@@ -914,36 +638,35 @@ EXPORT_SYMBOL(compat_ptr_ioctl);
COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
compat_ulong_t, arg)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
int error;
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
- /* RED-PEN how should LSM module know it's handling 32bit? */
- error = security_file_ioctl(f.file, cmd, arg);
+ error = security_file_ioctl_compat(fd_file(f), cmd, arg);
if (error)
- goto out;
+ return error;
switch (cmd) {
/* FICLONE takes an int argument, so don't use compat_ptr() */
case FICLONE:
- error = ioctl_file_clone(f.file, arg, 0, 0, 0);
+ error = ioctl_file_clone(fd_file(f), arg, 0, 0, 0);
break;
#if defined(CONFIG_X86_64)
/* these get messy on amd64 due to alignment differences */
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+ error = compat_ioctl_preallocate(fd_file(f), 0, compat_ptr(arg));
break;
case FS_IOC_UNRESVSP_32:
case FS_IOC_UNRESVSP64_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+ error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_PUNCH_HOLE,
compat_ptr(arg));
break;
case FS_IOC_ZERO_RANGE_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+ error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_ZERO_RANGE,
compat_ptr(arg));
break;
#endif
@@ -963,21 +686,17 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
* argument.
*/
default:
- error = do_vfs_ioctl(f.file, fd, cmd,
+ error = do_vfs_ioctl(fd_file(f), fd, cmd,
(unsigned long)compat_ptr(arg));
if (error != -ENOIOCTLCMD)
break;
- if (f.file->f_op->compat_ioctl)
- error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
+ if (fd_file(f)->f_op->compat_ioctl)
+ error = fd_file(f)->f_op->compat_ioctl(fd_file(f), cmd, arg);
if (error == -ENOIOCTLCMD)
error = -ENOTTY;
break;
}
-
- out:
- fdput(f);
-
return error;
}
#endif
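
For reference, a minimal userspace sketch of the two ioctls added above. It assumes the uapi definitions that accompany this change (struct fsuuid2 and struct fs_sysfs_path from <linux/fs.h>); the mount point is a placeholder and error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* assumed to carry FS_IOC_GETFSUUID and friends */

int main(void)
{
	struct fsuuid2 uuid = { 0 };
	struct fs_sysfs_path path = { 0 };
	int fd = open("/mnt/test", O_RDONLY);	/* placeholder mount point */

	if (fd < 0)
		return 1;

	/* ioctl_getfsuuid() returns -ENOTTY when sb->s_uuid_len was never set. */
	if (ioctl(fd, FS_IOC_GETFSUUID, &uuid) == 0)
		printf("fs uuid is %u bytes long\n", uuid.len);

	/* ioctl_get_fs_sysfs_path() returns -ENOTTY when sb->s_sysfs_name is empty. */
	if (ioctl(fd, FS_IOC_GETFSSYSFSPATH, &path) == 0)
		printf("sysfs path: /sys/fs/%.*s\n", path.len, path.name);

	close(fd);
	return 0;
}
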
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 4143a3ff89db..a572b8808524 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -4,14 +4,16 @@
# All Rights Reserved.
#
-ccflags-y += -I $(srctree)/$(src) # needed for trace events
+ccflags-y += -I $(src) # needed for trace events
obj-$(CONFIG_FS_IOMAP) += iomap.o
iomap-y += trace.o \
- buffered-io.o \
- direct-io.o \
- fiemap.o \
iter.o \
- seek.o
+ buffered-io.o
+iomap-$(CONFIG_BLOCK) += direct-io.o \
+ ioend.o \
+ fiemap.o \
+ seek.o \
+ bio.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/bio.c b/fs/iomap/bio.c
new file mode 100644
index 000000000000..fc045f2e4c45
--- /dev/null
+++ b/fs/iomap/bio.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (C) 2016-2023 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/pagemap.h>
+#include "internal.h"
+#include "trace.h"
+
+static void iomap_read_end_io(struct bio *bio)
+{
+ int error = blk_status_to_errno(bio->bi_status);
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio)
+ iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+ bio_put(bio);
+}
+
+static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+{
+ struct bio *bio = ctx->read_ctx;
+
+ if (bio)
+ submit_bio(bio);
+}
+
+static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+ struct folio *folio = ctx->cur_folio;
+ const struct iomap *iomap = &iter->iomap;
+ loff_t pos = iter->pos;
+ size_t poff = offset_in_folio(folio, pos);
+ loff_t length = iomap_length(iter);
+ sector_t sector;
+ struct bio *bio = ctx->read_ctx;
+
+ sector = iomap_sector(iomap, pos);
+ if (!bio || bio_end_sector(bio) != sector ||
+ !bio_add_folio(bio, folio, plen, poff)) {
+ gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+ gfp_t orig_gfp = gfp;
+ unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+
+ if (bio)
+ submit_bio(bio);
+
+ if (ctx->rac) /* same as readahead_gfp_mask */
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+ gfp);
+ /*
+ * If the bio_alloc fails, try it again for a single page to
+ * avoid having to deal with partial page reads. This emulates
+ * what do_mpage_read_folio does.
+ */
+ if (!bio)
+ bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
+ if (ctx->rac)
+ bio->bi_opf |= REQ_RAHEAD;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_end_io = iomap_read_end_io;
+ bio_add_folio_nofail(bio, folio, plen, poff);
+ ctx->read_ctx = bio;
+ }
+ return 0;
+}
+
+const struct iomap_read_ops iomap_bio_read_ops = {
+ .read_folio_range = iomap_bio_read_folio_range,
+ .submit_read = iomap_bio_submit_read,
+};
+EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
+
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len)
+{
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ struct bio_vec bvec;
+ struct bio bio;
+
+ bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
+ bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+ bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
+ return submit_bio_wait(&bio);
+}
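
Not part of the patch, but a rough sketch of how a block-backed filesystem might keep using this default bio path with the new read interface: the iomap_read_folio_ctx carries the target folio (or readahead state) plus the ops table. The myfs_* wrappers and myfs_iomap_ops below are hypothetical names.

/* Hypothetical glue, assuming the iomap_read_folio()/iomap_readahead()
 * signatures introduced later in this series. */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_read_folio_ctx ctx = {
		.ops		= &iomap_bio_read_ops,
		.cur_folio	= folio,
	};

	iomap_read_folio(&myfs_iomap_ops, &ctx);
	return 0;
}

static void myfs_readahead(struct readahead_control *rac)
{
	struct iomap_read_folio_ctx ctx = {
		.ops	= &iomap_bio_read_ops,
		.rac	= rac,
	};

	iomap_readahead(&myfs_iomap_ops, &ctx);
}
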
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 1753c26c8e76..e5c1ca440d93 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1,99 +1,291 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (C) 2016-2019 Christoph Hellwig.
+ * Copyright (C) 2016-2023 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
-#include <linux/pagemap.h>
-#include <linux/uio.h>
#include <linux/buffer_head.h>
-#include <linux/dax.h>
#include <linux/writeback.h>
-#include <linux/list_sort.h>
#include <linux/swap.h>
-#include <linux/bio.h>
-#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
/*
- * Structure allocated for each page or THP when block size < page size
- * to track sub-page uptodate status and I/O completions.
+ * Structure allocated for each folio to track per-block uptodate, dirty state
+ * and I/O completions.
*/
-struct iomap_page {
- atomic_t read_bytes_pending;
+struct iomap_folio_state {
+ spinlock_t state_lock;
+ unsigned int read_bytes_pending;
atomic_t write_bytes_pending;
- spinlock_t uptodate_lock;
- unsigned long uptodate[];
-};
-static inline struct iomap_page *to_iomap_page(struct page *page)
-{
/*
- * per-block data is stored in the head page. Callers should
- * not be dealing with tail pages, and if they are, they can
- * call thp_head() first.
+ * Each block has two bits in this bitmap:
+ * Bits [0..blocks_per_folio) hold the uptodate status.
+ * Bits [blocks_per_folio..2*blocks_per_folio) hold the dirty status.
*/
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ unsigned long state[];
+};
+
+static inline bool ifs_is_fully_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs)
+{
+ struct inode *inode = folio->mapping->host;
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
- return NULL;
+ return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}
-static struct bio_set iomap_ioend_bioset;
+/*
+ * Find the next uptodate block in the folio. end_blk is inclusive.
+ * If no uptodate block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_uptodate_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ return find_next_bit(ifs->state, end_blk + 1, start_blk);
+}
-static struct iomap_page *
-iomap_page_create(struct inode *inode, struct page *page)
+/*
+ * Find the next non-uptodate block in the folio. end_blk is inclusive.
+ * If no non-uptodate block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_nonuptodate_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
{
- struct iomap_page *iop = to_iomap_page(page);
- unsigned int nr_blocks = i_blocks_per_page(inode, page);
-
- if (iop || nr_blocks <= 1)
- return iop;
-
- iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
- GFP_NOFS | __GFP_NOFAIL);
- spin_lock_init(&iop->uptodate_lock);
- if (PageUptodate(page))
- bitmap_fill(iop->uptodate, nr_blocks);
- attach_page_private(page, iop);
- return iop;
+ struct iomap_folio_state *ifs = folio->private;
+
+ return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
}
-static void
-iomap_page_release(struct page *page)
+static bool ifs_set_range_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
{
- struct iomap_page *iop = detach_page_private(page);
- unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
+ struct inode *inode = folio->mapping->host;
+ unsigned int first_blk = off >> inode->i_blkbits;
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+
+ bitmap_set(ifs->state, first_blk, nr_blks);
+ return ifs_is_fully_uptodate(folio, ifs);
+}
+
+static void iomap_set_range_uptodate(struct folio *folio, size_t off,
+ size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ unsigned long flags;
+ bool uptodate = true;
- if (!iop)
+ if (folio_test_uptodate(folio))
return;
- WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
- WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
- WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
- PageUptodate(page));
- kfree(iop);
+
+ if (ifs) {
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+ }
+
+ if (uptodate)
+ folio_mark_uptodate(folio);
}
/*
- * Calculate the range inside the page that we actually need to read.
+ * Find the next dirty block in the folio. end_blk is inclusive.
+ * If no dirty block is found, this will return end_blk + 1.
*/
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
- loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+static unsigned ifs_next_dirty_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks = i_blocks_per_folio(inode, folio);
+
+ return find_next_bit(ifs->state, blks + end_blk + 1,
+ blks + start_blk) - blks;
+}
+
+/*
+ * Find the next clean block in the folio. end_blk is inclusive.
+ * If no clean block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_clean_block(struct folio *folio,
+ unsigned start_blk, unsigned end_blk)
{
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks = i_blocks_per_folio(inode, folio);
+
+ return find_next_zero_bit(ifs->state, blks + end_blk + 1,
+ blks + start_blk) - blks;
+}
+
+static unsigned ifs_find_dirty_range(struct folio *folio,
+ struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned start_blk =
+ offset_in_folio(folio, *range_start) >> inode->i_blkbits;
+ unsigned end_blk = min_not_zero(
+ offset_in_folio(folio, range_end) >> inode->i_blkbits,
+ i_blocks_per_folio(inode, folio)) - 1;
+ unsigned nblks;
+
+ start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
+ if (start_blk > end_blk)
+ return 0;
+ if (start_blk == end_blk)
+ nblks = 1;
+ else
+ nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
+ start_blk;
+
+ *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
+ return nblks << inode->i_blkbits;
+}
+
+static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
+ u64 range_end)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (*range_start >= range_end)
+ return 0;
+
+ if (ifs)
+ return ifs_find_dirty_range(folio, ifs, range_start, range_end);
+ return range_end - *range_start;
+}
+
+static void ifs_clear_range_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int first_blk = (off >> inode->i_blkbits);
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_clear_range_dirty(folio, ifs, off, len);
+}
+
+static void ifs_set_range_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int first_blk = (off >> inode->i_blkbits);
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_set_range_dirty(folio, ifs, off, len);
+}
+
+static struct iomap_folio_state *ifs_alloc(struct inode *inode,
+ struct folio *folio, unsigned int flags)
+{
+ struct iomap_folio_state *ifs = folio->private;
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
+ gfp_t gfp;
+
+ if (ifs || nr_blocks <= 1)
+ return ifs;
+
+ if (flags & IOMAP_NOWAIT)
+ gfp = GFP_NOWAIT;
+ else
+ gfp = GFP_NOFS | __GFP_NOFAIL;
+
+ /*
+ * ifs->state tracks two sets of state flags when the
+ * filesystem block size is smaller than the folio size.
+ * The first state tracks per-block uptodate and the
+ * second tracks per-block dirty state.
+ */
+ ifs = kzalloc(struct_size(ifs, state,
+ BITS_TO_LONGS(2 * nr_blocks)), gfp);
+ if (!ifs)
+ return ifs;
+
+ spin_lock_init(&ifs->state_lock);
+ if (folio_test_uptodate(folio))
+ bitmap_set(ifs->state, 0, nr_blocks);
+ if (folio_test_dirty(folio))
+ bitmap_set(ifs->state, nr_blocks, nr_blocks);
+ folio_attach_private(folio, ifs);
+
+ return ifs;
+}
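
To make the two-region layout of ifs->state concrete, here is a purely illustrative helper (not part of the patch): bit i is the uptodate flag for block i, and bit blocks_per_folio + i is the dirty flag for the same block, which is the indexing that ifs_set_range_uptodate() and ifs_set_range_dirty() rely on.

/*
 * Illustration only: for a 64KiB folio over 4KiB blocks there are 16 blocks,
 * so ifs->state holds 32 bits: bits 0..15 are per-block uptodate and bits
 * 16..31 are per-block dirty.
 */
static void example_mark_block_written(struct inode *inode, struct folio *folio,
		struct iomap_folio_state *ifs, unsigned int block)
{
	unsigned int blks = i_blocks_per_folio(inode, folio);
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	__set_bit(block, ifs->state);		/* uptodate region */
	__set_bit(blks + block, ifs->state);	/* dirty region */
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}
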
+
+static void ifs_free(struct folio *folio)
+{
+ struct iomap_folio_state *ifs = folio_detach_private(folio);
+
+ if (!ifs)
+ return;
+ WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
+ WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
+ folio_test_uptodate(folio));
+ kfree(ifs);
+}
+
+/*
+ * Calculate how many bytes to truncate based on the number of blocks to
+ * truncate and the end position to start truncating from.
+ */
+static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
+ unsigned blocks_truncated)
+{
+ unsigned block_size = 1 << block_bits;
+ unsigned block_offset = end_pos & (block_size - 1);
+
+ if (!block_offset)
+ return blocks_truncated << block_bits;
+
+ return ((blocks_truncated - 1) << block_bits) + block_offset;
+}
+
+/*
+ * Calculate the range inside the folio that we actually need to read.
+ */
+static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
+{
+ struct iomap_folio_state *ifs = folio->private;
loff_t orig_pos = *pos;
loff_t isize = i_size_read(inode);
unsigned block_bits = inode->i_blkbits;
unsigned block_size = (1 << block_bits);
- unsigned poff = offset_in_page(*pos);
- unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ size_t poff = offset_in_folio(folio, *pos);
+ size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
+ size_t orig_plen = plen;
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -102,25 +294,30 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
* per-block uptodate status and adjust the offset and length if needed
* to avoid reading in already uptodate ranges.
*/
- if (iop) {
- unsigned int i;
-
- /* move forward for each leading block marked uptodate */
- for (i = first; i <= last; i++) {
- if (!test_bit(i, iop->uptodate))
- break;
- *pos += block_size;
- poff += block_size;
- plen -= block_size;
- first++;
+ if (ifs) {
+ unsigned int next, blocks_skipped;
+
+ next = ifs_next_nonuptodate_block(folio, first, last);
+ blocks_skipped = next - first;
+
+ if (blocks_skipped) {
+ unsigned long block_offset = *pos & (block_size - 1);
+ unsigned bytes_skipped =
+ (blocks_skipped << block_bits) - block_offset;
+
+ *pos += bytes_skipped;
+ poff += bytes_skipped;
+ plen -= bytes_skipped;
}
+ first = next;
/* truncate len if we find any trailing uptodate block(s) */
- for ( ; i <= last; i++) {
- if (test_bit(i, iop->uptodate)) {
- plen -= (last - i + 1) * block_size;
- last = i - 1;
- break;
+ if (++next <= last) {
+ next = ifs_next_uptodate_block(folio, next, last);
+ if (next <= last) {
+ plen -= iomap_bytes_to_truncate(*pos + plen,
+ block_bits, last - next + 1);
+ last = next - 1;
}
}
}
@@ -130,255 +327,287 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
* handle both halves separately so that we properly zero data in the
* page cache for blocks that are entirely outside of i_size.
*/
- if (orig_pos <= isize && orig_pos + length > isize) {
- unsigned end = offset_in_page(isize - 1) >> block_bits;
+ if (orig_pos <= isize && orig_pos + orig_plen > isize) {
+ unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
- plen -= (last - end) * block_size;
+ plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
+ last - end);
}
*offp = poff;
*lenp = plen;
}
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
+ loff_t pos)
{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
- unsigned first = off >> inode->i_blkbits;
- unsigned last = (off + len - 1) >> inode->i_blkbits;
- unsigned long flags;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
- spin_lock_irqsave(&iop->uptodate_lock, flags);
- bitmap_set(iop->uptodate, first, last - first + 1);
- if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
- SetPageUptodate(page);
- spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+ return srcmap->type != IOMAP_MAPPED ||
+ (srcmap->flags & IOMAP_F_NEW) ||
+ pos >= i_size_read(iter->inode);
}
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @folio: folio to copy to
+ *
+ * Copy the inline data in @iter into @folio and zero out the rest of the folio.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
+ struct folio *folio)
{
- if (PageError(page))
- return;
+ const struct iomap *iomap = iomap_iter_srcmap(iter);
+ size_t size = i_size_read(iter->inode) - iomap->offset;
+ size_t offset = offset_in_folio(folio, iomap->offset);
- if (page_has_private(page))
- iomap_iop_set_range_uptodate(page, off, len);
- else
- SetPageUptodate(page);
-}
+ if (WARN_ON_ONCE(!iomap->inline_data))
+ return -EIO;
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
-{
- struct page *page = bvec->bv_page;
- struct iomap_page *iop = to_iomap_page(page);
+ if (folio_test_uptodate(folio))
+ return 0;
- if (unlikely(error)) {
- ClearPageUptodate(page);
- SetPageError(page);
- } else {
- iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
- }
+ if (WARN_ON_ONCE(size > iomap->length))
+ return -EIO;
+ if (offset > 0)
+ ifs_alloc(iter->inode, folio, iter->flags);
- if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
- unlock_page(page);
+ folio_fill_tail(folio, offset, iomap->inline_data, size);
+ iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
+ return 0;
}
-static void
-iomap_read_end_io(struct bio *bio)
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error)
{
- int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct iomap_folio_state *ifs = folio->private;
+ bool uptodate = !error;
+ bool finished = true;
+
+ if (ifs) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ if (!error)
+ uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
+ ifs->read_bytes_pending -= len;
+ finished = !ifs->read_bytes_pending;
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+ }
- bio_for_each_segment_all(bvec, bio, iter_all)
- iomap_read_page_end_io(bvec, error);
- bio_put(bio);
+ if (finished)
+ folio_end_read(folio, uptodate);
}
+EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
-struct iomap_readpage_ctx {
- struct page *cur_page;
- bool cur_page_in_bio;
- struct bio *bio;
- struct readahead_control *rac;
-};
-
-static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
- struct page *page)
+static void iomap_read_init(struct folio *folio)
{
- const struct iomap *iomap = iomap_iter_srcmap(iter);
- size_t size = i_size_read(iter->inode) - iomap->offset;
- size_t poff = offset_in_page(iomap->offset);
- void *addr;
+ struct iomap_folio_state *ifs = folio->private;
- if (PageUptodate(page))
- return PAGE_SIZE - poff;
+ if (ifs) {
+ size_t len = folio_size(folio);
- if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
- return -EIO;
- if (WARN_ON_ONCE(size > PAGE_SIZE -
- offset_in_page(iomap->inline_data)))
- return -EIO;
- if (WARN_ON_ONCE(size > iomap->length))
- return -EIO;
- if (poff > 0)
- iomap_page_create(iter->inode, page);
-
- addr = kmap_local_page(page) + poff;
- memcpy(addr, iomap->inline_data, size);
- memset(addr + size, 0, PAGE_SIZE - poff - size);
- kunmap_local(addr);
- iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
- return PAGE_SIZE - poff;
+ /*
+ * ifs->read_bytes_pending is used to track how many bytes are
+ * read in asynchronously by the IO helper. We need to track
+ * this so that we can know when the IO helper has finished
+ * reading in all the necessary ranges of the folio and can end
+ * the read.
+ *
+ * Increase ->read_bytes_pending by the folio size to start, and
+ * add a +1 bias. We'll subtract the bias and any uptodate /
+ * zeroed ranges that did not require IO in iomap_read_end()
+ * after we're done processing the folio.
+ *
+ * We do this because otherwise, we would have to increment
+ * ifs->read_bytes_pending every time a range in the folio needs
+ * to be read in, which can get expensive since the spinlock
+ * needs to be held whenever modifying ifs->read_bytes_pending.
+ *
+ * We add the bias to ensure the read has not been ended on the
+ * folio when iomap_read_end() is called, even if the IO helper
+ * has already finished reading in the entire folio.
+ */
+ spin_lock_irq(&ifs->state_lock);
+ WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+ ifs->read_bytes_pending = len + 1;
+ spin_unlock_irq(&ifs->state_lock);
+ }
}
-static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
- loff_t pos)
+/*
+ * This ends IO if no bytes were submitted to an IO helper.
+ *
+ * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
+ * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
+ * have already been completed by the IO helper, then this will end the read.
+ * Else the IO helper will end the read after all submitted ranges have been
+ * read.
+ */
+static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
{
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ struct iomap_folio_state *ifs = folio->private;
- return srcmap->type != IOMAP_MAPPED ||
- (srcmap->flags & IOMAP_F_NEW) ||
- pos >= i_size_read(iter->inode);
+ if (ifs) {
+ bool end_read, uptodate;
+
+ spin_lock_irq(&ifs->state_lock);
+ if (!ifs->read_bytes_pending) {
+ WARN_ON_ONCE(bytes_submitted);
+ spin_unlock_irq(&ifs->state_lock);
+ folio_unlock(folio);
+ return;
+ }
+
+ /*
+ * Subtract any bytes that were initially accounted to
+ * read_bytes_pending but skipped for IO. The +1 accounts for
+ * the bias we added in iomap_read_init().
+ */
+ ifs->read_bytes_pending -=
+ (folio_size(folio) + 1 - bytes_submitted);
+
+ /*
+ * If !ifs->read_bytes_pending, this means all pending reads by
+ * the IO helper have already completed, which means we need to
+ * end the folio read here. If ifs->read_bytes_pending != 0,
+ * the IO helper will end the folio read.
+ */
+ end_read = !ifs->read_bytes_pending;
+ if (end_read)
+ uptodate = ifs_is_fully_uptodate(folio, ifs);
+ spin_unlock_irq(&ifs->state_lock);
+ if (end_read)
+ folio_end_read(folio, uptodate);
+ } else if (!bytes_submitted) {
+ /*
+ * If there were no bytes submitted, this means we are
+ * responsible for unlocking the folio here, since no IO helper
+ * has taken ownership of it. If there were bytes submitted,
+ * then the IO helper will end the read via
+ * iomap_finish_folio_read().
+ */
+ folio_unlock(folio);
+ }
}
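
A worked trace of the bias accounting used by iomap_read_init() and iomap_read_end(), with illustrative numbers only:

/*
 * 16KiB folio, only the final 4KiB range needs I/O:
 *
 *   iomap_read_init():        read_bytes_pending = 16384 + 1  (folio size + bias)
 *   range submitted for I/O:  bytes_submitted    = 4096
 *   I/O completes early:      read_bytes_pending = 16385 - 4096 = 12289
 *   iomap_read_end():         read_bytes_pending -= 16384 + 1 - 4096
 *                             read_bytes_pending == 0, so folio_end_read() runs
 *
 * Had the I/O still been in flight when iomap_read_end() ran, the same
 * subtraction would leave read_bytes_pending == 4096 and the read would be
 * ended later from iomap_finish_folio_read().
 */
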
-static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
- struct iomap_readpage_ctx *ctx, loff_t offset)
+static int iomap_read_folio_iter(struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
{
const struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos + offset;
- loff_t length = iomap_length(iter) - offset;
- struct page *page = ctx->cur_page;
- struct iomap_page *iop;
- loff_t orig_pos = pos;
- unsigned poff, plen;
- sector_t sector;
-
- if (iomap->type == IOMAP_INLINE)
- return min(iomap_read_inline_data(iter, page), length);
-
- /* zero post-eof blocks as the page may be mapped */
- iop = iomap_page_create(iter->inode, page);
- iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
- if (plen == 0)
- goto done;
-
- if (iomap_block_needs_zeroing(iter, pos)) {
- zero_user(page, poff, plen);
- iomap_set_range_uptodate(page, poff, plen);
- goto done;
+ loff_t pos = iter->pos;
+ loff_t length = iomap_length(iter);
+ struct folio *folio = ctx->cur_folio;
+ size_t poff, plen;
+ loff_t pos_diff;
+ int ret;
+
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_read_inline_data(iter, folio);
+ if (ret)
+ return ret;
+ return iomap_iter_advance(iter, length);
}
- ctx->cur_page_in_bio = true;
- if (iop)
- atomic_add(plen, &iop->read_bytes_pending);
+ ifs_alloc(iter->inode, folio, iter->flags);
- sector = iomap_sector(iomap, pos);
- if (!ctx->bio ||
- bio_end_sector(ctx->bio) != sector ||
- bio_add_page(ctx->bio, page, plen, poff) != plen) {
- gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
- gfp_t orig_gfp = gfp;
- unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+ length = min_t(loff_t, length,
+ folio_size(folio) - offset_in_folio(folio, pos));
+ while (length) {
+ iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
+ &plen);
- if (ctx->bio)
- submit_bio(ctx->bio);
+ pos_diff = pos - iter->pos;
+ if (WARN_ON_ONCE(pos_diff + plen > length))
+ return -EIO;
- if (ctx->rac) /* same as readahead_gfp_mask */
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
- /*
- * If the bio_alloc fails, try it again for a single page to
- * avoid having to deal with partial page reads. This emulates
- * what do_mpage_readpage does.
- */
- if (!ctx->bio)
- ctx->bio = bio_alloc(orig_gfp, 1);
- ctx->bio->bi_opf = REQ_OP_READ;
- if (ctx->rac)
- ctx->bio->bi_opf |= REQ_RAHEAD;
- ctx->bio->bi_iter.bi_sector = sector;
- bio_set_dev(ctx->bio, iomap->bdev);
- ctx->bio->bi_end_io = iomap_read_end_io;
- __bio_add_page(ctx->bio, page, plen, poff);
+ ret = iomap_iter_advance(iter, pos_diff);
+ if (ret)
+ return ret;
+
+ if (plen == 0)
+ return 0;
+
+ /* zero post-eof blocks as the page may be mapped */
+ if (iomap_block_needs_zeroing(iter, pos)) {
+ folio_zero_range(folio, poff, plen);
+ iomap_set_range_uptodate(folio, poff, plen);
+ } else {
+ if (!*bytes_submitted)
+ iomap_read_init(folio);
+ ret = ctx->ops->read_folio_range(iter, ctx, plen);
+ if (ret)
+ return ret;
+ *bytes_submitted += plen;
+ }
+
+ ret = iomap_iter_advance(iter, plen);
+ if (ret)
+ return ret;
+ length -= pos_diff + plen;
+ pos = iter->pos;
}
-done:
- /*
- * Move the caller beyond our range so that it keeps making progress.
- * For that, we have to include any leading non-uptodate ranges, but
- * we can skip trailing ones as they will be handled in the next
- * iteration.
- */
- return pos - orig_pos + plen;
+ return 0;
}
-int
-iomap_readpage(struct page *page, const struct iomap_ops *ops)
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx)
{
+ struct folio *folio = ctx->cur_folio;
struct iomap_iter iter = {
- .inode = page->mapping->host,
- .pos = page_offset(page),
- .len = PAGE_SIZE,
- };
- struct iomap_readpage_ctx ctx = {
- .cur_page = page,
+ .inode = folio->mapping->host,
+ .pos = folio_pos(folio),
+ .len = folio_size(folio),
};
+ size_t bytes_submitted = 0;
int ret;
- trace_iomap_readpage(page->mapping->host, 1);
+ trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
+ iter.status = iomap_read_folio_iter(&iter, ctx,
+ &bytes_submitted);
- if (ret < 0)
- SetPageError(page);
+ if (ctx->ops->submit_read)
+ ctx->ops->submit_read(ctx);
- if (ctx.bio) {
- submit_bio(ctx.bio);
- WARN_ON_ONCE(!ctx.cur_page_in_bio);
- } else {
- WARN_ON_ONCE(ctx.cur_page_in_bio);
- unlock_page(page);
- }
-
- /*
- * Just like mpage_readahead and block_read_full_page, we always
- * return 0 and just mark the page as PageError on errors. This
- * should be cleaned up throughout the stack eventually.
- */
- return 0;
+ iomap_read_end(folio, bytes_submitted);
}
-EXPORT_SYMBOL_GPL(iomap_readpage);
+EXPORT_SYMBOL_GPL(iomap_read_folio);
-static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
- struct iomap_readpage_ctx *ctx)
+static int iomap_readahead_iter(struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
{
- loff_t length = iomap_length(iter);
- loff_t done, ret;
-
- for (done = 0; done < length; done += ret) {
- if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
- if (!ctx->cur_page_in_bio)
- unlock_page(ctx->cur_page);
- put_page(ctx->cur_page);
- ctx->cur_page = NULL;
+ int ret;
+
+ while (iomap_length(iter)) {
+ if (ctx->cur_folio &&
+ offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
+ iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
+ ctx->cur_folio = NULL;
}
- if (!ctx->cur_page) {
- ctx->cur_page = readahead_page(ctx->rac);
- ctx->cur_page_in_bio = false;
+ if (!ctx->cur_folio) {
+ ctx->cur_folio = readahead_folio(ctx->rac);
+ if (WARN_ON_ONCE(!ctx->cur_folio))
+ return -EINVAL;
+ *cur_bytes_submitted = 0;
}
- ret = iomap_readpage_iter(iter, ctx, done);
+ ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
+ if (ret)
+ return ret;
}
- return done;
+ return 0;
}
/**
* iomap_readahead - Attempt to read pages from a file.
- * @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
+ * @ctx: The ctx used for issuing readahead.
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
@@ -390,123 +619,126 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx)
{
+ struct readahead_control *rac = ctx->rac;
struct iomap_iter iter = {
.inode = rac->mapping->host,
.pos = readahead_pos(rac),
.len = readahead_length(rac),
};
- struct iomap_readpage_ctx ctx = {
- .rac = rac,
- };
+ size_t cur_bytes_submitted;
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
while (iomap_iter(&iter, ops) > 0)
- iter.processed = iomap_readahead_iter(&iter, &ctx);
-
- if (ctx.bio)
- submit_bio(ctx.bio);
- if (ctx.cur_page) {
- if (!ctx.cur_page_in_bio)
- unlock_page(ctx.cur_page);
- put_page(ctx.cur_page);
- }
+ iter.status = iomap_readahead_iter(&iter, ctx,
+ &cur_bytes_submitted);
+
+ if (ctx->ops->submit_read)
+ ctx->ops->submit_read(ctx);
+
+ if (ctx->cur_folio)
+ iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_readahead);
/*
- * iomap_is_partially_uptodate checks whether blocks within a page are
+ * iomap_is_partially_uptodate checks whether blocks within a folio are
* uptodate or not.
*
- * Returns true if all blocks which correspond to a file portion
- * we want to read within the page are uptodate.
+ * Returns true if all blocks which correspond to the specified part
+ * of the folio are uptodate.
*/
-int
-iomap_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count)
+bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
- unsigned len, first, last;
- unsigned i;
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
+ unsigned first, last;
- /* Limit range to one page */
- len = min_t(unsigned, PAGE_SIZE - from, count);
+ if (!ifs)
+ return false;
- /* First and last blocks in range within page */
- first = from >> inode->i_blkbits;
- last = (from + len - 1) >> inode->i_blkbits;
+ /* Caller's range may extend past the end of this folio */
+ count = min(folio_size(folio) - from, count);
- if (iop) {
- for (i = first; i <= last; i++)
- if (!test_bit(i, iop->uptodate))
- return 0;
- return 1;
- }
+ /* First and last blocks in range within folio */
+ first = from >> inode->i_blkbits;
+ last = (from + count - 1) >> inode->i_blkbits;
- return 0;
+ return ifs_next_nonuptodate_block(folio, first, last) > last;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
-int
-iomap_releasepage(struct page *page, gfp_t gfp_mask)
+/**
+ * iomap_get_folio - get a folio reference for writing
+ * @iter: iteration structure
+ * @pos: start offset of write
+ * @len: Suggested size of folio to create.
+ *
+ * Returns a locked reference to the folio at @pos, or an error pointer if the
+ * folio could not be obtained.
+ */
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
- trace_iomap_releasepage(page->mapping->host, page_offset(page),
- PAGE_SIZE);
+ fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
+
+ if (iter->flags & IOMAP_NOWAIT)
+ fgp |= FGP_NOWAIT;
+ if (iter->flags & IOMAP_DONTCACHE)
+ fgp |= FGP_DONTCACHE;
+ fgp |= fgf_set_order(len);
+
+ return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+ fgp, mapping_gfp_mask(iter->inode->i_mapping));
+}
+EXPORT_SYMBOL_GPL(iomap_get_folio);
+
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
+{
+ trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
+ folio_size(folio));
/*
- * mm accommodates an old ext3 case where clean pages might not have had
- * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(); skip those here.
+ * If the folio is dirty, we refuse to release our metadata because
+ * it may be partially dirty. Once we track per-block dirty state,
+ * we can release the metadata if every block is dirty.
*/
- if (PageDirty(page) || PageWriteback(page))
- return 0;
- iomap_page_release(page);
- return 1;
+ if (folio_test_dirty(folio))
+ return false;
+ ifs_free(folio);
+ return true;
}
-EXPORT_SYMBOL_GPL(iomap_releasepage);
+EXPORT_SYMBOL_GPL(iomap_release_folio);
-void
-iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
- trace_iomap_invalidatepage(page->mapping->host, offset, len);
+ trace_iomap_invalidate_folio(folio->mapping->host,
+ folio_pos(folio) + offset, len);
/*
- * If we're invalidating the entire page, clear the dirty state from it
- * and release it to avoid unnecessary buildup of the LRU.
+ * If we're invalidating the entire folio, clear the dirty state
+ * from it and release it to avoid unnecessary buildup of the LRU.
*/
- if (offset == 0 && len == PAGE_SIZE) {
- WARN_ON_ONCE(PageWriteback(page));
- cancel_dirty_page(page);
- iomap_page_release(page);
+ if (offset == 0 && len == folio_size(folio)) {
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ folio_cancel_dirty(folio);
+ ifs_free(folio);
}
}
-EXPORT_SYMBOL_GPL(iomap_invalidatepage);
+EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- int ret;
-
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
-
- if (page_has_private(page))
- attach_page_private(newpage, detach_page_private(page));
+ struct inode *inode = mapping->host;
+ size_t len = folio_size(folio);
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
+ ifs_alloc(inode, folio, 0);
+ iomap_set_range_dirty(folio, 0, len);
+ return filemap_dirty_folio(mapping, folio);
}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
+EXPORT_SYMBOL_GPL(iomap_dirty_folio);
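
Taken together, a hedged sketch of the address_space_operations a filesystem built on these helpers typically ends up with; the myfs_* entries are placeholders and the iomap_* entries are the exports above.

/* Illustrative aops wiring, not taken from this patch. */
static const struct address_space_operations myfs_aops = {
	.read_folio		= myfs_read_folio,	/* wraps iomap_read_folio() */
	.readahead		= myfs_readahead,	/* wraps iomap_readahead() */
	.dirty_folio		= iomap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};
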
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
@@ -518,428 +750,875 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
* write started inside the existing inode size.
*/
if (pos + len > i_size)
- truncate_pagecache_range(inode, max(pos, i_size), pos + len);
-}
-
-static int
-iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
- unsigned plen, const struct iomap *iomap)
-{
- struct bio_vec bvec;
- struct bio bio;
-
- bio_init(&bio, &bvec, 1);
- bio.bi_opf = REQ_OP_READ;
- bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
- bio_set_dev(&bio, iomap->bdev);
- __bio_add_page(&bio, page, plen, poff);
- return submit_bio_wait(&bio);
+ truncate_pagecache_range(inode, max(pos, i_size),
+ pos + len - 1);
}
-static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- unsigned len, struct page *page)
+static int __iomap_write_begin(const struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len,
+ struct folio *folio)
{
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct iomap_page *iop = iomap_page_create(iter->inode, page);
+ struct iomap_folio_state *ifs;
+ loff_t pos = iter->pos;
loff_t block_size = i_blocksize(iter->inode);
loff_t block_start = round_down(pos, block_size);
loff_t block_end = round_up(pos + len, block_size);
- unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+ unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
+ size_t from = offset_in_folio(folio, pos), to = from + len;
+ size_t poff, plen;
+
+ /*
+ * If the write or zeroing completely overlaps the current folio, then the
+ * entire folio will be dirtied, so there is no need for
+ * per-block state tracking structures to be attached to this folio.
+ * For the unshare case, we must read in the ondisk contents because we
+ * are not changing pagecache contents.
+ */
+ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
+ pos + len >= folio_next_pos(folio))
+ return 0;
+
+ ifs = ifs_alloc(iter->inode, folio, iter->flags);
+ if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
+ return -EAGAIN;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
- ClearPageError(page);
do {
- iomap_adjust_read_range(iter->inode, iop, &block_start,
+ iomap_adjust_read_range(iter->inode, folio, &block_start,
block_end - block_start, &poff, &plen);
if (plen == 0)
break;
- if (!(iter->flags & IOMAP_UNSHARE) &&
- (from <= poff || from >= poff + plen) &&
- (to <= poff || to >= poff + plen))
+ /*
+ * If the read range will be entirely overwritten by the write,
+ * we can skip having to zero/read it in.
+ */
+ if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
+ to >= poff + plen)
continue;
if (iomap_block_needs_zeroing(iter, block_start)) {
if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
return -EIO;
- zero_user_segments(page, poff, from, to, poff + plen);
+ folio_zero_segments(folio, poff, from, to, poff + plen);
} else {
- int status = iomap_read_page_sync(block_start, page,
- poff, plen, srcmap);
+ int status;
+
+ if (iter->flags & IOMAP_NOWAIT)
+ return -EAGAIN;
+
+ if (write_ops && write_ops->read_folio_range)
+ status = write_ops->read_folio_range(iter,
+ folio, block_start, plen);
+ else
+ status = iomap_bio_read_folio_range_sync(iter,
+ folio, block_start, plen);
if (status)
return status;
}
- iomap_set_range_uptodate(page, poff, plen);
+ iomap_set_range_uptodate(folio, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
}
-static int iomap_write_begin_inline(const struct iomap_iter *iter,
- struct page *page)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t len)
{
- int ret;
+ loff_t pos = iter->pos;
+
+ if (!mapping_large_folio_support(iter->inode->i_mapping))
+ len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
+
+ if (iter->fbatch) {
+ struct folio *folio = folio_batch_next(iter->fbatch);
+
+ if (!folio)
+ return NULL;
+
+ /*
+ * The folio mapping generally shouldn't have changed based on
+ * fs locks, but be consistent with filemap lookup and retry
+ * the iter if it does.
+ */
+ folio_lock(folio);
+ if (unlikely(folio->mapping != iter->inode->i_mapping)) {
+ iter->iomap.flags |= IOMAP_F_STALE;
+ folio_unlock(folio);
+ return NULL;
+ }
+
+ folio_get(folio);
+ return folio;
+ }
+
+ if (write_ops && write_ops->get_folio)
+ return write_ops->get_folio(iter, pos, len);
+ return iomap_get_folio(iter, pos, len);
+}
+
+static void __iomap_put_folio(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, size_t ret,
+ struct folio *folio)
+{
+ loff_t pos = iter->pos;
+
+ if (write_ops && write_ops->put_folio) {
+ write_ops->put_folio(iter->inode, pos, ret, folio);
+ } else {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+}
+
+/* trim pos and bytes to within a given folio */
+static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
+ struct folio *folio, size_t *offset, u64 *bytes)
+{
+ loff_t pos = iter->pos;
+ size_t fsize = folio_size(folio);
+
+ WARN_ON_ONCE(pos < folio_pos(folio));
+ WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);
+
+ *offset = offset_in_folio(folio, pos);
+ *bytes = min(*bytes, fsize - *offset);
+ return pos;
+}
+
+static int iomap_write_begin_inline(const struct iomap_iter *iter,
+ struct folio *folio)
+{
/* needs more work for the tailpacking case; disable for now */
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
return -EIO;
- ret = iomap_read_inline_data(iter, page);
- if (ret < 0)
- return ret;
- return 0;
+ return iomap_read_inline_data(iter, folio);
}
-static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- unsigned len, struct page **pagep)
+/*
+ * Grab and prepare a folio for write based on iter state. Returns the folio,
+ * offset, and length. Callers can optionally pass a maximum length in *plen;
+ * otherwise it must be initialized to zero.
+ */
+static int iomap_write_begin(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops, struct folio **foliop,
+ size_t *poffset, u64 *plen)
{
- const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct page *page;
+ loff_t pos;
+ u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
+ struct folio *folio;
int status = 0;
- BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
- if (srcmap != &iter->iomap)
- BUG_ON(pos + len > srcmap->offset + srcmap->length);
+ len = min_not_zero(len, *plen);
+ *foliop = NULL;
+ *plen = 0;
if (fatal_signal_pending(current))
return -EINTR;
- if (page_ops && page_ops->page_prepare) {
- status = page_ops->page_prepare(iter->inode, pos, len);
- if (status)
- return status;
+ folio = __iomap_get_folio(iter, write_ops, len);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ /*
+ * No folio means we're done with a batch. We still have range to
+ * process so return and let the caller iterate and refill the batch.
+ */
+ if (!folio) {
+ WARN_ON_ONCE(!iter->fbatch);
+ return 0;
}
- page = grab_cache_page_write_begin(iter->inode->i_mapping,
- pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
- if (!page) {
- status = -ENOMEM;
- goto out_no_page;
+ /*
+ * Now we have a locked folio, before we do anything with it we need to
+ * check that the iomap we have cached is not stale. The inode extent
+ * mapping can change due to concurrent IO in flight (e.g.
+ * IOMAP_UNWRITTEN state can change and memory reclaim could have
+ * reclaimed a previously partially written page at this index after IO
+ * completion before this write reaches this file offset) and hence we
+ * could do the wrong thing here (zero a page range incorrectly or fail
+ * to zero) and corrupt data.
+ */
+ if (write_ops && write_ops->iomap_valid) {
+ bool iomap_valid = write_ops->iomap_valid(iter->inode,
+ &iter->iomap);
+ if (!iomap_valid) {
+ iter->iomap.flags |= IOMAP_F_STALE;
+ status = 0;
+ goto out_unlock;
+ }
}
+ /*
+ * The folios in a batch may not be contiguous. If we've skipped
+ * forward, advance the iter to the pos of the current folio. If the
+ * folio starts beyond the end of the mapping, it may have been trimmed
+ * since the lookup for whatever reason. Return a NULL folio to
+ * terminate the op.
+ */
+ if (folio_pos(folio) > iter->pos) {
+ len = min_t(u64, folio_pos(folio) - iter->pos,
+ iomap_length(iter));
+ status = iomap_iter_advance(iter, len);
+ len = iomap_length(iter);
+ if (status || !len)
+ goto out_unlock;
+ }
+
+ pos = iomap_trim_folio_range(iter, folio, poffset, &len);
+
if (srcmap->type == IOMAP_INLINE)
- status = iomap_write_begin_inline(iter, page);
+ status = iomap_write_begin_inline(iter, folio);
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, srcmap);
+ status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(iter, pos, len, page);
+ status = __iomap_write_begin(iter, write_ops, len, folio);
if (unlikely(status))
goto out_unlock;
- *pagep = page;
+ *foliop = folio;
+ *plen = len;
return 0;
out_unlock:
- unlock_page(page);
- put_page(page);
- iomap_write_failed(iter->inode, pos, len);
-
-out_no_page:
- if (page_ops && page_ops->page_done)
- page_ops->page_done(iter->inode, pos, 0, NULL);
+ __iomap_put_folio(iter, write_ops, 0, folio);
return status;
}
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
- size_t copied, struct page *page)
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+ size_t copied, struct folio *folio)
{
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
/*
* The blocks that were entirely written will now be uptodate, so we
- * don't have to worry about a readpage reading them and overwriting a
+ * don't have to worry about a read_folio reading them and overwriting a
* partial write. However, if we've encountered a short write and only
* partially written into a block, it will not be marked uptodate, so a
- * readpage might come in and destroy our partial write.
+ * read_folio might come in and destroy our partial write.
*
* Do the simplest thing and just treat any short write to a
* non-uptodate page as a zero-length write, and force the caller to
* redo the whole thing.
*/
- if (unlikely(copied < len && !PageUptodate(page)))
- return 0;
- iomap_set_range_uptodate(page, offset_in_page(pos), len);
- __set_page_dirty_nobuffers(page);
- return copied;
+ if (unlikely(copied < len && !folio_test_uptodate(folio)))
+ return false;
+ iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
+ iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
+ filemap_dirty_folio(inode->i_mapping, folio);
+ return true;
}
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
- struct page *page, loff_t pos, size_t copied)
+static bool iomap_write_end_inline(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t copied)
{
const struct iomap *iomap = &iter->iomap;
void *addr;
- WARN_ON_ONCE(!PageUptodate(page));
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
BUG_ON(!iomap_inline_data_valid(iomap));
- flush_dcache_page(page);
- addr = kmap_local_page(page) + pos;
+ if (WARN_ON_ONCE(!iomap->inline_data))
+ return false;
+
+ flush_dcache_folio(folio);
+ addr = kmap_local_folio(folio, pos);
memcpy(iomap_inline_data(iomap, pos), addr, copied);
kunmap_local(addr);
mark_inode_dirty(iter->inode);
- return copied;
+ return true;
}
-/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
- size_t copied, struct page *page)
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise return false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
+ struct folio *folio)
{
- const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- loff_t old_size = iter->inode->i_size;
- size_t ret;
-
- if (srcmap->type == IOMAP_INLINE) {
- ret = iomap_write_end_inline(iter, page, pos, copied);
- } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
- ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
- copied, page, NULL);
- } else {
- ret = __iomap_write_end(iter->inode, pos, len, copied, page);
- }
+ loff_t pos = iter->pos;
- /*
- * Update the in-memory inode size after copying the data into the page
- * cache. It's up to the file system to write the updated size to disk,
- * preferably after I/O completion so that no stale data is exposed.
- */
- if (pos + ret > old_size) {
- i_size_write(iter->inode, pos + ret);
- iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
- }
- unlock_page(page);
+ if (srcmap->type == IOMAP_INLINE)
+ return iomap_write_end_inline(iter, folio, pos, copied);
- if (old_size < pos)
- pagecache_isize_extended(iter->inode, old_size, pos);
- if (page_ops && page_ops->page_done)
- page_ops->page_done(iter->inode, pos, ret, page);
- put_page(page);
+ if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+ size_t bh_written;
- if (ret < len)
- iomap_write_failed(iter->inode, pos, len);
- return ret;
+ bh_written = block_write_end(pos, len, copied, folio);
+ WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+ return bh_written == copied;
+ }
+
+ return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
-static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
+ const struct iomap_write_ops *write_ops)
{
- loff_t length = iomap_length(iter);
- loff_t pos = iter->pos;
- ssize_t written = 0;
- long status = 0;
+ ssize_t total_written = 0;
+ int status = 0;
+ struct address_space *mapping = iter->inode->i_mapping;
+ size_t chunk = mapping_max_folio_size(mapping);
+ unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
do {
- struct page *page;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
+ struct folio *folio;
+ loff_t old_size;
+ size_t offset; /* Offset into folio */
+ u64 bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
+ u64 written; /* Bytes have been written */
+ loff_t pos;
+
+ bytes = iov_iter_count(i);
+retry:
+ offset = iter->pos & (chunk - 1);
+ bytes = min(chunk - offset, bytes);
+ status = balance_dirty_pages_ratelimited_flags(mapping,
+ bdp_flags);
+ if (unlikely(status))
+ break;
- offset = offset_in_page(pos);
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_count(i));
-again:
- if (bytes > length)
- bytes = length;
+ if (bytes > iomap_length(iter))
+ bytes = iomap_length(iter);
/*
* Bring in the user page that we'll copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
+ *
+ * For async buffered writes the assumption is that the user
+ * page has already been faulted in. This can be optimized by
+ * faulting the user page.
*/
- if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
+ if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
status = -EFAULT;
break;
}
- status = iomap_write_begin(iter, pos, bytes, &page);
- if (unlikely(status))
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
+ if (unlikely(status)) {
+ iomap_write_failed(iter->inode, iter->pos, bytes);
+ break;
+ }
+ if (iter->iomap.flags & IOMAP_F_STALE)
break;
- if (mapping_writably_mapped(iter->inode->i_mapping))
- flush_dcache_page(page);
+ pos = iter->pos;
- copied = copy_page_from_iter_atomic(page, offset, bytes, i);
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_folio(folio);
- status = iomap_write_end(iter, pos, bytes, copied, page);
+ copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
+ written = iomap_write_end(iter, bytes, copied, folio) ?
+ copied : 0;
- if (unlikely(copied != status))
- iov_iter_revert(i, copied - status);
+ /*
+ * Update the in-memory inode size after copying the data into
+ * the page cache. It's up to the file system to write the
+ * updated size to disk, preferably after I/O completion so that
+ * no stale data is exposed. Only once that's done can we
+ * unlock and release the folio.
+ */
+ old_size = iter->inode->i_size;
+ if (pos + written > old_size) {
+ i_size_write(iter->inode, pos + written);
+ iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+ }
+ __iomap_put_folio(iter, write_ops, written, folio);
+
+ if (old_size < pos)
+ pagecache_isize_extended(iter->inode, old_size, pos);
cond_resched();
- if (unlikely(status == 0)) {
+ if (unlikely(written == 0)) {
/*
* A short copy made iomap_write_end() reject the
* thing entirely. Might be memory poisoning
* halfway through, might be a race with munmap,
* might be severe memory pressure.
*/
- if (copied)
+ iomap_write_failed(iter->inode, pos, bytes);
+ iov_iter_revert(i, copied);
+
+ if (chunk > PAGE_SIZE)
+ chunk /= 2;
+ if (copied) {
bytes = copied;
- goto again;
+ goto retry;
+ }
+ } else {
+ total_written += written;
+ iomap_iter_advance(iter, written);
}
- pos += status;
- written += status;
- length -= status;
+ } while (iov_iter_count(i) && iomap_length(iter));
- balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- } while (iov_iter_count(i) && length);
-
- return written ? written : status;
+ return total_written ? 0 : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
struct iomap_iter iter = {
.inode = iocb->ki_filp->f_mapping->host,
.pos = iocb->ki_pos,
.len = iov_iter_count(i),
.flags = IOMAP_WRITE,
+ .private = private,
};
- int ret;
+ ssize_t ret;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ iter.flags |= IOMAP_NOWAIT;
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ iter.flags |= IOMAP_DONTCACHE;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_write_iter(&iter, i);
- if (iter.pos == iocb->ki_pos)
+ iter.status = iomap_write_iter(&iter, i, write_ops);
+
+ if (unlikely(iter.pos == iocb->ki_pos))
return ret;
- return iter.pos - iocb->ki_pos;
+ ret = iter.pos - iocb->ki_pos;
+ iocb->ki_pos = iter.pos;
+ return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
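
For context, a minimal sketch of how a filesystem's ->write_iter path might drive the updated interface; the myfs_* ops names are assumptions, not part of this patch:

static ssize_t myfs_buffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&myfs_buffered_write_iomap_ops,
				&myfs_write_ops, NULL);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

Note that iomap_file_buffered_write() now advances iocb->ki_pos itself, so the caller only passes the (possibly short) byte count on to generic_write_sync().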
-static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+static void iomap_write_delalloc_ifs_punch(struct inode *inode,
+ struct folio *folio, loff_t start_byte, loff_t end_byte,
+ struct iomap *iomap, iomap_punch_t punch)
+{
+ unsigned int first_blk, last_blk;
+ loff_t last_byte;
+ u8 blkbits = inode->i_blkbits;
+ struct iomap_folio_state *ifs;
+
+ /*
+ * When we have per-block dirty tracking, there can be
+ * blocks within a folio which are marked uptodate
+ * but not dirty. In that case it is necessary to punch
+ * out such blocks to avoid leaking any delalloc blocks.
+ */
+ ifs = folio->private;
+ if (!ifs)
+ return;
+
+ last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
+ first_blk = offset_in_folio(folio, start_byte) >> blkbits;
+ last_blk = offset_in_folio(folio, last_byte) >> blkbits;
+ while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
+ <= last_blk) {
+ punch(inode, folio_pos(folio) + (first_blk << blkbits),
+ 1 << blkbits, iomap);
+ first_blk++;
+ }
+}
+
+static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+ loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
+ struct iomap *iomap, iomap_punch_t punch)
+{
+ if (!folio_test_dirty(folio))
+ return;
+
+ /* if dirty, punch up to offset */
+ if (start_byte > *punch_start_byte) {
+ punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
+ iomap);
+ }
+
+ /* Punch non-dirty blocks within folio */
+ iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
+ iomap, punch);
+
+ /*
+ * Make sure the next punch start is correctly bound to
+ * the end of this data range, not the end of the folio.
+ */
+ *punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
+}
+
+/*
+ * Scan the data range passed to us for dirty page cache folios. If we find a
+ * dirty folio, punch out the preceding range and update the offset from which
+ * the next punch will start from.
+ *
+ * We can punch out storage reservations under clean pages because they either
+ * contain data that has been written back - in which case the delalloc punch
+ * over that range is a no-op - or they have been read faulted in, in which case they
+ * contain zeroes and we can remove the delalloc backing range and any new
+ * writes to those pages will do the normal hole filling operation...
+ *
+ * This makes the logic simple: we only need to keep delalloc extents
+ * over the dirty ranges of the page cache.
+ *
+ * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
+ * simplify range iterations.
+ */
+static void iomap_write_delalloc_scan(struct inode *inode,
+ loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
+ struct iomap *iomap, iomap_punch_t punch)
+{
+ while (start_byte < end_byte) {
+ struct folio *folio;
+
+ /* grab locked page */
+ folio = filemap_lock_folio(inode->i_mapping,
+ start_byte >> PAGE_SHIFT);
+ if (IS_ERR(folio)) {
+ start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
+ PAGE_SIZE;
+ continue;
+ }
+
+ iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+ start_byte, end_byte, iomap, punch);
+
+ /* move offset to start of next folio in range */
+ start_byte = folio_next_pos(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+}
+
+/*
+ * When a short write occurs, the filesystem might need to use ->iomap_end
+ * to remove space reservations created in ->iomap_begin.
+ *
+ * For filesystems that use delayed allocation, there can be dirty pages over
+ * the delalloc extent outside the range of a short write but still within the
+ * delalloc extent allocated for this iomap if the write raced with page
+ * faults.
+ *
+ * Punch out all the delalloc blocks in the range given except for those that
+ * have dirty data still pending in the page cache - those are going to be
+ * written and so must still retain the delalloc backing for writeback.
+ *
+ * The punch() callback *must* only punch delalloc extents in the range passed
+ * to it. It must skip over all other types of extents in the range and leave
+ * them completely unchanged. It must do this punch atomically with respect to
+ * other extent modifications.
+ *
+ * The punch() callback may be called with a folio locked to prevent writeback
+ * extent allocation racing at the edge of the range we are currently punching.
+ * The locked folio may or may not cover the range being punched, so it is not
+ * safe for the punch() callback to lock folios itself.
+ *
+ * Lock order is:
+ *
+ * inode->i_rwsem (shared or exclusive)
+ * inode->i_mapping->invalidate_lock (exclusive)
+ * folio_lock()
+ * ->punch
+ * internal filesystem allocation lock
+ *
+ * As we are scanning the page cache for data, we don't need to reimplement the
+ * wheel - mapping_seek_hole_data() does exactly what we need to identify the
+ * start and end of data ranges correctly even for sub-folio block sizes. This
+ * byte range based iteration is especially convenient because it means we
+ * don't have to care about variable size folios, nor where the start or end of
+ * the data range lies within a folio, if they lie within the same folio or even
+ * if there are multiple discontiguous data ranges within the folio.
+ *
+ * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
+ * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
+ * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
+ * date. A write page fault can then mark it dirty. If we then fail a write()
+ * beyond EOF into that up to date cached range, we allocate a delalloc block
+ * beyond EOF and then have to punch it out. Because the range is up to date,
+ * mapping_seek_hole_data() will return it, and we will skip the punch because
+ * the folio is dirty. This is incorrect - we always need to punch out delalloc
+ * beyond EOF in this case as writeback will never write back and convert that
+ * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
+ * resulting in always punching out the range from the EOF to the end of the
+ * range the iomap spans.
+ *
+ * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
+ * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
+ * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
+ * returns the end of the data range (data_end). Using closed intervals would
+ * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
+ * the code to subtle off-by-one bugs....
+ */
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch)
+{
+ loff_t punch_start_byte = start_byte;
+ loff_t scan_end_byte = min(i_size_read(inode), end_byte);
+
+ /*
+ * The caller must hold invalidate_lock to avoid races with page faults
+ * re-instantiating folios and dirtying them via ->page_mkwrite whilst
+ * we walk the cache and perform delalloc extent removal. Failing to do
+ * this can leave dirty pages with no space reservation in the cache.
+ */
+ lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
+
+ while (start_byte < scan_end_byte) {
+ loff_t data_end;
+
+ start_byte = mapping_seek_hole_data(inode->i_mapping,
+ start_byte, scan_end_byte, SEEK_DATA);
+ /*
+ * If there is no more data to scan, all that is left is to
+ * punch out the remaining range.
+ *
+ * Note that mapping_seek_hole_data is only supposed to return
+ * either an offset or -ENXIO, so WARN on any other error as
+ * that would be an API change without updating the callers.
+ */
+ if (start_byte == -ENXIO || start_byte == scan_end_byte)
+ break;
+ if (WARN_ON_ONCE(start_byte < 0))
+ return;
+ WARN_ON_ONCE(start_byte < punch_start_byte);
+ WARN_ON_ONCE(start_byte > scan_end_byte);
+
+ /*
+ * We find the end of this contiguous cached data range by
+ * seeking from start_byte to the beginning of the next hole.
+ */
+ data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
+ scan_end_byte, SEEK_HOLE);
+ if (WARN_ON_ONCE(data_end < 0))
+ return;
+
+ /*
+ * If we race with post-direct I/O invalidation of the page cache,
+ * there might be no data left at start_byte.
+ */
+ if (data_end == start_byte)
+ continue;
+
+ WARN_ON_ONCE(data_end < start_byte);
+ WARN_ON_ONCE(data_end > scan_end_byte);
+
+ iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
+ data_end, iomap, punch);
+
+ /* The next data search starts at the end of this one. */
+ start_byte = data_end;
+ }
+
+ if (punch_start_byte < end_byte)
+ punch(inode, punch_start_byte, end_byte - punch_start_byte,
+ iomap);
+}
+EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
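
A minimal sketch of the caller side of this contract, assuming a hypothetical myfs ->iomap_end and punch callback; only iomap_write_delalloc_release() and the locking requirement come from this patch, the myfs_* helpers are illustrative:

static void myfs_punch_delalloc(struct inode *inode, loff_t offset,
		loff_t length, struct iomap *iomap)
{
	/* Remove only delalloc extents in [offset, offset + length). */
	myfs_bunmap_delalloc_range(inode, offset, length);
}

static int myfs_buffered_write_iomap_end(struct inode *inode, loff_t offset,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	loff_t unused = offset + (written > 0 ? written : 0);

	if (iomap->type != IOMAP_DELALLOC || unused >= offset + length)
		return 0;

	/* Exclusive invalidate_lock, per the lock order documented above. */
	filemap_invalidate_lock(inode->i_mapping);
	iomap_write_delalloc_release(inode, unused, offset + length,
			flags, iomap, myfs_punch_delalloc);
	filemap_invalidate_unlock(inode->i_mapping);
	return 0;
}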
+
+static int iomap_unshare_iter(struct iomap_iter *iter,
+ const struct iomap_write_ops *write_ops)
{
struct iomap *iomap = &iter->iomap;
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
- loff_t pos = iter->pos;
- loff_t length = iomap_length(iter);
- long status = 0;
- loff_t written = 0;
+ u64 bytes = iomap_length(iter);
+ int status;
- /* don't bother with blocks that are not shared to start with */
- if (!(iomap->flags & IOMAP_F_SHARED))
- return length;
- /* don't bother with holes or unwritten extents */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return length;
+ if (!iomap_want_unshare_iter(iter))
+ return iomap_iter_advance(iter, bytes);
do {
- unsigned long offset = offset_in_page(pos);
- unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
- struct page *page;
+ struct folio *folio;
+ size_t offset;
+ bool ret;
- status = iomap_write_begin(iter, pos, bytes, &page);
+ bytes = min_t(u64, SIZE_MAX, bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
if (unlikely(status))
return status;
+ if (iomap->flags & IOMAP_F_STALE)
+ break;
- status = iomap_write_end(iter, pos, bytes, bytes, page);
- if (WARN_ON_ONCE(status == 0))
+ ret = iomap_write_end(iter, bytes, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
+ if (WARN_ON_ONCE(!ret))
return -EIO;
cond_resched();
- pos += status;
- written += status;
- length -= status;
-
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- } while (length);
- return written;
+ status = iomap_iter_advance(iter, bytes);
+ if (status)
+ break;
+ } while ((bytes = iomap_length(iter)) > 0);
+
+ return status;
}
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops)
{
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
- .len = len,
.flags = IOMAP_WRITE | IOMAP_UNSHARE,
};
+ loff_t size = i_size_read(inode);
int ret;
+ if (pos < 0 || pos >= size)
+ return 0;
+
+ iter.len = min(len, size - pos);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_unshare_iter(&iter);
+ iter.status = iomap_unshare_iter(&iter, write_ops);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
+/*
+ * Flush the remaining range of the iter and mark the current mapping stale.
+ * This is used when zero range sees an unwritten mapping that may have had
+ * dirty pagecache over it.
+ */
+static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
+{
+ struct address_space *mapping = i->inode->i_mapping;
+ loff_t end = i->pos + i->len - 1;
+
+ i->iomap.flags |= IOMAP_F_STALE;
+ return filemap_write_and_wait_range(mapping, i->pos, end);
+}
+
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+ const struct iomap_write_ops *write_ops)
{
- struct page *page;
+ u64 bytes = iomap_length(iter);
int status;
- unsigned offset = offset_in_page(pos);
- unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
- status = iomap_write_begin(iter, pos, bytes, &page);
- if (status)
- return status;
+ do {
+ struct folio *folio;
+ size_t offset;
+ bool ret;
+
+ bytes = min_t(u64, SIZE_MAX, bytes);
+ status = iomap_write_begin(iter, write_ops, &folio, &offset,
+ &bytes);
+ if (status)
+ return status;
+ if (iter->iomap.flags & IOMAP_F_STALE)
+ break;
+
+ /* a NULL folio means we're done with a folio batch */
+ if (!folio) {
+ status = iomap_iter_advance_full(iter);
+ break;
+ }
- zero_user(page, offset, bytes);
- mark_page_accessed(page);
+ /* warn about zeroing folios beyond eof that won't write back */
+ WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
- return iomap_write_end(iter, pos, bytes, bytes, page);
+ trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
+ bytes);
+
+ folio_zero_range(folio, offset, bytes);
+ folio_mark_accessed(folio);
+
+ ret = iomap_write_end(iter, bytes, bytes, folio);
+ __iomap_put_folio(iter, write_ops, bytes, folio);
+ if (WARN_ON_ONCE(!ret))
+ return -EIO;
+
+ status = iomap_iter_advance(iter, bytes);
+ if (status)
+ break;
+ } while ((bytes = iomap_length(iter)) > 0);
+
+ if (did_zero)
+ *did_zero = true;
+ return status;
}
-static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+loff_t
+iomap_fill_dirty_folios(
+ struct iomap_iter *iter,
+ loff_t offset,
+ loff_t length)
{
- struct iomap *iomap = &iter->iomap;
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
- loff_t pos = iter->pos;
- loff_t length = iomap_length(iter);
- loff_t written = 0;
+ struct address_space *mapping = iter->inode->i_mapping;
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
- /* already zeroed? we're done. */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return length;
+ iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
+ if (!iter->fbatch)
+ return offset + length;
+ folio_batch_init(iter->fbatch);
- do {
- s64 bytes;
-
- if (IS_DAX(iter->inode))
- bytes = dax_iomap_zero(pos, length, iomap);
- else
- bytes = __iomap_zero_iter(iter, pos, length);
- if (bytes < 0)
- return bytes;
-
- pos += bytes;
- length -= bytes;
- written += bytes;
- if (did_zero)
- *did_zero = true;
- } while (length > 0);
-
- return written;
+ filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
+ return (start << PAGE_SHIFT);
}
+EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
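
An illustrative helper showing how a filesystem's zero-range begin path might use this: when it sees an IOMAP_ZERO request over an unwritten extent, it gathers the dirty folios and trims the mapping it returns to that range, so iomap_zero_iter() only touches dirty pagecache. How the filesystem obtains the struct iomap_iter is not shown by this patch; everything except iomap_fill_dirty_folios() is an assumption.

static loff_t myfs_zero_range_trim_to_dirty(struct iomap_iter *iter,
		loff_t offset, loff_t length)
{
	loff_t end;

	/*
	 * Collect dirty folios over the unwritten range into iter->fbatch
	 * and clamp the mapping length to the dirty pagecache.  A result
	 * that is not positive means there is nothing dirty to zero here.
	 */
	end = iomap_fill_dirty_folios(iter, offset, length);
	return min_t(loff_t, length, end - offset);
}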
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
.len = len,
.flags = IOMAP_ZERO,
+ .private = private,
};
+ struct address_space *mapping = inode->i_mapping;
int ret;
+ bool range_dirty;
- while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_zero_iter(&iter, did_zero);
+ /*
+ * To avoid an unconditional flush, check pagecache state and only flush
+ * if dirty and the fs returns a mapping that might convert on
+ * writeback.
+ */
+ range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
+ iter.pos + iter.len - 1);
+ while ((ret = iomap_iter(&iter, ops)) > 0) {
+ const struct iomap *srcmap = iomap_iter_srcmap(&iter);
+
+ if (WARN_ON_ONCE(iter.fbatch &&
+ srcmap->type != IOMAP_UNWRITTEN))
+ return -EIO;
+
+ if (!iter.fbatch &&
+ (srcmap->type == IOMAP_HOLE ||
+ srcmap->type == IOMAP_UNWRITTEN)) {
+ s64 status;
+
+ if (range_dirty) {
+ range_dirty = false;
+ status = iomap_zero_iter_flush_and_stale(&iter);
+ } else {
+ status = iomap_iter_advance_full(&iter);
+ }
+ iter.status = status;
+ continue;
+ }
+
+ iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
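
A minimal sketch of a caller, e.g. a hypothetical FALLOC_FL_ZERO_RANGE path; the myfs_* ops names are assumptions:

static int myfs_zero_file_range(struct inode *inode, loff_t offset, loff_t len)
{
	bool did_zero = false;

	return iomap_zero_range(inode, offset, len, &did_zero,
			&myfs_buffered_write_iomap_ops, &myfs_write_ops, NULL);
}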
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private)
{
unsigned int blocksize = i_blocksize(inode);
unsigned int off = pos & (blocksize - 1);
@@ -947,575 +1626,294 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
/* Block boundary? Nothing to do */
if (!off)
return 0;
- return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
+ return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
+ write_ops, private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
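
Similarly, a hypothetical truncate path would zero the now-partial tail block before updating i_size; again the myfs_* names are assumptions, only the iomap_truncate_page() signature comes from this patch:

static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;
	int error;

	/* Zero the tail of the (possibly partial) new last block. */
	error = iomap_truncate_page(inode, newsize, &did_zero,
			&myfs_buffered_write_iomap_ops, &myfs_write_ops, NULL);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}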
-static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
- struct page *page)
+static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+ struct folio *folio)
{
loff_t length = iomap_length(iter);
int ret;
if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
- ret = __block_write_begin_int(page, iter->pos, length, NULL,
+ ret = __block_write_begin_int(folio, iter->pos, length, NULL,
&iter->iomap);
if (ret)
return ret;
- block_commit_write(page, 0, length);
+ block_commit_write(folio, 0, length);
} else {
- WARN_ON_ONCE(!PageUptodate(page));
- set_page_dirty(page);
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
+ folio_mark_dirty(folio);
}
- return length;
+ return iomap_iter_advance(iter, length);
}
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private)
{
struct iomap_iter iter = {
.inode = file_inode(vmf->vma->vm_file),
.flags = IOMAP_WRITE | IOMAP_FAULT,
+ .private = private,
};
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
ssize_t ret;
- lock_page(page);
- ret = page_mkwrite_check_truncate(page, iter.inode);
+ folio_lock(folio);
+ ret = folio_mkwrite_check_truncate(folio, iter.inode);
if (ret < 0)
goto out_unlock;
- iter.pos = page_offset(page);
+ iter.pos = folio_pos(folio);
iter.len = ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_page_mkwrite_iter(&iter, page);
+ iter.status = iomap_folio_mkwrite_iter(&iter, folio);
if (ret < 0)
goto out_unlock;
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
- return block_page_mkwrite_return(ret);
+ folio_unlock(folio);
+ return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
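
A minimal sketch of a ->page_mkwrite handler built on this, assuming hypothetical myfs_page_mkwrite_iomap_ops:

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &myfs_page_mkwrite_iomap_ops, NULL);
	sb_end_pagefault(inode->i_sb);
	return ret;
}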
-static void
-iomap_finish_page_writeback(struct inode *inode, struct page *page,
- int error, unsigned int len)
+static void iomap_writeback_init(struct inode *inode, struct folio *folio)
{
- struct iomap_page *iop = to_iomap_page(page);
-
- if (error) {
- SetPageError(page);
- mapping_set_error(inode->i_mapping, error);
- }
-
- WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
-
- if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
- end_page_writeback(page);
-}
-
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-static void
-iomap_finish_ioend(struct iomap_ioend *ioend, int error)
-{
- struct inode *inode = ioend->io_inode;
- struct bio *bio = &ioend->io_inline_bio;
- struct bio *last = ioend->io_bio, *next;
- u64 start = bio->bi_iter.bi_sector;
- loff_t offset = ioend->io_offset;
- bool quiet = bio_flagged(bio, BIO_QUIET);
-
- for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct iomap_folio_state *ifs = folio->private;
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+ if (ifs) {
+ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
/*
- * For the last bio, bi_private points to the ioend, so we
- * need to explicitly end the iteration here.
+ * Set this to the folio size. After processing the folio for
+ * writeback in iomap_writeback_folio(), we'll subtract any
+ * ranges not written back.
+ *
+ * We do this because otherwise, we would have to atomically
+ * increment ifs->write_bytes_pending every time a range in the
+ * folio needs to be written back.
*/
- if (bio == last)
- next = NULL;
- else
- next = bio->bi_private;
-
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bv, bio, iter_all)
- iomap_finish_page_writeback(inode, bv->bv_page, error,
- bv->bv_len);
- bio_put(bio);
- }
- /* The ioend has been freed by bio_put() */
-
- if (unlikely(error && !quiet)) {
- printk_ratelimited(KERN_ERR
-"%s: writeback error on inode %lu, offset %lld, sector %llu",
- inode->i_sb->s_id, inode->i_ino, offset, start);
+ atomic_set(&ifs->write_bytes_pending, folio_size(folio));
}
}
-void
-iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len)
{
- struct list_head tmp;
+ struct iomap_folio_state *ifs = folio->private;
- list_replace_init(&ioend->io_list, &tmp);
- iomap_finish_ioend(ioend, error);
-
- while (!list_empty(&tmp)) {
- ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
- list_del_init(&ioend->io_list);
- iomap_finish_ioend(ioend, error);
- }
-}
-EXPORT_SYMBOL_GPL(iomap_finish_ioends);
-
-/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
-{
- if (ioend->io_bio->bi_status != next->io_bio->bi_status)
- return false;
- if ((ioend->io_flags & IOMAP_F_SHARED) ^
- (next->io_flags & IOMAP_F_SHARED))
- return false;
- if ((ioend->io_type == IOMAP_UNWRITTEN) ^
- (next->io_type == IOMAP_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- return true;
-}
-
-void
-iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
-{
- struct iomap_ioend *next;
-
- INIT_LIST_HEAD(&ioend->io_list);
-
- while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
- io_list))) {
- if (!iomap_ioend_can_merge(ioend, next))
- break;
- list_move_tail(&next->io_list, &ioend->io_list);
- ioend->io_size += next->io_size;
- }
-}
-EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
-
-static int
-iomap_ioend_compare(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
- struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
-
- if (ia->io_offset < ib->io_offset)
- return -1;
- if (ia->io_offset > ib->io_offset)
- return 1;
- return 0;
-}
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+ WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
-void
-iomap_sort_ioends(struct list_head *ioend_list)
-{
- list_sort(NULL, ioend_list, iomap_ioend_compare);
+ if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
+ folio_end_writeback(folio);
}
-EXPORT_SYMBOL_GPL(iomap_sort_ioends);
+EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
-static void iomap_writepage_end_bio(struct bio *bio)
+static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
+ size_t *bytes_submitted)
{
- struct iomap_ioend *ioend = bio->bi_private;
-
- iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
-}
+ do {
+ ssize_t ret;
-/*
- * Submit the final bio for an ioend.
- *
- * If @error is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we've marked pages for writeback
- * and unlocked them. In this situation, we need to fail the bio instead of
- * submitting it. This typically only happens on a filesystem shutdown.
- */
-static int
-iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
- int error)
-{
- ioend->io_bio->bi_private = ioend;
- ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
+ ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
+ if (WARN_ON_ONCE(ret == 0 || ret > rlen))
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ rlen -= ret;
+ pos += ret;
- if (wpc->ops->prepare_ioend)
- error = wpc->ops->prepare_ioend(ioend, error);
- if (error) {
/*
- * If we're failing the IO now, just mark the ioend with an
- * error and finish it. This will run IO completion immediately
- * as there is only one reference to the ioend at this point in
- * time.
+ * Holes are not written back by ->writeback_range, so track
+ * if we did handle anything that is not a hole here.
*/
- ioend->io_bio->bi_status = errno_to_blk_status(error);
- bio_endio(ioend->io_bio);
- return error;
- }
+ if (wpc->iomap.type != IOMAP_HOLE)
+ *bytes_submitted += ret;
+ } while (rlen);
- submit_bio(ioend->io_bio);
return 0;
}
-static struct iomap_ioend *
-iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
- loff_t offset, sector_t sector, struct writeback_control *wbc)
-{
- struct iomap_ioend *ioend;
- struct bio *bio;
-
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
- bio_set_dev(bio, wpc->iomap.bdev);
- bio->bi_iter.bi_sector = sector;
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
- bio->bi_write_hint = inode->i_write_hint;
- wbc_init_bio(wbc, bio);
-
- ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = wpc->iomap.type;
- ioend->io_flags = wpc->iomap.flags;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = offset;
- ioend->io_bio = bio;
- return ioend;
-}
-
/*
- * Allocate a new bio, and chain the old bio to the new one.
+ * Check interaction of the folio with the file end.
*
- * Note that we have to perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in iomap_finish_ioend().
+ * If the folio is entirely beyond i_size, return false. If it straddles
+ * i_size, adjust end_pos and zero all data beyond i_size.
*/
-static struct bio *
-iomap_chain_bio(struct bio *prev)
+static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
+ u64 *end_pos)
{
- struct bio *new;
-
- new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
- new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
- new->bi_write_hint = prev->bi_write_hint;
-
- bio_chain(prev, new);
- bio_get(prev); /* for iomap_finish_ioend */
- submit_bio(prev);
- return new;
-}
+ u64 isize = i_size_read(inode);
-static bool
-iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
- sector_t sector)
-{
- if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
- (wpc->ioend->io_flags & IOMAP_F_SHARED))
- return false;
- if (wpc->iomap.type != wpc->ioend->io_type)
- return false;
- if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
- return false;
- if (sector != bio_end_sector(wpc->ioend->io_bio))
- return false;
- return true;
-}
+ if (*end_pos > isize) {
+ size_t poff = offset_in_folio(folio, isize);
+ pgoff_t end_index = isize >> PAGE_SHIFT;
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first; otherwise finish off the current ioend and start another.
- */
-static void
-iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
- struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct list_head *iolist)
-{
- sector_t sector = iomap_sector(&wpc->iomap, offset);
- unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
-
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
- if (wpc->ioend)
- list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
- }
+ /*
+ * If the folio is entirely outside of i_size, skip it.
+ *
+ * This can happen due to a truncate operation that is in
+ * progress and in that case truncate will finish it off once
+ * we've dropped the folio lock.
+ *
+ * Note that the pgoff_t used for end_index is an unsigned long.
+ * If the given offset is greater than 16TB on a 32-bit system,
+ * then if we checked if the folio is fully outside i_size with
+ * "if (folio->index >= end_index + 1)", "end_index + 1" would
+ * overflow and evaluate to 0. Hence this folio would be
+ * redirtied and written out repeatedly, which would result in
+ * an infinite loop; the user program performing this operation
+ * would hang. Instead, we can detect this situation by
+ * checking if the folio is totally beyond i_size or if its
+ * offset is just equal to the EOF.
+ */
+ if (folio->index > end_index ||
+ (folio->index == end_index && poff == 0))
+ return false;
- if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
- wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
- __bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ /*
+ * The folio straddles i_size.
+ *
+ * It must be zeroed out on each and every writepage invocation
+ * because it may be mmapped:
+ *
+ * A file is mapped in multiples of the page size. For a
+ * file that is not a multiple of the page size, the
+ * remaining memory is zeroed when mapped, and writes to that
+ * region are not written out to the file.
+ *
+ * Also adjust the end_pos to the end of file and skip writeback
+ * for all blocks entirely beyond i_size.
+ */
+ folio_zero_segment(folio, poff, folio_size(folio));
+ *end_pos = isize;
}
- if (iop)
- atomic_add(len, &iop->write_bytes_pending);
- wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
+ return true;
}
-/*
- * We implement an immediate ioend submission policy here to avoid needing to
- * chain multiple ioends and hence nest mempool allocations which can violate
- * the forward progress guarantees we need to provide. The current ioend we're
- * adding blocks to is cached in the writepage context, and if the new block
- * doesn't append to the cached ioend, it will create a new ioend and cache that
- * instead.
- *
- * If a new ioend is created and cached, the old ioend is returned and queued
- * locally for submission once the entire page is processed or an error has been
- * detected. While ioends are submitted immediately after they are completed,
- * batching optimisations are provided by higher level block plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-static int
-iomap_writepage_map(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct inode *inode,
- struct page *page, u64 end_offset)
+int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
{
- struct iomap_page *iop = iomap_page_create(inode, page);
- struct iomap_ioend *ioend, *next;
- unsigned len = i_blocksize(inode);
- u64 file_offset; /* file offset of page */
- int error = 0, count = 0, i;
- LIST_HEAD(submit_list);
+ struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = wpc->inode;
+ u64 pos = folio_pos(folio);
+ u64 end_pos = pos + folio_size(folio);
+ u64 end_aligned = 0;
+ size_t bytes_submitted = 0;
+ int error = 0;
+ u32 rlen;
+
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ WARN_ON_ONCE(folio_test_dirty(folio));
+ WARN_ON_ONCE(folio_test_writeback(folio));
+
+ trace_iomap_writeback_folio(inode, pos, folio_size(folio));
+
+ if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
+ return 0;
+ WARN_ON_ONCE(end_pos <= pos);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
+ if (i_blocks_per_folio(inode, folio) > 1) {
+ if (!ifs) {
+ ifs = ifs_alloc(inode, folio, 0);
+ iomap_set_range_dirty(folio, 0, end_pos - pos);
+ }
+
+ iomap_writeback_init(inode, folio);
+ }
/*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
+ * Set the writeback bit ASAP, as the I/O completion for the single
+ * block per folio case can hit as soon as we submit the bio.
*/
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
- if (iop && !test_bit(i, iop->uptodate))
- continue;
+ folio_start_writeback(folio);
- error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ /*
+ * Walk through the folio to find dirty areas to write back.
+ */
+ end_aligned = round_up(end_pos, i_blocksize(inode));
+ while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
+ error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
+ &bytes_submitted);
if (error)
break;
- if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
- continue;
- if (wpc->iomap.type == IOMAP_HOLE)
- continue;
- iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
- &submit_list);
- count++;
+ pos += rlen;
}
- WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
- WARN_ON_ONCE(!PageLocked(page));
- WARN_ON_ONCE(PageWriteback(page));
- WARN_ON_ONCE(PageDirty(page));
+ if (bytes_submitted)
+ wpc->nr_folios++;
/*
- * We cannot cancel the ioend directly here on error. We may have
- * already set other pages under writeback and hence we have to run I/O
- * completion to mark the error state of the pages under writeback
- * appropriately.
+ * We can have dirty bits set past end of file in page_mkwrite path
+ * while mapping the last partial folio. Hence it's better to clear
+ * all the dirty bits in the folio here.
*/
- if (unlikely(error)) {
- /*
- * Let the filesystem know what portion of the current page
- * failed to map. If the page hasn't been added to ioend, it
- * won't be affected by I/O completion and we must unlock it
- * now.
- */
- if (wpc->ops->discard_page)
- wpc->ops->discard_page(page, file_offset);
- if (!count) {
- ClearPageUptodate(page);
- unlock_page(page);
- goto done;
- }
- }
-
- set_page_writeback(page);
- unlock_page(page);
+ iomap_clear_range_dirty(folio, 0, folio_size(folio));
/*
- * Preserve the original error if there was one; catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
+ * Usually the writeback bit is cleared by the I/O completion handler.
+ * But we may end up either not actually writing any blocks, or (when
+ * there are multiple blocks in a folio) all I/O might have finished
+ * already at this point. In that case we need to clear the writeback
+ * bit ourselves here, while the folio is still locked.
*/
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = iomap_submit_ioend(wpc, ioend, error);
- if (error2 && !error)
- error = error2;
+ if (ifs) {
+ /*
+ * Subtract any bytes that were initially accounted to
+ * write_bytes_pending but skipped for writeback.
+ */
+ size_t bytes_not_submitted = folio_size(folio) -
+ bytes_submitted;
+
+ if (bytes_not_submitted)
+ iomap_finish_folio_write(inode, folio,
+ bytes_not_submitted);
+ } else if (!bytes_submitted) {
+ folio_end_writeback(folio);
}
- /*
- * We can end up here with no error and nothing to write only if we race
- * with a partial page truncate on a sub-page block sized filesystem.
- */
- if (!count)
- end_page_writeback(page);
-done:
- mapping_set_error(page->mapping, error);
+ mapping_set_error(inode->i_mapping, error);
return error;
}
+EXPORT_SYMBOL_GPL(iomap_writeback_folio);
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page, we need to allocate space and flush it.
- * For unwritten space on the page, we need to start the conversion to
- * regular allocated space.
- */
-static int
-iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
+int
+iomap_writepages(struct iomap_writepage_ctx *wpc)
{
- struct iomap_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- pgoff_t end_index;
- u64 end_offset;
- loff_t offset;
-
- trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
+ struct address_space *mapping = wpc->inode->i_mapping;
+ struct folio *folio = NULL;
+ int error;
/*
- * Refuse to write the page out if we're called from reclaim context.
- *
- * This avoids stack overflows when called from deeply used stacks in
- * random callers for direct reclaim or memcg reclaim. We explicitly
- * allow reclaim from kswapd as the stack usage there is relatively low.
- *
- * This should never happen except in the case of a VM regression so
- * warn about it.
+ * Writeback from reclaim context should never happen except in the case
+ * of a VM regression so warn about it and refuse to write the data.
*/
- if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
+ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
PF_MEMALLOC))
- goto redirty;
+ return -EIO;
+
+ while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
+ error = iomap_writeback_folio(wpc, folio);
+ folio_unlock(folio);
+ }
/*
- * Is this page beyond the end of the file?
+ * If @error is non-zero, it means that we have a situation where some
+ * part of the submission process has failed after we've marked pages
+ * for writeback.
*
- * The page index is less than the end_index, adjust the end_offset
- * to the highest offset that this page should represent.
- * -----------------------------------------------------
- * | file mapping | <EOF> |
- * -----------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | |
- * ^--------------------------------^----------|--------
- * | desired writeback range | see else |
- * ---------------------------------^------------------|
+ * We cannot cancel the writeback directly in that case, so always call
+ * ->writeback_submit to run the I/O completion handler to clear the
+ * writeback bit and let the file system process the errors.
*/
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
- else {
- /*
- * Check whether the page to write out is beyond or straddles
- * i_size or not.
- * -------------------------------------------------------
- * | file mapping | <EOF> |
- * -------------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
- * ^--------------------------------^-----------|---------
- * | | Straddles |
- * ---------------------------------^-----------|--------|
- */
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
-
- /*
- * Skip the page if it's fully outside i_size, e.g. due to a
- * truncate operation that's in progress. We must redirty the
- * page so that reclaim stops reclaiming it. Otherwise
- * iomap_vm_releasepage() is called on it and gets confused.
- *
- * Note that the end_index is unsigned long. If the given
- * offset is greater than 16TB on a 32-bit system then if we
- * checked if the page is fully outside i_size with
- * "if (page->index >= end_index + 1)", "end_index + 1" would
- * overflow and evaluate to 0. Hence this page would be
- * redirtied and written out repeatedly, which would result in
- * an infinite loop; the user program performing this operation
- * would hang. Instead, we can detect this situation by
- * checking if the page is totally beyond i_size or if its
- * offset is just equal to the EOF.
- */
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
- goto redirty;
-
- /*
- * The page straddles i_size. It must be zeroed out on each
- * and every writepage invocation because it may be mmapped.
- * "A file is mapped in multiples of the page size. For a file
- * that is not a multiple of the page size, the remaining
- * memory is zeroed when mapped, and writes to that region are
- * not written out to the file."
- */
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
- }
-
- return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
-
-int
-iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
-{
- int ret;
-
- wpc->ops = ops;
- ret = iomap_do_writepage(page, wbc, wpc);
- if (!wpc->ioend)
- return ret;
- return iomap_submit_ioend(wpc, wpc->ioend, ret);
-}
-EXPORT_SYMBOL_GPL(iomap_writepage);
-
-int
-iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
-{
- int ret;
-
- wpc->ops = ops;
- ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
- if (!wpc->ioend)
- return ret;
- return iomap_submit_ioend(wpc, wpc->ioend, ret);
+ if (wpc->wb_ctx)
+ return wpc->ops->writeback_submit(wpc, error);
+ return error;
}
EXPORT_SYMBOL_GPL(iomap_writepages);
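
A minimal sketch of a ->writepages implementation on top of the new context; only the wpc fields and iomap_writepages() call shown here are taken from this patch, the myfs_writeback_ops (providing ->writeback_range and ->writeback_submit) is an assumption:

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &myfs_writeback_ops,
	};

	return iomap_writepages(&wpc);
}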
-
-static int __init iomap_init(void)
-{
- return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct iomap_ioend, io_inline_bio),
- BIOSET_NEED_BVECS);
-}
-fs_initcall(iomap_init);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b4dc51063d36..8e273408453a 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -1,15 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2021 Christoph Hellwig.
+ * Copyright (c) 2016-2025 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
+#include <linux/fscrypt.h>
+#include <linux/pagemap.h>
#include <linux/iomap.h>
-#include <linux/backing-dev.h>
-#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
@@ -18,10 +16,12 @@
* Private flags for iomap_dio, must not overlap with the public ones in
* iomap.h:
*/
-#define IOMAP_DIO_WRITE_FUA (1 << 28)
-#define IOMAP_DIO_NEED_SYNC (1 << 29)
-#define IOMAP_DIO_WRITE (1 << 30)
-#define IOMAP_DIO_DIRTY (1 << 31)
+#define IOMAP_DIO_NO_INVALIDATE (1U << 26)
+#define IOMAP_DIO_COMP_WORK (1U << 27)
+#define IOMAP_DIO_WRITE_THROUGH (1U << 28)
+#define IOMAP_DIO_NEED_SYNC (1U << 29)
+#define IOMAP_DIO_WRITE (1U << 30)
+#define IOMAP_DIO_DIRTY (1U << 31)
struct iomap_dio {
struct kiocb *iocb;
@@ -39,7 +39,6 @@ struct iomap_dio {
struct {
struct iov_iter *iter;
struct task_struct *waiter;
- struct bio *poll_bio;
} submit;
/* used for aio completion: */
@@ -49,27 +48,40 @@ struct iomap_dio {
};
};
+static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
+ struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
+{
+ if (dio->dops && dio->dops->bio_set)
+ return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
+ GFP_KERNEL, dio->dops->bio_set);
+ return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
+}
+
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
+ struct kiocb *iocb = dio->iocb;
+
atomic_inc(&dio->ref);
- if (dio->iocb->ki_flags & IOCB_HIPRI) {
- bio_set_polled(bio, dio->iocb);
- dio->submit.poll_bio = bio;
+ /* Sync dio can't be polled reliably */
+ if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
+ bio_set_polled(bio, iocb);
+ WRITE_ONCE(iocb->private, bio);
}
- if (dio->dops && dio->dops->submit_io)
+ if (dio->dops && dio->dops->submit_io) {
dio->dops->submit_io(iter, bio, pos);
- else
+ } else {
+ WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
submit_bio(bio);
+ }
}
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
const struct iomap_dio_ops *dops = dio->dops;
struct kiocb *iocb = dio->iocb;
- struct inode *inode = file_inode(iocb->ki_filp);
loff_t offset = iocb->ki_pos;
ssize_t ret = dio->error;
@@ -82,7 +94,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
if (offset + ret > dio->i_size &&
!(dio->flags & IOMAP_DIO_WRITE))
ret = dio->i_size - offset;
- iocb->ki_pos += ret;
}
/*
@@ -97,29 +108,26 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
* ->end_io() when necessary, otherwise a racing buffer read would cache
* zeros from unwritten extents.
*/
- if (!dio->error && dio->size &&
- (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
- int err;
- err = invalidate_inode_pages2_range(inode->i_mapping,
- offset >> PAGE_SHIFT,
- (offset + dio->size - 1) >> PAGE_SHIFT);
- if (err)
- dio_warn_stale_pagecache(iocb->ki_filp);
- }
+ if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
+ !(dio->flags & IOMAP_DIO_NO_INVALIDATE))
+ kiocb_invalidate_post_direct_write(iocb, dio->size);
inode_dio_end(file_inode(iocb->ki_filp));
- /*
- * If this is a DSYNC write, make sure we push it to stable storage now
- * that we've written data.
- */
- if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
- ret = generic_write_sync(iocb, ret);
- if (ret > 0)
- ret += dio->done_before;
+ if (ret > 0) {
+ iocb->ki_pos += ret;
+ /*
+ * If this is a DSYNC write, make sure we push it to stable
+ * storage now that we've written data.
+ */
+ if (dio->flags & IOMAP_DIO_NEED_SYNC)
+ ret = generic_write_sync(iocb, ret);
+ if (ret > 0)
+ ret += dio->done_before;
+ }
+ trace_iomap_dio_complete(iocb, dio->error, ret);
kfree(dio);
-
return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
@@ -142,7 +150,67 @@ static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
cmpxchg(&dio->error, 0, ret);
}
-static void iomap_dio_bio_end_io(struct bio *bio)
+/*
+ * Called when dio->ref reaches zero from an I/O completion.
+ */
+static void iomap_dio_done(struct iomap_dio *dio)
+{
+ struct kiocb *iocb = dio->iocb;
+
+ if (dio->wait_for_completion) {
+ /*
+ * Synchronous I/O, task itself will handle any completion work
+ * that needs after IO. All we need to do is wake the task.
+ */
+ struct task_struct *waiter = dio->submit.waiter;
+
+ WRITE_ONCE(dio->submit.waiter, NULL);
+ blk_wake_io_task(waiter);
+ return;
+ }
+
+ /*
+ * Always run error completions in user context. These are not
+ * performance critical and some code relies on taking sleeping locks
+ * for error handling.
+ */
+ if (dio->error)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ /*
+ * Never invalidate pages from this context to avoid deadlocks with
+ * buffered I/O completions when called from the ioend workqueue,
+ * or avoid sleeping when called directly from ->bi_end_io.
+ * Tough luck if you hit the tiny race with someone dirtying the range
+ * right between this check and the actual completion.
+ */
+ if ((dio->flags & IOMAP_DIO_WRITE) &&
+ !(dio->flags & IOMAP_DIO_COMP_WORK)) {
+ if (dio->iocb->ki_filp->f_mapping->nrpages)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+ else
+ dio->flags |= IOMAP_DIO_NO_INVALIDATE;
+ }
+
+ if (dio->flags & IOMAP_DIO_COMP_WORK) {
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Async DIO completion that requires filesystem level
+ * completion work gets punted to a work queue to complete as
+ * the operation may require more IO to be issued to finalise
+ * filesystem metadata changes or guarantee data integrity.
+ */
+ INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+ queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
+ return;
+ }
+
+ WRITE_ONCE(iocb->private, NULL);
+ iomap_dio_complete_work(&dio->aio.work);
+}
+
+void iomap_dio_bio_end_io(struct bio *bio)
{
struct iomap_dio *dio = bio->bi_private;
bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
@@ -150,22 +218,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
if (bio->bi_status)
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
- if (atomic_dec_and_test(&dio->ref)) {
- if (dio->wait_for_completion) {
- struct task_struct *waiter = dio->submit.waiter;
- WRITE_ONCE(dio->submit.waiter, NULL);
- blk_wake_io_task(waiter);
- } else if (dio->flags & IOMAP_DIO_WRITE) {
- struct inode *inode = file_inode(dio->iocb->ki_filp);
-
- WRITE_ONCE(dio->iocb->private, NULL);
- INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
- queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
- } else {
- WRITE_ONCE(dio->iocb->private, NULL);
- iomap_dio_complete_work(&dio->aio.work);
- }
- }
+ if (atomic_dec_and_test(&dio->ref))
+ iomap_dio_done(dio);
if (should_dirty) {
bio_check_pages_dirty(bio);
@@ -174,97 +228,187 @@ static void iomap_dio_bio_end_io(struct bio *bio)
bio_put(bio);
}
}
+EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
-static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
+u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
+{
+ struct iomap_dio *dio = ioend->io_bio.bi_private;
+ bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+ u32 vec_count = ioend->io_bio.bi_vcnt;
+
+ if (ioend->io_error)
+ iomap_dio_set_error(dio, ioend->io_error);
+
+ if (atomic_dec_and_test(&dio->ref)) {
+ /*
+ * Try to avoid another context switch for the completion given
+ * that we are already called from the ioend completion
+ * workqueue.
+ */
+ dio->flags &= ~IOMAP_DIO_COMP_WORK;
+ iomap_dio_done(dio);
+ }
+
+ if (should_dirty) {
+ bio_check_pages_dirty(&ioend->io_bio);
+ } else {
+ bio_release_pages(&ioend->io_bio, false);
+ bio_put(&ioend->io_bio);
+ }
+
+ /*
+ * Return the number of bvecs completed as even direct I/O completions
+ * do significant per-folio work and we'll still want to give up the
+ * CPU after a lot of completions.
+ */
+ return vec_count;
+}
+
+static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
loff_t pos, unsigned len)
{
- struct page *page = ZERO_PAGE(0);
- int flags = REQ_SYNC | REQ_IDLE;
+ struct inode *inode = file_inode(dio->iocb->ki_filp);
struct bio *bio;
+ struct folio *zero_folio = largest_zero_folio();
+ int nr_vecs = max(1, i_blocksize(inode) / folio_size(zero_folio));
+
+ if (!len)
+ return 0;
+
+ /*
+ * This limit shall never be reached as most filesystems have a
+ * maximum blocksize of 64k.
+ */
+ if (WARN_ON_ONCE(nr_vecs > BIO_MAX_VECS))
+ return -EINVAL;
- bio = bio_alloc(GFP_KERNEL, 1);
- bio_set_dev(bio, iter->iomap.bdev);
+ bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
+ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
+ fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- get_page(page);
- __bio_add_page(bio, page, len, 0);
- bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
- iomap_dio_submit_bio(iter, dio, bio, pos);
-}
+ while (len > 0) {
+ unsigned int io_len = min(len, folio_size(zero_folio));
-/*
- * Figure out the bio's operation flags from the dio request, the
- * mapping, and whether or not we want FUA. Note that we can end up
- * clearing the WRITE_FUA flag in the dio request.
- */
-static inline unsigned int iomap_dio_bio_opflags(struct iomap_dio *dio,
- const struct iomap *iomap, bool use_fua)
-{
- unsigned int opflags = REQ_SYNC | REQ_IDLE;
-
- if (!(dio->flags & IOMAP_DIO_WRITE)) {
- WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
- return REQ_OP_READ;
+ bio_add_folio_nofail(bio, zero_folio, io_len, 0);
+ len -= io_len;
}
+ iomap_dio_submit_bio(iter, dio, bio, pos);
- if (iomap->flags & IOMAP_F_ZONE_APPEND)
- opflags |= REQ_OP_ZONE_APPEND;
- else
- opflags |= REQ_OP_WRITE;
-
- if (use_fua)
- opflags |= REQ_FUA;
- else
- dio->flags &= ~IOMAP_DIO_WRITE_FUA;
-
- return opflags;
+ return 0;
}
-static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
const struct iomap *iomap = &iter->iomap;
struct inode *inode = iter->inode;
- unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad;
- unsigned int align = iov_iter_alignment(dio->submit.iter);
- loff_t length = iomap_length(iter);
+ const loff_t length = iomap_length(iter);
loff_t pos = iter->pos;
- unsigned int bio_opf;
+ blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
struct bio *bio;
bool need_zeroout = false;
- bool use_fua = false;
int nr_pages, ret = 0;
- size_t copied = 0;
+ u64 copied = 0;
size_t orig_count;
+ unsigned int alignment;
+
+ /*
+ * File systems that write out of place and always allocate new blocks
+ * need each bio to be block aligned as that's the unit of allocation.
+ */
+ if (dio->flags & IOMAP_DIO_FSBLOCK_ALIGNED)
+ alignment = fs_block_size;
+ else
+ alignment = bdev_logical_block_size(iomap->bdev);
- if ((pos | length | align) & ((1 << blkbits) - 1))
+ if ((pos | length) & (alignment - 1))
return -EINVAL;
- if (iomap->type == IOMAP_UNWRITTEN) {
- dio->flags |= IOMAP_DIO_UNWRITTEN;
- need_zeroout = true;
- }
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ bool need_completion_work = true;
- if (iomap->flags & IOMAP_F_SHARED)
- dio->flags |= IOMAP_DIO_COW;
+ switch (iomap->type) {
+ case IOMAP_MAPPED:
+ /*
+ * Directly mapped I/O does not inherently need to do
+ * work at I/O completion time. But there are various
+ * cases below where this will get set again.
+ */
+ need_completion_work = false;
+ break;
+ case IOMAP_UNWRITTEN:
+ dio->flags |= IOMAP_DIO_UNWRITTEN;
+ need_zeroout = true;
+ break;
+ default:
+ break;
+ }
+
+ if (iomap->flags & IOMAP_F_ATOMIC_BIO) {
+ /*
+ * Ensure that the mapping covers the full write
+ * length, otherwise it won't be submitted as a single
+ * bio, which is required to use hardware atomics.
+ */
+ if (length != iter->len)
+ return -EINVAL;
+ bio_opf |= REQ_ATOMIC;
+ }
+
+ if (iomap->flags & IOMAP_F_SHARED) {
+ /*
+ * Unsharing needs to update metadata at I/O
+ * completion time.
+ */
+ need_completion_work = true;
+ dio->flags |= IOMAP_DIO_COW;
+ }
+
+ if (iomap->flags & IOMAP_F_NEW) {
+ /*
+ * Newly allocated blocks might need recording in
+ * metadata at I/O completion time.
+ */
+ need_completion_work = true;
+ need_zeroout = true;
+ }
+
+ /*
+ * Use a FUA write if we need datasync semantics and this is a
+ * pure overwrite that doesn't require any metadata updates.
+ *
+ * This allows us to avoid cache flushes on I/O completion.
+ */
+ if (dio->flags & IOMAP_DIO_WRITE_THROUGH) {
+ if (!need_completion_work &&
+ !(iomap->flags & IOMAP_F_DIRTY) &&
+ (!bdev_write_cache(iomap->bdev) ||
+ bdev_fua(iomap->bdev)))
+ bio_opf |= REQ_FUA;
+ else
+ dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+ }
- if (iomap->flags & IOMAP_F_NEW) {
- need_zeroout = true;
- } else if (iomap->type == IOMAP_MAPPED) {
/*
- * Use a FUA write if we need datasync semantics, this is a pure
- * data IO that doesn't require any metadata updates (including
- * after IO completion such as unwritten extent conversion) and
- * the underlying device supports FUA. This allows us to avoid
- * cache flushes on IO completion.
+ * We can only do inline completion for pure overwrites that
+ * don't require additional I/O at completion time.
+ *
+ * This rules out writes that need zeroing or metadata updates to
+ * convert unwritten or shared extents.
+ *
+ * Writes that extend i_size are also not supported, but this is
+ * handled in __iomap_dio_rw().
*/
- if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_FUA) &&
- blk_queue_fua(bdev_get_queue(iomap->bdev)))
- use_fua = true;
+ if (need_completion_work)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
+
+ bio_opf |= REQ_OP_WRITE;
+ } else {
+ bio_opf |= REQ_OP_READ;
}
/*
@@ -279,25 +423,21 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
/*
- * We can only poll for single bio I/Os.
+ * The rules for polled IO completions follow the guidelines as the
+ * ones we set for inline and deferred completions. If none of those
+ * are available for this IO, clear the polled flag.
*/
- if (need_zeroout ||
- ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
+ if (dio->flags & IOMAP_DIO_COMP_WORK)
dio->iocb->ki_flags &= ~IOCB_HIPRI;
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
pad = pos & (fs_block_size - 1);
- if (pad)
- iomap_dio_zero(iter, dio, pos - pad, pad);
- }
- /*
- * Set the operation flags early so that bio_iov_iter_get_pages
- * can set up the page vector appropriately for a ZONE_APPEND
- * operation.
- */
- bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);
+ ret = iomap_dio_zero(iter, dio, pos - pad, pad);
+ if (ret)
+ goto out;
+ }
nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
do {
@@ -308,16 +448,17 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
}
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- bio_set_dev(bio, iomap->bdev);
+ bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
+ fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
- bio->bi_write_hint = dio->iocb->ki_hint;
+ bio->bi_write_hint = inode->i_write_hint;
bio->bi_ioprio = dio->iocb->ki_ioprio;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- bio->bi_opf = bio_opf;
- ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
+ alignment - 1);
if (unlikely(ret)) {
/*
* We have to stop part way through an IO. We must fall
@@ -330,12 +471,21 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
}
n = bio->bi_iter.bi_size;
- if (dio->flags & IOMAP_DIO_WRITE) {
- task_io_account_write(n);
- } else {
- if (dio->flags & IOMAP_DIO_DIRTY)
- bio_set_pages_dirty(bio);
+ if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
+ /*
+ * An atomic write bio must cover the complete length,
+ * which it doesn't, so error. We may need to zero out
+ * the tail (complete FS block), similar to when
+ * bio_iov_iter_get_pages() returns an error, above.
+ */
+ ret = -EINVAL;
+ bio_put(bio);
+ goto zero_tail;
}
+ if (dio->flags & IOMAP_DIO_WRITE)
+ task_io_account_write(n);
+ else if (dio->flags & IOMAP_DIO_DIRTY)
+ bio_set_pages_dirty(bio);
dio->size += n;
copied += n;
@@ -363,36 +513,38 @@ zero_tail:
/* zero out from the end of the write to the end of the block */
pad = pos & (fs_block_size - 1);
if (pad)
- iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
+ ret = iomap_dio_zero(iter, dio, pos,
+ fs_block_size - pad);
}
out:
/* Undo iter limitation to current extent */
iov_iter_reexpand(dio->submit.iter, orig_count - copied);
if (copied)
- return copied;
+ return iomap_iter_advance(iter, copied);
return ret;
}
-static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
dio->size += length;
if (!length)
return -EFAULT;
- return length;
+ return iomap_iter_advance(iter, length);
}
-static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
- struct iomap_dio *dio)
+static int iomap_dio_inline_iter(struct iomap_iter *iomi, struct iomap_dio *dio)
{
const struct iomap *iomap = &iomi->iomap;
struct iov_iter *iter = dio->submit.iter;
void *inline_data = iomap_inline_data(iomap, iomi->pos);
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
- size_t copied;
+ u64 copied;
+
+ if (WARN_ON_ONCE(!inline_data))
+ return -EIO;
if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
return -EIO;
@@ -414,11 +566,10 @@ static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
dio->size += copied;
if (!copied)
return -EFAULT;
- return copied;
+ return iomap_iter_advance(iomi, copied);
}
-static loff_t iomap_dio_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
switch (iter->iomap.type) {
case IOMAP_HOLE:
@@ -472,21 +623,23 @@ static loff_t iomap_dio_iter(const struct iomap_iter *iter,
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- unsigned int dio_flags, size_t done_before)
+ unsigned int dio_flags, void *private, size_t done_before)
{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = file_inode(iocb->ki_filp);
struct iomap_iter iomi = {
.inode = inode,
.pos = iocb->ki_pos,
.len = iov_iter_count(iter),
.flags = IOMAP_DIRECT,
+ .private = private,
};
- loff_t end = iomi.pos + iomi.len - 1, ret = 0;
bool wait_for_completion =
is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
struct blk_plug plug;
struct iomap_dio *dio;
+ loff_t ret = 0;
+
+ trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);
if (!iomi.len)
return NULL;
@@ -506,87 +659,98 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->submit.iter = iter;
dio->submit.waiter = current;
- dio->submit.poll_bio = NULL;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ iomi.flags |= IOMAP_NOWAIT;
+
+ if (dio_flags & IOMAP_DIO_FSBLOCK_ALIGNED)
+ dio->flags |= IOMAP_DIO_FSBLOCK_ALIGNED;
if (iov_iter_rw(iter) == READ) {
if (iomi.pos >= dio->i_size)
goto out_free_dio;
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_needs_writeback(mapping, iomi.pos,
- end)) {
- ret = -EAGAIN;
- goto out_free_dio;
- }
- iomi.flags |= IOMAP_NOWAIT;
- }
-
- if (iter_is_iovec(iter))
+ if (user_backed_iter(iter))
dio->flags |= IOMAP_DIO_DIRTY;
+
+ ret = kiocb_write_and_wait(iocb, iomi.len);
+ if (ret)
+ goto out_free_dio;
} else {
iomi.flags |= IOMAP_WRITE;
dio->flags |= IOMAP_DIO_WRITE;
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_has_page(mapping, iomi.pos, end)) {
- ret = -EAGAIN;
+ if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
+ ret = -EAGAIN;
+ if (iomi.pos >= dio->i_size ||
+ iomi.pos + iomi.len > dio->i_size)
goto out_free_dio;
- }
- iomi.flags |= IOMAP_NOWAIT;
+ iomi.flags |= IOMAP_OVERWRITE_ONLY;
}
+ if (iocb->ki_flags & IOCB_ATOMIC)
+ iomi.flags |= IOMAP_ATOMIC;
+
/* for data sync or sync, we need sync completion processing */
- if (iocb->ki_flags & IOCB_DSYNC)
+ if (iocb_is_dsync(iocb)) {
dio->flags |= IOMAP_DIO_NEED_SYNC;
+ /*
+ * For datasync only writes, we optimistically try using
+ * WRITE_THROUGH for this IO. This flag requires either
+ * FUA writes through the device's write cache, or a
+ * normal write to a device without a volatile write
+ * cache. For the former, any non-FUA write that occurs
+ * will clear this flag, hence we know before completion
+ * whether a cache flush is necessary.
+ */
+ if (!(iocb->ki_flags & IOCB_SYNC))
+ dio->flags |= IOMAP_DIO_WRITE_THROUGH;
+ }
+
/*
- * For datasync only writes, we optimistically try using FUA for
- * this IO. Any non-FUA write that occurs will clear this flag,
- * hence we know before completion whether a cache flush is
- * necessary.
+ * i_size updates must happen from process context.
*/
- if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
- dio->flags |= IOMAP_DIO_WRITE_FUA;
- }
-
- if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
- ret = -EAGAIN;
- if (iomi.pos >= dio->i_size ||
- iomi.pos + iomi.len > dio->i_size)
- goto out_free_dio;
- iomi.flags |= IOMAP_OVERWRITE_ONLY;
- }
-
- ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
- if (ret)
- goto out_free_dio;
+ if (iomi.pos + iomi.len > dio->i_size)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
- if (iov_iter_rw(iter) == WRITE) {
/*
* Try to invalidate cache pages for the range we are writing.
* If this invalidation fails, let the caller fall back to
* buffered I/O.
*/
- if (invalidate_inode_pages2_range(mapping,
- iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
- trace_iomap_dio_invalidate_fail(inode, iomi.pos,
- iomi.len);
- ret = -ENOTBLK;
+ ret = kiocb_invalidate_pages(iocb, iomi.len);
+ if (ret) {
+ if (ret != -EAGAIN) {
+ trace_iomap_dio_invalidate_fail(inode, iomi.pos,
+ iomi.len);
+ if (iocb->ki_flags & IOCB_ATOMIC) {
+ /*
+ * folio invalidation failed, maybe
+ * this is transient, unlock and see if
+ * the caller tries again.
+ */
+ ret = -EAGAIN;
+ } else {
+ /* fall back to buffered write */
+ ret = -ENOTBLK;
+ }
+ }
goto out_free_dio;
}
+ }
- if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
- ret = sb_init_dio_done_wq(inode->i_sb);
- if (ret < 0)
- goto out_free_dio;
- }
+ if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
+ ret = sb_init_dio_done_wq(inode->i_sb);
+ if (ret < 0)
+ goto out_free_dio;
}
inode_dio_begin(inode);
blk_start_plug(&plug);
while ((ret = iomap_iter(&iomi, ops)) > 0) {
- iomi.processed = iomap_dio_iter(&iomi, dio);
+ iomi.status = iomap_dio_iter(&iomi, dio);
/*
* We can only poll for single bio I/Os.
@@ -619,13 +783,17 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomap_dio_set_error(dio, ret);
/*
- * If all the writes we issued were FUA, we don't need to flush the
- * cache on IO completion. Clear the sync flag for this case.
+ * If all the writes we issued were already written through to the
+ * media, we don't need to flush the cache on IO completion. Clear the
+ * sync flag for this case.
+ *
+ * Otherwise flag that completion work is required if any sync work is
+ * still needed, as that has to be performed from process context.
*/
- if (dio->flags & IOMAP_DIO_WRITE_FUA)
+ if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
-
- WRITE_ONCE(iocb->private, dio->submit.poll_bio);
+ else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+ dio->flags |= IOMAP_DIO_COMP_WORK;
/*
* We are about to drop our additional submission reference, which
@@ -644,17 +812,17 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
*/
dio->wait_for_completion = wait_for_completion;
if (!atomic_dec_and_test(&dio->ref)) {
- if (!wait_for_completion)
+ if (!wait_for_completion) {
+ trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
return ERR_PTR(-EIOCBQUEUED);
+ }
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(dio->submit.waiter))
break;
- if (!dio->submit.poll_bio ||
- !bio_poll(dio->submit.poll_bio, NULL, 0))
- blk_io_schedule();
+ blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
}
@@ -672,11 +840,12 @@ EXPORT_SYMBOL_GPL(__iomap_dio_rw);
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- unsigned int dio_flags, size_t done_before)
+ unsigned int dio_flags, void *private, size_t done_before)
{
struct iomap_dio *dio;
- dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, done_before);
+ dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
+ done_before);
if (IS_ERR_OR_NULL(dio))
return PTR_ERR_OR_ZERO(dio);
return iomap_dio_complete(dio);
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index 66cf267c68ae..d11dadff8286 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -2,11 +2,9 @@
/*
* Copyright (c) 2016-2021 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>
+#include <linux/pagemap.h>
static int iomap_to_fiemap(struct fiemap_extent_info *fi,
const struct iomap *iomap, u32 flags)
@@ -38,24 +36,23 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
iomap->length, flags);
}
-static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
+static int iomap_fiemap_iter(struct iomap_iter *iter,
struct fiemap_extent_info *fi, struct iomap *prev)
{
int ret;
if (iter->iomap.type == IOMAP_HOLE)
- return iomap_length(iter);
+ goto advance;
ret = iomap_to_fiemap(fi, prev, 0);
*prev = iter->iomap;
- switch (ret) {
- case 0: /* success */
- return iomap_length(iter);
- case 1: /* extent array full */
- return 0;
- default: /* error */
+ if (ret < 0)
return ret;
- }
+ if (ret == 1) /* extent array full */
+ return 0;
+
+advance:
+ return iomap_iter_advance_full(iter);
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
@@ -77,7 +74,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
return ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
+ iter.status = iomap_fiemap_iter(&iter, fi, &prev);
if (prev.type != IOMAP_HOLE) {
ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
@@ -113,7 +110,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
while ((ret = iomap_iter(&iter, ops)) > 0) {
if (iter.iomap.type == IOMAP_MAPPED)
bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
- /* leave iter.processed unset to abort loop */
+ /* leave iter.status unset to abort loop */
}
if (ret)
return 0;
diff --git a/fs/iomap/internal.h b/fs/iomap/internal.h
new file mode 100644
index 000000000000..3a4e4aad2bd1
--- /dev/null
+++ b/fs/iomap/internal.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IOMAP_INTERNAL_H
+#define _IOMAP_INTERNAL_H 1
+
+#define IOEND_BATCH_SIZE 4096
+
+u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
+
+#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len);
+#else
+static inline int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len)
+{
+ WARN_ON_ONCE(1);
+ return -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+#endif /* _IOMAP_INTERNAL_H */
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c
new file mode 100644
index 000000000000..86f44922ed3b
--- /dev/null
+++ b/fs/iomap/ioend.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2025 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/list_sort.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include "internal.h"
+#include "trace.h"
+
+struct bio_set iomap_ioend_bioset;
+EXPORT_SYMBOL_GPL(iomap_ioend_bioset);
+
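+/*
+ * Initialise the ioend embedded in @bio, covering file data starting at
+ * @file_offset.  The ioend starts out with a single completion reference,
+ * which is dropped by iomap_finish_ioend() once the I/O completes.
+ */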
+struct iomap_ioend *iomap_init_ioend(struct inode *inode,
+ struct bio *bio, loff_t file_offset, u16 ioend_flags)
+{
+ struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+ atomic_set(&ioend->io_remaining, 1);
+ ioend->io_error = 0;
+ ioend->io_parent = NULL;
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_flags = ioend_flags;
+ ioend->io_inode = inode;
+ ioend->io_offset = file_offset;
+ ioend->io_size = bio->bi_iter.bi_size;
+ ioend->io_sector = bio->bi_iter.bi_sector;
+ ioend->io_private = NULL;
+ return ioend;
+}
+EXPORT_SYMBOL_GPL(iomap_init_ioend);
+
+/*
+ * We're now finished for good with this ioend structure. Update the folio
+ * state, release holds on bios, and finally free up memory. Do not use the
+ * ioend after this.
+ */
+static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
+{
+ struct inode *inode = ioend->io_inode;
+ struct bio *bio = &ioend->io_bio;
+ struct folio_iter fi;
+ u32 folio_count = 0;
+
+ if (ioend->io_error) {
+ mapping_set_error(inode->i_mapping, ioend->io_error);
+ if (!bio_flagged(bio, BIO_QUIET)) {
+ pr_err_ratelimited(
+"%s: writeback error on inode %lu, offset %lld, sector %llu",
+ inode->i_sb->s_id, inode->i_ino,
+ ioend->io_offset, ioend->io_sector);
+ }
+ }
+
+ /* walk all folios in bio, ending page IO on them */
+ bio_for_each_folio_all(fi, bio) {
+ iomap_finish_folio_write(inode, fi.folio, fi.length);
+ folio_count++;
+ }
+
+ bio_put(bio); /* frees the ioend */
+ return folio_count;
+}
+
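+/*
+ * Default bio completion handler for writeback ioends submitted through
+ * iomap_ioend_writeback_submit().
+ */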
+static void ioend_writeback_end_bio(struct bio *bio)
+{
+ struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+ ioend->io_error = blk_status_to_errno(bio->bi_status);
+ iomap_finish_ioend_buffered(ioend);
+}
+
+/*
+ * We cannot cancel the ioend directly in case of an error, so call the bio end
+ * I/O handler with the error status here to run the normal I/O completion
+ * handler.
+ */
+int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
+{
+ struct iomap_ioend *ioend = wpc->wb_ctx;
+
+ if (!ioend->io_bio.bi_end_io)
+ ioend->io_bio.bi_end_io = ioend_writeback_end_bio;
+
+ if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
+ error = -EIO;
+
+ if (error) {
+ ioend->io_bio.bi_status = errno_to_blk_status(error);
+ bio_endio(&ioend->io_bio);
+ return error;
+ }
+
+ submit_bio(&ioend->io_bio);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
+
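+/*
+ * Allocate a new writeback ioend for the mapping in @wpc, backed by a bio
+ * from iomap_ioend_bioset and starting at file offset @pos.
+ */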
+static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
+ loff_t pos, u16 ioend_flags)
+{
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
+ GFP_NOFS, &iomap_ioend_bioset);
+ bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
+ bio->bi_write_hint = wpc->inode->i_write_hint;
+ wbc_init_bio(wpc->wbc, bio);
+ wpc->nr_folios = 0;
+ return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
+}
+
+static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
+ u16 ioend_flags)
+{
+ struct iomap_ioend *ioend = wpc->wb_ctx;
+
+ if (ioend_flags & IOMAP_IOEND_BOUNDARY)
+ return false;
+ if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
+ (ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
+ return false;
+ if (pos != ioend->io_offset + ioend->io_size)
+ return false;
+ if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
+ iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
+ return false;
+ /*
+ * Limit ioend bio chain lengths to minimise IO completion latency. This
+ * also prevents long tight loops ending page writeback on all the
+ * folios in the ioend.
+ */
+ if (wpc->nr_folios >= IOEND_BATCH_SIZE)
+ return false;
+ return true;
+}
+
+/*
+ * Test to see if we have an existing ioend structure that we could append to
+ * first; otherwise finish off the current ioend and start another.
+ *
+ * If a new ioend is created and cached, the old ioend is submitted to the block
+ * layer instantly. Batching optimisations are provided by higher level block
+ * plugging.
+ *
+ * At the end of a writeback pass, there will be a cached ioend remaining on the
+ * writepage context that the caller will need to submit.
+ */
+ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
+ loff_t pos, loff_t end_pos, unsigned int dirty_len)
+{
+ struct iomap_ioend *ioend = wpc->wb_ctx;
+ size_t poff = offset_in_folio(folio, pos);
+ unsigned int ioend_flags = 0;
+ unsigned int map_len = min_t(u64, dirty_len,
+ wpc->iomap.offset + wpc->iomap.length - pos);
+ int error;
+
+ trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);
+
+ WARN_ON_ONCE(!folio->private && map_len < dirty_len);
+
+ switch (wpc->iomap.type) {
+ case IOMAP_INLINE:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ case IOMAP_HOLE:
+ return map_len;
+ default:
+ break;
+ }
+
+ if (wpc->iomap.type == IOMAP_UNWRITTEN)
+ ioend_flags |= IOMAP_IOEND_UNWRITTEN;
+ if (wpc->iomap.flags & IOMAP_F_SHARED)
+ ioend_flags |= IOMAP_IOEND_SHARED;
+ if (folio_test_dropbehind(folio))
+ ioend_flags |= IOMAP_IOEND_DONTCACHE;
+ if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
+ ioend_flags |= IOMAP_IOEND_BOUNDARY;
+
+ if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
+new_ioend:
+ if (ioend) {
+ error = wpc->ops->writeback_submit(wpc, 0);
+ if (error)
+ return error;
+ }
+ wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
+ }
+
+ if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
+ goto new_ioend;
+
+ /*
+ * Clamp io_offset and io_size to the incore EOF so that ondisk
+ * file size updates in the ioend completion are byte-accurate.
+ * This avoids recovering files with zeroed tail regions when
+ * writeback races with appending writes:
+ *
+ * Thread 1: Thread 2:
+ * ------------ -----------
+ * write [A, A+B]
+ * update inode size to A+B
+ * submit I/O [A, A+BS]
+ * write [A+B, A+B+C]
+ * update inode size to A+B+C
+ * <I/O completes, updates disk size to min(A+B+C, A+BS)>
+ * <power failure>
+ *
+ * After reboot:
+ * 1) with A+B+C < A+BS, the file has zero padding in range
+ * [A+B, A+B+C]
+ *
+ * |< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|
+ * ^ ^ ^
+ * A A+B A+B+C
+ * (EOF)
+ *
+ * 2) with A+B+C > A+BS, the file has zero padding in range
+ * [A+B, A+BS]
+ *
+ * |< Block Size (BS) >|< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
+ * ^ ^ ^ ^
+ * A A+B A+BS A+B+C
+ * (EOF)
+ *
+ * D = Valid Data
+ * 0 = Zero Padding
+ *
+ * Note that this defeats the ability to chain the ioends of
+ * appending writes.
+ */
+ ioend->io_size += map_len;
+ if (ioend->io_offset + ioend->io_size > end_pos)
+ ioend->io_size = end_pos - ioend->io_offset;
+
+ wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
+ return map_len;
+}
+EXPORT_SYMBOL_GPL(iomap_add_to_ioend);
+
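+/*
+ * Drop one completion reference on @ioend (resolving split ioends to their
+ * parent first) and record @error.  The last reference to go away finishes
+ * the ioend, either through the direct I/O or the buffered writeback
+ * completion path.  Returns the number of completions processed so that
+ * callers can reschedule between large batches.
+ */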
+static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+{
+ if (ioend->io_parent) {
+ struct bio *bio = &ioend->io_bio;
+
+ ioend = ioend->io_parent;
+ bio_put(bio);
+ }
+
+ if (error)
+ cmpxchg(&ioend->io_error, 0, error);
+
+ if (!atomic_dec_and_test(&ioend->io_remaining))
+ return 0;
+ if (ioend->io_flags & IOMAP_IOEND_DIRECT)
+ return iomap_finish_ioend_direct(ioend);
+ return iomap_finish_ioend_buffered(ioend);
+}
+
+/*
+ * Ioend completion routine for merged bios. This can only be called from task
+ * contexts as merged ioends can be of unbound length. Hence we have to break up
+ * the writeback completions into manageable chunks to avoid long scheduler
+ * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
+ * good batch processing throughput without creating adverse scheduler latency
+ * conditions.
+ */
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+{
+ struct list_head tmp;
+ u32 completions;
+
+ might_sleep();
+
+ list_replace_init(&ioend->io_list, &tmp);
+ completions = iomap_finish_ioend(ioend, error);
+
+ while (!list_empty(&tmp)) {
+ if (completions > IOEND_BATCH_SIZE * 8) {
+ cond_resched();
+ completions = 0;
+ }
+ ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
+ list_del_init(&ioend->io_list);
+ completions += iomap_finish_ioend(ioend, error);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+
+/*
+ * We can merge two adjacent ioends if they have the same set of work to do.
+ */
+static bool iomap_ioend_can_merge(struct iomap_ioend *ioend,
+ struct iomap_ioend *next)
+{
+ if (ioend->io_bio.bi_status != next->io_bio.bi_status)
+ return false;
+ if (next->io_flags & IOMAP_IOEND_BOUNDARY)
+ return false;
+ if ((ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
+ (next->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
+ return false;
+ if (ioend->io_offset + ioend->io_size != next->io_offset)
+ return false;
+ /*
+ * Do not merge physically discontiguous ioends. The filesystem
+ * completion functions will have to iterate the physical
+ * discontiguities even if we merge the ioends at a logical level, so
+ * we don't gain anything by merging physical discontiguities here.
+ *
+ * We cannot use bio->bi_iter.bi_sector here as it is modified during
+ * submission so does not point to the start sector of the bio at
+ * completion.
+ */
+ if (ioend->io_sector + (ioend->io_size >> SECTOR_SHIFT) !=
+ next->io_sector)
+ return false;
+ return true;
+}
+
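+/*
+ * Merge the leading ioends from @more_ioends into @ioend for as long as
+ * iomap_ioend_can_merge() allows, moving them onto @ioend->io_list and
+ * growing @ioend->io_size accordingly.  Callers normally sort the list
+ * with iomap_sort_ioends() first so that adjacent ranges end up next to
+ * each other.
+ */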
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends)
+{
+ struct iomap_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
+
+ while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
+ io_list))) {
+ if (!iomap_ioend_can_merge(ioend, next))
+ break;
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+
+static int iomap_ioend_compare(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
+ struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+
+ if (ia->io_offset < ib->io_offset)
+ return -1;
+ if (ia->io_offset > ib->io_offset)
+ return 1;
+ return 0;
+}
+
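+/* Sort a list of ioends by ascending file offset, typically before merging. */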
+void iomap_sort_ioends(struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, iomap_ioend_compare);
+}
+EXPORT_SYMBOL_GPL(iomap_sort_ioends);
+
+/*
+ * Split up to the first @max_len bytes from @ioend if the ioend covers more
+ * than @max_len bytes.
+ *
+ * If @is_append is set, the split will be based on the hardware limits for
+ * REQ_OP_ZONE_APPEND commands and can be less than @max_len if the hardware
+ * limits don't allow the entire @max_len length.
+ *
+ * The bio embedded into @ioend must be a REQ_OP_WRITE because the block layer
+ * does not allow splitting REQ_OP_ZONE_APPEND bios. The file system has to
+ * switch the operation after this call, but before submitting the bio.
+ */
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append)
+{
+ struct bio *bio = &ioend->io_bio;
+ struct iomap_ioend *split_ioend;
+ unsigned int nr_segs;
+ int sector_offset;
+ struct bio *split;
+
+ if (is_append) {
+ struct queue_limits *lim = bdev_limits(bio->bi_bdev);
+
+ max_len = min(max_len,
+ lim->max_zone_append_sectors << SECTOR_SHIFT);
+
+ sector_offset = bio_split_rw_at(bio, lim, &nr_segs, max_len);
+ if (unlikely(sector_offset < 0))
+ return ERR_PTR(sector_offset);
+ if (!sector_offset)
+ return NULL;
+ } else {
+ if (bio->bi_iter.bi_size <= max_len)
+ return NULL;
+ sector_offset = max_len >> SECTOR_SHIFT;
+ }
+
+ /* ensure the split ioend is still block size aligned */
+ sector_offset = ALIGN_DOWN(sector_offset << SECTOR_SHIFT,
+ i_blocksize(ioend->io_inode)) >> SECTOR_SHIFT;
+
+ split = bio_split(bio, sector_offset, GFP_NOFS, &iomap_ioend_bioset);
+ if (IS_ERR(split))
+ return ERR_CAST(split);
+ split->bi_private = bio->bi_private;
+ split->bi_end_io = bio->bi_end_io;
+
+ split_ioend = iomap_init_ioend(ioend->io_inode, split, ioend->io_offset,
+ ioend->io_flags);
+ split_ioend->io_parent = ioend;
+
+ atomic_inc(&ioend->io_remaining);
+ ioend->io_offset += split_ioend->io_size;
+ ioend->io_size -= split_ioend->io_size;
+
+ split_ioend->io_sector = ioend->io_sector;
+ if (!is_append)
+ ioend->io_sector += (split_ioend->io_size >> SECTOR_SHIFT);
+ return split_ioend;
+}
+EXPORT_SYMBOL_GPL(iomap_split_ioend);
+
+static int __init iomap_ioend_init(void)
+{
+ return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_bio),
+ BIOSET_NEED_BVECS);
+}
+fs_initcall(iomap_ioend_init);
diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c
index a1c7592d2ade..8692e5e41c6d 100644
--- a/fs/iomap/iter.c
+++ b/fs/iomap/iter.c
@@ -3,29 +3,30 @@
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (c) 2016-2021 Christoph Hellwig.
*/
-#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"
-static inline int iomap_iter_advance(struct iomap_iter *iter)
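+/*
+ * Drop any cached folio batch and clear the previous mapping state before
+ * the next ->iomap_begin() call.
+ */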
+static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
{
- /* handle the previous iteration (if any) */
- if (iter->iomap.length) {
- if (iter->processed <= 0)
- return iter->processed;
- if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
- return -EIO;
- iter->pos += iter->processed;
- iter->len -= iter->processed;
- if (!iter->len)
- return 0;
+ if (iter->fbatch) {
+ folio_batch_release(iter->fbatch);
+ kfree(iter->fbatch);
+ iter->fbatch = NULL;
}
- /* clear the state for the next iteration */
- iter->processed = 0;
+ iter->status = 0;
memset(&iter->iomap, 0, sizeof(iter->iomap));
memset(&iter->srcmap, 0, sizeof(iter->srcmap));
- return 1;
+}
+
+/* Advance the current iterator position and decrement the remaining length */
+int iomap_iter_advance(struct iomap_iter *iter, u64 count)
+{
+ if (WARN_ON_ONCE(count > iomap_length(iter)))
+ return -EIO;
+ iter->pos += count;
+ iter->len -= count;
+ return 0;
}
static inline void iomap_iter_done(struct iomap_iter *iter)
@@ -33,6 +34,9 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
WARN_ON_ONCE(iter->iomap.offset > iter->pos);
WARN_ON_ONCE(iter->iomap.length == 0);
WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
+ WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);
+
+ iter->iter_start_pos = iter->pos;
trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
if (iter->srcmap.type != IOMAP_HOLE)
@@ -51,26 +55,58 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
 * function must be called in a loop that continues as long as it returns a
* positive value. If 0 or a negative value is returned, the caller must not
* return to the loop body. Within a loop body, there are two ways to break out
- * of the loop body: leave @iter.processed unchanged, or set it to a negative
+ * of the loop body: leave @iter.status unchanged, or set it to a negative
* errno.
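 *
 * A minimal caller sketch (my_iter_fn() is a hypothetical per-mapping helper,
 * named here only for illustration):
 *
 *	while ((ret = iomap_iter(&iter, ops)) > 0)
 *		iter.status = my_iter_fn(&iter, data);
 *
 * where my_iter_fn() processes the current mapping and advances the iterator
 * via iomap_iter_advance() (or iomap_iter_advance_full()) for the bytes it
 * handled, returning 0 or a negative errno.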
*/
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
{
+ bool stale = iter->iomap.flags & IOMAP_F_STALE;
+ ssize_t advanced;
+ u64 olen;
int ret;
- if (iter->iomap.length && ops->iomap_end) {
- ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
- iter->processed > 0 ? iter->processed : 0,
- iter->flags, &iter->iomap);
- if (ret < 0 && !iter->processed)
+ trace_iomap_iter(iter, ops, _RET_IP_);
+
+ if (!iter->iomap.length)
+ goto begin;
+
+ /*
+ * Calculate how far the iter was advanced and the original length in bytes
+ * for ->iomap_end().
+ */
+ advanced = iter->pos - iter->iter_start_pos;
+ olen = iter->len + advanced;
+
+ if (ops->iomap_end) {
+ ret = ops->iomap_end(iter->inode, iter->iter_start_pos,
+ iomap_length_trim(iter, iter->iter_start_pos,
+ olen),
+ advanced, iter->flags, &iter->iomap);
+ if (ret < 0 && !advanced)
return ret;
}
- trace_iomap_iter(iter, ops, _RET_IP_);
- ret = iomap_iter_advance(iter);
+ /* detect old return semantics where this would advance */
+ if (WARN_ON_ONCE(iter->status > 0))
+ iter->status = -EIO;
+
+ /*
+ * Use iter->len to determine whether to continue onto the next mapping.
+ * Explicitly terminate on error status or if the current iter has not
+ * advanced at all (i.e. no work was done for some reason) unless the
+ * mapping has been marked stale and needs to be reprocessed.
+ */
+ if (iter->status < 0)
+ ret = iter->status;
+ else if (iter->len == 0 || (!advanced && !stale))
+ ret = 0;
+ else
+ ret = 1;
+ iomap_iter_reset_iomap(iter);
if (ret <= 0)
return ret;
+begin:
ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
&iter->iomap, &iter->srcmap);
if (ret < 0)
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index a845c012b50c..6cbc587c93da 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -3,14 +3,10 @@
* Copyright (C) 2017 Red Hat, Inc.
* Copyright (c) 2018-2021 Christoph Hellwig.
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
+static int iomap_seek_hole_iter(struct iomap_iter *iter,
loff_t *hole_pos)
{
loff_t length = iomap_length(iter);
@@ -20,13 +16,13 @@ static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
iter->pos, iter->pos + length, SEEK_HOLE);
if (*hole_pos == iter->pos + length)
- return length;
+ return iomap_iter_advance(iter, length);
return 0;
case IOMAP_HOLE:
*hole_pos = iter->pos;
return 0;
default:
- return length;
+ return iomap_iter_advance(iter, length);
}
}
@@ -47,7 +43,7 @@ iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
iter.len = size - pos;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_seek_hole_iter(&iter, &pos);
+ iter.status = iomap_seek_hole_iter(&iter, &pos);
if (ret < 0)
return ret;
if (iter.len) /* found hole before EOF */
@@ -56,19 +52,19 @@ iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);
-static loff_t iomap_seek_data_iter(const struct iomap_iter *iter,
+static int iomap_seek_data_iter(struct iomap_iter *iter,
loff_t *hole_pos)
{
loff_t length = iomap_length(iter);
switch (iter->iomap.type) {
case IOMAP_HOLE:
- return length;
+ return iomap_iter_advance(iter, length);
case IOMAP_UNWRITTEN:
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
iter->pos, iter->pos + length, SEEK_DATA);
if (*hole_pos < 0)
- return length;
+ return iomap_iter_advance(iter, length);
return 0;
default:
*hole_pos = iter->pos;
@@ -93,7 +89,7 @@ iomap_seek_data(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
iter.len = size - pos;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_seek_data_iter(&iter, &pos);
+ iter.status = iomap_seek_data_iter(&iter, &pos);
if (ret < 0)
return ret;
if (iter.len) /* found data before EOF */
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index 5fc0ac36dee3..0db77c449467 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -3,9 +3,6 @@
* Copyright (C) 2018 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
*/
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/swap.h>
@@ -94,7 +91,7 @@ static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str)
* swap only cares about contiguous page-aligned physical extents and makes no
* distinction between written and unwritten extents.
*/
-static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
+static int iomap_swapfile_iter(struct iomap_iter *iter,
struct iomap *iomap, struct iomap_swapfile_info *isi)
{
switch (iomap->type) {
@@ -132,7 +129,8 @@ static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
return error;
memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
}
- return iomap_length(iter);
+
+ return iomap_iter_advance_full(iter);
}
/*
@@ -166,7 +164,7 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
return ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
+ iter.status = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
if (ret < 0)
return ret;
@@ -189,7 +187,6 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
sis->max = isi.nr_pages;
sis->pages = isi.nr_pages - 1;
- sis->highest_bit = isi.nr_pages - 1;
return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 65e39785c284..532787277b16 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -79,10 +79,12 @@ DECLARE_EVENT_CLASS(iomap_range_class,
DEFINE_EVENT(iomap_range_class, name, \
TP_PROTO(struct inode *inode, loff_t off, u64 len),\
TP_ARGS(inode, off, len))
-DEFINE_RANGE_EVENT(iomap_writepage);
-DEFINE_RANGE_EVENT(iomap_releasepage);
-DEFINE_RANGE_EVENT(iomap_invalidatepage);
+DEFINE_RANGE_EVENT(iomap_writeback_folio);
+DEFINE_RANGE_EVENT(iomap_release_folio);
+DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
+DEFINE_RANGE_EVENT(iomap_dio_rw_queued);
+DEFINE_RANGE_EVENT(iomap_zero_iter);
#define IOMAP_TYPE_STRINGS \
{ IOMAP_HOLE, "HOLE" }, \
@@ -97,7 +99,12 @@ DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
{ IOMAP_REPORT, "REPORT" }, \
{ IOMAP_FAULT, "FAULT" }, \
{ IOMAP_DIRECT, "DIRECT" }, \
- { IOMAP_NOWAIT, "NOWAIT" }
+ { IOMAP_NOWAIT, "NOWAIT" }, \
+ { IOMAP_OVERWRITE_ONLY, "OVERWRITE_ONLY" }, \
+ { IOMAP_UNSHARE, "UNSHARE" }, \
+ { IOMAP_DAX, "DAX" }, \
+ { IOMAP_ATOMIC, "ATOMIC" }, \
+ { IOMAP_DONTCACHE, "DONTCACHE" }
#define IOMAP_F_FLAGS_STRINGS \
{ IOMAP_F_NEW, "NEW" }, \
@@ -105,7 +112,20 @@ DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
{ IOMAP_F_SHARED, "SHARED" }, \
{ IOMAP_F_MERGED, "MERGED" }, \
{ IOMAP_F_BUFFER_HEAD, "BH" }, \
- { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }
+ { IOMAP_F_XATTR, "XATTR" }, \
+ { IOMAP_F_BOUNDARY, "BOUNDARY" }, \
+ { IOMAP_F_ANON_WRITE, "ANON_WRITE" }, \
+ { IOMAP_F_ATOMIC_BIO, "ATOMIC_BIO" }, \
+ { IOMAP_F_PRIVATE, "PRIVATE" }, \
+ { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }, \
+ { IOMAP_F_STALE, "STALE" }
+
+
+#define IOMAP_DIO_STRINGS \
+ {IOMAP_DIO_FORCE_WAIT, "DIO_FORCE_WAIT" }, \
+ {IOMAP_DIO_OVERWRITE_ONLY, "DIO_OVERWRITE_ONLY" }, \
+ {IOMAP_DIO_PARTIAL, "DIO_PARTIAL" }, \
+ {IOMAP_DIO_FSBLOCK_ALIGNED, "DIO_FSBLOCK_ALIGNED" }
DECLARE_EVENT_CLASS(iomap_class,
TP_PROTO(struct inode *inode, struct iomap *iomap),
@@ -131,7 +151,7 @@ DECLARE_EVENT_CLASS(iomap_class,
__entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
),
TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr 0x%llx offset 0x%llx "
- "length 0x%llx type %s flags %s",
+ "length 0x%llx type %s (0x%x) flags %s (0x%x)",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
MAJOR(__entry->bdev), MINOR(__entry->bdev),
@@ -139,7 +159,9 @@ DECLARE_EVENT_CLASS(iomap_class,
__entry->offset,
__entry->length,
__print_symbolic(__entry->type, IOMAP_TYPE_STRINGS),
- __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS))
+ __entry->type,
+ __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS),
+ __entry->flags)
)
#define DEFINE_IOMAP_EVENT(name) \
@@ -149,6 +171,50 @@ DEFINE_EVENT(iomap_class, name, \
DEFINE_IOMAP_EVENT(iomap_iter_dstmap);
DEFINE_IOMAP_EVENT(iomap_iter_srcmap);
+TRACE_EVENT(iomap_add_to_ioend,
+ TP_PROTO(struct inode *inode, u64 pos, unsigned int dirty_len,
+ struct iomap *iomap),
+ TP_ARGS(inode, pos, dirty_len, iomap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(u64, pos)
+ __field(u64, dirty_len)
+ __field(u64, addr)
+ __field(loff_t, offset)
+ __field(u64, length)
+ __field(u16, type)
+ __field(u16, flags)
+ __field(dev_t, bdev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->dirty_len = dirty_len;
+ __entry->addr = iomap->addr;
+ __entry->offset = iomap->offset;
+ __entry->length = iomap->length;
+ __entry->type = iomap->type;
+ __entry->flags = iomap->flags;
+ __entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx bdev %d:%d pos 0x%llx dirty len 0x%llx "
+ "addr 0x%llx offset 0x%llx length 0x%llx type %s (0x%x) flags %s (0x%x)",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ MAJOR(__entry->bdev), MINOR(__entry->bdev),
+ __entry->pos,
+ __entry->dirty_len,
+ __entry->addr,
+ __entry->offset,
+ __entry->length,
+ __print_symbolic(__entry->type, IOMAP_TYPE_STRINGS),
+ __entry->type,
+ __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS),
+ __entry->flags)
+);
+
TRACE_EVENT(iomap_iter,
TP_PROTO(struct iomap_iter *iter, const void *ops,
unsigned long caller),
@@ -158,6 +224,7 @@ TRACE_EVENT(iomap_iter,
__field(u64, ino)
__field(loff_t, pos)
__field(u64, length)
+ __field(int, status)
__field(unsigned int, flags)
__field(const void *, ops)
__field(unsigned long, caller)
@@ -167,21 +234,95 @@ TRACE_EVENT(iomap_iter,
__entry->ino = iter->inode->i_ino;
__entry->pos = iter->pos;
__entry->length = iomap_length(iter);
+ __entry->status = iter->status;
__entry->flags = iter->flags;
__entry->ops = ops;
__entry->caller = caller;
),
- TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx flags %s (0x%x) ops %ps caller %pS",
+ TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx status %d flags %s (0x%x) ops %ps caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pos,
__entry->length,
+ __entry->status,
__print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
__entry->flags,
__entry->ops,
(void *)__entry->caller)
);
+TRACE_EVENT(iomap_dio_rw_begin,
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter,
+ unsigned int dio_flags, size_t done_before),
+ TP_ARGS(iocb, iter, dio_flags, done_before),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, isize)
+ __field(loff_t, pos)
+ __field(size_t, count)
+ __field(size_t, done_before)
+ __field(int, ki_flags)
+ __field(unsigned int, dio_flags)
+ __field(bool, aio)
+ ),
+ TP_fast_assign(
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = file_inode(iocb->ki_filp)->i_ino;
+ __entry->isize = file_inode(iocb->ki_filp)->i_size;
+ __entry->pos = iocb->ki_pos;
+ __entry->count = iov_iter_count(iter);
+ __entry->done_before = done_before;
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->dio_flags = dio_flags;
+ __entry->aio = !is_sync_kiocb(iocb);
+ ),
+ TP_printk("dev %d:%d ino 0x%lx size 0x%llx offset 0x%llx length 0x%zx done_before 0x%zx flags %s dio_flags %s aio %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->isize,
+ __entry->pos,
+ __entry->count,
+ __entry->done_before,
+ __print_flags(__entry->ki_flags, "|", TRACE_IOCB_STRINGS),
+ __print_flags(__entry->dio_flags, "|", IOMAP_DIO_STRINGS),
+ __entry->aio)
+);
+
+TRACE_EVENT(iomap_dio_complete,
+ TP_PROTO(struct kiocb *iocb, int error, ssize_t ret),
+ TP_ARGS(iocb, error, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, isize)
+ __field(loff_t, pos)
+ __field(int, ki_flags)
+ __field(bool, aio)
+ __field(int, error)
+ __field(ssize_t, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = file_inode(iocb->ki_filp)->i_ino;
+ __entry->isize = file_inode(iocb->ki_filp)->i_size;
+ __entry->pos = iocb->ki_pos;
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->aio = !is_sync_kiocb(iocb);
+ __entry->error = error;
+ __entry->ret = ret;
+ ),
+ TP_printk("dev %d:%d ino 0x%lx size 0x%llx offset 0x%llx flags %s aio %d error %d ret %zd",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->isize,
+ __entry->pos,
+ __print_flags(__entry->ki_flags, "|", TRACE_IOCB_STRINGS),
+ __entry->aio,
+ __entry->error,
+ __entry->ret)
+);
+
#endif /* _IOMAP_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/isofs/Kconfig b/fs/isofs/Kconfig
index 08ffd37b9bb8..51434f2a471b 100644
--- a/fs/isofs/Kconfig
+++ b/fs/isofs/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config ISO9660_FS
tristate "ISO 9660 CDROM file system support"
+ select BUFFER_HEAD
help
This is the standard file system used on CD-ROMs. It was previously
known as "High Sierra File System" and is called "hsfs" on other
diff --git a/fs/isofs/Makefile b/fs/isofs/Makefile
index 6498fd2b0f60..b25bc542a22b 100644
--- a/fs/isofs/Makefile
+++ b/fs/isofs/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_ISO9660_FS) += isofs.o
-isofs-objs-y := namei.o inode.o dir.o util.o rock.o export.o
-isofs-objs-$(CONFIG_JOLIET) += joliet.o
-isofs-objs-$(CONFIG_ZISOFS) += compress.o
-isofs-objs := $(isofs-objs-y)
+isofs-y := namei.o inode.o dir.o util.o rock.o export.o
+isofs-$(CONFIG_JOLIET) += joliet.o
+isofs-$(CONFIG_ZISOFS) += compress.o
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index bc12ac7e2312..5f3b6da0e022 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -67,8 +67,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
for ( i = 0 ; i < pcount ; i++ ) {
if (!pages[i])
continue;
- memset(page_address(pages[i]), 0, PAGE_SIZE);
- flush_dcache_page(pages[i]);
+ memzero_page(pages[i], 0, PAGE_SIZE);
SetPageUptodate(pages[i]);
}
return ((loff_t)pcount) << PAGE_SHIFT;
@@ -82,7 +81,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
return 0;
}
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
- ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
+ bh_read_batch(haveblocks, bhs);
curbh = 0;
curpage = 0;
@@ -120,7 +119,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
zerr != Z_STREAM_END) {
if (!stream.avail_out) {
if (pages[curpage]) {
- stream.next_out = page_address(pages[curpage])
+ stream.next_out = kmap_local_page(pages[curpage])
+ poffset;
stream.avail_out = PAGE_SIZE - poffset;
poffset = 0;
@@ -176,6 +175,10 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
flush_dcache_page(pages[curpage]);
SetPageUptodate(pages[curpage]);
}
+ if (stream.next_out != (unsigned char *)zisofs_sink_page) {
+ kunmap_local(stream.next_out);
+ stream.next_out = NULL;
+ }
curpage++;
}
if (!stream.avail_in)
@@ -183,6 +186,8 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
}
inflate_out:
zlib_inflateEnd(&stream);
+ if (stream.next_out && stream.next_out != (unsigned char *)zisofs_sink_page)
+ kunmap_local(stream.next_out);
z_eio:
mutex_unlock(&zisofs_zlib_lock);
@@ -283,9 +288,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
}
if (poffset && *pages) {
- memset(page_address(*pages) + poffset, 0,
- PAGE_SIZE - poffset);
- flush_dcache_page(*pages);
+ memzero_page(*pages, poffset, PAGE_SIZE - poffset);
SetPageUptodate(*pages);
}
return 0;
@@ -296,7 +299,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
* per reference. We inject the additional pages into the page
* cache as a form of readahead.
*/
-static int zisofs_readpage(struct file *file, struct page *page)
+static int zisofs_read_folio(struct file *file, struct folio *folio)
{
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
@@ -307,16 +310,15 @@ static int zisofs_readpage(struct file *file, struct page *page)
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
struct page **pages;
- pgoff_t index = page->index, end_index;
+ pgoff_t index = folio->index, end_index;
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/*
- * If this page is wholly outside i_size we just return zero;
+ * If this folio is wholly outside i_size we just return zero;
* do_generic_file_read() will handle this for us
*/
if (index >= end_index) {
- SetPageUptodate(page);
- unlock_page(page);
+ folio_end_read(folio, true);
return 0;
}
@@ -334,18 +336,14 @@ static int zisofs_readpage(struct file *file, struct page *page)
pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
sizeof(*pages), GFP_KERNEL);
if (!pages) {
- unlock_page(page);
+ folio_unlock(folio);
return -ENOMEM;
}
- pages[full_page] = page;
+ pages[full_page] = &folio->page;
for (i = 0; i < pcount; i++, index++) {
if (i != full_page)
pages[i] = grab_cache_page_nowait(mapping, index);
- if (pages[i]) {
- ClearPageError(pages[i]);
- kmap(pages[i]);
- }
}
err = zisofs_fill_pages(inode, full_page, pcount, pages);
@@ -354,9 +352,6 @@ static int zisofs_readpage(struct file *file, struct page *page)
for (i = 0; i < pcount; i++) {
if (pages[i]) {
flush_dcache_page(pages[i]);
- if (i == full_page && err)
- SetPageError(pages[i]);
- kunmap(pages[i]);
unlock_page(pages[i]);
if (i != full_page)
put_page(pages[i]);
@@ -369,7 +364,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
}
const struct address_space_operations zisofs_aops = {
- .readpage = zisofs_readpage,
+ .read_folio = zisofs_read_folio,
/* No bmap operation supported */
};
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index eb2f8273e6f1..09df40b612fb 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
de = tmpde;
}
/* Basic sanity check, whether name doesn't exceed dir entry */
- if (de_len < de->name_len[0] +
+ if (de_len < sizeof(struct iso_directory_record) ||
+ de_len < de->name_len[0] +
sizeof(struct iso_directory_record)) {
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 35768a63fb1d..421d247fae52 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -180,7 +180,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
return NULL;
return isofs_export_iget(sb,
- fh_len > 2 ? ifid->parent_block : 0,
+ fh_len > 3 ? ifid->parent_block : 0,
ifid->parent_offset,
fh_len > 4 ? ifid->parent_generation : 0);
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 0c6eacfcbeef..b7cbe126faf3 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -21,11 +21,12 @@
#include <linux/ctype.h>
#include <linux/statfs.h>
#include <linux/cdrom.h>
-#include <linux/parser.h>
#include <linux/mpage.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include "isofs.h"
#include "zisofs.h"
@@ -70,7 +71,7 @@ static struct kmem_cache *isofs_inode_cachep;
static struct inode *isofs_alloc_inode(struct super_block *sb)
{
struct iso_inode_info *ei;
- ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, isofs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
@@ -93,7 +94,7 @@ static int __init init_inodecache(void)
isofs_inode_cachep = kmem_cache_create("isofs_inode_cache",
sizeof(struct iso_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (!isofs_inode_cachep)
return -ENOMEM;
@@ -110,10 +111,10 @@ static void destroy_inodecache(void)
kmem_cache_destroy(isofs_inode_cachep);
}
-static int isofs_remount(struct super_block *sb, int *flags, char *data)
+static int isofs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- if (!(*flags & SB_RDONLY))
+ sync_filesystem(fc->root->d_sb);
+ if (!(fc->sb_flags & SB_RDONLY))
return -EROFS;
return 0;
}
@@ -123,7 +124,6 @@ static const struct super_operations isofs_sops = {
.free_inode = isofs_free_inode,
.put_super = isofs_put_super,
.statfs = isofs_statfs,
- .remount_fs = isofs_remount,
.show_options = isofs_show_options,
};
@@ -145,7 +145,7 @@ static const struct dentry_operations isofs_dentry_ops[] = {
#endif
};
-struct iso9660_options{
+struct isofs_options{
unsigned int rock:1;
unsigned int joliet:1;
unsigned int cruft:1;
@@ -289,197 +289,153 @@ isofs_dentry_cmpi_ms(const struct dentry *dentry,
#endif
enum {
- Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore,
- Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet,
- Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err,
- Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, Opt_overriderockperm,
+ Opt_block, Opt_check, Opt_cruft, Opt_gid, Opt_ignore, Opt_iocharset,
+ Opt_map, Opt_mode, Opt_nojoliet, Opt_norock, Opt_sb, Opt_session,
+ Opt_uid, Opt_unhide, Opt_utf8, Opt_err, Opt_nocompress, Opt_hide,
+ Opt_showassoc, Opt_dmode, Opt_overriderockperm,
};
-static const match_table_t tokens = {
- {Opt_norock, "norock"},
- {Opt_nojoliet, "nojoliet"},
- {Opt_unhide, "unhide"},
- {Opt_hide, "hide"},
- {Opt_showassoc, "showassoc"},
- {Opt_cruft, "cruft"},
- {Opt_utf8, "utf8"},
- {Opt_iocharset, "iocharset=%s"},
- {Opt_map_a, "map=acorn"},
- {Opt_map_a, "map=a"},
- {Opt_map_n, "map=normal"},
- {Opt_map_n, "map=n"},
- {Opt_map_o, "map=off"},
- {Opt_map_o, "map=o"},
- {Opt_session, "session=%u"},
- {Opt_sb, "sbsector=%u"},
- {Opt_check_r, "check=relaxed"},
- {Opt_check_r, "check=r"},
- {Opt_check_s, "check=strict"},
- {Opt_check_s, "check=s"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%u"},
- {Opt_dmode, "dmode=%u"},
- {Opt_overriderockperm, "overriderockperm"},
- {Opt_block, "block=%u"},
- {Opt_ignore, "conv=binary"},
- {Opt_ignore, "conv=b"},
- {Opt_ignore, "conv=text"},
- {Opt_ignore, "conv=t"},
- {Opt_ignore, "conv=mtext"},
- {Opt_ignore, "conv=m"},
- {Opt_ignore, "conv=auto"},
- {Opt_ignore, "conv=a"},
- {Opt_nocompress, "nocompress"},
- {Opt_err, NULL}
+static const struct constant_table isofs_param_map[] = {
+ {"acorn", 'a'},
+ {"a", 'a'},
+ {"normal", 'n'},
+ {"n", 'n'},
+ {"off", 'o'},
+ {"o", 'o'},
+ {}
};
-static int parse_options(char *options, struct iso9660_options *popt)
-{
- char *p;
- int option;
- unsigned int uv;
-
- popt->map = 'n';
- popt->rock = 1;
- popt->joliet = 1;
- popt->cruft = 0;
- popt->hide = 0;
- popt->showassoc = 0;
- popt->check = 'u'; /* unset */
- popt->nocompress = 0;
- popt->blocksize = 1024;
- popt->fmode = popt->dmode = ISOFS_INVALID_MODE;
- popt->uid_set = 0;
- popt->gid_set = 0;
- popt->gid = GLOBAL_ROOT_GID;
- popt->uid = GLOBAL_ROOT_UID;
- popt->iocharset = NULL;
- popt->overriderockperm = 0;
- popt->session=-1;
- popt->sbsector=-1;
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- substring_t args[MAX_OPT_ARGS];
- unsigned n;
-
- if (!*p)
- continue;
+static const struct constant_table isofs_param_check[] = {
+ {"relaxed", 'r'},
+ {"r", 'r'},
+ {"strict", 's'},
+ {"s", 's'},
+ {}
+};
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_norock:
- popt->rock = 0;
- break;
- case Opt_nojoliet:
- popt->joliet = 0;
- break;
- case Opt_hide:
- popt->hide = 1;
- break;
- case Opt_unhide:
- case Opt_showassoc:
- popt->showassoc = 1;
- break;
- case Opt_cruft:
- popt->cruft = 1;
- break;
+static const struct fs_parameter_spec isofs_param_spec[] = {
+ fsparam_flag ("norock", Opt_norock),
+ fsparam_flag ("nojoliet", Opt_nojoliet),
+ fsparam_flag ("unhide", Opt_unhide),
+ fsparam_flag ("hide", Opt_hide),
+ fsparam_flag ("showassoc", Opt_showassoc),
+ fsparam_flag ("cruft", Opt_cruft),
+ fsparam_flag ("utf8", Opt_utf8),
+ fsparam_string ("iocharset", Opt_iocharset),
+ fsparam_enum ("map", Opt_map, isofs_param_map),
+ fsparam_u32 ("session", Opt_session),
+ fsparam_u32 ("sbsector", Opt_sb),
+ fsparam_enum ("check", Opt_check, isofs_param_check),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ /* Note: mode/dmode have historically been parsed as decimal (%u) rather than octal */
+ fsparam_u32 ("mode", Opt_mode),
+ fsparam_u32 ("dmode", Opt_dmode),
+ fsparam_flag ("overriderockperm", Opt_overriderockperm),
+ fsparam_u32 ("block", Opt_block),
+ fsparam_string ("conv", Opt_ignore),
+ fsparam_flag ("nocompress", Opt_nocompress),
+ {}
+};
+
+static int isofs_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct isofs_options *popt = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+ unsigned int n;
+
+ /* There are no remountable options */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+ return 0;
+
+ opt = fs_parse(fc, isofs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_norock:
+ popt->rock = 0;
+ break;
+ case Opt_nojoliet:
+ popt->joliet = 0;
+ break;
+ case Opt_hide:
+ popt->hide = 1;
+ break;
+ case Opt_unhide:
+ case Opt_showassoc:
+ popt->showassoc = 1;
+ break;
+ case Opt_cruft:
+ popt->cruft = 1;
+ break;
#ifdef CONFIG_JOLIET
- case Opt_utf8:
- kfree(popt->iocharset);
- popt->iocharset = kstrdup("utf8", GFP_KERNEL);
- if (!popt->iocharset)
- return 0;
- break;
- case Opt_iocharset:
- kfree(popt->iocharset);
- popt->iocharset = match_strdup(&args[0]);
- if (!popt->iocharset)
- return 0;
- break;
+ case Opt_utf8:
+ kfree(popt->iocharset);
+ popt->iocharset = kstrdup("utf8", GFP_KERNEL);
+ if (!popt->iocharset)
+ return -ENOMEM;
+ break;
+ case Opt_iocharset:
+ kfree(popt->iocharset);
+ popt->iocharset = kstrdup(param->string, GFP_KERNEL);
+ if (!popt->iocharset)
+ return -ENOMEM;
+ break;
#endif
- case Opt_map_a:
- popt->map = 'a';
- break;
- case Opt_map_o:
- popt->map = 'o';
- break;
- case Opt_map_n:
- popt->map = 'n';
- break;
- case Opt_session:
- if (match_int(&args[0], &option))
- return 0;
- n = option;
- /*
- * Track numbers are supposed to be in range 1-99, the
- * mount option starts indexing at 0.
- */
- if (n >= 99)
- return 0;
- popt->session = n + 1;
- break;
- case Opt_sb:
- if (match_int(&args[0], &option))
- return 0;
- popt->sbsector = option;
- break;
- case Opt_check_r:
- popt->check = 'r';
- break;
- case Opt_check_s:
- popt->check = 's';
- break;
- case Opt_ignore:
- break;
- case Opt_uid:
- if (match_uint(&args[0], &uv))
- return 0;
- popt->uid = make_kuid(current_user_ns(), uv);
- if (!uid_valid(popt->uid))
- return 0;
- popt->uid_set = 1;
- break;
- case Opt_gid:
- if (match_uint(&args[0], &uv))
- return 0;
- popt->gid = make_kgid(current_user_ns(), uv);
- if (!gid_valid(popt->gid))
- return 0;
- popt->gid_set = 1;
- break;
- case Opt_mode:
- if (match_int(&args[0], &option))
- return 0;
- popt->fmode = option;
- break;
- case Opt_dmode:
- if (match_int(&args[0], &option))
- return 0;
- popt->dmode = option;
- break;
- case Opt_overriderockperm:
- popt->overriderockperm = 1;
- break;
- case Opt_block:
- if (match_int(&args[0], &option))
- return 0;
- n = option;
- if (n != 512 && n != 1024 && n != 2048)
- return 0;
- popt->blocksize = n;
- break;
- case Opt_nocompress:
- popt->nocompress = 1;
- break;
- default:
- return 0;
- }
+ case Opt_map:
+ popt->map = result.uint_32;
+ break;
+ case Opt_session:
+ n = result.uint_32;
+ /*
+ * Track numbers are supposed to be in range 1-99, the
+ * mount option starts indexing at 0.
+ */
+ if (n >= 99)
+ return -EINVAL;
+ popt->session = n + 1;
+ break;
+ case Opt_sb:
+ popt->sbsector = result.uint_32;
+ break;
+ case Opt_check:
+ popt->check = result.uint_32;
+ break;
+ case Opt_ignore:
+ break;
+ case Opt_uid:
+ popt->uid = result.uid;
+ popt->uid_set = 1;
+ break;
+ case Opt_gid:
+ popt->gid = result.gid;
+ popt->gid_set = 1;
+ break;
+ case Opt_mode:
+ popt->fmode = result.uint_32;
+ break;
+ case Opt_dmode:
+ popt->dmode = result.uint_32;
+ break;
+ case Opt_overriderockperm:
+ popt->overriderockperm = 1;
+ break;
+ case Opt_block:
+ n = result.uint_32;
+ if (n != 512 && n != 1024 && n != 2048)
+ return -EINVAL;
+ popt->blocksize = n;
+ break;
+ case Opt_nocompress:
+ popt->nocompress = 1;
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
/*
@@ -615,7 +571,7 @@ static bool rootdir_empty(struct super_block *sb, unsigned long block)
/*
* Initialize the superblock and read the root inode.
*/
-static int isofs_fill_super(struct super_block *s, void *data, int silent)
+static int isofs_fill_super(struct super_block *s, struct fs_context *fc)
{
struct buffer_head *bh = NULL, *pri_bh = NULL;
struct hs_primary_descriptor *h_pri = NULL;
@@ -623,7 +579,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
struct iso_supplementary_descriptor *sec = NULL;
struct iso_directory_record *rootp;
struct inode *inode;
- struct iso9660_options opt;
+ struct isofs_options *opt = fc->fs_private;
struct isofs_sb_info *sbi;
unsigned long first_data_zone;
int joliet_level = 0;
@@ -631,15 +587,13 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
int orig_zonesize;
int table, error = -EINVAL;
unsigned int vol_desc_start;
+ int silent = fc->sb_flags & SB_SILENT;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
s->s_fs_info = sbi;
- if (!parse_options((char *)data, &opt))
- goto out_freesbi;
-
/*
* First of all, get the hardware blocksize for this device.
* If we don't know what it is, or the hardware blocksize is
@@ -655,14 +609,19 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
bdev_logical_block_size(s->s_bdev));
goto out_freesbi;
}
- opt.blocksize = sb_min_blocksize(s, opt.blocksize);
+ opt->blocksize = sb_min_blocksize(s, opt->blocksize);
+ if (!opt->blocksize) {
+ printk(KERN_ERR
+ "ISOFS: unable to set blocksize\n");
+ goto out_freesbi;
+ }
sbi->s_high_sierra = 0; /* default is iso9660 */
- sbi->s_session = opt.session;
- sbi->s_sbsector = opt.sbsector;
+ sbi->s_session = opt->session;
+ sbi->s_sbsector = opt->sbsector;
- vol_desc_start = (opt.sbsector != -1) ?
- opt.sbsector : isofs_get_last_session(s,opt.session);
+ vol_desc_start = (opt->sbsector != -1) ?
+ opt->sbsector : isofs_get_last_session(s, opt->session);
for (iso_blknum = vol_desc_start+16;
iso_blknum < vol_desc_start+100; iso_blknum++) {
@@ -696,7 +655,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) {
sec = (struct iso_supplementary_descriptor *)vdp;
if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) {
- if (opt.joliet) {
+ if (opt->joliet) {
if (sec->escape[2] == 0x40)
joliet_level = 1;
else if (sec->escape[2] == 0x43)
@@ -721,7 +680,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
goto out_freebh;
sbi->s_high_sierra = 1;
- opt.rock = 0;
+ opt->rock = 0;
h_pri = (struct hs_primary_descriptor *)vdp;
goto root_found;
}
@@ -749,7 +708,7 @@ root_found:
goto out_freebh;
}
- if (joliet_level && (!pri || !opt.rock)) {
+ if (joliet_level && (!pri || !opt->rock)) {
/* This is the case of Joliet with the norock mount flag.
* A disc with both Joliet and Rock Ridge is handled later
*/
@@ -780,7 +739,7 @@ root_found:
* blocks that were 512 bytes (which should only very rarely
* happen.)
*/
- if (orig_zonesize < opt.blocksize)
+ if (orig_zonesize < opt->blocksize)
goto out_bad_size;
/* RDE: convert log zone size to bit shift */
@@ -865,10 +824,10 @@ root_found:
#ifdef CONFIG_JOLIET
if (joliet_level) {
- char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
+ char *p = opt->iocharset ? opt->iocharset : CONFIG_NLS_DEFAULT;
if (strcmp(p, "utf8") != 0) {
- sbi->s_nls_iocharset = opt.iocharset ?
- load_nls(opt.iocharset) : load_nls_default();
+ sbi->s_nls_iocharset = opt->iocharset ?
+ load_nls(opt->iocharset) : load_nls_default();
if (!sbi->s_nls_iocharset)
goto out_freesbi;
}
@@ -876,29 +835,29 @@ root_found:
#endif
s->s_op = &isofs_sops;
s->s_export_op = &isofs_export_ops;
- sbi->s_mapping = opt.map;
- sbi->s_rock = (opt.rock ? 2 : 0);
+ sbi->s_mapping = opt->map;
+ sbi->s_rock = (opt->rock ? 2 : 0);
sbi->s_rock_offset = -1; /* initial offset, will guess until SP is found*/
- sbi->s_cruft = opt.cruft;
- sbi->s_hide = opt.hide;
- sbi->s_showassoc = opt.showassoc;
- sbi->s_uid = opt.uid;
- sbi->s_gid = opt.gid;
- sbi->s_uid_set = opt.uid_set;
- sbi->s_gid_set = opt.gid_set;
- sbi->s_nocompress = opt.nocompress;
- sbi->s_overriderockperm = opt.overriderockperm;
+ sbi->s_cruft = opt->cruft;
+ sbi->s_hide = opt->hide;
+ sbi->s_showassoc = opt->showassoc;
+ sbi->s_uid = opt->uid;
+ sbi->s_gid = opt->gid;
+ sbi->s_uid_set = opt->uid_set;
+ sbi->s_gid_set = opt->gid_set;
+ sbi->s_nocompress = opt->nocompress;
+ sbi->s_overriderockperm = opt->overriderockperm;
/*
* It would be incredibly stupid to allow people to mark every file
* on the disk as suid, so we merely allow them to set the default
* permissions.
*/
- if (opt.fmode != ISOFS_INVALID_MODE)
- sbi->s_fmode = opt.fmode & 0777;
+ if (opt->fmode != ISOFS_INVALID_MODE)
+ sbi->s_fmode = opt->fmode & 0777;
else
sbi->s_fmode = ISOFS_INVALID_MODE;
- if (opt.dmode != ISOFS_INVALID_MODE)
- sbi->s_dmode = opt.dmode & 0777;
+ if (opt->dmode != ISOFS_INVALID_MODE)
+ sbi->s_dmode = opt->dmode & 0777;
else
sbi->s_dmode = ISOFS_INVALID_MODE;
@@ -908,8 +867,22 @@ root_found:
* we then decide whether to use the Joliet descriptor.
*/
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
- if (IS_ERR(inode))
- goto out_no_root;
+
+ /*
+ * Fix for broken CDs with a corrupt root inode but a correct Joliet
+ * root directory.
+ */
+ if (IS_ERR(inode)) {
+ if (joliet_level && sbi->s_firstdatazone != first_data_zone) {
+ printk(KERN_NOTICE
+ "ISOFS: root inode is unusable. "
+ "Disabling Rock Ridge and switching to Joliet.");
+ sbi->s_rock = 0;
+ inode = NULL;
+ } else {
+ goto out_no_root;
+ }
+ }
/*
* Fix for broken CDs with Rock Ridge and empty ISO root directory but
@@ -946,12 +919,12 @@ root_found:
}
}
- if (opt.check == 'u') {
+ if (opt->check == 'u') {
/* Only Joliet is case insensitive by default */
if (joliet_level)
- opt.check = 'r';
+ opt->check = 'r';
else
- opt.check = 's';
+ opt->check = 's';
}
sbi->s_joliet_level = joliet_level;
@@ -966,12 +939,12 @@ root_found:
table = 0;
if (joliet_level)
table += 2;
- if (opt.check == 'r')
+ if (opt->check == 'r')
table++;
- sbi->s_check = opt.check;
+ sbi->s_check = opt->check;
if (table)
- s->s_d_op = &isofs_dentry_ops[table - 1];
+ set_default_d_op(s, &isofs_dentry_ops[table - 1]);
/* get the root dentry */
s->s_root = d_make_root(inode);
@@ -980,8 +953,6 @@ root_found:
goto out_no_inode;
}
- kfree(opt.iocharset);
-
return 0;
/*
@@ -1009,7 +980,7 @@ out_bad_zone_size:
goto out_freebh;
out_bad_size:
printk(KERN_WARNING "ISOFS: Logical zone size(%d) < hardware blocksize(%u)\n",
- orig_zonesize, opt.blocksize);
+ orig_zonesize, opt->blocksize);
goto out_freebh;
out_unknown_format:
if (!silent)
@@ -1019,7 +990,6 @@ out_freebh:
brelse(bh);
brelse(pri_bh);
out_freesbi:
- kfree(opt.iocharset);
kfree(sbi);
s->s_fs_info = NULL;
return error;
@@ -1174,9 +1144,9 @@ struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
return sb_bread(inode->i_sb, blknr);
}
-static int isofs_readpage(struct file *file, struct page *page)
+static int isofs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, isofs_get_block);
+ return mpage_read_folio(folio, isofs_get_block);
}
static void isofs_readahead(struct readahead_control *rac)
@@ -1190,7 +1160,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations isofs_aops = {
- .readpage = isofs_readpage,
+ .read_folio = isofs_read_folio,
.readahead = isofs_readahead,
.bmap = _isofs_bmap
};
@@ -1277,13 +1247,11 @@ static int isofs_read_level3_size(struct inode *inode)
} while (more_entries);
out:
kfree(tmpde);
- if (bh)
- brelse(bh);
+ brelse(bh);
return 0;
out_nomem:
- if (bh)
- brelse(bh);
+ brelse(bh);
return -ENOMEM;
out_noread:
@@ -1312,6 +1280,7 @@ static int isofs_read_inode(struct inode *inode, int relocated)
unsigned long offset;
struct iso_inode_info *ei = ISOFS_I(inode);
int ret = -EIO;
+ struct timespec64 ts;
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
@@ -1424,13 +1393,10 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_ino, de->flags[-high_sierra]);
}
#endif
-
- inode->i_mtime.tv_sec =
- inode->i_atime.tv_sec =
- inode->i_ctime.tv_sec = iso_date(de->date, high_sierra);
- inode->i_mtime.tv_nsec =
- inode->i_atime.tv_nsec =
- inode->i_ctime.tv_nsec = 0;
+ ts = iso_date(de->date, high_sierra ? ISO_DATE_HIGH_SIERRA : 0);
+ inode_set_ctime_to_ts(inode, ts);
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
@@ -1479,15 +1445,21 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_data.a_ops = &isofs_symlink_aops;
- } else
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ printk(KERN_DEBUG "ISOFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ ret = -EIO;
+ goto fail;
+ }
ret = 0;
out:
kfree(tmpde);
- if (bh)
- brelse(bh);
+ brelse(bh);
return ret;
out_badread:
@@ -1548,7 +1520,7 @@ struct inode *__isofs_iget(struct super_block *sb,
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
ret = isofs_read_inode(inode, relocated);
if (ret < 0) {
iget_failed(inode);
@@ -1561,18 +1533,66 @@ struct inode *__isofs_iget(struct super_block *sb,
return inode;
}
-static struct dentry *isofs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int isofs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ return get_tree_bdev(fc, isofs_fill_super);
+}
+
+static void isofs_free_fc(struct fs_context *fc)
+{
+ struct isofs_options *opt = fc->fs_private;
+
+ kfree(opt->iocharset);
+ kfree(opt);
+}
+
+static const struct fs_context_operations isofs_context_ops = {
+ .parse_param = isofs_parse_param,
+ .get_tree = isofs_get_tree,
+ .reconfigure = isofs_reconfigure,
+ .free = isofs_free_fc,
+};
+
+static int isofs_init_fs_context(struct fs_context *fc)
+{
+ struct isofs_options *opt;
+
+ opt = kzalloc(sizeof(*opt), GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ opt->map = 'n';
+ opt->rock = 1;
+ opt->joliet = 1;
+ opt->cruft = 0;
+ opt->hide = 0;
+ opt->showassoc = 0;
+ opt->check = 'u'; /* unset */
+ opt->nocompress = 0;
+ opt->blocksize = 1024;
+ opt->fmode = opt->dmode = ISOFS_INVALID_MODE;
+ opt->uid_set = 0;
+ opt->gid_set = 0;
+ opt->gid = GLOBAL_ROOT_GID;
+ opt->uid = GLOBAL_ROOT_UID;
+ opt->iocharset = NULL;
+ opt->overriderockperm = 0;
+ opt->session = -1;
+ opt->sbsector = -1;
+
+ fc->fs_private = opt;
+ fc->ops = &isofs_context_ops;
+
+ return 0;
}
static struct file_system_type iso9660_fs_type = {
.owner = THIS_MODULE,
.name = "iso9660",
- .mount = isofs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = isofs_init_fs_context,
+ .parameters = isofs_param_spec,
};
MODULE_ALIAS_FS("iso9660");
MODULE_ALIAS("iso9660");
@@ -1612,4 +1632,5 @@ static void __exit exit_iso9660_fs(void)
module_init(init_iso9660_fs)
module_exit(exit_iso9660_fs)
+MODULE_DESCRIPTION("ISO 9660 CDROM file system support");
MODULE_LICENSE("GPL");
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index dcdc191ed183..506555837533 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -3,7 +3,7 @@
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/iso_fs.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
enum isofs_file_format {
isofs_file_normal = 0,
@@ -106,7 +106,9 @@ static inline unsigned int isonum_733(u8 *p)
/* Ignore bigendian datum due to broken mastering programs */
return get_unaligned_le32(p);
}
-extern int iso_date(u8 *, int);
+#define ISO_DATE_HIGH_SIERRA (1 << 0)
+#define ISO_DATE_LONG_FORM (1 << 1)
+struct timespec64 iso_date(u8 *p, int flags);
struct inode; /* To make gcc happy */
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 4880146babaf..576498245b9d 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -412,7 +412,12 @@ repeat:
}
}
break;
- case SIG('T', 'F'):
+ case SIG('T', 'F'): {
+ int flags, size, slen;
+
+ flags = rr->u.TF.flags & TF_LONG_FORM ? ISO_DATE_LONG_FORM : 0;
+ size = rr->u.TF.flags & TF_LONG_FORM ? 17 : 7;
+ slen = rr->len - 5;
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
@@ -420,31 +425,28 @@ repeat:
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
- if (rr->u.TF.flags & TF_CREATE) {
- inode->i_ctime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_ctime.tv_nsec = 0;
+ if ((rr->u.TF.flags & TF_CREATE) && size <= slen) {
+ inode_set_ctime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
- if (rr->u.TF.flags & TF_MODIFY) {
- inode->i_mtime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_mtime.tv_nsec = 0;
+ if ((rr->u.TF.flags & TF_MODIFY) && size <= slen) {
+ inode_set_mtime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
- if (rr->u.TF.flags & TF_ACCESS) {
- inode->i_atime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_atime.tv_nsec = 0;
+ if ((rr->u.TF.flags & TF_ACCESS) && size <= slen) {
+ inode_set_atime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
- if (rr->u.TF.flags & TF_ATTRIBUTES) {
- inode->i_ctime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_ctime.tv_nsec = 0;
+ if ((rr->u.TF.flags & TF_ATTRIBUTES) && size <= slen) {
+ inode_set_ctime_to_ts(inode,
+ iso_date(rr->u.TF.data + size * cnt++, flags));
+ slen -= size;
}
break;
+ }
case SIG('S', 'L'):
{
int slen;
@@ -533,9 +535,9 @@ repeat:
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
- inode->i_atime = reloc->i_atime;
- inode->i_ctime = reloc->i_ctime;
- inode->i_mtime = reloc->i_mtime;
+ inode_set_atime_to_ts(inode, inode_get_atime(reloc));
+ inode_set_ctime_to_ts(inode, inode_get_ctime(reloc));
+ inode_set_mtime_to_ts(inode, inode_get_mtime(reloc));
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
@@ -687,15 +689,15 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
}
/*
- * readpage() for symlinks: reads symlink contents into the page and either
+ * read_folio() for symlinks: reads symlink contents into the folio and either
* makes it uptodate and returns 0 or returns error (-EIO)
*/
-static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
+static int rock_ridge_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
- char *link = page_address(page);
+ char *link = folio_address(folio);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
struct buffer_head *bh;
char *rpnt = link;
@@ -782,9 +784,10 @@ repeat:
goto fail;
brelse(bh);
*rpnt = '\0';
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ ret = 0;
+end:
+ folio_end_read(folio, ret == 0);
+ return ret;
/* error exit from macro */
out:
@@ -798,11 +801,10 @@ out_bad_span:
fail:
brelse(bh);
error:
- SetPageError(page);
- unlock_page(page);
- return -EIO;
+ ret = -EIO;
+ goto end;
}
const struct address_space_operations isofs_symlink_aops = {
- .readpage = rock_ridge_symlink_readpage
+ .read_folio = rock_ridge_symlink_read_folio
};
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ee9660e9671c..c0856fa9bb6a 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -44,7 +44,7 @@ struct RR_PN_s {
struct SL_component {
__u8 flags;
__u8 len;
- __u8 text[];
+ __u8 text[] __counted_by(len);
} __attribute__ ((packed));
struct RR_SL_s {
@@ -65,13 +65,9 @@ struct RR_PL_s {
__u8 location[8];
};
-struct stamp {
- __u8 time[7]; /* actually 6 unsigned, 1 signed */
-} __attribute__ ((packed));
-
struct RR_TF_s {
__u8 flags;
- struct stamp times[]; /* Variable number of these beasts */
+ __u8 data[];
} __attribute__ ((packed));
/* Linux-specific extension for transparent decompression */
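
The struct stamp removal above pairs with the rock.c change that now walks TF entries by hand: each recorded time occupies 7 bytes in the short form or 17 bytes when TF_LONG_FORM is set, and the remaining payload length is checked before every read. A small sketch of that arithmetic, given here only for illustration (the 5 subtracted bytes are the System Use entry header: signature, length, version and flags):

/* How many complete timestamps a TF entry of 'len' bytes can carry. */
static int tf_timestamp_count(unsigned char flags, int len)
{
	int size = (flags & TF_LONG_FORM) ? 17 : 7;
	int slen = len - 5;		/* strip the System Use entry header */

	return slen < 0 ? 0 : slen / size;
}
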
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index e88dba721661..42f479da0b28 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -16,29 +16,44 @@
* to GMT. Thus we should always be correct.
*/
-int iso_date(u8 *p, int flag)
+struct timespec64 iso_date(u8 *p, int flags)
{
int year, month, day, hour, minute, second, tz;
- int crtime;
+ struct timespec64 ts;
+
+ if (flags & ISO_DATE_LONG_FORM) {
+ year = (p[0] - '0') * 1000 +
+ (p[1] - '0') * 100 +
+ (p[2] - '0') * 10 +
+ (p[3] - '0') - 1900;
+ month = ((p[4] - '0') * 10 + (p[5] - '0'));
+ day = ((p[6] - '0') * 10 + (p[7] - '0'));
+ hour = ((p[8] - '0') * 10 + (p[9] - '0'));
+ minute = ((p[10] - '0') * 10 + (p[11] - '0'));
+ second = ((p[12] - '0') * 10 + (p[13] - '0'));
+ ts.tv_nsec = ((p[14] - '0') * 10 + (p[15] - '0')) * 10000000;
+ tz = p[16];
+ } else {
+ year = p[0];
+ month = p[1];
+ day = p[2];
+ hour = p[3];
+ minute = p[4];
+ second = p[5];
+ ts.tv_nsec = 0;
+ /* High sierra has no time zone */
+ tz = flags & ISO_DATE_HIGH_SIERRA ? 0 : p[6];
+ }
- year = p[0];
- month = p[1];
- day = p[2];
- hour = p[3];
- minute = p[4];
- second = p[5];
- if (flag == 0) tz = p[6]; /* High sierra has no time zone */
- else tz = 0;
-
if (year < 0) {
- crtime = 0;
+ ts.tv_sec = 0;
} else {
- crtime = mktime64(year+1900, month, day, hour, minute, second);
+ ts.tv_sec = mktime64(year+1900, month, day, hour, minute, second);
/* sign extend */
if (tz & 0x80)
tz |= (-1 << 8);
-
+
/*
* The timezone offset is unreliable on some disks,
* so we make a sanity check. In no case is it ever
@@ -65,7 +80,7 @@ int iso_date(u8 *p, int flag)
* for pointing out the sign error.
*/
if (-52 <= tz && tz <= 52)
- crtime -= tz * 15 * 60;
+ ts.tv_sec -= tz * 15 * 60;
}
- return crtime;
-}
+ return ts;
+}
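
For reference, the 17-byte long-form timestamp that iso_date() now decodes when ISO_DATE_LONG_FORM is set is ASCII-encoded except for its final byte: bytes 0-15 hold "YYYYMMDDHHMMSScc" (cc = centiseconds, which the kernel code converts to tv_nsec), and byte 16 is a signed timezone offset in 15-minute units. A standalone decoding sketch, illustrative only and not kernel code:

#include <stdio.h>

static void print_long_form(const unsigned char *p)
{
	const char *s = (const char *)p;
	int tz = (signed char)p[16];	/* 15-minute units from GMT */

	printf("%.4s-%.2s-%.2s %.2s:%.2s:%.2s.%.2s (tz %+d min)\n",
	       s, s + 4, s + 6, s + 8, s + 10, s + 12, s + 14, tz * 15);
}
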
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index 4ad2c67f93f1..9c19e1512101 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -2,8 +2,6 @@
config JBD2
tristate
select CRC32
- select CRYPTO
- select CRYPTO_CRC32C
help
This is a generic journaling layer for block devices that support
both 32-bit and 64-bit block numbers. It is currently used by
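
Dropping the CRYPTO/CRYPTO_CRC32C selections goes hand in hand with the jbd2_chksum() calls later in this series losing their journal_t argument: with the crypto_shash path gone, the helper can simply wrap the CRC32C library routine pulled in by "select CRC32". A sketch of the assumed shape of that wrapper (the actual definition lives in include/linux/jbd2.h, outside this excerpt):

#include <linux/crc32c.h>

/* assumed shape of the helper used by the jbd2_chksum(...) callers below */
static inline __u32 jbd2_chksum(__u32 crc, const void *address,
				unsigned int length)
{
	return crc32c(crc, address, length);
}
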
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 746132998c57..de89c5bef607 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -27,7 +27,7 @@
*
* Called with j_list_lock held.
*/
-static inline void __buffer_unlink_first(struct journal_head *jh)
+static inline void __buffer_unlink(struct journal_head *jh)
{
transaction_t *transaction = jh->b_cp_transaction;
@@ -41,57 +41,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh)
}
/*
- * Unlink a buffer from a transaction checkpoint(io) list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink(struct journal_head *jh)
-{
- transaction_t *transaction = jh->b_cp_transaction;
-
- __buffer_unlink_first(jh);
- if (transaction->t_checkpoint_io_list == jh) {
- transaction->t_checkpoint_io_list = jh->b_cpnext;
- if (transaction->t_checkpoint_io_list == jh)
- transaction->t_checkpoint_io_list = NULL;
- }
-}
-
-/*
- * Move a buffer from the checkpoint list to the checkpoint io list
- *
- * Called with j_list_lock held
- */
-static inline void __buffer_relink_io(struct journal_head *jh)
-{
- transaction_t *transaction = jh->b_cp_transaction;
-
- __buffer_unlink_first(jh);
-
- if (!transaction->t_checkpoint_io_list) {
- jh->b_cpnext = jh->b_cpprev = jh;
- } else {
- jh->b_cpnext = transaction->t_checkpoint_io_list;
- jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
- jh->b_cpprev->b_cpnext = jh;
- jh->b_cpnext->b_cpprev = jh;
- }
- transaction->t_checkpoint_io_list = jh;
-}
-
-/*
- * Check a checkpoint buffer could be release or not.
- *
- * Requires j_list_lock
- */
-static inline bool __cp_buffer_busy(struct journal_head *jh)
-{
- struct buffer_head *bh = jh2bh(jh);
-
- return (jh->b_transaction || buffer_locked(bh) || buffer_dirty(bh));
-}
-
-/*
* __jbd2_log_wait_for_space: wait until there is space in the journal.
*
* Called under j-state_lock *only*. It will be unlocked if we have to wait
@@ -130,17 +79,23 @@ __releases(&journal->j_state_lock)
if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
tid_t tid = 0;
+ bool has_transaction = false;
- if (journal->j_committing_transaction)
+ if (journal->j_committing_transaction) {
tid = journal->j_committing_transaction->t_tid;
+ has_transaction = true;
+ }
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
- } else if (jbd2_cleanup_journal_tail(journal) == 0) {
- /* We were able to recover space; yay! */
+ } else if (jbd2_cleanup_journal_tail(journal) <= 0) {
+ /*
+ * We were able to recover space or the
+ * journal was aborted due to an error.
+ */
;
- } else if (tid) {
+ } else if (has_transaction) {
/*
* jbd2_journal_commit_transaction() may want
* to take the checkpoint_mutex if JBD2_FLUSHED
@@ -158,7 +113,7 @@ __releases(&journal->j_state_lock)
"journal space in %s\n", __func__,
journal->j_devname);
WARN_ON(1);
- jbd2_journal_abort(journal, -EIO);
+ jbd2_journal_abort(journal, -ENOSPC);
}
write_lock(&journal->j_state_lock);
} else {
@@ -176,13 +131,14 @@ __flush_batch(journal_t *journal, int *batch_count)
blk_start_plug(&plug);
for (i = 0; i < *batch_count; i++)
- write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
+ write_dirty_buffer(journal->j_chkpt_bhs[i], JBD2_JOURNAL_REQ_FLAGS);
blk_finish_plug(&plug);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = journal->j_chkpt_bhs[i];
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
+ journal->j_chkpt_bhs[i] = NULL;
}
*batch_count = 0;
}
@@ -203,7 +159,7 @@ int jbd2_log_do_checkpoint(journal_t *journal)
tid_t this_tid;
int result, batch_count = 0;
- jbd_debug(1, "Start checkpoint\n");
+ jbd2_debug(1, "Start checkpoint\n");
/*
* First thing: if there are any transactions in the log which
@@ -212,7 +168,7 @@ int jbd2_log_do_checkpoint(journal_t *journal)
*/
result = jbd2_cleanup_journal_tail(journal);
trace_jbd2_checkpoint(journal, result);
- jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
+ jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
if (result <= 0)
return result;
@@ -242,15 +198,6 @@ restart:
jh = transaction->t_checkpoint_list;
bh = jh2bh(jh);
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(&journal->j_list_lock);
- wait_on_buffer(bh);
- /* the journal_head may have gone by now */
- BUFFER_TRACE(bh, "brelse");
- __brelse(bh);
- goto retry;
- }
if (jh->b_transaction != NULL) {
transaction_t *t = jh->b_transaction;
tid_t tid = t->t_tid;
@@ -285,30 +232,50 @@ restart:
spin_lock(&journal->j_list_lock);
goto restart;
}
- if (!buffer_dirty(bh)) {
+ if (!trylock_buffer(bh)) {
+ /*
+ * The buffer is locked: it may be under writeback,
+ * still flushing out from the last couple of cycles,
+ * or being re-added to a new transaction, so wait
+ * for it to be unlocked and then check it again.
+ */
+ get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
+ wait_on_buffer(bh);
+ /* the journal_head may have gone by now */
+ BUFFER_TRACE(bh, "brelse");
+ __brelse(bh);
+ goto retry;
+ } else if (!buffer_dirty(bh)) {
+ unlock_buffer(bh);
BUFFER_TRACE(bh, "remove from checkpoint");
- if (__jbd2_journal_remove_checkpoint(jh))
- /* The transaction was released; we're done */
+ /*
+ * If the transaction was released or the checkpoint
+ * list was empty, we're done.
+ */
+ if (__jbd2_journal_remove_checkpoint(jh) ||
+ !transaction->t_checkpoint_list)
goto out;
- continue;
+ } else {
+ unlock_buffer(bh);
+ /*
+ * We are about to write the buffer. Once we release
+ * j_list_lock it can be raced by other transaction
+ * shrink or buffer re-log logic, so leave it on the
+ * checkpoint list and re-check its status later to
+ * make sure it is still clean.
+ */
+ BUFFER_TRACE(bh, "queue");
+ get_bh(bh);
+ J_ASSERT_BH(bh, !buffer_jwrite(bh));
+ journal->j_chkpt_bhs[batch_count++] = bh;
+ transaction->t_chp_stats.cs_written++;
+ transaction->t_checkpoint_list = jh->b_cpnext;
}
- /*
- * Important: we are about to write the buffer, and
- * possibly block, while still holding the journal
- * lock. We cannot afford to let the transaction
- * logic start messing around with this buffer before
- * we write it to disk, as that would break
- * recoverability.
- */
- BUFFER_TRACE(bh, "queue");
- get_bh(bh);
- J_ASSERT_BH(bh, !buffer_jwrite(bh));
- journal->j_chkpt_bhs[batch_count++] = bh;
- __buffer_relink_io(jh);
- transaction->t_chp_stats.cs_written++;
+
if ((batch_count == JBD2_NR_BATCH) ||
- need_resched() ||
- spin_needbreak(&journal->j_list_lock))
+ need_resched() || spin_needbreak(&journal->j_list_lock) ||
+ jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
goto unlock_and_flush;
}
@@ -318,42 +285,11 @@ restart:
retry:
if (batch_count)
__flush_batch(journal, &batch_count);
+ cond_resched();
spin_lock(&journal->j_list_lock);
goto restart;
}
- /*
- * Now we issued all of the transaction's buffers, let's deal
- * with the buffers that are out for I/O.
- */
-restart2:
- /* Did somebody clean up the transaction in the meanwhile? */
- if (journal->j_checkpoint_transactions != transaction ||
- transaction->t_tid != this_tid)
- goto out;
-
- while (transaction->t_checkpoint_io_list) {
- jh = transaction->t_checkpoint_io_list;
- bh = jh2bh(jh);
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(&journal->j_list_lock);
- wait_on_buffer(bh);
- /* the journal_head may have gone by now */
- BUFFER_TRACE(bh, "brelse");
- __brelse(bh);
- spin_lock(&journal->j_list_lock);
- goto restart2;
- }
-
- /*
- * Now in whatever state the buffer currently is, we
- * know that it has been written out and so we can
- * drop it from the list
- */
- if (__jbd2_journal_remove_checkpoint(jh))
- break;
- }
out:
spin_unlock(&journal->j_list_lock);
result = jbd2_cleanup_journal_tail(journal);
@@ -409,49 +345,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
/* Checkpoint list management */
/*
- * journal_clean_one_cp_list
- *
- * Find all the written-back checkpoint buffers in the given list and
- * release them. If 'destroy' is set, clean all buffers unconditionally.
- *
- * Called with j_list_lock held.
- * Returns 1 if we freed the transaction, 0 otherwise.
- */
-static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
-{
- struct journal_head *last_jh;
- struct journal_head *next_jh = jh;
-
- if (!jh)
- return 0;
-
- last_jh = jh->b_cpprev;
- do {
- jh = next_jh;
- next_jh = jh->b_cpnext;
-
- if (!destroy && __cp_buffer_busy(jh))
- return 0;
-
- if (__jbd2_journal_remove_checkpoint(jh))
- return 1;
- /*
- * This function only frees up some memory
- * if possible so we dont have an obligation
- * to finish processing. Bail out if preemption
- * requested:
- */
- if (need_resched())
- return 0;
- } while (jh != last_jh);
-
- return 0;
-}
-
-/*
* journal_shrink_one_cp_list
*
- * Find 'nr_to_scan' written-back checkpoint buffers in the given list
+ * Find all the written-back checkpoint buffers in the given list
* and try to release them. If the whole transaction is released, set
* the 'released' parameter. Return the number of released checkpointed
* buffers.
@@ -459,7 +355,7 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
* Called with j_list_lock held.
*/
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
- unsigned long *nr_to_scan,
+ enum jbd2_shrink_type type,
bool *released)
{
struct journal_head *last_jh;
@@ -467,7 +363,8 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
unsigned long nr_freed = 0;
int ret;
- if (!jh || *nr_to_scan == 0)
+ *released = false;
+ if (!jh)
return 0;
last_jh = jh->b_cpprev;
@@ -475,12 +372,18 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
jh = next_jh;
next_jh = jh->b_cpnext;
- (*nr_to_scan)--;
- if (__cp_buffer_busy(jh))
- continue;
+ if (type == JBD2_SHRINK_DESTROY) {
+ ret = __jbd2_journal_remove_checkpoint(jh);
+ } else {
+ ret = jbd2_journal_try_remove_checkpoint(jh);
+ if (ret < 0) {
+ if (type == JBD2_SHRINK_BUSY_SKIP)
+ continue;
+ break;
+ }
+ }
nr_freed++;
- ret = __jbd2_journal_remove_checkpoint(jh);
if (ret) {
*released = true;
break;
@@ -488,7 +391,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
if (need_resched())
break;
- } while (jh != last_jh && *nr_to_scan);
+ } while (jh != last_jh);
return nr_freed;
}
@@ -506,11 +409,12 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
unsigned long *nr_to_scan)
{
transaction_t *transaction, *last_transaction, *next_transaction;
- bool released;
+ bool __maybe_unused released;
tid_t first_tid = 0, last_tid = 0, next_tid = 0;
tid_t tid = 0;
unsigned long nr_freed = 0;
- unsigned long nr_scanned = *nr_to_scan;
+ unsigned long freed;
+ bool first_set = false;
again:
spin_lock(&journal->j_list_lock);
@@ -530,8 +434,10 @@ again:
else
transaction = journal->j_checkpoint_transactions;
- if (!first_tid)
+ if (!first_set) {
first_tid = transaction->t_tid;
+ first_set = true;
+ }
last_transaction = journal->j_checkpoint_transactions->t_cpprev;
next_transaction = transaction;
last_tid = last_transaction->t_tid;
@@ -539,19 +445,11 @@ again:
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
tid = transaction->t_tid;
- released = false;
-
- nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list,
- nr_to_scan, &released);
- if (*nr_to_scan == 0)
- break;
- if (need_resched() || spin_needbreak(&journal->j_list_lock))
- break;
- if (released)
- continue;
- nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list,
- nr_to_scan, &released);
+ freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+ JBD2_SHRINK_BUSY_SKIP, &released);
+ nr_freed += freed;
+ (*nr_to_scan) -= min(*nr_to_scan, freed);
if (*nr_to_scan == 0)
break;
if (need_resched() || spin_needbreak(&journal->j_list_lock))
@@ -569,12 +467,11 @@ again:
spin_unlock(&journal->j_list_lock);
cond_resched();
- if (*nr_to_scan && next_tid)
+ if (*nr_to_scan && journal->j_shrink_transaction)
goto again;
out:
- nr_scanned -= *nr_to_scan;
trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
- nr_freed, nr_scanned, next_tid);
+ nr_freed, next_tid);
return nr_freed;
}
@@ -583,14 +480,20 @@ out:
* journal_clean_checkpoint_list
*
* Find all the written-back checkpoint buffers in the journal and release them.
- * If 'destroy' is set, release all buffers unconditionally.
+ * If 'type' is JBD2_SHRINK_DESTROY, release all buffers unconditionally. If
+ * 'type' is JBD2_SHRINK_BUSY_STOP, stop releasing buffers as soon as a busy
+ * buffer is encountered. To avoid wasting CPU cycles needlessly scanning the
+ * buffer list, do not pass JBD2_SHRINK_BUSY_SKIP as 'type' to this function.
*
* Called with j_list_lock held.
*/
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal,
+ enum jbd2_shrink_type type)
{
transaction_t *transaction, *last_transaction, *next_transaction;
- int ret;
+ bool released;
+
+ WARN_ON_ONCE(type == JBD2_SHRINK_BUSY_SKIP);
transaction = journal->j_checkpoint_transactions;
if (!transaction)
@@ -601,8 +504,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
do {
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
- ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
- destroy);
+ journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+ type, &released);
/*
* This function only frees up some memory if possible so we
* dont have an obligation to finish processing. Bail out if
@@ -610,23 +513,12 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
*/
if (need_resched())
return;
- if (ret)
- continue;
- /*
- * It is essential that we are as careful as in the case of
- * t_checkpoint_list with removing the buffer from the list as
- * we can possibly see not yet submitted buffers on io_list
- */
- ret = journal_clean_one_cp_list(transaction->
- t_checkpoint_io_list, destroy);
- if (need_resched())
- return;
/*
* Stop scanning if we couldn't free the transaction. This
* avoids pointless scanning of transactions which still
* weren't checkpointed.
*/
- if (!ret)
+ if (!released)
return;
} while (transaction != last_transaction);
}
@@ -647,7 +539,7 @@ void jbd2_journal_destroy_checkpoint(journal_t *journal)
spin_unlock(&journal->j_list_lock);
break;
}
- __jbd2_journal_clean_checkpoint_list(journal, true);
+ __jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_DESTROY);
spin_unlock(&journal->j_list_lock);
cond_resched();
}
@@ -676,7 +568,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
struct transaction_chp_stats_s *stats;
transaction_t *transaction;
journal_t *journal;
- struct buffer_head *bh = jh2bh(jh);
JBUFFER_TRACE(jh, "entry");
@@ -689,23 +580,13 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
JBUFFER_TRACE(jh, "removing from transaction");
- /*
- * If we have failed to write the buffer out to disk, the filesystem
- * may become inconsistent. We cannot abort the journal here since
- * we hold j_list_lock and we have to be careful about races with
- * jbd2_journal_destroy(). So mark the writeback IO error in the
- * journal here and we abort the journal later from a better context.
- */
- if (buffer_write_io_error(bh))
- set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags);
-
__buffer_unlink(jh);
jh->b_cp_transaction = NULL;
percpu_counter_dec(&journal->j_checkpoint_jh_count);
jbd2_journal_put_journal_head(jh);
/* Is this transaction empty? */
- if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list)
+ if (transaction->t_checkpoint_list)
return 0;
/*
@@ -737,6 +618,36 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
}
/*
+ * Check the checkpoint buffer and try to remove it from the checkpoint
+ * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
+ * it frees the transaction, 0 otherwise.
+ *
+ * This function is called with j_list_lock held.
+ */
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
+{
+ struct buffer_head *bh = jh2bh(jh);
+
+ if (jh->b_transaction)
+ return -EBUSY;
+ if (!trylock_buffer(bh))
+ return -EBUSY;
+ if (buffer_dirty(bh)) {
+ unlock_buffer(bh);
+ return -EBUSY;
+ }
+ unlock_buffer(bh);
+
+ /*
+ * Buffer is clean and the IO has finished (we held the buffer
+ * lock) so the checkpoint is done. We can safely remove the
+ * buffer from this transaction.
+ */
+ JBUFFER_TRACE(jh, "remove from checkpoint list");
+ return __jbd2_journal_remove_checkpoint(jh);
+}
+
+/*
* journal_insert_checkpoint: put a committed buffer onto a checkpoint
* list so that we know when it is safe to clean the transaction out of
* the log.
@@ -797,12 +708,11 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
J_ASSERT(transaction->t_forget == NULL);
J_ASSERT(transaction->t_shadow_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
- J_ASSERT(transaction->t_checkpoint_io_list == NULL);
J_ASSERT(atomic_read(&transaction->t_updates) == 0);
J_ASSERT(journal->j_committing_transaction != transaction);
J_ASSERT(journal->j_running_transaction != transaction);
trace_jbd2_drop_transaction(journal, transaction);
- jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
+ jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 3cc4ab2ba7f4..7203d2d2624d 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -57,32 +57,30 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
* So here, we have a buffer which has just come off the forget list. Look to
* see if we can strip all buffers from the backing page.
*
- * Called under lock_journal(), and possibly under journal_datalist_lock. The
- * caller provided us with a ref against the buffer, and we drop that here.
+ * Called under j_list_lock. The caller provided us with a ref against the
+ * buffer, and we drop that here.
*/
static void release_buffer_page(struct buffer_head *bh)
{
- struct page *page;
+ struct folio *folio;
if (buffer_dirty(bh))
goto nope;
if (atomic_read(&bh->b_count) != 1)
goto nope;
- page = bh->b_page;
- if (!page)
- goto nope;
- if (page->mapping)
+ folio = bh->b_folio;
+ if (folio->mapping)
goto nope;
/* OK, it's a truncated page */
- if (!trylock_page(page))
+ if (!folio_trylock(folio))
goto nope;
- get_page(page);
+ folio_get(folio);
__brelse(bh);
- try_to_free_buffers(page);
- unlock_page(page);
- put_page(page);
+ try_to_free_buffers(folio);
+ folio_unlock(folio);
+ folio_put(folio);
return;
nope:
@@ -101,7 +99,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
h->h_chksum_type = 0;
h->h_chksum_size = 0;
h->h_chksum[0] = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j->j_csum_seed, bh->b_data, j->j_blocksize);
h->h_chksum[0] = cpu_to_be32(csum);
}
@@ -120,8 +118,8 @@ static int journal_submit_commit_record(journal_t *journal,
{
struct commit_header *tmp;
struct buffer_head *bh;
- int ret;
struct timespec64 now;
+ blk_opf_t write_flags = REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS;
*cbh = NULL;
@@ -153,13 +151,11 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!jbd2_has_feature_async_commit(journal))
- ret = submit_bh(REQ_OP_WRITE,
- REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
- else
- ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+ write_flags |= REQ_PREFLUSH | REQ_FUA;
+ submit_bh(write_flags, bh);
*cbh = bh;
- return ret;
+ return 0;
}
/*
@@ -181,40 +177,14 @@ static int journal_wait_on_commit_record(journal_t *journal,
return ret;
}
-/*
- * write the filemap data using writepage() address_space_operations.
- * We don't do block allocation here even for delalloc. We don't
- * use writepages() because with delayed allocation we may be doing
- * block allocation in writepages().
- */
-int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
-{
- struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = mapping->nrpages * 2,
- .range_start = jinode->i_dirty_start,
- .range_end = jinode->i_dirty_end,
- };
-
- /*
- * submit the inode data buffers. We use writepage
- * instead of writepages. Because writepages can do
- * block allocation with delalloc. We need to write
- * only allocated blocks here.
- */
- return generic_writepages(mapping, &wbc);
-}
-
/* Send all the data buffers related to an inode */
-int jbd2_submit_inode_data(struct jbd2_inode *jinode)
+int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
-
if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
return 0;
trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
- return jbd2_journal_submit_inode_data_buffers(jinode);
+ return journal->j_submit_inode_data_buffers(jinode);
}
EXPORT_SYMBOL(jbd2_submit_inode_data);
@@ -300,6 +270,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
if (!ret)
ret = err;
}
+ cond_resched();
spin_lock(&journal->j_list_lock);
jinode->i_flags &= ~JI_COMMIT_RUNNING;
smp_mb();
@@ -328,14 +299,12 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
- struct page *page = bh->b_page;
char *addr;
__u32 checksum;
- addr = kmap_atomic(page);
- checksum = crc32_be(crc32_sum,
- (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
- kunmap_atomic(addr);
+ addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
+ checksum = crc32_be(crc32_sum, addr, bh->b_size);
+ kunmap_local(addr);
return checksum;
}
@@ -352,7 +321,6 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
struct buffer_head *bh, __u32 sequence)
{
journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
- struct page *page = bh->b_page;
__u8 *addr;
__u32 csum32;
__be32 seq;
@@ -361,11 +329,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
return;
seq = cpu_to_be32(sequence);
- addr = kmap_atomic(page);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
- csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
- bh->b_size);
- kunmap_atomic(addr);
+ addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
+ csum32 = jbd2_chksum(j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ csum32 = jbd2_chksum(csum32, addr, bh->b_size);
+ kunmap_local(addr);
if (jbd2_has_feature_csum3(j))
tag3->t_checksum = cpu_to_be32(csum32);
@@ -386,7 +353,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
struct buffer_head *descriptor;
struct buffer_head **wbuf = journal->j_wbuf;
int bufs;
- int flags;
+ int escape;
int err;
unsigned long long blocknr;
ktime_t start_time;
@@ -419,7 +386,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/* Do we need to erase the effects of a prior jbd2_journal_flush? */
if (journal->j_flags & JBD2_FLUSHED) {
- jbd_debug(3, "super block updated\n");
+ jbd2_debug(3, "super block updated\n");
mutex_lock_io(&journal->j_checkpoint_mutex);
/*
* We hold j_checkpoint_mutex so tail cannot change under us.
@@ -429,11 +396,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
*/
jbd2_journal_update_sb_log_tail(journal,
journal->j_tail_sequence,
- journal->j_tail,
- REQ_SYNC);
+ journal->j_tail, 0);
mutex_unlock(&journal->j_checkpoint_mutex);
} else {
- jbd_debug(3, "superblock not updated\n");
+ jbd2_debug(3, "superblock not updated\n");
}
J_ASSERT(journal->j_running_transaction != NULL);
@@ -465,7 +431,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
commit_transaction = journal->j_running_transaction;
trace_jbd2_start_commit(journal, commit_transaction);
- jbd_debug(1, "JBD2: starting commit of transaction %d\n",
+ jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
commit_transaction->t_tid);
write_lock(&journal->j_state_lock);
@@ -484,24 +450,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
stats.run.rs_locked);
- spin_lock(&commit_transaction->t_handle_lock);
- while (atomic_read(&commit_transaction->t_updates)) {
- DEFINE_WAIT(wait);
+ // waits for any t_updates to finish
+ jbd2_journal_wait_updates(journal);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&commit_transaction->t_updates)) {
- spin_unlock(&commit_transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- write_lock(&journal->j_state_lock);
- spin_lock(&commit_transaction->t_handle_lock);
- }
- finish_wait(&journal->j_wait_updates, &wait);
- }
- spin_unlock(&commit_transaction->t_handle_lock);
commit_transaction->t_state = T_SWITCH;
- write_unlock(&journal->j_state_lock);
J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
journal->j_max_transaction_buffers);
@@ -521,6 +473,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* has reserved. This is consistent with the existing behaviour
* that multiple jbd2_journal_get_write_access() calls to the same
* buffer are perfectly permissible.
+ * We use journal->j_state_lock here to serialize processing of
+ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
@@ -540,16 +494,17 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd2_journal_refile_buffer(journal, jh);
}
+ write_unlock(&journal->j_state_lock);
/*
* Now try to drop any written-back buffers from the journal's
* checkpoint lists. We do this *before* commit because it potentially
* frees some memory
*/
spin_lock(&journal->j_list_lock);
- __jbd2_journal_clean_checkpoint_list(journal, false);
+ __jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_BUSY_STOP);
spin_unlock(&journal->j_list_lock);
- jbd_debug(3, "JBD2: commit phase 1\n");
+ jbd2_debug(3, "JBD2: commit phase 1\n");
/*
* Clear revoked flag to reflect there is no revoked buffers
@@ -562,13 +517,13 @@ void jbd2_journal_commit_transaction(journal_t *journal)
*/
jbd2_journal_switch_revoke_table(journal);
+ write_lock(&journal->j_state_lock);
/*
* Reserved credits cannot be claimed anymore, free them
*/
atomic_sub(atomic_read(&journal->j_reserved_credits),
&commit_transaction->t_outstanding_credits);
- write_lock(&journal->j_state_lock);
trace_jbd2_commit_flushing(journal, commit_transaction);
stats.run.rs_flushing = jiffies;
stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
@@ -579,10 +534,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
journal->j_running_transaction = NULL;
start_time = ktime_get();
commit_transaction->t_log_start = journal->j_head;
- wake_up(&journal->j_wait_transaction_locked);
+ wake_up_all(&journal->j_wait_transaction_locked);
write_unlock(&journal->j_state_lock);
- jbd_debug(3, "JBD2: commit phase 2a\n");
+ jbd2_debug(3, "JBD2: commit phase 2a\n");
/*
* Now start flushing things to disk, in the order they appear
@@ -595,7 +550,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
blk_start_plug(&plug);
jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
- jbd_debug(3, "JBD2: commit phase 2b\n");
+ jbd2_debug(3, "JBD2: commit phase 2b\n");
/*
* Way to go: we have now written out all of the data for a
@@ -616,7 +571,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
J_ASSERT(commit_transaction->t_nr_buffers <=
atomic_read(&commit_transaction->t_outstanding_credits));
- err = 0;
bufs = 0;
descriptor = NULL;
while (commit_transaction->t_buffers) {
@@ -651,7 +605,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (!descriptor) {
J_ASSERT (bufs == 0);
- jbd_debug(4, "JBD2: get descriptor\n");
+ jbd2_debug(4, "JBD2: get descriptor\n");
descriptor = jbd2_journal_get_descriptor_buffer(
commit_transaction,
@@ -661,7 +615,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
continue;
}
- jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
+ jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
(unsigned long long)descriptor->b_blocknr,
descriptor->b_data);
tagp = &descriptor->b_data[sizeof(journal_header_t)];
@@ -706,19 +660,15 @@ void jbd2_journal_commit_transaction(journal_t *journal)
*/
set_bit(BH_JWrite, &jh2bh(jh)->b_state);
JBUFFER_TRACE(jh, "ph3: write metadata");
- flags = jbd2_journal_write_metadata_buffer(commit_transaction,
+ escape = jbd2_journal_write_metadata_buffer(commit_transaction,
jh, &wbuf[bufs], blocknr);
- if (flags < 0) {
- jbd2_journal_abort(journal, flags);
- continue;
- }
jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
/* Record the new block's tag in the current descriptor
buffer */
tag_flag = 0;
- if (flags & 1)
+ if (escape)
tag_flag |= JBD2_FLAG_ESCAPE;
if (!first_tag)
tag_flag |= JBD2_FLAG_SAME_UUID;
@@ -746,7 +696,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
commit_transaction->t_buffers == NULL ||
space_left < tag_bytes + 16 + csum_size) {
- jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
+ jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);
/* Write an end-of-descriptor marker before
submitting the IOs. "tag" still points to
@@ -760,6 +710,7 @@ start_journal_io:
for (i = 0; i < bufs; i++) {
struct buffer_head *bh = wbuf[i];
+
/*
* Compute checksum.
*/
@@ -772,7 +723,8 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+ submit_bh(REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS,
+ bh);
}
cond_resched();
@@ -786,10 +738,8 @@ start_journal_io:
err = journal_finish_inode_data_buffers(journal, commit_transaction);
if (err) {
printk(KERN_WARNING
- "JBD2: Detected IO errors while flushing file data "
- "on %s\n", journal->j_devname);
- if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
- jbd2_journal_abort(journal, err);
+ "JBD2: Detected IO errors %d while flushing file data on %s\n",
+ err, journal->j_devname);
err = 0;
}
@@ -810,19 +760,19 @@ start_journal_io:
if (first_block < journal->j_tail)
freed += journal->j_last - journal->j_first;
/* Update tail only if we free significant amount of space */
- if (freed < jbd2_journal_get_max_txn_bufs(journal))
+ if (freed < journal->j_max_transaction_buffers)
update_tail = 0;
}
J_ASSERT(commit_transaction->t_state == T_COMMIT);
commit_transaction->t_state = T_COMMIT_DFLUSH;
write_unlock(&journal->j_state_lock);
- /*
+ /*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
- * the commit record
+ * the commit record and update the journal tail sequence.
*/
- if (commit_transaction->t_need_data_flush &&
+ if ((commit_transaction->t_need_data_flush || update_tail) &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev);
@@ -848,7 +798,7 @@ start_journal_io:
so we incur less scheduling load.
*/
- jbd_debug(3, "JBD2: commit phase 3\n");
+ jbd2_debug(3, "JBD2: commit phase 3\n");
while (!list_empty(&io_bufs)) {
struct buffer_head *bh = list_entry(io_bufs.prev,
@@ -891,7 +841,7 @@ start_journal_io:
J_ASSERT (commit_transaction->t_shadow_list == NULL);
- jbd_debug(3, "JBD2: commit phase 4\n");
+ jbd2_debug(3, "JBD2: commit phase 4\n");
/* Here we wait for the revoke record and descriptor record buffers */
while (!list_empty(&log_bufs)) {
@@ -915,7 +865,7 @@ start_journal_io:
if (err)
jbd2_journal_abort(journal, err);
- jbd_debug(3, "JBD2: commit phase 5\n");
+ jbd2_debug(3, "JBD2: commit phase 5\n");
write_lock(&journal->j_state_lock);
J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
commit_transaction->t_state = T_COMMIT_JFLUSH;
@@ -954,7 +904,7 @@ start_journal_io:
transaction can be removed from any checkpoint list it was on
before. */
- jbd_debug(3, "JBD2: commit phase 6\n");
+ jbd2_debug(3, "JBD2: commit phase 6\n");
J_ASSERT(list_empty(&commit_transaction->t_inode_list));
J_ASSERT(commit_transaction->t_buffers == NULL);
@@ -1052,7 +1002,7 @@ restart_loop:
* already detached from the mapping and buffers cannot
* get reused.
*/
- mapping = READ_ONCE(bh->b_page->mapping);
+ mapping = READ_ONCE(bh->b_folio->mapping);
if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
clear_buffer_mapped(bh);
clear_buffer_new(bh);
@@ -1131,7 +1081,7 @@ restart_loop:
/* Done with this transaction! */
- jbd_debug(3, "JBD2: commit phase 7\n");
+ jbd2_debug(3, "JBD2: commit phase 7\n");
J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
@@ -1151,7 +1101,7 @@ restart_loop:
commit_transaction->t_state = T_COMMIT_CALLBACK;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
- journal->j_commit_sequence = commit_transaction->t_tid;
+ WRITE_ONCE(journal->j_commit_sequence, commit_transaction->t_tid);
journal->j_committing_transaction = NULL;
commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
@@ -1170,10 +1120,10 @@ restart_loop:
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 1);
+ journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
trace_jbd2_end_commit(journal, commit_transaction);
- jbd_debug(1, "JBD2: commit %d complete, head %d\n",
+ jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
journal->j_commit_sequence, journal->j_tail_sequence);
write_lock(&journal->j_state_lock);
@@ -1182,8 +1132,7 @@ restart_loop:
spin_lock(&journal->j_list_lock);
commit_transaction->t_state = T_FINISHED;
/* Check if the transaction can be dropped now that we are finished */
- if (commit_transaction->t_checkpoint_list == NULL &&
- commit_transaction->t_checkpoint_io_list == NULL) {
+ if (commit_transaction->t_checkpoint_list == NULL) {
__jbd2_journal_drop_transaction(journal, commit_transaction);
jbd2_journal_free_transaction(commit_transaction);
}
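
The 'escape' value returned by jbd2_journal_write_metadata_buffer() (now strictly 0 or 1) exists because a metadata block whose first word happens to equal the journal magic would be indistinguishable from a descriptor block during replay; such blocks get that word zeroed in the log and are tagged JBD2_FLAG_ESCAPE so recovery can restore it. A minimal standalone sketch of the check, with the magic constant assumed from the on-disk format (stored big-endian):

#include <stdint.h>

#define JBD2_MAGIC 0xc03b3998u	/* assumed on-disk journal magic */

/* 'data' is the first word of a would-be log block, as read from disk. */
static int block_needs_escape(const unsigned char *data)
{
	uint32_t word = ((uint32_t)data[0] << 24) | ((uint32_t)data[1] << 16) |
			((uint32_t)data[2] << 8)  |  (uint32_t)data[3];

	return word == JBD2_MAGIC;
}
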
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 35302bc192eb..c973162d5b31 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -49,8 +49,7 @@
#include <asm/page.h>
#ifdef CONFIG_JBD2_DEBUG
-ushort jbd2_journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(jbd2_journal_enable_debug);
+static ushort jbd2_journal_enable_debug __read_mostly;
module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644);
MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2");
@@ -81,17 +80,15 @@ EXPORT_SYMBOL(jbd2_journal_errno);
EXPORT_SYMBOL(jbd2_journal_ack_err);
EXPORT_SYMBOL(jbd2_journal_clear_err);
EXPORT_SYMBOL(jbd2_log_wait_commit);
-EXPORT_SYMBOL(jbd2_log_start_commit);
EXPORT_SYMBOL(jbd2_journal_start_commit);
EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
EXPORT_SYMBOL(jbd2_journal_wipe);
-EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
-EXPORT_SYMBOL(jbd2_journal_invalidatepage);
+EXPORT_SYMBOL(jbd2_journal_blocks_per_folio);
+EXPORT_SYMBOL(jbd2_journal_invalidate_folio);
EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
-EXPORT_SYMBOL(jbd2_journal_submit_inode_data_buffers);
EXPORT_SYMBOL(jbd2_journal_finish_inode_data_buffers);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
@@ -115,26 +112,17 @@ void __jbd2_debug(int level, const char *file, const char *func,
printk(KERN_DEBUG "%s: (%s, %u): %pV", file, func, line, &vaf);
va_end(args);
}
-EXPORT_SYMBOL(__jbd2_debug);
#endif
/* Checksumming functions */
-static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
-{
- if (!jbd2_journal_has_csum_v2or3_feature(j))
- return 1;
-
- return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
-}
-
-static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+static __be32 jbd2_superblock_csum(journal_superblock_t *sb)
{
__u32 csum;
__be32 old_csum;
old_csum = sb->s_checksum;
sb->s_checksum = 0;
- csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
+ csum = jbd2_chksum(~0, (char *)sb, sizeof(journal_superblock_t));
sb->s_checksum = old_csum;
return cpu_to_be32(csum);
@@ -146,7 +134,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
static void commit_timeout(struct timer_list *t)
{
- journal_t *journal = from_timer(journal, t, j_commit_timer);
+ journal_t *journal = timer_container_of(journal, t, j_commit_timer);
wake_up_process(journal->j_task);
}
@@ -203,13 +191,13 @@ loop:
if (journal->j_flags & JBD2_UNMOUNT)
goto end_loop;
- jbd_debug(1, "commit_sequence=%u, commit_request=%u\n",
+ jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n",
journal->j_commit_sequence, journal->j_commit_request);
if (journal->j_commit_sequence != journal->j_commit_request) {
- jbd_debug(1, "OK, requests differ\n");
+ jbd2_debug(1, "OK, requests differ\n");
write_unlock(&journal->j_state_lock);
- del_timer_sync(&journal->j_commit_timer);
+ timer_delete_sync(&journal->j_commit_timer);
jbd2_journal_commit_transaction(journal);
write_lock(&journal->j_state_lock);
goto loop;
@@ -222,7 +210,7 @@ loop:
* good idea, because that depends on threads that may
* be already stopped.
*/
- jbd_debug(1, "Now suspending kjournald2\n");
+ jbd2_debug(1, "Now suspending kjournald2\n");
write_unlock(&journal->j_state_lock);
try_to_freeze();
write_lock(&journal->j_state_lock);
@@ -232,19 +220,12 @@ loop:
* so we don't sleep
*/
DEFINE_WAIT(wait);
- int should_sleep = 1;
prepare_to_wait(&journal->j_wait_commit, &wait,
TASK_INTERRUPTIBLE);
- if (journal->j_commit_sequence != journal->j_commit_request)
- should_sleep = 0;
transaction = journal->j_running_transaction;
- if (transaction && time_after_eq(jiffies,
- transaction->t_expires))
- should_sleep = 0;
- if (journal->j_flags & JBD2_UNMOUNT)
- should_sleep = 0;
- if (should_sleep) {
+ if (transaction == NULL ||
+ time_before(jiffies, transaction->t_expires)) {
write_unlock(&journal->j_state_lock);
schedule();
write_lock(&journal->j_state_lock);
@@ -252,7 +233,7 @@ loop:
finish_wait(&journal->j_wait_commit, &wait);
}
- jbd_debug(1, "kjournald2 wakes\n");
+ jbd2_debug(1, "kjournald2 wakes\n");
/*
* Were we woken up by a commit wakeup event?
@@ -260,15 +241,15 @@ loop:
transaction = journal->j_running_transaction;
if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
journal->j_commit_request = transaction->t_tid;
- jbd_debug(1, "woke because of timeout\n");
+ jbd2_debug(1, "woke because of timeout\n");
}
goto loop;
end_loop:
- del_timer_sync(&journal->j_commit_timer);
+ timer_delete_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
- jbd_debug(1, "Journal thread exiting.\n");
+ jbd2_debug(1, "Journal thread exiting.\n");
write_unlock(&journal->j_state_lock);
return 0;
}
@@ -300,6 +281,16 @@ static void journal_kill_thread(journal_t *journal)
write_unlock(&journal->j_state_lock);
}
+static inline bool jbd2_data_needs_escaping(char *data)
+{
+ return *((__be32 *)data) == cpu_to_be32(JBD2_MAGIC_NUMBER);
+}
+
+static inline void jbd2_data_do_escape(char *data)
+{
+ *((unsigned int *)data) = 0;
+}
+
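Editorial aside, not part of this patch: the two helpers above are meant to be used back to back on the first word of a block, so a caller only has to remember whether an escape happened. A minimal usage sketch, where bh and tag_flag are hypothetical caller-side variables:

	if (jbd2_data_needs_escaping(bh->b_data)) {
		jbd2_data_do_escape(bh->b_data);	/* zero the magic word */
		tag_flag |= JBD2_FLAG_ESCAPE;		/* remember it for the descriptor tag */
	}
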
/*
* jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
*
@@ -327,12 +318,8 @@ static void journal_kill_thread(journal_t *journal)
*
*
* Return value:
- * <0: Error
- * >=0: Finished OK
- *
- * On success:
- * Bit 0 set == escape performed on the data
- * Bit 1 set == buffer copy-out performed (kfree the data after IO)
+ * =0: Finished OK without escape
+ * =1: Finished OK with escape
*/
int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
@@ -340,12 +327,9 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr)
{
- int need_copy_out = 0;
- int done_copy_out = 0;
int do_escape = 0;
- char *mapped_data;
struct buffer_head *new_bh;
- struct page *new_page;
+ struct folio *new_folio;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
journal_t *journal = transaction->t_journal;
@@ -367,87 +351,64 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
atomic_set(&new_bh->b_count, 1);
spin_lock(&jh_in->b_state_lock);
-repeat:
/*
* If a new transaction has already done a buffer copy-out, then
* we use that version of the data for the commit.
*/
if (jh_in->b_frozen_data) {
- done_copy_out = 1;
- new_page = virt_to_page(jh_in->b_frozen_data);
- new_offset = offset_in_page(jh_in->b_frozen_data);
+ new_folio = virt_to_folio(jh_in->b_frozen_data);
+ new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
+ do_escape = jbd2_data_needs_escaping(jh_in->b_frozen_data);
+ if (do_escape)
+ jbd2_data_do_escape(jh_in->b_frozen_data);
} else {
- new_page = jh2bh(jh_in)->b_page;
- new_offset = offset_in_page(jh2bh(jh_in)->b_data);
- }
+ char *tmp;
+ char *mapped_data;
- mapped_data = kmap_atomic(new_page);
- /*
- * Fire data frozen trigger if data already wasn't frozen. Do this
- * before checking for escaping, as the trigger may modify the magic
- * offset. If a copy-out happens afterwards, it will have the correct
- * data in the buffer.
- */
- if (!done_copy_out)
- jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+ new_folio = bh_in->b_folio;
+ new_offset = offset_in_folio(new_folio, bh_in->b_data);
+ mapped_data = kmap_local_folio(new_folio, new_offset);
+ /*
+ * Fire data frozen trigger if data already wasn't frozen. Do
+ * this before checking for escaping, as the trigger may modify
+ * the magic offset. If a copy-out happens afterwards, it will
+ * have the correct data in the buffer.
+ */
+ jbd2_buffer_frozen_trigger(jh_in, mapped_data,
jh_in->b_triggers);
-
- /*
- * Check for escaping
- */
- if (*((__be32 *)(mapped_data + new_offset)) ==
- cpu_to_be32(JBD2_MAGIC_NUMBER)) {
- need_copy_out = 1;
- do_escape = 1;
- }
- kunmap_atomic(mapped_data);
-
- /*
- * Do we need to do a data copy?
- */
- if (need_copy_out && !done_copy_out) {
- char *tmp;
+ do_escape = jbd2_data_needs_escaping(mapped_data);
+ kunmap_local(mapped_data);
+ /*
+ * Do we need to do a data copy?
+ */
+ if (!do_escape)
+ goto escape_done;
spin_unlock(&jh_in->b_state_lock);
- tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
- if (!tmp) {
- brelse(new_bh);
- return -ENOMEM;
- }
+ tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS | __GFP_NOFAIL);
spin_lock(&jh_in->b_state_lock);
if (jh_in->b_frozen_data) {
jbd2_free(tmp, bh_in->b_size);
- goto repeat;
+ goto copy_done;
}
jh_in->b_frozen_data = tmp;
- mapped_data = kmap_atomic(new_page);
- memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
- kunmap_atomic(mapped_data);
-
- new_page = virt_to_page(tmp);
- new_offset = offset_in_page(tmp);
- done_copy_out = 1;
-
+ memcpy_from_folio(tmp, new_folio, new_offset, bh_in->b_size);
/*
* This isn't strictly necessary, as we're using frozen
* data for the escaping, but it keeps consistency with
* b_frozen_data usage.
*/
jh_in->b_frozen_triggers = jh_in->b_triggers;
- }
- /*
- * Did we need to do an escaping? Now we've done all the
- * copying, we can finally do so.
- */
- if (do_escape) {
- mapped_data = kmap_atomic(new_page);
- *((unsigned int *)(mapped_data + new_offset)) = 0;
- kunmap_atomic(mapped_data);
+copy_done:
+ new_folio = virt_to_folio(jh_in->b_frozen_data);
+ new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
+ jbd2_data_do_escape(jh_in->b_frozen_data);
}
- set_bh_page(new_bh, new_page, new_offset);
+escape_done:
+ folio_set_bh(new_bh, new_folio, new_offset);
new_bh->b_size = bh_in->b_size;
new_bh->b_bdev = journal->j_dev;
new_bh->b_blocknr = blocknr;
@@ -469,7 +430,7 @@ repeat:
set_buffer_shadow(bh_in);
spin_unlock(&jh_in->b_state_lock);
- return do_escape | (done_copy_out << 1);
+ return do_escape;
}
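Editorial sketch, not part of this patch: with the simplified convention (0 means no escape, 1 means escape, and the copy-out allocation can no longer fail thanks to __GFP_NOFAIL), a commit-path caller only needs something along these lines; wbuf, bufs and tag_flag are hypothetical locals shown for illustration:

	flags = jbd2_journal_write_metadata_buffer(commit_transaction, jh,
						   &wbuf[bufs], blocknr);
	if (flags)
		tag_flag |= JBD2_FLAG_ESCAPE;	/* record the escape in the tag */
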
/*
@@ -481,7 +442,7 @@ repeat:
* Called with j_state_lock locked for writing.
* Returns true if a transaction commit was started.
*/
-int __jbd2_log_start_commit(journal_t *journal, tid_t target)
+static int __jbd2_log_start_commit(journal_t *journal, tid_t target)
{
/* Return if the txn has already requested to be committed */
if (journal->j_commit_request == target)
@@ -500,7 +461,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
*/
journal->j_commit_request = target;
- jbd_debug(1, "JBD2: requesting commit %u/%u\n",
+ jbd2_debug(1, "JBD2: requesting commit %u/%u\n",
journal->j_commit_request,
journal->j_commit_sequence);
journal->j_running_transaction->t_requested = jiffies;
@@ -642,7 +603,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
int ret = 0;
- transaction_t *commit_trans;
+ transaction_t *commit_trans, *running_trans;
if (!(journal->j_flags & JBD2_BARRIER))
return 0;
@@ -652,6 +613,16 @@ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
goto out;
commit_trans = journal->j_committing_transaction;
if (!commit_trans || commit_trans->t_tid != tid) {
+ running_trans = journal->j_running_transaction;
+ /*
+ * The query transaction hasn't started committing,
+ * The transaction being queried hasn't started committing
+ * yet, so it must still be running.
+ if (WARN_ON_ONCE(!running_trans ||
+ running_trans->t_tid != tid))
+ goto out;
+
+ running_trans->t_need_data_flush = 1;
ret = 1;
goto out;
}
@@ -705,7 +676,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
}
#endif
while (tid_gt(tid, journal->j_commit_sequence)) {
- jbd_debug(1, "JBD2: want %u, j_commit_sequence=%u\n",
+ jbd2_debug(1, "JBD2: want %u, j_commit_sequence=%u\n",
tid, journal->j_commit_sequence);
read_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_commit);
@@ -739,7 +710,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
return -EINVAL;
write_lock(&journal->j_state_lock);
- if (tid <= journal->j_commit_sequence) {
+ if (tid_geq(journal->j_commit_sequence, tid)) {
write_unlock(&journal->j_state_lock);
return -EALREADY;
}
@@ -769,7 +740,7 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit);
static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 0);
+ journal->j_fc_cleanup_callback(journal, 0, tid);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
@@ -802,17 +773,7 @@ EXPORT_SYMBOL(jbd2_fc_end_commit_fallback);
/* Return 1 when transaction with given tid has already committed. */
int jbd2_transaction_committed(journal_t *journal, tid_t tid)
{
- int ret = 1;
-
- read_lock(&journal->j_state_lock);
- if (journal->j_running_transaction &&
- journal->j_running_transaction->t_tid == tid)
- ret = 0;
- if (journal->j_committing_transaction &&
- journal->j_committing_transaction->t_tid == tid)
- ret = 0;
- read_unlock(&journal->j_state_lock);
- return ret;
+ return tid_geq(READ_ONCE(journal->j_commit_sequence), tid);
}
EXPORT_SYMBOL(jbd2_transaction_committed);
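Editorial note, not part of this patch: the lockless rewrite above leans on the tid comparison helpers being wrap-safe. They compare through a signed difference, roughly as below, so the test stays correct when the 32-bit sequence counter overflows:

	static inline int tid_geq(tid_t x, tid_t y)
	{
		int difference = (x - y);

		return (difference >= 0);
	}
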
@@ -878,17 +839,12 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
*bh_out = NULL;
- if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) {
- fc_off = journal->j_fc_off;
- blocknr = journal->j_fc_first + fc_off;
- journal->j_fc_off++;
- } else {
- ret = -EINVAL;
- }
-
- if (ret)
- return ret;
+ if (journal->j_fc_off + journal->j_fc_first >= journal->j_fc_last)
+ return -EINVAL;
+ fc_off = journal->j_fc_off;
+ blocknr = journal->j_fc_first + fc_off;
+ journal->j_fc_off++;
ret = jbd2_journal_bmap(journal, blocknr, &pblock);
if (ret)
return ret;
@@ -897,7 +853,6 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
if (!bh)
return -ENOMEM;
-
journal->j_fc_wbuf[fc_off] = bh;
*bh_out = bh;
@@ -924,17 +879,23 @@ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks)
for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) {
bh = journal->j_fc_wbuf[i];
wait_on_buffer(bh);
+ /*
+ * Update j_fc_off so jbd2_fc_release_bufs() can release the
+ * remaining buffer heads.
+ */
+ if (unlikely(!buffer_uptodate(bh))) {
+ journal->j_fc_off = i + 1;
+ return -EIO;
+ }
put_bh(bh);
journal->j_fc_wbuf[i] = NULL;
- if (unlikely(!buffer_uptodate(bh)))
- return -EIO;
}
return 0;
}
EXPORT_SYMBOL(jbd2_fc_wait_bufs);
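For context, a hedged sketch (editorial, not part of this patch) of how a filesystem is expected to drive these helpers after the changes above: jbd2_fc_get_buf() hands out the next fast-commit block, jbd2_fc_wait_bufs() now leaves j_fc_off pointing just past a failed buffer on -EIO so it can still be released, and jbd2_fc_release_bufs() returns void. The fallback label is hypothetical:

	struct buffer_head *bh;
	int err;

	err = jbd2_fc_get_buf(journal, &bh);	/* reserve the next fc block */
	if (err)
		goto fallback;			/* fall back to a full commit */
	/* ... fill bh->b_data and submit the write ... */
	err = jbd2_fc_wait_bufs(journal, 1);
	if (err) {
		jbd2_fc_release_bufs(journal);	/* drop buffers still held in j_fc_wbuf */
		goto fallback;
	}
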
-int jbd2_fc_release_bufs(journal_t *journal)
+void jbd2_fc_release_bufs(journal_t *journal)
{
struct buffer_head *bh;
int i, j_fc_off;
@@ -948,8 +909,6 @@ int jbd2_fc_release_bufs(journal_t *journal)
put_bh(bh);
journal->j_fc_wbuf[i] = NULL;
}
-
- return 0;
}
EXPORT_SYMBOL(jbd2_fc_release_bufs);
@@ -965,18 +924,21 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
{
int err = 0;
unsigned long long ret;
- sector_t block = 0;
+ sector_t block = blocknr;
- if (journal->j_inode) {
- block = blocknr;
+ if (journal->j_bmap) {
+ err = journal->j_bmap(journal, &block);
+ if (err == 0)
+ *retp = block;
+ } else if (journal->j_inode) {
ret = bmap(journal->j_inode, &block);
if (ret || !block) {
printk(KERN_ALERT "%s: journal block not found "
"at offset %lu on %s\n",
__func__, blocknr, journal->j_devname);
+ jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
err = -EIO;
- jbd2_journal_abort(journal, err);
} else {
*retp = block;
}
@@ -993,7 +955,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
* descriptor blocks we do need to generate bona fide buffers.
*
* After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
- * the buffer's contents they really should run flush_dcache_page(bh->b_page).
+ * the buffer's contents they really should run flush_dcache_folio(bh->b_folio).
* But we don't bother doing that, so there will be coherency problems with
* mmaps of blockdevs which hold live JBD-controlled filesystems.
*/
@@ -1038,7 +1000,7 @@ void jbd2_descriptor_block_csum_set(journal_t *j, struct buffer_head *bh)
tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
sizeof(struct jbd2_journal_block_tail));
tail->t_checksum = 0;
- csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
+ csum = jbd2_chksum(j->j_csum_seed, bh->b_data, j->j_blocksize);
tail->t_checksum = cpu_to_be32(csum);
}
@@ -1104,8 +1066,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
* space and if we lose sb update during power failure we'd replay
* old transaction with possibly newly overwritten data.
*/
- ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
- REQ_SYNC | REQ_FUA);
+ ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
if (ret)
goto out;
@@ -1115,7 +1076,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
freed += journal->j_last - journal->j_first;
trace_jbd2_update_log_tail(journal, tid, block, freed);
- jbd_debug(1,
+ jbd2_debug(1,
"Cleaning journal tail from %u to %u (offset %lu), "
"freeing %lu\n",
journal->j_tail_sequence, tid, block, freed);
@@ -1210,7 +1171,7 @@ static const struct seq_operations jbd2_seq_info_ops = {
static int jbd2_seq_info_open(struct inode *inode, struct file *file)
{
- journal_t *journal = PDE_DATA(inode);
+ journal_t *journal = pde_data(inode);
struct jbd2_stats_proc_session *s;
int rc, size;
@@ -1285,6 +1246,8 @@ static int jbd2_min_tag_size(void)
/**
* jbd2_journal_shrink_scan()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Scan the checkpointed buffer on the checkpoint list and release the
* journal_head.
@@ -1292,7 +1255,7 @@ static int jbd2_min_tag_size(void)
static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- journal_t *journal = container_of(shrink, journal_t, j_shrinker);
+ journal_t *journal = shrink->private_data;
unsigned long nr_to_scan = sc->nr_to_scan;
unsigned long nr_shrunk;
unsigned long count;
@@ -1310,13 +1273,15 @@ static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
/**
* jbd2_journal_shrink_count()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Count the number of checkpoint buffers on the checkpoint list.
*/
static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- journal_t *journal = container_of(shrink, journal_t, j_shrinker);
+ journal_t *journal = shrink->private_data;
unsigned long count;
count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
@@ -1326,27 +1291,255 @@ static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink,
}
/*
+ * If the journal init or create aborts, we need to mark the journal
+ * superblock as being NULL to prevent the journal destroy from writing
+ * back a bogus superblock.
+ */
+static void journal_fail_superblock(journal_t *journal)
+{
+ struct buffer_head *bh = journal->j_sb_buffer;
+ brelse(bh);
+ journal->j_sb_buffer = NULL;
+}
+
+/*
+ * Check the superblock for a given journal, performing initial
+ * validation of the format.
+ */
+static int journal_check_superblock(journal_t *journal)
+{
+ journal_superblock_t *sb = journal->j_superblock;
+ int num_fc_blks;
+ int err = -EINVAL;
+
+ if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
+ sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
+ printk(KERN_WARNING "JBD2: no valid journal superblock found\n");
+ return err;
+ }
+
+ if (be32_to_cpu(sb->s_header.h_blocktype) != JBD2_SUPERBLOCK_V1 &&
+ be32_to_cpu(sb->s_header.h_blocktype) != JBD2_SUPERBLOCK_V2) {
+ printk(KERN_WARNING "JBD2: unrecognised superblock format ID\n");
+ return err;
+ }
+
+ if (be32_to_cpu(sb->s_maxlen) > journal->j_total_len) {
+ printk(KERN_WARNING "JBD2: journal file too short\n");
+ return err;
+ }
+
+ if (be32_to_cpu(sb->s_first) == 0 ||
+ be32_to_cpu(sb->s_first) >= journal->j_total_len) {
+ printk(KERN_WARNING
+ "JBD2: Invalid start block of journal: %u\n",
+ be32_to_cpu(sb->s_first));
+ return err;
+ }
+
+ /*
+ * If this is a V2 superblock, then we have to check the
+ * feature flags on it.
+ */
+ if (!jbd2_format_support_feature(journal))
+ return 0;
+
+ if ((sb->s_feature_ro_compat &
+ ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
+ (sb->s_feature_incompat &
+ ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
+ printk(KERN_WARNING "JBD2: Unrecognised features on journal\n");
+ return err;
+ }
+
+ num_fc_blks = jbd2_has_feature_fast_commit(journal) ?
+ jbd2_journal_get_num_fc_blks(sb) : 0;
+ if (be32_to_cpu(sb->s_maxlen) < JBD2_MIN_JOURNAL_BLOCKS ||
+ be32_to_cpu(sb->s_maxlen) - JBD2_MIN_JOURNAL_BLOCKS < num_fc_blks) {
+ printk(KERN_ERR "JBD2: journal file too short %u,%d\n",
+ be32_to_cpu(sb->s_maxlen), num_fc_blks);
+ return err;
+ }
+
+ if (jbd2_has_feature_csum2(journal) &&
+ jbd2_has_feature_csum3(journal)) {
+ /* Can't have checksum v2 and v3 at the same time! */
+ printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
+ "at the same time!\n");
+ return err;
+ }
+
+ if (jbd2_journal_has_csum_v2or3(journal) &&
+ jbd2_has_feature_checksum(journal)) {
+ /* Can't have checksum v1 and v2 on at the same time! */
+ printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
+ "at the same time!\n");
+ return err;
+ }
+
+ if (jbd2_journal_has_csum_v2or3(journal)) {
+ if (sb->s_checksum_type != JBD2_CRC32C_CHKSUM) {
+ printk(KERN_ERR "JBD2: Unknown checksum type\n");
+ return err;
+ }
+
+ /* Check superblock checksum */
+ if (sb->s_checksum != jbd2_superblock_csum(sb)) {
+ printk(KERN_ERR "JBD2: journal checksum error\n");
+ err = -EFSBADCRC;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int journal_revoke_records_per_block(journal_t *journal)
+{
+ int record_size;
+ int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t);
+
+ if (jbd2_has_feature_64bit(journal))
+ record_size = 8;
+ else
+ record_size = 4;
+
+ if (jbd2_journal_has_csum_v2or3(journal))
+ space -= sizeof(struct jbd2_journal_block_tail);
+ return space / record_size;
+}
+
+static int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+ return (journal->j_total_len - journal->j_fc_wbufsize) / 3;
+}
+
+/*
+ * Base amount of descriptor blocks we reserve for each transaction.
+ */
+static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
+{
+ int tag_space = journal->j_blocksize - sizeof(journal_header_t);
+ int tags_per_block;
+
+ /* Subtract UUID */
+ tag_space -= 16;
+ if (jbd2_journal_has_csum_v2or3(journal))
+ tag_space -= sizeof(struct jbd2_journal_block_tail);
+ /* Commit code leaves a slack space of 16 bytes at the end of the block */
+ tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
+ /*
+ * Revoke descriptors are accounted separately so we need to reserve
+ * space for commit block and normal transaction descriptor blocks.
+ */
+ return 1 + DIV_ROUND_UP(jbd2_journal_get_max_txn_bufs(journal),
+ tags_per_block);
+}
+
+/*
+ * Initialize number of blocks each transaction reserves for its bookkeeping
+ * and maximum number of blocks a transaction can use. This needs to be called
+ * after the journal size and the fastcommit area size are initialized.
+ */
+static void jbd2_journal_init_transaction_limits(journal_t *journal)
+{
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
+ journal->j_transaction_overhead_buffers =
+ jbd2_descriptor_blocks_per_trans(journal);
+ journal->j_max_transaction_buffers =
+ jbd2_journal_get_max_txn_bufs(journal);
+}
+
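A worked example may help here (editorial, not part of this patch). Assuming the usual on-disk sizes, a 12-byte journal_header_t, a 16-byte UUID, a 4-byte block tail and 16-byte tags with the csum3 feature, a 4096-byte block gives tag_space = 4096 - 12 - 16 - 4 = 4064 and tags_per_block = (4064 - 16) / 16 = 253. A 32768-block journal with no fast-commit area then gets max_txn_bufs = 32768 / 3 = 10922 and 1 + DIV_ROUND_UP(10922, 253) = 45 reserved descriptor blocks per transaction, while the revoke path fits (4096 - 16 - 4) / 8 = 509 records per block with 64-bit block numbers.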
+/*
+ * Load the on-disk journal superblock and read the key fields into the
+ * journal_t.
+ */
+static int journal_load_superblock(journal_t *journal)
+{
+ int err;
+ struct buffer_head *bh;
+ journal_superblock_t *sb;
+
+ bh = getblk_unmovable(journal->j_dev, journal->j_blk_offset,
+ journal->j_blocksize);
+ if (bh)
+ err = bh_read(bh, 0);
+ if (!bh || err < 0) {
+ pr_err("%s: Cannot read journal superblock\n", __func__);
+ brelse(bh);
+ return -EIO;
+ }
+
+ journal->j_sb_buffer = bh;
+ sb = (journal_superblock_t *)bh->b_data;
+ journal->j_superblock = sb;
+ err = journal_check_superblock(journal);
+ if (err) {
+ journal_fail_superblock(journal);
+ return err;
+ }
+
+ journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
+ journal->j_tail = be32_to_cpu(sb->s_start);
+ journal->j_first = be32_to_cpu(sb->s_first);
+ journal->j_errno = be32_to_cpu(sb->s_errno);
+ journal->j_last = be32_to_cpu(sb->s_maxlen);
+
+ if (be32_to_cpu(sb->s_maxlen) < journal->j_total_len)
+ journal->j_total_len = be32_to_cpu(sb->s_maxlen);
+ /* Precompute checksum seed for all metadata */
+ if (jbd2_journal_has_csum_v2or3(journal))
+ journal->j_csum_seed = jbd2_chksum(~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
+ /* After journal features are set, we can compute transaction limits */
+ jbd2_journal_init_transaction_limits(journal);
+
+ if (jbd2_has_feature_fast_commit(journal)) {
+ journal->j_fc_last = be32_to_cpu(sb->s_maxlen);
+ journal->j_last = journal->j_fc_last -
+ jbd2_journal_get_num_fc_blks(sb);
+ journal->j_fc_first = journal->j_last + 1;
+ journal->j_fc_off = 0;
+ }
+
+ return 0;
+}
+
+
+/*
* Management for journal control blocks: functions to create and
* destroy journal_t structures, and to initialise and read existing
* journal blocks from disk. */
-/* First: create and setup a journal_t object in memory. We initialise
- * very few fields yet: that has to wait until we have created the
- * journal structures from from scratch, or loaded them from disk. */
+/* The journal_init_common() function creates and fills a journal_t object
+ * in memory. It calls journal_load_superblock() to load the on-disk journal
+ * superblock and initialize the journal_t object.
+ */
static journal_t *journal_init_common(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int blocksize)
{
- static struct lock_class_key jbd2_trans_commit_key;
journal_t *journal;
int err;
- struct buffer_head *bh;
int n;
journal = kzalloc(sizeof(*journal), GFP_KERNEL);
if (!journal)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ lockdep_register_key(&journal->jbd2_trans_commit_key);
+ journal->j_blocksize = blocksize;
+ journal->j_dev = bdev;
+ journal->j_fs_dev = fs_dev;
+ journal->j_blk_offset = start;
+ journal->j_total_len = len;
+ jbd2_init_fs_dev_write_error(journal);
+
+ err = journal_load_superblock(journal);
+ if (err)
+ goto err_cleanup;
init_waitqueue_head(&journal->j_wait_transaction_locked);
init_waitqueue_head(&journal->j_wait_done_commit);
@@ -1359,12 +1552,15 @@ static journal_t *journal_init_common(struct block_device *bdev,
mutex_init(&journal->j_checkpoint_mutex);
spin_lock_init(&journal->j_revoke_lock);
spin_lock_init(&journal->j_list_lock);
+ spin_lock_init(&journal->j_history_lock);
rwlock_init(&journal->j_state_lock);
journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
journal->j_min_batch_time = 0;
journal->j_max_batch_time = 15000; /* 15ms */
atomic_set(&journal->j_reserved_credits, 0);
+ lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
+ &journal->jbd2_trans_commit_key, 0);
/* The journal is marked for error until we succeed with recovery! */
journal->j_flags = JBD2_ABORT;
@@ -1374,18 +1570,11 @@ static journal_t *journal_init_common(struct block_device *bdev,
if (err)
goto err_cleanup;
- spin_lock_init(&journal->j_history_lock);
-
- lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
- &jbd2_trans_commit_key, 0);
-
- /* journal descriptor can store up to n blocks -bzzz */
- journal->j_blocksize = blocksize;
- journal->j_dev = bdev;
- journal->j_fs_dev = fs_dev;
- journal->j_blk_offset = start;
- journal->j_total_len = len;
- /* We need enough buffers to write out full descriptor block. */
+ /*
+ * A journal descriptor block can store tags for up to n blocks, so we
+ * need enough buffers to write out a full descriptor block.
+ */
+ err = -ENOMEM;
n = journal->j_blocksize / jbd2_min_tag_size();
journal->j_wbufsize = n;
journal->j_fc_wbuf = NULL;
@@ -1394,36 +1583,37 @@ static journal_t *journal_init_common(struct block_device *bdev,
if (!journal->j_wbuf)
goto err_cleanup;
- bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
- if (!bh) {
- pr_err("%s: Cannot get buffer for journal superblock\n",
- __func__);
+ err = percpu_counter_init(&journal->j_checkpoint_jh_count, 0,
+ GFP_KERNEL);
+ if (err)
goto err_cleanup;
- }
- journal->j_sb_buffer = bh;
- journal->j_superblock = (journal_superblock_t *)bh->b_data;
journal->j_shrink_transaction = NULL;
- journal->j_shrinker.scan_objects = jbd2_journal_shrink_scan;
- journal->j_shrinker.count_objects = jbd2_journal_shrink_count;
- journal->j_shrinker.seeks = DEFAULT_SEEKS;
- journal->j_shrinker.batch = journal->j_max_transaction_buffers;
- if (percpu_counter_init(&journal->j_checkpoint_jh_count, 0, GFP_KERNEL))
- goto err_cleanup;
-
- if (register_shrinker(&journal->j_shrinker)) {
- percpu_counter_destroy(&journal->j_checkpoint_jh_count);
+ journal->j_shrinker = shrinker_alloc(0, "jbd2-journal:(%u:%u)",
+ MAJOR(bdev->bd_dev),
+ MINOR(bdev->bd_dev));
+ if (!journal->j_shrinker) {
+ err = -ENOMEM;
goto err_cleanup;
}
+
+ journal->j_shrinker->scan_objects = jbd2_journal_shrink_scan;
+ journal->j_shrinker->count_objects = jbd2_journal_shrink_count;
+ journal->j_shrinker->private_data = journal;
+
+ shrinker_register(journal->j_shrinker);
+
return journal;
err_cleanup:
- brelse(journal->j_sb_buffer);
+ percpu_counter_destroy(&journal->j_checkpoint_jh_count);
kfree(journal->j_wbuf);
jbd2_journal_destroy_revoke(journal);
+ journal_fail_superblock(journal);
+ lockdep_unregister_key(&journal->jbd2_trans_commit_key);
kfree(journal);
- return NULL;
+ return ERR_PTR(err);
}
/* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -1456,10 +1646,11 @@ journal_t *jbd2_journal_init_dev(struct block_device *bdev,
journal_t *journal;
journal = journal_init_common(bdev, fs_dev, start, len, blocksize);
- if (!journal)
- return NULL;
+ if (IS_ERR(journal))
+ return ERR_CAST(journal);
- bdevname(journal->j_dev, journal->j_devname);
+ snprintf(journal->j_devname, sizeof(journal->j_devname),
+ "%pg", journal->j_dev);
strreplace(journal->j_devname, '/', '!');
jbd2_stats_proc_init(journal);
@@ -1478,50 +1669,35 @@ journal_t *jbd2_journal_init_inode(struct inode *inode)
{
journal_t *journal;
sector_t blocknr;
- char *p;
int err = 0;
blocknr = 0;
err = bmap(inode, &blocknr);
-
if (err || !blocknr) {
- pr_err("%s: Cannot locate journal superblock\n",
- __func__);
- return NULL;
+ pr_err("%s: Cannot locate journal superblock\n", __func__);
+ return err ? ERR_PTR(err) : ERR_PTR(-EINVAL);
}
- jbd_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
+ jbd2_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size,
inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev,
blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits,
inode->i_sb->s_blocksize);
- if (!journal)
- return NULL;
+ if (IS_ERR(journal))
+ return ERR_CAST(journal);
journal->j_inode = inode;
- bdevname(journal->j_dev, journal->j_devname);
- p = strreplace(journal->j_devname, '/', '!');
- sprintf(p, "-%lu", journal->j_inode->i_ino);
+ snprintf(journal->j_devname, sizeof(journal->j_devname),
+ "%pg-%lu", journal->j_dev, journal->j_inode->i_ino);
+ strreplace(journal->j_devname, '/', '!');
jbd2_stats_proc_init(journal);
return journal;
}
/*
- * If the journal init or create aborts, we need to mark the journal
- * superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock.
- */
-static void journal_fail_superblock(journal_t *journal)
-{
- struct buffer_head *bh = journal->j_sb_buffer;
- brelse(bh);
- journal->j_sb_buffer = NULL;
-}
-
-/*
* Given a journal_t structure, initialise the various fields for
* startup of a new journaling session. We use this both when creating
* a journal, and after recovering an old journal to reset it for
@@ -1545,16 +1721,27 @@ static int journal_reset(journal_t *journal)
journal->j_first = first;
journal->j_last = last;
- journal->j_head = journal->j_first;
- journal->j_tail = journal->j_first;
+ if (journal->j_head != 0 && journal->j_flags & JBD2_CYCLE_RECORD) {
+ /*
+ * Disable the cycled recording mode if the journal head block
+ * number is not correct.
+ */
+ if (journal->j_head < first || journal->j_head >= last) {
+ printk(KERN_WARNING "JBD2: Incorrect Journal head block %lu, "
+ "disable journal_cycle_record\n",
+ journal->j_head);
+ journal->j_head = journal->j_first;
+ }
+ } else {
+ journal->j_head = journal->j_first;
+ }
+ journal->j_tail = journal->j_head;
journal->j_free = journal->j_last - journal->j_first;
journal->j_tail_sequence = journal->j_transaction_sequence;
journal->j_commit_sequence = journal->j_transaction_sequence - 1;
journal->j_commit_request = journal->j_commit_sequence;
- journal->j_max_transaction_buffers = jbd2_journal_get_max_txn_bufs(journal);
-
/*
* Now that journal recovery is done, turn fast commits off here. This
* way, if fast commit was enabled before the crash but if now FS has
@@ -1569,7 +1756,7 @@ static int journal_reset(journal_t *journal)
* attempting a write to a potential-readonly device.
*/
if (sb->s_start == 0) {
- jbd_debug(1, "JBD2: Skipping superblock update on recovered sb "
+ jbd2_debug(1, "JBD2: Skipping superblock update on recovered sb "
"(start %ld, seq %u, errno %d)\n",
journal->j_tail, journal->j_tail_sequence,
journal->j_errno);
@@ -1585,8 +1772,7 @@ static int journal_reset(journal_t *journal)
*/
jbd2_journal_update_sb_log_tail(journal,
journal->j_tail_sequence,
- journal->j_tail,
- REQ_SYNC | REQ_FUA);
+ journal->j_tail, REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
return jbd2_journal_start_thread(journal);
@@ -1596,11 +1782,11 @@ static int journal_reset(journal_t *journal)
* This function expects that the caller will have locked the journal
* buffer head, and will return with it unlocked
*/
-static int jbd2_write_superblock(journal_t *journal, int write_flags)
+static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags)
{
struct buffer_head *bh = journal->j_sb_buffer;
journal_superblock_t *sb = journal->j_superblock;
- int ret;
+ int ret = 0;
/* Buffer got discarded which means block device got invalidated */
if (!buffer_mapped(bh)) {
@@ -1608,9 +1794,16 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
return -EIO;
}
- trace_jbd2_write_superblock(journal, write_flags);
+ /*
+ * Always set high priority flags to exempt from block layer's
+ * QOS policies, e.g. writeback throttle.
+ */
+ write_flags |= JBD2_JOURNAL_REQ_FLAGS;
if (!(journal->j_flags & JBD2_BARRIER))
write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
+
+ trace_jbd2_write_superblock(journal, write_flags);
+
if (buffer_write_io_error(bh)) {
/*
* Oh, dear. A previous attempt to write the journal
@@ -1627,10 +1820,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
set_buffer_uptodate(bh);
}
if (jbd2_journal_has_csum_v2or3(journal))
- sb->s_checksum = jbd2_superblock_csum(journal, sb);
+ sb->s_checksum = jbd2_superblock_csum(sb);
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
+ submit_bh(REQ_OP_WRITE | write_flags, bh);
wait_on_buffer(bh);
if (buffer_write_io_error(bh)) {
clear_buffer_write_io_error(bh);
@@ -1638,9 +1831,8 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
ret = -EIO;
}
if (ret) {
- printk(KERN_ERR "JBD2: Error %d detected when updating "
- "journal superblock for %s.\n", ret,
- journal->j_devname);
+ printk(KERN_ERR "JBD2: I/O error when updating journal superblock for %s.\n",
+ journal->j_devname);
if (!is_journal_aborted(journal))
jbd2_journal_abort(journal, ret);
}
@@ -1653,39 +1845,40 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
* @journal: The journal to update.
* @tail_tid: TID of the new transaction at the tail of the log
* @tail_block: The first block of the transaction at the tail of the log
- * @write_op: With which operation should we write the journal sb
+ * @write_flags: Flags for the journal sb write operation
*
* Update a journal's superblock information about log tail and write it to
* disk, waiting for the IO to complete.
*/
int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
- unsigned long tail_block, int write_op)
+ unsigned long tail_block,
+ blk_opf_t write_flags)
{
journal_superblock_t *sb = journal->j_superblock;
int ret;
if (is_journal_aborted(journal))
return -EIO;
- if (test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags)) {
- jbd2_journal_abort(journal, -EIO);
+ ret = jbd2_check_fs_dev_write_error(journal);
+ if (ret) {
+ jbd2_journal_abort(journal, ret);
return -EIO;
}
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
- jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+ jbd2_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);
lock_buffer(journal->j_sb_buffer);
sb->s_sequence = cpu_to_be32(tail_tid);
sb->s_start = cpu_to_be32(tail_block);
- ret = jbd2_write_superblock(journal, write_op);
+ ret = jbd2_write_superblock(journal, write_flags);
if (ret)
goto out;
/* Log is no longer empty */
write_lock(&journal->j_state_lock);
- WARN_ON(!sb->s_sequence);
journal->j_flags &= ~JBD2_FLUSHED;
write_unlock(&journal->j_state_lock);
@@ -1696,12 +1889,12 @@ out:
/**
* jbd2_mark_journal_empty() - Mark on disk journal as empty.
* @journal: The journal to update.
- * @write_op: With which operation should we write the journal sb
+ * @write_flags: Flags for the journal sb write operation
*
* Update a journal's dynamic superblock fields to show that journal is empty.
* Write updated superblock to disk waiting for IO to complete.
*/
-static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
+static void jbd2_mark_journal_empty(journal_t *journal, blk_opf_t write_flags)
{
journal_superblock_t *sb = journal->j_superblock;
bool had_fast_commit = false;
@@ -1713,11 +1906,12 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
return;
}
- jbd_debug(1, "JBD2: Marking journal as empty (seq %u)\n",
+ jbd2_debug(1, "JBD2: Marking journal as empty (seq %u)\n",
journal->j_tail_sequence);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
sb->s_start = cpu_to_be32(0);
+ sb->s_head = cpu_to_be32(journal->j_head);
if (jbd2_has_feature_fast_commit(journal)) {
/*
* When journal is clean, no need to commit fast commit flag and
@@ -1727,12 +1921,12 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
had_fast_commit = true;
}
- jbd2_write_superblock(journal, write_op);
+ jbd2_write_superblock(journal, write_flags);
if (had_fast_commit)
jbd2_set_feature_fast_commit(journal);
- /* Log is no longer empty */
+ /* Log is empty */
write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_FLUSHED;
write_unlock(&journal->j_state_lock);
@@ -1756,7 +1950,6 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
unsigned long block, log_offset; /* logical */
unsigned long long phys_block, block_start, block_stop; /* physical */
loff_t byte_start, byte_stop, byte_count;
- struct request_queue *q = bdev_get_queue(journal->j_dev);
/* flags must be set to either discard or zeroout */
if ((flags & ~JBD2_JOURNAL_FLUSH_VALID) || !flags ||
@@ -1764,10 +1957,8 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
(flags & JBD2_JOURNAL_FLUSH_ZEROOUT)))
return -EINVAL;
- if (!q)
- return -ENXIO;
-
- if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && !blk_queue_discard(q))
+ if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
+ !bdev_max_discard_sectors(journal->j_dev))
return -EOPNOTSUPP;
/*
@@ -1783,17 +1974,15 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
return err;
}
- if (block_start == ~0ULL) {
- block_start = phys_block;
- block_stop = block_start - 1;
- }
+ if (block_start == ~0ULL)
+ block_stop = block_start = phys_block;
/*
* last block not contiguous with current block,
* process last contiguous region and return to this block on
* next loop
*/
- if (phys_block != block_stop + 1) {
+ if (phys_block != block_stop) {
block--;
} else {
block_stop++;
@@ -1812,17 +2001,16 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
*/
byte_start = block_start * journal->j_blocksize;
byte_stop = block_stop * journal->j_blocksize;
- byte_count = (block_stop - block_start + 1) *
- journal->j_blocksize;
+ byte_count = (block_stop - block_start) * journal->j_blocksize;
- truncate_inode_pages_range(journal->j_dev->bd_inode->i_mapping,
- byte_start, byte_stop);
+ truncate_inode_pages_range(journal->j_dev->bd_mapping,
+ byte_start, byte_stop - 1);
if (flags & JBD2_JOURNAL_FLUSH_DISCARD) {
err = blkdev_issue_discard(journal->j_dev,
byte_start >> SECTOR_SHIFT,
byte_count >> SECTOR_SHIFT,
- GFP_NOFS, 0);
+ GFP_NOFS);
} else if (flags & JBD2_JOURNAL_FLUSH_ZEROOUT) {
err = blkdev_issue_zeroout(journal->j_dev,
byte_start >> SECTOR_SHIFT,
@@ -1831,7 +2019,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
}
if (unlikely(err != 0)) {
- pr_err("JBD2: (error %d) unable to wipe journal at physical blocks %llu - %llu",
+ pr_err("JBD2: (error %d) unable to wipe journal at physical blocks [%llu, %llu)",
err, block_start, block_stop);
return err;
}
@@ -1859,183 +2047,13 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
errcode = journal->j_errno;
if (errcode == -ESHUTDOWN)
errcode = 0;
- jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
+ jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
sb->s_errno = cpu_to_be32(errcode);
- jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
+ jbd2_write_superblock(journal, REQ_FUA);
}
EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
-static int journal_revoke_records_per_block(journal_t *journal)
-{
- int record_size;
- int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t);
-
- if (jbd2_has_feature_64bit(journal))
- record_size = 8;
- else
- record_size = 4;
-
- if (jbd2_journal_has_csum_v2or3(journal))
- space -= sizeof(struct jbd2_journal_block_tail);
- return space / record_size;
-}
-
-/*
- * Read the superblock for a given journal, performing initial
- * validation of the format.
- */
-static int journal_get_superblock(journal_t *journal)
-{
- struct buffer_head *bh;
- journal_superblock_t *sb;
- int err = -EIO;
-
- bh = journal->j_sb_buffer;
-
- J_ASSERT(bh != NULL);
- if (!buffer_uptodate(bh)) {
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- printk(KERN_ERR
- "JBD2: IO error reading journal superblock\n");
- goto out;
- }
- }
-
- if (buffer_verified(bh))
- return 0;
-
- sb = journal->j_superblock;
-
- err = -EINVAL;
-
- if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
- sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
- printk(KERN_WARNING "JBD2: no valid journal superblock found\n");
- goto out;
- }
-
- switch(be32_to_cpu(sb->s_header.h_blocktype)) {
- case JBD2_SUPERBLOCK_V1:
- journal->j_format_version = 1;
- break;
- case JBD2_SUPERBLOCK_V2:
- journal->j_format_version = 2;
- break;
- default:
- printk(KERN_WARNING "JBD2: unrecognised superblock format ID\n");
- goto out;
- }
-
- if (be32_to_cpu(sb->s_maxlen) < journal->j_total_len)
- journal->j_total_len = be32_to_cpu(sb->s_maxlen);
- else if (be32_to_cpu(sb->s_maxlen) > journal->j_total_len) {
- printk(KERN_WARNING "JBD2: journal file too short\n");
- goto out;
- }
-
- if (be32_to_cpu(sb->s_first) == 0 ||
- be32_to_cpu(sb->s_first) >= journal->j_total_len) {
- printk(KERN_WARNING
- "JBD2: Invalid start block of journal: %u\n",
- be32_to_cpu(sb->s_first));
- goto out;
- }
-
- if (jbd2_has_feature_csum2(journal) &&
- jbd2_has_feature_csum3(journal)) {
- /* Can't have checksum v2 and v3 at the same time! */
- printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
- "at the same time!\n");
- goto out;
- }
-
- if (jbd2_journal_has_csum_v2or3_feature(journal) &&
- jbd2_has_feature_checksum(journal)) {
- /* Can't have checksum v1 and v2 on at the same time! */
- printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
- "at the same time!\n");
- goto out;
- }
-
- if (!jbd2_verify_csum_type(journal, sb)) {
- printk(KERN_ERR "JBD2: Unknown checksum type\n");
- goto out;
- }
-
- /* Load the checksum driver */
- if (jbd2_journal_has_csum_v2or3_feature(journal)) {
- journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
- err = PTR_ERR(journal->j_chksum_driver);
- journal->j_chksum_driver = NULL;
- goto out;
- }
- }
-
- if (jbd2_journal_has_csum_v2or3(journal)) {
- /* Check superblock checksum */
- if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) {
- printk(KERN_ERR "JBD2: journal checksum error\n");
- err = -EFSBADCRC;
- goto out;
- }
-
- /* Precompute checksum seed for all metadata */
- journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
- sizeof(sb->s_uuid));
- }
-
- journal->j_revoke_records_per_block =
- journal_revoke_records_per_block(journal);
- set_buffer_verified(bh);
-
- return 0;
-
-out:
- journal_fail_superblock(journal);
- return err;
-}
-
-/*
- * Load the on-disk journal superblock and read the key fields into the
- * journal_t.
- */
-
-static int load_superblock(journal_t *journal)
-{
- int err;
- journal_superblock_t *sb;
- int num_fc_blocks;
-
- err = journal_get_superblock(journal);
- if (err)
- return err;
-
- sb = journal->j_superblock;
-
- journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
- journal->j_tail = be32_to_cpu(sb->s_start);
- journal->j_first = be32_to_cpu(sb->s_first);
- journal->j_errno = be32_to_cpu(sb->s_errno);
- journal->j_last = be32_to_cpu(sb->s_maxlen);
-
- if (jbd2_has_feature_fast_commit(journal)) {
- journal->j_fc_last = be32_to_cpu(sb->s_maxlen);
- num_fc_blocks = jbd2_journal_get_num_fc_blks(sb);
- if (journal->j_last - num_fc_blocks >= JBD2_MIN_JOURNAL_BLOCKS)
- journal->j_last = journal->j_fc_last - num_fc_blocks;
- journal->j_fc_first = journal->j_last + 1;
- journal->j_fc_off = 0;
- }
-
- return 0;
-}
-
-
/**
* jbd2_journal_load() - Read journal from disk.
* @journal: Journal to act on.
@@ -2047,26 +2065,7 @@ static int load_superblock(journal_t *journal)
int jbd2_journal_load(journal_t *journal)
{
int err;
- journal_superblock_t *sb;
-
- err = load_superblock(journal);
- if (err)
- return err;
-
- sb = journal->j_superblock;
- /* If this is a V2 superblock, then we have to check the
- * features flags on it. */
-
- if (journal->j_format_version >= 2) {
- if ((sb->s_feature_ro_compat &
- ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
- (sb->s_feature_incompat &
- ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
- printk(KERN_WARNING
- "JBD2: Unrecognised features on journal\n");
- return -EINVAL;
- }
- }
+ journal_superblock_t *sb = journal->j_superblock;
/*
* Create a slab for this blocksize
@@ -2077,8 +2076,11 @@ int jbd2_journal_load(journal_t *journal)
/* Let the recovery code check whether it needs to recover any
* data from the journal. */
- if (jbd2_journal_recover(journal))
- goto recovery_error;
+ err = jbd2_journal_recover(journal);
+ if (err) {
+ pr_warn("JBD2: journal recovery failed\n");
+ return err;
+ }
if (journal->j_failed_commit) {
printk(KERN_ERR "JBD2: journal transaction %u on %s "
@@ -2095,15 +2097,14 @@ int jbd2_journal_load(journal_t *journal)
/* OK, we've finished with the dynamic journal bits:
* reinitialise the dynamic contents of the superblock in memory
* and reset them on disk. */
- if (journal_reset(journal))
- goto recovery_error;
+ err = journal_reset(journal);
+ if (err) {
+ pr_warn("JBD2: journal reset failed\n");
+ return err;
+ }
journal->j_flags |= JBD2_LOADED;
return 0;
-
-recovery_error:
- printk(KERN_WARNING "JBD2: recovery failed\n");
- return -EIO;
}
/**
@@ -2153,13 +2154,15 @@ int jbd2_journal_destroy(journal_t *journal)
/*
* OK, all checkpoint transactions have been checked, now check the
- * write out io error flag and abort the journal if some buffer failed
- * to write back to the original location, otherwise the filesystem
- * may become inconsistent.
+ * writeback errseq of fs dev and abort the journal if some buffer
+ * failed to write back to the original location, otherwise the
+ * filesystem may become inconsistent.
*/
- if (!is_journal_aborted(journal) &&
- test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags))
- jbd2_journal_abort(journal, -EIO);
+ if (!is_journal_aborted(journal)) {
+ int ret = jbd2_check_fs_dev_write_error(journal);
+ if (ret)
+ jbd2_journal_abort(journal, ret);
+ }
if (journal->j_sb_buffer) {
if (!is_journal_aborted(journal)) {
@@ -2170,27 +2173,25 @@ int jbd2_journal_destroy(journal_t *journal)
++journal->j_transaction_sequence;
write_unlock(&journal->j_state_lock);
- jbd2_mark_journal_empty(journal,
- REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ jbd2_mark_journal_empty(journal, REQ_PREFLUSH | REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
} else
err = -EIO;
brelse(journal->j_sb_buffer);
}
- if (journal->j_shrinker.flags & SHRINKER_REGISTERED) {
+ if (journal->j_shrinker) {
percpu_counter_destroy(&journal->j_checkpoint_jh_count);
- unregister_shrinker(&journal->j_shrinker);
+ shrinker_free(journal->j_shrinker);
}
if (journal->j_proc_entry)
jbd2_stats_proc_exit(journal);
iput(journal->j_inode);
if (journal->j_revoke)
jbd2_journal_destroy_revoke(journal);
- if (journal->j_chksum_driver)
- crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_fc_wbuf);
kfree(journal->j_wbuf);
+ lockdep_unregister_key(&journal->jbd2_trans_commit_key);
kfree(journal);
return err;
@@ -2215,11 +2216,7 @@ int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat,
if (!compat && !ro && !incompat)
return 1;
- /* Load journal superblock if it is not loaded yet. */
- if (journal->j_format_version == 0 &&
- journal_get_superblock(journal) != 0)
- return 0;
- if (journal->j_format_version == 1)
+ if (!jbd2_format_support_feature(journal))
return 0;
sb = journal->j_superblock;
@@ -2249,11 +2246,7 @@ int jbd2_journal_check_available_features(journal_t *journal, unsigned long comp
if (!compat && !ro && !incompat)
return 1;
- /* We can support any known requested features iff the
- * superblock is in version 2. Otherwise we fail to support any
- * extended sb features. */
-
- if (journal->j_format_version != 2)
+ if (!jbd2_format_support_feature(journal))
return 0;
if ((compat & JBD2_KNOWN_COMPAT_FEATURES) == compat &&
@@ -2287,8 +2280,6 @@ jbd2_journal_initialize_fast_commit(journal_t *journal)
journal->j_fc_first = journal->j_last + 1;
journal->j_fc_off = 0;
journal->j_free = journal->j_last - journal->j_first;
- journal->j_max_transaction_buffers =
- jbd2_journal_get_max_txn_bufs(journal);
return 0;
}
@@ -2331,7 +2322,7 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
compat & JBD2_FEATURE_COMPAT_CHECKSUM)
compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
- jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
+ jbd2_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
compat, ro, incompat);
sb = journal->j_superblock;
@@ -2343,27 +2334,15 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
}
}
- /* Load the checksum driver if necessary */
- if ((journal->j_chksum_driver == NULL) &&
- INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
- journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
- journal->j_chksum_driver = NULL;
- return 0;
- }
- /* Precompute checksum seed for all metadata */
- journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
- sizeof(sb->s_uuid));
- }
-
lock_buffer(journal->j_sb_buffer);
- /* If enabling v3 checksums, update superblock */
+ /* If enabling v3 checksums, update superblock and precompute seed */
if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
sb->s_feature_compat &=
~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+ journal->j_csum_seed = jbd2_chksum(~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
}
/* If enabling v1 checksums, downgrade superblock */
@@ -2375,9 +2354,14 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
sb->s_feature_compat |= cpu_to_be32(compat);
sb->s_feature_ro_compat |= cpu_to_be32(ro);
sb->s_feature_incompat |= cpu_to_be32(incompat);
+ /*
+ * Update the checksum now so that it is valid even for read-only
+ * filesystems where jbd2_write_superblock() doesn't get called.
+ */
+ if (jbd2_journal_has_csum_v2or3(journal))
+ sb->s_checksum = jbd2_superblock_csum(sb);
unlock_buffer(journal->j_sb_buffer);
- journal->j_revoke_records_per_block =
- journal_revoke_records_per_block(journal);
+ jbd2_journal_init_transaction_limits(journal);
return 1;
#undef COMPAT_FEATURE_ON
@@ -2400,16 +2384,23 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
{
journal_superblock_t *sb;
- jbd_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n",
+ jbd2_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n",
compat, ro, incompat);
sb = journal->j_superblock;
+ lock_buffer(journal->j_sb_buffer);
sb->s_feature_compat &= ~cpu_to_be32(compat);
sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
sb->s_feature_incompat &= ~cpu_to_be32(incompat);
- journal->j_revoke_records_per_block =
- journal_revoke_records_per_block(journal);
+ /*
+ * Update the checksum now so that it is valid even for read-only
+ * filesystems where jbd2_write_superblock() doesn't get called.
+ */
+ if (jbd2_journal_has_csum_v2or3(journal))
+ sb->s_checksum = jbd2_superblock_csum(sb);
+ unlock_buffer(journal->j_sb_buffer);
+ jbd2_journal_init_transaction_limits(journal);
}
EXPORT_SYMBOL(jbd2_journal_clear_features);
@@ -2480,7 +2471,7 @@ int jbd2_journal_flush(journal_t *journal, unsigned int flags)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
- jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
+ jbd2_mark_journal_empty(journal, REQ_FUA);
if (flags)
err = __jbd2_journal_erase(journal, flags);
@@ -2512,16 +2503,12 @@ out:
int jbd2_journal_wipe(journal_t *journal, int write)
{
- int err = 0;
+ int err;
J_ASSERT (!(journal->j_flags & JBD2_LOADED));
- err = load_superblock(journal);
- if (err)
- return err;
-
if (!journal->j_tail)
- goto no_recovery;
+ return 0;
printk(KERN_WARNING "JBD2: %s recovery information on journal\n",
write ? "Clearing" : "Ignoring");
@@ -2530,11 +2517,10 @@ int jbd2_journal_wipe(journal_t *journal, int write)
if (write) {
/* Lock to make assertions happy... */
mutex_lock_io(&journal->j_checkpoint_mutex);
- jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
+ jbd2_mark_journal_empty(journal, REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
- no_recovery:
return err;
}
@@ -2688,9 +2674,10 @@ void jbd2_journal_ack_err(journal_t *journal)
write_unlock(&journal->j_state_lock);
}
-int jbd2_journal_blocks_per_page(struct inode *inode)
+int jbd2_journal_blocks_per_folio(struct inode *inode)
{
- return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
+ return 1 << (PAGE_SHIFT + mapping_max_folio_order(inode->i_mapping) -
+ inode->i_sb->s_blocksize_bits);
}
/*
@@ -2857,13 +2844,12 @@ static struct journal_head *journal_alloc_journal_head(void)
#endif
ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
if (!ret) {
- jbd_debug(1, "out of memory for journal_head\n");
+ jbd2_debug(1, "out of memory for journal_head\n");
pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
ret = kmem_cache_zalloc(jbd2_journal_head_cache,
GFP_NOFS | __GFP_NOFAIL);
}
- if (ret)
- spin_lock_init(&ret->b_state_lock);
+ spin_lock_init(&ret->b_state_lock);
return ret;
}
@@ -2932,7 +2918,7 @@ repeat:
} else {
J_ASSERT_BH(bh,
(atomic_read(&bh->b_count) > 0) ||
- (bh->b_page && bh->b_page->mapping));
+ (bh->b_folio && bh->b_folio->mapping));
if (!new_jh) {
jbd_unlock_bh_journal_head(bh);
@@ -2970,6 +2956,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
jbd_unlock_bh_journal_head(bh);
return jh;
}
+EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
static void __journal_remove_journal_head(struct buffer_head *bh)
{
@@ -3022,6 +3009,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
jbd_unlock_bh_journal_head(bh);
}
}
+EXPORT_SYMBOL(jbd2_journal_put_journal_head);
/*
* Initialize jbd inode head
@@ -3186,6 +3174,7 @@ static void __exit journal_exit(void)
jbd2_journal_destroy_caches();
}
+MODULE_DESCRIPTION("Generic filesystem journal-writing module");
MODULE_LICENSE("GPL");
module_init(journal_init);
module_exit(journal_exit);
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 8ca3527189f8..cac8c2cd4a92 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/crc32.h>
#include <linux/blkdev.h>
+#include <linux/string_choices.h>
#endif
/*
@@ -29,6 +30,7 @@ struct recovery_info
{
tid_t start_transaction;
tid_t end_transaction;
+ unsigned long head_block;
int nr_replays;
int nr_revokes;
@@ -37,7 +39,7 @@ struct recovery_info
static int do_one_pass(journal_t *journal,
struct recovery_info *info, enum passtype pass);
-static int scan_revoke_records(journal_t *, struct buffer_head *,
+static int scan_revoke_records(journal_t *, enum passtype, struct buffer_head *,
tid_t, struct recovery_info *);
#ifdef __KERNEL__
@@ -63,9 +65,8 @@ static void journal_brelse_array(struct buffer_head *b[], int n)
*/
#define MAXBUF 8
-static int do_readahead(journal_t *journal, unsigned int start)
+static void do_readahead(journal_t *journal, unsigned int start)
{
- int err;
unsigned int max, nbufs, next;
unsigned long long blocknr;
struct buffer_head *bh;
@@ -83,7 +84,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
nbufs = 0;
for (next = start; next < max; next++) {
- err = jbd2_journal_bmap(journal, next, &blocknr);
+ int err = jbd2_journal_bmap(journal, next, &blocknr);
if (err) {
printk(KERN_ERR "JBD2: bad block at offset %u\n",
@@ -92,15 +93,13 @@ static int do_readahead(journal_t *journal, unsigned int start)
}
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
- if (!bh) {
- err = -ENOMEM;
+ if (!bh)
goto failed;
- }
if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
bufs[nbufs++] = bh;
if (nbufs == MAXBUF) {
- ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
+ bh_readahead_batch(nbufs, bufs, 0);
journal_brelse_array(bufs, nbufs);
nbufs = 0;
}
@@ -109,13 +108,11 @@ static int do_readahead(journal_t *journal, unsigned int start)
}
if (nbufs)
- ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
- err = 0;
+ bh_readahead_batch(nbufs, bufs, 0);
failed:
if (nbufs)
journal_brelse_array(bufs, nbufs);
- return err;
}
#endif /* __KERNEL__ */
@@ -152,9 +149,14 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
return -ENOMEM;
if (!buffer_uptodate(bh)) {
- /* If this is a brand new buffer, start readahead.
- Otherwise, we assume we are already reading it. */
- if (!buffer_req(bh))
+ /*
+ * If this is a brand new buffer, start readahead.
+ * Otherwise, we assume we are already reading it.
+ */
+ bool need_readahead = !buffer_req(bh);
+
+ bh_read_nowait(bh, 0);
+ if (need_readahead)
do_readahead(journal, offset);
wait_on_buffer(bh);
}
@@ -183,7 +185,7 @@ static int jbd2_descriptor_block_csum_verify(journal_t *j, void *buf)
j->j_blocksize - sizeof(struct jbd2_journal_block_tail));
provided = tail->t_checksum;
tail->t_checksum = 0;
- calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+ calculated = jbd2_chksum(j->j_csum_seed, buf, j->j_blocksize);
tail->t_checksum = provided;
return provided == cpu_to_be32(calculated);
@@ -224,12 +226,8 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
/* Make sure we wrap around the log correctly! */
#define wrap(journal, var) \
do { \
- unsigned long _wrap_last = \
- jbd2_has_feature_fast_commit(journal) ? \
- (journal)->j_fc_last : (journal)->j_last; \
- \
- if (var >= _wrap_last) \
- var -= (_wrap_last - (journal)->j_first); \
+ if (var >= (journal)->j_last) \
+ var -= ((journal)->j_last - (journal)->j_first); \
} while (0)
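A small worked example (editorial, not part of this patch): with j_first = 1 and j_last = 8192, a scan offset that reaches 8192 wraps back to 8192 - (8192 - 1) = 1; fast-commit blocks sit beyond j_last (j_fc_first = j_last + 1) and are no longer included in the wrap arithmetic.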
static int fc_do_one_pass(journal_t *journal,
@@ -245,17 +243,18 @@ static int fc_do_one_pass(journal_t *journal,
return 0;
while (next_fc_block <= journal->j_fc_last) {
- jbd_debug(3, "Fast commit replay: next block %ld\n",
+ jbd2_debug(3, "Fast commit replay: next block %ld\n",
next_fc_block);
err = jread(&bh, journal, next_fc_block);
if (err) {
- jbd_debug(3, "Fast commit replay: read error\n");
+ jbd2_debug(3, "Fast commit replay: read error\n");
break;
}
err = journal->j_fc_replay_callback(journal, bh, pass,
next_fc_block - journal->j_fc_first,
expected_commit_id);
+ brelse(bh);
next_fc_block++;
if (err < 0 || err == JBD2_FC_REPLAY_STOP)
break;
@@ -263,7 +262,7 @@ static int fc_do_one_pass(journal_t *journal,
}
if (err)
- jbd_debug(3, "Fast commit replay failed, err = %d\n", err);
+ jbd2_debug(3, "Fast commit replay failed, err = %d\n", err);
return err;
}
@@ -283,23 +282,24 @@ static int fc_do_one_pass(journal_t *journal,
int jbd2_journal_recover(journal_t *journal)
{
int err, err2;
- journal_superblock_t * sb;
-
struct recovery_info info;
memset(&info, 0, sizeof(info));
- sb = journal->j_superblock;
/*
* The journal superblock's s_start field (the current log head)
* is always zero if, and only if, the journal was cleanly
- * unmounted.
+ * unmounted. We use its in-memory version j_tail here because
+ * jbd2_journal_wipe() could have updated it without updating journal
+ * superblock.
*/
+ if (!journal->j_tail) {
+ journal_superblock_t *sb = journal->j_superblock;
- if (!sb->s_start) {
- jbd_debug(1, "No recovery required, last transaction %d\n",
- be32_to_cpu(sb->s_sequence));
+ jbd2_debug(1, "No recovery required, last transaction %d, head block %u\n",
+ be32_to_cpu(sb->s_sequence), be32_to_cpu(sb->s_head));
journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
+ journal->j_head = be32_to_cpu(sb->s_head);
return 0;
}
@@ -309,20 +309,32 @@ int jbd2_journal_recover(journal_t *journal)
if (!err)
err = do_one_pass(journal, &info, PASS_REPLAY);
- jbd_debug(1, "JBD2: recovery, exit status %d, "
+ jbd2_debug(1, "JBD2: recovery, exit status %d, "
"recovered transactions %u to %u\n",
err, info.start_transaction, info.end_transaction);
- jbd_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n",
+ jbd2_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n",
info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
/* Restart the log at the next transaction ID, thus invalidating
* any existing commit records in the log. */
journal->j_transaction_sequence = ++info.end_transaction;
+ journal->j_head = info.head_block;
+ jbd2_debug(1, "JBD2: last transaction %d, head block %lu\n",
+ journal->j_transaction_sequence, journal->j_head);
jbd2_journal_clear_revoke(journal);
+ /* Free revoke table allocated for replay */
+ if (journal->j_revoke != journal->j_revoke_table[0] &&
+ journal->j_revoke != journal->j_revoke_table[1]) {
+ jbd2_journal_destroy_revoke_table(journal->j_revoke);
+ journal->j_revoke = journal->j_revoke_table[1];
+ }
err2 = sync_blockdev(journal->j_fs_dev);
if (!err)
err = err2;
+ err2 = jbd2_check_fs_dev_write_error(journal);
+ if (!err)
+ err = err2;
/* Make sure all replayed data is on permanent storage */
if (journal->j_flags & JBD2_BARRIER) {
err2 = blkdev_issue_flush(journal->j_fs_dev);
@@ -358,15 +370,17 @@ int jbd2_journal_skip_recovery(journal_t *journal)
if (err) {
printk(KERN_ERR "JBD2: error %d scanning journal\n", err);
++journal->j_transaction_sequence;
+ journal->j_head = journal->j_first;
} else {
#ifdef CONFIG_JBD2_DEBUG
int dropped = info.end_transaction -
be32_to_cpu(journal->j_superblock->s_sequence);
- jbd_debug(1,
+ jbd2_debug(1,
"JBD2: ignoring %d transaction%s from the journal.\n",
- dropped, (dropped == 1) ? "" : "s");
+ dropped, str_plural(dropped));
#endif
journal->j_transaction_sequence = ++info.end_transaction;
+ journal->j_head = info.head_block;
}
journal->j_tail = 0;
@@ -426,12 +440,33 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
h = buf;
provided = h->h_chksum[0];
h->h_chksum[0] = 0;
- calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+ calculated = jbd2_chksum(j->j_csum_seed, buf, j->j_blocksize);
h->h_chksum[0] = provided;
return provided == cpu_to_be32(calculated);
}
+static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf)
+{
+ struct commit_header *h;
+ __be32 provided;
+ __u32 calculated;
+ void *tmpbuf;
+
+ tmpbuf = kzalloc(j->j_blocksize, GFP_KERNEL);
+ if (!tmpbuf)
+ return false;
+
+ memcpy(tmpbuf, buf, sizeof(struct commit_header));
+ h = tmpbuf;
+ provided = h->h_chksum[0];
+ h->h_chksum[0] = 0;
+ calculated = jbd2_chksum(j->j_csum_seed, tmpbuf, j->j_blocksize);
+ kfree(tmpbuf);
+
+ return provided == cpu_to_be32(calculated);
+}
+
static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
journal_block_tag3_t *tag3,
void *buf, __u32 sequence)
@@ -443,8 +478,8 @@ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
return 1;
seq = cpu_to_be32(sequence);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
- csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
+ csum32 = jbd2_chksum(j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ csum32 = jbd2_chksum(csum32, buf, j->j_blocksize);
if (jbd2_has_feature_csum3(j))
return tag3->t_checksum == cpu_to_be32(csum32);
@@ -452,21 +487,116 @@ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
return tag->t_checksum == cpu_to_be16(csum32);
}
+static __always_inline int jbd2_do_replay(journal_t *journal,
+ struct recovery_info *info,
+ struct buffer_head *bh,
+ unsigned long *next_log_block,
+ unsigned int next_commit_ID)
+{
+ char *tagp;
+ int flags;
+ int ret = 0;
+ int tag_bytes = journal_tag_bytes(journal);
+ int descr_csum_size = 0;
+ unsigned long io_block;
+ journal_block_tag_t tag;
+ struct buffer_head *obh;
+ struct buffer_head *nbh;
+
+ if (jbd2_journal_has_csum_v2or3(journal))
+ descr_csum_size = sizeof(struct jbd2_journal_block_tail);
+
+ tagp = &bh->b_data[sizeof(journal_header_t)];
+ while (tagp - bh->b_data + tag_bytes <=
+ journal->j_blocksize - descr_csum_size) {
+ int err;
+
+ memcpy(&tag, tagp, sizeof(tag));
+ flags = be16_to_cpu(tag.t_flags);
+
+ io_block = (*next_log_block)++;
+ wrap(journal, *next_log_block);
+ err = jread(&obh, journal, io_block);
+ if (err) {
+ /* Recover what we can, but report failure at the end. */
+ ret = err;
+ pr_err("JBD2: IO error %d recovering block %lu in log\n",
+ err, io_block);
+ } else {
+ unsigned long long blocknr;
+
+ J_ASSERT(obh != NULL);
+ blocknr = read_tag_block(journal, &tag);
+
+ /* If the block has been revoked, then we're all done here. */
+ if (jbd2_journal_test_revoke(journal, blocknr,
+ next_commit_ID)) {
+ brelse(obh);
+ ++info->nr_revoke_hits;
+ goto skip_write;
+ }
+
+ /* Look for block corruption */
+ if (!jbd2_block_tag_csum_verify(journal, &tag,
+ (journal_block_tag3_t *)tagp,
+ obh->b_data, next_commit_ID)) {
+ brelse(obh);
+ ret = -EFSBADCRC;
+ pr_err("JBD2: Invalid checksum recovering data block %llu in journal block %lu\n",
+ blocknr, io_block);
+ goto skip_write;
+ }
+
+ /* Find a buffer for the new data being restored */
+ nbh = __getblk(journal->j_fs_dev, blocknr,
+ journal->j_blocksize);
+ if (nbh == NULL) {
+ pr_err("JBD2: Out of memory during recovery.\n");
+ brelse(obh);
+ return -ENOMEM;
+ }
+
+ lock_buffer(nbh);
+ memcpy(nbh->b_data, obh->b_data, journal->j_blocksize);
+ if (flags & JBD2_FLAG_ESCAPE) {
+ *((__be32 *)nbh->b_data) =
+ cpu_to_be32(JBD2_MAGIC_NUMBER);
+ }
+
+ BUFFER_TRACE(nbh, "marking dirty");
+ set_buffer_uptodate(nbh);
+ mark_buffer_dirty(nbh);
+ BUFFER_TRACE(nbh, "marking uptodate");
+ ++info->nr_replays;
+ unlock_buffer(nbh);
+ brelse(obh);
+ brelse(nbh);
+ }
+
+skip_write:
+ tagp += tag_bytes;
+ if (!(flags & JBD2_FLAG_SAME_UUID))
+ tagp += 16;
+
+ if (flags & JBD2_FLAG_LAST_TAG)
+ break;
+ }
+
+ return ret;
+}
+
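The JBD2_FLAG_ESCAPE branch above undoes journal block escaping: when a data block happens to begin with the journal magic, the commit path zeroes those first four bytes in the on-log copy and sets the flag, so replay must restore the magic. A minimal standalone sketch of both halves, assuming userspace C with htonl() standing in for cpu_to_be32() (not the kernel implementation itself):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() as a stand-in for cpu_to_be32() */

#define JBD2_MAGIC_NUMBER 0xc03b3998U

/* Escape before logging: returns nonzero if JBD2_FLAG_ESCAPE must be set. */
static int escape_block(unsigned char *blk)
{
	uint32_t magic = htonl(JBD2_MAGIC_NUMBER);

	if (memcmp(blk, &magic, sizeof(magic)) != 0)
		return 0;
	memset(blk, 0, sizeof(magic));	/* hide the magic in the log copy */
	return 1;
}

/* Un-escape during replay, mirroring the recovery code above. */
static void unescape_block(unsigned char *blk)
{
	uint32_t magic = htonl(JBD2_MAGIC_NUMBER);

	memcpy(blk, &magic, sizeof(magic));
}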
static int do_one_pass(journal_t *journal,
struct recovery_info *info, enum passtype pass)
{
unsigned int first_commit_ID, next_commit_ID;
- unsigned long next_log_block;
+ unsigned long next_log_block, head_block;
int err, success = 0;
journal_superblock_t * sb;
journal_header_t * tmp;
- struct buffer_head * bh;
+ struct buffer_head *bh = NULL;
unsigned int sequence;
int blocktype;
- int tag_bytes = journal_tag_bytes(journal);
__u32 crc32_sum = ~0; /* Transactional Checksums */
- int descr_csum_size = 0;
- int block_error = 0;
bool need_check_commit_time = false;
__u64 last_trans_commit_time = 0, commit_time;
@@ -479,12 +609,38 @@ static int do_one_pass(journal_t *journal,
sb = journal->j_superblock;
next_commit_ID = be32_to_cpu(sb->s_sequence);
next_log_block = be32_to_cpu(sb->s_start);
+ head_block = next_log_block;
first_commit_ID = next_commit_ID;
if (pass == PASS_SCAN)
info->start_transaction = first_commit_ID;
+ else if (pass == PASS_REVOKE) {
+ /*
+ * Would the default revoke table have too long hash chains
+ * during replay?
+ */
+ if (info->nr_revokes > JOURNAL_REVOKE_DEFAULT_HASH * 16) {
+ unsigned int hash_size;
+
+ /*
+ * Aim for average chain length of 8, limit at 1M
+ * entries to avoid problems with malicious
+ * filesystems.
+ */
+ hash_size = min(roundup_pow_of_two(info->nr_revokes / 8),
+ 1U << 20);
+ journal->j_revoke =
+ jbd2_journal_init_revoke_table(hash_size);
+ if (!journal->j_revoke) {
+ printk(KERN_ERR
+ "JBD2: failed to allocate revoke table for replay with %u entries. "
+ "Journal replay may be slow.\n", hash_size);
+ journal->j_revoke = journal->j_revoke_table[1];
+ }
+ }
+ }
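As a worked example of the sizing above: if PASS_SCAN counted 100000 revoke records, 100000 / 8 = 12500 and roundup_pow_of_two(12500) = 16384 hash buckets, comfortably below the 1 << 20 cap; the default table is kept whenever the count stays at or below JOURNAL_REVOKE_DEFAULT_HASH * 16.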
- jbd_debug(1, "Starting recovery pass %d\n", pass);
+ jbd2_debug(1, "Starting recovery pass %d\n", pass);
/*
* Now we walk through the log, transaction by transaction,
@@ -494,12 +650,6 @@ static int do_one_pass(journal_t *journal,
*/
while (1) {
- int flags;
- char * tagp;
- journal_block_tag_t tag;
- struct buffer_head * obh;
- struct buffer_head * nbh;
-
cond_resched();
/* If we already know where to stop the log traversal,
@@ -510,16 +660,16 @@ static int do_one_pass(journal_t *journal,
if (tid_geq(next_commit_ID, info->end_transaction))
break;
- jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
- next_commit_ID, next_log_block,
- jbd2_has_feature_fast_commit(journal) ?
- journal->j_fc_last : journal->j_last);
+ jbd2_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
+ next_commit_ID, next_log_block, journal->j_last);
/* Skip over each chunk of the transaction looking
* either the next descriptor block or the final commit
* record. */
- jbd_debug(3, "JBD2: checking block %ld\n", next_log_block);
+ jbd2_debug(3, "JBD2: checking block %ld\n", next_log_block);
+ brelse(bh);
+ bh = NULL;
err = jread(&bh, journal, next_log_block);
if (err)
goto failed;
@@ -535,20 +685,16 @@ static int do_one_pass(journal_t *journal,
tmp = (journal_header_t *)bh->b_data;
- if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
- brelse(bh);
+ if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER))
break;
- }
blocktype = be32_to_cpu(tmp->h_blocktype);
sequence = be32_to_cpu(tmp->h_sequence);
- jbd_debug(3, "Found magic %d, sequence %d\n",
+ jbd2_debug(3, "Found magic %d, sequence %d\n",
blocktype, sequence);
- if (sequence != next_commit_ID) {
- brelse(bh);
+ if (sequence != next_commit_ID)
break;
- }
/* OK, we have a valid descriptor block which matches
* all of the sequence number checks. What are we going
@@ -557,11 +703,7 @@ static int do_one_pass(journal_t *journal,
switch(blocktype) {
case JBD2_DESCRIPTOR_BLOCK:
/* Verify checksum first */
- if (jbd2_journal_has_csum_v2or3(journal))
- descr_csum_size =
- sizeof(struct jbd2_journal_block_tail);
- if (descr_csum_size > 0 &&
- !jbd2_descriptor_block_csum_verify(journal,
+ if (!jbd2_descriptor_block_csum_verify(journal,
bh->b_data)) {
/*
* PASS_SCAN can see stale blocks due to lazy
@@ -571,11 +713,10 @@ static int do_one_pass(journal_t *journal,
pr_err("JBD2: Invalid checksum recovering block %lu in log\n",
next_log_block);
err = -EFSBADCRC;
- brelse(bh);
goto failed;
}
need_check_commit_time = true;
- jbd_debug(1,
+ jbd2_debug(1,
"invalid descriptor block found in %lu\n",
next_log_block);
}
@@ -587,125 +728,39 @@ static int do_one_pass(journal_t *journal,
if (pass != PASS_REPLAY) {
if (pass == PASS_SCAN &&
jbd2_has_feature_checksum(journal) &&
- !need_check_commit_time &&
!info->end_transaction) {
if (calc_chksums(journal, bh,
&next_log_block,
- &crc32_sum)) {
- put_bh(bh);
+ &crc32_sum))
break;
- }
- put_bh(bh);
continue;
}
next_log_block += count_tags(journal, bh);
wrap(journal, next_log_block);
- put_bh(bh);
continue;
}
- /* A descriptor block: we can now write all of
- * the data blocks. Yay, useful work is finally
- * getting done here! */
-
- tagp = &bh->b_data[sizeof(journal_header_t)];
- while ((tagp - bh->b_data + tag_bytes)
- <= journal->j_blocksize - descr_csum_size) {
- unsigned long io_block;
-
- memcpy(&tag, tagp, sizeof(tag));
- flags = be16_to_cpu(tag.t_flags);
-
- io_block = next_log_block++;
- wrap(journal, next_log_block);
- err = jread(&obh, journal, io_block);
- if (err) {
- /* Recover what we can, but
- * report failure at the end. */
- success = err;
- printk(KERN_ERR
- "JBD2: IO error %d recovering "
- "block %ld in log\n",
- err, io_block);
- } else {
- unsigned long long blocknr;
-
- J_ASSERT(obh != NULL);
- blocknr = read_tag_block(journal,
- &tag);
-
- /* If the block has been
- * revoked, then we're all done
- * here. */
- if (jbd2_journal_test_revoke
- (journal, blocknr,
- next_commit_ID)) {
- brelse(obh);
- ++info->nr_revoke_hits;
- goto skip_write;
- }
-
- /* Look for block corruption */
- if (!jbd2_block_tag_csum_verify(
- journal, &tag, (journal_block_tag3_t *)tagp,
- obh->b_data, be32_to_cpu(tmp->h_sequence))) {
- brelse(obh);
- success = -EFSBADCRC;
- printk(KERN_ERR "JBD2: Invalid "
- "checksum recovering "
- "data block %llu in "
- "log\n", blocknr);
- block_error = 1;
- goto skip_write;
- }
-
- /* Find a buffer for the new
- * data being restored */
- nbh = __getblk(journal->j_fs_dev,
- blocknr,
- journal->j_blocksize);
- if (nbh == NULL) {
- printk(KERN_ERR
- "JBD2: Out of memory "
- "during recovery.\n");
- err = -ENOMEM;
- brelse(bh);
- brelse(obh);
- goto failed;
- }
-
- lock_buffer(nbh);
- memcpy(nbh->b_data, obh->b_data,
- journal->j_blocksize);
- if (flags & JBD2_FLAG_ESCAPE) {
- *((__be32 *)nbh->b_data) =
- cpu_to_be32(JBD2_MAGIC_NUMBER);
- }
-
- BUFFER_TRACE(nbh, "marking dirty");
- set_buffer_uptodate(nbh);
- mark_buffer_dirty(nbh);
- BUFFER_TRACE(nbh, "marking uptodate");
- ++info->nr_replays;
- /* ll_rw_block(WRITE, 1, &nbh); */
- unlock_buffer(nbh);
- brelse(obh);
- brelse(nbh);
- }
-
- skip_write:
- tagp += tag_bytes;
- if (!(flags & JBD2_FLAG_SAME_UUID))
- tagp += 16;
-
- if (flags & JBD2_FLAG_LAST_TAG)
- break;
+ /*
+ * A descriptor block: we can now write all of the
+ * data blocks. Yay, useful work is finally getting
+ * done here!
+ */
+ err = jbd2_do_replay(journal, info, bh, &next_log_block,
+ next_commit_ID);
+ if (err) {
+ if (err == -ENOMEM)
+ goto failed;
+ success = err;
}
- brelse(bh);
continue;
case JBD2_COMMIT_BLOCK:
+ if (pass != PASS_SCAN) {
+ next_commit_ID++;
+ continue;
+ }
+
/* How to differentiate between interrupted commit
* and journal corruption ?
*
@@ -750,7 +805,6 @@ static int do_one_pass(journal_t *journal,
pr_err("JBD2: Invalid checksum found in transaction %u\n",
next_commit_ID);
err = -EFSBADCRC;
- brelse(bh);
goto failed;
}
ignore_crc_mismatch:
@@ -758,9 +812,8 @@ static int do_one_pass(journal_t *journal,
* It likely does not belong to same journal,
* just end this recovery with success.
*/
- jbd_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
+ jbd2_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
next_commit_ID);
- brelse(bh);
goto done;
}
@@ -770,8 +823,7 @@ static int do_one_pass(journal_t *journal,
* much to do other than move on to the next sequence
* number.
*/
- if (pass == PASS_SCAN &&
- jbd2_has_feature_checksum(journal)) {
+ if (jbd2_has_feature_checksum(journal)) {
struct commit_header *cbh =
(struct commit_header *)bh->b_data;
unsigned found_chksum =
@@ -780,7 +832,6 @@ static int do_one_pass(journal_t *journal,
if (info->end_transaction) {
journal->j_failed_commit =
info->end_transaction;
- brelse(bh);
break;
}
@@ -796,63 +847,71 @@ static int do_one_pass(journal_t *journal,
goto chksum_error;
crc32_sum = ~0;
+ goto chksum_ok;
}
- if (pass == PASS_SCAN &&
- !jbd2_commit_block_csum_verify(journal,
- bh->b_data)) {
- chksum_error:
- if (commit_time < last_trans_commit_time)
- goto ignore_crc_mismatch;
- info->end_transaction = next_commit_ID;
-
- if (!jbd2_has_feature_async_commit(journal)) {
- journal->j_failed_commit =
- next_commit_ID;
- brelse(bh);
- break;
- }
+
+ if (jbd2_commit_block_csum_verify(journal, bh->b_data))
+ goto chksum_ok;
+
+ if (jbd2_commit_block_csum_verify_partial(journal,
+ bh->b_data)) {
+ pr_notice("JBD2: Find incomplete commit block in transaction %u block %lu\n",
+ next_commit_ID, next_log_block);
+ goto chksum_ok;
}
- if (pass == PASS_SCAN)
- last_trans_commit_time = commit_time;
- brelse(bh);
+
+chksum_error:
+ if (commit_time < last_trans_commit_time)
+ goto ignore_crc_mismatch;
+ info->end_transaction = next_commit_ID;
+ info->head_block = head_block;
+
+ if (!jbd2_has_feature_async_commit(journal)) {
+ journal->j_failed_commit = next_commit_ID;
+ break;
+ }
+
+chksum_ok:
+ last_trans_commit_time = commit_time;
+ head_block = next_log_block;
next_commit_ID++;
continue;
case JBD2_REVOKE_BLOCK:
/*
+ * If we aren't in the SCAN or REVOKE pass, then we can
+ * just skip over this block.
+ */
+ if (pass != PASS_REVOKE && pass != PASS_SCAN)
+ continue;
+
+ /*
* Check revoke block crc in pass_scan, if csum verify
* failed, check commit block time later.
*/
if (pass == PASS_SCAN &&
!jbd2_descriptor_block_csum_verify(journal,
bh->b_data)) {
- jbd_debug(1, "JBD2: invalid revoke block found in %lu\n",
+ jbd2_debug(1, "JBD2: invalid revoke block found in %lu\n",
next_log_block);
need_check_commit_time = true;
}
- /* If we aren't in the REVOKE pass, then we can
- * just skip over this block. */
- if (pass != PASS_REVOKE) {
- brelse(bh);
- continue;
- }
- err = scan_revoke_records(journal, bh,
+ err = scan_revoke_records(journal, pass, bh,
next_commit_ID, info);
- brelse(bh);
if (err)
goto failed;
continue;
default:
- jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
+ jbd2_debug(3, "Unrecognised magic %d, end of scan.\n",
blocktype);
- brelse(bh);
goto done;
}
}
done:
+ brelse(bh);
/*
* We broke out of the log scan loop: either we came to the
* known end of the log or we found an unexpected block in the
@@ -863,6 +922,8 @@ static int do_one_pass(journal_t *journal,
if (pass == PASS_SCAN) {
if (!info->end_transaction)
info->end_transaction = next_commit_ID;
+ if (!info->head_block)
+ info->head_block = head_block;
} else {
/* It's really bad news if different passes end up at
* different places (but possible due to IO errors). */
@@ -881,18 +942,18 @@ static int do_one_pass(journal_t *journal,
success = err;
}
- if (block_error && success == 0)
- success = -EIO;
return success;
failed:
+ brelse(bh);
return err;
}
/* Scan a revoke record, marking all blocks mentioned as revoked. */
-static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
- tid_t sequence, struct recovery_info *info)
+static int scan_revoke_records(journal_t *journal, enum passtype pass,
+ struct buffer_head *bh, tid_t sequence,
+ struct recovery_info *info)
{
jbd2_journal_revoke_header_t *header;
int offset, max;
@@ -913,6 +974,11 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
if (jbd2_has_feature_64bit(journal))
record_len = 8;
+ if (pass == PASS_SCAN) {
+ info->nr_revokes += (max - offset) / record_len;
+ return 0;
+ }
+
while (offset + record_len <= max) {
unsigned long long blocknr;
int err;
@@ -925,7 +991,6 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
err = jbd2_journal_set_revoke(journal, blocknr, sequence);
if (err)
return err;
- ++info->nr_revokes;
}
return 0;
}
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index fa608788b93d..1467f6790747 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -215,7 +215,7 @@ int __init jbd2_journal_init_revoke_table_cache(void)
return 0;
}
-static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
+struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
int shift = 0;
int tmp = hash_size;
@@ -231,7 +231,7 @@ static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
table->hash_size = hash_size;
table->hash_shift = shift;
table->hash_table =
- kmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL);
+ kvmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL);
if (!table->hash_table) {
kmem_cache_free(jbd2_revoke_table_cache, table);
table = NULL;
@@ -245,7 +245,7 @@ out:
return table;
}
-static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
+void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
int i;
struct list_head *hash_list;
@@ -255,7 +255,7 @@ static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
J_ASSERT(list_empty(hash_list));
}
- kfree(table->hash_table);
+ kvfree(table->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, table);
}
@@ -345,7 +345,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
bh = bh_in;
if (!bh) {
- bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
+ bh = __find_get_block_nonatomic(bdev, blocknr,
+ journal->j_blocksize);
if (bh)
BUFFER_TRACE(bh, "found on hash");
}
@@ -355,7 +356,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
/* If there is a different buffer_head lying around in
* memory anywhere... */
- bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
+ bh2 = __find_get_block_nonatomic(bdev, blocknr,
+ journal->j_blocksize);
if (bh2) {
/* ... and it has RevokeValid status... */
if (bh2 != bh && buffer_revokevalid(bh2))
@@ -398,7 +400,7 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
}
handle->h_revoke_credits--;
- jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
+ jbd2_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
err = insert_revoke_hash(journal, blocknr,
handle->h_transaction->t_tid);
BUFFER_TRACE(bh_in, "exit");
@@ -420,15 +422,14 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
* do not trust the Revoked bit on buffers unless RevokeValid is also
* set.
*/
-int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+void jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
struct jbd2_revoke_record_s *record;
journal_t *journal = handle->h_transaction->t_journal;
int need_cancel;
- int did_revoke = 0; /* akpm: debug */
struct buffer_head *bh = jh2bh(jh);
- jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
+ jbd2_debug(4, "journal_head %p, cancelling revoke\n", jh);
/* Is the existing Revoke bit valid? If so, we trust it, and
* only perform the full cancel if the revoke bit is set. If
@@ -444,13 +445,12 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
if (need_cancel) {
record = find_revoke_record(journal, bh->b_blocknr);
if (record) {
- jbd_debug(4, "cancelled existing revoke on "
+ jbd2_debug(4, "cancelled existing revoke on "
"blocknr %llu\n", (unsigned long long)bh->b_blocknr);
spin_lock(&journal->j_revoke_lock);
list_del(&record->hash);
spin_unlock(&journal->j_revoke_lock);
kmem_cache_free(jbd2_revoke_record_cache, record);
- did_revoke = 1;
}
}
@@ -466,18 +466,18 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
* state machine will get very upset later on. */
if (need_cancel) {
struct buffer_head *bh2;
- bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
+ bh2 = __find_get_block_nonatomic(bh->b_bdev, bh->b_blocknr,
+ bh->b_size);
if (bh2) {
if (bh2 != bh)
clear_buffer_revoked(bh2);
__brelse(bh2);
}
}
- return did_revoke;
}
/*
- * journal_clear_revoked_flag clears revoked flag of buffers in
+ * jbd2_clear_buffer_revoked_flags clears revoked flag of buffers in
* revoke table to reflect there is no revoked buffers in the next
* transaction which is going to be started.
*/
@@ -495,9 +495,9 @@ void jbd2_clear_buffer_revoked_flags(journal_t *journal)
struct jbd2_revoke_record_s *record;
struct buffer_head *bh;
record = (struct jbd2_revoke_record_s *)list_entry;
- bh = __find_get_block(journal->j_fs_dev,
- record->blocknr,
- journal->j_blocksize);
+ bh = __find_get_block_nonatomic(journal->j_fs_dev,
+ record->blocknr,
+ journal->j_blocksize);
if (bh) {
clear_buffer_revoked(bh);
__brelse(bh);
@@ -506,9 +506,9 @@ void jbd2_clear_buffer_revoked_flags(journal_t *journal)
}
}
-/* journal_switch_revoke table select j_revoke for next transaction
- * we do not want to suspend any processing until all revokes are
- * written -bzzz
+/* jbd2_journal_switch_revoke_table selects j_revoke for the next
+ * transaction; we do not want to suspend any processing until all
+ * revokes are written -bzzz
*/
void jbd2_journal_switch_revoke_table(journal_t *journal)
{
@@ -560,7 +560,7 @@ void jbd2_journal_write_revoke_records(transaction_t *transaction,
}
if (descriptor)
flush_descriptor(journal, descriptor, offset);
- jbd_debug(1, "Wrote %d revoke records\n", count);
+ jbd2_debug(1, "Wrote %d revoke records\n", count);
}
/*
@@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal,
set_buffer_jwrite(descriptor);
BUFFER_TRACE(descriptor, "write");
set_buffer_dirty(descriptor);
- write_dirty_buffer(descriptor, REQ_SYNC);
+ write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS);
}
#endif
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6a3caedd2285..dca4b5d8aaaa 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -63,28 +63,6 @@ void jbd2_journal_free_transaction(transaction_t *transaction)
}
/*
- * Base amount of descriptor blocks we reserve for each transaction.
- */
-static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
-{
- int tag_space = journal->j_blocksize - sizeof(journal_header_t);
- int tags_per_block;
-
- /* Subtract UUID */
- tag_space -= 16;
- if (jbd2_journal_has_csum_v2or3(journal))
- tag_space -= sizeof(struct jbd2_journal_block_tail);
- /* Commit code leaves a slack space of 16 bytes at the end of block */
- tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
- /*
- * Revoke descriptors are accounted separately so we need to reserve
- * space for commit block and normal transaction descriptor blocks.
- */
- return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
- tags_per_block);
-}
-
-/*
* jbd2_get_transaction: obtain a new transaction_t object.
*
* Simply initialise a new transaction. Initialize it in
@@ -107,15 +85,13 @@ static void jbd2_get_transaction(journal_t *journal,
transaction->t_start_time = ktime_get();
transaction->t_tid = journal->j_transaction_sequence++;
transaction->t_expires = jiffies + journal->j_commit_interval;
- spin_lock_init(&transaction->t_handle_lock);
atomic_set(&transaction->t_updates, 0);
atomic_set(&transaction->t_outstanding_credits,
- jbd2_descriptor_blocks_per_trans(journal) +
+ journal->j_transaction_overhead_buffers +
atomic_read(&journal->j_reserved_credits));
atomic_set(&transaction->t_outstanding_revokes, 0);
atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
- INIT_LIST_HEAD(&transaction->t_private_list);
/* Set up the commit timer for the new transaction. */
journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
@@ -137,28 +113,21 @@ static void jbd2_get_transaction(journal_t *journal,
*/
/*
- * Update transaction's maximum wait time, if debugging is enabled.
- *
- * In order for t_max_wait to be reliable, it must be protected by a
- * lock. But doing so will mean that start_this_handle() can not be
- * run in parallel on SMP systems, which limits our scalability. So
- * unless debugging is enabled, we no longer update t_max_wait, which
- * means that maximum wait time reported by the jbd2_run_stats
- * tracepoint will always be zero.
+ * t_max_wait is carefully updated here with use of atomic compare exchange.
+ * Note that there could be multiple threads trying to do this simultaneously,
+ * hence using cmpxchg to avoid any use of locks in this case.
*/
static inline void update_t_max_wait(transaction_t *transaction,
unsigned long ts)
{
-#ifdef CONFIG_JBD2_DEBUG
- if (jbd2_journal_enable_debug &&
- time_after(transaction->t_start, ts)) {
- ts = jbd2_time_diff(ts, transaction->t_start);
- spin_lock(&transaction->t_handle_lock);
- if (ts > transaction->t_max_wait)
- transaction->t_max_wait = ts;
- spin_unlock(&transaction->t_handle_lock);
+ unsigned long oldts, newts;
+
+ if (time_after(transaction->t_start, ts)) {
+ newts = jbd2_time_diff(ts, transaction->t_start);
+ oldts = READ_ONCE(transaction->t_max_wait);
+ while (oldts < newts)
+ oldts = cmpxchg(&transaction->t_max_wait, oldts, newts);
}
-#endif
}
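The loop above is the usual lockless "store maximum" idiom. A minimal standalone sketch of the same pattern with C11 atomics rather than the kernel's cmpxchg() (hypothetical stat_max counter, not the jbd2 code itself):

#include <stdatomic.h>

/* Publish a new sample, keeping *stat_max equal to the largest value seen.
 * Racing threads retry: on CAS failure, cur is reloaded with the latest
 * value and the loop re-checks whether our sample is still larger. */
static void record_max(_Atomic unsigned long *stat_max, unsigned long sample)
{
	unsigned long cur = atomic_load(stat_max);

	while (cur < sample &&
	       !atomic_compare_exchange_weak(stat_max, &cur, sample))
		;
}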
/*
@@ -173,7 +142,7 @@ static void wait_transaction_locked(journal_t *journal)
int need_to_start;
tid_t tid = journal->j_running_transaction->t_tid;
- prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
need_to_start = !tid_geq(journal->j_commit_request, tid);
read_unlock(&journal->j_state_lock);
@@ -199,7 +168,7 @@ static void wait_transaction_switching(journal_t *journal)
read_unlock(&journal->j_state_lock);
return;
}
- prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
read_unlock(&journal->j_state_lock);
/*
@@ -218,6 +187,13 @@ static void sub_reserved_credits(journal_t *journal, int blocks)
wake_up(&journal->j_wait_reserved);
}
+/* Maximum number of blocks for user transaction payload */
+static int jbd2_max_user_trans_buffers(journal_t *journal)
+{
+ return journal->j_max_transaction_buffers -
+ journal->j_transaction_overhead_buffers;
+}
+
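As a rough worked example with hypothetical values: if j_max_transaction_buffers is 8192 and j_transaction_overhead_buffers is 130, a user handle sees 8192 - 130 = 8062 usable credits, and reserved handles together may hold at most 8062 / 2 = 4031 of them.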
/*
* Wait until we can add credits for handle to the running transaction. Called
* with j_state_lock held for reading. Returns 0 if handle joined the running
@@ -267,12 +243,12 @@ __must_hold(&journal->j_state_lock)
* big to fit this handle? Wait until reserved credits are freed.
*/
if (atomic_read(&journal->j_reserved_credits) + total >
- journal->j_max_transaction_buffers) {
+ jbd2_max_user_trans_buffers(journal)) {
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
wait_event(journal->j_wait_reserved,
atomic_read(&journal->j_reserved_credits) + total <=
- journal->j_max_transaction_buffers);
+ jbd2_max_user_trans_buffers(journal));
__acquire(&journal->j_state_lock); /* fake out sparse */
return 1;
}
@@ -312,14 +288,14 @@ __must_hold(&journal->j_state_lock)
needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
/* We allow at most half of a transaction to be reserved */
- if (needed > journal->j_max_transaction_buffers / 2) {
+ if (needed > jbd2_max_user_trans_buffers(journal) / 2) {
sub_reserved_credits(journal, rsv_blocks);
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
wait_event(journal->j_wait_reserved,
atomic_read(&journal->j_reserved_credits) + rsv_blocks
- <= journal->j_max_transaction_buffers / 2);
+ <= jbd2_max_user_trans_buffers(journal) / 2);
__acquire(&journal->j_state_lock); /* fake out sparse */
return 1;
}
@@ -349,12 +325,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
* size and limit the number of total credits to not exceed maximum
* transaction size per operation.
*/
- if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
- (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
+ if (rsv_blocks > jbd2_max_user_trans_buffers(journal) / 2 ||
+ rsv_blocks + blocks > jbd2_max_user_trans_buffers(journal)) {
printk(KERN_ERR "JBD2: %s wants too many credits "
"credits:%d rsv_credits:%d max:%d\n",
current->comm, blocks, rsv_blocks,
- journal->j_max_transaction_buffers);
+ jbd2_max_user_trans_buffers(journal));
WARN_ON(1);
return -ENOSPC;
}
@@ -378,7 +354,7 @@ alloc_transaction:
return -ENOMEM;
}
- jbd_debug(3, "New handle %p going live.\n", handle);
+ jbd2_debug(3, "New handle %p going live.\n", handle);
/*
* We need to hold j_state_lock until t_updates has been incremented,
@@ -449,7 +425,7 @@ repeat:
}
/* OK, account for the buffers that this operation expects to
- * use and add the handle to the running transaction.
+ * use and add the handle to the running transaction.
*/
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
@@ -458,14 +434,14 @@ repeat:
handle->h_start_jiffies = jiffies;
atomic_inc(&transaction->t_updates);
atomic_inc(&transaction->t_handle_count);
- jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
+ jbd2_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
handle, blocks,
atomic_read(&transaction->t_outstanding_credits),
jbd2_log_space_left(journal));
read_unlock(&journal->j_state_lock);
current->journal_info = handle;
- rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
+ rwsem_acquire_read(&journal->j_trans_commit_map, 0, 1, _THIS_IP_);
jbd2_journal_free_transaction(new_transaction);
/*
* Ensure that no allocations done while the transaction is open are
@@ -679,7 +655,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
/* Don't extend a locked-down transaction! */
if (transaction->t_state != T_RUNNING) {
- jbd_debug(3, "denied handle %p %d blocks: "
+ jbd2_debug(3, "denied handle %p %d blocks: "
"transaction not running\n", handle, nblocks);
goto error_out;
}
@@ -690,15 +666,14 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
DIV_ROUND_UP(
handle->h_revoke_credits_requested,
journal->j_revoke_records_per_block);
- spin_lock(&transaction->t_handle_lock);
wanted = atomic_add_return(nblocks,
&transaction->t_outstanding_credits);
if (wanted > journal->j_max_transaction_buffers) {
- jbd_debug(3, "denied handle %p %d blocks: "
+ jbd2_debug(3, "denied handle %p %d blocks: "
"transaction too large\n", handle, nblocks);
atomic_sub(nblocks, &transaction->t_outstanding_credits);
- goto unlock;
+ goto error_out;
}
trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
@@ -713,9 +688,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
handle->h_revoke_credits_requested += revoke_records;
result = 0;
- jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
-unlock:
- spin_unlock(&transaction->t_handle_lock);
+ jbd2_debug(3, "extended handle %p by %d\n", handle, nblocks);
error_out:
read_unlock(&journal->j_state_lock);
return result;
@@ -803,7 +776,7 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
* First unlink the handle from its current transaction, and start the
* commit on that.
*/
- jbd_debug(2, "restarting handle %p\n", handle);
+ jbd2_debug(2, "restarting handle %p\n", handle);
stop_this_handle(handle);
handle->h_transaction = NULL;
@@ -836,6 +809,43 @@ int jbd2_journal_restart(handle_t *handle, int nblocks)
}
EXPORT_SYMBOL(jbd2_journal_restart);
+/*
+ * Waits for any outstanding t_updates to finish.
+ * This is called with write j_state_lock held.
+ */
+void jbd2_journal_wait_updates(journal_t *journal)
+{
+ DEFINE_WAIT(wait);
+
+ while (1) {
+ /*
+ * Note that the running transaction can get freed under us if
+ * this transaction is getting committed in
+ * jbd2_journal_commit_transaction() ->
+ * jbd2_journal_free_transaction(). This can only happen when we
+ * release j_state_lock -> schedule() -> acquire j_state_lock.
+ * Hence we should retrieve a fresh j_running_transaction value every
+ * time (after each j_state_lock release/acquire cycle), else it may
+ * lead to a use-after-free of the old freed transaction.
+ */
+ transaction_t *transaction = journal->j_running_transaction;
+
+ if (!transaction)
+ break;
+
+ prepare_to_wait(&journal->j_wait_updates, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!atomic_read(&transaction->t_updates)) {
+ finish_wait(&journal->j_wait_updates, &wait);
+ break;
+ }
+ write_unlock(&journal->j_state_lock);
+ schedule();
+ finish_wait(&journal->j_wait_updates, &wait);
+ write_lock(&journal->j_state_lock);
+ }
+}
+
/**
* jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
@@ -848,8 +858,6 @@ EXPORT_SYMBOL(jbd2_journal_restart);
*/
void jbd2_journal_lock_updates(journal_t *journal)
{
- DEFINE_WAIT(wait);
-
jbd2_might_wait_for_commit(journal);
write_lock(&journal->j_state_lock);
@@ -863,27 +871,9 @@ void jbd2_journal_lock_updates(journal_t *journal)
write_lock(&journal->j_state_lock);
}
- /* Wait until there are no running updates */
- while (1) {
- transaction_t *transaction = journal->j_running_transaction;
-
- if (!transaction)
- break;
+ /* Wait until there are no running t_updates */
+ jbd2_journal_wait_updates(journal);
- spin_lock(&transaction->t_handle_lock);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&transaction->t_updates)) {
- spin_unlock(&transaction->t_handle_lock);
- finish_wait(&journal->j_wait_updates, &wait);
- break;
- }
- spin_unlock(&transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- finish_wait(&journal->j_wait_updates, &wait);
- write_lock(&journal->j_state_lock);
- }
write_unlock(&journal->j_state_lock);
/*
@@ -911,7 +901,7 @@ void jbd2_journal_unlock_updates (journal_t *journal)
write_lock(&journal->j_state_lock);
--journal->j_barrier_count;
write_unlock(&journal->j_state_lock);
- wake_up(&journal->j_wait_transaction_locked);
+ wake_up_all(&journal->j_wait_transaction_locked);
}
static void warn_dirty_buffer(struct buffer_head *bh)
@@ -926,19 +916,15 @@ static void warn_dirty_buffer(struct buffer_head *bh)
/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
- struct page *page;
- int offset;
char *source;
struct buffer_head *bh = jh2bh(jh);
J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
- page = bh->b_page;
- offset = offset_in_page(bh->b_data);
- source = kmap_atomic(page);
+ source = kmap_local_folio(bh->b_folio, bh_offset(bh));
/* Fire data frozen trigger just before we copy the data */
- jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
- memcpy(jh->b_frozen_data, source + offset, bh->b_size);
- kunmap_atomic(source);
+ jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers);
+ memcpy(jh->b_frozen_data, source, bh->b_size);
+ kunmap_local(source);
/*
* Now that the frozen data is saved off, we need to store any matching
@@ -970,7 +956,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
journal = transaction->t_journal;
- jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
+ jbd2_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
JBUFFER_TRACE(jh, "entry");
repeat:
@@ -1001,36 +987,28 @@ repeat:
* ie. locked but not dirty) or tune2fs (which may actually have
* the buffer dirtied, ugh.) */
- if (buffer_dirty(bh)) {
- /*
- * First question: is this buffer already part of the current
- * transaction or the existing committing transaction?
- */
- if (jh->b_transaction) {
- J_ASSERT_JH(jh,
- jh->b_transaction == transaction ||
- jh->b_transaction ==
- journal->j_committing_transaction);
- if (jh->b_next_transaction)
- J_ASSERT_JH(jh, jh->b_next_transaction ==
- transaction);
- warn_dirty_buffer(bh);
- }
+ if (buffer_dirty(bh) && jh->b_transaction) {
+ warn_dirty_buffer(bh);
/*
- * In any case we need to clean the dirty flag and we must
- * do it under the buffer lock to be sure we don't race
- * with running write-out.
+ * We need to clean the dirty flag and we must do it under the
+ * buffer lock to be sure we don't race with running write-out.
*/
JBUFFER_TRACE(jh, "Journalling dirty buffer");
clear_buffer_dirty(bh);
+ /*
+ * The buffer is going to be added to BJ_Reserved list now and
+ * nothing guarantees jbd2_journal_dirty_metadata() will be
+ * ever called for it. So we need to set jbddirty bit here to
+ * make sure the buffer is dirtied and written out when the
+ * journaling machinery is done with it.
+ */
set_buffer_jbddirty(bh);
}
- unlock_buffer(bh);
-
error = -EROFS;
if (is_handle_aborted(handle)) {
spin_unlock(&jh->b_state_lock);
+ unlock_buffer(bh);
goto out;
}
error = 0;
@@ -1040,8 +1018,10 @@ repeat:
* b_next_transaction points to it
*/
if (jh->b_transaction == transaction ||
- jh->b_next_transaction == transaction)
+ jh->b_next_transaction == transaction) {
+ unlock_buffer(bh);
goto done;
+ }
/*
* this is the first time this transaction is touching this buffer,
@@ -1065,10 +1045,24 @@ repeat:
*/
smp_wmb();
spin_lock(&journal->j_list_lock);
+ if (test_clear_buffer_dirty(bh)) {
+ /*
+ * Clear the buffer dirty bit and assign jh->b_transaction with
+ * journal->j_list_lock held, to prevent the bh from being removed
+ * from the checkpoint list while the buffer is in an intermediate
+ * state (not dirty and jh->b_transaction is NULL).
+ */
+ JBUFFER_TRACE(jh, "Journalling dirty buffer");
+ set_buffer_jbddirty(bh);
+ }
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
spin_unlock(&journal->j_list_lock);
+ unlock_buffer(bh);
goto done;
}
+ unlock_buffer(bh);
+
/*
* If there is already a copy-out version of this buffer, then we don't
* need to make another one
@@ -1218,11 +1212,26 @@ out:
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
struct journal_head *jh;
+ journal_t *journal;
int rc;
if (is_handle_aborted(handle))
return -EROFS;
+ journal = handle->h_transaction->t_journal;
+ rc = jbd2_check_fs_dev_write_error(journal);
+ if (rc) {
+ /*
+ * If the fs dev has writeback errors, it may have failed
+ * to async write out metadata buffers in the background.
+ * In this case, we could read old data from disk and write
+ * it out again, which may lead to on-disk filesystem
+ * inconsistency. Aborting the journal avoids this.
+ */
+ jbd2_journal_abort(journal, rc);
+ return -EIO;
+ }
+
if (jbd2_write_access_granted(handle, bh, false))
return 0;
@@ -1262,7 +1271,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int err;
- jbd_debug(5, "journal_head %p\n", jh);
+ jbd2_debug(5, "journal_head %p\n", jh);
err = -EROFS;
if (is_handle_aborted(handle))
goto out;
@@ -1276,14 +1285,23 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* committing transaction's lists, but it HAS to be in Forget state in
* that case: the transaction must have deleted the buffer for it to be
* reused here.
+ * In the case of file system data inconsistency, for example, if the
+ * block bitmap of a referenced block is not set, it can lead to the
+ * situation where a block being committed is allocated and used again.
+ * As a result, the following condition will not be satisfied, so here
+ * we trigger a JBD2 abort directly instead of immediately
+ * hitting a BUG_ON().
*/
spin_lock(&jh->b_state_lock);
- J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
- jh->b_transaction == NULL ||
- (jh->b_transaction == journal->j_committing_transaction &&
- jh->b_jlist == BJ_Forget)));
+ if (!(jh->b_transaction == transaction || jh->b_transaction == NULL ||
+ (jh->b_transaction == journal->j_committing_transaction &&
+ jh->b_jlist == BJ_Forget)) || jh->b_next_transaction != NULL) {
+ err = -EROFS;
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_abort(journal, err);
+ goto out;
+ }
- J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
if (jh->b_transaction == NULL) {
@@ -1477,8 +1495,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int ret = 0;
- if (is_handle_aborted(handle))
- return -EROFS;
if (!buffer_jbd(bh))
return -EUCLEAN;
@@ -1487,7 +1503,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the running transaction.
*/
jh = bh2jh(bh);
- jbd_debug(5, "journal_head %p\n", jh);
+ jbd2_debug(5, "journal_head %p\n", jh);
JBUFFER_TRACE(jh, "entry");
/*
@@ -1503,7 +1519,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
jh->b_next_transaction == transaction);
spin_unlock(&jh->b_state_lock);
}
- if (jh->b_modified == 1) {
+ if (data_race(jh->b_modified == 1)) {
/* If it's in our transaction it must be in BJ_Metadata list. */
if (data_race(jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata)) {
@@ -1522,9 +1538,22 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
goto out;
}
- journal = transaction->t_journal;
spin_lock(&jh->b_state_lock);
+ if (is_handle_aborted(handle)) {
+ /*
+ * Check journal aborting with @jh->b_state_lock locked,
+ * since 'jh->b_transaction' could be replaced with
+ * 'jh->b_next_transaction' during old transaction
+ * committing if journal aborted, which may fail
+ * assertion on 'jh->b_frozen_data == NULL'.
+ */
+ ret = -EROFS;
+ goto out_unlock_bh;
+ }
+
+ journal = transaction->t_journal;
+
if (jh->b_modified == 0) {
/*
* This buffer's got modified and becoming part
@@ -1640,6 +1669,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
+ int wait_for_writeback = 0;
if (is_handle_aborted(handle))
return -EROFS;
@@ -1757,25 +1787,28 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
* Otherwise, if the buffer has been written to disk,
* it is safe to remove the checkpoint and drop it.
*/
- if (!buffer_dirty(bh)) {
- __jbd2_journal_remove_checkpoint(jh);
+ if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
spin_unlock(&journal->j_list_lock);
goto drop;
}
/*
- * The buffer is still not written to disk, we should
- * attach this buffer to current transaction so that the
- * buffer can be checkpointed only after the current
- * transaction commits.
+ * The buffer has not yet been written to disk. We should
+ * either clear the buffer or ensure that the ongoing I/O
+ * is completed, and attach this buffer to current
+ * transaction so that the buffer can be checkpointed only
+ * after the current transaction commits.
*/
clear_buffer_dirty(bh);
+ wait_for_writeback = 1;
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
spin_unlock(&journal->j_list_lock);
}
drop:
__brelse(bh);
spin_unlock(&jh->b_state_lock);
+ if (wait_for_writeback)
+ wait_on_buffer(bh);
jbd2_journal_put_journal_head(jh);
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */
@@ -1809,7 +1842,7 @@ int jbd2_journal_stop(handle_t *handle)
pid_t pid;
if (--handle->h_ref > 0) {
- jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+ jbd2_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
handle->h_ref);
if (is_handle_aborted(handle))
return -EIO;
@@ -1829,7 +1862,7 @@ int jbd2_journal_stop(handle_t *handle)
if (is_handle_aborted(handle))
err = -EIO;
- jbd_debug(4, "Handle %p going down\n", handle);
+ jbd2_debug(4, "Handle %p going down\n", handle);
trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
tid, handle->h_type, handle->h_line_no,
jiffies - handle->h_start_jiffies,
@@ -1907,7 +1940,7 @@ int jbd2_journal_stop(handle_t *handle)
* completes the commit thread, it just doesn't write
* anything to disk. */
- jbd_debug(2, "transaction too old, requesting commit for "
+ jbd2_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
jbd2_log_start_commit(journal, tid);
@@ -2058,54 +2091,10 @@ static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
jh->b_transaction = NULL;
}
-void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
-{
- struct buffer_head *bh = jh2bh(jh);
-
- /* Get reference so that buffer cannot be freed before we unlock it */
- get_bh(bh);
- spin_lock(&jh->b_state_lock);
- spin_lock(&journal->j_list_lock);
- __jbd2_journal_unfile_buffer(jh);
- spin_unlock(&journal->j_list_lock);
- spin_unlock(&jh->b_state_lock);
- jbd2_journal_put_journal_head(jh);
- __brelse(bh);
-}
-
-/*
- * Called from jbd2_journal_try_to_free_buffers().
- *
- * Called under jh->b_state_lock
- */
-static void
-__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
-{
- struct journal_head *jh;
-
- jh = bh2jh(bh);
-
- if (buffer_locked(bh) || buffer_dirty(bh))
- goto out;
-
- if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
- goto out;
-
- spin_lock(&journal->j_list_lock);
- if (jh->b_cp_transaction != NULL) {
- /* written-back checkpointed metadata buffer */
- JBUFFER_TRACE(jh, "remove from checkpoint list");
- __jbd2_journal_remove_checkpoint(jh);
- }
- spin_unlock(&journal->j_list_lock);
-out:
- return;
-}
-
/**
* jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
- * @page: to try and free
+ * @folio: Folio to detach data from.
*
* For all the buffers on this page,
* if they are fully written out ordered data, move them onto BUF_CLEAN
@@ -2134,17 +2123,17 @@ out:
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*
- * Return 0 on failure, 1 on success
+ * Return false on failure, true on success
*/
-int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
{
struct buffer_head *head;
struct buffer_head *bh;
- int ret = 0;
+ bool ret = false;
- J_ASSERT(PageLocked(page));
+ J_ASSERT(folio_test_locked(folio));
- head = page_buffers(page);
+ head = folio_buffers(folio);
bh = head;
do {
struct journal_head *jh;
@@ -2159,14 +2148,20 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
continue;
spin_lock(&jh->b_state_lock);
- __journal_try_to_free_buffer(journal, bh);
+ if (!jh->b_transaction && !jh->b_next_transaction) {
+ spin_lock(&journal->j_list_lock);
+ /* Remove written-back checkpointed metadata buffer */
+ if (jh->b_cp_transaction != NULL)
+ jbd2_journal_try_remove_checkpoint(jh);
+ spin_unlock(&journal->j_list_lock);
+ }
spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
if (buffer_jbd(bh))
goto busy;
} while ((bh = bh->b_this_page) != head);
- ret = try_to_free_buffers(page);
+ ret = try_to_free_buffers(folio);
busy:
return ret;
}
@@ -2194,7 +2189,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
/*
* We don't want to write the buffer anymore, clear the
* bit so that we don't confuse checks in
- * __journal_file_buffer
+ * __jbd2_journal_file_buffer
*/
clear_buffer_dirty(bh);
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
@@ -2208,14 +2203,14 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
}
/*
- * jbd2_journal_invalidatepage
+ * jbd2_journal_invalidate_folio
*
* This code is tricky. It has a number of cases to deal with.
*
* There are two invariants which this code relies on:
*
- * i_size must be updated on disk before we start calling invalidatepage on the
- * data.
+ * i_size must be updated on disk before we start calling invalidate_folio
+ * on the data.
*
* This is done in ext3 by defining an ext3_setattr method which
* updates i_size before truncate gets going. By maintaining this
@@ -2360,6 +2355,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
jbd2_journal_put_journal_head(jh);
+ /* Already zapped buffer? Nothing to do... */
+ if (!bh->b_bdev)
+ return 0;
return -EBUSY;
}
/*
@@ -2417,9 +2415,9 @@ zap_buffer_unlocked:
}
/**
- * jbd2_journal_invalidatepage()
+ * jbd2_journal_invalidate_folio()
* @journal: journal to use for flush...
- * @page: page to flush
+ * @folio: folio to flush
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
@@ -2428,30 +2426,29 @@ zap_buffer_unlocked:
* the page is straddling i_size. Caller then has to wait for current commit
* and try again.
*/
-int jbd2_journal_invalidatepage(journal_t *journal,
- struct page *page,
- unsigned int offset,
- unsigned int length)
+int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
+ size_t offset, size_t length)
{
struct buffer_head *head, *bh, *next;
unsigned int stop = offset + length;
unsigned int curr_off = 0;
- int partial_page = (offset || length < PAGE_SIZE);
+ int partial_page = (offset || length < folio_size(folio));
int may_free = 1;
int ret = 0;
- if (!PageLocked(page))
+ if (!folio_test_locked(folio))
BUG();
- if (!page_has_buffers(page))
+ head = folio_buffers(folio);
+ if (!head)
return 0;
- BUG_ON(stop > PAGE_SIZE || stop < length);
+ BUG_ON(stop > folio_size(folio) || stop < length);
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
* cautious in our locking. */
- head = bh = page_buffers(page);
+ bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
@@ -2474,8 +2471,8 @@ int jbd2_journal_invalidatepage(journal_t *journal,
} while (bh != head);
if (!partial_page) {
- if (may_free && try_to_free_buffers(page))
- J_ASSERT(!page_has_buffers(page));
+ if (may_free && try_to_free_buffers(folio))
+ J_ASSERT(!folio_buffers(folio));
}
return 0;
}
@@ -2654,7 +2651,7 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
return -EROFS;
journal = transaction->t_journal;
- jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
+ jbd2_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
transaction->t_tid);
spin_lock(&journal->j_list_lock);
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig
index 7c96bc107218..560187d61562 100644
--- a/fs/jffs2/Kconfig
+++ b/fs/jffs2/Kconfig
@@ -151,8 +151,9 @@ config JFFS2_RUBIN
RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure.
choice
- prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS
+ prompt "JFFS2 default compression mode"
default JFFS2_CMODE_PRIORITY
+ depends on JFFS2_COMPRESSION_OPTIONS
depends on JFFS2_FS
help
You can set here the default compression mode of JFFS2 from
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index e945e3484788..888a7ceb6479 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -229,10 +229,11 @@ static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *a
return rc;
}
-int jffs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int jffs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
int rc, xprefix;
+ struct inode *inode = d_inode(dentry);
switch (type) {
case ACL_TYPE_ACCESS:
@@ -240,7 +241,7 @@ int jffs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
if (acl) {
umode_t mode;
- rc = posix_acl_update_mode(&init_user_ns, inode, &mode,
+ rc = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode,
&acl);
if (rc)
return rc;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 9d9fb7cf093e..e976b8cb82cf 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,7 @@ struct jffs2_acl_header {
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
struct posix_acl *jffs2_get_acl(struct inode *inode, int type, bool rcu);
-int jffs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int jffs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *);
extern int jffs2_init_acl_post(struct inode *);
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 2b4d5013dc5d..bb0ee1a59e71 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -44,8 +44,8 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
if (IS_ERR(tsk)) {
- pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
- -PTR_ERR(tsk));
+ pr_warn("fork failed for JFFS2 garbage collect thread: %pe\n",
+ tsk);
complete(&c->gc_thread_exit);
ret = PTR_ERR(tsk);
} else {
@@ -161,5 +161,5 @@ static int jffs2_garbage_collect_thread(void *_c)
spin_lock(&c->erase_completion_lock);
c->gc_task = NULL;
spin_unlock(&c->erase_completion_lock);
- complete_and_exit(&c->gc_thread_exit, 0);
+ kthread_complete_and_exit(&c->gc_thread_exit, 0);
}
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index b288c8ae1236..6ae9d6fefb86 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -211,7 +211,10 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
ic->scan_dents = NULL;
cond_resched();
}
- jffs2_build_xattr_subsystem(c);
+ ret = jffs2_build_xattr_subsystem(c);
+ if (ret)
+ goto exit;
+
c->flags &= ~JFFS2_SB_FLAG_BUILDING;
dbg_fsbuild("FS build complete\n");
@@ -415,13 +418,15 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
ret = -EIO;
- goto out_free;
+ goto out_sum_exit;
}
jffs2_calc_trigger_levels(c);
return 0;
+ out_sum_exit:
+ jffs2_sum_exit(c);
out_free:
kvfree(c->blocks);
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 4849a4c9a0e2..764f19dec3f0 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -364,20 +364,25 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
int __init jffs2_compressors_init(void)
{
+ int ret = 0;
/* Registering compressors */
-#ifdef CONFIG_JFFS2_ZLIB
- jffs2_zlib_init();
-#endif
-#ifdef CONFIG_JFFS2_RTIME
- jffs2_rtime_init();
-#endif
-#ifdef CONFIG_JFFS2_RUBIN
- jffs2_rubinmips_init();
- jffs2_dynrubin_init();
-#endif
-#ifdef CONFIG_JFFS2_LZO
- jffs2_lzo_init();
-#endif
+ ret = jffs2_zlib_init();
+ if (ret)
+ goto exit;
+ ret = jffs2_rtime_init();
+ if (ret)
+ goto exit_zlib;
+ ret = jffs2_rubinmips_init();
+ if (ret)
+ goto exit_rtime;
+ ret = jffs2_dynrubin_init();
+ if (ret)
+ goto exit_runinmips;
+ ret = jffs2_lzo_init();
+ if (ret)
+ goto exit_dynrubin;
+
+
/* Setting default compression mode */
#ifdef CONFIG_JFFS2_CMODE_NONE
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -396,23 +401,26 @@ int __init jffs2_compressors_init(void)
#endif
#endif
return 0;
+
+exit_dynrubin:
+ jffs2_dynrubin_exit();
+exit_runinmips:
+ jffs2_rubinmips_exit();
+exit_rtime:
+ jffs2_rtime_exit();
+exit_zlib:
+ jffs2_zlib_exit();
+exit:
+ return ret;
}
int jffs2_compressors_exit(void)
{
/* Unregistering compressors */
-#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_exit();
-#endif
-#ifdef CONFIG_JFFS2_RUBIN
jffs2_dynrubin_exit();
jffs2_rubinmips_exit();
-#endif
-#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_exit();
-#endif
-#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_exit();
-#endif
return 0;
}
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 5e91d578f4ed..3716b6b7924c 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -88,18 +88,32 @@ int jffs2_rubinmips_init(void);
void jffs2_rubinmips_exit(void);
int jffs2_dynrubin_init(void);
void jffs2_dynrubin_exit(void);
+#else
+static inline int jffs2_rubinmips_init(void) { return 0; }
+static inline void jffs2_rubinmips_exit(void) {}
+static inline int jffs2_dynrubin_init(void) { return 0; }
+static inline void jffs2_dynrubin_exit(void) {}
#endif
#ifdef CONFIG_JFFS2_RTIME
-int jffs2_rtime_init(void);
-void jffs2_rtime_exit(void);
+extern int jffs2_rtime_init(void);
+extern void jffs2_rtime_exit(void);
+#else
+static inline int jffs2_rtime_init(void) { return 0; }
+static inline void jffs2_rtime_exit(void) {}
#endif
#ifdef CONFIG_JFFS2_ZLIB
-int jffs2_zlib_init(void);
-void jffs2_zlib_exit(void);
+extern int jffs2_zlib_init(void);
+extern void jffs2_zlib_exit(void);
+#else
+static inline int jffs2_zlib_init(void) { return 0; }
+static inline void jffs2_zlib_exit(void) {}
#endif
#ifdef CONFIG_JFFS2_LZO
-int jffs2_lzo_init(void);
-void jffs2_lzo_exit(void);
+extern int jffs2_lzo_init(void);
+extern void jffs2_lzo_exit(void);
+#else
+static inline int jffs2_lzo_init(void) { return 0; }
+static inline void jffs2_lzo_exit(void) {}
#endif
#endif /* __JFFS2_COMPR_H__ */
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 79e771ab624f..3bd9d2f3bece 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -95,6 +95,9 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
positions[value]=outpos;
if (repeat) {
+ if ((outpos + repeat) > destlen) {
+ return 1;
+ }
if (backoffs + repeat >= outpos) {
while(repeat) {
cpage_out[outpos++] = cpage_out[backoffs++];
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 556de100ebd5..9854253d0108 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -276,11 +276,6 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
end_rubin(&rs);
- if (outpos > pos) {
- /* We failed */
- return -1;
- }
-
/* Tell the caller how much we managed to compress,
* and how much space it took */
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 9d26b1b9fc01..0925caab23c4 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -157,7 +157,7 @@ __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
kfree(buf);
}
-void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
+static void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *jeb;
uint32_t free = 0, dirty = 0, used = 0, wasted = 0,
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index c0aabbcbfd58..dd91f725ded6 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -24,20 +24,20 @@
static int jffs2_readdir (struct file *, struct dir_context *);
-static int jffs2_create (struct user_namespace *, struct inode *,
+static int jffs2_create (struct mnt_idmap *, struct inode *,
struct dentry *, umode_t, bool);
static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
unsigned int);
static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
-static int jffs2_symlink (struct user_namespace *, struct inode *,
+static int jffs2_symlink (struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
-static int jffs2_mkdir (struct user_namespace *, struct inode *,struct dentry *,
- umode_t);
+static struct dentry *jffs2_mkdir (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t);
static int jffs2_rmdir (struct inode *,struct dentry *);
-static int jffs2_mknod (struct user_namespace *, struct inode *,struct dentry *,
+static int jffs2_mknod (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
-static int jffs2_rename (struct user_namespace *, struct inode *,
+static int jffs2_rename (struct mnt_idmap *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
unsigned int);
@@ -62,7 +62,7 @@ const struct inode_operations jffs2_dir_inode_operations =
.rmdir = jffs2_rmdir,
.mknod = jffs2_mknod,
.rename = jffs2_rename,
- .get_acl = jffs2_get_acl,
+ .get_inode_acl = jffs2_get_acl,
.set_acl = jffs2_set_acl,
.setattr = jffs2_setattr,
.listxattr = jffs2_listxattr,
@@ -160,7 +160,7 @@ static int jffs2_readdir(struct file *file, struct dir_context *ctx)
/***********************************************************************/
-static int jffs2_create(struct user_namespace *mnt_userns, struct inode *dir_i,
+static int jffs2_create(struct mnt_idmap *idmap, struct inode *dir_i,
struct dentry *dentry, umode_t mode, bool excl)
{
struct jffs2_raw_inode *ri;
@@ -204,7 +204,8 @@ static int jffs2_create(struct user_namespace *mnt_userns, struct inode *dir_i,
if (ret)
goto fail;
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(ri->ctime))));
jffs2_free_raw_inode(ri);
@@ -237,7 +238,8 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
if (dead_f->inocache)
set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
return ret;
}
/***********************************************************************/
@@ -271,7 +273,8 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
d_instantiate(dentry, d_inode(old_dentry));
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
ihold(d_inode(old_dentry));
}
return ret;
@@ -279,7 +282,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
/***********************************************************************/
-static int jffs2_symlink (struct user_namespace *mnt_userns, struct inode *dir_i,
+static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i,
struct dentry *dentry, const char *target)
{
struct jffs2_inode_info *f, *dir_f;
@@ -422,7 +425,8 @@ static int jffs2_symlink (struct user_namespace *mnt_userns, struct inode *dir_i
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
jffs2_free_raw_dirent(rd);
@@ -442,8 +446,8 @@ static int jffs2_symlink (struct user_namespace *mnt_userns, struct inode *dir_i
}
-static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
- struct dentry *dentry, umode_t mode)
+static struct dentry *jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
+ struct dentry *dentry, umode_t mode)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -460,7 +464,7 @@ static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
ri = jffs2_alloc_raw_inode();
if (!ri)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
c = JFFS2_SB_INFO(dir_i->i_sb);
@@ -473,7 +477,7 @@ static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
if (ret) {
jffs2_free_raw_inode(ri);
- return ret;
+ return ERR_PTR(ret);
}
inode = jffs2_new_inode(dir_i, mode, ri);
@@ -481,7 +485,7 @@ static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
if (IS_ERR(inode)) {
jffs2_free_raw_inode(ri);
jffs2_complete_reservation(c);
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
}
inode->i_op = &jffs2_dir_inode_operations;
@@ -566,7 +570,8 @@ static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
inc_nlink(dir_i);
jffs2_free_raw_dirent(rd);
@@ -579,11 +584,11 @@ static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
jffs2_complete_reservation(c);
d_instantiate_new(dentry, inode);
- return 0;
+ return NULL;
fail:
iget_failed(inode);
- return ret;
+ return ERR_PTR(ret);
}
static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
@@ -607,14 +612,15 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
if (!ret) {
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
return ret;
}
-static int jffs2_mknod (struct user_namespace *mnt_userns, struct inode *dir_i,
+static int jffs2_mknod (struct mnt_idmap *idmap, struct inode *dir_i,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct jffs2_inode_info *f, *dir_f;
@@ -743,7 +749,8 @@ static int jffs2_mknod (struct user_namespace *mnt_userns, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
jffs2_free_raw_dirent(rd);
@@ -762,7 +769,7 @@ static int jffs2_mknod (struct user_namespace *mnt_userns, struct inode *dir_i,
return ret;
}
-static int jffs2_rename (struct user_namespace *mnt_userns,
+static int jffs2_rename (struct mnt_idmap *idmap,
struct inode *old_dir_i, struct dentry *old_dentry,
struct inode *new_dir_i, struct dentry *new_dentry,
unsigned int flags)
@@ -864,14 +871,18 @@ static int jffs2_rename (struct user_namespace *mnt_userns,
* caller won't do it on its own since we are returning an error.
*/
d_invalidate(new_dentry);
- new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
+ inode_set_mtime_to_ts(new_dir_i,
+ inode_set_ctime_to_ts(new_dir_i, ITIME(now)));
return ret;
}
if (d_is_dir(old_dentry))
drop_nlink(old_dir_i);
- new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
+ inode_set_mtime_to_ts(old_dir_i,
+ inode_set_ctime_to_ts(old_dir_i, ITIME(now)));
+ inode_set_mtime_to_ts(new_dir_i,
+ inode_set_ctime_to_ts(new_dir_i, ITIME(now)));
return 0;
}
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 7e9abdb89712..fda9f4d6093f 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -43,9 +43,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
__func__,
jeb->offset, jeb->offset, jeb->offset + c->sector_size);
- instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ instr = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
if (!instr) {
- pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+ pr_warn("kzalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
@@ -57,8 +57,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
return;
}
- memset(instr, 0, sizeof(*instr));
-
instr->addr = jeb->offset;
instr->len = c->sector_size;
@@ -340,10 +338,9 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
} while(--retlen);
mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
if (retlen) {
- pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
- *wordebuf,
- jeb->offset +
- c->sector_size-retlen * sizeof(*wordebuf));
+ *bad_offset = jeb->offset + c->sector_size - retlen * sizeof(*wordebuf);
+ pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
+ *wordebuf, *bad_offset);
return -EIO;
}
return 0;
@@ -428,7 +425,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
.totlen = cpu_to_je32(c->cleanmarker_size)
};
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ if (ret)
+ goto filebad;
marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 4fc8cd698d1a..b697f3c259ef 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -21,13 +21,15 @@
#include <linux/jffs2.h>
#include "nodelist.h"
-static int jffs2_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *pg, void *fsdata);
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-static int jffs2_readpage (struct file *filp, struct page *pg);
+static int jffs2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata);
+static int jffs2_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
+static int jffs2_read_folio(struct file *filp, struct folio *folio);
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
@@ -54,9 +56,9 @@ const struct file_operations jffs2_file_operations =
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.unlocked_ioctl=jffs2_ioctl,
- .mmap = generic_file_readonly_mmap,
+ .mmap_prepare = generic_file_readonly_mmap_prepare,
.fsync = jffs2_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
};
@@ -64,7 +66,7 @@ const struct file_operations jffs2_file_operations =
const struct inode_operations jffs2_file_inode_operations =
{
- .get_acl = jffs2_get_acl,
+ .get_inode_acl = jffs2_get_acl,
.set_acl = jffs2_set_acl,
.setattr = jffs2_setattr,
.listxattr = jffs2_listxattr,
@@ -72,95 +74,82 @@ const struct inode_operations jffs2_file_inode_operations =
const struct address_space_operations jffs2_file_address_operations =
{
- .readpage = jffs2_readpage,
+ .read_folio = jffs2_read_folio,
.write_begin = jffs2_write_begin,
.write_end = jffs2_write_end,
};
-static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- unsigned char *pg_buf;
+ unsigned char *kaddr;
int ret;
jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
- __func__, inode->i_ino, pg->index << PAGE_SHIFT);
+ __func__, inode->i_ino, folio->index << PAGE_SHIFT);
- BUG_ON(!PageLocked(pg));
+ BUG_ON(!folio_test_locked(folio));
- pg_buf = kmap(pg);
- /* FIXME: Can kmap fail? */
-
- ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+ kaddr = kmap_local_folio(folio, 0);
+ ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT,
PAGE_SIZE);
+ kunmap_local(kaddr);
- if (ret) {
- ClearPageUptodate(pg);
- SetPageError(pg);
- } else {
- SetPageUptodate(pg);
- ClearPageError(pg);
- }
+ if (!ret)
+ folio_mark_uptodate(folio);
- flush_dcache_page(pg);
- kunmap(pg);
+ flush_dcache_folio(folio);
jffs2_dbg(2, "readpage finished\n");
return ret;
}
-int jffs2_do_readpage_unlock(void *data, struct page *pg)
+int __jffs2_read_folio(struct file *file, struct folio *folio)
{
- int ret = jffs2_do_readpage_nolock(data, pg);
- unlock_page(pg);
+ int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio);
+ folio_unlock(folio);
return ret;
}
-
-static int jffs2_readpage (struct file *filp, struct page *pg)
+static int jffs2_read_folio(struct file *file, struct folio *folio)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(folio->mapping->host);
int ret;
mutex_lock(&f->sem);
- ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
+ ret = __jffs2_read_folio(file, folio);
mutex_unlock(&f->sem);
return ret;
}
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int jffs2_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
- struct page *pg;
+ struct folio *folio;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
pgoff_t index = pos >> PAGE_SHIFT;
- uint32_t pageofs = index << PAGE_SHIFT;
int ret = 0;
- pg = grab_cache_page_write_begin(mapping, index, flags);
- if (!pg)
- return -ENOMEM;
- *pagep = pg;
-
jffs2_dbg(1, "%s()\n", __func__);
- if (pageofs > inode->i_size) {
- /* Make new hole frag from old EOF to new page */
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ if (pos > inode->i_size) {
+ /* Make new hole frag from old EOF to new position */
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
uint32_t alloc_len;
- jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
- (unsigned int)inode->i_size, pageofs);
+ jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n",
+ (unsigned int)inode->i_size, (uint32_t)pos);
ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret)
- goto out_page;
+ goto out_err;
mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
@@ -175,10 +164,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
ri.mode = cpu_to_jemode(inode->i_mode);
ri.uid = cpu_to_je16(i_uid_read(inode));
ri.gid = cpu_to_je16(i_gid_read(inode));
- ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
+ ri.isize = cpu_to_je32((uint32_t)pos);
ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
ri.offset = cpu_to_je32(inode->i_size);
- ri.dsize = cpu_to_je32(pageofs - inode->i_size);
+ ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size);
ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
@@ -190,7 +179,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
mutex_unlock(&f->sem);
- goto out_page;
+ goto out_err;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
if (f->metadata) {
@@ -205,37 +194,54 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
mutex_unlock(&f->sem);
- goto out_page;
+ goto out_err;
}
jffs2_complete_reservation(c);
- inode->i_size = pageofs;
+ inode->i_size = pos;
mutex_unlock(&f->sem);
}
/*
- * Read in the page if it wasn't already present. Cannot optimize away
- * the whole page write case until jffs2_write_end can handle the
+ * While getting a page and reading data in, lock c->alloc_sem until
+ * the page is Uptodate. Otherwise GC task may attempt to read the same
+ * page in read_cache_page(), which causes a deadlock.
+ */
+ mutex_lock(&c->alloc_sem);
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ goto release_sem;
+ }
+ *foliop = folio;
+
+ /*
+ * Read in the folio if it wasn't already present. Cannot optimize away
+ * the whole folio write case until jffs2_write_end can handle the
* case of a short-copy.
*/
- if (!PageUptodate(pg)) {
+ if (!folio_test_uptodate(folio)) {
mutex_lock(&f->sem);
- ret = jffs2_do_readpage_nolock(inode, pg);
+ ret = jffs2_do_readpage_nolock(inode, folio);
mutex_unlock(&f->sem);
- if (ret)
- goto out_page;
+ if (ret) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto release_sem;
+ }
}
- jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
- return ret;
+ jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags.f);
-out_page:
- unlock_page(pg);
- put_page(pg);
+release_sem:
+ mutex_unlock(&c->alloc_sem);
+out_err:
return ret;
}
-static int jffs2_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *pg, void *fsdata)
+static int jffs2_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
/* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple
@@ -249,16 +255,17 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
unsigned aligned_start = start & ~3;
int ret = 0;
uint32_t writtenlen = 0;
+ void *buf;
- jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
- __func__, inode->i_ino, pg->index << PAGE_SHIFT,
- start, end, pg->flags);
+ jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
+ __func__, inode->i_ino, folio_pos(folio),
+ start, end, folio->flags.f);
/* We need to avoid deadlock with page_cache_read() in
- jffs2_garbage_collect_pass(). So the page must be
+ jffs2_garbage_collect_pass(). So the folio must be
up to date to prevent page_cache_read() from trying
to re-lock it. */
- BUG_ON(!PageUptodate(pg));
+ BUG_ON(!folio_test_uptodate(folio));
if (end == PAGE_SIZE) {
/* When writing out the end of a page, write out the
@@ -273,8 +280,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
if (!ri) {
jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
__func__);
- unlock_page(pg);
- put_page(pg);
+ folio_unlock(folio);
+ folio_put(folio);
return -ENOMEM;
}
@@ -286,20 +293,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri->isize = cpu_to_je32((uint32_t)inode->i_size);
ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());
- /* In 2.4, it was already kmapped by generic_file_write(). Doesn't
- hurt to do it again. The alternative is ifdefs, which are ugly. */
- kmap(pg);
-
- ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
- (pg->index << PAGE_SHIFT) + aligned_start,
+ buf = kmap_local_folio(folio, aligned_start);
+ ret = jffs2_write_inode_range(c, f, ri, buf,
+ folio_pos(folio) + aligned_start,
end - aligned_start, &writtenlen);
+ kunmap_local(buf);
- kunmap(pg);
-
- if (ret) {
- /* There was an error writing. */
- SetPageError(pg);
- }
+ if (ret)
+ mapping_set_error(mapping, ret);
/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
writtenlen -= min(writtenlen, (start - aligned_start));
@@ -309,7 +310,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
inode->i_size = pos + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime))));
}
}
@@ -321,13 +323,12 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
it gets reread */
jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
__func__);
- SetPageError(pg);
- ClearPageUptodate(pg);
+ folio_clear_uptodate(folio);
}
jffs2_dbg(1, "%s() returning %d\n",
__func__, writtenlen > 0 ? writtenlen : ret);
- unlock_page(pg);
- put_page(pg);
+ folio_unlock(folio);
+ folio_put(folio);
return writtenlen > 0 ? writtenlen : ret;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 2ac410477c4f..764bba8ba999 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -113,9 +113,9 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
- ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
- ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
- ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
+ ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode_get_atime(inode)));
+ ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode_get_mtime(inode)));
+ ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode)));
ri->offset = cpu_to_je32(0);
ri->csize = ri->dsize = cpu_to_je32(mdatalen);
@@ -147,9 +147,9 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
- inode->i_atime = ITIME(je32_to_cpu(ri->atime));
- inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
- inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
+ inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(ri->atime)));
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime)));
+ inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(ri->mtime)));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
i_gid_write(inode, je16_to_cpu(ri->gid));
@@ -178,7 +178,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
jffs2_complete_reservation(c);
/* We have to do the truncate_setsize() without f->sem held, since
- some pages may be locked and waiting for it in readpage().
+ some pages may be locked and waiting for it in read_folio().
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */
@@ -190,19 +190,19 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return 0;
}
-int jffs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int jffs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int rc;
- rc = setattr_prepare(&init_user_ns, dentry, iattr);
+ rc = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (rc)
return rc;
rc = jffs2_do_setattr(inode, iattr);
if (!rc && (iattr->ia_valid & ATTR_MODE))
- rc = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
+ rc = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);
return rc;
}
@@ -265,7 +265,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
f = JFFS2_INODE_INFO(inode);
@@ -282,9 +282,9 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
i_uid_write(inode, je16_to_cpu(latest_node.uid));
i_gid_write(inode, je16_to_cpu(latest_node.gid));
inode->i_size = je32_to_cpu(latest_node.isize);
- inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
- inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
- inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
+ inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(latest_node.atime)));
+ inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(latest_node.mtime)));
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime)));
set_nlink(inode, f->inocache->pino_nlink);
@@ -373,7 +373,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
{
struct iattr iattr;
- if (!(inode->i_state & I_DIRTY_DATASYNC)) {
+ if (!(inode_state_read_once(inode) & I_DIRTY_DATASYNC)) {
jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
__func__, inode->i_ino);
return;
@@ -386,9 +386,9 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
iattr.ia_mode = inode->i_mode;
iattr.ia_uid = inode->i_uid;
iattr.ia_gid = inode->i_gid;
- iattr.ia_atime = inode->i_atime;
- iattr.ia_mtime = inode->i_mtime;
- iattr.ia_ctime = inode->i_ctime;
+ iattr.ia_atime = inode_get_atime(inode);
+ iattr.ia_mtime = inode_get_mtime(inode);
+ iattr.ia_ctime = inode_get_ctime(inode);
jffs2_do_setattr(inode, &iattr);
}
@@ -403,7 +403,7 @@ int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
/* We stop if it was running, then restart if it needs to.
This also catches the case where it was stopped and this
is just a remount to restart it.
- Flush the writebuffer, if neccecary, else we loose it */
+ Flush the writebuffer, if necessary, else we lose it */
if (!sb_rdonly(sb)) {
jffs2_stop_garbage_collect_thread(c);
mutex_lock(&c->alloc_sem);
@@ -475,8 +475,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
inode->i_mode = jemode_to_cpu(ri->mode);
i_gid_write(inode, je16_to_cpu(ri->gid));
i_uid_write(inode, je16_to_cpu(ri->uid));
- inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
- ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
+ simple_inode_init_ts(inode);
+ ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode_get_mtime(inode)));
inode->i_blocks = 0;
inode->i_size = 0;
@@ -603,8 +603,9 @@ out_root:
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
kvfree(c->blocks);
- out_inohash:
jffs2_clear_xattr_subsystem(c);
+ jffs2_sum_exit(c);
+ out_inohash:
kfree(c->inocache_list);
out_wbuf:
jffs2_flash_cleanup(c);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 373b3b7c9f44..1b833bbffcf5 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -82,7 +82,7 @@ again:
nextlist = &c->erasable_list;
} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
- /* There are blocks are wating for the wbuf sync */
+ /* There are blocks waiting for the wbuf sync */
jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
@@ -1171,7 +1171,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
- struct page *page;
+ struct folio *folio;
unsigned char *pg_ptr;
memset(&ri, 0, sizeof(ri));
@@ -1317,25 +1317,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start);
}
- /* The rules state that we must obtain the page lock *before* f->sem, so
+ /* The rules state that we must obtain the folio lock *before* f->sem, so
* drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
* actually going to *change* so we're safe; we only allow reading.
*
* It is important to note that jffs2_write_begin() will ensure that its
- * page is marked Uptodate before allocating space. That means that if we
- * end up here trying to GC the *same* page that jffs2_write_begin() is
- * trying to write out, read_cache_page() will not deadlock. */
+ * folio is marked uptodate before allocating space. That means that if we
+ * end up here trying to GC the *same* folio that jffs2_write_begin() is
+ * trying to write out, read_cache_folio() will not deadlock. */
mutex_unlock(&f->sem);
- page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
- jffs2_do_readpage_unlock, inode);
- if (IS_ERR(page)) {
- pr_warn("read_cache_page() returned error: %ld\n",
- PTR_ERR(page));
+ folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT,
+ __jffs2_read_folio, NULL);
+ if (IS_ERR(folio)) {
+ pr_warn("read_cache_folio() returned error: %ld\n",
+ PTR_ERR(folio));
mutex_lock(&f->sem);
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
- pg_ptr = kmap(page);
+ pg_ptr = kmap_local_folio(folio, 0);
mutex_lock(&f->sem);
offset = start;
@@ -1400,7 +1400,6 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
}
}
- kunmap(page);
- put_page(page);
+ folio_release_kmap(folio, pg_ptr);
return ret;
}
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
index 2e4a86763c07..93a2951538ce 100644
--- a/fs/jffs2/jffs2_fs_i.h
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -18,11 +18,11 @@
#include <linux/mutex.h>
struct jffs2_inode_info {
- /* We need an internal mutex similar to inode->i_mutex.
+ /* We need an internal mutex similar to inode->i_rwsem.
Unfortunately, we can't use the existing one, because
either the GC would deadlock, or we'd have to release it
before letting GC proceed. Or we'd have to put ugliness
- into the GC code so it didn't attempt to obtain the i_mutex
+ into the GC code so it didn't attempt to obtain the i_rwsem
for the inode(s) which are already locked */
struct mutex sem;
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index ce1189793288..411de8b361b2 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -33,27 +33,19 @@ static struct kmem_cache *xattr_ref_cache;
int __init jffs2_create_slab_caches(void)
{
- full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
- sizeof(struct jffs2_full_dnode),
- 0, 0, NULL);
+ full_dnode_slab = KMEM_CACHE(jffs2_full_dnode, 0);
if (!full_dnode_slab)
goto err;
- raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
- sizeof(struct jffs2_raw_dirent),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ raw_dirent_slab = KMEM_CACHE(jffs2_raw_dirent, SLAB_HWCACHE_ALIGN);
if (!raw_dirent_slab)
goto err;
- raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
- sizeof(struct jffs2_raw_inode),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ raw_inode_slab = KMEM_CACHE(jffs2_raw_inode, SLAB_HWCACHE_ALIGN);
if (!raw_inode_slab)
goto err;
- tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
- sizeof(struct jffs2_tmp_dnode_info),
- 0, 0, NULL);
+ tmp_dnode_info_slab = KMEM_CACHE(jffs2_tmp_dnode_info, 0);
if (!tmp_dnode_info_slab)
goto err;
@@ -63,28 +55,20 @@ int __init jffs2_create_slab_caches(void)
if (!raw_node_ref_slab)
goto err;
- node_frag_slab = kmem_cache_create("jffs2_node_frag",
- sizeof(struct jffs2_node_frag),
- 0, 0, NULL);
+ node_frag_slab = KMEM_CACHE(jffs2_node_frag, 0);
if (!node_frag_slab)
goto err;
- inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
- sizeof(struct jffs2_inode_cache),
- 0, 0, NULL);
+ inode_cache_slab = KMEM_CACHE(jffs2_inode_cache, 0);
if (!inode_cache_slab)
goto err;
#ifdef CONFIG_JFFS2_FS_XATTR
- xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
- sizeof(struct jffs2_xattr_datum),
- 0, 0, NULL);
+ xattr_datum_cache = KMEM_CACHE(jffs2_xattr_datum, 0);
if (!xattr_datum_cache)
goto err;
- xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
- sizeof(struct jffs2_xattr_ref),
- 0, 0, NULL);
+ xattr_ref_cache = KMEM_CACHE(jffs2_xattr_ref, 0);
if (!xattr_ref_cache)
goto err;
#endif
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index a7bbe879cfc3..3fb9f9807b66 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -15,6 +15,7 @@
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched/signal.h>
+#include <linux/string_choices.h>
#include "nodelist.h"
#include "debug.h"
@@ -49,28 +50,31 @@ static int jffs2_rp_can_write(struct jffs2_sb_info *c)
return 0;
}
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *len, uint32_t sumsize);
+
/**
* jffs2_reserve_space - request physical space to write nodes to flash
* @c: superblock info
* @minsize: Minimum acceptable size of allocation
* @len: Returned value of allocation length
* @prio: Allocation type - ALLOC_{NORMAL,DELETION}
+ * @sumsize: summary size requested or JFFS2_SUMMARY_NOSUM_SIZE for no summary
+ *
+ * Requests a block of physical space on the flash.
*
- * Requests a block of physical space on the flash. Returns zero for success
- * and puts 'len' into the appropriate place, or returns -ENOSPC or other
- * error if appropriate. Doesn't return len since that's
+ * Returns: %0 for success and puts 'len' into the appropriate place,
+ * or returns -ENOSPC or other error if appropriate.
+ * Doesn't return len since that's already returned in @len.
*
- * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
+ * If it returns %0, jffs2_reserve_space() also downs the per-filesystem
* allocation semaphore, to prevent more than one allocation from being
- * active at any time. The semaphore is later released by jffs2_commit_allocation()
+ * active at any time. The semaphore is later released by jffs2_commit_allocation().
*
* jffs2_reserve_space() may trigger garbage collection in order to make room
* for the requested allocation.
*/
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
- uint32_t *len, uint32_t sumsize);
-
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, int prio, uint32_t sumsize)
{
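/*
 * A minimal usage sketch of the reservation protocol documented above,
 * modelled on the jffs2_write_begin() hole-frag path later in this series;
 * it is illustrative only, not part of the patch. It assumes @ri has
 * already been filled in as a complete struct jffs2_raw_inode, and real
 * callers would also attach the returned full dnode to the inode (see
 * jffs2_add_full_dnode_to_inode()).
 */
static int jffs2_example_write_node(struct jffs2_sb_info *c,
				    struct jffs2_inode_info *f,
				    struct jffs2_raw_inode *ri)
{
	struct jffs2_full_dnode *fn;
	uint32_t alloc_len;
	int ret;

	/* Takes c->alloc_sem on success; alloc_len receives the space granted. */
	ret = jffs2_reserve_space(c, sizeof(*ri), &alloc_len,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret)
		return ret;	/* typically -ENOSPC */

	mutex_lock(&f->sem);
	fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
	mutex_unlock(&f->sem);

	/* Releases c->alloc_sem whether or not the write succeeded. */
	jffs2_complete_reservation(c);

	return IS_ERR(fn) ? PTR_ERR(fn) : 0;
}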
@@ -314,9 +318,9 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
And there's no space left. At all. */
pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
c->nr_erasing_blocks, c->nr_free_blocks,
- list_empty(&c->erasable_list) ? "yes" : "no",
- list_empty(&c->erasing_list) ? "yes" : "no",
- list_empty(&c->erase_pending_list) ? "yes" : "no");
+ str_yes_no(list_empty(&c->erasable_list)),
+ str_yes_no(list_empty(&c->erasing_list)),
+ str_yes_no(list_empty(&c->erase_pending_list)));
return -ENOSPC;
}
@@ -488,13 +492,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
/**
* jffs2_add_physical_node_ref - add a physical node reference to the list
* @c: superblock info
- * @new: new node reference to add
+ * @ofs: offset in the block
* @len: length of this physical node
+ * @ic: inode cache pointer
*
* Should only be used to report nodes for which space has been allocated
* by jffs2_reserve_space.
*
* Must be called with the alloc_sem held.
+ *
+ * Returns: pointer to new node on success or -errno code on error
*/
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
@@ -624,8 +631,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
ref->flash_offset, jeb->used_size);
BUG();
})
- jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
- ref_offset(ref), freed_len);
+ jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
+ ref_offset(ref), freed_len);
jeb->unchecked_size -= freed_len;
c->unchecked_size -= freed_len;
} else {
@@ -635,8 +642,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
ref->flash_offset, jeb->used_size);
BUG();
})
- jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
- ref_offset(ref), freed_len);
+ jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
+ ref_offset(ref), freed_len);
jeb->used_size -= freed_len;
c->used_size -= freed_len;
}
@@ -877,7 +884,7 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
__func__, c->nr_free_blocks, c->nr_erasing_blocks,
- c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
+ c->dirty_size, nr_very_dirty, str_yes_no(ret));
return ret;
}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 173eccac691d..86ab014a349c 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -35,9 +35,9 @@ struct kvec;
#define ITIME(sec) ((struct timespec64){sec, 0})
#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
-#define JFFS2_F_I_CTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_ctime)
-#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
-#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
+#define JFFS2_F_I_CTIME(f) I_SEC(inode_get_ctime(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_MTIME(f) I_SEC(inode_get_mtime(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_ATIME(f) I_SEC(inode_get_atime(OFNI_EDONI_2SFFJ(f)))
#define sleep_on_spinunlock(wq, s) \
do { \
DECLARE_WAITQUEUE(__wait, current); \
@@ -155,7 +155,7 @@ extern const struct file_operations jffs2_file_operations;
extern const struct inode_operations jffs2_file_inode_operations;
extern const struct address_space_operations jffs2_file_address_operations;
int jffs2_fsync(struct file *, loff_t, loff_t, int);
-int jffs2_do_readpage_unlock(void *data, struct page *pg);
+int __jffs2_read_folio(struct file *file, struct folio *folio);
/* ioctl.c */
long jffs2_ioctl(struct file *, unsigned int, unsigned long);
@@ -164,7 +164,7 @@ long jffs2_ioctl(struct file *, unsigned int, unsigned long);
extern const struct inode_operations jffs2_symlink_inode_operations;
/* fs.c */
-int jffs2_setattr (struct user_namespace *, struct dentry *, struct iattr *);
+int jffs2_setattr (struct mnt_idmap *, struct dentry *, struct iattr *);
int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_evict_inode (struct inode *);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 03b4f99614be..f987f78a894e 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -72,7 +72,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
if (err != -EOPNOTSUPP)
JFFS2_WARNING("MTD point failed: error code %d.\n", err);
} else
- pointed = 1; /* succefully pointed to device */
+ pointed = 1; /* successfully pointed to device */
#endif
if (!pointed) {
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index b676056826be..62879c218d4b 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
if (!s) {
JFFS2_WARNING("Can't allocate memory for summary\n");
ret = -ENOMEM;
- goto out;
+ goto out_buf;
}
}
@@ -256,7 +256,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
__func__, skip);
- jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ if (ret)
+ goto out;
jffs2_scan_dirty_space(c, c->nextblock, skip);
}
#endif
@@ -275,13 +277,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
}
ret = 0;
out:
+ jffs2_sum_reset_collected(s);
+ kfree(s);
+ out_buf:
if (buf_size)
kfree(flashbuf);
#ifndef __ECOS
else
mtd_unpoint(c->mtd, 0, c->mtd->size);
#endif
- kfree(s);
return ret;
}
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index aef5522551db..437f3a2c1b54 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -57,7 +57,7 @@ static int jffs2_security_getxattr(const struct xattr_handler *handler,
}
static int jffs2_security_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 4fe64519870f..d83372d3e1a0 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -858,7 +858,10 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
spin_unlock(&c->erase_completion_lock);
jeb = c->nextblock;
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+
+ if (ret)
+ goto out;
if (!c->summary->sum_num || !c->summary->sum_list_head) {
JFFS2_WARNING("Empty summary info!!!\n");
@@ -872,6 +875,8 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
datasize += padsize;
ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
+
+out:
spin_lock(&c->erase_completion_lock);
return ret;
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 81ca58c10b72..4545f885c41e 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -39,7 +39,7 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
{
struct jffs2_inode_info *f;
- f = kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
+ f = alloc_inode_sb(sb, jffs2_inode_cachep, GFP_KERNEL);
if (!f)
return NULL;
return &f->vfs_inode;
@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
struct jffs2_inode_info *f = foo;
mutex_init(&f->sem);
+ f->target = NULL;
inode_init_once(&f->vfs_inode);
}
@@ -150,6 +151,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
}
static const struct export_operations jffs2_export_ops = {
+ .encode_fh = generic_encode_ino32_fh,
.get_parent = jffs2_get_parent,
.fh_to_dentry = jffs2_fh_to_dentry,
.fh_to_parent = jffs2_fh_to_parent,
@@ -386,7 +388,7 @@ static int __init init_jffs2_fs(void)
jffs2_inode_cachep = kmem_cache_create("jffs2_i",
sizeof(struct jffs2_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
jffs2_i_init_once);
if (!jffs2_inode_cachep) {
pr_err("error: Failed to initialise inode cache\n");
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index c6821a509481..bb815a002984 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -584,7 +584,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
size_t retlen;
/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
- del_timer() the timer we never initialised. */
+ call timer_delete() on the timer we never initialised. */
if (!jffs2_is_writebuffered(c))
return 0;
@@ -1035,7 +1035,7 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
{
int i, ret;
int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_AUTO_OOB;
ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
@@ -1076,7 +1076,7 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
ops.mode = MTD_OPS_AUTO_OOB;
@@ -1101,7 +1101,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
int ret;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
ops.mode = MTD_OPS_AUTO_OOB;
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index da3e18503c65..defb4162c3d5 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -772,10 +772,10 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
}
#define XREF_TMPHASH_SIZE (128)
-void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
{
struct jffs2_xattr_ref *ref, *_ref;
- struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
+ struct jffs2_xattr_ref **xref_tmphash;
struct jffs2_xattr_datum *xd, *_xd;
struct jffs2_inode_cache *ic;
struct jffs2_raw_node_ref *raw;
@@ -784,9 +784,12 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
+ xref_tmphash = kcalloc(XREF_TMPHASH_SIZE,
+ sizeof(struct jffs2_xattr_ref *), GFP_KERNEL);
+ if (!xref_tmphash)
+ return -ENOMEM;
+
/* Phase.1 : Merge same xref */
- for (i=0; i < XREF_TMPHASH_SIZE; i++)
- xref_tmphash[i] = NULL;
for (ref=c->xref_temp; ref; ref=_ref) {
struct jffs2_xattr_ref *tmp;
@@ -884,6 +887,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
"%u of xref (%u dead, %u orphan) found.\n",
xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
xref_count, xref_dead_count, xref_orphan_count);
+ kfree(xref_tmphash);
+ return 0;
}
struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
@@ -915,21 +920,18 @@ struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
* do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
* is an implementation of setxattr handler on jffs2.
* -------------------------------------------------- */
-const struct xattr_handler *jffs2_xattr_handlers[] = {
+const struct xattr_handler * const jffs2_xattr_handlers[] = {
&jffs2_user_xattr_handler,
#ifdef CONFIG_JFFS2_FS_SECURITY
&jffs2_security_xattr_handler,
#endif
-#ifdef CONFIG_JFFS2_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
&jffs2_trusted_xattr_handler,
NULL
};
-static const struct xattr_handler *xprefix_to_handler(int xprefix) {
- const struct xattr_handler *ret;
+static const char *jffs2_xattr_prefix(int xprefix, struct dentry *dentry)
+{
+ const struct xattr_handler *ret = NULL;
switch (xprefix) {
case JFFS2_XPREFIX_USER:
@@ -942,20 +944,23 @@ static const struct xattr_handler *xprefix_to_handler(int xprefix) {
#endif
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
case JFFS2_XPREFIX_ACL_ACCESS:
- ret = &posix_acl_access_xattr_handler;
+ ret = &nop_posix_acl_access;
break;
case JFFS2_XPREFIX_ACL_DEFAULT:
- ret = &posix_acl_default_xattr_handler;
+ ret = &nop_posix_acl_default;
break;
#endif
case JFFS2_XPREFIX_TRUSTED:
ret = &jffs2_trusted_xattr_handler;
break;
default:
- ret = NULL;
- break;
+ return NULL;
}
- return ret;
+
+ if (!xattr_handler_can_list(ret, dentry))
+ return NULL;
+
+ return xattr_prefix(ret);
}
ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -966,7 +971,6 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
struct jffs2_inode_cache *ic = f->inocache;
struct jffs2_xattr_ref *ref, **pref;
struct jffs2_xattr_datum *xd;
- const struct xattr_handler *xhandle;
const char *prefix;
ssize_t prefix_len, len, rc;
int retry = 0;
@@ -998,10 +1002,10 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
goto out;
}
}
- xhandle = xprefix_to_handler(xd->xprefix);
- if (!xhandle || (xhandle->list && !xhandle->list(dentry)))
+
+ prefix = jffs2_xattr_prefix(xd->xprefix, dentry);
+ if (!prefix)
continue;
- prefix = xhandle->prefix ?: xhandle->name;
prefix_len = strlen(prefix);
rc = prefix_len + xd->name_len + 1;
@@ -1106,6 +1110,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
return rc;
request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
+ if (request > c->sector_size - c->cleanmarker_size)
+ return -ERANGE;
+
rc = jffs2_reserve_space(c, request, &length,
ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
if (rc) {
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 720007b2fd65..7e7de093ec0a 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -71,7 +71,7 @@ static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
#ifdef CONFIG_JFFS2_FS_XATTR
extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
-extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
+extern int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
@@ -94,7 +94,7 @@ extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname
extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
const char *buffer, size_t size, int flags);
-extern const struct xattr_handler *jffs2_xattr_handlers[];
+extern const struct xattr_handler * const jffs2_xattr_handlers[];
extern const struct xattr_handler jffs2_user_xattr_handler;
extern const struct xattr_handler jffs2_trusted_xattr_handler;
@@ -103,7 +103,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
#else
#define jffs2_init_xattr_subsystem(c)
-#define jffs2_build_xattr_subsystem(c)
+#define jffs2_build_xattr_subsystem(c) (0)
#define jffs2_clear_xattr_subsystem(c)
#define jffs2_xattr_do_crccheck_inode(c, ic)
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index cc3f24883e7d..b7c5da2d89bd 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -25,7 +25,7 @@ static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
}
static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index fb945977c013..f64edce4927b 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -25,7 +25,7 @@ static int jffs2_user_getxattr(const struct xattr_handler *handler,
}
static int jffs2_user_setxattr(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/jfs/Kconfig b/fs/jfs/Kconfig
index 05cb0e8e4382..3728cf4d944d 100644
--- a/fs/jfs/Kconfig
+++ b/fs/jfs/Kconfig
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config JFS_FS
tristate "JFS filesystem support"
+ select BUFFER_HEAD
select NLS
+ select NLS_UCS2_UTILS
select CRC32
+ select LEGACY_DIRECT_IO
help
This is a port of IBM's Journaled Filesystem . More information is
available in the file <file:Documentation/admin-guide/jfs.rst>.
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile
index 285ec189ed5c..b769bbf8bdc2 100644
--- a/fs/jfs/Makefile
+++ b/fs/jfs/Makefile
@@ -9,9 +9,7 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \
jfs_unicode.o jfs_dtree.o jfs_inode.o jfs_discard.o \
jfs_extent.o symlink.o jfs_metapage.o \
- jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o \
+ jfs_logmgr.o jfs_txnmgr.o \
resize.o xattr.o ioctl.o
jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o
-
-ccflags-y := -D_JFS_4K
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index a653f34c6e26..1de3602c98de 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -94,18 +94,19 @@ out:
return rc;
}
-int jfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int jfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
int rc;
tid_t tid;
int update_mode = 0;
+ struct inode *inode = d_inode(dentry);
umode_t mode = inode->i_mode;
tid = txBegin(inode->i_sb, 0);
mutex_lock(&JFS_IP(inode)->commit_mutex);
if (type == ACL_TYPE_ACCESS && acl) {
- rc = posix_acl_update_mode(&init_user_ns, inode, &mode, &acl);
+ rc = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode, &acl);
if (rc)
goto end_tx;
if (mode != inode->i_mode)
@@ -115,7 +116,7 @@ int jfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
if (!rc) {
if (update_mode) {
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
rc = txCommit(tid, 1, &inode, 0);
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 1d732fd223d4..87ad042221e7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -26,8 +26,8 @@ int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return rc;
inode_lock(inode);
- if (!(inode->i_state & I_DIRTY_ALL) ||
- (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
+ if (!(inode_state_read_once(inode) & I_DIRTY_ALL) ||
+ (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
inode_unlock(inode);
@@ -44,6 +44,9 @@ static int jfs_open(struct inode *inode, struct file *file)
{
int rc;
+ if (S_ISREG(inode->i_mode) && inode->i_size < 0)
+ return -EIO;
+
if ((rc = dquot_file_open(inode, file)))
return rc;
@@ -85,24 +88,24 @@ static int jfs_release(struct inode *inode, struct file *file)
return 0;
}
-int jfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int jfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int rc;
- rc = setattr_prepare(&init_user_ns, dentry, iattr);
+ rc = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (rc)
return rc;
- if (is_quota_modification(inode, iattr)) {
+ if (is_quota_modification(&nop_mnt_idmap, inode, iattr)) {
rc = dquot_initialize(inode);
if (rc)
return rc;
}
if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
(iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
- rc = dquot_transfer(inode, iattr);
+ rc = dquot_transfer(&nop_mnt_idmap, inode, iattr);
if (rc)
return rc;
}
@@ -119,11 +122,11 @@ int jfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
jfs_truncate(inode);
}
- setattr_copy(&init_user_ns, inode, iattr);
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE)
- rc = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
+ rc = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);
return rc;
}
@@ -133,7 +136,7 @@ const struct inode_operations jfs_file_inode_operations = {
.fileattr_get = jfs_fileattr_get,
.fileattr_set = jfs_fileattr_set,
#ifdef CONFIG_JFS_POSIX_ACL
- .get_acl = jfs_get_acl,
+ .get_inode_acl = jfs_get_acl,
.set_acl = jfs_set_acl,
#endif
};
@@ -143,8 +146,8 @@ const struct file_operations jfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .splice_read = generic_file_splice_read,
+ .mmap_prepare = generic_file_mmap_prepare,
+ .splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fsync = jfs_fsync,
.release = jfs_release,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 57ab424c05ff..4709762713ef 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -29,7 +29,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
ret = diRead(inode);
@@ -59,9 +59,15 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
*/
inode->i_link[inode->i_size] = '\0';
}
- } else {
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &jfs_file_inode_operations;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ printk(KERN_DEBUG "JFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
unlock_new_inode(inode);
return inode;
@@ -145,13 +151,14 @@ void jfs_evict_inode(struct inode *inode)
if (!inode->i_nlink && !is_bad_inode(inode)) {
dquot_initialize(inode);
+ truncate_inode_pages_final(&inode->i_data);
if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
- truncate_inode_pages_final(&inode->i_data);
+ struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);
- if (JFS_SBI(inode->i_sb)->ipimap)
+ if (ipimap && JFS_IP(ipimap)->i_imap)
diFree(inode);
/*
@@ -223,18 +230,9 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
* this as a hole
*/
goto unlock;
-#ifdef _JFS_4K
XADoffset(&xad, lblock64);
XADlength(&xad, xlen);
XADaddress(&xad, xaddr);
-#else /* _JFS_4K */
- /*
- * As long as block size = 4K, this isn't a problem.
- * We should mark the whole page not ABNR, but how
- * will we know to mark the other blocks BH_New?
- */
- BUG();
-#endif /* _JFS_4K */
rc = extRecord(ip, &xad);
if (rc)
goto unlock;
@@ -251,7 +249,6 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
/*
* Allocate a new block
*/
-#ifdef _JFS_4K
if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
goto unlock;
rc = extAlloc(ip, xlen, lblock64, &xad, false);
@@ -262,14 +259,6 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
map_bh(bh_result, ip->i_sb, addressXAD(&xad));
bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
-#else /* _JFS_4K */
- /*
- * We need to do whatever it takes to keep all but the last buffers
- * in 4K pages - see jfs_write.c
- */
- BUG();
-#endif /* _JFS_4K */
-
unlock:
/*
* Release lock on inode
@@ -281,20 +270,15 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
return rc;
}
-static int jfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, jfs_get_block, wbc);
-}
-
static int jfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, jfs_get_block);
}
-static int jfs_readpage(struct file *file, struct page *page)
+static int jfs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, jfs_get_block);
+ return mpage_read_folio(folio, jfs_get_block);
}
static void jfs_readahead(struct readahead_control *rac)
@@ -312,20 +296,33 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int jfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int jfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
- jfs_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block);
if (unlikely(ret))
jfs_write_failed(mapping, pos + len);
return ret;
}
+static int jfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
+{
+ int ret;
+
+ ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
+ if (ret < len)
+ jfs_write_failed(mapping, pos + len);
+ return ret;
+}
+
static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, jfs_get_block);
@@ -357,15 +354,16 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations jfs_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = jfs_readpage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = jfs_read_folio,
.readahead = jfs_readahead,
- .writepage = jfs_writepage,
.writepages = jfs_writepages,
.write_begin = jfs_write_begin,
- .write_end = nobh_write_end,
+ .write_end = jfs_write_end,
.bmap = jfs_bmap,
.direct_IO = jfs_direct_IO,
+ .migrate_folio = buffer_migrate_folio,
};
/*
@@ -379,7 +377,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
ASSERT(length >= 0);
- if (test_cflag(COMMIT_Nolink, ip)) {
+ if (test_cflag(COMMIT_Nolink, ip) || isReadOnly(ip)) {
xtTruncate(0, ip, length, COMMIT_WMAP);
return;
}
@@ -403,7 +401,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
break;
}
- ip->i_mtime = ip->i_ctime = current_time(ip);
+ inode_set_mtime_to_ts(ip, inode_set_ctime_current(ip));
mark_inode_dirty(ip);
txCommit(tid, 1, &ip, 0);
@@ -416,7 +414,7 @@ void jfs_truncate(struct inode *ip)
{
jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);
- nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
+ block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
jfs_truncate_nolock(ip, ip->i_size);
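The jfs_iget() hunk near the top of this file's diff accepts only the four special-file types once the regular-file, directory and symlink branches (outside the hunk) have been tried, and fails the lookup with -EIO for anything else. The classification itself is plain <sys/stat.h> mode testing; a standalone sketch of the same decision, with made-up mode values for the demo:

#include <stdio.h>
#include <sys/stat.h>

/* Return 0 for the file types jfs_iget() accepts, -1 otherwise. */
static int classify(mode_t mode)
{
	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
		return 0;	/* handled by the earlier branches */
	if (S_ISCHR(mode) || S_ISBLK(mode) ||
	    S_ISFIFO(mode) || S_ISSOCK(mode))
		return 0;	/* special inode */
	return -1;		/* invalid on-disk mode: -EIO in the patch */
}

int main(void)
{
	printf("regular file: %d\n", classify(S_IFREG | 0644));
	printf("fifo: %d\n", classify(S_IFIFO | 0644));
	printf("garbage mode: %d\n", classify(0));
	return 0;
}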
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 03a845ab4f00..563f148be8af 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -57,7 +57,7 @@ static long jfs_map_ext2(unsigned long flags, int from)
return mapped;
}
-int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int jfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct jfs_inode_info *jfs_inode = JFS_IP(d_inode(dentry));
unsigned int flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;
@@ -70,8 +70,8 @@ int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
return 0;
}
-int jfs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int jfs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct jfs_inode_info *jfs_inode = JFS_IP(inode);
@@ -96,7 +96,7 @@ int jfs_fileattr_set(struct user_namespace *mnt_userns,
jfs_inode->mode2 = flags;
jfs_set_inode_flags(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
@@ -110,14 +110,13 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FITRIM:
{
struct super_block *sb = inode->i_sb;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
struct fstrim_range range;
s64 ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q)) {
+ if (!bdev_max_discard_sectors(sb->s_bdev)) {
jfs_warn("FITRIM not supported on device");
return -EOPNOTSUPP;
}
@@ -127,7 +126,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT;
range.minlen = max_t(unsigned int, range.minlen,
- q->limits.discard_granularity);
+ bdev_discard_granularity(sb->s_bdev));
ret = jfs_ioc_trim(inode, &range);
if (ret < 0)
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index 3de40286d31f..f892e54d0fcd 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -8,7 +8,7 @@
#ifdef CONFIG_JFS_POSIX_ACL
struct posix_acl *jfs_get_acl(struct inode *inode, int type, bool rcu);
-int jfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int jfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
int jfs_init_acl(tid_t, struct inode *, struct inode *);
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index 6b231d0d0071..603aae17a693 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -96,7 +96,7 @@ struct dinode {
#define di_gengen u._file._u1._imap._gengen
union {
- xtpage_t _xtroot;
+ xtroot_t _xtroot;
struct {
u8 unused[16]; /* 16: */
dxd_t _dxd; /* 16: */
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
index 575cb2ba74fc..4b660296caf3 100644
--- a/fs/jfs/jfs_discard.c
+++ b/fs/jfs/jfs_discard.c
@@ -65,7 +65,7 @@ void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks)
int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
- struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+ struct bmap *bmp;
struct super_block *sb = ipbmap->i_sb;
int agno, agno_end;
u64 start, end, minlen;
@@ -83,10 +83,16 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
if (minlen == 0)
minlen = 1;
- if (minlen > bmp->db_agsize ||
+ down_read(&sb->s_umount);
+ bmp = JFS_SBI(ip->i_sb)->bmap;
+
+ if (bmp == NULL ||
+ minlen > bmp->db_agsize ||
start >= bmp->db_mapsize ||
- range->len < sb->s_blocksize)
+ range->len < sb->s_blocksize) {
+ up_read(&sb->s_umount);
return -EINVAL;
+ }
if (end >= bmp->db_mapsize)
end = bmp->db_mapsize - 1;
@@ -100,6 +106,8 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
trimmed += dbDiscardAG(ip, agno, minlen);
agno++;
}
+
+ up_read(&sb->s_umount);
range->len = trimmed << sb->s_blocksize_bits;
return 0;
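The jfs_ioc_trim() change above pairs with the dbUnmount() hunk in the jfs_dmap.c diff below, which now clears the superblock's bmap pointer on unmount: the trim path re-reads the pointer only after taking s_umount for reading and returns -EINVAL if the map has already gone away. A minimal userspace sketch of that guard pattern, with invented names (shared_map, map_lock) standing in for sbi->bmap and s_umount:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct map { long mapsize; };

static struct map *shared_map;	/* plays the role of sbi->bmap */
static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;	/* s_umount */

static int trim_range(long start)
{
	struct map *bmp;
	int ret = 0;

	pthread_rwlock_rdlock(&map_lock);
	bmp = shared_map;	/* re-read only under the lock */
	if (bmp == NULL || start >= bmp->mapsize)
		ret = -1;	/* -EINVAL in the real code */
	/* ... walk the map here while the lock is still held ... */
	pthread_rwlock_unlock(&map_lock);
	return ret;
}

static void unmount_map(void)
{
	pthread_rwlock_wrlock(&map_lock);
	free(shared_map);
	shared_map = NULL;	/* what dbUnmount() now does */
	pthread_rwlock_unlock(&map_lock);
}

int main(void)
{
	shared_map = calloc(1, sizeof(*shared_map));
	shared_map->mapsize = 1024;
	printf("trim before unmount: %d\n", trim_range(10));
	unmount_map();
	printf("trim after unmount: %d\n", trim_range(10));
	return 0;
}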
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 91f4ec93dab1..cdfa699cd7c8 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -63,10 +63,10 @@
*/
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
-static int dbBackSplit(dmtree_t * tp, int leafno);
-static int dbJoin(dmtree_t * tp, int leafno, int newval);
-static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
+static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl);
+static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl);
+static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl);
+static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
+static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
@@ -148,13 +148,14 @@ static const s8 budtab[256] = {
* 0 - success
* -ENOMEM - insufficient memory
* -EIO - i/o error
+ * -EINVAL - wrong bmap data
*/
int dbMount(struct inode *ipbmap)
{
struct bmap *bmp;
struct dbmap_disk *dbmp_le;
struct metapage *mp;
- int i;
+ int i, err;
/*
* allocate/initialize the in-memory bmap descriptor
@@ -169,8 +170,8 @@ int dbMount(struct inode *ipbmap)
BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
- kfree(bmp);
- return -EIO;
+ err = -EIO;
+ goto err_kfree_bmp;
}
/* copy the on-disk bmap descriptor to its in-memory version. */
@@ -187,6 +188,24 @@ int dbMount(struct inode *ipbmap)
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+
+ if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) ||
+ (bmp->db_l2nbperpage < 0) ||
+ !bmp->db_numag || (bmp->db_numag > MAXAG) ||
+ (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
+ (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
+ (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) ||
+ (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) ||
+ (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) ||
+ (bmp->db_agstart < 0) ||
+ (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) ||
+ (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
+ (bmp->db_agl2size < 0) ||
+ ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+
for (i = 0; i < MAXAG; i++)
bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
@@ -207,6 +226,12 @@ int dbMount(struct inode *ipbmap)
BMAP_LOCK_INIT(bmp);
return (0);
+
+err_release_metapage:
+ release_metapage(mp);
+err_kfree_bmp:
+ kfree(bmp);
+ return err;
}
@@ -240,6 +265,7 @@ int dbUnmount(struct inode *ipbmap, int mounterror)
/* free the memory for the in-memory bmap. */
kfree(bmp);
+ JFS_SBI(ipbmap->i_sb)->bmap = NULL;
return (0);
}
@@ -378,7 +404,8 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
}
/* write the last buffer. */
- write_metapage(mp);
+ if (mp)
+ write_metapage(mp);
IREAD_UNLOCK(ipbmap);
@@ -614,7 +641,7 @@ int dbNextAG(struct inode *ipbmap)
* average free space.
*/
for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
- if (agpref == bmp->db_numag)
+ if (agpref >= bmp->db_numag)
agpref = 0;
if (atomic_read(&bmp->db_active[agpref]))
@@ -861,74 +888,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
return (rc);
}
-#ifdef _NOTYET
-/*
- * NAME: dbAllocExact()
- *
- * FUNCTION: try to allocate the requested extent;
- *
- * PARAMETERS:
- * ip - pointer to in-core inode;
- * blkno - extent address;
- * nblocks - extent length;
- *
- * RETURN VALUES:
- * 0 - success
- * -ENOSPC - insufficient disk resources
- * -EIO - i/o error
- */
-int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
-{
- int rc;
- struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
- struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
- struct dmap *dp;
- s64 lblkno;
- struct metapage *mp;
-
- IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
-
- /*
- * validate extent request:
- *
- * note: defragfs policy:
- * max 64 blocks will be moved.
- * allocation request size must be satisfied from a single dmap.
- */
- if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
- IREAD_UNLOCK(ipbmap);
- return -EINVAL;
- }
-
- if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
- /* the free space is no longer available */
- IREAD_UNLOCK(ipbmap);
- return -ENOSPC;
- }
-
- /* read in the dmap covering the extent */
- lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
- mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
- if (mp == NULL) {
- IREAD_UNLOCK(ipbmap);
- return -EIO;
- }
- dp = (struct dmap *) mp->data;
-
- /* try to allocate the requested extent */
- rc = dbAllocNext(bmp, dp, blkno, nblocks);
-
- IREAD_UNLOCK(ipbmap);
-
- if (rc == 0)
- mark_metapage_dirty(mp);
-
- release_metapage(mp);
-
- return (rc);
-}
-#endif /* _NOTYET */
-
/*
* NAME: dbReAlloc()
*
@@ -1430,6 +1389,12 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
(1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
+ if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) {
+ jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
+ release_metapage(mp);
+ return -EIO;
+ }
+
/* dmap control page trees fan-out by 4 and a single allocation
* group may be described by 1 or 2 subtrees within the ag level
* dmap control page, depending upon the ag size. examine the ag's
@@ -1656,6 +1621,8 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
} else if (rc == -ENOSPC) {
/* search for next smaller log2 block */
l2nb = BLKSTOL2(nblocks) - 1;
+ if (unlikely(l2nb < 0))
+ break;
nblocks = 1LL << l2nb;
} else {
/* Trim any already allocated blocks */
@@ -1747,7 +1714,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
* dbFindLeaf() returns the index of the leaf at which
* free space was found.
*/
- rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
+ rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
/* release the buffer.
*/
@@ -1848,6 +1815,11 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
return -EIO;
dp = (struct dmap *) mp->data;
+ if (dp->tree.budmin < 0) {
+ release_metapage(mp);
+ return -EIO;
+ }
+
/* try to allocate the blocks.
*/
rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
@@ -1994,9 +1966,12 @@ dbAllocDmapLev(struct bmap * bmp,
* free space. if sufficient free space is found, dbFindLeaf()
* returns the index of the leaf at which free space was found.
*/
- if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
+ if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
return -ENOSPC;
+ if (leafidx < 0)
+ return -EIO;
+
/* determine the block number within the file system corresponding
* to the leaf at which free space was found.
*/
@@ -2130,7 +2105,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
* system.
*/
if (dp->tree.stree[word] == NOFREE)
- dbBackSplit((dmtree_t *) & dp->tree, word);
+ dbBackSplit((dmtree_t *)&dp->tree, word, false);
dbAllocBits(bmp, dp, blkno, nblocks);
}
@@ -2216,7 +2191,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* the binary system of the leaves if need be.
*/
dbSplit(tp, word, BUDMIN,
- dbMaxBud((u8 *) & dp->wmap[word]));
+ dbMaxBud((u8 *)&dp->wmap[word]), false);
word += 1;
} else {
@@ -2256,7 +2231,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* system of the leaves to reflect the current
* allocation (size).
*/
- dbSplit(tp, word, size, NOFREE);
+ dbSplit(tp, word, size, NOFREE, false);
/* get the number of dmap words handled */
nw = BUDSIZE(size, BUDMIN);
@@ -2363,7 +2338,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/* update the leaf for this dmap word.
*/
rc = dbJoin(tp, word,
- dbMaxBud((u8 *) & dp->wmap[word]));
+ dbMaxBud((u8 *)&dp->wmap[word]), false);
if (rc)
return rc;
@@ -2396,7 +2371,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/* update the leaf.
*/
- rc = dbJoin(tp, word, size);
+ rc = dbJoin(tp, word, size, false);
if (rc)
return rc;
@@ -2548,16 +2523,16 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
* that it is at the front of a binary buddy system.
*/
if (oldval == NOFREE) {
- rc = dbBackSplit((dmtree_t *) dcp, leafno);
+ rc = dbBackSplit((dmtree_t *)dcp, leafno, true);
if (rc) {
release_metapage(mp);
return rc;
}
oldval = dcp->stree[ti];
}
- dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
+ dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval, true);
} else {
- rc = dbJoin((dmtree_t *) dcp, leafno, newval);
+ rc = dbJoin((dmtree_t *) dcp, leafno, newval, true);
if (rc) {
release_metapage(mp);
return rc;
@@ -2588,7 +2563,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
*/
if (alloc) {
dbJoin((dmtree_t *) dcp, leafno,
- oldval);
+ oldval, true);
} else {
/* the dbJoin() above might have
* caused a larger binary buddy system
@@ -2598,9 +2573,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
*/
if (dcp->stree[ti] == NOFREE)
dbBackSplit((dmtree_t *)
- dcp, leafno);
+ dcp, leafno, true);
dbSplit((dmtree_t *) dcp, leafno,
- dcp->budmin, oldval);
+ dcp->budmin, oldval, true);
}
/* release the buffer and return the error.
@@ -2648,7 +2623,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl)
{
int budsz;
int cursz;
@@ -2670,7 +2645,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
while (cursz >= splitsz) {
/* update the buddy's leaf with its new value.
*/
- dbAdjTree(tp, leafno ^ budsz, cursz);
+ dbAdjTree(tp, leafno ^ budsz, cursz, is_ctl);
/* on to the next size and buddy.
*/
@@ -2682,7 +2657,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
/* adjust the dmap tree to reflect the specified leaf's new
* value.
*/
- dbAdjTree(tp, leafno, newval);
+ dbAdjTree(tp, leafno, newval, is_ctl);
}
@@ -2713,7 +2688,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
-static int dbBackSplit(dmtree_t * tp, int leafno)
+static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
{
int budsz, bud, w, bsz, size;
int cursz;
@@ -2764,7 +2739,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
* system in two.
*/
cursz = leaf[bud] - 1;
- dbSplit(tp, bud, cursz, cursz);
+ dbSplit(tp, bud, cursz, cursz, is_ctl);
break;
}
}
@@ -2792,7 +2767,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
*
* RETURN VALUES: none
*/
-static int dbJoin(dmtree_t * tp, int leafno, int newval)
+static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
{
int budsz, buddy;
s8 *leaf;
@@ -2847,12 +2822,12 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
if (leafno < buddy) {
/* leafno is the left buddy.
*/
- dbAdjTree(tp, buddy, NOFREE);
+ dbAdjTree(tp, buddy, NOFREE, is_ctl);
} else {
/* buddy is the left buddy and becomes
* leafno.
*/
- dbAdjTree(tp, leafno, NOFREE);
+ dbAdjTree(tp, leafno, NOFREE, is_ctl);
leafno = buddy;
}
@@ -2865,7 +2840,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
/* update the leaf value.
*/
- dbAdjTree(tp, leafno, newval);
+ dbAdjTree(tp, leafno, newval, is_ctl);
return 0;
}
@@ -2886,15 +2861,20 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
*
* RETURN VALUES: none
*/
-static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
{
int lp, pp, k;
- int max;
+ int max, size;
+
+ size = is_ctl ? CTLTREESIZE : TREESIZE;
/* pick up the index of the leaf for this leafno.
*/
lp = leafno + le32_to_cpu(tp->dmt_leafidx);
+ if (WARN_ON_ONCE(lp >= size || lp < 0))
+ return;
+
/* is the current value the same as the old value ? if so,
* there is nothing to do.
*/
@@ -2908,6 +2888,9 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
/* bubble the new value up the tree as required.
*/
for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
+ if (lp == 0)
+ break;
+
/* get the index of the first leaf of the 4 leaf
* group containing the specified leaf (leafno).
*/
@@ -2955,14 +2938,19 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
* leafidx - return pointer to be set to the index of the leaf
* describing at least l2nb free blocks if sufficient
* free blocks are found.
+ * is_ctl - determines if the tree is of type ctl
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient free blocks.
*/
-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
{
int ti, n = 0, k, x = 0;
+ int max_size, max_idx;
+
+ max_size = is_ctl ? CTLTREESIZE : TREESIZE;
+ max_idx = is_ctl ? LPERCTL : LPERDMAP;
/* first check the root of the tree to see if there is
* sufficient free space.
@@ -2983,6 +2971,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
/* sufficient free space found. move to the next
* level (or quit if this is the last level).
*/
+ if (x + n > max_size)
+ return -ENOSPC;
if (l2nb <= tp->dmt_stree[x + n])
break;
}
@@ -2992,6 +2982,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
*/
assert(n < 4);
}
+ if (le32_to_cpu(tp->dmt_leafidx) >= max_idx)
+ return -ENOSPC;
/* set the return to the leftmost leaf describing sufficient
* free space.
@@ -3036,7 +3028,7 @@ static int dbFindBits(u32 word, int l2nb)
/* scan the word for nb free bits at nb alignments.
*/
- for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
+ for (bitno = 0; mask != 0; bitno += nb, mask = (mask >> nb)) {
if ((mask & word) == mask)
break;
}
@@ -3408,7 +3400,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
oldl2agsize = bmp->db_agl2size;
bmp->db_agl2size = l2agsize;
- bmp->db_agsize = 1 << l2agsize;
+ bmp->db_agsize = (s64)1 << l2agsize;
/* compute new number of AG */
agno = bmp->db_numag;
@@ -3671,8 +3663,8 @@ void dbFinalizeBmap(struct inode *ipbmap)
* system size is not a multiple of the group size).
*/
inactfree = (inactags && ag_rem) ?
- ((inactags - 1) << bmp->db_agl2size) + ag_rem
- : inactags << bmp->db_agl2size;
+ (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem
+ : ((s64)inactags << bmp->db_agl2size);
/* determine how many free blocks are in the active
* allocation groups plus the average number of free blocks
@@ -3895,7 +3887,7 @@ static int dbInitTree(struct dmaptree * dtp)
l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
/*
- * configure the leaf levevl into binary buddy system
+ * configure the leaf level into binary buddy system
*
* Try to combine buddies starting with a buddy size of 1
* (i.e. two leaves). At a buddy size of 1 two buddy leaves
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 837d42f61464..0ab83bb7bbdf 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -117,7 +117,8 @@ do { \
if (!(RC)) { \
if (((P)->header.nextindex > \
(((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \
- ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \
+ ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) || \
+ ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \
BT_PUTPAGE(MP); \
jfs_error((IP)->i_sb, \
"DT_GETPAGE: dtree page corrupt\n"); \
@@ -633,6 +634,11 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
index = base + (lim >> 1);
+ if (stbl[index] < 0) {
+ rc = -EIO;
+ goto out;
+ }
+
if (p->header.flag & BT_LEAF) {
/* uppercase leaf name to compare */
cmp =
@@ -829,6 +835,8 @@ int dtInsert(tid_t tid, struct inode *ip,
* the full page.
*/
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
+ if (p->header.freelist == 0)
+ return -EINVAL;
/*
* insert entry for new key
@@ -1970,7 +1978,7 @@ static int dtSplitRoot(tid_t tid,
do {
f = &rp->slot[fsi];
fsi = f->next;
- } while (fsi != -1);
+ } while (fsi >= 0);
f->next = n;
}
@@ -2423,304 +2431,6 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
return 0;
}
-#ifdef _NOTYET
-/*
- * NAME: dtRelocate()
- *
- * FUNCTION: relocate dtpage (internal or leaf) of directory;
- * This function is mainly used by defragfs utility.
- */
-int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
- s64 nxaddr)
-{
- int rc = 0;
- struct metapage *mp, *pmp, *lmp, *rmp;
- dtpage_t *p, *pp, *rp = 0, *lp= 0;
- s64 bn;
- int index;
- struct btstack btstack;
- pxd_t *pxd;
- s64 oxaddr, nextbn, prevbn;
- int xlen, xsize;
- struct tlock *tlck;
- struct dt_lock *dtlck;
- struct pxd_lock *pxdlock;
- s8 *stbl;
- struct lv *lv;
-
- oxaddr = addressPXD(opxd);
- xlen = lengthPXD(opxd);
-
- jfs_info("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d",
- (long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
- xlen);
-
- /*
- * 1. get the internal parent dtpage covering
- * router entry for the tartget page to be relocated;
- */
- rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
- if (rc)
- return rc;
-
- /* retrieve search result */
- DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
- jfs_info("dtRelocate: parent router entry validated.");
-
- /*
- * 2. relocate the target dtpage
- */
- /* read in the target page from src extent */
- DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
- if (rc) {
- /* release the pinned parent page */
- DT_PUTPAGE(pmp);
- return rc;
- }
-
- /*
- * read in sibling pages if any to update sibling pointers;
- */
- rmp = NULL;
- if (p->header.next) {
- nextbn = le64_to_cpu(p->header.next);
- DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
- if (rc) {
- DT_PUTPAGE(mp);
- DT_PUTPAGE(pmp);
- return (rc);
- }
- }
-
- lmp = NULL;
- if (p->header.prev) {
- prevbn = le64_to_cpu(p->header.prev);
- DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
- if (rc) {
- DT_PUTPAGE(mp);
- DT_PUTPAGE(pmp);
- if (rmp)
- DT_PUTPAGE(rmp);
- return (rc);
- }
- }
-
- /* at this point, all xtpages to be updated are in memory */
-
- /*
- * update sibling pointers of sibling dtpages if any;
- */
- if (lmp) {
- tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
- dtlck = (struct dt_lock *) & tlck->lock;
- /* linelock header */
- ASSERT(dtlck->index == 0);
- lv = & dtlck->lv[0];
- lv->offset = 0;
- lv->length = 1;
- dtlck->index++;
-
- lp->header.next = cpu_to_le64(nxaddr);
- DT_PUTPAGE(lmp);
- }
-
- if (rmp) {
- tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
- dtlck = (struct dt_lock *) & tlck->lock;
- /* linelock header */
- ASSERT(dtlck->index == 0);
- lv = & dtlck->lv[0];
- lv->offset = 0;
- lv->length = 1;
- dtlck->index++;
-
- rp->header.prev = cpu_to_le64(nxaddr);
- DT_PUTPAGE(rmp);
- }
-
- /*
- * update the target dtpage to be relocated
- *
- * write LOG_REDOPAGE of LOG_NEW type for dst page
- * for the whole target page (logredo() will apply
- * after image and update bmap for allocation of the
- * dst extent), and update bmap for allocation of
- * the dst extent;
- */
- tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
- dtlck = (struct dt_lock *) & tlck->lock;
- /* linelock header */
- ASSERT(dtlck->index == 0);
- lv = & dtlck->lv[0];
-
- /* update the self address in the dtpage header */
- pxd = &p->header.self;
- PXDaddress(pxd, nxaddr);
-
- /* the dst page is the same as the src page, i.e.,
- * linelock for afterimage of the whole page;
- */
- lv->offset = 0;
- lv->length = p->header.maxslot;
- dtlck->index++;
-
- /* update the buffer extent descriptor of the dtpage */
- xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
-
- /* unpin the relocated page */
- DT_PUTPAGE(mp);
- jfs_info("dtRelocate: target dtpage relocated.");
-
- /* the moved extent is dtpage, then a LOG_NOREDOPAGE log rec
- * needs to be written (in logredo(), the LOG_NOREDOPAGE log rec
- * will also force a bmap update ).
- */
-
- /*
- * 3. acquire maplock for the source extent to be freed;
- */
- /* for dtpage relocation, write a LOG_NOREDOPAGE record
- * for the source dtpage (logredo() will init NoRedoPage
- * filter and will also update bmap for free of the source
- * dtpage), and upadte bmap for free of the source dtpage;
- */
- tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
- pxdlock = (struct pxd_lock *) & tlck->lock;
- pxdlock->flag = mlckFREEPXD;
- PXDaddress(&pxdlock->pxd, oxaddr);
- PXDlength(&pxdlock->pxd, xlen);
- pxdlock->index = 1;
-
- /*
- * 4. update the parent router entry for relocation;
- *
- * acquire tlck for the parent entry covering the target dtpage;
- * write LOG_REDOPAGE to apply after image only;
- */
- jfs_info("dtRelocate: update parent router entry.");
- tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
- dtlck = (struct dt_lock *) & tlck->lock;
- lv = & dtlck->lv[dtlck->index];
-
- /* update the PXD with the new address */
- stbl = DT_GETSTBL(pp);
- pxd = (pxd_t *) & pp->slot[stbl[index]];
- PXDaddress(pxd, nxaddr);
- lv->offset = stbl[index];
- lv->length = 1;
- dtlck->index++;
-
- /* unpin the parent dtpage */
- DT_PUTPAGE(pmp);
-
- return rc;
-}
-
-/*
- * NAME: dtSearchNode()
- *
- * FUNCTION: Search for an dtpage containing a specified address
- * This function is mainly used by defragfs utility.
- *
- * NOTE: Search result on stack, the found page is pinned at exit.
- * The result page must be an internal dtpage.
- * lmxaddr give the address of the left most page of the
- * dtree level, in which the required dtpage resides.
- */
-static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
- struct btstack * btstack)
-{
- int rc = 0;
- s64 bn;
- struct metapage *mp;
- dtpage_t *p;
- int psize = 288; /* initial in-line directory */
- s8 *stbl;
- int i;
- pxd_t *pxd;
- struct btframe *btsp;
-
- BT_CLR(btstack); /* reset stack */
-
- /*
- * descend tree to the level with specified leftmost page
- *
- * by convention, root bn = 0.
- */
- for (bn = 0;;) {
- /* get/pin the page to search */
- DT_GETPAGE(ip, bn, mp, psize, p, rc);
- if (rc)
- return rc;
-
- /* does the xaddr of leftmost page of the levevl
- * matches levevl search key ?
- */
- if (p->header.flag & BT_ROOT) {
- if (lmxaddr == 0)
- break;
- } else if (addressPXD(&p->header.self) == lmxaddr)
- break;
-
- /*
- * descend down to leftmost child page
- */
- if (p->header.flag & BT_LEAF) {
- DT_PUTPAGE(mp);
- return -ESTALE;
- }
-
- /* get the leftmost entry */
- stbl = DT_GETSTBL(p);
- pxd = (pxd_t *) & p->slot[stbl[0]];
-
- /* get the child page block address */
- bn = addressPXD(pxd);
- psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
- /* unpin the parent page */
- DT_PUTPAGE(mp);
- }
-
- /*
- * search each page at the current levevl
- */
- loop:
- stbl = DT_GETSTBL(p);
- for (i = 0; i < p->header.nextindex; i++) {
- pxd = (pxd_t *) & p->slot[stbl[i]];
-
- /* found the specified router entry */
- if (addressPXD(pxd) == addressPXD(kpxd) &&
- lengthPXD(pxd) == lengthPXD(kpxd)) {
- btsp = btstack->top;
- btsp->bn = bn;
- btsp->index = i;
- btsp->mp = mp;
-
- return 0;
- }
- }
-
- /* get the right sibling page if any */
- if (p->header.next)
- bn = le64_to_cpu(p->header.next);
- else {
- DT_PUTPAGE(mp);
- return -ESTALE;
- }
-
- /* unpin current page */
- DT_PUTPAGE(mp);
-
- /* get the right sibling page */
- DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- goto loop;
-}
-#endif /* _NOTYET */
-
/*
* dtRelink()
*
@@ -2903,7 +2613,7 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
* fsck.jfs should really fix this, but it currently does not.
* Called from jfs_readdir when bad index is detected.
*/
-static void add_missing_indices(struct inode *inode, s64 bn)
+static int add_missing_indices(struct inode *inode, s64 bn)
{
struct ldtentry *d;
struct dt_lock *dtlck;
@@ -2912,7 +2622,7 @@ static void add_missing_indices(struct inode *inode, s64 bn)
struct lv *lv;
struct metapage *mp;
dtpage_t *p;
- int rc;
+ int rc = 0;
s8 *stbl;
tid_t tid;
struct tlock *tlck;
@@ -2937,6 +2647,16 @@ static void add_missing_indices(struct inode *inode, s64 bn)
stbl = DT_GETSTBL(p);
for (i = 0; i < p->header.nextindex; i++) {
+ if (stbl[i] < 0) {
+ jfs_err("jfs: add_missing_indices: Invalid stbl[%d] = %d for inode %ld, block = %lld",
+ i, stbl[i], (long)inode->i_ino, (long long)bn);
+ rc = -EIO;
+
+ DT_PUTPAGE(mp);
+ txAbort(tid, 0);
+ goto end;
+ }
+
d = (struct ldtentry *) &p->slot[stbl[i]];
index = le32_to_cpu(d->index);
if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
@@ -2954,6 +2674,7 @@ static void add_missing_indices(struct inode *inode, s64 bn)
(void) txCommit(tid, 1, &inode, 0);
end:
txEnd(tid);
+ return rc;
}
/*
@@ -3182,6 +2903,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
+ if (stbl[i] < 0 || stbl[i] >= DTPAGEMAXSLOT) {
+ jfs_err("JFS: Invalid stbl[%d] = %d for inode %ld, block = %lld",
+ i, stbl[i], (long)ip->i_ino, (long long)bn);
+ free_page(dirent_buf);
+ DT_PUTPAGE(mp);
+ return -EIO;
+ }
+
d = (struct ldtentry *) & p->slot[stbl[i]];
if (((long) jfs_dirent + d->namlen + 1) >
@@ -3299,7 +3028,8 @@ skip_one:
}
if (fix_page) {
- add_missing_indices(ip, bn);
+ if ((rc = add_missing_indices(ip, bn)))
+ goto out;
page_fixed = 1;
}
@@ -3377,6 +3107,13 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack)
/* get the leftmost entry */
stbl = DT_GETSTBL(p);
+
+ if (stbl[0] < 0 || stbl[0] >= DTPAGEMAXSLOT) {
+ DT_PUTPAGE(mp);
+ jfs_error(ip->i_sb, "stbl[0] out of bound\n");
+ return -EIO;
+ }
+
xd = (pxd_t *) & p->slot[stbl[0]];
/* get the child page block address */
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index bb4a342a193d..46529bcc8297 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -16,9 +16,6 @@
* forward references
*/
static int extBalloc(struct inode *, s64, s64 *, s64 *);
-#ifdef _NOTYET
-static int extBrealloc(struct inode *, s64, s64, s64 *, s64 *);
-#endif
static s64 extRoundDown(s64 nb);
#define DPD(a) (printk("(a): %d\n",(a)))
@@ -77,6 +74,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
int rc;
int xflag;
+ if (isReadOnly(ip)) {
+ jfs_error(ip->i_sb, "read-only filesystem\n");
+ return -EIO;
+ }
+
/* This blocks if we are low on resources */
txBeginAnon(ip->i_sb);
@@ -169,7 +171,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
/*
* COMMIT_SyncList flags an anonymous tlock on page that is on
* sync list.
- * We need to commit the inode to get the page written disk.
+ * We need to commit the inode to get the page written to the disk.
*/
if (test_and_clear_cflag(COMMIT_Synclist,ip))
jfs_commit_inode(ip, 0);
@@ -177,162 +179,6 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
return (0);
}
-
-#ifdef _NOTYET
-/*
- * NAME: extRealloc()
- *
- * FUNCTION: extend the allocation of a file extent containing a
- * partial back last page.
- *
- * PARAMETERS:
- * ip - the inode of the file.
- * cp - cbuf for the partial backed last page.
- * xlen - request size of the resulting extent.
- * xp - pointer to an xad. on successful exit, the xad
- * describes the newly allocated extent.
- * abnr - bool indicating whether the newly allocated extent
- * should be marked as allocated but not recorded.
- *
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error.
- * -ENOSPC - insufficient disk resources.
- */
-int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
-{
- struct super_block *sb = ip->i_sb;
- s64 xaddr, xlen, nxaddr, delta, xoff;
- s64 ntail, nextend, ninsert;
- int rc, nbperpage = JFS_SBI(sb)->nbperpage;
- int xflag;
-
- /* This blocks if we are low on resources */
- txBeginAnon(ip->i_sb);
-
- mutex_lock(&JFS_IP(ip)->commit_mutex);
- /* validate extent length */
- if (nxlen > MAXXLEN)
- nxlen = MAXXLEN;
-
- /* get the extend (partial) page's disk block address and
- * number of blocks.
- */
- xaddr = addressXAD(xp);
- xlen = lengthXAD(xp);
- xoff = offsetXAD(xp);
-
- /* if the extend page is abnr and if the request is for
- * the extent to be allocated and recorded,
- * make the page allocated and recorded.
- */
- if ((xp->flag & XAD_NOTRECORDED) && !abnr) {
- xp->flag = 0;
- if ((rc = xtUpdate(0, ip, xp)))
- goto exit;
- }
-
- /* try to allocated the request number of blocks for the
- * extent. dbRealloc() first tries to satisfy the request
- * by extending the allocation in place. otherwise, it will
- * try to allocate a new set of blocks large enough for the
- * request. in satisfying a request, dbReAlloc() may allocate
- * less than what was request but will always allocate enough
- * space as to satisfy the extend page.
- */
- if ((rc = extBrealloc(ip, xaddr, xlen, &nxlen, &nxaddr)))
- goto exit;
-
- /* Allocat blocks to quota. */
- rc = dquot_alloc_block(ip, nxlen);
- if (rc) {
- dbFree(ip, nxaddr, (s64) nxlen);
- mutex_unlock(&JFS_IP(ip)->commit_mutex);
- return rc;
- }
-
- delta = nxlen - xlen;
-
- /* check if the extend page is not abnr but the request is abnr
- * and the allocated disk space is for more than one page. if this
- * is the case, there is a miss match of abnr between the extend page
- * and the one or more pages following the extend page. as a result,
- * two extents will have to be manipulated. the first will be that
- * of the extent of the extend page and will be manipulated thru
- * an xtExtend() or an xtTailgate(), depending upon whether the
- * disk allocation occurred as an inplace extension. the second
- * extent will be manipulated (created) through an xtInsert() and
- * will be for the pages following the extend page.
- */
- if (abnr && (!(xp->flag & XAD_NOTRECORDED)) && (nxlen > nbperpage)) {
- ntail = nbperpage;
- nextend = ntail - xlen;
- ninsert = nxlen - nbperpage;
-
- xflag = XAD_NOTRECORDED;
- } else {
- ntail = nxlen;
- nextend = delta;
- ninsert = 0;
-
- xflag = xp->flag;
- }
-
- /* if we were able to extend the disk allocation in place,
- * extend the extent. otherwise, move the extent to a
- * new disk location.
- */
- if (xaddr == nxaddr) {
- /* extend the extent */
- if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
- dbFree(ip, xaddr + xlen, delta);
- dquot_free_block(ip, nxlen);
- goto exit;
- }
- } else {
- /*
- * move the extent to a new location:
- *
- * xtTailgate() accounts for relocated tail extent;
- */
- if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
- dbFree(ip, nxaddr, nxlen);
- dquot_free_block(ip, nxlen);
- goto exit;
- }
- }
-
-
- /* check if we need to also insert a new extent */
- if (ninsert) {
- /* perform the insert. if it fails, free the blocks
- * to be inserted and make it appear that we only did
- * the xtExtend() or xtTailgate() above.
- */
- xaddr = nxaddr + ntail;
- if (xtInsert (0, ip, xflag, xoff + ntail, (int) ninsert,
- &xaddr, 0)) {
- dbFree(ip, xaddr, (s64) ninsert);
- delta = nextend;
- nxlen = ntail;
- xflag = 0;
- }
- }
-
- /* set the return results */
- XADaddress(xp, nxaddr);
- XADlength(xp, nxlen);
- XADoffset(xp, xoff);
- xp->flag = xflag;
-
- mark_inode_dirty(ip);
-exit:
- mutex_unlock(&JFS_IP(ip)->commit_mutex);
- return (rc);
-}
-#endif /* _NOTYET */
-
-
/*
* NAME: extHint()
*
@@ -412,6 +258,11 @@ int extRecord(struct inode *ip, xad_t * xp)
{
int rc;
+ if (isReadOnly(ip)) {
+ jfs_error(ip->i_sb, "read-only filesystem\n");
+ return -EIO;
+ }
+
txBeginAnon(ip->i_sb);
mutex_lock(&JFS_IP(ip)->commit_mutex);
@@ -423,44 +274,6 @@ int extRecord(struct inode *ip, xad_t * xp)
return rc;
}
-
-#ifdef _NOTYET
-/*
- * NAME: extFill()
- *
- * FUNCTION: allocate disk space for a file page that represents
- * a file hole.
- *
- * PARAMETERS:
- * ip - the inode of the file.
- * cp - cbuf of the file page represent the hole.
- *
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error.
- * -ENOSPC - insufficient disk resources.
- */
-int extFill(struct inode *ip, xad_t * xp)
-{
- int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
- s64 blkno = offsetXAD(xp) >> ip->i_blkbits;
-
-// assert(ISSPARSE(ip));
-
- /* initialize the extent allocation hint */
- XADaddress(xp, 0);
-
- /* allocate an extent to fill the hole */
- if ((rc = extAlloc(ip, nbperpage, blkno, xp, false)))
- return (rc);
-
- assert(lengthPXD(xp) == nbperpage);
-
- return (0);
-}
-#endif /* _NOTYET */
-
-
/*
* NAME: extBalloc()
*
@@ -508,6 +321,11 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* blocks in the map. in that case, we'll start off with the
* maximum free.
*/
+
+ /* give up if no space left */
+ if (bmp->db_maxfreebud == -1)
+ return -ENOSPC;
+
max = (s64) 1 << bmp->db_maxfreebud;
if (*nblocks >= max && *nblocks > nbperpage)
nb = nblks = (max > nbperpage) ? max : nbperpage;
@@ -550,64 +368,6 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
return (0);
}
-
-#ifdef _NOTYET
-/*
- * NAME: extBrealloc()
- *
- * FUNCTION: attempt to extend an extent's allocation.
- *
- * Initially, we will try to extend the extent's allocation
- * in place. If this fails, we'll try to move the extent
- * to a new set of blocks. If moving the extent, we initially
- * will try to allocate disk blocks for the requested size
- * (newnblks). if this fails (new contiguous free blocks not
- * available), we'll try to allocate a smaller number of
- * blocks (producing a smaller extent), with this smaller
- * number of blocks consisting of the requested number of
- * blocks rounded down to the next smaller power of 2
- * number (i.e. 16 -> 8). We'll continue to round down and
- * retry the allocation until the number of blocks to allocate
- * is smaller than the number of blocks per page.
- *
- * PARAMETERS:
- * ip - the inode of the file.
- * blkno - starting block number of the extents current allocation.
- * nblks - number of blocks within the extents current allocation.
- * newnblks - pointer to a s64 value. on entry, this value is the
- * new desired extent size (number of blocks). on
- * successful exit, this value is set to the extent's actual
- * new size (new number of blocks).
- * newblkno - the starting block number of the extents new allocation.
- *
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error.
- * -ENOSPC - insufficient disk resources.
- */
-static int
-extBrealloc(struct inode *ip,
- s64 blkno, s64 nblks, s64 * newnblks, s64 * newblkno)
-{
- int rc;
-
- /* try to extend in place */
- if ((rc = dbExtend(ip, blkno, nblks, *newnblks - nblks)) == 0) {
- *newblkno = blkno;
- return (0);
- } else {
- if (rc != -ENOSPC)
- return (rc);
- }
-
- /* in place extension not possible.
- * try to move the extent to a new set of blocks.
- */
- return (extBalloc(ip, blkno, newnblks, newblkno));
-}
-#endif /* _NOTYET */
-
-
/*
* NAME: extRoundDown()
*
diff --git a/fs/jfs/jfs_extent.h b/fs/jfs/jfs_extent.h
index 1c984214e95e..a0ee4ccea66e 100644
--- a/fs/jfs/jfs_extent.h
+++ b/fs/jfs/jfs_extent.h
@@ -10,9 +10,7 @@
(addressPXD(&(JFS_IP(ip)->ixpxd)) + lengthPXD(&(JFS_IP(ip)->ixpxd)) - 1)
extern int extAlloc(struct inode *, s64, s64, xad_t *, bool);
-extern int extFill(struct inode *, xad_t *);
extern int extHint(struct inode *, s64, xad_t *);
-extern int extRealloc(struct inode *, s64, xad_t *, bool);
extern int extRecord(struct inode *, xad_t *);
#endif /* _H_JFS_EXTENT */
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index b5d702df7111..8794281f8ffd 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -24,6 +24,7 @@
#define JFS_ERR_REMOUNT_RO 0x00000002 /* remount read-only */
#define JFS_ERR_CONTINUE 0x00000004 /* continue */
#define JFS_ERR_PANIC 0x00000008 /* panic */
+#define JFS_ERR_MASK (JFS_ERR_REMOUNT_RO|JFS_ERR_CONTINUE|JFS_ERR_PANIC)
/* Quota support */
#define JFS_USRQUOTA 0x00000010
@@ -122,7 +123,9 @@
#define NUM_INODE_PER_IAG INOSPERIAG
#define MINBLOCKSIZE 512
+#define L2MINBLOCKSIZE 9
#define MAXBLOCKSIZE 4096
+#define L2MAXBLOCKSIZE 12
#define MAXFILESIZE ((s64)1 << 52)
#define JFS_LINK_MAX 0xffffffff
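The two new constants are the base-2 logarithms of the block-size limits defined just above them (512 and 4096); the dbMount() hunk in the jfs_dmap.c diff uses L2MINBLOCKSIZE to bound db_l2nbperpage. A trivial standalone check of that relationship, with the constants copied from this header:

#include <assert.h>
#include <stdio.h>

#define MINBLOCKSIZE	512
#define L2MINBLOCKSIZE	9
#define MAXBLOCKSIZE	4096
#define L2MAXBLOCKSIZE	12

int main(void)
{
	/* 1 << 9 == 512 and 1 << 12 == 4096, so the log2 constants agree. */
	assert((1 << L2MINBLOCKSIZE) == MINBLOCKSIZE);
	assert((1 << L2MAXBLOCKSIZE) == MAXBLOCKSIZE);
	printf("log2 block-size constants are consistent\n");
	return 0;
}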
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 799d3837e7c2..ecb8e05b8b84 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -102,7 +102,7 @@ int diMount(struct inode *ipimap)
* allocate/initialize the in-memory inode map control structure
*/
/* allocate the in-memory inode map control structure. */
- imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
+ imap = kzalloc(sizeof(struct inomap), GFP_KERNEL);
if (imap == NULL)
return -ENOMEM;
@@ -193,6 +193,7 @@ int diUnmount(struct inode *ipimap, int mounterror)
* free in-memory control structure
*/
kfree(imap);
+ JFS_IP(ipimap)->i_imap = NULL;
return (0);
}
@@ -289,7 +290,7 @@ int diSync(struct inode *ipimap)
int diRead(struct inode *ip)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
- int iagno, ino, extno, rc;
+ int iagno, ino, extno, rc, agno;
struct inode *ipimap;
struct dinode *dp;
struct iag *iagp;
@@ -310,8 +311,8 @@ int diRead(struct inode *ip)
iagno = INOTOIAG(ip->i_ino);
/* read the iag */
- imap = JFS_IP(ipimap)->i_imap;
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
+ imap = JFS_IP(ipimap)->i_imap;
rc = diIAGRead(imap, iagno, &mp);
IREAD_UNLOCK(ipimap);
if (rc) {
@@ -338,8 +339,11 @@ int diRead(struct inode *ip)
/* get the ag for the iag */
agstart = le64_to_cpu(iagp->agstart);
+ agno = BLKTOAG(agstart, JFS_SBI(ip->i_sb));
release_metapage(mp);
+ if (agno >= MAXAG || agno < 0)
+ return -EIO;
rel_inode = (ino & (INOSPERPAGE - 1));
pageno = blkno >> sbi->l2nbperpage;
@@ -452,7 +456,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
- if ((copy_from_dinode(dp, ip)) != 0) {
+ if ((copy_from_dinode(dp, ip) != 0) || (ip->i_nlink == 0)) {
/* handle bad return by returning NULL for ip */
set_nlink(ip, 1); /* Don't want iput() deleting it */
iput(ip);
@@ -669,7 +673,7 @@ int diWrite(tid_t tid, struct inode *ip)
* This is the special xtree inside the directory for storing
* the directory table
*/
- xtpage_t *p, *xp;
+ xtroot_t *p, *xp;
xad_t *xad;
jfs_ip->xtlid = 0;
@@ -683,7 +687,7 @@ int diWrite(tid_t tid, struct inode *ip)
* copy xtree root from inode to dinode:
*/
p = &jfs_ip->i_xtroot;
- xp = (xtpage_t *) &dp->di_dirtable;
+ xp = (xtroot_t *) &dp->di_dirtable;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
@@ -712,7 +716,7 @@ int diWrite(tid_t tid, struct inode *ip)
* regular file: 16 byte (XAD slot) granularity
*/
if (type & tlckXTREE) {
- xtpage_t *p, *xp;
+ xtroot_t *p, *xp;
xad_t *xad;
/*
@@ -1319,7 +1323,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
int rc, ino, iagno, addext, extno, bitno, sword;
- int nwords, rem, i, agno;
+ int nwords, rem, i, agno, dn_numag;
u32 mask, inosmap, extsmap;
struct inode *ipimap;
struct metapage *mp;
@@ -1355,6 +1359,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
/* get the ag number of this iag */
agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
+ dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
+ if (agno < 0 || agno > dn_numag || agno >= MAXAG)
+ return -EIO;
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
/*
@@ -2175,6 +2182,9 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
/* get the ag and iag numbers for this iag.
*/
agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+ if (agno >= MAXAG || agno < 0)
+ return -EIO;
+
iagno = le32_to_cpu(iagp->iagnum);
/* check if this is the last free extent within the
@@ -3019,14 +3029,23 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
*
* RETURN VALUES:
* 0 - success
- * -ENOMEM - insufficient memory
+ * -EINVAL - unexpected inode type
*/
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+ int fileset = le32_to_cpu(dip->di_fileset);
+
+ switch (fileset) {
+ case AGGR_RESERVED_I: case AGGREGATE_I: case BMAP_I:
+ case LOG_I: case BADBLOCK_I: case FILESYSTEM_I:
+ break;
+ default:
+ return -EINVAL;
+ }
- jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
+ jfs_ip->fileset = fileset;
jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
jfs_set_inode_flags(ip);
@@ -3060,12 +3079,12 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
}
ip->i_size = le64_to_cpu(dip->di_size);
- ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
- ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
- ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
- ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
- ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
- ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
+ inode_set_atime(ip, le32_to_cpu(dip->di_atime.tv_sec),
+ le32_to_cpu(dip->di_atime.tv_nsec));
+ inode_set_mtime(ip, le32_to_cpu(dip->di_mtime.tv_sec),
+ le32_to_cpu(dip->di_mtime.tv_nsec));
+ inode_set_ctime(ip, le32_to_cpu(dip->di_ctime.tv_sec),
+ le32_to_cpu(dip->di_ctime.tv_nsec));
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
ip->i_generation = le32_to_cpu(dip->di_gen);
@@ -3137,12 +3156,12 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
else /* Leave the original permissions alone */
dip->di_mode = cpu_to_le32(jfs_ip->mode2);
- dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
- dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
- dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
- dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec);
- dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
- dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
+ dip->di_atime.tv_sec = cpu_to_le32(inode_get_atime_sec(ip));
+ dip->di_atime.tv_nsec = cpu_to_le32(inode_get_atime_nsec(ip));
+ dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime_sec(ip));
+ dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime_nsec(ip));
+ dip->di_mtime.tv_sec = cpu_to_le32(inode_get_mtime_sec(ip));
+ dip->di_mtime.tv_nsec = cpu_to_le32(inode_get_mtime_nsec(ip));
dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
dip->di_acl = jfs_ip->acl; /* as are dxd's */
dip->di_ea = jfs_ip->ea;
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 721def69e732..5aaafedb8fbc 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -66,7 +66,7 @@ struct jfs_inode_info {
lid_t xtlid; /* lid of xtree lock on directory */
union {
struct {
- xtpage_t _xtroot; /* 288: xtree root */
+ xtroot_t _xtroot; /* 288: xtree root */
struct inomap *_imap; /* 4: inode map header */
} file;
struct {
@@ -76,14 +76,14 @@ struct jfs_inode_info {
struct {
unchar _unused[16]; /* 16: */
dxd_t _dxd; /* 16: */
- /* _inline may overflow into _inline_ea when needed */
+ /* _inline_sym may overflow into _inline_ea when needed */
/* _inline_ea may overlay the last part of
* file._xtroot if maxentry = XTROOTINITSLOT
*/
union {
struct {
/* 128: inline symlink */
- unchar _inline[128];
+ unchar _inline_sym[128];
/* 128: inline extended attr */
unchar _inline_ea[128];
};
@@ -92,7 +92,7 @@ struct jfs_inode_info {
} link;
} u;
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
u32 dev; /* will die when we get wide dev_t */
struct inode vfs_inode;
@@ -101,7 +101,7 @@ struct jfs_inode_info {
#define i_imap u.file._imap
#define i_dirtable u.dir._table
#define i_dtroot u.dir._dtroot
-#define i_inline u.link._inline
+#define i_inline u.link._inline_sym
#define i_inline_ea u.link._inline_ea
#define i_inline_all u.link._inline_all
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 59379089e939..f10f295d1502 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -64,7 +64,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
goto fail_put;
}
- inode_init_owner(&init_user_ns, inode, parent, mode);
+ inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
/*
* New inodes need to save sane values on disk when
* uid & gid mount options are used
@@ -97,8 +97,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
jfs_inode->mode2 |= inode->i_mode;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- jfs_inode->otime = inode->i_ctime.tv_sec;
+ simple_inode_init_ts(inode);
+ jfs_inode->otime = inode_get_ctime_sec(inode);
inode->i_generation = JFS_SBI(sb)->gengen++;
jfs_inode->cflag = 0;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 7de961a81862..2c6c81c8cb9f 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -9,9 +9,9 @@ struct fid;
extern struct inode *ialloc(struct inode *, umode_t);
extern int jfs_fsync(struct file *, loff_t, loff_t, int);
-extern int jfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-extern int jfs_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
+extern int jfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+extern int jfs_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
@@ -28,7 +28,7 @@ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type);
extern void jfs_set_inode_flags(struct inode *);
extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern int jfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
+extern int jfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
extern const struct address_space_operations jfs_aops;
extern const struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 78fd136ac13b..b343c5ea1159 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -388,14 +388,6 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
linelock = (struct linelock *) & tlck->lock;
}
-#ifdef _JFS_WIP
- else if (tlck->flag & tlckINLINELOCK) {
-
- inlinelock = (struct inlinelock *) & tlck;
- p = (caddr_t) & inlinelock->pxd;
- linelock = (struct linelock *) & tlck;
- }
-#endif /* _JFS_WIP */
else {
jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
return 0; /* Probably should trap */
@@ -1066,7 +1058,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync)
int lmLogOpen(struct super_block *sb)
{
int rc;
- struct block_device *bdev;
+ struct file *bdev_file;
struct jfs_log *log;
struct jfs_sb_info *sbi = JFS_SBI(sb);
@@ -1078,7 +1070,7 @@ int lmLogOpen(struct super_block *sb)
mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
- if (log->bdev->bd_dev == sbi->logdev) {
+ if (file_bdev(log->bdev_file)->bd_dev == sbi->logdev) {
if (!uuid_equal(&log->uuid, &sbi->loguuid)) {
jfs_warn("wrong uuid on JFS journal");
mutex_unlock(&jfs_log_mutex);
@@ -1108,14 +1100,14 @@ int lmLogOpen(struct super_block *sb)
* file systems to log may have n-to-1 relationship;
*/
- bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
- log);
- if (IS_ERR(bdev)) {
- rc = PTR_ERR(bdev);
+ bdev_file = bdev_file_open_by_dev(sbi->logdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL);
+ if (IS_ERR(bdev_file)) {
+ rc = PTR_ERR(bdev_file);
goto free;
}
- log->bdev = bdev;
+ log->bdev_file = bdev_file;
uuid_copy(&log->uuid, &sbi->loguuid);
/*
@@ -1149,7 +1141,7 @@ journal_found:
lbmLogShutdown(log);
close: /* close external log device */
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ bdev_fput(bdev_file);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
@@ -1170,7 +1162,7 @@ static int open_inline_log(struct super_block *sb)
init_waitqueue_head(&log->syncwait);
set_bit(log_INLINELOG, &log->flag);
- log->bdev = sb->s_bdev;
+ log->bdev_file = sb->s_bdev_file;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
(L2LOGPSIZE - sb->s_blocksize_bits);
@@ -1207,7 +1199,6 @@ static int open_dummy_log(struct super_block *sb)
init_waitqueue_head(&dummy_log->syncwait);
dummy_log->no_integrity = 1;
/* Make up some stuff */
- dummy_log->base = 0;
dummy_log->size = 1024;
rc = lmLogInit(dummy_log);
if (rc) {
@@ -1444,7 +1435,7 @@ int lmLogClose(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
- struct block_device *bdev;
+ struct file *bdev_file;
int rc = 0;
jfs_info("lmLogClose: log:0x%p", log);
@@ -1490,10 +1481,10 @@ int lmLogClose(struct super_block *sb)
* external log as separate logical volume
*/
list_del(&log->journal_list);
- bdev = log->bdev;
+ bdev_file = log->bdev_file;
rc = lmLogShutdown(log);
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ bdev_fput(bdev_file);
kfree(log);
@@ -1608,7 +1599,7 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
mp, sizeof(struct metapage), 0);
print_hex_dump(KERN_ERR, "page: ",
DUMP_PREFIX_ADDRESS, 16,
- sizeof(long), mp->page,
+ sizeof(long), mp->folio,
sizeof(struct page), 0);
} else
print_hex_dump(KERN_ERR, "tblock:",
@@ -1980,17 +1971,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bp->l_flag |= lbmREAD;
- bio = bio_alloc(GFP_NOFS, 1);
-
+ bio = bio_alloc(file_bdev(log->bdev_file), 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio_set_dev(bio, log->bdev);
-
- bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
+ __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
- bio->bi_opf = REQ_OP_READ;
/*check if journaling to disk has been disabled*/
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
@@ -2122,19 +2109,21 @@ static void lbmStartIO(struct lbuf * bp)
{
struct bio *bio;
struct jfs_log *log = bp->l_log;
+ struct block_device *bdev = NULL;
jfs_info("lbmStartIO");
- bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio_set_dev(bio, log->bdev);
+ if (!log->no_integrity)
+ bdev = file_bdev(log->bdev_file);
- bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
+ bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
+ GFP_NOFS);
+ bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
+ __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
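
The lbmRead()/lbmStartIO() hunks above move to the combined bio_alloc() interface, where the target device, operation and flags are supplied at allocation time. A minimal sketch of that pattern for a single data page follows; the function and handler names are hypothetical and error handling is trimmed (kernel-internal API, not a standalone program):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_end_io(struct bio *bio)
{
	/* completion handling would go here */
	bio_put(bio);
}

static void example_write_one_page(struct block_device *bdev,
				   struct page *page, sector_t sector)
{
	struct bio *bio;

	/* device, operation and flags are passed at allocation time */
	bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
	bio->bi_iter.bi_sector = sector;
	/* one page always fits in a freshly allocated single-vec bio */
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = example_end_io;
	submit_bio(bio);
}
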
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 805877ce5020..8b8994e48cd0 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -356,7 +356,7 @@ struct jfs_log {
* before writing syncpt.
*/
struct list_head journal_list; /* Global list */
- struct block_device *bdev; /* 4: log lv pointer */
+ struct file *bdev_file; /* 4: log lv pointer */
int serial; /* 4: log mount serial number */
s64 base; /* @8: log extent address (inline log ) */
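
With log->bdev replaced by a struct file, opening and closing an external log follows the bdev_file pattern used in lmLogOpen()/lmLogClose() above. A minimal sketch with hypothetical helper names (kernel-internal API, assuming the caller handles the error path):

#include <linux/blkdev.h>

static struct file *example_open_log(dev_t devt, void *holder)
{
	struct file *bdev_file;

	bdev_file = bdev_file_open_by_dev(devt,
			BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
	if (IS_ERR(bdev_file))
		return bdev_file;

	/* the raw block_device is still reachable when needed */
	pr_info("opened log on %pg\n", file_bdev(bdev_file));
	return bdev_file;
}

static void example_close_log(struct file *bdev_file)
{
	bdev_fput(bdev_file);	/* replaces blkdev_put() */
}
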
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 104ae698443e..871cf4fb3636 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -4,6 +4,7 @@
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*/
+#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -14,6 +15,7 @@
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
+#include <linux/migrate.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
@@ -46,9 +48,9 @@ static inline void __lock_metapage(struct metapage *mp)
do {
set_current_state(TASK_UNINTERRUPTIBLE);
if (metapage_locked(mp)) {
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
io_schedule();
- lock_page(mp->page);
+ folio_lock(mp->folio);
}
} while (trylock_metapage(mp));
__set_current_state(TASK_RUNNING);
@@ -56,7 +58,7 @@ static inline void __lock_metapage(struct metapage *mp)
}
/*
- * Must have mp->page locked
+ * Must have mp->folio locked
*/
static inline void lock_metapage(struct metapage *mp)
{
@@ -75,36 +77,36 @@ static mempool_t *metapage_mempool;
struct meta_anchor {
int mp_count;
atomic_t io_count;
+ blk_status_t status;
struct metapage *mp[MPS_PER_PAGE];
};
-#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
-static inline struct metapage *page_to_mp(struct page *page, int offset)
+static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
- if (!PagePrivate(page))
+ struct meta_anchor *anchor = folio->private;
+
+ if (!anchor)
return NULL;
- return mp_anchor(page)->mp[offset >> L2PSIZE];
+ return anchor->mp[offset >> L2PSIZE];
}
-static inline int insert_metapage(struct page *page, struct metapage *mp)
+static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
struct meta_anchor *a;
int index;
int l2mp_blocks; /* log2 blocks per metapage */
- if (PagePrivate(page))
- a = mp_anchor(page);
- else {
+ a = folio->private;
+ if (!a) {
a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
if (!a)
return -ENOMEM;
- set_page_private(page, (unsigned long)a);
- SetPagePrivate(page);
- kmap(page);
+ folio_attach_private(folio, a);
+ kmap(&folio->page);
}
if (mp) {
- l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
+ l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
a->mp_count++;
a->mp[index] = mp;
@@ -113,10 +115,10 @@ static inline int insert_metapage(struct page *page, struct metapage *mp)
return 0;
}
-static inline void remove_metapage(struct page *page, struct metapage *mp)
+static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
- struct meta_anchor *a = mp_anchor(page);
- int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
+ struct meta_anchor *a = folio->private;
+ int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
int index;
index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
@@ -126,48 +128,134 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
a->mp[index] = NULL;
if (--a->mp_count == 0) {
kfree(a);
- set_page_private(page, 0);
- ClearPagePrivate(page);
- kunmap(page);
+ folio_detach_private(folio);
+ kunmap(&folio->page);
}
}
-static inline void inc_io(struct page *page)
+static inline void inc_io(struct folio *folio)
{
- atomic_inc(&mp_anchor(page)->io_count);
+ struct meta_anchor *anchor = folio->private;
+
+ atomic_inc(&anchor->io_count);
}
-static inline void dec_io(struct page *page, void (*handler) (struct page *))
+static inline void dec_io(struct folio *folio, blk_status_t status,
+ void (*handler)(struct folio *, blk_status_t))
{
- if (atomic_dec_and_test(&mp_anchor(page)->io_count))
- handler(page);
+ struct meta_anchor *anchor = folio->private;
+
+ if (anchor->status == BLK_STS_OK)
+ anchor->status = status;
+
+ if (atomic_dec_and_test(&anchor->io_count))
+ handler(folio, anchor->status);
}
+#ifdef CONFIG_MIGRATION
+static int __metapage_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ struct meta_anchor *src_anchor = src->private;
+ struct metapage *mps[MPS_PER_PAGE] = {0};
+ struct metapage *mp;
+ int i, rc;
+
+ for (i = 0; i < MPS_PER_PAGE; i++) {
+ mp = src_anchor->mp[i];
+ if (mp && metapage_locked(mp))
+ return -EAGAIN;
+ }
+
+ rc = filemap_migrate_folio(mapping, dst, src, mode);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < MPS_PER_PAGE; i++) {
+ mp = src_anchor->mp[i];
+ if (!mp)
+ continue;
+ if (unlikely(insert_metapage(dst, mp))) {
+ /* If error, roll back previously inserted pages */
+ for (int j = 0; j < i; j++) {
+ if (mps[j])
+ remove_metapage(dst, mps[j]);
+ }
+ return -EAGAIN;
+ }
+ mps[i] = mp;
+ }
+
+ /* Update the metapage and remove it from src */
+ for (i = 0; i < MPS_PER_PAGE; i++) {
+ mp = mps[i];
+ if (mp) {
+ int page_offset = mp->data - folio_address(src);
+
+ mp->data = folio_address(dst) + page_offset;
+ mp->folio = dst;
+ remove_metapage(src, mp);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MIGRATION */
+
#else
-static inline struct metapage *page_to_mp(struct page *page, int offset)
+
+static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
- return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
+ return folio->private;
}
-static inline int insert_metapage(struct page *page, struct metapage *mp)
+static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
if (mp) {
- set_page_private(page, (unsigned long)mp);
- SetPagePrivate(page);
- kmap(page);
+ folio_attach_private(folio, mp);
+ kmap(&folio->page);
}
return 0;
}
-static inline void remove_metapage(struct page *page, struct metapage *mp)
+static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
- set_page_private(page, 0);
- ClearPagePrivate(page);
- kunmap(page);
+ folio_detach_private(folio);
+ kunmap(&folio->page);
}
-#define inc_io(page) do {} while(0)
-#define dec_io(page, handler) handler(page)
+#define inc_io(folio) do {} while(0)
+#define dec_io(folio, status, handler) handler(folio, status)
+
+#ifdef CONFIG_MIGRATION
+static int __metapage_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ struct metapage *mp;
+ int page_offset;
+ int rc;
+
+ mp = folio_to_mp(src, 0);
+ if (metapage_locked(mp))
+ return -EAGAIN;
+
+ rc = filemap_migrate_folio(mapping, dst, src, mode);
+ if (rc)
+ return rc;
+
+ if (unlikely(insert_metapage(dst, mp)))
+ return -EAGAIN;
+
+ page_offset = mp->data - folio_address(src);
+ mp->data = folio_address(dst) + page_offset;
+ mp->folio = dst;
+ remove_metapage(src, mp);
+
+ return 0;
+}
+#endif /* CONFIG_MIGRATION */
#endif
@@ -218,12 +306,12 @@ void metapage_exit(void)
kmem_cache_destroy(metapage_cache);
}
-static inline void drop_metapage(struct page *page, struct metapage *mp)
+static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
test_bit(META_io, &mp->flag))
return;
- remove_metapage(page, mp);
+ remove_metapage(folio, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
@@ -257,23 +345,20 @@ static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
return lblock;
}
-static void last_read_complete(struct page *page)
+static void last_read_complete(struct folio *folio, blk_status_t status)
{
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
+ if (status)
+ printk(KERN_ERR "Read error %d at %#llx\n", status,
+ folio_pos(folio));
+
+ folio_end_read(folio, status == 0);
}
static void metapage_read_end_io(struct bio *bio)
{
- struct page *page = bio->bi_private;
-
- if (bio->bi_status) {
- printk(KERN_ERR "metapage_read_end_io: I/O error\n");
- SetPageError(page);
- }
+ struct folio *folio = bio->bi_private;
- dec_io(page, last_read_complete);
+ dec_io(folio, bio->bi_status, last_read_complete);
bio_put(bio);
}
@@ -299,13 +384,19 @@ static void remove_from_logsync(struct metapage *mp)
LOGSYNC_UNLOCK(log, flags);
}
-static void last_write_complete(struct page *page)
+static void last_write_complete(struct folio *folio, blk_status_t status)
{
struct metapage *mp;
unsigned int offset;
+ if (status) {
+ int err = blk_status_to_errno(status);
+ printk(KERN_ERR "metapage_write_end_io: I/O error\n");
+ mapping_set_error(folio->mapping, err);
+ }
+
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (mp && test_bit(META_io, &mp->flag)) {
if (mp->lsn)
remove_from_logsync(mp);
@@ -316,28 +407,25 @@ static void last_write_complete(struct page *page)
* safe unless I have the page locked
*/
}
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
static void metapage_write_end_io(struct bio *bio)
{
- struct page *page = bio->bi_private;
+ struct folio *folio = bio->bi_private;
- BUG_ON(!PagePrivate(page));
+ BUG_ON(!folio->private);
- if (bio->bi_status) {
- printk(KERN_ERR "metapage_write_end_io: I/O error\n");
- SetPageError(page);
- }
- dec_io(page, last_write_complete);
+ dec_io(folio, bio->bi_status, last_write_complete);
bio_put(bio);
}
-static int metapage_writepage(struct page *page, struct writeback_control *wbc)
+static int metapage_write_folio(struct folio *folio,
+ struct writeback_control *wbc)
{
struct bio *bio = NULL;
int block_offset; /* block offset of mp within page */
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
int len;
int xlen;
@@ -353,14 +441,13 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
int offset;
int bad_blocks = 0;
- page_start = (sector_t)page->index <<
- (PAGE_SHIFT - inode->i_blkbits);
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ page_start = folio_pos(folio) >> inode->i_blkbits;
+ BUG_ON(!folio_test_locked(folio));
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (!mp || !test_bit(META_dirty, &mp->flag))
continue;
@@ -389,22 +476,20 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
continue;
}
/* Not contiguous */
- if (bio_add_page(bio, page, bio_bytes, bio_offset) <
- bio_bytes)
- goto add_failed;
+ bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
/*
* Increment counter before submitting i/o to keep
* count from hitting zero before we're through
*/
- inc_io(page);
+ inc_io(folio);
if (!bio->bi_iter.bi_size)
goto dump_bio;
submit_bio(bio);
nr_underway++;
bio = NULL;
} else
- inc_io(page);
- xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
+ inc_io(folio);
+ xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
pblock = metapage_get_blocks(inode, lblock, &xlen);
if (!pblock) {
printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -417,12 +502,10 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
}
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
- bio->bi_private = page;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_private = folio;
/* Don't call bio_add_page yet, we may add to this vec */
bio_offset = offset;
@@ -432,8 +515,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
next_block = lblock + len;
}
if (bio) {
- if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
- goto add_failed;
+ bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
if (!bio->bi_iter.bi_size)
goto dump_bio;
@@ -441,49 +523,58 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
nr_underway++;
}
if (redirty)
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
- unlock_page(page);
+ folio_unlock(folio);
if (bad_blocks)
goto err_out;
if (nr_underway == 0)
- end_page_writeback(page);
+ folio_end_writeback(folio);
return 0;
-add_failed:
- /* We should never reach here, since we're only adding one vec */
- printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
- goto skip;
dump_bio:
print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
4, bio, sizeof(*bio), 0);
-skip:
bio_put(bio);
- unlock_page(page);
- dec_io(page, last_write_complete);
+ folio_unlock(folio);
+ dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
while (bad_blocks--)
- dec_io(page, last_write_complete);
+ dec_io(folio, BLK_STS_OK, last_write_complete);
return -EIO;
}
-static int metapage_readpage(struct file *fp, struct page *page)
+static int metapage_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct blk_plug plug;
+ struct folio *folio = NULL;
+ int err;
+
+ blk_start_plug(&plug);
+ while ((folio = writeback_iter(mapping, wbc, folio, &err)))
+ err = metapage_write_folio(folio, wbc);
+ blk_finish_plug(&plug);
+
+ return err;
+}
+
+static int metapage_read_folio(struct file *fp, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct bio *bio = NULL;
int block_offset;
- int blocks_per_page = i_blocks_per_page(inode, page);
+ int blocks_per_page = i_blocks_per_folio(inode, folio);
sector_t page_start; /* address of page in fs blocks */
sector_t pblock;
int xlen;
unsigned int len;
int offset;
- BUG_ON(!PageLocked(page));
- page_start = (sector_t)page->index <<
- (PAGE_SHIFT - inode->i_blkbits);
+ BUG_ON(!folio_test_locked(folio));
+ page_start = folio_pos(folio) >> inode->i_blkbits;
block_offset = 0;
while (block_offset < blocks_per_page) {
@@ -491,23 +582,21 @@ static int metapage_readpage(struct file *fp, struct page *page)
pblock = metapage_get_blocks(inode, page_start + block_offset,
&xlen);
if (pblock) {
- if (!PagePrivate(page))
- insert_metapage(page, NULL);
- inc_io(page);
+ if (!folio->private)
+ insert_metapage(folio, NULL);
+ inc_io(folio);
if (bio)
submit_bio(bio);
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
+ GFP_NOFS);
bio->bi_iter.bi_sector =
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
- bio->bi_private = page;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_private = folio;
len = xlen << inode->i_blkbits;
offset = block_offset << inode->i_blkbits;
- if (bio_add_page(bio, page, len, offset) < len)
- goto add_failed;
+ bio_add_folio_nofail(bio, folio, len, offset);
block_offset += xlen;
} else
block_offset++;
@@ -515,62 +604,80 @@ static int metapage_readpage(struct file *fp, struct page *page)
if (bio)
submit_bio(bio);
else
- unlock_page(page);
+ folio_unlock(folio);
return 0;
-
-add_failed:
- printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
- bio_put(bio);
- dec_io(page, last_read_complete);
- return -EIO;
}
-static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
+static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
struct metapage *mp;
- int ret = 1;
+ bool ret = true;
int offset;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (!mp)
continue;
- jfs_info("metapage_releasepage: mp = 0x%p", mp);
+ jfs_info("metapage_release_folio: mp = 0x%p", mp);
if (mp->count || mp->nohomeok ||
test_bit(META_dirty, &mp->flag)) {
jfs_info("count = %ld, nohomeok = %d", mp->count,
mp->nohomeok);
- ret = 0;
+ ret = false;
continue;
}
if (mp->lsn)
remove_from_logsync(mp);
- remove_metapage(page, mp);
+ remove_metapage(folio, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
return ret;
}
-static void metapage_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+#ifdef CONFIG_MIGRATION
+/*
+ * metapage_migrate_folio - Migration function for JFS metapages
+ */
+static int metapage_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
{
- BUG_ON(offset || length < PAGE_SIZE);
+ int expected_count;
- BUG_ON(PageWriteback(page));
+ if (!src->private)
+ return filemap_migrate_folio(mapping, dst, src, mode);
- metapage_releasepage(page, 0);
+ /* Check whether page does not have extra refs before we do more work */
+ expected_count = folio_expected_ref_count(src) + 1;
+ if (folio_ref_count(src) != expected_count)
+ return -EAGAIN;
+ return __metapage_migrate_folio(mapping, dst, src, mode);
+}
+#else
+#define metapage_migrate_folio NULL
+#endif /* CONFIG_MIGRATION */
+
+static void metapage_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
+{
+ BUG_ON(offset || length < folio_size(folio));
+
+ BUG_ON(folio_test_writeback(folio));
+
+ metapage_release_folio(folio, 0);
}
const struct address_space_operations jfs_metapage_aops = {
- .readpage = metapage_readpage,
- .writepage = metapage_writepage,
- .releasepage = metapage_releasepage,
- .invalidatepage = metapage_invalidatepage,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .read_folio = metapage_read_folio,
+ .writepages = metapage_writepages,
+ .release_folio = metapage_release_folio,
+ .invalidate_folio = metapage_invalidate_folio,
+ .dirty_folio = filemap_dirty_folio,
+ .migrate_folio = metapage_migrate_folio,
};
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
@@ -581,7 +688,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
int l2bsize;
struct address_space *mapping;
struct metapage *mp = NULL;
- struct page *page;
+ struct folio *folio;
unsigned long page_index;
unsigned long page_offset;
@@ -612,22 +719,22 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
}
if (new && (PSIZE == PAGE_SIZE)) {
- page = grab_cache_page(mapping, page_index);
- if (!page) {
- jfs_err("grab_cache_page failed!");
+ folio = filemap_grab_folio(mapping, page_index);
+ if (IS_ERR(folio)) {
+ jfs_err("filemap_grab_folio failed!");
return NULL;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else {
- page = read_mapping_page(mapping, page_index, NULL);
- if (IS_ERR(page) || !PageUptodate(page)) {
+ folio = read_mapping_folio(mapping, page_index, NULL);
+ if (IS_ERR(folio)) {
jfs_err("read_mapping_page failed!");
return NULL;
}
- lock_page(page);
+ folio_lock(folio);
}
- mp = page_to_mp(page, page_offset);
+ mp = folio_to_mp(folio, page_offset);
if (mp) {
if (mp->logical_size != size) {
jfs_error(inode->i_sb,
@@ -653,16 +760,16 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
mp = alloc_metapage(GFP_NOFS);
if (!mp)
goto unlock;
- mp->page = page;
+ mp->folio = folio;
mp->sb = inode->i_sb;
mp->flag = 0;
mp->xflag = COMMIT_PAGE;
mp->count = 1;
mp->nohomeok = 0;
mp->logical_size = size;
- mp->data = page_address(page) + page_offset;
+ mp->data = folio_address(folio) + page_offset;
mp->index = lblock;
- if (unlikely(insert_metapage(page, mp))) {
+ if (unlikely(insert_metapage(folio, mp))) {
free_metapage(mp);
goto unlock;
}
@@ -674,92 +781,118 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
memset(mp->data, 0, PSIZE);
}
- unlock_page(page);
+ folio_unlock(folio);
jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
return mp;
unlock:
- unlock_page(page);
+ folio_unlock(folio);
return NULL;
}
void grab_metapage(struct metapage * mp)
{
jfs_info("grab_metapage: mp = 0x%p", mp);
- get_page(mp->page);
- lock_page(mp->page);
+ folio_get(mp->folio);
+ folio_lock(mp->folio);
mp->count++;
lock_metapage(mp);
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
+}
+
+static int metapage_write_one(struct folio *folio)
+{
+ struct address_space *mapping = folio->mapping;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = folio_nr_pages(folio),
+ };
+ int ret = 0;
+
+ BUG_ON(!folio_test_locked(folio));
+
+ folio_wait_writeback(folio);
+
+ if (folio_clear_dirty_for_io(folio)) {
+ folio_get(folio);
+ ret = metapage_write_folio(folio, &wbc);
+ if (ret == 0)
+ folio_wait_writeback(folio);
+ folio_put(folio);
+ } else {
+ folio_unlock(folio);
+ }
+
+ if (!ret)
+ ret = filemap_check_errors(mapping);
+ return ret;
}
void force_metapage(struct metapage *mp)
{
- struct page *page = mp->page;
+ struct folio *folio = mp->folio;
jfs_info("force_metapage: mp = 0x%p", mp);
set_bit(META_forcewrite, &mp->flag);
clear_bit(META_sync, &mp->flag);
- get_page(page);
- lock_page(page);
- set_page_dirty(page);
- if (write_one_page(page))
- jfs_error(mp->sb, "write_one_page() failed\n");
+ folio_get(folio);
+ folio_lock(folio);
+ folio_mark_dirty(folio);
+ if (metapage_write_one(folio))
+ jfs_error(mp->sb, "metapage_write_one() failed\n");
clear_bit(META_forcewrite, &mp->flag);
- put_page(page);
+ folio_put(folio);
}
void hold_metapage(struct metapage *mp)
{
- lock_page(mp->page);
+ folio_lock(mp->folio);
}
void put_metapage(struct metapage *mp)
{
if (mp->count || mp->nohomeok) {
/* Someone else will release this */
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
return;
}
- get_page(mp->page);
+ folio_get(mp->folio);
mp->count++;
lock_metapage(mp);
- unlock_page(mp->page);
+ folio_unlock(mp->folio);
release_metapage(mp);
}
void release_metapage(struct metapage * mp)
{
- struct page *page = mp->page;
+ struct folio *folio = mp->folio;
jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
- BUG_ON(!page);
-
- lock_page(page);
+ folio_lock(folio);
unlock_metapage(mp);
assert(mp->count);
if (--mp->count || mp->nohomeok) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return;
}
if (test_bit(META_dirty, &mp->flag)) {
- set_page_dirty(page);
+ folio_mark_dirty(folio);
if (test_bit(META_sync, &mp->flag)) {
clear_bit(META_sync, &mp->flag);
- if (write_one_page(page))
- jfs_error(mp->sb, "write_one_page() failed\n");
- lock_page(page); /* write_one_page unlocks the page */
+ if (metapage_write_one(folio))
+ jfs_error(mp->sb, "metapage_write_one() failed\n");
+ folio_lock(folio);
}
} else if (mp->lsn) /* discard_metapage doesn't remove it */
remove_from_logsync(mp);
/* Try to keep metapages from using up too much memory */
- drop_metapage(page, mp);
+ drop_metapage(folio, mp);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
@@ -771,7 +904,6 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
struct address_space *mapping =
JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
struct metapage *mp;
- struct page *page;
unsigned int offset;
/*
@@ -780,11 +912,12 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
*/
for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
lblock += BlocksPerPage) {
- page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
- if (!page)
+ struct folio *folio = filemap_lock_folio(mapping,
+ lblock >> l2BlocksPerPage);
+ if (IS_ERR(folio))
continue;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = folio_to_mp(folio, offset);
if (!mp)
continue;
if (mp->index < addr)
@@ -797,8 +930,8 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
if (mp->lsn)
remove_from_logsync(mp);
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
}
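
Throughout jfs_metapage.c the per-page bookkeeping now lives in folio->private instead of page_private()/PagePrivate(). A minimal sketch of the attach/detach pairing assumed above (hypothetical names; the real code stores either a metapage or a meta_anchor depending on PSIZE):

#include <linux/pagemap.h>

static void example_attach(struct folio *folio, void *anchor)
{
	/* sets folio->private and takes the private reference */
	folio_attach_private(folio, anchor);
}

static void *example_detach(struct folio *folio)
{
	/* clears folio->private and drops the private reference */
	return folio_detach_private(folio);
}
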
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 4179f9df4deb..2e5015c2705b 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -24,7 +24,7 @@ struct metapage {
wait_queue_head_t wait;
/* implementation */
- struct page *page;
+ struct folio *folio;
struct super_block *sb;
unsigned int logical_size;
@@ -90,14 +90,14 @@ static inline void discard_metapage(struct metapage *mp)
static inline void metapage_nohomeok(struct metapage *mp)
{
- struct page *page = mp->page;
- lock_page(page);
+ struct folio *folio = mp->folio;
+ folio_lock(folio);
if (!mp->nohomeok++) {
mark_metapage_dirty(mp);
- get_page(page);
- wait_on_page_writeback(page);
+ folio_get(folio);
+ folio_wait_writeback(folio);
}
- unlock_page(page);
+ folio_unlock(folio);
}
/*
@@ -107,7 +107,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
static inline void metapage_wait_for_io(struct metapage *mp)
{
if (test_bit(META_io, &mp->flag))
- wait_on_page_writeback(mp->page);
+ folio_wait_writeback(mp->folio);
}
/*
@@ -116,7 +116,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
static inline void _metapage_homeok(struct metapage *mp)
{
if (!--mp->nohomeok)
- put_page(mp->page);
+ folio_put(mp->folio);
}
static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index aa4ff7bcaff2..52e6b58c5dbd 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -172,15 +172,15 @@ int jfs_mount(struct super_block *sb)
}
jfs_info("jfs_mount: ipimap:0x%p", ipimap);
- /* map further access of per fileset inodes by the fileset inode */
- sbi->ipimap = ipimap;
-
/* initialize fileset inode allocation map */
if ((rc = diMount(ipimap))) {
jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
goto err_ipimap;
}
+ /* map further access of per fileset inodes by the fileset inode */
+ sbi->ipimap = ipimap;
+
return rc;
/*
@@ -234,11 +234,15 @@ int jfs_mount_rw(struct super_block *sb, int remount)
truncate_inode_pages(sbi->ipimap->i_mapping, 0);
truncate_inode_pages(sbi->ipbmap->i_mapping, 0);
+
+ IWRITE_LOCK(sbi->ipimap, RDWRLOCK_IMAP);
diUnmount(sbi->ipimap, 1);
if ((rc = diMount(sbi->ipimap))) {
+ IWRITE_UNLOCK(sbi->ipimap);
jfs_err("jfs_mount_rw: diMount failed!");
return rc;
}
+ IWRITE_UNLOCK(sbi->ipimap);
dbUnmount(sbi->ipbmap, 1);
if ((rc = dbMount(sbi->ipbmap))) {
@@ -307,13 +311,11 @@ static int chkSuper(struct super_block *sb)
}
bsize = le32_to_cpu(j_sb->s_bsize);
-#ifdef _JFS_4K
if (bsize != PSIZE) {
- jfs_err("Currently only 4K block size supported!");
+ jfs_err("Only 4K block size supported!");
rc = -EINVAL;
goto out;
}
-#endif /* _JFS_4K */
jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx",
le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),
@@ -323,13 +325,13 @@ static int chkSuper(struct super_block *sb)
if ((j_sb->s_flag & cpu_to_le32(JFS_BAD_SAIT)) !=
cpu_to_le32(JFS_BAD_SAIT)) {
expected_AIM_bytesize = 2 * PSIZE;
- AIM_bytesize = lengthPXD(&(j_sb->s_aim2)) * bsize;
+ AIM_bytesize = lengthPXD(&j_sb->s_aim2) * bsize;
expected_AIT_bytesize = 4 * PSIZE;
- AIT_bytesize = lengthPXD(&(j_sb->s_ait2)) * bsize;
- AIM_byte_addr = addressPXD(&(j_sb->s_aim2)) * bsize;
- AIT_byte_addr = addressPXD(&(j_sb->s_ait2)) * bsize;
+ AIT_bytesize = lengthPXD(&j_sb->s_ait2) * bsize;
+ AIM_byte_addr = addressPXD(&j_sb->s_aim2) * bsize;
+ AIT_byte_addr = addressPXD(&j_sb->s_ait2) * bsize;
byte_addr_diff0 = AIT_byte_addr - AIM_byte_addr;
- fsckwsp_addr = addressPXD(&(j_sb->s_fsckpxd)) * bsize;
+ fsckwsp_addr = addressPXD(&j_sb->s_fsckpxd) * bsize;
byte_addr_diff1 = fsckwsp_addr - AIT_byte_addr;
if ((AIM_bytesize != expected_AIM_bytesize) ||
(AIT_bytesize != expected_AIT_bytesize) ||
@@ -428,7 +430,8 @@ int updateSuper(struct super_block *sb, uint state)
if (state == FM_MOUNT) {
/* record log's dev_t and mount serial number */
- j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
+ j_sb->s_logdev = cpu_to_le32(
+ new_encode_dev(file_bdev(sbi->log->bdev_file)->bd_dev));
j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
} else if (state == FM_CLEAN) {
/*
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 042bbe6d8ac2..c16578af3a77 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -272,14 +272,15 @@ int txInit(void)
if (TxBlock == NULL)
return -ENOMEM;
- for (k = 1; k < nTxBlock - 1; k++) {
- TxBlock[k].next = k + 1;
+ for (k = 0; k < nTxBlock; k++) {
init_waitqueue_head(&TxBlock[k].gcwait);
init_waitqueue_head(&TxBlock[k].waitor);
}
+
+ for (k = 1; k < nTxBlock - 1; k++) {
+ TxBlock[k].next = k + 1;
+ }
TxBlock[k].next = 0;
- init_waitqueue_head(&TxBlock[k].gcwait);
- init_waitqueue_head(&TxBlock[k].waitor);
TxAnchor.freetid = 1;
init_waitqueue_head(&TxAnchor.freewait);
@@ -354,6 +355,11 @@ tid_t txBegin(struct super_block *sb, int flag)
jfs_info("txBegin: flag = 0x%x", flag);
log = JFS_SBI(sb)->log;
+ if (!log) {
+ jfs_error(sb, "read-only filesystem\n");
+ return 0;
+ }
+
TXN_LOCK();
INCREMENT(TxStat.txBegin);
@@ -778,7 +784,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
if (mp->xflag & COMMIT_PAGE)
p = (xtpage_t *) mp->data;
else
- p = &jfs_ip->i_xtroot;
+ p = (xtpage_t *) &jfs_ip->i_xtroot;
xtlck->lwm.offset =
le16_to_cpu(p->header.nextindex);
}
@@ -1281,7 +1287,7 @@ int txCommit(tid_t tid, /* transaction identifier */
* to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
* Joern
*/
- if (tblk->u.ip->i_state & I_SYNC)
+ if (inode_state_read_once(tblk->u.ip) & I_SYNC)
tblk->xflag &= ~COMMIT_LAZY;
}
@@ -1490,40 +1496,6 @@ static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
tlck->flag |= tlckWRITEPAGE;
} else
jfs_err("diLog: UFO type tlck:0x%p", tlck);
-#ifdef _JFS_WIP
- /*
- * alloc/free external EA extent
- *
- * a maplock for txUpdateMap() to update bPWMAP for alloc/free
- * of the extent has been formatted at txLock() time;
- */
- else {
- assert(tlck->type & tlckEA);
-
- /* log LOG_UPDATEMAP for logredo() to update bmap for
- * alloc of new (and free of old) external EA extent;
- */
- lrd->type = cpu_to_le16(LOG_UPDATEMAP);
- pxdlock = (struct pxd_lock *) & tlck->lock;
- nlock = pxdlock->index;
- for (i = 0; i < nlock; i++, pxdlock++) {
- if (pxdlock->flag & mlckALLOCPXD)
- lrd->log.updatemap.type =
- cpu_to_le16(LOG_ALLOCPXD);
- else
- lrd->log.updatemap.type =
- cpu_to_le16(LOG_FREEPXD);
- lrd->log.updatemap.nxd = cpu_to_le16(1);
- lrd->log.updatemap.pxd = pxdlock->pxd;
- lrd->backchain =
- cpu_to_le32(lmLog(log, tblk, lrd, NULL));
- }
-
- /* update bmap */
- tlck->flag |= tlckUPDATEMAP;
- }
-#endif /* _JFS_WIP */
-
return;
}
@@ -1705,7 +1677,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
if (tlck->type & tlckBTROOT) {
lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
- p = &JFS_IP(ip)->i_xtroot;
+ p = (xtpage_t *) &JFS_IP(ip)->i_xtroot;
if (S_ISDIR(ip->i_mode))
lrd->log.redopage.type |=
cpu_to_le16(LOG_DIR_XTREE);
@@ -2731,6 +2703,7 @@ int jfs_lazycommit(void *arg)
unsigned long flags;
struct jfs_sb_info *sbi;
+ set_freezable();
do {
LAZY_LOCK(flags);
jfs_commit_thread_waking = 0; /* OK to wake another thread */
@@ -2913,6 +2886,7 @@ int jfs_sync(void *arg)
struct jfs_inode_info *jfs_ip;
tid_t tid;
+ set_freezable();
do {
/*
* write each inode on the anonymous inode list
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c
index 3e8b13e6aa01..8ec43f53f686 100644
--- a/fs/jfs/jfs_umount.c
+++ b/fs/jfs/jfs_umount.c
@@ -68,7 +68,6 @@ int jfs_umount(struct super_block *sb)
/*
* close secondary aggregate inode allocation map
*/
- ipaimap2 = sbi->ipaimap2;
if (ipaimap2) {
diUnmount(ipaimap2, 0);
diFreeSpecial(ipaimap2);
@@ -78,7 +77,6 @@ int jfs_umount(struct super_block *sb)
/*
* close aggregate inode allocation map
*/
- ipaimap = sbi->ipaimap;
diUnmount(ipaimap, 0);
diFreeSpecial(ipaimap);
sbi->ipaimap = NULL;
@@ -89,7 +87,7 @@ int jfs_umount(struct super_block *sb)
dbUnmount(ipbmap, 0);
diFreeSpecial(ipbmap);
- sbi->ipimap = NULL;
+ sbi->ipbmap = NULL;
/*
* Make sure all metadata makes it to disk before we mark
diff --git a/fs/jfs/jfs_unicode.h b/fs/jfs/jfs_unicode.h
index 9db62d047daa..b6a78d4aef1b 100644
--- a/fs/jfs/jfs_unicode.h
+++ b/fs/jfs/jfs_unicode.h
@@ -8,16 +8,9 @@
#include <linux/slab.h>
#include <asm/byteorder.h>
+#include "../nls/nls_ucs2_data.h"
#include "jfs_types.h"
-typedef struct {
- wchar_t start;
- wchar_t end;
- signed char *table;
-} UNICASERANGE;
-
-extern signed char UniUpperTable[512];
-extern UNICASERANGE UniUpperRange[];
extern int get_UCSname(struct component_name *, struct dentry *);
extern int jfs_strfromUCS_le(char *, const __le16 *, int, struct nls_table *);
@@ -107,12 +100,12 @@ static inline wchar_t *UniStrncpy_from_le(wchar_t * ucs1, const __le16 * ucs2,
*/
static inline wchar_t UniToupper(wchar_t uc)
{
- UNICASERANGE *rp;
+ const struct UniCaseRange *rp;
- if (uc < sizeof(UniUpperTable)) { /* Latin characters */
- return uc + UniUpperTable[uc]; /* Use base tables */
+ if (uc < sizeof(NlsUniUpperTable)) { /* Latin characters */
+ return uc + NlsUniUpperTable[uc]; /* Use base tables */
} else {
- rp = UniUpperRange; /* Use range tables */
+ rp = NlsUniUpperRange; /* Use range tables */
while (rp->start) {
if (uc < rp->start) /* Before start of range */
return uc; /* Uppercase = input */
diff --git a/fs/jfs/jfs_uniupr.c b/fs/jfs/jfs_uniupr.c
deleted file mode 100644
index d0b18c7befb8..000000000000
--- a/fs/jfs/jfs_uniupr.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) International Business Machines Corp., 2000-2002
- */
-
-#include <linux/fs.h>
-#include "jfs_unicode.h"
-
-/*
- * Latin upper case
- */
-signed char UniUpperTable[512] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 030-03f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 040-04f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 050-05f */
- 0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 060-06f */
- -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, 0, 0, 0, 0, 0, /* 070-07f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 080-08f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 090-09f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0a0-0af */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0b0-0bf */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0c0-0cf */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0d0-0df */
- -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 0e0-0ef */
- -32,-32,-32,-32,-32,-32,-32, 0,-32,-32,-32,-32,-32,-32,-32,121, /* 0f0-0ff */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 100-10f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 110-11f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 120-12f */
- 0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0, /* 130-13f */
- -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, /* 140-14f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 150-15f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 160-16f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0, /* 170-17f */
- 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, /* 180-18f */
- 0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, /* 190-19f */
- 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, /* 1a0-1af */
- -1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, /* 1b0-1bf */
- 0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0, /* 1c0-1cf */
- -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,-79, 0, -1, /* 1d0-1df */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e0-1ef */
- 0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1, /* 1f0-1ff */
-};
-
-/* Upper case range - Greek */
-static signed char UniCaseRangeU03a0[47] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-38,-37,-37,-37, /* 3a0-3af */
- 0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 3b0-3bf */
- -32,-32,-31,-32,-32,-32,-32,-32,-32,-32,-32,-32,-64,-63,-63,
-};
-
-/* Upper case range - Cyrillic */
-static signed char UniCaseRangeU0430[48] = {
- -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 430-43f */
- -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 440-44f */
- 0,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80, 0,-80,-80, /* 450-45f */
-};
-
-/* Upper case range - Extended cyrillic */
-static signed char UniCaseRangeU0490[61] = {
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 490-49f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4a0-4af */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4b0-4bf */
- 0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
-};
-
-/* Upper case range - Extended latin and greek */
-static signed char UniCaseRangeU1e00[509] = {
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e00-1e0f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e10-1e1f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e20-1e2f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e30-1e3f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e40-1e4f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e50-1e5f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e60-1e6f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e70-1e7f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e80-1e8f */
- 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0,-59, 0, -1, 0, -1, /* 1e90-1e9f */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ea0-1eaf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1eb0-1ebf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ec0-1ecf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ed0-1edf */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ee0-1eef */
- 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, /* 1ef0-1eff */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f00-1f0f */
- 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f10-1f1f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f20-1f2f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f30-1f3f */
- 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f40-1f4f */
- 0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f50-1f5f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f60-1f6f */
- 74, 74, 86, 86, 86, 86,100,100, 0, 0,112,112,126,126, 0, 0, /* 1f70-1f7f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f80-1f8f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f90-1f9f */
- 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fa0-1faf */
- 8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fb0-1fbf */
- 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fc0-1fcf */
- 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fd0-1fdf */
- 8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fe0-1fef */
- 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-
-/* Upper case range - Wide latin */
-static signed char UniCaseRangeUff40[27] = {
- 0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* ff40-ff4f */
- -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,
-};
-
-/*
- * Upper Case Range
- */
-UNICASERANGE UniUpperRange[] = {
- { 0x03a0, 0x03ce, UniCaseRangeU03a0 },
- { 0x0430, 0x045f, UniCaseRangeU0430 },
- { 0x0490, 0x04cc, UniCaseRangeU0490 },
- { 0x1e00, 0x1ffc, UniCaseRangeU1e00 },
- { 0xff40, 0xff5a, UniCaseRangeUff40 },
- { 0 }
-};
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
index c50167a7bc50..ec67d8554d2c 100644
--- a/fs/jfs/jfs_xattr.h
+++ b/fs/jfs/jfs_xattr.h
@@ -25,7 +25,7 @@ struct jfs_ea_list {
struct jfs_ea ea[]; /* Variable length list */
};
-/* Macros for defining maxiumum number of bytes supported for EAs */
+/* Macros for defining maximum number of bytes supported for EAs */
#define MAXEASIZE 65535
#define MAXEALISTSIZE MAXEASIZE
@@ -46,7 +46,7 @@ extern int __jfs_setxattr(tid_t, struct inode *, const char *, const void *,
extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
-extern const struct xattr_handler *jfs_xattr_handlers[];
+extern const struct xattr_handler * const jfs_xattr_handlers[];
#ifdef CONFIG_JFS_SECURITY
extern int jfs_init_security(tid_t, struct inode *, struct inode *,
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 3148e9b35f3b..28c3cf960c6f 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -49,26 +49,6 @@
#define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
-/* get page buffer for specified block address */
-/* ToDo: Replace this ugly macro with a function */
-#define XT_GETPAGE(IP, BN, MP, SIZE, P, RC) \
-do { \
- BT_GETPAGE(IP, BN, MP, xtpage_t, SIZE, P, RC, i_xtroot); \
- if (!(RC)) { \
- if ((le16_to_cpu((P)->header.nextindex) < XTENTRYSTART) || \
- (le16_to_cpu((P)->header.nextindex) > \
- le16_to_cpu((P)->header.maxentry)) || \
- (le16_to_cpu((P)->header.maxentry) > \
- (((BN) == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) { \
- jfs_error((IP)->i_sb, \
- "XT_GETPAGE: xtree page corrupt\n"); \
- BT_PUTPAGE(MP); \
- MP = NULL; \
- RC = -EIO; \
- } \
- } \
-} while (0)
-
/* for consistency */
#define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
@@ -114,16 +94,41 @@ static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
-#ifdef _STILL_TO_PORT
-static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
- xtpage_t * fp, struct btstack * btstack);
+/*
+ * xt_getpage()
+ *
+ * function: get the page buffer for a specified block address.
+ *
+ * parameters:
+ * ip - pointer to the inode
+ * bn - block number (s64) of the xtree page to be retrieved;
+ * mp - pointer to a metapage pointer where the page buffer is returned;
+ *
+ * returns:
+ * A pointer to the xtree page (xtpage_t) on success, or an
+ * ERR_PTR()-encoded errno (e.g. -EIO) on failure.
+ */
-static int xtSearchNode(struct inode *ip,
- xad_t * xad,
- int *cmpp, struct btstack * btstack, int flag);
+static inline xtpage_t *xt_getpage(struct inode *ip, s64 bn, struct metapage **mp)
+{
+ xtpage_t *p;
+ int rc;
-static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
-#endif /* _STILL_TO_PORT */
+ BT_GETPAGE(ip, bn, *mp, xtpage_t, PSIZE, p, rc, i_xtroot);
+
+ if (rc)
+ return ERR_PTR(rc);
+ if ((le16_to_cpu(p->header.nextindex) < XTENTRYSTART) ||
+ (le16_to_cpu(p->header.nextindex) >
+ le16_to_cpu(p->header.maxentry)) ||
+ (le16_to_cpu(p->header.maxentry) >
+ ((bn == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) {
+ jfs_error(ip->i_sb, "xt_getpage: xtree page corrupt\n");
+ BT_PUTPAGE(*mp);
+ *mp = NULL;
+ return ERR_PTR(-EIO);
+ }
+ return p;
+}
/*
* xtLookup()
@@ -227,7 +232,6 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
int *cmpp, struct btstack * btstack, int flag)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
- int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn; /* block number */
struct metapage *mp; /* page buffer */
@@ -263,9 +267,9 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
*/
for (bn = 0;;) {
/* get/pin the page to search */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/* try sequential access heuristics with the previous
* access entry in target leaf page:
@@ -818,10 +822,10 @@ xtSplitUp(tid_t tid,
* insert router entry in parent for new right child page <rp>
*/
/* get/pin the parent page <sp> */
- XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
- if (rc) {
+ sp = xt_getpage(ip, parent->bn, &smp);
+ if (IS_ERR(sp)) {
XT_PUTPAGE(rcmp);
- return rc;
+ return PTR_ERR(sp);
}
/*
@@ -1073,10 +1077,10 @@ xtSplitPage(tid_t tid, struct inode *ip,
* update previous pointer of old next/right page of <sp>
*/
if (nextbn != 0) {
- XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
- if (rc) {
+ p = xt_getpage(ip, nextbn, &mp);
+ if (IS_ERR(p)) {
XT_PUTPAGE(rmp);
- goto clean_up;
+ return PTR_ERR(p);
}
BT_MARK_DIRTY(mp, ip);
@@ -1224,7 +1228,7 @@ xtSplitRoot(tid_t tid,
struct xtlock *xtlck;
int rc;
- sp = &JFS_IP(ip)->i_xtroot;
+ sp = (xtpage_t *) &JFS_IP(ip)->i_xtroot;
INCREMENT(xtStat.split);
@@ -1428,9 +1432,9 @@ int xtExtend(tid_t tid, /* transaction id */
return rc;
/* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
@@ -1444,9 +1448,9 @@ int xtExtend(tid_t tid, /* transaction id */
XT_PUTPAGE(mp);
/* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1493,189 +1497,6 @@ int xtExtend(tid_t tid, /* transaction id */
return rc;
}
-#ifdef _NOTYET
-/*
- * xtTailgate()
- *
- * function: split existing 'tail' extent
- * (split offset >= start offset of tail extent), and
- * relocate and extend the split tail half;
- *
- * note: existing extent may or may not have been committed.
- * caller is responsible for pager buffer cache update, and
- * working block allocation map update;
- * update pmap: free old split tail extent, alloc new extent;
- */
-int xtTailgate(tid_t tid, /* transaction id */
- struct inode *ip, s64 xoff, /* split/new extent offset */
- s32 xlen, /* new extent length */
- s64 xaddr, /* new extent address */
- int flag)
-{
- int rc = 0;
- int cmp;
- struct metapage *mp; /* meta-page buffer */
- xtpage_t *p; /* base B+-tree index page */
- s64 bn;
- int index, nextindex, llen, rlen;
- struct btstack btstack; /* traverse stack */
- struct xtsplit split; /* split information */
- xad_t *xad;
- struct tlock *tlck;
- struct xtlock *xtlck = 0;
- struct tlock *mtlck;
- struct maplock *pxdlock;
-
-/*
-printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
- (ulong)xoff, xlen, (ulong)xaddr);
-*/
-
- /* there must exist extent to be tailgated */
- if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, XT_INSERT)))
- return rc;
-
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
-
- if (cmp != 0) {
- XT_PUTPAGE(mp);
- jfs_error(ip->i_sb, "couldn't find extent\n");
- return -EIO;
- }
-
- /* entry found must be last entry */
- nextindex = le16_to_cpu(p->header.nextindex);
- if (index != nextindex - 1) {
- XT_PUTPAGE(mp);
- jfs_error(ip->i_sb, "the entry found is not the last entry\n");
- return -EIO;
- }
-
- BT_MARK_DIRTY(mp, ip);
- /*
- * acquire tlock of the leaf page containing original entry
- */
- if (!test_cflag(COMMIT_Nolink, ip)) {
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
- xtlck = (struct xtlock *) & tlck->lock;
- }
-
- /* completely replace extent ? */
- xad = &p->xad[index];
-/*
-printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
- (ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
-*/
- if ((llen = xoff - offsetXAD(xad)) == 0)
- goto updateOld;
-
- /*
- * partially replace extent: insert entry for new extent
- */
-//insertNew:
- /*
- * if the leaf page is full, insert the new entry and
- * propagate up the router entry for the new page from split
- *
- * The xtSplitUp() will insert the entry and unpin the leaf page.
- */
- if (nextindex == le16_to_cpu(p->header.maxentry)) {
- /* xtSpliUp() unpins leaf pages */
- split.mp = mp;
- split.index = index + 1;
- split.flag = XAD_NEW;
- split.off = xoff; /* split offset */
- split.len = xlen;
- split.addr = xaddr;
- split.pxdlist = NULL;
- if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
- return rc;
-
- /* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
- /*
- * if leaf root has been split, original root has been
- * copied to new child page, i.e., original entry now
- * resides on the new child page;
- */
- if (p->header.flag & BT_INTERNAL) {
- ASSERT(p->header.nextindex ==
- cpu_to_le16(XTENTRYSTART + 1));
- xad = &p->xad[XTENTRYSTART];
- bn = addressXAD(xad);
- XT_PUTPAGE(mp);
-
- /* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- BT_MARK_DIRTY(mp, ip);
- if (!test_cflag(COMMIT_Nolink, ip)) {
- tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
- xtlck = (struct xtlock *) & tlck->lock;
- }
- }
- }
- /*
- * insert the new entry into the leaf page
- */
- else {
- /* insert the new entry: mark the entry NEW */
- xad = &p->xad[index + 1];
- XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
-
- /* advance next available entry index */
- le16_add_cpu(&p->header.nextindex, 1);
- }
-
- /* get back old XAD */
- xad = &p->xad[index];
-
- /*
- * truncate/relocate old extent at split offset
- */
- updateOld:
- /* update dmap for old/committed/truncated extent */
- rlen = lengthXAD(xad) - llen;
- if (!(xad->flag & XAD_NEW)) {
- /* free from PWMAP at commit */
- if (!test_cflag(COMMIT_Nolink, ip)) {
- mtlck = txMaplock(tid, ip, tlckMAP);
- pxdlock = (struct maplock *) & mtlck->lock;
- pxdlock->flag = mlckFREEPXD;
- PXDaddress(&pxdlock->pxd, addressXAD(xad) + llen);
- PXDlength(&pxdlock->pxd, rlen);
- pxdlock->index = 1;
- }
- } else
- /* free from WMAP */
- dbFree(ip, addressXAD(xad) + llen, (s64) rlen);
-
- if (llen)
- /* truncate */
- XADlength(xad, llen);
- else
- /* replace */
- XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);
-
- if (!test_cflag(COMMIT_Nolink, ip)) {
- xtlck->lwm.offset = (xtlck->lwm.offset) ?
- min(index, (int)xtlck->lwm.offset) : index;
- xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
- xtlck->lwm.offset;
- }
-
- /* unpin the leaf page */
- XT_PUTPAGE(mp);
-
- return rc;
-}
-#endif /* _NOTYET */
-
/*
* xtUpdate()
*
@@ -1753,32 +1574,12 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
newindex = index + 1;
nextindex = le16_to_cpu(p->header.nextindex);
-#ifdef _JFS_WIP_NOCOALESCE
- if (xoff < nxoff)
- goto updateRight;
-
- /*
- * replace XAD with nXAD
- */
- replace: /* (nxoff == xoff) */
- if (nxlen == xlen) {
- /* replace XAD with nXAD:recorded */
- *xad = *nxad;
- xad->flag = xflag & ~XAD_NOTRECORDED;
-
- goto out;
- } else /* (nxlen < xlen) */
- goto updateLeft;
-#endif /* _JFS_WIP_NOCOALESCE */
-
-/* #ifdef _JFS_WIP_COALESCE */
if (xoff < nxoff)
goto coalesceRight;
/*
* coalesce with left XAD
*/
-//coalesceLeft: /* (xoff == nxoff) */
/* is XAD first entry of page ? */
if (index == XTENTRYSTART)
goto replace;
@@ -1897,7 +1698,6 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
jfs_error(ip->i_sb, "xoff >= nxoff\n");
return -EIO;
}
-/* #endif _JFS_WIP_COALESCE */
/*
* split XAD into (lXAD, nXAD):
@@ -1926,9 +1726,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
return rc;
/* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
@@ -1942,9 +1742,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
XT_PUTPAGE(mp);
/* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -2003,9 +1803,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
XT_PUTPAGE(mp);
/* get new right page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -2079,9 +1879,9 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
return rc;
/* get back old page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
@@ -2096,9 +1896,9 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
XT_PUTPAGE(mp);
/* get new child page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -2305,752 +2105,6 @@ int xtAppend(tid_t tid, /* transaction id */
return rc;
}
-#ifdef _STILL_TO_PORT
-
-/* - TBD for defragmentation/reorganization -
- *
- * xtDelete()
- *
- * function:
- * delete the entry with the specified key.
- *
- * N.B.: whole extent of the entry is assumed to be deleted.
- *
- * parameter:
- *
- * return:
- * ENOENT: if the entry is not found.
- *
- * exception:
- */
-int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
-{
- int rc = 0;
- struct btstack btstack;
- int cmp;
- s64 bn;
- struct metapage *mp;
- xtpage_t *p;
- int index, nextindex;
- struct tlock *tlck;
- struct xtlock *xtlck;
-
- /*
- * find the matching entry; xtSearch() pins the page
- */
- if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0)))
- return rc;
-
- XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
- if (cmp) {
- /* unpin the leaf page */
- XT_PUTPAGE(mp);
- return -ENOENT;
- }
-
- /*
- * delete the entry from the leaf page
- */
- nextindex = le16_to_cpu(p->header.nextindex);
- le16_add_cpu(&p->header.nextindex, -1);
-
- /*
- * if the leaf page becomes empty, free the page
- */
- if (p->header.nextindex == cpu_to_le16(XTENTRYSTART))
- return (xtDeleteUp(tid, ip, mp, p, &btstack));
-
- BT_MARK_DIRTY(mp, ip);
- /*
- * acquire a transaction lock on the leaf page;
- *
- * action:xad deletion;
- */
- tlck = txLock(tid, ip, mp, tlckXTREE);
- xtlck = (struct xtlock *) & tlck->lock;
- xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index, xtlck->lwm.offset) : index;
-
- /* if delete from middle, shift left/compact the remaining entries */
- if (index < nextindex - 1)
- memmove(&p->xad[index], &p->xad[index + 1],
- (nextindex - index - 1) * sizeof(xad_t));
-
- XT_PUTPAGE(mp);
-
- return 0;
-}
-
-
-/* - TBD for defragmentation/reorganization -
- *
- * xtDeleteUp()
- *
- * function:
- * free empty pages as propagating deletion up the tree
- *
- * parameter:
- *
- * return:
- */
-static int
-xtDeleteUp(tid_t tid, struct inode *ip,
- struct metapage * fmp, xtpage_t * fp, struct btstack * btstack)
-{
- int rc = 0;
- struct metapage *mp;
- xtpage_t *p;
- int index, nextindex;
- s64 xaddr;
- int xlen;
- struct btframe *parent;
- struct tlock *tlck;
- struct xtlock *xtlck;
-
- /*
- * keep root leaf page which has become empty
- */
- if (fp->header.flag & BT_ROOT) {
- /* keep the root page */
- fp->header.flag &= ~BT_INTERNAL;
- fp->header.flag |= BT_LEAF;
- fp->header.nextindex = cpu_to_le16(XTENTRYSTART);
-
- /* XT_PUTPAGE(fmp); */
-
- return 0;
- }
-
- /*
- * free non-root leaf page
- */
- if ((rc = xtRelink(tid, ip, fp))) {
- XT_PUTPAGE(fmp);
- return rc;
- }
-
- xaddr = addressPXD(&fp->header.self);
- xlen = lengthPXD(&fp->header.self);
- /* free the page extent */
- dbFree(ip, xaddr, (s64) xlen);
-
- /* free the buffer page */
- discard_metapage(fmp);
-
- /*
- * propagate page deletion up the index tree
- *
- * If the delete from the parent page makes it empty,
- * continue all the way up the tree.
- * stop if the root page is reached (which is never deleted) or
- * if the entry deletion does not empty the page.
- */
- while ((parent = BT_POP(btstack)) != NULL) {
- /* get/pin the parent page <sp> */
- XT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- index = parent->index;
-
- /* delete the entry for the freed child page from parent.
- */
- nextindex = le16_to_cpu(p->header.nextindex);
-
- /*
- * the parent has the single entry being deleted:
- * free the parent page which has become empty.
- */
- if (nextindex == 1) {
- if (p->header.flag & BT_ROOT) {
- /* keep the root page */
- p->header.flag &= ~BT_INTERNAL;
- p->header.flag |= BT_LEAF;
- p->header.nextindex =
- cpu_to_le16(XTENTRYSTART);
-
- /* XT_PUTPAGE(mp); */
-
- break;
- } else {
- /* free the parent page */
- if ((rc = xtRelink(tid, ip, p)))
- return rc;
-
- xaddr = addressPXD(&p->header.self);
- /* free the page extent */
- dbFree(ip, xaddr,
- (s64) JFS_SBI(ip->i_sb)->nbperpage);
-
- /* unpin/free the buffer page */
- discard_metapage(mp);
-
- /* propagate up */
- continue;
- }
- }
- /*
- * the parent has other entries remaining:
- * delete the router entry from the parent page.
- */
- else {
- BT_MARK_DIRTY(mp, ip);
- /*
- * acquire a transaction lock on the leaf page;
- *
- * action:xad deletion;
- */
- tlck = txLock(tid, ip, mp, tlckXTREE);
- xtlck = (struct xtlock *) & tlck->lock;
- xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index,
- xtlck->lwm.
- offset) : index;
-
- /* if delete from middle,
- * shift left/compact the remaining entries in the page
- */
- if (index < nextindex - 1)
- memmove(&p->xad[index], &p->xad[index + 1],
- (nextindex - index -
- 1) << L2XTSLOTSIZE);
-
- le16_add_cpu(&p->header.nextindex, -1);
- jfs_info("xtDeleteUp(entry): 0x%lx[%d]",
- (ulong) parent->bn, index);
- }
-
- /* unpin the parent page */
- XT_PUTPAGE(mp);
-
- /* exit propagation up */
- break;
- }
-
- return 0;
-}
-
-
-/*
- * NAME: xtRelocate()
- *
- * FUNCTION: relocate xtpage or data extent of regular file;
- * This function is mainly used by defragfs utility.
- *
- * NOTE: This routine does not have the logic to handle
- * uncommitted allocated extent. The caller should call
- * txCommit() to commit all the allocation before call
- * this routine.
- */
-int
-xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
- s64 nxaddr, /* new xaddr */
- int xtype)
-{ /* extent type: XTPAGE or DATAEXT */
- int rc = 0;
- struct tblock *tblk;
- struct tlock *tlck;
- struct xtlock *xtlck;
- struct metapage *mp, *pmp, *lmp, *rmp; /* meta-page buffer */
- xtpage_t *p, *pp, *rp, *lp; /* base B+-tree index page */
- xad_t *xad;
- pxd_t *pxd;
- s64 xoff, xsize;
- int xlen;
- s64 oxaddr, sxaddr, dxaddr, nextbn, prevbn;
- cbuf_t *cp;
- s64 offset, nbytes, nbrd, pno;
- int nb, npages, nblks;
- s64 bn;
- int cmp;
- int index;
- struct pxd_lock *pxdlock;
- struct btstack btstack; /* traverse stack */
-
- xtype = xtype & EXTENT_TYPE;
-
- xoff = offsetXAD(oxad);
- oxaddr = addressXAD(oxad);
- xlen = lengthXAD(oxad);
-
- /* validate extent offset */
- offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
- if (offset >= ip->i_size)
- return -ESTALE; /* stale extent */
-
- jfs_info("xtRelocate: xtype:%d xoff:0x%lx xlen:0x%x xaddr:0x%lx:0x%lx",
- xtype, (ulong) xoff, xlen, (ulong) oxaddr, (ulong) nxaddr);
-
- /*
- * 1. get and validate the parent xtpage/xad entry
- * covering the source extent to be relocated;
- */
- if (xtype == DATAEXT) {
- /* search in leaf entry */
- rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0);
- if (rc)
- return rc;
-
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
-
- if (cmp) {
- XT_PUTPAGE(pmp);
- return -ESTALE;
- }
-
- /* validate for exact match with a single entry */
- xad = &pp->xad[index];
- if (addressXAD(xad) != oxaddr || lengthXAD(xad) != xlen) {
- XT_PUTPAGE(pmp);
- return -ESTALE;
- }
- } else { /* (xtype == XTPAGE) */
-
- /* search in internal entry */
- rc = xtSearchNode(ip, oxad, &cmp, &btstack, 0);
- if (rc)
- return rc;
-
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
-
- if (cmp) {
- XT_PUTPAGE(pmp);
- return -ESTALE;
- }
-
- /* xtSearchNode() validated for exact match with a single entry
- */
- xad = &pp->xad[index];
- }
- jfs_info("xtRelocate: parent xad entry validated.");
-
- /*
- * 2. relocate the extent
- */
- if (xtype == DATAEXT) {
- /* if the extent is allocated-but-not-recorded
- * there is no real data to be moved in this extent,
- */
- if (xad->flag & XAD_NOTRECORDED)
- goto out;
- else
- /* release xtpage for cmRead()/xtLookup() */
- XT_PUTPAGE(pmp);
-
- /*
- * cmRelocate()
- *
- * copy target data pages to be relocated;
- *
- * data extent must start at page boundary and
- * multiple of page size (except the last data extent);
- * read in each page of the source data extent into cbuf,
- * update the cbuf extent descriptor of the page to be
- * homeward bound to new dst data extent
- * copy the data from the old extent to new extent.
- * copy is essential for compressed files to avoid problems
- * that can arise if there was a change in compression
- * algorithms.
- * it is a good strategy because it may disrupt cache
- * policy to keep the pages in memory afterwards.
- */
- offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
- assert((offset & CM_OFFSET) == 0);
- nbytes = xlen << JFS_SBI(ip->i_sb)->l2bsize;
- pno = offset >> CM_L2BSIZE;
- npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE;
-/*
- npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
- (offset >> CM_L2BSIZE) + 1;
-*/
- sxaddr = oxaddr;
- dxaddr = nxaddr;
-
- /* process the request one cache buffer at a time */
- for (nbrd = 0; nbrd < nbytes; nbrd += nb,
- offset += nb, pno++, npages--) {
- /* compute page size */
- nb = min(nbytes - nbrd, CM_BSIZE);
-
- /* get the cache buffer of the page */
- if (rc = cmRead(ip, offset, npages, &cp))
- break;
-
- assert(addressPXD(&cp->cm_pxd) == sxaddr);
- assert(!cp->cm_modified);
-
- /* bind buffer with the new extent address */
- nblks = nb >> JFS_IP(ip->i_sb)->l2bsize;
- cmSetXD(ip, cp, pno, dxaddr, nblks);
-
- /* release the cbuf, mark it as modified */
- cmPut(cp, true);
-
- dxaddr += nblks;
- sxaddr += nblks;
- }
-
- /* get back parent page */
- if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0)))
- return rc;
-
- XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
- jfs_info("xtRelocate: target data extent relocated.");
- } else { /* (xtype == XTPAGE) */
-
- /*
- * read in the target xtpage from the source extent;
- */
- XT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
- if (rc) {
- XT_PUTPAGE(pmp);
- return rc;
- }
-
- /*
- * read in sibling pages if any to update sibling pointers;
- */
- rmp = NULL;
- if (p->header.next) {
- nextbn = le64_to_cpu(p->header.next);
- XT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
- if (rc) {
- XT_PUTPAGE(pmp);
- XT_PUTPAGE(mp);
- return (rc);
- }
- }
-
- lmp = NULL;
- if (p->header.prev) {
- prevbn = le64_to_cpu(p->header.prev);
- XT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
- if (rc) {
- XT_PUTPAGE(pmp);
- XT_PUTPAGE(mp);
- if (rmp)
- XT_PUTPAGE(rmp);
- return (rc);
- }
- }
-
- /* at this point, all xtpages to be updated are in memory */
-
- /*
- * update sibling pointers of sibling xtpages if any;
- */
- if (lmp) {
- BT_MARK_DIRTY(lmp, ip);
- tlck = txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
- lp->header.next = cpu_to_le64(nxaddr);
- XT_PUTPAGE(lmp);
- }
-
- if (rmp) {
- BT_MARK_DIRTY(rmp, ip);
- tlck = txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
- rp->header.prev = cpu_to_le64(nxaddr);
- XT_PUTPAGE(rmp);
- }
-
- /*
- * update the target xtpage to be relocated
- *
- * update the self address of the target page
- * and write to destination extent;
- * redo image covers the whole xtpage since it is new page
- * to the destination extent;
- * update of bmap for the free of source extent
- * of the target xtpage itself:
- * update of bmap for the allocation of destination extent
- * of the target xtpage itself:
- * update of bmap for the extents covered by xad entries in
- * the target xtpage is not necessary since they are not
- * updated;
- * if not committed before this relocation,
- * target page may contain XAD_NEW entries which must
- * be scanned for bmap update (logredo() always
- * scan xtpage REDOPAGE image for bmap update);
- * if committed before this relocation (tlckRELOCATE),
- * scan may be skipped by commit() and logredo();
- */
- BT_MARK_DIRTY(mp, ip);
- /* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
- xtlck = (struct xtlock *) & tlck->lock;
-
- /* update the self address in the xtpage header */
- pxd = &p->header.self;
- PXDaddress(pxd, nxaddr);
-
- /* linelock for the after image of the whole page */
- xtlck->lwm.length =
- le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
-
- /* update the buffer extent descriptor of target xtpage */
- xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
- bmSetXD(mp, nxaddr, xsize);
-
- /* unpin the target page to new homeward bound */
- XT_PUTPAGE(mp);
- jfs_info("xtRelocate: target xtpage relocated.");
- }
-
- /*
- * 3. acquire maplock for the source extent to be freed;
- *
- * acquire a maplock saving the src relocated extent address;
- * to free of the extent at commit time;
- */
- out:
- /* if DATAEXT relocation, write a LOG_UPDATEMAP record for
- * free PXD of the source data extent (logredo() will update
- * bmap for free of source data extent), and update bmap for
- * free of the source data extent;
- */
- if (xtype == DATAEXT)
- tlck = txMaplock(tid, ip, tlckMAP);
- /* if XTPAGE relocation, write a LOG_NOREDOPAGE record
- * for the source xtpage (logredo() will init NoRedoPage
- * filter and will also update bmap for free of the source
- * xtpage), and update bmap for free of the source xtpage;
- * N.B. We use tlckMAP instead of tlkcXTREE because there
- * is no buffer associated with this lock since the buffer
- * has been redirected to the target location.
- */
- else /* (xtype == XTPAGE) */
- tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);
-
- pxdlock = (struct pxd_lock *) & tlck->lock;
- pxdlock->flag = mlckFREEPXD;
- PXDaddress(&pxdlock->pxd, oxaddr);
- PXDlength(&pxdlock->pxd, xlen);
- pxdlock->index = 1;
-
- /*
- * 4. update the parent xad entry for relocation;
- *
- * acquire tlck for the parent entry with XAD_NEW as entry
- * update which will write LOG_REDOPAGE and update bmap for
- * allocation of XAD_NEW destination extent;
- */
- jfs_info("xtRelocate: update parent xad entry.");
- BT_MARK_DIRTY(pmp, ip);
- tlck = txLock(tid, ip, pmp, tlckXTREE | tlckGROW);
- xtlck = (struct xtlock *) & tlck->lock;
-
- /* update the XAD with the new destination extent; */
- xad = &pp->xad[index];
- xad->flag |= XAD_NEW;
- XADaddress(xad, nxaddr);
-
- xtlck->lwm.offset = min(index, xtlck->lwm.offset);
- xtlck->lwm.length = le16_to_cpu(pp->header.nextindex) -
- xtlck->lwm.offset;
-
- /* unpin the parent xtpage */
- XT_PUTPAGE(pmp);
-
- return rc;
-}
-
-
-/*
- * xtSearchNode()
- *
- * function: search for the internal xad entry covering specified extent.
- * This function is mainly used by defragfs utility.
- *
- * parameters:
- * ip - file object;
- * xad - extent to find;
- * cmpp - comparison result:
- * btstack - traverse stack;
- * flag - search process flag;
- *
- * returns:
- * btstack contains (bn, index) of search path traversed to the entry.
- * *cmpp is set to result of comparison with the entry returned.
- * the page containing the entry is pinned at exit.
- */
-static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
- int *cmpp, struct btstack * btstack, int flag)
-{
- int rc = 0;
- s64 xoff, xaddr;
- int xlen;
- int cmp = 1; /* init for empty page */
- s64 bn; /* block number */
- struct metapage *mp; /* meta-page buffer */
- xtpage_t *p; /* page */
- int base, index, lim;
- struct btframe *btsp;
- s64 t64;
-
- BT_CLR(btstack);
-
- xoff = offsetXAD(xad);
- xlen = lengthXAD(xad);
- xaddr = addressXAD(xad);
-
- /*
- * search down tree from root:
- *
- * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
- * internal page, child page Pi contains entry with k, Ki <= K < Kj.
- *
- * if entry with search key K is not found
- * internal page search find the entry with largest key Ki
- * less than K which point to the child page to search;
- * leaf page search find the entry with smallest key Kj
- * greater than K so that the returned index is the position of
- * the entry to be shifted right for insertion of new entry.
- * for empty tree, search key is greater than any key of the tree.
- *
- * by convention, root bn = 0.
- */
- for (bn = 0;;) {
- /* get/pin the page to search */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
- if (p->header.flag & BT_LEAF) {
- XT_PUTPAGE(mp);
- return -ESTALE;
- }
-
- lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
-
- /*
- * binary search with search key K on the current page
- */
- for (base = XTENTRYSTART; lim; lim >>= 1) {
- index = base + (lim >> 1);
-
- XT_CMP(cmp, xoff, &p->xad[index], t64);
- if (cmp == 0) {
- /*
- * search hit
- *
- * verify for exact match;
- */
- if (xaddr == addressXAD(&p->xad[index]) &&
- xoff == offsetXAD(&p->xad[index])) {
- *cmpp = cmp;
-
- /* save search result */
- btsp = btstack->top;
- btsp->bn = bn;
- btsp->index = index;
- btsp->mp = mp;
-
- return 0;
- }
-
- /* descend/search its child page */
- goto next;
- }
-
- if (cmp > 0) {
- base = index + 1;
- --lim;
- }
- }
-
- /*
- * search miss - non-leaf page:
- *
- * base is the smallest index with key (Kj) greater than
- * search key (K) and may be zero or maxentry index.
- * if base is non-zero, decrement base by one to get the parent
- * entry of the child page to search.
- */
- index = base ? base - 1 : base;
-
- /*
- * go down to child page
- */
- next:
- /* get the child page block number */
- bn = addressXAD(&p->xad[index]);
-
- /* unpin the parent page */
- XT_PUTPAGE(mp);
- }
-}
-
-
-/*
- * xtRelink()
- *
- * function:
- * link around a freed page.
- *
- * Parameter:
- * int tid,
- * struct inode *ip,
- * xtpage_t *p)
- *
- * returns:
- */
-static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
-{
- int rc = 0;
- struct metapage *mp;
- s64 nextbn, prevbn;
- struct tlock *tlck;
-
- nextbn = le64_to_cpu(p->header.next);
- prevbn = le64_to_cpu(p->header.prev);
-
- /* update prev pointer of the next page */
- if (nextbn != 0) {
- XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- /*
- * acquire a transaction lock on the page;
- *
- * action: update prev pointer;
- */
- BT_MARK_DIRTY(mp, ip);
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
-
- /* the page may already have been tlock'd */
-
- p->header.prev = cpu_to_le64(prevbn);
-
- XT_PUTPAGE(mp);
- }
-
- /* update next pointer of the previous page */
- if (prevbn != 0) {
- XT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- /*
- * acquire a transaction lock on the page;
- *
- * action: update next pointer;
- */
- BT_MARK_DIRTY(mp, ip);
- tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK);
-
- /* the page may already have been tlock'd */
-
- p->header.next = le64_to_cpu(nextbn);
-
- XT_PUTPAGE(mp);
- }
-
- return 0;
-}
-#endif /* _STILL_TO_PORT */
-
/*
* xtInitRoot()
@@ -3059,7 +2113,7 @@ static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
*/
void xtInitRoot(tid_t tid, struct inode *ip)
{
- xtpage_t *p;
+ xtroot_t *p;
/*
* acquire a transaction lock on the root
@@ -3148,7 +2202,6 @@ void xtInitRoot(tid_t tid, struct inode *ip)
*/
s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
{
- int rc = 0;
s64 teof;
struct metapage *mp;
xtpage_t *p;
@@ -3229,9 +2282,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* first access of each page:
*/
getPage:
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
@@ -3467,9 +2520,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
/* get back the parent page */
bn = parent->bn;
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
index = parent->index;
@@ -3752,9 +2805,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
* first access of each page:
*/
getPage:
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
@@ -3797,9 +2850,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
/* get back the parent page */
bn = parent->bn;
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
+ p = xt_getpage(ip, bn, &mp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
index = parent->index;
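
The recurring change in the jfs_xtree.c hunks above replaces the XT_GETPAGE() macro, which reports failure through an rc output parameter, with an xt_getpage() helper that returns either the page or an ERR_PTR(). The helper body is not part of this excerpt; the following is a minimal sketch of the shape such a wrapper would take, assuming it simply reuses the existing macro (the real function may also validate the page header):

static xtpage_t *xt_getpage(struct inode *ip, s64 bn, struct metapage **mp)
{
	xtpage_t *p;
	int rc;

	/* fold the macro's (mp, p, rc) outputs into the ERR_PTR() convention */
	XT_GETPAGE(ip, bn, *mp, PSIZE, p, rc);
	if (rc)
		return ERR_PTR(rc);
	return p;
}
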
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index 5f51be8596b3..0f6cf5a1ce75 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -65,24 +65,33 @@ struct xadlist {
#define XTPAGEMAXSLOT 256
#define XTENTRYSTART 2
-/*
- * xtree page:
- */
-typedef union {
- struct xtheader {
- __le64 next; /* 8: */
- __le64 prev; /* 8: */
+struct xtheader {
+ __le64 next; /* 8: */
+ __le64 prev; /* 8: */
- u8 flag; /* 1: */
- u8 rsrvd1; /* 1: */
- __le16 nextindex; /* 2: next index = number of entries */
- __le16 maxentry; /* 2: max number of entries */
- __le16 rsrvd2; /* 2: */
+ u8 flag; /* 1: */
+ u8 rsrvd1; /* 1: */
+ __le16 nextindex; /* 2: next index = number of entries */
+ __le16 maxentry; /* 2: max number of entries */
+ __le16 rsrvd2; /* 2: */
- pxd_t self; /* 8: self */
- } header; /* (32) */
+ pxd_t self; /* 8: self */
+};
+/*
+ * xtree root (in inode):
+ */
+typedef union {
+ struct xtheader header;
xad_t xad[XTROOTMAXSLOT]; /* 16 * maxentry: xad array */
+} xtroot_t;
+
+/*
+ * xtree page:
+ */
+typedef union {
+ struct xtheader header;
+ xad_t xad[XTPAGEMAXSLOT]; /* 16 * maxentry: xad array */
} xtpage_t;
/*
@@ -95,17 +104,9 @@ extern int xtInsert(tid_t tid, struct inode *ip,
int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);
extern int xtExtend(tid_t tid, struct inode *ip, s64 xoff, int xlen,
int flag);
-#ifdef _NOTYET
-extern int xtTailgate(tid_t tid, struct inode *ip,
- s64 xoff, int xlen, s64 xaddr, int flag);
-#endif
extern int xtUpdate(tid_t tid, struct inode *ip, struct xad *nxad);
-extern int xtDelete(tid_t tid, struct inode *ip, s64 xoff, int xlen,
- int flag);
extern s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int type);
extern s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size);
-extern int xtRelocate(tid_t tid, struct inode *ip,
- xad_t * oxad, s64 nxaddr, int xtype);
extern int xtAppend(tid_t tid,
struct inode *ip, int xflag, s64 xoff, int maxblocks,
int *xlenp, s64 * xaddrp, int flag);
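
Splitting the old union into xtroot_t (the in-inode root, XTROOTMAXSLOT slots) and xtpage_t (a full page, XTPAGEMAXSLOT slots) keeps the shared struct xtheader defined once. A small sanity sketch, assuming xad_t is 16 bytes as the field comments indicate, also shows why entries start at XTENTRYSTART == 2: the 32-byte header overlays the first two xad slots of either union.

static_assert(sizeof(struct xtheader) == XTENTRYSTART * sizeof(xad_t));
static_assert(sizeof(xtroot_t) == XTROOTMAXSLOT * sizeof(xad_t));
static_assert(sizeof(xtpage_t) == XTPAGEMAXSLOT * sizeof(xad_t));
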
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 9db4f5789c0e..65a218eba8fa 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -59,7 +59,7 @@ static inline void free_ea_wmap(struct inode *inode)
* RETURN: Errors from subroutines
*
*/
-static int jfs_create(struct user_namespace *mnt_userns, struct inode *dip,
+static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
struct dentry *dentry, umode_t mode, bool excl)
{
int rc = 0;
@@ -149,7 +149,7 @@ static int jfs_create(struct user_namespace *mnt_userns, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
@@ -187,13 +187,13 @@ static int jfs_create(struct user_namespace *mnt_userns, struct inode *dip,
* dentry - dentry of child directory
* mode - create mode (rwxrwxrwx).
*
- * RETURN: Errors from subroutines
+ * RETURN: ERR_PTR() of errors from subroutines.
*
* note:
* EACCES: user needs search+write permission on the parent directory
*/
-static int jfs_mkdir(struct user_namespace *mnt_userns, struct inode *dip,
- struct dentry *dentry, umode_t mode)
+static struct dentry *jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
+ struct dentry *dentry, umode_t mode)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -284,7 +284,7 @@ static int jfs_mkdir(struct user_namespace *mnt_userns, struct inode *dip,
/* update parent directory inode */
inc_nlink(dip); /* for '..' from child directory */
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
rc = txCommit(tid, 2, &iplist[0], 0);
@@ -308,7 +308,7 @@ static int jfs_mkdir(struct user_namespace *mnt_userns, struct inode *dip,
out1:
jfs_info("jfs_mkdir: rc:%d", rc);
- return rc;
+ return ERR_PTR(rc);
}
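
With the prototype change above, jfs_mkdir() reports its result as a dentry pointer rather than an int: ERR_PTR(rc) folds the old error codes into the pointer, and ERR_PTR(0) == NULL signals success with the caller's dentry left in place. A caller-side sketch (hypothetical call site, not taken from this patch):

	struct dentry *d;

	d = jfs_mkdir(idmap, dir, dentry, 0755);	/* hypothetical direct call */
	if (IS_ERR(d))
		return PTR_ERR(d);	/* e.g. -EACCES, -ENOSPC, -EDQUOT */
	/* d == NULL: directory created, keep using the dentry passed in */
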
/*
@@ -390,7 +390,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
/* update parent directory's link count corresponding
* to ".." entry of the target directory deleted
*/
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
inode_dec_link_count(dip);
/*
@@ -512,7 +512,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
ASSERT(ip->i_nlink);
- ip->i_ctime = dip->i_ctime = dip->i_mtime = current_time(ip);
+ inode_set_mtime_to_ts(dip,
+ inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip)));
mark_inode_dirty(dip);
/* update target's inode */
@@ -799,6 +800,11 @@ static int jfs_link(struct dentry *old_dentry,
if (rc)
goto out;
+ if (isReadOnly(ip)) {
+ jfs_error(ip->i_sb, "read-only filesystem\n");
+ return -EROFS;
+ }
+
tid = txBegin(ip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
@@ -822,8 +828,8 @@ static int jfs_link(struct dentry *old_dentry,
/* update object inode */
inc_nlink(ip); /* for new link */
- ip->i_ctime = current_time(ip);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_ctime_current(ip);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ihold(ip);
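
The timestamp updates throughout this file follow one pattern: inode_set_ctime_current() stamps ctime and returns the timespec64 it used, and inode_set_mtime_to_ts() copies that value into mtime, replacing the old open-coded "i_ctime = i_mtime = current_time()" assignments. Spelled out step by step, the nested call is equivalent to:

	struct timespec64 now;

	now = inode_set_ctime_current(dir);	/* dir ctime = now, returns now */
	inode_set_mtime_to_ts(dir, now);	/* dir mtime = now */
	mark_inode_dirty(dir);
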
@@ -869,16 +875,16 @@ static int jfs_link(struct dentry *old_dentry,
* an intermediate result whose length exceeds PATH_MAX [XPG4.2]
*/
-static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
+static int jfs_symlink(struct mnt_idmap *idmap, struct inode *dip,
struct dentry *dentry, const char *name)
{
int rc;
tid_t tid;
ino_t ino = 0;
struct component_name dname;
- int ssize; /* source pathname size */
+ u32 ssize; /* source pathname size */
struct btstack btstack;
- struct inode *ip = d_inode(dentry);
+ struct inode *ip;
s64 xlen = 0;
int bmask = 0, xsize;
s64 xaddr;
@@ -946,7 +952,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
if (ssize <= IDATASIZE) {
ip->i_op = &jfs_fast_symlink_inode_operations;
- ip->i_link = JFS_IP(ip)->i_inline;
+ ip->i_link = JFS_IP(ip)->i_inline_all;
memcpy(ip->i_link, name, ssize);
ip->i_size = ssize - 1;
@@ -957,7 +963,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
if (ssize > sizeof (JFS_IP(ip)->i_inline))
JFS_IP(ip)->mode2 &= ~INLINEEA;
- jfs_info("jfs_symlink: fast symlink added ssize:%d name:%s ",
+ jfs_info("jfs_symlink: fast symlink added ssize:%u name:%s ",
ssize, name);
}
/*
@@ -987,7 +993,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
ip->i_size = ssize - 1;
while (ssize) {
/* This is kind of silly since PATH_MAX == 4K */
- int copy_size = min(ssize, PSIZE);
+ u32 copy_size = min_t(u32, ssize, PSIZE);
mp = get_metapage(ip, xaddr, PSIZE, 1);
@@ -1023,7 +1029,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
@@ -1059,7 +1065,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
*
* FUNCTION: rename a file or directory
*/
-static int jfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
@@ -1200,7 +1206,7 @@ static int jfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
tblk->xflag |= COMMIT_DELETE;
tblk->u.ip = new_ip;
} else {
- new_ip->i_ctime = current_time(new_ip);
+ inode_set_ctime_current(new_ip);
mark_inode_dirty(new_ip);
}
} else {
@@ -1263,10 +1269,10 @@ static int jfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
/*
* Update ctime on changed/moved inodes & mark dirty
*/
- old_ip->i_ctime = current_time(old_ip);
+ inode_set_ctime_current(old_ip);
mark_inode_dirty(old_ip);
- new_dir->i_ctime = new_dir->i_mtime = current_time(new_dir);
+ inode_set_mtime_to_ts(new_dir, inode_set_ctime_current(new_dir));
mark_inode_dirty(new_dir);
/* Build list of inodes modified by this transaction */
@@ -1278,7 +1284,8 @@ static int jfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (old_dir != new_dir) {
iplist[ipcount++] = new_dir;
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ inode_set_mtime_to_ts(old_dir,
+ inode_set_ctime_current(old_dir));
mark_inode_dirty(old_dir);
}
@@ -1345,7 +1352,7 @@ static int jfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
*
* FUNCTION: Create a special file (device)
*/
-static int jfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int jfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct jfs_inode_info *jfs_ip;
@@ -1411,7 +1418,7 @@ static int jfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
mark_inode_dirty(ip);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
@@ -1525,14 +1532,15 @@ const struct inode_operations jfs_dir_inode_operations = {
.fileattr_get = jfs_fileattr_get,
.fileattr_set = jfs_fileattr_set,
#ifdef CONFIG_JFS_POSIX_ACL
- .get_acl = jfs_get_acl,
+ .get_inode_acl = jfs_get_acl,
.set_acl = jfs_set_acl,
#endif
};
+WRAP_DIR_ITER(jfs_readdir) // FIXME!
const struct file_operations jfs_dir_operations = {
.read = generic_read_dir,
- .iterate = jfs_readdir,
+ .iterate_shared = shared_jfs_readdir,
.fsync = jfs_fsync,
.unlocked_ioctl = jfs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -1568,7 +1576,8 @@ out:
return result;
}
-static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags)
+static int jfs_ci_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
/*
* This is not negative dentry. Always valid.
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 24cbc9946e01..3cfb86c5a36e 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -6,11 +6,11 @@
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
-#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
@@ -102,7 +102,7 @@ static struct inode *jfs_alloc_inode(struct super_block *sb)
{
struct jfs_inode_info *jfs_inode;
- jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
+ jfs_inode = alloc_inode_sb(sb, jfs_inode_cachep, GFP_NOFS);
if (!jfs_inode)
return NULL;
#ifdef CONFIG_QUOTA
@@ -210,244 +210,195 @@ enum {
Opt_discard, Opt_nodiscard, Opt_discard_minblk
};
-static const match_table_t tokens = {
- {Opt_integrity, "integrity"},
- {Opt_nointegrity, "nointegrity"},
- {Opt_iocharset, "iocharset=%s"},
- {Opt_resize, "resize=%u"},
- {Opt_resize_nosize, "resize"},
- {Opt_errors, "errors=%s"},
- {Opt_ignore, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
- {Opt_grpquota, "grpquota"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_umask, "umask=%u"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_discard_minblk, "discard=%u"},
- {Opt_err, NULL}
+static const struct constant_table jfs_param_errors[] = {
+ {"continue", JFS_ERR_CONTINUE},
+ {"remount-ro", JFS_ERR_REMOUNT_RO},
+ {"panic", JFS_ERR_PANIC},
+ {}
};
-static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
- int *flag)
-{
- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
- char *p;
- struct jfs_sb_info *sbi = JFS_SBI(sb);
+static const struct fs_parameter_spec jfs_param_spec[] = {
+ fsparam_flag_no ("integrity", Opt_integrity),
+ fsparam_string ("iocharset", Opt_iocharset),
+ fsparam_u64 ("resize", Opt_resize),
+ fsparam_flag ("resize", Opt_resize_nosize),
+ fsparam_enum ("errors", Opt_errors, jfs_param_errors),
+ fsparam_flag ("quota", Opt_quota),
+ fsparam_flag ("noquota", Opt_ignore),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_u32oct ("umask", Opt_umask),
+ fsparam_flag ("discard", Opt_discard),
+ fsparam_u32 ("discard", Opt_discard_minblk),
+ fsparam_flag ("nodiscard", Opt_nodiscard),
+ {}
+};
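
Each fsparam_*() entry both names the option and declares how its value is parsed, so a mount string such as "errors=remount-ro,umask=022,discard=64" is validated by fs_parse() before jfs_parse_param() ever sees it. A rough mapping, illustrative only and derived from the table above:

/*
 *  option string      spec entry                           handed back as
 *  "nointegrity"      fsparam_flag_no("integrity", ...)    result.negated
 *  "errors=panic"     fsparam_enum("errors", ..., table)   result.uint_32
 *  "umask=022"        fsparam_u32oct("umask", ...)         result.uint_32 (octal)
 *  "uid=1000"         fsparam_uid("uid", ...)              result.uid (kuid_t)
 *  "discard=64"       fsparam_u32("discard", ...)          result.uint_32
 */
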
- *newLVSize = 0;
-
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_integrity:
- *flag &= ~JFS_NOINTEGRITY;
- break;
- case Opt_nointegrity:
- *flag |= JFS_NOINTEGRITY;
- break;
- case Opt_ignore:
- /* Silently ignore the quota options */
- /* Don't do anything ;-) */
- break;
- case Opt_iocharset:
- if (nls_map && nls_map != (void *) -1)
- unload_nls(nls_map);
- if (!strcmp(args[0].from, "none"))
- nls_map = NULL;
- else {
- nls_map = load_nls(args[0].from);
- if (!nls_map) {
- pr_err("JFS: charset not found\n");
- goto cleanup;
- }
- }
- break;
- case Opt_resize:
- {
- char *resize = args[0].from;
- int rc = kstrtoll(resize, 0, newLVSize);
+struct jfs_context {
+ int flag;
+ kuid_t uid;
+ kgid_t gid;
+ uint umask;
+ uint minblks_trim;
+ void *nls_map;
+ bool resize;
+ s64 newLVSize;
+};
- if (rc)
- goto cleanup;
- break;
- }
- case Opt_resize_nosize:
- {
- *newLVSize = sb_bdev_nr_blocks(sb);
- if (*newLVSize == 0)
- pr_err("JFS: Cannot determine volume size\n");
- break;
+static int jfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct jfs_context *ctx = fc->fs_private;
+ int reconfigure = (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE);
+ struct fs_parse_result result;
+ struct nls_table *nls_map;
+ int opt;
+
+ opt = fs_parse(fc, jfs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_integrity:
+ if (result.negated)
+ ctx->flag |= JFS_NOINTEGRITY;
+ else
+ ctx->flag &= ~JFS_NOINTEGRITY;
+ break;
+ case Opt_ignore:
+ /* Silently ignore the quota options */
+ /* Don't do anything ;-) */
+ break;
+ case Opt_iocharset:
+ if (ctx->nls_map && ctx->nls_map != (void *) -1) {
+ unload_nls(ctx->nls_map);
+ ctx->nls_map = NULL;
}
- case Opt_errors:
- {
- char *errors = args[0].from;
- if (!errors || !*errors)
- goto cleanup;
- if (!strcmp(errors, "continue")) {
- *flag &= ~JFS_ERR_REMOUNT_RO;
- *flag &= ~JFS_ERR_PANIC;
- *flag |= JFS_ERR_CONTINUE;
- } else if (!strcmp(errors, "remount-ro")) {
- *flag &= ~JFS_ERR_CONTINUE;
- *flag &= ~JFS_ERR_PANIC;
- *flag |= JFS_ERR_REMOUNT_RO;
- } else if (!strcmp(errors, "panic")) {
- *flag &= ~JFS_ERR_CONTINUE;
- *flag &= ~JFS_ERR_REMOUNT_RO;
- *flag |= JFS_ERR_PANIC;
- } else {
- pr_err("JFS: %s is an invalid error handler\n",
- errors);
- goto cleanup;
+ if (!strcmp(param->string, "none"))
+ ctx->nls_map = NULL;
+ else {
+ nls_map = load_nls(param->string);
+ if (!nls_map) {
+ pr_err("JFS: charset not found\n");
+ return -EINVAL;
}
- break;
+ ctx->nls_map = nls_map;
}
+ break;
+ case Opt_resize:
+ if (!reconfigure)
+ return -EINVAL;
+ ctx->resize = true;
+ ctx->newLVSize = result.uint_64;
+ break;
+ case Opt_resize_nosize:
+ if (!reconfigure)
+ return -EINVAL;
+ ctx->resize = true;
+ break;
+ case Opt_errors:
+ ctx->flag &= ~JFS_ERR_MASK;
+ ctx->flag |= result.uint_32;
+ break;
#ifdef CONFIG_QUOTA
- case Opt_quota:
- case Opt_usrquota:
- *flag |= JFS_USRQUOTA;
- break;
- case Opt_grpquota:
- *flag |= JFS_GRPQUOTA;
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ ctx->flag |= JFS_USRQUOTA;
+ break;
+ case Opt_grpquota:
+ ctx->flag |= JFS_GRPQUOTA;
+ break;
#else
- case Opt_usrquota:
- case Opt_grpquota:
- case Opt_quota:
- pr_err("JFS: quota operations not supported\n");
- break;
+ case Opt_usrquota:
+ case Opt_grpquota:
+ case Opt_quota:
+ pr_err("JFS: quota operations not supported\n");
+ break;
#endif
- case Opt_uid:
- {
- char *uid = args[0].from;
- uid_t val;
- int rc = kstrtouint(uid, 0, &val);
-
- if (rc)
- goto cleanup;
- sbi->uid = make_kuid(current_user_ns(), val);
- if (!uid_valid(sbi->uid))
- goto cleanup;
- break;
- }
-
- case Opt_gid:
- {
- char *gid = args[0].from;
- gid_t val;
- int rc = kstrtouint(gid, 0, &val);
-
- if (rc)
- goto cleanup;
- sbi->gid = make_kgid(current_user_ns(), val);
- if (!gid_valid(sbi->gid))
- goto cleanup;
- break;
+ case Opt_uid:
+ ctx->uid = result.uid;
+ break;
+
+ case Opt_gid:
+ ctx->gid = result.gid;
+ break;
+
+ case Opt_umask:
+ if (result.uint_32 & ~0777) {
+ pr_err("JFS: Invalid value of umask\n");
+ return -EINVAL;
}
+ ctx->umask = result.uint_32;
+ break;
- case Opt_umask:
- {
- char *umask = args[0].from;
- int rc = kstrtouint(umask, 8, &sbi->umask);
-
- if (rc)
- goto cleanup;
- if (sbi->umask & ~0777) {
- pr_err("JFS: Invalid value of umask\n");
- goto cleanup;
- }
- break;
- }
-
- case Opt_discard:
- {
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- /* if set to 1, even copying files will cause
- * trimming :O
- * -> user has more control over the online trimming
- */
- sbi->minblks_trim = 64;
- if (blk_queue_discard(q))
- *flag |= JFS_DISCARD;
- else
- pr_err("JFS: discard option not supported on device\n");
- break;
- }
+ case Opt_discard:
+ /* if set to 1, even copying files will cause
+ * trimming :O
+ * -> user has more control over the online trimming
+ */
+ ctx->minblks_trim = 64;
+ ctx->flag |= JFS_DISCARD;
+ break;
- case Opt_nodiscard:
- *flag &= ~JFS_DISCARD;
- break;
-
- case Opt_discard_minblk:
- {
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- char *minblks_trim = args[0].from;
- int rc;
- if (blk_queue_discard(q)) {
- *flag |= JFS_DISCARD;
- rc = kstrtouint(minblks_trim, 0,
- &sbi->minblks_trim);
- if (rc)
- goto cleanup;
- } else
- pr_err("JFS: discard option not supported on device\n");
- break;
- }
+ case Opt_nodiscard:
+ ctx->flag &= ~JFS_DISCARD;
+ break;
- default:
- printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
- p);
- goto cleanup;
- }
- }
+ case Opt_discard_minblk:
+ ctx->minblks_trim = result.uint_32;
+ ctx->flag |= JFS_DISCARD;
+ break;
- if (nls_map != (void *) -1) {
- /* Discard old (if remount) */
- unload_nls(sbi->nls_tab);
- sbi->nls_tab = nls_map;
+ default:
+ return -EINVAL;
}
- return 1;
-cleanup:
- if (nls_map && nls_map != (void *) -1)
- unload_nls(nls_map);
return 0;
}
-static int jfs_remount(struct super_block *sb, int *flags, char *data)
+static int jfs_reconfigure(struct fs_context *fc)
{
- s64 newLVSize = 0;
+ struct jfs_context *ctx = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
+ int readonly = fc->sb_flags & SB_RDONLY;
int rc = 0;
- int flag = JFS_SBI(sb)->flag;
+ int flag = ctx->flag;
int ret;
sync_filesystem(sb);
- if (!parse_options(data, sb, &newLVSize, &flag))
- return -EINVAL;
- if (newLVSize) {
+ /* Transfer results of parsing to the sbi */
+ JFS_SBI(sb)->flag = ctx->flag;
+ JFS_SBI(sb)->uid = ctx->uid;
+ JFS_SBI(sb)->gid = ctx->gid;
+ JFS_SBI(sb)->umask = ctx->umask;
+ JFS_SBI(sb)->minblks_trim = ctx->minblks_trim;
+ if (ctx->nls_map != (void *) -1) {
+ unload_nls(JFS_SBI(sb)->nls_tab);
+ JFS_SBI(sb)->nls_tab = ctx->nls_map;
+ }
+ ctx->nls_map = NULL;
+
+ if (ctx->resize) {
if (sb_rdonly(sb)) {
pr_err("JFS: resize requires volume to be mounted read-write\n");
return -EROFS;
}
- rc = jfs_extendfs(sb, newLVSize, 0);
+
+ if (!ctx->newLVSize) {
+ ctx->newLVSize = sb_bdev_nr_blocks(sb);
+ if (ctx->newLVSize == 0)
+ pr_err("JFS: Cannot determine volume size\n");
+ }
+
+ rc = jfs_extendfs(sb, ctx->newLVSize, 0);
if (rc)
return rc;
}
- if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
+ if (sb_rdonly(sb) && !readonly) {
/*
* Invalidate any previously read metadata. fsck may have
* changed the on-disk data since we mounted r/o
@@ -463,7 +414,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
dquot_resume(sb, -1);
return ret;
}
- if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
+ if (!sb_rdonly(sb) && readonly) {
rc = dquot_suspend(sb, -1);
if (rc < 0)
return rc;
@@ -471,7 +422,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
JFS_SBI(sb)->flag = flag;
return rc;
}
- if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
+ if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY)) {
if (!sb_rdonly(sb)) {
rc = jfs_umount_rw(sb);
if (rc)
@@ -481,18 +432,20 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
ret = jfs_mount_rw(sb, 1);
return ret;
}
+ }
JFS_SBI(sb)->flag = flag;
return 0;
}
-static int jfs_fill_super(struct super_block *sb, void *data, int silent)
+static int jfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct jfs_context *ctx = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct jfs_sb_info *sbi;
struct inode *inode;
int rc;
- s64 newLVSize = 0;
- int flag, ret = -EINVAL;
+ int ret = -EINVAL;
jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);
@@ -505,24 +458,34 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_min = 0;
sb->s_time_max = U32_MAX;
sbi->sb = sb;
- sbi->uid = INVALID_UID;
- sbi->gid = INVALID_GID;
- sbi->umask = -1;
-
- /* initialize the mount flag and determine the default error handler */
- flag = JFS_ERR_REMOUNT_RO;
- if (!parse_options((char *) data, sb, &newLVSize, &flag))
- goto out_kfree;
- sbi->flag = flag;
+ /* Transfer results of parsing to the sbi */
+ sbi->flag = ctx->flag;
+ sbi->uid = ctx->uid;
+ sbi->gid = ctx->gid;
+ sbi->umask = ctx->umask;
+ if (ctx->nls_map != (void *) -1) {
+ unload_nls(sbi->nls_tab);
+ sbi->nls_tab = ctx->nls_map;
+ }
+ ctx->nls_map = NULL;
+
+ if (sbi->flag & JFS_DISCARD) {
+ if (!bdev_max_discard_sectors(sb->s_bdev)) {
+ pr_err("JFS: discard option not supported on device\n");
+ sbi->flag &= ~JFS_DISCARD;
+ } else {
+ sbi->minblks_trim = ctx->minblks_trim;
+ }
+ }
#ifdef CONFIG_JFS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
#endif
- if (newLVSize) {
+ if (ctx->resize) {
pr_err("resize option for remount only\n");
- goto out_kfree;
+ goto out_unload;
}
/*
@@ -579,7 +542,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = JFS_SUPER_MAGIC;
if (sbi->mntflag & JFS_OS2)
- sb->s_d_op = &jfs_ci_dentry_operations;
+ set_default_d_op(sb, &jfs_ci_dentry_operations);
inode = jfs_iget(sb, ROOT_I);
if (IS_ERR(inode)) {
@@ -612,7 +575,6 @@ out_mount_failed:
sbi->direct_inode = NULL;
out_unload:
unload_nls(sbi->nls_tab);
-out_kfree:
kfree(sbi);
return ret;
}
@@ -668,10 +630,9 @@ out:
return rc;
}
-static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int jfs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
+ return get_tree_bdev(fc, jfs_fill_super);
}
static int jfs_sync_fs(struct super_block *sb, int wait)
@@ -749,8 +710,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
len = i_size-off;
toread = len;
while (toread > 0) {
- tocopy = sb->s_blocksize - offset < toread ?
- sb->s_blocksize - offset : toread;
+ tocopy = min_t(size_t, sb->s_blocksize - offset, toread);
tmp_bh.b_state = 0;
tmp_bh.b_size = i_blocksize(inode);
@@ -789,8 +749,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
inode_lock(inode);
while (towrite > 0) {
- tocopy = sb->s_blocksize - offset < towrite ?
- sb->s_blocksize - offset : towrite;
+ tocopy = min_t(size_t, sb->s_blocksize - offset, towrite);
tmp_bh.b_state = 0;
tmp_bh.b_size = i_blocksize(inode);
@@ -807,7 +766,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
@@ -824,13 +783,13 @@ out:
}
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
inode_unlock(inode);
return len - towrite;
}
-static struct dquot **jfs_get_dquots(struct inode *inode)
+static struct dquot __rcu **jfs_get_dquots(struct inode *inode)
{
return JFS_IP(inode)->i_dquot;
}
@@ -892,7 +851,6 @@ static const struct super_operations jfs_super_operations = {
.freeze_fs = jfs_freeze,
.unfreeze_fs = jfs_unfreeze,
.statfs = jfs_statfs,
- .remount_fs = jfs_remount,
.show_options = jfs_show_options,
#ifdef CONFIG_QUOTA
.quota_read = jfs_quota_read,
@@ -902,17 +860,77 @@ static const struct super_operations jfs_super_operations = {
};
static const struct export_operations jfs_export_operations = {
+ .encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = jfs_fh_to_dentry,
.fh_to_parent = jfs_fh_to_parent,
.get_parent = jfs_get_parent,
};
+static void jfs_init_options(struct fs_context *fc, struct jfs_context *ctx)
+{
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+
+ /* Copy over current option values and mount flags */
+ ctx->uid = JFS_SBI(sb)->uid;
+ ctx->gid = JFS_SBI(sb)->gid;
+ ctx->umask = JFS_SBI(sb)->umask;
+ ctx->nls_map = (void *)-1;
+ ctx->minblks_trim = JFS_SBI(sb)->minblks_trim;
+ ctx->flag = JFS_SBI(sb)->flag;
+
+ } else {
+ /*
+ * Initialize the mount flag and determine the default
+ * error handler
+ */
+ ctx->flag = JFS_ERR_REMOUNT_RO;
+ ctx->uid = INVALID_UID;
+ ctx->gid = INVALID_GID;
+ ctx->umask = -1;
+ ctx->nls_map = (void *)-1;
+ }
+}
+
+static void jfs_free_fc(struct fs_context *fc)
+{
+ struct jfs_context *ctx = fc->fs_private;
+
+ if (ctx->nls_map != (void *) -1)
+ unload_nls(ctx->nls_map);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations jfs_context_ops = {
+ .parse_param = jfs_parse_param,
+ .get_tree = jfs_get_tree,
+ .reconfigure = jfs_reconfigure,
+ .free = jfs_free_fc,
+};
+
+static int jfs_init_fs_context(struct fs_context *fc)
+{
+ struct jfs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ jfs_init_options(fc, ctx);
+
+ fc->fs_private = ctx;
+ fc->ops = &jfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type jfs_fs_type = {
.owner = THIS_MODULE,
.name = "jfs",
- .mount = jfs_do_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = jfs_init_fs_context,
+ .parameters = jfs_param_spec,
};
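
Wiring .init_fs_context and .parameters into jfs_fs_type replaces both jfs_do_mount() and the legacy ->remount_fs path; the VFS now drives mounts through a struct fs_context. A simplified sketch of the sequence the core performs (the helpers are real VFS APIs, the option values are made up):

	struct fs_context *fc;
	int ret;

	fc = fs_context_for_mount(&jfs_fs_type, 0);	/* calls jfs_init_fs_context() */
	if (IS_ERR(fc))
		return PTR_ERR(fc);
	vfs_parse_fs_string(fc, "source", "/dev/sdb1", 9);	/* device to mount */
	vfs_parse_fs_string(fc, "errors", "remount-ro", 10);	/* -> jfs_parse_param() */
	ret = vfs_get_tree(fc);			/* -> jfs_get_tree() -> jfs_fill_super() */
	put_fs_context(fc);
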
MODULE_ALIAS_FS("jfs");
@@ -937,7 +955,7 @@ static int __init init_jfs_fs(void)
jfs_inode_cachep =
kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
- 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
+ 0, SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
offsetof(struct jfs_inode_info, i_inline_all),
sizeof_field(struct jfs_inode_info, i_inline_all),
init_once);
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index f9273f6901c8..11d7f74d207b 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -434,6 +434,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
int rc;
int quota_allocation = 0;
+ memset(&ea_buf->new_ea, 0, sizeof(ea_buf->new_ea));
+
/* When fsck.jfs clears a bad ea, it doesn't clear the size */
if (ji->ea.flag == 0)
ea_size = 0;
@@ -557,9 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
- printk(KERN_ERR "ea_get: invalid extended attribute\n");
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
- ea_buf->xattr, ea_size, 1);
+ if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) {
+ printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n",
+ EALIST_SIZE(ea_buf->xattr));
+ } else {
+ int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
+
+ printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+ ea_buf->xattr, size, 1);
+ }
ea_release(inode, ea_buf);
rc = -EIO;
goto clean_up;
@@ -647,7 +656,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
if (old_blocks)
dquot_free_block(inode, old_blocks);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
return 0;
}
@@ -795,7 +804,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
size_t buf_size)
{
struct jfs_ea_list *ealist;
- struct jfs_ea *ea;
+ struct jfs_ea *ea, *ealist_end;
struct ea_buffer ea_buf;
int xattr_size;
ssize_t size;
@@ -815,9 +824,16 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
goto not_found;
ealist = (struct jfs_ea_list *) ea_buf.xattr;
+ ealist_end = END_EALIST(ealist);
/* Find the named attribute */
- for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
+ for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
+ if (unlikely(ea + 1 > ealist_end) ||
+ unlikely(NEXT_EA(ea) > ealist_end)) {
+ size = -EUCLEAN;
+ goto release;
+ }
+
if ((namelen == ea->namelen) &&
memcmp(name, ea->name, namelen) == 0) {
/* Found it */
@@ -832,6 +848,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
memcpy(data, value, size);
goto release;
}
+ }
not_found:
size = -ENODATA;
release:
@@ -859,7 +876,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
ssize_t size = 0;
int xattr_size;
struct jfs_ea_list *ealist;
- struct jfs_ea *ea;
+ struct jfs_ea *ea, *ealist_end;
struct ea_buffer ea_buf;
down_read(&JFS_IP(inode)->xattr_sem);
@@ -874,9 +891,16 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
goto release;
ealist = (struct jfs_ea_list *) ea_buf.xattr;
+ ealist_end = END_EALIST(ealist);
/* compute required size of list */
- for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+ for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
+ if (unlikely(ea + 1 > ealist_end) ||
+ unlikely(NEXT_EA(ea) > ealist_end)) {
+ size = -EUCLEAN;
+ goto release;
+ }
+
if (can_list(ea))
size += name_size(ea) + 1;
}
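
NEXT_EA() advances by the length the on-disk entry claims for itself, so a corrupted namelen or valuelen could walk the cursor past the buffer; the checks added above verify that the fixed header fits (ea + 1) and that the whole self-described entry fits (NEXT_EA(ea)) before anything is dereferenced, failing with -EUCLEAN otherwise. In isolation, the guarded walk used by both functions looks like this:

	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
		if (unlikely(ea + 1 > ealist_end) ||
		    unlikely(NEXT_EA(ea) > ealist_end))
			return -EUCLEAN;	/* EA list corrupted on disk */
		/* ea->namelen, ea->name and the value are now safe to read */
	}
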
@@ -932,7 +956,7 @@ static int jfs_xattr_get(const struct xattr_handler *handler,
}
static int jfs_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -951,7 +975,7 @@ static int jfs_xattr_get_os2(const struct xattr_handler *handler,
}
static int jfs_xattr_set_os2(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -985,11 +1009,7 @@ static const struct xattr_handler jfs_trusted_xattr_handler = {
.set = jfs_xattr_set,
};
-const struct xattr_handler *jfs_xattr_handlers[] = {
-#ifdef CONFIG_JFS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
+const struct xattr_handler * const jfs_xattr_handlers[] = {
&jfs_os2_xattr_handler,
&jfs_user_xattr_handler,
&jfs_security_xattr_handler,
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index 1b07550485b9..de32c95d823d 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -8,16 +8,16 @@
/**
* kernel_read_file() - read file contents into a kernel buffer
*
- * @file file to read from
- * @offset where to start reading from (see below).
- * @buf pointer to a "void *" buffer for reading into (if
+ * @file: file to read from
+ * @offset: where to start reading from (see below).
+ * @buf: pointer to a "void *" buffer for reading into (if
* *@buf is NULL, a buffer will be allocated, and
* @buf_size will be ignored)
- * @buf_size size of buf, if already allocated. If @buf not
+ * @buf_size: size of buf, if already allocated. If @buf not
* allocated, this is the largest size to allocate.
- * @file_size if non-NULL, the full size of @file will be
+ * @file_size: if non-NULL, the full size of @file will be
* written here.
- * @id the kernel_read_file_id identifying the type of
+ * @id: the kernel_read_file_id identifying the type of
* file contents being read (for LSMs to examine)
*
* @offset must be 0 unless both @buf and @file_size are non-NULL
@@ -29,15 +29,15 @@
* change between calls to kernel_read_file().
*
* Returns number of bytes read (no single read will be bigger
- * than INT_MAX), or negative on error.
+ * than SSIZE_MAX), or negative on error.
*
*/
-int kernel_read_file(struct file *file, loff_t offset, void **buf,
- size_t buf_size, size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file(struct file *file, loff_t offset, void **buf,
+ size_t buf_size, size_t *file_size,
+ enum kernel_read_file_id id)
{
loff_t i_size, pos;
- size_t copied;
+ ssize_t copied;
void *allocated = NULL;
bool whole_file;
int ret;
@@ -58,7 +58,7 @@ int kernel_read_file(struct file *file, loff_t offset, void **buf,
goto out;
}
/* The file is too big for sane activities. */
- if (i_size > INT_MAX) {
+ if (i_size > SSIZE_MAX) {
ret = -EFBIG;
goto out;
}
@@ -124,12 +124,12 @@ out:
}
EXPORT_SYMBOL_GPL(kernel_read_file);
-int kernel_read_file_from_path(const char *path, loff_t offset, void **buf,
- size_t buf_size, size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset, void **buf,
+ size_t buf_size, size_t *file_size,
+ enum kernel_read_file_id id)
{
struct file *file;
- int ret;
+ ssize_t ret;
if (!path || !*path)
return -EINVAL;
@@ -144,14 +144,14 @@ int kernel_read_file_from_path(const char *path, loff_t offset, void **buf,
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
-int kernel_read_file_from_path_initns(const char *path, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id)
{
struct file *file;
struct path root;
- int ret;
+ ssize_t ret;
if (!path || !*path)
return -EINVAL;
@@ -171,19 +171,15 @@ int kernel_read_file_from_path_initns(const char *path, loff_t offset,
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_path_initns);
-int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
- size_t buf_size, size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
+ size_t buf_size, size_t *file_size,
+ enum kernel_read_file_id id)
{
- struct fd f = fdget(fd);
- int ret = -EBADF;
+ CLASS(fd, f)(fd);
- if (!f.file || !(f.file->f_mode & FMODE_READ))
- goto out;
+ if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ))
+ return -EBADF;
- ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
-out:
- fdput(f);
- return ret;
+ return kernel_read_file(fd_file(f), offset, buf, buf_size, file_size, id);
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
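
Switching the return type (and the internal copied counter) to ssize_t lets these helpers report reads larger than INT_MAX while still signalling errors as negatives. A usage sketch for the path-based variant; the file name is illustrative, the calls are the APIs declared above:

	void *buf = NULL;	/* NULL: let the helper vmalloc a buffer for us */
	size_t file_size;
	ssize_t len;

	len = kernel_read_file_from_path("/lib/firmware/example.bin", 0, &buf,
					 SSIZE_MAX, &file_size, READING_FIRMWARE);
	if (len < 0)
		return len;	/* -ENOENT, -EFBIG, ... */
	/* len bytes are in buf; vfree(buf) when done */
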
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 8e0a1378a4b1..5c0efd6b239f 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -17,19 +17,29 @@
#include "kernfs-internal.h"
-DECLARE_RWSEM(kernfs_rwsem);
-static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
-static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */
-static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
+/*
+ * Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
+ * call pr_cont() while holding rename_lock. Because sometimes pr_cont()
+ * will perform wakeups when releasing console_sem. Holding rename_lock
+ * will introduce deadlock if the scheduler reads the kernfs_name in the
+ * wakeup path.
+ */
+static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
+static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
-static bool kernfs_active(struct kernfs_node *kn)
+static bool __kernfs_active(struct kernfs_node *kn)
{
- lockdep_assert_held(&kernfs_rwsem);
return atomic_read(&kn->active) >= 0;
}
+static bool kernfs_active(struct kernfs_node *kn)
+{
+ lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
+ return __kernfs_active(kn);
+}
+
static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -39,22 +49,14 @@ static bool kernfs_lockdep(struct kernfs_node *kn)
#endif
}
-static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
-{
- if (!kn)
- return strlcpy(buf, "(null)", buflen);
-
- return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
-}
-
/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
size_t depth = 0;
- while (to->parent && to != from) {
+ while (rcu_dereference(to->__parent) && to != from) {
depth++;
- to = to->parent;
+ to = rcu_dereference(to->__parent);
}
return depth;
}
@@ -72,18 +74,18 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
db = kernfs_depth(rb->kn, b);
while (da > db) {
- a = a->parent;
+ a = rcu_dereference(a->__parent);
da--;
}
while (db > da) {
- b = b->parent;
+ b = rcu_dereference(b->__parent);
db--;
}
/* worst case b and a will be the same at root */
while (b != a) {
- b = b->parent;
- a = a->parent;
+ b = rcu_dereference(b->__parent);
+ a = rcu_dereference(a->__parent);
}
return a;
@@ -113,9 +115,9 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
* kn_to: /n1/n2/n3 [depth=3]
* result: /../..
*
- * [3] when @kn_to is NULL result will be "(null)"
+ * [3] when @kn_to is %NULL result will be "(null)"
*
- * Returns the length of the full path. If the full length is equal to or
+ * Return: the length of the constructed path. If the path would have been
* greater than @buflen, @buf contains the truncated path with the trailing
* '\0'. On error, -errno is returned.
*/
@@ -126,19 +128,17 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
struct kernfs_node *kn, *common;
const char parent_str[] = "/..";
size_t depth_from, depth_to, len = 0;
+ ssize_t copied;
int i, j;
if (!kn_to)
- return strlcpy(buf, "(null)", buflen);
+ return strscpy(buf, "(null)", buflen);
if (!kn_from)
kn_from = kernfs_root(kn_to)->kn;
if (kn_from == kn_to)
- return strlcpy(buf, "/", buflen);
-
- if (!buf)
- return -EINVAL;
+ return strscpy(buf, "/", buflen);
common = kernfs_common_ancestor(kn_from, kn_to);
if (WARN_ON(!common))
@@ -149,18 +149,22 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
buf[0] = '\0';
- for (i = 0; i < depth_from; i++)
- len += strlcpy(buf + len, parent_str,
- len < buflen ? buflen - len : 0);
+ for (i = 0; i < depth_from; i++) {
+ copied = strscpy(buf + len, parent_str, buflen - len);
+ if (copied < 0)
+ return copied;
+ len += copied;
+ }
/* Calculate how many bytes we need for the rest */
for (i = depth_to - 1; i >= 0; i--) {
+ const char *name;
+
for (kn = kn_to, j = 0; j < i; j++)
- kn = kn->parent;
- len += strlcpy(buf + len, "/",
- len < buflen ? buflen - len : 0);
- len += strlcpy(buf + len, kn->name,
- len < buflen ? buflen - len : 0);
+ kn = rcu_dereference(kn->__parent);
+
+ name = rcu_dereference(kn->name);
+ len += scnprintf(buf + len, buflen - len, "/%s", name);
}
return len;
@@ -173,22 +177,29 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
* @buflen: size of @buf
*
* Copies the name of @kn into @buf of @buflen bytes. The behavior is
- * similar to strlcpy(). It returns the length of @kn's name and if @buf
- * isn't long enough, it's filled upto @buflen-1 and nul terminated.
+ * similar to strscpy().
+ *
+ * Fills buffer with "(null)" if @kn is %NULL.
*
- * Fills buffer with "(null)" if @kn is NULL.
+ * Return: the resulting length of @buf. If @buf isn't long enough,
+ * it is filled up to @buflen-1 and nul terminated, and -E2BIG is returned.
*
* This function can be called from any context.
*/
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_node *kn_parent;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_name_locked(kn, buf, buflen);
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ if (!kn)
+ return strscpy(buf, "(null)", buflen);
+
+ guard(rcu)();
+ /*
+ * KERNFS_ROOT_INVARIANT_PARENT is ignored here. The name is RCU freed and
+	 * KERNFS_ROOT_INVARIANT_PARENT is ignored here. The name is RCU-freed,
+	 * and the parent either exists at this instant or it does not.
+ kn_parent = rcu_dereference(kn->__parent);
+ return strscpy(buf, kn_parent ? rcu_dereference(kn->name) : "/", buflen);
}
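A hypothetical caller of the reworked kernfs_name() now treats a negative return as truncation instead of comparing the return value against the buffer size. A minimal sketch (print_node_name is illustrative, not part of the patch):

/* Hypothetical caller: print a node name, noting truncation. */
static void print_node_name(struct kernfs_node *kn)
{
	char name[32];
	int len = kernfs_name(kn, name, sizeof(name));

	if (len < 0)		/* -E2BIG: the name did not fit */
		pr_warn("kernfs: node name truncated\n");
	else
		pr_info("kernfs node: %s (%d bytes)\n", name, len);
}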
/**
@@ -203,20 +214,24 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
* path (which includes '..'s) as needed to reach from @from to @to is
* returned.
*
- * Returns the length of the full path. If the full length is equal to or
+ * Return: the length of the constructed path. If the path would have been
* greater than @buflen, @buf contains the truncated path with the trailing
* '\0'. On error, -errno is returned.
*/
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_root *root;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_path_from_node_locked(to, from, buf, buflen);
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ guard(rcu)();
+ if (to) {
+ root = kernfs_root(to);
+ if (!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)) {
+ guard(read_lock_irqsave)(&root->kernfs_rename_lock);
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
+ }
+ }
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
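kernfs_path_from_node() above leans on the scope-based guard() helpers. A minimal sketch of that pattern, assuming the <linux/cleanup.h> style guards used elsewhere in this patch (the function name is illustrative):

/* Illustrative: each guard() is released automatically at scope exit. */
static int walk_example(struct kernfs_root *root)
{
	guard(rcu)();			/* rcu_read_unlock() happens on return */

	if (root->flags & KERNFS_ROOT_INVARIANT_PARENT)
		return 0;		/* parents never move, RCU alone suffices */

	/* otherwise also take the per-root rename lock for the walk */
	guard(read_lock_irqsave)(&root->kernfs_rename_lock);
	return 1;			/* both guards dropped here as well */
}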
@@ -230,12 +245,12 @@ void pr_cont_kernfs_name(struct kernfs_node *kn)
{
unsigned long flags;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
+ spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
- kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
+ kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
pr_cont("%s", kernfs_pr_cont_buf);
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+ spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}
/**
@@ -249,24 +264,22 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
unsigned long flags;
int sz;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
+ spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
- sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
- sizeof(kernfs_pr_cont_buf));
+ sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
+ sizeof(kernfs_pr_cont_buf));
if (sz < 0) {
- pr_cont("(error)");
- goto out;
- }
-
- if (sz >= sizeof(kernfs_pr_cont_buf)) {
- pr_cont("(name too long)");
+ if (sz == -E2BIG)
+ pr_cont("(name too long)");
+ else
+ pr_cont("(error)");
goto out;
}
pr_cont("%s", kernfs_pr_cont_buf);
out:
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+ spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}
/**
@@ -275,26 +288,30 @@ out:
*
* Determines @kn's parent, pins and returns it. This function can be
* called from any context.
+ *
+ * Return: parent node of @kn
*/
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
struct kernfs_node *parent;
+ struct kernfs_root *root;
unsigned long flags;
- spin_lock_irqsave(&kernfs_rename_lock, flags);
- parent = kn->parent;
+ root = kernfs_root(kn);
+ read_lock_irqsave(&root->kernfs_rename_lock, flags);
+ parent = kernfs_parent(kn);
kernfs_get(parent);
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+ read_unlock_irqrestore(&root->kernfs_rename_lock, flags);
return parent;
}
/**
- * kernfs_name_hash
+ * kernfs_name_hash - calculate hash of @ns + @name
* @name: Null terminated string to hash
* @ns: Namespace tag to hash
*
- * Returns 31 bit hash of ns + name (so it fits in an off_t )
+ * Return: 31-bit hash of ns + name (so it fits in an off_t)
*/
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
@@ -323,13 +340,13 @@ static int kernfs_name_compare(unsigned int hash, const char *name,
return -1;
if (ns > kn->ns)
return 1;
- return strcmp(name, kn->name);
+ return strcmp(name, kernfs_rcu_name(kn));
}
static int kernfs_sd_compare(const struct kernfs_node *left,
const struct kernfs_node *right)
{
- return kernfs_name_compare(left->hash, left->name, left->ns, right);
+ return kernfs_name_compare(left->hash, kernfs_rcu_name(left), left->ns, right);
}
/**
@@ -342,13 +359,17 @@ static int kernfs_sd_compare(const struct kernfs_node *left,
* Locking:
* kernfs_rwsem held exclusive
*
- * RETURNS:
- * 0 on susccess -EEXIST on failure.
+ * Return:
+ * %0 on success, -EEXIST on failure.
*/
static int kernfs_link_sibling(struct kernfs_node *kn)
{
- struct rb_node **node = &kn->parent->dir.children.rb_node;
struct rb_node *parent = NULL;
+ struct kernfs_node *kn_parent;
+ struct rb_node **node;
+
+ kn_parent = kernfs_parent(kn);
+ node = &kn_parent->dir.children.rb_node;
while (*node) {
struct kernfs_node *pos;
@@ -367,12 +388,14 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
/* add new node and rebalance the tree */
rb_link_node(&kn->rb, parent, node);
- rb_insert_color(&kn->rb, &kn->parent->dir.children);
+ rb_insert_color(&kn->rb, &kn_parent->dir.children);
/* successfully added, account subdir number */
+ down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs++;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs++;
+ kernfs_inc_rev(kn_parent);
+ up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
return 0;
}
@@ -382,22 +405,29 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
* @kn: kernfs_node of interest
*
* Try to unlink @kn from its sibling rbtree which starts from
- * kn->parent->dir.children. Returns %true if @kn was actually
- * removed, %false if @kn wasn't on the rbtree.
+ * kn->parent->dir.children.
+ *
+ * Return: %true if @kn was actually removed,
+ * %false if @kn wasn't on the rbtree.
*
* Locking:
* kernfs_rwsem held exclusive
*/
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
+ struct kernfs_node *kn_parent;
+
if (RB_EMPTY_NODE(&kn->rb))
return false;
+ kn_parent = kernfs_parent(kn);
+ down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs--;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs--;
+ kernfs_inc_rev(kn_parent);
+ up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
- rb_erase(&kn->rb, &kn->parent->dir.children);
+ rb_erase(&kn->rb, &kn_parent->dir.children);
RB_CLEAR_NODE(&kn->rb);
return true;
}
@@ -407,10 +437,10 @@ static bool kernfs_unlink_sibling(struct kernfs_node *kn)
* @kn: kernfs_node to get an active reference to
*
* Get an active reference of @kn. This function is noop if @kn
- * is NULL.
+ * is %NULL.
*
- * RETURNS:
- * Pointer to @kn on success, NULL on failure.
+ * Return:
+ * Pointer to @kn on success, %NULL on failure.
*/
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
@@ -430,7 +460,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
* @kn: kernfs_node to put an active reference to
*
* Put an active reference to @kn. This function is noop if @kn
- * is NULL.
+ * is %NULL.
*/
void kernfs_put_active(struct kernfs_node *kn)
{
@@ -452,19 +482,30 @@ void kernfs_put_active(struct kernfs_node *kn)
* kernfs_drain - drain kernfs_node
* @kn: kernfs_node to drain
*
- * Drain existing usages and nuke all existing mmaps of @kn. Mutiple
+ * Drain existing usages and nuke all existing mmaps of @kn. Multiple
* removers may invoke this function concurrently on @kn and all will
* return after draining is complete.
*/
static void kernfs_drain(struct kernfs_node *kn)
- __releases(&kernfs_rwsem) __acquires(&kernfs_rwsem)
+ __releases(&kernfs_root(kn)->kernfs_rwsem)
+ __acquires(&kernfs_root(kn)->kernfs_rwsem)
{
struct kernfs_root *root = kernfs_root(kn);
- lockdep_assert_held_write(&kernfs_rwsem);
+ lockdep_assert_held_write(&root->kernfs_rwsem);
WARN_ON_ONCE(kernfs_active(kn));
- up_write(&kernfs_rwsem);
+ /*
+ * Skip draining if already fully drained. This avoids draining and its
+ * lockdep annotations for nodes which have never been activated
+ * allowing embedding kernfs_remove() in create error paths without
+ * worrying about draining.
+ */
+ if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
+ !kernfs_should_drain_open_files(kn))
+ return;
+
+ up_write(&root->kernfs_rwsem);
if (kernfs_lockdep(kn)) {
rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
@@ -472,7 +513,6 @@ static void kernfs_drain(struct kernfs_node *kn)
lock_contended(&kn->dep_map, _RET_IP_);
}
- /* but everyone should wait for draining */
wait_event(root->deactivate_waitq,
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
@@ -481,9 +521,10 @@ static void kernfs_drain(struct kernfs_node *kn)
rwsem_release(&kn->dep_map, _RET_IP_);
}
- kernfs_drain_open_files(kn);
+ if (kernfs_should_drain_open_files(kn))
+ kernfs_drain_open_files(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
}
/**
@@ -499,6 +540,21 @@ void kernfs_get(struct kernfs_node *kn)
}
EXPORT_SYMBOL_GPL(kernfs_get);
+static void kernfs_free_rcu(struct rcu_head *rcu)
+{
+ struct kernfs_node *kn = container_of(rcu, struct kernfs_node, rcu);
+
+	/* If the whole node goes away, then the name can't be used outside */
+ kfree_const(rcu_access_pointer(kn->name));
+
+ if (kn->iattr) {
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
+ }
+
+ kmem_cache_free(kernfs_node_cache, kn);
+}
+
/**
* kernfs_put - put a reference count on a kernfs_node
* @kn: the target kernfs_node
@@ -518,25 +574,21 @@ void kernfs_put(struct kernfs_node *kn)
* Moving/renaming is always done while holding reference.
* kn->parent won't change beneath us.
*/
- parent = kn->parent;
+ parent = kernfs_parent(kn);
WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
"kernfs_put: %s/%s: released with incorrect active_ref %d\n",
- parent ? parent->name : "", kn->name, atomic_read(&kn->active));
+ parent ? rcu_dereference(parent->name) : "",
+ rcu_dereference(kn->name), atomic_read(&kn->active));
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
- kfree_const(kn->name);
-
- if (kn->iattr) {
- simple_xattrs_free(&kn->iattr->xattrs);
- kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
- }
- spin_lock(&kernfs_idr_lock);
+ spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
- spin_unlock(&kernfs_idr_lock);
- kmem_cache_free(kernfs_node_cache, kn);
+ spin_unlock(&root->kernfs_idr_lock);
+
+ call_rcu(&kn->rcu, kernfs_free_rcu);
kn = parent;
if (kn) {
@@ -545,7 +597,7 @@ void kernfs_put(struct kernfs_node *kn)
} else {
/* just released the root kn, free @root too */
idr_destroy(&root->ino_idr);
- kfree(root);
+ kfree_rcu(root, rcu);
}
}
EXPORT_SYMBOL_GPL(kernfs_put);
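Because the node and its name are now freed via call_rcu()/kfree_rcu(), a lockless reader only needs an RCU read-side critical section to look at them. A hedged reader-side sketch (illustrative helper, not part of the patch):

static void peek_node_name(struct kernfs_node *kn)
{
	rcu_read_lock();
	/*
	 * kn->name may be replaced by a concurrent rename, but the old
	 * string is only freed after a grace period, so this access is safe.
	 */
	pr_info("kernfs node: %s\n", rcu_dereference(kn->name));
	rcu_read_unlock();
}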
@@ -554,7 +606,7 @@ EXPORT_SYMBOL_GPL(kernfs_put);
* kernfs_node_from_dentry - determine kernfs_node associated with a dentry
* @dentry: the dentry in question
*
- * Return the kernfs_node associated with @dentry. If @dentry is not a
+ * Return: the kernfs_node associated with @dentry. If @dentry is not a
* kernfs one, %NULL is returned.
*
* While the returned kernfs_node will stay accessible as long as @dentry
@@ -587,13 +639,13 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
goto err_out1;
idr_preload(GFP_KERNEL);
- spin_lock(&kernfs_idr_lock);
+ spin_lock(&root->kernfs_idr_lock);
ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
if (ret >= 0 && ret < root->last_id_lowbits)
root->id_highbits++;
id_highbits = root->id_highbits;
root->last_id_lowbits = ret;
- spin_unlock(&kernfs_idr_lock);
+ spin_unlock(&root->kernfs_idr_lock);
idr_preload_end();
if (ret < 0)
goto err_out2;
@@ -604,7 +656,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
- kn->name = name;
+ rcu_assign_pointer(kn->name, name);
kn->mode = mode;
kn->flags = flags;
@@ -623,13 +675,18 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
if (parent) {
ret = security_kernfs_init_security(parent, kn);
if (ret)
- goto err_out3;
+ goto err_out4;
}
return kn;
+ err_out4:
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
err_out3:
+ spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+ spin_unlock(&root->kernfs_idr_lock);
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
@@ -644,11 +701,23 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
{
struct kernfs_node *kn;
+ if (parent->mode & S_ISGID) {
+ /* this code block imitates inode_init_owner() for
+ * kernfs
+ */
+
+ if (parent->iattr)
+ gid = parent->iattr->ia_gid;
+
+ if (flags & KERNFS_DIR)
+ mode |= S_ISGID;
+ }
+
kn = __kernfs_new_node(kernfs_root(parent), parent,
name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
- kn->parent = parent;
+ rcu_assign_pointer(kn->__parent, parent);
}
return kn;
}
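The S_ISGID block above mirrors the usual setgid-directory rule (compare inode_init_owner()). A simplified model of that rule, with illustrative names:

/*
 * Simplified model: children of a setgid directory inherit its group,
 * and child directories also keep the setgid bit so the rule propagates.
 */
static void inherit_setgid(umode_t parent_mode, kgid_t parent_gid,
			   umode_t *mode, kgid_t *gid, bool is_dir)
{
	if (!(parent_mode & S_ISGID))
		return;

	*gid = parent_gid;
	if (is_dir)
		*mode |= S_ISGID;
}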
@@ -661,8 +730,8 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
* @id's lower 32bits encode ino and upper gen. If the gen portion is
* zero, all generations are matched.
*
- * RETURNS:
- * NULL on failure. Return a kernfs node with reference counter incremented
+ * Return: %NULL on failure,
+ * otherwise a kernfs node with reference counter incremented.
*/
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
u64 id)
@@ -671,7 +740,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
ino_t ino = kernfs_id_ino(id);
u32 gen = kernfs_id_gen(id);
- spin_lock(&kernfs_idr_lock);
+ rcu_read_lock();
kn = idr_find(&root->ino_idr, (u32)ino);
if (!kn)
@@ -688,18 +757,17 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
}
/*
- * ACTIVATED is protected with kernfs_mutex but it was clear when
- * @kn was added to idr and we just wanna see it set. No need to
- * grab kernfs_mutex.
+ * We should fail if @kn has never been activated and guarantee success
+ * if the caller knows that @kn is active. Both can be achieved by
+ * __kernfs_active() which tests @kn->active without kernfs_rwsem.
*/
- if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
- !atomic_inc_not_zero(&kn->count)))
+ if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
goto err_unlock;
- spin_unlock(&kernfs_idr_lock);
+ rcu_read_unlock();
return kn;
err_unlock:
- spin_unlock(&kernfs_idr_lock);
+ rcu_read_unlock();
return NULL;
}
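The lookup above follows the common RCU pattern of finding the object locklessly and only taking a reference while the refcount is still non-zero, so a concurrent final put cannot be resurrected. A generic sketch with illustrative types (struct obj and obj_idr are not kernfs symbols):

struct obj {
	refcount_t ref;
};

static struct obj *obj_find_and_get(struct idr *obj_idr, unsigned long id)
{
	struct obj *o;

	rcu_read_lock();
	o = idr_find(obj_idr, id);
	/* a concurrent final put makes the refcount 0; don't revive it */
	if (o && !refcount_inc_not_zero(&o->ref))
		o = NULL;
	rcu_read_unlock();

	return o;
}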
@@ -711,49 +779,52 @@ err_unlock:
* function increments nlink of the parent's inode if @kn is a
* directory and link into the children list of the parent.
*
- * RETURNS:
- * 0 on success, -EEXIST if entry with the given name already
+ * Return:
+ * %0 on success, -EEXIST if entry with the given name already
* exists.
*/
int kernfs_add_one(struct kernfs_node *kn)
{
- struct kernfs_node *parent = kn->parent;
+ struct kernfs_root *root = kernfs_root(kn);
struct kernfs_iattrs *ps_iattr;
+ struct kernfs_node *parent;
bool has_ns;
int ret;
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, kn->name))
+ has_ns ? "required" : "invalid",
+ kernfs_rcu_name(parent), kernfs_rcu_name(kn)))
goto out_unlock;
if (kernfs_type(parent) != KERNFS_DIR)
goto out_unlock;
ret = -ENOENT;
- if (parent->flags & KERNFS_EMPTY_DIR)
- goto out_unlock;
-
- if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
+ if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
goto out_unlock;
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(kernfs_rcu_name(kn), kn->ns);
ret = kernfs_link_sibling(kn);
if (ret)
goto out_unlock;
/* Update timestamps on the parent */
+ down_write(&root->kernfs_iattr_rwsem);
+
ps_iattr = parent->iattr;
if (ps_iattr) {
ktime_get_real_ts64(&ps_iattr->ia_ctime);
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
}
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_iattr_rwsem);
+ up_write(&root->kernfs_rwsem);
/*
* Activate the new node unless CREATE_DEACTIVATED is requested.
@@ -767,7 +838,7 @@ int kernfs_add_one(struct kernfs_node *kn)
return 0;
out_unlock:
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return ret;
}
@@ -777,8 +848,9 @@ out_unlock:
* @name: name to look for
* @ns: the namespace tag to use
*
- * Look for kernfs_node with name @name under @parent. Returns pointer to
- * the found kernfs_node on success, %NULL on failure.
+ * Look for kernfs_node with name @name under @parent.
+ *
+ * Return: pointer to the found kernfs_node on success, %NULL on failure.
*/
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
const unsigned char *name,
@@ -788,11 +860,11 @@ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
bool has_ns = kernfs_ns_enabled(parent);
unsigned int hash;
- lockdep_assert_held(&kernfs_rwsem);
+ lockdep_assert_held(&kernfs_root(parent)->kernfs_rwsem);
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, name);
+ has_ns ? "required" : "invalid", kernfs_rcu_name(parent), name);
return NULL;
}
@@ -817,18 +889,17 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
const unsigned char *path,
const void *ns)
{
- size_t len;
+ ssize_t len;
char *p, *name;
- lockdep_assert_held_read(&kernfs_rwsem);
+ lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);
- /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
- spin_lock_irq(&kernfs_rename_lock);
+ spin_lock_irq(&kernfs_pr_cont_lock);
- len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
+ len = strscpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
- if (len >= sizeof(kernfs_pr_cont_buf)) {
- spin_unlock_irq(&kernfs_rename_lock);
+ if (len < 0) {
+ spin_unlock_irq(&kernfs_pr_cont_lock);
return NULL;
}
@@ -840,7 +911,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
parent = kernfs_find_ns(parent, name, ns);
}
- spin_unlock_irq(&kernfs_rename_lock);
+ spin_unlock_irq(&kernfs_pr_cont_lock);
return parent;
}
@@ -852,18 +923,20 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
* @ns: the namespace tag to use
*
* Look for kernfs_node with name @name under @parent and get a reference
- * if found. This function may sleep and returns pointer to the found
- * kernfs_node on success, %NULL on failure.
+ * if found. This function may sleep.
+ *
+ * Return: pointer to the found kernfs_node on success, %NULL on failure.
*/
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns)
{
struct kernfs_node *kn;
+ struct kernfs_root *root = kernfs_root(parent);
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
kernfs_get(kn);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return kn;
}
@@ -876,29 +949,36 @@ EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
* @ns: the namespace tag to use
*
* Look for kernfs_node with path @path under @parent and get a reference
- * if found. This function may sleep and returns pointer to the found
- * kernfs_node on success, %NULL on failure.
+ * if found. This function may sleep.
+ *
+ * Return: pointer to the found kernfs_node on success, %NULL on failure.
*/
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
const char *path, const void *ns)
{
struct kernfs_node *kn;
+ struct kernfs_root *root = kernfs_root(parent);
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
kn = kernfs_walk_ns(parent, path, ns);
kernfs_get(kn);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return kn;
}
+unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{
+ return kernfs_root(kn)->flags;
+}
+
/**
* kernfs_create_root - create a new kernfs hierarchy
* @scops: optional syscall operations for the hierarchy
* @flags: KERNFS_ROOT_* flags
* @priv: opaque data associated with the new directory
*
- * Returns the root of the new hierarchy on success, ERR_PTR() value on
+ * Return: the root of the new hierarchy on success, ERR_PTR() value on
* failure.
*/
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
@@ -912,7 +992,12 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
return ERR_PTR(-ENOMEM);
idr_init(&root->ino_idr);
+ spin_lock_init(&root->kernfs_idr_lock);
+ init_rwsem(&root->kernfs_rwsem);
+ init_rwsem(&root->kernfs_iattr_rwsem);
+ init_rwsem(&root->kernfs_supers_rwsem);
INIT_LIST_HEAD(&root->supers);
+ rwlock_init(&root->kernfs_rename_lock);
/*
* On 64bit ino setups, id is ino. On 32bit, low 32bits are ino.
@@ -957,7 +1042,24 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
*/
void kernfs_destroy_root(struct kernfs_root *root)
{
- kernfs_remove(root->kn); /* will also free @root */
+ /*
+ * kernfs_remove holds kernfs_rwsem from the root so the root
+ * shouldn't be freed during the operation.
+ */
+ kernfs_get(root->kn);
+ kernfs_remove(root->kn);
+ kernfs_put(root->kn); /* will also free @root */
+}
+
+/**
+ * kernfs_root_to_node - return the kernfs_node associated with a kernfs_root
+ * @root: root to use to lookup
+ *
+ * Return: @root's kernfs_node
+ */
+struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
+{
+ return root->kn;
}
/**
@@ -970,7 +1072,7 @@ void kernfs_destroy_root(struct kernfs_root *root)
* @priv: opaque data associated with the new directory
* @ns: optional namespace tag of the directory
*
- * Returns the created node on success, ERR_PTR() value on failure.
+ * Return: the created node on success, ERR_PTR() value on failure.
*/
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
@@ -1004,7 +1106,7 @@ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
* @parent: parent in which to create a new directory
* @name: name of the new directory
*
- * Returns the created node on success, ERR_PTR() value on failure.
+ * Return: the created node on success, ERR_PTR() value on failure.
*/
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name)
@@ -1032,32 +1134,43 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
return ERR_PTR(rc);
}
-static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
+static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct kernfs_node *kn;
+ struct kernfs_node *kn, *parent;
+ struct kernfs_root *root;
if (flags & LOOKUP_RCU)
return -ECHILD;
/* Negative hashed dentry? */
if (d_really_is_negative(dentry)) {
- struct kernfs_node *parent;
-
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
+ *
+ * There's nothing special needed here when getting the
+ * dentry parent, even if a concurrent rename is in
+ * progress. That's because the dentry is negative so
+ * it can only be the target of the rename and it will
+ * be doing a d_move() not a replace. Consequently the
+ * dentry d_parent won't change over the d_move().
+ *
+ * Also kernfs negative dentries transitioning from
+ * negative to positive during revalidate won't happen
+ * because they are invalidated on containing directory
+ * changes and the lookup re-done so that a new positive
+ * dentry can be properly created.
*/
- down_read(&kernfs_rwsem);
- spin_lock(&dentry->d_lock);
+ root = kernfs_root_from_sb(dentry->d_sb);
+ down_read(&root->kernfs_rwsem);
parent = kernfs_dentry_node(dentry->d_parent);
if (parent) {
if (kernfs_dir_changed(parent, dentry)) {
- spin_unlock(&dentry->d_lock);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 0;
}
}
- spin_unlock(&dentry->d_lock);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
/* The kernfs parent node hasn't changed, leave the
* dentry negative and return success.
@@ -1066,29 +1179,31 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
}
kn = kernfs_dentry_node(dentry);
- down_read(&kernfs_rwsem);
+ root = kernfs_root(kn);
+ down_read(&root->kernfs_rwsem);
/* The kernfs node has been deactivated */
if (!kernfs_active(kn))
goto out_bad;
+ parent = kernfs_parent(kn);
/* The kernfs node has been moved? */
- if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
+ if (kernfs_dentry_node(dentry->d_parent) != parent)
goto out_bad;
/* The kernfs node has been renamed */
- if (strcmp(dentry->d_name.name, kn->name) != 0)
+ if (strcmp(dentry->d_name.name, kernfs_rcu_name(kn)) != 0)
goto out_bad;
/* The kernfs node has been moved to a different namespace */
- if (kn->parent && kernfs_ns_enabled(kn->parent) &&
+ if (parent && kernfs_ns_enabled(parent) &&
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 1;
out_bad:
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 0;
}
@@ -1102,10 +1217,12 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_node *kn;
+ struct kernfs_root *root;
struct inode *inode = NULL;
const void *ns = NULL;
- down_read(&kernfs_rwsem);
+ root = kernfs_root(parent);
+ down_read(&root->kernfs_rwsem);
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dir->i_sb)->ns;
@@ -1116,7 +1233,7 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
* create a negative.
*/
if (!kernfs_active(kn)) {
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return NULL;
}
inode = kernfs_get_inode(dir->i_sb, kn);
@@ -1131,30 +1248,30 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
*/
if (!IS_ERR(inode))
kernfs_set_rev(parent, dentry);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
/* instantiate and hash (possibly negative) dentry */
return d_splice_alias(inode, dentry);
}
-static int kernfs_iop_mkdir(struct user_namespace *mnt_userns,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *kernfs_iop_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
int ret;
if (!scops || !scops->mkdir)
- return -EPERM;
+ return ERR_PTR(-EPERM);
if (!kernfs_get_active(parent))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
ret = scops->mkdir(parent, dentry->d_name.name, mode);
kernfs_put_active(parent);
- return ret;
+ return ERR_PTR(ret);
}
static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
@@ -1175,7 +1292,7 @@ static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
return ret;
}
-static int kernfs_iop_rename(struct user_namespace *mnt_userns,
+static int kernfs_iop_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -1248,13 +1365,15 @@ static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
* Find the next descendant to visit for post-order traversal of @root's
* descendants. @root is included in the iteration and the last node to be
* visited.
+ *
+ * Return: the next descendant to visit or %NULL when done.
*/
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
struct kernfs_node *root)
{
struct rb_node *rbn;
- lockdep_assert_held_write(&kernfs_rwsem);
+ lockdep_assert_held_write(&kernfs_root(root)->kernfs_rwsem);
/* if first iteration, visit leftmost descendant which may be root */
if (!pos)
@@ -1270,7 +1389,22 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
return kernfs_leftmost_descendant(rb_to_kn(rbn));
/* no sibling left, visit parent */
- return pos->parent;
+ return kernfs_parent(pos);
+}
+
+static void kernfs_activate_one(struct kernfs_node *kn)
+{
+ lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
+
+ kn->flags |= KERNFS_ACTIVATED;
+
+ if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
+ return;
+
+ WARN_ON_ONCE(rcu_access_pointer(kn->__parent) && RB_EMPTY_NODE(&kn->rb));
+ WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+
+ atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
}
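kernfs_activate_one() relies on the active-reference bias trick: a node starts deep in the negative range so that try-get fails, and activation lifts the bias exactly once. A condensed, illustrative sketch (names are not the kernfs ones):

#define DEACTIVATED_BIAS	INT_MIN		/* mirrors KN_DEACTIVATED_BIAS */

/* try-get only succeeds while the counter is non-negative (activated) */
static bool obj_tryget_active(atomic_t *active)
{
	return atomic_inc_unless_negative(active);
}

/* activation removes the bias, making later try-gets possible */
static void obj_activate(atomic_t *active)
{
	atomic_sub(DEACTIVATED_BIAS, active);
}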
/**
@@ -1289,83 +1423,110 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
void kernfs_activate(struct kernfs_node *kn)
{
struct kernfs_node *pos;
+ struct kernfs_root *root = kernfs_root(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
pos = NULL;
- while ((pos = kernfs_next_descendant_post(pos, kn))) {
- if (pos->flags & KERNFS_ACTIVATED)
- continue;
+ while ((pos = kernfs_next_descendant_post(pos, kn)))
+ kernfs_activate_one(pos);
+
+ up_write(&root->kernfs_rwsem);
+}
+
+/**
+ * kernfs_show - show or hide a node
+ * @kn: kernfs_node to show or hide
+ * @show: whether to show or hide
+ *
+ * If @show is %false, @kn is marked hidden and deactivated. A hidden node is
+ * ignored in future activaitons. If %true, the mark is removed and activation
+ * state is restored. This function won't implicitly activate a new node in a
+ * %KERNFS_ROOT_CREATE_DEACTIVATED root which hasn't been activated yet.
+ *
+ * To avoid recursion complexities, directories aren't supported for now.
+ */
+void kernfs_show(struct kernfs_node *kn, bool show)
+{
+ struct kernfs_root *root = kernfs_root(kn);
+
+ if (WARN_ON_ONCE(kernfs_type(kn) == KERNFS_DIR))
+ return;
- WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
- WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
+ down_write(&root->kernfs_rwsem);
- atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
- pos->flags |= KERNFS_ACTIVATED;
+ if (show) {
+ kn->flags &= ~KERNFS_HIDDEN;
+ if (kn->flags & KERNFS_ACTIVATED)
+ kernfs_activate_one(kn);
+ } else {
+ kn->flags |= KERNFS_HIDDEN;
+ if (kernfs_active(kn))
+ atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
+ kernfs_drain(kn);
}
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
}
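A hypothetical user of kernfs_show(): a driver that temporarily hides an attribute file while it is being retooled, then restores it. Names are illustrative; note that hiding also drains current users.

/* Illustrative: hide an attribute during an update, then re-show it. */
static void attr_update_window(struct kernfs_node *status_kn)
{
	kernfs_show(status_kn, false);	/* hide and drain current openers */
	/* ... perform the update ... */
	kernfs_show(status_kn, true);	/* restore the prior activation state */
}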
static void __kernfs_remove(struct kernfs_node *kn)
{
- struct kernfs_node *pos;
+ struct kernfs_node *pos, *parent;
- lockdep_assert_held_write(&kernfs_rwsem);
+ /* Short-circuit if non-root @kn has already finished removal. */
+ if (!kn)
+ return;
+
+ lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
/*
- * Short-circuit if non-root @kn has already finished removal.
* This is for kernfs_remove_self() which plays with active ref
* after removal.
*/
- if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
+ if (kernfs_parent(kn) && RB_EMPTY_NODE(&kn->rb))
return;
- pr_debug("kernfs %s: removing\n", kn->name);
+ pr_debug("kernfs %s: removing\n", kernfs_rcu_name(kn));
- /* prevent any new usage under @kn by deactivating all nodes */
+ /* prevent new usage by marking all nodes removing and deactivating */
pos = NULL;
- while ((pos = kernfs_next_descendant_post(pos, kn)))
+ while ((pos = kernfs_next_descendant_post(pos, kn))) {
+ pos->flags |= KERNFS_REMOVING;
if (kernfs_active(pos))
atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
+ }
/* deactivate and unlink the subtree node-by-node */
do {
pos = kernfs_leftmost_descendant(kn);
/*
- * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
+ * kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
* base ref could have been put by someone else by the time
* the function returns. Make sure it doesn't go away
* underneath us.
*/
kernfs_get(pos);
- /*
- * Drain iff @kn was activated. This avoids draining and
- * its lockdep annotations for nodes which have never been
- * activated and allows embedding kernfs_remove() in create
- * error paths without worrying about draining.
- */
- if (kn->flags & KERNFS_ACTIVATED)
- kernfs_drain(pos);
- else
- WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
-
+ kernfs_drain(pos);
+ parent = kernfs_parent(pos);
/*
* kernfs_unlink_sibling() succeeds once per node. Use it
* to decide who's responsible for cleanups.
*/
- if (!pos->parent || kernfs_unlink_sibling(pos)) {
+ if (!parent || kernfs_unlink_sibling(pos)) {
struct kernfs_iattrs *ps_iattr =
- pos->parent ? pos->parent->iattr : NULL;
+ parent ? parent->iattr : NULL;
/* update timestamps on the parent */
+ down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
+
if (ps_iattr) {
ktime_get_real_ts64(&ps_iattr->ia_ctime);
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
}
+ up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
kernfs_put(pos);
}
@@ -1381,9 +1542,16 @@ static void __kernfs_remove(struct kernfs_node *kn)
*/
void kernfs_remove(struct kernfs_node *kn)
{
- down_write(&kernfs_rwsem);
+ struct kernfs_root *root;
+
+ if (!kn)
+ return;
+
+ root = kernfs_root(kn);
+
+ down_write(&root->kernfs_rwsem);
__kernfs_remove(kn);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
}
/**
@@ -1417,8 +1585,9 @@ void kernfs_break_active_protection(struct kernfs_node *kn)
* invoked before finishing the kernfs operation. Note that while this
* function restores the active reference, it doesn't and can't actually
* restore the active protection - @kn may already or be in the process of
- * being removed. Once kernfs_break_active_protection() is invoked, that
- * protection is irreversibly gone for the kernfs operation instance.
+ * being drained and removed. Once kernfs_break_active_protection() is
+ * invoked, that protection is irreversibly gone for the kernfs operation
+ * instance.
*
* While this function may be called at any point after
* kernfs_break_active_protection() is invoked, its most useful location
@@ -1465,12 +1634,15 @@ void kernfs_unbreak_active_protection(struct kernfs_node *kn)
* the whole kernfs_ops which won the arbitration. This can be used to
* guarantee, for example, all concurrent writes to a "delete" file to
* finish only after the whole operation is complete.
+ *
+ * Return: %true if @kn is removed by this call, otherwise %false.
*/
bool kernfs_remove_self(struct kernfs_node *kn)
{
bool ret;
+ struct kernfs_root *root = kernfs_root(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
kernfs_break_active_protection(kn);
/*
@@ -1498,9 +1670,9 @@ bool kernfs_remove_self(struct kernfs_node *kn)
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
break;
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
schedule();
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
}
finish_wait(waitq, &wait);
WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
@@ -1513,7 +1685,7 @@ bool kernfs_remove_self(struct kernfs_node *kn)
*/
kernfs_unbreak_active_protection(kn);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return ret;
}
@@ -1524,12 +1696,14 @@ bool kernfs_remove_self(struct kernfs_node *kn)
* @ns: namespace tag of the kernfs_node to remove
*
* Look for the kernfs_node with @name and @ns under @parent and remove it.
- * Returns 0 on success, -ENOENT if such entry doesn't exist.
+ *
+ * Return: %0 on success, -ENOENT if such entry doesn't exist.
*/
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
const void *ns)
{
struct kernfs_node *kn;
+ struct kernfs_root *root;
if (!parent) {
WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
@@ -1537,13 +1711,17 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
return -ENOENT;
}
- down_write(&kernfs_rwsem);
+ root = kernfs_root(parent);
+ down_write(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
- if (kn)
+ if (kn) {
+ kernfs_get(kn);
__kernfs_remove(kn);
+ kernfs_put(kn);
+ }
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
if (kn)
return 0;
@@ -1557,28 +1735,42 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
* @new_parent: new parent to put @sd under
* @new_name: new name
* @new_ns: new namespace tag
+ *
+ * Return: %0 on success, -errno on failure.
*/
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns)
{
struct kernfs_node *old_parent;
- const char *old_name = NULL;
+ struct kernfs_root *root;
+ const char *old_name;
int error;
/* can't move or rename root */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return -EINVAL;
- down_write(&kernfs_rwsem);
+ root = kernfs_root(kn);
+ down_write(&root->kernfs_rwsem);
error = -ENOENT;
if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
(new_parent->flags & KERNFS_EMPTY_DIR))
goto out;
+ old_parent = kernfs_parent(kn);
+ if (root->flags & KERNFS_ROOT_INVARIANT_PARENT) {
+ error = -EINVAL;
+ if (WARN_ON_ONCE(old_parent != new_parent))
+ goto out;
+ }
+
error = 0;
- if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
- (strcmp(kn->name, new_name) == 0))
+ old_name = kernfs_rcu_name(kn);
+ if (!new_name)
+ new_name = old_name;
+ if ((old_parent == new_parent) && (kn->ns == new_ns) &&
+ (strcmp(old_name, new_name) == 0))
goto out; /* nothing to rename */
error = -EEXIST;
@@ -1586,7 +1778,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
goto out;
/* rename kernfs_node */
- if (strcmp(kn->name, new_name) != 0) {
+ if (strcmp(old_name, new_name) != 0) {
error = -ENOMEM;
new_name = kstrdup_const(new_name, GFP_KERNEL);
if (!new_name)
@@ -1599,40 +1791,39 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
* Move to the appropriate place in the appropriate directories rbtree.
*/
kernfs_unlink_sibling(kn);
- kernfs_get(new_parent);
- /* rename_lock protects ->parent and ->name accessors */
- spin_lock_irq(&kernfs_rename_lock);
+ /* rename_lock protects ->parent accessors */
+ if (old_parent != new_parent) {
+ kernfs_get(new_parent);
+ write_lock_irq(&root->kernfs_rename_lock);
- old_parent = kn->parent;
- kn->parent = new_parent;
+ rcu_assign_pointer(kn->__parent, new_parent);
- kn->ns = new_ns;
- if (new_name) {
- old_name = kn->name;
- kn->name = new_name;
- }
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
- spin_unlock_irq(&kernfs_rename_lock);
+ write_unlock_irq(&root->kernfs_rename_lock);
+ kernfs_put(old_parent);
+ } else {
+ /* name assignment is RCU protected, parent is the same */
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
+ }
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(new_name ?: old_name, kn->ns);
kernfs_link_sibling(kn);
- kernfs_put(old_parent);
- kfree_const(old_name);
+ if (new_name && !is_kernel_rodata((unsigned long)old_name))
+ kfree_rcu_mightsleep(old_name);
error = 0;
out:
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return error;
}
-/* Relationship between mode and the DT_xxx types */
-static inline unsigned char dt_type(struct kernfs_node *kn)
-{
- return (kn->mode >> 12) & 15;
-}
-
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
kernfs_put(filp->private_data);
@@ -1644,7 +1835,8 @@ static struct kernfs_node *kernfs_dir_pos(const void *ns,
{
if (pos) {
int valid = kernfs_active(pos) &&
- pos->parent == parent && hash == pos->hash;
+ rcu_access_pointer(pos->__parent) == parent &&
+ hash == pos->hash;
kernfs_put(pos);
if (!valid)
pos = NULL;
@@ -1694,11 +1886,14 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
struct dentry *dentry = file->f_path.dentry;
struct kernfs_node *parent = kernfs_dentry_node(dentry);
struct kernfs_node *pos = file->private_data;
+ struct kernfs_root *root;
const void *ns = NULL;
if (!dir_emit_dots(file, ctx))
return 0;
- down_read(&kernfs_rwsem);
+
+ root = kernfs_root(parent);
+ down_read(&root->kernfs_rwsem);
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dentry->d_sb)->ns;
@@ -1706,8 +1901,8 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
pos;
pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
- const char *name = pos->name;
- unsigned int type = dt_type(pos);
+ const char *name = kernfs_rcu_name(pos);
+ unsigned int type = fs_umode_to_dtype(pos->mode);
int len = strlen(name);
ino_t ino = kernfs_ino(pos);
@@ -1715,12 +1910,12 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
- up_read(&kernfs_rwsem);
- if (!dir_emit(ctx, name, len, ino, type))
+ if (!dir_emit(ctx, name, len, ino, type)) {
+ up_read(&root->kernfs_rwsem);
return 0;
- down_read(&kernfs_rwsem);
+ }
}
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
file->private_data = NULL;
ctx->pos = INT_MAX;
return 0;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 60e2a86c535e..9adf36e6364b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -18,25 +18,13 @@
#include "kernfs-internal.h"
-/*
- * There's one kernfs_open_file for each open file and one kernfs_open_node
- * for each kernfs_node with one or more open files.
- *
- * kernfs_node->attr.open points to kernfs_open_node. attr.open is
- * protected by kernfs_open_node_lock.
- *
- * filp->private_data points to seq_file whose ->private points to
- * kernfs_open_file. kernfs_open_files are chained at
- * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
- */
-static DEFINE_SPINLOCK(kernfs_open_node_lock);
-static DEFINE_MUTEX(kernfs_open_file_mutex);
-
struct kernfs_open_node {
- atomic_t refcnt;
+ struct rcu_head rcu_head;
atomic_t event;
wait_queue_head_t poll;
struct list_head files; /* goes through kernfs_open_file.list */
+ unsigned int nr_mmapped;
+ unsigned int nr_to_release;
};
/*
@@ -45,13 +33,85 @@ struct kernfs_open_node {
* pending queue is implemented as a singly linked list of kernfs_nodes.
* The list is terminated with the self pointer so that whether a
* kernfs_node is on the list or not can be determined by testing the next
- * pointer for NULL.
+ * pointer for %NULL.
*/
#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+ int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
+
+ return &kernfs_locks->open_file_mutex[idx];
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+ struct mutex *lock;
+
+ lock = kernfs_open_file_mutex_ptr(kn);
+
+ mutex_lock(lock);
+
+ return lock;
+}
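The per-node mutex above is picked from a hashed table instead of a single global lock. A standalone sketch of the technique (NR_LOCK_BITS and example_locks are illustrative stand-ins for the kernfs names):

#include <linux/hash.h>
#include <linux/mutex.h>

#define NR_LOCK_BITS	6			/* 2^6 = 64 buckets */

/* each bucket must be mutex_init()'d once at boot; init loop omitted */
static struct mutex example_locks[1 << NR_LOCK_BITS];

/*
 * Hash the object pointer to pick a bucket: unrelated objects rarely
 * share a mutex, so contention is spread across the table.
 */
static struct mutex *example_lock_for(const void *ptr)
{
	return &example_locks[hash_ptr(ptr, NR_LOCK_BITS)];
}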
+
+/**
+ * of_on - Get the kernfs_open_node of the specified kernfs_open_file
+ * @of: target kernfs_open_file
+ *
+ * Return: the kernfs_open_node of the kernfs_open_file
+ */
+static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+{
+ return rcu_dereference_protected(of->kn->attr.open,
+ !list_empty(&of->list));
+}
+
+/* Get active reference to kernfs node for an open file */
+static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
+{
+ /* Skip if file was already released */
+ if (unlikely(of->released))
+ return NULL;
+
+ if (!kernfs_get_active(of->kn))
+ return NULL;
+
+ return of;
+}
+
+static void kernfs_put_active_of(struct kernfs_open_file *of)
+{
+ return kernfs_put_active(of->kn);
+}
+
+/**
+ * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
+ *
+ * @kn: target kernfs_node.
+ *
+ * Fetch and return ->attr.open of @kn when caller holds the
+ * kernfs_open_file_mutex_ptr(kn).
+ *
+ * Updates of ->attr.open happen under kernfs_open_file_mutex_ptr(kn). So when
+ * the caller holds this mutex, no other updater can change ->attr.open, which
+ * means we can safely dereference ->attr.open outside an RCU read-side
+ * critical section.
+ *
+ * The caller needs to make sure that kernfs_open_file_mutex is held.
+ *
+ * Return: @kn->attr.open when kernfs_open_file_mutex is held.
+ */
+static struct kernfs_open_node *
+kernfs_deref_open_node_locked(struct kernfs_node *kn)
+{
+ return rcu_dereference_protected(kn->attr.open,
+ lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
+}
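kernfs_deref_open_node_locked() uses the rcu_dereference_protected() idiom: when the caller already holds the lock that all updaters take, the pointer can be read without an RCU read-side section, and lockdep checks the claim. A generic sketch (cfg_ptr and cfg_lock are illustrative):

struct cfg {
	int value;
};

static struct cfg __rcu *cfg_ptr;
static DEFINE_MUTEX(cfg_lock);

/*
 * Caller holds cfg_lock, the only lock updaters take, so no RCU
 * read-side section is needed; lockdep verifies the condition.
 */
static int cfg_value_locked(void)
{
	struct cfg *c = rcu_dereference_protected(cfg_ptr,
						  lockdep_is_held(&cfg_lock));

	return c ? c->value : 0;
}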
+
static struct kernfs_open_file *kernfs_of(struct file *file)
{
return ((struct seq_file *)file->private_data)->private;
@@ -97,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
if (ops->seq_stop)
ops->seq_stop(sf, v);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
}
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
@@ -110,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return ERR_PTR(-ENODEV);
ops = kernfs_ops(of->kn);
@@ -120,13 +180,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
if (next == ERR_PTR(-ENODEV))
kernfs_seq_stop_active(sf, next);
return next;
- } else {
- /*
- * The same behavior and code as single_open(). Returns
- * !NULL if pos is at the beginning; otherwise, NULL.
- */
- return NULL + !*ppos;
}
+ return single_start(sf, ppos);
}
static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
@@ -163,7 +218,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
- of->event = atomic_read(&of->kn->attr.open->event);
+ of->event = atomic_read(&of_on(of)->event);
return of->kn->attr.ops->seq_show(sf, v);
}
@@ -201,20 +256,21 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn)) {
+ if (!kernfs_get_active_of(of)) {
len = -ENODEV;
mutex_unlock(&of->mutex);
goto out_free;
}
- of->event = atomic_read(&of->kn->attr.open->event);
+ of->event = atomic_read(&of_on(of)->event);
+
ops = kernfs_ops(of->kn);
if (ops->read)
len = ops->read(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len < 0)
@@ -249,7 +305,7 @@ static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* There is no easy way for us to know if userspace is only doing a partial
* write, so we don't support them. We expect the entire buffer to come on
* the first write. Hint: if you're writing a value, first read the file,
- * modify only the the value you're changing, then write entire buffer
+ * modify only the value you're changing, then write the entire buffer
* back.
*/
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -285,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
- if (!kernfs_get_active(of->kn)) {
+ if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
len = -ENODEV;
goto out_free;
@@ -297,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
else
len = -EINVAL;
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len > 0)
@@ -319,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
if (!of->vm_ops)
return;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return;
if (of->vm_ops->open)
of->vm_ops->open(vma);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
}
static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
@@ -337,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
if (of->vm_ops->fault)
ret = of->vm_ops->fault(vmf);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
return ret;
}
@@ -357,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = 0;
@@ -366,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
else
file_update_time(file);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
return ret;
}
@@ -380,71 +436,22 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
if (!of->vm_ops)
return -EINVAL;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
return -EINVAL;
ret = -EINVAL;
if (of->vm_ops->access)
ret = of->vm_ops->access(vma, addr, buf, len, write);
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
return ret;
}
-#ifdef CONFIG_NUMA
-static int kernfs_vma_set_policy(struct vm_area_struct *vma,
- struct mempolicy *new)
-{
- struct file *file = vma->vm_file;
- struct kernfs_open_file *of = kernfs_of(file);
- int ret;
-
- if (!of->vm_ops)
- return 0;
-
- if (!kernfs_get_active(of->kn))
- return -EINVAL;
-
- ret = 0;
- if (of->vm_ops->set_policy)
- ret = of->vm_ops->set_policy(vma, new);
-
- kernfs_put_active(of->kn);
- return ret;
-}
-
-static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct file *file = vma->vm_file;
- struct kernfs_open_file *of = kernfs_of(file);
- struct mempolicy *pol;
-
- if (!of->vm_ops)
- return vma->vm_policy;
-
- if (!kernfs_get_active(of->kn))
- return vma->vm_policy;
-
- pol = vma->vm_policy;
- if (of->vm_ops->get_policy)
- pol = of->vm_ops->get_policy(vma, addr);
-
- kernfs_put_active(of->kn);
- return pol;
-}
-
-#endif
-
static const struct vm_operations_struct kernfs_vm_ops = {
.open = kernfs_vma_open,
.fault = kernfs_vma_fault,
.page_mkwrite = kernfs_vma_page_mkwrite,
.access = kernfs_vma_access,
-#ifdef CONFIG_NUMA
- .set_policy = kernfs_vma_set_policy,
- .get_policy = kernfs_vma_get_policy,
-#endif
};
static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
@@ -458,7 +465,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
* warnings and we don't want to add spurious locking dependency
* between the two. Check whether mmap is actually implemented
* without grabbing @of->mutex by testing HAS_MMAP flag. See the
- * comment in kernfs_file_open() for more details.
+ * comment in kernfs_fop_open() for more details.
*/
if (!(of->kn->flags & KERNFS_HAS_MMAP))
return -ENODEV;
@@ -466,7 +473,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&of->mutex);
rc = -ENODEV;
- if (!kernfs_get_active(of->kn))
+ if (!kernfs_get_active_of(of))
goto out_unlock;
ops = kernfs_ops(of->kn);
@@ -490,16 +497,18 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
* It is not possible to successfully wrap close.
* So error if someone is trying to use close.
*/
- rc = -EINVAL;
if (vma->vm_ops && vma->vm_ops->close)
goto out_put;
rc = 0;
- of->mmapped = true;
- of->vm_ops = vma->vm_ops;
+ if (!of->mmapped) {
+ of->mmapped = true;
+ of_on(of)->nr_mmapped++;
+ of->vm_ops = vma->vm_ops;
+ }
vma->vm_ops = &kernfs_vm_ops;
out_put:
- kernfs_put_active(of->kn);
+ kernfs_put_active_of(of);
out_unlock:
mutex_unlock(&of->mutex);
@@ -514,84 +523,88 @@ out_unlock:
* If @kn->attr.open exists, increment its reference count; otherwise,
* create one. @of is chained to the files list.
*
- * LOCKING:
+ * Locking:
* Kernel thread context (may sleep).
*
- * RETURNS:
- * 0 on success, -errno on failure.
+ * Return:
+ * %0 on success, -errno on failure.
*/
static int kernfs_get_open_node(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
- struct kernfs_open_node *on, *new_on = NULL;
-
- retry:
- mutex_lock(&kernfs_open_file_mutex);
- spin_lock_irq(&kernfs_open_node_lock);
+ struct kernfs_open_node *on;
+ struct mutex *mutex;
- if (!kn->attr.open && new_on) {
- kn->attr.open = new_on;
- new_on = NULL;
- }
+ mutex = kernfs_open_file_mutex_lock(kn);
+ on = kernfs_deref_open_node_locked(kn);
- on = kn->attr.open;
- if (on) {
- atomic_inc(&on->refcnt);
- list_add_tail(&of->list, &on->files);
+ if (!on) {
+ /* not there, initialize a new one */
+ on = kzalloc(sizeof(*on), GFP_KERNEL);
+ if (!on) {
+ mutex_unlock(mutex);
+ return -ENOMEM;
+ }
+ atomic_set(&on->event, 1);
+ init_waitqueue_head(&on->poll);
+ INIT_LIST_HEAD(&on->files);
+ rcu_assign_pointer(kn->attr.open, on);
}
- spin_unlock_irq(&kernfs_open_node_lock);
- mutex_unlock(&kernfs_open_file_mutex);
+ list_add_tail(&of->list, &on->files);
+ if (kn->flags & KERNFS_HAS_RELEASE)
+ on->nr_to_release++;
- if (on) {
- kfree(new_on);
- return 0;
- }
-
- /* not there, initialize a new one and retry */
- new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
- if (!new_on)
- return -ENOMEM;
-
- atomic_set(&new_on->refcnt, 0);
- atomic_set(&new_on->event, 1);
- init_waitqueue_head(&new_on->poll);
- INIT_LIST_HEAD(&new_on->files);
- goto retry;
+ mutex_unlock(mutex);
+ return 0;
}
/**
- * kernfs_put_open_node - put kernfs_open_node
- * @kn: target kernfs_nodet
+ * kernfs_unlink_open_file - Unlink @of from @kn.
+ *
+ * @kn: target kernfs_node
* @of: associated kernfs_open_file
+ * @open_failed: ->open() failed, cancel ->release()
*
- * Put @kn->attr.open and unlink @of from the files list. If
- * reference count reaches zero, disassociate and free it.
+ * Unlink @of from list of @kn's associated open files. If list of
+ * associated open files becomes empty, disassociate and free
+ * kernfs_open_node.
*
* LOCKING:
* None.
*/
-static void kernfs_put_open_node(struct kernfs_node *kn,
- struct kernfs_open_file *of)
+static void kernfs_unlink_open_file(struct kernfs_node *kn,
+ struct kernfs_open_file *of,
+ bool open_failed)
{
- struct kernfs_open_node *on = kn->attr.open;
- unsigned long flags;
+ struct kernfs_open_node *on;
+ struct mutex *mutex;
- mutex_lock(&kernfs_open_file_mutex);
- spin_lock_irqsave(&kernfs_open_node_lock, flags);
+ mutex = kernfs_open_file_mutex_lock(kn);
- if (of)
- list_del(&of->list);
+ on = kernfs_deref_open_node_locked(kn);
+ if (!on) {
+ mutex_unlock(mutex);
+ return;
+ }
- if (atomic_dec_and_test(&on->refcnt))
- kn->attr.open = NULL;
- else
- on = NULL;
+ if (of) {
+ if (kn->flags & KERNFS_HAS_RELEASE) {
+ WARN_ON_ONCE(of->released == open_failed);
+ if (open_failed)
+ on->nr_to_release--;
+ }
+ if (of->mmapped)
+ on->nr_mmapped--;
+ list_del(&of->list);
+ }
- spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
- mutex_unlock(&kernfs_open_file_mutex);
+ if (list_empty(&on->files)) {
+ rcu_assign_pointer(kn->attr.open, NULL);
+ kfree_rcu(on, rcu_head);
+ }
- kfree(on);
+ mutex_unlock(mutex);
}
static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -641,11 +654,18 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
* each file a separate locking class. Let's differentiate on
* whether the file has mmap or not for now.
*
- * Both paths of the branch look the same. They're supposed to
+ * For similar reasons, writable and readonly files are given different
+ * lockdep keys, because the writable file /sys/power/resume may call vfs
+ * lookup helpers for arbitrary paths and readonly files can be read by
+ * overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
+ *
+ * All three cases look the same. They're supposed to
* look that way and give @of->mutex different static lockdep keys.
*/
if (has_mmap)
mutex_init(&of->mutex);
+ else if (file->f_mode & FMODE_WRITE)
+ mutex_init(&of->mutex);
else
mutex_init(&of->mutex);
@@ -711,7 +731,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
return 0;
err_put_node:
- kernfs_put_open_node(kn, of);
+ kernfs_unlink_open_file(kn, of, true);
err_seq_release:
seq_release(inode, file);
err_free:
@@ -729,11 +749,11 @@ static void kernfs_release_file(struct kernfs_node *kn,
/*
* @of is guaranteed to have no other file operations in flight and
* we just want to synchronize release and drain paths.
- * @kernfs_open_file_mutex is enough. @of->mutex can't be used
+ * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
* here because drain path may be called from places which can
* cause circular dependency.
*/
- lockdep_assert_held(&kernfs_open_file_mutex);
+ lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
if (!of->released) {
/*
@@ -743,6 +763,7 @@ static void kernfs_release_file(struct kernfs_node *kn,
*/
kn->attr.ops->release(of);
of->released = true;
+ of_on(of)->nr_to_release--;
}
}
@@ -752,12 +773,14 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
struct kernfs_open_file *of = kernfs_of(filp);
if (kn->flags & KERNFS_HAS_RELEASE) {
- mutex_lock(&kernfs_open_file_mutex);
+ struct mutex *mutex;
+
+ mutex = kernfs_open_file_mutex_lock(kn);
kernfs_release_file(kn, of);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}
- kernfs_put_open_node(kn, of);
+ kernfs_unlink_open_file(kn, of, false);
seq_release(inode, filp);
kfree(of->prealloc_buf);
kfree(of);
@@ -765,37 +788,54 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
return 0;
}
+bool kernfs_should_drain_open_files(struct kernfs_node *kn)
+{
+ struct kernfs_open_node *on;
+ bool ret;
+
+ /*
+ * @kn being deactivated guarantees that @kn->attr.open can't change
+ * beneath us, making the lockless test below safe.
+ * Callers past kernfs_unbreak_active_protection() may be counted in
+ * kn->active by now; do not WARN_ON because of them.
+ */
+
+ rcu_read_lock();
+ on = rcu_dereference(kn->attr.open);
+ ret = on && (on->nr_mmapped || on->nr_to_release);
+ rcu_read_unlock();
+
+ return ret;
+}
+
void kernfs_drain_open_files(struct kernfs_node *kn)
{
struct kernfs_open_node *on;
struct kernfs_open_file *of;
+ struct mutex *mutex;
- if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
+ mutex = kernfs_open_file_mutex_lock(kn);
+ on = kernfs_deref_open_node_locked(kn);
+ if (!on) {
+ mutex_unlock(mutex);
return;
-
- spin_lock_irq(&kernfs_open_node_lock);
- on = kn->attr.open;
- if (on)
- atomic_inc(&on->refcnt);
- spin_unlock_irq(&kernfs_open_node_lock);
- if (!on)
- return;
-
- mutex_lock(&kernfs_open_file_mutex);
+ }
list_for_each_entry(of, &on->files, list) {
struct inode *inode = file_inode(of->file);
- if (kn->flags & KERNFS_HAS_MMAP)
+ if (of->mmapped) {
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+ of->mmapped = false;
+ on->nr_mmapped--;
+ }
if (kn->flags & KERNFS_HAS_RELEASE)
kernfs_release_file(kn, of);
}
- mutex_unlock(&kernfs_open_file_mutex);
-
- kernfs_put_open_node(kn, NULL);
+ WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release);
+ mutex_unlock(mutex);
}
/*
@@ -814,8 +854,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
*/
__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
{
- struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
- struct kernfs_open_node *on = kn->attr.open;
+ struct kernfs_open_node *on = of_on(of);
poll_wait(of->file, &on->poll, wait);
@@ -831,7 +870,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
__poll_t ret;
- if (!kernfs_get_active(kn))
+ if (!kernfs_get_active_of(of))
return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
if (kn->attr.ops->poll)
@@ -839,7 +878,34 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
else
ret = kernfs_generic_poll(of, wait);
- kernfs_put_active(kn);
+ kernfs_put_active_of(of);
+ return ret;
+}
+
+static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct kernfs_open_file *of = kernfs_of(file);
+ const struct kernfs_ops *ops;
+ loff_t ret;
+
+ /*
+ * @of->mutex nests outside active ref and is primarily to ensure that
+ * the ops aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!kernfs_get_active_of(of)) {
+ mutex_unlock(&of->mutex);
+ return -ENODEV;
+ }
+
+ ops = kernfs_ops(of->kn);
+ if (ops->llseek)
+ ret = ops->llseek(of, offset, whence);
+ else
+ ret = generic_file_llseek(file, offset, whence);
+
+ kernfs_put_active_of(of);
+ mutex_unlock(&of->mutex);
return ret;
}
@@ -847,6 +913,7 @@ static void kernfs_notify_workfn(struct work_struct *work)
{
struct kernfs_node *kn;
struct kernfs_super_info *info;
+ struct kernfs_root *root;
repeat:
/* pop one off the notify_list */
spin_lock_irq(&kernfs_notify_lock);
@@ -859,12 +926,15 @@ repeat:
kn->attr.notify_next = NULL;
spin_unlock_irq(&kernfs_notify_lock);
+ root = kernfs_root(kn);
/* kick fsnotify */
- down_write(&kernfs_rwsem);
+ down_read(&root->kernfs_supers_rwsem);
+ down_read(&root->kernfs_rwsem);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
struct inode *p_inode = NULL;
+ const char *kn_name;
struct inode *inode;
struct qstr name;
@@ -878,7 +948,8 @@ repeat:
if (!inode)
continue;
- name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
+ kn_name = kernfs_rcu_name(kn);
+ name = QSTR(kn_name);
parent = kernfs_get_parent(kn);
if (parent) {
p_inode = ilookup(info->sb, kernfs_ino(parent));
@@ -898,7 +969,8 @@ repeat:
iput(inode);
}
- up_write(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
+ up_read(&root->kernfs_supers_rwsem);
kernfs_put(kn);
goto repeat;
}
@@ -920,13 +992,13 @@ void kernfs_notify(struct kernfs_node *kn)
return;
/* kick poll immediately */
- spin_lock_irqsave(&kernfs_open_node_lock, flags);
- on = kn->attr.open;
+ rcu_read_lock();
+ on = rcu_dereference(kn->attr.open);
if (on) {
atomic_inc(&on->event);
wake_up_interruptible(&on->poll);
}
- spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+ rcu_read_unlock();
/* schedule work to kick fsnotify */
spin_lock_irqsave(&kernfs_notify_lock, flags);
@@ -943,13 +1015,13 @@ EXPORT_SYMBOL_GPL(kernfs_notify);
const struct file_operations kernfs_file_fops = {
.read_iter = kernfs_fop_read_iter,
.write_iter = kernfs_fop_write_iter,
- .llseek = generic_file_llseek,
+ .llseek = kernfs_fop_llseek,
.mmap = kernfs_fop_mmap,
.open = kernfs_fop_open,
.release = kernfs_fop_release,
.poll = kernfs_fop_poll,
.fsync = noop_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
};
@@ -966,7 +1038,7 @@ const struct file_operations kernfs_file_fops = {
* @ns: optional namespace tag of the file
* @key: lockdep key for the file's active_ref, %NULL to disable lockdep
*
- * Returns the created node on success, ERR_PTR() value on error.
+ * Return: the created node on success, ERR_PTR() value on error.
*/
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
const char *name,
@@ -1000,7 +1072,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
#endif
/*
- * kn->attr.ops is accesible only while holding active ref. We
+ * kn->attr.ops is accessible only while holding active ref. We
* need to know whether some ops are implemented outside active
* ref. Cache their existence in flags.
*/
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index c0eae1725435..a36aaee98dce 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -24,45 +24,46 @@ static const struct inode_operations kernfs_iops = {
.listxattr = kernfs_iop_listxattr,
};
-static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc)
+static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, bool alloc)
{
- static DEFINE_MUTEX(iattr_mutex);
- struct kernfs_iattrs *ret;
+ struct kernfs_iattrs *ret __free(kfree) = NULL;
+ struct kernfs_iattrs *attr;
- mutex_lock(&iattr_mutex);
+ attr = READ_ONCE(kn->iattr);
+ if (attr || !alloc)
+ return attr;
- if (kn->iattr || !alloc)
- goto out_unlock;
-
- kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
- if (!kn->iattr)
- goto out_unlock;
+ ret = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
+ if (!ret)
+ return NULL;
/* assign default attributes */
- kn->iattr->ia_uid = GLOBAL_ROOT_UID;
- kn->iattr->ia_gid = GLOBAL_ROOT_GID;
-
- ktime_get_real_ts64(&kn->iattr->ia_atime);
- kn->iattr->ia_mtime = kn->iattr->ia_atime;
- kn->iattr->ia_ctime = kn->iattr->ia_atime;
-
- simple_xattrs_init(&kn->iattr->xattrs);
- atomic_set(&kn->iattr->nr_user_xattrs, 0);
- atomic_set(&kn->iattr->user_xattr_size, 0);
-out_unlock:
- ret = kn->iattr;
- mutex_unlock(&iattr_mutex);
- return ret;
+ ret->ia_uid = GLOBAL_ROOT_UID;
+ ret->ia_gid = GLOBAL_ROOT_GID;
+
+ ktime_get_real_ts64(&ret->ia_atime);
+ ret->ia_mtime = ret->ia_atime;
+ ret->ia_ctime = ret->ia_atime;
+
+ simple_xattrs_init(&ret->xattrs);
+ atomic_set(&ret->nr_user_xattrs, 0);
+ atomic_set(&ret->user_xattr_size, 0);
+
+ /* If someone raced us, recognize it. */
+ if (!try_cmpxchg(&kn->iattr, &attr, ret))
+ return READ_ONCE(kn->iattr);
+
+ return no_free_ptr(ret);
}
static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
{
- return __kernfs_iattrs(kn, 1);
+ return __kernfs_iattrs(kn, true);
}
static struct kernfs_iattrs *kernfs_iattrs_noalloc(struct kernfs_node *kn)
{
- return __kernfs_iattrs(kn, 0);
+ return __kernfs_iattrs(kn, false);
}
int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
@@ -94,30 +95,33 @@ int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
* @kn: target node
* @iattr: iattr to set
*
- * Returns 0 on success, -errno on failure.
+ * Return: %0 on success, -errno on failure.
*/
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
int ret;
+ struct kernfs_root *root = kernfs_root(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_iattr_rwsem);
ret = __kernfs_setattr(kn, iattr);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_iattr_rwsem);
return ret;
}
-int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int kernfs_iop_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
struct kernfs_node *kn = inode->i_private;
+ struct kernfs_root *root;
int error;
if (!kn)
return -EINVAL;
- down_write(&kernfs_rwsem);
- error = setattr_prepare(&init_user_ns, dentry, iattr);
+ root = kernfs_root(kn);
+ down_write(&root->kernfs_iattr_rwsem);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
if (error)
goto out;
@@ -126,10 +130,10 @@ int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
goto out;
/* this ignores size changes */
- setattr_copy(&init_user_ns, inode, iattr);
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
out:
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_iattr_rwsem);
return error;
}
@@ -148,8 +152,7 @@ ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime =
- inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
}
static inline void set_inode_attr(struct inode *inode,
@@ -157,16 +160,17 @@ static inline void set_inode_attr(struct inode *inode,
{
inode->i_uid = attrs->ia_uid;
inode->i_gid = attrs->ia_gid;
- inode->i_atime = attrs->ia_atime;
- inode->i_mtime = attrs->ia_mtime;
- inode->i_ctime = attrs->ia_ctime;
+ inode_set_atime_to_ts(inode, attrs->ia_atime);
+ inode_set_mtime_to_ts(inode, attrs->ia_mtime);
+ inode_set_ctime_to_ts(inode, attrs->ia_ctime);
}
static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
{
- struct kernfs_iattrs *attrs = kn->iattr;
+ struct kernfs_iattrs *attrs;
inode->i_mode = kn->mode;
+ attrs = kernfs_iattrs_noalloc(kn);
if (attrs)
/*
* kernfs_node has non-default attributes get them from
@@ -178,19 +182,18 @@ static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
set_nlink(inode, kn->dir.subdirs + 2);
}
-int kernfs_iop_getattr(struct user_namespace *mnt_userns,
+int kernfs_iop_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct kernfs_node *kn = inode->i_private;
+ struct kernfs_root *root = kernfs_root(kn);
- down_read(&kernfs_rwsem);
- spin_lock(&inode->i_lock);
+ down_read(&root->kernfs_iattr_rwsem);
kernfs_refresh_inode(kn, inode);
- generic_fillattr(&init_user_ns, inode, stat);
- spin_unlock(&inode->i_lock);
- up_read(&kernfs_rwsem);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ up_read(&root->kernfs_iattr_rwsem);
return 0;
}
@@ -237,18 +240,18 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
* allocated and basics are initialized. New inode is returned
* locked.
*
- * LOCKING:
+ * Locking:
* Kernel thread context (may sleep).
*
- * RETURNS:
- * Pointer to allocated inode on success, NULL on failure.
+ * Return:
+ * Pointer to allocated inode on success, %NULL on failure.
*/
struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
{
struct inode *inode;
inode = iget_locked(sb, kernfs_ino(kn));
- if (inode && (inode->i_state & I_NEW))
+ if (inode && (inode_state_read_once(inode) & I_NEW))
kernfs_init_inode(kn, inode);
return inode;
@@ -270,23 +273,23 @@ void kernfs_evict_inode(struct inode *inode)
kernfs_put(kn);
}
-int kernfs_iop_permission(struct user_namespace *mnt_userns,
+int kernfs_iop_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
struct kernfs_node *kn;
+ struct kernfs_root *root;
int ret;
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
kn = inode->i_private;
+ root = kernfs_root(kn);
- down_read(&kernfs_rwsem);
- spin_lock(&inode->i_lock);
+ down_read(&root->kernfs_iattr_rwsem);
kernfs_refresh_inode(kn, inode);
- ret = generic_permission(&init_user_ns, inode, mask);
- spin_unlock(&inode->i_lock);
- up_read(&kernfs_rwsem);
+ ret = generic_permission(&nop_mnt_idmap, inode, mask);
+ up_read(&root->kernfs_iattr_rwsem);
return ret;
}
@@ -304,11 +307,19 @@ int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags)
{
- struct kernfs_iattrs *attrs = kernfs_iattrs(kn);
+ struct simple_xattr *old_xattr;
+ struct kernfs_iattrs *attrs;
+
+ attrs = kernfs_iattrs(kn);
if (!attrs)
return -ENOMEM;
- return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL);
+ old_xattr = simple_xattr_set(&attrs->xattrs, name, value, size, flags);
+ if (IS_ERR(old_xattr))
+ return PTR_ERR(old_xattr);
+
+ simple_xattr_free(old_xattr);
+ return 0;
}
static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
@@ -322,7 +333,7 @@ static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
}
static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *suffix, const void *value,
size_t size, int flags)
@@ -338,9 +349,10 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
struct simple_xattrs *xattrs,
const void *value, size_t size, int flags)
{
- atomic_t *sz = &kn->iattr->user_xattr_size;
- atomic_t *nr = &kn->iattr->nr_user_xattrs;
- ssize_t removed_size;
+ struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn);
+ atomic_t *sz = &attr->user_xattr_size;
+ atomic_t *nr = &attr->nr_user_xattrs;
+ struct simple_xattr *old_xattr;
int ret;
if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
@@ -353,13 +365,18 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
goto dec_size_out;
}
- ret = simple_xattr_set(xattrs, full_name, value, size, flags,
- &removed_size);
-
- if (!ret && removed_size >= 0)
- size = removed_size;
- else if (!ret)
+ old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
+ if (!old_xattr)
return 0;
+
+ if (IS_ERR(old_xattr)) {
+ ret = PTR_ERR(old_xattr);
+ goto dec_size_out;
+ }
+
+ ret = 0;
+ size = old_xattr->size;
+ simple_xattr_free(old_xattr);
dec_size_out:
atomic_sub(size, sz);
dec_count_out:
@@ -372,24 +389,26 @@ static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
struct simple_xattrs *xattrs,
const void *value, size_t size, int flags)
{
- atomic_t *sz = &kn->iattr->user_xattr_size;
- atomic_t *nr = &kn->iattr->nr_user_xattrs;
- ssize_t removed_size;
- int ret;
+ struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn);
+ atomic_t *sz = &attr->user_xattr_size;
+ atomic_t *nr = &attr->nr_user_xattrs;
+ struct simple_xattr *old_xattr;
- ret = simple_xattr_set(xattrs, full_name, value, size, flags,
- &removed_size);
+ old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
+ if (!old_xattr)
+ return 0;
- if (removed_size >= 0) {
- atomic_sub(removed_size, sz);
- atomic_dec(nr);
- }
+ if (IS_ERR(old_xattr))
+ return PTR_ERR(old_xattr);
- return ret;
+ atomic_sub(old_xattr->size, sz);
+ atomic_dec(nr);
+ simple_xattr_free(old_xattr);
+ return 0;
}
static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *suffix, const void *value,
size_t size, int flags)
@@ -432,7 +451,7 @@ static const struct xattr_handler kernfs_user_xattr_handler = {
.set = kernfs_vfs_user_xattr_set,
};
-const struct xattr_handler *kernfs_xattr_handlers[] = {
+const struct xattr_handler * const kernfs_xattr_handlers[] = {
&kernfs_trusted_xattr_handler,
&kernfs_security_xattr_handler,
&kernfs_user_xattr_handler,
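The __kernfs_iattrs() rewrite above drops the static iattr_mutex in favour of a first-publisher-wins scheme: allocate a candidate off to the side, publish it with try_cmpxchg(), and have the loser of a race free its copy and use the winner's. A generic sketch of the pattern, with made-up struct and field names for illustration:

/* Hypothetical names; mirrors the publish-with-cmpxchg pattern used above. */
struct foo { int x; };

struct bar {
	struct foo *foo;	/* written once, then read locklessly */
};

static struct foo *bar_get_or_alloc_foo(struct bar *b)
{
	struct foo *old = READ_ONCE(b->foo);
	struct foo *new;

	if (old)
		return old;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	/* on failure, try_cmpxchg() writes the winner's pointer into @old */
	if (!try_cmpxchg(&b->foo, &old, new)) {
		kfree(new);
		return old;
	}
	return new;
}

The __free(kfree) annotation in the patch expresses the same cleanup automatically: if the cmpxchg loses, the scoped pointer is freed on return, and no_free_ptr() hands ownership over only when the candidate was actually published.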
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index f9cc912c31e1..6061b6f70d2a 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -31,6 +31,32 @@ struct kernfs_iattrs {
atomic_t user_xattr_size;
};
+struct kernfs_root {
+ /* published fields */
+ struct kernfs_node *kn;
+ unsigned int flags; /* KERNFS_ROOT_* flags */
+
+ /* private fields, do not use outside kernfs proper */
+ struct idr ino_idr;
+ spinlock_t kernfs_idr_lock; /* root->ino_idr */
+ u32 last_id_lowbits;
+ u32 id_highbits;
+ struct kernfs_syscall_ops *syscall_ops;
+
+ /* list of kernfs_super_info of this root, protected by kernfs_rwsem */
+ struct list_head supers;
+
+ wait_queue_head_t deactivate_waitq;
+ struct rw_semaphore kernfs_rwsem;
+ struct rw_semaphore kernfs_iattr_rwsem;
+ struct rw_semaphore kernfs_supers_rwsem;
+
+ /* kn->parent and kn->name */
+ rwlock_t kernfs_rename_lock;
+
+ struct rcu_head rcu;
+};
+
/* +1 to avoid triggering overflow warning when negating it */
#define KN_DEACTIVATED_BIAS (INT_MIN + 1)
@@ -40,13 +66,16 @@ struct kernfs_iattrs {
* kernfs_root - find out the kernfs_root a kernfs_node belongs to
* @kn: kernfs_node of interest
*
- * Return the kernfs_root @kn belongs to.
+ * Return: the kernfs_root @kn belongs to.
*/
-static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
+static inline struct kernfs_root *kernfs_root(const struct kernfs_node *kn)
{
+ const struct kernfs_node *knp;
/* if parent exists, it's always a dir; otherwise, @sd is a dir */
- if (kn->parent)
- kn = kn->parent;
+ guard(rcu)();
+ knp = rcu_dereference(kn->__parent);
+ if (knp)
+ kn = knp;
return kn->dir.root;
}
@@ -75,6 +104,38 @@ struct kernfs_super_info {
};
#define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info))
+static inline bool kernfs_root_is_locked(const struct kernfs_node *kn)
+{
+ return lockdep_is_held(&kernfs_root(kn)->kernfs_rwsem);
+}
+
+static inline bool kernfs_rename_is_locked(const struct kernfs_node *kn)
+{
+ return lockdep_is_held(&kernfs_root(kn)->kernfs_rename_lock);
+}
+
+static inline const char *kernfs_rcu_name(const struct kernfs_node *kn)
+{
+ return rcu_dereference_check(kn->name, kernfs_root_is_locked(kn));
+}
+
+static inline struct kernfs_node *kernfs_parent(const struct kernfs_node *kn)
+{
+ /*
+ * The kernfs_node::__parent remains valid within an RCU section. The kn
+ * can be reparented (and renamed), which changes the entry. This can be
+ * avoided by locking kernfs_root::kernfs_rwsem or
+ * kernfs_root::kernfs_rename_lock.
+ * Either lock can be used to obtain a reference on __parent. Once the
+ * reference count reaches 0, the node is about to be freed and cannot
+ * be renamed (or moved to a different parent) anymore.
+ */
+ return rcu_dereference_check(kn->__parent,
+ kernfs_root_is_locked(kn) ||
+ kernfs_rename_is_locked(kn) ||
+ !atomic_read(&kn->count));
+}
+
static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry)
{
if (d_really_is_negative(dentry))
@@ -107,13 +168,13 @@ extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
/*
* inode.c
*/
-extern const struct xattr_handler *kernfs_xattr_handlers[];
+extern const struct xattr_handler * const kernfs_xattr_handlers[];
void kernfs_evict_inode(struct inode *inode);
-int kernfs_iop_permission(struct user_namespace *mnt_userns,
+int kernfs_iop_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
-int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int kernfs_iop_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr);
-int kernfs_iop_getattr(struct user_namespace *mnt_userns,
+int kernfs_iop_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags);
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
@@ -122,7 +183,6 @@ int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
/*
* dir.c
*/
-extern struct rw_semaphore kernfs_rwsem;
extern const struct dentry_operations kernfs_dops;
extern const struct file_operations kernfs_dir_fops;
extern const struct inode_operations kernfs_dir_iops;
@@ -140,6 +200,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
*/
extern const struct file_operations kernfs_file_fops;
+bool kernfs_should_drain_open_files(struct kernfs_node *kn);
void kernfs_drain_open_files(struct kernfs_node *kn);
/*
@@ -147,4 +208,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn);
*/
extern const struct inode_operations kernfs_symlink_iops;
+/*
+ * kernfs locks
+ */
+extern struct kernfs_global_locks *kernfs_locks;
#endif /* __KERNFS_INTERNAL_H */
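kernfs_parent() and kernfs_rcu_name() above are only safe under RCU, while holding the root's kernfs_rwsem or kernfs_rename_lock, or once the node's refcount has dropped to zero. A purely illustrative walker (the function itself is hypothetical) using those accessors under an RCU guard:

/* Illustrative only: how deep does @kn sit below its root? */
static int kernfs_depth_of(struct kernfs_node *kn)
{
	int depth = 0;

	guard(rcu)();			/* keeps __parent stable while we walk */
	while ((kn = kernfs_parent(kn)))
		depth++;

	return depth;
}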
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index f2f909d09f52..3ac52e141766 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -16,10 +16,14 @@
#include <linux/namei.h>
#include <linux/seq_file.h>
#include <linux/exportfs.h>
+#include <linux/uuid.h>
+#include <linux/statfs.h>
#include "kernfs-internal.h"
-struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
+struct kmem_cache *kernfs_node_cache __ro_after_init;
+struct kmem_cache *kernfs_iattrs_cache __ro_after_init;
+struct kernfs_global_locks *kernfs_locks __ro_after_init;
static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
{
@@ -44,13 +48,35 @@ static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
return 0;
}
+static int kernfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ simple_statfs(dentry, buf);
+ buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
+ return 0;
+}
+
const struct super_operations kernfs_sops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .statfs = kernfs_statfs,
+ .drop_inode = inode_just_drop,
.evict_inode = kernfs_evict_inode,
.show_options = kernfs_sop_show_options,
.show_path = kernfs_sop_show_path,
+
+ /*
+ * sysfs is built on top of kernfs and sysfs provides the power
+ * management infrastructure to support suspend/hibernate by
+ * writing to various files in /sys/power/. As filesystems may
+ * be automatically frozen during suspend/hibernate, implementing
+ * freeze/thaw support for kernfs generically will cause
+ * deadlocks, as the suspend/hibernate-initiating task will
+ * hold a VFS lock that it will then wait upon to be released.
+ * If freeze/thaw for kernfs is needed, talk to the VFS.
+ */
+ .freeze_fs = NULL,
+ .unfreeze_fs = NULL,
+ .freeze_super = NULL,
+ .thaw_super = NULL,
};
static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
@@ -114,9 +140,6 @@ static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb,
inode = kernfs_get_inode(sb, kn);
kernfs_put(kn);
- if (!inode)
- return ERR_PTR(-ESTALE);
-
return d_obtain_alias(inode);
}
@@ -137,8 +160,10 @@ static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
{
struct kernfs_node *kn = kernfs_dentry_node(child);
+ struct kernfs_root *root = kernfs_root(kn);
- return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent));
+ guard(rwsem_read)(&root->kernfs_rwsem);
+ return d_obtain_alias(kernfs_get_inode(child->d_sb, kernfs_parent(kn)));
}
static const struct export_operations kernfs_export_ops = {
@@ -152,7 +177,7 @@ static const struct export_operations kernfs_export_ops = {
* kernfs_root_from_sb - determine kernfs_root associated with a super_block
* @sb: the super_block in question
*
- * Return the kernfs_root associated with @sb. If @sb is not a kernfs one,
+ * Return: the kernfs_root associated with @sb. If @sb is not a kernfs one,
* %NULL is returned.
*/
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
@@ -166,7 +191,7 @@ struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
* find the next ancestor in the path down to @child, where @parent was the
* ancestor whose descendant we want to find.
*
- * Say the path is /a/b/c/d. @child is d, @parent is NULL. We return the root
+ * Say the path is /a/b/c/d. @child is d, @parent is %NULL. We return the root
* node. If @parent is b, then we return the node for c.
* Passing in d as @parent is not ok.
*/
@@ -178,10 +203,10 @@ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
return NULL;
}
- while (child->parent != parent) {
- if (!child->parent)
+ while (kernfs_parent(child) != parent) {
+ child = kernfs_parent(child);
+ if (!child)
return NULL;
- child = child->parent;
}
return child;
@@ -191,22 +216,35 @@ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
* kernfs_node_dentry - get a dentry for the given kernfs_node
* @kn: kernfs_node for which a dentry is needed
* @sb: the kernfs super_block
+ *
+ * Return: the dentry pointer
*/
struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
struct super_block *sb)
{
struct dentry *dentry;
- struct kernfs_node *knparent = NULL;
+ struct kernfs_node *knparent;
+ struct kernfs_root *root;
BUG_ON(sb->s_op != &kernfs_sops);
dentry = dget(sb->s_root);
/* Check if this is the root kernfs_node */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return dentry;
- knparent = find_next_ancestor(kn, NULL);
+ root = kernfs_root(kn);
+ /*
+ * As long as kn is valid, its parent cannot vanish. This is cgroup's
+ * kn, so it can't have its parent replaced. Therefore it is safe to use
+ * the ancestor node outside of the RCU or locked section.
+ */
+ if (WARN_ON_ONCE(!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)))
+ return ERR_PTR(-EINVAL);
+ scoped_guard(rcu) {
+ knparent = find_next_ancestor(kn, NULL);
+ }
if (WARN_ON(!knparent)) {
dput(dentry);
return ERR_PTR(-EINVAL);
@@ -215,17 +253,26 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
do {
struct dentry *dtmp;
struct kernfs_node *kntmp;
+ const char *name;
if (kn == knparent)
return dentry;
- kntmp = find_next_ancestor(kn, knparent);
- if (WARN_ON(!kntmp)) {
+
+ scoped_guard(rwsem_read, &root->kernfs_rwsem) {
+ kntmp = find_next_ancestor(kn, knparent);
+ if (WARN_ON(!kntmp)) {
+ dput(dentry);
+ return ERR_PTR(-EINVAL);
+ }
+ name = kstrdup(kernfs_rcu_name(kntmp), GFP_KERNEL);
+ }
+ if (!name) {
dput(dentry);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOMEM);
}
- dtmp = lookup_positive_unlocked(kntmp->name, dentry,
- strlen(kntmp->name));
+ dtmp = lookup_noperm_positive_unlocked(&QSTR(name), dentry);
dput(dentry);
+ kfree(name);
if (IS_ERR(dtmp))
return dtmp;
knparent = kntmp;
@@ -236,6 +283,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *kfc)
{
struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_root *kf_root = kfc->root;
struct inode *inode;
struct dentry *root;
@@ -250,14 +298,15 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP)
sb->s_export_op = &kernfs_export_ops;
sb->s_time_gran = 1;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* sysfs dentries and inodes don't require IO to create */
- sb->s_shrink.seeks = 0;
+ sb->s_shrink->seeks = 0;
/* get root inode, initialize and unlock it */
- down_read(&kernfs_rwsem);
+ down_read(&kf_root->kernfs_rwsem);
inode = kernfs_get_inode(sb, info->root->kn);
- up_read(&kernfs_rwsem);
+ up_read(&kf_root->kernfs_rwsem);
if (!inode) {
pr_debug("kernfs: could not get root inode\n");
return -ENOMEM;
@@ -270,7 +319,7 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
return -ENOMEM;
}
sb->s_root = root;
- sb->s_d_op = &kernfs_dops;
+ set_default_d_op(sb, &kernfs_dops);
return 0;
}
@@ -294,7 +343,7 @@ static int kernfs_set_super(struct super_block *sb, struct fs_context *fc)
* kernfs_super_ns - determine the namespace tag of a kernfs super_block
* @sb: super_block of interest
*
- * Return the namespace tag associated with kernfs super_block @sb.
+ * Return: the namespace tag associated with kernfs super_block @sb.
*/
const void *kernfs_super_ns(struct super_block *sb)
{
@@ -311,6 +360,8 @@ const void *kernfs_super_ns(struct super_block *sb)
* implementation, which should set the specified ->@fs_type and ->@flags, and
* specify the hierarchy and namespace tag to mount via ->@root and ->@ns,
* respectively.
+ *
+ * Return: %0 on success, -errno on failure.
*/
int kernfs_get_tree(struct fs_context *fc)
{
@@ -334,6 +385,7 @@ int kernfs_get_tree(struct fs_context *fc)
if (!sb->s_root) {
struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_root *root = kfc->root;
kfc->new_sb_created = true;
@@ -344,9 +396,13 @@ int kernfs_get_tree(struct fs_context *fc)
}
sb->s_flags |= SB_ACTIVE;
- down_write(&kernfs_rwsem);
+ uuid_t uuid;
+ uuid_gen(&uuid);
+ super_set_uuid(sb, uuid.b, sizeof(uuid));
+
+ down_write(&root->kernfs_supers_rwsem);
list_add(&info->node, &info->root->supers);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_supers_rwsem);
}
fc->root = dget(sb->s_root);
@@ -371,10 +427,11 @@ void kernfs_free_fs_context(struct fs_context *fc)
void kernfs_kill_sb(struct super_block *sb)
{
struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_root *root = info->root;
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_supers_rwsem);
list_del(&info->node);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_supers_rwsem);
/*
* Remove the superblock from fs_supers/s_instances
@@ -384,6 +441,22 @@ void kernfs_kill_sb(struct super_block *sb)
kfree(info);
}
+static void __init kernfs_mutex_init(void)
+{
+ int count;
+
+ for (count = 0; count < NR_KERNFS_LOCKS; count++)
+ mutex_init(&kernfs_locks->open_file_mutex[count]);
+}
+
+static void __init kernfs_lock_init(void)
+{
+ kernfs_locks = kmalloc(sizeof(struct kernfs_global_locks), GFP_KERNEL);
+ WARN_ON(!kernfs_locks);
+
+ kernfs_mutex_init();
+}
+
void __init kernfs_init(void)
{
kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
@@ -394,4 +467,6 @@ void __init kernfs_init(void)
kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache",
sizeof(struct kernfs_iattrs),
0, SLAB_PANIC, NULL);
+
+ kernfs_lock_init();
}
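With kernfs_statfs() above, each kernfs superblock now reports an f_fsid derived from the random UUID generated at mount time instead of the all-zero fsid from simple_statfs(). A small userspace check (hypothetical demo; the __val field naming is the glibc layout of fsid_t):

#include <stdio.h>
#include <sys/statfs.h>

int main(void)
{
	struct statfs st;

	if (statfs("/sys", &st) < 0) {
		perror("statfs");
		return 1;
	}
	/* non-zero, stable for the lifetime of this mount */
	printf("f_fsid = %#x:%#x\n",
	       (unsigned int)st.f_fsid.__val[0],
	       (unsigned int)st.f_fsid.__val[1]);
	return 0;
}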
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 19a6c71c6ff5..0bd8a2143723 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -19,7 +19,7 @@
* @name: name of the symlink
* @target: target node for the symlink to point to
*
- * Returns the created node on success, ERR_PTR() value on error.
+ * Return: the created node on success, ERR_PTR() value on error.
* Ownership of the link matches ownership of the target.
*/
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
@@ -62,10 +62,10 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* go up to the root, stop at the base */
base = parent;
- while (base->parent) {
- kn = target->parent;
- while (kn->parent && base != kn)
- kn = kn->parent;
+ while (kernfs_parent(base)) {
+ kn = kernfs_parent(target);
+ while (kernfs_parent(kn) && base != kn)
+ kn = kernfs_parent(kn);
if (base == kn)
break;
@@ -75,14 +75,14 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
strcpy(s, "../");
s += 3;
- base = base->parent;
+ base = kernfs_parent(base);
}
/* determine end of target string for reverse fillup */
kn = target;
- while (kn->parent && kn != base) {
- len += strlen(kn->name) + 1;
- kn = kn->parent;
+ while (kernfs_parent(kn) && kn != base) {
+ len += strlen(kernfs_rcu_name(kn)) + 1;
+ kn = kernfs_parent(kn);
}
/* check limits */
@@ -94,15 +94,16 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* reverse fillup of target string from target to base */
kn = target;
- while (kn->parent && kn != base) {
- int slen = strlen(kn->name);
+ while (kernfs_parent(kn) && kn != base) {
+ const char *name = kernfs_rcu_name(kn);
+ int slen = strlen(name);
len -= slen;
- memcpy(s + len, kn->name, slen);
+ memcpy(s + len, name, slen);
if (len)
s[--len] = '/';
- kn = kn->parent;
+ kn = kernfs_parent(kn);
}
return 0;
@@ -111,13 +112,15 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
static int kernfs_getlink(struct inode *inode, char *path)
{
struct kernfs_node *kn = inode->i_private;
- struct kernfs_node *parent = kn->parent;
+ struct kernfs_node *parent;
struct kernfs_node *target = kn->symlink.target_kn;
+ struct kernfs_root *root = kernfs_root(kn);
int error;
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
error = kernfs_get_target_path(parent, target, path);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return error;
}
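kernfs_get_target_path() builds the symlink body relative to the directory that holds the link: one "../" per component of @parent below the common ancestor, then the remaining components of @target. A standalone userspace model of that computation (hypothetical demo, not kernel code):

#include <stdio.h>
#include <string.h>

static const char *parent_comp[] = { "a", "b", "c" };	/* link lives in /a/b/c */
static const char *target_comp[] = { "a", "x", "y" };	/* link points at /a/x/y */

int main(void)
{
	size_t np = 3, nt = 3, common = 0, i;
	char buf[256] = "";

	/* depth of the common ancestor */
	while (common < np && common < nt &&
	       !strcmp(parent_comp[common], target_comp[common]))
		common++;

	/* one "../" for every parent component below the common base */
	for (i = common; i < np; i++)
		strcat(buf, "../");

	/* then the remaining target components */
	for (i = common; i < nt; i++) {
		strcat(buf, target_comp[i]);
		if (i + 1 < nt)
			strcat(buf, "/");
	}

	printf("%s\n", buf);	/* prints "../../x/y" */
	return 0;
}

The kernel version computes the total length first and then fills the target components in from the end of a fixed buffer, as the hunks above show.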
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
deleted file mode 100644
index e5403c587a58..000000000000
--- a/fs/ksmbd/connection.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2018 Samsung Electronics Co., Ltd.
- */
-
-#ifndef __KSMBD_CONNECTION_H__
-#define __KSMBD_CONNECTION_H__
-
-#include <linux/list.h>
-#include <linux/ip.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <net/inet_connection_sock.h>
-#include <net/request_sock.h>
-#include <linux/kthread.h>
-#include <linux/nls.h>
-
-#include "smb_common.h"
-#include "ksmbd_work.h"
-
-#define KSMBD_SOCKET_BACKLOG 16
-
-/*
- * WARNING
- *
- * This is nothing but a HACK. Session status should move to channel
- * or to session. As of now we have 1 tcp_conn : 1 ksmbd_session, but
- * we need to change it to 1 tcp_conn : N ksmbd_sessions.
- */
-enum {
- KSMBD_SESS_NEW = 0,
- KSMBD_SESS_GOOD,
- KSMBD_SESS_EXITING,
- KSMBD_SESS_NEED_RECONNECT,
- KSMBD_SESS_NEED_NEGOTIATE
-};
-
-struct ksmbd_stats {
- atomic_t open_files_count;
- atomic64_t request_served;
-};
-
-struct ksmbd_transport;
-
-struct ksmbd_conn {
- struct smb_version_values *vals;
- struct smb_version_ops *ops;
- struct smb_version_cmds *cmds;
- unsigned int max_cmds;
- struct mutex srv_mutex;
- int status;
- unsigned int cli_cap;
- char *request_buf;
- struct ksmbd_transport *transport;
- struct nls_table *local_nls;
- struct list_head conns_list;
- /* smb session 1 per user */
- struct list_head sessions;
- unsigned long last_active;
- /* How many request are running currently */
- atomic_t req_running;
- /* References which are made for this Server object*/
- atomic_t r_count;
- unsigned short total_credits;
- unsigned short max_credits;
- spinlock_t credits_lock;
- wait_queue_head_t req_running_q;
- /* Lock to protect requests list*/
- spinlock_t request_lock;
- struct list_head requests;
- struct list_head async_requests;
- int connection_type;
- struct ksmbd_stats stats;
- char ClientGUID[SMB2_CLIENT_GUID_SIZE];
- union {
- /* pending trans request table */
- struct trans_state *recent_trans;
- /* Used by ntlmssp */
- char *ntlmssp_cryptkey;
- };
-
- spinlock_t llist_lock;
- struct list_head lock_list;
-
- struct preauth_integrity_info *preauth_info;
-
- bool need_neg;
- unsigned int auth_mechs;
- unsigned int preferred_auth_mech;
- bool sign;
- bool use_spnego:1;
- __u16 cli_sec_mode;
- __u16 srv_sec_mode;
- /* dialect index that server chose */
- __u16 dialect;
-
- char *mechToken;
-
- struct ksmbd_conn_ops *conn_ops;
-
- /* Preauth Session Table */
- struct list_head preauth_sess_table;
-
- struct sockaddr_storage peer_addr;
-
- /* Identifier for async message */
- struct ida async_ida;
-
- __le16 cipher_type;
- __le16 compress_algorithm;
- bool posix_ext_supported;
- bool signing_negotiated;
- __le16 signing_algorithm;
- bool binding;
-};
-
-struct ksmbd_conn_ops {
- int (*process_fn)(struct ksmbd_conn *conn);
- int (*terminate_fn)(struct ksmbd_conn *conn);
-};
-
-struct ksmbd_transport_ops {
- int (*prepare)(struct ksmbd_transport *t);
- void (*disconnect)(struct ksmbd_transport *t);
- int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
- int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
- int size, bool need_invalidate_rkey,
- unsigned int remote_key);
- int (*rdma_read)(struct ksmbd_transport *t, void *buf, unsigned int len,
- u32 remote_key, u64 remote_offset, u32 remote_len);
- int (*rdma_write)(struct ksmbd_transport *t, void *buf,
- unsigned int len, u32 remote_key, u64 remote_offset,
- u32 remote_len);
-};
-
-struct ksmbd_transport {
- struct ksmbd_conn *conn;
- struct ksmbd_transport_ops *ops;
- struct task_struct *handler;
-};
-
-#define KSMBD_TCP_RECV_TIMEOUT (7 * HZ)
-#define KSMBD_TCP_SEND_TIMEOUT (5 * HZ)
-#define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))
-
-extern struct list_head conn_list;
-extern rwlock_t conn_list_lock;
-
-bool ksmbd_conn_alive(struct ksmbd_conn *conn);
-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
-struct ksmbd_conn *ksmbd_conn_alloc(void);
-void ksmbd_conn_free(struct ksmbd_conn *conn);
-bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
-int ksmbd_conn_write(struct ksmbd_work *work);
-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
- unsigned int buflen, u32 remote_key, u64 remote_offset,
- u32 remote_len);
-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
- unsigned int buflen, u32 remote_key, u64 remote_offset,
- u32 remote_len);
-void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
-void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
-int ksmbd_conn_handler_loop(void *p);
-int ksmbd_conn_transport_init(void);
-void ksmbd_conn_transport_destroy(void);
-
-/*
- * WARNING
- *
- * This is a hack. We will move status to a proper place once we land
- * a multi-sessions support.
- */
-static inline bool ksmbd_conn_good(struct ksmbd_work *work)
-{
- return work->conn->status == KSMBD_SESS_GOOD;
-}
-
-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
-{
- return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
-}
-
-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
-{
- return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
-}
-
-static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
-{
- return work->conn->status == KSMBD_SESS_EXITING;
-}
-
-static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
-{
- work->conn->status = KSMBD_SESS_GOOD;
-}
-
-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
-{
- work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
-}
-
-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
-{
- work->conn->status = KSMBD_SESS_NEED_RECONNECT;
-}
-
-static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
-{
- work->conn->status = KSMBD_SESS_EXITING;
-}
-#endif /* __CONNECTION_H__ */
diff --git a/fs/ksmbd/ksmbd_work.c b/fs/ksmbd/ksmbd_work.c
deleted file mode 100644
index fd58eb4809f6..000000000000
--- a/fs/ksmbd/ksmbd_work.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2019 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-
-#include "server.h"
-#include "connection.h"
-#include "ksmbd_work.h"
-#include "mgmt/ksmbd_ida.h"
-
-static struct kmem_cache *work_cache;
-static struct workqueue_struct *ksmbd_wq;
-
-struct ksmbd_work *ksmbd_alloc_work_struct(void)
-{
- struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
-
- if (work) {
- work->compound_fid = KSMBD_NO_FID;
- work->compound_pfid = KSMBD_NO_FID;
- INIT_LIST_HEAD(&work->request_entry);
- INIT_LIST_HEAD(&work->async_request_entry);
- INIT_LIST_HEAD(&work->fp_entry);
- INIT_LIST_HEAD(&work->interim_entry);
- }
- return work;
-}
-
-void ksmbd_free_work_struct(struct ksmbd_work *work)
-{
- WARN_ON(work->saved_cred != NULL);
-
- kvfree(work->response_buf);
- kvfree(work->aux_payload_buf);
- kfree(work->tr_buf);
- kvfree(work->request_buf);
- if (work->async_id)
- ksmbd_release_id(&work->conn->async_ida, work->async_id);
- kmem_cache_free(work_cache, work);
-}
-
-void ksmbd_work_pool_destroy(void)
-{
- kmem_cache_destroy(work_cache);
-}
-
-int ksmbd_work_pool_init(void)
-{
- work_cache = kmem_cache_create("ksmbd_work_cache",
- sizeof(struct ksmbd_work), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!work_cache)
- return -ENOMEM;
- return 0;
-}
-
-int ksmbd_workqueue_init(void)
-{
- ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
- if (!ksmbd_wq)
- return -ENOMEM;
- return 0;
-}
-
-void ksmbd_workqueue_destroy(void)
-{
- flush_workqueue(ksmbd_wq);
- destroy_workqueue(ksmbd_wq);
- ksmbd_wq = NULL;
-}
-
-bool ksmbd_queue_work(struct ksmbd_work *work)
-{
- return queue_work(ksmbd_wq, &work->work);
-}
diff --git a/fs/ksmbd/mgmt/user_config.c b/fs/ksmbd/mgmt/user_config.c
deleted file mode 100644
index 1019d3677d55..000000000000
--- a/fs/ksmbd/mgmt/user_config.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2018 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#include "user_config.h"
-#include "../transport_ipc.h"
-
-struct ksmbd_user *ksmbd_login_user(const char *account)
-{
- struct ksmbd_login_response *resp;
- struct ksmbd_user *user = NULL;
-
- resp = ksmbd_ipc_login_request(account);
- if (!resp)
- return NULL;
-
- if (!(resp->status & KSMBD_USER_FLAG_OK))
- goto out;
-
- user = ksmbd_alloc_user(resp);
-out:
- kvfree(resp);
- return user;
-}
-
-struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
-{
- struct ksmbd_user *user = NULL;
-
- user = kmalloc(sizeof(struct ksmbd_user), GFP_KERNEL);
- if (!user)
- return NULL;
-
- user->name = kstrdup(resp->account, GFP_KERNEL);
- user->flags = resp->status;
- user->gid = resp->gid;
- user->uid = resp->uid;
- user->passkey_sz = resp->hash_sz;
- user->passkey = kmalloc(resp->hash_sz, GFP_KERNEL);
- if (user->passkey)
- memcpy(user->passkey, resp->hash, resp->hash_sz);
-
- if (!user->name || !user->passkey) {
- kfree(user->name);
- kfree(user->passkey);
- kfree(user);
- user = NULL;
- }
- return user;
-}
-
-void ksmbd_free_user(struct ksmbd_user *user)
-{
- ksmbd_ipc_logout_request(user->name, user->flags);
- kfree(user->name);
- kfree(user->passkey);
- kfree(user);
-}
-
-int ksmbd_anonymous_user(struct ksmbd_user *user)
-{
- if (user->name[0] == '\0')
- return 1;
- return 0;
-}
diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
deleted file mode 100644
index 8d8ffd8c6f19..000000000000
--- a/fs/ksmbd/mgmt/user_session.c
+++ /dev/null
@@ -1,369 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2018 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/rwsem.h>
-#include <linux/xarray.h>
-
-#include "ksmbd_ida.h"
-#include "user_session.h"
-#include "user_config.h"
-#include "tree_connect.h"
-#include "../transport_ipc.h"
-#include "../connection.h"
-#include "../vfs_cache.h"
-
-static DEFINE_IDA(session_ida);
-
-#define SESSION_HASH_BITS 3
-static DEFINE_HASHTABLE(sessions_table, SESSION_HASH_BITS);
-static DECLARE_RWSEM(sessions_table_lock);
-
-struct ksmbd_session_rpc {
- int id;
- unsigned int method;
- struct list_head list;
-};
-
-static void free_channel_list(struct ksmbd_session *sess)
-{
- struct channel *chann, *tmp;
-
- list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
- chann_list) {
- list_del(&chann->chann_list);
- kfree(chann);
- }
-}
-
-static void __session_rpc_close(struct ksmbd_session *sess,
- struct ksmbd_session_rpc *entry)
-{
- struct ksmbd_rpc_command *resp;
-
- resp = ksmbd_rpc_close(sess, entry->id);
- if (!resp)
- pr_err("Unable to close RPC pipe %d\n", entry->id);
-
- kvfree(resp);
- ksmbd_rpc_id_free(entry->id);
- kfree(entry);
-}
-
-static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
-{
- struct ksmbd_session_rpc *entry;
-
- while (!list_empty(&sess->rpc_handle_list)) {
- entry = list_entry(sess->rpc_handle_list.next,
- struct ksmbd_session_rpc,
- list);
-
- list_del(&entry->list);
- __session_rpc_close(sess, entry);
- }
-}
-
-static int __rpc_method(char *rpc_name)
-{
- if (!strcmp(rpc_name, "\\srvsvc") || !strcmp(rpc_name, "srvsvc"))
- return KSMBD_RPC_SRVSVC_METHOD_INVOKE;
-
- if (!strcmp(rpc_name, "\\wkssvc") || !strcmp(rpc_name, "wkssvc"))
- return KSMBD_RPC_WKSSVC_METHOD_INVOKE;
-
- if (!strcmp(rpc_name, "LANMAN") || !strcmp(rpc_name, "lanman"))
- return KSMBD_RPC_RAP_METHOD;
-
- if (!strcmp(rpc_name, "\\samr") || !strcmp(rpc_name, "samr"))
- return KSMBD_RPC_SAMR_METHOD_INVOKE;
-
- if (!strcmp(rpc_name, "\\lsarpc") || !strcmp(rpc_name, "lsarpc"))
- return KSMBD_RPC_LSARPC_METHOD_INVOKE;
-
- pr_err("Unsupported RPC: %s\n", rpc_name);
- return 0;
-}
-
-int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
-{
- struct ksmbd_session_rpc *entry;
- struct ksmbd_rpc_command *resp;
- int method;
-
- method = __rpc_method(rpc_name);
- if (!method)
- return -EINVAL;
-
- entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL);
- if (!entry)
- return -EINVAL;
-
- list_add(&entry->list, &sess->rpc_handle_list);
- entry->method = method;
- entry->id = ksmbd_ipc_id_alloc();
- if (entry->id < 0)
- goto error;
-
- resp = ksmbd_rpc_open(sess, entry->id);
- if (!resp)
- goto error;
-
- kvfree(resp);
- return entry->id;
-error:
- list_del(&entry->list);
- kfree(entry);
- return -EINVAL;
-}
-
-void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
-{
- struct ksmbd_session_rpc *entry;
-
- list_for_each_entry(entry, &sess->rpc_handle_list, list) {
- if (entry->id == id) {
- list_del(&entry->list);
- __session_rpc_close(sess, entry);
- break;
- }
- }
-}
-
-int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
-{
- struct ksmbd_session_rpc *entry;
-
- list_for_each_entry(entry, &sess->rpc_handle_list, list) {
- if (entry->id == id)
- return entry->method;
- }
- return 0;
-}
-
-void ksmbd_session_destroy(struct ksmbd_session *sess)
-{
- if (!sess)
- return;
-
- if (!atomic_dec_and_test(&sess->refcnt))
- return;
-
- list_del(&sess->sessions_entry);
-
- down_write(&sessions_table_lock);
- hash_del(&sess->hlist);
- up_write(&sessions_table_lock);
-
- if (sess->user)
- ksmbd_free_user(sess->user);
-
- ksmbd_tree_conn_session_logoff(sess);
- ksmbd_destroy_file_table(&sess->file_table);
- ksmbd_session_rpc_clear_list(sess);
- free_channel_list(sess);
- kfree(sess->Preauth_HashValue);
- ksmbd_release_id(&session_ida, sess->id);
- kfree(sess);
-}
-
-static struct ksmbd_session *__session_lookup(unsigned long long id)
-{
- struct ksmbd_session *sess;
-
- hash_for_each_possible(sessions_table, sess, hlist, id) {
- if (id == sess->id)
- return sess;
- }
- return NULL;
-}
-
-void ksmbd_session_register(struct ksmbd_conn *conn,
- struct ksmbd_session *sess)
-{
- sess->conn = conn;
- list_add(&sess->sessions_entry, &conn->sessions);
-}
-
-void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
-{
- struct ksmbd_session *sess;
-
- while (!list_empty(&conn->sessions)) {
- sess = list_entry(conn->sessions.next,
- struct ksmbd_session,
- sessions_entry);
-
- ksmbd_session_destroy(sess);
- }
-}
-
-static bool ksmbd_session_id_match(struct ksmbd_session *sess,
- unsigned long long id)
-{
- return sess->id == id;
-}
-
-struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
- unsigned long long id)
-{
- struct ksmbd_session *sess = NULL;
-
- list_for_each_entry(sess, &conn->sessions, sessions_entry) {
- if (ksmbd_session_id_match(sess, id))
- return sess;
- }
- return NULL;
-}
-
-int get_session(struct ksmbd_session *sess)
-{
- return atomic_inc_not_zero(&sess->refcnt);
-}
-
-void put_session(struct ksmbd_session *sess)
-{
- if (atomic_dec_and_test(&sess->refcnt))
- pr_err("get/%s seems to be mismatched.", __func__);
-}
-
-struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
-{
- struct ksmbd_session *sess;
-
- down_read(&sessions_table_lock);
- sess = __session_lookup(id);
- if (sess) {
- if (!get_session(sess))
- sess = NULL;
- }
- up_read(&sessions_table_lock);
-
- return sess;
-}
-
-struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
- unsigned long long id)
-{
- struct ksmbd_session *sess;
-
- sess = ksmbd_session_lookup(conn, id);
- if (!sess && conn->binding)
- sess = ksmbd_session_lookup_slowpath(id);
- return sess;
-}
-
-struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
- u64 sess_id)
-{
- struct preauth_session *sess;
-
- sess = kmalloc(sizeof(struct preauth_session), GFP_KERNEL);
- if (!sess)
- return NULL;
-
- sess->id = sess_id;
- memcpy(sess->Preauth_HashValue, conn->preauth_info->Preauth_HashValue,
- PREAUTH_HASHVALUE_SIZE);
- list_add(&sess->preauth_entry, &conn->preauth_sess_table);
-
- return sess;
-}
-
-static bool ksmbd_preauth_session_id_match(struct preauth_session *sess,
- unsigned long long id)
-{
- return sess->id == id;
-}
-
-struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
- unsigned long long id)
-{
- struct preauth_session *sess = NULL;
-
- list_for_each_entry(sess, &conn->preauth_sess_table, preauth_entry) {
- if (ksmbd_preauth_session_id_match(sess, id))
- return sess;
- }
- return NULL;
-}
-
-static int __init_smb2_session(struct ksmbd_session *sess)
-{
- int id = ksmbd_acquire_smb2_uid(&session_ida);
-
- if (id < 0)
- return -EINVAL;
- sess->id = id;
- return 0;
-}
-
-static struct ksmbd_session *__session_create(int protocol)
-{
- struct ksmbd_session *sess;
- int ret;
-
- sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
- if (!sess)
- return NULL;
-
- if (ksmbd_init_file_table(&sess->file_table))
- goto error;
-
- set_session_flag(sess, protocol);
- INIT_LIST_HEAD(&sess->sessions_entry);
- xa_init(&sess->tree_conns);
- INIT_LIST_HEAD(&sess->ksmbd_chann_list);
- INIT_LIST_HEAD(&sess->rpc_handle_list);
- sess->sequence_number = 1;
- atomic_set(&sess->refcnt, 1);
-
- switch (protocol) {
- case CIFDS_SESSION_FLAG_SMB2:
- ret = __init_smb2_session(sess);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto error;
-
- ida_init(&sess->tree_conn_ida);
-
- if (protocol == CIFDS_SESSION_FLAG_SMB2) {
- down_write(&sessions_table_lock);
- hash_add(sessions_table, &sess->hlist, sess->id);
- up_write(&sessions_table_lock);
- }
- return sess;
-
-error:
- ksmbd_session_destroy(sess);
- return NULL;
-}
-
-struct ksmbd_session *ksmbd_smb2_session_create(void)
-{
- return __session_create(CIFDS_SESSION_FLAG_SMB2);
-}
-
-int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess)
-{
- int id = -EINVAL;
-
- if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
- id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida);
-
- return id;
-}
-
-void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id)
-{
- if (id >= 0)
- ksmbd_release_id(&sess->tree_conn_ida, id);
-}
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
deleted file mode 100644
index ff5a2f01d34a..000000000000
--- a/fs/ksmbd/smb2pdu.h
+++ /dev/null
@@ -1,1708 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
- * Copyright (C) 2018 Samsung Electronics Co., Ltd.
- */
-
-#ifndef _SMB2PDU_H
-#define _SMB2PDU_H
-
-#include "ntlmssp.h"
-#include "smbacl.h"
-
-/*
- * Note that, due to trying to use names similar to the protocol specifications,
- * there are many mixed case field names in the structures below. Although
- * this does not match typical Linux kernel style, it is necessary to be
- * able to match against the protocol specfication.
- *
- * SMB2 commands
- * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
- * (ie no useful data other than the SMB error code itself) and are marked such.
- * Knowing this helps avoid response buffer allocations and copy in some cases.
- */
-
-/* List of commands in host endian */
-#define SMB2_NEGOTIATE_HE 0x0000
-#define SMB2_SESSION_SETUP_HE 0x0001
-#define SMB2_LOGOFF_HE 0x0002 /* trivial request/resp */
-#define SMB2_TREE_CONNECT_HE 0x0003
-#define SMB2_TREE_DISCONNECT_HE 0x0004 /* trivial req/resp */
-#define SMB2_CREATE_HE 0x0005
-#define SMB2_CLOSE_HE 0x0006
-#define SMB2_FLUSH_HE 0x0007 /* trivial resp */
-#define SMB2_READ_HE 0x0008
-#define SMB2_WRITE_HE 0x0009
-#define SMB2_LOCK_HE 0x000A
-#define SMB2_IOCTL_HE 0x000B
-#define SMB2_CANCEL_HE 0x000C
-#define SMB2_ECHO_HE 0x000D
-#define SMB2_QUERY_DIRECTORY_HE 0x000E
-#define SMB2_CHANGE_NOTIFY_HE 0x000F
-#define SMB2_QUERY_INFO_HE 0x0010
-#define SMB2_SET_INFO_HE 0x0011
-#define SMB2_OPLOCK_BREAK_HE 0x0012
-
-/* The same list in little endian */
-#define SMB2_NEGOTIATE cpu_to_le16(SMB2_NEGOTIATE_HE)
-#define SMB2_SESSION_SETUP cpu_to_le16(SMB2_SESSION_SETUP_HE)
-#define SMB2_LOGOFF cpu_to_le16(SMB2_LOGOFF_HE)
-#define SMB2_TREE_CONNECT cpu_to_le16(SMB2_TREE_CONNECT_HE)
-#define SMB2_TREE_DISCONNECT cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
-#define SMB2_CREATE cpu_to_le16(SMB2_CREATE_HE)
-#define SMB2_CLOSE cpu_to_le16(SMB2_CLOSE_HE)
-#define SMB2_FLUSH cpu_to_le16(SMB2_FLUSH_HE)
-#define SMB2_READ cpu_to_le16(SMB2_READ_HE)
-#define SMB2_WRITE cpu_to_le16(SMB2_WRITE_HE)
-#define SMB2_LOCK cpu_to_le16(SMB2_LOCK_HE)
-#define SMB2_IOCTL cpu_to_le16(SMB2_IOCTL_HE)
-#define SMB2_CANCEL cpu_to_le16(SMB2_CANCEL_HE)
-#define SMB2_ECHO cpu_to_le16(SMB2_ECHO_HE)
-#define SMB2_QUERY_DIRECTORY cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
-#define SMB2_CHANGE_NOTIFY cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
-#define SMB2_QUERY_INFO cpu_to_le16(SMB2_QUERY_INFO_HE)
-#define SMB2_SET_INFO cpu_to_le16(SMB2_SET_INFO_HE)
-#define SMB2_OPLOCK_BREAK cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
-
-/*Create Action Flags*/
-#define FILE_SUPERSEDED 0x00000000
-#define FILE_OPENED 0x00000001
-#define FILE_CREATED 0x00000002
-#define FILE_OVERWRITTEN 0x00000003
-
-/*
- * Size of the session key (crypto key encrypted with the password)
- */
-#define SMB2_NTLMV2_SESSKEY_SIZE 16
-#define SMB2_SIGNATURE_SIZE 16
-#define SMB2_HMACSHA256_SIZE 32
-#define SMB2_CMACAES_SIZE 16
-#define SMB3_GCM128_CRYPTKEY_SIZE 16
-#define SMB3_GCM256_CRYPTKEY_SIZE 32
-
-/*
- * Size of the smb3 encryption/decryption keys
- */
-#define SMB3_ENC_DEC_KEY_SIZE 32
-
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE 16
-
-#define CIFS_CLIENT_CHALLENGE_SIZE 8
-#define SMB_SERVER_CHALLENGE_SIZE 8
-
-/* SMB2 Max Credits */
-#define SMB2_MAX_CREDITS 8192
-
-#define SMB2_CLIENT_GUID_SIZE 16
-#define SMB2_CREATE_GUID_SIZE 16
-
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
-#define NUMBER_OF_SMB2_COMMANDS 0x0013
-
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
-
-#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) /* 'B''M''S' */
-#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
-
-#define SMB21_DEFAULT_IOSIZE (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
-#define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
-#define SMB3_MIN_IOSIZE (64 * 1024)
-#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
-
-/*
- * SMB2 Header Definition
- *
- * "MBZ" : Must be Zero
- * "BB" : BugBug, Something to check/review/analyze later
- * "PDU" : "Protocol Data Unit" (ie a network "frame")
- *
- */
-
-#define __SMB2_HEADER_STRUCTURE_SIZE 64
-#define SMB2_HEADER_STRUCTURE_SIZE \
- cpu_to_le16(__SMB2_HEADER_STRUCTURE_SIZE)
-
-struct smb2_hdr {
- __be32 smb2_buf_length; /* big endian on wire */
- /*
- * length is only two or three bytes - with
- * one or two byte type preceding it that MBZ
- */
- __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */
- __le16 StructureSize; /* 64 */
- __le16 CreditCharge; /* MBZ */
- __le32 Status; /* Error from server */
- __le16 Command;
- __le16 CreditRequest; /* CreditResponse */
- __le32 Flags;
- __le32 NextCommand;
- __le64 MessageId;
- union {
- struct {
- __le32 ProcessId;
- __le32 TreeId;
- } __packed SyncId;
- __le64 AsyncId;
- } __packed Id;
- __le64 SessionId;
- __u8 Signature[16];
-} __packed;
-
-struct smb2_pdu {
- struct smb2_hdr hdr;
- __le16 StructureSize2; /* size of wct area (varies, request specific) */
-} __packed;
-
-#define SMB3_AES_CCM_NONCE 11
-#define SMB3_AES_GCM_NONCE 12
-
-struct smb2_transform_hdr {
- __be32 smb2_buf_length; /* big endian on wire */
- /*
- * length is only two or three bytes - with
- * one or two byte type preceding it that MBZ
- */
- __le32 ProtocolId; /* 0xFD 'S' 'M' 'B' */
- __u8 Signature[16];
- __u8 Nonce[16];
- __le32 OriginalMessageSize;
- __u16 Reserved1;
- __le16 Flags; /* EncryptionAlgorithm */
- __le64 SessionId;
-} __packed;
-
-/*
- * SMB2 flag definitions
- */
-#define SMB2_FLAGS_SERVER_TO_REDIR cpu_to_le32(0x00000001)
-#define SMB2_FLAGS_ASYNC_COMMAND cpu_to_le32(0x00000002)
-#define SMB2_FLAGS_RELATED_OPERATIONS cpu_to_le32(0x00000004)
-#define SMB2_FLAGS_SIGNED cpu_to_le32(0x00000008)
-#define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000)
-#define SMB2_FLAGS_REPLAY_OPERATIONS cpu_to_le32(0x20000000)
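
A brief sketch (not part of the original header) of how the flag bits above
select the Id union arm in struct smb2_hdr: when SMB2_FLAGS_ASYNC_COMMAND is
set the header carries an AsyncId, otherwise the SyncId pair is valid. The
helper name is illustrative only.

static inline bool smb2_hdr_is_async_sketch(const struct smb2_hdr *hdr,
					    u64 *async_id)
{
	/* Flags and the constant are both little endian, so the bitwise
	 * test needs no byte-order conversion.
	 */
	if (hdr->Flags & SMB2_FLAGS_ASYNC_COMMAND) {
		*async_id = le64_to_cpu(hdr->Id.AsyncId);
		return true;
	}
	/* Synchronous request: hdr->Id.SyncId.ProcessId/TreeId apply. */
	return false;
}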
-
-/*
- * Definitions for SMB2 Protocol Data Units (network frames)
- *
- * See MS-SMB2.PDF specification for protocol details.
- * The Naming convention is the lower case version of the SMB2
- * command code name for the struct. Note that structures must be packed.
- *
- */
-
-#define SMB2_ERROR_STRUCTURE_SIZE2 9
-#define SMB2_ERROR_STRUCTURE_SIZE2_LE cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
-
-struct smb2_err_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __u8 ErrorContextCount;
- __u8 Reserved;
- __le32 ByteCount; /* even if zero, at least one byte follows */
- __u8 ErrorData[1]; /* variable length */
-} __packed;
-
-struct smb2_negotiate_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 DialectCount;
- __le16 SecurityMode;
- __le16 Reserved; /* MBZ */
- __le32 Capabilities;
- __u8 ClientGUID[SMB2_CLIENT_GUID_SIZE];
- /* In SMB3.02 and earlier the next three fields were a single MBZ le64 ClientStartTime */
- __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
- __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
- __le16 Reserved2;
- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
-} __packed;
-
-/* SecurityMode flags */
-#define SMB2_NEGOTIATE_SIGNING_ENABLED_LE cpu_to_le16(0x0001)
-#define SMB2_NEGOTIATE_SIGNING_REQUIRED 0x0002
-#define SMB2_NEGOTIATE_SIGNING_REQUIRED_LE cpu_to_le16(0x0002)
-/* Capabilities flags */
-#define SMB2_GLOBAL_CAP_DFS 0x00000001
-#define SMB2_GLOBAL_CAP_LEASING 0x00000002 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_LARGE_MTU 0x00000004 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_MULTI_CHANNEL 0x00000008 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING 0x00000020 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_ENCRYPTION 0x00000040 /* New to SMB3 */
-/* Internal types */
-#define SMB2_NT_FIND 0x00100000
-#define SMB2_LARGE_FILES 0x00200000
-
-#define SMB311_SALT_SIZE 32
-/* Hash Algorithm Types */
-#define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
-
-#define PREAUTH_HASHVALUE_SIZE 64
-
-struct preauth_integrity_info {
- /* PreAuth integrity Hash ID */
- __le16 Preauth_HashId;
- /* PreAuth integrity Hash Value */
- __u8 Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
-};
-
-/* offset is sizeof smb2_negotiate_rsp - 4 but rounded up to 8 bytes. */
-#ifdef CONFIG_SMB_SERVER_KERBEROS5
-/* sizeof(struct smb2_negotiate_rsp) - 4 =
- * header(64) + response(64) + GSS_LENGTH(96) + GSS_PADDING(0)
- */
-#define OFFSET_OF_NEG_CONTEXT 0xe0
-#else
-/* sizeof(struct smb2_negotiate_rsp) - 4 =
- * header(64) + response(64) + GSS_LENGTH(74) + GSS_PADDING(6)
- */
-#define OFFSET_OF_NEG_CONTEXT 0xd0
-#endif
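
Worked check of the offset arithmetic in the comments above (an editorial
note, not from the original source): with Kerberos, 64 + 64 + 96 + 0 = 224 =
0xe0; without, 64 + 64 + 74 + 6 = 208 = 0xd0, matching the two
OFFSET_OF_NEG_CONTEXT values. Both are already multiples of 8, as the
"rounded up to 8 bytes" remark requires. A compile-time guard could look like
this (assuming <linux/build_bug.h> is included):

static_assert((OFFSET_OF_NEG_CONTEXT & 7) == 0,
	      "negotiate contexts must start on an 8-byte boundary");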
-
-#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
-#define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
-#define SMB2_COMPRESSION_CAPABILITIES cpu_to_le16(3)
-#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID cpu_to_le16(5)
-#define SMB2_SIGNING_CAPABILITIES cpu_to_le16(8)
-#define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
-
-struct smb2_neg_context {
- __le16 ContextType;
- __le16 DataLength;
- __le32 Reserved;
- /* Followed by array of data */
-} __packed;
-
-struct smb2_preauth_neg_context {
- __le16 ContextType; /* 1 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 HashAlgorithmCount; /* 1 */
- __le16 SaltLength;
- __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */
- __u8 Salt[SMB311_SALT_SIZE];
-} __packed;
-
-/* Encryption Algorithms Ciphers */
-#define SMB2_ENCRYPTION_AES128_CCM cpu_to_le16(0x0001)
-#define SMB2_ENCRYPTION_AES128_GCM cpu_to_le16(0x0002)
-#define SMB2_ENCRYPTION_AES256_CCM cpu_to_le16(0x0003)
-#define SMB2_ENCRYPTION_AES256_GCM cpu_to_le16(0x0004)
-
-struct smb2_encryption_neg_context {
- __le16 ContextType; /* 2 */
- __le16 DataLength;
- __le32 Reserved;
- /* CipherCount usually 2, but can be 3 when AES256-GCM enabled */
- __le16 CipherCount; /* AES-128-GCM and AES-128-CCM by default */
- __le16 Ciphers[];
-} __packed;
-
-#define SMB3_COMPRESS_NONE cpu_to_le16(0x0000)
-#define SMB3_COMPRESS_LZNT1 cpu_to_le16(0x0001)
-#define SMB3_COMPRESS_LZ77 cpu_to_le16(0x0002)
-#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
-
-struct smb2_compression_ctx {
- __le16 ContextType; /* 3 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 CompressionAlgorithmCount;
- __u16 Padding;
- __le32 Reserved1;
- __le16 CompressionAlgorithms[];
-} __packed;
-
-#define POSIX_CTXT_DATA_LEN 16
-struct smb2_posix_neg_context {
- __le16 ContextType; /* 0x100 */
- __le16 DataLength;
- __le32 Reserved;
- __u8 Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
-} __packed;
-
-struct smb2_netname_neg_context {
- __le16 ContextType; /* 5 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 NetName[]; /* hostname of target converted to UCS-2 */
-} __packed;
-
-/* Signing algorithms */
-#define SIGNING_ALG_HMAC_SHA256 cpu_to_le16(0)
-#define SIGNING_ALG_AES_CMAC cpu_to_le16(1)
-#define SIGNING_ALG_AES_GMAC cpu_to_le16(2)
-
-struct smb2_signing_capabilities {
- __le16 ContextType; /* 8 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 SigningAlgorithmCount;
- __le16 SigningAlgorithms[];
-} __packed;
-
-struct smb2_negotiate_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 65 */
- __le16 SecurityMode;
- __le16 DialectRevision;
- __le16 NegotiateContextCount; /* Prior to SMB3.1.1 was Reserved & MBZ */
- __u8 ServerGUID[16];
- __le32 Capabilities;
- __le32 MaxTransactSize;
- __le32 MaxReadSize;
- __le32 MaxWriteSize;
- __le64 SystemTime; /* MBZ */
- __le64 ServerStartTime;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __le32 NegotiateContextOffset; /* Pre:SMB3.1.1 was reserved/ignored */
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-/* Flags */
-#define SMB2_SESSION_REQ_FLAG_BINDING 0x01
-#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
-
-#define SMB2_SESSION_EXPIRED (0)
-#define SMB2_SESSION_IN_PROGRESS BIT(0)
-#define SMB2_SESSION_VALID BIT(1)
-
-struct smb2_sess_setup_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 25 */
- __u8 Flags;
- __u8 SecurityMode;
- __le32 Capabilities;
- __le32 Channel;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __le64 PreviousSessionId;
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-/* Flags/Reserved for SMB3.1.1 */
-#define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001
-
-/* Currently defined SessionFlags */
-#define SMB2_SESSION_FLAG_IS_GUEST_LE cpu_to_le16(0x0001)
-#define SMB2_SESSION_FLAG_IS_NULL_LE cpu_to_le16(0x0002)
-#define SMB2_SESSION_FLAG_ENCRYPT_DATA_LE cpu_to_le16(0x0004)
-struct smb2_sess_setup_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 SessionFlags;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-struct smb2_logoff_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_logoff_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_tree_connect_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 Reserved; /* Flags in SMB3.1.1 */
- __le16 PathOffset;
- __le16 PathLength;
- __u8 Buffer[1]; /* variable length */
-} __packed;
-
-struct smb2_tree_connect_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 16 */
- __u8 ShareType; /* see below */
- __u8 Reserved;
- __le32 ShareFlags; /* see below */
- __le32 Capabilities; /* see below */
- __le32 MaximalAccess;
-} __packed;
-
-/* Possible ShareType values */
-#define SMB2_SHARE_TYPE_DISK 0x01
-#define SMB2_SHARE_TYPE_PIPE 0x02
-#define SMB2_SHARE_TYPE_PRINT 0x03
-
-/*
- * Possible ShareFlags - exactly one and only one of the first 4 caching flags
- * must be set (any of the remaining, SHI1005, flags may be set individually
- * or in combination); see the sketch after these flag definitions.
- */
-#define SMB2_SHAREFLAG_MANUAL_CACHING 0x00000000
-#define SMB2_SHAREFLAG_AUTO_CACHING 0x00000010
-#define SMB2_SHAREFLAG_VDO_CACHING 0x00000020
-#define SMB2_SHAREFLAG_NO_CACHING 0x00000030
-#define SHI1005_FLAGS_DFS 0x00000001
-#define SHI1005_FLAGS_DFS_ROOT 0x00000002
-#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS 0x00000100
-#define SHI1005_FLAGS_FORCE_SHARED_DELETE 0x00000200
-#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING 0x00000400
-#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM 0x00000800
-#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK 0x00001000
-#define SHI1005_FLAGS_ENABLE_HASH 0x00002000
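
A minimal sketch (not in the original header) of the caching rule described
above: the two low caching bits of ShareFlags form a single two-bit field, so
masking with SMB2_SHAREFLAG_NO_CACHING (0x30) yields exactly one of the four
caching modes, while the SHI1005_* bits remain independent.

static inline u32 smb2_share_caching_mode_sketch(__le32 ShareFlags)
{
	/* Returns one of SMB2_SHAREFLAG_MANUAL/AUTO/VDO/NO_CACHING. */
	return le32_to_cpu(ShareFlags) & SMB2_SHAREFLAG_NO_CACHING;
}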
-
-/* Possible share capabilities */
-#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008)
-
-struct smb2_tree_disconnect_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_tree_disconnect_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-#define ATTR_READONLY_LE cpu_to_le32(ATTR_READONLY)
-#define ATTR_HIDDEN_LE cpu_to_le32(ATTR_HIDDEN)
-#define ATTR_SYSTEM_LE cpu_to_le32(ATTR_SYSTEM)
-#define ATTR_DIRECTORY_LE cpu_to_le32(ATTR_DIRECTORY)
-#define ATTR_ARCHIVE_LE cpu_to_le32(ATTR_ARCHIVE)
-#define ATTR_NORMAL_LE cpu_to_le32(ATTR_NORMAL)
-#define ATTR_TEMPORARY_LE cpu_to_le32(ATTR_TEMPORARY)
-#define ATTR_SPARSE_FILE_LE cpu_to_le32(ATTR_SPARSE)
-#define ATTR_REPARSE_POINT_LE cpu_to_le32(ATTR_REPARSE)
-#define ATTR_COMPRESSED_LE cpu_to_le32(ATTR_COMPRESSED)
-#define ATTR_OFFLINE_LE cpu_to_le32(ATTR_OFFLINE)
-#define ATTR_NOT_CONTENT_INDEXED_LE cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
-#define ATTR_ENCRYPTED_LE cpu_to_le32(ATTR_ENCRYPTED)
-#define ATTR_INTEGRITY_STREAML_LE cpu_to_le32(0x00008000)
-#define ATTR_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000)
-#define ATTR_MASK_LE cpu_to_le32(0x00007FB7)
-
-/* Oplock levels */
-#define SMB2_OPLOCK_LEVEL_NONE 0x00
-#define SMB2_OPLOCK_LEVEL_II 0x01
-#define SMB2_OPLOCK_LEVEL_EXCLUSIVE 0x08
-#define SMB2_OPLOCK_LEVEL_BATCH 0x09
-#define SMB2_OPLOCK_LEVEL_LEASE 0xFF
-/* Non-spec internal type */
-#define SMB2_OPLOCK_LEVEL_NOCHANGE 0x99
-
-/* Desired Access Flags */
-#define FILE_READ_DATA_LE cpu_to_le32(0x00000001)
-#define FILE_LIST_DIRECTORY_LE cpu_to_le32(0x00000001)
-#define FILE_WRITE_DATA_LE cpu_to_le32(0x00000002)
-#define FILE_ADD_FILE_LE cpu_to_le32(0x00000002)
-#define FILE_APPEND_DATA_LE cpu_to_le32(0x00000004)
-#define FILE_ADD_SUBDIRECTORY_LE cpu_to_le32(0x00000004)
-#define FILE_READ_EA_LE cpu_to_le32(0x00000008)
-#define FILE_WRITE_EA_LE cpu_to_le32(0x00000010)
-#define FILE_EXECUTE_LE cpu_to_le32(0x00000020)
-#define FILE_TRAVERSE_LE cpu_to_le32(0x00000020)
-#define FILE_DELETE_CHILD_LE cpu_to_le32(0x00000040)
-#define FILE_READ_ATTRIBUTES_LE cpu_to_le32(0x00000080)
-#define FILE_WRITE_ATTRIBUTES_LE cpu_to_le32(0x00000100)
-#define FILE_DELETE_LE cpu_to_le32(0x00010000)
-#define FILE_READ_CONTROL_LE cpu_to_le32(0x00020000)
-#define FILE_WRITE_DAC_LE cpu_to_le32(0x00040000)
-#define FILE_WRITE_OWNER_LE cpu_to_le32(0x00080000)
-#define FILE_SYNCHRONIZE_LE cpu_to_le32(0x00100000)
-#define FILE_ACCESS_SYSTEM_SECURITY_LE cpu_to_le32(0x01000000)
-#define FILE_MAXIMAL_ACCESS_LE cpu_to_le32(0x02000000)
-#define FILE_GENERIC_ALL_LE cpu_to_le32(0x10000000)
-#define FILE_GENERIC_EXECUTE_LE cpu_to_le32(0x20000000)
-#define FILE_GENERIC_WRITE_LE cpu_to_le32(0x40000000)
-#define FILE_GENERIC_READ_LE cpu_to_le32(0x80000000)
-#define DESIRED_ACCESS_MASK cpu_to_le32(0xF21F01FF)
-
-/* ShareAccess Flags */
-#define FILE_SHARE_READ_LE cpu_to_le32(0x00000001)
-#define FILE_SHARE_WRITE_LE cpu_to_le32(0x00000002)
-#define FILE_SHARE_DELETE_LE cpu_to_le32(0x00000004)
-#define FILE_SHARE_ALL_LE cpu_to_le32(0x00000007)
-
-/* CreateDisposition Flags */
-#define FILE_SUPERSEDE_LE cpu_to_le32(0x00000000)
-#define FILE_OPEN_LE cpu_to_le32(0x00000001)
-#define FILE_CREATE_LE cpu_to_le32(0x00000002)
-#define FILE_OPEN_IF_LE cpu_to_le32(0x00000003)
-#define FILE_OVERWRITE_LE cpu_to_le32(0x00000004)
-#define FILE_OVERWRITE_IF_LE cpu_to_le32(0x00000005)
-#define FILE_CREATE_MASK_LE cpu_to_le32(0x00000007)
-
-#define FILE_READ_DESIRED_ACCESS_LE (FILE_READ_DATA_LE | \
- FILE_READ_EA_LE | \
- FILE_GENERIC_READ_LE)
-#define FILE_WRITE_DESIRE_ACCESS_LE (FILE_WRITE_DATA_LE | \
- FILE_APPEND_DATA_LE | \
- FILE_WRITE_EA_LE | \
- FILE_WRITE_ATTRIBUTES_LE | \
- FILE_GENERIC_WRITE_LE)
-
-/* Impersonation Levels */
-#define IL_ANONYMOUS_LE cpu_to_le32(0x00000000)
-#define IL_IDENTIFICATION_LE cpu_to_le32(0x00000001)
-#define IL_IMPERSONATION_LE cpu_to_le32(0x00000002)
-#define IL_DELEGATE_LE cpu_to_le32(0x00000003)
-
-/* Create Context Values */
-#define SMB2_CREATE_EA_BUFFER "ExtA" /* extended attributes */
-#define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
-#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
-#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
-#define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
-#define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
-#define SMB2_CREATE_REQUEST_LEASE "RqLs"
-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2 "DH2Q"
-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 "DH2C"
-#define SMB2_CREATE_APP_INSTANCE_ID "\x45\xBC\xA6\x6A\xEF\xA7\xF7\x4A\x90\x08\xFA\x46\x2E\x14\x4D\x74"
-#define SMB2_CREATE_APP_INSTANCE_VERSION "\xB9\x82\xD0\xB7\x3B\x56\x07\x4F\xA0\x7B\x52\x4A\x81\x16\xA0\x10"
-#define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9
-#define SMB2_CREATE_TAG_POSIX "\x93\xAD\x25\x50\x9C\xB4\x11\xE7\xB4\x23\x83\xDE\x96\x8B\xCD\x7C"
-
-struct smb2_create_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u8 SecurityFlags;
- __u8 RequestedOplockLevel;
- __le32 ImpersonationLevel;
- __le64 SmbCreateFlags;
- __le64 Reserved;
- __le32 DesiredAccess;
- __le32 FileAttributes;
- __le32 ShareAccess;
- __le32 CreateDisposition;
- __le32 CreateOptions;
- __le16 NameOffset;
- __le16 NameLength;
- __le32 CreateContextsOffset;
- __le32 CreateContextsLength;
- __u8 Buffer[0];
-} __packed;
-
-struct smb2_create_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 89 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 CreateAction;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize;
- __le64 EndofFile;
- __le32 FileAttributes;
- __le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 CreateContextsOffset;
- __le32 CreateContextsLength;
- __u8 Buffer[1];
-} __packed;
-
-struct create_context {
- __le32 Next;
- __le16 NameOffset;
- __le16 NameLength;
- __le16 Reserved;
- __le16 DataOffset;
- __le32 DataLength;
- __u8 Buffer[0];
-} __packed;
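
A sketch (not part of the original header) of how a chain of create contexts
is typically walked with the fields above: Next is the offset from the
current entry to the following one (0 ends the chain), and
NameOffset/NameLength locate the tag, e.g. SMB2_CREATE_DURABLE_HANDLE_REQUEST
with a 4-byte length. Offset validation is only hinted at here; a real parser
must check every offset and length against the request size.

static const struct create_context *
smb2_find_context_sketch(const struct create_context *cc,
			 unsigned int remaining,
			 const void *tag, unsigned int tag_len)
{
	while (remaining >= sizeof(*cc)) {
		unsigned int next = le32_to_cpu(cc->Next);
		const u8 *name = (const u8 *)cc + le16_to_cpu(cc->NameOffset);

		if (le16_to_cpu(cc->NameLength) == tag_len &&
		    !memcmp(name, tag, tag_len))
			return cc;
		if (!next || next > remaining)
			break;	/* end of chain or malformed offset */
		remaining -= next;
		cc = (const struct create_context *)((const u8 *)cc + next);
	}
	return NULL;
}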
-
-struct create_durable_req_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- __le32 Timeout;
- __le32 Flags;
- __u8 Reserved[8];
- __u8 CreateGuid[16];
-} __packed;
-
-struct create_durable_reconn_req {
- struct create_context ccontext;
- __u8 Name[8];
- union {
- __u8 Reserved[16];
- struct {
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- } Fid;
- } Data;
-} __packed;
-
-struct create_durable_reconn_v2_req {
- struct create_context ccontext;
- __u8 Name[8];
- struct {
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- } Fid;
- __u8 CreateGuid[16];
- __le32 Flags;
-} __packed;
-
-struct create_app_inst_id {
- struct create_context ccontext;
- __u8 Name[8];
- __u8 Reserved[8];
- __u8 AppInstanceId[16];
-} __packed;
-
-struct create_app_inst_id_vers {
- struct create_context ccontext;
- __u8 Name[8];
- __u8 Reserved[2];
- __u8 Padding[4];
- __le64 AppInstanceVersionHigh;
- __le64 AppInstanceVersionLow;
-} __packed;
-
-struct create_mxac_req {
- struct create_context ccontext;
- __u8 Name[8];
- __le64 Timestamp;
-} __packed;
-
-struct create_alloc_size_req {
- struct create_context ccontext;
- __u8 Name[8];
- __le64 AllocationSize;
-} __packed;
-
-struct create_posix {
- struct create_context ccontext;
- __u8 Name[16];
- __le32 Mode;
- __u32 Reserved;
-} __packed;
-
-struct create_durable_rsp {
- struct create_context ccontext;
- __u8 Name[8];
- union {
- __u8 Reserved[8];
- __u64 data;
- } Data;
-} __packed;
-
-struct create_durable_v2_rsp {
- struct create_context ccontext;
- __u8 Name[8];
- __le32 Timeout;
- __le32 Flags;
-} __packed;
-
-struct create_mxac_rsp {
- struct create_context ccontext;
- __u8 Name[8];
- __le32 QueryStatus;
- __le32 MaximalAccess;
-} __packed;
-
-struct create_disk_id_rsp {
- struct create_context ccontext;
- __u8 Name[8];
- __le64 DiskFileId;
- __le64 VolumeId;
- __u8 Reserved[16];
-} __packed;
-
-/* equivalent of the contents of SMB3.1.1 POSIX open context response */
-struct create_posix_rsp {
- struct create_context ccontext;
- __u8 Name[16];
- __le32 nlink;
- __le32 reparse_tag;
- __le32 mode;
- u8 SidBuffer[40];
-} __packed;
-
-#define SMB2_LEASE_NONE_LE cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING_LE cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING_LE cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING_LE cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02)
-
-struct lease_context {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
- __le32 LeaseState;
- __le32 LeaseFlags;
- __le64 LeaseDuration;
- __le64 ParentLeaseKeyLow;
- __le64 ParentLeaseKeyHigh;
- __le16 Epoch;
- __le16 Reserved;
-} __packed;
-
-struct create_lease {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
- struct create_context ccontext;
- __u8 Name[8];
- struct lease_context_v2 lcontext;
- __u8 Pad[4];
-} __packed;
-
-/* Currently defined values for close flags */
-#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
-struct smb2_close_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __le16 Flags;
- __le32 Reserved;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
-} __packed;
-
-struct smb2_close_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* 60 */
- __le16 Flags;
- __le32 Reserved;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile;
- __le32 Attributes;
-} __packed;
-
-struct smb2_flush_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __le16 Reserved1;
- __le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
-} __packed;
-
-struct smb2_flush_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __le16 Reserved;
-} __packed;
-
-struct smb2_buffer_desc_v1 {
- __le64 offset;
- __le32 token;
- __le32 length;
-} __packed;
-
-#define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000)
-#define SMB2_CHANNEL_RDMA_V1 cpu_to_le32(0x00000001)
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002)
-
-struct smb2_read_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __u8 Padding; /* offset from start of SMB2 header to place read */
- __u8 Reserved;
- __le32 Length;
- __le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 MinimumCount;
- __le32 Channel; /* Reserved MBZ */
- __le32 RemainingBytes;
- __le16 ReadChannelInfoOffset; /* Reserved MBZ */
- __le16 ReadChannelInfoLength; /* Reserved MBZ */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_read_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 17 */
- __u8 DataOffset;
- __u8 Reserved;
- __le32 DataLength;
- __le32 DataRemaining;
- __u32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-/* For write request Flags field below the following flag is defined: */
-#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001
-
-struct smb2_write_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __le16 DataOffset; /* offset from start of SMB2 header to write data */
- __le32 Length;
- __le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 Channel; /* Reserved MBZ */
- __le32 RemainingBytes;
- __le16 WriteChannelInfoOffset; /* Reserved MBZ */
- __le16 WriteChannelInfoLength; /* Reserved MBZ */
- __le32 Flags;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_write_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 17 */
- __u8 DataOffset;
- __u8 Reserved;
- __le32 DataLength;
- __le32 DataRemaining;
- __u32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
-
-struct duplicate_extents_to_file {
- __u64 PersistentFileHandle; /* source file handle, opaque endianness */
- __u64 VolatileFileHandle;
- __le64 SourceFileOffset;
- __le64 TargetFileOffset;
- __le64 ByteCount; /* Bytes to be copied */
-} __packed;
-
-struct smb2_ioctl_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __le16 Reserved; /* offset from start of SMB2 header to write data */
- __le32 CntCode;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 InputOffset; /* Reserved MBZ */
- __le32 InputCount;
- __le32 MaxInputResponse;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 MaxOutputResponse;
- __le32 Flags;
- __le32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_ioctl_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __le16 Reserved; /* offset from start of SMB2 header to write data */
- __le32 CntCode;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 InputOffset; /* Reserved MBZ */
- __le32 InputCount;
- __le32 OutputOffset;
- __le32 OutputCount;
- __le32 Flags;
- __le32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-struct validate_negotiate_info_req {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 DialectCount;
- __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
-} __packed;
-
-struct validate_negotiate_info_rsp {
- __le32 Capabilities;
- __u8 Guid[SMB2_CLIENT_GUID_SIZE];
- __le16 SecurityMode;
- __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
-struct smb_sockaddr_in {
- __be16 Port;
- __be32 IPv4address;
- __u8 Reserved[8];
-} __packed;
-
-struct smb_sockaddr_in6 {
- __be16 Port;
- __be32 FlowInfo;
- __u8 IPv6address[16];
- __be32 ScopeId;
-} __packed;
-
-#define INTERNETWORK 0x0002
-#define INTERNETWORKV6 0x0017
-
-struct sockaddr_storage_rsp {
- __le16 Family;
- union {
- struct smb_sockaddr_in addr4;
- struct smb_sockaddr_in6 addr6;
- };
-} __packed;
-
-#define RSS_CAPABLE 0x00000001
-#define RDMA_CAPABLE 0x00000002
-
-struct network_interface_info_ioctl_rsp {
- __le32 Next; /* next interface. zero if this is last one */
- __le32 IfIndex;
- __le32 Capability; /* RSS or RDMA Capable */
- __le32 Reserved;
- __le64 LinkSpeed;
- char SockAddr_Storage[128];
-} __packed;
-
-struct file_object_buf_type1_ioctl_rsp {
- __u8 ObjectId[16];
- __u8 BirthVolumeId[16];
- __u8 BirthObjectId[16];
- __u8 DomainId[16];
-} __packed;
-
-struct resume_key_ioctl_rsp {
- __le64 ResumeKey[3];
- __le32 ContextLength;
- __u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
-} __packed;
-
-struct copychunk_ioctl_req {
- __le64 ResumeKey[3];
- __le32 ChunkCount;
- __le32 Reserved;
- __u8 Chunks[1]; /* array of srv_copychunk */
-} __packed;
-
-struct srv_copychunk {
- __le64 SourceOffset;
- __le64 TargetOffset;
- __le32 Length;
- __le32 Reserved;
-} __packed;
-
-struct copychunk_ioctl_rsp {
- __le32 ChunksWritten;
- __le32 ChunkBytesWritten;
- __le32 TotalBytesWritten;
-} __packed;
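
A small sketch (not from the original header) of how the Chunks[] array in
struct copychunk_ioctl_req is traversed: ChunkCount entries of struct
srv_copychunk follow the fixed part of the request. Checking ChunkCount and
the chunk array against the actual input length is omitted here but required
in practice.

static void smb2_walk_copychunks_sketch(const struct copychunk_ioctl_req *req)
{
	const struct srv_copychunk *chunk =
		(const struct srv_copychunk *)req->Chunks;
	u32 i;

	for (i = 0; i < le32_to_cpu(req->ChunkCount); i++, chunk++)
		pr_debug("chunk %u: src %llu -> dst %llu, %u bytes\n", i,
			 le64_to_cpu(chunk->SourceOffset),
			 le64_to_cpu(chunk->TargetOffset),
			 le32_to_cpu(chunk->Length));
}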
-
-struct file_sparse {
- __u8 SetSparse;
-} __packed;
-
-struct file_zero_data_information {
- __le64 FileOffset;
- __le64 BeyondFinalZero;
-} __packed;
-
-struct file_allocated_range_buffer {
- __le64 file_offset;
- __le64 length;
-} __packed;
-
-struct reparse_data_buffer {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __u8 DataBuffer[]; /* Variable Length */
-} __packed;
-
-/* Completion Filter flags for Notify */
-#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001
-#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002
-#define FILE_NOTIFY_CHANGE_NAME 0x00000003
-#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004
-#define FILE_NOTIFY_CHANGE_SIZE 0x00000008
-#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010
-#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020
-#define FILE_NOTIFY_CHANGE_CREATION 0x00000040
-#define FILE_NOTIFY_CHANGE_EA 0x00000080
-#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100
-#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200
-#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400
-#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
-
-/* Flags */
-#define SMB2_WATCH_TREE 0x0001
-
-struct smb2_notify_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 32 */
- __le16 Flags;
- __le32 OutputBufferLength;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u32 CompletionFileter;
- __u32 Reserved;
-} __packed;
-
-struct smb2_notify_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/* SMB2 Notify Action Flags */
-#define FILE_ACTION_ADDED 0x00000001
-#define FILE_ACTION_REMOVED 0x00000002
-#define FILE_ACTION_MODIFIED 0x00000003
-#define FILE_ACTION_RENAMED_OLD_NAME 0x00000004
-#define FILE_ACTION_RENAMED_NEW_NAME 0x00000005
-#define FILE_ACTION_ADDED_STREAM 0x00000006
-#define FILE_ACTION_REMOVED_STREAM 0x00000007
-#define FILE_ACTION_MODIFIED_STREAM 0x00000008
-#define FILE_ACTION_REMOVED_BY_DELETE 0x00000009
-
-#define SMB2_LOCKFLAG_SHARED 0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE 0x0002
-#define SMB2_LOCKFLAG_UNLOCK 0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-#define SMB2_LOCKFLAG_MASK 0x0007
-
-struct smb2_lock_element {
- __le64 Offset;
- __le64 Length;
- __le32 Flags;
- __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 48 */
- __le16 LockCount;
- __le32 Reserved;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- /* Followed by at least one */
- struct smb2_lock_element locks[1];
-} __packed;
-
-struct smb2_lock_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __u16 Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS 0x01
-#define SMB2_RETURN_SINGLE_ENTRY 0x02
-#define SMB2_INDEX_SPECIFIED 0x04
-#define SMB2_REOPEN 0x10
-
-struct smb2_query_directory_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 FileInformationClass;
- __u8 Flags;
- __le32 FileIndex;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le16 FileNameOffset;
- __le16 FileNameLength;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE 0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY 0x03
-#define SMB2_O_INFO_QUOTA 0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO 0x00000001
-#define GROUP_SECINFO 0x00000002
-#define DACL_SECINFO 0x00000004
-#define SACL_SECINFO 0x00000008
-#define LABEL_SECINFO 0x00000010
-#define ATTRIBUTE_SECINFO 0x00000020
-#define SCOPE_SECINFO 0x00000040
-#define BACKUP_SECINFO 0x00010000
-#define UNPROTECTED_SACL_SECINFO 0x10000000
-#define UNPROTECTED_DACL_SECINFO 0x20000000
-#define PROTECTED_SACL_SECINFO 0x40000000
-#define PROTECTED_DACL_SECINFO 0x80000000
-
-struct smb2_query_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 41 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 OutputBufferLength;
- __le16 InputBufferOffset;
- __u16 Reserved;
- __le32 InputBufferLength;
- __le32 AdditionalInformation;
- __le32 Flags;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_set_info_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 33 */
- __u8 InfoType;
- __u8 FileInfoClass;
- __le32 BufferLength;
- __le16 BufferOffset;
- __u16 Reserved;
- __le32 AdditionalInformation;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 2 */
-} __packed;
-
-/* FILE Info response size */
-#define FILE_DIRECTORY_INFORMATION_SIZE 1
-#define FILE_FULL_DIRECTORY_INFORMATION_SIZE 2
-#define FILE_BOTH_DIRECTORY_INFORMATION_SIZE 3
-#define FILE_BASIC_INFORMATION_SIZE 40
-#define FILE_STANDARD_INFORMATION_SIZE 24
-#define FILE_INTERNAL_INFORMATION_SIZE 8
-#define FILE_EA_INFORMATION_SIZE 4
-#define FILE_ACCESS_INFORMATION_SIZE 4
-#define FILE_NAME_INFORMATION_SIZE 9
-#define FILE_RENAME_INFORMATION_SIZE 10
-#define FILE_LINK_INFORMATION_SIZE 11
-#define FILE_NAMES_INFORMATION_SIZE 12
-#define FILE_DISPOSITION_INFORMATION_SIZE 13
-#define FILE_POSITION_INFORMATION_SIZE 14
-#define FILE_FULL_EA_INFORMATION_SIZE 15
-#define FILE_MODE_INFORMATION_SIZE 4
-#define FILE_ALIGNMENT_INFORMATION_SIZE 4
-#define FILE_ALL_INFORMATION_SIZE 104
-#define FILE_ALLOCATION_INFORMATION_SIZE 19
-#define FILE_END_OF_FILE_INFORMATION_SIZE 20
-#define FILE_ALTERNATE_NAME_INFORMATION_SIZE 8
-#define FILE_STREAM_INFORMATION_SIZE 32
-#define FILE_PIPE_INFORMATION_SIZE 23
-#define FILE_PIPE_LOCAL_INFORMATION_SIZE 24
-#define FILE_PIPE_REMOTE_INFORMATION_SIZE 25
-#define FILE_MAILSLOT_QUERY_INFORMATION_SIZE 26
-#define FILE_MAILSLOT_SET_INFORMATION_SIZE 27
-#define FILE_COMPRESSION_INFORMATION_SIZE 16
-#define FILE_OBJECT_ID_INFORMATION_SIZE 29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION_SIZE 31
-#define FILE_QUOTA_INFORMATION_SIZE 32
-#define FILE_REPARSE_POINT_INFORMATION_SIZE 33
-#define FILE_NETWORK_OPEN_INFORMATION_SIZE 56
-#define FILE_ATTRIBUTE_TAG_INFORMATION_SIZE 8
-
-/* FS Info response size */
-#define FS_DEVICE_INFORMATION_SIZE 8
-#define FS_ATTRIBUTE_INFORMATION_SIZE 16
-#define FS_VOLUME_INFORMATION_SIZE 24
-#define FS_SIZE_INFORMATION_SIZE 24
-#define FS_FULL_SIZE_INFORMATION_SIZE 32
-#define FS_SECTOR_SIZE_INFORMATION_SIZE 28
-#define FS_OBJECT_ID_INFORMATION_SIZE 64
-#define FS_CONTROL_INFORMATION_SIZE 48
-#define FS_POSIX_INFORMATION_SIZE 56
-
-/* FS_ATTRIBUTE_File_System_Name */
-#define FS_TYPE_SUPPORT_SIZE 44
-struct fs_type_info {
- char *fs_name;
- long magic_number;
-} __packed;
-
-struct smb2_oplock_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 Reserved2;
- __le64 PersistentFid;
- __le64 VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 44 */
- __le16 Epoch;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 CurrentLeaseState;
- __le32 NewLeaseState;
- __le32 BreakReason;
- __le32 AccessMaskHint;
- __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 Reserved;
- __le32 Flags;
- __u8 LeaseKey[16];
- __le32 LeaseState;
- __le64 LeaseDuration;
-} __packed;
-
-/*
- * PDU infolevel structure definitions
- * BB consider moving to a different header
- */
-
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION 1 /* Query */
-#define FS_LABEL_INFORMATION 2 /* Set */
-#define FS_SIZE_INFORMATION 3 /* Query */
-#define FS_DEVICE_INFORMATION 4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
-#define FS_CONTROL_INFORMATION 6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION 7 /* Query */
-#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION 9 /* Query */
-#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
- __le64 TotalAllocationUnits;
- __le64 CallerAvailableAllocationUnits;
- __le64 ActualAvailableAllocationUnits;
- __le32 SectorsPerAllocationUnit;
- __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
- __le32 LogicalBytesPerSector;
- __le32 PhysicalBytesPerSectorForAtomicity;
- __le32 PhysicalBytesPerSectorForPerf;
- __le32 FSEffPhysicalBytesPerSectorForAtomicity;
- __le32 Flags;
- __le32 ByteOffsetForSectorAlignment;
- __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* File System Control Information */
-struct smb2_fs_control_info {
- __le64 FreeSpaceStartFiltering;
- __le64 FreeSpaceThreshold;
- __le64 FreeSpaceStopFiltering;
- __le64 DefaultQuotaThreshold;
- __le64 DefaultQuotaLimit;
- __le32 FileSystemControlFlags;
- __le32 Padding;
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION 1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION 4
-#define FILE_STANDARD_INFORMATION 5
-#define FILE_INTERNAL_INFORMATION 6
-#define FILE_EA_INFORMATION 7
-#define FILE_ACCESS_INFORMATION 8
-#define FILE_NAME_INFORMATION 9
-#define FILE_RENAME_INFORMATION 10
-#define FILE_LINK_INFORMATION 11
-#define FILE_NAMES_INFORMATION 12
-#define FILE_DISPOSITION_INFORMATION 13
-#define FILE_POSITION_INFORMATION 14
-#define FILE_FULL_EA_INFORMATION 15
-#define FILE_MODE_INFORMATION 16
-#define FILE_ALIGNMENT_INFORMATION 17
-#define FILE_ALL_INFORMATION 18
-#define FILE_ALLOCATION_INFORMATION 19
-#define FILE_END_OF_FILE_INFORMATION 20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION 22
-#define FILE_PIPE_INFORMATION 23
-#define FILE_PIPE_LOCAL_INFORMATION 24
-#define FILE_PIPE_REMOTE_INFORMATION 25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION 27
-#define FILE_COMPRESSION_INFORMATION 28
-#define FILE_OBJECT_ID_INFORMATION 29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION 31
-#define FILE_QUOTA_INFORMATION 32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION 34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION 36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION 40
-#define FILE_SFIO_RESERVE_INFORMATION 44
-#define FILE_SFIO_VOLUME_INFORMATION 45
-#define FILE_HARD_LINK_INFORMATION 46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-
-#define OP_BREAK_STRUCT_SIZE_20 24
-#define OP_BREAK_STRUCT_SIZE_21 36
-
-struct smb2_file_access_info {
- __le32 AccessFlags;
-} __packed;
-
-struct smb2_file_alignment_info {
- __le32 AlignmentRequirement;
-} __packed;
-
-struct smb2_file_internal_info {
- __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
- __u8 ReplaceIfExists; /* 1 = replace existing target with new */
- /* 0 = fail if target already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (why says spec?) */
- __le32 FileNameLength;
- char FileName[0]; /* New name to be assigned */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
- __u8 ReplaceIfExists; /* 1 = replace existing link with new */
- /* 0 = fail if link already exists */
- __u8 Reserved[7];
- __u64 RootDirectory; /* MBZ for network operations (why says spec?) */
- __le32 FileNameLength;
- char FileName[0]; /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
-/*
- * This level 18, although with struct with same name is different from cifs
- * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
- * CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
- __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 Attributes;
- __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile; /* size ie offset to first free byte in file */
- __le32 NumberOfLinks; /* hard links */
- __u8 DeletePending;
- __u8 Directory;
- __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
- __le64 IndexNumber;
- __le32 EASize;
- __le32 AccessFlags;
- __le64 CurrentByteOffset;
- __le32 Mode;
- __le32 AlignmentRequirement;
- __le32 FileNameLength;
- char FileName[1];
-} __packed; /* level 18 Query */
-
-struct smb2_file_basic_info { /* data block encoding of response to level 4 */
- __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 Attributes;
- __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
-} __packed;
-
-struct smb2_file_alt_name_info {
- __le32 FileNameLength;
- char FileName[0];
-} __packed;
-
-struct smb2_file_stream_info {
- __le32 NextEntryOffset;
- __le32 StreamNameLength;
- __le64 StreamSize;
- __le64 StreamAllocationSize;
- char StreamName[0];
-} __packed;
-
-struct smb2_file_eof_info { /* encoding of request for level 20 */
- __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
-struct smb2_file_ntwrk_info {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize;
- __le64 EndOfFile;
- __le32 Attributes;
- __le32 Reserved;
-} __packed;
-
-struct smb2_file_standard_info {
- __le64 AllocationSize;
- __le64 EndOfFile;
- __le32 NumberOfLinks; /* hard links */
- __u8 DeletePending;
- __u8 Directory;
- __le16 Reserved;
-} __packed; /* level 5 Query */
-
-struct smb2_file_ea_info {
- __le32 EASize;
-} __packed;
-
-struct smb2_file_alloc_info {
- __le64 AllocationSize;
-} __packed;
-
-struct smb2_file_disposition_info {
- __u8 DeletePending;
-} __packed;
-
-struct smb2_file_pos_info {
- __le64 CurrentByteOffset;
-} __packed;
-
-#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000103e)
-
-struct smb2_file_mode_info {
- __le32 Mode;
-} __packed;
-
-#define COMPRESSION_FORMAT_NONE 0x0000
-#define COMPRESSION_FORMAT_LZNT1 0x0002
-
-struct smb2_file_comp_info {
- __le64 CompressedFileSize;
- __le16 CompressionFormat;
- __u8 CompressionUnitShift;
- __u8 ChunkShift;
- __u8 ClusterShift;
- __u8 Reserved[3];
-} __packed;
-
-struct smb2_file_attr_tag_info {
- __le32 FileAttributes;
- __le32 ReparseTag;
-} __packed;
-
-#define SL_RESTART_SCAN 0x00000001
-#define SL_RETURN_SINGLE_ENTRY 0x00000002
-#define SL_INDEX_SPECIFIED 0x00000004
-
-struct smb2_ea_info_req {
- __le32 NextEntryOffset;
- __u8 EaNameLength;
- char name[1];
-} __packed; /* level 15 Query */
-
-struct smb2_ea_info {
- __le32 NextEntryOffset;
- __u8 Flags;
- __u8 EaNameLength;
- __le16 EaValueLength;
- char name[1];
- /* optionally followed by value */
-} __packed; /* level 15 Query */
-
-struct create_ea_buf_req {
- struct create_context ccontext;
- __u8 Name[8];
- struct smb2_ea_info ea;
-} __packed;
-
-struct create_sd_buf_req {
- struct create_context ccontext;
- __u8 Name[8];
- struct smb_ntsd ntsd;
-} __packed;
-
-/* Find File infolevels */
-#define SMB_FIND_FILE_POSIX_INFO 0x064
-
-/* Level 100 query info */
-struct smb311_posix_qinfo {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 DosAttributes;
- __le64 Inode;
- __le32 DeviceId;
- __le32 Zero;
- /* beginning of POSIX Create Context Response */
- __le32 HardLinks;
- __le32 ReparseTag;
- __le32 Mode;
- u8 Sids[];
- /*
- * var sized owner SID
- * var sized group SID
- * le32 filenamelength
- * u8 filename[]
- */
-} __packed;
-
-struct smb2_posix_info {
- __le32 NextEntryOffset;
- __u32 Ignored;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 DosAttributes;
- __le64 Inode;
- __le32 DeviceId;
- __le32 Zero;
- /* beginning of POSIX Create Context Response */
- __le32 HardLinks;
- __le32 ReparseTag;
- __le32 Mode;
- u8 SidBuffer[40];
- __le32 name_len;
- u8 name[1];
- /*
- * var sized owner SID
- * var sized group SID
- * le32 filenamelength
- * u8 filename[]
- */
-} __packed;
-
-/* functions */
-void init_smb2_1_server(struct ksmbd_conn *conn);
-void init_smb3_0_server(struct ksmbd_conn *conn);
-void init_smb3_02_server(struct ksmbd_conn *conn);
-int init_smb3_11_server(struct ksmbd_conn *conn);
-
-void init_smb2_max_read_size(unsigned int sz);
-void init_smb2_max_write_size(unsigned int sz);
-void init_smb2_max_trans_size(unsigned int sz);
-
-bool is_smb2_neg_cmd(struct ksmbd_work *work);
-bool is_smb2_rsp(struct ksmbd_work *work);
-
-u16 get_smb2_cmd_val(struct ksmbd_work *work);
-void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
-int init_smb2_rsp_hdr(struct ksmbd_work *work);
-int smb2_allocate_rsp_buf(struct ksmbd_work *work);
-bool is_chained_smb2_message(struct ksmbd_work *work);
-int init_smb2_neg_rsp(struct ksmbd_work *work);
-void smb2_set_err_rsp(struct ksmbd_work *work);
-int smb2_check_user_session(struct ksmbd_work *work);
-int smb2_get_ksmbd_tcon(struct ksmbd_work *work);
-bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command);
-int smb2_check_sign_req(struct ksmbd_work *work);
-void smb2_set_sign_rsp(struct ksmbd_work *work);
-int smb3_check_sign_req(struct ksmbd_work *work);
-void smb3_set_sign_rsp(struct ksmbd_work *work);
-int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
- __le16 dialects_count);
-struct file_lock *smb_flock_init(struct file *f);
-int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
- void **arg);
-void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
-struct channel *lookup_chann_list(struct ksmbd_session *sess,
- struct ksmbd_conn *conn);
-void smb3_preauth_hash_rsp(struct ksmbd_work *work);
-bool smb3_is_transform_hdr(void *buf);
-int smb3_decrypt_req(struct ksmbd_work *work);
-int smb3_encrypt_resp(struct ksmbd_work *work);
-bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work);
-int smb2_set_rsp_credits(struct ksmbd_work *work);
-
-/* smb2 misc functions */
-int ksmbd_smb2_check_message(struct ksmbd_work *work);
-
-/* smb2 command handlers */
-int smb2_handle_negotiate(struct ksmbd_work *work);
-int smb2_negotiate_request(struct ksmbd_work *work);
-int smb2_sess_setup(struct ksmbd_work *work);
-int smb2_tree_connect(struct ksmbd_work *work);
-int smb2_tree_disconnect(struct ksmbd_work *work);
-int smb2_session_logoff(struct ksmbd_work *work);
-int smb2_open(struct ksmbd_work *work);
-int smb2_query_info(struct ksmbd_work *work);
-int smb2_query_dir(struct ksmbd_work *work);
-int smb2_close(struct ksmbd_work *work);
-int smb2_echo(struct ksmbd_work *work);
-int smb2_set_info(struct ksmbd_work *work);
-int smb2_read(struct ksmbd_work *work);
-int smb2_write(struct ksmbd_work *work);
-int smb2_flush(struct ksmbd_work *work);
-int smb2_cancel(struct ksmbd_work *work);
-int smb2_lock(struct ksmbd_work *work);
-int smb2_ioctl(struct ksmbd_work *work);
-int smb2_oplock_break(struct ksmbd_work *work);
-int smb2_notify(struct ksmbd_work *ksmbd_work);
-
-#endif /* _SMB2PDU_H */
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
deleted file mode 100644
index 6e79e7577f6b..000000000000
--- a/fs/ksmbd/smb_common.h
+++ /dev/null
@@ -1,534 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2018 Samsung Electronics Co., Ltd.
- */
-
-#ifndef __SMB_COMMON_H__
-#define __SMB_COMMON_H__
-
-#include <linux/kernel.h>
-
-#include "glob.h"
-#include "nterr.h"
-#include "smb2pdu.h"
-
-/* ksmbd's Specific ERRNO */
-#define ESHARE 50000
-
-#define SMB1_PROT 0
-#define SMB2_PROT 1
-#define SMB21_PROT 2
-/* multi-protocol negotiate request */
-#define SMB2X_PROT 3
-#define SMB30_PROT 4
-#define SMB302_PROT 5
-#define SMB311_PROT 6
-#define BAD_PROT 0xFFFF
-
-#define SMB1_VERSION_STRING "1.0"
-#define SMB20_VERSION_STRING "2.0"
-#define SMB21_VERSION_STRING "2.1"
-#define SMB30_VERSION_STRING "3.0"
-#define SMB302_VERSION_STRING "3.02"
-#define SMB311_VERSION_STRING "3.1.1"
-
-/* Dialects */
-#define SMB10_PROT_ID 0x00
-#define SMB20_PROT_ID 0x0202
-#define SMB21_PROT_ID 0x0210
-/* multi-protocol negotiate request */
-#define SMB2X_PROT_ID 0x02FF
-#define SMB30_PROT_ID 0x0300
-#define SMB302_PROT_ID 0x0302
-#define SMB311_PROT_ID 0x0311
-#define BAD_PROT_ID 0xFFFF
-
-#define SMB_ECHO_INTERVAL (60 * HZ)
-
-#define CIFS_DEFAULT_IOSIZE (64 * 1024)
-#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
-
-#define MAX_STREAM_PROT_LEN 0x00FFFFFF
-
-/* Responses when opening a file. */
-#define F_SUPERSEDED 0
-#define F_OPENED 1
-#define F_CREATED 2
-#define F_OVERWRITTEN 3
-
-/*
- * File Attribute flags
- */
-#define ATTR_READONLY 0x0001
-#define ATTR_HIDDEN 0x0002
-#define ATTR_SYSTEM 0x0004
-#define ATTR_VOLUME 0x0008
-#define ATTR_DIRECTORY 0x0010
-#define ATTR_ARCHIVE 0x0020
-#define ATTR_DEVICE 0x0040
-#define ATTR_NORMAL 0x0080
-#define ATTR_TEMPORARY 0x0100
-#define ATTR_SPARSE 0x0200
-#define ATTR_REPARSE 0x0400
-#define ATTR_COMPRESSED 0x0800
-#define ATTR_OFFLINE 0x1000
-#define ATTR_NOT_CONTENT_INDEXED 0x2000
-#define ATTR_ENCRYPTED 0x4000
-#define ATTR_POSIX_SEMANTICS 0x01000000
-#define ATTR_BACKUP_SEMANTICS 0x02000000
-#define ATTR_DELETE_ON_CLOSE 0x04000000
-#define ATTR_SEQUENTIAL_SCAN 0x08000000
-#define ATTR_RANDOM_ACCESS 0x10000000
-#define ATTR_NO_BUFFERING 0x20000000
-#define ATTR_WRITE_THROUGH 0x80000000
-
-#define ATTR_READONLY_LE cpu_to_le32(ATTR_READONLY)
-#define ATTR_HIDDEN_LE cpu_to_le32(ATTR_HIDDEN)
-#define ATTR_SYSTEM_LE cpu_to_le32(ATTR_SYSTEM)
-#define ATTR_DIRECTORY_LE cpu_to_le32(ATTR_DIRECTORY)
-#define ATTR_ARCHIVE_LE cpu_to_le32(ATTR_ARCHIVE)
-#define ATTR_NORMAL_LE cpu_to_le32(ATTR_NORMAL)
-#define ATTR_TEMPORARY_LE cpu_to_le32(ATTR_TEMPORARY)
-#define ATTR_SPARSE_FILE_LE cpu_to_le32(ATTR_SPARSE)
-#define ATTR_REPARSE_POINT_LE cpu_to_le32(ATTR_REPARSE)
-#define ATTR_COMPRESSED_LE cpu_to_le32(ATTR_COMPRESSED)
-#define ATTR_OFFLINE_LE cpu_to_le32(ATTR_OFFLINE)
-#define ATTR_NOT_CONTENT_INDEXED_LE cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
-#define ATTR_ENCRYPTED_LE cpu_to_le32(ATTR_ENCRYPTED)
-#define ATTR_INTEGRITY_STREAML_LE cpu_to_le32(0x00008000)
-#define ATTR_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000)
-#define ATTR_MASK_LE cpu_to_le32(0x00007FB7)
-
-/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
-#define FILE_SUPPORTS_SPARSE_VDL 0x10000000 /* faster nonsparse extend */
-#define FILE_SUPPORTS_BLOCK_REFCOUNTING 0x08000000 /* allow ioctl dup extents */
-#define FILE_SUPPORT_INTEGRITY_STREAMS 0x04000000
-#define FILE_SUPPORTS_USN_JOURNAL 0x02000000
-#define FILE_SUPPORTS_OPEN_BY_FILE_ID 0x01000000
-#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
-#define FILE_SUPPORTS_HARD_LINKS 0x00400000
-#define FILE_SUPPORTS_TRANSACTIONS 0x00200000
-#define FILE_SEQUENTIAL_WRITE_ONCE 0x00100000
-#define FILE_READ_ONLY_VOLUME 0x00080000
-#define FILE_NAMED_STREAMS 0x00040000
-#define FILE_SUPPORTS_ENCRYPTION 0x00020000
-#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
-#define FILE_VOLUME_IS_COMPRESSED 0x00008000
-#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
-#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
-#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
-#define FILE_VOLUME_QUOTAS 0x00000020
-#define FILE_FILE_COMPRESSION 0x00000010
-#define FILE_PERSISTENT_ACLS 0x00000008
-#define FILE_UNICODE_ON_DISK 0x00000004
-#define FILE_CASE_PRESERVED_NAMES 0x00000002
-#define FILE_CASE_SENSITIVE_SEARCH 0x00000001
-
-#define FILE_READ_DATA 0x00000001 /* Data can be read from the file */
-#define FILE_WRITE_DATA 0x00000002 /* Data can be written to the file */
-#define FILE_APPEND_DATA 0x00000004 /* Data can be appended to the file */
-#define FILE_READ_EA 0x00000008 /* Extended attributes associated */
-/* with the file can be read */
-#define FILE_WRITE_EA 0x00000010 /* Extended attributes associated */
-/* with the file can be written */
-#define FILE_EXECUTE 0x00000020 /*Data can be read into memory from */
-/* the file using system paging I/O */
-#define FILE_DELETE_CHILD 0x00000040
-#define FILE_READ_ATTRIBUTES 0x00000080 /* Attributes associated with the */
-/* file can be read */
-#define FILE_WRITE_ATTRIBUTES 0x00000100 /* Attributes associated with the */
-/* file can be written */
-#define DELETE 0x00010000 /* The file can be deleted */
-#define READ_CONTROL 0x00020000 /* The access control list and */
-/* ownership associated with the */
-/* file can be read */
-#define WRITE_DAC 0x00040000 /* The access control list and */
-/* ownership associated with the */
-/* file can be written. */
-#define WRITE_OWNER 0x00080000 /* Ownership information associated */
-/* with the file can be written */
-#define SYNCHRONIZE 0x00100000 /* The file handle can be waited on to */
-/* synchronize with the completion */
-/* of an input/output request */
-#define GENERIC_ALL 0x10000000
-#define GENERIC_EXECUTE 0x20000000
-#define GENERIC_WRITE 0x40000000
-#define GENERIC_READ 0x80000000
-/* In summary - Relevant file */
-/* access flags from CIFS are */
-/* file_read_data, file_write_data */
-/* file_execute, file_read_attributes*/
-/* write_dac, and delete. */
-
-#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
-#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
- | FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
-#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
-
-#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
- | FILE_READ_ATTRIBUTES \
- | DELETE | READ_CONTROL | WRITE_DAC \
- | WRITE_OWNER | SYNCHRONIZE)
-#define SET_FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
- | FILE_WRITE_EA \
- | FILE_DELETE_CHILD \
- | FILE_WRITE_ATTRIBUTES \
- | DELETE | READ_CONTROL | WRITE_DAC \
- | WRITE_OWNER | SYNCHRONIZE)
-#define SET_FILE_EXEC_RIGHTS (FILE_READ_EA | FILE_WRITE_EA | FILE_EXECUTE \
- | FILE_READ_ATTRIBUTES \
- | FILE_WRITE_ATTRIBUTES \
- | DELETE | READ_CONTROL | WRITE_DAC \
- | WRITE_OWNER | SYNCHRONIZE)
-
-#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
- | READ_CONTROL | SYNCHRONIZE)
-
-/* generic flags for file open */
-#define GENERIC_READ_FLAGS (READ_CONTROL | FILE_READ_DATA | \
- FILE_READ_ATTRIBUTES | \
- FILE_READ_EA | SYNCHRONIZE)
-
-#define GENERIC_WRITE_FLAGS (READ_CONTROL | FILE_WRITE_DATA | \
- FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | \
- FILE_APPEND_DATA | SYNCHRONIZE)
-
-#define GENERIC_EXECUTE_FLAGS (READ_CONTROL | FILE_EXECUTE | \
- FILE_READ_ATTRIBUTES | SYNCHRONIZE)
-
-#define GENERIC_ALL_FLAGS (DELETE | READ_CONTROL | WRITE_DAC | \
- WRITE_OWNER | SYNCHRONIZE | FILE_READ_DATA | \
- FILE_WRITE_DATA | FILE_APPEND_DATA | \
- FILE_READ_EA | FILE_WRITE_EA | \
- FILE_EXECUTE | FILE_DELETE_CHILD | \
- FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
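
One way (a sketch, not in the original header) the GENERIC_* bits above could
be expanded into the specific rights implied by the GENERIC_*_FLAGS masks;
purely illustrative of how the masks compose.

static inline unsigned int smb_expand_generic_rights_sketch(unsigned int access)
{
	unsigned int rights = access & ~(GENERIC_READ | GENERIC_WRITE |
					 GENERIC_EXECUTE | GENERIC_ALL);

	if (access & GENERIC_READ)
		rights |= GENERIC_READ_FLAGS;
	if (access & GENERIC_WRITE)
		rights |= GENERIC_WRITE_FLAGS;
	if (access & GENERIC_EXECUTE)
		rights |= GENERIC_EXECUTE_FLAGS;
	if (access & GENERIC_ALL)
		rights |= GENERIC_ALL_FLAGS;
	return rights;
}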
-
-#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
-#define SMB_COM_NEGOTIATE 0x72
-
-#define SMB1_CLIENT_GUID_SIZE (16)
-struct smb_hdr {
- __be32 smb_buf_length;
- __u8 Protocol[4];
- __u8 Command;
- union {
- struct {
- __u8 ErrorClass;
- __u8 Reserved;
- __le16 Error;
- } __packed DosError;
- __le32 CifsError;
- } __packed Status;
- __u8 Flags;
- __le16 Flags2; /* note: le */
- __le16 PidHigh;
- union {
- struct {
- __le32 SequenceNumber; /* le */
- __u32 Reserved; /* zero */
- } __packed Sequence;
- __u8 SecuritySignature[8]; /* le */
- } __packed Signature;
- __u8 pad[2];
- __le16 Tid;
- __le16 Pid;
- __le16 Uid;
- __le16 Mid;
- __u8 WordCount;
-} __packed;
-
-struct smb_negotiate_req {
- struct smb_hdr hdr; /* wct = 0 */
- __le16 ByteCount;
- unsigned char DialectsArray[1];
-} __packed;
-
-struct smb_negotiate_rsp {
- struct smb_hdr hdr; /* wct = 17 */
- __le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
- __u8 SecurityMode;
- __le16 MaxMpxCount;
- __le16 MaxNumberVcs;
- __le32 MaxBufferSize;
- __le32 MaxRawSize;
- __le32 SessionKey;
- __le32 Capabilities; /* see below */
- __le32 SystemTimeLow;
- __le32 SystemTimeHigh;
- __le16 ServerTimeZone;
- __u8 EncryptionKeyLength;
- __le16 ByteCount;
- union {
- unsigned char EncryptionKey[8]; /* cap extended security off */
- /* followed by Domain name - if extended security is off */
- /* followed by 16 bytes of server GUID */
- /* then security blob if cap_extended_security negotiated */
- struct {
- unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
- unsigned char SecurityBlob[1];
- } __packed extended_response;
- } __packed u;
-} __packed;
-
-struct filesystem_attribute_info {
- __le32 Attributes;
- __le32 MaxPathNameComponentLength;
- __le32 FileSystemNameLen;
- __le16 FileSystemName[1]; /* do not have to save this - get subset? */
-} __packed;
-
-struct filesystem_device_info {
- __le32 DeviceType;
- __le32 DeviceCharacteristics;
-} __packed; /* device info level 0x104 */
-
-struct filesystem_vol_info {
- __le64 VolumeCreationTime;
- __le32 SerialNumber;
- __le32 VolumeLabelSize;
- __le16 Reserved;
- __le16 VolumeLabel[1];
-} __packed;
-
-struct filesystem_info {
- __le64 TotalAllocationUnits;
- __le64 FreeAllocationUnits;
- __le32 SectorsPerAllocationUnit;
- __le32 BytesPerSector;
-} __packed; /* size info, level 0x103 */
-
-#define EXTENDED_INFO_MAGIC 0x43667364 /* Cfsd */
-#define STRING_LENGTH 28
-
-struct fs_extended_info {
- __le32 magic;
- __le32 version;
- __le32 release;
- __u64 rel_date;
- char version_string[STRING_LENGTH];
-} __packed;
-
-struct object_id_info {
- char objid[16];
- struct fs_extended_info extended_info;
-} __packed;
-
-struct file_directory_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 ExtFileAttributes;
- __le32 FileNameLength;
- char FileName[1];
-} __packed; /* level 0x101 FF resp data */
-
-struct file_names_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le32 FileNameLength;
- char FileName[1];
-} __packed; /* level 0xc FF resp data */
-
-struct file_full_directory_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 ExtFileAttributes;
- __le32 FileNameLength;
- __le32 EaSize;
- char FileName[1];
-} __packed; /* level 0x102 FF resp */
-
-struct file_both_directory_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 ExtFileAttributes;
- __le32 FileNameLength;
- __le32 EaSize; /* length of the xattrs */
- __u8 ShortNameLength;
- __u8 Reserved;
- __u8 ShortName[24];
- char FileName[1];
-} __packed; /* level 0x104 FFrsp data */
-
-struct file_id_both_directory_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 ExtFileAttributes;
- __le32 FileNameLength;
- __le32 EaSize; /* length of the xattrs */
- __u8 ShortNameLength;
- __u8 Reserved;
- __u8 ShortName[24];
- __le16 Reserved2;
- __le64 UniqueId;
- char FileName[1];
-} __packed;
-
-struct file_id_full_dir_info {
- __le32 NextEntryOffset;
- __u32 FileIndex;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 EndOfFile;
- __le64 AllocationSize;
- __le32 ExtFileAttributes;
- __le32 FileNameLength;
- __le32 EaSize; /* EA size */
- __le32 Reserved;
- __le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit */
- char FileName[1];
-} __packed; /* level 0x105 FF rsp data */
-
-struct smb_version_values {
- char *version_string;
- __u16 protocol_id;
- __le16 lock_cmd;
- __u32 capabilities;
- __u32 max_read_size;
- __u32 max_write_size;
- __u32 max_trans_size;
- __u32 large_lock_type;
- __u32 exclusive_lock_type;
- __u32 shared_lock_type;
- __u32 unlock_lock_type;
- size_t header_size;
- size_t max_header_size;
- size_t read_rsp_size;
- unsigned int cap_unix;
- unsigned int cap_nt_find;
- unsigned int cap_large_files;
- __u16 signing_enabled;
- __u16 signing_required;
- size_t create_lease_size;
- size_t create_durable_size;
- size_t create_durable_v2_size;
- size_t create_mxac_size;
- size_t create_disk_id_size;
- size_t create_posix_size;
-};
-
-struct filesystem_posix_info {
- /* For undefined recommended transfer size return -1 in that field */
- __le32 OptimalTransferSize; /* bsize on some os, iosize on other os */
- __le32 BlockSize;
- /* The next three fields are in terms of the block size
- * (above). If the block size is unknown, 4096 would be a
- * reasonable block size for a server to report.
- * Note that returning the blocks/blocksavail removes the need
- * to make a second call (to QFSInfo level 0x103) to get this info.
- * UserBlocksAvail is typically less than or equal to BlocksAvail;
- * if no distinction is made, return the same value in each.
- */
- __le64 TotalBlocks;
- __le64 BlocksAvail; /* bfree */
- __le64 UserBlocksAvail; /* bavail */
- /* For undefined Node fields or FSID return -1 */
- __le64 TotalFileNodes;
- __le64 FreeFileNodes;
- __le64 FileSysIdentifier; /* fsid */
- /* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
- /* NB flags can come from FILE_SYSTEM_DEVICE_INFO call */
-} __packed;
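/*
 * Illustrative sketch, not part of the original patch: filling the POSIX
 * filesystem info level from a kstatfs result, following the comments above
 * (counts are in units of BlockSize, undefined fields report -1).  Assumes
 * <linux/statfs.h> for struct kstatfs; example_fill_posix_fs_info() is a
 * hypothetical helper name.
 */
static inline void example_fill_posix_fs_info(struct filesystem_posix_info *out,
					      const struct kstatfs *st)
{
	out->OptimalTransferSize = cpu_to_le32(st->f_bsize);
	out->BlockSize = cpu_to_le32(st->f_bsize);
	out->TotalBlocks = cpu_to_le64(st->f_blocks);
	out->BlocksAvail = cpu_to_le64(st->f_bfree);
	out->UserBlocksAvail = cpu_to_le64(st->f_bavail);
	out->TotalFileNodes = cpu_to_le64(st->f_files);
	out->FreeFileNodes = cpu_to_le64(st->f_ffree);
	/* No single 64-bit fsid available here; report "undefined". */
	out->FileSysIdentifier = cpu_to_le64(-1);
}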
-
-struct smb_version_ops {
- u16 (*get_cmd_val)(struct ksmbd_work *swork);
- int (*init_rsp_hdr)(struct ksmbd_work *swork);
- void (*set_rsp_status)(struct ksmbd_work *swork, __le32 err);
- int (*allocate_rsp_buf)(struct ksmbd_work *work);
- int (*set_rsp_credits)(struct ksmbd_work *work);
- int (*check_user_session)(struct ksmbd_work *work);
- int (*get_ksmbd_tcon)(struct ksmbd_work *work);
- bool (*is_sign_req)(struct ksmbd_work *work, unsigned int command);
- int (*check_sign_req)(struct ksmbd_work *work);
- void (*set_sign_rsp)(struct ksmbd_work *work);
- int (*generate_signingkey)(struct ksmbd_session *sess, struct ksmbd_conn *conn);
- int (*generate_encryptionkey)(struct ksmbd_session *sess);
- bool (*is_transform_hdr)(void *buf);
- int (*decrypt_req)(struct ksmbd_work *work);
- int (*encrypt_resp)(struct ksmbd_work *work);
-};
-
-struct smb_version_cmds {
- int (*proc)(struct ksmbd_work *swork);
-};
-
-static inline size_t
-smb2_hdr_size_no_buflen(struct smb_version_values *vals)
-{
- /* header_size includes the 4-byte RFC1002 length field; strip it */
- return vals->header_size - 4;
-}
-
-int ksmbd_min_protocol(void);
-int ksmbd_max_protocol(void);
-
-int ksmbd_lookup_protocol_idx(char *str);
-
-int ksmbd_verify_smb_message(struct ksmbd_work *work);
-bool ksmbd_smb_request(struct ksmbd_conn *conn);
-
-int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
-
-int ksmbd_init_smb_server(struct ksmbd_work *work);
-
-struct ksmbd_kstat;
-int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
- int info_level,
- struct ksmbd_file *dir,
- struct ksmbd_dir_info *d_info,
- char *search_pattern,
- int (*fn)(struct ksmbd_conn *,
- int,
- struct ksmbd_dir_info *,
- struct ksmbd_kstat *));
-
-int ksmbd_extract_shortname(struct ksmbd_conn *conn,
- const char *longname,
- char *shortname);
-
-int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
-
-int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
-int ksmbd_override_fsids(struct ksmbd_work *work);
-void ksmbd_revert_fsids(struct ksmbd_work *work);
-
-unsigned int ksmbd_server_side_copy_max_chunk_count(void);
-unsigned int ksmbd_server_side_copy_max_chunk_size(void);
-unsigned int ksmbd_server_side_copy_max_total_size(void);
-bool is_asterisk(char *p);
-__le32 smb_map_generic_desired_access(__le32 daccess);
-
-static inline unsigned int get_rfc1002_len(void *buf)
-{
- return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
-}
-
-static inline void inc_rfc1001_len(void *buf, int count)
-{
- be32_add_cpu((__be32 *)buf, count);
-}
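/*
 * Illustrative sketch, not part of the original patch: typical use of the
 * two helpers above.  An SMB message is preceded by a 4-byte big-endian
 * RFC1002-style length field that counts the bytes following it, so the
 * total on-the-wire size is that length plus 4; example_full_msg_size() is
 * a hypothetical helper name.
 */
static inline unsigned int example_full_msg_size(void *rsp_buf, int appended)
{
	/* Account for freshly appended response bytes. */
	inc_rfc1001_len(rsp_buf, appended);

	/* 4-byte length field + the payload it counts. */
	return get_rfc1002_len(rsp_buf) + 4;
}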
-#endif /* __SMB_COMMON_H__ */
diff --git a/fs/ksmbd/smbacl.h b/fs/ksmbd/smbacl.h
deleted file mode 100644
index 73e08cad412b..000000000000
--- a/fs/ksmbd/smbacl.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ */
-/*
- * Copyright (c) International Business Machines Corp., 2007
- * Author(s): Steve French (sfrench@us.ibm.com)
- * Modified by Namjae Jeon (linkinjeon@kernel.org)
- */
-
-#ifndef _SMBACL_H
-#define _SMBACL_H
-
-#include <linux/fs.h>
-#include <linux/namei.h>
-#include <linux/posix_acl.h>
-
-#include "mgmt/tree_connect.h"
-
-#define NUM_AUTHS (6) /* number of authority fields */
-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
-
-/*
- * ACE types - see MS-DTYP 2.4.4.1
- */
-enum {
- ACCESS_ALLOWED,
- ACCESS_DENIED,
-};
-
-/*
- * Security ID types
- */
-enum {
- SIDOWNER = 1,
- SIDGROUP,
- SIDCREATOR_OWNER,
- SIDCREATOR_GROUP,
- SIDUNIX_USER,
- SIDUNIX_GROUP,
- SIDNFS_USER,
- SIDNFS_GROUP,
- SIDNFS_MODE,
-};
-
-/* Revision for ACLs */
-#define SD_REVISION 1
-
-/* Control flags for Security Descriptor */
-#define OWNER_DEFAULTED 0x0001
-#define GROUP_DEFAULTED 0x0002
-#define DACL_PRESENT 0x0004
-#define DACL_DEFAULTED 0x0008
-#define SACL_PRESENT 0x0010
-#define SACL_DEFAULTED 0x0020
-#define DACL_TRUSTED 0x0040
-#define SERVER_SECURITY 0x0080
-#define DACL_AUTO_INHERIT_REQ 0x0100
-#define SACL_AUTO_INHERIT_REQ 0x0200
-#define DACL_AUTO_INHERITED 0x0400
-#define SACL_AUTO_INHERITED 0x0800
-#define DACL_PROTECTED 0x1000
-#define SACL_PROTECTED 0x2000
-#define RM_CONTROL_VALID 0x4000
-#define SELF_RELATIVE 0x8000
-
-/* ACE types - see MS-DTYP 2.4.4.1 */
-#define ACCESS_ALLOWED_ACE_TYPE 0x00
-#define ACCESS_DENIED_ACE_TYPE 0x01
-#define SYSTEM_AUDIT_ACE_TYPE 0x02
-#define SYSTEM_ALARM_ACE_TYPE 0x03
-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
-#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
-#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
-#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
-
-/* ACE flags */
-#define OBJECT_INHERIT_ACE 0x01
-#define CONTAINER_INHERIT_ACE 0x02
-#define NO_PROPAGATE_INHERIT_ACE 0x04
-#define INHERIT_ONLY_ACE 0x08
-#define INHERITED_ACE 0x10
-#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
-#define FAILED_ACCESS_ACE_FLAG 0x80
-
-/*
- * Maximum size of a string representation of a SID:
- *
- * The fields are unsigned values in decimal. So:
- *
- * u8: max 3 bytes in decimal
- * u32: max 10 bytes in decimal
- *
- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
- *
- * For authority field, max is when all 6 values are non-zero and it must be
- * represented in hex. So "-0x" + 12 hex digits.
- *
- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
- */
-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
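/*
 * Illustrative sketch, not part of the original patch: the worst-case string
 * buffer implied by the two constants above.  With SID_MAX_SUB_AUTHORITIES
 * (15) sub-authorities this works out to 21 + 15 * 11 = 186 bytes, NUL
 * terminator included.  EXAMPLE_SID_STRING_MAX is a hypothetical name.
 */
#define EXAMPLE_SID_STRING_MAX \
	(SID_STRING_BASE_SIZE + SID_MAX_SUB_AUTHORITIES * SID_STRING_SUBAUTH_SIZE)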
-
-#define DOMAIN_USER_RID_LE cpu_to_le32(513)
-
-struct ksmbd_conn;
-
-struct smb_ntsd {
- __le16 revision; /* revision level */
- __le16 type;
- __le32 osidoffset;
- __le32 gsidoffset;
- __le32 sacloffset;
- __le32 dacloffset;
-} __packed;
-
-struct smb_sid {
- __u8 revision; /* revision level */
- __u8 num_subauth;
- __u8 authority[NUM_AUTHS];
- __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
-} __packed;
-
-/* size of a struct cifs_sid, sans sub_auth array */
-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
-
-struct smb_acl {
- __le16 revision; /* revision level */
- __le16 size;
- __le32 num_aces;
-} __packed;
-
-struct smb_ace {
- __u8 type;
- __u8 flags;
- __le16 size;
- __le32 access_req;
- struct smb_sid sid; /* ie UUID of user or group who gets these perms */
-} __packed;
-
-struct smb_fattr {
- kuid_t cf_uid;
- kgid_t cf_gid;
- umode_t cf_mode;
- __le32 daccess;
- struct posix_acl *cf_acls;
- struct posix_acl *cf_dacls;
-};
-
-struct posix_ace_state {
- u32 allow;
- u32 deny;
-};
-
-struct posix_user_ace_state {
- union {
- kuid_t uid;
- kgid_t gid;
- };
- struct posix_ace_state perms;
-};
-
-struct posix_ace_state_array {
- int n;
- struct posix_user_ace_state aces[];
-};
-
-/*
- * while processing the nfsv4 ace, this maintains the partial permissions
- * calculated so far:
- */
-
-struct posix_acl_state {
- struct posix_ace_state owner;
- struct posix_ace_state group;
- struct posix_ace_state other;
- struct posix_ace_state everyone;
- struct posix_ace_state mask; /* deny unused in this case */
- struct posix_ace_state_array *users;
- struct posix_ace_state_array *groups;
-};
-
-int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
- int acl_len, struct smb_fattr *fattr);
-int build_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
- struct smb_ntsd *ppntsd, int addition_info,
- __u32 *secdesclen, struct smb_fattr *fattr);
-int init_acl_state(struct posix_acl_state *state, int cnt);
-void free_acl_state(struct posix_acl_state *state);
-void posix_state_to_acl(struct posix_acl_state *state,
- struct posix_acl_entry *pace);
-int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid);
-bool smb_inherit_flags(int flags, bool is_dir);
-int smb_inherit_dacl(struct ksmbd_conn *conn, struct path *path,
- unsigned int uid, unsigned int gid);
-int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
- __le32 *pdaccess, int uid);
-int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
- struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
- bool type_check);
-void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
-void ksmbd_init_domain(u32 *sub_auth);
-
-static inline uid_t posix_acl_uid_translate(struct user_namespace *mnt_userns,
- struct posix_acl_entry *pace)
-{
- kuid_t kuid;
-
- /* If this is an idmapped mount, apply the idmapping. */
- kuid = kuid_into_mnt(mnt_userns, pace->e_uid);
-
- /* Translate the kuid into a userspace id ksmbd would see. */
- return from_kuid(&init_user_ns, kuid);
-}
-
-static inline gid_t posix_acl_gid_translate(struct user_namespace *mnt_userns,
- struct posix_acl_entry *pace)
-{
- kgid_t kgid;
-
- /* If this is an idmapped mount, apply the idmapping. */
- kgid = kgid_into_mnt(mnt_userns, pace->e_gid);
-
- /* Translate the kgid into a userspace id ksmbd would see. */
- return from_kgid(&init_user_ns, kgid);
-}
-
-#endif /* _SMBACL_H */
diff --git a/fs/ksmbd/smbstatus.h b/fs/ksmbd/smbstatus.h
deleted file mode 100644
index 108a8b6ed24a..000000000000
--- a/fs/ksmbd/smbstatus.h
+++ /dev/null
@@ -1,1822 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ */
-/*
- * fs/cifs/smb2status.h
- *
- * SMB2 Status code (network error) definitions
- * Definitions are from MS-ERREF
- *
- * Copyright (c) International Business Machines Corp., 2009,2011
- * Author(s): Steve French (sfrench@us.ibm.com)
- */
-
-/*
- * 0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
- * SEV C N <-------Facility--------> <------Error Status Code------>
- *
- * C is set if "customer defined" error, N bit is reserved and MBZ
- */
-
-#define STATUS_SEVERITY_SUCCESS cpu_to_le32(0x0000)
-#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
-#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
-#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
-
-struct ntstatus {
- /* Facility is the high 12 bits of the following field */
- __le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
- __le32 Code;
-};
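/*
 * Illustrative sketch, not part of the original patch: splitting a raw
 * NTSTATUS value into the fields described by the layout comment above.
 * For STATUS_ACCESS_DENIED (0xC0000022) this yields severity 3 (error),
 * customer bit 0, facility 0 and error code 0x0022;
 * example_split_ntstatus() is a hypothetical helper name.
 */
static inline void example_split_ntstatus(u32 status, u8 *sev, u8 *customer,
					  u16 *facility, u16 *code)
{
	*sev = (status >> 30) & 0x3;		/* SEV: top two bits */
	*customer = (status >> 29) & 0x1;	/* C: customer-defined bit */
	*facility = (status >> 16) & 0xfff;	/* 12-bit facility */
	*code = status & 0xffff;		/* 16-bit error status code */
}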
-
-#define STATUS_SUCCESS 0x00000000
-#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
-#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
-#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
-#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
-#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
-#define STATUS_ABANDONED cpu_to_le32(0x00000080)
-#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
-#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
-#define STATUS_USER_APC cpu_to_le32(0x000000C0)
-#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
-#define STATUS_ALERTED cpu_to_le32(0x00000101)
-#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
-#define STATUS_PENDING cpu_to_le32(0x00000103)
-#define STATUS_REPARSE cpu_to_le32(0x00000104)
-#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
-#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
-#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
-#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
-#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
-#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
-#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
-#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
-#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
-#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
-#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
-#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
-#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
-#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
-#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
-#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
-#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
-#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
-#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
-#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
-#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
-#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
-#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
-#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
-#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
-#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
-#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
-#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
-#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
-#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
-#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
-#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
-#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
-#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
-#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
-#define DBG_CONTINUE cpu_to_le32(0x00010002)
-#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
-#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
-#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
-#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
-#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
-#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
-#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
-#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
-#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
-#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
-#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
-#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
-#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
-#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
-#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
-#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
-#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
-#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
-#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
-#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
-#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
-#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
-#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
-#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
-#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
-#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
-#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
-#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
-#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
-#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
-#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
-#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
-#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
-#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
-#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
-#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
-#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
-#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
-#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
-#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
-#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
-#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
-#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
-#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
-#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
-#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
-#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
-#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
-#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
-#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
-#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
-#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
-#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
-#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
-#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
-#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
-#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
-#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
-#define DBG_CONTROL_C cpu_to_le32(0x40010005)
-#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
-#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
-#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
-#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
-#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
-#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
-#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
-#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
-#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
-#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
-#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
-#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
-#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
-#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
-#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
-#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
-#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
-#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
-#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED \
- cpu_to_le32(0x401E0351)
-#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
-#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
-#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
-#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
-#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
-#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
-#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
-#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
-#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
-#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
-#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
-#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
-#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
-#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
-#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
-#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
-#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
-#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
-#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
-#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
-#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
-#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
-#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
-#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
-#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
-#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
-#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
-#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
-#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
-#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
-#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
-#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
-#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
-#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
-#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
-#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
-#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
-#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
-#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
-#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
-#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
-#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
-#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
-#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
-#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
-#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
-#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
-#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
-#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
-#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
-#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
-#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
-#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
-#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
-#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
-#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
-#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
-#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
-#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
-#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
-#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED \
- cpu_to_le32(0x801B00EB)
-#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
-#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
-#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
-#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
-#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
-#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
-#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
-#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
-#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
-#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
-#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
-#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
-#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
-#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
-#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
-#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
-#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
-#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
-#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
-#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
-#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
-#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
-#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
-#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
-#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
-#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
-#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
-#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
-#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
-#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
-#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
-#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
-#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
-#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
-#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
-#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
-#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
-#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
-#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
-#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
-#define STATUS_UNWIND cpu_to_le32(0xC0000027)
-#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
-#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
-#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
-#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
-#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
-#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
-#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
-#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
-#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
-#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
-#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
-#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
-#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
-#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
-#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
-#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
-#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
-#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
-#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
-#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
-#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
-#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
-#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
-#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
-#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
-#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
-#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
-#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
-#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
-#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
-#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
-#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
-#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
-#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
-#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
-#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
-#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
-#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
-#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
-#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
-#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
-#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
-#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
-#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
-#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
-#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
-#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
-#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
-#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
-#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
-#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
-#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
-#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
-#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
-#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
-#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
-#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
-#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
-#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
-#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
-#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
-#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
-#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
-#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
-#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
-#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
-#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
-#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
-#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
-#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
-#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
-#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
-#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
-#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
-#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
-#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
-#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
-#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
-#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
-#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
-#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
-#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
-#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
-#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
-#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
-#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
-#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
-#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
-#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
-#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
-#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
-#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
-#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
-#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
-#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
-#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
-#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
-#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
-#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
-#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
-#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
-#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
-#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
-#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
-#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
-#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
-#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
-#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
-#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
-#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
-#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
-#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
-#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
-#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
-#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
-#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
-#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
-#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
-#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
-#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
-#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
-#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
-#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
-#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
-#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
-#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
-#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
-#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
-#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
-#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
-#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
-#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
-#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
-#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
-#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
-#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
-#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
-#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
-#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
-#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
-#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
-#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
-#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
-#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
-#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
-#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
-#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
-#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
-#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
-#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
-#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
-#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
-#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
-#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
-#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
-#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
-#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
-#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
-#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
-#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
-#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
-#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
-#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
-#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
-#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
-#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
-#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
-#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
-#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
-#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
-#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
-#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
-#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
-#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
-#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
-#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
-#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
-#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
-#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
-#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
-#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
-#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
-#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
-#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
-#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
-#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
-#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
-#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
-#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
-#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
-#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
-#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
-#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
-#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
-#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
-#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
-#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
-#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
-#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
-#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
-#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
-#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
-#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
-#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
-#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
-#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
-#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
-#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
-#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
-#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
-#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
-#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
-#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
-#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
-#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
-#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
-#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
-#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
-#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
-#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
-#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
-#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
-#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
-#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
-#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
-#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
-#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
-#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
-#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
-#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
-#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
-#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
-#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
-#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
-#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
-#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
-#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
-#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
-#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
-#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
-#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
-#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
-#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
-#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
-#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
-#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
-#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
-#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
-#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
-#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
-#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
-#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
-#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
-#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
-#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
-#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
-#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
-#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
-#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
-#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
-#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
-#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
-#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
-#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
-#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
-#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
-#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
-#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
-#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
-#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
-#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
-#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
-#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
-#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
-#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
-#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
-#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
-#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
-#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
-#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
-#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
-#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
-#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
-#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
-#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
-#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
-#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
-#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
-#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
-#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
-#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
-#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
-#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
-#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
-#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
-#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
-#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
-#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
-#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
-#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
-#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
-#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
-#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
-#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
-#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
-#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
-#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
-#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
-#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
-#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
-#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
-#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
-#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
-#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
-#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
-#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
-#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
-#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
-#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
-#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
-#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
-#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
-#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
-#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
-#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
-#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
-#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
-#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
-#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
-#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
-#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
-#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
-#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
-#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
-#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
-#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
-#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
-#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
-#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
-#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
-#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
-#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
-#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
-#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
-#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
-#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
-#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
-#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
-#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
-#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
-#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
-#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
-#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
-#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
-#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
-#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
-#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
-#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
-#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
-#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
-#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
-#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
-#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
-#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
-#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
-#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
-#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
-#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
-#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
-#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
-#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
-#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
-#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
-#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
-#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
-#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
-#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
-#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
-#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
-#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
-#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
-#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
-#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
-#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
-#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
-#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
-#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
-#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
-#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
-#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
-#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
-#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
-#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
-#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
-#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
-#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
-#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
-#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
-#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
-#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
-#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
-#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
-#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
-#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
-#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
-#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
-#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
-#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
-#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
-#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
-#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
-#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
-#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
-#define STATUS_RETRY cpu_to_le32(0xC000022D)
-#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
-#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
-#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
-#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
-#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
-#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
-#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
-#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
-#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
-#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
-#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
-#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
-#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
-#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
-#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
-#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
-#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
-#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
-#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
-#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
-#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
-#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
-#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
-#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
-#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
-#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
-#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
-#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
-#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
-#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
-#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
-#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
-#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
-#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
-#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
-#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
-#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
-#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
-#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
-#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
-#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
-#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
-#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
-#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
-#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
-#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
-#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
-#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
-#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
-#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
-#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
-#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
-#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
-#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
-#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
-#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
-#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
-#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
-#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
-#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
-#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
-#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
-#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
-#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
-#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
-#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
-#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
-#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
-#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
-#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
-#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
-#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
-#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
-#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
-#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
-#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
-#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
-#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
-#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
-#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
-#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
-#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
-#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
-#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
-#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
-#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
-#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
-#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
-#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
-#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
-#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
-#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
-#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
-#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
-#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
-#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
-#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
-#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
-#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
-#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
-#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
-#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
-#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
-#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
-#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
-#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
-#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
-#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
-#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
-#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
-#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
-#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
-#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
-#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
-#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
-#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
-#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
-#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
-#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
-#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
-#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
-#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
-#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
-#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
-#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
-#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
-#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
-#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
-#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
-#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
-#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
-#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
-#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
-#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
-#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
-#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
-#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
-#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
-#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
-#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
-#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
-#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
-#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
-#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
-#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
-#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
-#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
-#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
-#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
-#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER \
- cpu_to_le32(0xC00002DB)
-#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
-#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
-#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
-#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
-#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
-#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
-#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
-#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
-#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
-#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
-#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
-#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
-#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
-#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
-#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
-#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
-#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
-#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
-#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
-#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
-#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
-#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
-#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
-#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
-#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
-#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
-#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
-#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
-#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
-#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
-#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
-#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
-#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
-#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
-#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
-#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
-#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
-#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
-#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
-#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
-#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
-#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
-#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
-#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
-#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
-#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
-#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
-#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
-#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
-#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
-#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
-#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
-#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
-#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
-#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
-#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
-#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
-#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
-#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
-#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
-#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
-#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
-#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
-#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
-#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
-#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
-#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
-#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
-#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
-#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
-#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
-#define STATUS_MCA_OCCURRED cpu_to_le32(0xC000036A)
-#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
-#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
-#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
-#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
-#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
-#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
-#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
-#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
-#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
-#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
-#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
-#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
-#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
-#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
-#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
-#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
-#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
-#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
-#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
-#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
-#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
-#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
-#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
-#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
-#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
-#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
-#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
-#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
-#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
-#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
-#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
-#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
-#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
-#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
-#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
-#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
-#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
-#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
-#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
-#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
-#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
-#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
-#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
-#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
-#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
-#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
-#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE \
- cpu_to_le32(0xC0000416)
-#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
-#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
-#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
-#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
-#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
-#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
-#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
-#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
-#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
-#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
-#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
-#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
-#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
-#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
-#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
-#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
-#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
-#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
-#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
-#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
-#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
-#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
-#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
-#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
-#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
-#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
-#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
-#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
-#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
-#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
-#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
-#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
-#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
-#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
-#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
-#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
-#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
-#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
-#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
-#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
-#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
-#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
-#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
-#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
-#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
-#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED \
- cpu_to_le32(0xC000070C)
-#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED \
- cpu_to_le32(0xC000070D)
-#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED \
- cpu_to_le32(0xC000070E)
-#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
-#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
-#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
-#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
-#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
-#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
-#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
-#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
-#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
-#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
-#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
-#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
-#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
-#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
-#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
-#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
-#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
-#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
-#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
-#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
-#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
-#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
-#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
-#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
-#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
-#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
-#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
-#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
-#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
-#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
-#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
-#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
-#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
-#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
-#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
-#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
-#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
-#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
-#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
-#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED \
- cpu_to_le32(0xC000A080)
-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR \
- cpu_to_le32(0xC000A081)
-#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
-#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
-#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
-#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
-#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
-#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
-#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
-#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
-#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
-#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
-#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
-#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
-#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
-#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
-#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
-#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
-#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
-#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
-#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
-#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
-#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
-#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
-#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
-#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
-#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
-#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
-#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
-#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
-#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
-#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
-#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
-#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
-#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
-#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
-#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
-#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
-#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
-#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
-#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
-#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
-#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
-#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
-#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
-#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
-#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
-#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
-#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
-#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
-#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
-#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
-#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
-#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
-#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
-#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
-#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
-#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
-#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
-#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
-#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
-#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
-#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
-#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
-#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
-#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
-#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
-#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
-#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
-#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
-#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
-#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
-#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
-#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
-#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
-#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
-#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
-#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
-#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
-#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
-#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
-#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
-#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
-#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
-#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
-#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
-#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
-#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
-#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
-#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
-#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
-#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
-#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
-#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
-#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
-#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
-#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
-#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
-#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
-#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
-#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
-#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
-#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
-#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
-#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
-#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
-#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
-#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
-#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
-#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
-#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
-#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
-#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
-#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
-#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
-#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
-#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
-#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
-#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
-#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
-#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
-#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
-#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
-#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
-#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
-#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
-#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
-#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
-#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
-#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
-#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
-#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
-#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
-#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
-#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
-#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
-#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
-#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
-#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
-#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
-#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
-#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
-#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
-#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
-#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
-#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
-#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
-#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
-#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
-#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
-#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
-#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
-#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
-#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
-#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
-#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
-#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
-#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
-#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
-#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
-#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
-#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
-#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
-#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
-#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
-#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
-#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
-#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
-#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
-#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
-#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
-#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
-#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
-#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
-#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
-#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
-#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
-#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
-#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
-#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
-#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
-#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
-#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
-#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
-#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
-#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
-#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
-#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
-#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
-#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
-#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
-#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
-#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
-#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
-#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
-#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
-#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
-#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
-#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
-#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
-#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
-#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
-#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
-#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
-#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
-#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
-#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
-#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
-#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
-#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
-#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
-#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
-#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
-#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
-#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
-#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
-#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
-#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
-#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
-#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
-#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
-#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
-#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
-#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
-#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
-#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
-#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
-#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
-#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
-#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
-#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
-#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
-#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
-#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
-#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY \
- cpu_to_le32(0xC0150012)
-#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
-#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
-#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
-#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
-#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
-#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
-#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
-#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT \
- cpu_to_le32(0xC015001C)
-#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
-#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
-#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
-#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
-#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
-#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
-#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
-#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
-#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
-#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
-#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
-#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
-#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
-#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
-#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
-#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
-#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
-#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
-#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
-#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
-#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
-#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
-#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
-#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
-#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
-#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
-#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
-#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
-#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
-#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
-#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
-#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
-#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
-#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
-#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
-#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
-#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION \
- cpu_to_le32(0xC0190024)
-#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
-#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
-#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
-#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
-#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
-#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
-#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
-#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
-#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
-#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
-#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
-#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
-#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
-#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
-#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
-#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
-#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
-#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
-#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
-#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
-#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
-#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
-#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
-#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
-#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
-#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
-#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
-#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
-#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
-#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
-#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
-#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
-#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
-#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION \
- cpu_to_le32(0xC0190053)
-#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
-#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
-#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
-#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
-#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
-#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
-#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
-#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
-#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
-#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
-#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
-#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
-#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
-#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
-#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
-#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
-#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
-#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
-#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
-#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
-#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
-#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
-#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
-#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
-#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
-#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
-#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
-#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
-#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
-#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
-#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
-#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
-#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
-#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
-#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
-#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
-#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
-#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
-#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
-#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
-#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
-#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
-#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
-#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
-#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
-#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
-#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
-#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
-#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
-#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
-#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
-#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
-#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
-#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
-#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
-#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
-#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
-#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
-#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
-#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
-#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
-#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
-#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
-#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
-#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
-#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
-#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
-#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
-#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
-#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
-#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
-#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
-#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
-#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
-#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
-#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
-#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
-#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
-#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
-#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
-#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
-#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
-#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
-#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
-#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
-#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
-#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
-#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
-#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
-#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK \
- cpu_to_le32(0xC01D0006)
-#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK \
- cpu_to_le32(0xC01D0007)
-#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
-#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
-#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
-#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
-#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
-#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
-#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
-#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
-#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
-#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
-#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
-#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
-#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
-#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
-#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
-#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
-#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
-#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
-#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
-#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
-#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
-#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
-#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
-#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
-#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
-#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
-#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED \
- cpu_to_le32(0xC01E0302)
-#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
-#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
-#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
-#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
-#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
-#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE \
- cpu_to_le32(0xC01E0310)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE \
- cpu_to_le32(0xC01E0311)
-#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
-#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
-#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
-#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
-#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
-#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
-#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET \
- cpu_to_le32(0xC01E031B)
-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
-#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
-#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
-#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
-#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
-#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
-#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION \
- cpu_to_le32(0xC01E0325)
-#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES \
- cpu_to_le32(0xC01E0326)
-#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE \
- cpu_to_le32(0xC01E0328)
-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET \
- cpu_to_le32(0xC01E0329)
-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE \
- cpu_to_le32(0xC01E032E)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
-#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
-#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
-#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
-#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
-#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER \
- cpu_to_le32(0xC01E0334)
-#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
-#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
-#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
-#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
-#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
-#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
-#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
-#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
-#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
-#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
-#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
-#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
-#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT \
- cpu_to_le32(0xC01E0341)
-#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
-#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
-#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION \
- cpu_to_le32(0xC01E0345)
-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED \
- cpu_to_le32(0xC01E0346)
-#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
-#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
-#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
-#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON \
- cpu_to_le32(0xC01E034D)
-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
-#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
-#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS \
- cpu_to_le32(0xC01E0350)
-#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
-#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
-#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
-#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
-#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
-#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN \
- cpu_to_le32(0xC01E0357)
-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT \
- cpu_to_le32(0xC01E0358)
-#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
-#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION \
- cpu_to_le32(0xC01E035A)
-#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
-#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
-#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED \
- cpu_to_le32(0xC01E0400)
-#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
-#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
-#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
-#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
-#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
-#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
-#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
-#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
-#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
-#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS \
- cpu_to_le32(0xC01E051C)
-#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
-#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS \
- cpu_to_le32(0xC01E051F)
-#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
-#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST \
- cpu_to_le32(0xC01E0521)
-#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
-#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
-#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
-#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
-#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
-#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
-#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME \
- cpu_to_le32(0xC01E0506)
-#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP \
- cpu_to_le32(0xC01E0507)
-#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED \
- cpu_to_le32(0xC01E0508)
-#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
-#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
-#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
-#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE \
- cpu_to_le32(0xC01E050D)
-#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
-#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
-#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
-#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
-#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA \
- cpu_to_le32(0xC01E0515)
-#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
-#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
-#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE \
- cpu_to_le32(0xC01E0518)
-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS \
- cpu_to_le32(0xC01E051A)
-#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS \
- cpu_to_le32(0xC01E051B)
-#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
-#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
-#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
-#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
-#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
-#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
-#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE \
- cpu_to_le32(0xC01E0586)
-#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING \
- cpu_to_le32(0xC01E0587)
-#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
-#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
-#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
-#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
-#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME \
- cpu_to_le32(0xC01E05E1)
-#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP \
- cpu_to_le32(0xC01E05E2)
-#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
-#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
-#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE \
- cpu_to_le32(0xC01E05E5)
-#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
-#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
-#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
-#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
-#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
-#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
-#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
-#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
-#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
-#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
-#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
-#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
-#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
-#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
-#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
-#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
-#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
-#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
-#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
-#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
-#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
-#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
-#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
-#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
-#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
-#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
-#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
-#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
-#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
-#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
-#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
-#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
-#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
-#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
-#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
-#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
-#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
-#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
-#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
-#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
-#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
-#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
-#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
-#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
-#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
-#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
-#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
-#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
-#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
-#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
-#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
-#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
-#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
-#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
-#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
-#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
-#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
-#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
-#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
-#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
-#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
-#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
-#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
-#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
-#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
-#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
-#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
-#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
-#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
-#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
-#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
-#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
-#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
-#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
-#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
-#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
-#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
-#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
-#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
-#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
-#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
-#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
-#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
-#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
-#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
-#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
-#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
-#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
-#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
-#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
-#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
-#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
-#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
-#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
-#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
-#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
-#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
-#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
-#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
-#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
-#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
-#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
-#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
-#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
-#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
-#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
-#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
-#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
-#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
-#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
-#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
-#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
-#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
-#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
-#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
-#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
-#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
-#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
-#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
-#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
-#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
-#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
-#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
-#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
-#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
-#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
-#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
-#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
-#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
-#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
-#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
-#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
-#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
-#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
-#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
-#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
-#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
-#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
-#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
-#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
-#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
-
-#define STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP cpu_to_le32(0xC05D0000)
-#define STATUS_INVALID_LOCK_RANGE cpu_to_le32(0xC00001A1)
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
deleted file mode 100644
index a2fd5a4d4cd5..000000000000
--- a/fs/ksmbd/transport_rdma.c
+++ /dev/null
@@ -1,2074 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2017, Microsoft Corporation.
- * Copyright (C) 2018, LG Electronics.
- *
- * Author(s): Long Li <longli@microsoft.com>,
- * Hyunchul Lee <hyc.lee@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- */
-
-#define SUBMOD_NAME "smb_direct"
-
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_cm.h>
-#include <rdma/rw.h>
-
-#include "glob.h"
-#include "connection.h"
-#include "smb_common.h"
-#include "smbstatus.h"
-#include "transport_rdma.h"
-
-#define SMB_DIRECT_PORT 5445
-
-#define SMB_DIRECT_VERSION_LE cpu_to_le16(0x0100)
-
-/* SMB_DIRECT negotiation timeout in seconds */
-#define SMB_DIRECT_NEGOTIATE_TIMEOUT 120
-
-#define SMB_DIRECT_MAX_SEND_SGES 8
-#define SMB_DIRECT_MAX_RECV_SGES 1
-
-/*
- * Default maximum number of RDMA read/write operations outstanding on this
- * connection. This value may be decreased during QP creation if the hardware
- * imposes a lower limit.
- */
-#define SMB_DIRECT_CM_INITIATOR_DEPTH 8
-
-/* Maximum number of retries on data transfer operations */
-#define SMB_DIRECT_CM_RETRY 6
-/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
-#define SMB_DIRECT_CM_RNR_RETRY 0
-
-/*
- * User-configurable initial values per SMB_DIRECT transport connection,
- * as defined in [MS-SMBD] 3.1.1.1.
- * These may change after SMB_DIRECT negotiation.
- */
-/* The local peer's maximum number of credits to grant to the peer */
-static int smb_direct_receive_credit_max = 255;
-
-/* The number of credits the remote peer requests from the local peer */
-static int smb_direct_send_credit_target = 255;
-
-/* The maximum single message size that can be sent to the remote peer */
-static int smb_direct_max_send_size = 8192;
-
-/* The maximum fragmented upper-layer payload receive size supported */
-static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
-
-/* The maximum single-message size which can be received */
-static int smb_direct_max_receive_size = 8192;
-
-static int smb_direct_max_read_write_size = 1024 * 1024;
-
-static int smb_direct_max_outstanding_rw_ops = 8;
-
-static struct smb_direct_listener {
- struct rdma_cm_id *cm_id;
-} smb_direct_listener;
-
-static struct workqueue_struct *smb_direct_wq;
-
-enum smb_direct_status {
- SMB_DIRECT_CS_NEW = 0,
- SMB_DIRECT_CS_CONNECTED,
- SMB_DIRECT_CS_DISCONNECTING,
- SMB_DIRECT_CS_DISCONNECTED,
-};
-
-struct smb_direct_transport {
- struct ksmbd_transport transport;
-
- enum smb_direct_status status;
- bool full_packet_received;
- wait_queue_head_t wait_status;
-
- struct rdma_cm_id *cm_id;
- struct ib_cq *send_cq;
- struct ib_cq *recv_cq;
- struct ib_pd *pd;
- struct ib_qp *qp;
-
- int max_send_size;
- int max_recv_size;
- int max_fragmented_send_size;
- int max_fragmented_recv_size;
- int max_rdma_rw_size;
-
- spinlock_t reassembly_queue_lock;
- struct list_head reassembly_queue;
- int reassembly_data_length;
- int reassembly_queue_length;
- int first_entry_offset;
- wait_queue_head_t wait_reassembly_queue;
-
- spinlock_t receive_credit_lock;
- int recv_credits;
- int count_avail_recvmsg;
- int recv_credit_max;
- int recv_credit_target;
-
- spinlock_t recvmsg_queue_lock;
- struct list_head recvmsg_queue;
-
- spinlock_t empty_recvmsg_queue_lock;
- struct list_head empty_recvmsg_queue;
-
- int send_credit_target;
- atomic_t send_credits;
- spinlock_t lock_new_recv_credits;
- int new_recv_credits;
- atomic_t rw_avail_ops;
-
- wait_queue_head_t wait_send_credits;
- wait_queue_head_t wait_rw_avail_ops;
-
- mempool_t *sendmsg_mempool;
- struct kmem_cache *sendmsg_cache;
- mempool_t *recvmsg_mempool;
- struct kmem_cache *recvmsg_cache;
-
- wait_queue_head_t wait_send_payload_pending;
- atomic_t send_payload_pending;
- wait_queue_head_t wait_send_pending;
- atomic_t send_pending;
-
- struct delayed_work post_recv_credits_work;
- struct work_struct send_immediate_work;
- struct work_struct disconnect_work;
-
- bool negotiation_requested;
-};
-
-#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
-
-enum {
- SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
- SMB_DIRECT_MSG_DATA_TRANSFER
-};
-
-static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;
-
-struct smb_direct_send_ctx {
- struct list_head msg_list;
- int wr_cnt;
- bool need_invalidate_rkey;
- unsigned int remote_key;
-};
-
-struct smb_direct_sendmsg {
- struct smb_direct_transport *transport;
- struct ib_send_wr wr;
- struct list_head list;
- int num_sge;
- struct ib_sge sge[SMB_DIRECT_MAX_SEND_SGES];
- struct ib_cqe cqe;
- u8 packet[];
-};
-
-struct smb_direct_recvmsg {
- struct smb_direct_transport *transport;
- struct list_head list;
- int type;
- struct ib_sge sge;
- struct ib_cqe cqe;
- bool first_segment;
- u8 packet[];
-};
-
-struct smb_direct_rdma_rw_msg {
- struct smb_direct_transport *t;
- struct ib_cqe cqe;
- struct completion *completion;
- struct rdma_rw_ctx rw_ctx;
- struct sg_table sgt;
- struct scatterlist sg_list[];
-};
-
-static inline int get_buf_page_count(void *buf, int size)
-{
- return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
- (uintptr_t)buf / PAGE_SIZE;
-}
-
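Note on the arithmetic in get_buf_page_count() above: the count includes every page the buffer touches, so a buffer that straddles a page boundary counts both pages even when it is only a few bytes long. A minimal user-space sketch of the same computation (hypothetical demo names, PAGE_SIZE assumed to be 4096):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096UL

/* Same computation as get_buf_page_count(), on plain integers. */
static int demo_buf_page_count(uintptr_t buf, size_t size)
{
	return (buf + size + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE -
	       buf / DEMO_PAGE_SIZE;
}

int main(void)
{
	assert(demo_buf_page_count(0x1ff0, 32) == 2); /* straddles a boundary */
	assert(demo_buf_page_count(0x2000, 32) == 1); /* page aligned */
	assert(demo_buf_page_count(0x2000, 0) == 0);  /* empty buffer */
	return 0;
}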
-static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
-static void smb_direct_post_recv_credits(struct work_struct *work);
-static int smb_direct_post_send_data(struct smb_direct_transport *t,
- struct smb_direct_send_ctx *send_ctx,
- struct kvec *iov, int niov,
- int remaining_data_length);
-
-static inline struct smb_direct_transport *
-smb_trans_direct_transfort(struct ksmbd_transport *t)
-{
- return container_of(t, struct smb_direct_transport, transport);
-}
-
-static inline void *
-smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
-{
- return (void *)recvmsg->packet;
-}
-
-static inline bool is_receive_credit_post_required(int receive_credits,
- int avail_recvmsg_count)
-{
- return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
- avail_recvmsg_count >= (receive_credits >> 2);
-}
-
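A worked reading of the expression above, assuming the module defaults: with smb_direct_receive_credit_max = 255, refilling is deferred until the peer is down to 255 >> 3 = 31 receive credits or fewer, and is only triggered if at least receive_credits >> 2 receive buffers are free to repost (about 7 when 31 credits remain), so credit replenishment happens in batches rather than one buffer at a time.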
-static struct smb_direct_recvmsg *
-get_free_recvmsg(struct smb_direct_transport *t)
-{
- struct smb_direct_recvmsg *recvmsg = NULL;
-
- spin_lock(&t->recvmsg_queue_lock);
- if (!list_empty(&t->recvmsg_queue)) {
- recvmsg = list_first_entry(&t->recvmsg_queue,
- struct smb_direct_recvmsg,
- list);
- list_del(&recvmsg->list);
- }
- spin_unlock(&t->recvmsg_queue_lock);
- return recvmsg;
-}
-
-static void put_recvmsg(struct smb_direct_transport *t,
- struct smb_direct_recvmsg *recvmsg)
-{
- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
- recvmsg->sge.length, DMA_FROM_DEVICE);
-
- spin_lock(&t->recvmsg_queue_lock);
- list_add(&recvmsg->list, &t->recvmsg_queue);
- spin_unlock(&t->recvmsg_queue_lock);
-}
-
-static struct smb_direct_recvmsg *
-get_empty_recvmsg(struct smb_direct_transport *t)
-{
- struct smb_direct_recvmsg *recvmsg = NULL;
-
- spin_lock(&t->empty_recvmsg_queue_lock);
- if (!list_empty(&t->empty_recvmsg_queue)) {
- recvmsg = list_first_entry(&t->empty_recvmsg_queue,
- struct smb_direct_recvmsg, list);
- list_del(&recvmsg->list);
- }
- spin_unlock(&t->empty_recvmsg_queue_lock);
- return recvmsg;
-}
-
-static void put_empty_recvmsg(struct smb_direct_transport *t,
- struct smb_direct_recvmsg *recvmsg)
-{
- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
- recvmsg->sge.length, DMA_FROM_DEVICE);
-
- spin_lock(&t->empty_recvmsg_queue_lock);
- list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
- spin_unlock(&t->empty_recvmsg_queue_lock);
-}
-
-static void enqueue_reassembly(struct smb_direct_transport *t,
- struct smb_direct_recvmsg *recvmsg,
- int data_length)
-{
- spin_lock(&t->reassembly_queue_lock);
- list_add_tail(&recvmsg->list, &t->reassembly_queue);
- t->reassembly_queue_length++;
- /*
- * Make sure reassembly_data_length is updated after the list and
- * reassembly_queue_length are updated. On the dequeue side,
- * reassembly_data_length is checked without a lock to determine
- * whether reassembly_queue_length and the list are up to date.
- */
- virt_wmb();
- t->reassembly_data_length += data_length;
- spin_unlock(&t->reassembly_queue_lock);
-}
-
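The virt_wmb() in enqueue_reassembly() pairs with the virt_rmb() in smb_direct_read(): the entry is linked into the queue before reassembly_data_length advertises it, so a reader that observes a non-zero length is guaranteed to see the entry. A stripped-down sketch of that pairing (illustrative only; hypothetical names, assuming the kernel's barrier and READ_ONCE helpers):

struct demo_reassembly {
	void *entry;		/* stands in for the list linkage */
	int data_length;	/* stands in for reassembly_data_length */
};

static void demo_publish(struct demo_reassembly *q, void *e, int len)
{
	q->entry = e;		/* 1. make the entry reachable */
	virt_wmb();		/* 2. order the link before the counter */
	q->data_length += len;	/* 3. advertise the new data */
}

static void *demo_consume(struct demo_reassembly *q)
{
	if (!READ_ONCE(q->data_length))
		return NULL;	/* nothing advertised yet */
	virt_rmb();		/* pairs with virt_wmb() in demo_publish() */
	return q->entry;	/* the linked entry is now visible */
}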
-static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
-{
- if (!list_empty(&t->reassembly_queue))
- return list_first_entry(&t->reassembly_queue,
- struct smb_direct_recvmsg, list);
- else
- return NULL;
-}
-
-static void smb_direct_disconnect_rdma_work(struct work_struct *work)
-{
- struct smb_direct_transport *t =
- container_of(work, struct smb_direct_transport,
- disconnect_work);
-
- if (t->status == SMB_DIRECT_CS_CONNECTED) {
- t->status = SMB_DIRECT_CS_DISCONNECTING;
- rdma_disconnect(t->cm_id);
- }
-}
-
-static void
-smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
-{
- if (t->status == SMB_DIRECT_CS_CONNECTED)
- queue_work(smb_direct_wq, &t->disconnect_work);
-}
-
-static void smb_direct_send_immediate_work(struct work_struct *work)
-{
- struct smb_direct_transport *t = container_of(work,
- struct smb_direct_transport, send_immediate_work);
-
- if (t->status != SMB_DIRECT_CS_CONNECTED)
- return;
-
- smb_direct_post_send_data(t, NULL, NULL, 0, 0);
-}
-
-static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
-{
- struct smb_direct_transport *t;
- struct ksmbd_conn *conn;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- return NULL;
-
- t->cm_id = cm_id;
- cm_id->context = t;
-
- t->status = SMB_DIRECT_CS_NEW;
- init_waitqueue_head(&t->wait_status);
-
- spin_lock_init(&t->reassembly_queue_lock);
- INIT_LIST_HEAD(&t->reassembly_queue);
- t->reassembly_data_length = 0;
- t->reassembly_queue_length = 0;
- init_waitqueue_head(&t->wait_reassembly_queue);
- init_waitqueue_head(&t->wait_send_credits);
- init_waitqueue_head(&t->wait_rw_avail_ops);
-
- spin_lock_init(&t->receive_credit_lock);
- spin_lock_init(&t->recvmsg_queue_lock);
- INIT_LIST_HEAD(&t->recvmsg_queue);
-
- spin_lock_init(&t->empty_recvmsg_queue_lock);
- INIT_LIST_HEAD(&t->empty_recvmsg_queue);
-
- init_waitqueue_head(&t->wait_send_payload_pending);
- atomic_set(&t->send_payload_pending, 0);
- init_waitqueue_head(&t->wait_send_pending);
- atomic_set(&t->send_pending, 0);
-
- spin_lock_init(&t->lock_new_recv_credits);
-
- INIT_DELAYED_WORK(&t->post_recv_credits_work,
- smb_direct_post_recv_credits);
- INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
- INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);
-
- conn = ksmbd_conn_alloc();
- if (!conn)
- goto err;
- conn->transport = KSMBD_TRANS(t);
- KSMBD_TRANS(t)->conn = conn;
- KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
- return t;
-err:
- kfree(t);
- return NULL;
-}
-
-static void free_transport(struct smb_direct_transport *t)
-{
- struct smb_direct_recvmsg *recvmsg;
-
- wake_up_interruptible(&t->wait_send_credits);
-
- ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
- wait_event(t->wait_send_payload_pending,
- atomic_read(&t->send_payload_pending) == 0);
- wait_event(t->wait_send_pending,
- atomic_read(&t->send_pending) == 0);
-
- cancel_work_sync(&t->disconnect_work);
- cancel_delayed_work_sync(&t->post_recv_credits_work);
- cancel_work_sync(&t->send_immediate_work);
-
- if (t->qp) {
- ib_drain_qp(t->qp);
- ib_destroy_qp(t->qp);
- }
-
- ksmbd_debug(RDMA, "drain the reassembly queue\n");
- do {
- spin_lock(&t->reassembly_queue_lock);
- recvmsg = get_first_reassembly(t);
- if (recvmsg) {
- list_del(&recvmsg->list);
- spin_unlock(&t->reassembly_queue_lock);
- put_recvmsg(t, recvmsg);
- } else {
- spin_unlock(&t->reassembly_queue_lock);
- }
- } while (recvmsg);
- t->reassembly_data_length = 0;
-
- if (t->send_cq)
- ib_free_cq(t->send_cq);
- if (t->recv_cq)
- ib_free_cq(t->recv_cq);
- if (t->pd)
- ib_dealloc_pd(t->pd);
- if (t->cm_id)
- rdma_destroy_id(t->cm_id);
-
- smb_direct_destroy_pools(t);
- ksmbd_conn_free(KSMBD_TRANS(t)->conn);
- kfree(t);
-}
-
-static struct smb_direct_sendmsg *
-smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
-{
- struct smb_direct_sendmsg *msg;
-
- msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
- msg->transport = t;
- INIT_LIST_HEAD(&msg->list);
- msg->num_sge = 0;
- return msg;
-}
-
-static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
- struct smb_direct_sendmsg *msg)
-{
- int i;
-
- if (msg->num_sge > 0) {
- ib_dma_unmap_single(t->cm_id->device,
- msg->sge[0].addr, msg->sge[0].length,
- DMA_TO_DEVICE);
- for (i = 1; i < msg->num_sge; i++)
- ib_dma_unmap_page(t->cm_id->device,
- msg->sge[i].addr, msg->sge[i].length,
- DMA_TO_DEVICE);
- }
- mempool_free(msg, t->sendmsg_mempool);
-}
-
-static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
-{
- switch (recvmsg->type) {
- case SMB_DIRECT_MSG_DATA_TRANSFER: {
- struct smb_direct_data_transfer *req =
- (struct smb_direct_data_transfer *)recvmsg->packet;
- struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
- + le32_to_cpu(req->data_offset) - 4);
- ksmbd_debug(RDMA,
- "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
- le16_to_cpu(req->credits_granted),
- le16_to_cpu(req->credits_requested),
- req->data_length, req->remaining_data_length,
- hdr->ProtocolId, hdr->Command);
- break;
- }
- case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
- struct smb_direct_negotiate_req *req =
- (struct smb_direct_negotiate_req *)recvmsg->packet;
- ksmbd_debug(RDMA,
- "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
- le16_to_cpu(req->min_version),
- le16_to_cpu(req->max_version),
- le16_to_cpu(req->credits_requested),
- le32_to_cpu(req->preferred_send_size),
- le32_to_cpu(req->max_receive_size),
- le32_to_cpu(req->max_fragmented_size));
- if (le16_to_cpu(req->min_version) > 0x0100 ||
- le16_to_cpu(req->max_version) < 0x0100)
- return -EOPNOTSUPP;
- if (le16_to_cpu(req->credits_requested) <= 0 ||
- le32_to_cpu(req->max_receive_size) <= 128 ||
- le32_to_cpu(req->max_fragmented_size) <=
- 128 * 1024)
- return -ECONNABORTED;
-
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct smb_direct_recvmsg *recvmsg;
- struct smb_direct_transport *t;
-
- recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
- t = recvmsg->transport;
-
- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
- if (wc->status != IB_WC_WR_FLUSH_ERR) {
- pr_err("Recv error. status='%s (%d)' opcode=%d\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->opcode);
- smb_direct_disconnect_rdma_connection(t);
- }
- put_empty_recvmsg(t, recvmsg);
- return;
- }
-
- ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->opcode);
-
- ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
- recvmsg->sge.length, DMA_FROM_DEVICE);
-
- switch (recvmsg->type) {
- case SMB_DIRECT_MSG_NEGOTIATE_REQ:
- if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
- put_empty_recvmsg(t, recvmsg);
- return;
- }
- t->negotiation_requested = true;
- t->full_packet_received = true;
- wake_up_interruptible(&t->wait_status);
- break;
- case SMB_DIRECT_MSG_DATA_TRANSFER: {
- struct smb_direct_data_transfer *data_transfer =
- (struct smb_direct_data_transfer *)recvmsg->packet;
- unsigned int data_length;
- int avail_recvmsg_count, receive_credits;
-
- if (wc->byte_len <
- offsetof(struct smb_direct_data_transfer, padding)) {
- put_empty_recvmsg(t, recvmsg);
- return;
- }
-
- data_length = le32_to_cpu(data_transfer->data_length);
- if (data_length) {
- if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
- (u64)data_length) {
- put_empty_recvmsg(t, recvmsg);
- return;
- }
-
- if (t->full_packet_received)
- recvmsg->first_segment = true;
-
- if (le32_to_cpu(data_transfer->remaining_data_length))
- t->full_packet_received = false;
- else
- t->full_packet_received = true;
-
- enqueue_reassembly(t, recvmsg, (int)data_length);
- wake_up_interruptible(&t->wait_reassembly_queue);
-
- spin_lock(&t->receive_credit_lock);
- receive_credits = --(t->recv_credits);
- avail_recvmsg_count = t->count_avail_recvmsg;
- spin_unlock(&t->receive_credit_lock);
- } else {
- put_empty_recvmsg(t, recvmsg);
-
- spin_lock(&t->receive_credit_lock);
- receive_credits = --(t->recv_credits);
- avail_recvmsg_count = ++(t->count_avail_recvmsg);
- spin_unlock(&t->receive_credit_lock);
- }
-
- t->recv_credit_target =
- le16_to_cpu(data_transfer->credits_requested);
- atomic_add(le16_to_cpu(data_transfer->credits_granted),
- &t->send_credits);
-
- if (le16_to_cpu(data_transfer->flags) &
- SMB_DIRECT_RESPONSE_REQUESTED)
- queue_work(smb_direct_wq, &t->send_immediate_work);
-
- if (atomic_read(&t->send_credits) > 0)
- wake_up_interruptible(&t->wait_send_credits);
-
- if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
- mod_delayed_work(smb_direct_wq,
- &t->post_recv_credits_work, 0);
- break;
- }
- default:
- break;
- }
-}
-
-static int smb_direct_post_recv(struct smb_direct_transport *t,
- struct smb_direct_recvmsg *recvmsg)
-{
- struct ib_recv_wr wr;
- int ret;
-
- recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
- recvmsg->packet, t->max_recv_size,
- DMA_FROM_DEVICE);
- ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
- if (ret)
- return ret;
- recvmsg->sge.length = t->max_recv_size;
- recvmsg->sge.lkey = t->pd->local_dma_lkey;
- recvmsg->cqe.done = recv_done;
-
- wr.wr_cqe = &recvmsg->cqe;
- wr.next = NULL;
- wr.sg_list = &recvmsg->sge;
- wr.num_sge = 1;
-
- ret = ib_post_recv(t->qp, &wr, NULL);
- if (ret) {
- pr_err("Can't post recv: %d\n", ret);
- ib_dma_unmap_single(t->cm_id->device,
- recvmsg->sge.addr, recvmsg->sge.length,
- DMA_FROM_DEVICE);
- smb_direct_disconnect_rdma_connection(t);
- return ret;
- }
- return ret;
-}
-
-static int smb_direct_read(struct ksmbd_transport *t, char *buf,
- unsigned int size)
-{
- struct smb_direct_recvmsg *recvmsg;
- struct smb_direct_data_transfer *data_transfer;
- int to_copy, to_read, data_read, offset;
- u32 data_length, remaining_data_length, data_offset;
- int rc;
- struct smb_direct_transport *st = smb_trans_direct_transfort(t);
-
-again:
- if (st->status != SMB_DIRECT_CS_CONNECTED) {
- pr_err("disconnected\n");
- return -ENOTCONN;
- }
-
- /*
- * No need to hold the reassembly queue lock all the time as we are
- * the only one reading from the front of the queue. The transport
- * may add more entries to the back of the queue at the same time
- */
- if (st->reassembly_data_length >= size) {
- int queue_length;
- int queue_removed = 0;
-
- /*
- * Need to make sure reassembly_data_length is read before
- * reading reassembly_queue_length and calling
- * get_first_reassembly. This call is lock free
- * as we never read the end of the queue, which is being
- * updated in SOFTIRQ context as more data is received.
- */
- virt_rmb();
- queue_length = st->reassembly_queue_length;
- data_read = 0;
- to_read = size;
- offset = st->first_entry_offset;
- while (data_read < size) {
- recvmsg = get_first_reassembly(st);
- data_transfer = smb_direct_recvmsg_payload(recvmsg);
- data_length = le32_to_cpu(data_transfer->data_length);
- remaining_data_length =
- le32_to_cpu(data_transfer->remaining_data_length);
- data_offset = le32_to_cpu(data_transfer->data_offset);
-
- /*
- * The upper layer expects RFC1002 length at the
- * beginning of the payload. Return it to indicate
- * the total length of the packet. This minimizes the
- * change to the upper layer packet processing logic. This
- * will eventually be removed when an intermediate
- * transport layer is added.
- */
- if (recvmsg->first_segment && size == 4) {
- unsigned int rfc1002_len =
- data_length + remaining_data_length;
- *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
- data_read = 4;
- recvmsg->first_segment = false;
- ksmbd_debug(RDMA,
- "returning rfc1002 length %d\n",
- rfc1002_len);
- goto read_rfc1002_done;
- }
-
- to_copy = min_t(int, data_length - offset, to_read);
- memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
- to_copy);
-
- /* move on to the next buffer? */
- if (to_copy == data_length - offset) {
- queue_length--;
- /*
- * No need to lock if we are not at the
- * end of the queue
- */
- if (queue_length) {
- list_del(&recvmsg->list);
- } else {
- spin_lock_irq(&st->reassembly_queue_lock);
- list_del(&recvmsg->list);
- spin_unlock_irq(&st->reassembly_queue_lock);
- }
- queue_removed++;
- put_recvmsg(st, recvmsg);
- offset = 0;
- } else {
- offset += to_copy;
- }
-
- to_read -= to_copy;
- data_read += to_copy;
- }
-
- spin_lock_irq(&st->reassembly_queue_lock);
- st->reassembly_data_length -= data_read;
- st->reassembly_queue_length -= queue_removed;
- spin_unlock_irq(&st->reassembly_queue_lock);
-
- spin_lock(&st->receive_credit_lock);
- st->count_avail_recvmsg += queue_removed;
- if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
- spin_unlock(&st->receive_credit_lock);
- mod_delayed_work(smb_direct_wq,
- &st->post_recv_credits_work, 0);
- } else {
- spin_unlock(&st->receive_credit_lock);
- }
-
- st->first_entry_offset = offset;
- ksmbd_debug(RDMA,
- "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
- data_read, st->reassembly_data_length,
- st->first_entry_offset);
-read_rfc1002_done:
- return data_read;
- }
-
- ksmbd_debug(RDMA, "wait_event on more data\n");
- rc = wait_event_interruptible(st->wait_reassembly_queue,
- st->reassembly_data_length >= size ||
- st->status != SMB_DIRECT_CS_CONNECTED);
- if (rc)
- return -EINTR;
-
- goto again;
-}
-
-static void smb_direct_post_recv_credits(struct work_struct *work)
-{
- struct smb_direct_transport *t = container_of(work,
- struct smb_direct_transport, post_recv_credits_work.work);
- struct smb_direct_recvmsg *recvmsg;
- int receive_credits, credits = 0;
- int ret;
- int use_free = 1;
-
- spin_lock(&t->receive_credit_lock);
- receive_credits = t->recv_credits;
- spin_unlock(&t->receive_credit_lock);
-
- if (receive_credits < t->recv_credit_target) {
- while (true) {
- if (use_free)
- recvmsg = get_free_recvmsg(t);
- else
- recvmsg = get_empty_recvmsg(t);
- if (!recvmsg) {
- if (use_free) {
- use_free = 0;
- continue;
- } else {
- break;
- }
- }
-
- recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
- recvmsg->first_segment = false;
-
- ret = smb_direct_post_recv(t, recvmsg);
- if (ret) {
- pr_err("Can't post recv: %d\n", ret);
- put_recvmsg(t, recvmsg);
- break;
- }
- credits++;
- }
- }
-
- spin_lock(&t->receive_credit_lock);
- t->recv_credits += credits;
- t->count_avail_recvmsg -= credits;
- spin_unlock(&t->receive_credit_lock);
-
- spin_lock(&t->lock_new_recv_credits);
- t->new_recv_credits += credits;
- spin_unlock(&t->lock_new_recv_credits);
-
- if (credits)
- queue_work(smb_direct_wq, &t->send_immediate_work);
-}
-
-static void send_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct smb_direct_sendmsg *sendmsg, *sibling;
- struct smb_direct_transport *t;
- struct list_head *pos, *prev, *end;
-
- sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
- t = sendmsg->transport;
-
- ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->opcode);
-
- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
- pr_err("Send error. status='%s (%d)', opcode=%d\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->opcode);
- smb_direct_disconnect_rdma_connection(t);
- }
-
- if (sendmsg->num_sge > 1) {
- if (atomic_dec_and_test(&t->send_payload_pending))
- wake_up(&t->wait_send_payload_pending);
- } else {
- if (atomic_dec_and_test(&t->send_pending))
- wake_up(&t->wait_send_pending);
- }
-
- /* iterate and free the list of messages in reverse. the list's head
- * is invalid.
- */
- for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
- prev != end; pos = prev, prev = prev->prev) {
- sibling = container_of(pos, struct smb_direct_sendmsg, list);
- smb_direct_free_sendmsg(t, sibling);
- }
-
- sibling = container_of(pos, struct smb_direct_sendmsg, list);
- smb_direct_free_sendmsg(t, sibling);
-}
-
-static int manage_credits_prior_sending(struct smb_direct_transport *t)
-{
- int new_credits;
-
- spin_lock(&t->lock_new_recv_credits);
- new_credits = t->new_recv_credits;
- t->new_recv_credits = 0;
- spin_unlock(&t->lock_new_recv_credits);
-
- return new_credits;
-}
-
-static int smb_direct_post_send(struct smb_direct_transport *t,
- struct ib_send_wr *wr)
-{
- int ret;
-
- if (wr->num_sge > 1)
- atomic_inc(&t->send_payload_pending);
- else
- atomic_inc(&t->send_pending);
-
- ret = ib_post_send(t->qp, wr, NULL);
- if (ret) {
- pr_err("failed to post send: %d\n", ret);
- if (wr->num_sge > 1) {
- if (atomic_dec_and_test(&t->send_payload_pending))
- wake_up(&t->wait_send_payload_pending);
- } else {
- if (atomic_dec_and_test(&t->send_pending))
- wake_up(&t->wait_send_pending);
- }
- smb_direct_disconnect_rdma_connection(t);
- }
- return ret;
-}
-
-static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
- struct smb_direct_send_ctx *send_ctx,
- bool need_invalidate_rkey,
- unsigned int remote_key)
-{
- INIT_LIST_HEAD(&send_ctx->msg_list);
- send_ctx->wr_cnt = 0;
- send_ctx->need_invalidate_rkey = need_invalidate_rkey;
- send_ctx->remote_key = remote_key;
-}
-
-static int smb_direct_flush_send_list(struct smb_direct_transport *t,
- struct smb_direct_send_ctx *send_ctx,
- bool is_last)
-{
- struct smb_direct_sendmsg *first, *last;
- int ret;
-
- if (list_empty(&send_ctx->msg_list))
- return 0;
-
- first = list_first_entry(&send_ctx->msg_list,
- struct smb_direct_sendmsg,
- list);
- last = list_last_entry(&send_ctx->msg_list,
- struct smb_direct_sendmsg,
- list);
-
- last->wr.send_flags = IB_SEND_SIGNALED;
- last->wr.wr_cqe = &last->cqe;
- if (is_last && send_ctx->need_invalidate_rkey) {
- last->wr.opcode = IB_WR_SEND_WITH_INV;
- last->wr.ex.invalidate_rkey = send_ctx->remote_key;
- }
-
- ret = smb_direct_post_send(t, &first->wr);
- if (!ret) {
- smb_direct_send_ctx_init(t, send_ctx,
- send_ctx->need_invalidate_rkey,
- send_ctx->remote_key);
- } else {
- atomic_add(send_ctx->wr_cnt, &t->send_credits);
- wake_up(&t->wait_send_credits);
- list_for_each_entry_safe(first, last, &send_ctx->msg_list,
- list) {
- smb_direct_free_sendmsg(t, first);
- }
- }
- return ret;
-}
-
-static int wait_for_credits(struct smb_direct_transport *t,
- wait_queue_head_t *waitq, atomic_t *credits)
-{
- int ret;
-
- do {
- if (atomic_dec_return(credits) >= 0)
- return 0;
-
- atomic_inc(credits);
- ret = wait_event_interruptible(*waitq,
- atomic_read(credits) > 0 ||
- t->status != SMB_DIRECT_CS_CONNECTED);
-
- if (t->status != SMB_DIRECT_CS_CONNECTED)
- return -ENOTCONN;
- else if (ret < 0)
- return ret;
- } while (true);
-}
-
-static int wait_for_send_credits(struct smb_direct_transport *t,
- struct smb_direct_send_ctx *send_ctx)
-{
- int ret;
-
- if (send_ctx &&
- (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
- ret = smb_direct_flush_send_list(t, send_ctx, false);
- if (ret)
- return ret;
- }
-
- return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
-}
-
-static int smb_direct_create_header(struct smb_direct_transport *t,
- int size, int remaining_data_length,
- struct smb_direct_sendmsg **sendmsg_out)
-{
- struct smb_direct_sendmsg *sendmsg;
- struct smb_direct_data_transfer *packet;
- int header_length;
- int ret;
-
- sendmsg = smb_direct_alloc_sendmsg(t);
- if (IS_ERR(sendmsg))
- return PTR_ERR(sendmsg);
-
- /* Fill in the packet header */
- packet = (struct smb_direct_data_transfer *)sendmsg->packet;
- packet->credits_requested = cpu_to_le16(t->send_credit_target);
- packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
-
- packet->flags = 0;
- packet->reserved = 0;
- if (!size)
- packet->data_offset = 0;
- else
- packet->data_offset = cpu_to_le32(24);
- packet->data_length = cpu_to_le32(size);
- packet->remaining_data_length = cpu_to_le32(remaining_data_length);
- packet->padding = 0;
-
- ksmbd_debug(RDMA,
- "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
- le16_to_cpu(packet->credits_requested),
- le16_to_cpu(packet->credits_granted),
- le32_to_cpu(packet->data_offset),
- le32_to_cpu(packet->data_length),
- le32_to_cpu(packet->remaining_data_length));
-
- /* Map the packet to DMA */
- header_length = sizeof(struct smb_direct_data_transfer);
- /* If this is a packet without payload, don't send padding */
- if (!size)
- header_length =
- offsetof(struct smb_direct_data_transfer, padding);
-
- sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
- (void *)packet,
- header_length,
- DMA_TO_DEVICE);
- ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
- if (ret) {
- smb_direct_free_sendmsg(t, sendmsg);
- return ret;
- }
-
- sendmsg->num_sge = 1;
- sendmsg->sge[0].length = header_length;
- sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
-
- *sendmsg_out = sendmsg;
- return 0;
-}
-
-static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
-{
- bool high = is_vmalloc_addr(buf);
- struct page *page;
- int offset, len;
- int i = 0;
-
- if (nentries < get_buf_page_count(buf, size))
- return -EINVAL;
-
- offset = offset_in_page(buf);
- buf -= offset;
- while (size > 0) {
- len = min_t(int, PAGE_SIZE - offset, size);
- if (high)
- page = vmalloc_to_page(buf);
- else
- page = kmap_to_page(buf);
-
- if (!sg_list)
- return -EINVAL;
- sg_set_page(sg_list, page, len, offset);
- sg_list = sg_next(sg_list);
-
- buf += PAGE_SIZE;
- size -= len;
- offset = 0;
- i++;
- }
- return i;
-}
-
-static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
- struct scatterlist *sg_list, int nentries,
- enum dma_data_direction dir)
-{
- int npages;
-
- npages = get_sg_list(buf, size, sg_list, nentries);
- if (npages <= 0)
- return -EINVAL;
- return ib_dma_map_sg(device, sg_list, npages, dir);
-}
-
-static int post_sendmsg(struct smb_direct_transport *t,
- struct smb_direct_send_ctx *send_ctx,
- struct smb_direct_sendmsg *msg)
-{
- int i;
-
- for (i = 0; i < msg->num_sge; i++)
- ib_dma_sync_single_for_device(t->cm_id->device,
- msg->sge[i].addr, msg->sge[i].length,
- DMA_TO_DEVICE);
-
- msg->cqe.done = send_done;
- msg->wr.opcode = IB_WR_SEND;
- msg->wr.sg_list = &msg->sge[0];
- msg->wr.num_sge = msg->num_sge;
- msg->wr.next = NULL;
-
- if (send_ctx) {
- msg->wr.wr_cqe = NULL;
- msg->wr.send_flags = 0;
- if (!list_empty(&send_ctx->msg_list)) {
- struct smb_direct_sendmsg *last;
-
- last = list_last_entry(&send_ctx->msg_list,
- struct smb_direct_sendmsg,
- list);
- last->wr.next = &msg->wr;
- }
- list_add_tail(&msg->list, &send_ctx->msg_list);
- send_ctx->wr_cnt++;
- return 0;
- }
-
- msg->wr.wr_cqe = &msg->cqe;
- msg->wr.send_flags = IB_SEND_SIGNALED;
- return smb_direct_post_send(t, &msg->wr);
-}
-
-static int smb_direct_post_send_data(struct smb_direct_transport *t,
- struct smb_direct_send_ctx *send_ctx,
- struct kvec *iov, int niov,
- int remaining_data_length)
-{
- int i, j, ret;
- struct smb_direct_sendmsg *msg;
- int data_length;
- struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];
-
- ret = wait_for_send_credits(t, send_ctx);
- if (ret)
- return ret;
-
- data_length = 0;
- for (i = 0; i < niov; i++)
- data_length += iov[i].iov_len;
-
- ret = smb_direct_create_header(t, data_length, remaining_data_length,
- &msg);
- if (ret) {
- atomic_inc(&t->send_credits);
- return ret;
- }
-
- for (i = 0; i < niov; i++) {
- struct ib_sge *sge;
- int sg_cnt;
-
- sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
- sg_cnt = get_mapped_sg_list(t->cm_id->device,
- iov[i].iov_base, iov[i].iov_len,
- sg, SMB_DIRECT_MAX_SEND_SGES - 1,
- DMA_TO_DEVICE);
- if (sg_cnt <= 0) {
- pr_err("failed to map buffer\n");
- ret = -ENOMEM;
- goto err;
- } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
- pr_err("buffer not fitted into sges\n");
- ret = -E2BIG;
- ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
- DMA_TO_DEVICE);
- goto err;
- }
-
- for (j = 0; j < sg_cnt; j++) {
- sge = &msg->sge[msg->num_sge];
- sge->addr = sg_dma_address(&sg[j]);
- sge->length = sg_dma_len(&sg[j]);
- sge->lkey = t->pd->local_dma_lkey;
- msg->num_sge++;
- }
- }
-
- ret = post_sendmsg(t, send_ctx, msg);
- if (ret)
- goto err;
- return 0;
-err:
- smb_direct_free_sendmsg(t, msg);
- atomic_inc(&t->send_credits);
- return ret;
-}
-
-static int smb_direct_writev(struct ksmbd_transport *t,
- struct kvec *iov, int niovs, int buflen,
- bool need_invalidate, unsigned int remote_key)
-{
- struct smb_direct_transport *st = smb_trans_direct_transfort(t);
- int remaining_data_length;
- int start, i, j;
- int max_iov_size = st->max_send_size -
- sizeof(struct smb_direct_data_transfer);
- int ret;
- struct kvec vec;
- struct smb_direct_send_ctx send_ctx;
-
- if (st->status != SMB_DIRECT_CS_CONNECTED)
- return -ENOTCONN;
-
- /* FIXME: skip RFC1002 header */
- buflen -= 4;
- iov[0].iov_base += 4;
- iov[0].iov_len -= 4;
-
- remaining_data_length = buflen;
- ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
-
- smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
- start = i = 0;
- buflen = 0;
- while (true) {
- buflen += iov[i].iov_len;
- if (buflen > max_iov_size) {
- if (i > start) {
- remaining_data_length -=
- (buflen - iov[i].iov_len);
- ret = smb_direct_post_send_data(st, &send_ctx,
- &iov[start], i - start,
- remaining_data_length);
- if (ret)
- goto done;
- } else {
- /* iov[start] is too big, break it */
- int nvec = (buflen + max_iov_size - 1) /
- max_iov_size;
-
- for (j = 0; j < nvec; j++) {
- vec.iov_base =
- (char *)iov[start].iov_base +
- j * max_iov_size;
- vec.iov_len =
- min_t(int, max_iov_size,
- buflen - max_iov_size * j);
- remaining_data_length -= vec.iov_len;
- ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
- remaining_data_length);
- if (ret)
- goto done;
- }
- i++;
- if (i == niovs)
- break;
- }
- start = i;
- buflen = 0;
- } else {
- i++;
- if (i == niovs) {
- /* send out all remaining vecs */
- remaining_data_length -= buflen;
- ret = smb_direct_post_send_data(st, &send_ctx,
- &iov[start], i - start,
- remaining_data_length);
- if (ret)
- goto done;
- break;
- }
- }
- }
-
-done:
- ret = smb_direct_flush_send_list(st, &send_ctx, true);
-
- /*
- * As an optimization, we don't wait for individual I/O to finish
- * before sending the next one.
- * Send them all and wait for the pending send count to reach 0,
- * which means all the I/Os have completed and we are good to return.
- */
-
- wait_event(st->wait_send_payload_pending,
- atomic_read(&st->send_payload_pending) == 0);
- return ret;
-}
-
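To make the fragmentation loop above concrete: with the default smb_direct_max_send_size of 8192 and a 24-byte smb_direct_data_transfer header, max_iov_size is 8168. A hypothetical single 20000-byte iovec (after the 4-byte RFC1002 prefix is stripped) would therefore go out as three messages of 8168, 8168 and 3664 bytes, advertising remaining_data_length values of 11832, 3664 and 0 respectively, so the peer always knows how much of the fragmented payload is still to come.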
-static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
- enum dma_data_direction dir)
-{
- struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
- struct smb_direct_rdma_rw_msg, cqe);
- struct smb_direct_transport *t = msg->t;
-
- if (wc->status != IB_WC_SUCCESS) {
- pr_err("read/write error. opcode = %d, status = %s(%d)\n",
- wc->opcode, ib_wc_status_msg(wc->status), wc->status);
- smb_direct_disconnect_rdma_connection(t);
- }
-
- if (atomic_inc_return(&t->rw_avail_ops) > 0)
- wake_up(&t->wait_rw_avail_ops);
-
- rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, msg->sgt.nents, dir);
- sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
- complete(msg->completion);
- kfree(msg);
-}
-
-static void read_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- read_write_done(cq, wc, DMA_FROM_DEVICE);
-}
-
-static void write_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- read_write_done(cq, wc, DMA_TO_DEVICE);
-}
-
-static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
- int buf_len, u32 remote_key, u64 remote_offset,
- u32 remote_len, bool is_read)
-{
- struct smb_direct_rdma_rw_msg *msg;
- int ret;
- DECLARE_COMPLETION_ONSTACK(completion);
- struct ib_send_wr *first_wr = NULL;
-
- ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
- if (ret < 0)
- return ret;
-
- /* TODO: mempool */
- msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
- sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
- if (!msg) {
- atomic_inc(&t->rw_avail_ops);
- return -ENOMEM;
- }
-
- msg->sgt.sgl = &msg->sg_list[0];
- ret = sg_alloc_table_chained(&msg->sgt,
- get_buf_page_count(buf, buf_len),
- msg->sg_list, SG_CHUNK_SIZE);
- if (ret) {
- atomic_inc(&t->rw_avail_ops);
- kfree(msg);
- return -ENOMEM;
- }
-
- ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
- if (ret <= 0) {
- pr_err("failed to get pages\n");
- goto err;
- }
-
- ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, get_buf_page_count(buf, buf_len),
- 0, remote_offset, remote_key,
- is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (ret < 0) {
- pr_err("failed to init rdma_rw_ctx: %d\n", ret);
- goto err;
- }
-
- msg->t = t;
- msg->cqe.done = is_read ? read_done : write_done;
- msg->completion = &completion;
- first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
- &msg->cqe, NULL);
-
- ret = ib_post_send(t->qp, first_wr, NULL);
- if (ret) {
- pr_err("failed to post send wr: %d\n", ret);
- goto err;
- }
-
- wait_for_completion(&completion);
- return 0;
-
-err:
- atomic_inc(&t->rw_avail_ops);
- if (first_wr)
- rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, msg->sgt.nents,
- is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
- kfree(msg);
- return ret;
-}
-
-static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
- unsigned int buflen, u32 remote_key,
- u64 remote_offset, u32 remote_len)
-{
- return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
- remote_key, remote_offset,
- remote_len, false);
-}
-
-static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
- unsigned int buflen, u32 remote_key,
- u64 remote_offset, u32 remote_len)
-{
- return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
- remote_key, remote_offset,
- remote_len, true);
-}
-
-static void smb_direct_disconnect(struct ksmbd_transport *t)
-{
- struct smb_direct_transport *st = smb_trans_direct_transfort(t);
-
- ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);
-
- smb_direct_disconnect_rdma_work(&st->disconnect_work);
- wait_event_interruptible(st->wait_status,
- st->status == SMB_DIRECT_CS_DISCONNECTED);
- free_transport(st);
-}
-
-static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event)
-{
- struct smb_direct_transport *t = cm_id->context;
-
- ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
- cm_id, rdma_event_msg(event->event), event->event);
-
- switch (event->event) {
- case RDMA_CM_EVENT_ESTABLISHED: {
- t->status = SMB_DIRECT_CS_CONNECTED;
- wake_up_interruptible(&t->wait_status);
- break;
- }
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- case RDMA_CM_EVENT_DISCONNECTED: {
- t->status = SMB_DIRECT_CS_DISCONNECTED;
- wake_up_interruptible(&t->wait_status);
- wake_up_interruptible(&t->wait_reassembly_queue);
- wake_up(&t->wait_send_credits);
- break;
- }
- case RDMA_CM_EVENT_CONNECT_ERROR: {
- t->status = SMB_DIRECT_CS_DISCONNECTED;
- wake_up_interruptible(&t->wait_status);
- break;
- }
- default:
- pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
- cm_id, rdma_event_msg(event->event),
- event->event);
- break;
- }
- return 0;
-}
-
-static void smb_direct_qpair_handler(struct ib_event *event, void *context)
-{
- struct smb_direct_transport *t = context;
-
- ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
- t->cm_id, ib_event_msg(event->event), event->event);
-
- switch (event->event) {
- case IB_EVENT_CQ_ERR:
- case IB_EVENT_QP_FATAL:
- smb_direct_disconnect_rdma_connection(t);
- break;
- default:
- break;
- }
-}
-
-static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
- int failed)
-{
- struct smb_direct_sendmsg *sendmsg;
- struct smb_direct_negotiate_resp *resp;
- int ret;
-
- sendmsg = smb_direct_alloc_sendmsg(t);
- if (IS_ERR(sendmsg))
- return -ENOMEM;
-
- resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
- if (failed) {
- memset(resp, 0, sizeof(*resp));
- resp->min_version = cpu_to_le16(0x0100);
- resp->max_version = cpu_to_le16(0x0100);
- resp->status = STATUS_NOT_SUPPORTED;
- } else {
- resp->status = STATUS_SUCCESS;
- resp->min_version = SMB_DIRECT_VERSION_LE;
- resp->max_version = SMB_DIRECT_VERSION_LE;
- resp->negotiated_version = SMB_DIRECT_VERSION_LE;
- resp->reserved = 0;
- resp->credits_requested =
- cpu_to_le16(t->send_credit_target);
- resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
- resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
- resp->preferred_send_size = cpu_to_le32(t->max_send_size);
- resp->max_receive_size = cpu_to_le32(t->max_recv_size);
- resp->max_fragmented_size =
- cpu_to_le32(t->max_fragmented_recv_size);
- }
-
- sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
- (void *)resp, sizeof(*resp),
- DMA_TO_DEVICE);
- ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
- if (ret) {
- smb_direct_free_sendmsg(t, sendmsg);
- return ret;
- }
-
- sendmsg->num_sge = 1;
- sendmsg->sge[0].length = sizeof(*resp);
- sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
-
- ret = post_sendmsg(t, NULL, sendmsg);
- if (ret) {
- smb_direct_free_sendmsg(t, sendmsg);
- return ret;
- }
-
- wait_event(t->wait_send_pending,
- atomic_read(&t->send_pending) == 0);
- return 0;
-}
-
-static int smb_direct_accept_client(struct smb_direct_transport *t)
-{
- struct rdma_conn_param conn_param;
- struct ib_port_immutable port_immutable;
- u32 ird_ord_hdr[2];
- int ret;
-
- memset(&conn_param, 0, sizeof(conn_param));
- conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
- SMB_DIRECT_CM_INITIATOR_DEPTH);
- conn_param.responder_resources = 0;
-
- t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
- t->cm_id->port_num,
- &port_immutable);
- if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
- ird_ord_hdr[0] = conn_param.responder_resources;
- ird_ord_hdr[1] = 1;
- conn_param.private_data = ird_ord_hdr;
- conn_param.private_data_len = sizeof(ird_ord_hdr);
- } else {
- conn_param.private_data = NULL;
- conn_param.private_data_len = 0;
- }
- conn_param.retry_count = SMB_DIRECT_CM_RETRY;
- conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
- conn_param.flow_control = 0;
-
- ret = rdma_accept(t->cm_id, &conn_param);
- if (ret) {
- pr_err("error at rdma_accept: %d\n", ret);
- return ret;
- }
-
- wait_event_interruptible(t->wait_status,
- t->status != SMB_DIRECT_CS_NEW);
- if (t->status != SMB_DIRECT_CS_CONNECTED)
- return -ENOTCONN;
- return 0;
-}
-
-static int smb_direct_negotiate(struct smb_direct_transport *t)
-{
- int ret;
- struct smb_direct_recvmsg *recvmsg;
- struct smb_direct_negotiate_req *req;
-
- recvmsg = get_free_recvmsg(t);
- if (!recvmsg)
- return -ENOMEM;
- recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;
-
- ret = smb_direct_post_recv(t, recvmsg);
- if (ret) {
- pr_err("Can't post recv: %d\n", ret);
- goto out;
- }
-
- t->negotiation_requested = false;
- ret = smb_direct_accept_client(t);
- if (ret) {
- pr_err("Can't accept client\n");
- goto out;
- }
-
- smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
-
- ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
- ret = wait_event_interruptible_timeout(t->wait_status,
- t->negotiation_requested ||
- t->status == SMB_DIRECT_CS_DISCONNECTED,
- SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
- if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) {
- ret = ret < 0 ? ret : -ETIMEDOUT;
- goto out;
- }
-
- ret = smb_direct_check_recvmsg(recvmsg);
- if (ret == -ECONNABORTED)
- goto out;
-
- req = (struct smb_direct_negotiate_req *)recvmsg->packet;
- t->max_recv_size = min_t(int, t->max_recv_size,
- le32_to_cpu(req->preferred_send_size));
- t->max_send_size = min_t(int, t->max_send_size,
- le32_to_cpu(req->max_receive_size));
- t->max_fragmented_send_size =
- le32_to_cpu(req->max_fragmented_size);
-
- ret = smb_direct_send_negotiate_response(t, ret);
-out:
- if (recvmsg)
- put_recvmsg(t, recvmsg);
- return ret;
-}
-
-static int smb_direct_init_params(struct smb_direct_transport *t,
- struct ib_qp_cap *cap)
-{
- struct ib_device *device = t->cm_id->device;
- int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;
-
- /* Need 2 more SGEs because an SMB_DIRECT header will be mapped
- * and the send buffer may not be page aligned.
- */
- t->max_send_size = smb_direct_max_send_size;
- max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
- if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
- pr_err("max_send_size %d is too large\n", t->max_send_size);
- return -EINVAL;
- }
-
- /*
- * Allow smb_direct_max_outstanding_rw_ops in-flight RDMA
- * reads/writes. The HCA guarantees at least max_send_sge SGEs for
- * an RDMA read/write work request, and if memory registration is used,
- * we need reg_mr and local_inv WRs for each read/write.
- */
- t->max_rdma_rw_size = smb_direct_max_read_write_size;
- max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
- max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
- max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
- max_pages) * 2;
- max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
-
- max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
- if (max_send_wrs > device->attrs.max_cqe ||
- max_send_wrs > device->attrs.max_qp_wr) {
- pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
- smb_direct_send_credit_target,
- smb_direct_max_outstanding_rw_ops);
- pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
- device->attrs.max_cqe, device->attrs.max_qp_wr);
- return -EINVAL;
- }
-
- if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
- smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
- pr_err("consider lowering receive_credit_max = %d\n",
- smb_direct_receive_credit_max);
- pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
- device->attrs.max_cqe, device->attrs.max_qp_wr);
- return -EINVAL;
- }
-
- if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
- pr_err("warning: device max_send_sge = %d too small\n",
- device->attrs.max_send_sge);
- return -EINVAL;
- }
- if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
- pr_err("warning: device max_recv_sge = %d too small\n",
- device->attrs.max_recv_sge);
- return -EINVAL;
- }
-
- t->recv_credits = 0;
- t->count_avail_recvmsg = 0;
-
- t->recv_credit_max = smb_direct_receive_credit_max;
- t->recv_credit_target = 10;
- t->new_recv_credits = 0;
-
- t->send_credit_target = smb_direct_send_credit_target;
- atomic_set(&t->send_credits, 0);
- atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);
-
- t->max_send_size = smb_direct_max_send_size;
- t->max_recv_size = smb_direct_max_receive_size;
- t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
-
- cap->max_send_wr = max_send_wrs;
- cap->max_recv_wr = t->recv_credit_max;
- cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
- cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
- cap->max_inline_data = 0;
- cap->max_rdma_ctxs = 0;
- return 0;
-}
-
-static void smb_direct_destroy_pools(struct smb_direct_transport *t)
-{
- struct smb_direct_recvmsg *recvmsg;
-
- while ((recvmsg = get_free_recvmsg(t)))
- mempool_free(recvmsg, t->recvmsg_mempool);
- while ((recvmsg = get_empty_recvmsg(t)))
- mempool_free(recvmsg, t->recvmsg_mempool);
-
- mempool_destroy(t->recvmsg_mempool);
- t->recvmsg_mempool = NULL;
-
- kmem_cache_destroy(t->recvmsg_cache);
- t->recvmsg_cache = NULL;
-
- mempool_destroy(t->sendmsg_mempool);
- t->sendmsg_mempool = NULL;
-
- kmem_cache_destroy(t->sendmsg_cache);
- t->sendmsg_cache = NULL;
-}
-
-static int smb_direct_create_pools(struct smb_direct_transport *t)
-{
- char name[80];
- int i;
- struct smb_direct_recvmsg *recvmsg;
-
- snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
- t->sendmsg_cache = kmem_cache_create(name,
- sizeof(struct smb_direct_sendmsg) +
- sizeof(struct smb_direct_negotiate_resp),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!t->sendmsg_cache)
- return -ENOMEM;
-
- t->sendmsg_mempool = mempool_create(t->send_credit_target,
- mempool_alloc_slab, mempool_free_slab,
- t->sendmsg_cache);
- if (!t->sendmsg_mempool)
- goto err;
-
- snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
- t->recvmsg_cache = kmem_cache_create(name,
- sizeof(struct smb_direct_recvmsg) +
- t->max_recv_size,
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!t->recvmsg_cache)
- goto err;
-
- t->recvmsg_mempool =
- mempool_create(t->recv_credit_max, mempool_alloc_slab,
- mempool_free_slab, t->recvmsg_cache);
- if (!t->recvmsg_mempool)
- goto err;
-
- INIT_LIST_HEAD(&t->recvmsg_queue);
-
- for (i = 0; i < t->recv_credit_max; i++) {
- recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
- if (!recvmsg)
- goto err;
- recvmsg->transport = t;
- list_add(&recvmsg->list, &t->recvmsg_queue);
- }
- t->count_avail_recvmsg = t->recv_credit_max;
-
- return 0;
-err:
- smb_direct_destroy_pools(t);
- return -ENOMEM;
-}
-
-static int smb_direct_create_qpair(struct smb_direct_transport *t,
- struct ib_qp_cap *cap)
-{
- int ret;
- struct ib_qp_init_attr qp_attr;
-
- t->pd = ib_alloc_pd(t->cm_id->device, 0);
- if (IS_ERR(t->pd)) {
- pr_err("Can't create RDMA PD\n");
- ret = PTR_ERR(t->pd);
- t->pd = NULL;
- return ret;
- }
-
- t->send_cq = ib_alloc_cq(t->cm_id->device, t,
- t->send_credit_target, 0, IB_POLL_WORKQUEUE);
- if (IS_ERR(t->send_cq)) {
- pr_err("Can't create RDMA send CQ\n");
- ret = PTR_ERR(t->send_cq);
- t->send_cq = NULL;
- goto err;
- }
-
- t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
- cap->max_send_wr + cap->max_rdma_ctxs,
- 0, IB_POLL_WORKQUEUE);
- if (IS_ERR(t->recv_cq)) {
- pr_err("Can't create RDMA recv CQ\n");
- ret = PTR_ERR(t->recv_cq);
- t->recv_cq = NULL;
- goto err;
- }
-
- memset(&qp_attr, 0, sizeof(qp_attr));
- qp_attr.event_handler = smb_direct_qpair_handler;
- qp_attr.qp_context = t;
- qp_attr.cap = *cap;
- qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- qp_attr.qp_type = IB_QPT_RC;
- qp_attr.send_cq = t->send_cq;
- qp_attr.recv_cq = t->recv_cq;
- qp_attr.port_num = ~0;
-
- ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
- if (ret) {
- pr_err("Can't create RDMA QP: %d\n", ret);
- goto err;
- }
-
- t->qp = t->cm_id->qp;
- t->cm_id->event_handler = smb_direct_cm_handler;
-
- return 0;
-err:
- if (t->qp) {
- ib_destroy_qp(t->qp);
- t->qp = NULL;
- }
- if (t->recv_cq) {
- ib_destroy_cq(t->recv_cq);
- t->recv_cq = NULL;
- }
- if (t->send_cq) {
- ib_destroy_cq(t->send_cq);
- t->send_cq = NULL;
- }
- if (t->pd) {
- ib_dealloc_pd(t->pd);
- t->pd = NULL;
- }
- return ret;
-}
-
-static int smb_direct_prepare(struct ksmbd_transport *t)
-{
- struct smb_direct_transport *st = smb_trans_direct_transfort(t);
- int ret;
- struct ib_qp_cap qp_cap;
-
- ret = smb_direct_init_params(st, &qp_cap);
- if (ret) {
- pr_err("Can't configure RDMA parameters\n");
- return ret;
- }
-
- ret = smb_direct_create_pools(st);
- if (ret) {
- pr_err("Can't init RDMA pool: %d\n", ret);
- return ret;
- }
-
- ret = smb_direct_create_qpair(st, &qp_cap);
- if (ret) {
- pr_err("Can't accept RDMA client: %d\n", ret);
- return ret;
- }
-
- ret = smb_direct_negotiate(st);
- if (ret) {
- pr_err("Can't negotiate: %d\n", ret);
- return ret;
- }
-
- st->status = SMB_DIRECT_CS_CONNECTED;
- return 0;
-}
-
-static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
-{
- if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
- return false;
- if (attrs->max_fast_reg_page_list_len == 0)
- return false;
- return true;
-}
-
-static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
-{
- struct smb_direct_transport *t;
-
- if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
- ksmbd_debug(RDMA,
- "Fast Registration Work Requests is not supported. device capabilities=%llx\n",
- new_cm_id->device->attrs.device_cap_flags);
- return -EPROTONOSUPPORT;
- }
-
- t = alloc_transport(new_cm_id);
- if (!t)
- return -ENOMEM;
-
- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
- KSMBD_TRANS(t)->conn, "ksmbd:r%u",
- SMB_DIRECT_PORT);
- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
- int ret = PTR_ERR(KSMBD_TRANS(t)->handler);
-
- pr_err("Can't start thread\n");
- free_transport(t);
- return ret;
- }
-
- return 0;
-}
-
-static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event)
-{
- switch (event->event) {
- case RDMA_CM_EVENT_CONNECT_REQUEST: {
- int ret = smb_direct_handle_connect_request(cm_id);
-
- if (ret) {
- pr_err("Can't create transport: %d\n", ret);
- return ret;
- }
-
- ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
- cm_id);
- break;
- }
- default:
- pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
- cm_id, rdma_event_msg(event->event), event->event);
- break;
- }
- return 0;
-}
-
-static int smb_direct_listen(int port)
-{
- int ret;
- struct rdma_cm_id *cm_id;
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_addr.s_addr = htonl(INADDR_ANY),
- .sin_port = htons(port),
- };
-
- cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
- &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(cm_id)) {
- pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
- return PTR_ERR(cm_id);
- }
-
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
- if (ret) {
- pr_err("Can't bind: %d\n", ret);
- goto err;
- }
-
- smb_direct_listener.cm_id = cm_id;
-
- ret = rdma_listen(cm_id, 10);
- if (ret) {
- pr_err("Can't listen: %d\n", ret);
- goto err;
- }
- return 0;
-err:
- smb_direct_listener.cm_id = NULL;
- rdma_destroy_id(cm_id);
- return ret;
-}
-
-int ksmbd_rdma_init(void)
-{
- int ret;
-
- smb_direct_listener.cm_id = NULL;
-
- /* When a client is running out of send credits, the credits are
- * granted by the server sending a packet using this queue.
- * This avoids the situation where a client cannot send packets
- * for lack of credits.
- */
- smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
- WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
- if (!smb_direct_wq)
- return -ENOMEM;
-
- ret = smb_direct_listen(SMB_DIRECT_PORT);
- if (ret) {
- destroy_workqueue(smb_direct_wq);
- smb_direct_wq = NULL;
- pr_err("Can't listen: %d\n", ret);
- return ret;
- }
-
- ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
- smb_direct_listener.cm_id);
- return 0;
-}
-
-int ksmbd_rdma_destroy(void)
-{
- if (smb_direct_listener.cm_id)
- rdma_destroy_id(smb_direct_listener.cm_id);
- smb_direct_listener.cm_id = NULL;
-
- if (smb_direct_wq) {
- flush_workqueue(smb_direct_wq);
- destroy_workqueue(smb_direct_wq);
- smb_direct_wq = NULL;
- }
- return 0;
-}
-
-bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
-{
- struct ib_device *ibdev;
- bool rdma_capable = false;
-
- ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
- if (ibdev) {
- if (rdma_frwr_is_supported(&ibdev->attrs))
- rdma_capable = true;
- ib_device_put(ibdev);
- }
- return rdma_capable;
-}
-
-static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
- .prepare = smb_direct_prepare,
- .disconnect = smb_direct_disconnect,
- .writev = smb_direct_writev,
- .read = smb_direct_read,
- .rdma_read = smb_direct_rdma_read,
- .rdma_write = smb_direct_rdma_write,
-};
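As an editorial aside (not part of the patch): the work-request sizing performed by smb_direct_init_params() above is easier to sanity-check in isolation. The standalone sketch below reproduces the same arithmetic with made-up values for the module parameters and for rdma_rw_mr_factor(); every name and number in it is an assumption, not a default taken from this driver.

/* Standalone userspace C reproducing the send WR sizing; compile with
 * any C compiler and adjust the assumed values to experiment.
 */
#include <stdio.h>

#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SMB_DIRECT_MAX_SEND_SGES 8	/* assumed limit */

int main(void)
{
	int max_send_size = 8192;	/* assumed smb_direct_max_send_size */
	int max_rw_size = 1024 * 1024;	/* assumed smb_direct_max_read_write_size */
	int send_credit_target = 255;	/* assumed smb_direct_send_credit_target */
	int outstanding_rw_ops = 8;	/* assumed smb_direct_max_outstanding_rw_ops */
	int mr_factor = 1;		/* stands in for rdma_rw_mr_factor() */

	int max_send_sges = DIV_ROUND_UP(max_send_size, PAGE_SIZE) + 2;
	int max_pages = DIV_ROUND_UP(max_rw_size, PAGE_SIZE) + 1;
	int max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);

	max_rw_wrs += mr_factor * 2;	/* reg_mr + local_inv per read/write */
	max_rw_wrs *= outstanding_rw_ops;

	printf("max_send_sges=%d max_send_wrs=%d\n",
	       max_send_sges, send_credit_target + max_rw_wrs);
	return 0;
}

Both results have to stay below the device's max_cqe and max_qp_wr attributes, which is exactly the check smb_direct_init_params() enforced before creating the queue pair.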
diff --git a/fs/ksmbd/transport_rdma.h b/fs/ksmbd/transport_rdma.h
deleted file mode 100644
index 0fa8adc0776f..000000000000
--- a/fs/ksmbd/transport_rdma.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2017, Microsoft Corporation.
- * Copyright (C) 2018, LG Electronics.
- */
-
-#ifndef __KSMBD_TRANSPORT_RDMA_H__
-#define __KSMBD_TRANSPORT_RDMA_H__
-
-#define SMB_DIRECT_PORT 5445
-
-/* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
-struct smb_direct_negotiate_req {
- __le16 min_version;
- __le16 max_version;
- __le16 reserved;
- __le16 credits_requested;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMB DIRECT negotiation response packet [MS-SMBD] 2.2.2 */
-struct smb_direct_negotiate_resp {
- __le16 min_version;
- __le16 max_version;
- __le16 negotiated_version;
- __le16 reserved;
- __le16 credits_requested;
- __le16 credits_granted;
- __le32 status;
- __le32 max_readwrite_size;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
-
-/* SMB DIRECT data transfer packet with payload [MS-SMBD] 2.2.3 */
-struct smb_direct_data_transfer {
- __le16 credits_requested;
- __le16 credits_granted;
- __le16 flags;
- __le16 reserved;
- __le32 remaining_data_length;
- __le32 data_offset;
- __le32 data_length;
- __le32 padding;
- __u8 buffer[];
-} __packed;
-
-#ifdef CONFIG_SMB_SERVER_SMBDIRECT
-int ksmbd_rdma_init(void);
-int ksmbd_rdma_destroy(void);
-bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
-#else
-static inline int ksmbd_rdma_init(void) { return 0; }
-static inline int ksmbd_rdma_destroy(void) { return 0; }
-static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
-#endif
-
-#endif /* __KSMBD_TRANSPORT_RDMA_H__ */
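Purely as an illustration of the wire format removed above (not code from this patch): a client would populate the [MS-SMBD] 2.2.1 negotiate request roughly as below before posting its first send, using the struct smb_direct_negotiate_req definition from the deleted header. All field values here are arbitrary examples.

/* Editorial sketch; assumes the struct definition above and the usual
 * kernel endianness helpers (cpu_to_le16/cpu_to_le32).
 */
static void example_fill_negotiate_req(struct smb_direct_negotiate_req *req)
{
	req->min_version = cpu_to_le16(0x0100);
	req->max_version = cpu_to_le16(0x0100);
	req->reserved = 0;
	req->credits_requested = cpu_to_le16(255);
	req->preferred_send_size = cpu_to_le32(1364);
	req->max_receive_size = cpu_to_le32(8192);
	req->max_fragmented_size = cpu_to_le32(1048576);
}

On the server side, smb_direct_negotiate() above clamped its own max_recv_size and max_send_size against the preferred_send_size and max_receive_size carried in this request.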
diff --git a/fs/libfs.c b/fs/libfs.c
index ba7438ab9371..9264523be85c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/exportfs.h>
+#include <linux/iversion.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <linux/fs_context.h>
@@ -22,17 +23,18 @@
#include <linux/fsnotify.h>
#include <linux/unicode.h>
#include <linux/fscrypt.h>
+#include <linux/pidfs.h>
#include <linux/uaccess.h>
#include "internal.h"
-int simple_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int simple_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
return 0;
}
@@ -40,6 +42,9 @@ EXPORT_SYMBOL(simple_getattr);
int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ u64 id = huge_encode_dev(dentry->d_sb->s_dev);
+
+ buf->f_fsid = u64_to_fsid(id);
buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = PAGE_SIZE;
buf->f_namelen = NAME_MAX;
@@ -57,11 +62,6 @@ int always_delete_dentry(const struct dentry *dentry)
}
EXPORT_SYMBOL(always_delete_dentry);
-const struct dentry_operations simple_dentry_operations = {
- .d_delete = always_delete_dentry,
-};
-EXPORT_SYMBOL(simple_dentry_operations);
-
/*
* Lookup the data. This is trivial - if the dentry didn't already
* exist, we know it is negative. Set d_op to delete negative dentries.
@@ -70,8 +70,14 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned
{
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
- if (!dentry->d_sb->s_d_op)
- d_set_d_op(dentry, &simple_dentry_operations);
+ if (!dentry->d_op && !(dentry->d_flags & DCACHE_DONTCACHE)) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_DONTCACHE;
+ spin_unlock(&dentry->d_lock);
+ }
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
+ return NULL;
+
d_add(dentry, NULL);
return NULL;
}
@@ -100,15 +106,16 @@ EXPORT_SYMBOL(dcache_dir_close);
* If no such element exists, NULL is returned.
*/
static struct dentry *scan_positives(struct dentry *cursor,
- struct list_head *p,
+ struct hlist_node **p,
loff_t count,
struct dentry *last)
{
struct dentry *dentry = cursor->d_parent, *found = NULL;
spin_lock(&dentry->d_lock);
- while ((p = p->next) != &dentry->d_subdirs) {
- struct dentry *d = list_entry(p, struct dentry, d_child);
+ while (*p) {
+ struct dentry *d = hlist_entry(*p, struct dentry, d_sib);
+ p = &d->d_sib.next;
// we must at least skip cursors, to avoid livelocks
if (d->d_flags & DCACHE_DENTRY_CURSOR)
continue;
@@ -122,8 +129,10 @@ static struct dentry *scan_positives(struct dentry *cursor,
count = 1;
}
if (need_resched()) {
- list_move(&cursor->d_child, p);
- p = &cursor->d_child;
+ if (!hlist_unhashed(&cursor->d_sib))
+ __hlist_del(&cursor->d_sib);
+ hlist_add_behind(&cursor->d_sib, &d->d_sib);
+ p = &cursor->d_sib.next;
spin_unlock(&dentry->d_lock);
cond_resched();
spin_lock(&dentry->d_lock);
@@ -155,13 +164,12 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
inode_lock_shared(dentry->d_inode);
if (offset > 2)
- to = scan_positives(cursor, &dentry->d_subdirs,
+ to = scan_positives(cursor, &dentry->d_children.first,
offset - 2, NULL);
spin_lock(&dentry->d_lock);
+ hlist_del_init(&cursor->d_sib);
if (to)
- list_move(&cursor->d_child, &to->d_child);
- else
- list_del_init(&cursor->d_child);
+ hlist_add_behind(&cursor->d_sib, &to->d_sib);
spin_unlock(&dentry->d_lock);
dput(to);
@@ -173,12 +181,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
}
EXPORT_SYMBOL(dcache_dir_lseek);
-/* Relationship between i_mode and the DT_xxx types */
-static inline unsigned char dt_type(struct inode *inode)
-{
- return (inode->i_mode >> 12) & 15;
-}
-
/*
* Directory is locked and all positive dentries in it are safe, since
* for ramfs-type trees they can't go away without unlink() or rmdir(),
@@ -189,32 +191,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
- struct list_head *anchor = &dentry->d_subdirs;
struct dentry *next = NULL;
- struct list_head *p;
+ struct hlist_node **p;
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos == 2)
- p = anchor;
- else if (!list_empty(&cursor->d_child))
- p = &cursor->d_child;
+ p = &dentry->d_children.first;
else
- return 0;
+ p = &cursor->d_sib.next;
while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
- d_inode(next)->i_ino, dt_type(d_inode(next))))
+ d_inode(next)->i_ino,
+ fs_umode_to_dtype(d_inode(next)->i_mode)))
break;
ctx->pos++;
- p = &next->d_child;
+ p = &next->d_sib.next;
}
spin_lock(&dentry->d_lock);
+ hlist_del_init(&cursor->d_sib);
if (next)
- list_move_tail(&cursor->d_child, &next->d_child);
- else
- list_del_init(&cursor->d_child);
+ hlist_add_before(&cursor->d_sib, &next->d_sib);
spin_unlock(&dentry->d_lock);
dput(next);
@@ -243,14 +242,351 @@ const struct inode_operations simple_dir_inode_operations = {
};
EXPORT_SYMBOL(simple_dir_inode_operations);
-static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
+/* simple_offset_add() never assigns these to a dentry */
+enum {
+ DIR_OFFSET_FIRST = 2, /* Find first real entry */
+ DIR_OFFSET_EOD = S32_MAX,
+};
+
+/* simple_offset_add() allocation range */
+enum {
+ DIR_OFFSET_MIN = DIR_OFFSET_FIRST + 1,
+ DIR_OFFSET_MAX = DIR_OFFSET_EOD - 1,
+};
+
+static void offset_set(struct dentry *dentry, long offset)
+{
+ dentry->d_fsdata = (void *)offset;
+}
+
+static long dentry2offset(struct dentry *dentry)
+{
+ return (long)dentry->d_fsdata;
+}
+
+static struct lock_class_key simple_offset_lock_class;
+
+/**
+ * simple_offset_init - initialize an offset_ctx
+ * @octx: directory offset map to be initialized
+ *
+ */
+void simple_offset_init(struct offset_ctx *octx)
+{
+ mt_init_flags(&octx->mt, MT_FLAGS_ALLOC_RANGE);
+ lockdep_set_class(&octx->mt.ma_lock, &simple_offset_lock_class);
+ octx->next_offset = DIR_OFFSET_MIN;
+}
+
+/**
+ * simple_offset_add - Add an entry to a directory's offset map
+ * @octx: directory offset ctx to be updated
+ * @dentry: new dentry being added
+ *
+ * Returns zero on success. @octx and the dentry's offset are updated.
+ * Otherwise, a negative errno value is returned.
+ */
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
+{
+ unsigned long offset;
+ int ret;
+
+ if (dentry2offset(dentry) != 0)
+ return -EBUSY;
+
+ ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
+ DIR_OFFSET_MAX, &octx->next_offset,
+ GFP_KERNEL);
+ if (unlikely(ret < 0))
+ return ret == -EBUSY ? -ENOSPC : ret;
+
+ offset_set(dentry, offset);
+ return 0;
+}
+
+static int simple_offset_replace(struct offset_ctx *octx, struct dentry *dentry,
+ long offset)
+{
+ int ret;
+
+ ret = mtree_store(&octx->mt, offset, dentry, GFP_KERNEL);
+ if (ret)
+ return ret;
+ offset_set(dentry, offset);
+ return 0;
+}
+
+/**
+ * simple_offset_remove - Remove an entry from a directory's offset map
+ * @octx: directory offset ctx to be updated
+ * @dentry: dentry being removed
+ *
+ */
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
+{
+ long offset;
+
+ offset = dentry2offset(dentry);
+ if (offset == 0)
+ return;
+
+ mtree_erase(&octx->mt, offset);
+ offset_set(dentry, 0);
+}
+
+/**
+ * simple_offset_rename - handle directory offsets for rename
+ * @old_dir: parent directory of source entry
+ * @old_dentry: dentry of source entry
+ * @new_dir: parent directory of destination entry
+ * @new_dentry: dentry of destination
+ *
+ * Caller provides appropriate serialization.
+ *
+ * User space expects the directory offset value of the replaced
+ * (new) directory entry to be unchanged after a rename.
+ *
+ * Returns zero on success, a negative errno value on failure.
+ */
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
- struct dentry *child = NULL;
- struct list_head *p = prev ? &prev->d_child : &parent->d_subdirs;
+ struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
+ struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
+ long new_offset = dentry2offset(new_dentry);
+
+ simple_offset_remove(old_ctx, old_dentry);
+
+ if (new_offset) {
+ offset_set(new_dentry, 0);
+ return simple_offset_replace(new_ctx, old_dentry, new_offset);
+ }
+ return simple_offset_add(new_ctx, old_dentry);
+}
+
+/**
+ * simple_offset_rename_exchange - exchange rename with directory offsets
+ * @old_dir: parent of dentry being moved
+ * @old_dentry: dentry being moved
+ * @new_dir: destination parent
+ * @new_dentry: destination dentry
+ *
+ * This API preserves the directory offset values. Caller provides
+ * appropriate serialization.
+ *
+ * Returns zero on success. Otherwise a negative errno is returned and the
+ * rename is rolled back.
+ */
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry)
+{
+ struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
+ struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
+ long old_index = dentry2offset(old_dentry);
+ long new_index = dentry2offset(new_dentry);
+ int ret;
+
+ simple_offset_remove(old_ctx, old_dentry);
+ simple_offset_remove(new_ctx, new_dentry);
+
+ ret = simple_offset_replace(new_ctx, old_dentry, new_index);
+ if (ret)
+ goto out_restore;
+
+ ret = simple_offset_replace(old_ctx, new_dentry, old_index);
+ if (ret) {
+ simple_offset_remove(new_ctx, old_dentry);
+ goto out_restore;
+ }
+
+ ret = simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
+ if (ret) {
+ simple_offset_remove(new_ctx, old_dentry);
+ simple_offset_remove(old_ctx, new_dentry);
+ goto out_restore;
+ }
+ return 0;
+
+out_restore:
+ (void)simple_offset_replace(old_ctx, old_dentry, old_index);
+ (void)simple_offset_replace(new_ctx, new_dentry, new_index);
+ return ret;
+}
+
+/**
+ * simple_offset_destroy - Release offset map
+ * @octx: directory offset ctx that is about to be destroyed
+ *
+ * During fs teardown (e.g. umount), a directory's offset map might still
+ * contain entries. mtree_destroy() cleans out anything that remains.
+ */
+void simple_offset_destroy(struct offset_ctx *octx)
+{
+ mtree_destroy(&octx->mt);
+}
+
+/**
+ * offset_dir_llseek - Advance the read position of a directory descriptor
+ * @file: an open directory whose position is to be updated
+ * @offset: a byte offset
+ * @whence: enumerator describing the starting position for this update
+ *
+ * SEEK_END, SEEK_DATA, and SEEK_HOLE are not supported for directories.
+ *
+ * Returns the updated read position if successful; otherwise a
+ * negative errno is returned and the read position remains unchanged.
+ */
+static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ offset += file->f_pos;
+ fallthrough;
+ case SEEK_SET:
+ if (offset >= 0)
+ break;
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ return vfs_setpos(file, offset, LONG_MAX);
+}
+
+static struct dentry *find_positive_dentry(struct dentry *parent,
+ struct dentry *dentry,
+ bool next)
+{
+ struct dentry *found = NULL;
spin_lock(&parent->d_lock);
- while ((p = p->next) != &parent->d_subdirs) {
- struct dentry *d = container_of(p, struct dentry, d_child);
+ if (next)
+ dentry = d_next_sibling(dentry);
+ else if (!dentry)
+ dentry = d_first_child(parent);
+ hlist_for_each_entry_from(dentry, d_sib) {
+ if (!simple_positive(dentry))
+ continue;
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(dentry))
+ found = dget_dlock(dentry);
+ spin_unlock(&dentry->d_lock);
+ if (likely(found))
+ break;
+ }
+ spin_unlock(&parent->d_lock);
+ return found;
+}
+
+static noinline_for_stack struct dentry *
+offset_dir_lookup(struct dentry *parent, loff_t offset)
+{
+ struct inode *inode = d_inode(parent);
+ struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
+ struct dentry *child, *found = NULL;
+
+ MA_STATE(mas, &octx->mt, offset, offset);
+
+ if (offset == DIR_OFFSET_FIRST)
+ found = find_positive_dentry(parent, NULL, false);
+ else {
+ rcu_read_lock();
+ child = mas_find_rev(&mas, DIR_OFFSET_MIN);
+ found = find_positive_dentry(parent, child, false);
+ rcu_read_unlock();
+ }
+ return found;
+}
+
+static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ return dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
+ inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+}
+
+static void offset_iterate_dir(struct file *file, struct dir_context *ctx)
+{
+ struct dentry *dir = file->f_path.dentry;
+ struct dentry *dentry;
+
+ dentry = offset_dir_lookup(dir, ctx->pos);
+ if (!dentry)
+ goto out_eod;
+ while (true) {
+ struct dentry *next;
+
+ ctx->pos = dentry2offset(dentry);
+ if (!offset_dir_emit(ctx, dentry))
+ break;
+
+ next = find_positive_dentry(dir, dentry, true);
+ dput(dentry);
+
+ if (!next)
+ goto out_eod;
+ dentry = next;
+ }
+ dput(dentry);
+ return;
+
+out_eod:
+ ctx->pos = DIR_OFFSET_EOD;
+}
+
+/**
+ * offset_readdir - Emit entries starting at offset @ctx->pos
+ * @file: an open directory to iterate over
+ * @ctx: directory iteration context
+ *
+ * Caller must hold @file's i_rwsem to prevent insertion or removal of
+ * entries during this call.
+ *
+ * On entry, @ctx->pos contains an offset that represents the first entry
+ * to be read from the directory.
+ *
+ * The operation continues until there are no more entries to read, or
+ * until the ctx->actor indicates there is no more space in the caller's
+ * output buffer.
+ *
+ * On return, @ctx->pos contains an offset that will read the next entry
+ * in this directory when offset_readdir() is called again with @ctx.
+ * Caller places this value in the d_off field of the last entry in the
+ * user's buffer.
+ *
+ * Return values:
+ * %0 - Complete
+ */
+static int offset_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct dentry *dir = file->f_path.dentry;
+
+ lockdep_assert_held(&d_inode(dir)->i_rwsem);
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+ if (ctx->pos != DIR_OFFSET_EOD)
+ offset_iterate_dir(file, ctx);
+ return 0;
+}
+
+const struct file_operations simple_offset_dir_operations = {
+ .llseek = offset_dir_llseek,
+ .iterate_shared = offset_readdir,
+ .read = generic_read_dir,
+ .fsync = noop_fsync,
+};
+
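To show how the new helpers above fit together, here is an editorial sketch (not part of this patch) of a ramfs-style filesystem that keeps one offset_ctx per directory, exposes it through ->get_offset_ctx(), and lets simple_offset_dir_operations drive readdir and llseek. All example_* names are hypothetical.

/* Editorial sketch of simple_offset_*() wiring; assumes the directory's
 * offset_ctx is stashed in inode->i_private when the directory is made.
 */
static struct offset_ctx *example_dir_ctx(struct inode *inode)
{
	return inode->i_private;
}

static const struct inode_operations example_dir_iops = {
	.lookup		= simple_lookup,
	.get_offset_ctx	= example_dir_ctx,
};

static void example_new_dir(struct inode *inode, struct offset_ctx *octx)
{
	inode->i_private = octx;
	simple_offset_init(octx);
	inode->i_op = &example_dir_iops;
	inode->i_fop = &simple_offset_dir_operations;
}

static int example_add_entry(struct inode *dir, struct dentry *dentry)
{
	/* called after the new entry is instantiated */
	return simple_offset_add(example_dir_ctx(dir), dentry);
}

static void example_del_entry(struct inode *dir, struct dentry *dentry)
{
	simple_offset_remove(example_dir_ctx(dir), dentry);
}

On directory eviction the filesystem would call simple_offset_destroy() on the same context, as the helper documented above requires.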
+struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
+{
+ struct dentry *child = NULL, *d;
+
+ spin_lock(&parent->d_lock);
+ d = prev ? d_next_sibling(prev) : d_first_child(parent);
+ hlist_for_each_entry_from(d, d_sib) {
if (simple_positive(d)) {
spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(d))
@@ -264,44 +600,45 @@ static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev
dput(prev);
return child;
}
+EXPORT_SYMBOL(find_next_child);
-void simple_recursive_removal(struct dentry *dentry,
- void (*callback)(struct dentry *))
+static void __simple_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *),
+ bool locked)
{
struct dentry *this = dget(dentry);
while (true) {
struct dentry *victim = NULL, *child;
struct inode *inode = this->d_inode;
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_CHILD);
if (d_is_dir(this))
inode->i_flags |= S_DEAD;
while ((child = find_next_child(this, victim)) == NULL) {
// kill and ascend
// update metadata while it's still locked
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
clear_nlink(inode);
inode_unlock(inode);
victim = this;
this = this->d_parent;
inode = this->d_inode;
- inode_lock(inode);
+ if (!locked || victim != dentry)
+ inode_lock_nested(inode, I_MUTEX_CHILD);
if (simple_positive(victim)) {
d_invalidate(victim); // avoid lost mounts
- if (d_is_dir(victim))
- fsnotify_rmdir(inode, victim);
- else
- fsnotify_unlink(inode, victim);
if (callback)
callback(victim);
- dput(victim); // unpin it
+ fsnotify_delete(inode, d_inode(victim), victim);
+ d_make_discardable(victim);
}
if (victim == dentry) {
- inode->i_ctime = inode->i_mtime =
- current_time(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
if (d_is_dir(dentry))
drop_nlink(inode);
- inode_unlock(inode);
+ if (!locked)
+ inode_unlock(inode);
dput(dentry);
return;
}
@@ -310,8 +647,35 @@ void simple_recursive_removal(struct dentry *dentry,
this = child;
}
}
+
+void simple_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *))
+{
+ return __simple_recursive_removal(dentry, callback, false);
+}
EXPORT_SYMBOL(simple_recursive_removal);
+void simple_remove_by_name(struct dentry *parent, const char *name,
+ void (*callback)(struct dentry *))
+{
+ struct dentry *dentry;
+
+ dentry = lookup_noperm_positive_unlocked(&QSTR(name), parent);
+ if (!IS_ERR(dentry)) {
+ simple_recursive_removal(dentry, callback);
+ dput(dentry); // paired with lookup_noperm_positive_unlocked()
+ }
+}
+EXPORT_SYMBOL(simple_remove_by_name);
+
+/* caller holds parent directory with I_MUTEX_PARENT */
+void locked_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *))
+{
+ return __simple_recursive_removal(dentry, callback, true);
+}
+EXPORT_SYMBOL(locked_recursive_removal);
+
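A short, hedged usage sketch of the new helper above (not taken from this patch): removing one named child of a pseudo-filesystem directory, with an optional per-victim cleanup hook. example_cleanup() is a hypothetical placeholder.

/* Editorial sketch of simple_remove_by_name() usage. */
static void example_cleanup(struct dentry *dentry)
{
	/* release any per-entry private state here */
}

static void example_remove_child(struct dentry *parent, const char *name)
{
	simple_remove_by_name(parent, name, example_cleanup);
}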
static const struct super_operations simple_super_operations = {
.statfs = simple_statfs,
};
@@ -326,8 +690,10 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
s->s_blocksize_bits = PAGE_SHIFT;
s->s_magic = ctx->magic;
s->s_op = ctx->ops ?: &simple_super_operations;
+ s->s_export_op = ctx->eops;
s->s_xattr = ctx->xattr;
s->s_time_gran = 1;
+ s->s_d_flags |= ctx->s_d_flags;
root = new_inode(s);
if (!root)
return -ENOMEM;
@@ -339,11 +705,11 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
*/
root->i_ino = 1;
root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
- root->i_atime = root->i_mtime = root->i_ctime = current_time(root);
+ simple_inode_init_ts(root);
s->s_root = d_make_root(root);
if (!s->s_root)
return -ENOMEM;
- s->s_d_op = ctx->dops;
+ set_default_d_op(s, ctx->dops);
return 0;
}
@@ -395,11 +761,11 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
{
struct inode *inode = d_inode(old_dentry);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inc_nlink(inode);
ihold(inode);
- dget(dentry);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
return 0;
}
EXPORT_SYMBOL(simple_link);
@@ -410,7 +776,7 @@ int simple_empty(struct dentry *dentry)
int ret = 0;
spin_lock(&dentry->d_lock);
- list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+ hlist_for_each_entry(child, &dentry->d_children, d_sib) {
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(child)) {
spin_unlock(&child->d_lock);
@@ -425,13 +791,28 @@ out:
}
EXPORT_SYMBOL(simple_empty);
-int simple_unlink(struct inode *dir, struct dentry *dentry)
+void __simple_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
drop_nlink(inode);
- dput(dentry);
+}
+EXPORT_SYMBOL(__simple_unlink);
+
+void __simple_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ drop_nlink(d_inode(dentry));
+ __simple_unlink(dir, dentry);
+ drop_nlink(dir);
+}
+EXPORT_SYMBOL(__simple_rmdir);
+
+int simple_unlink(struct inode *dir, struct dentry *dentry)
+{
+ __simple_unlink(dir, dentry);
+ d_make_discardable(dentry);
return 0;
}
EXPORT_SYMBOL(simple_unlink);
@@ -441,13 +822,38 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
- drop_nlink(d_inode(dentry));
- simple_unlink(dir, dentry);
- drop_nlink(dir);
+ __simple_rmdir(dir, dentry);
+ d_make_discardable(dentry);
return 0;
}
EXPORT_SYMBOL(simple_rmdir);
+/**
+ * simple_rename_timestamp - update the various inode timestamps for rename
+ * @old_dir: old parent directory
+ * @old_dentry: dentry that is being renamed
+ * @new_dir: new parent directory
+ * @new_dentry: target for rename
+ *
+ * POSIX mandates that the old and new parent directories have their ctime and
+ * mtime updated, and that inodes of @old_dentry and @new_dentry (if any), have
+ * their ctime updated.
+ */
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct inode *newino = d_inode(new_dentry);
+
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
+ if (new_dir != old_dir)
+ inode_set_mtime_to_ts(new_dir,
+ inode_set_ctime_current(new_dir));
+ inode_set_ctime_current(d_inode(old_dentry));
+ if (newino)
+ inode_set_ctime_current(newino);
+}
+EXPORT_SYMBOL_GPL(simple_rename_timestamp);
+
int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
@@ -463,20 +869,15 @@ int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
inc_nlink(old_dir);
}
}
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- d_inode(old_dentry)->i_ctime =
- d_inode(new_dentry)->i_ctime = current_time(old_dir);
-
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
return 0;
}
EXPORT_SYMBOL_GPL(simple_rename_exchange);
-int simple_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+int simple_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
- struct inode *inode = d_inode(old_dentry);
int they_are_dirs = d_is_dir(old_dentry);
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
@@ -499,16 +900,14 @@ int simple_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
inc_nlink(new_dir);
}
- old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
- new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
-
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
return 0;
}
EXPORT_SYMBOL(simple_rename);
/**
* simple_setattr - setattr for simple filesystem
- * @mnt_userns: user namespace of the target mount
+ * @idmap: idmap of the target mount
* @dentry: dentry
* @iattr: iattr structure
*
@@ -521,52 +920,51 @@ EXPORT_SYMBOL(simple_rename);
* on simple regular filesystems. Anything that needs to change on-disk
* or wire state on size changes needs its own setattr method.
*/
-int simple_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+int simple_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(mnt_userns, dentry, iattr);
+ error = setattr_prepare(idmap, dentry, iattr);
if (error)
return error;
if (iattr->ia_valid & ATTR_SIZE)
truncate_setsize(inode, iattr->ia_size);
- setattr_copy(mnt_userns, inode, iattr);
+ setattr_copy(idmap, inode, iattr);
mark_inode_dirty(inode);
return 0;
}
EXPORT_SYMBOL(simple_setattr);
-static int simple_readpage(struct file *file, struct page *page)
+static int simple_read_folio(struct file *file, struct folio *folio)
{
- clear_highpage(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
}
-int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int simple_write_begin(const struct kiocb *iocb, struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
- struct page *page;
- pgoff_t index;
+ struct folio *folio;
- index = pos >> PAGE_SHIFT;
+ folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
+ *foliop = folio;
- *pagep = page;
+ if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
+ size_t from = offset_in_folio(folio, pos);
- if (!PageUptodate(page) && (len != PAGE_SIZE)) {
- unsigned from = pos & (PAGE_SIZE - 1);
-
- zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
+ folio_zero_segments(folio, 0, from,
+ from + len, folio_size(folio));
}
return 0;
}
@@ -574,52 +972,54 @@ EXPORT_SYMBOL(simple_write_begin);
/**
* simple_write_end - .write_end helper for non-block-device FSes
- * @file: See .write_end of address_space_operations
+ * @iocb: kernel I/O control block
* @mapping: "
* @pos: "
* @len: "
* @copied: "
- * @page: "
+ * @folio: "
* @fsdata: "
*
- * simple_write_end does the minimum needed for updating a page after writing is
- * done. It has the same API signature as the .write_end of
+ * simple_write_end does the minimum needed for updating a folio after
+ * writing is done. It has the same API signature as the .write_end of
* address_space_operations vector. So it can just be set onto .write_end for
- * FSes that don't need any other processing. i_mutex is assumed to be held.
+ * FSes that don't need any other processing. i_rwsem is assumed to be held
+ * exclusively.
* Block based filesystems should use generic_write_end().
* NOTE: Even though i_size might get updated by this function, mark_inode_dirty
* is not called, so a filesystem that actually does store data in .write_inode
* should extend on what's done here with a call to mark_inode_dirty() in the
* case that i_size has changed.
*
- * Use *ONLY* with simple_readpage()
+ * Use *ONLY* with simple_read_folio()
*/
-static int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+static int simple_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;
- /* zero the stale part of the page if we did a short copy */
- if (!PageUptodate(page)) {
+ /* zero the stale part of the folio if we did a short copy */
+ if (!folio_test_uptodate(folio)) {
if (copied < len) {
- unsigned from = pos & (PAGE_SIZE - 1);
+ size_t from = offset_in_folio(folio, pos);
- zero_user(page, from + copied, len - copied);
+ folio_zero_range(folio, from + copied, len - copied);
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_mutex.
+ * cannot change under us because we hold the i_rwsem.
*/
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
@@ -628,10 +1028,10 @@ static int simple_write_end(struct file *file, struct address_space *mapping,
* Provides ramfs-style behavior: data in the pagecache, but no writeback.
*/
const struct address_space_operations ram_aops = {
- .readpage = simple_readpage,
+ .read_folio = simple_read_folio,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
- .set_page_dirty = __set_page_dirty_no_writeback,
+ .dirty_folio = noop_dirty_folio,
};
EXPORT_SYMBOL(ram_aops);
@@ -644,7 +1044,6 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
const struct tree_descr *files)
{
struct inode *inode;
- struct dentry *root;
struct dentry *dentry;
int i;
@@ -663,12 +1062,12 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
*/
inode->i_ino = 1;
inode->i_mode = S_IFDIR | 0755;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
set_nlink(inode, 2);
- root = d_make_root(inode);
- if (!root)
+ s->s_root = d_make_root(inode);
+ if (!s->s_root)
return -ENOMEM;
for (i = 0; !files->name || files->name[0]; i++, files++) {
if (!files->name)
@@ -680,27 +1079,22 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
"with an index of 1!\n", __func__,
s->s_type->name);
- dentry = d_alloc_name(root, files->name);
+ dentry = d_alloc_name(s->s_root, files->name);
if (!dentry)
- goto out;
+ return -ENOMEM;
inode = new_inode(s);
if (!inode) {
dput(dentry);
- goto out;
+ return -ENOMEM;
}
inode->i_mode = S_IFREG | files->mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_fop = files->ops;
inode->i_ino = i;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
}
- s->s_root = root;
return 0;
-out:
- d_genocide(root);
- shrink_dcache_parent(root);
- dput(root);
- return -ENOMEM;
}
EXPORT_SYMBOL(simple_fill_super);
@@ -994,8 +1388,8 @@ out:
EXPORT_SYMBOL_GPL(simple_attr_read);
/* interpret the buffer as a number to call the set function with */
-ssize_t simple_attr_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos, bool is_signed)
{
struct simple_attr *attr;
unsigned long long val;
@@ -1016,7 +1410,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
goto out;
attr->set_buf[size] = '\0';
- ret = kstrtoull(attr->set_buf, 0, &val);
+ if (is_signed)
+ ret = kstrtoll(attr->set_buf, 0, &val);
+ else
+ ret = kstrtoull(attr->set_buf, 0, &val);
if (ret)
goto out;
ret = attr->set(attr->data, val);
@@ -1026,8 +1423,62 @@ out:
mutex_unlock(&attr->mutex);
return ret;
}
+
+ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return simple_attr_write_xsigned(file, buf, len, ppos, false);
+}
EXPORT_SYMBOL_GPL(simple_attr_write);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return simple_attr_write_xsigned(file, buf, len, ppos, true);
+}
+EXPORT_SYMBOL_GPL(simple_attr_write_signed);
+
+/**
+ * generic_encode_ino32_fh - generic export_operations->encode_fh function
+ * @inode: the object to encode
+ * @fh: where to store the file handle fragment
+ * @max_len: maximum length to store there (in 4 byte units)
+ * @parent: parent directory inode, if wanted
+ *
+ * This generic encode_fh function assumes that the 32 inode number
+ * is suitable for locating an inode, and that the generation number
+ * can be used to check that it is still valid. It places them in the
+ * filehandle fragment where export_decode_fh expects to find them.
+ */
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent)
+{
+ struct fid *fid = (void *)fh;
+ int len = *max_len;
+ int type = FILEID_INO32_GEN;
+
+ if (parent && (len < 4)) {
+ *max_len = 4;
+ return FILEID_INVALID;
+ } else if (len < 2) {
+ *max_len = 2;
+ return FILEID_INVALID;
+ }
+
+ len = 2;
+ fid->i32.ino = inode->i_ino;
+ fid->i32.gen = inode->i_generation;
+ if (parent) {
+ fid->i32.parent_ino = parent->i_ino;
+ fid->i32.parent_gen = parent->i_generation;
+ len = 4;
+ type = FILEID_INO32_GEN_PARENT;
+ }
+ *max_len = len;
+ return type;
+}
+EXPORT_SYMBOL_GPL(generic_encode_ino32_fh);
+
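A hedged usage sketch (not from this patch): a filesystem whose inodes can be looked up again from (ino, generation) can pair the new generic_encode_ino32_fh() with the existing generic_fh_to_dentry() helper documented below. example_iget() is a hypothetical lookup routine.

/* Editorial sketch of export_operations wiring. */
static struct inode *example_iget(struct super_block *sb, u64 ino, u32 gen);

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					    struct fid *fid, int fh_len,
					    int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type, example_iget);
}

static const struct export_operations example_export_ops = {
	.encode_fh	= generic_encode_ino32_fh,
	.fh_to_dentry	= example_fh_to_dentry,
};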
/**
* generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
* @sb: filesystem to do the file handle conversion on
@@ -1118,9 +1569,9 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
inode_lock(inode);
ret = sync_mapping_buffers(inode->i_mapping);
- if (!(inode->i_state & I_DIRTY_ALL))
+ if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
goto out;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
goto out;
err = sync_inode_metadata(inode, 1);
@@ -1172,13 +1623,17 @@ EXPORT_SYMBOL(generic_file_fsync);
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
u64 last_fs_block = num_blocks - 1;
- u64 last_fs_page =
- last_fs_block >> (PAGE_SHIFT - blocksize_bits);
+ u64 last_fs_page, max_bytes;
+
+ if (check_shl_overflow(num_blocks, blocksize_bits, &max_bytes))
+ return -EFBIG;
+
+ last_fs_page = (max_bytes >> PAGE_SHIFT) - 1;
if (unlikely(num_blocks == 0))
return 0;
- if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
+ if (blocksize_bits < 9)
return -EINVAL;
if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
@@ -1198,17 +1653,6 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
EXPORT_SYMBOL(noop_fsync);
-void noop_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
-{
- /*
- * There is no page cache to invalidate in the dax case, however
- * we need this callback defined to prevent falling back to
- * block_invalidatepage() in do_invalidatepage().
- */
-}
-EXPORT_SYMBOL_GPL(noop_invalidatepage);
-
ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
/*
@@ -1231,7 +1675,7 @@ EXPORT_SYMBOL(kfree_link);
struct inode *alloc_anon_inode(struct super_block *s)
{
static const struct address_space_operations anon_aops = {
- .set_page_dirty = __set_page_dirty_no_writeback,
+ .dirty_folio = noop_dirty_folio,
};
struct inode *inode = new_inode_pseudo(s);
@@ -1247,12 +1691,16 @@ struct inode *alloc_anon_inode(struct super_block *s)
* list because mark_inode_dirty() will think
* that it already _is_ on the dirty list.
*/
- inode->i_state = I_DIRTY;
+ inode_state_assign_raw(inode, I_DIRTY);
+ /*
+ * Historically anonymous inodes don't have a type at all and
+ * userspace has come to rely on this.
+ */
inode->i_mode = S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_flags |= S_PRIVATE;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_flags |= S_PRIVATE | S_ANON_INODE;
+ simple_inode_init_ts(inode);
return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);
@@ -1268,7 +1716,7 @@ EXPORT_SYMBOL(alloc_anon_inode);
* All arguments are ignored and it just returns -EINVAL.
*/
int
-simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
+simple_nosetlease(struct file *filp, int arg, struct file_lease **flp,
void **priv)
{
return -EINVAL;
@@ -1309,16 +1757,7 @@ static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ENOENT);
}
-static int empty_dir_getattr(struct user_namespace *mnt_userns,
- const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
-{
- struct inode *inode = d_inode(path->dentry);
- generic_fillattr(&init_user_ns, inode, stat);
- return 0;
-}
-
-static int empty_dir_setattr(struct user_namespace *mnt_userns,
+static int empty_dir_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
return -EPERM;
@@ -1331,9 +1770,7 @@ static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t siz
static const struct inode_operations empty_dir_inode_operations = {
.lookup = empty_dir_lookup,
- .permission = generic_permission,
.setattr = empty_dir_setattr,
- .getattr = empty_dir_getattr,
.listxattr = empty_dir_listxattr,
};
@@ -1379,17 +1816,7 @@ bool is_empty_dir_inode(struct inode *inode)
(inode->i_op == &empty_dir_inode_operations);
}
-#ifdef CONFIG_UNICODE
-/*
- * Determine if the name of a dentry should be casefolded.
- *
- * Return: if names will need casefolding
- */
-static bool needs_casefold(const struct inode *dir)
-{
- return IS_CASEFOLDED(dir) && dir->i_sb->s_encoding;
-}
-
+#if IS_ENABLED(CONFIG_UNICODE)
/**
* generic_ci_d_compare - generic d_compare implementation for casefolding filesystems
* @dentry: dentry whose name we are checking against
@@ -1399,44 +1826,52 @@ static bool needs_casefold(const struct inode *dir)
*
* Return: 0 if names match, 1 if mismatch, or -ERRNO
*/
-static int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
- const char *str, const struct qstr *name)
+int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
{
- const struct dentry *parent = READ_ONCE(dentry->d_parent);
- const struct inode *dir = READ_ONCE(parent->d_inode);
- const struct super_block *sb = dentry->d_sb;
- const struct unicode_map *um = sb->s_encoding;
- struct qstr qstr = QSTR_INIT(str, len);
- char strbuf[DNAME_INLINE_LEN];
- int ret;
+ const struct dentry *parent;
+ const struct inode *dir;
+ union shortname_store strbuf;
+ struct qstr qstr;
- if (!dir || !needs_casefold(dir))
- goto fallback;
+ /*
+ * Attempt a case-sensitive match first. It is cheaper and
+ * should cover most lookups, including all the sane
+ * applications that expect a case-sensitive filesystem.
+ *
+ * This comparison is safe under RCU because the caller
+ * guarantees the consistency between str and len. See
+ * __d_lookup_rcu_op_compare() for details.
+ */
+ if (len == name->len && !memcmp(str, name->name, len))
+ return 0;
+
+ parent = READ_ONCE(dentry->d_parent);
+ dir = READ_ONCE(parent->d_inode);
+ if (!dir || !IS_CASEFOLDED(dir))
+ return 1;
+
+ qstr.len = len;
+ qstr.name = str;
/*
* If the dentry name is stored in-line, then it may be concurrently
* modified by a rename. If this happens, the VFS will eventually retry
* the lookup, so it doesn't matter what ->d_compare() returns.
* However, it's unsafe to call utf8_strncasecmp() with an unstable
* string. Therefore, we have to copy the name into a temporary buffer.
+ * As above, len is guaranteed to match str, so the shortname case
+ * is exactly when str points to ->d_shortname.
*/
- if (len <= DNAME_INLINE_LEN - 1) {
- memcpy(strbuf, str, len);
- strbuf[len] = 0;
- qstr.name = strbuf;
+ if (qstr.name == dentry->d_shortname.string) {
+ strbuf = dentry->d_shortname; // NUL is guaranteed to be in there
+ qstr.name = strbuf.string;
/* prevent compiler from optimizing out the temporary buffer */
barrier();
}
- ret = utf8_strncasecmp(um, name, &qstr);
- if (ret >= 0)
- return ret;
- if (sb_has_strict_encoding(sb))
- return -EINVAL;
-fallback:
- if (len != name->len)
- return 1;
- return !!memcmp(str, name->name, len);
+ return utf8_strncasecmp(dentry->d_sb->s_encoding, name, &qstr);
}
+EXPORT_SYMBOL(generic_ci_d_compare);
/**
* generic_ci_d_hash - generic d_hash implementation for casefolding filesystems
@@ -1445,14 +1880,14 @@ fallback:
*
* Return: 0 if hash was successful or unchanged, and -EINVAL on error
*/
-static int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
+int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
{
const struct inode *dir = READ_ONCE(dentry->d_inode);
struct super_block *sb = dentry->d_sb;
const struct unicode_map *um = sb->s_encoding;
- int ret = 0;
+ int ret;
- if (!dir || !needs_casefold(dir))
+ if (!dir || !IS_CASEFOLDED(dir))
return 0;
ret = utf8_casefold_hash(um, dentry, str);
@@ -1460,74 +1895,454 @@ static int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL(generic_ci_d_hash);
static const struct dentry_operations generic_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
-};
-#endif
-
#ifdef CONFIG_FS_ENCRYPTION
-static const struct dentry_operations generic_encrypted_dentry_ops = {
.d_revalidate = fscrypt_d_revalidate,
-};
#endif
-
-#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
-static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
- .d_hash = generic_ci_d_hash,
- .d_compare = generic_ci_d_compare,
- .d_revalidate = fscrypt_d_revalidate,
};
-#endif
/**
- * generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry
- * @dentry: dentry to set ops on
+ * generic_ci_match() - Match a name (case-insensitively) with a dirent.
+ * This is a filesystem helper for comparison with directory entries.
+ * generic_ci_d_compare should be used in VFS' ->d_compare instead.
*
- * Casefolded directories need d_hash and d_compare set, so that the dentries
- * contained in them are handled case-insensitively. Note that these operations
- * are needed on the parent directory rather than on the dentries in it, and
- * while the casefolding flag can be toggled on and off on an empty directory,
- * dentry_operations can't be changed later. As a result, if the filesystem has
- * casefolding support enabled at all, we have to give all dentries the
- * casefolding operations even if their inode doesn't have the casefolding flag
- * currently (and thus the casefolding ops would be no-ops for now).
+ * @parent: Inode of the parent of the dirent under comparison
+ * @name: name under lookup.
+ * @folded_name: Optional pre-folded name under lookup
+ * @de_name: Dirent name.
+ * @de_name_len: dirent name length.
*
- * Encryption works differently in that the only dentry operation it needs is
- * d_revalidate, which it only needs on dentries that have the no-key name flag.
- * The no-key flag can't be set "later", so we don't have to worry about that.
+ * Test whether a case-insensitive directory entry matches the filename
+ * being searched. If @folded_name is provided, it is used instead of
+ * recalculating the casefold of @name.
*
- * Finally, to maximize compatibility with overlayfs (which isn't compatible
- * with certain dentry operations) and to avoid taking an unnecessary
- * performance hit, we use custom dentry_operations for each possible
- * combination rather than always installing all operations.
+ * Return: > 0 if the directory entry matches, 0 if it doesn't match, or
+ * < 0 on error.
*/
-void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
+int generic_ci_match(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *folded_name,
+ const u8 *de_name, u32 de_name_len)
{
-#ifdef CONFIG_FS_ENCRYPTION
- bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME;
+ const struct super_block *sb = parent->i_sb;
+ const struct unicode_map *um = sb->s_encoding;
+ struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
+ struct qstr dirent = QSTR_INIT(de_name, de_name_len);
+ int res = 0;
+
+ if (IS_ENCRYPTED(parent)) {
+ const struct fscrypt_str encrypted_name =
+ FSTR_INIT((u8 *) de_name, de_name_len);
+
+ if (WARN_ON_ONCE(!fscrypt_has_encryption_key(parent)))
+ return -EINVAL;
+
+ decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
+ if (!decrypted_name.name)
+ return -ENOMEM;
+ res = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
+ &decrypted_name);
+ if (res < 0) {
+ kfree(decrypted_name.name);
+ return res;
+ }
+ dirent.name = decrypted_name.name;
+ dirent.len = decrypted_name.len;
+ }
+
+ /*
+ * Attempt a case-sensitive match first. It is cheaper and
+ * should cover most lookups, including all the sane
+ * applications that expect a case-sensitive filesystem.
+ */
+
+ if (dirent.len == name->len &&
+ !memcmp(name->name, dirent.name, dirent.len))
+ goto out;
+
+ if (folded_name->name)
+ res = utf8_strncasecmp_folded(um, folded_name, &dirent);
+ else
+ res = utf8_strncasecmp(um, name, &dirent);
+
+out:
+ kfree(decrypted_name.name);
+ if (res < 0 && sb_has_strict_encoding(sb)) {
+ pr_err_ratelimited("Directory contains filename that is invalid UTF-8");
+ return 0;
+ }
+ return !res;
+}
+EXPORT_SYMBOL(generic_ci_match);
#endif
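Because generic_ci_match() follows the return convention documented above (> 0 match, 0 no match, < 0 error), a casefolding filesystem's directory scan can use it directly. The sketch below is editorial, not part of this patch; struct example_dirent and the iterator functions are hypothetical stand-ins for the filesystem's own on-disk format.

/* Editorial sketch of a lookup loop built on generic_ci_match(). */
struct example_dirent {
	const u8	*name;
	u32		name_len;
};

struct example_dirent *example_first_entry(struct inode *dir);
struct example_dirent *example_next_entry(struct example_dirent *de);

static int example_find_entry(struct inode *dir, const struct qstr *name,
			      const struct qstr *folded_name)
{
	struct example_dirent *de;
	int ret;

	for (de = example_first_entry(dir); de; de = example_next_entry(de)) {
		ret = generic_ci_match(dir, name, folded_name,
				       de->name, de->name_len);
		if (ret < 0)
			return ret;	/* hard error, e.g. -ENOMEM */
		if (ret > 0)
			return 0;	/* match found */
	}
	return -ENOENT;
}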
-#ifdef CONFIG_UNICODE
- bool needs_ci_ops = dentry->d_sb->s_encoding;
+
+#ifdef CONFIG_FS_ENCRYPTION
+static const struct dentry_operations generic_encrypted_dentry_ops = {
+ .d_revalidate = fscrypt_d_revalidate,
+};
#endif
-#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
- if (needs_encrypt_ops && needs_ci_ops) {
- d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
+
+/**
+ * generic_set_sb_d_ops - helper for choosing the set of
+ * filesystem-wide dentry operations for the enabled features
+ * @sb: superblock to be configured
+ *
+ * Filesystems supporting casefolding and/or fscrypt can call this
+ * helper at mount-time to configure default dentry_operations to the
+ * best set of dentry operations required for the enabled features.
+ * The helper must be called after these have been configured, but
+ * before the root dentry is created.
+ */
+void generic_set_sb_d_ops(struct super_block *sb)
+{
+#if IS_ENABLED(CONFIG_UNICODE)
+ if (sb->s_encoding) {
+ set_default_d_op(sb, &generic_ci_dentry_ops);
return;
}
#endif
#ifdef CONFIG_FS_ENCRYPTION
- if (needs_encrypt_ops) {
- d_set_d_op(dentry, &generic_encrypted_dentry_ops);
+ if (sb->s_cop) {
+ set_default_d_op(sb, &generic_encrypted_dentry_ops);
return;
}
#endif
-#ifdef CONFIG_UNICODE
- if (needs_ci_ops) {
- d_set_d_op(dentry, &generic_ci_dentry_ops);
- return;
+}
+EXPORT_SYMBOL(generic_set_sb_d_ops);
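
A minimal mount-time sketch, assuming a hypothetical foo filesystem: the call must come after sb->s_encoding (casefolding) and sb->s_cop (fscrypt) are configured, and before d_make_root() creates the root dentry.

static int foo_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *root;

        /* ... sb->s_encoding and sb->s_cop set up here ... */

        generic_set_sb_d_ops(sb);       /* before any dentry exists */

        root = foo_iget_root(sb);       /* hypothetical root-inode helper */
        if (IS_ERR(root))
                return PTR_ERR(root);
        sb->s_root = d_make_root(root);
        return sb->s_root ? 0 : -ENOMEM;
}
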
+
+/**
+ * inode_maybe_inc_iversion - increments i_version
+ * @inode: inode with the i_version that should be updated
+ * @force: increment the counter even if it's not necessary?
+ *
+ * Every time the inode is modified, the i_version field must be seen to have
+ * changed by any observer.
+ *
+ * If "force" is set or the QUERIED flag is set, then ensure that we increment
+ * the value, and clear the queried flag.
+ *
+ * In the common case where neither is set, we can return "false" without
+ * updating i_version.
+ *
+ * If this function returns false, and no other metadata has changed, then we
+ * can avoid logging the metadata.
+ */
+bool inode_maybe_inc_iversion(struct inode *inode, bool force)
+{
+ u64 cur, new;
+
+ /*
+ * The i_version field is not strictly ordered with any other inode
+ * information, but the legacy inode_inc_iversion code used a spinlock
+ * to serialize increments.
+ *
+ * We add a full memory barrier to ensure that any de facto ordering
+ * with other state is preserved (either implicitly coming from cmpxchg
+ * or explicitly from smp_mb if we don't know upfront if we will execute
+ * the former).
+ *
+ * These barriers pair with inode_query_iversion().
+ */
+ cur = inode_peek_iversion_raw(inode);
+ if (!force && !(cur & I_VERSION_QUERIED)) {
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
}
-#endif
+
+ do {
+ /* If flag is clear then we needn't do anything */
+ if (!force && !(cur & I_VERSION_QUERIED))
+ return false;
+
+ /* Since lowest bit is flag, add 2 to avoid it */
+ new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
+ return true;
+}
+EXPORT_SYMBOL(inode_maybe_inc_iversion);
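
As an illustration (not from the patch), a write path might use the return value to skip a metadata update when nobody has queried i_version; foo_update_inode_on_write is hypothetical and the declarations come from <linux/iversion.h>:

static void foo_update_inode_on_write(struct inode *inode)
{
        /* force == false: only bump (and log) if the counter was queried */
        if (inode_maybe_inc_iversion(inode, false))
                mark_inode_dirty(inode);
}
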
+
+/**
+ * inode_query_iversion - read i_version for later use
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter. This should be used by callers that wish
+ * to store the returned i_version for later comparison. This will guarantee
+ * that a later query of the i_version will result in a different value if
+ * anything has changed.
+ *
+ * In this implementation, we fetch the current value, set the QUERIED flag and
+ * then try to swap it into place with a cmpxchg, if it wasn't already set. If
+ * that fails, we try again with the newly fetched value from the cmpxchg.
+ */
+u64 inode_query_iversion(struct inode *inode)
+{
+ u64 cur, new;
+ bool fenced = false;
+
+ /*
+ * Memory barriers (implicit in cmpxchg, explicit in smp_mb) pair with
+ * inode_maybe_inc_iversion(), see that routine for more details.
+ */
+ cur = inode_peek_iversion_raw(inode);
+ do {
+ /* If flag is already set, then no need to swap */
+ if (cur & I_VERSION_QUERIED) {
+ if (!fenced)
+ smp_mb();
+ break;
+ }
+
+ fenced = true;
+ new = cur | I_VERSION_QUERIED;
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
+ return cur >> I_VERSION_QUERIED_SHIFT;
+}
+EXPORT_SYMBOL(inode_query_iversion);
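
The consumer side pairs with the sketch above; a hedged example of change detection across two samples (foo_inode_changed is hypothetical):

static bool foo_inode_changed(struct inode *inode, u64 *cookie)
{
        u64 cur = inode_query_iversion(inode); /* sets the QUERIED flag */
        bool changed = (cur != *cookie);

        *cookie = cur;
        return changed;
}
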
+
+ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ ssize_t direct_written, ssize_t buffered_written)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ loff_t pos = iocb->ki_pos - buffered_written;
+ loff_t end = iocb->ki_pos - 1;
+ int err;
+
+ /*
+ * If the buffered write fallback returned an error, we want to return
+ * the number of bytes which were written by direct I/O, or the error
+ * code if that was zero.
+ *
+ * Note that this differs from normal direct-io semantics, which will
+ * return -EFOO even if some bytes were written.
+ */
+ if (unlikely(buffered_written < 0)) {
+ if (direct_written)
+ return direct_written;
+ return buffered_written;
+ }
+
+ /*
+ * We need to ensure that the page cache pages are written to disk and
+ * invalidated to preserve the expected O_DIRECT semantics.
+ */
+ err = filemap_write_and_wait_range(mapping, pos, end);
+ if (err < 0) {
+ /*
+ * We don't know how much we wrote, so just return the number of
+ * bytes which were direct-written
+ */
+ iocb->ki_pos -= buffered_written;
+ if (direct_written)
+ return direct_written;
+ return err;
+ }
+ invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ return direct_written + buffered_written;
+}
+EXPORT_SYMBOL_GPL(direct_write_fallback);
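
A hedged sketch of the intended caller shape (names prefixed foo_ are hypothetical; O_DIRECT checks and locking are omitted): perform the direct write, fall back to buffered I/O for whatever remains, then let direct_write_fallback() reconcile the two results.

static ssize_t foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t direct_written, buffered_written;

        direct_written = foo_direct_write(iocb, from);
        if (direct_written < 0 || !iov_iter_count(from))
                return direct_written;

        buffered_written = foo_buffered_write(iocb, from);
        return direct_write_fallback(iocb, from, direct_written,
                                     buffered_written);
}
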
+
+/**
+ * simple_inode_init_ts - initialize the timestamps for a new inode
+ * @inode: inode to be initialized
+ *
+ * When a new inode is created, most filesystems set the timestamps to the
+ * current time. Add a helper to do this.
+ */
+struct timespec64 simple_inode_init_ts(struct inode *inode)
+{
+ struct timespec64 ts = inode_set_ctime_current(inode);
+
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
+ return ts;
+}
+EXPORT_SYMBOL(simple_inode_init_ts);
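
Typical use is a one-liner in an inode-allocation path; a sketch with a hypothetical foo_new_inode():

static struct inode *foo_new_inode(struct super_block *sb, umode_t mode)
{
        struct inode *inode = new_inode(sb);

        if (!inode)
                return NULL;
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        simple_inode_init_ts(inode);    /* atime = mtime = ctime = now */
        return inode;
}
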
+
+struct dentry *stashed_dentry_get(struct dentry **stashed)
+{
+ struct dentry *dentry;
+
+ guard(rcu)();
+ dentry = rcu_dereference(*stashed);
+ if (!dentry)
+ return NULL;
+ if (IS_ERR(dentry))
+ return dentry;
+ if (!lockref_get_not_dead(&dentry->d_lockref))
+ return NULL;
+ return dentry;
+}
+
+static struct dentry *prepare_anon_dentry(struct dentry **stashed,
+ struct super_block *sb,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+ const struct stashed_operations *sops = sb->s_fs_info;
+ int ret;
+
+ inode = new_inode_pseudo(sb);
+ if (!inode) {
+ sops->put_data(data);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode = S_IFREG;
+ simple_inode_init_ts(inode);
+
+ ret = sops->init_inode(inode, data);
+ if (ret < 0) {
+ iput(inode);
+ return ERR_PTR(ret);
+ }
+
+ /* Notice when this is changed. */
+ WARN_ON_ONCE(!S_ISREG(inode->i_mode));
+
+ dentry = d_alloc_anon(sb);
+ if (!dentry) {
+ iput(inode);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Store address of location where dentry's supposed to be stashed. */
+ dentry->d_fsdata = stashed;
+
+ /* @data is now owned by the fs */
+ d_instantiate(dentry, inode);
+ return dentry;
+}
+
+struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry)
+{
+ guard(rcu)();
+ for (;;) {
+ struct dentry *old;
+
+ /* Assume any old dentry was cleared out. */
+ old = cmpxchg(stashed, NULL, dentry);
+ if (likely(!old))
+ return dentry;
+
+ /* Check if somebody else installed a reusable dentry. */
+ if (lockref_get_not_dead(&old->d_lockref))
+ return old;
+
+ /* There's an old dead dentry there, try to take it over. */
+ if (likely(try_cmpxchg(stashed, &old, dentry)))
+ return dentry;
+ }
+}
+
+/**
+ * path_from_stashed - create path from stashed or new dentry
+ * @stashed: where to retrieve or stash dentry
+ * @mnt: vfsmount of the filesystem to use
+ * @data: data to store in inode->i_private
+ * @path: path to create
+ *
+ * The function tries to retrieve a stashed dentry from @stashed. If the dentry
+ * is still valid then it will be reused. If the dentry can't be reused, the
+ * function will allocate a new dentry and inode. It will then check again whether it
+ * can reuse an existing dentry in case one has been added in the meantime or
+ * update @stashed with the newly added dentry.
+ *
+ * Special-purpose helper for nsfs and pidfs.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
+ struct path *path)
+{
+ struct dentry *dentry, *res;
+ const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info;
+
+ /* See if dentry can be reused. */
+ res = stashed_dentry_get(stashed);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+ if (res) {
+ sops->put_data(data);
+ goto make_path;
+ }
+
+ /* Allocate a new dentry. */
+ dentry = prepare_anon_dentry(stashed, mnt->mnt_sb, data);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ /* Added a new dentry. @data is now owned by the filesystem. */
+ if (sops->stash_dentry)
+ res = sops->stash_dentry(stashed, dentry);
+ else
+ res = stash_dentry(stashed, dentry);
+ if (IS_ERR(res)) {
+ dput(dentry);
+ return PTR_ERR(res);
+ }
+ if (res != dentry)
+ dput(dentry);
+
+make_path:
+ path->dentry = res;
+ path->mnt = mntget(mnt);
+ VFS_WARN_ON_ONCE(path->dentry->d_fsdata != stashed);
+ VFS_WARN_ON_ONCE(d_inode(path->dentry)->i_private != data);
+ return 0;
+}
+
+void stashed_dentry_prune(struct dentry *dentry)
+{
+ struct dentry **stashed = dentry->d_fsdata;
+ struct inode *inode = d_inode(dentry);
+
+ if (WARN_ON_ONCE(!stashed))
+ return;
+
+ if (!inode)
+ return;
+
+ /*
+ * Only replace our own @dentry as someone else might've
+ * already cleared out @dentry and stashed their own
+ * dentry in there.
+ */
+ cmpxchg(stashed, dentry, NULL);
+}
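
To connect the pieces above, a hedged sketch of the nsfs/pidfs-style wiring (all foo_* names hypothetical; these helpers live in fs/internal.h, so the pattern only applies to in-tree pseudo filesystems): the superblock's s_fs_info points at a struct stashed_operations, the dentry_operations use stashed_dentry_prune for ->d_prune, and callers obtain a struct path via path_from_stashed().

static int foo_init_inode(struct inode *inode, void *data)
{
        inode->i_private = data;        /* checked by path_from_stashed() */
        inode->i_mode |= 0444;
        return 0;
}

static void foo_put_data(void *data)
{
        kfree(data);
}

static const struct stashed_operations foo_stashed_ops = {
        .init_inode     = foo_init_inode,
        .put_data       = foo_put_data,
};

static int foo_get_path(struct vfsmount *mnt, struct dentry **stash,
                        void *data, struct path *path)
{
        /* reuses the stashed dentry when possible, else allocates a new one */
        return path_from_stashed(stash, mnt, data, path);
}
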
+
+/**
+ * simple_start_creating - prepare to create a given name
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * The required lock is taken and a lookup is performed prior to creating an
+ * object in a directory. No permission checking is performed.
+ *
+ * Returns: a negative dentry on which vfs_create() or similar may
+ * be attempted, or an error.
+ */
+struct dentry *simple_start_creating(struct dentry *parent, const char *name)
+{
+ struct qstr qname = QSTR(name);
+ int err;
+
+ err = lookup_noperm_common(&qname, parent);
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, &qname, LOOKUP_CREATE | LOOKUP_EXCL);
+}
+EXPORT_SYMBOL(simple_start_creating);
+
+/* parent must have been held exclusive since simple_start_creating() */
+void simple_done_creating(struct dentry *child)
+{
+ inode_unlock(child->d_parent->d_inode);
+ dput(child);
}
-EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);
+EXPORT_SYMBOL(simple_done_creating);
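
A short usage sketch (foo_create_file is hypothetical, and the vfs_create() call assumes its current int-returning signature): the helper returns a negative dentry with the parent locked; simple_done_creating() then drops both the lock and the dentry reference.

static int foo_create_file(struct dentry *parent, const char *name)
{
        struct dentry *child;
        int err;

        child = simple_start_creating(parent, name);
        if (IS_ERR(child))
                return PTR_ERR(child);

        err = vfs_create(&nop_mnt_idmap, d_inode(parent), child, 0644, true);

        simple_done_creating(child);    /* unlocks parent and dputs child */
        return err;
}
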
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 6d5e83ed4476..51bbe22d21e3 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -3,10 +3,11 @@
# Makefile for the linux lock manager stuff
#
+ccflags-y += -I$(src) # needed for trace events
+
obj-$(CONFIG_LOCKD) += lockd.o
-lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
- svcshare.o svcproc.o svcsubs.o mon.o xdr.o
-lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
-lockd-objs-$(CONFIG_PROC_FS) += procfs.o
-lockd-objs := $(lockd-objs-y)
+lockd-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+ svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o netlink.o
+lockd-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
+lockd-$(CONFIG_PROC_FS) += procfs.o
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index 7df6324ccb8a..527458db4525 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -243,7 +243,7 @@ static void encode_nlm4_holder(struct xdr_stream *xdr,
u64 l_offset, l_len;
__be32 *p;
- encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_RDLCK);
encode_int32(xdr, lock->svid);
encode_netobj(xdr, lock->oh.data, lock->oh.len);
@@ -261,7 +261,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
u32 exclusive;
int error;
__be32 *p;
- s32 end;
memset(lock, 0, sizeof(*lock));
locks_init_lock(fl);
@@ -271,7 +270,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
goto out_overflow;
exclusive = be32_to_cpup(p++);
lock->svid = be32_to_cpup(p);
- fl->fl_pid = (pid_t)lock->svid;
+ fl->c.flc_pid = (pid_t)lock->svid;
error = decode_netobj(xdr, &lock->oh);
if (unlikely(error))
@@ -281,17 +280,11 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
if (unlikely(p == NULL))
goto out_overflow;
- fl->fl_flags = FL_POSIX;
- fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
+ fl->c.flc_flags = FL_POSIX;
+ fl->c.flc_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
p = xdr_decode_hyper(p, &l_offset);
xdr_decode_hyper(p, &l_len);
- end = l_offset + l_len - 1;
-
- fl->fl_start = (loff_t)l_offset;
- if (l_len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = (loff_t)end;
+ nlm4svc_set_file_lock_range(fl, l_offset, l_len);
error = 0;
out:
return error;
@@ -364,7 +357,7 @@ static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
- encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK);
encode_nlm4_lock(xdr, lock);
}
@@ -387,7 +380,7 @@ static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
encode_cookie(xdr, &args->cookie);
encode_bool(xdr, args->block);
- encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK);
encode_nlm4_lock(xdr, lock);
encode_bool(xdr, args->reclaim);
encode_int32(xdr, args->state);
@@ -410,7 +403,7 @@ static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
encode_cookie(xdr, &args->cookie);
encode_bool(xdr, args->block);
- encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK);
encode_nlm4_lock(xdr, lock);
}
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index a5bb3f721a9d..a7e0519ec024 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -14,9 +14,12 @@
#include <linux/nfs_fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
+#include "trace.h"
+
#define NLMDBG_FACILITY NLMDBG_CLIENT
/*
@@ -29,18 +32,6 @@ static int reclaimer(void *ptr);
* client perspective.
*/
-/*
- * This is the representation of a blocked client lock.
- */
-struct nlm_wait {
- struct list_head b_list; /* linked list */
- wait_queue_head_t b_wait; /* where to wait on */
- struct nlm_host * b_host;
- struct file_lock * b_lock; /* local file lock */
- unsigned short b_reclaim; /* got to reclaim lock */
- __be32 b_status; /* grant callback status */
-};
-
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
@@ -94,41 +85,48 @@ void nlmclnt_done(struct nlm_host *host)
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
+void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host, struct file_lock *fl)
+{
+ block->b_host = host;
+ block->b_lock = fl;
+ init_waitqueue_head(&block->b_wait);
+ block->b_status = nlm_lck_blocked;
+}
+
+struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host)
+{
+ return host->h_rpcclnt;
+}
+EXPORT_SYMBOL_GPL(nlmclnt_rpc_clnt);
+
/*
* Queue up a lock for blocking so that the GRANTED request can see it
*/
-struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
+void nlmclnt_queue_block(struct nlm_wait *block)
{
- struct nlm_wait *block;
-
- block = kmalloc(sizeof(*block), GFP_KERNEL);
- if (block != NULL) {
- block->b_host = host;
- block->b_lock = fl;
- init_waitqueue_head(&block->b_wait);
- block->b_status = nlm_lck_blocked;
-
- spin_lock(&nlm_blocked_lock);
- list_add(&block->b_list, &nlm_blocked);
- spin_unlock(&nlm_blocked_lock);
- }
- return block;
+ spin_lock(&nlm_blocked_lock);
+ list_add(&block->b_list, &nlm_blocked);
+ spin_unlock(&nlm_blocked_lock);
}
-void nlmclnt_finish_block(struct nlm_wait *block)
+/*
+ * Dequeue the block and return its final status
+ */
+__be32 nlmclnt_dequeue_block(struct nlm_wait *block)
{
- if (block == NULL)
- return;
+ __be32 status;
+
spin_lock(&nlm_blocked_lock);
list_del(&block->b_list);
+ status = block->b_status;
spin_unlock(&nlm_blocked_lock);
- kfree(block);
+ return status;
}
/*
* Block on a lock
*/
-int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
+int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
long ret;
@@ -154,7 +152,6 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
/* Reset the lock status after a server reboot so we resend */
if (block->b_status == nlm_lck_denied_grace_period)
block->b_status = nlm_lck_blocked;
- req->a_res.status = block->b_status;
return 0;
}
@@ -188,7 +185,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
continue;
if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
continue;
- if (nfs_compare_fh(NFS_FH(locks_inode(fl_blocked->fl_file)), fh) != 0)
+ if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->c.flc_file)), fh) != 0)
continue;
/* Alright, we found a lock. Set the return status
* and wake up the caller
@@ -198,6 +195,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
res = nlm_granted;
}
spin_unlock(&nlm_blocked_lock);
+ trace_nlmclnt_grant(lock, addr, svc_addr_len(addr), res);
return res;
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 99fffc9cb958..cebcc283b7ce 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
@@ -19,6 +20,8 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
+#include "trace.h"
+
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
#define NLMCLNT_POLL_TIMEOUT (30*HZ)
@@ -130,7 +133,8 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
char *nodename = req->a_host->h_rpcclnt->cl_nodename;
nlmclnt_next_cookie(&argp->cookie);
- memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
+ memcpy(&lock->fh, NFS_FH(file_inode(fl->c.flc_file)),
+ sizeof(struct nfs_fh));
lock->caller = nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
@@ -139,7 +143,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
lock->svid = fl->fl_u.nfs_fl.owner->pid;
lock->fl.fl_start = fl->fl_start;
lock->fl.fl_end = fl->fl_end;
- lock->fl.fl_type = fl->fl_type;
+ lock->fl.c.flc_type = fl->c.flc_type;
}
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
@@ -179,7 +183,7 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *dat
call->a_callback_data = data;
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
- if (fl->fl_type != F_UNLCK) {
+ if (fl->c.flc_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
status = nlmclnt_lock(call, fl);
} else
@@ -429,13 +433,14 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
int status;
- status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
+ status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req,
+ NLMPROC_TEST);
if (status < 0)
goto out;
switch (req->a_res.status) {
case nlm_granted:
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
break;
case nlm_lck_denied:
/*
@@ -443,13 +448,16 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
*/
fl->fl_start = req->a_res.lock.fl.fl_start;
fl->fl_end = req->a_res.lock.fl.fl_end;
- fl->fl_type = req->a_res.lock.fl.fl_type;
- fl->fl_pid = -req->a_res.lock.fl.fl_pid;
+ fl->c.flc_type = req->a_res.lock.fl.c.flc_type;
+ fl->c.flc_pid = -req->a_res.lock.fl.c.flc_pid;
break;
default:
status = nlm_stat_to_errno(req->a_res.status);
}
out:
+ trace_nlmclnt_test(&req->a_args.lock,
+ (const struct sockaddr *)&req->a_host->h_addr,
+ req->a_host->h_addrlen, req->a_res.status);
nlmclnt_release_call(req);
return status;
}
@@ -479,14 +487,15 @@ static const struct file_lock_operations nlmclnt_lock_ops = {
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
fl->fl_u.nfs_fl.state = 0;
- fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
+ fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host,
+ fl->c.flc_owner);
INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
fl->fl_ops = &nlmclnt_lock_ops;
}
static int do_vfs_lock(struct file_lock *fl)
{
- return locks_lock_file_wait(fl->fl_file, fl);
+ return locks_lock_file_wait(fl->c.flc_file, fl);
}
/*
@@ -512,49 +521,60 @@ static int do_vfs_lock(struct file_lock *fl)
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
- const struct cred *cred = nfs_file_cred(fl->fl_file);
+ const struct cred *cred = nfs_file_cred(fl->c.flc_file);
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
- struct nlm_wait *block = NULL;
- unsigned char fl_flags = fl->fl_flags;
- unsigned char fl_type;
+ struct nlm_wait block;
+ unsigned char flags = fl->c.flc_flags;
+ unsigned char type;
+ __be32 b_status;
int status = -ENOLCK;
if (nsm_monitor(host) < 0)
goto out;
req->a_args.state = nsm_local_state;
- fl->fl_flags |= FL_ACCESS;
+ fl->c.flc_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
- fl->fl_flags = fl_flags;
+ fl->c.flc_flags = flags;
if (status < 0)
goto out;
- block = nlmclnt_prepare_block(host, fl);
+ nlmclnt_prepare_block(&block, host, fl);
again:
/*
* Initialise resp->status to a valid non-zero value,
* since 0 == nlm_lck_granted
*/
resp->status = nlm_lck_blocked;
- for(;;) {
+
+ /*
+ * A GRANTED callback can come at any time -- even before the reply
+ * to the LOCK request arrives, so we queue the wait before
+ * requesting the lock.
+ */
+ nlmclnt_queue_block(&block);
+ for (;;) {
/* Reboot protection */
fl->fl_u.nfs_fl.state = host->h_state;
status = nlmclnt_call(cred, req, NLMPROC_LOCK);
if (status < 0)
break;
/* Did a reclaimer thread notify us of a server reboot? */
- if (resp->status == nlm_lck_denied_grace_period)
+ if (resp->status == nlm_lck_denied_grace_period)
continue;
if (resp->status != nlm_lck_blocked)
break;
/* Wait on an NLM blocking lock */
- status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT);
if (status < 0)
break;
- if (resp->status != nlm_lck_blocked)
+ if (block.b_status != nlm_lck_blocked)
break;
}
+ b_status = nlmclnt_dequeue_block(&block);
+ if (resp->status == nlm_lck_blocked)
+ resp->status = b_status;
/* if we were interrupted while blocking, then cancel the lock request
* and exit
@@ -563,7 +583,7 @@ again:
if (!req->a_args.block)
goto out_unlock;
if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
- goto out_unblock;
+ goto out;
}
if (resp->status == nlm_granted) {
@@ -574,11 +594,11 @@ again:
goto again;
}
/* Ensure the resulting lock will get added to granted list */
- fl->fl_flags |= FL_SLEEP;
+ fl->c.flc_flags |= FL_SLEEP;
if (do_vfs_lock(fl) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
up_read(&host->h_rwsem);
- fl->fl_flags = fl_flags;
+ fl->c.flc_flags = flags;
status = 0;
}
if (status < 0)
@@ -588,27 +608,30 @@ again:
* cases NLM_LCK_DENIED is returned for a permanent error. So
* turn it into an ENOLCK.
*/
- if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
+ if (resp->status == nlm_lck_denied && (flags & FL_SLEEP))
status = -ENOLCK;
else
status = nlm_stat_to_errno(resp->status);
-out_unblock:
- nlmclnt_finish_block(block);
out:
+ trace_nlmclnt_lock(&req->a_args.lock,
+ (const struct sockaddr *)&req->a_host->h_addr,
+ req->a_host->h_addrlen, req->a_res.status);
nlmclnt_release_call(req);
return status;
out_unlock:
/* Fatal error: ensure that we remove the lock altogether */
+ trace_nlmclnt_lock(&req->a_args.lock,
+ (const struct sockaddr *)&req->a_host->h_addr,
+ req->a_host->h_addrlen, req->a_res.status);
dprintk("lockd: lock attempt ended in fatal error.\n"
" Attempting to unlock.\n");
- nlmclnt_finish_block(block);
- fl_type = fl->fl_type;
- fl->fl_type = F_UNLCK;
+ type = fl->c.flc_type;
+ fl->c.flc_type = F_UNLCK;
down_read(&host->h_rwsem);
do_vfs_lock(fl);
up_read(&host->h_rwsem);
- fl->fl_type = fl_type;
- fl->fl_flags = fl_flags;
+ fl->c.flc_type = type;
+ fl->c.flc_flags = flags;
nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
return status;
}
@@ -631,12 +654,14 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
nlmclnt_setlockargs(req, fl);
req->a_args.reclaim = 1;
- status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
+ status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req,
+ NLMPROC_LOCK);
if (status >= 0 && req->a_res.status == nlm_granted)
return 0;
printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
- "(errno %d, status %d)\n", fl->fl_pid,
+ "(errno %d, status %d)\n",
+ fl->c.flc_pid,
status, ntohl(req->a_res.status));
/*
@@ -663,26 +688,26 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
int status;
- unsigned char fl_flags = fl->fl_flags;
+ unsigned char flags = fl->c.flc_flags;
/*
* Note: the server is supposed to either grant us the unlock
* request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
* case, we want to unlock.
*/
- fl->fl_flags |= FL_EXISTS;
+ fl->c.flc_flags |= FL_EXISTS;
down_read(&host->h_rwsem);
status = do_vfs_lock(fl);
up_read(&host->h_rwsem);
- fl->fl_flags = fl_flags;
+ fl->c.flc_flags = flags;
if (status == -ENOENT) {
status = 0;
goto out;
}
refcount_inc(&req->a_count);
- status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
- NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
+ status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req,
+ NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
if (status < 0)
goto out;
@@ -695,6 +720,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
/* What to do now? I'm out of my depth... */
status = -ENOLCK;
out:
+ trace_nlmclnt_unlock(&req->a_args.lock,
+ (const struct sockaddr *)&req->a_host->h_addr,
+ req->a_host->h_addrlen, req->a_res.status);
nlmclnt_release_call(req);
return status;
}
@@ -772,8 +800,8 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
req->a_args.block = block;
refcount_inc(&req->a_count);
- status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
- NLMPROC_CANCEL, &nlmclnt_cancel_ops);
+ status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req,
+ NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status == 0 && req->a_res.status == nlm_lck_denied)
status = -ENOLCK;
nlmclnt_release_call(req);
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index 4df62f635529..6ea3448d2d31 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -2,8 +2,9 @@
/*
* linux/fs/lockd/clntxdr.c
*
- * XDR functions to encode/decode NLM version 3 RPC arguments and results.
- * NLM version 3 is backwards compatible with NLM versions 1 and 2.
+ * XDR functions to encode/decode NLM version 1 and 3 RPC
+ * arguments and results. NLM version 2 is not specified
+ * by a standard, thus it is not implemented.
*
* NLM client-side only.
*
@@ -238,7 +239,7 @@ static void encode_nlm_holder(struct xdr_stream *xdr,
u32 l_offset, l_len;
__be32 *p;
- encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_RDLCK);
encode_int32(xdr, lock->svid);
encode_netobj(xdr, lock->oh.data, lock->oh.len);
@@ -265,7 +266,7 @@ static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
goto out_overflow;
exclusive = be32_to_cpup(p++);
lock->svid = be32_to_cpup(p);
- fl->fl_pid = (pid_t)lock->svid;
+ fl->c.flc_pid = (pid_t)lock->svid;
error = decode_netobj(xdr, &lock->oh);
if (unlikely(error))
@@ -275,8 +276,8 @@ static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
if (unlikely(p == NULL))
goto out_overflow;
- fl->fl_flags = FL_POSIX;
- fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
+ fl->c.flc_flags = FL_POSIX;
+ fl->c.flc_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
l_offset = be32_to_cpup(p++);
l_len = be32_to_cpup(p);
end = l_offset + l_len - 1;
@@ -357,7 +358,7 @@ static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
const struct nlm_lock *lock = &args->lock;
encode_cookie(xdr, &args->cookie);
- encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK);
encode_nlm_lock(xdr, lock);
}
@@ -380,7 +381,7 @@ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
encode_cookie(xdr, &args->cookie);
encode_bool(xdr, args->block);
- encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK);
encode_nlm_lock(xdr, lock);
encode_bool(xdr, args->reclaim);
encode_int32(xdr, args->state);
@@ -403,7 +404,7 @@ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
encode_cookie(xdr, &args->cookie);
encode_bool(xdr, args->block);
- encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK);
encode_nlm_lock(xdr, lock);
}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index f802223e71ab..5e6877c37f73 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -117,7 +117,6 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
if (nsm != NULL)
refcount_inc(&nsm->sm_count);
else {
- host = NULL;
nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
ni->hostname, ni->hostname_len);
if (unlikely(nsm == NULL)) {
@@ -164,7 +163,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
host->h_addrbuf = nsm->sm_addrbuf;
host->net = ni->net;
host->h_cred = get_cred(ni->cred);
- strlcpy(host->nodename, utsname()->nodename, sizeof(host->nodename));
+ strscpy(host->nodename, utsname()->nodename, sizeof(host->nodename));
out:
return host;
@@ -441,7 +440,7 @@ nlm_bind_host(struct nlm_host *host)
if ((clnt = host->h_rpcclnt) != NULL) {
nlm_rebind_host(host);
} else {
- unsigned long increment = nlmsvc_timeout;
+ unsigned long increment = nlm_timeout * HZ;
struct rpc_timeout timeparms = {
.to_initval = increment,
.to_increment = increment,
@@ -629,6 +628,7 @@ nlm_shutdown_hosts_net(struct net *net)
rpc_shutdown_client(host->h_rpcclnt);
host->h_rpcclnt = NULL;
}
+ nlmsvc_free_host_resources(host);
}
/* Then, perform a garbage collection pass */
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 1d9488cf0534..b8fc732e1c67 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -18,7 +18,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "netns.h"
@@ -276,6 +276,9 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
{
struct nsm_handle *new;
+ if (!hostname)
+ return NULL;
+
new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
if (unlikely(new == NULL))
return NULL;
diff --git a/fs/lockd/netlink.c b/fs/lockd/netlink.c
new file mode 100644
index 000000000000..880c42b4f8c3
--- /dev/null
+++ b/fs/lockd/netlink.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/lockd.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/lockd_netlink.h>
+
+/* LOCKD_CMD_SERVER_SET - do */
+static const struct nla_policy lockd_server_set_nl_policy[LOCKD_A_SERVER_UDP_PORT + 1] = {
+ [LOCKD_A_SERVER_GRACETIME] = { .type = NLA_U32, },
+ [LOCKD_A_SERVER_TCP_PORT] = { .type = NLA_U16, },
+ [LOCKD_A_SERVER_UDP_PORT] = { .type = NLA_U16, },
+};
+
+/* Ops table for lockd */
+static const struct genl_split_ops lockd_nl_ops[] = {
+ {
+ .cmd = LOCKD_CMD_SERVER_SET,
+ .doit = lockd_nl_server_set_doit,
+ .policy = lockd_server_set_nl_policy,
+ .maxattr = LOCKD_A_SERVER_UDP_PORT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = LOCKD_CMD_SERVER_GET,
+ .doit = lockd_nl_server_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+};
+
+struct genl_family lockd_nl_family __ro_after_init = {
+ .name = LOCKD_FAMILY_NAME,
+ .version = LOCKD_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = lockd_nl_ops,
+ .n_split_ops = ARRAY_SIZE(lockd_nl_ops),
+};
diff --git a/fs/lockd/netlink.h b/fs/lockd/netlink.h
new file mode 100644
index 000000000000..d8408f077dd8
--- /dev/null
+++ b/fs/lockd/netlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/lockd.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_LOCKD_GEN_H
+#define _LINUX_LOCKD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/lockd_netlink.h>
+
+int lockd_nl_server_set_doit(struct sk_buff *skb, struct genl_info *info);
+int lockd_nl_server_get_doit(struct sk_buff *skb, struct genl_info *info);
+
+extern struct genl_family lockd_nl_family;
+
+#endif /* _LINUX_LOCKD_GEN_H */
diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
index 5bec78c8e431..88e8e2a97397 100644
--- a/fs/lockd/netns.h
+++ b/fs/lockd/netns.h
@@ -3,12 +3,16 @@
#define __LOCKD_NETNS_H__
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <net/netns/generic.h>
struct lockd_net {
unsigned int nlmsvc_users;
unsigned long next_gc;
unsigned long nrhosts;
+ u32 gracetime;
+ u16 tcp_port;
+ u16 udp_port;
struct delayed_work grace_period_end;
struct lock_manager lockd_manager;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index b220e1b91726..d68afa196535 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -24,7 +24,6 @@
#include <linux/uio.h>
#include <linux/smp.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/inetdevice.h>
@@ -42,10 +41,10 @@
#include "netns.h"
#include "procfs.h"
+#include "netlink.h"
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
-#define ALLOWED_SIGS (sigmask(SIGKILL))
static struct svc_program nlmsvc_program;
@@ -54,12 +53,13 @@ EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
-static struct task_struct *nlmsvc_task;
-static struct svc_rqst *nlmsvc_rqst;
-unsigned long nlmsvc_timeout;
+static struct svc_serv *nlmsvc_serv;
-static atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
-static DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
+static void nlmsvc_request_retry(struct timer_list *tl)
+{
+ svc_wake_up(nlmsvc_serv);
+}
+DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry);
unsigned int lockd_net_id;
@@ -68,12 +68,9 @@ unsigned int lockd_net_id;
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
*/
static unsigned long nlm_grace_period;
-static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
+unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
-/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
-static unsigned int nlm_max_connections = 1024;
-
/*
* Constants needed for the sysctl interface.
*/
@@ -81,14 +78,20 @@ static const unsigned long nlm_grace_period_min = 0;
static const unsigned long nlm_grace_period_max = 240;
static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
-static const int nlm_port_min = 0, nlm_port_max = 65535;
#ifdef CONFIG_SYSCTL
+static const int nlm_port_min = 0, nlm_port_max = 65535;
static struct ctl_table_header * nlm_sysctl_table;
#endif
-static unsigned long get_lockd_grace_period(void)
+static unsigned long get_lockd_grace_period(struct net *net)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ /* Return the net-ns specific grace period, if there is one */
+ if (ln->gracetime)
+ return ln->gracetime * HZ;
+
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
return roundup(nlm_grace_period, nlm_timeout) * HZ;
@@ -107,7 +110,7 @@ static void grace_ender(struct work_struct *grace)
static void set_grace_period(struct net *net)
{
- unsigned long grace_period = get_lockd_grace_period();
+ unsigned long grace_period = get_lockd_grace_period(net);
struct lockd_net *ln = net_generic(net, lockd_net_id);
locks_start_grace(net, &ln->lockd_manager);
@@ -115,75 +118,40 @@ static void set_grace_period(struct net *net)
schedule_delayed_work(&ln->grace_period_end, grace_period);
}
-static void restart_grace(void)
-{
- if (nlmsvc_ops) {
- struct net *net = &init_net;
- struct lockd_net *ln = net_generic(net, lockd_net_id);
-
- cancel_delayed_work_sync(&ln->grace_period_end);
- locks_end_grace(&ln->lockd_manager);
- nlmsvc_invalidate_all();
- set_grace_period(net);
- }
-}
-
/*
* This is the lockd kernel thread
*/
static int
lockd(void *vrqstp)
{
- int err = 0;
struct svc_rqst *rqstp = vrqstp;
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
+ svc_thread_init_status(rqstp, 0);
+
/* try_to_freeze() is called from svc_recv() */
set_freezable();
- /* Allow SIGKILL to tell lockd to drop all of its locks */
- allow_signal(SIGKILL);
-
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
/*
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
*/
- while (!kthread_should_stop()) {
- long timeout = MAX_SCHEDULE_TIMEOUT;
- RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-
- /* update sv_maxconn if it has changed */
- rqstp->rq_server->sv_maxconn = nlm_max_connections;
-
- if (signalled()) {
- flush_signals(current);
- restart_grace();
- continue;
- }
-
- timeout = nlmsvc_retry_blocked();
-
- /*
- * Find a socket with data available and call its
- * recvfrom routine.
- */
- err = svc_recv(rqstp, timeout);
- if (err == -EAGAIN || err == -EINTR)
- continue;
- dprintk("lockd: request from %s\n",
- svc_print_addr(rqstp, buf, sizeof(buf)));
-
- svc_process(rqstp);
+ while (!svc_thread_should_stop(rqstp)) {
+ nlmsvc_retry_blocked(rqstp);
+ svc_recv(rqstp);
}
- flush_signals(current);
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
+
+ dprintk("lockd_down: service stopped\n");
+
+ svc_exit_thread(rqstp);
return 0;
}
@@ -196,8 +164,8 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name,
xprt = svc_find_xprt(serv, name, net, family, 0);
if (xprt == NULL)
- return svc_create_xprt(serv, name, net, family, port,
- SVC_SOCK_DEFAULTS, cred);
+ return svc_xprt_create(serv, name, net, family, port,
+ SVC_SOCK_DEFAULTS, cred);
svc_xprt_put(xprt);
return 0;
}
@@ -205,15 +173,16 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name,
static int create_lockd_family(struct svc_serv *serv, struct net *net,
const int family, const struct cred *cred)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
int err;
- err = create_lockd_listener(serv, "udp", net, family, nlm_udpport,
- cred);
+ err = create_lockd_listener(serv, "udp", net, family,
+ ln->udp_port ? ln->udp_port : nlm_udpport, cred);
if (err < 0)
return err;
- return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport,
- cred);
+ return create_lockd_listener(serv, "tcp", net, family,
+ ln->tcp_port ? ln->tcp_port : nlm_tcpport, cred);
}
/*
@@ -247,7 +216,7 @@ out_err:
if (warned++ == 0)
printk(KERN_WARNING
"lockd_up: makesock failed, error=%d\n", err);
- svc_shutdown_net(serv, net);
+ svc_xprt_destroy_all(serv, net, true);
return err;
}
@@ -285,13 +254,11 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
nlm_shutdown_hosts_net(net);
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
- svc_shutdown_net(serv, net);
- dprintk("%s: per-net data destroyed; net=%x\n",
- __func__, net->ns.inum);
+ svc_xprt_destroy_all(serv, net, true);
}
} else {
- pr_err("%s: no users! task=%p, net=%x\n",
- __func__, nlmsvc_task, net->ns.inum);
+ pr_err("%s: no users! net=%x\n",
+ __func__, net->ns.inum);
BUG();
}
}
@@ -302,20 +269,16 @@ static int lockd_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nlm_ntf_refcnt))
+ if (event != NETDEV_DOWN)
goto out;
- if (nlmsvc_rqst) {
+ if (nlmsvc_serv) {
dprintk("lockd_inetaddr_event: removed %pI4\n",
&ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
- svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
- (struct sockaddr *)&sin);
+ svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin);
}
- atomic_dec(&nlm_ntf_refcnt);
- wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
@@ -332,21 +295,17 @@ static int lockd_inet6addr_event(struct notifier_block *this,
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nlm_ntf_refcnt))
+ if (event != NETDEV_DOWN)
goto out;
- if (nlmsvc_rqst) {
+ if (nlmsvc_serv) {
dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ifa->addr;
if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
- svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
- (struct sockaddr *)&sin6);
+ svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin6);
}
- atomic_dec(&nlm_ntf_refcnt);
- wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
@@ -357,86 +316,14 @@ static struct notifier_block lockd_inet6addr_notifier = {
};
#endif
-static void lockd_unregister_notifiers(void)
-{
- unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
-#if IS_ENABLED(CONFIG_IPV6)
- unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
-#endif
- wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
-}
-
-static void lockd_svc_exit_thread(void)
-{
- atomic_dec(&nlm_ntf_refcnt);
- lockd_unregister_notifiers();
- svc_exit_thread(nlmsvc_rqst);
-}
-
-static int lockd_start_svc(struct svc_serv *serv)
+static int lockd_get(void)
{
+ struct svc_serv *serv;
int error;
- if (nlmsvc_rqst)
+ if (nlmsvc_serv) {
+ nlmsvc_users++;
return 0;
-
- /*
- * Create the kernel thread and wait for it to start.
- */
- nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
- if (IS_ERR(nlmsvc_rqst)) {
- error = PTR_ERR(nlmsvc_rqst);
- printk(KERN_WARNING
- "lockd_up: svc_rqst allocation failed, error=%d\n",
- error);
- lockd_unregister_notifiers();
- goto out_rqst;
- }
-
- atomic_inc(&nlm_ntf_refcnt);
- svc_sock_update_bufs(serv);
- serv->sv_maxconn = nlm_max_connections;
-
- nlmsvc_task = kthread_create(lockd, nlmsvc_rqst, "%s", serv->sv_name);
- if (IS_ERR(nlmsvc_task)) {
- error = PTR_ERR(nlmsvc_task);
- printk(KERN_WARNING
- "lockd_up: kthread_run failed, error=%d\n", error);
- goto out_task;
- }
- nlmsvc_rqst->rq_task = nlmsvc_task;
- wake_up_process(nlmsvc_task);
-
- dprintk("lockd_up: service started\n");
- return 0;
-
-out_task:
- lockd_svc_exit_thread();
- nlmsvc_task = NULL;
-out_rqst:
- nlmsvc_rqst = NULL;
- return error;
-}
-
-static const struct svc_serv_ops lockd_sv_ops = {
- .svo_shutdown = svc_rpcb_cleanup,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
-};
-
-static struct svc_serv *lockd_create_svc(void)
-{
- struct svc_serv *serv;
-
- /*
- * Check whether we're already up and running.
- */
- if (nlmsvc_rqst) {
- /*
- * Note: increase service usage, because later in case of error
- * svc_destroy() will be called.
- */
- svc_get(nlmsvc_rqst->rq_server);
- return nlmsvc_rqst->rq_server;
}
/*
@@ -447,21 +334,44 @@ static struct svc_serv *lockd_create_svc(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
- if (!nlm_timeout)
- nlm_timeout = LOCKD_DFLT_TIMEO;
- nlmsvc_timeout = nlm_timeout * HZ;
-
- serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops);
+ serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, lockd);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
+
+ error = svc_set_num_threads(serv, NULL, 1);
+ if (error < 0) {
+ svc_destroy(&serv);
+ return error;
+ }
+
+ nlmsvc_serv = serv;
register_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
dprintk("lockd_up: service created\n");
- return serv;
+ nlmsvc_users++;
+ return 0;
+}
+
+static void lockd_put(void)
+{
+ if (WARN(nlmsvc_users <= 0, "lockd_down: no users!\n"))
+ return;
+ if (--nlmsvc_users)
+ return;
+
+ unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
+#if IS_ENABLED(CONFIG_IPV6)
+ unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
+#endif
+
+ svc_set_num_threads(nlmsvc_serv, NULL, 0);
+ timer_delete_sync(&nlmsvc_retry);
+ svc_destroy(&nlmsvc_serv);
+ dprintk("lockd_down: service destroyed\n");
}
/*
@@ -469,36 +379,21 @@ static struct svc_serv *lockd_create_svc(void)
*/
int lockd_up(struct net *net, const struct cred *cred)
{
- struct svc_serv *serv;
int error;
mutex_lock(&nlmsvc_mutex);
- serv = lockd_create_svc();
- if (IS_ERR(serv)) {
- error = PTR_ERR(serv);
- goto err_create;
- }
+ error = lockd_get();
+ if (error)
+ goto err;
- error = lockd_up_net(serv, net, cred);
+ error = lockd_up_net(nlmsvc_serv, net, cred);
if (error < 0) {
- lockd_unregister_notifiers();
- goto err_put;
+ lockd_put();
+ goto err;
}
- error = lockd_start_svc(serv);
- if (error < 0) {
- lockd_down_net(serv, net);
- goto err_put;
- }
- nlmsvc_users++;
- /*
- * Note: svc_serv structures have an initial use count of 1,
- * so we exit through here on both success and failure.
- */
-err_put:
- svc_destroy(serv);
-err_create:
+err:
mutex_unlock(&nlmsvc_mutex);
return error;
}
@@ -511,27 +406,8 @@ void
lockd_down(struct net *net)
{
mutex_lock(&nlmsvc_mutex);
- lockd_down_net(nlmsvc_rqst->rq_server, net);
- if (nlmsvc_users) {
- if (--nlmsvc_users)
- goto out;
- } else {
- printk(KERN_ERR "lockd_down: no users! task=%p\n",
- nlmsvc_task);
- BUG();
- }
-
- if (!nlmsvc_task) {
- printk(KERN_ERR "lockd_down: no lockd running.\n");
- BUG();
- }
- kthread_stop(nlmsvc_task);
- dprintk("lockd_down: service stopped\n");
- lockd_svc_exit_thread();
- dprintk("lockd_down: service destroyed\n");
- nlmsvc_task = NULL;
- nlmsvc_rqst = NULL;
-out:
+ lockd_down_net(nlmsvc_serv, net);
+ lockd_put();
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL_GPL(lockd_down);
@@ -542,7 +418,7 @@ EXPORT_SYMBOL_GPL(lockd_down);
* Sysctl parameters (same as module parameters, different interface).
*/
-static struct ctl_table nlm_sysctls[] = {
+static const struct ctl_table nlm_sysctls[] = {
{
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
@@ -582,36 +458,18 @@ static struct ctl_table nlm_sysctls[] = {
{
.procname = "nsm_use_hostnames",
.data = &nsm_use_hostnames,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(bool),
.mode = 0644,
.proc_handler = proc_dobool,
},
{
.procname = "nsm_local_state",
.data = &nsm_local_state,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(nsm_local_state),
.mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-
-static struct ctl_table nlm_sysctl_dir[] = {
- {
- .procname = "nfs",
- .mode = 0555,
- .child = nlm_sysctls,
- },
- { }
-};
-
-static struct ctl_table nlm_sysctl_root[] = {
- {
- .procname = "fs",
- .mode = 0555,
- .child = nlm_sysctl_dir,
+ .proc_handler = proc_douintvec,
+ .extra1 = SYSCTL_ZERO,
},
- { }
};
#endif /* CONFIG_SYSCTL */
@@ -643,7 +501,7 @@ static inline int is_callback(u32 proc)
}
-static int lockd_authenticate(struct svc_rqst *rqstp)
+static enum svc_auth_status lockd_authenticate(struct svc_rqst *rqstp)
{
rqstp->rq_client = NULL;
switch (rqstp->rq_authop->flavour) {
@@ -684,7 +542,6 @@ module_param_call(nlm_udpport, param_set_port, param_get_int,
module_param_call(nlm_tcpport, param_set_port, param_get_int,
&nlm_tcpport, 0644);
module_param(nsm_use_hostnames, bool, 0644);
-module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
@@ -730,7 +587,7 @@ static int __init init_nlm(void)
#ifdef CONFIG_SYSCTL
err = -ENOMEM;
- nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
+ nlm_sysctl_table = register_sysctl("fs/nfs", nlm_sysctls);
if (nlm_sysctl_table == NULL)
goto err_sysctl;
#endif
@@ -738,6 +595,10 @@ static int __init init_nlm(void)
if (err)
goto err_pernet;
+ err = genl_register_family(&lockd_nl_family);
+ if (err)
+ goto err_netlink;
+
err = lockd_create_procfs();
if (err)
goto err_procfs;
@@ -745,6 +606,8 @@ static int __init init_nlm(void)
return 0;
err_procfs:
+ genl_unregister_family(&lockd_nl_family);
+err_netlink:
unregister_pernet_subsys(&lockd_net_ops);
err_pernet:
#ifdef CONFIG_SYSCTL
@@ -758,6 +621,7 @@ static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
+ genl_unregister_family(&lockd_nl_family);
lockd_remove_procfs();
unregister_pernet_subsys(&lockd_net_ops);
#ifdef CONFIG_SYSCTL
@@ -771,17 +635,16 @@ module_exit(exit_nlm);
/**
* nlmsvc_dispatch - Process an NLM Request
* @rqstp: incoming request
- * @statp: pointer to location of accept_stat field in RPC Reply buffer
*
* Return values:
* %0: Processing complete; do not send a Reply
* %1: Processing complete; send Reply in rqstp->rq_res
*/
-static int nlmsvc_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+static int nlmsvc_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *procp = rqstp->rq_procinfo;
+ __be32 *statp = rqstp->rq_accept_statp;
- svcxdr_init_decode(rqstp);
if (!procp->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
@@ -791,7 +654,6 @@ static int nlmsvc_dispatch(struct svc_rqst *rqstp, __be32 *statp)
if (*statp != rpc_success)
return 1;
- svcxdr_init_encode(rqstp);
if (!procp->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
@@ -809,7 +671,7 @@ out_encode_err:
/*
* Define NLM program and procedures
*/
-static unsigned int nlmsvc_version1_count[17];
+static DEFINE_PER_CPU_ALIGNED(unsigned long, nlmsvc_version1_count[17]);
static const struct svc_version nlmsvc_version1 = {
.vs_vers = 1,
.vs_nproc = 17,
@@ -818,26 +680,31 @@ static const struct svc_version nlmsvc_version1 = {
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
-static unsigned int nlmsvc_version3_count[24];
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nlmsvc_version3_count[ARRAY_SIZE(nlmsvc_procedures)]);
static const struct svc_version nlmsvc_version3 = {
.vs_vers = 3,
- .vs_nproc = 24,
+ .vs_nproc = ARRAY_SIZE(nlmsvc_procedures),
.vs_proc = nlmsvc_procedures,
.vs_count = nlmsvc_version3_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
+
#ifdef CONFIG_LOCKD_V4
-static unsigned int nlmsvc_version4_count[24];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nlmsvc_version4_count[ARRAY_SIZE(nlmsvc_procedures4)]);
static const struct svc_version nlmsvc_version4 = {
.vs_vers = 4,
- .vs_nproc = 24,
+ .vs_nproc = ARRAY_SIZE(nlmsvc_procedures4),
.vs_proc = nlmsvc_procedures4,
.vs_count = nlmsvc_version4_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
+
static const struct svc_version *nlmsvc_version[] = {
[1] = &nlmsvc_version1,
[3] = &nlmsvc_version3,
@@ -846,8 +713,6 @@ static const struct svc_version *nlmsvc_version[] = {
#endif
};
-static struct svc_stat nlmsvc_stats;
-
#define NLM_NRVERS ARRAY_SIZE(nlmsvc_version)
static struct svc_program nlmsvc_program = {
.pg_prog = NLM_PROGRAM, /* program number */
@@ -855,8 +720,98 @@ static struct svc_program nlmsvc_program = {
.pg_vers = nlmsvc_version, /* version table */
.pg_name = "lockd", /* service name */
.pg_class = "nfsd", /* share authentication with nfsd */
- .pg_stats = &nlmsvc_stats, /* stats table */
.pg_authenticate = &lockd_authenticate, /* export authentication */
.pg_init_request = svc_generic_init_request,
.pg_rpcbind_set = svc_generic_rpcbind_set,
};
+
+/**
+ * lockd_nl_server_set_doit - set the lockd server parameters via netlink
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * This updates the per-net values. When updating the values in the init_net
+ * namespace, also update the "legacy" global values.
+ *
+ * Return 0 on success or a negative errno.
+ */
+int lockd_nl_server_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ const struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, LOCKD_A_SERVER_GRACETIME))
+ return -EINVAL;
+
+ if (info->attrs[LOCKD_A_SERVER_GRACETIME] ||
+ info->attrs[LOCKD_A_SERVER_TCP_PORT] ||
+ info->attrs[LOCKD_A_SERVER_UDP_PORT]) {
+ attr = info->attrs[LOCKD_A_SERVER_GRACETIME];
+ if (attr) {
+ u32 gracetime = nla_get_u32(attr);
+
+ if (gracetime > nlm_grace_period_max)
+ return -EINVAL;
+
+ ln->gracetime = gracetime;
+
+ if (net == &init_net)
+ nlm_grace_period = gracetime;
+ }
+
+ attr = info->attrs[LOCKD_A_SERVER_TCP_PORT];
+ if (attr) {
+ ln->tcp_port = nla_get_u16(attr);
+ if (net == &init_net)
+ nlm_tcpport = ln->tcp_port;
+ }
+
+ attr = info->attrs[LOCKD_A_SERVER_UDP_PORT];
+ if (attr) {
+ ln->udp_port = nla_get_u16(attr);
+ if (net == &init_net)
+ nlm_udpport = ln->udp_port;
+ }
+ }
+ return 0;
+}
+
+/**
+ * lockd_nl_server_get_doit - get lockd server parameters via netlink
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int lockd_nl_server_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ err = nla_put_u32(skb, LOCKD_A_SERVER_GRACETIME, ln->gracetime) ||
+ nla_put_u16(skb, LOCKD_A_SERVER_TCP_PORT, ln->tcp_port) ||
+ nla_put_u16(skb, LOCKD_A_SERVER_UDP_PORT, ln->udp_port);
+ if (err)
+ goto err_free_msg;
+
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 176b468a61c7..109e5caae8c7 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -32,6 +32,10 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
if (!nlmsvc_ops)
return nlm_lck_denied_nolocks;
+ if (lock->lock_start > OFFSET_MAX ||
+ (lock->lock_len && ((lock->lock_len - 1) > (OFFSET_MAX - lock->lock_start))))
+ return nlm4_fbig;
+
/* Obtain host handle */
if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len))
|| (argp->monitor && nsm_monitor(host) < 0))
@@ -42,17 +46,23 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
if (filp != NULL) {
int mode = lock_to_openmode(&lock->fl);
+ lock->fl.c.flc_flags = FL_POSIX;
+
error = nlm_lookup_file(rqstp, &file, lock);
if (error)
goto no_locks;
*filp = file;
/* Set up the missing parts of the file_lock structure */
- lock->fl.fl_file = file->f_file[mode];
- lock->fl.fl_pid = current->tgid;
+ lock->fl.c.flc_file = file->f_file[mode];
+ lock->fl.c.flc_pid = current->tgid;
+ lock->fl.fl_start = (loff_t)lock->lock_start;
+ lock->fl.fl_end = lock->lock_len ?
+ (loff_t)(lock->lock_start + lock->lock_len - 1) :
+ OFFSET_MAX;
lock->fl.fl_lmops = &nlmsvc_lock_operations;
nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
- if (!lock->fl.fl_owner) {
+ if (!lock->fl.c.flc_owner) {
/* lockowner allocation has failed */
nlmsvc_release_host(host);
return nlm_lck_denied_nolocks;
@@ -87,6 +97,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
+ struct nlm_lockowner *test_owner;
__be32 rc = rpc_success;
dprintk("lockd: TEST4 called\n");
@@ -96,14 +107,16 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ test_owner = argp->lock.fl.c.flc_owner;
/* Now check for conflicting locks */
- resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie);
+ resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock,
+ &resp->lock);
if (resp->status == nlm_drop_reply)
rc = rpc_drop_reply;
else
dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
- nlmsvc_release_lockowner(&argp->lock);
+ nlmsvc_put_lockowner(test_owner);
nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
@@ -131,18 +144,6 @@ __nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
-#if 0
- /* If supplied state doesn't match current state, we assume it's
- * an old request that time-warped somehow. Any error return would
- * do in this case because it's irrelevant anyway.
- *
- * NB: We don't retrieve the remote host's state yet.
- */
- if (host->h_nsmstate && host->h_nsmstate != argp->state) {
- resp->status = nlm_lck_denied_nolocks;
- } else
-#endif
-
/* Now try to lock the file */
resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock,
argp->block, &argp->cookie,
@@ -511,6 +512,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "NULL",
@@ -520,6 +522,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_testargs,
.pc_encode = nlm4svc_encode_testres,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+2+No+Rg,
.pc_name = "TEST",
@@ -529,6 +532,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_lockargs,
.pc_encode = nlm4svc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "LOCK",
@@ -538,6 +542,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_cancargs,
.pc_encode = nlm4svc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "CANCEL",
@@ -547,6 +552,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_unlockargs,
.pc_encode = nlm4svc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "UNLOCK",
@@ -556,6 +562,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_testargs,
.pc_encode = nlm4svc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "GRANTED",
@@ -565,6 +572,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_testargs,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_MSG",
@@ -574,6 +582,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_lockargs,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_MSG",
@@ -583,6 +592,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_cancargs,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_MSG",
@@ -592,6 +602,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_unlockargs,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_MSG",
@@ -601,6 +612,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_testargs,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_MSG",
@@ -610,6 +622,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_RES",
@@ -619,6 +632,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_RES",
@@ -628,6 +642,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_RES",
@@ -637,6 +652,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_RES",
@@ -646,6 +662,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_res,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_RES",
@@ -655,6 +672,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_reboot,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_reboot),
+ .pc_argzero = sizeof(struct nlm_reboot),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "SM_NOTIFY",
@@ -664,6 +682,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "UNUSED",
@@ -673,6 +692,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "UNUSED",
@@ -682,6 +702,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_void,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "UNUSED",
@@ -691,6 +712,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_shareargs,
.pc_encode = nlm4svc_encode_shareres,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "SHARE",
@@ -700,6 +722,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_shareargs,
.pc_encode = nlm4svc_encode_shareres,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "UNSHARE",
@@ -709,6 +732,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_lockargs,
.pc_encode = nlm4svc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "NM_LOCK",
@@ -718,6 +742,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_decode = nlm4svc_decode_notify,
.pc_encode = nlm4svc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "FREE_ALL",
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e9b85d8fd5fe..3a3d05cfe09a 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -30,8 +30,6 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
-#include <linux/kthread.h>
-#include <linux/exportfs.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
@@ -131,12 +129,14 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
+ spin_lock(&nlm_blocked_lock);
if (!list_empty(&block->b_list)) {
- spin_lock(&nlm_blocked_lock);
list_del_init(&block->b_list);
spin_unlock(&nlm_blocked_lock);
nlmsvc_release_block(block);
+ return;
}
+ spin_unlock(&nlm_blocked_lock);
}
/*
@@ -149,21 +149,25 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
struct file_lock *fl;
dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
- file, lock->fl.fl_pid,
+ file, lock->fl.c.flc_pid,
(long long)lock->fl.fl_start,
- (long long)lock->fl.fl_end, lock->fl.fl_type);
+ (long long)lock->fl.fl_end,
+ lock->fl.c.flc_type);
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
fl = &block->b_call->a_args.lock.fl;
dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
- block->b_file, fl->fl_pid,
+ block->b_file, fl->c.flc_pid,
(long long)fl->fl_start,
- (long long)fl->fl_end, fl->fl_type,
+ (long long)fl->fl_end, fl->c.flc_type,
nlmdbg_cookie2a(&block->b_call->a_args.cookie));
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
kref_get(&block->b_count);
+ spin_unlock(&nlm_blocked_lock);
return block;
}
}
+ spin_unlock(&nlm_blocked_lock);
return NULL;
}
@@ -185,16 +189,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie)
{
struct nlm_block *block;
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
goto found;
}
+ spin_unlock(&nlm_blocked_lock);
return NULL;
found:
dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
kref_get(&block->b_count);
+ spin_unlock(&nlm_blocked_lock);
return block;
}
@@ -237,7 +244,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
goto failed_free;
/* Set notifier function for VFS, and init args */
- call->a_args.lock.fl.fl_flags |= FL_SLEEP;
+ call->a_args.lock.fl.c.flc_flags |= FL_SLEEP;
call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
nlmclnt_next_cookie(&call->a_args.cookie);
@@ -317,6 +324,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
restart:
mutex_lock(&file->f_mutex);
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
if (!match(block->b_host, host))
continue;
@@ -325,11 +333,13 @@ restart:
if (list_empty(&block->b_list))
continue;
kref_get(&block->b_count);
+ spin_unlock(&nlm_blocked_lock);
mutex_unlock(&file->f_mutex);
nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
goto restart;
}
+ spin_unlock(&nlm_blocked_lock);
mutex_unlock(&file->f_mutex);
}
@@ -340,7 +350,7 @@ nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
return lockowner;
}
-static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
+void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
return;
@@ -392,14 +402,14 @@ static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t
void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
- if (lock->fl.fl_owner)
- nlmsvc_put_lockowner(lock->fl.fl_owner);
+ if (lock->fl.c.flc_owner)
+ nlmsvc_put_lockowner(lock->fl.c.flc_owner);
}
void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
pid_t pid)
{
- fl->fl_owner = nlmsvc_find_lockowner(host, pid);
+ fl->c.flc_owner = nlmsvc_find_lockowner(host, pid);
}
/*
@@ -415,7 +425,7 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
/* set default data area */
call->a_args.lock.oh.data = call->a_owner;
- call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
+ call->a_args.lock.svid = ((struct nlm_lockowner *) lock->fl.c.flc_owner)->pid;
if (lock->oh.len > NLMCLNT_OHSIZE) {
void *data = kmalloc(lock->oh.len, GFP_KERNEL);
@@ -470,8 +480,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock, int wait,
struct nlm_cookie *cookie, int reclaim)
{
+ struct inode *inode __maybe_unused = nlmsvc_file_inode(file);
struct nlm_block *block = NULL;
- struct inode *inode = nlmsvc_file_inode(file);
int error;
int mode;
int async_block = 0;
@@ -479,12 +489,16 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
inode->i_sb->s_id, inode->i_ino,
- lock->fl.fl_type, lock->fl.fl_pid,
+ lock->fl.c.flc_type,
+ lock->fl.c.flc_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end,
wait);
- if (inode->i_sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS) {
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
+ if (!locks_can_async_lock(nlmsvc_file_file(file)->f_op)) {
async_block = wait;
wait = 0;
}
@@ -502,7 +516,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
lock = &block->b_call->a_args.lock;
} else
- lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.c.flc_flags &= ~FL_SLEEP;
if (block->b_flags & B_QUEUED) {
dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
@@ -530,25 +544,40 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
+ spin_lock(&nlm_blocked_lock);
+ /*
+ * If this lock request duplicates an already pending
+ * lock request, return nlm_lck_blocked without calling
+ * vfs_lock_file() again. Otherwise we would have two pending
+ * requests on the underlying ->lock() implementation but
+ * only one nlm_block to be granted by lm_grant().
+ */
+ if (locks_can_async_lock(nlmsvc_file_file(file)->f_op) &&
+ !list_empty(&block->b_list)) {
+ spin_unlock(&nlm_blocked_lock);
+ ret = nlm_lck_blocked;
+ goto out;
+ }
+
+ /* Append to list of blocked */
+ nlmsvc_insert_block_locked(block, NLM_NEVER);
+ spin_unlock(&nlm_blocked_lock);
+
if (!wait)
- lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.c.flc_flags &= ~FL_SLEEP;
mode = lock_to_openmode(&lock->fl);
error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
- lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.c.flc_flags &= ~FL_SLEEP;
dprintk("lockd: vfs_lock_file returned %d\n", error);
switch (error) {
case 0:
+ nlmsvc_remove_block(block);
ret = nlm_granted;
goto out;
case -EAGAIN:
- /*
- * If this is a blocking request for an
- * already pending lock request then we need
- * to put it back on lockd's block list
- */
- if (wait)
- break;
+ if (!wait)
+ nlmsvc_remove_block(block);
ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
goto out;
case FILE_LOCK_DEFERRED:
@@ -559,17 +588,16 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
ret = nlmsvc_defer_lock_rqst(rqstp, block);
goto out;
case -EDEADLK:
+ nlmsvc_remove_block(block);
ret = nlm_deadlock;
goto out;
default: /* includes ENOLCK */
+ nlmsvc_remove_block(block);
ret = nlm_lck_denied_nolocks;
goto out;
}
ret = nlm_lck_blocked;
-
- /* Append to list of blocked */
- nlmsvc_insert_block(block, NLM_NEVER);
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
@@ -583,28 +611,27 @@ out:
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock,
- struct nlm_lock *conflock, struct nlm_cookie *cookie)
+ struct nlm_lock *conflock)
{
int error;
int mode;
__be32 ret;
- struct nlm_lockowner *test_owner;
dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
nlmsvc_file_inode(file)->i_sb->s_id,
nlmsvc_file_inode(file)->i_ino,
- lock->fl.fl_type,
+ lock->fl.c.flc_type,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
if (locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
}
- /* If there's a conflicting lock, remember to clean up the test lock */
- test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;
-
mode = lock_to_openmode(&lock->fl);
error = vfs_test_lock(file->f_file[mode], &lock->fl);
if (error) {
@@ -616,27 +643,23 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
- if (lock->fl.fl_type == F_UNLCK) {
+ if (lock->fl.c.flc_type == F_UNLCK) {
ret = nlm_granted;
goto out;
}
dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
- lock->fl.fl_type, (long long)lock->fl.fl_start,
+ lock->fl.c.flc_type, (long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
conflock->caller = "somehost"; /* FIXME */
conflock->len = strlen(conflock->caller);
conflock->oh.len = 0; /* don't return OH info */
- conflock->svid = lock->fl.fl_pid;
- conflock->fl.fl_type = lock->fl.fl_type;
+ conflock->svid = lock->fl.c.flc_pid;
+ conflock->fl.c.flc_type = lock->fl.c.flc_type;
conflock->fl.fl_start = lock->fl.fl_start;
conflock->fl.fl_end = lock->fl.fl_end;
locks_release_private(&lock->fl);
- /* Clean up the test lock */
- lock->fl.fl_owner = NULL;
- nlmsvc_put_lockowner(test_owner);
-
ret = nlm_lck_denied;
out:
return ret;
@@ -657,19 +680,24 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
nlmsvc_file_inode(file)->i_sb->s_id,
nlmsvc_file_inode(file)->i_ino,
- lock->fl.fl_pid,
+ lock->fl.c.flc_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
/* First, cancel any lock that might be there */
nlmsvc_cancel_blocked(net, file, lock);
- lock->fl.fl_type = F_UNLCK;
- if (file->f_file[O_RDONLY])
- error = vfs_lock_file(file->f_file[O_RDONLY], F_SETLK,
+ lock->fl.c.flc_type = F_UNLCK;
+ lock->fl.c.flc_file = file->f_file[O_RDONLY];
+ if (lock->fl.c.flc_file)
+ error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
&lock->fl, NULL);
- if (file->f_file[O_WRONLY])
- error = vfs_lock_file(file->f_file[O_WRONLY], F_SETLK,
+ lock->fl.c.flc_file = file->f_file[O_WRONLY];
+ if (lock->fl.c.flc_file)
+ error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
&lock->fl, NULL);
return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
@@ -692,10 +720,13 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
nlmsvc_file_inode(file)->i_sb->s_id,
nlmsvc_file_inode(file)->i_ino,
- lock->fl.fl_pid,
+ lock->fl.c.flc_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
if (locks_in_grace(net))
return nlm_lck_denied_grace_period;
@@ -703,9 +734,10 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
block = nlmsvc_lookup_block(file, lock);
mutex_unlock(&file->f_mutex);
if (block != NULL) {
- mode = lock_to_openmode(&lock->fl);
- vfs_cancel_lock(block->b_file->f_file[mode],
- &block->b_call->a_args.lock.fl);
+ struct file_lock *fl = &block->b_call->a_args.lock.fl;
+
+ mode = lock_to_openmode(fl);
+ vfs_cancel_lock(block->b_file->f_file[mode], fl);
status = nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
}
@@ -844,12 +876,12 @@ nlmsvc_grant_blocked(struct nlm_block *block)
/* vfs_lock_file() can mangle fl_start and fl_end, but we need
* them unchanged for the GRANT_MSG
*/
- lock->fl.fl_flags |= FL_SLEEP;
+ lock->fl.c.flc_flags |= FL_SLEEP;
fl_start = lock->fl.fl_start;
fl_end = lock->fl.fl_end;
mode = lock_to_openmode(&lock->fl);
error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
- lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.c.flc_flags &= ~FL_SLEEP;
lock->fl.fl_start = fl_start;
lock->fl.fl_end = fl_end;
@@ -957,19 +989,32 @@ void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
struct nlm_block *block;
+ struct file_lock *fl;
+ int error;
- dprintk("grant_reply: looking for cookie %x, s=%d \n",
+ dprintk("grant_reply: looking for cookie %x, s=%d\n",
*(unsigned int *)(cookie->data), status);
if (!(block = nlmsvc_find_block(cookie)))
return;
- if (status == nlm_lck_denied_grace_period) {
+ switch (status) {
+ case nlm_lck_denied_grace_period:
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
- } else {
+ break;
+ case nlm_lck_denied:
+ /* Client doesn't want it, just unlock it */
+ nlmsvc_unlink_block(block);
+ fl = &block->b_call->a_args.lock.fl;
+ fl->c.flc_type = F_UNLCK;
+ error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL);
+ if (error)
+ pr_warn("lockd: unable to unlock lock rejected by client!\n");
+ break;
+ default:
/*
- * Lock is now held by client, or has been rejected.
- * In both cases, the block should be removed.
+ * Either it was accepted or the status makes no sense;
+ * just unlink it either way.
*/
nlmsvc_unlink_block(block);
}
@@ -998,14 +1043,14 @@ retry_deferred_block(struct nlm_block *block)
* picks up locks that can be granted, or grant notifications that must
* be retransmitted.
*/
-unsigned long
-nlmsvc_retry_blocked(void)
+void
+nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block;
spin_lock(&nlm_blocked_lock);
- while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
+ while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER)
@@ -1028,5 +1073,6 @@ nlmsvc_retry_blocked(void)
}
spin_unlock(&nlm_blocked_lock);
- return timeout;
+ if (timeout < MAX_SCHEDULE_TIMEOUT)
+ mod_timer(&nlmsvc_retry, jiffies + timeout);
}
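Most of the svclock.c changes apply one pattern: walk nlm_blocked while holding nlm_blocked_lock, take a reference with kref_get() before the spinlock is dropped, and only then return the block to the caller. A condensed sketch of that lookup shape (not the literal kernel function):

static struct nlm_block *demo_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie)) {
			/* pin the block before releasing the list lock */
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	return NULL;
}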
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 4dc1b40a489a..f53d5177f267 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -77,11 +77,12 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
/* Set up the missing parts of the file_lock structure */
mode = lock_to_openmode(&lock->fl);
- lock->fl.fl_file = file->f_file[mode];
- lock->fl.fl_pid = current->tgid;
+ lock->fl.c.flc_flags = FL_POSIX;
+ lock->fl.c.flc_file = file->f_file[mode];
+ lock->fl.c.flc_pid = current->tgid;
lock->fl.fl_lmops = &nlmsvc_lock_operations;
nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
- if (!lock->fl.fl_owner) {
+ if (!lock->fl.c.flc_owner) {
/* lockowner allocation has failed */
nlmsvc_release_host(host);
return nlm_lck_denied_nolocks;
@@ -116,6 +117,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
+ struct nlm_lockowner *test_owner;
__be32 rc = rpc_success;
dprintk("lockd: TEST called\n");
@@ -125,15 +127,18 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ test_owner = argp->lock.fl.c.flc_owner;
+
/* Now check for conflicting locks */
- resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie));
+ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host,
+ &argp->lock, &resp->lock));
if (resp->status == nlm_drop_reply)
rc = rpc_drop_reply;
else
dprintk("lockd: TEST status %d vers %d\n",
ntohl(resp->status), rqstp->rq_vers);
- nlmsvc_release_lockowner(&argp->lock);
+ nlmsvc_put_lockowner(test_owner);
nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
@@ -161,18 +166,6 @@ __nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
-#if 0
- /* If supplied state doesn't match current state, we assume it's
- * an old request that time-warped somehow. Any error return would
- * do in this case because it's irrelevant anyway.
- *
- * NB: We don't retrieve the remote host's state yet.
- */
- if (host->h_nsmstate && host->h_nsmstate != argp->state) {
- resp->status = nlm_lck_denied_nolocks;
- } else
-#endif
-
/* Now try to lock the file */
resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock,
argp->block, &argp->cookie,
@@ -552,6 +545,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "NULL",
@@ -561,6 +555,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_testargs,
.pc_encode = nlmsvc_encode_testres,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+2+No+Rg,
.pc_name = "TEST",
@@ -570,6 +565,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_lockargs,
.pc_encode = nlmsvc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "LOCK",
@@ -579,6 +575,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_cancargs,
.pc_encode = nlmsvc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "CANCEL",
@@ -588,6 +585,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_unlockargs,
.pc_encode = nlmsvc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "UNLOCK",
@@ -597,6 +595,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_testargs,
.pc_encode = nlmsvc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "GRANTED",
@@ -606,6 +605,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_testargs,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_MSG",
@@ -615,6 +615,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_lockargs,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_MSG",
@@ -624,6 +625,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_cancargs,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_MSG",
@@ -633,6 +635,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_unlockargs,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_MSG",
@@ -642,6 +645,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_testargs,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_MSG",
@@ -651,6 +655,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_RES",
@@ -660,6 +665,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_RES",
@@ -669,6 +675,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_RES",
@@ -678,6 +685,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_RES",
@@ -687,6 +695,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_res,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_res),
+ .pc_argzero = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_RES",
@@ -696,6 +705,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_reboot,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_reboot),
+ .pc_argzero = sizeof(struct nlm_reboot),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "SM_NOTIFY",
@@ -705,6 +715,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNUSED",
@@ -714,6 +725,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNUSED",
@@ -723,6 +735,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_void,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_void),
+ .pc_argzero = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNUSED",
@@ -732,6 +745,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_shareargs,
.pc_encode = nlmsvc_encode_shareres,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "SHARE",
@@ -741,6 +755,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_shareargs,
.pc_encode = nlmsvc_encode_shareres,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "UNSHARE",
@@ -750,6 +765,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_lockargs,
.pc_encode = nlmsvc_encode_res,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "NM_LOCK",
@@ -759,6 +775,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_decode = nlmsvc_decode_notify,
.pc_encode = nlmsvc_encode_void,
.pc_argsize = sizeof(struct nlm_args),
+ .pc_argzero = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "FREE_ALL",
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index ade4931b2da2..88c81ce1148d 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -32,6 +32,9 @@ nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
struct xdr_netobj *oh = &argp->lock.oh;
u8 *ohdata;
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
for (share = file->f_shares; share; share = share->s_next) {
if (share->s_host == host && nlm_cmp_owner(share, oh))
goto update;
@@ -72,6 +75,9 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
struct nlm_share *share, **shpp;
struct xdr_netobj *oh = &argp->lock.oh;
+ if (nlmsvc_file_cannot_lock(file))
+ return nlm_lck_denied_nolocks;
+
for (shpp = &file->f_shares; (share = *shpp) != NULL;
shpp = &share->s_next) {
if (share->s_host == host && nlm_cmp_owner(share, oh)) {
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index cb3a7512c33e..9103896164f6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -73,7 +73,7 @@ static inline unsigned int file_hash(struct nfs_fh *f)
int lock_to_openmode(struct file_lock *lock)
{
- return (lock->fl_type == F_WRLCK) ? O_WRONLY : O_RDONLY;
+ return lock_is_write(lock) ? O_WRONLY : O_RDONLY;
}
/*
@@ -176,22 +176,28 @@ nlm_delete_file(struct nlm_file *file)
}
}
-static int nlm_unlock_files(struct nlm_file *file)
+static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl)
{
struct file_lock lock;
- struct file *f;
- lock.fl_type = F_UNLCK;
+ locks_init_lock(&lock);
+ lock.c.flc_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
- for (f = file->f_file[0]; f <= file->f_file[1]; f++) {
- if (f && vfs_lock_file(f, F_SETLK, &lock, NULL) < 0) {
- pr_warn("lockd: unlock failure in %s:%d\n",
- __FILE__, __LINE__);
- return 1;
- }
- }
+ lock.c.flc_owner = fl->c.flc_owner;
+ lock.c.flc_pid = fl->c.flc_pid;
+ lock.c.flc_flags = FL_POSIX;
+
+ lock.c.flc_file = file->f_file[O_RDONLY];
+ if (lock.c.flc_file && vfs_lock_file(lock.c.flc_file, F_SETLK, &lock, NULL))
+ goto out_err;
+ lock.c.flc_file = file->f_file[O_WRONLY];
+ if (lock.c.flc_file && vfs_lock_file(lock.c.flc_file, F_SETLK, &lock, NULL))
+ goto out_err;
return 0;
+out_err:
+ pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__);
+ return 1;
}
/*
@@ -204,7 +210,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
- struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock_context *flctx = locks_inode_context(inode);
struct nlm_host *lockhost;
if (!flctx || list_empty_careful(&flctx->flc_posix))
@@ -212,18 +218,18 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
again:
file->f_locks = 0;
spin_lock(&flctx->flc_lock);
- list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ for_each_file_lock(fl, &flctx->flc_posix) {
if (fl->fl_lmops != &nlmsvc_lock_operations)
continue;
/* update current lock count */
file->f_locks++;
- lockhost = ((struct nlm_lockowner *)fl->fl_owner)->host;
+ lockhost = ((struct nlm_lockowner *) fl->c.flc_owner)->host;
if (match(lockhost, host)) {
spin_unlock(&flctx->flc_lock);
- if (nlm_unlock_files(file))
+ if (nlm_unlock_files(file, fl))
return 1;
goto again;
}
@@ -259,14 +265,14 @@ nlm_file_inuse(struct nlm_file *file)
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
- struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock_context *flctx = locks_inode_context(inode);
if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
return 1;
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
spin_lock(&flctx->flc_lock);
- list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ for_each_file_lock(fl, &flctx->flc_posix) {
if (fl->fl_lmops == &nlmsvc_lock_operations) {
spin_unlock(&flctx->flc_lock);
return 1;
@@ -280,11 +286,10 @@ nlm_file_inuse(struct nlm_file *file)
static void nlm_close_files(struct nlm_file *file)
{
- struct file *f;
-
- for (f = file->f_file[0]; f <= file->f_file[1]; f++)
- if (f)
- nlmsvc_ops->fclose(f);
+ if (file->f_file[O_RDONLY])
+ nlmsvc_ops->fclose(file->f_file[O_RDONLY]);
+ if (file->f_file[O_WRONLY])
+ nlmsvc_ops->fclose(file->f_file[O_WRONLY]);
}
/*
diff --git a/fs/lockd/trace.c b/fs/lockd/trace.c
new file mode 100644
index 000000000000..d9a6ff6e673c
--- /dev/null
+++ b/fs/lockd/trace.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/lockd/trace.h b/fs/lockd/trace.h
new file mode 100644
index 000000000000..7461b13b6e74
--- /dev/null
+++ b/fs/lockd/trace.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lockd
+
+#if !defined(_TRACE_LOCKD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LOCKD_H
+
+#include <linux/tracepoint.h>
+#include <linux/crc32.h>
+#include <linux/nfs.h>
+#include <linux/lockd/lockd.h>
+
+#ifdef CONFIG_LOCKD_V4
+#define NLM_STATUS_LIST \
+ nlm_status_code(LCK_GRANTED) \
+ nlm_status_code(LCK_DENIED) \
+ nlm_status_code(LCK_DENIED_NOLOCKS) \
+ nlm_status_code(LCK_BLOCKED) \
+ nlm_status_code(LCK_DENIED_GRACE_PERIOD) \
+ nlm_status_code(DEADLCK) \
+ nlm_status_code(ROFS) \
+ nlm_status_code(STALE_FH) \
+ nlm_status_code(FBIG) \
+ nlm_status_code_end(FAILED)
+#else
+#define NLM_STATUS_LIST \
+ nlm_status_code(LCK_GRANTED) \
+ nlm_status_code(LCK_DENIED) \
+ nlm_status_code(LCK_DENIED_NOLOCKS) \
+ nlm_status_code(LCK_BLOCKED) \
+ nlm_status_code_end(LCK_DENIED_GRACE_PERIOD)
+#endif
+
+#undef nlm_status_code
+#undef nlm_status_code_end
+#define nlm_status_code(x) TRACE_DEFINE_ENUM(NLM_##x);
+#define nlm_status_code_end(x) TRACE_DEFINE_ENUM(NLM_##x);
+
+NLM_STATUS_LIST
+
+#undef nlm_status_code
+#undef nlm_status_code_end
+#define nlm_status_code(x) { NLM_##x, #x },
+#define nlm_status_code_end(x) { NLM_##x, #x }
+
+#define show_nlm_status(x) __print_symbolic(x, NLM_STATUS_LIST)
+
+DECLARE_EVENT_CLASS(nlmclnt_lock_event,
+ TP_PROTO(
+ const struct nlm_lock *lock,
+ const struct sockaddr *addr,
+ unsigned int addrlen,
+ __be32 status
+ ),
+
+ TP_ARGS(lock, addr, addrlen, status),
+
+ TP_STRUCT__entry(
+ __field(u32, oh)
+ __field(u32, svid)
+ __field(u32, fh)
+ __field(unsigned long, status)
+ __field(u64, start)
+ __field(u64, len)
+ __sockaddr(addr, addrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->oh = ~crc32_le(0xffffffff, lock->oh.data, lock->oh.len);
+ __entry->svid = lock->svid;
+ __entry->fh = nfs_fhandle_hash(&lock->fh);
+ __entry->start = lock->lock_start;
+ __entry->len = lock->lock_len;
+ __entry->status = be32_to_cpu(status);
+ __assign_sockaddr(addr, addr, addrlen);
+ ),
+
+ TP_printk(
+ "addr=%pISpc oh=0x%08x svid=0x%08x fh=0x%08x start=%llu len=%llu status=%s",
+ __get_sockaddr(addr), __entry->oh, __entry->svid,
+ __entry->fh, __entry->start, __entry->len,
+ show_nlm_status(__entry->status)
+ )
+);
+
+#define DEFINE_NLMCLNT_EVENT(name) \
+ DEFINE_EVENT(nlmclnt_lock_event, name, \
+ TP_PROTO( \
+ const struct nlm_lock *lock, \
+ const struct sockaddr *addr, \
+ unsigned int addrlen, \
+ __be32 status \
+ ), \
+ TP_ARGS(lock, addr, addrlen, status))
+
+DEFINE_NLMCLNT_EVENT(nlmclnt_test);
+DEFINE_NLMCLNT_EVENT(nlmclnt_lock);
+DEFINE_NLMCLNT_EVENT(nlmclnt_unlock);
+DEFINE_NLMCLNT_EVENT(nlmclnt_grant);
+
+#endif /* _TRACE_LOCKD_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
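Each DEFINE_NLMCLNT_EVENT() expands to a callable trace_<name>() hook, so the NLM client can record an operation and its reply status in one line. A hedged call-site sketch, assuming the existing struct nlm_rqst / struct nlm_host field layout:

	/* after the reply to an NLM LOCK call has been received */
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen,
			   req->a_res.status);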
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 2fb5748dae0c..adfcce2bf11b 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -88,8 +88,8 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
return false;
locks_init_lock(fl);
- fl->fl_flags = FL_POSIX;
- fl->fl_type = F_RDLCK;
+ fl->c.flc_flags = FL_POSIX;
+ fl->c.flc_type = F_RDLCK;
end = start + len - 1;
fl->fl_start = s32_to_loff_t(start);
if (len == 0 || end < 0)
@@ -107,7 +107,7 @@ svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock)
s32 start, len;
/* exclusive */
- if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0)
+ if (xdr_stream_encode_bool(xdr, fl->c.flc_type != F_RDLCK) < 0)
return false;
if (xdr_stream_encode_u32(xdr, lock->svid) < 0)
return false;
@@ -164,7 +164,7 @@ nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
if (exclusive)
- argp->lock.fl.fl_type = F_WRLCK;
+ argp->lock.fl.c.flc_type = F_WRLCK;
return true;
}
@@ -184,7 +184,7 @@ nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
if (exclusive)
- argp->lock.fl.fl_type = F_WRLCK;
+ argp->lock.fl.c.flc_type = F_WRLCK;
if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
@@ -209,7 +209,7 @@ nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
if (exclusive)
- argp->lock.fl.fl_type = F_WRLCK;
+ argp->lock.fl.c.flc_type = F_WRLCK;
return true;
}
@@ -223,7 +223,7 @@ nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
- argp->lock.fl.fl_type = F_UNLCK;
+ argp->lock.fl.c.flc_type = F_UNLCK;
return true;
}
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 856267c0864b..e343c820301f 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -20,13 +20,6 @@
#include "svcxdr.h"
-static inline loff_t
-s64_to_loff_t(__s64 offset)
-{
- return (loff_t)offset;
-}
-
-
static inline s64
loff_t_to_s64(loff_t offset)
{
@@ -40,6 +33,17 @@ loff_t_to_s64(loff_t offset)
return res;
}
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
+{
+ s64 end = off + len - 1;
+
+ fl->fl_start = off;
+ if (len == 0 || end < 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = end;
+}
+
/*
* NLM file handles are defined by specification to be a variable-length
* XDR opaque no longer than 1024 bytes. However, this implementation
@@ -70,8 +74,6 @@ static bool
svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
- u64 len, start;
- s64 end;
if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
return false;
@@ -81,21 +83,14 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
return false;
if (xdr_stream_decode_u32(xdr, &lock->svid) < 0)
return false;
- if (xdr_stream_decode_u64(xdr, &start) < 0)
+ if (xdr_stream_decode_u64(xdr, &lock->lock_start) < 0)
return false;
- if (xdr_stream_decode_u64(xdr, &len) < 0)
+ if (xdr_stream_decode_u64(xdr, &lock->lock_len) < 0)
return false;
locks_init_lock(fl);
- fl->fl_flags = FL_POSIX;
- fl->fl_type = F_RDLCK;
- end = start + len - 1;
- fl->fl_start = s64_to_loff_t(start);
- if (len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = s64_to_loff_t(end);
-
+ fl->c.flc_type = F_RDLCK;
+ nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
return true;
}
@@ -106,7 +101,7 @@ svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock)
s64 start, len;
/* exclusive */
- if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0)
+ if (xdr_stream_encode_bool(xdr, fl->c.flc_type != F_RDLCK) < 0)
return false;
if (xdr_stream_encode_u32(xdr, lock->svid) < 0)
return false;
@@ -163,7 +158,7 @@ nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
if (exclusive)
- argp->lock.fl.fl_type = F_WRLCK;
+ argp->lock.fl.c.flc_type = F_WRLCK;
return true;
}
@@ -183,7 +178,7 @@ nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
if (exclusive)
- argp->lock.fl.fl_type = F_WRLCK;
+ argp->lock.fl.c.flc_type = F_WRLCK;
if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0)
return false;
if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
@@ -208,7 +203,7 @@ nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
if (exclusive)
- argp->lock.fl.fl_type = F_WRLCK;
+ argp->lock.fl.c.flc_type = F_WRLCK;
return true;
}
@@ -222,7 +217,7 @@ nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
if (!svcxdr_decode_lock(xdr, &argp->lock))
return false;
- argp->lock.fl.fl_type = F_UNLCK;
+ argp->lock.fl.c.flc_type = F_UNLCK;
return true;
}
@@ -272,7 +267,6 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
- memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->svid = ~(u32)0;
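nlm4svc_set_file_lock_range() centralizes the NLMv4 length convention: a length of zero (or one whose last byte would wrap past loff_t) means "to end of file" and is encoded as fl_end = OFFSET_MAX; otherwise fl_end is the offset of the last byte in the range. A small usage sketch under those assumptions:

	struct file_lock fl;

	locks_init_lock(&fl);
	nlm4svc_set_file_lock_range(&fl, 100, 0);	/* whole file from offset 100 */
	/* fl.fl_start == 100, fl.fl_end == OFFSET_MAX */

	nlm4svc_set_file_lock_range(&fl, 100, 10);	/* ten bytes */
	/* fl.fl_start == 100, fl.fl_end == 109 */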
diff --git a/fs/locks.c b/fs/locks.c
index 0fca9d680978..9f565802a88c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -48,10 +48,10 @@
* children.
*
*/
-
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
@@ -62,34 +62,67 @@
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
+#include <linux/sysctl.h>
#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>
#include <linux/uaccess.h>
-#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
-#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
-#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
-#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
-#define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
+static struct file_lock *file_lock(struct file_lock_core *flc)
+{
+ return container_of(flc, struct file_lock, c);
+}
-static bool lease_breaking(struct file_lock *fl)
+static struct file_lease *file_lease(struct file_lock_core *flc)
{
- return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
+ return container_of(flc, struct file_lease, c);
}
-static int target_leasetype(struct file_lock *fl)
+static bool lease_breaking(struct file_lease *fl)
{
- if (fl->fl_flags & FL_UNLOCK_PENDING)
+ return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
+}
+
+static int target_leasetype(struct file_lease *fl)
+{
+ if (fl->c.flc_flags & FL_UNLOCK_PENDING)
return F_UNLCK;
- if (fl->fl_flags & FL_DOWNGRADE_PENDING)
+ if (fl->c.flc_flags & FL_DOWNGRADE_PENDING)
return F_RDLCK;
- return fl->fl_type;
-}
+ return fl->c.flc_type;
+}
+
+static int leases_enable = 1;
+static int lease_break_time = 45;
+
+#ifdef CONFIG_SYSCTL
+static const struct ctl_table locks_sysctls[] = {
+ {
+ .procname = "leases-enable",
+ .data = &leases_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_MMU
+ {
+ .procname = "lease-break-time",
+ .data = &lease_break_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif /* CONFIG_MMU */
+};
-int leases_enable = 1;
-int lease_break_time = 45;
+static int __init init_fs_locks_sysctls(void)
+{
+ register_sysctl_init("fs", locks_sysctls);
+ return 0;
+}
+early_initcall(init_fs_locks_sysctls);
+#endif /* CONFIG_SYSCTL */
/*
* The global file_lock_list is only used for displaying /proc/locks, so we
@@ -136,8 +169,9 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
-static struct kmem_cache *flctx_cache __read_mostly;
-static struct kmem_cache *filelock_cache __read_mostly;
+static struct kmem_cache *flctx_cache __ro_after_init;
+static struct kmem_cache *filelock_cache __ro_after_init;
+static struct kmem_cache *filelease_cache __ro_after_init;
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
@@ -145,7 +179,7 @@ locks_get_lock_context(struct inode *inode, int type)
struct file_lock_context *ctx;
/* paired with cmpxchg() below */
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (likely(ctx) || type == F_UNLCK)
goto out;
@@ -164,7 +198,7 @@ locks_get_lock_context(struct inode *inode, int type)
*/
if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
kmem_cache_free(flctx_cache, ctx);
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
}
out:
trace_locks_get_lock_context(inode, type, ctx);
@@ -174,11 +208,12 @@ out:
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
- struct file_lock *fl;
+ struct file_lock_core *flc;
- list_for_each_entry(fl, list, fl_list) {
- pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
- }
+ list_for_each_entry(flc, list, flc_list)
+ pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
+ list_type, flc->flc_owner, flc->flc_flags,
+ flc->flc_type, flc->flc_pid);
}
static void
@@ -199,25 +234,25 @@ locks_check_ctx_lists(struct inode *inode)
}
static void
-locks_check_ctx_file_list(struct file *filp, struct list_head *list,
- char *list_type)
+locks_check_ctx_file_list(struct file *filp, struct list_head *list, char *list_type)
{
- struct file_lock *fl;
- struct inode *inode = locks_inode(filp);
+ struct file_lock_core *flc;
+ struct inode *inode = file_inode(filp);
- list_for_each_entry(fl, list, fl_list)
- if (fl->fl_file == filp)
+ list_for_each_entry(flc, list, flc_list)
+ if (flc->flc_file == filp)
pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
list_type, MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino,
- fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
+ flc->flc_owner, flc->flc_flags,
+ flc->flc_type, flc->flc_pid);
}
void
locks_free_lock_context(struct inode *inode)
{
- struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock_context *ctx = locks_inode_context(inode);
if (unlikely(ctx)) {
locks_check_ctx_lists(inode);
@@ -225,13 +260,13 @@ locks_free_lock_context(struct inode *inode)
}
}
-static void locks_init_lock_heads(struct file_lock *fl)
+static void locks_init_lock_heads(struct file_lock_core *flc)
{
- INIT_HLIST_NODE(&fl->fl_link);
- INIT_LIST_HEAD(&fl->fl_list);
- INIT_LIST_HEAD(&fl->fl_blocked_requests);
- INIT_LIST_HEAD(&fl->fl_blocked_member);
- init_waitqueue_head(&fl->fl_wait);
+ INIT_HLIST_NODE(&flc->flc_link);
+ INIT_LIST_HEAD(&flc->flc_list);
+ INIT_LIST_HEAD(&flc->flc_blocked_requests);
+ INIT_LIST_HEAD(&flc->flc_blocked_member);
+ init_waitqueue_head(&flc->flc_wait);
}
/* Allocate an empty lock structure. */
@@ -240,19 +275,33 @@ struct file_lock *locks_alloc_lock(void)
struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
if (fl)
- locks_init_lock_heads(fl);
+ locks_init_lock_heads(&fl->c);
return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
+/* Allocate an empty lock structure. */
+struct file_lease *locks_alloc_lease(void)
+{
+ struct file_lease *fl = kmem_cache_zalloc(filelease_cache, GFP_KERNEL);
+
+ if (fl)
+ locks_init_lock_heads(&fl->c);
+
+ return fl;
+}
+EXPORT_SYMBOL_GPL(locks_alloc_lease);
+
void locks_release_private(struct file_lock *fl)
{
- BUG_ON(waitqueue_active(&fl->fl_wait));
- BUG_ON(!list_empty(&fl->fl_list));
- BUG_ON(!list_empty(&fl->fl_blocked_requests));
- BUG_ON(!list_empty(&fl->fl_blocked_member));
- BUG_ON(!hlist_unhashed(&fl->fl_link));
+ struct file_lock_core *flc = &fl->c;
+
+ BUG_ON(waitqueue_active(&flc->flc_wait));
+ BUG_ON(!list_empty(&flc->flc_list));
+ BUG_ON(!list_empty(&flc->flc_blocked_requests));
+ BUG_ON(!list_empty(&flc->flc_blocked_member));
+ BUG_ON(!hlist_unhashed(&flc->flc_link));
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
@@ -262,14 +311,41 @@ void locks_release_private(struct file_lock *fl)
if (fl->fl_lmops) {
if (fl->fl_lmops->lm_put_owner) {
- fl->fl_lmops->lm_put_owner(fl->fl_owner);
- fl->fl_owner = NULL;
+ fl->fl_lmops->lm_put_owner(flc->flc_owner);
+ flc->flc_owner = NULL;
}
fl->fl_lmops = NULL;
}
}
EXPORT_SYMBOL_GPL(locks_release_private);
+/**
+ * locks_owner_has_blockers - Check for blocking lock requests
+ * @flctx: file lock context
+ * @owner: lock owner
+ *
+ * Return values:
+ * %true: @owner has at least one blocker
+ * %false: @owner has no blockers
+ */
+bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner)
+{
+ struct file_lock_core *flc;
+
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(flc, &flctx->flc_posix, flc_list) {
+ if (flc->flc_owner != owner)
+ continue;
+ if (!list_empty(&flc->flc_blocked_requests)) {
+ spin_unlock(&flctx->flc_lock);
+ return true;
+ }
+ }
+ spin_unlock(&flctx->flc_lock);
+ return false;
+}
+EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
+
/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
@@ -278,35 +354,52 @@ void locks_free_lock(struct file_lock *fl)
}
EXPORT_SYMBOL(locks_free_lock);
+/* Free a lease which is not in use. */
+void locks_free_lease(struct file_lease *fl)
+{
+ kmem_cache_free(filelease_cache, fl);
+}
+EXPORT_SYMBOL(locks_free_lease);
+
static void
locks_dispose_list(struct list_head *dispose)
{
- struct file_lock *fl;
+ struct file_lock_core *flc;
while (!list_empty(dispose)) {
- fl = list_first_entry(dispose, struct file_lock, fl_list);
- list_del_init(&fl->fl_list);
- locks_free_lock(fl);
+ flc = list_first_entry(dispose, struct file_lock_core, flc_list);
+ list_del_init(&flc->flc_list);
+ if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
+ locks_free_lease(file_lease(flc));
+ else
+ locks_free_lock(file_lock(flc));
}
}
void locks_init_lock(struct file_lock *fl)
{
memset(fl, 0, sizeof(struct file_lock));
- locks_init_lock_heads(fl);
+ locks_init_lock_heads(&fl->c);
}
EXPORT_SYMBOL(locks_init_lock);
+void locks_init_lease(struct file_lease *fl)
+{
+ memset(fl, 0, sizeof(*fl));
+ locks_init_lock_heads(&fl->c);
+}
+EXPORT_SYMBOL(locks_init_lease);
+
/*
* Initialize a new lock from an existing file_lock structure.
*/
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
- new->fl_owner = fl->fl_owner;
- new->fl_pid = fl->fl_pid;
- new->fl_file = NULL;
- new->fl_flags = fl->fl_flags;
- new->fl_type = fl->fl_type;
+ new->c.flc_owner = fl->c.flc_owner;
+ new->c.flc_pid = fl->c.flc_pid;
+ new->c.flc_file = NULL;
+ new->c.flc_flags = fl->c.flc_flags;
+ new->c.flc_type = fl->c.flc_type;
new->fl_start = fl->fl_start;
new->fl_end = fl->fl_end;
new->fl_lmops = fl->fl_lmops;
@@ -314,7 +407,7 @@ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
if (fl->fl_lmops) {
if (fl->fl_lmops->lm_get_owner)
- fl->fl_lmops->lm_get_owner(fl->fl_owner);
+ fl->fl_lmops->lm_get_owner(fl->c.flc_owner);
}
}
EXPORT_SYMBOL(locks_copy_conflock);
@@ -326,7 +419,7 @@ void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
locks_copy_conflock(new, fl);
- new->fl_file = fl->fl_file;
+ new->c.flc_file = fl->c.flc_file;
new->fl_ops = fl->fl_ops;
if (fl->fl_ops) {
@@ -342,15 +435,17 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
/*
* As ctx->flc_lock is held, new requests cannot be added to
- * ->fl_blocked_requests, so we don't need a lock to check if it
+ * ->flc_blocked_requests, so we don't need a lock to check if it
* is empty.
*/
- if (list_empty(&fl->fl_blocked_requests))
+ if (list_empty(&fl->c.flc_blocked_requests))
return;
spin_lock(&blocked_lock_lock);
- list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
- list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
- f->fl_blocker = new;
+ list_splice_init(&fl->c.flc_blocked_requests,
+ &new->c.flc_blocked_requests);
+ list_for_each_entry(f, &new->c.flc_blocked_requests,
+ c.flc_blocked_member)
+ f->c.flc_blocker = &new->c;
spin_unlock(&blocked_lock_lock);
}
@@ -367,39 +462,25 @@ static inline int flock_translate_cmd(int cmd) {
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
-static struct file_lock *
-flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
+static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
{
- int type = flock_translate_cmd(cmd);
-
- if (type < 0)
- return ERR_PTR(type);
-
- if (fl == NULL) {
- fl = locks_alloc_lock();
- if (fl == NULL)
- return ERR_PTR(-ENOMEM);
- } else {
- locks_init_lock(fl);
- }
+ locks_init_lock(fl);
- fl->fl_file = filp;
- fl->fl_owner = filp;
- fl->fl_pid = current->tgid;
- fl->fl_flags = FL_FLOCK;
- fl->fl_type = type;
+ fl->c.flc_file = filp;
+ fl->c.flc_owner = filp;
+ fl->c.flc_pid = current->tgid;
+ fl->c.flc_flags = FL_FLOCK;
+ fl->c.flc_type = type;
fl->fl_end = OFFSET_MAX;
-
- return fl;
}
-static int assign_type(struct file_lock *fl, long type)
+static int assign_type(struct file_lock_core *flc, int type)
{
switch (type) {
case F_RDLCK:
case F_WRLCK:
case F_UNLCK:
- fl->fl_type = type;
+ flc->flc_type = type;
break;
default:
return -EINVAL;
@@ -444,14 +525,14 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
} else
fl->fl_end = OFFSET_MAX;
- fl->fl_owner = current->files;
- fl->fl_pid = current->tgid;
- fl->fl_file = filp;
- fl->fl_flags = FL_POSIX;
+ fl->c.flc_owner = current->files;
+ fl->c.flc_pid = current->tgid;
+ fl->c.flc_file = filp;
+ fl->c.flc_flags = FL_POSIX;
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
- return assign_type(fl, l->l_type);
+ return assign_type(&fl->c, l->l_type);
}
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
@@ -472,16 +553,16 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
/* default lease lock manager operations */
static bool
-lease_break_callback(struct file_lock *fl)
+lease_break_callback(struct file_lease *fl)
{
kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
return false;
}
static void
-lease_setup(struct file_lock *fl, void **priv)
+lease_setup(struct file_lease *fl, void **priv)
{
- struct file *filp = fl->fl_file;
+ struct file *filp = fl->c.flc_file;
struct fasync_struct *fa = *priv;
/*
@@ -495,7 +576,7 @@ lease_setup(struct file_lock *fl, void **priv)
__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
-static const struct lock_manager_operations lease_manager_ops = {
+static const struct lease_manager_operations lease_manager_ops = {
.lm_break = lease_break_callback,
.lm_change = lease_modify,
.lm_setup = lease_setup,
@@ -504,35 +585,32 @@ static const struct lock_manager_operations lease_manager_ops = {
/*
* Initialize a lease, use the default lock manager operations
*/
-static int lease_init(struct file *filp, long type, struct file_lock *fl)
+static int lease_init(struct file *filp, unsigned int flags, int type, struct file_lease *fl)
{
- if (assign_type(fl, type) != 0)
+ if (assign_type(&fl->c, type) != 0)
return -EINVAL;
- fl->fl_owner = filp;
- fl->fl_pid = current->tgid;
+ fl->c.flc_owner = filp;
+ fl->c.flc_pid = current->tgid;
- fl->fl_file = filp;
- fl->fl_flags = FL_LEASE;
- fl->fl_start = 0;
- fl->fl_end = OFFSET_MAX;
- fl->fl_ops = NULL;
+ fl->c.flc_file = filp;
+ fl->c.flc_flags = flags;
fl->fl_lmops = &lease_manager_ops;
return 0;
}
/* Allocate a file_lock initialised to this type of lease */
-static struct file_lock *lease_alloc(struct file *filp, long type)
+static struct file_lease *lease_alloc(struct file *filp, unsigned int flags, int type)
{
- struct file_lock *fl = locks_alloc_lock();
+ struct file_lease *fl = locks_alloc_lease();
int error = -ENOMEM;
if (fl == NULL)
return ERR_PTR(error);
- error = lease_init(filp, type, fl);
+ error = lease_init(filp, flags, type, fl);
if (error) {
- locks_free_lock(fl);
+ locks_free_lease(fl);
return ERR_PTR(error);
}
return fl;
@@ -549,26 +627,26 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
/*
* Check whether two locks have the same owner.
*/
-static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *fl2)
{
- return fl1->fl_owner == fl2->fl_owner;
+ return fl1->flc_owner == fl2->flc_owner;
}
/* Must be called with the flc_lock held! */
-static void locks_insert_global_locks(struct file_lock *fl)
+static void locks_insert_global_locks(struct file_lock_core *flc)
{
struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
percpu_rwsem_assert_held(&file_rwsem);
spin_lock(&fll->lock);
- fl->fl_link_cpu = smp_processor_id();
- hlist_add_head(&fl->fl_link, &fll->hlist);
+ flc->flc_link_cpu = smp_processor_id();
+ hlist_add_head(&flc->flc_link, &fll->hlist);
spin_unlock(&fll->lock);
}
/* Must be called with the flc_lock held! */
-static void locks_delete_global_locks(struct file_lock *fl)
+static void locks_delete_global_locks(struct file_lock_core *flc)
{
struct file_lock_list_struct *fll;
@@ -579,33 +657,33 @@ static void locks_delete_global_locks(struct file_lock *fl)
* is done while holding the flc_lock, and new insertions into the list
* also require that it be held.
*/
- if (hlist_unhashed(&fl->fl_link))
+ if (hlist_unhashed(&flc->flc_link))
return;
- fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
+ fll = per_cpu_ptr(&file_lock_list, flc->flc_link_cpu);
spin_lock(&fll->lock);
- hlist_del_init(&fl->fl_link);
+ hlist_del_init(&flc->flc_link);
spin_unlock(&fll->lock);
}
static unsigned long
-posix_owner_key(struct file_lock *fl)
+posix_owner_key(struct file_lock_core *flc)
{
- return (unsigned long)fl->fl_owner;
+ return (unsigned long) flc->flc_owner;
}
-static void locks_insert_global_blocked(struct file_lock *waiter)
+static void locks_insert_global_blocked(struct file_lock_core *waiter)
{
lockdep_assert_held(&blocked_lock_lock);
- hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
+ hash_add(blocked_hash, &waiter->flc_link, posix_owner_key(waiter));
}
-static void locks_delete_global_blocked(struct file_lock *waiter)
+static void locks_delete_global_blocked(struct file_lock_core *waiter)
{
lockdep_assert_held(&blocked_lock_lock);
- hash_del(&waiter->fl_link);
+ hash_del(&waiter->flc_link);
}
/* Remove waiter from blocker's block list.
@@ -613,41 +691,39 @@ static void locks_delete_global_blocked(struct file_lock *waiter)
*
* Must be called with blocked_lock_lock held.
*/
-static void __locks_delete_block(struct file_lock *waiter)
+static void __locks_unlink_block(struct file_lock_core *waiter)
{
locks_delete_global_blocked(waiter);
- list_del_init(&waiter->fl_blocked_member);
+ list_del_init(&waiter->flc_blocked_member);
}
-static void __locks_wake_up_blocks(struct file_lock *blocker)
+static void __locks_wake_up_blocks(struct file_lock_core *blocker)
{
- while (!list_empty(&blocker->fl_blocked_requests)) {
- struct file_lock *waiter;
+ while (!list_empty(&blocker->flc_blocked_requests)) {
+ struct file_lock_core *waiter;
+ struct file_lock *fl;
- waiter = list_first_entry(&blocker->fl_blocked_requests,
- struct file_lock, fl_blocked_member);
- __locks_delete_block(waiter);
- if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
- waiter->fl_lmops->lm_notify(waiter);
+ waiter = list_first_entry(&blocker->flc_blocked_requests,
+ struct file_lock_core, flc_blocked_member);
+
+ fl = file_lock(waiter);
+ __locks_unlink_block(waiter);
+ if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) &&
+ fl->fl_lmops && fl->fl_lmops->lm_notify)
+ fl->fl_lmops->lm_notify(fl);
else
- wake_up(&waiter->fl_wait);
+ locks_wake_up_waiter(waiter);
/*
- * The setting of fl_blocker to NULL marks the "done"
+ * The setting of flc_blocker to NULL marks the "done"
* point in deleting a block. Paired with acquire at the top
* of locks_delete_block().
*/
- smp_store_release(&waiter->fl_blocker, NULL);
+ smp_store_release(&waiter->flc_blocker, NULL);
}
}
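A hedged sketch (assumption, defined outside this hunk): the file_lock() accessor used above, and the file_lease() accessor used later in the patch, are taken to recover the containing structure from the embedded struct file_lock_core member 'c' via container_of(); locks_wake_up_waiter() is likewise assumed to be a thin wake_up(&flc->flc_wait) wrapper.

static inline struct file_lock *file_lock(struct file_lock_core *flc)
{
	return container_of(flc, struct file_lock, c);
}

static inline struct file_lease *file_lease(struct file_lock_core *flc)
{
	return container_of(flc, struct file_lease, c);
}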
-/**
- * locks_delete_block - stop waiting for a file lock
- * @waiter: the lock which was waiting
- *
- * lockd/nfsd need to disconnect the lock while working on it.
- */
-int locks_delete_block(struct file_lock *waiter)
+static int __locks_delete_block(struct file_lock_core *waiter)
{
int status = -ENOENT;
@@ -672,24 +748,35 @@ int locks_delete_block(struct file_lock *waiter)
* no new locks can be inserted into its fl_blocked_requests list, and
* can avoid doing anything further if the list is empty.
*/
- if (!smp_load_acquire(&waiter->fl_blocker) &&
- list_empty(&waiter->fl_blocked_requests))
+ if (!smp_load_acquire(&waiter->flc_blocker) &&
+ list_empty(&waiter->flc_blocked_requests))
return status;
spin_lock(&blocked_lock_lock);
- if (waiter->fl_blocker)
+ if (waiter->flc_blocker)
status = 0;
__locks_wake_up_blocks(waiter);
- __locks_delete_block(waiter);
+ __locks_unlink_block(waiter);
/*
* The setting of fl_blocker to NULL marks the "done" point in deleting
* a block. Paired with acquire at the top of this function.
*/
- smp_store_release(&waiter->fl_blocker, NULL);
+ smp_store_release(&waiter->flc_blocker, NULL);
spin_unlock(&blocked_lock_lock);
return status;
}
+
+/**
+ * locks_delete_block - stop waiting for a file lock
+ * @waiter: the lock which was waiting
+ *
+ * lockd/nfsd need to disconnect the lock while working on it.
+ */
+int locks_delete_block(struct file_lock *waiter)
+{
+ return __locks_delete_block(&waiter->c);
+}
EXPORT_SYMBOL(locks_delete_block);
/* Insert waiter into blocker's block list.
@@ -707,26 +794,28 @@ EXPORT_SYMBOL(locks_delete_block);
* waiters, and add beneath any waiter that blocks the new waiter.
* Thus wakeups don't happen until needed.
*/
-static void __locks_insert_block(struct file_lock *blocker,
- struct file_lock *waiter,
- bool conflict(struct file_lock *,
- struct file_lock *))
+static void __locks_insert_block(struct file_lock_core *blocker,
+ struct file_lock_core *waiter,
+ bool conflict(struct file_lock_core *,
+ struct file_lock_core *))
{
- struct file_lock *fl;
- BUG_ON(!list_empty(&waiter->fl_blocked_member));
+ struct file_lock_core *flc;
+ BUG_ON(!list_empty(&waiter->flc_blocked_member));
new_blocker:
- list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
- if (conflict(fl, waiter)) {
- blocker = fl;
+ list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member)
+ if (conflict(flc, waiter)) {
+ blocker = flc;
goto new_blocker;
}
- waiter->fl_blocker = blocker;
- list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
- if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
+ waiter->flc_blocker = blocker;
+ list_add_tail(&waiter->flc_blocked_member,
+ &blocker->flc_blocked_requests);
+
+ if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX)
locks_insert_global_blocked(waiter);
- /* The requests in waiter->fl_blocked are known to conflict with
+ /* The requests in waiter->flc_blocked are known to conflict with
* waiter, but might not conflict with blocker, or the requests
* and lock which block it. So they all need to be woken.
*/
@@ -734,10 +823,10 @@ new_blocker:
}
/* Must be called with flc_lock held. */
-static void locks_insert_block(struct file_lock *blocker,
- struct file_lock *waiter,
- bool conflict(struct file_lock *,
- struct file_lock *))
+static void locks_insert_block(struct file_lock_core *blocker,
+ struct file_lock_core *waiter,
+ bool conflict(struct file_lock_core *,
+ struct file_lock_core *))
{
spin_lock(&blocked_lock_lock);
__locks_insert_block(blocker, waiter, conflict);
@@ -749,7 +838,7 @@ static void locks_insert_block(struct file_lock *blocker,
*
* Must be called with the inode->flc_lock held!
*/
-static void locks_wake_up_blocks(struct file_lock *blocker)
+static void locks_wake_up_blocks(struct file_lock_core *blocker)
{
/*
* Avoid taking global lock if list is empty. This is safe since new
@@ -758,7 +847,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
* fl_blocked_requests list does not require the flc_lock, so we must
* recheck list_empty() after acquiring the blocked_lock_lock.
*/
- if (list_empty(&blocker->fl_blocked_requests))
+ if (list_empty(&blocker->flc_blocked_requests))
return;
spin_lock(&blocked_lock_lock);
@@ -767,39 +856,39 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
}
static void
-locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
+locks_insert_lock_ctx(struct file_lock_core *fl, struct list_head *before)
{
- list_add_tail(&fl->fl_list, before);
+ list_add_tail(&fl->flc_list, before);
locks_insert_global_locks(fl);
}
static void
-locks_unlink_lock_ctx(struct file_lock *fl)
+locks_unlink_lock_ctx(struct file_lock_core *fl)
{
locks_delete_global_locks(fl);
- list_del_init(&fl->fl_list);
+ list_del_init(&fl->flc_list);
locks_wake_up_blocks(fl);
}
static void
-locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
+locks_delete_lock_ctx(struct file_lock_core *fl, struct list_head *dispose)
{
locks_unlink_lock_ctx(fl);
if (dispose)
- list_add(&fl->fl_list, dispose);
+ list_add(&fl->flc_list, dispose);
else
- locks_free_lock(fl);
+ locks_free_lock(file_lock(fl));
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
* checks for shared/exclusive status of overlapping locks.
*/
-static bool locks_conflict(struct file_lock *caller_fl,
- struct file_lock *sys_fl)
+static bool locks_conflict(struct file_lock_core *caller_flc,
+ struct file_lock_core *sys_flc)
{
- if (sys_fl->fl_type == F_WRLCK)
+ if (sys_flc->flc_type == F_WRLCK)
return true;
- if (caller_fl->fl_type == F_WRLCK)
+ if (caller_flc->flc_type == F_WRLCK)
return true;
return false;
}
@@ -807,35 +896,56 @@ static bool locks_conflict(struct file_lock *caller_fl,
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
* checking before calling the locks_conflict().
*/
-static bool posix_locks_conflict(struct file_lock *caller_fl,
- struct file_lock *sys_fl)
+static bool posix_locks_conflict(struct file_lock_core *caller_flc,
+ struct file_lock_core *sys_flc)
{
+ struct file_lock *caller_fl = file_lock(caller_flc);
+ struct file_lock *sys_fl = file_lock(sys_flc);
+
/* POSIX locks owned by the same process do not conflict with
* each other.
*/
- if (posix_same_owner(caller_fl, sys_fl))
+ if (posix_same_owner(caller_flc, sys_flc))
return false;
/* Check whether they overlap */
if (!locks_overlap(caller_fl, sys_fl))
return false;
- return locks_conflict(caller_fl, sys_fl);
+ return locks_conflict(caller_flc, sys_flc);
+}
+
+/* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK
+ * path so checks for additional GETLK-specific things like F_UNLCK.
+ */
+static bool posix_test_locks_conflict(struct file_lock *caller_fl,
+ struct file_lock *sys_fl)
+{
+ struct file_lock_core *caller = &caller_fl->c;
+ struct file_lock_core *sys = &sys_fl->c;
+
+ /* F_UNLCK checks any locks on the same fd. */
+ if (lock_is_unlock(caller_fl)) {
+ if (!posix_same_owner(caller, sys))
+ return false;
+ return locks_overlap(caller_fl, sys_fl);
+ }
+ return posix_locks_conflict(caller, sys);
}
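A hedged sketch (assumption): lock_is_unlock() used above is presumed to be a trivial predicate on the embedded core, along the lines of:

static inline bool lock_is_unlock(struct file_lock *fl)
{
	return fl->c.flc_type == F_UNLCK;
}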
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
* checking before calling the locks_conflict().
*/
-static bool flock_locks_conflict(struct file_lock *caller_fl,
- struct file_lock *sys_fl)
+static bool flock_locks_conflict(struct file_lock_core *caller_flc,
+ struct file_lock_core *sys_flc)
{
/* FLOCK locks referring to the same filp do not conflict with
* each other.
*/
- if (caller_fl->fl_file == sys_fl->fl_file)
+ if (caller_flc->flc_file == sys_flc->flc_file)
return false;
- return locks_conflict(caller_fl, sys_fl);
+ return locks_conflict(caller_flc, sys_flc);
}
void
@@ -843,22 +953,35 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
struct file_lock_context *ctx;
- struct inode *inode = locks_inode(filp);
+ struct inode *inode = file_inode(filp);
+ void *owner;
+ void (*func)(void);
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (!ctx || list_empty_careful(&ctx->flc_posix)) {
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
return;
}
+retry:
spin_lock(&ctx->flc_lock);
- list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
- if (posix_locks_conflict(fl, cfl)) {
- locks_copy_conflock(fl, cfl);
- goto out;
+ list_for_each_entry(cfl, &ctx->flc_posix, c.flc_list) {
+ if (!posix_test_locks_conflict(fl, cfl))
+ continue;
+ if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
+ && (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
+ owner = cfl->fl_lmops->lm_mod_owner;
+ func = cfl->fl_lmops->lm_expire_lock;
+ __module_get(owner);
+ spin_unlock(&ctx->flc_lock);
+ (*func)();
+ module_put(owner);
+ goto retry;
}
+ locks_copy_conflock(fl, cfl);
+ goto out;
}
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
out:
spin_unlock(&ctx->flc_lock);
return;
@@ -900,25 +1023,27 @@ EXPORT_SYMBOL(posix_test_lock);
#define MAX_DEADLK_ITERATIONS 10
-/* Find a lock that the owner of the given block_fl is blocking on. */
-static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+/* Find a lock that the owner of the given @blocker is blocking on. */
+static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker)
{
- struct file_lock *fl;
+ struct file_lock_core *flc;
- hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
- if (posix_same_owner(fl, block_fl)) {
- while (fl->fl_blocker)
- fl = fl->fl_blocker;
- return fl;
+ hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) {
+ if (posix_same_owner(flc, blocker)) {
+ while (flc->flc_blocker)
+ flc = flc->flc_blocker;
+ return flc;
}
}
return NULL;
}
/* Must be called with the blocked_lock_lock held! */
-static int posix_locks_deadlock(struct file_lock *caller_fl,
- struct file_lock *block_fl)
+static bool posix_locks_deadlock(struct file_lock *caller_fl,
+ struct file_lock *block_fl)
{
+ struct file_lock_core *caller = &caller_fl->c;
+ struct file_lock_core *blocker = &block_fl->c;
int i = 0;
lockdep_assert_held(&blocked_lock_lock);
@@ -927,16 +1052,16 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
* This deadlock detector can't reasonably detect deadlocks with
* FL_OFDLCK locks, since they aren't owned by a process, per-se.
*/
- if (IS_OFDLCK(caller_fl))
- return 0;
+ if (caller->flc_flags & FL_OFDLCK)
+ return false;
- while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+ while ((blocker = what_owner_is_waiting_for(blocker))) {
if (i++ > MAX_DEADLK_ITERATIONS)
- return 0;
- if (posix_same_owner(caller_fl, block_fl))
- return 1;
+ return false;
+ if (posix_same_owner(caller, blocker))
+ return true;
}
- return 0;
+ return false;
}
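Worked example (illustrative, not from the patch): suppose owner B is already blocked on a range held by owner A, and A now requests a range held by B. Starting from B's lock, what_owner_is_waiting_for() finds a blocked request owned by B in the global blocked_hash and follows its flc_blocker chain to the lock B is ultimately waiting on; that lock is owned by A, so posix_same_owner(caller, blocker) matches, posix_locks_deadlock() returns true, and A's request is rejected rather than left to block forever. OFD locks are exempted because they are owned by the open file description rather than a process, so owner-based cycle detection is meaningless for them.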
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
@@ -955,14 +1080,14 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
bool found = false;
LIST_HEAD(dispose);
- ctx = locks_get_lock_context(inode, request->fl_type);
+ ctx = locks_get_lock_context(inode, request->c.flc_type);
if (!ctx) {
- if (request->fl_type != F_UNLCK)
+ if (request->c.flc_type != F_UNLCK)
return -ENOMEM;
- return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
+ return (request->c.flc_flags & FL_EXISTS) ? -ENOENT : 0;
}
- if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
+ if (!(request->c.flc_flags & FL_ACCESS) && (request->c.flc_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
if (!new_fl)
return -ENOMEM;
@@ -970,41 +1095,41 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
- if (request->fl_flags & FL_ACCESS)
+ if (request->c.flc_flags & FL_ACCESS)
goto find_conflict;
- list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
- if (request->fl_file != fl->fl_file)
+ list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
+ if (request->c.flc_file != fl->c.flc_file)
continue;
- if (request->fl_type == fl->fl_type)
+ if (request->c.flc_type == fl->c.flc_type)
goto out;
found = true;
- locks_delete_lock_ctx(fl, &dispose);
+ locks_delete_lock_ctx(&fl->c, &dispose);
break;
}
- if (request->fl_type == F_UNLCK) {
- if ((request->fl_flags & FL_EXISTS) && !found)
+ if (lock_is_unlock(request)) {
+ if ((request->c.flc_flags & FL_EXISTS) && !found)
error = -ENOENT;
goto out;
}
find_conflict:
- list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
- if (!flock_locks_conflict(request, fl))
+ list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
+ if (!flock_locks_conflict(&request->c, &fl->c))
continue;
error = -EAGAIN;
- if (!(request->fl_flags & FL_SLEEP))
+ if (!(request->c.flc_flags & FL_SLEEP))
goto out;
error = FILE_LOCK_DEFERRED;
- locks_insert_block(fl, request, flock_locks_conflict);
+ locks_insert_block(&fl->c, &request->c, flock_locks_conflict);
goto out;
}
- if (request->fl_flags & FL_ACCESS)
+ if (request->c.flc_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
locks_move_blocks(new_fl, request);
- locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
+ locks_insert_lock_ctx(&new_fl->c, &ctx->flc_flock);
new_fl = NULL;
error = 0;
@@ -1030,10 +1155,12 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
int error;
bool added = false;
LIST_HEAD(dispose);
+ void *owner;
+ void (*func)(void);
- ctx = locks_get_lock_context(inode, request->fl_type);
+ ctx = locks_get_lock_context(inode, request->c.flc_type);
if (!ctx)
- return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
+ return lock_is_unlock(request) ? 0 : -ENOMEM;
/*
* We may need two file_lock structures for this operation,
@@ -1041,13 +1168,14 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
*
* In some cases we can be sure, that no new locks will be needed
*/
- if (!(request->fl_flags & FL_ACCESS) &&
- (request->fl_type != F_UNLCK ||
+ if (!(request->c.flc_flags & FL_ACCESS) &&
+ (request->c.flc_type != F_UNLCK ||
request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
new_fl = locks_alloc_lock();
new_fl2 = locks_alloc_lock();
}
+retry:
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
/*
@@ -1055,14 +1183,25 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
* there are any, either return error or put the request on the
* blocker's list of waiters and the global blocked_hash.
*/
- if (request->fl_type != F_UNLCK) {
- list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
- if (!posix_locks_conflict(request, fl))
+ if (request->c.flc_type != F_UNLCK) {
+ list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
+ if (!posix_locks_conflict(&request->c, &fl->c))
continue;
+ if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
+ && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
+ owner = fl->fl_lmops->lm_mod_owner;
+ func = fl->fl_lmops->lm_expire_lock;
+ __module_get(owner);
+ spin_unlock(&ctx->flc_lock);
+ percpu_up_read(&file_rwsem);
+ (*func)();
+ module_put(owner);
+ goto retry;
+ }
if (conflock)
locks_copy_conflock(conflock, fl);
error = -EAGAIN;
- if (!(request->fl_flags & FL_SLEEP))
+ if (!(request->c.flc_flags & FL_SLEEP))
goto out;
/*
* Deadlock detection and insertion into the blocked
@@ -1074,10 +1213,10 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
* Ensure that we don't find any locks blocked on this
* request during deadlock detection.
*/
- __locks_wake_up_blocks(request);
+ __locks_wake_up_blocks(&request->c);
if (likely(!posix_locks_deadlock(request, fl))) {
error = FILE_LOCK_DEFERRED;
- __locks_insert_block(fl, request,
+ __locks_insert_block(&fl->c, &request->c,
posix_locks_conflict);
}
spin_unlock(&blocked_lock_lock);
@@ -1087,22 +1226,22 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
/* If we're just looking for a conflict, we're done. */
error = 0;
- if (request->fl_flags & FL_ACCESS)
+ if (request->c.flc_flags & FL_ACCESS)
goto out;
/* Find the first old lock with the same owner as the new lock */
- list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
- if (posix_same_owner(request, fl))
+ list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
+ if (posix_same_owner(&request->c, &fl->c))
break;
}
/* Process locks with this owner. */
- list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
- if (!posix_same_owner(request, fl))
+ list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) {
+ if (!posix_same_owner(&request->c, &fl->c))
break;
/* Detect adjacent or overlapping regions (if same lock type) */
- if (request->fl_type == fl->fl_type) {
+ if (request->c.flc_type == fl->c.flc_type) {
/* In all comparisons of start vs end, use
* "start - 1" rather than "end + 1". If end
* is OFFSET_MAX, end + 1 will become negative.
@@ -1129,7 +1268,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
else
request->fl_end = fl->fl_end;
if (added) {
- locks_delete_lock_ctx(fl, &dispose);
+ locks_delete_lock_ctx(&fl->c, &dispose);
continue;
}
request = fl;
@@ -1142,7 +1281,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
continue;
if (fl->fl_start > request->fl_end)
break;
- if (request->fl_type == F_UNLCK)
+ if (lock_is_unlock(request))
added = true;
if (fl->fl_start < request->fl_start)
left = fl;
@@ -1158,7 +1297,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
* one (This may happen several times).
*/
if (added) {
- locks_delete_lock_ctx(fl, &dispose);
+ locks_delete_lock_ctx(&fl->c, &dispose);
continue;
}
/*
@@ -1175,8 +1314,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
locks_move_blocks(new_fl, request);
request = new_fl;
new_fl = NULL;
- locks_insert_lock_ctx(request, &fl->fl_list);
- locks_delete_lock_ctx(fl, &dispose);
+ locks_insert_lock_ctx(&request->c,
+ &fl->c.flc_list);
+ locks_delete_lock_ctx(&fl->c, &dispose);
added = true;
}
}
@@ -1193,8 +1333,8 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
error = 0;
if (!added) {
- if (request->fl_type == F_UNLCK) {
- if (request->fl_flags & FL_EXISTS)
+ if (lock_is_unlock(request)) {
+ if (request->c.flc_flags & FL_EXISTS)
error = -ENOENT;
goto out;
}
@@ -1205,7 +1345,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
}
locks_copy_lock(new_fl, request);
locks_move_blocks(new_fl, request);
- locks_insert_lock_ctx(new_fl, &fl->fl_list);
+ locks_insert_lock_ctx(&new_fl->c, &fl->c.flc_list);
fl = new_fl;
new_fl = NULL;
}
@@ -1217,16 +1357,17 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
left = new_fl2;
new_fl2 = NULL;
locks_copy_lock(left, right);
- locks_insert_lock_ctx(left, &fl->fl_list);
+ locks_insert_lock_ctx(&left->c, &fl->c.flc_list);
}
right->fl_start = request->fl_end + 1;
- locks_wake_up_blocks(right);
+ locks_wake_up_blocks(&right->c);
}
if (left) {
left->fl_end = request->fl_start - 1;
- locks_wake_up_blocks(left);
+ locks_wake_up_blocks(&left->c);
}
out:
+ trace_posix_lock_inode(inode, request, error);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
/*
@@ -1237,7 +1378,6 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
if (new_fl2)
locks_free_lock(new_fl2);
locks_dispose_list(&dispose);
- trace_posix_lock_inode(inode, request, error);
return error;
}
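Worked example (illustrative, not from the patch): if an owner holds a write lock over bytes 0..999 and then unlocks bytes 200..299, the loop above detects that the request punches a hole in an existing lock. The existing lock is kept for the upper piece (right->fl_start becomes 300), a copy taken from the pre-allocated new_fl2 is inserted for the lower piece (left->fl_end becomes 199), and waiters blocked on either piece are woken via locks_wake_up_blocks(). This split is why posix_lock_inode() allocates two spare file_lock structures up front for non-trivial requests.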
@@ -1259,7 +1399,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
int posix_lock_file(struct file *filp, struct file_lock *fl,
struct file_lock *conflock)
{
- return posix_lock_inode(locks_inode(filp), fl, conflock);
+ return posix_lock_inode(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
@@ -1278,8 +1418,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
error = posix_lock_inode(inode, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl->fl_wait,
- list_empty(&fl->fl_blocked_member));
+ error = wait_event_interruptible(fl->c.flc_wait,
+ list_empty(&fl->c.flc_blocked_member));
if (error)
break;
}
@@ -1287,37 +1427,37 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
return error;
}
-static void lease_clear_pending(struct file_lock *fl, int arg)
+static void lease_clear_pending(struct file_lease *fl, int arg)
{
switch (arg) {
case F_UNLCK:
- fl->fl_flags &= ~FL_UNLOCK_PENDING;
+ fl->c.flc_flags &= ~FL_UNLOCK_PENDING;
fallthrough;
case F_RDLCK:
- fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
+ fl->c.flc_flags &= ~FL_DOWNGRADE_PENDING;
}
}
/* We already had a lease on this file; just change its type */
-int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
+int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose)
{
- int error = assign_type(fl, arg);
+ int error = assign_type(&fl->c, arg);
if (error)
return error;
lease_clear_pending(fl, arg);
- locks_wake_up_blocks(fl);
+ locks_wake_up_blocks(&fl->c);
if (arg == F_UNLCK) {
- struct file *filp = fl->fl_file;
+ struct file *filp = fl->c.flc_file;
f_delown(filp);
- filp->f_owner.signum = 0;
- fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
+ file_f_owner(filp)->signum = 0;
+ fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync);
if (fl->fl_fasync != NULL) {
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
- locks_delete_lock_ctx(fl, dispose);
+ locks_delete_lock_ctx(&fl->c, dispose);
}
return 0;
}
@@ -1334,11 +1474,11 @@ static bool past_time(unsigned long then)
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
struct file_lock_context *ctx = inode->i_flctx;
- struct file_lock *fl, *tmp;
+ struct file_lease *fl, *tmp;
lockdep_assert_held(&ctx->flc_lock);
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
trace_time_out_leases(inode, fl);
if (past_time(fl->fl_downgrade_time))
lease_modify(fl, F_RDLCK, dispose);
@@ -1347,38 +1487,40 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose)
}
}
-static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
+static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc)
{
bool rc;
+ struct file_lease *lease = file_lease(lc);
+ struct file_lease *breaker = file_lease(bc);
if (lease->fl_lmops->lm_breaker_owns_lease
&& lease->fl_lmops->lm_breaker_owns_lease(lease))
return false;
- if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
+ if ((bc->flc_flags & FL_LAYOUT) != (lc->flc_flags & FL_LAYOUT)) {
rc = false;
goto trace;
}
- if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
+ if ((bc->flc_flags & FL_DELEG) && (lc->flc_flags & FL_LEASE)) {
rc = false;
goto trace;
}
- rc = locks_conflict(breaker, lease);
+ rc = locks_conflict(bc, lc);
trace:
trace_leases_conflict(rc, lease, breaker);
return rc;
}
static bool
-any_leases_conflict(struct inode *inode, struct file_lock *breaker)
+any_leases_conflict(struct inode *inode, struct file_lease *breaker)
{
struct file_lock_context *ctx = inode->i_flctx;
- struct file_lock *fl;
+ struct file_lock_core *flc;
lockdep_assert_held(&ctx->flc_lock);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- if (leases_conflict(fl, breaker))
+ list_for_each_entry(flc, &ctx->flc_lease, flc_list) {
+ if (leases_conflict(flc, &breaker->c))
return true;
}
return false;
@@ -1387,32 +1529,38 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
/**
* __break_lease - revoke all outstanding leases on file
* @inode: the inode of the file to return
- * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
- * break all leases
- * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
- * only delegations
+ * @flags: LEASE_BREAK_* flags
*
* break_lease (inlined for speed) has checked there already is at least
* some kind of lock (maybe a lease) on this file. Leases are broken on
- * a call to open() or truncate(). This function can sleep unless you
- * specified %O_NONBLOCK to your open().
+ * a call to open() or truncate(). This function can block waiting for the
+ * lease break unless you specify LEASE_BREAK_NONBLOCK.
*/
-int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+int __break_lease(struct inode *inode, unsigned int flags)
{
- int error = 0;
+ struct file_lease *new_fl, *fl, *tmp;
struct file_lock_context *ctx;
- struct file_lock *new_fl, *fl, *tmp;
unsigned long break_time;
- int want_write = (mode & O_ACCMODE) != O_RDONLY;
+ unsigned int type;
LIST_HEAD(dispose);
+ bool want_write = !(flags & LEASE_BREAK_OPEN_RDONLY);
+ int error = 0;
+
+ if (flags & LEASE_BREAK_LEASE)
+ type = FL_LEASE;
+ else if (flags & LEASE_BREAK_DELEG)
+ type = FL_DELEG;
+ else if (flags & LEASE_BREAK_LAYOUT)
+ type = FL_LAYOUT;
+ else
+ return -EINVAL;
- new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+ new_fl = lease_alloc(NULL, type, want_write ? F_WRLCK : F_RDLCK);
if (IS_ERR(new_fl))
return PTR_ERR(new_fl);
- new_fl->fl_flags = type;
/* typically we will check that ctx is non-NULL before calling */
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (!ctx) {
WARN_ON_ONCE(1);
goto free_lock;
@@ -1433,54 +1581,54 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time++; /* so that 0 means no break time */
}
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
- if (!leases_conflict(fl, new_fl))
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
+ if (!leases_conflict(&fl->c, &new_fl->c))
continue;
if (want_write) {
- if (fl->fl_flags & FL_UNLOCK_PENDING)
+ if (fl->c.flc_flags & FL_UNLOCK_PENDING)
continue;
- fl->fl_flags |= FL_UNLOCK_PENDING;
+ fl->c.flc_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
} else {
if (lease_breaking(fl))
continue;
- fl->fl_flags |= FL_DOWNGRADE_PENDING;
+ fl->c.flc_flags |= FL_DOWNGRADE_PENDING;
fl->fl_downgrade_time = break_time;
}
if (fl->fl_lmops->lm_break(fl))
- locks_delete_lock_ctx(fl, &dispose);
+ locks_delete_lock_ctx(&fl->c, &dispose);
}
if (list_empty(&ctx->flc_lease))
goto out;
- if (mode & O_NONBLOCK) {
+ if (flags & LEASE_BREAK_NONBLOCK) {
trace_break_lease_noblock(inode, new_fl);
error = -EWOULDBLOCK;
goto out;
}
restart:
- fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
+ fl = list_first_entry(&ctx->flc_lease, struct file_lease, c.flc_list);
break_time = fl->fl_break_time;
if (break_time != 0)
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(fl, new_fl, leases_conflict);
+ locks_insert_block(&fl->c, &new_fl->c, leases_conflict);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
- error = wait_event_interruptible_timeout(new_fl->fl_wait,
- list_empty(&new_fl->fl_blocked_member),
- break_time);
+ error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
+ list_empty(&new_fl->c.flc_blocked_member),
+ break_time);
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
- locks_delete_block(new_fl);
+ __locks_delete_block(&new_fl->c);
if (error >= 0) {
/*
* Wait for the next conflicting lease that has not been
@@ -1497,7 +1645,7 @@ out:
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
free_lock:
- locks_free_lock(new_fl);
+ locks_free_lease(new_fl);
return error;
}
EXPORT_SYMBOL(__break_lease);
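A hedged usage sketch (flag names from this patch; the call site itself is an assumption): with the old mode/type arguments folded into a single flags word, a non-blocking break of ordinary leases on behalf of an open for write might be requested as:

static inline int example_break_for_write_open(struct inode *inode)
{
	return __break_lease(inode, LEASE_BREAK_LEASE | LEASE_BREAK_NONBLOCK);
}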
@@ -1515,14 +1663,14 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
bool has_lease = false;
struct file_lock_context *ctx;
- struct file_lock *fl;
+ struct file_lock_core *flc;
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
spin_lock(&ctx->flc_lock);
- fl = list_first_entry_or_null(&ctx->flc_lease,
- struct file_lock, fl_list);
- if (fl && (fl->fl_type == F_WRLCK))
+ flc = list_first_entry_or_null(&ctx->flc_lease,
+ struct file_lock_core, flc_list);
+ if (flc && flc->flc_type == F_WRLCK)
has_lease = true;
spin_unlock(&ctx->flc_lock);
}
@@ -1533,8 +1681,9 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time)
EXPORT_SYMBOL(lease_get_mtime);
/**
- * fcntl_getlease - Enquire what lease is currently active
+ * __fcntl_getlease - Enquire what lease is currently active
* @filp: the file
+ * @flavor: type of lease flags to check
*
* The value returned by this function will be one of
* (if no lease break is pending):
@@ -1555,23 +1704,24 @@ EXPORT_SYMBOL(lease_get_mtime);
* XXX: sfr & willy disagree over whether F_INPROGRESS
* should be returned to userspace.
*/
-int fcntl_getlease(struct file *filp)
+static int __fcntl_getlease(struct file *filp, unsigned int flavor)
{
- struct file_lock *fl;
- struct inode *inode = locks_inode(filp);
+ struct file_lease *fl;
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
int type = F_UNLCK;
LIST_HEAD(dispose);
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- if (fl->fl_file != filp)
+ list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
+ if (fl->c.flc_file != filp)
continue;
- type = target_leasetype(fl);
+ if (fl->c.flc_flags & flavor)
+ type = target_leasetype(fl);
break;
}
spin_unlock(&ctx->flc_lock);
@@ -1582,6 +1732,19 @@ int fcntl_getlease(struct file *filp)
return type;
}
+int fcntl_getlease(struct file *filp)
+{
+ return __fcntl_getlease(filp, FL_LEASE);
+}
+
+int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
+{
+ if (deleg->d_flags != 0 || deleg->__pad != 0)
+ return -EINVAL;
+ deleg->d_type = __fcntl_getlease(filp, FL_DELEG);
+ return 0;
+}
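A hedged sketch (assumption): the layout of the userspace struct delegation is not shown in this patch; based on the fields used above it is taken to carry a lock type plus currently-reserved flag/padding members that must be zero, roughly:

struct delegation {
	__u32 d_type;	/* F_RDLCK, F_WRLCK or F_UNLCK */
	__u32 d_flags;	/* no flags defined yet; must be 0 */
	__u32 __pad;	/* reserved; must be 0 */
};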
+
/**
* check_conflicting_open - see if the given file points to an inode that has
* an existing open that would conflict with the
@@ -1594,9 +1757,9 @@ int fcntl_getlease(struct file *filp)
* conflict with the lease we're trying to set.
*/
static int
-check_conflicting_open(struct file *filp, const long arg, int flags)
+check_conflicting_open(struct file *filp, const int arg, int flags)
{
- struct inode *inode = locks_inode(filp);
+ struct inode *inode = file_inode(filp);
int self_wcount = 0, self_rcount = 0;
if (flags & FL_LAYOUT)
@@ -1629,18 +1792,22 @@ check_conflicting_open(struct file *filp, const long arg, int flags)
}
static int
-generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
+generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv)
{
- struct file_lock *fl, *my_fl = NULL, *lease;
- struct inode *inode = locks_inode(filp);
+ struct file_lease *fl, *my_fl = NULL, *lease;
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
- bool is_deleg = (*flp)->fl_flags & FL_DELEG;
+ bool is_deleg = (*flp)->c.flc_flags & FL_DELEG;
int error;
LIST_HEAD(dispose);
lease = *flp;
trace_generic_add_lease(inode, lease);
+ error = file_f_owner_allocate(filp);
+ if (error)
+ return error;
+
/* Note that arg is never F_UNLCK here */
ctx = locks_get_lock_context(inode, arg);
if (!ctx)
@@ -1648,7 +1815,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
/*
* In the delegation case we need mutual exclusion with
- * a number of operations that take the i_mutex. We trylock
+ * a number of operations that take the i_rwsem. We trylock
* because delegations are an optional optimization, and if
* there's some chance of a conflict--we'd rather not
* bother, maybe that's a sign this just isn't a good file to
@@ -1657,17 +1824,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
if (is_deleg && !inode_trylock(inode))
return -EAGAIN;
- if (is_deleg && arg == F_WRLCK) {
- /* Write delegations are not currently supported: */
- inode_unlock(inode);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
- error = check_conflicting_open(filp, arg, lease->fl_flags);
+ error = check_conflicting_open(filp, arg, lease->c.flc_flags);
if (error)
goto out;
@@ -1680,9 +1840,9 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* except for this filp.
*/
error = -EAGAIN;
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- if (fl->fl_file == filp &&
- fl->fl_owner == lease->fl_owner) {
+ list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
+ if (fl->c.flc_file == filp &&
+ fl->c.flc_owner == lease->c.flc_owner) {
my_fl = fl;
continue;
}
@@ -1697,7 +1857,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* Modifying our existing lease is OK, but no getting a
* new lease if someone else is opening for write:
*/
- if (fl->fl_flags & FL_UNLOCK_PENDING)
+ if (fl->c.flc_flags & FL_UNLOCK_PENDING)
goto out;
}
@@ -1713,7 +1873,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
if (!leases_enable)
goto out;
- locks_insert_lock_ctx(lease, &ctx->flc_lease);
+ locks_insert_lock_ctx(&lease->c, &ctx->flc_lease);
/*
* The check in break_lease() is lockless. It's possible for another
* open to race in after we did the earlier check for a conflicting
@@ -1724,9 +1884,9 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* precedes these checks.
*/
smp_mb();
- error = check_conflicting_open(filp, arg, lease->fl_flags);
+ error = check_conflicting_open(filp, arg, lease->c.flc_flags);
if (error) {
- locks_unlink_lock_ctx(lease);
+ locks_unlink_lock_ctx(&lease->c);
goto out;
}
@@ -1747,12 +1907,12 @@ out:
static int generic_delete_lease(struct file *filp, void *owner)
{
int error = -EAGAIN;
- struct file_lock *fl, *victim = NULL;
- struct inode *inode = locks_inode(filp);
+ struct file_lease *fl, *victim = NULL;
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
LIST_HEAD(dispose);
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (!ctx) {
trace_generic_delete_lease(inode, NULL);
return error;
@@ -1760,9 +1920,9 @@ static int generic_delete_lease(struct file *filp, void *owner)
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- if (fl->fl_file == filp &&
- fl->fl_owner == owner) {
+ list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
+ if (fl->c.flc_file == filp &&
+ fl->c.flc_owner == owner) {
victim = fl;
break;
}
@@ -1787,25 +1947,22 @@ static int generic_delete_lease(struct file *filp, void *owner)
* The (input) flp->fl_lmops->lm_break function is required
* by break_lease().
*/
-int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
+int generic_setlease(struct file *filp, int arg, struct file_lease **flp,
void **priv)
{
- struct inode *inode = locks_inode(filp);
- int error;
+ struct inode *inode = file_inode(filp);
- if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
- return -EACCES;
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -EINVAL;
- error = security_file_lock(filp, arg);
- if (error)
- return error;
switch (arg) {
case F_UNLCK:
return generic_delete_lease(filp, *priv);
- case F_RDLCK:
case F_WRLCK:
+ if (S_ISDIR(inode->i_mode))
+ return -EINVAL;
+ fallthrough;
+ case F_RDLCK:
if (!(*flp)->fl_lmops->lm_break) {
WARN_ON_ONCE(1);
return -ENOLCK;
@@ -1818,7 +1975,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
}
EXPORT_SYMBOL(generic_setlease);
-#if IS_ENABLED(CONFIG_SRCU)
/*
* Kernel subsystems can register to be notified on any attempt to set
* a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
@@ -1834,7 +1990,7 @@ lease_notifier_chain_init(void)
}
static inline void
-setlease_notifier(long arg, struct file_lock *lease)
+setlease_notifier(int arg, struct file_lease *lease)
{
if (arg != F_UNLCK)
srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
@@ -1852,29 +2008,18 @@ void lease_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);
-#else /* !IS_ENABLED(CONFIG_SRCU) */
-static inline void
-lease_notifier_chain_init(void)
-{
-}
-
-static inline void
-setlease_notifier(long arg, struct file_lock *lease)
-{
-}
-
-int lease_register_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(lease_register_notifier);
-void lease_unregister_notifier(struct notifier_block *nb)
+int
+kernel_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
{
+ if (lease)
+ setlease_notifier(arg, *lease);
+ if (filp->f_op->setlease)
+ return filp->f_op->setlease(filp, arg, lease, priv);
+ else
+ return generic_setlease(filp, arg, lease, priv);
}
-EXPORT_SYMBOL_GPL(lease_unregister_notifier);
-
-#endif /* IS_ENABLED(CONFIG_SRCU) */
+EXPORT_SYMBOL_GPL(kernel_setlease);
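An illustrative sketch (the caller is an assumption): kernel_setlease() is the entry point for in-kernel users such as an NFS server handing out delegations; unlike vfs_setlease() below, it skips the fsuid/CAP_LEASE and LSM checks that only make sense for userspace-initiated leases.

static int example_take_read_deleg(struct file *filp, struct file_lease **fl,
				   void **priv)
{
	return kernel_setlease(filp, F_RDLCK, fl, priv);
}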
/**
* vfs_setlease - sets a lease on an open file
@@ -1894,37 +2039,41 @@ EXPORT_SYMBOL_GPL(lease_unregister_notifier);
* may be NULL if the lm_setup operation doesn't require it.
*/
int
-vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
+vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
{
- if (lease)
- setlease_notifier(arg, *lease);
- if (filp->f_op->setlease)
- return filp->f_op->setlease(filp, arg, lease, priv);
- else
- return generic_setlease(filp, arg, lease, priv);
+ struct inode *inode = file_inode(filp);
+ vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
+ int error;
+
+ if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
+ return -EACCES;
+ error = security_file_lock(filp, arg);
+ if (error)
+ return error;
+ return kernel_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);
-static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
+static int do_fcntl_add_lease(unsigned int fd, struct file *filp, unsigned int flavor, int arg)
{
- struct file_lock *fl;
+ struct file_lease *fl;
struct fasync_struct *new;
int error;
- fl = lease_alloc(filp, arg);
+ fl = lease_alloc(filp, flavor, arg);
if (IS_ERR(fl))
return PTR_ERR(fl);
new = fasync_alloc();
if (!new) {
- locks_free_lock(fl);
+ locks_free_lease(fl);
return -ENOMEM;
}
new->fa_fd = fd;
error = vfs_setlease(filp, arg, &fl, (void **)&new);
if (fl)
- locks_free_lock(fl);
+ locks_free_lease(fl);
if (new)
fasync_free(new);
return error;
@@ -1940,11 +2089,35 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
* Note that you also need to call %F_SETSIG to
* receive a signal when the lease is broken.
*/
-int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
{
+ if (S_ISDIR(file_inode(filp)->i_mode))
+ return -EINVAL;
+
if (arg == F_UNLCK)
return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
- return do_fcntl_add_lease(fd, filp, arg);
+ return do_fcntl_add_lease(fd, filp, FL_LEASE, arg);
+}
+
+/**
+ * fcntl_setdeleg - sets a delegation on an open file
+ * @fd: open file descriptor
+ * @filp: file pointer
+ * @deleg: delegation request from userland
+ *
+ * Call this fcntl to establish a delegation on the file.
+ * Note that you also need to call %F_SETSIG to
+ * receive a signal when the delegation is broken.
+ */
+int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg)
+{
+ /* For now, no flags are supported */
+ if (deleg->d_flags != 0 || deleg->__pad != 0)
+ return -EINVAL;
+
+ if (deleg->d_type == F_UNLCK)
+ return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
+ return do_fcntl_add_lease(fd, filp, FL_DELEG, deleg->d_type);
}
/**
@@ -1962,8 +2135,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
error = flock_lock_inode(inode, fl);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl->fl_wait,
- list_empty(&fl->fl_blocked_member));
+ error = wait_event_interruptible(fl->c.flc_wait,
+ list_empty(&fl->c.flc_blocked_member));
if (error)
break;
}
@@ -1981,7 +2154,7 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
int res = 0;
- switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ switch (fl->c.flc_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
res = posix_lock_inode_wait(inode, fl);
break;
@@ -2012,21 +2185,8 @@ EXPORT_SYMBOL(locks_lock_inode_wait);
*/
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
- struct fd f = fdget(fd);
- struct file_lock *lock;
- int can_sleep, unlock;
- int error;
-
- error = -EBADF;
- if (!f.file)
- goto out;
-
- can_sleep = !(cmd & LOCK_NB);
- cmd &= ~LOCK_NB;
- unlock = (cmd == LOCK_UN);
-
- if (!unlock && !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
- goto out_putf;
+ int can_sleep, error, type;
+ struct file_lock fl;
/*
* LOCK_MAND locks were broken for a long time in that they never
@@ -2037,37 +2197,39 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
* throw a warning to let people know that they don't actually work.
*/
if (cmd & LOCK_MAND) {
- pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
- error = 0;
- goto out_putf;
+ pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
+ return 0;
}
- lock = flock_make_lock(f.file, cmd, NULL);
- if (IS_ERR(lock)) {
- error = PTR_ERR(lock);
- goto out_putf;
- }
+ type = flock_translate_cmd(cmd & ~LOCK_NB);
+ if (type < 0)
+ return type;
- if (can_sleep)
- lock->fl_flags |= FL_SLEEP;
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
+ return -EBADF;
- error = security_file_lock(f.file, lock->fl_type);
+ if (type != F_UNLCK && !(fd_file(f)->f_mode & (FMODE_READ | FMODE_WRITE)))
+ return -EBADF;
+
+ flock_make_lock(fd_file(f), &fl, type);
+
+ error = security_file_lock(fd_file(f), fl.c.flc_type);
if (error)
- goto out_free;
+ return error;
- if (f.file->f_op->flock)
- error = f.file->f_op->flock(f.file,
- (can_sleep) ? F_SETLKW : F_SETLK,
- lock);
- else
- error = locks_lock_file_wait(f.file, lock);
+ can_sleep = !(cmd & LOCK_NB);
+ if (can_sleep)
+ fl.c.flc_flags |= FL_SLEEP;
- out_free:
- locks_free_lock(lock);
+ if (fd_file(f)->f_op->flock)
+ error = fd_file(f)->f_op->flock(fd_file(f),
+ (can_sleep) ? F_SETLKW : F_SETLK,
+ &fl);
+ else
+ error = locks_lock_file_wait(fd_file(f), &fl);
- out_putf:
- fdput(f);
- out:
+ locks_release_private(&fl);
return error;
}
@@ -2081,6 +2243,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
*/
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
+ WARN_ON_ONCE(filp != fl->c.flc_file);
if (filp->f_op->lock)
return filp->f_op->lock(filp, F_GETLK, fl);
posix_test_lock(filp, fl);
@@ -2093,27 +2256,30 @@ EXPORT_SYMBOL_GPL(vfs_test_lock);
* @fl: The file_lock who's fl_pid should be translated
* @ns: The namespace into which the pid should be translated
*
- * Used to tranlate a fl_pid into a namespace virtual pid number
+ * Used to translate a fl_pid into a namespace virtual pid number
*/
-static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
+static pid_t locks_translate_pid(struct file_lock_core *fl, struct pid_namespace *ns)
{
pid_t vnr;
struct pid *pid;
- if (IS_OFDLCK(fl))
+ if (fl->flc_flags & FL_OFDLCK)
return -1;
- if (IS_REMOTELCK(fl))
- return fl->fl_pid;
+
+ /* Remote locks report a negative pid value */
+ if (fl->flc_pid <= 0)
+ return fl->flc_pid;
+
/*
* If the flock owner process is dead and its pid has been already
* freed, the translation below won't work, but we still want to show
* flock owner pid number in init pidns.
*/
if (ns == &init_pid_ns)
- return (pid_t)fl->fl_pid;
+ return (pid_t) fl->flc_pid;
rcu_read_lock();
- pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
+ pid = find_pid_ns(fl->flc_pid, &init_pid_ns);
vnr = pid_nr_ns(pid, ns);
rcu_read_unlock();
return vnr;
@@ -2121,7 +2287,7 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
- flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
+ flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
/*
* Make sure we can represent the posix lock via
@@ -2136,19 +2302,19 @@ static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
fl->fl_end - fl->fl_start + 1;
flock->l_whence = 0;
- flock->l_type = fl->fl_type;
+ flock->l_type = fl->c.flc_type;
return 0;
}
#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
- flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
+ flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
flock->l_start = fl->fl_start;
flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
fl->fl_end - fl->fl_start + 1;
flock->l_whence = 0;
- flock->l_type = fl->fl_type;
+ flock->l_type = fl->c.flc_type;
}
#endif
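Worked example (illustrative): an internal lock covering bytes 100..199 is reported back to userspace as l_start = 100, l_len = 100, while a lock that runs to end-of-file (fl_end == OFFSET_MAX) is reported with l_len = 0, matching the fcntl(2) convention that a zero length means "until EOF".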
@@ -2164,7 +2330,8 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
if (fl == NULL)
return -ENOMEM;
error = -EINVAL;
- if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
+ if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
+ && flock->l_type != F_WRLCK)
goto out;
error = flock_to_posix_lock(filp, fl, flock);
@@ -2176,16 +2343,16 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
if (flock->l_pid != 0)
goto out;
- fl->fl_flags |= FL_OFDLCK;
- fl->fl_owner = filp;
+ fl->c.flc_flags |= FL_OFDLCK;
+ fl->c.flc_owner = filp;
}
error = vfs_test_lock(filp, fl);
if (error)
goto out;
- flock->l_type = fl->fl_type;
- if (fl->fl_type != F_UNLCK) {
+ flock->l_type = fl->c.flc_type;
+ if (fl->c.flc_type != F_UNLCK) {
error = posix_lock_to_flock(flock, fl);
if (error)
goto out;
@@ -2212,11 +2379,13 @@ out:
* To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
* locks, the ->lock() interface may return asynchronously, before the lock has
* been granted or denied by the underlying filesystem, if (and only if)
- * lm_grant is set. Callers expecting ->lock() to return asynchronously
- * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
- * the request is for a blocking lock. When ->lock() does return asynchronously,
- * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
- * request completes.
+ * lm_grant is set. Additionally, FOP_ASYNC_LOCK in file_operations fop_flags
+ * needs to be set.
+ *
+ * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
+ * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
+ * blocking lock. When ->lock() does return asynchronously, it must return
+ * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
* If the request is for non-blocking lock the file system should return
* FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
* with the result. If the request timed out the callback routine will return a
@@ -2230,6 +2399,7 @@ out:
*/
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
+ WARN_ON_ONCE(filp != fl->c.flc_file);
if (filp->f_op->lock)
return filp->f_op->lock(filp, cmd, fl);
else
@@ -2242,7 +2412,7 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
{
int error;
- error = security_file_lock(filp, fl->fl_type);
+ error = security_file_lock(filp, fl->c.flc_type);
if (error)
return error;
@@ -2250,8 +2420,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
error = vfs_lock_file(filp, cmd, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl->fl_wait,
- list_empty(&fl->fl_blocked_member));
+ error = wait_event_interruptible(fl->c.flc_wait,
+ list_empty(&fl->c.flc_blocked_member));
if (error)
break;
}
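A hedged sketch (assumption, not from this patch): an asynchronous ->lock() implementation as described above queues the request with its backend and returns FILE_LOCK_DEFERRED; when the backend answers, the result is delivered through lm_grant(). Roughly:

static int example_async_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	/* queue the request with the server/backend here; the caller
	 * (e.g. lockd) sees FILE_LOCK_DEFERRED and waits for the grant */
	return FILE_LOCK_DEFERRED;
}

static void example_async_lock_done(struct file_lock *fl, int result)
{
	/* lm_grant() signature assumed to be (struct file_lock *, int) */
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		fl->fl_lmops->lm_grant(fl, result);
}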
@@ -2264,13 +2434,13 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
static int
check_fmode_for_setlk(struct file_lock *fl)
{
- switch (fl->fl_type) {
+ switch (fl->c.flc_type) {
case F_RDLCK:
- if (!(fl->fl_file->f_mode & FMODE_READ))
+ if (!(fl->c.flc_file->f_mode & FMODE_READ))
return -EBADF;
break;
case F_WRLCK:
- if (!(fl->fl_file->f_mode & FMODE_WRITE))
+ if (!(fl->c.flc_file->f_mode & FMODE_WRITE))
return -EBADF;
}
return 0;
@@ -2283,7 +2453,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
struct flock *flock)
{
struct file_lock *file_lock = locks_alloc_lock();
- struct inode *inode = locks_inode(filp);
+ struct inode *inode = file_inode(filp);
struct file *f;
int error;
@@ -2309,8 +2479,8 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
goto out;
cmd = F_SETLK;
- file_lock->fl_flags |= FL_OFDLCK;
- file_lock->fl_owner = filp;
+ file_lock->c.flc_flags |= FL_OFDLCK;
+ file_lock->c.flc_owner = filp;
break;
case F_OFD_SETLKW:
error = -EINVAL;
@@ -2318,22 +2488,23 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
goto out;
cmd = F_SETLKW;
- file_lock->fl_flags |= FL_OFDLCK;
- file_lock->fl_owner = filp;
+ file_lock->c.flc_flags |= FL_OFDLCK;
+ file_lock->c.flc_owner = filp;
fallthrough;
case F_SETLKW:
- file_lock->fl_flags |= FL_SLEEP;
+ file_lock->c.flc_flags |= FL_SLEEP;
}
error = do_lock_file_wait(filp, cmd, file_lock);
/*
- * Attempt to detect a close/fcntl race and recover by releasing the
- * lock that was just acquired. There is no need to do that when we're
+ * Detect close/fcntl races and recover by zapping all POSIX locks
+ * associated with this file and our files_struct, just like on
+ * filp_flush(). There is no need to do that when we're
* unlocking though, or for OFD locks.
*/
- if (!error && file_lock->fl_type != F_UNLCK &&
- !(file_lock->fl_flags & FL_OFDLCK)) {
+ if (!error && file_lock->c.flc_type != F_UNLCK &&
+ !(file_lock->c.flc_flags & FL_OFDLCK)) {
struct files_struct *files = current->files;
/*
* We need that spin_lock here - it prevents reordering between
@@ -2344,9 +2515,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
f = files_lookup_fd_locked(files, fd);
spin_unlock(&files->file_lock);
if (f != filp) {
- file_lock->fl_type = F_UNLCK;
- error = do_lock_file_wait(filp, cmd, file_lock);
- WARN_ON_ONCE(error);
+ locks_remove_posix(filp, files);
error = -EBADF;
}
}
@@ -2370,7 +2539,8 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
return -ENOMEM;
error = -EINVAL;
- if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
+ if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
+ && flock->l_type != F_WRLCK)
goto out;
error = flock64_to_posix_lock(filp, fl, flock);
@@ -2382,17 +2552,16 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
if (flock->l_pid != 0)
goto out;
- cmd = F_GETLK64;
- fl->fl_flags |= FL_OFDLCK;
- fl->fl_owner = filp;
+ fl->c.flc_flags |= FL_OFDLCK;
+ fl->c.flc_owner = filp;
}
error = vfs_test_lock(filp, fl);
if (error)
goto out;
- flock->l_type = fl->fl_type;
- if (fl->fl_type != F_UNLCK)
+ flock->l_type = fl->c.flc_type;
+ if (fl->c.flc_type != F_UNLCK)
posix_lock_to_flock64(flock, fl);
out:
@@ -2432,8 +2601,8 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
goto out;
cmd = F_SETLK64;
- file_lock->fl_flags |= FL_OFDLCK;
- file_lock->fl_owner = filp;
+ file_lock->c.flc_flags |= FL_OFDLCK;
+ file_lock->c.flc_owner = filp;
break;
case F_OFD_SETLKW:
error = -EINVAL;
@@ -2441,22 +2610,23 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
goto out;
cmd = F_SETLKW64;
- file_lock->fl_flags |= FL_OFDLCK;
- file_lock->fl_owner = filp;
+ file_lock->c.flc_flags |= FL_OFDLCK;
+ file_lock->c.flc_owner = filp;
fallthrough;
case F_SETLKW64:
- file_lock->fl_flags |= FL_SLEEP;
+ file_lock->c.flc_flags |= FL_SLEEP;
}
error = do_lock_file_wait(filp, cmd, file_lock);
/*
- * Attempt to detect a close/fcntl race and recover by releasing the
- * lock that was just acquired. There is no need to do that when we're
+ * Detect close/fcntl races and recover by zapping all POSIX locks
+ * associated with this file and our files_struct, just like on
+ * filp_flush(). There is no need to do that when we're
* unlocking though, or for OFD locks.
*/
- if (!error && file_lock->fl_type != F_UNLCK &&
- !(file_lock->fl_flags & FL_OFDLCK)) {
+ if (!error && file_lock->c.flc_type != F_UNLCK &&
+ !(file_lock->c.flc_flags & FL_OFDLCK)) {
struct files_struct *files = current->files;
/*
* We need that spin_lock here - it prevents reordering between
@@ -2467,9 +2637,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
f = files_lookup_fd_locked(files, fd);
spin_unlock(&files->file_lock);
if (f != filp) {
- file_lock->fl_type = F_UNLCK;
- error = do_lock_file_wait(filp, cmd, file_lock);
- WARN_ON_ONCE(error);
+ locks_remove_posix(filp, files);
error = -EBADF;
}
}
@@ -2487,7 +2655,7 @@ out:
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
int error;
- struct inode *inode = locks_inode(filp);
+ struct inode *inode = file_inode(filp);
struct file_lock lock;
struct file_lock_context *ctx;
@@ -2496,18 +2664,18 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (!ctx || list_empty(&ctx->flc_posix))
return;
locks_init_lock(&lock);
- lock.fl_type = F_UNLCK;
- lock.fl_flags = FL_POSIX | FL_CLOSE;
+ lock.c.flc_type = F_UNLCK;
+ lock.c.flc_flags = FL_POSIX | FL_CLOSE;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
- lock.fl_owner = owner;
- lock.fl_pid = current->tgid;
- lock.fl_file = filp;
+ lock.c.flc_owner = owner;
+ lock.c.flc_pid = current->tgid;
+ lock.c.flc_file = filp;
lock.fl_ops = NULL;
lock.fl_lmops = NULL;
@@ -2524,13 +2692,13 @@ static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
struct file_lock fl;
- struct inode *inode = locks_inode(filp);
+ struct inode *inode = file_inode(filp);
if (list_empty(&flctx->flc_flock))
return;
- flock_make_lock(filp, LOCK_UN, &fl);
- fl.fl_flags |= FL_CLOSE;
+ flock_make_lock(filp, &fl, F_UNLCK);
+ fl.c.flc_flags |= FL_CLOSE;
if (filp->f_op->flock)
filp->f_op->flock(filp, F_SETLKW, &fl);
@@ -2545,7 +2713,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
- struct file_lock *fl, *tmp;
+ struct file_lease *fl, *tmp;
LIST_HEAD(dispose);
if (list_empty(&ctx->flc_lease))
@@ -2553,8 +2721,8 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
- if (filp == fl->fl_file)
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list)
+ if (filp == fl->c.flc_file)
lease_modify(fl, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
@@ -2569,7 +2737,7 @@ void locks_remove_file(struct file *filp)
{
struct file_lock_context *ctx;
- ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
+ ctx = locks_inode_context(file_inode(filp));
if (!ctx)
return;
@@ -2598,12 +2766,36 @@ void locks_remove_file(struct file *filp)
*/
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
+ WARN_ON_ONCE(filp != fl->c.flc_file);
if (filp->f_op->lock)
return filp->f_op->lock(filp, F_CANCELLK, fl);
return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
+/**
+ * vfs_inode_has_locks - are any file locks held on @inode?
+ * @inode: inode to check for locks
+ *
+ * Return true if there are any FL_POSIX or FL_FLOCK locks currently
+ * set on @inode.
+ */
+bool vfs_inode_has_locks(struct inode *inode)
+{
+ struct file_lock_context *ctx;
+ bool ret;
+
+ ctx = locks_inode_context(inode);
+ if (!ctx)
+ return false;
+
+ spin_lock(&ctx->flc_lock);
+ ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
+ spin_unlock(&ctx->flc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
+
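A hedged usage sketch for the new helper (the caller below is hypothetical, not taken from this series): a filesystem that cannot proceed while locks are outstanding can bail out early.

static int example_prepare_operation(struct inode *inode)
{
	/* Hypothetical caller: refuse to proceed while POSIX/flock locks exist. */
	if (vfs_inode_has_locks(inode))
		return -EBUSY;
	return 0;
}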
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -2613,69 +2805,73 @@ struct locks_iterator {
loff_t li_pos;
};
-static void lock_get_status(struct seq_file *f, struct file_lock *fl,
+static void lock_get_status(struct seq_file *f, struct file_lock_core *flc,
loff_t id, char *pfx, int repeat)
{
struct inode *inode = NULL;
- unsigned int fl_pid;
+ unsigned int pid;
struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
- int type;
+ int type = flc->flc_type;
+ struct file_lock *fl = file_lock(flc);
+
+ pid = locks_translate_pid(flc, proc_pidns);
- fl_pid = locks_translate_pid(fl, proc_pidns);
/*
* If lock owner is dead (and pid is freed) or not visible in current
* pidns, zero is shown as a pid value. Check lock info from
* init_pid_ns to get saved lock pid value.
*/
-
- if (fl->fl_file != NULL)
- inode = locks_inode(fl->fl_file);
+ if (flc->flc_file != NULL)
+ inode = file_inode(flc->flc_file);
seq_printf(f, "%lld: ", id);
if (repeat)
seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
- if (IS_POSIX(fl)) {
- if (fl->fl_flags & FL_ACCESS)
+ if (flc->flc_flags & FL_POSIX) {
+ if (flc->flc_flags & FL_ACCESS)
seq_puts(f, "ACCESS");
- else if (IS_OFDLCK(fl))
+ else if (flc->flc_flags & FL_OFDLCK)
seq_puts(f, "OFDLCK");
else
seq_puts(f, "POSIX ");
seq_printf(f, " %s ",
(inode == NULL) ? "*NOINODE*" : "ADVISORY ");
- } else if (IS_FLOCK(fl)) {
+ } else if (flc->flc_flags & FL_FLOCK) {
seq_puts(f, "FLOCK ADVISORY ");
- } else if (IS_LEASE(fl)) {
- if (fl->fl_flags & FL_DELEG)
+ } else if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) {
+ struct file_lease *lease = file_lease(flc);
+
+ type = target_leasetype(lease);
+
+ if (flc->flc_flags & FL_DELEG)
seq_puts(f, "DELEG ");
else
seq_puts(f, "LEASE ");
- if (lease_breaking(fl))
+ if (lease_breaking(lease))
seq_puts(f, "BREAKING ");
- else if (fl->fl_file)
+ else if (flc->flc_file)
seq_puts(f, "ACTIVE ");
else
seq_puts(f, "BREAKER ");
} else {
seq_puts(f, "UNKNOWN UNKNOWN ");
}
- type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
(type == F_RDLCK) ? "READ" : "UNLCK");
if (inode) {
/* userspace relies on this representation of dev_t */
- seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
+ seq_printf(f, "%d %02x:%02x:%lu ", pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
} else {
- seq_printf(f, "%d <none>:0 ", fl_pid);
+ seq_printf(f, "%d <none>:0 ", pid);
}
- if (IS_POSIX(fl)) {
+ if (flc->flc_flags & FL_POSIX) {
if (fl->fl_end == OFFSET_MAX)
seq_printf(f, "%Ld EOF\n", fl->fl_start);
else
@@ -2685,17 +2881,18 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
}
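For orientation while reading the refactor: lock_get_status() still produces the familiar /proc/locks format, roughly as below (values invented for illustration; the "->" line is a blocked waiter printed by locks_show()).

/*
 *   1: POSIX  ADVISORY  WRITE 1234 08:02:131078 0 EOF
 *   2: FLOCK  ADVISORY  WRITE 5678 08:02:131079 0 EOF
 *   1: -> POSIX  ADVISORY  WRITE 4321 08:02:131078 0 EOF
 */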
-static struct file_lock *get_next_blocked_member(struct file_lock *node)
+static struct file_lock_core *get_next_blocked_member(struct file_lock_core *node)
{
- struct file_lock *tmp;
+ struct file_lock_core *tmp;
/* NULL node or root node */
- if (node == NULL || node->fl_blocker == NULL)
+ if (node == NULL || node->flc_blocker == NULL)
return NULL;
/* Next member in the linked list could be itself */
- tmp = list_next_entry(node, fl_blocked_member);
- if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
+ tmp = list_next_entry(node, flc_blocked_member);
+ if (list_entry_is_head(tmp, &node->flc_blocker->flc_blocked_requests,
+ flc_blocked_member)
|| tmp == node) {
return NULL;
}
@@ -2706,18 +2903,18 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node)
static int locks_show(struct seq_file *f, void *v)
{
struct locks_iterator *iter = f->private;
- struct file_lock *cur, *tmp;
+ struct file_lock_core *cur, *tmp;
struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
int level = 0;
- cur = hlist_entry(v, struct file_lock, fl_link);
+ cur = hlist_entry(v, struct file_lock_core, flc_link);
if (locks_translate_pid(cur, proc_pidns) == 0)
return 0;
- /* View this crossed linked list as a binary tree, the first member of fl_blocked_requests
- * is the left child of current node, the next silibing in fl_blocked_member is the
- * right child, we can alse get the parent of current node from fl_blocker, so this
+ /* View this cross-linked list as a binary tree: the first member of flc_blocked_requests
+ * is the left child of the current node, the next sibling in flc_blocked_member is the
+ * right child, and we can also get the parent of the current node from flc_blocker, so this
* question becomes traversal of a binary tree
*/
while (cur != NULL) {
@@ -2726,17 +2923,18 @@ static int locks_show(struct seq_file *f, void *v)
else
lock_get_status(f, cur, iter->li_pos, "", level);
- if (!list_empty(&cur->fl_blocked_requests)) {
+ if (!list_empty(&cur->flc_blocked_requests)) {
/* Turn left */
- cur = list_first_entry_or_null(&cur->fl_blocked_requests,
- struct file_lock, fl_blocked_member);
+ cur = list_first_entry_or_null(&cur->flc_blocked_requests,
+ struct file_lock_core,
+ flc_blocked_member);
level++;
} else {
/* Turn right */
tmp = get_next_blocked_member(cur);
/* Fall back to parent node */
- while (tmp == NULL && cur->fl_blocker != NULL) {
- cur = cur->fl_blocker;
+ while (tmp == NULL && cur->flc_blocker != NULL) {
+ cur = cur->flc_blocker;
level--;
tmp = get_next_blocked_member(cur);
}
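The traversal above can be restated on a simplified node type; this is a sketch of the same left-child/right-sibling walk with the locking and seq_file plumbing stripped out (the struct and names are invented for illustration):

struct node {
	struct node *parent;		/* plays the role of flc_blocker */
	struct node *first_child;	/* first entry on flc_blocked_requests */
	struct node *next_sibling;	/* next entry on flc_blocked_member */
};

static void walk(struct node *cur, void (*visit)(struct node *, int))
{
	int depth = 0;

	while (cur) {
		visit(cur, depth);
		if (cur->first_child) {			/* turn left */
			cur = cur->first_child;
			depth++;
		} else {				/* turn right */
			struct node *next = cur->next_sibling;

			while (!next && cur->parent) {	/* fall back to parent */
				cur = cur->parent;
				depth--;
				next = cur->next_sibling;
			}
			cur = next;
		}
	}
}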
@@ -2751,14 +2949,13 @@ static void __show_fd_locks(struct seq_file *f,
struct list_head *head, int *id,
struct file *filp, struct files_struct *files)
{
- struct file_lock *fl;
+ struct file_lock_core *fl;
- list_for_each_entry(fl, head, fl_list) {
+ list_for_each_entry(fl, head, flc_list) {
- if (filp != fl->fl_file)
+ if (filp != fl->flc_file)
continue;
- if (fl->fl_owner != files &&
- fl->fl_owner != filp)
+ if (fl->flc_owner != files && fl->flc_owner != filp)
continue;
(*id)++;
@@ -2770,11 +2967,11 @@ static void __show_fd_locks(struct seq_file *f,
void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files)
{
- struct inode *inode = locks_inode(filp);
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
int id = 0;
- ctx = smp_load_acquire(&inode->i_flctx);
+ ctx = locks_inode_context(inode);
if (!ctx)
return;
@@ -2837,6 +3034,9 @@ static int __init filelock_init(void)
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
+ filelease_cache = kmem_cache_create("file_lease_cache",
+ sizeof(struct file_lease), 0, SLAB_PANIC, NULL);
+
for_each_possible_cpu(i) {
struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..e60a840999aa 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -11,7 +11,7 @@
/*
* Mbcache is a simple key-value store. Keys need not be unique, however
* key-value pairs are expected to be unique (we use this fact in
- * mb_cache_entry_delete()).
+ * mb_cache_entry_delete_or_get()).
*
* Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
* Ext4 also uses it for deduplication of xattr values stored in inodes.
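As a rough illustration of how a filesystem consumes this API (simplified from the ext4 xattr-block pattern; candidate_matches() is a hypothetical helper standing in for the on-disk comparison):

static u64 example_find_dup_block(struct mb_cache *cache, u32 hash)
{
	struct mb_cache_entry *ce;
	u64 blocknr = 0;

	/* Walk every cached entry that shares this xattr hash. */
	ce = mb_cache_entry_find_first(cache, hash);
	while (ce) {
		if (candidate_matches(ce)) {		/* hypothetical check */
			blocknr = ce->e_value;		/* reuse this block */
			mb_cache_entry_put(cache, ce);	/* drop the lookup ref */
			break;
		}
		/* find_next() drops the ref on 'ce' and returns the next hit. */
		ce = mb_cache_entry_find_next(cache, ce);
	}
	return blocknr;
}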
@@ -37,7 +37,7 @@ struct mb_cache {
struct list_head c_list;
/* Number of entries in cache */
unsigned long c_entry_count;
- struct shrinker c_shrink;
+ struct shrinker *c_shrink;
/* Work for shrinking when the cache has too many entries */
struct work_struct c_shrink_work;
};
@@ -90,12 +90,19 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
return -ENOMEM;
INIT_LIST_HEAD(&entry->e_list);
- /* One ref for hash, one ref returned */
- atomic_set(&entry->e_refcnt, 1);
+ /*
+ * We create the entry with two references. One reference is kept by the
+ * hash table, the other reference is used to protect us from
+ * mb_cache_entry_delete_or_get() until the entry is fully set up. This
+ * avoids nesting cache->c_list_lock inside the hash table bit locks, which
+ * is problematic for RT.
+ */
+ atomic_set(&entry->e_refcnt, 2);
entry->e_key = key;
entry->e_value = value;
- entry->e_reusable = reusable;
- entry->e_referenced = 0;
+ entry->e_flags = 0;
+ if (reusable)
+ set_bit(MBE_REUSABLE_B, &entry->e_flags);
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
@@ -107,24 +114,41 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
}
hlist_bl_add_head(&entry->e_hash_list, head);
hlist_bl_unlock(head);
-
spin_lock(&cache->c_list_lock);
list_add_tail(&entry->e_list, &cache->c_list);
- /* Grab ref for LRU list */
- atomic_inc(&entry->e_refcnt);
cache->c_entry_count++;
spin_unlock(&cache->c_list_lock);
+ mb_cache_entry_put(cache, entry);
return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
-void __mb_cache_entry_free(struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
+ struct hlist_bl_head *head;
+
+ head = mb_cache_entry_head(cache, entry->e_key);
+ hlist_bl_lock(head);
+ hlist_bl_del(&entry->e_hash_list);
+ hlist_bl_unlock(head);
kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);
+/*
+ * mb_cache_entry_wait_unused - wait to be the last user of the entry
+ *
+ * @entry - entry to work on
+ *
+ * Wait to be the last user of the entry.
+ */
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
+{
+ wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
+}
+EXPORT_SYMBOL(mb_cache_entry_wait_unused);
+
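Taken together, the changes above imply the following reference-count protocol (a summary written for this review, not text from the patch):

/*
 *   e_refcnt == 1            entry is idle: only the hash table holds it
 *   mb_cache_entry_get()     takes an extra reference (atomic_inc_not_zero),
 *                            so a lookup cannot resurrect a dying entry
 *   delete_or_get()          takes a lookup ref, then cmpxchg(2 -> 0): if the
 *                            hash ref plus our ref were the only references,
 *                            the entry is unhashed and freed; otherwise the
 *                            still-referenced entry is handed back
 *   wait_unused()            the caller already holds one ref, so "unused"
 *                            means e_refcnt has dropped back to 2
 *   shrinker                 cmpxchg(1 -> 0) claims an idle entry, i.e. one
 *                            referenced only by the hash table
 *   creation                 starts at 2 so a racing delete_or_get() sees
 *                            "still in use" until setup completes
 */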
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
struct mb_cache_entry *entry,
u32 key)
@@ -142,10 +166,10 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
while (node) {
entry = hlist_bl_entry(node, struct mb_cache_entry,
e_hash_list);
- if (entry->e_key == key && entry->e_reusable) {
- atomic_inc(&entry->e_refcnt);
+ if (entry->e_key == key &&
+ test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
+ atomic_inc_not_zero(&entry->e_refcnt))
goto out;
- }
node = node->next;
}
entry = NULL;
@@ -205,10 +229,9 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
- if (entry->e_key == key && entry->e_value == value) {
- atomic_inc(&entry->e_refcnt);
+ if (entry->e_key == key && entry->e_value == value &&
+ atomic_inc_not_zero(&entry->e_refcnt))
goto out;
- }
}
entry = NULL;
out:
@@ -217,42 +240,42 @@ out:
}
EXPORT_SYMBOL(mb_cache_entry_get);
-/* mb_cache_entry_delete - remove a cache entry
+/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
* @cache - cache we work with
* @key - key
* @value - value
*
- * Remove entry from cache @cache with key @key and value @value.
+ * Remove entry from cache @cache with key @key and value @value. The removal
+ * happens only if the entry is unused. The function returns NULL if the entry
+ * was successfully removed or if there is no such entry in the cache. Otherwise
+ * the function grabs a reference to the entry that we failed to delete because
+ * it still has users, and returns it.
*/
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value)
{
- struct hlist_bl_node *node;
- struct hlist_bl_head *head;
struct mb_cache_entry *entry;
- head = mb_cache_entry_head(cache, key);
- hlist_bl_lock(head);
- hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
- if (entry->e_key == key && entry->e_value == value) {
- /* We keep hash list reference to keep entry alive */
- hlist_bl_del_init(&entry->e_hash_list);
- hlist_bl_unlock(head);
- spin_lock(&cache->c_list_lock);
- if (!list_empty(&entry->e_list)) {
- list_del_init(&entry->e_list);
- if (!WARN_ONCE(cache->c_entry_count == 0,
- "mbcache: attempt to decrement c_entry_count past zero"))
- cache->c_entry_count--;
- atomic_dec(&entry->e_refcnt);
- }
- spin_unlock(&cache->c_list_lock);
- mb_cache_entry_put(cache, entry);
- return;
- }
- }
- hlist_bl_unlock(head);
+ entry = mb_cache_entry_get(cache, key, value);
+ if (!entry)
+ return NULL;
+
+ /*
+ * Drop the ref we got from mb_cache_entry_get() and the initial hash
+ * ref if we are the last user
+ */
+ if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
+ return entry;
+
+ spin_lock(&cache->c_list_lock);
+ if (!list_empty(&entry->e_list))
+ list_del_init(&entry->e_list);
+ cache->c_entry_count--;
+ spin_unlock(&cache->c_list_lock);
+ __mb_cache_entry_free(cache, entry);
+ return NULL;
}
-EXPORT_SYMBOL(mb_cache_entry_delete);
+EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
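A hedged sketch of how a caller combines the new primitives (modelled loosely on the ext4 xattr-block release path; names simplified and not copied from this patch):

static void example_forget_block(struct mb_cache *cache, u32 hash, u64 blocknr)
{
	struct mb_cache_entry *ce;

	ce = mb_cache_entry_delete_or_get(cache, hash, blocknr);
	if (ce) {
		/*
		 * Someone still holds the entry; wait until we are the last
		 * user before the underlying block may be freed or reused,
		 * then drop the reference delete_or_get() handed us.
		 */
		mb_cache_entry_wait_unused(ce);
		mb_cache_entry_put(cache, ce);
	}
	/* It is now safe to release blocknr. */
}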
/* mb_cache_entry_touch - cache entry got used
* @cache - cache the entry belongs to
@@ -263,15 +286,14 @@ EXPORT_SYMBOL(mb_cache_entry_delete);
void mb_cache_entry_touch(struct mb_cache *cache,
struct mb_cache_entry *entry)
{
- entry->e_referenced = 1;
+ set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);
static unsigned long mb_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct mb_cache *cache = container_of(shrink, struct mb_cache,
- c_shrink);
+ struct mb_cache *cache = shrink->private_data;
return cache->c_entry_count;
}
@@ -281,34 +303,24 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
unsigned long nr_to_scan)
{
struct mb_cache_entry *entry;
- struct hlist_bl_head *head;
unsigned long shrunk = 0;
spin_lock(&cache->c_list_lock);
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
entry = list_first_entry(&cache->c_list,
struct mb_cache_entry, e_list);
- if (entry->e_referenced) {
- entry->e_referenced = 0;
+ /* Drop initial hash reference if there is no user */
+ if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
+ atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
+ clear_bit(MBE_REFERENCED_B, &entry->e_flags);
list_move_tail(&entry->e_list, &cache->c_list);
continue;
}
list_del_init(&entry->e_list);
cache->c_entry_count--;
- /*
- * We keep LRU list reference so that entry doesn't go away
- * from under us.
- */
spin_unlock(&cache->c_list_lock);
- head = mb_cache_entry_head(cache, entry->e_key);
- hlist_bl_lock(head);
- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
- hlist_bl_del_init(&entry->e_hash_list);
- atomic_dec(&entry->e_refcnt);
- }
- hlist_bl_unlock(head);
- if (mb_cache_entry_put(cache, entry))
- shrunk++;
+ __mb_cache_entry_free(cache, entry);
+ shrunk++;
cond_resched();
spin_lock(&cache->c_list_lock);
}
@@ -320,8 +332,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
static unsigned long mb_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct mb_cache *cache = container_of(shrink, struct mb_cache,
- c_shrink);
+ struct mb_cache *cache = shrink->private_data;
return mb_cache_shrink(cache, sc->nr_to_scan);
}
@@ -364,15 +375,19 @@ struct mb_cache *mb_cache_create(int bucket_bits)
for (i = 0; i < bucket_count; i++)
INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
- cache->c_shrink.count_objects = mb_cache_count;
- cache->c_shrink.scan_objects = mb_cache_scan;
- cache->c_shrink.seeks = DEFAULT_SEEKS;
- if (register_shrinker(&cache->c_shrink)) {
+ cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
+ if (!cache->c_shrink) {
kfree(cache->c_hash);
kfree(cache);
goto err_out;
}
+ cache->c_shrink->count_objects = mb_cache_count;
+ cache->c_shrink->scan_objects = mb_cache_scan;
+ cache->c_shrink->private_data = cache;
+
+ shrinker_register(cache->c_shrink);
+
INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
return cache;
@@ -393,18 +408,13 @@ void mb_cache_destroy(struct mb_cache *cache)
{
struct mb_cache_entry *entry, *next;
- unregister_shrinker(&cache->c_shrink);
+ shrinker_free(cache->c_shrink);
/*
* We don't bother with any locking. Cache must not be used at this
* point.
*/
list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
- hlist_bl_del_init(&entry->e_hash_list);
- atomic_dec(&entry->e_refcnt);
- } else
- WARN_ON(1);
list_del(&entry->e_list);
WARN_ON(atomic_read(&entry->e_refcnt) != 1);
mb_cache_entry_put(cache, entry);
@@ -416,9 +426,7 @@ EXPORT_SYMBOL(mb_cache_destroy);
static int __init mbcache_init(void)
{
- mb_entry_cache = kmem_cache_create("mbcache",
- sizeof(struct mb_cache_entry), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
+ mb_entry_cache = KMEM_CACHE(mb_cache_entry, SLAB_RECLAIM_ACCOUNT);
if (!mb_entry_cache)
return -ENOMEM;
return 0;
diff --git a/fs/minix/Kconfig b/fs/minix/Kconfig
index de2003974ff0..90ddfad2a75e 100644
--- a/fs/minix/Kconfig
+++ b/fs/minix/Kconfig
@@ -2,6 +2,7 @@
config MINIX_FS
tristate "Minix file system support"
depends on BLOCK
+ select BUFFER_HEAD
help
Minix is a simple operating system used in many classes about OS's.
The minix file system (method to organize files on a hard disk
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 9115948c624e..7da66ca184f4 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -210,7 +210,7 @@ void minix_free_inode(struct inode * inode)
mark_buffer_dirty(bh);
}
-struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
+struct inode *minix_new_inode(const struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct minix_sb_info *sbi = minix_sb(sb);
@@ -220,13 +220,10 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
unsigned long j;
int i;
- if (!inode) {
- *error = -ENOMEM;
- return NULL;
- }
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
j = bits_per_zone;
bh = NULL;
- *error = -ENOSPC;
spin_lock(&bitmap_lock);
for (i = 0; i < sbi->s_imap_blocks; i++) {
bh = sbi->s_imap[i];
@@ -237,30 +234,29 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
if (!bh || j >= bits_per_zone) {
spin_unlock(&bitmap_lock);
iput(inode);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
if (minix_test_and_set_bit(j, bh->b_data)) { /* shouldn't happen */
spin_unlock(&bitmap_lock);
printk("minix_new_inode: bit already set\n");
iput(inode);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
spin_unlock(&bitmap_lock);
mark_buffer_dirty(bh);
j += i * bits_per_zone;
if (!j || j > sbi->s_ninodes) {
iput(inode);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
- inode_init_owner(&init_user_ns, inode, dir, mode);
+ inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = j;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
insert_inode_hash(inode);
mark_inode_dirty(inode);
- *error = 0;
return inode;
}
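The callers in fs/minix/namei.c (further below) switch to the usual ERR_PTR convention; a condensed sketch of the new calling pattern:

	struct inode *inode = minix_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	/* ... initialise the inode and link it into the directory ... */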
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index dcfe5b25378b..19052fc47e9e 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -26,12 +26,6 @@ const struct file_operations minix_dir_operations = {
.fsync = generic_file_fsync,
};
-static inline void dir_put_page(struct page *page)
-{
- kunmap(page);
- put_page(page);
-}
-
/*
* Return the offset into page `page_nr' of the last valid
* byte in that page, plus one.
@@ -46,31 +40,39 @@ minix_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte;
}
-static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;
- int err = 0;
- block_write_end(NULL, mapping, pos, len, len, page, NULL);
+
+ block_write_end(pos, len, len, folio);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
- if (IS_DIRSYNC(dir))
- err = write_one_page(page);
- else
- unlock_page(page);
+ folio_unlock(folio);
+}
+
+static int minix_handle_dirsync(struct inode *dir)
+{
+ int err;
+
+ err = filemap_write_and_wait(dir->i_mapping);
+ if (!err)
+ err = sync_inode_metadata(dir, 1);
return err;
}
-static struct page * dir_get_page(struct inode *dir, unsigned long n)
+static void *dir_get_folio(struct inode *dir, unsigned long n,
+ struct folio **foliop)
{
- struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (!IS_ERR(page))
- kmap(page);
- return page;
+ struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+ *foliop = folio;
+ return kmap_local_folio(folio, 0);
}
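The helper hands back a kmap_local address, so every successful call has to be paired with folio_release_kmap(), which unmaps the address and drops the folio reference in one step. A sketch of the expected caller pattern (names abbreviated):

	struct folio *folio;
	char *kaddr = dir_get_folio(dir, n, &folio);

	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);
	/* ... scan directory entries between kaddr and the last valid byte ... */
	folio_release_kmap(folio, kaddr);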
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
@@ -98,11 +100,11 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
for ( ; n < npages; n++, offset = 0) {
char *p, *kaddr, *limit;
- struct page *page = dir_get_page(inode, n);
+ struct folio *folio;
- if (IS_ERR(page))
+ kaddr = dir_get_folio(inode, n, &folio);
+ if (IS_ERR(kaddr))
continue;
- kaddr = (char *)page_address(page);
p = kaddr+offset;
limit = kaddr + minix_last_byte(inode, n) - chunk_size;
for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
@@ -121,13 +123,13 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
unsigned l = strnlen(name, sbi->s_namelen);
if (!dir_emit(ctx, name, l,
inumber, DT_UNKNOWN)) {
- dir_put_page(page);
+ folio_release_kmap(folio, p);
return 0;
}
}
ctx->pos += chunk_size;
}
- dir_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
@@ -143,12 +145,13 @@ static inline int namecompare(int len, int maxlen,
/*
* minix_find_entry()
*
- * finds an entry in the specified directory with the wanted name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
+ * finds an entry in the specified directory with the wanted name.
+ * It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
+ *
+ * On success, folio_release_kmap() should be called on *foliop.
*/
-minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
+minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
@@ -157,21 +160,18 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
struct minix_sb_info * sbi = minix_sb(sb);
unsigned long n;
unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
char *p;
char *namx;
__u32 inumber;
- *res_page = NULL;
for (n = 0; n < npages; n++) {
char *kaddr, *limit;
- page = dir_get_page(dir, n);
- if (IS_ERR(page))
+ kaddr = dir_get_folio(dir, n, foliop);
+ if (IS_ERR(kaddr))
continue;
- kaddr = (char*)page_address(page);
limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
if (sbi->s_version == MINIX_V3) {
@@ -188,12 +188,11 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
if (namecompare(namelen, sbi->s_namelen, name, namx))
goto found;
}
- dir_put_page(page);
+ folio_release_kmap(*foliop, kaddr);
}
return NULL;
found:
- *res_page = page;
return (minix_dirent *)p;
}
@@ -204,7 +203,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
int namelen = dentry->d_name.len;
struct super_block * sb = dir->i_sb;
struct minix_sb_info * sbi = minix_sb(sb);
- struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned long npages = dir_pages(dir);
unsigned long n;
char *kaddr, *p;
@@ -223,12 +222,10 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
for (n = 0; n <= npages; n++) {
char *limit, *dir_end;
- page = dir_get_page(dir, n);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
- lock_page(page);
- kaddr = (char*)page_address(page);
+ kaddr = dir_get_folio(dir, n, &folio);
+ if (IS_ERR(kaddr))
+ return PTR_ERR(kaddr);
+ folio_lock(folio);
dir_end = kaddr + minix_last_byte(dir, n);
limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
@@ -255,15 +252,15 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
if (namecompare(namelen, sbi->s_namelen, name, namx))
goto out_unlock;
}
- unlock_page(page);
- dir_put_page(page);
+ folio_unlock(folio);
+ folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;
got_it:
- pos = page_offset(page) + p - (char *)page_address(page);
- err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
+ pos = folio_pos(folio) + offset_in_folio(folio, p);
+ err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err)
goto out_unlock;
memcpy (namx, name, namelen);
@@ -274,61 +271,59 @@ got_it:
memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
de->inode = inode->i_ino;
}
- err = dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir_commit_chunk(folio, pos, sbi->s_dirsize);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
+ err = minix_handle_dirsync(dir);
out_put:
- dir_put_page(page);
-out:
+ folio_release_kmap(folio, kaddr);
return err;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
goto out_put;
}
-int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
+int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- char *kaddr = page_address(page);
- loff_t pos = page_offset(page) + (char*)de - kaddr;
+ struct inode *inode = folio->mapping->host;
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
unsigned len = sbi->s_dirsize;
int err;
- lock_page(page);
- err = minix_prepare_chunk(page, pos, len);
- if (err == 0) {
- if (sbi->s_version == MINIX_V3)
- ((minix3_dirent *) de)->inode = 0;
- else
- de->inode = 0;
- err = dir_commit_chunk(page, pos, len);
- } else {
- unlock_page(page);
+ folio_lock(folio);
+ err = minix_prepare_chunk(folio, pos, len);
+ if (err) {
+ folio_unlock(folio);
+ return err;
}
- dir_put_page(page);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ if (sbi->s_version == MINIX_V3)
+ ((minix3_dirent *)de)->inode = 0;
+ else
+ de->inode = 0;
+ dir_commit_chunk(folio, pos, len);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
- return err;
+ return minix_handle_dirsync(inode);
}
int minix_make_empty(struct inode *inode, struct inode *dir)
{
- struct page *page = grab_cache_page(inode->i_mapping, 0);
+ struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *kaddr;
int err;
- if (!page)
- return -ENOMEM;
- err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto fail;
}
- kaddr = kmap_atomic(page);
- memset(kaddr, 0, PAGE_SIZE);
+ kaddr = kmap_local_folio(folio, 0);
+ memset(kaddr, 0, folio_size(folio));
if (sbi->s_version == MINIX_V3) {
minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -347,11 +342,12 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
de->inode = dir->i_ino;
strcpy(de->name, "..");
}
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
- err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
+ dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
+ err = minix_handle_dirsync(inode);
fail:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -360,20 +356,19 @@ fail:
*/
int minix_empty_dir(struct inode * inode)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned long i, npages = dir_pages(inode);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
- char *name;
+ char *name, *kaddr;
__u32 inumber;
for (i = 0; i < npages; i++) {
- char *p, *kaddr, *limit;
+ char *p, *limit;
- page = dir_get_page(inode, i);
- if (IS_ERR(page))
+ kaddr = dir_get_folio(inode, i, &folio);
+ if (IS_ERR(kaddr))
continue;
- kaddr = (char *)page_address(page);
limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
if (sbi->s_version == MINIX_V3) {
@@ -399,71 +394,65 @@ int minix_empty_dir(struct inode * inode)
goto not_empty;
}
}
- dir_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 1;
not_empty:
- dir_put_page(page);
+ folio_release_kmap(folio, kaddr);
return 0;
}
/* Releases the page */
-void minix_set_link(struct minix_dir_entry *de, struct page *page,
- struct inode *inode)
+int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
+ struct inode *inode)
{
- struct inode *dir = page->mapping->host;
+ struct inode *dir = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
- loff_t pos = page_offset(page) +
- (char *)de-(char*)page_address(page);
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err;
- lock_page(page);
-
- err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
- if (err == 0) {
- if (sbi->s_version == MINIX_V3)
- ((minix3_dirent *) de)->inode = inode->i_ino;
- else
- de->inode = inode->i_ino;
- err = dir_commit_chunk(page, pos, sbi->s_dirsize);
- } else {
- unlock_page(page);
+ folio_lock(folio);
+ err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
+ if (err) {
+ folio_unlock(folio);
+ return err;
}
- dir_put_page(page);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ if (sbi->s_version == MINIX_V3)
+ ((minix3_dirent *)de)->inode = inode->i_ino;
+ else
+ de->inode = inode->i_ino;
+ dir_commit_chunk(folio, pos, sbi->s_dirsize);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
+ return minix_handle_dirsync(dir);
}
-struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
+struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
{
- struct page *page = dir_get_page(dir, 0);
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
- struct minix_dir_entry *de = NULL;
+ struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);
- if (!IS_ERR(page)) {
- de = minix_next_entry(page_address(page), sbi);
- *p = page;
- }
- return de;
+ if (!IS_ERR(de))
+ return minix_next_entry(de, sbi);
+ return NULL;
}
ino_t minix_inode_by_name(struct dentry *dentry)
{
- struct page *page;
- struct minix_dir_entry *de = minix_find_entry(dentry, &page);
+ struct folio *folio;
+ struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
ino_t res = 0;
if (de) {
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
if (sbi->s_version == MINIX_V3)
res = ((minix3_dirent *) de)->inode;
else
res = de->inode;
- dir_put_page(page);
+ folio_release_kmap(folio, de);
}
return res;
}
diff --git a/fs/minix/file.c b/fs/minix/file.c
index 6a7bd2d9eec0..dca7ac71f049 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -17,18 +17,18 @@ const struct file_operations minix_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
+ .mmap_prepare = generic_file_mmap_prepare,
.fsync = generic_file_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = filemap_splice_read,
};
-static int minix_setattr(struct user_namespace *mnt_userns,
+static int minix_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(&init_user_ns, dentry, attr);
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
return error;
@@ -42,7 +42,7 @@ static int minix_setattr(struct user_namespace *mnt_userns,
minix_truncate(inode);
}
- setattr_copy(&init_user_ns, inode, attr);
+ setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index a71f1cf894b9..51ea9bdc813f 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -17,13 +17,30 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
+#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
+#include <linux/fs_context.h>
static int minix_write_inode(struct inode *inode,
struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
-static int minix_remount (struct super_block * sb, int * flags, char * data);
+
+void __minix_error_inode(struct inode *inode, const char *function,
+ unsigned int line, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT "minix-fs error (device %s): %s:%d: "
+ "inode #%lu: comm %s: %pV\n",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ current->comm, &vaf);
+ va_end(args);
+}
static void minix_evict_inode(struct inode *inode)
{
@@ -63,7 +80,7 @@ static struct kmem_cache * minix_inode_cachep;
static struct inode *minix_alloc_inode(struct super_block *sb)
{
struct minix_inode_info *ei;
- ei = kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
+ ei = alloc_inode_sb(sb, minix_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
@@ -86,7 +103,7 @@ static int __init init_inodecache(void)
minix_inode_cachep = kmem_cache_create("minix_inode_cache",
sizeof(struct minix_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (minix_inode_cachep == NULL)
return -ENOMEM;
@@ -110,19 +127,19 @@ static const struct super_operations minix_sops = {
.evict_inode = minix_evict_inode,
.put_super = minix_put_super,
.statfs = minix_statfs,
- .remount_fs = minix_remount,
};
-static int minix_remount (struct super_block * sb, int * flags, char * data)
+static int minix_reconfigure(struct fs_context *fc)
{
- struct minix_sb_info * sbi = minix_sb(sb);
struct minix_super_block * ms;
+ struct super_block *sb = fc->root->d_sb;
+ struct minix_sb_info * sbi = sb->s_fs_info;
sync_filesystem(sb);
ms = sbi->s_ms;
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
if (ms->s_state & MINIX_VALID_FS ||
!(sbi->s_mount_state & MINIX_VALID_FS))
return 0;
@@ -169,7 +186,7 @@ static bool minix_check_superblock(struct super_block *sb)
return true;
}
-static int minix_fill_super(struct super_block *s, void *data, int silent)
+static int minix_fill_super(struct super_block *s, struct fs_context *fc)
{
struct buffer_head *bh;
struct buffer_head **map;
@@ -179,6 +196,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
struct inode *root_inode;
struct minix_sb_info *sbi;
int ret = -EINVAL;
+ int silent = fc->sb_flags & SB_SILENT;
sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
if (!sbi)
@@ -370,6 +388,23 @@ out:
return ret;
}
+static int minix_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, minix_fill_super);
+}
+
+static const struct fs_context_operations minix_context_ops = {
+ .get_tree = minix_get_tree,
+ .reconfigure = minix_reconfigure,
+};
+
+static int minix_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &minix_context_ops;
+
+ return 0;
+}
+
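For context, the resulting mount and remount flow looks roughly like this (a summary of the generic fs_context plumbing, not code from the patch):

/*
 *   mount -t minix /dev/sdX /mnt
 *     fs_context_for_mount()   -> allocates the fs_context
 *     minix_init_fs_context()  -> installs minix_context_ops
 *     vfs_get_tree()           -> minix_get_tree()
 *     get_tree_bdev()          -> minix_fill_super(), as before
 *
 *   mount -o remount,ro /mnt
 *     reconfigure_super()      -> minix_reconfigure(), replacing the old
 *                                 minix_remount()
 */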
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
@@ -397,19 +432,20 @@ static int minix_get_block(struct inode *inode, sector_t block,
return V2_minix_get_block(inode, block, bh_result, create);
}
-static int minix_writepage(struct page *page, struct writeback_control *wbc)
+static int minix_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, minix_get_block, wbc);
+ return mpage_writepages(mapping, wbc, minix_get_block);
}
-static int minix_readpage(struct file *file, struct page *page)
+static int minix_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,minix_get_block);
+ return block_read_full_folio(folio, minix_get_block);
}
-int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- return __block_write_begin(page, pos, len, minix_get_block);
+ return __block_write_begin(folio, pos, len, minix_get_block);
}
static void minix_write_failed(struct address_space *mapping, loff_t to)
@@ -422,14 +458,14 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int minix_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int minix_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- minix_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
if (unlikely(ret))
minix_write_failed(mapping, pos + len);
@@ -442,12 +478,15 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations minix_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = minix_readpage,
- .writepage = minix_writepage,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
+ .read_folio = minix_read_folio,
+ .writepages = minix_writepages,
.write_begin = minix_write_begin,
.write_end = generic_write_end,
- .bmap = minix_bmap
+ .migrate_folio = buffer_migrate_folio,
+ .bmap = minix_bmap,
+ .direct_IO = noop_direct_IO
};
static const struct inode_operations minix_symlink_inode_operations = {
@@ -469,8 +508,14 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
inode->i_op = &minix_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_mapping->a_ops = &minix_aops;
- } else
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
init_special_inode(inode, inode->i_mode, rdev);
+ } else {
+ printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ make_bad_inode(inode);
+ }
}
/*
@@ -500,10 +545,8 @@ static struct inode *V1_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time;
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, raw_inode->i_time, 0)));
inode->i_blocks = 0;
for (i = 0; i < 9; i++)
minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
@@ -540,12 +583,9 @@ static struct inode *V2_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime.tv_sec = raw_inode->i_mtime;
- inode->i_atime.tv_sec = raw_inode->i_atime;
- inode->i_ctime.tv_sec = raw_inode->i_ctime;
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
+ inode_set_mtime(inode, raw_inode->i_mtime, 0);
+ inode_set_atime(inode, raw_inode->i_atime, 0);
+ inode_set_ctime(inode, raw_inode->i_ctime, 0);
inode->i_blocks = 0;
for (i = 0; i < 10; i++)
minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
@@ -565,7 +605,7 @@ struct inode *minix_iget(struct super_block *sb, unsigned long ino)
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode_state_read_once(inode) & I_NEW))
return inode;
if (INODE_VERSION(inode) == MINIX_V1)
@@ -592,7 +632,7 @@ static struct buffer_head * V1_minix_update_inode(struct inode * inode)
raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
raw_inode->i_nlinks = inode->i_nlink;
raw_inode->i_size = inode->i_size;
- raw_inode->i_time = inode->i_mtime.tv_sec;
+ raw_inode->i_time = inode_get_mtime_sec(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 9; i++)
@@ -619,9 +659,9 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
raw_inode->i_nlinks = inode->i_nlink;
raw_inode->i_size = inode->i_size;
- raw_inode->i_mtime = inode->i_mtime.tv_sec;
- raw_inode->i_atime = inode->i_atime.tv_sec;
- raw_inode->i_ctime = inode->i_ctime.tv_sec;
+ raw_inode->i_mtime = inode_get_mtime_sec(inode);
+ raw_inode->i_atime = inode_get_atime_sec(inode);
+ raw_inode->i_ctime = inode_get_ctime_sec(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 10; i++)
@@ -653,13 +693,13 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
-int minix_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int minix_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct super_block *sb = path->dentry->d_sb;
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (INODE_VERSION(inode) == MINIX_V1)
stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
else
@@ -681,18 +721,12 @@ void minix_truncate(struct inode * inode)
V2_minix_truncate(inode);
}
-static struct dentry *minix_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super);
-}
-
static struct file_system_type minix_fs_type = {
- .owner = THIS_MODULE,
- .name = "minix",
- .mount = minix_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "minix",
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = minix_init_fs_context,
};
MODULE_ALIAS_FS("minix");
@@ -719,5 +753,6 @@ static void __exit exit_minix_fs(void)
module_init(init_minix_fs)
module_exit(exit_minix_fs)
+MODULE_DESCRIPTION("Minix file system");
MODULE_LICENSE("GPL");
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index 446148792f41..dad131e30c05 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -131,7 +131,7 @@ static inline int splice_branch(struct inode *inode,
/* We are done with atomic stuff, now do the rest of housekeeping */
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
/* had we spliced it onto indirect block? */
if (where->bh)
@@ -350,7 +350,7 @@ do_indirects:
}
first_whole++;
}
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
}
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 202173368025..2bfaf377f208 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -42,18 +42,21 @@ struct minix_sb_info {
unsigned short s_version;
};
-extern struct inode *minix_iget(struct super_block *, unsigned long);
-extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
-extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
-extern struct inode * minix_new_inode(const struct inode *, umode_t, int *);
-extern void minix_free_inode(struct inode * inode);
-extern unsigned long minix_count_free_inodes(struct super_block *sb);
-extern int minix_new_block(struct inode * inode);
-extern void minix_free_block(struct inode *inode, unsigned long block);
-extern unsigned long minix_count_free_blocks(struct super_block *sb);
-extern int minix_getattr(struct user_namespace *, const struct path *,
- struct kstat *, u32, unsigned int);
-extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
+void __minix_error_inode(struct inode *inode, const char *function,
+ unsigned int line, const char *fmt, ...);
+
+struct inode *minix_iget(struct super_block *, unsigned long);
+struct minix_inode *minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
+struct minix2_inode *minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
+struct inode *minix_new_inode(const struct inode *, umode_t);
+void minix_free_inode(struct inode *inode);
+unsigned long minix_count_free_inodes(struct super_block *sb);
+int minix_new_block(struct inode *inode);
+void minix_free_block(struct inode *inode, unsigned long block);
+unsigned long minix_count_free_blocks(struct super_block *sb);
+int minix_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
+int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
extern void V1_minix_truncate(struct inode *);
extern void V2_minix_truncate(struct inode *);
@@ -64,14 +67,15 @@ extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int);
extern unsigned V1_minix_blocks(loff_t, struct super_block *);
extern unsigned V2_minix_blocks(loff_t, struct super_block *);
-extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**);
-extern int minix_add_link(struct dentry*, struct inode*);
-extern int minix_delete_entry(struct minix_dir_entry*, struct page*);
-extern int minix_make_empty(struct inode*, struct inode*);
-extern int minix_empty_dir(struct inode*);
-extern void minix_set_link(struct minix_dir_entry*, struct page*, struct inode*);
-extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**);
-extern ino_t minix_inode_by_name(struct dentry*);
+struct minix_dir_entry *minix_find_entry(struct dentry *, struct folio **);
+int minix_add_link(struct dentry*, struct inode*);
+int minix_delete_entry(struct minix_dir_entry *, struct folio *);
+int minix_make_empty(struct inode*, struct inode*);
+int minix_empty_dir(struct inode*);
+int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
+ struct inode *inode);
+struct minix_dir_entry *minix_dotdot(struct inode*, struct folio **);
+ino_t minix_inode_by_name(struct dentry*);
extern const struct inode_operations minix_file_inode_operations;
extern const struct inode_operations minix_dir_inode_operations;
@@ -167,4 +171,10 @@ static inline int minix_test_bit(int nr, const void *vaddr)
#endif
+#define minix_error_inode(inode, fmt, ...) \
+ __minix_error_inode((inode), __func__, __LINE__, \
+ (fmt), ##__VA_ARGS__)
+
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
#endif /* FS_MINIX_H */
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 937fa5fae2b8..263e4ba8b1c8 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -33,71 +33,64 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, un
return d_splice_alias(inode, dentry);
}
-static int minix_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+static int minix_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
- int error;
struct inode *inode;
if (!old_valid_dev(rdev))
return -EINVAL;
- inode = minix_new_inode(dir, mode, &error);
+ inode = minix_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- if (inode) {
- minix_set_inode(inode, rdev);
- mark_inode_dirty(inode);
- error = add_nondir(dentry, inode);
- }
- return error;
+ minix_set_inode(inode, rdev);
+ mark_inode_dirty(inode);
+ return add_nondir(dentry, inode);
}
-static int minix_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int minix_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode)
{
- int error;
- struct inode *inode = minix_new_inode(dir, mode, &error);
- if (inode) {
- minix_set_inode(inode, 0);
- mark_inode_dirty(inode);
- d_tmpfile(dentry, inode);
- }
- return error;
+ struct inode *inode = minix_new_inode(dir, mode);
+
+ if (IS_ERR(inode))
+ return finish_open_simple(file, PTR_ERR(inode));
+ minix_set_inode(inode, 0);
+ mark_inode_dirty(inode);
+ d_tmpfile(file, inode);
+ return finish_open_simple(file, 0);
}
-static int minix_create(struct user_namespace *mnt_userns, struct inode *dir,
+static int minix_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
- return minix_mknod(mnt_userns, dir, dentry, mode, 0);
+ return minix_mknod(&nop_mnt_idmap, dir, dentry, mode, 0);
}
-static int minix_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+static int minix_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
- int err = -ENAMETOOLONG;
int i = strlen(symname)+1;
struct inode * inode;
+ int err;
if (i > dir->i_sb->s_blocksize)
- goto out;
+ return -ENAMETOOLONG;
- inode = minix_new_inode(dir, S_IFLNK | 0777, &err);
- if (!inode)
- goto out;
+ inode = minix_new_inode(dir, S_IFLNK | 0777);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
minix_set_inode(inode, 0);
err = page_symlink(inode, symname, i);
- if (err)
- goto out_fail;
-
- err = add_nondir(dentry, inode);
-out:
- return err;
-
-out_fail:
- inode_dec_link_count(inode);
- iput(inode);
- goto out;
+ if (unlikely(err)) {
+ inode_dec_link_count(inode);
+ iput(inode);
+ return err;
+ }
+ return add_nondir(dentry, inode);
}
static int minix_link(struct dentry * old_dentry, struct inode * dir,
@@ -105,26 +98,24 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
{
struct inode *inode = d_inode(old_dentry);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
return add_nondir(dentry, inode);
}
-static int minix_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err;
- inode_inc_link_count(dir);
-
- inode = minix_new_inode(dir, S_IFDIR | mode, &err);
- if (!inode)
- goto out_dir;
+ inode = minix_new_inode(dir, S_IFDIR | mode);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ inode_inc_link_count(dir);
minix_set_inode(inode, 0);
-
inode_inc_link_count(inode);
err = minix_make_empty(inode, dir);
@@ -137,94 +128,120 @@ static int minix_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
d_instantiate(dentry, inode);
out:
- return err;
+ return ERR_PTR(err);
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
iput(inode);
-out_dir:
inode_dec_link_count(dir);
goto out;
}
static int minix_unlink(struct inode * dir, struct dentry *dentry)
{
- int err = -ENOENT;
struct inode * inode = d_inode(dentry);
- struct page * page;
+ struct folio *folio;
struct minix_dir_entry * de;
+ int err;
- de = minix_find_entry(dentry, &page);
+ if (inode->i_nlink == 0) {
+ minix_error_inode(inode, "inode has corrupted nlink");
+ return -EFSCORRUPTED;
+ }
+
+ de = minix_find_entry(dentry, &folio);
if (!de)
- goto end_unlink;
+ return -ENOENT;
+ err = minix_delete_entry(de, folio);
+ folio_release_kmap(folio, de);
- err = minix_delete_entry(de, page);
if (err)
- goto end_unlink;
-
- inode->i_ctime = dir->i_ctime;
+ return err;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
-end_unlink:
- return err;
+ return 0;
}
static int minix_rmdir(struct inode * dir, struct dentry *dentry)
{
struct inode * inode = d_inode(dentry);
- int err = -ENOTEMPTY;
+ int err = -EFSCORRUPTED;
- if (minix_empty_dir(inode)) {
- err = minix_unlink(dir, dentry);
- if (!err) {
- inode_dec_link_count(dir);
- inode_dec_link_count(inode);
- }
+ if (dir->i_nlink <= 2) {
+ minix_error_inode(dir, "inode has corrupted nlink");
+ goto out;
}
+
+ err = -ENOTEMPTY;
+ if (!minix_empty_dir(inode))
+ goto out;
+
+ err = minix_unlink(dir, dentry);
+ if (!err) {
+ inode_dec_link_count(dir);
+ inode_dec_link_count(inode);
+ }
+
+out:
return err;
}
-static int minix_rename(struct user_namespace *mnt_userns,
+static int minix_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
- struct page * dir_page = NULL;
+ struct folio * dir_folio = NULL;
struct minix_dir_entry * dir_de = NULL;
- struct page * old_page;
+ struct folio *old_folio;
struct minix_dir_entry * old_de;
int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
- old_de = minix_find_entry(old_dentry, &old_page);
+ old_de = minix_find_entry(old_dentry, &old_folio);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
- dir_de = minix_dotdot(old_inode, &dir_page);
+ dir_de = minix_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}
if (new_inode) {
- struct page * new_page;
+ struct folio *new_folio;
struct minix_dir_entry * new_de;
err = -ENOTEMPTY;
if (dir_de && !minix_empty_dir(new_inode))
goto out_dir;
+ err = -EFSCORRUPTED;
+ if (new_inode->i_nlink == 0 || (dir_de && new_inode->i_nlink != 2)) {
+ minix_error_inode(new_inode, "inode has corrupted nlink");
+ goto out_dir;
+ }
+
+ if (dir_de && old_dir->i_nlink <= 2) {
+ minix_error_inode(old_dir, "inode has corrupted nlink");
+ goto out_dir;
+ }
+
err = -ENOENT;
- new_de = minix_find_entry(new_dentry, &new_page);
+ new_de = minix_find_entry(new_dentry, &new_folio);
if (!new_de)
goto out_dir;
- minix_set_link(new_de, new_page, old_inode);
- new_inode->i_ctime = current_time(new_inode);
+ err = minix_set_link(new_de, new_folio, old_inode);
+ folio_release_kmap(new_folio, new_de);
+ if (err)
+ goto out_dir;
+ inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
@@ -236,23 +253,22 @@ static int minix_rename(struct user_namespace *mnt_userns,
inode_inc_link_count(new_dir);
}
- minix_delete_entry(old_de, old_page);
+ err = minix_delete_entry(old_de, old_folio);
+ if (err)
+ goto out_dir;
+
mark_inode_dirty(old_inode);
if (dir_de) {
- minix_set_link(dir_de, dir_page, new_dir);
- inode_dec_link_count(old_dir);
+ err = minix_set_link(dir_de, dir_folio, new_dir);
+ if (!err)
+ inode_dec_link_count(old_dir);
}
- return 0;
-
out_dir:
- if (dir_de) {
- kunmap(dir_page);
- put_page(dir_page);
- }
+ if (dir_de)
+ folio_release_kmap(dir_folio, dir_de);
out_old:
- kunmap(old_page);
- put_page(old_page);
+ folio_release_kmap(old_folio, old_de);
out:
return err;
}
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
new file mode 100644
index 000000000000..a37991fdb194
--- /dev/null
+++ b/fs/mnt_idmapping.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Christian Brauner <brauner@kernel.org> */
+
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/slab.h>
+#include <linux/user_namespace.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+/*
+ * Outside of this file vfs{g,u}id_t are always created from k{g,u}id_t,
+ * never from raw values. These are just internal helpers.
+ */
+#define VFSUIDT_INIT_RAW(val) (vfsuid_t){ val }
+#define VFSGIDT_INIT_RAW(val) (vfsgid_t){ val }
+
+struct mnt_idmap {
+ struct uid_gid_map uid_map;
+ struct uid_gid_map gid_map;
+ refcount_t count;
+};
+
+/*
+ * Carries the initial idmapping of 0:0:4294967295 which is an identity
+ * mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
+ * mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
+ */
+struct mnt_idmap nop_mnt_idmap = {
+ .count = REFCOUNT_INIT(1),
+};
+EXPORT_SYMBOL_GPL(nop_mnt_idmap);
+
+/*
+ * Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
+ * This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
+ */
+struct mnt_idmap invalid_mnt_idmap = {
+ .count = REFCOUNT_INIT(1),
+};
+EXPORT_SYMBOL_GPL(invalid_mnt_idmap);
+
+/**
+ * initial_idmapping - check whether this is the initial mapping
+ * @ns: idmapping to check
+ *
+ * Check whether this is the initial mapping, mapping 0 to 0, 1 to 1,
+ * [...], 1000 to 1000 [...].
+ *
+ * Return: true if this is the initial mapping, false if not.
+ */
+static inline bool initial_idmapping(const struct user_namespace *ns)
+{
+ return ns == &init_user_ns;
+}
+
+/**
+ * make_vfsuid - map a filesystem kuid according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @kuid : kuid to be mapped
+ *
+ * Take a @kuid and remap it from @fs_userns into @idmap. Use this
+ * function when preparing a @kuid to be reported to userspace.
+ *
+ * If @idmap is the identity mapping (@nop_mnt_idmap) this is not an
+ * idmapped mount and we can simply return @kuid unchanged.
+ * If initial_idmapping() tells us that the filesystem is not mounted with an
+ * idmapping we know the value of @kuid won't change when calling
+ * from_kuid() so we can simply retrieve the value via __kuid_val()
+ * directly.
+ *
+ * Return: @kuid mapped according to @idmap.
+ * If @kuid has no mapping in either @idmap or @fs_userns INVALID_UID is
+ * returned.
+ */
+
+vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ kuid_t kuid)
+{
+ uid_t uid;
+
+ if (idmap == &nop_mnt_idmap)
+ return VFSUIDT_INIT(kuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSUID;
+ if (initial_idmapping(fs_userns))
+ uid = __kuid_val(kuid);
+ else
+ uid = from_kuid(fs_userns, kuid);
+ if (uid == (uid_t)-1)
+ return INVALID_VFSUID;
+ return VFSUIDT_INIT_RAW(map_id_down(&idmap->uid_map, uid));
+}
+EXPORT_SYMBOL_GPL(make_vfsuid);
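
make_vfsuid() resolves the raw uid through the mount idmapping's extents via map_id_down(). A simplified, self-contained sketch of that extent walk; the kernel's real map_id_down() additionally handles the large, binary-searched forward array, which is omitted here:

struct example_extent { u32 first; u32 lower_first; u32 count; };

/* Map @id from the "first" side of each extent to its "lower_first" side. */
static u32 example_map_id_down(const struct example_extent *e, unsigned int n,
			       u32 id)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (id >= e[i].first && id - e[i].first < e[i].count)
			return e[i].lower_first + (id - e[i].first);
	}
	return (u32)-1;	/* unmapped: the resulting vfsuid is invalid */
}
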
+
+/**
+ * make_vfsgid - map a filesystem kgid according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @kgid : kgid to be mapped
+ *
+ * Take a @kgid and remap it from @fs_userns into @idmap. Use this
+ * function when preparing a @kgid to be reported to userspace.
+ *
+ * If @idmap is the identity mapping (@nop_mnt_idmap) this is not an
+ * idmapped mount and we can simply return @kgid unchanged.
+ * If initial_idmapping() tells us that the filesystem is not mounted with an
+ * idmapping we know the value of @kgid won't change when calling
+ * from_kgid() so we can simply retrieve the value via __kgid_val()
+ * directly.
+ *
+ * Return: @kgid mapped according to @idmap.
+ * If @kgid has no mapping in either @idmap or @fs_userns INVALID_GID is
+ * returned.
+ */
+vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kgid_t kgid)
+{
+ gid_t gid;
+
+ if (idmap == &nop_mnt_idmap)
+ return VFSGIDT_INIT(kgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSGID;
+ if (initial_idmapping(fs_userns))
+ gid = __kgid_val(kgid);
+ else
+ gid = from_kgid(fs_userns, kgid);
+ if (gid == (gid_t)-1)
+ return INVALID_VFSGID;
+ return VFSGIDT_INIT_RAW(map_id_down(&idmap->gid_map, gid));
+}
+EXPORT_SYMBOL_GPL(make_vfsgid);
+
+/**
+ * from_vfsuid - map a vfsuid into the filesystem idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid : vfsuid to be mapped
+ *
+ * Map @vfsuid into the filesystem idmapping. This function has to be used in
+ * order to e.g. write @vfsuid to inode->i_uid.
+ *
+ * Return: @vfsuid mapped into the filesystem idmapping
+ */
+kuid_t from_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsuid_t vfsuid)
+{
+ uid_t uid;
+
+ if (idmap == &nop_mnt_idmap)
+ return AS_KUIDT(vfsuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_UID;
+ uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
+ if (uid == (uid_t)-1)
+ return INVALID_UID;
+ if (initial_idmapping(fs_userns))
+ return KUIDT_INIT(uid);
+ return make_kuid(fs_userns, uid);
+}
+EXPORT_SYMBOL_GPL(from_vfsuid);
+
+/**
+ * from_vfsgid - map a vfsgid into the filesystem idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid : vfsgid to be mapped
+ *
+ * Map @vfsgid into the filesystem idmapping. This function has to be used in
+ * order to e.g. write @vfsgid to inode->i_gid.
+ *
+ * Return: @vfsgid mapped into the filesystem idmapping
+ */
+kgid_t from_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsgid_t vfsgid)
+{
+ gid_t gid;
+
+ if (idmap == &nop_mnt_idmap)
+ return AS_KGIDT(vfsgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_GID;
+ gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
+ if (gid == (gid_t)-1)
+ return INVALID_GID;
+ if (initial_idmapping(fs_userns))
+ return KGIDT_INIT(gid);
+ return make_kgid(fs_userns, gid);
+}
+EXPORT_SYMBOL_GPL(from_vfsgid);
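
make_vfsuid() and from_vfsuid() form a report/store pair: the former is used before handing an owner to userspace, the latter before writing a caller-supplied owner into an inode. A hedged sketch of both directions (helper names and the errno choice are illustrative, not taken from this patch):

/* Report the owner of @inode as seen through @idmap. */
static vfsuid_t example_report_owner(struct mnt_idmap *idmap,
				     const struct inode *inode)
{
	return make_vfsuid(idmap, inode->i_sb->s_user_ns, inode->i_uid);
}

/* Map a caller-supplied owner back into the filesystem before storing it. */
static int example_store_owner(struct mnt_idmap *idmap, struct inode *inode,
			       vfsuid_t vfsuid)
{
	kuid_t kuid = from_vfsuid(idmap, inode->i_sb->s_user_ns, vfsuid);

	if (!uid_valid(kuid))
		return -EOVERFLOW;
	inode->i_uid = kuid;
	return 0;
}
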
+
+#ifdef CONFIG_MULTIUSER
+/**
+ * vfsgid_in_group_p() - check whether a vfsgid matches the caller's groups
+ * @vfsgid: the mnt gid to match
+ *
+ * This function can be used to determine whether @vfsgid matches any of the
+ * caller's groups.
+ *
+ * Return: 1 if @vfsgid matches the caller's groups, 0 if not.
+ */
+int vfsgid_in_group_p(vfsgid_t vfsgid)
+{
+ return in_group_p(AS_KGIDT(vfsgid));
+}
+#else
+int vfsgid_in_group_p(vfsgid_t vfsgid)
+{
+ return 1;
+}
+#endif
+EXPORT_SYMBOL_GPL(vfsgid_in_group_p);
+
+static int copy_mnt_idmap(struct uid_gid_map *map_from,
+ struct uid_gid_map *map_to)
+{
+ struct uid_gid_extent *forward, *reverse;
+ u32 nr_extents = READ_ONCE(map_from->nr_extents);
+ /* Pairs with smp_wmb() when writing the idmapping. */
+ smp_rmb();
+
+ /*
+ * If nr_extents is zero no idmapping has been written yet, and a
+ * writer could still be in the middle of installing one. Since we
+ * only read @nr_extents once, copying @map_from into @map_to in that
+ * window could leave us with inconsistent data. So just don't do
+ * anything at all.
+ */
+ if (nr_extents == 0)
+ return -EINVAL;
+
+ /*
+ * Here we know that nr_extents is greater than zero which means
+ * a map has been written. Since idmappings can't be changed
+ * once they have been written we know that we can safely copy
+ * from @map_to into @map_from.
+ */
+
+ if (nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
+ *map_to = *map_from;
+ return 0;
+ }
+
+ forward = kmemdup_array(map_from->forward, nr_extents,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL_ACCOUNT);
+ if (!forward)
+ return -ENOMEM;
+
+ reverse = kmemdup_array(map_from->reverse, nr_extents,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL_ACCOUNT);
+ if (!reverse) {
+ kfree(forward);
+ return -ENOMEM;
+ }
+
+ /*
+ * The idmapping isn't exposed anywhere so we don't need to care
+ * about ordering between extent pointers and @nr_extents
+ * initialization.
+ */
+ map_to->forward = forward;
+ map_to->reverse = reverse;
+ map_to->nr_extents = nr_extents;
+ return 0;
+}
+
+static void free_mnt_idmap(struct mnt_idmap *idmap)
+{
+ if (idmap->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
+ kfree(idmap->uid_map.forward);
+ kfree(idmap->uid_map.reverse);
+ }
+ if (idmap->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
+ kfree(idmap->gid_map.forward);
+ kfree(idmap->gid_map.reverse);
+ }
+ kfree(idmap);
+}
+
+struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
+{
+ struct mnt_idmap *idmap;
+ int ret;
+
+ idmap = kzalloc(sizeof(struct mnt_idmap), GFP_KERNEL_ACCOUNT);
+ if (!idmap)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&idmap->count, 1);
+ ret = copy_mnt_idmap(&mnt_userns->uid_map, &idmap->uid_map);
+ if (!ret)
+ ret = copy_mnt_idmap(&mnt_userns->gid_map, &idmap->gid_map);
+ if (ret) {
+ free_mnt_idmap(idmap);
+ idmap = ERR_PTR(ret);
+ }
+ return idmap;
+}
+
+/**
+ * mnt_idmap_get - get a reference to an idmapping
+ * @idmap: the idmap to bump the reference on
+ *
+ * Bump the reference count unless @idmap is @nop_mnt_idmap or
+ * @invalid_mnt_idmap, which are never freed.
+ *
+ * Return: @idmap, with its reference count bumped where applicable.
+ */
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
+{
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
+ refcount_inc(&idmap->count);
+
+ return idmap;
+}
+EXPORT_SYMBOL_GPL(mnt_idmap_get);
+
+/**
+ * mnt_idmap_put - put a reference to an idmapping
+ * @idmap: the idmap to put the reference on
+ *
+ * If this is a non-initial idmapping, put the reference count when a mount is
+ * released and free it if we're the last user.
+ */
+void mnt_idmap_put(struct mnt_idmap *idmap)
+{
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
+ refcount_dec_and_test(&idmap->count))
+ free_mnt_idmap(idmap);
+}
+EXPORT_SYMBOL_GPL(mnt_idmap_put);
+
+int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map)
+{
+ struct uid_gid_map *map, *map_up;
+ u32 idx, nr_mappings;
+
+ if (!is_valid_mnt_idmap(idmap))
+ return 0;
+
+ /*
+ * Idmappings are shown relative to the caller's idmapping.
+ * This is both the most intuitive and most useful solution.
+ */
+ if (uid_map) {
+ map = &idmap->uid_map;
+ map_up = &current_user_ns()->uid_map;
+ } else {
+ map = &idmap->gid_map;
+ map_up = &current_user_ns()->gid_map;
+ }
+
+ for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
+ uid_t lower;
+ struct uid_gid_extent *extent;
+
+ if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
+ extent = &map->extent[idx];
+ else
+ extent = &map->forward[idx];
+
+ /*
+ * Verify that the whole range of the mapping can be
+ * resolved in the caller's idmapping. If it cannot be
+ * resolved skip the mapping.
+ */
+ lower = map_id_range_up(map_up, extent->lower_first, extent->count);
+ if (lower == (uid_t) -1)
+ continue;
+
+ seq_printf(seq, "%u %u %u", extent->first, lower, extent->count);
+
+ seq->count++; /* mappings are separated by \0 */
+ if (seq_has_overflowed(seq))
+ return -EAGAIN;
+
+ nr_mappings++;
+ }
+
+ return nr_mappings;
+}
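
statmount_mnt_idmap() prints one "first lower count" triple per extent that resolves in the caller's idmapping, and the bare seq->count++ leaves a NUL between records. A userspace-style sketch of walking such a NUL-separated list (illustrative only; the surrounding statmount() uAPI is outside this hunk):

#include <stdio.h>
#include <string.h>

static void example_walk_idmap_records(const char *buf, size_t len)
{
	size_t off = 0;

	while (off < len && buf[off]) {
		unsigned int first, lower, count;

		if (sscanf(buf + off, "%u %u %u", &first, &lower, &count) == 3)
			printf("[%u..%u) -> [%u..%u)\n",
			       first, first + count, lower, lower + count);
		off += strlen(buf + off) + 1;	/* step over the NUL separator */
	}
}
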
diff --git a/fs/mount.h b/fs/mount.h
index 0b6e08cf8afb..2d28ef2a3aed 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -5,23 +5,29 @@
#include <linux/ns_common.h>
#include <linux/fs_pin.h>
+extern struct list_head notify_list;
+
struct mnt_namespace {
struct ns_common ns;
struct mount * root;
- /*
- * Traversal and modification of .list is protected by either
- * - taking namespace_sem for write, OR
- * - taking namespace_sem for read AND taking .ns_lock.
- */
- struct list_head list;
- spinlock_t ns_lock;
+ struct {
+ struct rb_root mounts; /* Protected by namespace_sem */
+ struct rb_node *mnt_last_node; /* last (rightmost) mount in the rbtree */
+ struct rb_node *mnt_first_node; /* first (leftmost) mount in the rbtree */
+ };
struct user_namespace *user_ns;
struct ucounts *ucounts;
- u64 seq; /* Sequence number to prevent loops */
- wait_queue_head_t poll;
+ wait_queue_head_t poll;
+ u64 seq_origin; /* Sequence number of origin mount namespace */
u64 event;
- unsigned int mounts; /* # of mounts in the namespace */
+#ifdef CONFIG_FSNOTIFY
+ __u32 n_fsnotify_mask;
+ struct fsnotify_mark_connector __rcu *n_fsnotify_marks;
+#endif
+ unsigned int nr_mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
+ refcount_t passive; /* number references not pinning @mounts */
+ bool is_anon;
} __randomize_layout;
struct mnt_pcp {
@@ -33,7 +39,6 @@ struct mountpoint {
struct hlist_node m_hash;
struct dentry *m_dentry;
struct hlist_head m_list;
- int m_count;
};
struct mount {
@@ -42,6 +47,7 @@ struct mount {
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
union {
+ struct rb_node mnt_node; /* node in the ns->mounts rbtree */
struct rcu_head mnt_rcu;
struct llist_node mnt_llist;
};
@@ -53,13 +59,16 @@ struct mount {
#endif
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
- struct list_head mnt_instance; /* mount instance on sb->s_mounts */
+ struct mount *mnt_next_for_sb; /* the next two fields are hlist_node, */
+ struct mount * __aligned(1) *mnt_pprev_for_sb;
+ /* except that LSB of pprev is stolen */
+#define WRITE_HOLD 1 /* ... for use by mnt_hold_writers() */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */
- struct list_head mnt_slave_list;/* list of slave mounts */
- struct list_head mnt_slave; /* slave list entry */
+ struct hlist_head mnt_slave_list;/* list of slave mounts */
+ struct hlist_node mnt_slave; /* slave list entry */
struct mount *mnt_master; /* slave is on master->mnt_slave_list */
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
@@ -67,18 +76,38 @@ struct mount {
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
struct hlist_node mnt_umount;
};
- struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
+ struct list_head to_notify; /* need to queue notification */
+ struct mnt_namespace *prev_ns; /* previous namespace (NULL if none) */
#endif
- int mnt_id; /* mount identifier */
+ int mnt_t_flags; /* namespace_sem-protected flags */
+ int mnt_id; /* mount identifier, reused */
+ u64 mnt_id_unique; /* mount ID unique until reboot */
int mnt_group_id; /* peer group identifier */
int mnt_expiry_mark; /* true if marked for expiry */
struct hlist_head mnt_pins;
struct hlist_head mnt_stuck_children;
+ struct mount *overmount; /* mounted on ->mnt_root */
} __randomize_layout;
+enum {
+ T_SHARED = 1, /* mount is shared */
+ T_UNBINDABLE = 2, /* mount is unbindable */
+ T_MARKED = 4, /* internal mark for propagate_... */
+ T_UMOUNT_CANDIDATE = 8, /* for propagate_umount */
+
+ /*
+ * T_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared. Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount. If you add a new T_*
+ * flag, consider how it interacts with shared mounts.
+ */
+ T_SHARED_MASK = T_UNBINDABLE,
+};
+
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
static inline struct mount *real_mount(struct vfsmount *mnt)
@@ -86,7 +115,7 @@ static inline struct mount *real_mount(struct vfsmount *mnt)
return container_of(mnt, struct mount, mnt);
}
-static inline int mnt_has_parent(struct mount *mnt)
+static inline int mnt_has_parent(const struct mount *mnt)
{
return mnt != mnt->mnt_parent;
}
@@ -100,7 +129,6 @@ static inline int is_mounted(struct vfsmount *mnt)
extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
extern int __legitimize_mnt(struct vfsmount *, unsigned);
-extern bool legitimize_mnt(struct vfsmount *, unsigned);
static inline bool __path_is_mountpoint(const struct path *path)
{
@@ -119,22 +147,26 @@ static inline void detach_mounts(struct dentry *dentry)
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
}
extern seqlock_t mount_lock;
+DEFINE_LOCK_GUARD_0(mount_writer, write_seqlock(&mount_lock),
+ write_sequnlock(&mount_lock))
+DEFINE_LOCK_GUARD_0(mount_locked_reader, read_seqlock_excl(&mount_lock),
+ read_sequnlock_excl(&mount_lock))
+
struct proc_mounts {
struct mnt_namespace *ns;
struct path root;
int (*show)(struct seq_file *, struct vfsmount *);
- struct mount cursor;
};
extern const struct seq_operations mounts_op;
-extern bool __is_local_mountpoint(struct dentry *dentry);
-static inline bool is_local_mountpoint(struct dentry *dentry)
+extern bool __is_local_mountpoint(const struct dentry *dentry);
+static inline bool is_local_mountpoint(const struct dentry *dentry)
{
if (!d_mountpoint(dentry))
return false;
@@ -144,7 +176,90 @@ static inline bool is_local_mountpoint(struct dentry *dentry)
static inline bool is_anon_ns(struct mnt_namespace *ns)
{
- return ns->seq == 0;
+ return ns->is_anon;
+}
+
+static inline bool anon_ns_root(const struct mount *m)
+{
+ struct mnt_namespace *ns = READ_ONCE(m->mnt_ns);
+
+ return !IS_ERR_OR_NULL(ns) && is_anon_ns(ns) && m == ns->root;
+}
+
+static inline bool mnt_ns_attached(const struct mount *mnt)
+{
+ return !RB_EMPTY_NODE(&mnt->mnt_node);
+}
+
+static inline bool mnt_ns_empty(const struct mnt_namespace *ns)
+{
+ return RB_EMPTY_ROOT(&ns->mounts);
+}
+
+static inline void move_from_ns(struct mount *mnt)
+{
+ struct mnt_namespace *ns = mnt->mnt_ns;
+ WARN_ON(!mnt_ns_attached(mnt));
+ if (ns->mnt_last_node == &mnt->mnt_node)
+ ns->mnt_last_node = rb_prev(&mnt->mnt_node);
+ if (ns->mnt_first_node == &mnt->mnt_node)
+ ns->mnt_first_node = rb_next(&mnt->mnt_node);
+ rb_erase(&mnt->mnt_node, &ns->mounts);
+ RB_CLEAR_NODE(&mnt->mnt_node);
+}
+
+bool has_locked_children(struct mount *mnt, struct dentry *dentry);
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns,
+ bool previous);
+
+static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct mnt_namespace, ns);
+}
+
+#ifdef CONFIG_FSNOTIFY
+static inline void mnt_notify_add(struct mount *m)
+{
+ /* Optimize the case where there are no watches */
+ if ((m->mnt_ns && m->mnt_ns->n_fsnotify_marks) ||
+ (m->prev_ns && m->prev_ns->n_fsnotify_marks))
+ list_add_tail(&m->to_notify, &notify_list);
+ else
+ m->prev_ns = m->mnt_ns;
+}
+#else
+static inline void mnt_notify_add(struct mount *m)
+{
+}
+#endif
+
+static inline struct mount *topmost_overmount(struct mount *m)
+{
+ while (m->overmount)
+ m = m->overmount;
+ return m;
+}
+
+static inline bool __test_write_hold(struct mount * __aligned(1) *val)
+{
+ return (unsigned long)val & WRITE_HOLD;
+}
+
+static inline bool test_write_hold(const struct mount *m)
+{
+ return __test_write_hold(m->mnt_pprev_for_sb);
+}
+
+static inline void set_write_hold(struct mount *m)
+{
+ m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
+ | WRITE_HOLD);
+}
+
+static inline void clear_write_hold(struct mount *m)
+{
+ m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
+ & ~WRITE_HOLD);
}
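
The new mnt_pprev_for_sb back-pointer doubles as storage for WRITE_HOLD by stealing its least-significant bit, which is guaranteed to be zero for a properly aligned pointer; the test/set/clear helpers above mask that bit in and out. A minimal stand-alone sketch of the same pointer-tagging idea (not the mount code itself):

#define EXAMPLE_TAG 1UL	/* lives in the always-zero low bit of an aligned pointer */

static inline void *example_tag(void *p)
{
	return (void *)((unsigned long)p | EXAMPLE_TAG);
}

static inline bool example_is_tagged(const void *p)
{
	return (unsigned long)p & EXAMPLE_TAG;
}

static inline void *example_untag(void *p)
{
	return (void *)((unsigned long)p & ~EXAMPLE_TAG);
}
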
-extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
+struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);
diff --git a/fs/mpage.c b/fs/mpage.c
index 334e7d09aa65..7dae5afc2b9e 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -29,7 +29,6 @@
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
-#include <linux/cleancache.h>
#include "internal.h"
/*
@@ -37,89 +36,85 @@
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
- * back to block_read_full_page().
+ * back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
-static void mpage_end_io(struct bio *bio)
+static void mpage_read_end_io(struct bio *bio)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
+ int err = blk_status_to_errno(bio->bi_status);
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
- page_endio(page, bio_op(bio),
- blk_status_to_errno(bio->bi_status));
+ bio_for_each_folio_all(fi, bio)
+ folio_end_read(fi.folio, err == 0);
+
+ bio_put(bio);
+}
+
+static void mpage_write_end_io(struct bio *bio)
+{
+ struct folio_iter fi;
+ int err = blk_status_to_errno(bio->bi_status);
+
+ bio_for_each_folio_all(fi, bio) {
+ if (err)
+ mapping_set_error(fi.folio->mapping, err);
+ folio_end_writeback(fi.folio);
}
bio_put(bio);
}
-static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
+static struct bio *mpage_bio_submit_read(struct bio *bio)
{
- bio->bi_end_io = mpage_end_io;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_end_io = mpage_read_end_io;
guard_bio_eod(bio);
submit_bio(bio);
return NULL;
}
-static struct bio *
-mpage_alloc(struct block_device *bdev,
- sector_t first_sector, int nr_vecs,
- gfp_t gfp_flags)
+static struct bio *mpage_bio_submit_write(struct bio *bio)
{
- struct bio *bio;
-
- /* Restrict the given (page cache) mask for slab allocations */
- gfp_flags &= GFP_KERNEL;
- bio = bio_alloc(gfp_flags, nr_vecs);
-
- if (bio == NULL && (current->flags & PF_MEMALLOC)) {
- while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
- }
-
- if (bio) {
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = first_sector;
- }
- return bio;
+ bio->bi_end_io = mpage_write_end_io;
+ guard_bio_eod(bio);
+ submit_bio(bio);
+ return NULL;
}
/*
* support function for mpage_readahead. The fs supplied get_block might
* return an up to date buffer. This is used to map that buffer into
- * the page, which allows readpage to avoid triggering a duplicate call
+ * the page, which allows read_folio to avoid triggering a duplicate call
* to get_block.
*
* The idea is to avoid adding buffers to pages that don't already have
* them. So when the buffer is up to date and the page size == block size,
* this marks the page up to date instead of adding new buffers.
*/
-static void
-map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
+static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
+ int page_block)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct buffer_head *page_bh, *head;
int block = 0;
- if (!page_has_buffers(page)) {
+ head = folio_buffers(folio);
+ if (!head) {
/*
* don't make any buffers if there is only one buffer on
- * the page and the page just needs to be set up to date
+ * the folio and the folio just needs to be set up to date
*/
- if (inode->i_blkbits == PAGE_SHIFT &&
+ if (inode->i_blkbits == folio_shift(folio) &&
buffer_uptodate(bh)) {
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
return;
}
- create_empty_buffers(page, i_blocksize(inode), 0);
+ head = create_empty_buffers(folio, i_blocksize(inode), 0);
}
- head = page_buffers(page);
+
page_bh = head;
do {
if (block == page_block) {
@@ -135,7 +130,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
struct mpage_readpage_args {
struct bio *bio;
- struct page *page;
+ struct folio *folio;
unsigned int nr_pages;
bool is_readahead;
sector_t last_block_in_bio;
@@ -153,41 +148,38 @@ struct mpage_readpage_args {
* represent the validity of its disk mapping and to decide when to do the next
* get_block() call.
*/
-static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
+static void do_mpage_readpage(struct mpage_readpage_args *args)
{
- struct page *page = args->page;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = args->folio;
+ struct inode *inode = folio->mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
const unsigned blocksize = 1 << blkbits;
struct buffer_head *map_bh = &args->map_bh;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
- unsigned first_hole = blocks_per_page;
+ unsigned first_hole = blocks_per_folio;
struct block_device *bdev = NULL;
int length;
int fully_mapped = 1;
- int op_flags;
+ blk_opf_t opf = REQ_OP_READ;
unsigned nblocks;
unsigned relative_block;
- gfp_t gfp;
+ gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
if (args->is_readahead) {
- op_flags = REQ_RAHEAD;
- gfp = readahead_gfp_mask(page->mapping);
- } else {
- op_flags = 0;
- gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ opf |= REQ_RAHEAD;
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
}
- if (page_has_buffers(page))
+ if (folio_buffers(folio))
goto confused;
- block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + args->nr_pages * blocks_per_page;
+ block_in_file = folio_pos(folio) >> blkbits;
+ last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -203,15 +195,14 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
unsigned map_offset = block_in_file - args->first_logical_block;
unsigned last = nblocks - map_offset;
+ first_block = map_bh->b_blocknr + map_offset;
for (relative_block = 0; ; relative_block++) {
if (relative_block == last) {
clear_buffer_mapped(map_bh);
break;
}
- if (page_block == blocks_per_page)
+ if (page_block == blocks_per_folio)
break;
- blocks[page_block] = map_bh->b_blocknr + map_offset +
- relative_block;
page_block++;
block_in_file++;
}
@@ -219,10 +210,10 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
}
/*
- * Then do more get_blocks calls until we are done with this page.
+ * Then do more get_blocks calls until we are done with this folio.
*/
- map_bh->b_page = page;
- while (page_block < blocks_per_page) {
+ map_bh->b_folio = folio;
+ while (page_block < blocks_per_folio) {
map_bh->b_state = 0;
map_bh->b_size = 0;
@@ -235,7 +226,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (!buffer_mapped(map_bh)) {
fully_mapped = 0;
- if (first_hole == blocks_per_page)
+ if (first_hole == blocks_per_folio)
first_hole = page_block;
page_block++;
block_in_file++;
@@ -244,94 +235,85 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
/* some filesystems will copy data into the page during
* the get_block call, in which case we don't want to
- * read it again. map_buffer_to_page copies the data
- * we just collected from get_block into the page's buffers
- * so readpage doesn't have to repeat the get_block call
+ * read it again. map_buffer_to_folio copies the data
+ * we just collected from get_block into the folio's buffers
+ * so read_folio doesn't have to repeat the get_block call
*/
if (buffer_uptodate(map_bh)) {
- map_buffer_to_page(page, map_bh, page_block);
+ map_buffer_to_folio(folio, map_bh, page_block);
goto confused;
}
- if (first_hole != blocks_per_page)
+ if (first_hole != blocks_per_folio)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
+ if (!page_block)
+ first_block = map_bh->b_blocknr;
+ else if (first_block + page_block != map_bh->b_blocknr)
goto confused;
nblocks = map_bh->b_size >> blkbits;
for (relative_block = 0; ; relative_block++) {
if (relative_block == nblocks) {
clear_buffer_mapped(map_bh);
break;
- } else if (page_block == blocks_per_page)
+ } else if (page_block == blocks_per_folio)
break;
- blocks[page_block] = map_bh->b_blocknr+relative_block;
page_block++;
block_in_file++;
}
bdev = map_bh->b_bdev;
}
- if (first_hole != blocks_per_page) {
- zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
+ if (first_hole != blocks_per_folio) {
+ folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
if (first_hole == 0) {
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
goto out;
}
} else if (fully_mapped) {
- SetPageMappedToDisk(page);
- }
-
- if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
- cleancache_get_page(page) == 0) {
- SetPageUptodate(page);
- goto confused;
+ folio_set_mappedtodisk(folio);
}
/*
- * This page will go to BIO. Do we need to send this BIO off first?
+ * This folio will go to BIO. Do we need to send this BIO off first?
*/
- if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
- args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
+ if (args->bio && (args->last_block_in_bio != first_block - 1))
+ args->bio = mpage_bio_submit_read(args->bio);
alloc_new:
if (args->bio == NULL) {
- if (first_hole == blocks_per_page) {
- if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
- page))
- goto out;
- }
- args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- bio_max_segs(args->nr_pages), gfp);
+ args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
+ gfp);
if (args->bio == NULL)
goto confused;
+ args->bio->bi_iter.bi_sector = first_block << (blkbits - 9);
}
length = first_hole << blkbits;
- if (bio_add_page(args->bio, page, length, 0) < length) {
- args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
+ if (!bio_add_folio(args->bio, folio, length, 0)) {
+ args->bio = mpage_bio_submit_read(args->bio);
goto alloc_new;
}
relative_block = block_in_file - args->first_logical_block;
nblocks = map_bh->b_size >> blkbits;
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
- (first_hole != blocks_per_page))
- args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
+ (first_hole != blocks_per_folio))
+ args->bio = mpage_bio_submit_read(args->bio);
else
- args->last_block_in_bio = blocks[blocks_per_page - 1];
+ args->last_block_in_bio = first_block + blocks_per_folio - 1;
out:
- return args->bio;
+ return;
confused:
if (args->bio)
- args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
- if (!PageUptodate(page))
- block_read_full_page(page, args->get_block);
+ args->bio = mpage_bio_submit_read(args->bio);
+ if (!folio_test_uptodate(folio))
+ block_read_full_folio(folio, args->get_block);
else
- unlock_page(page);
+ folio_unlock(folio);
goto out;
}
@@ -376,41 +358,46 @@ confused:
*/
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
- struct page *page;
+ struct folio *folio;
struct mpage_readpage_args args = {
.get_block = get_block,
.is_readahead = true,
};
- while ((page = readahead_page(rac))) {
- prefetchw(&page->flags);
- args.page = page;
+ while ((folio = readahead_folio(rac))) {
+ prefetchw(&folio->flags);
+ args.folio = folio;
args.nr_pages = readahead_count(rac);
- args.bio = do_mpage_readpage(&args);
- put_page(page);
+ do_mpage_readpage(&args);
+ /*
+ * If readahead failed synchronously, it may have been caused by a
+ * removed device or a filesystem metadata error.
+ */
+ if (!folio_test_locked(folio) && !folio_test_uptodate(folio))
+ break;
}
if (args.bio)
- mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
+ mpage_bio_submit_read(args.bio);
}
EXPORT_SYMBOL(mpage_readahead);
/*
* This isn't called much at all
*/
-int mpage_readpage(struct page *page, get_block_t get_block)
+int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
struct mpage_readpage_args args = {
- .page = page,
- .nr_pages = 1,
+ .folio = folio,
+ .nr_pages = folio_nr_pages(folio),
.get_block = get_block,
};
- args.bio = do_mpage_readpage(&args);
+ do_mpage_readpage(&args);
if (args.bio)
- mpage_bio_submit(REQ_OP_READ, 0, args.bio);
+ mpage_bio_submit_read(args.bio);
return 0;
}
-EXPORT_SYMBOL(mpage_readpage);
+EXPORT_SYMBOL(mpage_read_folio);
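
With mpage_readpage() replaced by mpage_read_folio(), a simple block filesystem wires the helpers into its address_space_operations together with its own get_block callback. A hedged sketch modelled on how such filesystems typically do this (the examplefs names are illustrative):

static int examplefs_get_block(struct inode *inode, sector_t block,
			       struct buffer_head *bh_result, int create);

static int examplefs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, examplefs_get_block);
}

static void examplefs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
	.read_folio	= examplefs_read_folio,
	.readahead	= examplefs_readahead,
	/* .writepages, .dirty_folio, ... */
};
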
/*
* Writing is not so simple.
@@ -433,20 +420,19 @@ struct mpage_data {
struct bio *bio;
sector_t last_block_in_bio;
get_block_t *get_block;
- unsigned use_writepage;
};
/*
* We have our BIO, so we can now mark the buffers clean. Make
* sure to only clean buffers which we know we'll be writing.
*/
-static void clean_buffers(struct page *page, unsigned first_unmapped)
+static void clean_buffers(struct folio *folio, unsigned first_unmapped)
{
unsigned buffer_counter = 0;
- struct buffer_head *bh, *head;
- if (!page_has_buffers(page))
+ struct buffer_head *bh, *head = folio_buffers(folio);
+
+ if (!head)
return;
- head = page_buffers(page);
bh = head;
do {
@@ -458,50 +444,37 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
/*
* we cannot drop the bh if the page is not uptodate or a concurrent
- * readpage would fail to serialize with the bh and it would read from
+ * read_folio would fail to serialize with the bh and it would read from
* disk before we reach the platter.
*/
- if (buffer_heads_over_limit && PageUptodate(page))
- try_to_free_buffers(page);
+ if (buffer_heads_over_limit && folio_test_uptodate(folio))
+ try_to_free_buffers(folio);
}
-/*
- * For situations where we want to clean all buffers attached to a page.
- * We don't need to calculate how many buffers are attached to the page,
- * we just need to specify a number larger than the maximum number of buffers.
- */
-void clean_page_buffers(struct page *page)
+static int mpage_write_folio(struct writeback_control *wbc, struct folio *folio,
+ struct mpage_data *mpd)
{
- clean_buffers(page, ~0U);
-}
-
-static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
-{
- struct mpage_data *mpd = data;
struct bio *bio = mpd->bio;
- struct address_space *mapping = page->mapping;
- struct inode *inode = page->mapping->host;
+ struct address_space *mapping = folio->mapping;
+ struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
- unsigned long end_index;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
sector_t last_block;
sector_t block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
- unsigned first_unmapped = blocks_per_page;
+ unsigned first_unmapped = blocks_per_folio;
struct block_device *bdev = NULL;
int boundary = 0;
sector_t boundary_block = 0;
struct block_device *boundary_bdev = NULL;
- int length;
+ size_t length;
struct buffer_head map_bh;
loff_t i_size = i_size_read(inode);
int ret = 0;
- int op_flags = wbc_to_write_flags(wbc);
+ struct buffer_head *head = folio_buffers(folio);
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
+ if (head) {
struct buffer_head *bh = head;
/* If they're all mapped and dirty, do it */
@@ -511,25 +484,27 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
if (!buffer_mapped(bh)) {
/*
* unmapped dirty buffers are created by
- * __set_page_dirty_buffers -> mmapped data
+ * block_dirty_folio -> mmapped data
*/
if (buffer_dirty(bh))
goto confused;
- if (first_unmapped == blocks_per_page)
+ if (first_unmapped == blocks_per_folio)
first_unmapped = page_block;
continue;
}
- if (first_unmapped != blocks_per_page)
+ if (first_unmapped != blocks_per_folio)
goto confused; /* hole -> non-hole */
if (!buffer_dirty(bh) || !buffer_uptodate(bh))
goto confused;
if (page_block) {
- if (bh->b_blocknr != blocks[page_block-1] + 1)
+ if (bh->b_blocknr != first_block + page_block)
goto confused;
+ } else {
+ first_block = bh->b_blocknr;
}
- blocks[page_block++] = bh->b_blocknr;
+ page_block++;
boundary = buffer_boundary(bh);
if (boundary) {
boundary_block = bh->b_blocknr;
@@ -544,7 +519,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
/*
* Page has buffers, but they are all unmapped. The page was
* created by pagein or read over a hole which was handled by
- * block_read_full_page(). If this address_space is also
+ * block_read_full_folio(). If this address_space is also
* using mpage_readahead then this can rarely happen.
*/
goto confused;
@@ -553,16 +528,24 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
/*
* The page has no buffers: map it to disk
*/
- BUG_ON(!PageUptodate(page));
- block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+ BUG_ON(!folio_test_uptodate(folio));
+ block_in_file = folio_pos(folio) >> blkbits;
+ /*
+ * Whole page beyond EOF? Skip allocating blocks to avoid leaking
+ * space.
+ */
+ if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
+ goto page_is_mapped;
last_block = (i_size - 1) >> blkbits;
- map_bh.b_page = page;
- for (page_block = 0; page_block < blocks_per_page; ) {
+ map_bh.b_folio = folio;
+ for (page_block = 0; page_block < blocks_per_folio; ) {
map_bh.b_state = 0;
map_bh.b_size = 1 << blkbits;
if (mpd->get_block(inode, block_in_file, &map_bh, 1))
goto confused;
+ if (!buffer_mapped(&map_bh))
+ goto confused;
if (buffer_new(&map_bh))
clean_bdev_bh_alias(&map_bh);
if (buffer_boundary(&map_bh)) {
@@ -570,10 +553,12 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
boundary_bdev = map_bh.b_bdev;
}
if (page_block) {
- if (map_bh.b_blocknr != blocks[page_block-1] + 1)
+ if (map_bh.b_blocknr != first_block + page_block)
goto confused;
+ } else {
+ first_block = map_bh.b_blocknr;
}
- blocks[page_block++] = map_bh.b_blocknr;
+ page_block++;
boundary = buffer_boundary(&map_bh);
bdev = map_bh.b_bdev;
if (block_in_file == last_block)
@@ -585,8 +570,11 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
first_unmapped = page_block;
page_is_mapped:
- end_index = i_size >> PAGE_SHIFT;
- if (page->index >= end_index) {
+ /* Don't bother writing beyond EOF, truncate will discard the folio */
+ if (folio_pos(folio) >= i_size)
+ goto confused;
+ length = folio_size(folio);
+ if (folio_pos(folio) + length > i_size) {
/*
* The page straddles i_size. It must be zeroed out on each
* and every writepage invocation because it may be mmapped.
@@ -595,31 +583,22 @@ page_is_mapped:
* is zeroed when mapped, and writes to that region are not
* written out to the file."
*/
- unsigned offset = i_size & (PAGE_SIZE - 1);
-
- if (page->index > end_index || !offset)
- goto confused;
- zero_user_segment(page, offset, PAGE_SIZE);
+ length = i_size - folio_pos(folio);
+ folio_zero_segment(folio, length, folio_size(folio));
}
/*
* This page will go to BIO. Do we need to send this BIO off first?
*/
- if (bio && mpd->last_block_in_bio != blocks[0] - 1)
- bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
+ if (bio && mpd->last_block_in_bio != first_block - 1)
+ bio = mpage_bio_submit_write(bio);
alloc_new:
if (bio == NULL) {
- if (first_unmapped == blocks_per_page) {
- if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
- page, wbc))
- goto out;
- }
- bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- BIO_MAX_VECS, GFP_NOFS|__GFP_HIGH);
- if (bio == NULL)
- goto confused;
-
+ bio = bio_alloc(bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE | wbc_to_write_flags(wbc),
+ GFP_NOFS);
+ bio->bi_iter.bi_sector = first_block << (blkbits - 9);
wbc_init_bio(wbc, bio);
bio->bi_write_hint = inode->i_write_hint;
}
@@ -629,42 +608,37 @@ alloc_new:
* the confused fail path above (OOM) will be very confused when
* it finds all bh marked clean (i.e. it will not write anything)
*/
- wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
length = first_unmapped << blkbits;
- if (bio_add_page(bio, page, length, 0) < length) {
- bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
+ if (!bio_add_folio(bio, folio, length, 0)) {
+ bio = mpage_bio_submit_write(bio);
goto alloc_new;
}
- clean_buffers(page, first_unmapped);
+ clean_buffers(folio, first_unmapped);
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- unlock_page(page);
- if (boundary || (first_unmapped != blocks_per_page)) {
- bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+ if (boundary || (first_unmapped != blocks_per_folio)) {
+ bio = mpage_bio_submit_write(bio);
if (boundary_block) {
write_boundary_block(boundary_bdev,
boundary_block, 1 << blkbits);
}
} else {
- mpd->last_block_in_bio = blocks[blocks_per_page - 1];
+ mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
}
goto out;
confused:
if (bio)
- bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
+ bio = mpage_bio_submit_write(bio);
- if (mpd->use_writepage) {
- ret = mapping->a_ops->writepage(page, wbc);
- } else {
- ret = -EAGAIN;
- goto out;
- }
/*
* The caller has a ref on the inode, so *mapping is stable
*/
+ ret = block_write_full_folio(folio, wbc, mpd->get_block);
mapping_set_error(mapping, ret);
out:
mpd->bio = bio;
@@ -676,66 +650,27 @@ out:
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
* @get_block: the filesystem's block mapper function.
- * If this is NULL then use a_ops->writepage. Otherwise, go
- * direct-to-BIO.
*
* This is a library function, which implements the writepages()
* address_space_operation.
- *
- * If a page is already under I/O, generic_writepages() skips it, even
- * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
- * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
- * and msync() need to guarantee that all the data which was dirty at the time
- * the call was made get new I/O started against them. If wbc->sync_mode is
- * WB_SYNC_ALL then we were called for data integrity and we must wait for
- * existing IO to complete.
*/
int
mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block)
{
+ struct mpage_data mpd = {
+ .get_block = get_block,
+ };
+ struct folio *folio = NULL;
struct blk_plug plug;
- int ret;
+ int error;
blk_start_plug(&plug);
-
- if (!get_block)
- ret = generic_writepages(mapping, wbc);
- else {
- struct mpage_data mpd = {
- .bio = NULL,
- .last_block_in_bio = 0,
- .get_block = get_block,
- .use_writepage = 1,
- };
-
- ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
- if (mpd.bio) {
- int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
- REQ_SYNC : 0);
- mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
- }
- }
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ error = mpage_write_folio(wbc, folio, &mpd);
+ if (mpd.bio)
+ mpage_bio_submit_write(mpd.bio);
blk_finish_plug(&plug);
- return ret;
+ return error;
}
EXPORT_SYMBOL(mpage_writepages);
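
On the write side, mpage_writepages() now always goes direct-to-BIO, so the get_block argument must be non-NULL (the old NULL/a_ops->writepage fallback was removed above) and the per-folio "confused" fallback is block_write_full_folio(). The corresponding hook in the sketch above would simply be:

static int examplefs_writepages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, examplefs_get_block);
}
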
-
-int mpage_writepage(struct page *page, get_block_t get_block,
- struct writeback_control *wbc)
-{
- struct mpage_data mpd = {
- .bio = NULL,
- .last_block_in_bio = 0,
- .get_block = get_block,
- .use_writepage = 0,
- };
- int ret = __mpage_writepage(page, wbc, &mpd);
- if (mpd.bio) {
- int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
- REQ_SYNC : 0);
- mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
- }
- return ret;
-}
-EXPORT_SYMBOL(mpage_writepage);
diff --git a/fs/namei.c b/fs/namei.c
index 1f9d2187c765..bf0f66f0e9b9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -17,15 +17,16 @@
#include <linux/init.h>
#include <linux/export.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/wordpart.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
+#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
-#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
@@ -124,8 +125,15 @@
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
+static inline void initname(struct filename *name, const char __user *uptr)
+{
+ name->uptr = uptr;
+ name->aname = NULL;
+ atomic_set(&name->refcnt, 1);
+}
+
struct filename *
-getname_flags(const char __user *filename, int flags, int *empty)
+getname_flags(const char __user *filename, int flags)
{
struct filename *result;
char *kname;
@@ -147,9 +155,20 @@ getname_flags(const char __user *filename, int flags, int *empty)
result->name = kname;
len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
- if (unlikely(len < 0)) {
- __putname(result);
- return ERR_PTR(len);
+ /*
+ * Handle both empty path and copy failure in one go.
+ */
+ if (unlikely(len <= 0)) {
+ if (unlikely(len < 0)) {
+ __putname(result);
+ return ERR_PTR(len);
+ }
+
+ /* The empty path is special. */
+ if (!(flags & LOOKUP_EMPTY)) {
+ __putname(result);
+ return ERR_PTR(-ENOENT);
+ }
}
/*
@@ -179,46 +198,50 @@ getname_flags(const char __user *filename, int flags, int *empty)
kfree(result);
return ERR_PTR(len);
}
+ /* The empty path is special. */
+ if (unlikely(!len) && !(flags & LOOKUP_EMPTY)) {
+ __putname(kname);
+ kfree(result);
+ return ERR_PTR(-ENOENT);
+ }
if (unlikely(len == PATH_MAX)) {
__putname(kname);
kfree(result);
return ERR_PTR(-ENAMETOOLONG);
}
}
-
- result->refcnt = 1;
- /* The empty path is special. */
- if (unlikely(!len)) {
- if (empty)
- *empty = 1;
- if (!(flags & LOOKUP_EMPTY)) {
- putname(result);
- return ERR_PTR(-ENOENT);
- }
- }
-
- result->uptr = filename;
- result->aname = NULL;
+ initname(result, filename);
audit_getname(result);
return result;
}
-struct filename *
-getname_uflags(const char __user *filename, int uflags)
+struct filename *getname_uflags(const char __user *filename, int uflags)
{
int flags = (uflags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
- return getname_flags(filename, flags, NULL);
+ return getname_flags(filename, flags);
}
-struct filename *
-getname(const char __user * filename)
+struct filename *__getname_maybe_null(const char __user *pathname)
{
- return getname_flags(filename, 0, NULL);
+ struct filename *name;
+ char c;
+
+ /* try to save on allocations; loss on um, though */
+ if (get_user(c, pathname))
+ return ERR_PTR(-EFAULT);
+ if (!c)
+ return NULL;
+
+ name = getname_flags(pathname, LOOKUP_EMPTY);
+ if (!IS_ERR(name) && !(name->name[0])) {
+ putname(name);
+ name = NULL;
+ }
+ return name;
}
-struct filename *
-getname_kernel(const char * filename)
+struct filename *getname_kernel(const char * filename)
{
struct filename *result;
int len = strlen(filename) + 1;
@@ -245,34 +268,39 @@ getname_kernel(const char * filename)
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
- result->uptr = NULL;
- result->aname = NULL;
- result->refcnt = 1;
+ initname(result, NULL);
audit_getname(result);
-
return result;
}
+EXPORT_SYMBOL(getname_kernel);
void putname(struct filename *name)
{
- if (IS_ERR(name))
+ int refcnt;
+
+ if (IS_ERR_OR_NULL(name))
return;
- BUG_ON(name->refcnt <= 0);
+ refcnt = atomic_read(&name->refcnt);
+ if (unlikely(refcnt != 1)) {
+ if (WARN_ON_ONCE(!refcnt))
+ return;
- if (--name->refcnt > 0)
- return;
+ if (!atomic_dec_and_test(&name->refcnt))
+ return;
+ }
- if (name->name != name->iname) {
+ if (unlikely(name->name != name->iname)) {
__putname(name->name);
kfree(name);
} else
__putname(name);
}
+EXPORT_SYMBOL(putname);
/**
* check_acl - perform ACL permission checking
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: inode to check permissions on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
*
@@ -280,13 +308,13 @@ void putname(struct filename *name)
* retrieve POSIX acls it needs to know whether it is called from a blocking or
* non-blocking context and thus cares about the MAY_NOT_BLOCK bit.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-static int check_acl(struct user_namespace *mnt_userns,
+static int check_acl(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
@@ -296,17 +324,17 @@ static int check_acl(struct user_namespace *mnt_userns,
acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
if (!acl)
return -EAGAIN;
- /* no ->get_acl() calls in RCU mode... */
+ /* no ->get_inode_acl() calls in RCU mode... */
if (is_uncached_acl(acl))
return -ECHILD;
- return posix_acl_permission(mnt_userns, inode, acl, mask);
+ return posix_acl_permission(idmap, inode, acl, mask);
}
- acl = get_acl(inode, ACL_TYPE_ACCESS);
+ acl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
- int error = posix_acl_permission(mnt_userns, inode, acl, mask);
+ int error = posix_acl_permission(idmap, inode, acl, mask);
posix_acl_release(acl);
return error;
}
@@ -315,9 +343,28 @@ static int check_acl(struct user_namespace *mnt_userns,
return -EAGAIN;
}
+/*
+ * Very quick optimistic "we know we have no ACL's" check.
+ *
+ * Note that this is purely for ACL_TYPE_ACCESS, and purely
+ * for the "we have cached that there are no ACLs" case.
+ *
+ * If this returns true, we know there are no ACLs. But if
+ * it returns false, we might still not have ACLs (it could
+ * be the is_uncached_acl() case).
+ */
+static inline bool no_acl_inode(struct inode *inode)
+{
+#ifdef CONFIG_FS_POSIX_ACL
+ return likely(!READ_ONCE(inode->i_acl));
+#else
+ return true;
+#endif
+}
+
/**
* acl_permission_check - perform basic UNIX permission checking
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: inode to check permissions on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
*
@@ -325,21 +372,43 @@ static int check_acl(struct user_namespace *mnt_userns,
* function may retrieve POSIX acls it needs to know whether it is called from a
* blocking or non-blocking context and thus cares about the MAY_NOT_BLOCK bit.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-static int acl_permission_check(struct user_namespace *mnt_userns,
+static int acl_permission_check(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
- kuid_t i_uid;
+ vfsuid_t vfsuid;
+
+ /*
+ * Common cheap case: everybody has the requested
+ * rights, and there are no ACLs to check. No need
+ * to do any owner/group checks in that case.
+ *
+ * - 'mask&7' is the requested permission bit set
+ * - multiplying by 0111 spreads them out to all of ugo
+ * - '& ~mode' looks for missing inode permission bits
+ * - the '!' is for "no missing permissions"
+ *
+ * After that, we just need to check that there are no
+ * ACL's on the inode - do the 'IS_POSIXACL()' check last
+ * because it will dereference the ->i_sb pointer and we
+ * want to avoid that if at all possible.
+ */
+ if (!((mask & 7) * 0111 & ~mode)) {
+ if (no_acl_inode(inode))
+ return 0;
+ if (!IS_POSIXACL(inode))
+ return 0;
+ }
/* Are we the owner? If so, ACL's don't matter */
- i_uid = i_uid_into_mnt(mnt_userns, inode);
- if (likely(uid_eq(current_fsuid(), i_uid))) {
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
+ if (likely(vfsuid_eq_kuid(vfsuid, current_fsuid()))) {
mask &= 7;
mode >>= 6;
return (mask & ~mode) ? -EACCES : 0;
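
The (mask & 7) * 0111 trick above replicates the requested rwx bits into the owner, group and other positions so a single AND against ~mode reveals any position that lacks the requested right. A worked sketch of the same check with concrete octal values:

/* Sketch of the fast-path test with concrete values (not kernel code). */
static bool example_everyone_allowed(umode_t mode, int mask)
{
	/*
	 * mode = 0644, mask = MAY_READ  (04): 04 * 0111 = 0444, ~0644 & 0444 = 0
	 *	-> every class may read, fast path taken.
	 * mode = 0644, mask = MAY_WRITE (02): 02 * 0111 = 0222, ~0644 & 0222 = 0022
	 *	-> group/other lack write, fall through to owner/group/ACL checks.
	 */
	return !((mask & 7) * 0111 & ~mode);
}
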
@@ -347,7 +416,7 @@ static int acl_permission_check(struct user_namespace *mnt_userns,
/* Do we have ACL's? */
if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
- int error = check_acl(mnt_userns, inode, mask);
+ int error = check_acl(idmap, inode, mask);
if (error != -EAGAIN)
return error;
}
@@ -361,8 +430,8 @@ static int acl_permission_check(struct user_namespace *mnt_userns,
* about? Need to check group ownership if so.
*/
if (mask & (mode ^ (mode >> 3))) {
- kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
- if (in_group_p(kgid))
+ vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
+ if (vfsgid_in_group_p(vfsgid))
mode >>= 3;
}
@@ -372,7 +441,7 @@ static int acl_permission_check(struct user_namespace *mnt_userns,
/**
* generic_permission - check for access rights on a Posix-like filesystem
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC,
* %MAY_NOT_BLOCK ...)
@@ -386,13 +455,13 @@ static int acl_permission_check(struct user_namespace *mnt_userns,
* request cannot be satisfied (eg. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int generic_permission(struct user_namespace *mnt_userns, struct inode *inode,
+int generic_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
int ret;
@@ -400,17 +469,17 @@ int generic_permission(struct user_namespace *mnt_userns, struct inode *inode,
/*
* Do the basic permission checks.
*/
- ret = acl_permission_check(mnt_userns, inode, mask);
+ ret = acl_permission_check(idmap, inode, mask);
if (ret != -EACCES)
return ret;
if (S_ISDIR(inode->i_mode)) {
/* DACs are overridable for directories */
if (!(mask & MAY_WRITE))
- if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_READ_SEARCH))
return 0;
- if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
@@ -421,7 +490,7 @@ int generic_permission(struct user_namespace *mnt_userns, struct inode *inode,
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ)
- if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_READ_SEARCH))
return 0;
/*
@@ -430,7 +499,7 @@ int generic_permission(struct user_namespace *mnt_userns, struct inode *inode,
* at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
- if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_OVERRIDE))
return 0;
@@ -440,7 +509,7 @@ EXPORT_SYMBOL(generic_permission);
/**
* do_inode_permission - UNIX permission checking
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: inode to check permissions on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
*
@@ -449,19 +518,19 @@ EXPORT_SYMBOL(generic_permission);
* flag in inode->i_opflags, that says "this has no special
* permission function, use the fast case".
*/
-static inline int do_inode_permission(struct user_namespace *mnt_userns,
+static inline int do_inode_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
if (likely(inode->i_op->permission))
- return inode->i_op->permission(mnt_userns, inode, mask);
+ return inode->i_op->permission(idmap, inode, mask);
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_FASTPERM;
spin_unlock(&inode->i_lock);
}
- return generic_permission(mnt_userns, inode, mask);
+ return generic_permission(idmap, inode, mask);
}
/**
@@ -471,10 +540,13 @@ static inline int do_inode_permission(struct user_namespace *mnt_userns,
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
+ *
+ * Note: lookup_inode_permission_may_exec() does not call into this helper.
+ * If you add MAY_EXEC checks here, adjust it accordingly.
*/
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
- if (unlikely(mask & MAY_WRITE)) {
+ if (mask & MAY_WRITE) {
umode_t mode = inode->i_mode;
/* Nobody gets write access to a read-only fs. */
@@ -486,7 +558,7 @@ static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
/**
* inode_permission - Check for access rights to a given inode
- * @mnt_userns: User namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
@@ -496,20 +568,20 @@ static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
-int inode_permission(struct user_namespace *mnt_userns,
+int inode_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
int retval;
retval = sb_permission(inode->i_sb, inode, mask);
- if (retval)
+ if (unlikely(retval))
return retval;
- if (unlikely(mask & MAY_WRITE)) {
+ if (mask & MAY_WRITE) {
/*
* Nobody gets write access to an immutable file.
*/
- if (IS_IMMUTABLE(inode))
+ if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
/*
@@ -517,22 +589,58 @@ int inode_permission(struct user_namespace *mnt_userns,
* written back improperly if their true value is unknown
* to the vfs.
*/
- if (HAS_UNMAPPED_ID(mnt_userns, inode))
+ if (unlikely(HAS_UNMAPPED_ID(idmap, inode)))
return -EACCES;
}
- retval = do_inode_permission(mnt_userns, inode, mask);
- if (retval)
+ retval = do_inode_permission(idmap, inode, mask);
+ if (unlikely(retval))
return retval;
retval = devcgroup_inode_permission(inode, mask);
- if (retval)
+ if (unlikely(retval))
return retval;
return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
+/*
+ * lookup_inode_permission_may_exec - Check traversal right for given inode
+ *
+ * This is a special case routine for may_lookup() making assumptions specific
+ * to path traversal. Use inode_permission() if you are doing something else.
+ *
+ * Work is shaved off compared to inode_permission() as follows:
+ * - we know for a fact there is no MAY_WRITE to worry about
+ * - it is an invariant that the inode is a directory
+ *
+ * Since the majority of real-world traversals happen on inodes which grant
+ * exec to everyone, we check that upfront and only resort to the more
+ * expensive work if the check fails.
+ *
+ * Filesystems which have their own ->permission hook and consequently miss out
+ * on IOP_FASTPERM can still get the optimization if they set IOP_FASTPERM_MAY_EXEC
+ * on their directory inodes.
+ */
+static __always_inline int lookup_inode_permission_may_exec(struct mnt_idmap *idmap,
+ struct inode *inode, int mask)
+{
+ /* Lookup already checked this to return -ENOTDIR */
+ VFS_BUG_ON_INODE(!S_ISDIR(inode->i_mode), inode);
+ VFS_BUG_ON((mask & ~MAY_NOT_BLOCK) != 0);
+
+ mask |= MAY_EXEC;
+
+ if (unlikely(!(inode->i_opflags & (IOP_FASTPERM | IOP_FASTPERM_MAY_EXEC))))
+ return inode_permission(idmap, inode, mask);
+
+ if (unlikely(((inode->i_mode & 0111) != 0111) || !no_acl_inode(inode)))
+ return inode_permission(idmap, inode, mask);
+
+ return security_inode_permission(inode, mask);
+}
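+
+/*
+ * Illustrative sketch (editorial, not part of this change): a filesystem
+ * with its own ->permission hook could opt its directories into the fast
+ * path above by setting the flag while initializing an inode, before it
+ * becomes visible to lookups, e.g.:
+ *
+ *	if (S_ISDIR(inode->i_mode))
+ *		inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
+ *
+ * This is only safe if MAY_EXEC on those directories never needs anything
+ * beyond the generic mode/ACL checks.
+ */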
+
/**
* path_get - get a reference to a path
* @path: path to get the reference to
@@ -566,7 +674,7 @@ struct nameidata {
struct path root;
struct inode *inode; /* path.dentry.d_inode */
unsigned int flags, state;
- unsigned seq, m_seq, r_seq;
+ unsigned seq, next_seq, m_seq, r_seq;
int last_type;
unsigned depth;
int total_link_count;
@@ -577,10 +685,11 @@ struct nameidata {
unsigned seq;
} *stack, internal[EMBEDDED_LEVELS];
struct filename *name;
+ const char *pathname;
struct nameidata *saved;
unsigned root_seq;
int dfd;
- kuid_t dir_uid;
+ vfsuid_t dir_vfsuid;
umode_t dir_mode;
} __randomize_layout;
@@ -595,6 +704,7 @@ static void __set_nameidata(struct nameidata *p, int dfd, struct filename *name)
p->depth = 0;
p->dfd = dfd;
p->name = name;
+ p->pathname = likely(name) ? name->name : "";
p->path.mnt = NULL;
p->path.dentry = NULL;
p->total_link_count = old ? old->total_link_count : 0;
@@ -639,6 +749,8 @@ static bool nd_alloc_stack(struct nameidata *nd)
/**
* path_connected - Verify that a dentry is below mnt.mnt_root
+ * @mnt: The mountpoint to check.
+ * @dentry: The dentry to check.
*
* Rename can sometimes move a file or directory outside of a bind
* mount; path_connected allows those cases to be detected.
@@ -664,9 +776,17 @@ static void drop_links(struct nameidata *nd)
}
}
+static void leave_rcu(struct nameidata *nd)
+{
+ nd->flags &= ~LOOKUP_RCU;
+ nd->seq = nd->next_seq = 0;
+ rcu_read_unlock();
+}
+
static void terminate_walk(struct nameidata *nd)
{
- drop_links(nd);
+ if (unlikely(nd->depth))
+ drop_links(nd);
if (!(nd->flags & LOOKUP_RCU)) {
int i;
path_put(&nd->path);
@@ -677,8 +797,7 @@ static void terminate_walk(struct nameidata *nd)
nd->state &= ~ND_ROOT_GRABBED;
}
} else {
- nd->flags &= ~LOOKUP_RCU;
- rcu_read_unlock();
+ leave_rcu(nd);
}
nd->depth = 0;
nd->path.mnt = NULL;
@@ -729,13 +848,6 @@ static bool legitimize_links(struct nameidata *nd)
static bool legitimize_root(struct nameidata *nd)
{
- /*
- * For scoped-lookups (where nd->root has been zeroed), we need to
- * restart the whole lookup from scratch -- because set_root() is wrong
- * for these lookups (nd->dfd is the root, not the filesystem root).
- */
- if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
- return false;
/* Nothing to do if nd->root is zero or is managed by the VFS user. */
if (!nd->root.mnt || (nd->state & ND_ROOT_PRESET))
return true;
@@ -771,14 +883,13 @@ static bool try_to_unlazy(struct nameidata *nd)
BUG_ON(!(nd->flags & LOOKUP_RCU));
- nd->flags &= ~LOOKUP_RCU;
- if (unlikely(!legitimize_links(nd)))
+ if (unlikely(nd->depth && !legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
goto out;
if (unlikely(!legitimize_root(nd)))
goto out;
- rcu_read_unlock();
+ leave_rcu(nd);
BUG_ON(nd->inode != parent->d_inode);
return true;
@@ -786,7 +897,7 @@ out1:
nd->path.mnt = NULL;
nd->path.dentry = NULL;
out:
- rcu_read_unlock();
+ leave_rcu(nd);
return false;
}
@@ -794,24 +905,27 @@ out:
* try_to_unlazy_next - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* @dentry: next dentry to step into
- * @seq: seq number to check @dentry against
* Returns: true on success, false on failure
*
- * Similar to to try_to_unlazy(), but here we have the next dentry already
+ * Similar to try_to_unlazy(), but here we have the next dentry already
* picked by rcu-walk and want to legitimize that in addition to the current
* nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context.
* Nothing should touch nameidata between try_to_unlazy_next() failure and
* terminate_walk().
*/
-static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsigned seq)
+static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry)
{
+ int res;
BUG_ON(!(nd->flags & LOOKUP_RCU));
- nd->flags &= ~LOOKUP_RCU;
- if (unlikely(!legitimize_links(nd)))
- goto out2;
- if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
+ if (unlikely(nd->depth && !legitimize_links(nd)))
goto out2;
+ res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
+ if (unlikely(res)) {
+ if (res > 0)
+ goto out2;
+ goto out1;
+ }
if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
goto out1;
@@ -824,7 +938,7 @@ static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsi
*/
if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
goto out;
- if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
+ if (read_seqcount_retry(&dentry->d_seq, nd->next_seq))
goto out_dput;
/*
* Sequence counts matched. Now make sure that the root is
@@ -832,7 +946,7 @@ static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsi
*/
if (unlikely(!legitimize_root(nd)))
goto out_dput;
- rcu_read_unlock();
+ leave_rcu(nd);
return true;
out2:
@@ -840,18 +954,19 @@ out2:
out1:
nd->path.dentry = NULL;
out:
- rcu_read_unlock();
+ leave_rcu(nd);
return false;
out_dput:
- rcu_read_unlock();
+ leave_rcu(nd);
dput(dentry);
return false;
}
-static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
+static inline int d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
- return dentry->d_op->d_revalidate(dentry, flags);
+ return dentry->d_op->d_revalidate(dir, name, dentry, flags);
else
return 1;
}
@@ -876,8 +991,8 @@ static int complete_walk(struct nameidata *nd)
* We don't want to zero nd->root for scoped-lookups or
* externally-managed nd->root.
*/
- if (!(nd->state & ND_ROOT_PRESET))
- if (!(nd->flags & LOOKUP_IS_SCOPED))
+ if (likely(!(nd->state & ND_ROOT_PRESET)))
+ if (likely(!(nd->flags & LOOKUP_IS_SCOPED)))
nd->root.mnt = NULL;
nd->flags &= ~LOOKUP_CACHED;
if (!try_to_unlazy(nd))
@@ -937,10 +1052,10 @@ static int set_root(struct nameidata *nd)
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
nd->root = fs->root;
nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
nd->state |= ND_ROOT_GRABBED;
@@ -959,7 +1074,7 @@ static int nd_jump_root(struct nameidata *nd)
}
if (!nd->root.mnt) {
int error = set_root(nd);
- if (error)
+ if (unlikely(error))
return error;
}
if (nd->flags & LOOKUP_RCU) {
@@ -968,7 +1083,7 @@ static int nd_jump_root(struct nameidata *nd)
d = nd->path.dentry;
nd->inode = d->d_inode;
nd->seq = nd->root_seq;
- if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
+ if (read_seqcount_retry(&d->d_seq, nd->seq))
return -ECHILD;
} else {
path_put(&nd->path);
@@ -984,7 +1099,7 @@ static int nd_jump_root(struct nameidata *nd)
* Helper to directly jump to a known parsed path from ->get_link,
* caller must have taken a reference to path beforehand.
*/
-int nd_jump_link(struct path *path)
+int nd_jump_link(const struct path *path)
{
int error = -ELOOP;
struct nameidata *nd = current->nameidata;
@@ -1020,14 +1135,64 @@ static inline void put_link(struct nameidata *nd)
path_put(&last->link);
}
-int sysctl_protected_symlinks __read_mostly = 0;
-int sysctl_protected_hardlinks __read_mostly = 0;
-int sysctl_protected_fifos __read_mostly;
-int sysctl_protected_regular __read_mostly;
+static int sysctl_protected_symlinks __read_mostly;
+static int sysctl_protected_hardlinks __read_mostly;
+static int sysctl_protected_fifos __read_mostly;
+static int sysctl_protected_regular __read_mostly;
+
+#ifdef CONFIG_SYSCTL
+static const struct ctl_table namei_sysctls[] = {
+ {
+ .procname = "protected_symlinks",
+ .data = &sysctl_protected_symlinks,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "protected_hardlinks",
+ .data = &sysctl_protected_hardlinks,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "protected_fifos",
+ .data = &sysctl_protected_fifos,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "protected_regular",
+ .data = &sysctl_protected_regular,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+};
+
+static int __init init_fs_namei_sysctls(void)
+{
+ register_sysctl_init("fs", namei_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_namei_sysctls);
+
+#endif /* CONFIG_SYSCTL */
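+
+/*
+ * Editorial note: the knobs above are the fs.protected_* sysctls; e.g.
+ * "sysctl -w fs.protected_regular=2" extends the sticky-directory
+ * restrictions enforced by may_create_in_sticky() below from
+ * world-writable to group-writable sticky directories as well.
+ */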
/**
* may_follow_link - Check symlink following for unsafe situations
* @nd: nameidata pathwalk data
+ * @inode: Used for idmapping.
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
@@ -1042,16 +1207,16 @@ int sysctl_protected_regular __read_mostly;
*/
static inline int may_follow_link(struct nameidata *nd, const struct inode *inode)
{
- struct user_namespace *mnt_userns;
- kuid_t i_uid;
+ struct mnt_idmap *idmap;
+ vfsuid_t vfsuid;
if (!sysctl_protected_symlinks)
return 0;
- mnt_userns = mnt_user_ns(nd->path.mnt);
- i_uid = i_uid_into_mnt(mnt_userns, inode);
+ idmap = mnt_idmap(nd->path.mnt);
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
/* Allowed if owner and follower match. */
- if (uid_eq(current_cred()->fsuid, i_uid))
+ if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
return 0;
/* Allowed if parent directory not sticky and world-writable. */
@@ -1059,7 +1224,7 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
return 0;
/* Allowed if parent directory and link owner match. */
- if (uid_valid(nd->dir_uid) && uid_eq(nd->dir_uid, i_uid))
+ if (vfsuid_valid(nd->dir_vfsuid) && vfsuid_eq(nd->dir_vfsuid, vfsuid))
return 0;
if (nd->flags & LOOKUP_RCU)
@@ -1072,7 +1237,7 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
/**
* safe_hardlink_source - Check for safe hardlink conditions
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @inode: the source inode to hardlink from
*
* Return false if at least one of the following conditions:
@@ -1083,7 +1248,7 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
*
* Otherwise returns true.
*/
-static bool safe_hardlink_source(struct user_namespace *mnt_userns,
+static bool safe_hardlink_source(struct mnt_idmap *idmap,
struct inode *inode)
{
umode_t mode = inode->i_mode;
@@ -1101,7 +1266,7 @@ static bool safe_hardlink_source(struct user_namespace *mnt_userns,
return false;
/* Hardlinking to unreadable or unwritable sources is dangerous. */
- if (inode_permission(mnt_userns, inode, MAY_READ | MAY_WRITE))
+ if (inode_permission(idmap, inode, MAY_READ | MAY_WRITE))
return false;
return true;
@@ -1109,8 +1274,8 @@ static bool safe_hardlink_source(struct user_namespace *mnt_userns,
/**
* may_linkat - Check permissions for creating a hardlink
- * @mnt_userns: user namespace of the mount the inode was found from
- * @link: the source to hardlink from
+ * @idmap: idmap of the mount the inode was found from
+ * @link: the source to hardlink from
*
* Block hardlink when all of:
* - sysctl_protected_hardlinks enabled
@@ -1118,21 +1283,21 @@ static bool safe_hardlink_source(struct user_namespace *mnt_userns,
* - hardlink source is unsafe (see safe_hardlink_source() above)
* - not CAP_FOWNER in a namespace with the inode owner uid mapped
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*
* Returns 0 if successful, -ve on error.
*/
-int may_linkat(struct user_namespace *mnt_userns, struct path *link)
+int may_linkat(struct mnt_idmap *idmap, const struct path *link)
{
struct inode *inode = link->dentry->d_inode;
/* Inode writeback is not safe when the uid or gid are invalid. */
- if (!uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
- !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
+ if (!vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode)))
return -EOVERFLOW;
if (!sysctl_protected_hardlinks)
@@ -1141,8 +1306,8 @@ int may_linkat(struct user_namespace *mnt_userns, struct path *link)
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
- if (safe_hardlink_source(mnt_userns, inode) ||
- inode_owner_or_capable(mnt_userns, inode))
+ if (safe_hardlink_source(idmap, inode) ||
+ inode_owner_or_capable(idmap, inode))
return 0;
audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
@@ -1153,7 +1318,7 @@ int may_linkat(struct user_namespace *mnt_userns, struct path *link)
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @nd: nameidata pathwalk data
* @inode: the inode of the file to open
*
@@ -1168,37 +1333,56 @@ int may_linkat(struct user_namespace *mnt_userns, struct path *link)
* the directory doesn't have to be world writable: being group writable will
* be enough.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*
* Returns 0 if the open is allowed, -ve on error.
*/
-static int may_create_in_sticky(struct user_namespace *mnt_userns,
- struct nameidata *nd, struct inode *const inode)
+static int may_create_in_sticky(struct mnt_idmap *idmap, struct nameidata *nd,
+ struct inode *const inode)
{
umode_t dir_mode = nd->dir_mode;
- kuid_t dir_uid = nd->dir_uid;
+ vfsuid_t dir_vfsuid = nd->dir_vfsuid, i_vfsuid;
+
+ if (likely(!(dir_mode & S_ISVTX)))
+ return 0;
- if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
- (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
- likely(!(dir_mode & S_ISVTX)) ||
- uid_eq(i_uid_into_mnt(mnt_userns, inode), dir_uid) ||
- uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
+ if (S_ISREG(inode->i_mode) && !sysctl_protected_regular)
return 0;
- if (likely(dir_mode & 0002) ||
- (dir_mode & 0020 &&
- ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
- (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
- const char *operation = S_ISFIFO(inode->i_mode) ?
- "sticky_create_fifo" :
- "sticky_create_regular";
- audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
+ if (S_ISFIFO(inode->i_mode) && !sysctl_protected_fifos)
+ return 0;
+
+ i_vfsuid = i_uid_into_vfsuid(idmap, inode);
+
+ if (vfsuid_eq(i_vfsuid, dir_vfsuid))
+ return 0;
+
+ if (vfsuid_eq_kuid(i_vfsuid, current_fsuid()))
+ return 0;
+
+ if (likely(dir_mode & 0002)) {
+ audit_log_path_denied(AUDIT_ANOM_CREAT, "sticky_create");
return -EACCES;
}
+
+ if (dir_mode & 0020) {
+ if (sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) {
+ audit_log_path_denied(AUDIT_ANOM_CREAT,
+ "sticky_create_fifo");
+ return -EACCES;
+ }
+
+ if (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode)) {
+ audit_log_path_denied(AUDIT_ANOM_CREAT,
+ "sticky_create_regular");
+ return -EACCES;
+ }
+ }
+
return 0;
}
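+
+/*
+ * Example (editorial): with fs.protected_regular == 1, an O_CREAT open of
+ * an existing regular file owned by a third user in a world-writable
+ * sticky directory (mode 01777) takes the "dir_mode & 0002" branch above
+ * and fails with -EACCES; in a merely group-writable sticky directory the
+ * same open is only refused once the sysctl is raised to 2.
+ */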
@@ -1305,6 +1489,10 @@ static int follow_automount(struct path *path, int *count, unsigned lookup_flags
dentry->d_inode)
return -EISDIR;
+ /* No need to trigger automounts if mountpoint crossing is disabled. */
+ if (lookup_flags & LOOKUP_NO_XDEV)
+ return -EXDEV;
+
if (count && (*count)++ >= MAXSYMLINKS)
return -ELOOP;
@@ -1325,9 +1513,13 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
int ret = 0;
while (flags & DCACHE_MANAGED_DENTRY) {
- /* Allow the filesystem to manage the transit without i_mutex
+ /* Allow the filesystem to manage the transit without i_rwsem
* being held. */
if (flags & DCACHE_MANAGE_TRANSIT) {
+ if (lookup_flags & LOOKUP_NO_XDEV) {
+ ret = -EXDEV;
+ break;
+ }
ret = path->dentry->d_op->d_manage(path, false);
flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
@@ -1345,6 +1537,10 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
// here we know it's positive
flags = path->dentry->d_flags;
need_mntput = true;
+ if (unlikely(lookup_flags & LOOKUP_NO_XDEV)) {
+ ret = -EXDEV;
+ break;
+ }
continue;
}
}
@@ -1406,11 +1602,11 @@ EXPORT_SYMBOL(follow_down_one);
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
-int follow_down(struct path *path)
+int follow_down(struct path *path, unsigned int flags)
{
struct vfsmount *mnt = path->mnt;
bool jumped;
- int ret = traverse_mounts(path, &jumped, NULL, 0);
+ int ret = traverse_mounts(path, &jumped, NULL, flags);
if (path->mnt != mnt)
mntput(mnt);
@@ -1422,8 +1618,7 @@ EXPORT_SYMBOL(follow_down);
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
* we meet a managed dentry that would need blocking.
*/
-static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
- struct inode **inode, unsigned *seqp)
+static bool __follow_mount_rcu(struct nameidata *nd, struct path *path)
{
struct dentry *dentry = path->dentry;
unsigned int flags = dentry->d_flags;
@@ -1452,15 +1647,12 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
path->mnt = &mounted->mnt;
dentry = path->dentry = mounted->mnt.mnt_root;
nd->state |= ND_JUMPED;
- *seqp = read_seqcount_begin(&dentry->d_seq);
- *inode = dentry->d_inode;
- /*
- * We don't need to re-check ->d_seq after this
- * ->d_inode read - there will be an RCU delay
- * between mount hash removal and ->mnt_root
- * becoming unpinned.
- */
+ nd->next_seq = read_seqcount_begin(&dentry->d_seq);
flags = dentry->d_flags;
+ // makes sure that non-RCU pathwalk could reach
+ // this state.
+ if (read_seqretry(&mount_lock, nd->m_seq))
+ return false;
continue;
}
if (read_seqretry(&mount_lock, nd->m_seq))
@@ -1471,8 +1663,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
}
static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
- struct path *path, struct inode **inode,
- unsigned int *seqp)
+ struct path *path)
{
bool jumped;
int ret;
@@ -1480,31 +1671,25 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
path->mnt = nd->path.mnt;
path->dentry = dentry;
if (nd->flags & LOOKUP_RCU) {
- unsigned int seq = *seqp;
- if (unlikely(!*inode))
- return -ENOENT;
- if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
+ unsigned int seq = nd->next_seq;
+ if (likely(!d_managed(dentry)))
return 0;
- if (!try_to_unlazy_next(nd, dentry, seq))
- return -ECHILD;
- // *path might've been clobbered by __follow_mount_rcu()
+ if (likely(__follow_mount_rcu(nd, path)))
+ return 0;
+ // *path and nd->next_seq might've been clobbered
path->mnt = nd->path.mnt;
path->dentry = dentry;
+ nd->next_seq = seq;
+ if (unlikely(!try_to_unlazy_next(nd, dentry)))
+ return -ECHILD;
}
ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags);
- if (jumped) {
- if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- ret = -EXDEV;
- else
- nd->state |= ND_JUMPED;
- }
+ if (jumped)
+ nd->state |= ND_JUMPED;
if (unlikely(ret)) {
dput(path->dentry);
if (path->mnt != nd->path.mnt)
mntput(path->mnt);
- } else {
- *inode = d_backing_inode(path->dentry);
- *seqp = 0; /* out of RCU mode, so the value doesn't matter */
}
return ret;
}
@@ -1519,7 +1704,7 @@ static struct dentry *lookup_dcache(const struct qstr *name,
{
struct dentry *dentry = d_lookup(dir, name);
if (dentry) {
- int error = d_revalidate(dentry, flags);
+ int error = d_revalidate(dir->d_inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error)
d_invalidate(dentry);
@@ -1536,18 +1721,22 @@ static struct dentry *lookup_dcache(const struct qstr *name,
* dentries - as the matter of fact, this only gets called
* when directory is guaranteed to have no in-lookup children
* at all.
+ * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed.
+ * Will return -EEXIST if name is found and LOOKUP_EXCL was passed.
*/
-static struct dentry *__lookup_hash(const struct qstr *name,
- struct dentry *base, unsigned int flags)
+struct dentry *lookup_one_qstr_excl(const struct qstr *name,
+ struct dentry *base, unsigned int flags)
{
- struct dentry *dentry = lookup_dcache(name, base, flags);
+ struct dentry *dentry;
struct dentry *old;
- struct inode *dir = base->d_inode;
+ struct inode *dir;
+ dentry = lookup_dcache(name, base, flags);
if (dentry)
- return dentry;
+ goto found;
/* Don't create child dentry for a dead directory. */
+ dir = base->d_inode;
if (unlikely(IS_DEADDIR(dir)))
return ERR_PTR(-ENOENT);
@@ -1560,12 +1749,36 @@ static struct dentry *__lookup_hash(const struct qstr *name,
dput(dentry);
dentry = old;
}
+found:
+ if (IS_ERR(dentry))
+ return dentry;
+ if (d_is_negative(dentry) && !(flags & LOOKUP_CREATE)) {
+ dput(dentry);
+ return ERR_PTR(-ENOENT);
+ }
+ if (d_is_positive(dentry) && (flags & LOOKUP_EXCL)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
return dentry;
}
+EXPORT_SYMBOL(lookup_one_qstr_excl);
-static struct dentry *lookup_fast(struct nameidata *nd,
- struct inode **inode,
- unsigned *seqp)
+/**
+ * lookup_fast - do fast lockless (but racy) lookup of a dentry
+ * @nd: current nameidata
+ *
+ * Do a fast, but racy lookup in the dcache for the given dentry, and
+ * revalidate it. Returns a valid dentry pointer or NULL if one wasn't
+ * found. On error, an ERR_PTR will be returned.
+ *
+ * If this function returns a valid dentry and the walk is no longer
+ * lazy, the dentry will carry a reference that must later be put. If
+ * RCU mode is still in force, then this is not the case and the dentry
+ * must be legitimized before use. If this returns NULL, then the walk
+ * will no longer be in RCU mode.
+ */
+static struct dentry *lookup_fast(struct nameidata *nd)
{
struct dentry *dentry, *parent = nd->path.dentry;
int status = 1;
@@ -1576,8 +1789,7 @@ static struct dentry *lookup_fast(struct nameidata *nd,
* going to fall back to non-racy lookup.
*/
if (nd->flags & LOOKUP_RCU) {
- unsigned seq;
- dentry = __d_lookup_rcu(parent, &nd->last, &seq);
+ dentry = __d_lookup_rcu(parent, &nd->last, &nd->next_seq);
if (unlikely(!dentry)) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
@@ -1585,37 +1797,26 @@ static struct dentry *lookup_fast(struct nameidata *nd,
}
/*
- * This sequence count validates that the inode matches
- * the dentry name information from lookup.
- */
- *inode = d_backing_inode(dentry);
- if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
- return ERR_PTR(-ECHILD);
-
- /*
* This sequence count validates that the parent had no
* changes while we did the lookup of the dentry above.
- *
- * The memory barrier in read_seqcount_begin of child is
- * enough, we can use __read_seqcount_retry here.
*/
- if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
+ if (read_seqcount_retry(&parent->d_seq, nd->seq))
return ERR_PTR(-ECHILD);
- *seqp = seq;
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
if (likely(status > 0))
return dentry;
- if (!try_to_unlazy_next(nd, dentry, seq))
+ if (!try_to_unlazy_next(nd, dentry))
return ERR_PTR(-ECHILD);
if (status == -ECHILD)
/* we'd been told to redo it in non-rcu mode */
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last,
+ dentry, nd->flags);
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
return NULL;
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
}
if (unlikely(status <= 0)) {
if (!status)
@@ -1643,7 +1844,7 @@ again:
if (IS_ERR(dentry))
return dentry;
if (unlikely(!d_in_lookup(dentry))) {
- int error = d_revalidate(dentry, flags);
+ int error = d_revalidate(inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error) {
d_invalidate(dentry);
@@ -1664,7 +1865,7 @@ again:
return dentry;
}
-static struct dentry *lookup_slow(const struct qstr *name,
+static noinline struct dentry *lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
@@ -1676,18 +1877,45 @@ static struct dentry *lookup_slow(const struct qstr *name,
return res;
}
-static inline int may_lookup(struct user_namespace *mnt_userns,
- struct nameidata *nd)
+static struct dentry *lookup_slow_killable(const struct qstr *name,
+ struct dentry *dir,
+ unsigned int flags)
{
- if (nd->flags & LOOKUP_RCU) {
- int err = inode_permission(mnt_userns, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
- if (err != -ECHILD || !try_to_unlazy(nd))
- return err;
- }
- return inode_permission(mnt_userns, nd->inode, MAY_EXEC);
+ struct inode *inode = dir->d_inode;
+ struct dentry *res;
+
+ if (inode_lock_shared_killable(inode))
+ return ERR_PTR(-EINTR);
+ res = __lookup_slow(name, dir, flags);
+ inode_unlock_shared(inode);
+ return res;
+}
+
+static inline int may_lookup(struct mnt_idmap *idmap,
+ struct nameidata *restrict nd)
+{
+ int err, mask;
+
+ mask = nd->flags & LOOKUP_RCU ? MAY_NOT_BLOCK : 0;
+ err = lookup_inode_permission_may_exec(idmap, nd->inode, mask);
+ if (likely(!err))
+ return 0;
+
+ // If we failed, and we weren't in LOOKUP_RCU, it's final
+ if (!(nd->flags & LOOKUP_RCU))
+ return err;
+
+ // Drop out of RCU mode to make sure it wasn't transient
+ if (!try_to_unlazy(nd))
+ return -ECHILD; // redo it all non-lazy
+
+ if (err != -ECHILD) // hard error
+ return err;
+
+ return lookup_inode_permission_may_exec(idmap, nd->inode, 0);
}
-static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
+static int reserve_stack(struct nameidata *nd, struct path *link)
{
if (unlikely(nd->total_link_count++ >= MAXSYMLINKS))
return -ELOOP;
@@ -1702,9 +1930,9 @@ static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
if (nd->flags & LOOKUP_RCU) {
// we need to grab link before we do unlazy. And we can't skip
// unlazy even if we fail to grab the link - cleanup needs it
- bool grabbed_link = legitimize_path(nd, link, seq);
+ bool grabbed_link = legitimize_path(nd, link, nd->next_seq);
- if (!try_to_unlazy(nd) != 0 || !grabbed_link)
+ if (!try_to_unlazy(nd) || !grabbed_link)
return -ECHILD;
if (nd_alloc_stack(nd))
@@ -1715,13 +1943,23 @@ static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4};
-static const char *pick_link(struct nameidata *nd, struct path *link,
- struct inode *inode, unsigned seq, int flags)
+static noinline const char *pick_link(struct nameidata *nd, struct path *link,
+ struct inode *inode, int flags)
{
struct saved *last;
const char *res;
- int error = reserve_stack(nd, link, seq);
+ int error;
+
+ if (nd->flags & LOOKUP_RCU) {
+ /* make sure that d_is_symlink from step_into_slowpath() matches the inode */
+ if (read_seqcount_retry(&link->dentry->d_seq, nd->next_seq))
+ return ERR_PTR(-ECHILD);
+ } else {
+ if (link->mnt == nd->path.mnt)
+ mntget(link->mnt);
+ }
+ error = reserve_stack(nd, link);
if (unlikely(error)) {
if (!(nd->flags & LOOKUP_RCU))
path_put(link);
@@ -1730,7 +1968,7 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
last = nd->stack + nd->depth++;
last->link = *link;
clear_delayed_call(&last->done);
- last->seq = seq;
+ last->seq = nd->next_seq;
if (flags & WALK_TRAILING) {
error = may_follow_link(nd, inode);
@@ -1742,13 +1980,13 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
unlikely(link->mnt->mnt_flags & MNT_NOSYMFOLLOW))
return ERR_PTR(-ELOOP);
- if (!(nd->flags & LOOKUP_RCU)) {
+ if (unlikely(atime_needs_update(&last->link, inode))) {
+ if (nd->flags & LOOKUP_RCU) {
+ if (!try_to_unlazy(nd))
+ return ERR_PTR(-ECHILD);
+ }
touch_atime(&last->link);
cond_resched();
- } else if (atime_needs_update(&last->link, inode)) {
- if (!try_to_unlazy(nd))
- return ERR_PTR(-ECHILD);
- touch_atime(&last->link);
}
error = security_inode_follow_link(link->dentry, inode,
@@ -1792,43 +2030,68 @@ all_done: // pure jump
* to do this check without having to look at inode->i_op,
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
+ *
+ * NOTE: dentry must be what nd->next_seq had been sampled from.
*/
-static const char *step_into(struct nameidata *nd, int flags,
- struct dentry *dentry, struct inode *inode, unsigned seq)
+static noinline const char *step_into_slowpath(struct nameidata *nd, int flags,
+ struct dentry *dentry)
{
struct path path;
- int err = handle_mounts(nd, dentry, &path, &inode, &seq);
+ struct inode *inode;
+ int err;
- if (err < 0)
+ err = handle_mounts(nd, dentry, &path);
+ if (unlikely(err < 0))
return ERR_PTR(err);
+ inode = path.dentry->d_inode;
if (likely(!d_is_symlink(path.dentry)) ||
((flags & WALK_TRAILING) && !(nd->flags & LOOKUP_FOLLOW)) ||
(flags & WALK_NOFOLLOW)) {
/* not a symlink or should not follow */
- if (!(nd->flags & LOOKUP_RCU)) {
+ if (nd->flags & LOOKUP_RCU) {
+ if (read_seqcount_retry(&path.dentry->d_seq, nd->next_seq))
+ return ERR_PTR(-ECHILD);
+ if (unlikely(!inode))
+ return ERR_PTR(-ENOENT);
+ } else {
dput(nd->path.dentry);
if (nd->path.mnt != path.mnt)
mntput(nd->path.mnt);
}
nd->path = path;
nd->inode = inode;
- nd->seq = seq;
+ nd->seq = nd->next_seq;
return NULL;
}
- if (nd->flags & LOOKUP_RCU) {
- /* make sure that d_is_symlink above matches inode */
- if (read_seqcount_retry(&path.dentry->d_seq, seq))
+ return pick_link(nd, &path, inode, flags);
+}
+
+static __always_inline const char *step_into(struct nameidata *nd, int flags,
+ struct dentry *dentry)
+{
+ /*
+ * In the common case we are in rcu-walk and stepping into a directory with
+ * nothing mounted on it (as opposed to e.g. a symlink or a mountpoint).
+ *
+ * We can handle that and negative entries with the checks below.
+ */
+ if (likely((nd->flags & LOOKUP_RCU) &&
+ !d_managed(dentry) && !d_is_symlink(dentry))) {
+ struct inode *inode = dentry->d_inode;
+ if (read_seqcount_retry(&dentry->d_seq, nd->next_seq))
return ERR_PTR(-ECHILD);
- } else {
- if (path.mnt == nd->path.mnt)
- mntget(path.mnt);
+ if (unlikely(!inode))
+ return ERR_PTR(-ENOENT);
+ nd->path.dentry = dentry;
+ /* nd->path.mnt is retained on purpose */
+ nd->inode = inode;
+ nd->seq = nd->next_seq;
+ return NULL;
}
- return pick_link(nd, &path, inode, seq, flags);
+ return step_into_slowpath(nd, flags, dentry);
}
-static struct dentry *follow_dotdot_rcu(struct nameidata *nd,
- struct inode **inodep,
- unsigned *seqp)
+static struct dentry *follow_dotdot_rcu(struct nameidata *nd)
{
struct dentry *parent, *old;
@@ -1845,30 +2108,30 @@ static struct dentry *follow_dotdot_rcu(struct nameidata *nd,
nd->path = path;
nd->inode = path.dentry->d_inode;
nd->seq = seq;
- if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
+ // makes sure that non-RCU pathwalk could reach this state
+ if (read_seqretry(&mount_lock, nd->m_seq))
return ERR_PTR(-ECHILD);
/* we know that mountpoint was pinned */
}
old = nd->path.dentry;
parent = old->d_parent;
- *inodep = parent->d_inode;
- *seqp = read_seqcount_begin(&parent->d_seq);
- if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
+ nd->next_seq = read_seqcount_begin(&parent->d_seq);
+ // makes sure that non-RCU pathwalk could reach this state
+ if (read_seqcount_retry(&old->d_seq, nd->seq))
return ERR_PTR(-ECHILD);
if (unlikely(!path_connected(nd->path.mnt, parent)))
return ERR_PTR(-ECHILD);
return parent;
in_root:
- if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
+ if (read_seqretry(&mount_lock, nd->m_seq))
return ERR_PTR(-ECHILD);
if (unlikely(nd->flags & LOOKUP_BENEATH))
return ERR_PTR(-ECHILD);
- return NULL;
+ nd->next_seq = nd->seq;
+ return nd->path.dentry;
}
-static struct dentry *follow_dotdot(struct nameidata *nd,
- struct inode **inodep,
- unsigned *seqp)
+static struct dentry *follow_dotdot(struct nameidata *nd)
{
struct dentry *parent;
@@ -1892,15 +2155,12 @@ static struct dentry *follow_dotdot(struct nameidata *nd,
dput(parent);
return ERR_PTR(-ENOENT);
}
- *seqp = 0;
- *inodep = parent->d_inode;
return parent;
in_root:
if (unlikely(nd->flags & LOOKUP_BENEATH))
return ERR_PTR(-EXDEV);
- dget(nd->path.dentry);
- return NULL;
+ return dget(nd->path.dentry);
}
static const char *handle_dots(struct nameidata *nd, int type)
@@ -1908,26 +2168,19 @@ static const char *handle_dots(struct nameidata *nd, int type)
if (type == LAST_DOTDOT) {
const char *error = NULL;
struct dentry *parent;
- struct inode *inode;
- unsigned seq;
if (!nd->root.mnt) {
error = ERR_PTR(set_root(nd));
- if (error)
+ if (unlikely(error))
return error;
}
if (nd->flags & LOOKUP_RCU)
- parent = follow_dotdot_rcu(nd, &inode, &seq);
+ parent = follow_dotdot_rcu(nd);
else
- parent = follow_dotdot(nd, &inode, &seq);
+ parent = follow_dotdot(nd);
if (IS_ERR(parent))
return ERR_CAST(parent);
- if (unlikely(!parent))
- error = step_into(nd, WALK_NOFOLLOW,
- nd->path.dentry, nd->inode, nd->seq);
- else
- error = step_into(nd, WALK_NOFOLLOW,
- parent, inode, seq);
+ error = step_into(nd, WALK_NOFOLLOW, parent);
if (unlikely(error))
return error;
@@ -1939,31 +2192,29 @@ static const char *handle_dots(struct nameidata *nd, int type)
* some fallback).
*/
smp_rmb();
- if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
+ if (__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq))
return ERR_PTR(-EAGAIN);
- if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
+ if (__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq))
return ERR_PTR(-EAGAIN);
}
}
return NULL;
}
-static const char *walk_component(struct nameidata *nd, int flags)
+static __always_inline const char *walk_component(struct nameidata *nd, int flags)
{
struct dentry *dentry;
- struct inode *inode;
- unsigned seq;
/*
* "." and ".." are special - ".." especially so because it has
* to be able to know about the current root directory and
* parent relationships.
*/
if (unlikely(nd->last_type != LAST_NORM)) {
- if (!(flags & WALK_MORE) && nd->depth)
+ if (unlikely(nd->depth) && !(flags & WALK_MORE))
put_link(nd);
return handle_dots(nd, nd->last_type);
}
- dentry = lookup_fast(nd, &inode, &seq);
+ dentry = lookup_fast(nd);
if (IS_ERR(dentry))
return ERR_CAST(dentry);
if (unlikely(!dentry)) {
@@ -1971,9 +2222,9 @@ static const char *walk_component(struct nameidata *nd, int flags)
if (IS_ERR(dentry))
return ERR_CAST(dentry);
}
- if (!(flags & WALK_MORE) && nd->depth)
+ if (unlikely(nd->depth) && !(flags & WALK_MORE))
put_link(nd);
- return step_into(nd, flags, dentry, inode, seq);
+ return step_into(nd, flags, dentry);
}
/*
@@ -2129,21 +2380,39 @@ EXPORT_SYMBOL(hashlen_string);
/*
* Calculate the length and hash of the path component, and
- * return the "hash_len" as the result.
+ * store them in nd->last, returning a pointer just past the component.
*/
-static inline u64 hash_name(const void *salt, const char *name)
+static inline const char *hash_name(struct nameidata *nd,
+ const char *name,
+ unsigned long *lastword)
{
- unsigned long a = 0, b, x = 0, y = (unsigned long)salt;
+ unsigned long a, b, x, y = (unsigned long)nd->path.dentry;
unsigned long adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- len = 0;
- goto inside;
+ /*
+ * The first iteration is special, because it can result in
+ * '.' and '..' and has no mixing other than the final fold.
+ */
+ a = load_unaligned_zeropad(name);
+ b = a ^ REPEAT_BYTE('/');
+ if (has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)) {
+ adata = prep_zero_mask(a, adata, &constants);
+ bdata = prep_zero_mask(b, bdata, &constants);
+ mask = create_zero_mask(adata | bdata);
+ a &= zero_bytemask(mask);
+ *lastword = a;
+ len = find_zero(mask);
+ nd->last.hash = fold_hash(a, y);
+ nd->last.len = len;
+ return name + len;
+ }
+ len = 0;
+ x = 0;
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
-inside:
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
@@ -2151,11 +2420,25 @@ inside:
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
mask = create_zero_mask(adata | bdata);
- x ^= a & zero_bytemask(mask);
+ a &= zero_bytemask(mask);
+ x ^= a;
+ len += find_zero(mask);
+ *lastword = 0; // Multi-word components cannot be DOT or DOTDOT
- return hashlen_create(fold_hash(x, y), len + find_zero(mask));
+ nd->last.hash = fold_hash(x, y);
+ nd->last.len = len;
+ return name + len;
}
+/*
+ * Note that the 'last' word is always zero-masked, but
+ * was loaded as a possibly big-endian word.
+ */
+#ifdef __BIG_ENDIAN
+ #define LAST_WORD_IS_DOT (0x2eul << (BITS_PER_LONG-8))
+ #define LAST_WORD_IS_DOTDOT (0x2e2eul << (BITS_PER_LONG-16))
+#endif
+
#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
/* Return the hash of a string of known length */
@@ -2188,22 +2471,35 @@ EXPORT_SYMBOL(hashlen_string);
* We know there's a real path component here of at least
* one character.
*/
-static inline u64 hash_name(const void *salt, const char *name)
+static inline const char *hash_name(struct nameidata *nd, const char *name, unsigned long *lastword)
{
- unsigned long hash = init_name_hash(salt);
- unsigned long len = 0, c;
+ unsigned long hash = init_name_hash(nd->path.dentry);
+ unsigned long len = 0, c, last = 0;
c = (unsigned char)*name;
do {
+ last = (last << 8) + c;
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
- return hashlen_create(end_name_hash(hash), len);
+
+ // This is reliable for DOT or DOTDOT, since the component
+ // cannot contain NUL characters - top bits being zero means
+ // we cannot have had any other pathnames.
+ *lastword = last;
+ nd->last.hash = end_name_hash(hash);
+ nd->last.len = len;
+ return name + len;
}
#endif
+#ifndef LAST_WORD_IS_DOT
+ #define LAST_WORD_IS_DOT 0x2e
+ #define LAST_WORD_IS_DOTDOT 0x2e2e
+#endif
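+
+/*
+ * Worked example (editorial): for the component ".." the two 0x2e bytes are
+ * followed by '/' or NUL, so the zero-masked last word is 0x2e2e on
+ * little-endian (or 0x2e2e shifted into the top bytes on big-endian,
+ * matching the definitions above). link_path_walk() compares that word
+ * against LAST_WORD_IS_DOT/LAST_WORD_IS_DOTDOT to classify "." and ".."
+ * without re-reading the name.
+ */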
+
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
@@ -2221,56 +2517,52 @@ static int link_path_walk(const char *name, struct nameidata *nd)
nd->flags |= LOOKUP_PARENT;
if (IS_ERR(name))
return PTR_ERR(name);
- while (*name=='/')
- name++;
- if (!*name) {
+ if (*name == '/') {
+ do {
+ name++;
+ } while (unlikely(*name == '/'));
+ }
+ if (unlikely(!*name)) {
nd->dir_mode = 0; // short-circuit the 'hardening' idiocy
return 0;
}
/* At this point we know we have a real path component. */
for(;;) {
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *idmap;
const char *link;
- u64 hash_len;
- int type;
+ unsigned long lastword;
- mnt_userns = mnt_user_ns(nd->path.mnt);
- err = may_lookup(mnt_userns, nd);
- if (err)
+ idmap = mnt_idmap(nd->path.mnt);
+ err = may_lookup(idmap, nd);
+ if (unlikely(err))
return err;
- hash_len = hash_name(nd->path.dentry, name);
+ nd->last.name = name;
+ name = hash_name(nd, name, &lastword);
- type = LAST_NORM;
- if (name[0] == '.') switch (hashlen_len(hash_len)) {
- case 2:
- if (name[1] == '.') {
- type = LAST_DOTDOT;
- nd->state |= ND_JUMPED;
- }
- break;
- case 1:
- type = LAST_DOT;
- }
- if (likely(type == LAST_NORM)) {
- struct dentry *parent = nd->path.dentry;
+ switch(lastword) {
+ case LAST_WORD_IS_DOTDOT:
+ nd->last_type = LAST_DOTDOT;
+ nd->state |= ND_JUMPED;
+ break;
+
+ case LAST_WORD_IS_DOT:
+ nd->last_type = LAST_DOT;
+ break;
+
+ default:
+ nd->last_type = LAST_NORM;
nd->state &= ~ND_JUMPED;
+
+ struct dentry *parent = nd->path.dentry;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
- struct qstr this = { { .hash_len = hash_len }, .name = name };
- err = parent->d_op->d_hash(parent, &this);
+ err = parent->d_op->d_hash(parent, &nd->last);
if (err < 0)
return err;
- hash_len = this.hash_len;
- name = this.name;
}
}
- nd->last.hash_len = hash_len;
- nd->last.name = name;
- nd->last_type = type;
-
- name += hashlen_len(hash_len);
if (!*name)
goto OK;
/*
@@ -2283,8 +2575,8 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (unlikely(!*name)) {
OK:
/* pathname or trailing symlink, done */
- if (!depth) {
- nd->dir_uid = i_uid_into_mnt(mnt_userns, nd->inode);
+ if (likely(!depth)) {
+ nd->dir_vfsuid = i_uid_into_vfsuid(idmap, nd->inode);
nd->dir_mode = nd->inode->i_mode;
nd->flags &= ~LOOKUP_PARENT;
return 0;
@@ -2318,16 +2610,18 @@ OK:
static const char *path_init(struct nameidata *nd, unsigned flags)
{
int error;
- const char *s = nd->name->name;
+ const char *s = nd->pathname;
/* LOOKUP_CACHED requires RCU, ask caller to retry */
- if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED)
+ if (unlikely((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED))
return ERR_PTR(-EAGAIN);
- if (!*s)
+ if (unlikely(!*s))
flags &= ~LOOKUP_RCU;
if (flags & LOOKUP_RCU)
rcu_read_lock();
+ else
+ nd->seq = nd->next_seq = 0;
nd->flags = flags;
nd->state |= ND_JUMPED;
@@ -2336,7 +2630,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
smp_rmb();
- if (nd->state & ND_ROOT_PRESET) {
+ if (unlikely(nd->state & ND_ROOT_PRESET)) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
if (*s && unlikely(!d_can_lookup(root)))
@@ -2355,7 +2649,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->root.mnt = NULL;
/* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
- if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
+ if (*s == '/' && likely(!(flags & LOOKUP_IN_ROOT))) {
error = nd_jump_root(nd);
if (unlikely(error))
return ERR_PTR(error);
@@ -2369,31 +2663,35 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->seq);
nd->path = fs->pwd;
nd->inode = nd->path.dentry->d_inode;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->seq, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
nd->inode = nd->path.dentry->d_inode;
}
} else {
/* Caller must check execute permissions on the starting path component */
- struct fd f = fdget_raw(nd->dfd);
+ CLASS(fd_raw, f)(nd->dfd);
struct dentry *dentry;
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- dentry = f.file->f_path.dentry;
+ if (flags & LOOKUP_LINKAT_EMPTY) {
+ if (fd_file(f)->f_cred != current_cred() &&
+ !ns_capable(fd_file(f)->f_cred->user_ns, CAP_DAC_READ_SEARCH))
+ return ERR_PTR(-ENOENT);
+ }
- if (*s && unlikely(!d_can_lookup(dentry))) {
- fdput(f);
+ dentry = fd_file(f)->f_path.dentry;
+
+ if (*s && unlikely(!d_can_lookup(dentry)))
return ERR_PTR(-ENOTDIR);
- }
- nd->path = f.file->f_path;
+ nd->path = fd_file(f)->f_path;
if (flags & LOOKUP_RCU) {
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
@@ -2401,11 +2699,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
- fdput(f);
}
/* For scoped-lookups we need to set the root to the dirfd as well. */
- if (flags & LOOKUP_IS_SCOPED) {
+ if (unlikely(flags & LOOKUP_IS_SCOPED)) {
nd->root = nd->path;
if (flags & LOOKUP_RCU) {
nd->root_seq = nd->seq;
@@ -2429,11 +2726,11 @@ static int handle_lookup_down(struct nameidata *nd)
{
if (!(nd->flags & LOOKUP_RCU))
dget(nd->path.dentry);
- return PTR_ERR(step_into(nd, WALK_NOFOLLOW,
- nd->path.dentry, nd->inode, nd->seq));
+ nd->next_seq = nd->seq;
+ return PTR_ERR(step_into(nd, WALK_NOFOLLOW, nd->path.dentry));
}
-/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
+/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
@@ -2468,7 +2765,7 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
}
int filename_lookup(int dfd, struct filename *name, unsigned flags,
- struct path *path, struct path *root)
+ struct path *path, const struct path *root)
{
int retval;
struct nameidata nd;
@@ -2488,7 +2785,7 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
return retval;
}
-/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
+/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
@@ -2506,16 +2803,17 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
}
/* Note: this does not consume "name" */
-static int filename_parentat(int dfd, struct filename *name,
- unsigned int flags, struct path *parent,
- struct qstr *last, int *type)
+static int __filename_parentat(int dfd, struct filename *name,
+ unsigned int flags, struct path *parent,
+ struct qstr *last, int *type,
+ const struct path *root)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return PTR_ERR(name);
- set_nameidata(&nd, dfd, name, NULL);
+ set_nameidata(&nd, dfd, name, root);
retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
if (unlikely(retval == -ECHILD))
retval = path_parentat(&nd, flags, parent);
@@ -2530,38 +2828,159 @@ static int filename_parentat(int dfd, struct filename *name,
return retval;
}
+static int filename_parentat(int dfd, struct filename *name,
+ unsigned int flags, struct path *parent,
+ struct qstr *last, int *type)
+{
+ return __filename_parentat(dfd, name, flags, parent, last, type, NULL);
+}
+
+/**
+ * start_dirop - begin a create or remove dirop, performing locking and lookup
+ * @parent: the dentry of the parent in which the operation will occur
+ * @name: a qstr holding the name within that parent
+ * @lookup_flags: intent and other lookup flags.
+ *
+ * The lookup is performed and necessary locks are taken so that, on success,
+ * the returned dentry can be operated on safely.
+ * The qstr must already have the hash value calculated.
+ *
+ * Returns: a locked dentry, or an error.
+ *
+ */
+static struct dentry *__start_dirop(struct dentry *parent, struct qstr *name,
+ unsigned int lookup_flags,
+ unsigned int state)
+{
+ struct dentry *dentry;
+ struct inode *dir = d_inode(parent);
+
+ if (state == TASK_KILLABLE) {
+ int ret = down_write_killable_nested(&dir->i_rwsem,
+ I_MUTEX_PARENT);
+ if (ret)
+ return ERR_PTR(ret);
+ } else {
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ }
+ dentry = lookup_one_qstr_excl(name, parent, lookup_flags);
+ if (IS_ERR(dentry))
+ inode_unlock(dir);
+ return dentry;
+}
+
+struct dentry *start_dirop(struct dentry *parent, struct qstr *name,
+ unsigned int lookup_flags)
+{
+ return __start_dirop(parent, name, lookup_flags, TASK_NORMAL);
+}
+
+/**
+ * end_dirop - signal completion of a dirop
+ * @de: the dentry which was returned by start_dirop or similar.
+ *
+ * If @de is an error, nothing happens. Otherwise any lock taken to
+ * protect the dentry is dropped and the dentry itself is released (dput()).
+ */
+void end_dirop(struct dentry *de)
+{
+ if (!IS_ERR(de)) {
+ inode_unlock(de->d_parent->d_inode);
+ dput(de);
+ }
+}
+EXPORT_SYMBOL(end_dirop);
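+
+/*
+ * Illustrative pattern (editorial, not part of this change): a typical
+ * caller resolves the parent first and then brackets the actual directory
+ * modification with these helpers:
+ *
+ *	dentry = start_dirop(parent, &name, LOOKUP_CREATE);
+ *	if (IS_ERR(dentry))
+ *		return PTR_ERR(dentry);
+ *	... create or remove the object while the parent stays locked ...
+ *	end_dirop(dentry);
+ */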
+
/* does lookup, returns the object with parent locked */
-static struct dentry *__kern_path_locked(struct filename *name, struct path *path)
+static struct dentry *__start_removing_path(int dfd, struct filename *name,
+ struct path *path)
{
+ struct path parent_path __free(path_put) = {};
struct dentry *d;
struct qstr last;
int type, error;
- error = filename_parentat(AT_FDCWD, name, 0, path, &last, &type);
+ error = filename_parentat(dfd, name, 0, &parent_path, &last, &type);
if (error)
return ERR_PTR(error);
- if (unlikely(type != LAST_NORM)) {
- path_put(path);
+ if (unlikely(type != LAST_NORM))
return ERR_PTR(-EINVAL);
- }
- inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
- d = __lookup_hash(&last, path->dentry, 0);
- if (IS_ERR(d)) {
- inode_unlock(path->dentry->d_inode);
- path_put(path);
- }
+ /* don't fail immediately if it's r/o, at least try to report other errors */
+ error = mnt_want_write(parent_path.mnt);
+ d = start_dirop(parent_path.dentry, &last, 0);
+ if (IS_ERR(d))
+ goto drop;
+ if (error)
+ goto fail;
+ path->dentry = no_free_ptr(parent_path.dentry);
+ path->mnt = no_free_ptr(parent_path.mnt);
+ return d;
+
+fail:
+ end_dirop(d);
+ d = ERR_PTR(error);
+drop:
+ if (!error)
+ mnt_drop_write(parent_path.mnt);
return d;
}
-struct dentry *kern_path_locked(const char *name, struct path *path)
+/**
+ * kern_path_parent - lookup path returning parent and target
+ * @name: path name
+ * @path: path to store parent in
+ *
+ * The path @name should end with a normal component, not "." or ".." or "/".
+ * A lookup is performed and, if successful, the parent information
+ * is stored in @path and the dentry is returned.
+ *
+ * The dentry may be negative; the parent will be positive.
+ *
+ * Returns: dentry or error.
+ */
+struct dentry *kern_path_parent(const char *name, struct path *path)
+{
+ struct path parent_path __free(path_put) = {};
+ struct filename *filename __free(putname) = getname_kernel(name);
+ struct dentry *d;
+ struct qstr last;
+ int type, error;
+
+ error = filename_parentat(AT_FDCWD, filename, 0, &parent_path, &last, &type);
+ if (error)
+ return ERR_PTR(error);
+ if (unlikely(type != LAST_NORM))
+ return ERR_PTR(-EINVAL);
+
+ d = lookup_noperm_unlocked(&last, parent_path.dentry);
+ if (IS_ERR(d))
+ return d;
+ path->dentry = no_free_ptr(parent_path.dentry);
+ path->mnt = no_free_ptr(parent_path.mnt);
+ return d;
+}
+
+struct dentry *start_removing_path(const char *name, struct path *path)
{
struct filename *filename = getname_kernel(name);
- struct dentry *res = __kern_path_locked(filename, path);
+ struct dentry *res = __start_removing_path(AT_FDCWD, filename, path);
putname(filename);
return res;
}
+struct dentry *start_removing_user_path_at(int dfd,
+ const char __user *name,
+ struct path *path)
+{
+ struct filename *filename = getname(name);
+ struct dentry *res = __start_removing_path(dfd, filename, path);
+
+ putname(filename);
+ return res;
+}
+EXPORT_SYMBOL(start_removing_user_path_at);
+
int kern_path(const char *name, unsigned int flags, struct path *path)
{
struct filename *filename = getname_kernel(name);
@@ -2574,6 +2993,24 @@ int kern_path(const char *name, unsigned int flags, struct path *path)
EXPORT_SYMBOL(kern_path);
/**
+ * vfs_path_parent_lookup - lookup a parent path relative to a dentry-vfsmount pair
+ * @filename: filename structure
+ * @flags: lookup flags
+ * @parent: pointer to struct path to fill
+ * @last: last component
+ * @type: type of the last component
+ * @root: pointer to struct path of the base directory
+ */
+int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
+ struct path *parent, struct qstr *last, int *type,
+ const struct path *root)
+{
+ return __filename_parentat(AT_FDCWD, filename, flags, parent, last,
+ type, root);
+}
+EXPORT_SYMBOL(vfs_path_parent_lookup);
+
+/**
* vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
* @dentry: pointer to dentry of the base directory
* @mnt: pointer to vfs mount of the base directory
@@ -2597,20 +3034,17 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(vfs_path_lookup);
-static int lookup_one_common(struct user_namespace *mnt_userns,
- const char *name, struct dentry *base, int len,
- struct qstr *this)
+int lookup_noperm_common(struct qstr *qname, struct dentry *base)
{
- this->name = name;
- this->len = len;
- this->hash = full_name_hash(base, name, len);
+ const char *name = qname->name;
+ u32 len = qname->len;
+
+ qname->hash = full_name_hash(base, name, len);
if (!len)
return -EACCES;
- if (unlikely(name[0] == '.')) {
- if (len < 2 || (len == 2 && name[1] == '.'))
- return -EACCES;
- }
+ if (is_dot_dotdot(name, len))
+ return -EACCES;
while (len--) {
unsigned int c = *(const unsigned char *)name++;
@@ -2622,150 +3056,482 @@ static int lookup_one_common(struct user_namespace *mnt_userns,
* to use its own hash..
*/
if (base->d_flags & DCACHE_OP_HASH) {
- int err = base->d_op->d_hash(base, this);
+ int err = base->d_op->d_hash(base, qname);
if (err < 0)
return err;
}
+ return 0;
+}
- return inode_permission(mnt_userns, base->d_inode, MAY_EXEC);
+static int lookup_one_common(struct mnt_idmap *idmap,
+ struct qstr *qname, struct dentry *base)
+{
+ int err;
+ err = lookup_noperm_common(qname, base);
+ if (err < 0)
+ return err;
+ return inode_permission(idmap, base->d_inode, MAY_EXEC);
}
/**
- * try_lookup_one_len - filesystem helper to lookup single pathname component
- * @name: pathname component to lookup
+ * try_lookup_noperm - filesystem helper to lookup single pathname component
+ * @name: qstr storing pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* Look up a dentry by name in the dcache, returning NULL if it does not
- * currently exist. The function does not try to create a dentry.
+ * currently exist. The function does not try to create a dentry and if one
+ * is found it doesn't try to revalidate it.
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * not be called by generic code. It does no permission checking.
+ *
+ * No locks need be held - only a counted reference to @base is needed.
*
- * The caller must hold base->i_mutex.
*/
-struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len)
+struct dentry *try_lookup_noperm(struct qstr *name, struct dentry *base)
{
- struct qstr this;
int err;
- WARN_ON_ONCE(!inode_is_locked(base->d_inode));
-
- err = lookup_one_common(&init_user_ns, name, base, len, &this);
+ err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
- return lookup_dcache(&this, base, 0);
+ return d_lookup(base, name);
}
-EXPORT_SYMBOL(try_lookup_one_len);
+EXPORT_SYMBOL(try_lookup_noperm);
/**
- * lookup_one_len - filesystem helper to lookup single pathname component
- * @name: pathname component to lookup
+ * lookup_noperm - filesystem helper to lookup single pathname component
+ * @name: qstr storing pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * not be called by generic code. It does no permission checking.
*
- * The caller must hold base->i_mutex.
+ * The caller must hold base->i_rwsem.
*/
-struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+struct dentry *lookup_noperm(struct qstr *name, struct dentry *base)
{
struct dentry *dentry;
- struct qstr this;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
- err = lookup_one_common(&init_user_ns, name, base, len, &this);
+ err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
- dentry = lookup_dcache(&this, base, 0);
- return dentry ? dentry : __lookup_slow(&this, base, 0);
+ dentry = lookup_dcache(name, base, 0);
+ return dentry ? dentry : __lookup_slow(name, base, 0);
}
-EXPORT_SYMBOL(lookup_one_len);
+EXPORT_SYMBOL(lookup_noperm);
/**
- * lookup_one - filesystem helper to lookup single pathname component
- * @mnt_userns: user namespace of the mount the lookup is performed from
- * @name: pathname component to lookup
+ * lookup_one - lookup single pathname component
+ * @idmap: idmap of the mount the lookup is performed from
+ * @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
- * Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * This can be used for in-kernel filesystem clients such as file servers.
*
- * The caller must hold base->i_mutex.
+ * The caller must hold base->i_rwsem.
*/
-struct dentry *lookup_one(struct user_namespace *mnt_userns, const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_one(struct mnt_idmap *idmap, struct qstr *name,
+ struct dentry *base)
{
struct dentry *dentry;
- struct qstr this;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
- err = lookup_one_common(mnt_userns, name, base, len, &this);
+ err = lookup_one_common(idmap, name, base);
if (err)
return ERR_PTR(err);
- dentry = lookup_dcache(&this, base, 0);
- return dentry ? dentry : __lookup_slow(&this, base, 0);
+ dentry = lookup_dcache(name, base, 0);
+ return dentry ? dentry : __lookup_slow(name, base, 0);
}
EXPORT_SYMBOL(lookup_one);
/**
- * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
+ * lookup_one_unlocked - lookup single pathname component
+ * @idmap: idmap of the mount the lookup is performed from
+ * @name: qstr holding pathname component to lookup
+ * @base: base directory to lookup from
+ *
+ * This can be used for in-kernel filesystem clients such as file servers.
+ *
+ * Unlike lookup_one, it should be called without the parent
+ * i_rwsem held, and will take the i_rwsem itself if necessary.
+ */
+struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, struct qstr *name,
+ struct dentry *base)
+{
+ int err;
+ struct dentry *ret;
+
+ err = lookup_one_common(idmap, name, base);
+ if (err)
+ return ERR_PTR(err);
+
+ ret = lookup_dcache(name, base, 0);
+ if (!ret)
+ ret = lookup_slow(name, base, 0);
+ return ret;
+}
+EXPORT_SYMBOL(lookup_one_unlocked);
+
+/**
+ * lookup_one_positive_killable - lookup single pathname component
+ * @idmap: idmap of the mount the lookup is performed from
+ * @name: qstr holding pathname component to lookup
+ * @base: base directory to lookup from
+ *
+ * This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
+ * known positive or ERR_PTR(). This is what most of the users want.
+ *
+ * Note that pinned negative with unlocked parent _can_ become positive at any
+ * time, so callers of lookup_one_unlocked() need to be very careful; pinned
+ * positives have ->d_inode stable, so this one avoids such problems.
+ *
+ * This can be used for in-kernel filesystem clients such as file servers.
+ *
+ * It should be called without the parent i_rwsem held, and will take
+ * the i_rwsem itself if necessary. If a fatal signal is pending or
+ * delivered, it will return %-EINTR if the lock is needed.
+ */
+struct dentry *lookup_one_positive_killable(struct mnt_idmap *idmap,
+ struct qstr *name,
+ struct dentry *base)
+{
+ int err;
+ struct dentry *ret;
+
+ err = lookup_one_common(idmap, name, base);
+ if (err)
+ return ERR_PTR(err);
+
+ ret = lookup_dcache(name, base, 0);
+ if (!ret)
+ ret = lookup_slow_killable(name, base, 0);
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(lookup_one_positive_killable);
+
+/**
+ * lookup_one_positive_unlocked - lookup single pathname component
+ * @idmap: idmap of the mount the lookup is performed from
+ * @name: qstr holding pathname component to lookup
+ * @base: base directory to lookup from
+ *
+ * This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
+ * known positive or ERR_PTR(). This is what most of the users want.
+ *
+ * Note that pinned negative with unlocked parent _can_ become positive at any
+ * time, so callers of lookup_one_unlocked() need to be very careful; pinned
+ * positives have ->d_inode stable, so this one avoids such problems.
+ *
+ * This can be used for in-kernel filesystem clients such as file servers.
+ *
+ * The helper should be called without i_rwsem held.
+ */
+struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
+ struct qstr *name,
+ struct dentry *base)
+{
+ struct dentry *ret = lookup_one_unlocked(idmap, name, base);
+
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(lookup_one_positive_unlocked);
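/*
 * Sketch: a file server resolving a client-supplied name below an export
 * without holding the parent's i_rwsem.  The exp_mnt/exp_dentry/client_name
 * identifiers are illustrative; QSTR() is assumed to build an unhashed qstr
 * (the hash is computed internally by lookup_one_common()).
 *
 *	struct dentry *de;
 *
 *	de = lookup_one_positive_unlocked(mnt_idmap(exp_mnt),
 *					  &QSTR(client_name), exp_dentry);
 *	if (IS_ERR(de))
 *		return PTR_ERR(de);	// negatives come back as -ENOENT
 *	...de->d_inode is stable because de is pinned and positive...
 *	dput(de);
 */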
+
+/**
+ * lookup_noperm_unlocked - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
- * @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.
+ * not be called by generic code. It does no permission checking.
*
- * Unlike lookup_one_len, it should be called without the parent
- * i_mutex held, and will take the i_mutex itself if necessary.
+ * Unlike lookup_noperm(), it should be called without the parent
+ * i_rwsem held, and will take the i_rwsem itself if necessary.
+ *
+ * Unlike try_lookup_noperm() it *does* revalidate the dentry if it already
+ * existed.
*/
-struct dentry *lookup_one_len_unlocked(const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_noperm_unlocked(struct qstr *name, struct dentry *base)
{
- struct qstr this;
- int err;
struct dentry *ret;
+ int err;
- err = lookup_one_common(&init_user_ns, name, base, len, &this);
+ err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
- ret = lookup_dcache(&this, base, 0);
+ ret = lookup_dcache(name, base, 0);
if (!ret)
- ret = lookup_slow(&this, base, 0);
+ ret = lookup_slow(name, base, 0);
return ret;
}
-EXPORT_SYMBOL(lookup_one_len_unlocked);
+EXPORT_SYMBOL(lookup_noperm_unlocked);
/*
- * Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT)
+ * Like lookup_noperm_unlocked(), except that it yields ERR_PTR(-ENOENT)
* on negatives. Returns known positive or ERR_PTR(); that's what
* most of the users want. Note that pinned negative with unlocked parent
- * _can_ become positive at any time, so callers of lookup_one_len_unlocked()
+ * _can_ become positive at any time, so callers of lookup_noperm_unlocked()
* need to be very careful; pinned positives have ->d_inode stable, so
* this one avoids such problems.
*/
-struct dentry *lookup_positive_unlocked(const char *name,
- struct dentry *base, int len)
+struct dentry *lookup_noperm_positive_unlocked(struct qstr *name,
+ struct dentry *base)
{
- struct dentry *ret = lookup_one_len_unlocked(name, base, len);
+ struct dentry *ret;
+
+ ret = lookup_noperm_unlocked(name, base);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
dput(ret);
ret = ERR_PTR(-ENOENT);
}
return ret;
}
-EXPORT_SYMBOL(lookup_positive_unlocked);
+EXPORT_SYMBOL(lookup_noperm_positive_unlocked);
+
+/**
+ * start_creating - prepare to create a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Locks are taken and a lookup is performed prior to creating
+ * an object in a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name already exists, a positive dentry is returned, so
+ * behaviour is similar to O_CREAT without O_EXCL, which doesn't fail
+ * with -EEXIST.
+ *
+ * Returns: a negative or positive dentry, or an error.
+ */
+struct dentry *start_creating(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, LOOKUP_CREATE);
+}
+EXPORT_SYMBOL(start_creating);
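/*
 * Create-side lifecycle sketch (illustrative).  end_creating() is the
 * create-side counterpart of end_dirop(), and the delegation retry loop
 * mirrors the one used on the open path below.
 *
 *	struct delegated_inode di = { };
 *	struct dentry *de;
 *	int err;
 *
 * retry:
 *	de = start_creating(idmap, parent, &name);
 *	if (IS_ERR(de))
 *		return PTR_ERR(de);
 *	err = vfs_create(idmap, de, 0644, &di);
 *	end_creating(de);
 *	if (is_delegated(&di)) {
 *		err = break_deleg_wait(&di);
 *		if (!err)
 *			goto retry;
 *	}
 *	return err;
 */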
+
+/**
+ * start_removing - prepare to remove a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to find the name
+ * @name: the name to be removed
+ *
+ * Locks are taken and a lookup is performed prior to removing
+ * an object from a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name doesn't exist, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * Returns: a positive dentry, or an error.
+ */
+struct dentry *start_removing(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, 0);
+}
+EXPORT_SYMBOL(start_removing);
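/*
 * Removal-side sketch (illustrative): the positive dentry returned by
 * start_removing() remains protected against concurrent dirops until
 * end_removing() is called, whether or not the removal succeeded.
 *
 *	struct dentry *victim = start_removing(idmap, parent, &name);
 *
 *	if (IS_ERR(victim))
 *		return PTR_ERR(victim);
 *	err = ...appropriate vfs removal helper on victim...;
 *	end_removing(victim);
 *	return err;
 */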
+
+/**
+ * start_creating_killable - prepare to create a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Locks are taken and a lookup is performed prior to creating
+ * an object in a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name already exists, a positive dentry is returned.
+ *
+ * If a fatal signal is received or was already pending, the function
+ * aborts with -EINTR.
+ *
+ * Returns: a negative or positive dentry, or an error.
+ */
+struct dentry *start_creating_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return __start_dirop(parent, name, LOOKUP_CREATE, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(start_creating_killable);
+
+/**
+ * start_removing_killable - prepare to remove a given name with permission checking
+ * @idmap: idmap of the mount
+ * @parent: directory in which to find the name
+ * @name: the name to be removed
+ *
+ * Locks are taken and a lookup is performed prior to removing
+ * an object from a directory. Permission checking (MAY_EXEC) is performed
+ * against @idmap.
+ *
+ * If the name doesn't exist, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * If a fatal signal is received or was already pending, the function
+ * aborts with -EINTR.
+ *
+ * Returns: a positive dentry, or an error.
+ */
+struct dentry *start_removing_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_one_common(idmap, name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return __start_dirop(parent, name, 0, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(start_removing_killable);
+
+/**
+ * start_creating_noperm - prepare to create a given name without permission checking
+ * @parent: directory in which to prepare to create the name
+ * @name: the name to be created
+ *
+ * Locks are taken and a lookup is performed prior to creating
+ * an object in a directory.
+ *
+ * If the name already exists, a positive dentry is returned.
+ *
+ * Returns: a negative or positive dentry, or an error.
+ */
+struct dentry *start_creating_noperm(struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_noperm_common(name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, LOOKUP_CREATE);
+}
+EXPORT_SYMBOL(start_creating_noperm);
+
+/**
+ * start_removing_noperm - prepare to remove a given name without permission checking
+ * @parent: directory in which to find the name
+ * @name: the name to be removed
+ *
+ * Locks are taken and a lookup is performed prior to removing
+ * an object from a directory.
+ *
+ * If the name doesn't exist, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * Returns: a positive dentry, or an error.
+ */
+struct dentry *start_removing_noperm(struct dentry *parent,
+ struct qstr *name)
+{
+ int err = lookup_noperm_common(name, parent);
+
+ if (err)
+ return ERR_PTR(err);
+ return start_dirop(parent, name, 0);
+}
+EXPORT_SYMBOL(start_removing_noperm);
+
+/**
+ * start_creating_dentry - prepare to create a given dentry
+ * @parent: directory in which the dentry is to be created
+ * @child: the dentry to be created
+ *
+ * A lock is taken to protect the dentry against other dirops and
+ * the validity of the dentry is checked: correct parent and still hashed.
+ *
+ * If the dentry is valid and negative, a reference is taken and
+ * returned.  If not, an error is returned.
+ *
+ * end_creating() should be called when creation is complete, or aborted.
+ *
+ * Returns: the valid dentry, or an error.
+ */
+struct dentry *start_creating_dentry(struct dentry *parent,
+ struct dentry *child)
+{
+ inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
+ if (unlikely(IS_DEADDIR(parent->d_inode) ||
+ child->d_parent != parent ||
+ d_unhashed(child))) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-EINVAL);
+ }
+ if (d_is_positive(child)) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-EEXIST);
+ }
+ return dget(child);
+}
+EXPORT_SYMBOL(start_creating_dentry);
+
+/**
+ * start_removing_dentry - prepare to remove a given dentry
+ * @parent: directory from which dentry should be removed
+ * @child: the dentry to be removed
+ *
+ * A lock is taken to protect the dentry against other dirops and
+ * the validity of the dentry is checked: correct parent and still hashed.
+ *
+ * If the dentry is valid and positive, a reference is taken and
+ * returned.  If not, an error is returned.
+ *
+ * end_removing() should be called when removal is complete, or aborted.
+ *
+ * Returns: the valid dentry, or an error.
+ */
+struct dentry *start_removing_dentry(struct dentry *parent,
+ struct dentry *child)
+{
+ inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
+ if (unlikely(IS_DEADDIR(parent->d_inode) ||
+ child->d_parent != parent ||
+ d_unhashed(child))) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-EINVAL);
+ }
+ if (d_is_negative(child)) {
+ inode_unlock(parent->d_inode);
+ return ERR_PTR(-ENOENT);
+ }
+ return dget(child);
+}
+EXPORT_SYMBOL(start_removing_dentry);
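/*
 * Sketch for the dentry-based variants (illustrative): the caller already
 * holds a dentry, e.g. from an earlier unlocked lookup, and only needs it
 * re-validated and locked against other dirops.
 *
 *	struct dentry *de = start_removing_dentry(parent, child);
 *
 *	if (IS_ERR(de))		// -EINVAL if moved/unhashed, -ENOENT if negative
 *		return PTR_ERR(de);
 *	...remove de while parent->d_inode remains locked...
 *	end_removing(de);
 *	return 0;
 */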
#ifdef CONFIG_UNIX98_PTYS
int path_pts(struct path *path)
@@ -2784,37 +3550,37 @@ int path_pts(struct path *path)
dput(path->dentry);
path->dentry = parent;
child = d_hash_and_lookup(parent, &this);
- if (!child)
+ if (IS_ERR_OR_NULL(child))
return -ENOENT;
path->dentry = child;
dput(parent);
- follow_down(path);
+ follow_down(path, 0);
return 0;
}
#endif
-int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
- struct path *path, int *empty)
+int user_path_at(int dfd, const char __user *name, unsigned flags,
+ struct path *path)
{
- struct filename *filename = getname_flags(name, flags, empty);
+ struct filename *filename = getname_flags(name, flags);
int ret = filename_lookup(dfd, filename, flags, path, NULL);
putname(filename);
return ret;
}
-EXPORT_SYMBOL(user_path_at_empty);
+EXPORT_SYMBOL(user_path_at);
-int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir,
+int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode)
{
kuid_t fsuid = current_fsuid();
- if (uid_eq(i_uid_into_mnt(mnt_userns, inode), fsuid))
+ if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), fsuid))
return 0;
- if (uid_eq(i_uid_into_mnt(mnt_userns, dir), fsuid))
+ if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, dir), fsuid))
return 0;
- return !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FOWNER);
+ return !capable_wrt_inode_uidgid(idmap, inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);
@@ -2838,7 +3604,7 @@ EXPORT_SYMBOL(__check_sticky);
* 11. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
-static int may_delete(struct user_namespace *mnt_userns, struct inode *dir,
+static int may_delete(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *victim, bool isdir)
{
struct inode *inode = d_backing_inode(victim);
@@ -2851,21 +3617,21 @@ static int may_delete(struct user_namespace *mnt_userns, struct inode *dir,
BUG_ON(victim->d_parent->d_inode != dir);
/* Inode writeback is not safe when the uid or gid are invalid. */
- if (!uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
- !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
+ if (!vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode)))
return -EOVERFLOW;
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
- error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
- if (check_sticky(mnt_userns, dir, inode) || IS_APPEND(inode) ||
+ if (check_sticky(idmap, dir, inode) || IS_APPEND(inode) ||
IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) ||
- HAS_UNMAPPED_ID(mnt_userns, inode))
+ HAS_UNMAPPED_ID(idmap, inode))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
@@ -2890,7 +3656,7 @@ static int may_delete(struct user_namespace *mnt_userns, struct inode *dir,
* 4. We should have write and exec permissions on dir
* 5. We can't do it if dir is immutable (done in permission())
*/
-static inline int may_create(struct user_namespace *mnt_userns,
+static inline int may_create(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *child)
{
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
@@ -2898,10 +3664,43 @@ static inline int may_create(struct user_namespace *mnt_userns,
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
- if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
+ if (!fsuidgid_has_mapping(dir->i_sb, idmap))
return -EOVERFLOW;
- return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
+ return inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
+}
+
+// p1 != p2, both are on the same filesystem, ->s_vfs_rename_mutex is held
+static struct dentry *lock_two_directories(struct dentry *p1, struct dentry *p2)
+{
+ struct dentry *p = p1, *q = p2, *r;
+
+ while ((r = p->d_parent) != p2 && r != p)
+ p = r;
+ if (r == p2) {
+ // p is a child of p2 and an ancestor of p1 or p1 itself
+ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
+ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT2);
+ return p;
+ }
+ // p is the root of connected component that contains p1
+ // p2 does not occur on the path from p to p1
+ while ((r = q->d_parent) != p1 && r != p && r != q)
+ q = r;
+ if (r == p1) {
+ // q is a child of p1 and an ancestor of p2 or p2 itself
+ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
+ return q;
+ } else if (likely(r == p)) {
+ // both p2 and p1 are descendants of p
+ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
+ return NULL;
+ } else { // no common ancestor at the time we'd been called
+ mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
+ return ERR_PTR(-EXDEV);
+ }
}
/*
@@ -2909,34 +3708,57 @@ static inline int may_create(struct user_namespace *mnt_userns,
*/
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
- struct dentry *p;
-
if (p1 == p2) {
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
return NULL;
}
mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
+ return lock_two_directories(p1, p2);
+}
+EXPORT_SYMBOL(lock_rename);
- p = d_ancestor(p2, p1);
- if (p) {
+/*
+ * c1 and p2 should be on the same fs.
+ */
+struct dentry *lock_rename_child(struct dentry *c1, struct dentry *p2)
+{
+ if (READ_ONCE(c1->d_parent) == p2) {
+ /*
+ * hopefully won't need to touch ->s_vfs_rename_mutex at all.
+ */
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
- inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
- return p;
- }
+ /*
+ * now that p2 is locked, nobody can move in or out of it,
+ * so the test below is safe.
+ */
+ if (likely(c1->d_parent == p2))
+ return NULL;
- p = d_ancestor(p1, p2);
- if (p) {
- inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
- inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
- return p;
+ /*
+ * c1 got moved out of p2 while we'd been taking locks;
+ * unlock and fall back to slow case.
+ */
+ inode_unlock(p2->d_inode);
}
- inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
- inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
+ mutex_lock(&c1->d_sb->s_vfs_rename_mutex);
+ /*
+ * nobody can move out of any directories on this fs.
+ */
+ if (likely(c1->d_parent != p2))
+ return lock_two_directories(c1->d_parent, p2);
+
+ /*
+ * c1 got moved into p2 while we were taking locks;
+ * we need p2 locked and ->s_vfs_rename_mutex unlocked,
+ * for consistency with lock_rename().
+ */
+ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
+ mutex_unlock(&c1->d_sb->s_vfs_rename_mutex);
return NULL;
}
-EXPORT_SYMBOL(lock_rename);
+EXPORT_SYMBOL(lock_rename_child);
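/*
 * Sketch of how callers use the returned "trap" dentry (compare
 * __start_renaming() below): after both parents are locked, finding the
 * trap as the source or the target means one is an ancestor of the other
 * and the rename must be refused.
 *
 *	trap = lock_rename(old_dir, new_dir);
 *	if (IS_ERR(trap))
 *		return PTR_ERR(trap);	// -EXDEV: no common ancestor
 *	...look up old_dentry and new_dentry...
 *	if (old_dentry == trap || new_dentry == trap)
 *		...fail with -EINVAL or -ENOTEMPTY...
 *	...perform the rename...
 *	unlock_rename(old_dir, new_dir);
 */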
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
@@ -2949,36 +3771,365 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
EXPORT_SYMBOL(unlock_rename);
/**
+ * __start_renaming - lookup and lock names for rename
+ * @rd: rename data containing parents and flags, and
+ * for receiving found dentries
+ * @lookup_flags: extra flags to pass to ->lookup (e.g. LOOKUP_REVAL,
+ * LOOKUP_NO_SYMLINKS etc).
+ * @old_last: name of object in @rd.old_parent
+ * @new_last: name of object in @rd.new_parent
+ *
+ * Look up two names and ensure locks are in place for
+ * rename.
+ *
+ * On success the found dentries are stored in @rd.old_dentry,
+ * @rd.new_dentry and an extra ref is taken on @rd.old_parent.
+ * These references and the lock are dropped by end_renaming().
+ *
+ * The passed in qstrs must have the hash calculated, and no permission
+ * checking is performed.
+ *
+ * Returns: zero or an error.
+ */
+static int
+__start_renaming(struct renamedata *rd, int lookup_flags,
+ struct qstr *old_last, struct qstr *new_last)
+{
+ struct dentry *trap;
+ struct dentry *d1, *d2;
+ int target_flags = LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
+ int err;
+
+ if (rd->flags & RENAME_EXCHANGE)
+ target_flags = 0;
+ if (rd->flags & RENAME_NOREPLACE)
+ target_flags |= LOOKUP_EXCL;
+
+ trap = lock_rename(rd->old_parent, rd->new_parent);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+
+ d1 = lookup_one_qstr_excl(old_last, rd->old_parent,
+ lookup_flags);
+ err = PTR_ERR(d1);
+ if (IS_ERR(d1))
+ goto out_unlock;
+
+ d2 = lookup_one_qstr_excl(new_last, rd->new_parent,
+ lookup_flags | target_flags);
+ err = PTR_ERR(d2);
+ if (IS_ERR(d2))
+ goto out_dput_d1;
+
+ if (d1 == trap) {
+ /* source is an ancestor of target */
+ err = -EINVAL;
+ goto out_dput_d2;
+ }
+
+ if (d2 == trap) {
+ /* target is an ancestor of source */
+ if (rd->flags & RENAME_EXCHANGE)
+ err = -EINVAL;
+ else
+ err = -ENOTEMPTY;
+ goto out_dput_d2;
+ }
+
+ rd->old_dentry = d1;
+ rd->new_dentry = d2;
+ dget(rd->old_parent);
+ return 0;
+
+out_dput_d2:
+ dput(d2);
+out_dput_d1:
+ dput(d1);
+out_unlock:
+ unlock_rename(rd->old_parent, rd->new_parent);
+ return err;
+}
+
+/**
+ * start_renaming - lookup and lock names for rename with permission checking
+ * @rd: rename data containing parents and flags, and
+ * for receiving found dentries
+ * @lookup_flags: extra flags to pass to ->lookup (e.g. LOOKUP_REVAL,
+ * LOOKUP_NO_SYMLINKS etc).
+ * @old_last: name of object in @rd.old_parent
+ * @new_last: name of object in @rd.new_parent
+ *
+ * Look up two names and ensure locks are in place for
+ * rename.
+ *
+ * On success the found dentries are stored in @rd.old_dentry,
+ * @rd.new_dentry. Also the refcount on @rd.old_parent is increased.
+ * These references and the lock are dropped by end_renaming().
+ *
+ * The passed in qstrs need not have the hash calculated, and basic
+ * eXecute permission checking is performed against @rd.mnt_idmap.
+ *
+ * Returns: zero or an error.
+ */
+int start_renaming(struct renamedata *rd, int lookup_flags,
+ struct qstr *old_last, struct qstr *new_last)
+{
+ int err;
+
+ err = lookup_one_common(rd->mnt_idmap, old_last, rd->old_parent);
+ if (err)
+ return err;
+ err = lookup_one_common(rd->mnt_idmap, new_last, rd->new_parent);
+ if (err)
+ return err;
+ return __start_renaming(rd, lookup_flags, old_last, new_last);
+}
+EXPORT_SYMBOL(start_renaming);
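/*
 * Sketch: a kernel-internal rename using the start/end helpers.  Only the
 * renamedata fields consumed in this file are shown; vfs_rename() is
 * assumed to keep taking the populated renamedata as before.
 *
 *	struct renamedata rd = {
 *		.mnt_idmap  = idmap,
 *		.old_parent = old_dir,
 *		.new_parent = new_dir,
 *		.flags      = 0,
 *	};
 *	int err;
 *
 *	err = start_renaming(&rd, 0, &old_name, &new_name);
 *	if (err)
 *		return err;
 *	err = vfs_rename(&rd);
 *	end_renaming(&rd);
 *	return err;
 */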
+
+static int
+__start_renaming_dentry(struct renamedata *rd, int lookup_flags,
+ struct dentry *old_dentry, struct qstr *new_last)
+{
+ struct dentry *trap;
+ struct dentry *d2;
+ int target_flags = LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
+ int err;
+
+ if (rd->flags & RENAME_EXCHANGE)
+ target_flags = 0;
+ if (rd->flags & RENAME_NOREPLACE)
+ target_flags |= LOOKUP_EXCL;
+
+ /* Already have the dentry - need to be sure to lock the correct parent */
+ trap = lock_rename_child(old_dentry, rd->new_parent);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ if (d_unhashed(old_dentry) ||
+ (rd->old_parent && rd->old_parent != old_dentry->d_parent)) {
+ /* dentry was removed, or moved and explicit parent requested */
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ d2 = lookup_one_qstr_excl(new_last, rd->new_parent,
+ lookup_flags | target_flags);
+ err = PTR_ERR(d2);
+ if (IS_ERR(d2))
+ goto out_unlock;
+
+ if (old_dentry == trap) {
+ /* source is an ancestor of target */
+ err = -EINVAL;
+ goto out_dput_d2;
+ }
+
+ if (d2 == trap) {
+ /* target is an ancestor of source */
+ if (rd->flags & RENAME_EXCHANGE)
+ err = -EINVAL;
+ else
+ err = -ENOTEMPTY;
+ goto out_dput_d2;
+ }
+
+ rd->old_dentry = dget(old_dentry);
+ rd->new_dentry = d2;
+ rd->old_parent = dget(old_dentry->d_parent);
+ return 0;
+
+out_dput_d2:
+ dput(d2);
+out_unlock:
+ unlock_rename(old_dentry->d_parent, rd->new_parent);
+ return err;
+}
+
+/**
+ * start_renaming_dentry - lookup and lock name for rename with permission checking
+ * @rd: rename data containing parents and flags, and
+ * for receiving found dentries
+ * @lookup_flags: extra flags to pass to ->lookup (e.g. LOOKUP_REVAL,
+ * LOOKUP_NO_SYMLINKS etc).
+ * @old_dentry: dentry of name to move
+ * @new_last: name of target in @rd.new_parent
+ *
+ * Look up target name and ensure locks are in place for
+ * rename.
+ *
+ * On success the found dentry is stored in @rd.new_dentry and
+ * @rd.old_parent is confirmed to be the parent of @old_dentry. If it
+ * was originally %NULL, it is set. In either case a reference is taken
+ * so that end_renaming() can have a stable reference to unlock.
+ *
+ * References and the lock can be dropped with end_renaming()
+ *
+ * The passed in qstr need not have the hash calculated, and basic
+ * eXecute permission checking is performed against @rd.mnt_idmap.
+ *
+ * Returns: zero or an error.
+ */
+int start_renaming_dentry(struct renamedata *rd, int lookup_flags,
+ struct dentry *old_dentry, struct qstr *new_last)
+{
+ int err;
+
+ err = lookup_one_common(rd->mnt_idmap, new_last, rd->new_parent);
+ if (err)
+ return err;
+ return __start_renaming_dentry(rd, lookup_flags, old_dentry, new_last);
+}
+EXPORT_SYMBOL(start_renaming_dentry);
+
+/**
+ * start_renaming_two_dentries - lock two dentries in given parents for rename
+ * @rd: rename data containing parents and flags
+ * @old_dentry: dentry of name to move
+ * @new_dentry: dentry to move to
+ *
+ * Ensure locks are in place for rename and check parentage is still correct.
+ *
+ * On success the two dentries are stored in @rd.old_dentry and
+ * @rd.new_dentry and @rd.old_parent and @rd.new_parent are confirmed to
+ * be the parents of the dentries.
+ *
+ * References and the lock can be dropped with end_renaming()
+ *
+ * Returns: zero or an error.
+ */
+int
+start_renaming_two_dentries(struct renamedata *rd,
+ struct dentry *old_dentry, struct dentry *new_dentry)
+{
+ struct dentry *trap;
+ int err;
+
+ /* Already have the dentry - need to be sure to lock the correct parent */
+ trap = lock_rename_child(old_dentry, rd->new_parent);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ err = -EINVAL;
+ if (d_unhashed(old_dentry) ||
+ (rd->old_parent && rd->old_parent != old_dentry->d_parent))
+ /* old_dentry was removed, or moved and explicit parent requested */
+ goto out_unlock;
+ if (d_unhashed(new_dentry) ||
+ rd->new_parent != new_dentry->d_parent)
+ /* new_dentry was removed or moved */
+ goto out_unlock;
+
+ if (old_dentry == trap)
+ /* source is an ancestor of target */
+ goto out_unlock;
+
+ if (new_dentry == trap) {
+ /* target is an ancestor of source */
+ if (rd->flags & RENAME_EXCHANGE)
+ err = -EINVAL;
+ else
+ err = -ENOTEMPTY;
+ goto out_unlock;
+ }
+
+ err = -EEXIST;
+ if (d_is_positive(new_dentry) && (rd->flags & RENAME_NOREPLACE))
+ goto out_unlock;
+
+ rd->old_dentry = dget(old_dentry);
+ rd->new_dentry = dget(new_dentry);
+ rd->old_parent = dget(old_dentry->d_parent);
+ return 0;
+
+out_unlock:
+ unlock_rename(old_dentry->d_parent, rd->new_parent);
+ return err;
+}
+EXPORT_SYMBOL(start_renaming_two_dentries);
+
+void end_renaming(struct renamedata *rd)
+{
+ unlock_rename(rd->old_parent, rd->new_parent);
+ dput(rd->old_dentry);
+ dput(rd->new_dentry);
+ dput(rd->old_parent);
+}
+EXPORT_SYMBOL(end_renaming);
+
+/**
+ * vfs_prepare_mode - prepare the mode to be used for a new inode
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode
+ * @mask_perms: allowed permission by the vfs
+ * @type: type of file to be created
+ *
+ * This helper consolidates and enforces vfs restrictions on the @mode of a new
+ * object to be created.
+ *
+ * Umask stripping depends on whether the filesystem supports POSIX ACLs (see
+ * the kernel documentation for mode_strip_umask()). Moving umask stripping
+ * after setgid stripping allows the same ordering for both non-POSIX ACL and
+ * POSIX ACL supporting filesystems.
+ *
+ * Note that it's currently valid for @type to be 0 if a directory is created.
+ * Filesystems raise that flag individually and we need to check whether each
+ * filesystem can deal with receiving S_IFDIR from the vfs before we enforce a
+ * non-zero type.
+ *
+ * Returns: mode to be passed to the filesystem
+ */
+static inline umode_t vfs_prepare_mode(struct mnt_idmap *idmap,
+ const struct inode *dir, umode_t mode,
+ umode_t mask_perms, umode_t type)
+{
+ mode = mode_strip_sgid(idmap, dir, mode);
+ mode = mode_strip_umask(dir, mode);
+
+ /*
+ * Apply the vfs mandated allowed permission mask and set the type of
+ * file to be created before we call into the filesystem.
+ */
+ mode &= (mask_perms & ~S_IFMT);
+ mode |= (type & S_IFMT);
+
+ return mode;
+}
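/*
 * Worked example (sketch): a regular-file create requesting 0666 on a
 * filesystem without POSIX ACL support, with the caller's umask set to
 * 022, reaches the filesystem as S_IFREG | 0644 - the umask is applied
 * here, S_IALLUGO keeps only permission bits, and the type is forced:
 *
 *	mode = vfs_prepare_mode(idmap, dir, 0666, S_IALLUGO, S_IFREG);
 */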
+
+/**
* vfs_create - create new file
- * @mnt_userns: user namespace of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
- * @mode: mode of the new file
- * @want_excl: whether the file must not yet exist
+ * @idmap: idmap of the mount the inode was found from
+ * @dentry: dentry of the child file
+ * @mode: mode of the child file
+ * @di: returns parent inode, if the inode is delegated.
*
* Create a new file.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool want_excl)
+int vfs_create(struct mnt_idmap *idmap, struct dentry *dentry, umode_t mode,
+ struct delegated_inode *di)
{
- int error = may_create(mnt_userns, dir, dentry);
+ struct inode *dir = d_inode(dentry->d_parent);
+ int error;
+
+ error = may_create(idmap, dir, dentry);
if (error)
return error;
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
- mode &= S_IALLUGO;
- mode |= S_IFREG;
+
+ mode = vfs_prepare_mode(idmap, dir, mode, S_IALLUGO, S_IFREG);
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
- error = dir->i_op->create(mnt_userns, dir, dentry, mode, want_excl);
+ error = try_break_deleg(dir, di);
+ if (error)
+ return error;
+ error = dir->i_op->create(idmap, dir, dentry, mode, true);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -2990,7 +4141,7 @@ int vfs_mkobj(struct dentry *dentry, umode_t mode,
void *arg)
{
struct inode *dir = dentry->d_parent->d_inode;
- int error = may_create(&init_user_ns, dir, dentry);
+ int error = may_create(&nop_mnt_idmap, dir, dentry);
if (error)
return error;
@@ -3012,7 +4163,7 @@ bool may_open_dev(const struct path *path)
!(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
}
-static int may_open(struct user_namespace *mnt_userns, const struct path *path,
+static int may_open(struct mnt_idmap *idmap, const struct path *path,
int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
@@ -3046,9 +4197,11 @@ static int may_open(struct user_namespace *mnt_userns, const struct path *path,
if ((acc_mode & MAY_EXEC) && path_noexec(path))
return -EACCES;
break;
+ default:
+ VFS_BUG_ON_INODE(!IS_ANON_FILE(inode), inode);
}
- error = inode_permission(mnt_userns, inode, MAY_OPEN | acc_mode);
+ error = inode_permission(idmap, inode, MAY_OPEN | acc_mode);
if (error)
return error;
@@ -3063,13 +4216,13 @@ static int may_open(struct user_namespace *mnt_userns, const struct path *path,
}
/* O_NOATIME can only be set by the owner or superuser */
- if (flag & O_NOATIME && !inode_owner_or_capable(mnt_userns, inode))
+ if (flag & O_NOATIME && !inode_owner_or_capable(idmap, inode))
return -EPERM;
return 0;
}
-static int handle_truncate(struct user_namespace *mnt_userns, struct file *filp)
+static int handle_truncate(struct mnt_idmap *idmap, struct file *filp)
{
const struct path *path = &filp->f_path;
struct inode *inode = path->dentry->d_inode;
@@ -3077,9 +4230,9 @@ static int handle_truncate(struct user_namespace *mnt_userns, struct file *filp)
if (error)
return error;
- error = security_path_truncate(path);
+ error = security_file_truncate(filp);
if (!error) {
- error = do_truncate(mnt_userns, path->dentry, 0,
+ error = do_truncate(idmap, path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
@@ -3094,7 +4247,7 @@ static inline int open_to_namei_flags(int flag)
return flag;
}
-static int may_o_create(struct user_namespace *mnt_userns,
+static int may_o_create(struct mnt_idmap *idmap,
const struct path *dir, struct dentry *dentry,
umode_t mode)
{
@@ -3102,10 +4255,10 @@ static int may_o_create(struct user_namespace *mnt_userns,
if (error)
return error;
- if (!fsuidgid_has_mapping(dir->dentry->d_sb, mnt_userns))
+ if (!fsuidgid_has_mapping(dir->dentry->d_sb, idmap))
return -EOVERFLOW;
- error = inode_permission(mnt_userns, dir->dentry->d_inode,
+ error = inode_permission(idmap, dir->dentry->d_inode,
MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -3137,8 +4290,8 @@ static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
if (nd->flags & LOOKUP_DIRECTORY)
open_flag |= O_DIRECTORY;
- file->f_path.dentry = DENTRY_NOT_SET;
- file->f_path.mnt = nd->path.mnt;
+ file->__f_path.dentry = DENTRY_NOT_SET;
+ file->__f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
@@ -3183,9 +4336,9 @@ static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
*/
static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
const struct open_flags *op,
- bool got_write)
+ bool got_write, struct delegated_inode *delegated_inode)
{
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *idmap;
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
int open_flag = op->open_flag;
@@ -3208,7 +4361,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
if (d_in_lookup(dentry))
break;
- error = d_revalidate(dentry, nd->flags);
+ error = d_revalidate(dir_inode, &nd->last, dentry, nd->flags);
if (likely(error > 0))
break;
if (error)
@@ -3222,6 +4375,9 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
return dentry;
}
+ if (open_flag & O_CREAT)
+ audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
+
/*
* Checking write permission is tricky, because we don't know if we are
* going to actually need it: O_CREAT opens should work as long as the
@@ -3233,14 +4389,13 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
*/
if (unlikely(!got_write))
open_flag &= ~O_TRUNC;
- mnt_userns = mnt_user_ns(nd->path.mnt);
+ idmap = mnt_idmap(nd->path.mnt);
if (open_flag & O_CREAT) {
if (open_flag & O_EXCL)
open_flag &= ~O_TRUNC;
- if (!IS_POSIXACL(dir->d_inode))
- mode &= ~current_umask();
+ mode = vfs_prepare_mode(idmap, dir->d_inode, mode, mode, mode);
if (likely(got_write))
- create_error = may_o_create(mnt_userns, &nd->path,
+ create_error = may_o_create(idmap, &nd->path,
dentry, mode);
else
create_error = -EROFS;
@@ -3270,6 +4425,11 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
+ /* but break the directory lease first! */
+ error = try_break_deleg(dir_inode, delegated_inode);
+ if (error)
+ goto out_dput;
+
file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
@@ -3277,7 +4437,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
goto out_dput;
}
- error = dir_inode->i_op->create(mnt_userns, dir_inode, dentry,
+ error = dir_inode->i_op->create(idmap, dir_inode, dentry,
mode, open_flag & O_EXCL);
if (error)
goto out_dput;
@@ -3293,14 +4453,49 @@ out_dput:
return ERR_PTR(error);
}
+static inline bool trailing_slashes(struct nameidata *nd)
+{
+ return (bool)nd->last.name[nd->last.len];
+}
+
+static struct dentry *lookup_fast_for_open(struct nameidata *nd, int open_flag)
+{
+ struct dentry *dentry;
+
+ if (open_flag & O_CREAT) {
+ if (trailing_slashes(nd))
+ return ERR_PTR(-EISDIR);
+
+ /* Don't bother on an O_EXCL create */
+ if (open_flag & O_EXCL)
+ return NULL;
+ }
+
+ if (trailing_slashes(nd))
+ nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+
+ dentry = lookup_fast(nd);
+ if (IS_ERR_OR_NULL(dentry))
+ return dentry;
+
+ if (open_flag & O_CREAT) {
+ /* Discard negative dentries. Need inode_lock to do the create */
+ if (!dentry->d_inode) {
+ if (!(nd->flags & LOOKUP_RCU))
+ dput(dentry);
+ dentry = NULL;
+ }
+ }
+ return dentry;
+}
+
static const char *open_last_lookups(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
+ struct delegated_inode delegated_inode = { };
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool got_write = false;
- unsigned seq;
- struct inode *inode;
struct dentry *dentry;
const char *res;
@@ -3312,29 +4507,24 @@ static const char *open_last_lookups(struct nameidata *nd,
return handle_dots(nd, nd->last_type);
}
- if (!(open_flag & O_CREAT)) {
- if (nd->last.name[nd->last.len])
- nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
- /* we _can_ be in RCU mode here */
- dentry = lookup_fast(nd, &inode, &seq);
- if (IS_ERR(dentry))
- return ERR_CAST(dentry);
- if (likely(dentry))
- goto finish_lookup;
+ /* We _can_ be in RCU mode here */
+ dentry = lookup_fast_for_open(nd, open_flag);
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
- BUG_ON(nd->flags & LOOKUP_RCU);
+ if (likely(dentry))
+ goto finish_lookup;
+
+ if (!(open_flag & O_CREAT)) {
+ if (WARN_ON_ONCE(nd->flags & LOOKUP_RCU))
+ return ERR_PTR(-ECHILD);
} else {
- /* create side of things */
if (nd->flags & LOOKUP_RCU) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
}
- audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
- /* trailing slashes? */
- if (unlikely(nd->last.name[nd->last.len]))
- return ERR_PTR(-EISDIR);
}
-
+retry:
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
got_write = !mnt_want_write(nd->path.mnt);
/*
@@ -3347,9 +4537,13 @@ static const char *open_last_lookups(struct nameidata *nd,
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
- dentry = lookup_open(nd, file, op, got_write);
- if (!IS_ERR(dentry) && (file->f_mode & FMODE_CREATED))
- fsnotify_create(dir->d_inode, dentry);
+ dentry = lookup_open(nd, file, op, got_write, &delegated_inode);
+ if (!IS_ERR(dentry)) {
+ if (file->f_mode & FMODE_CREATED)
+ fsnotify_create(dir->d_inode, dentry);
+ if (file->f_mode & FMODE_OPENED)
+ fsnotify_open(file);
+ }
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
@@ -3358,8 +4552,16 @@ static const char *open_last_lookups(struct nameidata *nd,
if (got_write)
mnt_drop_write(nd->path.mnt);
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ if (is_delegated(&delegated_inode)) {
+ int error = break_deleg_wait(&delegated_inode);
+
+ if (!error)
+ goto retry;
+ return ERR_PTR(error);
+ }
return ERR_CAST(dentry);
+ }
if (file->f_mode & (FMODE_OPENED | FMODE_CREATED)) {
dput(nd->path.dentry);
@@ -3370,7 +4572,7 @@ static const char *open_last_lookups(struct nameidata *nd,
finish_lookup:
if (nd->depth)
put_link(nd);
- res = step_into(nd, WALK_TRAILING, dentry, inode, seq);
+ res = step_into(nd, WALK_TRAILING, dentry);
if (unlikely(res))
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
return res;
@@ -3382,7 +4584,7 @@ finish_lookup:
static int do_open(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *idmap;
int open_flag = op->open_flag;
bool do_truncate;
int acc_mode;
@@ -3395,13 +4597,13 @@ static int do_open(struct nameidata *nd,
}
if (!(file->f_mode & FMODE_CREATED))
audit_inode(nd->name, nd->path.dentry, 0);
- mnt_userns = mnt_user_ns(nd->path.mnt);
+ idmap = mnt_idmap(nd->path.mnt);
if (open_flag & O_CREAT) {
if ((open_flag & O_EXCL) && !(file->f_mode & FMODE_CREATED))
return -EEXIST;
if (d_is_dir(nd->path.dentry))
return -EISDIR;
- error = may_create_in_sticky(mnt_userns, nd,
+ error = may_create_in_sticky(idmap, nd,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
return error;
@@ -3421,13 +4623,13 @@ static int do_open(struct nameidata *nd,
return error;
do_truncate = true;
}
- error = may_open(mnt_userns, &nd->path, acc_mode, open_flag);
+ error = may_open(idmap, &nd->path, acc_mode, open_flag);
if (!error && !(file->f_mode & FMODE_OPENED))
error = vfs_open(&nd->path, file);
if (!error)
- error = ima_file_check(file, op->acc_mode);
+ error = security_file_post_open(file, op->acc_mode);
if (!error && do_truncate)
- error = handle_truncate(mnt_userns, file);
+ error = handle_truncate(idmap, file);
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
@@ -3439,84 +4641,110 @@ static int do_open(struct nameidata *nd,
/**
* vfs_tmpfile - create tmpfile
- * @mnt_userns: user namespace of the mount the inode was found from
- * @dentry: pointer to dentry of the base directory
+ * @idmap: idmap of the mount the inode was found from
+ * @parentpath: pointer to the path of the base directory
+ * @file: file descriptor of the new tmpfile
* @mode: mode of the new tmpfile
- * @open_flag: flags
*
* Create a temporary file.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
- struct dentry *dentry, umode_t mode, int open_flag)
+int vfs_tmpfile(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ struct file *file, umode_t mode)
{
- struct dentry *child = NULL;
- struct inode *dir = dentry->d_inode;
+ struct dentry *child;
+ struct inode *dir = d_inode(parentpath->dentry);
struct inode *inode;
int error;
+ int open_flag = file->f_flags;
/* we want directory to be writable */
- error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
if (error)
- goto out_err;
- error = -EOPNOTSUPP;
+ return error;
if (!dir->i_op->tmpfile)
- goto out_err;
- error = -ENOMEM;
- child = d_alloc(dentry, &slash_name);
+ return -EOPNOTSUPP;
+ child = d_alloc(parentpath->dentry, &slash_name);
if (unlikely(!child))
- goto out_err;
- error = dir->i_op->tmpfile(mnt_userns, dir, child, mode);
+ return -ENOMEM;
+ file->__f_path.mnt = parentpath->mnt;
+ file->__f_path.dentry = child;
+ mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
+ error = dir->i_op->tmpfile(idmap, dir, file, mode);
+ dput(child);
+ if (file->f_mode & FMODE_OPENED)
+ fsnotify_open(file);
if (error)
- goto out_err;
- error = -ENOENT;
- inode = child->d_inode;
- if (unlikely(!inode))
- goto out_err;
+ return error;
+ /* Don't check for other permissions, the inode was just created */
+ error = may_open(idmap, &file->f_path, 0, file->f_flags);
+ if (error)
+ return error;
+ inode = file_inode(file);
if (!(open_flag & O_EXCL)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_LINKABLE;
+ inode_state_set(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
}
- ima_post_create_tmpfile(mnt_userns, inode);
- return child;
+ security_inode_post_create_tmpfile(idmap, inode);
+ return 0;
+}
-out_err:
- dput(child);
- return ERR_PTR(error);
+/**
+ * kernel_tmpfile_open - open a tmpfile for kernel internal use
+ * @idmap: idmap of the mount the inode was found from
+ * @parentpath: path of the base directory
+ * @mode: mode of the new tmpfile
+ * @open_flag: flags
+ * @cred: credentials for open
+ *
+ * Create and open a temporary file. The file is not accounted in nr_files,
+ * hence this is only for kernel internal use, and must not be installed into
+ * file tables or such.
+ */
+struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ umode_t mode, int open_flag,
+ const struct cred *cred)
+{
+ struct file *file;
+ int error;
+
+ file = alloc_empty_file_noaccount(open_flag, cred);
+ if (IS_ERR(file))
+ return file;
+
+ error = vfs_tmpfile(idmap, parentpath, file, mode);
+ if (error) {
+ fput(file);
+ file = ERR_PTR(error);
+ }
+ return file;
}
-EXPORT_SYMBOL(vfs_tmpfile);
+EXPORT_SYMBOL(kernel_tmpfile_open);
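/*
 * Sketch: creating an unnamed temporary file under @dir_path for purely
 * kernel-internal I/O.  dir_path is illustrative and current_cred() is
 * just one plausible choice of credentials; error handling mirrors
 * do_tmpfile() below.
 *
 *	struct file *tmp;
 *
 *	tmp = kernel_tmpfile_open(mnt_idmap(dir_path->mnt), dir_path,
 *				  S_IFREG | 0600, O_RDWR, current_cred());
 *	if (IS_ERR(tmp))
 *		return PTR_ERR(tmp);
 *	...kernel_write()/kernel_read() on tmp...
 *	fput(tmp);
 */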
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
struct file *file)
{
- struct user_namespace *mnt_userns;
- struct dentry *child;
struct path path;
int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
+
if (unlikely(error))
return error;
error = mnt_want_write(path.mnt);
if (unlikely(error))
goto out;
- mnt_userns = mnt_user_ns(path.mnt);
- child = vfs_tmpfile(mnt_userns, path.dentry, op->mode, op->open_flag);
- error = PTR_ERR(child);
- if (IS_ERR(child))
+ error = vfs_tmpfile(mnt_idmap(path.mnt), &path, file, op->mode);
+ if (error)
goto out2;
- dput(path.dentry);
- path.dentry = child;
- audit_inode(nd->name, child, 0);
- /* Don't check for other permissions, the inode was just created */
- error = may_open(mnt_userns, &path, 0, op->open_flag);
- if (!error)
- error = vfs_open(&path, file);
+ audit_inode(nd->name, file->f_path.dentry, 0);
out2:
mnt_drop_write(path.mnt);
out:
@@ -3565,7 +4793,7 @@ static struct file *path_openat(struct nameidata *nd,
WARN_ON(1);
error = -EINVAL;
}
- fput(file);
+ fput_close(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
@@ -3623,18 +4851,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct qstr last;
+ bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
+ unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
+ unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
int type;
- int err2;
int error;
- bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
- /*
- * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
- * other flags passed in are ignored!
- */
- lookup_flags &= LOOKUP_REVAL;
-
- error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+ error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
if (error)
return ERR_PTR(error);
@@ -3646,49 +4869,34 @@ static struct dentry *filename_create(int dfd, struct filename *name,
goto out;
/* don't fail immediately if it's r/o, at least try to report other errors */
- err2 = mnt_want_write(path->mnt);
+ error = mnt_want_write(path->mnt);
/*
- * Do the final lookup.
+ * Do the final lookup. Suppress 'create' if there is a trailing
+ * '/', and a directory wasn't requested.
*/
- lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
- inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
- dentry = __lookup_hash(&last, path->dentry, lookup_flags);
+ if (last.name[last.len] && !want_dir)
+ create_flags &= ~LOOKUP_CREATE;
+ dentry = start_dirop(path->dentry, &last, reval_flag | create_flags);
if (IS_ERR(dentry))
- goto unlock;
+ goto out_drop_write;
- error = -EEXIST;
- if (d_is_positive(dentry))
+ if (unlikely(error))
goto fail;
- /*
- * Special case - lookup gave negative, but... we had foo/bar/
- * From the vfs_mknod() POV we just have a negative dentry -
- * all is fine. Let's be bastards - you had / on the end, you've
- * been asking for (non-existent) directory. -ENOENT for you.
- */
- if (unlikely(!is_dir && last.name[last.len])) {
- error = -ENOENT;
- goto fail;
- }
- if (unlikely(err2)) {
- error = err2;
- goto fail;
- }
return dentry;
fail:
- dput(dentry);
+ end_dirop(dentry);
dentry = ERR_PTR(error);
-unlock:
- inode_unlock(path->dentry->d_inode);
- if (!err2)
+out_drop_write:
+ if (!error)
mnt_drop_write(path->mnt);
out:
path_put(path);
return dentry;
}
-struct dentry *kern_path_create(int dfd, const char *pathname,
- struct path *path, unsigned int lookup_flags)
+struct dentry *start_creating_path(int dfd, const char *pathname,
+ struct path *path, unsigned int lookup_flags)
{
struct filename *filename = getname_kernel(pathname);
struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
@@ -3696,19 +4904,30 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
putname(filename);
return res;
}
-EXPORT_SYMBOL(kern_path_create);
+EXPORT_SYMBOL(start_creating_path);
-void done_path_create(struct path *path, struct dentry *dentry)
+/**
+ * end_creating_path - finish a code section started by start_creating_path()
+ * @path: the path instantiated by start_creating_path()
+ * @dentry: the dentry returned by start_creating_path()
+ *
+ * end_creating_path() will unlock and locks taken by start_creating_path()
+ * and drop an references that were taken. It should only be called
+ * if start_creating_path() returned a non-error.
+ * If vfs_mkdir() was called and it returned an error, that error *should*
+ * be passed to end_creating_path() together with the path.
+ */
+void end_creating_path(const struct path *path, struct dentry *dentry)
{
- dput(dentry);
- inode_unlock(path->dentry->d_inode);
+ end_creating(dentry);
mnt_drop_write(path->mnt);
path_put(path);
}
-EXPORT_SYMBOL(done_path_create);
+EXPORT_SYMBOL(end_creating_path);
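/*
 * Path-based create lifecycle sketch (illustrative).  The vfs_mknod()
 * call follows the signature used in this file; the break_deleg_wait()
 * retry loop is omitted for brevity.
 *
 *	struct delegated_inode di = { };
 *	struct path path;
 *	struct dentry *de;
 *	int err;
 *
 *	de = start_creating_path(AT_FDCWD, "/tmp/example-node", &path, 0);
 *	if (IS_ERR(de))
 *		return PTR_ERR(de);
 *	err = vfs_mknod(mnt_idmap(path.mnt), path.dentry->d_inode, de,
 *			S_IFIFO | 0600, 0, &di);
 *	end_creating_path(&path, de);
 *	return err;
 */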
-inline struct dentry *user_path_create(int dfd, const char __user *pathname,
- struct path *path, unsigned int lookup_flags)
+inline struct dentry *start_creating_user_path(
+ int dfd, const char __user *pathname,
+ struct path *path, unsigned int lookup_flags)
{
struct filename *filename = getname(pathname);
struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
@@ -3716,29 +4935,32 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
putname(filename);
return res;
}
-EXPORT_SYMBOL(user_path_create);
+EXPORT_SYMBOL(start_creating_user_path);
+
/**
* vfs_mknod - create device node or file
- * @mnt_userns: user namespace of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
- * @mode: mode of the new device node or file
- * @dev: device number of device to create
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child device node
+ * @mode: mode of the child device node
+ * @dev: device number of device to create
+ * @delegated_inode: returns parent inode, if the inode is delegated.
*
* Create a device node or file.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t dev)
+int vfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev,
+ struct delegated_inode *delegated_inode)
{
bool is_whiteout = S_ISCHR(mode) && dev == WHITEOUT_DEV;
- int error = may_create(mnt_userns, dir, dentry);
+ int error = may_create(idmap, dir, dentry);
if (error)
return error;
@@ -3750,6 +4972,7 @@ int vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (!dir->i_op->mknod)
return -EPERM;
+ mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
@@ -3758,7 +4981,11 @@ int vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (error)
return error;
- error = dir->i_op->mknod(mnt_userns, dir, dentry, mode, dev);
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ return error;
+
+ error = dir->i_op->mknod(idmap, dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3785,7 +5012,8 @@ static int may_mknod(umode_t mode)
static int do_mknodat(int dfd, struct filename *name, umode_t mode,
unsigned int dev)
{
- struct user_namespace *mnt_userns;
+ struct delegated_inode di = { };
+ struct mnt_idmap *idmap;
struct dentry *dentry;
struct path path;
int error;
@@ -3800,31 +5028,34 @@ retry:
if (IS_ERR(dentry))
goto out1;
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
- error = security_path_mknod(&path, dentry, mode, dev);
+ error = security_path_mknod(&path, dentry,
+ mode_strip_umask(path.dentry->d_inode, mode), dev);
if (error)
goto out2;
- mnt_userns = mnt_user_ns(path.mnt);
+ idmap = mnt_idmap(path.mnt);
switch (mode & S_IFMT) {
case 0: case S_IFREG:
- error = vfs_create(mnt_userns, path.dentry->d_inode,
- dentry, mode, true);
+ error = vfs_create(idmap, dentry, mode, &di);
if (!error)
- ima_post_path_mknod(mnt_userns, dentry);
+ security_path_post_mknod(idmap, dentry);
break;
case S_IFCHR: case S_IFBLK:
- error = vfs_mknod(mnt_userns, path.dentry->d_inode,
- dentry, mode, new_decode_dev(dev));
+ error = vfs_mknod(idmap, path.dentry->d_inode,
+ dentry, mode, new_decode_dev(dev), &di);
break;
case S_IFIFO: case S_IFSOCK:
- error = vfs_mknod(mnt_userns, path.dentry->d_inode,
- dentry, mode, 0);
+ error = vfs_mknod(idmap, path.dentry->d_inode,
+ dentry, mode, 0, &di);
break;
}
out2:
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
+ if (is_delegated(&di)) {
+ error = break_deleg_wait(&di);
+ if (!error)
+ goto retry;
+ }
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -3846,44 +5077,70 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
}
/**
- * vfs_mkdir - create directory
- * @mnt_userns: user namespace of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
- * @mode: mode of the new directory
+ * vfs_mkdir - create directory returning correct dentry if possible
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child directory
+ * @mode: mode of the child directory
+ * @delegated_inode: returns parent inode, if the inode is delegated.
*
* Create a directory.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
+ *
+ * In the event that the filesystem does not use *@dentry but leaves it
+ * negative, or unhashes it and possibly splices in a different one which it
+ * then returns, the original dentry is dput() and the alternate is returned.
+ *
+ * In case of an error the dentry is dput() and an ERR_PTR() is returned.
*/
-int vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+struct dentry *vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode,
+ struct delegated_inode *delegated_inode)
{
- int error = may_create(mnt_userns, dir, dentry);
+ int error;
unsigned max_links = dir->i_sb->s_max_links;
+ struct dentry *de;
+ error = may_create(idmap, dir, dentry);
if (error)
- return error;
+ goto err;
+ error = -EPERM;
if (!dir->i_op->mkdir)
- return -EPERM;
+ goto err;
- mode &= (S_IRWXUGO|S_ISVTX);
+ mode = vfs_prepare_mode(idmap, dir, mode, S_IRWXUGO | S_ISVTX, 0);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
- return error;
+ goto err;
+ error = -EMLINK;
if (max_links && dir->i_nlink >= max_links)
- return -EMLINK;
+ goto err;
- error = dir->i_op->mkdir(mnt_userns, dir, dentry, mode);
- if (!error)
- fsnotify_mkdir(dir, dentry);
- return error;
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ goto err;
+
+ de = dir->i_op->mkdir(idmap, dir, dentry, mode);
+ error = PTR_ERR(de);
+ if (IS_ERR(de))
+ goto err;
+ if (de) {
+ dput(dentry);
+ dentry = de;
+ }
+ fsnotify_mkdir(dir, dentry);
+ return dentry;
+
+err:
+ end_creating(dentry);
+ return ERR_PTR(error);
}
EXPORT_SYMBOL(vfs_mkdir);
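A hedged sketch of the reworked ->mkdir contract (the examplefs_* names and the examplefs_new_dir_inode() helper are hypothetical): an implementation that uses the dentry it was given returns NULL; one that ends up with a different dentry (e.g. after a splice) returns that dentry instead and vfs_mkdir() dput()s the original; errors come back as ERR_PTR():

	static struct dentry *examplefs_mkdir(struct mnt_idmap *idmap,
					      struct inode *dir,
					      struct dentry *dentry, umode_t mode)
	{
		struct inode *inode;

		inode = examplefs_new_dir_inode(dir, mode);	/* hypothetical helper */
		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* propagated by vfs_mkdir() */
		d_instantiate(dentry, inode);
		return NULL;	/* NULL: @dentry itself was used, nothing spliced */
	}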
@@ -3893,6 +5150,7 @@ int do_mkdirat(int dfd, struct filename *name, umode_t mode)
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_DIRECTORY;
+ struct delegated_inode delegated_inode = { };
retry:
dentry = filename_create(dfd, name, &path, lookup_flags);
@@ -3900,16 +5158,20 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
- error = security_path_mkdir(&path, dentry, mode);
+ error = security_path_mkdir(&path, dentry,
+ mode_strip_umask(path.dentry->d_inode, mode));
if (!error) {
- struct user_namespace *mnt_userns;
- mnt_userns = mnt_user_ns(path.mnt);
- error = vfs_mkdir(mnt_userns, path.dentry->d_inode, dentry,
- mode);
+ dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, mode, &delegated_inode);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
+ }
+ end_creating_path(&path, dentry);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry;
}
- done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -3931,22 +5193,23 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
/**
* vfs_rmdir - remove directory
- * @mnt_userns: user namespace of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child directory
+ * @delegated_inode: returns parent inode, if it's delegated.
*
* Remove a directory.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry)
+int vfs_rmdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, struct delegated_inode *delegated_inode)
{
- int error = may_delete(mnt_userns, dir, dentry, 1);
+ int error = may_delete(idmap, dir, dentry, 1);
if (error)
return error;
@@ -3958,13 +5221,18 @@ int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
inode_lock(dentry->d_inode);
error = -EBUSY;
- if (is_local_mountpoint(dentry))
+ if (is_local_mountpoint(dentry) ||
+ (dentry->d_inode->i_flags & S_KERNEL_FILE))
goto out;
error = security_inode_rmdir(dir, dentry);
if (error)
goto out;
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ goto out;
+
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
@@ -3973,26 +5241,25 @@ int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
detach_mounts(dentry);
- fsnotify_rmdir(dir, dentry);
out:
inode_unlock(dentry->d_inode);
dput(dentry);
if (!error)
- d_delete(dentry);
+ d_delete_notify(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_rmdir);
int do_rmdir(int dfd, struct filename *name)
{
- struct user_namespace *mnt_userns;
int error;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
unsigned int lookup_flags = 0;
+ struct delegated_inode delegated_inode = { };
retry:
error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
@@ -4014,27 +5281,26 @@ retry:
if (error)
goto exit2;
- inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
- dentry = __lookup_hash(&last, path.dentry, lookup_flags);
+ dentry = start_dirop(path.dentry, &last, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit3;
- if (!dentry->d_inode) {
- error = -ENOENT;
- goto exit4;
- }
error = security_path_rmdir(&path, dentry);
if (error)
goto exit4;
- mnt_userns = mnt_user_ns(path.mnt);
- error = vfs_rmdir(mnt_userns, path.dentry->d_inode, dentry);
+ error = vfs_rmdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, &delegated_inode);
exit4:
- dput(dentry);
+ end_dirop(dentry);
exit3:
- inode_unlock(path.dentry->d_inode);
mnt_drop_write(path.mnt);
exit2:
path_put(&path);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry;
+ }
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -4051,34 +5317,34 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
/**
* vfs_unlink - unlink a filesystem object
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
* @dir: parent directory
* @dentry: victim
* @delegated_inode: returns victim inode, if the inode is delegated.
*
- * The caller must hold dir->i_mutex.
+ * The caller must hold dir->i_rwsem exclusively.
*
* If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
* return a reference to the inode in delegated_inode. The caller
* should then break the delegation on that inode and retry. Because
* breaking a delegation may take a long time, the caller should drop
- * dir->i_mutex before doing so.
+ * dir->i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, struct inode **delegated_inode)
+int vfs_unlink(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, struct delegated_inode *delegated_inode)
{
struct inode *target = dentry->d_inode;
- int error = may_delete(mnt_userns, dir, dentry, 0);
+ int error = may_delete(idmap, dir, dentry, 0);
if (error)
return error;
@@ -4094,6 +5360,9 @@ int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir,
else {
error = security_inode_unlink(dir, dentry);
if (!error) {
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ goto out;
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
@@ -4101,7 +5370,6 @@ int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir,
if (!error) {
dont_mount(dentry);
detach_mounts(dentry);
- fsnotify_unlink(dir, dentry);
}
}
}
@@ -4109,9 +5377,11 @@ out:
inode_unlock(target);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
- if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
+ if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ fsnotify_unlink(dir, dentry);
+ } else if (!error) {
fsnotify_link_count(target);
- d_delete(dentry);
+ d_delete_notify(dir, dentry);
}
return error;
@@ -4120,7 +5390,7 @@ EXPORT_SYMBOL(vfs_unlink);
/*
* Make sure that the actual truncation of the file will occur outside its
- * directory's i_mutex. Truncate can take a long time if there is a lot of
+ * directory's i_rwsem. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
@@ -4131,73 +5401,62 @@ int do_unlinkat(int dfd, struct filename *name)
struct path path;
struct qstr last;
int type;
- struct inode *inode = NULL;
- struct inode *delegated_inode = NULL;
+ struct inode *inode;
+ struct delegated_inode delegated_inode = { };
unsigned int lookup_flags = 0;
retry:
error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
- goto exit1;
+ goto exit_putname;
error = -EISDIR;
if (type != LAST_NORM)
- goto exit2;
+ goto exit_path_put;
error = mnt_want_write(path.mnt);
if (error)
- goto exit2;
+ goto exit_path_put;
retry_deleg:
- inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
- dentry = __lookup_hash(&last, path.dentry, lookup_flags);
+ dentry = start_dirop(path.dentry, &last, lookup_flags);
error = PTR_ERR(dentry);
- if (!IS_ERR(dentry)) {
- struct user_namespace *mnt_userns;
-
- /* Why not before? Because we want correct error value */
- if (last.name[last.len])
- goto slashes;
- inode = dentry->d_inode;
- if (d_is_negative(dentry))
- goto slashes;
- ihold(inode);
- error = security_path_unlink(&path, dentry);
- if (error)
- goto exit3;
- mnt_userns = mnt_user_ns(path.mnt);
- error = vfs_unlink(mnt_userns, path.dentry->d_inode, dentry,
- &delegated_inode);
-exit3:
- dput(dentry);
+ if (IS_ERR(dentry))
+ goto exit_drop_write;
+
+ /* Why not before? Because we want correct error value */
+ if (unlikely(last.name[last.len])) {
+ if (d_is_dir(dentry))
+ error = -EISDIR;
+ else
+ error = -ENOTDIR;
+ end_dirop(dentry);
+ goto exit_drop_write;
}
- inode_unlock(path.dentry->d_inode);
- if (inode)
- iput(inode); /* truncate the inode here */
- inode = NULL;
- if (delegated_inode) {
+ inode = dentry->d_inode;
+ ihold(inode);
+ error = security_path_unlink(&path, dentry);
+ if (error)
+ goto exit_end_dirop;
+ error = vfs_unlink(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, &delegated_inode);
+exit_end_dirop:
+ end_dirop(dentry);
+ iput(inode); /* truncate the inode here */
+ if (is_delegated(&delegated_inode)) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
+exit_drop_write:
mnt_drop_write(path.mnt);
-exit2:
+exit_path_put:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
- inode = NULL;
goto retry;
}
-exit1:
+exit_putname:
putname(name);
return error;
-
-slashes:
- if (d_is_negative(dentry))
- error = -ENOENT;
- else if (d_is_dir(dentry))
- error = -EISDIR;
- else
- error = -ENOTDIR;
- goto exit3;
}
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
@@ -4217,24 +5476,27 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname)
/**
* vfs_symlink - create symlink
- * @mnt_userns: user namespace of the mount the inode was found from
- * @dir: inode of @dentry
- * @dentry: pointer to dentry of the base directory
+ * @idmap: idmap of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the child symlink file
* @oldname: name of the file to link to
+ * @delegated_inode: returns parent inode, if it is delegated.
*
* Create a symlink.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, const char *oldname)
+int vfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, const char *oldname,
+ struct delegated_inode *delegated_inode)
{
- int error = may_create(mnt_userns, dir, dentry);
+ int error;
+ error = may_create(idmap, dir, dentry);
if (error)
return error;
@@ -4245,7 +5507,11 @@ int vfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (error)
return error;
- error = dir->i_op->symlink(mnt_userns, dir, dentry, oldname);
+ error = try_break_deleg(dir, delegated_inode);
+ if (error)
+ return error;
+
+ error = dir->i_op->symlink(idmap, dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -4258,6 +5524,7 @@ int do_symlinkat(struct filename *from, int newdfd, struct filename *to)
struct dentry *dentry;
struct path path;
unsigned int lookup_flags = 0;
+ struct delegated_inode delegated_inode = { };
if (IS_ERR(from)) {
error = PTR_ERR(from);
@@ -4270,14 +5537,15 @@ retry:
goto out_putnames;
error = security_path_symlink(&path, dentry, from->name);
- if (!error) {
- struct user_namespace *mnt_userns;
-
- mnt_userns = mnt_user_ns(path.mnt);
- error = vfs_symlink(mnt_userns, path.dentry->d_inode, dentry,
- from->name);
+ if (!error)
+ error = vfs_symlink(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry, from->name, &delegated_inode);
+ end_creating_path(&path, dentry);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry;
}
- done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
@@ -4302,32 +5570,32 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn
/**
* vfs_link - create a new link
* @old_dentry: object to be linked
- * @mnt_userns: the user namespace of the mount
+ * @idmap: idmap of the mount
* @dir: new parent
* @new_dentry: where to create the new link
* @delegated_inode: returns inode needing a delegation break
*
- * The caller must hold dir->i_mutex
+ * The caller must hold dir->i_rwsem exclusively.
*
* If vfs_link discovers a delegation on the to-be-linked file in need
* of breaking, it will return -EWOULDBLOCK and return a reference to the
* inode in delegated_inode. The caller should then break the delegation
* and retry. Because breaking a delegation may take a long time, the
- * caller should drop the i_mutex before doing so.
+ * caller should drop the i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
- * If the inode has been found through an idmapped mount the user namespace of
- * the vfsmount must be passed through @mnt_userns. This function will then take
- * care to map the inode according to @mnt_userns before checking permissions.
+ * If the inode has been found through an idmapped mount the idmap of
+ * the vfsmount must be passed through @idmap. This function will then take
+ * care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
- * raw inode simply passs init_user_ns.
+ * raw inode simply pass @nop_mnt_idmap.
*/
-int vfs_link(struct dentry *old_dentry, struct user_namespace *mnt_userns,
+int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
struct inode *dir, struct dentry *new_dentry,
- struct inode **delegated_inode)
+ struct delegated_inode *delegated_inode)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
@@ -4336,7 +5604,7 @@ int vfs_link(struct dentry *old_dentry, struct user_namespace *mnt_userns,
if (!inode)
return -ENOENT;
- error = may_create(mnt_userns, dir, new_dentry);
+ error = may_create(idmap, dir, new_dentry);
if (error)
return error;
@@ -4350,10 +5618,10 @@ int vfs_link(struct dentry *old_dentry, struct user_namespace *mnt_userns,
return -EPERM;
/*
* Updating the link count will likely cause i_uid and i_gid to
- * be writen back improperly if their true value is unknown to
+ * be written back improperly if their true value is unknown to
* the vfs.
*/
- if (HAS_UNMAPPED_ID(mnt_userns, inode))
+ if (HAS_UNMAPPED_ID(idmap, inode))
return -EPERM;
if (!dir->i_op->link)
return -EPERM;
@@ -4366,19 +5634,21 @@ int vfs_link(struct dentry *old_dentry, struct user_namespace *mnt_userns,
inode_lock(inode);
/* Make sure we don't allow creating hardlink to an unlinked file */
- if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
+ if (inode->i_nlink == 0 && !(inode_state_read_once(inode) & I_LINKABLE))
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
else {
- error = try_break_deleg(inode, delegated_inode);
+ error = try_break_deleg(dir, delegated_inode);
+ if (!error)
+ error = try_break_deleg(inode, delegated_inode);
if (!error)
error = dir->i_op->link(old_dentry, dir, new_dentry);
}
- if (!error && (inode->i_state & I_LINKABLE)) {
+ if (!error && (inode_state_read_once(inode) & I_LINKABLE)) {
spin_lock(&inode->i_lock);
- inode->i_state &= ~I_LINKABLE;
+ inode_state_clear(inode, I_LINKABLE);
spin_unlock(&inode->i_lock);
}
inode_unlock(inode);
@@ -4400,10 +5670,10 @@ EXPORT_SYMBOL(vfs_link);
int do_linkat(int olddfd, struct filename *old, int newdfd,
struct filename *new, int flags)
{
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *idmap;
struct dentry *new_dentry;
struct path old_path, new_path;
- struct inode *delegated_inode = NULL;
+ struct delegated_inode delegated_inode = { };
int how = 0;
int error;
@@ -4412,14 +5682,13 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
goto out_putnames;
}
/*
- * To use null names we require CAP_DAC_READ_SEARCH
+ * To use null names we require CAP_DAC_READ_SEARCH or
+ * that the open-time creds of the dfd match current.
* This ensures that not everyone will be able to create
- * handlink using the passed filedescriptor.
+ * a hardlink using the passed file descriptor.
*/
- if (flags & AT_EMPTY_PATH && !capable(CAP_DAC_READ_SEARCH)) {
- error = -ENOENT;
- goto out_putnames;
- }
+ if (flags & AT_EMPTY_PATH)
+ how |= LOOKUP_LINKAT_EMPTY;
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
@@ -4437,18 +5706,18 @@ retry:
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
- mnt_userns = mnt_user_ns(new_path.mnt);
- error = may_linkat(mnt_userns, &old_path);
+ idmap = mnt_idmap(new_path.mnt);
+ error = may_linkat(idmap, &old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
- error = vfs_link(old_path.dentry, mnt_userns, new_path.dentry->d_inode,
+ error = vfs_link(old_path.dentry, idmap, new_path.dentry->d_inode,
new_dentry, &delegated_inode);
out_dput:
- done_path_create(&new_path, new_dentry);
- if (delegated_inode) {
+ end_creating_path(&new_path, new_dentry);
+ if (is_delegated(&delegated_inode)) {
error = break_deleg_wait(&delegated_inode);
if (!error) {
path_put(&old_path);
@@ -4504,12 +5773,13 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
*
* a) we can get into loop creation.
* b) race potential - two innocent renames can create a loop together.
- * That's where 4.4 screws up. Current fix: serialization on
+ * That's where 4.4BSD screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
- * c) we have to lock _four_ objects - parents and victim (if it exists),
- * and source (if it is not a directory).
- * And that - after we got ->i_mutex on parents (until then we don't know
+ * c) we may have to lock up to _four_ objects - parents and victim (if it exists),
+ * and source (if it's a non-directory or a subdirectory that moves to
+ *	a different parent).
+ * And that - after we got ->i_rwsem on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
@@ -4521,18 +5791,19 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
* has no more than 1 dentry. If "hybrid" objects will ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come in the wrong moment - when
- * we are removing the target. Solution: we will have to grab ->i_mutex
+ * we are removing the target. Solution: we will have to grab ->i_rwsem
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
- * ->i_mutex on parents, which works but leads to some truly excessive
+ * ->i_rwsem on parents, which works but leads to some truly excessive
* locking].
*/
int vfs_rename(struct renamedata *rd)
{
int error;
- struct inode *old_dir = rd->old_dir, *new_dir = rd->new_dir;
+ struct inode *old_dir = d_inode(rd->old_parent);
+ struct inode *new_dir = d_inode(rd->new_parent);
struct dentry *old_dentry = rd->old_dentry;
struct dentry *new_dentry = rd->new_dentry;
- struct inode **delegated_inode = rd->delegated_inode;
+ struct delegated_inode *delegated_inode = rd->delegated_inode;
unsigned int flags = rd->flags;
bool is_dir = d_is_dir(old_dentry);
struct inode *source = old_dentry->d_inode;
@@ -4540,24 +5811,25 @@ int vfs_rename(struct renamedata *rd)
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
struct name_snapshot old_name;
+ bool lock_old_subdir, lock_new_subdir;
if (source == target)
return 0;
- error = may_delete(rd->old_mnt_userns, old_dir, old_dentry, is_dir);
+ error = may_delete(rd->mnt_idmap, old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
- error = may_create(rd->new_mnt_userns, new_dir, new_dentry);
+ error = may_create(rd->mnt_idmap, new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
- error = may_delete(rd->new_mnt_userns, new_dir,
+ error = may_delete(rd->mnt_idmap, new_dir,
new_dentry, is_dir);
else
- error = may_delete(rd->new_mnt_userns, new_dir,
+ error = may_delete(rd->mnt_idmap, new_dir,
new_dentry, new_is_dir);
}
if (error)
@@ -4572,13 +5844,13 @@ int vfs_rename(struct renamedata *rd)
*/
if (new_dir != old_dir) {
if (is_dir) {
- error = inode_permission(rd->old_mnt_userns, source,
+ error = inode_permission(rd->mnt_idmap, source,
MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
- error = inode_permission(rd->new_mnt_userns, target,
+ error = inode_permission(rd->mnt_idmap, target,
MAY_WRITE);
if (error)
return error;
@@ -4592,10 +5864,33 @@ int vfs_rename(struct renamedata *rd)
take_dentry_name_snapshot(&old_name, old_dentry);
dget(new_dentry);
- if (!is_dir || (flags & RENAME_EXCHANGE))
+ /*
+ * Lock children.
+ * The source subdirectory needs to be locked on cross-directory
+ * rename or cross-directory exchange since its parent changes.
+ * The target subdirectory needs to be locked on cross-directory
+ * exchange due to parent change and on any rename due to becoming
+ * a victim.
+ * Non-directories need locking in all cases (for NFS reasons);
+ * they get locked after any subdirectories (in inode address order).
+ *
+ * NOTE: WE ONLY LOCK UNRELATED DIRECTORIES IN CROSS-DIRECTORY CASE.
+ * NEVER, EVER DO THAT WITHOUT ->s_vfs_rename_mutex.
+ */
+ lock_old_subdir = new_dir != old_dir;
+ lock_new_subdir = new_dir != old_dir || !(flags & RENAME_EXCHANGE);
+ if (is_dir) {
+ if (lock_old_subdir)
+ inode_lock_nested(source, I_MUTEX_CHILD);
+ if (target && (!new_is_dir || lock_new_subdir))
+ inode_lock(target);
+ } else if (new_is_dir) {
+ if (lock_new_subdir)
+ inode_lock_nested(target, I_MUTEX_CHILD);
+ inode_lock(source);
+ } else {
lock_two_nondirectories(source, target);
- else if (target)
- inode_lock(target);
+ }
error = -EPERM;
if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target)))
@@ -4613,6 +5908,14 @@ int vfs_rename(struct renamedata *rd)
old_dir->i_nlink >= max_links)
goto out;
}
+ error = try_break_deleg(old_dir, delegated_inode);
+ if (error)
+ goto out;
+ if (new_dir != old_dir) {
+ error = try_break_deleg(new_dir, delegated_inode);
+ if (error)
+ goto out;
+ }
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
@@ -4623,7 +5926,7 @@ int vfs_rename(struct renamedata *rd)
if (error)
goto out;
}
- error = old_dir->i_op->rename(rd->new_mnt_userns, old_dir, old_dentry,
+ error = old_dir->i_op->rename(rd->mnt_idmap, old_dir, old_dentry,
new_dir, new_dentry, flags);
if (error)
goto out;
@@ -4643,9 +5946,9 @@ int vfs_rename(struct renamedata *rd)
d_exchange(old_dentry, new_dentry);
}
out:
- if (!is_dir || (flags & RENAME_EXCHANGE))
- unlock_two_nondirectories(source, target);
- else if (target)
+ if (!is_dir || lock_old_subdir)
+ inode_unlock(source);
+ if (target && (!new_is_dir || lock_new_subdir))
inode_unlock(target);
dput(new_dentry);
if (!error) {
@@ -4666,13 +5969,11 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
struct filename *to, unsigned int flags)
{
struct renamedata rd;
- struct dentry *old_dentry, *new_dentry;
- struct dentry *trap;
struct path old_path, new_path;
struct qstr old_last, new_last;
int old_type, new_type;
- struct inode *delegated_inode = NULL;
- unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
+ struct delegated_inode delegated_inode = { };
+ unsigned int lookup_flags = 0;
bool should_retry = false;
int error = -EINVAL;
@@ -4683,9 +5984,6 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
(flags & RENAME_EXCHANGE))
goto put_names;
- if (flags & RENAME_EXCHANGE)
- target_flags = 0;
-
retry:
error = filename_parentat(olddfd, from, lookup_flags, &old_path,
&old_last, &old_type);
@@ -4715,73 +6013,42 @@ retry:
goto exit2;
retry_deleg:
- trap = lock_rename(new_path.dentry, old_path.dentry);
+ rd.old_parent = old_path.dentry;
+ rd.mnt_idmap = mnt_idmap(old_path.mnt);
+ rd.new_parent = new_path.dentry;
+ rd.delegated_inode = &delegated_inode;
+ rd.flags = flags;
- old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
- error = PTR_ERR(old_dentry);
- if (IS_ERR(old_dentry))
- goto exit3;
- /* source must exist */
- error = -ENOENT;
- if (d_is_negative(old_dentry))
- goto exit4;
- new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
- error = PTR_ERR(new_dentry);
- if (IS_ERR(new_dentry))
- goto exit4;
- error = -EEXIST;
- if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
- goto exit5;
- if (flags & RENAME_EXCHANGE) {
- error = -ENOENT;
- if (d_is_negative(new_dentry))
- goto exit5;
+ error = __start_renaming(&rd, lookup_flags, &old_last, &new_last);
+ if (error)
+ goto exit_lock_rename;
- if (!d_is_dir(new_dentry)) {
+ if (flags & RENAME_EXCHANGE) {
+ if (!d_is_dir(rd.new_dentry)) {
error = -ENOTDIR;
if (new_last.name[new_last.len])
- goto exit5;
+ goto exit_unlock;
}
}
/* unless the source is a directory trailing slashes give -ENOTDIR */
- if (!d_is_dir(old_dentry)) {
+ if (!d_is_dir(rd.old_dentry)) {
error = -ENOTDIR;
if (old_last.name[old_last.len])
- goto exit5;
+ goto exit_unlock;
if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
- goto exit5;
+ goto exit_unlock;
}
- /* source should not be ancestor of target */
- error = -EINVAL;
- if (old_dentry == trap)
- goto exit5;
- /* target should not be an ancestor of source */
- if (!(flags & RENAME_EXCHANGE))
- error = -ENOTEMPTY;
- if (new_dentry == trap)
- goto exit5;
- error = security_path_rename(&old_path, old_dentry,
- &new_path, new_dentry, flags);
+ error = security_path_rename(&old_path, rd.old_dentry,
+ &new_path, rd.new_dentry, flags);
if (error)
- goto exit5;
-
- rd.old_dir = old_path.dentry->d_inode;
- rd.old_dentry = old_dentry;
- rd.old_mnt_userns = mnt_user_ns(old_path.mnt);
- rd.new_dir = new_path.dentry->d_inode;
- rd.new_dentry = new_dentry;
- rd.new_mnt_userns = mnt_user_ns(new_path.mnt);
- rd.delegated_inode = &delegated_inode;
- rd.flags = flags;
+ goto exit_unlock;
+
error = vfs_rename(&rd);
-exit5:
- dput(new_dentry);
-exit4:
- dput(old_dentry);
-exit3:
- unlock_rename(new_path.dentry, old_path.dentry);
- if (delegated_inode) {
+exit_unlock:
+ end_renaming(&rd);
+exit_lock_rename:
+ if (is_delegated(&delegated_inode)) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
@@ -4824,19 +6091,16 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
getname(newname), 0);
}
-int readlink_copy(char __user *buffer, int buflen, const char *link)
+int readlink_copy(char __user *buffer, int buflen, const char *link, int linklen)
{
- int len = PTR_ERR(link);
- if (IS_ERR(link))
- goto out;
+ int copylen;
- len = strlen(link);
- if (len > (unsigned) buflen)
- len = buflen;
- if (copy_to_user(buffer, link, len))
- len = -EFAULT;
-out:
- return len;
+ copylen = linklen;
+ if (unlikely(copylen > (unsigned) buflen))
+ copylen = buflen;
+ if (copy_to_user(buffer, link, copylen))
+ copylen = -EFAULT;
+ return copylen;
}
/**
@@ -4856,6 +6120,9 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
const char *link;
int res;
+ if (inode->i_opflags & IOP_CACHED_LINK)
+ return readlink_copy(buffer, buflen, inode->i_link, inode->i_linklen);
+
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
return inode->i_op->readlink(dentry, buffer, buflen);
@@ -4874,7 +6141,7 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
if (IS_ERR(link))
return PTR_ERR(link);
}
- res = readlink_copy(buffer, buflen, link);
+ res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
@@ -4906,75 +6173,117 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
EXPORT_SYMBOL(vfs_get_link);
/* get the link contents into pagecache */
-const char *page_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *callback)
+static char *__page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
{
- char *kaddr;
- struct page *page;
+ struct folio *folio;
struct address_space *mapping = inode->i_mapping;
if (!dentry) {
- page = find_get_page(mapping, 0);
- if (!page)
+ folio = filemap_get_folio(mapping, 0);
+ if (IS_ERR(folio))
return ERR_PTR(-ECHILD);
- if (!PageUptodate(page)) {
- put_page(page);
+ if (!folio_test_uptodate(folio)) {
+ folio_put(folio);
return ERR_PTR(-ECHILD);
}
} else {
- page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
- return (char*)page;
+ folio = read_mapping_folio(mapping, 0, NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
}
- set_delayed_call(callback, page_put_link, page);
+ set_delayed_call(callback, page_put_link, folio);
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
- kaddr = page_address(page);
- nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
- return kaddr;
+ return folio_address(folio);
}
+const char *page_get_link_raw(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ return __page_get_link(dentry, inode, callback);
+}
+EXPORT_SYMBOL_GPL(page_get_link_raw);
+
+/**
+ * page_get_link() - An implementation of the get_link inode_operation.
+ * @dentry: The directory entry which is the symlink.
+ * @inode: The inode for the symlink.
+ * @callback: Used to drop the reference to the symlink.
+ *
+ * Filesystems which store their symlinks in the page cache should use
+ * this to implement the get_link() member of their inode_operations.
+ *
+ * Return: A pointer to the NUL-terminated symlink.
+ */
+const char *page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ char *kaddr = __page_get_link(dentry, inode, callback);
+
+ if (!IS_ERR(kaddr))
+ nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
+ return kaddr;
+}
EXPORT_SYMBOL(page_get_link);
+/**
+ * page_put_link() - Drop the reference to the symlink.
+ * @arg: The folio which contains the symlink.
+ *
+ * This is used internally by page_get_link(). It is exported for use
+ * by filesystems which need to implement a variant of page_get_link()
+ * themselves. Despite the apparent symmetry, filesystems which use
+ * page_get_link() do not need to call page_put_link().
+ *
+ * The argument, while it has a void pointer type, must be a pointer to
+ * the folio which was retrieved from the page cache. The delayed_call
+ * infrastructure is used to drop the reference count once the caller
+ * is done with the symlink.
+ */
void page_put_link(void *arg)
{
- put_page(arg);
+ folio_put(arg);
}
EXPORT_SYMBOL(page_put_link);
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
+ const char *link;
+ int res;
+
DEFINE_DELAYED_CALL(done);
- int res = readlink_copy(buffer, buflen,
- page_get_link(dentry, d_inode(dentry),
- &done));
+ link = page_get_link(dentry, d_inode(dentry), &done);
+ res = PTR_ERR(link);
+ if (!IS_ERR(link))
+ res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
EXPORT_SYMBOL(page_readlink);
-/*
- * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
- */
-int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
+int page_symlink(struct inode *inode, const char *symname, int len)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
- void *fsdata;
+ const struct address_space_operations *aops = mapping->a_ops;
+ bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
+ struct folio *folio;
+ void *fsdata = NULL;
int err;
- unsigned int flags = 0;
- if (nofs)
- flags |= AOP_FLAG_NOFS;
+ unsigned int flags;
retry:
- err = pagecache_write_begin(NULL, mapping, 0, len-1,
- flags, &page, &fsdata);
+ if (nofs)
+ flags = memalloc_nofs_save();
+ err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata);
+ if (nofs)
+ memalloc_nofs_restore(flags);
if (err)
goto fail;
- memcpy(page_address(page), symname, len-1);
+ memcpy(folio_address(folio), symname, len - 1);
- err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
- page, fsdata);
+ err = aops->write_end(NULL, mapping, 0, len - 1, len - 1,
+ folio, fsdata);
if (err < 0)
goto fail;
if (err < len-1)
@@ -4985,13 +6294,6 @@ retry:
fail:
return err;
}
-EXPORT_SYMBOL(__page_symlink);
-
-int page_symlink(struct inode *inode, const char *symname, int len)
-{
- return __page_symlink(inode, symname, len,
- !mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
-}
EXPORT_SYMBOL(page_symlink);
const struct inode_operations page_symlink_inode_operations = {
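A hedged sketch of how a filesystem that keeps symlink bodies in the page cache typically wires these helpers together (the examplefs_* names are hypothetical; page_symlink() takes the length including the trailing NUL):

	static int examplefs_symlink(struct mnt_idmap *idmap, struct inode *dir,
				     struct dentry *dentry, const char *symname)
	{
		struct inode *inode;
		int err;

		inode = examplefs_new_inode(dir, S_IFLNK | 0777);	/* hypothetical */
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		inode->i_op = &page_symlink_inode_operations;	/* .get_link = page_get_link */
		inode->i_mapping->a_ops = &examplefs_aops;	/* hypothetical */
		err = page_symlink(inode, symname, strlen(symname) + 1);
		if (err) {
			iput(inode);
			return err;
		}
		d_instantiate(dentry, inode);
		return 0;
	}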
diff --git a/fs/namespace.c b/fs/namespace.c
index 659a8f39c61a..c58674a20cad 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -31,17 +31,20 @@
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/pidfs.h>
+#include <linux/nstree.h>
#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
-unsigned int sysctl_mount_max __read_mostly = 100000;
+static unsigned int sysctl_mount_max __read_mostly = 100000;
-static unsigned int m_hash_mask __read_mostly;
-static unsigned int m_hash_shift __read_mostly;
-static unsigned int mp_hash_mask __read_mostly;
-static unsigned int mp_hash_shift __read_mostly;
+static unsigned int m_hash_mask __ro_after_init;
+static unsigned int m_hash_shift __ro_after_init;
+static unsigned int mp_hash_mask __ro_after_init;
+static unsigned int mp_hash_shift __ro_after_init;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
@@ -63,28 +66,60 @@ static int __init set_mphash_entries(char *str)
}
__setup("mphash_entries=", set_mphash_entries);
+static char * __initdata initramfs_options;
+static int __init initramfs_options_setup(char *str)
+{
+ initramfs_options = str;
+ return 1;
+}
+
+__setup("initramfs_options=", initramfs_options_setup);
+
static u64 event;
-static DEFINE_IDA(mnt_id_ida);
+static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);
-static struct hlist_head *mount_hashtable __read_mostly;
-static struct hlist_head *mountpoint_hashtable __read_mostly;
-static struct kmem_cache *mnt_cache __read_mostly;
+/* Don't allow confusion with old 32bit mount ID */
+#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
+static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;
+
+static struct hlist_head *mount_hashtable __ro_after_init;
+static struct hlist_head *mountpoint_hashtable __ro_after_init;
+static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
+static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */
+
+static inline void namespace_lock(void);
+static void namespace_unlock(void);
+DEFINE_LOCK_GUARD_0(namespace_excl, namespace_lock(), namespace_unlock())
+DEFINE_LOCK_GUARD_0(namespace_shared, down_read(&namespace_sem),
+ up_read(&namespace_sem))
+
+DEFINE_FREE(mntput, struct vfsmount *, if (!IS_ERR(_T)) mntput(_T))
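A minimal sketch, assuming these helpers follow the usual <linux/cleanup.h> semantics, of what the scope-based guards buy callers inside fs/namespace.c (example_count_mounts() is hypothetical); the __free(mntput) cleanup likewise drops a vfsmount reference automatically when the annotated variable goes out of scope:

	static int example_count_mounts(void)
	{
		guard(namespace_shared)();	/* down_read(&namespace_sem), released on return */
		/* ... walk mount trees while namespace_sem is held shared ... */
		return 0;
	}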
+
+#ifdef CONFIG_FSNOTIFY
+LIST_HEAD(notify_list); /* protected by namespace_sem */
+#endif
+
+enum mount_kattr_flags_t {
+ MOUNT_KATTR_RECURSE = (1 << 0),
+ MOUNT_KATTR_IDMAP_REPLACE = (1 << 1),
+};
struct mount_kattr {
unsigned int attr_set;
unsigned int attr_clr;
unsigned int propagation;
unsigned int lookup_flags;
- bool recurse;
+ enum mount_kattr_flags_t kflags;
struct user_namespace *mnt_userns;
+ struct mnt_idmap *mnt_idmap;
};
/* /sys/fs */
-struct kobject *fs_kobj;
+struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
@@ -97,6 +132,65 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
+static void mnt_ns_release(struct mnt_namespace *ns)
+{
+ /* keep alive for {list,stat}mount() */
+ if (ns && refcount_dec_and_test(&ns->passive)) {
+ fsnotify_mntns_delete(ns);
+ put_user_ns(ns->user_ns);
+ kfree(ns);
+ }
+}
+DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
+ if (!IS_ERR(_T)) mnt_ns_release(_T))
+
+static void mnt_ns_release_rcu(struct rcu_head *rcu)
+{
+ mnt_ns_release(container_of(rcu, struct mnt_namespace, ns.ns_rcu));
+}
+
+static void mnt_ns_tree_remove(struct mnt_namespace *ns)
+{
+ /* remove from global mount namespace list */
+ if (ns_tree_active(ns))
+ ns_tree_remove(ns);
+
+ call_rcu(&ns->ns.ns_rcu, mnt_ns_release_rcu);
+}
+
+/*
+ * Lookup a mount namespace by id and take a passive reference count. Taking a
+ * passive reference means the mount namespace can be emptied if e.g., the last
+ * task holding an active reference exits. To access the mounts of the
+ * namespace the @namespace_sem must first be acquired. If the namespace has
+ * already shut down before acquiring @namespace_sem, {list,stat}mount() will
+ * see that the mount rbtree of the namespace is empty.
+ *
+ * Note the lookup is lockless protected by a sequence counter. We only
+ * need to guard against false negatives as false positives aren't
+ * possible. So if we didn't find a mount namespace and the sequence
+ * counter has changed we need to retry. If the sequence counter is
+ * still the same we know the search actually failed.
+ */
+static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
+{
+ struct mnt_namespace *mnt_ns;
+ struct ns_common *ns;
+
+ guard(rcu)();
+ ns = ns_tree_lookup_rcu(mnt_ns_id, CLONE_NEWNS);
+ if (!ns)
+ return NULL;
+
+ /*
+ * The last reference count is put with RCU delay so we can
+	 * unconditionally acquire a reference here.
+ */
+ mnt_ns = container_of(ns, struct mnt_namespace, ns);
+ refcount_inc(&mnt_ns->passive);
+ return mnt_ns;
+}
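A hedged sketch of the intended calling pattern (statmount()/listmount() are the real users; example_with_mnt_ns() and the body of the walk are hypothetical):

	static int example_with_mnt_ns(u64 mnt_ns_id)
	{
		struct mnt_namespace *ns __free(mnt_ns_release) = lookup_mnt_ns(mnt_ns_id);

		if (!ns)
			return -ENOENT;	/* no such namespace at lookup time */

		scoped_guard(namespace_shared) {
			/* the namespace's mounts are stable here; the tree is
			 * simply empty if the namespace already shut down */
		}
		return 0;	/* passive reference dropped by __free(mnt_ns_release) */
	}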
+
static inline void lock_mount_hash(void)
{
write_seqlock(&mount_lock);
@@ -124,17 +218,19 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
static int mnt_alloc_id(struct mount *mnt)
{
- int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
+ int res;
- if (res < 0)
- return res;
- mnt->mnt_id = res;
- return 0;
+ xa_lock(&mnt_id_xa);
+ res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ if (!res)
+ mnt->mnt_id_unique = ++mnt_id_ctr;
+ xa_unlock(&mnt_id_xa);
+ return res;
}
static void mnt_free_id(struct mount *mnt)
{
- ida_free(&mnt_id_ida, mnt->mnt_id);
+ xa_erase(&mnt_id_xa, mnt->mnt_id);
}
/*
@@ -202,12 +298,13 @@ static struct mount *alloc_vfsmnt(const char *name)
if (err)
goto out_free_cache;
- if (name) {
+ if (name)
mnt->mnt_devname = kstrdup_const(name,
GFP_KERNEL_ACCOUNT);
- if (!mnt->mnt_devname)
- goto out_free_id;
- }
+ else
+ mnt->mnt_devname = "none";
+ if (!mnt->mnt_devname)
+ goto out_free_id;
#ifdef CONFIG_SMP
mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
@@ -226,12 +323,12 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_list);
INIT_LIST_HEAD(&mnt->mnt_expire);
INIT_LIST_HEAD(&mnt->mnt_share);
- INIT_LIST_HEAD(&mnt->mnt_slave_list);
- INIT_LIST_HEAD(&mnt->mnt_slave);
+ INIT_HLIST_HEAD(&mnt->mnt_slave_list);
+ INIT_HLIST_NODE(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
- INIT_LIST_HEAD(&mnt->mnt_umounting);
INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
- mnt->mnt.mnt_userns = &init_user_ns;
+ RB_CLEAR_NODE(&mnt->mnt_node);
+ mnt->mnt.mnt_idmap = &nop_mnt_idmap;
}
return mnt;
@@ -265,7 +362,7 @@ out_free_cache:
* mnt_want/drop_write() will _keep_ the filesystem
* r/w.
*/
-bool __mnt_is_readonly(struct vfsmount *mnt)
+bool __mnt_is_readonly(const struct vfsmount *mnt)
{
return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
@@ -305,11 +402,18 @@ static unsigned int mnt_get_writers(struct mount *mnt)
#endif
}
-static int mnt_is_readonly(struct vfsmount *mnt)
+static int mnt_is_readonly(const struct vfsmount *mnt)
{
- if (mnt->mnt_sb->s_readonly_remount)
+ if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
return 1;
- /* Order wrt setting s_flags/s_readonly_remount in do_remount() */
+ /*
+ * The barrier pairs with the barrier in sb_start_ro_state_change()
+ * making sure if we don't see s_readonly_remount set yet, we also will
+ * not see any superblock / mount flag changes done by remount.
+ * It also pairs with the barrier in sb_end_ro_state_change()
+ * assuring that if we see s_readonly_remount already cleared, we will
+ * see the values of superblock / mount flags updated by remount.
+ */
smp_rmb();
return __mnt_is_readonly(mnt);
}
@@ -321,16 +425,16 @@ static int mnt_is_readonly(struct vfsmount *mnt)
* can determine when writes are able to occur to a filesystem.
*/
/**
- * __mnt_want_write - get write access to a mount without freeze protection
+ * mnt_get_write_access - get write access to a mount without freeze protection
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (mnt it read-write) before
* returning success. This operation does not protect against filesystem being
- * frozen. When the write operation is finished, __mnt_drop_write() must be
+ * frozen. When the write operation is finished, mnt_put_write_access() must be
* called. This is effectively a refcount.
*/
-int __mnt_want_write(struct vfsmount *m)
+int mnt_get_write_access(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
int ret = 0;
@@ -339,16 +443,34 @@ int __mnt_want_write(struct vfsmount *m)
mnt_inc_writers(mnt);
/*
* The store to mnt_inc_writers must be visible before we pass
- * MNT_WRITE_HOLD loop below, so that the slowpath can see our
- * incremented count after it has set MNT_WRITE_HOLD.
+ * WRITE_HOLD loop below, so that the slowpath can see our
+ * incremented count after it has set WRITE_HOLD.
*/
smp_mb();
- while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
- cpu_relax();
+ might_lock(&mount_lock.lock);
+ while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ cpu_relax();
+ } else {
+ /*
+				 * This prevents priority inversion if the task
+				 * setting WRITE_HOLD got preempted on a remote
+				 * CPU, and it prevents livelock if the task setting
+				 * WRITE_HOLD has a lower priority and is bound to
+				 * the same CPU as the task that is spinning here.
+ */
+ preempt_enable();
+ read_seqlock_excl(&mount_lock);
+ read_sequnlock_excl(&mount_lock);
+ preempt_disable();
+ }
+ }
/*
- * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
- * be set to match its requirements. So we must not load that until
- * MNT_WRITE_HOLD is cleared.
+	 * The barrier pairs with the barrier in sb_start_ro_state_change() making
+ * sure that if we see WRITE_HOLD cleared, we will also see
+ * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
+ * mnt_is_readonly() and bail in case we are racing with remount
+ * read-only.
*/
smp_rmb();
if (mnt_is_readonly(m)) {
@@ -359,6 +481,7 @@ int __mnt_want_write(struct vfsmount *m)
return ret;
}
+EXPORT_SYMBOL_GPL(mnt_get_write_access);
/**
* mnt_want_write - get write access to a mount
@@ -374,7 +497,7 @@ int mnt_want_write(struct vfsmount *m)
int ret;
sb_start_write(m->mnt_sb);
- ret = __mnt_want_write(m);
+ ret = mnt_get_write_access(m);
if (ret)
sb_end_write(m->mnt_sb);
return ret;
@@ -382,15 +505,15 @@ int mnt_want_write(struct vfsmount *m)
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
- * __mnt_want_write_file - get write access to a file's mount
+ * mnt_get_write_access_file - get write access to a file's mount
* @file: the file who's mount on which to take a write
*
- * This is like __mnt_want_write, but if the file is already open for writing it
+ * This is like mnt_get_write_access, but if @file is already open for write it
* skips incrementing mnt_writers (since the open file already has a reference)
* and instead only does the check for emergency r/o remounts. This must be
- * paired with __mnt_drop_write_file.
+ * paired with mnt_put_write_access_file.
*/
-int __mnt_want_write_file(struct file *file)
+int mnt_get_write_access_file(struct file *file)
{
if (file->f_mode & FMODE_WRITER) {
/*
@@ -401,7 +524,7 @@ int __mnt_want_write_file(struct file *file)
return -EROFS;
return 0;
}
- return __mnt_want_write(file->f_path.mnt);
+ return mnt_get_write_access(file->f_path.mnt);
}
/**
@@ -418,7 +541,7 @@ int mnt_want_write_file(struct file *file)
int ret;
sb_start_write(file_inode(file)->i_sb);
- ret = __mnt_want_write_file(file);
+ ret = mnt_get_write_access_file(file);
if (ret)
sb_end_write(file_inode(file)->i_sb);
return ret;
@@ -426,19 +549,20 @@ int mnt_want_write_file(struct file *file)
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
- * __mnt_drop_write - give up write access to a mount
+ * mnt_put_write_access - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done
* performing writes to it. Must be matched with
- * __mnt_want_write() call above.
+ * mnt_get_write_access() call above.
*/
-void __mnt_drop_write(struct vfsmount *mnt)
+void mnt_put_write_access(struct vfsmount *mnt)
{
preempt_disable();
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
+EXPORT_SYMBOL_GPL(mnt_put_write_access);
/**
* mnt_drop_write - give up write access to a mount
@@ -450,29 +574,47 @@ void __mnt_drop_write(struct vfsmount *mnt)
*/
void mnt_drop_write(struct vfsmount *mnt)
{
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
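A short, hedged reminder of the pairing (example_touch() is hypothetical; the pattern mirrors how do_unlinkat() and friends in fs/namei.c use it):

	static int example_touch(const struct path *path)
	{
		int error;

		error = mnt_want_write(path->mnt);	/* freeze protection + write access */
		if (error)
			return error;
		/* ... perform the modification (create, unlink, setattr, ...) ... */
		mnt_drop_write(path->mnt);		/* must pair with mnt_want_write() */
		return 0;
	}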
-void __mnt_drop_write_file(struct file *file)
+void mnt_put_write_access_file(struct file *file)
{
if (!(file->f_mode & FMODE_WRITER))
- __mnt_drop_write(file->f_path.mnt);
+ mnt_put_write_access(file->f_path.mnt);
}
void mnt_drop_write_file(struct file *file)
{
- __mnt_drop_write_file(file);
+ mnt_put_write_access_file(file);
sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
+/**
+ * mnt_hold_writers - prevent write access to the given mount
+ * @mnt: mnt to prevent write access to
+ *
+ * Prevents write access to @mnt if there are no active writers for @mnt.
+ * This function needs to be called and return successfully before changing
+ * properties of @mnt that need to remain stable for callers with write access
+ * to @mnt.
+ *
+ * After this function has been called successfully, callers must pair it with
+ * a call to mnt_unhold_writers() in order to stop preventing write access to
+ * @mnt.
+ *
+ * Context: This function expects to be called in mount_locked_reader scope,
+ *         which serializes setting WRITE_HOLD.
+ * Return: On success 0 is returned.
+ * On error, -EBUSY is returned.
+ */
static inline int mnt_hold_writers(struct mount *mnt)
{
- mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
+ set_write_hold(mnt);
/*
- * After storing MNT_WRITE_HOLD, we'll read the counters. This store
+ * After storing WRITE_HOLD, we'll read the counters. This store
* should be visible before we do.
*/
smp_mb();
@@ -488,9 +630,9 @@ static inline int mnt_hold_writers(struct mount *mnt)
* sum up each counter, if we read a counter before it is incremented,
* but then read another CPU's count which it has been subsequently
* decremented from -- we would see more decrements than we should.
- * MNT_WRITE_HOLD protects against this scenario, because
+ * WRITE_HOLD protects against this scenario, because
* mnt_want_write first increments count, then smp_mb, then spins on
- * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
+ * WRITE_HOLD, so it can't be decremented by another CPU while
* we're counting up here.
*/
if (mnt_get_writers(mnt) > 0)
@@ -499,14 +641,49 @@ static inline int mnt_hold_writers(struct mount *mnt)
return 0;
}
+/**
+ * mnt_unhold_writers - stop preventing write access to the given mount
+ * @mnt: mnt to stop preventing write access to
+ *
+ * Stop preventing write access to @mnt allowing callers to gain write access
+ * to @mnt again.
+ *
+ * This function can only be called after a call to mnt_hold_writers().
+ *
+ * Context: This function expects to be in the same mount_locked_reader scope
+ * as the matching mnt_hold_writers().
+ */
static inline void mnt_unhold_writers(struct mount *mnt)
{
+ if (!test_write_hold(mnt))
+ return;
/*
- * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
+ * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
- mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+ clear_write_hold(mnt);
+}
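
A minimal sketch of the intended pairing (roughly what mnt_make_readonly() below does), assuming the mount_locked_reader guard used elsewhere in this file; the helper name is made up:

static int example_set_readonly(struct mount *mnt)
{
	guard(mount_locked_reader)();

	if (mnt_hold_writers(mnt))		/* -EBUSY if writers are active */
		return -EBUSY;
	mnt->mnt.mnt_flags |= MNT_READONLY;	/* property writers must not miss */
	mnt_unhold_writers(mnt);		/* matching unhold, same scope */
	return 0;
}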
+
+static inline void mnt_del_instance(struct mount *m)
+{
+ struct mount **p = m->mnt_pprev_for_sb;
+ struct mount *next = m->mnt_next_for_sb;
+
+ if (next)
+ next->mnt_pprev_for_sb = p;
+ *p = next;
+}
+
+static inline void mnt_add_instance(struct mount *m, struct super_block *s)
+{
+ struct mount *first = s->s_mounts;
+
+ if (first)
+ first->mnt_pprev_for_sb = &m->mnt_next_for_sb;
+ m->mnt_next_for_sb = first;
+ m->mnt_pprev_for_sb = &s->s_mounts;
+ s->s_mounts = m;
}
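
These two helpers hand-roll an hlist-style list threaded through struct mount: mnt_pprev_for_sb points at the previous node's mnt_next_for_sb (or at sb->s_mounts for the head), so unlinking needs no back-pointer to a full node. A small illustration of how the list is walked (the pattern sb_prepare_remount_readonly() uses below); the helper name is made up:

static void example_walk_sb_mounts(struct super_block *sb)
{
	guard(mount_locked_reader)();

	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb)
		pr_debug("mount %llu\n", (unsigned long long)m->mnt_id_unique);
}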
static int mnt_make_readonly(struct mount *mnt)
@@ -522,47 +699,37 @@ static int mnt_make_readonly(struct mount *mnt)
int sb_prepare_remount_readonly(struct super_block *sb)
{
- struct mount *mnt;
int err = 0;
- /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
+ /* Racy optimization. Recheck the counter under WRITE_HOLD */
if (atomic_long_read(&sb->s_remove_count))
return -EBUSY;
- lock_mount_hash();
- list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
- if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
- mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
- smp_mb();
- if (mnt_get_writers(mnt) > 0) {
- err = -EBUSY;
+ guard(mount_locked_reader)();
+
+ for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
+ if (!(m->mnt.mnt_flags & MNT_READONLY)) {
+ err = mnt_hold_writers(m);
+ if (err)
break;
- }
}
}
if (!err && atomic_long_read(&sb->s_remove_count))
err = -EBUSY;
- if (!err) {
- sb->s_readonly_remount = 1;
- smp_wmb();
- }
- list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
- if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
- mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+ if (!err)
+ sb_start_ro_state_change(sb);
+ for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
+ if (test_write_hold(m))
+ clear_write_hold(m);
}
- unlock_mount_hash();
return err;
}
static void free_vfsmnt(struct mount *mnt)
{
- struct user_namespace *mnt_userns;
-
- mnt_userns = mnt_user_ns(&mnt->mnt);
- if (mnt_userns != &init_user_ns)
- put_user_ns(mnt_userns);
+ mnt_idmap_put(mnt_idmap(&mnt->mnt));
kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_pcp);
@@ -585,15 +752,11 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
- smp_mb(); // see mntput_no_expire()
+ smp_mb(); // see mntput_no_expire() and do_umount()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
- if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
- mnt_add_count(mnt, -1);
- return 1;
- }
lock_mount_hash();
- if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+ if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
mnt_add_count(mnt, -1);
unlock_mount_hash();
return 1;
@@ -604,7 +767,7 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
}
/* call under rcu_read_lock */
-bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
int res = __legitimize_mnt(bastard, seq);
if (likely(!res))
@@ -617,9 +780,17 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return false;
}
-/*
- * find the first mount at @dentry on vfsmount @mnt.
- * call under rcu_read_lock()
+/**
+ * __lookup_mnt - mount hash lookup
+ * @mnt: parent mount
+ * @dentry: dentry of mountpoint
+ *
+ * If @mnt has a child mount @c mounted on @dentry find and return it.
+ * Caller must either hold the spinlock component of @mount_lock or
+ * hold rcu_read_lock(), sample the seqcount component before the call
+ * and recheck it afterwards.
+ *
+ * Return: The child of @mnt mounted on @dentry or %NULL.
*/
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
@@ -632,21 +803,12 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
return NULL;
}
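
A hedged sketch of the lockless calling convention described in the new kernel-doc (sample the seqcount, look up, recheck); shown for illustration only, since it does not pin the result the way lookup_mnt() below does:

static bool example_is_overmounted(struct vfsmount *parent, struct dentry *mp)
{
	unsigned seq;
	bool ret;

	guard(rcu)();
	do {
		seq = read_seqbegin(&mount_lock);
		ret = __lookup_mnt(parent, mp) != NULL;
	} while (read_seqretry(&mount_lock, seq));
	return ret;
}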
-/*
- * lookup_mnt - Return the first child mount mounted at path
- *
- * "First" means first mounted chronologically. If you create the
- * following mounts:
- *
- * mount /dev/sda1 /mnt
- * mount /dev/sda2 /mnt
- * mount /dev/sda3 /mnt
- *
- * Then lookup_mnt() on the base /mnt dentry in the root mount will
- * return successively the root dentry and vfsmount of /dev/sda1, then
- * /dev/sda2, then /dev/sda3, then NULL.
+/**
+ * lookup_mnt - Return the child mount mounted at given location
+ * @path: location in the namespace
*
- * lookup_mnt takes a reference to the found vfsmount.
+ * Acquires and returns a new reference to mount at given location
+ * or %NULL if nothing is mounted there.
*/
struct vfsmount *lookup_mnt(const struct path *path)
{
@@ -664,21 +826,6 @@ struct vfsmount *lookup_mnt(const struct path *path)
return m;
}
-static inline void lock_ns_list(struct mnt_namespace *ns)
-{
- spin_lock(&ns->ns_lock);
-}
-
-static inline void unlock_ns_list(struct mnt_namespace *ns)
-{
- spin_unlock(&ns->ns_lock);
-}
-
-static inline bool mnt_is_cursor(struct mount *mnt)
-{
- return mnt->mnt.mnt_flags & MNT_CURSOR;
-}
-
/*
* __is_local_mountpoint - Test to see if dentry is a mountpoint in the
* current mount namespace.
@@ -694,63 +841,63 @@ static inline bool mnt_is_cursor(struct mount *mnt)
* namespace not just a mount that happens to have some specified
* parent mount.
*/
-bool __is_local_mountpoint(struct dentry *dentry)
+bool __is_local_mountpoint(const struct dentry *dentry)
{
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
- struct mount *mnt;
- bool is_covered = false;
+ struct mount *mnt, *n;
- down_read(&namespace_sem);
- lock_ns_list(ns);
- list_for_each_entry(mnt, &ns->list, mnt_list) {
- if (mnt_is_cursor(mnt))
- continue;
- is_covered = (mnt->mnt_mountpoint == dentry);
- if (is_covered)
- break;
- }
- unlock_ns_list(ns);
- up_read(&namespace_sem);
+ guard(namespace_shared)();
+
+ rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node)
+ if (mnt->mnt_mountpoint == dentry)
+ return true;
- return is_covered;
+ return false;
}
-static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
+struct pinned_mountpoint {
+ struct hlist_node node;
+ struct mountpoint *mp;
+ struct mount *parent;
+};
+
+static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
- mp->m_count++;
- return mp;
+ hlist_add_head(&m->node, &mp->m_list);
+ m->mp = mp;
+ return true;
}
}
- return NULL;
+ return false;
}
-static struct mountpoint *get_mountpoint(struct dentry *dentry)
+static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
- struct mountpoint *mp, *new = NULL;
+ struct mountpoint *mp __free(kfree) = NULL;
+ bool found;
int ret;
if (d_mountpoint(dentry)) {
/* might be worth a WARN_ON() */
if (d_unlinked(dentry))
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
mountpoint:
read_seqlock_excl(&mount_lock);
- mp = lookup_mountpoint(dentry);
+ found = lookup_mountpoint(dentry, m);
read_sequnlock_excl(&mount_lock);
- if (mp)
- goto done;
+ if (found)
+ return 0;
}
- if (!new)
- new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
- if (!new)
- return ERR_PTR(-ENOMEM);
-
+ if (!mp)
+ mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
/* Exactly one process may set d_mounted */
ret = d_set_mounted(dentry);
@@ -760,34 +907,28 @@ mountpoint:
goto mountpoint;
/* The dentry is not available as a mountpoint? */
- mp = ERR_PTR(ret);
if (ret)
- goto done;
+ return ret;
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
- new->m_dentry = dget(dentry);
- new->m_count = 1;
- hlist_add_head(&new->m_hash, mp_hash(dentry));
- INIT_HLIST_HEAD(&new->m_list);
+ mp->m_dentry = dget(dentry);
+ hlist_add_head(&mp->m_hash, mp_hash(dentry));
+ INIT_HLIST_HEAD(&mp->m_list);
+ hlist_add_head(&m->node, &mp->m_list);
+ m->mp = no_free_ptr(mp);
read_sequnlock_excl(&mount_lock);
-
- mp = new;
- new = NULL;
-done:
- kfree(new);
- return mp;
+ return 0;
}
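
get_mountpoint() now leans on the scope-based cleanup helpers from <linux/cleanup.h>; a minimal, self-contained illustration of that idiom (struct foo and foo_init() are hypothetical):

static struct foo *alloc_foo_example(void)
{
	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	if (foo_init(f))		/* placeholder; nonzero means failure */
		return NULL;		/* f is kfree()d automatically here */
	return no_free_ptr(f);		/* success: hand ownership to caller */
}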
/*
* vfsmount lock must be held. Additionally, the caller is responsible
* for serializing calls for given disposal list.
*/
-static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
+static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
- if (!--mp->m_count) {
+ if (hlist_empty(&mp->m_list)) {
struct dentry *dentry = mp->m_dentry;
- BUG_ON(!hlist_empty(&mp->m_list));
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
@@ -797,17 +938,33 @@ static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
}
}
-/* called with namespace_lock and vfsmount lock */
-static void put_mountpoint(struct mountpoint *mp)
+/*
+ * locks: mount_lock [read_seqlock_excl], namespace_sem [excl]
+ */
+static void unpin_mountpoint(struct pinned_mountpoint *m)
{
- __put_mountpoint(mp, &ex_mountpoints);
+ if (m->mp) {
+ hlist_del(&m->node);
+ maybe_free_mountpoint(m->mp, &ex_mountpoints);
+ }
}
-static inline int check_mnt(struct mount *mnt)
+static inline int check_mnt(const struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
+static inline bool check_anonymous_mnt(struct mount *mnt)
+{
+ u64 seq;
+
+ if (!is_anon_ns(mnt->mnt_ns))
+ return false;
+
+ seq = mnt->mnt_ns->seq_origin;
+ return !seq || (seq == current->nsproxy->mnt_ns->ns.ns_id);
+}
+
/*
* vfsmount lock must be held for write
*/
@@ -831,11 +988,14 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
}
/*
- * vfsmount lock must be held for write
+ * locks: mount_lock[write_seqlock]
*/
-static struct mountpoint *unhash_mnt(struct mount *mnt)
+static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list)
{
struct mountpoint *mp;
+ struct mount *parent = mnt->mnt_parent;
+ if (unlikely(parent->overmount == mnt))
+ parent->overmount = NULL;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
@@ -843,15 +1003,15 @@ static struct mountpoint *unhash_mnt(struct mount *mnt)
hlist_del_init(&mnt->mnt_mp_list);
mp = mnt->mnt_mp;
mnt->mnt_mp = NULL;
- return mp;
+ maybe_free_mountpoint(mp, shrink_list);
}
/*
- * vfsmount lock must be held for write
+ * locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints)
*/
static void umount_mnt(struct mount *mnt)
{
- put_mountpoint(unhash_mnt(mnt));
+ __umount_mnt(mnt, &ex_mountpoints);
}
/*
@@ -861,36 +1021,48 @@ void mnt_set_mountpoint(struct mount *mnt,
struct mountpoint *mp,
struct mount *child_mnt)
{
- mp->m_count++;
- mnt_add_count(mnt, 1); /* essentially, that's mntget */
child_mnt->mnt_mountpoint = mp->m_dentry;
child_mnt->mnt_parent = mnt;
child_mnt->mnt_mp = mp;
hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
-static void __attach_mnt(struct mount *mnt, struct mount *parent)
+static void make_visible(struct mount *mnt)
{
+ struct mount *parent = mnt->mnt_parent;
+ if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
+ parent->overmount = mnt;
hlist_add_head_rcu(&mnt->mnt_hash,
m_hash(&parent->mnt, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
-/*
- * vfsmount lock must be held for write
+/**
+ * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
+ * list of child mounts
+ * @parent: the parent
+ * @mnt: the new mount
+ * @mp: the new mountpoint
+ *
+ * Mount @mnt at @mp on @parent. Then attach @mnt
+ * to @parent's child mount list and to @mount_hashtable.
+ *
+ * Note that when make_visible() is called, @mnt->mnt_parent already points
+ * to the correct parent.
+ *
+ * Context: This function expects namespace_lock() and lock_mount_hash()
+ * to have been acquired in that order.
*/
-static void attach_mnt(struct mount *mnt,
- struct mount *parent,
- struct mountpoint *mp)
+static void attach_mnt(struct mount *mnt, struct mount *parent,
+ struct mountpoint *mp)
{
mnt_set_mountpoint(parent, mp, mnt);
- __attach_mnt(mnt, parent);
+ make_visible(mnt);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
struct mountpoint *old_mp = mnt->mnt_mp;
- struct mount *old_parent = mnt->mnt_parent;
list_del_init(&mnt->mnt_child);
hlist_del_init(&mnt->mnt_mp_list);
@@ -898,33 +1070,41 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct m
attach_mnt(mnt, parent, mp);
- put_mountpoint(old_mp);
- mnt_add_count(old_parent, -1);
+ maybe_free_mountpoint(old_mp, &ex_mountpoints);
}
-/*
- * vfsmount lock must be held for write
- */
-static void commit_tree(struct mount *mnt)
+static inline struct mount *node_to_mount(struct rb_node *node)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
- LIST_HEAD(head);
- struct mnt_namespace *n = parent->mnt_ns;
-
- BUG_ON(parent == mnt);
+ return node ? rb_entry(node, struct mount, mnt_node) : NULL;
+}
- list_add_tail(&head, &mnt->mnt_list);
- list_for_each_entry(m, &head, mnt_list)
- m->mnt_ns = n;
+static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
+{
+ struct rb_node **link = &ns->mounts.rb_node;
+ struct rb_node *parent = NULL;
+ bool mnt_first_node = true, mnt_last_node = true;
- list_splice(&head, n->list.prev);
+ WARN_ON(mnt_ns_attached(mnt));
+ mnt->mnt_ns = ns;
+ while (*link) {
+ parent = *link;
+ if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
+ link = &parent->rb_left;
+ mnt_last_node = false;
+ } else {
+ link = &parent->rb_right;
+ mnt_first_node = false;
+ }
+ }
- n->mounts += n->pending_mounts;
- n->pending_mounts = 0;
+ if (mnt_last_node)
+ ns->mnt_last_node = &mnt->mnt_node;
+ if (mnt_first_node)
+ ns->mnt_first_node = &mnt->mnt_node;
+ rb_link_node(&mnt->mnt_node, parent, link);
+ rb_insert_color(&mnt->mnt_node, &ns->mounts);
- __attach_mnt(mnt, parent);
- touch_mnt_namespace(n);
+ mnt_notify_add(mnt);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
@@ -953,6 +1133,38 @@ static struct mount *skip_mnt_tree(struct mount *p)
return p;
}
+/*
+ * vfsmount lock must be held for write
+ */
+static void commit_tree(struct mount *mnt)
+{
+ struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;
+
+ if (!mnt_ns_attached(mnt)) {
+ for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
+ mnt_add_to_ns(n, m);
+ n->nr_mounts += n->pending_mounts;
+ n->pending_mounts = 0;
+ }
+
+ make_visible(mnt);
+ touch_mnt_namespace(n);
+}
+
+static void setup_mnt(struct mount *m, struct dentry *root)
+{
+ struct super_block *s = root->d_sb;
+
+ atomic_inc(&s->s_active);
+ m->mnt.mnt_sb = s;
+ m->mnt.mnt_root = dget(root);
+ m->mnt_mountpoint = m->mnt.mnt_root;
+ m->mnt_parent = m;
+
+ guard(mount_locked_reader)();
+ mnt_add_instance(m, s);
+}
+
/**
* vfs_create_mount - Create a mount for a configured superblock
* @fc: The configuration context with the superblock attached
@@ -969,22 +1181,15 @@ struct vfsmount *vfs_create_mount(struct fs_context *fc)
if (!fc->root)
return ERR_PTR(-EINVAL);
- mnt = alloc_vfsmnt(fc->source ?: "none");
+ mnt = alloc_vfsmnt(fc->source);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (fc->sb_flags & SB_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
- atomic_inc(&fc->root->d_sb->s_active);
- mnt->mnt.mnt_sb = fc->root->d_sb;
- mnt->mnt.mnt_root = dget(fc->root);
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
+ setup_mnt(mnt, fc->root);
- lock_mount_hash();
- list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
- unlock_mount_hash();
return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
@@ -1000,6 +1205,15 @@ struct vfsmount *fc_mount(struct fs_context *fc)
}
EXPORT_SYMBOL(fc_mount);
+struct vfsmount *fc_mount_longterm(struct fs_context *fc)
+{
+ struct vfsmount *mnt = fc_mount(fc);
+ if (!IS_ERR(mnt))
+ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
+ return mnt;
+}
+EXPORT_SYMBOL(fc_mount_longterm);
+
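
A hedged sketch of an in-kernel user of the new helper; the filesystem type is whatever the caller owns. fc_mount_longterm() marks the result as an internal long-term mount, so it should later be torn down with kern_unmount() rather than a bare mntput():

static struct vfsmount *example_internal_mount(struct file_system_type *type)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);
	mnt = fc_mount_longterm(fc);
	put_fs_context(fc);
	return mnt;
}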
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data)
@@ -1016,8 +1230,7 @@ struct vfsmount *vfs_kern_mount(struct file_system_type *type,
return ERR_CAST(fc);
if (name)
- ret = vfs_parse_fs_string(fc, "source",
- name, strlen(name));
+ ret = vfs_parse_fs_string(fc, "source", name);
if (!ret)
ret = parse_monolithic_mount_data(fc, data);
if (!ret)
@@ -1030,25 +1243,9 @@ struct vfsmount *vfs_kern_mount(struct file_system_type *type,
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
-struct vfsmount *
-vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
- const char *name, void *data)
-{
- /* Until it is worked out how to pass the user namespace
- * through from the parent mount to the submount don't support
- * unprivileged mounts with submounts.
- */
- if (mountpoint->d_sb->s_user_ns != &init_user_ns)
- return ERR_PTR(-EPERM);
-
- return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
-}
-EXPORT_SYMBOL_GPL(vfs_submount);
-
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
- struct super_block *sb = old->mnt.mnt_sb;
struct mount *mnt;
int err;
@@ -1056,7 +1253,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
if (!mnt)
return ERR_PTR(-ENOMEM);
- if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
+ mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) &
+ ~MNT_INTERNAL_FLAGS;
+
+ if (flag & (CL_SLAVE | CL_PRIVATE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
@@ -1067,45 +1267,26 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
goto out_free;
}
- mnt->mnt.mnt_flags = old->mnt.mnt_flags;
- mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+ if (mnt->mnt_group_id)
+ set_mnt_shared(mnt);
- atomic_inc(&sb->s_active);
- mnt->mnt.mnt_userns = mnt_user_ns(&old->mnt);
- if (mnt->mnt.mnt_userns != &init_user_ns)
- mnt->mnt.mnt_userns = get_user_ns(mnt->mnt.mnt_userns);
- mnt->mnt.mnt_sb = sb;
- mnt->mnt.mnt_root = dget(root);
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
- lock_mount_hash();
- list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
- unlock_mount_hash();
+ mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
- if ((flag & CL_SLAVE) ||
- ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
- list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+ setup_mnt(mnt, root);
+
+ if (flag & CL_PRIVATE) // we are done with it
+ return mnt;
+
+ if (peers(mnt, old))
+ list_add(&mnt->mnt_share, &old->mnt_share);
+
+ if ((flag & CL_SLAVE) && old->mnt_group_id) {
+ hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
- CLEAR_MNT_SHARED(mnt);
- } else if (!(flag & CL_PRIVATE)) {
- if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
- list_add(&mnt->mnt_share, &old->mnt_share);
- if (IS_MNT_SLAVE(old))
- list_add(&mnt->mnt_slave, &old->mnt_slave);
+ } else if (IS_MNT_SLAVE(old)) {
+ hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave);
mnt->mnt_master = old->mnt_master;
- } else {
- CLEAR_MNT_SHARED(mnt);
- }
- if (flag & CL_MAKE_SHARED)
- set_mnt_shared(mnt);
-
- /* stick the duplicate mount on the same expiry list
- * as the original if that was on one */
- if (flag & CL_EXPIRE) {
- if (!list_empty(&old->mnt_expire))
- list_add(&mnt->mnt_expire, &old->mnt_expire);
}
-
return mnt;
out_free:
@@ -1155,26 +1336,12 @@ static void delayed_mntput(struct work_struct *unused)
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
-static void mntput_no_expire(struct mount *mnt)
+static void noinline mntput_no_expire_slowpath(struct mount *mnt)
{
LIST_HEAD(list);
int count;
- rcu_read_lock();
- if (likely(READ_ONCE(mnt->mnt_ns))) {
- /*
- * Since we don't do lock_mount_hash() here,
- * ->mnt_ns can change under us. However, if it's
- * non-NULL, then there's a reference that won't
- * be dropped until after an RCU delay done after
- * turning ->mnt_ns NULL. So if we observe it
- * non-NULL under rcu_read_lock(), the reference
- * we are dropping is not the final one.
- */
- mnt_add_count(mnt, -1);
- rcu_read_unlock();
- return;
- }
+ VFS_BUG_ON(mnt->mnt_ns);
lock_mount_hash();
/*
* make sure that if __legitimize_mnt() has not seen us grab
@@ -1197,12 +1364,14 @@ static void mntput_no_expire(struct mount *mnt)
mnt->mnt.mnt_flags |= MNT_DOOMED;
rcu_read_unlock();
- list_del(&mnt->mnt_instance);
+ mnt_del_instance(mnt);
+ if (unlikely(!list_empty(&mnt->mnt_expire)))
+ list_del(&mnt->mnt_expire);
if (unlikely(!list_empty(&mnt->mnt_mounts))) {
struct mount *p, *tmp;
list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
- __put_mountpoint(unhash_mnt(p), &list);
+ __umount_mnt(p, &list);
hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
}
}
@@ -1223,13 +1392,33 @@ static void mntput_no_expire(struct mount *mnt)
cleanup_mnt(mnt);
}
+static void mntput_no_expire(struct mount *mnt)
+{
+ rcu_read_lock();
+ if (likely(READ_ONCE(mnt->mnt_ns))) {
+ /*
+ * Since we don't do lock_mount_hash() here,
+ * ->mnt_ns can change under us. However, if it's
+ * non-NULL, then there's a reference that won't
+ * be dropped until after an RCU delay done after
+ * turning ->mnt_ns NULL. So if we observe it
+ * non-NULL under rcu_read_lock(), the reference
+ * we are dropping is not the final one.
+ */
+ mnt_add_count(mnt, -1);
+ rcu_read_unlock();
+ return;
+ }
+ mntput_no_expire_slowpath(mnt);
+}
+
void mntput(struct vfsmount *mnt)
{
if (mnt) {
struct mount *m = real_mount(mnt);
- /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
+ /* avoid cacheline pingpong */
if (unlikely(m->mnt_expiry_mark))
- m->mnt_expiry_mark = 0;
+ WRITE_ONCE(m->mnt_expiry_mark, 0);
mntput_no_expire(m);
}
}
@@ -1243,6 +1432,17 @@ struct vfsmount *mntget(struct vfsmount *mnt)
}
EXPORT_SYMBOL(mntget);
+/*
+ * Make a mount point inaccessible to new lookups.
+ * Because there may still be current users, the caller MUST WAIT
+ * for an RCU grace period before destroying the mount point.
+ */
+void mnt_make_shortterm(struct vfsmount *mnt)
+{
+ if (mnt)
+ real_mount(mnt)->mnt_ns = NULL;
+}
+
/**
* path_is_mountpoint() - Check if path is a mount in the current namespace.
* @path: path to check
@@ -1283,65 +1483,81 @@ struct vfsmount *mnt_clone_internal(const struct path *path)
return &p->mnt;
}
-#ifdef CONFIG_PROC_FS
-static struct mount *mnt_list_next(struct mnt_namespace *ns,
- struct list_head *p)
+/*
+ * Returns the mount which either has the specified mnt_id, or has the next
+ * smallest id after the specified one.
+ */
+static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
- struct mount *mnt, *ret = NULL;
+ struct rb_node *node = ns->mounts.rb_node;
+ struct mount *ret = NULL;
- lock_ns_list(ns);
- list_for_each_continue(p, &ns->list) {
- mnt = list_entry(p, typeof(*mnt), mnt_list);
- if (!mnt_is_cursor(mnt)) {
- ret = mnt;
- break;
+ while (node) {
+ struct mount *m = node_to_mount(node);
+
+ if (mnt_id <= m->mnt_id_unique) {
+ ret = node_to_mount(node);
+ if (mnt_id == m->mnt_id_unique)
+ break;
+ node = node->rb_left;
+ } else {
+ node = node->rb_right;
}
}
- unlock_ns_list(ns);
+ return ret;
+}
+/*
+ * Returns the mount which either has the specified mnt_id, or has the next
+ * smaller id before the specified one.
+ */
+static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
+{
+ struct rb_node *node = ns->mounts.rb_node;
+ struct mount *ret = NULL;
+
+ while (node) {
+ struct mount *m = node_to_mount(node);
+
+ if (mnt_id >= m->mnt_id_unique) {
+ ret = node_to_mount(node);
+ if (mnt_id == m->mnt_id_unique)
+ break;
+ node = node->rb_right;
+ } else {
+ node = node->rb_left;
+ }
+ }
return ret;
}
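
Both lookups exist so that an id-ordered walk can resume at an arbitrary mnt_id; a sketch of the forward case (essentially what the /proc iterator below and listmount() do), assuming namespace_sem is held by the caller:

static void example_walk_from(struct mnt_namespace *ns, u64 start_id)
{
	for (struct mount *m = mnt_find_id_at(ns, start_id); m;
	     m = node_to_mount(rb_next(&m->mnt_node)))
		pr_debug("mnt_id %llu\n", (unsigned long long)m->mnt_id_unique);
}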
+#ifdef CONFIG_PROC_FS
+
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_mounts *p = m->private;
- struct list_head *prev;
down_read(&namespace_sem);
- if (!*pos) {
- prev = &p->ns->list;
- } else {
- prev = &p->cursor.mnt_list;
- /* Read after we'd reached the end? */
- if (list_empty(prev))
- return NULL;
- }
-
- return mnt_list_next(p->ns, prev);
+ return mnt_find_id_at(p->ns, *pos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct proc_mounts *p = m->private;
- struct mount *mnt = v;
+ struct mount *next = NULL, *mnt = v;
+ struct rb_node *node = rb_next(&mnt->mnt_node);
++*pos;
- return mnt_list_next(p->ns, &mnt->mnt_list);
+ if (node) {
+ next = node_to_mount(node);
+ *pos = next->mnt_id_unique;
+ }
+ return next;
}
static void m_stop(struct seq_file *m, void *v)
{
- struct proc_mounts *p = m->private;
- struct mount *mnt = v;
-
- lock_ns_list(p->ns);
- if (mnt)
- list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
- else
- list_del_init(&p->cursor.mnt_list);
- unlock_ns_list(p->ns);
up_read(&namespace_sem);
}
@@ -1359,14 +1575,6 @@ const struct seq_operations mounts_op = {
.show = m_show,
};
-void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
-{
- down_read(&namespace_sem);
- lock_ns_list(ns);
- list_del(&cursor->mnt_list);
- unlock_ns_list(ns);
- up_read(&namespace_sem);
-}
#endif /* CONFIG_PROC_FS */
/**
@@ -1380,23 +1588,19 @@ void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
int may_umount_tree(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
- int actual_refs = 0;
- int minimum_refs = 0;
- struct mount *p;
- BUG_ON(!m);
+ bool busy = false;
/* write lock needed for mnt_get_count */
lock_mount_hash();
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- actual_refs += mnt_get_count(p);
- minimum_refs += 2;
+ for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
+ if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
+ busy = true;
+ break;
+ }
}
unlock_mount_hash();
- if (actual_refs > minimum_refs)
- return 0;
-
- return 1;
+ return !busy;
}
EXPORT_SYMBOL(may_umount_tree);
@@ -1428,17 +1632,80 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
+#ifdef CONFIG_FSNOTIFY
+static void mnt_notify(struct mount *p)
+{
+ if (!p->prev_ns && p->mnt_ns) {
+ fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
+ } else if (p->prev_ns && !p->mnt_ns) {
+ fsnotify_mnt_detach(p->prev_ns, &p->mnt);
+ } else if (p->prev_ns == p->mnt_ns) {
+ fsnotify_mnt_move(p->mnt_ns, &p->mnt);
+ } else {
+ fsnotify_mnt_detach(p->prev_ns, &p->mnt);
+ fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
+ }
+ p->prev_ns = p->mnt_ns;
+}
+
+static void notify_mnt_list(void)
+{
+ struct mount *m, *tmp;
+ /*
+ * Notify about mounts that were added/reparented/detached/remain
+ * connected after unmount.
+ */
+ list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
+ mnt_notify(m);
+ list_del_init(&m->to_notify);
+ }
+}
+
+static bool need_notify_mnt_list(void)
+{
+ return !list_empty(&notify_list);
+}
+#else
+static void notify_mnt_list(void)
+{
+}
+
+static bool need_notify_mnt_list(void)
+{
+ return false;
+}
+#endif
+
+static void free_mnt_ns(struct mnt_namespace *);
static void namespace_unlock(void)
{
struct hlist_head head;
struct hlist_node *p;
struct mount *m;
+ struct mnt_namespace *ns = emptied_ns;
LIST_HEAD(list);
hlist_move_list(&unmounted, &head);
list_splice_init(&ex_mountpoints, &list);
+ emptied_ns = NULL;
- up_write(&namespace_sem);
+ if (need_notify_mnt_list()) {
+ /*
+ * No point blocking out concurrent readers while notifications
+ * are sent. This will also allow statmount()/listmount() to run
+ * concurrently.
+ */
+ downgrade_write(&namespace_sem);
+ notify_mnt_list();
+ up_read(&namespace_sem);
+ } else {
+ up_write(&namespace_sem);
+ }
+ if (unlikely(ns)) {
+ /* Make sure we notice when we leak mounts. */
+ VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
+ free_mnt_ns(ns);
+ }
shrink_dentry_list(&list);
@@ -1508,7 +1775,9 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
- list_move(&p->mnt_list, &tmp_list);
+ if (mnt_ns_attached(p))
+ move_from_ns(p);
+ list_add_tail(&p->mnt_list, &tmp_list);
}
/* Hide the mounts from mnt_mounts */
@@ -1516,10 +1785,12 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_del_init(&p->mnt_child);
}
- /* Add propogated mounts to the tmp_list */
+ /* Add propagated mounts to the tmp_list */
if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
+ bulk_make_private(&tmp_list);
+
while (!list_empty(&tmp_list)) {
struct mnt_namespace *ns;
bool disconnect;
@@ -1528,7 +1799,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_del_init(&p->mnt_list);
ns = p->mnt_ns;
if (ns) {
- ns->mounts--;
+ ns->nr_mounts--;
__touch_mnt_namespace(ns);
}
p->mnt_ns = NULL;
@@ -1537,7 +1808,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
disconnect = disconnect_mount(p, how);
if (mnt_has_parent(p)) {
- mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
/* Don't forget about p */
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
@@ -1545,9 +1815,21 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
umount_mnt(p);
}
}
- change_mnt_propagation(p, MS_PRIVATE);
if (disconnect)
hlist_add_head(&p->mnt_umount, &unmounted);
+
+ /*
+ * At this point p->mnt_ns is NULL, notification will be queued
+ * only if
+ *
+ * - p->prev_ns is non-NULL *and*
+ * - p->prev_ns->n_fsnotify_marks is non-NULL
+ *
+ * This will preclude queuing the mount if this is a cleanup
+ * after a failed copy_tree() or destruction of an anonymous
+ * namespace, etc.
+ */
+ mnt_notify_add(p);
}
}
@@ -1601,7 +1883,7 @@ static int do_umount(struct mount *mnt, int flags)
* all race cases, but it's a slowpath.
*/
lock_mount_hash();
- if (mnt_get_count(mnt) != 2) {
+ if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
unlock_mount_hash();
return -EBUSY;
}
@@ -1647,22 +1929,27 @@ static int do_umount(struct mount *mnt, int flags)
namespace_lock();
lock_mount_hash();
- /* Recheck MNT_LOCKED with the locks held */
+ /* Repeat the earlier racy checks, now that we are holding the locks */
retval = -EINVAL;
+ if (!check_mnt(mnt))
+ goto out;
+
if (mnt->mnt.mnt_flags & MNT_LOCKED)
goto out;
+ if (!mnt_has_parent(mnt)) /* not the absolute root */
+ goto out;
+
event++;
if (flags & MNT_DETACH) {
- if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt, UMOUNT_PROPAGATE);
+ umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
+ smp_mb(); // paired with __legitimize_mnt()
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
- if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
}
@@ -1680,38 +1967,35 @@ out:
* detach_mounts allows lazily unmounting those mounts instead of
* leaking them.
*
- * The caller may hold dentry->d_inode->i_mutex.
+ * The caller may hold dentry->d_inode->i_rwsem.
*/
void __detach_mounts(struct dentry *dentry)
{
- struct mountpoint *mp;
+ struct pinned_mountpoint mp = {};
struct mount *mnt;
- namespace_lock();
- lock_mount_hash();
- mp = lookup_mountpoint(dentry);
- if (!mp)
- goto out_unlock;
+ guard(namespace_excl)();
+ guard(mount_writer)();
+
+ if (!lookup_mountpoint(dentry, &mp))
+ return;
event++;
- while (!hlist_empty(&mp->m_list)) {
- mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+ while (mp.node.next) {
+ mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
umount_mnt(mnt);
hlist_add_head(&mnt->mnt_umount, &unmounted);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
- put_mountpoint(mp);
-out_unlock:
- unlock_mount_hash();
- namespace_unlock();
+ unpin_mountpoint(&mp);
}
/*
* Is the caller allowed to modify his namespace?
*/
-static inline bool may_mount(void)
+bool may_mount(void)
{
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
@@ -1728,22 +2012,23 @@ static void warn_mandlock(void)
static int can_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
+ struct super_block *sb = path->dentry->d_sb;
if (!may_mount())
return -EPERM;
- if (path->dentry != path->mnt->mnt_root)
+ if (!path_mounted(path))
return -EINVAL;
if (!check_mnt(mnt))
return -EINVAL;
if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
return -EINVAL;
- if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+ if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
// caller is responsible for flags being sane
-int path_umount(struct path *path, int flags)
+int path_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
int ret;
@@ -1795,14 +2080,15 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static bool is_mnt_ns_file(struct dentry *dentry)
{
+ struct ns_common *ns;
+
/* Is this a proxy for a mount namespace? */
- return dentry->d_op == &ns_dentry_operations &&
- dentry->d_fsdata == &mntns_operations;
-}
+ if (dentry->d_op != &ns_dentry_operations)
+ return false;
-static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
-{
- return container_of(ns, struct mnt_namespace, ns);
+ ns = d_inode(dentry)->i_private;
+
+ return ns->ops == &mntns_operations;
}
struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
@@ -1810,132 +2096,227 @@ struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
return &mnt->ns;
}
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
+{
+ struct ns_common *ns;
+
+ guard(rcu)();
+
+ for (;;) {
+ ns = ns_tree_adjoined_rcu(mntns, previous);
+ if (IS_ERR(ns))
+ return ERR_CAST(ns);
+
+ mntns = to_mnt_ns(ns);
+
+ /*
+ * The last passive reference count is put with RCU
+ * delay so accessing the mount namespace is not just
+ * safe but all relevant members are still valid.
+ */
+ if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
+ continue;
+
+ /*
+ * We need an active reference count as we're persisting
+ * the mount namespace and it might already be on its
+ * deathbed.
+ */
+ if (!ns_ref_get(mntns))
+ continue;
+
+ return mntns;
+ }
+}
+
+struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry)
+{
+ if (!is_mnt_ns_file(dentry))
+ return NULL;
+
+ return to_mnt_ns(get_proc_ns(dentry->d_inode));
+}
+
static bool mnt_ns_loop(struct dentry *dentry)
{
/* Could bind mounting the mount namespace inode cause a
* mount namespace loop?
*/
- struct mnt_namespace *mnt_ns;
- if (!is_mnt_ns_file(dentry))
+ struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry);
+
+ if (!mnt_ns)
return false;
- mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
- return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
+ return current->nsproxy->mnt_ns->ns.ns_id >= mnt_ns->ns.ns_id;
}
-struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
int flag)
{
- struct mount *res, *p, *q, *r, *parent;
+ struct mount *res, *src_parent, *src_root_child, *src_mnt,
+ *dst_parent, *dst_mnt;
- if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
+ if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
return ERR_PTR(-EINVAL);
if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
return ERR_PTR(-EINVAL);
- res = q = clone_mnt(mnt, dentry, flag);
- if (IS_ERR(q))
- return q;
+ res = dst_mnt = clone_mnt(src_root, dentry, flag);
+ if (IS_ERR(dst_mnt))
+ return dst_mnt;
- q->mnt_mountpoint = mnt->mnt_mountpoint;
+ src_parent = src_root;
- p = mnt;
- list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
- struct mount *s;
- if (!is_subdir(r->mnt_mountpoint, dentry))
+ list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
+ if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
continue;
- for (s = r; s; s = next_mnt(s, r)) {
+ for (src_mnt = src_root_child; src_mnt;
+ src_mnt = next_mnt(src_mnt, src_root_child)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
- IS_MNT_UNBINDABLE(s)) {
- if (s->mnt.mnt_flags & MNT_LOCKED) {
+ IS_MNT_UNBINDABLE(src_mnt)) {
+ if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
/* Both unbindable and locked. */
- q = ERR_PTR(-EPERM);
+ dst_mnt = ERR_PTR(-EPERM);
goto out;
} else {
- s = skip_mnt_tree(s);
+ src_mnt = skip_mnt_tree(src_mnt);
continue;
}
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
- is_mnt_ns_file(s->mnt.mnt_root)) {
- s = skip_mnt_tree(s);
+ is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
+ src_mnt = skip_mnt_tree(src_mnt);
continue;
}
- while (p != s->mnt_parent) {
- p = p->mnt_parent;
- q = q->mnt_parent;
+ while (src_parent != src_mnt->mnt_parent) {
+ src_parent = src_parent->mnt_parent;
+ dst_mnt = dst_mnt->mnt_parent;
}
- p = s;
- parent = q;
- q = clone_mnt(p, p->mnt.mnt_root, flag);
- if (IS_ERR(q))
+
+ src_parent = src_mnt;
+ dst_parent = dst_mnt;
+ dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
+ if (IS_ERR(dst_mnt))
goto out;
lock_mount_hash();
- list_add_tail(&q->mnt_list, &res->mnt_list);
- attach_mnt(q, parent, p->mnt_mp);
+ if (src_mnt->mnt.mnt_flags & MNT_LOCKED)
+ dst_mnt->mnt.mnt_flags |= MNT_LOCKED;
+ if (unlikely(flag & CL_EXPIRE)) {
+ /* stick the duplicate mount on the same expiry
+ * list as the original if that was on one */
+ if (!list_empty(&src_mnt->mnt_expire))
+ list_add(&dst_mnt->mnt_expire,
+ &src_mnt->mnt_expire);
+ }
+ attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp);
unlock_mount_hash();
}
}
return res;
+
out:
if (res) {
lock_mount_hash();
umount_tree(res, UMOUNT_SYNC);
unlock_mount_hash();
}
- return q;
+ return dst_mnt;
}
-/* Caller should check returned pointer for errors */
+static inline bool extend_array(struct path **res, struct path **to_free,
+ unsigned n, unsigned *count, unsigned new_count)
+{
+ struct path *p;
+
+ if (likely(n < *count))
+ return true;
+ p = kmalloc_array(new_count, sizeof(struct path), GFP_KERNEL);
+ if (p && *count)
+ memcpy(p, *res, *count * sizeof(struct path));
+ *count = new_count;
+ kfree(*to_free);
+ *to_free = *res = p;
+ return p;
+}
-struct vfsmount *collect_mounts(const struct path *path)
+const struct path *collect_paths(const struct path *path,
+ struct path *prealloc, unsigned count)
{
- struct mount *tree;
- namespace_lock();
- if (!check_mnt(real_mount(path->mnt)))
- tree = ERR_PTR(-EINVAL);
- else
- tree = copy_tree(real_mount(path->mnt), path->dentry,
- CL_COPY_ALL | CL_PRIVATE);
- namespace_unlock();
- if (IS_ERR(tree))
- return ERR_CAST(tree);
- return &tree->mnt;
+ struct mount *root = real_mount(path->mnt);
+ struct mount *child;
+ struct path *res = prealloc, *to_free = NULL;
+ unsigned n = 0;
+
+ guard(namespace_shared)();
+
+ if (!check_mnt(root))
+ return ERR_PTR(-EINVAL);
+ if (!extend_array(&res, &to_free, 0, &count, 32))
+ return ERR_PTR(-ENOMEM);
+ res[n++] = *path;
+ list_for_each_entry(child, &root->mnt_mounts, mnt_child) {
+ if (!is_subdir(child->mnt_mountpoint, path->dentry))
+ continue;
+ for (struct mount *m = child; m; m = next_mnt(m, child)) {
+ if (!extend_array(&res, &to_free, n, &count, 2 * count))
+ return ERR_PTR(-ENOMEM);
+ res[n].mnt = &m->mnt;
+ res[n].dentry = m->mnt.mnt_root;
+ n++;
+ }
+ }
+ if (!extend_array(&res, &to_free, n, &count, count + 1))
+ return ERR_PTR(-ENOMEM);
+ memset(res + n, 0, (count - n) * sizeof(struct path));
+ for (struct path *p = res; p->mnt; p++)
+ path_get(p);
+ return res;
+}
+
+void drop_collected_paths(const struct path *paths, const struct path *prealloc)
+{
+ for (const struct path *p = paths; p->mnt; p++)
+ path_put(p);
+ if (paths != prealloc)
+ kfree(paths);
}
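
A hedged sketch of the expected calling pattern for the new pair (the visitor callback is hypothetical): a small on-stack array is tried first, collect_paths() falls back to a heap allocation for bigger trees, and drop_collected_paths() releases either variant:

static int example_visit_tree(const struct path *root,
			      int (*visit)(const struct path *, void *),
			      void *arg)
{
	struct path prealloc[16];
	const struct path *paths;
	int err = 0;

	paths = collect_paths(root, prealloc, ARRAY_SIZE(prealloc));
	if (IS_ERR(paths))
		return PTR_ERR(paths);
	for (const struct path *p = paths; p->mnt && !err; p++)
		err = visit(p, arg);
	drop_collected_paths(paths, prealloc);
	return err;
}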
-static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
void dissolve_on_fput(struct vfsmount *mnt)
{
- struct mnt_namespace *ns;
- namespace_lock();
- lock_mount_hash();
- ns = real_mount(mnt)->mnt_ns;
- if (ns) {
- if (is_anon_ns(ns))
- umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
- else
- ns = NULL;
+ struct mount *m = real_mount(mnt);
+
+ /*
+ * m used to be the root of anon namespace; if it still is one,
+ * we need to dissolve the mount tree and free that namespace.
+ * Let's try to avoid taking namespace_sem if we can determine
+ * that there's nothing to do without it - rcu_read_lock() is
+ * enough to make anon_ns_root() memory-safe and once m has
+ * left its namespace, it's no longer our concern, since it will
+ * never become a root of anon ns again.
+ */
+
+ scoped_guard(rcu) {
+ if (!anon_ns_root(m))
+ return;
}
- unlock_mount_hash();
- namespace_unlock();
- if (ns)
- free_mnt_ns(ns);
-}
-void drop_collected_mounts(struct vfsmount *mnt)
-{
- namespace_lock();
- lock_mount_hash();
- umount_tree(real_mount(mnt), 0);
- unlock_mount_hash();
- namespace_unlock();
+ scoped_guard(namespace_excl) {
+ if (!anon_ns_root(m))
+ return;
+
+ emptied_ns = m->mnt_ns;
+ lock_mount_hash();
+ umount_tree(m, UMOUNT_CONNECTED);
+ unlock_mount_hash();
+ }
}
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+/* locks: namespace_shared && pinned(mnt) || mount_locked_reader */
+static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
@@ -1949,6 +2330,28 @@ static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
return false;
}
+bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+ guard(mount_locked_reader)();
+ return __has_locked_children(mnt, dentry);
+}
+
+/*
+ * Check that there aren't references to earlier/same mount namespaces in the
+ * specified subtree. Such references can act as pins for mount namespaces
+ * that aren't checked by the mount-cycle checking code, thereby allowing
+ * cycles to be made.
+ *
+ * locks: mount_locked_reader || namespace_shared && pinned(subtree)
+ */
+static bool check_for_nsfs_mounts(struct mount *subtree)
+{
+ for (struct mount *p = subtree; p; p = next_mnt(p, subtree))
+ if (mnt_ns_loop(p->mnt.mnt_root))
+ return false;
+ return true;
+}
+
/**
* clone_private_mount - create a private clone of a path
* @path: path to clone
@@ -1957,6 +2360,8 @@ static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
* will not be attached anywhere in the namespace and will be private (i.e.
* changes to the originating mount won't be propagated into this).
*
+ * This assumes the caller has called, or done the equivalent of, may_mount().
+ *
* Release with mntput().
*/
struct vfsmount *clone_private_mount(const struct path *path)
@@ -1964,48 +2369,42 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
- down_read(&namespace_sem);
+ guard(namespace_shared)();
+
if (IS_MNT_UNBINDABLE(old_mnt))
- goto invalid;
+ return ERR_PTR(-EINVAL);
- if (!check_mnt(old_mnt))
- goto invalid;
+ /*
+ * Make sure the source mount is acceptable.
+ * Anything mounted in our mount namespace is allowed.
+ * Otherwise, it must be the root of an anonymous mount
+ * namespace, and we need to make sure no namespace
+ * loops get created.
+ */
+ if (!check_mnt(old_mnt)) {
+ if (!anon_ns_root(old_mnt))
+ return ERR_PTR(-EINVAL);
- if (has_locked_children(old_mnt, path->dentry))
- goto invalid;
+ if (!check_for_nsfs_mounts(old_mnt))
+ return ERR_PTR(-EINVAL);
+ }
- new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
- up_read(&namespace_sem);
+ if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ if (__has_locked_children(old_mnt, path->dentry))
+ return ERR_PTR(-EINVAL);
+ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
if (IS_ERR(new_mnt))
- return ERR_CAST(new_mnt);
+ return ERR_PTR(-EINVAL);
/* Longterm mount to be removed by kern_unmount*() */
new_mnt->mnt_ns = MNT_NS_INTERNAL;
-
return &new_mnt->mnt;
-
-invalid:
- up_read(&namespace_sem);
- return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
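
A hedged usage sketch (the caller is hypothetical); the clone is private and unattached, so changes in the source tree do not propagate into it:

static struct vfsmount *example_pin_layer(const struct path *layer)
{
	struct vfsmount *mnt = clone_private_mount(layer);

	if (IS_ERR(mnt))
		return mnt;
	/*
	 * Use mnt for internal lookups; see the "Longterm mount" note
	 * above for how it is meant to be released.
	 */
	return mnt;
}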
-int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
- struct vfsmount *root)
-{
- struct mount *mnt;
- int res = f(root, arg);
- if (res)
- return res;
- list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
- res = f(&mnt->mnt, arg);
- if (res)
- return res;
- }
- return 0;
-}
-
static void lock_mnt_tree(struct mount *mnt)
{
struct mount *p;
@@ -2027,7 +2426,7 @@ static void lock_mnt_tree(struct mount *mnt)
if (flags & MNT_NOEXEC)
flags |= MNT_LOCK_NOEXEC;
/* Don't allow unprivileged users to reveal what is under a mount */
- if (list_empty(&p->mnt_expire))
+ if (list_empty(&p->mnt_expire) && p != mnt)
flags |= MNT_LOCKED;
p->mnt.mnt_flags = flags;
}
@@ -2048,7 +2447,7 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
struct mount *p;
for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
- if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
+ if (!p->mnt_group_id) {
int err = mnt_alloc_group_id(p);
if (err) {
cleanup_group_ids(mnt, p);
@@ -2063,31 +2462,35 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
unsigned int max = READ_ONCE(sysctl_mount_max);
- unsigned int mounts = 0, old, pending, sum;
+ unsigned int mounts = 0;
struct mount *p;
+ if (ns->nr_mounts >= max)
+ return -ENOSPC;
+ max -= ns->nr_mounts;
+ if (ns->pending_mounts >= max)
+ return -ENOSPC;
+ max -= ns->pending_mounts;
+
for (p = mnt; p; p = next_mnt(p, mnt))
mounts++;
- old = ns->mounts;
- pending = ns->pending_mounts;
- sum = old + pending;
- if ((old > sum) ||
- (pending > sum) ||
- (max < sum) ||
- (mounts > (max - sum)))
+ if (mounts > max)
return -ENOSPC;
- ns->pending_mounts = pending + mounts;
+ ns->pending_mounts += mounts;
return 0;
}
-/*
- * @source_mnt : mount tree to be attached
- * @nd : place the mount tree @source_mnt is attached
- * @parent_nd : if non-null, detach the source_mnt from its parent and
- * store the parent mount and mountpoint dentry.
- * (done when source_mnt is moved)
+enum mnt_tree_flags_t {
+ MNT_TREE_BENEATH = BIT(0),
+ MNT_TREE_PROPAGATION = BIT(1),
+};
+
+/**
+ * attach_recursive_mnt - attach a source mount tree
+ * @source_mnt: mount tree to be attached
+ * @dest: the context for mounting at the place where the tree should go
*
* NOTE: in the table below explains the semantics when a source mount
* of a given type is attached to a destination mount of a given type.
@@ -2144,26 +2547,38 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
* applied to each mount in the tree.
* Must be called without spinlocks held, since this function can sleep
* in allocations.
+ *
+ * Context: The function expects namespace_lock() to be held.
+ * Return: If @source_mnt was successfully attached 0 is returned.
+ * Otherwise a negative error code is returned.
*/
static int attach_recursive_mnt(struct mount *source_mnt,
- struct mount *dest_mnt,
- struct mountpoint *dest_mp,
- bool moving)
+ const struct pinned_mountpoint *dest)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
+ struct mount *dest_mnt = dest->parent;
+ struct mountpoint *dest_mp = dest->mp;
HLIST_HEAD(tree_list);
struct mnt_namespace *ns = dest_mnt->mnt_ns;
- struct mountpoint *smp;
+ struct pinned_mountpoint root = {};
+ struct mountpoint *shorter = NULL;
struct mount *child, *p;
+ struct mount *top;
struct hlist_node *n;
- int err;
+ int err = 0;
+ bool moving = mnt_has_parent(source_mnt);
- /* Preallocate a mountpoint in case the new mounts need
- * to be tucked under other mounts.
+ /*
+ * Preallocate a mountpoint in case the new mounts need to be
+ * mounted beneath mounts on the same mountpoint.
*/
- smp = get_mountpoint(source_mnt->mnt.mnt_root);
- if (IS_ERR(smp))
- return PTR_ERR(smp);
+ for (top = source_mnt; unlikely(top->overmount); top = top->overmount) {
+ if (!shorter && is_mnt_ns_file(top->mnt.mnt_root))
+ shorter = top->mnt_mp;
+ }
+ err = get_mountpoint(top->mnt.mnt_root, &root);
+ if (err)
+ return err;
/* Is there space to add these mounts to the mount namespace? */
if (!moving) {
@@ -2177,41 +2592,60 @@ static int attach_recursive_mnt(struct mount *source_mnt,
if (err)
goto out;
err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
- lock_mount_hash();
- if (err)
- goto out_cleanup_ids;
+ }
+ lock_mount_hash();
+ if (err)
+ goto out_cleanup_ids;
+
+ if (IS_MNT_SHARED(dest_mnt)) {
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
- } else {
- lock_mount_hash();
}
+
if (moving) {
- unhash_mnt(source_mnt);
- attach_mnt(source_mnt, dest_mnt, dest_mp);
- touch_mnt_namespace(source_mnt->mnt_ns);
+ umount_mnt(source_mnt);
+ mnt_notify_add(source_mnt);
+ /* if the mount is moved, it should no longer be expired
+ * automatically */
+ list_del_init(&source_mnt->mnt_expire);
} else {
if (source_mnt->mnt_ns) {
/* move from anon - the caller will destroy */
- list_del_init(&source_mnt->mnt_ns->list);
+ emptied_ns = source_mnt->mnt_ns;
+ for (p = source_mnt; p; p = next_mnt(p, source_mnt))
+ move_from_ns(p);
}
- mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
- commit_tree(source_mnt);
}
+ mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
+ /*
+ * Now the original copy is in the same state as the secondaries -
+ * its root attached to mountpoint, but not hashed and all mounts
+ * in it are either in our namespace or in no namespace at all.
+ * Add the original to the list of copies and deal with the
+ * rest of work for all of them uniformly.
+ */
+ hlist_add_head(&source_mnt->mnt_hash, &tree_list);
+
hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
struct mount *q;
hlist_del_init(&child->mnt_hash);
- q = __lookup_mnt(&child->mnt_parent->mnt,
- child->mnt_mountpoint);
- if (q)
- mnt_change_mountpoint(child, smp, q);
/* Notice when we are propagating across user namespaces */
if (child->mnt_parent->mnt_ns->user_ns != user_ns)
lock_mnt_tree(child);
- child->mnt.mnt_flags &= ~MNT_LOCKED;
+ q = __lookup_mnt(&child->mnt_parent->mnt,
+ child->mnt_mountpoint);
commit_tree(child);
+ if (q) {
+ struct mount *r = topmost_overmount(child);
+ struct mountpoint *mp = root.mp;
+
+ if (unlikely(shorter) && child != source_mnt)
+ mp = shorter;
+ mnt_change_mountpoint(r, mp, q);
+ }
}
- put_mountpoint(smp);
+ unpin_mountpoint(&root);
unlock_mount_hash();
return 0;
@@ -2228,63 +2662,171 @@ static int attach_recursive_mnt(struct mount *source_mnt,
ns->pending_mounts = 0;
read_seqlock_excl(&mount_lock);
- put_mountpoint(smp);
+ unpin_mountpoint(&root);
read_sequnlock_excl(&mount_lock);
return err;
}
-static struct mountpoint *lock_mount(struct path *path)
+static inline struct mount *where_to_mount(const struct path *path,
+ struct dentry **dentry,
+ bool beneath)
{
- struct vfsmount *mnt;
- struct dentry *dentry = path->dentry;
-retry:
- inode_lock(dentry->d_inode);
- if (unlikely(cant_mount(dentry))) {
- inode_unlock(dentry->d_inode);
- return ERR_PTR(-ENOENT);
+ struct mount *m;
+
+ if (unlikely(beneath)) {
+ m = topmost_overmount(real_mount(path->mnt));
+ *dentry = m->mnt_mountpoint;
+ return m->mnt_parent;
}
- namespace_lock();
- mnt = lookup_mnt(path);
- if (likely(!mnt)) {
- struct mountpoint *mp = get_mountpoint(dentry);
- if (IS_ERR(mp)) {
+ m = __lookup_mnt(path->mnt, path->dentry);
+ if (unlikely(m)) {
+ m = topmost_overmount(m);
+ *dentry = m->mnt.mnt_root;
+ return m;
+ }
+ *dentry = path->dentry;
+ return real_mount(path->mnt);
+}
+
+/**
+ * do_lock_mount - acquire environment for mounting
+ * @path: target path
+ * @res: context to set up
+ * @beneath: whether the intention is to mount beneath @path
+ *
+ * To mount something at given location, we need
+ * namespace_sem locked exclusive
+ * inode of dentry we are mounting on locked exclusive
+ * struct mountpoint for that dentry
+ * struct mount we are mounting on
+ *
+ * Results are stored in caller-supplied context (pinned_mountpoint);
+ * on success we have res->parent and res->mp pointing to parent and
+ * mountpoint respectively and res->node inserted into the ->m_list
+ * of the mountpoint, making sure the mountpoint won't disappear.
+ * On failure we have res->parent set to ERR_PTR(-E...), res->mp
+ * left NULL, res->node - empty.
+ * In case of success do_lock_mount returns with locks acquired (in
+ * proper order - inode lock nests outside of namespace_sem).
+ *
+ * Request to mount on overmounted location is treated as "mount on
+ * top of whatever's overmounting it"; request to mount beneath
+ * a location - "mount immediately beneath the topmost mount at that
+ * place".
+ *
+ * In all cases the location must not have been unmounted and the
+ * chosen mountpoint must be allowed to be mounted on. For "beneath"
+ * case we also require the location to be at the root of a mount
+ * that has a parent (i.e. is not a root of some namespace).
+ */
+static void do_lock_mount(const struct path *path,
+ struct pinned_mountpoint *res,
+ bool beneath)
+{
+ int err;
+
+ if (unlikely(beneath) && !path_mounted(path)) {
+ res->parent = ERR_PTR(-EINVAL);
+ return;
+ }
+
+ do {
+ struct dentry *dentry, *d;
+ struct mount *m, *n;
+
+ scoped_guard(mount_locked_reader) {
+ m = where_to_mount(path, &dentry, beneath);
+ if (&m->mnt != path->mnt) {
+ mntget(&m->mnt);
+ dget(dentry);
+ }
+ }
+
+ inode_lock(dentry->d_inode);
+ namespace_lock();
+
+ // check if the chain of mounts (if any) has changed.
+ scoped_guard(mount_locked_reader)
+ n = where_to_mount(path, &d, beneath);
+
+ if (unlikely(n != m || dentry != d))
+ err = -EAGAIN; // something moved, retry
+ else if (unlikely(cant_mount(dentry) || !is_mounted(path->mnt)))
+ err = -ENOENT; // not to be mounted on
+ else if (beneath && &m->mnt == path->mnt && !m->overmount)
+ err = -EINVAL;
+ else
+ err = get_mountpoint(dentry, res);
+
+ if (unlikely(err)) {
+ res->parent = ERR_PTR(err);
namespace_unlock();
inode_unlock(dentry->d_inode);
- return mp;
+ } else {
+ res->parent = m;
}
- return mp;
- }
- namespace_unlock();
- inode_unlock(path->dentry->d_inode);
- path_put(path);
- path->mnt = mnt;
- dentry = path->dentry = dget(mnt->mnt_root);
- goto retry;
+ /*
+ * Drop the temporary references. This is subtle - on success
+ * we are doing that under namespace_sem, which would normally
+ * be forbidden. However, in that case we are guaranteed that
+ * refcounts won't reach zero, since we know that path->mnt
+ * is mounted and thus all mounts reachable from it are pinned
+ * and stable, along with their mountpoints and roots.
+ */
+ if (&m->mnt != path->mnt) {
+ dput(dentry);
+ mntput(&m->mnt);
+ }
+ } while (err == -EAGAIN);
}
-static void unlock_mount(struct mountpoint *where)
+static void __unlock_mount(struct pinned_mountpoint *m)
{
- struct dentry *dentry = where->m_dentry;
-
+ inode_unlock(m->mp->m_dentry->d_inode);
read_seqlock_excl(&mount_lock);
- put_mountpoint(where);
+ unpin_mountpoint(m);
read_sequnlock_excl(&mount_lock);
-
namespace_unlock();
- inode_unlock(dentry->d_inode);
}
-static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
+static inline void unlock_mount(struct pinned_mountpoint *m)
+{
+ if (!IS_ERR(m->parent))
+ __unlock_mount(m);
+}
+
+#define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \
+ struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
+ do_lock_mount((path), &mp, (beneath))
+#define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false)
+#define LOCK_MOUNT_EXACT(mp, path) \
+ struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
+ lock_mount_exact((path), &mp)
+
+static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp)
{
if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
return -EINVAL;
- if (d_is_dir(mp->m_dentry) !=
+ if (d_is_dir(mp->mp->m_dentry) !=
d_is_dir(mnt->mnt.mnt_root))
return -ENOTDIR;
- return attach_recursive_mnt(mnt, p, mp, false);
+ return attach_recursive_mnt(mnt, mp);
+}
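
A hedged sketch of how the LOCK_MOUNT() helpers combine with graft_tree(); the caller shown is illustrative only. On failure mp.parent carries the ERR_PTR and nothing needs undoing; on success the inode lock, namespace_sem and the pinned mountpoint are all dropped by the __cleanup handler when mp goes out of scope:

static int example_add_mount(struct mount *newmnt, const struct path *where)
{
	LOCK_MOUNT(mp, where);

	if (IS_ERR(mp.parent))
		return PTR_ERR(mp.parent);
	return graft_tree(newmnt, &mp);
}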
+
+static int may_change_propagation(const struct mount *m)
+{
+ struct mnt_namespace *ns = m->mnt_ns;
+
+ // it must be mounted in some namespace
+ if (IS_ERR_OR_NULL(ns)) // is_mounted()
+ return -EINVAL;
+ // and the caller must be admin in userns of that namespace
+ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
}
/*
@@ -2307,71 +2849,131 @@ static int flags_to_propagation_type(int ms_flags)
/*
* recursively change the type of the mountpoint.
*/
-static int do_change_type(struct path *path, int ms_flags)
+static int do_change_type(const struct path *path, int ms_flags)
{
struct mount *m;
struct mount *mnt = real_mount(path->mnt);
int recurse = ms_flags & MS_REC;
int type;
- int err = 0;
+ int err;
- if (path->dentry != path->mnt->mnt_root)
+ if (!path_mounted(path))
return -EINVAL;
type = flags_to_propagation_type(ms_flags);
if (!type)
return -EINVAL;
- namespace_lock();
+ guard(namespace_excl)();
+
+ err = may_change_propagation(mnt);
+ if (err)
+ return err;
+
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
if (err)
- goto out_unlock;
+ return err;
}
- lock_mount_hash();
for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
change_mnt_propagation(m, type);
- unlock_mount_hash();
- out_unlock:
- namespace_unlock();
- return err;
+ return 0;
+}
+
+/* may_copy_tree() - check if a mount tree can be copied
+ * @path: path to the mount tree to be copied
+ *
+ * This helper checks if the caller may copy the mount tree starting
+ * from @path->mnt. The caller may copy the mount tree under the
+ * following circumstances:
+ *
+ * (1) The caller is located in the mount namespace of the mount tree.
+ * This also implies that the mount does not belong to an anonymous
+ * mount namespace.
+ * (2) The caller tries to copy an nsfs mount referring to a mount
+ * namespace, i.e., the caller is trying to copy a mount namespace
+ * entry from nsfs.
+ * (3) The caller tries to copy a pidfs mount referring to a pidfd.
+ * (4) The caller is trying to copy a mount tree that belongs to an
+ * anonymous mount namespace.
+ *
+ * For that to be safe, this helper enforces that the origin mount
+ * namespace the anonymous mount namespace was created from is the
+ * same as the caller's mount namespace by comparing the sequence
+ * numbers.
+ *
+ * This is not strictly necessary. The current semantics of the new
+ * mount api enforce that the caller must be located in the same
+ * mount namespace as the mount tree it interacts with. Using the
+ * origin sequence number preserves these semantics even for
+ * anonymous mount namespaces. However, one could envision extending
+ * the api to directly operate across mount namespaces if needed.
+ *
+ * The ownership of a non-anonymous mount namespace such as the
+ * caller's cannot change.
+ * => We know that the caller's mount namespace is stable.
+ *
+ * If the origin sequence number of the anonymous mount namespace is
+ * the same as the sequence number of the caller's mount namespace.
+ * => The owning namespaces are the same.
+ *
+ * ==> The earlier capability check on the owning namespace of the
+ * caller's mount namespace ensures that the caller has the
+ * ability to copy the mount tree.
+ *
+ * Returns true if the mount tree can be copied, false otherwise.
+ */
+static inline bool may_copy_tree(const struct path *path)
+{
+ struct mount *mnt = real_mount(path->mnt);
+ const struct dentry_operations *d_op;
+
+ if (check_mnt(mnt))
+ return true;
+
+ d_op = path->dentry->d_op;
+ if (d_op == &ns_dentry_operations)
+ return true;
+
+ if (d_op == &pidfs_dentry_operations)
+ return true;
+
+ if (!is_mounted(path->mnt))
+ return false;
+
+ return check_anonymous_mnt(mnt);
}
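The sequence-number argument above is dense, so here is a rough sketch of the comparison it describes. This is an assumption-laden illustration, not the actual check_anonymous_mnt() body (which lives elsewhere in this file); it only uses the seq_origin bookkeeping that get_detached_copy() below records:

/* hypothetical helper - illustrates the origin comparison only */
static bool anon_origin_matches_caller(const struct mount *mnt)
{
	struct mnt_namespace *ns = mnt->mnt_ns;

	if (!is_anon_ns(ns))
		return false;
	/* seq_origin was recorded from the source namespace at copy time */
	return ns->seq_origin == current->nsproxy->mnt_ns->ns.ns_id;
}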
-static struct mount *__do_loopback(struct path *old_path, int recurse)
+
+static struct mount *__do_loopback(const struct path *old_path, int recurse)
{
- struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
+ struct mount *old = real_mount(old_path->mnt);
if (IS_MNT_UNBINDABLE(old))
- return mnt;
+ return ERR_PTR(-EINVAL);
- if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
- return mnt;
+ if (!may_copy_tree(old_path))
+ return ERR_PTR(-EINVAL);
- if (!recurse && has_locked_children(old, old_path->dentry))
- return mnt;
+ if (!recurse && __has_locked_children(old, old_path->dentry))
+ return ERR_PTR(-EINVAL);
if (recurse)
- mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
+ return copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
else
- mnt = clone_mnt(old, old_path->dentry, 0);
-
- if (!IS_ERR(mnt))
- mnt->mnt.mnt_flags &= ~MNT_LOCKED;
-
- return mnt;
+ return clone_mnt(old, old_path->dentry, 0);
}
/*
* do loopback mount.
*/
-static int do_loopback(struct path *path, const char *old_name,
- int recurse)
+static int do_loopback(const struct path *path, const char *old_name,
+ int recurse)
{
- struct path old_path;
- struct mount *mnt = NULL, *parent;
- struct mountpoint *mp;
+ struct path old_path __free(path_put) = {};
+ struct mount *mnt = NULL;
int err;
if (!old_name || !*old_name)
return -EINVAL;
@@ -2379,70 +2981,78 @@ static int do_loopback(struct path *path, const char *old_name,
if (err)
return err;
- err = -EINVAL;
if (mnt_ns_loop(old_path.dentry))
- goto out;
+ return -EINVAL;
- mp = lock_mount(path);
- if (IS_ERR(mp)) {
- err = PTR_ERR(mp);
- goto out;
- }
+ LOCK_MOUNT(mp, path);
+ if (IS_ERR(mp.parent))
+ return PTR_ERR(mp.parent);
- parent = real_mount(path->mnt);
- if (!check_mnt(parent))
- goto out2;
+ if (!check_mnt(mp.parent))
+ return -EINVAL;
mnt = __do_loopback(&old_path, recurse);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
- goto out2;
- }
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
- err = graft_tree(mnt, parent, mp);
+ err = graft_tree(mnt, &mp);
if (err) {
lock_mount_hash();
umount_tree(mnt, UMOUNT_SYNC);
unlock_mount_hash();
}
-out2:
- unlock_mount(mp);
-out:
- path_put(&old_path);
return err;
}
-static struct file *open_detached_copy(struct path *path, bool recursive)
+static struct mnt_namespace *get_detached_copy(const struct path *path, bool recursive)
{
- struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
- struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
+ struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns;
+ struct user_namespace *user_ns = mnt_ns->user_ns;
struct mount *mnt, *p;
- struct file *file;
+ ns = alloc_mnt_ns(user_ns, true);
if (IS_ERR(ns))
- return ERR_CAST(ns);
+ return ns;
+
+ guard(namespace_excl)();
+
+ /*
+ * Record the sequence number of the source mount namespace.
+ * This must be done under namespace_sem to ensure that the mount
+ * doesn't get attached.
+ */
+ if (is_mounted(path->mnt)) {
+ src_mnt_ns = real_mount(path->mnt)->mnt_ns;
+ if (is_anon_ns(src_mnt_ns))
+ ns->seq_origin = src_mnt_ns->seq_origin;
+ else
+ ns->seq_origin = src_mnt_ns->ns.ns_id;
+ }
- namespace_lock();
mnt = __do_loopback(path, recursive);
if (IS_ERR(mnt)) {
- namespace_unlock();
- free_mnt_ns(ns);
+ emptied_ns = ns;
return ERR_CAST(mnt);
}
- lock_mount_hash();
for (p = mnt; p; p = next_mnt(p, mnt)) {
- p->mnt_ns = ns;
- ns->mounts++;
+ mnt_add_to_ns(ns, p);
+ ns->nr_mounts++;
}
ns->root = mnt;
- list_add_tail(&ns->list, &mnt->mnt_list);
- mntget(&mnt->mnt);
- unlock_mount_hash();
- namespace_unlock();
+ return ns;
+}
+
+static struct file *open_detached_copy(struct path *path, bool recursive)
+{
+ struct mnt_namespace *ns = get_detached_copy(path, recursive);
+ struct file *file;
+
+ if (IS_ERR(ns))
+ return ERR_CAST(ns);
mntput(path->mnt);
- path->mnt = &mnt->mnt;
+ path->mnt = mntget(&ns->root->mnt);
file = dentry_open(path, O_PATH, current_cred());
if (IS_ERR(file))
dissolve_on_fput(path->mnt);
@@ -2451,24 +3061,22 @@ static struct file *open_detached_copy(struct path *path, bool recursive)
return file;
}
-SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
+static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags)
{
- struct file *file;
- struct path path;
+ int ret;
+ struct path path __free(path_put) = {};
int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
bool detached = flags & OPEN_TREE_CLONE;
- int error;
- int fd;
BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
OPEN_TREE_CLOEXEC))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (flags & AT_NO_AUTOMOUNT)
lookup_flags &= ~LOOKUP_AUTOMOUNT;
@@ -2478,28 +3086,21 @@ SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, fl
lookup_flags |= LOOKUP_EMPTY;
if (detached && !may_mount())
- return -EPERM;
+ return ERR_PTR(-EPERM);
- fd = get_unused_fd_flags(flags & O_CLOEXEC);
- if (fd < 0)
- return fd;
+ ret = user_path_at(dfd, filename, lookup_flags, &path);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
- error = user_path_at(dfd, filename, lookup_flags, &path);
- if (unlikely(error)) {
- file = ERR_PTR(error);
- } else {
- if (detached)
- file = open_detached_copy(&path, flags & AT_RECURSIVE);
- else
- file = dentry_open(&path, O_PATH, current_cred());
- path_put(&path);
- }
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
- }
- fd_install(fd, file);
- return fd;
+ if (detached)
+ return open_detached_copy(&path, flags & AT_RECURSIVE);
+
+ return dentry_open(&path, O_PATH, current_cred());
+}
+
+SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
+{
+ return FD_ADD(flags, vfs_open_tree(dfd, filename, flags));
}
/*
@@ -2556,25 +3157,33 @@ static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
touch_mnt_namespace(mnt->mnt_ns);
}
-static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
+static void mnt_warn_timestamp_expiry(const struct path *mountpoint,
+ struct vfsmount *mnt)
{
struct super_block *sb = mnt->mnt_sb;
if (!__mnt_is_readonly(mnt) &&
+ (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
(ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
- char *buf = (char *)__get_free_page(GFP_KERNEL);
- char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
- struct tm tm;
+ char *buf, *mntpath;
- time64_to_tm(sb->s_time_max, 0, &tm);
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf)
+ mntpath = d_path(mountpoint, buf, PAGE_SIZE);
+ else
+ mntpath = ERR_PTR(-ENOMEM);
+ if (IS_ERR(mntpath))
+ mntpath = "(unknown)";
- pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+ pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
sb->s_type->name,
is_mounted(mnt) ? "remounted" : "mounted",
- mntpath,
- tm.tm_year+1900, (unsigned long long)sb->s_time_max);
+ mntpath, &sb->s_time_max,
+ (unsigned long long)sb->s_time_max);
- free_page((unsigned long)buf);
+ sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
+ if (buf)
+ free_page((unsigned long)buf);
}
}
@@ -2583,7 +3192,7 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
* superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
* to mount(2).
*/
-static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
+static int do_reconfigure_mnt(const struct path *path, unsigned int mnt_flags)
{
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
@@ -2592,7 +3201,7 @@ static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
if (!check_mnt(mnt))
return -EINVAL;
- if (path->dentry != mnt->mnt.mnt_root)
+ if (!path_mounted(path))
return -EINVAL;
if (!can_change_locked_flags(mnt, mnt_flags))
@@ -2620,7 +3229,7 @@ static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
* If you've mounted a non-root directory somewhere and want to do remount
* on it - tough luck.
*/
-static int do_remount(struct path *path, int ms_flags, int sb_flags,
+static int do_remount(const struct path *path, int sb_flags,
int mnt_flags, void *data)
{
int err;
@@ -2631,7 +3240,7 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
if (!check_mnt(mnt))
return -EINVAL;
- if (path->dentry != path->mnt->mnt_root)
+ if (!path_mounted(path))
return -EINVAL;
if (!can_change_locked_flags(mnt, mnt_flags))
@@ -2641,7 +3250,12 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
if (IS_ERR(fc))
return PTR_ERR(fc);
+ /*
+ * Indicate to the filesystem that the remount request is coming
+ * from the legacy mount system call.
+ */
fc->oldapi = true;
+
err = parse_monolithic_mount_data(fc, data);
if (!err) {
down_write(&sb->s_umount);
@@ -2673,185 +3287,296 @@ static inline int tree_contains_unbindable(struct mount *mnt)
return 0;
}
-/*
- * Check that there aren't references to earlier/same mount namespaces in the
- * specified subtree. Such references can act as pins for mount namespaces
- * that aren't checked by the mount-cycle checking code, thereby allowing
- * cycles to be made.
- */
-static bool check_for_nsfs_mounts(struct mount *subtree)
-{
- struct mount *p;
- bool ret = false;
-
- lock_mount_hash();
- for (p = subtree; p; p = next_mnt(p, subtree))
- if (mnt_ns_loop(p->mnt.mnt_root))
- goto out;
-
- ret = true;
-out:
- unlock_mount_hash();
- return ret;
-}
-
-static int do_set_group(struct path *from_path, struct path *to_path)
+static int do_set_group(const struct path *from_path, const struct path *to_path)
{
- struct mount *from, *to;
+ struct mount *from = real_mount(from_path->mnt);
+ struct mount *to = real_mount(to_path->mnt);
int err;
- from = real_mount(from_path->mnt);
- to = real_mount(to_path->mnt);
-
- namespace_lock();
-
- err = -EINVAL;
- /* To and From must be mounted */
- if (!is_mounted(&from->mnt))
- goto out;
- if (!is_mounted(&to->mnt))
- goto out;
+ guard(namespace_excl)();
- err = -EPERM;
- /* We should be allowed to modify mount namespaces of both mounts */
- if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
- goto out;
- if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
- goto out;
+ err = may_change_propagation(from);
+ if (err)
+ return err;
+ err = may_change_propagation(to);
+ if (err)
+ return err;
- err = -EINVAL;
/* To and From paths should be mount roots */
- if (from_path->dentry != from_path->mnt->mnt_root)
- goto out;
- if (to_path->dentry != to_path->mnt->mnt_root)
- goto out;
+ if (!path_mounted(from_path))
+ return -EINVAL;
+ if (!path_mounted(to_path))
+ return -EINVAL;
/* Setting sharing groups is only allowed across same superblock */
if (from->mnt.mnt_sb != to->mnt.mnt_sb)
- goto out;
+ return -EINVAL;
/* From mount root should be wider than To mount root */
if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
- goto out;
+ return -EINVAL;
/* From mount should not have locked children in place of To's root */
- if (has_locked_children(from, to->mnt.mnt_root))
- goto out;
+ if (__has_locked_children(from, to->mnt.mnt_root))
+ return -EINVAL;
/* Setting sharing groups is only allowed on private mounts */
if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
- goto out;
+ return -EINVAL;
/* From should not be private */
if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
- goto out;
+ return -EINVAL;
if (IS_MNT_SLAVE(from)) {
- struct mount *m = from->mnt_master;
-
- list_add(&to->mnt_slave, &m->mnt_slave_list);
- to->mnt_master = m;
+ hlist_add_behind(&to->mnt_slave, &from->mnt_slave);
+ to->mnt_master = from->mnt_master;
}
if (IS_MNT_SHARED(from)) {
to->mnt_group_id = from->mnt_group_id;
list_add(&to->mnt_share, &from->mnt_share);
- lock_mount_hash();
set_mnt_shared(to);
- unlock_mount_hash();
}
+ return 0;
+}
- err = 0;
-out:
- namespace_unlock();
- return err;
+/**
+ * path_overmounted - check if path is overmounted
+ * @path: path to check
+ *
+ * Check if path is overmounted, i.e., if there's a mount on top of
+ * @path->mnt with @path->dentry as mountpoint.
+ *
+ * Context: namespace_sem must be held at least shared.
+ * MUST NOT be called under lock_mount_hash() (there one should just
+ * call __lookup_mnt() and check if it returns NULL).
+ * Return: true if @path is overmounted, false if not.
+ */
+static inline bool path_overmounted(const struct path *path)
+{
+ unsigned seq = read_seqbegin(&mount_lock);
+ bool no_child;
+
+ rcu_read_lock();
+ no_child = !__lookup_mnt(path->mnt, path->dentry);
+ rcu_read_unlock();
+ if (need_seqretry(&mount_lock, seq)) {
+ read_seqlock_excl(&mount_lock);
+ no_child = !__lookup_mnt(path->mnt, path->dentry);
+ read_sequnlock_excl(&mount_lock);
+ }
+ return unlikely(!no_child);
}
-static int do_move_mount(struct path *old_path, struct path *new_path)
+/*
+ * Check if there is a possibly empty chain of descent from p1 to p2.
+ * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl).
+ */
+static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2)
{
- struct mnt_namespace *ns;
- struct mount *p;
- struct mount *old;
- struct mount *parent;
- struct mountpoint *mp, *old_mp;
- int err;
- bool attached;
+ while (p2 != p1 && mnt_has_parent(p2))
+ p2 = p2->mnt_parent;
+ return p2 == p1;
+}
- mp = lock_mount(new_path);
- if (IS_ERR(mp))
- return PTR_ERR(mp);
+/**
+ * can_move_mount_beneath - check that we can mount beneath the top mount
+ * @mnt_from: mount we are trying to move
+ * @mnt_to: mount under which to mount
+ * @mp: mountpoint of @mnt_to
+ *
+ * - Make sure that nothing can be mounted beneath the caller's current
+ * root or the rootfs of the namespace.
+ * - Make sure that the caller can unmount the topmost mount, ensuring
+ * that the caller can later reveal the underlying mountpoint.
+ * - Ensure that nothing has been mounted on top of @mnt_from before we
+ * grabbed @namespace_sem to avoid creating pointless shadow mounts.
+ * - Prevent mounting beneath a mount if the propagation relationship
+ * between the source mount, parent mount, and top mount would lead to
+ * nonsensical mount trees.
+ *
+ * Context: This function expects namespace_lock() to be held.
+ * Return: On success 0, and on error a negative error code is returned.
+ */
+static int can_move_mount_beneath(const struct mount *mnt_from,
+ const struct mount *mnt_to,
+ const struct mountpoint *mp)
+{
+ struct mount *parent_mnt_to = mnt_to->mnt_parent;
- old = real_mount(old_path->mnt);
- p = real_mount(new_path->mnt);
- parent = old->mnt_parent;
- attached = mnt_has_parent(old);
- old_mp = old->mnt_mp;
- ns = old->mnt_ns;
+ if (IS_MNT_LOCKED(mnt_to))
+ return -EINVAL;
- err = -EINVAL;
- /* The mountpoint must be in our namespace. */
- if (!check_mnt(p))
- goto out;
+ /* Avoid creating shadow mounts during mount propagation. */
+ if (mnt_from->overmount)
+ return -EINVAL;
- /* The thing moved must be mounted... */
- if (!is_mounted(&old->mnt))
- goto out;
+ /*
+ * Mounting beneath the rootfs only makes sense when the
+ * semantics of pivot_root(".", ".") are used.
+ */
+ if (&mnt_to->mnt == current->fs->root.mnt)
+ return -EINVAL;
+ if (parent_mnt_to == current->nsproxy->mnt_ns->root)
+ return -EINVAL;
- /* ... and either ours or the root of anon namespace */
- if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
- goto out;
+ if (mount_is_ancestor(mnt_to, mnt_from))
+ return -EINVAL;
- if (old->mnt.mnt_flags & MNT_LOCKED)
- goto out;
+ /*
+ * If the parent mount propagates to the child mount this would
+ * mean mounting @mnt_from on @mnt_to->mnt_parent and then
+ * propagating a copy @c of @mnt_from on top of @mnt_to. This
+ * defeats the whole purpose of mounting beneath another mount.
+ */
+ if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
+ return -EINVAL;
- if (old_path->dentry != old_path->mnt->mnt_root)
- goto out;
+ /*
+ * If @mnt_to->mnt_parent propagates to @mnt_from this would
+ * mean propagating a copy @c of @mnt_from on top of @mnt_from.
+ * Afterwards @mnt_from would be mounted on top of
+ * @mnt_to->mnt_parent and @mnt_to would be unmounted from
+ * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
+ * already mounted on @mnt_from, @mnt_to would ultimately be
+ * remounted on top of @c. Afterwards, @mnt_from would be
+ * covered by a copy @c of @mnt_from and @c would be covered by
+ * @mnt_from itself. This defeats the whole purpose of mounting
+ * @mnt_from beneath @mnt_to.
+ */
+ if (check_mnt(mnt_from) &&
+ propagation_would_overmount(parent_mnt_to, mnt_from, mp))
+ return -EINVAL;
+
+ return 0;
+}
+
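To make the checks above easier to follow, here is an illustrative topology sketch (assumed "mount beneath" semantics, not part of the patch), using the function's parameter names:

/*
 * Illustration only:
 *
 *   before:  parent_mnt_to ──(@mp)── mnt_to
 *   after:   parent_mnt_to ──(@mp)── mnt_from ── mnt_to
 *
 * i.e. @mnt_from is slipped in between @mnt_to and its parent at @mp,
 * so that unmounting @mnt_to later reveals @mnt_from rather than the
 * original mountpoint.
 */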
+/* may_use_mount() - check if a mount tree can be used
+ * @mnt: vfsmount to be used
+ *
+ * This helper checks if the caller may use the mount tree starting
+ * from @mnt. The caller may use the mount tree under the
+ * following circumstances:
+ *
+ * (1) The caller is located in the mount namespace of the mount tree.
+ * This also implies that the mount does not belong to an anonymous
+ * mount namespace.
+ * (2) The caller is trying to use a mount tree that belongs to an
+ * anonymous mount namespace.
+ *
+ * For that to be safe, this helper enforces that the origin mount
+ * namespace the anonymous mount namespace was created from is the
+ * same as the caller's mount namespace by comparing the sequence
+ * numbers.
+ *
+ * The ownership of a non-anonymous mount namespace such as the
+ * caller's cannot change.
+ * => We know that the caller's mount namespace is stable.
+ *
+ * If the origin sequence number of the anonymous mount namespace is
+ * the same as the sequence number of the caller's mount namespace.
+ * => The owning namespaces are the same.
+ *
+ * ==> The earlier capability check on the owning namespace of the
+ * caller's mount namespace ensures that the caller has the
+ * ability to use the mount tree.
+ *
+ * Returns true if the mount tree can be used, false otherwise.
+ */
+static inline bool may_use_mount(struct mount *mnt)
+{
+ if (check_mnt(mnt))
+ return true;
- if (d_is_dir(new_path->dentry) !=
- d_is_dir(old_path->dentry))
- goto out;
/*
- * Don't move a mount residing in a shared parent.
+ * Make sure that no one unmounted the target path or somehow
+ * managed to get their hands on something purely kernel
+ * internal.
*/
- if (attached && IS_MNT_SHARED(parent))
- goto out;
+ if (!is_mounted(&mnt->mnt))
+ return false;
+
+ return check_anonymous_mnt(mnt);
+}
+
+static int do_move_mount(const struct path *old_path,
+ const struct path *new_path,
+ enum mnt_tree_flags_t flags)
+{
+ struct mount *old = real_mount(old_path->mnt);
+ int err;
+ bool beneath = flags & MNT_TREE_BENEATH;
+
+ if (!path_mounted(old_path))
+ return -EINVAL;
+
+ if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry))
+ return -EINVAL;
+
+ LOCK_MOUNT_MAYBE_BENEATH(mp, new_path, beneath);
+ if (IS_ERR(mp.parent))
+ return PTR_ERR(mp.parent);
+
+ if (check_mnt(old)) {
+ /* if the source is in our namespace... */
+ /* ... it should be detachable from parent */
+ if (!mnt_has_parent(old) || IS_MNT_LOCKED(old))
+ return -EINVAL;
+ /* ... which should not be shared */
+ if (IS_MNT_SHARED(old->mnt_parent))
+ return -EINVAL;
+ /* ... and the target should be in our namespace */
+ if (!check_mnt(mp.parent))
+ return -EINVAL;
+ } else {
+ /*
+ * otherwise the source must be the root of some anon namespace.
+ */
+ if (!anon_ns_root(old))
+ return -EINVAL;
+ /*
+ * Bail out early if the target is within the same namespace -
+ * subsequent checks would've rejected that, but they miss
+ * some corner cases unless we rule it out early here.
+ */
+ if (old->mnt_ns == mp.parent->mnt_ns)
+ return -EINVAL;
+ /*
+ * Target should be either in our namespace or in an acceptable
+ * anon namespace, sensu check_anonymous_mnt().
+ */
+ if (!may_use_mount(mp.parent))
+ return -EINVAL;
+ }
+
+ if (beneath) {
+ struct mount *over = real_mount(new_path->mnt);
+
+ if (mp.parent != over->mnt_parent)
+ over = mp.parent->overmount;
+ err = can_move_mount_beneath(old, over, mp.mp);
+ if (err)
+ return err;
+ }
+
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
- if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
- goto out;
- err = -ELOOP;
+ if (IS_MNT_SHARED(mp.parent) && tree_contains_unbindable(old))
+ return -EINVAL;
if (!check_for_nsfs_mounts(old))
- goto out;
- for (; mnt_has_parent(p); p = p->mnt_parent)
- if (p == old)
- goto out;
+ return -ELOOP;
+ if (mount_is_ancestor(old, mp.parent))
+ return -ELOOP;
- err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
- attached);
- if (err)
- goto out;
-
- /* if the mount is moved, it should no longer be expire
- * automatically */
- list_del_init(&old->mnt_expire);
- if (attached)
- put_mountpoint(old_mp);
-out:
- unlock_mount(mp);
- if (!err) {
- if (attached)
- mntput_no_expire(parent);
- else
- free_mnt_ns(ns);
- }
- return err;
+ return attach_recursive_mnt(old, &mp);
}
-static int do_move_mount_old(struct path *path, const char *old_name)
+static int do_move_mount_old(const struct path *path, const char *old_name)
{
- struct path old_path;
+ struct path old_path __free(path_put) = {};
int err;
if (!old_name || !*old_name)
@@ -2861,18 +3586,19 @@ static int do_move_mount_old(struct path *path, const char *old_name)
if (err)
return err;
- err = do_move_mount(&old_path, path);
- path_put(&old_path);
- return err;
+ return do_move_mount(&old_path, path, 0);
}
/*
* add a mount into a namespace's mount tree
*/
-static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
- struct path *path, int mnt_flags)
+static int do_add_mount(struct mount *newmnt, const struct pinned_mountpoint *mp,
+ int mnt_flags)
{
- struct mount *parent = real_mount(path->mnt);
+ struct mount *parent = mp->parent;
+
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
mnt_flags &= ~MNT_INTERNAL_FLAGS;
@@ -2886,15 +3612,15 @@ static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
}
/* Refuse the same filesystem on the same mount point */
- if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
- path->mnt->mnt_root == path->dentry)
+ if (parent->mnt.mnt_sb == newmnt->mnt.mnt_sb &&
+ parent->mnt.mnt_root == mp->mp->m_dentry)
return -EBUSY;
if (d_is_symlink(newmnt->mnt.mnt_root))
return -EINVAL;
newmnt->mnt.mnt_flags = mnt_flags;
- return graft_tree(newmnt, parent, mp);
+ return graft_tree(newmnt, mp);
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
@@ -2903,40 +3629,32 @@ static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags
* Create a new mount using a superblock configuration and request it
* be added to the namespace tree.
*/
-static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
+static int do_new_mount_fc(struct fs_context *fc, const struct path *mountpoint,
unsigned int mnt_flags)
{
- struct vfsmount *mnt;
- struct mountpoint *mp;
- struct super_block *sb = fc->root->d_sb;
+ struct super_block *sb;
+ struct vfsmount *mnt __free(mntput) = fc_mount(fc);
int error;
- error = security_sb_kern_mount(sb);
- if (!error && mount_too_revealing(sb, &mnt_flags))
- error = -EPERM;
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
- if (unlikely(error)) {
- fc_drop_locked(fc);
+ sb = fc->root->d_sb;
+ error = security_sb_kern_mount(sb);
+ if (unlikely(error))
return error;
- }
-
- up_write(&sb->s_umount);
- mnt = vfs_create_mount(fc);
- if (IS_ERR(mnt))
- return PTR_ERR(mnt);
+ if (unlikely(mount_too_revealing(sb, &mnt_flags))) {
+ errorfcp(fc, "VFS", "Mount too revealing");
+ return -EPERM;
+ }
mnt_warn_timestamp_expiry(mountpoint, mnt);
- mp = lock_mount(mountpoint);
- if (IS_ERR(mp)) {
- mntput(mnt);
- return PTR_ERR(mp);
- }
- error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
- unlock_mount(mp);
- if (error < 0)
- mntput(mnt);
+ LOCK_MOUNT(mp, mountpoint);
+ error = do_add_mount(real_mount(mnt), &mp, mnt_flags);
+ if (!error)
+ retain_and_null_ptr(mnt); // consumed on success
return error;
}
@@ -2944,8 +3662,9 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
* create a new mount for userspace and request it to be added into the
* namespace's tree
*/
-static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
- int mnt_flags, const char *name, void *data)
+static int do_new_mount(const struct path *path, const char *fstype,
+ int sb_flags, int mnt_flags,
+ const char *name, void *data)
{
struct file_system_type *type;
struct fs_context *fc;
@@ -2975,28 +3694,53 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
if (IS_ERR(fc))
return PTR_ERR(fc);
+ /*
+ * Indicate to the filesystem that the mount request is coming
+ * from the legacy mount system call.
+ */
+ fc->oldapi = true;
+
if (subtype)
- err = vfs_parse_fs_string(fc, "subtype",
- subtype, strlen(subtype));
+ err = vfs_parse_fs_string(fc, "subtype", subtype);
if (!err && name)
- err = vfs_parse_fs_string(fc, "source", name, strlen(name));
+ err = vfs_parse_fs_string(fc, "source", name);
if (!err)
err = parse_monolithic_mount_data(fc, data);
if (!err && !mount_capable(fc))
err = -EPERM;
if (!err)
- err = vfs_get_tree(fc);
- if (!err)
err = do_new_mount_fc(fc, path, mnt_flags);
put_fs_context(fc);
return err;
}
-int finish_automount(struct vfsmount *m, struct path *path)
+static void lock_mount_exact(const struct path *path,
+ struct pinned_mountpoint *mp)
{
struct dentry *dentry = path->dentry;
- struct mountpoint *mp;
+ int err;
+
+ inode_lock(dentry->d_inode);
+ namespace_lock();
+ if (unlikely(cant_mount(dentry)))
+ err = -ENOENT;
+ else if (path_overmounted(path))
+ err = -EBUSY;
+ else
+ err = get_mountpoint(dentry, mp);
+ if (unlikely(err)) {
+ namespace_unlock();
+ inode_unlock(dentry->d_inode);
+ mp->parent = ERR_PTR(err);
+ } else {
+ mp->parent = real_mount(path->mnt);
+ }
+}
+
+int finish_automount(struct vfsmount *__m, const struct path *path)
+{
+ struct vfsmount *m __free(mntput) = __m;
struct mount *mnt;
int err;
@@ -3006,60 +3750,22 @@ int finish_automount(struct vfsmount *m, struct path *path)
return PTR_ERR(m);
mnt = real_mount(m);
- /* The new mount record should have at least 2 refs to prevent it being
- * expired before we get a chance to add it
- */
- BUG_ON(mnt_get_count(mnt) < 2);
- if (m->mnt_sb == path->mnt->mnt_sb &&
- m->mnt_root == dentry) {
- err = -ELOOP;
- goto discard;
- }
+ if (m->mnt_root == path->dentry)
+ return -ELOOP;
/*
- * we don't want to use lock_mount() - in this case finding something
+ * we don't want to use LOCK_MOUNT() - in this case finding something
* that overmounts our mountpoint means "quietly drop what we've
* got", not "try to mount it on top".
*/
- inode_lock(dentry->d_inode);
- namespace_lock();
- if (unlikely(cant_mount(dentry))) {
- err = -ENOENT;
- goto discard_locked;
- }
- rcu_read_lock();
- if (unlikely(__lookup_mnt(path->mnt, dentry))) {
- rcu_read_unlock();
- err = 0;
- goto discard_locked;
- }
- rcu_read_unlock();
- mp = get_mountpoint(dentry);
- if (IS_ERR(mp)) {
- err = PTR_ERR(mp);
- goto discard_locked;
- }
-
- err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
- unlock_mount(mp);
- if (unlikely(err))
- goto discard;
- mntput(m);
- return 0;
+ LOCK_MOUNT_EXACT(mp, path);
+ if (mp.parent == ERR_PTR(-EBUSY))
+ return 0;
-discard_locked:
- namespace_unlock();
- inode_unlock(dentry->d_inode);
-discard:
- /* remove m from any expiration list it may be on */
- if (!list_empty(&mnt->mnt_expire)) {
- namespace_lock();
- list_del_init(&mnt->mnt_expire);
- namespace_unlock();
- }
- mntput(m);
- mntput(m);
+ err = do_add_mount(mnt, &mp, path->mnt->mnt_flags | MNT_SHRINKABLE);
+ if (likely(!err))
+ retain_and_null_ptr(m);
return err;
}
@@ -3070,11 +3776,8 @@ discard:
*/
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
- namespace_lock();
-
+ guard(mount_locked_reader)();
list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
-
- namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
@@ -3091,16 +3794,19 @@ void mark_mounts_for_expiry(struct list_head *mounts)
if (list_empty(mounts))
return;
- namespace_lock();
- lock_mount_hash();
+ guard(namespace_excl)();
+ guard(mount_writer)();
/* extract from the expiration list every vfsmount that matches the
* following criteria:
+ * - already mounted
* - only referenced by its parent vfsmount
* - still marked for expiry (marked on the last call here; marks are
* cleared by mntput())
*/
list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
+ if (!is_mounted(&mnt->mnt))
+ continue;
if (!xchg(&mnt->mnt_expiry_mark, 1) ||
propagate_mount_busy(mnt, 1))
continue;
@@ -3111,8 +3817,6 @@ void mark_mounts_for_expiry(struct list_head *mounts)
touch_mnt_namespace(mnt->mnt_ns);
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
- unlock_mount_hash();
- namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
@@ -3240,7 +3944,7 @@ static char *copy_mount_string(const void __user *data)
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
-int path_mount(const char *dev_name, struct path *path,
+int path_mount(const char *dev_name, const struct path *path,
const char *type_page, unsigned long flags, void *data_page)
{
unsigned int mnt_flags = 0, sb_flags;
@@ -3307,7 +4011,7 @@ int path_mount(const char *dev_name, struct path *path,
if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
return do_reconfigure_mnt(path, mnt_flags);
if (flags & MS_REMOUNT)
- return do_remount(path, flags, sb_flags, mnt_flags, data_page);
+ return do_remount(path, sb_flags, mnt_flags, data_page);
if (flags & MS_BIND)
return do_loopback(path, dev_name, flags & MS_REC);
if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
@@ -3319,18 +4023,16 @@ int path_mount(const char *dev_name, struct path *path,
data_page);
}
-long do_mount(const char *dev_name, const char __user *dir_name,
+int do_mount(const char *dev_name, const char __user *dir_name,
const char *type_page, unsigned long flags, void *data_page)
{
- struct path path;
+ struct path path __free(path_put) = {};
int ret;
ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
if (ret)
return ret;
- ret = path_mount(dev_name, &path, type_page, flags, data_page);
- path_put(&path);
- return ret;
+ return path_mount(dev_name, &path, type_page, flags, data_page);
}
static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
@@ -3346,21 +4048,11 @@ static void dec_mnt_namespaces(struct ucounts *ucounts)
static void free_mnt_ns(struct mnt_namespace *ns)
{
if (!is_anon_ns(ns))
- ns_free_inum(&ns->ns);
+ ns_common_free(ns);
dec_mnt_namespaces(ns->ucounts);
- put_user_ns(ns->user_ns);
- kfree(ns);
+ mnt_ns_tree_remove(ns);
}
-/*
- * Assign a sequence number so we can detect when we attempt to bind
- * mount a reference to an older mount namespace into the current
- * mount namespace, preventing reference counting loops. A 64bit
- * number incrementing at 10Ghz will take 12,427 years to wrap which
- * is effectively never, so we can ignore the possibility.
- */
-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
-
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
{
struct mnt_namespace *new_ns;
@@ -3376,32 +4068,34 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
- if (!anon) {
- ret = ns_alloc_inum(&new_ns->ns);
- if (ret) {
- kfree(new_ns);
- dec_mnt_namespaces(ucounts);
- return ERR_PTR(ret);
- }
+
+ if (anon)
+ ret = ns_common_init_inum(new_ns, MNT_NS_ANON_INO);
+ else
+ ret = ns_common_init(new_ns);
+ if (ret) {
+ kfree(new_ns);
+ dec_mnt_namespaces(ucounts);
+ return ERR_PTR(ret);
}
- new_ns->ns.ops = &mntns_operations;
- if (!anon)
- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
- refcount_set(&new_ns->ns.count, 1);
- INIT_LIST_HEAD(&new_ns->list);
+ ns_tree_gen_id(new_ns);
+
+ new_ns->is_anon = anon;
+ refcount_set(&new_ns->passive, 1);
+ new_ns->mounts = RB_ROOT;
init_waitqueue_head(&new_ns->poll);
- spin_lock_init(&new_ns->ns_lock);
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
return new_ns;
}
__latent_entropy
-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+struct mnt_namespace *copy_mnt_ns(u64 flags, struct mnt_namespace *ns,
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
- struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
+ struct vfsmount *rootmnt __free(mntput) = NULL;
+ struct vfsmount *pwdmnt __free(mntput) = NULL;
struct mount *p, *q;
struct mount *old;
struct mount *new;
@@ -3420,24 +4114,21 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
if (IS_ERR(new_ns))
return new_ns;
- namespace_lock();
+ guard(namespace_excl)();
/* First pass: copy the tree topology */
copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
if (user_ns != ns->user_ns)
- copy_flags |= CL_SHARED_TO_SLAVE;
+ copy_flags |= CL_SLAVE;
new = copy_tree(old, old->mnt.mnt_root, copy_flags);
if (IS_ERR(new)) {
- namespace_unlock();
- free_mnt_ns(new_ns);
+ emptied_ns = new_ns;
return ERR_CAST(new);
}
if (user_ns != ns->user_ns) {
- lock_mount_hash();
+ guard(mount_writer)();
lock_mnt_tree(new);
- unlock_mount_hash();
}
new_ns->root = new;
- list_add_tail(&new_ns->list, &new->mnt_list);
/*
* Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -3447,8 +4138,8 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
p = old;
q = new;
while (p) {
- q->mnt_ns = new_ns;
- new_ns->mounts++;
+ mnt_add_to_ns(new_ns, q);
+ new_ns->nr_mounts++;
if (new_fs) {
if (&p->mnt == new_fs->root.mnt) {
new_fs->root.mnt = mntget(&q->mnt);
@@ -3463,16 +4154,11 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
q = next_mnt(q, new);
if (!q)
break;
+ // an mntns binding we'd skipped?
while (p->mnt.mnt_root != q->mnt.mnt_root)
- p = next_mnt(p, old);
+ p = next_mnt(skip_mnt_tree(p), old);
}
- namespace_unlock();
-
- if (rootmnt)
- mntput(rootmnt);
- if (pwdmnt)
- mntput(pwdmnt);
-
+ ns_tree_add_raw(new_ns);
return new_ns;
}
@@ -3489,10 +4175,9 @@ struct dentry *mount_subtree(struct vfsmount *m, const char *name)
mntput(m);
return ERR_CAST(ns);
}
- mnt->mnt_ns = ns;
ns->root = mnt;
- ns->mounts++;
- list_add(&mnt->mnt_list, &ns->list);
+ ns->nr_mounts++;
+ mnt_add_to_ns(ns, mnt);
err = vfs_path_lookup(m->mnt_root, m,
name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
@@ -3584,12 +4269,11 @@ static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
unsigned int, attr_flags)
{
+ struct path new_path __free(path_put) = {};
struct mnt_namespace *ns;
struct fs_context *fc;
- struct file *file;
- struct path newmount;
+ struct vfsmount *new_mnt;
struct mount *mnt;
- struct fd f;
unsigned int mnt_flags = 0;
long ret;
@@ -3617,45 +4301,45 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
return -EINVAL;
}
- f = fdget(fs_fd);
- if (!f.file)
+ CLASS(fd, f)(fs_fd);
+ if (fd_empty(f))
return -EBADF;
- ret = -EINVAL;
- if (f.file->f_op != &fscontext_fops)
- goto err_fsfd;
+ if (fd_file(f)->f_op != &fscontext_fops)
+ return -EINVAL;
- fc = f.file->private_data;
+ fc = fd_file(f)->private_data;
- ret = mutex_lock_interruptible(&fc->uapi_mutex);
- if (ret < 0)
- goto err_fsfd;
+ ACQUIRE(mutex_intr, uapi_mutex)(&fc->uapi_mutex);
+ ret = ACQUIRE_ERR(mutex_intr, &uapi_mutex);
+ if (ret)
+ return ret;
/* There must be a valid superblock or we can't mount it */
ret = -EINVAL;
if (!fc->root)
- goto err_unlock;
+ return ret;
ret = -EPERM;
if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
- pr_warn("VFS: Mount too revealing\n");
- goto err_unlock;
+ errorfcp(fc, "VFS", "Mount too revealing");
+ return ret;
}
ret = -EBUSY;
if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
- goto err_unlock;
+ return ret;
if (fc->sb_flags & SB_MANDLOCK)
warn_mandlock();
- newmount.mnt = vfs_create_mount(fc);
- if (IS_ERR(newmount.mnt)) {
- ret = PTR_ERR(newmount.mnt);
- goto err_unlock;
- }
- newmount.dentry = dget(fc->root);
- newmount.mnt->mnt_flags = mnt_flags;
+ new_mnt = vfs_create_mount(fc);
+ if (IS_ERR(new_mnt))
+ return PTR_ERR(new_mnt);
+ new_mnt->mnt_flags = mnt_flags;
+
+ new_path.dentry = dget(fc->root);
+ new_path.mnt = new_mnt;
/* We've done the mount bit - now move the file context into more or
* less the same state as if we'd done an fspick(). We don't want to
@@ -3665,41 +4349,43 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
vfs_clean_context(fc);
ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
- if (IS_ERR(ns)) {
- ret = PTR_ERR(ns);
- goto err_path;
- }
- mnt = real_mount(newmount.mnt);
- mnt->mnt_ns = ns;
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+ mnt = real_mount(new_path.mnt);
ns->root = mnt;
- ns->mounts = 1;
- list_add(&mnt->mnt_list, &ns->list);
- mntget(newmount.mnt);
+ ns->nr_mounts = 1;
+ mnt_add_to_ns(ns, mnt);
+ mntget(new_path.mnt);
+
+ FD_PREPARE(fdf, (flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0,
+ dentry_open(&new_path, O_PATH, fc->cred));
+ if (fdf.err) {
+ dissolve_on_fput(new_path.mnt);
+ return fdf.err;
+ }
- /* Attach to an apparent O_PATH fd with a note that we need to unmount
- * it, not just simply put it.
+ /*
+ * Attach to an apparent O_PATH fd with a note that we
+ * need to unmount it, not just simply put it.
*/
- file = dentry_open(&newmount, O_PATH, fc->cred);
- if (IS_ERR(file)) {
- dissolve_on_fput(newmount.mnt);
- ret = PTR_ERR(file);
- goto err_path;
- }
- file->f_mode |= FMODE_NEED_UNMOUNT;
+ fd_prepare_file(fdf)->f_mode |= FMODE_NEED_UNMOUNT;
+ return fd_publish(fdf);
+}
- ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
- if (ret >= 0)
- fd_install(ret, file);
- else
- fput(file);
-
-err_path:
- path_put(&newmount);
-err_unlock:
- mutex_unlock(&fc->uapi_mutex);
-err_fsfd:
- fdput(f);
- return ret;
+static inline int vfs_move_mount(const struct path *from_path,
+ const struct path *to_path,
+ enum mnt_tree_flags_t mflags)
+{
+ int ret;
+
+ ret = security_move_mount(from_path, to_path);
+ if (ret)
+ return ret;
+
+ if (mflags & MNT_TREE_PROPAGATION)
+ return do_set_group(from_path, to_path);
+
+ return do_move_mount(from_path, to_path, mflags);
}
/*
@@ -3715,8 +4401,12 @@ SYSCALL_DEFINE5(move_mount,
int, to_dfd, const char __user *, to_pathname,
unsigned int, flags)
{
- struct path from_path, to_path;
- unsigned int lflags;
+ struct path to_path __free(path_put) = {};
+ struct path from_path __free(path_put) = {};
+ struct filename *to_name __free(putname) = NULL;
+ struct filename *from_name __free(putname) = NULL;
+ unsigned int lflags, uflags;
+ enum mnt_tree_flags_t mflags = 0;
int ret = 0;
if (!may_mount())
@@ -3725,48 +4415,71 @@ SYSCALL_DEFINE5(move_mount,
if (flags & ~MOVE_MOUNT__MASK)
return -EINVAL;
- /* If someone gives a pathname, they aren't permitted to move
- * from an fd that requires unmount as we can't get at the flag
- * to clear it afterwards.
- */
- lflags = 0;
- if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
- if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
- if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
+ if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
+ (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
+ return -EINVAL;
- ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
- if (ret < 0)
- return ret;
+ if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION;
+ if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH;
- lflags = 0;
- if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
- if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
- if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
+ uflags = 0;
+ if (flags & MOVE_MOUNT_T_EMPTY_PATH)
+ uflags = AT_EMPTY_PATH;
- ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
- if (ret < 0)
- goto out_from;
+ to_name = getname_maybe_null(to_pathname, uflags);
+ if (IS_ERR(to_name))
+ return PTR_ERR(to_name);
- ret = security_move_mount(&from_path, &to_path);
- if (ret < 0)
- goto out_to;
+ if (!to_name && to_dfd >= 0) {
+ CLASS(fd_raw, f_to)(to_dfd);
+ if (fd_empty(f_to))
+ return -EBADF;
- if (flags & MOVE_MOUNT_SET_GROUP)
- ret = do_set_group(&from_path, &to_path);
- else
- ret = do_move_mount(&from_path, &to_path);
+ to_path = fd_file(f_to)->f_path;
+ path_get(&to_path);
+ } else {
+ lflags = 0;
+ if (flags & MOVE_MOUNT_T_SYMLINKS)
+ lflags |= LOOKUP_FOLLOW;
+ if (flags & MOVE_MOUNT_T_AUTOMOUNTS)
+ lflags |= LOOKUP_AUTOMOUNT;
+ ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL);
+ if (ret)
+ return ret;
+ }
-out_to:
- path_put(&to_path);
-out_from:
- path_put(&from_path);
- return ret;
+ uflags = 0;
+ if (flags & MOVE_MOUNT_F_EMPTY_PATH)
+ uflags = AT_EMPTY_PATH;
+
+ from_name = getname_maybe_null(from_pathname, uflags);
+ if (IS_ERR(from_name))
+ return PTR_ERR(from_name);
+
+ if (!from_name && from_dfd >= 0) {
+ CLASS(fd_raw, f_from)(from_dfd);
+ if (fd_empty(f_from))
+ return -EBADF;
+
+ return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags);
+ }
+
+ lflags = 0;
+ if (flags & MOVE_MOUNT_F_SYMLINKS)
+ lflags |= LOOKUP_FOLLOW;
+ if (flags & MOVE_MOUNT_F_AUTOMOUNTS)
+ lflags |= LOOKUP_AUTOMOUNT;
+ ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL);
+ if (ret)
+ return ret;
+
+ return vfs_move_mount(&from_path, &to_path, mflags);
}
/*
* Return true if path is reachable from root
*
- * namespace_sem or mount_lock is held
+ * locks: mount_locked_reader || namespace_shared && is_mounted(mnt)
*/
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
const struct path *root)
@@ -3780,11 +4493,8 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
bool path_is_under(const struct path *path1, const struct path *path2)
{
- bool res;
- read_seqlock_excl(&mount_lock);
- res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
- read_sequnlock_excl(&mount_lock);
- return res;
+ guard(mount_locked_reader)();
+ return is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
}
EXPORT_SYMBOL(path_is_under);
@@ -3816,9 +4526,10 @@ EXPORT_SYMBOL(path_is_under);
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
const char __user *, put_old)
{
- struct path new, old, root;
+ struct path new __free(path_put) = {};
+ struct path old __free(path_put) = {};
+ struct path root __free(path_put) = {};
struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
- struct mountpoint *old_mp, *root_mp;
int error;
if (!may_mount())
@@ -3827,89 +4538,73 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
error = user_path_at(AT_FDCWD, new_root,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
if (error)
- goto out0;
+ return error;
error = user_path_at(AT_FDCWD, put_old,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
if (error)
- goto out1;
+ return error;
error = security_sb_pivotroot(&old, &new);
if (error)
- goto out2;
+ return error;
get_fs_root(current->fs, &root);
- old_mp = lock_mount(&old);
- error = PTR_ERR(old_mp);
- if (IS_ERR(old_mp))
- goto out3;
- error = -EINVAL;
+ LOCK_MOUNT(old_mp, &old);
+ old_mnt = old_mp.parent;
+ if (IS_ERR(old_mnt))
+ return PTR_ERR(old_mnt);
+
new_mnt = real_mount(new.mnt);
root_mnt = real_mount(root.mnt);
- old_mnt = real_mount(old.mnt);
ex_parent = new_mnt->mnt_parent;
root_parent = root_mnt->mnt_parent;
if (IS_MNT_SHARED(old_mnt) ||
IS_MNT_SHARED(ex_parent) ||
IS_MNT_SHARED(root_parent))
- goto out4;
+ return -EINVAL;
if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
- goto out4;
+ return -EINVAL;
if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
- goto out4;
- error = -ENOENT;
+ return -EINVAL;
if (d_unlinked(new.dentry))
- goto out4;
- error = -EBUSY;
+ return -ENOENT;
if (new_mnt == root_mnt || old_mnt == root_mnt)
- goto out4; /* loop, on the same file system */
- error = -EINVAL;
- if (root.mnt->mnt_root != root.dentry)
- goto out4; /* not a mountpoint */
+ return -EBUSY; /* loop, on the same file system */
+ if (!path_mounted(&root))
+ return -EINVAL; /* not a mountpoint */
if (!mnt_has_parent(root_mnt))
- goto out4; /* not attached */
- if (new.mnt->mnt_root != new.dentry)
- goto out4; /* not a mountpoint */
+ return -EINVAL; /* absolute root */
+ if (!path_mounted(&new))
+ return -EINVAL; /* not a mountpoint */
if (!mnt_has_parent(new_mnt))
- goto out4; /* not attached */
+ return -EINVAL; /* absolute root */
/* make sure we can reach put_old from new_root */
- if (!is_path_reachable(old_mnt, old.dentry, &new))
- goto out4;
+ if (!is_path_reachable(old_mnt, old_mp.mp->m_dentry, &new))
+ return -EINVAL;
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
- goto out4;
+ return -EINVAL;
lock_mount_hash();
umount_mnt(new_mnt);
- root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
new_mnt->mnt.mnt_flags |= MNT_LOCKED;
root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
}
- /* mount old root on put_old */
- attach_mnt(root_mnt, old_mnt, old_mp);
/* mount new_root on / */
- attach_mnt(new_mnt, root_parent, root_mp);
- mnt_add_count(root_parent, -1);
+ attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp);
+ umount_mnt(root_mnt);
+ /* mount old root on put_old */
+ attach_mnt(root_mnt, old_mnt, old_mp.mp);
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
- put_mountpoint(root_mp);
unlock_mount_hash();
+ mnt_notify_add(root_mnt);
+ mnt_notify_add(new_mnt);
chroot_fs_refs(&root, &new);
- error = 0;
-out4:
- unlock_mount(old_mp);
- if (!error)
- mntput_no_expire(ex_parent);
-out3:
- path_put(&root);
-out2:
- path_put(&old);
-out1:
- path_put(&new);
-out0:
- return error;
+ return 0;
}
static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
@@ -3927,28 +4622,35 @@ static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
struct vfsmount *m = &mnt->mnt;
+ struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
- if (!kattr->mnt_userns)
+ if (!kattr->mnt_idmap)
return 0;
/*
- * Once a mount has been idmapped we don't allow it to change its
- * mapping. It makes things simpler and callers can just create
- * another bind-mount they can idmap if they want to.
+ * Creating an idmapped mount with the filesystem wide idmapping
+ * doesn't make sense so block that. We don't allow mushy semantics.
+ */
+ if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
+ return -EINVAL;
+
+ /*
+ * We only allow a mount to change its idmapping if it has
+ * never been accessible to userspace.
*/
- if (mnt_user_ns(m) != &init_user_ns)
+ if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m))
return -EPERM;
/* The underlying filesystem doesn't support idmapped mounts yet. */
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
- /* Don't yet support filesystem mountable in user namespaces. */
- if (m->mnt_sb->s_user_ns != &init_user_ns)
+ /* The filesystem has turned off idmapped mounts. */
+ if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
return -EINVAL;
/* We're not controlling the superblock. */
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
return -EPERM;
/* Mount has already been visible in the filesystem hierarchy. */
@@ -3958,107 +4660,111 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
return 0;
}
-static struct mount *mount_setattr_prepare(struct mount_kattr *kattr,
- struct mount *mnt, int *err)
+/**
+ * mnt_allow_writers() - check whether the attribute change allows writers
+ * @kattr: the new mount attributes
+ * @mnt: the mount to which @kattr will be applied
+ *
+ * Check whether the new mount attributes in @kattr allow concurrent writers.
+ *
+ * Return: true if concurrent writers remain allowed, false if they must be held off
+ */
+static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
+ const struct mount *mnt)
{
- struct mount *m = mnt, *last = NULL;
-
- if (!is_mounted(&m->mnt)) {
- *err = -EINVAL;
- goto out;
- }
-
- if (!(mnt_has_parent(m) ? check_mnt(m) : is_anon_ns(m->mnt_ns))) {
- *err = -EINVAL;
- goto out;
- }
+ return (!(kattr->attr_set & MNT_READONLY) ||
+ (mnt->mnt.mnt_flags & MNT_READONLY)) &&
+ !kattr->mnt_idmap;
+}
- do {
- unsigned int flags;
+static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
+{
+ struct mount *m;
+ int err;
- flags = recalc_flags(kattr, m);
- if (!can_change_locked_flags(m, flags)) {
- *err = -EPERM;
- goto out;
+ for (m = mnt; m; m = next_mnt(m, mnt)) {
+ if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
+ err = -EPERM;
+ break;
}
- *err = can_idmap_mount(kattr, m);
- if (*err)
- goto out;
-
- last = m;
+ err = can_idmap_mount(kattr, m);
+ if (err)
+ break;
- if ((kattr->attr_set & MNT_READONLY) &&
- !(m->mnt.mnt_flags & MNT_READONLY)) {
- *err = mnt_hold_writers(m);
- if (*err)
- goto out;
+ if (!mnt_allow_writers(kattr, m)) {
+ err = mnt_hold_writers(m);
+ if (err) {
+ m = next_mnt(m, mnt);
+ break;
+ }
}
- } while (kattr->recurse && (m = next_mnt(m, mnt)));
-out:
- return last;
+ if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
+ return 0;
+ }
+
+ if (err) {
+ /* undo all mnt_hold_writers() we'd done */
+ for (struct mount *p = mnt; p != m; p = next_mnt(p, mnt))
+ mnt_unhold_writers(p);
+ }
+ return err;
}
static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *old_idmap;
- if (!kattr->mnt_userns)
+ if (!kattr->mnt_idmap)
return;
- mnt_userns = get_user_ns(kattr->mnt_userns);
- /* Pairs with smp_load_acquire() in mnt_user_ns(). */
- smp_store_release(&mnt->mnt.mnt_userns, mnt_userns);
+ old_idmap = mnt_idmap(&mnt->mnt);
+
+ /* Pairs with smp_load_acquire() in mnt_idmap(). */
+ smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
+ mnt_idmap_put(old_idmap);
}
-static void mount_setattr_commit(struct mount_kattr *kattr,
- struct mount *mnt, struct mount *last,
- int err)
+static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
{
- struct mount *m = mnt;
+ struct mount *m;
- do {
- if (!err) {
- unsigned int flags;
+ for (m = mnt; m; m = next_mnt(m, mnt)) {
+ unsigned int flags;
- do_idmap_mount(kattr, m);
- flags = recalc_flags(kattr, m);
- WRITE_ONCE(m->mnt.mnt_flags, flags);
- }
+ do_idmap_mount(kattr, m);
+ flags = recalc_flags(kattr, m);
+ WRITE_ONCE(m->mnt.mnt_flags, flags);
- /*
- * We either set MNT_READONLY above so make it visible
- * before ~MNT_WRITE_HOLD or we failed to recursively
- * apply mount options.
- */
- if ((kattr->attr_set & MNT_READONLY) &&
- (m->mnt.mnt_flags & MNT_WRITE_HOLD))
- mnt_unhold_writers(m);
+ /* If we had to hold writers unblock them. */
+ mnt_unhold_writers(m);
- if (!err && kattr->propagation)
+ if (kattr->propagation)
change_mnt_propagation(m, kattr->propagation);
-
- /*
- * On failure, only cleanup until we found the first mount
- * we failed to handle.
- */
- if (err && m == last)
+ if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
break;
- } while (kattr->recurse && (m = next_mnt(m, mnt)));
-
- if (!err)
- touch_mnt_namespace(mnt->mnt_ns);
+ }
+ touch_mnt_namespace(mnt->mnt_ns);
}
-static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
+static int do_mount_setattr(const struct path *path, struct mount_kattr *kattr)
{
- struct mount *mnt = real_mount(path->mnt), *last = NULL;
+ struct mount *mnt = real_mount(path->mnt);
int err = 0;
- if (path->dentry != mnt->mnt.mnt_root)
+ if (!path_mounted(path))
return -EINVAL;
+ if (kattr->mnt_userns) {
+ struct mnt_idmap *mnt_idmap;
+
+ mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
+ if (IS_ERR(mnt_idmap))
+ return PTR_ERR(mnt_idmap);
+ kattr->mnt_idmap = mnt_idmap;
+ }
+
if (kattr->propagation) {
/*
* Only take namespace_lock() if we're actually changing
@@ -4066,7 +4772,7 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
*/
namespace_lock();
if (kattr->propagation == MS_SHARED) {
- err = invent_group_ids(mnt, kattr->recurse);
+ err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE);
if (err) {
namespace_unlock();
return err;
@@ -4074,99 +4780,97 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
}
}
+ err = -EINVAL;
lock_mount_hash();
+ if (!anon_ns_root(mnt) && !check_mnt(mnt))
+ goto out;
+
/*
- * Get the mount tree in a shape where we can change mount
- * properties without failure.
+ * First, we get the mount tree in a shape where we can change mount
+ * properties without failure. If we succeed, we commit all
+ * changes; if we fail, we clean up.
*/
- last = mount_setattr_prepare(kattr, mnt, &err);
- if (last) /* Commit all changes or revert to the old state. */
- mount_setattr_commit(kattr, mnt, last, err);
+ err = mount_setattr_prepare(kattr, mnt);
+ if (!err)
+ mount_setattr_commit(kattr, mnt);
+out:
unlock_mount_hash();
if (kattr->propagation) {
- namespace_unlock();
if (err)
cleanup_group_ids(mnt, NULL);
+ namespace_unlock();
}
return err;
}
static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
- struct mount_kattr *kattr, unsigned int flags)
+ struct mount_kattr *kattr)
{
- int err = 0;
struct ns_common *ns;
struct user_namespace *mnt_userns;
- struct file *file;
if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
return 0;
- /*
- * We currently do not support clearing an idmapped mount. If this ever
- * is a use-case we can revisit this but for now let's keep it simple
- * and not allow it.
- */
- if (attr->attr_clr & MOUNT_ATTR_IDMAP)
- return -EINVAL;
+ if (attr->attr_clr & MOUNT_ATTR_IDMAP) {
+ /*
+ * We can only remove an idmapping if it's never been
+ * exposed to userspace.
+ */
+ if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE))
+ return -EINVAL;
+
+ /*
+ * Removal of idmappings is equivalent to setting
+ * nop_mnt_idmap.
+ */
+ if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) {
+ kattr->mnt_idmap = &nop_mnt_idmap;
+ return 0;
+ }
+ }
if (attr->userns_fd > INT_MAX)
return -EINVAL;
- file = fget(attr->userns_fd);
- if (!file)
+ CLASS(fd, f)(attr->userns_fd);
+ if (fd_empty(f))
return -EBADF;
- if (!proc_ns_file(file)) {
- err = -EINVAL;
- goto out_fput;
- }
+ if (!proc_ns_file(fd_file(f)))
+ return -EINVAL;
- ns = get_proc_ns(file_inode(file));
- if (ns->ops->type != CLONE_NEWUSER) {
- err = -EINVAL;
- goto out_fput;
- }
+ ns = get_proc_ns(file_inode(fd_file(f)));
+ if (ns->ns_type != CLONE_NEWUSER)
+ return -EINVAL;
/*
- * The init_user_ns is used to indicate that a vfsmount is not idmapped.
- * This is simpler than just having to treat NULL as unmapped. Users
- * wanting to idmap a mount to init_user_ns can just use a namespace
- * with an identity mapping.
+ * The initial idmapping cannot be used to create an idmapped
+ * mount. We use the initial idmapping as an indicator of a mount
+ * that is not idmapped. It can simply be passed into helpers that
+ * are aware of idmapped mounts as a convenient shortcut. A user
+ * can just create a dedicated identity mapping to achieve the same
+ * result.
*/
mnt_userns = container_of(ns, struct user_namespace, ns);
- if (mnt_userns == &init_user_ns) {
- err = -EPERM;
- goto out_fput;
- }
- kattr->mnt_userns = get_user_ns(mnt_userns);
+ if (mnt_userns == &init_user_ns)
+ return -EPERM;
-out_fput:
- fput(file);
- return err;
+ /* We're not controlling the target namespace. */
+ if (!ns_capable(mnt_userns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ kattr->mnt_userns = get_user_ns(mnt_userns);
+ return 0;
}
static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
- struct mount_kattr *kattr, unsigned int flags)
+ struct mount_kattr *kattr)
{
- unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
-
- if (flags & AT_NO_AUTOMOUNT)
- lookup_flags &= ~LOOKUP_AUTOMOUNT;
- if (flags & AT_SYMLINK_NOFOLLOW)
- lookup_flags &= ~LOOKUP_FOLLOW;
- if (flags & AT_EMPTY_PATH)
- lookup_flags |= LOOKUP_EMPTY;
-
- *kattr = (struct mount_kattr) {
- .lookup_flags = lookup_flags,
- .recurse = !!(flags & AT_RECURSIVE),
- };
-
if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
return -EINVAL;
if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
@@ -4214,13 +4918,51 @@ static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
return -EINVAL;
}
- return build_mount_idmapped(attr, usize, kattr, flags);
+ return build_mount_idmapped(attr, usize, kattr);
}
static void finish_mount_kattr(struct mount_kattr *kattr)
{
- put_user_ns(kattr->mnt_userns);
- kattr->mnt_userns = NULL;
+ if (kattr->mnt_userns) {
+ put_user_ns(kattr->mnt_userns);
+ kattr->mnt_userns = NULL;
+ }
+
+ if (kattr->mnt_idmap)
+ mnt_idmap_put(kattr->mnt_idmap);
+}
+
+static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize,
+ struct mount_kattr *kattr)
+{
+ int ret;
+ struct mount_attr attr;
+
+ BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
+
+ if (unlikely(usize > PAGE_SIZE))
+ return -E2BIG;
+ if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
+ return -EINVAL;
+
+ if (!may_mount())
+ return -EPERM;
+
+ ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
+ if (ret)
+ return ret;
+
+ /* Don't bother walking through the mounts if this is a nop. */
+ if (attr.attr_set == 0 &&
+ attr.attr_clr == 0 &&
+ attr.propagation == 0)
+ return 0; /* Tell caller to not bother. */
+
+ ret = build_mount_kattr(&attr, usize, kattr);
+ if (ret < 0)
+ return ret;
+
+ return 1;
}
SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
@@ -4229,10 +4971,8 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
{
int err;
struct path target;
- struct mount_attr attr;
struct mount_kattr kattr;
-
- BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
+ unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
if (flags & ~(AT_EMPTY_PATH |
AT_RECURSIVE |
@@ -4240,66 +4980,1017 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
AT_NO_AUTOMOUNT))
return -EINVAL;
- if (unlikely(usize > PAGE_SIZE))
- return -E2BIG;
- if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
+ if (flags & AT_NO_AUTOMOUNT)
+ lookup_flags &= ~LOOKUP_AUTOMOUNT;
+ if (flags & AT_SYMLINK_NOFOLLOW)
+ lookup_flags &= ~LOOKUP_FOLLOW;
+ if (flags & AT_EMPTY_PATH)
+ lookup_flags |= LOOKUP_EMPTY;
+
+ kattr = (struct mount_kattr) {
+ .lookup_flags = lookup_flags,
+ };
+
+ if (flags & AT_RECURSIVE)
+ kattr.kflags |= MOUNT_KATTR_RECURSE;
+
+ err = wants_mount_setattr(uattr, usize, &kattr);
+ if (err <= 0)
+ return err;
+
+ err = user_path_at(dfd, path, kattr.lookup_flags, &target);
+ if (!err) {
+ err = do_mount_setattr(&target, &kattr);
+ path_put(&target);
+ }
+ finish_mount_kattr(&kattr);
+ return err;
+}
+
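To make the reworked interface concrete: the syscall takes a struct mount_attr rather than raw flag bits, and AT_RECURSIVE is what becomes MOUNT_KATTR_RECURSE above. A minimal userspace sketch, assuming the headers export __NR_mount_setattr and struct mount_attr (libc wrapper availability varies); error handling is elided:

#include <linux/fcntl.h>	/* AT_FDCWD, AT_RECURSIVE */
#include <linux/mount.h>	/* struct mount_attr, MOUNT_ATTR_RDONLY */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int set_subtree_rdonly(const char *path)
{
	struct mount_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr_set = MOUNT_ATTR_RDONLY;

	/* AT_RECURSIVE maps to MOUNT_KATTR_RECURSE in the syscall above. */
	return syscall(__NR_mount_setattr, AT_FDCWD, path, AT_RECURSIVE,
		       &attr, sizeof(attr));
}
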
+SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename,
+ unsigned, flags, struct mount_attr __user *, uattr,
+ size_t, usize)
+{
+ if (!uattr && usize)
return -EINVAL;
- if (!may_mount())
- return -EPERM;
+ FD_PREPARE(fdf, flags, vfs_open_tree(dfd, filename, flags));
+ if (fdf.err)
+ return fdf.err;
+
+ if (uattr) {
+ struct mount_kattr kattr = {};
+ struct file *file = fd_prepare_file(fdf);
+ int ret;
+
+ if (flags & OPEN_TREE_CLONE)
+ kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE;
+ if (flags & AT_RECURSIVE)
+ kattr.kflags |= MOUNT_KATTR_RECURSE;
+
+ ret = wants_mount_setattr(uattr, usize, &kattr);
+ if (ret > 0) {
+ ret = do_mount_setattr(&file->f_path, &kattr);
+ finish_mount_kattr(&kattr);
+ }
+ if (ret)
+ return ret;
+ }
+
+ return fd_publish(fdf);
+}
+
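The point of open_tree_attr() is that wants_mount_setattr()/do_mount_setattr() run on the cloned tree before fd_publish() exposes it, so an idmapping, for instance, is attached with no window in which the unmodified tree is visible. A sketch along the lines of the previous one, assuming __NR_open_tree_attr is exported and userns_fd is an already-open user namespace fd (e.g. from /proc/<pid>/ns/user):

static int clone_tree_idmapped(const char *path, int userns_fd)
{
	struct mount_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr_set  = MOUNT_ATTR_IDMAP;
	attr.userns_fd = userns_fd;

	/*
	 * OPEN_TREE_CLONE yields a detached copy; because the tree is a
	 * clone, MOUNT_KATTR_IDMAP_REPLACE is set above, so an existing
	 * idmapping could also be replaced or cleared here.  The returned
	 * fd can later be attached with move_mount(2).
	 */
	return syscall(__NR_open_tree_attr, AT_FDCWD, path,
		       OPEN_TREE_CLONE | AT_RECURSIVE,
		       &attr, sizeof(attr));
}
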
+int show_path(struct seq_file *m, struct dentry *root)
+{
+ if (root->d_sb->s_op->show_path)
+ return root->d_sb->s_op->show_path(m, root);
+
+ seq_dentry(m, root, " \t\n\\");
+ return 0;
+}
+
+static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
+{
+ struct mount *mnt = mnt_find_id_at(ns, id);
+
+ if (!mnt || mnt->mnt_id_unique != id)
+ return NULL;
+
+ return &mnt->mnt;
+}
+
+struct kstatmount {
+ struct statmount __user *buf;
+ size_t bufsize;
+ struct vfsmount *mnt;
+ struct mnt_idmap *idmap;
+ u64 mask;
+ struct path root;
+ struct seq_file seq;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct statmount sm;
+};
+
+static u64 mnt_to_attr_flags(struct vfsmount *mnt)
+{
+ unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
+ u64 attr_flags = 0;
+
+ if (mnt_flags & MNT_READONLY)
+ attr_flags |= MOUNT_ATTR_RDONLY;
+ if (mnt_flags & MNT_NOSUID)
+ attr_flags |= MOUNT_ATTR_NOSUID;
+ if (mnt_flags & MNT_NODEV)
+ attr_flags |= MOUNT_ATTR_NODEV;
+ if (mnt_flags & MNT_NOEXEC)
+ attr_flags |= MOUNT_ATTR_NOEXEC;
+ if (mnt_flags & MNT_NODIRATIME)
+ attr_flags |= MOUNT_ATTR_NODIRATIME;
+ if (mnt_flags & MNT_NOSYMFOLLOW)
+ attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
+
+ if (mnt_flags & MNT_NOATIME)
+ attr_flags |= MOUNT_ATTR_NOATIME;
+ else if (mnt_flags & MNT_RELATIME)
+ attr_flags |= MOUNT_ATTR_RELATIME;
+ else
+ attr_flags |= MOUNT_ATTR_STRICTATIME;
+
+ if (is_idmapped_mnt(mnt))
+ attr_flags |= MOUNT_ATTR_IDMAP;
+
+ return attr_flags;
+}
+
+static u64 mnt_to_propagation_flags(struct mount *m)
+{
+ u64 propagation = 0;
+
+ if (IS_MNT_SHARED(m))
+ propagation |= MS_SHARED;
+ if (IS_MNT_SLAVE(m))
+ propagation |= MS_SLAVE;
+ if (IS_MNT_UNBINDABLE(m))
+ propagation |= MS_UNBINDABLE;
+ if (!propagation)
+ propagation |= MS_PRIVATE;
+
+ return propagation;
+}
+
+u64 vfsmount_to_propagation_flags(struct vfsmount *mnt)
+{
+ return mnt_to_propagation_flags(real_mount(mnt));
+}
+EXPORT_SYMBOL_GPL(vfsmount_to_propagation_flags);
+
+static void statmount_sb_basic(struct kstatmount *s)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ s->sm.mask |= STATMOUNT_SB_BASIC;
+ s->sm.sb_dev_major = MAJOR(sb->s_dev);
+ s->sm.sb_dev_minor = MINOR(sb->s_dev);
+ s->sm.sb_magic = sb->s_magic;
+ s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
+}
+
+static void statmount_mnt_basic(struct kstatmount *s)
+{
+ struct mount *m = real_mount(s->mnt);
+
+ s->sm.mask |= STATMOUNT_MNT_BASIC;
+ s->sm.mnt_id = m->mnt_id_unique;
+ s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
+ s->sm.mnt_id_old = m->mnt_id;
+ s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
+ s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
+ s->sm.mnt_propagation = mnt_to_propagation_flags(m);
+ s->sm.mnt_peer_group = m->mnt_group_id;
+ s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
+}
+
+static void statmount_propagate_from(struct kstatmount *s)
+{
+ struct mount *m = real_mount(s->mnt);
+
+ s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
+ if (IS_MNT_SLAVE(m))
+ s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
+}
+
+static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+ size_t start = seq->count;
+
+ ret = show_path(seq, s->mnt->mnt_root);
+ if (ret)
+ return ret;
+
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ /*
+ * Unescape the result. It would be better if the supplied string was not
+ * escaped in the first place, but that's a pretty invasive change.
+ */
+ seq->buf[seq->count] = '\0';
+ seq->count = start;
+ seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
+ return 0;
+}
+
+static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ int err;
+
+ err = seq_path_root(seq, &mnt_path, &s->root, "");
+ return err == SEQ_SKIP ? 0 : err;
+}
+
+static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ seq_puts(seq, sb->s_type->name);
+ return 0;
+}
+
+static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ if (sb->s_subtype)
+ seq_puts(seq, sb->s_subtype);
+}
+
+static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+ struct mount *r = real_mount(s->mnt);
+
+ if (sb->s_op->show_devname) {
+ size_t start = seq->count;
+ int ret;
+
+ ret = sb->s_op->show_devname(seq, s->mnt->mnt_root);
+ if (ret)
+ return ret;
+
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ /* Unescape the result */
+ seq->buf[seq->count] = '\0';
+ seq->count = start;
+ seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
+ } else {
+ seq_puts(seq, r->mnt_devname);
+ }
+ return 0;
+}
+
+static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
+{
+ s->sm.mask |= STATMOUNT_MNT_NS_ID;
+ s->sm.mnt_ns_id = ns->ns.ns_id;
+}
- err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
+static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct super_block *sb = mnt->mnt_sb;
+ size_t start = seq->count;
+ int err;
+
+ err = security_sb_show_options(seq, sb);
if (err)
return err;
- /* Don't bother walking through the mounts if this is a nop. */
- if (attr.attr_set == 0 &&
- attr.attr_clr == 0 &&
- attr.propagation == 0)
+ if (sb->s_op->show_options) {
+ err = sb->s_op->show_options(seq, mnt->mnt_root);
+ if (err)
+ return err;
+ }
+
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ if (seq->count == start)
+ return 0;
+
+ /* skip leading comma */
+ memmove(seq->buf + start, seq->buf + start + 1,
+ seq->count - start - 1);
+ seq->count--;
+
+ return 0;
+}
+
+static inline int statmount_opt_process(struct seq_file *seq, size_t start)
+{
+ char *buf_end, *opt_end, *src, *dst;
+ int count = 0;
+
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ buf_end = seq->buf + seq->count;
+ dst = seq->buf + start;
+ src = dst + 1; /* skip initial comma */
+
+ if (src >= buf_end) {
+ seq->count = start;
+ return 0;
+ }
+
+ *buf_end = '\0';
+ for (; src < buf_end; src = opt_end + 1) {
+ opt_end = strchrnul(src, ',');
+ *opt_end = '\0';
+ dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1;
+ if (WARN_ON_ONCE(++count == INT_MAX))
+ return -EOVERFLOW;
+ }
+ seq->count = dst - 1 - seq->buf;
+ return count;
+}
+
+static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct super_block *sb = mnt->mnt_sb;
+ size_t start = seq->count;
+ int err;
+
+ if (!sb->s_op->show_options)
return 0;
- err = build_mount_kattr(&attr, usize, &kattr, flags);
+ err = sb->s_op->show_options(seq, mnt->mnt_root);
if (err)
return err;
- err = user_path_at(dfd, path, kattr.lookup_flags, &target);
+ err = statmount_opt_process(seq, start);
+ if (err < 0)
+ return err;
+
+ s->sm.opt_num = err;
+ return 0;
+}
+
+static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct super_block *sb = mnt->mnt_sb;
+ size_t start = seq->count;
+ int err;
+
+ err = security_sb_show_options(seq, sb);
if (err)
return err;
- err = do_mount_setattr(&target, &kattr);
- finish_mount_kattr(&kattr);
- path_put(&target);
- return err;
+ err = statmount_opt_process(seq, start);
+ if (err < 0)
+ return err;
+
+ s->sm.opt_sec_num = err;
+ return 0;
+}
+
+static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+
+ ret = statmount_mnt_idmap(s->idmap, seq, true);
+ if (ret < 0)
+ return ret;
+
+ s->sm.mnt_uidmap_num = ret;
+ /*
+ * Always raise STATMOUNT_MNT_UIDMAP even if there are no valid
+ * mappings. This allows userspace to distinguish between a
+ * non-idmapped mount and an idmapped mount where none of the
+ * individual mappings are valid in the caller's idmapping.
+ */
+ if (is_valid_mnt_idmap(s->idmap))
+ s->sm.mask |= STATMOUNT_MNT_UIDMAP;
+ return 0;
+}
+
+static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+
+ ret = statmount_mnt_idmap(s->idmap, seq, false);
+ if (ret < 0)
+ return ret;
+
+ s->sm.mnt_gidmap_num = ret;
+ /*
+ * Always raise STATMOUNT_MNT_GIDMAP even if there are no valid
+ * mappings. This allows userspace to distinguish between a
+ * non-idmapped mount and an idmapped mount where none of the
+ * individual mappings are valid in the caller's idmapping.
+ */
+ if (is_valid_mnt_idmap(s->idmap))
+ s->sm.mask |= STATMOUNT_MNT_GIDMAP;
+ return 0;
+}
+
+static int statmount_string(struct kstatmount *s, u64 flag)
+{
+ int ret = 0;
+ size_t kbufsize;
+ struct seq_file *seq = &s->seq;
+ struct statmount *sm = &s->sm;
+ u32 start, *offp;
+
+ /* Reserve an empty string at the beginning for any unset offsets */
+ if (!seq->count)
+ seq_putc(seq, 0);
+
+ start = seq->count;
+
+ switch (flag) {
+ case STATMOUNT_FS_TYPE:
+ offp = &sm->fs_type;
+ ret = statmount_fs_type(s, seq);
+ break;
+ case STATMOUNT_MNT_ROOT:
+ offp = &sm->mnt_root;
+ ret = statmount_mnt_root(s, seq);
+ break;
+ case STATMOUNT_MNT_POINT:
+ offp = &sm->mnt_point;
+ ret = statmount_mnt_point(s, seq);
+ break;
+ case STATMOUNT_MNT_OPTS:
+ offp = &sm->mnt_opts;
+ ret = statmount_mnt_opts(s, seq);
+ break;
+ case STATMOUNT_OPT_ARRAY:
+ offp = &sm->opt_array;
+ ret = statmount_opt_array(s, seq);
+ break;
+ case STATMOUNT_OPT_SEC_ARRAY:
+ offp = &sm->opt_sec_array;
+ ret = statmount_opt_sec_array(s, seq);
+ break;
+ case STATMOUNT_FS_SUBTYPE:
+ offp = &sm->fs_subtype;
+ statmount_fs_subtype(s, seq);
+ break;
+ case STATMOUNT_SB_SOURCE:
+ offp = &sm->sb_source;
+ ret = statmount_sb_source(s, seq);
+ break;
+ case STATMOUNT_MNT_UIDMAP:
+ offp = &sm->mnt_uidmap;
+ ret = statmount_mnt_uidmap(s, seq);
+ break;
+ case STATMOUNT_MNT_GIDMAP:
+ offp = &sm->mnt_gidmap;
+ ret = statmount_mnt_gidmap(s, seq);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+
+ /*
+ * If nothing was emitted, return to avoid setting the flag
+ * and terminating the buffer.
+ */
+ if (seq->count == start)
+ return ret;
+ if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
+ return -EOVERFLOW;
+ if (kbufsize >= s->bufsize)
+ return -EOVERFLOW;
+
+ /* signal a retry */
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ if (ret)
+ return ret;
+
+ seq->buf[seq->count++] = '\0';
+ sm->mask |= flag;
+ *offp = start;
+ return 0;
+}
+
+static int copy_statmount_to_user(struct kstatmount *s)
+{
+ struct statmount *sm = &s->sm;
+ struct seq_file *seq = &s->seq;
+ char __user *str = ((char __user *)s->buf) + sizeof(*sm);
+ size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));
+
+ if (seq->count && copy_to_user(str, seq->buf, seq->count))
+ return -EFAULT;
+
+ /* Return the number of bytes copied to the buffer */
+ sm->size = copysize + seq->count;
+ if (copy_to_user(s->buf, sm, copysize))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct mount *listmnt_next(struct mount *curr, bool reverse)
+{
+ struct rb_node *node;
+
+ if (reverse)
+ node = rb_prev(&curr->mnt_node);
+ else
+ node = rb_next(&curr->mnt_node);
+
+ return node_to_mount(node);
+}
+
+static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
+{
+ struct mount *first, *child;
+
+ rwsem_assert_held(&namespace_sem);
+
+ /* We're looking at our own ns, just use get_fs_root. */
+ if (ns == current->nsproxy->mnt_ns) {
+ get_fs_root(current->fs, root);
+ return 0;
+ }
+
+ /*
+ * We have to find the first mount in our ns and use that; however, it
+ * may not exist, so handle that properly.
+ */
+ if (mnt_ns_empty(ns))
+ return -ENOENT;
+
+ first = child = ns->root;
+ for (;;) {
+ child = listmnt_next(child, false);
+ if (!child)
+ return -ENOENT;
+ if (child->mnt_parent == first)
+ break;
+ }
+
+ root->mnt = mntget(&child->mnt);
+ root->dentry = dget(root->mnt->mnt_root);
+ return 0;
}
+/* This must be updated whenever a new flag is added */
+#define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \
+ STATMOUNT_MNT_BASIC | \
+ STATMOUNT_PROPAGATE_FROM | \
+ STATMOUNT_MNT_ROOT | \
+ STATMOUNT_MNT_POINT | \
+ STATMOUNT_FS_TYPE | \
+ STATMOUNT_MNT_NS_ID | \
+ STATMOUNT_MNT_OPTS | \
+ STATMOUNT_FS_SUBTYPE | \
+ STATMOUNT_SB_SOURCE | \
+ STATMOUNT_OPT_ARRAY | \
+ STATMOUNT_OPT_SEC_ARRAY | \
+ STATMOUNT_SUPPORTED_MASK | \
+ STATMOUNT_MNT_UIDMAP | \
+ STATMOUNT_MNT_GIDMAP)
+
+/* locks: namespace_shared */
+static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
+ struct mnt_namespace *ns)
+{
+ struct mount *m;
+ int err;
+
+ /* Has the namespace already been emptied? */
+ if (mnt_ns_id && mnt_ns_empty(ns))
+ return -ENOENT;
+
+ s->mnt = lookup_mnt_in_ns(mnt_id, ns);
+ if (!s->mnt)
+ return -ENOENT;
+
+ err = grab_requested_root(ns, &s->root);
+ if (err)
+ return err;
+
+ /*
+ * Don't trigger audit denials. We just want to determine what
+ * mounts to show users.
+ */
+ m = real_mount(s->mnt);
+ if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = security_sb_statfs(s->mnt->mnt_root);
+ if (err)
+ return err;
+
+ /*
+ * Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap
+ * can change concurrently as we only hold the read-side of the
+ * namespace semaphore and mount properties may change with only
+ * the mount lock held.
+ *
+ * We could sample the mount lock sequence counter to detect
+ * those changes and retry. But it's not worth it. Worst that
+ * happens is that the mnt->mnt_idmap pointer is already changed
+ * while mnt->mnt_flags isn't or vice versa. So what.
+ *
+ * Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved
+ * via READ_ONCE()/WRITE_ONCE() and guard against theoretical
+ * torn read/write. That's all we care about right now.
+ */
+ s->idmap = mnt_idmap(s->mnt);
+ if (s->mask & STATMOUNT_MNT_BASIC)
+ statmount_mnt_basic(s);
+
+ if (s->mask & STATMOUNT_SB_BASIC)
+ statmount_sb_basic(s);
+
+ if (s->mask & STATMOUNT_PROPAGATE_FROM)
+ statmount_propagate_from(s);
+
+ if (s->mask & STATMOUNT_FS_TYPE)
+ err = statmount_string(s, STATMOUNT_FS_TYPE);
+
+ if (!err && s->mask & STATMOUNT_MNT_ROOT)
+ err = statmount_string(s, STATMOUNT_MNT_ROOT);
+
+ if (!err && s->mask & STATMOUNT_MNT_POINT)
+ err = statmount_string(s, STATMOUNT_MNT_POINT);
+
+ if (!err && s->mask & STATMOUNT_MNT_OPTS)
+ err = statmount_string(s, STATMOUNT_MNT_OPTS);
+
+ if (!err && s->mask & STATMOUNT_OPT_ARRAY)
+ err = statmount_string(s, STATMOUNT_OPT_ARRAY);
+
+ if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY)
+ err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY);
+
+ if (!err && s->mask & STATMOUNT_FS_SUBTYPE)
+ err = statmount_string(s, STATMOUNT_FS_SUBTYPE);
+
+ if (!err && s->mask & STATMOUNT_SB_SOURCE)
+ err = statmount_string(s, STATMOUNT_SB_SOURCE);
+
+ if (!err && s->mask & STATMOUNT_MNT_UIDMAP)
+ err = statmount_string(s, STATMOUNT_MNT_UIDMAP);
+
+ if (!err && s->mask & STATMOUNT_MNT_GIDMAP)
+ err = statmount_string(s, STATMOUNT_MNT_GIDMAP);
+
+ if (!err && s->mask & STATMOUNT_MNT_NS_ID)
+ statmount_mnt_ns_id(s, ns);
+
+ if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) {
+ s->sm.mask |= STATMOUNT_SUPPORTED_MASK;
+ s->sm.supported_mask = STATMOUNT_SUPPORTED;
+ }
+
+ if (err)
+ return err;
+
+ /* Are there bits in the return mask not present in STATMOUNT_SUPPORTED? */
+ WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask);
+
+ return 0;
+}
+
+static inline bool retry_statmount(const long ret, size_t *seq_size)
+{
+ if (likely(ret != -EAGAIN))
+ return false;
+ if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
+ return false;
+ if (unlikely(*seq_size > MAX_RW_COUNT))
+ return false;
+ return true;
+}
+
+#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
+ STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
+ STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
+ STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \
+ STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP)
+
+static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
+ struct statmount __user *buf, size_t bufsize,
+ size_t seq_size)
+{
+ if (!access_ok(buf, bufsize))
+ return -EFAULT;
+
+ memset(ks, 0, sizeof(*ks));
+ ks->mask = kreq->param;
+ ks->buf = buf;
+ ks->bufsize = bufsize;
+
+ if (ks->mask & STATMOUNT_STRING_REQ) {
+ if (bufsize == sizeof(ks->sm))
+ return -EOVERFLOW;
+
+ ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
+ if (!ks->seq.buf)
+ return -ENOMEM;
+
+ ks->seq.size = seq_size;
+ }
+
+ return 0;
+}
+
+static int copy_mnt_id_req(const struct mnt_id_req __user *req,
+ struct mnt_id_req *kreq)
+{
+ int ret;
+ size_t usize;
+
+ BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);
+
+ ret = get_user(usize, &req->size);
+ if (ret)
+ return -EFAULT;
+ if (unlikely(usize > PAGE_SIZE))
+ return -E2BIG;
+ if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
+ return -EINVAL;
+ memset(kreq, 0, sizeof(*kreq));
+ ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
+ if (ret)
+ return ret;
+ if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
+ return -EINVAL;
+ /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
+ if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * If the user requested a specific mount namespace id, look that up and return
+ * that, or if not simply grab a passive reference on our mount namespace and
+ * return that.
+ */
+static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
+{
+ struct mnt_namespace *mnt_ns;
+
+ if (kreq->mnt_ns_id) {
+ mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
+ if (!mnt_ns)
+ return ERR_PTR(-ENOENT);
+ } else if (kreq->mnt_ns_fd) {
+ struct ns_common *ns;
+
+ CLASS(fd, f)(kreq->mnt_ns_fd);
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+
+ if (!proc_ns_file(fd_file(f)))
+ return ERR_PTR(-EINVAL);
+
+ ns = get_proc_ns(file_inode(fd_file(f)));
+ if (ns->ns_type != CLONE_NEWNS)
+ return ERR_PTR(-EINVAL);
+
+ mnt_ns = to_mnt_ns(ns);
+ refcount_inc(&mnt_ns->passive);
+ } else {
+ mnt_ns = current->nsproxy->mnt_ns;
+ refcount_inc(&mnt_ns->passive);
+ }
+
+ return mnt_ns;
+}
+
+SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
+ struct statmount __user *, buf, size_t, bufsize,
+ unsigned int, flags)
+{
+ struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
+ struct kstatmount *ks __free(kfree) = NULL;
+ struct mnt_id_req kreq;
+ /* We currently support retrieval of 3 strings. */
+ size_t seq_size = 3 * PATH_MAX;
+ int ret;
+
+ if (flags)
+ return -EINVAL;
+
+ ret = copy_mnt_id_req(req, &kreq);
+ if (ret)
+ return ret;
+
+ ns = grab_requested_mnt_ns(&kreq);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ return -ENOENT;
+
+ ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
+ if (!ks)
+ return -ENOMEM;
+
+retry:
+ ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
+ if (ret)
+ return ret;
+
+ scoped_guard(namespace_shared)
+ ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);
+
+ if (!ret)
+ ret = copy_statmount_to_user(ks);
+ kvfree(ks->seq.buf);
+ path_put(&ks->root);
+ if (retry_statmount(ret, &seq_size))
+ goto retry;
+ return ret;
+}
+
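From userspace the new call is used roughly as follows. This is only a sketch: it assumes the uapi struct statmount ends in the flexible char array (str[] below) that the statmount_string() helpers above fill, with each string field holding an offset into that array; a buffer that is too small fails with EOVERFLOW and is simply retried with a larger one. Same headers as the mount_setattr sketch, plus <stdio.h>, <stdlib.h> and <linux/types.h>:

static void print_mount(__u64 mnt_id)
{
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= mnt_id,
		.param	= STATMOUNT_MNT_BASIC | STATMOUNT_FS_TYPE |
			  STATMOUNT_MNT_POINT,
	};
	size_t bufsize = 1 << 12;
	struct statmount *sm = malloc(bufsize);

	if (!sm)
		return;

	/* A too-small buffer fails with EOVERFLOW: grow and retry. */
	if (syscall(__NR_statmount, &req, sm, bufsize, 0) == 0) {
		/* Only fields flagged in sm->mask were actually filled in. */
		if (sm->mask & STATMOUNT_FS_TYPE)
			printf("type:  %s\n", sm->str + sm->fs_type);
		if (sm->mask & STATMOUNT_MNT_POINT)
			printf("point: %s\n", sm->str + sm->mnt_point);
		if (sm->mask & STATMOUNT_MNT_BASIC)
			printf("id:    %llu (parent %llu)\n",
			       (unsigned long long)sm->mnt_id,
			       (unsigned long long)sm->mnt_parent_id);
	}
	free(sm);
}

The same pattern applies to STATMOUNT_OPT_ARRAY: sm->opt_num NUL-separated strings starting at the sm->opt_array offset, exactly as statmount_opt_process() above lays them out.
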
+struct klistmount {
+ u64 last_mnt_id;
+ u64 mnt_parent_id;
+ u64 *kmnt_ids;
+ u32 nr_mnt_ids;
+ struct mnt_namespace *ns;
+ struct path root;
+};
+
+/* locks: namespace_shared */
+static ssize_t do_listmount(struct klistmount *kls, bool reverse)
+{
+ struct mnt_namespace *ns = kls->ns;
+ u64 mnt_parent_id = kls->mnt_parent_id;
+ u64 last_mnt_id = kls->last_mnt_id;
+ u64 *mnt_ids = kls->kmnt_ids;
+ size_t nr_mnt_ids = kls->nr_mnt_ids;
+ struct path orig;
+ struct mount *r, *first;
+ ssize_t ret;
+
+ rwsem_assert_held(&namespace_sem);
+
+ ret = grab_requested_root(ns, &kls->root);
+ if (ret)
+ return ret;
+
+ if (mnt_parent_id == LSMT_ROOT) {
+ orig = kls->root;
+ } else {
+ orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
+ if (!orig.mnt)
+ return -ENOENT;
+ orig.dentry = orig.mnt->mnt_root;
+ }
+
+ /*
+ * Don't trigger audit denials. We just want to determine what
+ * mounts to show users.
+ */
+ if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = security_sb_statfs(orig.dentry);
+ if (ret)
+ return ret;
+
+ if (!last_mnt_id) {
+ if (reverse)
+ first = node_to_mount(ns->mnt_last_node);
+ else
+ first = node_to_mount(ns->mnt_first_node);
+ } else {
+ if (reverse)
+ first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
+ else
+ first = mnt_find_id_at(ns, last_mnt_id + 1);
+ }
+
+ for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
+ if (r->mnt_id_unique == mnt_parent_id)
+ continue;
+ if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
+ continue;
+ *mnt_ids = r->mnt_id_unique;
+ mnt_ids++;
+ nr_mnt_ids--;
+ ret++;
+ }
+ return ret;
+}
+
+static void __free_klistmount_free(const struct klistmount *kls)
+{
+ path_put(&kls->root);
+ kvfree(kls->kmnt_ids);
+ mnt_ns_release(kls->ns);
+}
+
+static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
+ size_t nr_mnt_ids)
+{
+ u64 last_mnt_id = kreq->param;
+ struct mnt_namespace *ns;
+
+ /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
+ if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
+ return -EINVAL;
+
+ kls->last_mnt_id = last_mnt_id;
+
+ kls->nr_mnt_ids = nr_mnt_ids;
+ kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids),
+ GFP_KERNEL_ACCOUNT);
+ if (!kls->kmnt_ids)
+ return -ENOMEM;
+
+ ns = grab_requested_mnt_ns(kreq);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+ kls->ns = ns;
+
+ kls->mnt_parent_id = kreq->mnt_id;
+ return 0;
+}
+
+SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
+ u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
+{
+ struct klistmount kls __free(klistmount_free) = {};
+ const size_t maxcount = 1000000;
+ struct mnt_id_req kreq;
+ ssize_t ret;
+
+ if (flags & ~LISTMOUNT_REVERSE)
+ return -EINVAL;
+
+ /*
+ * If the mount namespace really has more than 1 million mounts, the
+ * caller must iterate over the mount namespace (and reconsider their
+ * system design...).
+ */
+ if (unlikely(nr_mnt_ids > maxcount))
+ return -EOVERFLOW;
+
+ if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
+ return -EFAULT;
+
+ ret = copy_mnt_id_req(req, &kreq);
+ if (ret)
+ return ret;
+
+ ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids);
+ if (ret)
+ return ret;
+
+ if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) &&
+ !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN))
+ return -ENOENT;
+
+ /*
+ * We only need to guard against mount topology changes as
+ * listmount() doesn't care about any mount properties.
+ */
+ scoped_guard(namespace_shared)
+ ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE));
+ if (ret <= 0)
+ return ret;
+
+ if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids)))
+ return -EFAULT;
+
+ return ret;
+}
+
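Enumeration pairs with the above: LSMT_ROOT asks for everything under the caller's (or the requested namespace's) root, req.param carries the last mount id already seen, and the call is repeated until fewer ids than requested come back. A sketch reusing the print_mount() helper from the statmount example:

static void list_all_mounts(void)
{
	__u64 ids[256];
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= LSMT_ROOT,	/* everything under the ns root */
	};
	long n;

	do {
		n = syscall(__NR_listmount, &req, ids, 256, 0);
		for (long i = 0; i < n; i++) {
			print_mount(ids[i]);
			req.param = ids[i];	/* resume after this id */
		}
	} while (n == 256);
}
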
+struct mnt_namespace init_mnt_ns = {
+ .ns = NS_COMMON_INIT(init_mnt_ns),
+ .user_ns = &init_user_ns,
+ .passive = REFCOUNT_INIT(1),
+ .mounts = RB_ROOT,
+ .poll = __WAIT_QUEUE_HEAD_INITIALIZER(init_mnt_ns.poll),
+};
+
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
struct mount *m;
- struct mnt_namespace *ns;
struct path root;
- mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
+ mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
if (IS_ERR(mnt))
panic("Can't create rootfs");
- ns = alloc_mnt_ns(&init_user_ns, false);
- if (IS_ERR(ns))
- panic("Can't allocate initial namespace");
m = real_mount(mnt);
- m->mnt_ns = ns;
- ns->root = m;
- ns->mounts = 1;
- list_add(&m->mnt_list, &ns->list);
- init_task.nsproxy->mnt_ns = ns;
- get_mnt_ns(ns);
+ init_mnt_ns.root = m;
+ init_mnt_ns.nr_mounts = 1;
+ mnt_add_to_ns(&init_mnt_ns, m);
+ init_task.nsproxy->mnt_ns = &init_mnt_ns;
+ get_mnt_ns(&init_mnt_ns);
root.mnt = mnt;
root.dentry = mnt->mnt_root;
- mnt->mnt_flags |= MNT_LOCKED;
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
+
+ ns_tree_add(&init_mnt_ns);
}
void __init mnt_init(void)
@@ -4339,10 +6030,12 @@ void __init mnt_init(void)
void put_mnt_ns(struct mnt_namespace *ns)
{
- if (!refcount_dec_and_test(&ns->ns.count))
+ if (!ns_ref_put(ns))
return;
- drop_collected_mounts(&ns->root->mnt);
- free_mnt_ns(ns);
+ guard(namespace_excl)();
+ emptied_ns = ns;
+ guard(mount_writer)();
+ umount_tree(ns->root, 0);
}
struct vfsmount *kern_mount(struct file_system_type *type)
@@ -4363,8 +6056,8 @@ EXPORT_SYMBOL_GPL(kern_mount);
void kern_unmount(struct vfsmount *mnt)
{
/* release long term mount so mount point can be released */
- if (!IS_ERR_OR_NULL(mnt)) {
- real_mount(mnt)->mnt_ns = NULL;
+ if (!IS_ERR(mnt)) {
+ mnt_make_shortterm(mnt);
synchronize_rcu(); /* yecchhh... */
mntput(mnt);
}
@@ -4376,8 +6069,7 @@ void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
unsigned int i;
for (i = 0; i < num; i++)
- if (mnt[i])
- real_mount(mnt[i])->mnt_ns = NULL;
+ mnt_make_shortterm(mnt[i]);
synchronize_rcu_expedited();
for (i = 0; i < num; i++)
mntput(mnt[i]);
@@ -4392,25 +6084,18 @@ bool our_mnt(struct vfsmount *mnt)
bool current_chrooted(void)
{
/* Does the current process have a non-standard root */
- struct path ns_root;
- struct path fs_root;
- bool chrooted;
-
- /* Find the namespace root */
- ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
- ns_root.dentry = ns_root.mnt->mnt_root;
- path_get(&ns_root);
- while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
- ;
+ struct path fs_root __free(path_put) = {};
+ struct mount *root;
get_fs_root(current->fs, &fs_root);
- chrooted = !path_equal(&fs_root, &ns_root);
+ /* Find the namespace root */
+
+ guard(mount_locked_reader)();
- path_put(&fs_root);
- path_put(&ns_root);
+ root = topmost_overmount(current->nsproxy->mnt_ns->root);
- return chrooted;
+ return fs_root.mnt != &root->mnt || !path_mounted(&fs_root);
}
static bool mnt_already_visible(struct mnt_namespace *ns,
@@ -4418,18 +6103,13 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
int *new_mnt_flags)
{
int new_flags = *new_mnt_flags;
- struct mount *mnt;
- bool visible = false;
+ struct mount *mnt, *n;
- down_read(&namespace_sem);
- lock_ns_list(ns);
- list_for_each_entry(mnt, &ns->list, mnt_list) {
+ guard(namespace_shared)();
+ rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
struct mount *child;
int mnt_flags;
- if (mnt_is_cursor(mnt))
- continue;
-
if (mnt->mnt.mnt_sb->s_type != sb->s_type)
continue;
@@ -4465,21 +6145,17 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
/* Only worry about locked mounts */
if (!(child->mnt.mnt_flags & MNT_LOCKED))
continue;
- /* Is the directory permanetly empty? */
+ /* Is the directory permanently empty? */
if (!is_empty_dir_inode(inode))
goto next;
}
/* Preserve the locked attributes */
*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
MNT_LOCK_ATIME);
- visible = true;
- goto found;
+ return true;
next: ;
}
-found:
- unlock_ns_list(ns);
- up_read(&namespace_sem);
- return visible;
+ return false;
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
@@ -4590,9 +6266,29 @@ static struct user_namespace *mntns_owner(struct ns_common *ns)
const struct proc_ns_operations mntns_operations = {
.name = "mnt",
- .type = CLONE_NEWNS,
.get = mntns_get,
.put = mntns_put,
.install = mntns_install,
.owner = mntns_owner,
};
+
+#ifdef CONFIG_SYSCTL
+static const struct ctl_table fs_namespace_sysctls[] = {
+ {
+ .procname = "mount-max",
+ .data = &sysctl_mount_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+};
+
+static int __init init_fs_namespace_sysctls(void)
+{
+ register_sysctl_init("fs", fs_namespace_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_namespace_sysctls);
+
+#endif /* CONFIG_SYSCTL */
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
index b4db21022cb4..7701c037c328 100644
--- a/fs/netfs/Kconfig
+++ b/fs/netfs/Kconfig
@@ -21,3 +21,40 @@ config NETFS_STATS
multi-CPU system these may be on cachelines that keep bouncing
between CPUs. On the other hand, the stats are very useful for
debugging purposes. Saying 'Y' here is recommended.
+
+config NETFS_DEBUG
+ bool "Enable dynamic debugging for netfslib and FS-Cache"
+ depends on NETFS_SUPPORT
+ help
+ This permits debugging to be dynamically enabled in the local caching
+ management module. If this is set, the debugging output may be
+ enabled by setting bits in /sys/module/netfs/parameters/debug.
+
+config FSCACHE
+ bool "General filesystem local caching manager"
+ depends on NETFS_SUPPORT
+ help
+ This option enables a generic filesystem caching manager that can be
+ used by various network and other filesystems to cache data locally.
+ Different sorts of caches can be plugged in, depending on the
+ resources available.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_STATS
+ bool "Gather statistical information on local caching"
+ depends on FSCACHE && PROC_FS
+ select NETFS_STATS
+ help
+ This option causes statistical information to be gathered on local
+ caching and exported through file:
+
+ /proc/fs/fscache/stats
+
+ The gathering of statistics adds a certain amount of overhead to
+ execution as there are quite a few stats gathered, and on a
+ multi-CPU system these may be on cachelines that keep bouncing
+ between CPUs. On the other hand, the stats are very useful for
+ debugging purposes. Saying 'Y' here is recommended.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index c15bfc966d96..b43188d64bd8 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -1,5 +1,36 @@
# SPDX-License-Identifier: GPL-2.0
-netfs-y := read_helper.o stats.o
+netfs-y := \
+ buffered_read.o \
+ buffered_write.o \
+ direct_read.o \
+ direct_write.o \
+ iterator.o \
+ locking.o \
+ main.o \
+ misc.o \
+ objects.o \
+ read_collect.o \
+ read_pgpriv2.o \
+ read_retry.o \
+ read_single.o \
+ rolling_buffer.o \
+ write_collect.o \
+ write_issue.o \
+ write_retry.o
-obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
+netfs-$(CONFIG_NETFS_STATS) += stats.o
+
+netfs-$(CONFIG_FSCACHE) += \
+ fscache_cache.o \
+ fscache_cookie.o \
+ fscache_io.o \
+ fscache_main.o \
+ fscache_volume.o
+
+ifeq ($(CONFIG_PROC_FS),y)
+netfs-$(CONFIG_FSCACHE) += fscache_proc.o
+endif
+netfs-$(CONFIG_FSCACHE_STATS) += fscache_stats.o
+
+obj-$(CONFIG_NETFS_SUPPORT) += netfs.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
new file mode 100644
index 000000000000..37ab6f28b5ad
--- /dev/null
+++ b/fs/netfs/buffered_read.c
@@ -0,0 +1,834 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level buffered read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+ unsigned long long *_start,
+ unsigned long long *_len,
+ unsigned long long i_size)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ if (cres->ops && cres->ops->expand_readahead)
+ cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+ struct readahead_control *ractl)
+{
+ /* Give the cache a chance to change the request parameters. The
+ * resultant request must contain the original region.
+ */
+ netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
+ /* Give the netfs a chance to change the request parameters. The
+ * resultant request must contain the original region.
+ */
+ if (rreq->netfs_ops->expand_readahead)
+ rreq->netfs_ops->expand_readahead(rreq);
+
+ /* Expand the request if the cache wants it to start earlier. Note
+ * that the expansion may get further extended if the VM wishes to
+ * insert THPs and the preferred start and/or end wind up in the middle
+ * of THPs.
+ *
+ * If this is the case, however, the THP size should be an integer
+ * multiple of the cache granule size, so we get a whole number of
+ * granules to deal with.
+ */
+ if (rreq->start != readahead_pos(ractl) ||
+ rreq->len != readahead_length(ractl)) {
+ readahead_expand(ractl, rreq->start, rreq->len);
+ rreq->start = readahead_pos(ractl);
+ rreq->len = readahead_length(ractl);
+
+ trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+ netfs_read_trace_expanded);
+ }
+}
+
+/*
+ * Begin an operation, and fetch the stored zero point value from the cookie if
+ * available.
+ */
+static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
+/*
+ * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
+ * @subreq: The subrequest to be set up
+ *
+ * Prepare the I/O iterator representing the read buffer on a subrequest for
+ * the filesystem to use for I/O (it can be passed directly to a socket). This
+ * is intended to be called from the ->issue_read() method once the filesystem
+ * has trimmed the request to the size it wants.
+ *
+ * Returns the limited size if successful and -ENOMEM if insufficient memory
+ * available.
+ *
+ * [!] NOTE: This must be run in the same thread that ->issue_read() was
+ * called from, as we access the readahead_control struct.
+ */
+static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq,
+ struct readahead_control *ractl)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ size_t rsize = subreq->len;
+
+ if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER)
+ rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);
+
+ if (ractl) {
+ /* If we don't have sufficient folios in the rolling buffer,
+ * extract a folioq's worth from the readahead region at a time
+ * into the buffer. Note that this acquires a ref on each page
+ * that we will need to release later - but we don't want to do
+ * that until after we've started the I/O.
+ */
+ struct folio_batch put_batch;
+
+ folio_batch_init(&put_batch);
+ while (rreq->submitted < subreq->start + rsize) {
+ ssize_t added;
+
+ added = rolling_buffer_load_from_ra(&rreq->buffer, ractl,
+ &put_batch);
+ if (added < 0)
+ return added;
+ rreq->submitted += added;
+ }
+ folio_batch_release(&put_batch);
+ }
+
+ subreq->len = rsize;
+ if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
+ size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
+ rreq->io_streams[0].sreq_max_segs);
+
+ if (limit < rsize) {
+ subreq->len = limit;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
+ }
+ }
+
+ subreq->io_iter = rreq->buffer.iter;
+
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+ rolling_buffer_advance(&rreq->buffer, subreq->len);
+ return subreq->len;
+}
+
+static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ loff_t i_size)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+ enum netfs_io_source source;
+
+ if (!cres->ops)
+ return NETFS_DOWNLOAD_FROM_SERVER;
+ source = cres->ops->prepare_read(subreq, i_size);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ return source;
+
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ netfs_stat(&netfs_n_rh_read);
+ cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_IGNORE,
+ netfs_cache_read_terminated, subreq);
+}
+
+static void netfs_queue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ bool last_subreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ /* We add to the end of the list whilst the collector may be walking
+ * the list. The collector only moves forwards and uses the lock to
+ * remove entries from the front.
+ */
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
+
+ if (last_subreq) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ }
+
+ spin_unlock(&rreq->lock);
+}
+
+static void netfs_issue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ switch (subreq->source) {
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ rreq->netfs_ops->issue_read(subreq);
+ break;
+ case NETFS_READ_FROM_CACHE:
+ netfs_read_cache_to_pagecache(rreq, subreq);
+ break;
+ default:
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ subreq->error = 0;
+ iov_iter_zero(subreq->len, &subreq->io_iter);
+ subreq->transferred = subreq->len;
+ netfs_read_subreq_terminated(subreq);
+ break;
+ }
+}
+
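The contract ->issue_read() has to honour is visible in the default branch above: fill (some prefix of) subreq->io_iter, record the amount in subreq->transferred, and report completion with netfs_read_subreq_terminated(). A deliberately minimal sketch of such an implementation for a hypothetical filesystem whose file contents live in an in-kernel blob; myfs_inode, MYFS_I() and the blob fields are made up for illustration:

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct myfs_inode *mi = MYFS_I(subreq->rreq->inode);	/* hypothetical */
	size_t copied = 0;

	/* Copy whatever part of the blob overlaps this subrequest. */
	if (subreq->start < mi->blob_len)
		copied = copy_to_iter(mi->blob + subreq->start,
				      umin(subreq->len,
					   mi->blob_len - subreq->start),
				      &subreq->io_iter);

	/* Anything we did not fill (reads beyond EOF) gets cleared. */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	subreq->transferred = copied;
	subreq->error = 0;
	netfs_read_subreq_terminated(subreq);
}
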
+/*
+ * Perform a read to the pagecache from a series of sources of different types,
+ * slicing up the region to be read according to available cache blocks and
+ * network rsize.
+ */
+static void netfs_read_to_pagecache(struct netfs_io_request *rreq,
+ struct readahead_control *ractl)
+{
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
+ unsigned long long start = rreq->start;
+ ssize_t size = rreq->len;
+ int ret = 0;
+
+ do {
+ struct netfs_io_subrequest *subreq;
+ enum netfs_io_source source = NETFS_SOURCE_UNKNOWN;
+ ssize_t slice;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ subreq->start = start;
+ subreq->len = size;
+
+ source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
+ subreq->source = source;
+ if (source == NETFS_DOWNLOAD_FROM_SERVER) {
+ unsigned long long zp = umin(ictx->zero_point, rreq->i_size);
+ size_t len = subreq->len;
+
+ if (unlikely(rreq->origin == NETFS_READ_SINGLE))
+ zp = rreq->i_size;
+ if (subreq->start >= zp) {
+ subreq->source = source = NETFS_FILL_WITH_ZEROES;
+ goto fill_with_zeroes;
+ }
+
+ if (len > zp - subreq->start)
+ len = zp - subreq->start;
+ if (len == 0) {
+ pr_err("ZERO-LEN READ: R=%08x[%x] l=%zx/%zx s=%llx z=%llx i=%llx",
+ rreq->debug_id, subreq->debug_index,
+ subreq->len, size,
+ subreq->start, ictx->zero_point, rreq->i_size);
+ break;
+ }
+ subreq->len = len;
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0) {
+ subreq->error = ret;
+ /* Not queued - release both refs. */
+ netfs_put_subrequest(subreq,
+ netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq,
+ netfs_sreq_trace_put_cancel);
+ break;
+ }
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ }
+ goto issue;
+ }
+
+ fill_with_zeroes:
+ if (source == NETFS_FILL_WITH_ZEROES) {
+ subreq->source = NETFS_FILL_WITH_ZEROES;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ netfs_stat(&netfs_n_rh_zero);
+ goto issue;
+ }
+
+ if (source == NETFS_READ_FROM_CACHE) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ goto issue;
+ }
+
+ pr_err("Unexpected read source %u\n", source);
+ WARN_ON_ONCE(1);
+ break;
+
+ issue:
+ slice = netfs_prepare_read_iterator(subreq, ractl);
+ if (slice < 0) {
+ ret = slice;
+ subreq->error = ret;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
+ /* Not queued - release both refs. */
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ break;
+ }
+ size -= slice;
+ start += slice;
+
+ netfs_queue_read(rreq, subreq, size <= 0);
+ netfs_issue_read(rreq, subreq);
+ cond_resched();
+ } while (size > 0);
+
+ if (unlikely(size > 0)) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ netfs_wake_collector(rreq);
+ }
+
+ /* Defer error return as we may need to wait for outstanding I/O. */
+ cmpxchg(&rreq->error, 0, ret);
+}
+
+/**
+ * netfs_readahead - Helper to manage a read request
+ * @ractl: The description of the readahead request
+ *
+ * Fulfil a readahead request by drawing data from the cache if possible, or
+ * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
+ * requests from different sources will get munged together. If necessary, the
+ * readahead window can be expanded in either direction to a more convenient
+ * alignment for RPC efficiency or to make storage in the cache feasible.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+void netfs_readahead(struct readahead_control *ractl)
+{
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ictx = netfs_inode(ractl->mapping->host);
+ unsigned long long start = readahead_pos(ractl);
+ size_t size = readahead_length(ractl);
+ int ret;
+
+ rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size,
+ NETFS_READAHEAD);
+ if (IS_ERR(rreq))
+ return;
+
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
+
+ ret = netfs_begin_cache_read(rreq, ictx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto cleanup_free;
+
+ netfs_stat(&netfs_n_rh_readahead);
+ trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+ netfs_read_trace_readahead);
+
+ netfs_rreq_expand(rreq, ractl);
+
+ rreq->submitted = rreq->start;
+ if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
+ goto cleanup_free;
+ netfs_read_to_pagecache(rreq, ractl);
+
+ return netfs_put_request(rreq, netfs_rreq_trace_put_return);
+
+cleanup_free:
+ return netfs_put_failed_request(rreq);
+}
+EXPORT_SYMBOL(netfs_readahead);
+
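For orientation, the wiring a filesystem needs in order to use these exported helpers is roughly the following: the fs-private inode embeds a struct netfs_inode (so the netfs context really is contiguous to the VFS inode, as the kernel-doc requires) and the address_space_operations point straight at the helpers. The ops-table type and field names are the usual netfs ones, but treat this as a sketch rather than part of the patch; the myfs_* names are hypothetical:

/* The netfs context must wrap/lead the VFS inode. */
struct myfs_inode {
	struct netfs_inode	netfs;	/* contains the struct inode */
	/* ... filesystem-private fields ... */
};

static const struct netfs_request_ops myfs_req_ops = {
	.issue_read	= myfs_issue_read,	/* see the sketch further up */
};

static const struct address_space_operations myfs_aops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	/* the write-side entry points are omitted from this sketch */
};

At inode-initialisation time the filesystem would point the context at its ops table (typically via netfs_inode_init()) before installing myfs_aops on the mapping.
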
+/*
+ * Create a rolling buffer with a single occupying folio.
+ */
+static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio,
+ unsigned int rollbuf_flags)
+{
+ ssize_t added;
+
+ if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
+ return -ENOMEM;
+
+ added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);
+ if (added < 0)
+ return added;
+ rreq->submitted = rreq->start + added;
+ return 0;
+}
+
+/*
+ * Read into gaps in a folio partially filled by a streaming write.
+ */
+static int netfs_read_gaps(struct file *file, struct folio *folio)
+{
+ struct netfs_io_request *rreq;
+ struct address_space *mapping = folio->mapping;
+ struct netfs_folio *finfo = netfs_folio_info(folio);
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ struct folio *sink = NULL;
+ struct bio_vec *bvec;
+ unsigned int from = finfo->dirty_offset;
+ unsigned int to = from + finfo->dirty_len;
+ unsigned int off = 0, i = 0;
+ size_t flen = folio_size(folio);
+ size_t nr_bvec = flen / PAGE_SIZE + 2;
+ size_t part;
+ int ret;
+
+ _enter("%lx", folio->index);
+
+ rreq = netfs_alloc_request(mapping, file, folio_pos(folio), flen, NETFS_READ_GAPS);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto alloc_error;
+ }
+
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto discard;
+
+ netfs_stat(&netfs_n_rh_read_folio);
+ trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_read_gaps);
+
+ /* Fiddle the buffer so that a gap at the beginning and/or a gap at the
+ * end gets read into the folio, but the middle is discarded.
+ */
+ ret = -ENOMEM;
+ bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL);
+ if (!bvec)
+ goto discard;
+
+ sink = folio_alloc(GFP_KERNEL, 0);
+ if (!sink) {
+ kfree(bvec);
+ goto discard;
+ }
+
+ trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+
+ rreq->direct_bv = bvec;
+ rreq->direct_bv_count = nr_bvec;
+ if (from > 0) {
+ bvec_set_folio(&bvec[i++], folio, from, 0);
+ off = from;
+ }
+ while (off < to) {
+ part = min_t(size_t, to - off, PAGE_SIZE);
+ bvec_set_folio(&bvec[i++], sink, part, 0);
+ off += part;
+ }
+ if (to < flen)
+ bvec_set_folio(&bvec[i++], folio, flen - to, to);
+ iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
+ rreq->submitted = rreq->start + flen;
+
+ netfs_read_to_pagecache(rreq, NULL);
+
+ if (sink)
+ folio_put(sink);
+
+ ret = netfs_wait_for_read(rreq);
+ if (ret >= 0) {
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+ }
+ folio_unlock(folio);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ return ret < 0 ? ret : 0;
+
+discard:
+ netfs_put_failed_request(rreq);
+alloc_error:
+ folio_unlock(folio);
+ return ret;
+}
+
+/**
+ * netfs_read_folio - Helper to manage a read_folio request
+ * @file: The file to read from
+ * @folio: The folio to read
+ *
+ * Fulfil a read_folio request by drawing data from the cache if
+ * possible, or the netfs if not. Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_read_folio(struct file *file, struct folio *folio)
+{
+ struct address_space *mapping = folio->mapping;
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ int ret;
+
+ if (folio_test_dirty(folio)) {
+ trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+ return netfs_read_gaps(file, folio);
+ }
+
+ _enter("%lx", folio->index);
+
+ rreq = netfs_alloc_request(mapping, file,
+ folio_pos(folio), folio_size(folio),
+ NETFS_READPAGE);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto alloc_error;
+ }
+
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto discard;
+
+ netfs_stat(&netfs_n_rh_read_folio);
+ trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+
+ /* Set up the output buffer */
+ ret = netfs_create_singular_buffer(rreq, folio, 0);
+ if (ret < 0)
+ goto discard;
+
+ netfs_read_to_pagecache(rreq, NULL);
+ ret = netfs_wait_for_read(rreq);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ return ret < 0 ? ret : 0;
+
+discard:
+ netfs_put_failed_request(rreq);
+alloc_error:
+ folio_unlock(folio);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_read_folio);
+
+/*
+ * Prepare a folio for writing without reading first
+ * @folio: The folio being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the folio and return true. Otherwise, return false.
+ */
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+ bool always_fill)
+{
+ struct inode *inode = folio_inode(folio);
+ loff_t i_size = i_size_read(inode);
+ size_t offset = offset_in_folio(folio, pos);
+ size_t plen = folio_size(folio);
+
+ if (unlikely(always_fill)) {
+ if (pos - offset + len <= i_size)
+ return false; /* Page entirely before EOF */
+ folio_zero_segment(folio, 0, plen);
+ folio_mark_uptodate(folio);
+ return true;
+ }
+
+ /* Full folio write */
+ if (offset == 0 && len >= plen)
+ return true;
+
+ /* Page entirely beyond the end of the file */
+ if (pos - offset >= i_size)
+ goto zero_out;
+
+ /* Write that covers from the start of the folio to EOF or beyond */
+ if (offset == 0 && (pos + len) >= i_size)
+ goto zero_out;
+
+ return false;
+zero_out:
+ folio_zero_segments(folio, 0, offset, offset + len, plen);
+ return true;
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
+ * @ctx: The netfs context
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @_folio: Where to put the resultant folio
+ * @_fsdata: Place for the netfs to store a cookie
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or the netfs if not. Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_read, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the folio is grabbed and locked. It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end. It is permitted to sleep. It should return 0 if the request
+ * should go ahead or it may return an error. It may also unlock and put the
+ * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
+ * will cause the folio to be re-got and the process to be retried.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ *
+ * Note that this should be considered deprecated and netfs_perform_write()
+ * used instead.
+ */
+int netfs_write_begin(struct netfs_inode *ctx,
+ struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, struct folio **_folio,
+ void **_fsdata)
+{
+ struct netfs_io_request *rreq;
+ struct folio *folio;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ int ret;
+
+retry:
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (ctx->ops->check_write_begin) {
+ /* Allow the netfs (eg. ceph) to flush conflicts. */
+ ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
+ if (ret < 0) {
+ trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
+ goto error;
+ }
+ if (!folio)
+ goto retry;
+ }
+
+ if (folio_test_uptodate(folio))
+ goto have_folio;
+
+ /* If the folio is beyond the EOF, we want to clear it - unless it's
+ * within the cache granule containing the EOF, in which case we need
+ * to preload the granule.
+ */
+ if (!netfs_is_cache_enabled(ctx) &&
+ netfs_skip_folio_read(folio, pos, len, false)) {
+ netfs_stat(&netfs_n_rh_write_zskip);
+ goto have_folio_no_wait;
+ }
+
+ rreq = netfs_alloc_request(mapping, file,
+ folio_pos(folio), folio_size(folio),
+ NETFS_READ_FOR_WRITE);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto error;
+ }
+ rreq->no_unlock_folio = folio->index;
+ __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto error_put;
+
+ netfs_stat(&netfs_n_rh_write_begin);
+ trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+ /* Set up the output buffer */
+ ret = netfs_create_singular_buffer(rreq, folio, 0);
+ if (ret < 0)
+ goto error_put;
+
+ netfs_read_to_pagecache(rreq, NULL);
+ ret = netfs_wait_for_read(rreq);
+ if (ret < 0)
+ goto error;
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+
+have_folio:
+ ret = folio_wait_private_2_killable(folio);
+ if (ret < 0)
+ goto error;
+have_folio_no_wait:
+ *_folio = folio;
+ _leave(" = 0");
+ return 0;
+
+error_put:
+ netfs_put_failed_request(rreq);
+error:
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
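As a rough illustration of the deprecated helper above, a filesystem's ->write_begin() could simply wrap netfs_write_begin(); the sketch below is hypothetical ("myfs" is not part of this patch) and the ->write_begin() prototype shown is the folio-based form, which varies across kernel versions.

/* Hypothetical sketch: wiring netfs_write_begin() into ->write_begin(). */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct folio **foliop, void **fsdata)
{
	struct netfs_inode *ictx = netfs_inode(mapping->host);

	/* netfs_write_begin() grabs and locks the folio, pre-reading from the
	 * cache or the server as needed and zero-filling beyond the EOF.
	 */
	return netfs_write_begin(ictx, file, mapping, pos, len, foliop, fsdata);
}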
+
+/*
+ * Preload the data into a folio we're proposing to write into.
+ */
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+ size_t offset, size_t len)
+{
+ struct netfs_io_request *rreq;
+ struct address_space *mapping = folio->mapping;
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ unsigned long long start = folio_pos(folio);
+ size_t flen = folio_size(folio);
+ int ret;
+
+ _enter("%zx @%llx", flen, start);
+
+ ret = -ENOMEM;
+
+ rreq = netfs_alloc_request(mapping, file, start, flen,
+ NETFS_READ_FOR_WRITE);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto error;
+ }
+
+ rreq->no_unlock_folio = folio->index;
+ __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto error_put;
+
+ netfs_stat(&netfs_n_rh_write_begin);
+ trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
+
+ /* Set up the output buffer */
+ ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK);
+ if (ret < 0)
+ goto error_put;
+
+ netfs_read_to_pagecache(rreq, NULL);
+ ret = netfs_wait_for_read(rreq);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ return ret < 0 ? ret : 0;
+
+error_put:
+ netfs_put_failed_request(rreq);
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/**
+ * netfs_buffered_read_iter - Filesystem buffered I/O read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead. When no data can be read,
+ * -EAGAIN shall be returned. When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
+ test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
+ return -EINVAL;
+
+ ret = netfs_start_io_read(inode);
+ if (ret == 0) {
+ ret = filemap_read(iocb, iter, 0);
+ netfs_end_io_read(inode);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(netfs_buffered_read_iter);
+
+/**
+ * netfs_file_read_iter - Generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead. When no data can be read,
+ * -EAGAIN shall be returned. When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);
+
+ if ((iocb->ki_flags & IOCB_DIRECT) ||
+ test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+ return netfs_unbuffered_read_iter(iocb, iter);
+
+ return netfs_buffered_read_iter(iocb, iter);
+}
+EXPORT_SYMBOL(netfs_file_read_iter);
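For context, a network filesystem using these helpers can typically point its file_operations straight at them; the table below is a hypothetical sketch (myfs_open/myfs_release are assumed helpers), not part of this patch.

/* Hypothetical sketch: file_operations routed through the netfs helpers. */
static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= netfs_file_read_iter,		/* dispatches buffered vs DIO */
	.write_iter	= netfs_file_write_iter,	/* added later in this patch */
	.open		= myfs_open,			/* hypothetical */
	.release	= myfs_release,			/* hypothetical */
};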
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
new file mode 100644
index 000000000000..f9d62abef2ac
--- /dev/null
+++ b/fs/netfs/buffered_write.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level buffered write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/pagevec.h>
+#include "internal.h"
+
+static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
+{
+ if (netfs_group)
+ folio_attach_private(folio, netfs_get_group(netfs_group));
+}
+
+static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
+{
+ void *priv = folio_get_private(folio);
+
+ if (unlikely(priv != netfs_group)) {
+ if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
+ folio_attach_private(folio, netfs_get_group(netfs_group));
+ else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
+ folio_detach_private(folio);
+ }
+}
+
+/*
+ * Grab a folio for writing and lock it. Attempt to allocate as large a folio
+ * as possible to hold as much of the remaining length as possible in one go.
+ */
+static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
+ loff_t pos, size_t part)
+{
+ pgoff_t index = pos / PAGE_SIZE;
+ fgf_t fgp_flags = FGP_WRITEBEGIN;
+
+ if (mapping_large_folio_support(mapping))
+ fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);
+
+ return __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+}
+
+/*
+ * Update i_size and estimate the update to i_blocks to reflect the additional
+ * data written into the pagecache until we can find out from the server what
+ * the values actually are.
+ */
+void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ loff_t pos, size_t copied)
+{
+ loff_t i_size, end = pos + copied;
+ blkcnt_t add;
+ size_t gap;
+
+ if (end <= i_size_read(inode))
+ return;
+
+ if (ctx->ops->update_i_size) {
+ ctx->ops->update_i_size(inode, end);
+ return;
+ }
+
+ spin_lock(&inode->i_lock);
+
+ i_size = i_size_read(inode);
+ if (end > i_size) {
+ i_size_write(inode, end);
+#if IS_ENABLED(CONFIG_FSCACHE)
+ fscache_update_cookie(ctx->cache, NULL, &end);
+#endif
+
+ gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
+ if (copied > gap) {
+ add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
+
+ inode->i_blocks = min_t(blkcnt_t,
+ DIV_ROUND_UP(end, SECTOR_SIZE),
+ inode->i_blocks + add);
+ }
+ }
+ spin_unlock(&inode->i_lock);
+}
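To make the i_blocks estimate above concrete, here is a small worked example assuming the usual 512-byte SECTOR_SIZE; the numbers are illustrative only.

/* Worked example (illustrative): i_size = 1000, pos = 1000, copied = 100.
 *   gap = 512 - (1000 & 511) = 512 - 488 = 24 bytes free in the last sector.
 *   copied (100) > gap (24), so add = DIV_ROUND_UP(100 - 24, 512) = 1 sector.
 *   end = 1100, so i_blocks becomes min(DIV_ROUND_UP(1100, 512), old + 1)
 *       = min(3, old + 1).
 */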
+
+/**
+ * netfs_perform_write - Copy data into the pagecache.
+ * @iocb: The operation parameters
+ * @iter: The source buffer
+ * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
+ *
+ * Copy data into pagecache folios attached to the inode specified by @iocb.
+ * The caller must hold appropriate inode locks.
+ *
+ * Dirty folios are tagged with a netfs_folio struct if they're not up to date
+ * to indicate the range modified. Dirty folios may also be tagged with a
+ * netfs-specific grouping such that data from an old group gets flushed before
+ * a new one is started.
+ */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = inode->i_mapping;
+ struct netfs_inode *ctx = netfs_inode(inode);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .for_sync = true,
+ .nr_to_write = LONG_MAX,
+ .range_start = iocb->ki_pos,
+ .range_end = iocb->ki_pos + iter->count,
+ };
+ struct netfs_io_request *wreq = NULL;
+ struct folio *folio = NULL, *writethrough = NULL;
+ unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
+ ssize_t written = 0, ret, ret2;
+ loff_t pos = iocb->ki_pos;
+ size_t max_chunk = mapping_max_folio_size(mapping);
+ bool maybe_trouble = false;
+
+ if (unlikely(iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))) {
+ wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+
+ ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+ if (ret < 0) {
+ wbc_detach_inode(&wbc);
+ goto out;
+ }
+
+ wreq = netfs_begin_writethrough(iocb, iter->count);
+ if (IS_ERR(wreq)) {
+ wbc_detach_inode(&wbc);
+ ret = PTR_ERR(wreq);
+ wreq = NULL;
+ goto out;
+ }
+ if (!is_sync_kiocb(iocb))
+ wreq->iocb = iocb;
+ netfs_stat(&netfs_n_wh_writethrough);
+ } else {
+ netfs_stat(&netfs_n_wh_buffered_write);
+ }
+
+ do {
+ struct netfs_folio *finfo;
+ struct netfs_group *group;
+ unsigned long long fpos;
+ size_t flen;
+ size_t offset; /* Offset into pagecache folio */
+ size_t part; /* Bytes to write to folio */
+ size_t copied; /* Bytes copied from user */
+
+ offset = pos & (max_chunk - 1);
+ part = min(max_chunk - offset, iov_iter_count(iter));
+
+ /* Bring in the user pages that we will copy from _first_ lest
+ * we hit a nasty deadlock on copying from the same page as
+ * we're writing to, without it being marked uptodate.
+ *
+ * Not only is this an optimisation, but it is also required to
+ * check that the address is actually valid, when atomic
+ * usercopies are used below.
+ *
+ * We rely on the page being held onto long enough by the LRU
+ * that we can grab it below if this causes it to be read.
+ */
+ ret = -EFAULT;
+ if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
+ break;
+
+ folio = netfs_grab_folio_for_write(mapping, pos, part);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ break;
+ }
+
+ flen = folio_size(folio);
+ fpos = folio_pos(folio);
+ offset = pos - fpos;
+ part = min_t(size_t, flen - offset, part);
+
+ /* Wait for writeback to complete. The writeback engine owns
+ * the info in folio->private and may change it until it
+ * removes the WB mark.
+ */
+ if (folio_get_private(folio) &&
+ folio_wait_writeback_killable(folio)) {
+ ret = written ? -EINTR : -ERESTARTSYS;
+ goto error_folio_unlock;
+ }
+
+ if (signal_pending(current)) {
+ ret = written ? -EINTR : -ERESTARTSYS;
+ goto error_folio_unlock;
+ }
+
+ /* Decide how we should modify a folio. We might be attempting
+ * to do write-streaming, in which case we don't want to do a
+ * local RMW cycle if we can avoid it. If we're doing local
+ * caching or content crypto, we award that priority over
+ * avoiding RMW. If the file is open readably, then we also
+ * assume that we may want to read what we wrote.
+ */
+ finfo = netfs_folio_info(folio);
+ group = netfs_folio_group(folio);
+
+ if (unlikely(group != netfs_group) &&
+ group != NETFS_FOLIO_COPY_TO_CACHE)
+ goto flush_content;
+
+ if (folio_test_uptodate(folio)) {
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_folio(folio);
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ netfs_set_group(folio, netfs_group);
+ trace_netfs_folio(folio, netfs_folio_is_uptodate);
+ goto copied;
+ }
+
+ /* If the page is above the zero-point then we assume that the
+ * server would just return a block of zeros or a short read if
+ * we try to read it.
+ */
+ if (fpos >= ctx->zero_point) {
+ folio_zero_segment(folio, 0, offset);
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ folio_zero_segment(folio, offset + copied, flen);
+ __netfs_set_group(folio, netfs_group);
+ folio_mark_uptodate(folio);
+ trace_netfs_folio(folio, netfs_modify_and_clear);
+ goto copied;
+ }
+
+ /* See if we can write a whole folio in one go. */
+ if (!maybe_trouble && offset == 0 && part >= flen) {
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ if (unlikely(copied < part)) {
+ maybe_trouble = true;
+ iov_iter_revert(iter, copied);
+ copied = 0;
+ folio_unlock(folio);
+ goto retry;
+ }
+ __netfs_set_group(folio, netfs_group);
+ folio_mark_uptodate(folio);
+ trace_netfs_folio(folio, netfs_whole_folio_modify);
+ goto copied;
+ }
+
+ /* We don't want to do a streaming write on a file that temporarily
+ * loses its caching service because the backing store got culled,
+ * and we don't really want a streaming write on a file that's open
+ * for reading either, as ->read_folio() would then have to be able
+ * to flush it.
+ */
+ if ((file->f_mode & FMODE_READ) ||
+ netfs_is_cache_enabled(ctx)) {
+ if (finfo) {
+ netfs_stat(&netfs_n_wh_wstream_conflict);
+ goto flush_content;
+ }
+ ret = netfs_prefetch_for_write(file, folio, offset, part);
+ if (ret < 0) {
+ _debug("prefetch = %zd", ret);
+ goto error_folio_unlock;
+ }
+ /* Note that copy-to-cache may have been set. */
+
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ netfs_set_group(folio, netfs_group);
+ trace_netfs_folio(folio, netfs_just_prefetch);
+ goto copied;
+ }
+
+ if (!finfo) {
+ ret = -EIO;
+ if (WARN_ON(folio_get_private(folio)))
+ goto error_folio_unlock;
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ if (offset == 0 && copied == flen) {
+ __netfs_set_group(folio, netfs_group);
+ folio_mark_uptodate(folio);
+ trace_netfs_folio(folio, netfs_streaming_filled_page);
+ goto copied;
+ }
+
+ finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ if (!finfo) {
+ iov_iter_revert(iter, copied);
+ ret = -ENOMEM;
+ goto error_folio_unlock;
+ }
+ finfo->netfs_group = netfs_get_group(netfs_group);
+ finfo->dirty_offset = offset;
+ finfo->dirty_len = copied;
+ folio_attach_private(folio, (void *)((unsigned long)finfo |
+ NETFS_FOLIO_INFO));
+ trace_netfs_folio(folio, netfs_streaming_write);
+ goto copied;
+ }
+
+ /* We can continue a streaming write only if it continues on
+ * from the previous. If it overlaps, we must flush lest we
+ * suffer a partial copy and disjoint dirty regions.
+ */
+ if (offset == finfo->dirty_offset + finfo->dirty_len) {
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ finfo->dirty_len += copied;
+ if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
+ if (finfo->netfs_group)
+ folio_change_private(folio, finfo->netfs_group);
+ else
+ folio_detach_private(folio);
+ folio_mark_uptodate(folio);
+ kfree(finfo);
+ trace_netfs_folio(folio, netfs_streaming_cont_filled_page);
+ } else {
+ trace_netfs_folio(folio, netfs_streaming_write_cont);
+ }
+ goto copied;
+ }
+
+ /* Incompatible write; flush the folio and try again. */
+ flush_content:
+ trace_netfs_folio(folio, netfs_flush_content);
+ folio_unlock(folio);
+ folio_put(folio);
+ ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
+ if (ret < 0)
+ goto out;
+ continue;
+
+ copied:
+ flush_dcache_folio(folio);
+
+ /* Update the inode size if we moved the EOF marker */
+ netfs_update_i_size(ctx, inode, pos, copied);
+ pos += copied;
+ written += copied;
+
+ if (likely(!wreq)) {
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ } else {
+ netfs_advance_writethrough(wreq, &wbc, folio, copied,
+ offset + copied == flen,
+ &writethrough);
+ /* Folio unlocked */
+ }
+ retry:
+ folio_put(folio);
+ folio = NULL;
+
+ ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
+ if (unlikely(ret < 0))
+ break;
+
+ cond_resched();
+ } while (iov_iter_count(iter));
+
+out:
+ if (likely(written)) {
+ /* Set indication that ctime and mtime got updated in case
+ * close is deferred.
+ */
+ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ctx->flags);
+ if (unlikely(ctx->ops->post_modify))
+ ctx->ops->post_modify(inode);
+ }
+
+ if (unlikely(wreq)) {
+ ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
+ wbc_detach_inode(&wbc);
+ if (ret2 == -EIOCBQUEUED)
+ return ret2;
+ if (ret == 0 && ret2 < 0)
+ ret = ret2;
+ }
+
+ iocb->ki_pos += written;
+ _leave(" = %zd [%zd]", written, ret);
+ return written ? written : ret;
+
+copy_failed:
+ ret = -EFAULT;
+error_folio_unlock:
+ folio_unlock(folio);
+ folio_put(folio);
+ goto out;
+}
+EXPORT_SYMBOL(netfs_perform_write);
+
+/**
+ * netfs_buffered_write_iter_locked - write data to a file
+ * @iocb: IO state structure (file, offset, etc.)
+ * @from: iov_iter with data to write
+ * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
+ *
+ * This function does all the work needed for actually writing data to a
+ * file. It does all basic checks, removes SUID from the file, updates
+ * modification times and calls proper subroutines depending on whether we
+ * do direct IO or a standard buffered write.
+ *
+ * The caller must hold appropriate locks around this function and have called
+ * generic_write_checks() already. The caller is also responsible for doing
+ * any necessary syncing afterwards.
+ *
+ * This function does *not* take care of syncing data in case of O_SYNC write.
+ * A caller has to handle it. This is mainly due to the fact that we want to
+ * avoid syncing under i_rwsem.
+ *
+ * Return:
+ * * number of bytes written, even for truncated writes
+ * * negative error code if no data has been written at all
+ */
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t ret;
+
+ trace_netfs_write_iter(iocb, from);
+
+ ret = file_remove_privs(file);
+ if (ret)
+ return ret;
+
+ ret = file_update_time(file);
+ if (ret)
+ return ret;
+
+ return netfs_perform_write(iocb, from, netfs_group);
+}
+EXPORT_SYMBOL(netfs_buffered_write_iter_locked);
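A caller that wants per-group dirty tracking (the ceph-snaps style mentioned above) might drive this locked variant itself. The sketch below is hypothetical: myfs_current_write_group() is an assumed helper, and the caller remains responsible for the locking and post-write syncing described in the kernel-doc.

/* Hypothetical sketch: buffered write that tags dirty folios with a group. */
static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct netfs_group *group;
	ssize_t ret;

	ret = netfs_start_io_write(inode);	/* excludes DIO and truncation */
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		group = myfs_current_write_group(inode); /* hypothetical helper */
		ret = netfs_buffered_write_iter_locked(iocb, from, group);
	}
	netfs_end_io_write(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);	/* O_SYNC handled here */
	return ret;
}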
+
+/**
+ * netfs_file_write_iter - write data to a file
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Perform a write to a file, writing into the pagecache if possible and doing
+ * an unbuffered write instead if not.
+ *
+ * Return:
+ * * Negative error code if no data has been written at all or
+ * vfs_fsync_range() failed for a synchronous write
+ * * Number of bytes written, even for truncated writes
+ */
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if ((iocb->ki_flags & IOCB_DIRECT) ||
+ test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+ return netfs_unbuffered_write_iter(iocb, from);
+
+ ret = netfs_start_io_write(inode);
+ if (ret < 0)
+ return ret;
+
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0)
+ ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
+ netfs_end_io_write(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_file_write_iter);
+
+/*
+ * Notification that a previously read-only page is about to become writable.
+ * The caller indicates the precise page that needs to be written to, but
+ * we only track the group on a per-folio basis, so we block more often than
+ * we might otherwise.
+ */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
+{
+ struct netfs_group *group;
+ struct folio *folio = page_folio(vmf->page);
+ struct file *file = vmf->vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = file_inode(file);
+ struct netfs_inode *ictx = netfs_inode(inode);
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ int err;
+
+ _enter("%lx", folio->index);
+
+ sb_start_pagefault(inode->i_sb);
+
+ if (folio_lock_killable(folio) < 0)
+ goto out;
+ if (folio->mapping != mapping)
+ goto unlock;
+ if (folio_wait_writeback_killable(folio) < 0)
+ goto unlock;
+
+ /* Can we see a streaming write here? */
+ if (WARN_ON(!folio_test_uptodate(folio))) {
+ ret = VM_FAULT_SIGBUS;
+ goto unlock;
+ }
+
+ group = netfs_folio_group(folio);
+ if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
+ folio_unlock(folio);
+ err = filemap_fdatawrite_range(mapping,
+ folio_pos(folio),
+ folio_next_pos(folio));
+ switch (err) {
+ case 0:
+ ret = VM_FAULT_RETRY;
+ goto out;
+ case -ENOMEM:
+ ret = VM_FAULT_OOM;
+ goto out;
+ default:
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+ }
+
+ if (folio_test_dirty(folio))
+ trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
+ netfs_set_group(folio, netfs_group);
+ file_update_time(file);
+ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
+ if (ictx->ops->post_modify)
+ ictx->ops->post_modify(inode);
+ ret = VM_FAULT_LOCKED;
+out:
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+unlock:
+ folio_unlock(folio);
+ goto out;
+}
+EXPORT_SYMBOL(netfs_page_mkwrite);
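For reference, a filesystem would normally reach netfs_page_mkwrite() from its vm_operations; the wrapper and ops table below are a hypothetical sketch, passing NULL when no dirty-folio grouping is in use.

/* Hypothetical sketch: routing ->page_mkwrite through the netfs helper. */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);	/* NULL: no grouping */
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};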
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
new file mode 100644
index 000000000000..a498ee8d6674
--- /dev/null
+++ b/fs/netfs/direct_read.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Direct I/O support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ size_t rsize;
+
+ rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
+ subreq->len = rsize;
+
+ if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
+ size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
+ rreq->io_streams[0].sreq_max_segs);
+
+ if (limit < rsize) {
+ subreq->len = limit;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
+ }
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+ subreq->io_iter = rreq->buffer.iter;
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+ iov_iter_advance(&rreq->buffer.iter, subreq->len);
+}
+
+/*
+ * Perform a read to a buffer from the server, slicing up the region to be read
+ * according to the network rsize.
+ */
+static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ unsigned long long start = rreq->start;
+ ssize_t size = rreq->len;
+ int ret = 0;
+
+ do {
+ struct netfs_io_subrequest *subreq;
+ ssize_t slice;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start;
+ subreq->len = size;
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
+ trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+ spin_unlock(&rreq->lock);
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0) {
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ break;
+ }
+ }
+
+ netfs_prepare_dio_read_iterator(subreq);
+ slice = subreq->len;
+ size -= slice;
+ start += slice;
+ rreq->submitted += slice;
+ if (size <= 0) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ }
+
+ rreq->netfs_ops->issue_read(subreq);
+
+ if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ netfs_wait_for_paused_read(rreq);
+ if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
+ break;
+ cond_resched();
+ } while (size > 0);
+
+ if (unlikely(size > 0)) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ netfs_wake_collector(rreq);
+ }
+
+ return ret;
+}
+
+/*
+ * Perform a read to an application buffer, bypassing the pagecache and the
+ * local disk cache.
+ */
+static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+{
+ ssize_t ret;
+
+ _enter("R=%x %llx-%llx",
+ rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+ if (rreq->len == 0) {
+ pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+ netfs_put_request(rreq, netfs_rreq_trace_put_discard);
+ return -EIO;
+ }
+
+ // TODO: Use bounce buffer if requested
+
+ inode_dio_begin(rreq->inode);
+
+ ret = netfs_dispatch_unbuffered_reads(rreq);
+
+ if (!rreq->submitted) {
+ netfs_put_request(rreq, netfs_rreq_trace_put_no_submit);
+ inode_dio_end(rreq->inode);
+ ret = 0;
+ goto out;
+ }
+
+ if (sync)
+ ret = netfs_wait_for_read(rreq);
+ else
+ ret = -EIOCBQUEUED;
+out:
+ _leave(" = %zd", ret);
+ return ret;
+}
+
+/**
+ * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
+ * @iocb: The I/O control descriptor describing the read
+ * @iter: The output buffer (also specifies read length)
+ *
+ * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
+ * output buffer. No use is made of the pagecache.
+ *
+ * The caller must hold any appropriate locks.
+ */
+ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct netfs_io_request *rreq;
+ ssize_t ret;
+ size_t orig_count = iov_iter_count(iter);
+ bool sync = is_sync_kiocb(iocb);
+
+ _enter("");
+
+ if (!orig_count)
+ return 0; /* Don't update atime */
+
+ ret = kiocb_write_and_wait(iocb, orig_count);
+ if (ret < 0)
+ return ret;
+ file_accessed(iocb->ki_filp);
+
+ rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
+ iocb->ki_pos, orig_count,
+ iocb->ki_flags & IOCB_DIRECT ?
+ NETFS_DIO_READ : NETFS_UNBUFFERED_READ);
+ if (IS_ERR(rreq))
+ return PTR_ERR(rreq);
+
+ netfs_stat(&netfs_n_rh_dio_read);
+ trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);
+
+ /* If this is an async op, we have to keep track of the destination
+ * buffer for ourselves as the caller's iterator will be trashed when
+ * we return.
+ *
+ * In such a case, extract an iterator to represent as much of the
+ * output buffer as we can manage. Note that the extraction might not
+ * be able to allocate a sufficiently large bvec array and may shorten
+ * the request.
+ */
+ if (user_backed_iter(iter)) {
+ ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
+ if (ret < 0)
+ goto error_put;
+ rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
+ rreq->direct_bv_count = ret;
+ rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+ rreq->len = iov_iter_count(&rreq->buffer.iter);
+ } else {
+ rreq->buffer.iter = *iter;
+ rreq->len = orig_count;
+ rreq->direct_bv_unpin = false;
+ iov_iter_advance(iter, orig_count);
+ }
+
+ // TODO: Set up bounce buffer if needed
+
+ if (!sync) {
+ rreq->iocb = iocb;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
+ }
+
+ ret = netfs_unbuffered_read(rreq, sync);
+ if (ret < 0)
+ goto out; /* May be -EIOCBQUEUED */
+ if (sync) {
+ // TODO: Copy from bounce buffer
+ iocb->ki_pos += rreq->transferred;
+ ret = rreq->transferred;
+ }
+
+out:
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ if (ret > 0)
+ orig_count -= ret;
+ return ret;
+
+error_put:
+ netfs_put_failed_request(rreq);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
+
+/**
+ * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
+ * @iocb: The I/O control descriptor describing the read
+ * @iter: The output buffer (also specifies read length)
+ *
+ * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
+ * output buffer. No use is made of the pagecache.
+ */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ if (!iter->count)
+ return 0; /* Don't update atime */
+
+ ret = netfs_start_io_direct(inode);
+ if (ret == 0) {
+ ret = netfs_unbuffered_read_iter_locked(iocb, iter);
+ netfs_end_io_direct(inode);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_read_iter);
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
new file mode 100644
index 000000000000..a9d1c3b2c084
--- /dev/null
+++ b/fs/netfs/direct_write.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Unbuffered and direct write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/uio.h>
+#include "internal.h"
+
+/*
+ * Perform an unbuffered write where we may have to do an RMW operation on an
+ * encrypted file. This can also be used for direct I/O writes.
+ */
+ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group)
+{
+ struct netfs_io_request *wreq;
+ unsigned long long start = iocb->ki_pos;
+ unsigned long long end = start + iov_iter_count(iter);
+ ssize_t ret, n;
+ size_t len = iov_iter_count(iter);
+ bool async = !is_sync_kiocb(iocb);
+
+ _enter("");
+
+ /* We're going to need a bounce buffer if what we transmit is going to
+ * be different in some way to the source buffer, e.g. because it gets
+ * encrypted/compressed or because it needs expanding to a block size.
+ */
+ // TODO
+
+ _debug("uw %llx-%llx", start, end);
+
+ wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
+ iocb->ki_flags & IOCB_DIRECT ?
+ NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+ if (IS_ERR(wreq))
+ return PTR_ERR(wreq);
+
+ wreq->io_streams[0].avail = true;
+ trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
+ netfs_write_trace_dio_write :
+ netfs_write_trace_unbuffered_write));
+
+ {
+ /* If this is an async op and we're not using a bounce buffer,
+ * we have to save the source buffer as the iterator is only
+ * good until we return. In such a case, extract an iterator
+ * to represent as much of the source buffer as we can
+ * manage. Note that the extraction might not be able to
+ * allocate a sufficiently large bvec array and may shorten the
+ * request.
+ */
+ if (user_backed_iter(iter)) {
+ n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0);
+ if (n < 0) {
+ ret = n;
+ goto error_put;
+ }
+ wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec;
+ wreq->direct_bv_count = n;
+ wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+ } else {
+ /* If this is a kernel-generated async DIO request,
+ * assume that any resources the iterator points to
+ * (eg. a bio_vec array) will persist till the end of
+ * the op.
+ */
+ wreq->buffer.iter = *iter;
+ }
+ }
+
+ __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+ if (async)
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+
+ /* Copy the data into the bounce buffer and encrypt it. */
+ // TODO
+
+ /* Dispatch the write. */
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ if (async)
+ wreq->iocb = iocb;
+ wreq->len = iov_iter_count(&wreq->buffer.iter);
+ ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
+ if (ret < 0) {
+ _debug("begin = %zd", ret);
+ goto out;
+ }
+
+ if (!async) {
+ ret = netfs_wait_for_write(wreq);
+ if (ret > 0)
+ iocb->ki_pos += ret;
+ } else {
+ ret = -EIOCBQUEUED;
+ }
+
+out:
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ return ret;
+
+error_put:
+ netfs_put_failed_request(wreq);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);
+
+/**
+ * netfs_unbuffered_write_iter - Unbuffered write to a file
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Do an unbuffered write to a file, writing the data directly to the server
+ * and not lodging the data in the pagecache.
+ *
+ * Return:
+ * * Negative error code if no data has been written at all or
+ * vfs_fsync_range() failed for a synchronous write
+ * * Number of bytes written, even for truncated writes
+ */
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+ loff_t pos = iocb->ki_pos;
+ unsigned long long end = pos + iov_iter_count(from) - 1;
+
+ _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ trace_netfs_write_iter(iocb, from);
+ netfs_stat(&netfs_n_wh_dio_write);
+
+ ret = netfs_start_io_direct(inode);
+ if (ret < 0)
+ return ret;
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
+ ret = file_remove_privs(file);
+ if (ret < 0)
+ goto out;
+ ret = file_update_time(file);
+ if (ret < 0)
+ goto out;
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ /* We could block if there are any pages in the range. */
+ ret = -EAGAIN;
+ if (filemap_range_has_page(mapping, pos, end))
+ if (filemap_invalidate_inode(inode, true, pos, end))
+ goto out;
+ } else {
+ ret = filemap_write_and_wait_range(mapping, pos, end);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+ * After a write we want buffered reads to be sure to go to disk to get
+ * the new data. We invalidate clean cached pages from the region we're
+ * about to write. We do this *before* the write so that we can return
+ * without clobbering -EIOCBQUEUED from ->direct_IO().
+ */
+ ret = filemap_invalidate_inode(inode, true, pos, end);
+ if (ret < 0)
+ goto out;
+ end = iocb->ki_pos + iov_iter_count(from);
+ if (end > ictx->zero_point)
+ ictx->zero_point = end;
+
+ fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
+ FSCACHE_INVAL_DIO_WRITE);
+ ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
+out:
+ netfs_end_io_direct(inode);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_write_iter);
diff --git a/fs/netfs/fscache_cache.c b/fs/netfs/fscache_cache.c
new file mode 100644
index 000000000000..8f70f8da064b
--- /dev/null
+++ b/fs/netfs/fscache_cache.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* FS-Cache cache handling
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+static LIST_HEAD(fscache_caches);
+DECLARE_RWSEM(fscache_addremove_sem);
+EXPORT_SYMBOL(fscache_addremove_sem);
+DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
+EXPORT_SYMBOL(fscache_clearance_waiters);
+
+static atomic_t fscache_cache_debug_id;
+
+/*
+ * Allocate a cache cookie.
+ */
+static struct fscache_cache *fscache_alloc_cache(const char *name)
+{
+ struct fscache_cache *cache;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (cache) {
+ if (name) {
+ cache->name = kstrdup(name, GFP_KERNEL);
+ if (!cache->name) {
+ kfree(cache);
+ return NULL;
+ }
+ }
+ refcount_set(&cache->ref, 1);
+ INIT_LIST_HEAD(&cache->cache_link);
+ cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
+ }
+ return cache;
+}
+
+static bool fscache_get_cache_maybe(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
+{
+ bool success;
+ int ref;
+
+ success = __refcount_inc_not_zero(&cache->ref, &ref);
+ if (success)
+ trace_fscache_cache(cache->debug_id, ref + 1, where);
+ return success;
+}
+
+/*
+ * Look up a cache cookie.
+ */
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
+{
+ struct fscache_cache *candidate, *cache, *unnamed = NULL;
+
+ /* firstly check for the existence of the cache under read lock */
+ down_read(&fscache_addremove_sem);
+
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name && name && strcmp(cache->name, name) == 0 &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ if (!cache->name && !name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ }
+
+ if (!name) {
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ }
+ }
+
+ up_read(&fscache_addremove_sem);
+
+ /* the cache does not exist - create a candidate */
+ candidate = fscache_alloc_cache(name);
+ if (!candidate)
+ return ERR_PTR(-ENOMEM);
+
+ /* write lock, search again and add if still not present */
+ down_write(&fscache_addremove_sem);
+
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name && name && strcmp(cache->name, name) == 0 &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ if (!cache->name) {
+ unnamed = cache;
+ if (!name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ }
+ }
+
+ if (unnamed && is_cache &&
+ fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
+ goto use_unnamed_cache;
+
+ if (!name) {
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ }
+ }
+
+ list_add_tail(&candidate->cache_link, &fscache_caches);
+ trace_fscache_cache(candidate->debug_id,
+ refcount_read(&candidate->ref),
+ fscache_cache_new_acquire);
+ up_write(&fscache_addremove_sem);
+ return candidate;
+
+got_cache_r:
+ up_read(&fscache_addremove_sem);
+ return cache;
+use_unnamed_cache:
+ cache = unnamed;
+ cache->name = candidate->name;
+ candidate->name = NULL;
+got_cache_w:
+ up_write(&fscache_addremove_sem);
+ kfree(candidate->name);
+ kfree(candidate);
+ return cache;
+}
+
+/**
+ * fscache_acquire_cache - Acquire a cache-level cookie.
+ * @name: The name of the cache.
+ *
+ * Get a cookie to represent an actual cache. If a name is given and there is
+ * a nameless cache record available, this will acquire that and set its name,
+ * directing all the volumes using it to this cache.
+ *
+ * The cache will be switched over to the preparing state if not currently in
+ * use, otherwise -EBUSY will be returned.
+ */
+struct fscache_cache *fscache_acquire_cache(const char *name)
+{
+ struct fscache_cache *cache;
+
+ ASSERT(name);
+ cache = fscache_lookup_cache(name, true);
+ if (IS_ERR(cache))
+ return cache;
+
+ if (!fscache_set_cache_state_maybe(cache,
+ FSCACHE_CACHE_IS_NOT_PRESENT,
+ FSCACHE_CACHE_IS_PREPARING)) {
+ pr_warn("Cache tag %s in use\n", name);
+ fscache_put_cache(cache, fscache_cache_put_cache);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return cache;
+}
+EXPORT_SYMBOL(fscache_acquire_cache);
+
+/**
+ * fscache_put_cache - Release a cache-level cookie.
+ * @cache: The cache cookie to be released
+ * @where: An indication of where the release happened
+ *
+ * Release the caller's reference on a cache-level cookie. The @where
+ * indication should give information about the circumstances in which the call
+ * occurs and will be logged through a tracepoint.
+ */
+void fscache_put_cache(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
+{
+ unsigned int debug_id;
+ bool zero;
+ int ref;
+
+ if (IS_ERR_OR_NULL(cache))
+ return;
+
+ debug_id = cache->debug_id;
+ zero = __refcount_dec_and_test(&cache->ref, &ref);
+ trace_fscache_cache(debug_id, ref - 1, where);
+
+ if (zero) {
+ down_write(&fscache_addremove_sem);
+ list_del_init(&cache->cache_link);
+ up_write(&fscache_addremove_sem);
+ kfree(cache->name);
+ kfree(cache);
+ }
+}
+
+/**
+ * fscache_relinquish_cache - Reset cache state and release cookie
+ * @cache: The cache cookie to be released
+ *
+ * Reset the state of a cache and release the caller's reference on a cache
+ * cookie.
+ */
+void fscache_relinquish_cache(struct fscache_cache *cache)
+{
+ enum fscache_cache_trace where =
+ (cache->state == FSCACHE_CACHE_IS_PREPARING) ?
+ fscache_cache_put_prep_failed :
+ fscache_cache_put_relinquish;
+
+ cache->ops = NULL;
+ cache->cache_priv = NULL;
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
+ fscache_put_cache(cache, where);
+}
+EXPORT_SYMBOL(fscache_relinquish_cache);
+
+/**
+ * fscache_add_cache - Declare a cache as being open for business
+ * @cache: The cache-level cookie representing the cache
+ * @ops: Table of cache operations to use
+ * @cache_priv: Private data for the cache record
+ *
+ * Add a cache to the system, making it available for netfs's to use.
+ *
+ * See Documentation/filesystems/caching/backend-api.rst for a complete
+ * description.
+ */
+int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv)
+{
+ int n_accesses;
+
+ _enter("{%s,%s}", ops->name, cache->name);
+
+ BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
+
+ /* Get a ref on the cache cookie and keep its n_accesses counter raised
+ * by 1 to prevent wakeups from transitioning it to 0 until we're
+ * withdrawing caching services from it.
+ */
+ n_accesses = atomic_inc_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, fscache_access_cache_pin);
+
+ down_write(&fscache_addremove_sem);
+
+ cache->ops = ops;
+ cache->cache_priv = cache_priv;
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
+
+ up_write(&fscache_addremove_sem);
+ pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
+ _leave(" = 0 [%s]", cache->name);
+ return 0;
+}
+EXPORT_SYMBOL(fscache_add_cache);
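Putting the two calls above together, a cache backend's bring-up might look roughly like the sketch below; struct mycache, mycache_ops and mycache_prepare_storage() are hypothetical and error handling is abbreviated.

/* Hypothetical sketch: acquiring a cache cookie and declaring it live. */
static int mycache_bring_up(struct mycache *backend, const char *tag)
{
	struct fscache_cache *cache;
	int ret;

	cache = fscache_acquire_cache(tag);	/* -EBUSY if tag already bound */
	if (IS_ERR(cache))
		return PTR_ERR(cache);

	ret = mycache_prepare_storage(backend);	/* hypothetical setup step */
	if (ret < 0) {
		fscache_relinquish_cache(cache); /* reset state, drop our ref */
		return ret;
	}

	backend->cache = cache;
	return fscache_add_cache(cache, &mycache_ops, backend);
}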
+
+/**
+ * fscache_begin_cache_access - Pin a cache so it can be accessed
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing it, returning true if successful. This works as follows:
+ *
+ * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ * then we return false to indicate access was not permitted.
+ *
+ * (2) If the cache tests as live, then we increment the n_accesses count and
+ * then recheck the liveness, ending the access if it ceased to be live.
+ *
+ * (3) When we end the access, we decrement n_accesses and wake up any
+ * waiters if it reaches 0.
+ *
+ * (4) Whilst the cache is caching, n_accesses is kept artificially
+ * incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline, the state is changed to prevent new
+ * accesses, n_accesses is decremented and we wait for n_accesses to
+ * become 0.
+ */
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ if (!fscache_cache_is_live(cache))
+ return false;
+
+ n_accesses = atomic_inc_return(&cache->n_accesses);
+ smp_mb__after_atomic(); /* Reread live flag after n_accesses */
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, why);
+ if (!fscache_cache_is_live(cache)) {
+ fscache_end_cache_access(cache, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * fscache_end_cache_access - Unpin a cache at the end of an access.
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache after we've accessed it. The @why indicator is merely
+ * provided for tracing purposes.
+ */
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, why);
+ if (n_accesses == 0)
+ wake_up_var(&cache->n_accesses);
+}
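The begin/end pair above is intended to bracket each cache-touching operation; a minimal sketch of the pattern follows. The trace label fscache_access_io_operation and mycache_issue_op() are stand-ins, not names this patch defines.

/* Hypothetical sketch: pinning the cache around a single operation. */
static int mycache_do_op(struct fscache_cache *cache)
{
	int ret;

	if (!fscache_begin_cache_access(cache, fscache_access_io_operation))
		return -ENOBUFS;	/* cache not live; caller falls back */

	ret = mycache_issue_op(cache->cache_priv);	/* hypothetical */

	fscache_end_cache_access(cache, fscache_access_io_operation);
	return ret;
}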
+
+/**
+ * fscache_io_error - Note a cache I/O error
+ * @cache: The record describing the cache
+ *
+ * Note that an I/O error occurred in a cache and that it should no longer be
+ * used for anything. This also reports the error into the kernel log.
+ *
+ * See Documentation/filesystems/caching/backend-api.rst for a complete
+ * description.
+ */
+void fscache_io_error(struct fscache_cache *cache)
+{
+ if (fscache_set_cache_state_maybe(cache,
+ FSCACHE_CACHE_IS_ACTIVE,
+ FSCACHE_CACHE_GOT_IOERROR))
+ pr_err("Cache '%s' stopped due to I/O error\n",
+ cache->name);
+}
+EXPORT_SYMBOL(fscache_io_error);
+
+/**
+ * fscache_withdraw_cache - Withdraw a cache from the active service
+ * @cache: The cache cookie
+ *
+ * Begin the process of withdrawing a cache from service. This stops new
+ * cache-level and volume-level accesses from taking place and waits for
+ * currently ongoing cache-level accesses to end.
+ */
+void fscache_withdraw_cache(struct fscache_cache *cache)
+{
+ int n_accesses;
+
+ pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
+ cache->name, atomic_read(&cache->object_count));
+
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);
+
+ /* Allow wakeups on dec-to-0 */
+ n_accesses = atomic_dec_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, fscache_access_cache_unpin);
+
+ wait_var_event(&cache->n_accesses,
+ atomic_read(&cache->n_accesses) == 0);
+}
+EXPORT_SYMBOL(fscache_withdraw_cache);
+
+#ifdef CONFIG_PROC_FS
+static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] __nonstring = "-PAEW";
+
+/*
+ * Generate a list of caches in /proc/fs/fscache/caches
+ */
+static int fscache_caches_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_cache *cache;
+
+ if (v == &fscache_caches) {
+ seq_puts(m,
+ "CACHE REF VOLS OBJS ACCES S NAME\n"
+ "======== ===== ===== ===== ===== = ===============\n"
+ );
+ return 0;
+ }
+
+ cache = list_entry(v, struct fscache_cache, cache_link);
+ seq_printf(m,
+ "%08x %5d %5d %5d %5d %c %s\n",
+ cache->debug_id,
+ refcount_read(&cache->ref),
+ atomic_read(&cache->n_volumes),
+ atomic_read(&cache->object_count),
+ atomic_read(&cache->n_accesses),
+ fscache_cache_states[cache->state],
+ cache->name ?: "-");
+ return 0;
+}
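With the columns above, the resulting /proc/fs/fscache/caches listing would look something like the following; the values are purely illustrative.

/* Example /proc/fs/fscache/caches output (illustrative):
 *
 * CACHE    REF   VOLS  OBJS  ACCES S NAME
 * ======== ===== ===== ===== ===== = ===============
 * 00000001     3     1   230     1 A mycache
 */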
+
+static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(fscache_addremove_sem)
+{
+ down_read(&fscache_addremove_sem);
+ return seq_list_start_head(&fscache_caches, *_pos);
+}
+
+static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_caches, _pos);
+}
+
+static void fscache_caches_seq_stop(struct seq_file *m, void *v)
+ __releases(fscache_addremove_sem)
+{
+ up_read(&fscache_addremove_sem);
+}
+
+const struct seq_operations fscache_caches_seq_ops = {
+ .start = fscache_caches_seq_start,
+ .next = fscache_caches_seq_next,
+ .stop = fscache_caches_seq_stop,
+ .show = fscache_caches_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c
new file mode 100644
index 000000000000..3d56fc73435f
--- /dev/null
+++ b/fs/netfs/fscache_cookie.c
@@ -0,0 +1,1184 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* netfs cookie management
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/filesystems/caching/netfs-api.rst for more information on
+ * the netfs API.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct kmem_cache *fscache_cookie_jar;
+
+static void fscache_cookie_lru_timed_out(struct timer_list *timer);
+static void fscache_cookie_lru_worker(struct work_struct *work);
+static void fscache_cookie_worker(struct work_struct *work);
+static void fscache_unhash_cookie(struct fscache_cookie *cookie);
+static void fscache_perform_invalidation(struct fscache_cookie *cookie);
+
+#define fscache_cookie_hash_shift 15
+static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
+static LIST_HEAD(fscache_cookies);
+static DEFINE_RWLOCK(fscache_cookies_lock);
+static LIST_HEAD(fscache_cookie_lru);
+static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
+DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
+static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
+static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] __nonstring = "-LCAIFUWRD";
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+
+void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
+{
+ const u8 *k;
+
+ pr_err("%c-cookie c=%08x [fl=%lx na=%u nA=%u s=%c]\n",
+ prefix,
+ cookie->debug_id,
+ cookie->flags,
+ atomic_read(&cookie->n_active),
+ atomic_read(&cookie->n_accesses),
+ fscache_cookie_states[cookie->state]);
+ pr_err("%c-cookie V=%08x [%s]\n",
+ prefix,
+ cookie->volume->debug_id,
+ cookie->volume->key);
+
+ k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
+ cookie->inline_key : cookie->key;
+ pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k);
+}
+
+static void fscache_free_cookie(struct fscache_cookie *cookie)
+{
+ if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) {
+ spin_lock(&fscache_cookie_lru_lock);
+ list_del_init(&cookie->commit_link);
+ spin_unlock(&fscache_cookie_lru_lock);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ fscache_stat(&fscache_n_cookies_lru_removed);
+ }
+
+ if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {
+ fscache_print_cookie(cookie, 'F');
+ return;
+ }
+
+ write_lock(&fscache_cookies_lock);
+ list_del(&cookie->proc_link);
+ write_unlock(&fscache_cookies_lock);
+ if (cookie->aux_len > sizeof(cookie->inline_aux))
+ kfree(cookie->aux);
+ if (cookie->key_len > sizeof(cookie->inline_key))
+ kfree(cookie->key);
+ fscache_stat_d(&fscache_n_cookies);
+ kmem_cache_free(fscache_cookie_jar, cookie);
+}
+
+static void __fscache_queue_cookie(struct fscache_cookie *cookie)
+{
+ if (!queue_work(fscache_wq, &cookie->work))
+ fscache_put_cookie(cookie, fscache_cookie_put_over_queued);
+}
+
+static void fscache_queue_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ fscache_get_cookie(cookie, where);
+ __fscache_queue_cookie(cookie);
+}
+
+/*
+ * Initialise the access gate on a cookie by setting a flag to prevent the
+ * state machine from being queued when the access counter transitions to 0.
+ * We're only interested in this when we withdraw caching services from the
+ * cookie.
+ */
+static void fscache_init_access_gate(struct fscache_cookie *cookie)
+{
+ int n_accesses;
+
+ n_accesses = atomic_read(&cookie->n_accesses);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, fscache_access_cache_pin);
+ set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
+}
+
+/**
+ * fscache_end_cookie_access - Unpin a cache at the end of an access.
+ * @cookie: A data file cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache cookie after we've accessed it and bring a deferred
+ * relinquishment or withdrawal state into effect.
+ *
+ * The @why indicator is provided for tracing purposes.
+ */
+void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&cookie->n_accesses);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, why);
+ if (n_accesses == 0 &&
+ !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))
+ fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
+}
+EXPORT_SYMBOL(fscache_end_cookie_access);
+
+/*
+ * Pin the cache behind a cookie so that we can access it.
+ */
+static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ n_accesses = atomic_inc_return(&cookie->n_accesses);
+ smp_mb__after_atomic(); /* (Future) read state after is-caching.
+ * Reread n_accesses after is-caching
+ */
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, why);
+}
+
+/**
+ * fscache_begin_cookie_access - Pin a cache so data can be accessed
+ * @cookie: A data file cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing data, returning true if successful. This works as follows:
+ *
+ * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not
+ * set), we return false to indicate access was not permitted.
+ *
+ * (2) If the cookie is being cached, we increment its n_accesses count and
+ * then recheck the IS_CACHING flag, ending the access if it got cleared.
+ *
+ * (3) When we end the access, we decrement the cookie's n_accesses and wake
+ * up any waiters if it reaches 0.
+ *
+ * (4) Whilst the cookie is actively being cached, its n_accesses is kept
+ * artificially incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline or if the cookie is culled, the flag is
+ * cleared to prevent new accesses, the cookie's n_accesses is decremented
+ * and we wait for it to become 0.
+ *
+ * The @why indicator is merely provided for tracing purposes.
+ */
+bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
+ return false;
+ __fscache_begin_cookie_access(cookie, why);
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
+ !fscache_cache_is_live(cookie->volume->cache)) {
+ fscache_end_cookie_access(cookie, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
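The same pattern applies at cookie level; a minimal sketch is shown below. The trace label and myfs_issue_cache_read() are assumptions for illustration only.

/* Hypothetical sketch: pinning a cookie for the duration of one I/O. */
static int myfs_read_from_cache(struct fscache_cookie *cookie)
{
	int ret;

	if (!fscache_begin_cookie_access(cookie, fscache_access_io_read))
		return -ENOBUFS;	/* not being cached; read from the server */

	ret = myfs_issue_cache_read(cookie);	/* hypothetical */

	fscache_end_cookie_access(cookie, fscache_access_io_read);
	return ret;
}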
+
+static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
+{
+ /* Use a barrier to ensure that waiters see the state variable
+ * change, as spin_unlock doesn't guarantee a barrier.
+ *
+ * See comments over wake_up_bit() and waitqueue_active().
+ */
+ smp_mb();
+ wake_up_var(&cookie->state);
+}
+
+/*
+ * Change the state a cookie is at and wake up anyone waiting for that. Impose
+ * an ordering between the stuff stored in the cookie and the state member.
+ * Paired with fscache_cookie_state().
+ */
+static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
+ enum fscache_cookie_state state)
+{
+ smp_store_release(&cookie->state, state);
+}
+
+static void fscache_set_cookie_state(struct fscache_cookie *cookie,
+ enum fscache_cookie_state state)
+{
+ spin_lock(&cookie->lock);
+ __fscache_set_cookie_state(cookie, state);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+}
+
+/**
+ * fscache_cookie_lookup_negative - Note negative lookup
+ * @cookie: The cookie that was being looked up
+ *
+ * Note that some part of the metadata path in the cache doesn't exist and so
+ * we can release any waiting readers in the certain knowledge that there's
+ * nothing for them to actually read.
+ *
+ * This function uses no locking and must only be called from the state machine.
+ */
+void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
+{
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
+}
+EXPORT_SYMBOL(fscache_cookie_lookup_negative);
+
+/**
+ * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
+ * @cookie: The cookie that was invalidated
+ *
+ * Tell fscache that invalidation is sufficiently complete that I/O can be
+ * allowed again.
+ */
+void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
+{
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+}
+EXPORT_SYMBOL(fscache_resume_after_invalidation);
+
+/**
+ * fscache_caching_failed - Report that a failure stopped caching on a cookie
+ * @cookie: The cookie that was affected
+ *
+ * Tell fscache that caching on a cookie needs to be stopped due to some sort
+ * of failure.
+ *
+ * This function uses no locking and must only be called from the state machine.
+ */
+void fscache_caching_failed(struct fscache_cookie *cookie)
+{
+ clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
+ trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+ fscache_cookie_failed);
+}
+EXPORT_SYMBOL(fscache_caching_failed);
+
+/*
+ * Set the index key in a cookie. The cookie struct has space for a 16-byte
+ * key plus length and hash, but if that's not big enough, cookie->key is
+ * instead a pointer to a separately allocated buffer holding the key data,
+ * padded out to a multiple of four bytes.
+ */
+static int fscache_set_key(struct fscache_cookie *cookie,
+ const void *index_key, size_t index_key_len)
+{
+ void *buf;
+ size_t buf_size;
+
+ buf_size = round_up(index_key_len, sizeof(__le32));
+
+ if (index_key_len > sizeof(cookie->inline_key)) {
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cookie->key = buf;
+ } else {
+ buf = cookie->inline_key;
+ }
+
+ memcpy(buf, index_key, index_key_len);
+ cookie->key_hash = fscache_hash(cookie->volume->key_hash,
+ buf, buf_size);
+ return 0;
+}
+
+static bool fscache_cookie_same(const struct fscache_cookie *a,
+ const struct fscache_cookie *b)
+{
+ const void *ka, *kb;
+
+ if (a->key_hash != b->key_hash ||
+ a->volume != b->volume ||
+ a->key_len != b->key_len)
+ return false;
+
+ if (a->key_len <= sizeof(a->inline_key)) {
+ ka = &a->inline_key;
+ kb = &b->inline_key;
+ } else {
+ ka = a->key;
+ kb = b->key;
+ }
+ return memcmp(ka, kb, a->key_len) == 0;
+}
+
+static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
+
+/*
+ * Allocate a cookie.
+ */
+static struct fscache_cookie *fscache_alloc_cookie(
+ struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key, size_t index_key_len,
+ const void *aux_data, size_t aux_data_len,
+ loff_t object_size)
+{
+ struct fscache_cookie *cookie;
+
+ /* allocate and initialise a cookie */
+ cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
+ if (!cookie)
+ return NULL;
+ fscache_stat(&fscache_n_cookies);
+
+ cookie->volume = volume;
+ cookie->advice = advice;
+ cookie->key_len = index_key_len;
+ cookie->aux_len = aux_data_len;
+ cookie->object_size = object_size;
+ if (object_size == 0)
+ __set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+
+ if (fscache_set_key(cookie, index_key, index_key_len) < 0)
+ goto nomem;
+
+ if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
+ memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
+ } else {
+ cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
+ if (!cookie->aux)
+ goto nomem;
+ }
+
+ refcount_set(&cookie->ref, 1);
+ cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
+ spin_lock_init(&cookie->lock);
+ INIT_LIST_HEAD(&cookie->commit_link);
+ INIT_WORK(&cookie->work, fscache_cookie_worker);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+
+ write_lock(&fscache_cookies_lock);
+ list_add_tail(&cookie->proc_link, &fscache_cookies);
+ write_unlock(&fscache_cookies_lock);
+ fscache_see_cookie(cookie, fscache_cookie_new_acquire);
+ return cookie;
+
+nomem:
+ fscache_free_cookie(cookie);
+ return NULL;
+}
+
+static inline bool fscache_cookie_is_dropped(struct fscache_cookie *cookie)
+{
+ return READ_ONCE(cookie->state) == FSCACHE_COOKIE_STATE_DROPPED;
+}
+
+static void fscache_wait_on_collision(struct fscache_cookie *candidate,
+ struct fscache_cookie *wait_for)
+{
+ enum fscache_cookie_state *statep = &wait_for->state;
+
+ wait_var_event_timeout(statep, fscache_cookie_is_dropped(wait_for),
+ 20 * HZ);
+ if (!fscache_cookie_is_dropped(wait_for)) {
+ pr_notice("Potential collision c=%08x old: c=%08x",
+ candidate->debug_id, wait_for->debug_id);
+ wait_var_event(statep, fscache_cookie_is_dropped(wait_for));
+ }
+}
+
+/*
+ * Attempt to insert the new cookie into the hash. If there's a collision, we
+ * wait for the old cookie to complete if it's being relinquished, and fail
+ * otherwise.
+ */
+static bool fscache_hash_cookie(struct fscache_cookie *candidate)
+{
+ struct fscache_cookie *cursor, *wait_for = NULL;
+ struct hlist_bl_head *h;
+ struct hlist_bl_node *p;
+ unsigned int bucket;
+
+ bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
+ h = &fscache_cookie_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_cookie_same(candidate, cursor)) {
+ if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
+ goto collision;
+ wait_for = fscache_get_cookie(cursor,
+ fscache_cookie_get_hash_collision);
+ break;
+ }
+ }
+
+ fscache_get_volume(candidate->volume, fscache_volume_get_cookie);
+ atomic_inc(&candidate->volume->n_cookies);
+ hlist_bl_add_head(&candidate->hash_link, h);
+ set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags);
+ hlist_bl_unlock(h);
+
+ if (wait_for) {
+ fscache_wait_on_collision(candidate, wait_for);
+ fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision);
+ }
+ return true;
+
+collision:
+ trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
+ fscache_cookie_collision);
+ pr_err("Duplicate cookie detected\n");
+ fscache_print_cookie(cursor, 'O');
+ fscache_print_cookie(candidate, 'N');
+ hlist_bl_unlock(h);
+ return false;
+}
+
+/*
+ * Request a cookie to represent a data storage object within a volume.
+ *
+ * We never let on to the netfs about errors; the netfs just gets a NULL
+ * cookie pointer back, and that's okay.
+ */
+struct fscache_cookie *__fscache_acquire_cookie(
+ struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key, size_t index_key_len,
+ const void *aux_data, size_t aux_data_len,
+ loff_t object_size)
+{
+ struct fscache_cookie *cookie;
+
+ _enter("V=%x", volume->debug_id);
+
+ if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
+ return NULL;
+ if (!aux_data || !aux_data_len) {
+ aux_data = NULL;
+ aux_data_len = 0;
+ }
+
+ fscache_stat(&fscache_n_acquires);
+
+ cookie = fscache_alloc_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
+ if (!cookie) {
+ fscache_stat(&fscache_n_acquires_oom);
+ return NULL;
+ }
+
+ if (!fscache_hash_cookie(cookie)) {
+ fscache_see_cookie(cookie, fscache_cookie_discard);
+ fscache_free_cookie(cookie);
+ return NULL;
+ }
+
+ trace_fscache_acquire(cookie);
+ fscache_stat(&fscache_n_acquires_ok);
+ _leave(" = c=%08x", cookie->debug_id);
+ return cookie;
+}
+EXPORT_SYMBOL(__fscache_acquire_cookie);
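+
+/*
+ * For illustration, a netfs would normally reach this through the
+ * fscache_acquire_cookie() wrapper when initialising an inode; object_id and
+ * aux here stand in for netfs-defined key and coherency data:
+ *
+ *     cookie = fscache_acquire_cookie(volume, 0,
+ *                                     &object_id, sizeof(object_id),
+ *                                     &aux, sizeof(aux),
+ *                                     i_size_read(inode));
+ *
+ * A NULL return simply means the file won't be cached; it is not an error.
+ */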
+
+/*
+ * Prepare a cache object to be written to.
+ */
+static void fscache_prepare_to_write(struct fscache_cookie *cookie)
+{
+ cookie->volume->cache->ops->prepare_to_write(cookie);
+}
+
+/*
+ * Look up a cookie in the cache.
+ */
+static void fscache_perform_lookup(struct fscache_cookie *cookie)
+{
+ enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
+ bool need_withdraw = false;
+
+ _enter("");
+
+ if (!cookie->volume->cache_priv) {
+ fscache_create_volume(cookie->volume, true);
+ if (!cookie->volume->cache_priv) {
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ goto out;
+ }
+ }
+
+ if (!cookie->volume->cache->ops->lookup_cookie(cookie)) {
+ if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ need_withdraw = true;
+ _leave(" [fail]");
+ goto out;
+ }
+
+ fscache_see_cookie(cookie, fscache_cookie_see_active);
+ spin_lock(&cookie->lock);
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_INVALIDATING);
+ else
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+ trace = fscache_access_lookup_cookie_end;
+
+out:
+ fscache_end_cookie_access(cookie, trace);
+ if (need_withdraw)
+ fscache_withdraw_cookie(cookie);
+ fscache_end_volume_access(cookie->volume, cookie, trace);
+}
+
+/*
+ * Begin the process of looking up a cookie. We offload the actual process to
+ * a worker thread.
+ */
+static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
+{
+ if (will_modify) {
+ set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ }
+ if (!fscache_begin_volume_access(cookie->volume, cookie,
+ fscache_access_lookup_cookie))
+ return false;
+
+ __fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP);
+ set_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags);
+ return true;
+}
+
+/*
+ * Start using the cookie for I/O. This prevents the backing object from being
+ * reaped by VM pressure.
+ */
+void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
+{
+ enum fscache_cookie_state state;
+ bool queue = false;
+ int n_active;
+
+ _enter("c=%08x", cookie->debug_id);
+
+ if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Trying to use relinquished cookie\n"))
+ return;
+
+ spin_lock(&cookie->lock);
+
+ n_active = atomic_inc_return(&cookie->n_active);
+ trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref),
+ n_active, atomic_read(&cookie->n_accesses),
+ will_modify ?
+ fscache_active_use_modify : fscache_active_use);
+
+again:
+ state = fscache_cookie_state(cookie);
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_QUIESCENT:
+ queue = fscache_begin_lookup(cookie, will_modify);
+ break;
+
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_CREATING:
+ if (will_modify)
+ set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
+ break;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ if (will_modify &&
+ !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
+ set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ queue = true;
+ }
+ /*
+ * We could race with cookie_lru which may set LRU_DISCARD bit
+ * but has yet to run the cookie state machine. If this happens
+ * and another thread tries to use the cookie, clear LRU_DISCARD
+ * so we don't end up withdrawing the cookie while in use.
+ */
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
+ break;
+
+ case FSCACHE_COOKIE_STATE_FAILED:
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ break;
+
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ spin_unlock(&cookie->lock);
+ wait_var_event(&cookie->state,
+ fscache_cookie_state(cookie) !=
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ spin_lock(&cookie->lock);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ WARN(1, "Can't use cookie in state %u\n", state);
+ break;
+ }
+
+ spin_unlock(&cookie->lock);
+ if (queue)
+ fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_use_cookie);
+
+static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie)
+{
+ clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags);
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
+ return;
+
+ cookie->unused_at = jiffies;
+ spin_lock(&fscache_cookie_lru_lock);
+ if (list_empty(&cookie->commit_link)) {
+ fscache_get_cookie(cookie, fscache_cookie_get_lru);
+ fscache_stat(&fscache_n_cookies_lru);
+ }
+ list_move_tail(&cookie->commit_link, &fscache_cookie_lru);
+
+ spin_unlock(&fscache_cookie_lru_lock);
+ timer_reduce(&fscache_cookie_lru_timer,
+ jiffies + fscache_lru_cookie_timeout);
+}
+
+/*
+ * Stop using the cookie for I/O.
+ */
+void __fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
+{
+ unsigned int debug_id = cookie->debug_id;
+ unsigned int r = refcount_read(&cookie->ref);
+ unsigned int a = atomic_read(&cookie->n_accesses);
+ unsigned int c;
+
+ if (aux_data || object_size)
+ __fscache_update_cookie(cookie, aux_data, object_size);
+
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ c = atomic_fetch_add_unless(&cookie->n_active, -1, 1);
+ if (c != 1) {
+ trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse);
+ return;
+ }
+
+ spin_lock(&cookie->lock);
+ r = refcount_read(&cookie->ref);
+ a = atomic_read(&cookie->n_accesses);
+ c = atomic_dec_return(&cookie->n_active);
+ trace_fscache_active(debug_id, r, c, a, fscache_active_unuse);
+ if (c == 0)
+ fscache_unuse_cookie_locked(cookie);
+ spin_unlock(&cookie->lock);
+}
+EXPORT_SYMBOL(__fscache_unuse_cookie);
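+
+/*
+ * Sketch of the expected pairing through the wrappers: a netfs pins the
+ * cookie whilst a file is open and unpins it on release, optionally passing
+ * updated coherency data and size at that point (names are illustrative):
+ *
+ *     fscache_use_cookie(cookie, file_is_writable);
+ *     ...
+ *     fscache_unuse_cookie(cookie, &aux, &i_size);
+ */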
+
+/*
+ * Perform work upon the cookie, such as committing its cache state,
+ * relinquishing it or withdrawing the backing cache. We're protected from the
+ * cache going away under us as object withdrawal must come through this
+ * non-reentrant work item.
+ */
+static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
+{
+ enum fscache_cookie_state state;
+ bool wake = false;
+
+ _enter("c=%x", cookie->debug_id);
+
+again:
+ spin_lock(&cookie->lock);
+again_locked:
+ state = cookie->state;
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_QUIESCENT:
+ /* A cookie in the QUIESCENT state is moved to the LOOKING_UP state
+ * by fscache_use_cookie().
+ */
+
+ if (atomic_read(&cookie->n_accesses) == 0 &&
+ test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_RELINQUISHING);
+ wake = true;
+ goto again_locked;
+ }
+ break;
+
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ spin_unlock(&cookie->lock);
+ fscache_init_access_gate(cookie);
+ fscache_perform_lookup(cookie);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ spin_unlock(&cookie->lock);
+ fscache_perform_invalidation(cookie);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
+ spin_unlock(&cookie->lock);
+ fscache_prepare_to_write(cookie);
+ spin_lock(&cookie->lock);
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
+ if (atomic_read(&cookie->n_accesses) != 0)
+ /* still being accessed: postpone it */
+ break;
+
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ wake = true;
+ goto again_locked;
+ }
+ fallthrough;
+
+ case FSCACHE_COOKIE_STATE_FAILED:
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+
+ if (atomic_read(&cookie->n_accesses) != 0)
+ break;
+ if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_RELINQUISHING);
+ wake = true;
+ goto again_locked;
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_WITHDRAWING);
+ wake = true;
+ goto again_locked;
+ }
+ break;
+
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ if (cookie->cache_priv) {
+ spin_unlock(&cookie->lock);
+ cookie->volume->cache->ops->withdraw_cookie(cookie);
+ spin_lock(&cookie->lock);
+ }
+
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
+ fscache_unhash_cookie(cookie);
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_DROPPED);
+ wake = true;
+ goto out;
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_discard);
+ break;
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ fscache_see_cookie(cookie, fscache_cookie_see_withdraw);
+ break;
+ default:
+ BUG();
+ }
+
+ clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ wake = true;
+ goto again_locked;
+
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ break;
+
+ default:
+ WARN_ONCE(1, "Cookie %x in unexpected state %u\n",
+ cookie->debug_id, state);
+ break;
+ }
+
+out:
+ spin_unlock(&cookie->lock);
+ if (wake)
+ wake_up_cookie_state(cookie);
+ _leave("");
+}
+
+static void fscache_cookie_worker(struct work_struct *work)
+{
+ struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);
+
+ fscache_see_cookie(cookie, fscache_cookie_see_work);
+ fscache_cookie_state_machine(cookie);
+ fscache_put_cookie(cookie, fscache_cookie_put_work);
+}
+
+/*
+ * Wait for the object to become inactive. The cookie's work item will be
+ * scheduled when someone transitions n_accesses to 0 - but if someone's
+ * already done that, schedule it anyway.
+ */
+static void __fscache_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ int n_accesses;
+ bool unpinned;
+
+ unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
+
+ /* Need to read the access count after unpinning */
+ n_accesses = atomic_read(&cookie->n_accesses);
+ if (unpinned)
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, fscache_access_cache_unpin);
+ if (n_accesses == 0)
+ fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
+}
+
+static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
+{
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one);
+
+ spin_lock(&cookie->lock);
+ if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE ||
+ time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) ||
+ atomic_read(&cookie->n_active) > 0) {
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_cookies_lru_removed);
+ } else {
+ set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_cookies_lru_expired);
+ _debug("lru c=%x", cookie->debug_id);
+ __fscache_withdraw_cookie(cookie);
+ }
+
+ fscache_put_cookie(cookie, fscache_cookie_put_lru);
+}
+
+static void fscache_cookie_lru_worker(struct work_struct *work)
+{
+ struct fscache_cookie *cookie;
+ unsigned long unused_at;
+
+ spin_lock(&fscache_cookie_lru_lock);
+
+ while (!list_empty(&fscache_cookie_lru)) {
+ cookie = list_first_entry(&fscache_cookie_lru,
+ struct fscache_cookie, commit_link);
+ unused_at = cookie->unused_at + fscache_lru_cookie_timeout;
+ if (time_before(jiffies, unused_at)) {
+ timer_reduce(&fscache_cookie_lru_timer, unused_at);
+ break;
+ }
+
+ list_del_init(&cookie->commit_link);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ spin_unlock(&fscache_cookie_lru_lock);
+ fscache_cookie_lru_do_one(cookie);
+ spin_lock(&fscache_cookie_lru_lock);
+ }
+
+ spin_unlock(&fscache_cookie_lru_lock);
+}
+
+static void fscache_cookie_lru_timed_out(struct timer_list *timer)
+{
+ queue_work(fscache_wq, &fscache_cookie_lru_work);
+}
+
+static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie)
+{
+ bool need_put = false;
+
+ if (!list_empty(&cookie->commit_link)) {
+ spin_lock(&fscache_cookie_lru_lock);
+ if (!list_empty(&cookie->commit_link)) {
+ list_del_init(&cookie->commit_link);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ fscache_stat(&fscache_n_cookies_lru_dropped);
+ need_put = true;
+ }
+ spin_unlock(&fscache_cookie_lru_lock);
+ if (need_put)
+ fscache_put_cookie(cookie, fscache_cookie_put_lru);
+ }
+}
+
+/*
+ * Remove a cookie from the hash table.
+ */
+static void fscache_unhash_cookie(struct fscache_cookie *cookie)
+{
+ struct hlist_bl_head *h;
+ unsigned int bucket;
+
+ bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
+ h = &fscache_cookie_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_del(&cookie->hash_link);
+ clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags);
+ hlist_bl_unlock(h);
+ fscache_stat(&fscache_n_relinquishes_dropped);
+}
+
+static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ fscache_cookie_drop_from_lru(cookie);
+ __fscache_withdraw_cookie(cookie);
+}
+
+/**
+ * fscache_withdraw_cookie - Mark a cookie for withdrawal
+ * @cookie: The cookie to be withdrawn.
+ *
+ * Allow the cache backend to withdraw the backing for a cookie for its own
+ * reasons, even if that cookie is in active use.
+ */
+void fscache_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
+ fscache_drop_withdraw_cookie(cookie);
+}
+EXPORT_SYMBOL(fscache_withdraw_cookie);
+
+/*
+ * Allow the netfs to release a cookie back to the cache.
+ * - the object will be marked as recyclable on disk if retire is true
+ */
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+{
+ fscache_stat(&fscache_n_relinquishes);
+ if (retire)
+ fscache_stat(&fscache_n_relinquishes_retire);
+
+ _enter("c=%08x{%d},%d",
+ cookie->debug_id, atomic_read(&cookie->n_active), retire);
+
+ if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Cookie c=%x already relinquished\n", cookie->debug_id))
+ return;
+
+ if (retire)
+ set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+ trace_fscache_relinquish(cookie, retire);
+
+ ASSERTCMP(atomic_read(&cookie->n_active), ==, 0);
+ ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0);
+ atomic_dec(&cookie->volume->n_cookies);
+
+ if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) {
+ set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags);
+ fscache_drop_withdraw_cookie(cookie);
+ } else {
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED);
+ fscache_unhash_cookie(cookie);
+ }
+ fscache_put_cookie(cookie, fscache_cookie_put_relinquish);
+}
+EXPORT_SYMBOL(__fscache_relinquish_cookie);
+
+/*
+ * Drop a reference to a cookie.
+ */
+void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ struct fscache_volume *volume = cookie->volume;
+ unsigned int cookie_debug_id = cookie->debug_id;
+ bool zero;
+ int ref;
+
+ zero = __refcount_dec_and_test(&cookie->ref, &ref);
+ trace_fscache_cookie(cookie_debug_id, ref - 1, where);
+ if (zero) {
+ fscache_free_cookie(cookie);
+ fscache_put_volume(volume, fscache_volume_put_cookie);
+ }
+}
+EXPORT_SYMBOL(fscache_put_cookie);
+
+/*
+ * Get a reference to a cookie.
+ */
+struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ int ref;
+
+ __refcount_inc(&cookie->ref, &ref);
+ trace_fscache_cookie(cookie->debug_id, ref + 1, where);
+ return cookie;
+}
+EXPORT_SYMBOL(fscache_get_cookie);
+
+/*
+ * Ask the cache to effect invalidation of a cookie.
+ */
+static void fscache_perform_invalidation(struct fscache_cookie *cookie)
+{
+ if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
+ fscache_caching_failed(cookie);
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+}
+
+/*
+ * Invalidate an object.
+ */
+void __fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t new_size,
+ unsigned int flags)
+{
+ bool is_caching;
+
+ _enter("c=%x", cookie->debug_id);
+
+ fscache_stat(&fscache_n_invalidates);
+
+ if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Trying to invalidate relinquished cookie\n"))
+ return;
+
+ if ((flags & FSCACHE_INVAL_DIO_WRITE) &&
+ test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
+ return;
+
+ spin_lock(&cookie->lock);
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ fscache_update_aux(cookie, aux_data, &new_size);
+ cookie->inval_counter++;
+ trace_fscache_invalidate(cookie, new_size);
+
+ switch (cookie->state) {
+ case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
+ default:
+ spin_unlock(&cookie->lock);
+ _leave(" [no %u]", cookie->state);
+ return;
+
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ if (!test_and_set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ __fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
+ fallthrough;
+ case FSCACHE_COOKIE_STATE_CREATING:
+ spin_unlock(&cookie->lock);
+ _leave(" [look %x]", cookie->inval_counter);
+ return;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ is_caching = fscache_begin_cookie_access(
+ cookie, fscache_access_invalidate_cookie);
+ if (is_caching)
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+
+ if (is_caching)
+ fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
+ _leave(" [inv]");
+ return;
+ }
+}
+EXPORT_SYMBOL(__fscache_invalidate);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Generate a list of extant cookies in /proc/fs/fscache/cookies
+ */
+static int fscache_cookies_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_cookie *cookie;
+ unsigned int keylen = 0, auxlen = 0;
+ u8 *p;
+
+ if (v == &fscache_cookies) {
+ seq_puts(m,
+ "COOKIE VOLUME REF ACT ACC S FL DEF \n"
+ "======== ======== === === === = == ================\n"
+ );
+ return 0;
+ }
+
+ cookie = list_entry(v, struct fscache_cookie, proc_link);
+
+ seq_printf(m,
+ "%08x %08x %3d %3d %3d %c %02lx",
+ cookie->debug_id,
+ cookie->volume->debug_id,
+ refcount_read(&cookie->ref),
+ atomic_read(&cookie->n_active),
+ atomic_read(&cookie->n_accesses),
+ fscache_cookie_states[cookie->state],
+ cookie->flags);
+
+ keylen = cookie->key_len;
+ auxlen = cookie->aux_len;
+
+ if (keylen > 0 || auxlen > 0) {
+ seq_puts(m, " ");
+ p = keylen <= sizeof(cookie->inline_key) ?
+ cookie->inline_key : cookie->key;
+ for (; keylen > 0; keylen--)
+ seq_printf(m, "%02x", *p++);
+ if (auxlen > 0) {
+ seq_puts(m, ", ");
+ p = auxlen <= sizeof(cookie->inline_aux) ?
+ cookie->inline_aux : cookie->aux;
+ for (; auxlen > 0; auxlen--)
+ seq_printf(m, "%02x", *p++);
+ }
+ }
+
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(fscache_cookies_lock)
+{
+ read_lock(&fscache_cookies_lock);
+ return seq_list_start_head(&fscache_cookies, *_pos);
+}
+
+static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_cookies, _pos);
+}
+
+static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
+ __releases(fscache_cookies_lock)
+{
+ read_unlock(&fscache_cookies_lock);
+}
+
+const struct seq_operations fscache_cookies_seq_ops = {
+ .start = fscache_cookies_seq_start,
+ .next = fscache_cookies_seq_next,
+ .stop = fscache_cookies_seq_stop,
+ .show = fscache_cookies_seq_show,
+};
+#endif
diff --git a/fs/netfs/fscache_internal.h b/fs/netfs/fscache_internal.h
new file mode 100644
index 000000000000..a09b948fcef2
--- /dev/null
+++ b/fs/netfs/fscache_internal.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Internal definitions for FS-Cache
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include "internal.h"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "FS-Cache: " fmt
diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c
new file mode 100644
index 000000000000..e4308457633c
--- /dev/null
+++ b/fs/netfs/fscache_io.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Cache data I/O routines
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#define FSCACHE_DEBUG_LEVEL OPERATION
+#include <linux/fscache-cache.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/**
+ * fscache_wait_for_operation - Wait for an object to become accessible
+ * @cres: The cache resources for the operation being performed
+ * @want_state: The minimum state the object must be at
+ *
+ * See if the target cache object is at the specified minimum state of
+ * accessibility yet, and if not, wait for it.
+ */
+bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state)
+{
+ struct fscache_cookie *cookie = fscache_cres_cookie(cres);
+ enum fscache_cookie_state state;
+
+again:
+ if (!fscache_cache_is_live(cookie->volume->cache)) {
+ _leave(" [broken]");
+ return false;
+ }
+
+ state = fscache_cookie_state(cookie);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_CREATING:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ if (want_state == FSCACHE_WANT_PARAMS)
+ goto ready; /* There can be no content */
+ fallthrough;
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ wait_var_event(&cookie->state,
+ fscache_cookie_state(cookie) != state);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ goto ready;
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ default:
+ _leave(" [not live]");
+ return false;
+ }
+
+ready:
+ if (!cres->cache_priv2)
+ return cookie->volume->cache->ops->begin_operation(cres, want_state);
+ return true;
+}
+EXPORT_SYMBOL(fscache_wait_for_operation);
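+
+/*
+ * A cache backend might, for instance, gate a write on the object being
+ * fully set up (a sketch, not lifted from an existing backend):
+ *
+ *     if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+ *             return -ENOBUFS;
+ */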
+
+/*
+ * Begin an I/O operation on the cache, waiting till we reach the right state.
+ *
+ * Attaches the resources required to the operation resources record.
+ */
+static int fscache_begin_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie,
+ enum fscache_want_state want_state,
+ enum fscache_access_trace why)
+{
+ enum fscache_cookie_state state;
+ long timeo;
+ bool once_only = false;
+
+ cres->ops = NULL;
+ cres->cache_priv = cookie;
+ cres->cache_priv2 = NULL;
+ cres->debug_id = cookie->debug_id;
+ cres->inval_counter = cookie->inval_counter;
+
+ if (!fscache_begin_cookie_access(cookie, why)) {
+ cres->cache_priv = NULL;
+ return -ENOBUFS;
+ }
+
+again:
+ spin_lock(&cookie->lock);
+
+ state = fscache_cookie_state(cookie);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ goto wait_for_file_wrangling;
+ case FSCACHE_COOKIE_STATE_CREATING:
+ if (want_state == FSCACHE_WANT_PARAMS)
+ goto ready; /* There can be no content */
+ goto wait_for_file_wrangling;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ goto ready;
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ WARN(1, "Can't use cookie in state %u\n", cookie->state);
+ goto not_live;
+ default:
+ goto not_live;
+ }
+
+ready:
+ spin_unlock(&cookie->lock);
+ if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
+ goto failed;
+ return 0;
+
+wait_for_file_wrangling:
+ spin_unlock(&cookie->lock);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ atomic_read(&cookie->n_accesses),
+ fscache_access_io_wait);
+ timeo = wait_var_event_timeout(&cookie->state,
+ fscache_cookie_state(cookie) != state, 20 * HZ);
+ if (timeo <= 1 && !once_only) {
+ pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
+ __func__, fscache_cookie_state(cookie), state);
+ fscache_print_cookie(cookie, 'O');
+ once_only = true;
+ }
+ goto again;
+
+not_live:
+ spin_unlock(&cookie->lock);
+failed:
+ cres->cache_priv = NULL;
+ cres->ops = NULL;
+ fscache_end_cookie_access(cookie, fscache_access_io_not_live);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+}
+
+int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
+{
+ return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
+ fscache_access_io_read);
+}
+EXPORT_SYMBOL(__fscache_begin_read_operation);
+
+int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
+{
+ return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
+ fscache_access_io_write);
+}
+EXPORT_SYMBOL(__fscache_begin_write_operation);
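+
+/*
+ * Illustrative use via the wrappers: a netfs read path would typically do
+ * something along these lines (error handling elided):
+ *
+ *     ret = fscache_begin_read_operation(&rreq->cache_resources, cookie);
+ *     if (ret < 0)
+ *             return ret;
+ *     ...
+ *     fscache_end_operation(&rreq->cache_resources);
+ *
+ * where -ENOBUFS simply means the data must come from the server instead.
+ */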
+
+struct fscache_write_request {
+ struct netfs_cache_resources cache_resources;
+ struct address_space *mapping;
+ loff_t start;
+ size_t len;
+ bool set_bits;
+ bool using_pgpriv2;
+ netfs_io_terminated_t term_func;
+ void *term_func_priv;
+};
+
+void __fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len)
+{
+ pgoff_t first = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE;
+ struct page *page;
+
+ if (len) {
+ XA_STATE(xas, &mapping->i_pages, first);
+
+ rcu_read_lock();
+ xas_for_each(&xas, page, last) {
+ folio_end_private_2(page_folio(page));
+ }
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(__fscache_clear_page_bits);
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void fscache_wreq_done(void *priv, ssize_t transferred_or_error)
+{
+ struct fscache_write_request *wreq = priv;
+
+ if (wreq->using_pgpriv2)
+ fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
+ wreq->set_bits);
+
+ if (wreq->term_func)
+ wreq->term_func(wreq->term_func_priv, transferred_or_error);
+ fscache_end_operation(&wreq->cache_resources);
+ kfree(wreq);
+}
+
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool cond)
+{
+ struct fscache_write_request *wreq;
+ struct netfs_cache_resources *cres;
+ struct iov_iter iter;
+ int ret = -ENOBUFS;
+
+ if (len == 0)
+ goto abandon;
+
+ _enter("%llx,%zx", start, len);
+
+ wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
+ if (!wreq)
+ goto abandon;
+ wreq->mapping = mapping;
+ wreq->start = start;
+ wreq->len = len;
+ wreq->using_pgpriv2 = using_pgpriv2;
+ wreq->set_bits = cond;
+ wreq->term_func = term_func;
+ wreq->term_func_priv = term_func_priv;
+
+ cres = &wreq->cache_resources;
+ if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
+ fscache_access_io_write) < 0)
+ goto abandon_free;
+
+ ret = cres->ops->prepare_write(cres, &start, &len, len, i_size, false);
+ if (ret < 0)
+ goto abandon_end;
+
+ /* TODO: Consider clearing page bits now for space the write isn't
+ * covering. This is more complicated than it appears when THPs are
+ * taken into account.
+ */
+
+ iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
+ fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
+ return;
+
+abandon_end:
+ return fscache_wreq_done(wreq, ret);
+abandon_free:
+ kfree(wreq);
+abandon:
+ if (using_pgpriv2)
+ fscache_clear_page_bits(mapping, start, len, cond);
+ if (term_func)
+ term_func(term_func_priv, ret);
+}
+EXPORT_SYMBOL(__fscache_write_to_cache);
+
+/*
+ * Change the size of a backing object.
+ */
+void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
+{
+ struct netfs_cache_resources cres;
+
+ trace_fscache_resize(cookie, new_size);
+ if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
+ fscache_access_io_resize) == 0) {
+ fscache_stat(&fscache_n_resizes);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
+
+ /* We cannot defer a resize as we need to do it inside the
+ * netfs's inode lock so that we're serialised with respect to
+ * writes.
+ */
+ cookie->volume->cache->ops->resize_cookie(&cres, new_size);
+ fscache_end_operation(&cres);
+ } else {
+ fscache_stat(&fscache_n_resizes_null);
+ }
+}
+EXPORT_SYMBOL(__fscache_resize_cookie);
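+
+/*
+ * As a sketch of the expected call site, a netfs would invoke the
+ * fscache_resize_cookie() wrapper from its setattr path, whilst holding the
+ * inode lock, when truncating a cached file:
+ *
+ *     truncate_setsize(inode, new_size);
+ *     fscache_resize_cookie(cookie, new_size);
+ */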
diff --git a/fs/netfs/fscache_main.c b/fs/netfs/fscache_main.c
new file mode 100644
index 000000000000..49849005eb7c
--- /dev/null
+++ b/fs/netfs/fscache_main.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* General filesystem local caching manager
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include <linux/init.h>
+#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/fscache.h>
+
+EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access);
+
+struct workqueue_struct *fscache_wq;
+EXPORT_SYMBOL(fscache_wq);
+
+/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 330.3 9201.6
+ * 2 rounds: 1246.4 25475.4
+ * 3 rounds: 1907.1 31295.1
+ * 4 rounds: 2042.3 31718.6
+ * Perfect: 2048 31744
+ * (32*64) (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol32(x, 7),\
+ x += y, y = rol32(y,20),\
+ y *= 9 )
+
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
+{
+ /* Use arch-optimized multiply if one exists */
+ return __hash_32(y ^ __hash_32(x));
+}
+
+/*
+ * Generate a hash. This is derived from full_name_hash(), but we want to be
+ * sure it is arch independent and that it doesn't change, as bits of the
+ * computed hash value might appear on disk. The caller must guarantee that
+ * the source data is a multiple of four bytes in size.
+ */
+unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
+{
+ const __le32 *p = data;
+ unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);
+
+ for (; n; n--) {
+ a = le32_to_cpu(*p++);
+ HASH_MIX(x, y, a);
+ }
+ return fold_hash(x, y);
+}
+
+/*
+ * initialise the fs caching module
+ */
+int __init fscache_init(void)
+{
+ int ret = -ENOMEM;
+
+ fscache_wq = alloc_workqueue("fscache", WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!fscache_wq)
+ goto error_wq;
+
+ ret = fscache_proc_init();
+ if (ret < 0)
+ goto error_proc;
+
+ fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
+ sizeof(struct fscache_cookie),
+ 0, 0, NULL);
+ if (!fscache_cookie_jar) {
+ pr_notice("Failed to allocate a cookie jar\n");
+ ret = -ENOMEM;
+ goto error_cookie_jar;
+ }
+
+ pr_notice("FS-Cache loaded\n");
+ return 0;
+
+error_cookie_jar:
+ fscache_proc_cleanup();
+error_proc:
+ destroy_workqueue(fscache_wq);
+error_wq:
+ return ret;
+}
+
+/*
+ * clean up on module removal
+ */
+void __exit fscache_exit(void)
+{
+ _enter("");
+
+ kmem_cache_destroy(fscache_cookie_jar);
+ fscache_proc_cleanup();
+ timer_shutdown_sync(&fscache_cookie_lru_timer);
+ destroy_workqueue(fscache_wq);
+ pr_notice("FS-Cache unloaded\n");
+}
diff --git a/fs/netfs/fscache_proc.c b/fs/netfs/fscache_proc.c
new file mode 100644
index 000000000000..874d951bc390
--- /dev/null
+++ b/fs/netfs/fscache_proc.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* FS-Cache statistics viewing interface
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+/*
+ * Add files to /proc/fs/netfs/ and make /proc/fs/fscache a compatibility
+ * symlink to it.
+ */
+int __init fscache_proc_init(void)
+{
+ if (!proc_symlink("fs/fscache", NULL, "netfs"))
+ goto error_sym;
+
+ if (!proc_create_seq("fs/netfs/caches", S_IFREG | 0444, NULL,
+ &fscache_caches_seq_ops))
+ goto error;
+
+ if (!proc_create_seq("fs/netfs/volumes", S_IFREG | 0444, NULL,
+ &fscache_volumes_seq_ops))
+ goto error;
+
+ if (!proc_create_seq("fs/netfs/cookies", S_IFREG | 0444, NULL,
+ &fscache_cookies_seq_ops))
+ goto error;
+ return 0;
+
+error:
+ remove_proc_entry("fs/fscache", NULL);
+error_sym:
+ return -ENOMEM;
+}
+
+/*
+ * Clean up the /proc/fs/fscache symlink.
+ */
+void fscache_proc_cleanup(void)
+{
+ remove_proc_subtree("fs/fscache", NULL);
+}
diff --git a/fs/netfs/fscache_stats.c b/fs/netfs/fscache_stats.c
new file mode 100644
index 000000000000..add21abdf713
--- /dev/null
+++ b/fs/netfs/fscache_stats.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* FS-Cache statistics
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+/*
+ * operation counters
+ */
+atomic_t fscache_n_volumes;
+atomic_t fscache_n_volumes_collision;
+atomic_t fscache_n_volumes_nomem;
+atomic_t fscache_n_cookies;
+atomic_t fscache_n_cookies_lru;
+atomic_t fscache_n_cookies_lru_expired;
+atomic_t fscache_n_cookies_lru_removed;
+atomic_t fscache_n_cookies_lru_dropped;
+
+atomic_t fscache_n_acquires;
+atomic_t fscache_n_acquires_ok;
+atomic_t fscache_n_acquires_oom;
+
+atomic_t fscache_n_invalidates;
+
+atomic_t fscache_n_updates;
+EXPORT_SYMBOL(fscache_n_updates);
+
+atomic_t fscache_n_relinquishes;
+atomic_t fscache_n_relinquishes_retire;
+atomic_t fscache_n_relinquishes_dropped;
+
+atomic_t fscache_n_resizes;
+atomic_t fscache_n_resizes_null;
+
+atomic_t fscache_n_read;
+EXPORT_SYMBOL(fscache_n_read);
+atomic_t fscache_n_write;
+EXPORT_SYMBOL(fscache_n_write);
+atomic_t fscache_n_no_write_space;
+EXPORT_SYMBOL(fscache_n_no_write_space);
+atomic_t fscache_n_no_create_space;
+EXPORT_SYMBOL(fscache_n_no_create_space);
+atomic_t fscache_n_culled;
+EXPORT_SYMBOL(fscache_n_culled);
+atomic_t fscache_n_dio_misfit;
+EXPORT_SYMBOL(fscache_n_dio_misfit);
+
+/*
+ * display the general statistics
+ */
+int fscache_stats_show(struct seq_file *m)
+{
+ seq_puts(m, "-- FS-Cache statistics --\n");
+ seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n",
+ atomic_read(&fscache_n_cookies),
+ atomic_read(&fscache_n_volumes),
+ atomic_read(&fscache_n_volumes_collision),
+ atomic_read(&fscache_n_volumes_nomem)
+ );
+
+ seq_printf(m, "Acquire: n=%u ok=%u oom=%u\n",
+ atomic_read(&fscache_n_acquires),
+ atomic_read(&fscache_n_acquires_ok),
+ atomic_read(&fscache_n_acquires_oom));
+
+ seq_printf(m, "LRU : n=%u exp=%u rmv=%u drp=%u at=%ld\n",
+ atomic_read(&fscache_n_cookies_lru),
+ atomic_read(&fscache_n_cookies_lru_expired),
+ atomic_read(&fscache_n_cookies_lru_removed),
+ atomic_read(&fscache_n_cookies_lru_dropped),
+ timer_pending(&fscache_cookie_lru_timer) ?
+ fscache_cookie_lru_timer.expires - jiffies : 0);
+
+ seq_printf(m, "Invals : n=%u\n",
+ atomic_read(&fscache_n_invalidates));
+
+ seq_printf(m, "Updates: n=%u rsz=%u rsn=%u\n",
+ atomic_read(&fscache_n_updates),
+ atomic_read(&fscache_n_resizes),
+ atomic_read(&fscache_n_resizes_null));
+
+ seq_printf(m, "Relinqs: n=%u rtr=%u drop=%u\n",
+ atomic_read(&fscache_n_relinquishes),
+ atomic_read(&fscache_n_relinquishes_retire),
+ atomic_read(&fscache_n_relinquishes_dropped));
+
+ seq_printf(m, "NoSpace: nwr=%u ncr=%u cull=%u\n",
+ atomic_read(&fscache_n_no_write_space),
+ atomic_read(&fscache_n_no_create_space),
+ atomic_read(&fscache_n_culled));
+
+ seq_printf(m, "IO : rd=%u wr=%u mis=%u\n",
+ atomic_read(&fscache_n_read),
+ atomic_read(&fscache_n_write),
+ atomic_read(&fscache_n_dio_misfit));
+ return 0;
+}
diff --git a/fs/netfs/fscache_volume.c b/fs/netfs/fscache_volume.c
new file mode 100644
index 000000000000..ced14ac78cc1
--- /dev/null
+++ b/fs/netfs/fscache_volume.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Volume-level cache cookie handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+#define fscache_volume_hash_shift 10
+static struct hlist_bl_head fscache_volume_hash[1 << fscache_volume_hash_shift];
+static atomic_t fscache_volume_debug_id;
+static LIST_HEAD(fscache_volumes);
+
+static void fscache_create_volume_work(struct work_struct *work);
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref;
+
+ __refcount_inc(&volume->ref, &ref);
+ trace_fscache_volume(volume->debug_id, ref + 1, where);
+ return volume;
+}
+
+struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref;
+
+ if (!__refcount_inc_not_zero(&volume->ref, &ref))
+ return NULL;
+
+ trace_fscache_volume(volume->debug_id, ref + 1, where);
+ return volume;
+}
+EXPORT_SYMBOL(fscache_try_get_volume);
+
+static void fscache_see_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref = refcount_read(&volume->ref);
+
+ trace_fscache_volume(volume->debug_id, ref, where);
+}
+
+/*
+ * Pin the cache behind a volume so that we can access it.
+ */
+static void __fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ n_accesses = atomic_inc_return(&volume->n_accesses);
+ smp_mb__after_atomic();
+ trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
+ refcount_read(&volume->ref),
+ n_accesses, why);
+}
+
+/**
+ * fscache_begin_volume_access - Pin a cache so a volume can be accessed
+ * @volume: The volume cookie
+ * @cookie: A datafile cookie for a tracing reference (or NULL)
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing a volume and returns true if successful. This works as follows:
+ *
+ * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ * then we return false to indicate access was not permitted.
+ *
+ * (2) If the cache tests as live, then we increment the volume's n_accesses
+ * count and then recheck the cache liveness, ending the access if it
+ * ceased to be live.
+ *
+ * (3) When we end the access, we decrement the volume's n_accesses and wake
+ * up any waiters if it reaches 0.
+ *
+ * (4) Whilst the cache is caching, the volume's n_accesses is kept
+ * artificially incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline, the state is changed to prevent new
+ * accesses, the volume's n_accesses is decremented and we wait for it to
+ * become 0.
+ *
+ * The datafile @cookie and the @why indicator are merely provided for tracing
+ * purposes.
+ */
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ if (!fscache_cache_is_live(volume->cache))
+ return false;
+ __fscache_begin_volume_access(volume, cookie, why);
+ if (!fscache_cache_is_live(volume->cache)) {
+ fscache_end_volume_access(volume, cookie, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * fscache_end_volume_access - Unpin a cache at the end of an access.
+ * @volume: The volume cookie
+ * @cookie: A datafile cookie for a tracing reference (or NULL)
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache volume after we've accessed it. The datafile @cookie and the
+ * @why indicator are merely provided for tracing purposes.
+ */
+void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&volume->n_accesses);
+ trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
+ refcount_read(&volume->ref),
+ n_accesses, why);
+ if (n_accesses == 0)
+ wake_up_var(&volume->n_accesses);
+}
+EXPORT_SYMBOL(fscache_end_volume_access);
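+
+/*
+ * Callers are expected to bracket their use of the cache like so (a sketch;
+ * the trace reason will vary with the caller):
+ *
+ *     if (!fscache_begin_volume_access(volume, cookie, fscache_access_io_read))
+ *             return -ENOBUFS;
+ *     ...
+ *     fscache_end_volume_access(volume, cookie, fscache_access_io_read);
+ */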
+
+static bool fscache_volume_same(const struct fscache_volume *a,
+ const struct fscache_volume *b)
+{
+ size_t klen;
+
+ if (a->key_hash != b->key_hash ||
+ a->cache != b->cache ||
+ a->key[0] != b->key[0])
+ return false;
+
+ klen = round_up(a->key[0] + 1, sizeof(__le32));
+ return memcmp(a->key, b->key, klen) == 0;
+}
+
+static bool fscache_is_acquire_pending(struct fscache_volume *volume)
+{
+ return test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &volume->flags);
+}
+
+static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
+ unsigned int collidee_debug_id)
+{
+ wait_on_bit_timeout(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
+ TASK_UNINTERRUPTIBLE, 20 * HZ);
+ if (fscache_is_acquire_pending(candidate)) {
+ pr_notice("Potential volume collision new=%08x old=%08x",
+ candidate->debug_id, collidee_debug_id);
+ fscache_stat(&fscache_n_volumes_collision);
+ wait_on_bit(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+/*
+ * Attempt to insert the new volume into the hash. If there's a collision, we
+ * wait for the old volume to complete if it's being relinquished, and fail
+ * otherwise.
+ */
+static bool fscache_hash_volume(struct fscache_volume *candidate)
+{
+ struct fscache_volume *cursor;
+ struct hlist_bl_head *h;
+ struct hlist_bl_node *p;
+ unsigned int bucket, collidee_debug_id = 0;
+
+ bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
+ h = &fscache_volume_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_volume_same(candidate, cursor)) {
+ if (!test_bit(FSCACHE_VOLUME_RELINQUISHED, &cursor->flags))
+ goto collision;
+ fscache_see_volume(cursor, fscache_volume_get_hash_collision);
+ set_bit(FSCACHE_VOLUME_COLLIDED_WITH, &cursor->flags);
+ set_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags);
+ collidee_debug_id = cursor->debug_id;
+ break;
+ }
+ }
+
+ hlist_bl_add_head(&candidate->hash_link, h);
+ hlist_bl_unlock(h);
+
+ if (fscache_is_acquire_pending(candidate))
+ fscache_wait_on_volume_collision(candidate, collidee_debug_id);
+ return true;
+
+collision:
+ fscache_see_volume(cursor, fscache_volume_collision);
+ hlist_bl_unlock(h);
+ return false;
+}
+
+/*
+ * Allocate and initialise a volume representation cookie.
+ */
+static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
+{
+ struct fscache_volume *volume;
+ struct fscache_cache *cache;
+ size_t klen, hlen;
+ u8 *key;
+
+ klen = strlen(volume_key);
+ if (klen > NAME_MAX)
+ return NULL;
+
+ if (!coherency_data)
+ coherency_len = 0;
+
+ cache = fscache_lookup_cache(cache_name, false);
+ if (IS_ERR(cache))
+ return NULL;
+
+ volume = kzalloc(struct_size(volume, coherency, coherency_len),
+ GFP_KERNEL);
+ if (!volume)
+ goto err_cache;
+
+ volume->cache = cache;
+ volume->coherency_len = coherency_len;
+ if (coherency_data)
+ memcpy(volume->coherency, coherency_data, coherency_len);
+ INIT_LIST_HEAD(&volume->proc_link);
+ INIT_WORK(&volume->work, fscache_create_volume_work);
+ refcount_set(&volume->ref, 1);
+ spin_lock_init(&volume->lock);
+
+ /* Stick the length on the front of the key and pad it out to make
+ * hashing easier.
+ */
+ hlen = round_up(1 + klen + 1, sizeof(__le32));
+ key = kzalloc(hlen, GFP_KERNEL);
+ if (!key)
+ goto err_vol;
+ key[0] = klen;
+ memcpy(key + 1, volume_key, klen);
+
+ volume->key = key;
+ volume->key_hash = fscache_hash(0, key, hlen);
+
+ volume->debug_id = atomic_inc_return(&fscache_volume_debug_id);
+ down_write(&fscache_addremove_sem);
+ atomic_inc(&cache->n_volumes);
+ list_add_tail(&volume->proc_link, &fscache_volumes);
+ fscache_see_volume(volume, fscache_volume_new_acquire);
+ fscache_stat(&fscache_n_volumes);
+ up_write(&fscache_addremove_sem);
+ _leave(" = v=%x", volume->debug_id);
+ return volume;
+
+err_vol:
+ kfree(volume);
+err_cache:
+ fscache_put_cache(cache, fscache_cache_put_alloc_volume);
+ fscache_stat(&fscache_n_volumes_nomem);
+ return NULL;
+}
+
+/*
+ * Create a volume's representation on disk. Have a volume ref and a cache
+ * access we have to release.
+ */
+static void fscache_create_volume_work(struct work_struct *work)
+{
+ const struct fscache_cache_ops *ops;
+ struct fscache_volume *volume =
+ container_of(work, struct fscache_volume, work);
+
+ fscache_see_volume(volume, fscache_volume_see_create_work);
+
+ ops = volume->cache->ops;
+ if (ops->acquire_volume)
+ ops->acquire_volume(volume);
+ fscache_end_cache_access(volume->cache,
+ fscache_access_acquire_volume_end);
+
+ clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
+ fscache_put_volume(volume, fscache_volume_put_create_work);
+}
+
+/*
+ * Dispatch a worker thread to create a volume's representation on disk.
+ */
+void fscache_create_volume(struct fscache_volume *volume, bool wait)
+{
+ if (test_and_set_bit(FSCACHE_VOLUME_CREATING, &volume->flags))
+ goto maybe_wait;
+ if (volume->cache_priv)
+ goto no_wait; /* We raced */
+ if (!fscache_begin_cache_access(volume->cache,
+ fscache_access_acquire_volume))
+ goto no_wait;
+
+ fscache_get_volume(volume, fscache_volume_get_create_work);
+ if (!schedule_work(&volume->work))
+ fscache_put_volume(volume, fscache_volume_put_create_work);
+
+maybe_wait:
+ if (wait) {
+ fscache_see_volume(volume, fscache_volume_wait_create_work);
+ wait_on_bit(&volume->flags, FSCACHE_VOLUME_CREATING,
+ TASK_UNINTERRUPTIBLE);
+ }
+ return;
+no_wait:
+ clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
+}
+
+/*
+ * Acquire a volume representation cookie and link it to a (proposed) cache.
+ */
+struct fscache_volume *__fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
+{
+ struct fscache_volume *volume;
+
+ volume = fscache_alloc_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
+ if (!volume)
+ return ERR_PTR(-ENOMEM);
+
+ if (!fscache_hash_volume(volume)) {
+ fscache_put_volume(volume, fscache_volume_put_hash_collision);
+ return ERR_PTR(-EBUSY);
+ }
+
+ fscache_create_volume(volume, false);
+ return volume;
+}
+EXPORT_SYMBOL(__fscache_acquire_volume);
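+
+/*
+ * By way of example, a netfs would typically call the fscache_acquire_volume()
+ * wrapper at mount time with a key identifying the superblock (the key string
+ * below is made up):
+ *
+ *     volume = fscache_acquire_volume("myfs,server1,share1", NULL, NULL, 0);
+ *     if (IS_ERR(volume))
+ *             volume = NULL;
+ *
+ * An error (e.g. -EBUSY on a key collision) just means the mount goes
+ * uncached.
+ */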
+
+static void fscache_wake_pending_volume(struct fscache_volume *volume,
+ struct hlist_bl_head *h)
+{
+ struct fscache_volume *cursor;
+ struct hlist_bl_node *p;
+
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_volume_same(cursor, volume)) {
+ fscache_see_volume(cursor, fscache_volume_see_hash_wake);
+ clear_and_wake_up_bit(FSCACHE_VOLUME_ACQUIRE_PENDING,
+ &cursor->flags);
+ return;
+ }
+ }
+}
+
+/*
+ * Remove a volume cookie from the hash table.
+ */
+static void fscache_unhash_volume(struct fscache_volume *volume)
+{
+ struct hlist_bl_head *h;
+ unsigned int bucket;
+
+ bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
+ h = &fscache_volume_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_del(&volume->hash_link);
+ if (test_bit(FSCACHE_VOLUME_COLLIDED_WITH, &volume->flags))
+ fscache_wake_pending_volume(volume, h);
+ hlist_bl_unlock(h);
+}
+
+/*
+ * Free a volume cookie, dropping its attachment to the cache.
+ */
+static void fscache_free_volume(struct fscache_volume *volume)
+{
+ struct fscache_cache *cache = volume->cache;
+
+ if (volume->cache_priv) {
+ __fscache_begin_volume_access(volume, NULL,
+ fscache_access_relinquish_volume);
+ if (volume->cache_priv)
+ cache->ops->free_volume(volume);
+ fscache_end_volume_access(volume, NULL,
+ fscache_access_relinquish_volume_end);
+ }
+
+ down_write(&fscache_addremove_sem);
+ list_del_init(&volume->proc_link);
+ atomic_dec(&volume->cache->n_volumes);
+ up_write(&fscache_addremove_sem);
+
+ if (!hlist_bl_unhashed(&volume->hash_link))
+ fscache_unhash_volume(volume);
+
+ trace_fscache_volume(volume->debug_id, 0, fscache_volume_free);
+ kfree(volume->key);
+ kfree(volume);
+ fscache_stat_d(&fscache_n_volumes);
+ fscache_put_cache(cache, fscache_cache_put_volume);
+}
+
+/*
+ * Drop a reference to a volume cookie.
+ */
+void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ if (volume) {
+ unsigned int debug_id = volume->debug_id;
+ bool zero;
+ int ref;
+
+ zero = __refcount_dec_and_test(&volume->ref, &ref);
+ trace_fscache_volume(debug_id, ref - 1, where);
+ if (zero)
+ fscache_free_volume(volume);
+ }
+}
+EXPORT_SYMBOL(fscache_put_volume);
+
+/*
+ * Relinquish a volume representation cookie.
+ */
+void __fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
+{
+ if (WARN_ON(test_and_set_bit(FSCACHE_VOLUME_RELINQUISHED, &volume->flags)))
+ return;
+
+ if (invalidate) {
+ set_bit(FSCACHE_VOLUME_INVALIDATE, &volume->flags);
+ } else if (coherency_data) {
+ memcpy(volume->coherency, coherency_data, volume->coherency_len);
+ }
+
+ fscache_put_volume(volume, fscache_volume_put_relinquish);
+}
+EXPORT_SYMBOL(__fscache_relinquish_volume);
+
+/**
+ * fscache_withdraw_volume - Withdraw a volume from being cached
+ * @volume: Volume cookie
+ *
+ * Withdraw a cache volume from service, waiting for all accesses to complete
+ * before returning.
+ */
+void fscache_withdraw_volume(struct fscache_volume *volume)
+{
+ int n_accesses;
+
+ _debug("withdraw V=%x", volume->debug_id);
+
+ /* Allow wakeups on dec-to-0 */
+ n_accesses = atomic_dec_return(&volume->n_accesses);
+ trace_fscache_access_volume(volume->debug_id, 0,
+ refcount_read(&volume->ref),
+ n_accesses, fscache_access_cache_unpin);
+
+ wait_var_event(&volume->n_accesses,
+ atomic_read(&volume->n_accesses) == 0);
+}
+EXPORT_SYMBOL(fscache_withdraw_volume);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Generate a list of volumes in /proc/fs/fscache/volumes
+ */
+static int fscache_volumes_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_volume *volume;
+
+ if (v == &fscache_volumes) {
+ seq_puts(m,
+ "VOLUME REF nCOOK ACC FL CACHE KEY\n"
+ "======== ===== ===== === == =============== ================\n");
+ return 0;
+ }
+
+ volume = list_entry(v, struct fscache_volume, proc_link);
+ seq_printf(m,
+ "%08x %5d %5d %3d %02lx %-15.15s %s\n",
+ volume->debug_id,
+ refcount_read(&volume->ref),
+ atomic_read(&volume->n_cookies),
+ atomic_read(&volume->n_accesses),
+ volume->flags,
+ volume->cache->name ?: "-",
+ volume->key + 1);
+ return 0;
+}
+
+static void *fscache_volumes_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(&fscache_addremove_sem)
+{
+ down_read(&fscache_addremove_sem);
+ return seq_list_start_head(&fscache_volumes, *_pos);
+}
+
+static void *fscache_volumes_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_volumes, _pos);
+}
+
+static void fscache_volumes_seq_stop(struct seq_file *m, void *v)
+ __releases(&fscache_addremove_sem)
+{
+ up_read(&fscache_addremove_sem);
+}
+
+const struct seq_operations fscache_volumes_seq_ops = {
+ .start = fscache_volumes_seq_start,
+ .next = fscache_volumes_seq_next,
+ .stop = fscache_volumes_seq_stop,
+ .show = fscache_volumes_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index b7f2c4459f33..4319611f5354 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -5,6 +5,15 @@
* Written by David Howells (dhowells@redhat.com)
*/
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/folio_queue.h>
+#include <linux/netfs.h>
+#include <linux/fscache.h>
+#include <linux/fscache-cache.h>
+#include <trace/events/netfs.h>
+#include <trace/events/fscache.h>
+
#ifdef pr_fmt
#undef pr_fmt
#endif
@@ -12,16 +21,116 @@
#define pr_fmt(fmt) "netfs: " fmt
/*
- * read_helper.c
+ * buffered_read.c
+ */
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+ size_t offset, size_t len);
+
+/*
+ * buffered_write.c
+ */
+void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ loff_t pos, size_t copied);
+
+/*
+ * main.c
*/
extern unsigned int netfs_debug;
+extern struct list_head netfs_io_requests;
+extern spinlock_t netfs_proc_lock;
+extern mempool_t netfs_request_pool;
+extern mempool_t netfs_subrequest_pool;
+
+#ifdef CONFIG_PROC_FS
+static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
+{
+ spin_lock(&netfs_proc_lock);
+ list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
+ spin_unlock(&netfs_proc_lock);
+}
+static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
+{
+ if (!list_empty(&rreq->proc_link)) {
+ spin_lock(&netfs_proc_lock);
+ list_del_rcu(&rreq->proc_link);
+ spin_unlock(&netfs_proc_lock);
+ }
+}
+#else
+static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
+static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
+#endif
+
+/*
+ * misc.c
+ */
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
+ enum netfs_folioq_trace trace);
+void netfs_reset_iter(struct netfs_io_subrequest *subreq);
+void netfs_wake_collector(struct netfs_io_request *rreq);
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+ struct netfs_io_stream *stream);
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq);
+
+/*
+ * objects.c
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ struct file *file,
+ loff_t start, size_t len,
+ enum netfs_io_origin origin);
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq);
+void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_put_failed_request(struct netfs_io_request *rreq);
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
+
+static inline void netfs_see_request(struct netfs_io_request *rreq,
+ enum netfs_rreq_ref_trace what)
+{
+ trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
+}
+
+static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what)
+{
+ trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref), what);
+}
+
+/*
+ * read_collect.c
+ */
+bool netfs_read_collection(struct netfs_io_request *rreq);
+void netfs_read_collection_worker(struct work_struct *work);
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
+
+/*
+ * read_pgpriv2.c
+ */
+void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
+void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
+bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
+
+/*
+ * read_retry.c
+ */
+void netfs_retry_reads(struct netfs_io_request *rreq);
+void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);
/*
* stats.c
*/
#ifdef CONFIG_NETFS_STATS
+extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
-extern atomic_t netfs_n_rh_readpage;
+extern atomic_t netfs_n_rh_read_folio;
+extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
@@ -38,7 +147,27 @@ extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
+extern atomic_t netfs_n_rh_retry_read_req;
+extern atomic_t netfs_n_rh_retry_read_subreq;
+extern atomic_t netfs_n_wh_buffered_write;
+extern atomic_t netfs_n_wh_writethrough;
+extern atomic_t netfs_n_wh_dio_write;
+extern atomic_t netfs_n_wh_writepages;
+extern atomic_t netfs_n_wh_copy_to_cache;
+extern atomic_t netfs_n_wh_wstream_conflict;
+extern atomic_t netfs_n_wh_upload;
+extern atomic_t netfs_n_wh_upload_done;
+extern atomic_t netfs_n_wh_upload_failed;
+extern atomic_t netfs_n_wh_write;
+extern atomic_t netfs_n_wh_write_done;
+extern atomic_t netfs_n_wh_write_failed;
+extern atomic_t netfs_n_wh_retry_write_req;
+extern atomic_t netfs_n_wh_retry_write_subreq;
+extern atomic_t netfs_n_wb_lock_skip;
+extern atomic_t netfs_n_wb_lock_wait;
+extern atomic_t netfs_n_folioq;
+int netfs_stats_show(struct seq_file *m, void *v);
static inline void netfs_stat(atomic_t *stat)
{
@@ -55,6 +184,261 @@ static inline void netfs_stat_d(atomic_t *stat)
#define netfs_stat_d(x) do {} while(0)
#endif
+/*
+ * write_collect.c
+ */
+int netfs_folio_written_back(struct folio *folio);
+bool netfs_write_collection(struct netfs_io_request *wreq);
+void netfs_write_collection_worker(struct work_struct *work);
+
+/*
+ * write_issue.c
+ */
+struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ struct file *file,
+ loff_t start,
+ enum netfs_io_origin origin);
+void netfs_reissue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *source);
+void netfs_issue_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream);
+size_t netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *folio, size_t copied, bool to_page_end,
+ struct folio **writethrough_cache);
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache);
+int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
+
+/*
+ * write_retry.c
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq);
+
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cookie = ctx->cache;
+
+ return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+ fscache_cookie_enabled(cookie);
+#else
+ return false;
+#endif
+}
+
+/*
+ * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
+{
+ if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
+ refcount_inc(&netfs_group->ref);
+ return netfs_group;
+}
+
+/*
+ * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline void netfs_put_group(struct netfs_group *netfs_group)
+{
+ if (netfs_group &&
+ netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+ refcount_dec_and_test(&netfs_group->ref))
+ netfs_group->free(netfs_group);
+}
+
+/*
+ * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
+{
+ if (netfs_group &&
+ netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+ refcount_sub_and_test(nr, &netfs_group->ref))
+ netfs_group->free(netfs_group);
+}
+
+/*
+ * Clear and wake up a NETFS_RREQ_* flag bit on a request.
+ */
+static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
+ unsigned int rreq_flag,
+ enum netfs_rreq_trace trace)
+{
+ if (test_bit(rreq_flag, &rreq->flags)) {
+ clear_bit_unlock(rreq_flag, &rreq->flags);
+ smp_mb__after_atomic(); /* Set flag before task state */
+ trace_netfs_rreq(rreq, trace);
+ wake_up(&rreq->waitq);
+ }
+}
+
+/*
+ * Test the NETFS_RREQ_IN_PROGRESS flag, inserting an appropriate barrier.
+ */
+static inline bool netfs_check_rreq_in_progress(const struct netfs_io_request *rreq)
+{
+ /* Order read of flags before read of anything else, such as error. */
+ return test_bit_acquire(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+}
+
+/*
+ * Test the NETFS_SREQ_IN_PROGRESS flag, inserting an appropriate barrier.
+ */
+static inline bool netfs_check_subreq_in_progress(const struct netfs_io_subrequest *subreq)
+{
+ /* Order read of flags before read of anything else, such as error. */
+ return test_bit_acquire(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+}
+
+/*
+ * fscache-cache.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_caches_seq_ops;
+#endif
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
+void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
+
+static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
+{
+ return smp_load_acquire(&cache->state);
+}
+
+static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
+{
+ return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
+}
+
+static inline void fscache_set_cache_state(struct fscache_cache *cache,
+ enum fscache_cache_state new_state)
+{
+ smp_store_release(&cache->state, new_state);
+}
+
+static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
+ enum fscache_cache_state old_state,
+ enum fscache_cache_state new_state)
+{
+ return try_cmpxchg_release(&cache->state, &old_state, new_state);
+}
+
+/*
+ * fscache-cookie.c
+ */
+extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
+extern struct timer_list fscache_cookie_lru_timer;
+
+extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
+extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+static inline void fscache_see_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+ where);
+}
+
+/*
+ * fscache-main.c
+ */
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
+#ifdef CONFIG_FSCACHE
+int __init fscache_init(void);
+void __exit fscache_exit(void);
+#else
+static inline int fscache_init(void) { return 0; }
+static inline void fscache_exit(void) {}
+#endif
+
+/*
+ * fscache-proc.c
+ */
+#ifdef CONFIG_PROC_FS
+extern int __init fscache_proc_init(void);
+extern void fscache_proc_cleanup(void);
+#else
+#define fscache_proc_init() (0)
+#define fscache_proc_cleanup() do {} while (0)
+#endif
+
+/*
+ * fscache-stats.c
+ */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_volumes;
+extern atomic_t fscache_n_volumes_collision;
+extern atomic_t fscache_n_volumes_nomem;
+extern atomic_t fscache_n_cookies;
+extern atomic_t fscache_n_cookies_lru;
+extern atomic_t fscache_n_cookies_lru_expired;
+extern atomic_t fscache_n_cookies_lru_removed;
+extern atomic_t fscache_n_cookies_lru_dropped;
+
+extern atomic_t fscache_n_acquires;
+extern atomic_t fscache_n_acquires_ok;
+extern atomic_t fscache_n_acquires_oom;
+
+extern atomic_t fscache_n_invalidates;
+
+extern atomic_t fscache_n_relinquishes;
+extern atomic_t fscache_n_relinquishes_retire;
+extern atomic_t fscache_n_relinquishes_dropped;
+
+extern atomic_t fscache_n_resizes;
+extern atomic_t fscache_n_resizes_null;
+
+static inline void fscache_stat(atomic_t *stat)
+{
+ atomic_inc(stat);
+}
+
+static inline void fscache_stat_d(atomic_t *stat)
+{
+ atomic_dec(stat);
+}
+
+#define __fscache_stat(stat) (stat)
+
+int fscache_stats_show(struct seq_file *m);
+#else
+
+#define __fscache_stat(stat) (NULL)
+#define fscache_stat(stat) do {} while (0)
+#define fscache_stat_d(stat) do {} while (0)
+
+static inline int fscache_stats_show(struct seq_file *m) { return 0; }
+#endif
+
+/*
+ * fscache-volume.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+void fscache_create_volume(struct fscache_volume *volume, bool wait);
+
/*****************************************************************************/
/*
* debug tracing
@@ -95,3 +479,57 @@ do { \
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
+
+/*
+ * assertions
+ */
+#if 1 /* defined(__KDEBUGALL) */
+
+#define ASSERT(X) \
+do { \
+ if (unlikely(!(X))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTCMP(X, OP, Y) \
+do { \
+ if (unlikely(!((X) OP (Y)))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ pr_err("%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIF(C, X) \
+do { \
+ if (unlikely((C) && !(X))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIFCMP(C, X, OP, Y) \
+do { \
+ if (unlikely((C) && !((X) OP (Y)))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ pr_err("%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#else
+
+#define ASSERT(X) do {} while (0)
+#define ASSERTCMP(X, OP, Y) do {} while (0)
+#define ASSERTIF(C, X) do {} while (0)
+#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
+
+#endif /* assert or not */
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
new file mode 100644
index 000000000000..72a435e5fc6d
--- /dev/null
+++ b/fs/netfs/iterator.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Iterator helpers.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/scatterlist.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
+ * @orig: The original iterator
+ * @orig_len: The amount of iterator to copy
+ * @new: The iterator to be set up
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * build up a second iterator that refers to all of those bits. This allows
+ * the original iterator to be disposed of.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA be
+ * allowed on the pages extracted.
+ *
+ * On success, the number of elements in the bvec is returned and the original
+ * iterator will have been advanced by the amount extracted.
+ *
+ * The iov_iter_extract_mode() function should be used to query how cleanup
+ * should be performed.
+ */
+ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
+ struct iov_iter *new,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct bio_vec *bv = NULL;
+ struct page **pages;
+ unsigned int cur_npages;
+ unsigned int max_pages;
+ unsigned int npages = 0;
+ unsigned int i;
+ ssize_t ret;
+ size_t count = orig_len, offset, len;
+ size_t bv_size, pg_size;
+
+ if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
+ return -EIO;
+
+ max_pages = iov_iter_npages(orig, INT_MAX);
+ bv_size = array_size(max_pages, sizeof(*bv));
+ bv = kvmalloc(bv_size, GFP_KERNEL);
+ if (!bv)
+ return -ENOMEM;
+
+ /* Put the page list at the end of the bvec list storage. bvec
+ * elements are larger than page pointers, so as long as we work
+ * 0->last, we should be fine.
+ */
+ pg_size = array_size(max_pages, sizeof(*pages));
+ pages = (void *)bv + bv_size - pg_size;
+
+ while (count && npages < max_pages) {
+ ret = iov_iter_extract_pages(orig, &pages, count,
+ max_pages - npages, extraction_flags,
+ &offset);
+ if (ret < 0) {
+ pr_err("Couldn't get user pages (rc=%zd)\n", ret);
+ break;
+ }
+
+ if (ret > count) {
+ pr_err("get_pages rc=%zd more than %zu\n", ret, count);
+ break;
+ }
+
+ count -= ret;
+ ret += offset;
+ cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);
+
+ if (npages + cur_npages > max_pages) {
+ pr_err("Out of bvec array capacity (%u vs %u)\n",
+ npages + cur_npages, max_pages);
+ break;
+ }
+
+ for (i = 0; i < cur_npages; i++) {
+ len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
+ bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
+ ret -= len;
+ offset = 0;
+ }
+
+ npages += cur_npages;
+ }
+
+ iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
+ return npages;
+}
+EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
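/*
 * Illustrative usage sketch (not taken from the patch): pin a user buffer for
 * asynchronous direct I/O by converting it into a bvec-backed iterator.  The
 * direct_bv* fields mirror those released in netfs_deinit_request() below;
 * the function name and the bvec_iter argument are assumptions.
 */
static ssize_t example_pin_user_buffer(struct netfs_io_request *rreq,
				       struct iov_iter *user_iter,
				       struct iov_iter *bvec_iter)
{
	ssize_t nbv;

	nbv = netfs_extract_user_iter(user_iter, iov_iter_count(user_iter),
				      bvec_iter, 0);
	if (nbv < 0)
		return nbv;

	/* Record the bvec array so that the cleanup path can unpin or put the
	 * pages, depending on how the extraction took its references.
	 */
	rreq->direct_bv = (struct bio_vec *)bvec_iter->bvec;
	rreq->direct_bv_count = nbv;
	rreq->direct_bv_unpin = iov_iter_extract_will_pin(user_iter);
	return nbv;
}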
+
+/*
+ * Select the span of a bvec iterator we're going to use. Limit it by both maximum
+ * size and maximum number of segments. Returns the size of the span in bytes.
+ */
+static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ const struct bio_vec *bvecs = iter->bvec;
+ unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
+ size_t len, span = 0, n = iter->count;
+ size_t skip = iter->iov_offset + start_offset;
+
+ if (WARN_ON(!iov_iter_is_bvec(iter)) ||
+ WARN_ON(start_offset > n) ||
+ n == 0)
+ return 0;
+
+ while (n && ix < nbv && skip) {
+ len = bvecs[ix].bv_len;
+ if (skip < len)
+ break;
+ skip -= len;
+ n -= len;
+ ix++;
+ }
+
+ while (n && ix < nbv) {
+ len = min3(n, bvecs[ix].bv_len - skip, max_size);
+ span += len;
+ nsegs++;
+ ix++;
+ if (span >= max_size || nsegs >= max_segs)
+ break;
+ skip = 0;
+ n -= len;
+ }
+
+ return min(span, max_size);
+}
+
+/*
+ * Select the span of an xarray iterator we're going to use. Limit it by both
+ * maximum size and maximum number of segments. It is assumed that segments
+ * can be larger than a page in size, provided they're physically contiguous.
+ * Returns the size of the span in bytes.
+ */
+static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ struct folio *folio;
+ unsigned int nsegs = 0;
+ loff_t pos = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = pos / PAGE_SIZE;
+ size_t span = 0, n = iter->count;
+
+ XA_STATE(xas, iter->xarray, index);
+
+ if (WARN_ON(!iov_iter_is_xarray(iter)) ||
+ WARN_ON(start_offset > n) ||
+ n == 0)
+ return 0;
+ max_size = min(max_size, n - start_offset);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t offset, flen, len;
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ flen = folio_size(folio);
+ offset = offset_in_folio(folio, pos);
+ len = min(max_size, flen - offset);
+ span += len;
+ nsegs++;
+ if (span >= max_size || nsegs >= max_segs)
+ break;
+ }
+
+ rcu_read_unlock();
+ return min(span, max_size);
+}
+
+/*
+ * Select the span of a folio queue iterator we're going to use. Limit it by
+ * both maximum size and maximum number of segments. Returns the size of the
+ * span in bytes.
+ */
+static size_t netfs_limit_folioq(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int nsegs = 0;
+ unsigned int slot = iter->folioq_slot;
+ size_t span = 0, n = iter->count;
+
+ if (WARN_ON(!iov_iter_is_folioq(iter)) ||
+ WARN_ON(start_offset > n) ||
+ n == 0)
+ return 0;
+ max_size = umin(max_size, n - start_offset);
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ start_offset += iter->iov_offset;
+ do {
+ size_t flen = folioq_folio_size(folioq, slot);
+
+ if (start_offset < flen) {
+ span += flen - start_offset;
+ nsegs++;
+ start_offset = 0;
+ } else {
+ start_offset -= flen;
+ }
+ if (span >= max_size || nsegs >= max_segs)
+ break;
+
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ } while (folioq);
+
+ return umin(span, max_size);
+}
+
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ if (iov_iter_is_folioq(iter))
+ return netfs_limit_folioq(iter, start_offset, max_size, max_segs);
+ if (iov_iter_is_bvec(iter))
+ return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
+ if (iov_iter_is_xarray(iter))
+ return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
+ BUG();
+}
+EXPORT_SYMBOL(netfs_limit_iter);
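/*
 * Illustrative sketch: carve a source iterator into transport-sized pieces.
 * 'rsize' and 'max_segs' stand in for whatever per-server limits the
 * filesystem advertises; the loop body is an assumption.  The source must be
 * a bvec, xarray or folio-queue iterator, as required by netfs_limit_iter().
 */
static void example_chop_into_subrequests(struct iov_iter *source,
					  size_t rsize, size_t max_segs)
{
	while (iov_iter_count(source)) {
		size_t part = netfs_limit_iter(source, 0, rsize, max_segs);

		if (!part)
			break;
		/* ... issue one subrequest covering 'part' bytes ... */
		iov_iter_advance(source, part);
	}
}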
diff --git a/fs/netfs/locking.c b/fs/netfs/locking.c
new file mode 100644
index 000000000000..2249ecd09d0a
--- /dev/null
+++ b/fs/netfs/locking.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O and data path helper functionality.
+ *
+ * Borrowed from NFS Copyright (c) 2016 Trond Myklebust
+ */
+
+#include <linux/kernel.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/*
+ * netfs_inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_rwsem.
+ */
+static int netfs_inode_dio_wait_interruptible(struct inode *inode)
+{
+ if (inode_dio_finished(inode))
+ return 0;
+
+ inode_dio_wait_interruptible(inode);
+ return !inode_dio_finished(inode) ? -ERESTARTSYS : 0;
+}
+
+/* Call with exclusively locked inode->i_rwsem */
+static int netfs_block_o_direct(struct netfs_inode *ictx)
+{
+ if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
+ return 0;
+ clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+ return netfs_inode_dio_wait_interruptible(&ictx->inode);
+}
+
+/**
+ * netfs_start_io_read - declare the file is being used for buffered reads
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that buffered read operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas direct I/O
+ * operations need to wait to grab an exclusive lock in order to set
+ * NETFS_ICTX_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+ */
+int netfs_start_io_read(struct inode *inode)
+ __acquires(inode->i_rwsem)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ /* Be an optimist! */
+ if (down_read_interruptible(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
+ return 0;
+ up_read(&inode->i_rwsem);
+
+ /* Slow path.... */
+ if (down_write_killable(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (netfs_block_o_direct(ictx) < 0) {
+ up_write(&inode->i_rwsem);
+ return -ERESTARTSYS;
+ }
+ downgrade_write(&inode->i_rwsem);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_read);
+
+/**
+ * netfs_end_io_read - declare that the buffered read operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_read(struct inode *inode)
+ __releases(inode->i_rwsem)
+{
+ up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_read);
+
+/**
+ * netfs_start_io_write - declare the file is being used for buffered writes
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is about to start, and ensure
+ * that we block all direct I/O.
+ */
+int netfs_start_io_write(struct inode *inode)
+ __acquires(inode->i_rwsem)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ if (down_write_killable(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (netfs_block_o_direct(ictx) < 0) {
+ up_write(&inode->i_rwsem);
+ return -ERESTARTSYS;
+ }
+ downgrade_write(&inode->i_rwsem);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_write);
+
+/**
+ * netfs_end_io_write - declare that the buffered write operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is done, and release the
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_write(struct inode *inode)
+ __releases(inode->i_rwsem)
+{
+ up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_write);
+
+/* Call with exclusively locked inode->i_rwsem */
+static int netfs_block_buffered(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ int ret;
+
+ if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
+ set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+ if (inode->i_mapping->nrpages != 0) {
+ unmap_mapping_range(inode->i_mapping, 0, 0, 0);
+ ret = filemap_fdatawait(inode->i_mapping);
+ if (ret < 0) {
+ clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * netfs_start_io_direct - declare the file is being used for direct i/o
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is about to start, and ensure
+ * that we block all buffered I/O.
+ * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that direct I/O operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas buffered I/O
+ * operations need to wait to grab an exclusive lock in order to clear
+ * NETFS_ICTX_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+ */
+int netfs_start_io_direct(struct inode *inode)
+ __acquires(inode->i_rwsem)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ int ret;
+
+ /* Be an optimist! */
+ if (down_read_interruptible(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
+ return 0;
+ up_read(&inode->i_rwsem);
+
+ /* Slow path.... */
+ if (down_write_killable(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ ret = netfs_block_buffered(inode);
+ if (ret < 0) {
+ up_write(&inode->i_rwsem);
+ return ret;
+ }
+ downgrade_write(&inode->i_rwsem);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_direct);
+
+/**
+ * netfs_end_io_direct - declare that the direct i/o operation is done
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_direct(struct inode *inode)
+ __releases(inode->i_rwsem)
+{
+ up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_direct);
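/*
 * Illustrative sketch of how a filesystem's ->read_iter() is expected to
 * bracket I/O with the helpers above so that buffered and direct readers
 * exclude each other.  The call to generic_file_read_iter() is a stand-in
 * assumption; a netfs-based filesystem would invoke its own read path here.
 */
static ssize_t example_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = netfs_start_io_direct(inode);
		if (ret < 0)
			return ret;
		ret = generic_file_read_iter(iocb, to);
		netfs_end_io_direct(inode);
	} else {
		ret = netfs_start_io_read(inode);
		if (ret < 0)
			return ret;
		ret = generic_file_read_iter(iocb, to);
		netfs_end_io_read(inode);
	}
	return ret;
}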
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
new file mode 100644
index 000000000000..73da6c9f5777
--- /dev/null
+++ b/fs/netfs/main.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Miscellaneous bits for the netfs support library.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/mempool.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/netfs.h>
+
+MODULE_DESCRIPTION("Network fs support");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
+
+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+
+static struct kmem_cache *netfs_request_slab;
+static struct kmem_cache *netfs_subrequest_slab;
+mempool_t netfs_request_pool;
+mempool_t netfs_subrequest_pool;
+
+#ifdef CONFIG_PROC_FS
+LIST_HEAD(netfs_io_requests);
+DEFINE_SPINLOCK(netfs_proc_lock);
+
+static const char *netfs_origins[nr__netfs_io_origin] = {
+ [NETFS_READAHEAD] = "RA",
+ [NETFS_READPAGE] = "RP",
+ [NETFS_READ_GAPS] = "RG",
+ [NETFS_READ_SINGLE] = "R1",
+ [NETFS_READ_FOR_WRITE] = "RW",
+ [NETFS_UNBUFFERED_READ] = "UR",
+ [NETFS_DIO_READ] = "DR",
+ [NETFS_WRITEBACK] = "WB",
+ [NETFS_WRITEBACK_SINGLE] = "W1",
+ [NETFS_WRITETHROUGH] = "WT",
+ [NETFS_UNBUFFERED_WRITE] = "UW",
+ [NETFS_DIO_WRITE] = "DW",
+ [NETFS_PGPRIV2_COPY_TO_CACHE] = "2C",
+};
+
+/*
+ * Generate a list of I/O requests in /proc/fs/netfs/requests
+ */
+static int netfs_requests_seq_show(struct seq_file *m, void *v)
+{
+ struct netfs_io_request *rreq;
+
+ if (v == &netfs_io_requests) {
+ seq_puts(m,
+ "REQUEST OR REF FLAG ERR OPS COVERAGE\n"
+ "======== == === ==== ==== === =========\n"
+ );
+ return 0;
+ }
+
+ rreq = list_entry(v, struct netfs_io_request, proc_link);
+ seq_printf(m,
+ "%08x %s %3d %4lx %4ld %3d @%04llx %llx/%llx",
+ rreq->debug_id,
+ netfs_origins[rreq->origin],
+ refcount_read(&rreq->ref),
+ rreq->flags,
+ rreq->error,
+ 0,
+ rreq->start, rreq->submitted, rreq->len);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *netfs_requests_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ return seq_list_start_head(&netfs_io_requests, *_pos);
+}
+
+static void *netfs_requests_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &netfs_io_requests, _pos);
+}
+
+static void netfs_requests_seq_stop(struct seq_file *m, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+static const struct seq_operations netfs_requests_seq_ops = {
+ .start = netfs_requests_seq_start,
+ .next = netfs_requests_seq_next,
+ .stop = netfs_requests_seq_stop,
+ .show = netfs_requests_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
+
+static int __init netfs_init(void)
+{
+ int ret = -ENOMEM;
+
+ netfs_request_slab = kmem_cache_create("netfs_request",
+ sizeof(struct netfs_io_request), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
+ NULL);
+ if (!netfs_request_slab)
+ goto error_req;
+
+ if (mempool_init_slab_pool(&netfs_request_pool, 100, netfs_request_slab) < 0)
+ goto error_reqpool;
+
+ netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
+ sizeof(struct netfs_io_subrequest) + 16, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
+ NULL);
+ if (!netfs_subrequest_slab)
+ goto error_subreq;
+
+ if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
+ goto error_subreqpool;
+
+#ifdef CONFIG_PROC_FS
+ if (!proc_mkdir("fs/netfs", NULL))
+ goto error_proc;
+ if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
+ &netfs_requests_seq_ops))
+ goto error_procfile;
+#endif
+#ifdef CONFIG_FSCACHE_STATS
+ if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
+ netfs_stats_show))
+ goto error_procfile;
+#endif
+
+ ret = fscache_init();
+ if (ret < 0)
+ goto error_fscache;
+ return 0;
+
+error_fscache:
+#ifdef CONFIG_PROC_FS
+error_procfile:
+ remove_proc_subtree("fs/netfs", NULL);
+error_proc:
+#endif
+ mempool_exit(&netfs_subrequest_pool);
+error_subreqpool:
+ kmem_cache_destroy(netfs_subrequest_slab);
+error_subreq:
+ mempool_exit(&netfs_request_pool);
+error_reqpool:
+ kmem_cache_destroy(netfs_request_slab);
+error_req:
+ return ret;
+}
+fs_initcall(netfs_init);
+
+static void __exit netfs_exit(void)
+{
+ fscache_exit();
+ remove_proc_subtree("fs/netfs", NULL);
+ mempool_exit(&netfs_subrequest_pool);
+ kmem_cache_destroy(netfs_subrequest_slab);
+ mempool_exit(&netfs_request_pool);
+ kmem_cache_destroy(netfs_request_slab);
+}
+module_exit(netfs_exit);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
new file mode 100644
index 000000000000..6df89c92b10b
--- /dev/null
+++ b/fs/netfs/misc.c
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Miscellaneous routines.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/swap.h>
+#include "internal.h"
+
+/**
+ * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
+ * @mapping: Address space to set on the folio (or NULL).
+ * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
+ * @_cur_size: Current size of the buffer (updated).
+ * @size: Target size of the buffer.
+ * @gfp: The allocation constraints.
+ */
+int netfs_alloc_folioq_buffer(struct address_space *mapping,
+ struct folio_queue **_buffer,
+ size_t *_cur_size, ssize_t size, gfp_t gfp)
+{
+ struct folio_queue *tail = *_buffer, *p;
+
+ size = round_up(size, PAGE_SIZE);
+ if (*_cur_size >= size)
+ return 0;
+
+ if (tail)
+ while (tail->next)
+ tail = tail->next;
+
+ do {
+ struct folio *folio;
+ int order = 0, slot;
+
+ if (!tail || folioq_full(tail)) {
+ p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
+ if (!p)
+ return -ENOMEM;
+ if (tail) {
+ tail->next = p;
+ p->prev = tail;
+ } else {
+ *_buffer = p;
+ }
+ tail = p;
+ }
+
+ if (size - *_cur_size > PAGE_SIZE)
+ order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
+ MAX_PAGECACHE_ORDER);
+
+ folio = folio_alloc(gfp, order);
+ if (!folio && order > 0)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
+ return -ENOMEM;
+
+ folio->mapping = mapping;
+ folio->index = *_cur_size / PAGE_SIZE;
+ trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
+ slot = folioq_append_mark(tail, folio);
+ *_cur_size += folioq_folio_size(tail, slot);
+ } while (*_cur_size < size);
+
+ return 0;
+}
+EXPORT_SYMBOL(netfs_alloc_folioq_buffer);
+
+/**
+ * netfs_free_folioq_buffer - Free a folio queue.
+ * @fq: The start of the folio queue to free
+ *
+ * Free up a chain of folio_queues and, if marked, the marked folios they point
+ * to.
+ */
+void netfs_free_folioq_buffer(struct folio_queue *fq)
+{
+ struct folio_queue *next;
+ struct folio_batch fbatch;
+
+ folio_batch_init(&fbatch);
+
+ for (; fq; fq = next) {
+ for (int slot = 0; slot < folioq_count(fq); slot++) {
+ struct folio *folio = folioq_folio(fq, slot);
+
+ if (!folio ||
+ !folioq_is_marked(fq, slot))
+ continue;
+
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ if (folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
+ }
+
+ netfs_stat_d(&netfs_n_folioq);
+ next = fq->next;
+ kfree(fq);
+ }
+
+ folio_batch_release(&fbatch);
+}
+EXPORT_SYMBOL(netfs_free_folioq_buffer);
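/*
 * Illustrative sketch: grow a private folio_queue buffer to at least 'want'
 * bytes and then release it.  The folios appended by
 * netfs_alloc_folioq_buffer() are marked, so netfs_free_folioq_buffer() puts
 * them along with the queue structures.  Names here are assumptions.
 */
static int example_scratch_buffer(size_t want)
{
	struct folio_queue *buf = NULL;
	size_t cur_size = 0;
	int ret;

	ret = netfs_alloc_folioq_buffer(NULL, &buf, &cur_size, want, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* ... wrap 'buf' in an ITER_FOLIOQ iov_iter and perform I/O ... */

	netfs_free_folioq_buffer(buf);
	return 0;
}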
+
+/*
+ * Reset the subrequest iterator to refer just to the region remaining to be
+ * read. The iterator may or may not have been advanced by socket ops or
+ * extraction ops to an extent that may or may not match the amount actually
+ * read.
+ */
+void netfs_reset_iter(struct netfs_io_subrequest *subreq)
+{
+ struct iov_iter *io_iter = &subreq->io_iter;
+ size_t remain = subreq->len - subreq->transferred;
+
+ if (io_iter->count > remain)
+ iov_iter_advance(io_iter, io_iter->count - remain);
+ else if (io_iter->count < remain)
+ iov_iter_revert(io_iter, remain - io_iter->count);
+ iov_iter_truncate(&subreq->io_iter, remain);
+}
+
+/**
+ * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
+ * @mapping: The mapping the folio belongs to.
+ * @folio: The folio being dirtied.
+ *
+ * Set the dirty flag on a folio and pin an in-use cache object in memory so
+ * that writeback can later write to it. This is intended to be called from
+ * the filesystem's ->dirty_folio() method.
+ *
+ * Return: true if the dirty flag was set on the folio, false otherwise.
+ */
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ struct fscache_cookie *cookie = netfs_i_cookie(ictx);
+ bool need_use = false;
+
+ _enter("");
+
+ if (!filemap_dirty_folio(mapping, folio))
+ return false;
+ if (!fscache_cookie_valid(cookie))
+ return true;
+
+ if (!(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
+ spin_lock(&inode->i_lock);
+ if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
+ inode_state_set(inode, I_PINNING_NETFS_WB);
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(cookie, true);
+ }
+ return true;
+}
+EXPORT_SYMBOL(netfs_dirty_folio);
+
+/**
+ * netfs_unpin_writeback - Unpin writeback resources
+ * @inode: The inode on which the cookie resides
+ * @wbc: The writeback control
+ *
+ * Unpin the writeback resources pinned by netfs_dirty_folio(). This is
+ * intended to be called as/by the netfs's ->write_inode() method.
+ */
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
+{
+ struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+
+ if (wbc->unpinned_netfs_wb)
+ fscache_unuse_cookie(cookie, NULL, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_unpin_writeback);
+
+/**
+ * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
+ * @inode: The inode to clean up
+ * @aux: Auxiliary data to apply to the inode
+ *
+ * Clear any writeback resources held by an inode when the inode is evicted.
+ * This must be called before clear_inode() is called.
+ */
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
+{
+ struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+
+ if (inode_state_read_once(inode) & I_PINNING_NETFS_WB) {
+ loff_t i_size = i_size_read(inode);
+ fscache_unuse_cookie(cookie, aux, &i_size);
+ }
+}
+EXPORT_SYMBOL(netfs_clear_inode_writeback);
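/*
 * Illustrative sketch of the expected superblock-side hookups: the
 * filesystem's ->write_inode() passes through to netfs_unpin_writeback() and
 * its eviction path calls netfs_clear_inode_writeback() before clear_inode().
 * The example_* names and the NULL aux data are assumptions.
 */
static int example_write_inode(struct inode *inode,
			       struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}

static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	netfs_clear_inode_writeback(inode, NULL);
	clear_inode(inode);
}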
+
+/**
+ * netfs_invalidate_folio - Invalidate or partially invalidate a folio
+ * @folio: Folio proposed for release
+ * @offset: Offset of the invalidated region
+ * @length: Length of the invalidated region
+ *
+ * Invalidate part or all of a folio for a network filesystem. The folio will
+ * be removed afterwards if the invalidated region covers the entire folio.
+ */
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+ struct netfs_folio *finfo;
+ struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+ size_t flen = folio_size(folio);
+
+ _enter("{%lx},%zx,%zx", folio->index, offset, length);
+
+ if (offset == 0 && length == flen) {
+ unsigned long long i_size = i_size_read(&ctx->inode);
+ unsigned long long fpos = folio_pos(folio), end;
+
+ end = umin(fpos + flen, i_size);
+ if (fpos < i_size && end > ctx->zero_point)
+ ctx->zero_point = end;
+ }
+
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+
+ if (!folio_test_private(folio))
+ return;
+
+ finfo = netfs_folio_info(folio);
+
+ if (offset == 0 && length >= flen)
+ goto erase_completely;
+
+ if (finfo) {
+ /* We have a partially uptodate page from a streaming write. */
+ unsigned int fstart = finfo->dirty_offset;
+ unsigned int fend = fstart + finfo->dirty_len;
+ unsigned int iend = offset + length;
+
+ if (offset >= fend)
+ return;
+ if (iend <= fstart)
+ return;
+
+ /* The invalidation region overlaps the data. If the region
+ * covers the start of the data, we either move along the start
+ * or just erase the data entirely.
+ */
+ if (offset <= fstart) {
+ if (iend >= fend)
+ goto erase_completely;
+ /* Move the start of the data. */
+ finfo->dirty_len = fend - iend;
+ finfo->dirty_offset = offset;
+ return;
+ }
+
+ /* Reduce the length of the data if the invalidation region
+ * covers the tail part.
+ */
+ if (iend >= fend) {
+ finfo->dirty_len = offset - fstart;
+ return;
+ }
+
+ /* A partial write was split. The caller has already zeroed
+ * it, so just absorb the hole.
+ */
+ }
+ return;
+
+erase_completely:
+ netfs_put_group(netfs_folio_group(folio));
+ folio_detach_private(folio);
+ folio_clear_uptodate(folio);
+ kfree(finfo);
+ return;
+}
+EXPORT_SYMBOL(netfs_invalidate_folio);
+
+/**
+ * netfs_release_folio - Try to release a folio
+ * @folio: Folio proposed for release
+ * @gfp: Flags qualifying the release
+ *
+ * Request release of a folio and clean up its private state if it's not busy.
+ * Returns true if the folio can now be released, false if not.
+ */
+bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+{
+ struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+ unsigned long long end;
+
+ if (folio_test_dirty(folio))
+ return false;
+
+ end = umin(folio_next_pos(folio), i_size_read(&ctx->inode));
+ if (end > ctx->zero_point)
+ ctx->zero_point = end;
+
+ if (folio_test_private(folio))
+ return false;
+ if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ folio_wait_private_2(folio);
+ }
+ fscache_note_page_release(netfs_i_cookie(ctx));
+ return true;
+}
+EXPORT_SYMBOL(netfs_release_folio);
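/*
 * Illustrative wiring sketch: a network filesystem is expected to point its
 * address_space_operations at the folio helpers above.  Only the three
 * netfs_* callbacks come from this file; the remaining ops and the structure
 * name are assumptions.
 */
static const struct address_space_operations example_netfs_aops = {
	.dirty_folio		= netfs_dirty_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.release_folio		= netfs_release_folio,
	/* .read_folio/.readahead/.writepages etc. supplied by the filesystem */
};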
+
+/*
+ * Wake the collection work item.
+ */
+void netfs_wake_collector(struct netfs_io_request *rreq)
+{
+ if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
+ !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
+ queue_work(system_dfl_wq, &rreq->work);
+ } else {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
+ wake_up(&rreq->waitq);
+ }
+}
+
+/*
+ * Mark a subrequest as no longer being in progress and, if need be, wake the
+ * collector.
+ */
+void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[subreq->stream_nr];
+
+ clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
+
+ /* If we are at the head of the queue, wake up the collector. */
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests) ||
+ test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
+ netfs_wake_collector(rreq);
+}
+
+/*
+ * Wait for all outstanding I/O in a stream to quiesce.
+ */
+void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
+ struct netfs_io_stream *stream)
+{
+ struct netfs_io_subrequest *subreq;
+ DEFINE_WAIT(myself);
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (!netfs_check_subreq_in_progress(subreq))
+ continue;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_quiesce);
+ for (;;) {
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ if (!netfs_check_subreq_in_progress(subreq))
+ break;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
+ schedule();
+ }
+ }
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_waited_quiesce);
+ finish_wait(&rreq->waitq, &myself);
+}
+
+/*
+ * Perform collection in app thread if not offloaded to workqueue.
+ */
+static int netfs_collect_in_app(struct netfs_io_request *rreq,
+ bool (*collector)(struct netfs_io_request *rreq))
+{
+ bool need_collect = false, inactive = true, done = true;
+
+ if (!netfs_check_rreq_in_progress(rreq)) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_recollect);
+ return 1; /* Done */
+ }
+
+ for (int i = 0; i < NR_IO_STREAMS; i++) {
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[i];
+
+ if (!stream->active)
+ continue;
+ inactive = false;
+ trace_netfs_collect_stream(rreq, stream);
+ subreq = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest,
+ rreq_link);
+ if (subreq &&
+ (!netfs_check_subreq_in_progress(subreq) ||
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+ need_collect = true;
+ break;
+ }
+ if (subreq || !test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
+ done = false;
+ }
+
+ if (!need_collect && !inactive && !done)
+ return 0; /* Sleep */
+
+ __set_current_state(TASK_RUNNING);
+ if (collector(rreq)) {
+ /* Drop the ref from the NETFS_RREQ_IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ return 1; /* Done */
+ }
+
+ if (inactive) {
+ WARN(true, "Failed to collect inactive req R=%08x\n",
+ rreq->debug_id);
+ cond_resched();
+ }
+ return 2; /* Again */
+}
+
+/*
+ * Wait for a request to complete, successfully or otherwise.
+ */
+static ssize_t netfs_wait_for_in_progress(struct netfs_io_request *rreq,
+ bool (*collector)(struct netfs_io_request *rreq))
+{
+ DEFINE_WAIT(myself);
+ ssize_t ret;
+
+ for (;;) {
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ switch (netfs_collect_in_app(rreq, collector)) {
+ case 0:
+ break;
+ case 1:
+ goto all_collected;
+ case 2:
+ if (!netfs_check_rreq_in_progress(rreq))
+ break;
+ cond_resched();
+ continue;
+ }
+ }
+
+ if (!netfs_check_rreq_in_progress(rreq))
+ break;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+ schedule();
+ }
+
+all_collected:
+ trace_netfs_rreq(rreq, netfs_rreq_trace_waited_ip);
+ finish_wait(&rreq->waitq, &myself);
+
+ ret = rreq->error;
+ if (ret == 0) {
+ ret = rreq->transferred;
+ switch (rreq->origin) {
+ case NETFS_DIO_READ:
+ case NETFS_DIO_WRITE:
+ case NETFS_READ_SINGLE:
+ case NETFS_UNBUFFERED_READ:
+ case NETFS_UNBUFFERED_WRITE:
+ break;
+ default:
+ if (rreq->submitted < rreq->len) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_in_progress(rreq, netfs_read_collection);
+}
+
+ssize_t netfs_wait_for_write(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_in_progress(rreq, netfs_write_collection);
+}
+
+/*
+ * Wait for a paused operation to unpause or complete in some manner.
+ */
+static void netfs_wait_for_pause(struct netfs_io_request *rreq,
+ bool (*collector)(struct netfs_io_request *rreq))
+{
+ DEFINE_WAIT(myself);
+
+ for (;;) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ switch (netfs_collect_in_app(rreq, collector)) {
+ case 0:
+ break;
+ case 1:
+ goto all_collected;
+ case 2:
+ if (!netfs_check_rreq_in_progress(rreq) ||
+ !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ break;
+ cond_resched();
+ continue;
+ }
+ }
+
+ if (!netfs_check_rreq_in_progress(rreq) ||
+ !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ break;
+
+ schedule();
+ }
+
+all_collected:
+ trace_netfs_rreq(rreq, netfs_rreq_trace_waited_pause);
+ finish_wait(&rreq->waitq, &myself);
+}
+
+void netfs_wait_for_paused_read(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_pause(rreq, netfs_read_collection);
+}
+
+void netfs_wait_for_paused_write(struct netfs_io_request *rreq)
+{
+ return netfs_wait_for_pause(rreq, netfs_write_collection);
+}
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
new file mode 100644
index 000000000000..b8c4918d3dcd
--- /dev/null
+++ b/fs/netfs/objects.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Object lifetime handling and tracing.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/delay.h>
+#include "internal.h"
+
+static void netfs_free_request(struct work_struct *work);
+
+/*
+ * Allocate an I/O request and initialise it.
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ struct file *file,
+ loff_t start, size_t len,
+ enum netfs_io_origin origin)
+{
+ static atomic_t debug_ids;
+ struct inode *inode = file ? file_inode(file) : mapping->host;
+ struct netfs_inode *ctx = netfs_inode(inode);
+ struct netfs_io_request *rreq;
+ mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
+ struct kmem_cache *cache = mempool->pool_data;
+ int ret;
+
+ for (;;) {
+ rreq = mempool_alloc(mempool, GFP_KERNEL);
+ if (rreq)
+ break;
+ msleep(10);
+ }
+
+ memset(rreq, 0, kmem_cache_size(cache));
+ INIT_WORK(&rreq->cleanup_work, netfs_free_request);
+ rreq->start = start;
+ rreq->len = len;
+ rreq->origin = origin;
+ rreq->netfs_ops = ctx->ops;
+ rreq->mapping = mapping;
+ rreq->inode = inode;
+ rreq->i_size = i_size_read(inode);
+ rreq->debug_id = atomic_inc_return(&debug_ids);
+ rreq->wsize = INT_MAX;
+ rreq->io_streams[0].sreq_max_len = ULONG_MAX;
+ rreq->io_streams[0].sreq_max_segs = 0;
+ spin_lock_init(&rreq->lock);
+ INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
+ INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
+ init_waitqueue_head(&rreq->waitq);
+ refcount_set(&rreq->ref, 2);
+
+ if (origin == NETFS_READAHEAD ||
+ origin == NETFS_READPAGE ||
+ origin == NETFS_READ_GAPS ||
+ origin == NETFS_READ_SINGLE ||
+ origin == NETFS_READ_FOR_WRITE ||
+ origin == NETFS_UNBUFFERED_READ ||
+ origin == NETFS_DIO_READ) {
+ INIT_WORK(&rreq->work, netfs_read_collection_worker);
+ rreq->io_streams[0].avail = true;
+ } else {
+ INIT_WORK(&rreq->work, netfs_write_collection_worker);
+ }
+
+ __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ if (rreq->netfs_ops->init_request) {
+ ret = rreq->netfs_ops->init_request(rreq, file);
+ if (ret < 0) {
+ mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
+ return ERR_PTR(ret);
+ }
+ }
+
+ atomic_inc(&ctx->io_count);
+ trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
+ netfs_proc_add_rreq(rreq);
+ netfs_stat(&netfs_n_rh_rreq);
+ return rreq;
+}
+
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
+{
+ int r;
+
+ __refcount_inc(&rreq->ref, &r);
+ trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
+}
+
+void netfs_clear_subrequests(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream;
+ int s;
+
+ for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
+ stream = &rreq->io_streams[s];
+ while (!list_empty(&stream->subrequests)) {
+ subreq = list_first_entry(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
+ }
+ }
+}
+
+static void netfs_free_request_rcu(struct rcu_head *rcu)
+{
+ struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);
+
+ mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
+ netfs_stat_d(&netfs_n_rh_rreq);
+}
+
+static void netfs_deinit_request(struct netfs_io_request *rreq)
+{
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
+ unsigned int i;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+
+ /* Cancel/flush the result collection worker. That does not carry a
+ * ref of its own, so we must wait for it somewhere.
+ */
+ cancel_work_sync(&rreq->work);
+
+ netfs_proc_del_rreq(rreq);
+ netfs_clear_subrequests(rreq);
+ if (rreq->netfs_ops->free_request)
+ rreq->netfs_ops->free_request(rreq);
+ if (rreq->cache_resources.ops)
+ rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+ if (rreq->direct_bv) {
+ for (i = 0; i < rreq->direct_bv_count; i++) {
+ if (rreq->direct_bv[i].bv_page) {
+ if (rreq->direct_bv_unpin)
+ unpin_user_page(rreq->direct_bv[i].bv_page);
+ }
+ }
+ kvfree(rreq->direct_bv);
+ }
+ rolling_buffer_clear(&rreq->buffer);
+
+ if (atomic_dec_and_test(&ictx->io_count))
+ wake_up_var(&ictx->io_count);
+}
+
+static void netfs_free_request(struct work_struct *work)
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, cleanup_work);
+
+ netfs_deinit_request(rreq);
+ call_rcu(&rreq->rcu, netfs_free_request_rcu);
+}
+
+void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
+{
+ unsigned int debug_id;
+ bool dead;
+ int r;
+
+ if (rreq) {
+ debug_id = rreq->debug_id;
+ dead = __refcount_dec_and_test(&rreq->ref, &r);
+ trace_netfs_rreq_ref(debug_id, r - 1, what);
+ if (dead)
+ WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work));
+ }
+}
+
+/*
+ * Free a request (synchronously) that was just allocated but has
+ * failed before it could be submitted.
+ */
+void netfs_put_failed_request(struct netfs_io_request *rreq)
+{
+ int r = refcount_read(&rreq->ref);
+
+	/* New requests have two references (see netfs_alloc_request()), and
+	 * this function is only allowed on new request objects.
+	 */
+ WARN_ON_ONCE(r != 2);
+
+ trace_netfs_rreq_ref(rreq->debug_id, r, netfs_rreq_trace_put_failed);
+ netfs_free_request(&rreq->cleanup_work);
+}
+
+/*
+ * Allocate and partially initialise an I/O subrequest structure.
+ */
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
+ struct kmem_cache *cache = mempool->pool_data;
+
+ for (;;) {
+ subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
+ GFP_KERNEL);
+ if (subreq)
+ break;
+ msleep(10);
+ }
+
+ memset(subreq, 0, kmem_cache_size(cache));
+ INIT_WORK(&subreq->work, NULL);
+ INIT_LIST_HEAD(&subreq->rreq_link);
+ refcount_set(&subreq->ref, 2);
+ subreq->rreq = rreq;
+ subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
+ netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+ netfs_stat(&netfs_n_rh_sreq);
+ return subreq;
+}
+
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what)
+{
+ int r;
+
+ __refcount_inc(&subreq->ref, &r);
+ trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
+ what);
+}
+
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+ if (rreq->netfs_ops->free_subrequest)
+ rreq->netfs_ops->free_subrequest(subreq);
+ mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
+ netfs_stat_d(&netfs_n_rh_sreq);
+ netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
+}
+
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what)
+{
+ unsigned int debug_index = subreq->debug_index;
+ unsigned int debug_id = subreq->rreq->debug_id;
+ bool dead;
+ int r;
+
+ dead = __refcount_dec_and_test(&subreq->ref, &r);
+ trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
+ if (dead)
+ netfs_free_subrequest(subreq);
+}
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
new file mode 100644
index 000000000000..a95e7aadafd0
--- /dev/null
+++ b/fs/netfs/read_collect.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem read subrequest result collection, assessment and
+ * retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/* Notes made in the collector */
+#define HIT_PENDING 0x01 /* A front op was still pending */
+#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
+#define BUFFERED 0x08 /* The pagecache needs cleaning up */
+#define NEED_RETRY 0x10 /* A front op requests retrying */
+#define COPY_TO_CACHE 0x40 /* Need to copy subrequest to cache */
+#define ABANDON_SREQ 0x80 /* Need to abandon untransferred part of subrequest */
+
+/*
+ * Clear the unread part of an I/O subrequest.
+ */
+static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+{
+ netfs_reset_iter(subreq);
+ WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
+ iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
+ if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
+}
+
+/*
+ * Flush, mark and unlock a folio that's now completely read. If we want to
+ * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
+ * dirty and let writeback handle it.
+ */
+static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
+ struct folio_queue *folioq,
+ int slot)
+{
+ struct netfs_folio *finfo;
+ struct folio *folio = folioq_folio(folioq, slot);
+
+ if (unlikely(folio_pos(folio) < rreq->abandon_to)) {
+ trace_netfs_folio(folio, netfs_folio_trace_abandon);
+ goto just_unlock;
+ }
+
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+
+ if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+ finfo = netfs_folio_info(folio);
+ if (finfo) {
+ trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
+ if (finfo->netfs_group)
+ folio_change_private(folio, finfo->netfs_group);
+ else
+ folio_detach_private(folio);
+ kfree(finfo);
+ }
+
+ if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) {
+ if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+ folio_mark_dirty(folio);
+ }
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_read_done);
+ }
+
+ folioq_clear(folioq, slot);
+ } else {
+ // TODO: Use of PG_private_2 is deprecated.
+ if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags))
+ netfs_pgpriv2_copy_to_cache(rreq, folio);
+ }
+
+just_unlock:
+ if (folio->index == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
+ _debug("no unlock");
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
+ folio_unlock(folio);
+ }
+
+ folioq_clear(folioq, slot);
+}
+
+/*
+ * Unlock any folios we've finished with.
+ */
+static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
+ unsigned int *notes)
+{
+ struct folio_queue *folioq = rreq->buffer.tail;
+ unsigned long long collected_to = rreq->collected_to;
+ unsigned int slot = rreq->buffer.first_tail_slot;
+
+ if (rreq->cleaned_to >= rreq->collected_to)
+ return;
+
+ // TODO: Begin decryption
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&rreq->buffer);
+ if (!folioq) {
+ rreq->front_folio_order = 0;
+ return;
+ }
+ slot = 0;
+ }
+
+ for (;;) {
+ struct folio *folio;
+ unsigned long long fpos, fend;
+ unsigned int order;
+ size_t fsize;
+
+ if (*notes & COPY_TO_CACHE)
+ set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_locked(folio),
+ "R=%08x: folio %lx is not locked\n",
+ rreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_locked);
+
+ order = folioq_folio_order(folioq, slot);
+ rreq->front_folio_order = order;
+ fsize = PAGE_SIZE << order;
+ fpos = folio_pos(folio);
+ fend = umin(fpos + fsize, rreq->i_size);
+
+ trace_netfs_collect_folio(rreq, folio, fend, collected_to);
+
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
+
+ netfs_unlock_read_folio(rreq, folioq, slot);
+ WRITE_ONCE(rreq->cleaned_to, fpos + fsize);
+ *notes |= MADE_PROGRESS;
+
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&rreq->buffer);
+ if (!folioq)
+ goto done;
+ slot = 0;
+ trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
+ }
+
+ if (fpos + fsize >= collected_to)
+ break;
+ }
+
+ rreq->buffer.tail = folioq;
+done:
+ rreq->buffer.first_tail_slot = slot;
+}
+
+/*
+ * Collect and assess the results of various read subrequests. We may need to
+ * retry some of the results.
+ *
+ * Note that we have a sequence of subrequests, which may be drawing on
+ * different sources and may or may not be the same size or starting position
+ * and may not even correspond in boundary alignment.
+ */
+static void netfs_collect_read_results(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *front, *remove;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ unsigned int notes;
+
+ _enter("%llx-%llx", rreq->start, rreq->start + rreq->len);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_collect);
+ trace_netfs_collect(rreq);
+
+reassess:
+ if (rreq->origin == NETFS_READAHEAD ||
+ rreq->origin == NETFS_READPAGE ||
+ rreq->origin == NETFS_READ_FOR_WRITE)
+ notes = BUFFERED;
+ else
+ notes = 0;
+
+ /* Remove completed subrequests from the front of the stream and
+ * advance the completion point. We stop when we hit something that's
+ * in progress. The issuer thread may be adding stuff to the tail
+ * whilst we're doing this.
+ */
+ front = READ_ONCE(stream->front);
+ while (front) {
+ size_t transferred;
+
+ trace_netfs_collect_sreq(rreq, front);
+ _debug("sreq [%x] %llx %zx/%zx",
+ front->debug_index, front->start, front->transferred, front->len);
+
+ if (stream->collected_to < front->start) {
+ trace_netfs_collect_gap(rreq, stream, front->start, 'F');
+ stream->collected_to = front->start;
+ }
+
+ if (netfs_check_subreq_in_progress(front))
+ notes |= HIT_PENDING;
+ smp_rmb(); /* Read counters after IN_PROGRESS flag. */
+ transferred = READ_ONCE(front->transferred);
+
+ /* If we can now collect the next folio, do so. We don't want
+ * to defer this as we have to decide whether we need to copy
+ * to the cache or not, and that may differ between adjacent
+ * subreqs.
+ */
+ if (notes & BUFFERED) {
+ size_t fsize = PAGE_SIZE << rreq->front_folio_order;
+
+ /* Clear the tail of a short read. */
+ if (!(notes & HIT_PENDING) &&
+ front->error == 0 &&
+ transferred < front->len &&
+ (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) ||
+ test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) {
+ netfs_clear_unread(front);
+ transferred = front->transferred = front->len;
+ trace_netfs_sreq(front, netfs_sreq_trace_clear);
+ }
+
+ stream->collected_to = front->start + transferred;
+ rreq->collected_to = stream->collected_to;
+
+ if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags))
+ notes |= COPY_TO_CACHE;
+
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ rreq->abandon_to = front->start + front->len;
+ front->transferred = front->len;
+ transferred = front->len;
+ trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon);
+ }
+ if (front->start + transferred >= rreq->cleaned_to + fsize ||
+ test_bit(NETFS_SREQ_HIT_EOF, &front->flags))
+ netfs_read_unlock_folios(rreq, &notes);
+ } else {
+ stream->collected_to = front->start + transferred;
+ rreq->collected_to = stream->collected_to;
+ }
+
+ /* Stall if the front is still undergoing I/O. */
+ if (notes & HIT_PENDING)
+ break;
+
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ if (!stream->failed) {
+ stream->error = front->error;
+ rreq->error = front->error;
+ set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+ stream->failed = true;
+ }
+ notes |= MADE_PROGRESS | ABANDON_SREQ;
+ } else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) {
+ stream->need_retry = true;
+ notes |= NEED_RETRY | MADE_PROGRESS;
+ break;
+ } else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
+ notes |= MADE_PROGRESS;
+ } else {
+ if (!stream->failed) {
+ stream->transferred += transferred;
+ stream->transferred_valid = true;
+ }
+ if (front->transferred < front->len)
+ set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
+ notes |= MADE_PROGRESS;
+ }
+
+ /* Remove if completely consumed. */
+ stream->source = front->source;
+ spin_lock(&rreq->lock);
+
+ remove = front;
+ trace_netfs_sreq(front,
+ notes & ABANDON_SREQ ?
+ netfs_sreq_trace_abandoned : netfs_sreq_trace_consumed);
+ list_del_init(&front->rreq_link);
+ front = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ stream->front = front;
+ spin_unlock(&rreq->lock);
+ netfs_put_subrequest(remove,
+ notes & ABANDON_SREQ ?
+ netfs_sreq_trace_put_abandon :
+ netfs_sreq_trace_put_done);
+ }
+
+ trace_netfs_collect_stream(rreq, stream);
+ trace_netfs_collect_state(rreq, rreq->collected_to, notes);
+
+ if (!(notes & BUFFERED))
+ rreq->cleaned_to = rreq->collected_to;
+
+ if (notes & NEED_RETRY)
+ goto need_retry;
+ if (notes & MADE_PROGRESS) {
+ netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
+ //cond_resched();
+ goto reassess;
+ }
+
+out:
+ _leave(" = %x", notes);
+ return;
+
+need_retry:
+ /* Okay... We're going to have to retry parts of the stream. Note
+ * that any partially completed op will have had any wholly transferred
+ * folios removed from it.
+ */
+ _debug("retry");
+ netfs_retry_reads(rreq);
+ goto out;
+}
+
+/*
+ * Do page flushing and suchlike after DIO.
+ */
+static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
+{
+ unsigned int i;
+
+ if (rreq->origin == NETFS_UNBUFFERED_READ ||
+ rreq->origin == NETFS_DIO_READ) {
+ for (i = 0; i < rreq->direct_bv_count; i++) {
+ flush_dcache_page(rreq->direct_bv[i].bv_page);
+ // TODO: cifs marks pages in the destination buffer
+ // dirty under some circumstances after a read. Do we
+ // need to do that too?
+ set_page_dirty(rreq->direct_bv[i].bv_page);
+ }
+ }
+
+ if (rreq->iocb) {
+ rreq->iocb->ki_pos += rreq->transferred;
+ if (rreq->iocb->ki_complete) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
+ rreq->iocb->ki_complete(
+ rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+ }
+ }
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+ if (rreq->origin == NETFS_UNBUFFERED_READ ||
+ rreq->origin == NETFS_DIO_READ)
+ inode_dio_end(rreq->inode);
+}
+
+/*
+ * Do processing after reading a monolithic single object.
+ */
+static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER &&
+ fscache_resources_valid(&rreq->cache_resources)) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
+ netfs_single_mark_inode_dirty(rreq->inode);
+ }
+
+ if (rreq->iocb) {
+ rreq->iocb->ki_pos += rreq->transferred;
+ if (rreq->iocb->ki_complete) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
+ rreq->iocb->ki_complete(
+ rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+ }
+ }
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+}
+
+/*
+ * Perform the collection of subrequests and folios.
+ *
+ * Note that we're in normal kernel thread context at this point, possibly
+ * running on a workqueue.
+ */
+bool netfs_read_collection(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ netfs_collect_read_results(rreq);
+
+ /* We're done when the app thread has finished posting subreqs and the
+ * queue is empty.
+ */
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
+ return false;
+ smp_rmb(); /* Read ALL_QUEUED before subreq lists. */
+
+ if (!list_empty(&stream->subrequests))
+ return false;
+
+ /* Okay, declare that all I/O is complete. */
+ rreq->transferred = stream->transferred;
+ trace_netfs_rreq(rreq, netfs_rreq_trace_complete);
+
+ //netfs_rreq_is_still_valid(rreq);
+
+ switch (rreq->origin) {
+ case NETFS_UNBUFFERED_READ:
+ case NETFS_DIO_READ:
+ case NETFS_READ_GAPS:
+ netfs_rreq_assess_dio(rreq);
+ break;
+ case NETFS_READ_SINGLE:
+ netfs_rreq_assess_single(rreq);
+ break;
+ default:
+ break;
+ }
+ task_io_account_read(rreq->transferred);
+
+ netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+ netfs_clear_subrequests(rreq);
+ netfs_unlock_abandoned_read_pages(rreq);
+ if (unlikely(rreq->copy_to_cache))
+ netfs_pgpriv2_end_copy_to_cache(rreq);
+ return true;
+}
+
+void netfs_read_collection_worker(struct work_struct *work)
+{
+ struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
+
+ netfs_see_request(rreq, netfs_rreq_trace_see_work);
+ if (netfs_check_rreq_in_progress(rreq)) {
+ if (netfs_read_collection(rreq))
+ /* Drop the ref from the IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ else
+ netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
+ }
+}
+
+/**
+ * netfs_read_subreq_progress - Note progress of a read operation.
+ * @subreq: The read subrequest that has made progress.
+ *
+ * This tells the read side of netfs lib that a contributory I/O operation has
+ * made some progress and that it may be possible to unlock some folios.
+ *
+ * Before calling, the filesystem should update subreq->transferred to track
+ * the amount of data copied into the output buffer.
+ */
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ size_t fsize = PAGE_SIZE << rreq->front_folio_order;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_progress);
+
+ /* If we are at the head of the queue, wake up the collector,
+ * getting a ref to it if we were the ones to do so.
+ */
+ if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize &&
+ (rreq->origin == NETFS_READAHEAD ||
+ rreq->origin == NETFS_READPAGE ||
+ rreq->origin == NETFS_READ_FOR_WRITE) &&
+ list_is_first(&subreq->rreq_link, &stream->subrequests)
+ ) {
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ netfs_wake_collector(rreq);
+ }
+}
+EXPORT_SYMBOL(netfs_read_subreq_progress);
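
As a hedged illustration of the calling convention only (nothing here is taken from a real filesystem): a receive path would account the newly arrived bytes against the subrequest before poking the collector.

	/* Hypothetical receive-side helper for a filesystem "myfs": data for
	 * @subreq has landed in the destination buffer, so record it and let
	 * netfslib consider unlocking the folios that are now complete.
	 */
	static void myfs_data_received(struct netfs_io_subrequest *subreq,
				       size_t received)
	{
		subreq->transferred += received;
		netfs_read_subreq_progress(subreq);
	}
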
+
+/**
+ * netfs_read_subreq_terminated - Note the termination of an I/O operation.
+ * @subreq: The I/O subrequest that has terminated.
+ *
+ * This tells the read helper that a contributory I/O operation has terminated,
+ * one way or another, and that it should integrate the results.
+ *
+ * The caller indicates the outcome of the operation through @subreq->error,
+ * supplying 0 to indicate a successful or retryable transfer (if
+ * NETFS_SREQ_NEED_RETRY is set) or a negative error code. The helper will
+ * look after reissuing I/O operations as appropriate and writing downloaded
+ * data to the cache.
+ *
+ * Before calling, the filesystem should update subreq->transferred to track
+ * the amount of data copied into the output buffer.
+ */
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ switch (subreq->source) {
+ case NETFS_READ_FROM_CACHE:
+ netfs_stat(&netfs_n_rh_read_done);
+ break;
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_stat(&netfs_n_rh_download_done);
+ break;
+ default:
+ break;
+ }
+
+ /* Deal with retry requests, short reads and errors. If we retry
+ * but don't make progress, we abandon the attempt.
+ */
+ if (!subreq->error && subreq->transferred < subreq->len) {
+ if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
+ } else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear);
+ } else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry);
+ } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read);
+ } else {
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ subreq->error = -ENODATA;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_short);
+ }
+ }
+
+ if (unlikely(subreq->error < 0)) {
+ trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
+ if (subreq->source == NETFS_READ_FROM_CACHE) {
+ netfs_stat(&netfs_n_rh_read_failed);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ } else {
+ netfs_stat(&netfs_n_rh_download_failed);
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ }
+ trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
+ set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+ netfs_subreq_clear_in_progress(subreq);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
+}
+EXPORT_SYMBOL(netfs_read_subreq_terminated);
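
A minimal sketch of the expected completion sequence, assuming a hypothetical filesystem "myfs": transferred, flags and error are updated first, then the subrequest is handed back so that the collector can consume, retry or fail it as described above.

	/* Hypothetical completion handler for a server read.  @result is the
	 * number of bytes copied into the output buffer, or a negative errno.
	 */
	static void myfs_read_done(struct netfs_io_subrequest *subreq, ssize_t result)
	{
		if (result < 0) {
			subreq->error = result;
		} else {
			if (result > 0)
				__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->transferred += result;
			subreq->error = 0;
		}
		netfs_read_subreq_terminated(subreq);
	}
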
+
+/*
+ * Handle termination of a read from the cache.
+ */
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error)
+{
+ struct netfs_io_subrequest *subreq = priv;
+
+ if (transferred_or_error > 0) {
+ subreq->error = 0;
+ subreq->transferred += transferred_or_error;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
+ } else {
+ subreq->error = transferred_or_error;
+ }
+ netfs_read_subreq_terminated(subreq);
+}
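
For orientation: this function is meant to be handed to the cache backend as the termination callback when the cache read is issued, with the subrequest as the private cookie, so an asynchronous backend simply reports the byte count or a negative errno when it finishes. A hedged sketch, with my_backend_read_done being a hypothetical name:

	/* Hypothetical cache-backend completion path: pass the result straight
	 * back to netfslib.  @cookie is the netfs_io_subrequest that was given
	 * as the termination cookie when the read was issued; @result is the
	 * number of bytes read or a negative errno.
	 */
	static void my_backend_read_done(void *cookie, ssize_t result)
	{
		netfs_cache_read_terminated(cookie, result);
	}
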
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
deleted file mode 100644
index 9320a42dfaf9..000000000000
--- a/fs/netfs/read_helper.c
+++ /dev/null
@@ -1,1209 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Network filesystem high-level read support.
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/sched/mm.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/netfs.h>
-#include "internal.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/netfs.h>
-
-MODULE_DESCRIPTION("Network fs support");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned netfs_debug;
-module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
-
-static void netfs_rreq_work(struct work_struct *);
-static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);
-
-static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
- bool was_async)
-{
- if (refcount_dec_and_test(&subreq->usage))
- __netfs_put_subrequest(subreq, was_async);
-}
-
-static struct netfs_read_request *netfs_alloc_read_request(
- const struct netfs_read_request_ops *ops, void *netfs_priv,
- struct file *file)
-{
- static atomic_t debug_ids;
- struct netfs_read_request *rreq;
-
- rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
- if (rreq) {
- rreq->netfs_ops = ops;
- rreq->netfs_priv = netfs_priv;
- rreq->inode = file_inode(file);
- rreq->i_size = i_size_read(rreq->inode);
- rreq->debug_id = atomic_inc_return(&debug_ids);
- INIT_LIST_HEAD(&rreq->subrequests);
- INIT_WORK(&rreq->work, netfs_rreq_work);
- refcount_set(&rreq->usage, 1);
- __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- ops->init_rreq(rreq, file);
- netfs_stat(&netfs_n_rh_rreq);
- }
-
- return rreq;
-}
-
-static void netfs_get_read_request(struct netfs_read_request *rreq)
-{
- refcount_inc(&rreq->usage);
-}
-
-static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
- bool was_async)
-{
- struct netfs_read_subrequest *subreq;
-
- while (!list_empty(&rreq->subrequests)) {
- subreq = list_first_entry(&rreq->subrequests,
- struct netfs_read_subrequest, rreq_link);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, was_async);
- }
-}
-
-static void netfs_free_read_request(struct work_struct *work)
-{
- struct netfs_read_request *rreq =
- container_of(work, struct netfs_read_request, work);
- netfs_rreq_clear_subreqs(rreq, false);
- if (rreq->netfs_priv)
- rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
- trace_netfs_rreq(rreq, netfs_rreq_trace_free);
- if (rreq->cache_resources.ops)
- rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
- kfree(rreq);
- netfs_stat_d(&netfs_n_rh_rreq);
-}
-
-static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
-{
- if (refcount_dec_and_test(&rreq->usage)) {
- if (was_async) {
- rreq->work.func = netfs_free_read_request;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_free_read_request(&rreq->work);
- }
- }
-}
-
-/*
- * Allocate and partially initialise an I/O request structure.
- */
-static struct netfs_read_subrequest *netfs_alloc_subrequest(
- struct netfs_read_request *rreq)
-{
- struct netfs_read_subrequest *subreq;
-
- subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
- if (subreq) {
- INIT_LIST_HEAD(&subreq->rreq_link);
- refcount_set(&subreq->usage, 2);
- subreq->rreq = rreq;
- netfs_get_read_request(rreq);
- netfs_stat(&netfs_n_rh_sreq);
- }
-
- return subreq;
-}
-
-static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
-{
- refcount_inc(&subreq->usage);
-}
-
-static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
- bool was_async)
-{
- struct netfs_read_request *rreq = subreq->rreq;
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_free);
- kfree(subreq);
- netfs_stat_d(&netfs_n_rh_sreq);
- netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Clear the unread part of an I/O request.
- */
-static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
-{
- struct iov_iter iter;
-
- iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
- subreq->start + subreq->transferred,
- subreq->len - subreq->transferred);
- iov_iter_zero(iov_iter_count(&iter), &iter);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_read_subrequest *subreq = priv;
-
- netfs_subreq_terminated(subreq, transferred_or_error, was_async);
-}
-
-/*
- * Issue a read against the cache.
- * - Eats the caller's ref on subreq.
- */
-static void netfs_read_from_cache(struct netfs_read_request *rreq,
- struct netfs_read_subrequest *subreq,
- bool seek_data)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
- struct iov_iter iter;
-
- netfs_stat(&netfs_n_rh_read);
- iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
- subreq->start + subreq->transferred,
- subreq->len - subreq->transferred);
-
- cres->ops->read(cres, subreq->start, &iter, seek_data,
- netfs_cache_read_terminated, subreq);
-}
-
-/*
- * Fill a subrequest region with zeroes.
- */
-static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
- struct netfs_read_subrequest *subreq)
-{
- netfs_stat(&netfs_n_rh_zero);
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_subreq_terminated(subreq, 0, false);
-}
-
-/*
- * Ask the netfs to issue a read request to the server for us.
- *
- * The netfs is expected to read from subreq->pos + subreq->transferred to
- * subreq->pos + subreq->len - 1. It may not backtrack and write data into the
- * buffer prior to the transferred point as it might clobber dirty data
- * obtained from the cache.
- *
- * Alternatively, the netfs is allowed to indicate one of two things:
- *
- * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
- * make progress.
- *
- * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
- * cleared.
- */
-static void netfs_read_from_server(struct netfs_read_request *rreq,
- struct netfs_read_subrequest *subreq)
-{
- netfs_stat(&netfs_n_rh_download);
- rreq->netfs_ops->issue_op(subreq);
-}
-
-/*
- * Release those waiting.
- */
-static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
-{
- trace_netfs_rreq(rreq, netfs_rreq_trace_done);
- netfs_rreq_clear_subreqs(rreq, was_async);
- netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Deal with the completion of writing the data to the cache. We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
- bool was_async)
-{
- struct netfs_read_subrequest *subreq;
- struct folio *folio;
- pgoff_t unlocked = 0;
- bool have_unlocked = false;
-
- rcu_read_lock();
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
- xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
- /* We might have multiple writes from the same huge
- * folio, but we mustn't unlock a folio more than once.
- */
- if (have_unlocked && folio_index(folio) <= unlocked)
- continue;
- unlocked = folio_index(folio);
- folio_end_fscache(folio);
- have_unlocked = true;
- }
- }
-
- rcu_read_unlock();
- netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_read_subrequest *subreq = priv;
- struct netfs_read_request *rreq = subreq->rreq;
-
- if (IS_ERR_VALUE(transferred_or_error)) {
- netfs_stat(&netfs_n_rh_write_failed);
- trace_netfs_failure(rreq, subreq, transferred_or_error,
- netfs_fail_copy_to_cache);
- } else {
- netfs_stat(&netfs_n_rh_write_done);
- }
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
- /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_wr_ops))
- netfs_rreq_unmark_after_write(rreq, was_async);
-
- netfs_put_subrequest(subreq, was_async);
-}
-
-/*
- * Perform any outstanding writes to the cache. We inherit a ref from the
- * caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
- struct netfs_read_subrequest *subreq, *next, *p;
- struct iov_iter iter;
- int ret;
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_write);
-
- /* We don't want terminating writes trying to wake us up whilst we're
- * still going through the list.
- */
- atomic_inc(&rreq->nr_wr_ops);
-
- list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
- if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
- list_del_init(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false);
- }
- }
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- /* Amalgamate adjacent writes */
- while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- next = list_next_entry(subreq, rreq_link);
- if (next->start != subreq->start + subreq->len)
- break;
- subreq->len += next->len;
- list_del_init(&next->rreq_link);
- netfs_put_subrequest(next, false);
- }
-
- ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
- rreq->i_size);
- if (ret < 0) {
- trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
- trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
- continue;
- }
-
- iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
- subreq->start, subreq->len);
-
- atomic_inc(&rreq->nr_wr_ops);
- netfs_stat(&netfs_n_rh_write);
- netfs_get_read_subrequest(subreq);
- trace_netfs_sreq(subreq, netfs_sreq_trace_write);
- cres->ops->write(cres, subreq->start, &iter,
- netfs_rreq_copy_terminated, subreq);
- }
-
- /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_wr_ops))
- netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
-{
- struct netfs_read_request *rreq =
- container_of(work, struct netfs_read_request, work);
-
- netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
- bool was_async)
-{
- if (was_async) {
- rreq->work.func = netfs_rreq_write_to_cache_work;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_rreq_do_write_to_cache(rreq);
- }
-}
-
-/*
- * Unlock the folios in a read operation. We need to set PG_fscache on any
- * folios we're going to write back before we unlock them.
- */
-static void netfs_rreq_unlock(struct netfs_read_request *rreq)
-{
- struct netfs_read_subrequest *subreq;
- struct folio *folio;
- unsigned int iopos, account = 0;
- pgoff_t start_page = rreq->start / PAGE_SIZE;
- pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
- bool subreq_failed = false;
-
- XA_STATE(xas, &rreq->mapping->i_pages, start_page);
-
- if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
- __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
- }
- }
-
- /* Walk through the pagecache and the I/O request lists simultaneously.
- * We may have a mixture of cached and uncached sections and we only
- * really want to write out the uncached sections. This is slightly
- * complicated by the possibility that we might have huge pages with a
- * mixture inside.
- */
- subreq = list_first_entry(&rreq->subrequests,
- struct netfs_read_subrequest, rreq_link);
- iopos = 0;
- subreq_failed = (subreq->error < 0);
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
-
- rcu_read_lock();
- xas_for_each(&xas, folio, last_page) {
- unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
- unsigned int pgend = pgpos + folio_size(folio);
- bool pg_failed = false;
-
- for (;;) {
- if (!subreq) {
- pg_failed = true;
- break;
- }
- if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
- folio_start_fscache(folio);
- pg_failed |= subreq_failed;
- if (pgend < iopos + subreq->len)
- break;
-
- account += subreq->transferred;
- iopos += subreq->len;
- if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- subreq = list_next_entry(subreq, rreq_link);
- subreq_failed = (subreq->error < 0);
- } else {
- subreq = NULL;
- subreq_failed = false;
- }
- if (pgend == iopos)
- break;
- }
-
- if (!pg_failed) {
- flush_dcache_folio(folio);
- folio_mark_uptodate(folio);
- }
-
- if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
- if (folio_index(folio) == rreq->no_unlock_folio &&
- test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
- _debug("no unlock");
- else
- folio_unlock(folio);
- }
- }
- rcu_read_unlock();
-
- task_io_account_read(account);
- if (rreq->netfs_ops->done)
- rreq->netfs_ops->done(rreq);
-}
-
-/*
- * Handle a short read.
- */
-static void netfs_rreq_short_read(struct netfs_read_request *rreq,
- struct netfs_read_subrequest *subreq)
-{
- __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
- __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
-
- netfs_stat(&netfs_n_rh_short_read);
- trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
-
- netfs_get_read_subrequest(subreq);
- atomic_inc(&rreq->nr_rd_ops);
- if (subreq->source == NETFS_READ_FROM_CACHE)
- netfs_read_from_cache(rreq, subreq, true);
- else
- netfs_read_from_server(rreq, subreq);
-}
-
-/*
- * Resubmit any short or failed operations. Returns true if we got the rreq
- * ref back.
- */
-static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
-{
- struct netfs_read_subrequest *subreq;
-
- WARN_ON(in_interrupt());
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
-
- /* We don't want terminating submissions trying to wake us up whilst
- * we're still going through the list.
- */
- atomic_inc(&rreq->nr_rd_ops);
-
- __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- if (subreq->error) {
- if (subreq->source != NETFS_READ_FROM_CACHE)
- break;
- subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
- subreq->error = 0;
- netfs_stat(&netfs_n_rh_download_instead);
- trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
- netfs_get_read_subrequest(subreq);
- atomic_inc(&rreq->nr_rd_ops);
- netfs_read_from_server(rreq, subreq);
- } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
- netfs_rreq_short_read(rreq, subreq);
- }
- }
-
- /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_rd_ops))
- return true;
-
- wake_up_var(&rreq->nr_rd_ops);
- return false;
-}
-
-/*
- * Check to see if the data read is still valid.
- */
-static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
-{
- struct netfs_read_subrequest *subreq;
-
- if (!rreq->netfs_ops->is_still_valid ||
- rreq->netfs_ops->is_still_valid(rreq))
- return;
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- if (subreq->source == NETFS_READ_FROM_CACHE) {
- subreq->error = -ESTALE;
- __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- }
- }
-}
-
-/*
- * Assess the state of a read request and decide what to do next.
- *
- * Note that we could be in an ordinary kernel thread, on a workqueue or in
- * softirq context at this point. We inherit a ref from the caller.
- */
-static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
-{
- trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
-
-again:
- netfs_rreq_is_still_valid(rreq);
-
- if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
- test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
- if (netfs_rreq_perform_resubmissions(rreq))
- goto again;
- return;
- }
-
- netfs_rreq_unlock(rreq);
-
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
-
- if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
- return netfs_rreq_write_to_cache(rreq, was_async);
-
- netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_work(struct work_struct *work)
-{
- struct netfs_read_request *rreq =
- container_of(work, struct netfs_read_request, work);
- netfs_rreq_assess(rreq, false);
-}
-
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-static void netfs_rreq_terminated(struct netfs_read_request *rreq,
- bool was_async)
-{
- if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
- was_async) {
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_rreq_assess(rreq, was_async);
- }
-}
-
-/**
- * netfs_subreq_terminated - Note the termination of an I/O operation.
- * @subreq: The I/O request that has terminated.
- * @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
- *
- * This tells the read helper that a contributory I/O operation has terminated,
- * one way or another, and that it should integrate the results.
- *
- * The caller indicates in @transferred_or_error the outcome of the operation,
- * supplying a positive value to indicate the number of bytes transferred, 0 to
- * indicate a failure to transfer anything that should be retried or a negative
- * error code. The helper will look after reissuing I/O operations as
- * appropriate and writing downloaded data to the cache.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- */
-void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
- ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_read_request *rreq = subreq->rreq;
- int u;
-
- _enter("[%u]{%llx,%lx},%zd",
- subreq->debug_index, subreq->start, subreq->flags,
- transferred_or_error);
-
- switch (subreq->source) {
- case NETFS_READ_FROM_CACHE:
- netfs_stat(&netfs_n_rh_read_done);
- break;
- case NETFS_DOWNLOAD_FROM_SERVER:
- netfs_stat(&netfs_n_rh_download_done);
- break;
- default:
- break;
- }
-
- if (IS_ERR_VALUE(transferred_or_error)) {
- subreq->error = transferred_or_error;
- trace_netfs_failure(rreq, subreq, transferred_or_error,
- netfs_fail_read);
- goto failed;
- }
-
- if (WARN(transferred_or_error > subreq->len - subreq->transferred,
- "Subreq overread: R%x[%x] %zd > %zu - %zu",
- rreq->debug_id, subreq->debug_index,
- transferred_or_error, subreq->len, subreq->transferred))
- transferred_or_error = subreq->len - subreq->transferred;
-
- subreq->error = 0;
- subreq->transferred += transferred_or_error;
- if (subreq->transferred < subreq->len)
- goto incomplete;
-
-complete:
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
- set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-
-out:
- trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
- u = atomic_dec_return(&rreq->nr_rd_ops);
- if (u == 0)
- netfs_rreq_terminated(rreq, was_async);
- else if (u == 1)
- wake_up_var(&rreq->nr_rd_ops);
-
- netfs_put_subrequest(subreq, was_async);
- return;
-
-incomplete:
- if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
- netfs_clear_unread(subreq);
- subreq->transferred = subreq->len;
- goto complete;
- }
-
- if (transferred_or_error == 0) {
- if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
- subreq->error = -ENODATA;
- goto failed;
- }
- } else {
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- }
-
- __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
- set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- goto out;
-
-failed:
- if (subreq->source == NETFS_READ_FROM_CACHE) {
- netfs_stat(&netfs_n_rh_read_failed);
- set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- } else {
- netfs_stat(&netfs_n_rh_download_failed);
- set_bit(NETFS_RREQ_FAILED, &rreq->flags);
- rreq->error = subreq->error;
- }
- goto out;
-}
-EXPORT_SYMBOL(netfs_subreq_terminated);
-
-static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
- loff_t i_size)
-{
- struct netfs_read_request *rreq = subreq->rreq;
- struct netfs_cache_resources *cres = &rreq->cache_resources;
-
- if (cres->ops)
- return cres->ops->prepare_read(subreq, i_size);
- if (subreq->start >= rreq->i_size)
- return NETFS_FILL_WITH_ZEROES;
- return NETFS_DOWNLOAD_FROM_SERVER;
-}
-
-/*
- * Work out what sort of subrequest the next one will be.
- */
-static enum netfs_read_source
-netfs_rreq_prepare_read(struct netfs_read_request *rreq,
- struct netfs_read_subrequest *subreq)
-{
- enum netfs_read_source source;
-
- _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
-
- source = netfs_cache_prepare_read(subreq, rreq->i_size);
- if (source == NETFS_INVALID_READ)
- goto out;
-
- if (source == NETFS_DOWNLOAD_FROM_SERVER) {
- /* Call out to the netfs to let it shrink the request to fit
- * its own I/O sizes and boundaries. If it shinks it here, it
- * will be called again to make simultaneous calls; if it wants
- * to make serial calls, it can indicate a short read and then
- * we will call it again.
- */
- if (subreq->len > rreq->i_size - subreq->start)
- subreq->len = rreq->i_size - subreq->start;
-
- if (rreq->netfs_ops->clamp_length &&
- !rreq->netfs_ops->clamp_length(subreq)) {
- source = NETFS_INVALID_READ;
- goto out;
- }
- }
-
- if (WARN_ON(subreq->len == 0))
- source = NETFS_INVALID_READ;
-
-out:
- subreq->source = source;
- trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
- return source;
-}
-
-/*
- * Slice off a piece of a read request and submit an I/O request for it.
- */
-static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
- unsigned int *_debug_index)
-{
- struct netfs_read_subrequest *subreq;
- enum netfs_read_source source;
-
- subreq = netfs_alloc_subrequest(rreq);
- if (!subreq)
- return false;
-
- subreq->debug_index = (*_debug_index)++;
- subreq->start = rreq->start + rreq->submitted;
- subreq->len = rreq->len - rreq->submitted;
-
- _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
-
- /* Call out to the cache to find out what it can do with the remaining
- * subset. It tells us in subreq->flags what it decided should be done
- * and adjusts subreq->len down if the subset crosses a cache boundary.
- *
- * Then when we hand the subset, it can choose to take a subset of that
- * (the starts must coincide), in which case, we go around the loop
- * again and ask it to download the next piece.
- */
- source = netfs_rreq_prepare_read(rreq, subreq);
- if (source == NETFS_INVALID_READ)
- goto subreq_failed;
-
- atomic_inc(&rreq->nr_rd_ops);
-
- rreq->submitted += subreq->len;
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- switch (source) {
- case NETFS_FILL_WITH_ZEROES:
- netfs_fill_with_zeroes(rreq, subreq);
- break;
- case NETFS_DOWNLOAD_FROM_SERVER:
- netfs_read_from_server(rreq, subreq);
- break;
- case NETFS_READ_FROM_CACHE:
- netfs_read_from_cache(rreq, subreq, false);
- break;
- default:
- BUG();
- }
-
- return true;
-
-subreq_failed:
- rreq->error = subreq->error;
- netfs_put_subrequest(subreq, false);
- return false;
-}
-
-static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
- loff_t *_start, size_t *_len, loff_t i_size)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
-
- if (cres->ops && cres->ops->expand_readahead)
- cres->ops->expand_readahead(cres, _start, _len, i_size);
-}
-
-static void netfs_rreq_expand(struct netfs_read_request *rreq,
- struct readahead_control *ractl)
-{
- /* Give the cache a chance to change the request parameters. The
- * resultant request must contain the original region.
- */
- netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
-
- /* Give the netfs a chance to change the request parameters. The
- * resultant request must contain the original region.
- */
- if (rreq->netfs_ops->expand_readahead)
- rreq->netfs_ops->expand_readahead(rreq);
-
- /* Expand the request if the cache wants it to start earlier. Note
- * that the expansion may get further extended if the VM wishes to
- * insert THPs and the preferred start and/or end wind up in the middle
- * of THPs.
- *
- * If this is the case, however, the THP size should be an integer
- * multiple of the cache granule size, so we get a whole number of
- * granules to deal with.
- */
- if (rreq->start != readahead_pos(ractl) ||
- rreq->len != readahead_length(ractl)) {
- readahead_expand(ractl, rreq->start, rreq->len);
- rreq->start = readahead_pos(ractl);
- rreq->len = readahead_length(ractl);
-
- trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
- netfs_read_trace_expanded);
- }
-}
-
-/**
- * netfs_readahead - Helper to manage a read request
- * @ractl: The description of the readahead request
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readahead request by drawing data from the cache if possible, or
- * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
- * requests from different sources will get munged together. If necessary, the
- * readahead window can be expanded in either direction to a more convenient
- * alighment for RPC efficiency or to make storage in the cache feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory. It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-void netfs_readahead(struct readahead_control *ractl,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv)
-{
- struct netfs_read_request *rreq;
- unsigned int debug_index = 0;
- int ret;
-
- _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
-
- if (readahead_count(ractl) == 0)
- goto cleanup;
-
- rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
- if (!rreq)
- goto cleanup;
- rreq->mapping = ractl->mapping;
- rreq->start = readahead_pos(ractl);
- rreq->len = readahead_length(ractl);
-
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
- goto cleanup_free;
- }
-
- netfs_stat(&netfs_n_rh_readahead);
- trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
- netfs_read_trace_readahead);
-
- netfs_rreq_expand(rreq, ractl);
-
- atomic_set(&rreq->nr_rd_ops, 1);
- do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
- break;
-
- } while (rreq->submitted < rreq->len);
-
- /* Drop the refs on the folios here rather than in the cache or
- * filesystem. The locks will be dropped in netfs_rreq_unlock().
- */
- while (readahead_folio(ractl))
- ;
-
- /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_rd_ops))
- netfs_rreq_assess(rreq, false);
- return;
-
-cleanup_free:
- netfs_put_read_request(rreq, false);
- return;
-cleanup:
- if (netfs_priv)
- ops->cleanup(ractl->mapping, netfs_priv);
- return;
-}
-EXPORT_SYMBOL(netfs_readahead);
-
-/**
- * netfs_readpage - Helper to manage a readpage request
- * @file: The file to read from
- * @folio: The folio to read
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readpage request by drawing data from the cache if possible, or the
- * netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests
- * from different sources will get munged together.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory. It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_readpage(struct file *file,
- struct folio *folio,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv)
-{
- struct netfs_read_request *rreq;
- unsigned int debug_index = 0;
- int ret;
-
- _enter("%lx", folio_index(folio));
-
- rreq = netfs_alloc_read_request(ops, netfs_priv, file);
- if (!rreq) {
- if (netfs_priv)
- ops->cleanup(netfs_priv, folio_file_mapping(folio));
- folio_unlock(folio);
- return -ENOMEM;
- }
- rreq->mapping = folio_file_mapping(folio);
- rreq->start = folio_file_pos(folio);
- rreq->len = folio_size(folio);
-
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
- folio_unlock(folio);
- goto out;
- }
- }
-
- netfs_stat(&netfs_n_rh_readpage);
- trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
-
- netfs_get_read_request(rreq);
-
- atomic_set(&rreq->nr_rd_ops, 1);
- do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
- break;
-
- } while (rreq->submitted < rreq->len);
-
- /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
- * the service code isn't punted off to a random thread pool to
- * process.
- */
- do {
- wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
- netfs_rreq_assess(rreq, false);
- } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
- ret = -EIO;
- }
-out:
- netfs_put_read_request(rreq, false);
- return ret;
-}
-EXPORT_SYMBOL(netfs_readpage);
-
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
- * @folio: The folio being prepared
- * @pos: starting position for the write
- * @len: length of write
- *
- * In some cases, write_begin doesn't need to read at all:
- * - full folio write
- * - write that lies in a folio that is completely beyond EOF
- * - write that covers the folio from start to EOF or beyond it
- *
- * If any of these criteria are met, then zero out the unwritten parts
- * of the folio and return true. Otherwise, return false.
- */
-static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
-{
- struct inode *inode = folio_inode(folio);
- loff_t i_size = i_size_read(inode);
- size_t offset = offset_in_folio(folio, pos);
-
- /* Full folio write */
- if (offset == 0 && len >= folio_size(folio))
- return true;
-
- /* pos beyond last folio in the file */
- if (pos - offset >= i_size)
- goto zero_out;
-
- /* Write that covers from the start of the folio to EOF or beyond */
- if (offset == 0 && (pos + len) >= i_size)
- goto zero_out;
-
- return false;
-zero_out:
- zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
- return true;
-}
-
-/**
- * netfs_write_begin - Helper to prepare for writing
- * @file: The file to read from
- * @mapping: The mapping to read from
- * @pos: File position at which the write will begin
- * @len: The length of the write (may extend beyond the end of the folio chosen)
- * @aop_flags: AOP_* flags
- * @_folio: Where to put the resultant folio
- * @_fsdata: Place for the netfs to store a cookie
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Pre-read data for a write-begin request by drawing data from the cache if
- * possible, or the netfs if not. Space beyond the EOF is zero-filled.
- * Multiple I/O requests from different sources will get munged together. If
- * necessary, the readahead window can be expanded in either direction to a
- * more convenient alighment for RPC efficiency or to make storage in the cache
- * feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.
- *
- * The check_write_begin() operation can be provided to check for and flush
- * conflicting writes once the folio is grabbed and locked. It is passed a
- * pointer to the fsdata cookie that gets returned to the VM to be passed to
- * write_end. It is permitted to sleep. It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be regot; or return an error.
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int aop_flags,
- struct folio **_folio, void **_fsdata,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv)
-{
- struct netfs_read_request *rreq;
- struct folio *folio;
- struct inode *inode = file_inode(file);
- unsigned int debug_index = 0, fgp_flags;
- pgoff_t index = pos >> PAGE_SHIFT;
- int ret;
-
- DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
-
-retry:
- fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
- if (aop_flags & AOP_FLAG_NOFS)
- fgp_flags |= FGP_NOFS;
- folio = __filemap_get_folio(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping));
- if (!folio)
- return -ENOMEM;
-
- if (ops->check_write_begin) {
- /* Allow the netfs (eg. ceph) to flush conflicts. */
- ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
- if (ret < 0) {
- trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
- if (ret == -EAGAIN)
- goto retry;
- goto error;
- }
- }
-
- if (folio_test_uptodate(folio))
- goto have_folio;
-
- /* If the page is beyond the EOF, we want to clear it - unless it's
- * within the cache granule containing the EOF, in which case we need
- * to preload the granule.
- */
- if (!ops->is_cache_enabled(inode) &&
- netfs_skip_folio_read(folio, pos, len)) {
- netfs_stat(&netfs_n_rh_write_zskip);
- goto have_folio_no_wait;
- }
-
- ret = -ENOMEM;
- rreq = netfs_alloc_read_request(ops, netfs_priv, file);
- if (!rreq)
- goto error;
- rreq->mapping = folio_file_mapping(folio);
- rreq->start = folio_file_pos(folio);
- rreq->len = folio_size(folio);
- rreq->no_unlock_folio = folio_index(folio);
- __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
- netfs_priv = NULL;
-
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
- goto error_put;
- }
-
- netfs_stat(&netfs_n_rh_write_begin);
- trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
-
- /* Expand the request to meet caching requirements and download
- * preferences.
- */
- ractl._nr_pages = folio_nr_pages(folio);
- netfs_rreq_expand(rreq, &ractl);
- netfs_get_read_request(rreq);
-
- /* We hold the folio locks, so we can drop the references */
- folio_get(folio);
- while (readahead_folio(&ractl))
- ;
-
- atomic_set(&rreq->nr_rd_ops, 1);
- do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
- break;
-
- } while (rreq->submitted < rreq->len);
-
- /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
- * the service code isn't punted off to a random thread pool to
- * process.
- */
- for (;;) {
- wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
- netfs_rreq_assess(rreq, false);
- if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
- break;
- cond_resched();
- }
-
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
- ret = -EIO;
- }
- netfs_put_read_request(rreq, false);
- if (ret < 0)
- goto error;
-
-have_folio:
- ret = folio_wait_fscache_killable(folio);
- if (ret < 0)
- goto error;
-have_folio_no_wait:
- if (netfs_priv)
- ops->cleanup(netfs_priv, mapping);
- *_folio = folio;
- _leave(" = 0");
- return 0;
-
-error_put:
- netfs_put_read_request(rreq, false);
-error:
- folio_unlock(folio);
- folio_put(folio);
- if (netfs_priv)
- ops->cleanup(netfs_priv, mapping);
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(netfs_write_begin);
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
new file mode 100644
index 000000000000..a1489aa29f78
--- /dev/null
+++ b/fs/netfs/read_pgpriv2.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Read with PG_private_2 [DEPRECATED].
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
+ */
+static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
+{
+ struct netfs_io_stream *cache = &creq->io_streams[1];
+ size_t fsize = folio_size(folio), flen = fsize;
+ loff_t fpos = folio_pos(folio), i_size;
+ bool to_eof = false;
+
+ _enter("");
+
+ /* netfs_perform_write() may shift i_size around the page or from out
+ * of the page to beyond it, but cannot move i_size into or through the
+ * page since we have it locked.
+ */
+ i_size = i_size_read(creq->inode);
+
+ if (fpos >= i_size) {
+ /* mmap beyond eof. */
+ _debug("beyond eof");
+ folio_end_private_2(folio);
+ return;
+ }
+
+ if (fpos + fsize > creq->i_size)
+ creq->i_size = i_size;
+
+ if (flen > i_size - fpos) {
+ flen = i_size - fpos;
+ to_eof = true;
+ } else if (flen == i_size - fpos) {
+ to_eof = true;
+ }
+
+ _debug("folio %zx %zx", flen, fsize);
+
+ trace_netfs_folio(folio, netfs_folio_trace_store_copy);
+
+ /* Attach the folio to the rolling buffer. */
+ if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
+ return;
+ }
+
+ cache->submit_extendable_to = fsize;
+ cache->submit_off = 0;
+ cache->submit_len = flen;
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ do {
+ ssize_t part;
+
+ creq->buffer.iter.iov_offset = cache->submit_off;
+
+ atomic64_set(&creq->issued_to, fpos + cache->submit_off);
+ cache->submit_extendable_to = fsize - cache->submit_off;
+ part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
+ cache->submit_len, to_eof);
+ cache->submit_off += part;
+ if (part > cache->submit_len)
+ cache->submit_len = 0;
+ else
+ cache->submit_len -= part;
+ } while (cache->submit_len > 0);
+
+ creq->buffer.iter.iov_offset = 0;
+ rolling_buffer_advance(&creq->buffer, fsize);
+ atomic64_set(&creq->issued_to, fpos + fsize);
+
+ if (flen < fsize)
+ netfs_issue_write(creq, cache);
+}
+
+/*
+ * [DEPRECATED] Set up copying to the cache.
+ */
+static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
+ struct netfs_io_request *rreq, struct folio *folio)
+{
+ struct netfs_io_request *creq;
+
+ if (!fscache_resources_valid(&rreq->cache_resources))
+ goto cancel;
+
+ creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
+ NETFS_PGPRIV2_COPY_TO_CACHE);
+ if (IS_ERR(creq))
+ goto cancel;
+
+ if (!creq->io_streams[1].avail)
+ goto cancel_put;
+
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags);
+ trace_netfs_copy2cache(rreq, creq);
+ trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
+ netfs_stat(&netfs_n_wh_copy_to_cache);
+ rreq->copy_to_cache = creq;
+ return creq;
+
+cancel_put:
+ netfs_put_failed_request(creq);
+cancel:
+ rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+ return ERR_PTR(-ENOBUFS);
+}
+
+/*
+ * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
+ * it to the copy write request.
+ */
+void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
+{
+ struct netfs_io_request *creq = rreq->copy_to_cache;
+
+ if (!creq)
+ creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
+ if (IS_ERR(creq))
+ return;
+
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_private_2(folio);
+ netfs_pgpriv2_copy_folio(creq, folio);
+}
+
+/*
+ * [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
+ */
+void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
+{
+ struct netfs_io_request *creq = rreq->copy_to_cache;
+
+ if (IS_ERR_OR_NULL(creq))
+ return;
+
+ netfs_issue_write(creq, &creq->io_streams[1]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache);
+ if (list_empty_careful(&creq->io_streams[1].subrequests))
+ netfs_wake_collector(creq);
+
+ netfs_put_request(creq, netfs_rreq_trace_put_return);
+ creq->copy_to_cache = NULL;
+}
+
+/*
+ * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
+ * copying.
+ */
+bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
+{
+ struct folio_queue *folioq = creq->buffer.tail;
+ unsigned long long collected_to = creq->collected_to;
+ unsigned int slot = creq->buffer.first_tail_slot;
+ bool made_progress = false;
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&creq->buffer);
+ slot = 0;
+ }
+
+ for (;;) {
+ struct folio *folio;
+ unsigned long long fpos, fend;
+ size_t fsize, flen;
+
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_private_2(folio),
+ "R=%08x: folio %lx is not marked private_2\n",
+ creq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
+
+ fpos = folio_pos(folio);
+ fsize = folio_size(folio);
+ flen = fsize;
+
+ fend = min_t(unsigned long long, fpos + flen, creq->i_size);
+
+ trace_netfs_collect_folio(creq, folio, fend, collected_to);
+
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
+
+ trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+ folio_end_private_2(folio);
+ creq->cleaned_to = fpos + fsize;
+ made_progress = true;
+
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&creq->buffer);
+ if (!folioq)
+ goto done;
+ slot = 0;
+ }
+
+ if (fpos + fsize >= collected_to)
+ break;
+ }
+
+ creq->buffer.tail = folioq;
+done:
+ creq->buffer.first_tail_slot = slot;
+ return made_progress;
+}
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
new file mode 100644
index 000000000000..b99e84a8170a
--- /dev/null
+++ b/fs/netfs/read_retry.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem read subrequest retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "internal.h"
+
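+/*
+ * Reissue a read subrequest: clear its progress marker, mark it in progress
+ * again and hand it back to the filesystem's ->issue_read() op.
+ */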
+static void netfs_reissue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ netfs_stat(&netfs_n_rh_retry_read_subreq);
+ subreq->rreq->netfs_ops->issue_read(subreq);
+}
+
+/*
+ * Go through the list of failed/short reads, retrying all retryable ones. We
+ * need to switch failed cache reads to network downloads.
+ */
+static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ struct list_head *next;
+
+ _enter("R=%x", rreq->debug_id);
+
+ if (list_empty(&stream->subrequests))
+ return;
+
+ if (rreq->netfs_ops->retry_request)
+ rreq->netfs_ops->retry_request(rreq, NULL);
+
+ /* If there's no renegotiation to do, just resend each retryable subreq
+ * up to the first permanently failed one.
+ */
+ if (!rreq->netfs_ops->prepare_read &&
+ !rreq->cache_resources.ops) {
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ break;
+ if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
+ netfs_reset_iter(subreq);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_read(rreq, subreq);
+ }
+ }
+ return;
+ }
+
+ /* Okay, we need to renegotiate all the download requests and flip any
+ * failed cache reads over to being download requests and negotiate
+ * those also. All fully successful subreqs have been removed from the
+ * list and any spare data from those has been donated.
+ *
+ * What we do is decant the list and rebuild it one subreq at a time so
+ * that we don't end up with donations jumping over a gap we're busy
+ * populating with smaller subrequests. In the event that the subreq
+ * we just launched finishes before we insert the next subreq, it'll
+ * fill in rreq->prev_donated instead.
+ *
+ * Note: Alternatively, we could split the tail subrequest right before
+ * we reissue it and fix up the donations under lock.
+ */
+ next = stream->subrequests.next;
+
+ do {
+ struct netfs_io_subrequest *from, *to, *tmp;
+ struct iov_iter source;
+ unsigned long long start, len;
+ size_t part;
+ bool boundary = false, subreq_superfluous = false;
+
+ /* Go through the subreqs and find the next span of contiguous
+ * buffer that we then rejig (cifs, for example, needs the
+ * rsize renegotiating) and reissue.
+ */
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
+ start = from->start + from->transferred;
+ len = from->len - from->transferred;
+
+ _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
+ rreq->debug_id, from->debug_index,
+ from->start, from->transferred, from->len);
+
+ if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
+ goto abandon;
+
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ break;
+ to = subreq;
+ len += to->len;
+ }
+
+ _debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
+
+ /* Determine the set of buffers we're going to use. Each
+ * subreq gets a subset of a single overall contiguous buffer.
+ */
+ netfs_reset_iter(from);
+ source = from->io_iter;
+ source.count = len;
+
+ /* Work through the sublist. */
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len) {
+ subreq_superfluous = true;
+ break;
+ }
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start - subreq->transferred;
+ subreq->len = len + subreq->transferred;
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ /* Renegotiate max_len (rsize) */
+ stream->sreq_max_len = subreq->len;
+ if (rreq->netfs_ops->prepare_read &&
+ rreq->netfs_ops->prepare_read(subreq) < 0) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ goto abandon;
+ }
+
+ part = umin(len, stream->sreq_max_len);
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+ subreq->len = subreq->transferred + part;
+ subreq->io_iter = source;
+ iov_iter_truncate(&subreq->io_iter, part);
+ iov_iter_advance(&source, part);
+ len -= part;
+ start += part;
+ if (!len) {
+ if (boundary)
+ __set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+ } else {
+ __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+ }
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_read(rreq, subreq);
+ if (subreq == to) {
+ subreq_superfluous = false;
+ break;
+ }
+ }
+
+ /* If we managed to use fewer subreqs, we can discard the
+ * excess; if we used the same number, then we're done.
+ */
+ if (!len) {
+ if (!subreq_superfluous)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
+ }
+
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ subreq = to;
+ goto abandon_after;
+ }
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start;
+ subreq->len = len;
+ subreq->stream_nr = stream->stream_nr;
+ subreq->retry_count = 1;
+
+ trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ stream->sreq_max_len = umin(len, rreq->rsize);
+ stream->sreq_max_segs = 0;
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ goto abandon;
+ }
+
+ part = umin(len, stream->sreq_max_len);
+ subreq->len = subreq->transferred + part;
+ subreq->io_iter = source;
+ iov_iter_truncate(&subreq->io_iter, part);
+ iov_iter_advance(&source, part);
+
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_read(rreq, subreq);
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
+
+ return;
+
+ /* If we hit an error, fail all remaining incomplete subrequests */
+abandon_after:
+ if (list_is_last(&subreq->rreq_link, &stream->subrequests))
+ return;
+ subreq = list_next_entry(subreq, rreq_link);
+abandon:
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!subreq->error &&
+ !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ continue;
+ subreq->error = -ENOMEM;
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ }
+}
+
+/*
+ * Retry reads.
+ */
+void netfs_retry_reads(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ netfs_stat(&netfs_n_rh_retry_read_req);
+
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
+ netfs_wait_for_in_progress_stream(rreq, stream);
+ clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+ netfs_retry_read_subrequests(rreq);
+}
+
+/*
+ * Unlock any pages that haven't been unlocked yet due to abandoned
+ * subrequests.
+ */
+void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
+{
+ struct folio_queue *p;
+
+ for (p = rreq->buffer.tail; p; p = p->next) {
+ for (int slot = 0; slot < folioq_count(p); slot++) {
+ struct folio *folio = folioq_folio(p, slot);
+
+ if (folio && !folioq_is_marked2(p, slot)) {
+ trace_netfs_folio(folio, netfs_folio_trace_abandon);
+ folio_unlock(folio);
+ }
+ }
+ }
+}
diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
new file mode 100644
index 000000000000..8e6264f62a8f
--- /dev/null
+++ b/fs/netfs/read_single.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Single, monolithic object support (e.g. AFS directory).
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_single_mark_inode_dirty - Mark a single, monolithic object inode dirty
+ * @inode: The inode to mark
+ *
+ * Mark an inode that contains a single, monolithic object as dirty so that its
+ * writepages op will get called. If set, the SINGLE_NO_UPLOAD flag indicates
+ * that the object will only be written to the cache and not uploaded (e.g. AFS
+ * directory contents).
+ */
+void netfs_single_mark_inode_dirty(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ bool cache_only = test_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
+ bool caching = fscache_cookie_enabled(netfs_i_cookie(netfs_inode(inode)));
+
+ if (cache_only && !caching)
+ return;
+
+ mark_inode_dirty(inode);
+
+ if (caching && !(inode_state_read_once(inode) & I_PINNING_NETFS_WB)) {
+ bool need_use = false;
+
+ spin_lock(&inode->i_lock);
+ if (!(inode_state_read(inode) & I_PINNING_NETFS_WB)) {
+ inode_state_set(inode, I_PINNING_NETFS_WB);
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(netfs_i_cookie(ictx), true);
+ }
+}
+EXPORT_SYMBOL(netfs_single_mark_inode_dirty);
+
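+/*
+ * Begin a read operation against the cache from which the single object can
+ * be retrieved.
+ */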
+static int netfs_single_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
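+/*
+ * Ask the cache where the data for the subrequest should come from, falling
+ * back to a download from the server if no cache resources are available.
+ */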
+static void netfs_single_cache_prepare_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ if (!cres->ops) {
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ return;
+ }
+ subreq->source = cres->ops->prepare_read(subreq, rreq->i_size);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+}
+
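+/*
+ * Issue a read of the object from the cache into the subrequest's buffer.
+ */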
+static void netfs_single_read_cache(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ _enter("R=%08x[%x]", rreq->debug_id, subreq->debug_index);
+ netfs_stat(&netfs_n_rh_read);
+ cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL,
+ netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Perform a read to a buffer from the cache or the server. Only a single
+ * subreq is permitted as the object must be fetched in a single transaction.
+ */
+static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ struct netfs_io_subrequest *subreq;
+ int ret = 0;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq)
+ return -ENOMEM;
+
+ subreq->source = NETFS_SOURCE_UNKNOWN;
+ subreq->start = 0;
+ subreq->len = rreq->len;
+ subreq->io_iter = rreq->buffer.iter;
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+ stream->front = subreq;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ spin_unlock(&rreq->lock);
+
+ netfs_single_cache_prepare_read(rreq, subreq);
+ switch (subreq->source) {
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0)
+ goto cancel;
+ }
+
+ rreq->netfs_ops->issue_read(subreq);
+ rreq->submitted += subreq->len;
+ break;
+ case NETFS_READ_FROM_CACHE:
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ netfs_single_read_cache(rreq, subreq);
+ rreq->submitted += subreq->len;
+ ret = 0;
+ break;
+ default:
+ pr_warn("Unexpected single-read source %u\n", subreq->source);
+ WARN_ON_ONCE(true);
+ ret = -EIO;
+ break;
+ }
+
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ return ret;
+cancel:
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
+ return ret;
+}
+
+/**
+ * netfs_read_single - Synchronously read a single blob of pages.
+ * @inode: The inode to read from.
+ * @file: The file we're using to read or NULL.
+ * @iter: The buffer we're reading into.
+ *
+ * Fulfil a read request for a single monolithic object by drawing data from
+ * the cache if possible, or the netfs if not. The buffer may be larger than
+ * the file content; any buffer space beyond the EOF will be zero-filled. The
+ * content will be read with a single I/O request (though this may be retried).
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled. If caching is enabled,
+ * the data will be stored as a single object into the cache.
+ */
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter)
+{
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter),
+ NETFS_READ_SINGLE);
+ if (IS_ERR(rreq))
+ return PTR_ERR(rreq);
+
+ ret = netfs_single_begin_cache_read(rreq, ictx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto cleanup_free;
+
+ netfs_stat(&netfs_n_rh_read_single);
+ trace_netfs_read(rreq, 0, rreq->len, netfs_read_trace_read_single);
+
+ rreq->buffer.iter = *iter;
+ netfs_single_dispatch_read(rreq);
+
+ ret = netfs_wait_for_read(rreq);
+ netfs_put_request(rreq, netfs_rreq_trace_put_return);
+ return ret;
+
+cleanup_free:
+ netfs_put_failed_request(rreq);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_read_single);
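+
+/*
+ * Illustrative sketch only (not part of this patch): a filesystem holding a
+ * monolithic object in a kernel buffer might, under that assumption, call the
+ * above along these lines:
+ *
+ *     struct kvec kv = { .iov_base = buf, .iov_len = size };
+ *     struct iov_iter iter;
+ *
+ *     iov_iter_kvec(&iter, ITER_DEST, &kv, 1, size);
+ *     ret = netfs_read_single(inode, file, &iter);
+ */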
diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c
new file mode 100644
index 000000000000..207b6a326651
--- /dev/null
+++ b/fs/netfs/rolling_buffer.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Rolling buffer helpers
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/bitops.h>
+#include <linux/pagemap.h>
+#include <linux/rolling_buffer.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+static atomic_t debug_ids;
+
+/**
+ * netfs_folioq_alloc - Allocate a folio_queue struct
+ * @rreq_id: Associated debugging ID for tracing purposes
+ * @gfp: Allocation constraints
+ * @trace: Trace tag to indicate the purpose of the allocation
+ *
+ * Allocate, initialise and account the folio_queue struct and log a trace line
+ * to mark the allocation.
+ */
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int /*enum netfs_folioq_trace*/ trace)
+{
+ struct folio_queue *fq;
+
+ fq = kmalloc(sizeof(*fq), gfp);
+ if (fq) {
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(fq, rreq_id);
+ fq->debug_id = atomic_inc_return(&debug_ids);
+ trace_netfs_folioq(fq, trace);
+ }
+ return fq;
+}
+EXPORT_SYMBOL(netfs_folioq_alloc);
+
+/**
+ * netfs_folioq_free - Free a folio_queue struct
+ * @folioq: The object to free
+ * @trace: Trace tag to indicate which free
+ *
+ * Free and unaccount the folio_queue struct.
+ */
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int /*enum netfs_trace_folioq*/ trace)
+{
+ trace_netfs_folioq(folioq, trace);
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(folioq);
+}
+EXPORT_SYMBOL(netfs_folioq_free);
+
+/*
+ * Initialise a rolling buffer. We allocate an empty folio queue struct so
+ * that the pointers can be independently driven by the producer and the
+ * consumer.
+ */
+int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
+ unsigned int direction)
+{
+ struct folio_queue *fq;
+
+ fq = netfs_folioq_alloc(rreq_id, GFP_NOFS, netfs_trace_folioq_rollbuf_init);
+ if (!fq)
+ return -ENOMEM;
+
+ roll->head = fq;
+ roll->tail = fq;
+ iov_iter_folio_queue(&roll->iter, direction, fq, 0, 0, 0);
+ return 0;
+}
+
+/*
+ * Add another folio_queue to a rolling buffer if there's no space left.
+ */
+int rolling_buffer_make_space(struct rolling_buffer *roll)
+{
+ struct folio_queue *fq, *head = roll->head;
+
+ if (!folioq_full(head))
+ return 0;
+
+ fq = netfs_folioq_alloc(head->rreq_id, GFP_NOFS, netfs_trace_folioq_make_space);
+ if (!fq)
+ return -ENOMEM;
+ fq->prev = head;
+
+ roll->head = fq;
+ if (folioq_full(head)) {
+ /* Make sure we don't leave the master iterator pointing to a
+ * block that might get immediately consumed.
+ */
+ if (roll->iter.folioq == head &&
+ roll->iter.folioq_slot == folioq_nr_slots(head)) {
+ roll->iter.folioq = fq;
+ roll->iter.folioq_slot = 0;
+ }
+ }
+
+ /* Make sure the initialisation is stored before the next pointer.
+ *
+ * [!] NOTE: After we set head->next, the consumer is at liberty to
+ * immediately delete the old head.
+ */
+ smp_store_release(&head->next, fq);
+ return 0;
+}
+
+/*
+ * Decant the list of folios to read into a rolling buffer.
+ */
+ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
+ struct readahead_control *ractl,
+ struct folio_batch *put_batch)
+{
+ struct folio_queue *fq;
+ struct page **vec;
+ int nr, ix, to;
+ ssize_t size = 0;
+
+ if (rolling_buffer_make_space(roll) < 0)
+ return -ENOMEM;
+
+ fq = roll->head;
+ vec = (struct page **)fq->vec.folios;
+ nr = __readahead_batch(ractl, vec + folio_batch_count(&fq->vec),
+ folio_batch_space(&fq->vec));
+ ix = fq->vec.nr;
+ to = ix + nr;
+ fq->vec.nr = to;
+ for (; ix < to; ix++) {
+ struct folio *folio = folioq_folio(fq, ix);
+ unsigned int order = folio_order(folio);
+
+ fq->orders[ix] = order;
+ size += PAGE_SIZE << order;
+ trace_netfs_folio(folio, netfs_folio_trace_read);
+ if (!folio_batch_add(put_batch, folio))
+ folio_batch_release(put_batch);
+ }
+ WRITE_ONCE(roll->iter.count, roll->iter.count + size);
+
+ /* Store the counter after setting the slot. */
+ smp_store_release(&roll->next_head_slot, to);
+ return size;
+}
+
+/*
+ * Append a folio to the rolling buffer.
+ */
+ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
+ unsigned int flags)
+{
+ ssize_t size = folio_size(folio);
+ int slot;
+
+ if (rolling_buffer_make_space(roll) < 0)
+ return -ENOMEM;
+
+ slot = folioq_append(roll->head, folio);
+ if (flags & ROLLBUF_MARK_1)
+ folioq_mark(roll->head, slot);
+ if (flags & ROLLBUF_MARK_2)
+ folioq_mark2(roll->head, slot);
+
+ WRITE_ONCE(roll->iter.count, roll->iter.count + size);
+
+ /* Store the counter after setting the slot. */
+ smp_store_release(&roll->next_head_slot, slot);
+ return size;
+}
+
+/*
+ * Delete a spent buffer from a rolling queue and return the next in line. We
+ * don't delete the last remaining buffer, so that the producer and consumer
+ * pointers stay independent; NULL is returned instead.
+ */
+struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll)
+{
+ struct folio_queue *spent = roll->tail, *next = READ_ONCE(spent->next);
+
+ if (!next)
+ return NULL;
+ next->prev = NULL;
+ netfs_folioq_free(spent, netfs_trace_folioq_delete);
+ roll->tail = next;
+ return next;
+}
+
+/*
+ * Clear out a rolling queue. Folios that have mark 1 set are put.
+ */
+void rolling_buffer_clear(struct rolling_buffer *roll)
+{
+ struct folio_batch fbatch;
+ struct folio_queue *p;
+
+ folio_batch_init(&fbatch);
+
+ while ((p = roll->tail)) {
+ roll->tail = p->next;
+ for (int slot = 0; slot < folioq_count(p); slot++) {
+ struct folio *folio = folioq_folio(p, slot);
+
+ if (!folio)
+ continue;
+ if (folioq_is_marked(p, slot)) {
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ if (!folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
+ }
+ }
+
+ netfs_folioq_free(p, netfs_trace_folioq_clear);
+ }
+
+ folio_batch_release(&fbatch);
+}
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 9ae538c85378..ab6b916addc4 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -7,11 +7,12 @@
#include <linux/export.h>
#include <linux/seq_file.h>
-#include <linux/netfs.h>
#include "internal.h"
+atomic_t netfs_n_rh_dio_read;
atomic_t netfs_n_rh_readahead;
-atomic_t netfs_n_rh_readpage;
+atomic_t netfs_n_rh_read_folio;
+atomic_t netfs_n_rh_read_single;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
@@ -28,32 +29,75 @@ atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
+atomic_t netfs_n_rh_retry_read_req;
+atomic_t netfs_n_rh_retry_read_subreq;
+atomic_t netfs_n_wh_buffered_write;
+atomic_t netfs_n_wh_writethrough;
+atomic_t netfs_n_wh_dio_write;
+atomic_t netfs_n_wh_writepages;
+atomic_t netfs_n_wh_copy_to_cache;
+atomic_t netfs_n_wh_wstream_conflict;
+atomic_t netfs_n_wh_upload;
+atomic_t netfs_n_wh_upload_done;
+atomic_t netfs_n_wh_upload_failed;
+atomic_t netfs_n_wh_write;
+atomic_t netfs_n_wh_write_done;
+atomic_t netfs_n_wh_write_failed;
+atomic_t netfs_n_wh_retry_write_req;
+atomic_t netfs_n_wh_retry_write_subreq;
+atomic_t netfs_n_wb_lock_skip;
+atomic_t netfs_n_wb_lock_wait;
+atomic_t netfs_n_folioq;
-void netfs_stats_show(struct seq_file *m)
+int netfs_stats_show(struct seq_file *m, void *v)
{
- seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n",
+ seq_printf(m, "Reads : DR=%u RA=%u RF=%u RS=%u WB=%u WBZ=%u\n",
+ atomic_read(&netfs_n_rh_dio_read),
atomic_read(&netfs_n_rh_readahead),
- atomic_read(&netfs_n_rh_readpage),
+ atomic_read(&netfs_n_rh_read_folio),
+ atomic_read(&netfs_n_rh_read_single),
atomic_read(&netfs_n_rh_write_begin),
- atomic_read(&netfs_n_rh_write_zskip),
- atomic_read(&netfs_n_rh_rreq),
- atomic_read(&netfs_n_rh_sreq));
- seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n",
+ atomic_read(&netfs_n_rh_write_zskip));
+ seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n",
+ atomic_read(&netfs_n_wh_buffered_write),
+ atomic_read(&netfs_n_wh_writethrough),
+ atomic_read(&netfs_n_wh_dio_write),
+ atomic_read(&netfs_n_wh_writepages),
+ atomic_read(&netfs_n_wh_copy_to_cache));
+ seq_printf(m, "ZeroOps: ZR=%u sh=%u sk=%u\n",
atomic_read(&netfs_n_rh_zero),
atomic_read(&netfs_n_rh_short_read),
atomic_read(&netfs_n_rh_write_zskip));
- seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n",
+ seq_printf(m, "DownOps: DL=%u ds=%u df=%u di=%u\n",
atomic_read(&netfs_n_rh_download),
atomic_read(&netfs_n_rh_download_done),
atomic_read(&netfs_n_rh_download_failed),
atomic_read(&netfs_n_rh_download_instead));
- seq_printf(m, "RdHelp : RD=%u rs=%u rf=%u\n",
+ seq_printf(m, "CaRdOps: RD=%u rs=%u rf=%u\n",
atomic_read(&netfs_n_rh_read),
atomic_read(&netfs_n_rh_read_done),
atomic_read(&netfs_n_rh_read_failed));
- seq_printf(m, "RdHelp : WR=%u ws=%u wf=%u\n",
- atomic_read(&netfs_n_rh_write),
- atomic_read(&netfs_n_rh_write_done),
- atomic_read(&netfs_n_rh_write_failed));
+ seq_printf(m, "UpldOps: UL=%u us=%u uf=%u\n",
+ atomic_read(&netfs_n_wh_upload),
+ atomic_read(&netfs_n_wh_upload_done),
+ atomic_read(&netfs_n_wh_upload_failed));
+ seq_printf(m, "CaWrOps: WR=%u ws=%u wf=%u\n",
+ atomic_read(&netfs_n_wh_write),
+ atomic_read(&netfs_n_wh_write_done),
+ atomic_read(&netfs_n_wh_write_failed));
+ seq_printf(m, "Retries: rq=%u rs=%u wq=%u ws=%u\n",
+ atomic_read(&netfs_n_rh_retry_read_req),
+ atomic_read(&netfs_n_rh_retry_read_subreq),
+ atomic_read(&netfs_n_wh_retry_write_req),
+ atomic_read(&netfs_n_wh_retry_write_subreq));
+ seq_printf(m, "Objs : rr=%u sr=%u foq=%u wsc=%u\n",
+ atomic_read(&netfs_n_rh_rreq),
+ atomic_read(&netfs_n_rh_sreq),
+ atomic_read(&netfs_n_folioq),
+ atomic_read(&netfs_n_wh_wstream_conflict));
+ seq_printf(m, "WbLock : skip=%u wait=%u\n",
+ atomic_read(&netfs_n_wb_lock_skip),
+ atomic_read(&netfs_n_wb_lock_wait));
+ return fscache_stats_show(m);
}
EXPORT_SYMBOL(netfs_stats_show);
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
new file mode 100644
index 000000000000..cbf3d9194c7b
--- /dev/null
+++ b/fs/netfs/write_collect.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem write subrequest result collection, assessment
+ * and retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/* Notes made in the collector */
+#define HIT_PENDING 0x01 /* A front op was still pending */
+#define NEED_REASSESS 0x02 /* Need to loop round and reassess */
+#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
+#define NEED_UNLOCK 0x08 /* The pagecache needs unlocking */
+#define NEED_RETRY 0x10 /* A front op requests retrying */
+#define SAW_FAILURE 0x20 /* At least one stream hit a permanent failure */
+
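+/*
+ * Dump the state of a request, its streams and their subrequests for
+ * debugging purposes.
+ */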
+static void netfs_dump_request(const struct netfs_io_request *rreq)
+{
+ pr_err("Request R=%08x r=%d fl=%lx or=%x e=%ld\n",
+ rreq->debug_id, refcount_read(&rreq->ref), rreq->flags,
+ rreq->origin, rreq->error);
+ pr_err(" st=%llx tsl=%zx/%llx/%llx\n",
+ rreq->start, rreq->transferred, rreq->submitted, rreq->len);
+ pr_err(" cci=%llx/%llx/%llx\n",
+ rreq->cleaned_to, rreq->collected_to, atomic64_read(&rreq->issued_to));
+ pr_err(" iw=%pSR\n", rreq->netfs_ops->issue_write);
+ for (int i = 0; i < NR_IO_STREAMS; i++) {
+ const struct netfs_io_subrequest *sreq;
+ const struct netfs_io_stream *s = &rreq->io_streams[i];
+
+ pr_err(" str[%x] s=%x e=%d acnf=%u,%u,%u,%u\n",
+ s->stream_nr, s->source, s->error,
+ s->avail, s->active, s->need_retry, s->failed);
+ pr_err(" str[%x] ct=%llx t=%zx\n",
+ s->stream_nr, s->collected_to, s->transferred);
+ list_for_each_entry(sreq, &s->subrequests, rreq_link) {
+ pr_err(" sreq[%x:%x] sc=%u s=%llx t=%zx/%zx r=%d f=%lx\n",
+ sreq->stream_nr, sreq->debug_index, sreq->source,
+ sreq->start, sreq->transferred, sreq->len,
+ refcount_read(&sreq->ref), sreq->flags);
+ }
+ }
+}
+
+/*
+ * Successful completion of write of a folio to the server and/or cache. Note
+ * that we are not allowed to lock the folio here on pain of deadlocking with
+ * truncate.
+ */
+int netfs_folio_written_back(struct folio *folio)
+{
+ enum netfs_folio_trace why = netfs_folio_trace_clear;
+ struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
+ struct netfs_folio *finfo;
+ struct netfs_group *group = NULL;
+ int gcount = 0;
+
+ if ((finfo = netfs_folio_info(folio))) {
+ /* Streaming writes cannot be redirtied whilst under writeback,
+ * so discard the streaming record.
+ */
+ unsigned long long fend;
+
+ fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len;
+ if (fend > ictx->zero_point)
+ ictx->zero_point = fend;
+
+ folio_detach_private(folio);
+ group = finfo->netfs_group;
+ gcount++;
+ kfree(finfo);
+ why = netfs_folio_trace_clear_s;
+ goto end_wb;
+ }
+
+ if ((group = netfs_folio_group(folio))) {
+ if (group == NETFS_FOLIO_COPY_TO_CACHE) {
+ why = netfs_folio_trace_clear_cc;
+ folio_detach_private(folio);
+ goto end_wb;
+ }
+
+ /* Need to detach the group pointer if the page didn't get
+ * redirtied. If it has been redirtied, then it must be within
+ * the same group.
+ */
+ why = netfs_folio_trace_redirtied;
+ if (!folio_test_dirty(folio)) {
+ folio_detach_private(folio);
+ gcount++;
+ why = netfs_folio_trace_clear_g;
+ }
+ }
+
+end_wb:
+ trace_netfs_folio(folio, why);
+ folio_end_writeback(folio);
+ return gcount;
+}
+
+/*
+ * Unlock any folios we've finished with.
+ */
+static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
+ unsigned int *notes)
+{
+ struct folio_queue *folioq = wreq->buffer.tail;
+ unsigned long long collected_to = wreq->collected_to;
+ unsigned int slot = wreq->buffer.first_tail_slot;
+
+ if (WARN_ON_ONCE(!folioq)) {
+ pr_err("[!] Writeback unlock found empty rolling buffer!\n");
+ netfs_dump_request(wreq);
+ return;
+ }
+
+ if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
+ if (netfs_pgpriv2_unlock_copied_folios(wreq))
+ *notes |= MADE_PROGRESS;
+ return;
+ }
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ return;
+ slot = 0;
+ }
+
+ for (;;) {
+ struct folio *folio;
+ struct netfs_folio *finfo;
+ unsigned long long fpos, fend;
+ size_t fsize, flen;
+
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_writeback(folio),
+ "R=%08x: folio %lx is not under writeback\n",
+ wreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
+
+ fpos = folio_pos(folio);
+ fsize = folio_size(folio);
+ finfo = netfs_folio_info(folio);
+ flen = finfo ? finfo->dirty_offset + finfo->dirty_len : fsize;
+
+ fend = min_t(unsigned long long, fpos + flen, wreq->i_size);
+
+ trace_netfs_collect_folio(wreq, folio, fend, collected_to);
+
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
+
+ wreq->nr_group_rel += netfs_folio_written_back(folio);
+ wreq->cleaned_to = fpos + fsize;
+ *notes |= MADE_PROGRESS;
+
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ goto done;
+ slot = 0;
+ }
+
+ if (fpos + fsize >= collected_to)
+ break;
+ }
+
+ wreq->buffer.tail = folioq;
+done:
+ wreq->buffer.first_tail_slot = slot;
+}
+
+/*
+ * Collect and assess the results of various write subrequests. We may need to
+ * retry some of the results - or even do an RMW cycle for content crypto.
+ *
+ * Note that we have a number of parallel, overlapping lists of subrequests,
+ * one to the server and one to the local cache for example, which may not be
+ * the same size or starting position and may not even correspond in boundary
+ * alignment.
+ */
+static void netfs_collect_write_results(struct netfs_io_request *wreq)
+{
+ struct netfs_io_subrequest *front, *remove;
+ struct netfs_io_stream *stream;
+ unsigned long long collected_to, issued_to;
+ unsigned int notes;
+ int s;
+
+ _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+ trace_netfs_collect(wreq);
+ trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
+
+reassess_streams:
+ issued_to = atomic64_read(&wreq->issued_to);
+ smp_rmb();
+ collected_to = ULLONG_MAX;
+ if (wreq->origin == NETFS_WRITEBACK ||
+ wreq->origin == NETFS_WRITETHROUGH ||
+ wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
+ notes = NEED_UNLOCK;
+ else
+ notes = 0;
+
+ /* Remove completed subrequests from the front of the streams and
+ * advance the completion point on each stream. We stop when we hit
+ * something that's in progress. The issuer thread may be adding stuff
+ * to the tail whilst we're doing this.
+ */
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ /* Read active flag before list pointers */
+ if (!smp_load_acquire(&stream->active))
+ continue;
+
+ front = stream->front;
+ while (front) {
+ trace_netfs_collect_sreq(wreq, front);
+ //_debug("sreq [%x] %llx %zx/%zx",
+ // front->debug_index, front->start, front->transferred, front->len);
+
+ if (stream->collected_to < front->start) {
+ trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
+ stream->collected_to = front->start;
+ }
+
+ /* Stall if the front is still undergoing I/O. */
+ if (netfs_check_subreq_in_progress(front)) {
+ notes |= HIT_PENDING;
+ break;
+ }
+ smp_rmb(); /* Read counters after I-P flag. */
+
+ if (stream->failed) {
+ stream->collected_to = front->start + front->len;
+ notes |= MADE_PROGRESS | SAW_FAILURE;
+ goto cancel;
+ }
+ if (front->start + front->transferred > stream->collected_to) {
+ stream->collected_to = front->start + front->transferred;
+ stream->transferred = stream->collected_to - wreq->start;
+ stream->transferred_valid = true;
+ notes |= MADE_PROGRESS;
+ }
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ stream->failed = true;
+ stream->error = front->error;
+ if (stream->source == NETFS_UPLOAD_TO_SERVER)
+ mapping_set_error(wreq->mapping, front->error);
+ notes |= NEED_REASSESS | SAW_FAILURE;
+ break;
+ }
+ if (front->transferred < front->len) {
+ stream->need_retry = true;
+ notes |= NEED_RETRY | MADE_PROGRESS;
+ break;
+ }
+
+ cancel:
+ /* Remove if completely consumed. */
+ spin_lock(&wreq->lock);
+
+ remove = front;
+ list_del_init(&front->rreq_link);
+ front = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ stream->front = front;
+ spin_unlock(&wreq->lock);
+ netfs_put_subrequest(remove,
+ notes & SAW_FAILURE ?
+ netfs_sreq_trace_put_cancel :
+ netfs_sreq_trace_put_done);
+ }
+
+ /* If we have an empty stream, we need to jump it forward,
+ * otherwise the collection point will never advance.
+ */
+ if (!front && issued_to > stream->collected_to) {
+ trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
+ stream->collected_to = issued_to;
+ }
+
+ if (stream->collected_to < collected_to)
+ collected_to = stream->collected_to;
+ }
+
+ if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
+ wreq->collected_to = collected_to;
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active)
+ trace_netfs_collect_stream(wreq, stream);
+ }
+
+ trace_netfs_collect_state(wreq, wreq->collected_to, notes);
+
+ /* Unlock any folios that we have now finished with. */
+ if (notes & NEED_UNLOCK) {
+ if (wreq->cleaned_to < wreq->collected_to)
+ netfs_writeback_unlock_folios(wreq, &notes);
+ } else {
+ wreq->cleaned_to = wreq->collected_to;
+ }
+
+ // TODO: Discard encryption buffers
+
+ if (notes & NEED_RETRY)
+ goto need_retry;
+
+ if (notes & MADE_PROGRESS) {
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
+ //cond_resched();
+ goto reassess_streams;
+ }
+
+ if (notes & NEED_REASSESS) {
+ //cond_resched();
+ goto reassess_streams;
+ }
+
+out:
+ netfs_put_group_many(wreq->group, wreq->nr_group_rel);
+ wreq->nr_group_rel = 0;
+ _leave(" = %x", notes);
+ return;
+
+need_retry:
+ /* Okay... We're going to have to retry one or both streams. Note
+ * that any partially completed op will have had any wholly transferred
+ * folios removed from it.
+ */
+ _debug("retry");
+ netfs_retry_writes(wreq);
+ goto out;
+}
+
+/*
+ * Perform the collection of subrequests, folios and encryption buffers.
+ */
+bool netfs_write_collection(struct netfs_io_request *wreq)
+{
+ struct netfs_inode *ictx = netfs_inode(wreq->inode);
+ size_t transferred;
+ bool transferred_valid = false;
+ int s;
+
+ _enter("R=%x", wreq->debug_id);
+
+ netfs_collect_write_results(wreq);
+
+ /* We're done when the app thread has finished posting subreqs and all
+ * the queues in all the streams are empty.
+ */
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags))
+ return false;
+ smp_rmb(); /* Read ALL_QUEUED before lists. */
+
+ transferred = LONG_MAX;
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ struct netfs_io_stream *stream = &wreq->io_streams[s];
+ if (!stream->active)
+ continue;
+ if (!list_empty(&stream->subrequests))
+ return false;
+ if (stream->transferred_valid &&
+ stream->transferred < transferred) {
+ transferred = stream->transferred;
+ transferred_valid = true;
+ }
+ }
+
+ /* Okay, declare that all I/O is complete. */
+ if (transferred_valid)
+ wreq->transferred = transferred;
+ trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+ if (wreq->io_streams[1].active &&
+ wreq->io_streams[1].failed &&
+ ictx->ops->invalidate_cache) {
+ /* Cache write failure doesn't prevent writeback completion
+ * unless we're in disconnected mode.
+ */
+ ictx->ops->invalidate_cache(wreq);
+ }
+
+ if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
+ wreq->origin == NETFS_DIO_WRITE) &&
+ !wreq->error)
+ netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
+
+ if (wreq->origin == NETFS_DIO_WRITE &&
+ wreq->mapping->nrpages) {
+ /* mmap may have got underfoot and we may now have folios
+ * locally covering the region we just wrote. Attempt to
+ * discard the folios, but leave in place any that were modified locally.
+ * ->write_iter() is prevented from interfering by the DIO
+ * counter.
+ */
+ pgoff_t first = wreq->start >> PAGE_SHIFT;
+ pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+ invalidate_inode_pages2_range(wreq->mapping, first, last);
+ }
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_end(wreq->inode);
+
+ _debug("finished");
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+
+ if (wreq->iocb) {
+ size_t written = min(wreq->transferred, wreq->len);
+ wreq->iocb->ki_pos += written;
+ if (wreq->iocb->ki_complete) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
+ wreq->iocb->ki_complete(
+ wreq->iocb, wreq->error ? wreq->error : written);
+ }
+ wreq->iocb = VFS_PTR_POISON;
+ }
+
+ netfs_clear_subrequests(wreq);
+ return true;
+}
+
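+/*
+ * Work item that drives the collection of write results for a request and
+ * drops the request's IN_PROGRESS ref once collection is complete.
+ */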
+void netfs_write_collection_worker(struct work_struct *work)
+{
+ struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
+
+ netfs_see_request(rreq, netfs_rreq_trace_see_work);
+ if (netfs_check_rreq_in_progress(rreq)) {
+ if (netfs_write_collection(rreq))
+ /* Drop the ref from the IN_PROGRESS flag. */
+ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
+ else
+ netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
+ }
+}
+
+/**
+ * netfs_write_subrequest_terminated - Note the termination of a write operation.
+ * @_op: The I/O request that has terminated.
+ * @transferred_or_error: The amount of data transferred or an error code.
+ *
+ * This tells the library that a contributory write I/O operation has
+ * terminated, one way or another, and that it should collect the results.
+ *
+ * The caller indicates in @transferred_or_error the outcome of the operation,
+ * supplying a positive value to indicate the number of bytes transferred or a
+ * negative error code. The library will look after reissuing I/O operations
+ * as appropriate.
+ *
+ * When this is called, ownership of the subrequest is transferred back to the
+ * library, along with a ref.
+ *
+ * Note that %_op is a void* so that the function can be passed to
+ * kiocb::term_func without the need for a casting wrapper.
+ */
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error)
+{
+ struct netfs_io_subrequest *subreq = _op;
+ struct netfs_io_request *wreq = subreq->rreq;
+
+ _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+
+ switch (subreq->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload_done);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write_done);
+ break;
+ default:
+ BUG();
+ }
+
+ if (IS_ERR_VALUE(transferred_or_error)) {
+ subreq->error = transferred_or_error;
+ if (subreq->error == -EAGAIN)
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ else
+ set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write);
+
+ switch (subreq->source) {
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write_failed);
+ break;
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload_failed);
+ break;
+ default:
+ break;
+ }
+ trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause);
+ set_bit(NETFS_RREQ_PAUSE, &wreq->flags);
+ } else {
+ if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+ "Subreq excess write: R=%x[%x] %zd > %zu - %zu",
+ wreq->debug_id, subreq->debug_index,
+ transferred_or_error, subreq->len, subreq->transferred))
+ transferred_or_error = subreq->len - subreq->transferred;
+
+ subreq->error = 0;
+ subreq->transferred += transferred_or_error;
+
+ if (subreq->transferred < subreq->len)
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+ netfs_subreq_clear_in_progress(subreq);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
+}
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
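+
+/*
+ * Illustrative sketch only: a filesystem's write completion handler would
+ * typically finish by reporting the outcome along these lines (the local
+ * variable names are hypothetical):
+ *
+ *     netfs_write_subrequest_terminated(subreq, err < 0 ? err : written);
+ */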
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
new file mode 100644
index 000000000000..dd8743bc8d7f
--- /dev/null
+++ b/fs/netfs/write_issue.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level (buffered) writeback.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ *
+ * To support network filesystems with local caching, we manage a situation
+ * that can be envisioned like the following:
+ *
+ * +---+---+-----+-----+---+----------+
+ * Folios: | | | | | | |
+ * +---+---+-----+-----+---+----------+
+ *
+ * +------+------+ +----+----+
+ * Upload: | | |.....| | |
+ * (Stream 0) +------+------+ +----+----+
+ *
+ * +------+------+------+------+------+
+ * Cache: | | | | | |
+ * (Stream 1) +------+------+------+------+------+
+ *
+ * Where we have a sequence of folios of varying sizes that we need to overlay
+ * with multiple parallel streams of I/O requests, where the I/O requests in a
+ * stream may also be of various sizes (in cifs, for example, the sizes are
+ * negotiated with the server; in something like ceph, they may represent the
+ * sizes of storage objects).
+ *
+ * The sequence in each stream may contain gaps, and noncontiguous subrequests
+ * may be glued together into single vectored write RPCs.
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include "internal.h"
+
+/*
+ * Kill all dirty folios in the event of an unrecoverable error, starting with
+ * a locked folio we've already obtained from writeback_iter().
+ */
+static void netfs_kill_dirty_pages(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct folio *folio)
+{
+ int error = 0;
+
+ do {
+ enum netfs_folio_trace why = netfs_folio_trace_kill;
+ struct netfs_group *group = NULL;
+ struct netfs_folio *finfo = NULL;
+ void *priv;
+
+ priv = folio_detach_private(folio);
+ if (priv) {
+ finfo = __netfs_folio_info(priv);
+ if (finfo) {
+ /* Kill folio from streaming write. */
+ group = finfo->netfs_group;
+ why = netfs_folio_trace_kill_s;
+ } else {
+ group = priv;
+ if (group == NETFS_FOLIO_COPY_TO_CACHE) {
+ /* Kill copy-to-cache folio */
+ why = netfs_folio_trace_kill_cc;
+ group = NULL;
+ } else {
+ /* Kill folio with group */
+ why = netfs_folio_trace_kill_g;
+ }
+ }
+ }
+
+ trace_netfs_folio(folio, why);
+
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+ folio_end_writeback(folio);
+
+ netfs_put_group(group);
+ kfree(finfo);
+
+ } while ((folio = writeback_iter(mapping, wbc, folio, &error)));
+}
+
+/*
+ * Create a write request and set it up appropriately for the origin type.
+ */
+struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ struct file *file,
+ loff_t start,
+ enum netfs_io_origin origin)
+{
+ struct netfs_io_request *wreq;
+ struct netfs_inode *ictx;
+ bool is_cacheable = (origin == NETFS_WRITEBACK ||
+ origin == NETFS_WRITEBACK_SINGLE ||
+ origin == NETFS_WRITETHROUGH ||
+ origin == NETFS_PGPRIV2_COPY_TO_CACHE);
+
+ wreq = netfs_alloc_request(mapping, file, start, 0, origin);
+ if (IS_ERR(wreq))
+ return wreq;
+
+ _enter("R=%x", wreq->debug_id);
+
+ ictx = netfs_inode(wreq->inode);
+ if (is_cacheable && netfs_is_cache_enabled(ictx))
+ fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
+ if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
+ goto nomem;
+
+ wreq->cleaned_to = wreq->start;
+
+ wreq->io_streams[0].stream_nr = 0;
+ wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;
+ wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
+ wreq->io_streams[0].issue_write = ictx->ops->issue_write;
+ wreq->io_streams[0].collected_to = start;
+ wreq->io_streams[0].transferred = 0;
+
+ wreq->io_streams[1].stream_nr = 1;
+ wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
+ wreq->io_streams[1].collected_to = start;
+ wreq->io_streams[1].transferred = 0;
+ if (fscache_resources_valid(&wreq->cache_resources)) {
+ wreq->io_streams[1].avail = true;
+ wreq->io_streams[1].active = true;
+ wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
+ wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
+ }
+
+ return wreq;
+nomem:
+ netfs_put_failed_request(wreq);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * netfs_prepare_write_failed - Note write preparation failed
+ * @subreq: The subrequest to mark
+ *
+ * Mark a subrequest to note that preparation for write failed.
+ */
+void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq)
+{
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prep_failed);
+}
+EXPORT_SYMBOL(netfs_prepare_write_failed);
+
+/*
+ * Prepare a write subrequest. We need to allocate a new subrequest
+ * if we don't have one.
+ */
+static void netfs_prepare_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start)
+{
+ struct netfs_io_subrequest *subreq;
+ struct iov_iter *wreq_iter = &wreq->buffer.iter;
+
+ /* Make sure we don't point the iterator at a used-up folio_queue
+ * struct being used as a placeholder to prevent the queue from
+ * collapsing. In such a case, extend the queue.
+ */
+ if (iov_iter_is_folioq(wreq_iter) &&
+ wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq))
+ rolling_buffer_make_space(&wreq->buffer);
+
+ subreq = netfs_alloc_subrequest(wreq);
+ subreq->source = stream->source;
+ subreq->start = start;
+ subreq->stream_nr = stream->stream_nr;
+ subreq->io_iter = *wreq_iter;
+
+ _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+ stream->sreq_max_len = UINT_MAX;
+ stream->sreq_max_segs = INT_MAX;
+ switch (stream->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ stream->sreq_max_len = wreq->wsize;
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ if (stream->prepare_write)
+ stream->prepare_write(subreq);
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ /* We add to the end of the list whilst the collector may be walking
+ * the list. The collector only walks the list forwards and uses the
+ * lock to remove entries from the front.
+ */
+ spin_lock(&wreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Write list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
+
+ spin_unlock(&wreq->lock);
+
+ stream->construct = subreq;
+}
+
+/*
+ * Set the I/O iterator for the filesystem/cache to use and dispatch the I/O
+ * operation. The operation may be asynchronous and should call
+ * netfs_write_subrequest_terminated() when complete.
+ */
+static void netfs_do_issue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+
+ _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ return netfs_write_subrequest_terminated(subreq, subreq->error);
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ stream->issue_write(subreq);
+}
+
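+/*
+ * Reissue a write subrequest from the retry path, pointing its iterator at
+ * the as-yet unwritten part of the source buffer.
+ */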
+void netfs_reissue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *source)
+{
+ size_t size = subreq->len - subreq->transferred;
+
+ // TODO: Use encrypted buffer
+ subreq->io_iter = *source;
+ iov_iter_advance(source, size);
+ iov_iter_truncate(&subreq->io_iter, size);
+
+ subreq->retry_count++;
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ netfs_stat(&netfs_n_wh_retry_write_subreq);
+ netfs_do_issue_write(stream, subreq);
+}
+
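+/*
+ * Dispatch the subrequest currently under construction on a stream, if there
+ * is one.
+ */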
+void netfs_issue_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
+{
+ struct netfs_io_subrequest *subreq = stream->construct;
+
+ if (!subreq)
+ return;
+ stream->construct = NULL;
+ subreq->io_iter.count = subreq->len;
+ netfs_do_issue_write(stream, subreq);
+}
+
+/*
+ * Add data to the write subrequest, dispatching each as we fill it up or if it
+ * is discontiguous with the previous. We only fill one part at a time so that
+ * we can avoid overrunning the credits obtained (cifs) and try to parallelise
+ * content-crypto preparation with network writes.
+ */
+size_t netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof)
+{
+ struct netfs_io_subrequest *subreq = stream->construct;
+ size_t part;
+
+ if (!stream->avail) {
+ _leave("no write");
+ return len;
+ }
+
+ _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+
+ if (subreq && start != subreq->start + subreq->len) {
+ netfs_issue_write(wreq, stream);
+ subreq = NULL;
+ }
+
+ if (!stream->construct)
+ netfs_prepare_write(wreq, stream, start);
+ subreq = stream->construct;
+
+ part = umin(stream->sreq_max_len - subreq->len, len);
+ _debug("part %zx/%zx %zx/%zx", subreq->len, stream->sreq_max_len, part, len);
+ subreq->len += part;
+ subreq->nr_segs++;
+ stream->submit_extendable_to -= part;
+
+ if (subreq->len >= stream->sreq_max_len ||
+ subreq->nr_segs >= stream->sreq_max_segs ||
+ to_eof) {
+ netfs_issue_write(wreq, stream);
+ subreq = NULL;
+ }
+
+ return part;
+}
+
+/*
+ * Write some of a pending folio data back to the server.
+ */
+static int netfs_write_folio(struct netfs_io_request *wreq,
+ struct writeback_control *wbc,
+ struct folio *folio)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *stream;
+ struct netfs_group *fgroup; /* TODO: Use this with ceph */
+ struct netfs_folio *finfo;
+ size_t iter_off = 0;
+ size_t fsize = folio_size(folio), flen = fsize, foff = 0;
+ loff_t fpos = folio_pos(folio), i_size;
+ bool to_eof = false, streamw = false;
+ bool debug = false;
+
+ _enter("");
+
+ if (rolling_buffer_make_space(&wreq->buffer) < 0)
+ return -ENOMEM;
+
+ /* netfs_perform_write() may shift i_size around the page or from out
+ * of the page to beyond it, but cannot move i_size into or through the
+ * page since we have it locked.
+ */
+ i_size = i_size_read(wreq->inode);
+
+ if (fpos >= i_size) {
+ /* mmap beyond eof. */
+ _debug("beyond eof");
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+ wreq->nr_group_rel += netfs_folio_written_back(folio);
+ netfs_put_group_many(wreq->group, wreq->nr_group_rel);
+ wreq->nr_group_rel = 0;
+ return 0;
+ }
+
+ if (fpos + fsize > wreq->i_size)
+ wreq->i_size = i_size;
+
+ fgroup = netfs_folio_group(folio);
+ finfo = netfs_folio_info(folio);
+ if (finfo) {
+ foff = finfo->dirty_offset;
+ flen = foff + finfo->dirty_len;
+ streamw = true;
+ }
+
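+ /* Trim the length we write to the EOF. */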
+ if (wreq->origin == NETFS_WRITETHROUGH) {
+ to_eof = false;
+ if (flen > i_size - fpos)
+ flen = i_size - fpos;
+ } else if (flen > i_size - fpos) {
+ flen = i_size - fpos;
+ if (!streamw)
+ folio_zero_segment(folio, flen, fsize);
+ to_eof = true;
+ } else if (flen == i_size - fpos) {
+ to_eof = true;
+ }
+ flen -= foff;
+
+ _debug("folio %zx %zx %zx", foff, flen, fsize);
+
+ /* Deal with discontinuities in the stream of dirty pages. These can
+ * arise from a number of sources:
+ *
+ * (1) Intervening non-dirty pages from random-access writes, multiple
+ * flushers writing back different parts simultaneously and manual
+ * syncing.
+ *
+ * (2) Partially-written pages from write-streaming.
+ *
+ * (3) Pages that belong to a different write-back group (eg. Ceph
+ * snapshots).
+ *
+ * (4) Actually-clean pages that were marked for write to the cache
+ * when they were read. Note that these appear as a special
+ * write-back group.
+ */
+ if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
+ netfs_issue_write(wreq, upload);
+ } else if (fgroup != wreq->group) {
+ /* We can't write this page to the server yet. */
+ kdebug("wrong group");
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ netfs_issue_write(wreq, upload);
+ netfs_issue_write(wreq, cache);
+ return 0;
+ }
+
+ if (foff > 0)
+ netfs_issue_write(wreq, upload);
+ if (streamw)
+ netfs_issue_write(wreq, cache);
+
+ /* Flip the page to the writeback state and unlock. If we're called
+ * from write-through, then the page has already been put into the wb
+ * state.
+ */
+ if (wreq->origin == NETFS_WRITEBACK)
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+ if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
+ if (!cache->avail) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
+ netfs_issue_write(wreq, upload);
+ netfs_folio_written_back(folio);
+ return 0;
+ }
+ trace_netfs_folio(folio, netfs_folio_trace_store_copy);
+ } else if (!upload->avail && !cache->avail) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
+ netfs_folio_written_back(folio);
+ return 0;
+ } else if (!upload->construct) {
+ trace_netfs_folio(folio, netfs_folio_trace_store);
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+ }
+
+ /* Attach the folio to the rolling buffer. */
+ rolling_buffer_append(&wreq->buffer, folio, 0);
+
+ /* Move the submission point forward to allow for write-streaming data
+ * not starting at the front of the page. We don't do write-streaming
+ * with the cache as the cache requires DIO alignment.
+ *
+ * Also skip uploading for data that's been read and just needs copying
+ * to the cache.
+ */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ stream->submit_off = foff;
+ stream->submit_len = flen;
+ if (!stream->avail ||
+ (stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
+ (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
+ stream->submit_off = UINT_MAX;
+ stream->submit_len = 0;
+ }
+ }
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ for (;;) {
+ ssize_t part;
+ size_t lowest_off = ULONG_MAX;
+ int choose_s = -1;
+
+ /* Always add to the lowest-submitted stream first. */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->submit_len > 0 &&
+ stream->submit_off < lowest_off) {
+ lowest_off = stream->submit_off;
+ choose_s = s;
+ }
+ }
+
+ if (choose_s < 0)
+ break;
+ stream = &wreq->io_streams[choose_s];
+
+ /* Advance the iterator(s). */
+ if (stream->submit_off > iter_off) {
+ rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
+ iter_off = stream->submit_off;
+ }
+
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+ stream->submit_extendable_to = fsize - stream->submit_off;
+ part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
+ stream->submit_len, to_eof);
+ stream->submit_off += part;
+ if (part > stream->submit_len)
+ stream->submit_len = 0;
+ else
+ stream->submit_len -= part;
+ if (part > 0)
+ debug = true;
+ }
+
+ if (fsize > iter_off)
+ rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
+ atomic64_set(&wreq->issued_to, fpos + fsize);
+
+ if (!debug)
+ kdebug("R=%x: No submit", wreq->debug_id);
+
+ if (foff + flen < fsize)
+ for (int s = 0; s < NR_IO_STREAMS; s++)
+ netfs_issue_write(wreq, &wreq->io_streams[s]);
+
+ _leave(" = 0");
+ return 0;
+}
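To make the EOF clamping at the top of netfs_write_folio() concrete, here is a small, self-contained sketch (not part of the patch; the numbers are purely illustrative) showing how flen and to_eof come out for a folio that straddles i_size:

	/* Illustrative userspace sketch of the clamping logic above. */
	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long long fpos = 8192, fsize = 16384, i_size = 20480;
		unsigned long long foff = 0, flen = fsize;
		bool to_eof = false;

		if (flen > i_size - fpos) {
			flen = i_size - fpos;	/* 12288: the tail past EOF gets zeroed */
			to_eof = true;
		} else if (flen == i_size - fpos) {
			to_eof = true;
		}
		flen -= foff;

		printf("submit %llu bytes at file offset %llu, to_eof=%d\n",
		       flen, fpos, to_eof);
		return 0;
	}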
+
+/*
+ * End the issuing of writes, letting the collector know we're done.
+ */
+static void netfs_end_issue_write(struct netfs_io_request *wreq)
+{
+ bool needs_poke = true;
+
+ smp_wmb(); /* Write subreq lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ struct netfs_io_stream *stream = &wreq->io_streams[s];
+
+ if (!stream->active)
+ continue;
+ if (!list_empty(&stream->subrequests))
+ needs_poke = false;
+ netfs_issue_write(wreq, stream);
+ }
+
+ if (needs_poke)
+ netfs_wake_collector(wreq);
+}
+
+/*
+ * Write some of the pending data back to the server
+ */
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct netfs_inode *ictx = netfs_inode(mapping->host);
+ struct netfs_io_request *wreq = NULL;
+ struct folio *folio;
+ int error = 0;
+
+ if (!mutex_trylock(&ictx->wb_lock)) {
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ netfs_stat(&netfs_n_wb_lock_skip);
+ return 0;
+ }
+ netfs_stat(&netfs_n_wb_lock_wait);
+ mutex_lock(&ictx->wb_lock);
+ }
+
+ /* Need the first folio to be able to set up the op. */
+ folio = writeback_iter(mapping, wbc, NULL, &error);
+ if (!folio)
+ goto out;
+
+ wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK);
+ if (IS_ERR(wreq)) {
+ error = PTR_ERR(wreq);
+ goto couldnt_start;
+ }
+
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+ trace_netfs_write(wreq, netfs_write_trace_writeback);
+ netfs_stat(&netfs_n_wh_writepages);
+
+ do {
+ _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
+
+ /* It appears we don't have to handle cyclic writeback wrapping. */
+ WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to));
+
+ if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
+ unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
+ set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ wreq->netfs_ops->begin_writeback(wreq);
+ }
+
+ error = netfs_write_folio(wreq, wbc, folio);
+ if (error < 0)
+ break;
+ } while ((folio = writeback_iter(mapping, wbc, folio, &error)));
+
+ netfs_end_issue_write(wreq);
+
+ mutex_unlock(&ictx->wb_lock);
+ netfs_wake_collector(wreq);
+
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ _leave(" = %d", error);
+ return error;
+
+couldnt_start:
+ netfs_kill_dirty_pages(mapping, wbc, folio);
+out:
+ mutex_unlock(&ictx->wb_lock);
+ _leave(" = %d", error);
+ return error;
+}
+EXPORT_SYMBOL(netfs_writepages);
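For context, a filesystem that has been converted to netfslib normally exposes this entry point through its address_space_operations; the sketch below is a hypothetical minimal wiring (not part of this patch) using the standard netfs helpers:

	/* Hypothetical aops wiring for a netfs-based filesystem. */
	static const struct address_space_operations example_netfs_aops = {
		.read_folio		= netfs_read_folio,
		.readahead		= netfs_readahead,
		.writepages		= netfs_writepages,
		.dirty_folio		= netfs_dirty_folio,
		.release_folio		= netfs_release_folio,
		.invalidate_folio	= netfs_invalidate_folio,
	};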
+
+/*
+ * Begin a write operation for writing through the pagecache.
+ */
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+{
+ struct netfs_io_request *wreq = NULL;
+ struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));
+
+ mutex_lock(&ictx->wb_lock);
+
+ wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp,
+ iocb->ki_pos, NETFS_WRITETHROUGH);
+ if (IS_ERR(wreq)) {
+ mutex_unlock(&ictx->wb_lock);
+ return wreq;
+ }
+
+ wreq->io_streams[0].avail = true;
+ trace_netfs_write(wreq, netfs_write_trace_writethrough);
+ return wreq;
+}
+
+/*
+ * Advance the state of the write operation used when writing through the
+ * pagecache. Data has been copied into the pagecache that we need to append
+ * to the request. If we've added more than wsize then we need to create a new
+ * subrequest.
+ */
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *folio, size_t copied, bool to_page_end,
+ struct folio **writethrough_cache)
+{
+ _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+ wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end);
+
+ if (!*writethrough_cache) {
+ if (folio_test_dirty(folio))
+ /* Sigh. mmap. */
+ folio_clear_dirty_for_io(folio);
+
+ /* We can make multiple writes to the folio... */
+ folio_start_writeback(folio);
+ if (wreq->len == 0)
+ trace_netfs_folio(folio, netfs_folio_trace_wthru);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
+ *writethrough_cache = folio;
+ }
+
+ wreq->len += copied;
+ if (!to_page_end)
+ return 0;
+
+ *writethrough_cache = NULL;
+ return netfs_write_folio(wreq, wbc, folio);
+}
+
+/*
+ * End a write operation used when writing through the pagecache.
+ */
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache)
+{
+ struct netfs_inode *ictx = netfs_inode(wreq->inode);
+ ssize_t ret;
+
+ _enter("R=%x", wreq->debug_id);
+
+ if (writethrough_cache)
+ netfs_write_folio(wreq, wbc, writethrough_cache);
+
+ netfs_end_issue_write(wreq);
+
+ mutex_unlock(&ictx->wb_lock);
+
+ if (wreq->iocb)
+ ret = -EIOCBQUEUED;
+ else
+ ret = netfs_wait_for_write(wreq);
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ return ret;
+}
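The three helpers above are meant to bracket the buffered-write copy loop: begin once, advance per chunk copied into the pagecache, end when the write is complete. The sketch below is a simplified, hypothetical caller (locking, error paths and the pagecache lookup are elided; example_grab_folio() is a stand-in, not a real API), roughly mirroring what netfs_perform_write() does in write-through mode:

	static ssize_t example_writethrough(struct kiocb *iocb, struct iov_iter *from,
					    struct writeback_control *wbc)
	{
		struct netfs_io_request *wreq;
		struct folio *wbcache = NULL;
		ssize_t written = 0, ret;

		wreq = netfs_begin_writethrough(iocb, iov_iter_count(from));
		if (IS_ERR(wreq))
			return PTR_ERR(wreq);

		while (iov_iter_count(from)) {
			struct folio *folio = example_grab_folio(iocb);	/* hypothetical */
			size_t offset = offset_in_folio(folio, iocb->ki_pos);
			size_t part = umin(folio_size(folio) - offset,
					   iov_iter_count(from));
			size_t copied = copy_folio_from_iter_atomic(folio, offset,
								    part, from);
			bool to_page_end = (offset + copied == folio_size(folio));

			ret = netfs_advance_writethrough(wreq, wbc, folio, copied,
							 to_page_end, &wbcache);
			if (ret < 0)
				break;
			iocb->ki_pos += copied;
			written += copied;
		}

		ret = netfs_end_writethrough(wreq, wbc, wbcache);
		return written ? written : ret;
	}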
+
+/*
+ * Write data to the server without going through the pagecache and without
+ * writing it to the local cache.
+ */
+int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ ssize_t part;
+ loff_t start = wreq->start;
+ int error = 0;
+
+ _enter("%zx", len);
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_begin(wreq->inode);
+
+ while (len) {
+ // TODO: Prepare content encryption
+
+ _debug("unbuffered %zx", len);
+ part = netfs_advance_write(wreq, upload, start, len, false);
+ start += part;
+ len -= part;
+ rolling_buffer_advance(&wreq->buffer, part);
+ if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
+ netfs_wait_for_paused_write(wreq);
+ if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
+ break;
+ }
+
+ netfs_end_issue_write(wreq);
+ _leave(" = %d", error);
+ return error;
+}
+
+/*
+ * Write some of a pending folio's data back to the server and/or the cache.
+ */
+static int netfs_write_folio_single(struct netfs_io_request *wreq,
+ struct folio *folio)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *stream;
+ size_t iter_off = 0;
+ size_t fsize = folio_size(folio), flen;
+ loff_t fpos = folio_pos(folio);
+ bool to_eof = false;
+ bool no_debug = false;
+
+ _enter("");
+
+ flen = folio_size(folio);
+ if (flen > wreq->i_size - fpos) {
+ flen = wreq->i_size - fpos;
+ folio_zero_segment(folio, flen, fsize);
+ to_eof = true;
+ } else if (flen == wreq->i_size - fpos) {
+ to_eof = true;
+ }
+
+ _debug("folio %zx/%zx", flen, fsize);
+
+ if (!upload->avail && !cache->avail) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
+ return 0;
+ }
+
+ if (!upload->construct)
+ trace_netfs_folio(folio, netfs_folio_trace_store);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+
+ /* Attach the folio to the rolling buffer. */
+ folio_get(folio);
+ rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK);
+
+ /* Move the submission point forward to allow for write-streaming data
+ * not starting at the front of the page. We don't do write-streaming
+ * with the cache as the cache requires DIO alignment.
+ *
+ * Also skip uploading for data that's been read and just needs copying
+ * to the cache.
+ */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ stream->submit_off = 0;
+ stream->submit_len = flen;
+ if (!stream->avail) {
+ stream->submit_off = UINT_MAX;
+ stream->submit_len = 0;
+ }
+ }
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ for (;;) {
+ ssize_t part;
+ size_t lowest_off = ULONG_MAX;
+ int choose_s = -1;
+
+ /* Always add to the lowest-submitted stream first. */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->submit_len > 0 &&
+ stream->submit_off < lowest_off) {
+ lowest_off = stream->submit_off;
+ choose_s = s;
+ }
+ }
+
+ if (choose_s < 0)
+ break;
+ stream = &wreq->io_streams[choose_s];
+
+ /* Advance the iterator(s). */
+ if (stream->submit_off > iter_off) {
+ rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
+ iter_off = stream->submit_off;
+ }
+
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+ stream->submit_extendable_to = fsize - stream->submit_off;
+ part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
+ stream->submit_len, to_eof);
+ stream->submit_off += part;
+ if (part > stream->submit_len)
+ stream->submit_len = 0;
+ else
+ stream->submit_len -= part;
+ if (part > 0)
+ no_debug = true;
+ }
+
+ wreq->buffer.iter.iov_offset = 0;
+ if (fsize > iter_off)
+ rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
+ atomic64_set(&wreq->issued_to, fpos + fsize);
+
+ if (!no_debug)
+ kdebug("R=%x: No submit", wreq->debug_id);
+ _leave(" = 0");
+ return 0;
+}
+
+/**
+ * netfs_writeback_single - Write back a monolithic payload
+ * @mapping: The mapping to write from
+ * @wbc: Hints from the VM
+ * @iter: Data to write, must be ITER_FOLIOQ.
+ *
+ * Write a monolithic, non-pagecache object back to the server and/or
+ * the cache.
+ */
+int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter)
+{
+ struct netfs_io_request *wreq;
+ struct netfs_inode *ictx = netfs_inode(mapping->host);
+ struct folio_queue *fq;
+ size_t size = iov_iter_count(iter);
+ int ret;
+
+ if (WARN_ON_ONCE(!iov_iter_is_folioq(iter)))
+ return -EIO;
+
+ if (!mutex_trylock(&ictx->wb_lock)) {
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ netfs_stat(&netfs_n_wb_lock_skip);
+ return 0;
+ }
+ netfs_stat(&netfs_n_wb_lock_wait);
+ mutex_lock(&ictx->wb_lock);
+ }
+
+ wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE);
+ if (IS_ERR(wreq)) {
+ ret = PTR_ERR(wreq);
+ goto couldnt_start;
+ }
+
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+ trace_netfs_write(wreq, netfs_write_trace_writeback_single);
+ netfs_stat(&netfs_n_wh_writepages);
+
+ if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+ wreq->netfs_ops->begin_writeback(wreq);
+
+ for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) {
+ for (int slot = 0; slot < folioq_count(fq); slot++) {
+ struct folio *folio = folioq_folio(fq, slot);
+ size_t part = umin(folioq_folio_size(fq, slot), size);
+
+ _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
+
+ ret = netfs_write_folio_single(wreq, folio);
+ if (ret < 0)
+ goto stop;
+ size -= part;
+ if (size <= 0)
+ goto stop;
+ }
+ }
+
+stop:
+ for (int s = 0; s < NR_IO_STREAMS; s++)
+ netfs_issue_write(wreq, &wreq->io_streams[s]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ mutex_unlock(&ictx->wb_lock);
+ netfs_wake_collector(wreq);
+
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ _leave(" = %d", ret);
+ return ret;
+
+couldnt_start:
+ mutex_unlock(&ictx->wb_lock);
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_writeback_single);
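netfs_writeback_single() requires an ITER_FOLIOQ iterator over the object's backing folios. A hypothetical caller (for instance a filesystem flushing a monolithic directory blob held in a folio_queue) might look like the sketch below; it is illustrative only, not part of the patch:

	static int example_write_single(struct address_space *mapping,
					struct writeback_control *wbc,
					struct folio_queue *fq, size_t len)
	{
		struct iov_iter iter;

		/* Describe the folio_queue contents as a data source. */
		iov_iter_folio_queue(&iter, ITER_SOURCE, fq, 0, 0, len);
		return netfs_writeback_single(mapping, wbc, &iter);
	}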
diff --git a/fs/netfs/write_retry.c b/fs/netfs/write_retry.c
new file mode 100644
index 000000000000..fc9c3e0d34d8
--- /dev/null
+++ b/fs/netfs/write_retry.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem write retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Perform retries on the streams that need it.
+ */
+static void netfs_retry_write_stream(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
+{
+ struct list_head *next;
+
+ _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+
+ if (list_empty(&stream->subrequests))
+ return;
+
+ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ wreq->netfs_ops->retry_request)
+ wreq->netfs_ops->retry_request(wreq, stream);
+
+ if (unlikely(stream->failed))
+ return;
+
+ /* If there's no renegotiation to do, just resend each failed subreq. */
+ if (!stream->prepare_write) {
+ struct netfs_io_subrequest *subreq;
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ break;
+ if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ struct iov_iter source;
+
+ netfs_reset_iter(subreq);
+ source = subreq->io_iter;
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq, &source);
+ }
+ }
+ return;
+ }
+
+ next = stream->subrequests.next;
+
+ do {
+ struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
+ struct iov_iter source;
+ unsigned long long start, len;
+ size_t part;
+ bool boundary = false;
+
+ /* Go through the stream and find the next span of contiguous
+ * data that we then rejig (cifs, for example, needs the wsize
+ * renegotiating) and reissue.
+ */
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
+ start = from->start + from->transferred;
+ len = from->len - from->transferred;
+
+ if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
+ return;
+
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ break;
+ to = subreq;
+ len += to->len;
+ }
+
+ /* Determine the set of buffers we're going to use. Each
+ * subreq gets a subset of a single overall contiguous buffer.
+ */
+ netfs_reset_iter(from);
+ source = from->io_iter;
+ source.count = len;
+
+ /* Work through the sublist. */
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len)
+ break;
+
+ subreq->start = start;
+ subreq->len = len;
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ subreq->retry_count++;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ /* Renegotiate max_len (wsize) */
+ stream->sreq_max_len = len;
+ stream->prepare_write(subreq);
+
+ part = umin(len, stream->sreq_max_len);
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+ subreq->len = part;
+ subreq->transferred = 0;
+ len -= part;
+ start += part;
+ if (len && subreq == to &&
+ __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
+ boundary = true;
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq, &source);
+ if (subreq == to)
+ break;
+ }
+
+ /* If we managed to use fewer subreqs, we can discard the
+ * excess; if we used the same number, then we're done.
+ */
+ if (!len) {
+ if (subreq == to)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
+ }
+
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(wreq);
+ subreq->source = to->source;
+ subreq->start = start;
+ subreq->stream_nr = to->stream_nr;
+ subreq->retry_count = 1;
+
+ trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_split);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ stream->sreq_max_len = len;
+ stream->sreq_max_segs = INT_MAX;
+ switch (stream->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ stream->sreq_max_len = umin(len, wreq->wsize);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ stream->prepare_write(subreq);
+
+ part = umin(len, stream->sreq_max_len);
+ subreq->len = subreq->transferred + part;
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_write(stream, subreq, &source);
+ if (!len)
+ break;
+
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
+}
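As an illustration of the rejigging above (all numbers hypothetical): three failed subrequests covering [0, 256K), [256K, 512K) and [512K, 768K) coalesce into one 768K span; if prepare_write() now reports a 128K wsize, the span is re-sliced into six 128K subrequests, reusing the three existing ones and allocating three more after them:

	/* Illustrative arithmetic only; not part of the patch. */
	unsigned long long span_len  = 3 * 262144;	/* 768 KiB failed span */
	unsigned long long new_wsize = 131072;		/* renegotiated 128 KiB */
	unsigned long long nr_slices = DIV_ROUND_UP(span_len, new_wsize);	/* 6 */
	unsigned long long nr_extra  = nr_slices - 3;	/* 3 extra subrequests to allocate */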
+
+/*
+ * Perform retries on the streams that need it. If we're doing content
+ * encryption and the server copy changed due to a third-party write, we may
+ * need to do an RMW cycle and also rewrite the data to the cache.
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq)
+{
+ struct netfs_io_stream *stream;
+ int s;
+
+ netfs_stat(&netfs_n_wh_retry_write_req);
+
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ set_bit(NETFS_RREQ_RETRYING, &wreq->flags);
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active)
+ netfs_wait_for_in_progress_stream(wreq, stream);
+ }
+ clear_bit(NETFS_RREQ_RETRYING, &wreq->flags);
+
+ // TODO: Enc: Fetch changed partial pages
+ // TODO: Enc: Reencrypt content if needed.
+ // TODO: Enc: Wind back transferred point.
+ // TODO: Enc: Mark cache pages for retry.
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->need_retry) {
+ stream->need_retry = false;
+ netfs_retry_write_stream(wreq, stream);
+ }
+ }
+}
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 14a72224b657..07932ce9246c 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -2,8 +2,10 @@
config NFS_FS
tristate "NFS client support"
depends on INET && FILE_LOCKING && MULTIUSER
+ select CRC32
select LOCKD
select SUNRPC
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFS_V3_ACL
help
Choose Y here if you want to access files residing on other
@@ -33,12 +35,12 @@ config NFS_FS
config NFS_V2
tristate "NFS client support for NFS version 2"
depends on NFS_FS
- default y
+ default n
help
This option enables support for version 2 of the NFS protocol
(RFC 1094) in the kernel's NFS client.
- If unsure, say Y.
+ If unsure, say N.
config NFS_V3
tristate "NFS client support for NFS version 3"
@@ -75,7 +77,6 @@ config NFS_V3_ACL
config NFS_V4
tristate "NFS client support for NFS version 4"
depends on NFS_FS
- select SUNRPC_GSS
select KEYS
help
This option enables support for version 4 of the NFS protocol
@@ -126,7 +127,7 @@ config PNFS_BLOCK
config PNFS_FLEXFILE_LAYOUT
tristate
- depends on NFS_V4_1 && NFS_V3
+ depends on NFS_V4_1
default NFS_V4
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
@@ -170,7 +171,9 @@ config ROOT_NFS
config NFS_FSCACHE
bool "Provide NFS client caching support"
- depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
+ depends on NFS_FS
+ select NETFS_SUPPORT
+ select FSCACHE
help
Say Y here if you want NFS data to be cached locally on disc through
the general filesystem cache manager
@@ -194,7 +197,6 @@ config NFS_USE_KERNEL_DNS
config NFS_DEBUG
bool
depends on NFS_FS && SUNRPC_DEBUG
- select CRC32
default y
config NFS_DISABLE_UDP_SUPPORT
@@ -209,8 +211,6 @@ config NFS_DISABLE_UDP_SUPPORT
config NFS_V4_2_READ_PLUS
bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation"
depends on NFS_V4_2
- default n
+ default y
help
- This is intended for developers only. The READ_PLUS operation has
- been shown to have issues under specific conditions and should not
- be used in production.
+ Choose Y here to enable use of the NFS v4.2 READ_PLUS operation.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 22d11fdc6deb..9fb2f2cac87e 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -12,7 +12,8 @@ nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
export.o sysfs.o fs_context.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
-nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
+nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
+nfs-$(CONFIG_NFS_LOCALIO) += localio.o
obj-$(CONFIG_NFS_V2) += nfsv2.o
nfsv2-y := nfs2super.o proc.o nfs2xdr.o
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index fe860c538747..0e4c67373e4f 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -115,30 +115,13 @@ bl_submit_bio(struct bio *bio)
return NULL;
}
-static struct bio *bl_alloc_init_bio(unsigned int npg,
- struct block_device *bdev, sector_t disk_sector,
- bio_end_io_t end_io, struct parallel_io *par)
-{
- struct bio *bio;
-
- npg = bio_max_segs(npg);
- bio = bio_alloc(GFP_NOIO, npg);
- if (bio) {
- bio->bi_iter.bi_sector = disk_sector;
- bio_set_dev(bio, bdev);
- bio->bi_end_io = end_io;
- bio->bi_private = par;
- }
- return bio;
-}
-
static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
return offset >= map->start && offset < map->start + map->len;
}
static struct bio *
-do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
+do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect,
struct page *page, struct pnfs_block_dev_map *map,
struct pnfs_block_extent *be, bio_end_io_t end_io,
struct parallel_io *par, unsigned int offset, int *len)
@@ -148,7 +131,7 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
u64 disk_addr, end;
dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
- npg, rw, (unsigned long long)isect, offset, *len);
+ npg, (__force u32)op, (unsigned long long)isect, offset, *len);
/* translate to device offset */
isect += be->be_v_offset;
@@ -166,16 +149,15 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
/* limit length to what the device mapping allows */
end = disk_addr + *len;
- if (end >= map->start + map->len)
- *len = map->start + map->len - disk_addr;
+ if (end >= map->disk_offset + map->len)
+ *len = map->disk_offset + map->len - disk_addr;
retry:
if (!bio) {
- bio = bl_alloc_init_bio(npg, map->bdev,
- disk_addr >> SECTOR_SHIFT, end_io, par);
- if (!bio)
- return ERR_PTR(-ENOMEM);
- bio_set_op_attrs(bio, rw, 0);
+ bio = bio_alloc(map->bdev, bio_max_segs(npg), op, GFP_NOIO);
+ bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT;
+ bio->bi_end_io = end_io;
+ bio->bi_private = par;
}
if (bio_add_page(bio, page, *len, offset) < *len) {
bio = bl_submit_bio(bio);
@@ -309,7 +291,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
} else {
bio = do_add_page_to_bio(bio,
header->page_array.npages - i,
- READ,
+ REQ_OP_READ,
isect, pages[i], &map, &be,
bl_end_io_read, par,
pg_offset, &pg_len);
@@ -438,9 +420,8 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
pg_len = PAGE_SIZE;
bio = do_add_page_to_bio(bio, header->page_array.npages - i,
- WRITE, isect, pages[i], &map, &be,
- bl_end_io_write, par,
- 0, &pg_len);
+ REQ_OP_WRITE, isect, pages[i], &map,
+ &be, bl_end_io_write, par, 0, &pg_len);
if (IS_ERR(bio)) {
header->pnfs_error = PTR_ERR(bio);
bio = NULL;
@@ -583,23 +564,45 @@ bl_find_get_deviceid(struct nfs_server *server,
gfp_t gfp_mask)
{
struct nfs4_deviceid_node *node;
- unsigned long start, end;
+ int err = -ENODEV;
retry:
node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
if (!node)
return ERR_PTR(-ENODEV);
- if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
- return node;
+ /*
+ * Devices that are marked unavailable are left in the cache with a
+ * timeout to avoid sending GETDEVINFO after every LAYOUTGET, or
+ * constantly attempting to register the device. Once marked as
+ * unavailable they must be deleted and never reused.
+ */
+ if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
+ unsigned long end = jiffies;
+ unsigned long start = end - PNFS_DEVICE_RETRY_TIMEOUT;
+
+ if (!time_in_range(node->timestamp_unavailable, start, end)) {
+ /* Uncork subsequent GETDEVINFO operations for this device */
+ nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ goto retry;
+ }
+ goto out_put;
+ }
- end = jiffies;
- start = end - PNFS_DEVICE_RETRY_TIMEOUT;
- if (!time_in_range(node->timestamp_unavailable, start, end)) {
- nfs4_delete_deviceid(node->ld, node->nfs_client, id);
- goto retry;
+ if (!bl_register_dev(container_of(node, struct pnfs_block_dev, node))) {
+ /*
+ * If we cannot register, treat this device as transient:
+ * Make a negative cache entry for the device
+ */
+ nfs4_mark_deviceid_unavailable(node);
+ goto out_put;
}
- return ERR_PTR(-ENODEV);
+
+ return node;
+
+out_put:
+ nfs4_put_deviceid_node(node);
+ return ERR_PTR(err);
}
static int
@@ -673,7 +676,7 @@ bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
struct pnfs_layout_segment *lseg;
struct xdr_buf buf;
struct xdr_stream xdr;
- struct page *scratch;
+ struct folio *scratch;
int status, i;
uint32_t count;
__be32 *p;
@@ -686,13 +689,13 @@ bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
return ERR_PTR(-ENOMEM);
status = -ENOMEM;
- scratch = alloc_page(gfp_mask);
+ scratch = folio_alloc(gfp_mask, 0);
if (!scratch)
goto out;
xdr_init_decode_pages(&xdr, &buf,
lgr->layoutp->pages, lgr->layoutp->len);
- xdr_set_scratch_page(&xdr, scratch);
+ xdr_set_scratch_folio(&xdr, scratch);
status = -EIO;
p = xdr_inline_decode(&xdr, 4);
@@ -741,7 +744,7 @@ process_extents:
}
out_free_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out:
dprintk("%s returns %d\n", __func__, status);
switch (status) {
@@ -912,10 +915,9 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
}
if (pgio->pg_dreq == NULL)
- wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
- req->wb_index);
+ wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index);
else
- wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+ wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req));
pnfs_generic_pg_init_write(pgio, req, wb_size);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 716bc75e9ed2..6da40ca19570 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -104,20 +104,26 @@ struct pnfs_block_dev {
u64 start;
u64 len;
+ enum pnfs_block_volume_type type;
u32 nr_children;
struct pnfs_block_dev *children;
u64 chunk_size;
- struct block_device *bdev;
+ struct file *bdev_file;
u64 disk_offset;
+ unsigned long flags;
u64 pr_key;
- bool pr_registered;
bool (*map)(struct pnfs_block_dev *dev, u64 offset,
struct pnfs_block_dev_map *map);
};
+/* pnfs_block_dev flag bits */
+enum {
+ PNFS_BDEV_REGISTERED = 0,
+};
+
/* sector_t fields are all in 512-byte sectors */
struct pnfs_block_extent {
union {
@@ -172,6 +178,7 @@ struct bl_msg_hdr {
#define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */
/* dev.c */
+bool bl_register_dev(struct pnfs_block_dev *d);
struct nfs4_deviceid_node *bl_alloc_deviceid_node(struct nfs_server *server,
struct pnfs_device *pdev, gfp_t gfp_mask);
void bl_free_deviceid_node(struct nfs4_deviceid_node *d);
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 5e56da748b2a..ab76120705e2 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -10,12 +10,81 @@
#include <linux/pr.h>
#include "blocklayout.h"
+#include "../nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+static void bl_unregister_scsi(struct pnfs_block_dev *dev)
+{
+ struct block_device *bdev = file_bdev(dev->bdev_file);
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int status;
+
+ status = ops->pr_register(bdev, dev->pr_key, 0, false);
+ if (status)
+ trace_bl_pr_key_unreg_err(bdev, dev->pr_key, status);
+ else
+ trace_bl_pr_key_unreg(bdev, dev->pr_key);
+}
+
+static bool bl_register_scsi(struct pnfs_block_dev *dev)
+{
+ struct block_device *bdev = file_bdev(dev->bdev_file);
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int status;
+
+ if (test_and_set_bit(PNFS_BDEV_REGISTERED, &dev->flags))
+ return true;
+
+ status = ops->pr_register(bdev, 0, dev->pr_key, true);
+ if (status) {
+ trace_bl_pr_key_reg_err(bdev, dev->pr_key, status);
+ return false;
+ }
+ trace_bl_pr_key_reg(bdev, dev->pr_key);
+ return true;
+}
+
+static void bl_unregister_dev(struct pnfs_block_dev *dev)
+{
+ u32 i;
+
+ if (dev->nr_children) {
+ for (i = 0; i < dev->nr_children; i++)
+ bl_unregister_dev(&dev->children[i]);
+ return;
+ }
+
+ if (dev->type == PNFS_BLOCK_VOLUME_SCSI &&
+ test_and_clear_bit(PNFS_BDEV_REGISTERED, &dev->flags))
+ bl_unregister_scsi(dev);
+}
+
+bool bl_register_dev(struct pnfs_block_dev *dev)
+{
+ u32 i;
+
+ if (dev->nr_children) {
+ for (i = 0; i < dev->nr_children; i++) {
+ if (!bl_register_dev(&dev->children[i])) {
+ while (i > 0)
+ bl_unregister_dev(&dev->children[--i]);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (dev->type == PNFS_BLOCK_VOLUME_SCSI)
+ return bl_register_scsi(dev);
+ return true;
+}
+
static void
bl_free_device(struct pnfs_block_dev *dev)
{
+ bl_unregister_dev(dev);
+
if (dev->nr_children) {
int i;
@@ -23,19 +92,8 @@ bl_free_device(struct pnfs_block_dev *dev)
bl_free_device(&dev->children[i]);
kfree(dev->children);
} else {
- if (dev->pr_registered) {
- const struct pr_ops *ops =
- dev->bdev->bd_disk->fops->pr_ops;
- int error;
-
- error = ops->pr_register(dev->bdev, dev->pr_key, 0,
- false);
- if (error)
- pr_err("failed to unregister PR key.\n");
- }
-
- if (dev->bdev)
- blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);
+ if (dev->bdev_file)
+ fput(dev->bdev_file);
}
}
@@ -169,7 +227,7 @@ static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
map->start = dev->start;
map->len = dev->len;
map->disk_offset = dev->disk_offset;
- map->bdev = dev->bdev;
+ map->bdev = file_bdev(dev->bdev_file);
return true;
}
@@ -199,10 +257,11 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
struct pnfs_block_dev *child;
u64 chunk;
u32 chunk_idx;
+ u64 disk_chunk;
u64 disk_offset;
chunk = div_u64(offset, dev->chunk_size);
- div_u64_rem(chunk, dev->nr_children, &chunk_idx);
+ disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx);
if (chunk_idx >= dev->nr_children) {
dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
@@ -215,7 +274,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
offset = chunk * dev->chunk_size;
/* disk offset of the stripe */
- disk_offset = div_u64(offset, dev->nr_children);
+ disk_offset = disk_chunk * dev->chunk_size;
child = &dev->children[chunk_idx];
child->map(child, disk_offset, map);
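A worked example of the corrected stripe mapping (hypothetical geometry): with chunk_size = 1 MiB and nr_children = 4, file offset 9 MiB is chunk 9, which lands on child 1 (9 mod 4) as that child's second chunk (9 / 4 = 2), so the per-child disk offset is 2 MiB. The old div_u64(offset, nr_children) calculation produced 2.25 MiB, which is not chunk-aligned:

	/* Illustrative numbers for the bl_map_stripe() fix; not part of the patch. */
	u32 chunk_size = 1048576, nr_children = 4, chunk_idx;
	u64 offset = 9 * 1048576ULL;
	u64 chunk       = div_u64(offset, chunk_size);			/* 9 */
	u64 disk_chunk  = div_u64_rem(chunk, nr_children, &chunk_idx);	/* 2, idx = 1 */
	u64 disk_offset = disk_chunk * chunk_size;			/* 2 MiB */
	/* old (wrong): div_u64(offset, nr_children) == 2359296 == 2.25 MiB */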
@@ -236,27 +295,26 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
- struct block_device *bdev;
+ struct file *bdev_file;
dev_t dev;
dev = bl_resolve_deviceid(server, v, gfp_mask);
if (!dev)
return -EIO;
- bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
- if (IS_ERR(bdev)) {
+ bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ NULL, NULL);
+ if (IS_ERR(bdev_file)) {
printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
- MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
- return PTR_ERR(bdev);
+ MAJOR(dev), MINOR(dev), PTR_ERR(bdev_file));
+ return PTR_ERR(bdev_file);
}
- d->bdev = bdev;
-
-
- d->len = bdev_nr_bytes(d->bdev);
+ d->bdev_file = bdev_file;
+ d->len = bdev_nr_bytes(file_bdev(bdev_file));
d->map = bl_map_simple;
printk(KERN_INFO "pNFS: using block device %s\n",
- d->bdev->bd_disk->disk_name);
+ file_bdev(bdev_file)->bd_disk->disk_name);
return 0;
}
@@ -301,51 +359,26 @@ bl_validate_designator(struct pnfs_block_volume *v)
}
}
-/*
- * Try to open the udev path for the WWN. At least on Debian the udev
- * by-id path will always point to the dm-multipath device if one exists.
- */
-static struct block_device *
-bl_open_udev_path(struct pnfs_block_volume *v)
+static struct file *
+bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
- struct block_device *bdev;
+ struct file *bdev_file;
const char *devname;
- devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%*phN",
- v->scsi.designator_len, v->scsi.designator);
+ devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
+ prefix, v->scsi.designator_len, v->scsi.designator);
if (!devname)
return ERR_PTR(-ENOMEM);
- bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
- if (IS_ERR(bdev)) {
- pr_warn("pNFS: failed to open device %s (%ld)\n",
- devname, PTR_ERR(bdev));
+ bdev_file = bdev_file_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ NULL, NULL);
+ if (IS_ERR(bdev_file)) {
+ dprintk("failed to open device %s (%ld)\n",
+ devname, PTR_ERR(bdev_file));
}
kfree(devname);
- return bdev;
-}
-
-/*
- * Try to open the RH/Fedora specific dm-mpath udev path for this WWN, as the
- * wwn- links will only point to the first discovered SCSI device there.
- */
-static struct block_device *
-bl_open_dm_mpath_udev_path(struct pnfs_block_volume *v)
-{
- struct block_device *bdev;
- const char *devname;
-
- devname = kasprintf(GFP_KERNEL,
- "/dev/disk/by-id/dm-uuid-mpath-%d%*phN",
- v->scsi.designator_type,
- v->scsi.designator_len, v->scsi.designator);
- if (!devname)
- return ERR_PTR(-ENOMEM);
-
- bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
- kfree(devname);
- return bdev;
+ return bdev_file;
}
static int
@@ -355,45 +388,50 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *v = &volumes[idx];
struct block_device *bdev;
const struct pr_ops *ops;
+ struct file *bdev_file;
int error;
if (!bl_validate_designator(v))
return -EINVAL;
- bdev = bl_open_dm_mpath_udev_path(v);
- if (IS_ERR(bdev))
- bdev = bl_open_udev_path(v);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- d->bdev = bdev;
+ /*
+ * Try to open the RH/Fedora specific dm-mpath udev path first, as the
+ * wwn- links will only point to the first discovered SCSI device there.
+ * On other distributions like Debian, the default SCSI by-id path will
+ * point to the dm-multipath device if one exists.
+ */
+ bdev_file = bl_open_path(v, "dm-uuid-mpath-0x");
+ if (IS_ERR(bdev_file))
+ bdev_file = bl_open_path(v, "wwn-0x");
+ if (IS_ERR(bdev_file))
+ bdev_file = bl_open_path(v, "nvme-eui.");
+ if (IS_ERR(bdev_file)) {
+ pr_warn("pNFS: no device found for volume %*phN\n",
+ v->scsi.designator_len, v->scsi.designator);
+ return PTR_ERR(bdev_file);
+ }
+ d->bdev_file = bdev_file;
+ bdev = file_bdev(bdev_file);
- d->len = bdev_nr_bytes(d->bdev);
+ d->len = bdev_nr_bytes(bdev);
d->map = bl_map_simple;
d->pr_key = v->scsi.pr_key;
- pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
- d->bdev->bd_disk->disk_name, d->pr_key);
+ if (d->len == 0)
+ return -ENODEV;
- ops = d->bdev->bd_disk->fops->pr_ops;
+ ops = bdev->bd_disk->fops->pr_ops;
if (!ops) {
pr_err("pNFS: block device %s does not support reservations.",
- d->bdev->bd_disk->disk_name);
+ bdev->bd_disk->disk_name);
error = -EINVAL;
goto out_blkdev_put;
}
- error = ops->pr_register(d->bdev, 0, d->pr_key, true);
- if (error) {
- pr_err("pNFS: failed to register key for block device %s.",
- d->bdev->bd_disk->disk_name);
- goto out_blkdev_put;
- }
-
- d->pr_registered = true;
return 0;
out_blkdev_put:
- blkdev_put(d->bdev, FMODE_READ | FMODE_WRITE);
+ fput(d->bdev_file);
return error;
}
@@ -422,7 +460,7 @@ bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
int ret, i;
d->children = kcalloc(v->concat.volumes_count,
- sizeof(struct pnfs_block_dev), GFP_KERNEL);
+ sizeof(struct pnfs_block_dev), gfp_mask);
if (!d->children)
return -ENOMEM;
@@ -451,7 +489,7 @@ bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
int ret, i;
d->children = kcalloc(v->stripe.volumes_count,
- sizeof(struct pnfs_block_dev), GFP_KERNEL);
+ sizeof(struct pnfs_block_dev), gfp_mask);
if (!d->children)
return -ENOMEM;
@@ -475,7 +513,9 @@ static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
- switch (volumes[idx].type) {
+ d->type = volumes[idx].type;
+
+ switch (d->type) {
case PNFS_BLOCK_VOLUME_SIMPLE:
return bl_parse_simple(server, d, volumes, idx, gfp_mask);
case PNFS_BLOCK_VOLUME_SLICE:
@@ -487,7 +527,7 @@ bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
case PNFS_BLOCK_VOLUME_SCSI:
return bl_parse_scsi(server, d, volumes, idx, gfp_mask);
default:
- dprintk("unsupported volume type: %d\n", volumes[idx].type);
+ dprintk("unsupported volume type: %d\n", d->type);
return -EIO;
}
}
@@ -501,16 +541,16 @@ bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
struct pnfs_block_dev *top;
struct xdr_stream xdr;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
int nr_volumes, ret, i;
__be32 *p;
- scratch = alloc_page(gfp_mask);
+ scratch = folio_alloc(gfp_mask, 0);
if (!scratch)
goto out;
xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
- xdr_set_scratch_page(&xdr, scratch);
+ xdr_set_scratch_folio(&xdr, scratch);
p = xdr_inline_decode(&xdr, sizeof(__be32));
if (!p)
@@ -542,7 +582,7 @@ bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
out_free_volumes:
kfree(volumes);
out_free_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out:
return node;
}
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index 8f7cff7a4293..315949a7e92d 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -6,6 +6,7 @@
#include <linux/vmalloc.h>
#include "blocklayout.h"
+#include "../nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
@@ -520,10 +521,71 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
return xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);
}
-static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
+/**
+ * ext_tree_try_encode_commit - try to encode all extents into the buffer
+ * @bl: pointer to the layout
+ * @p: pointer to the output buffer
+ * @buffer_size: size of the output buffer
+ * @count: output pointer to the number of encoded extents
+ * @lastbyte: output pointer to the last written byte
+ *
+ * Return values:
+ * %0: Success, all required extents encoded, outputs are valid
+ * %-ENOSPC: Buffer too small, nothing encoded, outputs are invalid
+ */
+static int
+ext_tree_try_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
size_t buffer_size, size_t *count, __u64 *lastbyte)
{
struct pnfs_block_extent *be;
+
+ spin_lock(&bl->bl_ext_lock);
+ for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
+ if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
+ be->be_tag != EXTENT_WRITTEN)
+ continue;
+
+ (*count)++;
+ if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) {
+ spin_unlock(&bl->bl_ext_lock);
+ return -ENOSPC;
+ }
+ }
+ for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
+ if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
+ be->be_tag != EXTENT_WRITTEN)
+ continue;
+
+ if (bl->bl_scsi_layout)
+ p = encode_scsi_range(be, p);
+ else
+ p = encode_block_extent(be, p);
+ be->be_tag = EXTENT_COMMITTING;
+ }
+ *lastbyte = (bl->bl_lwb != 0) ? bl->bl_lwb - 1 : U64_MAX;
+ bl->bl_lwb = 0;
+ spin_unlock(&bl->bl_ext_lock);
+
+ return 0;
+}
+
+/**
+ * ext_tree_encode_commit - encode as many extents as possible into the buffer
+ * @bl: pointer to the layout
+ * @p: pointer to the output buffer
+ * @buffer_size: size of the output buffer
+ * @count: output pointer to the number of encoded extents
+ * @lastbyte: output pointer to the last written byte
+ *
+ * Return values:
+ * %0: Success, all required extents encoded, outputs are valid
+ * %-ENOSPC: Buffer too small, some extents are encoded, outputs are valid
+ */
+static int
+ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
+ size_t buffer_size, size_t *count, __u64 *lastbyte)
+{
+ struct pnfs_block_extent *be, *be_prev;
int ret = 0;
spin_lock(&bl->bl_ext_lock);
@@ -534,9 +596,9 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
(*count)++;
if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) {
- /* keep counting.. */
+ (*count)--;
ret = -ENOSPC;
- continue;
+ break;
}
if (bl->bl_scsi_layout)
@@ -544,14 +606,30 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
else
p = encode_block_extent(be, p);
be->be_tag = EXTENT_COMMITTING;
+ be_prev = be;
+ }
+ if (!ret) {
+ *lastbyte = (bl->bl_lwb != 0) ? bl->bl_lwb - 1 : U64_MAX;
+ bl->bl_lwb = 0;
+ } else {
+ *lastbyte = be_prev->be_f_offset + be_prev->be_length;
+ *lastbyte <<= SECTOR_SHIFT;
+ *lastbyte -= 1;
}
- *lastbyte = bl->bl_lwb - 1;
- bl->bl_lwb = 0;
spin_unlock(&bl->bl_ext_lock);
return ret;
}
+/**
+ * ext_tree_prepare_commit - encode extents that need to be committed
+ * @arg: layout commit data
+ *
+ * Return values:
+ * %0: Success, all required extents are encoded
+ * %-ENOSPC: Some extents are encoded, but not all, due to RPC size limit
+ * %-ENOMEM: Out of memory, extents not encoded
+ */
int
ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
{
@@ -560,20 +638,18 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
__be32 *start_p;
int ret;
- dprintk("%s enter\n", __func__);
-
arg->layoutupdate_page = alloc_page(GFP_NOFS);
if (!arg->layoutupdate_page)
return -ENOMEM;
start_p = page_address(arg->layoutupdate_page);
arg->layoutupdate_pages = &arg->layoutupdate_page;
-retry:
- ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
+ ret = ext_tree_try_encode_commit(bl, start_p + 1, buffer_size,
+ &count, &arg->lastbytewritten);
if (unlikely(ret)) {
ext_tree_free_commitdata(arg, buffer_size);
- buffer_size = ext_tree_layoutupdate_size(bl, count);
+ buffer_size = NFS_SERVER(arg->inode)->wsize;
count = 0;
arg->layoutupdate_pages =
@@ -588,7 +664,8 @@ retry:
return -ENOMEM;
}
- goto retry;
+ ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size,
+ &count, &arg->lastbytewritten);
}
*start_p = cpu_to_be32(count);
@@ -607,8 +684,9 @@ retry:
}
}
- dprintk("%s found %zu ranges\n", __func__, count);
- return 0;
+ trace_bl_ext_tree_prepare_commit(ret, count,
+ arg->lastbytewritten, !!ret);
+ return ret;
}
void
diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c
index ef9db135c649..d526f5ba7887 100644
--- a/fs/nfs/blocklayout/rpc_pipefs.c
+++ b/fs/nfs/blocklayout/rpc_pipefs.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
-#include <linux/genhd.h>
#include <linux/blkdev.h>
#include "blocklayout.h"
@@ -76,7 +75,7 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
msg->len = sizeof(*bl_msg) + b->simple.len;
msg->data = kzalloc(msg->len, gfp_mask);
if (!msg->data)
- goto out_free_data;
+ goto out_unlock;
bl_msg = msg->data;
bl_msg->type = BL_DEVICE_MOUNT;
@@ -142,24 +141,18 @@ static const struct rpc_pipe_ops bl_upcall_ops = {
.destroy_msg = bl_pipe_destroy_msg,
};
-static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
+static int nfs4blocklayout_register_sb(struct super_block *sb,
struct rpc_pipe *pipe)
{
- struct dentry *dir, *dentry;
+ struct dentry *dir;
+ int err;
dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
if (dir == NULL)
- return ERR_PTR(-ENOENT);
- dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
+ return -ENOENT;
+ err = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
dput(dir);
- return dentry;
-}
-
-static void nfs4blocklayout_unregister_sb(struct super_block *sb,
- struct rpc_pipe *pipe)
-{
- if (pipe->dentry)
- rpc_unlink(pipe->dentry);
+ return err;
}
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
@@ -168,7 +161,6 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
struct super_block *sb = ptr;
struct net *net = sb->s_fs_info;
struct nfs_net *nn = net_generic(net, nfs_net_id);
- struct dentry *dentry;
int ret = 0;
if (!try_module_get(THIS_MODULE))
@@ -181,16 +173,10 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
switch (event) {
case RPC_PIPEFS_MOUNT:
- dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- break;
- }
- nn->bl_device_pipe->dentry = dentry;
+ ret = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
break;
case RPC_PIPEFS_UMOUNT:
- if (nn->bl_device_pipe->dentry)
- nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
+ rpc_unlink(nn->bl_device_pipe);
break;
default:
ret = -ENOTSUPP;
@@ -204,18 +190,17 @@ static struct notifier_block nfs4blocklayout_block = {
.notifier_call = rpc_pipefs_event,
};
-static struct dentry *nfs4blocklayout_register_net(struct net *net,
- struct rpc_pipe *pipe)
+static int nfs4blocklayout_register_net(struct net *net, struct rpc_pipe *pipe)
{
struct super_block *pipefs_sb;
- struct dentry *dentry;
+ int ret;
pipefs_sb = rpc_get_sb_net(net);
if (!pipefs_sb)
- return NULL;
- dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
+ return 0;
+ ret = nfs4blocklayout_register_sb(pipefs_sb, pipe);
rpc_put_sb_net(net);
- return dentry;
+ return ret;
}
static void nfs4blocklayout_unregister_net(struct net *net,
@@ -225,7 +210,7 @@ static void nfs4blocklayout_unregister_net(struct net *net,
pipefs_sb = rpc_get_sb_net(net);
if (pipefs_sb) {
- nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
+ rpc_unlink(pipe);
rpc_put_sb_net(net);
}
}
@@ -233,20 +218,17 @@ static void nfs4blocklayout_unregister_net(struct net *net,
static int nfs4blocklayout_net_init(struct net *net)
{
struct nfs_net *nn = net_generic(net, nfs_net_id);
- struct dentry *dentry;
+ int err;
mutex_init(&nn->bl_mutex);
init_waitqueue_head(&nn->bl_wq);
nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
if (IS_ERR(nn->bl_device_pipe))
return PTR_ERR(nn->bl_device_pipe);
- dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
- if (IS_ERR(dentry)) {
+ err = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
+ if (unlikely(err))
rpc_destroy_pipe_data(nn->bl_device_pipe);
- return PTR_ERR(dentry);
- }
- nn->bl_device_pipe->dentry = dentry;
- return 0;
+ return err;
}
static void nfs4blocklayout_net_exit(struct net *net)
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 86d856de1389..c8b837006bb2 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -17,7 +17,6 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/bc_xprt.h>
@@ -45,18 +44,18 @@ static int nfs4_callback_up_net(struct svc_serv *serv, struct net *net)
int ret;
struct nfs_net *nn = net_generic(net, nfs_net_id);
- ret = svc_create_xprt(serv, "tcp", net, PF_INET,
- nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS,
- cred);
+ ret = svc_xprt_create(serv, "tcp", net, PF_INET,
+ nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS,
+ cred);
if (ret <= 0)
goto out_err;
nn->nfs_callback_tcpport = ret;
dprintk("NFS: Callback listener port = %u (af %u, net %x)\n",
nn->nfs_callback_tcpport, PF_INET, net->ns.inum);
- ret = svc_create_xprt(serv, "tcp", net, PF_INET6,
- nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS,
- cred);
+ ret = svc_xprt_create(serv, "tcp", net, PF_INET6,
+ nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS,
+ cred);
if (ret > 0) {
nn->nfs_callback_tcpport6 = ret;
dprintk("NFS: Callback listener port = %u (af %u, net %x)\n",
@@ -75,72 +74,20 @@ out_err:
static int
nfs4_callback_svc(void *vrqstp)
{
- int err;
struct svc_rqst *rqstp = vrqstp;
+ svc_thread_init_status(rqstp, 0);
+
set_freezable();
- while (!kthread_freezable_should_stop(NULL)) {
+ while (!svc_thread_should_stop(rqstp))
+ svc_recv(rqstp);
- if (signal_pending(current))
- flush_signals(current);
- /*
- * Listen for a request on the socket
- */
- err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
- if (err == -EAGAIN || err == -EINTR)
- continue;
- svc_process(rqstp);
- }
svc_exit_thread(rqstp);
- module_put_and_exit(0);
return 0;
}
#if defined(CONFIG_NFS_V4_1)
-/*
- * The callback service for NFSv4.1 callbacks
- */
-static int
-nfs41_callback_svc(void *vrqstp)
-{
- struct svc_rqst *rqstp = vrqstp;
- struct svc_serv *serv = rqstp->rq_server;
- struct rpc_rqst *req;
- int error;
- DEFINE_WAIT(wq);
-
- set_freezable();
-
- while (!kthread_freezable_should_stop(NULL)) {
-
- if (signal_pending(current))
- flush_signals(current);
-
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
- spin_lock_bh(&serv->sv_cb_lock);
- if (!list_empty(&serv->sv_cb_list)) {
- req = list_first_entry(&serv->sv_cb_list,
- struct rpc_rqst, rq_bc_list);
- list_del(&req->rq_bc_list);
- spin_unlock_bh(&serv->sv_cb_lock);
- finish_wait(&serv->sv_cb_waitq, &wq);
- dprintk("Invoking bc_svc_process()\n");
- error = bc_svc_process(serv, req, rqstp);
- dprintk("bc_svc_process() returned w/ error code= %d\n",
- error);
- } else {
- spin_unlock_bh(&serv->sv_cb_lock);
- if (!kthread_should_stop())
- schedule();
- finish_wait(&serv->sv_cb_waitq, &wq);
- }
- }
- svc_exit_thread(rqstp);
- module_put_and_exit(0);
- return 0;
-}
-
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct svc_serv *serv)
{
@@ -169,12 +116,12 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
if (nrservs < NFS4_MIN_NR_CALLBACK_THREADS)
nrservs = NFS4_MIN_NR_CALLBACK_THREADS;
- if (serv->sv_nrthreads-1 == nrservs)
+ if (serv->sv_nrthreads == nrservs)
return 0;
- ret = serv->sv_ops->svo_setup(serv, NULL, nrservs);
+ ret = svc_set_num_threads(serv, NULL, nrservs);
if (ret) {
- serv->sv_ops->svo_setup(serv, NULL, 0);
+ svc_set_num_threads(serv, NULL, 0);
return ret;
}
dprintk("nfs_callback_up: service started\n");
@@ -189,7 +136,7 @@ static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struc
return;
dprintk("NFS: destroy per-net callback data; net=%x\n", net->ns.inum);
- svc_shutdown_net(serv, net);
+ svc_xprt_destroy_all(serv, net, false);
}
static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
@@ -206,7 +153,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
ret = svc_bind(serv, net);
if (ret < 0) {
printk(KERN_WARNING "NFS: bind callback service failed\n");
- goto err_bind;
+ goto err;
}
ret = 0;
@@ -219,72 +166,28 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
if (ret < 0) {
printk(KERN_ERR "NFS: callback service start failed\n");
- goto err_socks;
+ goto err;
}
return 0;
-err_socks:
- svc_rpcb_cleanup(serv, net);
-err_bind:
+err:
nn->cb_users[minorversion]--;
dprintk("NFS: Couldn't create callback socket: err = %d; "
"net = %x\n", ret, net->ns.inum);
return ret;
}
-static const struct svc_serv_ops nfs40_cb_sv_ops = {
- .svo_function = nfs4_callback_svc,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads_sync,
- .svo_module = THIS_MODULE,
-};
-#if defined(CONFIG_NFS_V4_1)
-static const struct svc_serv_ops nfs41_cb_sv_ops = {
- .svo_function = nfs41_callback_svc,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads_sync,
- .svo_module = THIS_MODULE,
-};
-
-static const struct svc_serv_ops *nfs4_cb_sv_ops[] = {
- [0] = &nfs40_cb_sv_ops,
- [1] = &nfs41_cb_sv_ops,
-};
-#else
-static const struct svc_serv_ops *nfs4_cb_sv_ops[] = {
- [0] = &nfs40_cb_sv_ops,
- [1] = NULL,
-};
-#endif
-
static struct svc_serv *nfs_callback_create_svc(int minorversion)
{
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
- const struct svc_serv_ops *sv_ops;
+ int (*threadfn)(void *data);
struct svc_serv *serv;
/*
* Check whether we're already up and running.
*/
- if (cb_info->serv) {
- /*
- * Note: increase service usage, because later in case of error
- * svc_destroy() will be called.
- */
- svc_get(cb_info->serv);
+ if (cb_info->serv)
return cb_info->serv;
- }
-
- switch (minorversion) {
- case 0:
- sv_ops = nfs4_cb_sv_ops[0];
- break;
- default:
- sv_ops = nfs4_cb_sv_ops[1];
- }
-
- if (sv_ops == NULL)
- return ERR_PTR(-ENOTSUPP);
/*
* Sanity check: if there's no task,
@@ -294,16 +197,18 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
cb_info->users);
- serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
+ threadfn = nfs4_callback_svc;
+#if !defined(CONFIG_NFS_V4_1)
+ if (minorversion)
+ return ERR_PTR(-ENOTSUPP);
+#endif
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE,
+ threadfn);
if (!serv) {
printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
return ERR_PTR(-ENOMEM);
}
cb_info->serv = serv;
- /* As there is only one thread we need to over-ride the
- * default maximum of 80 connections
- */
- serv->sv_maxconn = 1024;
dprintk("nfs_callback_create_svc: service created\n");
return serv;
}
@@ -335,16 +240,11 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
goto err_start;
cb_info->users++;
- /*
- * svc_create creates the svc_serv with sv_nrthreads == 1, and then
- * svc_prepare_thread increments that. So we need to call svc_destroy
- * on both success and failure so that the refcount is 1 when the
- * thread exits.
- */
err_net:
- if (!cb_info->users)
- cb_info->serv = NULL;
- svc_destroy(serv);
+ if (!cb_info->users) {
+ svc_set_num_threads(cb_info->serv, NULL, 0);
+ svc_destroy(&cb_info->serv);
+ }
err_create:
mutex_unlock(&nfs_callback_mutex);
return ret;
@@ -368,11 +268,9 @@ void nfs_callback_down(int minorversion, struct net *net)
nfs_callback_down_net(minorversion, serv, net);
cb_info->users--;
if (cb_info->users == 0) {
- svc_get(serv);
- serv->sv_ops->svo_setup(serv, NULL, 0);
- svc_destroy(serv);
+ svc_set_num_threads(serv, NULL, 0);
dprintk("nfs_callback_down: service destroyed\n");
- cb_info->serv = NULL;
+ svc_destroy(&cb_info->serv);
}
mutex_unlock(&nfs_callback_mutex);
}
@@ -427,7 +325,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
* All other checking done after NFS decoding where the nfs_client can be
* found in nfs4_callback_compound
*/
-static int nfs_callback_authenticate(struct svc_rqst *rqstp)
+static enum svc_auth_status nfs_callback_authenticate(struct svc_rqst *rqstp)
{
rqstp->rq_auth_stat = rpc_autherr_badcred;
@@ -454,15 +352,12 @@ static const struct svc_version *nfs4_callback_version[] = {
[4] = &nfs4_callback_version4,
};
-static struct svc_stat nfs4_callback_stats;
-
static struct svc_program nfs4_callback_program = {
.pg_prog = NFS4_CALLBACK, /* RPC service number */
.pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
.pg_vers = nfs4_callback_version, /* version table */
.pg_name = "NFSv4 callback", /* service name */
.pg_class = "nfs", /* authentication class */
- .pg_stats = &nfs4_callback_stats,
.pg_authenticate = nfs_callback_authenticate,
.pg_init_request = svc_generic_init_request,
.pg_rpcbind_set = svc_generic_rpcbind_set,
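
The pg_authenticate hook now returns enum svc_auth_status rather than a plain int. A hedged sketch of the shape such a hook takes under the new signature is below; SVC_OK and SVC_DENIED are assumed from the sunrpc svcauth interface, and example_flavour_is_acceptable() is a hypothetical stand-in for the existing flavour checks, which this hunk leaves unchanged.

/* Sketch: pg_authenticate under the enum svc_auth_status signature. */
static enum svc_auth_status example_cb_authenticate(struct svc_rqst *rqstp)
{
	rqstp->rq_auth_stat = rpc_autherr_badcred;

	/* Reject flavours the backchannel cannot serve, accept the rest. */
	if (!example_flavour_is_acceptable(rqstp))
		return SVC_DENIED;
	return SVC_OK;
}
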
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 6a2033131c06..154a6ed1299f 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -19,32 +19,14 @@ enum nfs4_callback_procnum {
CB_COMPOUND = 1,
};
-enum nfs4_callback_opnum {
- OP_CB_GETATTR = 3,
- OP_CB_RECALL = 4,
-/* Callback operations new to NFSv4.1 */
- OP_CB_LAYOUTRECALL = 5,
- OP_CB_NOTIFY = 6,
- OP_CB_PUSH_DELEG = 7,
- OP_CB_RECALL_ANY = 8,
- OP_CB_RECALLABLE_OBJ_AVAIL = 9,
- OP_CB_RECALL_SLOT = 10,
- OP_CB_SEQUENCE = 11,
- OP_CB_WANTS_CANCELLED = 12,
- OP_CB_NOTIFY_LOCK = 13,
- OP_CB_NOTIFY_DEVICEID = 14,
-/* Callback operations new to NFSv4.2 */
- OP_CB_OFFLOAD = 15,
- OP_CB_ILLEGAL = 10044,
-};
-
struct nfs4_slot;
struct cb_process_state {
- __be32 drc_status;
struct nfs_client *clp;
struct nfs4_slot *slot;
- u32 minorversion;
struct net *net;
+ u32 minorversion;
+ __be32 drc_status;
+ unsigned int referring_calls;
};
struct cb_compound_hdr_arg {
@@ -64,14 +46,15 @@ struct cb_compound_hdr_res {
struct cb_getattrargs {
struct nfs_fh fh;
- uint32_t bitmap[2];
+ uint32_t bitmap[3];
};
struct cb_getattrres {
__be32 status;
- uint32_t bitmap[2];
+ uint32_t bitmap[3];
uint64_t size;
uint64_t change_attr;
+ struct timespec64 atime;
struct timespec64 ctime;
struct timespec64 mtime;
};
@@ -170,7 +153,7 @@ struct cb_devicenotifyitem {
};
struct cb_devicenotifyargs {
- int ndevs;
+ uint32_t ndevs;
struct cb_devicenotifyitem *devs;
};
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 09c5b1cb3e07..8397c43358bd 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -37,7 +37,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
goto out;
- res->bitmap[0] = res->bitmap[1] = 0;
+ memset(res->bitmap, 0, sizeof(res->bitmap));
res->status = htonl(NFS4ERR_BADHANDLE);
dprintk_rcu("NFS: GETATTR callback request from %s\n",
@@ -59,12 +59,16 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
res->change_attr = delegation->change_attr;
if (nfs_have_writebacks(inode))
res->change_attr++;
- res->ctime = inode->i_ctime;
- res->mtime = inode->i_mtime;
- res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
- args->bitmap[0];
- res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
- args->bitmap[1];
+ res->atime = inode_get_atime(inode);
+ res->ctime = inode_get_ctime(inode);
+ res->mtime = inode_get_mtime(inode);
+ res->bitmap[0] = (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE) &
+ args->bitmap[0];
+ res->bitmap[1] = (FATTR4_WORD1_TIME_ACCESS |
+ FATTR4_WORD1_TIME_METADATA |
+ FATTR4_WORD1_TIME_MODIFY) & args->bitmap[1];
+ res->bitmap[2] = (FATTR4_WORD2_TIME_DELEG_ACCESS |
+ FATTR4_WORD2_TIME_DELEG_MODIFY) & args->bitmap[2];
res->status = 0;
out_iput:
rcu_read_unlock();
@@ -207,7 +211,8 @@ static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
* Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
*/
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
- const nfs4_stateid *new)
+ const nfs4_stateid *new,
+ struct cb_process_state *cps)
{
u32 oldseq, newseq;
@@ -221,28 +226,29 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
newseq = be32_to_cpu(new->seqid);
/* Are we already in a layout recall situation? */
- if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
- lo->plh_return_seq != 0) {
- if (newseq < lo->plh_return_seq)
- return NFS4ERR_OLD_STATEID;
- if (newseq > lo->plh_return_seq)
- return NFS4ERR_DELAY;
- goto out;
- }
+ if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ return NFS4ERR_DELAY;
- /* Check that the stateid matches what we think it should be. */
+ /*
+ * Check that the stateid matches what we think it should be.
+ * Note that if the server sent us a list of referring calls,
+ * and we know that those have completed, then we trust the
+ * stateid argument is correct.
+ */
oldseq = be32_to_cpu(lo->plh_stateid.seqid);
- if (newseq > oldseq + 1)
+ if (newseq > oldseq + 1 && !cps->referring_calls)
return NFS4ERR_DELAY;
+
/* Crazy server! */
if (newseq <= oldseq)
return NFS4ERR_OLD_STATEID;
-out:
+
return NFS_OK;
}
static u32 initiate_file_draining(struct nfs_client *clp,
- struct cb_layoutrecallargs *args)
+ struct cb_layoutrecallargs *args,
+ struct cb_process_state *cps)
{
struct inode *ino;
struct pnfs_layout_hdr *lo;
@@ -266,7 +272,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
goto out;
}
pnfs_get_layout_hdr(lo);
- rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
+ rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid, cps);
if (rv != NFS_OK)
goto unlock;
@@ -288,6 +294,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
rv = NFS4_OK;
break;
case -ENOENT:
+ set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
/* Embrace your forgetfulness! */
rv = NFS4ERR_NOMATCHING_LAYOUT;
@@ -316,19 +323,21 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
int stat;
if (args->cbl_recall_type == RETURN_FSID)
- stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
+ stat = pnfs_layout_destroy_byfsid(clp, &args->cbl_fsid,
+ PNFS_LAYOUT_BULK_RETURN);
else
- stat = pnfs_destroy_layouts_byclid(clp, true);
+ stat = pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_BULK_RETURN);
if (stat != 0)
return NFS4ERR_DELAY;
return NFS4ERR_NOMATCHING_LAYOUT;
}
static u32 do_callback_layoutrecall(struct nfs_client *clp,
- struct cb_layoutrecallargs *args)
+ struct cb_layoutrecallargs *args,
+ struct cb_process_state *cps)
{
if (args->cbl_recall_type == RETURN_FILE)
- return initiate_file_draining(clp, args);
+ return initiate_file_draining(clp, args, cps);
return initiate_bulk_draining(clp, args);
}
@@ -339,11 +348,12 @@ __be32 nfs4_callback_layoutrecall(void *argp, void *resp,
u32 res = NFS4ERR_OP_NOT_IN_SESSION;
if (cps->clp)
- res = do_callback_layoutrecall(cps->clp, args);
+ res = do_callback_layoutrecall(cps->clp, args, cps);
return cpu_to_be32(res);
}
-static void pnfs_recall_all_layouts(struct nfs_client *clp)
+static void pnfs_recall_all_layouts(struct nfs_client *clp,
+ struct cb_process_state *cps)
{
struct cb_layoutrecallargs args;
@@ -351,19 +361,18 @@ static void pnfs_recall_all_layouts(struct nfs_client *clp)
memset(&args, 0, sizeof(args));
args.cbl_recall_type = RETURN_ALL;
/* FIXME we ignore errors, what should we do? */
- do_callback_layoutrecall(clp, &args);
+ do_callback_layoutrecall(clp, &args, cps);
}
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
struct cb_process_state *cps)
{
struct cb_devicenotifyargs *args = argp;
- int i;
+ const struct pnfs_layoutdriver_type *ld = NULL;
+ uint32_t i;
__be32 res = 0;
- struct nfs_client *clp = cps->clp;
- struct nfs_server *server = NULL;
- if (!clp) {
+ if (!cps->clp) {
res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
goto out;
}
@@ -371,23 +380,15 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp,
for (i = 0; i < args->ndevs; i++) {
struct cb_devicenotifyitem *dev = &args->devs[i];
- if (!server ||
- server->pnfs_curr_ld->id != dev->cbd_layout_type) {
- rcu_read_lock();
- list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
- if (server->pnfs_curr_ld &&
- server->pnfs_curr_ld->id == dev->cbd_layout_type) {
- rcu_read_unlock();
- goto found;
- }
- rcu_read_unlock();
- continue;
+ if (!ld || ld->id != dev->cbd_layout_type) {
+ pnfs_put_layoutdriver(ld);
+ ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
+ if (!ld)
+ continue;
}
-
- found:
- nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
+ nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
}
-
+ pnfs_put_layoutdriver(ld);
out:
kfree(args->devs);
return res;
@@ -458,6 +459,7 @@ static int referring_call_exists(struct nfs_client *clp,
__acquires(lock)
{
int status = 0;
+ int found = 0;
int i, j;
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
@@ -486,11 +488,12 @@ static int referring_call_exists(struct nfs_client *clp,
spin_lock(lock);
if (status)
goto out;
+ found++;
}
}
out:
- return status;
+ return status < 0 ? status : found;
}
__be32 nfs4_callback_sequence(void *argp, void *resp,
@@ -501,6 +504,7 @@ __be32 nfs4_callback_sequence(void *argp, void *resp,
struct nfs4_slot_table *tbl;
struct nfs4_slot *slot;
struct nfs_client *clp;
+ int ret;
int i;
__be32 status = htonl(NFS4ERR_BADSESSION);
@@ -560,11 +564,13 @@ __be32 nfs4_callback_sequence(void *argp, void *resp,
* related callback was received before the response to the original
* call.
*/
- if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
- &tbl->slot_tbl_lock) < 0) {
+ ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
+ &tbl->slot_tbl_lock);
+ if (ret < 0) {
status = htonl(NFS4ERR_DELAY);
goto out_unlock;
}
+ cps->referring_calls = ret;
/*
* RFC5661 20.9.3
@@ -625,7 +631,7 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
nfs_expire_unused_delegation_types(cps->clp, flags);
if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
- pnfs_recall_all_layouts(cps->clp);
+ pnfs_recall_all_layouts(cps->clp, cps);
if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
@@ -710,9 +716,9 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
struct nfs4_copy_state *copy, *tmp_copy;
bool found = false;
- copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+ copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
- return htonl(NFS4ERR_SERVERFAULT);
+ return cpu_to_be32(NFS4ERR_DELAY);
spin_lock(&cps->clp->cl_lock);
rcu_read_lock();
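
The CB_SEQUENCE/layoutrecall changes above thread the count of completed referring calls into cb_process_state so the layout stateid seqid check can be relaxed. A compact restatement of the resulting rule as a stand-alone function over plain integers follows; the names are illustrative, not the in-tree API.

#include <stdbool.h>
#include <stdint.h>

/*
 * Seqid acceptance after this series:
 *  - a recall racing with an in-flight layoutreturn is delayed,
 *  - a seqid that skips ahead is delayed unless referring calls
 *    prove the intervening replies were already processed,
 *  - a stale or equal seqid is rejected outright.
 */
enum cb_seq_verdict { CB_SEQ_OK, CB_SEQ_DELAY, CB_SEQ_OLD };

static enum cb_seq_verdict check_recall_seqid(uint32_t oldseq, uint32_t newseq,
					      bool layoutreturn_in_flight,
					      unsigned int referring_calls)
{
	if (layoutreturn_in_flight)
		return CB_SEQ_DELAY;
	if (newseq > oldseq + 1 && referring_calls == 0)
		return CB_SEQ_DELAY;
	if (newseq <= oldseq)
		return CB_SEQ_OLD;
	return CB_SEQ_OK;
}
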
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index a67c41ec545f..4254ba3ee7c5 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -25,8 +25,9 @@
#define CB_OP_GETATTR_BITMAP_MAXSZ (4 * 4) // bitmap length, 3 bitmaps
#define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
CB_OP_GETATTR_BITMAP_MAXSZ + \
- /* change, size, ctime, mtime */\
- (2 + 2 + 3 + 3) * 4)
+ /* change, size, atime, ctime,
+ * mtime, deleg_atime, deleg_mtime */\
+ (2 + 2 + 3 + 3 + 3 + 3 + 3) * 4)
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#if defined(CONFIG_NFS_V4_1)
@@ -117,7 +118,9 @@ static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
if (likely(attrlen > 0))
bitmap[0] = ntohl(*p++);
if (attrlen > 1)
- bitmap[1] = ntohl(*p);
+ bitmap[1] = ntohl(*p++);
+ if (attrlen > 2)
+ bitmap[2] = ntohl(*p);
return 0;
}
@@ -258,11 +261,9 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
void *argp)
{
struct cb_devicenotifyargs *args = argp;
+ uint32_t tmp, n, i;
__be32 *p;
__be32 status = 0;
- u32 tmp;
- int n, i;
- args->ndevs = 0;
/* Num of device notifications */
p = xdr_inline_decode(xdr, sizeof(uint32_t));
@@ -271,12 +272,8 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
goto out;
}
n = ntohl(*p++);
- if (n <= 0)
- goto out;
- if (n > ULONG_MAX / sizeof(*args->devs)) {
- status = htonl(NFS4ERR_BADXDR);
+ if (n == 0)
goto out;
- }
args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
if (!args->devs) {
@@ -330,19 +327,21 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
dev->cbd_immediate = 0;
}
- args->ndevs++;
-
dprintk("%s: type %d layout 0x%x immediate %d\n",
__func__, dev->cbd_notify_type, dev->cbd_layout_type,
dev->cbd_immediate);
}
+ args->ndevs = n;
+ dprintk("%s: ndevs %d\n", __func__, args->ndevs);
+ return 0;
+err:
+ kfree(args->devs);
out:
+ args->devs = NULL;
+ args->ndevs = 0;
dprintk("%s: status %d ndevs %d\n",
__func__, ntohl(status), args->ndevs);
return status;
-err:
- kfree(args->devs);
- goto out;
}
static __be32 decode_sessionid(struct xdr_stream *xdr,
@@ -376,6 +375,8 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
rc_list->rcl_nrefcalls = ntohl(*p++);
if (rc_list->rcl_nrefcalls) {
+ if (unlikely(rc_list->rcl_nrefcalls > xdr->buf->len))
+ goto out;
p = xdr_inline_decode(xdr,
rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
if (unlikely(p == NULL))
@@ -449,7 +450,7 @@ static __be32 decode_recallany_args(struct svc_rqst *rqstp,
void *argp)
{
struct cb_recallanyargs *args = argp;
- uint32_t bitmap[2];
+ uint32_t bitmap[3];
__be32 *p, status;
p = xdr_inline_decode(xdr, 4);
@@ -639,6 +640,13 @@ static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec64 *
return 0;
}
+static __be32 encode_attr_atime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
+{
+ if (!(bitmap[1] & FATTR4_WORD1_TIME_ACCESS))
+ return 0;
+ return encode_attr_time(xdr,time);
+}
+
static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
{
if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA))
@@ -653,6 +661,24 @@ static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap,
return encode_attr_time(xdr,time);
}
+static __be32 encode_attr_delegatime(struct xdr_stream *xdr,
+ const uint32_t *bitmap,
+ const struct timespec64 *time)
+{
+ if (!(bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS))
+ return 0;
+ return encode_attr_time(xdr,time);
+}
+
+static __be32 encode_attr_delegmtime(struct xdr_stream *xdr,
+ const uint32_t *bitmap,
+ const struct timespec64 *time)
+{
+ if (!(bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY))
+ return 0;
+ return encode_attr_time(xdr,time);
+}
+
static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr)
{
__be32 status;
@@ -703,10 +729,19 @@ static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
status = encode_attr_size(xdr, res->bitmap, res->size);
if (unlikely(status != 0))
goto out;
+ status = encode_attr_atime(xdr, res->bitmap, &res->atime);
+ if (unlikely(status != 0))
+ goto out;
status = encode_attr_ctime(xdr, res->bitmap, &res->ctime);
if (unlikely(status != 0))
goto out;
status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_delegatime(xdr, res->bitmap, &res->atime);
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_delegmtime(xdr, res->bitmap, &res->mtime);
*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
out:
return status;
@@ -949,6 +984,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
nfs_put_client(cps.clp);
goto out_invalidcred;
}
+ svc_xprt_set_valid(rqstp->rq_xprt);
}
cps.minorversion = hdr_arg.minorversion;
@@ -971,6 +1007,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
nops--;
}
+ if (svc_is_backchannel(rqstp) && cps.clp) {
+ rqstp->bc_to_initval = cps.clp->cl_rpcclient->cl_timeout->to_initval;
+ rqstp->bc_to_retries = cps.clp->cl_rpcclient->cl_timeout->to_retries;
+ }
+
*hdr_res.status = status;
*hdr_res.nops = htonl(nops);
nfs4_cb_free_slot(&cps);
@@ -984,14 +1025,11 @@ out_invalidcred:
}
static int
-nfs_callback_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+nfs_callback_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *procp = rqstp->rq_procinfo;
- svcxdr_init_decode(rqstp);
- svcxdr_init_encode(rqstp);
-
- *statp = procp->pc_func(rqstp);
+ *rqstp->rq_accept_statp = procp->pc_func(rqstp);
return 1;
}
@@ -1069,13 +1107,15 @@ static const struct svc_procedure nfs4_callback_procedures1[] = {
.pc_func = nfs4_callback_compound,
.pc_encode = nfs4_encode_void,
.pc_argsize = 256,
+ .pc_argzero = 256,
.pc_ressize = 256,
.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
.pc_name = "COMPOUND",
}
};
-static unsigned int nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)]);
const struct svc_version nfs4_callback_version1 = {
.vs_vers = 1,
.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
@@ -1087,7 +1127,8 @@ const struct svc_version nfs4_callback_version1 = {
.vs_need_cong_ctrl = true,
};
-static unsigned int nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)]);
const struct svc_version nfs4_callback_version4 = {
.vs_vers = 4,
.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
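
With the third bitmap word in play, the GETATTR callback reply masks each requested word against what the client can supply: change/size live in word 0, the three timestamps in word 1, and the delegated times in word 2. A small stand-alone illustration of that per-word masking is below; the mask values are placeholders, since the real FATTR4_WORD* constants come from nfs4.h and only the word grouping matters for the example.

#include <stdint.h>
#include <stdio.h>

/* Placeholder masks; substitute the FATTR4_WORD{0,1,2}_* bits in real code. */
#define WORD0_SUPPORTED 0x00000018u	/* change | size (illustrative)      */
#define WORD1_SUPPORTED 0x00038000u	/* access/metadata/modify times (ill.) */
#define WORD2_SUPPORTED 0x00180000u	/* delegated atime | mtime (ill.)    */

static void mask_reply_bitmap(const uint32_t req[3], uint32_t res[3])
{
	res[0] = req[0] & WORD0_SUPPORTED;
	res[1] = req[1] & WORD1_SUPPORTED;
	res[2] = req[2] & WORD2_SUPPORTED;
}

int main(void)
{
	uint32_t req[3] = { 0xffffffffu, 0xffffffffu, 0xffffffffu };
	uint32_t res[3];

	mask_reply_bitmap(req, res);
	printf("reply bitmap: %08x %08x %08x\n", res[0], res[1], res[2]);
	return 0;
}
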
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 1e4dc1ab9312..54699299d5b1 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -38,7 +38,7 @@
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
-
+#include <linux/nfslocalio.h>
#include "nfs4_fs.h"
#include "callback.h"
@@ -55,9 +55,13 @@
#define NFSDBG_FACILITY NFSDBG_CLIENT
static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
-static DEFINE_SPINLOCK(nfs_version_lock);
-static DEFINE_MUTEX(nfs_version_mutex);
-static LIST_HEAD(nfs_versions);
+static DEFINE_RWLOCK(nfs_version_lock);
+
+static struct nfs_subversion *nfs_version_mods[5] = {
+ [2] = NULL,
+ [3] = NULL,
+ [4] = NULL,
+};
/*
* RPC cruft for NFS
@@ -73,46 +77,41 @@ const struct rpc_program nfs_program = {
.number = NFS_PROGRAM,
.nrvers = ARRAY_SIZE(nfs_version),
.version = nfs_version,
- .stats = &nfs_rpcstat,
.pipe_dir_name = NFS_PIPE_DIRNAME,
};
-struct rpc_stat nfs_rpcstat = {
- .program = &nfs_program
-};
-
-static struct nfs_subversion *find_nfs_version(unsigned int version)
+static struct nfs_subversion *__find_nfs_version(unsigned int version)
{
struct nfs_subversion *nfs;
- spin_lock(&nfs_version_lock);
- list_for_each_entry(nfs, &nfs_versions, list) {
- if (nfs->rpc_ops->version == version) {
- spin_unlock(&nfs_version_lock);
- return nfs;
- }
- }
-
- spin_unlock(&nfs_version_lock);
- return ERR_PTR(-EPROTONOSUPPORT);
+ read_lock(&nfs_version_lock);
+ nfs = nfs_version_mods[version];
+ read_unlock(&nfs_version_lock);
+ return nfs;
}
-struct nfs_subversion *get_nfs_version(unsigned int version)
+struct nfs_subversion *find_nfs_version(unsigned int version)
{
- struct nfs_subversion *nfs = find_nfs_version(version);
+ struct nfs_subversion *nfs = __find_nfs_version(version);
- if (IS_ERR(nfs)) {
- mutex_lock(&nfs_version_mutex);
- request_module("nfsv%d", version);
- nfs = find_nfs_version(version);
- mutex_unlock(&nfs_version_mutex);
- }
+ if (!nfs && request_module("nfsv%d", version) == 0)
+ nfs = __find_nfs_version(version);
- if (!IS_ERR(nfs) && !try_module_get(nfs->owner))
+ if (!nfs)
+ return ERR_PTR(-EPROTONOSUPPORT);
+
+ if (!get_nfs_version(nfs))
return ERR_PTR(-EAGAIN);
+
return nfs;
}
+int get_nfs_version(struct nfs_subversion *nfs)
+{
+ return try_module_get(nfs->owner);
+}
+EXPORT_SYMBOL_GPL(get_nfs_version);
+
void put_nfs_version(struct nfs_subversion *nfs)
{
module_put(nfs->owner);
@@ -120,23 +119,23 @@ void put_nfs_version(struct nfs_subversion *nfs)
void register_nfs_version(struct nfs_subversion *nfs)
{
- spin_lock(&nfs_version_lock);
+ write_lock(&nfs_version_lock);
- list_add(&nfs->list, &nfs_versions);
+ nfs_version_mods[nfs->rpc_ops->version] = nfs;
nfs_version[nfs->rpc_ops->version] = nfs->rpc_vers;
- spin_unlock(&nfs_version_lock);
+ write_unlock(&nfs_version_lock);
}
EXPORT_SYMBOL_GPL(register_nfs_version);
void unregister_nfs_version(struct nfs_subversion *nfs)
{
- spin_lock(&nfs_version_lock);
+ write_lock(&nfs_version_lock);
nfs_version[nfs->rpc_ops->version] = NULL;
- list_del(&nfs->list);
+ nfs_version_mods[nfs->rpc_ops->version] = NULL;
- spin_unlock(&nfs_version_lock);
+ write_unlock(&nfs_version_lock);
}
EXPORT_SYMBOL_GPL(unregister_nfs_version);
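
The version registry rework above trades the mutex-and-list lookup for a fixed array under a rwlock, with module pinning split into get_nfs_version()/put_nfs_version(). A sketch of how a caller uses the pair, with error handling trimmed; the names and return values are the ones introduced in this hunk.

/* Sketch: resolve an NFS version module, use it, drop the reference. */
static int example_use_version(unsigned int version)
{
	struct nfs_subversion *nfs_mod;

	nfs_mod = find_nfs_version(version);	/* may request_module("nfsv%d") */
	if (IS_ERR(nfs_mod))
		return PTR_ERR(nfs_mod);	/* -EPROTONOSUPPORT or -EAGAIN */

	/* ... use nfs_mod->rpc_ops ... */

	put_nfs_version(nfs_mod);		/* drops the module reference */
	return 0;
}
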
@@ -156,7 +155,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_minorversion = cl_init->minorversion;
clp->cl_nfs_mod = cl_init->nfs_mod;
- if (!try_module_get(clp->cl_nfs_mod->owner))
+ if (!get_nfs_version(clp->cl_nfs_mod))
goto error_dealloc;
clp->rpc_ops = clp->cl_nfs_mod->rpc_ops;
@@ -177,14 +176,21 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
INIT_LIST_HEAD(&clp->cl_superblocks);
clp->cl_rpcclient = ERR_PTR(-EINVAL);
+ clp->cl_flags = cl_init->init_flags;
clp->cl_proto = cl_init->proto;
clp->cl_nconnect = cl_init->nconnect;
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
- clp->cl_net = get_net(cl_init->net);
+ clp->cl_net = get_net_track(cl_init->net, &clp->cl_ns_tracker, GFP_KERNEL);
- clp->cl_principal = "*";
- nfs_fscache_get_client_cookie(clp);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ seqlock_init(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ nfs_uuid_init(&clp->cl_uuid);
+ INIT_WORK(&clp->cl_local_probe_work, nfs_local_probe_async_work);
+#endif /* CONFIG_NFS_LOCALIO */
+ clp->cl_principal = "*";
+ clp->cl_xprtsec = cl_init->xprtsec;
return clp;
error_cleanup:
@@ -238,17 +244,17 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
- nfs_fscache_release_client_cookie(clp);
+ nfs_localio_disable_client(clp);
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
- put_net(clp->cl_net);
+ put_net_track(clp->cl_net, &clp->cl_ns_tracker);
put_nfs_version(clp->cl_nfs_mod);
kfree(clp->cl_hostname);
kfree(clp->cl_acceptor);
- kfree(clp);
+ kfree_rcu(clp, rcu);
}
EXPORT_SYMBOL_GPL(nfs_free_client);
@@ -283,7 +289,7 @@ EXPORT_SYMBOL_GPL(nfs_put_client);
static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *data)
{
struct nfs_client *clp;
- const struct sockaddr *sap = data->addr;
+ const struct sockaddr *sap = (struct sockaddr *)data->addr;
struct nfs_net *nn = net_generic(data->net, nfs_net_id);
int error;
@@ -329,6 +335,18 @@ again:
sap))
continue;
+ /* Match the xprt security policy */
+ if (clp->cl_xprtsec.policy != data->xprtsec.policy)
+ continue;
+ if (clp->cl_xprtsec.policy == RPC_XPRTSEC_TLS_X509) {
+ if (clp->cl_xprtsec.cert_serial !=
+ data->xprtsec.cert_serial)
+ continue;
+ if (clp->cl_xprtsec.privkey_serial !=
+ data->xprtsec.privkey_serial)
+ continue;
+ }
+
refcount_inc(&clp->cl_count);
return clp;
}
@@ -427,8 +445,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- new->cl_flags = cl_init->init_flags;
- return rpc_ops->init_client(new, cl_init);
+ new = rpc_ops->init_client(new, cl_init);
+ if (!IS_ERR(new))
+ nfs_local_probe_async(new);
+ return new;
}
spin_unlock(&nn->nfs_client_lock);
@@ -462,6 +482,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
switch (proto) {
case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_TCP_TLS:
case XPRT_TRANSPORT_RDMA:
if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_TCP_RETRANS;
@@ -500,6 +521,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
const struct nfs_client_initdata *cl_init,
rpc_authflavor_t flavor)
{
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
.net = clp->cl_net,
@@ -511,9 +533,13 @@ int nfs_create_rpc_client(struct nfs_client *clp,
.servername = clp->cl_hostname,
.nodename = cl_init->nodename,
.program = &nfs_program,
+ .stats = &nn->rpcstats,
.version = clp->rpc_ops->version,
.authflavor = flavor,
.cred = cl_init->cred,
+ .xprtsec = cl_init->xprtsec,
+ .connect_timeout = cl_init->connect_timeout,
+ .reconnect_timeout = cl_init->reconnect_timeout,
};
if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
@@ -528,6 +554,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
args.flags |= RPC_CLNT_CREATE_NOPING;
if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_REUSEPORT;
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL;
if (!IS_ERR(clp->cl_rpcclient))
return 0;
@@ -596,6 +624,7 @@ static int nfs_start_lockd(struct nfs_server *server)
server->nlm_host = host;
server->destroy = nfs_destroy_server;
+ nfs_sysfs_link_rpc_client(server, nlmclnt_rpc_clnt(host), NULL);
return 0;
}
@@ -625,6 +654,7 @@ int nfs_init_server_rpcclient(struct nfs_server *server,
if (server->flags & NFS_MOUNT_SOFT)
server->client->cl_softrtry = 1;
+ nfs_sysfs_link_rpc_client(server, server->client, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
@@ -660,6 +690,44 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp,
}
EXPORT_SYMBOL_GPL(nfs_init_client);
+static void nfs4_server_set_init_caps(struct nfs_server *server)
+{
+#if IS_ENABLED(CONFIG_NFS_V4)
+ /* Set the basic capabilities */
+ server->caps = server->nfs_client->cl_mvops->init_caps;
+ if (server->flags & NFS_MOUNT_NORDIRPLUS)
+ server->caps &= ~NFS_CAP_READDIRPLUS;
+ if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
+ server->caps &= ~NFS_CAP_READ_PLUS;
+
+ /*
+ * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+ * authentication.
+ */
+ if (nfs4_disable_idmapping &&
+ server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
+ server->caps |= NFS_CAP_UIDGID_NOMAP;
+#endif
+}
+
+void nfs_server_set_init_caps(struct nfs_server *server)
+{
+ switch (server->nfs_client->rpc_ops->version) {
+ case 2:
+ server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
+ break;
+ case 3:
+ server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
+ if (!(server->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ break;
+ default:
+ nfs4_server_set_init_caps(server);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_server_set_init_caps);
+
/*
* Create a version 2 or 3 client
*/
@@ -670,7 +738,7 @@ static int nfs_init_server(struct nfs_server *server,
struct rpc_timeout timeparms;
struct nfs_client_initdata cl_init = {
.hostname = ctx->nfs_server.hostname,
- .addr = (const struct sockaddr *)&ctx->nfs_server.address,
+ .addr = &ctx->nfs_server._address,
.addrlen = ctx->nfs_server.addrlen,
.nfs_mod = ctx->nfs_mod,
.proto = ctx->nfs_server.protocol,
@@ -679,6 +747,7 @@ static int nfs_init_server(struct nfs_server *server,
.cred = server->cred,
.nconnect = ctx->nfs_server.nconnect,
.init_flags = (1UL << NFS_CS_REUSEPORT),
+ .xprtsec = ctx->xprtsec,
};
struct nfs_client *clp;
int error;
@@ -688,17 +757,21 @@ static int nfs_init_server(struct nfs_server *server,
if (ctx->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
return PTR_ERR(clp);
server->nfs_client = clp;
+ nfs_sysfs_add_server(server);
+ nfs_sysfs_link_rpc_client(server, clp->cl_rpcclient, "_state");
/* Initialise the client representation from the mount data */
server->flags = ctx->flags;
server->options = ctx->options;
- server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
switch (clp->rpc_ops->version) {
case 2:
@@ -712,9 +785,9 @@ static int nfs_init_server(struct nfs_server *server,
}
if (ctx->rsize)
- server->rsize = nfs_block_size(ctx->rsize, NULL);
+ server->rsize = nfs_io_size(ctx->rsize, clp->cl_proto);
if (ctx->wsize)
- server->wsize = nfs_block_size(ctx->wsize, NULL);
+ server->wsize = nfs_io_size(ctx->wsize, clp->cl_proto);
server->acregmin = ctx->acregmin * HZ;
server->acregmax = ctx->acregmax * HZ;
@@ -734,6 +807,8 @@ static int nfs_init_server(struct nfs_server *server,
if (error < 0)
goto error;
+ nfs_server_set_init_caps(server);
+
/* Preserve the values of mount_server-related mount options */
if (ctx->mount_server.addrlen) {
memcpy(&server->mountd_address, &ctx->mount_server.address,
@@ -759,18 +834,19 @@ error:
static void nfs_server_set_fsinfo(struct nfs_server *server,
struct nfs_fsinfo *fsinfo)
{
+ struct nfs_client *clp = server->nfs_client;
unsigned long max_rpc_payload, raw_max_rpc_payload;
/* Work out a lot of parameters */
if (server->rsize == 0)
- server->rsize = nfs_block_size(fsinfo->rtpref, NULL);
+ server->rsize = nfs_io_size(fsinfo->rtpref, clp->cl_proto);
if (server->wsize == 0)
- server->wsize = nfs_block_size(fsinfo->wtpref, NULL);
+ server->wsize = nfs_io_size(fsinfo->wtpref, clp->cl_proto);
if (fsinfo->rtmax >= 512 && server->rsize > fsinfo->rtmax)
- server->rsize = nfs_block_size(fsinfo->rtmax, NULL);
+ server->rsize = nfs_io_size(fsinfo->rtmax, clp->cl_proto);
if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax)
- server->wsize = nfs_block_size(fsinfo->wtmax, NULL);
+ server->wsize = nfs_io_size(fsinfo->wtmax, clp->cl_proto);
raw_max_rpc_payload = rpc_max_payload(server->client);
max_rpc_payload = nfs_block_size(raw_max_rpc_payload, NULL);
@@ -785,7 +861,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->wsize = max_rpc_payload;
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
- server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
@@ -802,7 +877,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->maxfilesize = fsinfo->maxfilesize;
- server->time_delta = fsinfo->time_delta;
server->change_attr_type = fsinfo->change_attr_type;
server->clone_blksize = fsinfo->clone_blksize;
@@ -822,6 +896,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
if (fsinfo->xattr_support)
server->caps |= NFS_CAP_XATTR;
+ else
+ server->caps &= ~NFS_CAP_XATTR;
#endif
}
@@ -860,6 +936,14 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
server->namelen = pathinfo.max_namelen;
}
+ if (clp->rpc_ops->discover_trunking != NULL &&
+ (server->caps & NFS_CAP_FS_LOCATIONS &&
+ (server->flags & NFS_MOUNT_TRUNK_DISCOVERY))) {
+ error = clp->rpc_ops->discover_trunking(server, mntfh);
+ if (error < 0)
+ return error;
+ }
+
return 0;
}
@@ -899,7 +983,6 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour
target->acregmax = source->acregmax;
target->acdirmin = source->acdirmin;
target->acdirmax = source->acdirmax;
- target->caps = source->caps;
target->options = source->options;
target->auth_info = source->auth_info;
target->port = source->port;
@@ -939,6 +1022,8 @@ void nfs_server_remove_lists(struct nfs_server *server)
}
EXPORT_SYMBOL_GPL(nfs_server_remove_lists);
+static DEFINE_IDA(s_sysfs_ids);
+
/*
* Allocate and initialise a server record
*/
@@ -950,6 +1035,12 @@ struct nfs_server *nfs_alloc_server(void)
if (!server)
return NULL;
+ server->s_sysfs_id = ida_alloc(&s_sysfs_ids, GFP_KERNEL);
+ if (server->s_sysfs_id < 0) {
+ kfree(server);
+ return NULL;
+ }
+
server->client = server->client_acl = ERR_PTR(-EINVAL);
/* Zero out the NFS state stuff */
@@ -959,8 +1050,10 @@ struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->layouts);
INIT_LIST_HEAD(&server->state_owners_lru);
INIT_LIST_HEAD(&server->ss_copies);
+ INIT_LIST_HEAD(&server->ss_src_copies);
atomic_set(&server->active, 0);
+ atomic_long_set(&server->nr_active_delegations, 0);
server->io_stats = nfs_alloc_iostats();
if (!server->io_stats) {
@@ -970,8 +1063,11 @@ struct nfs_server *nfs_alloc_server(void)
server->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED;
- ida_init(&server->openowner_id);
- ida_init(&server->lockowner_id);
+ init_waitqueue_head(&server->write_congestion_wait);
+ atomic_long_set(&server->writeback, 0);
+
+ atomic64_set(&server->owner_ctr, 0);
+
pnfs_init_server(server);
rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
@@ -979,6 +1075,14 @@ struct nfs_server *nfs_alloc_server(void)
}
EXPORT_SYMBOL_GPL(nfs_alloc_server);
+static void delayed_free(struct rcu_head *p)
+{
+ struct nfs_server *server = container_of(p, struct nfs_server, rcu);
+
+ nfs_free_iostats(server->io_stats);
+ kfree(server);
+}
+
/*
* Free up a server record
*/
@@ -996,12 +1100,15 @@ void nfs_free_server(struct nfs_server *server)
nfs_put_client(server->nfs_client);
- ida_destroy(&server->lockowner_id);
- ida_destroy(&server->openowner_id);
- nfs_free_iostats(server->io_stats);
+ if (server->kobj.state_initialized) {
+ nfs_sysfs_remove_server(server);
+ kobject_put(&server->kobj);
+ }
+ ida_free(&s_sysfs_ids, server->s_sysfs_id);
+
put_cred(server->cred);
- kfree(server);
nfs_release_automount_timer();
+ call_rcu(&server->rcu, delayed_free);
}
EXPORT_SYMBOL_GPL(nfs_free_server);
@@ -1045,6 +1152,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc)
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
server->namelen = NFS2_MAXNAMLEN;
}
+ /* Linux 'subtree_check' borkenness mandates this setting */
+ server->fh_expire_type = NFS_FH_VOL_RENAME;
if (!(fattr->valid & NFS_ATTR_FATTR)) {
error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh,
@@ -1097,12 +1206,19 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
server->fsid = fattr->fsid;
+ nfs_sysfs_add_server(server);
+
+ nfs_sysfs_link_rpc_client(server,
+ server->nfs_client->cl_rpcclient, "_state");
+
error = nfs_init_server_rpcclient(server,
source->client->cl_timeout,
flavor);
if (error < 0)
goto out_free_server;
+ nfs_server_set_init_caps(server);
+
/* probe the filesystem info for this server filesystem */
error = nfs_probe_server(server, fh);
if (error < 0)
@@ -1135,8 +1251,14 @@ void nfs_clients_init(struct net *net)
#if IS_ENABLED(CONFIG_NFS_V4)
idr_init(&nn->cb_ident_idr);
#endif
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ INIT_LIST_HEAD(&nn->nfs4_data_server_cache);
+ spin_lock_init(&nn->nfs4_data_server_lock);
+#endif
spin_lock_init(&nn->nfs_client_lock);
nn->boot_time = ktime_get_real();
+ memset(&nn->rpcstats, 0, sizeof(nn->rpcstats));
+ nn->rpcstats.program = &nfs_program;
nfs_netns_sysfs_setup(nn, net);
}
@@ -1149,6 +1271,9 @@ void nfs_clients_exit(struct net *net)
nfs_cleanup_cb_ident_idr(net);
WARN_ON_ONCE(!list_empty(&nn->nfs_client_list));
WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list));
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ WARN_ON_ONCE(!list_empty(&nn->nfs4_data_server_cache));
+#endif
}
#ifdef CONFIG_PROC_FS
@@ -1380,6 +1505,7 @@ error_0:
void nfs_fs_proc_exit(void)
{
remove_proc_subtree("fs/nfsfs", NULL);
+ ida_destroy(&s_sysfs_ids);
}
#endif /* CONFIG_PROC_FS */
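
nfs_free_server() now defers the final kfree() through call_rcu(), so a reader that found the server under rcu_read_lock() can finish dereferencing it before the memory goes away. The generic shape of that pattern is sketched below; the field layout mirrors the delayed_free() helper added above, with the NFS-specific cleanup elided.

struct example_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void example_delayed_free(struct rcu_head *head)
{
	struct example_obj *obj = container_of(head, struct example_obj, rcu);

	kfree(obj);	/* runs after a grace period; RCU readers are done */
}

static void example_release(struct example_obj *obj)
{
	call_rcu(&obj->rcu, example_delayed_free);
}
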
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7c9eb679dbdb..9d3a5f29f17f 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -27,8 +27,15 @@
#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)
-static atomic_long_t nfs_active_delegations;
static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
+module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
+
+static struct hlist_head *nfs_delegation_hash(struct nfs_server *server,
+ const struct nfs_fh *fhandle)
+{
+ return server->delegation_hash_table +
+ (nfs_fhandle_hash(fhandle) & server->delegation_hash_mask);
+}
static void __nfs_free_delegation(struct nfs_delegation *delegation)
{
@@ -37,11 +44,12 @@ static void __nfs_free_delegation(struct nfs_delegation *delegation)
kfree_rcu(delegation, rcu);
}
-static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
+static void nfs_mark_delegation_revoked(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
- atomic_long_dec(&nfs_active_delegations);
+ atomic_long_dec(&server->nr_active_delegations);
if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
nfs_clear_verifier_delegated(delegation->inode);
}
@@ -59,9 +67,10 @@ static void nfs_put_delegation(struct nfs_delegation *delegation)
__nfs_free_delegation(delegation);
}
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+static void nfs_free_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- nfs_mark_delegation_revoked(delegation);
+ nfs_mark_delegation_revoked(server, delegation);
nfs_put_delegation(delegation);
}
@@ -79,14 +88,14 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
-static bool
-nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
- fmode_t flags)
+static bool nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
+ fmode_t type)
{
- if (delegation != NULL && (delegation->type & flags) == flags &&
+ if (delegation != NULL && (delegation->type & type) == type &&
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
return true;
@@ -103,19 +112,22 @@ struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
return NULL;
}
-static int
-nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
+static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
+ int flags, bool mark)
{
struct nfs_delegation *delegation;
int ret = 0;
- flags &= FMODE_READ|FMODE_WRITE;
+ type &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (nfs4_is_valid_delegation(delegation, flags)) {
+ if (nfs4_is_valid_delegation(delegation, type)) {
if (mark)
nfs_mark_delegation_referenced(delegation);
ret = 1;
+ if ((flags & NFS_DELEGATION_FLAG_TIME) &&
+ !test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
+ ret = 0;
}
rcu_read_unlock();
return ret;
@@ -124,29 +136,30 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
* nfs4_have_delegation - check if inode has a delegation, mark it
* NFS_DELEGATION_REFERENCED if there is one.
* @inode: inode to check
- * @flags: delegation types to check for
+ * @type: delegation types to check for
+ * @flags: various modifiers
*
* Returns one if inode has the indicated delegation, otherwise zero.
*/
-int nfs4_have_delegation(struct inode *inode, fmode_t flags)
+int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags)
{
- return nfs4_do_check_delegation(inode, flags, true);
+ return nfs4_do_check_delegation(inode, type, flags, true);
}
/*
* nfs4_check_delegation - check if inode has a delegation, do not mark
* NFS_DELEGATION_REFERENCED if it has one.
*/
-int nfs4_check_delegation(struct inode *inode, fmode_t flags)
+int nfs4_check_delegation(struct inode *inode, fmode_t type)
{
- return nfs4_do_check_delegation(inode, flags, false);
+ return nfs4_do_check_delegation(inode, type, 0, false);
}
static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct inode *inode = state->inode;
struct file_lock *fl;
- struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock_context *flctx = locks_inode_context(inode);
struct list_head *list;
int status = 0;
@@ -156,8 +169,8 @@ static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_state
list = &flctx->flc_posix;
spin_lock(&flctx->flc_lock);
restart:
- list_for_each_entry(fl, list, fl_list) {
- if (nfs_file_open_context(fl->fl_file)->state != state)
+ for_each_file_lock(fl, list) {
+ if (nfs_file_open_context(fl->c.flc_file)->state != state)
continue;
spin_unlock(&flctx->flc_lock);
status = nfs4_lock_delegation_recall(fl, state, stateid);
@@ -181,7 +194,6 @@ static int nfs_delegation_claim_opens(struct inode *inode,
struct nfs_open_context *ctx;
struct nfs4_state_owner *sp;
struct nfs4_state *state;
- unsigned int seq;
int err;
again:
@@ -202,12 +214,9 @@ again:
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
err = nfs4_open_delegation_recall(ctx, state, stateid);
if (!err)
err = nfs_delegation_claim_locks(state, stateid);
- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
- err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
put_nfs_open_context(ctx);
if (err != 0)
@@ -225,42 +234,51 @@ again:
* @type: delegation type
* @stateid: delegation stateid
* @pagemod_limit: write delegation "space_limit"
+ * @deleg_type: raw delegation type
*
*/
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
- fmode_t type,
- const nfs4_stateid *stateid,
- unsigned long pagemod_limit)
+ fmode_t type, const nfs4_stateid *stateid,
+ unsigned long pagemod_limit, u32 deleg_type)
{
struct nfs_delegation *delegation;
const struct cred *oldcred = NULL;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (delegation != NULL) {
- spin_lock(&delegation->lock);
- if (nfs4_is_valid_delegation(delegation, 0)) {
- nfs4_stateid_copy(&delegation->stateid, stateid);
- delegation->type = type;
- delegation->pagemod_limit = pagemod_limit;
- oldcred = delegation->cred;
- delegation->cred = get_cred(cred);
- clear_bit(NFS_DELEGATION_NEED_RECLAIM,
- &delegation->flags);
- spin_unlock(&delegation->lock);
- rcu_read_unlock();
- put_cred(oldcred);
- trace_nfs4_reclaim_delegation(inode, type);
- return;
- }
- /* We appear to have raced with a delegation return. */
- spin_unlock(&delegation->lock);
+ if (!delegation) {
+ rcu_read_unlock();
+ nfs_inode_set_delegation(inode, cred, type, stateid,
+ pagemod_limit, deleg_type);
+ return;
}
+
+ spin_lock(&delegation->lock);
+ nfs4_stateid_copy(&delegation->stateid, stateid);
+ delegation->type = type;
+ delegation->pagemod_limit = pagemod_limit;
+ oldcred = delegation->cred;
+ delegation->cred = get_cred(cred);
+ switch (deleg_type) {
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
+ break;
+ default:
+ clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
+ }
+ clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+ if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+ atomic_long_inc(&NFS_SERVER(inode)->nr_active_delegations);
+ spin_unlock(&delegation->lock);
rcu_read_unlock();
- nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit);
+ put_cred(oldcred);
+ trace_nfs4_reclaim_delegation(inode, type);
}
-static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
+static int nfs_do_return_delegation(struct inode *inode,
+ struct nfs_delegation *delegation,
+ int issync)
{
const struct cred *cred;
int res = 0;
@@ -269,9 +287,8 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
spin_lock(&delegation->lock);
cred = get_cred(delegation->cred);
spin_unlock(&delegation->lock);
- res = nfs4_proc_delegreturn(inode, cred,
- &delegation->stateid,
- issync);
+ res = nfs4_proc_delegreturn(inode, cred, &delegation->stateid,
+ delegation, issync);
put_cred(cred);
}
return res;
@@ -299,7 +316,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ if (delegation->inode &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
@@ -323,14 +341,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
}
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
- struct nfs_client *clp, int err)
+ struct nfs_server *server, int err)
{
-
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
if (err == -EAGAIN) {
set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
+ set_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
+ &server->nfs_client->cl_state);
}
spin_unlock(&delegation->lock);
}
@@ -344,6 +364,8 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock));
+ trace_nfs4_detach_delegation(&nfsi->vfs_inode, delegation->type);
+
if (deleg_cur == NULL || delegation != deleg_cur)
return NULL;
@@ -352,6 +374,7 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
spin_unlock(&delegation->lock);
return NULL;
}
+ hlist_del_init_rcu(&delegation->hash);
list_del_rcu(&delegation->super_list);
delegation->inode = NULL;
rcu_assign_pointer(nfsi->delegation, NULL);
@@ -399,7 +422,8 @@ nfs_update_delegation_cred(struct nfs_delegation *delegation,
}
static void
-nfs_update_inplace_delegation(struct nfs_delegation *delegation,
+nfs_update_inplace_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation,
const struct nfs_delegation *update)
{
if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
@@ -412,7 +436,7 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
nfs_update_delegation_cred(delegation, update->cred);
/* smp_mb__before_atomic() is implicit due to xchg() */
clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
- atomic_long_inc(&nfs_active_delegations);
+ atomic_long_inc(&server->nr_active_delegations);
}
}
}
@@ -424,13 +448,13 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
* @type: delegation type
* @stateid: delegation stateid
* @pagemod_limit: write delegation "space_limit"
+ * @deleg_type: raw delegation type
*
* Returns zero on success, or a negative errno value.
*/
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
- fmode_t type,
- const nfs4_stateid *stateid,
- unsigned long pagemod_limit)
+ fmode_t type, const nfs4_stateid *stateid,
+ unsigned long pagemod_limit, u32 deleg_type)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
@@ -439,7 +463,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
struct nfs_delegation *freeme = NULL;
int status = 0;
- delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
+ delegation = kmalloc(sizeof(*delegation), GFP_KERNEL_ACCOUNT);
if (delegation == NULL)
return -ENOMEM;
nfs4_stateid_copy(&delegation->stateid, stateid);
@@ -450,6 +474,12 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
delegation->cred = get_cred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
+ switch (deleg_type) {
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ delegation->flags |= BIT(NFS_DELEGATION_DELEGTIME);
+ }
+ delegation->test_gen = 0;
spin_lock_init(&delegation->lock);
spin_lock(&clp->cl_lock);
@@ -461,7 +491,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
if (nfs4_stateid_match_other(&old_delegation->stateid,
&delegation->stateid)) {
spin_lock(&old_delegation->lock);
- nfs_update_inplace_delegation(old_delegation,
+ nfs_update_inplace_delegation(server, old_delegation,
delegation);
spin_unlock(&old_delegation->lock);
goto out;
@@ -507,19 +537,26 @@ add_new:
spin_unlock(&inode->i_lock);
list_add_tail_rcu(&delegation->super_list, &server->delegations);
+ hlist_add_head_rcu(&delegation->hash,
+ nfs_delegation_hash(server, &NFS_I(inode)->fh));
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
- atomic_long_inc(&nfs_active_delegations);
+ atomic_long_inc(&server->nr_active_delegations);
trace_nfs4_set_delegation(inode, type);
+
+ /* If we hold writebacks and have delegated mtime then update */
+ if (deleg_type == NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG &&
+ nfs_have_writebacks(inode))
+ nfs_update_delegated_mtime(inode);
out:
spin_unlock(&clp->cl_lock);
if (delegation != NULL)
__nfs_free_delegation(delegation);
if (freeme != NULL) {
nfs_do_return_delegation(inode, freeme, 0);
- nfs_free_delegation(freeme);
+ nfs_free_delegation(server, freeme);
}
return status;
}
@@ -529,7 +566,7 @@ out:
*/
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
@@ -551,11 +588,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
/*
* Guard against state recovery
*/
- err = nfs4_wait_clnt_recover(clp);
+ err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
- nfs_abort_delegation_return(delegation, clp, err);
+ nfs_abort_delegation_return(delegation, server, err);
goto out;
}
@@ -570,19 +607,10 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
bool ret = false;
+ trace_nfs_delegation_need_return(delegation);
+
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
- else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
- struct inode *inode;
-
- spin_lock(&delegation->lock);
- inode = delegation->inode;
- if (inode && list_empty(&NFS_I(inode)->open_files))
- ret = true;
- spin_unlock(&delegation->lock);
- }
- if (ret)
- clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
@@ -601,6 +629,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
struct nfs_delegation *place_holder_deleg = NULL;
int err = 0;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
+ &server->delegation_flags))
+ return 0;
restart:
/*
* To avoid quadratic looping we hold a reference
@@ -629,6 +660,9 @@ restart:
prev = delegation;
continue;
}
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ continue;
if (prev) {
struct inode *tmp = nfs_delegation_grab_inode(prev);
@@ -639,12 +673,6 @@ restart:
}
}
- inode = nfs_delegation_grab_inode(delegation);
- if (inode == NULL) {
- rcu_read_unlock();
- iput(to_put);
- goto restart;
- }
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
rcu_read_unlock();
@@ -655,6 +683,7 @@ restart:
cond_resched();
if (!err)
goto restart;
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
goto out;
}
@@ -669,6 +698,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
struct nfs_delegation *d;
bool ret = false;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags))
+ goto out;
list_for_each_entry_rcu (d, &server->delegations, super_list) {
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
continue;
@@ -676,6 +708,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
ret = true;
}
+out:
return ret;
}
@@ -735,7 +768,7 @@ void nfs_inode_evict_delegation(struct inode *inode)
set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
nfs_do_return_delegation(inode, delegation, 1);
- nfs_free_delegation(delegation);
+ nfs_free_delegation(NFS_SERVER(inode), delegation);
}
}
@@ -766,6 +799,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
}
/**
+ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called to request that the delegation be returned as soon
+ * as the file is closed. If the file is already closed, the delegation is
+ * immediately returned.
+ */
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_delegation *ret = NULL;
+
+ if (!inode)
+ return;
+ rcu_read_lock();
+ delegation = nfs4_get_valid_delegation(inode);
+ if (!delegation)
+ goto out;
+ spin_lock(&delegation->lock);
+ if (!delegation->inode)
+ goto out_unlock;
+ if (list_empty(&NFS_I(inode)->open_files) &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ } else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out_unlock:
+ spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(inode);
+out:
+ rcu_read_unlock();
+ nfs_end_delegation_return(inode, ret, 0);
+}
+
+/**
* nfs4_inode_return_delegation_on_close - asynchronously return a delegation
* @inode: inode to process
*
@@ -784,7 +854,8 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
if (!delegation)
goto out;
if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
- atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
+ atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
+ nfs_delegation_watermark) {
spin_lock(&delegation->lock);
if (delegation->inode &&
list_empty(&NFS_I(inode)->open_files) &&
@@ -826,11 +897,25 @@ int nfs4_inode_make_writeable(struct inode *inode)
return nfs4_inode_return_delegation(inode);
}
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
- struct nfs_delegation *delegation)
+static void
+nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ struct inode *inode;
+
+ if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
+ return;
+ spin_lock(&delegation->lock);
+ inode = delegation->inode;
+ if (!inode)
+ goto out;
+ if (list_empty(&NFS_I(inode)->open_files))
+ nfs_mark_return_delegation(server, delegation);
+ else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out:
+ spin_unlock(&delegation->lock);
}
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
@@ -946,7 +1031,7 @@ static void nfs_revoke_delegation(struct inode *inode,
}
spin_unlock(&delegation->lock);
}
- nfs_mark_delegation_revoked(delegation);
+ nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
ret = true;
out:
rcu_read_unlock();
@@ -954,13 +1039,6 @@ out:
nfs_inode_find_state_and_recover(inode, stateid);
}
-void nfs_remove_bad_delegation(struct inode *inode,
- const nfs4_stateid *stateid)
-{
- nfs_revoke_delegation(inode, stateid);
-}
-EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
-
void nfs_delegation_mark_returned(struct inode *inode,
const nfs4_stateid *stateid)
{
@@ -985,7 +1063,12 @@ void nfs_delegation_mark_returned(struct inode *inode,
delegation->stateid.seqid = stateid->seqid;
}
- nfs_mark_delegation_revoked(delegation);
+ nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
+ clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
+ spin_unlock(&delegation->lock);
+ if (nfs_detach_delegation(NFS_I(inode), delegation, NFS_SERVER(inode)))
+ nfs_put_delegation(delegation);
+ goto out_rcu_unlock;
out_clear_returning:
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
@@ -998,6 +1081,24 @@ out_rcu_unlock:
}
/**
+ * nfs_remove_bad_delegation - handle delegations that are unusable
+ * @inode: inode to process
+ * @stateid: the delegation's stateid
+ *
+ * If the server ACK-ed our FREE_STATEID then clean
+ * up the delegation, else mark and keep the revoked state.
+ */
+void nfs_remove_bad_delegation(struct inode *inode,
+ const nfs4_stateid *stateid)
+{
+ if (stateid && stateid->type == NFS4_FREED_STATEID_TYPE)
+ nfs_delegation_mark_returned(inode, stateid);
+ else
+ nfs_revoke_delegation(inode, stateid);
+}
+EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
+
+/**
* nfs_expire_unused_delegation_types
* @clp: client to process
* @flags: delegation types to expire
@@ -1075,11 +1176,12 @@ static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
const struct nfs_fh *fhandle)
{
+ struct hlist_head *head = nfs_delegation_hash(server, fhandle);
struct nfs_delegation *delegation;
struct super_block *freeme = NULL;
struct inode *res = NULL;
- list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ hlist_for_each_entry_rcu(delegation, head, hash) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL &&
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
@@ -1166,7 +1268,6 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
struct inode *inode;
restart:
rcu_read_lock();
-restart_locked:
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if (test_bit(NFS_DELEGATION_INODE_FREEING,
&delegation->flags) ||
@@ -1177,13 +1278,13 @@ restart_locked:
continue;
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
- goto restart_locked;
+ continue;
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
rcu_read_unlock();
if (delegation != NULL) {
if (nfs_detach_delegation(NFS_I(inode), delegation,
server) != NULL)
- nfs_free_delegation(delegation);
+ nfs_free_delegation(server, delegation);
/* Match nfs_start_delegation_return_locked */
nfs_put_delegation(delegation);
}
@@ -1220,6 +1321,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server,
return;
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
}
@@ -1296,24 +1398,30 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
struct inode *inode;
const struct cred *cred;
nfs4_stateid stateid;
+ unsigned long gen = ++server->delegation_gen;
+
+ if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
+ &server->delegation_flags))
+ return 0;
restart:
rcu_read_lock();
-restart_locked:
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if (test_bit(NFS_DELEGATION_INODE_FREEING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_RETURNING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_TEST_EXPIRED,
- &delegation->flags) == 0)
+ &delegation->flags) == 0 ||
+ delegation->test_gen == gen)
continue;
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
- goto restart_locked;
+ continue;
spin_lock(&delegation->lock);
cred = get_cred_rcu(delegation->cred);
nfs4_stateid_copy(&stateid, &delegation->stateid);
spin_unlock(&delegation->lock);
+ delegation->test_gen = gen;
clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
rcu_read_unlock();
nfs_delegation_test_free_expired(inode, &stateid, cred);
@@ -1324,6 +1432,9 @@ restart_locked:
goto restart;
}
nfs_inode_mark_test_expired_delegation(server, inode);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGATION_EXPIRED,
+ &server->nfs_client->cl_state);
iput(inode);
return -EAGAIN;
}
@@ -1478,4 +1589,17 @@ out:
return ret;
}
-module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
+int nfs4_delegation_hash_alloc(struct nfs_server *server)
+{
+ int delegation_buckets, i;
+
+ delegation_buckets = roundup_pow_of_two(nfs_delegation_watermark / 16);
+ server->delegation_hash_mask = delegation_buckets - 1;
+ server->delegation_hash_table = kmalloc_array(delegation_buckets,
+ sizeof(*server->delegation_hash_table), GFP_KERNEL);
+ if (!server->delegation_hash_table)
+ return -ENOMEM;
+ for (i = 0; i < delegation_buckets; i++)
+ INIT_HLIST_HEAD(&server->delegation_hash_table[i]);
+ return 0;
+}
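
The bucket-lookup helper is not part of this hunk: nfs_delegation_find_inode_server() above calls nfs_delegation_hash(), which is defined earlier in delegation.c. As a rough, hypothetical sketch of what that bucket selection amounts to — assuming a jhash over the raw file handle bytes, which may differ from the real implementation — it could look like this:

#include <linux/jhash.h>

/* Hypothetical sketch only; the real nfs_delegation_hash() may hash differently. */
static struct hlist_head *nfs_delegation_hash_sketch(struct nfs_server *server,
						     const struct nfs_fh *fhandle)
{
	u32 h = jhash(fhandle->data, fhandle->size, 0);

	/* delegation_hash_mask is (bucket count - 1), set up in
	 * nfs4_delegation_hash_alloc() above. */
	return &server->delegation_hash_table[h & server->delegation_hash_mask];
}
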
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 1c378992b7c0..08ec2e9c68a4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -14,6 +14,7 @@
* NFSv4 delegation
*/
struct nfs_delegation {
+ struct hlist_node hash;
struct list_head super_list;
const struct cred *cred;
struct inode *inode;
@@ -21,6 +22,7 @@ struct nfs_delegation {
fmode_t type;
unsigned long pagemod_limit;
__u64 change_attr;
+ unsigned long test_gen;
unsigned long flags;
refcount_t refcount;
spinlock_t lock;
@@ -37,14 +39,18 @@ enum {
NFS_DELEGATION_TEST_EXPIRED,
NFS_DELEGATION_INODE_FREEING,
NFS_DELEGATION_RETURN_DELAYED,
+ NFS_DELEGATION_DELEGTIME,
};
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
- fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
+ fmode_t type, const nfs4_stateid *stateid,
+ unsigned long pagemod_limit, u32 deleg_type);
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
- fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
+ fmode_t type, const nfs4_stateid *stateid,
+ unsigned long pagemod_limit, u32 deleg_type);
int nfs4_inode_return_delegation(struct inode *inode);
void nfs4_inode_return_delegation_on_close(struct inode *inode);
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_evict_delegation(struct inode *inode);
@@ -66,7 +72,9 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp);
void nfs_reap_expired_delegations(struct nfs_client *clp);
/* NFSv4 delegation-related procedures */
-int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
+int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ const nfs4_stateid *stateid,
+ struct nfs_delegation *delegation, int issync);
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
@@ -74,8 +82,8 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
-int nfs4_have_delegation(struct inode *inode, fmode_t flags);
-int nfs4_check_delegation(struct inode *inode, fmode_t flags);
+int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags);
+int nfs4_check_delegation(struct inode *inode, fmode_t type);
bool nfs4_delegation_flush_on_close(const struct inode *inode);
void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
const nfs4_stateid *stateid);
@@ -83,9 +91,39 @@ int nfs4_inode_make_writeable(struct inode *inode);
#endif
+#define NFS_DELEGATION_FLAG_TIME BIT(1)
+
+void nfs_update_delegated_atime(struct inode *inode);
+void nfs_update_delegated_mtime(struct inode *inode);
+void nfs_update_delegated_mtime_locked(struct inode *inode);
+
+static inline int nfs_have_read_or_write_delegation(struct inode *inode)
+{
+ return NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0);
+}
+
+static inline int nfs_have_write_delegation(struct inode *inode)
+{
+ return NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE, 0);
+}
+
static inline int nfs_have_delegated_attributes(struct inode *inode)
{
- return NFS_PROTO(inode)->have_delegation(inode, FMODE_READ);
+ return NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0);
}
+static inline int nfs_have_delegated_atime(struct inode *inode)
+{
+ return NFS_PROTO(inode)->have_delegation(inode, FMODE_READ,
+ NFS_DELEGATION_FLAG_TIME);
+}
+
+static inline int nfs_have_delegated_mtime(struct inode *inode)
+{
+ return NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE,
+ NFS_DELEGATION_FLAG_TIME);
+}
+
+int nfs4_delegation_hash_alloc(struct nfs_server *server);
+
#endif
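
The extra flags argument to ->have_delegation() lets callers distinguish a plain delegation of a given type from one that also delegates timestamps (NFS_DELEGATION_FLAG_TIME). A hedged usage sketch, with a hypothetical caller, of how the new helpers are meant to be consulted:

/* Hypothetical caller, not part of the patch. */
static void nfs_touch_atime_sketch(struct inode *inode)
{
	if (nfs_have_delegated_atime(inode)) {
		/* atime is delegated: update it locally, defer telling the server */
		nfs_update_delegated_atime(inode);
		return;
	}
	/* otherwise the server remains authoritative for atime */
}
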
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 731d31015b6a..ea9f6ca8f30f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -18,6 +18,7 @@
* 6 Jun 1999 Cache readdir lookups in the page cache. -DaveM
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/errno.h>
@@ -38,6 +39,7 @@
#include <linux/sched.h>
#include <linux/kmemleak.h>
#include <linux/xattr.h>
+#include <linux/hash.h>
#include "delegation.h"
#include "iostat.h"
@@ -53,7 +55,9 @@ static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, struct dir_context *);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
-static void nfs_readdir_clear_array(struct page*);
+static void nfs_readdir_clear_array(struct folio *);
+static int nfs_do_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, int open_flags);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
@@ -65,28 +69,29 @@ const struct file_operations nfs_dir_operations = {
};
const struct address_space_operations nfs_dir_aops = {
- .freepage = nfs_readdir_clear_array,
+ .free_folio = nfs_readdir_clear_array,
};
-static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir)
+#define NFS_INIT_DTSIZE PAGE_SIZE
+
+static struct nfs_open_dir_context *
+alloc_nfs_open_dir_context(struct inode *dir)
{
struct nfs_inode *nfsi = NFS_I(dir);
struct nfs_open_dir_context *ctx;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
if (ctx != NULL) {
- ctx->duped = 0;
ctx->attr_gencount = nfsi->attr_gencount;
- ctx->dir_cookie = 0;
- ctx->dup_cookie = 0;
- ctx->page_index = 0;
+ ctx->dtsize = NFS_INIT_DTSIZE;
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
nfs_set_cache_invalid(dir,
NFS_INO_INVALID_DATA |
NFS_INO_REVAL_FORCED);
- list_add(&ctx->list, &nfsi->open_files);
- clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags);
+ list_add_tail_rcu(&ctx->list, &nfsi->open_files);
+ memcpy(ctx->verf, nfsi->cookieverf, sizeof(ctx->verf));
spin_unlock(&dir->i_lock);
return ctx;
}
@@ -96,9 +101,9 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx)
{
spin_lock(&dir->i_lock);
- list_del(&ctx->list);
+ list_del_rcu(&ctx->list);
spin_unlock(&dir->i_lock);
- kfree(ctx);
+ kfree_rcu(ctx, rcu_head);
}
/*
@@ -140,24 +145,24 @@ struct nfs_cache_array_entry {
};
struct nfs_cache_array {
+ u64 change_attr;
u64 last_cookie;
unsigned int size;
- unsigned char page_full : 1,
- page_is_eof : 1,
+ unsigned char folio_full : 1,
+ folio_is_eof : 1,
cookies_are_ordered : 1;
- struct nfs_cache_array_entry array[];
+ struct nfs_cache_array_entry array[] __counted_by(size);
};
struct nfs_readdir_descriptor {
struct file *file;
- struct page *page;
+ struct folio *folio;
struct dir_context *ctx;
- pgoff_t page_index;
+ pgoff_t folio_index;
+ pgoff_t folio_index_max;
u64 dir_cookie;
u64 last_cookie;
- u64 dup_cookie;
loff_t current_index;
- loff_t prev_index;
__be32 verf[NFS_DIR_VERIFIER_SIZE];
unsigned long dir_verifier;
@@ -165,69 +170,104 @@ struct nfs_readdir_descriptor {
unsigned long gencount;
unsigned long attr_gencount;
unsigned int cache_entry_index;
- signed char duped;
+ unsigned int buffer_fills;
+ unsigned int dtsize;
+ bool clear_cache;
bool plus;
+ bool eob;
bool eof;
};
-static void nfs_readdir_array_init(struct nfs_cache_array *array)
+static void nfs_set_dtsize(struct nfs_readdir_descriptor *desc, unsigned int sz)
+{
+ struct nfs_server *server = NFS_SERVER(file_inode(desc->file));
+ unsigned int maxsize = server->dtsize;
+
+ if (sz > maxsize)
+ sz = maxsize;
+ if (sz < NFS_MIN_FILE_IO_SIZE)
+ sz = NFS_MIN_FILE_IO_SIZE;
+ desc->dtsize = sz;
+}
+
+static void nfs_shrink_dtsize(struct nfs_readdir_descriptor *desc)
{
- memset(array, 0, sizeof(struct nfs_cache_array));
+ nfs_set_dtsize(desc, desc->dtsize >> 1);
}
-static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie)
+static void nfs_grow_dtsize(struct nfs_readdir_descriptor *desc)
+{
+ nfs_set_dtsize(desc, desc->dtsize << 1);
+}
+
+static void nfs_readdir_folio_init_array(struct folio *folio, u64 last_cookie,
+ u64 change_attr)
{
struct nfs_cache_array *array;
- array = kmap_atomic(page);
- nfs_readdir_array_init(array);
+ array = kmap_local_folio(folio, 0);
+ array->change_attr = change_attr;
array->last_cookie = last_cookie;
+ array->size = 0;
+ array->folio_full = 0;
+ array->folio_is_eof = 0;
array->cookies_are_ordered = 1;
- kunmap_atomic(array);
+ kunmap_local(array);
}
/*
* we are freeing strings created by nfs_add_to_readdir_array()
*/
-static
-void nfs_readdir_clear_array(struct page *page)
+static void nfs_readdir_clear_array(struct folio *folio)
{
struct nfs_cache_array *array;
- int i;
+ unsigned int i;
- array = kmap_atomic(page);
+ array = kmap_local_folio(folio, 0);
for (i = 0; i < array->size; i++)
kfree(array->array[i].name);
- nfs_readdir_array_init(array);
- kunmap_atomic(array);
+ array->size = 0;
+ kunmap_local(array);
}
-static struct page *
-nfs_readdir_page_array_alloc(u64 last_cookie, gfp_t gfp_flags)
+static void nfs_readdir_folio_reinit_array(struct folio *folio, u64 last_cookie,
+ u64 change_attr)
{
- struct page *page = alloc_page(gfp_flags);
- if (page)
- nfs_readdir_page_init_array(page, last_cookie);
- return page;
+ nfs_readdir_clear_array(folio);
+ nfs_readdir_folio_init_array(folio, last_cookie, change_attr);
}
-static void nfs_readdir_page_array_free(struct page *page)
+static struct folio *
+nfs_readdir_folio_array_alloc(u64 last_cookie, gfp_t gfp_flags)
{
- if (page) {
- nfs_readdir_clear_array(page);
- put_page(page);
+ struct folio *folio = folio_alloc(gfp_flags, 0);
+ if (folio)
+ nfs_readdir_folio_init_array(folio, last_cookie, 0);
+ return folio;
+}
+
+static void nfs_readdir_folio_array_free(struct folio *folio)
+{
+ if (folio) {
+ nfs_readdir_clear_array(folio);
+ folio_put(folio);
}
}
+static u64 nfs_readdir_array_index_cookie(struct nfs_cache_array *array)
+{
+ return array->size == 0 ? array->last_cookie : array->array[0].cookie;
+}
+
static void nfs_readdir_array_set_eof(struct nfs_cache_array *array)
{
- array->page_is_eof = 1;
- array->page_full = 1;
+ array->folio_is_eof = 1;
+ array->folio_full = 1;
}
static bool nfs_readdir_array_is_full(struct nfs_cache_array *array)
{
- return array->page_full;
+ return array->folio_full;
}
/*
@@ -248,44 +288,49 @@ static const char *nfs_readdir_copy_name(const char *name, unsigned int len)
return ret;
}
+static size_t nfs_readdir_array_maxentries(void)
+{
+ return (PAGE_SIZE - sizeof(struct nfs_cache_array)) /
+ sizeof(struct nfs_cache_array_entry);
+}
+
/*
* Check that the next array entry lies entirely within the page bounds
*/
static int nfs_readdir_array_can_expand(struct nfs_cache_array *array)
{
- struct nfs_cache_array_entry *cache_entry;
-
- if (array->page_full)
+ if (array->folio_full)
return -ENOSPC;
- cache_entry = &array->array[array->size + 1];
- if ((char *)cache_entry - (char *)array > PAGE_SIZE) {
- array->page_full = 1;
+ if (array->size == nfs_readdir_array_maxentries()) {
+ array->folio_full = 1;
return -ENOSPC;
}
return 0;
}
-static
-int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
+static int nfs_readdir_folio_array_append(struct folio *folio,
+ const struct nfs_entry *entry,
+ u64 *cookie)
{
struct nfs_cache_array *array;
struct nfs_cache_array_entry *cache_entry;
const char *name;
- int ret;
+ int ret = -ENOMEM;
name = nfs_readdir_copy_name(entry->name, entry->len);
- if (!name)
- return -ENOMEM;
- array = kmap_atomic(page);
+ array = kmap_local_folio(folio, 0);
+ if (!name)
+ goto out;
ret = nfs_readdir_array_can_expand(array);
if (ret) {
kfree(name);
goto out;
}
- cache_entry = &array->array[array->size];
- cache_entry->cookie = entry->prev_cookie;
+ array->size++;
+ cache_entry = &array->array[array->size - 1];
+ cache_entry->cookie = array->last_cookie;
cache_entry->ino = entry->ino;
cache_entry->d_type = entry->d_type;
cache_entry->name_len = entry->len;
@@ -293,79 +338,124 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
array->last_cookie = entry->cookie;
if (array->last_cookie <= cache_entry->cookie)
array->cookies_are_ordered = 0;
- array->size++;
if (entry->eof != 0)
nfs_readdir_array_set_eof(array);
out:
- kunmap_atomic(array);
+ *cookie = array->last_cookie;
+ kunmap_local(array);
return ret;
}
-static struct page *nfs_readdir_page_get_locked(struct address_space *mapping,
- pgoff_t index, u64 last_cookie)
+#define NFS_READDIR_COOKIE_MASK (U32_MAX >> 14)
+/*
+ * Hash algorithm allowing content addressable access to sequences
+ * of directory cookies. Content is addressed by the value of the
+ * cookie index of the first readdir entry in a page.
+ *
+ * We select only the first 18 bits to avoid issues with excessive
+ * memory use for the page cache XArray. 18 bits should allow the caching
+ * of 262144 pages of sequences of readdir entries. Since each page holds
+ * 127 readdir entries for a typical 64-bit system, that works out to a
+ * cache of ~ 33 million entries per directory.
+ */
+static pgoff_t nfs_readdir_folio_cookie_hash(u64 cookie)
+{
+ if (cookie == 0)
+ return 0;
+ return hash_64(cookie, 18);
+}
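
A quick standalone check of the arithmetic in the comment above, assuming 4 KiB pages and the structure sizes implied by the definitions earlier in this file (both are assumptions, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long hdr = 24;		/* assumed sizeof(struct nfs_cache_array) */
	unsigned long ent = 32;		/* assumed sizeof(struct nfs_cache_array_entry) */
	unsigned long per_page = (page_size - hdr) / ent;	/* 127 */
	unsigned long pages = 1UL << 18;			/* 262144 hash values */

	printf("%lu entries/page, ~%.1f million entries per directory\n",
	       per_page, per_page * pages / 1e6);		/* ~33.3 million */
	return 0;
}
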
+
+static bool nfs_readdir_folio_validate(struct folio *folio, u64 last_cookie,
+ u64 change_attr)
{
- struct page *page;
+ struct nfs_cache_array *array = kmap_local_folio(folio, 0);
+ int ret = true;
- page = grab_cache_page(mapping, index);
- if (page && !PageUptodate(page)) {
- nfs_readdir_page_init_array(page, last_cookie);
- if (invalidate_inode_pages2_range(mapping, index + 1, -1) < 0)
- nfs_zap_mapping(mapping->host, mapping);
- SetPageUptodate(page);
+ if (array->change_attr != change_attr)
+ ret = false;
+ if (nfs_readdir_array_index_cookie(array) != last_cookie)
+ ret = false;
+ kunmap_local(array);
+ return ret;
+}
+
+static void nfs_readdir_folio_unlock_and_put(struct folio *folio)
+{
+ folio_unlock(folio);
+ folio_put(folio);
+}
+
+static void nfs_readdir_folio_init_and_validate(struct folio *folio, u64 cookie,
+ u64 change_attr)
+{
+ if (folio_test_uptodate(folio)) {
+ if (nfs_readdir_folio_validate(folio, cookie, change_attr))
+ return;
+ nfs_readdir_clear_array(folio);
}
+ nfs_readdir_folio_init_array(folio, cookie, change_attr);
+ folio_mark_uptodate(folio);
+}
- return page;
+static struct folio *nfs_readdir_folio_get_locked(struct address_space *mapping,
+ u64 cookie, u64 change_attr)
+{
+ pgoff_t index = nfs_readdir_folio_cookie_hash(cookie);
+ struct folio *folio;
+
+ folio = filemap_grab_folio(mapping, index);
+ if (IS_ERR(folio))
+ return NULL;
+ nfs_readdir_folio_init_and_validate(folio, cookie, change_attr);
+ return folio;
}
-static u64 nfs_readdir_page_last_cookie(struct page *page)
+static u64 nfs_readdir_folio_last_cookie(struct folio *folio)
{
struct nfs_cache_array *array;
u64 ret;
- array = kmap_atomic(page);
+ array = kmap_local_folio(folio, 0);
ret = array->last_cookie;
- kunmap_atomic(array);
+ kunmap_local(array);
return ret;
}
-static bool nfs_readdir_page_needs_filling(struct page *page)
+static bool nfs_readdir_folio_needs_filling(struct folio *folio)
{
struct nfs_cache_array *array;
bool ret;
- array = kmap_atomic(page);
+ array = kmap_local_folio(folio, 0);
ret = !nfs_readdir_array_is_full(array);
- kunmap_atomic(array);
+ kunmap_local(array);
return ret;
}
-static void nfs_readdir_page_set_eof(struct page *page)
+static void nfs_readdir_folio_set_eof(struct folio *folio)
{
struct nfs_cache_array *array;
- array = kmap_atomic(page);
+ array = kmap_local_folio(folio, 0);
nfs_readdir_array_set_eof(array);
- kunmap_atomic(array);
-}
-
-static void nfs_readdir_page_unlock_and_put(struct page *page)
-{
- unlock_page(page);
- put_page(page);
+ kunmap_local(array);
}
-static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
- pgoff_t index, u64 cookie)
+static struct folio *nfs_readdir_folio_get_next(struct address_space *mapping,
+ u64 cookie, u64 change_attr)
{
- struct page *page;
+ pgoff_t index = nfs_readdir_folio_cookie_hash(cookie);
+ struct folio *folio;
- page = nfs_readdir_page_get_locked(mapping, index, cookie);
- if (page) {
- if (nfs_readdir_page_last_cookie(page) == cookie)
- return page;
- nfs_readdir_page_unlock_and_put(page);
- }
- return NULL;
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return NULL;
+ nfs_readdir_folio_init_and_validate(folio, cookie, change_attr);
+ if (nfs_readdir_folio_last_cookie(folio) != cookie)
+ nfs_readdir_folio_reinit_array(folio, cookie, change_attr);
+ return folio;
}
static inline
@@ -387,6 +477,25 @@ bool nfs_readdir_use_cookie(const struct file *filp)
return true;
}
+static void nfs_readdir_seek_next_array(struct nfs_cache_array *array,
+ struct nfs_readdir_descriptor *desc)
+{
+ if (array->folio_full) {
+ desc->last_cookie = array->last_cookie;
+ desc->current_index += array->size;
+ desc->cache_entry_index = 0;
+ desc->folio_index++;
+ } else
+ desc->last_cookie = nfs_readdir_array_index_cookie(array);
+}
+
+static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc)
+{
+ desc->current_index = 0;
+ desc->last_cookie = 0;
+ desc->folio_index = 0;
+}
+
static int nfs_readdir_search_for_pos(struct nfs_cache_array *array,
struct nfs_readdir_descriptor *desc)
{
@@ -396,8 +505,9 @@ static int nfs_readdir_search_for_pos(struct nfs_cache_array *array,
if (diff < 0)
goto out_eof;
if (diff >= array->size) {
- if (array->page_is_eof)
+ if (array->folio_is_eof)
goto out_eof;
+ nfs_readdir_seek_next_array(array, desc);
return -EAGAIN;
}
@@ -410,16 +520,6 @@ out_eof:
return -EBADCOOKIE;
}
-static bool
-nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi)
-{
- if (nfsi->cache_validity & (NFS_INO_INVALID_CHANGE |
- NFS_INO_INVALID_DATA))
- return false;
- smp_rmb();
- return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags);
-}
-
static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array,
u64 cookie)
{
@@ -436,8 +536,7 @@ static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array,
static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array,
struct nfs_readdir_descriptor *desc)
{
- int i;
- loff_t new_pos;
+ unsigned int i;
int status = -EAGAIN;
if (!nfs_readdir_array_cookie_in_range(array, desc->dir_cookie))
@@ -445,44 +544,21 @@ static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array,
for (i = 0; i < array->size; i++) {
if (array->array[i].cookie == desc->dir_cookie) {
- struct nfs_inode *nfsi = NFS_I(file_inode(desc->file));
-
- new_pos = desc->current_index + i;
- if (desc->attr_gencount != nfsi->attr_gencount ||
- !nfs_readdir_inode_mapping_valid(nfsi)) {
- desc->duped = 0;
- desc->attr_gencount = nfsi->attr_gencount;
- } else if (new_pos < desc->prev_index) {
- if (desc->duped > 0
- && desc->dup_cookie == desc->dir_cookie) {
- if (printk_ratelimit()) {
- pr_notice("NFS: directory %pD2 contains a readdir loop."
- "Please contact your server vendor. "
- "The file: %s has duplicate cookie %llu\n",
- desc->file, array->array[i].name, desc->dir_cookie);
- }
- status = -ELOOP;
- goto out;
- }
- desc->dup_cookie = desc->dir_cookie;
- desc->duped = -1;
- }
if (nfs_readdir_use_cookie(desc->file))
desc->ctx->pos = desc->dir_cookie;
else
- desc->ctx->pos = new_pos;
- desc->prev_index = new_pos;
+ desc->ctx->pos = desc->current_index + i;
desc->cache_entry_index = i;
return 0;
}
}
check_eof:
- if (array->page_is_eof) {
+ if (array->folio_is_eof) {
status = -EBADCOOKIE;
if (desc->dir_cookie == array->last_cookie)
desc->eof = true;
- }
-out:
+ } else
+ nfs_readdir_seek_next_array(array, desc);
return status;
}
@@ -491,19 +567,14 @@ static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc)
struct nfs_cache_array *array;
int status;
- array = kmap_atomic(desc->page);
+ array = kmap_local_folio(desc->folio, 0);
if (desc->dir_cookie == 0)
status = nfs_readdir_search_for_pos(array, desc);
else
status = nfs_readdir_search_for_cookie(array, desc);
- if (status == -EAGAIN) {
- desc->last_cookie = array->last_cookie;
- desc->current_index += array->size;
- desc->page_index++;
- }
- kunmap_atomic(array);
+ kunmap_local(array);
return status;
}
@@ -538,7 +609,6 @@ static int nfs_readdir_xdr_filler(struct nfs_readdir_descriptor *desc,
/* We requested READDIRPLUS, but the server doesn't grok it */
if (error == -ENOTSUPP && desc->plus) {
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
- clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
desc->plus = arg.plus = false;
goto again;
}
@@ -588,51 +658,70 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
return 1;
}
-static
-bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
+#define NFS_READDIR_CACHE_USAGE_THRESHOLD (8UL)
+
+static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx,
+ unsigned int cache_hits,
+ unsigned int cache_misses)
{
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
return false;
- if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS)
return true;
- if (ctx->pos == 0)
+ if (ctx->pos == 0 ||
+ cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD)
return true;
return false;
}
/*
- * This function is called by the lookup and getattr code to request the
+ * This function is called by the getattr code to request the
* use of readdirplus to accelerate any future lookups in the same
* directory.
*/
-void nfs_advise_use_readdirplus(struct inode *dir)
+void nfs_readdir_record_entry_cache_hit(struct inode *dir)
{
struct nfs_inode *nfsi = NFS_I(dir);
+ struct nfs_open_dir_context *ctx;
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
- !list_empty(&nfsi->open_files))
- set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
+ S_ISDIR(dir->i_mode)) {
+ rcu_read_lock();
+ list_for_each_entry_rcu (ctx, &nfsi->open_files, list)
+ atomic_inc(&ctx->cache_hits);
+ rcu_read_unlock();
+ }
}
/*
* This function is mainly for use by nfs_getattr().
*
* If this is an 'ls -l', we want to force use of readdirplus.
- * Do this by checking if there is an active file descriptor
- * and calling nfs_advise_use_readdirplus, then forcing a
- * cache flush.
*/
-void nfs_force_use_readdirplus(struct inode *dir)
+void nfs_readdir_record_entry_cache_miss(struct inode *dir)
{
struct nfs_inode *nfsi = NFS_I(dir);
+ struct nfs_open_dir_context *ctx;
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
- !list_empty(&nfsi->open_files)) {
- set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
- set_bit(NFS_INO_FORCE_READDIR, &nfsi->flags);
+ S_ISDIR(dir->i_mode)) {
+ rcu_read_lock();
+ list_for_each_entry_rcu (ctx, &nfsi->open_files, list)
+ atomic_inc(&ctx->cache_misses);
+ rcu_read_unlock();
}
}
+static void nfs_lookup_advise_force_readdirplus(struct inode *dir,
+ unsigned int flags)
+{
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ return;
+ if (flags & (LOOKUP_EXCL | LOOKUP_PARENT | LOOKUP_REVAL))
+ return;
+ nfs_readdir_record_entry_cache_miss(dir);
+}
+
static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
unsigned long dir_verifier)
@@ -683,8 +772,12 @@ again:
status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
if (!status)
nfs_setsecurity(d_inode(dentry), entry->fattr);
+ trace_nfs_readdir_lookup_revalidate(d_inode(parent),
+ dentry, 0, status);
goto out;
} else {
+ trace_nfs_readdir_lookup_revalidate_failed(
+ d_inode(parent), dentry, 0);
d_invalidate(dentry);
dput(dentry);
dentry = NULL;
@@ -706,86 +799,99 @@ again:
dentry = alias;
}
nfs_set_verifier(dentry, dir_verifier);
+ trace_nfs_readdir_lookup(d_inode(parent), dentry, 0);
out:
dput(dentry);
}
+static int nfs_readdir_entry_decode(struct nfs_readdir_descriptor *desc,
+ struct nfs_entry *entry,
+ struct xdr_stream *stream)
+{
+ int ret;
+
+ if (entry->fattr->label)
+ entry->fattr->label->len = NFS4_MAXLABELLEN;
+ ret = xdr_decode(desc, entry, stream);
+ if (ret || !desc->plus)
+ return ret;
+ nfs_prime_dcache(file_dentry(desc->file), entry, desc->dir_verifier);
+ return 0;
+}
+
/* Perform conversion from xdr to cache array */
-static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
- struct nfs_entry *entry,
- struct page **xdr_pages,
- unsigned int buflen,
- struct page **arrays,
- size_t narrays)
+static int nfs_readdir_folio_filler(struct nfs_readdir_descriptor *desc,
+ struct nfs_entry *entry,
+ struct page **xdr_pages, unsigned int buflen,
+ struct folio **arrays, size_t narrays,
+ u64 change_attr)
{
struct address_space *mapping = desc->file->f_mapping;
+ struct folio *new, *folio = *arrays;
struct xdr_stream stream;
+ struct folio *scratch;
struct xdr_buf buf;
- struct page *scratch, *new, *page = *arrays;
+ u64 cookie;
int status;
- scratch = alloc_page(GFP_KERNEL);
+ scratch = folio_alloc(GFP_KERNEL, 0);
if (scratch == NULL)
return -ENOMEM;
xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
do {
- if (entry->fattr->label)
- entry->fattr->label->len = NFS4_MAXLABELLEN;
-
- status = xdr_decode(desc, entry, &stream);
+ status = nfs_readdir_entry_decode(desc, entry, &stream);
if (status != 0)
break;
- if (desc->plus)
- nfs_prime_dcache(file_dentry(desc->file), entry,
- desc->dir_verifier);
-
- status = nfs_readdir_add_to_array(entry, page);
+ status = nfs_readdir_folio_array_append(folio, entry, &cookie);
if (status != -ENOSPC)
continue;
- if (page->mapping != mapping) {
+ if (folio->mapping != mapping) {
if (!--narrays)
break;
- new = nfs_readdir_page_array_alloc(entry->prev_cookie,
- GFP_KERNEL);
+ new = nfs_readdir_folio_array_alloc(cookie, GFP_KERNEL);
if (!new)
break;
arrays++;
- *arrays = page = new;
+ *arrays = folio = new;
} else {
- new = nfs_readdir_page_get_next(mapping,
- page->index + 1,
- entry->prev_cookie);
+ new = nfs_readdir_folio_get_next(mapping, cookie,
+ change_attr);
if (!new)
break;
- if (page != *arrays)
- nfs_readdir_page_unlock_and_put(page);
- page = new;
+ if (folio != *arrays)
+ nfs_readdir_folio_unlock_and_put(folio);
+ folio = new;
}
- status = nfs_readdir_add_to_array(entry, page);
+ desc->folio_index_max++;
+ status = nfs_readdir_folio_array_append(folio, entry, &cookie);
} while (!status && !entry->eof);
switch (status) {
case -EBADCOOKIE:
- if (entry->eof) {
- nfs_readdir_page_set_eof(page);
- status = 0;
- }
- break;
- case -ENOSPC:
+ if (!entry->eof)
+ break;
+ nfs_readdir_folio_set_eof(folio);
+ fallthrough;
case -EAGAIN:
status = 0;
break;
+ case -ENOSPC:
+ status = 0;
+ if (!desc->plus)
+ break;
+ while (!nfs_readdir_entry_decode(desc, entry, &stream))
+ ;
}
- if (page != *arrays)
- nfs_readdir_page_unlock_and_put(page);
+ if (folio != *arrays)
+ nfs_readdir_folio_unlock_and_put(folio);
- put_page(scratch);
+ folio_put(scratch);
return status;
}
@@ -823,20 +929,22 @@ out_freepages:
static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
__be32 *verf_arg, __be32 *verf_res,
- struct page **arrays, size_t narrays)
+ struct folio **arrays, size_t narrays)
{
+ u64 change_attr;
struct page **pages;
- struct page *page = *arrays;
+ struct folio *folio = *arrays;
struct nfs_entry *entry;
size_t array_size;
struct inode *inode = file_inode(desc->file);
- size_t dtsize = NFS_SERVER(inode)->dtsize;
+ unsigned int dtsize = desc->dtsize;
+ unsigned int pglen;
int status = -ENOMEM;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- entry->cookie = nfs_readdir_page_last_cookie(page);
+ entry->cookie = nfs_readdir_folio_last_cookie(folio);
entry->fh = nfs_alloc_fhandle();
entry->fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
entry->server = NFS_SERVER(inode);
@@ -848,26 +956,21 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
if (!pages)
goto out;
- do {
- unsigned int pglen;
- status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie,
- pages, dtsize,
- verf_res);
- if (status < 0)
- break;
-
- pglen = status;
- if (pglen == 0) {
- nfs_readdir_page_set_eof(page);
- break;
- }
+ change_attr = inode_peek_iversion_raw(inode);
+ status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, pages,
+ dtsize, verf_res);
+ if (status < 0)
+ goto free_pages;
- verf_arg = verf_res;
-
- status = nfs_readdir_page_filler(desc, entry, pages, pglen,
- arrays, narrays);
- } while (!status && nfs_readdir_page_needs_filling(page));
+ pglen = status;
+ if (pglen != 0)
+ status = nfs_readdir_folio_filler(desc, entry, pages, pglen,
+ arrays, narrays, change_attr);
+ else
+ nfs_readdir_folio_set_eof(folio);
+ desc->buffer_fills++;
+free_pages:
nfs_readdir_free_pages(pages, array_size);
out:
nfs_free_fattr(entry->fattr);
@@ -876,25 +979,33 @@ out:
return status;
}
-static void nfs_readdir_page_put(struct nfs_readdir_descriptor *desc)
+static void nfs_readdir_folio_put(struct nfs_readdir_descriptor *desc)
{
- put_page(desc->page);
- desc->page = NULL;
+ folio_put(desc->folio);
+ desc->folio = NULL;
}
static void
-nfs_readdir_page_unlock_and_put_cached(struct nfs_readdir_descriptor *desc)
+nfs_readdir_folio_unlock_and_put_cached(struct nfs_readdir_descriptor *desc)
{
- unlock_page(desc->page);
- nfs_readdir_page_put(desc);
+ folio_unlock(desc->folio);
+ nfs_readdir_folio_put(desc);
}
-static struct page *
-nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc)
+static struct folio *
+nfs_readdir_folio_get_cached(struct nfs_readdir_descriptor *desc)
{
- return nfs_readdir_page_get_locked(desc->file->f_mapping,
- desc->page_index,
- desc->last_cookie);
+ struct address_space *mapping = desc->file->f_mapping;
+ u64 change_attr = inode_peek_iversion_raw(mapping->host);
+ u64 cookie = desc->last_cookie;
+ struct folio *folio;
+
+ folio = nfs_readdir_folio_get_locked(mapping, cookie, change_attr);
+ if (!folio)
+ return NULL;
+ if (desc->clear_cache && !nfs_readdir_folio_needs_filling(folio))
+ nfs_readdir_folio_reinit_array(folio, cookie, change_attr);
+ return folio;
}
/*
@@ -908,17 +1019,27 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
__be32 verf[NFS_DIR_VERIFIER_SIZE];
int res;
- desc->page = nfs_readdir_page_get_cached(desc);
- if (!desc->page)
+ desc->folio = nfs_readdir_folio_get_cached(desc);
+ if (!desc->folio)
return -ENOMEM;
- if (nfs_readdir_page_needs_filling(desc->page)) {
+ if (nfs_readdir_folio_needs_filling(desc->folio)) {
+ /* Grow the dtsize if we had to go back for more pages */
+ if (desc->folio_index == desc->folio_index_max)
+ nfs_grow_dtsize(desc);
+ desc->folio_index_max = desc->folio_index;
+ trace_nfs_readdir_cache_fill(desc->file, nfsi->cookieverf,
+ desc->last_cookie,
+ desc->folio->index, desc->dtsize);
res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf,
- &desc->page, 1);
+ &desc->folio, 1);
if (res < 0) {
- nfs_readdir_page_unlock_and_put_cached(desc);
+ nfs_readdir_folio_unlock_and_put_cached(desc);
+ trace_nfs_readdir_cache_fill_done(inode, res);
if (res == -EBADCOOKIE || res == -ENOTSYNC) {
invalidate_inode_pages2(desc->file->f_mapping);
- desc->page_index = 0;
+ nfs_readdir_rewind_search(desc);
+ trace_nfs_readdir_invalidate_cache_range(
+ inode, 0, MAX_LFS_FILESIZE);
return -EAGAIN;
}
return res;
@@ -926,50 +1047,37 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
/*
* Set the cookie verifier if the page cache was empty
*/
- if (desc->page_index == 0)
+ if (desc->last_cookie == 0 &&
+ memcmp(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf))) {
memcpy(nfsi->cookieverf, verf,
sizeof(nfsi->cookieverf));
+ invalidate_inode_pages2_range(desc->file->f_mapping, 1,
+ -1);
+ trace_nfs_readdir_invalidate_cache_range(
+ inode, 1, MAX_LFS_FILESIZE);
+ }
+ desc->clear_cache = false;
}
res = nfs_readdir_search_array(desc);
if (res == 0)
return 0;
- nfs_readdir_page_unlock_and_put_cached(desc);
+ nfs_readdir_folio_unlock_and_put_cached(desc);
return res;
}
-static bool nfs_readdir_dont_search_cache(struct nfs_readdir_descriptor *desc)
-{
- struct address_space *mapping = desc->file->f_mapping;
- struct inode *dir = file_inode(desc->file);
- unsigned int dtsize = NFS_SERVER(dir)->dtsize;
- loff_t size = i_size_read(dir);
-
- /*
- * Default to uncached readdir if the page cache is empty, and
- * we're looking for a non-zero cookie in a large directory.
- */
- return desc->dir_cookie != 0 && mapping->nrpages == 0 && size > dtsize;
-}
-
/* Search for desc->dir_cookie from the beginning of the page cache */
static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
{
int res;
- if (nfs_readdir_dont_search_cache(desc))
- return -EBADCOOKIE;
-
do {
- if (desc->page_index == 0) {
- desc->current_index = 0;
- desc->prev_index = 0;
- desc->last_cookie = 0;
- }
res = find_and_lock_cache_page(desc);
} while (res == -EAGAIN);
return res;
}
+#define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL)
+
/*
* Once we've found the start of the dirent within a page: fill 'er up...
*/
@@ -978,34 +1086,47 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
{
struct file *file = desc->file;
struct nfs_cache_array *array;
- unsigned int i = 0;
+ unsigned int i;
+ bool first_emit = !desc->dir_cookie;
- array = kmap(desc->page);
+ array = kmap_local_folio(desc->folio, 0);
for (i = desc->cache_entry_index; i < array->size; i++) {
struct nfs_cache_array_entry *ent;
+ /*
+ * nfs_readdir_handle_cache_misses() returns force_clear once
+ * cache_misses > NFS_READDIR_CACHE_MISS_THRESHOLD, so for the
+ * readdir heuristic NFS_READDIR_CACHE_MISS_THRESHOLD + 1
+ * entries need to be emitted here.
+ */
+ if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 2) {
+ desc->eob = true;
+ break;
+ }
+
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->name, ent->name_len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
- desc->eof = true;
+ desc->eob = true;
break;
}
memcpy(desc->verf, verf, sizeof(desc->verf));
- if (i < (array->size-1))
- desc->dir_cookie = array->array[i+1].cookie;
- else
+ if (i == array->size - 1) {
desc->dir_cookie = array->last_cookie;
+ nfs_readdir_seek_next_array(array, desc);
+ } else {
+ desc->dir_cookie = array->array[i + 1].cookie;
+ desc->last_cookie = array->array[0].cookie;
+ }
if (nfs_readdir_use_cookie(file))
desc->ctx->pos = desc->dir_cookie;
else
desc->ctx->pos++;
- if (desc->duped != 0)
- desc->duped = 1;
}
- if (array->page_is_eof)
- desc->eof = true;
+ if (array->folio_is_eof)
+ desc->eof = !desc->eob;
- kunmap(desc->page);
+ kunmap_local(array);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
(unsigned long long)desc->dir_cookie);
}
@@ -1024,7 +1145,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
*/
static int uncached_readdir(struct nfs_readdir_descriptor *desc)
{
- struct page **arrays;
+ struct folio **arrays;
size_t i, sz = 512;
__be32 verf[NFS_DIR_VERIFIER_SIZE];
int status = -ENOMEM;
@@ -1035,31 +1156,66 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
arrays = kcalloc(sz, sizeof(*arrays), GFP_KERNEL);
if (!arrays)
goto out;
- arrays[0] = nfs_readdir_page_array_alloc(desc->dir_cookie, GFP_KERNEL);
+ arrays[0] = nfs_readdir_folio_array_alloc(desc->dir_cookie, GFP_KERNEL);
if (!arrays[0])
goto out;
- desc->page_index = 0;
+ desc->folio_index = 0;
+ desc->cache_entry_index = 0;
desc->last_cookie = desc->dir_cookie;
- desc->duped = 0;
+ desc->folio_index_max = 0;
+
+ trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie,
+ -1, desc->dtsize);
status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz);
+ if (status < 0) {
+ trace_nfs_readdir_uncached_done(file_inode(desc->file), status);
+ goto out_free;
+ }
- for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
- desc->page = arrays[i];
+ for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
+ desc->folio = arrays[i];
nfs_do_filldir(desc, verf);
}
- desc->page = NULL;
-
+ desc->folio = NULL;
+ /*
+ * Grow the dtsize if we have to go back for more pages,
+ * or shrink it if we're reading too many.
+ */
+ if (!desc->eof) {
+ if (!desc->eob)
+ nfs_grow_dtsize(desc);
+ else if (desc->buffer_fills == 1 &&
+ i < (desc->folio_index_max >> 1))
+ nfs_shrink_dtsize(desc);
+ }
+out_free:
for (i = 0; i < sz && arrays[i]; i++)
- nfs_readdir_page_array_free(arrays[i]);
+ nfs_readdir_folio_array_free(arrays[i]);
out:
+ if (!nfs_readdir_use_cookie(desc->file))
+ nfs_readdir_rewind_search(desc);
+ desc->folio_index_max = -1;
kfree(arrays);
dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status);
return status;
}
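
The dtsize adaptation at the end of uncached_readdir() can be summarised as the following decision function. This is an illustrative restatement with renamed parameters, not the patch's code; the real values are additionally clamped by nfs_set_dtsize():

#include <stdbool.h>

static unsigned int next_dtsize(unsigned int dtsize, bool eof, bool eob,
				unsigned int buffer_fills,
				unsigned int folios_emitted,
				unsigned int folios_filled)
{
	if (eof)
		return dtsize;		/* directory exhausted: leave it alone */
	if (!eob)
		return dtsize << 1;	/* caller wanted more: read bigger next time */
	if (buffer_fills == 1 && folios_emitted < (folios_filled >> 1))
		return dtsize >> 1;	/* a single fill vastly overshot: shrink */
	return dtsize;
}
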
+static bool nfs_readdir_handle_cache_misses(struct inode *inode,
+ struct nfs_readdir_descriptor *desc,
+ unsigned int cache_misses,
+ bool force_clear)
+{
+ if (desc->ctx->pos == 0 || !desc->plus)
+ return false;
+ if (cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD && !force_clear)
+ return false;
+ trace_nfs_readdir_force_readdirplus(inode);
+ return true;
+}
+
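Taken together, nfs_use_readdirplus() and nfs_readdir_handle_cache_misses() implement a two-threshold heuristic over the per-open-context hit/miss counters that replace the old per-inode flag bits. A condensed sketch of the decision for a single readdir() call (the NFS_CAP_READDIRPLUS capability check is elided and the names are illustrative, not the patch's code):

#include <stdbool.h>

#define USAGE_THRESHOLD	8UL	/* NFS_READDIR_CACHE_USAGE_THRESHOLD */
#define MISS_THRESHOLD	16UL	/* NFS_READDIR_CACHE_MISS_THRESHOLD */

static void decide_readdirplus(unsigned long pos, bool mount_forces_plus,
			       unsigned int hits, unsigned int misses,
			       bool force_clear, bool *use_plus, bool *clear_cache)
{
	/* ask for attributes when starting from 0, when forced by the mount,
	 * or when the directory's entry cache is being used heavily */
	*use_plus = mount_forces_plus || pos == 0 ||
		    hits + misses > USAGE_THRESHOLD;
	/* refill the folio cache with READDIRPLUS data once misses pile up */
	*clear_cache = *use_plus && pos != 0 &&
		       (misses > MISS_THRESHOLD || force_clear);
}
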
/* The file offset position represents the dirent entry number. A
last cookie cache takes care of the common case of reading the
whole directory.
@@ -1071,7 +1227,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_dir_context *dir_ctx = file->private_data;
struct nfs_readdir_descriptor *desc;
- pgoff_t page_index;
+ unsigned int cache_hits, cache_misses;
+ bool force_clear;
int res;
dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
@@ -1084,11 +1241,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
* to either find the entry with the appropriate number or
* revalidate the cookie.
*/
- if (ctx->pos == 0 || nfs_attribute_cache_expired(inode)) {
- res = nfs_revalidate_mapping(inode, file->f_mapping);
- if (res < 0)
- goto out;
- }
+ nfs_revalidate_mapping(inode, file->f_mapping);
res = -ENOMEM;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
@@ -1096,20 +1249,30 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
goto out;
desc->file = file;
desc->ctx = ctx;
- desc->plus = nfs_use_readdirplus(inode, ctx);
+ desc->folio_index_max = -1;
spin_lock(&file->f_lock);
desc->dir_cookie = dir_ctx->dir_cookie;
- desc->dup_cookie = dir_ctx->dup_cookie;
- desc->duped = dir_ctx->duped;
- page_index = dir_ctx->page_index;
+ desc->folio_index = dir_ctx->page_index;
+ desc->last_cookie = dir_ctx->last_cookie;
desc->attr_gencount = dir_ctx->attr_gencount;
+ desc->eof = dir_ctx->eof;
+ nfs_set_dtsize(desc, dir_ctx->dtsize);
memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
+ cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0);
+ cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0);
+ force_clear = dir_ctx->force_clear;
spin_unlock(&file->f_lock);
- if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) &&
- list_is_singular(&nfsi->open_files))
- invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1);
+ if (desc->eof) {
+ res = 0;
+ goto out_free;
+ }
+
+ desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses);
+ force_clear = nfs_readdir_handle_cache_misses(inode, desc, cache_misses,
+ force_clear);
+ desc->clear_cache = force_clear;
do {
res = readdir_search_pagecache(desc);
@@ -1128,9 +1291,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
}
if (res == -ETOOSMALL && desc->plus) {
- clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
nfs_zap_caches(inode);
- desc->page_index = 0;
desc->plus = false;
desc->eof = false;
continue;
@@ -1139,18 +1300,22 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
nfs_do_filldir(desc, nfsi->cookieverf);
- nfs_readdir_page_unlock_and_put_cached(desc);
- } while (!desc->eof);
+ nfs_readdir_folio_unlock_and_put_cached(desc);
+ if (desc->folio_index == desc->folio_index_max)
+ desc->clear_cache = force_clear;
+ } while (!desc->eob && !desc->eof);
spin_lock(&file->f_lock);
dir_ctx->dir_cookie = desc->dir_cookie;
- dir_ctx->dup_cookie = desc->dup_cookie;
- dir_ctx->duped = desc->duped;
+ dir_ctx->last_cookie = desc->last_cookie;
dir_ctx->attr_gencount = desc->attr_gencount;
- dir_ctx->page_index = desc->page_index;
+ dir_ctx->page_index = desc->folio_index;
+ dir_ctx->force_clear = force_clear;
+ dir_ctx->eof = desc->eof;
+ dir_ctx->dtsize = desc->dtsize;
memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
spin_unlock(&file->f_lock);
-
+out_free:
kfree(desc);
out:
@@ -1185,13 +1350,15 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
- if (nfs_readdir_use_cookie(filp))
- dir_ctx->dir_cookie = offset;
- else
+ dir_ctx->page_index = 0;
+ if (!nfs_readdir_use_cookie(filp)) {
dir_ctx->dir_cookie = 0;
- if (offset == 0)
- memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf));
- dir_ctx->duped = 0;
+ dir_ctx->last_cookie = 0;
+ } else {
+ dir_ctx->dir_cookie = offset;
+ dir_ctx->last_cookie = offset;
+ }
+ dir_ctx->eof = false;
}
spin_unlock(&filp->f_lock);
return offset;
@@ -1268,11 +1435,11 @@ static bool nfs_verifier_is_delegated(struct dentry *dentry)
static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
{
struct inode *inode = d_inode(dentry);
- struct inode *dir = d_inode(dentry->d_parent);
+ struct inode *dir = d_inode_rcu(dentry->d_parent);
- if (!nfs_verify_change_attribute(dir, verf))
+ if (!dir || !nfs_verify_change_attribute(dir, verf))
return;
- if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0))
nfs_set_verifier_delegated(&verf);
dentry->d_time = verf;
}
@@ -1324,6 +1491,14 @@ void nfs_clear_verifier_delegated(struct inode *inode)
EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+static int nfs_dentry_verify_change(struct inode *dir, struct dentry *dentry)
+{
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE) &&
+ d_really_is_negative(dentry))
+ return dentry->d_time == inode_peek_iversion_raw(dir);
+ return nfs_verify_change_attribute(dir, dentry->d_time);
+}
+
/*
* A check for whether or not the parent directory has changed.
* In the case it has, we assume that the dentries are untrustworthy
@@ -1337,7 +1512,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
return 1;
if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
return 0;
- if (!nfs_verify_change_attribute(dir, dentry->d_time))
+ if (!nfs_dentry_verify_change(dir, dentry))
return 0;
/* Revalidate nfsi->cache_change_attribute before we declare a match */
if (nfs_mapping_need_revalidate_inode(dir)) {
@@ -1346,7 +1521,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
return 0;
}
- if (!nfs_verify_change_attribute(dir, dentry->d_time))
+ if (!nfs_dentry_verify_change(dir, dentry))
return 0;
return 1;
}
@@ -1359,7 +1534,8 @@ static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
{
if (NFS_PROTO(dir)->version == 2)
return 0;
- return flags & LOOKUP_EXCL;
+ return (flags & (LOOKUP_CREATE | LOOKUP_EXCL)) ==
+ (LOOKUP_CREATE | LOOKUP_EXCL);
}
/*
@@ -1398,7 +1574,12 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
if (flags & LOOKUP_REVAL)
goto out_force;
out:
- return (inode->i_nlink == 0) ? -ESTALE : 0;
+ if (inode->i_nlink > 0 ||
+ (inode->i_nlink == 0 &&
+ test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(inode)->flags)))
+ return 0;
+ else
+ return -ESTALE;
out_force:
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -1436,6 +1617,9 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
return 0;
if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
return 1;
+ /* Case insensitive server? Revalidate negative dentries */
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ return 1;
return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
}
@@ -1445,10 +1629,17 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
{
switch (error) {
case 1:
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
- __func__, dentry);
- return 1;
- case 0:
+ break;
+ case -ETIMEDOUT:
+ if (inode && (IS_ROOT(dentry) ||
+ NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
+ error = 1;
+ break;
+ case -ESTALE:
+ case -ENOENT:
+ error = 0;
+ fallthrough;
+ default:
/*
* We can't d_drop the root of a disconnected tree:
* its d_hash is on the s_anon list and d_drop() would hide
@@ -1456,13 +1647,10 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
* inodes on unmount and further oopses.
*/
if (inode && IS_ROOT(dentry))
- return 1;
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
- __func__, dentry);
- return 0;
+ error = 1;
+ break;
}
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
- __func__, dentry, error);
+ trace_nfs_lookup_revalidate_exit(dir, dentry, 0, error);
return error;
}
@@ -1487,15 +1675,17 @@ nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
}
-static int
-nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
- struct inode *inode)
+static int nfs_lookup_revalidate_dentry(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry,
+ struct inode *inode, unsigned int flags)
{
struct nfs_fh *fhandle;
struct nfs_fattr *fattr;
unsigned long dir_verifier;
int ret;
+ trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
+
ret = -ENOMEM;
fhandle = nfs_alloc_fhandle();
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
@@ -1503,19 +1693,13 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
goto out;
dir_verifier = nfs_save_change_attribute(dir);
- ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
- if (ret < 0) {
- switch (ret) {
- case -ESTALE:
- case -ENOENT:
- ret = 0;
- break;
- case -ETIMEDOUT:
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
- ret = 1;
- }
+ ret = NFS_PROTO(dir)->lookup(dir, dentry, name, fhandle, fattr);
+ if (ret < 0)
goto out;
- }
+
+ /* Request help from readdirplus */
+ nfs_lookup_advise_force_readdirplus(dir, flags);
+
ret = 0;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out;
@@ -1525,8 +1709,6 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
nfs_setsecurity(inode, fattr);
nfs_set_verifier(dentry, dir_verifier);
- /* set a readdirplus hint that we had a cache miss */
- nfs_force_use_readdirplus(dir);
ret = 1;
out:
nfs_free_fattr(fattr);
@@ -1536,7 +1718,7 @@ out:
* If the lookup failed despite the dentry change attribute being
* a match, then we should revalidate the directory cache.
*/
- if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
+ if (!ret && nfs_dentry_verify_change(dir, dentry))
nfs_mark_dir_for_revalidate(dir);
return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
}
@@ -1553,11 +1735,11 @@ out:
* cached dentry and do a new lookup.
*/
static int
-nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+nfs_do_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
- int error;
+ int error = 0;
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
inode = d_inode(dentry);
@@ -1571,6 +1753,10 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
goto out_bad;
}
+ if ((flags & LOOKUP_RENAME_TARGET) && d_count(dentry) < 2 &&
+ nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ goto out_bad;
+
if (nfs_verifier_is_delegated(dentry))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
@@ -1583,7 +1769,6 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
nfs_mark_dir_for_revalidate(dir);
goto out_bad;
}
- nfs_advise_use_readdirplus(dir);
goto out_valid;
}
@@ -1593,45 +1778,57 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
if (NFS_STALE(inode))
goto out_bad;
- trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
- error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
- trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- return error;
+ return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags);
out_valid:
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
out_bad:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+ return nfs_lookup_revalidate_done(dir, dentry, inode, error);
}
static int
-__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
- int (*reval)(struct inode *, struct dentry *, unsigned int))
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent;
- struct inode *dir;
- int ret;
-
if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- ret = reval(dir, dentry, flags);
- if (parent != READ_ONCE(dentry->d_parent))
+ if (dentry->d_fsdata == NFS_FSDATA_BLOCKED)
return -ECHILD;
} else {
- parent = dget_parent(dentry);
- ret = reval(d_inode(parent), dentry, flags);
- dput(parent);
+ /* Wait for unlink to complete - see unblock_revalidate() */
+ wait_var_event(&dentry->d_fsdata,
+ smp_load_acquire(&dentry->d_fsdata)
+ != NFS_FSDATA_BLOCKED);
}
- return ret;
+ return 0;
+}
+
+static int nfs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
+{
+ if (__nfs_lookup_revalidate(dentry, flags))
+ return -ECHILD;
+ return nfs_do_lookup_revalidate(dir, name, dentry, flags);
+}
+
+static void block_revalidate(struct dentry *dentry)
+{
+ /* old devname - just in case */
+ kfree(dentry->d_fsdata);
+
+ /* Any new reference that could lead to an open
+ * will take ->d_lock in lookup_open() -> d_lookup().
+ * Holding this lock ensures we cannot race with
+ * __nfs_lookup_revalidate() and removes any need
+ * for further barriers.
+ */
+ lockdep_assert_held(&dentry->d_lock);
+
+ dentry->d_fsdata = NFS_FSDATA_BLOCKED;
}
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static void unblock_revalidate(struct dentry *dentry)
{
- return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
+ store_release_wake_up(&dentry->d_fsdata, NULL);
}
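
block_revalidate()/unblock_revalidate() let an in-flight unlink or rename park concurrent d_revalidate() callers on d_fsdata rather than unhashing the dentry. A hedged sketch of the intended call pattern on the removal side (the function below is hypothetical; the real callers appear later in this file):

static int nfs_unlink_sketch(struct inode *dir, struct dentry *dentry)
{
	int error;

	spin_lock(&dentry->d_lock);
	block_revalidate(dentry);	/* revalidators now wait or return -ECHILD */
	spin_unlock(&dentry->d_lock);

	error = NFS_PROTO(dir)->remove(dir, dentry);	/* over-the-wire REMOVE */

	unblock_revalidate(dentry);	/* wake anyone parked in __nfs_lookup_revalidate() */
	return error;
}
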
/*
@@ -1774,9 +1971,13 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
dir_verifier = nfs_save_change_attribute(dir);
trace_nfs_lookup_enter(dir, dentry, flags);
- error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
- if (error == -ENOENT)
+ error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name,
+ fhandle, fattr);
+ if (error == -ENOENT) {
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ dir_verifier = inode_peek_iversion_raw(dir);
goto no_entry;
+ }
if (error < 0) {
res = ERR_PTR(error);
goto out;
@@ -1787,7 +1988,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
goto out;
/* Notify readdir to use READDIRPLUS */
- nfs_force_use_readdirplus(dir);
+ nfs_lookup_advise_force_readdirplus(dir, flags);
no_entry:
res = d_splice_alias(inode, dentry);
@@ -1805,8 +2006,17 @@ out:
}
EXPORT_SYMBOL_GPL(nfs_lookup);
+void nfs_d_prune_case_insensitive_aliases(struct inode *inode)
+{
+ /* Case insensitive server? Revalidate dentries */
+ if (inode && nfs_server_capable(inode, NFS_CAP_CASE_INSENSITIVE))
+ d_prune_aliases(inode);
+}
+EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases);
+
#if IS_ENABLED(CONFIG_NFS_V4)
-static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
+static int nfs4_lookup_revalidate(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs4_lookup_revalidate,
@@ -1818,16 +2028,6 @@ const struct dentry_operations nfs4_dentry_operations = {
};
EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
-static fmode_t flags_to_mode(int flags)
-{
- fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
- if ((flags & O_ACCMODE) != O_WRONLY)
- res |= FMODE_READ;
- if ((flags & O_ACCMODE) != O_RDONLY)
- res |= FMODE_WRITE;
- return res;
-}
-
static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
{
return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
@@ -1848,7 +2048,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
err = finish_open(file, dentry, do_open);
if (err)
goto out;
- if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ if (S_ISREG(file_inode(file)->i_mode))
nfs_file_set_open_context(file, ctx);
else
err = -EOPENSTALE;
@@ -1866,6 +2066,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
unsigned int lookup_flags = 0;
+ unsigned long dir_verifier;
bool switched = false;
int created = 0;
int err;
@@ -1939,7 +2140,11 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
switch (err) {
case -ENOENT:
d_splice_alias(NULL, dentry);
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ dir_verifier = inode_peek_iversion_raw(dir);
+ else
+ dir_verifier = nfs_save_change_attribute(dir);
+ nfs_set_verifier(dentry, dir_verifier);
break;
case -EISDIR:
case -ENOTDIR:
@@ -1954,6 +2159,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
}
goto out;
}
+ file->f_mode |= FMODE_CAN_ODIRECT;
err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
@@ -1967,6 +2173,24 @@ out:
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
+ if (!res) {
+ inode = d_inode(dentry);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+ res = ERR_PTR(-ENOTDIR);
+ else if (inode && S_ISREG(inode->i_mode))
+ res = ERR_PTR(-EOPENSTALE);
+ } else if (!IS_ERR(res)) {
+ inode = d_inode(res);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
+ dput(res);
+ res = ERR_PTR(-ENOTDIR);
+ } else if (inode && S_ISREG(inode->i_mode)) {
+ dput(res);
+ res = ERR_PTR(-EOPENSTALE);
+ }
+ }
if (switched) {
d_lookup_done(dentry);
if (!res)
@@ -1974,18 +2198,21 @@ no_open:
else
dput(dentry);
}
- if (IS_ERR(res))
- return PTR_ERR(res);
return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
static int
-nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
+ if (__nfs_lookup_revalidate(dentry, flags))
+ return -ECHILD;
+
+ trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
+
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
goto full_reval;
if (d_mountpoint(dentry))
@@ -2019,19 +2246,46 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
reval_dentry:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_dentry(dir, dentry, inode);
+ return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags);
full_reval:
- return nfs_do_lookup_revalidate(dir, dentry, flags);
+ return nfs_do_lookup_revalidate(dir, name, dentry, flags);
}
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+#endif /* CONFIG_NFSV4 */
+
+int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned int open_flags,
+ umode_t mode)
{
- return __nfs_lookup_revalidate(dentry, flags,
- nfs4_do_lookup_revalidate);
-}
+ struct dentry *res = NULL;
+ /* Same as lookup + open from lookup_open(), but with different O_TRUNC
+ * handling.
+ */
+ int error = 0;
-#endif /* CONFIG_NFSV4 */
+ if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
+ return -ENAMETOOLONG;
+
+ if (open_flags & O_CREAT) {
+ error = nfs_do_create(dir, dentry, mode, open_flags);
+ if (!error) {
+ file->f_mode |= FMODE_CREATED;
+ return finish_open(file, dentry, NULL);
+ } else if (error != -EEXIST || open_flags & O_EXCL)
+ return error;
+ }
+ if (d_in_lookup(dentry)) {
+ /* The only flags nfs_lookup considers are
+ * LOOKUP_EXCL and LOOKUP_RENAME_TARGET, and
+ * we want those to be zero so the lookup isn't skipped.
+ */
+ res = nfs_lookup(dir, dentry, 0);
+ }
+ return finish_no_open(file, res);
+
+}
+EXPORT_SYMBOL_GPL(nfs_atomic_open_v23);
struct dentry *
nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
@@ -2046,7 +2300,8 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
d_drop(dentry);
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (error)
goto out_error;
}
@@ -2093,18 +2348,23 @@ EXPORT_SYMBOL_GPL(nfs_instantiate);
* that the operation succeeded on the server, but an error in the
* reply path made it appear to have failed.
*/
-int nfs_create(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool excl)
+static int nfs_do_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, int open_flags)
{
struct iattr attr;
- int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
int error;
+ open_flags |= O_CREAT;
+
dfprintk(VFS, "NFS: create(%s/%lu), %pd\n",
dir->i_sb->s_id, dir->i_ino, dentry);
attr.ia_mode = mode;
attr.ia_valid = ATTR_MODE;
+ if (open_flags & O_TRUNC) {
+ attr.ia_size = 0;
+ attr.ia_valid |= ATTR_SIZE;
+ }
trace_nfs_create_enter(dir, dentry, open_flags);
error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
@@ -2116,13 +2376,19 @@ out_err:
d_drop(dentry);
return error;
}
+
+int nfs_create(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
+{
+ return nfs_do_create(dir, dentry, mode, excl ? O_EXCL : 0);
+}
EXPORT_SYMBOL_GPL(nfs_create);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
int
-nfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+nfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct iattr attr;
@@ -2149,11 +2415,11 @@ EXPORT_SYMBOL_GPL(nfs_mknod);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-int nfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+struct dentry *nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct iattr attr;
- int error;
+ struct dentry *ret;
dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n",
dir->i_sb->s_id, dir->i_ino, dentry);
@@ -2162,14 +2428,9 @@ int nfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
attr.ia_mode = mode | S_IFDIR;
trace_nfs_mkdir_enter(dir, dentry);
- error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
- trace_nfs_mkdir_exit(dir, dentry, error);
- if (error != 0)
- goto out_err;
- return 0;
-out_err:
- d_drop(dentry);
- return error;
+ ret = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
+ trace_nfs_mkdir_exit(dir, dentry, PTR_ERR_OR_ZERO(ret));
+ return ret;
}
EXPORT_SYMBOL_GPL(nfs_mkdir);
@@ -2184,9 +2445,12 @@ static void nfs_dentry_remove_handle_error(struct inode *dir,
{
switch (error) {
case -ENOENT:
- d_delete(dentry);
- fallthrough;
+ if (d_really_is_positive(dentry))
+ d_delete(dentry);
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ break;
case 0:
+ nfs_d_prune_case_insensitive_aliases(d_inode(dentry));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
}
}
@@ -2263,29 +2527,37 @@ out:
int nfs_unlink(struct inode *dir, struct dentry *dentry)
{
int error;
- int need_rehash = 0;
dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id,
dir->i_ino, dentry);
trace_nfs_unlink_enter(dir, dentry);
spin_lock(&dentry->d_lock);
- if (d_count(dentry) > 1) {
+ if (d_count(dentry) > 1 && !test_bit(NFS_INO_PRESERVE_UNLINKED,
+ &NFS_I(d_inode(dentry))->flags)) {
spin_unlock(&dentry->d_lock);
/* Start asynchronous writeout of the inode */
write_inode_now(d_inode(dentry), 0);
error = nfs_sillyrename(dir, dentry);
goto out;
}
- if (!d_unhashed(dentry)) {
- __d_drop(dentry);
- need_rehash = 1;
+ /* We must prevent any concurrent open until the unlink
+ * completes. ->d_revalidate will wait for ->d_fsdata
+ * to clear. We set it here to ensure no lookup succeeds until
+ * the unlink is complete on the server.
+ */
+ error = -ETXTBSY;
+ if (WARN_ON(dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ WARN_ON(dentry->d_fsdata == NFS_FSDATA_BLOCKED)) {
+ spin_unlock(&dentry->d_lock);
+ goto out;
}
+ block_revalidate(dentry);
+
spin_unlock(&dentry->d_lock);
error = nfs_safe_remove(dentry);
nfs_dentry_remove_handle_error(dir, dentry, error);
- if (need_rehash)
- d_rehash(dentry);
+ unblock_revalidate(dentry);
out:
trace_nfs_unlink_exit(dir, dentry, error);
return error;
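/*
 * Editorial note: block_revalidate(), unblock_revalidate() and the
 * NFS_FSDATA_BLOCKED sentinel used above are defined earlier in
 * fs/nfs/dir.c, outside this excerpt.  A minimal sketch of the mechanism
 * follows; the bodies and the sentinel value are assumptions
 * reconstructed from how they are used here, not quoted from the patch.
 */
#define NFS_FSDATA_BLOCKED ((void *)1)	/* assumed sentinel value */

static void block_revalidate(struct dentry *dentry)
{
	/* drop any stale devname that may be cached in ->d_fsdata */
	kfree(dentry->d_fsdata);
	dentry->d_fsdata = NFS_FSDATA_BLOCKED;
}

static void unblock_revalidate(struct dentry *dentry)
{
	/* the store_release pairs with the wait in ->d_revalidate, which
	 * does wait_var_event(&dentry->d_fsdata,
	 *                     dentry->d_fsdata != NFS_FSDATA_BLOCKED);
	 */
	smp_store_release(&dentry->d_fsdata, NULL);
	wake_up_var(&dentry->d_fsdata);
}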
@@ -2307,10 +2579,10 @@ EXPORT_SYMBOL_GPL(nfs_unlink);
* now have a new file handle and can instantiate an in-core NFS inode
* and move the raw page into its mapping.
*/
-int nfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
- struct page *page;
+ struct folio *folio;
char *kaddr;
struct iattr attr;
unsigned int pathlen = strlen(symname);
@@ -2325,24 +2597,24 @@ int nfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
attr.ia_mode = S_IFLNK | S_IRWXUGO;
attr.ia_valid = ATTR_MODE;
- page = alloc_page(GFP_USER);
- if (!page)
+ folio = folio_alloc(GFP_USER, 0);
+ if (!folio)
return -ENOMEM;
- kaddr = page_address(page);
+ kaddr = folio_address(folio);
memcpy(kaddr, symname, pathlen);
if (pathlen < PAGE_SIZE)
memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
trace_nfs_symlink_enter(dir, dentry);
- error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
+ error = NFS_PROTO(dir)->symlink(dir, dentry, folio, pathlen, &attr);
trace_nfs_symlink_exit(dir, dentry, error);
if (error != 0) {
dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n",
dir->i_sb->s_id, dir->i_ino,
dentry, symname, error);
d_drop(dentry);
- __free_page(page);
+ folio_put(folio);
return error;
}
@@ -2352,18 +2624,13 @@ int nfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
* No big deal if we can't add this page to the page cache here.
* READLINK will get the missing page from the server if needed.
*/
- if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
- GFP_KERNEL)) {
- SetPageUptodate(page);
- unlock_page(page);
- /*
- * add_to_page_cache_lru() grabs an extra page refcount.
- * Drop it here to avoid leaking this page later.
- */
- put_page(page);
- } else
- __free_page(page);
+ if (filemap_add_folio(d_inode(dentry)->i_mapping, folio, 0,
+ GFP_KERNEL) == 0) {
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ }
+ folio_put(folio);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_symlink);
@@ -2379,6 +2646,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
trace_nfs_link_enter(inode, dir, dentry);
d_drop(dentry);
+ if (S_ISREG(inode->i_mode))
+ nfs_sync_inode(inode);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
if (error == 0) {
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -2390,6 +2659,26 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
}
EXPORT_SYMBOL_GPL(nfs_link);
+static void
+nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
+{
+ struct dentry *new_dentry = data->new_dentry;
+
+ unblock_revalidate(new_dentry);
+}
+
+static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry,
+ struct dentry *new_dentry)
+{
+ struct nfs_server *server = NFS_SB(old_dentry->d_sb);
+
+ if (old_dentry->d_parent != new_dentry->d_parent)
+ return false;
+ if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE)
+ return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN);
+ return true;
+}
+
/*
* RENAME
* FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -2414,14 +2703,15 @@ EXPORT_SYMBOL_GPL(nfs_link);
* If these conditions are met, we can drop the dentries before doing
* the rename.
*/
-int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct dentry *dentry = NULL, *rehash = NULL;
+ struct dentry *dentry = NULL;
struct rpc_task *task;
+ bool must_unblock = false;
int error = -EBUSY;
if (flags)
@@ -2439,18 +2729,22 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
* the new target.
*/
if (new_inode && !S_ISDIR(new_inode->i_mode)) {
- /*
- * To prevent any new references to the target during the
- * rename, we unhash the dentry in advance.
+ /* We must prevent any concurrent open until the unlink
+ * completes. ->d_revalidate will wait for ->d_fsdata
+ * to clear. We set it here to ensure no lookup succeeds until
+ * the unlink is complete on the server.
*/
- if (!d_unhashed(new_dentry)) {
- d_drop(new_dentry);
- rehash = new_dentry;
- }
+ error = -ETXTBSY;
+ if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
+ goto out;
+ spin_lock(&new_dentry->d_lock);
if (d_count(new_dentry) > 2) {
int err;
+ spin_unlock(&new_dentry->d_lock);
+
/* copy the target dentry's name */
dentry = d_alloc(new_dentry->d_parent,
&new_dentry->d_name);
@@ -2463,13 +2757,23 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
goto out;
new_dentry = dentry;
- rehash = NULL;
new_inode = NULL;
+ } else {
+ block_revalidate(new_dentry);
+ must_unblock = true;
+ spin_unlock(&new_dentry->d_lock);
}
+
}
- task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
+ if (S_ISREG(old_inode->i_mode) &&
+ nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry))
+ nfs_sync_inode(old_inode);
+ task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+ must_unblock ? nfs_unblock_rename : NULL);
if (IS_ERR(task)) {
+ if (must_unblock)
+ unblock_revalidate(new_dentry);
error = PTR_ERR(task);
goto out;
}
@@ -2492,8 +2796,6 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
spin_unlock(&old_inode->i_lock);
}
out:
- if (rehash)
- d_rehash(rehash);
trace_nfs_rename_exit(old_dir, old_dentry,
new_dir, new_dentry, error);
if (!error) {
@@ -2528,7 +2830,7 @@ MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache lengt
static void nfs_access_free_entry(struct nfs_access_entry *entry)
{
- put_cred(entry->cred);
+ put_group_info(entry->group_info);
kfree_rcu(entry, rcu_head);
smp_mb__before_atomic();
atomic_long_dec(&nfs_access_nr_entries);
@@ -2654,6 +2956,43 @@ void nfs_access_zap_cache(struct inode *inode)
}
EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
+static int access_cmp(const struct cred *a, const struct nfs_access_entry *b)
+{
+ struct group_info *ga, *gb;
+ int g;
+
+ if (uid_lt(a->fsuid, b->fsuid))
+ return -1;
+ if (uid_gt(a->fsuid, b->fsuid))
+ return 1;
+
+ if (gid_lt(a->fsgid, b->fsgid))
+ return -1;
+ if (gid_gt(a->fsgid, b->fsgid))
+ return 1;
+
+ ga = a->group_info;
+ gb = b->group_info;
+ if (ga == gb)
+ return 0;
+ if (ga == NULL)
+ return -1;
+ if (gb == NULL)
+ return 1;
+ if (ga->ngroups < gb->ngroups)
+ return -1;
+ if (ga->ngroups > gb->ngroups)
+ return 1;
+
+ for (g = 0; g < ga->ngroups; g++) {
+ if (gid_lt(ga->gid[g], gb->gid[g]))
+ return -1;
+ if (gid_gt(ga->gid[g], gb->gid[g]))
+ return 1;
+ }
+ return 0;
+}
+
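/*
 * Editorial note: access_cmp() exists because the access cache no longer
 * pins a whole struct cred per entry.  Collecting the fields this patch
 * actually touches, the cache entry now looks roughly like the sketch
 * below; the authoritative layout lives in include/linux/nfs_fs.h and
 * the field order there may differ.
 */
struct nfs_access_entry {
	struct rb_node		rb_node;	/* per-inode cache tree */
	struct list_head	lru;		/* LRU list for the shrinker */
	kuid_t			fsuid;		/* replaces the old ->cred */
	kgid_t			fsgid;
	struct group_info	*group_info;
	u64			timestamp;	/* ktime_get_ns() at insertion */
	__u32			mask;		/* cached NFS_ACCESS_* bits */
	struct rcu_head		rcu_head;
};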
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred)
{
struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
@@ -2661,7 +3000,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co
while (n != NULL) {
struct nfs_access_entry *entry =
rb_entry(n, struct nfs_access_entry, rb_node);
- int cmp = cred_fscmp(cred, entry->cred);
+ int cmp = access_cmp(cred, entry);
if (cmp < 0)
n = n->rb_left;
@@ -2673,9 +3012,30 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co
return NULL;
}
-static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
+static u64 nfs_access_login_time(const struct task_struct *task,
+ const struct cred *cred)
+{
+ const struct task_struct *parent;
+ const struct cred *pcred;
+ u64 ret;
+
+ rcu_read_lock();
+ for (;;) {
+ parent = rcu_dereference(task->real_parent);
+ pcred = __task_cred(parent);
+ if (parent == task || cred_fscmp(pcred, cred) != 0)
+ break;
+ task = parent;
+ }
+ ret = task->start_time;
+ rcu_read_unlock();
+ return ret;
+}
+
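/*
 * Editorial note: the (s64)(login_time - cache->timestamp) > 0 tests in
 * the two lookup paths below treat any entry cached before the caller's
 * "login time" as stale and return -ENOENT for it.  The login time is
 * the start time of the oldest ancestor process that still shares this
 * fsuid/fsgid, as computed by nfs_access_login_time() above, so
 * credential changes such as newly granted group memberships are not
 * masked by older cached ACCESS results.
 */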
+static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ u64 login_time = nfs_access_login_time(current, cred);
struct nfs_access_entry *cache;
bool retry = true;
int err;
@@ -2703,8 +3063,10 @@ static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *
spin_lock(&inode->i_lock);
retry = false;
}
- res->cred = cache->cred;
- res->mask = cache->mask;
+ err = -ENOENT;
+ if ((s64)(login_time - cache->timestamp) > 0)
+ goto out;
+ *mask = cache->mask;
list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
err = 0;
out:
@@ -2716,12 +3078,13 @@ out_zap:
return -ENOENT;
}
-static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res)
+static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask)
{
/* Only check the most recently returned cache entry,
* but do it without locking.
*/
struct nfs_inode *nfsi = NFS_I(inode);
+ u64 login_time = nfs_access_login_time(current, cred);
struct nfs_access_entry *cache;
int err = -ECHILD;
struct list_head *lh;
@@ -2732,35 +3095,38 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre
lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru));
cache = list_entry(lh, struct nfs_access_entry, lru);
if (lh == &nfsi->access_cache_entry_lru ||
- cred_fscmp(cred, cache->cred) != 0)
+ access_cmp(cred, cache) != 0)
cache = NULL;
if (cache == NULL)
goto out;
+ if ((s64)(login_time - cache->timestamp) > 0)
+ goto out;
if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
goto out;
- res->cred = cache->cred;
- res->mask = cache->mask;
+ *mask = cache->mask;
err = 0;
out:
rcu_read_unlock();
return err;
}
-int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct
-nfs_access_entry *res, bool may_block)
+int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block)
{
int status;
- status = nfs_access_get_cached_rcu(inode, cred, res);
+ status = nfs_access_get_cached_rcu(inode, cred, mask);
if (status != 0)
- status = nfs_access_get_cached_locked(inode, cred, res,
+ status = nfs_access_get_cached_locked(inode, cred, mask,
may_block);
return status;
}
EXPORT_SYMBOL_GPL(nfs_access_get_cached);
-static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
+static void nfs_access_add_rbtree(struct inode *inode,
+ struct nfs_access_entry *set,
+ const struct cred *cred)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct rb_root *root_node = &nfsi->access_cache;
@@ -2773,7 +3139,7 @@ static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *
while (*p != NULL) {
parent = *p;
entry = rb_entry(parent, struct nfs_access_entry, rb_node);
- cmp = cred_fscmp(set->cred, entry->cred);
+ cmp = access_cmp(cred, entry);
if (cmp < 0)
p = &parent->rb_left;
@@ -2795,21 +3161,25 @@ found:
nfs_access_free_entry(entry);
}
-void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
+void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set,
+ const struct cred *cred)
{
struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL)
return;
RB_CLEAR_NODE(&cache->rb_node);
- cache->cred = get_cred(set->cred);
+ cache->fsuid = cred->fsuid;
+ cache->fsgid = cred->fsgid;
+ cache->group_info = get_group_info(cred->group_info);
cache->mask = set->mask;
+ cache->timestamp = ktime_get_ns();
/* The above field assignments must be visible
* before this item appears on the lru. We cannot easily
* use rcu_assign_pointer, so just force the memory barrier.
*/
smp_wmb();
- nfs_access_add_rbtree(inode, cache);
+ nfs_access_add_rbtree(inode, cache, cred);
/* Update accounting */
smp_mb__before_atomic();
@@ -2874,7 +3244,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
trace_nfs_access_enter(inode);
- status = nfs_access_get_cached(inode, cred, &cache, may_block);
+ status = nfs_access_get_cached(inode, cred, &cache.mask, may_block);
if (status == 0)
goto out_cached;
@@ -2885,17 +3255,13 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
/*
* Determine which access bits we want to ask for...
*/
- cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
- if (nfs_server_capable(inode, NFS_CAP_XATTR)) {
- cache.mask |= NFS_ACCESS_XAREAD | NFS_ACCESS_XAWRITE |
- NFS_ACCESS_XALIST;
- }
+ cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND |
+ nfs_access_xattr_mask(NFS_SERVER(inode));
if (S_ISDIR(inode->i_mode))
cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
else
cache.mask |= NFS_ACCESS_EXECUTE;
- cache.cred = cred;
- status = NFS_PROTO(inode)->access(inode, &cache);
+ status = NFS_PROTO(inode)->access(inode, &cache, cred);
if (status != 0) {
if (status == -ESTALE) {
if (!S_ISDIR(inode->i_mode))
@@ -2905,7 +3271,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
}
goto out;
}
- nfs_access_add_cache(inode, &cache);
+ nfs_access_add_cache(inode, &cache, cred);
out_cached:
cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode);
if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
@@ -2955,7 +3321,7 @@ static int nfs_execute_ok(struct inode *inode, int mask)
return ret;
}
-int nfs_permission(struct user_namespace *mnt_userns,
+int nfs_permission(struct mnt_idmap *idmap,
struct inode *inode,
int mask)
{
@@ -3006,7 +3372,7 @@ out_notsup:
res = nfs_revalidate_inode(inode, NFS_INO_INVALID_MODE |
NFS_INO_INVALID_OTHER);
if (res == 0)
- res = generic_permission(&init_user_ns, inode, mask);
+ res = generic_permission(&nop_mnt_idmap, inode, mask);
goto out;
}
EXPORT_SYMBOL_GPL(nfs_permission);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 9cff8709c80a..48d89716193a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,47 +56,17 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
+#include "fscache.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_VFS
static struct kmem_cache *nfs_direct_cachep;
-struct nfs_direct_req {
- struct kref kref; /* release manager */
-
- /* I/O parameters */
- struct nfs_open_context *ctx; /* file open context info */
- struct nfs_lock_context *l_ctx; /* Lock context info */
- struct kiocb * iocb; /* controlling i/o request */
- struct inode * inode; /* target file of i/o */
-
- /* completion state */
- atomic_t io_count; /* i/os we're waiting for */
- spinlock_t lock; /* protect completion state */
-
- loff_t io_start; /* Start offset for I/O */
- ssize_t count, /* bytes actually processed */
- max_count, /* max expected count */
- bytes_left, /* bytes left to be sent */
- error; /* any reported error */
- struct completion completion; /* wait for i/o completion */
-
- /* commit state */
- struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
- struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
- struct work_struct work;
- int flags;
- /* for write */
-#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
-#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
- /* for read */
-#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
-#define NFS_ODIRECT_DONE INT_MAX /* write verification failed */
-};
-
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
@@ -124,12 +94,10 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
dreq->max_count = dreq_len;
if (dreq->count > dreq_len)
dreq->count = dreq_len;
-
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
- dreq->error = hdr->error;
- else /* Clear outstanding error if this is EOF */
- dreq->error = 0;
}
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
+ dreq->error = hdr->error;
}
static void
@@ -151,29 +119,50 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
dreq->count = dreq_len;
}
+static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
+ struct nfs_page *req)
+{
+ loff_t offs = req_offset(req);
+ size_t req_start = (size_t)(offs - dreq->io_start);
+
+ if (req_start < dreq->max_count)
+ dreq->max_count = req_start;
+ if (req_start < dreq->count)
+ dreq->count = req_start;
+}
+
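/*
 * Editorial note, worked example: nfs_direct_truncate_request() clamps
 * the request accounting at the start of a failed sub-request.  With
 * io_start = 0 for a 16 KiB O_DIRECT write, if the sub-request covering
 * bytes 8192..12287 fails, req_start is 8192 and both dreq->count and
 * dreq->max_count are cut back to 8192, so only the contiguous prefix
 * that definitely reached the server is reported as written.
 */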
+static void nfs_direct_file_adjust_size_locked(struct inode *inode,
+ loff_t offset, size_t count)
+{
+ loff_t newsize = offset + (loff_t)count;
+ loff_t oldsize = i_size_read(inode);
+
+ if (newsize > oldsize) {
+ i_size_write(inode, newsize);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
+ trace_nfs_size_grow(inode, newsize);
+ nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+ }
+}
+
/**
- * nfs_direct_IO - NFS address space operation for direct I/O
+ * nfs_swap_rw - NFS address space operation for swap I/O
* @iocb: target I/O control block
* @iter: I/O buffer
*
- * The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, for most direct IO, we
- * shunt off direct read and write requests before the VFS gets them,
- * so this method is only ever called for swap.
+ * Perform IO to the swap-file. This is much like direct IO.
*/
-ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
- struct inode *inode = iocb->ki_filp->f_mapping->host;
-
- /* we only support swap file calling nfs_direct_IO */
- if (!IS_SWAPFILE(inode))
- return 0;
-
- VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
+ ssize_t ret;
if (iov_iter_rw(iter) == READ)
- return nfs_file_direct_read(iocb, iter);
- return nfs_file_direct_write(iocb, iter);
+ ret = nfs_file_direct_read(iocb, iter, true);
+ else
+ ret = nfs_file_direct_write(iocb, iter, true);
+ if (ret < 0)
+ return ret;
+ return 0;
}
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
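/*
 * Editorial sketch, not part of this patch: with the ->direct_IO hook
 * gone, swap I/O reaches NFS through the newer address_space_operations
 * ->swap_rw method.  The wiring in fs/nfs/file.c is assumed to look
 * roughly like this (only the swap-related members shown):
 */
static const struct address_space_operations nfs_file_aops_sketch = {
	/* ... read_folio, writepages, write_begin/end, etc. elided ... */
	.swap_activate		= nfs_swap_activate,
	.swap_rw		= nfs_swap_rw,
	.swap_deactivate	= nfs_swap_deactivate,
};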
@@ -229,9 +218,10 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
kref_put(&dreq->kref, nfs_direct_req_free);
}
-ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
+ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
{
- return dreq->bytes_left;
+ loff_t start = offset - dreq->io_start;
+ return dreq->max_count - start;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
@@ -297,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
nfs_direct_count_bytes(dreq, hdr);
spin_unlock(&dreq->lock);
+ nfs_update_delegated_atime(dreq->inode);
+
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
@@ -328,6 +320,7 @@ static void nfs_read_sync_pgio_error(struct list_head *head, int error)
static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
get_dreq(hdr->dreq);
+ set_bit(NFS_IOHDR_ODIRECT, &hdr->flags);
}
static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
@@ -366,26 +359,23 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
size_t pgbase;
unsigned npages, i;
- result = iov_iter_get_pages_alloc(iter, &pagevec,
+ result = iov_iter_get_pages_alloc2(iter, &pagevec,
rsize, &pgbase);
if (result < 0)
break;
bytes = result;
- iov_iter_advance(iter, bytes);
npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
for (i = 0; i < npages; i++) {
struct nfs_page *req;
unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
/* XXX do we need to do the eof zeroing found in async_filler? */
- req = nfs_create_request(dreq->ctx, pagevec[i],
- pgbase, req_len);
+ req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
+ pgbase, pos, req_len);
if (IS_ERR(req)) {
result = PTR_ERR(req);
break;
}
- req->wb_index = pos >> PAGE_SHIFT;
- req->wb_offset = pos & ~PAGE_MASK;
if (!nfs_pageio_add_request(&desc, req)) {
result = desc.pg_error;
nfs_release_request(req);
@@ -395,7 +385,6 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
bytes -= req_len;
requested_bytes += req_len;
pos += req_len;
- dreq->bytes_left -= req_len;
}
nfs_direct_release_pages(pagevec, npages);
kvfree(pagevec);
@@ -424,6 +413,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* nfs_file_direct_read - file direct read operation for NFS files
* @iocb: target I/O control block
* @iter: vector of user buffers into which to read data
+ * @swap: flag indicating this is swap IO, not O_DIRECT IO
*
* We use this function for direct reads instead of calling
* generic_file_aio_read() in order to avoid gfar's check to see if
@@ -439,7 +429,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* client must read the updated atime from the server back into its
* cache.
*/
-ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+ bool swap)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -465,7 +456,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
goto out;
dreq->inode = inode;
- dreq->bytes_left = dreq->max_count = count;
+ dreq->max_count = count;
dreq->io_start = iocb->ki_pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -478,15 +469,25 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- if (iter_is_iovec(iter))
+ if (user_backed_iter(iter))
dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
- nfs_start_io_direct(inode);
+ if (!swap) {
+ result = nfs_start_io_direct(inode);
+ if (result) {
+ /* release the reference that would usually be
+ * consumed by nfs_direct_read_schedule_iovec()
+ */
+ nfs_direct_req_release(dreq);
+ goto out_release;
+ }
+ }
NFS_I(inode)->read_io += count;
requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
- nfs_end_io_direct(inode);
+ if (!swap)
+ nfs_end_io_direct(inode);
if (requested > 0) {
result = nfs_direct_wait(dreq);
@@ -505,21 +506,47 @@ out:
return result;
}
-static void
-nfs_direct_join_group(struct list_head *list, struct inode *inode)
+static void nfs_direct_add_page_head(struct list_head *list,
+ struct nfs_page *req)
{
- struct nfs_page *req, *next;
+ struct nfs_page *head = req->wb_head;
+
+ if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
+ return;
+ if (!list_empty(&head->wb_list)) {
+ nfs_unlock_request(head);
+ return;
+ }
+ list_add(&head->wb_list, list);
+ kref_get(&head->wb_kref);
+ kref_get(&head->wb_kref);
+}
+
+static void nfs_direct_join_group(struct list_head *list,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode)
+{
+ struct nfs_page *req, *subreq;
list_for_each_entry(req, list, wb_list) {
- if (req->wb_head != req || req->wb_this_page == req)
+ if (req->wb_head != req) {
+ nfs_direct_add_page_head(&req->wb_list, req);
continue;
- for (next = req->wb_this_page;
- next != req->wb_head;
- next = next->wb_this_page) {
- nfs_list_remove_request(next);
- nfs_release_request(next);
}
- nfs_join_page_group(req, inode);
+ subreq = req->wb_this_page;
+ if (subreq == req)
+ continue;
+ do {
+ /*
+ * Remove subrequests from this list before freeing
+ * them in the call to nfs_join_page_group().
+ */
+ if (!list_empty(&subreq->wb_list)) {
+ nfs_list_remove_request(subreq);
+ nfs_release_request(subreq);
+ }
+ } while ((subreq = subreq->wb_this_page) != req);
+ nfs_join_page_group(req, cinfo, inode);
}
}
@@ -537,20 +564,15 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor desc;
- struct nfs_page *req, *tmp;
+ struct nfs_page *req;
LIST_HEAD(reqs);
struct nfs_commit_info cinfo;
- LIST_HEAD(failed);
nfs_init_cinfo_from_dreq(&cinfo, dreq);
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
- nfs_direct_join_group(&reqs, dreq->inode);
+ nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
- dreq->count = 0;
- dreq->max_count = 0;
- list_for_each_entry(req, &reqs, wb_list)
- dreq->max_count += req->wb_bytes;
nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
get_dreq(dreq);
@@ -558,27 +580,40 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
- list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+ while (!list_empty(&reqs)) {
+ req = nfs_list_entry(reqs.next);
/* Bump the transmission count */
req->wb_nio++;
if (!nfs_pageio_add_request(&desc, req)) {
- nfs_list_move_request(req, &failed);
- spin_lock(&cinfo.inode->i_lock);
- dreq->flags = 0;
- if (desc.pg_error < 0)
+ spin_lock(&dreq->lock);
+ if (dreq->error < 0) {
+ desc.pg_error = dreq->error;
+ } else if (desc.pg_error != -EAGAIN) {
+ dreq->flags = 0;
+ if (!desc.pg_error)
+ desc.pg_error = -EIO;
dreq->error = desc.pg_error;
- else
- dreq->error = -EIO;
- spin_unlock(&cinfo.inode->i_lock);
+ } else
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
+ break;
}
nfs_release_request(req);
}
nfs_pageio_complete(&desc);
- while (!list_empty(&failed)) {
- req = nfs_list_entry(failed.next);
+ while (!list_empty(&reqs)) {
+ req = nfs_list_entry(reqs.next);
nfs_list_remove_request(req);
nfs_unlock_and_release_request(req);
+ if (desc.pg_error == -EAGAIN) {
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ } else {
+ spin_lock(&dreq->lock);
+ nfs_direct_truncate_request(dreq, req);
+ spin_unlock(&dreq->lock);
+ nfs_release_request(req);
+ }
}
if (put_dreq(dreq))
@@ -593,29 +628,40 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
struct nfs_page *req;
int status = data->task.tk_status;
+ trace_nfs_direct_commit_complete(dreq);
+
+ spin_lock(&dreq->lock);
if (status < 0) {
/* Errors in commit are fatal */
dreq->error = status;
- dreq->max_count = 0;
- dreq->count = 0;
dreq->flags = NFS_ODIRECT_DONE;
- } else if (dreq->flags == NFS_ODIRECT_DONE)
+ } else {
status = dreq->error;
+ }
+ spin_unlock(&dreq->lock);
nfs_init_cinfo_from_dreq(&cinfo, dreq);
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- if (status >= 0 && !nfs_write_match_verf(verf, req)) {
- dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ if (status < 0) {
+ spin_lock(&dreq->lock);
+ nfs_direct_truncate_request(dreq, req);
+ spin_unlock(&dreq->lock);
+ nfs_release_request(req);
+ } else if (!nfs_write_match_verf(verf, req)) {
+ spin_lock(&dreq->lock);
+ if (dreq->flags == 0)
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
/*
* Despite the reboot, the write was successful,
* so reset wb_nio.
*/
req->wb_nio = 0;
nfs_mark_request_commit(req, NULL, &cinfo, 0);
- } else /* Error or match */
+ } else
nfs_release_request(req);
nfs_unlock_and_release_request(req);
}
@@ -629,6 +675,8 @@ static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
{
struct nfs_direct_req *dreq = cinfo->dreq;
+ trace_nfs_direct_resched_write(dreq);
+
spin_lock(&dreq->lock);
if (dreq->flags != NFS_ODIRECT_DONE)
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
@@ -648,10 +696,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
LIST_HEAD(mds_list);
nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ nfs_commit_begin(cinfo.mds);
nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
- if (res < 0) /* res == -ENOMEM */
- nfs_direct_write_reschedule(dreq);
+ if (res < 0) { /* res == -ENOMEM */
+ spin_lock(&dreq->lock);
+ if (dreq->flags == 0)
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
+ }
+ if (nfs_commit_end(cinfo.mds))
+ nfs_direct_write_complete(dreq);
}
static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
@@ -666,6 +721,7 @@ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
while (!list_empty(&reqs)) {
req = nfs_list_entry(reqs.next);
nfs_list_remove_request(req);
+ nfs_direct_truncate_request(dreq, req);
nfs_release_request(req);
nfs_unlock_and_release_request(req);
}
@@ -693,6 +749,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
+ trace_nfs_direct_write_complete(dreq);
queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}
@@ -700,9 +757,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
struct nfs_direct_req *dreq = hdr->dreq;
struct nfs_commit_info cinfo;
- struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct inode *inode = dreq->inode;
int flags = NFS_ODIRECT_DONE;
+ trace_nfs_direct_write_completion(dreq);
+
nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
@@ -712,14 +771,21 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
}
nfs_direct_count_bytes(dreq, hdr);
- if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
+ if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
+ !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
if (!dreq->flags)
dreq->flags = NFS_ODIRECT_DO_COMMIT;
flags = dreq->flags;
}
spin_unlock(&dreq->lock);
+ spin_lock(&inode->i_lock);
+ nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
+ nfs_update_delegated_mtime_locked(dreq->inode);
+ spin_unlock(&inode->i_lock);
+
while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req;
req = nfs_list_entry(hdr->pages.next);
nfs_list_remove_request(req);
@@ -756,16 +822,23 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error)
static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
struct nfs_direct_req *dreq = hdr->dreq;
+ struct nfs_page *req;
+ struct nfs_commit_info cinfo;
+
+ trace_nfs_direct_write_reschedule_io(dreq);
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
- if (dreq->error == 0) {
+ if (dreq->error == 0)
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
- /* fake unstable write to let common nfs resend pages */
- hdr->verf.committed = NFS_UNSTABLE;
- hdr->good_bytes = hdr->args.offset + hdr->args.count -
- hdr->io_start;
- }
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
spin_unlock(&dreq->lock);
+ while (!list_empty(&hdr->pages)) {
+ req = nfs_list_entry(hdr->pages.next);
+ nfs_list_remove_request(req);
+ nfs_unlock_request(req);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ }
}
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
@@ -789,15 +862,19 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
*/
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
struct iov_iter *iter,
- loff_t pos)
+ loff_t pos, int ioflags)
{
struct nfs_pageio_descriptor desc;
struct inode *inode = dreq->inode;
+ struct nfs_commit_info cinfo;
ssize_t result = 0;
size_t requested_bytes = 0;
size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
+ bool defer = false;
+
+ trace_nfs_direct_write_schedule_iovec(dreq);
- nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
+ nfs_pageio_init_write(&desc, inode, ioflags, false,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
get_dreq(dreq);
@@ -810,20 +887,19 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
size_t pgbase;
unsigned npages, i;
- result = iov_iter_get_pages_alloc(iter, &pagevec,
+ result = iov_iter_get_pages_alloc2(iter, &pagevec,
wsize, &pgbase);
if (result < 0)
break;
bytes = result;
- iov_iter_advance(iter, bytes);
npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
for (i = 0; i < npages; i++) {
struct nfs_page *req;
unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
- req = nfs_create_request(dreq->ctx, pagevec[i],
- pgbase, req_len);
+ req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
+ pgbase, pos, req_len);
if (IS_ERR(req)) {
result = PTR_ERR(req);
break;
@@ -835,19 +911,36 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
break;
}
+ pgbase = 0;
+ bytes -= req_len;
+ requested_bytes += req_len;
+ pos += req_len;
+
+ if (defer) {
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ continue;
+ }
+
nfs_lock_request(req);
- req->wb_index = pos >> PAGE_SHIFT;
- req->wb_offset = pos & ~PAGE_MASK;
- if (!nfs_pageio_add_request(&desc, req)) {
+ if (nfs_pageio_add_request(&desc, req))
+ continue;
+
+ /* Exit on hard errors */
+ if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
result = desc.pg_error;
nfs_unlock_and_release_request(req);
break;
}
- pgbase = 0;
- bytes -= req_len;
- requested_bytes += req_len;
- pos += req_len;
- dreq->bytes_left -= req_len;
+
+ /* If the error is soft, defer remaining requests */
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ spin_lock(&dreq->lock);
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
+ nfs_unlock_request(req);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
+ desc.pg_error = 0;
+ defer = true;
}
nfs_direct_release_pages(pagevec, npages);
kvfree(pagevec);
@@ -875,6 +968,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
* nfs_file_direct_write - file direct write operation for NFS files
* @iocb: target I/O control block
* @iter: vector of user buffers from which to write data
+ * @swap: flag indicating this is swap IO, not O_DIRECT IO
*
* We use this function for direct writes instead of calling
* generic_file_aio_write() in order to avoid taking the inode
@@ -891,7 +985,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
* Note that O_APPEND is not supported for NFS direct writes, as there
* is no atomic O_APPEND write facility in the NFS protocol.
*/
-ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+ bool swap)
{
ssize_t result, requested;
size_t count;
@@ -905,7 +1000,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
file, iov_iter_count(iter), (long long) iocb->ki_pos);
- result = generic_write_checks(iocb, iter);
+ if (swap)
+ /* bypass generic checks */
+ result = iov_iter_count(iter);
+ else
+ result = generic_write_checks(iocb, iter);
if (result <= 0)
return result;
count = result;
@@ -922,7 +1021,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
goto out;
dreq->inode = inode;
- dreq->bytes_left = dreq->max_count = count;
+ dreq->max_count = count;
dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -936,16 +1035,29 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
dreq->iocb = iocb;
pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
- nfs_start_io_direct(inode);
+ if (swap) {
+ requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+ FLUSH_STABLE);
+ } else {
+ result = nfs_start_io_direct(inode);
+ if (result) {
+ /* release the reference that would usually be
+ * consumed by nfs_direct_write_schedule_iovec()
+ */
+ nfs_direct_req_release(dreq);
+ goto out_release;
+ }
- requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
+ requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+ FLUSH_COND_STABLE);
- if (mapping->nrpages) {
- invalidate_inode_pages2_range(mapping,
- pos >> PAGE_SHIFT, end);
- }
+ if (mapping->nrpages) {
+ invalidate_inode_pages2_range(mapping,
+ pos >> PAGE_SHIFT, end);
+ }
- nfs_end_io_direct(inode);
+ nfs_end_io_direct(inode);
+ }
if (requested > 0) {
result = nfs_direct_wait(dreq);
@@ -959,6 +1071,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
} else {
result = requested;
}
+ nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
nfs_direct_req_release(dreq);
out:
@@ -973,8 +1086,7 @@ int __init nfs_init_directcache(void)
{
nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
sizeof(struct nfs_direct_req),
- 0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD),
+ 0, SLAB_RECLAIM_ACCOUNT,
NULL);
if (nfs_direct_cachep == NULL)
return -ENOMEM;
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index e87d500ad95a..714975e5c0db 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -7,17 +7,20 @@
* Resolves DNS hostnames into valid ip addresses
*/
-#ifdef CONFIG_NFS_USE_KERNEL_DNS
-
#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
-#include <linux/dns_resolver.h>
+
#include "dns_resolve.h"
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+
+#include <linux/dns_resolver.h>
+
ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
- struct sockaddr *sa, size_t salen)
+ struct sockaddr_storage *ss, size_t salen)
{
+ struct sockaddr *sa = (struct sockaddr *)ss;
ssize_t ret;
char *ip_addr = NULL;
int ip_len;
@@ -34,7 +37,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#else
-#include <linux/module.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
@@ -42,15 +44,12 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#include <linux/socket.h>
#include <linux/seq_file.h>
#include <linux/inet.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
-#include "dns_resolve.h"
#include "cache_lib.h"
#include "netns.h"
@@ -341,7 +340,7 @@ out:
}
ssize_t nfs_dns_resolve_name(struct net *net, char *name,
- size_t namelen, struct sockaddr *sa, size_t salen)
+ size_t namelen, struct sockaddr_storage *ss, size_t salen)
{
struct nfs_dns_ent key = {
.hostname = name,
@@ -354,7 +353,7 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name,
ret = do_cache_lookup_wait(nn->nfs_dns_resolve, &key, &item);
if (ret == 0) {
if (salen >= item->addrlen) {
- memcpy(sa, &item->addr, item->addrlen);
+ memcpy(ss, &item->addr, item->addrlen);
ret = item->addrlen;
} else
ret = -EOVERFLOW;
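/*
 * Editorial sketch, not part of this patch: a hypothetical caller of the
 * new sockaddr_storage based interface.  struct sockaddr_storage is
 * large enough for any address family, so no casting is needed at the
 * call site.
 */
static int example_resolve(struct net *net)
{
	char name[] = "nfs.example.com";	/* hypothetical host name */
	struct sockaddr_storage ss;
	ssize_t len;

	len = nfs_dns_resolve_name(net, name, strlen(name), &ss, sizeof(ss));
	if (len < 0)
		return (int)len;	/* e.g. -ESRCH or -EOVERFLOW */
	/* ss now holds a resolved address of 'len' bytes */
	return 0;
}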
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
index 576ff4b54c82..fe3b172c4de1 100644
--- a/fs/nfs/dns_resolve.h
+++ b/fs/nfs/dns_resolve.h
@@ -32,6 +32,6 @@ extern void nfs_dns_resolver_cache_destroy(struct net *net);
#endif
extern ssize_t nfs_dns_resolve_name(struct net *net, char *name,
- size_t namelen, struct sockaddr *sa, size_t salen);
+ size_t namelen, struct sockaddr_storage *sa, size_t salen);
#endif
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index 171c424cb6d5..a10dd5f9d078 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -42,7 +42,7 @@ nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
dprintk("%s: max fh len %d inode %p parent %p",
__func__, *max_len, inode, parent);
- if (*max_len < len || IS_AUTOMOUNT(inode)) {
+ if (*max_len < len) {
dprintk("%s: fh len %d too small, required %d\n",
__func__, *max_len, len);
*max_len = len;
@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
{
struct nfs_fattr *fattr = NULL;
struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
- size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
+ size_t fh_size = offsetof(struct nfs_fh, data);
const struct nfs_rpc_ops *rpc_ops;
struct dentry *dentry;
struct inode *inode;
- int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
+ int len = EMBED_FH_OFF;
u32 *p = fid->raw;
int ret;
+ /* Initial check of bounds */
+ if (fh_len < len + XDR_QUADLEN(fh_size) ||
+ fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
+ return NULL;
+ /* Calculate embedded filehandle size */
+ fh_size += server_fh->size;
+ len += XDR_QUADLEN(fh_size);
/* NULL translates to ESTALE */
if (fh_len < len || fh_type != len)
return NULL;
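/*
 * Editorial note: the reordered checks validate fid->raw before the
 * embedded filehandle is trusted.  fh_len is first checked against the
 * fixed header alone (and against NFS_MAXFHSIZE), and only then is
 * server_fh->size folded into the expected length, so a truncated or
 * malformed file handle can no longer cause an out-of-bounds read.
 */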
@@ -145,18 +152,15 @@ out:
return parent;
}
-static u64 nfs_fetch_iversion(struct inode *inode)
-{
- nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
- return inode_peek_iversion_raw(inode);
-}
-
const struct export_operations nfs_export_ops = {
.encode_fh = nfs_encode_fh,
.fh_to_dentry = nfs_fh_to_dentry,
.get_parent = nfs_get_parent,
- .fetch_iversion = nfs_fetch_iversion,
- .flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
- EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
- EXPORT_OP_NOATOMIC_ATTR|EXPORT_OP_SYNC_LOCKS,
+ .flags = EXPORT_OP_NOWCC |
+ EXPORT_OP_NOSUBTREECHK |
+ EXPORT_OP_CLOSE_BEFORE_UNLINK |
+ EXPORT_OP_REMOTE_FS |
+ EXPORT_OP_NOATOMIC_ATTR |
+ EXPORT_OP_FLUSH_ON_CLOSE |
+ EXPORT_OP_NOLOCKS,
};
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 24e7dccce355..d020aab40c64 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -28,9 +28,12 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
+#include <linux/rmap.h>
#include <linux/swap.h>
+#include <linux/compaction.h>
#include <linux/uaccess.h>
+#include <linux/filelock.h>
#include "delegation.h"
#include "internal.h"
@@ -44,11 +47,6 @@
static const struct vm_operations_struct nfs_file_vm_ops;
-/* Hack for future NFS swap support */
-#ifndef IS_SWAPFILE
-# define IS_SWAPFILE(inode) (0)
-#endif
-
int nfs_check_flags(int flags)
{
if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
@@ -74,6 +72,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
return res;
res = nfs_open(inode, filp);
+ if (res == 0)
+ filp->f_mode |= FMODE_CAN_ODIRECT;
return res;
}
@@ -84,6 +84,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
nfs_file_clear_open_context(filp);
+ nfs_fscache_release_file(inode, filp);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);
@@ -160,14 +161,19 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t result;
+ trace_nfs_file_read(iocb, to);
+
if (iocb->ki_flags & IOCB_DIRECT)
- return nfs_file_direct_read(iocb, to);
+ return nfs_file_direct_read(iocb, to, false);
dprintk("NFS: read(%pD2, %zu@%lu)\n",
iocb->ki_filp,
iov_iter_count(to), (unsigned long) iocb->ki_pos);
- nfs_start_io_read(inode);
+ result = nfs_start_io_read(inode);
+ if (result)
+ return result;
+
result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
if (!result) {
result = generic_file_read_iter(iocb, to);
@@ -179,25 +185,50 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
}
EXPORT_SYMBOL_GPL(nfs_file_read);
+ssize_t
+nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct inode *inode = file_inode(in);
+ ssize_t result;
+
+ dprintk("NFS: splice_read(%pD2, %zu@%llu)\n", in, len, *ppos);
+
+ result = nfs_start_io_read(inode);
+ if (result)
+ return result;
+
+ result = nfs_revalidate_mapping(inode, in->f_mapping);
+ if (!result) {
+ result = filemap_splice_read(in, ppos, pipe, len, flags);
+ if (result > 0)
+ nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
+ }
+ nfs_end_io_read(inode);
+ return result;
+}
+EXPORT_SYMBOL_GPL(nfs_file_splice_read);
+
int
-nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+nfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
int status;
dprintk("NFS: mmap(%pD2)\n", file);
- /* Note: generic_file_mmap() returns ENOSYS on nommu systems
+ /* Note: generic_file_mmap_prepare() returns ENOSYS on nommu systems
* so we call that before revalidating the mapping
*/
- status = generic_file_mmap(file, vma);
+ status = generic_file_mmap_prepare(desc);
if (!status) {
- vma->vm_ops = &nfs_file_vm_ops;
+ desc->vm_ops = &nfs_file_vm_ops;
status = nfs_revalidate_mapping(inode, file->f_mapping);
}
return status;
}
-EXPORT_SYMBOL_GPL(nfs_file_mmap);
+EXPORT_SYMBOL_GPL(nfs_file_mmap_prepare);
/*
* Flush any dirty pages for this process, and check for write errors.
@@ -208,22 +239,25 @@ static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
struct inode *inode = file_inode(file);
- int ret;
+ int ret, ret2;
dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
ret = nfs_commit_inode(inode, FLUSH_SYNC);
- if (ret < 0)
- return ret;
- return file_check_and_advance_wb_err(file);
+ ret2 = file_check_and_advance_wb_err(file);
+ if (ret2 < 0)
+ return ret2;
+ return ret;
}
int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = file_inode(file);
+ struct nfs_inode *nfsi = NFS_I(inode);
+ long save_nredirtied = atomic_long_read(&nfsi->redirtied_pages);
+ long nredirtied;
int ret;
trace_nfs_fsync_enter(inode);
@@ -238,15 +272,10 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = pnfs_sync_inode(inode, !!datasync);
if (ret != 0)
break;
- if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
+ nredirtied = atomic_long_read(&nfsi->redirtied_pages);
+ if (nredirtied == save_nredirtied)
break;
- /*
- * If nfs_file_fsync_commit detected a server reboot, then
- * resend all dirty pages that might have been covered by
- * the NFS_CONTEXT_RESEND_WRITES flag
- */
- start = 0;
- end = LLONG_MAX;
+ save_nredirtied = nredirtied;
}
trace_nfs_fsync_exit(inode, ret);
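/*
 * Editorial note: rather than replaying the whole file range whenever
 * NFS_CONTEXT_RESEND_WRITES was set, the loop now samples
 * nfsi->redirtied_pages before and after each commit pass and repeats
 * only while new pages were redirtied (for instance after a server
 * reboot forced a resend), stopping once a pass completes without any
 * new redirtied pages.
 */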
@@ -254,6 +283,37 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);
+void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
+ loff_t to)
+{
+ struct folio *folio;
+
+ if (from >= to)
+ return;
+
+ folio = filemap_lock_folio(mapping, from >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
+ if (folio_mkclean(folio))
+ folio_mark_dirty(folio);
+
+ if (folio_test_uptodate(folio)) {
+ loff_t fpos = folio_pos(folio);
+ size_t offset = from - fpos;
+ size_t end = folio_size(folio);
+
+ if (to - fpos < end)
+ end = to - fpos;
+ folio_zero_segment(folio, offset, end);
+ trace_nfs_size_truncate_folio(mapping->host, to);
+ }
+
+ folio_unlock(folio);
+ folio_put(folio);
+}
+EXPORT_SYMBOL_GPL(nfs_truncate_last_folio);
+
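/*
 * Editorial note: nfs_write_begin() below calls this helper with
 * from = i_size_read(inode) and to = pos, so when a write starts beyond
 * the current end of file the cached tail of the old last folio is
 * cleaned and, if up to date, zeroed first.  That keeps stale data
 * between the old and new EOF from ever becoming visible.
 */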
/*
* Decide whether a read/modify/write cycle may be more efficient
* than a modify/write/read cycle when writing to a page in the
@@ -280,28 +340,31 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync);
* and that the new data won't completely replace the old data in
* that range of the file.
*/
-static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
+static bool nfs_folio_is_full_write(struct folio *folio, loff_t pos,
+ unsigned int len)
{
- unsigned int pglen = nfs_page_length(page);
- unsigned int offset = pos & (PAGE_SIZE - 1);
+ unsigned int pglen = nfs_folio_length(folio);
+ unsigned int offset = offset_in_folio(folio, pos);
unsigned int end = offset + len;
return !pglen || (end >= pglen && !offset);
}
-static bool nfs_want_read_modify_write(struct file *file, struct page *page,
- loff_t pos, unsigned int len)
+static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
+ loff_t pos, unsigned int len)
{
/*
* Up-to-date pages, those with ongoing or full-page write
* don't need read/modify/write
*/
- if (PageUptodate(page) || PagePrivate(page) ||
- nfs_full_page_write(page, pos, len))
+ if (folio_test_uptodate(folio) || folio_test_private(folio) ||
+ nfs_folio_is_full_write(folio, pos, len))
return false;
- if (pnfs_ld_read_whole_page(file->f_mapping->host))
+ if (pnfs_ld_read_whole_page(file_inode(file)))
return true;
+ if (folio_test_dropbehind(folio))
+ return false;
/* Open for reading too? */
if (file->f_mode & FMODE_READ)
return true;
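/*
 * Editorial note, worked example: nfs_folio_is_full_write() is true when
 * the folio caches nothing (pglen == 0) or the write covers everything
 * that is cached (offset == 0 and offset + len >= pglen).  For a
 * 4096-byte folio wholly inside the file, a 4096-byte write at the folio
 * start skips the read/modify/write cycle, while a 512-byte write at
 * offset 1024 may still need one unless the folio is already up to date.
 */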
@@ -316,47 +379,59 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
* If the writer ends up delaying the write, the writer needs to
* increment the page use counts until he is done with the page.
*/
-static int nfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+static int nfs_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, struct folio **foliop,
+ void **fsdata)
{
- int ret;
- pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
+ struct file *file = iocb->ki_filp;
int once_thru = 0;
+ int ret;
+
+ trace_nfs_write_begin(file_inode(file), pos, len);
dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
file, mapping->host->i_ino, len, (long long) pos);
+ nfs_truncate_last_folio(mapping, i_size_read(mapping->host), pos);
start:
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ folio = write_begin_get_folio(iocb, mapping, pos >> PAGE_SHIFT, len);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ goto out;
+ }
+ *foliop = folio;
- ret = nfs_flush_incompatible(file, page);
+ ret = nfs_flush_incompatible(file, folio);
if (ret) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
} else if (!once_thru &&
- nfs_want_read_modify_write(file, page, pos, len)) {
+ nfs_want_read_modify_write(file, folio, pos, len)) {
once_thru = 1;
- ret = nfs_readpage(file, page);
- put_page(page);
+ folio_clear_dropbehind(folio);
+ ret = nfs_read_folio(file, folio);
+ folio_put(folio);
if (!ret)
goto start;
}
+out:
+ trace_nfs_write_begin_done(file_inode(file), pos, len, ret);
return ret;
}
-static int nfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+static int nfs_write_end(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct folio *folio, void *fsdata)
{
- unsigned offset = pos & (PAGE_SIZE - 1);
+ struct file *file = iocb->ki_filp;
struct nfs_open_context *ctx = nfs_file_open_context(file);
+ unsigned offset = offset_in_folio(folio, pos);
int status;
+ trace_nfs_write_end(file_inode(file), pos, len);
dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
file, mapping->host->i_ino, len, (long long) pos);
@@ -364,37 +439,37 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
* Zero any uninitialised parts of the page, and then mark the page
* as up to date if it turns out that we're extending the file.
*/
- if (!PageUptodate(page)) {
- unsigned pglen = nfs_page_length(page);
+ if (!folio_test_uptodate(folio)) {
+ size_t fsize = folio_size(folio);
+ unsigned pglen = nfs_folio_length(folio);
unsigned end = offset + copied;
if (pglen == 0) {
- zero_user_segments(page, 0, offset,
- end, PAGE_SIZE);
- SetPageUptodate(page);
+ folio_zero_segments(folio, 0, offset, end, fsize);
+ folio_mark_uptodate(folio);
} else if (end >= pglen) {
- zero_user_segment(page, end, PAGE_SIZE);
+ folio_zero_segment(folio, end, fsize);
if (offset == 0)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else
- zero_user_segment(page, pglen, PAGE_SIZE);
+ folio_zero_segment(folio, pglen, fsize);
}
- status = nfs_updatepage(file, page, offset, copied);
+ status = nfs_update_folio(file, folio, offset, copied);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
- if (status < 0)
+ if (status < 0) {
+ trace_nfs_write_end_done(file_inode(file), pos, len, status);
return status;
+ }
NFS_I(mapping->host)->write_io += copied;
- if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
- status = nfs_wb_all(mapping->host);
- if (status < 0)
- return status;
- }
+ if (nfs_ctx_key_to_expire(ctx, mapping->host))
+ nfs_wb_all(mapping->host);
+ trace_nfs_write_end_done(file_inode(file), pos, len, copied);
return copied;
}
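/*
 * The zeroing above keeps the folio consistent without a read, e.g. for
 * a 4K folio: when it held no valid data (pglen == 0), everything outside
 * [offset, end) is cleared and the folio becomes uptodate; when the copy
 * reaches the old valid length (end >= pglen), only the tail [end, 4096)
 * is cleared, and the folio is uptodate if the copy started at offset 0;
 * otherwise only the stale range [pglen, 4096) is cleared and the folio
 * stays !uptodate, because the bytes outside the copy were never read in.
 */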
@@ -405,49 +480,53 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
* - Called if either PG_private or PG_fscache is set on the page
* - Caller holds page lock
*/
-static void nfs_invalidate_page(struct page *page, unsigned int offset,
- unsigned int length)
+static void nfs_invalidate_folio(struct folio *folio, size_t offset,
+ size_t length)
{
- dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
- page, offset, length);
+ struct inode *inode = folio->mapping->host;
+ dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
+ folio->index, offset, length);
- if (offset != 0 || length < PAGE_SIZE)
- return;
/* Cancel any unstarted writes on this page */
- nfs_wb_page_cancel(page_file_mapping(page)->host, page);
-
- nfs_fscache_invalidate_page(page, page->mapping->host);
+ if (offset != 0 || length < folio_size(folio))
+ nfs_wb_folio(inode, folio);
+ else
+ nfs_wb_folio_cancel(inode, folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+ trace_nfs_invalidate_folio(inode, folio_pos(folio) + offset, length);
}
/*
- * Attempt to release the private state associated with a page
- * - Called if either PG_private or PG_fscache is set on the page
- * - Caller holds page lock
- * - Return true (may release page) or false (may not)
+ * Attempt to release the private state associated with a folio
+ * - Called if either private or fscache flags are set on the folio
+ * - Caller holds folio lock
+ * - Return true (may release folio) or false (may not)
*/
-static int nfs_release_page(struct page *page, gfp_t gfp)
+static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
{
- dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
-
- /* If PagePrivate() is set, then the page is not freeable */
- if (PagePrivate(page))
- return 0;
- return nfs_fscache_release_page(page, gfp);
+ dfprintk(PAGECACHE, "NFS: release_folio(%p)\n", folio);
+
+ /* If the private flag is set, then the folio is not freeable */
+ if (folio_test_private(folio)) {
+ if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
+ current_is_kswapd() || current_is_kcompactd())
+ return false;
+ if (nfs_wb_folio(folio->mapping->host, folio) < 0)
+ return false;
+ }
+ return nfs_fscache_release_folio(folio, gfp);
}
-static void nfs_check_dirty_writeback(struct page *page,
+static void nfs_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct nfs_inode *nfsi;
- struct address_space *mapping = page_file_mapping(page);
-
- if (!mapping || PageSwapCache(page))
- return;
+ struct address_space *mapping = folio->mapping;
/*
- * Check if an unstable page is currently being committed and
- * if so, have the VM treat it as if the page is under writeback
- * so it will not block due to pages that will shortly be freeable.
+ * Check if an unstable folio is currently being committed and
+ * if so, have the VM treat it as if the folio is under writeback
+ * so it will not block due to folios that will shortly be freeable.
*/
nfsi = NFS_I(mapping->host);
if (atomic_read(&nfsi->commit_info.rpcs_out)) {
@@ -456,11 +535,11 @@ static void nfs_check_dirty_writeback(struct page *page,
}
/*
- * If PagePrivate() is set, then the page is not freeable and as the
- * inode is not being committed, it's not going to be cleaned in the
- * near future so treat it as dirty
+ * If the private flag is set, then the folio is not freeable
+ * and as the inode is not being committed, it's not going to
+ * be cleaned in the near future so treat it as dirty
*/
- if (PagePrivate(page))
+ if (folio_test_private(folio))
*dirty = true;
}
@@ -472,16 +551,19 @@ static void nfs_check_dirty_writeback(struct page *page,
* - Caller holds page lock
* - Return 0 if successful, -error otherwise
*/
-static int nfs_launder_page(struct page *page)
+static int nfs_launder_folio(struct folio *folio)
{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_inode *nfsi = NFS_I(inode);
+ struct inode *inode = folio->mapping->host;
+ int ret;
- dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
- inode->i_ino, (long long)page_offset(page));
+ dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
+ inode->i_ino, folio_pos(folio));
- nfs_fscache_wait_on_page_write(nfsi, page);
- return nfs_wb_page(inode, page);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+ ret = nfs_wb_folio(inode, folio);
+ trace_nfs_launder_folio_done(inode, folio_pos(folio),
+ folio_size(folio), ret);
+ return ret;
}
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
@@ -489,8 +571,10 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
{
unsigned long blocks;
long long isize;
- struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
- struct inode *inode = file->f_mapping->host;
+ int ret;
+ struct inode *inode = file_inode(file);
+ struct rpc_clnt *clnt = NFS_CLIENT(inode);
+ struct nfs_client *cl = NFS_SERVER(inode)->nfs_client;
spin_lock(&inode->i_lock);
blocks = inode->i_blocks;
@@ -501,37 +585,51 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
return -EINVAL;
}
+ ret = rpc_clnt_swap_activate(clnt);
+ if (ret)
+ return ret;
+ ret = add_swap_extent(sis, 0, sis->max, 0);
+ if (ret < 0) {
+ rpc_clnt_swap_deactivate(clnt);
+ return ret;
+ }
+
*span = sis->pages;
- return rpc_clnt_swap_activate(clnt);
+ if (cl->rpc_ops->enable_swap)
+ cl->rpc_ops->enable_swap(inode);
+
+ sis->flags |= SWP_FS_OPS;
+ return ret;
}
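/*
 * Note the ordering above: the RPC transport is put into swap mode
 * before the swap extent is added, and is switched back if
 * add_swap_extent() fails, so a failed activation leaves nothing
 * behind. SWP_FS_OPS tells the swap code to perform I/O through the
 * filesystem's ->swap_rw() rather than submitting bios directly.
 */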
static void nfs_swap_deactivate(struct file *file)
{
- struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
+ struct inode *inode = file_inode(file);
+ struct rpc_clnt *clnt = NFS_CLIENT(inode);
+ struct nfs_client *cl = NFS_SERVER(inode)->nfs_client;
rpc_clnt_swap_deactivate(clnt);
+ if (cl->rpc_ops->disable_swap)
+ cl->rpc_ops->disable_swap(file_inode(file));
}
const struct address_space_operations nfs_file_aops = {
- .readpage = nfs_readpage,
- .readpages = nfs_readpages,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .writepage = nfs_writepage,
+ .read_folio = nfs_read_folio,
+ .readahead = nfs_readahead,
+ .dirty_folio = filemap_dirty_folio,
.writepages = nfs_writepages,
.write_begin = nfs_write_begin,
.write_end = nfs_write_end,
- .invalidatepage = nfs_invalidate_page,
- .releasepage = nfs_release_page,
- .direct_IO = nfs_direct_IO,
-#ifdef CONFIG_MIGRATION
- .migratepage = nfs_migrate_page,
-#endif
- .launder_page = nfs_launder_page,
+ .invalidate_folio = nfs_invalidate_folio,
+ .release_folio = nfs_release_folio,
+ .migrate_folio = nfs_migrate_folio,
+ .launder_folio = nfs_launder_folio,
.is_dirty_writeback = nfs_check_dirty_writeback,
- .error_remove_page = generic_error_remove_page,
+ .error_remove_folio = generic_error_remove_folio,
.swap_activate = nfs_swap_activate,
.swap_deactivate = nfs_swap_deactivate,
+ .swap_rw = nfs_swap_rw,
};
/*
@@ -541,44 +639,49 @@ const struct address_space_operations nfs_file_aops = {
*/
static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
unsigned pagelen;
vm_fault_t ret = VM_FAULT_NOPAGE;
struct address_space *mapping;
+ struct folio *folio = page_folio(vmf->page);
dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
- filp, filp->f_mapping->host->i_ino,
- (long long)page_offset(page));
+ filp, filp->f_mapping->host->i_ino,
+ (long long)folio_pos(folio));
sb_start_pagefault(inode->i_sb);
/* make sure the cache has finished storing the page */
- nfs_fscache_wait_on_page_write(NFS_I(inode), page);
+ if (folio_test_private_2(folio) && /* [DEPRECATED] */
+ folio_wait_private_2_killable(folio) < 0) {
+ ret = VM_FAULT_RETRY;
+ goto out;
+ }
wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
- nfs_wait_bit_killable, TASK_KILLABLE);
+ nfs_wait_bit_killable,
+ TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
- lock_page(page);
- mapping = page_file_mapping(page);
+ folio_lock(folio);
+ mapping = folio->mapping;
if (mapping != inode->i_mapping)
goto out_unlock;
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- pagelen = nfs_page_length(page);
+ pagelen = nfs_folio_length(folio);
if (pagelen == 0)
goto out_unlock;
ret = VM_FAULT_LOCKED;
- if (nfs_flush_incompatible(filp, page) == 0 &&
- nfs_updatepage(filp, page, 0, pagelen) == 0)
+ if (nfs_flush_incompatible(filp, folio) == 0 &&
+ nfs_update_folio(filp, folio, 0, pagelen) == 0)
goto out;
ret = VM_FAULT_SIGBUS;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
out:
sb_end_pagefault(inode->i_sb);
return ret;
@@ -590,18 +693,6 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
.page_mkwrite = nfs_vm_page_mkwrite,
};
-static int nfs_need_check_write(struct file *filp, struct inode *inode,
- int error)
-{
- struct nfs_open_context *ctx;
-
- ctx = nfs_file_open_context(filp);
- if (nfs_error_is_fatal_on_server(error) ||
- nfs_ctx_key_to_expire(ctx, inode))
- return 1;
- return 0;
-}
-
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
@@ -611,12 +702,14 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
errseq_t since;
int error;
+ trace_nfs_file_write(iocb, from);
+
result = nfs_key_timeout_notify(file, inode);
if (result)
return result;
if (iocb->ki_flags & IOCB_DIRECT)
- return nfs_file_direct_write(iocb, from);
+ return nfs_file_direct_write(iocb, from, false);
dprintk("NFS: write(%pD2, %zu@%Ld)\n",
file, iov_iter_count(from), (long long) iocb->ki_pos);
@@ -629,25 +722,24 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) {
result = nfs_revalidate_file_size(inode, file);
if (result)
- goto out;
+ return result;
}
nfs_clear_invalid_mapping(file->f_mapping);
since = filemap_sample_wb_err(file->f_mapping);
- nfs_start_io_write(inode);
+ error = nfs_start_io_write(inode);
+ if (error)
+ return error;
result = generic_write_checks(iocb, from);
- if (result > 0) {
- current->backing_dev_info = inode_to_bdi(inode);
- result = generic_perform_write(file, from, iocb->ki_pos);
- current->backing_dev_info = NULL;
- }
+ if (result > 0)
+ result = generic_perform_write(iocb, from);
nfs_end_io_write(inode);
if (result <= 0)
goto out;
written = result;
- iocb->ki_pos += written;
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
if (mntflags & NFS_MOUNT_WRITE_EAGER) {
result = filemap_fdatawrite_range(file->f_mapping,
@@ -657,25 +749,28 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
if (mntflags & NFS_MOUNT_WRITE_WAIT) {
- result = filemap_fdatawait_range(file->f_mapping,
- iocb->ki_pos - written,
- iocb->ki_pos - 1);
- if (result < 0)
- goto out;
+ filemap_fdatawait_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
}
result = generic_write_sync(iocb, written);
if (result < 0)
- goto out;
+ return result;
+out:
/* Return error values */
error = filemap_check_wb_err(file->f_mapping, since);
- if (nfs_need_check_write(file, inode, error)) {
- int err = nfs_wb_all(inode);
- if (err < 0)
- result = err;
+ switch (error) {
+ default:
+ break;
+ case -EDQUOT:
+ case -EFBIG:
+ case -ENOSPC:
+ nfs_wb_all(inode);
+ error = file_check_and_advance_wb_err(file);
+ if (error < 0)
+ result = error;
}
- nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
-out:
return result;
out_swapfile:
@@ -689,17 +784,17 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
- unsigned int saved_type = fl->fl_type;
+ unsigned int saved_type = fl->c.flc_type;
/* Try local locking first */
posix_test_lock(filp, fl);
- if (fl->fl_type != F_UNLCK) {
+ if (fl->c.flc_type != F_UNLCK) {
/* found a conflict */
goto out;
}
- fl->fl_type = saved_type;
+ fl->c.flc_type = saved_type;
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ if (nfs_have_read_or_write_delegation(inode))
goto out_noconflict;
if (is_local)
@@ -709,7 +804,7 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
out:
return status;
out_noconflict:
- fl->fl_type = F_UNLCK;
+ fl->c.flc_type = F_UNLCK;
goto out;
}
@@ -734,7 +829,7 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
* If we're signalled while cleaning up locks on process exit, we
* still need to complete the unlock.
*/
- if (status < 0 && !(fl->fl_flags & FL_CLOSE))
+ if (status < 0 && !(fl->c.flc_flags & FL_CLOSE))
return status;
}
@@ -782,7 +877,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
* This makes locking act as a cache coherency point.
*/
nfs_sync_mapping(filp->f_mapping);
- if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
+ if (!nfs_have_read_or_write_delegation(inode)) {
nfs_zap_caches(inode);
if (mapping_mapped(filp->f_mapping))
nfs_revalidate_mapping(inode, filp->f_mapping);
@@ -801,12 +896,12 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
int is_local = 0;
dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
- filp, fl->fl_type, fl->fl_flags,
+ filp, fl->c.flc_type, fl->c.flc_flags,
(long long)fl->fl_start, (long long)fl->fl_end);
nfs_inc_stats(inode, NFSIOS_VFSLOCK);
- if (fl->fl_flags & FL_RECLAIM)
+ if (fl->c.flc_flags & FL_RECLAIM)
return -ENOGRACE;
if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
@@ -820,7 +915,7 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
if (IS_GETLK(cmd))
ret = do_getlk(filp, cmd, fl, is_local);
- else if (fl->fl_type == F_UNLCK)
+ else if (lock_is_unlock(fl))
ret = do_unlk(filp, cmd, fl, is_local);
else
ret = do_setlk(filp, cmd, fl, is_local);
@@ -838,16 +933,16 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
int is_local = 0;
dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
- filp, fl->fl_type, fl->fl_flags);
+ filp, fl->c.flc_type, fl->c.flc_flags);
- if (!(fl->fl_flags & FL_FLOCK))
+ if (!(fl->c.flc_flags & FL_FLOCK))
return -ENOLCK;
if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
is_local = 1;
/* We're simulating flock() locks using posix locks on the server */
- if (fl->fl_type == F_UNLCK)
+ if (lock_is_unlock(fl))
return do_unlk(filp, cmd, fl, is_local);
return do_setlk(filp, cmd, fl, is_local);
}
@@ -857,16 +952,17 @@ const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
- .mmap = nfs_file_mmap,
+ .mmap_prepare = nfs_file_mmap_prepare,
.open = nfs_file_open,
.flush = nfs_file_flush,
.release = nfs_file_release,
.fsync = nfs_file_fsync,
.lock = nfs_lock,
.flock = nfs_flock,
- .splice_read = generic_file_splice_read,
+ .splice_read = nfs_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = simple_nosetlease,
+ .fop_flags = FOP_DONTCACHE,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 9c96e3e5ed35..5c4551117c58 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -181,6 +181,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
+ case -EPROTO:
+ case -ENODEV:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
@@ -486,7 +488,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -528,7 +530,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -603,14 +605,6 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
dprintk("--> %s\n", __func__);
- /* FIXME: remove this check when layout segment support is added */
- if (lgr->range.offset != 0 ||
- lgr->range.length != NFS4_MAX_UINT64) {
- dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
- __func__);
- goto out;
- }
-
if (fl->pattern_offset > lgr->range.offset) {
dprintk("%s pattern_offset %lld too large\n",
__func__, fl->pattern_offset);
@@ -652,19 +646,19 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
{
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
__be32 *p;
uint32_t nfl_util;
int i;
dprintk("%s: set_layout_map Begin\n", __func__);
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
return -ENOMEM;
xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* 20 = ufl_util (4), first_stripe_index (4), pattern_offset (8),
* num_fh (4) */
@@ -730,11 +724,11 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
fl->fh_array[i]->size);
}
- __free_page(scratch);
+ folio_put(scratch);
return 0;
out_err:
- __free_page(scratch);
+ folio_put(scratch);
return -EIO;
}
@@ -781,6 +775,12 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
return &fl->generic_hdr;
}
+static bool
+filelayout_lseg_is_striped(const struct nfs4_filelayout_segment *flseg)
+{
+ return flseg->num_fh > 1;
+}
+
/*
* filelayout_pg_test(). Called by nfs_can_coalesce_requests()
*
@@ -801,6 +801,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
size = pnfs_generic_pg_test(pgio, prev, req);
if (!size)
return 0;
+ else if (!filelayout_lseg_is_striped(FILELAYOUT_LSEG(pgio->pg_lseg)))
+ return size;
/* see if req and prev are in the same stripe */
if (prev) {
@@ -839,7 +841,12 @@ fl_pnfs_update_layout(struct inode *ino,
lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
gfp_flags);
- if (IS_ERR_OR_NULL(lseg))
+ if (IS_ERR(lseg)) {
+ /* Fall back to MDS on recoverable errors */
+ if (!nfs_error_is_fatal_on_server(PTR_ERR(lseg)))
+ lseg = NULL;
+ goto out;
+ } else if (!lseg)
goto out;
lo = NFS_I(ino)->layout;
@@ -847,6 +854,8 @@ fl_pnfs_update_layout(struct inode *ino,
status = filelayout_check_deviceid(lo, fl, gfp_flags);
if (status) {
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ pnfs_set_lo_fail(lseg);
pnfs_put_lseg(lseg);
lseg = NULL;
}
@@ -858,15 +867,15 @@ static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- pnfs_generic_pg_check_layout(pgio);
+ pnfs_generic_pg_check_layout(pgio, req);
if (!pgio->pg_lseg) {
pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
nfs_req_openctx(req),
- 0,
- NFS4_MAX_UINT64,
+ req_offset(req),
+ req->wb_bytes,
IOMODE_READ,
false,
- GFP_KERNEL);
+ nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -882,15 +891,15 @@ static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- pnfs_generic_pg_check_layout(pgio);
+ pnfs_generic_pg_check_layout(pgio, req);
if (!pgio->pg_lseg) {
pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
nfs_req_openctx(req),
- 0,
- NFS4_MAX_UINT64,
+ req_offset(req),
+ req->wb_bytes,
IOMODE_RW,
false,
- GFP_NOFS);
+ nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -1002,7 +1011,7 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
data->args.fh = fh;
return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
&filelayout_commit_call_ops, how,
- RPC_TASK_SOFTCONN);
+ RPC_TASK_SOFTCONN, NULL);
out_err:
pnfs_generic_prepare_to_resend_writes(data);
pnfs_generic_commit_release(data);
@@ -1075,7 +1084,7 @@ filelayout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
unsigned int size = (fl->stripe_type == STRIPE_SPARSE) ?
fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
- new = pnfs_alloc_commit_array(size, GFP_NOIO);
+ new = pnfs_alloc_commit_array(size, nfs_io_gfp_mask());
if (new) {
spin_lock(&inode->i_lock);
array = pnfs_add_commit_array(fl_cinfo, new, lseg);
@@ -1101,7 +1110,6 @@ static const struct pnfs_commit_ops filelayout_commit_ops = {
.clear_request_commit = pnfs_generic_clear_request_commit,
.scan_commit_lists = pnfs_generic_scan_commit_lists,
.recover_commit_reqs = pnfs_generic_recover_commit_reqs,
- .search_commit_reqs = pnfs_generic_search_commit_reqs,
.commit_pagelist = filelayout_commit_pagelist,
};
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 79323b5dab0c..c7bb5da93307 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -51,7 +51,7 @@ struct nfs4_file_layout_dsaddr {
u32 stripe_count;
u8 *stripe_indices;
u32 ds_num;
- struct nfs4_pnfs_ds *ds_list[1];
+ struct nfs4_pnfs_ds *ds_list[] __counted_by(ds_num);
};
struct nfs4_filelayout_segment {
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 86c3f7e69ec4..df79aeb68db4 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -35,6 +35,7 @@
#include "../internal.h"
#include "../nfs4session.h"
#include "filelayout.h"
+#include "../nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
@@ -72,17 +73,18 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
struct nfs4_file_layout_dsaddr *dsaddr = NULL;
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
struct list_head dsaddrs;
struct nfs4_pnfs_ds_addr *da;
+ struct net *net = server->nfs_client->cl_net;
/* set up xdr stream */
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
goto out_err;
xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* Get the stripe count (number of stripe index) */
p = xdr_inline_decode(&stream, 4);
@@ -136,9 +138,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_err_free_stripe_indices;
}
- dsaddr = kzalloc(sizeof(*dsaddr) +
- (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
- gfp_flags);
+ dsaddr = kzalloc(struct_size(dsaddr, ds_list, num), gfp_flags);
if (!dsaddr)
goto out_err_free_stripe_indices;
@@ -160,8 +160,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
mp_count = be32_to_cpup(p); /* multipath count */
for (j = 0; j < mp_count; j++) {
- da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
- &stream, gfp_flags);
+ da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
}
@@ -171,9 +170,10 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_err_free_deviceid;
}
- dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
if (!dsaddr->ds_list[i])
goto out_err_drain_dsaddrs;
+ trace_fl_getdevinfo(server, &pdev->dev_id, dsaddr->ds_list[i]->ds_remotestr);
/* If DS was already in cache, free ds addrs */
while (!list_empty(&dsaddrs)) {
@@ -186,7 +186,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
}
}
- __free_page(scratch);
+ folio_put(scratch);
return dsaddr;
out_err_drain_dsaddrs:
@@ -204,7 +204,7 @@ out_err_free_deviceid:
out_err_free_stripe_indices:
kfree(stripe_indices);
out_err_free_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out_err:
dprintk("%s ERROR: returning NULL\n", __func__);
return NULL;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index a553d59afa8b..9056f05a67dc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -11,6 +11,7 @@
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
+#include <linux/file.h>
#include <linux/sched/mm.h>
#include <linux/sunrpc/metrics.h>
@@ -30,17 +31,23 @@
#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20
+enum nfs4_ff_op_type {
+ NFS4_FF_OP_LAYOUTSTATS,
+ NFS4_FF_OP_LAYOUTRETURN,
+};
+
static unsigned short io_maxretrans;
static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr);
-static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+static int
+ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
struct nfs42_layoutstat_devinfo *devinfo,
- int dev_limit);
+ int dev_limit, enum nfs4_ff_op_type type);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
const struct nfs42_layoutstat_devinfo *devinfo,
- struct nfs4_ff_layout_mirror *mirror);
+ struct nfs4_ff_layout_ds_stripe *dss_info);
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
@@ -156,18 +163,33 @@ decode_name(struct xdr_stream *xdr, u32 *id)
return 0;
}
-static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
- const struct nfs4_ff_layout_mirror *m2)
+static struct nfsd_file *
+ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, u32 dss_id,
+ struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, fmode_t mode)
+{
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
+
+ return nfs_local_open_fh(clp, cred, fh, &mirror->dss[dss_id].nfl, mode);
+#else
+ return NULL;
+#endif
+}
+
+static bool ff_dss_match_fh(const struct nfs4_ff_layout_ds_stripe *dss1,
+ const struct nfs4_ff_layout_ds_stripe *dss2)
{
int i, j;
- if (m1->fh_versions_cnt != m2->fh_versions_cnt)
+ if (dss1->fh_versions_cnt != dss2->fh_versions_cnt)
return false;
- for (i = 0; i < m1->fh_versions_cnt; i++) {
+
+ for (i = 0; i < dss1->fh_versions_cnt; i++) {
bool found_fh = false;
- for (j = 0; j < m2->fh_versions_cnt; j++) {
- if (nfs_compare_fh(&m1->fh_versions[i],
- &m2->fh_versions[j]) == 0) {
+ for (j = 0; j < dss2->fh_versions_cnt; j++) {
+ if (nfs_compare_fh(&dss1->fh_versions[i],
+ &dss2->fh_versions[j]) == 0) {
found_fh = true;
break;
}
@@ -178,6 +200,38 @@ static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
return true;
}
+static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
+ const struct nfs4_ff_layout_mirror *m2)
+{
+ u32 dss_id;
+
+ if (m1->dss_count != m2->dss_count)
+ return false;
+
+ for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
+ if (!ff_dss_match_fh(&m1->dss[dss_id], &m2->dss[dss_id]))
+ return false;
+
+ return true;
+}
+
+static bool ff_mirror_match_devid(const struct nfs4_ff_layout_mirror *m1,
+ const struct nfs4_ff_layout_mirror *m2)
+{
+ u32 dss_id;
+
+ if (m1->dss_count != m2->dss_count)
+ return false;
+
+ for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
+ if (memcmp(&m1->dss[dss_id].devid,
+ &m2->dss[dss_id].devid,
+ sizeof(m1->dss[dss_id].devid)) != 0)
+ return false;
+
+ return true;
+}
+
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
struct nfs4_ff_layout_mirror *mirror)
@@ -188,7 +242,7 @@ ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
spin_lock(&inode->i_lock);
list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
- if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
+ if (!ff_mirror_match_devid(mirror, pos))
continue;
if (!ff_mirror_match_fh(mirror, pos))
continue;
@@ -216,30 +270,52 @@ ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
mirror->layout = NULL;
}
-static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
+static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(u32 dss_count,
+ gfp_t gfp_flags)
{
struct nfs4_ff_layout_mirror *mirror;
mirror = kzalloc(sizeof(*mirror), gfp_flags);
- if (mirror != NULL) {
- spin_lock_init(&mirror->lock);
- refcount_set(&mirror->ref, 1);
- INIT_LIST_HEAD(&mirror->mirrors);
+ if (mirror == NULL)
+ return NULL;
+
+ spin_lock_init(&mirror->lock);
+ refcount_set(&mirror->ref, 1);
+ INIT_LIST_HEAD(&mirror->mirrors);
+
+ mirror->dss_count = dss_count;
+ mirror->dss =
+ kcalloc(dss_count, sizeof(struct nfs4_ff_layout_ds_stripe),
+ gfp_flags);
+ if (mirror->dss == NULL) {
+ kfree(mirror);
+ return NULL;
}
+
+ for (u32 dss_id = 0; dss_id < mirror->dss_count; dss_id++)
+ nfs_localio_file_init(&mirror->dss[dss_id].nfl);
+
return mirror;
}
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
const struct cred *cred;
+ u32 dss_id;
ff_layout_remove_mirror(mirror);
- kfree(mirror->fh_versions);
- cred = rcu_access_pointer(mirror->ro_cred);
- put_cred(cred);
- cred = rcu_access_pointer(mirror->rw_cred);
- put_cred(cred);
- nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
+
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ kfree(mirror->dss[dss_id].fh_versions);
+ cred = rcu_access_pointer(mirror->dss[dss_id].ro_cred);
+ put_cred(cred);
+ cred = rcu_access_pointer(mirror->dss[dss_id].rw_cred);
+ put_cred(cred);
+ nfs_close_local_fh(&mirror->dss[dss_id].nfl);
+ nfs4_ff_layout_put_deviceid(mirror->dss[dss_id].mirror_ds);
+ }
+
+ kfree(mirror->dss);
kfree(mirror);
}
@@ -270,7 +346,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
struct pnfs_layout_segment *l2)
{
const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
- const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
+ const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
u32 i;
if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
@@ -343,14 +419,24 @@ ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
free_me);
}
+static u32 ff_mirror_efficiency_sum(const struct nfs4_ff_layout_mirror *mirror)
+{
+ u32 dss_id, sum = 0;
+
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
+ sum += mirror->dss[dss_id].efficiency;
+
+ return sum;
+}
+
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
int i, j;
for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
for (j = i + 1; j < fls->mirror_array_cnt; j++)
- if (fls->mirror_array[i]->efficiency <
- fls->mirror_array[j]->efficiency)
+ if (ff_mirror_efficiency_sum(fls->mirror_array[i]) <
+ ff_mirror_efficiency_sum(fls->mirror_array[j]))
swap(fls->mirror_array[i],
fls->mirror_array[j]);
}
@@ -365,20 +451,21 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
struct nfs4_ff_layout_segment *fls = NULL;
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
u64 stripe_unit;
u32 mirror_array_cnt;
__be32 *p;
int i, rc;
+ struct nfs4_ff_layout_ds_stripe *dss_info;
dprintk("--> %s\n", __func__);
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
return ERR_PTR(-ENOMEM);
xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
lgr->layoutp->len);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* stripe unit and mirror_array_cnt */
rc = -EIO;
@@ -404,116 +491,134 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
fls->mirror_array_cnt = mirror_array_cnt;
fls->stripe_unit = stripe_unit;
+ u32 dss_count = 0;
for (i = 0; i < fls->mirror_array_cnt; i++) {
struct nfs4_ff_layout_mirror *mirror;
struct cred *kcred;
const struct cred __rcu *cred;
kuid_t uid;
kgid_t gid;
- u32 ds_count, fh_count, id;
- int j;
+ u32 fh_count, id;
+ int j, dss_id;
rc = -EIO;
p = xdr_inline_decode(&stream, 4);
if (!p)
goto out_err_free;
- ds_count = be32_to_cpup(p);
- /* FIXME: allow for striping? */
- if (ds_count != 1)
+ // Ensure all mirrors have the same stripe count.
+ if (dss_count == 0)
+ dss_count = be32_to_cpup(p);
+ else if (dss_count != be32_to_cpup(p))
+ goto out_err_free;
+
+ if (dss_count > NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT ||
+ dss_count == 0)
goto out_err_free;
- fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
+ if (dss_count > 1 && stripe_unit == 0)
+ goto out_err_free;
+
+ fls->mirror_array[i] = ff_layout_alloc_mirror(dss_count, gfp_flags);
if (fls->mirror_array[i] == NULL) {
rc = -ENOMEM;
goto out_err_free;
}
- fls->mirror_array[i]->ds_count = ds_count;
+ for (dss_id = 0; dss_id < dss_count; dss_id++) {
+ dss_info = &fls->mirror_array[i]->dss[dss_id];
+ dss_info->mirror = fls->mirror_array[i];
- /* deviceid */
- rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
- if (rc)
- goto out_err_free;
+ /* deviceid */
+ rc = decode_deviceid(&stream, &dss_info->devid);
+ if (rc)
+ goto out_err_free;
- /* efficiency */
- rc = -EIO;
- p = xdr_inline_decode(&stream, 4);
- if (!p)
- goto out_err_free;
- fls->mirror_array[i]->efficiency = be32_to_cpup(p);
+ /* efficiency */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ dss_info->efficiency = be32_to_cpup(p);
- /* stateid */
- rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
- if (rc)
- goto out_err_free;
+ /* stateid */
+ rc = decode_pnfs_stateid(&stream, &dss_info->stateid);
+ if (rc)
+ goto out_err_free;
- /* fh */
- rc = -EIO;
- p = xdr_inline_decode(&stream, 4);
- if (!p)
- goto out_err_free;
- fh_count = be32_to_cpup(p);
+ /* fh */
+ rc = -EIO;
+ p = xdr_inline_decode(&stream, 4);
+ if (!p)
+ goto out_err_free;
+ fh_count = be32_to_cpup(p);
- fls->mirror_array[i]->fh_versions =
- kcalloc(fh_count, sizeof(struct nfs_fh),
- gfp_flags);
- if (fls->mirror_array[i]->fh_versions == NULL) {
- rc = -ENOMEM;
- goto out_err_free;
- }
+ dss_info->fh_versions =
+ kcalloc(fh_count, sizeof(struct nfs_fh),
+ gfp_flags);
+ if (dss_info->fh_versions == NULL) {
+ rc = -ENOMEM;
+ goto out_err_free;
+ }
- for (j = 0; j < fh_count; j++) {
- rc = decode_nfs_fh(&stream,
- &fls->mirror_array[i]->fh_versions[j]);
+ for (j = 0; j < fh_count; j++) {
+ rc = decode_nfs_fh(&stream,
+ &dss_info->fh_versions[j]);
+ if (rc)
+ goto out_err_free;
+ }
+
+ dss_info->fh_versions_cnt = fh_count;
+
+ /* user */
+ rc = decode_name(&stream, &id);
if (rc)
goto out_err_free;
- }
- fls->mirror_array[i]->fh_versions_cnt = fh_count;
+ uid = make_kuid(&init_user_ns, id);
- /* user */
- rc = decode_name(&stream, &id);
- if (rc)
- goto out_err_free;
-
- uid = make_kuid(&init_user_ns, id);
+ /* group */
+ rc = decode_name(&stream, &id);
+ if (rc)
+ goto out_err_free;
- /* group */
- rc = decode_name(&stream, &id);
- if (rc)
- goto out_err_free;
+ gid = make_kgid(&init_user_ns, id);
- gid = make_kgid(&init_user_ns, id);
+ if (gfp_flags & __GFP_FS)
+ kcred = prepare_kernel_cred(&init_task);
+ else {
+ unsigned int nofs_flags = memalloc_nofs_save();
- if (gfp_flags & __GFP_FS)
- kcred = prepare_kernel_cred(NULL);
- else {
- unsigned int nofs_flags = memalloc_nofs_save();
- kcred = prepare_kernel_cred(NULL);
- memalloc_nofs_restore(nofs_flags);
+ kcred = prepare_kernel_cred(&init_task);
+ memalloc_nofs_restore(nofs_flags);
+ }
+ rc = -ENOMEM;
+ if (!kcred)
+ goto out_err_free;
+ kcred->fsuid = uid;
+ kcred->fsgid = gid;
+ cred = RCU_INITIALIZER(kcred);
+
+ if (lgr->range.iomode == IOMODE_READ)
+ rcu_assign_pointer(dss_info->ro_cred, cred);
+ else
+ rcu_assign_pointer(dss_info->rw_cred, cred);
}
- rc = -ENOMEM;
- if (!kcred)
- goto out_err_free;
- kcred->fsuid = uid;
- kcred->fsgid = gid;
- cred = RCU_INITIALIZER(kcred);
-
- if (lgr->range.iomode == IOMODE_READ)
- rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
- else
- rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
if (mirror != fls->mirror_array[i]) {
- /* swap cred ptrs so free_mirror will clean up old */
- if (lgr->range.iomode == IOMODE_READ) {
- cred = xchg(&mirror->ro_cred, cred);
- rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
- } else {
- cred = xchg(&mirror->rw_cred, cred);
- rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
+ for (dss_id = 0; dss_id < dss_count; dss_id++) {
+ dss_info = &fls->mirror_array[i]->dss[dss_id];
+ /* swap cred ptrs so free_mirror will clean up old */
+ if (lgr->range.iomode == IOMODE_READ) {
+ cred = xchg(&mirror->dss[dss_id].ro_cred,
+ dss_info->ro_cred);
+ rcu_assign_pointer(dss_info->ro_cred, cred);
+ } else {
+ cred = xchg(&mirror->dss[dss_id].rw_cred,
+ dss_info->rw_cred);
+ rcu_assign_pointer(dss_info->rw_cred, cred);
+ }
}
ff_layout_free_mirror(fls->mirror_array[i]);
fls->mirror_array[i] = mirror;
@@ -541,7 +646,7 @@ out_sort_mirrors:
ret = &fls->generic_hdr;
dprintk("<-- %s (success)\n", __func__);
out_free_page:
- __free_page(scratch);
+ folio_put(scratch);
return ret;
out_err_free:
_ff_layout_free_lseg(fls);
@@ -570,6 +675,26 @@ ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
_ff_layout_free_lseg(fls);
}
+static u32 calc_commit_idx(struct pnfs_layout_segment *lseg,
+ u32 mirror_idx, u32 dss_id)
+{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+
+ return (mirror_idx * flseg->mirror_array[0]->dss_count) + dss_id;
+}
+
+static u32 calc_mirror_idx_from_commit(struct pnfs_layout_segment *lseg,
+ u32 commit_index)
+{
+ return commit_index / FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
+}
+
+static u32 calc_dss_id_from_commit(struct pnfs_layout_segment *lseg,
+ u32 commit_index)
+{
+ return commit_index % FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
+}
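/*
 * Example of the mapping above, assuming dss_count = 3: mirror 1,
 * stripe 2 becomes commit index 1 * 3 + 2 = 5, which maps back to
 * mirror 5 / 3 = 1 and stripe 5 % 3 = 2.
 */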
+
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
@@ -594,6 +719,7 @@ nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
struct nfs4_ff_layoutstat *layoutstat,
ktime_t now)
{
@@ -601,8 +727,8 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
- if (!mirror->start_time)
- mirror->start_time = now;
+ if (!mirror->dss[dss_id].start_time)
+ mirror->dss[dss_id].start_time = now;
if (mirror->report_interval != 0)
report_interval = (s64)mirror->report_interval * 1000LL;
else if (layoutstats_timer != 0)
@@ -652,28 +778,32 @@ nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested, ktime_t now)
{
bool report;
spin_lock(&mirror->lock);
- report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
- nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
+ report = nfs4_ff_layoutstat_start_io(
+ mirror, dss_id, &mirror->dss[dss_id].read_stat, now);
+ nfs4_ff_layout_stat_io_update_requested(
+ &mirror->dss[dss_id].read_stat, requested);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
spin_unlock(&mirror->lock);
if (report)
- pnfs_report_layoutstat(inode, GFP_KERNEL);
+ pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}
static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested,
__u64 completed)
{
spin_lock(&mirror->lock);
- nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
+ nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].read_stat,
requested, completed,
ktime_get(), task->tk_start);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
@@ -683,23 +813,31 @@ nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested, ktime_t now)
{
bool report;
spin_lock(&mirror->lock);
- report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
- nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
+ report = nfs4_ff_layoutstat_start_io(
+ mirror,
+ dss_id,
+ &mirror->dss[dss_id].write_stat,
+ now);
+ nfs4_ff_layout_stat_io_update_requested(
+ &mirror->dss[dss_id].write_stat,
+ requested);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
spin_unlock(&mirror->lock);
if (report)
- pnfs_report_layoutstat(inode, GFP_NOIO);
+ pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}
static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
__u64 requested,
__u64 completed,
enum nfs3_stable_how committed)
@@ -708,25 +846,25 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
requested = completed = 0;
spin_lock(&mirror->lock);
- nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
+ nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].write_stat,
requested, completed, ktime_get(), task->tk_start);
set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
spin_unlock(&mirror->lock);
}
static void
-ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
+ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
if (devid)
nfs4_mark_deviceid_unavailable(devid);
}
static void
-ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
+ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
if (devid)
nfs4_mark_deviceid_available(devid);
@@ -735,69 +873,87 @@ ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id,
bool check_device)
{
struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
struct nfs4_ff_layout_mirror *mirror;
- struct nfs4_pnfs_ds *ds;
+ struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
u32 idx;
/* mirrors are initially sorted by efficiency */
for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
- if (!ds)
+ *dss_id = nfs4_ff_layout_calc_dss_id(
+ fls->stripe_unit,
+ fls->mirror_array[idx]->dss_count,
+ offset);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, *dss_id, false);
+ if (IS_ERR(ds))
continue;
if (check_device &&
- nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
+ nfs4_test_deviceid_unavailable(&mirror->dss[*dss_id].mirror_ds->id_node)) {
+ // reinitialize the error state in case this is the last iteration
+ ds = ERR_PTR(-EINVAL);
continue;
+ }
*best_idx = idx;
- return ds;
+ break;
}
- return NULL;
+ return ds;
}
static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
- u32 start_idx, u32 *best_idx)
+ u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id)
{
- return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
+ return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id, false);
}
static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
- u32 start_idx, u32 *best_idx)
+ u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id)
{
- return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
+ return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id, true);
}
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
- u32 start_idx, u32 *best_idx)
+ u32 start_idx, u32 *best_idx,
+ u32 offset, u32 *dss_id)
{
struct nfs4_pnfs_ds *ds;
- ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
- if (ds)
+ ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id);
+ if (!IS_ERR(ds))
return ds;
- return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
+ return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx,
+ offset, dss_id);
}
static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
- u32 *best_idx)
+ u32 *best_idx,
+ u32 offset,
+ u32 *dss_id)
{
struct pnfs_layout_segment *lseg = pgio->pg_lseg;
struct nfs4_pnfs_ds *ds;
ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
- best_idx);
- if (ds || !pgio->pg_mirror_idx)
+ best_idx, offset, dss_id);
+ if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
return ds;
- return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+ return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx,
+ offset, dss_id);
}
static void
@@ -806,25 +962,64 @@ ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
bool strict_iomode)
{
pnfs_put_lseg(pgio->pg_lseg);
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- nfs_req_openctx(req),
- req_offset(req),
- req->wb_bytes,
- IOMODE_READ,
- strict_iomode,
- GFP_KERNEL);
+ pgio->pg_lseg =
+ pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+ req_offset(req), req->wb_bytes, IOMODE_READ,
+ strict_iomode, nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
}
}
-static void
-ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
- struct nfs_page *req)
+static bool
+ff_layout_lseg_is_striped(const struct nfs4_ff_layout_segment *fls)
{
- pnfs_generic_pg_check_layout(pgio);
- pnfs_generic_pg_check_range(pgio, req);
+ return fls->mirror_array[0]->dss_count > 1;
+}
+
+/*
+ * ff_layout_pg_test(). Called by nfs_can_coalesce_requests()
+ *
+ * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
+ * of bytes (maximum @req->wb_bytes) that can be coalesced.
+ */
+static size_t
+ff_layout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ unsigned int size;
+ u64 p_stripe, r_stripe;
+ u32 stripe_offset;
+ u64 segment_offset = pgio->pg_lseg->pls_range.offset;
+ u32 stripe_unit = FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
+
+ /* calls nfs_generic_pg_test */
+ size = pnfs_generic_pg_test(pgio, prev, req);
+ if (!size)
+ return 0;
+ else if (!ff_layout_lseg_is_striped(FF_LAYOUT_LSEG(pgio->pg_lseg)))
+ return size;
+
+ /* see if req and prev are in the same stripe */
+ if (prev) {
+ p_stripe = (u64)req_offset(prev) - segment_offset;
+ r_stripe = (u64)req_offset(req) - segment_offset;
+ do_div(p_stripe, stripe_unit);
+ do_div(r_stripe, stripe_unit);
+
+ if (p_stripe != r_stripe)
+ return 0;
+ }
+
+ /* calculate remaining bytes in the current stripe */
+ div_u64_rem((u64)req_offset(req) - segment_offset,
+ stripe_unit,
+ &stripe_offset);
+ WARN_ON_ONCE(stripe_offset > stripe_unit);
+ if (stripe_offset >= stripe_unit)
+ return 0;
+ return min(stripe_unit - (unsigned int)stripe_offset, size);
}
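/*
 * Worked example, assuming stripe_unit = 1M and segment_offset = 0: a
 * request at offset 1536K sits at stripe_offset = 512K, so at most the
 * remaining 512K of that stripe can be coalesced; if the previous
 * request was at 768K (stripe 0) while this one is in stripe 1, the two
 * are in different stripes and the function returns 0.
 */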
static void
@@ -834,10 +1029,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_pgio_mirror *pgm;
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_pnfs_ds *ds;
- u32 ds_idx;
+ u32 ds_idx, dss_id;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
retry:
- ff_layout_pg_check_layout(pgio, req);
+ pnfs_generic_pg_check_layout(pgio, req);
/* Use full layout for now */
if (!pgio->pg_lseg) {
ff_layout_pg_get_read(pgio, req, false);
@@ -849,9 +1047,12 @@ retry:
if (!pgio->pg_lseg)
goto out_nolseg;
}
+ /* Reset wb_nio, since getting layout segment was successful */
+ req->wb_nio = 0;
- ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
- if (!ds) {
+ ds = ff_layout_get_ds_for_read(pgio, &ds_idx,
+ req_offset(req), &dss_id);
+ if (IS_ERR(ds)) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
pnfs_generic_pg_cleanup(pgio);
@@ -862,17 +1063,27 @@ retry:
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
pgm = &pgio->pg_mirrors[0];
- pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+ pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize;
pgio->pg_mirror_idx = ds_idx;
-
- if (NFS_SERVER(pgio->pg_inode)->flags &
- (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
- pgio->pg_maxretrans = io_maxretrans;
return;
out_nolseg:
- if (pgio->pg_error < 0)
- return;
+ if (pgio->pg_error < 0) {
+ if (pgio->pg_error != -EAGAIN)
+ return;
+ /* Retry getting layout segment if lower layer returned -EAGAIN */
+ if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
+ if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
+ pgio->pg_error = -ETIMEDOUT;
+ else
+ pgio->pg_error = -EIO;
+ return;
+ }
+ pgio->pg_error = 0;
+ /* Sleep for 1 second before retrying */
+ ssleep(1);
+ goto retry;
+ }
out_mds:
trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
0, NFS4_MAX_UINT64, IOMODE_READ,
@@ -889,18 +1100,15 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs4_ff_layout_mirror *mirror;
struct nfs_pgio_mirror *pgm;
struct nfs4_pnfs_ds *ds;
- u32 i;
+ u32 i, dss_id;
retry:
- ff_layout_pg_check_layout(pgio, req);
+ pnfs_generic_pg_check_layout(pgio, req);
if (!pgio->pg_lseg) {
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- nfs_req_openctx(req),
- req_offset(req),
- req->wb_bytes,
- IOMODE_RW,
- false,
- GFP_NOFS);
+ pgio->pg_lseg =
+ pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+ req_offset(req), req->wb_bytes,
+ IOMODE_RW, false, nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -917,8 +1125,13 @@ retry:
for (i = 0; i < pgio->pg_mirror_count; i++) {
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
- ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
- if (!ds) {
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit,
+ mirror->dss_count,
+ req_offset(req));
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror,
+ dss_id, true);
+ if (IS_ERR(ds)) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
pnfs_generic_pg_cleanup(pgio);
@@ -927,7 +1140,7 @@ retry:
goto retry;
}
pgm = &pgio->pg_mirrors[i];
- pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
+ pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize;
}
if (NFS_SERVER(pgio->pg_inode)->flags &
@@ -953,13 +1166,10 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
if (!pgio->pg_lseg) {
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- nfs_req_openctx(req),
- req_offset(req),
- req->wb_bytes,
- IOMODE_RW,
- false,
- GFP_NOFS);
+ pgio->pg_lseg =
+ pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+ req_offset(req), req->wb_bytes,
+ IOMODE_RW, false, nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -996,14 +1206,14 @@ ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
.pg_init = ff_layout_pg_init_read,
- .pg_test = pnfs_generic_pg_test,
+ .pg_test = ff_layout_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
.pg_cleanup = pnfs_generic_pg_cleanup,
};
static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
.pg_init = ff_layout_pg_init_write,
- .pg_test = pnfs_generic_pg_test,
+ .pg_test = ff_layout_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
.pg_cleanup = pnfs_generic_pg_cleanup,
@@ -1051,11 +1261,15 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
u32 idx = hdr->pgio_mirror_idx + 1;
u32 new_idx = 0;
+ u32 dss_id = 0;
+ struct nfs4_pnfs_ds *ds;
- if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
- ff_layout_send_layouterror(hdr->lseg);
- else
+ ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx,
+ hdr->args.offset, &dss_id);
+ if (IS_ERR(ds))
pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
+ else
+ ff_layout_send_layouterror(hdr->lseg);
pnfs_read_resend_pnfs(hdr, new_idx);
}
@@ -1084,42 +1298,53 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
}
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ u32 op_status,
struct nfs4_state *state,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
- u32 idx)
+ u32 idx, u32 dss_id)
{
struct pnfs_layout_hdr *lo = lseg->pls_layout;
struct inode *inode = lo->plh_inode;
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
- switch (task->tk_status) {
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_DEADSESSION:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- case -NFS4ERR_SEQ_FALSE_RETRY:
- case -NFS4ERR_SEQ_MISORDERED:
+ switch (op_status) {
+ case NFS4_OK:
+ case NFS4ERR_NXIO:
+ break;
+ case NFSERR_PERM:
+ if (!task->tk_xprt)
+ break;
+ xprt_force_disconnect(task->tk_xprt);
+ goto out_retry;
+ case NFS4ERR_BADSESSION:
+ case NFS4ERR_BADSLOT:
+ case NFS4ERR_BAD_HIGH_SLOT:
+ case NFS4ERR_DEADSESSION:
+ case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case NFS4ERR_SEQ_FALSE_RETRY:
+ case NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session. Exchangeid "
"flags 0x%x\n", __func__, task->tk_status,
clp->cl_exchange_flags);
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
- break;
- case -NFS4ERR_DELAY:
- case -NFS4ERR_GRACE:
+ goto out_retry;
+ case NFS4ERR_DELAY:
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+ fallthrough;
+ case NFS4ERR_GRACE:
rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
- break;
- case -NFS4ERR_RETRY_UNCACHED_REP:
- break;
+ goto out_retry;
+ case NFS4ERR_RETRY_UNCACHED_REP:
+ goto out_retry;
/* Invalidate Layout errors */
- case -NFS4ERR_PNFS_NO_LAYOUT:
- case -ESTALE: /* mapped NFS4ERR_STALE */
- case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
- case -EISDIR: /* mapped NFS4ERR_ISDIR */
- case -NFS4ERR_FHEXPIRED:
- case -NFS4ERR_WRONG_TYPE:
+ case NFS4ERR_PNFS_NO_LAYOUT:
+ case NFS4ERR_STALE:
+ case NFS4ERR_BADHANDLE:
+ case NFS4ERR_ISDIR:
+ case NFS4ERR_FHEXPIRED:
+ case NFS4ERR_WRONG_TYPE:
dprintk("%s Invalid layout error %d\n", __func__,
task->tk_status);
/*
@@ -1132,38 +1357,79 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
pnfs_destroy_layout(NFS_I(inode));
rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset;
+ default:
+ break;
+ }
+
+ switch (task->tk_status) {
/* RPC connection errors */
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
+ case -EPROTO:
+ case -ENODEV:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid);
rpc_wake_up(&tbl->slot_tbl_waitq);
- fallthrough;
+ break;
default:
- if (ff_layout_avoid_mds_available_ds(lseg))
- return -NFS4ERR_RESET_TO_PNFS;
-reset:
- dprintk("%s Retry through MDS. Error %d\n", __func__,
- task->tk_status);
- return -NFS4ERR_RESET_TO_MDS;
+ break;
}
+
+ if (ff_layout_avoid_mds_available_ds(lseg))
+ return -NFS4ERR_RESET_TO_PNFS;
+reset:
+ dprintk("%s Retry through MDS. Error %d\n", __func__,
+ task->tk_status);
+ return -NFS4ERR_RESET_TO_MDS;
+
+out_retry:
task->tk_status = 0;
return -EAGAIN;
}
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ u32 op_status,
+ struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
- u32 idx)
+ u32 idx, u32 dss_id)
{
- struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
+
+ switch (op_status) {
+ case NFS_OK:
+ case NFSERR_NXIO:
+ break;
+ case NFSERR_PERM:
+ if (!task->tk_xprt)
+ break;
+ xprt_force_disconnect(task->tk_xprt);
+ goto out_retry;
+ case NFSERR_ACCES:
+ case NFSERR_BADHANDLE:
+ case NFSERR_FBIG:
+ case NFSERR_IO:
+ case NFSERR_NOSPC:
+ case NFSERR_ROFS:
+ case NFSERR_STALE:
+ goto out_reset_to_pnfs;
+ case NFSERR_JUKEBOX:
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+ goto out_retry;
+ default:
+ break;
+ }
switch (task->tk_status) {
/* File access problems. Don't mark the device as unavailable */
@@ -1177,12 +1443,18 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
goto out_retry;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
default:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid);
}
+out_reset_to_pnfs:
/* FIXME: Need to prevent infinite looping here. */
return -NFS4ERR_RESET_TO_PNFS;
out_retry:
@@ -1193,15 +1465,16 @@ out_retry:
}
static int ff_layout_async_handle_error(struct rpc_task *task,
+ u32 op_status,
struct nfs4_state *state,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
- u32 idx)
+ u32 idx, u32 dss_id)
{
int vers = clp->cl_nfs_mod->rpc_vers->number;
if (task->tk_status >= 0) {
- ff_layout_mark_ds_reachable(lseg, idx);
+ ff_layout_mark_ds_reachable(lseg, idx, dss_id);
return 0;
}
@@ -1211,10 +1484,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
switch (vers) {
case 3:
- return ff_layout_async_handle_error_v3(task, lseg, idx);
+ return ff_layout_async_handle_error_v3(task, op_status, clp,
+ lseg, idx, dss_id);
case 4:
- return ff_layout_async_handle_error_v4(task, state, clp,
- lseg, idx);
+ return ff_layout_async_handle_error_v4(task, op_status, state,
+ clp, lseg, idx, dss_id);
default:
/* should never happen */
WARN_ON_ONCE(1);
@@ -1223,7 +1497,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
}
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
- u32 idx, u64 offset, u64 length,
+ u32 idx, u32 dss_id, u64 offset, u64 length,
u32 *op_status, int opnum, int error)
{
struct nfs4_ff_layout_mirror *mirror;
@@ -1236,15 +1510,19 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
case -EOPNOTSUPP:
+ case -EINVAL:
case -ECONNREFUSED:
case -ECONNRESET:
case -EHOSTDOWN:
case -EHOSTUNREACH:
+ case -ENETDOWN:
case -ENETUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
case -EPIPE:
case -EPERM:
+ case -EPROTO:
+ case -ENODEV:
*op_status = status = NFS4ERR_NXIO;
break;
case -EACCES:
@@ -1257,15 +1535,16 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
mirror = FF_LAYOUT_COMP(lseg, idx);
err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
- mirror, offset, length, status, opnum,
- GFP_NOIO);
+ mirror, dss_id, offset, length, status, opnum,
+ nfs_io_gfp_mask());
switch (status) {
case NFS4ERR_DELAY:
case NFS4ERR_GRACE:
+ case NFS4ERR_PERM:
break;
case NFS4ERR_NXIO:
- ff_layout_mark_ds_unreachable(lseg, idx);
+ ff_layout_mark_ds_unreachable(lseg, idx, dss_id);
/*
* Don't return the layout if this is a read and we still
* have layouts to try
@@ -1285,19 +1564,27 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
static int ff_layout_read_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
+ u32 dss_id = nfs4_ff_layout_calc_dss_id(
+ flseg->stripe_unit,
+ flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
+ hdr->args.offset);
int err;
if (task->tk_status < 0) {
- ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ ff_layout_io_track_ds_error(hdr->lseg,
+ hdr->pgio_mirror_idx, dss_id,
hdr->args.offset, hdr->args.count,
&hdr->res.op_status, OP_READ,
task->tk_status);
- trace_ff_layout_read_error(hdr);
+ trace_ff_layout_read_error(hdr, task->tk_status);
}
- err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ err = ff_layout_async_handle_error(task, hdr->res.op_status,
+ hdr->args.context->state,
hdr->ds_clp, hdr->lseg,
- hdr->pgio_mirror_idx);
+ hdr->pgio_mirror_idx,
+ dss_id);
trace_nfs4_pnfs_read(hdr, err);
clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
@@ -1311,6 +1598,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
goto out_eagain;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
return 0;
@@ -1350,23 +1640,47 @@ ff_layout_set_layoutcommit(struct inode *inode,
static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_start_read(hdr->inode,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count,
- task->tk_start);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_start_read(
+ hdr->inode,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ task->tk_start);
}
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_end_read(task,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count,
- hdr->res.count);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_end_read(
+ task,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ hdr->res.count);
set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}
@@ -1378,6 +1692,11 @@ static int ff_layout_read_prepare_common(struct rpc_task *task,
return -EIO;
}
+ if (!pnfs_is_valid_lseg(hdr->lseg)) {
+ rpc_exit(task, -EAGAIN);
+ return -EAGAIN;
+ }
+
ff_layout_read_record_layoutstats_start(task, hdr);
return 0;
}
@@ -1449,20 +1768,28 @@ static void ff_layout_read_release(void *data)
static int ff_layout_write_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
+ u32 dss_id = nfs4_ff_layout_calc_dss_id(
+ flseg->stripe_unit,
+ flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
+ hdr->args.offset);
loff_t end_offs = 0;
int err;
if (task->tk_status < 0) {
- ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
+ ff_layout_io_track_ds_error(hdr->lseg,
+ hdr->pgio_mirror_idx, dss_id,
hdr->args.offset, hdr->args.count,
&hdr->res.op_status, OP_WRITE,
task->tk_status);
- trace_ff_layout_write_error(hdr);
+ trace_ff_layout_write_error(hdr, task->tk_status);
}
- err = ff_layout_async_handle_error(task, hdr->args.context->state,
+ err = ff_layout_async_handle_error(task, hdr->res.op_status,
+ hdr->args.context->state,
hdr->ds_clp, hdr->lseg,
- hdr->pgio_mirror_idx);
+ hdr->pgio_mirror_idx,
+ dss_id);
trace_nfs4_pnfs_write(hdr, err);
clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
@@ -1476,6 +1803,9 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
if (hdr->res.verf->committed == NFS_FILE_SYNC ||
@@ -1497,17 +1827,20 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
struct nfs_commit_data *data)
{
int err;
+ u32 idx = calc_mirror_idx_from_commit(data->lseg, data->ds_commit_index);
+ u32 dss_id = calc_dss_id_from_commit(data->lseg, data->ds_commit_index);
if (task->tk_status < 0) {
- ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
+ ff_layout_io_track_ds_error(data->lseg, idx, dss_id,
data->args.offset, data->args.count,
&data->res.op_status, OP_COMMIT,
task->tk_status);
- trace_ff_layout_commit_error(data);
+ trace_ff_layout_commit_error(data, task->tk_status);
}
- err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
- data->lseg, data->ds_commit_index);
+ err = ff_layout_async_handle_error(task, data->res.op_status,
+ NULL, data->ds_clp, data->lseg, idx,
+ dss_id);
trace_nfs4_pnfs_commit_ds(data, err);
switch (err) {
@@ -1520,33 +1853,60 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
-
return 0;
}
static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_start_write(hdr->inode,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count,
- task->tk_start);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_start_write(
+ hdr->inode,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ task->tk_start);
}
static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ struct nfs4_ff_layout_mirror *mirror;
+ u32 dss_id;
+
if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
return;
- nfs4_ff_layout_stat_io_end_write(task,
- FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
- hdr->args.count, hdr->res.count,
- hdr->res.verf->committed);
+
+ mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
+ mirror->dss_count,
+ hdr->args.offset);
+
+ nfs4_ff_layout_stat_io_end_write(
+ task,
+ mirror,
+ dss_id,
+ hdr->args.count,
+ hdr->res.count,
+ hdr->res.verf->committed);
set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}
@@ -1558,6 +1918,11 @@ static int ff_layout_write_prepare_common(struct rpc_task *task,
return -EIO;
}
+ if (!pnfs_is_valid_lseg(hdr->lseg)) {
+ rpc_exit(task, -EAGAIN);
+ return -EAGAIN;
+ }
+
ff_layout_write_record_layoutstats_start(task, hdr);
return 0;
}
@@ -1624,10 +1989,16 @@ static void ff_layout_write_release(void *data)
static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
struct nfs_commit_data *cdata)
{
+ u32 idx, dss_id;
+
if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
return;
+
+ idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
+ dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
nfs4_ff_layout_stat_io_start_write(cdata->inode,
- FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+ FF_LAYOUT_COMP(cdata->lseg, idx),
+ dss_id,
0, task->tk_start);
}
@@ -1636,6 +2007,7 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
{
struct nfs_page *req;
__u64 count = 0;
+ u32 idx, dss_id;
if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
return;
@@ -1644,21 +2016,33 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
list_for_each_entry(req, &cdata->pages, wb_list)
count += req->wb_bytes;
}
+
+ idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
+ dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
nfs4_ff_layout_stat_io_end_write(task,
- FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+ FF_LAYOUT_COMP(cdata->lseg, idx),
+ dss_id,
count, count, NFS_FILE_SYNC);
set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}
-static void ff_layout_commit_prepare_common(struct rpc_task *task,
- struct nfs_commit_data *cdata)
+static int ff_layout_commit_prepare_common(struct rpc_task *task,
+ struct nfs_commit_data *cdata)
{
+ if (!pnfs_is_valid_lseg(cdata->lseg)) {
+ rpc_exit(task, -EAGAIN);
+ return -EAGAIN;
+ }
+
ff_layout_commit_record_layoutstats_start(task, cdata);
+ return 0;
}
static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
- ff_layout_commit_prepare_common(task, data);
+ if (ff_layout_commit_prepare_common(task, data))
+ return;
+
rpc_call_start(task);
}
@@ -1744,32 +2128,41 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
u32 idx = hdr->pgio_mirror_idx;
int vers;
struct nfs_fh *fh;
+ u32 dss_id;
+ bool ds_fatal_error = false;
dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
__func__, hdr->inode->i_ino,
hdr->args.pgbase, (size_t)hdr->args.count, offset);
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
- if (!ds)
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(lseg)->stripe_unit,
+ mirror->dss_count,
+ offset);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, false);
+ if (IS_ERR(ds)) {
+ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
goto out_failed;
+ }
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
- hdr->inode);
+ hdr->inode, dss_id);
if (IS_ERR(ds_clnt))
goto out_failed;
- ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
if (!ds_cred)
goto out_failed;
- vers = nfs4_ff_layout_ds_version(mirror);
+ vers = nfs4_ff_layout_ds_version(mirror, dss_id);
dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
@@ -1777,11 +2170,11 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->pgio_done_cb = ff_layout_read_done_cb;
refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- fh = nfs4_ff_layout_select_ds_fh(mirror);
+ fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
if (fh)
hdr->args.fh = fh;
- nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
+ nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
/*
* Note that if we ever decide to split across DSes,
@@ -1790,16 +2183,24 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->args.offset = offset;
hdr->mds_offset = offset;
+ /* Start IO accounting for local read */
+ localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
+ FMODE_READ);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_read_call_ops_v3 :
&ff_layout_read_call_ops_v4,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
out_failed:
- if (ff_layout_avoid_mds_available_ds(lseg))
+ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
return PNFS_TRY_AGAIN;
trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
hdr->args.offset, hdr->args.count,
@@ -1814,28 +2215,37 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
int vers;
struct nfs_fh *fh;
u32 idx = hdr->pgio_mirror_idx;
+ u32 dss_id;
+ bool ds_fatal_error = false;
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
- if (!ds)
+ dss_id = nfs4_ff_layout_calc_dss_id(
+ FF_LAYOUT_LSEG(lseg)->stripe_unit,
+ mirror->dss_count,
+ offset);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
+ if (IS_ERR(ds)) {
+ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
goto out_failed;
+ }
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
- hdr->inode);
+ hdr->inode, dss_id);
if (IS_ERR(ds_clnt))
goto out_failed;
- ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
if (!ds_cred)
goto out_failed;
- vers = nfs4_ff_layout_ds_version(mirror);
+ vers = nfs4_ff_layout_ds_version(mirror, dss_id);
dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
@@ -1845,12 +2255,12 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
hdr->pgio_done_cb = ff_layout_write_done_cb;
refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- hdr->ds_commit_idx = idx;
- fh = nfs4_ff_layout_select_ds_fh(mirror);
+ hdr->ds_commit_idx = calc_commit_idx(lseg, idx, dss_id);
+ fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
if (fh)
hdr->args.fh = fh;
- nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
+ nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
/*
* Note that if we ever decide to split across DSes,
@@ -1858,16 +2268,24 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
*/
hdr->args.offset = offset;
+ /* Start IO accounting for local write */
+ localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_write_call_ops_v3 :
&ff_layout_write_call_ops_v4,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
out_failed:
- if (ff_layout_avoid_mds_available_ds(lseg))
+ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
return PNFS_TRY_AGAIN;
trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
hdr->args.offset, hdr->args.count,
@@ -1875,20 +2293,15 @@ out_failed:
return PNFS_NOT_ATTEMPTED;
}
-static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
-{
- return i;
-}
-
static struct nfs_fh *
-select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i, u32 dss_id)
{
struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
/* FIXME: Assume that there is only one NFS version available
* for the DS.
*/
- return &flseg->mirror_array[i]->fh_versions[0];
+ return &flseg->mirror_array[i]->dss[dss_id].fh_versions[0];
}
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
@@ -1896,9 +2309,10 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
- u32 idx;
+ u32 idx, dss_id;
int vers, ret;
struct nfs_fh *fh;
@@ -1906,22 +2320,23 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
goto out_err;
- idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ idx = calc_mirror_idx_from_commit(lseg, data->ds_commit_index);
mirror = FF_LAYOUT_COMP(lseg, idx);
- ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
- if (!ds)
+ dss_id = calc_dss_id_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
+ if (IS_ERR(ds))
goto out_err;
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
- data->inode);
+ data->inode, dss_id);
if (IS_ERR(ds_clnt))
goto out_err;
- ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred, dss_id);
if (!ds_cred)
goto out_err;
- vers = nfs4_ff_layout_ds_version(mirror);
+ vers = nfs4_ff_layout_ds_version(mirror, dss_id);
dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
@@ -1930,14 +2345,22 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
data->cred = ds_cred;
refcount_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
- fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ fh = select_ds_fh_from_commit(lseg, idx, dss_id);
if (fh)
data->args.fh = fh;
+ /* Start IO accounting for local commit */
+ localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ data->task.tk_start = ktime_get();
+ ff_layout_commit_record_layoutstats_start(&data->task, data);
+ }
+
ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_commit_call_ops_v3 :
&ff_layout_commit_call_ops_v4,
- how, RPC_TASK_SOFTCONN);
+ how, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return ret;
out_err:
@@ -1954,6 +2377,68 @@ ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
ff_layout_initiate_commit);
}
+static bool ff_layout_match_rw(const struct rpc_task *task,
+ const struct nfs_pgio_header *hdr,
+ const struct pnfs_layout_segment *lseg)
+{
+ return hdr->lseg == lseg;
+}
+
+static bool ff_layout_match_commit(const struct rpc_task *task,
+ const struct nfs_commit_data *cdata,
+ const struct pnfs_layout_segment *lseg)
+{
+ return cdata->lseg == lseg;
+}
+
+static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
+{
+ const struct rpc_call_ops *ops = task->tk_ops;
+
+ if (ops == &ff_layout_read_call_ops_v3 ||
+ ops == &ff_layout_read_call_ops_v4 ||
+ ops == &ff_layout_write_call_ops_v3 ||
+ ops == &ff_layout_write_call_ops_v4)
+ return ff_layout_match_rw(task, task->tk_calldata, data);
+ if (ops == &ff_layout_commit_call_ops_v3 ||
+ ops == &ff_layout_commit_call_ops_v4)
+ return ff_layout_match_commit(task, task->tk_calldata, data);
+ return false;
+}
+
+static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+ struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_ff_layout_ds *mirror_ds;
+ struct nfs4_pnfs_ds *ds;
+ struct nfs_client *ds_clp;
+ struct rpc_clnt *clnt;
+ u32 idx, dss_id;
+
+ for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
+ mirror = flseg->mirror_array[idx];
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ mirror_ds = mirror->dss[dss_id].mirror_ds;
+ if (IS_ERR_OR_NULL(mirror_ds))
+ continue;
+ ds = mirror->dss[dss_id].mirror_ds->ds;
+ if (!ds)
+ continue;
+ ds_clp = ds->ds_clp;
+ if (!ds_clp)
+ continue;
+ clnt = ds_clp->cl_rpcclient;
+ if (!clnt)
+ continue;
+ if (!rpc_cancel_tasks(clnt, -EAGAIN,
+ ff_layout_match_io, lseg))
+ continue;
+ rpc_clnt_disconnect(clnt);
+ }
+ }
+}
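/*
 * ff_layout_cancel_io() above walks every mirror and every DS stripe of the
 * segment, cancels only the RPC tasks whose calldata references that lseg
 * (via ff_layout_match_io), and then disconnects the DS transport.  A
 * minimal userspace sketch of the same cancel-by-predicate idea over a
 * plain task array; the types are stand-ins, not the RPC client API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	const void *calldata;	/* here: the layout segment it works on */
	int status;
	bool done;
};

typedef bool (*match_fn)(const struct fake_task *, const void *);

static bool match_lseg(const struct fake_task *t, const void *lseg)
{
	return t->calldata == lseg;	/* the test ff_layout_match_rw/commit make */
}

/* Cancel every matching task with the given error; report whether any matched. */
static bool cancel_tasks(struct fake_task *tasks, int n, int error,
			 match_fn match, const void *key)
{
	bool hit = false;
	int i;

	for (i = 0; i < n; i++) {
		if (tasks[i].done || !match(&tasks[i], key))
			continue;
		tasks[i].status = error;
		tasks[i].done = true;
		hit = true;
	}
	return hit;	/* in the kernel: only then disconnect the DS transport */
}

int main(void)
{
	int lseg_a, lseg_b;
	struct fake_task tasks[] = {
		{ &lseg_a, 0, false }, { &lseg_b, 0, false }, { &lseg_a, 0, false },
	};

	if (cancel_tasks(tasks, 3, -EAGAIN, match_lseg, &lseg_a))
		printf("cancelled tasks for lseg_a; would disconnect its DS\n");
	return 0;
}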
+
static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
@@ -1972,8 +2457,10 @@ ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
struct inode *inode = lseg->pls_layout->plh_inode;
struct pnfs_commit_array *array, *new;
+ u32 size = flseg->mirror_array_cnt * flseg->mirror_array[0]->dss_count;
- new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
+ new = pnfs_alloc_commit_array(size,
+ nfs_io_gfp_mask());
if (new) {
spin_lock(&inode->i_lock);
array = pnfs_add_commit_array(fl_cinfo, new, lseg);
@@ -2015,12 +2502,6 @@ static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
}
static void
-encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
-static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
const nfs4_stateid *stateid,
const struct nfs42_layoutstat_devinfo *devinfo)
@@ -2152,10 +2633,10 @@ ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
struct nfs4_flexfile_layoutreturn_args *ff_args;
struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
- ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
+ ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
if (!ff_args)
goto out_nomem;
- ff_args->pages[0] = alloc_page(GFP_KERNEL);
+ ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
if (!ff_args->pages[0])
goto out_nomem_free;
@@ -2165,8 +2646,9 @@ ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
FF_LAYOUTRETURN_MAXERR);
spin_lock(&args->inode->i_lock);
- ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
- &ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
+ ff_args->num_dev = ff_layout_mirror_prepare_stats(
+ &ff_layout->generic_hdr, &ff_args->devinfo[0],
+ ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
spin_unlock(&args->inode->i_lock);
args->ld_private->ops = &layoutreturn_ops;
@@ -2192,8 +2674,8 @@ ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
if (list_empty(&head))
return;
- errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
- sizeof(*errors), GFP_NOFS);
+ errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
+ nfs_io_gfp_mask());
if (errors != NULL) {
const struct nfs4_ff_layout_ds_err *pos;
size_t n = 0;
@@ -2341,11 +2823,11 @@ ff_layout_encode_io_latency(struct xdr_stream *xdr,
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
const struct nfs42_layoutstat_devinfo *devinfo,
- struct nfs4_ff_layout_mirror *mirror)
+ struct nfs4_ff_layout_ds_stripe *dss_info)
{
struct nfs4_pnfs_ds_addr *da;
- struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
- struct nfs_fh *fh = &mirror->fh_versions[0];
+ struct nfs4_pnfs_ds *ds = dss_info->mirror_ds->ds;
+ struct nfs_fh *fh = &dss_info->fh_versions[0];
__be32 *p;
da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
@@ -2357,13 +2839,17 @@ ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
p = xdr_reserve_space(xdr, 4 + fh->size);
xdr_encode_opaque(p, fh->data, fh->size);
/* ff_io_latency4 read */
- spin_lock(&mirror->lock);
- ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
+ spin_lock(&dss_info->mirror->lock);
+ ff_layout_encode_io_latency(xdr,
+ &dss_info->read_stat.io_stat);
/* ff_io_latency4 write */
- ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
- spin_unlock(&mirror->lock);
+ ff_layout_encode_io_latency(xdr,
+ &dss_info->write_stat.io_stat);
+ spin_unlock(&dss_info->mirror->lock);
/* nfstime4 */
- ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
+ ff_layout_encode_nfstime(xdr,
+ ktime_sub(ktime_get(),
+ dss_info->start_time));
/* bool */
p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(false);
@@ -2387,7 +2873,8 @@ ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
- struct nfs4_ff_layout_mirror *mirror = opaque->data;
+ struct nfs4_ff_layout_ds_stripe *dss_info = opaque->data;
+ struct nfs4_ff_layout_mirror *mirror = dss_info->mirror;
ff_layout_put_mirror(mirror);
}
@@ -2400,58 +2887,76 @@ static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
struct nfs42_layoutstat_devinfo *devinfo,
- int dev_limit)
+ int dev_limit, enum nfs4_ff_op_type type)
{
struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
struct nfs4_ff_layout_mirror *mirror;
+ struct nfs4_ff_layout_ds_stripe *dss_info;
struct nfs4_deviceid_node *dev;
- int i = 0;
+ int i = 0, dss_id;
list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
- if (i >= dev_limit)
- break;
- if (IS_ERR_OR_NULL(mirror->mirror_ds))
- continue;
- if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
- continue;
- /* mirror refcount put in cleanup_layoutstats */
- if (!refcount_inc_not_zero(&mirror->ref))
- continue;
- dev = &mirror->mirror_ds->id_node;
- memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
- devinfo->offset = 0;
- devinfo->length = NFS4_MAX_UINT64;
- spin_lock(&mirror->lock);
- devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
- devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
- devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
- devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
- spin_unlock(&mirror->lock);
- devinfo->layout_type = LAYOUT_FLEX_FILES;
- devinfo->ld_private.ops = &layoutstat_ops;
- devinfo->ld_private.data = mirror;
-
- devinfo++;
- i++;
+ for (dss_id = 0; dss_id < mirror->dss_count; ++dss_id) {
+ dss_info = &mirror->dss[dss_id];
+ if (i >= dev_limit)
+ break;
+ if (IS_ERR_OR_NULL(dss_info->mirror_ds))
+ continue;
+ if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
+ &mirror->flags) &&
+ type != NFS4_FF_OP_LAYOUTRETURN)
+ continue;
+ /* mirror refcount put in cleanup_layoutstats */
+ if (!refcount_inc_not_zero(&mirror->ref))
+ continue;
+ dev = &dss_info->mirror_ds->id_node;
+ memcpy(&devinfo->dev_id,
+ &dev->deviceid,
+ NFS4_DEVICEID4_SIZE);
+ devinfo->offset = 0;
+ devinfo->length = NFS4_MAX_UINT64;
+ spin_lock(&mirror->lock);
+ devinfo->read_count =
+ dss_info->read_stat.io_stat.ops_completed;
+ devinfo->read_bytes =
+ dss_info->read_stat.io_stat.bytes_completed;
+ devinfo->write_count =
+ dss_info->write_stat.io_stat.ops_completed;
+ devinfo->write_bytes =
+ dss_info->write_stat.io_stat.bytes_completed;
+ spin_unlock(&mirror->lock);
+ devinfo->layout_type = LAYOUT_FLEX_FILES;
+ devinfo->ld_private.ops = &layoutstat_ops;
+ devinfo->ld_private.data = &mirror->dss[dss_id];
+
+ devinfo++;
+ i++;
+ }
}
return i;
}
-static int
-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
+ struct pnfs_layout_hdr *lo;
struct nfs4_flexfile_layout *ff_layout;
const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
- args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
+ args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
+ nfs_io_gfp_mask());
if (!args->devinfo)
return -ENOMEM;
spin_lock(&args->inode->i_lock);
- ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
- args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
- &args->devinfo[0], dev_count);
+ lo = NFS_I(args->inode)->layout;
+ if (lo && pnfs_layout_is_valid(lo)) {
+ ff_layout = FF_LAYOUT_FROM_HDR(lo);
+ args->num_dev = ff_layout_mirror_prepare_stats(
+ &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
+ NFS4_FF_OP_LAYOUTSTATS);
+ } else
+ args->num_dev = 0;
spin_unlock(&args->inode->i_lock);
if (!args->num_dev) {
kfree(args->devinfo);
@@ -2467,7 +2972,7 @@ ff_layout_set_layoutdriver(struct nfs_server *server,
const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
- server->caps |= NFS_CAP_LAYOUTSTATS;
+ server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
#endif
return 0;
}
@@ -2504,6 +3009,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
.prepare_layoutreturn = ff_layout_prepare_layoutreturn,
.sync = pnfs_nfs_generic_sync,
.prepare_layoutstats = ff_layout_prepare_layoutstats,
+ .cancel_io = ff_layout_cancel_io,
};
static int __init nfs4flexfilelayout_init(void)
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 354a031c69b1..17a008c8e97c 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -21,6 +21,8 @@
* due to network error etc. */
#define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096
+#define NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT 4096
+
/* LAYOUTSTATS report interval in ms */
#define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L)
#define FF_LAYOUTSTATS_MAXDEV 4
@@ -71,24 +73,32 @@ struct nfs4_ff_layoutstat {
struct nfs4_ff_busy_timer busy_timer;
};
-struct nfs4_ff_layout_mirror {
- struct pnfs_layout_hdr *layout;
- struct list_head mirrors;
- u32 ds_count;
- u32 efficiency;
+struct nfs4_ff_layout_mirror;
+
+struct nfs4_ff_layout_ds_stripe {
+ struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid devid;
+ u32 efficiency;
struct nfs4_ff_layout_ds *mirror_ds;
u32 fh_versions_cnt;
struct nfs_fh *fh_versions;
nfs4_stateid stateid;
const struct cred __rcu *ro_cred;
const struct cred __rcu *rw_cred;
- refcount_t ref;
- spinlock_t lock;
- unsigned long flags;
+ struct nfs_file_localio nfl;
struct nfs4_ff_layoutstat read_stat;
struct nfs4_ff_layoutstat write_stat;
ktime_t start_time;
+};
+
+struct nfs4_ff_layout_mirror {
+ struct pnfs_layout_hdr *layout;
+ struct list_head mirrors;
+ u32 dss_count;
+ struct nfs4_ff_layout_ds_stripe *dss;
+ refcount_t ref;
+ spinlock_t lock;
+ unsigned long flags;
u32 report_interval;
};
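/*
 * The split above moves everything that is per data server (device id,
 * filehandles, stateid, credentials, I/O stats, localio state) into the
 * dss[] array indexed by stripe, while the refcount, lock and flags stay
 * per mirror.  A minimal sketch of how the new shape is addressed, with
 * simplified stand-in fields rather than the kernel structs:
 */
#include <stdio.h>

struct stripe {				/* stands in for nfs4_ff_layout_ds_stripe */
	unsigned int devid;
	const char *fh;
};

struct mirror {				/* stands in for nfs4_ff_layout_mirror */
	unsigned int dss_count;
	struct stripe *dss;		/* one entry per DS stripe */
};

int main(void)
{
	struct stripe stripes[2] = { { 1, "fh-of-ds-0" }, { 2, "fh-of-ds-1" } };
	struct mirror m = { .dss_count = 2, .dss = stripes };
	unsigned int dss_id = 1;

	/* what used to be mirror->fh_versions[0] is now kept per stripe: */
	printf("stripe %u uses devid %u, fh %s\n",
	       dss_id, m.dss[dss_id].devid, m.dss[dss_id].fh);
	return 0;
}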
@@ -99,7 +109,7 @@ struct nfs4_ff_layout_segment {
u64 stripe_unit;
u32 flags;
u32 mirror_array_cnt;
- struct nfs4_ff_layout_mirror *mirror_array[];
+ struct nfs4_ff_layout_mirror *mirror_array[] __counted_by(mirror_array_cnt);
};
struct nfs4_flexfile_layout {
@@ -149,12 +159,12 @@ FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)
}
static inline struct nfs4_deviceid_node *
-FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
+FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx);
if (mirror != NULL) {
- struct nfs4_ff_layout_ds *mirror_ds = mirror->mirror_ds;
+ struct nfs4_ff_layout_ds *mirror_ds = mirror->dss[dss_id].mirror_ds;
if (!IS_ERR_OR_NULL(mirror_ds))
return &mirror_ds->id_node;
@@ -181,9 +191,22 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
}
static inline int
-nfs4_ff_layout_ds_version(const struct nfs4_ff_layout_mirror *mirror)
+nfs4_ff_layout_ds_version(const struct nfs4_ff_layout_mirror *mirror, u32 dss_id)
+{
+ return mirror->dss[dss_id].mirror_ds->ds_versions[0].version;
+}
+
+static inline u32
+nfs4_ff_layout_calc_dss_id(const u64 stripe_unit, const u32 dss_count, const loff_t offset)
{
- return mirror->mirror_ds->ds_versions[0].version;
+ u64 tmp = offset;
+
+ if (dss_count == 1 || stripe_unit == 0)
+ return 0;
+
+ do_div(tmp, stripe_unit);
+
+ return do_div(tmp, dss_count);
}
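/*
 * nfs4_ff_layout_calc_dss_id() above maps a file offset to a stripe:
 * dss_id = (offset / stripe_unit) % dss_count, with the single-stripe and
 * zero-stripe-unit cases short-circuited to 0 (do_div() stores the quotient
 * and returns the remainder).  A worked userspace sketch of the same
 * arithmetic, with do_div() replaced by plain 64-bit division:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_dss_id(uint64_t stripe_unit, uint32_t dss_count,
			    uint64_t offset)
{
	if (dss_count == 1 || stripe_unit == 0)
		return 0;
	/* which stripe-unit-sized chunk the offset lands in, spread round-robin */
	return (uint32_t)((offset / stripe_unit) % dss_count);
}

int main(void)
{
	uint64_t unit = 1048576;	/* e.g. a 1 MiB stripe unit, 3 stripes */

	printf("offset 0       -> stripe %u\n", calc_dss_id(unit, 3, 0));
	printf("offset 1.5 MiB -> stripe %u\n", calc_dss_id(unit, 3, unit + unit / 2));
	printf("offset 3 MiB   -> stripe %u\n", calc_dss_id(unit, 3, 3 * unit));
	return 0;
}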
struct nfs4_ff_layout_ds *
@@ -192,9 +215,9 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds);
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
- struct nfs4_ff_layout_mirror *mirror, u64 offset,
- u64 length, int status, enum nfs_opnum4 opnum,
- gfp_t gfp_flags);
+ struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id, u64 offset, u64 length, int status,
+ enum nfs_opnum4 opnum, gfp_t gfp_flags);
void ff_layout_send_layouterror(struct pnfs_layout_segment *lseg);
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head);
void ff_layout_free_ds_ioerr(struct list_head *head);
@@ -203,23 +226,27 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
struct list_head *head,
unsigned int maxnum);
struct nfs_fh *
-nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror);
+nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror, u32 dss_id);
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
- nfs4_stateid *stateid);
+ u32 dss_id,
+ nfs4_stateid *stateid);
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
bool fail_return);
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
struct nfs_client *ds_clp,
- struct inode *inode);
+ struct inode *inode,
+ u32 dss_id);
const struct cred *ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
const struct pnfs_layout_range *range,
- const struct cred *mdscred);
+ const struct cred *mdscred,
+ u32 dss_id);
bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index bfa7202ca7be..c55ea8fa3bfa 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -44,18 +44,19 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
{
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct folio *scratch;
struct list_head dsaddrs;
struct nfs4_pnfs_ds_addr *da;
struct nfs4_ff_layout_ds *new_ds = NULL;
struct nfs4_ff_ds_version *ds_versions = NULL;
+ struct net *net = server->nfs_client->cl_net;
u32 mp_count;
u32 version_count;
__be32 *p;
int i, ret = -ENOMEM;
/* set up xdr stream */
- scratch = alloc_page(gfp_flags);
+ scratch = folio_alloc(gfp_flags, 0);
if (!scratch)
goto out_err;
@@ -69,7 +70,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
INIT_LIST_HEAD(&dsaddrs);
xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
- xdr_set_scratch_page(&stream, scratch);
+ xdr_set_scratch_folio(&stream, scratch);
/* multipath count */
p = xdr_inline_decode(&stream, 4);
@@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
for (i = 0; i < mp_count; i++) {
/* multipath ds */
- da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
- &stream, gfp_flags);
+ da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
}
@@ -113,8 +113,10 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_err_drain_dsaddrs;
ds_versions[i].version = be32_to_cpup(p++);
ds_versions[i].minor_version = be32_to_cpup(p++);
- ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
- ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
+ ds_versions[i].rsize = nfs_io_size(be32_to_cpup(p++),
+ server->nfs_client->cl_proto);
+ ds_versions[i].wsize = nfs_io_size(be32_to_cpup(p++),
+ server->nfs_client->cl_proto);
ds_versions[i].tightly_coupled = be32_to_cpup(p);
if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
@@ -147,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
new_ds->ds_versions = ds_versions;
new_ds->ds_versions_cnt = version_count;
- new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
if (!new_ds->ds)
goto out_err_drain_dsaddrs;
@@ -161,7 +163,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
kfree(da);
}
- __free_page(scratch);
+ folio_put(scratch);
return new_ds;
out_err_drain_dsaddrs:
@@ -175,7 +177,7 @@ out_err_drain_dsaddrs:
kfree(ds_versions);
out_scratch:
- __free_page(scratch);
+ folio_put(scratch);
out_err:
kfree(new_ds);
@@ -248,16 +250,16 @@ ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
}
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
- struct nfs4_ff_layout_mirror *mirror, u64 offset,
- u64 length, int status, enum nfs_opnum4 opnum,
- gfp_t gfp_flags)
+ struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id, u64 offset, u64 length, int status,
+ enum nfs_opnum4 opnum, gfp_t gfp_flags)
{
struct nfs4_ff_layout_ds_err *dserr;
if (status == 0)
return 0;
- if (IS_ERR_OR_NULL(mirror->mirror_ds))
+ if (IS_ERR_OR_NULL(mirror->dss[dss_id].mirror_ds))
return -EINVAL;
dserr = kmalloc(sizeof(*dserr), gfp_flags);
@@ -269,8 +271,8 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
dserr->length = length;
dserr->status = status;
dserr->opnum = opnum;
- nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
- memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
+ nfs4_stateid_copy(&dserr->stateid, &mirror->dss[dss_id].stateid);
+ memcpy(&dserr->deviceid, &mirror->dss[dss_id].mirror_ds->id_node.deviceid,
NFS4_DEVICEID4_SIZE);
spin_lock(&flo->generic_hdr.plh_inode->i_lock);
@@ -280,14 +282,14 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
}
static const struct cred *
-ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
+ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode, u32 dss_id)
{
const struct cred *cred, __rcu **pcred;
if (iomode == IOMODE_READ)
- pcred = &mirror->ro_cred;
+ pcred = &mirror->dss[dss_id].ro_cred;
else
- pcred = &mirror->rw_cred;
+ pcred = &mirror->dss[dss_id].rw_cred;
rcu_read_lock();
do {
@@ -302,43 +304,45 @@ ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
}
struct nfs_fh *
-nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror)
+nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror, u32 dss_id)
{
/* FIXME: For now assume there is only 1 version available for the DS */
- return &mirror->fh_versions[0];
+ return &mirror->dss[dss_id].fh_versions[0];
}
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
- nfs4_stateid *stateid)
+ u32 dss_id,
+ nfs4_stateid *stateid)
{
- if (nfs4_ff_layout_ds_version(mirror) == 4)
- nfs4_stateid_copy(stateid, &mirror->stateid);
+ if (nfs4_ff_layout_ds_version(mirror, dss_id) == 4)
+ nfs4_stateid_copy(stateid, &mirror->dss[dss_id].stateid);
}
static bool
ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
- struct nfs4_ff_layout_mirror *mirror)
+ struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id)
{
if (mirror == NULL)
goto outerr;
- if (mirror->mirror_ds == NULL) {
+ if (mirror->dss[dss_id].mirror_ds == NULL) {
struct nfs4_deviceid_node *node;
struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
- &mirror->devid, lo->plh_lc_cred,
+ &mirror->dss[dss_id].devid, lo->plh_lc_cred,
GFP_KERNEL);
if (node)
mirror_ds = FF_LAYOUT_MIRROR_DS(node);
/* check for race with another call to this function */
- if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
+ if (cmpxchg(&mirror->dss[dss_id].mirror_ds, NULL, mirror_ds) &&
mirror_ds != ERR_PTR(-ENODEV))
nfs4_put_deviceid_node(node);
}
- if (IS_ERR(mirror->mirror_ds))
+ if (IS_ERR(mirror->dss[dss_id].mirror_ds))
goto outerr;
return true;
@@ -350,6 +354,7 @@ outerr:
* nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
* @lseg: the layout segment we're operating on
* @mirror: layout mirror describing the DS to use
+ * @dss_id: id of the DS stripe to use
* @fail_return: return layout on connect failure?
*
* Try to prepare a DS connection to accept an RPC call. This involves
@@ -366,18 +371,19 @@ outerr:
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
struct nfs4_ff_layout_mirror *mirror,
+ u32 dss_id,
bool fail_return)
{
- struct nfs4_pnfs_ds *ds = NULL;
+ struct nfs4_pnfs_ds *ds;
struct inode *ino = lseg->pls_layout->plh_inode;
struct nfs_server *s = NFS_SERVER(ino);
unsigned int max_payload;
- int status;
+ int status = -EAGAIN;
- if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+ if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror, dss_id))
goto noconnect;
- ds = mirror->mirror_ds->ds;
+ ds = mirror->dss[dss_id].mirror_ds->ds;
if (READ_ONCE(ds->ds_clp))
goto out;
/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -386,31 +392,37 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
/* FIXME: For now we assume the server sent only one version of NFS
* to use for the DS.
*/
- status = nfs4_pnfs_ds_connect(s, ds, &mirror->mirror_ds->id_node,
+ status = nfs4_pnfs_ds_connect(s, ds, &mirror->dss[dss_id].mirror_ds->id_node,
dataserver_timeo, dataserver_retrans,
- mirror->mirror_ds->ds_versions[0].version,
- mirror->mirror_ds->ds_versions[0].minor_version);
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].version,
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].minor_version);
/* connect success, check rsize/wsize limit */
if (!status) {
+ /*
+ * ds_clp is put in destroy_ds().
+ * Keep ds_clp even if the DS is local, so that we can fall back
+ * to NFS over the wire whenever local IO cannot proceed.
+ */
+ nfs_local_probe_async(ds->ds_clp);
max_payload =
nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
NULL);
- if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
- mirror->mirror_ds->ds_versions[0].rsize = max_payload;
- if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
- mirror->mirror_ds->ds_versions[0].wsize = max_payload;
+ if (mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize > max_payload)
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize = max_payload;
+ if (mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize > max_payload)
+ mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize = max_payload;
goto out;
}
noconnect:
ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
- mirror, lseg->pls_range.offset,
+ mirror, dss_id, lseg->pls_range.offset,
lseg->pls_range.length, NFS4ERR_NXIO,
OP_ILLEGAL, GFP_NOIO);
ff_layout_send_layouterror(lseg);
if (fail_return || !ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(ino, lseg);
- ds = NULL;
+ ds = ERR_PTR(status);
out:
return ds;
}
@@ -418,12 +430,13 @@ out:
const struct cred *
ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
const struct pnfs_layout_range *range,
- const struct cred *mdscred)
+ const struct cred *mdscred,
+ u32 dss_id)
{
const struct cred *cred;
- if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
- cred = ff_layout_get_mirror_cred(mirror, range->iomode);
+ if (mirror && !mirror->dss[dss_id].mirror_ds->ds_versions[0].tightly_coupled) {
+ cred = ff_layout_get_mirror_cred(mirror, range->iomode, dss_id);
if (!cred)
cred = get_cred(mdscred);
} else {
@@ -437,15 +450,17 @@ ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
* @mirror: pointer to the mirror
* @ds_clp: nfs_client for the DS
* @inode: pointer to inode
+ * @dss_id: DS stripe id
*
* Find or create a DS rpc client with the MDS server rpc client auth flavor
* in the nfs_client cl_ds_clients list.
*/
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
- struct nfs_client *ds_clp, struct inode *inode)
+ struct nfs_client *ds_clp, struct inode *inode,
+ u32 dss_id)
{
- switch (mirror->mirror_ds->ds_versions[0].version) {
+ switch (mirror->dss[dss_id].mirror_ds->ds_versions[0].version) {
case 3:
/* For NFSv3 DS, flavor is set when creating DS connections */
return ds_clp->cl_rpcclient;
@@ -551,16 +566,18 @@ static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid_node *devid;
- u32 idx;
+ u32 idx, dss_id;
for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
mirror = FF_LAYOUT_COMP(lseg, idx);
- if (mirror) {
- if (!mirror->mirror_ds)
+ if (!mirror)
+ continue;
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ if (!mirror->dss[dss_id].mirror_ds)
return true;
- if (IS_ERR(mirror->mirror_ds))
+ if (IS_ERR(mirror->dss[dss_id].mirror_ds))
continue;
- devid = &mirror->mirror_ds->id_node;
+ devid = &mirror->dss[dss_id].mirror_ds->id_node;
if (!nfs4_test_deviceid_unavailable(devid))
return true;
}
@@ -573,17 +590,21 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid_node *devid;
- u32 idx;
+ u32 idx, dss_id;
for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
mirror = FF_LAYOUT_COMP(lseg, idx);
- if (!mirror || IS_ERR(mirror->mirror_ds))
- return false;
- if (!mirror->mirror_ds)
- continue;
- devid = &mirror->mirror_ds->id_node;
- if (nfs4_test_deviceid_unavailable(devid))
+ if (!mirror)
return false;
+ for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
+ if (IS_ERR(mirror->dss[dss_id].mirror_ds))
+ return false;
+ if (!mirror->dss[dss_id].mirror_ds)
+ continue;
+ devid = &mirror->dss[dss_id].mirror_ds->id_node;
+ if (nfs4_test_deviceid_unavailable(devid))
+ return false;
+ }
}
return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 0d444a90f513..b4679b7161b0 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -10,6 +10,7 @@
* Split from fs/nfs/super.c by David Howells <dhowells@redhat.com>
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
@@ -17,9 +18,14 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
+
+#include <net/handshake.h>
+
#include "nfs.h"
#include "internal.h"
+#include "nfstrace.h"
+
#define NFSDBG_FACILITY NFSDBG_MOUNT
#if IS_ENABLED(CONFIG_NFS_V3)
@@ -43,6 +49,8 @@ enum nfs_param {
Opt_bsize,
Opt_clientaddr,
Opt_cto,
+ Opt_alignwrite,
+ Opt_fatal_neterrors,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -65,6 +73,8 @@ enum nfs_param {
Opt_posix,
Opt_proto,
Opt_rdirplus,
+ Opt_rdirplus_none,
+ Opt_rdirplus_force,
Opt_rdma,
Opt_resvport,
Opt_retrans,
@@ -79,11 +89,29 @@ enum nfs_param {
Opt_source,
Opt_tcp,
Opt_timeo,
+ Opt_trunkdiscovery,
Opt_udp,
Opt_v,
Opt_vers,
Opt_wsize,
Opt_write,
+ Opt_xprtsec,
+ Opt_cert_serial,
+ Opt_privkey_serial,
+};
+
+enum {
+ Opt_fatal_neterrors_default,
+ Opt_fatal_neterrors_enetunreach,
+ Opt_fatal_neterrors_none,
+};
+
+static const struct constant_table nfs_param_enums_fatal_neterrors[] = {
+ { "default", Opt_fatal_neterrors_default },
+ { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach },
+ { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach },
+ { "none", Opt_fatal_neterrors_none },
+ {}
};
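/*
 * The table above is fed to fs_parse() via fsparam_enum(); both orderings
 * of the two error names are listed as aliases for the same policy value.
 * A minimal sketch of that kind of string-to-constant lookup (a plain
 * strcmp walk standing in for the kernel's lookup_constant()):
 */
#include <stdio.h>
#include <string.h>

struct constant { const char *name; int value; };

enum { FATAL_DEFAULT, FATAL_ENETUNREACH, FATAL_NONE };	/* illustrative values */

static const struct constant fatal_neterrors[] = {
	{ "default",              FATAL_DEFAULT },
	{ "ENETDOWN:ENETUNREACH", FATAL_ENETUNREACH },
	{ "ENETUNREACH:ENETDOWN", FATAL_ENETUNREACH },	/* alias, same policy */
	{ "none",                 FATAL_NONE },
	{ NULL, 0 }
};

static int lookup(const struct constant *tbl, const char *s, int not_found)
{
	for (; tbl->name; tbl++)
		if (strcmp(tbl->name, s) == 0)
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("%d %d\n",
	       lookup(fatal_neterrors, "ENETDOWN:ENETUNREACH", -1),
	       lookup(fatal_neterrors, "ENETUNREACH:ENETDOWN", -1));
	return 0;
}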
enum {
@@ -141,6 +169,9 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("bsize", Opt_bsize),
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
+ fsparam_flag_no("alignwrite", Opt_alignwrite),
+ fsparam_enum("fatal_neterrors", Opt_fatal_neterrors,
+ nfs_param_enums_fatal_neterrors),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -164,7 +195,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("port", Opt_port),
fsparam_flag_no("posix", Opt_posix),
fsparam_string("proto", Opt_proto),
- fsparam_flag_no("rdirplus", Opt_rdirplus),
+ fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus
+ fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=...
fsparam_flag ("rdma", Opt_rdma),
fsparam_flag_no("resvport", Opt_resvport),
fsparam_u32 ("retrans", Opt_retrans),
@@ -179,6 +211,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("source", Opt_source),
fsparam_flag ("tcp", Opt_tcp),
fsparam_u32 ("timeo", Opt_timeo),
+ fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery),
fsparam_flag ("udp", Opt_udp),
fsparam_flag ("v2", Opt_v),
fsparam_flag ("v3", Opt_v),
@@ -189,6 +222,9 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("vers", Opt_vers),
fsparam_enum ("write", Opt_write, nfs_param_enums_write),
fsparam_u32 ("wsize", Opt_wsize),
+ fsparam_string("xprtsec", Opt_xprtsec),
+ fsparam_s32("cert_serial", Opt_cert_serial),
+ fsparam_s32("privkey_serial", Opt_privkey_serial),
{}
};
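/*
 * "rdirplus" is registered twice in the table above: once as a flag
 * (rdirplus / nordirplus) and once as a string parameter (rdirplus=none,
 * rdirplus=force).  The option handler later in this file turns that into
 * a tri-state over two mount flags.  A minimal userspace sketch of that
 * tri-state, using illustrative flag bits rather than the kernel's
 * NFS_MOUNT_* values and omitting the invalid-value error path:
 */
#include <stdio.h>
#include <string.h>

#define MNT_NORDIRPLUS		0x1	/* stand-in for NFS_MOUNT_NORDIRPLUS */
#define MNT_FORCE_RDIRPLUS	0x2	/* stand-in for NFS_MOUNT_FORCE_RDIRPLUS */

/* negated: "nordirplus"; arg == NULL: bare "rdirplus"; else "rdirplus=<arg>" */
static unsigned int apply_rdirplus(unsigned int flags, int negated, const char *arg)
{
	if (negated) {
		flags &= ~MNT_FORCE_RDIRPLUS;
		flags |= MNT_NORDIRPLUS;
	} else if (!arg) {
		flags &= ~(MNT_NORDIRPLUS | MNT_FORCE_RDIRPLUS);
	} else if (strcmp(arg, "none") == 0) {
		flags &= ~MNT_FORCE_RDIRPLUS;
		flags |= MNT_NORDIRPLUS;
	} else if (strcmp(arg, "force") == 0) {
		flags &= ~MNT_NORDIRPLUS;
		flags |= MNT_FORCE_RDIRPLUS;
	}
	return flags;
}

int main(void)
{
	printf("nordirplus     -> %#x\n", apply_rdirplus(0, 1, NULL));
	printf("rdirplus       -> %#x\n", apply_rdirplus(0x3, 0, NULL));
	printf("rdirplus=force -> %#x\n", apply_rdirplus(0, 0, "force"));
	return 0;
}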
@@ -262,15 +298,35 @@ static const struct constant_table nfs_secflavor_tokens[] = {
{}
};
+enum {
+ Opt_xprtsec_none,
+ Opt_xprtsec_tls,
+ Opt_xprtsec_mtls,
+ nr__Opt_xprtsec
+};
+
+static const struct constant_table nfs_xprtsec_policies[] = {
+ { "none", Opt_xprtsec_none },
+ { "tls", Opt_xprtsec_tls },
+ { "mtls", Opt_xprtsec_mtls },
+ {}
+};
+
+static const struct constant_table nfs_rdirplus_tokens[] = {
+ { "none", Opt_rdirplus_none },
+ { "force", Opt_rdirplus_force },
+ {}
+};
+
/*
* Sanity-check a server address provided by the mount command.
*
* Address family must be initialized, and address must not be
* the ANY address for that family.
*/
-static int nfs_verify_server_address(struct sockaddr *addr)
+static int nfs_verify_server_address(struct sockaddr_storage *addr)
{
- switch (addr->sa_family) {
+ switch (addr->ss_family) {
case AF_INET: {
struct sockaddr_in *sa = (struct sockaddr_in *)addr;
return sa->sin_addr.s_addr != htonl(INADDR_ANY);
@@ -281,7 +337,6 @@ static int nfs_verify_server_address(struct sockaddr *addr)
}
}
- dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
return 0;
}
@@ -316,9 +371,21 @@ static int nfs_validate_transport_protocol(struct fs_context *fc,
default:
ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
}
+
+ if (ctx->xprtsec.policy != RPC_XPRTSEC_NONE)
+ switch (ctx->nfs_server.protocol) {
+ case XPRT_TRANSPORT_TCP:
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP_TLS;
+ break;
+ default:
+ goto out_invalid_xprtsec_policy;
+ }
+
return 0;
out_invalid_transport_udp:
return nfs_invalf(fc, "NFS: Unsupported transport protocol udp");
+out_invalid_xprtsec_policy:
+ return nfs_invalf(fc, "NFS: Transport does not support xprtsec");
}
/*
@@ -375,7 +442,7 @@ static int nfs_parse_security_flavors(struct fs_context *fc,
char *string = param->string, *p;
int ret;
- dfprintk(MOUNT, "NFS: parsing %s=%s option\n", param->key, param->string);
+ trace_nfs_mount_assign(param->key, string);
while ((p = strsep(&string, ":")) != NULL) {
if (!*p)
@@ -426,6 +493,29 @@ static int nfs_parse_security_flavors(struct fs_context *fc,
return 0;
}
+static int nfs_parse_xprtsec_policy(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+
+ trace_nfs_mount_assign(param->key, param->string);
+
+ switch (lookup_constant(nfs_xprtsec_policies, param->string, -1)) {
+ case Opt_xprtsec_none:
+ ctx->xprtsec.policy = RPC_XPRTSEC_NONE;
+ break;
+ case Opt_xprtsec_tls:
+ ctx->xprtsec.policy = RPC_XPRTSEC_TLS_ANON;
+ break;
+ case Opt_xprtsec_mtls:
+ ctx->xprtsec.policy = RPC_XPRTSEC_TLS_X509;
+ break;
+ default:
+ return nfs_invalf(fc, "NFS: Unrecognized transport security policy");
+ }
+ return 0;
+}
+
static int nfs_parse_version_string(struct fs_context *fc,
const char *string)
{
@@ -465,6 +555,32 @@ static int nfs_parse_version_string(struct fs_context *fc,
return 0;
}
+#ifdef CONFIG_KEYS
+static int nfs_tls_key_verify(key_serial_t key_id)
+{
+ struct key *key = key_lookup(key_id);
+ int error = 0;
+
+ if (IS_ERR(key)) {
+ pr_err("key id %08x not found\n", key_id);
+ return PTR_ERR(key);
+ }
+ if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
+ test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ pr_err("key id %08x revoked\n", key_id);
+ error = -EKEYREVOKED;
+ }
+
+ key_put(key);
+ return error;
+}
+#else
+static inline int nfs_tls_key_verify(key_serial_t key_id)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_KEYS */
+
/*
* Parse a single mount parameter.
*/
@@ -477,11 +593,11 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
unsigned int len;
int ret, opt;
- dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", param->key);
+ trace_nfs_mount_option(param);
opt = fs_parse(fc, nfs_fs_parameters, param, &result);
if (opt < 0)
- return ctx->sloppy ? 1 : opt;
+ return (opt == -ENOPARAM && ctx->sloppy) ? 1 : opt;
if (fc->security)
ctx->has_sec_mnt_opts = 1;
@@ -514,7 +630,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
if (result.negated)
ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
else
- ctx->flags &= NFS_MOUNT_SOFTREVAL;
+ ctx->flags |= NFS_MOUNT_SOFTREVAL;
break;
case Opt_posix:
if (result.negated)
@@ -528,6 +644,18 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
else
ctx->flags &= ~NFS_MOUNT_NOCTO;
break;
+ case Opt_trunkdiscovery:
+ if (result.negated)
+ ctx->flags &= ~NFS_MOUNT_TRUNK_DISCOVERY;
+ else
+ ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY;
+ break;
+ case Opt_alignwrite:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NO_ALIGNWRITE;
+ else
+ ctx->flags &= ~NFS_MOUNT_NO_ALIGNWRITE;
+ break;
case Opt_ac:
if (result.negated)
ctx->flags |= NFS_MOUNT_NOAC;
@@ -536,9 +664,11 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
break;
case Opt_lock:
if (result.negated) {
+ ctx->lock_status = NFS_LOCK_NOLOCK;
ctx->flags |= NFS_MOUNT_NONLM;
ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
} else {
+ ctx->lock_status = NFS_LOCK_LOCK;
ctx->flags &= ~NFS_MOUNT_NONLM;
ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
}
@@ -562,10 +692,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->flags &= ~NFS_MOUNT_NOACL;
break;
case Opt_rdirplus:
- if (result.negated)
+ if (result.negated) {
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
ctx->flags |= NFS_MOUNT_NORDIRPLUS;
- else
- ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ } else if (!param->string) {
+ ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
+ } else {
+ switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) {
+ case Opt_rdirplus_none:
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
+ ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_rdirplus_force:
+ ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ }
break;
case Opt_sharecache:
if (result.negated)
@@ -588,6 +733,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->fscache_uniq = NULL;
break;
case Opt_fscache:
+ trace_nfs_mount_assign(param->key, param->string);
ctx->options |= NFS_OPTION_FSCACHE;
kfree(ctx->fscache_uniq);
ctx->fscache_uniq = param->string;
@@ -674,6 +820,9 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
return ret;
break;
case Opt_vers:
+ if (!param->string)
+ goto out_invalid_value;
+ trace_nfs_mount_assign(param->key, param->string);
ret = nfs_parse_version_string(fc, param->string);
if (ret < 0)
return ret;
@@ -683,8 +832,28 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
if (ret < 0)
return ret;
break;
+ case Opt_xprtsec:
+ ret = nfs_parse_xprtsec_policy(fc, param);
+ if (ret < 0)
+ return ret;
+ break;
+ case Opt_cert_serial:
+ ret = nfs_tls_key_verify(result.int_32);
+ if (ret < 0)
+ return ret;
+ ctx->xprtsec.cert_serial = result.int_32;
+ break;
+ case Opt_privkey_serial:
+ ret = nfs_tls_key_verify(result.int_32);
+ if (ret < 0)
+ return ret;
+ ctx->xprtsec.privkey_serial = result.int_32;
+ break;
case Opt_proto:
+ if (!param->string)
+ goto out_invalid_value;
+ trace_nfs_mount_assign(param->key, param->string);
protofamily = AF_INET;
switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
case Opt_xprt_udp6:
@@ -720,6 +889,9 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
break;
case Opt_mountproto:
+ if (!param->string)
+ goto out_invalid_value;
+ trace_nfs_mount_assign(param->key, param->string);
mountfamily = AF_INET;
switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
case Opt_xprt_udp6:
@@ -742,6 +914,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
break;
case Opt_addr:
+ trace_nfs_mount_assign(param->key, param->string);
len = rpc_pton(fc->net_ns, param->string, param->size,
&ctx->nfs_server.address,
sizeof(ctx->nfs_server._address));
@@ -750,16 +923,19 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->nfs_server.addrlen = len;
break;
case Opt_clientaddr:
+ trace_nfs_mount_assign(param->key, param->string);
kfree(ctx->client_address);
ctx->client_address = param->string;
param->string = NULL;
break;
case Opt_mounthost:
+ trace_nfs_mount_assign(param->key, param->string);
kfree(ctx->mount_server.hostname);
ctx->mount_server.hostname = param->string;
param->string = NULL;
break;
case Opt_mountaddr:
+ trace_nfs_mount_assign(param->key, param->string);
len = rpc_pton(fc->net_ns, param->string, param->size,
&ctx->mount_server.address,
sizeof(ctx->mount_server._address));
@@ -768,16 +944,38 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->mount_server.addrlen = len;
break;
case Opt_nconnect:
+ trace_nfs_mount_assign(param->key, param->string);
if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_CONNECTIONS)
goto out_of_bounds;
ctx->nfs_server.nconnect = result.uint_32;
break;
case Opt_max_connect:
+ trace_nfs_mount_assign(param->key, param->string);
if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS)
goto out_of_bounds;
ctx->nfs_server.max_connect = result.uint_32;
break;
+ case Opt_fatal_neterrors:
+ trace_nfs_mount_assign(param->key, param->string);
+ switch (result.uint_32) {
+ case Opt_fatal_neterrors_default:
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ else
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_enetunreach:
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_none:
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
case Opt_lookupcache:
+ trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
case Opt_lookupcache_all:
ctx->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
@@ -794,6 +992,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
}
break;
case Opt_local_lock:
+ trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
case Opt_local_lock_all:
ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK |
@@ -814,6 +1013,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
}
break;
case Opt_write:
+ trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
case Opt_write_lazy:
ctx->flags &=
@@ -837,7 +1037,6 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
*/
case Opt_sloppy:
ctx->sloppy = true;
- dfprintk(MOUNT, "NFS: relaxing parsing rules\n");
break;
}
@@ -870,10 +1069,8 @@ static int nfs_parse_source(struct fs_context *fc,
size_t len;
const char *end;
- if (unlikely(!dev_name || !*dev_name)) {
- dfprintk(MOUNT, "NFS: device name not specified\n");
+ if (unlikely(!dev_name || !*dev_name))
return -EINVAL;
- }
/* Is the host name protected with square brackets? */
if (*dev_name == '[') {
@@ -913,7 +1110,7 @@ static int nfs_parse_source(struct fs_context *fc,
if (!ctx->nfs_server.export_path)
goto out_nomem;
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", ctx->nfs_server.export_path);
+ trace_nfs_mount_path(ctx->nfs_server.export_path);
return 0;
out_bad_devname:
@@ -955,7 +1152,7 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_fh *mntfh = ctx->mntfh;
- struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+ struct sockaddr_storage *sap = &ctx->nfs_server._address;
int extra_flags = NFS_MOUNT_LEGACY_INTERFACE;
int ret;
@@ -1027,10 +1224,13 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
ctx->acdirmax = data->acdirmax;
ctx->need_mount = false;
- memcpy(sap, &data->addr, sizeof(data->addr));
- ctx->nfs_server.addrlen = sizeof(data->addr);
- ctx->nfs_server.port = ntohs(data->addr.sin_port);
- if (sap->sa_family != AF_INET ||
+ if (!is_remount_fc(fc)) {
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ ctx->nfs_server.addrlen = sizeof(data->addr);
+ ctx->nfs_server.port = ntohs(data->addr.sin_port);
+ }
+
+ if (sap->ss_family != AF_INET ||
!nfs_verify_server_address(sap))
goto out_no_address;
@@ -1069,8 +1269,7 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
int ret;
data->context[NFS_MAX_CONTEXT_LEN] = '\0';
- ret = vfs_parse_fs_string(fc, "context",
- data->context, strlen(data->context));
+ ret = vfs_parse_fs_string(fc, "context", data->context);
if (ret < 0)
return ret;
#else
@@ -1107,7 +1306,6 @@ out_no_sec:
return nfs_invalf(fc, "NFS: nfs_mount_data version supports only AUTH_SYS");
out_nomem:
- dfprintk(MOUNT, "NFS: not enough memory to handle mount options");
return -ENOMEM;
out_no_address:
@@ -1187,7 +1385,7 @@ static int nfs4_parse_monolithic(struct fs_context *fc,
struct nfs4_mount_data *data)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
- struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+ struct sockaddr_storage *sap = &ctx->nfs_server._address;
int ret;
char *c;
@@ -1239,7 +1437,7 @@ static int nfs4_parse_monolithic(struct fs_context *fc,
if (IS_ERR(c))
return PTR_ERR(c);
ctx->nfs_server.export_path = c;
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
+ trace_nfs_mount_path(c);
c = strndup_user(data->client_addr.data, 16);
if (IS_ERR(c))
@@ -1301,7 +1499,7 @@ static int nfs_fs_context_validate(struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_subversion *nfs_mod;
- struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+ struct sockaddr_storage *sap = &ctx->nfs_server._address;
int max_namelen = PAGE_SIZE;
int max_pathlen = NFS_MAXPATHLEN;
int port = 0;
@@ -1370,7 +1568,7 @@ static int nfs_fs_context_validate(struct fs_context *fc)
/* Load the NFS protocol module if we haven't done so yet */
if (!ctx->nfs_mod) {
- nfs_mod = get_nfs_version(ctx->version);
+ nfs_mod = find_nfs_version(ctx->version);
if (IS_ERR(nfs_mod)) {
ret = PTR_ERR(nfs_mod);
goto out_version_unavailable;
@@ -1444,7 +1642,7 @@ static int nfs_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
}
nfs_copy_fh(ctx->mntfh, src->mntfh);
- __module_get(ctx->nfs_mod->owner);
+ get_nfs_version(ctx->nfs_mod);
ctx->client_address = NULL;
ctx->mount_server.hostname = NULL;
ctx->nfs_server.export_path = NULL;
@@ -1527,7 +1725,7 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->version = nfss->nfs_client->rpc_ops->version;
ctx->minorversion = nfss->nfs_client->cl_minorversion;
- memcpy(&ctx->nfs_server.address, &nfss->nfs_client->cl_addr,
+ memcpy(&ctx->nfs_server._address, &nfss->nfs_client->cl_addr,
ctx->nfs_server.addrlen);
if (fc->net_ns != net) {
@@ -1536,7 +1734,7 @@ static int nfs_init_fs_context(struct fs_context *fc)
}
ctx->nfs_mod = nfss->nfs_client->cl_nfs_mod;
- __module_get(ctx->nfs_mod->owner);
+ get_nfs_version(ctx->nfs_mod);
} else {
/* defaults */
ctx->timeo = NFS_UNSPEC_TIMEO;
@@ -1550,6 +1748,12 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->selected_flavor = RPC_AUTH_MAXFLAVOR;
ctx->minorversion = 0;
ctx->need_mount = true;
+ ctx->xprtsec.policy = RPC_XPRTSEC_NONE;
+ ctx->xprtsec.cert_serial = TLS_NO_CERT;
+ ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY;
+
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
fc->s_iflags |= SB_I_STABLE_WRITES;
}
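
As an illustrative aside (not part of the patch above; names and values are made up), the new nfs_parse_xprtsec_policy() and the Opt_cert_serial/Opt_privkey_serial handling all come down to matching a keyword or key id given at mount time. A minimal, self-contained userspace sketch of the keyword-to-constant lookup that lookup_constant() performs could look like this:

/* Illustrative sketch only -- not kernel code. */
#include <stdio.h>
#include <string.h>

struct constant_table { const char *name; int value; };

/* Hypothetical policy values standing in for RPC_XPRTSEC_* */
enum { XPRTSEC_NONE, XPRTSEC_TLS_ANON, XPRTSEC_TLS_X509 };

static const struct constant_table xprtsec_policies[] = {
	{ "none", XPRTSEC_NONE },
	{ "tls",  XPRTSEC_TLS_ANON },
	{ "mtls", XPRTSEC_TLS_X509 },
	{ NULL, 0 }
};

/* Mirrors the spirit of lookup_constant(): return 'not_found' on no match. */
static int lookup_constant(const struct constant_table *tbl,
			   const char *name, int not_found)
{
	for (; tbl->name; tbl++)
		if (strcmp(tbl->name, name) == 0)
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("mtls  -> %d\n", lookup_constant(xprtsec_policies, "mtls", -1));
	printf("bogus -> %d\n", lookup_constant(xprtsec_policies, "bogus", -1));
	return 0;
}

In the real parser the matched constant is then translated into the transport-security policy stored in ctx->xprtsec.policy, and an unrecognised keyword is reported via nfs_invalf().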
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
deleted file mode 100644
index 573b1da9342c..000000000000
--- a/fs/nfs/fscache-index.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NFS FS-Cache index structure definition
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/nfs_fs.h>
-#include <linux/nfs_fs_sb.h>
-#include <linux/in6.h>
-#include <linux/iversion.h>
-
-#include "internal.h"
-#include "fscache.h"
-
-#define NFSDBG_FACILITY NFSDBG_FSCACHE
-
-/*
- * Define the NFS filesystem for FS-Cache. Upon registration FS-Cache sticks
- * the cookie for the top-level index object for NFS into here. The top-level
- * index can then have other cache objects inserted into it.
- */
-struct fscache_netfs nfs_fscache_netfs = {
- .name = "nfs",
- .version = 0,
-};
-
-/*
- * Register NFS for caching
- */
-int nfs_fscache_register(void)
-{
- return fscache_register_netfs(&nfs_fscache_netfs);
-}
-
-/*
- * Unregister NFS for caching
- */
-void nfs_fscache_unregister(void)
-{
- fscache_unregister_netfs(&nfs_fscache_netfs);
-}
-
-/*
- * Define the server object for FS-Cache. This is used to describe a server
- * object to fscache_acquire_cookie(). It is keyed by the NFS protocol and
- * server address parameters.
- */
-const struct fscache_cookie_def nfs_fscache_server_index_def = {
- .name = "NFS.server",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-/*
- * Define the superblock object for FS-Cache. This is used to describe a
- * superblock object to fscache_acquire_cookie(). It is keyed by all the NFS
- * parameters that might cause a separate superblock.
- */
-const struct fscache_cookie_def nfs_fscache_super_index_def = {
- .name = "NFS.super",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-/*
- * Consult the netfs about the state of an object
- * - This function can be absent if the index carries no state data
- * - The netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data
- */
-static
-enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct nfs_fscache_inode_auxdata auxdata;
- struct nfs_inode *nfsi = cookie_netfs_data;
-
- if (datalen != sizeof(auxdata))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
- auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
- auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
-
- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
-
- if (memcmp(data, &auxdata, datalen) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-/*
- * Get an extra reference on a read context.
- * - This function can be absent if the completion function doesn't require a
- * context.
- * - The read context is passed back to NFS in the event that a data read on the
- * cache fails with EIO - in which case the server must be contacted to
- * retrieve the data, which requires the read context for security.
- */
-static void nfs_fh_get_context(void *cookie_netfs_data, void *context)
-{
- get_nfs_open_context(context);
-}
-
-/*
- * Release an extra reference on a read context.
- * - This function can be absent if the completion function doesn't require a
- * context.
- */
-static void nfs_fh_put_context(void *cookie_netfs_data, void *context)
-{
- if (context)
- put_nfs_open_context(context);
-}
-
-/*
- * Define the inode object for FS-Cache. This is used to describe an inode
- * object to fscache_acquire_cookie(). It is keyed by the NFS file handle for
- * an inode.
- *
- * Coherency is managed by comparing the copies of i_size, i_mtime and i_ctime
- * held in the cache auxiliary data for the data storage object with those in
- * the inode struct in memory.
- */
-const struct fscache_cookie_def nfs_fscache_inode_object_def = {
- .name = "NFS.fh",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = nfs_fscache_inode_check_aux,
- .get_context = nfs_fh_get_context,
- .put_context = nfs_fh_put_context,
-};
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index d743629e05e1..8b0785178731 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -15,31 +15,27 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
+#include <linux/xarray.h>
+#include <linux/fscache.h>
+#include <linux/netfs.h>
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
+#include "nfstrace.h"
-#define NFSDBG_FACILITY NFSDBG_FSCACHE
+#define NFS_MAX_KEY_LEN 1000
-static struct rb_root nfs_fscache_keys = RB_ROOT;
-static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
-
-/*
- * Layout of the key for an NFS server cache object.
- */
-struct nfs_server_key {
- struct {
- uint16_t nfsversion; /* NFS protocol version */
- uint32_t minorversion; /* NFSv4 minor version */
- uint16_t family; /* address family */
- __be16 port; /* IP port */
- } hdr;
- union {
- struct in_addr ipv4_addr; /* IPv4 address */
- struct in6_addr ipv6_addr; /* IPv6 address */
- };
-} __packed;
+static bool nfs_append_int(char *key, int *_len, unsigned long long x)
+{
+ if (*_len > NFS_MAX_KEY_LEN)
+ return false;
+ if (x == 0)
+ key[(*_len)++] = ',';
+ else
+ *_len += sprintf(key + *_len, ",%llx", x);
+ return true;
+}
/*
* Get the per-client index cookie for an NFS client if the appropriate mount
@@ -47,160 +43,106 @@ struct nfs_server_key {
* - We always try and get an index cookie for the client, but get filehandle
* cookies on a per-superblock basis, depending on the mount flags
*/
-void nfs_fscache_get_client_cookie(struct nfs_client *clp)
+static bool nfs_fscache_get_client_key(struct nfs_client *clp,
+ char *key, int *_len)
{
const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
- struct nfs_server_key key;
- uint16_t len = sizeof(key.hdr);
- memset(&key, 0, sizeof(key));
- key.hdr.nfsversion = clp->rpc_ops->version;
- key.hdr.minorversion = clp->cl_minorversion;
- key.hdr.family = clp->cl_addr.ss_family;
+ *_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
+ ",%u.%u,%x",
+ clp->rpc_ops->version,
+ clp->cl_minorversion,
+ clp->cl_addr.ss_family);
switch (clp->cl_addr.ss_family) {
case AF_INET:
- key.hdr.port = sin->sin_port;
- key.ipv4_addr = sin->sin_addr;
- len += sizeof(key.ipv4_addr);
- break;
+ if (!nfs_append_int(key, _len, sin->sin_port) ||
+ !nfs_append_int(key, _len, sin->sin_addr.s_addr))
+ return false;
+ return true;
case AF_INET6:
- key.hdr.port = sin6->sin6_port;
- key.ipv6_addr = sin6->sin6_addr;
- len += sizeof(key.ipv6_addr);
- break;
+ if (!nfs_append_int(key, _len, sin6->sin6_port) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
+ return false;
+ return true;
default:
printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
clp->cl_addr.ss_family);
- clp->fscache = NULL;
- return;
+ return false;
}
-
- /* create a cache index for looking up filehandles */
- clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
- &nfs_fscache_server_index_def,
- &key, len,
- NULL, 0,
- clp, 0, true);
- dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
- clp, clp->fscache);
-}
-
-/*
- * Dispose of a per-client cookie
- */
-void nfs_fscache_release_client_cookie(struct nfs_client *clp)
-{
- dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
- clp, clp->fscache);
-
- fscache_relinquish_cookie(clp->fscache, NULL, false);
- clp->fscache = NULL;
}
/*
- * Get the cache cookie for an NFS superblock. We have to handle
- * uniquification here because the cache doesn't do it for us.
+ * Get the cache cookie for an NFS superblock.
*
* The default uniquifier is just an empty string, but it may be overridden
* either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
* superblock across an automount point of some nature.
*/
-void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
+int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
- struct nfs_fscache_key *key, *xkey;
+ struct fscache_volume *vcookie;
struct nfs_server *nfss = NFS_SB(sb);
- struct rb_node **p, *parent;
- int diff;
+ unsigned int len = 3;
+ char *key;
- nfss->fscache_key = NULL;
- nfss->fscache = NULL;
- if (!uniq) {
- uniq = "";
- ulen = 1;
+ if (uniq) {
+ nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
+ if (!nfss->fscache_uniq)
+ return -ENOMEM;
}
- key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
+ key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
if (!key)
- return;
-
- key->nfs_client = nfss->nfs_client;
- key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
- key->key.nfs_server.flags = nfss->flags;
- key->key.nfs_server.rsize = nfss->rsize;
- key->key.nfs_server.wsize = nfss->wsize;
- key->key.nfs_server.acregmin = nfss->acregmin;
- key->key.nfs_server.acregmax = nfss->acregmax;
- key->key.nfs_server.acdirmin = nfss->acdirmin;
- key->key.nfs_server.acdirmax = nfss->acdirmax;
- key->key.nfs_server.fsid = nfss->fsid;
- key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;
-
- key->key.uniq_len = ulen;
- memcpy(key->key.uniquifier, uniq, ulen);
-
- spin_lock(&nfs_fscache_keys_lock);
- p = &nfs_fscache_keys.rb_node;
- parent = NULL;
- while (*p) {
- parent = *p;
- xkey = rb_entry(parent, struct nfs_fscache_key, node);
-
- if (key->nfs_client < xkey->nfs_client)
- goto go_left;
- if (key->nfs_client > xkey->nfs_client)
- goto go_right;
-
- diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
- if (diff < 0)
- goto go_left;
- if (diff > 0)
- goto go_right;
-
- if (key->key.uniq_len == 0)
- goto non_unique;
- diff = memcmp(key->key.uniquifier,
- xkey->key.uniquifier,
- key->key.uniq_len);
- if (diff < 0)
- goto go_left;
- if (diff > 0)
- goto go_right;
- goto non_unique;
-
- go_left:
- p = &(*p)->rb_left;
- continue;
- go_right:
- p = &(*p)->rb_right;
+ return -ENOMEM;
+
+ memcpy(key, "nfs", 3);
+ if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
+ !nfs_append_int(key, &len, nfss->fsid.major) ||
+ !nfs_append_int(key, &len, nfss->fsid.minor) ||
+ !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
+ !nfs_append_int(key, &len, nfss->flags) ||
+ !nfs_append_int(key, &len, nfss->rsize) ||
+ !nfs_append_int(key, &len, nfss->wsize) ||
+ !nfs_append_int(key, &len, nfss->acregmin) ||
+ !nfs_append_int(key, &len, nfss->acregmax) ||
+ !nfs_append_int(key, &len, nfss->acdirmin) ||
+ !nfs_append_int(key, &len, nfss->acdirmax) ||
+ !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
+ goto out;
+
+ if (ulen > 0) {
+ if (ulen > NFS_MAX_KEY_LEN - len)
+ goto out;
+ key[len++] = ',';
+ memcpy(key + len, uniq, ulen);
+ len += ulen;
}
-
- rb_link_node(&key->node, parent, p);
- rb_insert_color(&key->node, &nfs_fscache_keys);
- spin_unlock(&nfs_fscache_keys_lock);
- nfss->fscache_key = key;
+ key[len] = 0;
/* create a cache index for looking up filehandles */
- nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
- &nfs_fscache_super_index_def,
- &key->key,
- sizeof(key->key) + ulen,
- NULL, 0,
- nfss, 0, true);
- dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
- nfss, nfss->fscache);
- return;
-
-non_unique:
- spin_unlock(&nfs_fscache_keys_lock);
+ vcookie = fscache_acquire_volume(key,
+ NULL, /* preferred_cache */
+ NULL, 0 /* coherency_data */);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ kfree(key);
+ return PTR_ERR(vcookie);
+ }
+ pr_err("NFS: Cache volume key already in use (%s)\n", key);
+ vcookie = NULL;
+ }
+ nfss->fscache = vcookie;
+
+out:
kfree(key);
- nfss->fscache_key = NULL;
- nfss->fscache = NULL;
- printk(KERN_WARNING "NFS:"
- " Cache request denied due to non-unique superblock keys\n");
+ return 0;
}
/*
@@ -210,32 +152,9 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
{
struct nfs_server *nfss = NFS_SB(sb);
- dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
- nfss, nfss->fscache);
-
- fscache_relinquish_cookie(nfss->fscache, NULL, false);
+ fscache_relinquish_volume(nfss->fscache, NULL, false);
nfss->fscache = NULL;
-
- if (nfss->fscache_key) {
- spin_lock(&nfs_fscache_keys_lock);
- rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
- spin_unlock(&nfs_fscache_keys_lock);
- kfree(nfss->fscache_key);
- nfss->fscache_key = NULL;
- }
-}
-
-static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
- struct nfs_inode *nfsi)
-{
- memset(auxdata, 0, sizeof(*auxdata));
- auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
- auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
- auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
- auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
-
- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
+ kfree(nfss->fscache_uniq);
}
/*
@@ -247,17 +166,23 @@ void nfs_fscache_init_inode(struct inode *inode)
struct nfs_server *nfss = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
- nfsi->fscache = NULL;
+ netfs_inode(inode)->cache = NULL;
if (!(nfss->fscache && S_ISREG(inode->i_mode)))
return;
- nfs_fscache_update_auxdata(&auxdata, nfsi);
+ nfs_fscache_update_auxdata(&auxdata, inode);
- nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
- &nfs_fscache_inode_object_def,
- nfsi->fh.data, nfsi->fh.size,
- &auxdata, sizeof(auxdata),
- nfsi, nfsi->vfs_inode.i_size, false);
+ netfs_inode(inode)->cache = fscache_acquire_cookie(
+ nfss->fscache,
+ 0,
+ nfsi->fh.data, /* index_key */
+ nfsi->fh.size,
+ &auxdata, /* aux_data */
+ sizeof(auxdata),
+ i_size_read(inode));
+
+ if (netfs_inode(inode)->cache)
+ mapping_set_release_always(inode->i_mapping);
}
/*
@@ -265,22 +190,8 @@ void nfs_fscache_init_inode(struct inode *inode)
*/
void nfs_fscache_clear_inode(struct inode *inode)
{
- struct nfs_fscache_inode_auxdata auxdata;
- struct nfs_inode *nfsi = NFS_I(inode);
- struct fscache_cookie *cookie = nfs_i_fscache(inode);
-
- dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
-
- nfs_fscache_update_auxdata(&auxdata, nfsi);
- fscache_relinquish_cookie(cookie, &auxdata, false);
- nfsi->fscache = NULL;
-}
-
-static bool nfs_fscache_can_enable(void *data)
-{
- struct inode *inode = data;
-
- return !inode_is_open_for_write(inode);
+ fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
+ netfs_inode(inode)->cache = NULL;
}
/*
@@ -305,216 +216,172 @@ static bool nfs_fscache_can_enable(void *data)
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
struct nfs_fscache_inode_auxdata auxdata;
- struct nfs_inode *nfsi = NFS_I(inode);
- struct fscache_cookie *cookie = nfs_i_fscache(inode);
+ struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+ bool open_for_write = inode_is_open_for_write(inode);
if (!fscache_cookie_valid(cookie))
return;
- nfs_fscache_update_auxdata(&auxdata, nfsi);
-
- if (inode_is_open_for_write(inode)) {
- dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
- clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
- fscache_disable_cookie(cookie, &auxdata, true);
- fscache_uncache_all_inode_pages(cookie, inode);
- } else {
- dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
- fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
- nfs_fscache_can_enable, inode);
- if (fscache_cookie_enabled(cookie))
- set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+ fscache_use_cookie(cookie, open_for_write);
+ if (open_for_write) {
+ nfs_fscache_update_auxdata(&auxdata, inode);
+ fscache_invalidate(cookie, &auxdata, i_size_read(inode),
+ FSCACHE_INVAL_DIO_WRITE);
}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
-/*
- * Release the caching state associated with a page, if the page isn't busy
- * interacting with the cache.
- * - Returns true (can release page) or false (page busy).
- */
-int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
- if (PageFsCache(page)) {
- struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
-
- BUG_ON(!cookie);
- dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
- cookie, page, NFS_I(page->mapping->host));
+ struct nfs_fscache_inode_auxdata auxdata;
+ struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+ loff_t i_size = i_size_read(inode);
- if (!fscache_maybe_release_page(cookie, page, gfp))
- return 0;
+ nfs_fscache_update_auxdata(&auxdata, inode);
+ fscache_unuse_cookie(cookie, &auxdata, &i_size);
+}
- nfs_inc_fscache_stats(page->mapping->host,
- NFSIOS_FSCACHE_PAGES_UNCACHED);
- }
+int nfs_netfs_read_folio(struct file *file, struct folio *folio)
+{
+ if (!netfs_inode(folio_inode(folio))->cache)
+ return -ENOBUFS;
- return 1;
+ return netfs_read_folio(file, folio);
}
-/*
- * Release the caching state associated with a page if undergoing complete page
- * invalidation.
- */
-void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
+int nfs_netfs_readahead(struct readahead_control *ractl)
{
- struct fscache_cookie *cookie = nfs_i_fscache(inode);
+ struct inode *inode = ractl->mapping->host;
- BUG_ON(!cookie);
+ if (!netfs_inode(inode)->cache)
+ return -ENOBUFS;
- dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
- cookie, page, NFS_I(inode));
+ netfs_readahead(ractl);
+ return 0;
+}
- fscache_wait_on_page_write(cookie, page);
+static atomic_t nfs_netfs_debug_id;
+static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+ if (!file) {
+ if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
+ return -EIO;
+ return 0;
+ }
+
+ rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+ rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
+ /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+ __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+ rreq->io_streams[0].sreq_max_len = NFS_SB(rreq->inode->i_sb)->rsize;
- BUG_ON(!PageLocked(page));
- fscache_uncache_page(cookie, page);
- nfs_inc_fscache_stats(page->mapping->host,
- NFSIOS_FSCACHE_PAGES_UNCACHED);
+ return 0;
}
-/*
- * Handle completion of a page being read from the cache.
- * - Called in process (keventd) context.
- */
-static void nfs_readpage_from_fscache_complete(struct page *page,
- void *context,
- int error)
+static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
- dfprintk(FSCACHE,
- "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
- page, context, error);
+ if (rreq->netfs_priv)
+ put_nfs_open_context(rreq->netfs_priv);
+}
- /*
- * If the read completes with an error, mark the page with PG_checked,
- * unlock the page, and let the VM reissue the readpage.
- */
- if (!error)
- SetPageUptodate(page);
- else
- SetPageChecked(page);
- unlock_page(page);
+static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
+{
+ struct nfs_netfs_io_data *netfs;
+
+ netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
+ if (!netfs)
+ return NULL;
+ netfs->sreq = sreq;
+ refcount_set(&netfs->refcount, 1);
+ return netfs;
}
-/*
- * Retrieve a page from fscache
- */
-int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode, struct page *page)
+static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
- int ret;
+ struct nfs_netfs_io_data *netfs;
+ struct nfs_pageio_descriptor pgio;
+ struct inode *inode = sreq->rreq->inode;
+ struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+ struct page *page;
+ unsigned long idx;
+ pgoff_t start, last;
+ int err;
+
+ start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+ last = ((sreq->start + sreq->len - sreq->transferred - 1) >> PAGE_SHIFT);
+
+ nfs_pageio_init_read(&pgio, inode, false,
+ &nfs_async_read_completion_ops);
+
+ netfs = nfs_netfs_alloc(sreq);
+ if (!netfs) {
+ sreq->error = -ENOMEM;
+ return netfs_read_subreq_terminated(sreq);
+ }
- dfprintk(FSCACHE,
- "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
- nfs_i_fscache(inode), page, page->index, page->flags, inode);
+ pgio.pg_netfs = netfs; /* used in completion */
- if (PageChecked(page)) {
- ClearPageChecked(page);
- return 1;
+ xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
+ /* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+ err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+ if (err < 0) {
+ netfs->error = err;
+ goto out;
+ }
}
+out:
+ nfs_pageio_complete_read(&pgio);
+ nfs_netfs_put(netfs);
+}
+
+void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
+{
+ struct nfs_netfs_io_data *netfs = hdr->netfs;
- ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
- page,
- nfs_readpage_from_fscache_complete,
- ctx,
- GFP_KERNEL);
-
- switch (ret) {
- case 0: /* read BIO submitted (page in fscache) */
- dfprintk(FSCACHE,
- "NFS: readpage_from_fscache: BIO submitted\n");
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
- return ret;
-
- case -ENOBUFS: /* inode not in cache */
- case -ENODATA: /* page not in cache */
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
- dfprintk(FSCACHE,
- "NFS: readpage_from_fscache %d\n", ret);
- return 1;
+ if (!netfs)
+ return;
- default:
- dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
- }
- return ret;
+ nfs_netfs_get(netfs);
}
-/*
- * Retrieve a set of pages from fscache
- */
-int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+int nfs_netfs_folio_unlock(struct folio *folio)
{
- unsigned npages = *nr_pages;
- int ret;
-
- dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
- nfs_i_fscache(inode), npages, inode);
-
- ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
- mapping, pages, nr_pages,
- nfs_readpage_from_fscache_complete,
- ctx,
- mapping_gfp_mask(mapping));
- if (*nr_pages < npages)
- nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
- npages);
- if (*nr_pages > 0)
- nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
- *nr_pages);
-
- switch (ret) {
- case 0: /* read submitted to the cache for all pages */
- BUG_ON(!list_empty(pages));
- BUG_ON(*nr_pages != 0);
- dfprintk(FSCACHE,
- "NFS: nfs_getpages_from_fscache: submitted\n");
-
- return ret;
-
- case -ENOBUFS: /* some pages aren't cached and can't be */
- case -ENODATA: /* some pages aren't cached */
- dfprintk(FSCACHE,
- "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
- return 1;
+ struct inode *inode = folio->mapping->host;
- default:
- dfprintk(FSCACHE,
- "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
- }
+ /*
+ * If fscache is enabled, netfs will unlock pages.
+ */
+ if (netfs_inode(inode)->cache)
+ return 0;
- return ret;
+ return 1;
}
-/*
- * Store a newly fetched page in fscache
- * - PG_fscache must be set on the page
- */
-void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
+void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
- int ret;
-
- dfprintk(FSCACHE,
- "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
- nfs_i_fscache(inode), page, page->index, page->flags, sync);
-
- ret = fscache_write_page(nfs_i_fscache(inode), page,
- inode->i_size, GFP_KERNEL);
- dfprintk(FSCACHE,
- "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
- page, page->index, page->flags, ret);
-
- if (ret != 0) {
- fscache_uncache_page(nfs_i_fscache(inode), page);
- nfs_inc_fscache_stats(inode,
- NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
- } else {
- nfs_inc_fscache_stats(inode,
- NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
- }
+ struct nfs_netfs_io_data *netfs = hdr->netfs;
+ struct netfs_io_subrequest *sreq;
+
+ if (!netfs)
+ return;
+
+ sreq = netfs->sreq;
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
+ sreq->rreq->origin != NETFS_UNBUFFERED_READ &&
+ sreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
+
+ if (hdr->error)
+ netfs->error = hdr->error;
+ else
+ atomic64_add(hdr->res.count, &netfs->transferred);
+
+ nfs_netfs_put(netfs);
+ hdr->netfs = NULL;
}
+
+const struct netfs_request_ops nfs_netfs_ops = {
+ .init_request = nfs_netfs_init_request,
+ .free_request = nfs_netfs_free_request,
+ .issue_read = nfs_netfs_issue_read,
+};
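
The rewritten nfs_fscache_get_super_cookie() above replaces the old packed nfs_fscache_key structure with a printable volume key: each component is appended as a comma-separated hex field, a lone comma stands in for zero, and the uniquifier (if any) is tacked on at the end. As a rough, self-contained approximation of that encoding (buffer size and sample values are arbitrary, not taken from the patch):

/* Illustrative sketch of the comma-separated hex key encoding. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_KEY_LEN 1000	/* arbitrary cap, in the spirit of NFS_MAX_KEY_LEN */

static bool append_int(char *key, int *len, unsigned long long x)
{
	if (*len > MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*len)++] = ',';	/* zero is encoded as a bare comma */
	else
		*len += sprintf(key + *len, ",%llx", x);
	return true;
}

int main(void)
{
	char key[MAX_KEY_LEN + 24] = "nfs";
	int len = 3;

	/* Example components: version, flags, rsize, wsize (values made up). */
	append_int(key, &len, 4);
	append_int(key, &len, 0);
	append_int(key, &len, 0x100000);
	append_int(key, &len, 0x100000);
	key[len] = '\0';

	printf("%s\n", key);	/* prints "nfs,4,,100000,100000" */
	return 0;
}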
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 6754c8607230..9d86868f4998 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -8,51 +8,16 @@
#ifndef _NFS_FSCACHE_H
#define _NFS_FSCACHE_H
+#include <linux/swap.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/fscache.h>
+#include <linux/iversion.h>
#ifdef CONFIG_NFS_FSCACHE
/*
- * set of NFS FS-Cache objects that form a superblock key
- */
-struct nfs_fscache_key {
- struct rb_node node;
- struct nfs_client *nfs_client; /* the server */
-
- /* the elements of the unique key - as used by nfs_compare_super() and
- * nfs_compare_mount_options() to distinguish superblocks */
- struct {
- struct {
- unsigned long s_flags; /* various flags
- * (& NFS_MS_MASK) */
- } super;
-
- struct {
- struct nfs_fsid fsid;
- int flags;
- unsigned int rsize; /* read size */
- unsigned int wsize; /* write size */
- unsigned int acregmin; /* attr cache timeouts */
- unsigned int acregmax;
- unsigned int acdirmin;
- unsigned int acdirmax;
- } nfs_server;
-
- struct {
- rpc_authflavor_t au_flavor;
- } rpc_auth;
-
- /* uniquifier - can be used if nfs_server.flags includes
- * NFS_MOUNT_UNSHARED */
- u8 uniq_len;
- char uniquifier[0];
- } key;
-};
-
-/*
* Definition of the auxiliary data attached to NFS inode storage objects
* within the cache.
*
@@ -69,114 +34,105 @@ struct nfs_fscache_inode_auxdata {
u64 change_attr;
};
-/*
- * fscache-index.c
- */
-extern struct fscache_netfs nfs_fscache_netfs;
-extern const struct fscache_cookie_def nfs_fscache_server_index_def;
-extern const struct fscache_cookie_def nfs_fscache_super_index_def;
-extern const struct fscache_cookie_def nfs_fscache_inode_object_def;
-
-extern int nfs_fscache_register(void);
-extern void nfs_fscache_unregister(void);
-
-/*
- * fscache.c
- */
-extern void nfs_fscache_get_client_cookie(struct nfs_client *);
-extern void nfs_fscache_release_client_cookie(struct nfs_client *);
-
-extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
-extern void nfs_fscache_release_super_cookie(struct super_block *);
-
-extern void nfs_fscache_init_inode(struct inode *);
-extern void nfs_fscache_clear_inode(struct inode *);
-extern void nfs_fscache_open_file(struct inode *, struct file *);
-
-extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
-extern int nfs_fscache_release_page(struct page *, gfp_t);
-
-extern int __nfs_readpage_from_fscache(struct nfs_open_context *,
- struct inode *, struct page *);
-extern int __nfs_readpages_from_fscache(struct nfs_open_context *,
- struct inode *, struct address_space *,
- struct list_head *, unsigned *);
-extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
+struct nfs_netfs_io_data {
+ /*
+ * NFS may split a netfs_io_subrequest into multiple RPCs, each
+ * with their own read completion. In netfs, we can only call
+ * netfs_subreq_terminated() once for each subrequest. Use the
+ * refcount here to double as a marker of the last RPC completion,
+ * and only call netfs via netfs_subreq_terminated() once.
+ */
+ refcount_t refcount;
+ struct netfs_io_subrequest *sreq;
+
+ /*
+ * Final disposition of the netfs_io_subrequest, sent in
+ * netfs_subreq_terminated()
+ */
+ atomic64_t transferred;
+ int error;
+};
-/*
- * wait for a page to complete writing to the cache
- */
-static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
- struct page *page)
+static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
{
- if (PageFsCache(page))
- fscache_wait_on_page_write(nfsi->fscache, page);
+ refcount_inc(&netfs->refcount);
}
-/*
- * release the caching state associated with a page if undergoing complete page
- * invalidation
- */
-static inline void nfs_fscache_invalidate_page(struct page *page,
- struct inode *inode)
+static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
{
- if (PageFsCache(page))
- __nfs_fscache_invalidate_page(page, inode);
+ /* Only the last RPC completion should call netfs_subreq_terminated() */
+ if (!refcount_dec_and_test(&netfs->refcount))
+ return;
+
+ /*
+ * The NFS pageio interface may read a complete page, even when netfs
+ * only asked for a partial page. Specifically, this may be seen when
+ * one thread is truncating a file while another one is reading the last
+ * page of the file.
+ * Correct the final length here to be no larger than the netfs subrequest
+ * length, and thus avoid netfs's "Subreq overread" warning message.
+ */
+ netfs->sreq->transferred = min_t(s64, netfs->sreq->len,
+ atomic64_read(&netfs->transferred));
+ netfs->sreq->error = netfs->error;
+ netfs_read_subreq_terminated(netfs->sreq);
+ kfree(netfs);
}
-
-/*
- * Retrieve a page from an inode data storage object.
- */
-static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct page *page)
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
- if (NFS_I(inode)->fscache)
- return __nfs_readpage_from_fscache(ctx, inode, page);
- return -ENOBUFS;
+ netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
}
+extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
+extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
+extern int nfs_netfs_folio_unlock(struct folio *folio);
/*
- * Retrieve a set of pages from an inode data storage object.
+ * fscache.c
*/
-static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+extern int nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
+extern void nfs_fscache_release_super_cookie(struct super_block *);
+
+extern void nfs_fscache_init_inode(struct inode *);
+extern void nfs_fscache_clear_inode(struct inode *);
+extern void nfs_fscache_open_file(struct inode *, struct file *);
+extern void nfs_fscache_release_file(struct inode *, struct file *);
+extern int nfs_netfs_readahead(struct readahead_control *ractl);
+extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
+
+static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
- if (NFS_I(inode)->fscache)
- return __nfs_readpages_from_fscache(ctx, inode, mapping, pages,
- nr_pages);
- return -ENOBUFS;
+ if (folio_test_private_2(folio)) { /* [DEPRECATED] */
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ folio_wait_private_2(folio);
+ }
+ fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
+ return true;
}
-/*
- * Store a page newly fetched from the server in an inode data storage object
- * in the cache.
- */
-static inline void nfs_readpage_to_fscache(struct inode *inode,
- struct page *page,
- int sync)
+static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
+ struct inode *inode)
{
- if (PageFsCache(page))
- __nfs_readpage_to_fscache(inode, page, sync);
+ memset(auxdata, 0, sizeof(*auxdata));
+ auxdata->mtime_sec = inode_get_mtime(inode).tv_sec;
+ auxdata->mtime_nsec = inode_get_mtime(inode).tv_nsec;
+ auxdata->ctime_sec = inode_get_ctime(inode).tv_sec;
+ auxdata->ctime_nsec = inode_get_ctime(inode).tv_nsec;
+
+ if (NFS_SERVER(inode)->nfs_client->rpc_ops->version == 4)
+ auxdata->change_attr = inode_peek_iversion_raw(inode);
}
/*
* Invalidate the contents of fscache for this inode. This will not sleep.
*/
-static inline void nfs_fscache_invalidate(struct inode *inode)
+static inline void nfs_fscache_invalidate(struct inode *inode, int flags)
{
- fscache_invalidate(NFS_I(inode)->fscache);
-}
+ struct nfs_fscache_inode_auxdata auxdata;
+ struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-/*
- * Wait for an object to finish being invalidated.
- */
-static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
-{
- fscache_wait_on_invalidate(NFS_I(inode)->fscache);
+ nfs_fscache_update_auxdata(&auxdata, inode);
+ fscache_invalidate(cookie, &auxdata, i_size_read(inode), flags);
}
/*
@@ -189,54 +145,58 @@ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
return "no ";
}
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+ struct nfs_pageio_descriptor *desc)
+{
+ hdr->netfs = desc->pg_netfs;
+}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+{
+ desc->pg_netfs = hdr->netfs;
+}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
+{
+ desc->pg_netfs = NULL;
+}
#else /* CONFIG_NFS_FSCACHE */
-static inline int nfs_fscache_register(void) { return 0; }
-static inline void nfs_fscache_unregister(void) {}
-
-static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
-static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
-
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
+static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
+static inline int nfs_netfs_folio_unlock(struct folio *folio)
+{
+ return 1;
+}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
static inline void nfs_fscache_init_inode(struct inode *inode) {}
static inline void nfs_fscache_clear_inode(struct inode *inode) {}
static inline void nfs_fscache_open_file(struct inode *inode,
struct file *filp) {}
-
-static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
-{
- return 1; /* True: may release page */
-}
-static inline void nfs_fscache_invalidate_page(struct page *page,
- struct inode *inode) {}
-static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
- struct page *page) {}
-
-static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct page *page)
+static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
+static inline int nfs_netfs_readahead(struct readahead_control *ractl)
{
return -ENOBUFS;
}
-static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
return -ENOBUFS;
}
-static inline void nfs_readpage_to_fscache(struct inode *inode,
- struct page *page, int sync) {}
-
-static inline void nfs_fscache_invalidate(struct inode *inode) {}
-static inline void nfs_fscache_wait_on_invalidate(struct inode *inode) {}
+static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
+{
+ return true; /* may release folio */
+}
+static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
return "no ";
}
-
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+ struct nfs_pageio_descriptor *desc) {}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
#endif /* CONFIG_NFS_FSCACHE */
#endif /* _NFS_FSCACHE_H */
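
The nfs_netfs_get()/nfs_netfs_put() pair introduced above uses a reference count so that, when NFS fans a single netfs subrequest out into several RPCs, only the last completion finalises the subrequest. A stripped-down userspace approximation of that last-one-out pattern, using C11 atomics in place of refcount_t and a stub terminate function (illustrative only, not kernel code), might look like this:

/* Illustrative last-completion-wins pattern. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_data {
	atomic_int refcount;		/* one ref per outstanding RPC + one for setup */
	atomic_long transferred;	/* bytes accumulated across completions */
	int error;
};

static void terminate(struct io_data *d)
{
	printf("terminated: %ld bytes, error %d\n",
	       atomic_load(&d->transferred), d->error);
	free(d);
}

static void io_put(struct io_data *d)
{
	/* Only the caller that drops the last reference finalises the request. */
	if (atomic_fetch_sub(&d->refcount, 1) == 1)
		terminate(d);
}

int main(void)
{
	struct io_data *d = calloc(1, sizeof(*d));

	atomic_store(&d->refcount, 1);

	/* Two "RPC completions", each taking and dropping a reference. */
	for (int i = 0; i < 2; i++) {
		atomic_fetch_add(&d->refcount, 1);
		atomic_fetch_add(&d->transferred, 4096);
		io_put(d);
	}
	io_put(d);	/* drop the setup reference; this one terminates */
	return 0;
}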
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 11ff2b2e060f..f13d25d95b85 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -62,7 +62,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
}
/*
- * get an NFS2/NFS3 root dentry from the root filehandle
+ * get a root dentry from the root filehandle
*/
int nfs_get_root(struct super_block *s, struct fs_context *fc)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index dd53704c3f40..f76fe406937a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -72,18 +72,15 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
return nfs_fileid_to_ino_t(fattr->fileid);
}
-static int nfs_wait_killable(int mode)
+int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
- freezable_schedule_unsafe();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
+ schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
return 0;
}
-
-int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
-{
- return nfs_wait_killable(mode);
-}
EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
/**
@@ -111,7 +108,7 @@ u64 nfs_compat_user_ino64(u64 fileid)
int nfs_drop_inode(struct inode *inode)
{
- return NFS_STALE(inode) || generic_drop_inode(inode);
+ return NFS_STALE(inode) || inode_generic_drop(inode);
}
EXPORT_SYMBOL_GPL(nfs_drop_inode);
@@ -195,30 +192,34 @@ static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
{
struct nfs_inode *nfsi = NFS_I(inode);
- bool have_delegation = NFS_PROTO(inode)->have_delegation(inode, FMODE_READ);
- if (have_delegation) {
+ if (nfs_have_delegated_attributes(inode)) {
if (!(flags & NFS_INO_REVAL_FORCED))
flags &= ~(NFS_INO_INVALID_MODE |
NFS_INO_INVALID_OTHER |
+ NFS_INO_INVALID_BTIME |
NFS_INO_INVALID_XATTR);
flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
- } else if (flags & NFS_INO_REVAL_PAGECACHE)
- flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
+ }
if (!nfs_has_xattr_cache(nfsi))
flags &= ~NFS_INO_INVALID_XATTR;
if (flags & NFS_INO_INVALID_DATA)
- nfs_fscache_invalidate(inode);
- flags &= ~(NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED);
-
- nfsi->cache_validity |= flags;
+ nfs_fscache_invalidate(inode, 0);
+ flags &= ~NFS_INO_REVAL_FORCED;
+ flags |= nfsi->cache_validity;
if (inode->i_mapping->nrpages == 0)
- nfsi->cache_validity &= ~(NFS_INO_INVALID_DATA |
- NFS_INO_DATA_INVAL_DEFER);
- else if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
- nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+ flags &= ~NFS_INO_INVALID_DATA;
+
+ /* pairs with nfs_clear_invalid_mapping()'s smp_load_acquire() */
+ smp_store_release(&nfsi->cache_validity, flags);
+
+ if (inode->i_mapping->nrpages == 0 ||
+ nfsi->cache_validity & NFS_INO_INVALID_DATA) {
+ nfs_ooo_clear(nfsi);
+ }
+ trace_nfs_set_cache_invalid(inode, 0);
}
EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
@@ -235,19 +236,17 @@ static void nfs_zap_caches_locked(struct inode *inode)
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = jiffies;
- if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
- | NFS_INO_INVALID_XATTR
- | NFS_INO_REVAL_PAGECACHE);
- } else
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
- | NFS_INO_INVALID_XATTR
- | NFS_INO_REVAL_PAGECACHE);
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR |
+ NFS_INO_INVALID_DATA |
+ NFS_INO_INVALID_ACCESS |
+ NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_XATTR);
+ else
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR |
+ NFS_INO_INVALID_ACCESS |
+ NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_XATTR);
nfs_zap_label_cache_locked(nfsi);
}
@@ -282,6 +281,8 @@ EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
void nfs_invalidate_atime(struct inode *inode)
{
+ if (nfs_have_delegated_atime(inode))
+ return;
spin_lock(&inode->i_lock);
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
spin_unlock(&inode->i_lock);
@@ -320,7 +321,7 @@ struct nfs_find_desc {
static int
nfs_find_actor(struct inode *inode, void *opaque)
{
- struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
+ struct nfs_find_desc *desc = opaque;
struct nfs_fh *fh = desc->fh;
struct nfs_fattr *fattr = desc->fattr;
@@ -338,7 +339,7 @@ nfs_find_actor(struct inode *inode, void *opaque)
static int
nfs_init_locked(struct inode *inode, void *opaque)
{
- struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
+ struct nfs_find_desc *desc = opaque;
struct nfs_fattr *fattr = desc->fattr;
set_nfs_fileid(inode, fattr->fileid);
@@ -428,6 +429,7 @@ nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
static void nfs_inode_init_regular(struct nfs_inode *nfsi)
{
atomic_long_set(&nfsi->nrequests, 0);
+ atomic_long_set(&nfsi->redirtied_pages, 0);
INIT_LIST_HEAD(&nfsi->commit_info.list);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
@@ -473,7 +475,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
goto out_no_inode;
}
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long now = jiffies;
@@ -496,6 +498,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
inode->i_data.a_ops = &nfs_file_aops;
nfs_inode_init_regular(nfsi);
+ mapping_set_large_folios(inode->i_mapping);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
inode->i_fop = &nfs_dir_operations;
@@ -517,9 +520,10 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
} else
init_special_inode(inode, inode->i_mode, fattr->rdev);
- memset(&inode->i_atime, 0, sizeof(inode->i_atime));
- memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
- memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
+ inode_set_atime(inode, 0, 0);
+ inode_set_mtime(inode, 0, 0);
+ inode_set_ctime(inode, 0, 0);
+ memset(&nfsi->btime, 0, sizeof(nfsi->btime));
inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
clear_nlink(inode);
@@ -532,17 +536,21 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
nfsi->read_cache_jiffies = fattr->time_start;
nfsi->attr_gencount = fattr->gencount;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME);
+ if (fattr->valid & NFS_ATTR_FATTR_BTIME)
+ nfsi->btime = fattr->btime;
+ else if (fattr_supported & NFS_ATTR_FATTR_BTIME)
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_BTIME);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode_set_iversion_raw(inode, fattr->change_attr);
else
@@ -555,6 +563,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
set_nlink(inode, fattr->nlink);
else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
+ else
+ set_nlink(inode, 1);
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
@@ -563,8 +573,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_gid = fattr->gid;
else if (fattr_supported & NFS_ATTR_FATTR_GROUP)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
- if (nfs_server_capable(inode, NFS_CAP_XATTR))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED &&
@@ -600,7 +608,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode),
nfs_display_fhandle_hash(fh),
- atomic_read(&inode->i_count));
+ icount_read(inode));
out:
return inode;
@@ -611,15 +619,107 @@ out_no_inode:
}
EXPORT_SYMBOL_GPL(nfs_fhget);
+static void
+nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr)
+{
+ unsigned long cache_validity = NFS_I(inode)->cache_validity;
+
+ if (nfs_have_delegated_mtime(inode)) {
+ if (!(cache_validity & NFS_INO_INVALID_CTIME))
+ fattr->valid &= ~(NFS_ATTR_FATTR_PRECTIME |
+ NFS_ATTR_FATTR_CTIME);
+
+ if (!(cache_validity & NFS_INO_INVALID_MTIME))
+ fattr->valid &= ~(NFS_ATTR_FATTR_PREMTIME |
+ NFS_ATTR_FATTR_MTIME);
+
+ if (!(cache_validity & NFS_INO_INVALID_ATIME))
+ fattr->valid &= ~NFS_ATTR_FATTR_ATIME;
+ } else if (nfs_have_delegated_atime(inode)) {
+ if (!(cache_validity & NFS_INO_INVALID_ATIME))
+ fattr->valid &= ~NFS_ATTR_FATTR_ATIME;
+ }
+}
+
+static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr)
+{
+ unsigned int cache_flags = 0;
+
+ if (attr->ia_valid & ATTR_MTIME_SET) {
+ struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
+ struct timespec64 now;
+ int updated = 0;
+
+ now = inode_set_ctime_current(inode);
+ if (!timespec64_equal(&now, &ctime))
+ updated |= S_CTIME;
+
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
+ if (!timespec64_equal(&now, &mtime))
+ updated |= S_MTIME;
+
+ inode_maybe_inc_iversion(inode, updated);
+ cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
+ }
+ if (attr->ia_valid & ATTR_ATIME_SET) {
+ inode_set_atime_to_ts(inode, attr->ia_atime);
+ cache_flags |= NFS_INO_INVALID_ATIME;
+ }
+ NFS_I(inode)->cache_validity &= ~cache_flags;
+}
+
+static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid)
+{
+ enum file_time_flags time_flags = 0;
+ unsigned int cache_flags = 0;
+
+ if (ia_valid & ATTR_MTIME) {
+ time_flags |= S_MTIME | S_CTIME;
+ cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
+ }
+ if (ia_valid & ATTR_ATIME) {
+ time_flags |= S_ATIME;
+ cache_flags |= NFS_INO_INVALID_ATIME;
+ }
+ inode_update_timestamps(inode, time_flags);
+ NFS_I(inode)->cache_validity &= ~cache_flags;
+}
+
+void nfs_update_delegated_atime(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ if (nfs_have_delegated_atime(inode))
+ nfs_update_timestamps(inode, ATTR_ATIME);
+ spin_unlock(&inode->i_lock);
+}
+
+void nfs_update_delegated_mtime_locked(struct inode *inode)
+{
+ if (nfs_have_delegated_mtime(inode))
+ nfs_update_timestamps(inode, ATTR_MTIME);
+}
+
+void nfs_update_delegated_mtime(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ nfs_update_delegated_mtime_locked(inode);
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_update_delegated_mtime);
+
#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
int
-nfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
+ loff_t oldsize = i_size_read(inode);
int error = 0;
+ kuid_t task_uid = current_fsuid();
+ kuid_t owner_uid = inode->i_uid;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
@@ -634,10 +734,39 @@ nfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (error)
return error;
- if (attr->ia_size == i_size_read(inode))
+ if (attr->ia_size == oldsize)
attr->ia_valid &= ~ATTR_SIZE;
}
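+ /*
+ * If this client holds a delegation covering the timestamps, try to
+ * satisfy utimes()-style updates locally and drop them from the RPC.
+ * Explicit timestamps (ATTR_*_SET) are only applied locally when the
+ * caller owns the file; "set to now" updates are always handled
+ * against the delegated attributes.
+ */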
+ if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
+ spin_lock(&inode->i_lock);
+ if (attr->ia_valid & ATTR_MTIME_SET) {
+ if (uid_eq(task_uid, owner_uid)) {
+ nfs_set_timestamps_to_ts(inode, attr);
+ attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
+ ATTR_ATIME|ATTR_ATIME_SET);
+ }
+ } else {
+ nfs_update_timestamps(inode, attr->ia_valid);
+ attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
+ }
+ spin_unlock(&inode->i_lock);
+ } else if (nfs_have_delegated_atime(inode) &&
+ attr->ia_valid & ATTR_ATIME &&
+ !(attr->ia_valid & ATTR_MTIME)) {
+ if (attr->ia_valid & ATTR_ATIME_SET) {
+ if (uid_eq(task_uid, owner_uid)) {
+ spin_lock(&inode->i_lock);
+ nfs_set_timestamps_to_ts(inode, attr);
+ spin_unlock(&inode->i_lock);
+ attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
+ }
+ } else {
+ nfs_update_delegated_atime(inode);
+ attr->ia_valid &= ~ATTR_ATIME;
+ }
+ }
+
/* Optimization: if the end result is no change, don't RPC */
if (((attr->ia_valid & NFS_VALID_ATTRS) & ~(ATTR_FILE|ATTR_OPEN)) == 0)
return 0;
@@ -645,8 +774,10 @@ nfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode)) {
+ nfs_file_block_o_direct(NFS_I(inode));
nfs_sync_inode(inode);
+ }
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
if (fattr == NULL) {
@@ -655,8 +786,12 @@ nfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
- if (error == 0)
+ if (error == 0) {
+ if (attr->ia_valid & ATTR_SIZE)
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ attr->ia_size);
error = nfs_refresh_inode(inode, fattr);
+ }
nfs_free_fattr(fattr);
out:
trace_nfs_setattr_exit(inode, error);
@@ -685,13 +820,15 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
trace_nfs_size_truncate(inode, offset);
i_size_write(inode, offset);
/* Optimisation */
- if (offset == 0)
- NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_DATA |
- NFS_INO_DATA_INVAL_DEFER);
+ if (offset == 0) {
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
+ nfs_ooo_clear(NFS_I(inode));
+ }
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
spin_unlock(&inode->i_lock);
truncate_pagecache(inode, offset);
spin_lock(&inode->i_lock);
+ nfs_update_delegated_mtime_locked(inode);
out:
return err;
@@ -715,8 +852,9 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
spin_lock(&inode->i_lock);
NFS_I(inode)->attr_gencount = fattr->gencount;
if ((attr->ia_valid & ATTR_SIZE) != 0) {
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME |
- NFS_INO_INVALID_BLOCKS);
+ if (!nfs_have_delegated_mtime(inode))
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
nfs_vmtruncate(inode, attr->ia_size);
}
@@ -725,9 +863,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
inode->i_mode & S_ISUID)
inode->i_mode &= ~S_ISUID;
- if ((attr->ia_valid & ATTR_KILL_SGID) != 0 &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP))
+ if (setattr_should_drop_sgid(&nop_mnt_idmap, inode))
inode->i_mode &= ~S_ISGID;
if ((attr->ia_valid & ATTR_MODE) != 0) {
int mode = attr->ia_mode & S_IALLUGO;
@@ -739,7 +875,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME);
@@ -750,14 +886,14 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_ATIME
| NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (attr->ia_valid & ATTR_ATIME_SET)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME);
@@ -766,14 +902,14 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_MTIME
| NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (attr->ia_valid & ATTR_MTIME_SET)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME);
@@ -784,30 +920,37 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
}
EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
-static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry)
+/*
+ * Don't request help from readdirplus if the file is being written to,
+ * or if attribute caching is turned off
+ */
+static bool nfs_getattr_readdirplus_enable(const struct inode *inode)
{
- struct dentry *parent;
+ return nfs_server_capable(inode, NFS_CAP_READDIRPLUS) &&
+ !nfs_have_writebacks(inode) && NFS_MAXATTRTIMEO(inode) > 5 * HZ;
+}
- if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS))
- return;
- parent = dget_parent(dentry);
- nfs_force_use_readdirplus(d_inode(parent));
- dput(parent);
+static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry)
+{
+ if (!IS_ROOT(dentry)) {
+ struct dentry *parent = dget_parent(dentry);
+ nfs_readdir_record_entry_cache_miss(d_inode(parent));
+ dput(parent);
+ }
}
static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry)
{
- struct dentry *parent;
-
- if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS))
- return;
- parent = dget_parent(dentry);
- nfs_advise_use_readdirplus(d_inode(parent));
- dput(parent);
+ if (!IS_ROOT(dentry)) {
+ struct dentry *parent = dget_parent(dentry);
+ nfs_readdir_record_entry_cache_hit(d_inode(parent));
+ dput(parent);
+ }
}
static u32 nfs_get_valid_attrmask(struct inode *inode)
{
+ u64 fattr_valid = NFS_SERVER(inode)->fattr_valid;
unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
u32 reply_mask = STATX_INO | STATX_TYPE;
@@ -827,36 +970,49 @@ static u32 nfs_get_valid_attrmask(struct inode *inode)
reply_mask |= STATX_UID | STATX_GID;
if (!(cache_validity & NFS_INO_INVALID_BLOCKS))
reply_mask |= STATX_BLOCKS;
+ if (!(cache_validity & NFS_INO_INVALID_BTIME) &&
+ (fattr_valid & NFS_ATTR_FATTR_BTIME))
+ reply_mask |= STATX_BTIME;
+ if (!(cache_validity & NFS_INO_INVALID_CHANGE))
+ reply_mask |= STATX_CHANGE_COOKIE;
return reply_mask;
}
-int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct nfs_server *server = NFS_SERVER(inode);
+ u64 fattr_valid = server->fattr_valid;
unsigned long cache_validity;
int err = 0;
bool force_sync = query_flags & AT_STATX_FORCE_SYNC;
bool do_update = false;
+ bool readdirplus_enabled = nfs_getattr_readdirplus_enable(inode);
trace_nfs_getattr_enter(inode);
request_mask &= STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
- STATX_INO | STATX_SIZE | STATX_BLOCKS;
+ STATX_INO | STATX_SIZE | STATX_BLOCKS | STATX_BTIME |
+ STATX_CHANGE_COOKIE;
+
+ if (!(fattr_valid & NFS_ATTR_FATTR_BTIME))
+ request_mask &= ~STATX_BTIME;
if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) {
- nfs_readdirplus_parent_cache_hit(path->dentry);
+ if (readdirplus_enabled)
+ nfs_readdirplus_parent_cache_hit(path->dentry);
goto out_no_revalidate;
}
- /* Flush out writes to the server in order to update c/mtime. */
- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
- S_ISREG(inode->i_mode)) {
- err = filemap_write_and_wait(inode->i_mapping);
- if (err)
- goto out;
+ /* Flush out writes to the server in order to update c/mtime/version. */
+ if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_CHANGE_COOKIE)) &&
+ S_ISREG(inode->i_mode)) {
+ if (nfs_have_delegated_mtime(inode))
+ filemap_fdatawrite(inode->i_mapping);
+ else
+ filemap_write_and_wait(inode->i_mapping);
}
/*
@@ -875,7 +1031,8 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
/* Is the user requesting attributes that might need revalidation? */
if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME|
STATX_MTIME|STATX_UID|STATX_GID|
- STATX_SIZE|STATX_BLOCKS)))
+ STATX_SIZE|STATX_BLOCKS|STATX_BTIME|
+ STATX_CHANGE_COOKIE)))
goto out_no_revalidate;
/* Check whether the cached attributes are stale */
@@ -898,26 +1055,45 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
do_update |= cache_validity & NFS_INO_INVALID_OTHER;
if (request_mask & STATX_BLOCKS)
do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
+ if (request_mask & STATX_BTIME)
+ do_update |= cache_validity & NFS_INO_INVALID_BTIME;
if (do_update) {
- /* Update the attribute cache */
- if (!(server->flags & NFS_MOUNT_NOAC))
+ if (readdirplus_enabled)
nfs_readdirplus_parent_cache_miss(path->dentry);
- else
- nfs_readdirplus_parent_cache_hit(path->dentry);
err = __nfs_revalidate_inode(server, inode);
if (err)
goto out;
- } else
+ } else if (readdirplus_enabled)
nfs_readdirplus_parent_cache_hit(path->dentry);
out_no_revalidate:
/* Only return attributes that were revalidated. */
stat->result_mask = nfs_get_valid_attrmask(inode) | request_mask;
- generic_fillattr(&init_user_ns, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
+ stat->change_cookie = inode_peek_iversion_raw(inode);
+ stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
+ if (server->change_attr_type != NFS4_CHANGE_TYPE_IS_UNDEFINED)
+ stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
if (S_ISDIR(inode->i_mode))
stat->blksize = NFS_SERVER(inode)->dtsize;
+ stat->btime = NFS_I(inode)->btime;
+
+ /* Special handling for STATX_DIOALIGN and STATX_DIO_READ_ALIGN:
+ * - NFS doesn't have DIO alignment constraints, so avoid fetching
+ * these DIO attrs from the server and just respond with the most
+ * accommodating limits (so the client will issue supported DIO).
+ * - this is unintuitive, but the most coarse-grained
+ * dio_offset_align is the most accommodating.
+ */
+ if ((request_mask & (STATX_DIOALIGN | STATX_DIO_READ_ALIGN)) &&
+ S_ISREG(inode->i_mode)) {
+ stat->result_mask |= STATX_DIOALIGN | STATX_DIO_READ_ALIGN;
+ stat->dio_mem_align = 4; /* 4-byte alignment */
+ stat->dio_offset_align = PAGE_SIZE;
+ stat->dio_read_offset_align = stat->dio_offset_align;
+ }
out:
trace_nfs_getattr_exit(inode, err);
return err;
@@ -954,7 +1130,7 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
res = __nfs_find_lock_context(ctx);
rcu_read_unlock();
if (res == NULL) {
- new = kmalloc(sizeof(*new), GFP_KERNEL);
+ new = kmalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
if (new == NULL)
return ERR_PTR(-ENOMEM);
nfs_init_lock_context(new);
@@ -1010,7 +1186,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
if (!is_sync)
return;
inode = d_inode(ctx->dentry);
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ if (nfs_have_read_or_write_delegation(inode))
return;
nfsi = NFS_I(inode);
if (inode->i_mapping->nrpages == 0)
@@ -1032,7 +1208,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
{
struct nfs_open_context *ctx;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
if (!ctx)
return ERR_PTR(-ENOMEM);
nfs_sb_active(dentry->d_sb);
@@ -1051,6 +1227,8 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
ctx->mdsthreshold = NULL;
+ nfs_localio_file_init(&ctx->nfl);
+
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
@@ -1082,6 +1260,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
nfs_sb_deactive(sb);
put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1));
kfree(ctx->mdsthreshold);
+ nfs_close_local_fh(&ctx->nfl);
kfree_rcu(ctx, rcu_head);
}
@@ -1107,7 +1286,7 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
spin_lock(&inode->i_lock);
if (list_empty(&nfsi->open_files) &&
- (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
+ nfs_ooo_test(nfsi))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA |
NFS_INO_REVAL_FORCED);
list_add_tail_rcu(&ctx->list, &nfsi->open_files);
@@ -1174,7 +1353,8 @@ int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
- ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
+ ctx = alloc_nfs_open_context(file_dentry(filp),
+ flags_to_mode(filp->f_flags), filp);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);
@@ -1182,7 +1362,6 @@ int nfs_open(struct inode *inode, struct file *filp)
nfs_fscache_open_file(inode, filp);
return 0;
}
-EXPORT_SYMBOL_GPL(nfs_open);
/*
* This function is called whenever some part of NFS notices that
@@ -1288,6 +1467,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
{
int ret;
+ nfs_fscache_invalidate(inode, 0);
if (mapping->nrpages != 0) {
if (S_ISREG(inode->i_mode)) {
ret = nfs_sync_mapping(mapping);
@@ -1299,7 +1479,6 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
return ret;
}
nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
- nfs_fscache_wait_on_invalidate(inode);
dfprintk(PAGECACHE, "NFS: (%s/%Lu) data cache invalidated\n",
inode->i_sb->s_id,
@@ -1334,9 +1513,17 @@ int nfs_clear_invalid_mapping(struct address_space *mapping)
*/
for (;;) {
ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING,
- nfs_wait_bit_killable, TASK_KILLABLE);
+ nfs_wait_bit_killable,
+ TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
if (ret)
goto out;
+ smp_rmb(); /* pairs with smp_wmb() below */
+ if (test_bit(NFS_INO_INVALIDATING, bitlock))
+ continue;
+ /* pairs with nfs_set_cache_invalid()'s smp_store_release() */
+ if (!(smp_load_acquire(&nfsi->cache_validity) & NFS_INO_INVALID_DATA))
+ goto out;
+ /* Slow-path that double-checks with spinlock held */
spin_lock(&inode->i_lock);
if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
spin_unlock(&inode->i_lock);
@@ -1350,8 +1537,8 @@ int nfs_clear_invalid_mapping(struct address_space *mapping)
set_bit(NFS_INO_INVALIDATING, bitlock);
smp_wmb();
- nfsi->cache_validity &=
- ~(NFS_INO_INVALID_DATA | NFS_INO_DATA_INVAL_DEFER);
+ nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+ nfs_ooo_clear(nfsi);
spin_unlock(&inode->i_lock);
trace_nfs_invalidate_mapping_enter(inode);
ret = nfs_invalidate_mapping(inode, mapping);
@@ -1441,18 +1628,18 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
}
/* If we have atomic WCC data, we may update some attributes */
- ts = inode->i_ctime;
+ ts = inode_get_ctime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
&& timespec64_equal(&ts, &fattr->pre_ctime)) {
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
}
- ts = inode->i_mtime;
+ ts = inode_get_mtime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
&& timespec64_equal(&ts, &fattr->pre_mtime)) {
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
}
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
@@ -1479,7 +1666,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
unsigned long invalid = 0;
struct timespec64 ts;
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ if (nfs_have_delegated_attributes(inode))
return 0;
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
@@ -1503,11 +1690,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
invalid |= NFS_INO_INVALID_CHANGE;
- ts = inode->i_mtime;
+ ts = inode_get_mtime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec64_equal(&ts, &fattr->mtime))
invalid |= NFS_INO_INVALID_MTIME;
- ts = inode->i_ctime;
+ ts = inode_get_ctime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec64_equal(&ts, &fattr->ctime))
invalid |= NFS_INO_INVALID_CTIME;
@@ -1531,7 +1718,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
invalid |= NFS_INO_INVALID_NLINK;
- ts = inode->i_atime;
+ ts = inode_get_atime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec64_equal(&ts, &fattr->atime))
invalid |= NFS_INO_INVALID_ATIME;
@@ -1562,6 +1749,7 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
fattr->gencount = nfs_inc_attr_generation_counter();
fattr->owner_name = NULL;
fattr->group_name = NULL;
+ fattr->mdsthreshold = NULL;
}
EXPORT_SYMBOL_GPL(nfs_fattr_init);
@@ -1585,7 +1773,7 @@ struct nfs_fattr *nfs_alloc_fattr(void)
{
struct nfs_fattr *fattr;
- fattr = kmalloc(sizeof(*fattr), GFP_NOFS);
+ fattr = kmalloc(sizeof(*fattr), GFP_KERNEL);
if (fattr != NULL) {
nfs_fattr_init(fattr);
fattr->label = NULL;
@@ -1601,7 +1789,7 @@ struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server)
if (!fattr)
return NULL;
- fattr->label = nfs4_label_alloc(server, GFP_NOFS);
+ fattr->label = nfs4_label_alloc(server, GFP_KERNEL);
if (IS_ERR(fattr->label)) {
kfree(fattr);
return NULL;
@@ -1615,7 +1803,7 @@ struct nfs_fh *nfs_alloc_fhandle(void)
{
struct nfs_fh *fh;
- fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS);
+ fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
if (fh != NULL)
fh->size = 0;
return fh;
@@ -1800,7 +1988,7 @@ static int nfs_inode_finish_partial_attr_update(const struct nfs_fattr *fattr,
NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_OTHER |
- NFS_INO_INVALID_NLINK;
+ NFS_INO_INVALID_NLINK | NFS_INO_INVALID_BTIME;
unsigned long cache_validity = NFS_I(inode)->cache_validity;
enum nfs4_change_attr_type ctype = NFS_SERVER(inode)->change_attr_type;
@@ -1813,6 +2001,66 @@ static int nfs_inode_finish_partial_attr_update(const struct nfs_fattr *fattr,
return 0;
}
+static void nfs_ooo_merge(struct nfs_inode *nfsi,
+ u64 start, u64 end)
+{
+ int i, cnt;
+
+ if (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)
+ /* No point merging anything */
+ return;
+
+ if (!nfsi->ooo) {
+ nfsi->ooo = kmalloc(sizeof(*nfsi->ooo), GFP_ATOMIC);
+ if (!nfsi->ooo) {
+ nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
+ return;
+ }
+ nfsi->ooo->cnt = 0;
+ }
+
+ /* add this range, merging if possible */
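+ /* e.g. with existing gaps [10,20) and [30,40) (in change-attribute
+ * space), a call with start=20 end=30 first extends to [10,30), then
+ * absorbs [30,40), leaving the single gap [10,40).
+ */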
+ cnt = nfsi->ooo->cnt;
+ for (i = 0; i < cnt; i++) {
+ if (end == nfsi->ooo->gap[i].start)
+ end = nfsi->ooo->gap[i].end;
+ else if (start == nfsi->ooo->gap[i].end)
+ start = nfsi->ooo->gap[i].start;
+ else
+ continue;
+ /* Remove 'i' from table and loop to insert the new range */
+ cnt -= 1;
+ nfsi->ooo->gap[i] = nfsi->ooo->gap[cnt];
+ i = -1;
+ }
+ if (start != end) {
+ if (cnt >= ARRAY_SIZE(nfsi->ooo->gap)) {
+ nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
+ kfree(nfsi->ooo);
+ nfsi->ooo = NULL;
+ return;
+ }
+ nfsi->ooo->gap[cnt].start = start;
+ nfsi->ooo->gap[cnt].end = end;
+ cnt += 1;
+ }
+ nfsi->ooo->cnt = cnt;
+}
+
+static void nfs_ooo_record(struct nfs_inode *nfsi,
+ struct nfs_fattr *fattr)
+{
+ /* This reply was out-of-order, so record the
+ * pre/post change ids, possibly cancelling
+ * gaps created when iversion was jumped forward.
+ */
+ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) &&
+ (fattr->valid & NFS_ATTR_FATTR_PRECHANGE))
+ nfs_ooo_merge(nfsi,
+ fattr->change_attr,
+ fattr->pre_change_attr);
+}
+
static int nfs_refresh_inode_locked(struct inode *inode,
struct nfs_fattr *fattr)
{
@@ -1823,8 +2071,12 @@ static int nfs_refresh_inode_locked(struct inode *inode,
if (attr_cmp > 0 || nfs_inode_finish_partial_attr_update(fattr, inode))
ret = nfs_update_inode(inode, fattr);
- else if (attr_cmp == 0)
- ret = nfs_check_inode_attributes(inode, fattr);
+ else {
+ nfs_ooo_record(NFS_I(inode), fattr);
+
+ if (attr_cmp == 0)
+ ret = nfs_check_inode_attributes(inode, fattr);
+ }
trace_nfs_refresh_inode_exit(inode, ret);
return ret;
@@ -1915,6 +2167,8 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
if (attr_cmp < 0)
return 0;
if ((fattr->valid & NFS_ATTR_FATTR) == 0 || !attr_cmp) {
+ /* Record the pre/post change info before clearing PRECHANGE */
+ nfs_ooo_record(NFS_I(inode), fattr);
fattr->valid &= ~(NFS_ATTR_FATTR_PRECHANGE
| NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PREMTIME
@@ -1928,12 +2182,12 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECTIME) == 0) {
- fattr->pre_ctime = inode->i_ctime;
+ fattr->pre_ctime = inode_get_ctime(inode);
fattr->valid |= NFS_ATTR_FATTR_PRECTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
- fattr->pre_mtime = inode->i_mtime;
+ fattr->pre_mtime = inode_get_mtime(inode);
fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
@@ -2000,10 +2254,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
bool attr_changed = false;
bool have_delegation;
- dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
+ dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%llx)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
nfs_display_fhandle_hash(NFS_FH(inode)),
- atomic_read(&inode->i_count), fattr->valid);
+ icount_read(inode), fattr->valid);
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
/* Only a mounted-on-fileid? Just exit */
@@ -2049,6 +2303,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfsi->read_cache_jiffies = fattr->time_start;
+ /* Fix up any delegated attributes in the struct nfs_fattr */
+ nfs_fattr_fixup_delegated(inode, fattr);
+
save_cache_validity = nfsi->cache_validity;
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ATIME
@@ -2069,6 +2326,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
+ if (!have_writers && nfsi->ooo && nfsi->ooo->cnt == 1 &&
+ nfsi->ooo->gap[0].end == inode_peek_iversion_raw(inode)) {
+ /* There is one remaining gap that hasn't been
+ * merged into iversion - do that now.
+ */
+ inode_set_iversion_raw(inode, nfsi->ooo->gap[0].start);
+ kfree(nfsi->ooo);
+ nfsi->ooo = NULL;
+ }
if (!inode_eq_iversion_raw(inode, fattr->change_attr)) {
/* Could it be a race with writeback? */
if (!(have_writers || have_delegation)) {
@@ -2083,15 +2349,19 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
| NFS_INO_INVALID_BLOCKS
| NFS_INO_INVALID_NLINK
| NFS_INO_INVALID_MODE
- | NFS_INO_INVALID_OTHER;
+ | NFS_INO_INVALID_OTHER
+ | NFS_INO_INVALID_BTIME;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
attr_changed = true;
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id,
inode->i_ino);
- } else if (!have_delegation)
- nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
+ } else if (!have_delegation) {
+ nfs_ooo_record(nfsi, fattr);
+ nfs_ooo_merge(nfsi, inode_peek_iversion_raw(inode),
+ fattr->change_attr);
+ }
inode_set_iversion_raw(inode, fattr->change_attr);
}
} else {
@@ -2103,17 +2373,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_MTIME;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_CTIME;
+ if (fattr->valid & NFS_ATTR_FATTR_BTIME)
+ nfsi->btime = fattr->btime;
+ else if (fattr_supported & NFS_ATTR_FATTR_BTIME)
+ nfsi->cache_validity |=
+ save_cache_validity & NFS_INO_INVALID_BTIME;
+
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
new_isize = nfs_size_to_loff_t(fattr->size);
@@ -2139,7 +2415,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
save_cache_validity & NFS_INO_INVALID_SIZE;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_ATIME;
@@ -2240,23 +2516,27 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
struct inode *nfs_alloc_inode(struct super_block *sb)
{
struct nfs_inode *nfsi;
- nfsi = kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
+ nfsi = alloc_inode_sb(sb, nfs_inode_cachep, GFP_KERNEL);
if (!nfsi)
return NULL;
nfsi->flags = 0UL;
nfsi->cache_validity = 0UL;
+ nfsi->ooo = NULL;
#if IS_ENABLED(CONFIG_NFS_V4)
nfsi->nfs4_acl = NULL;
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_2
nfsi->xattr_cache = NULL;
#endif
+ nfs_netfs_inode_init(nfsi);
+
return &nfsi->vfs_inode;
}
EXPORT_SYMBOL_GPL(nfs_alloc_inode);
void nfs_free_inode(struct inode *inode)
{
+ kfree(NFS_I(inode)->ooo);
kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
}
EXPORT_SYMBOL_GPL(nfs_free_inode);
@@ -2273,7 +2553,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
static void init_once(void *foo)
{
- struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+ struct nfs_inode *nfsi = foo;
inode_init_once(&nfsi->vfs_inode);
INIT_LIST_HEAD(&nfsi->open_files);
@@ -2287,7 +2567,7 @@ static int __init nfs_init_inodecache(void)
nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
sizeof(struct nfs_inode),
0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
if (nfs_inode_cachep == NULL)
return -ENOMEM;
@@ -2305,35 +2585,54 @@ static void nfs_destroy_inodecache(void)
kmem_cache_destroy(nfs_inode_cachep);
}
+struct workqueue_struct *nfslocaliod_workqueue;
struct workqueue_struct *nfsiod_workqueue;
EXPORT_SYMBOL_GPL(nfsiod_workqueue);
/*
- * start up the nfsiod workqueue
+ * Destroy the nfsiod workqueues
*/
-static int nfsiod_start(void)
+static void nfsiod_stop(void)
{
struct workqueue_struct *wq;
- dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
- if (wq == NULL)
- return -ENOMEM;
- nfsiod_workqueue = wq;
- return 0;
+
+ wq = nfsiod_workqueue;
+ if (wq != NULL) {
+ nfsiod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ wq = nfslocaliod_workqueue;
+ if (wq != NULL) {
+ nfslocaliod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#endif /* CONFIG_NFS_LOCALIO */
}
/*
- * Destroy the nfsiod workqueue
+ * Start the nfsiod workqueues
*/
-static void nfsiod_stop(void)
+static int nfsiod_start(void)
{
- struct workqueue_struct *wq;
-
- wq = nfsiod_workqueue;
- if (wq == NULL)
- return;
- nfsiod_workqueue = NULL;
- destroy_workqueue(wq);
+ dprintk("RPC: creating workqueue nfsiod\n");
+ nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (nfsiod_workqueue == NULL)
+ return -ENOMEM;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /*
+ * localio writes need to use a normal (non-memreclaim) workqueue.
+ * When we start getting low on space, XFS may call flush_work() on
+ * a non-memreclaim workqueue, which causes a priority inversion problem.
+ */
+ dprintk("RPC: creating workqueue nfslocaliod\n");
+ nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);
+ if (unlikely(nfslocaliod_workqueue == NULL)) {
+ nfsiod_stop();
+ return -ENOMEM;
+ }
+#endif /* CONFIG_NFS_LOCALIO */
+ return 0;
}
unsigned int nfs_net_id;
@@ -2341,12 +2640,32 @@ EXPORT_SYMBOL_GPL(nfs_net_id);
static int nfs_net_init(struct net *net)
{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ int err;
+
nfs_clients_init(net);
- return nfs_fs_proc_net_init(net);
+
+ if (!rpc_proc_register(net, &nn->rpcstats)) {
+ err = -ENOMEM;
+ goto err_proc_rpc;
+ }
+
+ err = nfs_fs_proc_net_init(net);
+ if (err)
+ goto err_proc_nfs;
+
+ return 0;
+
+err_proc_nfs:
+ rpc_proc_unregister(net, "nfs");
+err_proc_rpc:
+ nfs_clients_exit(net);
+ return err;
}
static void nfs_net_exit(struct net *net)
{
+ rpc_proc_unregister(net, "nfs");
nfs_fs_proc_net_exit(net);
nfs_clients_exit(net);
}
@@ -2358,6 +2677,35 @@ static struct pernet_operations nfs_net_ops = {
.size = sizeof(struct nfs_net),
};
+#ifdef CONFIG_KEYS
+static struct key *nfs_keyring;
+
+static int __init nfs_init_keyring(void)
+{
+ nfs_keyring = keyring_alloc(".nfs",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ (KEY_USR_ALL & ~KEY_USR_SETATTR),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ return PTR_ERR_OR_ZERO(nfs_keyring);
+}
+
+static void nfs_exit_keyring(void)
+{
+ key_put(nfs_keyring);
+}
+#else
+static inline int nfs_init_keyring(void)
+{
+ return 0;
+}
+
+static inline void nfs_exit_keyring(void)
+{
+}
+#endif /* CONFIG_KEYS */
+
/*
* Initialize NFS
*/
@@ -2365,6 +2713,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_init_keyring();
+ if (err)
+ return err;
+
err = nfs_sysfs_init();
if (err < 0)
goto out10;
@@ -2373,10 +2725,6 @@ static int __init init_nfs_fs(void)
if (err < 0)
goto out9;
- err = nfs_fscache_register();
- if (err < 0)
- goto out8;
-
err = nfsiod_start();
if (err)
goto out7;
@@ -2405,15 +2753,12 @@ static int __init init_nfs_fs(void)
if (err)
goto out1;
- rpc_proc_register(&init_net, &nfs_rpcstat);
-
err = register_nfs_fs();
if (err)
goto out0;
return 0;
out0:
- rpc_proc_unregister(&init_net, "nfs");
nfs_destroy_directcache();
out1:
nfs_destroy_writepagecache();
@@ -2428,12 +2773,11 @@ out5:
out6:
nfsiod_stop();
out7:
- nfs_fscache_unregister();
-out8:
unregister_pernet_subsys(&nfs_net_ops);
out9:
nfs_sysfs_exit();
out10:
+ nfs_exit_keyring();
return err;
}
@@ -2444,17 +2788,17 @@ static void __exit exit_nfs_fs(void)
nfs_destroy_readpagecache();
nfs_destroy_inodecache();
nfs_destroy_nfspagecache();
- nfs_fscache_unregister();
unregister_pernet_subsys(&nfs_net_ops);
- rpc_proc_unregister(&init_net, "nfs");
unregister_nfs_fs();
nfs_fs_proc_exit();
nfsiod_stop();
nfs_sysfs_exit();
+ nfs_exit_keyring();
}
/* Not quite true; I just maintain it */
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_DESCRIPTION("NFS client support");
MODULE_LICENSE("GPL");
module_param(enable_ino64, bool, 0644);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 12f6acb483bb..2ecd38e1d17a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -6,12 +6,14 @@
#include "nfs4_fs.h"
#include <linux/fs_context.h>
#include <linux/security.h>
+#include <linux/compiler_attributes.h>
#include <linux/crc32.h>
#include <linux/sunrpc/addr.h>
#include <linux/nfs_page.h>
+#include <linux/nfslocalio.h>
#include <linux/wait_bit.h>
-#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
+#define NFS_SB_MASK (SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
extern const struct export_operations nfs_export_ops;
@@ -42,6 +44,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
return true;
}
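+/*
+ * Derive an fmode_t from open(2) flags: O_RDONLY and O_RDWR grant
+ * FMODE_READ, O_WRONLY and O_RDWR grant FMODE_WRITE, and any exec
+ * intent carried in the flags is preserved.
+ */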
+static inline fmode_t flags_to_mode(int flags)
+{
+ fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+ if ((flags & O_ACCMODE) != O_WRONLY)
+ res |= FMODE_READ;
+ if ((flags & O_ACCMODE) != O_RDONLY)
+ res |= FMODE_WRITE;
+ return res;
+}
+
/*
* Note: RFC 1813 doesn't limit the number of auth flavors that
* a server can return, so make something up.
@@ -59,7 +71,7 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
struct nfs_client_initdata {
unsigned long init_flags;
const char *hostname; /* Hostname of the server */
- const struct sockaddr *addr; /* Address of the server */
+ const struct sockaddr_storage *addr; /* Address of the server */
const char *nodename; /* Hostname of the client */
const char *ip_addr; /* IP address of the client */
size_t addrlen;
@@ -71,6 +83,9 @@ struct nfs_client_initdata {
struct net *net;
const struct rpc_timeout *timeparms;
const struct cred *cred;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
/*
@@ -91,6 +106,7 @@ struct nfs_fs_context {
unsigned int bsize;
struct nfs_auth_info auth_info;
rpc_authflavor_t selected_flavor;
+ struct xprtsec_parms xprtsec;
char *client_address;
unsigned int version;
unsigned int minorversion;
@@ -98,6 +114,7 @@ struct nfs_fs_context {
unsigned short protofamily;
unsigned short mountfamily;
bool has_sec_mnt_opts;
+ int lock_status;
struct {
union {
@@ -139,6 +156,12 @@ struct nfs_fs_context {
} clone_data;
};
+enum nfs_lock_status {
+ NFS_LOCK_NOT_SET = 0,
+ NFS_LOCK_LOCK = 1,
+ NFS_LOCK_NOLOCK = 2,
+};
+
#define nfs_errorf(fc, fmt, ...) ((fc)->log.log ? \
errorf(fc, fmt, ## __VA_ARGS__) : \
({ dprintk(fmt "\n", ## __VA_ARGS__); }))
@@ -170,7 +193,7 @@ static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
/* mount_clnt.c */
struct nfs_mount_request {
- struct sockaddr *sap;
+ struct sockaddr_storage *sap;
size_t salen;
char *hostname;
char *dirpath;
@@ -184,7 +207,6 @@ struct nfs_mount_request {
};
extern int nfs_mount(struct nfs_mount_request *info, int timeo, int retrans);
-extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern const struct rpc_program nfs_program;
@@ -209,11 +231,11 @@ extern struct nfs_client *
nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
struct nfs4_sessionid *, u32);
extern struct nfs_server *nfs_create_server(struct fs_context *);
-extern void nfs4_server_set_init_caps(struct nfs_server *);
+extern void nfs_server_set_init_caps(struct nfs_server *);
extern struct nfs_server *nfs4_create_server(struct fs_context *);
extern struct nfs_server *nfs4_create_referral_server(struct fs_context *);
extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
- struct sockaddr *sap, size_t salen,
+ struct sockaddr_storage *sap, size_t salen,
struct net *net);
extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
@@ -225,7 +247,7 @@ extern int nfs_client_init_status(const struct nfs_client *clp);
extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
- const struct sockaddr *ds_addr,
+ const struct sockaddr_storage *ds_addr,
int ds_addrlen, int ds_proto,
unsigned int ds_timeo,
unsigned int ds_retrans,
@@ -233,7 +255,7 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
struct inode *);
extern struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
- const struct sockaddr *ds_addr, int ds_addrlen,
+ const struct sockaddr_storage *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo,
unsigned int ds_retrans);
#ifdef CONFIG_PROC_FS
@@ -287,7 +309,8 @@ void nfs_pgio_header_free(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags);
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio);
void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
@@ -366,40 +389,60 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *);
/* dir.c */
-extern void nfs_advise_use_readdirplus(struct inode *dir);
-extern void nfs_force_use_readdirplus(struct inode *dir);
+extern void nfs_readdir_record_entry_cache_hit(struct inode *dir);
+extern void nfs_readdir_record_entry_cache_miss(struct inode *dir);
extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
struct shrink_control *sc);
extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
struct shrink_control *sc);
struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
-int nfs_create(struct user_namespace *, struct inode *, struct dentry *,
+void nfs_d_prune_case_insensitive_aliases(struct inode *inode);
+int nfs_create(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, bool);
-int nfs_mkdir(struct user_namespace *, struct inode *, struct dentry *,
- umode_t);
+struct dentry *nfs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t);
int nfs_rmdir(struct inode *, struct dentry *);
int nfs_unlink(struct inode *, struct dentry *);
-int nfs_symlink(struct user_namespace *, struct inode *, struct dentry *,
+int nfs_symlink(struct mnt_idmap *, struct inode *, struct dentry *,
const char *);
int nfs_link(struct dentry *, struct inode *, struct dentry *);
-int nfs_mknod(struct user_namespace *, struct inode *, struct dentry *, umode_t,
+int nfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *, umode_t,
dev_t);
-int nfs_rename(struct user_namespace *, struct inode *, struct dentry *,
+int nfs_rename(struct mnt_idmap *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
+#ifdef CONFIG_NFS_V4_2
+static inline __u32 nfs_access_xattr_mask(const struct nfs_server *server)
+{
+ if (!(server->caps & NFS_CAP_XATTR))
+ return 0;
+ return NFS4_ACCESS_XAREAD | NFS4_ACCESS_XAWRITE | NFS4_ACCESS_XALIST;
+}
+#else
+static inline __u32 nfs_access_xattr_mask(const struct nfs_server *server)
+{
+ return 0;
+}
+#endif
+
/* file.c */
int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
loff_t nfs_file_llseek(struct file *, loff_t, int);
ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
-int nfs_file_mmap(struct file *, struct vm_area_struct *);
+ssize_t nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+int nfs_file_mmap_prepare(struct vm_area_desc *);
ssize_t nfs_file_write(struct kiocb *, struct iov_iter *);
int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
int nfs_flock(struct file *, int, struct file_lock *);
int nfs_check_flags(int);
+void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
+ loff_t to);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
+extern struct workqueue_struct *nfslocaliod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_free_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
@@ -410,7 +453,63 @@ extern void nfs_zap_acl_cache(struct inode *inode);
extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
-extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/* localio.c */
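+/*
+ * Describes how a misaligned O_DIRECT request is carved into at most
+ * three extents: an unaligned head, a DIO-aligned middle and an
+ * unaligned tail.
+ */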
+struct nfs_local_dio {
+ u32 mem_align;
+ u32 offset_align;
+ loff_t middle_offset;
+ loff_t end_offset;
+ ssize_t start_len; /* Length for misaligned first extent */
+ ssize_t middle_len; /* Length for DIO-aligned middle extent */
+ ssize_t end_len; /* Length for misaligned last extent */
+};
+
+extern void nfs_local_probe_async(struct nfs_client *);
+extern void nfs_local_probe_async_work(struct work_struct *);
+extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
+ const struct cred *,
+ struct nfs_fh *,
+ struct nfs_file_localio *,
+ const fmode_t);
+extern int nfs_local_doio(struct nfs_client *,
+ struct nfsd_file *,
+ struct nfs_pgio_header *,
+ const struct rpc_call_ops *);
+extern int nfs_local_commit(struct nfsd_file *,
+ struct nfs_commit_data *,
+ const struct rpc_call_ops *, int);
+extern bool nfs_server_is_local(const struct nfs_client *clp);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfs_local_probe(struct nfs_client *clp) {}
+static inline void nfs_local_probe_async(struct nfs_client *clp) {}
+static inline struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
+{
+ return NULL;
+}
+static inline int nfs_local_doio(struct nfs_client *clp,
+ struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ return -EINVAL;
+}
+static inline int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ return -EINVAL;
+}
+static inline bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return false;
+}
+#endif /* CONFIG_NFS_LOCALIO */
/* super.c */
extern const struct super_operations nfs_sops;
@@ -419,8 +518,6 @@ int nfs_try_get_tree(struct fs_context *);
int nfs_get_tree_common(struct fs_context *);
void nfs_kill_super(struct super_block *);
-extern struct rpc_stat nfs_rpcstat;
-
extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
@@ -428,12 +525,16 @@ extern void nfs_sb_deactive(struct super_block *sb);
extern int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data);
+#ifdef CONFIG_NFS_FSCACHE
+extern const struct netfs_request_ops nfs_netfs_ops;
+#endif
+
/* io.c */
-extern void nfs_start_io_read(struct inode *inode);
+extern __must_check int nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);
-extern void nfs_start_io_write(struct inode *inode);
+extern __must_check int nfs_start_io_write(struct inode *inode);
extern void nfs_end_io_write(struct inode *inode);
-extern void nfs_start_io_direct(struct inode *inode);
+extern __must_check int nfs_start_io_direct(struct inode *inode);
extern void nfs_end_io_direct(struct inode *inode);
static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
@@ -441,6 +542,16 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
}
+/* Must be called with exclusively locked inode->i_rwsem */
+static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi)
+{
+ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+ clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+ inode_dio_wait(&nfsi->vfs_inode);
+ }
+}
+
+
/* namespace.c */
#define NFS_PATH_CANONICAL 1
extern char *nfs_path(char **p, struct dentry *dentry,
@@ -457,10 +568,15 @@ extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool
struct nfs_pgio_completion_ops;
/* read.c */
+extern const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops);
-extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
+extern bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size);
+extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+ struct nfs_open_context *ctx,
+ struct folio *folio);
+extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
@@ -478,13 +594,13 @@ extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_commit_free(struct nfs_commit_data *p);
-extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags);
+ int how, int flags,
+ struct nfsd_file *localio);
extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
struct pnfs_layout_segment *lseg,
@@ -553,8 +669,10 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
#endif
#ifdef CONFIG_MIGRATION
-extern int nfs_migrate_page(struct address_space *,
- struct page *, struct page *, enum migrate_mode);
+int nfs_migrate_folio(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
+#else
+#define nfs_migrate_folio NULL
#endif
static inline int
@@ -572,6 +690,41 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
!nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier);
}
+static inline gfp_t nfs_io_gfp_mask(void)
+{
+ gfp_t ret = current_gfp_context(GFP_KERNEL);
+
+ /* For workqueue workers, add __GFP_NORETRY only if __GFP_IO and __GFP_FS are still allowed */
+ if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL)
+ ret |= __GFP_NORETRY | __GFP_NOWARN;
+ return ret;
+}
+
+/*
+ * Special version of should_remove_suid() that ignores capabilities.
+ */
+static inline int nfs_should_remove_suid(const struct inode *inode)
+{
+ umode_t mode = inode->i_mode;
+ int kill = 0;
+
+ /* suid always must be killed */
+ if (unlikely(mode & S_ISUID))
+ kill = ATTR_KILL_SUID;
+
+ /*
+ * sgid without any exec bits is just a mandatory locking mark; leave
+ * it alone. If some exec bits are set, it's a real sgid; kill it.
+ */
+ if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+ kill |= ATTR_KILL_SGID;
+
+ if (unlikely(kill && S_ISREG(mode)))
+ return kill;
+
+ return 0;
+}
+
/* unlink.c */
extern struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
@@ -582,7 +735,7 @@ extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
/* direct.c */
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq);
-extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
+extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset);
/* nfs4proc.c */
extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
@@ -639,9 +792,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
if ((bsize & (bsize - 1)) || nrbitsp) {
unsigned char nrbits;
- for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
+ for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
;
- bsize = 1 << nrbits;
+ bsize = 1UL << nrbits;
if (nrbitsp)
*nrbitsp = nrbits;
}
@@ -673,6 +826,22 @@ unsigned long nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
}
/*
+ * Compute and set NFS server rsize / wsize
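+ * e.g. with 4KiB pages, iosize=70000 over TCP becomes 69632
+ * (iosize & PAGE_MASK), while over UDP nfs_block_bits() rounds it
+ * down to the power of two 65536.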
+ */
+static inline
+unsigned long nfs_io_size(unsigned long iosize, enum xprt_transports proto)
+{
+ if (iosize < NFS_MIN_FILE_IO_SIZE)
+ iosize = NFS_DEF_FILE_IO_SIZE;
+ else if (iosize >= NFS_MAX_FILE_IO_SIZE)
+ iosize = NFS_MAX_FILE_IO_SIZE;
+
+ if (proto == XPRT_TRANSPORT_UDP || iosize < PAGE_SIZE)
+ return nfs_block_bits(iosize, NULL);
+ return iosize & PAGE_MASK;
+}
+
+/*
* Determine the maximum file size for a superblock
*/
static inline
@@ -687,17 +856,18 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
* Record the page as unstable (an extra writeback period) and mark its
* inode as dirty.
*/
-static inline
-void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
+static inline void nfs_folio_mark_unstable(struct folio *folio,
+ struct nfs_commit_info *cinfo)
{
- if (!cinfo->dreq) {
- struct inode *inode = page_file_mapping(page)->host;
+ if (folio && !cinfo->dreq) {
+ struct inode *inode = folio->mapping->host;
+ long nr = folio_nr_pages(folio);
/* This page is really still in write-back - just that the
* writeback is happening on the server now.
*/
- inc_node_page_state(page, NR_WRITEBACK);
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
+ node_stat_mod_folio(folio, NR_WRITEBACK, nr);
+ wb_stat_mod(&inode_to_bdi(inode)->wb, WB_WRITEBACK, nr);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
}
@@ -705,18 +875,17 @@ void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
/*
* Determine the number of bytes of data the page contains
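* (e.g. with i_size = 10000, the 4KiB folio covering bytes 8192-12287 is
* the last one and the helper returns 9999 - 8192 + 1 = 1808 bytes)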
*/
-static inline
-unsigned int nfs_page_length(struct page *page)
+static inline size_t nfs_folio_length(struct folio *folio)
{
- loff_t i_size = i_size_read(page_file_mapping(page)->host);
+ loff_t i_size = i_size_read(folio->mapping->host);
if (i_size > 0) {
- pgoff_t index = page_index(page);
- pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
+ pgoff_t index = folio->index >> folio_order(folio);
+ pgoff_t end_index = (i_size - 1) >> folio_shift(folio);
if (index < end_index)
- return PAGE_SIZE;
+ return folio_size(folio);
if (index == end_index)
- return ((i_size - 1) & ~PAGE_MASK) + 1;
+ return offset_in_folio(folio, i_size - 1) + 1;
}
return 0;
}
@@ -734,11 +903,10 @@ unsigned char nfs_umode_to_dtype(umode_t mode)
* Determine the number of pages in an array of length 'len' and
* with a base offset of 'base'
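* (e.g. base = 100 and len = 5000 span 2 pages with 4KiB pages)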
*/
-static inline
-unsigned int nfs_page_array_len(unsigned int base, size_t len)
+static inline unsigned int nfs_page_array_len(unsigned int base, size_t len)
{
- return ((unsigned long)len + (unsigned long)base +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
+ return ((unsigned long)len + (unsigned long)base + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
}
/*
@@ -754,33 +922,16 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts)
return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
}
-#ifdef CONFIG_CRC32
-/**
- * nfs_fhandle_hash - calculate the crc32 hash for the filehandle
- * @fh - pointer to filehandle
- *
- * returns a crc32 hash for the filehandle that is compatible with
- * the one displayed by "wireshark".
- */
-static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
-{
- return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
-}
static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
{
return ~crc32_le(0xFFFFFFFF, &stateid->other[0],
NFS4_STATEID_OTHER_SIZE);
}
-#else
-static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
-{
- return 0;
-}
-static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
+
+static inline bool nfs_current_task_exiting(void)
{
- return 0;
+ return (current->flags & PF_EXITING) != 0;
}
-#endif
static inline bool nfs_error_is_fatal(int err)
{
@@ -809,6 +960,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
case 0:
case -ERESTARTSYS:
case -EINTR:
+ case -ENOMEM:
return false;
}
return nfs_error_is_fatal(err);
@@ -818,11 +970,43 @@ static inline bool nfs_error_is_fatal_on_server(int err)
* Select between a default port value and a user-specified port value.
* If a zero value is set, then autobind will be used.
*/
-static inline void nfs_set_port(struct sockaddr *sap, int *port,
+static inline void nfs_set_port(struct sockaddr_storage *sap, int *port,
const unsigned short default_port)
{
if (*port == NFS_UNSPEC_PORT)
*port = default_port;
- rpc_set_port(sap, *port);
-}
+ rpc_set_port((struct sockaddr *)sap, *port);
+}
+
+struct nfs_direct_req {
+ struct kref kref; /* release manager */
+
+ /* I/O parameters */
+ struct nfs_open_context *ctx; /* file open context info */
+ struct nfs_lock_context *l_ctx; /* Lock context info */
+ struct kiocb * iocb; /* controlling i/o request */
+ struct inode * inode; /* target file of i/o */
+
+ /* completion state */
+ atomic_t io_count; /* i/os we're waiting for */
+ spinlock_t lock; /* protect completion state */
+
+ loff_t io_start; /* Start offset for I/O */
+ ssize_t count, /* bytes actually processed */
+ max_count, /* max expected count */
+ error; /* any reported error */
+ struct completion completion; /* wait for i/o completion */
+
+ /* commit state */
+ struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
+ struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
+ struct work_struct work;
+ int flags;
+ /* for write */
+#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
+#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
+ /* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
+#define NFS_ODIRECT_DONE INT_MAX /* request is done, stop scheduling new I/O */
+};
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index b5551ed8f648..d275b0a250bf 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -14,15 +14,6 @@
#include "internal.h"
-/* Call with exclusively locked inode->i_rwsem */
-static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
-{
- if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
- clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
- inode_dio_wait(inode);
- }
-}
-
/**
* nfs_start_io_read - declare the file is being used for buffered reads
* @inode: file inode
@@ -39,19 +30,28 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
*/
-void
+int
nfs_start_io_read(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ int err;
+
/* Be an optimist! */
- down_read(&inode->i_rwsem);
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
- return;
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
- nfs_block_o_direct(nfsi, inode);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+ nfs_file_block_o_direct(nfsi);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
@@ -74,11 +74,15 @@ nfs_end_io_read(struct inode *inode)
* Declare that a buffered read operation is about to start, and ensure
* that we block all direct I/O.
*/
-void
+int
nfs_start_io_write(struct inode *inode)
{
- down_write(&inode->i_rwsem);
- nfs_block_o_direct(NFS_I(inode), inode);
+ int err;
+
+ err = down_write_killable(&inode->i_rwsem);
+ if (!err)
+ nfs_file_block_o_direct(NFS_I(inode));
+ return err;
}
/**
@@ -119,19 +123,28 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
*/
-void
+int
nfs_start_io_direct(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ int err;
+
/* Be an optimist! */
- down_read(&inode->i_rwsem);
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
- return;
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
nfs_block_buffered(nfsi, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 2ddaab1ac653..49862c95b224 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -17,9 +17,6 @@
struct nfs_iostats {
unsigned long long bytes[__NFSIOS_BYTESMAX];
-#ifdef CONFIG_NFS_FSCACHE
- unsigned long long fscache[__NFSIOS_FSCACHEMAX];
-#endif
unsigned long events[__NFSIOS_COUNTSMAX];
} ____cacheline_aligned;
@@ -49,24 +46,11 @@ static inline void nfs_add_stats(const struct inode *inode,
nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
}
-#ifdef CONFIG_NFS_FSCACHE
-static inline void nfs_add_fscache_stats(struct inode *inode,
- enum nfs_stat_fscachecounters stat,
- long addend)
-{
- this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
-}
-static inline void nfs_inc_fscache_stats(struct inode *inode,
- enum nfs_stat_fscachecounters stat)
-{
- this_cpu_inc(NFS_SERVER(inode)->io_stats->fscache[stat]);
-}
-#endif
-
-static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
-{
- return alloc_percpu(struct nfs_iostats);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
+#define nfs_alloc_iostats() alloc_percpu(struct nfs_iostats)
static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
{
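As the comment notes, the macro form matters for memory allocation profiling: each expansion of alloc_percpu() creates its own alloc_tag, so keeping nfs_alloc_iostats() as a macro attributes the per-cpu memory to the NFS call site. A hypothetical inline wrapper (not in the patch) shows what would be lost:

	static inline struct nfs_iostats __percpu *inline_alloc_iostats(void)
	{
		/* every caller would be accounted to this single wrapper tag */
		return alloc_percpu(struct nfs_iostats);
	}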
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
new file mode 100644
index 000000000000..f33bfa7b58e6
--- /dev/null
+++ b/fs/nfs/localio.c
@@ -0,0 +1,1072 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS client support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/vfs.h>
+#include <linux/file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/bvec.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+
+#include "internal.h"
+#include "pnfs.h"
+#include "nfstrace.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+#define NFSLOCAL_MAX_IOS 3
+
+struct nfs_local_kiocb {
+ struct kiocb kiocb;
+ struct bio_vec *bvec;
+ struct nfs_pgio_header *hdr;
+ struct work_struct work;
+ void (*aio_complete_work)(struct work_struct *);
+ struct nfsd_file *localio;
+ /* Begin mostly DIO-specific members */
+ size_t end_len;
+ short int end_iter_index;
+ atomic_t n_iters;
+ bool iter_is_dio_aligned[NFSLOCAL_MAX_IOS];
+ struct iov_iter iters[NFSLOCAL_MAX_IOS] ____cacheline_aligned;
+ /* End mostly DIO-specific members */
+};
+
+struct nfs_local_fsync_ctx {
+ struct nfsd_file *localio;
+ struct nfs_commit_data *data;
+ struct work_struct work;
+ struct completion *done;
+};
+
+static bool localio_enabled __read_mostly = true;
+module_param(localio_enabled, bool, 0644);
+
+static inline bool nfs_client_is_local(const struct nfs_client *clp)
+{
+ return !!rcu_access_pointer(clp->cl_uuid.net);
+}
+
+bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return nfs_client_is_local(clp) && localio_enabled;
+}
+EXPORT_SYMBOL_GPL(nfs_server_is_local);
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static void localio_xdr_enc_uuidargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const u8 *uuid = data;
+
+ encode_opaque_fixed(xdr, uuid, UUID_SIZE);
+}
+
+static int localio_xdr_dec_uuidres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ void *result)
+{
+ /* void return */
+ return 0;
+}
+
+static const struct rpc_procinfo nfs_localio_procedures[] = {
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .p_proc = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_encode = localio_xdr_enc_uuidargs,
+ .p_decode = localio_xdr_dec_uuidres,
+ .p_arglen = XDR_QUADLEN(UUID_SIZE),
+ .p_replen = 0,
+ .p_statidx = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_name = "UUID_IS_LOCAL",
+ },
+};
+
+static unsigned int nfs_localio_counts[ARRAY_SIZE(nfs_localio_procedures)];
+static const struct rpc_version nfslocalio_version1 = {
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nfs_localio_procedures),
+ .procs = nfs_localio_procedures,
+ .counts = nfs_localio_counts,
+};
+
+static const struct rpc_version *nfslocalio_version[] = {
+ [1] = &nfslocalio_version1,
+};
+
+extern const struct rpc_program nfslocalio_program;
+static struct rpc_stat nfslocalio_rpcstat = { &nfslocalio_program };
+
+const struct rpc_program nfslocalio_program = {
+ .name = "nfslocalio",
+ .number = NFS_LOCALIO_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfslocalio_version),
+ .version = nfslocalio_version,
+ .stats = &nfslocalio_rpcstat,
+};
+
+/*
+ * nfs_init_localioclient - Initialise an NFS localio client connection
+ */
+static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp)
+{
+ struct rpc_clnt *rpcclient_localio;
+
+ rpcclient_localio = rpc_bind_new_program(clp->cl_rpcclient,
+ &nfslocalio_program, 1);
+
+ dprintk_rcu("%s: server (%s) %s NFS LOCALIO.\n",
+ __func__, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+ (IS_ERR(rpcclient_localio) ? "does not support" : "supports"));
+
+ return rpcclient_localio;
+}
+
+static bool nfs_server_uuid_is_local(struct nfs_client *clp)
+{
+ u8 uuid[UUID_SIZE];
+ struct rpc_message msg = {
+ .rpc_argp = &uuid,
+ };
+ struct rpc_clnt *rpcclient_localio;
+ int status;
+
+ rpcclient_localio = nfs_init_localioclient(clp);
+ if (IS_ERR(rpcclient_localio))
+ return false;
+
+ export_uuid(uuid, &clp->cl_uuid.uuid);
+
+ msg.rpc_proc = &nfs_localio_procedures[LOCALIOPROC_UUID_IS_LOCAL];
+ status = rpc_call_sync(rpcclient_localio, &msg, 0);
+ dprintk("%s: NFS reply UUID_IS_LOCAL: status=%d\n",
+ __func__, status);
+ rpc_shutdown_client(rpcclient_localio);
+
+ /* Server is only local if it initialized required struct members */
+ if (status || !rcu_access_pointer(clp->cl_uuid.net) || !clp->cl_uuid.dom)
+ return false;
+
+ return true;
+}
+
+/*
+ * nfs_local_probe - probe local i/o support for an nfs_server and nfs_client
+ * - called after alloc_client and init_client (so cl_rpcclient exists)
+ * - this function is idempotent, it can be called for old or new clients
+ */
+static void nfs_local_probe(struct nfs_client *clp)
+{
+ /* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */
+ if (!localio_enabled ||
+ clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) {
+ nfs_localio_disable_client(clp);
+ return;
+ }
+
+ if (nfs_client_is_local(clp))
+ return;
+
+ if (!nfs_uuid_begin(&clp->cl_uuid))
+ return;
+ if (nfs_server_uuid_is_local(clp))
+ nfs_localio_enable_client(clp);
+ nfs_uuid_end(&clp->cl_uuid);
+}
+
+void nfs_local_probe_async_work(struct work_struct *work)
+{
+ struct nfs_client *clp =
+ container_of(work, struct nfs_client, cl_local_probe_work);
+
+ if (!refcount_inc_not_zero(&clp->cl_count))
+ return;
+ nfs_local_probe(clp);
+ nfs_put_client(clp);
+}
+
+void nfs_local_probe_async(struct nfs_client *clp)
+{
+ queue_work(nfsiod_workqueue, &clp->cl_local_probe_work);
+}
+EXPORT_SYMBOL_GPL(nfs_local_probe_async);
+
+static inline void nfs_local_file_put(struct nfsd_file *localio)
+{
+ /* nfs_to_nfsd_file_put_local() expects an __rcu pointer
+ * but we have a __kernel pointer. It is always safe
+ * to cast a __kernel pointer to an __rcu pointer
+ * because the cast only weakens what is known about the pointer.
+ */
+ struct nfsd_file __rcu *nf = (struct nfsd_file __rcu*) localio;
+
+ nfs_to_nfsd_file_put_local(&nf);
+}
+
+/*
+ * __nfs_local_open_fh - open a local filehandle in terms of nfsd_file.
+ *
+ * Returns a pointer to a struct nfsd_file or ERR_PTR.
+ * Caller must release returned nfsd_file with nfs_to_nfsd_file_put_local().
+ */
+static struct nfsd_file *
+__nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t mode)
+{
+ int status = 0;
+ struct nfsd_file *localio;
+
+ localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
+ cred, fh, nfl, pnf, mode);
+ if (IS_ERR(localio)) {
+ status = PTR_ERR(localio);
+ switch (status) {
+ case -ENOMEM:
+ case -ENXIO:
+ case -ENOENT:
+ /* Revalidate localio */
+ nfs_localio_disable_client(clp);
+ nfs_local_probe(clp);
+ }
+ }
+ trace_nfs_local_open_fh(fh, mode, status);
+ return localio;
+}
+
+/*
+ * nfs_local_open_fh - open a local filehandle in terms of nfsd_file.
+ * First check whether an open nfsd_file is already cached; otherwise call
+ * __nfs_local_open_fh() and insert the nfsd_file into nfs_file_localio.
+ *
+ * Returns a pointer to a struct nfsd_file or NULL.
+ */
+struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
+{
+ struct nfsd_file *nf, __rcu **pnf;
+
+ if (!nfs_server_is_local(clp))
+ return NULL;
+ if (mode & ~(FMODE_READ | FMODE_WRITE))
+ return NULL;
+
+ if (mode & FMODE_WRITE)
+ pnf = &nfl->rw_file;
+ else
+ pnf = &nfl->ro_file;
+
+ nf = __nfs_local_open_fh(clp, cred, fh, nfl, pnf, mode);
+ if (IS_ERR(nf))
+ return NULL;
+ return nf;
+}
+EXPORT_SYMBOL_GPL(nfs_local_open_fh);
+
+static void
+nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
+{
+ kfree(iocb->bvec);
+ kfree(iocb);
+}
+
+static struct nfs_local_kiocb *
+nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
+ struct file *file, gfp_t flags)
+{
+ struct nfs_local_kiocb *iocb;
+
+ iocb = kzalloc(sizeof(*iocb), flags);
+ if (iocb == NULL)
+ return NULL;
+
+ iocb->bvec = kmalloc_array(hdr->page_array.npages,
+ sizeof(struct bio_vec), flags);
+ if (iocb->bvec == NULL) {
+ kfree(iocb);
+ return NULL;
+ }
+
+ init_sync_kiocb(&iocb->kiocb, file);
+
+ iocb->hdr = hdr;
+ iocb->kiocb.ki_pos = hdr->args.offset;
+ iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ iocb->kiocb.ki_complete = NULL;
+ iocb->aio_complete_work = NULL;
+
+ iocb->end_iter_index = -1;
+
+ return iocb;
+}
+
+static bool
+nfs_is_local_dio_possible(struct nfs_local_kiocb *iocb, int rw,
+ size_t len, struct nfs_local_dio *local_dio)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ loff_t offset = hdr->args.offset;
+ u32 nf_dio_mem_align, nf_dio_offset_align, nf_dio_read_offset_align;
+ loff_t start_end, orig_end, middle_end;
+
+ nfs_to->nfsd_file_dio_alignment(iocb->localio, &nf_dio_mem_align,
+ &nf_dio_offset_align, &nf_dio_read_offset_align);
+ if (rw == ITER_DEST)
+ nf_dio_offset_align = nf_dio_read_offset_align;
+
+ if (unlikely(!nf_dio_mem_align || !nf_dio_offset_align))
+ return false;
+ if (unlikely(nf_dio_offset_align > PAGE_SIZE))
+ return false;
+ if (unlikely(len < nf_dio_offset_align))
+ return false;
+
+ local_dio->mem_align = nf_dio_mem_align;
+ local_dio->offset_align = nf_dio_offset_align;
+
+ start_end = round_up(offset, nf_dio_offset_align);
+ orig_end = offset + len;
+ middle_end = round_down(orig_end, nf_dio_offset_align);
+
+ local_dio->middle_offset = start_end;
+ local_dio->end_offset = middle_end;
+
+ local_dio->start_len = start_end - offset;
+ local_dio->middle_len = middle_end - start_end;
+ local_dio->end_len = orig_end - middle_end;
+
+ if (rw == ITER_DEST)
+ trace_nfs_local_dio_read(hdr->inode, offset, len, local_dio);
+ else
+ trace_nfs_local_dio_write(hdr->inode, offset, len, local_dio);
+ return true;
+}
+
+static bool nfs_iov_iter_aligned_bvec(const struct iov_iter *i,
+ unsigned int addr_mask, unsigned int len_mask)
+{
+ const struct bio_vec *bvec = i->bvec;
+ size_t skip = i->iov_offset;
+ size_t size = i->count;
+
+ if (size & len_mask)
+ return false;
+ do {
+ size_t len = bvec->bv_len;
+
+ if (len > size)
+ len = size;
+ if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
+ return false;
+ bvec++;
+ size -= len;
+ skip = 0;
+ } while (size);
+
+ return true;
+}
+
+static void
+nfs_local_iter_setup(struct iov_iter *iter, int rw, struct bio_vec *bvec,
+ unsigned int nvecs, unsigned long total,
+ size_t start, size_t len)
+{
+ iov_iter_bvec(iter, rw, bvec, nvecs, total);
+ if (start)
+ iov_iter_advance(iter, start);
+ iov_iter_truncate(iter, len);
+}
+
+/*
+ * Set up as many as 3 iov_iters based on the extents described by @local_dio.
+ * Returns the number of iov_iters that were set up.
+ */
+static int
+nfs_local_iters_setup_dio(struct nfs_local_kiocb *iocb, int rw,
+ unsigned int nvecs, unsigned long total,
+ struct nfs_local_dio *local_dio)
+{
+ int n_iters = 0;
+ struct iov_iter *iters = iocb->iters;
+
+ /* Setup misaligned start? */
+ if (local_dio->start_len) {
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec,
+ nvecs, total, 0, local_dio->start_len);
+ ++n_iters;
+ }
+
+ /*
+ * Set up the DIO-aligned middle. If there is no misaligned end (below),
+ * AIO completion is used; see nfs_local_call_{read,write}.
+ */
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec, nvecs,
+ total, local_dio->start_len, local_dio->middle_len);
+
+ iocb->iter_is_dio_aligned[n_iters] =
+ nfs_iov_iter_aligned_bvec(&iters[n_iters],
+ local_dio->mem_align-1, local_dio->offset_align-1);
+
+ if (unlikely(!iocb->iter_is_dio_aligned[n_iters])) {
+ trace_nfs_local_dio_misaligned(iocb->hdr->inode,
+ local_dio->start_len, local_dio->middle_len, local_dio);
+ return 0; /* no DIO-aligned IO possible */
+ }
+ iocb->end_iter_index = n_iters;
+ ++n_iters;
+
+ /* Setup misaligned end? */
+ if (local_dio->end_len) {
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec,
+ nvecs, total, local_dio->start_len +
+ local_dio->middle_len, local_dio->end_len);
+ iocb->end_iter_index = n_iters;
+ ++n_iters;
+ }
+
+ atomic_set(&iocb->n_iters, n_iters);
+ return n_iters;
+}
+
+static noinline_for_stack void
+nfs_local_iters_init(struct nfs_local_kiocb *iocb, int rw)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct page **pagevec = hdr->page_array.pagevec;
+ unsigned long v, total;
+ unsigned int base;
+ size_t len;
+
+ v = 0;
+ total = hdr->args.count;
+ base = hdr->args.pgbase;
+ while (total && v < hdr->page_array.npages) {
+ len = min_t(size_t, total, PAGE_SIZE - base);
+ bvec_set_page(&iocb->bvec[v], *pagevec, len, base);
+ total -= len;
+ ++pagevec;
+ ++v;
+ base = 0;
+ }
+ len = hdr->args.count - total;
+
+ /*
+ * iocb->n_iters is always at least 1; unless this is misaligned DIO,
+ * the I/O ends after the first nfs_local_pgio_done() call.
+ */
+ atomic_set(&iocb->n_iters, 1);
+
+ if (test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) {
+ struct nfs_local_dio local_dio;
+
+ if (nfs_is_local_dio_possible(iocb, rw, len, &local_dio) &&
+ nfs_local_iters_setup_dio(iocb, rw, v, len, &local_dio) != 0) {
+ /* Ensure DIO WRITE's IO on stable storage upon completion */
+ if (rw == ITER_SOURCE)
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ return; /* is DIO-aligned */
+ }
+ }
+
+ /* Use buffered IO */
+ iov_iter_bvec(&iocb->iters[0], rw, iocb->bvec, v, len);
+}
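To make the splitting above concrete, a worked example with illustrative numbers (none of them from the patch): a 9000-byte O_DIRECT request at offset 1000 against a 512-byte nfsd_file offset alignment gives

	start_end  = round_up(1000, 512)          = 1024
	middle_end = round_down(1000 + 9000, 512) = 9728

	iters[0]: buffered     offset 1000  len   24   (start_len)
	iters[1]: IOCB_DIRECT  offset 1024  len 8704   (middle_len)
	iters[2]: buffered     offset 9728  len  272   (end_len)

n_iters is 3, so the request only completes once the last segment's nfs_local_pgio_done() drops the counter to zero; and if the middle iter fails the mem_align check in nfs_iov_iter_aligned_bvec(), nfs_local_iters_setup_dio() returns 0 and the whole request falls back to a single buffered iter.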
+
+static void
+nfs_local_hdr_release(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ call_ops->rpc_call_done(&hdr->task, hdr);
+ call_ops->rpc_release(hdr);
+}
+
+static void
+nfs_local_pgio_init(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ hdr->task.tk_ops = call_ops;
+ if (!hdr->task.tk_start)
+ hdr->task.tk_start = ktime_get();
+}
+
+static bool
+nfs_local_pgio_done(struct nfs_local_kiocb *iocb, long status, bool force)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ /* Must handle partial completions */
+ if (status >= 0) {
+ hdr->res.count += status;
+ /* @hdr was initialized to 0 (zeroed during allocation) */
+ if (hdr->task.tk_status == 0)
+ hdr->res.op_status = NFS4_OK;
+ } else {
+ hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
+ hdr->task.tk_status = status;
+ }
+
+ if (force)
+ return true;
+
+ BUG_ON(atomic_read(&iocb->n_iters) <= 0);
+ return atomic_dec_and_test(&iocb->n_iters);
+}
+
+static void
+nfs_local_iocb_release(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_file_put(iocb->localio);
+ nfs_local_iocb_free(iocb);
+}
+
+static void
+nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ nfs_local_iocb_release(iocb);
+ nfs_local_hdr_release(hdr, hdr->task.tk_ops);
+}
+
+/*
+ * Complete the I/O from iocb->kiocb.ki_complete()
+ *
+ * Note that this function can be called from a bottom half context,
+ * hence we need to queue the rpc_call_done() etc to a workqueue
+ */
+static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb)
+{
+ INIT_WORK(&iocb->work, iocb->aio_complete_work);
+ queue_work(nfsiod_workqueue, &iocb->work);
+}
+
+static void nfs_local_read_done(struct nfs_local_kiocb *iocb)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct file *filp = iocb->kiocb.ki_filp;
+ long status = hdr->task.tk_status;
+
+ if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
+ /* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
+ pr_info_ratelimited("nfs: Unexpected direct I/O read alignment failure\n");
+ }
+
+ /*
+ * Must clear replen, otherwise NFSv3 data corruption will occur
+ * if/when switching from LOCALIO back to using normal RPC.
+ */
+ hdr->res.replen = 0;
+
+ /* nfs_readpage_result() handles short read */
+
+ if (hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ hdr->res.eof = true;
+
+ dprintk("%s: read %ld bytes eof %d.\n", __func__,
+ status > 0 ? status : 0, hdr->res.eof);
+}
+
+static inline void nfs_local_read_iocb_done(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_read_done(iocb);
+ nfs_local_pgio_release(iocb);
+}
+
+static void nfs_local_read_aio_complete_work(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+
+ nfs_local_read_iocb_done(iocb);
+}
+
+static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(kiocb, struct nfs_local_kiocb, kiocb);
+
+ /* AIO completion of DIO read should always be last to complete */
+ if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
+ return;
+
+ nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
+}
+
+static void nfs_local_call_read(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ bool force_done = false;
+ ssize_t status;
+ int n_iters;
+
+ n_iters = atomic_read(&iocb->n_iters);
+ for (int i = 0; i < n_iters ; i++) {
+ if (iocb->iter_is_dio_aligned[i]) {
+ iocb->kiocb.ki_flags |= IOCB_DIRECT;
+ /* Only use AIO completion if DIO-aligned segment is last */
+ if (i == iocb->end_iter_index) {
+ iocb->kiocb.ki_complete = nfs_local_read_aio_complete;
+ iocb->aio_complete_work = nfs_local_read_aio_complete_work;
+ }
+ } else
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
+
+ scoped_with_creds(filp->f_cred)
+ status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
+
+ if (status != -EIOCBQUEUED) {
+ if (unlikely(status >= 0 && status < iocb->iters[i].count))
+ force_done = true; /* Partial read */
+ if (nfs_local_pgio_done(iocb, status, force_done)) {
+ nfs_local_read_iocb_done(iocb);
+ break;
+ }
+ }
+ }
+}
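A completion-model sketch for the loop above (the same applies to nfs_local_call_write() below); rows are the possible segment layouts produced by nfs_local_iters_setup_dio():

	head  middle  tail    ki_complete on middle?    final completion runs in
	 -     DIO     -      yes (AIO)                 nfs_local_read_aio_complete_work()
	 buf   DIO     -      yes (AIO)                 nfs_local_read_aio_complete_work()
	 any   DIO     buf    no (sync kiocb)           last nfs_local_pgio_done() in this loop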
+
+static int
+nfs_local_do_read(struct nfs_local_kiocb *iocb,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ dprintk("%s: vfs_read count=%u pos=%llu\n",
+ __func__, hdr->args.count, hdr->args.offset);
+
+ nfs_local_pgio_init(hdr, call_ops);
+ hdr->res.eof = false;
+
+ INIT_WORK(&iocb->work, nfs_local_call_read);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+static void
+nfs_copy_boot_verifier(struct nfs_write_verifier *verifier, struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ u32 *verf = (u32 *)verifier->data;
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&clp->cl_boot_lock);
+ verf[0] = (u32)clp->cl_nfssvc_boot.tv_sec;
+ verf[1] = (u32)clp->cl_nfssvc_boot.tv_nsec;
+ } while (read_seqretry(&clp->cl_boot_lock, seq));
+}
+
+static void
+nfs_reset_boot_verifier(struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+ write_seqlock(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ write_sequnlock(&clp->cl_boot_lock);
+}
+
+static void
+nfs_set_local_verifier(struct inode *inode,
+ struct nfs_writeverf *verf,
+ enum nfs3_stable_how how)
+{
+ nfs_copy_boot_verifier(&verf->verifier, inode);
+ verf->committed = how;
+}
+
+/* Factored out from fs/nfsd/vfs.h:fh_getattr() */
+static int __vfs_getattr(const struct path *p, struct kstat *stat, int version)
+{
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (version == 4)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+ return vfs_getattr(p, stat, request_mask, AT_STATX_SYNC_AS_STAT);
+}
+
+/* Copied from fs/nfsd/nfsfh.c:nfsd4_change_attribute() */
+static u64 __nfsd4_change_attribute(const struct kstat *stat,
+ const struct inode *inode)
+{
+ u64 chattr;
+
+ if (stat->result_mask & STATX_CHANGE_COOKIE) {
+ chattr = stat->change_cookie;
+ if (S_ISREG(inode->i_mode) &&
+ !(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
+ chattr += (u64)stat->ctime.tv_sec << 30;
+ chattr += stat->ctime.tv_nsec;
+ }
+ } else {
+ chattr = time_to_chattr(&stat->ctime);
+ }
+ return chattr;
+}
+
+static void nfs_local_vfs_getattr(struct nfs_local_kiocb *iocb)
+{
+ struct kstat stat;
+ struct file *filp = iocb->kiocb.ki_filp;
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct nfs_fattr *fattr = hdr->res.fattr;
+ int version = NFS_PROTO(hdr->inode)->version;
+
+ if (unlikely(!fattr) || __vfs_getattr(&filp->f_path, &stat, version))
+ return;
+
+ fattr->valid = (NFS_ATTR_FATTR_FILEID |
+ NFS_ATTR_FATTR_CHANGE |
+ NFS_ATTR_FATTR_SIZE |
+ NFS_ATTR_FATTR_ATIME |
+ NFS_ATTR_FATTR_MTIME |
+ NFS_ATTR_FATTR_CTIME |
+ NFS_ATTR_FATTR_SPACE_USED);
+
+ fattr->fileid = stat.ino;
+ fattr->size = stat.size;
+ fattr->atime = stat.atime;
+ fattr->mtime = stat.mtime;
+ fattr->ctime = stat.ctime;
+ if (version == 4) {
+ fattr->change_attr =
+ __nfsd4_change_attribute(&stat, file_inode(filp));
+ } else
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+ fattr->du.nfs3.used = stat.blocks << 9;
+}
+
+static void nfs_local_write_done(struct nfs_local_kiocb *iocb)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ long status = hdr->task.tk_status;
+
+ dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
+
+ if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
+ /* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
+ pr_info_ratelimited("nfs: Unexpected direct I/O write alignment failure\n");
+ }
+
+ /* Handle short writes as if they are ENOSPC */
+ status = hdr->res.count;
+ if (status > 0 && status < hdr->args.count) {
+ hdr->mds_offset += status;
+ hdr->args.offset += status;
+ hdr->args.pgbase += status;
+ hdr->args.count -= status;
+ nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
+ status = -ENOSPC;
+ /* record -ENOSPC in terms of nfs_local_pgio_done */
+ (void) nfs_local_pgio_done(iocb, status, true);
+ }
+ if (hdr->task.tk_status < 0)
+ nfs_reset_boot_verifier(hdr->inode);
+}
+
+static inline void nfs_local_write_iocb_done(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_write_done(iocb);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+}
+
+static void nfs_local_write_aio_complete_work(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+
+ nfs_local_write_iocb_done(iocb);
+}
+
+static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(kiocb, struct nfs_local_kiocb, kiocb);
+
+ /* AIO completion of DIO write should always be last to complete */
+ if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
+ return;
+
+ nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
+}
+
+static void nfs_local_call_write(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ unsigned long old_flags = current->flags;
+ bool force_done = false;
+ ssize_t status;
+ int n_iters;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+
+ file_start_write(filp);
+ n_iters = atomic_read(&iocb->n_iters);
+ for (int i = 0; i < n_iters ; i++) {
+ if (iocb->iter_is_dio_aligned[i]) {
+ iocb->kiocb.ki_flags |= IOCB_DIRECT;
+ /* Only use AIO completion if DIO-aligned segment is last */
+ if (i == iocb->end_iter_index) {
+ iocb->kiocb.ki_complete = nfs_local_write_aio_complete;
+ iocb->aio_complete_work = nfs_local_write_aio_complete_work;
+ }
+ } else
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
+
+ scoped_with_creds(filp->f_cred)
+ status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
+
+ if (status != -EIOCBQUEUED) {
+ if (unlikely(status >= 0 && status < iocb->iters[i].count))
+ force_done = true; /* Partial write */
+ if (nfs_local_pgio_done(iocb, status, force_done)) {
+ nfs_local_write_iocb_done(iocb);
+ break;
+ }
+ }
+ }
+ file_end_write(filp);
+
+ current->flags = old_flags;
+}
+
+static int
+nfs_local_do_write(struct nfs_local_kiocb *iocb,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ dprintk("%s: vfs_write count=%u pos=%llu %s\n",
+ __func__, hdr->args.count, hdr->args.offset,
+ (hdr->args.stable == NFS_UNSTABLE) ? "unstable" : "stable");
+
+ switch (hdr->args.stable) {
+ default:
+ break;
+ case NFS_DATA_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC;
+ break;
+ case NFS_FILE_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ }
+
+ nfs_local_pgio_init(hdr, call_ops);
+
+ nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
+
+ INIT_WORK(&iocb->work, nfs_local_call_write);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+static struct nfs_local_kiocb *
+nfs_local_iocb_init(struct nfs_pgio_header *hdr, struct nfsd_file *localio)
+{
+ struct file *file = nfs_to->nfsd_file_file(localio);
+ struct nfs_local_kiocb *iocb;
+ gfp_t gfp_mask;
+ int rw;
+
+ if (hdr->rw_mode & FMODE_READ) {
+ if (!file->f_op->read_iter)
+ return ERR_PTR(-EOPNOTSUPP);
+ gfp_mask = GFP_KERNEL;
+ rw = ITER_DEST;
+ } else {
+ if (!file->f_op->write_iter)
+ return ERR_PTR(-EOPNOTSUPP);
+ gfp_mask = GFP_NOIO;
+ rw = ITER_SOURCE;
+ }
+
+ iocb = nfs_local_iocb_alloc(hdr, file, gfp_mask);
+ if (iocb == NULL)
+ return ERR_PTR(-ENOMEM);
+ iocb->hdr = hdr;
+ iocb->localio = localio;
+
+ nfs_local_iters_init(iocb, rw);
+
+ return iocb;
+}
+
+int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+ int status = 0;
+
+ if (!hdr->args.count)
+ return 0;
+
+ iocb = nfs_local_iocb_init(hdr, localio);
+ if (IS_ERR(iocb))
+ return PTR_ERR(iocb);
+
+ switch (hdr->rw_mode) {
+ case FMODE_READ:
+ status = nfs_local_do_read(iocb, call_ops);
+ break;
+ case FMODE_WRITE:
+ status = nfs_local_do_write(iocb, call_ops);
+ break;
+ default:
+ dprintk("%s: invalid mode: %d\n", __func__,
+ hdr->rw_mode);
+ status = -EOPNOTSUPP;
+ }
+
+ if (status != 0) {
+ if (status == -EAGAIN)
+ nfs_localio_disable_client(clp);
+ nfs_local_iocb_release(iocb);
+ hdr->task.tk_status = status;
+ nfs_local_hdr_release(hdr, call_ops);
+ }
+ return status;
+}
+
+static void
+nfs_local_init_commit(struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ data->task.tk_ops = call_ops;
+}
+
+static int
+nfs_local_run_commit(struct file *filp, struct nfs_commit_data *data)
+{
+ loff_t start = data->args.offset;
+ loff_t end = LLONG_MAX;
+
+ if (data->args.count > 0) {
+ end = start + data->args.count - 1;
+ if (end < start)
+ end = LLONG_MAX;
+ }
+
+ dprintk("%s: commit %llu - %llu\n", __func__, start, end);
+ return vfs_fsync_range(filp, start, end, 0);
+}
+
+static void
+nfs_local_commit_done(struct nfs_commit_data *data, int status)
+{
+ if (status >= 0) {
+ nfs_set_local_verifier(data->inode,
+ data->res.verf,
+ NFS_FILE_SYNC);
+ data->res.op_status = NFS4_OK;
+ data->task.tk_status = 0;
+ } else {
+ nfs_reset_boot_verifier(data->inode);
+ data->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
+ data->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_release_commit_data(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ nfs_local_file_put(localio);
+ call_ops->rpc_call_done(&data->task, data);
+ call_ops->rpc_release(data);
+}
+
+static void
+nfs_local_fsync_ctx_free(struct nfs_local_fsync_ctx *ctx)
+{
+ nfs_local_release_commit_data(ctx->localio, ctx->data,
+ ctx->data->task.tk_ops);
+ kfree(ctx);
+}
+
+static void
+nfs_local_fsync_work(struct work_struct *work)
+{
+ struct nfs_local_fsync_ctx *ctx;
+ int status;
+
+ ctx = container_of(work, struct nfs_local_fsync_ctx, work);
+
+ status = nfs_local_run_commit(nfs_to->nfsd_file_file(ctx->localio),
+ ctx->data);
+ nfs_local_commit_done(ctx->data, status);
+ if (ctx->done != NULL)
+ complete(ctx->done);
+ nfs_local_fsync_ctx_free(ctx);
+}
+
+static struct nfs_local_fsync_ctx *
+nfs_local_fsync_ctx_alloc(struct nfs_commit_data *data,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_fsync_ctx *ctx = kmalloc(sizeof(*ctx), flags);
+
+ if (ctx != NULL) {
+ ctx->localio = localio;
+ ctx->data = data;
+ INIT_WORK(&ctx->work, nfs_local_fsync_work);
+ ctx->done = NULL;
+ }
+ return ctx;
+}
+
+int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ struct nfs_local_fsync_ctx *ctx;
+
+ ctx = nfs_local_fsync_ctx_alloc(data, localio, GFP_KERNEL);
+ if (!ctx) {
+ nfs_local_commit_done(data, -ENOMEM);
+ nfs_local_release_commit_data(localio, data, call_ops);
+ return -ENOMEM;
+ }
+
+ nfs_local_init_commit(data, call_ops);
+
+ if (how & FLUSH_SYNC) {
+ DECLARE_COMPLETION_ONSTACK(done);
+ ctx->done = &done;
+ queue_work(nfsiod_workqueue, &ctx->work);
+ wait_for_completion(&done);
+ } else
+ queue_work(nfsiod_workqueue, &ctx->work);
+
+ return 0;
+}
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index c5e3b6b3366a..db8dfb920394 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -128,11 +128,6 @@ struct mountres {
rpc_authflavor_t *auth_flavors;
};
-struct mnt_fhstatus {
- u32 status;
- struct nfs_fh *fh;
-};
-
/**
* nfs_mount - Obtain an NFS file handle for the given host and path
* @info: pointer to mount request arguments
@@ -158,7 +153,7 @@ int nfs_mount(struct nfs_mount_request *info, int timeo, int retrans)
struct rpc_create_args args = {
.net = info->net,
.protocol = info->protocol,
- .address = info->sap,
+ .address = (struct sockaddr *)info->sap,
.addrsize = info->salen,
.timeout = &mnt_timeout,
.servername = info->hostname,
@@ -228,74 +223,6 @@ out_mnt_err:
goto out;
}
-/**
- * nfs_umount - Notify a server that we have unmounted this export
- * @info: pointer to umount request arguments
- *
- * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
- * use UDP.
- */
-void nfs_umount(const struct nfs_mount_request *info)
-{
- static const struct rpc_timeout nfs_umnt_timeout = {
- .to_initval = 1 * HZ,
- .to_maxval = 3 * HZ,
- .to_retries = 2,
- };
- struct rpc_create_args args = {
- .net = info->net,
- .protocol = IPPROTO_UDP,
- .address = info->sap,
- .addrsize = info->salen,
- .timeout = &nfs_umnt_timeout,
- .servername = info->hostname,
- .program = &mnt_program,
- .version = info->version,
- .authflavor = RPC_AUTH_UNIX,
- .flags = RPC_CLNT_CREATE_NOPING,
- .cred = current_cred(),
- };
- struct rpc_message msg = {
- .rpc_argp = info->dirpath,
- };
- struct rpc_clnt *clnt;
- int status;
-
- if (strlen(info->dirpath) > MNTPATHLEN)
- return;
-
- if (info->noresvport)
- args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
-
- clnt = rpc_create(&args);
- if (IS_ERR(clnt))
- goto out_clnt_err;
-
- dprintk("NFS: sending UMNT request for %s:%s\n",
- (info->hostname ? info->hostname : "server"), info->dirpath);
-
- if (info->version == NFS_MNT3_VERSION)
- msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
- else
- msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];
-
- status = rpc_call_sync(clnt, &msg, 0);
- rpc_shutdown_client(clnt);
-
- if (unlikely(status < 0))
- goto out_call_err;
-
- return;
-
-out_clnt_err:
- dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
- PTR_ERR(clnt));
- return;
-
-out_call_err:
- dprintk("NFS: UMNT request failed, status=%d\n", status);
-}
-
/*
* XDR encode/decode functions for MOUNT
*/
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 3295af4110f1..5a4d193da1a9 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -147,7 +147,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct nfs_fs_context *ctx;
struct fs_context *fc;
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
- struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
+ struct nfs_server *server = NFS_SB(path->dentry->d_sb);
struct nfs_client *client = server->nfs_client;
int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
int ret;
@@ -175,14 +175,14 @@ struct vfsmount *nfs_d_automount(struct path *path)
}
/* for submounts we want the same server; referrals will reassign */
- memcpy(&ctx->nfs_server.address, &client->cl_addr, client->cl_addrlen);
+ memcpy(&ctx->nfs_server._address, &client->cl_addr, client->cl_addrlen);
ctx->nfs_server.addrlen = client->cl_addrlen;
ctx->nfs_server.port = server->port;
ctx->version = client->rpc_ops->version;
ctx->minorversion = client->cl_minorversion;
ctx->nfs_mod = client->cl_nfs_mod;
- __module_get(ctx->nfs_mod->owner);
+ get_nfs_version(ctx->nfs_mod);
ret = client->rpc_ops->submount(fc, server);
if (ret < 0) {
@@ -195,7 +195,6 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (IS_ERR(mnt))
goto out_fc;
- mntget(mnt); /* prevent immediate expiration */
if (timeout <= 0)
goto out_fc;
@@ -208,23 +207,24 @@ out_fc:
}
static int
-nfs_namespace_getattr(struct user_namespace *mnt_userns,
+nfs_namespace_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
if (NFS_FH(d_inode(path->dentry))->size != 0)
- return nfs_getattr(mnt_userns, path, stat, request_mask,
+ return nfs_getattr(idmap, path, stat, request_mask,
query_flags);
- generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
+ stat);
return 0;
}
static int
-nfs_namespace_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+nfs_namespace_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
if (NFS_FH(d_inode(dentry))->size != 0)
- return nfs_setattr(mnt_userns, dentry, attr);
+ return nfs_setattr(idmap, dentry, attr);
return -EACCES;
}
@@ -290,7 +290,8 @@ int nfs_do_submount(struct fs_context *fc)
nfs_errorf(fc, "NFS: Couldn't determine submount pathname");
ret = PTR_ERR(p);
} else {
- ret = vfs_parse_fs_string(fc, "source", p, buffer + 4096 - p);
+ ret = vfs_parse_fs_qstr(fc, "source",
+ &QSTR_LEN(p, buffer + 4096 - p));
if (!ret)
ret = vfs_get_tree(fc);
}
@@ -307,7 +308,7 @@ int nfs_submount(struct fs_context *fc, struct nfs_server *server)
int err;
/* Look it up again to get its attributes */
- err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry,
+ err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry, &dentry->d_name,
ctx->mntfh, ctx->clone_data.fattr);
dput(parent);
if (err != 0)
@@ -335,7 +336,7 @@ static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp)
num *= HZ;
*((int *)kp->arg) = num;
if (!list_empty(&nfs_automount_list))
- mod_delayed_work(system_wq, &nfs_automount_task, num);
+ mod_delayed_work(system_percpu_wq, &nfs_automount_task, num);
} else {
*((int *)kp->arg) = -1*HZ;
cancel_delayed_work(&nfs_automount_task);
@@ -354,7 +355,7 @@ static int param_get_nfs_timeout(char *buffer, const struct kernel_param *kp)
num = (num + (HZ - 1)) / HZ;
} else
num = -1;
- return scnprintf(buffer, PAGE_SIZE, "%li\n", num);
+ return sysfs_emit(buffer, "%li\n", num);
}
static const struct kernel_param_ops param_ops_nfs_timeout = {
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index c8374f74dce1..6ba3ea39e928 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -9,6 +9,7 @@
#include <linux/nfs4.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/sunrpc/stats.h>
struct bl_dev_msg {
int32_t status;
@@ -30,10 +31,15 @@ struct nfs_net {
unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
int cb_users[NFS4_MAX_MINOR_VERSION + 1];
-#endif
+#endif /* CONFIG_NFS_V4 */
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ struct list_head nfs4_data_server_cache;
+ spinlock_t nfs4_data_server_lock;
+#endif /* CONFIG_NFS_V4_1 */
struct nfs_netns_client *nfs_client;
spinlock_t nfs_client_lock;
ktime_t boot_time;
+ struct rpc_stat rpcstats;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_nfsfs;
#endif
diff --git a/fs/nfs/nfs.h b/fs/nfs/nfs.h
index 5ba00610aede..8a5f51be013a 100644
--- a/fs/nfs/nfs.h
+++ b/fs/nfs/nfs.h
@@ -18,11 +18,11 @@ struct nfs_subversion {
const struct rpc_version *rpc_vers; /* NFS version information */
const struct nfs_rpc_ops *rpc_ops; /* NFS operations */
const struct super_operations *sops; /* NFS Super operations */
- const struct xattr_handler **xattr; /* NFS xattr handlers */
- struct list_head list; /* List of NFS versions */
+ const struct xattr_handler * const *xattr; /* NFS xattr handlers */
};
-struct nfs_subversion *get_nfs_version(unsigned int);
+struct nfs_subversion *find_nfs_version(unsigned int);
+int get_nfs_version(struct nfs_subversion *);
void put_nfs_version(struct nfs_subversion *);
void register_nfs_version(struct nfs_subversion *);
void unregister_nfs_version(struct nfs_subversion *);
diff --git a/fs/nfs/nfs2super.c b/fs/nfs/nfs2super.c
index 467f21ee6a35..b1badc70bd71 100644
--- a/fs/nfs/nfs2super.c
+++ b/fs/nfs/nfs2super.c
@@ -26,6 +26,7 @@ static void __exit exit_nfs_v2(void)
unregister_nfs_version(&nfs_v2);
}
+MODULE_DESCRIPTION("NFSv2 client support");
MODULE_LICENSE("GPL");
module_init(init_nfs_v2);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 7fba7711e6b3..9eff09158518 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -22,14 +22,12 @@
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
-#include "nfstrace.h"
+#include <linux/nfs_common.h>
#include "internal.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -64,8 +62,6 @@
#define NFS_readdirres_sz (1+NFS_pagepad_sz)
#define NFS_statfsres_sz (1+NFS_info_sz)
-static int nfs_stat_to_errno(enum nfs_stat);
-
/*
* Encode/decode NFSv2 basic data types
*
@@ -949,13 +945,12 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
error = decode_filename_inline(xdr, &entry->name, &entry->len);
if (unlikely(error))
- return error;
+ return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
/*
* The type (size and byte order) of nfscookie isn't defined in
* RFC 1094. This implementation assumes that it's an XDR uint32.
*/
- entry->prev_cookie = entry->cookie;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
return -EAGAIN;
@@ -1055,70 +1050,6 @@ out_default:
return nfs_stat_to_errno(status);
}
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h
index 03a4e679fd99..b333ea119ef5 100644
--- a/fs/nfs/nfs3_fs.h
+++ b/fs/nfs/nfs3_fs.h
@@ -12,12 +12,11 @@
*/
#ifdef CONFIG_NFS_V3_ACL
extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type, bool rcu);
-extern int nfs3_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+extern int nfs3_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl);
extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
-extern const struct xattr_handler *nfs3_xattr_handlers[];
#else
static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 93de0b58647a..a126eb31f62f 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -21,9 +21,8 @@ static void nfs3_prepare_get_acl(struct posix_acl **p)
{
struct posix_acl *sentinel = uncached_acl_sentinel(current);
- if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) {
- /* Not the first reader or sentinel already in place. */
- }
+ /* If the ACL isn't being read yet, set our sentinel. */
+ cmpxchg(p, ACL_NOT_CACHED, sentinel);
}
static void nfs3_complete_get_acl(struct posix_acl **p, struct posix_acl *acl)
@@ -105,7 +104,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type, bool rcu)
switch (status) {
case 0:
- status = nfs_refresh_inode(inode, res.fattr);
+ nfs_refresh_inode(inode, res.fattr);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
@@ -255,23 +254,24 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
}
-int nfs3_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int nfs3_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type)
{
struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
+ struct inode *inode = d_inode(dentry);
int status;
if (S_ISDIR(inode->i_mode)) {
switch(type) {
case ACL_TYPE_ACCESS:
- alloc = get_acl(inode, ACL_TYPE_DEFAULT);
+ alloc = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(alloc))
goto fail;
dfacl = alloc;
break;
case ACL_TYPE_DEFAULT:
- alloc = get_acl(inode, ACL_TYPE_ACCESS);
+ alloc = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(alloc))
goto fail;
dfacl = acl;
@@ -299,12 +299,6 @@ fail:
goto out;
}
-const struct xattr_handler *nfs3_xattr_handlers[] = {
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
- NULL,
-};
-
static int
nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
size_t size, ssize_t *result)
@@ -312,7 +306,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
struct posix_acl *acl;
char *p = data + *result;
- acl = get_acl(inode, type);
+ acl = get_inode_acl(inode, type);
if (IS_ERR_OR_NULL(acl))
return 0;
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 5601e47360c2..5d97c1d38bb6 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -2,8 +2,11 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/sunrpc/addr.h>
+#include <net/handshake.h>
#include "internal.h"
#include "nfs3_fs.h"
+#include "netns.h"
+#include "sysfs.h"
#ifdef CONFIG_NFS_V3_ACL
static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program };
@@ -31,6 +34,8 @@ static void nfs_init_server_aclclient(struct nfs_server *server)
if (IS_ERR(server->client_acl))
goto out_noacl;
+ nfs_sysfs_link_rpc_client(server, server->client_acl, NULL);
+
/* No errors! Assume that Sun nfsacls are supported */
server->caps |= NFS_CAP_ACLS;
return;
@@ -78,10 +83,11 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source,
* the MDS.
*/
struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
- const struct sockaddr *ds_addr, int ds_addrlen,
+ const struct sockaddr_storage *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
{
struct rpc_timeout ds_timeout;
+ unsigned long connect_timeout = ds_timeo * (ds_retrans + 1) * HZ / 10;
struct nfs_client *mds_clp = mds_srv->nfs_client;
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
@@ -93,22 +99,40 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ .cert_serial = TLS_NO_CERT,
+ .privkey_serial = TLS_NO_PRIVKEY,
+ },
+ .connect_timeout = connect_timeout,
+ .reconnect_timeout = connect_timeout,
};
struct nfs_client *clp;
char buf[INET6_ADDRSTRLEN + 1];
/* fake a hostname because lockd wants it */
- if (rpc_ntop(ds_addr, buf, sizeof(buf)) <= 0)
+ if (rpc_ntop((struct sockaddr *)ds_addr, buf, sizeof(buf)) <= 0)
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
- if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP)
- cl_init.nconnect = mds_clp->cl_nconnect;
+ switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_clp->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.xprtsec = mds_clp->cl_xprtsec;
+ else
+ ds_proto = XPRT_TRANSPORT_TCP;
+ fallthrough;
+ case XPRT_TRANSPORT_RDMA:
+ case XPRT_TRANSPORT_TCP:
+ if (mds_clp->cl_nconnect > 1)
+ cl_init.nconnect = mds_clp->cl_nconnect;
+ }
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
- __set_bit(NFS_CS_NOPING, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
/* Use the MDS nfs_client cl_ipaddr. */
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 7100514d306b..a4cb67573aa7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -36,9 +36,10 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
res = rpc_call_sync(clnt, msg, flags);
if (res != -EJUKEBOX)
break;
- freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME);
+ __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
+ schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
- } while (!fatal_signal_pending(current));
+ } while (!fatal_signal_pending(current) && !nfs_current_task_exiting());
return res;
}
@@ -191,7 +192,7 @@ __nfs3_proc_lookup(struct inode *dir, const char *name, size_t len,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
+nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
unsigned short task_flags = 0;
@@ -201,8 +202,7 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
task_flags |= RPC_TASK_TIMEOUT;
dprintk("NFS call lookup %pd2\n", dentry);
- return __nfs3_proc_lookup(dir, dentry->d_name.name,
- dentry->d_name.len, fhandle, fattr,
+ return __nfs3_proc_lookup(dir, name->name, name->len, fhandle, fattr,
task_flags);
}
@@ -220,7 +220,8 @@ static int nfs3_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
task_flags);
}
-static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry,
+ const struct cred *cred)
{
struct nfs3_accessargs arg = {
.fh = NFS_FH(inode),
@@ -231,7 +232,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
.rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
.rpc_argp = &arg,
.rpc_resp = &res,
- .rpc_cred = entry->cred,
+ .rpc_cred = cred,
};
int status = -ENOMEM;
@@ -541,9 +542,10 @@ out:
}
static int
-nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct folio *folio,
unsigned int len, struct iattr *sattr)
{
+ struct page *page = &folio->page;
struct nfs3_createdata *data;
struct dentry *d_alias;
int status = -ENOMEM;
@@ -576,13 +578,13 @@ out:
return status;
}
-static int
+static struct dentry *
nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
- struct dentry *d_alias;
- int status = -ENOMEM;
+ struct dentry *ret = ERR_PTR(-ENOMEM);
+ int status;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -590,8 +592,9 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
if (data == NULL)
goto out;
- status = posix_acl_create(dir, &sattr->ia_mode, &default_acl, &acl);
- if (status)
+ ret = ERR_PTR(posix_acl_create(dir, &sattr->ia_mode,
+ &default_acl, &acl));
+ if (IS_ERR(ret))
goto out;
data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKDIR];
@@ -600,25 +603,27 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
data->arg.mkdir.len = dentry->d_name.len;
data->arg.mkdir.sattr = sattr;
- d_alias = nfs3_do_create(dir, dentry, data);
- status = PTR_ERR_OR_ZERO(d_alias);
+ ret = nfs3_do_create(dir, dentry, data);
- if (status != 0)
+ if (IS_ERR(ret))
goto out_release_acls;
- if (d_alias)
- dentry = d_alias;
+ if (ret)
+ dentry = ret;
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ if (status) {
+ dput(ret);
+ ret = ERR_PTR(status);
+ }
- dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
out:
nfs3_free_createdata(data);
- dprintk("NFS reply mkdir: %d\n", status);
- return status;
+ dprintk("NFS reply mkdir: %d\n", PTR_ERR_OR_ZERO(ret));
+ return ret;
}
static int
@@ -841,6 +846,41 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+static unsigned nfs3_localio_probe_throttle __read_mostly = 0;
+module_param(nfs3_localio_probe_throttle, uint, 0644);
+MODULE_PARM_DESC(nfs3_localio_probe_throttle,
+ "Probe for NFSv3 LOCALIO every N IO requests. Must be power-of-2, defaults to 0 (probing disabled).");
+
+static void nfs3_localio_probe(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ /* Throttled to reduce nfs_local_probe_async() frequency */
+ if (!nfs3_localio_probe_throttle || nfs_server_is_local(clp))
+ return;
+
+ /*
+ * Try (re)enabling LOCALIO if it isn't enabled -- the admin deems
+ * it worthwhile to periodically check if LOCALIO is possible by
+ * setting the 'nfs3_localio_probe_throttle' module parameter.
+ *
+ * This is useful if LOCALIO was previously enabled, but was
+ * disabled due to server restart, and IO has successfully
+ * completed in terms of normal RPC.
+ */
+ if ((clp->cl_uuid.nfs3_localio_probe_count++ &
+ (nfs3_localio_probe_throttle - 1)) == 0) {
+ if (!nfs_server_is_local(clp))
+ nfs_local_probe_async(clp);
+ }
+}
+
+#else
+static void nfs3_localio_probe(struct nfs_server *server) {}
+#endif
+
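To illustrate the throttle arithmetic (example values only): with nfs3_localio_probe_throttle set to 512, the test

	(clp->cl_uuid.nfs3_localio_probe_count++ & (512 - 1)) == 0

is true on the 1st, 513th, 1025th, ... completed read/write for this client, i.e. exactly once every 512 requests. A value that is not a power of two turns the mask into an irregular pattern instead of a fixed period, which is why the parameter description asks for a power of two.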
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
@@ -852,8 +892,11 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- if (task->tk_status >= 0 && !server->read_hdrsize)
- cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+ if (task->tk_status >= 0) {
+ if (!server->read_hdrsize)
+ cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+ nfs3_localio_probe(server);
+ }
nfs_invalidate_atime(inode);
nfs_refresh_inode(inode, &hdr->fattr);
@@ -883,8 +926,10 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
nfs_writeback_update_inode(hdr);
+ nfs3_localio_probe(NFS_SERVER(inode));
+ }
return 0;
}
@@ -960,7 +1005,7 @@ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
struct nfs_open_context *ctx = nfs_file_open_context(filp);
int status;
- if (fl->fl_flags & FL_CLOSE) {
+ if (fl->c.flc_flags & FL_CLOSE) {
l_ctx = nfs_get_lock_context(ctx);
if (IS_ERR(l_ctx))
l_ctx = NULL;
@@ -976,13 +1021,21 @@ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
return status;
}
-static int nfs3_have_delegation(struct inode *inode, fmode_t flags)
+static int nfs3_have_delegation(struct inode *inode, fmode_t type, int flags)
+{
+ return 0;
+}
+
+static int nfs3_return_delegation(struct inode *inode)
{
+ if (S_ISREG(inode->i_mode))
+ nfs_wb_all(inode);
return 0;
}
static const struct inode_operations nfs3_dir_inode_operations = {
.create = nfs_create,
+ .atomic_open = nfs_atomic_open_v23,
.lookup = nfs_lookup,
.link = nfs_link,
.unlink = nfs_unlink,
@@ -996,7 +1049,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
.setattr = nfs_setattr,
#ifdef CONFIG_NFS_V3_ACL
.listxattr = nfs3_listxattr,
- .get_acl = nfs3_get_acl,
+ .get_inode_acl = nfs3_get_acl,
.set_acl = nfs3_set_acl,
#endif
};
@@ -1007,7 +1060,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
.setattr = nfs_setattr,
#ifdef CONFIG_NFS_V3_ACL
.listxattr = nfs3_listxattr,
- .get_acl = nfs3_get_acl,
+ .get_inode_acl = nfs3_get_acl,
.set_acl = nfs3_set_acl,
#endif
};
@@ -1058,6 +1111,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.clear_acl_cache = forget_all_cached_acls,
.close_context = nfs_close_context,
.have_delegation = nfs3_have_delegation,
+ .return_delegation = nfs3_return_delegation,
.alloc_client = nfs_alloc_client,
.init_client = nfs_init_client,
.free_client = nfs_free_client,
diff --git a/fs/nfs/nfs3super.c b/fs/nfs/nfs3super.c
index 7c5809431e61..20a80478449e 100644
--- a/fs/nfs/nfs3super.c
+++ b/fs/nfs/nfs3super.c
@@ -14,9 +14,6 @@ struct nfs_subversion nfs_v3 = {
.rpc_vers = &nfs_version3,
.rpc_ops = &nfs_v3_clientops,
.sops = &nfs_sops,
-#ifdef CONFIG_NFS_V3_ACL
- .xattr = nfs3_xattr_handlers,
-#endif
};
static int __init init_nfs_v3(void)
@@ -30,6 +27,7 @@ static void __exit exit_nfs_v3(void)
unregister_nfs_version(&nfs_v3);
}
+MODULE_DESCRIPTION("NFSv3 client support");
MODULE_LICENSE("GPL");
module_init(init_nfs_v3);
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 9274c9c5efea..e17d72908412 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -21,14 +21,13 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
-#include "nfstrace.h"
+#include <linux/nfs_common.h>
+
#include "internal.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -91,8 +90,6 @@
NFS3_pagepad_sz)
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
-static int nfs3_stat_to_errno(enum nfs_stat);
-
/*
* Map file type to S_IFMT bits
*/
@@ -1261,6 +1258,8 @@ static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
static void encode_readdirplus3args(struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
+ uint32_t dircount = args->count;
+ uint32_t maxcount = args->count;
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
@@ -1273,9 +1272,8 @@ static void encode_readdirplus3args(struct xdr_stream *xdr,
* readdirplus: need dircount + buffer size.
* We just make sure we make dircount big enough
*/
- *p++ = cpu_to_be32(args->count >> 3);
-
- *p = cpu_to_be32(args->count);
+ *p++ = cpu_to_be32(dircount);
+ *p = cpu_to_be32(maxcount);
}
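For scale (illustrative numbers): with a typical args->count of 65536, the READDIRPLUS arguments previously went out as dircount = 8192 (count >> 3) and maxcount = 65536; after this change both advertise 65536, so the server rather than the client hint bounds how much directory information is returned.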
static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
@@ -1405,7 +1403,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1444,7 +1442,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1494,7 +1492,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1536,7 +1534,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1577,7 +1575,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1657,7 +1655,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1727,7 +1725,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1794,7 +1792,7 @@ out_default:
error = decode_wcc_data(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1834,7 +1832,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1880,7 +1878,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1925,7 +1923,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/**
@@ -1967,7 +1965,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
bool plus)
{
struct user_namespace *userns = rpc_userns(entry->server->client);
- struct nfs_entry old = *entry;
__be32 *p;
int error;
u64 new_cookie;
@@ -1987,15 +1984,15 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
error = decode_fileid3(xdr, &entry->ino);
if (unlikely(error))
- return error;
+ return -EAGAIN;
error = decode_inline_filename3(xdr, &entry->name, &entry->len);
if (unlikely(error))
- return error;
+ return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
error = decode_cookie3(xdr, &new_cookie);
if (unlikely(error))
- return error;
+ return -EAGAIN;
entry->d_type = DT_UNKNOWN;
@@ -2003,7 +2000,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
entry->fattr->valid = 0;
error = decode_post_op_attr(xdr, entry->fattr, userns);
if (unlikely(error))
- return error;
+ return -EAGAIN;
if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
@@ -2018,24 +2015,15 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
return -EAGAIN;
if (*p != xdr_zero) {
error = decode_nfs_fh3(xdr, entry->fh);
- if (unlikely(error)) {
- if (error == -E2BIG)
- goto out_truncated;
- return error;
- }
+ if (unlikely(error))
+ return -EAGAIN;
} else
zero_nfs_fh3(entry->fh);
}
- entry->prev_cookie = entry->cookie;
entry->cookie = new_cookie;
return 0;
-
-out_truncated:
- dprintk("NFS: directory entry contains invalid file handle\n");
- *entry = old;
- return -EAGAIN;
}
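The hunks above collapse most mid-entry decode failures into -EAGAIN so the readdir layer can stop filling the current page without failing the whole listing, while -ENAMETOOLONG is still surfaced to the caller. A minimal sketch of the assumed caller-side contract (the loop and helper name are illustrative, not the in-tree page filler):

/* Sketch only: how a caller is expected to treat the error codes above. */
static int example_fill_dir_page(struct xdr_stream *xdr,
				 struct nfs_entry *entry, bool plus)
{
	int error;

	for (;;) {
		error = nfs3_decode_dirent(xdr, entry, plus);
		if (error == -EAGAIN)
			break;		/* ran out of decodable data; keep the
					 * entries gathered so far */
		if (error)
			return error;	/* e.g. -ENAMETOOLONG is surfaced */
		/* ... add the decoded entry to the directory page ... */
	}
	return 0;
}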
/*
@@ -2110,7 +2098,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, rpc_rqst_userns(req));
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2176,7 +2164,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2228,6 +2216,7 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr,
/* ignore properties */
result->lease_time = 0;
result->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED;
+ result->xattr_support = 0;
return 0;
}
@@ -2251,7 +2240,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2312,7 +2301,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2358,7 +2347,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
@@ -2424,7 +2413,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2443,76 +2432,11 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs3_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs3_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
-
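With the private nfs3_stat_to_errno() and its table gone, the v3 XDR decoders rely on the shared nfs_stat_to_errno() made available through <linux/nfs_common.h> (included near the top of this file). A hedged, abridged sketch of that helper's expected contract; the authoritative table lives in the common NFS code:

/* Illustration only: map an NFS status to a negative errno, -EIO default. */
static int example_nfs_stat_to_errno(unsigned int status)
{
	static const struct { unsigned int stat; int err; } map[] = {
		{  0, 0       },	/* NFS_OK */
		{  1, -EPERM  },	/* NFSERR_PERM */
		{  2, -ENOENT },	/* NFSERR_NOENT */
		{ 13, -EACCES },	/* NFSERR_ACCES */
		{ 70, -ESTALE },	/* NFSERR_STALE */
	};
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].stat == status)
			return map[i].err;
	return -EIO;
}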
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index 0fe5aacbcfdf..aafd15a4afce 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -13,6 +13,7 @@
* more? Need to consider not to pre-alloc too much for a compound.
*/
#define PNFS_LAYOUTSTATS_MAXDEV (4)
+#define READ_PLUS_SCRATCH_SIZE (16)
/* nfs4.2proc.c */
#ifdef CONFIG_NFS_V4_2
@@ -20,6 +21,7 @@ int nfs42_proc_allocate(struct file *, loff_t, loff_t);
ssize_t nfs42_proc_copy(struct file *, loff_t, struct file *, loff_t, size_t,
struct nl4_server *, nfs4_stateid *, bool);
int nfs42_proc_deallocate(struct file *, loff_t, loff_t);
+int nfs42_proc_zero_range(struct file *, loff_t, loff_t);
loff_t nfs42_proc_llseek(struct file *, loff_t, int);
int nfs42_proc_layoutstats_generic(struct nfs_server *,
struct nfs42_layoutstat_data *);
@@ -54,11 +56,14 @@ int nfs42_proc_removexattr(struct inode *inode, const char *name);
* They would be 7 bytes long in the eventual buffer ("user.x\0"), and
* 8 bytes long XDR-encoded.
*
- * Include the trailing eof word as well.
+ * Include the trailing eof word as well and make the result a multiple
+ * of 4 bytes.
*/
static inline u32 nfs42_listxattr_xdrsize(u32 buflen)
{
- return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
+ u32 size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;
+
+ return (size + 3) & ~3;
}
#endif /* CONFIG_NFS_V4_2 */
#endif /* __LINUX_FS_NFS_NFS4_2_H */
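The new nfs42_listxattr_xdrsize() keeps the worst-case estimate but rounds it up to a whole number of XDR words. The arithmetic can be checked with a small standalone program (XATTR_USER_PREFIX_LEN is 5, for "user."; the 4 KiB buffer below is just an example):

#include <stdio.h>

#define XATTR_USER_PREFIX_LEN 5	/* strlen("user.") */

/* Mirrors the formula in the hunk above. */
static unsigned int listxattr_xdrsize(unsigned int buflen)
{
	unsigned int size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;

	return (size + 3) & ~3u;	/* round up to a multiple of 4 bytes */
}

int main(void)
{
	/* 8 * 4096 / 7 + 4 = 4685, rounded up to 4688 */
	printf("%u\n", listxattr_xdrsize(4096));
	return 0;
}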
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 08355b66e7cb..d537fb0c230e 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -21,6 +21,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
+static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
+ u64 *copied);
static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
@@ -46,7 +48,7 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
{
struct inode *inode = file_inode(filep);
struct nfs_server *server = NFS_SERVER(inode);
- u32 bitmask[3];
+ u32 bitmask[NFS_BITMASK_SZ];
struct nfs42_falloc_args args = {
.falloc_fh = NFS_FH(inode),
.falloc_offset = offset,
@@ -69,9 +71,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
return status;
}
- memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
- if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
- bitmask[1] |= FATTR4_WORD1_SPACE_USED;
+ nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
+ NFS_INO_INVALID_BLOCKS);
res.falloc_fattr = nfs_alloc_fattr();
if (!res.falloc_fattr)
@@ -79,10 +80,16 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
status = nfs4_call_sync(server->client, server, msg,
&args.seq_args, &res.seq_res, 0);
- if (status == 0)
+ if (status == 0) {
+ if (nfs_should_remove_suid(inode)) {
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode,
+ NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
+ spin_unlock(&inode->i_lock);
+ }
status = nfs_post_op_update_inode_force_wcc(inode,
res.falloc_fattr);
-
+ }
if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
trace_nfs4_fallocate(inode, &args, status);
else
@@ -107,6 +114,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
exception.inode = inode;
exception.state = lock->open_context->state;
+ nfs_file_block_o_direct(NFS_I(inode));
err = nfs_sync_inode(inode);
if (err)
goto out;
@@ -130,6 +138,7 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
};
struct inode *inode = file_inode(filep);
+ loff_t oldsize = i_size_read(inode);
int err;
if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
@@ -138,8 +147,13 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
inode_lock(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
- if (err == -EOPNOTSUPP)
- NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
+
+ if (err == 0)
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ offset + len);
+ else if (err == -EOPNOTSUPP)
+ NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
+ NFS_CAP_ZERO_RANGE);
inode_unlock(inode);
return err;
@@ -162,12 +176,53 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
if (err == 0)
truncate_pagecache_range(inode, offset, (offset + len) -1);
if (err == -EOPNOTSUPP)
- NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
+ NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE |
+ NFS_CAP_ZERO_RANGE);
+
+ inode_unlock(inode);
+ return err;
+}
+
+int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
+ };
+ struct inode *inode = file_inode(filep);
+ loff_t oldsize = i_size_read(inode);
+ int err;
+
+ if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
+ return -EOPNOTSUPP;
+
+ inode_lock(inode);
+
+ err = nfs42_proc_fallocate(&msg, filep, offset, len);
+ if (err == 0) {
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ offset + len);
+ truncate_pagecache_range(inode, offset, (offset + len) -1);
+ } else if (err == -EOPNOTSUPP)
+ NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;
inode_unlock(inode);
return err;
}
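nfs42_proc_zero_range() above reuses the fallocate plumbing: the nfs4_xdr_enc_zero_range() encoder added later in this patch sends DEALLOCATE followed by ALLOCATE in a single compound, and -EOPNOTSUPP clears NFS_CAP_ZERO_RANGE so the generic fallback is used next time. A hedged sketch of how a fallocate() handler might dispatch to it; the handler below and its mode handling are illustrative, not the in-tree nfs4 file operations:

/* Sketch only: assumed dispatch for the three NFSv4.2 fallocate modes. */
static long example_nfs4_fallocate(struct file *filp, int mode,
				   loff_t offset, loff_t len)
{
	if (mode & FALLOC_FL_PUNCH_HOLE)
		return nfs42_proc_deallocate(filp, offset, len);
	if (mode & FALLOC_FL_ZERO_RANGE)
		return nfs42_proc_zero_range(filp, offset, len);
	return nfs42_proc_allocate(filp, offset, len);
}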
+static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
+ struct nfs_server *src_server,
+ struct nfs4_copy_state *copy)
+{
+ spin_lock(&dst_server->nfs_client->cl_lock);
+ list_del_init(&copy->copies);
+ spin_unlock(&dst_server->nfs_client->cl_lock);
+ if (dst_server != src_server) {
+ spin_lock(&src_server->nfs_client->cl_lock);
+ list_del_init(&copy->src_copies);
+ spin_unlock(&src_server->nfs_client->cl_lock);
+ }
+}
+
static int handle_async_copy(struct nfs42_copy_res *res,
struct nfs_server *dst_server,
struct nfs_server *src_server,
@@ -176,28 +231,30 @@ static int handle_async_copy(struct nfs42_copy_res *res,
nfs4_stateid *src_stateid,
bool *restart)
{
- struct nfs4_copy_state *copy, *tmp_copy;
- int status = NFS4_OK;
- bool found_pending = false;
+ struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
struct nfs_open_context *src_ctx = nfs_file_open_context(src);
+ struct nfs_client *clp = dst_server->nfs_client;
+ unsigned long timeout = 3 * HZ;
+ int status = NFS4_OK;
+ u64 copied;
- copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+ copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
return -ENOMEM;
spin_lock(&dst_server->nfs_client->cl_lock);
- list_for_each_entry(tmp_copy,
+ list_for_each_entry(iter,
&dst_server->nfs_client->pending_cb_stateids,
copies) {
- if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
+ if (memcmp(&res->write_res.stateid, &iter->stateid,
NFS4_STATEID_SIZE))
continue;
- found_pending = true;
- list_del(&tmp_copy->copies);
+ tmp_copy = iter;
+ list_del(&iter->copies);
break;
}
- if (found_pending) {
+ if (tmp_copy) {
spin_unlock(&dst_server->nfs_client->cl_lock);
kfree(copy);
copy = tmp_copy;
@@ -214,19 +271,16 @@ static int handle_async_copy(struct nfs42_copy_res *res,
if (dst_server != src_server) {
spin_lock(&src_server->nfs_client->cl_lock);
- list_add_tail(&copy->src_copies, &src_server->ss_copies);
+ list_add_tail(&copy->src_copies, &src_server->ss_src_copies);
spin_unlock(&src_server->nfs_client->cl_lock);
}
- status = wait_for_completion_interruptible(&copy->completion);
- spin_lock(&dst_server->nfs_client->cl_lock);
- list_del_init(&copy->copies);
- spin_unlock(&dst_server->nfs_client->cl_lock);
- if (dst_server != src_server) {
- spin_lock(&src_server->nfs_client->cl_lock);
- list_del_init(&copy->src_copies);
- spin_unlock(&src_server->nfs_client->cl_lock);
- }
+wait:
+ status = wait_for_completion_interruptible_timeout(&copy->completion,
+ timeout);
+ if (!status)
+ goto timeout;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
if (status == -ERESTARTSYS) {
goto out_cancel;
} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
@@ -236,6 +290,7 @@ static int handle_async_copy(struct nfs42_copy_res *res,
}
out:
res->write_res.count = copy->count;
+ /* Copy out the updated write verifier provided by CB_OFFLOAD. */
memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
status = -copy->error;
@@ -247,6 +302,39 @@ out_cancel:
if (!nfs42_files_from_same_server(src, dst))
nfs42_do_offload_cancel_async(src, src_stateid);
goto out_free;
+timeout:
+ timeout <<= 1;
+ if (timeout > (clp->cl_lease_time >> 1))
+ timeout = clp->cl_lease_time >> 1;
+ status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
+ if (status == -EINPROGRESS)
+ goto wait;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
+ switch (status) {
+ case 0:
+ /* The server recognized the copy stateid, so it hasn't
+ * rebooted. Don't overwrite the verifier returned in the
+ * COPY result. */
+ res->write_res.count = copied;
+ goto out_free;
+ case -EREMOTEIO:
+ /* COPY operation failed on the server. */
+ status = -EOPNOTSUPP;
+ res->write_res.count = copied;
+ goto out_free;
+ case -EBADF:
+ /* Server did not recognize the copy stateid. It has
+ * probably restarted and lost track of the copy. */
+ res->write_res.count = 0;
+ status = -EOPNOTSUPP;
+ break;
+ case -EOPNOTSUPP:
+ /* RFC 7862 REQUIREs the server to support OFFLOAD_STATUS when
+ * it has signed up for an async COPY, so this server is not
+ * spec-compliant. */
+ res->write_res.count = 0;
+ }
+ goto out_free;
}
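The timeout path above doubles the wait between OFFLOAD_STATUS probes and caps it at half the server's lease time, so a stalled copy is polled with exponential backoff rather than waited on forever. The progression can be sanity-checked with a small standalone loop (the 90-second lease, and therefore the 45-second cap, is assumed purely for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long hz = 1000;		/* stand-in for HZ */
	unsigned long timeout = 3 * hz;		/* initial 3 s wait */
	unsigned long lease_time = 90 * hz;	/* assumed 90 s lease */
	int i;

	/* Mirrors handle_async_copy(): prints 3, 6, 12, 24, 45, 45 seconds */
	for (i = 0; i < 6; i++) {
		printf("wait %lus\n", timeout / hz);
		timeout <<= 1;
		if (timeout > (lease_time >> 1))
			timeout = lease_time >> 1;
	}
	return 0;
}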
static int process_copy_commit(struct file *dst, loff_t pos_dst,
@@ -255,7 +343,7 @@ static int process_copy_commit(struct file *dst, loff_t pos_dst,
struct nfs_commitres cres;
int status = -ENOMEM;
- cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
+ cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
if (!cres.verf)
goto out;
@@ -275,21 +363,28 @@ out:
/**
* nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
- * @inode: pointer to destination inode
+ * @file: pointer to destination file
* @pos: destination offset
* @len: copy length
+ * @oldsize: length of the file prior to clone/copy
*
* Punch a hole in the inode page cache, so that the NFS client will
* know to retrieve new data.
* Update the file size if necessary, and then mark the inode as having
* invalid cached values for change attribute, ctime, mtime and space used.
*/
-static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
+static void nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len,
+ loff_t oldsize)
{
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
loff_t newsize = pos + len;
loff_t end = newsize - 1;
- truncate_pagecache_range(inode, pos, end);
+ nfs_truncate_last_folio(mapping, oldsize, pos);
+ WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
+ end >> PAGE_SHIFT));
+
spin_lock(&inode->i_lock);
if (newsize > i_size_read(inode))
i_size_write(inode, newsize);
@@ -321,6 +416,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
struct nfs_server *src_server = NFS_SERVER(src_inode);
loff_t pos_src = args->src_pos;
loff_t pos_dst = args->dst_pos;
+ loff_t oldsize_dst = i_size_read(dst_inode);
size_t count = args->count;
ssize_t status;
@@ -336,7 +432,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
return status;
}
}
- status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
+ status = nfs_filemap_write_and_wait_range(src->f_mapping,
pos_src, pos_src + (loff_t)count - 1);
if (status)
return status;
@@ -349,6 +445,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
return status;
}
+ nfs_file_block_o_direct(NFS_I(dst_inode));
status = nfs_sync_inode(dst_inode);
if (status)
return status;
@@ -356,7 +453,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
res->commit_res.verf = NULL;
if (args->sync) {
res->commit_res.verf =
- kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
+ kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
if (!res->commit_res.verf)
return -ENOMEM;
}
@@ -394,7 +491,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
goto out;
}
- nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
+ nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst);
nfs_invalidate_atime(src_inode);
status = res->write_res.count;
out:
@@ -455,7 +552,8 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
if (err >= 0)
break;
- if (err == -ENOTSUPP &&
+ if ((err == -ENOTSUPP ||
+ err == -NFS4ERR_OFFLOAD_DENIED) &&
nfs42_files_from_same_server(src, dst)) {
err = -EOPNOTSUPP;
break;
@@ -465,8 +563,9 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
continue;
}
break;
- } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
- args.sync = true;
+ } else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
+ args.sync != res.synchronous) {
+ args.sync = res.synchronous;
dst_exception.retry = 1;
continue;
} else if ((err == -ESTALE ||
@@ -490,15 +589,15 @@ out_put_src_lock:
return err;
}
-struct nfs42_offloadcancel_data {
+struct nfs42_offload_data {
struct nfs_server *seq_server;
struct nfs42_offload_status_args args;
struct nfs42_offload_status_res res;
};
-static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
+static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs42_offloadcancel_data *data = calldata;
+ struct nfs42_offload_data *data = calldata;
nfs4_setup_sequence(data->seq_server->nfs_client,
&data->args.osa_seq_args,
@@ -507,7 +606,7 @@ static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
- struct nfs42_offloadcancel_data *data = calldata;
+ struct nfs42_offload_data *data = calldata;
trace_nfs4_offload_cancel(&data->args, task->tk_status);
nfs41_sequence_done(task, &data->res.osr_seq_res);
@@ -517,22 +616,22 @@ static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
rpc_restart_call_prepare(task);
}
-static void nfs42_free_offloadcancel_data(void *data)
+static void nfs42_offload_release(void *data)
{
kfree(data);
}
static const struct rpc_call_ops nfs42_offload_cancel_ops = {
- .rpc_call_prepare = nfs42_offload_cancel_prepare,
+ .rpc_call_prepare = nfs42_offload_prepare,
.rpc_call_done = nfs42_offload_cancel_done,
- .rpc_release = nfs42_free_offloadcancel_data,
+ .rpc_release = nfs42_offload_release,
};
static int nfs42_do_offload_cancel_async(struct file *dst,
nfs4_stateid *stateid)
{
struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
- struct nfs42_offloadcancel_data *data = NULL;
+ struct nfs42_offload_data *data = NULL;
struct nfs_open_context *ctx = nfs_file_open_context(dst);
struct rpc_task *task;
struct rpc_message msg = {
@@ -544,14 +643,14 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
.rpc_message = &msg,
.callback_ops = &nfs42_offload_cancel_ops,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
int status;
if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
return -EOPNOTSUPP;
- data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
+ data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -574,6 +673,108 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
return status;
}
+static int
+_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
+ struct nfs42_offload_data *data)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = ctx->cred,
+ };
+ int status;
+
+ status = nfs4_call_sync(server->client, server, &msg,
+ &data->args.osa_seq_args,
+ &data->res.osr_seq_res, 1);
+ trace_nfs4_offload_status(&data->args, status);
+ switch (status) {
+ case 0:
+ break;
+
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ /*
+ * Server does not recognize the COPY stateid. CB_OFFLOAD
+ * could have purged it, or server might have rebooted.
+ * Since COPY stateids don't have an associated inode,
+ * avoid triggering state recovery.
+ */
+ status = -EBADF;
+ break;
+ case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
+ case -EOPNOTSUPP:
+ server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * nfs42_proc_offload_status - Poll completion status of an async copy operation
+ * @dst: handle of file being copied into
+ * @stateid: copy stateid (from async COPY result)
+ * @copied: OUT: number of bytes copied so far
+ *
+ * Return values:
+ * %0: Server returned an NFS4_OK completion status
+ * %-EINPROGRESS: Server returned no completion status
+ * %-EREMOTEIO: Server returned an error completion status
+ * %-EBADF: Server did not recognize the copy stateid
+ * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
+ * %-ERESTARTSYS: Wait interrupted by signal
+ *
+ * Other negative errnos indicate the client could not complete the
+ * request.
+ */
+static int
+nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
+{
+ struct inode *inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .inode = inode,
+ };
+ struct nfs42_offload_data *data;
+ int status;
+
+ if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->seq_server = server;
+ data->args.osa_src_fh = NFS_FH(inode);
+ memcpy(&data->args.osa_stateid, stateid,
+ sizeof(data->args.osa_stateid));
+ exception.stateid = &data->args.osa_stateid;
+ do {
+ status = _nfs42_proc_offload_status(server, dst, data);
+ if (status == -EOPNOTSUPP)
+ goto out;
+ status = nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ if (status)
+ goto out;
+
+ *copied = data->res.osr_count;
+ if (!data->res.complete_count)
+ status = -EINPROGRESS;
+ else if (data->res.osr_complete != NFS_OK)
+ status = -EREMOTEIO;
+
+out:
+ kfree(data);
+ return status;
+}
+
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
struct nfs42_copy_notify_args *args,
struct nfs42_copy_notify_res *res)
@@ -590,8 +791,10 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
ctx = get_nfs_open_context(nfs_file_open_context(src));
l_ctx = nfs_get_lock_context(ctx);
- if (IS_ERR(l_ctx))
- return PTR_ERR(l_ctx);
+ if (IS_ERR(l_ctx)) {
+ status = PTR_ERR(l_ctx);
+ goto out;
+ }
status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
FMODE_READ);
@@ -599,7 +802,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
if (status) {
if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID;
- return status;
+ goto out;
}
status = nfs4_call_sync(src_server->client, src_server, &msg,
@@ -608,6 +811,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
if (status == -ENOTSUPP)
src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
+out:
put_nfs_open_context(nfs_file_open_context(src));
return status;
}
@@ -625,7 +829,7 @@ int nfs42_proc_copy_notify(struct file *src, struct file *dst,
if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
return -EOPNOTSUPP;
- args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
+ args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL);
if (args == NULL)
return -ENOMEM;
@@ -850,7 +1054,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
.rpc_message = &msg,
.callback_ops = &nfs42_layoutstat_ops,
.callback_data = data,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
struct rpc_task *task;
@@ -1005,7 +1209,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
struct rpc_task_setup task_setup = {
.rpc_message = &msg,
.callback_ops = &nfs42_layouterror_ops,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
unsigned int i;
@@ -1013,7 +1217,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
return -EOPNOTSUPP;
if (n > NFS42_LAYOUTERROR_MAX)
return -EINVAL;
- data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
+ data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
if (!data)
return -ENOMEM;
for (i = 0; i < n; i++) {
@@ -1042,17 +1246,19 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
struct inode *src_inode = file_inode(src_f);
struct inode *dst_inode = file_inode(dst_f);
struct nfs_server *server = NFS_SERVER(dst_inode);
+ __u32 dst_bitmask[NFS_BITMASK_SZ];
struct nfs42_clone_args args = {
.src_fh = NFS_FH(src_inode),
.dst_fh = NFS_FH(dst_inode),
.src_offset = src_offset,
.dst_offset = dst_offset,
.count = count,
- .dst_bitmask = server->cache_consistency_bitmask,
+ .dst_bitmask = dst_bitmask,
};
struct nfs42_clone_res res = {
.server = server,
};
+ loff_t oldsize_dst = i_size_read(dst_inode);
int status;
msg->rpc_argp = &args;
@@ -1077,11 +1283,17 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
if (!res.dst_fattr)
return -ENOMEM;
+ nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
+ dst_inode, NFS_INO_INVALID_BLOCKS);
+
status = nfs4_call_sync(server->client, server, msg,
&args.seq_args, &res.seq_res, 0);
trace_nfs4_clone(src_inode, dst_inode, &args, status);
if (status == 0) {
- nfs42_copy_dest_done(dst_inode, dst_offset, count);
+ /* a zero-length count means clone to EOF in src */
+ if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
+ count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
+ nfs42_copy_dest_done(dst_f, dst_offset, count, oldsize_dst);
status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
}
@@ -1163,6 +1375,7 @@ static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
&res.seq_res, 1);
+ trace_nfs4_removexattr(inode, name, ret);
if (!ret)
nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
@@ -1173,15 +1386,19 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
const void *buf, size_t buflen, int flags)
{
struct nfs_server *server = NFS_SERVER(inode);
+ __u32 bitmask[NFS_BITMASK_SZ];
struct page *pages[NFS4XATTR_MAXPAGES];
struct nfs42_setxattrargs arg = {
.fh = NFS_FH(inode),
+ .bitmask = bitmask,
.xattr_pages = pages,
.xattr_len = buflen,
.xattr_name = name,
.xattr_flags = flags,
};
- struct nfs42_setxattrres res;
+ struct nfs42_setxattrres res = {
+ .server = server,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
.rpc_argp = &arg,
@@ -1193,22 +1410,36 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
if (buflen > server->sxasize)
return -ERANGE;
+ res.fattr = nfs_alloc_fattr();
+ if (!res.fattr)
+ return -ENOMEM;
+
if (buflen > 0) {
np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
- if (np < 0)
- return np;
+ if (np < 0) {
+ ret = np;
+ goto out;
+ }
} else
np = 0;
+ nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask,
+ inode, NFS_INO_INVALID_CHANGE);
+
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
&res.seq_res, 1);
+ trace_nfs4_setxattr(inode, name, ret);
for (; np > 0; np--)
put_page(pages[np - 1]);
- if (!ret)
+ if (!ret) {
nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
+ ret = nfs_post_op_update_inode(inode, res.fattr);
+ }
+out:
+ kfree(res.fattr);
return ret;
}
@@ -1234,6 +1465,7 @@ static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
&res.seq_res, 0);
+ trace_nfs4_getxattr(inode, name, ret);
if (ret < 0)
return ret;
@@ -1282,7 +1514,7 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
ret = -ENOMEM;
- res.scratch = alloc_page(GFP_KERNEL);
+ res.scratch = folio_alloc(GFP_KERNEL, 0);
if (!res.scratch)
goto out;
@@ -1305,6 +1537,7 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
&res.seq_res, 0);
+ trace_nfs4_listxattr(inode, ret);
if (ret >= 0) {
ret = res.copied;
@@ -1319,7 +1552,7 @@ out_free_pages:
}
kfree(pages);
out_free_scratch:
- __free_page(res.scratch);
+ folio_put(res.scratch);
out:
return ret;
@@ -1340,7 +1573,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
for (i = 0; i < np; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i]) {
- np = i + 1;
err = -ENOMEM;
goto out;
}
@@ -1364,8 +1596,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
} while (exception.retry);
out:
- while (--np >= 0)
- __free_page(pages[np]);
+ while (--i >= 0)
+ __free_page(pages[i]);
kfree(pages);
return err;
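The nfs42_proc_getxattr() cleanup above now frees only the pages that were actually allocated: on an allocation failure, the old code bumped np past the failed slot, so the unwind loop could hand the just-failed (NULL) slot to __free_page(). The corrected idiom, in isolation (illustrative helper, not in-tree code):

/* Allocate n pages, freeing only what was actually allocated on failure. */
static int example_alloc_page_array(struct page **pages, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}
	return 0;

out_free:
	while (i-- > 0)
		__free_page(pages[i]);
	return -ENOMEM;
}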
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index 1c4d2a05b401..37d79400e5f4 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -132,7 +132,7 @@ nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
- return list_lru_add(lru, &entry->lru);
+ return list_lru_add_obj(lru, &entry->lru);
}
static bool
@@ -143,7 +143,7 @@ nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
- return list_lru_del(lru, &entry->lru);
+ return list_lru_del_obj(lru, &entry->lru);
}
/*
@@ -199,7 +199,7 @@ nfs4_xattr_alloc_entry(const char *name, const void *value,
flags = NFS4_XATTR_ENTRY_EXTVAL;
}
- buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
+ buf = kmalloc(alloclen, GFP_KERNEL);
if (buf == NULL)
return NULL;
entry = (struct nfs4_xattr_entry *)buf;
@@ -213,7 +213,7 @@ nfs4_xattr_alloc_entry(const char *name, const void *value,
if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
- valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
+ valp = kvmalloc(len, GFP_KERNEL);
if (valp == NULL) {
kfree(buf);
return NULL;
@@ -289,8 +289,7 @@ nfs4_xattr_alloc_cache(void)
{
struct nfs4_xattr_cache *cache;
- cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
- GFP_KERNEL_ACCOUNT | GFP_NOFS);
+ cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, GFP_KERNEL);
if (cache == NULL)
return NULL;
@@ -350,7 +349,7 @@ nfs4_xattr_cache_unlink(struct inode *inode)
oldcache = nfsi->xattr_cache;
if (oldcache != NULL) {
- list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
+ list_lru_del_obj(&nfs4_xattr_cache_lru, &oldcache->lru);
oldcache->inode = NULL;
}
nfsi->xattr_cache = NULL;
@@ -475,7 +474,7 @@ nfs4_xattr_get_cache(struct inode *inode, int add)
kref_get(&cache->ref);
nfsi->xattr_cache = cache;
cache->inode = inode;
- list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
+ list_lru_add_obj(&nfs4_xattr_cache_lru, &cache->lru);
}
spin_unlock(&inode->i_lock);
@@ -797,32 +796,13 @@ static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
struct shrink_control *sc);
-static struct shrinker nfs4_xattr_cache_shrinker = {
- .count_objects = nfs4_xattr_cache_count,
- .scan_objects = nfs4_xattr_cache_scan,
- .seeks = DEFAULT_SEEKS,
- .flags = SHRINKER_MEMCG_AWARE,
-};
-
-static struct shrinker nfs4_xattr_entry_shrinker = {
- .count_objects = nfs4_xattr_entry_count,
- .scan_objects = nfs4_xattr_entry_scan,
- .seeks = DEFAULT_SEEKS,
- .batch = 512,
- .flags = SHRINKER_MEMCG_AWARE,
-};
-
-static struct shrinker nfs4_xattr_large_entry_shrinker = {
- .count_objects = nfs4_xattr_entry_count,
- .scan_objects = nfs4_xattr_entry_scan,
- .seeks = 1,
- .batch = 512,
- .flags = SHRINKER_MEMCG_AWARE,
-};
+static struct shrinker *nfs4_xattr_cache_shrinker;
+static struct shrinker *nfs4_xattr_entry_shrinker;
+static struct shrinker *nfs4_xattr_large_entry_shrinker;
static enum lru_status
cache_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *dispose = arg;
struct inode *inode;
@@ -887,7 +867,7 @@ nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
static enum lru_status
entry_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *dispose = arg;
struct nfs4_xattr_bucket *bucket;
@@ -944,7 +924,7 @@ nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
struct nfs4_xattr_entry *entry;
struct list_lru *lru;
- lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
+ lru = (shrink == nfs4_xattr_large_entry_shrinker) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);
@@ -972,7 +952,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
unsigned long count;
struct list_lru *lru;
- lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
+ lru = (shrink == nfs4_xattr_large_entry_shrinker) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
count = list_lru_shrink_count(lru, sc);
@@ -982,7 +962,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
static void nfs4_xattr_cache_init_once(void *p)
{
- struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;
+ struct nfs4_xattr_cache *cache = p;
spin_lock_init(&cache->listxattr_lock);
atomic_long_set(&cache->nent, 0);
@@ -992,54 +972,84 @@ static void nfs4_xattr_cache_init_once(void *p)
INIT_LIST_HEAD(&cache->dispose);
}
+typedef unsigned long (*count_objects_cb)(struct shrinker *s,
+ struct shrink_control *sc);
+typedef unsigned long (*scan_objects_cb)(struct shrinker *s,
+ struct shrink_control *sc);
+
+static int __init nfs4_xattr_shrinker_init(struct shrinker **shrinker,
+ struct list_lru *lru, const char *name,
+ count_objects_cb count,
+ scan_objects_cb scan, long batch, int seeks)
+{
+ int ret;
+
+ *shrinker = shrinker_alloc(SHRINKER_MEMCG_AWARE, name);
+ if (!*shrinker)
+ return -ENOMEM;
+
+ ret = list_lru_init_memcg(lru, *shrinker);
+ if (ret) {
+ shrinker_free(*shrinker);
+ return ret;
+ }
+
+ (*shrinker)->count_objects = count;
+ (*shrinker)->scan_objects = scan;
+ (*shrinker)->batch = batch;
+ (*shrinker)->seeks = seeks;
+
+ shrinker_register(*shrinker);
+
+ return ret;
+}
+
+static void nfs4_xattr_shrinker_destroy(struct shrinker *shrinker,
+ struct list_lru *lru)
+{
+ shrinker_free(shrinker);
+ list_lru_destroy(lru);
+}
+
int __init nfs4_xattr_cache_init(void)
{
int ret = 0;
nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
sizeof(struct nfs4_xattr_cache), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ (SLAB_RECLAIM_ACCOUNT),
nfs4_xattr_cache_init_once);
if (nfs4_xattr_cache_cachep == NULL)
return -ENOMEM;
- ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
- &nfs4_xattr_large_entry_shrinker);
- if (ret)
- goto out4;
-
- ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
- &nfs4_xattr_entry_shrinker);
- if (ret)
- goto out3;
-
- ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
- &nfs4_xattr_cache_shrinker);
- if (ret)
- goto out2;
-
- ret = register_shrinker(&nfs4_xattr_cache_shrinker);
+ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_cache_shrinker,
+ &nfs4_xattr_cache_lru, "nfs-xattr_cache",
+ nfs4_xattr_cache_count,
+ nfs4_xattr_cache_scan, 0, DEFAULT_SEEKS);
if (ret)
goto out1;
- ret = register_shrinker(&nfs4_xattr_entry_shrinker);
+ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_entry_shrinker,
+ &nfs4_xattr_entry_lru, "nfs-xattr_entry",
+ nfs4_xattr_entry_count,
+ nfs4_xattr_entry_scan, 512, DEFAULT_SEEKS);
if (ret)
- goto out;
+ goto out2;
- ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
+ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_large_entry_shrinker,
+ &nfs4_xattr_large_entry_lru,
+ "nfs-xattr_large_entry",
+ nfs4_xattr_entry_count,
+ nfs4_xattr_entry_scan, 512, 1);
if (!ret)
return 0;
- unregister_shrinker(&nfs4_xattr_entry_shrinker);
-out:
- unregister_shrinker(&nfs4_xattr_cache_shrinker);
-out1:
- list_lru_destroy(&nfs4_xattr_cache_lru);
+ nfs4_xattr_shrinker_destroy(nfs4_xattr_entry_shrinker,
+ &nfs4_xattr_entry_lru);
out2:
- list_lru_destroy(&nfs4_xattr_entry_lru);
-out3:
- list_lru_destroy(&nfs4_xattr_large_entry_lru);
-out4:
+ nfs4_xattr_shrinker_destroy(nfs4_xattr_cache_shrinker,
+ &nfs4_xattr_cache_lru);
+out1:
kmem_cache_destroy(nfs4_xattr_cache_cachep);
return ret;
@@ -1047,11 +1057,11 @@ out4:
void nfs4_xattr_cache_exit(void)
{
- unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
- unregister_shrinker(&nfs4_xattr_entry_shrinker);
- unregister_shrinker(&nfs4_xattr_cache_shrinker);
- list_lru_destroy(&nfs4_xattr_large_entry_lru);
- list_lru_destroy(&nfs4_xattr_entry_lru);
- list_lru_destroy(&nfs4_xattr_cache_lru);
+ nfs4_xattr_shrinker_destroy(nfs4_xattr_large_entry_shrinker,
+ &nfs4_xattr_large_entry_lru);
+ nfs4_xattr_shrinker_destroy(nfs4_xattr_entry_shrinker,
+ &nfs4_xattr_entry_lru);
+ nfs4_xattr_shrinker_destroy(nfs4_xattr_cache_shrinker,
+ &nfs4_xattr_cache_lru);
kmem_cache_destroy(nfs4_xattr_cache_cachep);
}
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index c8bad735e4c1..e10d83ba835e 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -7,6 +7,9 @@
#include "nfs42.h"
+/* Not limited by NFS itself, limited by the generic xattr code */
+#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX)
+
#define encode_fallocate_maxsz (encode_stateid_maxsz + \
2 /* offset */ + \
2 /* length */)
@@ -32,6 +35,11 @@
#define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_offload_cancel_maxsz (op_decode_hdr_maxsz)
+#define encode_offload_status_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_offload_status_maxsz (op_decode_hdr_maxsz + \
+ 2 /* osr_count */ + \
+ 2 /* osr_complete */)
#define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE) + \
1 + /* nl4_type */ \
@@ -47,13 +55,20 @@
#define decode_deallocate_maxsz (op_decode_hdr_maxsz)
#define encode_read_plus_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
-#define NFS42_READ_PLUS_SEGMENT_SIZE (1 /* data_content4 */ + \
+#define NFS42_READ_PLUS_DATA_SEGMENT_SIZE \
+ (1 /* data_content4 */ + \
+ 2 /* data_info4.di_offset */ + \
+ 1 /* data_info4.di_length */)
+#define NFS42_READ_PLUS_HOLE_SEGMENT_SIZE \
+ (1 /* data_content4 */ + \
2 /* data_info4.di_offset */ + \
2 /* data_info4.di_length */)
+#define READ_PLUS_SEGMENT_SIZE_DIFF (NFS42_READ_PLUS_HOLE_SEGMENT_SIZE - \
+ NFS42_READ_PLUS_DATA_SEGMENT_SIZE)
#define decode_read_plus_maxsz (op_decode_hdr_maxsz + \
1 /* rpr_eof */ + \
1 /* rpr_contents count */ + \
- 2 * NFS42_READ_PLUS_SEGMENT_SIZE)
+ NFS42_READ_PLUS_HOLE_SEGMENT_SIZE)
#define encode_seek_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + \
2 /* offset */ + \
@@ -88,6 +103,18 @@
2 /* dst offset */ + \
2 /* count */)
#define decode_clone_maxsz (op_decode_hdr_maxsz)
+#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \
+ nfs4_xattr_name_maxsz)
+#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + pagepad_maxsz)
+#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \
+ 1 + nfs4_xattr_name_maxsz + 1)
+#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
+#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1)
+#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
+#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
+ nfs4_xattr_name_maxsz)
+#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz)
#define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
@@ -121,10 +148,20 @@
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
+#define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_status_maxsz)
+#define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_status_maxsz)
#define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_copy_notify_maxsz)
#define NFS4_dec_copy_notify_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_copy_notify_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
@@ -137,6 +174,18 @@
decode_putfh_maxsz + \
decode_deallocate_maxsz + \
decode_getattr_maxsz)
+#define NFS4_enc_zero_range_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_deallocate_maxsz + \
+ encode_allocate_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_zero_range_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_deallocate_maxsz + \
+ decode_allocate_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_read_plus_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -185,55 +234,40 @@
decode_putfh_maxsz + \
decode_clone_maxsz + \
decode_getattr_maxsz)
-
-/* Not limited by NFS itself, limited by the generic xattr code */
-#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX)
-
-#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \
- nfs4_xattr_name_maxsz)
-#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + pagepad_maxsz)
-#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \
- 1 + nfs4_xattr_name_maxsz + 1)
-#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1)
-#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
-#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
- nfs4_xattr_name_maxsz)
-#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
- decode_change_info_maxsz)
-
-#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_getxattr_maxsz)
-#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_getxattr_maxsz)
-#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_setxattr_maxsz)
-#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_setxattr_maxsz)
-#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_listxattrs_maxsz)
-#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_listxattrs_maxsz)
-#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_removexattr_maxsz)
-#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_removexattr_maxsz)
+#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getxattr_maxsz)
+#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getxattr_maxsz)
+#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_setxattr_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_setxattr_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_listxattrs_maxsz)
+#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_listxattrs_maxsz)
+#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_removexattr_maxsz)
+#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_removexattr_maxsz)
/*
* These values specify the maximum amount of data that is not
@@ -316,6 +350,18 @@ static void encode_copy(struct xdr_stream *xdr,
encode_nl4_server(xdr, args->cp_src);
}
+static void encode_copy_commit(struct xdr_stream *xdr,
+ const struct nfs42_copy_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr);
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, args->dst_pos);
+ *p = cpu_to_be32(args->count);
+}
+
static void encode_offload_cancel(struct xdr_stream *xdr,
const struct nfs42_offload_status_args *args,
struct compound_hdr *hdr)
@@ -324,6 +370,14 @@ static void encode_offload_cancel(struct xdr_stream *xdr,
encode_nfs4_stateid(xdr, &args->osa_stateid);
}
+static void encode_offload_status(struct xdr_stream *xdr,
+ const struct nfs42_offload_status_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->osa_stateid);
+}
+
static void encode_copy_notify(struct xdr_stream *xdr,
const struct nfs42_copy_notify_args *args,
struct compound_hdr *hdr)
@@ -451,20 +505,6 @@ static void encode_setxattr(struct xdr_stream *xdr,
xdr_write_pages(xdr, arg->xattr_pages, 0, arg->xattr_len);
}
-static int decode_setxattr(struct xdr_stream *xdr,
- struct nfs4_change_info *cinfo)
-{
- int status;
-
- status = decode_op_hdr(xdr, OP_SETXATTR);
- if (status)
- goto out;
- status = decode_change_info(xdr, cinfo);
-out:
- return status;
-}
-
-
static void encode_getxattr(struct xdr_stream *xdr, const char *name,
struct compound_hdr *hdr)
{
@@ -472,43 +512,6 @@ static void encode_getxattr(struct xdr_stream *xdr, const char *name,
encode_string(xdr, strlen(name), name);
}
-static int decode_getxattr(struct xdr_stream *xdr,
- struct nfs42_getxattrres *res,
- struct rpc_rqst *req)
-{
- int status;
- __be32 *p;
- u32 len, rdlen;
-
- status = decode_op_hdr(xdr, OP_GETXATTR);
- if (status)
- return status;
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- len = be32_to_cpup(p);
-
- /*
- * Only check against the page length here. The actual
- * requested length may be smaller, but that is only
- * checked against after possibly caching a valid reply.
- */
- if (len > req->rq_rcv_buf.page_len)
- return -ERANGE;
-
- res->xattr_len = len;
-
- if (len > 0) {
- rdlen = xdr_read_pages(xdr, len);
- if (rdlen < len)
- return -EIO;
- }
-
- return 0;
-}
-
static void encode_removexattr(struct xdr_stream *xdr, const char *name,
struct compound_hdr *hdr)
{
@@ -516,21 +519,6 @@ static void encode_removexattr(struct xdr_stream *xdr, const char *name,
encode_string(xdr, strlen(name), name);
}
-
-static int decode_removexattr(struct xdr_stream *xdr,
- struct nfs4_change_info *cinfo)
-{
- int status;
-
- status = decode_op_hdr(xdr, OP_REMOVEXATTR);
- if (status)
- goto out;
-
- status = decode_change_info(xdr, cinfo);
-out:
- return status;
-}
-
static void encode_listxattrs(struct xdr_stream *xdr,
const struct nfs42_listxattrsargs *arg,
struct compound_hdr *hdr)
@@ -552,96 +540,6 @@ static void encode_listxattrs(struct xdr_stream *xdr,
*p = cpu_to_be32(arg->count + 8 + 4);
}
-static int decode_listxattrs(struct xdr_stream *xdr,
- struct nfs42_listxattrsres *res)
-{
- int status;
- __be32 *p;
- u32 count, len, ulen;
- size_t left, copied;
- char *buf;
-
- status = decode_op_hdr(xdr, OP_LISTXATTRS);
- if (status) {
- /*
- * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL
- * should be translated to ERANGE.
- */
- if (status == -ETOOSMALL)
- status = -ERANGE;
- goto out;
- }
-
- p = xdr_inline_decode(xdr, 8);
- if (unlikely(!p))
- return -EIO;
-
- xdr_decode_hyper(p, &res->cookie);
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- left = res->xattr_len;
- buf = res->xattr_buf;
-
- count = be32_to_cpup(p);
- copied = 0;
-
- /*
- * We have asked for enough room to encode the maximum number
- * of possible attribute names, so everything should fit.
- *
- * But, don't rely on that assumption. Just decode entries
- * until they don't fit anymore, just in case the server did
- * something odd.
- */
- while (count--) {
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- len = be32_to_cpup(p);
- if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
- status = -ERANGE;
- goto out;
- }
-
- p = xdr_inline_decode(xdr, len);
- if (unlikely(!p))
- return -EIO;
-
- ulen = len + XATTR_USER_PREFIX_LEN + 1;
- if (buf) {
- if (ulen > left) {
- status = -ERANGE;
- goto out;
- }
-
- memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
- memcpy(buf + XATTR_USER_PREFIX_LEN, p, len);
-
- buf[ulen - 1] = 0;
- buf += ulen;
- left -= ulen;
- }
- copied += ulen;
- }
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- res->eof = be32_to_cpup(p);
- res->copied = copied;
-
-out:
- if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX)
- status = -E2BIG;
-
- return status;
-}
-
/*
* Encode ALLOCATE request
*/
@@ -662,18 +560,6 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
encode_nops(&hdr);
}
-static void encode_copy_commit(struct xdr_stream *xdr,
- const struct nfs42_copy_args *args,
- struct compound_hdr *hdr)
-{
- __be32 *p;
-
- encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr);
- p = reserve_space(xdr, 12);
- p = xdr_encode_hyper(p, args->dst_pos);
- *p = cpu_to_be32(args->count);
-}
-
/*
* Encode COPY request
*/
@@ -698,7 +584,7 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
}
/*
- * Encode OFFLOAD_CANEL request
+ * Encode OFFLOAD_CANCEL request
*/
static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
struct xdr_stream *xdr,
@@ -717,6 +603,25 @@ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
}
/*
+ * Encode OFFLOAD_STATUS request
+ */
+static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_offload_status_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->osa_seq_args, &hdr);
+ encode_putfh(xdr, args->osa_src_fh, &hdr);
+ encode_offload_status(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode COPY_NOTIFY request
*/
static void nfs4_xdr_enc_copy_notify(struct rpc_rqst *req,
@@ -756,6 +661,27 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
}
/*
+ * Encode ZERO_RANGE request
+ */
+static void nfs4_xdr_enc_zero_range(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_falloc_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->falloc_fh, &hdr);
+ encode_deallocate(xdr, args, &hdr);
+ encode_allocate(xdr, args, &hdr);
+ encode_getfattr(xdr, args->falloc_bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode READ_PLUS request
*/
static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
@@ -772,8 +698,8 @@ static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
encode_putfh(xdr, args->fh, &hdr);
encode_read_plus(xdr, args, &hdr);
- rpc_prepare_reply_pages(req, args->pages, args->pgbase,
- args->count, hdr.replen);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->count,
+ hdr.replen - READ_PLUS_SEGMENT_SIZE_DIFF);
encode_nops(&hdr);
}
@@ -862,6 +788,90 @@ static void nfs4_xdr_enc_layouterror(struct rpc_rqst *req,
encode_nops(&hdr);
}
+/*
+ * Encode SETXATTR request
+ */
+static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_setxattrargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_setxattr(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode GETXATTR request
+ */
+static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_getxattrargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ uint32_t replen;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ replen = hdr.replen + op_decode_hdr_maxsz + 1;
+ encode_getxattr(xdr, args->xattr_name, &hdr);
+
+ rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->xattr_len,
+ replen);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LISTXATTR request
+ */
+static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfs42_listxattrsargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ uint32_t replen;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ replen = hdr.replen + op_decode_hdr_maxsz + 2 + 1;
+ encode_listxattrs(xdr, args, &hdr);
+
+ rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, replen);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode REMOVEXATTR request
+ */
+static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfs42_removexattrargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_removexattr(xdr, args->xattr_name, &hdr);
+ encode_nops(&hdr);
+}
+
static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
return decode_op_hdr(xdr, OP_ALLOCATE);
@@ -984,6 +994,26 @@ static int decode_offload_cancel(struct xdr_stream *xdr,
return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL);
}
+static int decode_offload_status(struct xdr_stream *xdr,
+ struct nfs42_offload_status_res *res)
+{
+ ssize_t result;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS);
+ if (status)
+ return status;
+ /* osr_count */
+ if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0)
+ return -EIO;
+ /* osr_complete<1> */
+ result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1);
+ if (result < 0)
+ return -EIO;
+ res->complete_count = result;
+ return 0;
+}
+
static int decode_copy_notify(struct xdr_stream *xdr,
struct nfs42_copy_notify_res *res)
{
@@ -1025,81 +1055,93 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re
return decode_op_hdr(xdr, OP_DEALLOCATE);
}
-static int decode_read_plus_data(struct xdr_stream *xdr,
- struct nfs_pgio_args *args,
- struct nfs_pgio_res *res)
-{
- uint32_t count, recvd;
+struct read_plus_segment {
+ enum data_content4 type;
uint64_t offset;
- __be32 *p;
-
- p = xdr_inline_decode(xdr, 8 + 4);
- if (!p)
- return 1;
+ union {
+ struct {
+ uint64_t length;
+ } hole;
+
+ struct {
+ uint32_t length;
+ unsigned int from;
+ } data;
+ };
+};
- p = xdr_decode_hyper(p, &offset);
- count = be32_to_cpup(p);
- recvd = xdr_align_data(xdr, res->count, xdr_align_size(count));
- if (recvd > count)
- recvd = count;
- if (res->count + recvd > args->count) {
- if (args->count > res->count)
- res->count += args->count - res->count;
- return 1;
- }
- res->count += recvd;
- if (count > recvd)
- return 1;
- return 0;
+static inline uint64_t read_plus_segment_length(struct read_plus_segment *seg)
+{
+ return seg->type == NFS4_CONTENT_DATA ? seg->data.length : seg->hole.length;
}
-static int decode_read_plus_hole(struct xdr_stream *xdr,
- struct nfs_pgio_args *args,
- struct nfs_pgio_res *res, uint32_t *eof)
+static int decode_read_plus_segment(struct xdr_stream *xdr,
+ struct read_plus_segment *seg)
{
- uint64_t offset, length, recvd;
__be32 *p;
- p = xdr_inline_decode(xdr, 8 + 8);
+ p = xdr_inline_decode(xdr, 4);
if (!p)
- return 1;
-
- p = xdr_decode_hyper(p, &offset);
- p = xdr_decode_hyper(p, &length);
- if (offset != args->offset + res->count) {
- /* Server returned an out-of-sequence extent */
- if (offset > args->offset + res->count ||
- offset + length < args->offset + res->count) {
- dprintk("NFS: server returned out of sequence extent: "
- "offset/size = %llu/%llu != expected %llu\n",
- (unsigned long long)offset,
- (unsigned long long)length,
- (unsigned long long)(args->offset +
- res->count));
- return 1;
- }
- length -= args->offset + res->count - offset;
- }
- if (length + res->count > args->count) {
- *eof = 0;
- if (unlikely(res->count >= args->count))
- return 1;
- length = args->count - res->count;
- }
- recvd = xdr_expand_hole(xdr, res->count, length);
- res->count += recvd;
+ return -EIO;
+ seg->type = be32_to_cpup(p++);
- if (recvd < length)
- return 1;
+ p = xdr_inline_decode(xdr, seg->type == NFS4_CONTENT_DATA ? 12 : 16);
+ if (!p)
+ return -EIO;
+ p = xdr_decode_hyper(p, &seg->offset);
+
+ if (seg->type == NFS4_CONTENT_DATA) {
+ struct xdr_buf buf;
+ uint32_t len = be32_to_cpup(p);
+
+ seg->data.length = len;
+ seg->data.from = xdr_stream_pos(xdr);
+
+ if (!xdr_stream_subsegment(xdr, &buf, xdr_align_size(len)))
+ return -EIO;
+ } else if (seg->type == NFS4_CONTENT_HOLE) {
+ xdr_decode_hyper(p, &seg->hole.length);
+ } else
+ return -EINVAL;
return 0;
}
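
As decode_read_plus_segment() above shows, each READ_PLUS segment starts with a 4-byte content type; a DATA segment is then an 8-byte offset plus a 4-byte count (with the data following), while a HOLE segment is an 8-byte offset plus an 8-byte length. The following is a minimal user-space sketch of that wire layout only, using hypothetical get32()/get64() helpers rather than the kernel's xdr_stream API:

/* Stand-alone sketch of the READ_PLUS segment header layout; not kernel code. */
#include <stdint.h>
#include <stdio.h>

enum { CONTENT_DATA = 0, CONTENT_HOLE = 1 };	/* mirrors data_content4 */

static uint32_t get32(const unsigned char **p)
{
	uint32_t v = ((uint32_t)(*p)[0] << 24) | ((uint32_t)(*p)[1] << 16) |
		     ((uint32_t)(*p)[2] << 8)  |  (uint32_t)(*p)[3];
	*p += 4;
	return v;
}

static uint64_t get64(const unsigned char **p)
{
	uint64_t hi = get32(p);
	return (hi << 32) | get32(p);
}

int main(void)
{
	/* One HOLE segment: type = 1, offset = 4096, length = 8192 */
	const unsigned char wire[] = {
		0, 0, 0, 1,			/* content type */
		0, 0, 0, 0, 0, 0, 0x10, 0,	/* offset */
		0, 0, 0, 0, 0, 0, 0x20, 0,	/* length */
	};
	const unsigned char *p = wire;
	uint32_t type = get32(&p);
	uint64_t offset = get64(&p);
	uint64_t length;

	if (type == CONTENT_DATA)
		length = get32(&p);	/* data: 32-bit count, payload follows */
	else
		length = get64(&p);	/* hole: 64-bit length, no payload */

	printf("type=%u offset=%llu length=%llu\n", type,
	       (unsigned long long)offset, (unsigned long long)length);
	return 0;
}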
+static int process_read_plus_segment(struct xdr_stream *xdr,
+ struct nfs_pgio_args *args,
+ struct nfs_pgio_res *res,
+ struct read_plus_segment *seg)
+{
+ unsigned long offset = seg->offset;
+ unsigned long length = read_plus_segment_length(seg);
+ unsigned int bufpos;
+
+ if (offset + length < args->offset)
+ return 0;
+ else if (offset > args->offset + args->count) {
+ res->eof = 0;
+ return 0;
+ } else if (offset < args->offset) {
+ length -= (args->offset - offset);
+ offset = args->offset;
+ } else if (offset + length > args->offset + args->count) {
+ length = (args->offset + args->count) - offset;
+ res->eof = 0;
+ }
+
+ bufpos = xdr->buf->head[0].iov_len + (offset - args->offset);
+ if (seg->type == NFS4_CONTENT_HOLE)
+ return xdr_stream_zero(xdr, bufpos, length);
+ else
+ return xdr_stream_move_subsegment(xdr, seg->data.from, bufpos, length);
+}
+
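
process_read_plus_segment() trims each returned segment against the byte range the client actually asked for: segments entirely before the request contribute nothing, segments past the end clear res->eof, and partial overlaps are clipped at both edges before the bytes are moved or zeroed. Below is a small user-space sketch of just that clipping arithmetic; the struct and function names are illustrative, not the kernel's:

/* Illustrative clipping of a returned segment to the requested window. */
#include <stdint.h>
#include <stdio.h>

struct window { uint64_t offset; uint64_t count; };

/* Returns how many bytes of the segment fall inside 'req' after clipping,
 * or 0 if there is no overlap. */
static uint64_t clip_segment(const struct window *req,
			     uint64_t seg_off, uint64_t seg_len)
{
	uint64_t req_end = req->offset + req->count;

	if (seg_off + seg_len < req->offset || seg_off > req_end)
		return 0;
	if (seg_off < req->offset) {
		seg_len -= req->offset - seg_off;
		seg_off = req->offset;
	}
	if (seg_off + seg_len > req_end)
		seg_len = req_end - seg_off;
	return seg_len;
}

int main(void)
{
	struct window req = { .offset = 4096, .count = 8192 };

	/* Segment starting before the request and running past its end. */
	printf("%llu\n", (unsigned long long)clip_segment(&req, 0, 65536));	/* 8192 */
	/* Segment entirely before the request. */
	printf("%llu\n", (unsigned long long)clip_segment(&req, 0, 1024));	/* 0 */
	return 0;
}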
static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
{
struct nfs_pgio_header *hdr =
container_of(res, struct nfs_pgio_header, res);
struct nfs_pgio_args *args = &hdr->args;
- uint32_t eof, segments, type;
+ uint32_t segments;
+ struct read_plus_segment *segs;
int status, i;
__be32 *p;
@@ -1112,38 +1154,29 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
return -EIO;
res->count = 0;
- eof = be32_to_cpup(p++);
+ res->eof = be32_to_cpup(p++);
segments = be32_to_cpup(p++);
if (segments == 0)
- goto out;
+ return 0;
- for (i = 0; i < segments; i++) {
- p = xdr_inline_decode(xdr, 4);
- if (!p)
- goto early_out;
-
- type = be32_to_cpup(p++);
- if (type == NFS4_CONTENT_DATA)
- status = decode_read_plus_data(xdr, args, res);
- else if (type == NFS4_CONTENT_HOLE)
- status = decode_read_plus_hole(xdr, args, res, &eof);
- else
- return -EINVAL;
+ segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL);
+ if (!segs)
+ return -ENOMEM;
+ for (i = 0; i < segments; i++) {
+ status = decode_read_plus_segment(xdr, &segs[i]);
if (status < 0)
- return status;
- if (status > 0)
- goto early_out;
+ goto out;
}
+ xdr_set_pagelen(xdr, xdr_align_size(args->count));
+ for (i = segments; i > 0; i--)
+ res->count += process_read_plus_segment(xdr, args, res, &segs[i-1]);
+ status = 0;
+
out:
- res->eof = eof;
- return 0;
-early_out:
- if (unlikely(!i))
- return -EIO;
- res->eof = 0;
- return 0;
+ kfree(segs);
+ return status;
}
static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
@@ -1179,6 +1212,168 @@ static int decode_layouterror(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_LAYOUTERROR);
}
+static int decode_setxattr(struct xdr_stream *xdr,
+ struct nfs4_change_info *cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_SETXATTR);
+ if (status)
+ goto out;
+ status = decode_change_info(xdr, cinfo);
+out:
+ return status;
+}
+
+static int decode_getxattr(struct xdr_stream *xdr,
+ struct nfs42_getxattrres *res,
+ struct rpc_rqst *req)
+{
+ int status;
+ __be32 *p;
+ u32 len, rdlen;
+
+ status = decode_op_hdr(xdr, OP_GETXATTR);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ len = be32_to_cpup(p);
+
+ /*
+ * Only check against the page length here. The actual
+ * requested length may be smaller, but that is only
+ * checked after possibly caching a valid reply.
+ */
+ if (len > req->rq_rcv_buf.page_len)
+ return -ERANGE;
+
+ res->xattr_len = len;
+
+ if (len > 0) {
+ rdlen = xdr_read_pages(xdr, len);
+ if (rdlen < len)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int decode_removexattr(struct xdr_stream *xdr,
+ struct nfs4_change_info *cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_REMOVEXATTR);
+ if (status)
+ goto out;
+
+ status = decode_change_info(xdr, cinfo);
+out:
+ return status;
+}
+
+static int decode_listxattrs(struct xdr_stream *xdr,
+ struct nfs42_listxattrsres *res)
+{
+ int status;
+ __be32 *p;
+ u32 count, len, ulen;
+ size_t left, copied;
+ char *buf;
+
+ status = decode_op_hdr(xdr, OP_LISTXATTRS);
+ if (status) {
+ /*
+ * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL
+ * should be translated to ERANGE.
+ */
+ if (status == -ETOOSMALL)
+ status = -ERANGE;
+ /*
+ * Special case: for LISTXATTRS, NFS4ERR_NOXATTR
+ * should be translated to success with zero-length reply.
+ */
+ if (status == -ENODATA) {
+ res->eof = true;
+ status = 0;
+ }
+ goto out;
+ }
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ return -EIO;
+
+ xdr_decode_hyper(p, &res->cookie);
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ left = res->xattr_len;
+ buf = res->xattr_buf;
+
+ count = be32_to_cpup(p);
+ copied = 0;
+
+ /*
+ * We have asked for enough room to encode the maximum number
+ * of possible attribute names, so everything should fit.
+ *
+ * But don't rely on that assumption: decode entries until
+ * they no longer fit, in case the server did something odd.
+ */
+ while (count--) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ len = be32_to_cpup(p);
+ if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+ status = -ERANGE;
+ goto out;
+ }
+
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return -EIO;
+
+ ulen = len + XATTR_USER_PREFIX_LEN + 1;
+ if (buf) {
+ if (ulen > left) {
+ status = -ERANGE;
+ goto out;
+ }
+
+ memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+ memcpy(buf + XATTR_USER_PREFIX_LEN, p, len);
+
+ buf[ulen - 1] = 0;
+ buf += ulen;
+ left -= ulen;
+ }
+ copied += ulen;
+ }
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ res->eof = be32_to_cpup(p);
+ res->copied = copied;
+
+out:
+ if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX)
+ status = -E2BIG;
+
+ return status;
+}
+
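
decode_listxattrs() turns each on-the-wire name into a "user."-prefixed, NUL-terminated entry packed back to back in the caller's buffer, and bails out with -ERANGE once an entry no longer fits. A reduced user-space sketch of that packing step follows; the buffer size and attribute names are made up for the illustration:

/* Sketch of packing xattr names as "user.<name>\0" entries into a buffer. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define USER_PREFIX	"user."
#define USER_PREFIX_LEN	(sizeof(USER_PREFIX) - 1)

static ssize_t pack_names(const char *const *names, size_t n,
			  char *buf, size_t left)
{
	size_t copied = 0;

	for (size_t i = 0; i < n; i++) {
		size_t len = strlen(names[i]);
		size_t ulen = USER_PREFIX_LEN + len + 1;	/* prefix + name + NUL */

		if (buf) {
			if (ulen > left)
				return -ERANGE;
			memcpy(buf, USER_PREFIX, USER_PREFIX_LEN);
			memcpy(buf + USER_PREFIX_LEN, names[i], len);
			buf[ulen - 1] = '\0';
			buf += ulen;
			left -= ulen;
		}
		copied += ulen;	/* with buf == NULL this just sizes the list */
	}
	return copied;
}

int main(void)
{
	const char *names[] = { "comment", "mime_type" };
	char buf[64];
	ssize_t used = pack_names(names, 2, buf, sizeof(buf));

	printf("used=%zd first=%s\n", used, buf);	/* used=28 first=user.comment */
	return 0;
}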
/*
 * Decode ALLOCATE response
*/
@@ -1269,6 +1464,32 @@ out:
}
/*
+ * Decode OFFLOAD_STATUS response
+ */
+static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_offload_status_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->osr_seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_offload_status(xdr, res);
+
+out:
+ return status;
+}
+
+/*
* Decode COPY_NOTIFY response
*/
static int nfs4_xdr_dec_copy_notify(struct rpc_rqst *rqstp,
@@ -1323,6 +1544,37 @@ out:
}
/*
+ * Decode ZERO_RANGE response
+ */
+static int nfs4_xdr_dec_zero_range(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_falloc_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_deallocate(xdr, res);
+ if (status)
+ goto out;
+ status = decode_allocate(xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
+out:
+ return status;
+}
+
+/*
 * Decode READ_PLUS response
*/
static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
@@ -1333,6 +1585,8 @@ static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
struct compound_hdr hdr;
int status;
+ xdr_set_scratch_buffer(xdr, res->scratch, READ_PLUS_SCRATCH_SIZE);
+
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
@@ -1434,8 +1688,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
status = decode_clone(xdr);
if (status)
goto out;
- status = decode_getfattr(xdr, res->dst_fattr, res->server);
-
+ decode_getfattr(xdr, res->dst_fattr, res->server);
out:
res->rpc_status = status;
return status;
@@ -1467,22 +1720,9 @@ out:
return status;
}
-#ifdef CONFIG_NFS_V4_2
-static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
- const void *data)
-{
- const struct nfs42_setxattrargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- encode_setxattr(xdr, args, &hdr);
- encode_nops(&hdr);
-}
-
+/*
+ * Decode SETXATTR response
+ */
static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
void *data)
{
@@ -1499,33 +1739,17 @@ static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
-
status = decode_setxattr(xdr, &res->cinfo);
+ if (status)
+ goto out;
+ status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
-static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
- const void *data)
-{
- const struct nfs42_getxattrargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
- uint32_t replen;
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- replen = hdr.replen + op_decode_hdr_maxsz + 1;
- encode_getxattr(xdr, args->xattr_name, &hdr);
-
- rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->xattr_len,
- replen);
-
- encode_nops(&hdr);
-}
-
+/*
+ * Decode GETXATTR response
+ */
static int nfs4_xdr_dec_getxattr(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *data)
{
@@ -1547,26 +1771,9 @@ out:
return status;
}
-static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
- struct xdr_stream *xdr, const void *data)
-{
- const struct nfs42_listxattrsargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
- uint32_t replen;
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- replen = hdr.replen + op_decode_hdr_maxsz + 2 + 1;
- encode_listxattrs(xdr, args, &hdr);
-
- rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, replen);
-
- encode_nops(&hdr);
-}
-
+/*
+ * Decode LISTXATTRS response
+ */
static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *data)
{
@@ -1574,7 +1781,7 @@ static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp,
struct compound_hdr hdr;
int status;
- xdr_set_scratch_page(xdr, res->scratch);
+ xdr_set_scratch_folio(xdr, res->scratch);
status = decode_compound_hdr(xdr, &hdr);
if (status)
@@ -1590,21 +1797,9 @@ out:
return status;
}
-static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req,
- struct xdr_stream *xdr, const void *data)
-{
- const struct nfs42_removexattrargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- encode_removexattr(xdr, args->xattr_name, &hdr);
- encode_nops(&hdr);
-}
-
+/*
+ * Decode REMOVEXATTR response
+ */
static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req,
struct xdr_stream *xdr, void *data)
{
@@ -1626,5 +1821,4 @@ static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req,
out:
return status;
}
-#endif
#endif /* __LINUX_FS_NFS_NFS4_2XDR_H */
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index ed5eaca6801e..c34c89af9c7d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -23,6 +23,7 @@
#define NFS4_MAX_LOOP_ON_RECOVER (10)
#include <linux/seqlock.h>
+#include <linux/filelock.h>
struct idmap;
@@ -42,6 +43,7 @@ enum nfs4_client_state {
NFS4CLNT_LEASE_MOVED,
NFS4CLNT_DELEGATION_EXPIRED,
NFS4CLNT_RUN_MANAGER,
+ NFS4CLNT_MANAGER_AVAILABLE,
NFS4CLNT_RECALL_RUNNING,
NFS4CLNT_RECALL_ANY_LAYOUT_READ,
NFS4CLNT_RECALL_ANY_LAYOUT_RW,
@@ -61,11 +63,11 @@ struct nfs4_minor_version_ops {
bool (*match_stateid)(const nfs4_stateid *,
const nfs4_stateid *);
int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
- struct nfs_fsinfo *);
+ struct nfs_fattr *);
void (*free_lock_state)(struct nfs_server *,
struct nfs4_lock_state *);
int (*test_and_free_expired)(struct nfs_server *,
- nfs4_stateid *, const struct cred *);
+ nfs4_stateid *, const struct cred *);
struct nfs_seqid *
(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
void (*session_trunk)(struct rpc_clnt *clnt,
@@ -80,7 +82,7 @@ struct nfs4_minor_version_ops {
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
ktime_t create_time;
- int owner_id;
+ u64 owner_id;
int flags;
u32 counter;
spinlock_t lock; /* Protects the list */
@@ -118,7 +120,6 @@ struct nfs4_state_owner {
unsigned long so_flags;
struct list_head so_states;
struct nfs_seqid_counter so_seqid;
- seqcount_spinlock_t so_reclaim_seqcount;
struct mutex so_delegreturn_mutex;
};
@@ -148,6 +149,7 @@ struct nfs4_lock_state {
struct nfs4_state * ls_state; /* Pointer to open state */
#define NFS_LOCK_INITIALIZED 0
#define NFS_LOCK_LOST 1
+#define NFS_LOCK_UNLOCKING 2
unsigned long ls_flags;
struct nfs_seqid_counter ls_seqid;
nfs4_stateid ls_stateid;
@@ -206,6 +208,7 @@ struct nfs4_exception {
struct inode *inode;
nfs4_stateid *stateid;
long timeout;
+ unsigned short retrans;
unsigned char task_is_privileged : 1;
unsigned char delay : 1,
recovering : 1,
@@ -260,8 +263,8 @@ struct nfs4_state_maintenance_ops {
};
struct nfs4_mig_recovery_ops {
- int (*get_locations)(struct inode *, struct nfs4_fs_locations *,
- struct page *, const struct cred *);
+ int (*get_locations)(struct nfs_server *, struct nfs_fh *,
+ struct nfs4_fs_locations *, struct page *, const struct cred *);
int (*fsid_present)(struct inode *, const struct cred *);
};
@@ -280,7 +283,8 @@ struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
int nfs4_submount(struct fs_context *, struct nfs_server *);
int nfs4_replace_transport(struct nfs_server *server,
const struct nfs4_fs_locations *locations);
-
+size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr_storage *ss,
+ size_t salen, struct net *net, int port);
/* nfs4proc.c */
extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *);
extern int nfs4_async_handle_error(struct rpc_task *task,
@@ -292,7 +296,8 @@ extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *,
extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int, int);
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, const struct cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, const struct cred *);
-extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *, bool);
+extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *,
+ struct nfs_fattr *, bool);
extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, const struct cred *cred);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred);
extern int nfs4_destroy_clientid(struct nfs_client *clp);
@@ -302,27 +307,30 @@ extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
struct nfs4_fs_locations *, struct page *);
-extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
- struct page *page, const struct cred *);
+extern int nfs4_proc_get_locations(struct nfs_server *, struct nfs_fh *,
+ struct nfs4_fs_locations *,
+ struct page *page, const struct cred *);
extern int nfs4_proc_fsid_present(struct inode *, const struct cred *);
extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *,
struct dentry *,
struct nfs_fh *,
struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
-extern const struct xattr_handler *nfs4_xattr_handlers[];
+extern const struct xattr_handler * const nfs4_xattr_handlers[];
extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_open_context *ctx,
const struct nfs_lock_context *l_ctx,
fmode_t fmode);
+extern void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
+ struct inode *inode, unsigned long cache_validity);
extern int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct inode *inode);
extern int update_open_stateid(struct nfs4_state *state,
const nfs4_stateid *open_stateid,
const nfs4_stateid *deleg_stateid,
fmode_t fmode);
-extern int nfs4_proc_setlease(struct file *file, long arg,
- struct file_lock **lease, void **priv);
+extern int nfs4_proc_setlease(struct file *file, int arg,
+ struct file_lease **lease, void **priv);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
extern void nfs4_update_changeattr(struct inode *dir,
@@ -454,7 +462,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *);
/* nfs4renewd.c */
extern void nfs4_schedule_state_renewal(struct nfs_client *);
-extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
@@ -540,6 +547,7 @@ extern unsigned short max_session_slots;
extern unsigned short max_session_cb_slots;
extern unsigned short send_implementation_id;
extern bool recover_lost_locks;
+extern short nfs_delay_retrans;
#define NFS4_CLIENT_ID_UNIQ_LEN (64)
extern char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN];
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index d8b5a250ca05..3a4baed993c9 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -11,6 +11,7 @@
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <net/handshake.h>
#include "internal.h"
#include "callback.h"
#include "delegation.h"
@@ -18,6 +19,7 @@
#include "nfs4idmap.h"
#include "pnfs.h"
#include "netns.h"
+#include "sysfs.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
@@ -221,6 +223,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
clp->cl_mig_gen = 1;
+ clp->cl_last_renewal = jiffies;
#if IS_ENABLED(CONFIG_NFS_V4_1)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
@@ -230,7 +233,10 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
__set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
-
+ if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
+ __set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags);
/*
 * Set up the connection to the server before we add it to the
* global list.
@@ -254,7 +260,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
goto error;
ip_addr = (const char *)buf;
}
- strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+ strscpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
err = nfs_idmap_new(clp);
if (err < 0) {
@@ -346,6 +352,7 @@ int nfs40_init_client(struct nfs_client *clp)
ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
"NFSv4.0 transport Slot table");
if (ret) {
+ nfs4_shutdown_slot_table(tbl);
kfree(tbl);
return ret;
}
@@ -413,6 +420,8 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
.net = old->cl_net,
.servername = old->cl_hostname,
};
+ int max_connect = test_bit(NFS_CS_PNFS, &clp->cl_flags) ?
+ clp->cl_max_connect : old->cl_max_connect;
if (clp->cl_proto != old->cl_proto)
return;
@@ -426,7 +435,7 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
xprt_args.addrlen = clp_salen;
rpc_clnt_add_xprt(old->cl_rpcclient, &xprt_args,
- rpc_clnt_test_and_add_xprt, NULL);
+ rpc_clnt_test_and_add_xprt, &max_connect);
}
/**
@@ -795,6 +804,7 @@ static void nfs4_destroy_server(struct nfs_server *server)
unset_pnfs_layoutdriver(server);
nfs4_purge_state_owners(server, &freeme);
nfs4_free_state_owners(&freeme);
+ kfree(server->delegation_hash_table);
}
/*
@@ -888,46 +898,40 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
* Set up an NFS4 client
*/
static int nfs4_set_client(struct nfs_server *server,
- const char *hostname,
- const struct sockaddr *addr,
- const size_t addrlen,
- const char *ip_addr,
- int proto, const struct rpc_timeout *timeparms,
- u32 minorversion, unsigned int nconnect,
- unsigned int max_connect,
- struct net *net)
+ struct nfs_client_initdata *cl_init)
{
- struct nfs_client_initdata cl_init = {
- .hostname = hostname,
- .addr = addr,
- .addrlen = addrlen,
- .ip_addr = ip_addr,
- .nfs_mod = &nfs_v4,
- .proto = proto,
- .minorversion = minorversion,
- .net = net,
- .timeparms = timeparms,
- .cred = server->cred,
- };
struct nfs_client *clp;
- if (minorversion == 0)
- __set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags);
- else
- cl_init.max_connect = max_connect;
- if (proto == XPRT_TRANSPORT_TCP)
- cl_init.nconnect = nconnect;
+ cl_init->nfs_mod = &nfs_v4;
+ cl_init->cred = server->cred;
+
+ if (cl_init->minorversion == 0) {
+ __set_bit(NFS_CS_REUSEPORT, &cl_init->init_flags);
+ cl_init->max_connect = 0;
+ }
+
+ switch (cl_init->proto) {
+ case XPRT_TRANSPORT_RDMA:
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_TCP_TLS:
+ break;
+ default:
+ cl_init->nconnect = 0;
+ }
if (server->flags & NFS_MOUNT_NORESVPORT)
- __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ __set_bit(NFS_CS_NORESVPORT, &cl_init->init_flags);
if (server->options & NFS_OPTION_MIGRATION)
- __set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
+ __set_bit(NFS_CS_MIGRATION, &cl_init->init_flags);
if (test_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status))
- __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
- server->port = rpc_get_port(addr);
+ __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init->init_flags);
+ server->port = rpc_get_port((struct sockaddr *)cl_init->addr);
+
+ if (server->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags);
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init);
+ clp = nfs_get_client(cl_init);
if (IS_ERR(clp))
return PTR_ERR(clp);
@@ -946,6 +950,9 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state);
server->nfs_client = clp;
+ nfs_sysfs_add_server(server);
+ nfs_sysfs_link_rpc_client(server, clp->cl_rpcclient, "_state");
+
return 0;
}
@@ -960,7 +967,7 @@ static int nfs4_set_client(struct nfs_server *server,
* the MDS.
*/
struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
- const struct sockaddr *ds_addr, int ds_addrlen,
+ const struct sockaddr_storage *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
u32 minor_version)
{
@@ -977,21 +984,40 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ .cert_serial = TLS_NO_CERT,
+ .privkey_serial = TLS_NO_PRIVKEY,
+ },
};
char buf[INET6_ADDRSTRLEN + 1];
- if (rpc_ntop(ds_addr, buf, sizeof(buf)) <= 0)
+ if (rpc_ntop((struct sockaddr *)ds_addr, buf, sizeof(buf)) <= 0)
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
- if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP) {
- cl_init.nconnect = mds_clp->cl_nconnect;
- cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_srv->nfs_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.xprtsec = mds_srv->nfs_client->cl_xprtsec;
+ else
+ ds_proto = XPRT_TRANSPORT_TCP;
+ fallthrough;
+ case XPRT_TRANSPORT_RDMA:
+ case XPRT_TRANSPORT_TCP:
+ if (mds_clp->cl_nconnect > 1) {
+ cl_init.nconnect = mds_clp->cl_nconnect;
+ cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ }
}
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+ __set_bit(NFS_CS_PNFS, &cl_init.init_flags);
+ cl_init.max_connect = NFS_MAX_TRANSPORTS;
/*
 * Set an authflavor equal to the MDS value. Use the MDS nfs_client
* cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
@@ -1059,29 +1085,15 @@ static void nfs4_session_limit_xasize(struct nfs_server *server)
#endif
}
-void nfs4_server_set_init_caps(struct nfs_server *server)
-{
- /* Set the basic capabilities */
- server->caps |= server->nfs_client->cl_mvops->init_caps;
- if (server->flags & NFS_MOUNT_NORDIRPLUS)
- server->caps &= ~NFS_CAP_READDIRPLUS;
- if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
- server->caps &= ~NFS_CAP_READ_PLUS;
-
- /*
- * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
- * authentication.
- */
- if (nfs4_disable_idmapping &&
- server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
- server->caps |= NFS_CAP_UIDGID_NOMAP;
-}
-
static int nfs4_server_common_setup(struct nfs_server *server,
struct nfs_fh *mntfh, bool auth_probe)
{
int error;
+ error = nfs4_delegation_hash_alloc(server);
+ if (error)
+ return error;
+
/* data servers support only a subset of NFSv4.1 */
if (is_ds_only_client(server->nfs_client))
return -EPROTONOSUPPORT;
@@ -1089,14 +1101,14 @@ static int nfs4_server_common_setup(struct nfs_server *server,
/* We must ensure the session is initialised first */
error = nfs4_init_session(server->nfs_client);
if (error < 0)
- goto out;
+ return error;
- nfs4_server_set_init_caps(server);
+ nfs_server_set_init_caps(server);
/* Probe the root fh to retrieve its FSID and filehandle */
error = nfs4_get_rootfh(server, mntfh, auth_probe);
if (error < 0)
- goto out;
+ return error;
dprintk("Server FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
@@ -1105,7 +1117,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
error = nfs_probe_server(server, mntfh);
if (error < 0)
- goto out;
+ return error;
nfs4_session_limit_rwsize(server);
nfs4_session_limit_xasize(server);
@@ -1116,8 +1128,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
nfs_server_insert_lists(server);
server->mount_time = jiffies;
server->destroy = nfs4_destroy_server;
-out:
- return error;
+ return 0;
}
/*
@@ -1127,6 +1138,19 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct rpc_timeout timeparms;
+ struct nfs_client_initdata cl_init = {
+ .hostname = ctx->nfs_server.hostname,
+ .addr = &ctx->nfs_server._address,
+ .addrlen = ctx->nfs_server.addrlen,
+ .ip_addr = ctx->client_address,
+ .proto = ctx->nfs_server.protocol,
+ .minorversion = ctx->minorversion,
+ .net = fc->net_ns,
+ .timeparms = &timeparms,
+ .xprtsec = ctx->xprtsec,
+ .nconnect = ctx->nfs_server.nconnect,
+ .max_connect = ctx->nfs_server.max_connect,
+ };
int error;
nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol,
@@ -1146,24 +1170,14 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
ctx->selected_flavor = RPC_AUTH_UNIX;
/* Get a client record */
- error = nfs4_set_client(server,
- ctx->nfs_server.hostname,
- &ctx->nfs_server.address,
- ctx->nfs_server.addrlen,
- ctx->client_address,
- ctx->nfs_server.protocol,
- &timeparms,
- ctx->minorversion,
- ctx->nfs_server.nconnect,
- ctx->nfs_server.max_connect,
- fc->net_ns);
+ error = nfs4_set_client(server, &cl_init);
if (error < 0)
return error;
if (ctx->rsize)
- server->rsize = nfs_block_size(ctx->rsize, NULL);
+ server->rsize = nfs_io_size(ctx->rsize, server->nfs_client->cl_proto);
if (ctx->wsize)
- server->wsize = nfs_block_size(ctx->wsize, NULL);
+ server->wsize = nfs_io_size(ctx->wsize, server->nfs_client->cl_proto);
server->acregmin = ctx->acregmin * HZ;
server->acregmax = ctx->acregmax * HZ;
@@ -1216,8 +1230,21 @@ error:
struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
- struct nfs_client *parent_client;
- struct nfs_server *server, *parent_server;
+ struct nfs_server *parent_server = NFS_SB(ctx->clone_data.sb);
+ struct nfs_client *parent_client = parent_server->nfs_client;
+ struct nfs_client_initdata cl_init = {
+ .hostname = ctx->nfs_server.hostname,
+ .addr = &ctx->nfs_server._address,
+ .addrlen = ctx->nfs_server.addrlen,
+ .ip_addr = parent_client->cl_ipaddr,
+ .minorversion = parent_client->cl_mvops->minor_version,
+ .net = parent_client->cl_net,
+ .timeparms = parent_server->client->cl_timeout,
+ .xprtsec = parent_client->cl_xprtsec,
+ .nconnect = parent_client->cl_nconnect,
+ .max_connect = parent_client->cl_max_connect,
+ };
+ struct nfs_server *server;
bool auth_probe;
int error;
@@ -1225,9 +1252,6 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
if (!server)
return ERR_PTR(-ENOMEM);
- parent_server = NFS_SB(ctx->clone_data.sb);
- parent_client = parent_server->nfs_client;
-
server->cred = get_cred(parent_server->cred);
/* Initialise the client representation from the parent server */
@@ -1236,33 +1260,17 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
/* Get a client representation */
#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
rpc_set_port(&ctx->nfs_server.address, NFS_RDMA_PORT);
- error = nfs4_set_client(server,
- ctx->nfs_server.hostname,
- &ctx->nfs_server.address,
- ctx->nfs_server.addrlen,
- parent_client->cl_ipaddr,
- XPRT_TRANSPORT_RDMA,
- parent_server->client->cl_timeout,
- parent_client->cl_mvops->minor_version,
- parent_client->cl_nconnect,
- parent_client->cl_max_connect,
- parent_client->cl_net);
+ cl_init.proto = XPRT_TRANSPORT_RDMA;
+ error = nfs4_set_client(server, &cl_init);
if (!error)
goto init_server;
#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
+ cl_init.proto = XPRT_TRANSPORT_TCP;
+ if (parent_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.proto = XPRT_TRANSPORT_TCP_TLS;
rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
- error = nfs4_set_client(server,
- ctx->nfs_server.hostname,
- &ctx->nfs_server.address,
- ctx->nfs_server.addrlen,
- parent_client->cl_ipaddr,
- XPRT_TRANSPORT_TCP,
- parent_server->client->cl_timeout,
- parent_client->cl_mvops->minor_version,
- parent_client->cl_nconnect,
- parent_client->cl_max_connect,
- parent_client->cl_net);
+ error = nfs4_set_client(server, &cl_init);
if (error < 0)
goto error;
@@ -1303,20 +1311,34 @@ error:
* Returns zero on success, or a negative errno value.
*/
int nfs4_update_server(struct nfs_server *server, const char *hostname,
- struct sockaddr *sap, size_t salen, struct net *net)
+ struct sockaddr_storage *sap, size_t salen, struct net *net)
{
struct nfs_client *clp = server->nfs_client;
struct rpc_clnt *clnt = server->client;
struct xprt_create xargs = {
.ident = clp->cl_proto,
.net = net,
- .dstaddr = sap,
+ .dstaddr = (struct sockaddr *)sap,
.addrlen = salen,
.servername = hostname,
+ /* cel: bleh. We might need to pass TLS parameters here */
};
char buf[INET6_ADDRSTRLEN + 1];
struct sockaddr_storage address;
struct sockaddr *localaddr = (struct sockaddr *)&address;
+ struct nfs_client_initdata cl_init = {
+ .hostname = hostname,
+ .addr = sap,
+ .addrlen = salen,
+ .ip_addr = buf,
+ .proto = clp->cl_proto,
+ .minorversion = clp->cl_minorversion,
+ .net = net,
+ .timeparms = clnt->cl_timeout,
+ .xprtsec = clp->cl_xprtsec,
+ .nconnect = clp->cl_nconnect,
+ .max_connect = clp->cl_max_connect,
+ };
int error;
error = rpc_switch_client_transport(clnt, &xargs, clnt->cl_timeout);
@@ -1332,10 +1354,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
nfs_server_remove_lists(server);
set_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
- error = nfs4_set_client(server, hostname, sap, salen, buf,
- clp->cl_proto, clnt->cl_timeout,
- clp->cl_minorversion,
- clp->cl_nconnect, clp->cl_max_connect, net);
+ error = nfs4_set_client(server, &cl_init);
clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
if (error != 0) {
nfs_server_insert_lists(server);
@@ -1343,8 +1362,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
}
nfs_put_client(clp);
- if (server->nfs_client->cl_hostname == NULL)
+ if (server->nfs_client->cl_hostname == NULL) {
server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
+ if (server->nfs_client->cl_hostname == NULL)
+ return -ENOMEM;
+ }
nfs_server_insert_lists(server);
return nfs_probe_server(server, NFS_FH(d_inode(server->super->s_root)));
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index e79ae4cbc395..7317f26892c5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -10,6 +10,7 @@
#include <linux/mount.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_ssc.h>
+#include <linux/splice.h>
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
@@ -50,16 +51,14 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
- if ((openflags & O_ACCMODE) == 3)
- return nfs_open(inode, filp);
-
/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
parent = dget_parent(dentry);
dir = d_inode(parent);
- ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
+ ctx = alloc_nfs_open_context(file_dentry(filp),
+ flags_to_mode(openflags), filp);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
@@ -91,6 +90,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
nfs_file_set_open_context(filp, ctx);
nfs_fscache_open_file(inode, filp);
err = 0;
+ filp->f_mode |= FMODE_CAN_ODIRECT;
out_put_ctx:
put_nfs_open_context(ctx);
@@ -165,7 +165,7 @@ retry:
if (sync)
return -EOPNOTSUPP;
cn_resp = kzalloc(sizeof(struct nfs42_copy_notify_res),
- GFP_NOFS);
+ GFP_KERNEL);
if (unlikely(cn_resp == NULL))
return -ENOMEM;
@@ -180,8 +180,8 @@ retry:
ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count,
nss, cnrs, sync);
out:
- if (!nfs42_files_from_same_server(file_in, file_out))
- kfree(cn_resp);
+ kfree(cn_resp);
+
if (ret == -EAGAIN)
goto retry;
return ret;
@@ -196,8 +196,8 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
ret = __nfs4_copy_file_range(file_in, pos_in, file_out, pos_out, count,
flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(file_in, pos_in, file_out,
- pos_out, count, flags);
+ ret = splice_copy_file_range(file_in, pos_in, file_out,
+ pos_out, count);
return ret;
}
@@ -225,8 +225,14 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
- if ((mode != 0) && (mode != (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)))
+ switch (mode) {
+ case 0:
+ case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+ case FALLOC_FL_ZERO_RANGE:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
ret = inode_newsize_ok(inode, offset + len);
if (ret < 0)
@@ -234,6 +240,8 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
if (mode & FALLOC_FL_PUNCH_HOLE)
return nfs42_proc_deallocate(filep, offset, len);
+ else if (mode & FALLOC_FL_ZERO_RANGE)
+ return nfs42_proc_zero_range(filep, offset, len);
return nfs42_proc_allocate(filep, offset, len);
}
@@ -245,7 +253,6 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
struct nfs_server *server = NFS_SERVER(dst_inode);
struct inode *src_inode = file_inode(src_file);
unsigned int bs = server->clone_blksize;
- bool same_inode = false;
int ret;
/* NFS does not support deduplication. */
@@ -267,25 +274,15 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
goto out;
}
- if (src_inode == dst_inode)
- same_inode = true;
-
/* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
- if (same_inode) {
- inode_lock(src_inode);
- } else if (dst_inode < src_inode) {
- inode_lock_nested(dst_inode, I_MUTEX_PARENT);
- inode_lock_nested(src_inode, I_MUTEX_CHILD);
- } else {
- inode_lock_nested(src_inode, I_MUTEX_PARENT);
- inode_lock_nested(dst_inode, I_MUTEX_CHILD);
- }
-
+ lock_two_nondirectories(src_inode, dst_inode);
 /* flush all pending writes on both src and dst so that the server
 * has the latest data */
+ nfs_file_block_o_direct(NFS_I(src_inode));
ret = nfs_sync_inode(src_inode);
if (ret)
goto out_unlock;
+ nfs_file_block_o_direct(NFS_I(dst_inode));
ret = nfs_sync_inode(dst_inode);
if (ret)
goto out_unlock;
@@ -298,15 +295,7 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);
out_unlock:
- if (same_inode) {
- inode_unlock(src_inode);
- } else if (dst_inode < src_inode) {
- inode_unlock(src_inode);
- inode_unlock(dst_inode);
- } else {
- inode_unlock(dst_inode);
- inode_unlock(src_inode);
- }
+ unlock_two_nondirectories(src_inode, dst_inode);
out:
return ret < 0 ? ret : count;
}
@@ -326,7 +315,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
char *read_name = NULL;
int len, status = 0;
- server = NFS_SERVER(ss_mnt->mnt_root->d_inode);
+ server = NFS_SB(ss_mnt->mnt_sb);
if (!fattr)
return ERR_PTR(-ENOMEM);
@@ -337,14 +326,19 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
goto out;
}
+ if (!S_ISREG(fattr->mode)) {
+ res = ERR_PTR(-EBADF);
+ goto out;
+ }
+
res = ERR_PTR(-ENOMEM);
len = strlen(SSC_READ_NAME_BODY) + 16;
- read_name = kzalloc(len, GFP_NOFS);
+ read_name = kzalloc(len, GFP_KERNEL);
if (read_name == NULL)
goto out;
snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++);
- r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, fattr);
+ r_ino = nfs_fhget(ss_mnt->mnt_sb, src_fh, fattr);
if (IS_ERR(r_ino)) {
res = ERR_CAST(r_ino);
goto out_free_name;
@@ -354,11 +348,12 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
r_ino->i_fop);
if (IS_ERR(filep)) {
res = ERR_CAST(filep);
+ iput(r_ino);
goto out_free_name;
}
- ctx = alloc_nfs_open_context(filep->f_path.dentry, filep->f_mode,
- filep);
+ ctx = alloc_nfs_open_context(filep->f_path.dentry,
+ flags_to_mode(filep->f_flags), filep);
if (IS_ERR(ctx)) {
res = ERR_CAST(ctx);
goto out_filep;
@@ -433,23 +428,25 @@ void nfs42_ssc_unregister_ops(void)
}
#endif /* CONFIG_NFS_V4_2 */
-static int nfs4_setlease(struct file *file, long arg, struct file_lock **lease,
+static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease,
void **priv)
{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return -EINVAL;
return nfs4_proc_setlease(file, arg, lease, priv);
}
const struct file_operations nfs4_file_operations = {
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
- .mmap = nfs_file_mmap,
+ .mmap_prepare = nfs_file_mmap_prepare,
.open = nfs4_file_open,
.flush = nfs4_file_flush,
.release = nfs_file_release,
.fsync = nfs_file_fsync,
.lock = nfs_lock,
.flock = nfs_flock,
- .splice_read = generic_file_splice_read,
+ .splice_read = nfs_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = nfs4_setlease,
@@ -461,4 +458,5 @@ const struct file_operations nfs4_file_operations = {
#else
.llseek = nfs_file_llseek,
#endif
+ .fop_flags = FOP_DONTCACHE,
};
diff --git a/fs/nfs/nfs4getroot.c b/fs/nfs/nfs4getroot.c
index 1a69479a3a59..e67ea345de69 100644
--- a/fs/nfs/nfs4getroot.c
+++ b/fs/nfs/nfs4getroot.c
@@ -12,30 +12,28 @@
int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe)
{
- struct nfs_fsinfo fsinfo;
+ struct nfs_fattr *fattr = nfs_alloc_fattr();
int ret = -ENOMEM;
- fsinfo.fattr = nfs_alloc_fattr();
- if (fsinfo.fattr == NULL)
+ if (fattr == NULL)
goto out;
/* Start by getting the root filehandle from the server */
- ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo, auth_probe);
+ ret = nfs4_proc_get_rootfh(server, mntfh, fattr, auth_probe);
if (ret < 0) {
dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
goto out;
}
- if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE)
- || !S_ISDIR(fsinfo.fattr->mode)) {
+ if (!(fattr->valid & NFS_ATTR_FATTR_TYPE) || !S_ISDIR(fattr->mode)) {
printk(KERN_ERR "nfs4_get_rootfh:"
" getroot encountered non-directory\n");
ret = -ENOTDIR;
goto out;
}
- memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
out:
- nfs_free_fattr(fsinfo.fattr);
+ nfs_free_fattr(fattr);
return ret;
}
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index f331866dd418..9e1c48c5c0b8 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -203,7 +203,7 @@ int nfs_idmap_init(void)
printk(KERN_NOTICE "NFS: Registering the %s key type\n",
key_type_id_resolver.name);
- cred = prepare_kernel_cred(NULL);
+ cred = prepare_kernel_cred(&init_task);
if (!cred)
return -ENOMEM;
@@ -306,15 +306,12 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
const char *type, void *data,
size_t data_size, struct idmap *idmap)
{
- const struct cred *saved_cred;
struct key *rkey;
const struct user_key_payload *payload;
ssize_t ret;
- saved_cred = override_creds(id_resolver_cache);
- rkey = nfs_idmap_request_key(name, namelen, type, idmap);
- revert_creds(saved_cred);
-
+ scoped_with_creds(id_resolver_cache)
+ rkey = nfs_idmap_request_key(name, namelen, type, idmap);
if (IS_ERR(rkey)) {
ret = PTR_ERR(rkey);
goto out;
@@ -424,26 +421,16 @@ static void nfs_idmap_pipe_destroy(struct dentry *dir,
struct rpc_pipe_dir_object *pdo)
{
struct idmap *idmap = pdo->pdo_data;
- struct rpc_pipe *pipe = idmap->idmap_pipe;
- if (pipe->dentry) {
- rpc_unlink(pipe->dentry);
- pipe->dentry = NULL;
- }
+ rpc_unlink(idmap->idmap_pipe);
}
static int nfs_idmap_pipe_create(struct dentry *dir,
struct rpc_pipe_dir_object *pdo)
{
struct idmap *idmap = pdo->pdo_data;
- struct rpc_pipe *pipe = idmap->idmap_pipe;
- struct dentry *dentry;
- dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- pipe->dentry = dentry;
- return 0;
+ return rpc_mkpipe_dentry(dir, "idmap", idmap, idmap->idmap_pipe);
}
static const struct rpc_pipe_dir_object_ops nfs_idmap_pipe_dir_object_ops = {
@@ -561,22 +548,20 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
return true;
}
-static void
-nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
+static void nfs_idmap_complete_pipe_upcall(struct idmap_legacy_upcalldata *data,
+ int ret)
{
- struct key *authkey = idmap->idmap_upcall_data->authkey;
-
- kfree(idmap->idmap_upcall_data);
- idmap->idmap_upcall_data = NULL;
- complete_request_key(authkey, ret);
- key_put(authkey);
+ complete_request_key(data->authkey, ret);
+ key_put(data->authkey);
+ kfree(data);
}
-static void
-nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
+static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap,
+ struct idmap_legacy_upcalldata *data,
+ int ret)
{
- if (idmap->idmap_upcall_data != NULL)
- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+ if (cmpxchg(&idmap->idmap_upcall_data, data, NULL) == data)
+ nfs_idmap_complete_pipe_upcall(data, ret);
}
static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
@@ -585,7 +570,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
struct request_key_auth *rka = get_request_key_auth(authkey);
struct rpc_pipe_msg *msg;
struct idmap_msg *im;
- struct idmap *idmap = (struct idmap *)aux;
+ struct idmap *idmap = aux;
struct key *key = rka->target_key;
int ret = -ENOKEY;
@@ -613,7 +598,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
- nfs_idmap_abort_pipe_upcall(idmap, ret);
+ nfs_idmap_abort_pipe_upcall(idmap, data, ret);
return ret;
out2:
@@ -669,6 +654,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
struct request_key_auth *rka;
struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct idmap *idmap = (struct idmap *)rpci->private;
+ struct idmap_legacy_upcalldata *data;
struct key *authkey;
struct idmap_msg im;
size_t namelen_in;
@@ -678,10 +664,11 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
* will have been woken up and someone else may now have used
* idmap_key_cons - so after this point we may no longer touch it.
*/
- if (idmap->idmap_upcall_data == NULL)
+ data = xchg(&idmap->idmap_upcall_data, NULL);
+ if (data == NULL)
goto out_noupcall;
- authkey = idmap->idmap_upcall_data->authkey;
+ authkey = data->authkey;
rka = get_request_key_auth(authkey);
if (mlen != sizeof(im)) {
@@ -703,18 +690,17 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
ret = -EINVAL;
goto out;
-}
+ }
- ret = nfs_idmap_read_and_verify_message(&im,
- &idmap->idmap_upcall_data->idmap_msg,
- rka->target_key, authkey);
+ ret = nfs_idmap_read_and_verify_message(&im, &data->idmap_msg,
+ rka->target_key, authkey);
if (ret >= 0) {
key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
ret = mlen;
}
out:
- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+ nfs_idmap_complete_pipe_upcall(data, ret);
out_noupcall:
return ret;
}
@@ -728,7 +714,7 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct idmap *idmap = data->idmap;
if (msg->errno)
- nfs_idmap_abort_pipe_upcall(idmap, msg->errno);
+ nfs_idmap_abort_pipe_upcall(idmap, data, msg->errno);
}
static void
@@ -736,8 +722,11 @@ idmap_release_pipe(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
struct idmap *idmap = (struct idmap *)rpci->private;
+ struct idmap_legacy_upcalldata *data;
- nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
+ data = xchg(&idmap->idmap_upcall_data, NULL);
+ if (data)
+ nfs_idmap_complete_pipe_upcall(data, -EPIPE);
}
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid)
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 873342308dc0..9a98595bb160 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -164,16 +164,22 @@ static int nfs4_validate_fspath(struct dentry *dentry,
return 0;
}
-static size_t nfs_parse_server_name(char *string, size_t len,
- struct sockaddr *sa, size_t salen, struct net *net)
+size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr_storage *ss,
+ size_t salen, struct net *net, int port)
{
+ struct sockaddr *sa = (struct sockaddr *)ss;
ssize_t ret;
ret = rpc_pton(net, string, len, sa, salen);
if (ret == 0) {
- ret = nfs_dns_resolve_name(net, string, len, sa, salen);
- if (ret < 0)
- ret = 0;
+ ret = rpc_uaddr2sockaddr(net, string, len, sa, salen);
+ if (ret == 0) {
+ ret = nfs_dns_resolve_name(net, string, len, ss, salen);
+ if (ret < 0)
+ ret = 0;
+ }
+ } else if (port) {
+ rpc_set_port(sa, port);
}
return ret;
}
@@ -326,9 +332,9 @@ static int try_location(struct fs_context *fc,
ctx->nfs_server.addrlen =
nfs_parse_server_name(buf->data, buf->len,
- &ctx->nfs_server.address,
+ &ctx->nfs_server._address,
sizeof(ctx->nfs_server._address),
- fc->net_ns);
+ fc->net_ns, 0);
if (ctx->nfs_server.addrlen == 0)
continue;
@@ -412,6 +418,9 @@ static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
fs_locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
if (!fs_locations)
goto out_free;
+ fs_locations->fattr = nfs_alloc_fattr();
+ if (!fs_locations->fattr)
+ goto out_free_2;
/* Get locations */
dentry = ctx->clone_data.dentry;
@@ -422,14 +431,16 @@ static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
dput(parent);
if (err != 0)
- goto out_free_2;
+ goto out_free_3;
err = -ENOENT;
if (fs_locations->nlocations <= 0 ||
fs_locations->fs_path.ncomponents <= 0)
- goto out_free_2;
+ goto out_free_3;
err = nfs_follow_referral(fc, fs_locations);
+out_free_3:
+ kfree(fs_locations->fattr);
out_free_2:
kfree(fs_locations);
out_free:
@@ -473,14 +484,13 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
char *page, char *page2,
const struct nfs4_fs_location *location)
{
- const size_t addr_bufsize = sizeof(struct sockaddr_storage);
struct net *net = rpc_net_ns(server->client);
- struct sockaddr *sap;
+ struct sockaddr_storage *sap;
unsigned int s;
size_t salen;
int error;
- sap = kmalloc(addr_bufsize, GFP_KERNEL);
+ sap = kmalloc(sizeof(*sap), GFP_KERNEL);
if (sap == NULL)
return -ENOMEM;
@@ -496,10 +506,10 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
continue;
salen = nfs_parse_server_name(buf->data, buf->len,
- sap, addr_bufsize, net);
+ sap, sizeof(*sap), net, 0);
if (salen == 0)
continue;
- rpc_set_port(sap, NFS_PORT);
+ rpc_set_port((struct sockaddr *)sap, NFS_PORT);
error = -ENOMEM;
hostname = kmemdup_nul(buf->data, buf->len, GFP_KERNEL);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ee3bc79f6ca3..93c6ce04332b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -103,21 +103,18 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
const struct cred *cred,
struct nfs4_slot *slot,
bool is_privileged);
-static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
- const struct cred *);
-static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
- const struct cred *, bool);
+static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
+ const struct cred *);
+static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
+ const struct cred *, bool);
#endif
-static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
- const __u32 *src, struct inode *inode,
- struct nfs_server *server,
- struct nfs4_label *label);
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, struct nfs4_label *label)
{
+ struct lsm_context shim;
int err;
if (label == NULL)
@@ -126,19 +123,32 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
return NULL;
+ label->lfs = 0;
+ label->pi = 0;
+ label->len = 0;
+ label->label = NULL;
+
err = security_dentry_init_security(dentry, sattr->ia_mode,
- &dentry->d_name, NULL,
- (void **)&label->label, &label->len);
- if (err == 0)
- return label;
+ &dentry->d_name, NULL, &shim);
+ if (err)
+ return NULL;
- return NULL;
+ label->lsmid = shim.id;
+ label->label = shim.context;
+ label->len = shim.len;
+ return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
- if (label)
- security_release_secctx(label->label, label->len);
+ struct lsm_context shim;
+
+ if (label) {
+ shim.context = label->label;
+ shim.len = label->len;
+ shim.id = label->lsmid;
+ security_release_secctx(&shim);
+ }
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
@@ -169,6 +179,7 @@ static int nfs4_map_errors(int err)
case -NFS4ERR_RESOURCE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
case -NFS4ERR_WRONG_CRED:
@@ -184,6 +195,9 @@ static int nfs4_map_errors(int err)
return -EBUSY;
case -NFS4ERR_NOT_SAME:
return -ENOTSYNC;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ break;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -208,6 +222,7 @@ const u32 nfs4_fattr_bitmap[3] = {
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_CREATE
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
| FATTR4_WORD1_MOUNTED_ON_FILEID,
@@ -229,6 +244,7 @@ static const u32 nfs4_pnfs_open_bitmap[3] = {
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_CREATE
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY,
FATTR4_WORD2_MDSTHRESHOLD
@@ -291,7 +307,7 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
unsigned long cache_validity;
memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
- if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
+ if (!inode || !nfs_have_read_or_write_delegation(inode))
return;
cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
@@ -308,6 +324,21 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
dst[1] &= ~FATTR4_WORD1_MODE;
if (!(cache_validity & NFS_INO_INVALID_OTHER))
dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
+
+ if (!(cache_validity & NFS_INO_INVALID_BTIME))
+ dst[1] &= ~FATTR4_WORD1_TIME_CREATE;
+
+ if (nfs_have_delegated_mtime(inode)) {
+ if (!(cache_validity & NFS_INO_INVALID_ATIME))
+ dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
+ if (!(cache_validity & NFS_INO_INVALID_MTIME))
+ dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
+ if (!(cache_validity & NFS_INO_INVALID_CTIME))
+ dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
+ } else if (nfs_have_delegated_atime(inode)) {
+ if (!(cache_validity & NFS_INO_INVALID_ATIME))
+ dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
+ }
}
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
@@ -360,13 +391,23 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
*p++ = htonl(attrs); /* bitmap */
*p++ = htonl(12); /* attribute buffer length */
*p++ = htonl(NF4DIR);
+ spin_lock(&dentry->d_lock);
p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
+ spin_unlock(&dentry->d_lock);
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
kunmap_atomic(start);
}
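+/*
+ * Record @version as the pre-operation change attribute, unless the server
+ * already supplied one.
+ */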
+static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
+{
+ if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
+ fattr->pre_change_attr = version;
+ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
+ }
+}
+
static void nfs4_test_and_free_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
const struct cred *cred)
@@ -412,8 +453,10 @@ static int nfs4_delay_killable(long *timeout)
{
might_sleep();
- freezable_schedule_timeout_killable_unsafe(
- nfs4_update_delay(timeout));
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
+ __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
+ schedule_timeout(nfs4_update_delay(timeout));
if (!__fatal_signal_pending(current))
return 0;
return -EINTR;
@@ -423,7 +466,10 @@ static int nfs4_delay_interruptible(long *timeout)
{
might_sleep();
- freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
+ __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
+ schedule_timeout(nfs4_update_delay(timeout));
if (!signal_pending(current))
return 0;
return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
@@ -548,6 +594,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
case -NFS4ERR_GRACE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
exception->delay = 1;
return 0;
@@ -575,6 +622,21 @@ wait_on_recovery:
return 0;
}
+/*
+ * Track the number of NFS4ERR_DELAY-related retransmissions and return
+ * -EAGAIN if the 'softerr' mount option is set and we've exceeded the limit
+ * set by 'nfs_delay_retrans'.
+ */
+static int nfs4_exception_should_retrans(const struct nfs_server *server,
+ struct nfs4_exception *exception)
+{
+ if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
+ if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
+ return -EAGAIN;
+ }
+ return 0;
+}
+
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
@@ -585,6 +647,11 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
ret = nfs4_do_handle_exception(server, errorcode, exception);
if (exception->delay) {
+ int ret2 = nfs4_exception_should_retrans(server, exception);
+ if (ret2 < 0) {
+ exception->retry = 0;
+ return ret2;
+ }
ret = nfs4_delay(&exception->timeout,
exception->interruptible);
goto out_retry;
@@ -611,8 +678,22 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
struct nfs_client *clp = server->nfs_client;
int ret;
+ if ((task->tk_rpc_status == -ENETDOWN ||
+ task->tk_rpc_status == -ENETUNREACH) &&
+ task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
+ exception->delay = 0;
+ exception->recovering = 0;
+ exception->retry = 0;
+ return -EIO;
+ }
+
ret = nfs4_do_handle_exception(server, errorcode, exception);
if (exception->delay) {
+ int ret2 = nfs4_exception_should_retrans(server, exception);
+ if (ret2 < 0) {
+ exception->retry = 0;
+ return ret2;
+ }
rpc_delay(task, nfs4_update_delay(&exception->timeout));
goto out_retry;
}
@@ -780,10 +861,9 @@ static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
slot->seq_nr_highest_sent = seqnr;
}
-static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
- u32 seqnr)
+static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
- slot->seq_nr_highest_sent = seqnr;
+ nfs4_slot_sequence_record_sent(slot, seqnr);
slot->seq_nr_last_acked = seqnr;
}
@@ -850,7 +930,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
__func__,
slot->slot_nr,
slot->seq_nr);
- nfs4_slot_sequence_acked(slot, slot->seq_nr);
goto out_retry;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_SEQ_FALSE_RETRY:
@@ -913,6 +992,7 @@ out:
out_noaction:
return ret;
session_recover:
+ set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
nfs4_schedule_session_recovery(session, status);
dprintk("%s ERROR: %d Reset session\n", __func__, status);
nfs41_sequence_free_slot(res);
@@ -1158,7 +1238,7 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
{
unsigned short task_flags = 0;
- if (server->nfs_client->cl_minorversion)
+ if (server->caps & NFS_CAP_MOVEABLE)
task_flags = RPC_TASK_MOVEABLE;
return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}
@@ -1209,7 +1289,8 @@ nfs4_update_changeattr_locked(struct inode *inode,
struct nfs_inode *nfsi = NFS_I(inode);
u64 change_attr = inode_peek_iversion_raw(inode);
- cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
+ if (!nfs_have_delegated_mtime(inode))
+ cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
if (S_ISDIR(inode->i_mode))
cache_validity |= NFS_INO_INVALID_DATA;
@@ -1228,13 +1309,13 @@ nfs4_update_changeattr_locked(struct inode *inode,
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
- if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ if (!nfs_have_delegated_attributes(inode))
cache_validity |=
NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
- NFS_INO_REVAL_PAGECACHE;
+ NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
+ NFS_INO_INVALID_XATTR;
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
}
nfsi->attrtimeo_timestamp = jiffies;
@@ -1285,8 +1366,7 @@ static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
}
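+/* Map an fmode_t to the corresponding NFSv4 OPEN share_access bits. */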
static u32
-nfs4_map_atomic_open_share(struct nfs_server *server,
- fmode_t fmode, int openflags)
+nfs4_fmode_to_share_access(fmode_t fmode)
{
u32 res = 0;
@@ -1300,11 +1380,27 @@ nfs4_map_atomic_open_share(struct nfs_server *server,
case FMODE_READ|FMODE_WRITE:
res = NFS4_SHARE_ACCESS_BOTH;
}
+ return res;
+}
+
+static u32
+nfs4_map_atomic_open_share(struct nfs_server *server,
+ fmode_t fmode, int openflags)
+{
+ u32 res = nfs4_fmode_to_share_access(fmode);
+
if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
goto out;
/* Want no delegation if we're using O_DIRECT */
- if (openflags & O_DIRECT)
+ if (openflags & O_DIRECT) {
res |= NFS4_SHARE_WANT_NO_DELEG;
+ goto out;
+ }
+ /* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
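+ /* Ask for delegated timestamps and OPEN xor delegation when supported */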
+ if (server->caps & NFS_CAP_DELEGTIME)
+ res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
+ if (server->caps & NFS_CAP_OPEN_XOR)
+ res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
return res;
}
@@ -1397,13 +1493,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
case NFS4_OPEN_CLAIM_FH:
p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
- NFS4_ACCESS_EXECUTE;
-#ifdef CONFIG_NFS_V4_2
- if (!(server->caps & NFS_CAP_XATTR))
- break;
- p->o_arg.access |= NFS4_ACCESS_XAREAD | NFS4_ACCESS_XAWRITE |
- NFS4_ACCESS_XALIST;
-#endif
+ NFS4_ACCESS_EXECUTE |
+ nfs_access_xattr_mask(server);
}
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
@@ -1707,7 +1798,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!fatal_signal_pending(current)) {
+ if (!fatal_signal_pending(current) &&
+ !nfs_current_task_exiting()) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -1924,44 +2016,41 @@ out_return_state:
}
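+/*
+ * Process a delegation handed out by the OPEN call: warn if the server
+ * returned one for a CLAIM_DELEGATE_CUR open, reclaim it for CLAIM_PREVIOUS,
+ * record it otherwise, and schedule an asynchronous return if the server
+ * asked us to recall it.
+ */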
static void
-nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
-{
- struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
- struct nfs_delegation *delegation;
- int delegation_flags = 0;
-
- rcu_read_lock();
- delegation = rcu_dereference(NFS_I(state->inode)->delegation);
- if (delegation)
- delegation_flags = delegation->flags;
- rcu_read_unlock();
- switch (data->o_arg.claim) {
- default:
+nfs4_process_delegation(struct inode *inode, const struct cred *cred,
+ enum open_claim_type4 claim,
+ const struct nfs4_open_delegation *delegation)
+{
+ switch (delegation->open_delegation_type) {
+ case NFS4_OPEN_DELEGATE_READ:
+ case NFS4_OPEN_DELEGATE_WRITE:
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
break;
+ default:
+ return;
+ }
+ switch (claim) {
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
"returning a delegation for "
"OPEN(CLAIM_DELEGATE_CUR)\n",
- clp->cl_hostname);
- return;
+ NFS_SERVER(inode)->nfs_client->cl_hostname);
+ break;
+ case NFS4_OPEN_CLAIM_PREVIOUS:
+ nfs_inode_reclaim_delegation(inode, cred, delegation->type,
+ &delegation->stateid,
+ delegation->pagemod_limit,
+ delegation->open_delegation_type);
+ break;
+ default:
+ nfs_inode_set_delegation(inode, cred, delegation->type,
+ &delegation->stateid,
+ delegation->pagemod_limit,
+ delegation->open_delegation_type);
}
- if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
- nfs_inode_set_delegation(state->inode,
- data->owner->so_cred,
- data->o_res.delegation_type,
- &data->o_res.delegation,
- data->o_res.pagemod_limit);
- else
- nfs_inode_reclaim_delegation(state->inode,
- data->owner->so_cred,
- data->o_res.delegation_type,
- &data->o_res.delegation,
- data->o_res.pagemod_limit);
-
- if (data->o_res.do_recall)
- nfs_async_inode_return_delegation(state->inode,
- &data->o_res.delegation);
+ if (delegation->do_recall)
+ nfs_async_inode_return_delegation(inode, &delegation->stateid);
}
/*
@@ -1978,19 +2067,23 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
if (!data->rpc_done) {
if (data->rpc_status)
return ERR_PTR(data->rpc_status);
- /* cached opens have already been processed */
- goto update;
+ return nfs4_try_open_cached(data);
}
ret = nfs_refresh_inode(inode, &data->f_attr);
if (ret)
return ERR_PTR(ret);
- if (data->o_res.delegation_type != 0)
- nfs4_opendata_check_deleg(data, state);
-update:
- if (!update_open_stateid(state, &data->o_res.stateid,
- NULL, data->o_arg.fmode))
+ nfs4_process_delegation(state->inode,
+ data->owner->so_cred,
+ data->o_arg.claim,
+ &data->o_res.delegation);
+
+ if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) {
+ if (!update_open_stateid(state, &data->o_res.stateid,
+ NULL, data->o_arg.fmode))
+ return ERR_PTR(-EAGAIN);
+ } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode))
return ERR_PTR(-EAGAIN);
refcount_inc(&state->count);
@@ -2054,10 +2147,18 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
if (IS_ERR(state))
goto out;
- if (data->o_res.delegation_type != 0)
- nfs4_opendata_check_deleg(data, state);
- if (!update_open_stateid(state, &data->o_res.stateid,
- NULL, data->o_arg.fmode)) {
+ nfs4_process_delegation(state->inode,
+ data->owner->so_cred,
+ data->o_arg.claim,
+ &data->o_res.delegation);
+
+ if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) {
+ if (!update_open_stateid(state, &data->o_res.stateid,
+ NULL, data->o_arg.fmode)) {
+ nfs4_put_open_state(state);
+ state = ERR_PTR(-EAGAIN);
+ }
+ } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) {
nfs4_put_open_state(state);
state = ERR_PTR(-EAGAIN);
}
@@ -2129,18 +2230,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
}
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
- fmode_t fmode)
+ fmode_t fmode)
{
struct nfs4_state *newstate;
+ struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
+ int openflags = opendata->o_arg.open_flags;
int ret;
if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
return 0;
- opendata->o_arg.open_flags = 0;
opendata->o_arg.fmode = fmode;
- opendata->o_arg.share_access = nfs4_map_atomic_open_share(
- NFS_SB(opendata->dentry->d_sb),
- fmode, 0);
+ opendata->o_arg.share_access =
+ nfs4_map_atomic_open_share(server, fmode, openflags);
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
@@ -2193,7 +2294,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
{
struct nfs_delegation *delegation;
struct nfs4_opendata *opendata;
- fmode_t delegation_type = 0;
+ u32 delegation_type = NFS4_OPEN_DELEGATE_NONE;
int status;
opendata = nfs4_open_recoverdata_alloc(ctx, state,
@@ -2202,8 +2303,20 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
return PTR_ERR(opendata);
rcu_read_lock();
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
- if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
- delegation_type = delegation->type;
+ if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) {
+ switch (delegation->type) {
+ case FMODE_READ:
+ delegation_type = NFS4_OPEN_DELEGATE_READ;
+ if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
+ delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG;
+ break;
+ case FMODE_WRITE:
+ case FMODE_READ|FMODE_WRITE:
+ delegation_type = NFS4_OPEN_DELEGATE_WRITE;
+ if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
+ delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG;
+ }
+ }
rcu_read_unlock();
opendata->o_arg.u.delegation_type = delegation_type;
status = nfs4_open_recover(opendata, state);
@@ -2524,12 +2637,14 @@ static void nfs4_open_release(void *calldata)
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
+ /* In case of error, no cleanup! */
+ if (data->rpc_status != 0 || !data->rpc_done) {
+ nfs_release_seqid(data->o_arg.seqid);
+ goto out_free;
+ }
/* If this request hasn't been cancelled, do nothing */
if (!data->cancelled)
goto out_free;
- /* In case of error, no cleanup! */
- if (data->rpc_status != 0 || !data->rpc_done)
- goto out_free;
/* In case we need an open_confirm, no cleanup! */
if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
goto out_free;
@@ -2570,7 +2685,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
};
int status;
- if (server->nfs_client->cl_minorversion)
+ if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
kref_get(&data->kref);
@@ -2628,8 +2743,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
*/
static int nfs4_opendata_access(const struct cred *cred,
struct nfs4_opendata *opendata,
- struct nfs4_state *state, fmode_t fmode,
- int openflags)
+ struct nfs4_state *state, fmode_t fmode)
{
struct nfs_access_entry cache;
u32 mask, flags;
@@ -2640,11 +2754,7 @@ static int nfs4_opendata_access(const struct cred *cred,
return 0;
mask = 0;
- /*
- * Use openflags to check for exec, because fmode won't
- * always have FMODE_EXEC set when file open for exec.
- */
- if (openflags & __FMODE_EXEC) {
+ if (fmode & FMODE_EXEC) {
/* ONLY check for exec rights */
if (S_ISDIR(state->inode->i_mode))
mask = NFS4_ACCESS_LOOKUP;
@@ -2653,9 +2763,8 @@ static int nfs4_opendata_access(const struct cred *cred,
} else if ((fmode & FMODE_READ) && !opendata->file_created)
mask = NFS4_ACCESS_READ;
- cache.cred = cred;
nfs_access_set_mask(&cache, opendata->o_res.access_result);
- nfs_access_add_cache(state->inode, &cache);
+ nfs_access_add_cache(state->inode, &cache, cred);
flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
if ((mask & ~cache.mask & flags) == 0)
@@ -2707,8 +2816,12 @@ static int _nfs4_proc_open(struct nfs4_opendata *data,
return status;
}
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
+ struct nfs_fh *fh = &o_res->fh;
+
nfs4_sequence_free_slot(&o_res->seq_res);
- nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, NULL);
+ if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
+ fh = NFS_FH(d_inode(data->dentry));
+ nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
}
return 0;
}
@@ -2723,10 +2836,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
struct nfs4_opendata *opendata;
int ret;
- opendata = nfs4_open_recoverdata_alloc(ctx, state,
- NFS4_OPEN_CLAIM_FH);
+ opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
+ /*
+ * We're not recovering a delegation, so ask for no delegation.
+ * Otherwise the recovery thread could deadlock with an outstanding
+ * delegation return.
+ */
+ opendata->o_arg.open_flags = O_DIRECT;
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE)
d_drop(ctx->dentry);
@@ -2793,16 +2911,14 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
}
static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
- nfs4_stateid *stateid,
- const struct cred *cred)
+ nfs4_stateid *stateid, const struct cred *cred)
{
return -NFS4ERR_BAD_STATEID;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
- nfs4_stateid *stateid,
- const struct cred *cred)
+ nfs4_stateid *stateid, const struct cred *cred)
{
int status;
@@ -2811,6 +2927,7 @@ static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
break;
case NFS4_INVALID_STATEID_TYPE:
case NFS4_SPECIAL_STATEID_TYPE:
+ case NFS4_FREED_STATEID_TYPE:
return -NFS4ERR_BAD_STATEID;
case NFS4_REVOKED_STATEID_TYPE:
goto out_free;
@@ -3028,7 +3145,7 @@ static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
}
static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
- int flags, struct nfs_open_context *ctx)
+ struct nfs_open_context *ctx)
{
struct nfs4_state_owner *sp = opendata->owner;
struct nfs_server *server = sp->so_server;
@@ -3037,10 +3154,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
struct inode *dir = d_inode(opendata->dir);
unsigned long dir_verifier;
- unsigned int seq;
int ret;
- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
dir_verifier = nfs_save_change_attribute(dir);
ret = _nfs4_proc_open(opendata, ctx);
@@ -3056,14 +3171,14 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
+ if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
+ set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);
dentry = opendata->dentry;
if (d_really_is_negative(dentry)) {
struct dentry *alias;
d_drop(dentry);
- alias = d_exact_alias(dentry, state->inode);
- if (!alias)
- alias = d_splice_alias(igrab(state->inode), dentry);
+ alias = d_splice_alias(igrab(state->inode), dentry);
/* d_splice_alias() can't fail here - it's a non-directory */
if (alias) {
dput(ctx->dentry);
@@ -3079,7 +3194,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
if (!opendata->rpc_done)
break;
- if (opendata->o_res.delegation_type != 0)
+ if (opendata->o_res.delegation.type != 0)
dir_verifier = nfs_save_change_attribute(dir);
nfs_set_verifier(dentry, dir_verifier);
}
@@ -3087,20 +3202,21 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
/* Parse layoutget results before we check for access */
pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
- ret = nfs4_opendata_access(sp->so_cred, opendata, state,
- acc_mode, flags);
+ ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode);
if (ret != 0)
goto out;
- if (d_inode(dentry) == state->inode) {
+ if (d_inode(dentry) == state->inode)
nfs_inode_attach_open_context(ctx);
- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
- nfs4_schedule_stateid_recovery(server, state);
- }
out:
- if (!opendata->cancelled)
+ if (!opendata->cancelled) {
+ if (opendata->lgp) {
+ nfs4_lgopen_release(opendata->lgp);
+ opendata->lgp = NULL;
+ }
nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+ }
return ret;
}
@@ -3157,7 +3273,7 @@ static int _nfs4_do_open(struct inode *dir,
if (d_really_is_positive(dentry))
opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
- status = _nfs4_open_and_get_state(opendata, flags, ctx);
+ status = _nfs4_open_and_get_state(opendata, ctx);
if (status != 0)
goto err_opendata_put;
state = ctx->state;
@@ -3361,13 +3477,18 @@ static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
.inode = inode,
.stateid = &arg.stateid,
};
- unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
+ unsigned long adjust_flags = NFS_INO_INVALID_CHANGE |
+ NFS_INO_INVALID_CTIME;
int err;
if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
adjust_flags |= NFS_INO_INVALID_MODE;
if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
adjust_flags |= NFS_INO_INVALID_OTHER;
+ if (sattr->ia_valid & ATTR_ATIME)
+ adjust_flags |= NFS_INO_INVALID_ATIME;
+ if (sattr->ia_valid & ATTR_MTIME)
+ adjust_flags |= NFS_INO_INVALID_MTIME;
do {
nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
@@ -3479,7 +3600,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current) || nfs_current_task_exiting())
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -3515,6 +3636,7 @@ struct nfs4_closedata {
} lr;
struct nfs_fattr fattr;
unsigned long timestamp;
+ unsigned short retrans;
};
static void nfs4_free_closedata(void *data)
@@ -3543,6 +3665,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
.state = state,
.inode = calldata->inode,
.stateid = &calldata->arg.stateid,
+ .retrans = calldata->retrans,
};
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -3590,6 +3713,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
+ calldata->retrans = exception.retrans;
if (exception.retry)
goto out_restart;
}
@@ -3667,18 +3791,17 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
/* Close-to-open cache consistency revalidation */
- if (!nfs4_have_delegation(inode, FMODE_READ)) {
+ if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
nfs4_bitmask_set(calldata->arg.bitmask_store,
server->cache_consistency_bitmask,
- inode, server, NULL);
+ inode, 0);
calldata->arg.bitmask = calldata->arg.bitmask_store;
} else
calldata->arg.bitmask = NULL;
}
calldata->arg.share_access =
- nfs4_map_atomic_open_share(NFS_SERVER(inode),
- calldata->arg.fmode, 0);
+ nfs4_fmode_to_share_access(calldata->arg.fmode);
if (calldata->res.fattr == NULL)
calldata->arg.bitmask = NULL;
@@ -3734,7 +3857,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
};
int status = -ENOMEM;
- if (server->nfs_client->cl_minorversion)
+ if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
@@ -3793,7 +3916,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
int open_flags, struct iattr *attr, int *opened)
{
struct nfs4_state *state;
- struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
+ struct nfs4_label l, *label;
label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
@@ -3809,8 +3932,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
+ struct dentry *dentry = ctx->dentry;
if (ctx->state == NULL)
return;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
if (is_sync)
nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
else
@@ -3819,11 +3945,26 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
-#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
+#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
+
+#define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \
+ (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS)
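+/*
+ * Check that the server both accepts NFS4_SHARE_WANT_DELEG_TIMESTAMPS and
+ * supports the time_deleg_access/time_deleg_modify attributes.
+ */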
+static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res)
+{
+ u32 share_access_want = res->open_caps.oa_share_access_want[0];
+ u32 attr_bitmask = res->attr_bitmask[2];
+
+ return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) &&
+ ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) ==
+ FATTR4_WORD2_NFS42_TIME_DELEG_MASK);
+}
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
- u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
+ u32 minorversion = server->nfs_client->cl_minorversion;
+ u32 bitmask[3] = {
+ [0] = FATTR4_WORD0_SUPPORTED_ATTRS,
+ };
struct nfs4_server_caps_arg args = {
.fhandle = fhandle,
.bitmask = bitmask,
@@ -3841,12 +3982,24 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
FATTR4_WORD0_FH_EXPIRE_TYPE |
FATTR4_WORD0_LINK_SUPPORT |
FATTR4_WORD0_SYMLINK_SUPPORT |
- FATTR4_WORD0_ACLSUPPORT;
+ FATTR4_WORD0_ACLSUPPORT |
+ FATTR4_WORD0_CASE_INSENSITIVE |
+ FATTR4_WORD0_CASE_PRESERVING;
if (minorversion)
bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
+ if (minorversion > 1)
+ bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
+ bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS |
+ FATTR4_WORD0_FH_EXPIRE_TYPE |
+ FATTR4_WORD0_LINK_SUPPORT |
+ FATTR4_WORD0_SYMLINK_SUPPORT |
+ FATTR4_WORD0_ACLSUPPORT |
+ FATTR4_WORD0_CASE_INSENSITIVE |
+ FATTR4_WORD0_CASE_PRESERVING) &
+ res.attr_bitmask[0];
/* Sanity check the server answers */
switch (minorversion) {
case 0:
@@ -3855,13 +4008,20 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
break;
case 1:
res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
+ bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT &
+ res.attr_bitmask[2];
break;
case 2:
res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
+ bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT |
+ FATTR4_WORD2_OPEN_ARGUMENTS) &
+ res.attr_bitmask[2];
}
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
- server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
- NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
+ server->caps &=
+ ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
+ NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS |
+ NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME);
server->fattr_valid = NFS_ATTR_FATTR_V4;
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
@@ -3870,10 +4030,16 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+ if (res.case_insensitive)
+ server->caps |= NFS_CAP_CASE_INSENSITIVE;
+ if (res.case_preserving)
+ server->caps |= NFS_CAP_CASE_PRESERVING;
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
server->caps |= NFS_CAP_SECURITY_LABEL;
#endif
+ if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
+ server->caps |= NFS_CAP_FS_LOCATIONS;
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3894,10 +4060,20 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
+ if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE))
+ server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME;
memcpy(server->attr_bitmask_nl, res.attr_bitmask,
sizeof(server->attr_bitmask));
server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (res.open_caps.oa_share_access_want[0] &
+ NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
+ server->caps |= NFS_CAP_OPEN_XOR;
+ if (nfs4_server_delegtime_capable(&res))
+ server->caps |= NFS_CAP_DELEGTIME;
+
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
@@ -3923,7 +4099,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
};
int err;
- nfs4_server_set_init_caps(server);
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),
@@ -3932,16 +4107,157 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
return err;
}
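+/*
+ * Probe each server address in @location as a potential trunked transport:
+ * resolve the name, then let the RPC layer create the transport and run the
+ * session trunking test before adding it to @clp.
+ */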
+static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
+ struct nfs_client *clp,
+ struct nfs_server *server)
+{
+ int i;
+
+ for (i = 0; i < location->nservers; i++) {
+ struct nfs4_string *srv_loc = &location->servers[i];
+ struct sockaddr_storage addr;
+ size_t addrlen;
+ struct xprt_create xprt_args = {
+ .ident = 0,
+ .net = clp->cl_net,
+ };
+ struct nfs4_add_xprt_data xprtdata = {
+ .clp = clp,
+ };
+ struct rpc_add_xprt_test rpcdata = {
+ .add_xprt_test = clp->cl_mvops->session_trunk,
+ .data = &xprtdata,
+ };
+ char *servername = NULL;
+
+ if (!srv_loc->len)
+ continue;
+
+ addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
+ &addr, sizeof(addr),
+ clp->cl_net, server->port);
+ if (!addrlen)
+ return;
+ xprt_args.dstaddr = (struct sockaddr *)&addr;
+ xprt_args.addrlen = addrlen;
+ servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
+ if (!servername)
+ return;
+ memcpy(servername, srv_loc->data, srv_loc->len);
+ servername[srv_loc->len] = '\0';
+ xprt_args.servername = servername;
+
+ xprtdata.cred = nfs4_get_clid_cred(clp);
+ rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
+ rpc_clnt_setup_test_and_add_xprt,
+ &rpcdata);
+ if (xprtdata.cred)
+ put_cred(xprtdata.cred);
+ kfree(servername);
+ }
+}
+
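+/* Return true if the two fs_locations pathnames are component-wise identical. */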
+static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
+ struct nfs4_pathname *path2)
+{
+ int i;
+
+ if (path1->ncomponents != path2->ncomponents)
+ return false;
+ for (i = 0; i < path1->ncomponents; i++) {
+ if (path1->components[i].len != path2->components[i].len)
+ return false;
+ if (memcmp(path1->components[i].data, path2->components[i].data,
+ path1->components[i].len))
+ return false;
+ }
+ return true;
+}
+
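+/*
+ * Fetch the fs_locations attribute for @fhandle and test every location
+ * whose root path matches this filesystem's path for session trunking.
+ */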
+static int _nfs4_discover_trunking(struct nfs_server *server,
+ struct nfs_fh *fhandle)
+{
+ struct nfs4_fs_locations *locations = NULL;
+ struct page *page;
+ const struct cred *cred;
+ struct nfs_client *clp = server->nfs_client;
+ const struct nfs4_state_maintenance_ops *ops =
+ clp->cl_mvops->state_renewal_ops;
+ int status = -ENOMEM, i;
+
+ cred = ops->get_state_renewal_cred(clp);
+ if (cred == NULL) {
+ cred = nfs4_get_clid_cred(clp);
+ if (cred == NULL)
+ return -ENOKEY;
+ }
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ goto out_put_cred;
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ if (!locations)
+ goto out_free;
+ locations->fattr = nfs_alloc_fattr();
+ if (!locations->fattr)
+ goto out_free_2;
+
+ status = nfs4_proc_get_locations(server, fhandle, locations, page,
+ cred);
+ if (status)
+ goto out_free_3;
+
+ for (i = 0; i < locations->nlocations; i++) {
+ if (!_is_same_nfs4_pathname(&locations->fs_path,
+ &locations->locations[i].rootpath))
+ continue;
+ test_fs_location_for_trunking(&locations->locations[i], clp,
+ server);
+ }
+out_free_3:
+ kfree(locations->fattr);
+out_free_2:
+ kfree(locations);
+out_free:
+ __free_page(page);
+out_put_cred:
+ put_cred(cred);
+ return status;
+}
+
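+/*
+ * Exception-handling wrapper around _nfs4_discover_trunking(); trunking
+ * discovery is only attempted when an NFSv4.1+ session is in use.
+ */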
+static int nfs4_discover_trunking(struct nfs_server *server,
+ struct nfs_fh *fhandle)
+{
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+ struct nfs_client *clp = server->nfs_client;
+ int err = 0;
+
+ if (!nfs4_has_session(clp))
+ goto out;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_discover_trunking(server, fhandle),
+ &exception);
+ } while (exception.retry);
+out:
+ return err;
+}
+
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fattr *fattr)
{
- u32 bitmask[3];
+ u32 bitmask[3] = {
+ [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE |
+ FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID,
+ };
struct nfs4_lookup_root_arg args = {
.bitmask = bitmask,
};
struct nfs4_lookup_res res = {
.server = server,
- .fattr = info->fattr,
+ .fattr = fattr,
.fh = fhandle,
};
struct rpc_message msg = {
@@ -3950,27 +4266,20 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
- bitmask[0] = nfs4_fattr_bitmap[0];
- bitmask[1] = nfs4_fattr_bitmap[1];
- /*
- * Process the label in the upcoming getfattr
- */
- bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
-
- nfs_fattr_init(info->fattr);
+ nfs_fattr_init(fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fattr *fattr)
{
struct nfs4_exception exception = {
.interruptible = true,
};
int err;
do {
- err = _nfs4_lookup_root(server, fhandle, info);
- trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
+ err = _nfs4_lookup_root(server, fhandle, fattr);
+ trace_nfs4_lookup_root(server, fhandle, fattr, err);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
@@ -3983,8 +4292,9 @@ out:
return err;
}
-static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info, rpc_authflavor_t flavor)
+static int nfs4_lookup_root_sec(struct nfs_server *server,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr,
+ rpc_authflavor_t flavor)
{
struct rpc_auth_create_args auth_args = {
.pseudoflavor = flavor,
@@ -3994,7 +4304,7 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
auth = rpcauth_create(&auth_args, server->client);
if (IS_ERR(auth))
return -EACCES;
- return nfs4_lookup_root(server, fhandle, info);
+ return nfs4_lookup_root(server, fhandle, fattr);
}
/*
@@ -4007,7 +4317,7 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
* negative errno value.
*/
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fattr *fattr)
{
/* Per 3530bis 15.33.5 */
static const rpc_authflavor_t flav_array[] = {
@@ -4023,8 +4333,9 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
if (server->auth_info.flavor_len > 0) {
/* try each flavor specified by user */
for (i = 0; i < server->auth_info.flavor_len; i++) {
- status = nfs4_lookup_root_sec(server, fhandle, info,
- server->auth_info.flavors[i]);
+ status = nfs4_lookup_root_sec(
+ server, fhandle, fattr,
+ server->auth_info.flavors[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
break;
@@ -4032,7 +4343,7 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
} else {
/* no flavors specified by user, try default list */
for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
- status = nfs4_lookup_root_sec(server, fhandle, info,
+ status = nfs4_lookup_root_sec(server, fhandle, fattr,
flav_array[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
@@ -4056,28 +4367,22 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
* nfs4_proc_get_rootfh - get file handle for server's pseudoroot
* @server: initialized nfs_server handle
* @fhandle: we fill in the pseudo-fs root file handle
- * @info: we fill in an FSINFO struct
+ * @fattr: we fill in a bare-bones struct fattr
* @auth_probe: probe the auth flavours
*
* Returns zero on success, or a negative errno.
*/
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info,
- bool auth_probe)
+ struct nfs_fattr *fattr, bool auth_probe)
{
int status = 0;
if (!auth_probe)
- status = nfs4_lookup_root(server, fhandle, info);
+ status = nfs4_lookup_root(server, fhandle, fattr);
if (auth_probe || status == NFS4ERR_WRONGSEC)
- status = server->nfs_client->cl_mvops->find_root_sec(server,
- fhandle, info);
-
- if (status == 0)
- status = nfs4_server_capabilities(server, fhandle);
- if (status == 0)
- status = nfs4_do_fsinfo(server, fhandle, info);
+ status = server->nfs_client->cl_mvops->find_root_sec(
+ server, fhandle, fattr);
return nfs4_map_errors(status);
}
@@ -4128,6 +4433,8 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
if (locations == NULL)
goto out;
+ locations->fattr = fattr;
+
status = nfs4_proc_fs_locations(client, dir, name, locations, page);
if (status != 0)
goto out;
@@ -4137,17 +4444,14 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
* referral. Cause us to drop into the exception handler, which
* will kick off migration recovery.
*/
- if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
+ if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
status = -NFS4ERR_MOVED;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
- nfs_fixup_referral_attributes(&locations->fattr);
-
- /* replace the lookup nfs_fattr with the locations nfs_fattr */
- memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
+ nfs_fixup_referral_attributes(fattr);
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
@@ -4267,15 +4571,15 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
- struct dentry *dentry, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+ struct dentry *dentry, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
- .name = &dentry->d_name,
+ .name = name,
};
struct nfs4_lookup_res res = {
.server = server,
@@ -4289,7 +4593,7 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
};
unsigned short task_flags = 0;
- if (server->nfs_client->cl_minorversion)
+ if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
task_flags = RPC_TASK_MOVEABLE;
/* Is this is an attribute revalidation, subject to softreval? */
@@ -4317,17 +4621,16 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
- struct dentry *dentry, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+ struct dentry *dentry, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = {
.interruptible = true,
};
struct rpc_clnt *client = *clnt;
- const struct qstr *name = &dentry->d_name;
int err;
do {
- err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
+ err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr);
trace_nfs4_lookup(dir, name, err);
switch (err) {
case -NFS4ERR_BADNAME:
@@ -4362,13 +4665,13 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
+static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct rpc_clnt *client = NFS_CLIENT(dir);
- status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr);
if (client != NFS_CLIENT(dir)) {
rpc_shutdown_client(client);
nfs_fixup_secinfo_attributes(fattr);
@@ -4383,7 +4686,8 @@ nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (status < 0)
return ERR_PTR(status);
return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
@@ -4411,16 +4715,19 @@ static int _nfs4_proc_lookupp(struct inode *inode,
};
unsigned short task_flags = 0;
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+ if (server->flags & NFS_MOUNT_SOFTREVAL)
task_flags |= RPC_TASK_TIMEOUT;
+ if (server->caps & NFS_CAP_MOVEABLE)
+ task_flags |= RPC_TASK_MOVEABLE;
args.bitmask = nfs4_bitmask(server, fattr->label);
nfs_fattr_init(fattr);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
- &res.seq_res, task_flags);
+ status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args,
+ &res.seq_res, task_flags);
dprintk("NFS reply lookupp: %d\n", status);
return status;
}
@@ -4441,7 +4748,8 @@ static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
return err;
}
-static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
+ const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_accessargs args = {
@@ -4455,11 +4763,11 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
.rpc_argp = &args,
.rpc_resp = &res,
- .rpc_cred = entry->cred,
+ .rpc_cred = cred,
};
int status = 0;
- if (!nfs4_have_delegation(inode, FMODE_READ)) {
+ if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
res.fattr = nfs_alloc_fattr();
if (res.fattr == NULL)
return -ENOMEM;
@@ -4475,14 +4783,15 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
return status;
}
-static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
+ const struct cred *cred)
{
struct nfs4_exception exception = {
.interruptible = true,
};
int err;
do {
- err = _nfs4_proc_access(inode, entry);
+ err = _nfs4_proc_access(inode, entry, cred);
trace_nfs4_access(inode, err);
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
@@ -4555,7 +4864,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags)
{
struct nfs_server *server = NFS_SERVER(dir);
- struct nfs4_label l, *ilabel = NULL;
+ struct nfs4_label l, *ilabel;
struct nfs_open_context *ctx;
struct nfs4_state *state;
int status = 0;
@@ -4663,8 +4972,10 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg,
nfs_fattr_init(res->dir_attr);
- if (inode)
+ if (inode) {
nfs4_inode_return_delegation(inode);
+ nfs_d_prune_case_insensitive_aliases(inode);
+ }
}
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
@@ -4730,6 +5041,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 0;
if (task->tk_status == 0) {
+ nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
if (new_dir != old_dir) {
/* Note: If we moved a directory, nlink will change */
nfs4_update_changeattr(old_dir, &res->old_cinfo,
@@ -4773,8 +5085,9 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
goto out;
nfs4_inode_make_writeable(inode);
- nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), inode,
- NFS_INO_INVALID_CHANGE);
+ nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label),
+ inode,
+ NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME);
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
@@ -4852,9 +5165,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
spin_lock(&dir->i_lock);
- /* Creating a directory bumps nlink in the parent */
- if (data->arg.ftype == NF4DIR)
- nfs4_inc_nlink_locked(dir);
nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
data->res.fattr->time_start,
NFS_INO_INVALID_DATA);
@@ -4864,6 +5174,31 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
return status;
}
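+/*
+ * Send the prepared CREATE(NF4DIR) call and, on success, bump the parent's
+ * nlink, update its change attribute, and return a dentry for the new
+ * directory (which may differ from the one passed in).
+ */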
+static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry,
+ struct nfs4_createdata *data, int *statusp)
+{
+ struct dentry *ret;
+
+ *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
+ &data->arg.seq_args, &data->res.seq_res, 1);
+
+ if (*statusp)
+ return NULL;
+
+ spin_lock(&dir->i_lock);
+ /* Creating a directory bumps nlink in the parent */
+ nfs4_inc_nlink_locked(dir);
+ nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
+ data->res.fattr->time_start,
+ NFS_INO_INVALID_DATA);
+ spin_unlock(&dir->i_lock);
+ ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
+ if (!IS_ERR(ret))
+ return ret;
+ *statusp = PTR_ERR(ret);
+ return NULL;
+}
+
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
nfs4_label_free(data->fattr.label);
@@ -4871,9 +5206,10 @@ static void nfs4_free_createdata(struct nfs4_createdata *data)
}
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
- struct page *page, unsigned int len, struct iattr *sattr,
+ struct folio *folio, unsigned int len, struct iattr *sattr,
struct nfs4_label *label)
{
+ struct page *page = &folio->page;
struct nfs4_createdata *data;
int status = -ENAMETOOLONG;
@@ -4898,18 +5234,18 @@ out:
}
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
- struct page *page, unsigned int len, struct iattr *sattr)
+ struct folio *folio, unsigned int len, struct iattr *sattr)
{
struct nfs4_exception exception = {
.interruptible = true,
};
- struct nfs4_label l, *label = NULL;
+ struct nfs4_label l, *label;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
do {
- err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
+ err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
trace_nfs4_symlink(dir, &dentry->d_name, err);
err = nfs4_handle_exception(NFS_SERVER(dir), err,
&exception);
@@ -4919,32 +5255,35 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
return err;
}
-static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- struct iattr *sattr, struct nfs4_label *label)
+static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr,
+ struct nfs4_label *label, int *statusp)
{
struct nfs4_createdata *data;
- int status = -ENOMEM;
+ struct dentry *ret = NULL;
+ *statusp = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
if (data == NULL)
goto out;
data->arg.label = label;
- status = nfs4_do_create(dir, dentry, data);
+ ret = nfs4_do_mkdir(dir, dentry, data, statusp);
nfs4_free_createdata(data);
out:
- return status;
+ return ret;
}
-static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- struct iattr *sattr)
+static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = {
.interruptible = true,
};
- struct nfs4_label l, *label = NULL;
+ struct nfs4_label l, *label;
+ struct dentry *alias;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -4952,14 +5291,16 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
sattr->ia_mode &= ~current_umask();
do {
- err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
+ alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err);
trace_nfs4_mkdir(dir, &dentry->d_name, err);
- err = nfs4_handle_exception(NFS_SERVER(dir), err,
- &exception);
+ if (err)
+ alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
+ err,
+ &exception));
} while (exception.retry);
nfs4_label_release_security(label);
- return err;
+ return alias;
}
static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
@@ -5063,7 +5404,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct nfs4_exception exception = {
.interruptible = true,
};
- struct nfs4_label l, *label = NULL;
+ struct nfs4_label l, *label;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -5258,9 +5599,11 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
.inode = hdr->inode,
.state = hdr->args.context->state,
.stateid = &hdr->args.stateid,
+ .retrans = hdr->retrans,
};
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
+ hdr->retrans = exception.retrans;
if (exception.retry) {
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -5293,7 +5636,7 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
struct rpc_message *msg = &task->tk_msg;
if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
- server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
+ task->tk_status == -ENOTSUPP) {
server->caps &= ~NFS_CAP_READ_PLUS;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
rpc_restart_call_prepare(task);
@@ -5317,17 +5660,21 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
}
#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
-static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
struct rpc_message *msg)
{
/* Note: We don't use READ_PLUS with pNFS yet */
- if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
+ if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
+ return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
+ }
+ return false;
}
#else
-static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
struct rpc_message *msg)
{
+ return false;
}
#endif /* CONFIG_NFS_V4_2 */
@@ -5337,8 +5684,8 @@ static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
hdr->timestamp = jiffies;
if (!hdr->pgio_done_cb)
hdr->pgio_done_cb = nfs4_read_done_cb;
- msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
- nfs42_read_plus_support(hdr, msg);
+ if (!nfs42_read_plus_support(hdr, msg))
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
}
@@ -5370,10 +5717,12 @@ static int nfs4_write_done_cb(struct rpc_task *task,
.inode = hdr->inode,
.state = hdr->args.context->state,
.stateid = &hdr->args.stateid,
+ .retrans = hdr->retrans,
};
task->tk_status = nfs4_async_handle_exception(task,
NFS_SERVER(inode), task->tk_status,
&exception);
+ hdr->retrans = exception.retrans;
if (exception.retry) {
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -5419,17 +5768,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
/* Otherwise, request attributes if and only if we don't hold
* a delegation
*/
- return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
+ return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0;
}
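+/*
+ * Copy the base attribute bitmask from @src, then add any attributes whose
+ * cached values are marked invalid so that the next GETATTR refreshes them.
+ */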
-static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
- struct inode *inode, struct nfs_server *server,
- struct nfs4_label *label)
+void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
+ struct inode *inode, unsigned long cache_validity)
{
- unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int i;
memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
+ cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);
if (cache_validity & NFS_INO_INVALID_CHANGE)
bitmask[0] |= FATTR4_WORD0_CHANGE;
@@ -5441,14 +5790,14 @@ static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
if (cache_validity & NFS_INO_INVALID_NLINK)
bitmask[1] |= FATTR4_WORD1_NUMLINKS;
- if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
- bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
if (cache_validity & NFS_INO_INVALID_CTIME)
bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
if (cache_validity & NFS_INO_INVALID_MTIME)
bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
if (cache_validity & NFS_INO_INVALID_BLOCKS)
bitmask[1] |= FATTR4_WORD1_SPACE_USED;
+ if (cache_validity & NFS_INO_INVALID_BTIME)
+ bitmask[1] |= FATTR4_WORD1_TIME_CREATE;
if (cache_validity & NFS_INO_INVALID_SIZE)
bitmask[0] |= FATTR4_WORD0_SIZE;
@@ -5469,7 +5818,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
} else {
nfs4_bitmask_set(hdr->args.bitmask_store,
server->cache_consistency_bitmask,
- hdr->inode, server, NULL);
+ hdr->inode, NFS_INO_INVALID_BLOCKS);
hdr->args.bitmask = hdr->args.bitmask_store;
}
@@ -5480,7 +5829,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
+ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
}
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
@@ -5521,7 +5870,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
+ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
}
static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
@@ -5650,9 +6000,17 @@ static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
return 0;
}
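+/* Does the server advertise the attribute needed for this ACL type? */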
-static inline int nfs4_server_supports_acls(struct nfs_server *server)
+static bool nfs4_server_supports_acls(const struct nfs_server *server,
+ enum nfs4_acl_type type)
{
- return server->caps & NFS_CAP_ACLS;
+ switch (type) {
+ default:
+ return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
+ case NFS4ACL_DACL:
+ return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
+ case NFS4ACL_SACL:
+ return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
+ }
}
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
@@ -5691,6 +6049,7 @@ unwind:
}
struct nfs4_cached_acl {
+ enum nfs4_acl_type type;
int cached;
size_t len;
char data[];
@@ -5711,7 +6070,8 @@ static void nfs4_zap_acl_attr(struct inode *inode)
nfs4_set_cached_acl(inode, NULL);
}
-static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
+static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_cached_acl *acl;
@@ -5721,6 +6081,8 @@ static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_
acl = nfsi->nfs4_acl;
if (acl == NULL)
goto out;
+ if (acl->type != type)
+ goto out;
if (buf == NULL) /* user is just asking for length */
goto out_len;
if (acl->cached == 0)
@@ -5736,7 +6098,9 @@ out:
return ret;
}
-static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
+static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
+ size_t pgbase, size_t acl_len,
+ enum nfs4_acl_type type)
{
struct nfs4_cached_acl *acl;
size_t buflen = sizeof(*acl) + acl_len;
@@ -5753,6 +6117,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
goto out;
acl->cached = 0;
}
+ acl->type = type;
acl->len = acl_len;
out:
nfs4_set_cached_acl(inode, acl);
@@ -5768,14 +6133,17 @@ out:
* length. The next getxattr call will then produce another round trip to
* the server, this time with the input buf of the required size.
*/
-static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct page **pages;
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
+ .acl_type = type,
.acl_len = buflen,
};
struct nfs_getaclres res = {
+ .acl_type = type,
.acl_len = buflen,
};
struct rpc_message msg = {
@@ -5791,7 +6159,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
buflen = server->rsize;
npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
- pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS);
+ pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -5804,7 +6172,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
}
/* for decoding across pages */
- res.acl_scratch = alloc_page(GFP_KERNEL);
+ res.acl_scratch = folio_alloc(GFP_KERNEL, 0);
if (!res.acl_scratch)
goto out_free;
@@ -5825,7 +6193,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
ret = -ERANGE;
goto out_free;
}
- nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
+ nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
+ type);
if (buf) {
if (res.acl_len > buflen) {
ret = -ERANGE;
@@ -5836,23 +6205,23 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
out_ok:
ret = res.acl_len;
out_free:
- for (i = 0; i < npages; i++)
- if (pages[i])
- __free_page(pages[i]);
+ while (--i >= 0)
+ __free_page(pages[i]);
if (res.acl_scratch)
- __free_page(res.acl_scratch);
+ folio_put(res.acl_scratch);
kfree(pages);
return ret;
}
-static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs4_exception exception = {
.interruptible = true,
};
ssize_t ret;
do {
- ret = __nfs4_get_acl_uncached(inode, buf, buflen);
+ ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
trace_nfs4_get_acl(inode, ret);
if (ret >= 0)
break;
@@ -5861,34 +6230,39 @@ static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bufl
return ret;
}
-static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
+ enum nfs4_acl_type type)
{
struct nfs_server *server = NFS_SERVER(inode);
int ret;
- if (!nfs4_server_supports_acls(server))
+ if (unlikely(NFS_FH(inode)->size == 0))
+ return -ENODATA;
+ if (!nfs4_server_supports_acls(server, type))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
if (ret < 0)
return ret;
if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
- ret = nfs4_read_cached_acl(inode, buf, buflen);
+ ret = nfs4_read_cached_acl(inode, buf, buflen, type);
if (ret != -ENOENT)
/* -ENOENT is returned if there is no ACL or if there is an ACL
* but no cached acl data, just the acl length */
return ret;
- return nfs4_get_acl_uncached(inode, buf, buflen);
+ return nfs4_get_acl_uncached(inode, buf, buflen, type);
}
-static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs_server *server = NFS_SERVER(inode);
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_setaclargs arg = {
- .fh = NFS_FH(inode),
- .acl_pages = pages,
- .acl_len = buflen,
+ .fh = NFS_FH(inode),
+ .acl_type = type,
+ .acl_len = buflen,
+ .acl_pages = pages,
};
struct nfs_setaclres res;
struct rpc_message msg = {
@@ -5902,7 +6276,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
/* You can't remove system.nfs4_acl: */
if (buflen == 0)
return -EINVAL;
- if (!nfs4_server_supports_acls(server))
+ if (!nfs4_server_supports_acls(server, type))
return -EOPNOTSUPP;
if (npages > ARRAY_SIZE(pages))
return -ERANGE;
@@ -5933,12 +6307,16 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
return ret;
}
-static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
+ size_t buflen, enum nfs4_acl_type type)
{
struct nfs4_exception exception = { };
int err;
+
+ if (unlikely(NFS_FH(inode)->size == 0))
+ return -ENODATA;
do {
- err = __nfs4_proc_set_acl(inode, buf, buflen);
+ err = __nfs4_proc_set_acl(inode, buf, buflen, type);
trace_nfs4_set_acl(inode, err);
if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
/*
@@ -5959,7 +6337,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs4_label label = {0, 0, buflen, buf};
+ struct nfs4_label label = {0, 0, 0, buflen, buf};
u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
struct nfs_fattr fattr = {
@@ -6064,7 +6442,7 @@ static int nfs4_do_set_security_label(struct inode *inode,
static int
nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
{
- struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
+ struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
struct nfs_fattr *fattr;
int status;
@@ -6079,6 +6457,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
if (status == 0)
nfs_setsecurity(inode, fattr);
+ nfs_free_fattr(fattr);
return status;
}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
@@ -6357,6 +6736,7 @@ struct nfs4_delegreturndata {
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
+ unsigned short retrans;
struct {
struct nfs4_layoutreturn_args arg;
struct nfs4_layoutreturn_res res;
@@ -6364,6 +6744,7 @@ struct nfs4_delegreturndata {
u32 roc_barrier;
bool roc;
} lr;
+ struct nfs4_delegattr sattr;
struct nfs_fattr fattr;
int rpc_status;
struct inode *inode;
@@ -6376,6 +6757,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
.inode = data->inode,
.stateid = &data->stateid,
.task_is_privileged = data->args.seq_args.sa_privileged,
+ .retrans = data->retrans,
};
if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6388,6 +6770,30 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
&data->res.lr_ret) == -EAGAIN)
goto out_restart;
+ if (data->args.sattr_args && task->tk_status != 0) {
+ switch(data->res.sattr_ret) {
+ case 0:
+ data->args.sattr_args = NULL;
+ data->res.sattr_res = false;
+ break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_BAD_STATEID:
+ /* Let the main handler below do stateid recovery */
+ break;
+ case -NFS4ERR_OLD_STATEID:
+ if (nfs4_refresh_delegation_stateid(&data->stateid,
+ data->inode))
+ goto out_restart;
+ fallthrough;
+ default:
+ data->args.sattr_args = NULL;
+ data->res.sattr_res = false;
+ goto out_restart;
+ }
+ }
+
switch (task->tk_status) {
case 0:
renew_lease(data->res.server, data->timestamp);
@@ -6423,6 +6829,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
task->tk_status = nfs4_async_handle_exception(task,
data->res.server, task->tk_status,
&exception);
+ data->retrans = exception.retrans;
if (exception.retry)
goto out_restart;
}
@@ -6443,7 +6850,9 @@ static void nfs4_delegreturn_release(void *calldata)
pnfs_roc_release(&data->lr.arg, &data->lr.res,
data->res.lr_ret);
if (inode) {
- nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+ nfs4_fattr_set_prechange(&data->fattr,
+ inode_peek_iversion_raw(inode));
+ nfs_refresh_inode(inode, &data->fattr);
nfs_iput_and_deactive(inode);
}
kfree(calldata);
@@ -6454,7 +6863,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
struct nfs4_delegreturndata *d_data;
struct pnfs_layout_hdr *lo;
- d_data = (struct nfs4_delegreturndata *)data;
+ d_data = data;
if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
nfs4_sequence_done(task, &d_data->res.seq_res);
@@ -6479,7 +6888,10 @@ static const struct rpc_call_ops nfs4_delegreturn_ops = {
.rpc_release = nfs4_delegreturn_release,
};
-static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
+static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ const nfs4_stateid *stateid,
+ struct nfs_delegation *delegation,
+ int issync)
{
struct nfs4_delegreturndata *data;
struct nfs_server *server = NFS_SERVER(inode);
@@ -6492,11 +6904,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
- .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
};
int status = 0;
- data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -6507,8 +6922,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
nfs4_bitmask_set(data->args.bitmask_store,
- server->cache_consistency_bitmask, inode, server,
- NULL);
+ server->cache_consistency_bitmask, inode, 0);
data->args.bitmask = data->args.bitmask_store;
nfs_copy_fh(&data->fh, NFS_FH(inode));
nfs4_stateid_copy(&data->stateid, stateid);
@@ -6529,12 +6943,27 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
}
}
+ if (delegation &&
+ test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) {
+ if (delegation->type & FMODE_READ) {
+ data->sattr.atime = inode_get_atime(inode);
+ data->sattr.atime_set = true;
+ }
+ if (delegation->type & FMODE_WRITE) {
+ data->sattr.mtime = inode_get_mtime(inode);
+ data->sattr.mtime_set = true;
+ }
+ data->args.sattr_args = &data->sattr;
+ data->res.sattr_res = true;
+ }
+
if (!data->inode)
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
1);
else
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
0);
+
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -6552,13 +6981,16 @@ out:
return status;
}
-int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
+int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ const nfs4_stateid *stateid,
+ struct nfs_delegation *delegation, int issync)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
int err;
do {
- err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
+ err = _nfs4_proc_delegreturn(inode, cred, stateid,
+ delegation, issync);
trace_nfs4_delegreturn(inode, stateid, err);
switch (err) {
case -NFS4ERR_STALE_STATEID:
@@ -6602,7 +7034,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
switch (status) {
case 0:
- request->fl_type = F_UNLCK;
+ request->c.flc_type = F_UNLCK;
break;
case -NFS4ERR_DENIED:
status = 0;
@@ -6674,6 +7106,7 @@ struct nfs4_unlockdata {
struct file_lock fl;
struct nfs_server *server;
unsigned long timestamp;
+ unsigned short retrans;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
@@ -6684,10 +7117,18 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs4_unlockdata *p;
struct nfs4_state *state = lsp->ls_state;
struct inode *inode = state->inode;
+ struct nfs_lock_context *l_ctx;
- p = kzalloc(sizeof(*p), GFP_NOFS);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
+ l_ctx = nfs_get_lock_context(ctx);
+ if (!IS_ERR(l_ctx)) {
+ p->l_ctx = l_ctx;
+ } else {
+ kfree(p);
+ return NULL;
+ }
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
@@ -6695,7 +7136,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->lsp = lsp;
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
- p->l_ctx = nfs_get_lock_context(ctx);
locks_init_lock(&p->fl);
locks_copy_lock(&p->fl, fl);
p->server = NFS_SERVER(inode);
@@ -6721,6 +7161,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
struct nfs4_exception exception = {
.inode = calldata->lsp->ls_state->inode,
.stateid = &calldata->arg.stateid,
+ .retrans = calldata->retrans,
};
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -6754,6 +7195,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
task->tk_status = nfs4_async_handle_exception(task,
calldata->server, task->tk_status,
&exception);
+ calldata->retrans = exception.retrans;
if (exception.retry)
rpc_restart_call_prepare(task);
}
@@ -6810,10 +7252,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
- struct nfs_client *client =
- NFS_SERVER(lsp->ls_state->inode)->nfs_client;
- if (client->cl_minorversion)
+ if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
@@ -6822,8 +7262,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
/* Ensure this is an unlock - when canceling a lock, the
* canceled lock is passed in, and it won't be an unlock.
*/
- fl->fl_type = F_UNLCK;
- if (fl->fl_flags & FL_CLOSE)
+ fl->c.flc_type = F_UNLCK;
+ if (fl->c.flc_flags & FL_CLOSE)
set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
@@ -6849,11 +7289,11 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
struct rpc_task *task;
struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
int status = 0;
- unsigned char fl_flags = request->fl_flags;
+ unsigned char saved_flags = request->c.flc_flags;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
- request->fl_flags |= FL_EXISTS;
+ request->c.flc_flags |= FL_EXISTS;
/* Exclude nfs_delegation_claim_locks() */
mutex_lock(&sp->so_delegreturn_mutex);
/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
@@ -6863,12 +7303,13 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
mutex_unlock(&sp->so_delegreturn_mutex);
goto out;
}
+ lsp = request->fl_u.nfs4_fl.owner;
+ set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags);
up_read(&nfsi->rwsem);
mutex_unlock(&sp->so_delegreturn_mutex);
if (status != 0)
goto out;
/* Is this a delegated lock? */
- lsp = request->fl_u.nfs4_fl.owner;
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
goto out;
alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
@@ -6876,14 +7317,16 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
status = -ENOMEM;
if (IS_ERR(seqid))
goto out;
- task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
+ task = nfs4_do_unlck(request,
+ nfs_file_open_context(request->c.flc_file),
+ lsp, seqid);
status = PTR_ERR(task);
if (IS_ERR(task))
goto out;
status = rpc_wait_for_completion_task(task);
rpc_put_task(task);
out:
- request->fl_flags = fl_flags;
+ request->c.flc_flags = saved_flags;
trace_nfs4_unlock(request, state, F_SETLK, status);
return status;
}
@@ -6994,7 +7437,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
data->timestamp);
if (data->arg.new_lock && !data->cancelled) {
- data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+ data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS);
if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
goto out_restart;
}
@@ -7005,8 +7448,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
goto out_restart;
break;
- case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
+ if (data->arg.new_lock_owner != 0 &&
+ nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
+ lsp->ls_state))
+ goto out_restart;
+ if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
+ goto out_restart;
+ fallthrough;
+ case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
if (data->arg.new_lock_owner != 0) {
@@ -7084,14 +7534,13 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int ret;
- struct nfs_client *client = NFS_SERVER(state->inode)->nfs_client;
- if (client->cl_minorversion)
+ if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
task_setup_data.flags |= RPC_TASK_MOVEABLE;
- data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
- fl->fl_u.nfs4_fl.owner,
- recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
+ data = nfs4_alloc_lockdata(fl,
+ nfs_file_open_context(fl->c.flc_file),
+ fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
if (IS_SETLKW(cmd))
@@ -7196,10 +7645,10 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs4_state_owner *sp = state->owner;
- unsigned char fl_flags = request->fl_flags;
+ unsigned char flags = request->c.flc_flags;
int status;
- request->fl_flags |= FL_ACCESS;
+ request->c.flc_flags |= FL_ACCESS;
status = locks_lock_inode_wait(state->inode, request);
if (status < 0)
goto out;
@@ -7208,7 +7657,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
- request->fl_flags = fl_flags & ~FL_SLEEP;
+ request->c.flc_flags = flags & ~FL_SLEEP;
status = locks_lock_inode_wait(state->inode, request);
up_read(&nfsi->rwsem);
mutex_unlock(&sp->so_delegreturn_mutex);
@@ -7218,7 +7667,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
mutex_unlock(&sp->so_delegreturn_mutex);
status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
out:
- request->fl_flags = fl_flags;
+ request->c.flc_flags = flags;
return status;
}
@@ -7255,7 +7704,8 @@ nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
break;
- freezable_schedule_timeout_interruptible(timeout);
+ __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
+ schedule_timeout(timeout);
timeout *= 2;
timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
status = -ERESTARTSYS;
@@ -7323,10 +7773,8 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
break;
status = -ERESTARTSYS;
- freezer_do_not_count();
- wait_woken(&waiter.wait, TASK_INTERRUPTIBLE,
+ wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
NFS4_LOCK_MAXTIMEOUT);
- freezer_count();
} while (!signalled());
remove_wait_queue(q, &waiter.wait);
@@ -7361,7 +7809,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
- if (request->fl_type == F_UNLCK) {
+ if (lock_is_unlock(request)) {
if (state != NULL)
return nfs4_proc_unlck(state, cmd, request);
return 0;
@@ -7370,7 +7818,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (state == NULL)
return -ENOLCK;
- if ((request->fl_flags & FL_POSIX) &&
+ if ((request->c.flc_flags & FL_POSIX) &&
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
return -ENOLCK;
@@ -7378,7 +7826,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
* Don't rely on the VFS having checked the file open mode,
* since it won't do this for flock() locks.
*/
- switch (request->fl_type) {
+ switch (request->c.flc_type) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
return -EBADF;
@@ -7400,7 +7848,7 @@ static int nfs4_delete_lease(struct file *file, void **priv)
return generic_setlease(file, F_UNLCK, NULL, priv);
}
-static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
+static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease,
void **priv)
{
struct inode *inode = file_inode(file);
@@ -7408,17 +7856,17 @@ static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
int ret;
/* No delegation, no lease */
- if (!nfs4_have_delegation(inode, type))
+ if (!nfs4_have_delegation(inode, type, 0))
return -EAGAIN;
ret = generic_setlease(file, arg, lease, priv);
- if (ret || nfs4_have_delegation(inode, type))
+ if (ret || nfs4_have_delegation(inode, type, 0))
return ret;
/* We raced with a delegation return */
nfs4_delete_lease(file, priv);
return -EAGAIN;
}
-int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
+int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease,
void **priv)
{
switch (arg) {
@@ -7442,10 +7890,10 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
return err;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
- if (err != -NFS4ERR_DELAY)
+ if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE)
break;
ssleep(1);
- } while (err == -NFS4ERR_DELAY);
+ } while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE);
return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
@@ -7514,7 +7962,7 @@ nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
if (server->nfs_client->cl_mvops->minor_version != 0)
return;
- data = kmalloc(sizeof(*data), GFP_NOFS);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
data->lsp = lsp;
@@ -7532,30 +7980,79 @@ nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *key, const void *buf,
size_t buflen, int flags)
{
- return nfs4_proc_set_acl(inode, buf, buflen);
+ return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
}
static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *key, void *buf, size_t buflen)
{
- return nfs4_proc_get_acl(inode, buf, buflen);
+ return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
}
static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
{
- return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
+ return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+#define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
+
+static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *unused, struct inode *inode,
+ const char *key, const void *buf,
+ size_t buflen, int flags)
+{
+ return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
+}
+
+static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *key, void *buf, size_t buflen)
+{
+ return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
}
+static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
+{
+ return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
+}
+
+#define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
+
+static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *unused, struct inode *inode,
+ const char *key, const void *buf,
+ size_t buflen, int flags)
+{
+ return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
+}
+
+static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *key, void *buf, size_t buflen)
+{
+ return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
+}
+
+static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
+{
+ return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
+}
+
+#endif
+
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *key, const void *buf,
size_t buflen, int flags)
@@ -7606,12 +8103,12 @@ nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
#ifdef CONFIG_NFS_V4_2
static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *key, const void *buf,
size_t buflen, int flags)
{
- struct nfs_access_entry cache;
+ u32 mask;
int ret;
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
@@ -7626,8 +8123,8 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
* do a cached access check for the XA* flags to possibly avoid
* doing an RPC and getting EACCES back.
*/
- if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
- if (!(cache.mask & NFS_ACCESS_XAWRITE))
+ if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
+ if (!(mask & NFS_ACCESS_XAWRITE))
return -EACCES;
}
@@ -7648,14 +8145,14 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *key, void *buf, size_t buflen)
{
- struct nfs_access_entry cache;
+ u32 mask;
ssize_t ret;
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
return -EOPNOTSUPP;
- if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
- if (!(cache.mask & NFS_ACCESS_XAREAD))
+ if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
+ if (!(mask & NFS_ACCESS_XAREAD))
return -EACCES;
}
@@ -7680,13 +8177,13 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
ssize_t ret, size;
char *buf;
size_t buflen;
- struct nfs_access_entry cache;
+ u32 mask;
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
return 0;
- if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
- if (!(cache.mask & NFS_ACCESS_XALIST))
+ if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
+ if (!(mask & NFS_ACCESS_XALIST))
return 0;
}
@@ -7784,7 +8281,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
else
bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
- nfs_fattr_init(&fs_locations->fattr);
+ nfs_fattr_init(fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
@@ -7818,18 +8315,18 @@ int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
* appended to this compound to identify the client ID which is
* performing recovery.
*/
-static int _nfs40_proc_get_locations(struct inode *inode,
+static int _nfs40_proc_get_locations(struct nfs_server *server,
+ struct nfs_fh *fhandle,
struct nfs4_fs_locations *locations,
struct page *page, const struct cred *cred)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
.clientid = server->nfs_client->cl_clientid,
- .fh = NFS_FH(inode),
+ .fh = fhandle,
.page = page,
.bitmask = bitmask,
.migration = 1, /* skip LOOKUP */
@@ -7849,7 +8346,7 @@ static int _nfs40_proc_get_locations(struct inode *inode,
unsigned long now = jiffies;
int status;
- nfs_fattr_init(&locations->fattr);
+ nfs_fattr_init(locations->fattr);
locations->server = server;
locations->nlocations = 0;
@@ -7875,17 +8372,17 @@ static int _nfs40_proc_get_locations(struct inode *inode,
* When the client supports GETATTR(fs_locations_info), it can
* be plumbed in here.
*/
-static int _nfs41_proc_get_locations(struct inode *inode,
+static int _nfs41_proc_get_locations(struct nfs_server *server,
+ struct nfs_fh *fhandle,
struct nfs4_fs_locations *locations,
struct page *page, const struct cred *cred)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
- .fh = NFS_FH(inode),
+ .fh = fhandle,
.page = page,
.bitmask = bitmask,
.migration = 1, /* skip LOOKUP */
@@ -7900,15 +8397,26 @@ static int _nfs41_proc_get_locations(struct inode *inode,
.rpc_resp = &res,
.rpc_cred = cred,
};
+ struct nfs4_call_sync_data data = {
+ .seq_server = server,
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
+ .callback_data = &data,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
+ };
int status;
- nfs_fattr_init(&locations->fattr);
+ nfs_fattr_init(locations->fattr);
locations->server = server;
locations->nlocations = 0;
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
- status = nfs4_call_sync_sequence(clnt, server, &msg,
- &args.seq_args, &res.seq_res);
+ status = nfs4_call_sync_custom(&task_setup_data);
if (status == NFS4_OK &&
res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
status = -NFS4ERR_LEASE_MOVED;
@@ -7919,7 +8427,8 @@ static int _nfs41_proc_get_locations(struct inode *inode,
/**
* nfs4_proc_get_locations - discover locations for a migrated FSID
- * @inode: inode on FSID that is migrating
+ * @server: pointer to nfs_server to process
+ * @fhandle: pointer to the kernel NFS client file handle
* @locations: result of query
* @page: buffer
* @cred: credential to use for this operation
@@ -7934,11 +8443,11 @@ static int _nfs41_proc_get_locations(struct inode *inode,
* -NFS4ERR_LEASE_MOVED is returned if the server still has leases
* from this client that require migration recovery.
*/
-int nfs4_proc_get_locations(struct inode *inode,
+int nfs4_proc_get_locations(struct nfs_server *server,
+ struct nfs_fh *fhandle,
struct nfs4_fs_locations *locations,
struct page *page, const struct cred *cred)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
const struct nfs4_mig_recovery_ops *ops =
clp->cl_mvops->mig_recovery_ops;
@@ -7951,10 +8460,11 @@ int nfs4_proc_get_locations(struct inode *inode,
(unsigned long long)server->fsid.major,
(unsigned long long)server->fsid.minor,
clp->cl_hostname);
- nfs_display_fhandle(NFS_FH(inode), __func__);
+ nfs_display_fhandle(fhandle, __func__);
do {
- status = ops->get_locations(inode, locations, page, cred);
+ status = ops->get_locations(server, fhandle, locations, page,
+ cred);
if (status != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, status, &exception);
@@ -8219,6 +8729,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(clp->cl_session,
task->tk_status);
+ return;
}
if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
res->dir != NFS4_CDFS4_BOTH) {
@@ -8556,6 +9067,8 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
#ifdef CONFIG_NFS_V4_1_MIGRATION
calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
#endif
+ if (test_bit(NFS_CS_PNFS, &clp->cl_flags))
+ calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
@@ -8686,7 +9199,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
void *data)
{
- struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
+ struct nfs4_add_xprt_data *adata = data;
struct rpc_task *task;
int status;
@@ -8697,20 +9210,30 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+try_again:
/* Test connection for session trunking. Async exchange_id call */
task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
if (IS_ERR(task))
return;
status = task->tk_status;
- if (status == 0)
+ if (status == 0) {
status = nfs4_detect_session_trunking(adata->clp,
task->tk_msg.rpc_resp, xprt);
-
+ trace_nfs4_trunked_exchange_id(adata->clp,
+ xprt->address_strings[RPC_DISPLAY_ADDR], status);
+ }
if (status == 0)
rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+ else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
+ (struct sockaddr *)&xprt->addr))
+ rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
rpc_put_task(task);
+ if (status == -NFS4ERR_DELAY) {
+ ssleep(1);
+ goto try_again;
+ }
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
@@ -8937,7 +9460,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
goto out;
if (rcvd->max_rqst_sz > sent->max_rqst_sz)
return -EINVAL;
- if (rcvd->max_resp_sz < sent->max_resp_sz)
+ if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
return -EINVAL;
@@ -9033,6 +9556,13 @@ int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
int status;
unsigned *ptr;
struct nfs4_session *session = clp->cl_session;
+ struct nfs4_add_xprt_data xprtdata = {
+ .clp = clp,
+ };
+ struct rpc_add_xprt_test rpcdata = {
+ .add_xprt_test = clp->cl_mvops->session_trunk,
+ .data = &xprtdata,
+ };
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
@@ -9049,6 +9579,7 @@ int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
+ rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
out:
return status;
}
@@ -9078,6 +9609,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
if (status)
dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
"Session has been destroyed regardless...\n", status);
+ rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
return status;
}
@@ -9122,7 +9654,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
return;
trace_nfs4_sequence(clp, task->tk_status);
- if (task->tk_status < 0) {
+ if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (refcount_read(&clp->cl_count) == 1)
return;
@@ -9177,7 +9709,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
goto out_err;
ret = ERR_PTR(-ENOMEM);
- calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
+ calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
if (calldata == NULL)
goto out_put_clp;
nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
@@ -9262,6 +9794,9 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
rpc_delay(task, NFS4_POLL_RETRY_MAX);
fallthrough;
case -NFS4ERR_RETRY_UNCACHED_REP:
+ case -EACCES:
+ dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
+ __func__, task->tk_status, clp->cl_hostname);
return -EAGAIN;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_DEADSESSION:
@@ -9369,6 +9904,9 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
nfs4_sequence_free_slot(&lgp->res.seq_res);
+ exception->state = NULL;
+ exception->stateid = NULL;
+
switch (nfs4err) {
case 0:
goto out;
@@ -9404,6 +9942,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
status = -EBUSY;
break;
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
status = -ERECALLCONFLICT;
break;
case -NFS4ERR_DELEG_REVOKED:
@@ -9464,7 +10003,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
};
struct pnfs_layout_segment *
-nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
+ struct nfs4_exception *exception)
{
struct inode *inode = lgp->args.inode;
struct nfs_server *server = NFS_SERVER(inode);
@@ -9484,26 +10024,26 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
RPC_TASK_MOVEABLE,
};
struct pnfs_layout_segment *lseg = NULL;
- struct nfs4_exception exception = {
- .inode = inode,
- .timeout = *timeout,
- };
int status = 0;
nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
+ exception->retry = 0;
task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return ERR_CAST(task);
status = rpc_wait_for_completion_task(task);
if (status != 0)
goto out;
if (task->tk_status < 0) {
- status = nfs4_layoutget_handle_exception(task, lgp, &exception);
- *timeout = exception.timeout;
+ exception->retry = 1;
+ status = nfs4_layoutget_handle_exception(task, lgp, exception);
} else if (lgp->res.layoutp->len == 0) {
+ exception->retry = 1;
status = -EAGAIN;
- *timeout = nfs4_update_delay(&exception.timeout);
+ nfs4_update_delay(&exception->timeout);
} else
lseg = pnfs_layout_process(lgp);
out:
@@ -9541,6 +10081,11 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
if (!nfs41_sequence_process(task, &lrp->res.seq_res))
return;
+ if (task->tk_rpc_status == -ETIMEDOUT) {
+ lrp->rpc_status = -EAGAIN;
+ lrp->res.lrs_present = 0;
+ return;
+ }
/*
* Was there an RPC level error? Assume the call succeeded,
* and that we need to release the layout
@@ -9560,13 +10105,25 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
fallthrough;
default:
task->tk_status = 0;
+ lrp->res.lrs_present = 0;
fallthrough;
case 0:
break;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session,
+ task->tk_status);
+ lrp->res.lrs_present = 0;
+ lrp->rpc_status = -EAGAIN;
+ task->tk_status = 0;
+ break;
case -NFS4ERR_DELAY:
- if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
- break;
- goto out_restart;
+ if (nfs4_async_handle_error(task, server, NULL, NULL) ==
+ -EAGAIN)
+ goto out_restart;
+ lrp->res.lrs_present = 0;
+ break;
}
return;
out_restart:
@@ -9580,8 +10137,13 @@ static void nfs4_layoutreturn_release(void *calldata)
struct nfs4_layoutreturn *lrp = calldata;
struct pnfs_layout_hdr *lo = lrp->args.layout;
- pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
+ if (lrp->rpc_status == 0 || !lrp->inode)
+ pnfs_layoutreturn_free_lsegs(
+ lo, &lrp->args.stateid, &lrp->args.range,
lrp->res.lrs_present ? &lrp->res.stateid : NULL);
+ else
+ pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid,
+ &lrp->args.range);
nfs4_sequence_free_slot(&lrp->res.seq_res);
if (lrp->ld_private.ops && lrp->ld_private.ops->free)
lrp->ld_private.ops->free(&lrp->ld_private);
@@ -9597,7 +10159,7 @@ static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
.rpc_release = nfs4_layoutreturn_release,
};
-int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
+int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -9620,7 +10182,7 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
&task_setup_data.rpc_client, &msg);
lrp->inode = nfs_igrab_and_active(lrp->args.inode);
- if (!sync) {
+ if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) {
if (!lrp->inode) {
nfs4_layoutreturn_release(lrp);
return -EAGAIN;
@@ -9628,6 +10190,8 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
task_setup_data.flags |= RPC_TASK_ASYNC;
}
if (!lrp->inode)
+ flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED;
+ if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED)
nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
1);
else
@@ -9636,7 +10200,7 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
- if (sync)
+ if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC))
status = task->tk_status;
trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
dprintk("<-- %s status=%d\n", __func__, status);
@@ -9798,10 +10362,10 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
* Use the state management nfs_client cl_rpcclient, which uses krb5i (if
* possible) as per RFC3530bis and RFC5661 Security Considerations sections
*/
-static int
-_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info,
- struct nfs4_secinfo_flavors *flavors, bool use_integrity)
+static int _nfs41_proc_secinfo_no_name(struct nfs_server *server,
+ struct nfs_fh *fhandle,
+ struct nfs4_secinfo_flavors *flavors,
+ bool use_integrity)
{
struct nfs41_secinfo_no_name_args args = {
.style = SECINFO_STYLE_CURRENT_FH,
@@ -9845,9 +10409,9 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
-static int
-nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
+static int nfs41_proc_secinfo_no_name(struct nfs_server *server,
+ struct nfs_fh *fhandle,
+ struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = {
.interruptible = true,
@@ -9859,7 +10423,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
/* try to use integrity protection with machine cred */
if (_nfs4_is_integrity_protected(server->nfs_client))
- err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
+ err = _nfs41_proc_secinfo_no_name(server, fhandle,
flavors, true);
/*
@@ -9869,7 +10433,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
* the current filesystem's rpc_client and the user cred.
*/
if (err == -NFS4ERR_WRONGSEC)
- err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
+ err = _nfs41_proc_secinfo_no_name(server, fhandle,
flavors, false);
switch (err) {
@@ -9885,9 +10449,8 @@ out:
return err;
}
-static int
-nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+static int nfs41_find_root_sec(struct nfs_server *server,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int err;
struct page *page;
@@ -9903,14 +10466,14 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
}
flavors = page_address(page);
- err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
+ err = nfs41_proc_secinfo_no_name(server, fhandle, flavors);
/*
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
- err = nfs4_find_root_sec(server, fhandle, info);
+ err = nfs4_find_root_sec(server, fhandle, fattr);
goto out_freepage;
}
if (err)
@@ -9935,8 +10498,8 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
flavor = RPC_AUTH_MAXFLAVOR;
if (flavor != RPC_AUTH_MAXFLAVOR) {
- err = nfs4_lookup_root_sec(server, fhandle,
- info, flavor);
+ err = nfs4_lookup_root_sec(server, fhandle, fattr,
+ flavor);
if (!err)
break;
}
@@ -9954,12 +10517,12 @@ out:
}
static int _nfs41_test_stateid(struct nfs_server *server,
- nfs4_stateid *stateid,
- const struct cred *cred)
+ const nfs4_stateid *stateid,
+ const struct cred *cred)
{
int status;
struct nfs41_test_stateid_args args = {
- .stateid = stateid,
+ .stateid = *stateid,
};
struct nfs41_test_stateid_res res;
struct rpc_message msg = {
@@ -10015,8 +10578,8 @@ static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
* failed or the state ID is not currently valid.
*/
static int nfs41_test_stateid(struct nfs_server *server,
- nfs4_stateid *stateid,
- const struct cred *cred)
+ const nfs4_stateid *stateid,
+ const struct cred *cred)
{
struct nfs4_exception exception = {
.interruptible = true,
@@ -10083,7 +10646,7 @@ static const struct rpc_call_ops nfs41_free_stateid_ops = {
* Note: this function is always asynchronous.
*/
static int nfs41_free_stateid(struct nfs_server *server,
- const nfs4_stateid *stateid,
+ nfs4_stateid *stateid,
const struct cred *cred,
bool privileged)
{
@@ -10108,7 +10671,7 @@ static int nfs41_free_stateid(struct nfs_server *server,
&task_setup.rpc_client, &msg);
dprintk("NFS call free_stateid %p\n", stateid);
- data = kmalloc(sizeof(*data), GFP_NOFS);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->server = server;
@@ -10123,6 +10686,7 @@ static int nfs41_free_stateid(struct nfs_server *server,
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
+ stateid->type = NFS4_FREED_STATEID_TYPE;
return 0;
}
@@ -10138,6 +10702,8 @@ nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
static bool nfs41_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
+ trace_nfs41_match_stateid(s1, s2);
+
if (s1->type != s2->type)
return false;
@@ -10155,6 +10721,8 @@ static bool nfs41_match_stateid(const nfs4_stateid *s1,
static bool nfs4_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
+ trace_nfs4_match_stateid(s1, s2);
+
return nfs4_stateid_match(s1, s2);
}
@@ -10257,7 +10825,8 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
| NFS_CAP_POSIX_LOCK
| NFS_CAP_STATEID_NFSV41
| NFS_CAP_ATOMIC_OPEN_V1
- | NFS_CAP_LGOPEN,
+ | NFS_CAP_LGOPEN
+ | NFS_CAP_MOVEABLE,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
@@ -10288,11 +10857,14 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_OFFLOAD_CANCEL
| NFS_CAP_COPY_NOTIFY
| NFS_CAP_DEALLOCATE
+ | NFS_CAP_ZERO_RANGE
| NFS_CAP_SEEK
| NFS_CAP_LAYOUTSTATS
| NFS_CAP_CLONE
| NFS_CAP_LAYOUTERROR
- | NFS_CAP_READ_PLUS,
+ | NFS_CAP_READ_PLUS
+ | NFS_CAP_MOVEABLE
+ | NFS_CAP_OFFLOAD_STATUS,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
@@ -10321,30 +10893,66 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
{
- ssize_t error, error2, error3;
+ ssize_t error, error2, error3, error4 = 0;
+ size_t left = size;
- error = generic_listxattr(dentry, list, size);
+ error = generic_listxattr(dentry, list, left);
if (error < 0)
return error;
if (list) {
list += error;
- size -= error;
+ left -= error;
}
- error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
+ error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
if (error2 < 0)
return error2;
if (list) {
list += error2;
- size -= error2;
+ left -= error2;
}
- error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
+ error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
if (error3 < 0)
return error3;
+ if (list) {
+ list += error3;
+ left -= error3;
+ }
+
+ if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
+ error4 = security_inode_listsecurity(d_inode(dentry), list, left);
+ if (error4 < 0)
+ return error4;
+ }
- return error + error2 + error3;
+ error += error2 + error3 + error4;
+ if (size && error > size)
+ return -ERANGE;
+ return error;
+}
+
+static void nfs4_enable_swap(struct inode *inode)
+{
+ /* The state manager thread must always be running.
+ * It will notice the client is a swapper, and stay put.
+ */
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+ nfs4_schedule_state_manager(clp);
+}
+
+static void nfs4_disable_swap(struct inode *inode)
+{
+ /* The state manager thread will now exit once it is
+ * woken.
+ */
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ wake_up_var(&clp->cl_state);
}
static const struct inode_operations nfs4_dir_inode_operations = {
@@ -10371,6 +10979,26 @@ static const struct inode_operations nfs4_file_inode_operations = {
.listxattr = nfs4_listxattr,
};
+static struct nfs_server *nfs4_clone_server(struct nfs_server *source,
+ struct nfs_fh *fh, struct nfs_fattr *fattr,
+ rpc_authflavor_t flavor)
+{
+ struct nfs_server *server;
+ int error;
+
+ server = nfs_clone_server(source, fh, fattr, flavor);
+ if (IS_ERR(server))
+ return server;
+
+ error = nfs4_delegation_hash_alloc(server);
+ if (error) {
+ nfs_free_server(server);
+ return ERR_PTR(error);
+ }
+
+ return server;
+}
+
const struct nfs_rpc_ops nfs_v4_clientops = {
.version = 4, /* protocol version */
.dentry_ops = &nfs4_dentry_operations,
@@ -10418,11 +11046,15 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.have_delegation = nfs4_have_delegation,
+ .return_delegation = nfs4_inode_return_delegation,
.alloc_client = nfs4_alloc_client,
.init_client = nfs4_init_client,
.free_client = nfs4_free_client,
.create_server = nfs4_create_server,
- .clone_server = nfs_clone_server,
+ .clone_server = nfs4_clone_server,
+ .discover_trunking = nfs4_discover_trunking,
+ .enable_swap = nfs4_enable_swap,
+ .disable_swap = nfs4_disable_swap,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
@@ -10432,6 +11064,22 @@ static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
.set = nfs4_xattr_set_nfs4_acl,
};
+#if defined(CONFIG_NFS_V4_1)
+static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
+ .name = XATTR_NAME_NFSV4_DACL,
+ .list = nfs4_xattr_list_nfs4_dacl,
+ .get = nfs4_xattr_get_nfs4_dacl,
+ .set = nfs4_xattr_set_nfs4_dacl,
+};
+
+static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
+ .name = XATTR_NAME_NFSV4_SACL,
+ .list = nfs4_xattr_list_nfs4_sacl,
+ .get = nfs4_xattr_get_nfs4_sacl,
+ .set = nfs4_xattr_set_nfs4_sacl,
+};
+#endif
+
#ifdef CONFIG_NFS_V4_2
static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
.prefix = XATTR_USER_PREFIX,
@@ -10440,8 +11088,12 @@ static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
};
#endif
-const struct xattr_handler *nfs4_xattr_handlers[] = {
+const struct xattr_handler * const nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs4_xattr_nfs4_dacl_handler,
+ &nfs4_xattr_nfs4_sacl_handler,
+#endif
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
&nfs4_xattr_nfs4_label_handler,
#endif
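/*
 * Illustrative sketch, not part of the patch above: once the
 * nfs4_xattr_nfs4_dacl_handler/nfs4_xattr_nfs4_sacl_handler entries are
 * registered, the new ACLs are reachable from userspace through the normal
 * xattr syscalls. This is a hedged example only; the buffer size and the
 * file path argument are assumptions made for the sketch.
 */
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char buf[4096];		/* assumed large enough for this sketch */
	ssize_t len;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file on an NFSv4.1 mount>\n", argv[0]);
		return 1;
	}
	/* First ask for the length only (NULL buffer), as the ACL code supports. */
	len = getxattr(argv[1], "system.nfs4_dacl", NULL, 0);
	if (len < 0) {
		perror("getxattr(system.nfs4_dacl)");
		return 1;
	}
	printf("DACL length: %zd bytes\n", len);
	/* Then fetch the XDR-encoded ACL data itself. */
	len = getxattr(argv[1], "system.nfs4_dacl", buf, sizeof(buf));
	if (len < 0) {
		perror("getxattr(system.nfs4_dacl)");
		return 1;
	}
	return 0;
}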
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index db3811af0796..18ae614e5a6c 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -122,7 +122,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
timeout = 5 * HZ;
dprintk("%s: requeueing work. Lease period = %ld\n",
__func__, (timeout + HZ - 1) / HZ);
- mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
+ mod_delayed_work(system_percpu_wq, &clp->cl_renewd, timeout);
set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
spin_unlock(&clp->cl_lock);
}
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 351616c61df5..f9c291e2165c 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -148,16 +148,12 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
}
-#ifdef CONFIG_CRC32
/*
* nfs_session_id_hash - calculate the crc32 hash for the session id
* @session - pointer to session
*/
#define nfs_session_id_hash(sess_id) \
(~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data)))
-#else
-#define nfs_session_id_hash(session) (0)
-#endif
#else /* defined(CONFIG_NFS_V4_1) */
static inline int nfs4_init_session(struct nfs_client *clp)
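/*
 * Minimal sketch of what the now-unconditional nfs_session_id_hash() macro
 * computes: ~crc32_le(0xFFFFFFFF, data, len) is the standard CRC-32, so a
 * userspace equivalent can use zlib's crc32(). Illustration only; the
 * 16-byte session ID below is a made-up placeholder.
 */
#include <stdio.h>
#include <stdint.h>
#include <zlib.h>

int main(void)
{
	uint8_t sess_id[16] = { 0 };	/* placeholder session ID */
	uint32_t hash = crc32(0L, sess_id, sizeof(sess_id));

	printf("session id hash: %08x\n", hash);
	return 0;
}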
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ecc4594299d6..01179f7de322 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -49,6 +49,7 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/sched/mm.h>
#include <linux/sunrpc/clnt.h>
@@ -66,6 +67,8 @@
#define OPENOWNER_POOL_SIZE 8
+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
+
const nfs4_stateid zero_stateid = {
{ .data = { 0 } },
.type = NFS4_SPECIAL_STATEID_TYPE,
@@ -329,6 +332,8 @@ do_confirm:
status = nfs4_proc_create_session(clp, cred);
if (status != 0)
goto out;
+ if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
+ nfs4_state_start_reclaim_reboot(clp);
nfs41_finish_session_reset(clp);
nfs_mark_client_ready(clp, NFS_CS_READY);
out:
@@ -496,12 +501,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
sp = kzalloc(sizeof(*sp), gfp_flags);
if (!sp)
return NULL;
- sp->so_seqid.owner_id = ida_simple_get(&server->openowner_id, 0, 0,
- gfp_flags);
- if (sp->so_seqid.owner_id < 0) {
- kfree(sp);
- return NULL;
- }
+ sp->so_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
sp->so_server = server;
sp->so_cred = get_cred(cred);
spin_lock_init(&sp->so_lock);
@@ -509,7 +509,6 @@ nfs4_alloc_state_owner(struct nfs_server *server,
nfs4_init_seqid_counter(&sp->so_seqid);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
- seqcount_spinlock_init(&sp->so_reclaim_seqcount, &sp->so_lock);
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
@@ -533,7 +532,6 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
put_cred(sp->so_cred);
- ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
kfree(sp);
}
@@ -666,7 +664,7 @@ nfs4_alloc_open_state(void)
{
struct nfs4_state *state;
- state = kzalloc(sizeof(*state), GFP_NOFS);
+ state = kzalloc(sizeof(*state), GFP_KERNEL_ACCOUNT);
if (!state)
return NULL;
refcount_set(&state->count, 1);
@@ -820,7 +818,7 @@ static void __nfs4_close(struct nfs4_state *state,
void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
{
- __nfs4_close(state, fmode, GFP_NOFS, 0);
+ __nfs4_close(state, fmode, GFP_KERNEL, 0);
}
void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
@@ -843,15 +841,15 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
*/
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state,
- fl_owner_t fl_owner, fl_owner_t fl_owner2)
+ fl_owner_t owner, fl_owner_t owner2)
{
struct nfs4_lock_state *pos, *ret = NULL;
list_for_each_entry(pos, &state->lock_states, ls_locks) {
- if (pos->ls_owner == fl_owner) {
+ if (pos->ls_owner == owner) {
ret = pos;
break;
}
- if (pos->ls_owner == fl_owner2)
+ if (pos->ls_owner == owner2)
ret = pos;
}
if (ret)
@@ -864,31 +862,25 @@ __nfs4_find_lock_state(struct nfs4_state *state,
* exists, return an uninitialized one.
*
*/
-static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
struct nfs4_lock_state *lsp;
struct nfs_server *server = state->owner->so_server;
- lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
+ lsp = kzalloc(sizeof(*lsp), GFP_KERNEL_ACCOUNT);
if (lsp == NULL)
return NULL;
nfs4_init_seqid_counter(&lsp->ls_seqid);
refcount_set(&lsp->ls_count, 1);
lsp->ls_state = state;
- lsp->ls_owner = fl_owner;
- lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
- if (lsp->ls_seqid.owner_id < 0)
- goto out_free;
+ lsp->ls_owner = owner;
+ lsp->ls_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
INIT_LIST_HEAD(&lsp->ls_locks);
return lsp;
-out_free:
- kfree(lsp);
- return NULL;
}
void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
- ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
nfs4_destroy_seqid_counter(&lsp->ls_seqid);
kfree(lsp);
}
@@ -976,7 +968,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
if (fl->fl_ops != NULL)
return 0;
- lsp = nfs4_get_lock_state(state, fl->fl_owner);
+ lsp = nfs4_get_lock_state(state, fl->c.flc_owner);
if (lsp == NULL)
return -ENOMEM;
fl->fl_u.nfs4_fl.owner = lsp;
@@ -989,7 +981,7 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
const struct nfs_lock_context *l_ctx)
{
struct nfs4_lock_state *lsp;
- fl_owner_t fl_owner, fl_flock_owner;
+ fl_owner_t owner, fl_flock_owner;
int ret = -ENOENT;
if (l_ctx == NULL)
@@ -998,11 +990,11 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
goto out;
- fl_owner = l_ctx->lockowner;
+ owner = l_ctx->lockowner;
fl_flock_owner = l_ctx->open_context->flock_owner;
spin_lock(&state->state_lock);
- lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner);
+ lsp = __nfs4_find_lock_state(state, owner, fl_flock_owner);
if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
ret = -EIO;
else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
@@ -1091,14 +1083,12 @@ void nfs_release_seqid(struct nfs_seqid *seqid)
return;
sequence = seqid->sequence;
spin_lock(&sequence->lock);
- list_del_init(&seqid->list);
- if (!list_empty(&sequence->list)) {
- struct nfs_seqid *next;
-
- next = list_first_entry(&sequence->list,
- struct nfs_seqid, list);
+ if (list_is_first(&seqid->list, &sequence->list) &&
+ !list_is_singular(&sequence->list)) {
+ struct nfs_seqid *next = list_next_entry(seqid, list);
rpc_wake_up_queued_task(&sequence->wait, next->task);
}
+ list_del_init(&seqid->list);
spin_unlock(&sequence->lock);
}
@@ -1205,10 +1195,26 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
{
struct task_struct *task;
char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+ struct rpc_clnt *clnt = clp->cl_rpcclient;
+ bool swapon = false;
+
+ if (clp->cl_cons_state < 0)
+ return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+
+ if (atomic_read(&clnt->cl_swapper)) {
+ swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
+ &clp->cl_state);
+ if (!swapon) {
+ wake_up_var(&clp->cl_state);
+ return;
+ }
+ }
+
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
return;
+
__module_get(THIS_MODULE);
refcount_inc(&clp->cl_count);
@@ -1223,6 +1229,10 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
if (IS_ERR(task)) {
printk(KERN_ERR "%s: kthread_run: %ld\n",
__func__, PTR_ERR(task));
+ if (!nfs_client_init_is_complete(clp))
+ nfs_mark_client_ready(clp, PTR_ERR(task));
+ if (swapon)
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
nfs4_clear_state_manager_bit(clp);
nfs_put_client(clp);
module_put(THIS_MODULE);
@@ -1304,7 +1314,8 @@ int nfs4_wait_clnt_recover(struct nfs_client *clp)
refcount_inc(&clp->cl_count);
res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
- nfs_wait_bit_killable, TASK_KILLABLE);
+ nfs_wait_bit_killable,
+ TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
if (res)
goto out;
if (clp->cl_cons_state < 0)
@@ -1392,7 +1403,7 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
clp->cl_hostname);
nfs4_schedule_state_manager(clp);
- return 0;
+ return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1492,7 +1503,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct file_lock *fl;
struct nfs4_lock_state *lsp;
int status = 0;
- struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock_context *flctx = locks_inode_context(inode);
struct list_head *list;
if (flctx == NULL)
@@ -1504,8 +1515,8 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
down_write(&nfsi->rwsem);
spin_lock(&flctx->flc_lock);
restart:
- list_for_each_entry(fl, list, fl_list) {
- if (nfs_file_open_context(fl->fl_file)->state != state)
+ for_each_file_lock(fl, list) {
+ if (nfs_file_open_context(fl->c.flc_file)->state != state)
continue;
spin_unlock(&flctx->flc_lock);
status = ops->recover_lock(state, fl);
@@ -1572,7 +1583,7 @@ static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state
complete(&copy->completion);
}
}
- list_for_each_entry(copy, &sp->so_server->ss_copies, src_copies) {
+ list_for_each_entry(copy, &sp->so_server->ss_src_copies, src_copies) {
if ((test_bit(NFS_CLNT_SRC_SSC_COPY_STATE, &state->flags) &&
!nfs4_stateid_match_other(&state->stateid,
&copy->parent_src_state->stateid)))
@@ -1592,7 +1603,8 @@ static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
#endif /* CONFIG_NFS_V4_2 */
static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
- const struct nfs4_state_recovery_ops *ops)
+ const struct nfs4_state_recovery_ops *ops,
+ int *lost_locks)
{
struct nfs4_lock_state *lock;
int status;
@@ -1609,8 +1621,9 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
trace_nfs4_state_lock_reclaim(state, lock);
- if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
- pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
+ if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags) &&
+ !test_bit(NFS_LOCK_UNLOCKING, &lock->ls_flags))
+ *lost_locks += 1;
}
spin_unlock(&state->state_lock);
}
@@ -1620,7 +1633,9 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
return status;
}
-static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
+static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp,
+ const struct nfs4_state_recovery_ops *ops,
+ int *lost_locks)
{
struct nfs4_state *state;
unsigned int loop = 0;
@@ -1638,7 +1653,6 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
* server that doesn't support a grace period.
*/
spin_lock(&sp->so_lock);
- raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
@@ -1656,7 +1670,7 @@ restart:
#endif /* CONFIG_NFS_V4_2 */
refcount_inc(&state->count);
spin_unlock(&sp->so_lock);
- status = __nfs4_reclaim_open_state(sp, state, ops);
+ status = __nfs4_reclaim_open_state(sp, state, ops, lost_locks);
switch (status) {
default:
@@ -1706,7 +1720,6 @@ restart:
spin_lock(&sp->so_lock);
goto restart;
}
- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
spin_unlock(&sp->so_lock);
#ifdef CONFIG_NFS_V4_2
if (found_ssc_copy_state)
@@ -1716,7 +1729,6 @@ restart:
out_err:
nfs4_put_open_state(state);
spin_lock(&sp->so_lock);
- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
spin_unlock(&sp->so_lock);
return status;
}
@@ -1774,6 +1786,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
/* Mark all delegations for reclaim */
nfs_delegation_mark_reclaim(clp);
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
@@ -1837,6 +1850,7 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
if (!nfs4_state_clear_reclaim_reboot(clp))
return;
+ pnfs_destroy_all_layouts(clp);
ops = clp->cl_mvops->reboot_recovery_ops;
cred = nfs4_get_clid_cred(clp);
err = nfs4_reclaim_complete(clp, ops, cred);
@@ -1898,8 +1912,12 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
struct nfs_server *server;
struct rb_node *pos;
LIST_HEAD(freeme);
- int status = 0;
+ int lost_locks = 0;
+ int status;
+ status = nfs4_begin_drain_session(clp);
+ if (status < 0)
+ return status;
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
@@ -1918,11 +1936,15 @@ restart:
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
- status = nfs4_reclaim_open_state(sp, ops);
+ status = nfs4_reclaim_open_state(sp, ops, &lost_locks);
if (status < 0) {
+ if (lost_locks)
+ pr_warn("NFS: %s: lost %d locks\n",
+ clp->cl_hostname, lost_locks);
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
status = nfs4_recovery_handle_error(clp, status);
+ nfs4_free_state_owners(&freeme);
return (status != 0) ? status : -EAGAIN;
}
@@ -1933,6 +1955,10 @@ restart:
}
rcu_read_unlock();
nfs4_free_state_owners(&freeme);
+ nfs_local_probe_async(clp);
+ if (lost_locks)
+ pr_warn("NFS: %s: lost %d locks\n",
+ clp->cl_hostname, lost_locks);
return 0;
}
@@ -1986,6 +2012,12 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
nfs_mark_client_ready(clp, -EPERM);
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
return -EPERM;
+ case -ETIMEDOUT:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
+ }
+ fallthrough;
case -EACCES:
case -NFS4ERR_DELAY:
case -EAGAIN:
@@ -1998,6 +2030,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
dprintk("%s: exit with error %d for server %s\n",
__func__, -EPROTONOSUPPORT, clp->cl_hostname);
return -EPROTONOSUPPORT;
+ case -ENOSPC:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
* in nfs4_exchange_id */
default:
@@ -2028,7 +2064,6 @@ static int nfs4_establish_lease(struct nfs_client *clp)
put_cred(cred);
if (status != 0)
return status;
- pnfs_destroy_all_layouts(clp);
return 0;
}
@@ -2076,6 +2111,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_fs_locations *locations = NULL;
+ struct nfs_fattr *fattr;
struct inode *inode;
struct page *page;
int status, result;
@@ -2085,16 +2121,19 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
(unsigned long long)server->fsid.minor,
clp->cl_hostname);
- result = 0;
page = alloc_page(GFP_KERNEL);
locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
- if (page == NULL || locations == NULL) {
+ fattr = nfs_alloc_fattr();
+ if (page == NULL || locations == NULL || fattr == NULL) {
dprintk("<-- %s: no memory\n", __func__);
+ result = 0;
goto out;
}
+ locations->fattr = fattr;
inode = d_inode(server->super->s_root);
- result = nfs4_proc_get_locations(inode, locations, page, cred);
+ result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
+ page, cred);
if (result) {
dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
__func__, result);
@@ -2102,7 +2141,10 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
}
result = -NFS4ERR_NXIO;
- if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
+ if (!locations->nlocations)
+ goto out;
+
+ if (!(locations->fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
dprintk("<-- %s: No fs_locations data, migration skipped\n",
__func__);
goto out;
@@ -2127,6 +2169,8 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
out:
if (page != NULL)
__free_page(page);
+ if (locations != NULL)
+ kfree(locations->fattr);
kfree(locations);
if (result) {
pr_err("NFS: migration recovery failed (server %s)\n",
@@ -2552,9 +2596,17 @@ static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
static void nfs4_state_manager(struct nfs_client *clp)
{
+ unsigned int memflags;
int status = 0;
const char *section = "", *section_sep = "";
+ /*
+ * State recovery can deadlock if the direct reclaim code tries
+ * start NFS writeback. So ensure memory allocations are all
+ * GFP_NOFS.
+ */
+ memflags = memalloc_nofs_save();
+
/* Ensure exclusive access to NFSv4 state */
do {
trace_nfs4_state_mgr(clp);
@@ -2623,16 +2675,22 @@ static void nfs4_state_manager(struct nfs_client *clp)
section = "reclaim reboot";
status = nfs4_do_reclaim(clp,
clp->cl_mvops->reboot_recovery_ops);
+ if (status == 0)
+ status = pnfs_layout_handle_reboot(clp);
if (status == -EAGAIN)
continue;
if (status < 0)
goto out_error;
nfs4_state_end_reclaim_reboot(clp);
+ continue;
}
/* Detect expired delegations... */
if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) {
section = "detect expired delegations";
+ status = nfs4_begin_drain_session(clp);
+ if (status < 0)
+ goto out_error;
nfs_reap_expired_delegations(clp);
continue;
}
@@ -2649,9 +2707,17 @@ static void nfs4_state_manager(struct nfs_client *clp)
clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
}
+ memalloc_nofs_restore(memflags);
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
+ if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
+ &clp->cl_state)) {
+ memflags = memalloc_nofs_save();
+ continue;
+ }
+
if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
@@ -2661,11 +2727,8 @@ static void nfs4_state_manager(struct nfs_client *clp)
clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state);
}
- /* Did we race with an attempt to give us more work? */
- if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
- return;
- if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
- return;
+ return;
+
} while (refcount_read(&clp->cl_count) > 1 && !signalled());
goto out_drain;
@@ -2676,8 +2739,20 @@ out_error:
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
- ssleep(1);
+ switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ nfs_mark_client_ready(clp, -EIO);
+ break;
+ case -EINVAL:
+ nfs_mark_client_ready(clp, status);
+ break;
+ default:
+ ssleep(1);
+ break;
+ }
out_drain:
+ memalloc_nofs_restore(memflags);
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
}
@@ -2685,10 +2760,35 @@ out_drain:
static int nfs4_run_state_manager(void *ptr)
{
struct nfs_client *clp = ptr;
+ struct rpc_clnt *cl = clp->cl_rpcclient;
+
+ while (cl != cl->cl_parent)
+ cl = cl->cl_parent;
allow_signal(SIGKILL);
+again:
nfs4_state_manager(clp);
+
+ if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
+ !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
+ wait_var_event_interruptible(&clp->cl_state,
+ test_bit(NFS4CLNT_RUN_MANAGER,
+ &clp->cl_state));
+ if (!atomic_read(&cl->cl_swapper))
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ goto again;
+ /* Either no longer a swapper, or were signalled */
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ }
+
+ if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ goto again;
+
nfs_put_client(clp);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
return 0;
}
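/*
 * Illustrative sketch (not part of the patch above): the nfs4_state_manager()
 * hunks bracket the recovery loop with memalloc_nofs_save() and
 * memalloc_nofs_restore() from <linux/sched/mm.h>.  Inside that scope every
 * allocation is implicitly treated as GFP_NOFS, so direct reclaim cannot
 * recurse into NFS writeback while recovery is in progress.  The helper
 * below is hypothetical; only the scoping API is real.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_recovery_alloc(size_t len)
{
	unsigned int memflags;
	void *buf;

	/* Enter the NOFS scope: __GFP_FS is masked off for nested allocations. */
	memflags = memalloc_nofs_save();

	/* This GFP_KERNEL request behaves as GFP_NOFS within the scope. */
	buf = kmalloc(len, GFP_KERNEL);

	/* Restore whatever allocation scope the caller had. */
	memalloc_nofs_restore(memflags);
	return buf;
}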
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index d09bcfd7db89..5ec9c83f1ef0 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -145,18 +145,13 @@ static int do_nfs4_mount(struct nfs_server *server,
const char *export_path)
{
struct nfs_fs_context *root_ctx;
+ struct nfs_fs_context *ctx;
struct fs_context *root_fc;
struct vfsmount *root_mnt;
struct dentry *dentry;
- size_t len;
+ char *source;
int ret;
- struct fs_parameter param = {
- .key = "source",
- .type = fs_value_is_string,
- .dirfd = -1,
- };
-
if (IS_ERR(server))
return PTR_ERR(server);
@@ -168,25 +163,32 @@ static int do_nfs4_mount(struct nfs_server *server,
kfree(root_fc->source);
root_fc->source = NULL;
+ ctx = nfs_fc2context(fc);
root_ctx = nfs_fc2context(root_fc);
root_ctx->internal = true;
root_ctx->server = server;
- /* We leave export_path unset as it's not used to find the root. */
- len = strlen(hostname) + 5;
- param.string = kmalloc(len, GFP_KERNEL);
- if (param.string == NULL) {
- put_fs_context(root_fc);
- return -ENOMEM;
+ if (ctx->fscache_uniq) {
+ ret = vfs_parse_fs_string(root_fc, "fsc", ctx->fscache_uniq);
+ if (ret < 0) {
+ put_fs_context(root_fc);
+ return ret;
+ }
}
+ /* We leave export_path unset as it's not used to find the root. */
/* Does hostname need to be enclosed in brackets? */
if (strchr(hostname, ':'))
- param.size = snprintf(param.string, len, "[%s]:/", hostname);
+ source = kasprintf(GFP_KERNEL, "[%s]:/", hostname);
else
- param.size = snprintf(param.string, len, "%s:/", hostname);
- ret = vfs_parse_fs_param(root_fc, &param);
- kfree(param.string);
+ source = kasprintf(GFP_KERNEL, "%s:/", hostname);
+
+ if (!source) {
+ put_fs_context(root_fc);
+ return -ENOMEM;
+ }
+ ret = vfs_parse_fs_string(root_fc, "source", source);
+ kfree(source);
if (ret < 0) {
put_fs_context(root_fc);
return ret;
@@ -308,6 +310,7 @@ static void __exit exit_nfs_v4(void)
nfs_dns_resolver_destroy();
}
+MODULE_DESCRIPTION("NFSv4 client support");
MODULE_LICENSE("GPL");
module_init(init_nfs_v4);
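/*
 * Illustrative sketch (not part of the patch above): do_nfs4_mount() now
 * builds the "source" mount string with kasprintf() instead of a sized
 * kmalloc()/snprintf() pair.  kasprintf() allocates exactly the buffer the
 * formatted string needs, returns NULL on failure, and is freed with
 * kfree().  The helper below is hypothetical; it only shows the bracketing
 * rule for hostnames that contain ':' (IPv6 literals).
 */
#include <linux/slab.h>
#include <linux/string.h>

static char *example_nfs_source_string(const char *hostname)
{
	/* IPv6-style addresses must be wrapped in brackets before ":/". */
	if (strchr(hostname, ':'))
		return kasprintf(GFP_KERNEL, "[%s]:/", hostname);
	return kasprintf(GFP_KERNEL, "%s:/", hostname);
}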
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index c394e4447100..d1a92d8f8ba4 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -17,7 +17,7 @@ static const int nfs_set_port_min;
static const int nfs_set_port_max = 65535;
static struct ctl_table_header *nfs4_callback_sysctl_table;
-static struct ctl_table nfs4_cb_sysctls[] = {
+static const struct ctl_table nfs4_cb_sysctls[] = {
{
.procname = "nfs_callback_tcpport",
.data = &nfs_callback_set_tcpport,
@@ -34,30 +34,12 @@ static struct ctl_table nfs4_cb_sysctls[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
-};
-
-static struct ctl_table nfs4_cb_sysctl_dir[] = {
- {
- .procname = "nfs",
- .mode = 0555,
- .child = nfs4_cb_sysctls,
- },
- { }
-};
-
-static struct ctl_table nfs4_cb_sysctl_root[] = {
- {
- .procname = "fs",
- .mode = 0555,
- .child = nfs4_cb_sysctl_dir,
- },
- { }
};
int nfs4_register_sysctl(void)
{
- nfs4_callback_sysctl_table = register_sysctl_table(nfs4_cb_sysctl_root);
+ nfs4_callback_sysctl_table = register_sysctl("fs/nfs",
+ nfs4_cb_sysctls);
if (nfs4_callback_sysctl_table == NULL)
return -ENOMEM;
return 0;
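/*
 * Illustrative sketch (not part of the patch above): the sysctl conversion
 * swaps hand-built "fs" -> "nfs" directory tables for a single
 * register_sysctl() call that takes the directory path as a string.  On
 * recent kernels the table can be const and no longer needs a terminating
 * empty sentinel entry.  The table, variable, and function names below are
 * hypothetical.
 */
#include <linux/init.h>
#include <linux/sysctl.h>

static int example_value;

static const struct ctl_table example_sysctls[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static struct ctl_table_header *example_sysctl_header;

static int __init example_register_sysctl(void)
{
	/* The "fs/example" path replaces the nested ctl_table directories. */
	example_sysctl_header = register_sysctl("fs/example", example_sysctls);
	return example_sysctl_header ? 0 : -ENOMEM;
}

static void example_unregister_sysctl(void)
{
	unregister_sysctl_table(example_sysctl_header);
}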
diff --git a/fs/nfs/nfs4trace.c b/fs/nfs/nfs4trace.c
index d9ac556bebcf..987c92d6364b 100644
--- a/fs/nfs/nfs4trace.c
+++ b/fs/nfs/nfs4trace.c
@@ -2,6 +2,8 @@
/*
* Copyright (c) 2013 Trond Myklebust <Trond.Myklebust@netapp.com>
*/
+#include <uapi/linux/pr.h>
+#include <linux/blkdev.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "internal.h"
@@ -24,8 +26,17 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_pagelist);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_pagelist);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_ds_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_read_error);
EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_write_error);
EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_commit_error);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bl_ext_tree_prepare_commit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_reg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_reg_err);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_unreg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_unreg_err);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(fl_getdevinfo);
#endif
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 6ee6ad3674a2..9776d220cec3 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -9,10 +9,12 @@
#define _TRACE_NFS4_H
#include <linux/tracepoint.h>
-#include <trace/events/sunrpc_base.h>
+#include <trace/misc/sunrpc.h>
-#include <trace/events/fs.h>
-#include <trace/events/nfs.h>
+#include <trace/misc/fs.h>
+#include <trace/misc/nfs.h>
+
+#include "delegation.h"
#define show_nfs_fattr_flags(valid) \
__print_flags((unsigned long)valid, "|", \
@@ -30,7 +32,8 @@
{ NFS_ATTR_FATTR_CTIME, "CTIME" }, \
{ NFS_ATTR_FATTR_CHANGE, "CHANGE" }, \
{ NFS_ATTR_FATTR_OWNER_NAME, "OWNER_NAME" }, \
- { NFS_ATTR_FATTR_GROUP_NAME, "GROUP_NAME" })
+ { NFS_ATTR_FATTR_GROUP_NAME, "GROUP_NAME" }, \
+ { NFS_ATTR_FATTR_BTIME, "BTIME" })
DECLARE_EVENT_CLASS(nfs4_clientid_event,
TP_PROTO(
@@ -47,7 +50,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
TP_fast_assign(
__entry->error = error < 0 ? -error : 0;
- __assign_str(dstaddr, clp->cl_hostname);
+ __assign_str(dstaddr);
),
TP_printk(
@@ -77,6 +80,36 @@ DEFINE_NFS4_CLIENTID_EVENT(nfs4_bind_conn_to_session);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_sequence);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_reclaim_complete);
+TRACE_EVENT(nfs4_trunked_exchange_id,
+ TP_PROTO(
+ const struct nfs_client *clp,
+ const char *addr,
+ int error
+ ),
+
+ TP_ARGS(clp, addr, error),
+
+ TP_STRUCT__entry(
+ __string(main_addr, clp->cl_hostname)
+ __string(trunk_addr, addr)
+ __field(unsigned long, error)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error < 0 ? -error : 0;
+ __assign_str(main_addr);
+ __assign_str(trunk_addr);
+ ),
+
+ TP_printk(
+ "error=%ld (%s) main_addr=%s trunk_addr=%s",
+ -__entry->error,
+ show_nfs4_status(__entry->error),
+ __get_str(main_addr),
+ __get_str(trunk_addr)
+ )
+);
+
TRACE_EVENT(nfs4_sequence_done,
TP_PROTO(
const struct nfs4_session *session,
@@ -243,6 +276,32 @@ TRACE_EVENT(nfs4_cb_offload,
show_nfs_stable_how(__entry->cb_how)
)
);
+
+TRACE_EVENT(pnfs_ds_connect,
+ TP_PROTO(
+ char *ds_remotestr,
+ int status
+ ),
+
+ TP_ARGS(ds_remotestr, status),
+
+ TP_STRUCT__entry(
+ __string(ds_ips, ds_remotestr)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ds_ips);
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "ds_ips=%s, status=%d",
+ __get_str(ds_ips),
+ __entry->status
+ )
+);
+
#endif /* CONFIG_NFS_V4_1 */
TRACE_EVENT(nfs4_setup_sequence,
@@ -292,32 +351,34 @@ TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
+TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_AVAILABLE);
TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING);
TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ);
TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW);
+TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_DELAYED);
#define show_nfs4_clp_state(state) \
__print_flags(state, "|", \
- { NFS4CLNT_MANAGER_RUNNING, "MANAGER_RUNNING" }, \
- { NFS4CLNT_CHECK_LEASE, "CHECK_LEASE" }, \
- { NFS4CLNT_LEASE_EXPIRED, "LEASE_EXPIRED" }, \
- { NFS4CLNT_RECLAIM_REBOOT, "RECLAIM_REBOOT" }, \
- { NFS4CLNT_RECLAIM_NOGRACE, "RECLAIM_NOGRACE" }, \
- { NFS4CLNT_DELEGRETURN, "DELEGRETURN" }, \
- { NFS4CLNT_SESSION_RESET, "SESSION_RESET" }, \
- { NFS4CLNT_LEASE_CONFIRM, "LEASE_CONFIRM" }, \
- { NFS4CLNT_SERVER_SCOPE_MISMATCH, \
- "SERVER_SCOPE_MISMATCH" }, \
- { NFS4CLNT_PURGE_STATE, "PURGE_STATE" }, \
- { NFS4CLNT_BIND_CONN_TO_SESSION, \
- "BIND_CONN_TO_SESSION" }, \
- { NFS4CLNT_MOVED, "MOVED" }, \
- { NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \
- { NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \
- { NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \
- { NFS4CLNT_RECALL_RUNNING, "RECALL_RUNNING" }, \
- { NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \
- { NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" })
+ { BIT(NFS4CLNT_MANAGER_RUNNING), "MANAGER_RUNNING" }, \
+ { BIT(NFS4CLNT_CHECK_LEASE), "CHECK_LEASE" }, \
+ { BIT(NFS4CLNT_LEASE_EXPIRED), "LEASE_EXPIRED" }, \
+ { BIT(NFS4CLNT_RECLAIM_REBOOT), "RECLAIM_REBOOT" }, \
+ { BIT(NFS4CLNT_RECLAIM_NOGRACE), "RECLAIM_NOGRACE" }, \
+ { BIT(NFS4CLNT_DELEGRETURN), "DELEGRETURN" }, \
+ { BIT(NFS4CLNT_SESSION_RESET), "SESSION_RESET" }, \
+ { BIT(NFS4CLNT_LEASE_CONFIRM), "LEASE_CONFIRM" }, \
+ { BIT(NFS4CLNT_SERVER_SCOPE_MISMATCH), "SERVER_SCOPE_MISMATCH" }, \
+ { BIT(NFS4CLNT_PURGE_STATE), "PURGE_STATE" }, \
+ { BIT(NFS4CLNT_BIND_CONN_TO_SESSION), "BIND_CONN_TO_SESSION" }, \
+ { BIT(NFS4CLNT_MOVED), "MOVED" }, \
+ { BIT(NFS4CLNT_LEASE_MOVED), "LEASE_MOVED" }, \
+ { BIT(NFS4CLNT_DELEGATION_EXPIRED), "DELEGATION_EXPIRED" }, \
+ { BIT(NFS4CLNT_RUN_MANAGER), "RUN_MANAGER" }, \
+ { BIT(NFS4CLNT_MANAGER_AVAILABLE), "MANAGER_AVAILABLE" }, \
+ { BIT(NFS4CLNT_RECALL_RUNNING), "RECALL_RUNNING" }, \
+ { BIT(NFS4CLNT_RECALL_ANY_LAYOUT_READ), "RECALL_ANY_LAYOUT_READ" }, \
+ { BIT(NFS4CLNT_RECALL_ANY_LAYOUT_RW), "RECALL_ANY_LAYOUT_RW" }, \
+ { BIT(NFS4CLNT_DELEGRETURN_DELAYED), "DELEGRETURN_DELAYED" })
TRACE_EVENT(nfs4_state_mgr,
TP_PROTO(
@@ -333,7 +394,7 @@ TRACE_EVENT(nfs4_state_mgr,
TP_fast_assign(
__entry->state = clp->cl_state;
- __assign_str(hostname, clp->cl_hostname);
+ __assign_str(hostname);
),
TP_printk(
@@ -361,8 +422,8 @@ TRACE_EVENT(nfs4_state_mgr_failed,
TP_fast_assign(
__entry->error = status < 0 ? -status : 0;
__entry->state = clp->cl_state;
- __assign_str(hostname, clp->cl_hostname);
- __assign_str(section, section);
+ __assign_str(hostname);
+ __assign_str(section);
),
TP_printk(
@@ -546,7 +607,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
__entry->fhandle = 0;
}
__entry->dir = NFS_FILEID(d_inode(ctx->dentry->d_parent));
- __assign_str(name, ctx->dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -697,7 +758,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
__entry->error = error < 0 ? -error : 0;
__entry->cmd = cmd;
- __entry->type = request->fl_type;
+ __entry->type = request->c.flc_type;
__entry->start = request->fl_start;
__entry->end = request->fl_end;
__entry->dev = inode->i_sb->s_dev;
@@ -769,7 +830,7 @@ TRACE_EVENT(nfs4_set_lock,
__entry->error = error < 0 ? -error : 0;
__entry->cmd = cmd;
- __entry->type = request->fl_type;
+ __entry->type = request->c.flc_type;
__entry->start = request->fl_start;
__entry->end = request->fl_end;
__entry->dev = inode->i_sb->s_dev;
@@ -924,6 +985,52 @@ DECLARE_EVENT_CLASS(nfs4_set_delegation_event,
TP_ARGS(inode, fmode))
DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_set_delegation);
DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_reclaim_delegation);
+DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_detach_delegation);
+
+#define show_delegation_flags(flags) \
+ __print_flags(flags, "|", \
+ { BIT(NFS_DELEGATION_NEED_RECLAIM), "NEED_RECLAIM" }, \
+ { BIT(NFS_DELEGATION_RETURN), "RETURN" }, \
+ { BIT(NFS_DELEGATION_RETURN_IF_CLOSED), "RETURN_IF_CLOSED" }, \
+ { BIT(NFS_DELEGATION_REFERENCED), "REFERENCED" }, \
+ { BIT(NFS_DELEGATION_RETURNING), "RETURNING" }, \
+ { BIT(NFS_DELEGATION_REVOKED), "REVOKED" }, \
+ { BIT(NFS_DELEGATION_TEST_EXPIRED), "TEST_EXPIRED" }, \
+ { BIT(NFS_DELEGATION_INODE_FREEING), "INODE_FREEING" }, \
+ { BIT(NFS_DELEGATION_RETURN_DELAYED), "RETURN_DELAYED" })
+
+DECLARE_EVENT_CLASS(nfs4_delegation_event,
+ TP_PROTO(
+ const struct nfs_delegation *delegation
+ ),
+
+ TP_ARGS(delegation),
+
+ TP_STRUCT__entry(
+ __field(u32, fhandle)
+ __field(unsigned int, fmode)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(delegation->inode));
+ __entry->fmode = delegation->type;
+ __entry->flags = delegation->flags;
+ ),
+
+ TP_printk(
+ "fhandle=0x%08x fmode=%s flags=%s",
+ __entry->fhandle, show_fs_fmode_flags(__entry->fmode),
+ show_delegation_flags(__entry->flags)
+ )
+);
+#define DEFINE_NFS4_DELEGATION_EVENT(name) \
+ DEFINE_EVENT(nfs4_delegation_event, name, \
+ TP_PROTO( \
+ const struct nfs_delegation *delegation \
+ ), \
+ TP_ARGS(delegation))
+DEFINE_NFS4_DELEGATION_EVENT(nfs_delegation_need_return);
TRACE_EVENT(nfs4_delegreturn_exit,
TP_PROTO(
@@ -1040,7 +1147,7 @@ DECLARE_EVENT_CLASS(nfs4_lookup_event,
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->error = -error;
- __assign_str(name, name->name);
+ __assign_str(name);
),
TP_printk(
@@ -1124,8 +1231,8 @@ TRACE_EVENT(nfs4_rename,
__entry->olddir = NFS_FILEID(olddir);
__entry->newdir = NFS_FILEID(newdir);
__entry->error = error < 0 ? -error : 0;
- __assign_str(oldname, oldname->name);
- __assign_str(newname, newname->name);
+ __assign_str(oldname);
+ __assign_str(newname);
),
TP_printk(
@@ -1327,7 +1434,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
__entry->fileid = 0;
__entry->dev = 0;
}
- __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown");
+ __assign_str(dstaddr);
),
TP_printk(
@@ -1384,7 +1491,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
__entry->fileid = 0;
__entry->dev = 0;
}
- __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown");
+ __assign_str(dstaddr);
__entry->stateid_seq =
be32_to_cpu(stateid->seqid);
__entry->stateid_hash =
@@ -1417,6 +1524,63 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
DEFINE_NFS4_INODE_STATEID_CALLBACK_EVENT(nfs4_cb_recall);
DEFINE_NFS4_INODE_STATEID_CALLBACK_EVENT(nfs4_cb_layoutrecall_file);
+#define show_stateid_type(type) \
+ __print_symbolic(type, \
+ { NFS4_INVALID_STATEID_TYPE, "INVALID" }, \
+ { NFS4_SPECIAL_STATEID_TYPE, "SPECIAL" }, \
+ { NFS4_OPEN_STATEID_TYPE, "OPEN" }, \
+ { NFS4_LOCK_STATEID_TYPE, "LOCK" }, \
+ { NFS4_DELEGATION_STATEID_TYPE, "DELEGATION" }, \
+ { NFS4_LAYOUT_STATEID_TYPE, "LAYOUT" }, \
+ { NFS4_PNFS_DS_STATEID_TYPE, "PNFS_DS" }, \
+ { NFS4_REVOKED_STATEID_TYPE, "REVOKED" }, \
+ { NFS4_FREED_STATEID_TYPE, "FREED" })
+
+DECLARE_EVENT_CLASS(nfs4_match_stateid_event,
+ TP_PROTO(
+ const nfs4_stateid *s1,
+ const nfs4_stateid *s2
+ ),
+
+ TP_ARGS(s1, s2),
+
+ TP_STRUCT__entry(
+ __field(int, s1_seq)
+ __field(int, s2_seq)
+ __field(u32, s1_hash)
+ __field(u32, s2_hash)
+ __field(int, s1_type)
+ __field(int, s2_type)
+ ),
+
+ TP_fast_assign(
+ __entry->s1_seq = s1->seqid;
+ __entry->s1_hash = nfs_stateid_hash(s1);
+ __entry->s1_type = s1->type;
+ __entry->s2_seq = s2->seqid;
+ __entry->s2_hash = nfs_stateid_hash(s2);
+ __entry->s2_type = s2->type;
+ ),
+
+ TP_printk(
+ "s1=%s:%x:%u s2=%s:%x:%u",
+ show_stateid_type(__entry->s1_type),
+ __entry->s1_hash, __entry->s1_seq,
+ show_stateid_type(__entry->s2_type),
+ __entry->s2_hash, __entry->s2_seq
+ )
+);
+
+#define DEFINE_NFS4_MATCH_STATEID_EVENT(name) \
+ DEFINE_EVENT(nfs4_match_stateid_event, name, \
+ TP_PROTO( \
+ const nfs4_stateid *s1, \
+ const nfs4_stateid *s2 \
+ ), \
+ TP_ARGS(s1, s2))
+DEFINE_NFS4_MATCH_STATEID_EVENT(nfs41_match_stateid);
+DEFINE_NFS4_MATCH_STATEID_EVENT(nfs4_match_stateid);
+
DECLARE_EVENT_CLASS(nfs4_idmap_event,
TP_PROTO(
const char *name,
@@ -1815,7 +1979,7 @@ TRACE_EVENT(pnfs_update_layout,
__entry->count = count;
__entry->iomode = iomode;
__entry->reason = reason;
- if (lo != NULL) {
+ if (lo != NULL && pnfs_layout_is_valid(lo)) {
__entry->layoutstateid_seq =
be32_to_cpu(lo->plh_stateid.seqid);
__entry->layoutstateid_hash =
@@ -1869,7 +2033,7 @@ DECLARE_EVENT_CLASS(pnfs_layout_event,
__entry->pos = pos;
__entry->count = count;
__entry->iomode = iomode;
- if (lo != NULL) {
+ if (lo != NULL && pnfs_layout_is_valid(lo)) {
__entry->layoutstateid_seq =
be32_to_cpu(lo->plh_stateid.seqid);
__entry->layoutstateid_hash =
@@ -1928,7 +2092,7 @@ DECLARE_EVENT_CLASS(nfs4_deviceid_event,
),
TP_fast_assign(
- __assign_str(dstaddr, clp->cl_hostname);
+ __assign_str(dstaddr);
memcpy(__entry->deviceid, deviceid->data,
NFS4_DEVICEID4_SIZE);
),
@@ -1966,7 +2130,7 @@ DECLARE_EVENT_CLASS(nfs4_deviceid_status,
TP_fast_assign(
__entry->dev = server->s_dev;
__entry->status = status;
- __assign_str(dstaddr, server->nfs_client->cl_hostname);
+ __assign_str(dstaddr);
memcpy(__entry->deviceid, deviceid->data,
NFS4_DEVICEID4_SIZE);
),
@@ -1989,15 +2153,45 @@ DECLARE_EVENT_CLASS(nfs4_deviceid_status,
DEFINE_PNFS_DEVICEID_STATUS(nfs4_getdeviceinfo);
DEFINE_PNFS_DEVICEID_STATUS(nfs4_find_deviceid);
+TRACE_EVENT(fl_getdevinfo,
+ TP_PROTO(
+ const struct nfs_server *server,
+ const struct nfs4_deviceid *deviceid,
+ char *ds_remotestr
+ ),
+ TP_ARGS(server, deviceid, ds_remotestr),
+
+ TP_STRUCT__entry(
+ __string(mds_addr, server->nfs_client->cl_hostname)
+ __array(unsigned char, deviceid, NFS4_DEVICEID4_SIZE)
+ __string(ds_ips, ds_remotestr)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mds_addr);
+ __assign_str(ds_ips);
+ memcpy(__entry->deviceid, deviceid->data,
+ NFS4_DEVICEID4_SIZE);
+ ),
+ TP_printk(
+ "deviceid=%s, mds_addr=%s, ds_ips=%s",
+ __print_hex(__entry->deviceid, NFS4_DEVICEID4_SIZE),
+ __get_str(mds_addr),
+ __get_str(ds_ips)
+ )
+);
+
DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
TP_PROTO(
- const struct nfs_pgio_header *hdr
+ const struct nfs_pgio_header *hdr,
+ int error
),
- TP_ARGS(hdr),
+ TP_ARGS(hdr, error),
TP_STRUCT__entry(
__field(unsigned long, error)
+ __field(unsigned long, nfs_error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -2013,7 +2207,8 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
TP_fast_assign(
const struct inode *inode = hdr->inode;
- __entry->error = hdr->res.op_status;
+ __entry->error = -error;
+ __entry->nfs_error = hdr->res.op_status;
__entry->fhandle = nfs_fhandle_hash(hdr->args.fh);
__entry->fileid = NFS_FILEID(inode);
__entry->dev = inode->i_sb->s_dev;
@@ -2023,14 +2218,13 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
be32_to_cpu(hdr->args.stateid.seqid);
__entry->stateid_hash =
nfs_stateid_hash(&hdr->args.stateid);
- __assign_str(dstaddr, hdr->ds_clp ?
- rpc_peeraddr2str(hdr->ds_clp->cl_rpcclient,
- RPC_DISPLAY_ADDR) : "unknown");
+ __assign_str(dstaddr);
),
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s",
+ "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s "
+ "nfs_error=%lu (%s)",
-__entry->error,
show_nfs4_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -2038,28 +2232,32 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
__entry->fhandle,
__entry->offset, __entry->count,
__entry->stateid_seq, __entry->stateid_hash,
- __get_str(dstaddr)
+ __get_str(dstaddr), __entry->nfs_error,
+ show_nfs4_status(__entry->nfs_error)
)
);
#define DEFINE_NFS4_FLEXFILES_IO_EVENT(name) \
DEFINE_EVENT(nfs4_flexfiles_io_event, name, \
TP_PROTO( \
- const struct nfs_pgio_header *hdr \
+ const struct nfs_pgio_header *hdr, \
+ int error \
), \
- TP_ARGS(hdr))
+ TP_ARGS(hdr, error))
DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_read_error);
DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_write_error);
TRACE_EVENT(ff_layout_commit_error,
TP_PROTO(
- const struct nfs_commit_data *data
+ const struct nfs_commit_data *data,
+ int error
),
- TP_ARGS(data),
+ TP_ARGS(data, error),
TP_STRUCT__entry(
__field(unsigned long, error)
+ __field(unsigned long, nfs_error)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
@@ -2073,30 +2271,153 @@ TRACE_EVENT(ff_layout_commit_error,
TP_fast_assign(
const struct inode *inode = data->inode;
- __entry->error = data->res.op_status;
+ __entry->error = -error;
+ __entry->nfs_error = data->res.op_status;
__entry->fhandle = nfs_fhandle_hash(data->args.fh);
__entry->fileid = NFS_FILEID(inode);
__entry->dev = inode->i_sb->s_dev;
__entry->offset = data->args.offset;
__entry->count = data->args.count;
- __assign_str(dstaddr, data->ds_clp ?
- rpc_peeraddr2str(data->ds_clp->cl_rpcclient,
- RPC_DISPLAY_ADDR) : "unknown");
+ __assign_str(dstaddr);
),
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%llu count=%u dstaddr=%s",
+ "offset=%llu count=%u dstaddr=%s nfs_error=%lu (%s)",
-__entry->error,
show_nfs4_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
__entry->offset, __entry->count,
- __get_str(dstaddr)
+ __get_str(dstaddr), __entry->nfs_error,
+ show_nfs4_status(__entry->nfs_error)
+ )
+);
+
+TRACE_EVENT(bl_ext_tree_prepare_commit,
+ TP_PROTO(
+ int ret,
+ size_t count,
+ u64 lwb,
+ bool not_all_ranges
+ ),
+
+ TP_ARGS(ret, count, lwb, not_all_ranges),
+
+ TP_STRUCT__entry(
+ __field(int, ret)
+ __field(size_t, count)
+ __field(u64, lwb)
+ __field(bool, not_all_ranges)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ __entry->count = count;
+ __entry->lwb = lwb;
+ __entry->not_all_ranges = not_all_ranges;
+ ),
+
+ TP_printk(
+ "ret=%d, found %zu ranges, lwb=%llu%s",
+ __entry->ret,
+ __entry->count,
+ __entry->lwb,
+ __entry->not_all_ranges ? ", not all ranges encoded" :
+ ""
)
);
+DECLARE_EVENT_CLASS(pnfs_bl_pr_key_class,
+ TP_PROTO(
+ const struct block_device *bdev,
+ u64 key
+ ),
+ TP_ARGS(bdev, key),
+ TP_STRUCT__entry(
+ __field(u64, key)
+ __field(dev_t, dev)
+ __string(device, bdev->bd_disk->disk_name)
+ ),
+ TP_fast_assign(
+ __entry->key = key;
+ __entry->dev = bdev->bd_dev;
+ __assign_str(device);
+ ),
+ TP_printk("dev=%d,%d (%s) key=0x%016llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __get_str(device), __entry->key
+ )
+);
+
+#define DEFINE_NFS4_BLOCK_PRKEY_EVENT(name) \
+ DEFINE_EVENT(pnfs_bl_pr_key_class, name, \
+ TP_PROTO( \
+ const struct block_device *bdev, \
+ u64 key \
+ ), \
+ TP_ARGS(bdev, key))
+DEFINE_NFS4_BLOCK_PRKEY_EVENT(bl_pr_key_reg);
+DEFINE_NFS4_BLOCK_PRKEY_EVENT(bl_pr_key_unreg);
+
+/*
+ * From uapi/linux/pr.h
+ */
+TRACE_DEFINE_ENUM(PR_STS_SUCCESS);
+TRACE_DEFINE_ENUM(PR_STS_IOERR);
+TRACE_DEFINE_ENUM(PR_STS_RESERVATION_CONFLICT);
+TRACE_DEFINE_ENUM(PR_STS_RETRY_PATH_FAILURE);
+TRACE_DEFINE_ENUM(PR_STS_PATH_FAST_FAILED);
+TRACE_DEFINE_ENUM(PR_STS_PATH_FAILED);
+
+#define show_pr_status(x) \
+ __print_symbolic(x, \
+ { PR_STS_SUCCESS, "SUCCESS" }, \
+ { PR_STS_IOERR, "IOERR" }, \
+ { PR_STS_RESERVATION_CONFLICT, "RESERVATION_CONFLICT" }, \
+ { PR_STS_RETRY_PATH_FAILURE, "RETRY_PATH_FAILURE" }, \
+ { PR_STS_PATH_FAST_FAILED, "PATH_FAST_FAILED" }, \
+ { PR_STS_PATH_FAILED, "PATH_FAILED" })
+
+DECLARE_EVENT_CLASS(pnfs_bl_pr_key_err_class,
+ TP_PROTO(
+ const struct block_device *bdev,
+ u64 key,
+ int status
+ ),
+ TP_ARGS(bdev, key, status),
+ TP_STRUCT__entry(
+ __field(u64, key)
+ __field(dev_t, dev)
+ __field(unsigned long, status)
+ __string(device, bdev->bd_disk->disk_name)
+ ),
+ TP_fast_assign(
+ __entry->key = key;
+ __entry->dev = bdev->bd_dev;
+ __entry->status = status;
+ __assign_str(device);
+ ),
+ TP_printk("dev=%d,%d (%s) key=0x%016llx status=%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __get_str(device), __entry->key,
+ show_pr_status(__entry->status)
+ )
+);
+
+#define DEFINE_NFS4_BLOCK_PRKEY_ERR_EVENT(name) \
+ DEFINE_EVENT(pnfs_bl_pr_key_err_class, name, \
+ TP_PROTO( \
+ const struct block_device *bdev, \
+ u64 key, \
+ int status \
+ ), \
+ TP_ARGS(bdev, key, status))
+DEFINE_NFS4_BLOCK_PRKEY_ERR_EVENT(bl_pr_key_reg_err);
+DEFINE_NFS4_BLOCK_PRKEY_ERR_EVENT(bl_pr_key_unreg_err);
+
+#ifdef CONFIG_NFS_V4_2
TRACE_DEFINE_ENUM(NFS4_CONTENT_DATA);
TRACE_DEFINE_ENUM(NFS4_CONTENT_HOLE);
@@ -2105,7 +2426,6 @@ TRACE_DEFINE_ENUM(NFS4_CONTENT_HOLE);
{ NFS4_CONTENT_DATA, "DATA" }, \
{ NFS4_CONTENT_HOLE, "HOLE" })
-#ifdef CONFIG_NFS_V4_2
TRACE_EVENT(nfs4_llseek,
TP_PROTO(
const struct inode *inode,
@@ -2464,7 +2784,7 @@ TRACE_EVENT(nfs4_copy_notify,
)
);
-TRACE_EVENT(nfs4_offload_cancel,
+DECLARE_EVENT_CLASS(nfs4_offload_class,
TP_PROTO(
const struct nfs42_offload_status_args *args,
int error
@@ -2496,6 +2816,63 @@ TRACE_EVENT(nfs4_offload_cancel,
__entry->stateid_seq, __entry->stateid_hash
)
);
+#define DEFINE_NFS4_OFFLOAD_EVENT(name) \
+ DEFINE_EVENT(nfs4_offload_class, name, \
+ TP_PROTO( \
+ const struct nfs42_offload_status_args *args, \
+ int error \
+ ), \
+ TP_ARGS(args, error))
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel);
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status);
+
+DECLARE_EVENT_CLASS(nfs4_xattr_event,
+ TP_PROTO(
+ const struct inode *inode,
+ const char *name,
+ int error
+ ),
+
+ TP_ARGS(inode, name, error),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, error)
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error < 0 ? -error : 0;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+ __assign_str(name);
+ ),
+
+ TP_printk(
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "name=%s",
+ -__entry->error, show_nfs4_status(__entry->error),
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __get_str(name)
+ )
+);
+#define DEFINE_NFS4_XATTR_EVENT(name) \
+ DEFINE_EVENT(nfs4_xattr_event, name, \
+ TP_PROTO( \
+ const struct inode *inode, \
+ const char *name, \
+ int error \
+ ), \
+ TP_ARGS(inode, name, error))
+DEFINE_NFS4_XATTR_EVENT(nfs4_getxattr);
+DEFINE_NFS4_XATTR_EVENT(nfs4_setxattr);
+DEFINE_NFS4_XATTR_EVENT(nfs4_removexattr);
+
+DEFINE_NFS4_INODE_EVENT(nfs4_listxattr);
#endif /* CONFIG_NFS_V4_2 */
#endif /* CONFIG_NFS_V4_1 */
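/*
 * Illustrative sketch (not part of the patch above): the nfs4trace.h hunks
 * use the single-argument form of __assign_str(), available on kernels where
 * the tracing core remembers the source string passed to __string().  A
 * minimal event skeleton under that assumption looks like this; the usual
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE boilerplate for define_trace.h is
 * assumed to be set up by the including C file.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_event,
	TP_PROTO(const char *hostname, int error),
	TP_ARGS(hostname, error),
	TP_STRUCT__entry(
		__string(host, hostname)	/* records hostname as the source */
		__field(int, error)
	),
	TP_fast_assign(
		/* One-argument form: copies the string recorded by __string(). */
		__assign_str(host);
		__entry->error = error;
	),
	TP_printk("host=%s error=%d", __get_str(host), __entry->error)
);

#endif /* _TRACE_EXAMPLE_H */

#include <trace/define_trace.h>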
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 69862bf6db00..1d0e6c10f921 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfs4_fs.h"
#include "nfs4trace.h"
@@ -63,11 +64,7 @@
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
struct compound_hdr;
-static int nfs4_stat_to_errno(int);
static void encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr);
@@ -85,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
#define pagepad_maxsz (1)
-#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
-#define lock_owner_id_maxsz (1 + 1 + 4)
-#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define open_owner_id_maxsz (2 + 1 + 2 + 2)
+#define lock_owner_id_maxsz (2 + 1 + 2)
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
@@ -188,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
2 + encode_share_access_maxsz + 2 + \
- open_owner_id_maxsz + \
+ 1 + open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_space_limit_maxsz (3)
@@ -224,6 +220,11 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_attrs_maxsz)
#define decode_setattr_maxsz (op_decode_hdr_maxsz + \
nfs4_fattr_bitmap_maxsz)
+#define encode_delegattr_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + \
+ nfs4_fattr_bitmap_maxsz + \
+ 2*nfstime4_maxsz)
+#define decode_delegattr_maxsz (decode_setattr_maxsz)
#define encode_read_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
#define decode_read_maxsz (op_decode_hdr_maxsz + 2 + pagepad_maxsz)
@@ -253,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_lockowner_maxsz (7)
+#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz)
+
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 1 + \
encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
- (8 + decode_lockowner_maxsz)
+ (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
@@ -615,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_lockowner_maxsz)
#define NFS4_dec_release_lockowner_sz \
(compound_decode_hdr_maxsz + \
- decode_lockowner_maxsz)
+ decode_release_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -758,12 +760,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_layoutreturn_maxsz + \
+ encode_delegattr_maxsz + \
encode_delegreturn_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutreturn_maxsz + \
+ decode_delegattr_maxsz + \
decode_delegreturn_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
@@ -968,11 +972,6 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
return p;
}
-static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
WARN_ON_ONCE(xdr_stream_encode_opaque(xdr, str, len) < 0);
@@ -1060,9 +1059,10 @@ static void encode_nops(struct compound_hdr *hdr)
*hdr->nops_p = htonl(hdr->nops);
}
-static void encode_nfs4_stateid(struct xdr_stream *xdr, const nfs4_stateid *stateid)
+static void encode_nfs4_stateid(struct xdr_stream *xdr,
+ const nfs4_stateid *stateid)
{
- encode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
+ encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
}
static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf)
@@ -1305,7 +1305,7 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct
static inline int nfs4_lock_type(struct file_lock *fl, int block)
{
- if (fl->fl_type == F_RDLCK)
+ if (lock_is_read(fl))
return block ? NFS4_READW_LT : NFS4_READ_LT;
return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
}
@@ -1412,16 +1412,16 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
__be32 *p;
/*
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
- * owner 4 = 32
+ * owner 28
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
- p = reserve_space(xdr, 36);
+ p = reserve_space(xdr, 40);
p = xdr_encode_hyper(p, arg->clientid);
- *p++ = cpu_to_be32(24);
+ *p++ = cpu_to_be32(28);
p = xdr_encode_opaque_fixed(p, "open id:", 8);
*p++ = cpu_to_be32(arg->server->s_dev);
- *p++ = cpu_to_be32(arg->id.uniquifier);
+ p = xdr_encode_hyper(p, arg->id.uniquifier);
xdr_encode_hyper(p, arg->id.create_time);
}
@@ -1468,20 +1468,18 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a
}
}
-static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delegation_type)
+static inline void encode_delegation_type(struct xdr_stream *xdr, u32 delegation_type)
{
__be32 *p;
p = reserve_space(xdr, 4);
switch (delegation_type) {
- case 0:
- *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
- break;
- case FMODE_READ:
- *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
- break;
- case FMODE_WRITE|FMODE_READ:
- *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
+ case NFS4_OPEN_DELEGATE_NONE:
+ case NFS4_OPEN_DELEGATE_READ:
+ case NFS4_OPEN_DELEGATE_WRITE:
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ *p = cpu_to_be32(delegation_type);
break;
default:
BUG();
@@ -1497,7 +1495,7 @@ static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *
encode_string(xdr, name->len, name->name);
}
-static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type)
+static inline void encode_claim_previous(struct xdr_stream *xdr, u32 type)
{
__be32 *p;
@@ -1602,23 +1600,33 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_pgio_args *args
static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
{
uint32_t attrs[3] = {
- FATTR4_WORD0_RDATTR_ERROR,
+ FATTR4_WORD0_TYPE
+ | FATTR4_WORD0_RDATTR_ERROR,
FATTR4_WORD1_MOUNTED_ON_FILEID,
};
- uint32_t dircount = readdir->count >> 1;
+ uint32_t dircount = readdir->count;
+ uint32_t maxcount = readdir->count;
__be32 *p, verf[2];
uint32_t attrlen = 0;
unsigned int i;
if (readdir->plus) {
- attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE|
- FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID;
- attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER|
- FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV|
- FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS|
- FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
+ attrs[0] |= FATTR4_WORD0_CHANGE
+ | FATTR4_WORD0_SIZE
+ | FATTR4_WORD0_FSID
+ | FATTR4_WORD0_FILEHANDLE
+ | FATTR4_WORD0_FILEID;
+ attrs[1] |= FATTR4_WORD1_MODE
+ | FATTR4_WORD1_NUMLINKS
+ | FATTR4_WORD1_OWNER
+ | FATTR4_WORD1_OWNER_GROUP
+ | FATTR4_WORD1_RAWDEV
+ | FATTR4_WORD1_SPACE_USED
+ | FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_CREATE
+ | FATTR4_WORD1_TIME_METADATA
+ | FATTR4_WORD1_TIME_MODIFY;
attrs[2] |= FATTR4_WORD2_SECURITY_LABEL;
- dircount >>= 1;
}
/* Use mounted_on_fileid only if the server supports it */
if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID))
@@ -1634,7 +1642,7 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
encode_nfs4_verifier(xdr, &readdir->verifier);
p = reserve_space(xdr, 12 + (attrlen << 2));
*p++ = cpu_to_be32(dircount);
- *p++ = cpu_to_be32(readdir->count);
+ *p++ = cpu_to_be32(maxcount);
*p++ = cpu_to_be32(attrlen);
for (i = 0; i < attrlen; i++)
*p++ = cpu_to_be32(attrs[i]);
@@ -1680,19 +1688,35 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
encode_op_hdr(xdr, OP_RESTOREFH, decode_restorefh_maxsz, hdr);
}
-static void
-encode_setacl(struct xdr_stream *xdr, const struct nfs_setaclargs *arg,
- struct compound_hdr *hdr)
+static void nfs4_acltype_to_bitmap(enum nfs4_acl_type type, __u32 bitmap[2])
{
- __be32 *p;
+ switch (type) {
+ default:
+ bitmap[0] = FATTR4_WORD0_ACL;
+ bitmap[1] = 0;
+ break;
+ case NFS4ACL_DACL:
+ bitmap[0] = 0;
+ bitmap[1] = FATTR4_WORD1_DACL;
+ break;
+ case NFS4ACL_SACL:
+ bitmap[0] = 0;
+ bitmap[1] = FATTR4_WORD1_SACL;
+ }
+}
+
+static void encode_setacl(struct xdr_stream *xdr,
+ const struct nfs_setaclargs *arg,
+ struct compound_hdr *hdr)
+{
+ __u32 bitmap[2];
+
+ nfs4_acltype_to_bitmap(arg->acl_type, bitmap);
encode_op_hdr(xdr, OP_SETATTR, decode_setacl_maxsz, hdr);
encode_nfs4_stateid(xdr, &zero_stateid);
- p = reserve_space(xdr, 2*4);
- *p++ = cpu_to_be32(1);
- *p = cpu_to_be32(FATTR4_WORD0_ACL);
- p = reserve_space(xdr, 4);
- *p = cpu_to_be32(arg->acl_len);
+ xdr_encode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap));
+ encode_uint32(xdr, arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, 0, arg->acl_len);
}
@@ -1710,6 +1734,33 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
server->attr_bitmask);
}
+static void encode_delegattr(struct xdr_stream *xdr,
+ const nfs4_stateid *stateid,
+ const struct nfs4_delegattr *attr,
+ struct compound_hdr *hdr)
+{
+ uint32_t bitmap[3] = { 0 };
+ uint32_t len = 0;
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_SETATTR, encode_delegattr_maxsz, hdr);
+ encode_nfs4_stateid(xdr, stateid);
+ if (attr->atime_set) {
+ bitmap[2] |= FATTR4_WORD2_TIME_DELEG_ACCESS;
+ len += (nfstime4_maxsz << 2);
+ }
+ if (attr->mtime_set) {
+ bitmap[2] |= FATTR4_WORD2_TIME_DELEG_MODIFY;
+ len += (nfstime4_maxsz << 2);
+ }
+ xdr_encode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap));
+ xdr_stream_encode_opaque_inline(xdr, (void **)&p, len);
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS)
+ p = xdr_encode_nfstime4(p, &attr->atime);
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY)
+ p = xdr_encode_nfstime4(p, &attr->mtime);
+}
+
static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr)
{
__be32 *p;
@@ -2080,7 +2131,7 @@ static void encode_test_stateid(struct xdr_stream *xdr,
{
encode_op_hdr(xdr, OP_TEST_STATEID, decode_test_stateid_maxsz, hdr);
encode_uint32(xdr, 1);
- encode_nfs4_stateid(xdr, args->stateid);
+ encode_nfs4_stateid(xdr, &args->stateid);
}
static void encode_free_stateid(struct xdr_stream *xdr,
@@ -2587,11 +2638,11 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- const __u32 nfs4_acl_bitmap[1] = {
- [0] = FATTR4_WORD0_ACL,
- };
+ __u32 nfs4_acl_bitmap[2];
uint32_t replen;
+ nfs4_acltype_to_bitmap(args->acl_type, nfs4_acl_bitmap);
+
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
@@ -2787,6 +2838,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
encode_putfh(xdr, args->fhandle, &hdr);
if (args->lr_args)
encode_layoutreturn(xdr, args->lr_args, &hdr);
+ if (args->sattr_args)
+ encode_delegattr(xdr, args->stateid, args->sattr_args, &hdr);
if (args->bitmask)
encode_getfattr(xdr, args->bitmask, &hdr);
encode_delegreturn(xdr, args->stateid, &hdr);
@@ -3387,7 +3440,7 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
- dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: link support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3405,7 +3458,7 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
- dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: symlink support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3533,6 +3586,42 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
return 0;
}
+static int decode_attr_case_insensitive(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_CASE_INSENSITIVE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_CASE_INSENSITIVE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_CASE_INSENSITIVE;
+ }
+ dprintk("%s: case_insensitive=%s\n", __func__, str_false_true(*res == 0));
+ return 0;
+}
+
+static int decode_attr_case_preserving(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_CASE_PRESERVING - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_CASE_PRESERVING)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_CASE_PRESERVING;
+ }
+ dprintk("%s: case_preserving=%s\n", __func__, str_false_true(*res == 0));
+ return 0;
+}
+
static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
__be32 *p;
@@ -3696,8 +3785,6 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
if (unlikely(!p))
goto out_eio;
n = be32_to_cpup(p);
- if (n <= 0)
- goto out_eio;
for (res->nlocations = 0; res->nlocations < n; res->nlocations++) {
u32 m;
struct nfs4_fs_location *loc;
@@ -4121,6 +4208,24 @@ static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, str
return status;
}
+static int decode_attr_time_create(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec64 *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_CREATE - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_CREATE)) {
+ status = decode_attr_time(xdr, time);
+ if (status == 0)
+ status = NFS_ATTR_FATTR_BTIME;
+ bitmap[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ }
+ dprintk("%s: btime=%lld\n", __func__, time->tv_sec);
+ return status;
+}
+
static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec64 *time)
{
int status = 0;
@@ -4184,26 +4289,25 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
return -EIO;
+ bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (len < NFS4_MAXLABELLEN) {
- if (label) {
- if (label->len) {
- if (label->len < len)
- return -ERANGE;
- memcpy(label->label, p, len);
- }
+ if (label && label->len) {
+ if (label->len < len)
+ return -ERANGE;
+ memcpy(label->label, p, len);
label->len = len;
label->pi = pi;
label->lfs = lfs;
status = NFS_ATTR_FATTR_V4_SECURITY_LABEL;
}
- bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
} else
printk(KERN_WARNING "%s: label too long (%u)!\n",
__func__, len);
+ if (label && label->label)
+ dprintk("%s: label=%.*s, len=%d, PI=%d, LFS=%d\n",
+ __func__, label->len, (char *)label->label,
+ label->len, label->pi, label->lfs);
}
- if (label && label->label)
- dprintk("%s: label=%s, len=%d, PI=%d, LFS=%d\n", __func__,
- (char *)label->label, label->len, label->pi, label->lfs);
return status;
}
@@ -4240,8 +4344,29 @@ static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT;
}
- dprintk("%s: XATTR support=%s\n", __func__,
- *res == 0 ? "false" : "true");
+ dprintk("%s: XATTR support=%s\n", __func__, str_false_true(*res == 0));
+ return 0;
+}
+
+static int decode_attr_open_arguments(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs4_open_caps *res)
+{
+ memset(res, 0, sizeof(*res));
+ if (unlikely(bitmap[2] & (FATTR4_WORD2_OPEN_ARGUMENTS - 1U)))
+ return -EIO;
+ if (likely(bitmap[2] & FATTR4_WORD2_OPEN_ARGUMENTS)) {
+ if (decode_bitmap4(xdr, res->oa_share_access, ARRAY_SIZE(res->oa_share_access)) < 0)
+ return -EIO;
+ if (decode_bitmap4(xdr, res->oa_share_deny, ARRAY_SIZE(res->oa_share_deny)) < 0)
+ return -EIO;
+ if (decode_bitmap4(xdr, res->oa_share_access_want, ARRAY_SIZE(res->oa_share_access_want)) < 0)
+ return -EIO;
+ if (decode_bitmap4(xdr, res->oa_open_claim, ARRAY_SIZE(res->oa_open_claim)) < 0)
+ return -EIO;
+ if (decode_bitmap4(xdr, res->oa_createmode, ARRAY_SIZE(res->oa_createmode)) < 0)
+ return -EIO;
+ bitmap[2] &= ~FATTR4_WORD2_OPEN_ARGUMENTS;
+ }
return 0;
}
@@ -4294,14 +4419,6 @@ static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
return 0;
}
-static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
-{
- ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
- if (unlikely(ret < 0))
- return -EIO;
- return 0;
-}
-
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
@@ -4412,9 +4529,15 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re
goto xdr_error;
if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0)
goto xdr_error;
+ if ((status = decode_attr_case_insensitive(xdr, bitmap, &res->case_insensitive)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_case_preserving(xdr, bitmap, &res->case_preserving)) != 0)
+ goto xdr_error;
if ((status = decode_attr_exclcreat_supported(xdr, bitmap,
res->exclcreat_bitmask)) != 0)
goto xdr_error;
+ if ((status = decode_attr_open_arguments(xdr, bitmap, &res->open_caps)) != 0)
+ goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
@@ -4677,6 +4800,11 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
+ status = decode_attr_time_create(xdr, bitmap, &fattr->btime);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime);
if (status < 0)
goto xdr_error;
@@ -4700,12 +4828,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
if (status < 0)
goto xdr_error;
- if (fattr->label) {
- status = decode_attr_security_label(xdr, bitmap, fattr->label);
- if (status < 0)
- goto xdr_error;
- fattr->valid |= status;
- }
+ status = decode_attr_security_label(xdr, bitmap, fattr->label);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
xdr_error:
dprintk("%s: xdr returned %d\n", __func__, -status);
@@ -4804,7 +4930,7 @@ static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
}
/*
- * The prefered block size for layout directed io
+ * The preferred block size for layout directed io
*/
static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
uint32_t *res)
@@ -4975,7 +5101,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
-static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
+static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl)
{
uint64_t offset, length, clientid;
__be32 *p;
@@ -4992,10 +5118,10 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
fl->fl_end = fl->fl_start + (loff_t)length - 1;
if (length == ~(uint64_t)0)
fl->fl_end = OFFSET_MAX;
- fl->fl_type = F_WRLCK;
+ fl->c.flc_type = F_WRLCK;
if (type & 1)
- fl->fl_type = F_RDLCK;
- fl->fl_pid = 0;
+ fl->c.flc_type = F_RDLCK;
+ fl->c.flc_pid = 0;
}
p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */
namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */
@@ -5088,13 +5214,12 @@ static int decode_space_limit(struct xdr_stream *xdr,
}
static int decode_rw_delegation(struct xdr_stream *xdr,
- uint32_t delegation_type,
- struct nfs_openres *res)
+ struct nfs4_open_delegation *res)
{
__be32 *p;
int status;
- status = decode_delegation_stateid(xdr, &res->delegation);
+ status = decode_delegation_stateid(xdr, &res->stateid);
if (unlikely(status))
return status;
p = xdr_inline_decode(xdr, 4);
@@ -5102,52 +5227,57 @@ static int decode_rw_delegation(struct xdr_stream *xdr,
return -EIO;
res->do_recall = be32_to_cpup(p);
- switch (delegation_type) {
+ switch (res->open_delegation_type) {
case NFS4_OPEN_DELEGATE_READ:
- res->delegation_type = FMODE_READ;
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ res->type = FMODE_READ;
break;
case NFS4_OPEN_DELEGATE_WRITE:
- res->delegation_type = FMODE_WRITE|FMODE_READ;
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ res->type = FMODE_WRITE|FMODE_READ;
if (decode_space_limit(xdr, &res->pagemod_limit) < 0)
return -EIO;
}
return decode_ace(xdr, NULL);
}
-static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+static int decode_no_delegation(struct xdr_stream *xdr,
+ struct nfs4_open_delegation *res)
{
__be32 *p;
- uint32_t why_no_delegation;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
return -EIO;
- why_no_delegation = be32_to_cpup(p);
- switch (why_no_delegation) {
+ res->why_no_delegation = be32_to_cpup(p);
+ switch (res->why_no_delegation) {
case WND4_CONTENTION:
case WND4_RESOURCE:
- xdr_inline_decode(xdr, 4);
- /* Ignore for now */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+ res->will_notify = be32_to_cpup(p);
}
return 0;
}
-static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+static int decode_delegation(struct xdr_stream *xdr,
+ struct nfs4_open_delegation *res)
{
__be32 *p;
- uint32_t delegation_type;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
return -EIO;
- delegation_type = be32_to_cpup(p);
- res->delegation_type = 0;
- switch (delegation_type) {
+ res->open_delegation_type = be32_to_cpup(p);
+ switch (res->open_delegation_type) {
case NFS4_OPEN_DELEGATE_NONE:
return 0;
case NFS4_OPEN_DELEGATE_READ:
case NFS4_OPEN_DELEGATE_WRITE:
- return decode_rw_delegation(xdr, delegation_type, res);
+ case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
+ case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ return decode_rw_delegation(xdr, res);
case NFS4_OPEN_DELEGATE_NONE_EXT:
return decode_no_delegation(xdr, res);
}
@@ -5188,7 +5318,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
for (; i < NFS4_BITMAP_SIZE; i++)
res->attrset[i] = 0;
- return decode_delegation(xdr, res);
+ return decode_delegation(xdr, &res->delegation);
xdr_error:
dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
return -EIO;
@@ -5347,7 +5477,7 @@ decode_restorefh(struct xdr_stream *xdr)
}
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
- struct nfs_getaclres *res)
+ struct nfs_getaclres *res, enum nfs4_acl_type type)
{
unsigned int savep;
uint32_t attrlen,
@@ -5365,26 +5495,39 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto out;
- if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
- return -EIO;
- if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
-
- /* The bitmap (xdr len + bitmaps) and the attr xdr len words
- * are stored with the acl data to handle the problem of
- * variable length bitmaps.*/
- res->acl_data_offset = xdr_page_pos(xdr);
- res->acl_len = attrlen;
-
- /* Check for receive buffer overflow */
- if (res->acl_len > xdr_stream_remaining(xdr) ||
- res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
- res->acl_flags |= NFS4_ACL_TRUNC;
- dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
- attrlen, xdr_stream_remaining(xdr));
- }
- } else
- status = -EOPNOTSUPP;
+ switch (type) {
+ default:
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
+ return -EIO;
+ if (!(bitmap[0] & FATTR4_WORD0_ACL))
+ return -EOPNOTSUPP;
+ break;
+ case NFS4ACL_DACL:
+ if (unlikely(bitmap[0] || bitmap[1] & (FATTR4_WORD1_DACL - 1U)))
+ return -EIO;
+ if (!(bitmap[1] & FATTR4_WORD1_DACL))
+ return -EOPNOTSUPP;
+ break;
+ case NFS4ACL_SACL:
+ if (unlikely(bitmap[0] || bitmap[1] & (FATTR4_WORD1_SACL - 1U)))
+ return -EIO;
+ if (!(bitmap[1] & FATTR4_WORD1_SACL))
+ return -EOPNOTSUPP;
+ }
+ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ * are stored with the acl data to handle the problem of
+ * variable length bitmaps.*/
+ res->acl_data_offset = xdr_page_pos(xdr);
+ res->acl_len = attrlen;
+
+ /* Check for receive buffer overflow */
+ if (res->acl_len > xdr_stream_remaining(xdr) ||
+ res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
+ res->acl_flags |= NFS4_ACL_TRUNC;
+ dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
+ attrlen, xdr_stream_remaining(xdr));
+ }
out:
return status;
}
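A note on the truncation handling above: when the ACL attribute does not fit in the reply pages, decode_getacl() only records the fact in res->acl_flags and keeps going. The following is a minimal, hypothetical consumer sketch, not part of the patch; the function name, buffer arguments and -ERANGE policy are invented for illustration, only the res->acl_flags / acl_len / acl_data_offset fields come from the code above.

static ssize_t copy_acl_reply(const struct nfs_getaclres *res,
			      void *buf, size_t buflen)
{
	/* The decode path above sets NFS4_ACL_TRUNC when the attribute
	 * overflowed the receive pages. */
	if (res->acl_flags & NFS4_ACL_TRUNC)
		return -ERANGE;
	if (buf && res->acl_len > buflen)
		return -ERANGE;
	/* A real caller would copy out of the reply pages starting at
	 * res->acl_data_offset; elided here. */
	return res->acl_len;
}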
@@ -5407,6 +5550,11 @@ static int decode_setattr(struct xdr_stream *xdr)
return -EIO;
}
+static int decode_delegattr(struct xdr_stream *xdr)
+{
+ return decode_setattr(xdr);
+}
+
static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res)
{
__be32 *p;
@@ -6437,7 +6585,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
int status;
if (res->acl_scratch != NULL)
- xdr_set_scratch_page(xdr, res->acl_scratch);
+ xdr_set_scratch_folio(xdr, res->acl_scratch);
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
@@ -6447,7 +6595,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getacl(xdr, rqstp, res);
+ status = decode_getacl(xdr, rqstp, res, res->acl_type);
out:
return status;
@@ -6979,6 +7127,12 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
if (status)
goto out;
}
+ if (res->sattr_res) {
+ status = decode_delegattr(xdr);
+ res->sattr_ret = status;
+ if (status)
+ goto out;
+ }
if (res->fattr) {
status = decode_getfattr(xdr, res->fattr, res->server);
if (status != 0)
@@ -7012,7 +7166,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
if (res->migration) {
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr_generic(xdr,
- &res->fs_locations->fattr,
+ res->fs_locations->fattr,
NULL, res->fs_locations,
res->fs_locations->server);
if (status)
@@ -7025,7 +7179,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
goto out;
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr_generic(xdr,
- &res->fs_locations->fattr,
+ res->fs_locations->fattr,
NULL, res->fs_locations,
res->fs_locations->server);
}
@@ -7469,78 +7623,11 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
- entry->prev_cookie = entry->cookie;
entry->cookie = new_cookie;
return 0;
}
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS4_OK, 0 },
- { NFS4ERR_PERM, -EPERM },
- { NFS4ERR_NOENT, -ENOENT },
- { NFS4ERR_IO, -errno_NFSERR_IO},
- { NFS4ERR_NXIO, -ENXIO },
- { NFS4ERR_ACCESS, -EACCES },
- { NFS4ERR_EXIST, -EEXIST },
- { NFS4ERR_XDEV, -EXDEV },
- { NFS4ERR_NOTDIR, -ENOTDIR },
- { NFS4ERR_ISDIR, -EISDIR },
- { NFS4ERR_INVAL, -EINVAL },
- { NFS4ERR_FBIG, -EFBIG },
- { NFS4ERR_NOSPC, -ENOSPC },
- { NFS4ERR_ROFS, -EROFS },
- { NFS4ERR_MLINK, -EMLINK },
- { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
- { NFS4ERR_DQUOT, -EDQUOT },
- { NFS4ERR_STALE, -ESTALE },
- { NFS4ERR_BADHANDLE, -EBADHANDLE },
- { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
- { NFS4ERR_NOTSUPP, -ENOTSUPP },
- { NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
- { NFS4ERR_BADTYPE, -EBADTYPE },
- { NFS4ERR_LOCKED, -EAGAIN },
- { NFS4ERR_SYMLINK, -ELOOP },
- { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
- { NFS4ERR_DEADLOCK, -EDEADLK },
- { NFS4ERR_NOXATTR, -ENODATA },
- { NFS4ERR_XATTR2BIG, -E2BIG },
- { -1, -EIO }
-};
-
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
- */
-static int
-nfs4_stat_to_errno(int stat)
-{
- int i;
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == stat)
- return nfs_errtbl[i].errno;
- }
- if (stat <= 10000 || stat > 10100) {
- /* The server is looney tunes. */
- return -EREMOTEIO;
- }
- /* If we cannot translate the error, the recovery routines should
- * handle it.
- * Note: remaining NFSv4 error codes have values > 10000, so should
- * not conflict with native Linux error codes.
- */
- return -stat;
-}
-
#ifdef CONFIG_NFS_V4_2
#include "nfs42xdr.c"
#endif /* CONFIG_NFS_V4_2 */
@@ -7639,6 +7726,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(CLONE, enc_clone, dec_clone),
PROC42(COPY, enc_copy, dec_copy),
PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
+ PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status),
PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
@@ -7647,6 +7735,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs),
PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr),
PROC42(READ_PLUS, enc_read_plus, dec_read_plus),
+ PROC42(ZERO_RANGE, enc_zero_range, dec_zero_range),
};
static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index fa148308822c..432612d22437 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -139,7 +139,7 @@ static int __init nfs_root_setup(char *line)
ROOT_DEV = Root_NFS;
if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
- strlcpy(nfs_root_parms, line, sizeof(nfs_root_parms));
+ strscpy(nfs_root_parms, line, sizeof(nfs_root_parms));
} else {
size_t n = strlen(line) + sizeof(NFS_ROOT) - 1;
if (n >= sizeof(nfs_root_parms))
@@ -164,7 +164,7 @@ __setup("nfsroot=", nfs_root_setup);
static int __init root_nfs_copy(char *dest, const char *src,
const size_t destlen)
{
- if (strlcpy(dest, src, destlen) > destlen)
+ if (strscpy(dest, src, destlen) == -E2BIG)
return -1;
return 0;
}
@@ -175,10 +175,10 @@ static int __init root_nfs_cat(char *dest, const char *src,
size_t len = strlen(dest);
if (len && dest[len - 1] != ',')
- if (strlcat(dest, ",", destlen) > destlen)
+ if (strlcat(dest, ",", destlen) >= destlen)
return -1;
- if (strlcat(dest, src, destlen) > destlen)
+ if (strlcat(dest, src, destlen) >= destlen)
return -1;
return 0;
}
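The nfsroot.c hunks above all hinge on how these string helpers report truncation: strscpy() returns -E2BIG when the source did not fit, while strlcat() returns the length it tried to create, so truncation shows up as a result >= the buffer size (the old "> destlen" comparison missed the case where the result exactly equals the buffer size, which is also a truncation). A small illustrative sketch, not part of the patch; the function name and option string are invented.

/* Illustrative only: how truncation is detected with strscpy()/strlcat(). */
static int __init build_root_opts(char *dst, size_t dstlen, const char *server)
{
	/* strscpy() NUL-terminates and returns -E2BIG if 'server' was cut off. */
	if (strscpy(dst, server, dstlen) == -E2BIG)
		return -1;

	/* strlcat() returns the length it tried to create; a value >= dstlen
	 * means the concatenation (including the NUL) did not fit. */
	if (strlcat(dst, ",tcp", dstlen) >= dstlen)
		return -1;
	return 0;
}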
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 21dac847f1e4..6ce55e8e6b67 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -11,9 +11,9 @@
#include <linux/tracepoint.h>
#include <linux/iversion.h>
-#include <trace/events/fs.h>
-#include <trace/events/nfs.h>
-#include <trace/events/sunrpc_base.h>
+#include <trace/misc/fs.h>
+#include <trace/misc/nfs.h>
+#include <trace/misc/sunrpc.h>
#define nfs_show_cache_validity(v) \
__print_flags(v, "|", \
@@ -21,7 +21,6 @@
{ NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \
{ NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \
{ NFS_INO_INVALID_ACL, "INVALID_ACL" }, \
- { NFS_INO_REVAL_PAGECACHE, "REVAL_PAGECACHE" }, \
{ NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \
{ NFS_INO_INVALID_LABEL, "INVALID_LABEL" }, \
{ NFS_INO_INVALID_CHANGE, "INVALID_CHANGE" }, \
@@ -33,21 +32,36 @@
{ NFS_INO_INVALID_BLOCKS, "INVALID_BLOCKS" }, \
{ NFS_INO_INVALID_XATTR, "INVALID_XATTR" }, \
{ NFS_INO_INVALID_NLINK, "INVALID_NLINK" }, \
- { NFS_INO_INVALID_MODE, "INVALID_MODE" })
+ { NFS_INO_INVALID_MODE, "INVALID_MODE" }, \
+ { NFS_INO_INVALID_BTIME, "INVALID_BTIME" })
#define nfs_show_nfsi_flags(v) \
__print_flags(v, "|", \
- { BIT(NFS_INO_ADVISE_RDPLUS), "ADVISE_RDPLUS" }, \
{ BIT(NFS_INO_STALE), "STALE" }, \
{ BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \
{ BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \
- { BIT(NFS_INO_FSCACHE), "FSCACHE" }, \
- { BIT(NFS_INO_FSCACHE_LOCK), "FSCACHE_LOCK" }, \
{ BIT(NFS_INO_LAYOUTCOMMIT), "NEED_LAYOUTCOMMIT" }, \
{ BIT(NFS_INO_LAYOUTCOMMITTING), "LAYOUTCOMMIT" }, \
{ BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \
{ BIT(NFS_INO_ODIRECT), "ODIRECT" })
+#define nfs_show_wb_flags(v) \
+ __print_flags(v, "|", \
+ { BIT(PG_BUSY), "BUSY" }, \
+ { BIT(PG_MAPPED), "MAPPED" }, \
+ { BIT(PG_FOLIO), "FOLIO" }, \
+ { BIT(PG_CLEAN), "CLEAN" }, \
+ { BIT(PG_COMMIT_TO_DS), "COMMIT_TO_DS" }, \
+ { BIT(PG_INODE_REF), "INODE_REF" }, \
+ { BIT(PG_HEADLOCK), "HEADLOCK" }, \
+ { BIT(PG_TEARDOWN), "TEARDOWN" }, \
+ { BIT(PG_UNLOCKPAGE), "UNLOCKPAGE" }, \
+ { BIT(PG_UPTODATE), "UPTODATE" }, \
+ { BIT(PG_WB_END), "WB_END" }, \
+ { BIT(PG_REMOVE), "REMOVE" }, \
+ { BIT(PG_CONTENDED1), "CONTENDED1" }, \
+ { BIT(PG_CONTENDED2), "CONTENDED2" })
+
DECLARE_EVENT_CLASS(nfs_inode_event,
TP_PROTO(
const struct inode *inode
@@ -60,6 +74,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event,
__field(u32, fhandle)
__field(u64, fileid)
__field(u64, version)
+ __field(unsigned long, cache_validity)
),
TP_fast_assign(
@@ -68,14 +83,17 @@ DECLARE_EVENT_CLASS(nfs_inode_event,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
+ __entry->cache_validity = nfsi->cache_validity;
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu ",
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu cache_validity=0x%lx (%s)",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- (unsigned long long)__entry->version
+ (unsigned long long)__entry->version,
+ __entry->cache_validity,
+ nfs_show_cache_validity(__entry->cache_validity)
)
);
@@ -155,13 +173,15 @@ DEFINE_NFS_INODE_EVENT(nfs_getattr_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_getattr_exit);
DEFINE_NFS_INODE_EVENT(nfs_setattr_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_setattr_exit);
-DEFINE_NFS_INODE_EVENT(nfs_writeback_page_enter);
-DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_page_exit);
DEFINE_NFS_INODE_EVENT(nfs_writeback_inode_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
DEFINE_NFS_INODE_EVENT(nfs_access_enter);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid);
+DEFINE_NFS_INODE_EVENT(nfs_readdir_force_readdirplus);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_cache_fill_done);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_uncached_done);
TRACE_EVENT(nfs_access_exit,
TP_PROTO(
@@ -269,10 +289,127 @@ DECLARE_EVENT_CLASS(nfs_update_size_class,
TP_ARGS(inode, new_size))
DEFINE_NFS_UPDATE_SIZE_EVENT(truncate);
+DEFINE_NFS_UPDATE_SIZE_EVENT(truncate_folio);
DEFINE_NFS_UPDATE_SIZE_EVENT(wcc);
DEFINE_NFS_UPDATE_SIZE_EVENT(update);
DEFINE_NFS_UPDATE_SIZE_EVENT(grow);
+DECLARE_EVENT_CLASS(nfs_inode_range_event,
+ TP_PROTO(
+ const struct inode *inode,
+ loff_t range_start,
+ loff_t range_end
+ ),
+
+ TP_ARGS(inode, range_start, range_end),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(u64, version)
+ __field(loff_t, range_start)
+ __field(loff_t, range_end)
+ ),
+
+ TP_fast_assign(
+ const struct nfs_inode *nfsi = NFS_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fileid = nfsi->fileid;
+ __entry->version = inode_peek_iversion_raw(inode);
+ __entry->range_start = range_start;
+ __entry->range_end = range_end;
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu "
+ "range=[%lld, %lld]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->version,
+ __entry->range_start, __entry->range_end
+ )
+);
+
+#define DEFINE_NFS_INODE_RANGE_EVENT(name) \
+ DEFINE_EVENT(nfs_inode_range_event, name, \
+ TP_PROTO( \
+ const struct inode *inode, \
+ loff_t range_start, \
+ loff_t range_end \
+ ), \
+ TP_ARGS(inode, range_start, range_end))
+
+DEFINE_NFS_INODE_RANGE_EVENT(nfs_readdir_invalidate_cache_range);
+
+DECLARE_EVENT_CLASS(nfs_readdir_event,
+ TP_PROTO(
+ const struct file *file,
+ const __be32 *verifier,
+ u64 cookie,
+ pgoff_t page_index,
+ unsigned int dtsize
+ ),
+
+ TP_ARGS(file, verifier, cookie, page_index, dtsize),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(u64, version)
+ __array(char, verifier, NFS4_VERIFIER_SIZE)
+ __field(u64, cookie)
+ __field(pgoff_t, index)
+ __field(unsigned int, dtsize)
+ ),
+
+ TP_fast_assign(
+ const struct inode *dir = file_inode(file);
+ const struct nfs_inode *nfsi = NFS_I(dir);
+
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->version = inode_peek_iversion_raw(dir);
+ if (cookie != 0)
+ memcpy(__entry->verifier, verifier,
+ NFS4_VERIFIER_SIZE);
+ else
+ memset(__entry->verifier, 0,
+ NFS4_VERIFIER_SIZE);
+ __entry->cookie = cookie;
+ __entry->index = page_index;
+ __entry->dtsize = dtsize;
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu "
+ "cookie=%s:0x%llx cache_index=%lu dtsize=%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid, __entry->fhandle,
+ __entry->version, show_nfs4_verifier(__entry->verifier),
+ (unsigned long long)__entry->cookie, __entry->index,
+ __entry->dtsize
+ )
+);
+
+#define DEFINE_NFS_READDIR_EVENT(name) \
+ DEFINE_EVENT(nfs_readdir_event, name, \
+ TP_PROTO( \
+ const struct file *file, \
+ const __be32 *verifier, \
+ u64 cookie, \
+ pgoff_t page_index, \
+ unsigned int dtsize \
+ ), \
+ TP_ARGS(file, verifier, cookie, page_index, dtsize))
+
+DEFINE_NFS_READDIR_EVENT(nfs_readdir_cache_fill);
+DEFINE_NFS_READDIR_EVENT(nfs_readdir_uncached);
+
DECLARE_EVENT_CLASS(nfs_lookup_event,
TP_PROTO(
const struct inode *dir,
@@ -286,6 +423,7 @@ DECLARE_EVENT_CLASS(nfs_lookup_event,
__field(unsigned long, flags)
__field(dev_t, dev)
__field(u64, dir)
+ __field(u64, fileid)
__string(name, dentry->d_name.name)
),
@@ -293,16 +431,18 @@ DECLARE_EVENT_CLASS(nfs_lookup_event,
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
- __assign_str(name, dentry->d_name.name);
+ __entry->fileid = d_is_negative(dentry) ? 0 : NFS_FILEID(d_inode(dentry));
+ __assign_str(name);
),
TP_printk(
- "flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
+ "flags=0x%lx (%s) name=%02x:%02x:%llu/%s fileid=%llu",
__entry->flags,
show_fs_lookup_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir,
- __get_str(name)
+ __get_str(name),
+ __entry->fileid
)
);
@@ -330,6 +470,7 @@ DECLARE_EVENT_CLASS(nfs_lookup_event_done,
__field(unsigned long, flags)
__field(dev_t, dev)
__field(u64, dir)
+ __field(u64, fileid)
__string(name, dentry->d_name.name)
),
@@ -338,17 +479,19 @@ DECLARE_EVENT_CLASS(nfs_lookup_event_done,
__entry->dir = NFS_FILEID(dir);
__entry->error = error < 0 ? -error : 0;
__entry->flags = flags;
- __assign_str(name, dentry->d_name.name);
+ __entry->fileid = d_is_negative(dentry) ? 0 : NFS_FILEID(d_inode(dentry));
+ __assign_str(name);
),
TP_printk(
- "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
+ "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s fileid=%llu",
-__entry->error, show_nfs_status(__entry->error),
__entry->flags,
show_fs_lookup_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir,
- __get_str(name)
+ __get_str(name),
+ __entry->fileid
)
);
@@ -366,6 +509,9 @@ DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_enter);
DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_exit);
DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_revalidate_enter);
DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_revalidate_exit);
+DEFINE_NFS_LOOKUP_EVENT(nfs_readdir_lookup);
+DEFINE_NFS_LOOKUP_EVENT(nfs_readdir_lookup_revalidate_failed);
+DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_readdir_lookup_revalidate);
TRACE_EVENT(nfs_atomic_open_enter,
TP_PROTO(
@@ -389,7 +535,7 @@ TRACE_EVENT(nfs_atomic_open_enter,
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
__entry->fmode = (__force unsigned long)ctx->mode;
- __assign_str(name, ctx->dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -428,7 +574,7 @@ TRACE_EVENT(nfs_atomic_open_exit,
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
__entry->fmode = (__force unsigned long)ctx->mode;
- __assign_str(name, ctx->dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -464,7 +610,7 @@ TRACE_EVENT(nfs_create_enter,
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -500,7 +646,7 @@ TRACE_EVENT(nfs_create_exit,
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->flags = flags;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -531,7 +677,7 @@ DECLARE_EVENT_CLASS(nfs_directory_event,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -570,7 +716,7 @@ DECLARE_EVENT_CLASS(nfs_directory_event_done,
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->error = error < 0 ? -error : 0;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -624,7 +770,7 @@ TRACE_EVENT(nfs_link_enter,
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->dir = NFS_FILEID(dir);
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -660,7 +806,7 @@ TRACE_EVENT(nfs_link_exit,
__entry->fileid = NFS_FILEID(inode);
__entry->dir = NFS_FILEID(dir);
__entry->error = error < 0 ? -error : 0;
- __assign_str(name, dentry->d_name.name);
+ __assign_str(name);
),
TP_printk(
@@ -696,8 +842,8 @@ DECLARE_EVENT_CLASS(nfs_rename_event,
__entry->dev = old_dir->i_sb->s_dev;
__entry->old_dir = NFS_FILEID(old_dir);
__entry->new_dir = NFS_FILEID(new_dir);
- __assign_str(old_name, old_dentry->d_name.name);
- __assign_str(new_name, new_dentry->d_name.name);
+ __assign_str(old_name);
+ __assign_str(new_name);
),
TP_printk(
@@ -745,8 +891,8 @@ DECLARE_EVENT_CLASS(nfs_rename_event_done,
__entry->error = -error;
__entry->old_dir = NFS_FILEID(old_dir);
__entry->new_dir = NFS_FILEID(new_dir);
- __assign_str(old_name, old_dentry->d_name.name);
- __assign_str(new_name, new_dentry->d_name.name);
+ __assign_str(old_name);
+ __assign_str(new_name);
),
TP_printk(
@@ -776,7 +922,7 @@ DECLARE_EVENT_CLASS(nfs_rename_event_done,
DEFINE_NFS_RENAME_EVENT(nfs_rename_enter);
DEFINE_NFS_RENAME_EVENT_DONE(nfs_rename_exit);
-DEFINE_NFS_RENAME_EVENT_DONE(nfs_sillyrename_rename);
+DEFINE_NFS_RENAME_EVENT_DONE(nfs_async_rename_done);
TRACE_EVENT(nfs_sillyrename_unlink,
TP_PROTO(
@@ -813,13 +959,14 @@ TRACE_EVENT(nfs_sillyrename_unlink,
)
);
-TRACE_EVENT(nfs_aop_readpage,
+DECLARE_EVENT_CLASS(nfs_folio_event,
TP_PROTO(
const struct inode *inode,
- struct page *page
+ loff_t offset,
+ size_t count
),
- TP_ARGS(inode, page),
+ TP_ARGS(inode, offset, count),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -827,6 +974,7 @@ TRACE_EVENT(nfs_aop_readpage,
__field(u64, fileid)
__field(u64, version)
__field(loff_t, offset)
+ __field(size_t, count)
),
TP_fast_assign(
@@ -836,26 +984,38 @@ TRACE_EVENT(nfs_aop_readpage,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
- __entry->offset = page_index(page) << PAGE_SHIFT;
+ __entry->offset = offset;
+ __entry->count = count;
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld",
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu "
+ "offset=%lld count=%zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle, __entry->version,
- __entry->offset
+ __entry->offset, __entry->count
)
);
-TRACE_EVENT(nfs_aop_readpage_done,
+#define DEFINE_NFS_FOLIO_EVENT(name) \
+ DEFINE_EVENT(nfs_folio_event, name, \
+ TP_PROTO( \
+ const struct inode *inode, \
+ loff_t offset, \
+ size_t count \
+ ), \
+ TP_ARGS(inode, offset, count))
+
+DECLARE_EVENT_CLASS(nfs_folio_event_done,
TP_PROTO(
const struct inode *inode,
- struct page *page,
+ loff_t offset,
+ size_t count,
int ret
),
- TP_ARGS(inode, page, ret),
+ TP_ARGS(inode, offset, count, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -864,6 +1024,7 @@ TRACE_EVENT(nfs_aop_readpage_done,
__field(u64, fileid)
__field(u64, version)
__field(loff_t, offset)
+ __field(size_t, count)
),
TP_fast_assign(
@@ -873,27 +1034,115 @@ TRACE_EVENT(nfs_aop_readpage_done,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
- __entry->offset = page_index(page) << PAGE_SHIFT;
+ __entry->offset = offset;
+ __entry->count = count;
__entry->ret = ret;
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld ret=%d",
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu "
+ "offset=%lld count=%zu ret=%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->version,
+ __entry->offset, __entry->count, __entry->ret
+ )
+);
+
+#define DEFINE_NFS_FOLIO_EVENT_DONE(name) \
+ DEFINE_EVENT(nfs_folio_event_done, name, \
+ TP_PROTO( \
+ const struct inode *inode, \
+ loff_t offset, \
+ size_t count, \
+ int ret \
+ ), \
+ TP_ARGS(inode, offset, count, ret))
+
+DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_invalidate_folio);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_launder_folio_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_try_to_update_request);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_try_to_update_request_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_update_folio);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_update_folio_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_write_begin);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_write_begin_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_write_end);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_write_end_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_writepages);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writepages_done);
+
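Each DEFINE_NFS_FOLIO_EVENT()/DEFINE_NFS_FOLIO_EVENT_DONE() above generates a trace_<name>(inode, offset, count) call that replaces the old page-based tracepoints. A hedged usage sketch follows; the surrounding function is hypothetical, only the tracepoint call and its (inode, loff_t, size_t) signature are implied by the definitions above.

static void example_trace_folio_writeback(struct inode *inode,
					  struct folio *folio)
{
	/* Emits the event created by DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio). */
	trace_nfs_writeback_folio(inode, folio_pos(folio), folio_size(folio));
}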
+DECLARE_EVENT_CLASS(nfs_kiocb_event,
+ TP_PROTO(
+ const struct kiocb *iocb,
+ const struct iov_iter *iter
+ ),
+
+ TP_ARGS(iocb, iter),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(u64, version)
+ __field(loff_t, offset)
+ __field(size_t, count)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = file_inode(iocb->ki_filp);
+ const struct nfs_inode *nfsi = NFS_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->version = inode_peek_iversion_raw(inode);
+ __entry->offset = iocb->ki_pos;
+ __entry->count = iov_iter_count(iter);
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld count=%zu ki_flags=%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle, __entry->version,
- __entry->offset, __entry->ret
+ __entry->offset, __entry->count,
+ __print_flags(__entry->flags, "|", TRACE_IOCB_STRINGS)
)
);
+#define DEFINE_NFS_KIOCB_EVENT(name) \
+ DEFINE_EVENT(nfs_kiocb_event, name, \
+ TP_PROTO( \
+ const struct kiocb *iocb, \
+ const struct iov_iter *iter \
+ ), \
+ TP_ARGS(iocb, iter))
+
+DEFINE_NFS_KIOCB_EVENT(nfs_file_read);
+DEFINE_NFS_KIOCB_EVENT(nfs_file_write);
+
TRACE_EVENT(nfs_aop_readahead,
TP_PROTO(
const struct inode *inode,
- struct page *page,
+ loff_t pos,
unsigned int nr_pages
),
- TP_ARGS(inode, page, nr_pages),
+ TP_ARGS(inode, pos, nr_pages),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -911,7 +1160,7 @@ TRACE_EVENT(nfs_aop_readahead,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->version = inode_peek_iversion_raw(inode);
- __entry->offset = page_index(page) << PAGE_SHIFT;
+ __entry->offset = pos;
__entry->nr_pages = nr_pages;
),
@@ -1017,7 +1266,7 @@ TRACE_EVENT(nfs_readpage_done,
__field(u32, arg_count)
__field(u32, res_count)
__field(bool, eof)
- __field(int, status)
+ __field(int, error)
),
TP_fast_assign(
@@ -1026,7 +1275,7 @@ TRACE_EVENT(nfs_readpage_done,
const struct nfs_fh *fh = hdr->args.fh ?
hdr->args.fh : &nfsi->fh;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1037,14 +1286,13 @@ TRACE_EVENT(nfs_readpage_done,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u status=%d%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset, __entry->arg_count,
- __entry->res_count, __entry->status,
- __entry->eof ? " eof" : ""
+ __entry->res_count, __entry->eof ? " eof" : ""
)
);
@@ -1064,7 +1312,7 @@ TRACE_EVENT(nfs_readpage_short,
__field(u32, arg_count)
__field(u32, res_count)
__field(bool, eof)
- __field(int, status)
+ __field(int, error)
),
TP_fast_assign(
@@ -1073,7 +1321,7 @@ TRACE_EVENT(nfs_readpage_short,
const struct nfs_fh *fh = hdr->args.fh ?
hdr->args.fh : &nfsi->fh;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1084,17 +1332,17 @@ TRACE_EVENT(nfs_readpage_short,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u status=%d%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset, __entry->arg_count,
- __entry->res_count, __entry->status,
- __entry->eof ? " eof" : ""
+ __entry->res_count, __entry->eof ? " eof" : ""
)
);
+
TRACE_EVENT(nfs_pgio_error,
TP_PROTO(
const struct nfs_pgio_header *hdr,
@@ -1112,7 +1360,7 @@ TRACE_EVENT(nfs_pgio_error,
__field(u32, arg_count)
__field(u32, res_count)
__field(loff_t, pos)
- __field(int, status)
+ __field(int, error)
),
TP_fast_assign(
@@ -1121,7 +1369,7 @@ TRACE_EVENT(nfs_pgio_error,
const struct nfs_fh *fh = hdr->args.fh ?
hdr->args.fh : &nfsi->fh;
- __entry->status = error;
+ __entry->error = error;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1130,12 +1378,12 @@ TRACE_EVENT(nfs_pgio_error,
__entry->fhandle = nfs_fhandle_hash(fh);
),
- TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u pos=%llu status=%d",
+ TP_printk("error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u pos=%llu", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, __entry->fhandle,
(long long)__entry->offset, __entry->arg_count, __entry->res_count,
- __entry->pos, __entry->status
+ __entry->pos
)
);
@@ -1195,7 +1443,7 @@ TRACE_EVENT(nfs_writeback_done,
__field(loff_t, offset)
__field(u32, arg_count)
__field(u32, res_count)
- __field(int, status)
+ __field(int, error)
__field(unsigned long, stable)
__array(char, verifier, NFS4_VERIFIER_SIZE)
),
@@ -1207,7 +1455,7 @@ TRACE_EVENT(nfs_writeback_done,
hdr->args.fh : &nfsi->fh;
const struct nfs_writeverf *verf = hdr->res.verf;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1221,59 +1469,114 @@ TRACE_EVENT(nfs_writeback_done,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u status=%d stable=%s "
- "verifier=%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u stable=%s "
+ "verifier=%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset, __entry->arg_count,
- __entry->res_count, __entry->status,
+ __entry->res_count,
show_nfs_stable_how(__entry->stable),
show_nfs4_verifier(__entry->verifier)
)
);
+DECLARE_EVENT_CLASS(nfs_page_class,
+ TP_PROTO(
+ const struct nfs_page *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(const struct nfs_page *__private, req)
+ __field(loff_t, offset)
+ __field(unsigned int, count)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = folio_inode(req->wb_folio);
+ const struct nfs_inode *nfsi = NFS_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->req = req;
+ __entry->offset = req_offset(req);
+ __entry->count = req->wb_bytes;
+ __entry->flags = req->wb_flags;
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x req=%p offset=%lld count=%u flags=%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid, __entry->fhandle,
+ __entry->req, __entry->offset, __entry->count,
+ nfs_show_wb_flags(__entry->flags)
+ )
+);
+
+#define DEFINE_NFS_PAGE_EVENT(name) \
+ DEFINE_EVENT(nfs_page_class, name, \
+ TP_PROTO( \
+ const struct nfs_page *req \
+ ), \
+ TP_ARGS(req))
+
+DEFINE_NFS_PAGE_EVENT(nfs_writepage_setup);
+DEFINE_NFS_PAGE_EVENT(nfs_do_writepage);
+
DECLARE_EVENT_CLASS(nfs_page_error_class,
TP_PROTO(
+ const struct inode *inode,
const struct nfs_page *req,
int error
),
- TP_ARGS(req, error),
+ TP_ARGS(inode, req, error),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(pgoff_t, index)
- __field(unsigned int, offset)
- __field(unsigned int, pgbase)
- __field(unsigned int, bytes)
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(unsigned int, count)
__field(int, error)
),
TP_fast_assign(
- __entry->req = req;
- __entry->index = req->wb_index;
- __entry->offset = req->wb_offset;
- __entry->pgbase = req->wb_pgbase;
- __entry->bytes = req->wb_bytes;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->offset = req_offset(req);
+ __entry->count = req->wb_bytes;
__entry->error = error;
),
TP_printk(
- "req=%p index=%lu offset=%u pgbase=%u bytes=%u error=%d",
- __entry->req, __entry->index, __entry->offset,
- __entry->pgbase, __entry->bytes, __entry->error
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u", __entry->error,
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->offset,
+ __entry->count
)
);
#define DEFINE_NFS_PAGEERR_EVENT(name) \
DEFINE_EVENT(nfs_page_error_class, name, \
TP_PROTO( \
+ const struct inode *inode, \
const struct nfs_page *req, \
int error \
), \
- TP_ARGS(req, error))
+ TP_ARGS(inode, req, error))
DEFINE_NFS_PAGEERR_EVENT(nfs_write_error);
DEFINE_NFS_PAGEERR_EVENT(nfs_comp_error);
@@ -1330,7 +1633,7 @@ TRACE_EVENT(nfs_commit_done,
__field(u32, fhandle)
__field(u64, fileid)
__field(loff_t, offset)
- __field(int, status)
+ __field(int, error)
__field(unsigned long, stable)
__array(char, verifier, NFS4_VERIFIER_SIZE)
),
@@ -1342,7 +1645,7 @@ TRACE_EVENT(nfs_commit_done,
data->args.fh : &nfsi->fh;
const struct nfs_writeverf *verf = data->res.verf;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = data->args.offset;
__entry->stable = verf->committed;
memcpy(__entry->verifier,
@@ -1354,17 +1657,151 @@ TRACE_EVENT(nfs_commit_done,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d stable=%s verifier=%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld stable=%s verifier=%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- (long long)__entry->offset, __entry->status,
+ (long long)__entry->offset,
show_nfs_stable_how(__entry->stable),
show_nfs4_verifier(__entry->verifier)
)
);
+#define nfs_show_direct_req_flags(v) \
+ __print_flags(v, "|", \
+ { NFS_ODIRECT_DO_COMMIT, "DO_COMMIT" }, \
+ { NFS_ODIRECT_RESCHED_WRITES, "RESCHED_WRITES" }, \
+ { NFS_ODIRECT_SHOULD_DIRTY, "SHOULD DIRTY" }, \
+ { NFS_ODIRECT_DONE, "DONE" } )
+
+DECLARE_EVENT_CLASS(nfs_direct_req_class,
+ TP_PROTO(
+ const struct nfs_direct_req *dreq
+ ),
+
+ TP_ARGS(dreq),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, fileid)
+ __field(u32, fhandle)
+ __field(loff_t, offset)
+ __field(ssize_t, count)
+ __field(ssize_t, error)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = dreq->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = &nfsi->fh;
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->offset = dreq->io_start;
+ __entry->count = dreq->count;
+ __entry->error = dreq->error;
+ __entry->flags = dreq->flags;
+ ),
+
+ TP_printk(
+ "error=%zd fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%zd flags=%s",
+ __entry->error, MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->offset,
+ __entry->count,
+ nfs_show_direct_req_flags(__entry->flags)
+ )
+);
+
+#define DEFINE_NFS_DIRECT_REQ_EVENT(name) \
+ DEFINE_EVENT(nfs_direct_req_class, name, \
+ TP_PROTO( \
+ const struct nfs_direct_req *dreq \
+ ), \
+ TP_ARGS(dreq))
+
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_commit_complete);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_resched_write);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_complete);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_completion);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_schedule_iovec);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_reschedule_io);
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+DECLARE_EVENT_CLASS(nfs_local_dio_class,
+ TP_PROTO(
+ const struct inode *inode,
+ loff_t offset,
+ ssize_t count,
+ const struct nfs_local_dio *local_dio
+ ),
+ TP_ARGS(inode, offset, count, local_dio),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, fileid)
+ __field(u32, fhandle)
+ __field(loff_t, offset)
+ __field(ssize_t, count)
+ __field(u32, mem_align)
+ __field(u32, offset_align)
+ __field(loff_t, start)
+ __field(ssize_t, start_len)
+ __field(loff_t, middle)
+ __field(ssize_t, middle_len)
+ __field(loff_t, end)
+ __field(ssize_t, end_len)
+ ),
+ TP_fast_assign(
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = &nfsi->fh;
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->offset = offset;
+ __entry->count = count;
+ __entry->mem_align = local_dio->mem_align;
+ __entry->offset_align = local_dio->offset_align;
+ __entry->start = offset;
+ __entry->start_len = local_dio->start_len;
+ __entry->middle = local_dio->middle_offset;
+ __entry->middle_len = local_dio->middle_len;
+ __entry->end = local_dio->end_offset;
+ __entry->end_len = local_dio->end_len;
+ ),
+ TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%zd "
+ "mem_align=%u offset_align=%u "
+ "start=%llu+%zd middle=%llu+%zd end=%llu+%zd",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->offset, __entry->count,
+ __entry->mem_align, __entry->offset_align,
+ __entry->start, __entry->start_len,
+ __entry->middle, __entry->middle_len,
+ __entry->end, __entry->end_len)
+)
+
+#define DEFINE_NFS_LOCAL_DIO_EVENT(name) \
+DEFINE_EVENT(nfs_local_dio_class, nfs_local_dio_##name, \
+ TP_PROTO(const struct inode *inode, \
+ loff_t offset, \
+ ssize_t count, \
+ const struct nfs_local_dio *local_dio),\
+ TP_ARGS(inode, offset, count, local_dio))
+
+DEFINE_NFS_LOCAL_DIO_EVENT(read);
+DEFINE_NFS_LOCAL_DIO_EVENT(write);
+DEFINE_NFS_LOCAL_DIO_EVENT(misaligned);
+
+#endif /* CONFIG_NFS_LOCALIO */
+
TRACE_EVENT(nfs_fh_to_dentry,
TP_PROTO(
const struct super_block *sb,
@@ -1398,6 +1835,94 @@ TRACE_EVENT(nfs_fh_to_dentry,
)
);
+TRACE_EVENT(nfs_mount_assign,
+ TP_PROTO(
+ const char *option,
+ const char *value
+ ),
+
+ TP_ARGS(option, value),
+
+ TP_STRUCT__entry(
+ __string(option, option)
+ __string(value, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(option);
+ __assign_str(value);
+ ),
+
+ TP_printk("option %s=%s",
+ __get_str(option), __get_str(value)
+ )
+);
+
+TRACE_EVENT(nfs_mount_option,
+ TP_PROTO(
+ const struct fs_parameter *param
+ ),
+
+ TP_ARGS(param),
+
+ TP_STRUCT__entry(
+ __string(option, param->key)
+ ),
+
+ TP_fast_assign(
+ __assign_str(option);
+ ),
+
+ TP_printk("option %s", __get_str(option))
+);
+
+TRACE_EVENT(nfs_mount_path,
+ TP_PROTO(
+ const char *path
+ ),
+
+ TP_ARGS(path),
+
+ TP_STRUCT__entry(
+ __string(path, path)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path);
+ ),
+
+ TP_printk("path='%s'", __get_str(path))
+);
+
+TRACE_EVENT(nfs_local_open_fh,
+ TP_PROTO(
+ const struct nfs_fh *fh,
+ fmode_t fmode,
+ int error
+ ),
+
+ TP_ARGS(fh, fmode, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(u32, fhandle)
+ __field(unsigned int, fmode)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->fmode = (__force unsigned int)fmode;
+ ),
+
+ TP_printk(
+ "fhandle=0x%08x mode=%s result=%d",
+ __entry->fhandle,
+ show_fs_fmode_flags(__entry->fmode),
+ __entry->error
+ )
+);
+
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
@@ -1427,9 +1952,8 @@ DECLARE_EVENT_CLASS(nfs_xdr_event,
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->version = task->tk_client->cl_vers;
__entry->error = error;
- __assign_str(program,
- task->tk_client->cl_program->name);
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
+ __assign_str(program);
+ __assign_str(procedure);
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ad7f83dc9a2d..6e69ce43a13f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -21,16 +21,54 @@
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>
+#include <linux/filelock.h>
#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
+struct nfs_page_iter_page {
+ const struct nfs_page *req;
+ size_t count;
+};
+
+static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
+ const struct nfs_page *req)
+{
+ i->req = req;
+ i->count = 0;
+}
+
+static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
+{
+ const struct nfs_page *req = i->req;
+ size_t tmp = i->count + sz;
+
+ i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
+}
+
+static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
+{
+ const struct nfs_page *req = i->req;
+ struct page *page;
+
+ if (i->count != req->wb_bytes) {
+ size_t base = i->count + req->wb_pgbase;
+ size_t len = PAGE_SIZE - offset_in_page(base);
+
+ page = nfs_page_to_page(req, base);
+ nfs_page_iter_page_advance(i, len);
+ return page;
+ }
+ return NULL;
+}
+
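The iterator above walks the pages backing a single nfs_page one page at a time, clamping at wb_bytes; nfs_generic_pgio() further down uses it to fill the pagevec. A minimal usage sketch, not part of the patch: walk_request_pages() and consume_page() are invented for illustration.

static void walk_request_pages(const struct nfs_page *req,
			       void (*consume_page)(struct page *))
{
	struct nfs_page_iter_page iter;
	struct page *page;

	nfs_page_iter_page_init(&iter, req);
	/* Returns each backing page in turn, NULL once wb_bytes is consumed. */
	while ((page = nfs_page_iter_page_get(&iter)) != NULL)
		consume_page(page);
}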
static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
@@ -68,6 +106,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
hdr->good_bytes = mirror->pg_count;
hdr->io_completion = desc->pg_io_completion;
hdr->dreq = desc->pg_dreq;
+ nfs_netfs_set_pgio_header(hdr, desc);
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
@@ -90,10 +129,10 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
}
}
-static inline struct nfs_page *
-nfs_page_alloc(void)
+static inline struct nfs_page *nfs_page_alloc(void)
{
- struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+ struct nfs_page *p =
+ kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
if (p)
INIT_LIST_HEAD(&p->wb_list);
return p;
@@ -149,102 +188,6 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/*
- * nfs_page_lock_head_request - page lock the head of the page group
- * @req: any member of the page group
- */
-struct nfs_page *
-nfs_page_group_lock_head(struct nfs_page *req)
-{
- struct nfs_page *head = req->wb_head;
-
- while (!nfs_lock_request(head)) {
- int ret = nfs_wait_on_request(head);
- if (ret < 0)
- return ERR_PTR(ret);
- }
- if (head != req)
- kref_get(&head->wb_kref);
- return head;
-}
-
-/*
- * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
- * @head: head request of page group, must be holding head lock
- * @req: request that couldn't lock and needs to wait on the req bit lock
- *
- * This is a helper function for nfs_lock_and_join_requests
- * returns 0 on success, < 0 on error.
- */
-static void
-nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
-{
- struct nfs_page *tmp;
-
- /* relinquish all the locks successfully grabbed this run */
- for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
- if (!kref_read(&tmp->wb_kref))
- continue;
- nfs_unlock_and_release_request(tmp);
- }
-}
-
-/*
- * nfs_page_group_lock_subreq - try to lock a subrequest
- * @head: head request of page group
- * @subreq: request to lock
- *
- * This is a helper function for nfs_lock_and_join_requests which
- * must be called with the head request and page group both locked.
- * On error, it returns with the page group unlocked.
- */
-static int
-nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
-{
- int ret;
-
- if (!kref_get_unless_zero(&subreq->wb_kref))
- return 0;
- while (!nfs_lock_request(subreq)) {
- nfs_page_group_unlock(head);
- ret = nfs_wait_on_request(subreq);
- if (!ret)
- ret = nfs_page_group_lock(head);
- if (ret < 0) {
- nfs_unroll_locks(head, subreq);
- nfs_release_request(subreq);
- return ret;
- }
- }
- return 0;
-}
-
-/*
- * nfs_page_group_lock_subrequests - try to lock the subrequests
- * @head: head request of page group
- *
- * This is a helper function for nfs_lock_and_join_requests which
- * must be called with the head request locked.
- */
-int nfs_page_group_lock_subrequests(struct nfs_page *head)
-{
- struct nfs_page *subreq;
- int ret;
-
- ret = nfs_page_group_lock(head);
- if (ret < 0)
- return ret;
- /* lock each request in the page group */
- for (subreq = head->wb_this_page; subreq != head;
- subreq = subreq->wb_this_page) {
- ret = nfs_page_group_lock_subreq(head, subreq);
- if (ret < 0)
- return ret;
- }
- nfs_page_group_unlock(head);
- return 0;
-}
-
-/*
* nfs_page_set_headlock - set the request PG_HEADLOCK
* @req: request that is to be locked
*
@@ -310,13 +253,14 @@ nfs_page_group_unlock(struct nfs_page *req)
nfs_page_clear_headlock(req);
}
-/*
- * nfs_page_group_sync_on_bit_locked
+/**
+ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
+ * @req: request in page group
+ * @bit: PG_* bit that is used to sync page group
*
* must be called with page group lock held
*/
-static bool
-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
struct nfs_page *head = req->wb_head;
struct nfs_page *tmp;
@@ -390,7 +334,7 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
* has extra ref from the write/commit path to handle handoff
* between write and commit lists. */
if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
- inode = page_file_mapping(req->wb_page)->host;
+ inode = nfs_page_to_inode(req);
set_bit(PG_INODE_REF, &req->wb_flags);
kref_get(&req->wb_kref);
atomic_long_inc(&NFS_I(inode)->nrequests);
@@ -430,10 +374,9 @@ out:
nfs_release_request(head);
}
-static struct nfs_page *
-__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
- unsigned int pgbase, unsigned int offset,
- unsigned int count)
+static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
+ unsigned int pgbase, pgoff_t index,
+ unsigned int offset, unsigned int count)
{
struct nfs_page *req;
struct nfs_open_context *ctx = l_ctx->open_context;
@@ -452,42 +395,90 @@ __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
/* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
- req->wb_page = page;
- if (page) {
- req->wb_index = page_index(page);
- get_page(page);
- }
- req->wb_offset = offset;
- req->wb_pgbase = pgbase;
- req->wb_bytes = count;
+ req->wb_pgbase = pgbase;
+ req->wb_index = index;
+ req->wb_offset = offset;
+ req->wb_bytes = count;
kref_init(&req->wb_kref);
req->wb_nio = 0;
return req;
}
+static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
+{
+ if (folio != NULL) {
+ req->wb_folio = folio;
+ folio_get(folio);
+ set_bit(PG_FOLIO, &req->wb_flags);
+ }
+}
+
+static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
+{
+ if (page != NULL) {
+ req->wb_page = page;
+ get_page(page);
+ }
+}
+
/**
- * nfs_create_request - Create an NFS read/write request.
+ * nfs_page_create_from_page - Create an NFS read/write request.
* @ctx: open context to use
* @page: page to write
- * @offset: starting offset within the page for the write
+ * @pgbase: starting offset within the page for the write
+ * @offset: file offset for the write
* @count: number of bytes to read/write
*
* The page must be locked by the caller. This makes sure we never
* create two different requests for the same page.
* User should ensure it is safe to sleep in this function.
*/
-struct nfs_page *
-nfs_create_request(struct nfs_open_context *ctx, struct page *page,
- unsigned int offset, unsigned int count)
+struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+ struct page *page,
+ unsigned int pgbase, loff_t offset,
+ unsigned int count)
{
struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
struct nfs_page *ret;
if (IS_ERR(l_ctx))
return ERR_CAST(l_ctx);
- ret = __nfs_create_request(l_ctx, page, offset, offset, count);
- if (!IS_ERR(ret))
+ ret = nfs_page_create(l_ctx, pgbase, offset >> PAGE_SHIFT,
+ offset_in_page(offset), count);
+ if (!IS_ERR(ret)) {
+ nfs_page_assign_page(ret, page);
nfs_page_group_init(ret, NULL);
+ }
+ nfs_put_lock_context(l_ctx);
+ return ret;
+}
+
+/**
+ * nfs_page_create_from_folio - Create an NFS read/write request.
+ * @ctx: open context to use
+ * @folio: folio to write
+ * @offset: starting offset within the folio for the write
+ * @count: number of bytes to read/write
+ *
+ * The page must be locked by the caller. This makes sure we never
+ * create two different requests for the same page.
+ * User should ensure it is safe to sleep in this function.
+ */
+struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+ struct folio *folio,
+ unsigned int offset,
+ unsigned int count)
+{
+ struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
+ struct nfs_page *ret;
+
+ if (IS_ERR(l_ctx))
+ return ERR_CAST(l_ctx);
+ ret = nfs_page_create(l_ctx, offset, folio->index, offset, count);
+ if (!IS_ERR(ret)) {
+ nfs_page_assign_folio(ret, folio);
+ nfs_page_group_init(ret, NULL);
+ }
nfs_put_lock_context(l_ctx);
return ret;
}
@@ -500,10 +491,16 @@ nfs_create_subreq(struct nfs_page *req,
{
struct nfs_page *last;
struct nfs_page *ret;
+ struct folio *folio = nfs_page_to_folio(req);
+ struct page *page = nfs_page_to_page(req, pgbase);
- ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
- pgbase, offset, count);
+ ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
+ offset, count);
if (!IS_ERR(ret)) {
+ if (folio)
+ nfs_page_assign_folio(ret, folio);
+ else
+ nfs_page_assign_page(ret, page);
/* find the last request */
for (last = req->wb_head;
last->wb_this_page != req->wb_head;
@@ -511,7 +508,6 @@ nfs_create_subreq(struct nfs_page *req,
;
nfs_lock_request(ret);
- ret->wb_index = req->wb_index;
nfs_page_group_init(ret, last);
ret->wb_nio = req->wb_nio;
}
@@ -550,11 +546,16 @@ void nfs_unlock_and_release_request(struct nfs_page *req)
*/
static void nfs_clear_request(struct nfs_page *req)
{
+ struct folio *folio = nfs_page_to_folio(req);
struct page *page = req->wb_page;
struct nfs_lock_context *l_ctx = req->wb_lock_context;
struct nfs_open_context *ctx;
- if (page != NULL) {
+ if (folio != NULL) {
+ folio_put(folio);
+ req->wb_folio = NULL;
+ clear_bit(PG_FOLIO, &req->wb_flags);
+ } else if (page != NULL) {
put_page(page);
req->wb_page = NULL;
}
@@ -598,25 +599,6 @@ void nfs_release_request(struct nfs_page *req)
}
EXPORT_SYMBOL_GPL(nfs_release_request);
-/**
- * nfs_wait_on_request - Wait for a request to complete.
- * @req: request to wait upon.
- *
- * Interruptible by fatal signals only.
- * The user is responsible for holding a count on the request.
- */
-int
-nfs_wait_on_request(struct nfs_page *req)
-{
- if (!test_bit(PG_BUSY, &req->wb_flags))
- return 0;
- set_bit(PG_CONTENDED2, &req->wb_flags);
- smp_mb__after_atomic();
- return wait_on_bit_io(&req->wb_flags, PG_BUSY,
- TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL_GPL(nfs_wait_on_request);
-
/*
* nfs_generic_pg_test - determine if requests can be coalesced
* @desc: pointer to descriptor
@@ -692,13 +674,14 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
* @hdr: The pageio hdr
+ * @pgbase: base byte offset into the first page of the I/O data
* @count: Number of bytes to read
* @how: How to commit data (writes only)
* @cinfo: Commit information for the call (writes only)
*/
-static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
- unsigned int count,
- int how, struct nfs_commit_info *cinfo)
+static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, unsigned int pgbase,
+ unsigned int count, int how,
+ struct nfs_commit_info *cinfo)
{
struct nfs_page *req = hdr->req;
@@ -709,7 +692,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
hdr->args.offset = req_offset(req);
/* pnfs_set_layoutcommit needs this */
hdr->mds_offset = hdr->args.offset;
- hdr->args.pgbase = req->wb_pgbase;
+ hdr->args.pgbase = pgbase;
hdr->args.pages = hdr->page_array.pagevec;
hdr->args.count = count;
hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
@@ -749,7 +732,8 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags)
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -767,6 +751,9 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
.flags = RPC_TASK_ASYNC | flags,
};
+ if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
dprintk("NFS: initiated pgio call "
@@ -776,6 +763,10 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
hdr->args.count,
(unsigned long long)hdr->args.offset);
+ if (localio)
+ return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
+ localio, hdr, call_ops);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -843,6 +834,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_lseg = NULL;
desc->pg_io_completion = NULL;
desc->pg_dreq = NULL;
+ nfs_netfs_reset_pageio_descriptor(desc);
desc->pg_bsize = bsize;
desc->pg_mirror_count = 1;
@@ -892,9 +884,10 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
struct nfs_commit_info cinfo;
struct nfs_page_array *pg_array = &hdr->page_array;
unsigned int pagecount, pageused;
- gfp_t gfp_flags = GFP_KERNEL;
+ unsigned int pg_base = offset_in_page(mirror->pg_base);
+ gfp_t gfp_flags = nfs_io_gfp_mask();
- pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
+ pagecount = nfs_page_array_len(pg_base, mirror->pg_count);
pg_array->npages = pagecount;
if (pagecount <= ARRAY_SIZE(pg_array->page_array))
@@ -914,16 +907,26 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
last_page = NULL;
pageused = 0;
while (!list_empty(head)) {
+ struct nfs_page_iter_page i;
+ struct page *page;
+
req = nfs_list_entry(head->next);
nfs_list_move_request(req, &hdr->pages);
- if (!last_page || last_page != req->wb_page) {
- pageused++;
- if (pageused > pagecount)
- break;
- *pages++ = last_page = req->wb_page;
+ if (req->wb_pgbase == 0)
+ last_page = NULL;
+
+ nfs_page_iter_page_init(&i, req);
+ while ((page = nfs_page_iter_page_get(&i)) != NULL) {
+ if (last_page != page) {
+ pageused++;
+ if (pageused > pagecount)
+ goto full;
+ *pages++ = last_page = page;
+ }
}
}
+full:
if (WARN_ON_ONCE(pageused != pagecount)) {
nfs_pgio_error(hdr);
desc->pg_error = -EINVAL;
@@ -935,7 +938,8 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
+ nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags,
+ &cinfo);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
@@ -955,6 +959,13 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0) {
+ struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
+
+ struct nfsd_file *localio =
+ nfs_local_open_fh(clp, hdr->cred, hdr->args.fh,
+ &hdr->args.context->nfl,
+ hdr->args.context->mode);
+
if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
@@ -963,7 +974,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
NFS_PROTO(hdr->inode),
desc->pg_rpc_callops,
desc->pg_ioflags,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags,
+ localio);
}
return ret;
}
@@ -979,7 +991,7 @@ nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
desc->pg_mirrors_dynamic = NULL;
if (mirror_count == 1)
return desc->pg_mirrors_static;
- ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
+ ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
if (ret != NULL) {
for (i = 0; i < mirror_count; i++)
nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
@@ -1031,6 +1043,24 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
return l1->lockowner == l2->lockowner;
}
+static bool nfs_page_is_contiguous(const struct nfs_page *prev,
+ const struct nfs_page *req)
+{
+ size_t prev_end = prev->wb_pgbase + prev->wb_bytes;
+
+ if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+ return false;
+ if (req->wb_pgbase == 0)
+ return prev_end == nfs_page_max_length(prev);
+ if (req->wb_pgbase == prev_end) {
+ struct folio *folio = nfs_page_to_folio(req);
+ if (folio)
+ return folio == nfs_page_to_folio(prev);
+ return req->wb_page == prev->wb_page;
+ }
+ return false;
+}
+
/**
* nfs_coalesce_size - test two requests for compatibility
* @prev: pointer to nfs_page
@@ -1052,23 +1082,15 @@ static unsigned int nfs_coalesce_size(struct nfs_page *prev,
if (prev) {
if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
return 0;
- flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
+ flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
if (flctx != NULL &&
!(list_empty_careful(&flctx->flc_posix) &&
list_empty_careful(&flctx->flc_flock)) &&
!nfs_match_lock_context(req->wb_lock_context,
prev->wb_lock_context))
return 0;
- if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+ if (!nfs_page_is_contiguous(prev, req))
return 0;
- if (req->wb_page == prev->wb_page) {
- if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
- return 0;
- } else {
- if (req->wb_pgbase != 0 ||
- prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
- return 0;
- }
}
return pgio->pg_ops->pg_test(pgio, prev, req);
}
@@ -1218,6 +1240,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
do {
list_splice_init(&mirror->pg_list, &head);
+ mirror->pg_recoalesce = 0;
while (!list_empty(&head)) {
struct nfs_page *req;
@@ -1356,6 +1379,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
desc->pg_io_completion = hdr->io_completion;
desc->pg_dreq = hdr->dreq;
+ nfs_netfs_set_pageio_descriptor(desc, hdr);
list_splice_init(&hdr->pages, &pages);
while (!list_empty(&pages)) {
struct nfs_page *req = nfs_list_entry(pages.next);
@@ -1407,16 +1431,26 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
struct nfs_pgio_mirror *mirror;
struct nfs_page *prev;
+ struct folio *folio;
u32 midx;
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
mirror = nfs_pgio_get_mirror(desc, midx);
if (!list_empty(&mirror->pg_list)) {
prev = nfs_list_entry(mirror->pg_list.prev);
- if (index != prev->wb_index + 1) {
- nfs_pageio_complete(desc);
- break;
- }
+ folio = nfs_page_to_folio(prev);
+ if (folio) {
+ if (index == folio_next_index(folio))
+ continue;
+ } else if (index == prev->wb_index + 1)
+ continue;
+ /*
+ * We will submit more requests after these. Indicate
+ * this to the underlying layers.
+ */
+ desc->pg_moreio = 1;
+ nfs_pageio_complete(desc);
+ break;
}
}
}
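
Aside (illustrative, not part of the patch): the new nfs_page_is_contiguous() helper above folds the old page-based coalescing checks into a single predicate. The standalone sketch below models that rule with hypothetical userspace types, purely to show the three cases — byte-contiguity in the file, a request that starts a fresh page/folio right after a full one, and one that continues inside the same page/folio. It is not kernel code.

/* Illustrative sketch only: models the coalescing rule of
 * nfs_page_is_contiguous() with hypothetical userspace types. */
#include <stdbool.h>
#include <stdio.h>

struct fake_req {
	unsigned long long offset;  /* byte offset in the file (req_offset()) */
	unsigned int pgbase;        /* start of the data within the page/folio */
	unsigned int bytes;         /* number of bytes in the request */
	unsigned int max_len;       /* page/folio size (nfs_page_max_length()) */
	const void *container;      /* identity of the backing page/folio */
};

static bool fake_is_contiguous(const struct fake_req *prev,
			       const struct fake_req *req)
{
	unsigned int prev_end = prev->pgbase + prev->bytes;

	/* The requests must be byte-contiguous in the file... */
	if (req->offset != prev->offset + prev->bytes)
		return false;
	/* ...and either start a new page/folio right after a full one... */
	if (req->pgbase == 0)
		return prev_end == prev->max_len;
	/* ...or continue within the same page/folio. */
	if (req->pgbase == prev_end)
		return req->container == prev->container;
	return false;
}

int main(void)
{
	struct fake_req a = { .offset = 0, .pgbase = 0, .bytes = 4096,
			      .max_len = 4096, .container = "A" };
	struct fake_req b = { .offset = 4096, .pgbase = 0, .bytes = 512,
			      .max_len = 4096, .container = "B" };

	printf("coalesce: %d\n", fake_is_contiguous(&a, &b)); /* prints 1 */
	return 0;
}
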
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 7c9090a28e5c..f157d43d1312 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -61,6 +61,7 @@ static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list);
+static int pnfs_layout_return_on_reboot(struct pnfs_layout_hdr *lo);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
@@ -92,6 +93,17 @@ find_pnfs_driver(u32 id)
return local;
}
+const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id)
+{
+ return find_pnfs_driver(id);
+}
+
+void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld)
+{
+ if (ld)
+ module_put(ld->owner);
+}
+
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
@@ -294,7 +306,6 @@ void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
struct inode *inode;
- unsigned long i_state;
if (!lo)
return;
@@ -305,12 +316,11 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
if (!list_empty(&lo->plh_segs))
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
- i_state = inode->i_state;
+ /* Notify pnfs_destroy_layout_final() that we're done */
+ if (inode_state_read(inode) & (I_FREEING | I_CLEAR))
+ wake_up_var_locked(lo, &inode->i_lock);
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);
- /* Notify pnfs_destroy_layout_final() that we're done */
- if (i_state & (I_FREEING | I_CLEAR))
- wake_up_var(lo);
}
}
@@ -458,12 +468,25 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
pnfs_clear_lseg_state(lseg, lseg_list);
pnfs_clear_layoutreturn_info(lo);
pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
+ set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
!test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
pnfs_clear_layoutreturn_waitbit(lo);
return !list_empty(&lo->plh_segs);
}
+static int pnfs_mark_layout_stateid_return(struct pnfs_layout_hdr *lo,
+ struct list_head *lseg_list,
+ enum pnfs_iomode iomode, u32 seq)
+{
+ struct pnfs_layout_range range = {
+ .iomode = iomode,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ return pnfs_mark_matching_lsegs_return(lo, lseg_list, &range, seq);
+}
+
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
@@ -499,7 +522,7 @@ pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
spin_lock(&inode->i_lock);
pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
- pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
+ pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
@@ -698,6 +721,7 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
u32 seq)
{
struct pnfs_layout_segment *lseg, *next;
+ struct nfs_server *server = NFS_SERVER(lo->plh_inode);
int remaining = 0;
dprintk("%s:Begin lo %p\n", __func__, lo);
@@ -710,13 +734,23 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
"offset %llu length %llu\n", __func__,
lseg, lseg->pls_range.iomode, lseg->pls_seq,
lseg->pls_range.offset, lseg->pls_range.length);
- if (!mark_lseg_invalid(lseg, tmp_list))
- remaining++;
+ if (mark_lseg_invalid(lseg, tmp_list))
+ continue;
+ remaining++;
+ pnfs_lseg_cancel_io(server, lseg);
}
dprintk("%s:Return %i\n", __func__, remaining);
return remaining;
}
+static void pnfs_reset_return_info(struct pnfs_layout_hdr *lo)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &lo->plh_return_segs, pls_list)
+ pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
+}
+
static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
struct list_head *free_me,
@@ -773,23 +807,17 @@ void pnfs_destroy_layout(struct nfs_inode *nfsi)
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
-static bool pnfs_layout_removed(struct nfs_inode *nfsi,
- struct pnfs_layout_hdr *lo)
-{
- bool ret;
-
- spin_lock(&nfsi->vfs_inode.i_lock);
- ret = nfsi->layout != lo;
- spin_unlock(&nfsi->vfs_inode.i_lock);
- return ret;
-}
-
void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
+ struct inode *inode = &nfsi->vfs_inode;
- if (lo)
- wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
+ if (lo) {
+ spin_lock(&inode->i_lock);
+ wait_var_event_spinlock(lo, nfsi->layout != lo,
+ &inode->i_lock);
+ spin_unlock(&inode->i_lock);
+ }
}
static bool
@@ -831,8 +859,6 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
break;
inode = pnfs_grab_inode_layout_hdr(lo);
if (inode != NULL) {
- if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
- list_del_rcu(&lo->plh_layouts);
if (pnfs_layout_add_bulk_destroy_list(inode,
layout_list))
continue;
@@ -853,7 +879,7 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
- bool is_bulk_recall)
+ enum pnfs_layout_destroy_mode mode)
{
struct pnfs_layout_hdr *lo;
struct inode *inode;
@@ -871,8 +897,11 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
spin_lock(&inode->i_lock);
list_del_init(&lo->plh_bulk_destroy);
- if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
- if (is_bulk_recall)
+ if (mode == PNFS_LAYOUT_FILE_BULK_RETURN) {
+ pnfs_mark_layout_stateid_return(lo, &lseg_list,
+ IOMODE_ANY, 0);
+ } else if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
+ if (mode == PNFS_LAYOUT_BULK_RETURN)
set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
ret = -EAGAIN;
}
@@ -886,10 +915,8 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
return ret;
}
-int
-pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
- struct nfs_fsid *fsid,
- bool is_recall)
+int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
+ enum pnfs_layout_destroy_mode mode)
{
struct nfs_server *server;
LIST_HEAD(layout_list);
@@ -908,33 +935,40 @@ restart:
rcu_read_unlock();
spin_unlock(&clp->cl_lock);
- if (list_empty(&layout_list))
- return 0;
- return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
+ return pnfs_layout_free_bulk_destroy_list(&layout_list, mode);
}
-int
-pnfs_destroy_layouts_byclid(struct nfs_client *clp,
- bool is_recall)
+static void pnfs_layout_build_destroy_list_byclient(struct nfs_client *clp,
+ struct list_head *list)
{
struct nfs_server *server;
- LIST_HEAD(layout_list);
spin_lock(&clp->cl_lock);
rcu_read_lock();
restart:
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- if (pnfs_layout_bulk_destroy_byserver_locked(clp,
- server,
- &layout_list) != 0)
+ if (pnfs_layout_bulk_destroy_byserver_locked(clp, server,
+ list) != 0)
goto restart;
}
rcu_read_unlock();
spin_unlock(&clp->cl_lock);
+}
- if (list_empty(&layout_list))
- return 0;
- return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
+static int pnfs_layout_do_destroy_byclid(struct nfs_client *clp,
+ struct list_head *list,
+ enum pnfs_layout_destroy_mode mode)
+{
+ pnfs_layout_build_destroy_list_byclient(clp, list);
+ return pnfs_layout_free_bulk_destroy_list(list, mode);
+}
+
+int pnfs_layout_destroy_byclid(struct nfs_client *clp,
+ enum pnfs_layout_destroy_mode mode)
+{
+ LIST_HEAD(layout_list);
+
+ return pnfs_layout_do_destroy_byclid(clp, &layout_list, mode);
}
/*
@@ -947,7 +981,68 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
nfs4_deviceid_mark_client_invalid(clp);
nfs4_deviceid_purge_client(clp);
- pnfs_destroy_layouts_byclid(clp, false);
+ pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_INVALIDATE);
+}
+
+static void pnfs_layout_build_recover_list_byclient(struct nfs_client *clp,
+ struct list_head *list)
+{
+ struct nfs_server *server;
+
+ spin_lock(&clp->cl_lock);
+ rcu_read_lock();
+restart:
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!(server->caps & NFS_CAP_REBOOT_LAYOUTRETURN))
+ continue;
+ if (pnfs_layout_bulk_destroy_byserver_locked(clp, server,
+ list) != 0)
+ goto restart;
+ }
+ rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
+}
+
+static int pnfs_layout_bulk_list_reboot(struct list_head *list)
+{
+ struct pnfs_layout_hdr *lo;
+ struct nfs_server *server;
+ int ret;
+
+ list_for_each_entry(lo, list, plh_bulk_destroy) {
+ server = NFS_SERVER(lo->plh_inode);
+ ret = pnfs_layout_return_on_reboot(lo);
+ switch (ret) {
+ case 0:
+ continue;
+ case -NFS4ERR_BAD_STATEID:
+ server->caps &= ~NFS_CAP_REBOOT_LAYOUTRETURN;
+ break;
+ case -NFS4ERR_NO_GRACE:
+ break;
+ default:
+ goto err;
+ }
+ break;
+ }
+ return 0;
+err:
+ return ret;
+}
+
+int pnfs_layout_handle_reboot(struct nfs_client *clp)
+{
+ LIST_HEAD(list);
+ int ret = 0, ret2;
+
+ pnfs_layout_build_recover_list_byclient(clp, &list);
+ if (!list_empty(&list))
+ ret = pnfs_layout_bulk_list_reboot(&list);
+ ret2 = pnfs_layout_do_destroy_byclid(clp, &list,
+ PNFS_LAYOUT_INVALIDATE);
+ if (!ret)
+ ret = ret2;
+ return (ret == 0) ? 0 : -EAGAIN;
}
static void
@@ -1148,6 +1243,33 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
}
}
+static void
+pnfs_layoutreturn_retry_later_locked(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *arg_stateid,
+ const struct pnfs_layout_range *range,
+ struct list_head *freeme)
+{
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ pnfs_reset_return_info(lo);
+ else
+ pnfs_mark_layout_stateid_invalid(lo, freeme);
+ pnfs_clear_layoutreturn_waitbit(lo);
+}
+
+void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *arg_stateid,
+ const struct pnfs_layout_range *range)
+{
+ struct inode *inode = lo->plh_inode;
+ LIST_HEAD(freeme);
+
+ spin_lock(&inode->i_lock);
+ pnfs_layoutreturn_retry_later_locked(lo, arg_stateid, range, &freeme);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
+}
+
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
const nfs4_stateid *arg_stateid,
const struct pnfs_layout_range *range,
@@ -1157,15 +1279,15 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
LIST_HEAD(freeme);
spin_lock(&inode->i_lock);
- if (!pnfs_layout_is_valid(lo) ||
- !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ if (!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
goto out_unlock;
- if (stateid) {
+ if (stateid && pnfs_layout_is_valid(lo)) {
u32 seq = be32_to_cpu(arg_stateid->seqid);
pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
pnfs_free_returned_lsegs(lo, &freeme, range, seq);
pnfs_set_layout_stateid(lo, stateid, NULL, true);
+ pnfs_reset_return_info(lo);
} else
pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
@@ -1182,7 +1304,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
enum pnfs_iomode *iomode)
{
/* Serialise LAYOUTGET/LAYOUTRETURN */
- if (atomic_read(&lo->plh_outstanding) != 0)
+ if (atomic_read(&lo->plh_outstanding) != 0 && lo->plh_return_seq == 0)
return false;
if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
return false;
@@ -1224,7 +1346,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
const nfs4_stateid *stateid,
const struct cred **pcred,
enum pnfs_iomode iomode,
- bool sync)
+ unsigned int flags)
{
struct inode *ino = lo->plh_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
@@ -1233,7 +1355,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
int status = 0;
*pcred = NULL;
- lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
+ lrp = kzalloc(sizeof(*lrp), nfs_io_gfp_mask());
if (unlikely(lrp == NULL)) {
status = -ENOMEM;
spin_lock(&ino->i_lock);
@@ -1251,33 +1373,21 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
if (ld->prepare_layoutreturn)
ld->prepare_layoutreturn(&lrp->args);
- status = nfs4_proc_layoutreturn(lrp, sync);
+ status = nfs4_proc_layoutreturn(lrp, flags);
out:
dprintk("<-- %s status: %d\n", __func__, status);
return status;
}
-static bool
-pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
- enum pnfs_iomode iomode,
- u32 seq)
-{
- struct pnfs_layout_range recall_range = {
- .length = NFS4_MAX_UINT64,
- .iomode = iomode,
- };
- return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
- &recall_range, seq) != -EBUSY;
-}
-
/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
return false;
- return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
- lo->plh_return_seq);
+ return pnfs_mark_layout_stateid_return(lo, &lo->plh_return_segs,
+ lo->plh_return_iomode,
+ lo->plh_return_seq) != EBUSY;
}
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
@@ -1297,7 +1407,8 @@ static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
spin_unlock(&inode->i_lock);
if (send) {
/* Send an async layoutreturn so we don't deadlock */
- pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode,
+ PNFS_FL_LAYOUTRETURN_ASYNC);
}
} else
spin_unlock(&inode->i_lock);
@@ -1364,7 +1475,8 @@ _pnfs_return_layout(struct inode *ino)
send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
spin_unlock(&ino->i_lock);
if (send)
- status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
+ status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY,
+ 0);
out_wait_layoutreturn:
wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
out_put_layout_hdr:
@@ -1402,6 +1514,24 @@ pnfs_commit_and_return_layout(struct inode *inode)
return ret;
}
+static int pnfs_layout_return_on_reboot(struct pnfs_layout_hdr *lo)
+{
+ struct inode *inode = lo->plh_inode;
+ const struct cred *cred;
+
+ spin_lock(&inode->i_lock);
+ if (!pnfs_layout_is_valid(lo)) {
+ spin_unlock(&inode->i_lock);
+ return 0;
+ }
+ cred = get_cred(lo->plh_lc_cred);
+ pnfs_get_layout_hdr(lo);
+ spin_unlock(&inode->i_lock);
+
+ return pnfs_send_layoutreturn(lo, &zero_stateid, &cred, IOMODE_ANY,
+ PNFS_FL_LAYOUTRETURN_PRIVILEGED);
+}
+
bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
@@ -1505,7 +1635,7 @@ out_noroc:
return true;
}
if (layoutreturn)
- pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
+ pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, 0);
pnfs_put_layout_hdr(lo);
return false;
}
@@ -1527,6 +1657,18 @@ int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
/* Was there an RPC level error? If not, retry */
if (task->tk_rpc_status == 0)
break;
+ /*
+ * Is there a fatal network level error?
+ * If so release the layout, but flag the error.
+ */
+ if ((task->tk_rpc_status == -ENETDOWN ||
+ task->tk_rpc_status == -ENETUNREACH) &&
+ task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
+ *ret = 0;
+ (*respp)->lrs_present = 0;
+ retval = -EIO;
+ break;
+ }
/* If the call was not sent, let caller handle it */
if (!RPC_WAS_SENT(task))
return 0;
@@ -1555,22 +1697,24 @@ int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
}
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- int ret)
+ struct nfs4_layoutreturn_res *res, int ret)
{
struct pnfs_layout_hdr *lo = args->layout;
struct inode *inode = args->inode;
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
+ LIST_HEAD(freeme);
switch (ret) {
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_NOMATCHING_LAYOUT:
spin_lock(&inode->i_lock);
- if (pnfs_layout_is_valid(lo) &&
- nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
- pnfs_set_plh_return_info(lo, args->range.iomode, 0);
- pnfs_clear_layoutreturn_waitbit(lo);
+ pnfs_layoutreturn_retry_later_locked(lo, &args->stateid,
+ &args->range, &freeme);
spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
break;
case 0:
if (res->lrs_present)
@@ -1896,7 +2040,7 @@ static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
pnfs_layoutcommit_inode(lo->plh_inode, false);
return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
nfs_wait_bit_killable,
- TASK_KILLABLE);
+ TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
}
static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1906,8 +2050,11 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{
- if (atomic_dec_and_test(&lo->plh_outstanding))
- wake_up_var(&lo->plh_outstanding);
+ if (atomic_dec_and_test(&lo->plh_outstanding) &&
+ test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
+ }
}
static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
@@ -1964,7 +2111,9 @@ pnfs_update_layout(struct inode *ino,
struct pnfs_layout_segment *lseg = NULL;
struct nfs4_layoutget *lgp;
nfs4_stateid stateid;
- long timeout = 0;
+ struct nfs4_exception exception = {
+ .inode = ino,
+ };
unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
bool first;
@@ -1981,6 +2130,14 @@ pnfs_update_layout(struct inode *ino,
}
lookup_again:
+ if (!nfs4_valid_open_stateid(ctx->state)) {
+ trace_pnfs_update_layout(ino, pos, count,
+ iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_INVALID_OPEN);
+ lseg = ERR_PTR(-EIO);
+ goto out;
+ }
+
lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
if (IS_ERR(lseg))
goto out;
@@ -1989,6 +2146,7 @@ lookup_again:
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
spin_unlock(&ino->i_lock);
+ lseg = ERR_PTR(-ENOMEM);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_NOMEM);
goto out;
@@ -2013,11 +2171,11 @@ lookup_again:
* If the layout segment list is empty, but there are outstanding
* layoutget calls, then they might be subject to a layoutrecall.
*/
- if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
+ if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
atomic_read(&lo->plh_outstanding) != 0) {
spin_unlock(&ino->i_lock);
- lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
- !atomic_read(&lo->plh_outstanding)));
+ lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN,
+ TASK_KILLABLE));
if (IS_ERR(lseg))
goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
@@ -2117,6 +2275,7 @@ lookup_again:
lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
if (!lgp) {
+ lseg = ERR_PTR(-ENOMEM);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
PNFS_UPDATE_LAYOUT_NOMEM);
nfs_layoutget_end(lo);
@@ -2126,7 +2285,7 @@ lookup_again:
lgp->lo = lo;
pnfs_get_layout_hdr(lo);
- lseg = nfs4_proc_layoutget(lgp, &timeout);
+ lseg = nfs4_proc_layoutget(lgp, &exception);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
nfs_layoutget_end(lo);
@@ -2139,6 +2298,12 @@ lookup_again:
case -ERECALLCONFLICT:
case -EAGAIN:
break;
+ case -ENODATA:
+ /* The server returned NFS4ERR_LAYOUTUNAVAILABLE */
+ pnfs_layout_set_fail_bit(
+ lo, pnfs_iomode_to_fail_bit(iomode));
+ lseg = NULL;
+ goto out_put_layout_hdr;
default:
if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
@@ -2147,6 +2312,8 @@ lookup_again:
goto out_put_layout_hdr;
}
if (lseg) {
+ if (!exception.retry)
+ goto out_put_layout_hdr;
if (first)
pnfs_clear_first_layoutget(lo);
trace_pnfs_update_layout(ino, pos, count,
@@ -2206,7 +2373,7 @@ _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
struct pnfs_layout_hdr *lo;
spin_lock(&ino->i_lock);
- lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
+ lo = pnfs_find_alloc_layout(ino, ctx, nfs_io_gfp_mask());
if (!lo)
goto out_unlock;
if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
@@ -2249,8 +2416,8 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
lo = _pnfs_grab_empty_layout(ino, ctx);
if (!lo)
return;
- lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
- &rng, GFP_KERNEL);
+ lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid, &rng,
+ nfs_io_gfp_mask());
if (!lgp) {
pnfs_clear_first_layoutget(lo);
nfs_layoutget_end(lo);
@@ -2275,8 +2442,8 @@ static void _lgopen_prepare_floating(struct nfs4_opendata *data,
};
struct nfs4_layoutget *lgp;
- lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
- &rng, GFP_KERNEL);
+ lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid, &rng,
+ nfs_io_gfp_mask());
if (!lgp)
return;
data->lgp = lgp;
@@ -2394,7 +2561,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out_forget;
}
- if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
+ if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
+ !pnfs_is_first_layoutget(lo))
goto out_forget;
if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
@@ -2463,6 +2631,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
u32 seq)
{
struct pnfs_layout_segment *lseg, *next;
+ struct nfs_server *server = NFS_SERVER(lo->plh_inode);
int remaining = 0;
dprintk("%s:Begin lo %p\n", __func__, lo);
@@ -2485,6 +2654,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
continue;
remaining++;
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+ pnfs_lseg_cancel_io(server, lseg);
}
if (remaining) {
@@ -2527,7 +2697,8 @@ pnfs_mark_layout_for_return(struct inode *inode,
return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
spin_unlock(&inode->i_lock);
if (return_now)
- pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode,
+ PNFS_FL_LAYOUTRETURN_ASYNC);
} else {
spin_unlock(&inode->i_lock);
nfs_commit_inode(inode, 0);
@@ -2607,31 +2778,45 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
return mode == 0;
}
-static int
-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
+static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
+ void *data)
{
const struct pnfs_layout_range *range = data;
+ const struct cred *cred;
struct pnfs_layout_hdr *lo;
struct inode *inode;
+ nfs4_stateid stateid;
+ enum pnfs_iomode iomode;
+
restart:
rcu_read_lock();
list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
- if (!pnfs_layout_can_be_returned(lo) ||
+ inode = lo->plh_inode;
+ if (!inode || !pnfs_layout_can_be_returned(lo) ||
test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
continue;
- inode = lo->plh_inode;
spin_lock(&inode->i_lock);
- if (!pnfs_should_return_unused_layout(lo, range)) {
+ if (!lo->plh_inode ||
+ !pnfs_should_return_unused_layout(lo, range)) {
spin_unlock(&inode->i_lock);
continue;
}
+ pnfs_get_layout_hdr(lo);
+ pnfs_set_plh_return_info(lo, range->iomode, 0);
+ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ range, 0) != 0 ||
+ !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+ pnfs_put_layout_hdr(lo);
+ cond_resched();
+ goto restart;
+ }
spin_unlock(&inode->i_lock);
- inode = pnfs_grab_inode_layout_hdr(lo);
- if (!inode)
- continue;
rcu_read_unlock();
- pnfs_mark_layout_for_return(inode, range);
- iput(inode);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode,
+ PNFS_FL_LAYOUTRETURN_ASYNC);
+ pnfs_put_layout_hdr(lo);
cond_resched();
goto restart;
}
@@ -2653,51 +2838,40 @@ pnfs_layout_return_unused_byclid(struct nfs_client *clp,
&range);
}
+/* Check that pgio->pg_lseg is still valid and intersects the request;
+ * if not, put this pgio->pg_lseg away.
+ */
void
-pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
+pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
{
if (pgio->pg_lseg == NULL ||
- test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
+ (test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags) &&
+ pnfs_lseg_request_intersecting(pgio->pg_lseg, req)))
return;
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
-/*
- * Check for any intersection between the request and the pgio->pg_lseg,
- * and if none, put this pgio->pg_lseg away.
- */
-void
-pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
-{
- if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
- pnfs_put_lseg(pgio->pg_lseg);
- pgio->pg_lseg = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
-
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
u64 rd_size;
- pnfs_generic_pg_check_layout(pgio);
- pnfs_generic_pg_check_range(pgio, req);
+ pnfs_generic_pg_check_layout(pgio, req);
if (pgio->pg_lseg == NULL) {
if (pgio->pg_dreq == NULL)
rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
else
- rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
-
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- nfs_req_openctx(req),
- req_offset(req),
- rd_size,
- IOMODE_READ,
- false,
- GFP_KERNEL);
+ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq,
+ req_offset(req));
+
+ pgio->pg_lseg =
+ pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+ req_offset(req), rd_size,
+ IOMODE_READ, false,
+ nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -2715,16 +2889,12 @@ void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size)
{
- pnfs_generic_pg_check_layout(pgio);
- pnfs_generic_pg_check_range(pgio, req);
+ pnfs_generic_pg_check_layout(pgio, req);
if (pgio->pg_lseg == NULL) {
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- nfs_req_openctx(req),
- req_offset(req),
- wb_size,
- IOMODE_RW,
- false,
- GFP_KERNEL);
+ pgio->pg_lseg =
+ pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+ req_offset(req), wb_size, IOMODE_RW,
+ false, nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -2800,7 +2970,6 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
/* Resend all requests through the MDS */
nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
hdr->completion_ops);
- set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
@@ -3163,6 +3332,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
struct nfs_inode *nfsi = NFS_I(inode);
loff_t end_pos;
int status;
+ bool mark_as_dirty = false;
if (!pnfs_layoutcommit_outstanding(inode))
return 0;
@@ -3176,14 +3346,14 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
status = wait_on_bit_lock_action(&nfsi->flags,
NFS_INO_LAYOUTCOMMITTING,
nfs_wait_bit_killable,
- TASK_KILLABLE);
+ TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
if (status)
goto out;
}
status = -ENOMEM;
/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
- data = kzalloc(sizeof(*data), GFP_NOFS);
+ data = kzalloc(sizeof(*data), nfs_io_gfp_mask());
if (!data)
goto clear_layoutcommitting;
@@ -3214,19 +3384,23 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
if (ld->prepare_layoutcommit) {
status = ld->prepare_layoutcommit(&data->args);
if (status) {
- put_cred(data->cred);
+ if (status != -ENOSPC)
+ put_cred(data->cred);
spin_lock(&inode->i_lock);
set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
- goto out_unlock;
+ if (status != -ENOSPC)
+ goto out_unlock;
+ spin_unlock(&inode->i_lock);
+ mark_as_dirty = true;
}
}
status = nfs4_proc_layoutcommit(data, sync);
out:
- if (status)
+ if (status || mark_as_dirty)
mark_inode_dirty_sync(inode);
dprintk("<-- %s status %d\n", __func__, status);
return status;
@@ -3250,7 +3424,7 @@ struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
struct nfs4_threshold *thp;
- thp = kzalloc(sizeof(*thp), GFP_NOFS);
+ thp = kzalloc(sizeof(*thp), nfs_io_gfp_mask());
if (!thp) {
dprintk("%s mdsthreshold allocation failed\n", __func__);
return NULL;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index f4d7548d67b2..91ff877185c8 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -35,6 +35,7 @@
#include <linux/nfs_page.h>
#include <linux/workqueue.h>
+struct nfs4_exception;
struct nfs4_opendata;
enum {
@@ -59,6 +60,7 @@ struct nfs4_pnfs_ds {
struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
char *ds_remotestr; /* comma sep list of addrs */
struct list_head ds_addrs;
+ const struct net *ds_net;
struct nfs_client *ds_clp;
refcount_t ds_count;
unsigned long ds_state;
@@ -105,6 +107,7 @@ enum {
NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */
NFS_LAYOUT_HASHED, /* The layout visible */
+ NFS_LAYOUT_DRAIN,
};
enum layoutdriver_policy_flags {
@@ -116,6 +119,12 @@ enum layoutdriver_policy_flags {
PNFS_LAYOUTGET_ON_OPEN = 1 << 3,
};
+enum pnfs_layout_destroy_mode {
+ PNFS_LAYOUT_INVALIDATE = 0,
+ PNFS_LAYOUT_BULK_RETURN,
+ PNFS_LAYOUT_FILE_BULK_RETURN,
+};
+
struct nfs4_deviceid_node;
/* Per-layout driver specific registration structure */
@@ -125,7 +134,6 @@ struct pnfs_layoutdriver_type {
const char *name;
struct module *owner;
unsigned flags;
- unsigned max_deviceinfo_size;
unsigned max_layoutget_response;
int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *);
@@ -168,6 +176,8 @@ struct pnfs_layoutdriver_type {
void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data);
int (*prepare_layoutcommit) (struct nfs4_layoutcommit_args *args);
int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args);
+
+ void (*cancel_io)(struct pnfs_layout_segment *lseg);
};
struct pnfs_commit_ops {
@@ -189,8 +199,6 @@ struct pnfs_commit_ops {
int max);
void (*recover_commit_reqs) (struct list_head *list,
struct nfs_commit_info *cinfo);
- struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo,
- struct page *page);
};
struct pnfs_layout_hdr {
@@ -234,14 +242,22 @@ struct pnfs_devicelist {
extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
+extern const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id);
+extern void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld);
/* nfs4proc.c */
+#define PNFS_FL_LAYOUTRETURN_ASYNC (1U << 0)
+#define PNFS_FL_LAYOUTRETURN_PRIVILEGED (1U << 1)
+
extern size_t max_response_pages(struct nfs_server *server);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev,
const struct cred *cred);
-extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout);
-extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
+extern struct pnfs_layout_segment *
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
+ struct nfs4_exception *exception);
+extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp,
+ unsigned int flags);
/* pnfs.c */
void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo);
@@ -249,8 +265,7 @@ void pnfs_put_lseg(struct pnfs_layout_segment *lseg);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *);
void unset_pnfs_layoutdriver(struct nfs_server *);
-void pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio);
-void pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req);
+void pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio, struct nfs_page *req);
void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
@@ -266,11 +281,10 @@ void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
void pnfs_destroy_layout_final(struct nfs_inode *);
void pnfs_destroy_all_layouts(struct nfs_client *);
-int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
- struct nfs_fsid *fsid,
- bool is_recall);
-int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
- bool is_recall);
+int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
+ enum pnfs_layout_destroy_mode mode);
+int pnfs_layout_destroy_byclid(struct nfs_client *clp,
+ enum pnfs_layout_destroy_mode mode);
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode);
@@ -316,6 +330,9 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
enum pnfs_iomode iomode,
bool strict_iomode,
gfp_t gfp_flags);
+void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *arg_stateid,
+ const struct pnfs_layout_range *range);
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
const nfs4_stateid *arg_stateid,
const struct pnfs_layout_range *range,
@@ -337,6 +354,7 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
struct pnfs_layout_segment *lseg);
void pnfs_layout_return_unused_byclid(struct nfs_client *clp,
enum pnfs_iomode iomode);
+int pnfs_layout_handle_reboot(struct nfs_client *clp);
/* nfs4_deviceid_flags */
enum {
@@ -389,8 +407,6 @@ void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data);
void pnfs_generic_rw_release(void *data);
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
struct nfs_commit_info *cinfo);
-struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo,
- struct page *page);
int pnfs_generic_commit_pagelist(struct inode *inode,
struct list_head *mds_pages,
int how,
@@ -400,7 +416,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode,
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max);
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data);
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
-struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
+struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net,
+ struct list_head *dsaddrs,
gfp_t gfp_flags);
void nfs4_pnfs_v3_ds_connect_unload(void);
int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
@@ -550,17 +567,6 @@ pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo)
fl_cinfo->ops->recover_commit_reqs(head, cinfo);
}
-static inline struct nfs_page *
-pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
- struct page *page)
-{
- struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
-
- if (!fl_cinfo->ops || !fl_cinfo->ops->search_commit_reqs)
- return NULL;
- return fl_cinfo->ops->search_commit_reqs(cinfo, page);
-}
-
/* Should the pNFS client commit and return the layout upon a setattr */
static inline bool
pnfs_ld_layoutret_on_setattr(struct inode *inode)
@@ -682,6 +688,13 @@ pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page
req_offset(req), req_last);
}
+static inline void pnfs_lseg_cancel_io(struct nfs_server *server,
+ struct pnfs_layout_segment *lseg)
+{
+ if (server->pnfs_curr_ld->cancel_io)
+ server->pnfs_curr_ld->cancel_io(lseg);
+}
+
extern unsigned int layoutstats_timer;
#ifdef NFS_DEBUG
@@ -711,6 +724,11 @@ static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
}
+static inline int pnfs_layout_handle_reboot(struct nfs_client *clp)
+{
+ return 0;
+}
+
static inline struct pnfs_layout_segment *
pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
@@ -850,13 +868,6 @@ pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo)
{
}
-static inline struct nfs_page *
-pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
- struct page *page)
-{
- return NULL;
-}
-
static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
return 0;
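
Aside (illustrative, not from the patch): the new ->cancel_io hook added to struct pnfs_layoutdriver_type follows the usual optional-operation pattern — callers never dereference the pointer directly but go through a small NULL-checking wrapper (pnfs_lseg_cancel_io() above). The hypothetical sketch below shows that dispatch shape in isolation; the names are made up for the example.

/* Illustrative sketch only: an optional callback in an ops table,
 * dispatched through a NULL-checking wrapper (hypothetical names). */
#include <stdio.h>

struct segment { int id; };

struct layout_ops {
	const char *name;
	void (*cancel_io)(struct segment *seg);   /* optional hook */
};

static void flexfiles_cancel_io(struct segment *seg)
{
	printf("cancelling I/O for segment %d\n", seg->id);
}

/* Wrapper: safe to call whether or not the driver implements the hook. */
static void lseg_cancel_io(const struct layout_ops *ops, struct segment *seg)
{
	if (ops->cancel_io)
		ops->cancel_io(seg);
}

int main(void)
{
	struct segment seg = { .id = 7 };
	struct layout_ops with_hook = { "flexfiles", flexfiles_cancel_io };
	struct layout_ops without_hook = { "files", NULL };

	lseg_cancel_io(&with_hook, &seg);     /* dispatches to the driver */
	lseg_cancel_io(&without_hook, &seg);  /* silently does nothing */
	return 0;
}
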
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index ddbbf4fcda86..bf0f2d67e96c 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -110,9 +110,6 @@ nfs4_get_device_info(struct nfs_server *server,
* GETDEVICEINFO's maxcount
*/
max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
- if (server->pnfs_curr_ld->max_deviceinfo_size &&
- server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
- max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
max_pages = nfs_page_array_len(0, max_resp_sz);
dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
__func__, server, max_resp_sz, max_pages);
@@ -154,7 +151,7 @@ nfs4_get_device_info(struct nfs_server *server,
set_bit(NFS_DEVICEID_NOCACHE, &d->flags);
out_free_pages:
- for (i = 0; i < max_pages; i++)
+ while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
out_free_pdev:
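
Aside (illustrative): the hunk above changes the error path so that only the pages which were actually allocated get freed ("while (--i >= 0)"), rather than iterating over the full array and touching slots that were never filled. A hypothetical userspace sketch of the same unwind pattern, assuming made-up names:

/* Illustrative sketch only: unwind exactly the allocations that
 * succeeded when a mid-loop allocation fails (hypothetical example). */
#include <stdio.h>
#include <stdlib.h>

static int alloc_buffers(char **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i])
			goto out_free;
	}
	return 0;

out_free:
	/* Free only bufs[0..i-1]; bufs[i..n-1] were never allocated. */
	while (--i >= 0)
		free(bufs[i]);
	return -1;
}

int main(void)
{
	char *bufs[4];

	if (alloc_buffers(bufs, 4, 4096) == 0) {
		puts("allocated 4 buffers");
		for (int i = 3; i >= 0; i--)
			free(bufs[i]);
	}
	return 0;
}
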
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 316f68f96e57..9976cc16b689 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -16,6 +16,8 @@
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"
+#include "netns.h"
+#include "nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
@@ -351,53 +353,6 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst,
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
-static struct nfs_page *
-pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
- unsigned int nbuckets, struct page *page)
-{
- struct nfs_page *req;
- struct pnfs_commit_bucket *b;
- unsigned int i;
-
- /* Linearly search the commit lists for each bucket until a matching
- * request is found */
- for (i = 0, b = buckets; i < nbuckets; i++, b++) {
- list_for_each_entry(req, &b->written, wb_list) {
- if (req->wb_page == page)
- return req->wb_head;
- }
- list_for_each_entry(req, &b->committing, wb_list) {
- if (req->wb_page == page)
- return req->wb_head;
- }
- }
- return NULL;
-}
-
-/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head reqest
- * for @page
- * @cinfo - commit info for current inode
- * @page - page to search for matching head request
- *
- * Returns a the head request if one is found, otherwise returns NULL.
- */
-struct nfs_page *
-pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
-{
- struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- struct pnfs_commit_array *array;
- struct nfs_page *req;
-
- list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) {
- req = pnfs_bucket_search_commit_reqs(array->buckets,
- array->nbuckets, page);
- if (req)
- return req;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pnfs_generic_search_commit_reqs);
-
static struct pnfs_layout_segment *
pnfs_bucket_get_committing(struct list_head *head,
struct pnfs_commit_bucket *bucket,
@@ -419,7 +374,7 @@ static struct nfs_commit_data *
pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
struct nfs_commit_info *cinfo)
{
- struct nfs_commit_data *data = nfs_commitdata_alloc(false);
+ struct nfs_commit_data *data = nfs_commitdata_alloc();
if (!data)
return NULL;
@@ -515,7 +470,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
unsigned int nreq = 0;
if (!list_empty(mds_pages)) {
- data = nfs_commitdata_alloc(true);
+ data = nfs_commitdata_alloc();
+ if (!data) {
+ nfs_retry_commit(mds_pages, NULL, cinfo, -1);
+ return -ENOMEM;
+ }
data->ds_commit_index = -1;
list_splice_init(mds_pages, &data->pages);
list_add_tail(&data->list, &list);
@@ -533,7 +492,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
nfs_initiate_commit(NFS_CLIENT(inode), data,
NFS_PROTO(data->inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF);
+ RPC_TASK_CRED_NOREF, NULL);
} else {
nfs_init_commit(data, NULL, data->lseg, cinfo);
initiate_commit(data, how);
@@ -547,14 +506,14 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
/*
* Data server cache
*
- * Data servers can be mapped to different device ids.
- * nfs4_pnfs_ds reference counting
+ * Data servers can be mapped to different device ids, but should
+ * never be shared between net namespaces.
+ *
+ * nfs4_pnfs_ds reference counting:
* - set to 1 on allocation
* - incremented when a device id maps a data server already in the cache.
* - decremented when deviceid is removed from the cache.
*/
-static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
-static LIST_HEAD(nfs4_data_server_cache);
/* Debug routines */
static void
@@ -647,11 +606,11 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
* Lookup DS by addresses. nfs4_ds_cache_lock is held
*/
static struct nfs4_pnfs_ds *
-_data_server_lookup_locked(const struct list_head *dsaddrs)
+_data_server_lookup_locked(const struct nfs_net *nn, const struct list_head *dsaddrs)
{
struct nfs4_pnfs_ds *ds;
- list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ list_for_each_entry(ds, &nn->nfs4_data_server_cache, ds_node)
if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
return ds;
return NULL;
@@ -696,10 +655,11 @@ static void destroy_ds(struct nfs4_pnfs_ds *ds)
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
- if (refcount_dec_and_lock(&ds->ds_count,
- &nfs4_ds_cache_lock)) {
+ struct nfs_net *nn = net_generic(ds->ds_net, nfs_net_id);
+
+ if (refcount_dec_and_lock(&ds->ds_count, &nn->nfs4_data_server_lock)) {
list_del_init(&ds->ds_node);
- spin_unlock(&nfs4_ds_cache_lock);
+ spin_unlock(&nn->nfs4_data_server_lock);
destroy_ds(ds);
}
}
@@ -759,8 +719,9 @@ out_err:
* uncached and return cached struct nfs4_pnfs_ds.
*/
struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
+nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags)
{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
char *remotestr;
@@ -776,16 +737,17 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
/* this is only used for debugging, so it's ok if it's NULL */
remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
- spin_lock(&nfs4_ds_cache_lock);
- tmp_ds = _data_server_lookup_locked(dsaddrs);
+ spin_lock(&nn->nfs4_data_server_lock);
+ tmp_ds = _data_server_lookup_locked(nn, dsaddrs);
if (tmp_ds == NULL) {
INIT_LIST_HEAD(&ds->ds_addrs);
list_splice_init(dsaddrs, &ds->ds_addrs);
ds->ds_remotestr = remotestr;
refcount_set(&ds->ds_count, 1);
INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_net = net;
ds->ds_clp = NULL;
- list_add(&ds->ds_node, &nfs4_data_server_cache);
+ list_add(&ds->ds_node, &nn->nfs4_data_server_cache);
dprintk("%s add new data server %s\n", __func__,
ds->ds_remotestr);
} else {
@@ -797,7 +759,7 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
refcount_read(&tmp_ds->ds_count));
ds = tmp_ds;
}
- spin_unlock(&nfs4_ds_cache_lock);
+ spin_unlock(&nn->nfs4_data_server_lock);
out:
return ds;
}
@@ -817,7 +779,7 @@ static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
static struct nfs_client *(*get_v3_ds_connect)(
struct nfs_server *mds_srv,
- const struct sockaddr *ds_addr,
+ const struct sockaddr_storage *ds_addr,
int ds_addrlen,
int ds_proto,
unsigned int ds_timeo,
@@ -847,7 +809,11 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
unsigned int retrans)
{
struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
+ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
struct nfs4_pnfs_ds_addr *da;
+ unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
+ int ds_proto;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -866,21 +832,33 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
.dstaddr = (struct sockaddr *)&da->da_addr,
.addrlen = da->da_addrlen,
.servername = clp->cl_hostname,
+ .connect_timeout = connect_timeout,
+ .reconnect_timeout = connect_timeout,
+ .xprtsec = clp->cl_xprtsec,
};
- if (da->da_transport != clp->cl_proto)
+ if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
+ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS)
+ xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
+
+ if (xprt_args.ident != clp->cl_proto)
continue;
- if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ if (xprt_args.dstaddr->sa_family !=
+ clp->cl_addr.ss_family)
continue;
/* Add this address as an alias */
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
- rpc_clnt_test_and_add_xprt, NULL);
+ rpc_clnt_test_and_add_xprt, NULL);
continue;
}
- clp = get_v3_ds_connect(mds_srv,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, da->da_transport,
- timeo, retrans);
+
+ ds_proto = da->da_transport;
+ if (ds_proto == XPRT_TRANSPORT_TCP &&
+ xprtsec_policy != RPC_XPRTSEC_NONE)
+ ds_proto = XPRT_TRANSPORT_TCP_TLS;
+
+ clp = get_v3_ds_connect(mds_srv, &da->da_addr, da->da_addrlen,
+ ds_proto, timeo, retrans);
if (IS_ERR(clp))
continue;
clp->cl_rpcclient->cl_softerr = 0;
@@ -906,12 +884,17 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
u32 minor_version)
{
struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
+ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
struct nfs4_pnfs_ds_addr *da;
+ int ds_proto;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ char servername[48];
+
dprintk("%s: DS %s: trying address %s\n",
__func__, ds->ds_remotestr, da->da_remotestr);
@@ -922,6 +905,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
.dstaddr = (struct sockaddr *)&da->da_addr,
.addrlen = da->da_addrlen,
.servername = clp->cl_hostname,
+ .xprtsec = clp->cl_xprtsec,
};
struct nfs4_add_xprt_data xprtdata = {
.clp = clp,
@@ -931,26 +915,63 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
.data = &xprtdata,
};
- if (da->da_transport != clp->cl_proto)
+ if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
+ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS) {
+ struct sockaddr *addr =
+ (struct sockaddr *)&da->da_addr;
+ struct sockaddr_in *sin =
+ (struct sockaddr_in *)&da->da_addr;
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)&da->da_addr;
+
+ /* for NFS with TLS we need to supply a correct
+ * servername of the trunked transport, not the
+ * servername of the main transport stored in
+ * clp->cl_hostname. Also set the protocol to
+ * indicate that TLS should be used.
+ */
+ servername[0] = '\0';
+ switch(addr->sa_family) {
+ case AF_INET:
+ snprintf(servername, sizeof(servername),
+ "%pI4", &sin->sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ snprintf(servername, sizeof(servername),
+ "%pI6", &sin6->sin6_addr);
+ break;
+ default:
+ /* do not consider this address */
+ continue;
+ }
+ xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
+ xprt_args.servername = servername;
+ }
+ if (xprt_args.ident != clp->cl_proto)
continue;
- if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ if (xprt_args.dstaddr->sa_family !=
+ clp->cl_addr.ss_family)
continue;
+
/**
* Test this address for session trunking and
* add as an alias
*/
- xprtdata.cred = nfs4_get_clid_cred(clp),
+ xprtdata.cred = nfs4_get_clid_cred(clp);
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
rpc_clnt_setup_test_and_add_xprt,
&rpcdata);
if (xprtdata.cred)
put_cred(xprtdata.cred);
} else {
- clp = nfs4_set_ds_client(mds_srv,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen,
- da->da_transport, timeo,
- retrans, minor_version);
+ ds_proto = da->da_transport;
+ if (ds_proto == XPRT_TRANSPORT_TCP &&
+ xprtsec_policy != RPC_XPRTSEC_NONE)
+ ds_proto = XPRT_TRANSPORT_TCP_TLS;
+
+ clp = nfs4_set_ds_client(mds_srv, &da->da_addr,
+ da->da_addrlen, ds_proto,
+ timeo, retrans, minor_version);
if (IS_ERR(clp))
continue;
@@ -961,7 +982,6 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
clp = ERR_PTR(-EIO);
continue;
}
-
}
}
@@ -992,8 +1012,10 @@ int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
err = nfs4_wait_ds_connect(ds);
if (err || ds->ds_clp)
goto out;
- if (nfs4_test_deviceid_unavailable(devid))
- return -ENODEV;
+ if (nfs4_test_deviceid_unavailable(devid)) {
+ err = -ENODEV;
+ goto out;
+ }
} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);
if (ds->ds_clp)
@@ -1023,11 +1045,12 @@ out:
if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
WARN_ON_ONCE(ds->ds_clp ||
!nfs4_test_deviceid_unavailable(devid));
- return -EINVAL;
- }
- err = nfs_client_init_status(ds->ds_clp);
+ err = -EINVAL;
+ } else
+ err = nfs_client_init_status(ds->ds_clp);
}
+ trace_pnfs_ds_connect(ds->ds_remotestr, err);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
@@ -1176,7 +1199,7 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
nfs_request_add_commit_list_locked(req, list, cinfo);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
- nfs_mark_page_unstable(req->wb_page, cinfo);
+ nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
return;
out_resched:
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
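
Aside (illustrative): the pnfs_nfs.c changes above move the data-server cache from a single global list into struct nfs_net, so lookup and insertion happen under a per-namespace lock while the refcount rules stay as documented (1 on allocation, +1 when another device id maps the same server, -1 on removal). The hypothetical sketch below shows the lookup-or-insert-under-one-lock shape of nfs4_pnfs_ds_add() in plain C; the kernel version uses a spinlock, refcount_t and address-list comparison, but the structure is the same.

/* Illustrative sketch only: "lookup or insert under one lock" with a
 * refcount, modelled on nfs4_pnfs_ds_add() (hypothetical types). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ds {
	char addr[32];
	int refcount;
	struct ds *next;
};

struct ds_cache {
	pthread_mutex_t lock;
	struct ds *head;
};

static struct ds *ds_add(struct ds_cache *cache, const char *addr)
{
	struct ds *new = calloc(1, sizeof(*new));
	struct ds *ds;

	if (!new)
		return NULL;
	pthread_mutex_lock(&cache->lock);
	for (ds = cache->head; ds; ds = ds->next)
		if (strcmp(ds->addr, addr) == 0)
			break;
	if (!ds) {
		/* Not cached yet: insert the new entry with refcount 1. */
		snprintf(new->addr, sizeof(new->addr), "%s", addr);
		new->refcount = 1;
		new->next = cache->head;
		cache->head = new;
		ds = new;
		new = NULL;
	} else {
		/* Already cached: take another reference, discard 'new'. */
		ds->refcount++;
	}
	pthread_mutex_unlock(&cache->lock);
	free(new);
	return ds;
}

int main(void)
{
	struct ds_cache cache = { PTHREAD_MUTEX_INITIALIZER, NULL };

	ds_add(&cache, "192.0.2.1");
	struct ds *ds = ds_add(&cache, "192.0.2.1");
	printf("%s refcount=%d\n", ds->addr, ds->refcount); /* refcount=2 */
	return 0;
}
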
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 73dcaa99fa9b..63e71310b9f6 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -92,6 +92,7 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
info->maxfilesize = 0x7FFFFFFF;
info->lease_time = 0;
info->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED;
+ info->xattr_support = 0;
return 0;
}
@@ -152,13 +153,13 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct dentry *dentry,
+nfs_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
.fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len
+ .name = name->name,
+ .len = name->len
};
struct nfs_diropok res = {
.fh = fhandle,
@@ -395,9 +396,10 @@ nfs_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
}
static int
-nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct folio *folio,
unsigned int len, struct iattr *sattr)
{
+ struct page *page = &folio->page;
struct nfs_fh *fh;
struct nfs_fattr *fattr;
struct nfs_symlinkargs arg = {
@@ -444,13 +446,14 @@ out:
return status;
}
-static int
+static struct dentry *
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
};
+ struct dentry *alias = NULL;
int status = -ENOMEM;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -462,12 +465,15 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
- if (status == 0)
- status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ if (status == 0) {
+ alias = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
+ status = PTR_ERR_OR_ZERO(alias);
+ } else
+ alias = ERR_PTR(status);
nfs_free_createdata(data);
out:
dprintk("NFS reply mkdir: %d\n", status);
- return status;
+ return alias;
}
static int
@@ -685,14 +691,22 @@ out_einval:
return -EINVAL;
}
-static int nfs_have_delegation(struct inode *inode, fmode_t flags)
+static int nfs_have_delegation(struct inode *inode, fmode_t type, int flags)
+{
+ return 0;
+}
+
+static int nfs_return_delegation(struct inode *inode)
{
+ if (S_ISREG(inode->i_mode))
+ nfs_wb_all(inode);
return 0;
}
static const struct inode_operations nfs_dir_inode_operations = {
.create = nfs_create,
.lookup = nfs_lookup,
+ .atomic_open = nfs_atomic_open_v23,
.link = nfs_link,
.unlink = nfs_unlink,
.symlink = nfs_symlink,
@@ -754,6 +768,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.lock_check_bounds = nfs_lock_check_bounds,
.close_context = nfs_close_context,
.have_delegation = nfs_have_delegation,
+ .return_delegation = nfs_return_delegation,
.alloc_client = nfs_alloc_client,
.init_client = nfs_init_client,
.free_client = nfs_free_client,
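
Illustrative sketch, not part of the patch: the ->mkdir conversion above changes the return type from an int status to a struct dentry pointer, encoding failures with ERR_PTR() and recovering an errno with PTR_ERR_OR_ZERO(). Two hypothetical helpers (names invented) show the round trip:

#include <linux/dcache.h>
#include <linux/err.h>

/* Wrap: a zero status passes the dentry through, anything else becomes an ERR_PTR. */
static struct dentry *example_wrap_result(struct dentry *alias, int status)
{
	return status ? ERR_PTR(status) : alias;
}

/* Unwrap: an error pointer becomes its errno; NULL or a real dentry becomes 0. */
static int example_unwrap_result(struct dentry *alias)
{
	return PTR_ERR_OR_ZERO(alias);
}
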
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index d11af2a9299c..3c1fa320b3f1 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -15,6 +15,7 @@
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
@@ -27,10 +28,11 @@
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
+#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;
static struct kmem_cache *nfs_rdata_cachep;
@@ -46,15 +48,16 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
+ kfree(rhdr->res.scratch);
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
-static
-int nfs_return_empty_page(struct page *page)
+static int nfs_return_empty_folio(struct folio *folio)
{
- zero_user(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
- unlock_page(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
+ if (nfs_netfs_folio_unlock(folio))
+ folio_unlock(folio);
return 0;
}
@@ -74,7 +77,7 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
-static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
+void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
struct nfs_pgio_mirror *pgm;
unsigned long npages;
@@ -108,38 +111,29 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
+bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
+{
+ WARN_ON(hdr->res.scratch != NULL);
+ hdr->res.scratch = kmalloc(size, GFP_KERNEL);
+ return hdr->res.scratch != NULL;
+}
+EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);
+
static void nfs_readpage_release(struct nfs_page *req, int error)
{
- struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
- struct page *page = req->wb_page;
-
- dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
- (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
- (long long)req_offset(req));
-
- if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
- SetPageError(page);
- if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
- struct address_space *mapping = page_file_mapping(page);
-
- if (PageUptodate(page))
- nfs_readpage_to_fscache(inode, page, 0);
- else if (!PageError(page) && !PagePrivate(page))
- generic_error_remove_page(mapping, page);
- unlock_page(page);
- }
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
+ if (nfs_netfs_folio_unlock(folio))
+ folio_unlock(folio);
+
nfs_release_request(req);
}
-struct nfs_readdesc {
- struct nfs_pageio_descriptor pgio;
- struct nfs_open_context *ctx;
-};
-
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
- SetPageUptodate(req->wb_page);
+ folio_mark_uptodate(nfs_page_to_folio(req));
}
static void nfs_read_completion(struct nfs_pgio_header *hdr)
@@ -151,24 +145,25 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
goto out;
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
- struct page *page = req->wb_page;
+ struct folio *folio = nfs_page_to_folio(req);
unsigned long start = req->wb_pgbase;
unsigned long end = req->wb_pgbase + req->wb_bytes;
if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
/* note: regions of the page not covered by a
- * request are zeroed in readpage_async_filler */
+ * request are zeroed in nfs_read_add_folio
+ */
if (bytes > hdr->good_bytes) {
/* nothing in this request was good, so zero
* the full extent of the request */
- zero_user_segment(page, start, end);
+ folio_zero_segment(folio, start, end);
} else if (hdr->good_bytes - bytes < req->wb_bytes) {
/* part of this request has good bytes, but
* not all. zero the bad bytes */
start += hdr->good_bytes - bytes;
WARN_ON(start < req->wb_pgbase);
- zero_user_segment(page, start, end);
+ folio_zero_segment(folio, start, end);
}
}
error = 0;
@@ -185,6 +180,8 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
nfs_readpage_release(req, error);
}
+ nfs_netfs_read_completion(hdr);
+
out:
hdr->release(hdr);
}
@@ -194,11 +191,8 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
const struct nfs_rpc_ops *rpc_ops,
struct rpc_task_setup *task_setup_data, int how)
{
- struct inode *inode = hdr->inode;
- int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
-
- task_setup_data->flags |= swap_flags;
rpc_ops->read_setup(hdr, msg);
+ nfs_netfs_initiate_read(hdr);
trace_nfs_initiate_read(hdr);
}
@@ -214,7 +208,7 @@ nfs_async_read_error(struct list_head *head, int error)
}
}
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
.error_cleanup = nfs_async_read_error,
.completion = nfs_read_completion,
};
@@ -289,156 +283,171 @@ static void nfs_readpage_result(struct rpc_task *task,
nfs_readpage_retry(task, hdr);
}
-static int
-readpage_async_filler(void *data, struct page *page)
+int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+ struct nfs_open_context *ctx,
+ struct folio *folio)
{
- struct nfs_readdesc *desc = data;
- struct inode *inode = page_file_mapping(page)->host;
- unsigned int rsize = NFS_SERVER(inode)->rsize;
+ struct inode *inode = folio->mapping->host;
+ struct nfs_server *server = NFS_SERVER(inode);
+ size_t fsize = folio_size(folio);
+ unsigned int rsize = server->rsize;
struct nfs_page *new;
unsigned int len, aligned_len;
int error;
- len = nfs_page_length(page);
+ len = nfs_folio_length(folio);
if (len == 0)
- return nfs_return_empty_page(page);
+ return nfs_return_empty_folio(folio);
- aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
+ aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);
- new = nfs_create_request(desc->ctx, page, 0, aligned_len);
- if (IS_ERR(new))
- goto out_error;
+ new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
+ if (nfs_netfs_folio_unlock(folio))
+ folio_unlock(folio);
+ goto out;
+ }
- if (len < PAGE_SIZE)
- zero_user_segment(page, len, PAGE_SIZE);
- if (!nfs_pageio_add_request(&desc->pgio, new)) {
+ if (len < fsize)
+ folio_zero_segment(folio, len, fsize);
+ if (!nfs_pageio_add_request(pgio, new)) {
nfs_list_remove_request(new);
- error = desc->pgio.pg_error;
+ error = pgio->pg_error;
nfs_readpage_release(new, error);
goto out;
}
return 0;
-out_error:
- error = PTR_ERR(new);
- unlock_page(page);
out:
return error;
}
/*
- * Read a page over NFS.
- * We read the page synchronously in the following case:
- * - The error flag is set for this page. This happens only when a
- * previous async read operation failed.
+ * Actually read a folio over the wire.
+ */
+static int nfs_do_read_folio(struct file *file, struct folio *folio)
+{
+ struct inode *inode = file_inode(file);
+ struct nfs_pageio_descriptor pgio;
+ struct nfs_open_context *ctx;
+ int ret;
+
+ ctx = get_nfs_open_context(nfs_file_open_context(file));
+
+ xchg(&ctx->error, 0);
+ nfs_pageio_init_read(&pgio, inode, false,
+ &nfs_async_read_completion_ops);
+
+ ret = nfs_read_add_folio(&pgio, ctx, folio);
+ if (ret)
+ goto out_put;
+
+ nfs_pageio_complete_read(&pgio);
+ nfs_update_delegated_atime(inode);
+ if (pgio.pg_error < 0) {
+ ret = pgio.pg_error;
+ goto out_put;
+ }
+
+ ret = folio_wait_locked_killable(folio);
+ if (!folio_test_uptodate(folio) && !ret)
+ ret = xchg(&ctx->error, 0);
+
+out_put:
+ put_nfs_open_context(ctx);
+ return ret;
+}
+
+/*
+ * Synchronously read a folio.
+ *
+ * This is not heavily used, as most users try an asynchronous
+ * large read through ->readahead first.
*/
-int nfs_readpage(struct file *file, struct page *page)
+int nfs_read_folio(struct file *file, struct folio *folio)
{
- struct nfs_readdesc desc;
- struct inode *inode = page_file_mapping(page)->host;
+ struct inode *inode = file_inode(file);
+ loff_t pos = folio_pos(folio);
+ size_t len = folio_size(folio);
int ret;
- trace_nfs_aop_readpage(inode, page);
+ trace_nfs_aop_readpage(inode, pos, len);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+ task_io_account_read(len);
/*
* Try to flush any pending writes to the file..
*
- * NOTE! Because we own the page lock, there cannot
+ * NOTE! Because we own the folio lock, there cannot
* be any new pending writes generated at this point
- * for this page (other pages can be written to).
+ * for this folio (other folios can be written to).
*/
- ret = nfs_wb_page(inode, page);
+ ret = nfs_wb_folio(inode, folio);
if (ret)
goto out_unlock;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto out_unlock;
ret = -ESTALE;
if (NFS_STALE(inode))
goto out_unlock;
- if (file == NULL) {
- ret = -EBADF;
- desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
- if (desc.ctx == NULL)
- goto out_unlock;
- } else
- desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
-
- xchg(&desc.ctx->error, 0);
- if (!IS_SYNC(inode)) {
- ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
- if (ret == 0)
- goto out_wait;
- }
-
- nfs_pageio_init_read(&desc.pgio, inode, false,
- &nfs_async_read_completion_ops);
-
- ret = readpage_async_filler(&desc, page);
+ ret = nfs_netfs_read_folio(file, folio);
if (ret)
- goto out;
-
- nfs_pageio_complete_read(&desc.pgio);
- ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
-out_wait:
- if (!ret) {
- ret = wait_on_page_locked_killable(page);
- if (!PageUptodate(page) && !ret)
- ret = xchg(&desc.ctx->error, 0);
- }
+ ret = nfs_do_read_folio(file, folio);
out:
- put_nfs_open_context(desc.ctx);
- trace_nfs_aop_readpage_done(inode, page, ret);
+ trace_nfs_aop_readpage_done(inode, pos, len, ret);
return ret;
out_unlock:
- unlock_page(page);
- trace_nfs_aop_readpage_done(inode, page, ret);
- return ret;
+ folio_unlock(folio);
+ goto out;
}
-int nfs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+void nfs_readahead(struct readahead_control *ractl)
{
- struct nfs_readdesc desc;
- struct inode *inode = mapping->host;
+ struct nfs_pageio_descriptor pgio;
+ struct nfs_open_context *ctx;
+ unsigned int nr_pages = readahead_count(ractl);
+ struct file *file = ractl->file;
+ struct inode *inode = ractl->mapping->host;
+ struct folio *folio;
int ret;
- trace_nfs_aop_readahead(inode, lru_to_page(pages), nr_pages);
+ trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
+ task_io_account_read(readahead_length(ractl));
ret = -ESTALE;
if (NFS_STALE(inode))
goto out;
+ ret = nfs_netfs_readahead(ractl);
+ if (!ret)
+ goto out;
+
if (file == NULL) {
ret = -EBADF;
- desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
- if (desc.ctx == NULL)
+ ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
+ if (ctx == NULL)
goto out;
} else
- desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
-
- /* attempt to read as many of the pages as possible from the cache
- * - this returns -ENOBUFS immediately if the cookie is negative
- */
- ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
- pages, &nr_pages);
- if (ret == 0)
- goto read_complete; /* all pages were read */
+ ctx = get_nfs_open_context(nfs_file_open_context(file));
- nfs_pageio_init_read(&desc.pgio, inode, false,
+ nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
- ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
+ while ((folio = readahead_folio(ractl)) != NULL) {
+ ret = nfs_read_add_folio(&pgio, ctx, folio);
+ if (ret)
+ break;
+ }
- nfs_pageio_complete_read(&desc.pgio);
+ nfs_pageio_complete_read(&pgio);
+ nfs_update_delegated_atime(inode);
-read_complete:
- put_nfs_open_context(desc.ctx);
+ put_nfs_open_context(ctx);
out:
trace_nfs_aop_readahead_done(inode, nr_pages, ret);
- return ret;
}
int __init nfs_init_readpagecache(void)
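
Illustrative sketch, not part of the patch: nfs_readahead() above follows the standard ->readahead pattern, pulling locked folios off the readahead_control batch with readahead_folio() and queueing each one for I/O. A self-contained skeleton with invented names; the stub simply zero-fills and unlocks instead of issuing real I/O:

#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Stub standing in for real read I/O: zero-fill, mark uptodate, unlock. */
static void example_queue_read(struct file *file, struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
}

/* Minimal ->readahead shape: each folio comes back locked from the batch. */
static void example_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL)
		example_queue_read(ractl->file, folio);
}
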
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 3aced401735c..72dee6f3050e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -47,6 +47,7 @@
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/in6.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/netdevice.h>
@@ -59,6 +60,8 @@
#include <linux/uaccess.h>
#include <linux/nfs_ssc.h>
+#include <uapi/linux/tls.h>
+
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
@@ -68,6 +71,9 @@
#include "nfs4session.h"
#include "pnfs.h"
#include "nfs.h"
+#include "netns.h"
+#include "sysfs.h"
+#include "nfs4idmap.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -125,11 +131,7 @@ static void nfs_ssc_unregister_ops(void)
}
#endif /* CONFIG_NFS_V4_2 */
-static struct shrinker acl_shrinker = {
- .count_objects = nfs_access_cache_count,
- .scan_objects = nfs_access_cache_scan,
- .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *acl_shrinker;
/*
* Register the NFS filesystems
@@ -149,9 +151,18 @@ int __init register_nfs_fs(void)
ret = nfs_register_sysctl();
if (ret < 0)
goto error_2;
- ret = register_shrinker(&acl_shrinker);
- if (ret < 0)
+
+ acl_shrinker = shrinker_alloc(0, "nfs-acl");
+ if (!acl_shrinker) {
+ ret = -ENOMEM;
goto error_3;
+ }
+
+ acl_shrinker->count_objects = nfs_access_cache_count;
+ acl_shrinker->scan_objects = nfs_access_cache_scan;
+
+ shrinker_register(acl_shrinker);
+
#ifdef CONFIG_NFS_V4_2
nfs_ssc_register_ops();
#endif
@@ -171,7 +182,7 @@ error_0:
*/
void __exit unregister_nfs_fs(void)
{
- unregister_shrinker(&acl_shrinker);
+ shrinker_free(acl_shrinker);
nfs_unregister_sysctl();
unregister_nfs4_fs();
#ifdef CONFIG_NFS_V4_2
@@ -219,6 +230,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
ret = fn(server, data);
if (ret)
goto out;
+ cond_resched();
rcu_read_lock();
}
rcu_read_unlock();
@@ -442,8 +454,12 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
{ NFS_MOUNT_NONLM, ",nolock", "" },
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+ { NFS_MOUNT_NETUNREACH_FATAL,
+ ",fatal_neterrors=ENETDOWN:ENETUNREACH",
+ ",fatal_neterrors=none" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
@@ -491,14 +507,32 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
seq_printf(m, ",timeo=%lu", 10U * nfss->client->cl_timeout->to_initval / HZ);
seq_printf(m, ",retrans=%u", nfss->client->cl_timeout->to_retries);
seq_printf(m, ",sec=%s", nfs_pseudoflavour_to_name(nfss->client->cl_auth->au_flavor));
+ switch (clp->cl_xprtsec.policy) {
+ case RPC_XPRTSEC_TLS_ANON:
+ seq_puts(m, ",xprtsec=tls");
+ break;
+ case RPC_XPRTSEC_TLS_X509:
+ seq_puts(m, ",xprtsec=mtls");
+ break;
+ default:
+ break;
+ }
if (version != 4)
nfs_show_mountd_options(m, nfss, showdefaults);
else
nfs_show_nfsv4_options(m, nfss, showdefaults);
- if (nfss->options & NFS_OPTION_FSCACHE)
+ if (nfss->options & NFS_OPTION_FSCACHE) {
+#ifdef CONFIG_NFS_FSCACHE
+ if (nfss->fscache_uniq)
+ seq_printf(m, ",fsc=%s", nfss->fscache_uniq);
+ else
+ seq_puts(m, ",fsc");
+#else
seq_puts(m, ",fsc");
+#endif
+ }
if (nfss->options & NFS_OPTION_MIGRATION)
seq_puts(m, ",migration");
@@ -522,6 +556,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
else
seq_puts(m, ",local_lock=posix");
+ if (nfss->flags & NFS_MOUNT_NO_ALIGNWRITE)
+ seq_puts(m, ",noalignwrite");
+
if (nfss->flags & NFS_MOUNT_WRITE_EAGER) {
if (nfss->flags & NFS_MOUNT_WRITE_WAIT)
seq_puts(m, ",write=wait");
@@ -692,10 +729,6 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
totals.events[i] += stats->events[i];
for (i = 0; i < __NFSIOS_BYTESMAX; i++)
totals.bytes[i] += stats->bytes[i];
-#ifdef CONFIG_NFS_FSCACHE
- for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
- totals.fscache[i] += stats->fscache[i];
-#endif
preempt_enable();
}
@@ -706,13 +739,6 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
seq_puts(m, "\n\tbytes:\t");
for (i = 0; i < __NFSIOS_BYTESMAX; i++)
seq_printf(m, "%Lu ", totals.bytes[i]);
-#ifdef CONFIG_NFS_FSCACHE
- if (nfss->options & NFS_OPTION_FSCACHE) {
- seq_puts(m, "\n\tfsc:\t");
- for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
- seq_printf(m, "%Lu ", totals.fscache[i]);
- }
-#endif
seq_putc(m, '\n');
rpc_clnt_show_stats(m, nfss->client);
@@ -822,8 +848,7 @@ static int nfs_request_mount(struct fs_context *fc,
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_mount_request request = {
- .sap = (struct sockaddr *)
- &ctx->mount_server.address,
+ .sap = &ctx->mount_server._address,
.dirpath = ctx->nfs_server.export_path,
.protocol = ctx->mount_server.protocol,
.fh = root_fh,
@@ -854,7 +879,7 @@ static int nfs_request_mount(struct fs_context *fc,
* Construct the mount server's address.
*/
if (ctx->mount_server.address.sa_family == AF_UNSPEC) {
- memcpy(request.sap, &ctx->nfs_server.address,
+ memcpy(request.sap, &ctx->nfs_server._address,
ctx->nfs_server.addrlen);
ctx->mount_server.addrlen = ctx->nfs_server.addrlen;
}
@@ -865,7 +890,15 @@ static int nfs_request_mount(struct fs_context *fc,
* Now ask the mount server to map our export path
* to a file handle.
*/
- status = nfs_mount(&request, ctx->timeo, ctx->retrans);
+ if ((request.protocol == XPRT_TRANSPORT_UDP) ==
+ !(ctx->flags & NFS_MOUNT_TCP))
+ /*
+ * NFS protocol and mount protocol are both UDP or neither UDP
+ * so timeouts are compatible. Use NFS timeouts for MOUNT
+ */
+ status = nfs_mount(&request, ctx->timeo, ctx->retrans);
+ else
+ status = nfs_mount(&request, NFS_UNSPEC_TIMEO, NFS_UNSPEC_RETRANS);
if (status != 0) {
dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
request.hostname, status);
@@ -886,6 +919,16 @@ static struct nfs_server *nfs_try_mount_request(struct fs_context *fc)
rpc_authflavor_t authlist[NFS_MAX_SECFLAVORS];
unsigned int authlist_len = ARRAY_SIZE(authlist);
+ /* make sure 'nolock'/'lock' override the 'local_lock' mount option */
+ if (ctx->lock_status) {
+ if (ctx->lock_status == NFS_LOCK_NOLOCK) {
+ ctx->flags |= NFS_MOUNT_NONLM;
+ ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
+ } else {
+ ctx->flags &= ~NFS_MOUNT_NONLM;
+ ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
+ }
+ }
status = nfs_request_mount(fc, ctx->mntfh, authlist, &authlist_len);
if (status)
return ERR_PTR(status);
@@ -1009,6 +1052,16 @@ int nfs_reconfigure(struct fs_context *fc)
sync_filesystem(sb);
/*
+ * The SB_RDONLY flag has been removed from the superblock during
+ * mounts to prevent interference between different filesystems.
+ * Similarly, it is also necessary to ignore the SB_RDONLY flag
+ * during reconfiguration; otherwise, it may also result in the
+ * creation of redundant superblocks when mounting a directory with
+ * different rw and ro flags multiple times.
+ */
+ fc->sb_flags_mask &= ~SB_RDONLY;
+
+ /*
* Userspace mount programs that send binary options generally send
* them populated with default values. We have no way to know which
* ones were explicitly specified. Fall back to legacy behavior and
@@ -1051,22 +1104,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
if (ctx->bsize)
sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
- if (server->nfs_client->rpc_ops->version != 2) {
- /* The VFS shouldn't apply the umask to mode bits. We will do
- * so ourselves when necessary.
+ switch (server->nfs_client->rpc_ops->version) {
+ case 2:
+ sb->s_time_gran = 1000;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+ break;
+ case 3:
+ /*
+ * The VFS shouldn't apply the umask to mode bits.
+ * We will do so ourselves when necessary.
*/
sb->s_flags |= SB_POSIXACL;
sb->s_time_gran = 1;
- sb->s_export_op = &nfs_export_ops;
- } else
- sb->s_time_gran = 1000;
-
- if (server->nfs_client->rpc_ops->version != 4) {
sb->s_time_min = 0;
sb->s_time_max = U32_MAX;
- } else {
+ sb->s_export_op = &nfs_export_ops;
+ break;
+ case 4:
+ sb->s_iflags |= SB_I_NOUMASK;
+ sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
+ if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+ sb->s_export_op = &nfs_export_ops;
+ break;
}
sb->s_magic = NFS_SUPER_MAGIC;
@@ -1080,6 +1142,7 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
&sb->s_blocksize_bits);
nfs_super_set_maxbytes(sb, server->maxfilesize);
+ nfs_sysfs_move_server_to_sb(sb);
server->has_sec_mnt_opts = ctx->has_sec_mnt_opts;
}
@@ -1120,7 +1183,7 @@ static int nfs_set_super(struct super_block *s, struct fs_context *fc)
struct nfs_server *server = fc->s_fs_info;
int ret;
- s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
+ set_default_d_op(s, server->nfs_client->rpc_ops->dentry_ops);
ret = set_anon_super(s, server);
if (ret == 0)
server->s_dev = s->s_dev;
@@ -1204,42 +1267,42 @@ static int nfs_compare_super(struct super_block *sb, struct fs_context *fc)
}
#ifdef CONFIG_NFS_FSCACHE
-static void nfs_get_cache_cookie(struct super_block *sb,
- struct nfs_fs_context *ctx)
+static int nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_fs_context *ctx)
{
struct nfs_server *nfss = NFS_SB(sb);
char *uniq = NULL;
int ulen = 0;
- nfss->fscache_key = NULL;
nfss->fscache = NULL;
if (!ctx)
- return;
+ return 0;
if (ctx->clone_data.sb) {
struct nfs_server *mnt_s = NFS_SB(ctx->clone_data.sb);
if (!(mnt_s->options & NFS_OPTION_FSCACHE))
- return;
- if (mnt_s->fscache_key) {
- uniq = mnt_s->fscache_key->key.uniquifier;
- ulen = mnt_s->fscache_key->key.uniq_len;
+ return 0;
+ if (mnt_s->fscache_uniq) {
+ uniq = mnt_s->fscache_uniq;
+ ulen = strlen(uniq);
}
} else {
if (!(ctx->options & NFS_OPTION_FSCACHE))
- return;
+ return 0;
if (ctx->fscache_uniq) {
uniq = ctx->fscache_uniq;
ulen = strlen(ctx->fscache_uniq);
}
}
- nfs_fscache_get_super_cookie(sb, uniq, ulen);
+ return nfs_fscache_get_super_cookie(sb, uniq, ulen);
}
#else
-static void nfs_get_cache_cookie(struct super_block *sb,
- struct nfs_fs_context *ctx)
+static int nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_fs_context *ctx)
{
+ return 0;
}
#endif
@@ -1255,8 +1318,17 @@ int nfs_get_tree_common(struct fs_context *fc)
if (IS_ERR(server))
return PTR_ERR(server);
+ /*
+ * When NFS_MOUNT_UNSHARED is not set, NFS forces the sharing of a
+ * superblock among all filesystems that mount sub-directories
+ * belonging to a single exported root path.
+ * To prevent interference between different filesystems, the
+ * SB_RDONLY flag should be removed from the superblock.
+ */
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
+ else
+ fc->sb_flags &= ~SB_RDONLY;
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
@@ -1266,9 +1338,6 @@ int nfs_get_tree_common(struct fs_context *fc)
if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS)
fc->sb_flags |= SB_SYNCHRONOUS;
- if (server->caps & NFS_CAP_SECURITY_LABEL)
- fc->lsm_flags |= SECURITY_LSM_NATIVE_LABELS;
-
/* Get a superblock - note that we may end up sharing one that already exists */
fc->s_fs_info = server;
s = sget_fc(fc, compare_super, nfs_set_super);
@@ -1299,7 +1368,9 @@ int nfs_get_tree_common(struct fs_context *fc)
s->s_blocksize_bits = bsize;
s->s_blocksize = 1U << bsize;
}
- nfs_get_cache_cookie(s, ctx);
+ error = nfs_get_cache_cookie(s, ctx);
+ if (error < 0)
+ goto error_splat_super;
}
error = nfs_get_root(s, fc);
@@ -1323,19 +1394,18 @@ error_splat_super:
}
/*
- * Destroy an NFS2/3 superblock
+ * Destroy an NFS superblock
*/
void nfs_kill_super(struct super_block *s)
{
struct nfs_server *server = NFS_SB(s);
- dev_t dev = s->s_dev;
- generic_shutdown_super(s);
+ nfs_sysfs_move_sb_to_server(server);
+ kill_anon_super(s);
nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
- free_anon_bdev(dev);
}
EXPORT_SYMBOL_GPL(nfs_kill_super);
@@ -1356,6 +1426,7 @@ unsigned short max_session_cb_slots = NFS4_DEF_CB_SLOT_TABLE_SIZE;
unsigned short send_implementation_id = 1;
char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = "";
bool recover_lost_locks = false;
+short nfs_delay_retrans = -1;
EXPORT_SYMBOL_GPL(nfs_callback_nr_threads);
EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport);
@@ -1366,6 +1437,7 @@ EXPORT_SYMBOL_GPL(max_session_cb_slots);
EXPORT_SYMBOL_GPL(send_implementation_id);
EXPORT_SYMBOL_GPL(nfs4_client_id_uniquifier);
EXPORT_SYMBOL_GPL(recover_lost_locks);
+EXPORT_SYMBOL_GPL(nfs_delay_retrans);
#define NFS_CALLBACK_MAXPORTNR (65535U)
@@ -1414,5 +1486,9 @@ MODULE_PARM_DESC(recover_lost_locks,
"If the server reports that a lock might be lost, "
"try to recover it risking data corruption.");
-
+module_param_named(delay_retrans, nfs_delay_retrans, short, 0644);
+MODULE_PARM_DESC(delay_retrans,
+ "Unless negative, specifies the number of times the NFSv4 "
+ "client retries a request before returning an EAGAIN error, "
+ "after a reply of NFS4ERR_DELAY from the server.");
#endif /* CONFIG_NFS_V4 */
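
Illustrative sketch, not part of the patch: the acl_shrinker hunks above move from a statically defined struct shrinker plus register_shrinker()/unregister_shrinker() to the dynamically allocated API. A minimal, self-contained example with invented names and no-op callbacks:

#include <linux/shrinker.h>

static struct shrinker *example_shrinker;

static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* nothing cached in this sketch */
}

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;	/* nothing to reclaim in this sketch */
}

static int example_shrinker_init(void)
{
	example_shrinker = shrinker_alloc(0, "example-cache");
	if (!example_shrinker)
		return -ENOMEM;

	example_shrinker->count_objects = example_count;
	example_shrinker->scan_objects = example_scan;

	shrinker_register(example_shrinker);
	return 0;
}

static void example_shrinker_exit(void)
{
	shrinker_free(example_shrinker);
}
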
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 25ba299fdac2..58146e935402 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -26,53 +26,45 @@
* and straight-forward than readdir caching.
*/
-static int nfs_symlink_filler(void *data, struct page *page)
+static int nfs_symlink_filler(struct file *file, struct folio *folio)
{
- struct inode *inode = data;
+ struct inode *inode = folio->mapping->host;
int error;
- error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
- if (error < 0)
- goto error;
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
-
-error:
- SetPageError(page);
- unlock_page(page);
- return -EIO;
+ error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE);
+ folio_end_read(folio, error == 0);
+ return error;
}
static const char *nfs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct page *page;
+ struct folio *folio;
void *err;
if (!dentry) {
err = ERR_PTR(nfs_revalidate_mapping_rcu(inode));
if (err)
return err;
- page = find_get_page(inode->i_mapping, 0);
- if (!page)
+ folio = filemap_get_folio(inode->i_mapping, 0);
+ if (IS_ERR(folio))
return ERR_PTR(-ECHILD);
- if (!PageUptodate(page)) {
- put_page(page);
+ if (!folio_test_uptodate(folio)) {
+ folio_put(folio);
return ERR_PTR(-ECHILD);
}
} else {
err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
if (err)
return err;
- page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
- inode);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ folio = read_cache_folio(&inode->i_data, 0, nfs_symlink_filler,
+ NULL);
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
}
- set_delayed_call(done, page_put_link, page);
- return page_address(page);
+ set_delayed_call(done, page_put_link, folio);
+ return folio_address(folio);
}
/*
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index 7aea195ddb35..f579df0e8d67 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -14,7 +14,7 @@
static struct ctl_table_header *nfs_callback_sysctl_table;
-static struct ctl_table nfs_cb_sysctls[] = {
+static const struct ctl_table nfs_cb_sysctls[] = {
{
.procname = "nfs_mountpoint_timeout",
.data = &nfs_mountpoint_expiry_timeout,
@@ -29,30 +29,11 @@ static struct ctl_table nfs_cb_sysctls[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- { }
-};
-
-static struct ctl_table nfs_cb_sysctl_dir[] = {
- {
- .procname = "nfs",
- .mode = 0555,
- .child = nfs_cb_sysctls,
- },
- { }
-};
-
-static struct ctl_table nfs_cb_sysctl_root[] = {
- {
- .procname = "fs",
- .mode = 0555,
- .child = nfs_cb_sysctl_dir,
- },
- { }
};
int nfs_register_sysctl(void)
{
- nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root);
+ nfs_callback_sysctl_table = register_sysctl("fs/nfs", nfs_cb_sysctls);
if (nfs_callback_sysctl_table == NULL)
return -ENOMEM;
return 0;
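
Illustrative sketch, not part of the patch: register_sysctl() takes a "fs/nfs"-style path directly, so the intermediate directory tables and register_sysctl_table() go away. A minimal example with an invented table, assuming (as the sentinel-free, const nfs_cb_sysctls above implies) a kernel where register_sysctl() determines the table size itself:

#include <linux/sysctl.h>

static int example_timeout;

static const struct ctl_table example_sysctls[] = {
	{
		.procname	= "example_timeout",
		.data		= &example_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static struct ctl_table_header *example_sysctl_header;

static int example_register_sysctl(void)
{
	example_sysctl_header = register_sysctl("fs/example", example_sysctls);
	return example_sysctl_header ? 0 : -ENOMEM;
}

static void example_unregister_sysctl(void)
{
	unregister_sysctl_table(example_sysctl_header);
}
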
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 8cb70755e3c9..ea6e6168092b 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -12,65 +12,63 @@
#include <linux/string.h>
#include <linux/nfs_fs.h>
#include <linux/rcupdate.h>
+#include <linux/lockd/lockd.h>
+#include "internal.h"
#include "nfs4_fs.h"
#include "netns.h"
#include "sysfs.h"
-struct kobject *nfs_client_kobj;
-static struct kset *nfs_client_kset;
+static struct kset *nfs_kset;
-static void nfs_netns_object_release(struct kobject *kobj)
+static void nfs_kset_release(struct kobject *kobj)
{
- kfree(kobj);
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+ kfree(kset);
}
static const struct kobj_ns_type_operations *nfs_netns_object_child_ns_type(
- struct kobject *kobj)
+ const struct kobject *kobj)
{
return &net_ns_type_operations;
}
-static struct kobj_type nfs_netns_object_type = {
- .release = nfs_netns_object_release,
+static struct kobj_type nfs_kset_type = {
+ .release = nfs_kset_release,
.sysfs_ops = &kobj_sysfs_ops,
.child_ns_type = nfs_netns_object_child_ns_type,
};
-static struct kobject *nfs_netns_object_alloc(const char *name,
- struct kset *kset, struct kobject *parent)
+int nfs_sysfs_init(void)
{
- struct kobject *kobj;
+ int ret;
- kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
- if (kobj) {
- kobj->kset = kset;
- if (kobject_init_and_add(kobj, &nfs_netns_object_type,
- parent, "%s", name) == 0)
- return kobj;
- kobject_put(kobj);
+ nfs_kset = kzalloc(sizeof(*nfs_kset), GFP_KERNEL);
+ if (!nfs_kset)
+ return -ENOMEM;
+
+ ret = kobject_set_name(&nfs_kset->kobj, "nfs");
+ if (ret) {
+ kfree(nfs_kset);
+ return ret;
}
- return NULL;
-}
-int nfs_sysfs_init(void)
-{
- nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj);
- if (!nfs_client_kset)
- return -ENOMEM;
- nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL);
- if (!nfs_client_kobj) {
- kset_unregister(nfs_client_kset);
- nfs_client_kset = NULL;
- return -ENOMEM;
+ nfs_kset->kobj.parent = fs_kobj;
+ nfs_kset->kobj.ktype = &nfs_kset_type;
+ nfs_kset->kobj.kset = NULL;
+
+ ret = kset_register(nfs_kset);
+ if (ret) {
+ kfree(nfs_kset);
+ return ret;
}
+
return 0;
}
void nfs_sysfs_exit(void)
{
- kobject_put(nfs_client_kobj);
- kset_unregister(nfs_client_kset);
+ kset_unregister(nfs_kset);
}
static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
@@ -82,7 +80,7 @@ static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
ssize_t ret;
rcu_read_lock();
- ret = scnprintf(buf, PAGE_SIZE, "%s\n", rcu_dereference(c->identifier));
+ ret = sysfs_emit(buf, "%s\n", rcu_dereference(c->identifier));
rcu_read_unlock();
return ret;
}
@@ -127,10 +125,9 @@ static void nfs_netns_client_release(struct kobject *kobj)
kobject);
kfree(rcu_dereference_raw(c->identifier));
- kfree(c);
}
-static const void *nfs_netns_client_namespace(struct kobject *kobj)
+static const void *nfs_netns_client_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct nfs_netns_client, kobject)->net;
}
@@ -142,14 +139,34 @@ static struct attribute *nfs_netns_client_attrs[] = {
&nfs_netns_client_id.attr,
NULL,
};
+ATTRIBUTE_GROUPS(nfs_netns_client);
static struct kobj_type nfs_netns_client_type = {
.release = nfs_netns_client_release,
- .default_attrs = nfs_netns_client_attrs,
+ .default_groups = nfs_netns_client_groups,
.sysfs_ops = &kobj_sysfs_ops,
.namespace = nfs_netns_client_namespace,
};
+static void nfs_netns_object_release(struct kobject *kobj)
+{
+ struct nfs_netns_client *c = container_of(kobj,
+ struct nfs_netns_client,
+ nfs_net_kobj);
+ kfree(c);
+}
+
+static const void *nfs_netns_namespace(const struct kobject *kobj)
+{
+ return container_of(kobj, struct nfs_netns_client, nfs_net_kobj)->net;
+}
+
+static struct kobj_type nfs_netns_object_type = {
+ .release = nfs_netns_object_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = nfs_netns_namespace,
+};
+
static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
struct net *net)
{
@@ -158,11 +175,21 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
p->net = net;
- p->kobject.kset = nfs_client_kset;
+ p->kobject.kset = nfs_kset;
+ p->nfs_net_kobj.kset = nfs_kset;
+
+ if (kobject_init_and_add(&p->nfs_net_kobj, &nfs_netns_object_type,
+ parent, "net") != 0) {
+ kobject_put(&p->nfs_net_kobj);
+ return NULL;
+ }
+
if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
- parent, "nfs_client") == 0)
+ &p->nfs_net_kobj, "nfs_client") == 0)
return p;
+
kobject_put(&p->kobject);
+ kobject_put(&p->nfs_net_kobj);
}
return NULL;
}
@@ -171,7 +198,7 @@ void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net)
{
struct nfs_netns_client *clp;
- clp = nfs_netns_client_alloc(nfs_client_kobj, net);
+ clp = nfs_netns_client_alloc(&nfs_kset->kobj, net);
if (clp) {
netns->nfs_client = clp;
kobject_uevent(&clp->kobject, KOBJ_ADD);
@@ -186,6 +213,258 @@ void nfs_netns_sysfs_destroy(struct nfs_net *netns)
kobject_uevent(&clp->kobject, KOBJ_REMOVE);
kobject_del(&clp->kobject);
kobject_put(&clp->kobject);
+ kobject_del(&clp->nfs_net_kobj);
+ kobject_put(&clp->nfs_net_kobj);
netns->nfs_client = NULL;
}
}
+
+static bool shutdown_match_client(const struct rpc_task *task, const void *data)
+{
+ return true;
+}
+
+static void shutdown_client(struct rpc_clnt *clnt)
+{
+ clnt->cl_shutdown = 1;
+ rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
+}
+
+/*
+ * Shut down the nfs_client only once all the superblocks
+ * have been shut down.
+ */
+static void shutdown_nfs_client(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!(server->flags & NFS_MOUNT_SHUTDOWN)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ nfs_mark_client_ready(clp, -EIO);
+ shutdown_client(clp->cl_rpcclient);
+}
+
+static ssize_t
+shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ bool shutdown = server->flags & NFS_MOUNT_SHUTDOWN;
+ return sysfs_emit(buf, "%d\n", shutdown);
+}
+
+static ssize_t
+shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nfs_server *server;
+ int ret, val;
+
+ server = container_of(kobj, struct nfs_server, kobj);
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ /* already shut down? */
+ if (server->flags & NFS_MOUNT_SHUTDOWN)
+ goto out;
+
+ server->flags |= NFS_MOUNT_SHUTDOWN;
+ shutdown_client(server->client);
+
+ if (!IS_ERR(server->client_acl))
+ shutdown_client(server->client_acl);
+
+ if (server->nlm_host)
+ shutdown_client(server->nlm_host->h_rpcclnt);
+out:
+ shutdown_nfs_client(server->nfs_client);
+ return count;
+}
+
+static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
+
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static ssize_t
+implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->domain) == 0)
+ return 0; //sysfs_emit(buf, "");
+ return sysfs_emit(buf, "%s\n", impl_id->domain);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain);
+
+
+static ssize_t
+implid_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->name) == 0)
+ return 0; //sysfs_emit(buf, "");
+ return sysfs_emit(buf, "%s\n", impl_id->name);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name);
+
+#endif /* IS_ENABLED(CONFIG_NFS_V4_1) */
+
+#define RPC_CLIENT_NAME_SIZE 64
+
+void nfs_sysfs_link_rpc_client(struct nfs_server *server,
+ struct rpc_clnt *clnt, const char *uniq)
+{
+ char name[RPC_CLIENT_NAME_SIZE];
+ int ret;
+
+ strscpy(name, clnt->cl_program->name, sizeof(name));
+ strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
+ strncat(name, "_client", sizeof(name) - strlen(name) - 1);
+
+ ret = sysfs_create_link_nowarn(&server->kobj,
+ &clnt->cl_sysfs->kobject, name);
+ if (ret < 0)
+ pr_warn("NFS: can't create link to %s in sysfs (%d)\n",
+ name, ret);
+}
+EXPORT_SYMBOL_GPL(nfs_sysfs_link_rpc_client);
+
+static void nfs_sysfs_sb_release(struct kobject *kobj)
+{
+ /* no-op: why? see lib/kobject.c kobject_cleanup() */
+}
+
+static const void *nfs_netns_server_namespace(const struct kobject *kobj)
+{
+ return container_of(kobj, struct nfs_server, kobj)->nfs_client->cl_net;
+}
+
+static struct kobj_type nfs_sb_ktype = {
+ .release = nfs_sysfs_sb_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = nfs_netns_server_namespace,
+ .child_ns_type = nfs_netns_object_child_ns_type,
+};
+
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+ int ret;
+
+ if (!server->nfs_client->cl_implid)
+ return;
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else /* CONFIG_NFS_V4_1 */
+static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+static ssize_t
+localio_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ bool localio = nfs_server_is_local(server->nfs_client);
+ return sysfs_emit(buf, "%d\n", localio);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_localio = __ATTR_RO(localio);
+
+static void nfs_sysfs_add_nfs_localio_server(struct nfs_server *server)
+{
+ int ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_localio.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else
+static inline void nfs_sysfs_add_nfs_localio_server(struct nfs_server *server)
+{
+}
+#endif /* IS_ENABLED(CONFIG_NFS_LOCALIO) */
+
+void nfs_sysfs_add_server(struct nfs_server *server)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&server->kobj, &nfs_sb_ktype,
+ &nfs_kset->kobj, "server-%d", server->s_sysfs_id);
+ if (ret < 0) {
+ pr_warn("NFS: nfs sysfs add server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+ return;
+ }
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_shutdown.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+
+ nfs_sysfs_add_nfsv41_server(server);
+ nfs_sysfs_add_nfs_localio_server(server);
+}
+EXPORT_SYMBOL_GPL(nfs_sysfs_add_server);
+
+void nfs_sysfs_move_server_to_sb(struct super_block *s)
+{
+ struct nfs_server *server = s->s_fs_info;
+ int ret;
+
+ ret = kobject_rename(&server->kobj, s->s_id);
+ if (ret < 0)
+ pr_warn("NFS: rename sysfs %s failed (%d)\n",
+ server->kobj.name, ret);
+}
+
+void nfs_sysfs_move_sb_to_server(struct nfs_server *server)
+{
+ const char *s;
+ int ret = -ENOMEM;
+
+ s = kasprintf(GFP_KERNEL, "server-%d", server->s_sysfs_id);
+ if (s) {
+ ret = kobject_rename(&server->kobj, s);
+ kfree(s);
+ }
+ if (ret < 0)
+ pr_warn("NFS: rename sysfs %s failed (%d)\n",
+ server->kobj.name, ret);
+}
+
+/* unlink, not dec-ref */
+void nfs_sysfs_remove_server(struct nfs_server *server)
+{
+ kobject_del(&server->kobj);
+}
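
Illustrative sketch, not part of the patch: the shutdown attribute above is the standard kobj_attribute pattern, sysfs_emit() in the show handler and kstrtoint() validation in the store handler. A generic, self-contained pair with invented names; the resulting attribute would then be published via sysfs_create_file_ns() or a default attribute group, as the server object code above does:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_flag;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", example_flag);
}

static ssize_t example_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;
	if (val != 1)
		return -EINVAL;	/* only "1" triggers the action, as above */

	example_flag = 1;
	return count;
}

static struct kobj_attribute example_attr = __ATTR_RW(example);
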
diff --git a/fs/nfs/sysfs.h b/fs/nfs/sysfs.h
index 5501ef573c32..c5d1990cade5 100644
--- a/fs/nfs/sysfs.h
+++ b/fs/nfs/sysfs.h
@@ -10,11 +10,12 @@
struct nfs_netns_client {
struct kobject kobject;
+ struct kobject nfs_net_kobj;
struct net *net;
const char __rcu *identifier;
};
-extern struct kobject *nfs_client_kobj;
+extern struct kobject *nfs_net_kobj;
extern int nfs_sysfs_init(void);
extern void nfs_sysfs_exit(void);
@@ -22,4 +23,11 @@ extern void nfs_sysfs_exit(void);
void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net);
void nfs_netns_sysfs_destroy(struct nfs_net *netns);
+void nfs_sysfs_link_rpc_client(struct nfs_server *server,
+ struct rpc_clnt *clnt, const char *sysfs_prefix);
+void nfs_sysfs_add_server(struct nfs_server *s);
+void nfs_sysfs_move_server_to_sb(struct super_block *s);
+void nfs_sysfs_move_sb_to_server(struct nfs_server *s);
+void nfs_sysfs_remove_server(struct nfs_server *s);
+
#endif
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 5fa11e1aca4c..b55467911648 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -102,6 +102,10 @@ static void nfs_do_call_unlink(struct inode *inode, struct nfs_unlinkdata *data)
};
struct rpc_task *task;
struct inode *dir = d_inode(data->dentry->d_parent);
+
+ if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
nfs_sb_active(dir->i_sb);
data->args.fh = NFS_FH(dir);
nfs_fattr_init(data->res.dir_attr);
@@ -135,6 +139,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf
*/
spin_lock(&alias->d_lock);
if (d_really_is_positive(alias) &&
+ !nfs_compare_fh(NFS_FH(inode), NFS_FH(d_inode(alias))) &&
!(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
devname_garbage = alias->d_fsdata;
alias->d_fsdata = data;
@@ -227,6 +232,8 @@ nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
dentry->d_fsdata = NULL;
spin_unlock(&dentry->d_lock);
+ NFS_PROTO(inode)->return_delegation(inode);
+
if (NFS_STALE(inode) || !nfs_call_unlink(dentry, inode, data))
nfs_free_unlinkdata(data);
}
@@ -262,7 +269,7 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
struct inode *new_dir = data->new_dir;
struct dentry *old_dentry = data->old_dentry;
- trace_nfs_sillyrename_rename(old_dir, old_dentry,
+ trace_nfs_async_rename_done(old_dir, old_dentry,
new_dir, data->new_dentry, task->tk_status);
if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
rpc_restart_call_prepare(task);
@@ -344,9 +351,14 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
+ if (nfs_server_capable(old_dir, NFS_CAP_MOVEABLE) &&
+ nfs_server_capable(new_dir, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return ERR_PTR(-ENOMEM);
+ task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
data->cred = get_current_cred();
@@ -452,18 +464,17 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
sdentry = NULL;
do {
- int slen;
dput(sdentry);
sillycounter++;
- slen = scnprintf(silly, sizeof(silly),
- SILLYNAME_PREFIX "%0*llx%0*x",
- SILLYNAME_FILEID_LEN, fileid,
- SILLYNAME_COUNTER_LEN, sillycounter);
+ scnprintf(silly, sizeof(silly),
+ SILLYNAME_PREFIX "%0*llx%0*x",
+ SILLYNAME_FILEID_LEN, fileid,
+ SILLYNAME_COUNTER_LEN, sillycounter);
dfprintk(VFS, "NFS: trying to rename %pd to %s\n",
dentry, silly);
- sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+ sdentry = lookup_noperm(&QSTR(silly), dentry->d_parent);
/*
* N.B. Better to return EBUSY here ... it could be
* dangerous to delete the file while it's in use.
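
Illustrative sketch, not part of the patch: the sillyrename hunk above swaps lookup_one_len() for lookup_noperm() with a struct qstr built by the QSTR() macro. A hypothetical caller (function and file name invented) might look like this; as with lookup_one_len(), the parent directory is expected to be locked by the caller:

#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>

/* Look up a child by name under an already-locked parent directory. */
static int example_touch_child(struct dentry *parent)
{
	struct dentry *child;

	child = lookup_noperm(&QSTR("victim-name"), parent);
	if (IS_ERR(child))
		return PTR_ERR(child);
	/* ... operate on child ... */
	dput(child);
	return 0;
}
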
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9b7619ce17a7..336c510f3750 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -25,6 +25,7 @@
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>
+#include <linux/filelock.h>
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
@@ -58,39 +59,27 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
-static void nfs_clear_request_commit(struct nfs_page *req);
+static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
+ struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
struct inode *inode);
-static struct nfs_page *
-nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
- struct page *page);
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;
-struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
+struct nfs_commit_data *nfs_commitdata_alloc(void)
{
struct nfs_commit_data *p;
- if (never_fail)
- p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
- else {
- /* It is OK to do some reclaim, not no safe to wait
- * for anything to be returned to the pool.
- * mempool_alloc() cannot handle that particular combination,
- * so we need two separate attempts.
- */
+ p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask());
+ if (!p) {
p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
if (!p)
- p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
- __GFP_NOWARN | __GFP_NORETRY);
- if (!p)
return NULL;
+ memset(p, 0, sizeof(*p));
}
-
- memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
return p;
}
@@ -104,9 +93,15 @@ EXPORT_SYMBOL_GPL(nfs_commit_free);
static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
- struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
+ struct nfs_pgio_header *p;
- memset(p, 0, sizeof(*p));
+ p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask());
+ if (!p) {
+ p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT);
+ if (!p)
+ return NULL;
+ memset(p, 0, sizeof(*p));
+ }
p->rw_mode = FMODE_WRITE;
return p;
}
@@ -158,134 +153,51 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
}
}
-static int
-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
- int ret;
-
- if (!test_bit(PG_REMOVE, &req->wb_flags))
- return 0;
- ret = nfs_page_group_lock(req);
- if (ret)
- return ret;
if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
nfs_page_set_inode_ref(req, inode);
- nfs_page_group_unlock(req);
- return 0;
-}
-
-static struct nfs_page *
-nfs_page_private_request(struct page *page)
-{
- if (!PagePrivate(page))
- return NULL;
- return (struct nfs_page *)page_private(page);
}
-/*
- * nfs_page_find_head_request_locked - find head request associated with @page
+/**
+ * nfs_folio_find_head_request - find head request associated with a folio
+ * @folio: pointer to folio
*
* must be called while holding the inode lock.
*
* returns matching head request with reference held, or NULL if not found.
*/
-static struct nfs_page *
-nfs_page_find_private_request(struct page *page)
+static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
{
- struct address_space *mapping = page_file_mapping(page);
+ struct address_space *mapping = folio->mapping;
struct nfs_page *req;
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
return NULL;
- spin_lock(&mapping->private_lock);
- req = nfs_page_private_request(page);
+ spin_lock(&mapping->i_private_lock);
+ req = folio->private;
if (req) {
WARN_ON_ONCE(req->wb_head != req);
kref_get(&req->wb_kref);
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return req;
}
-static struct nfs_page *
-nfs_page_find_swap_request(struct page *page)
-{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_inode *nfsi = NFS_I(inode);
- struct nfs_page *req = NULL;
- if (!PageSwapCache(page))
- return NULL;
- mutex_lock(&nfsi->commit_mutex);
- if (PageSwapCache(page)) {
- req = nfs_page_search_commits_for_head_request_locked(nfsi,
- page);
- if (req) {
- WARN_ON_ONCE(req->wb_head != req);
- kref_get(&req->wb_kref);
- }
- }
- mutex_unlock(&nfsi->commit_mutex);
- return req;
-}
-
-/*
- * nfs_page_find_head_request - find head request associated with @page
- *
- * returns matching head request with reference held, or NULL if not found.
- */
-static struct nfs_page *nfs_page_find_head_request(struct page *page)
-{
- struct nfs_page *req;
-
- req = nfs_page_find_private_request(page);
- if (!req)
- req = nfs_page_find_swap_request(page);
- return req;
-}
-
-static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
-{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_page *req, *head;
- int ret;
-
- for (;;) {
- req = nfs_page_find_head_request(page);
- if (!req)
- return req;
- head = nfs_page_group_lock_head(req);
- if (head != req)
- nfs_release_request(req);
- if (IS_ERR(head))
- return head;
- ret = nfs_cancel_remove_inode(head, inode);
- if (ret < 0) {
- nfs_unlock_and_release_request(head);
- return ERR_PTR(ret);
- }
- /* Ensure that nobody removed the request before we locked it */
- if (head == nfs_page_private_request(page))
- break;
- if (PageSwapCache(page))
- break;
- nfs_unlock_and_release_request(head);
- }
- return head;
-}
-
/* Adjust the file length if we're writing beyond the end */
-static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
+static void nfs_grow_file(struct folio *folio, unsigned int offset,
+ unsigned int count)
{
- struct inode *inode = page_file_mapping(page)->host;
+ struct inode *inode = folio->mapping->host;
loff_t end, i_size;
pgoff_t end_index;
spin_lock(&inode->i_lock);
i_size = i_size_read(inode);
- end_index = (i_size - 1) >> PAGE_SHIFT;
- if (i_size > 0 && page_index(page) < end_index)
+ end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);
+ if (i_size > 0 && folio->index < end_index)
goto out;
- end = page_file_offset(page) + ((loff_t)offset+count);
+ end = folio_pos(folio) + (loff_t)offset + (loff_t)count;
if (i_size >= end)
goto out;
trace_nfs_size_grow(inode, end);
@@ -293,7 +205,10 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
+ /* Atomically update timestamps if they are delegated to us. */
+ nfs_update_delegated_mtime_locked(inode);
spin_unlock(&inode->i_lock);
+ nfs_fscache_invalidate(inode, 0);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
@@ -305,74 +220,34 @@ static void nfs_set_pageerror(struct address_space *mapping)
/* Force file size revalidation */
spin_lock(&inode->i_lock);
nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
- NFS_INO_REVAL_PAGECACHE |
+ NFS_INO_INVALID_CHANGE |
NFS_INO_INVALID_SIZE);
spin_unlock(&inode->i_lock);
}
-static void nfs_mapping_set_error(struct page *page, int error)
+static void nfs_mapping_set_error(struct folio *folio, int error)
{
- struct address_space *mapping = page_file_mapping(page);
+ struct address_space *mapping = folio->mapping;
- SetPageError(page);
- mapping_set_error(mapping, error);
+ filemap_set_wb_err(mapping, error);
+ if (mapping->host)
+ errseq_set(&mapping->host->i_sb->s_wb_err,
+ error == -ENOSPC ? -ENOSPC : -EIO);
nfs_set_pageerror(mapping);
}
/*
- * nfs_page_group_search_locked
- * @head - head request of page group
- * @page_offset - offset into page
- *
- * Search page group with head @head to find a request that contains the
- * page offset @page_offset.
- *
- * Returns a pointer to the first matching nfs request, or NULL if no
- * match is found.
- *
- * Must be called with the page group lock held
- */
-static struct nfs_page *
-nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
-{
- struct nfs_page *req;
-
- req = head;
- do {
- if (page_offset >= req->wb_pgbase &&
- page_offset < (req->wb_pgbase + req->wb_bytes))
- return req;
-
- req = req->wb_this_page;
- } while (req != head);
-
- return NULL;
-}
-
-/*
- * nfs_page_group_covers_page
- * @head - head request of page group
+ * nfs_page_covers_folio
+ * @req: struct nfs_page
*
- * Return true if the page group with head @head covers the whole page,
- * returns false otherwise
+ * Return true if the request covers the whole folio.
+ * Note that the caller should ensure all subrequests have been joined
*/
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
- struct nfs_page *tmp;
- unsigned int pos = 0;
- unsigned int len = nfs_page_length(req->wb_page);
-
- nfs_page_group_lock(req);
+ unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
- for (;;) {
- tmp = nfs_page_group_search_locked(req->wb_head, pos);
- if (!tmp)
- break;
- pos = tmp->wb_pgbase + tmp->wb_bytes;
- }
-
- nfs_page_group_unlock(req);
- return pos >= len;
+ return req->wb_pgbase == 0 && req->wb_bytes == len;
}
/* We can set the PG_uptodate flag if we see that a write request
@@ -380,11 +255,13 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
*/
static void nfs_mark_uptodate(struct nfs_page *req)
{
- if (PageUptodate(req->wb_page))
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio_test_uptodate(folio))
return;
if (!nfs_page_group_covers_page(req))
return;
- SetPageUptodate(req->wb_page);
+ folio_mark_uptodate(folio);
}
static int wb_priority(struct writeback_control *wbc)
@@ -406,33 +283,34 @@ int nfs_congestion_kb;
#define NFS_CONGESTION_OFF_THRESH \
(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
-static void nfs_set_page_writeback(struct page *page)
+static void nfs_folio_set_writeback(struct folio *folio)
{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_server *nfss = NFS_SERVER(inode);
- int ret = test_set_page_writeback(page);
-
- WARN_ON_ONCE(ret != 0);
+ struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);
- if (atomic_long_inc_return(&nfss->writeback) >
- NFS_CONGESTION_ON_THRESH)
- set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
+ folio_start_writeback(folio);
+ if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH)
+ nfss->write_congested = 1;
}
-static void nfs_end_page_writeback(struct nfs_page *req)
+static void nfs_folio_end_writeback(struct folio *folio)
{
- struct inode *inode = page_file_mapping(req->wb_page)->host;
- struct nfs_server *nfss = NFS_SERVER(inode);
- bool is_done;
+ struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);
- is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
- nfs_unlock_request(req);
- if (!is_done)
- return;
+ folio_end_writeback_no_dropbehind(folio);
+ if (atomic_long_dec_return(&nfss->writeback) <
+ NFS_CONGESTION_OFF_THRESH) {
+ nfss->write_congested = 0;
+ wake_up_all(&nfss->write_congestion_wait);
+ }
+}
- end_page_writeback(req->wb_page);
- if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
- clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
+static void nfs_page_end_writeback(struct nfs_page *req)
+{
+ if (nfs_page_group_sync_on_bit(req, PG_WB_END)) {
+ nfs_unlock_request(req);
+ nfs_folio_end_writeback(nfs_page_to_folio(req));
+ } else
+ nfs_unlock_request(req);
}
/*
@@ -502,8 +380,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
* the (former) group. All subrequests are removed from any write or commit
* lists, unlinked from the group and destroyed.
*/
-void
-nfs_join_page_group(struct nfs_page *head, struct inode *inode)
+void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
+ struct inode *inode)
{
struct nfs_page *subreq;
struct nfs_page *destroy_list = NULL;
@@ -533,7 +411,7 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
* Commit list removal accounting is done after locks are dropped */
subreq = head;
do {
- nfs_clear_request_commit(subreq);
+ nfs_clear_request_commit(cinfo, subreq);
subreq = subreq->wb_this_page;
} while (subreq != head);
@@ -547,9 +425,77 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
}
+/**
+ * nfs_wait_on_request - Wait for a request to complete.
+ * @req: request to wait upon.
+ *
+ * Interruptible by fatal signals only.
+ * The user is responsible for holding a count on the request.
+ */
+static int nfs_wait_on_request(struct nfs_page *req)
+{
+ if (!test_bit(PG_BUSY, &req->wb_flags))
+ return 0;
+ set_bit(PG_CONTENDED2, &req->wb_flags);
+ smp_mb__after_atomic();
+ return wait_on_bit_io(&req->wb_flags, PG_BUSY,
+ TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * nfs_unroll_locks - unlock all newly locked subrequests in a page group
+ * @head: head request of page group, must be holding head lock
+ * @req: the subrequest that could not be locked; unrolling stops here
+ *
+ * This is a helper function for nfs_lock_and_join_requests.
+ */
+static void
+nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
+{
+ struct nfs_page *tmp;
+
+ /* relinquish all the locks successfully grabbed this run */
+ for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
+ if (!kref_read(&tmp->wb_kref))
+ continue;
+ nfs_unlock_and_release_request(tmp);
+ }
+}
+
+/*
+ * nfs_page_group_lock_subreq - try to lock a subrequest
+ * @head: head request of page group
+ * @subreq: request to lock
+ *
+ * This is a helper function for nfs_lock_and_join_requests which
+ * must be called with the head request and page group both locked.
+ * On error, it returns with the page group unlocked.
+ */
+static int
+nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
+{
+ int ret;
+
+ if (!kref_get_unless_zero(&subreq->wb_kref))
+ return 0;
+ while (!nfs_lock_request(subreq)) {
+ nfs_page_group_unlock(head);
+ ret = nfs_wait_on_request(subreq);
+ if (!ret)
+ ret = nfs_page_group_lock(head);
+ if (ret < 0) {
+ nfs_unroll_locks(head, subreq);
+ nfs_release_request(subreq);
+ return ret;
+ }
+ }
+ return 0;
+}
+
/*
* nfs_lock_and_join_requests - join all subreqs to the head req
- * @page: the page used to lookup the "page group" of nfs_page structures
+ * @folio: the folio used to lookup the "page group" of nfs_page structures
*
* This function joins all sub requests to the head request by first
* locking all requests in the group, cancelling any pending operations
@@ -559,14 +505,14 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
*
* Returns a locked, referenced pointer to the head request - which after
* this call is guaranteed to be the only request associated with the page.
- * Returns NULL if no requests are found for @page, or a ERR_PTR if an
+ * Returns NULL if no requests are found for @folio, or an ERR_PTR if an
* error was encountered.
*/
-static struct nfs_page *
-nfs_lock_and_join_requests(struct page *page)
+static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_page *head;
+ struct inode *inode = folio->mapping->host;
+ struct nfs_page *head, *subreq;
+ struct nfs_commit_info cinfo;
int ret;
/*
@@ -574,28 +520,58 @@ nfs_lock_and_join_requests(struct page *page)
* reference to the whole page group - the group will not be destroyed
* until the head reference is released.
*/
- head = nfs_find_and_lock_page_request(page);
- if (IS_ERR_OR_NULL(head))
- return head;
+retry:
+ head = nfs_folio_find_head_request(folio);
+ if (!head)
+ return NULL;
- /* lock each request in the page group */
- ret = nfs_page_group_lock_subrequests(head);
- if (ret < 0) {
+ while (!nfs_lock_request(head)) {
+ ret = nfs_wait_on_request(head);
+ if (ret < 0) {
+ nfs_release_request(head);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = nfs_page_group_lock(head);
+ if (ret < 0)
+ goto out_unlock;
+
+ /* Ensure that nobody removed the request before we locked it */
+ if (head != folio->private) {
+ nfs_page_group_unlock(head);
nfs_unlock_and_release_request(head);
- return ERR_PTR(ret);
+ goto retry;
+ }
+
+ nfs_cancel_remove_inode(head, inode);
+
+ /* lock each request in the page group */
+ for (subreq = head->wb_this_page;
+ subreq != head;
+ subreq = subreq->wb_this_page) {
+ ret = nfs_page_group_lock_subreq(head, subreq);
+ if (ret < 0)
+ goto out_unlock;
}
- nfs_join_page_group(head, inode);
+ nfs_page_group_unlock(head);
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+ nfs_join_page_group(head, &cinfo, inode);
return head;
+
+out_unlock:
+ nfs_unlock_and_release_request(head);
+ return ERR_PTR(ret);
}
static void nfs_write_error(struct nfs_page *req, int error)
{
- trace_nfs_write_error(req, error);
- nfs_mapping_set_error(req->wb_page, error);
+ trace_nfs_write_error(nfs_page_to_inode(req), req, error);
+ nfs_mapping_set_error(nfs_page_to_folio(req), error);
nfs_inode_remove_request(req);
- nfs_end_page_writeback(req);
+ nfs_page_end_writeback(req);
nfs_release_request(req);
}
@@ -603,20 +579,22 @@ static void nfs_write_error(struct nfs_page *req, int error)
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
*/
-static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- struct page *page)
+static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
+ struct nfs_pageio_descriptor *pgio)
{
struct nfs_page *req;
- int ret = 0;
+ int ret;
- req = nfs_lock_and_join_requests(page);
+ nfs_pageio_cond_complete(pgio, folio->index);
+
+ req = nfs_lock_and_join_requests(folio);
if (!req)
- goto out;
- ret = PTR_ERR(req);
+ return 0;
if (IS_ERR(req))
- goto out;
+ return PTR_ERR(req);
- nfs_set_page_writeback(page);
+ trace_nfs_do_writepage(req);
+ nfs_folio_set_writeback(folio);
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
/* If there is a fatal error that covers this write, just exit */
@@ -624,84 +602,44 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
- ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error;
/*
* Remove the problematic req upon fatal errors on the server
*/
- if (nfs_error_is_fatal(ret)) {
- if (nfs_error_is_fatal_on_server(ret))
- goto out_launder;
- } else
- ret = -EAGAIN;
+ if (nfs_error_is_fatal_on_server(ret))
+ goto out_launder;
+ folio_redirty_for_writepage(wbc, folio);
nfs_redirty_request(req);
pgio->pg_error = 0;
- } else
- nfs_add_stats(page_file_mapping(page)->host,
- NFSIOS_WRITEPAGES, 1);
-out:
- return ret;
+ return ret;
+ }
+
+ nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1);
+ return 0;
+
out_launder:
nfs_write_error(req, ret);
return 0;
}
-static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
- struct nfs_pageio_descriptor *pgio)
-{
- int ret;
-
- nfs_pageio_cond_complete(pgio, page_index(page));
- ret = nfs_page_async_flush(pgio, page);
- if (ret == -EAGAIN) {
- redirty_page_for_writepage(wbc, page);
- ret = AOP_WRITEPAGE_ACTIVATE;
- }
- return ret;
-}
-
/*
* Write an mmapped page to the server.
*/
-static int nfs_writepage_locked(struct page *page,
+static int nfs_writepage_locked(struct folio *folio,
struct writeback_control *wbc)
{
struct nfs_pageio_descriptor pgio;
- struct inode *inode = page_file_mapping(page)->host;
+ struct inode *inode = folio->mapping->host;
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
- nfs_pageio_init_write(&pgio, inode, 0,
- false, &nfs_async_write_completion_ops);
- err = nfs_do_writepage(page, wbc, &pgio);
+ nfs_pageio_init_write(&pgio, inode, 0, false,
+ &nfs_async_write_completion_ops);
+ err = nfs_do_writepage(folio, wbc, &pgio);
pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
- if (err < 0)
- return err;
- if (nfs_error_is_fatal(pgio.pg_error))
- return pgio.pg_error;
- return 0;
-}
-
-int nfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- int ret;
-
- ret = nfs_writepage_locked(page, wbc);
- if (ret != AOP_WRITEPAGE_ACTIVATE)
- unlock_page(page);
- return ret;
-}
-
-static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
-{
- int ret;
-
- ret = nfs_do_writepage(page, wbc, data);
- if (ret != AOP_WRITEPAGE_ACTIVATE)
- unlock_page(page);
- return ret;
+ return err;
}
static void nfs_io_completion_commit(void *inode)
@@ -715,13 +653,24 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
struct nfs_pageio_descriptor pgio;
struct nfs_io_completion *ioc = NULL;
unsigned int mntflags = NFS_SERVER(inode)->flags;
+ struct nfs_server *nfss = NFS_SERVER(inode);
int priority = 0;
int err;
+ trace_nfs_writepages(inode, wbc->range_start, wbc->range_end - wbc->range_start);
+
+ /* Hold off writeback until write congestion eases */
+ if (wbc->sync_mode == WB_SYNC_NONE && nfss->write_congested) {
+ err = wait_event_killable(nfss->write_congestion_wait,
+ nfss->write_congested == 0);
+ if (err)
+ goto out_err;
+ }
+
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate ||
- wbc->for_background || wbc->for_sync || wbc->for_reclaim) {
+ wbc->for_background || wbc->for_sync) {
ioc = nfs_io_completion_alloc(GFP_KERNEL);
if (ioc)
nfs_io_completion_init(ioc, nfs_io_completion_commit,
@@ -729,48 +678,48 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
priority = wb_priority(wbc);
}
- nfs_pageio_init_write(&pgio, inode, priority, false,
- &nfs_async_write_completion_ops);
- pgio.pg_io_completion = ioc;
- err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
- pgio.pg_error = 0;
- nfs_pageio_complete(&pgio);
+ do {
+ struct folio *folio = NULL;
+
+ nfs_pageio_init_write(&pgio, inode, priority, false,
+ &nfs_async_write_completion_ops);
+ pgio.pg_io_completion = ioc;
+ while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
+ err = nfs_do_writepage(folio, wbc, &pgio);
+ folio_unlock(folio);
+ }
+ pgio.pg_error = 0;
+ nfs_pageio_complete(&pgio);
+ if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
+ break;
+ } while (err < 0 && !nfs_error_is_fatal(err));
nfs_io_completion_put(ioc);
- if (err < 0)
- goto out_err;
- err = pgio.pg_error;
- if (nfs_error_is_fatal(err))
- goto out_err;
- return 0;
+ if (err > 0)
+ err = 0;
out_err:
+ trace_nfs_writepages_done(inode, wbc->range_start, wbc->range_end - wbc->range_start, err);
return err;
}
/*
* Insert a write request into an inode
*/
-static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static void nfs_inode_add_request(struct nfs_page *req)
{
- struct address_space *mapping = page_file_mapping(req->wb_page);
- struct nfs_inode *nfsi = NFS_I(inode);
+ struct folio *folio = nfs_page_to_folio(req);
+ struct address_space *mapping = folio->mapping;
+ struct nfs_inode *nfsi = NFS_I(mapping->host);
WARN_ON_ONCE(req->wb_this_page != req);
/* Lock the request! */
nfs_lock_request(req);
-
- /*
- * Swap-space should not get truncated. Hence no need to plug the race
- * with invalidate/truncate.
- */
- spin_lock(&mapping->private_lock);
- if (likely(!PageSwapCache(req->wb_page))) {
- set_bit(PG_MAPPED, &req->wb_flags);
- SetPagePrivate(req->wb_page);
- set_page_private(req->wb_page, (unsigned long)req);
- }
- spin_unlock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
+ set_bit(PG_MAPPED, &req->wb_flags);
+ folio_set_private(folio);
+ folio->private = req;
+ spin_unlock(&mapping->i_private_lock);
atomic_long_inc(&nfsi->nrequests);
 /* this is a head request for a page group - mark it as having an
* extra reference so sub groups can follow suit.
@@ -785,66 +734,36 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
- struct address_space *mapping = page_file_mapping(req->wb_page);
- struct inode *inode = mapping->host;
- struct nfs_inode *nfsi = NFS_I(inode);
- struct nfs_page *head;
+ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
- if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
- head = req->wb_head;
-
- spin_lock(&mapping->private_lock);
- if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
- set_page_private(head->wb_page, 0);
- ClearPagePrivate(head->wb_page);
- clear_bit(PG_MAPPED, &head->wb_flags);
+ nfs_page_group_lock(req);
+ if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
+ struct folio *folio = nfs_page_to_folio(req->wb_head);
+ struct address_space *mapping = folio->mapping;
+
+ spin_lock(&mapping->i_private_lock);
+ if (likely(folio)) {
+ folio->private = NULL;
+ folio_clear_private(folio);
+ clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
}
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
+
+ folio_end_dropbehind(folio);
}
+ nfs_page_group_unlock(req);
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
- nfs_release_request(req);
atomic_long_dec(&nfsi->nrequests);
+ nfs_release_request(req);
}
}
-static void
-nfs_mark_request_dirty(struct nfs_page *req)
-{
- if (req->wb_page)
- __set_page_dirty_nobuffers(req->wb_page);
-}
-
-/*
- * nfs_page_search_commits_for_head_request_locked
- *
- * Search through commit lists on @inode for the head request for @page.
- * Must be called while holding the inode (which is cinfo) lock.
- *
- * Returns the head request if found, or NULL if not found.
- */
-static struct nfs_page *
-nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
- struct page *page)
+static void nfs_mark_request_dirty(struct nfs_page *req)
{
- struct nfs_page *freq, *t;
- struct nfs_commit_info cinfo;
- struct inode *inode = &nfsi->vfs_inode;
-
- nfs_init_cinfo_from_inode(&cinfo, inode);
-
- /* search through pnfs commit lists */
- freq = pnfs_search_commit_reqs(inode, &cinfo, page);
- if (freq)
- return freq->wb_head;
-
- /* Linearly search the commit list for the correct request */
- list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
- if (freq->wb_page == page)
- return freq->wb_head;
- }
-
- return NULL;
+ struct folio *folio = nfs_page_to_folio(req);
+ if (folio)
+ filemap_dirty_folio(folio_mapping(folio), folio);
}
/**
@@ -888,8 +807,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
- if (req->wb_page)
- nfs_mark_page_unstable(req->wb_page, cinfo);
+ nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
@@ -948,30 +866,31 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
nfs_request_add_commit_list(req, cinfo);
}
-static void
-nfs_clear_page_commit(struct page *page)
+static void nfs_folio_clear_commit(struct folio *folio)
{
- dec_node_page_state(page, NR_WRITEBACK);
- dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
- WB_WRITEBACK);
+ if (folio) {
+ long nr = folio_nr_pages(folio);
+
+ node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+ wb_stat_mod(&inode_to_bdi(folio->mapping->host)->wb,
+ WB_WRITEBACK, -nr);
+ }
}
/* Called holding the request lock on @req */
-static void
-nfs_clear_request_commit(struct nfs_page *req)
+static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
+ struct nfs_page *req)
{
if (test_bit(PG_CLEAN, &req->wb_flags)) {
struct nfs_open_context *ctx = nfs_req_openctx(req);
struct inode *inode = d_inode(ctx->dentry);
- struct nfs_commit_info cinfo;
- nfs_init_cinfo_from_inode(&cinfo, inode);
mutex_lock(&NFS_I(inode)->commit_mutex);
- if (!pnfs_clear_request_commit(req, &cinfo)) {
- nfs_request_remove_commit_list(req, &cinfo);
+ if (!pnfs_clear_request_commit(req, cinfo)) {
+ nfs_request_remove_commit_list(req, cinfo);
}
mutex_unlock(&NFS_I(inode)->commit_mutex);
- nfs_clear_page_commit(req->wb_page);
+ nfs_folio_clear_commit(nfs_page_to_folio(req));
}
}
@@ -1002,8 +921,9 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
(hdr->good_bytes < bytes)) {
- trace_nfs_comp_error(req, hdr->error);
- nfs_mapping_set_error(req->wb_page, hdr->error);
+ trace_nfs_comp_error(hdr->inode, req, hdr->error);
+ nfs_mapping_set_error(nfs_page_to_folio(req),
+ hdr->error);
goto remove_req;
}
if (nfs_write_need_commit(hdr)) {
@@ -1011,13 +931,13 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
req->wb_nio = 0;
memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
nfs_mark_request_commit(req, hdr->lseg, &cinfo,
- hdr->pgio_mirror_idx);
+ hdr->ds_commit_idx);
goto next;
}
remove_req:
nfs_inode_remove_request(req);
next:
- nfs_end_page_writeback(req);
+ nfs_page_end_writeback(req);
nfs_release_request(req);
}
out:
@@ -1093,21 +1013,21 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst,
* If the attempt fails, then the existing request is flushed out
* to disk.
*/
-static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
- struct page *page,
- unsigned int offset,
- unsigned int bytes)
+static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
+ unsigned int offset,
+ unsigned int bytes)
{
struct nfs_page *req;
unsigned int rqend;
unsigned int end;
int error;
+ trace_nfs_try_to_update_request(folio_inode(folio), offset, bytes);
end = offset + bytes;
- req = nfs_lock_and_join_requests(page);
+ req = nfs_lock_and_join_requests(folio);
if (IS_ERR_OR_NULL(req))
- return req;
+ goto out;
rqend = req->wb_offset + req->wb_bytes;
/*
@@ -1129,6 +1049,9 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
else
req->wb_bytes = rqend - req->wb_offset;
req->wb_nio = 0;
+out:
+ trace_nfs_try_to_update_request_done(folio_inode(folio), offset, bytes,
+ PTR_ERR_OR_ZERO(req));
return req;
out_flushme:
/*
@@ -1138,7 +1061,8 @@ out_flushme:
*/
nfs_mark_request_dirty(req);
nfs_unlock_and_release_request(req);
- error = nfs_wb_page(inode, page);
+ error = nfs_wb_folio(folio->mapping->host, folio);
+ trace_nfs_try_to_update_request_done(folio_inode(folio), offset, bytes, error);
return (error < 0) ? ERR_PTR(error) : NULL;
}
@@ -1149,44 +1073,47 @@ out_flushme:
* if we have to add a new request. Also assumes that the caller has
* already called nfs_flush_incompatible() if necessary.
*/
-static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
- struct page *page, unsigned int offset, unsigned int bytes)
+static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
+ struct folio *folio,
+ unsigned int offset,
+ unsigned int bytes)
{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_page *req;
+ struct nfs_page *req;
- req = nfs_try_to_update_request(inode, page, offset, bytes);
+ req = nfs_try_to_update_request(folio, offset, bytes);
if (req != NULL)
goto out;
- req = nfs_create_request(ctx, page, offset, bytes);
+ req = nfs_page_create_from_folio(ctx, folio, offset, bytes);
if (IS_ERR(req))
goto out;
- nfs_inode_add_request(inode, req);
+ nfs_inode_add_request(req);
out:
return req;
}
-static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
- unsigned int offset, unsigned int count)
+static int nfs_writepage_setup(struct nfs_open_context *ctx,
+ struct folio *folio, unsigned int offset,
+ unsigned int count)
{
- struct nfs_page *req;
+ struct nfs_page *req;
- req = nfs_setup_write_request(ctx, page, offset, count);
+ req = nfs_setup_write_request(ctx, folio, offset, count);
if (IS_ERR(req))
return PTR_ERR(req);
+ trace_nfs_writepage_setup(req);
/* Update file length */
- nfs_grow_file(page, offset, count);
+ nfs_grow_file(folio, offset, count);
nfs_mark_uptodate(req);
nfs_mark_request_dirty(req);
nfs_unlock_and_release_request(req);
return 0;
}
-int nfs_flush_incompatible(struct file *file, struct page *page)
+int nfs_flush_incompatible(struct file *file, struct folio *folio)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct nfs_lock_context *l_ctx;
- struct file_lock_context *flctx = file_inode(file)->i_flctx;
+ struct file_lock_context *flctx = locks_inode_context(file_inode(file));
struct nfs_page *req;
int do_flush, status;
/*
@@ -1198,12 +1125,12 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
* dropped page.
*/
do {
- req = nfs_page_find_head_request(page);
+ req = nfs_folio_find_head_request(folio);
if (req == NULL)
return 0;
l_ctx = req->wb_lock_context;
- do_flush = req->wb_page != page ||
- !nfs_match_open_context(nfs_req_openctx(req), ctx);
+ do_flush = nfs_page_to_folio(req) != folio ||
+ !nfs_match_open_context(nfs_req_openctx(req), ctx);
if (l_ctx && flctx &&
!(list_empty_careful(&flctx->flc_posix) &&
list_empty_careful(&flctx->flc_flock))) {
@@ -1212,7 +1139,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
nfs_release_request(req);
if (!do_flush)
return 0;
- status = nfs_wb_page(page_file_mapping(page)->host, page);
+ status = nfs_wb_folio(folio->mapping->host, folio);
} while (status == 0);
return status;
}
@@ -1284,9 +1211,9 @@ out:
* the PageUptodate() flag. In this case, we will need to turn off
* write optimisations that depend on the page contents being correct.
*/
-static bool nfs_write_pageuptodate(struct page *page, struct inode *inode,
- unsigned int pagelen)
+static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen)
{
+ struct inode *inode = folio->mapping->host;
struct nfs_inode *nfsi = NFS_I(inode);
if (nfs_have_delegated_attributes(inode))
@@ -1300,14 +1227,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode,
out:
if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
return false;
- return PageUptodate(page) != 0;
+ return folio_test_uptodate(folio) != 0;
}
static bool
is_whole_file_wrlock(struct file_lock *fl)
{
return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
- fl->fl_type == F_WRLCK;
+ lock_is_write(fl);
}
/* If we know the page is up to date, and we're not using byte range locks (or
@@ -1318,18 +1245,22 @@ is_whole_file_wrlock(struct file_lock *fl)
* If the file is opened for synchronous writes then we can just skip the rest
* of the checks.
*/
-static int nfs_can_extend_write(struct file *file, struct page *page,
- struct inode *inode, unsigned int pagelen)
+static int nfs_can_extend_write(struct file *file, struct folio *folio,
+ unsigned int pagelen)
{
- int ret;
- struct file_lock_context *flctx = inode->i_flctx;
+ struct inode *inode = file_inode(file);
+ struct file_lock_context *flctx = locks_inode_context(inode);
struct file_lock *fl;
+ int ret;
+ unsigned int mntflags = NFS_SERVER(inode)->flags;
+ if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
+ return 0;
if (file->f_flags & O_DSYNC)
return 0;
- if (!nfs_write_pageuptodate(page, inode, pagelen))
+ if (!nfs_folio_write_uptodate(folio, pagelen))
return 0;
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
+ if (nfs_have_write_delegation(inode))
return 1;
if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
list_empty_careful(&flctx->flc_posix)))
@@ -1340,13 +1271,13 @@ static int nfs_can_extend_write(struct file *file, struct page *page,
spin_lock(&flctx->flc_lock);
if (!list_empty(&flctx->flc_posix)) {
fl = list_first_entry(&flctx->flc_posix, struct file_lock,
- fl_list);
+ c.flc_list);
if (is_whole_file_wrlock(fl))
ret = 1;
} else if (!list_empty(&flctx->flc_flock)) {
fl = list_first_entry(&flctx->flc_flock, struct file_lock,
- fl_list);
- if (fl->fl_type == F_WRLCK)
+ c.flc_list);
+ if (lock_is_write(fl))
ret = 1;
}
spin_unlock(&flctx->flc_lock);
@@ -1359,33 +1290,40 @@ static int nfs_can_extend_write(struct file *file, struct page *page,
* XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
* things with a page scheduled for an RPC call (e.g. invalidate it).
*/
-int nfs_updatepage(struct file *file, struct page *page,
- unsigned int offset, unsigned int count)
+int nfs_update_folio(struct file *file, struct folio *folio,
+ unsigned int offset, unsigned int count)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
- struct address_space *mapping = page_file_mapping(page);
- struct inode *inode = mapping->host;
- unsigned int pagelen = nfs_page_length(page);
+ struct address_space *mapping = folio->mapping;
+ struct inode *inode = mapping->host;
+ unsigned int pagelen = nfs_folio_length(folio);
int status = 0;
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
- dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
- file, count, (long long)(page_file_offset(page) + offset));
+ trace_nfs_update_folio(inode, offset, count);
+
+ dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count,
+ (long long)(folio_pos(folio) + offset));
if (!count)
goto out;
- if (nfs_can_extend_write(file, page, inode, pagelen)) {
- count = max(count + offset, pagelen);
- offset = 0;
+ if (nfs_can_extend_write(file, folio, pagelen)) {
+ unsigned int end = count + offset;
+
+ offset = round_down(offset, PAGE_SIZE);
+ if (end < pagelen)
+ end = min(round_up(end, PAGE_SIZE), pagelen);
+ count = end - offset;
}
- status = nfs_writepage_setup(ctx, page, offset, count);
+ status = nfs_writepage_setup(ctx, folio, offset, count);
if (status < 0)
nfs_set_pageerror(mapping);
out:
- dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
+ trace_nfs_update_folio_done(inode, offset, count, status);
+ dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
return status;
}
@@ -1408,6 +1346,8 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
{
int priority = flush_task_priority(how);
+ if (IS_SWAPFILE(hdr->inode))
+ task_setup_data->flags |= RPC_TASK_SWAPPER;
task_setup_data->priority = priority;
rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
trace_nfs_initiate_write(hdr);
@@ -1419,11 +1359,13 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
*/
static void nfs_redirty_request(struct nfs_page *req)
{
+ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+
/* Bump the transmission count */
req->wb_nio++;
nfs_mark_request_dirty(req);
- set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
- nfs_end_page_writeback(req);
+ atomic_long_inc(&nfsi->redirtied_pages);
+ nfs_page_end_writeback(req);
nfs_release_request(req);
}
@@ -1434,7 +1376,7 @@ static void nfs_async_write_error(struct list_head *head, int error)
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- if (nfs_error_is_fatal(error))
+ if (nfs_error_is_fatal_on_server(error))
nfs_write_error(req, error);
else
nfs_redirty_request(req);
@@ -1444,8 +1386,6 @@ static void nfs_async_write_error(struct list_head *head, int error)
static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
nfs_async_write_error(&hdr->pages, 0);
- filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
- hdr->args.offset + hdr->args.count - 1);
}
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
@@ -1495,31 +1435,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}
-/*
- * Special version of should_remove_suid() that ignores capabilities.
- */
-static int nfs_should_remove_suid(const struct inode *inode)
-{
- umode_t mode = inode->i_mode;
- int kill = 0;
-
- /* suid always must be killed */
- if (unlikely(mode & S_ISUID))
- kill = ATTR_KILL_SUID;
-
- /*
- * sgid without any exec bits is just a mandatory locking mark; leave
- * it alone. If some exec bits are set, it's a real sgid; kill it.
- */
- if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
- kill |= ATTR_KILL_SGID;
-
- if (unlikely(kill && S_ISREG(mode)))
- return kill;
-
- return 0;
-}
-
static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
struct nfs_fattr *fattr)
{
@@ -1546,6 +1461,13 @@ void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
struct nfs_fattr *fattr = &hdr->fattr;
struct inode *inode = hdr->inode;
+ if (nfs_have_delegated_mtime(inode)) {
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
spin_lock(&inode->i_lock);
nfs_writeback_check_extend(hdr, fattr);
nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
@@ -1576,32 +1498,45 @@ static int nfs_writeback_done(struct rpc_task *task,
nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
trace_nfs_writeback_done(task, hdr);
- if (hdr->res.verf->committed < hdr->args.stable &&
- task->tk_status >= 0) {
- /* We tried a write call, but the server did not
- * commit data to stable storage even though we
- * requested it.
- * Note: There is a known bug in Tru64 < 5.0 in which
- * the server reports NFS_DATA_SYNC, but performs
- * NFS_FILE_SYNC. We therefore implement this checking
- * as a dprintk() in order to avoid filling syslog.
- */
- static unsigned long complain;
+ if (task->tk_status >= 0) {
+ enum nfs3_stable_how committed = hdr->res.verf->committed;
- /* Note this will print the MDS for a DS write */
- if (time_before(complain, jiffies)) {
- dprintk("NFS: faulty NFS server %s:"
- " (committed = %d) != (stable = %d)\n",
- NFS_SERVER(inode)->nfs_client->cl_hostname,
- hdr->res.verf->committed, hdr->args.stable);
- complain = jiffies + 300 * HZ;
+ if (committed == NFS_UNSTABLE) {
+ /*
+ * We have some uncommitted data on the server at
+ * this point, so ensure that we keep track of that
+ * fact irrespective of what later writes do.
+ */
+ set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags);
+ }
+
+ if (committed < hdr->args.stable) {
+ /* We tried a write call, but the server did not
+ * commit data to stable storage even though we
+ * requested it.
+ * Note: There is a known bug in Tru64 < 5.0 in which
+ * the server reports NFS_DATA_SYNC, but performs
+ * NFS_FILE_SYNC. We therefore implement this checking
+ * as a dprintk() in order to avoid filling syslog.
+ */
+ static unsigned long complain;
+
+ /* Note this will print the MDS for a DS write */
+ if (time_before(complain, jiffies)) {
+ dprintk("NFS: faulty NFS server %s:"
+ " (committed = %d) != (stable = %d)\n",
+ NFS_SERVER(inode)->nfs_client->cl_hostname,
+ committed, hdr->args.stable);
+ complain = jiffies + 300 * HZ;
+ }
}
}
/* Deal with the suid/sgid bit corner case */
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE
+ | NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
}
return 0;
@@ -1666,7 +1601,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
!atomic_read(&cinfo->rpcs_out));
}
-static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
+void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
atomic_inc(&cinfo->rpcs_out);
}
@@ -1690,7 +1625,8 @@ EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags)
+ int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
@@ -1709,12 +1645,19 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
.flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
+
+ if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE))
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
/* Set up the initial task struct. */
nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
trace_nfs_initiate_commit(data);
dprintk("NFS: initiated commit call\n");
+ if (localio)
+ return nfs_local_commit(localio, data, call_ops, how);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1792,18 +1735,18 @@ void nfs_retry_commit(struct list_head *page_list,
req = nfs_list_entry(page_list->next);
nfs_list_remove_request(req);
nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
- if (!cinfo->dreq)
- nfs_clear_page_commit(req->wb_page);
+ nfs_folio_clear_commit(nfs_page_to_folio(req));
nfs_unlock_and_release_request(req);
}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);
-static void
-nfs_commit_resched_write(struct nfs_commit_info *cinfo,
- struct nfs_page *req)
+static void nfs_commit_resched_write(struct nfs_commit_info *cinfo,
+ struct nfs_page *req)
{
- __set_page_dirty_nobuffers(req->wb_page);
+ struct folio *folio = nfs_page_to_folio(req);
+
+ filemap_dirty_folio(folio_mapping(folio), folio);
}
/*
@@ -1814,21 +1757,30 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
struct nfs_commit_info *cinfo)
{
struct nfs_commit_data *data;
+ struct nfsd_file *localio;
unsigned short task_flags = 0;
/* another commit raced with us */
if (list_empty(head))
return 0;
- data = nfs_commitdata_alloc(true);
+ data = nfs_commitdata_alloc();
+ if (!data) {
+ nfs_retry_commit(head, NULL, cinfo, -1);
+ return -ENOMEM;
+ }
/* Set up the argument struct */
nfs_init_commit(data, head, NULL, cinfo);
if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
+
+ localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
+ data->args.fh, &data->context->nfl,
+ data->context->mode);
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags, localio);
}
/*
@@ -1849,13 +1801,13 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
struct nfs_page *req;
int status = data->task.tk_status;
struct nfs_commit_info cinfo;
- struct nfs_server *nfss;
+ struct folio *folio;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- if (req->wb_page)
- nfs_clear_page_commit(req->wb_page);
+ folio = nfs_page_to_folio(req);
+ nfs_folio_clear_commit(folio);
dprintk("NFS: commit (%s/%llu %d@%lld)",
nfs_req_openctx(req)->dentry->d_sb->s_id,
@@ -1863,12 +1815,13 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
req->wb_bytes,
(long long)req_offset(req));
if (status < 0) {
- if (req->wb_page) {
- trace_nfs_commit_error(req, status);
- nfs_mapping_set_error(req->wb_page, status);
+ if (folio) {
+ trace_nfs_commit_error(data->inode, req,
+ status);
+ nfs_mapping_set_error(folio, status);
nfs_inode_remove_request(req);
}
- dprintk_cont(", error = %d\n", status);
+ dprintk(", error = %d\n", status);
goto next;
}
@@ -1876,23 +1829,20 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
* returned by the server against all stored verfs. */
if (nfs_write_match_verf(verf, req)) {
/* We have a match */
- if (req->wb_page)
+ if (folio)
nfs_inode_remove_request(req);
- dprintk_cont(" OK\n");
+ dprintk(" OK\n");
goto next;
}
/* We have a mismatch. Write the page again */
- dprintk_cont(" mismatch\n");
+ dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
- set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
+ atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
next:
nfs_unlock_and_release_request(req);
/* Latency breaker */
cond_resched();
}
- nfss = NFS_SERVER(data->inode);
- if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
- clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);
nfs_init_cinfo(&cinfo, data->inode, data->dreq);
nfs_commit_end(cinfo.mds);
@@ -2048,87 +1998,100 @@ out:
}
EXPORT_SYMBOL_GPL(nfs_wb_all);
-int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
{
struct nfs_page *req;
int ret = 0;
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
/* blocking call to cancel all requests and join to a single (head)
* request */
- req = nfs_lock_and_join_requests(page);
+ req = nfs_lock_and_join_requests(folio);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
} else if (req) {
- /* all requests from this page have been cancelled by
+ /* all requests from this folio have been cancelled by
* nfs_lock_and_join_requests, so just remove the head
* request from the inode / page_private pointer and
* release it */
nfs_inode_remove_request(req);
nfs_unlock_and_release_request(req);
+ folio_cancel_dirty(folio);
}
return ret;
}
-/*
- * Write back all requests on one page - we do this before reading it.
+/**
+ * nfs_wb_folio - Write back all requests on one folio
+ * @inode: pointer to the inode
+ * @folio: pointer to folio
+ *
+ * Assumes that the folio has been locked by the caller, and will
+ * not unlock it.
*/
-int nfs_wb_page(struct inode *inode, struct page *page)
+int nfs_wb_folio(struct inode *inode, struct folio *folio)
{
- loff_t range_start = page_file_offset(page);
- loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
+ loff_t range_start = folio_pos(folio);
+ size_t len = folio_size(folio);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
.range_start = range_start,
- .range_end = range_end,
+ .range_end = range_start + len - 1,
};
int ret;
- trace_nfs_writeback_page_enter(inode);
+ trace_nfs_writeback_folio(inode, range_start, len);
for (;;) {
- wait_on_page_writeback(page);
- if (clear_page_dirty_for_io(page)) {
- ret = nfs_writepage_locked(page, &wbc);
+ folio_wait_writeback(folio);
+ if (folio_clear_dirty_for_io(folio)) {
+ ret = nfs_writepage_locked(folio, &wbc);
if (ret < 0)
goto out_error;
continue;
}
ret = 0;
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
break;
ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0)
goto out_error;
}
out_error:
- trace_nfs_writeback_page_exit(inode, ret);
+ trace_nfs_writeback_folio_done(inode, range_start, len, ret);
return ret;
}
#ifdef CONFIG_MIGRATION
-int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
+int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode)
{
/*
- * If PagePrivate is set, then the page is currently associated with
+ * If the private flag is set, the folio is currently associated with
* an in-progress read or write request. Don't try to migrate it.
*
* FIXME: we could do this in principle, but we'll need a way to ensure
* that we can safely release the inode reference while holding
- * the page lock.
+ * the folio lock.
*/
- if (PagePrivate(page))
- return -EBUSY;
+ if (folio_test_private(src)) {
+ if (mode == MIGRATE_SYNC)
+ nfs_wb_folio(src->mapping->host, src);
+ if (folio_test_private(src))
+ return -EBUSY;
+ }
- if (!nfs_fscache_release_page(page, GFP_KERNEL))
- return -EBUSY;
+ if (folio_test_private_2(src)) { /* [DEPRECATED] */
+ if (mode == MIGRATE_ASYNC)
+ return -EBUSY;
+ folio_wait_private_2(src);
+ }
- return migrate_page(mapping, newpage, page, mode);
+ return migrate_folio(mapping, dst, src, mode);
}
#endif
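For orientation, the reworked nfs_writepages() above replaces write_cache_pages() with a direct writeback_iter() loop and simply retries the whole walk until the result is zero or a fatal error. Stripped of the NFS-specific pieces, that iteration pattern looks roughly like the sketch below; example_write_one_folio() and example_writepages() are hypothetical names standing in for nfs_do_writepage() and nfs_writepages(), so this is an illustration of the pattern rather than code from the patch.

/*
 * Minimal sketch of a writeback_iter() loop (needs <linux/writeback.h>
 * and <linux/pagemap.h>).  Both functions below are hypothetical.
 */
static int example_write_one_folio(struct folio *folio,
				   struct writeback_control *wbc)
{
	/* Hypothetical per-folio writer; a real one would start I/O here. */
	return 0;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	/*
	 * writeback_iter() hands back each dirty folio in the requested
	 * range, locked; the caller writes it and unlocks it before the
	 * next iteration.  It returns NULL once the walk completes, and
	 * the overall status is reported through @error.
	 */
	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		error = example_write_one_folio(folio, wbc);
		folio_unlock(folio);
	}
	return error;
}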
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index 119c75ab9fd0..c10ead273ff2 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -6,5 +6,11 @@
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
+CFLAGS_localio_trace.o += -I$(src)
+obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o
+nfs_localio-objs := nfslocalio.o localio_trace.o
+
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
+
+obj-$(CONFIG_NFS_COMMON) += common.o
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
new file mode 100644
index 000000000000..af09aed09fd2
--- /dev/null
+++ b/fs/nfs_common/common.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/nfs_common.h>
+#include <linux/nfs4.h>
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -EIO },
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+};
+
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+int nfs_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
+
+/*
+ * We need to translate between nfs v4 status return values and
+ * the local errno values which may not be the same.
+ *
+ * nfs4_errtbl_common[] is used before more specialized mappings
+ * available in nfs4_errtbl[] or nfs4_errtbl_localio[].
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl_common[] = {
+ { NFS4_OK, 0 },
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -EIO },
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+};
+
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl[] = {
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_NOXATTR, -ENODATA },
+ { NFS4ERR_XATTR2BIG, -E2BIG },
+};
+
+/*
+ * Convert an NFS error code to a local one.
+ * This one is used by NFSv4.
+ */
+int nfs4_stat_to_errno(int stat)
+{
+ int i;
+
+ /* First check nfs4_errtbl_common */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
+ if (nfs4_errtbl_common[i].stat == stat)
+ return nfs4_errtbl_common[i].errno;
+ }
+ /* Then check nfs4_errtbl */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) {
+ if (nfs4_errtbl[i].stat == stat)
+ return nfs4_errtbl[i].errno;
+ }
+ if (stat <= 10000 || stat > 10100) {
+ /* The server is looney tunes. */
+ return -EREMOTEIO;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+ return -stat;
+}
+EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
+
+/*
+ * This table is useful for conversion from local errno to NFS error.
+ * It provides more logically correct mappings for use with LOCALIO
+ * (which is focused on converting from errno to NFS status).
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl_localio[] = {
+ /* Map errors differently than nfs4_errtbl */
+ { NFS4ERR_IO, -EREMOTEIO },
+ { NFS4ERR_DELAY, -EAGAIN },
+ { NFS4ERR_FBIG, -E2BIG },
+ /* Map errors not handled by nfs4_errtbl */
+ { NFS4ERR_STALE, -EBADF },
+ { NFS4ERR_STALE, -EOPENSTALE },
+ { NFS4ERR_DELAY, -ETIMEDOUT },
+ { NFS4ERR_DELAY, -ERESTARTSYS },
+ { NFS4ERR_DELAY, -ENOMEM },
+ { NFS4ERR_IO, -ETXTBSY },
+ { NFS4ERR_IO, -EBUSY },
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_SERVERFAULT, -ENFILE },
+ { NFS4ERR_IO, -EUCLEAN },
+ { NFS4ERR_PERM, -ENOKEY },
+};
+
+/*
+ * Convert an errno to an NFS error code for LOCALIO.
+ */
+__u32 nfs_localio_errno_to_nfs4_stat(int errno)
+{
+ int i;
+
+ /* First check nfs4_errtbl_common */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
+ if (nfs4_errtbl_common[i].errno == errno)
+ return nfs4_errtbl_common[i].stat;
+ }
+ /* Then check nfs4_errtbl_localio */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) {
+ if (nfs4_errtbl_localio[i].errno == errno)
+ return nfs4_errtbl_localio[i].stat;
+ }
+ /* If we cannot translate the errno, report the failure to the
+ * client as a generic server fault.
+ */
+ return NFS4ERR_SERVERFAULT;
+}
+EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat);
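Two small usage notes on the helpers above, given as hedged sketches rather than code from this patch. An XDR decoder that has just pulled an NFSv2/v3 status word off the wire can map it to a local errno in one call; decode_example_status() below is hypothetical. The second sketch shows why the errno-to-status direction has its own table: the two mappings are deliberately asymmetric, e.g. NFS4ERR_LOCKED decodes to -EAGAIN, while a local -EAGAIN is reported back as NFS4ERR_DELAY.

/* Hypothetical decoder using nfs_stat_to_errno() (assumes <linux/sunrpc/xdr.h>). */
static int decode_example_status(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	return nfs_stat_to_errno((enum nfs_stat)be32_to_cpup(p));
}

/* Demonstrates the intentional asymmetry of the two translation directions. */
static void __maybe_unused example_localio_mapping(void)
{
	/* A server's NFS4ERR_LOCKED is surfaced locally as -EAGAIN ... */
	WARN_ON(nfs4_stat_to_errno(NFS4ERR_LOCKED) != -EAGAIN);
	/* ... but a local -EAGAIN is reported to the client as a delay. */
	WARN_ON(nfs_localio_errno_to_nfs4_stat(-EAGAIN) != NFS4ERR_DELAY);
}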
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index 0a9b72685f98..27cd0d13143b 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -9,6 +9,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
static unsigned int grace_net_id;
static DEFINE_SPINLOCK(grace_lock);
@@ -138,6 +139,7 @@ exit_grace(void)
}
MODULE_AUTHOR("Jeff Layton <jlayton@primarydata.com>");
+MODULE_DESCRIPTION("NFS client and server infrastructure");
MODULE_LICENSE("GPL");
module_init(init_grace)
module_exit(exit_grace)
diff --git a/fs/nfs_common/localio_trace.c b/fs/nfs_common/localio_trace.c
new file mode 100644
index 000000000000..7decfe57abeb
--- /dev/null
+++ b/fs/nfs_common/localio_trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ */
+#include <linux/nfs_fs.h>
+#include <linux/namei.h>
+
+#define CREATE_TRACE_POINTS
+#include "localio_trace.h"
diff --git a/fs/nfs_common/localio_trace.h b/fs/nfs_common/localio_trace.h
new file mode 100644
index 000000000000..4055aec9ff8d
--- /dev/null
+++ b/fs/nfs_common/localio_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nfs_localio
+
+#if !defined(_TRACE_NFS_COMMON_LOCALIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NFS_COMMON_LOCALIO_H
+
+#include <linux/tracepoint.h>
+
+#include <trace/misc/fs.h>
+#include <trace/misc/nfs.h>
+#include <trace/misc/sunrpc.h>
+
+DECLARE_EVENT_CLASS(nfs_local_client_event,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, protocol)
+ __string(server, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol = clp->rpc_ops->version;
+ __assign_str(server);
+ ),
+
+ TP_printk(
+ "server=%s NFSv%u", __get_str(server), __entry->protocol
+ )
+);
+
+#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
+ DEFINE_EVENT(nfs_local_client_event, name, \
+ TP_PROTO( \
+ const struct nfs_client *clp \
+ ), \
+ TP_ARGS(clp))
+
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_enable_client);
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_disable_client);
+
+#endif /* _TRACE_NFS_COMMON_LOCALIO_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE localio_trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
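As a usage note (a sketch, not part of the patch): each DEFINE_NFS_LOCAL_CLIENT_EVENT() above generates a trace_<name>() helper that callers invoke with a struct nfs_client pointer, and the resulting events appear under the nfs_localio system in tracefs.

/* Sketch of firing and enabling these events; assumes a standard tracefs
 * mount at /sys/kernel/tracing and that localio_trace.h has been included. */
static void example_fire_localio_event(struct nfs_client *clp)
{
	/* Generated by DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_enable_client). */
	trace_nfs_localio_enable_client(clp);
}

/* From userspace, roughly:
 *   echo 1 > /sys/kernel/tracing/events/nfs_localio/nfs_localio_enable_client/enable
 *   cat /sys/kernel/tracing/trace_pipe
 */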
diff --git a/fs/nfs_common/nfs_ssc.c b/fs/nfs_common/nfs_ssc.c
index 7c1509e968c8..832246b22c51 100644
--- a/fs/nfs_common/nfs_ssc.c
+++ b/fs/nfs_common/nfs_ssc.c
@@ -12,7 +12,6 @@
#include <linux/nfs_ssc.h>
#include "../nfs/nfs4_fs.h"
-MODULE_LICENSE("GPL");
struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
EXPORT_SYMBOL_GPL(nfs_ssc_client_tbl);
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index 5a5bd85d08f8..e2eaac14fd8e 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -29,6 +29,7 @@
#include <linux/nfs3.h>
#include <linux/sort.h>
+MODULE_DESCRIPTION("NFS ACL support");
MODULE_LICENSE("GPL");
struct nfsacl_encode_desc {
@@ -41,7 +42,7 @@ struct nfsacl_encode_desc {
};
struct nfsacl_simple_acl {
- struct posix_acl acl;
+ struct posix_acl_hdr acl;
struct posix_acl_entry ace[4];
};
@@ -111,7 +112,8 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
xdr_encode_word(buf, base, entries))
return -EINVAL;
if (encode_entries && acl && acl->a_count == 3) {
- struct posix_acl *acl2 = &aclbuf.acl;
+ struct posix_acl *acl2 =
+ container_of(&aclbuf.acl, struct posix_acl, hdr);
/* Avoid the use of posix_acl_alloc(). nfsacl_encode() is
* invoked in contexts where a memory allocation failure is
@@ -176,7 +178,8 @@ bool nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode,
return false;
if (encode_entries && acl && acl->a_count == 3) {
- struct posix_acl *acl2 = &aclbuf.acl;
+ struct posix_acl *acl2 =
+ container_of(&aclbuf.acl, struct posix_acl, hdr);
/* Avoid the use of posix_acl_alloc(). nfsacl_encode() is
* invoked in contexts where a memory allocation failure is
diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
new file mode 100644
index 000000000000..dd715cdb6c04
--- /dev/null
+++ b/fs/nfs_common/nfslocalio.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/nfslocalio.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <net/netns/generic.h>
+
+#include "localio_trace.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFS localio protocol bypass support");
+
+static DEFINE_SPINLOCK(nfs_uuids_lock);
+
+/*
+ * Global list of nfs_uuid_t instances
+ * that is protected by nfs_uuids_lock.
+ */
+static LIST_HEAD(nfs_uuids);
+
+/*
+ * Lock ordering:
+ * 1: nfs_uuid->lock
+ * 2: nfs_uuids_lock
+ * 3: nfs_uuid->list_lock (aka nn->local_clients_lock)
+ *
+ * May skip locks in select cases, but never hold multiple
+ * locks out of order.
+ */
+
+void nfs_uuid_init(nfs_uuid_t *nfs_uuid)
+{
+ RCU_INIT_POINTER(nfs_uuid->net, NULL);
+ nfs_uuid->dom = NULL;
+ nfs_uuid->list_lock = NULL;
+ INIT_LIST_HEAD(&nfs_uuid->list);
+ INIT_LIST_HEAD(&nfs_uuid->files);
+ spin_lock_init(&nfs_uuid->lock);
+ nfs_uuid->nfs3_localio_probe_count = 0;
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_init);
+
+bool nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
+{
+ spin_lock(&nfs_uuid->lock);
+ if (rcu_access_pointer(nfs_uuid->net)) {
+ /* This nfs_uuid is already in use */
+ spin_unlock(&nfs_uuid->lock);
+ return false;
+ }
+
+ spin_lock(&nfs_uuids_lock);
+ if (!list_empty(&nfs_uuid->list)) {
+ /* This nfs_uuid is already in use */
+ spin_unlock(&nfs_uuids_lock);
+ spin_unlock(&nfs_uuid->lock);
+ return false;
+ }
+ list_add_tail(&nfs_uuid->list, &nfs_uuids);
+ spin_unlock(&nfs_uuids_lock);
+
+ uuid_gen(&nfs_uuid->uuid);
+ spin_unlock(&nfs_uuid->lock);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_begin);
+
+void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
+{
+ if (!rcu_access_pointer(nfs_uuid->net)) {
+ spin_lock(&nfs_uuid->lock);
+ if (!rcu_access_pointer(nfs_uuid->net)) {
+ /* Not local, remove from nfs_uuids */
+ spin_lock(&nfs_uuids_lock);
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(&nfs_uuids_lock);
+ }
+ spin_unlock(&nfs_uuid->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_end);
+
+static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ list_for_each_entry(nfs_uuid, &nfs_uuids, list)
+ if (uuid_equal(&nfs_uuid->uuid, uuid))
+ return nfs_uuid;
+
+ return NULL;
+}
+
+static struct module *nfsd_mod;
+
+void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
+ spinlock_t *list_lock, struct net *net,
+ struct auth_domain *dom, struct module *mod)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ spin_lock(&nfs_uuids_lock);
+ nfs_uuid = nfs_uuid_lookup_locked(uuid);
+ if (!nfs_uuid) {
+ spin_unlock(&nfs_uuids_lock);
+ return;
+ }
+
+ /*
+ * We don't hold a ref on the net, but instead put
+ * ourselves on @list (nn->local_clients) so the net
+ * pointer can be invalidated.
+ */
+ spin_lock(list_lock); /* list_lock is nn->local_clients_lock */
+ list_move(&nfs_uuid->list, list);
+ spin_unlock(list_lock);
+
+ spin_unlock(&nfs_uuids_lock);
+ /* Once nfs_uuid is parented to @list, avoid global nfs_uuids_lock */
+ spin_lock(&nfs_uuid->lock);
+
+ __module_get(mod);
+ nfsd_mod = mod;
+
+ nfs_uuid->list_lock = list_lock;
+ kref_get(&dom->ref);
+ nfs_uuid->dom = dom;
+ rcu_assign_pointer(nfs_uuid->net, net);
+ spin_unlock(&nfs_uuid->lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_is_local);
+
+void nfs_localio_enable_client(struct nfs_client *clp)
+{
+ /* nfs_uuid_is_local() does the actual enablement */
+ trace_nfs_localio_enable_client(clp);
+}
+EXPORT_SYMBOL_GPL(nfs_localio_enable_client);
+
+/*
+ * Cleanup the nfs_uuid_t embedded in an nfs_client.
+ * This is the long-form of nfs_uuid_init().
+ */
+static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid)
+{
+ struct nfs_file_localio *nfl;
+
+ spin_lock(&nfs_uuid->lock);
+ if (unlikely(!rcu_access_pointer(nfs_uuid->net))) {
+ spin_unlock(&nfs_uuid->lock);
+ return false;
+ }
+ RCU_INIT_POINTER(nfs_uuid->net, NULL);
+
+ if (nfs_uuid->dom) {
+ auth_domain_put(nfs_uuid->dom);
+ nfs_uuid->dom = NULL;
+ }
+
+ /* Walk the list of files and ensure their last references are dropped */
+
+ while ((nfl = list_first_entry_or_null(&nfs_uuid->files,
+ struct nfs_file_localio,
+ list)) != NULL) {
+ /* If nfl->nfs_uuid is already NULL, nfs_close_local_fh() is
+ * closing and we must wait, else we unlink and close.
+ */
+ if (rcu_access_pointer(nfl->nfs_uuid) == NULL) {
+ /* nfs_close_local_fh() is doing the
+ * close and we must wait until it unlinks.
+ */
+ wait_var_event_spinlock(nfs_uuid,
+ list_first_entry_or_null(
+ &nfs_uuid->files,
+ struct nfs_file_localio,
+ list) != nfl,
+ &nfs_uuid->lock);
+ continue;
+ }
+
+ /* Remove nfl from nfs_uuid->files list */
+ list_del_init(&nfl->list);
+ spin_unlock(&nfs_uuid->lock);
+
+ nfs_to_nfsd_file_put_local(&nfl->ro_file);
+ nfs_to_nfsd_file_put_local(&nfl->rw_file);
+ cond_resched();
+
+ spin_lock(&nfs_uuid->lock);
+ /* Now we can allow racing nfs_close_local_fh() to
+ * skip the locking.
+ */
+ store_release_wake_up(&nfl->nfs_uuid, RCU_INITIALIZER(NULL));
+ }
+
+ /* Remove client from nn->local_clients */
+ if (nfs_uuid->list_lock) {
+ spin_lock(nfs_uuid->list_lock);
+ BUG_ON(list_empty(&nfs_uuid->list));
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(nfs_uuid->list_lock);
+ nfs_uuid->list_lock = NULL;
+ }
+
+ module_put(nfsd_mod);
+ spin_unlock(&nfs_uuid->lock);
+
+ return true;
+}
+
+void nfs_localio_disable_client(struct nfs_client *clp)
+{
+ if (nfs_uuid_put(&clp->cl_uuid))
+ trace_nfs_localio_disable_client(clp);
+}
+EXPORT_SYMBOL_GPL(nfs_localio_disable_client);
+
+void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
+ spinlock_t *nn_local_clients_lock)
+{
+ LIST_HEAD(local_clients);
+ nfs_uuid_t *nfs_uuid, *tmp;
+ struct nfs_client *clp;
+
+ spin_lock(nn_local_clients_lock);
+ list_splice_init(nn_local_clients, &local_clients);
+ spin_unlock(nn_local_clients_lock);
+ list_for_each_entry_safe(nfs_uuid, tmp, &local_clients, list) {
+ if (WARN_ON(nfs_uuid->list_lock != nn_local_clients_lock))
+ break;
+ clp = container_of(nfs_uuid, struct nfs_client, cl_uuid);
+ nfs_localio_disable_client(clp);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients);
+
+static int nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl)
+{
+ int ret = 0;
+
+ /* Add nfl to nfs_uuid->files if it isn't already */
+ spin_lock(&nfs_uuid->lock);
+ if (rcu_access_pointer(nfs_uuid->net) == NULL) {
+ ret = -ENXIO;
+ } else if (list_empty(&nfl->list)) {
+ rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid);
+ list_add_tail(&nfl->list, &nfs_uuid->files);
+ }
+ spin_unlock(&nfs_uuid->lock);
+ return ret;
+}
+
+/*
+ * Caller is responsible for calling nfsd_net_put and
+ * nfsd_file_put (via nfs_to_nfsd_file_put_local).
+ */
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t fmode)
+{
+ struct net *net;
+ struct nfsd_file *localio;
+
+ /*
+ * Not running in nfsd context, so must safely get reference on nfsd_serv.
+ * But the server may already be shutting down; if so, disallow new localio.
+ * uuid->net is NOT a counted reference, but rcu_read_lock() ensures that
+ * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe
+ * and if it succeeds we will have an implied reference to the net.
+ *
+ * Otherwise NFS may not have ref on NFSD and therefore cannot safely
+ * make 'nfs_to' calls.
+ */
+ rcu_read_lock();
+ net = rcu_dereference(uuid->net);
+ if (!net || !nfs_to->nfsd_net_try_get(net)) {
+ rcu_read_unlock();
+ return ERR_PTR(-ENXIO);
+ }
+ rcu_read_unlock();
+ /* We have an implied reference to net thanks to nfsd_net_try_get */
+ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred,
+ nfs_fh, pnf, fmode);
+ if (!IS_ERR(localio) && nfs_uuid_add_file(uuid, nfl) < 0) {
+ /* Delete the cached file when racing with nfs_uuid_put() */
+ nfs_to_nfsd_file_put_local(pnf);
+ }
+ nfs_to_nfsd_net_put(net);
+
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_open_local_fh);
+
+void nfs_close_local_fh(struct nfs_file_localio *nfl)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ rcu_read_lock();
+ nfs_uuid = rcu_dereference(nfl->nfs_uuid);
+ if (!nfs_uuid) {
+ /* regular (non-LOCALIO) NFS will hammer this */
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock(&nfs_uuid->lock);
+ if (!rcu_access_pointer(nfl->nfs_uuid)) {
+ /* nfs_uuid_put has finished here */
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+ return;
+ }
+ if (list_empty(&nfl->list)) {
+ /* nfs_uuid_put() has started closing files, wait for it
+ * to finish
+ */
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+ wait_var_event(&nfl->nfs_uuid,
+ rcu_access_pointer(nfl->nfs_uuid) == NULL);
+ return;
+ }
+ /* tell nfs_uuid_put() to wait for us */
+ RCU_INIT_POINTER(nfl->nfs_uuid, NULL);
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+
+ nfs_to_nfsd_file_put_local(&nfl->ro_file);
+ nfs_to_nfsd_file_put_local(&nfl->rw_file);
+
+ /* Remove nfl from nfs_uuid->files list and signal nfs_uuid_put()
+ * that we are done. The moment we drop the spinlock the
+ * nfs_uuid could be freed.
+ */
+ spin_lock(&nfs_uuid->lock);
+ list_del_init(&nfl->list);
+ wake_up_var_locked(nfs_uuid, &nfs_uuid->lock);
+ spin_unlock(&nfs_uuid->lock);
+}
+EXPORT_SYMBOL_GPL(nfs_close_local_fh);
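The handshake between nfs_uuid_put() and nfs_close_local_fh() above amounts to a claim-or-wait protocol: whichever side claims the per-file entry first drops the file references, and the other side waits under the lock until the entry has been unlinked. Below is a minimal userspace sketch of that protocol, using a pthread mutex and condition variable in place of the nfs_uuid spinlock and the wait_var_event_spinlock()/wake_up_var_locked() pair; all names are illustrative, not kernel APIs, and the two claim markers of the real code are collapsed into one flag.

/*
 * Userspace sketch (not the kernel API) of the claim-or-wait handshake:
 * whichever side claims the entry first performs the close; the other
 * side waits until the entry has been unlinked and then returns.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool claimed;   /* analogue of nfl->nfs_uuid being cleared */
static bool unlinked;  /* analogue of list_del_init(&nfl->list)   */

static void close_file(const char *who)
{
	printf("%s closes the cached file\n", who);
}

/* Either the client shutdown path or the per-file close path calls this. */
static void put_or_wait(const char *who)
{
	pthread_mutex_lock(&lock);
	if (claimed) {
		/* The other side is closing; wait for it to unlink. */
		while (!unlinked)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		return;
	}
	claimed = true;          /* tell the other side to wait for us */
	pthread_mutex_unlock(&lock);

	close_file(who);         /* drop the file references outside the lock */

	pthread_mutex_lock(&lock);
	unlinked = true;         /* unlink and signal that we are done */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void *thread_fn(void *arg)
{
	put_or_wait(arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, thread_fn, (void *)"nfs_uuid_put");
	pthread_create(&b, NULL, thread_fn, (void *)"nfs_close_local_fh");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Built with cc -pthread, the output shows exactly one of the two callers performing the close, regardless of scheduling order.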
+
+/*
+ * The NFS LOCALIO code needs to call into NFSD using various symbols,
+ * but cannot be statically linked, because that will make the NFS
+ * module always depend on the NFSD module.
+ *
+ * 'nfs_to' provides NFS access to the NFSD functions needed for LOCALIO.
+ * Its lifetime is tightly coupled to the NFSD module and it will always
+ * be available to NFS LOCALIO, because any successful client<->server
+ * LOCALIO handshake results in a reference on the NFSD module (above),
+ * so NFS implicitly holds a reference to the NFSD module and the
+ * functions in the 'nfs_to' nfsd_localio_operations table cannot disappear.
+ *
+ * If the last NFS client using LOCALIO disconnects (and its reference
+ * on NFSD dropped) then NFSD could be unloaded, resulting in 'nfs_to'
+ * functions being invalid pointers. But if NFSD isn't loaded then NFS
+ * will not be able to handshake with NFSD and will have no cause to
+ * try to call 'nfs_to' function pointers. If/when NFSD is reloaded it
+ * will reinitialize the 'nfs_to' function pointers and make LOCALIO
+ * possible.
+ */
+const struct nfsd_localio_operations *nfs_to;
+EXPORT_SYMBOL_GPL(nfs_to);
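As a complement to the comment above, here is a small userspace sketch of the same pattern: a provider publishes a table of function pointers, and a consumer pins the provider with a reference before calling through that table, so the table cannot go away underneath it. All names below are hypothetical; they are not the kernel interfaces, only the shape of the design.

/*
 * Userspace sketch (hypothetical names) of the "ops table + pinned
 * provider" pattern: the provider publishes function pointers, and the
 * consumer takes a reference that keeps the provider alive before
 * calling through the table, mirroring how the LOCALIO handshake takes
 * a reference on the NFSD module before any 'nfs_to' call is made.
 */
#include <stdio.h>
#include <stdatomic.h>

struct provider_ops {
	int (*open_fh)(const char *path);
};

static atomic_int provider_refs;                /* analogue of the module refcount */
static const struct provider_ops *consumer_to;  /* analogue of 'nfs_to' */

/* --- provider side ("NFSD") --- */
static int provider_open_fh(const char *path)
{
	printf("provider opens %s\n", path);
	return 0;
}

static const struct provider_ops provider_table = {
	.open_fh = provider_open_fh,
};

static void provider_load(void)
{
	consumer_to = &provider_table;           /* published at load time */
}

/* --- consumer side ("NFS") --- */
static int consumer_handshake(void)
{
	if (!consumer_to)
		return -1;                       /* provider not loaded */
	atomic_fetch_add(&provider_refs, 1);     /* pin the provider */
	return 0;
}

static void consumer_disconnect(void)
{
	atomic_fetch_sub(&provider_refs, 1);     /* provider may now unload */
}

int main(void)
{
	provider_load();
	if (consumer_handshake() == 0) {
		consumer_to->open_fh("/export/file"); /* safe: provider is pinned */
		consumer_disconnect();
	}
	return 0;
}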
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 3d1d17256a91..0b5c1a0bf1cf 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -4,10 +4,15 @@ config NFSD
depends on INET
depends on FILE_LOCKING
depends on FSNOTIFY
+ select CRC32
+ select CRYPTO_LIB_MD5 if NFSD_LEGACY_CLIENT_TRACKING
+ select CRYPTO_LIB_SHA256 if NFSD_V4
select LOCKD
select SUNRPC
select EXPORTFS
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFSD_V2_ACL
+ select NFS_ACL_SUPPORT if NFSD_V3_ACL
depends on MULTIUSER
help
Choose Y here if you want to allow other computers to access
@@ -26,28 +31,29 @@ config NFSD
Below you can choose which versions of the NFS protocol are
available to clients mounting the NFS server on this system.
- Support for NFS version 2 (RFC 1094) is always available when
+ Support for NFS version 3 (RFC 1813) is always available when
CONFIG_NFSD is selected.
If unsure, say N.
-config NFSD_V2_ACL
- bool
- depends on NFSD
-
-config NFSD_V3
- bool "NFS server support for NFS version 3"
+config NFSD_V2
+ bool "NFS server support for NFS version 2 (DEPRECATED)"
depends on NFSD
+ default n
help
- This option enables support in your system's NFS server for
- version 3 of the NFS protocol (RFC 1813).
+ NFSv2 (RFC 1094) was the first publicly-released version of NFS.
+ Unless you are hosting ancient (1990's era) NFS clients, you don't
+ need this.
+
+ If unsure, say N.
- If unsure, say Y.
+config NFSD_V2_ACL
+ bool "NFS server support for the NFSv2 ACL protocol extension"
+ depends on NFSD_V2
config NFSD_V3_ACL
bool "NFS server support for the NFSv3 ACL protocol extension"
- depends on NFSD_V3
- select NFSD_V2_ACL
+ depends on NFSD
help
Solaris NFS servers support an auxiliary NFSv3 ACL protocol that
never became an official part of the NFS version 3 protocol.
@@ -70,12 +76,9 @@ config NFSD_V3_ACL
config NFSD_V4
bool "NFS server support for NFS version 4"
depends on NFSD && PROC_FS
- select NFSD_V3
select FS_POSIX_ACL
- select SUNRPC_GSS
- select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_SHA256
+ select RPCSEC_GSS_KRB5
+ select CRYPTO # required by RPCSEC_GSS_KRB5
select GRACE_PERIOD
select NFS_V4_2_SSC_HELPER if NFS_V4_2
help
@@ -157,3 +160,29 @@ config NFSD_V4_SECURITY_LABEL
If you do not wish to enable fine-grained security labels SELinux or
Smack policies on NFSv4 files, say N.
+
+config NFSD_LEGACY_CLIENT_TRACKING
+ bool "Support legacy NFSv4 client tracking methods (DEPRECATED)"
+ depends on NFSD_V4
+ default n
+ help
+ The NFSv4 server needs to store a small amount of information on
+ stable storage in order to handle state recovery after reboot. Most
+ modern deployments upcall to a userland daemon for this (nfsdcld),
+ but older NFS servers may store information directly in a
+ recoverydir, or spawn a process directly using a usermodehelper
+ upcall.
+
+ These legacy client tracking methods have proven to be problematic
+ and will be removed in the future. Say Y here if you need support
+ for them in the interim.
+
+config NFSD_V4_DELEG_TIMESTAMPS
+ bool "Support delegated timestamps"
+ depends on NFSD_V4
+ default n
+ help
+ NFSD implements delegated timestamps according to
+ draft-ietf-nfsv4-delstid-08 "Extending the Opening of Files". This
+ is currently an experimental feature and is therefore left disabled
+ by default.
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 3f0983e93a99..55744bb786c9 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -10,15 +10,32 @@ obj-$(CONFIG_NFSD) += nfsd.o
# this one should be compiled first, as the tracing macros can easily blow up
nfsd-y += trace.o
-nfsd-y += nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
- export.o auth.o lockd.o nfscache.o nfsxdr.o \
- stats.o filecache.o
+nfsd-y += nfssvc.o nfsctl.o nfsfh.o vfs.o \
+ export.o auth.o lockd.o nfscache.o \
+ stats.o filecache.o nfs3proc.o nfs3xdr.o \
+ netlink.o
+nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
-nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
- nfs4acl.o nfs4callback.o nfs4recover.o
+ nfs4acl.o nfs4callback.o nfs4recover.o nfs4xdr_gen.o
nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
+nfsd-$(CONFIG_NFS_LOCALIO) += localio.o
+nfsd-$(CONFIG_DEBUG_FS) += debugfs.o
+
+
+.PHONY: xdrgen
+
+xdrgen: ../../include/linux/sunrpc/xdrgen/nfs4_1.h nfs4xdr_gen.h nfs4xdr_gen.c
+
+../../include/linux/sunrpc/xdrgen/nfs4_1.h: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen definitions $< > $@
+
+nfs4xdr_gen.h: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen declarations $< > $@
+
+nfs4xdr_gen.c: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen source $< > $@
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h
index ba14d2f4b64f..4b7324458a94 100644
--- a/fs/nfsd/acl.h
+++ b/fs/nfsd/acl.h
@@ -38,6 +38,8 @@
struct nfs4_acl;
struct svc_fh;
struct svc_rqst;
+struct nfsd_attrs;
+enum nfs_ftype4;
int nfs4_acl_bytes(int entries);
int nfs4_acl_get_whotype(char *, u32);
@@ -45,7 +47,7 @@ __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who);
int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
struct nfs4_acl **acl);
-__be32 nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct nfs4_acl *acl);
+__be32 nfsd4_acl_to_attr(enum nfs_ftype4 type, struct nfs4_acl *acl,
+ struct nfsd_attrs *attr);
#endif /* LINUX_NFS4_ACL_H */
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index fdf2aad73470..4dc327e02456 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -5,39 +5,37 @@
#include "nfsd.h"
#include "auth.h"
-int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
for (f = exp->ex_flavors; f < end; f++) {
- if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
+ if (f->pseudoflavor == cred->cr_flavor)
return f->flags;
}
return exp->ex_flags;
}
-int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
{
struct group_info *rqgi;
struct group_info *gi;
struct cred *new;
int i;
- int flags = nfsexp_flags(rqstp, exp);
-
- validate_process_creds();
+ int flags = nfsexp_flags(cred, exp);
/* discard any old override before preparing the new set */
- revert_creds(get_cred(current_real_cred()));
+ put_cred(revert_creds(get_cred(current_real_cred())));
new = prepare_creds();
if (!new)
return -ENOMEM;
- new->fsuid = rqstp->rq_cred.cr_uid;
- new->fsgid = rqstp->rq_cred.cr_gid;
+ new->fsuid = cred->cr_uid;
+ new->fsgid = cred->cr_gid;
- rqgi = rqstp->rq_cred.cr_group_info;
+ rqgi = cred->cr_group_info;
if (flags & NFSEXP_ALLSQUASH) {
new->fsuid = exp->ex_anon_uid;
@@ -81,10 +79,7 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
else
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
- validate_process_creds();
put_cred(override_creds(new));
- put_cred(new);
- validate_process_creds();
return 0;
oom:
diff --git a/fs/nfsd/auth.h b/fs/nfsd/auth.h
index dbd66424f600..8c5031bbbcee 100644
--- a/fs/nfsd/auth.h
+++ b/fs/nfsd/auth.h
@@ -12,6 +12,6 @@
* Set the current process's fsuid/fsgid etc to those of the NFS
* client user
*/
-int nfsd_setuser(struct svc_rqst *, struct svc_export *);
+int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp);
#endif /* LINUX_NFSD_AUTH_H */
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index e5c0982a381d..afa16d7a8013 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -4,7 +4,6 @@
*/
#include <linux/exportfs.h>
#include <linux/iomap.h>
-#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/pr.h>
@@ -13,67 +12,50 @@
#include "blocklayoutxdr.h"
#include "pnfs.h"
#include "filecache.h"
+#include "vfs.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
+/*
+ * Get an extent from the file system that starts at offset or below
+ * and may be shorter than the requested length.
+ */
static __be32
-nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
- struct nfsd4_layoutget *args)
+nfsd4_block_map_extent(struct inode *inode, const struct svc_fh *fhp,
+ u64 offset, u64 length, u32 iomode, u64 minlength,
+ struct pnfs_block_extent *bex)
{
- struct nfsd4_layout_seg *seg = &args->lg_seg;
struct super_block *sb = inode->i_sb;
- u32 block_size = i_blocksize(inode);
- struct pnfs_block_extent *bex;
struct iomap iomap;
u32 device_generation = 0;
int error;
- if (seg->offset & (block_size - 1)) {
- dprintk("pnfsd: I/O misaligned\n");
- goto out_layoutunavailable;
- }
-
- /*
- * Some clients barf on non-zero block numbers for NONE or INVALID
- * layouts, so make sure to zero the whole structure.
- */
- error = -ENOMEM;
- bex = kzalloc(sizeof(*bex), GFP_KERNEL);
- if (!bex)
- goto out_error;
- args->lg_content = bex;
-
- error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
- &iomap, seg->iomode != IOMODE_READ,
- &device_generation);
+ error = sb->s_export_op->map_blocks(inode, offset, length, &iomap,
+ iomode != IOMODE_READ, &device_generation);
if (error) {
if (error == -ENXIO)
- goto out_layoutunavailable;
- goto out_error;
- }
-
- if (iomap.length < args->lg_minlength) {
- dprintk("pnfsd: extent smaller than minlength\n");
- goto out_layoutunavailable;
+ return nfserr_layoutunavailable;
+ return nfserrno(error);
}
switch (iomap.type) {
case IOMAP_MAPPED:
- if (seg->iomode == IOMODE_READ)
+ if (iomode == IOMODE_READ)
bex->es = PNFS_BLOCK_READ_DATA;
else
bex->es = PNFS_BLOCK_READWRITE_DATA;
bex->soff = iomap.addr;
break;
case IOMAP_UNWRITTEN:
- if (seg->iomode & IOMODE_RW) {
+ if (iomode & IOMODE_RW) {
/*
* Crack monkey special case from section 2.3.1.
*/
- if (args->lg_minlength == 0) {
+ if (minlength == 0) {
dprintk("pnfsd: no soup for you!\n");
- goto out_layoutunavailable;
+ return nfserr_layoutunavailable;
}
bex->es = PNFS_BLOCK_INVALID_DATA;
@@ -82,7 +64,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
}
fallthrough;
case IOMAP_HOLE:
- if (seg->iomode == IOMODE_READ) {
+ if (iomode == IOMODE_READ) {
bex->es = PNFS_BLOCK_NONE_DATA;
break;
}
@@ -90,46 +72,126 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
case IOMAP_DELALLOC:
default:
WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
- goto out_layoutunavailable;
+ return nfserr_layoutunavailable;
}
error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
if (error)
- goto out_error;
+ return nfserrno(error);
+
bex->foff = iomap.offset;
bex->len = iomap.length;
+ return nfs_ok;
+}
- seg->offset = iomap.offset;
- seg->length = iomap.length;
+static __be32
+nfsd4_block_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *args)
+{
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ struct pnfs_block_layout *bl;
+ struct pnfs_block_extent *first_bex, *last_bex;
+ u64 offset = seg->offset, length = seg->length;
+ u32 i, nr_extents_max, block_size = i_blocksize(inode);
+ __be32 nfserr;
- dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
- return 0;
+ if (locks_in_grace(SVC_NET(rqstp)))
+ return nfserr_grace;
+
+ nfserr = nfserr_layoutunavailable;
+ if (seg->offset & (block_size - 1)) {
+ dprintk("pnfsd: I/O misaligned\n");
+ goto out_error;
+ }
+
+ /*
+ * RFC 8881, section 3.3.17:
+ * The layout4 data type defines a layout for a file.
+ *
+ * RFC 8881, section 18.43.3:
+ * The loga_maxcount field specifies the maximum layout size
+ * (in bytes) that the client can handle. If the size of the
+ * layout structure exceeds the size specified by maxcount,
+ * the metadata server will return the NFS4ERR_TOOSMALL error.
+ */
+ nfserr = nfserr_toosmall;
+ if (args->lg_maxcount < PNFS_BLOCK_LAYOUT4_SIZE +
+ PNFS_BLOCK_EXTENT_SIZE)
+ goto out_error;
+
+ /*
+ * Limit the maximum layout size to avoid allocating
+ * a large buffer on the server for each layout request.
+ */
+ nr_extents_max = (min(args->lg_maxcount, PAGE_SIZE) -
+ PNFS_BLOCK_LAYOUT4_SIZE) / PNFS_BLOCK_EXTENT_SIZE;
+
+ /*
+ * Some clients barf on non-zero block numbers for NONE or INVALID
+ * layouts, so make sure to zero the whole structure.
+ */
+ nfserr = nfserrno(-ENOMEM);
+ bl = kzalloc(struct_size(bl, extents, nr_extents_max), GFP_KERNEL);
+ if (!bl)
+ goto out_error;
+ bl->nr_extents = nr_extents_max;
+ args->lg_content = bl;
+
+ for (i = 0; i < bl->nr_extents; i++) {
+ struct pnfs_block_extent *bex = bl->extents + i;
+ u64 bex_length;
+
+ nfserr = nfsd4_block_map_extent(inode, fhp, offset, length,
+ seg->iomode, args->lg_minlength, bex);
+ if (nfserr != nfs_ok)
+ goto out_error;
+
+ bex_length = bex->len - (offset - bex->foff);
+ if (bex_length >= length) {
+ bl->nr_extents = i + 1;
+ break;
+ }
+
+ offset = bex->foff + bex->len;
+ length -= bex_length;
+ }
+
+ first_bex = bl->extents;
+ last_bex = bl->extents + bl->nr_extents - 1;
+
+ nfserr = nfserr_layoutunavailable;
+ length = last_bex->foff + last_bex->len - seg->offset;
+ if (length < args->lg_minlength) {
+ dprintk("pnfsd: extent smaller than minlength\n");
+ goto out_error;
+ }
+
+ seg->offset = first_bex->foff;
+ seg->length = last_bex->foff - first_bex->foff + last_bex->len;
+ return nfs_ok;
out_error:
seg->length = 0;
- return nfserrno(error);
-out_layoutunavailable:
- seg->length = 0;
- return nfserr_layoutunavailable;
+ return nfserr;
}
static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
struct iomap *iomaps, int nr_iomaps)
{
- loff_t new_size = lcp->lc_last_wr + 1;
+ struct timespec64 mtime = inode_get_mtime(inode);
struct iattr iattr = { .ia_valid = 0 };
int error;
if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
- timespec64_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
+ timespec64_compare(&lcp->lc_mtime, &mtime) < 0)
lcp->lc_mtime = current_time(inode);
iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
- if (new_size > i_size_read(inode)) {
+ if (lcp->lc_size_chg) {
iattr.ia_valid |= ATTR_SIZE;
- iattr.ia_size = new_size;
+ iattr.ia_size = lcp->lc_newsize;
}
error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
@@ -146,8 +208,7 @@ nfsd4_block_get_device_info_simple(struct super_block *sb,
struct pnfs_block_deviceaddr *dev;
struct pnfs_block_volume *b;
- dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
- sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
@@ -173,16 +234,20 @@ nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
}
static __be32
-nfsd4_block_proc_layoutcommit(struct inode *inode,
+nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp)
{
struct iomap *iomaps;
int nr_iomaps;
+ __be32 nfserr;
+
+ rqstp->rq_arg = lcp->lc_up_layout;
+ svcxdr_init_decode(rqstp);
- nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, i_blocksize(inode));
- if (nr_iomaps < 0)
- return nfserrno(nr_iomaps);
+ nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream,
+ &iomaps, &nr_iomaps, i_blocksize(inode));
+ if (nfserr != nfs_ok)
+ return nfserr;
return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}
@@ -254,8 +319,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
const struct pr_ops *ops;
int ret;
- dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
- sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
@@ -297,6 +361,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
out_free_dev:
kfree(dev);
+ gdp->gd_device = NULL;
return ret;
}
@@ -311,28 +376,35 @@ nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}
static __be32
-nfsd4_scsi_proc_layoutcommit(struct inode *inode,
+nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp)
{
struct iomap *iomaps;
int nr_iomaps;
+ __be32 nfserr;
+
+ rqstp->rq_arg = lcp->lc_up_layout;
+ svcxdr_init_decode(rqstp);
- nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, i_blocksize(inode));
- if (nr_iomaps < 0)
- return nfserrno(nr_iomaps);
+ nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream,
+ &iomaps, &nr_iomaps, i_blocksize(inode));
+ if (nfserr != nfs_ok)
+ return nfserr;
return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}
static void
-nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls)
+nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
struct nfs4_client *clp = ls->ls_stid.sc_client;
- struct block_device *bdev = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_bdev;
+ struct block_device *bdev = file->nf_file->f_path.mnt->mnt_sb->s_bdev;
+ int status;
- bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
- nfsd4_scsi_pr_key(clp), 0, true);
+ status = bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
+ nfsd4_scsi_pr_key(clp),
+ PR_EXCLUSIVE_ACCESS_REG_ONLY, true);
+ trace_nfsd_pnfs_fence(clp, bdev->bd_disk->disk_name, status);
}
const struct nfsd4_layout_ops scsi_layout_ops = {
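For a feel of the sizing logic in nfsd4_block_proc_layoutget() above, the sketch below reproduces the nr_extents_max arithmetic in userspace. The 28-byte layout header matches PNFS_BLOCK_LAYOUT4_SIZE as defined later in blocklayoutxdr.h; the 44-byte per-extent size is an assumption derived from the wire format encoded by nfsd4_block_encode_layoutget() (a 16-byte deviceid4, three 64-bit fields and one 32-bit field), and PAGE_SIZE is assumed to be 4096.

/*
 * Worked example (userspace sketch, not kernel code) of the extent-count
 * cap computed in nfsd4_block_proc_layoutget().
 */
#include <stdint.h>
#include <stdio.h>

#define XDR_WORD		4u
#define LAYOUT4_HDR_SIZE	(7 * XDR_WORD)			/* 28 bytes, see PNFS_BLOCK_LAYOUT4_SIZE */
#define EXTENT_SIZE		(16 + 3 * 8 + XDR_WORD)		/* 44 bytes, assumed per-extent wire size */
#define PAGE_SIZE_ASSUMED	4096u

static uint32_t nr_extents_max(uint32_t lg_maxcount)
{
	uint32_t budget = lg_maxcount < PAGE_SIZE_ASSUMED ?
			  lg_maxcount : PAGE_SIZE_ASSUMED;

	if (budget < LAYOUT4_HDR_SIZE + EXTENT_SIZE)
		return 0;	/* the caller would return NFS4ERR_TOOSMALL */
	return (budget - LAYOUT4_HDR_SIZE) / EXTENT_SIZE;
}

int main(void)
{
	/* A client advertising a 1 KB loga_maxcount gets at most 22 extents. */
	printf("%u\n", nr_extents_max(1024));	/* (1024 - 28) / 44 = 22 */
	/* Larger requests are clamped to one page: (4096 - 28) / 44 = 92. */
	printf("%u\n", nr_extents_max(65536));
	return 0;
}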
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 442543304930..196ef4245604 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -9,16 +9,30 @@
#include "nfsd.h"
#include "blocklayoutxdr.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
+/**
+ * nfsd4_block_encode_layoutget - encode block/scsi layout extent array
+ * @xdr: stream for data encoding
+ * @lgp: layoutget content, actually an array of extents to encode
+ *
+ * Encode the opaque loc_body field in the layoutget response. Since the
+ * pnfs_block_layout4 and pnfs_scsi_layout4 structures on the wire are
+ * the same, this function is used by both layout drivers.
+ *
+ * Return values:
+ * %nfs_ok: Success, all extents encoded into @xdr
+ * %nfserr_toosmall: Not enough space in @xdr to encode all the data
+ */
__be32
nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp)
+ const struct nfsd4_layoutget *lgp)
{
- struct pnfs_block_extent *b = lgp->lg_content;
- int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
+ const struct pnfs_block_layout *bl = lgp->lg_content;
+ u32 i, len = sizeof(__be32) + bl->nr_extents * PNFS_BLOCK_EXTENT_SIZE;
__be32 *p;
p = xdr_reserve_space(xdr, sizeof(__be32) + len);
@@ -26,15 +40,19 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
return nfserr_toosmall;
*p++ = cpu_to_be32(len);
- *p++ = cpu_to_be32(1); /* we always return a single extent */
-
- p = xdr_encode_opaque_fixed(p, &b->vol_id,
- sizeof(struct nfsd4_deviceid));
- p = xdr_encode_hyper(p, b->foff);
- p = xdr_encode_hyper(p, b->len);
- p = xdr_encode_hyper(p, b->soff);
- *p++ = cpu_to_be32(b->es);
- return 0;
+ *p++ = cpu_to_be32(bl->nr_extents);
+
+ for (i = 0; i < bl->nr_extents; i++) {
+ const struct pnfs_block_extent *bex = bl->extents + i;
+
+ p = svcxdr_encode_deviceid4(p, &bex->vol_id);
+ p = xdr_encode_hyper(p, bex->foff);
+ p = xdr_encode_hyper(p, bex->len);
+ p = xdr_encode_hyper(p, bex->soff);
+ *p++ = cpu_to_be32(bex->es);
+ }
+
+ return nfs_ok;
}
static int
@@ -76,12 +94,21 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
__be32
nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp)
+ const struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_block_deviceaddr *dev = gdp->gd_device;
int len = sizeof(__be32), ret, i;
__be32 *p;
+ /*
+ * See paragraph 5 of RFC 8881 S18.40.3.
+ */
+ if (!gdp->gd_maxcount) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
p = xdr_reserve_space(xdr, len + sizeof(__be32));
if (!p)
return nfserr_resource;
@@ -102,64 +129,86 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
return 0;
}
-int
-nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size)
+/**
+ * nfsd4_block_decode_layoutupdate - decode the block layout extent array
+ * @xdr: subbuf set to the encoded array
+ * @iomapp: pointer to store the decoded extent array
+ * @nr_iomapsp: pointer to store the number of extents
+ * @block_size: alignment of extent offset and length
+ *
+ * This function decodes the opaque field of the layoutupdate4 structure
+ * in a layoutcommit request for the block layout driver. The field is
+ * actually an array of extents sent by the client. It also checks that
+ * the file offset, storage offset and length of each extent are aligned
+ * by @block_size.
+ *
+ * Return values:
+ * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
+ * %nfserr_bad_xdr: The encoded array in @xdr is invalid
+ * %nfserr_inval: An unaligned extent found
+ * %nfserr_delay: Failed to allocate memory for @iomapp
+ */
+__be32
+nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
+ int *nr_iomapsp, u32 block_size)
{
struct iomap *iomaps;
- u32 nr_iomaps, i;
+ u32 nr_iomaps, expected, len, i;
+ __be32 nfserr;
- if (len < sizeof(u32)) {
- dprintk("%s: extent array too small: %u\n", __func__, len);
- return -EINVAL;
- }
- len -= sizeof(u32);
- if (len % PNFS_BLOCK_EXTENT_SIZE) {
- dprintk("%s: extent array invalid: %u\n", __func__, len);
- return -EINVAL;
- }
+ if (xdr_stream_decode_u32(xdr, &nr_iomaps))
+ return nfserr_bad_xdr;
- nr_iomaps = be32_to_cpup(p++);
- if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) {
- dprintk("%s: extent array size mismatch: %u/%u\n",
- __func__, len, nr_iomaps);
- return -EINVAL;
- }
+ len = sizeof(__be32) + xdr_stream_remaining(xdr);
+ expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE;
+ if (len != expected)
+ return nfserr_bad_xdr;
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
- if (!iomaps) {
- dprintk("%s: failed to allocate extent array\n", __func__);
- return -ENOMEM;
- }
+ if (!iomaps)
+ return nfserr_delay;
for (i = 0; i < nr_iomaps; i++) {
struct pnfs_block_extent bex;
- memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
- p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+ if (nfsd4_decode_deviceid4(xdr, &bex.vol_id)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
- p = xdr_decode_hyper(p, &bex.foff);
+ if (xdr_stream_decode_u64(xdr, &bex.foff)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (bex.foff & (block_size - 1)) {
- dprintk("%s: unaligned offset 0x%llx\n",
- __func__, bex.foff);
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u64(xdr, &bex.len)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- p = xdr_decode_hyper(p, &bex.len);
if (bex.len & (block_size - 1)) {
- dprintk("%s: unaligned length 0x%llx\n",
- __func__, bex.foff);
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u64(xdr, &bex.soff)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- p = xdr_decode_hyper(p, &bex.soff);
if (bex.soff & (block_size - 1)) {
- dprintk("%s: unaligned disk offset 0x%llx\n",
- __func__, bex.soff);
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u32(xdr, &bex.es)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- bex.es = be32_to_cpup(p++);
if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
- dprintk("%s: incorrect extent state %d\n",
- __func__, bex.es);
+ nfserr = nfserr_inval;
goto fail;
}
@@ -168,59 +217,79 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
}
*iomapp = iomaps;
- return nr_iomaps;
+ *nr_iomapsp = nr_iomaps;
+ return nfs_ok;
fail:
kfree(iomaps);
- return -EINVAL;
+ return nfserr;
}
-int
-nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size)
+/**
+ * nfsd4_scsi_decode_layoutupdate - decode the scsi layout extent array
+ * @xdr: subbuf set to the encoded array
+ * @iomapp: pointer to store the decoded extent array
+ * @nr_iomapsp: pointer to store the number of extents
+ * @block_size: alignment of extent offset and length
+ *
+ * This function decodes the opaque field of the layoutupdate4 structure
+ * in a layoutcommit request for the scsi layout driver. The field is
+ * actually an array of extents sent by the client. It also checks that
+ * the offset and length of each extent are aligned by @block_size.
+ *
+ * Return values:
+ * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
+ * %nfserr_bad_xdr: The encoded array in @xdr is invalid
+ * %nfserr_inval: An unaligned extent found
+ * %nfserr_delay: Failed to allocate memory for @iomapp
+ */
+__be32
+nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
+ int *nr_iomapsp, u32 block_size)
{
struct iomap *iomaps;
- u32 nr_iomaps, expected, i;
+ u32 nr_iomaps, expected, len, i;
+ __be32 nfserr;
- if (len < sizeof(u32)) {
- dprintk("%s: extent array too small: %u\n", __func__, len);
- return -EINVAL;
- }
+ if (xdr_stream_decode_u32(xdr, &nr_iomaps))
+ return nfserr_bad_xdr;
- nr_iomaps = be32_to_cpup(p++);
+ len = sizeof(__be32) + xdr_stream_remaining(xdr);
expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE;
- if (len != expected) {
- dprintk("%s: extent array size mismatch: %u/%u\n",
- __func__, len, expected);
- return -EINVAL;
- }
+ if (len != expected)
+ return nfserr_bad_xdr;
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
- if (!iomaps) {
- dprintk("%s: failed to allocate extent array\n", __func__);
- return -ENOMEM;
- }
+ if (!iomaps)
+ return nfserr_delay;
for (i = 0; i < nr_iomaps; i++) {
u64 val;
- p = xdr_decode_hyper(p, &val);
+ if (xdr_stream_decode_u64(xdr, &val)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (val & (block_size - 1)) {
- dprintk("%s: unaligned offset 0x%llx\n", __func__, val);
+ nfserr = nfserr_inval;
goto fail;
}
iomaps[i].offset = val;
- p = xdr_decode_hyper(p, &val);
+ if (xdr_stream_decode_u64(xdr, &val)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (val & (block_size - 1)) {
- dprintk("%s: unaligned length 0x%llx\n", __func__, val);
+ nfserr = nfserr_inval;
goto fail;
}
iomaps[i].length = val;
}
*iomapp = iomaps;
- return nr_iomaps;
+ *nr_iomapsp = nr_iomaps;
+ return nfs_ok;
fail:
kfree(iomaps);
- return -EINVAL;
+ return nfserr;
}
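Both decoders above reject extents whose offset, length or storage offset is not a multiple of the block size using the mask (block_size - 1), which is valid because i_blocksize() always returns a power of two. A tiny standalone illustration of that test:

/*
 * For a power-of-two block_size, (val & (block_size - 1)) is non-zero
 * exactly when val is not a multiple of block_size.
 */
#include <assert.h>
#include <stdint.h>

static int is_unaligned(uint64_t val, uint32_t block_size)
{
	return (val & (block_size - 1)) != 0;
}

int main(void)
{
	assert(!is_unaligned(8192, 4096));	/* 2 * 4096: aligned        */
	assert(is_unaligned(4100, 4096));	/* stray 4 bytes: rejected  */
	return 0;
}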
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index bc5166bfe46b..2e0c6c7d2b42 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -8,6 +8,15 @@
struct iomap;
struct xdr_stream;
+/* On the wire size of the layout4 struct with zero number of extents */
+#define PNFS_BLOCK_LAYOUT4_SIZE \
+ (sizeof(__be32) * 2 + /* offset4 */ \
+ sizeof(__be32) * 2 + /* length4 */ \
+ sizeof(__be32) + /* layoutiomode4 */ \
+ sizeof(__be32) + /* layouttype4 */ \
+ sizeof(__be32) + /* number of bytes */ \
+ sizeof(__be32)) /* number of extents */
+
struct pnfs_block_extent {
struct nfsd4_deviceid vol_id;
u64 foff;
@@ -21,6 +30,11 @@ struct pnfs_block_range {
u64 len;
};
+struct pnfs_block_layout {
+ u32 nr_extents;
+ struct pnfs_block_extent extents[] __counted_by(nr_extents);
+};
+
/*
* Random upper cap for the uuid length to avoid unbounded allocation.
* Not actually limited by the protocol.
@@ -47,16 +61,16 @@ struct pnfs_block_volume {
struct pnfs_block_deviceaddr {
u32 nr_volumes;
- struct pnfs_block_volume volumes[];
+ struct pnfs_block_volume volumes[] __counted_by(nr_volumes);
};
__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp);
+ const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp);
-int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size);
-int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
- u32 block_size);
+ const struct nfsd4_layoutget *lgp);
+__be32 nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr,
+ struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
+__be32 nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr,
+ struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
#endif /* _NFSD_BLOCKLAYOUTXDR_H */
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 65c331f75e9c..bb7addef4a31 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -10,7 +10,7 @@
#define NFSCACHE_H
#include <linux/sunrpc/svc.h>
-#include "netns.h"
+#include "nfsd.h"
/*
* Representation of a reply cache entry.
@@ -19,7 +19,7 @@
* typical sockaddr_storage. This is for space reasons, since sockaddr_storage
* is much larger than a sockaddr_in6.
*/
-struct svc_cacherep {
+struct nfsd_cacherep {
struct {
/* Keep often-read xid, csum in the same cache line: */
__be32 k_xid;
@@ -82,8 +82,10 @@ int nfsd_drc_slab_create(void);
void nfsd_drc_slab_free(void);
int nfsd_reply_cache_init(struct nfsd_net *);
void nfsd_reply_cache_shutdown(struct nfsd_net *);
-int nfsd_cache_lookup(struct svc_rqst *);
-void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
-int nfsd_reply_cache_stats_open(struct inode *, struct file *);
+int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
+ unsigned int len, struct nfsd_cacherep **cacherep);
+void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ int cachetype, __be32 *statp);
+int nfsd_reply_cache_stats_show(struct seq_file *m, void *v);
#endif /* NFSCACHE_H */
diff --git a/fs/nfsd/debugfs.c b/fs/nfsd/debugfs.c
new file mode 100644
index 000000000000..7f44689e0a53
--- /dev/null
+++ b/fs/nfsd/debugfs.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include "nfsd.h"
+
+static struct dentry *nfsd_top_dir __read_mostly;
+
+/*
+ * /sys/kernel/debug/nfsd/disable-splice-read
+ *
+ * Contents:
+ * %0: NFS READ is allowed to use page splicing
+ * %1: NFS READ uses only iov iter read
+ *
+ * The default value of this setting is zero (page splicing is
+ * allowed). This setting takes immediate effect for all NFS
+ * versions, all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_dsr_get(void *data, u64 *val)
+{
+ *val = nfsd_disable_splice_read ? 1 : 0;
+ return 0;
+}
+
+static int nfsd_dsr_set(void *data, u64 val)
+{
+ nfsd_disable_splice_read = (val > 0);
+ if (!nfsd_disable_splice_read) {
+ /*
+ * Must use buffered I/O if splice_read is enabled.
+ */
+ nfsd_io_cache_read = NFSD_IO_BUFFERED;
+ }
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_dsr_fops, nfsd_dsr_get, nfsd_dsr_set, "%llu\n");
+
+/*
+ * /sys/kernel/debug/nfsd/io_cache_read
+ *
+ * Contents:
+ * %0: NFS READ will use buffered IO
+ * %1: NFS READ will use dontcache (buffered IO w/ dropbehind)
+ * %2: NFS READ will use direct IO
+ *
+ * This setting takes immediate effect for all NFS versions,
+ * all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_io_cache_read_get(void *data, u64 *val)
+{
+ *val = nfsd_io_cache_read;
+ return 0;
+}
+
+static int nfsd_io_cache_read_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case NFSD_IO_BUFFERED:
+ nfsd_io_cache_read = NFSD_IO_BUFFERED;
+ break;
+ case NFSD_IO_DONTCACHE:
+ case NFSD_IO_DIRECT:
+ /*
+ * Must disable splice_read when enabling
+ * NFSD_IO_DONTCACHE or NFSD_IO_DIRECT.
+ */
+ nfsd_disable_splice_read = true;
+ nfsd_io_cache_read = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_io_cache_read_fops, nfsd_io_cache_read_get,
+ nfsd_io_cache_read_set, "%llu\n");
+
+/*
+ * /sys/kernel/debug/nfsd/io_cache_write
+ *
+ * Contents:
+ * %0: NFS WRITE will use buffered IO
+ * %1: NFS WRITE will use dontcache (buffered IO w/ dropbehind)
+ *
+ * This setting takes immediate effect for all NFS versions,
+ * all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_io_cache_write_get(void *data, u64 *val)
+{
+ *val = nfsd_io_cache_write;
+ return 0;
+}
+
+static int nfsd_io_cache_write_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case NFSD_IO_BUFFERED:
+ case NFSD_IO_DONTCACHE:
+ case NFSD_IO_DIRECT:
+ nfsd_io_cache_write = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_io_cache_write_fops, nfsd_io_cache_write_get,
+ nfsd_io_cache_write_set, "%llu\n");
+
+void nfsd_debugfs_exit(void)
+{
+ debugfs_remove_recursive(nfsd_top_dir);
+ nfsd_top_dir = NULL;
+}
+
+void nfsd_debugfs_init(void)
+{
+ nfsd_top_dir = debugfs_create_dir("nfsd", NULL);
+
+ debugfs_create_file("disable-splice-read", S_IWUSR | S_IRUGO,
+ nfsd_top_dir, NULL, &nfsd_dsr_fops);
+
+ debugfs_create_file("io_cache_read", 0644, nfsd_top_dir, NULL,
+ &nfsd_io_cache_read_fops);
+
+ debugfs_create_file("io_cache_write", 0644, nfsd_top_dir, NULL,
+ &nfsd_io_cache_write_fops);
+}
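These knobs are ordinary debugfs integer attributes, so they can be driven from userspace like any other file. A hedged example follows; it assumes debugfs is mounted at /sys/kernel/debug and that the running kernel exposes these files. It switches NFS READ to direct I/O which, per nfsd_io_cache_read_set() above, also forces splice reads off.

/*
 * Userspace sketch: select direct I/O for NFS READ via debugfs.
 * Paths and the mount point are assumptions about the running system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/nfsd/io_cache_read", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "2", 1) != 1)	/* 2 == NFSD_IO_DIRECT */
		perror("write");
	close(fd);
	return 0;
}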
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 9421dae22737..9d55512d0cc9 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -82,8 +82,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
int len;
struct auth_domain *dom = NULL;
int err;
- int fsidtype;
- char *ep;
+ u8 fsidtype;
struct svc_expkey key;
struct svc_expkey *ek = NULL;
@@ -97,7 +96,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out;
err = -EINVAL;
- if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+ if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
err = -ENOENT;
@@ -107,12 +106,11 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
dprintk("found domain %s\n", buf);
err = -EINVAL;
- if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+ if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
- fsidtype = simple_strtoul(buf, &ep, 10);
- if (*ep)
+ if (kstrtou8(buf, 10, &fsidtype))
goto out;
- dprintk("found fsidtype %d\n", fsidtype);
+ dprintk("found fsidtype %u\n", fsidtype);
if (key_len(fsidtype)==0) /* invalid type */
goto out;
if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
@@ -123,11 +121,11 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
/* OK, we seem to have a valid key */
key.h.flags = 0;
- key.h.expiry_time = get_expiry(&mesg);
- if (key.h.expiry_time == 0)
+ err = get_expiry(&mesg, &key.h.expiry_time);
+ if (err)
goto out;
- key.ek_client = dom;
+ key.ek_client = dom;
key.ek_fsidtype = fsidtype;
memcpy(key.ek_fsid, buf, len);
@@ -334,28 +332,46 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
static int export_stats_init(struct export_stats *stats)
{
stats->start_time = ktime_get_seconds();
- return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);
+ return percpu_counter_init_many(stats->counter, 0, GFP_KERNEL,
+ EXP_STATS_COUNTERS_NUM);
}
static void export_stats_reset(struct export_stats *stats)
{
- nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
+ if (stats) {
+ int i;
+
+ for (i = 0; i < EXP_STATS_COUNTERS_NUM; i++)
+ percpu_counter_set(&stats->counter[i], 0);
+ }
}
static void export_stats_destroy(struct export_stats *stats)
{
- nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
+ if (stats)
+ percpu_counter_destroy_many(stats->counter,
+ EXP_STATS_COUNTERS_NUM);
+}
+
+static void svc_export_release(struct rcu_head *rcu_head)
+{
+ struct svc_export *exp = container_of(rcu_head, struct svc_export,
+ ex_rcu);
+
+ nfsd4_fslocs_free(&exp->ex_fslocs);
+ export_stats_destroy(exp->ex_stats);
+ kfree(exp->ex_stats);
+ kfree(exp->ex_uuid);
+ kfree(exp);
}
static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
+
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
- nfsd4_fslocs_free(&exp->ex_fslocs);
- export_stats_destroy(&exp->ex_stats);
- kfree(exp->ex_uuid);
- kfree_rcu(exp, ex_rcu);
+ call_rcu(&exp->ex_rcu, svc_export_release);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -386,7 +402,7 @@ static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
-static int check_export(struct path *path, int *flags, unsigned char *uuid)
+static int check_export(const struct path *path, int *flags, unsigned char *uuid)
{
struct inode *inode = d_inode(path->dentry);
@@ -421,13 +437,12 @@ static int check_export(struct path *path, int *flags, unsigned char *uuid)
return -EINVAL;
}
- if (!inode->i_sb->s_export_op ||
- !inode->i_sb->s_export_op->fh_to_dentry) {
+ if (!exportfs_can_decode_fh(inode->i_sb->s_export_op)) {
dprintk("exp_export: export of invalid fs type.\n");
return -EINVAL;
}
- if (mnt_user_ns(path->mnt) != &init_user_ns) {
+ if (is_idmapped_mnt(path->mnt)) {
dprintk("exp_export: export of idmapped mounts not yet supported.\n");
return -EINVAL;
}
@@ -439,7 +454,6 @@ static int check_export(struct path *path, int *flags, unsigned char *uuid)
return -EINVAL;
}
return 0;
-
}
#ifdef CONFIG_NFSD_V4
@@ -546,6 +560,29 @@ static inline int
secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
#endif
+static int xprtsec_parse(char **mesg, char *buf, struct svc_export *exp)
+{
+ unsigned int i, mode, listsize;
+ int err;
+
+ err = get_uint(mesg, &listsize);
+ if (err)
+ return err;
+ if (listsize > NFSEXP_XPRTSEC_NUM)
+ return -EINVAL;
+
+ exp->ex_xprtsec_modes = 0;
+ for (i = 0; i < listsize; i++) {
+ err = get_uint(mesg, &mode);
+ if (err)
+ return err;
+ if (mode > NFSEXP_XPRTSEC_MTLS)
+ return -EINVAL;
+ exp->ex_xprtsec_modes |= mode;
+ }
+ return 0;
+}
+
static inline int
nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
{
@@ -571,7 +608,6 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
{
/* client path expiry [flags anonuid anongid fsid] */
char *buf;
- int len;
int err;
struct auth_domain *dom = NULL;
struct svc_export exp = {}, *expp;
@@ -587,8 +623,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
/* client */
err = -EINVAL;
- len = qword_get(&mesg, buf, PAGE_SIZE);
- if (len <= 0)
+ if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out;
err = -ENOENT;
@@ -598,7 +633,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
/* path */
err = -EINVAL;
- if ((len = qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
+ if (qword_get(&mesg, buf, PAGE_SIZE) <= 0)
goto out1;
err = kern_path(buf, 0, &exp.ex_path);
@@ -608,11 +643,11 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_client = dom;
exp.cd = cd;
exp.ex_devid_map = NULL;
+ exp.ex_xprtsec_modes = NFSEXP_XPRTSEC_ALL;
/* expiry */
- err = -EINVAL;
- exp.h.expiry_time = get_expiry(&mesg);
- if (exp.h.expiry_time == 0)
+ err = get_expiry(&mesg, &exp.h.expiry_time);
+ if (err)
goto out3;
/* flags */
@@ -624,7 +659,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
if (err || an_int < 0)
goto out3;
exp.ex_flags= an_int;
-
+
/* anon uid */
err = get_int(&mesg, &an_int);
if (err)
@@ -643,13 +678,15 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out3;
exp.ex_fsid = an_int;
- while ((len = qword_get(&mesg, buf, PAGE_SIZE)) > 0) {
+ while (qword_get(&mesg, buf, PAGE_SIZE) > 0) {
if (strcmp(buf, "fsloc") == 0)
err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
else if (strcmp(buf, "uuid") == 0)
err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
else if (strcmp(buf, "secinfo") == 0)
err = secinfo_parse(&mesg, buf, &exp);
+ else if (strcmp(buf, "xprtsec") == 0)
+ err = xprtsec_parse(&mesg, buf, &exp);
else
/* quietly ignore unknown words and anything
* following. Newer user-space can try to set
@@ -663,6 +700,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
err = check_export(&exp.ex_path, &exp.ex_flags, exp.ex_uuid);
if (err)
goto out4;
+
/*
* No point caching this if it would immediately expire.
* Also, this protects exportfs's dummy export from the
@@ -744,13 +782,15 @@ static int svc_export_show(struct seq_file *m,
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
if (export_stats) {
- seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
+ struct percpu_counter *counter = exp->ex_stats->counter;
+
+ seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
seq_printf(m, "\tfh_stale: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
seq_printf(m, "\tio_read: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
seq_printf(m, "\tio_write: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
seq_putc(m, '\n');
return 0;
}
@@ -796,7 +836,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
- export_stats_reset(&new->ex_stats);
+ export_stats_reset(new->ex_stats);
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -824,6 +864,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
}
+ new->ex_xprtsec_modes = item->ex_xprtsec_modes;
}
static struct cache_head *svc_export_alloc(void)
@@ -832,7 +873,14 @@ static struct cache_head *svc_export_alloc(void)
if (!i)
return NULL;
- if (export_stats_init(&i->ex_stats)) {
+ i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
+ if (!i->ex_stats) {
+ kfree(i);
+ return NULL;
+ }
+
+ if (export_stats_init(i->ex_stats)) {
+ kfree(i->ex_stats);
kfree(i);
return NULL;
}
@@ -1033,24 +1081,76 @@ static struct svc_export *exp_find(struct cache_detail *cd,
return exp;
}
-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
+/**
+ * check_xprtsec_policy - check if access to export is allowed by the
+ * xprtsec policy
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ *
+ * Helper function for check_nfsd_access(). Note that callers should be
+ * using check_nfsd_access() instead of calling this function directly. The
+ * one exception is __fh_verify() since it has logic that may result in one
+ * or both of the helpers being skipped.
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp)
{
- struct exp_flavor_info *f;
- struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
+ if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
+ return nfs_ok;
+ }
+ if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
+ if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
+ !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
+ return nfs_ok;
+ }
+ if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
+ if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
+ test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
+ return nfs_ok;
+ }
+ return nfserr_wrongsec;
+}
+
+/**
+ * check_security_flavor - check if access to export is allowed by the
+ * security flavor
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ * @may_bypass_gss: reduce strictness of authorization check
+ *
+ * Helper function for check_nfsd_access(). Note that callers should be
+ * using check_nfsd_access() instead of calling this function directly. The
+ * one exception is __fh_verify() since it has logic that may result in one
+ * or both of the helpers being skipped.
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss)
+{
+ struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
- return 0;
+ return nfs_ok;
/* ip-address based client; check sec= export option: */
for (f = exp->ex_flavors; f < end; f++) {
if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
- return 0;
+ return nfs_ok;
}
/* defaults in absence of sec= options: */
if (exp->ex_nflavors == 0) {
if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
- return 0;
+ return nfs_ok;
}
/* If the compound op contains a spo_must_allowed op,
@@ -1060,9 +1160,47 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
*/
if (nfsd4_spo_must_allow(rqstp))
- return 0;
+ return nfs_ok;
+
+ /* Some calls may be processed without authentication
+ * on GSS exports. For example NFSv2/v3 calls on the root
+ * directory; see section 2.3.2 of RFC 2623.
+ * For "may_bypass_gss", check that the export has actually
+ * enabled some flavor with authentication (GSS or any
+ * other) and also that the auth flavor in use carries no
+ * authentication (none or sys).
+ */
+ if (may_bypass_gss && (
+ rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
+ rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)) {
+ for (f = exp->ex_flavors; f < end; f++) {
+ if (f->pseudoflavor >= RPC_AUTH_DES)
+ return 0;
+ }
+ }
+
+ return nfserr_wrongsec;
+}
+
+/**
+ * check_nfsd_access - check if access to export is allowed.
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ * @may_bypass_gss: reduce strictness of authorization check
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss)
+{
+ __be32 status;
- return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
+ status = check_xprtsec_policy(exp, rqstp);
+ if (status != nfs_ok)
+ return status;
+ return check_security_flavor(exp, rqstp, may_bypass_gss);
}
/*
@@ -1075,7 +1213,7 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
* use exp_get_by_name() or exp_find().
*/
struct svc_export *
-rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
+rqst_exp_get_by_name(struct svc_rqst *rqstp, const struct path *path)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -1105,19 +1243,35 @@ gss:
return gssexp;
}
+/**
+ * rqst_exp_find - Find an svc_export in the context of a rqst or similar
+ * @reqp: The handle to be used to suspend the request if a cache-upcall is needed
+ * If NULL, missing in-cache information will result in failure.
+ * @net: The network namespace in which the request exists
+ * @cl: default auth_domain to use for looking up the export
+ * @gsscl: an alternate auth_domain defined using deprecated gss/krb5 format.
+ * @fsid_type: The type of fsid to look for
+ * @fsidv: The actual fsid to look up in the context of either client.
+ *
+ * Perform a lookup for @cl/@fsidv in the given @net for an export. If
+ * none found and @gsscl specified, repeat the lookup.
+ *
+ * Returns an export, or an error pointer.
+ */
struct svc_export *
-rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
+rqst_exp_find(struct cache_req *reqp, struct net *net,
+ struct auth_domain *cl, struct auth_domain *gsscl,
+ int fsid_type, u32 *fsidv)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
- if (rqstp->rq_client == NULL)
+ if (!cl)
goto gss;
/* First try the auth_unix client: */
- exp = exp_find(cd, rqstp->rq_client, fsid_type,
- fsidv, &rqstp->rq_chandle);
+ exp = exp_find(cd, cl, fsid_type, fsidv, reqp);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
@@ -1127,10 +1281,9 @@ rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
return exp;
gss:
/* Otherwise, try falling back on gss client */
- if (rqstp->rq_gssclient == NULL)
+ if (!gsscl)
return exp;
- gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
- &rqstp->rq_chandle);
+ gssexp = exp_find(cd, gsscl, fsid_type, fsidv, reqp);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
if (!IS_ERR(exp))
@@ -1161,7 +1314,9 @@ struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp)
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- return rqst_exp_find(rqstp, FSID_NUM, fsidv);
+ return rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+ rqstp->rq_client, rqstp->rq_gssclient,
+ FSID_NUM, fsidv);
}
/*
@@ -1310,10 +1465,9 @@ static int e_show(struct seq_file *m, void *p)
return 0;
}
- exp_get(exp);
- if (cache_check(cd, &exp->h, NULL))
+ if (cache_check_rcu(cd, &exp->h, NULL))
return 0;
- exp_put(exp);
+
return svc_export_show(m, cd, cp);
}
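check_xprtsec_policy() above grants access only when the connection's transport-security mode, derived from the XPT_TLS_SESSION and XPT_PEER_AUTH flags, is present in the export's ex_xprtsec_modes mask. Below is a userspace sketch of that decision table; the flag values are made up and stand in for the NFSEXP_XPRTSEC_* constants, which are not shown in this patch.

/*
 * Userspace sketch (hypothetical flag values) of the xprtsec policy:
 * access is granted only if the connection's mode is in the export mask.
 */
#include <stdbool.h>
#include <stdio.h>

#define XPRTSEC_NONE	0x1	/* plain TCP allowed */
#define XPRTSEC_TLS	0x2	/* TLS without a client certificate allowed */
#define XPRTSEC_MTLS	0x4	/* mutually authenticated TLS allowed */

static bool xprtsec_allowed(unsigned int export_modes,
			    bool tls_session, bool peer_authenticated)
{
	if (!tls_session)
		return export_modes & XPRTSEC_NONE;
	if (!peer_authenticated)
		return export_modes & XPRTSEC_TLS;
	return export_modes & XPRTSEC_MTLS;
}

int main(void)
{
	/* An export restricted to mtls rejects plain TCP and server-only TLS. */
	unsigned int modes = XPRTSEC_MTLS;

	printf("%d %d %d\n",
	       xprtsec_allowed(modes, false, false),	/* 0 */
	       xprtsec_allowed(modes, true,  false),	/* 0 */
	       xprtsec_allowed(modes, true,  true));	/* 1 */
	return 0;
}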
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index ee0e3aba4a6e..d2b09cd76145 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -64,10 +64,10 @@ struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
int ex_flags;
+ int ex_fsid;
struct path ex_path;
kuid_t ex_anon_uid;
kgid_t ex_anon_gid;
- int ex_fsid;
unsigned char * ex_uuid; /* 16 byte fsid */
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
@@ -76,7 +76,8 @@ struct svc_export {
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
struct rcu_head ex_rcu;
- struct export_stats ex_stats;
+ unsigned long ex_xprtsec_modes;
+ struct export_stats *ex_stats;
};
/* an "export key" (expkey) maps a filehandlefragement to an
@@ -87,7 +88,7 @@ struct svc_expkey {
struct cache_head h;
struct auth_domain * ek_client;
- int ek_fsidtype;
+ u8 ek_fsidtype;
u32 ek_fsid[6];
struct path ek_path;
@@ -98,8 +99,13 @@ struct svc_expkey {
#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
-int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp);
-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
+struct svc_cred;
+int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
+__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp);
+__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss);
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss);
/*
* Function declarations
@@ -108,14 +114,13 @@ int nfsd_export_init(struct net *);
void nfsd_export_shutdown(struct net *);
void nfsd_export_flush(struct net *);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
- struct path *);
+ const struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct path *);
struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
int exp_rootfh(struct net *, struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
-__be32 nfserrno(int errno);
static inline void exp_put(struct svc_export *exp)
{
@@ -127,6 +132,8 @@ static inline struct svc_export *exp_get(struct svc_export *exp)
cache_get(&exp->h);
return exp;
}
-struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
+struct svc_export *rqst_exp_find(struct cache_req *reqp, struct net *net,
+ struct auth_domain *cl, struct auth_domain *gsscl,
+ int fsid_type, u32 *fsidv);
#endif /* NFSD_EXPORT_H */
diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c
deleted file mode 100644
index 76bee0a0d308..000000000000
--- a/fs/nfsd/fault_inject.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2011 Bryan Schumaker <bjschuma@netapp.com>
- *
- * Uses debugfs to create fault injection points for client testing
- */
-
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/nsproxy.h>
-#include <linux/sunrpc/addr.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-
-#include "state.h"
-#include "netns.h"
-
-struct nfsd_fault_inject_op {
- char *file;
- u64 (*get)(void);
- u64 (*set_val)(u64);
- u64 (*set_clnt)(struct sockaddr_storage *, size_t);
-};
-
-static struct dentry *debug_dir;
-
-static ssize_t fault_inject_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- static u64 val;
- char read_buf[25];
- size_t size;
- loff_t pos = *ppos;
- struct nfsd_fault_inject_op *op = file_inode(file)->i_private;
-
- if (!pos)
- val = op->get();
- size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val);
-
- return simple_read_from_buffer(buf, len, ppos, read_buf, size);
-}
-
-static ssize_t fault_inject_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- char write_buf[INET6_ADDRSTRLEN];
- size_t size = min(sizeof(write_buf) - 1, len);
- struct net *net = current->nsproxy->net_ns;
- struct sockaddr_storage sa;
- struct nfsd_fault_inject_op *op = file_inode(file)->i_private;
- u64 val;
- char *nl;
-
- if (copy_from_user(write_buf, buf, size))
- return -EFAULT;
- write_buf[size] = '\0';
-
- /* Deal with any embedded newlines in the string */
- nl = strchr(write_buf, '\n');
- if (nl) {
- size = nl - write_buf;
- *nl = '\0';
- }
-
- size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa));
- if (size > 0) {
- val = op->set_clnt(&sa, size);
- if (val)
- pr_info("NFSD [%s]: Client %s had %llu state object(s)\n",
- op->file, write_buf, val);
- } else {
- val = simple_strtoll(write_buf, NULL, 0);
- if (val == 0)
- pr_info("NFSD Fault Injection: %s (all)", op->file);
- else
- pr_info("NFSD Fault Injection: %s (n = %llu)",
- op->file, val);
- val = op->set_val(val);
- pr_info("NFSD: %s: found %llu", op->file, val);
- }
- return len; /* on success, claim we got the whole input */
-}
-
-static const struct file_operations fops_nfsd = {
- .owner = THIS_MODULE,
- .read = fault_inject_read,
- .write = fault_inject_write,
-};
-
-void nfsd_fault_inject_cleanup(void)
-{
- debugfs_remove_recursive(debug_dir);
-}
-
-static struct nfsd_fault_inject_op inject_ops[] = {
- {
- .file = "forget_clients",
- .get = nfsd_inject_print_clients,
- .set_val = nfsd_inject_forget_clients,
- .set_clnt = nfsd_inject_forget_client,
- },
- {
- .file = "forget_locks",
- .get = nfsd_inject_print_locks,
- .set_val = nfsd_inject_forget_locks,
- .set_clnt = nfsd_inject_forget_client_locks,
- },
- {
- .file = "forget_openowners",
- .get = nfsd_inject_print_openowners,
- .set_val = nfsd_inject_forget_openowners,
- .set_clnt = nfsd_inject_forget_client_openowners,
- },
- {
- .file = "forget_delegations",
- .get = nfsd_inject_print_delegations,
- .set_val = nfsd_inject_forget_delegations,
- .set_clnt = nfsd_inject_forget_client_delegations,
- },
- {
- .file = "recall_delegations",
- .get = nfsd_inject_print_delegations,
- .set_val = nfsd_inject_recall_delegations,
- .set_clnt = nfsd_inject_recall_client_delegations,
- },
-};
-
-void nfsd_fault_inject_init(void)
-{
- unsigned int i;
- struct nfsd_fault_inject_op *op;
- umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-
- debug_dir = debugfs_create_dir("nfsd", NULL);
-
- for (i = 0; i < ARRAY_SIZE(inject_ops); i++) {
- op = &inject_ops[i];
- debugfs_create_file(op->file, mode, debug_dir, op, &fops_nfsd);
- }
-}
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index fdf89fcf1a0c..93798575b807 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -1,17 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Open file cache.
+ * The NFSD open file cache.
*
* (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
+ *
+ * An nfsd_file object is a per-file collection of open state that binds
+ * together:
+ * - a struct file *
+ * - a user credential
+ * - a network namespace
+ * - a read-ahead context
+ * - monitoring for writeback errors
+ *
+ * nfsd_file objects are reference-counted. Consumers acquire a new
+ * object via the nfsd_file_acquire API. They manage their interest in
+ * the acquired object, and hence the object's reference count, via
+ * nfsd_file_get and nfsd_file_put. There are two varieties of nfsd_file
+ * object:
+ *
+ * * non-garbage-collected: When a consumer wants to precisely control
+ * the lifetime of a file's open state, it acquires a non-garbage-
+ * collected nfsd_file. The final nfsd_file_put releases the open
+ * state immediately.
+ *
+ * * garbage-collected: When a consumer does not control the lifetime
+ * of open state, it acquires a garbage-collected nfsd_file. The
+ * final nfsd_file_put allows the open state to linger for a period
+ * during which it may be re-used.
*/
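Editor's note: a minimal sketch (not part of the patch) of the consumer
pattern described in the comment above, using the garbage-collected
variant; rqstp, fhp and the I/O step are hypothetical placeholders:

	struct nfsd_file *nf;
	__be32 status;

	status = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (status != nfs_ok)
		return status;
	/* ... perform I/O through nf->nf_file ... */
	nfsd_file_put(nf);	/* open state may linger on the LRU for re-use */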
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
+#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
+#include <linux/rhashtable.h>
+#include <linux/nfslocalio.h>
#include "vfs.h"
#include "nfsd.h"
@@ -20,63 +48,73 @@
#include "filecache.h"
#include "trace.h"
-#define NFSDDBG_FACILITY NFSDDBG_FH
-
-/* FIXME: dynamically size this for the machine somehow? */
-#define NFSD_FILE_HASH_BITS 12
-#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)
-#define NFSD_FILE_SHUTDOWN (1)
-#define NFSD_FILE_LRU_THRESHOLD (4096UL)
-#define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2)
+#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
-#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
-
-struct nfsd_fcache_bucket {
- struct hlist_head nfb_head;
- spinlock_t nfb_lock;
- unsigned int nfb_count;
- unsigned int nfb_maxcount;
-};
+#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE|NFSD_MAY_LOCALIO)
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_allocations);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
struct nfsd_fcache_disposal {
- struct list_head list;
- struct work_struct work;
- struct net *net;
spinlock_t lock;
struct list_head freeme;
- struct rcu_head rcu;
};
-static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
-
static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
-static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
static struct list_lru nfsd_file_lru;
-static long nfsd_file_lru_flags;
+static unsigned long nfsd_file_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
-static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;
-static DEFINE_SPINLOCK(laundrette_lock);
-static LIST_HEAD(laundrettes);
+static struct rhltable nfsd_file_rhltable
+ ____cacheline_aligned_in_smp;
+
+static bool
+nfsd_match_cred(const struct cred *c1, const struct cred *c2)
+{
+ int i;
+
+ if (!uid_eq(c1->fsuid, c2->fsuid))
+ return false;
+ if (!gid_eq(c1->fsgid, c2->fsgid))
+ return false;
+ if (c1->group_info == NULL || c2->group_info == NULL)
+ return c1->group_info == c2->group_info;
+ if (c1->group_info->ngroups != c2->group_info->ngroups)
+ return false;
+ for (i = 0; i < c1->group_info->ngroups; i++) {
+ if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
+ return false;
+ }
+ return true;
+}
-static void nfsd_file_gc(void);
+static const struct rhashtable_params nfsd_file_rhash_params = {
+ .key_len = sizeof_field(struct nfsd_file, nf_inode),
+ .key_offset = offsetof(struct nfsd_file, nf_inode),
+ .head_offset = offsetof(struct nfsd_file, nf_rlist),
+
+ /*
+ * Start with a single page hash table to reduce resizing churn
+ * on light workloads.
+ */
+ .min_size = 256,
+ .automatic_shrinking = true,
+};
static void
nfsd_file_schedule_laundrette(void)
{
- long count = atomic_long_read(&nfsd_filecache_count);
-
- if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
- return;
-
- queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
- NFSD_LAUNDRETTE_DELAY);
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
+ queue_delayed_work(system_dfl_wq, &nfsd_filecache_laundrette,
+ NFSD_LAUNDRETTE_DELAY);
}
static void
@@ -115,22 +153,21 @@ nfsd_file_mark_put(struct nfsd_file_mark *nfm)
}
static struct nfsd_file_mark *
-nfsd_file_mark_find_or_create(struct nfsd_file *nf)
+nfsd_file_mark_find_or_create(struct inode *inode)
{
int err;
struct fsnotify_mark *mark;
struct nfsd_file_mark *nfm = NULL, *new;
- struct inode *inode = nf->nf_inode;
do {
- mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
- mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
- nfsd_file_fsnotify_group);
+ fsnotify_group_lock(nfsd_file_fsnotify_group);
+ mark = fsnotify_find_inode_mark(inode,
+ nfsd_file_fsnotify_group);
if (mark) {
nfm = nfsd_file_mark_get(container_of(mark,
struct nfsd_file_mark,
nfm_mark));
- mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
+ fsnotify_group_unlock(nfsd_file_fsnotify_group);
if (nfm) {
fsnotify_put_mark(mark);
break;
@@ -138,8 +175,9 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
/* Avoid soft lockup race with nfsd_file_mark_put() */
fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
fsnotify_put_mark(mark);
- } else
- mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
+ } else {
+ fsnotify_group_unlock(nfsd_file_fsnotify_group);
+ }
/* allocate a new nfm */
new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
@@ -170,320 +208,389 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
}
static struct nfsd_file *
-nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
- struct net *net)
+nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
+ bool want_gc)
{
struct nfsd_file *nf;
nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
- if (nf) {
- INIT_HLIST_NODE(&nf->nf_node);
- INIT_LIST_HEAD(&nf->nf_lru);
- nf->nf_file = NULL;
- nf->nf_cred = get_current_cred();
- nf->nf_net = net;
- nf->nf_flags = 0;
- nf->nf_inode = inode;
- nf->nf_hashval = hashval;
- refcount_set(&nf->nf_ref, 1);
- nf->nf_may = may & NFSD_FILE_MAY_MASK;
- if (may & NFSD_MAY_NOT_BREAK_LEASE) {
- if (may & NFSD_MAY_WRITE)
- __set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
- if (may & NFSD_MAY_READ)
- __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
- }
- nf->nf_mark = NULL;
- init_rwsem(&nf->nf_rwsem);
- trace_nfsd_file_alloc(nf);
- }
- return nf;
-}
-
-static bool
-nfsd_file_free(struct nfsd_file *nf)
-{
- bool flush = false;
-
- trace_nfsd_file_put_final(nf);
- if (nf->nf_mark)
- nfsd_file_mark_put(nf->nf_mark);
- if (nf->nf_file) {
- get_file(nf->nf_file);
- filp_close(nf->nf_file, NULL);
- fput(nf->nf_file);
- flush = true;
- }
- call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
- return flush;
-}
-
-static bool
-nfsd_file_check_writeback(struct nfsd_file *nf)
-{
- struct file *file = nf->nf_file;
- struct address_space *mapping;
+ if (unlikely(!nf))
+ return NULL;
- if (!file || !(file->f_mode & FMODE_WRITE))
- return false;
- mapping = file->f_mapping;
- return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
- mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
+ this_cpu_inc(nfsd_file_allocations);
+ INIT_LIST_HEAD(&nf->nf_lru);
+ INIT_LIST_HEAD(&nf->nf_gc);
+ nf->nf_birthtime = ktime_get();
+ nf->nf_file = NULL;
+ nf->nf_cred = get_current_cred();
+ nf->nf_net = net;
+ nf->nf_flags = want_gc ?
+ BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING) | BIT(NFSD_FILE_GC) :
+ BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING);
+ nf->nf_inode = inode;
+ refcount_set(&nf->nf_ref, 1);
+ nf->nf_may = need;
+ nf->nf_mark = NULL;
+ nf->nf_dio_mem_align = 0;
+ nf->nf_dio_offset_align = 0;
+ nf->nf_dio_read_offset_align = 0;
+ return nf;
}
-static int
+/**
+ * nfsd_file_check_write_error - check for writeback errors on a file
+ * @nf: nfsd_file to check for writeback errors
+ *
+ * Check whether a nfsd_file has an unseen error. Reset the write
+ * verifier if so.
+ */
+static void
nfsd_file_check_write_error(struct nfsd_file *nf)
{
struct file *file = nf->nf_file;
- if (!file || !(file->f_mode & FMODE_WRITE))
- return 0;
- return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
+ if ((file->f_mode & FMODE_WRITE) &&
+ filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)))
+ nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}
static void
-nfsd_file_do_unhash(struct nfsd_file *nf)
+nfsd_file_hash_remove(struct nfsd_file *nf)
{
- lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
-
trace_nfsd_file_unhash(nf);
-
- if (nfsd_file_check_write_error(nf))
- nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
- --nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
- hlist_del_rcu(&nf->nf_node);
- atomic_long_dec(&nfsd_filecache_count);
+ rhltable_remove(&nfsd_file_rhltable, &nf->nf_rlist,
+ nfsd_file_rhash_params);
}
static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- nfsd_file_do_unhash(nf);
- if (!list_empty(&nf->nf_lru))
- list_lru_del(&nfsd_file_lru, &nf->nf_lru);
+ nfsd_file_hash_remove(nf);
return true;
}
return false;
}
-/*
- * Return true if the file was unhashed.
- */
-static bool
-nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
+static void
+nfsd_file_free(struct nfsd_file *nf)
{
- lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+ s64 age = ktime_to_ms(ktime_sub(ktime_get(), nf->nf_birthtime));
- trace_nfsd_file_unhash_and_release_locked(nf);
- if (!nfsd_file_unhash(nf))
- return false;
- /* keep final reference for nfsd_file_lru_dispose */
- if (refcount_dec_not_one(&nf->nf_ref))
- return true;
+ trace_nfsd_file_free(nf);
- list_add(&nf->nf_lru, dispose);
- return true;
+ this_cpu_inc(nfsd_file_releases);
+ this_cpu_add(nfsd_file_total_age, age);
+
+ nfsd_file_unhash(nf);
+ if (nf->nf_mark)
+ nfsd_file_mark_put(nf->nf_mark);
+ if (nf->nf_file) {
+ nfsd_file_check_write_error(nf);
+ nfsd_filp_close(nf->nf_file);
+ }
+
+ /*
+ * If this item is still linked via nf_lru, that's a bug.
+ * WARN and leak it to preserve system stability.
+ */
+ if (WARN_ON_ONCE(!list_empty(&nf->nf_lru)))
+ return;
+
+ call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
}
-static void
-nfsd_file_put_noref(struct nfsd_file *nf)
+static bool
+nfsd_file_check_writeback(struct nfsd_file *nf)
{
- trace_nfsd_file_put(nf);
+ struct file *file = nf->nf_file;
+ struct address_space *mapping;
- if (refcount_dec_and_test(&nf->nf_ref)) {
- WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
- nfsd_file_free(nf);
- }
+ /* File not open for write? */
+ if (!(file->f_mode & FMODE_WRITE))
+ return false;
+
+ /*
+ * Some filesystems (e.g. NFS) flush all dirty data on close.
+ * On others, there is no need to wait for writeback.
+ */
+ if (!(file_inode(file)->i_sb->s_export_op->flags & EXPORT_OP_FLUSH_ON_CLOSE))
+ return false;
+
+ mapping = file->f_mapping;
+ return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
+ mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}
-void
-nfsd_file_put(struct nfsd_file *nf)
+static void nfsd_file_lru_add(struct nfsd_file *nf)
{
- bool is_hashed;
+ refcount_inc(&nf->nf_ref);
+ if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru))
+ trace_nfsd_file_lru_add(nf);
+ else
+ WARN_ON(1);
+ nfsd_file_schedule_laundrette();
+}
- set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
- nfsd_file_put_noref(nf);
- return;
+static bool nfsd_file_lru_remove(struct nfsd_file *nf)
+{
+ if (list_lru_del_obj(&nfsd_file_lru, &nf->nf_lru)) {
+ trace_nfsd_file_lru_del(nf);
+ return true;
}
-
- filemap_flush(nf->nf_file->f_mapping);
- is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
- nfsd_file_put_noref(nf);
- if (is_hashed)
- nfsd_file_schedule_laundrette();
- if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
- nfsd_file_gc();
+ return false;
}
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
- if (likely(refcount_inc_not_zero(&nf->nf_ref)))
+ if (nf && refcount_inc_not_zero(&nf->nf_ref))
return nf;
return NULL;
}
-static void
-nfsd_file_dispose_list(struct list_head *dispose)
+/**
+ * nfsd_file_put - put the reference to a nfsd_file
+ * @nf: nfsd_file of which to put the reference
+ *
+ * Put a reference to a nfsd_file. In the non-GC case, we just put the
+ * reference immediately. In the GC case, if the reference would be
+ * the last one, then put it on the LRU instead to be cleaned up later.
+ */
+void
+nfsd_file_put(struct nfsd_file *nf)
{
- struct nfsd_file *nf;
+ might_sleep();
+ trace_nfsd_file_put(nf);
- while(!list_empty(dispose)) {
- nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- list_del(&nf->nf_lru);
- nfsd_file_put_noref(nf);
+ if (test_bit(NFSD_FILE_GC, &nf->nf_flags) &&
+ test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+ set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ set_bit(NFSD_FILE_RECENT, &nf->nf_flags);
}
+
+ if (refcount_dec_and_test(&nf->nf_ref))
+ nfsd_file_free(nf);
}
-static void
-nfsd_file_dispose_list_sync(struct list_head *dispose)
+/**
+ * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller
+ * @pnf: nfsd_file of which to put the reference
+ *
+ * First save the associated net to return to caller, then put
+ * the reference of the nfsd_file.
+ */
+struct net *
+nfsd_file_put_local(struct nfsd_file __rcu **pnf)
{
- bool flush = false;
struct nfsd_file *nf;
+ struct net *net = NULL;
- while(!list_empty(dispose)) {
- nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- list_del(&nf->nf_lru);
- if (!refcount_dec_and_test(&nf->nf_ref))
- continue;
- if (nfsd_file_free(nf))
- flush = true;
+ nf = unrcu_pointer(xchg(pnf, NULL));
+ if (nf) {
+ net = nf->nf_net;
+ nfsd_file_put(nf);
}
- if (flush)
- flush_delayed_fput();
+ return net;
}
-static void
-nfsd_file_list_remove_disposal(struct list_head *dst,
- struct nfsd_fcache_disposal *l)
+/**
+ * nfsd_file_file - get the backing file of an nfsd_file
+ * @nf: nfsd_file of which to access the backing file.
+ *
+ * Return backing file for @nf.
+ */
+struct file *
+nfsd_file_file(struct nfsd_file *nf)
{
- spin_lock(&l->lock);
- list_splice_init(&l->freeme, dst);
- spin_unlock(&l->lock);
+ return nf->nf_file;
}
static void
-nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
+nfsd_file_dispose_list(struct list_head *dispose)
{
- struct nfsd_fcache_disposal *l;
+ struct nfsd_file *nf;
- rcu_read_lock();
- list_for_each_entry_rcu(l, &laundrettes, list) {
- if (l->net == net) {
- spin_lock(&l->lock);
- list_splice_tail_init(files, &l->freeme);
- spin_unlock(&l->lock);
- queue_work(nfsd_filecache_wq, &l->work);
- break;
- }
+ while (!list_empty(dispose)) {
+ nf = list_first_entry(dispose, struct nfsd_file, nf_gc);
+ list_del_init(&nf->nf_gc);
+ nfsd_file_free(nf);
}
- rcu_read_unlock();
}
+/**
+ * nfsd_file_dispose_list_delayed - move list of dead files to net's freeme list
+ * @dispose: list of nfsd_files to be disposed
+ *
+ * Transfers each file to the "freeme" list for its nfsd_net, to eventually
+ * be disposed of by the per-net garbage collector.
+ */
static void
-nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
- struct net *net)
+nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
- struct nfsd_file *nf, *tmp;
+ while(!list_empty(dispose)) {
+ struct nfsd_file *nf = list_first_entry(dispose,
+ struct nfsd_file, nf_gc);
+ struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
+ struct svc_serv *serv;
+
+ spin_lock(&l->lock);
+ list_move_tail(&nf->nf_gc, &l->freeme);
+ spin_unlock(&l->lock);
- list_for_each_entry_safe(nf, tmp, src, nf_lru) {
- if (nf->nf_net == net)
- list_move_tail(&nf->nf_lru, dst);
+ /*
+ * The filecache laundrette is shut down after the
+ * nn->nfsd_serv pointer is cleared, but before the
+ * svc_serv is freed.
+ */
+ serv = nn->nfsd_serv;
+ if (serv)
+ svc_wake_up(serv);
}
}
-static void
-nfsd_file_dispose_list_delayed(struct list_head *dispose)
+/**
+ * nfsd_file_net_dispose - deal with nfsd_files waiting to be disposed.
+ * @nn: nfsd_net in which to find files to be disposed.
+ *
+ * When files held open for nfsv3 are removed from the filecache, whether
+ * due to memory pressure or garbage collection, they are queued to
+ * a per-net-ns queue. This function completes the disposal, either
+ * directly or by waking another nfsd thread to help with the work.
+ */
+void nfsd_file_net_dispose(struct nfsd_net *nn)
{
- LIST_HEAD(list);
- struct nfsd_file *nf;
-
- while(!list_empty(dispose)) {
- nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
- nfsd_file_list_add_disposal(&list, nf->nf_net);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
+
+ if (!list_empty(&l->freeme)) {
+ LIST_HEAD(dispose);
+ int i;
+
+ spin_lock(&l->lock);
+ for (i = 0; i < 8 && !list_empty(&l->freeme); i++)
+ list_move(l->freeme.next, &dispose);
+ spin_unlock(&l->lock);
+ if (!list_empty(&l->freeme))
+ /* Wake up another thread to share the work
+ * *before* doing any actual disposing.
+ */
+ svc_wake_up(nn->nfsd_serv);
+ nfsd_file_dispose_list(&dispose);
}
}
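Editor's note: as the kernel-doc above describes, disposal is completed by
nfsd threads rather than in the put path. A hedged sketch of the expected
call site in a server thread's request loop; the loop itself is an
assumption for illustration, only nfsd_file_net_dispose() comes from this
patch:

	/* after handling a request, help drain the per-net freeme list */
	nfsd_file_net_dispose(nn);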
-/*
- * Note this can deadlock with nfsd_file_cache_purge.
+/**
+ * nfsd_file_lru_cb - Examine an entry on the LRU list
+ * @item: LRU entry to examine
+ * @lru: controlling LRU
+ * @arg: dispose list
+ *
+ * Return values:
+ * %LRU_REMOVED: @item was removed from the LRU
+ * %LRU_ROTATE: @item is to be moved to the LRU tail
+ * %LRU_SKIP: @item cannot be evicted
*/
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
- spinlock_t *lock, void *arg)
- __releases(lock)
- __acquires(lock)
+ void *arg)
{
struct list_head *head = arg;
struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
- /*
- * Do a lockless refcount check. The hashtable holds one reference, so
- * we look to see if anything else has a reference, or if any have
- * been put since the shrinker last ran. Those don't get unhashed and
- * released.
- *
- * Note that in the put path, we set the flag and then decrement the
- * counter. Here we check the counter and then test and clear the flag.
- * That order is deliberate to ensure that we can do this locklessly.
- */
- if (refcount_read(&nf->nf_ref) > 1)
- goto out_skip;
+ /* We should only be dealing with GC entries here */
+ WARN_ON_ONCE(!test_bit(NFSD_FILE_GC, &nf->nf_flags));
/*
* Don't throw out files that are still undergoing I/O or
* that have uncleared errors pending.
*/
- if (nfsd_file_check_writeback(nf))
- goto out_skip;
+ if (nfsd_file_check_writeback(nf)) {
+ trace_nfsd_file_gc_writeback(nf);
+ return LRU_SKIP;
+ }
- if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
- goto out_skip;
+ /* If it was recently added to the list, skip it */
+ if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags)) {
+ trace_nfsd_file_gc_referenced(nf);
+ return LRU_ROTATE;
+ }
- if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
- goto out_skip;
+ /*
+ * Put the reference held on behalf of the LRU if it is the last
+ * reference, else rotate.
+ */
+ if (!refcount_dec_if_one(&nf->nf_ref)) {
+ trace_nfsd_file_gc_in_use(nf);
+ return LRU_ROTATE;
+ }
- list_lru_isolate_move(lru, &nf->nf_lru, head);
+ /* Refcount went to zero. Unhash it and queue it to the dispose list */
+ nfsd_file_unhash(nf);
+ list_lru_isolate(lru, &nf->nf_lru);
+ list_add(&nf->nf_gc, head);
+ this_cpu_inc(nfsd_file_evictions);
+ trace_nfsd_file_gc_disposed(nf);
return LRU_REMOVED;
-out_skip:
- return LRU_SKIP;
}
-static unsigned long
-nfsd_file_lru_walk_list(struct shrink_control *sc)
+static enum lru_status
+nfsd_file_gc_cb(struct list_head *item, struct list_lru_one *lru,
+ void *arg)
{
- LIST_HEAD(head);
- struct nfsd_file *nf;
- unsigned long ret;
+ struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
- if (sc)
- ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
- nfsd_file_lru_cb, &head);
- else
- ret = list_lru_walk(&nfsd_file_lru,
- nfsd_file_lru_cb,
- &head, LONG_MAX);
- list_for_each_entry(nf, &head, nf_lru) {
- spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
- nfsd_file_do_unhash(nf);
- spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+ if (test_and_clear_bit(NFSD_FILE_RECENT, &nf->nf_flags)) {
+ /*
+ * "REFERENCED" really means "should be at the end of the
+ * LRU". As we are putting it there we can clear the flag.
+ */
+ clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ trace_nfsd_file_gc_aged(nf);
+ return LRU_ROTATE;
}
- nfsd_file_dispose_list_delayed(&head);
- return ret;
+ return nfsd_file_lru_cb(item, lru, arg);
}
+/* If the shrinker runs between calls to list_lru_walk_node() in
+ * nfsd_file_gc(), the "remaining" count will be wrong. This could
+ * result in premature freeing of some files. This may not matter much
+ * but is easy to fix with this spinlock which temporarily disables
+ * the shrinker.
+ */
+static DEFINE_SPINLOCK(nfsd_gc_lock);
static void
nfsd_file_gc(void)
{
- nfsd_file_lru_walk_list(NULL);
+ unsigned long ret = 0;
+ LIST_HEAD(dispose);
+ int nid;
+
+ spin_lock(&nfsd_gc_lock);
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
+ unsigned long remaining = list_lru_count_node(&nfsd_file_lru, nid);
+
+ while (remaining > 0) {
+ unsigned long nr = min(remaining, NFSD_FILE_GC_BATCH);
+
+ remaining -= nr;
+ ret += list_lru_walk_node(&nfsd_file_lru, nid, nfsd_file_gc_cb,
+ &dispose, &nr);
+ if (nr)
+ /* walk aborted early */
+ remaining = 0;
+ }
+ }
+ spin_unlock(&nfsd_gc_lock);
+ trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
+ nfsd_file_dispose_list_delayed(&dispose);
}
static void
nfsd_file_gc_worker(struct work_struct *work)
{
- nfsd_file_gc();
+ if (list_lru_count(&nfsd_file_lru))
+ nfsd_file_gc();
nfsd_file_schedule_laundrette();
}
@@ -496,100 +603,136 @@ nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
- return nfsd_file_lru_walk_list(sc);
+ LIST_HEAD(dispose);
+ unsigned long ret;
+
+ if (!spin_trylock(&nfsd_gc_lock))
+ return SHRINK_STOP;
+
+ ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
+ nfsd_file_lru_cb, &dispose);
+ spin_unlock(&nfsd_gc_lock);
+ trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
+ nfsd_file_dispose_list_delayed(&dispose);
+ return ret;
}
-static struct shrinker nfsd_file_shrinker = {
- .scan_objects = nfsd_file_lru_scan,
- .count_objects = nfsd_file_lru_count,
- .seeks = 1,
-};
+static struct shrinker *nfsd_file_shrinker;
+/**
+ * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
+ * @nf: nfsd_file to attempt to queue
+ * @dispose: private list to queue successfully-put objects
+ *
+ * Unhash an nfsd_file, try to get a reference to it, and then put that
+ * reference. If it's the last reference, queue it to the dispose list.
+ */
static void
-__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
- struct list_head *dispose)
+nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+ __must_hold(RCU)
{
- struct nfsd_file *nf;
- struct hlist_node *tmp;
+ int decrement = 1;
- spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
- hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
- if (inode == nf->nf_inode)
- nfsd_file_unhash_and_release_locked(nf, dispose);
+ /* If we raced with someone else unhashing, ignore it */
+ if (!nfsd_file_unhash(nf))
+ return;
+
+ /* If we can't get a reference, ignore it */
+ if (!nfsd_file_get(nf))
+ return;
+
+ /* Extra decrement if we remove from the LRU */
+ if (nfsd_file_lru_remove(nf))
+ ++decrement;
+
+ /* If refcount goes to 0, then put on the dispose list */
+ if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+ list_add(&nf->nf_gc, dispose);
+ trace_nfsd_file_closing(nf);
}
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}
/**
- * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
- * @inode: inode of the file to attempt to remove
+ * nfsd_file_queue_for_close - try to close out any open nfsd_files for an inode
+ * @inode: inode on which to close out nfsd_files
+ * @dispose: list on which to gather nfsd_files to close out
+ *
+ * An nfsd_file represents a struct file being held open on behalf of nfsd.
+ * An open file however can block other activity (such as leases), or cause
+ * undesirable behavior (e.g. spurious silly-renames when reexporting NFS).
+ *
+ * This function is intended to find open nfsd_files when this sort of
+ * conflicting access occurs and then attempt to close those files out.
*
- * Walk the whole hash bucket, looking for any files that correspond to "inode".
- * If any do, then unhash them and put the hashtable reference to them and
- * destroy any that had their last reference put. Also ensure that any of the
- * fputs also have their final __fput done as well.
+ * Populates the dispose list with entries that have already had their
+ * refcounts go to zero. The actual free of an nfsd_file can be expensive,
+ * so we leave it up to the caller whether it wants to wait or not.
*/
-void
-nfsd_file_close_inode_sync(struct inode *inode)
+static void
+nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
{
- unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
- NFSD_FILE_HASH_BITS);
- LIST_HEAD(dispose);
+ struct rhlist_head *tmp, *list;
+ struct nfsd_file *nf;
- __nfsd_file_close_inode(inode, hashval, &dispose);
- trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
- nfsd_file_dispose_list_sync(&dispose);
+ rcu_read_lock();
+ list = rhltable_lookup(&nfsd_file_rhltable, &inode,
+ nfsd_file_rhash_params);
+ rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
+ if (!test_bit(NFSD_FILE_GC, &nf->nf_flags))
+ continue;
+ nfsd_file_cond_queue(nf, dispose);
+ }
+ rcu_read_unlock();
}
/**
* nfsd_file_close_inode - attempt a delayed close of a nfsd_file
* @inode: inode of the file to attempt to remove
*
- * Walk the whole hash bucket, looking for any files that correspond to "inode".
- * If any do, then unhash them and put the hashtable reference to them and
- * destroy any that had their last reference put.
+ * Close out any open nfsd_files that can be reaped for @inode. The
+ * actual freeing is deferred to the dispose_list_delayed infrastructure.
+ *
+ * This is used by the fsnotify callbacks and setlease notifier.
*/
static void
nfsd_file_close_inode(struct inode *inode)
{
- unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
- NFSD_FILE_HASH_BITS);
LIST_HEAD(dispose);
- __nfsd_file_close_inode(inode, hashval, &dispose);
- trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
+ nfsd_file_queue_for_close(inode, &dispose);
nfsd_file_dispose_list_delayed(&dispose);
}
/**
- * nfsd_file_delayed_close - close unused nfsd_files
- * @work: dummy
+ * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
+ * @inode: inode of the file to attempt to remove
*
- * Walk the LRU list and close any entries that have not been used since
- * the last scan.
+ * Close out any open nfsd_files that can be reaped for @inode. The
+ * nfsd_files are closed out synchronously.
*
- * Note this can deadlock with nfsd_file_cache_purge.
+ * This is called from nfsd_rename and nfsd_unlink to avoid silly-renames
+ * when reexporting NFS.
*/
-static void
-nfsd_file_delayed_close(struct work_struct *work)
+void
+nfsd_file_close_inode_sync(struct inode *inode)
{
- LIST_HEAD(head);
- struct nfsd_fcache_disposal *l = container_of(work,
- struct nfsd_fcache_disposal, work);
+ LIST_HEAD(dispose);
+
+ trace_nfsd_file_close(inode);
- nfsd_file_list_remove_disposal(&head, l);
- nfsd_file_dispose_list(&head);
+ nfsd_file_queue_for_close(inode, &dispose);
+ nfsd_file_dispose_list(&dispose);
}
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
void *data)
{
- struct file_lock *fl = data;
+ struct file_lease *fl = data;
/* Only close files for F_SETLEASE leases */
- if (fl->fl_flags & FL_LEASE)
- nfsd_file_close_inode_sync(file_inode(fl->fl_file));
+ if (fl->c.flc_flags & FL_LEASE)
+ nfsd_file_close_inode(file_inode(fl->c.flc_file));
return 0;
}
@@ -632,59 +775,56 @@ static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
int
nfsd_file_cache_init(void)
{
- int ret = -ENOMEM;
- unsigned int i;
+ int ret;
- clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
-
- if (nfsd_file_hashtbl)
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
return 0;
- nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
- if (!nfsd_filecache_wq)
+ ret = rhltable_init(&nfsd_file_rhltable, &nfsd_file_rhash_params);
+ if (ret)
goto out;
- nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
- sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
- if (!nfsd_file_hashtbl) {
- pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
- goto out_err;
- }
-
- nfsd_file_slab = kmem_cache_create("nfsd_file",
- sizeof(struct nfsd_file), 0, 0, NULL);
+ ret = -ENOMEM;
+ nfsd_file_slab = KMEM_CACHE(nfsd_file, 0);
if (!nfsd_file_slab) {
pr_err("nfsd: unable to create nfsd_file_slab\n");
goto out_err;
}
- nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
- sizeof(struct nfsd_file_mark), 0, 0, NULL);
+ nfsd_file_mark_slab = KMEM_CACHE(nfsd_file_mark, 0);
if (!nfsd_file_mark_slab) {
pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
goto out_err;
}
-
ret = list_lru_init(&nfsd_file_lru);
if (ret) {
pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
goto out_err;
}
- ret = register_shrinker(&nfsd_file_shrinker);
- if (ret) {
- pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
+ nfsd_file_shrinker = shrinker_alloc(0, "nfsd-filecache");
+ if (!nfsd_file_shrinker) {
+ ret = -ENOMEM;
+ pr_err("nfsd: failed to allocate nfsd_file_shrinker\n");
goto out_lru;
}
+ nfsd_file_shrinker->count_objects = nfsd_file_lru_count;
+ nfsd_file_shrinker->scan_objects = nfsd_file_lru_scan;
+ nfsd_file_shrinker->seeks = 1;
+
+ shrinker_register(nfsd_file_shrinker);
+
ret = lease_register_notifier(&nfsd_file_lease_notifier);
if (ret) {
pr_err("nfsd: unable to register lease notifier: %d\n", ret);
goto out_shrinker;
}
- nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
+ nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops,
+ 0);
if (IS_ERR(nfsd_file_fsnotify_group)) {
pr_err("nfsd: unable to create fsnotify group: %ld\n",
PTR_ERR(nfsd_file_fsnotify_group));
@@ -693,18 +833,15 @@ nfsd_file_cache_init(void)
goto out_notifier;
}
- for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
- INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
- spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
- }
-
INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
+ if (ret)
+ clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags);
return ret;
out_notifier:
lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
- unregister_shrinker(&nfsd_file_shrinker);
+ shrinker_free(nfsd_file_shrinker);
out_lru:
list_lru_destroy(&nfsd_file_lru);
out_err:
@@ -712,58 +849,59 @@ out_err:
nfsd_file_slab = NULL;
kmem_cache_destroy(nfsd_file_mark_slab);
nfsd_file_mark_slab = NULL;
- kfree(nfsd_file_hashtbl);
- nfsd_file_hashtbl = NULL;
- destroy_workqueue(nfsd_filecache_wq);
- nfsd_filecache_wq = NULL;
+ rhltable_destroy(&nfsd_file_rhltable);
goto out;
}
-/*
- * Note this can deadlock with nfsd_file_lru_cb.
+/**
+ * __nfsd_file_cache_purge - clean out the cache for shutdown
+ * @net: net-namespace to shut down the cache (may be NULL)
+ *
+ * Walk the nfsd_file cache and close out any that match @net. If @net is NULL,
+ * then close out everything. Called when an nfsd instance is being shut down,
+ * and when the exports table is flushed.
*/
-void
-nfsd_file_cache_purge(struct net *net)
+static void
+__nfsd_file_cache_purge(struct net *net)
{
- unsigned int i;
- struct nfsd_file *nf;
- struct hlist_node *next;
+ struct rhashtable_iter iter;
+ struct nfsd_file *nf;
LIST_HEAD(dispose);
- bool del;
- if (!nfsd_file_hashtbl)
- return;
-
- for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
- struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ if (net) {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ nfs_localio_invalidate_clients(&nn->local_clients,
+ &nn->local_clients_lock);
+ }
+#endif
- spin_lock(&nfb->nfb_lock);
- hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
- if (net && nf->nf_net != net)
- continue;
- del = nfsd_file_unhash_and_release_locked(nf, &dispose);
+ rhltable_walk_enter(&nfsd_file_rhltable, &iter);
+ do {
+ rhashtable_walk_start(&iter);
- /*
- * Deadlock detected! Something marked this entry as
- * unhased, but hasn't removed it from the hash list.
- */
- WARN_ON_ONCE(!del);
+ nf = rhashtable_walk_next(&iter);
+ while (!IS_ERR_OR_NULL(nf)) {
+ if (!net || nf->nf_net == net)
+ nfsd_file_cond_queue(nf, &dispose);
+ nf = rhashtable_walk_next(&iter);
}
- spin_unlock(&nfb->nfb_lock);
- nfsd_file_dispose_list(&dispose);
- }
+
+ rhashtable_walk_stop(&iter);
+ } while (nf == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+
+ nfsd_file_dispose_list(&dispose);
}
static struct nfsd_fcache_disposal *
-nfsd_alloc_fcache_disposal(struct net *net)
+nfsd_alloc_fcache_disposal(void)
{
struct nfsd_fcache_disposal *l;
l = kmalloc(sizeof(*l), GFP_KERNEL);
if (!l)
return NULL;
- INIT_WORK(&l->work, nfsd_file_delayed_close);
- l->net = net;
spin_lock_init(&l->lock);
INIT_LIST_HEAD(&l->freeme);
return l;
@@ -772,61 +910,39 @@ nfsd_alloc_fcache_disposal(struct net *net)
static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
- rcu_assign_pointer(l->net, NULL);
- cancel_work_sync(&l->work);
nfsd_file_dispose_list(&l->freeme);
- kfree_rcu(l, rcu);
+ kfree(l);
}
static void
-nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
+nfsd_free_fcache_disposal_net(struct net *net)
{
- spin_lock(&laundrette_lock);
- list_add_tail_rcu(&l->list, &laundrettes);
- spin_unlock(&laundrette_lock);
-}
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
-static void
-nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
-{
- spin_lock(&laundrette_lock);
- list_del_rcu(&l->list);
- spin_unlock(&laundrette_lock);
+ nfsd_free_fcache_disposal(l);
}
-static int
-nfsd_alloc_fcache_disposal_net(struct net *net)
+int
+nfsd_file_cache_start_net(struct net *net)
{
- struct nfsd_fcache_disposal *l;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- l = nfsd_alloc_fcache_disposal(net);
- if (!l)
- return -ENOMEM;
- nfsd_add_fcache_disposal(l);
- return 0;
+ nn->fcache_disposal = nfsd_alloc_fcache_disposal();
+ return nn->fcache_disposal ? 0 : -ENOMEM;
}
-static void
-nfsd_free_fcache_disposal_net(struct net *net)
-{
- struct nfsd_fcache_disposal *l;
-
- rcu_read_lock();
- list_for_each_entry_rcu(l, &laundrettes, list) {
- if (l->net != net)
- continue;
- nfsd_del_fcache_disposal(l);
- rcu_read_unlock();
- nfsd_free_fcache_disposal(l);
- return;
- }
- rcu_read_unlock();
-}
-
-int
-nfsd_file_cache_start_net(struct net *net)
+/**
+ * nfsd_file_cache_purge - Remove all cache items associated with @net
+ * @net: target net namespace
+ *
+ */
+void
+nfsd_file_cache_purge(struct net *net)
{
- return nfsd_alloc_fcache_disposal_net(net);
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
+ __nfsd_file_cache_purge(net);
}
void
@@ -839,16 +955,20 @@ nfsd_file_cache_shutdown_net(struct net *net)
void
nfsd_file_cache_shutdown(void)
{
- set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
+ int i;
+
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_and_clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
+ return;
lease_unregister_notifier(&nfsd_file_lease_notifier);
- unregister_shrinker(&nfsd_file_shrinker);
+ shrinker_free(nfsd_file_shrinker);
/*
* make sure all callers of nfsd_file_lru_cb are done before
* calling nfsd_file_cache_purge
*/
cancel_delayed_work_sync(&nfsd_filecache_laundrette);
- nfsd_file_cache_purge(NULL);
+ __nfsd_file_cache_purge(NULL);
list_lru_destroy(&nfsd_file_lru);
rcu_barrier();
fsnotify_put_group(nfsd_file_fsnotify_group);
@@ -858,240 +978,453 @@ nfsd_file_cache_shutdown(void)
fsnotify_wait_marks_destroyed();
kmem_cache_destroy(nfsd_file_mark_slab);
nfsd_file_mark_slab = NULL;
- kfree(nfsd_file_hashtbl);
- nfsd_file_hashtbl = NULL;
- destroy_workqueue(nfsd_filecache_wq);
- nfsd_filecache_wq = NULL;
-}
-
-static bool
-nfsd_match_cred(const struct cred *c1, const struct cred *c2)
-{
- int i;
-
- if (!uid_eq(c1->fsuid, c2->fsuid))
- return false;
- if (!gid_eq(c1->fsgid, c2->fsgid))
- return false;
- if (c1->group_info == NULL || c2->group_info == NULL)
- return c1->group_info == c2->group_info;
- if (c1->group_info->ngroups != c2->group_info->ngroups)
- return false;
- for (i = 0; i < c1->group_info->ngroups; i++) {
- if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
- return false;
+ rhltable_destroy(&nfsd_file_rhltable);
+
+ for_each_possible_cpu(i) {
+ per_cpu(nfsd_file_cache_hits, i) = 0;
+ per_cpu(nfsd_file_acquisitions, i) = 0;
+ per_cpu(nfsd_file_allocations, i) = 0;
+ per_cpu(nfsd_file_releases, i) = 0;
+ per_cpu(nfsd_file_total_age, i) = 0;
+ per_cpu(nfsd_file_evictions, i) = 0;
}
- return true;
}
static struct nfsd_file *
-nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
- unsigned int hashval, struct net *net)
+nfsd_file_lookup_locked(const struct net *net, const struct cred *cred,
+ struct inode *inode, unsigned char need,
+ bool want_gc)
{
+ struct rhlist_head *tmp, *list;
struct nfsd_file *nf;
- unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
- hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
- nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
+ list = rhltable_lookup(&nfsd_file_rhltable, &inode,
+ nfsd_file_rhash_params);
+ rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
if (nf->nf_may != need)
continue;
- if (nf->nf_inode != inode)
- continue;
if (nf->nf_net != net)
continue;
- if (!nfsd_match_cred(nf->nf_cred, current_cred()))
+ if (!nfsd_match_cred(nf->nf_cred, cred))
+ continue;
+ if (test_bit(NFSD_FILE_GC, &nf->nf_flags) != want_gc)
continue;
- if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
+ if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
continue;
- if (nfsd_file_get(nf) != NULL)
- return nf;
+
+ if (!nfsd_file_get(nf))
+ continue;
+ return nf;
}
return NULL;
}
/**
- * nfsd_file_is_cached - are there any cached open files for this fh?
- * @inode: inode of the file to check
+ * nfsd_file_is_cached - are there any cached open files for this inode?
+ * @inode: inode to check
*
- * Scan the hashtable for open files that match this fh. Returns true if there
- * are any, and false if not.
+ * The lookup matches inodes in all net namespaces and is atomic wrt
+ * nfsd_file_acquire().
+ *
+ * Return values:
+ * %true: filecache contains at least one file matching this inode
+ * %false: filecache contains no files matching this inode
*/
bool
nfsd_file_is_cached(struct inode *inode)
{
- bool ret = false;
- struct nfsd_file *nf;
- unsigned int hashval;
-
- hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
+ struct rhlist_head *tmp, *list;
+ struct nfsd_file *nf;
+ bool ret = false;
rcu_read_lock();
- hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
- nf_node) {
- if (inode == nf->nf_inode) {
+ list = rhltable_lookup(&nfsd_file_rhltable, &inode,
+ nfsd_file_rhash_params);
+ rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist)
+ if (test_bit(NFSD_FILE_GC, &nf->nf_flags)) {
ret = true;
break;
}
- }
rcu_read_unlock();
- trace_nfsd_file_is_cached(inode, hashval, (int)ret);
+
+ trace_nfsd_file_is_cached(inode, (int)ret);
return ret;
}
-__be32
-nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
- unsigned int may_flags, struct nfsd_file **pnf)
+static __be32
+nfsd_file_get_dio_attrs(const struct svc_fh *fhp, struct nfsd_file *nf)
{
- __be32 status;
- struct net *net = SVC_NET(rqstp);
- struct nfsd_file *nf, *new;
- struct inode *inode;
- unsigned int hashval;
- bool retry = true;
+ struct inode *inode = file_inode(nf->nf_file);
+ struct kstat stat;
+ __be32 status;
+
+ /* Currently only need to get DIO alignment info for regular files */
+ if (!S_ISREG(inode->i_mode))
+ return nfs_ok;
- /* FIXME: skip this if fh_dentry is already set? */
- status = fh_verify(rqstp, fhp, S_IFREG,
- may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ status = fh_getattr(fhp, &stat);
if (status != nfs_ok)
return status;
- inode = d_inode(fhp->fh_dentry);
- hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
+ trace_nfsd_file_get_dio_attrs(inode, &stat);
+
+ if (stat.result_mask & STATX_DIOALIGN) {
+ nf->nf_dio_mem_align = stat.dio_mem_align;
+ nf->nf_dio_offset_align = stat.dio_offset_align;
+ }
+ if (stat.result_mask & STATX_DIO_READ_ALIGN)
+ nf->nf_dio_read_offset_align = stat.dio_read_offset_align;
+ else
+ nf->nf_dio_read_offset_align = nf->nf_dio_offset_align;
+
+ return nfs_ok;
+}
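Editor's note: a hedged illustration of how the DIO alignment values cached
above might be consulted by a direct-I/O path. The check below is an
assumption for illustration only and does not appear in this patch; offset
and use_buffered_io are hypothetical locals:

	/* fall back to buffered I/O when the request is not DIO-aligned */
	if (nf->nf_dio_mem_align == 0 ||
	    !IS_ALIGNED(offset, nf->nf_dio_offset_align))
		use_buffered_io = true;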
+
+static __be32
+nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct svc_fh *fhp,
+ unsigned int may_flags, struct file *file,
+ umode_t type, bool want_gc, struct nfsd_file **pnf)
+{
+ unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
+ struct nfsd_file *new, *nf;
+ bool stale_retry = true;
+ bool open_retry = true;
+ struct inode *inode;
+ __be32 status;
+ int ret;
+
retry:
+ if (rqstp)
+ status = fh_verify(rqstp, fhp, type,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ else
+ status = fh_verify_local(net, cred, client, fhp, type,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+
+ if (status != nfs_ok)
+ return status;
+ inode = d_inode(fhp->fh_dentry);
+
rcu_read_lock();
- nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
rcu_read_unlock();
+
if (nf)
goto wait_for_construction;
- new = nfsd_file_alloc(inode, may_flags, hashval, net);
+ new = nfsd_file_alloc(net, inode, need, want_gc);
if (!new) {
- trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
- NULL, nfserr_jukebox);
- return nfserr_jukebox;
+ status = nfserr_jukebox;
+ goto out;
}
- spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
- nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
- if (nf == NULL)
+ rcu_read_lock();
+ spin_lock(&inode->i_lock);
+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ if (unlikely(nf)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+ nfsd_file_free(new);
+ goto wait_for_construction;
+ }
+ nf = new;
+ ret = rhltable_insert(&nfsd_file_rhltable, &nf->nf_rlist,
+ nfsd_file_rhash_params);
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+ if (likely(ret == 0))
goto open_file;
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
- nfsd_file_slab_free(&new->nf_rcu);
+
+ trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
+ status = nfserr_jukebox;
+ goto construction_err;
wait_for_construction:
wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
/* Did construction of this file fail? */
if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- if (!retry) {
+ trace_nfsd_file_cons_err(rqstp, inode, may_flags, nf);
+ if (!open_retry) {
status = nfserr_jukebox;
- goto out;
+ goto construction_err;
}
- retry = false;
- nfsd_file_put_noref(nf);
+ nfsd_file_put(nf);
+ open_retry = false;
+ fh_put(fhp);
goto retry;
}
-
this_cpu_inc(nfsd_file_cache_hits);
- if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
- bool write = (may_flags & NFSD_MAY_WRITE);
-
- if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
- (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
- status = nfserrno(nfsd_open_break_lease(
- file_inode(nf->nf_file), may_flags));
- if (status == nfs_ok) {
- clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
- if (write)
- clear_bit(NFSD_FILE_BREAK_WRITE,
- &nf->nf_flags);
- }
- }
+ status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
+ if (status != nfs_ok) {
+ nfsd_file_put(nf);
+ nf = NULL;
}
+
out:
if (status == nfs_ok) {
+ this_cpu_inc(nfsd_file_acquisitions);
+ nfsd_file_check_write_error(nf);
*pnf = nf;
- } else {
- nfsd_file_put(nf);
- nf = NULL;
}
-
- trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
+ trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
return status;
-open_file:
- nf = new;
- /* Take reference for the hashtable */
- refcount_inc(&nf->nf_ref);
- __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
- __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
- list_lru_add(&nfsd_file_lru, &nf->nf_lru);
- hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
- ++nfsd_file_hashtbl[hashval].nfb_count;
- nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
- nfsd_file_hashtbl[hashval].nfb_count);
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
- if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
- nfsd_file_gc();
- nf->nf_mark = nfsd_file_mark_find_or_create(nf);
- if (nf->nf_mark)
- status = nfsd_open_verified(rqstp, fhp, S_IFREG,
- may_flags, &nf->nf_file);
- else
+open_file:
+ trace_nfsd_file_alloc(nf);
+
+ if (type == S_IFREG)
+ nf->nf_mark = nfsd_file_mark_find_or_create(inode);
+
+ if (type != S_IFREG || nf->nf_mark) {
+ if (file) {
+ get_file(file);
+ nf->nf_file = file;
+ status = nfs_ok;
+ trace_nfsd_file_opened(nf, status);
+ } else {
+ ret = nfsd_open_verified(fhp, type, may_flags, &nf->nf_file);
+ if (ret == -EOPENSTALE && stale_retry) {
+ stale_retry = false;
+ nfsd_file_unhash(nf);
+ clear_and_wake_up_bit(NFSD_FILE_PENDING,
+ &nf->nf_flags);
+ if (refcount_dec_and_test(&nf->nf_ref))
+ nfsd_file_free(nf);
+ nf = NULL;
+ fh_put(fhp);
+ goto retry;
+ }
+ status = nfserrno(ret);
+ trace_nfsd_file_open(nf, status);
+ if (status == nfs_ok)
+ status = nfsd_file_get_dio_attrs(fhp, nf);
+ }
+ } else
status = nfserr_jukebox;
/*
* If construction failed, or we raced with a call to unlink()
* then unhash.
*/
- if (status != nfs_ok || inode->i_nlink == 0) {
- bool do_free;
- spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
- do_free = nfsd_file_unhash(nf);
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
- if (do_free)
- nfsd_file_put_noref(nf);
- }
- clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
- smp_mb__after_atomic();
- wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
+ if (status != nfs_ok || inode->i_nlink == 0)
+ nfsd_file_unhash(nf);
+ else if (want_gc)
+ nfsd_file_lru_add(nf);
+
+ clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
+ if (status == nfs_ok)
+ goto out;
+
+construction_err:
+ if (refcount_dec_and_test(&nf->nf_ref))
+ nfsd_file_free(nf);
+ nf = NULL;
goto out;
}
+/**
+ * nfsd_file_acquire_gc - Get a struct nfsd_file with an open file
+ * @rqstp: the RPC transaction being executed
+ * @fhp: the NFS filehandle of the file to be opened
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * and garbage-collected. The object is retained for a few
+ * seconds after the final nfsd_file_put() in case the caller
+ * wants to re-use it.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, S_IFREG, true, pnf);
+}
+
+/**
+ * nfsd_file_acquire - Get a struct nfsd_file with an open file
+ * @rqstp: the RPC transaction being executed
+ * @fhp: the NFS filehandle of the file to be opened
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * but not garbage-collected. The object is unhashed after the
+ * final nfsd_file_put().
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, S_IFREG, false, pnf);
+}
+
+/**
+ * nfsd_file_acquire_local - Get a struct nfsd_file with an open file for localio
+ * @net: The network namespace in which to perform a lookup
+ * @cred: the user credential with which to validate access
+ * @client: the auth_domain for LOCALIO lookup
+ * @fhp: the NFS filehandle of the file to be opened
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * This file lookup interface provides access to a file given the
+ * filehandle and credential. No connection-based authorisation
+ * is performed and in that way it is quite different to other
+ * file access mediated by nfsd. It allows a kernel module such as the NFS
+ * client to reach across network and filesystem namespaces to access
+ * a file. The security implications of this should be carefully
+ * considered before use.
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * but not garbage-collected. The object is unhashed after the
+ * final nfsd_file_put().
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ /*
+ * Save creds before calling nfsd_file_do_acquire() (which calls
+ * nfsd_setuser). Important because caller (LOCALIO) is from
+ * client context.
+ */
+ const struct cred *save_cred = get_current_cred();
+ __be32 beres;
+
+ beres = nfsd_file_do_acquire(NULL, net, cred, client, fhp, may_flags,
+ NULL, S_IFREG, false, pnf);
+ put_cred(revert_creds(save_cred));
+ return beres;
+}
+
+/**
+ * nfsd_file_acquire_opened - Get a struct nfsd_file using existing open file
+ * @rqstp: the RPC transaction being executed
+ * @fhp: the NFS filehandle of the file just created
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @file: cached, already-open file (may be NULL)
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * Acquire a nfsd_file object that is not GC'ed. If one doesn't already exist,
+ * and @file is non-NULL, use it to instantiate a new nfsd_file instead of
+ * opening a new one.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct file *file,
+ struct nfsd_file **pnf)
+{
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, file, S_IFREG, false, pnf);
+}
+
+/**
+ * nfsd_file_acquire_dir - Get a struct nfsd_file with an open directory
+ * @rqstp: the RPC transaction being executed
+ * @fhp: the NFS filehandle of the file to be opened
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * but not garbage-collected. The object is unhashed after the
+ * final nfsd_file_put(). This opens directories only, and only
+ * in O_RDONLY mode.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_dir(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file **pnf)
+{
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL, fhp,
+ NFSD_MAY_READ|NFSD_MAY_64BIT_COOKIE,
+ NULL, S_IFDIR, false, pnf);
+}
+
/*
* Note that fields may be added, removed or reordered in the future. Programs
* scraping this file for info should test the labels to ensure they're
* getting the correct field.
*/
-static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
- unsigned int i, count = 0, longest = 0;
- unsigned long hits = 0;
+ unsigned long allocations = 0, releases = 0, evictions = 0;
+ unsigned long hits = 0, acquisitions = 0;
+ unsigned int i, count = 0, buckets = 0;
+ unsigned long lru = 0, total_age = 0;
- /*
- * No need for spinlocks here since we're not terribly interested in
- * accuracy. We do take the nfsd_mutex simply to ensure that we
- * don't end up racing with server shutdown
- */
+ /* Serialize with server shutdown */
mutex_lock(&nfsd_mutex);
- if (nfsd_file_hashtbl) {
- for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
- count += nfsd_file_hashtbl[i].nfb_count;
- longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
- }
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1) {
+ struct bucket_table *tbl;
+ struct rhashtable *ht;
+
+ lru = list_lru_count(&nfsd_file_lru);
+
+ rcu_read_lock();
+ ht = &nfsd_file_rhltable.ht;
+ count = atomic_read(&ht->nelems);
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ buckets = tbl->size;
+ rcu_read_unlock();
}
mutex_unlock(&nfsd_mutex);
- for_each_possible_cpu(i)
+ for_each_possible_cpu(i) {
hits += per_cpu(nfsd_file_cache_hits, i);
+ acquisitions += per_cpu(nfsd_file_acquisitions, i);
+ allocations += per_cpu(nfsd_file_allocations, i);
+ releases += per_cpu(nfsd_file_releases, i);
+ total_age += per_cpu(nfsd_file_total_age, i);
+ evictions += per_cpu(nfsd_file_evictions, i);
+ }
- seq_printf(m, "total entries: %u\n", count);
- seq_printf(m, "longest chain: %u\n", longest);
+ seq_printf(m, "total inodes: %u\n", count);
+ seq_printf(m, "hash buckets: %u\n", buckets);
+ seq_printf(m, "lru entries: %lu\n", lru);
seq_printf(m, "cache hits: %lu\n", hits);
+ seq_printf(m, "acquisitions: %lu\n", acquisitions);
+ seq_printf(m, "allocations: %lu\n", allocations);
+ seq_printf(m, "releases: %lu\n", releases);
+ seq_printf(m, "evictions: %lu\n", evictions);
+ if (releases)
+ seq_printf(m, "mean age (ms): %ld\n", total_age / releases);
+ else
+ seq_printf(m, "mean age (ms): -\n");
return 0;
}
-
-int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nfsd_file_cache_stats_show, NULL);
-}
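
For context, the nfsd_file_acquire_local() wrapper added above relies on the
save/revert credential pattern so that nfsd_setuser(), called from inside
nfsd_file_do_acquire(), cannot leave the LOCALIO caller running with overridden
creds. A minimal sketch of that pattern follows; do_op_that_may_switch_creds()
is a hypothetical placeholder, not a real nfsd helper:

	const struct cred *save_cred = get_current_cred();

	/* May internally switch current's credentials (e.g. nfsd_setuser). */
	do_op_that_may_switch_creds();

	/*
	 * Put the caller's credentials back; put_cred() releases whatever
	 * creds the operation left installed.
	 */
	put_cred(revert_creds(save_cred));
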
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 7872df5a0fe3..b383dbc5b921 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -4,6 +4,12 @@
#include <linux/fsnotify_backend.h>
/*
+ * Limit the time that the list_lru_one lock is held during
+ * an LRU scan.
+ */
+#define NFSD_FILE_GC_BATCH (16UL)
+
+/*
* This is the fsnotify_mark container that nfsd attaches to the files that it
* is holding open. Note that we have a separate refcount here aside from the
* one in the fsnotify_mark. We only want a single fsnotify_mark attached to
@@ -29,24 +35,29 @@ struct nfsd_file_mark {
* never be dereferenced, only used for comparison.
*/
struct nfsd_file {
- struct hlist_node nf_node;
- struct list_head nf_lru;
- struct rcu_head nf_rcu;
+ struct rhlist_head nf_rlist;
+ void *nf_inode;
struct file *nf_file;
const struct cred *nf_cred;
struct net *nf_net;
#define NFSD_FILE_HASHED (0)
#define NFSD_FILE_PENDING (1)
-#define NFSD_FILE_BREAK_READ (2)
-#define NFSD_FILE_BREAK_WRITE (3)
-#define NFSD_FILE_REFERENCED (4)
+#define NFSD_FILE_REFERENCED (2)
+#define NFSD_FILE_GC (3)
+#define NFSD_FILE_RECENT (4)
unsigned long nf_flags;
- struct inode *nf_inode;
- unsigned int nf_hashval;
refcount_t nf_ref;
unsigned char nf_may;
+
struct nfsd_file_mark *nf_mark;
- struct rw_semaphore nf_rwsem;
+ struct list_head nf_lru;
+ struct list_head nf_gc;
+ struct rcu_head nf_rcu;
+ ktime_t nf_birthtime;
+
+ u32 nf_dio_mem_align;
+ u32 nf_dio_offset_align;
+ u32 nf_dio_read_offset_align;
};
int nfsd_file_cache_init(void);
@@ -55,10 +66,23 @@ void nfsd_file_cache_shutdown(void);
int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
+struct net *nfsd_file_put_local(struct nfsd_file __rcu **nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+struct file *nfsd_file_file(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
+void nfsd_file_net_dispose(struct nfsd_net *nn);
bool nfsd_file_is_cached(struct inode *inode);
+__be32 nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **nfp);
__be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **nfp);
-int nfsd_file_cache_stats_open(struct inode *, struct file *);
+__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned int may_flags, struct file *file,
+ struct nfsd_file **nfp);
+__be32 nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf);
+__be32 nfsd_file_acquire_dir(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_file **pnf);
+int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
#endif /* _FS_NFSD_FILECACHE_H */
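
Purely as an illustration of the interface declared above (not code from this
patch), a typical nfsd procedure uses the cache by acquiring a file for the
duration of one operation and dropping it afterwards; the NFSD_MAY_READ flag
and the surrounding error handling are assumptions for the sketch:

	/* rqstp and fhp assumed to be in scope, as in any nfsd procedure. */
	struct nfsd_file *nf;
	__be32 status;

	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (status != nfs_ok)
		return status;

	/* Use nfsd_file_file(nf) for VFS I/O on the cached open file. */

	nfsd_file_put(nf);	/* drop our reference when the operation is done */
	return nfs_ok;
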
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
index 2e2f1d5e9f62..0f1a35400cd5 100644
--- a/fs/nfsd/flexfilelayout.c
+++ b/fs/nfsd/flexfilelayout.c
@@ -15,12 +15,13 @@
#include "flexfilelayoutxdr.h"
#include "pnfs.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PNFS
static __be32
-nfsd4_ff_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
- struct nfsd4_layoutget *args)
+nfsd4_ff_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *args)
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
u32 device_generation = 0;
@@ -117,13 +118,20 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
da->netaddr.addr_len =
snprintf(da->netaddr.addr, FF_ADDR_LEN + 1,
- "%s.%hhu.%hhu", addr, port >> 8, port & 0xff);
+ "%s.%d.%d", addr, port >> 8, port & 0xff);
da->tightly_coupled = false;
return 0;
}
+static __be32
+nfsd4_ff_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
+ struct nfsd4_layoutcommit *lcp)
+{
+ return nfs_ok;
+}
+
const struct nfsd4_layout_ops ff_layout_ops = {
.notify_types =
NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
@@ -132,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = {
.encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
.proc_layoutget = nfsd4_ff_proc_layoutget,
.encode_layoutget = nfsd4_ff_encode_layoutget,
+ .proc_layoutcommit = nfsd4_ff_proc_layoutcommit,
};
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index e81d2a5cf381..f9f7e38cba13 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -17,9 +17,9 @@ struct ff_idmap {
__be32
nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp)
+ const struct nfsd4_layoutget *lgp)
{
- struct pnfs_ff_layout *fl = lgp->lg_content;
+ const struct pnfs_ff_layout *fl = lgp->lg_content;
int len, mirror_len, ds_len, fh_len;
__be32 *p;
@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
*p++ = cpu_to_be32(1); /* single mirror */
*p++ = cpu_to_be32(1); /* single data server */
- p = xdr_encode_opaque_fixed(p, &fl->deviceid,
- sizeof(struct nfsd4_deviceid));
+ p = svcxdr_encode_deviceid4(p, &fl->deviceid);
*p++ = cpu_to_be32(1); /* efficiency */
@@ -77,7 +76,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
__be32
nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp)
+ const struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_ff_device_addr *da = gdp->gd_device;
int len;
@@ -85,6 +84,15 @@ nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
int addr_len;
__be32 *p;
+ /*
+ * See paragraph 5 of RFC 8881, Section 18.40.3.
+ */
+ if (!gdp->gd_maxcount) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
/* len + padding for two strings */
addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
ver_len = 20;
diff --git a/fs/nfsd/flexfilelayoutxdr.h b/fs/nfsd/flexfilelayoutxdr.h
index 8e195aeca023..6d5a1066a903 100644
--- a/fs/nfsd/flexfilelayoutxdr.h
+++ b/fs/nfsd/flexfilelayoutxdr.h
@@ -43,8 +43,8 @@ struct pnfs_ff_layout {
};
__be32 nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp);
+ const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp);
+ const struct nfsd4_layoutget *lgp);
#endif /* _NFSD_FLEXFILELAYOUTXDR_H */
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
new file mode 100644
index 000000000000..be710d809a3b
--- /dev/null
+++ b/fs/nfsd/localio.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS server support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/exportfs.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+#include <linux/string.h>
+
+#include "nfsd.h"
+#include "vfs.h"
+#include "netns.h"
+#include "filecache.h"
+#include "cache.h"
+
+/**
+ * nfsd_open_local_fh - lookup a local filehandle @nfs_fh and map to nfsd_file
+ *
+ * @net: 'struct net' to get the proper nfsd_net required for LOCALIO access
+ * @dom: 'struct auth_domain' required for LOCALIO access
+ * @rpc_clnt: rpc_clnt that the client established
+ * @cred: cred that the client established
+ * @nfs_fh: filehandle to lookup
+ * @pnf: place to find a cached nfsd_file, or to store a newly acquired one
+ * @fmode: fmode_t to use for open
+ *
+ * This function maps a local fh to a path on a local filesystem.
+ * This is useful when the NFS client has the local server mounted: it can
+ * then avoid all the NFS protocol overhead for reads, writes and commits.
+ *
+ * On successful return, the returned nfsd_file will have its nf_net member
+ * set. The caller (the NFS client) is responsible for calling nfsd_net_put
+ * and nfsd_file_put (via nfs_to_nfsd_file_put_local).
+ */
+static struct nfsd_file *
+nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, struct nfsd_file __rcu **pnf,
+ const fmode_t fmode)
+{
+ int mayflags = NFSD_MAY_LOCALIO;
+ struct svc_cred rq_cred;
+ struct svc_fh fh;
+ struct nfsd_file *localio;
+ __be32 beres;
+
+ if (nfs_fh->size > NFS4_FHSIZE)
+ return ERR_PTR(-EINVAL);
+
+ if (!nfsd_net_try_get(net))
+ return ERR_PTR(-ENXIO);
+
+ rcu_read_lock();
+ localio = nfsd_file_get(rcu_dereference(*pnf));
+ rcu_read_unlock();
+ if (localio)
+ return localio;
+
+ /* nfs_fh -> svc_fh */
+ fh_init(&fh, NFS4_FHSIZE);
+ fh.fh_handle.fh_size = nfs_fh->size;
+ memcpy(fh.fh_handle.fh_raw, nfs_fh->data, nfs_fh->size);
+
+ if (fmode & FMODE_READ)
+ mayflags |= NFSD_MAY_READ;
+ if (fmode & FMODE_WRITE)
+ mayflags |= NFSD_MAY_WRITE;
+
+ svcauth_map_clnt_to_svc_cred_local(rpc_clnt, cred, &rq_cred);
+
+ beres = nfsd_file_acquire_local(net, &rq_cred, dom,
+ &fh, mayflags, &localio);
+ if (beres)
+ localio = ERR_PTR(nfs_stat_to_errno(be32_to_cpu(beres)));
+
+ fh_put(&fh);
+ if (rq_cred.cr_group_info)
+ put_group_info(rq_cred.cr_group_info);
+
+ if (!IS_ERR(localio)) {
+ struct nfsd_file *new;
+ if (!nfsd_net_try_get(net)) {
+ nfsd_file_put(localio);
+ nfsd_net_put(net);
+ return ERR_PTR(-ENXIO);
+ }
+ nfsd_file_get(localio);
+ again:
+ new = unrcu_pointer(cmpxchg(pnf, NULL, RCU_INITIALIZER(localio)));
+ if (new) {
+ /* Some other thread installed an nfsd_file */
+ if (nfsd_file_get(new) == NULL)
+ goto again;
+ /*
+ * Drop the ref we were going to install (both file and
+ * net) and the one we were going to return (only file).
+ */
+ nfsd_file_put(localio);
+ nfsd_net_put(net);
+ nfsd_file_put(localio);
+ localio = new;
+ }
+ } else
+ nfsd_net_put(net);
+
+ return localio;
+}
+
+static void nfsd_file_dio_alignment(struct nfsd_file *nf,
+ u32 *nf_dio_mem_align,
+ u32 *nf_dio_offset_align,
+ u32 *nf_dio_read_offset_align)
+{
+ *nf_dio_mem_align = nf->nf_dio_mem_align;
+ *nf_dio_offset_align = nf->nf_dio_offset_align;
+ *nf_dio_read_offset_align = nf->nf_dio_read_offset_align;
+}
+
+static const struct nfsd_localio_operations nfsd_localio_ops = {
+ .nfsd_net_try_get = nfsd_net_try_get,
+ .nfsd_net_put = nfsd_net_put,
+ .nfsd_open_local_fh = nfsd_open_local_fh,
+ .nfsd_file_put_local = nfsd_file_put_local,
+ .nfsd_file_file = nfsd_file_file,
+ .nfsd_file_dio_alignment = nfsd_file_dio_alignment,
+};
+
+void nfsd_localio_ops_init(void)
+{
+ nfs_to = &nfsd_localio_ops;
+}
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static __be32 localio_proc_null(struct svc_rqst *rqstp)
+{
+ return rpc_success;
+}
+
+struct localio_uuidarg {
+ uuid_t uuid;
+};
+
+static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_is_local(&argp->uuid, &nn->local_clients,
+ &nn->local_clients_lock,
+ net, rqstp->rq_client, THIS_MODULE);
+
+ return rpc_success;
+}
+
+static bool localio_decode_uuidarg(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ u8 uuid[UUID_SIZE];
+
+ if (decode_opaque_fixed(xdr, uuid, UUID_SIZE))
+ return false;
+ import_uuid(&argp->uuid, uuid);
+
+ return true;
+}
+
+static const struct svc_procedure localio_procedures1[] = {
+ [LOCALIOPROC_NULL] = {
+ .pc_func = localio_proc_null,
+ .pc_decode = nfssvc_decode_voidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 0,
+ .pc_name = "NULL",
+ },
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .pc_func = localio_proc_uuid_is_local,
+ .pc_decode = localio_decode_uuidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct localio_uuidarg),
+ .pc_argzero = sizeof(struct localio_uuidarg),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_name = "UUID_IS_LOCAL",
+ },
+};
+
+#define LOCALIO_NR_PROCEDURES ARRAY_SIZE(localio_procedures1)
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ localio_count[LOCALIO_NR_PROCEDURES]);
+const struct svc_version localio_version1 = {
+ .vs_vers = 1,
+ .vs_nproc = LOCALIO_NR_PROCEDURES,
+ .vs_proc = localio_procedures1,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_count = localio_count,
+ .vs_xdrsize = XDR_QUADLEN(UUID_SIZE),
+ .vs_hidden = true,
+};
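
A note on the pointer-installation logic in nfsd_open_local_fh() above: it
follows a common lock-free caching pattern in which the new object is published
with cmpxchg() and, if another thread won the race, the winner is used instead.
A reduced sketch of the idea, with obj, obj_create(), obj_get(), obj_put() and
cache_slot as hypothetical names (the real code also retries if the winner's
refcount has already dropped to zero):

	struct obj *mine = obj_create();	/* our candidate */
	struct obj *winner;

	winner = unrcu_pointer(cmpxchg(cache_slot, NULL,
				       RCU_INITIALIZER(mine)));
	if (winner) {
		/* Another thread installed first: take a ref on theirs. */
		obj_get(winner);
		obj_put(mine);		/* drop our unused candidate */
		mine = winner;
	}
	return mine;
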
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 46a7f9b813e5..c774ce9aa296 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -38,17 +38,39 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
memcpy(&fh.fh_handle.fh_raw, f->data, f->size);
fh.fh_export = NULL;
+ /*
+ * Allow BYPASS_GSS as some client implementations use AUTH_SYS
+ * for NLM even when GSS is used for NFS.
+ * Allow OWNER_OVERRIDE as permission might have been changed
+ * after the file was opened.
+ * Pass MAY_NLM so that authentication can be completely bypassed
+ * if NFSEXP_NOAUTHNLM is set. Some older clients use AUTH_NULL
+ * for NLM requests.
+ */
access = (mode == O_WRONLY) ? NFSD_MAY_WRITE : NFSD_MAY_READ;
- access |= NFSD_MAY_LOCK;
+ access |= NFSD_MAY_NLM | NFSD_MAY_OWNER_OVERRIDE | NFSD_MAY_BYPASS_GSS;
nfserr = nfsd_open(rqstp, &fh, S_IFREG, access, filp);
fh_put(&fh);
- /* We return nlm error codes as nlm doesn't know
+ /* We return nlm error codes as nlm doesn't know
* about nfsd, but nfsd does know about nlm..
*/
switch (nfserr) {
case nfs_ok:
return 0;
- case nfserr_dropit:
+ case nfserr_jukebox:
+ /* This error can indicate the presence of a delegation that
+ * conflicts with an NLM lock request. Options are:
+ * (1) For now, drop this request and make the client
+ * retry. When the delegation is returned, the client's lock
+ * retry will complete.
+ * (2) NLM4_DENIED, as per the "spec", signals to the client
+ * that the lock is unavailable now but that the client can
+ * retry. The Linux client implementation does not retry; it
+ * treats NLM4_DENIED the same as NLM4_FAILED and fails the
+ * request.
+ * (3) For the future, treat this as a blocked lock and try a
+ * callback when the delegation is returned, though we might
+ * not have a proper lock request to block on.
+ */
return nlm_drop_reply;
case nfserr_stale:
return nlm_stale_fh;
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
new file mode 100644
index 000000000000..ac51a44e1065
--- /dev/null
+++ b/fs/nfsd/netlink.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/nfsd_netlink.h>
+
+/* Common nested types */
+const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1] = {
+ [NFSD_A_SOCK_ADDR] = { .type = NLA_BINARY, },
+ [NFSD_A_SOCK_TRANSPORT_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1] = {
+ [NFSD_A_VERSION_MAJOR] = { .type = NLA_U32, },
+ [NFSD_A_VERSION_MINOR] = { .type = NLA_U32, },
+ [NFSD_A_VERSION_ENABLED] = { .type = NLA_FLAG, },
+};
+
+/* NFSD_CMD_THREADS_SET - do */
+static const struct nla_policy nfsd_threads_set_nl_policy[NFSD_A_SERVER_SCOPE + 1] = {
+ [NFSD_A_SERVER_THREADS] = { .type = NLA_U32, },
+ [NFSD_A_SERVER_GRACETIME] = { .type = NLA_U32, },
+ [NFSD_A_SERVER_LEASETIME] = { .type = NLA_U32, },
+ [NFSD_A_SERVER_SCOPE] = { .type = NLA_NUL_STRING, },
+};
+
+/* NFSD_CMD_VERSION_SET - do */
+static const struct nla_policy nfsd_version_set_nl_policy[NFSD_A_SERVER_PROTO_VERSION + 1] = {
+ [NFSD_A_SERVER_PROTO_VERSION] = NLA_POLICY_NESTED(nfsd_version_nl_policy),
+};
+
+/* NFSD_CMD_LISTENER_SET - do */
+static const struct nla_policy nfsd_listener_set_nl_policy[NFSD_A_SERVER_SOCK_ADDR + 1] = {
+ [NFSD_A_SERVER_SOCK_ADDR] = NLA_POLICY_NESTED(nfsd_sock_nl_policy),
+};
+
+/* NFSD_CMD_POOL_MODE_SET - do */
+static const struct nla_policy nfsd_pool_mode_set_nl_policy[NFSD_A_POOL_MODE_MODE + 1] = {
+ [NFSD_A_POOL_MODE_MODE] = { .type = NLA_NUL_STRING, },
+};
+
+/* Ops table for nfsd */
+static const struct genl_split_ops nfsd_nl_ops[] = {
+ {
+ .cmd = NFSD_CMD_RPC_STATUS_GET,
+ .dumpit = nfsd_nl_rpc_status_get_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = NFSD_CMD_THREADS_SET,
+ .doit = nfsd_nl_threads_set_doit,
+ .policy = nfsd_threads_set_nl_policy,
+ .maxattr = NFSD_A_SERVER_SCOPE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_THREADS_GET,
+ .doit = nfsd_nl_threads_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_VERSION_SET,
+ .doit = nfsd_nl_version_set_doit,
+ .policy = nfsd_version_set_nl_policy,
+ .maxattr = NFSD_A_SERVER_PROTO_VERSION,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_VERSION_GET,
+ .doit = nfsd_nl_version_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_LISTENER_SET,
+ .doit = nfsd_nl_listener_set_doit,
+ .policy = nfsd_listener_set_nl_policy,
+ .maxattr = NFSD_A_SERVER_SOCK_ADDR,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_LISTENER_GET,
+ .doit = nfsd_nl_listener_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_POOL_MODE_SET,
+ .doit = nfsd_nl_pool_mode_set_doit,
+ .policy = nfsd_pool_mode_set_nl_policy,
+ .maxattr = NFSD_A_POOL_MODE_MODE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_POOL_MODE_GET,
+ .doit = nfsd_nl_pool_mode_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+};
+
+struct genl_family nfsd_nl_family __ro_after_init = {
+ .name = NFSD_FAMILY_NAME,
+ .version = NFSD_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = nfsd_nl_ops,
+ .n_split_ops = ARRAY_SIZE(nfsd_nl_ops),
+};
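
The generated family definition above still has to be registered by
non-generated code; a minimal sketch of the registration pair (the init/exit
function names here are placeholders, not the actual nfsd module hooks):

	static int __init example_nfsd_nl_init(void)
	{
		/* Expose the "nfsd" generic netlink family to user space. */
		return genl_register_family(&nfsd_nl_family);
	}

	static void __exit example_nfsd_nl_exit(void)
	{
		genl_unregister_family(&nfsd_nl_family);
	}
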
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
new file mode 100644
index 000000000000..478117ff6b8c
--- /dev/null
+++ b/fs/nfsd/netlink.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_NFSD_GEN_H
+#define _LINUX_NFSD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/nfsd_netlink.h>
+
+/* Common nested types */
+extern const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1];
+extern const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1];
+
+int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_pool_mode_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_pool_mode_get_doit(struct sk_buff *skb, struct genl_info *info);
+
+extern struct genl_family nfsd_nl_family;
+
+#endif /* _LINUX_NFSD_GEN_H */
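
Each *_doit prototype above is implemented by hand-written nfsd code; purely to
illustrate the calling convention, a handler reading one of the attributes
validated by the policies in netlink.c might look like the following sketch
(this is not the real nfsd_nl_threads_set_doit):

	static int example_threads_doit(struct sk_buff *skb,
					struct genl_info *info)
	{
		u32 threads;

		if (!info->attrs[NFSD_A_SERVER_THREADS])
			return -EINVAL;

		threads = nla_get_u32(info->attrs[NFSD_A_SERVER_THREADS]);
		/* ... apply the new thread count ... */
		return 0;
	}
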
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 935c1028c217..3e2d0fde80a7 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -10,7 +10,12 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/filelock.h>
+#include <linux/nfs4.h>
#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
+#include <linux/siphash.h>
+#include <linux/sunrpc/stats.h>
/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS 4
@@ -24,10 +29,22 @@ struct nfsd4_client_tracking_ops;
enum {
/* cache misses due only to checksum comparison failures */
- NFSD_NET_PAYLOAD_MISSES,
+ NFSD_STATS_PAYLOAD_MISSES,
/* amount of memory (in bytes) currently consumed by the DRC */
- NFSD_NET_DRC_MEM_USAGE,
- NFSD_NET_COUNTERS_NUM
+ NFSD_STATS_DRC_MEM_USAGE,
+ NFSD_STATS_RC_HITS, /* repcache hits */
+ NFSD_STATS_RC_MISSES, /* repcache misses */
+ NFSD_STATS_RC_NOCACHE, /* uncached reqs */
+ NFSD_STATS_FH_STALE, /* FH stale error */
+ NFSD_STATS_IO_READ, /* bytes returned to read requests */
+ NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
+#ifdef CONFIG_NFSD_V4
+ NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
+ NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
+#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
+ NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */
+#endif
+ NFSD_STATS_COUNTERS_NUM
};
/*
@@ -108,27 +125,19 @@ struct nfsd_net {
bool nfsd_net_up;
bool lockd_up;
- /* Time of server startup */
- struct timespec64 nfssvc_boot;
- seqlock_t boot_lock;
-
- /*
- * Max number of connections this nfsd container will allow. Defaults
- * to '0' which is means that it bases this on the number of threads.
- */
- unsigned int max_connections;
+ seqlock_t writeverf_lock;
+ unsigned char writeverf[8];
u32 clientid_base;
u32 clientid_counter;
u32 clverifier_counter;
- struct svc_serv *nfsd_serv;
-
- wait_queue_head_t ntf_wq;
- atomic_t ntf_refcnt;
+ struct svc_info nfsd_info;
+#define nfsd_serv nfsd_info.serv
- /* Allow umount to wait for nfsd state cleanup */
- struct completion nfsd_shutdown_complete;
+ struct percpu_ref nfsd_net_ref;
+ struct completion nfsd_net_confirm_done;
+ struct completion nfsd_net_free_done;
/*
* clientid and stateid data for construction of net unique COPY
@@ -137,12 +146,13 @@ struct nfsd_net {
u32 s2s_cp_cl_id;
struct idr s2s_cp_stateids;
spinlock_t s2s_cp_lock;
+ atomic_t pending_async_copies;
/*
* Version information
*/
- bool *nfsd_versions;
- bool *nfsd4_minorversions;
+ bool nfsd_versions[NFSD_MAXVERS + 1];
+ bool nfsd4_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1];
/*
* Duplicate reply cache
@@ -167,7 +177,10 @@ struct nfsd_net {
atomic_t num_drc_entries;
/* Per-netns stats counters */
- struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];
+ struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
+
+ /* sunrpc svc stats */
+ struct svc_stat nfsd_svcstats;
/* longest hash chain seen */
unsigned int longest_chain;
@@ -175,7 +188,7 @@ struct nfsd_net {
/* size of cache when we saw the longest hash chain */
unsigned int longest_chain_cachesize;
- struct shrinker nfsd_reply_cache_shrinker;
+ struct shrinker *nfsd_reply_cache_shrinker;
/* tracking server-to-server copy mounts */
spinlock_t nfsd_ssc_lock;
@@ -184,15 +197,37 @@ struct nfsd_net {
/* utsname taken from the process that starts the server */
char nfsd_name[UNX_MAXNODENAME+1];
+
+ struct nfsd_fcache_disposal *fcache_disposal;
+
+ siphash_key_t siphash_key;
+
+ atomic_t nfs4_client_count;
+ int nfs4_max_clients;
+
+ atomic_t nfsd_courtesy_clients;
+ struct shrinker *nfsd_client_shrinker;
+ struct work_struct nfsd_shrinker_work;
+
+ /* last time an admin-revoke happened for NFSv4.0 */
+ time64_t nfs40_last_revoke;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /* Local clients to be invalidated when net is shut down */
+ spinlock_t local_clients_lock;
+ struct list_head local_clients;
+#endif
};
/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
-extern void nfsd_netns_free_versions(struct nfsd_net *nn);
-
+extern bool nfsd_support_version(int vers);
extern unsigned int nfsd_net_id;
-void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn);
-void nfsd_reset_boot_verifier(struct nfsd_net *nn);
+bool nfsd_net_try_get(struct net *net);
+void nfsd_net_put(struct net *net);
+
+void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
+void nfsd_reset_write_verifier(struct nfsd_net *nn);
#endif /* __NFSD_NETNS_H__ */
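
The renamed per-netns counters above are typically bumped from hot paths with
the percpu_counter helpers; as a sketch (assuming a valid struct net *net is in
scope):

	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* Record a reply-cache hit for this network namespace. */
	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]);
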
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 367551bddfc6..5fb202acb0fd 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -55,7 +55,7 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
goto out;
if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
- acl = get_acl(inode, ACL_TYPE_ACCESS);
+ acl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (acl == NULL) {
/* Solaris returns the inode's minimum ACL. */
acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
@@ -69,7 +69,7 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
/* Check how Solaris handles requests for the Default ACL
of a non-directory! */
- acl = get_acl(inode, ACL_TYPE_DEFAULT);
+ acl = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl)) {
resp->status = nfserrno(PTR_ERR(acl));
goto fail;
@@ -84,6 +84,8 @@ out:
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
+ resp->acl_access = NULL;
+ resp->acl_default = NULL;
goto out;
}
@@ -111,18 +113,18 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst *rqstp)
if (error)
goto out_errno;
- fh_lock(fh);
+ inode_lock(inode);
- error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS,
+ error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_ACCESS,
argp->acl_access);
if (error)
goto out_drop_lock;
- error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_DEFAULT,
+ error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_DEFAULT,
argp->acl_default);
if (error)
goto out_drop_lock;
- fh_unlock(fh);
+ inode_unlock(inode);
fh_drop_write(fh);
@@ -136,7 +138,7 @@ out:
return rpc_success;
out_drop_lock:
- fh_unlock(fh);
+ inode_unlock(inode);
fh_drop_write(fh);
out_errno:
resp->status = nfserrno(error);
@@ -246,37 +248,27 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
struct inode *inode;
- int w;
if (!svcxdr_encode_stat(xdr, resp->status))
- return 0;
+ return false;
if (dentry == NULL || d_really_is_negative(dentry))
- return 1;
+ return true;
inode = d_inode(dentry);
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
- return 0;
+ return false;
if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
- return 0;
-
- rqstp->rq_res.page_len = w = nfsacl_size(
- (resp->mask & NFS_ACL) ? resp->acl_access : NULL,
- (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
- while (w > 0) {
- if (!*(rqstp->rq_next_page++))
- return 1;
- w -= PAGE_SIZE;
- }
+ return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
resp->mask & NFS_ACL, 0))
- return 0;
+ return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
resp->mask & NFS_DFACL, NFS_ACL_DEFAULT))
- return 0;
+ return false;
- return 1;
+ return true;
}
/* ACCESS */
@@ -286,17 +278,17 @@ nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
struct nfsd3_accessres *resp = rqstp->rq_resp;
if (!svcxdr_encode_stat(xdr, resp->status))
- return 0;
+ return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
- return 0;
+ return false;
if (xdr_stream_encode_u32(xdr, resp->access) < 0)
- return 0;
+ return false;
break;
}
- return 1;
+ return true;
}
/*
@@ -318,8 +310,6 @@ static void nfsaclsvc_release_access(struct svc_rqst *rqstp)
fh_put(&resp->fh);
}
-struct nfsd3_voidargs { int dummy; };
-
#define ST 1 /* status*/
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
@@ -331,6 +321,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
@@ -342,6 +333,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_encode = nfsaclsvc_encode_getaclres,
.pc_release = nfsaclsvc_release_getacl,
.pc_argsize = sizeof(struct nfsd3_getaclargs),
+ .pc_argzero = sizeof(struct nfsd3_getaclargs),
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
@@ -353,6 +345,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd3_setaclargs),
+ .pc_argzero = sizeof(struct nfsd3_setaclargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
@@ -364,6 +357,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
@@ -375,6 +369,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_encode = nfsaclsvc_encode_accessres,
.pc_release = nfsaclsvc_release_access,
.pc_argsize = sizeof(struct nfsd3_accessargs),
+ .pc_argzero = sizeof(struct nfsd3_accessargs),
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1,
@@ -382,10 +377,11 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
},
};
-static unsigned int nfsd_acl_count2[ARRAY_SIZE(nfsd_acl_procedures2)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfsd_acl_count2[ARRAY_SIZE(nfsd_acl_procedures2)]);
const struct svc_version nfsd_acl_version2 = {
.vs_vers = 2,
- .vs_nproc = 5,
+ .vs_nproc = ARRAY_SIZE(nfsd_acl_procedures2),
.vs_proc = nfsd_acl_procedures2,
.vs_count = nfsd_acl_count2,
.vs_dispatch = nfsd_dispatch,
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 35b2ebda14da..7b5433bd3019 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -47,7 +47,7 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
resp->mask = argp->mask;
if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
- acl = get_acl(inode, ACL_TYPE_ACCESS);
+ acl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (acl == NULL) {
/* Solaris returns the inode's minimum ACL. */
acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
@@ -61,7 +61,7 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
/* Check how Solaris handles requests for the Default ACL
of a non-directory! */
- acl = get_acl(inode, ACL_TYPE_DEFAULT);
+ acl = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl)) {
resp->status = nfserrno(PTR_ERR(acl));
goto fail;
@@ -76,6 +76,8 @@ out:
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
+ resp->acl_access = NULL;
+ resp->acl_default = NULL;
goto out;
}
@@ -101,17 +103,17 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
if (error)
goto out_errno;
- fh_lock(fh);
+ inode_lock(inode);
- error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS,
+ error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_ACCESS,
argp->acl_access);
if (error)
goto out_drop_lock;
- error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_DEFAULT,
+ error = set_posix_acl(&nop_mnt_idmap, fh->fh_dentry, ACL_TYPE_DEFAULT,
argp->acl_default);
out_drop_lock:
- fh_unlock(fh);
+ inode_unlock(inode);
fh_drop_write(fh);
out_errno:
resp->status = nfserrno(error);
@@ -171,11 +173,7 @@ nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
- struct kvec *head = rqstp->rq_res.head;
struct inode *inode;
- unsigned int base;
- int n;
- int w;
if (!svcxdr_encode_nfsstat3(xdr, resp->status))
return false;
@@ -187,26 +185,12 @@ nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
return false;
- base = (char *)xdr->p - (char *)head->iov_base;
-
- rqstp->rq_res.page_len = w = nfsacl_size(
- (resp->mask & NFS_ACL) ? resp->acl_access : NULL,
- (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
- while (w > 0) {
- if (!*(rqstp->rq_next_page++))
- return false;
- w -= PAGE_SIZE;
- }
-
- n = nfsacl_encode(&rqstp->rq_res, base, inode,
- resp->acl_access,
- resp->mask & NFS_ACL, 0);
- if (n > 0)
- n = nfsacl_encode(&rqstp->rq_res, base + n, inode,
- resp->acl_default,
- resp->mask & NFS_DFACL,
- NFS_ACL_DEFAULT);
- if (n <= 0)
+ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
+ resp->mask & NFS_ACL, 0))
+ return false;
+ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
+ resp->mask & NFS_DFACL,
+ NFS_ACL_DEFAULT))
return false;
break;
default:
@@ -239,8 +223,6 @@ static void nfs3svc_release_getacl(struct svc_rqst *rqstp)
posix_acl_release(resp->acl_default);
}
-struct nfsd3_voidargs { int dummy; };
-
#define ST 1 /* status*/
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
@@ -252,6 +234,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
@@ -263,6 +246,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_encode = nfs3svc_encode_getaclres,
.pc_release = nfs3svc_release_getacl,
.pc_argsize = sizeof(struct nfsd3_getaclargs),
+ .pc_argzero = sizeof(struct nfsd3_getaclargs),
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
@@ -274,6 +258,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_encode = nfs3svc_encode_setaclres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_setaclargs),
+ .pc_argzero = sizeof(struct nfsd3_setaclargs),
.pc_ressize = sizeof(struct nfsd3_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT,
@@ -281,10 +266,11 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
},
};
-static unsigned int nfsd_acl_count3[ARRAY_SIZE(nfsd_acl_procedures3)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfsd_acl_count3[ARRAY_SIZE(nfsd_acl_procedures3)]);
const struct svc_version nfsd_acl_version3 = {
.vs_vers = 3,
- .vs_nproc = 3,
+ .vs_nproc = ARRAY_SIZE(nfsd_acl_procedures3),
.vs_proc = nfsd_acl_procedures3,
.vs_count = nfsd_acl_count3,
.vs_dispatch = nfsd_dispatch,
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 4418517f6f12..42adc5461db0 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -8,10 +8,13 @@
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/magic.h>
+#include <linux/namei.h>
#include "cache.h"
#include "xdr3.h"
#include "vfs.h"
+#include "filecache.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -26,6 +29,29 @@ static int nfs3_ftypes[] = {
S_IFIFO, /* NF3FIFO */
};
+static __be32 nfsd3_map_status(__be32 status)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_nofilehandle:
+ status = nfserr_badhandle;
+ break;
+ case nfserr_wrongsec:
+ case nfserr_file_open:
+ status = nfserr_acces;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_notdir;
+ break;
+ case nfserr_symlink:
+ case nfserr_wrong_type:
+ status = nfserr_inval;
+ break;
+ }
+ return status;
+}
+
/*
* NULL call.
*/
@@ -44,8 +70,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: GETATTR(3) %s\n",
- SVCFH_fmt(&argp->fh));
+ trace_nfsd_vfs_getattr(rqstp, &argp->fh);
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0,
@@ -55,6 +80,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -66,13 +92,19 @@ nfsd3_proc_setattr(struct svc_rqst *rqstp)
{
struct nfsd3_sattrargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
+ const struct timespec64 *guardtime = NULL;
dprintk("nfsd: SETATTR(3) %s\n",
SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
- resp->status = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,
- argp->check_guard, argp->guardtime);
+ if (argp->check_guard)
+ guardtime = &argp->guardtime;
+ resp->status = nfsd_setattr(rqstp, &resp->fh, &attrs, guardtime);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -96,6 +128,7 @@ nfsd3_proc_lookup(struct svc_rqst *rqstp)
resp->status = nfsd_lookup(rqstp, &resp->dirfh,
argp->name, argp->len,
&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -115,6 +148,7 @@ nfsd3_proc_access(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
resp->status = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -135,6 +169,7 @@ nfsd3_proc_readlink(struct svc_rqst *rqstp)
resp->pages = rqstp->rq_next_page++;
resp->status = nfsd_readlink(rqstp, &resp->fh,
page_address(*resp->pages), &resp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -146,39 +181,33 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
{
struct nfsd3_readargs *argp = rqstp->rq_argp;
struct nfsd3_readres *resp = rqstp->rq_resp;
- u32 max_blocksize = svc_max_payload(rqstp);
- unsigned int len;
- int v;
-
- argp->count = min_t(u32, argp->count, max_blocksize);
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long long) argp->offset);
- v = 0;
- len = argp->count;
- resp->pages = rqstp->rq_next_page;
- while (len > 0) {
- struct page *page = *(rqstp->rq_next_page++);
+ argp->count = min_t(u32, argp->count, svc_max_payload(rqstp));
+ argp->count = min_t(u32, argp->count, rqstp->rq_res.buflen);
+ if (argp->offset > (u64)OFFSET_MAX)
+ argp->offset = (u64)OFFSET_MAX;
+ if (argp->offset + argp->count > (u64)OFFSET_MAX)
+ argp->count = (u64)OFFSET_MAX - argp->offset;
- rqstp->rq_vec[v].iov_base = page_address(page);
- rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
- len -= rqstp->rq_vec[v].iov_len;
- v++;
- }
+ resp->pages = rqstp->rq_next_page;
/* Obtain buffer pointer for payload.
* 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
* + 1 (xdr opaque byte count) = 26
*/
resp->count = argp->count;
- svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
+ svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3) << 2) +
+ resp->count + 4);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
- rqstp->rq_vec, v, &resp->count, &resp->eof);
+ &resp->count, &resp->eof);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -191,64 +220,168 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
struct nfsd3_writeargs *argp = rqstp->rq_argp;
struct nfsd3_writeres *resp = rqstp->rq_resp;
unsigned long cnt = argp->len;
- unsigned int nvecs;
dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n",
SVCFH_fmt(&argp->fh),
argp->len,
(unsigned long long) argp->offset,
- argp->stable? " stable" : "");
+ argp->stable ? " stable" : "");
+
+ resp->status = nfserr_fbig;
+ if (argp->offset > (u64)OFFSET_MAX ||
+ argp->offset + argp->len > (u64)OFFSET_MAX)
+ return rpc_success;
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
- nvecs = svc_fill_write_vector(rqstp, &argp->payload);
- if (!nvecs) {
- resp->status = nfserr_io;
- goto out;
- }
resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
- rqstp->rq_vec, nvecs, &cnt,
+ &argp->payload, &cnt,
resp->committed, resp->verf);
resp->count = cnt;
-out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
/*
- * With NFSv3, CREATE processing is a lot easier than with NFSv2.
- * At least in theory; we'll see how it fares in practice when the
- * first reports about SunOS compatibility problems start to pour in...
+ * Implement NFSv3's unchecked, guarded, and exclusive CREATE
+ * semantics for regular files. Except for the created file,
+ * this operation is stateless on the server.
+ *
+ * Upon return, the caller must release @fhp and @resfhp.
*/
static __be32
+nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct svc_fh *resfhp, struct nfsd3_createargs *argp)
+{
+ struct iattr *iap = &argp->attrs;
+ struct dentry *parent, *child;
+ struct nfsd_attrs attrs = {
+ .na_iattr = iap,
+ };
+ __u32 v_mtime, v_atime;
+ struct inode *inode;
+ __be32 status;
+ int host_err;
+
+ trace_nfsd_vfs_create(rqstp, fhp, S_IFREG, argp->name, argp->len);
+
+ if (isdotent(argp->name, argp->len))
+ return nfserr_exist;
+ if (!(iap->ia_valid & ATTR_MODE))
+ iap->ia_mode = 0;
+
+ status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
+ if (status != nfs_ok)
+ return status;
+
+ parent = fhp->fh_dentry;
+ inode = d_inode(parent);
+
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ return nfserrno(host_err);
+
+ child = start_creating(&nop_mnt_idmap, parent,
+ &QSTR_LEN(argp->name, argp->len));
+ if (IS_ERR(child)) {
+ status = nfserrno(PTR_ERR(child));
+ goto out_write;
+ }
+
+ if (d_really_is_negative(child)) {
+ status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ if (status != nfs_ok)
+ goto out;
+ }
+
+ status = fh_compose(resfhp, fhp->fh_export, child, fhp);
+ if (status != nfs_ok)
+ goto out;
+
+ v_mtime = 0;
+ v_atime = 0;
+ if (argp->createmode == NFS3_CREATE_EXCLUSIVE) {
+ u32 *verifier = (u32 *)argp->verf;
+
+ /*
+ * Solaris 7 gets confused (bugid 4218508) if these have
+ * the high bit set, as do xfs filesystems without the
+ * "bigtime" feature. So just clear the high bits.
+ */
+ v_mtime = verifier[0] & 0x7fffffff;
+ v_atime = verifier[1] & 0x7fffffff;
+ }
+
+ if (d_really_is_positive(child)) {
+ status = nfs_ok;
+
+ switch (argp->createmode) {
+ case NFS3_CREATE_UNCHECKED:
+ if (!d_is_reg(child))
+ break;
+ iap->ia_valid &= ATTR_SIZE;
+ goto set_attr;
+ case NFS3_CREATE_GUARDED:
+ status = nfserr_exist;
+ break;
+ case NFS3_CREATE_EXCLUSIVE:
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
+ d_inode(child)->i_size == 0) {
+ break;
+ }
+ status = nfserr_exist;
+ }
+ goto out;
+ }
+
+ if (!IS_POSIXACL(inode))
+ iap->ia_mode &= ~current_umask();
+
+ status = fh_fill_pre_attrs(fhp);
+ if (status != nfs_ok)
+ goto out;
+ host_err = vfs_create(&nop_mnt_idmap, child, iap->ia_mode, NULL);
+ if (host_err < 0) {
+ status = nfserrno(host_err);
+ goto out;
+ }
+ fh_fill_post_attrs(fhp);
+
+ /* A newly created file already has a file size of zero. */
+ if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
+ iap->ia_valid &= ~ATTR_SIZE;
+ if (argp->createmode == NFS3_CREATE_EXCLUSIVE) {
+ iap->ia_valid = ATTR_MTIME | ATTR_ATIME |
+ ATTR_MTIME_SET | ATTR_ATIME_SET;
+ iap->ia_mtime.tv_sec = v_mtime;
+ iap->ia_atime.tv_sec = v_atime;
+ iap->ia_mtime.tv_nsec = 0;
+ iap->ia_atime.tv_nsec = 0;
+ }
+
+set_attr:
+ status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
+
+out:
+ end_creating(child);
+out_write:
+ fh_drop_write(fhp);
+ return status;
+}
+
+static __be32
nfsd3_proc_create(struct svc_rqst *rqstp)
{
struct nfsd3_createargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
- svc_fh *dirfhp, *newfhp = NULL;
- struct iattr *attr;
-
- dprintk("nfsd: CREATE(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
+ svc_fh *dirfhp, *newfhp;
dirfhp = fh_copy(&resp->dirfh, &argp->fh);
newfhp = fh_init(&resp->fh, NFS3_FHSIZE);
- attr = &argp->attrs;
-
- /* Unfudge the mode bits */
- attr->ia_mode &= ~S_IFMT;
- if (!(attr->ia_valid & ATTR_MODE)) {
- attr->ia_valid |= ATTR_MODE;
- attr->ia_mode = S_IFREG;
- } else {
- attr->ia_mode = (attr->ia_mode & ~S_IFMT) | S_IFREG;
- }
- /* Now create the file and set attributes */
- resp->status = do_nfsd_create(rqstp, dirfhp, argp->name, argp->len,
- attr, newfhp, argp->createmode,
- (u32 *)argp->verf, NULL, NULL);
+ resp->status = nfsd3_create_file(rqstp, dirfhp, newfhp, argp);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -260,18 +393,16 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp)
{
struct nfsd3_createargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
-
- dprintk("nfsd: MKDIR(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
argp->attrs.ia_valid &= ~ATTR_SIZE;
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
- &argp->attrs, S_IFDIR, 0, &resp->fh);
- fh_unlock(&resp->dirfh);
+ &attrs, S_IFDIR, 0, &resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -280,6 +411,9 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
{
struct nfsd3_symlinkargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
if (argp->tlen == 0) {
resp->status = nfserr_inval;
@@ -298,17 +432,13 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
goto out;
}
- dprintk("nfsd: SYMLINK(3) %s %.*s -> %.*s\n",
- SVCFH_fmt(&argp->ffh),
- argp->flen, argp->fname,
- argp->tlen, argp->tname);
-
fh_copy(&resp->dirfh, &argp->ffh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_symlink(rqstp, &resp->dirfh, argp->fname,
- argp->flen, argp->tname, &resp->fh);
+ argp->flen, argp->tname, &attrs, &resp->fh);
kfree(argp->tname);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -320,14 +450,12 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
{
struct nfsd3_mknodargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
int type;
dev_t rdev = 0;
- dprintk("nfsd: MKNOD(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
@@ -345,9 +473,9 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
type = nfs3_ftypes[argp->ftype];
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
- &argp->attrs, type, rdev, &resp->fh);
- fh_unlock(&resp->dirfh);
+ &attrs, type, rdev, &resp->fh);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -360,16 +488,11 @@ nfsd3_proc_remove(struct svc_rqst *rqstp)
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: REMOVE(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
/* Unlink. -S_IFDIR means file must not be a directory */
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR,
argp->name, argp->len);
- fh_unlock(&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -382,15 +505,10 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp)
struct nfsd3_diropargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: RMDIR(3) %s %.*s\n",
- SVCFH_fmt(&argp->fh),
- argp->len,
- argp->name);
-
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, S_IFDIR,
argp->name, argp->len);
- fh_unlock(&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -400,19 +518,11 @@ nfsd3_proc_rename(struct svc_rqst *rqstp)
struct nfsd3_renameargs *argp = rqstp->rq_argp;
struct nfsd3_renameres *resp = rqstp->rq_resp;
- dprintk("nfsd: RENAME(3) %s %.*s ->\n",
- SVCFH_fmt(&argp->ffh),
- argp->flen,
- argp->fname);
- dprintk("nfsd: -> %s %.*s\n",
- SVCFH_fmt(&argp->tfh),
- argp->tlen,
- argp->tname);
-
fh_copy(&resp->ffh, &argp->ffh);
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen,
&resp->tfh, argp->tname, argp->tlen);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -422,48 +532,32 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
struct nfsd3_linkargs *argp = rqstp->rq_argp;
struct nfsd3_linkres *resp = rqstp->rq_resp;
- dprintk("nfsd: LINK(3) %s ->\n",
- SVCFH_fmt(&argp->ffh));
- dprintk("nfsd: -> %s %.*s\n",
- SVCFH_fmt(&argp->tfh),
- argp->tlen,
- argp->tname);
-
fh_copy(&resp->fh, &argp->ffh);
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen,
&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd3_readdirres *resp,
- int count)
+ u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
-
- count = min_t(u32, count, svc_max_payload(rqstp));
+ unsigned int sendbuf = min_t(unsigned int, rqstp->rq_res.buflen,
+ svc_max_payload(rqstp));
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
- buf->buflen = count - XDR_UNIT * 2;
+ buf->buflen = clamp(count, (u32)(XDR_UNIT * 2), sendbuf);
+ buf->buflen -= XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
- while (count > 0) {
- rqstp->rq_next_page++;
- count -= PAGE_SIZE;
- }
+ rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
- /* This is xdr_init_encode(), but it assumes that
- * the head kvec has already been consumed. */
- xdr_set_scratch_buffer(xdr, NULL, 0);
- xdr->buf = buf;
- xdr->page_ptr = buf->pages;
- xdr->iov = NULL;
- xdr->p = page_address(*buf->pages);
- xdr->end = xdr->p + (PAGE_SIZE >> 2);
- xdr->rqst = NULL;
+ xdr_init_encode_pages(xdr, buf);
}
/*
@@ -476,9 +570,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
struct nfsd3_readdirres *resp = rqstp->rq_resp;
loff_t offset;
- dprintk("nfsd: READDIR(3) %s %d bytes at %d\n",
- SVCFH_fmt(&argp->fh),
- argp->count, (u32) argp->cookie);
+ trace_nfsd_vfs_readdir(rqstp, &argp->fh, argp->count, argp->cookie);
nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
@@ -495,6 +587,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
/* Recycle only pages that were part of the reply */
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -509,9 +602,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
struct nfsd3_readdirres *resp = rqstp->rq_resp;
loff_t offset;
- dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n",
- SVCFH_fmt(&argp->fh),
- argp->count, (u32) argp->cookie);
+ trace_nfsd_vfs_readdir(rqstp, &argp->fh, argp->count, argp->cookie);
nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
@@ -539,6 +630,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -551,11 +643,9 @@ nfsd3_proc_fsstat(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_fsstatres *resp = rqstp->rq_resp;
- dprintk("nfsd: FSSTAT(3) %s\n",
- SVCFH_fmt(&argp->fh));
-
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0);
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -599,6 +689,7 @@ nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
}
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -641,6 +732,7 @@ nfsd3_proc_pathconf(struct svc_rqst *rqstp)
}
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -652,21 +744,23 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
{
struct nfsd3_commitargs *argp = rqstp->rq_argp;
struct nfsd3_commitres *resp = rqstp->rq_resp;
+ struct nfsd_file *nf;
dprintk("nfsd: COMMIT(3) %s %u@%Lu\n",
SVCFH_fmt(&argp->fh),
argp->count,
(unsigned long long) argp->offset);
- if (argp->offset > NFS_OFFSET_MAX) {
- resp->status = nfserr_inval;
- goto out;
- }
-
fh_copy(&resp->fh, &argp->fh);
- resp->status = nfsd_commit(rqstp, &resp->fh, argp->offset,
+ resp->status = nfsd_file_acquire_gc(rqstp, &resp->fh, NFSD_MAY_WRITE |
+ NFSD_MAY_NOT_BREAK_LEASE, &nf);
+ if (resp->status)
+ goto out;
+ resp->status = nfsd_commit(rqstp, &resp->fh, nf, argp->offset,
argp->count, resp->verf);
+ nfsd_file_put(nf);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -696,6 +790,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
@@ -707,6 +802,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_getattrres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd3_attrstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
@@ -718,6 +814,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_wccstatres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_sattrargs),
+ .pc_argzero = sizeof(struct nfsd3_sattrargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
@@ -729,6 +826,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_lookupres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_argzero = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+pAT+pAT,
@@ -740,6 +838,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_accessres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_accessargs),
+ .pc_argzero = sizeof(struct nfsd3_accessargs),
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1,
@@ -751,6 +850,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_readlinkres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd3_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
@@ -762,6 +862,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_readres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readargs),
+ .pc_argzero = sizeof(struct nfsd3_readargs),
.pc_ressize = sizeof(struct nfsd3_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
@@ -773,6 +874,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_writeres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_writeargs),
+ .pc_argzero = sizeof(struct nfsd3_writeargs),
.pc_ressize = sizeof(struct nfsd3_writeres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+4,
@@ -784,6 +886,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_createargs),
+ .pc_argzero = sizeof(struct nfsd3_createargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
@@ -795,6 +898,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_mkdirargs),
+ .pc_argzero = sizeof(struct nfsd3_mkdirargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
@@ -806,6 +910,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_symlinkargs),
+ .pc_argzero = sizeof(struct nfsd3_symlinkargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
@@ -817,6 +922,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_createres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_mknodargs),
+ .pc_argzero = sizeof(struct nfsd3_mknodargs),
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
@@ -828,6 +934,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_wccstatres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_argzero = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
@@ -839,6 +946,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_wccstatres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_argzero = sizeof(struct nfsd3_diropargs),
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
@@ -850,6 +958,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_renameres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_renameargs),
+ .pc_argzero = sizeof(struct nfsd3_renameargs),
.pc_ressize = sizeof(struct nfsd3_renameres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+WC,
@@ -861,6 +970,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_linkres,
.pc_release = nfs3svc_release_fhandle2,
.pc_argsize = sizeof(struct nfsd3_linkargs),
+ .pc_argzero = sizeof(struct nfsd3_linkargs),
.pc_ressize = sizeof(struct nfsd3_linkres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+pAT+WC,
@@ -872,6 +982,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_readdirres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readdirargs),
+ .pc_argzero = sizeof(struct nfsd3_readdirargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIR",
@@ -882,6 +993,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_readdirres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_readdirplusargs),
+ .pc_argzero = sizeof(struct nfsd3_readdirplusargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIRPLUS",
@@ -891,6 +1003,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_fsstatres,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_argzero = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_fsstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+2*6+1,
@@ -901,6 +1014,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_fsinfores,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_argzero = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_fsinfores),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+12,
@@ -911,6 +1025,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_pathconfres,
.pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_argzero = sizeof(struct nfsd3_fhandleargs),
.pc_ressize = sizeof(struct nfsd3_pathconfres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+6,
@@ -922,6 +1037,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_encode = nfs3svc_encode_commitres,
.pc_release = nfs3svc_release_fhandle,
.pc_argsize = sizeof(struct nfsd3_commitargs),
+ .pc_argzero = sizeof(struct nfsd3_commitargs),
.pc_ressize = sizeof(struct nfsd3_commitres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+WC+2,
@@ -929,10 +1045,11 @@ static const struct svc_procedure nfsd_procedures3[22] = {
},
};
-static unsigned int nfsd_count3[ARRAY_SIZE(nfsd_procedures3)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfsd_count3[ARRAY_SIZE(nfsd_procedures3)]);
const struct svc_version nfsd_version3 = {
.vs_vers = 3,
- .vs_nproc = 22,
+ .vs_nproc = ARRAY_SIZE(nfsd_procedures3),
.vs_proc = nfsd_procedures3,
.vs_dispatch = nfsd_dispatch,
.vs_count = nfsd_count3,
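The last nfs3proc.c hunk above replaces the shared nfsd_count3[] array with per-CPU counters, so per-procedure statistics can be bumped without bouncing a cache line between CPUs. As a rough sketch of how such DEFINE_PER_CPU_ALIGNED counters are typically used (the demo_* names are invented for illustration and are not part of the patch):

/* Illustrative only -- not part of the patch above. */
#include <linux/percpu.h>

static DEFINE_PER_CPU_ALIGNED(unsigned long, demo_count[22]);

/* Hot path: lock-free increment of this CPU's copy of the counter. */
static void demo_count_op(unsigned int proc)
{
	this_cpu_inc(demo_count[proc]);
}

/* Slow path (statistics read-out): fold the per-CPU values together. */
static unsigned long demo_count_sum(unsigned int proc)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_count, cpu)[proc];
	return sum;
}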
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index c3ac1b6aa3aa..ef4971d71ac4 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -254,7 +254,7 @@ svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (xdr_stream_decode_u64(xdr, &newsize) < 0)
return false;
iap->ia_valid |= ATTR_SIZE;
- iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX);
+ iap->ia_size = newsize;
}
if (xdr_stream_decode_u32(xdr, &set_it) < 0)
return false;
@@ -295,17 +295,14 @@ svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
static bool
svcxdr_decode_sattrguard3(struct xdr_stream *xdr, struct nfsd3_sattrargs *args)
{
- __be32 *p;
u32 check;
if (xdr_stream_decode_bool(xdr, &check) < 0)
return false;
if (check) {
- p = xdr_inline_decode(xdr, XDR_UNIT * 2);
- if (!p)
+ if (!svcxdr_decode_nfstime3(xdr, &args->guardtime))
return false;
args->check_guard = 1;
- args->guardtime = be32_to_cpup(p);
} else
args->check_guard = 0;
@@ -487,71 +484,6 @@ neither:
return true;
}
-static bool fs_supports_change_attribute(struct super_block *sb)
-{
- return sb->s_flags & SB_I_VERSION || sb->s_export_op->fetch_iversion;
-}
-
-/*
- * Fill in the pre_op attr for the wcc data
- */
-void fill_pre_wcc(struct svc_fh *fhp)
-{
- struct inode *inode;
- struct kstat stat;
- bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
-
- if (fhp->fh_no_wcc || fhp->fh_pre_saved)
- return;
- inode = d_inode(fhp->fh_dentry);
- if (fs_supports_change_attribute(inode->i_sb) || !v4) {
- __be32 err = fh_getattr(fhp, &stat);
-
- if (err) {
- /* Grab the times from inode anyway */
- stat.mtime = inode->i_mtime;
- stat.ctime = inode->i_ctime;
- stat.size = inode->i_size;
- }
- fhp->fh_pre_mtime = stat.mtime;
- fhp->fh_pre_ctime = stat.ctime;
- fhp->fh_pre_size = stat.size;
- }
- if (v4)
- fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
-
- fhp->fh_pre_saved = true;
-}
-
-/*
- * Fill in the post_op attr for the wcc data
- */
-void fill_post_wcc(struct svc_fh *fhp)
-{
- bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
- struct inode *inode = d_inode(fhp->fh_dentry);
-
- if (fhp->fh_no_wcc)
- return;
-
- if (fhp->fh_post_saved)
- printk("nfsd: inode locked twice during operation.\n");
-
- fhp->fh_post_saved = true;
-
- if (fs_supports_change_attribute(inode->i_sb) || !v4) {
- __be32 err = fh_getattr(fhp, &fhp->fh_post_attr);
-
- if (err) {
- fhp->fh_post_saved = false;
- fhp->fh_post_attr.ctime = inode->i_ctime;
- }
- }
- if (v4)
- fhp->fh_post_change =
- nfsd4_change_attribute(&fhp->fh_post_attr, inode);
-}
-
/*
* XDR decode functions
*/
@@ -636,10 +568,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
args->count = max_blocksize;
args->len = max_blocksize;
}
- if (!xdr_stream_subsegment(xdr, &args->payload, args->count))
- return false;
- return true;
+ return xdr_stream_subsegment(xdr, &args->payload, args->count);
}
bool
@@ -681,8 +611,6 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd3_symlinkargs *args = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
- struct kvec *tail = rqstp->rq_arg.tail;
- size_t remaining;
if (!svcxdr_decode_diropargs3(xdr, &args->ffh, &args->fname, &args->flen))
return false;
@@ -691,16 +619,10 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
if (xdr_stream_decode_u32(xdr, &args->tlen) < 0)
return false;
- /* request sanity */
- remaining = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len;
- remaining -= xdr_stream_pos(xdr);
- if (remaining < xdr_align_size(args->tlen))
- return false;
-
- args->first.iov_base = xdr->p;
+ /* symlink_data */
args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
-
- return true;
+ args->first.iov_base = xdr_inline_decode(xdr, args->tlen);
+ return args->first.iov_base != NULL;
}
bool
@@ -903,7 +825,8 @@ nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
if (xdr_stream_encode_u32(xdr, resp->len) < 0)
return false;
- xdr_write_pages(xdr, resp->pages, 0, resp->len);
+ svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages, 0,
+ resp->len);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->len) < 0)
return false;
break;
@@ -934,8 +857,9 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
if (xdr_stream_encode_u32(xdr, resp->count) < 0)
return false;
- xdr_write_pages(xdr, resp->pages, rqstp->rq_res.page_base,
- resp->count);
+ svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages,
+ rqstp->rq_res.page_base,
+ resp->count);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->count) < 0)
return false;
break;
@@ -1036,7 +960,8 @@ nfs3svc_encode_readdirres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
if (!svcxdr_encode_cookieverf3(xdr, resp->verf))
return false;
- xdr_write_pages(xdr, dirlist->pages, 0, dirlist->len);
+ svcxdr_encode_opaque_pages(rqstp, xdr, dirlist->pages, 0,
+ dirlist->len);
/* no more entries */
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
@@ -1076,7 +1001,9 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
} else
dchild = dget(dparent);
} else
- dchild = lookup_positive_unlocked(name, dparent, namlen);
+ dchild = lookup_one_positive_unlocked(&nop_mnt_idmap,
+ &QSTR_LEN(name, namlen),
+ dparent);
if (IS_ERR(dchild))
return rv;
if (d_mountpoint(dchild))
@@ -1125,7 +1052,7 @@ svcxdr_encode_entry3_common(struct nfsd3_readdirres *resp, const char *name,
return false;
/* cookie */
resp->cookie_offset = dirlist->len;
- if (xdr_stream_encode_u64(xdr, NFS_OFFSET_MAX) < 0)
+ if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0)
return false;
return true;
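Several of the nfs3xdr.c hunks above drop open-coded buffer-length arithmetic in favour of the xdr_stream helpers, which fail cleanly when the receive buffer is too short. A minimal decoder in that style, for illustration only (the demo_ name is invented):

/* Illustrative only: decode a counted XDR opaque with the stream helpers. */
#include <linux/sunrpc/xdr.h>

static bool demo_decode_opaque(struct xdr_stream *xdr, void **datap, u32 *lenp)
{
	void *p;

	if (xdr_stream_decode_u32(xdr, lenp) < 0)
		return false;			/* length word missing */
	p = xdr_inline_decode(xdr, *lenp);	/* NULL if the buffer is short */
	if (!p)
		return false;
	*datap = p;
	return true;
}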
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index eaa3a0cf38f1..936ea1ad9586 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -135,7 +135,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
unsigned int flags = 0;
int size = 0;
- pacl = get_acl(inode, ACL_TYPE_ACCESS);
+ pacl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (!pacl)
pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
@@ -147,7 +147,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
if (S_ISDIR(inode->i_mode)) {
flags = NFS4_ACL_DIR;
- dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
+ dpacl = get_inode_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(dpacl)) {
error = PTR_ERR(dpacl);
goto rel_pacl;
@@ -198,8 +198,6 @@ summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
memset(pas, 0, sizeof(*pas));
pas->mask = 07;
- pe = acl->a_entries + acl->a_count;
-
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch (pa->e_tag) {
case ACL_USER_OBJ:
@@ -441,7 +439,7 @@ struct posix_ace_state_array {
* calculated so far: */
struct posix_acl_state {
- int empty;
+ unsigned char valid;
struct posix_ace_state owner;
struct posix_ace_state group;
struct posix_ace_state other;
@@ -457,7 +455,6 @@ init_state(struct posix_acl_state *state, int cnt)
int alloc;
memset(state, 0, sizeof(struct posix_acl_state));
- state->empty = 1;
/*
* In the worst case, each individual acl could be for a distinct
* named user or group, but we don't know which, so we allocate
@@ -500,7 +497,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
* and effective cases: when there are no inheritable ACEs,
* calls ->set_acl with a NULL ACL structure.
*/
- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+ if (!state->valid && (flags & NFS4_ACL_TYPE_DEFAULT))
return NULL;
/*
@@ -622,11 +619,12 @@ static void process_one_v4_ace(struct posix_acl_state *state,
struct nfs4_ace *ace)
{
u32 mask = ace->access_mask;
+ short type = ace2type(ace);
int i;
- state->empty = 0;
+ state->valid |= type;
- switch (ace2type(ace)) {
+ switch (type) {
case ACL_USER_OBJ:
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->owner, mask);
@@ -726,6 +724,30 @@ static int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl,
if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE))
process_one_v4_ace(&effective_acl_state, ace);
}
+
+ /*
+ * At this point, the default ACL may have zeroed-out entries for owner,
+	 * group and other. That usually results in a nonsensical ACL that
+	 * denies all access except to any ACE that was explicitly added.
+ *
+ * The setfacl command solves a similar problem with this logic:
+ *
+ * "If a Default ACL entry is created, and the Default ACL contains
+ * no owner, owning group, or others entry, a copy of the ACL
+ * owner, owning group, or others entry is added to the Default ACL."
+ *
+ * Copy any missing ACEs from the effective set, if any ACEs were
+ * explicitly set.
+ */
+ if (default_acl_state.valid) {
+ if (!(default_acl_state.valid & ACL_USER_OBJ))
+ default_acl_state.owner = effective_acl_state.owner;
+ if (!(default_acl_state.valid & ACL_GROUP_OBJ))
+ default_acl_state.group = effective_acl_state.group;
+ if (!(default_acl_state.valid & ACL_OTHER))
+ default_acl_state.other = effective_acl_state.other;
+ }
+
*pacl = posix_state_to_acl(&effective_acl_state, flags);
if (IS_ERR(*pacl)) {
ret = PTR_ERR(*pacl);
@@ -751,58 +773,26 @@ out_estate:
return ret;
}
-__be32
-nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct nfs4_acl *acl)
+__be32 nfsd4_acl_to_attr(enum nfs_ftype4 type, struct nfs4_acl *acl,
+ struct nfsd_attrs *attr)
{
- __be32 error;
int host_error;
- struct dentry *dentry;
- struct inode *inode;
- struct posix_acl *pacl = NULL, *dpacl = NULL;
unsigned int flags = 0;
- /* Get inode */
- error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR);
- if (error)
- return error;
-
- dentry = fhp->fh_dentry;
- inode = d_inode(dentry);
+ if (!acl)
+ return nfs_ok;
- if (S_ISDIR(inode->i_mode))
+ if (type == NF4DIR)
flags = NFS4_ACL_DIR;
- host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
+ host_error = nfs4_acl_nfsv4_to_posix(acl, &attr->na_pacl,
+ &attr->na_dpacl, flags);
if (host_error == -EINVAL)
return nfserr_attrnotsupp;
- if (host_error < 0)
- goto out_nfserr;
-
- fh_lock(fhp);
-
- host_error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS, pacl);
- if (host_error < 0)
- goto out_drop_lock;
-
- if (S_ISDIR(inode->i_mode)) {
- host_error = set_posix_acl(&init_user_ns, inode,
- ACL_TYPE_DEFAULT, dpacl);
- }
-
-out_drop_lock:
- fh_unlock(fhp);
-
- posix_acl_release(pacl);
- posix_acl_release(dpacl);
-out_nfserr:
- if (host_error == -EOPNOTSUPP)
- return nfserr_attrnotsupp;
else
return nfserrno(host_error);
}
-
static short
ace2type(struct nfs4_ace *ace)
{
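The nfs4acl.c changes above replace the old boolean 'empty' flag with a 'valid' bitmask recording which base entries (owner, group, other) were explicitly supplied, so missing entries in the default ACL can be copied from the effective ACL. A toy model of that bookkeeping, for illustration only (demo_* names are invented):

/* Illustrative only: track which base ACL entries were explicitly set. */
#include <linux/posix_acl.h>

struct demo_acl_state {
	unsigned char valid;	/* ORed ACL_USER_OBJ / ACL_GROUP_OBJ / ACL_OTHER */
};

static void demo_mark(struct demo_acl_state *state, short type)
{
	state->valid |= type;
}

/* Copy the owner entry from the effective set only if none was given. */
static bool demo_needs_owner_copy(const struct demo_acl_state *state)
{
	return state->valid && !(state->valid & ACL_USER_OBJ);
}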
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 11f8715d92d6..e00b2aea8da2 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -31,6 +31,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/nfs4.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc_xprt.h>
@@ -41,11 +42,10 @@
#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
+#include "nfs4xdr_gen.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
-static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
-
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
@@ -76,30 +76,53 @@ static __be32 *xdr_encode_empty_array(__be32 *p)
* 1 Protocol"
*/
-/*
- * nfs_cb_opnum4
- *
- * enum nfs_cb_opnum4 {
- * OP_CB_GETATTR = 3,
- * ...
- * };
- */
-enum nfs_cb_opnum4 {
- OP_CB_GETATTR = 3,
- OP_CB_RECALL = 4,
- OP_CB_LAYOUTRECALL = 5,
- OP_CB_NOTIFY = 6,
- OP_CB_PUSH_DELEG = 7,
- OP_CB_RECALL_ANY = 8,
- OP_CB_RECALLABLE_OBJ_AVAIL = 9,
- OP_CB_RECALL_SLOT = 10,
- OP_CB_SEQUENCE = 11,
- OP_CB_WANTS_CANCELLED = 12,
- OP_CB_NOTIFY_LOCK = 13,
- OP_CB_NOTIFY_DEVICEID = 14,
- OP_CB_OFFLOAD = 15,
- OP_CB_ILLEGAL = 10044
-};
+static void encode_uint32(struct xdr_stream *xdr, u32 n)
+{
+ WARN_ON_ONCE(xdr_stream_encode_u32(xdr, n) < 0);
+}
+
+static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
+ size_t len)
+{
+ xdr_stream_encode_uint32_array(xdr, bitmap, len);
+}
+
+static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs4_cb_fattr *fattr)
+{
+ fattr->ncf_cb_change = 0;
+ fattr->ncf_cb_fsize = 0;
+ fattr->ncf_cb_atime.tv_sec = 0;
+ fattr->ncf_cb_atime.tv_nsec = 0;
+ fattr->ncf_cb_mtime.tv_sec = 0;
+ fattr->ncf_cb_mtime.tv_nsec = 0;
+
+ if (bitmap[0] & FATTR4_WORD0_CHANGE)
+ if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
+ return -EIO;
+ if (bitmap[0] & FATTR4_WORD0_SIZE)
+ if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
+ return -EIO;
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
+ fattr4_time_deleg_access access;
+
+ if (!xdrgen_decode_fattr4_time_deleg_access(xdr, &access))
+ return -EIO;
+ fattr->ncf_cb_atime.tv_sec = access.seconds;
+ fattr->ncf_cb_atime.tv_nsec = access.nseconds;
+
+ }
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ fattr4_time_deleg_modify modify;
+
+ if (!xdrgen_decode_fattr4_time_deleg_modify(xdr, &modify))
+ return -EIO;
+ fattr->ncf_cb_mtime.tv_sec = modify.seconds;
+ fattr->ncf_cb_mtime.tv_nsec = modify.nseconds;
+
+ }
+ return 0;
+}
static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
@@ -286,17 +309,17 @@ static int decode_cb_compound4res(struct xdr_stream *xdr,
u32 length;
__be32 *p;
- p = xdr_inline_decode(xdr, 4 + 4);
+ p = xdr_inline_decode(xdr, XDR_UNIT);
if (unlikely(p == NULL))
goto out_overflow;
- hdr->status = be32_to_cpup(p++);
+ hdr->status = be32_to_cpup(p);
/* Ignore the tag */
- length = be32_to_cpup(p++);
- p = xdr_inline_decode(xdr, length + 4);
- if (unlikely(p == NULL))
+ if (xdr_stream_decode_u32(xdr, &length) < 0)
+ goto out_overflow;
+ if (xdr_inline_decode(xdr, length) == NULL)
+ goto out_overflow;
+ if (xdr_stream_decode_u32(xdr, &hdr->nops) < 0)
goto out_overflow;
- p += XDR_QUADLEN(length);
- hdr->nops = be32_to_cpup(p);
return 0;
out_overflow:
return -EIO;
@@ -329,6 +352,95 @@ static void encode_cb_recall4args(struct xdr_stream *xdr,
}
/*
+ * CB_RECALLANY4args
+ *
+ * struct CB_RECALLANY4args {
+ * uint32_t craa_objects_to_keep;
+ * bitmap4 craa_type_mask;
+ * };
+ */
+static void
+encode_cb_recallany4args(struct xdr_stream *xdr,
+ struct nfs4_cb_compound_hdr *hdr, struct nfsd4_cb_recall_any *ra)
+{
+ encode_nfs_cb_opnum4(xdr, OP_CB_RECALL_ANY);
+ encode_uint32(xdr, ra->ra_keep);
+ encode_bitmap4(xdr, ra->ra_bmval, ARRAY_SIZE(ra->ra_bmval));
+ hdr->nops++;
+}
+
+/*
+ * CB_GETATTR4args
+ * struct CB_GETATTR4args {
+ * nfs_fh4 fh;
+ * bitmap4 attr_request;
+ * };
+ *
+ * The size and change attributes are the only ones
+ * guaranteed to be serviced by the client.
+ */
+static void
+encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr,
+ struct nfs4_cb_fattr *fattr)
+{
+ struct nfs4_delegation *dp = container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
+ struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle;
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+ u32 bmap_size = 1;
+ u32 bmap[3];
+
+ bmap[0] = FATTR4_WORD0_SIZE;
+ if (!ncf->ncf_file_modified)
+ bmap[0] |= FATTR4_WORD0_CHANGE;
+
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ bmap[1] = 0;
+ bmap[2] = FATTR4_WORD2_TIME_DELEG_ACCESS | FATTR4_WORD2_TIME_DELEG_MODIFY;
+ bmap_size = 3;
+ }
+ encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR);
+ encode_nfs_fh4(xdr, fh);
+ encode_bitmap4(xdr, bmap, bmap_size);
+ hdr->nops++;
+}
+
+static u32 highest_slotid(struct nfsd4_session *ses)
+{
+ u32 idx;
+
+ spin_lock(&ses->se_lock);
+ idx = fls(~ses->se_cb_slot_avail);
+ if (idx > 0)
+ --idx;
+ idx = max(idx, ses->se_cb_highest_slot);
+ spin_unlock(&ses->se_lock);
+ return idx;
+}
+
+static void
+encode_referring_call4(struct xdr_stream *xdr,
+ const struct nfsd4_referring_call *rc)
+{
+ encode_uint32(xdr, rc->rc_sequenceid);
+ encode_uint32(xdr, rc->rc_slotid);
+}
+
+static void
+encode_referring_call_list4(struct xdr_stream *xdr,
+ const struct nfsd4_referring_call_list *rcl)
+{
+ struct nfsd4_referring_call *rc;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
+ xdr_encode_opaque_fixed(p, rcl->rcl_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN);
+ encode_uint32(xdr, rcl->__nr_referring_calls);
+ list_for_each_entry(rc, &rcl->rcl_referring_calls, __list)
+ encode_referring_call4(xdr, rc);
+}
+
+/*
* CB_SEQUENCE4args
*
* struct CB_SEQUENCE4args {
@@ -345,6 +457,7 @@ static void encode_cb_sequence4args(struct xdr_stream *xdr,
struct nfs4_cb_compound_hdr *hdr)
{
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
+ struct nfsd4_referring_call_list *rcl;
__be32 *p;
if (hdr->minorversion == 0)
@@ -353,16 +466,45 @@ static void encode_cb_sequence4args(struct xdr_stream *xdr,
encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
encode_sessionid4(xdr, session);
- p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
- *p++ = cpu_to_be32(session->se_cb_seq_nr); /* csa_sequenceid */
- *p++ = xdr_zero; /* csa_slotid */
- *p++ = xdr_zero; /* csa_highest_slotid */
+ p = xdr_reserve_space(xdr, XDR_UNIT * 4);
+ *p++ = cpu_to_be32(session->se_cb_seq_nr[cb->cb_held_slot]); /* csa_sequenceid */
+ *p++ = cpu_to_be32(cb->cb_held_slot); /* csa_slotid */
+ *p++ = cpu_to_be32(highest_slotid(session)); /* csa_highest_slotid */
*p++ = xdr_zero; /* csa_cachethis */
- xdr_encode_empty_array(p); /* csa_referring_call_lists */
+
+ /* csa_referring_call_lists */
+ encode_uint32(xdr, cb->cb_nr_referring_call_list);
+ list_for_each_entry(rcl, &cb->cb_referring_call_list, __list)
+ encode_referring_call_list4(xdr, rcl);
hdr->nops++;
}
+static void update_cb_slot_table(struct nfsd4_session *ses, u32 target)
+{
+ /* No need to do anything if nothing changed */
+ if (likely(target == READ_ONCE(ses->se_cb_highest_slot)))
+ return;
+
+ spin_lock(&ses->se_lock);
+ if (target > ses->se_cb_highest_slot) {
+ int i;
+
+ target = min(target, NFSD_BC_SLOT_TABLE_SIZE - 1);
+
+ /*
+ * Growing the slot table. Reset any new sequences to 1.
+ *
+ * NB: There is some debate about whether the RFC requires this,
+ * but the Linux client expects it.
+ */
+ for (i = ses->se_cb_highest_slot + 1; i <= target; ++i)
+ ses->se_cb_seq_nr[i] = 1;
+ }
+ ses->se_cb_highest_slot = target;
+ spin_unlock(&ses->se_lock);
+}
+
/*
* CB_SEQUENCE4resok
*
@@ -390,7 +532,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
int status = -ESERVERFAULT;
__be32 *p;
- u32 dummy;
+ u32 seqid, slotid, target;
/*
* If the server returns different values for sessionID, slotID or
@@ -406,21 +548,22 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
}
p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
- dummy = be32_to_cpup(p++);
- if (dummy != session->se_cb_seq_nr) {
+ seqid = be32_to_cpup(p++);
+ if (seqid != session->se_cb_seq_nr[cb->cb_held_slot]) {
dprintk("NFS: %s Invalid sequence number\n", __func__);
goto out;
}
- dummy = be32_to_cpup(p++);
- if (dummy != 0) {
+ slotid = be32_to_cpup(p++);
+ if (slotid != cb->cb_held_slot) {
dprintk("NFS: %s Invalid slotid\n", __func__);
goto out;
}
- /*
- * FIXME: process highest slotid and target highest slotid
- */
+ p++; // ignore current highest slot value
+
+ target = be32_to_cpup(p++);
+ update_cb_slot_table(session, target);
status = 0;
out:
cb->cb_seq_status = status;
@@ -464,6 +607,26 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * 20.1. Operation 3: CB_GETATTR - Get Attributes
+ */
+static void nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfsd4_callback *cb = data;
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_clp->cl_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_getattr4args(xdr, &hdr, ncf);
+ encode_cb_nops(&hdr);
+}
+
+/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -482,6 +645,26 @@ static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_cb_nops(&hdr);
}
+/*
+ * 20.6. Operation 8: CB_RECALL_ANY - Keep Any N Recallable Objects
+ */
+static void
+nfs4_xdr_enc_cb_recall_any(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfsd4_callback *cb = data;
+ struct nfsd4_cb_recall_any *ra;
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_clp->cl_minorversion,
+ };
+
+ ra = container_of(cb, struct nfsd4_cb_recall_any, ra_cb);
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_recallany4args(xdr, &hdr, ra);
+ encode_cb_nops(&hdr);
+}
/*
* NFSv4.0 and NFSv4.1 XDR decode functions
@@ -499,6 +682,46 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * 20.1. Operation 3: CB_GETATTR - Get Attributes
+ */
+static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfsd4_callback *cb = data;
+ struct nfs4_cb_compound_hdr hdr;
+ int status;
+ u32 bitmap[3] = {0};
+ u32 attrlen, maxlen;
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ return status;
+
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+
+ status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
+ if (unlikely(status || cb->cb_status))
+ return status;
+ if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
+ return -EIO;
+ if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
+ return -EIO;
+ maxlen = sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize);
+ if (bitmap[2] != 0)
+ maxlen += (sizeof(ncf->ncf_cb_mtime.tv_sec) +
+ sizeof(ncf->ncf_cb_mtime.tv_nsec)) * 2;
+ if (attrlen > maxlen)
+ return -EIO;
+ status = decode_cb_fattr4(xdr, bitmap, ncf);
+ return status;
+}
+
+/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
@@ -520,6 +743,28 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}
+/*
+ * 20.6. Operation 8: CB_RECALL_ANY - Keep Any N Recallable Objects
+ */
+static int
+nfs4_xdr_dec_cb_recall_any(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfsd4_callback *cb = data;
+ struct nfs4_cb_compound_hdr hdr;
+ int status;
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ return status;
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+ status = decode_cb_op_status(xdr, OP_CB_RECALL_ANY, &cb->cb_status);
+ return status;
+}
+
#ifdef CONFIG_NFSD_PNFS
/*
* CB_LAYOUTRECALL4args
@@ -627,7 +872,7 @@ static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
const struct nfsd4_callback *cb = data;
const struct nfsd4_blocked_lock *nbl =
container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
- struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.c.flc_owner;
struct nfs4_cb_compound_hdr hdr = {
.ident = 0,
.minorversion = cb->cb_clp->cl_minorversion,
@@ -679,7 +924,7 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
* case NFS4_OK:
* write_response4 coa_resok4;
* default:
- * length4 coa_bytes_copied;
+ * length4 coa_bytes_copied;
* };
* struct CB_OFFLOAD4args {
* nfs_fh4 coa_fh;
@@ -688,21 +933,22 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
* };
*/
static void encode_offload_info4(struct xdr_stream *xdr,
- __be32 nfserr,
- const struct nfsd4_copy *cp)
+ const struct nfsd4_cb_offload *cbo)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
- *p++ = nfserr;
- if (!nfserr) {
+ *p = cbo->co_nfserr;
+ switch (cbo->co_nfserr) {
+ case nfs_ok:
p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
p = xdr_encode_empty_array(p);
- p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
- *p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
- p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
+ p = xdr_encode_hyper(p, cbo->co_res.wr_bytes_written);
+ *p++ = cpu_to_be32(cbo->co_res.wr_stable_how);
+ p = xdr_encode_opaque_fixed(p, cbo->co_res.wr_verifier.data,
NFS4_VERIFIER_SIZE);
- } else {
+ break;
+ default:
p = xdr_reserve_space(xdr, 8);
/* We always return success if bytes were written */
p = xdr_encode_hyper(p, 0);
@@ -710,18 +956,16 @@ static void encode_offload_info4(struct xdr_stream *xdr,
}
static void encode_cb_offload4args(struct xdr_stream *xdr,
- __be32 nfserr,
- const struct knfsd_fh *fh,
- const struct nfsd4_copy *cp,
+ const struct nfsd4_cb_offload *cbo,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
- *p++ = cpu_to_be32(OP_CB_OFFLOAD);
- encode_nfs_fh4(xdr, fh);
- encode_stateid4(xdr, &cp->cp_res.cb_stateid);
- encode_offload_info4(xdr, nfserr, cp);
+ *p = cpu_to_be32(OP_CB_OFFLOAD);
+ encode_nfs_fh4(xdr, &cbo->co_fh);
+ encode_stateid4(xdr, &cbo->co_res.cb_stateid);
+ encode_offload_info4(xdr, cbo);
hdr->nops++;
}
@@ -731,8 +975,8 @@ static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
const void *data)
{
const struct nfsd4_callback *cb = data;
- const struct nfsd4_copy *cp =
- container_of(cb, struct nfsd4_copy, cp_cb);
+ const struct nfsd4_cb_offload *cbo =
+ container_of(cb, struct nfsd4_cb_offload, co_cb);
struct nfs4_cb_compound_hdr hdr = {
.ident = 0,
.minorversion = cb->cb_clp->cl_minorversion,
@@ -740,7 +984,7 @@ static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
- encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
+ encode_cb_offload4args(xdr, cbo, &hdr);
encode_cb_nops(&hdr);
}
@@ -784,6 +1028,8 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
#endif
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
+ PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any),
+ PROC(CB_GETATTR, COMPOUND, cb_getattr, cb_getattr),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
@@ -836,11 +1082,23 @@ static int max_cb_time(struct net *net)
return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
}
-static struct workqueue_struct *callback_wq;
-
static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
{
- return queue_work(callback_wq, &cb->cb_work);
+ struct nfs4_client *clp = cb->cb_clp;
+
+ trace_nfsd_cb_queue(clp, cb);
+ return queue_work(clp->cl_callback_wq, &cb->cb_work);
+}
+
+static void nfsd4_requeue_cb(struct rpc_task *task, struct nfsd4_callback *cb)
+{
+ struct nfs4_client *clp = cb->cb_clp;
+
+ if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
+ trace_nfsd_cb_restart(clp, cb);
+ task->tk_status = 0;
+ set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ }
}
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
@@ -851,8 +1109,7 @@ static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
{
- if (atomic_dec_and_test(&clp->cl_cb_inflight))
- wake_up_var(&clp->cl_cb_inflight);
+ atomic_dec_and_wake_up(&clp->cl_cb_inflight);
}
static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
@@ -871,12 +1128,12 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
} else {
struct cred *kcred;
- kcred = prepare_kernel_cred(NULL);
+ kcred = prepare_kernel_cred(&init_task);
if (!kcred)
return NULL;
- kcred->uid = ses->se_cb_sec.uid;
- kcred->gid = ses->se_cb_sec.gid;
+ kcred->fsuid = ses->se_cb_sec.uid;
+ kcred->fsgid = ses->se_cb_sec.gid;
return kcred;
}
}
@@ -915,9 +1172,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt)
+ if (!conn->cb_xprt || !ses)
return -EINVAL;
- clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
@@ -937,6 +1193,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
rpc_shutdown_client(client);
return -ENOMEM;
}
+
+ if (clp->cl_minorversion != 0)
+ clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
rcu_read_lock();
@@ -950,18 +1209,18 @@ static void nfsd4_mark_cb_state(struct nfs4_client *clp, int newstate)
{
if (clp->cl_cb_state != newstate) {
clp->cl_cb_state = newstate;
- trace_nfsd_cb_state(clp);
+ trace_nfsd_cb_new_state(clp);
}
}
-static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
+static void nfsd4_mark_cb_down(struct nfs4_client *clp)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
nfsd4_mark_cb_state(clp, NFSD4_CB_DOWN);
}
-static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
+static void nfsd4_mark_cb_fault(struct nfs4_client *clp)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
@@ -973,7 +1232,7 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
if (task->tk_status)
- nfsd4_mark_cb_down(clp, task->tk_status);
+ nfsd4_mark_cb_down(clp);
else
nfsd4_mark_cb_state(clp, NFSD4_CB_UP);
}
@@ -1008,7 +1267,7 @@ void nfsd4_probe_callback(struct nfs4_client *clp)
void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
nfsd4_probe_callback(clp);
- flush_workqueue(callback_wq);
+ flush_workqueue(clp->cl_callback_wq);
}
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
@@ -1019,6 +1278,22 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
spin_unlock(&clp->cl_lock);
}
+static int grab_slot(struct nfsd4_session *ses)
+{
+ int idx;
+
+ spin_lock(&ses->se_lock);
+ idx = ffs(ses->se_cb_slot_avail) - 1;
+ if (idx < 0 || idx > ses->se_cb_highest_slot) {
+ spin_unlock(&ses->se_lock);
+ return -1;
+ }
+ /* clear the bit for the slot */
+ ses->se_cb_slot_avail &= ~BIT(idx);
+ spin_unlock(&ses->se_lock);
+ return idx;
+}
+
/*
* There's currently a single callback channel slot.
* If the slot is available, then mark it busy. Otherwise, set the
@@ -1027,28 +1302,32 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
{
struct nfs4_client *clp = cb->cb_clp;
+ struct nfsd4_session *ses = clp->cl_cb_session;
- if (!cb->cb_holds_slot &&
- test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+ if (cb->cb_held_slot >= 0)
+ return true;
+ cb->cb_held_slot = grab_slot(ses);
+ if (cb->cb_held_slot < 0) {
rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
/* Race breaker */
- if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
- dprintk("%s slot is busy\n", __func__);
+ cb->cb_held_slot = grab_slot(ses);
+ if (cb->cb_held_slot < 0)
return false;
- }
rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
}
- cb->cb_holds_slot = true;
return true;
}
static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
+ struct nfsd4_session *ses = clp->cl_cb_session;
- if (cb->cb_holds_slot) {
- cb->cb_holds_slot = false;
- clear_bit(0, &clp->cl_cb_slot_busy);
+ if (cb->cb_held_slot >= 0) {
+ spin_lock(&ses->se_lock);
+ ses->se_cb_slot_avail |= BIT(cb->cb_held_slot);
+ spin_unlock(&ses->se_lock);
+ cb->cb_held_slot = -1;
rpc_wake_up_next(&clp->cl_cb_waitq);
}
}
@@ -1057,16 +1336,115 @@ static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
+ trace_nfsd_cb_destroy(clp, cb);
nfsd41_cb_release_slot(cb);
+ if (test_bit(NFSD4_CALLBACK_WAKE, &cb->cb_flags))
+ clear_and_wake_up_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
+ else
+ clear_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
+
if (cb->cb_ops && cb->cb_ops->release)
cb->cb_ops->release(cb);
nfsd41_cb_inflight_end(clp);
}
-/*
- * TODO: cb_sequence should support referring call lists, cachethis, multiple
- * slots, and mark callback channel down on communication errors.
+/**
+ * nfsd41_cb_referring_call - add a referring call to a callback operation
+ * @cb: context of callback to add the rc to
+ * @sessionid: referring call's session ID
+ * @slotid: referring call's session slot index
+ * @seqno: referring call's slot sequence number
+ *
+ * Caller serializes access to @cb.
+ *
+ * NB: If memory allocation fails, the referring call is not added.
*/
+void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
+ struct nfs4_sessionid *sessionid,
+ u32 slotid, u32 seqno)
+{
+ struct nfsd4_referring_call_list *rcl;
+ struct nfsd4_referring_call *rc;
+ bool found;
+
+ might_sleep();
+
+ found = false;
+ list_for_each_entry(rcl, &cb->cb_referring_call_list, __list) {
+ if (!memcmp(rcl->rcl_sessionid.data, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ rcl = kmalloc(sizeof(*rcl), GFP_KERNEL);
+ if (!rcl)
+ return;
+ memcpy(rcl->rcl_sessionid.data, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN);
+ rcl->__nr_referring_calls = 0;
+ INIT_LIST_HEAD(&rcl->rcl_referring_calls);
+ list_add(&rcl->__list, &cb->cb_referring_call_list);
+ cb->cb_nr_referring_call_list++;
+ }
+
+ found = false;
+ list_for_each_entry(rc, &rcl->rcl_referring_calls, __list) {
+ if (rc->rc_sequenceid == seqno && rc->rc_slotid == slotid) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ rc = kmalloc(sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ goto out;
+ rc->rc_sequenceid = seqno;
+ rc->rc_slotid = slotid;
+ rcl->__nr_referring_calls++;
+ list_add(&rc->__list, &rcl->rcl_referring_calls);
+ }
+
+out:
+ if (!rcl->__nr_referring_calls) {
+ cb->cb_nr_referring_call_list--;
+ list_del(&rcl->__list);
+ kfree(rcl);
+ }
+}
+
+/**
+ * nfsd41_cb_destroy_referring_call_list - release referring call info
+ * @cb: context of a callback that has completed
+ *
+ * Callers who allocate referring calls using nfsd41_cb_referring_call() must
+ * release those resources by calling nfsd41_cb_destroy_referring_call_list.
+ *
+ * Caller serializes access to @cb.
+ */
+void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb)
+{
+ struct nfsd4_referring_call_list *rcl;
+ struct nfsd4_referring_call *rc;
+
+ while (!list_empty(&cb->cb_referring_call_list)) {
+ rcl = list_first_entry(&cb->cb_referring_call_list,
+ struct nfsd4_referring_call_list,
+ __list);
+
+ while (!list_empty(&rcl->rcl_referring_calls)) {
+ rc = list_first_entry(&rcl->rcl_referring_calls,
+ struct nfsd4_referring_call,
+ __list);
+ list_del(&rc->__list);
+ kfree(rc);
+ }
+ list_del(&rcl->__list);
+ kfree(rcl);
+ }
+}
+
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
struct nfsd4_callback *cb = calldata;
@@ -1077,6 +1455,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
* cb_seq_status is only set in decode_cb_sequence4res,
* and so will remain 1 if an rpc level failure occurs.
*/
+ trace_nfsd_cb_rpc_prepare(clp);
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion && !nfsd41_cb_get_slot(cb, task))
@@ -1084,31 +1463,17 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
rpc_call_start(task);
}
+/* Returns true if CB_COMPOUND processing should continue */
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
{
- struct nfs4_client *clp = cb->cb_clp;
- struct nfsd4_session *session = clp->cl_cb_session;
- bool ret = true;
-
- if (!clp->cl_minorversion) {
- /*
- * If the backchannel connection was shut down while this
- * task was queued, we need to resubmit it after setting up
- * a new backchannel connection.
- *
- * Note that if we lost our callback connection permanently
- * the submission code will error out, so we don't need to
- * handle that case here.
- */
- if (RPC_SIGNALLED(task))
- goto need_restart;
-
- return true;
- }
+ struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
+ bool ret = false;
- if (!cb->cb_holds_slot)
- goto need_restart;
+ if (cb->cb_held_slot < 0)
+ goto requeue;
+ /* This is the operation status code for CB_SEQUENCE */
+ trace_nfsd_cb_seq_status(task, cb);
switch (cb->cb_seq_status) {
case 0:
/*
@@ -1118,53 +1483,64 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
* If CB_SEQUENCE returns an error, then the state of the slot
* (sequence ID, cached reply) MUST NOT change.
*/
- ++session->se_cb_seq_nr;
+ ++session->se_cb_seq_nr[cb->cb_held_slot];
+ ret = true;
break;
case -ESERVERFAULT:
- ++session->se_cb_seq_nr;
- fallthrough;
+ /*
+ * Call succeeded, but the session, slot index, or slot
+	 * sequence number in the response does not match what was
+	 * sent in the server's call. The sequence information is thus
+ * untrustworthy.
+ */
+ nfsd4_mark_cb_fault(cb->cb_clp);
+ break;
case 1:
+ /*
+ * cb_seq_status remains 1 if an RPC Reply was never
+ * received. NFSD can't know if the client processed
+ * the CB_SEQUENCE operation. Ask the client to send a
+ * DESTROY_SESSION to recover.
+ */
+ fallthrough;
case -NFS4ERR_BADSESSION:
- nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
- ret = false;
- break;
+ nfsd4_mark_cb_fault(cb->cb_clp);
+ goto requeue;
case -NFS4ERR_DELAY:
- if (!rpc_restart_call(task))
- goto out;
-
+ cb->cb_seq_status = 1;
+ if (RPC_SIGNALLED(task) || !rpc_restart_call(task))
+ goto requeue;
rpc_delay(task, 2 * HZ);
return false;
+ case -NFS4ERR_SEQ_MISORDERED:
case -NFS4ERR_BADSLOT:
+ /*
+ * A SEQ_MISORDERED or BADSLOT error means that the client and
+ * server are out of sync as to the backchannel parameters. Mark
+ * the backchannel faulty and restart the RPC, but leak the slot
+ * so that it's no longer used.
+ */
+ nfsd4_mark_cb_fault(cb->cb_clp);
+ cb->cb_held_slot = -1;
goto retry_nowait;
- case -NFS4ERR_SEQ_MISORDERED:
- if (session->se_cb_seq_nr != 1) {
- session->se_cb_seq_nr = 1;
- goto retry_nowait;
- }
- break;
default:
- nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
- dprintk("%s: unprocessed error %d\n", __func__,
- cb->cb_seq_status);
+ nfsd4_mark_cb_fault(cb->cb_clp);
}
-
+ trace_nfsd_cb_free_slot(task, cb);
nfsd41_cb_release_slot(cb);
- dprintk("%s: freed slot, new seqid=%d\n", __func__,
- clp->cl_cb_session->se_cb_seq_nr);
-
- if (RPC_SIGNALLED(task))
- goto need_restart;
-out:
return ret;
retry_nowait:
- if (rpc_restart_call_prepare(task))
- ret = false;
- goto out;
-need_restart:
- if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
- task->tk_status = 0;
- cb->cb_need_restart = true;
+ /*
+ * RPC_SIGNALLED() means that the rpc_client is being torn down and
+ * (possibly) recreated. Requeue the call in that case.
+ */
+ if (!RPC_SIGNALLED(task)) {
+ if (rpc_restart_call_prepare(task))
+ return false;
}
+requeue:
+ nfsd41_cb_release_slot(cb);
+ nfsd4_requeue_cb(task, cb);
return false;
}
@@ -1173,11 +1549,28 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
- if (!nfsd4_cb_sequence_done(task, cb))
+ trace_nfsd_cb_rpc_done(clp);
+
+ if (!clp->cl_minorversion) {
+ /*
+ * If the backchannel connection was shut down while this
+ * task was queued, we need to resubmit it after setting up
+ * a new backchannel connection.
+ *
+ * Note that if we lost our callback connection permanently
+ * the submission code will error out, so we don't need to
+ * handle that case here.
+ */
+ if (RPC_SIGNALLED(task))
+ nfsd4_requeue_cb(task, cb);
+ } else if (!nfsd4_cb_sequence_done(task, cb)) {
return;
+ }
if (cb->cb_status) {
- WARN_ON_ONCE(task->tk_status);
+ WARN_ONCE(task->tk_status,
+ "cb_status=%d tk_status=%d cb_opcode=%d",
+ cb->cb_status, task->tk_status, cb->cb_ops->opcode);
task->tk_status = cb->cb_status;
}
@@ -1191,7 +1584,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
case -EIO:
case -ETIMEDOUT:
case -EACCES:
- nfsd4_mark_cb_down(clp, task->tk_status);
+ nfsd4_mark_cb_down(clp);
}
break;
default:
@@ -1203,7 +1596,9 @@ static void nfsd4_cb_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
- if (cb->cb_need_restart)
+ trace_nfsd_cb_rpc_release(cb->cb_clp);
+
+ if (test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags))
nfsd4_queue_cb(cb);
else
nfsd41_destroy_cb(cb);
@@ -1216,19 +1611,6 @@ static const struct rpc_call_ops nfsd4_cb_ops = {
.rpc_release = nfsd4_cb_release,
};
-int nfsd4_create_callback_queue(void)
-{
- callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
- if (!callback_wq)
- return -ENOMEM;
- return 0;
-}
-
-void nfsd4_destroy_callback_queue(void)
-{
- destroy_workqueue(callback_wq);
-}
-
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
@@ -1242,16 +1624,17 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp)
* client, destroy the rpc client, and stop:
*/
nfsd4_run_cb(&clp->cl_cb_null);
- flush_workqueue(callback_wq);
+ flush_workqueue(clp->cl_callback_wq);
nfsd41_cb_inflight_wait_complete(clp);
}
-/* requires cl_lock: */
static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
{
struct nfsd4_session *s;
struct nfsd4_conn *c;
+ lockdep_assert_held(&clp->cl_lock);
+
list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
list_for_each_entry(c, &s->se_conns, cn_persession) {
if (c->cn_flags & NFS4_CDFC4_BACK)
@@ -1263,9 +1646,9 @@ static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
/*
* Note there isn't a lot of locking in this code; instead we depend on
- * the fact that it is run from the callback_wq, which won't run two
- * work items at once. So, for example, callback_wq handles all access
- * of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
+ * the fact that it is run from clp->cl_callback_wq, which won't run two
+ * work items at once. So, for example, clp->cl_callback_wq handles all
+ * access of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
*/
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
@@ -1275,11 +1658,14 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
struct nfsd4_conn *c;
int err;
+ trace_nfsd_cb_bc_update(clp, cb);
+
/*
* This is either an update, or the client dying; in either case,
* kill the old client:
*/
if (clp->cl_cb_client) {
+ trace_nfsd_cb_bc_shutdown(clp, cb);
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
@@ -1291,13 +1677,15 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
}
if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
return;
+
spin_lock(&clp->cl_lock);
/*
* Only serialized callback code is allowed to clear these
* flags; main nfsd code can only set them:
*/
- BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
+ WARN_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
+
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
c = __nfsd4_find_backchannel(clp);
if (c) {
@@ -1309,7 +1697,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses);
if (err) {
- nfsd4_mark_cb_down(clp, err);
+ nfsd4_mark_cb_down(clp);
if (c)
svc_xprt_put(c->cn_xprt);
return;
@@ -1323,21 +1711,19 @@ nfsd4_run_cb_work(struct work_struct *work)
container_of(work, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
- int flags;
+ int flags, ret;
- if (cb->cb_need_restart) {
- cb->cb_need_restart = false;
- } else {
- if (cb->cb_ops && cb->cb_ops->prepare)
- cb->cb_ops->prepare(cb);
- }
+ trace_nfsd_cb_start(clp);
if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
nfsd4_process_cb_update(cb);
clnt = clp->cl_cb_client;
- if (!clnt) {
- /* Callback channel broken, or client killed; give up: */
+ if (!clnt || clp->cl_state == NFSD4_COURTESY) {
+ /*
+ * Callback channel broken, client killed or
+ * nfs4_client in courtesy state; give up.
+ */
nfsd41_destroy_cb(cb);
return;
}
@@ -1351,10 +1737,19 @@ nfsd4_run_cb_work(struct work_struct *work)
return;
}
+ if (!test_and_clear_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags)) {
+ if (cb->cb_ops && cb->cb_ops->prepare)
+ cb->cb_ops->prepare(cb);
+ }
+
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
- rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
- cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ ret = rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
+ cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ if (ret != 0) {
+ set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ nfsd4_queue_cb(cb);
+ }
}
void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
@@ -1364,19 +1759,30 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
+ cb->cb_flags = 0;
cb->cb_ops = ops;
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
- cb->cb_seq_status = 1;
cb->cb_status = 0;
- cb->cb_need_restart = false;
- cb->cb_holds_slot = false;
+ cb->cb_held_slot = -1;
+ cb->cb_nr_referring_call_list = 0;
+ INIT_LIST_HEAD(&cb->cb_referring_call_list);
}
-void nfsd4_run_cb(struct nfsd4_callback *cb)
+/**
+ * nfsd4_run_cb - queue up a callback job to run
+ * @cb: callback to queue
+ *
+ * Kick off a callback to do its thing. Returns false if it was already
+ * on a queue, true otherwise.
+ */
+bool nfsd4_run_cb(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
+ bool queued;
nfsd41_cb_inflight_begin(clp);
- if (!nfsd4_queue_cb(cb))
+ queued = nfsd4_queue_cb(cb);
+ if (!queued)
nfsd41_cb_inflight_end(clp);
+ return queued;
}
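The callback changes above move from a single backchannel slot to a small table tracked by the se_cb_slot_avail bitmask: grab_slot() hands out the lowest free slot and nfsd41_cb_release_slot() returns it, both under se_lock. A simplified, standalone model of that allocation scheme (demo_* names invented for the sketch; locking omitted):

/* Illustrative only: bitmap-based slot allocation, as in grab_slot() above. */
static int demo_take_slot(u32 *avail, int highest_slot)
{
	int idx = ffs(*avail) - 1;	/* lowest set bit == lowest free slot */

	if (idx < 0 || idx > highest_slot)
		return -1;		/* nothing usable is free */
	*avail &= ~BIT(idx);		/* mark the slot busy */
	return idx;
}

static void demo_put_slot(u32 *avail, int idx)
{
	*avail |= BIT(idx);		/* slot is available again */
}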
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index f92161ce1f97..8cca1329f348 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -41,6 +41,7 @@
#include "idmap.h"
#include "nfsd.h"
#include "netns.h"
+#include "vfs.h"
/*
* Turn off idmapping when using AUTH_SYS.
@@ -82,8 +83,8 @@ ent_init(struct cache_head *cnew, struct cache_head *citm)
new->id = itm->id;
new->type = itm->type;
- strlcpy(new->name, itm->name, sizeof(new->name));
- strlcpy(new->authname, itm->authname, sizeof(new->authname));
+ strscpy(new->name, itm->name, sizeof(new->name));
+ strscpy(new->authname, itm->authname, sizeof(new->authname));
}
static void
@@ -239,8 +240,8 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
goto out;
/* expiry */
- ent.h.expiry_time = get_expiry(&buf);
- if (ent.h.expiry_time == 0)
+ error = get_expiry(&buf, &ent.h.expiry_time);
+ if (error)
goto out;
error = -ENOMEM;
@@ -407,8 +408,8 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
memcpy(ent.name, buf1, sizeof(ent.name));
/* expiry */
- ent.h.expiry_time = get_expiry(&buf);
- if (ent.h.expiry_time == 0)
+ error = get_expiry(&buf, &ent.h.expiry_time);
+ if (error)
goto out;
/* ID */
@@ -548,7 +549,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
return nfserr_badowner;
memcpy(key.name, name, namelen);
key.name[namelen] = '\0';
- strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
+ strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
if (ret == -ENOENT)
return nfserr_badowner;
@@ -580,11 +581,12 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
.id = id,
.type = type,
};
+ __be32 status = nfs_ok;
__be32 *p;
int ret;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
+ strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
if (ret == -ENOENT)
return encode_ascii_id(xdr, id);
@@ -592,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
return nfserrno(ret);
ret = strlen(item->name);
WARN_ON_ONCE(ret > IDMAP_NAMESZ);
+
p = xdr_reserve_space(xdr, ret + 4);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque(p, item->name, ret);
+ if (unlikely(!p)) {
+ status = nfserr_resource;
+ goto out_put;
+ }
+ xdr_encode_opaque(p, item->name, ret);
+out_put:
cache_put(&item->h, nn->idtoname_cache);
- return 0;
+ return status;
}
static bool
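The idmap hunks above convert strlcpy() calls to strscpy(), which reports truncation instead of silently returning the length it would have needed. A tiny illustrative caller (names invented for the sketch):

/* Illustrative only: strscpy() returns -E2BIG when the copy is truncated. */
static void demo_copy_authname(char *dst, size_t dstsize, const char *src)
{
	if (strscpy(dst, src, dstsize) < 0)
		pr_warn("demo: name \"%s\" truncated\n", src);
}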
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6d1b5bb051c5..683bd1130afe 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -25,7 +25,7 @@ static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;
static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
-static const struct lock_manager_operations nfsd4_layouts_lm_ops;
+static const struct lease_manager_operations nfsd4_layouts_lm_ops;
const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
@@ -65,7 +65,7 @@ nfsd4_alloc_devid_map(const struct svc_fh *fhp)
return;
map->fsid_type = fh->fh_fsid_type;
- memcpy(&map->fsid, fh->fh_fsid, fsid_len);
+ memcpy(&map->fsid, fh_fsid(fh), fsid_len);
spin_lock(&nfsd_devid_lock);
if (fhp->fh_export->ex_devid_map)
@@ -75,7 +75,7 @@ nfsd4_alloc_devid_map(const struct svc_fh *fhp)
list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
if (old->fsid_type != fh->fh_fsid_type)
continue;
- if (memcmp(old->fsid, fh->fh_fsid,
+ if (memcmp(old->fsid, fh_fsid(fh),
key_len(old->fsid_type)))
continue;
@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
id->generation = device_generation;
- id->pad = 0;
return 0;
}
@@ -152,6 +151,23 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
#endif
}
+void nfsd4_close_layout(struct nfs4_layout_stateid *ls)
+{
+ struct nfsd_file *fl;
+
+ spin_lock(&ls->ls_stid.sc_file->fi_lock);
+ fl = ls->ls_file;
+ ls->ls_file = NULL;
+ spin_unlock(&ls->ls_stid.sc_file->fi_lock);
+
+ if (fl) {
+ if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
+ kernel_setlease(fl->nf_file, F_UNLCK, NULL,
+ (void **)&ls);
+ nfsd_file_put(fl);
+ }
+}
+
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
@@ -169,9 +185,7 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid)
list_del_init(&ls->ls_perfile);
spin_unlock(&fp->fi_lock);
- if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
- vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls);
- nfsd_file_put(ls->ls_file);
+ nfsd4_close_layout(ls);
if (ls->ls_recalled)
atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
@@ -182,27 +196,26 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid)
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
- struct file_lock *fl;
+ struct file_lease *fl;
int status;
if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
return 0;
- fl = locks_alloc_lock();
+ fl = locks_alloc_lease();
if (!fl)
return -ENOMEM;
- locks_init_lock(fl);
+ locks_init_lease(fl);
fl->fl_lmops = &nfsd4_layouts_lm_ops;
- fl->fl_flags = FL_LAYOUT;
- fl->fl_type = F_RDLCK;
- fl->fl_end = OFFSET_MAX;
- fl->fl_owner = ls;
- fl->fl_pid = current->tgid;
- fl->fl_file = ls->ls_file->nf_file;
-
- status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
+ fl->c.flc_flags = FL_LAYOUT;
+ fl->c.flc_type = F_RDLCK;
+ fl->c.flc_owner = ls;
+ fl->c.flc_pid = current->tgid;
+ fl->c.flc_file = ls->ls_file->nf_file;
+
+ status = kernel_setlease(fl->c.flc_file, fl->c.flc_type, &fl, NULL);
if (status) {
- locks_free_lock(fl);
+ locks_free_lease(fl);
return status;
}
BUG_ON(fl != NULL);
@@ -236,7 +249,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
NFSPROC4_CLNT_CB_LAYOUT);
- if (parent->sc_type == NFS4_DELEG_STID)
+ if (parent->sc_type == SC_TYPE_DELEG)
ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
else
ls->ls_file = find_any_file(fp);
@@ -250,7 +263,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
}
spin_lock(&clp->cl_lock);
- stp->sc_type = NFS4_LAYOUT_STID;
+ stp->sc_type = SC_TYPE_LAYOUT;
list_add(&ls->ls_perclnt, &clp->cl_lo_states);
spin_unlock(&clp->cl_lock);
@@ -269,13 +282,13 @@ nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
{
struct nfs4_layout_stateid *ls;
struct nfs4_stid *stid;
- unsigned char typemask = NFS4_LAYOUT_STID;
+ unsigned short typemask = SC_TYPE_LAYOUT;
__be32 status;
if (create)
- typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);
+ typemask |= (SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG);
- status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
+ status = nfsd4_lookup_stateid(cstate, stateid, typemask, 0, &stid,
net_generic(SVC_NET(rqstp), nfsd_net_id));
if (status)
goto out;
@@ -286,7 +299,7 @@ nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
goto out_put_stid;
}
- if (stid->sc_type != NFS4_LAYOUT_STID) {
+ if (stid->sc_type != SC_TYPE_LAYOUT) {
ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
nfs4_put_stid(stid);
@@ -323,16 +336,17 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
if (ls->ls_recalled)
goto out_unlock;
- ls->ls_recalled = true;
- atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
if (list_empty(&ls->ls_layouts))
goto out_unlock;
+ ls->ls_recalled = true;
+ atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
- refcount_inc(&ls->ls_stid.sc_count);
- nfsd4_run_cb(&ls->ls_recall);
-
+ if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ls->ls_recall.cb_flags)) {
+ refcount_inc(&ls->ls_stid.sc_count);
+ nfsd4_run_cb(&ls->ls_recall);
+ }
out_unlock:
spin_unlock(&ls->ls_lock);
}
@@ -422,7 +436,7 @@ nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
if (!new)
return nfserr_jukebox;
- memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
+ memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
new->lo_state = ls;
spin_lock(&fp->fi_lock);
@@ -515,11 +529,11 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp,
if (!list_empty(&ls->ls_layouts)) {
if (found)
nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
- lrp->lrs_present = 1;
+ lrp->lrs_present = true;
} else {
trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
- nfs4_unhash_stid(&ls->ls_stid);
- lrp->lrs_present = 0;
+ ls->ls_stid.sc_status |= SC_STATUS_CLOSED;
+ lrp->lrs_present = false;
}
spin_unlock(&ls->ls_lock);
@@ -539,7 +553,7 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
struct nfs4_layout *lp, *t;
LIST_HEAD(reaplist);
- lrp->lrs_present = 0;
+ lrp->lrs_present = false;
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
@@ -605,7 +619,7 @@ nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
}
static void
-nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
+nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
struct nfs4_client *clp = ls->ls_stid.sc_client;
char addr_str[INET6_ADDRSTRLEN];
@@ -627,7 +641,7 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
argv[0] = (char *)nfsd_recall_failed;
argv[1] = addr_str;
- argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id;
+ argv[2] = file->nf_file->f_path.mnt->mnt_sb->s_id;
argv[3] = NULL;
error = call_usermodehelper(nfsd_recall_failed, argv, envp,
@@ -657,8 +671,9 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
struct nfsd_net *nn;
ktime_t now, cutoff;
const struct nfsd4_layout_ops *ops;
+ struct nfsd_file *fl;
-
+ trace_nfsd_cb_layout_done(&ls->ls_stid.sc_stateid, task);
switch (task->tk_status) {
case 0:
case -NFS4ERR_DELAY:
@@ -688,12 +703,17 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
* Unknown error or non-responding client, we'll need to fence.
*/
trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);
-
- ops = nfsd4_layout_ops[ls->ls_layout_type];
- if (ops->fence_client)
- ops->fence_client(ls);
- else
- nfsd4_cb_layout_fail(ls);
+ rcu_read_lock();
+ fl = nfsd_file_get(ls->ls_file);
+ rcu_read_unlock();
+ if (fl) {
+ ops = nfsd4_layout_ops[ls->ls_layout_type];
+ if (ops->fence_client)
+ ops->fence_client(ls, fl);
+ else
+ nfsd4_cb_layout_fail(ls, fl);
+ nfsd_file_put(fl);
+ }
return 1;
case -NFS4ERR_NOMATCHING_LAYOUT:
trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
@@ -720,10 +740,11 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
.prepare = nfsd4_cb_layout_prepare,
.done = nfsd4_cb_layout_done,
.release = nfsd4_cb_layout_release,
+ .opcode = OP_CB_LAYOUTRECALL,
};
static bool
-nfsd4_layout_lm_break(struct file_lock *fl)
+nfsd4_layout_lm_break(struct file_lease *fl)
{
/*
* We don't want the locks code to timeout the lease for us;
@@ -731,19 +752,19 @@ nfsd4_layout_lm_break(struct file_lock *fl)
* in time:
*/
fl->fl_break_time = 0;
- nfsd4_recall_file_layout(fl->fl_owner);
+ nfsd4_recall_file_layout(fl->c.flc_owner);
return false;
}
static int
-nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
+nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
struct list_head *dispose)
{
BUG_ON(!(arg & F_UNLCK));
return lease_modify(onlist, arg, dispose);
}
-static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
+static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
.lm_break = nfsd4_layout_lm_break,
.lm_change = nfsd4_layout_lm_change,
};
@@ -756,13 +777,11 @@ nfsd4_init_pnfs(void)
for (i = 0; i < DEVID_HASH_SIZE; i++)
INIT_LIST_HEAD(&nfsd_devid_hash[i]);
- nfs4_layout_cache = kmem_cache_create("nfs4_layout",
- sizeof(struct nfs4_layout), 0, 0, NULL);
+ nfs4_layout_cache = KMEM_CACHE(nfs4_layout, 0);
if (!nfs4_layout_cache)
return -ENOMEM;
- nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
- sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
+ nfs4_layout_stateid_cache = KMEM_CACHE(nfs4_layout_stateid, 0);
if (!nfs4_layout_stateid_cache) {
kmem_cache_destroy(nfs4_layout_cache);
return -ENOMEM;
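The nfs4layouts.c hunks above move the layout-recall lease from struct file_lock/vfs_setlease over to struct file_lease/kernel_setlease, and the BUG_ON(fl != NULL) after the setlease call relies on the usual ownership hand-off: the callee either consumes the lease object (clearing the caller's pointer) or leaves it for the caller to free. Below is a minimal stand-alone sketch of that convention only; the demo_* names are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct demo_lease {
	int type;			/* a read lease (F_RDLCK) in the hunk above */
	void *owner;			/* the layout stateid in the hunk above */
};

static struct demo_lease *installed;	/* stands in for the inode's lease list */

/* Returns 0 and takes ownership of *lp on success; nonzero on failure. */
static int demo_setlease(struct demo_lease **lp)
{
	if (!(*lp)->owner)
		return -1;		/* rejected: the caller still owns *lp */
	installed = *lp;		/* accepted: the callee keeps the object... */
	*lp = NULL;			/* ...and clears the caller's pointer */
	return 0;
}

int main(void)
{
	int owner_token = 1;
	struct demo_lease *fl = calloc(1, sizeof(*fl));

	if (!fl)
		return 1;
	fl->owner = &owner_token;
	if (demo_setlease(&fl)) {
		free(fl);		/* failure path: the caller frees it */
		return 1;
	}
	/* success path: fl is NULL here, mirroring the BUG_ON(fl != NULL) above */
	printf("lease installed, caller's fl = %p\n", (void *)fl);
	free(installed);
	return 0;
}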
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index a36261f89bdf..b74800917583 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -37,6 +37,8 @@
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/namei.h>
+
#include <linux/sunrpc/addr.h>
#include <linux/nfs_ssc.h>
@@ -55,6 +57,8 @@ module_param(inter_copy_offload_enable, bool, 0644);
MODULE_PARM_DESC(inter_copy_offload_enable,
"Enable inter server to server copy offload. Default: false");
+static void cleanup_async_copy(struct nfsd4_copy *copy);
+
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
static int nfsd4_ssc_umount_timeout = 900000; /* default to 15 mins */
module_param(nfsd4_ssc_umount_timeout, int, 0644);
@@ -62,36 +66,6 @@ MODULE_PARM_DESC(nfsd4_ssc_umount_timeout,
"idle msecs before unmount export from source server");
#endif
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-#include <linux/security.h>
-
-static inline void
-nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
-{
- struct inode *inode = d_inode(resfh->fh_dentry);
- int status;
-
- inode_lock(inode);
- status = security_inode_setsecctx(resfh->fh_dentry,
- label->data, label->len);
- inode_unlock(inode);
-
- if (status)
- /*
- * XXX: We should really fail the whole open, but we may
- * already have created a new file, so it may be too
- * late. For now this seems the least of evils:
- */
- bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
-
- return;
-}
-#else
-static inline void
-nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
-{ }
-#endif
-
#define NFSDDBG_FACILITY NFSDDBG_PROC
static u32 nfsd_attrmask[] = {
@@ -156,26 +130,6 @@ is_create_with_attrs(struct nfsd4_open *open)
|| open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
}
-/*
- * if error occurs when setting the acl, just clear the acl bit
- * in the returned attr bitmap.
- */
-static void
-do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct nfs4_acl *acl, u32 *bmval)
-{
- __be32 status;
-
- status = nfsd4_set_nfs4_acl(rqstp, fhp, acl);
- if (status)
- /*
- * We should probably fail the whole open at this point,
- * but we've already created the file, so it's too late;
- * So this seems the least of evils:
- */
- bmval[0] &= ~FATTR4_WORD0_ACL;
-}
-
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
@@ -189,7 +143,6 @@ fh_dup2(struct svc_fh *dst, struct svc_fh *src)
static __be32
do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, int accmode)
{
- __be32 status;
if (open->op_truncate &&
!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
@@ -204,12 +157,10 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
if (open->op_share_deny & NFS4_SHARE_DENY_READ)
accmode |= NFSD_MAY_WRITE;
- status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
-
- return status;
+ return fh_verify(rqstp, current_fh, S_IFREG, accmode);
}
-static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
+static __be32 nfsd_check_obj_isreg(struct svc_fh *fh, u32 minor_version)
{
umode_t mode = d_inode(fh->fh_dentry)->i_mode;
@@ -217,14 +168,15 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
- /*
- * Using err_symlink as our catch-all case may look odd; but
- * there's no other obvious error for this case in 4.0, and we
- * happen to know that it will cause the linux v4 client to do
- * the right thing on attempts to open something other than a
- * regular file.
- */
- return nfserr_symlink;
+ if (S_ISLNK(mode))
+ return nfserr_symlink;
+
+ /* RFC 7530 - 16.16.6 */
+ if (minor_version == 0)
+ return nfserr_symlink;
+ else
+ return nfserr_wrong_type;
+
}
static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
@@ -235,6 +187,234 @@ static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate
&resfh->fh_handle);
}
+static inline bool nfsd4_create_is_exclusive(int createmode)
+{
+ return createmode == NFS4_CREATE_EXCLUSIVE ||
+ createmode == NFS4_CREATE_EXCLUSIVE4_1;
+}
+
+static __be32
+nfsd4_vfs_create(struct svc_fh *fhp, struct dentry *child,
+ struct nfsd4_open *open)
+{
+ struct file *filp;
+ struct path path;
+ int oflags;
+
+ oflags = O_CREAT | O_LARGEFILE;
+ switch (open->op_share_access & NFS4_SHARE_ACCESS_BOTH) {
+ case NFS4_SHARE_ACCESS_WRITE:
+ oflags |= O_WRONLY;
+ break;
+ case NFS4_SHARE_ACCESS_BOTH:
+ oflags |= O_RDWR;
+ break;
+ default:
+ oflags |= O_RDONLY;
+ }
+
+ path.mnt = fhp->fh_export->ex_path.mnt;
+ path.dentry = child;
+ filp = dentry_create(&path, oflags, open->op_iattr.ia_mode,
+ current_cred());
+ if (IS_ERR(filp))
+ return nfserrno(PTR_ERR(filp));
+
+ open->op_filp = filp;
+ return nfs_ok;
+}
+
+/*
+ * Implement NFSv4's unchecked, guarded, and exclusive create
+ * semantics for regular files. Open state for this new file is
+ * subsequently fabricated in nfsd4_process_open2().
+ *
+ * Upon return, caller must release @fhp and @resfhp.
+ */
+static __be32
+nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct svc_fh *resfhp, struct nfsd4_open *open)
+{
+ struct iattr *iap = &open->op_iattr;
+ struct nfsd_attrs attrs = {
+ .na_iattr = iap,
+ .na_seclabel = &open->op_label,
+ };
+ struct dentry *parent, *child;
+ __u32 v_mtime, v_atime;
+ struct inode *inode;
+ __be32 status;
+ int host_err;
+
+ if (isdotent(open->op_fname, open->op_fnamelen))
+ return nfserr_exist;
+ if (!(iap->ia_valid & ATTR_MODE))
+ iap->ia_mode = 0;
+
+ status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
+ if (status != nfs_ok)
+ return status;
+ parent = fhp->fh_dentry;
+ inode = d_inode(parent);
+
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ return nfserrno(host_err);
+
+ if (is_create_with_attrs(open))
+ nfsd4_acl_to_attr(NF4REG, open->op_acl, &attrs);
+
+ child = start_creating(&nop_mnt_idmap, parent,
+ &QSTR_LEN(open->op_fname, open->op_fnamelen));
+ if (IS_ERR(child)) {
+ status = nfserrno(PTR_ERR(child));
+ goto out_write;
+ }
+
+ if (d_really_is_negative(child)) {
+ status = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ if (status != nfs_ok)
+ goto out;
+ }
+
+ status = fh_compose(resfhp, fhp->fh_export, child, fhp);
+ if (status != nfs_ok)
+ goto out;
+
+ v_mtime = 0;
+ v_atime = 0;
+ if (nfsd4_create_is_exclusive(open->op_createmode)) {
+ u32 *verifier = (u32 *)open->op_verf.data;
+
+ /*
+ * Solaris 7 gets confused (bugid 4218508) if these have
+ * the high bit set, as do xfs filesystems without the
+ * "bigtime" feature. So just clear the high bits. If this
+ * is ever changed to use different attrs for storing the
+ * verifier, then do_open_lookup() will also need to be
+ * fixed accordingly.
+ */
+ v_mtime = verifier[0] & 0x7fffffff;
+ v_atime = verifier[1] & 0x7fffffff;
+ }
+
+ if (d_really_is_positive(child)) {
+ /* NFSv4 protocol requires change attributes even though
+ * no change happened.
+ */
+ status = fh_fill_both_attrs(fhp);
+ if (status != nfs_ok)
+ goto out;
+
+ switch (open->op_createmode) {
+ case NFS4_CREATE_UNCHECKED:
+ if (!d_is_reg(child))
+ break;
+
+ /*
+ * In NFSv4, we don't want to truncate the file
+ * now. This would be wrong if the OPEN fails for
+ * some other reason. Furthermore, if the size is
+ * nonzero, we should ignore it according to spec!
+ */
+ open->op_truncate = (iap->ia_valid & ATTR_SIZE) &&
+ !iap->ia_size;
+ break;
+ case NFS4_CREATE_GUARDED:
+ status = nfserr_exist;
+ break;
+ case NFS4_CREATE_EXCLUSIVE:
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
+ d_inode(child)->i_size == 0) {
+ open->op_created = true;
+ break; /* subtle */
+ }
+ status = nfserr_exist;
+ break;
+ case NFS4_CREATE_EXCLUSIVE4_1:
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
+ d_inode(child)->i_size == 0) {
+ open->op_created = true;
+ goto set_attr; /* subtle */
+ }
+ status = nfserr_exist;
+ }
+ goto out;
+ }
+
+ if (!IS_POSIXACL(inode))
+ iap->ia_mode &= ~current_umask();
+
+ status = fh_fill_pre_attrs(fhp);
+ if (status != nfs_ok)
+ goto out;
+ status = nfsd4_vfs_create(fhp, child, open);
+ if (status != nfs_ok)
+ goto out;
+ open->op_created = true;
+ fh_fill_post_attrs(fhp);
+
+ /* A newly created file already has a file size of zero. */
+ if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
+ iap->ia_valid &= ~ATTR_SIZE;
+ if (nfsd4_create_is_exclusive(open->op_createmode)) {
+ iap->ia_valid = ATTR_MTIME | ATTR_ATIME |
+ ATTR_MTIME_SET|ATTR_ATIME_SET;
+ iap->ia_mtime.tv_sec = v_mtime;
+ iap->ia_atime.tv_sec = v_atime;
+ iap->ia_mtime.tv_nsec = 0;
+ iap->ia_atime.tv_nsec = 0;
+ }
+
+set_attr:
+ status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
+
+ if (attrs.na_labelerr)
+ open->op_bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (attrs.na_aclerr)
+ open->op_bmval[0] &= ~FATTR4_WORD0_ACL;
+out:
+ end_creating(child);
+ nfsd_attrs_free(&attrs);
+out_write:
+ fh_drop_write(fhp);
+ return status;
+}
+
+/**
+ * set_change_info - set up the change_info4 for a reply
+ * @cinfo: pointer to nfsd4_change_info to be populated
+ * @fhp: pointer to svc_fh to use as source
+ *
+ * Many operations in NFSv4 require change_info4 in the reply. This function
+ * populates that from the info that we (should!) have already collected. In
+ * the event that we didn't get any pre-attrs, just zero out both.
+ */
+static void
+set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
+{
+ cinfo->atomic = (u32)(fhp->fh_pre_saved && fhp->fh_post_saved && !fhp->fh_no_atomic_attr);
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+
+ /*
+ * If fetching the pre-change attributes failed, then we should
+ * have already failed the whole operation. We could have still
+ * failed to fetch post-change attributes however.
+ *
+ * If we didn't get post-op attrs, just zero-out the after
+ * field since we don't know what it should be. If the pre_saved
+ * field isn't set for some reason, throw warning and just copy
+ * whatever is in the after field.
+ */
+ if (WARN_ON_ONCE(!fhp->fh_pre_saved))
+ cinfo->before_change = 0;
+ if (!fhp->fh_post_saved)
+ cinfo->after_change = cinfo->before_change + 1;
+}
+
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
{
@@ -264,47 +444,33 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
* yes | yes | GUARDED4 | GUARDED4
*/
- /*
- * Note: create modes (UNCHECKED,GUARDED...) are the same
- * in NFSv4 as in v3 except EXCLUSIVE4_1.
- */
current->fs->umask = open->op_umask;
- status = do_nfsd_create(rqstp, current_fh, open->op_fname,
- open->op_fnamelen, &open->op_iattr,
- *resfh, open->op_createmode,
- (u32 *)open->op_verf.data,
- &open->op_truncate, &open->op_created);
+ status = nfsd4_create_file(rqstp, current_fh, *resfh, open);
current->fs->umask = 0;
- if (!status && open->op_label.len)
- nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
-
/*
* Following rfc 3530 14.2.16, and rfc 5661 18.16.4
* use the returned bitmask to indicate which attributes
* we used to store the verifier:
*/
- if (nfsd_create_is_exclusive(open->op_createmode) && status == 0)
+ if (nfsd4_create_is_exclusive(open->op_createmode) && status == 0)
open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
FATTR4_WORD1_TIME_MODIFY);
- } else
- /*
- * Note this may exit with the parent still locked.
- * We will hold the lock until nfsd4_open's final
- * lookup, to prevent renames or unlinks until we've had
- * a chance to an acquire a delegation if appropriate.
- */
+ } else {
status = nfsd_lookup(rqstp, current_fh,
open->op_fname, open->op_fnamelen, *resfh);
+ if (status == nfs_ok)
+ /* NFSv4 protocol requires change attributes even though
+ * no change happened.
+ */
+ status = fh_fill_both_attrs(current_fh);
+ }
if (status)
goto out;
- status = nfsd_check_obj_isreg(*resfh);
+ status = nfsd_check_obj_isreg(*resfh, cstate->minorversion);
if (status)
goto out;
- if (is_create_with_attrs(open) && open->op_acl != NULL)
- do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
-
nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
accmode = NFSD_MAY_NOP;
if (open->op_created ||
@@ -320,7 +486,6 @@ static __be32
do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
struct svc_fh *current_fh = &cstate->current_fh;
- __be32 status;
int accmode = 0;
/* We don't know the target directory, and therefore can not
@@ -345,9 +510,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
accmode = NFSD_MAY_OWNER_OVERRIDE;
- status = do_open_permission(rqstp, current_fh, open, accmode);
-
- return status;
+ return do_open_permission(rqstp, current_fh, open, accmode);
}
static void
@@ -375,6 +538,9 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
(int)open->op_fnamelen, open->op_fname,
open->op_openowner);
+ open->op_filp = NULL;
+ open->op_rqstp = rqstp;
+
/* This check required by spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
@@ -427,50 +593,46 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
switch (open->op_claim_type) {
- case NFS4_OPEN_CLAIM_DELEGATE_CUR:
- case NFS4_OPEN_CLAIM_NULL:
- status = do_open_lookup(rqstp, cstate, open, &resfh);
- if (status)
- goto out;
- break;
- case NFS4_OPEN_CLAIM_PREVIOUS:
- status = nfs4_check_open_reclaim(cstate->clp);
- if (status)
- goto out;
- open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
- reclaim = true;
- fallthrough;
- case NFS4_OPEN_CLAIM_FH:
- case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
- status = do_open_fhandle(rqstp, cstate, open);
- if (status)
- goto out;
- resfh = &cstate->current_fh;
- break;
- case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
- case NFS4_OPEN_CLAIM_DELEGATE_PREV:
- dprintk("NFSD: unsupported OPEN claim type %d\n",
- open->op_claim_type);
- status = nfserr_notsupp;
+ case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+ case NFS4_OPEN_CLAIM_NULL:
+ status = do_open_lookup(rqstp, cstate, open, &resfh);
+ if (status)
goto out;
- default:
- dprintk("NFSD: Invalid OPEN claim type %d\n",
- open->op_claim_type);
- status = nfserr_inval;
+ break;
+ case NFS4_OPEN_CLAIM_PREVIOUS:
+ status = nfs4_check_open_reclaim(cstate->clp);
+ if (status)
+ goto out;
+ open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
+ reclaim = true;
+ fallthrough;
+ case NFS4_OPEN_CLAIM_FH:
+ case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
+ status = do_open_fhandle(rqstp, cstate, open);
+ if (status)
goto out;
+ resfh = &cstate->current_fh;
+ break;
+ case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
+ case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+ status = nfserr_notsupp;
+ goto out;
+ default:
+ status = nfserr_inval;
+ goto out;
}
- /*
- * nfsd4_process_open2() does the actual opening of the file. If
- * successful, it (1) truncates the file if open->op_truncate was
- * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
- */
+
status = nfsd4_process_open2(rqstp, resfh, open);
- WARN(status && open->op_created,
- "nfsd4_process_open2 failed to open newly-created file! status=%u\n",
- be32_to_cpu(status));
+ if (status && open->op_created)
+ pr_warn("nfsd4_process_open2 failed to open newly-created file: status=%u\n",
+ be32_to_cpu(status));
if (reclaim && !status)
nn->somebody_reclaimed = true;
out:
+ if (open->op_filp) {
+ fput(open->op_filp);
+ open->op_filp = NULL;
+ }
if (resfh && resfh != &cstate->current_fh) {
fh_dup2(&cstate->current_fh, resfh);
fh_put(resfh);
@@ -535,11 +697,9 @@ static __be32
nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
- __be32 status;
-
fh_put(&cstate->current_fh);
- status = exp_pseudoroot(rqstp, &cstate->current_fh);
- return status;
+
+ return exp_pseudoroot(rqstp, &cstate->current_fh);
}
static __be32
@@ -592,24 +752,24 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&access->ac_supported);
}
-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
-{
- __be32 *verf = (__be32 *)verifier->data;
-
- BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
-
- nfsd_copy_boot_verifier(verf, net_generic(net, nfsd_net_id));
-}
-
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_commit *commit = &u->commit;
+ struct nfsd_file *nf;
+ __be32 status;
- return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
+ status = nfsd_file_acquire(rqstp, &cstate->current_fh, NFSD_MAY_WRITE |
+ NFSD_MAY_NOT_BREAK_LEASE, &nf);
+ if (status != nfs_ok)
+ return status;
+
+ status = nfsd_commit(rqstp, &cstate->current_fh, nf, commit->co_offset,
commit->co_count,
(__be32 *)commit->co_verf.data);
+ nfsd_file_put(nf);
+ return status;
}
static __be32
@@ -617,6 +777,10 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_create *create = &u->create;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &create->cr_iattr,
+ .na_seclabel = &create->cr_label,
+ };
struct svc_fh resfh;
__be32 status;
dev_t rdev;
@@ -632,12 +796,13 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
+ status = nfsd4_acl_to_attr(create->cr_type, create->cr_acl, &attrs);
current->fs->umask = create->cr_umask;
switch (create->cr_type) {
case NF4LNK:
status = nfsd_symlink(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- create->cr_data, &resfh);
+ create->cr_data, &attrs, &resfh);
break;
case NF4BLK:
@@ -648,7 +813,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out_umask;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFBLK, rdev, &resfh);
+ &attrs, S_IFBLK, rdev, &resfh);
break;
case NF4CHR:
@@ -659,26 +824,26 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out_umask;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr,S_IFCHR, rdev, &resfh);
+ &attrs, S_IFCHR, rdev, &resfh);
break;
case NF4SOCK:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFSOCK, 0, &resfh);
+ &attrs, S_IFSOCK, 0, &resfh);
break;
case NF4FIFO:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFIFO, 0, &resfh);
+ &attrs, S_IFIFO, 0, &resfh);
break;
case NF4DIR:
create->cr_iattr.ia_valid &= ~ATTR_SIZE;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFDIR, 0, &resfh);
+ &attrs, S_IFDIR, 0, &resfh);
break;
default:
@@ -688,20 +853,17 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- if (create->cr_label.len)
- nfsd4_security_inode_setsecctx(&resfh, &create->cr_label, create->cr_bmval);
-
- if (create->cr_acl != NULL)
- do_set_nfs4_acl(rqstp, &resfh, create->cr_acl,
- create->cr_bmval);
-
- fh_unlock(&cstate->current_fh);
+ if (attrs.na_labelerr)
+ create->cr_bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (attrs.na_aclerr)
+ create->cr_bmval[0] &= ~FATTR4_WORD0_ACL;
set_change_info(&create->cr_cinfo, &cstate->current_fh);
fh_dup2(&cstate->current_fh, &resfh);
out:
fh_put(&resfh);
out_umask:
current->fs->umask = 0;
+ nfsd_attrs_free(&attrs);
return status;
}
@@ -712,6 +874,8 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_getattr *getattr = &u->getattr;
__be32 status;
+ trace_nfsd_vfs_getattr(rqstp, &cstate->current_fh);
+
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
return status;
@@ -782,12 +946,16 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
read->rd_nf = NULL;
- if (read->rd_offset >= OFFSET_MAX)
- return nfserr_inval;
trace_nfsd_read_start(rqstp, &cstate->current_fh,
read->rd_offset, read->rd_length);
+ read->rd_length = min_t(u32, read->rd_length, svc_max_payload(rqstp));
+ if (read->rd_offset > (u64)OFFSET_MAX)
+ read->rd_offset = (u64)OFFSET_MAX;
+ if (read->rd_offset + read->rd_length > (u64)OFFSET_MAX)
+ read->rd_length = (u64)OFFSET_MAX - read->rd_offset;
+
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
@@ -796,19 +964,17 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* To ensure proper ordering, we therefore turn off zero copy if
* the client wants us to do more in this compound:
*/
- if (!nfsd4_last_compound_op(rqstp))
- clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ if (!nfsd4_last_compound_op(rqstp)) {
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+ argp->splice_ok = false;
+ }
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&read->rd_stateid, RD_STATE,
&read->rd_nf, NULL);
- if (status) {
- dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
- goto out;
- }
- status = nfs_ok;
-out:
+
read->rd_rqstp = rqstp;
read->rd_fhp = &cstate->current_fh;
return status;
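The nfsd4_read() hunks above drop the old "offset >= OFFSET_MAX means nfserr_inval" check in favour of clamping: the length is first capped to the transport's maximum payload, then the offset and offset+length are clamped so neither exceeds OFFSET_MAX. A small stand-alone sketch of just that arithmetic follows; clamp_read(), DEMO_OFFSET_MAX and the sample values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_OFFSET_MAX ((uint64_t)INT64_MAX)	/* stands in for OFFSET_MAX */

static void clamp_read(uint64_t *offset, uint32_t *length, uint32_t max_payload)
{
	if (*length > max_payload)		/* min_t(u32, ..., svc_max_payload()) */
		*length = max_payload;
	if (*offset > DEMO_OFFSET_MAX)
		*offset = DEMO_OFFSET_MAX;
	if (*offset + *length > DEMO_OFFSET_MAX)
		*length = DEMO_OFFSET_MAX - *offset;
}

int main(void)
{
	uint64_t offset = DEMO_OFFSET_MAX - 10;
	uint32_t length = 4096;

	clamp_read(&offset, &length, 1048576);
	/* the offset is left alone; the length shrinks to the 10 readable bytes */
	printf("offset=%llu length=%u\n", (unsigned long long)offset, length);
	return 0;
}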
@@ -818,10 +984,11 @@ out:
static void
nfsd4_read_release(union nfsd4_op_u *u)
{
- if (u->read.rd_nf)
+ if (u->read.rd_nf) {
+ trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
+ u->read.rd_offset, u->read.rd_length);
nfsd_file_put(u->read.rd_nf);
- trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
- u->read.rd_offset, u->read.rd_length);
+ }
}
static __be32
@@ -832,6 +999,9 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
u64 cookie = readdir->rd_cookie;
static const nfs4_verifier zeroverf;
+ trace_nfsd_vfs_readdir(rqstp, &cstate->current_fh,
+ readdir->rd_maxcount, readdir->rd_cookie);
+
/* no need to check permission - this will be done in nfsd_readdir() */
if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
@@ -870,10 +1040,8 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
- if (!status) {
- fh_unlock(&cstate->current_fh);
+ if (!status)
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
- }
return status;
}
@@ -891,8 +1059,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
- set_change_info(&rename->rn_sinfo, &cstate->current_fh);
- set_change_info(&rename->rn_tinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_sinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_tinfo, &cstate->current_fh);
return nfs_ok;
}
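The nfsd4_rename() hunk above swaps the two set_change_info() calls so that rn_sinfo (the source directory) is filled from the saved filehandle and rn_tinfo (the target directory) from the current filehandle. The sketch below models only the set_change_info() logic added earlier in this patch, using simplified demo_* types rather than the real nfsd structures, to show how the atomic flag and the missing-attribute fallbacks combine in a RENAME reply.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for nfsd4_change_info and svc_fh (illustration only). */
struct demo_cinfo { uint32_t atomic; uint64_t before, after; };
struct demo_fh   { bool pre_saved, post_saved, no_atomic; uint64_t pre, post; };

/* Mirrors the set_change_info() logic added earlier in this patch. */
static void demo_set_change_info(struct demo_cinfo *c, const struct demo_fh *fh)
{
	c->atomic = fh->pre_saved && fh->post_saved && !fh->no_atomic;
	c->before = fh->pre;
	c->after  = fh->post;
	if (!fh->pre_saved)
		c->before = 0;			/* the WARN_ON_ONCE() case */
	if (!fh->post_saved)
		c->after = c->before + 1;	/* unknown, but reported as changed */
}

int main(void)
{
	/* RENAME: saved FH is the source directory, current FH the target directory */
	struct demo_fh save_fh    = { true, true,  false, 100, 101 };
	struct demo_fh current_fh = { true, false, false, 200, 0 };
	struct demo_cinfo sinfo, tinfo;

	demo_set_change_info(&sinfo, &save_fh);		/* rn_sinfo */
	demo_set_change_info(&tinfo, &current_fh);	/* rn_tinfo */
	printf("sinfo: atomic=%u %llu->%llu\n", sinfo.atomic,
	       (unsigned long long)sinfo.before, (unsigned long long)sinfo.after);
	printf("tinfo: atomic=%u %llu->%llu\n", tinfo.atomic,
	       (unsigned long long)tinfo.before, (unsigned long long)tinfo.after);
	return 0;
}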
@@ -913,7 +1081,6 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&exp, &dentry);
if (err)
return err;
- fh_unlock(&cstate->current_fh);
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
@@ -963,23 +1130,83 @@ nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
exp_put(u->secinfo_no_name.sin_exp);
}
+/*
+ * Validate that the requested timestamps are within the acceptable range. If
+ * timestamp appears to be in the future, then it will be clamped to
+ * current_time().
+ */
+static void
+vet_deleg_attrs(struct nfsd4_setattr *setattr, struct nfs4_delegation *dp)
+{
+ struct timespec64 now = current_time(dp->dl_stid.sc_file->fi_inode);
+ struct iattr *iattr = &setattr->sa_iattr;
+
+ if ((setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) &&
+ !nfsd4_vet_deleg_time(&iattr->ia_atime, &dp->dl_atime, &now))
+ iattr->ia_valid &= ~(ATTR_ATIME | ATTR_ATIME_SET);
+
+ if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ if (nfsd4_vet_deleg_time(&iattr->ia_mtime, &dp->dl_mtime, &now)) {
+ iattr->ia_ctime = iattr->ia_mtime;
+ if (nfsd4_vet_deleg_time(&iattr->ia_ctime, &dp->dl_ctime, &now))
+ dp->dl_setattr = true;
+ else
+ iattr->ia_valid &= ~(ATTR_CTIME | ATTR_CTIME_SET);
+ } else {
+ iattr->ia_valid &= ~(ATTR_CTIME | ATTR_CTIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET);
+ }
+ }
+}
+
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &setattr->sa_iattr,
+ .na_seclabel = &setattr->sa_label,
+ };
+ bool save_no_wcc, deleg_attrs;
+ struct nfs4_stid *st = NULL;
+ struct inode *inode;
__be32 status = nfs_ok;
int err;
- if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+ deleg_attrs = setattr->sa_bmval[2] & (FATTR4_WORD2_TIME_DELEG_ACCESS |
+ FATTR4_WORD2_TIME_DELEG_MODIFY);
+
+ if (deleg_attrs || (setattr->sa_iattr.ia_valid & ATTR_SIZE)) {
+ int flags = WR_STATE;
+
+ if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS)
+ flags |= RD_STATE;
+
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
- WR_STATE, NULL, NULL);
- if (status) {
- dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
+ flags, NULL, &st);
+ if (status)
return status;
+ }
+
+ if (deleg_attrs) {
+ status = nfserr_bad_stateid;
+ if (st->sc_type & SC_TYPE_DELEG) {
+ struct nfs4_delegation *dp = delegstateid(st);
+
+ /* Only for *_ATTRS_DELEG flavors */
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ vet_deleg_attrs(setattr, dp);
+ status = nfs_ok;
+ }
}
}
+ if (st)
+ nfs4_put_stid(st);
+ if (status)
+ return status;
+
err = fh_want_write(&cstate->current_fh);
if (err)
return nfserrno(err);
@@ -990,55 +1217,71 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- if (setattr->sa_acl != NULL)
- status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
- setattr->sa_acl);
- if (status)
- goto out;
- if (setattr->sa_label.len)
- status = nfsd4_set_nfs4_label(rqstp, &cstate->current_fh,
- &setattr->sa_label);
+ inode = cstate->current_fh.fh_dentry->d_inode;
+ status = nfsd4_acl_to_attr(S_ISDIR(inode->i_mode) ? NF4DIR : NF4REG,
+ setattr->sa_acl, &attrs);
+
if (status)
goto out;
- status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
- 0, (time64_t)0);
+ save_no_wcc = cstate->current_fh.fh_no_wcc;
+ cstate->current_fh.fh_no_wcc = true;
+ status = nfsd_setattr(rqstp, &cstate->current_fh, &attrs, NULL);
+ cstate->current_fh.fh_no_wcc = save_no_wcc;
+ if (!status)
+ status = nfserrno(attrs.na_labelerr);
+ if (!status)
+ status = nfserrno(attrs.na_aclerr);
out:
+ nfsd_attrs_free(&attrs);
fh_drop_write(&cstate->current_fh);
return status;
}
+static void nfsd4_file_mark_deleg_written(struct nfs4_file *fi)
+{
+ spin_lock(&fi->fi_lock);
+ if (!list_empty(&fi->fi_delegations)) {
+ struct nfs4_delegation *dp = list_first_entry(&fi->fi_delegations,
+ struct nfs4_delegation, dl_perfile);
+
+ if (dp->dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG)
+ dp->dl_written = true;
+ }
+ spin_unlock(&fi->fi_lock);
+}
+
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
stateid_t *stateid = &write->wr_stateid;
+ struct nfs4_stid *stid = NULL;
struct nfsd_file *nf = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
- int nvecs;
- if (write->wr_offset >= OFFSET_MAX)
- return nfserr_inval;
+ if (write->wr_offset > (u64)OFFSET_MAX ||
+ write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
+ return nfserr_fbig;
cnt = write->wr_buflen;
trace_nfsd_write_start(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- stateid, WR_STATE, &nf, NULL);
- if (status) {
- dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
+ stateid, WR_STATE, &nf, &stid);
+ if (status)
return status;
+
+ if (stid) {
+ nfsd4_file_mark_deleg_written(stid->sc_file);
+ nfs4_put_stid(stid);
}
write->wr_how_written = write->wr_stable_how;
-
- nvecs = svc_fill_write_vector(rqstp, &write->wr_payload);
- WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
-
status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
- write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
- write->wr_how_written,
+ write->wr_offset, &write->wr_payload,
+ &cnt, write->wr_how_written,
(__be32 *)write->wr_verifier.data);
nfsd_file_put(nf);
@@ -1060,17 +1303,13 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
src_stateid, RD_STATE, src, NULL);
- if (status) {
- dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
+ if (status)
goto out;
- }
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
dst_stateid, WR_STATE, dst, NULL);
- if (status) {
- dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
+ if (status)
goto out_put_src;
- }
/* fix up for NFS-specific error code */
if (!S_ISREG(file_inode((*src)->nf_file)->i_mode) ||
@@ -1083,8 +1322,10 @@ out:
return status;
out_put_dst:
nfsd_file_put(*dst);
+ *dst = NULL;
out_put_src:
nfsd_file_put(*src);
+ *src = NULL;
goto out;
}
@@ -1101,7 +1342,7 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- status = nfsd4_clone_file_range(src, clone->cl_src_pos,
+ status = nfsd4_clone_file_range(rqstp, src, clone->cl_src_pos,
dst, clone->cl_dst_pos, clone->cl_count,
EX_ISSYNC(cstate->current_fh.fh_export));
@@ -1111,35 +1352,91 @@ out:
return status;
}
-void nfs4_put_copy(struct nfsd4_copy *copy)
+/**
+ * nfsd4_has_active_async_copies - Check for ongoing copy operations
+ * @clp: Client to be checked
+ *
+ * NFSD maintains state for async COPY operations after they complete,
+ * and this state remains in the nfs4_client's async_copies list.
+ * Ongoing copies should block the destruction of the nfs4_client, but
+ * completed copies should not.
+ *
+ * Return values:
+ * %true: At least one active async COPY is ongoing
+ * %false: No active async COPY operations were found
+ */
+bool nfsd4_has_active_async_copies(struct nfs4_client *clp)
{
- if (!refcount_dec_and_test(&copy->refcount))
- return;
- kfree(copy);
+ struct nfsd4_copy *copy;
+ bool result = false;
+
+ spin_lock(&clp->async_lock);
+ list_for_each_entry(copy, &clp->async_copies, copies) {
+ if (!test_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags) &&
+ !test_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags)) {
+ result = true;
+ break;
+ }
+ }
+ spin_unlock(&clp->async_lock);
+ return result;
}
-static bool
-check_and_set_stop_copy(struct nfsd4_copy *copy)
+/**
+ * nfsd4_async_copy_reaper - Purge completed copies
+ * @nn: Network namespace with possible active copy information
+ */
+void nfsd4_async_copy_reaper(struct nfsd_net *nn)
{
- bool value;
+ struct nfs4_client *clp;
+ struct nfsd4_copy *copy;
+ LIST_HEAD(reaplist);
+
+ spin_lock(&nn->client_lock);
+ list_for_each_entry(clp, &nn->client_lru, cl_lru) {
+ struct list_head *pos, *next;
+
+ spin_lock(&clp->async_lock);
+ list_for_each_safe(pos, next, &clp->async_copies) {
+ copy = list_entry(pos, struct nfsd4_copy, copies);
+ if (test_bit(NFSD4_COPY_F_OFFLOAD_DONE, &copy->cp_flags)) {
+ if (--copy->cp_ttl) {
+ list_del_init(&copy->copies);
+ list_add(&copy->copies, &reaplist);
+ }
+ }
+ }
+ spin_unlock(&clp->async_lock);
+ }
+ spin_unlock(&nn->client_lock);
+
+ while (!list_empty(&reaplist)) {
+ copy = list_first_entry(&reaplist, struct nfsd4_copy, copies);
+ list_del_init(&copy->copies);
+ cleanup_async_copy(copy);
+ }
+}
- spin_lock(&copy->cp_clp->async_lock);
- value = copy->stopped;
- if (!copy->stopped)
- copy->stopped = true;
- spin_unlock(&copy->cp_clp->async_lock);
- return value;
+static void nfs4_put_copy(struct nfsd4_copy *copy)
+{
+ if (!refcount_dec_and_test(&copy->refcount))
+ return;
+ kfree(copy->cp_src);
+ kfree(copy);
}
static void nfsd4_stop_copy(struct nfsd4_copy *copy)
{
- /* only 1 thread should stop the copy */
- if (!check_and_set_stop_copy(copy))
+ trace_nfsd_copy_async_cancel(copy);
+ if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags)) {
kthread_stop(copy->copy_task);
+ copy->nfserr = nfs_ok;
+ set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+ }
nfs4_put_copy(copy);
}
-static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
+static struct nfsd4_copy *nfsd4_unhash_copy(struct nfs4_client *clp)
{
struct nfsd4_copy *copy = NULL;
@@ -1148,6 +1445,9 @@ static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
copies);
refcount_inc(&copy->refcount);
+ copy->cp_clp = NULL;
+ if (!list_empty(&copy->copies))
+ list_del_init(&copy->copies);
}
spin_unlock(&clp->async_lock);
return copy;
@@ -1157,7 +1457,7 @@ void nfsd4_shutdown_copy(struct nfs4_client *clp)
{
struct nfsd4_copy *copy;
- while ((copy = nfsd4_get_copy(clp)) != NULL)
+ while ((copy = nfsd4_unhash_copy(clp)) != NULL)
nfsd4_stop_copy(copy);
}
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
@@ -1175,15 +1475,16 @@ extern void nfs_sb_deactive(struct super_block *sb);
* setup a work entry in the ssc delayed unmount list.
*/
static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
- struct nfsd4_ssc_umount_item **retwork, struct vfsmount **ss_mnt)
+ struct nfsd4_ssc_umount_item **nsui,
+ struct svc_rqst *rqstp)
{
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *work = NULL;
struct nfsd4_ssc_umount_item *tmp;
DEFINE_WAIT(wait);
+ __be32 status = 0;
- *ss_mnt = NULL;
- *retwork = NULL;
+ *nsui = NULL;
work = kzalloc(sizeof(*work), GFP_KERNEL);
try_again:
spin_lock(&nn->nfsd_ssc_lock);
@@ -1193,57 +1494,58 @@ try_again:
/* found a match */
if (ni->nsui_busy) {
/* wait - and try again */
- prepare_to_wait(&nn->nfsd_ssc_waitq, &wait,
- TASK_INTERRUPTIBLE);
+ prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, TASK_IDLE);
spin_unlock(&nn->nfsd_ssc_lock);
/* allow 20secs for mount/unmount for now - revisit */
- if (signal_pending(current) ||
+ if (svc_thread_should_stop(rqstp) ||
(schedule_timeout(20*HZ) == 0)) {
+ finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work);
return nfserr_eagain;
}
finish_wait(&nn->nfsd_ssc_waitq, &wait);
goto try_again;
}
- *ss_mnt = ni->nsui_vfsmount;
+ *nsui = ni;
refcount_inc(&ni->nsui_refcnt);
spin_unlock(&nn->nfsd_ssc_lock);
kfree(work);
- /* return vfsmount in ss_mnt */
+ /* return vfsmount in (*nsui)->nsui_vfsmount */
return 0;
}
if (work) {
- strncpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
+ strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
refcount_set(&work->nsui_refcnt, 2);
work->nsui_busy = true;
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
- *retwork = work;
- }
+ *nsui = work;
+ } else
+ status = nfserr_resource;
spin_unlock(&nn->nfsd_ssc_lock);
- return 0;
+ return status;
}
-static void nfsd4_ssc_update_dul_work(struct nfsd_net *nn,
- struct nfsd4_ssc_umount_item *work, struct vfsmount *ss_mnt)
+static void nfsd4_ssc_update_dul(struct nfsd_net *nn,
+ struct nfsd4_ssc_umount_item *nsui,
+ struct vfsmount *ss_mnt)
{
- /* set nsui_vfsmount, clear busy flag and wakeup waiters */
spin_lock(&nn->nfsd_ssc_lock);
- work->nsui_vfsmount = ss_mnt;
- work->nsui_busy = false;
+ nsui->nsui_vfsmount = ss_mnt;
+ nsui->nsui_busy = false;
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
}
-static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
- struct nfsd4_ssc_umount_item *work)
+static void nfsd4_ssc_cancel_dul(struct nfsd_net *nn,
+ struct nfsd4_ssc_umount_item *nsui)
{
spin_lock(&nn->nfsd_ssc_lock);
- list_del(&work->nsui_list);
+ list_del(&nsui->nsui_list);
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
- kfree(work);
+ kfree(nsui);
}
/*
@@ -1251,7 +1553,7 @@ static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
*/
static __be32
nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
- struct vfsmount **mount)
+ struct nfsd4_ssc_umount_item **nsui)
{
struct file_system_type *type;
struct vfsmount *ss_mnt;
@@ -1262,7 +1564,6 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
char *ipaddr, *dev_name, *raw_data;
int len, raw_len;
__be32 status = nfserr_inval;
- struct nfsd4_ssc_umount_item *work = NULL;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
naddr = &nss->u.nl4_addr;
@@ -1270,6 +1571,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
naddr->addr_len,
(struct sockaddr *)&tmp_addr,
sizeof(tmp_addr));
+ *nsui = NULL;
if (tmp_addrlen == 0)
goto out_err;
@@ -1312,10 +1614,10 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
- status = nfsd4_ssc_setup_dul(nn, ipaddr, &work, &ss_mnt);
+ status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui, rqstp);
if (status)
goto out_free_devname;
- if (ss_mnt)
+ if ((*nsui)->nsui_vfsmount)
goto out_done;
/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
@@ -1323,15 +1625,12 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
module_put(type->owner);
if (IS_ERR(ss_mnt)) {
status = nfserr_nodev;
- if (work)
- nfsd4_ssc_cancel_dul_work(nn, work);
+ nfsd4_ssc_cancel_dul(nn, *nsui);
goto out_free_devname;
}
- if (work)
- nfsd4_ssc_update_dul_work(nn, work, ss_mnt);
+ nfsd4_ssc_update_dul(nn, *nsui, ss_mnt);
out_done:
status = 0;
- *mount = ss_mnt;
out_free_devname:
kfree(dev_name);
@@ -1343,13 +1642,6 @@ out_err:
return status;
}
-static void
-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
-{
- nfs_do_sb_deactive(ss_mnt->mnt_sb);
- mntput(ss_mnt);
-}
-
/*
* Verify COPY destination stateid.
*
@@ -1362,7 +1654,7 @@ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
static __be32
nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
- struct nfsd4_copy *copy, struct vfsmount **mount)
+ struct nfsd4_copy *copy)
{
struct svc_fh *s_fh = NULL;
stateid_t *s_stid = &copy->cp_src_stateid;
@@ -1375,7 +1667,7 @@ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
if (status)
goto out;
- status = nfsd4_interssc_connect(&copy->cp_src, rqstp, mount);
+ status = nfsd4_interssc_connect(copy->cp_src, rqstp, &copy->ss_nsui);
if (status)
goto out;
@@ -1393,45 +1685,26 @@ out:
}
static void
-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
struct nfsd_file *dst)
{
- bool found = false;
- long timeout;
- struct nfsd4_ssc_umount_item *tmp;
- struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
+ long timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
- nfs42_ssc_close(src->nf_file);
- nfsd_file_put(dst);
- fput(src->nf_file);
+ nfs42_ssc_close(filp);
+ fput(filp);
- if (!nn) {
- mntput(ss_mnt);
- return;
- }
spin_lock(&nn->nfsd_ssc_lock);
- timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
- list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
- if (ni->nsui_vfsmount->mnt_sb == ss_mnt->mnt_sb) {
- list_del(&ni->nsui_list);
- /*
- * vfsmount can be shared by multiple exports,
- * decrement refcnt. If the count drops to 1 it
- * will be unmounted when nsui_expire expires.
- */
- refcount_dec(&ni->nsui_refcnt);
- ni->nsui_expire = jiffies + timeout;
- list_add_tail(&ni->nsui_list, &nn->nfsd_ssc_mount_list);
- found = true;
- break;
- }
- }
+ list_del(&nsui->nsui_list);
+ /*
+ * vfsmount can be shared by multiple exports,
+ * decrement refcnt. If the count drops to 1 it
+ * will be unmounted when nsui_expire expires.
+ */
+ refcount_dec(&nsui->nsui_refcnt);
+ nsui->nsui_expire = jiffies + timeout;
+ list_add_tail(&nsui->nsui_list, &nn->nfsd_ssc_mount_list);
spin_unlock(&nn->nfsd_ssc_lock);
- if (!found) {
- mntput(ss_mnt);
- return;
- }
}
#else /* CONFIG_NFSD_V4_2_INTER_SSC */
@@ -1439,24 +1712,17 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
static __be32
nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
- struct nfsd4_copy *copy,
- struct vfsmount **mount)
+ struct nfsd4_copy *copy)
{
- *mount = NULL;
return nfserr_inval;
}
static void
-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
struct nfsd_file *dst)
{
}
-static void
-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
-{
-}
-
static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
struct nfs_fh *src_fh,
nfs4_stateid *stateid)
@@ -1475,82 +1741,98 @@ nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
&copy->nf_dst);
}
-static void
-nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
-{
- nfsd_file_put(src);
- nfsd_file_put(dst);
-}
-
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
{
- struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb);
+ struct nfsd4_cb_offload *cbo =
+ container_of(cb, struct nfsd4_cb_offload, co_cb);
+ struct nfsd4_copy *copy =
+ container_of(cbo, struct nfsd4_copy, cp_cb_offload);
- nfs4_put_copy(copy);
+ set_bit(NFSD4_COPY_F_OFFLOAD_DONE, &copy->cp_flags);
}
static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
struct rpc_task *task)
{
+ struct nfsd4_cb_offload *cbo =
+ container_of(cb, struct nfsd4_cb_offload, co_cb);
+
+ trace_nfsd_cb_offload_done(&cbo->co_res.cb_stateid, task);
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ if (cbo->co_retries--) {
+ rpc_delay(task, HZ / 5);
+ return 0;
+ }
+ }
+ nfsd41_cb_destroy_referring_call_list(cb);
return 1;
}
static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
.release = nfsd4_cb_offload_release,
- .done = nfsd4_cb_offload_done
+ .done = nfsd4_cb_offload_done,
+ .opcode = OP_CB_OFFLOAD,
};
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
{
copy->cp_res.wr_stable_how =
- copy->committed ? NFS_FILE_SYNC : NFS_UNSTABLE;
- copy->cp_synchronous = sync;
- gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
+ test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
+ NFS_FILE_SYNC : NFS_UNSTABLE;
+ nfsd4_copy_set_sync(copy, sync);
}
-static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
+static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+ struct file *dst,
+ struct file *src)
{
+ errseq_t since;
ssize_t bytes_copied = 0;
u64 bytes_total = copy->cp_count;
u64 src_pos = copy->cp_src_pos;
u64 dst_pos = copy->cp_dst_pos;
- __be32 status;
+ int status;
+ loff_t end;
/* See RFC 7862 p.67: */
if (bytes_total == 0)
bytes_total = ULLONG_MAX;
do {
+ /* Only async copies can be stopped here */
if (kthread_should_stop())
break;
- bytes_copied = nfsd_copy_file_range(copy->nf_src->nf_file,
- src_pos, copy->nf_dst->nf_file, dst_pos,
- bytes_total);
+ bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
+ bytes_total);
if (bytes_copied <= 0)
break;
bytes_total -= bytes_copied;
copy->cp_res.wr_bytes_written += bytes_copied;
src_pos += bytes_copied;
dst_pos += bytes_copied;
- } while (bytes_total > 0 && !copy->cp_synchronous);
+ } while (bytes_total > 0 && nfsd4_copy_is_async(copy));
/* for a non-zero asynchronous copy do a commit of data */
- if (!copy->cp_synchronous && copy->cp_res.wr_bytes_written > 0) {
- down_write(&copy->nf_dst->nf_rwsem);
- status = vfs_fsync_range(copy->nf_dst->nf_file,
- copy->cp_dst_pos,
- copy->cp_res.wr_bytes_written, 0);
- up_write(&copy->nf_dst->nf_rwsem);
+ if (nfsd4_copy_is_async(copy) && copy->cp_res.wr_bytes_written > 0) {
+ since = READ_ONCE(dst->f_wb_err);
+ end = copy->cp_dst_pos + copy->cp_res.wr_bytes_written - 1;
+ status = vfs_fsync_range(dst, copy->cp_dst_pos, end, 0);
+ if (!status)
+ status = filemap_check_wb_err(dst->f_mapping, since);
if (!status)
- copy->committed = true;
+ set_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags);
}
return bytes_copied;
}
-static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
+static __be32 nfsd4_do_copy(struct nfsd4_copy *copy,
+ struct file *src, struct file *dst,
+ bool sync)
{
__be32 status;
ssize_t bytes;
- bytes = _nfsd_copy_file_range(copy);
+ bytes = _nfsd_copy_file_range(copy, dst, src);
+
/* for async copy, we ignore the error, client can always retry
* to get the error
*/
@@ -1560,13 +1842,6 @@ static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
nfsd4_init_copy_res(copy, sync);
status = nfs_ok;
}
-
- if (!copy->cp_intra) /* Inter server SSC */
- nfsd4_cleanup_inter_ssc(copy->ss_mnt, copy->nf_src,
- copy->nf_dst);
- else
- nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
-
return status;
}
@@ -1575,74 +1850,107 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
dst->cp_src_pos = src->cp_src_pos;
dst->cp_dst_pos = src->cp_dst_pos;
dst->cp_count = src->cp_count;
- dst->cp_synchronous = src->cp_synchronous;
+ dst->cp_flags = src->cp_flags;
memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
memcpy(&dst->fh, &src->fh, sizeof(src->fh));
dst->cp_clp = src->cp_clp;
dst->nf_dst = nfsd_file_get(src->nf_dst);
- dst->cp_intra = src->cp_intra;
- if (src->cp_intra) /* for inter, file_src doesn't exist yet */
+ /* for inter, nf_src doesn't exist yet */
+ if (!nfsd4_ssc_is_inter(src))
dst->nf_src = nfsd_file_get(src->nf_src);
memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
- memcpy(&dst->cp_src, &src->cp_src, sizeof(struct nl4_server));
+ memcpy(dst->cp_src, src->cp_src, sizeof(struct nl4_server));
memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
- dst->ss_mnt = src->ss_mnt;
+ dst->ss_nsui = src->ss_nsui;
+}
+
+static void release_copy_files(struct nfsd4_copy *copy)
+{
+ if (copy->nf_src)
+ nfsd_file_put(copy->nf_src);
+ if (copy->nf_dst)
+ nfsd_file_put(copy->nf_dst);
}
static void cleanup_async_copy(struct nfsd4_copy *copy)
{
nfs4_free_copy_state(copy);
- nfsd_file_put(copy->nf_dst);
- if (copy->cp_intra)
- nfsd_file_put(copy->nf_src);
- spin_lock(&copy->cp_clp->async_lock);
- list_del(&copy->copies);
- spin_unlock(&copy->cp_clp->async_lock);
+ release_copy_files(copy);
+ if (copy->cp_clp) {
+ spin_lock(&copy->cp_clp->async_lock);
+ if (!list_empty(&copy->copies))
+ list_del_init(&copy->copies);
+ spin_unlock(&copy->cp_clp->async_lock);
+ }
nfs4_put_copy(copy);
}
+static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
+{
+ struct nfsd4_cb_offload *cbo = &copy->cp_cb_offload;
+
+ memcpy(&cbo->co_res, &copy->cp_res, sizeof(copy->cp_res));
+ memcpy(&cbo->co_fh, &copy->fh, sizeof(copy->fh));
+ cbo->co_nfserr = copy->nfserr;
+ cbo->co_retries = 5;
+
+ nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
+ NFSPROC4_CLNT_CB_OFFLOAD);
+ nfsd41_cb_referring_call(&cbo->co_cb, &cbo->co_referring_sessionid,
+ cbo->co_referring_slotid,
+ cbo->co_referring_seqno);
+ trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
+ &cbo->co_fh, copy->cp_count, copy->nfserr);
+ nfsd4_try_run_cb(&cbo->co_cb);
+}
+
+/**
+ * nfsd4_do_async_copy - kthread function for background server-side COPY
+ * @data: arguments for COPY operation
+ *
+ * Return values:
+ * %0: Copy operation is done.
+ */
static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
- struct nfsd4_copy *cb_copy;
- if (!copy->cp_intra) { /* Inter server SSC */
- copy->nf_src = kzalloc(sizeof(struct nfsd_file), GFP_KERNEL);
- if (!copy->nf_src) {
- copy->nfserr = nfserr_serverfault;
- nfsd4_interssc_disconnect(copy->ss_mnt);
- goto do_callback;
- }
- copy->nf_src->nf_file = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
- &copy->stateid);
- if (IS_ERR(copy->nf_src->nf_file)) {
- copy->nfserr = nfserr_offload_denied;
- nfsd4_interssc_disconnect(copy->ss_mnt);
+ trace_nfsd_copy_async(copy);
+ if (nfsd4_ssc_is_inter(copy)) {
+ struct file *filp;
+
+ filp = nfs42_ssc_open(copy->ss_nsui->nsui_vfsmount,
+ &copy->c_fh, &copy->stateid);
+ if (IS_ERR(filp)) {
+ switch (PTR_ERR(filp)) {
+ case -EBADF:
+ copy->nfserr = nfserr_wrong_type;
+ break;
+ default:
+ copy->nfserr = nfserr_offload_denied;
+ }
+ /* ss_mnt will be unmounted by the laundromat */
goto do_callback;
}
+ copy->nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+ false);
+ nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
+ } else {
+ copy->nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ copy->nf_dst->nf_file, false);
}
- copy->nfserr = nfsd4_do_copy(copy, 0);
do_callback:
- cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
- if (!cb_copy)
- goto out;
- refcount_set(&cb_copy->refcount, 1);
- memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
- cb_copy->cp_clp = copy->cp_clp;
- cb_copy->nfserr = copy->nfserr;
- memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
- nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
- &nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
- trace_nfsd_cb_offload(copy->cp_clp, &copy->cp_res.cb_stateid,
- &copy->fh, copy->cp_count, copy->nfserr);
- nfsd4_run_cb(&cb_copy->cp_cb);
-out:
- if (!copy->cp_intra)
- kfree(copy->nf_src);
- cleanup_async_copy(copy);
+ /* The kthread exits forthwith. Ensure that a subsequent
+ * OFFLOAD_CANCEL won't try to kill it again. */
+ set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags);
+
+ set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+ trace_nfsd_copy_async_done(copy);
+ nfsd4_send_cb_offload(copy);
+ atomic_dec(&copy->cp_nn->pending_async_copies);
return 0;
}
@@ -1650,45 +1958,67 @@ static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd4_copy *async_copy = NULL;
struct nfsd4_copy *copy = &u->copy;
+ struct nfsd42_write_res *result;
__be32 status;
- struct nfsd4_copy *async_copy = NULL;
- if (!copy->cp_intra) { /* Inter server SSC */
- if (!inter_copy_offload_enable || copy->cp_synchronous) {
+ result = &copy->cp_res;
+ nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
+
+ copy->cp_clp = cstate->clp;
+ if (nfsd4_ssc_is_inter(copy)) {
+ trace_nfsd_copy_inter(copy);
+ if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) {
status = nfserr_notsupp;
goto out;
}
- status = nfsd4_setup_inter_ssc(rqstp, cstate, copy,
- &copy->ss_mnt);
- if (status)
+ status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
+ if (status) {
+ trace_nfsd_copy_done(copy, status);
return nfserr_offload_denied;
+ }
} else {
+ trace_nfsd_copy_intra(copy);
status = nfsd4_setup_intra_ssc(rqstp, cstate, copy);
- if (status)
+ if (status) {
+ trace_nfsd_copy_done(copy, status);
return status;
+ }
}
- copy->cp_clp = cstate->clp;
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
- if (!copy->cp_synchronous) {
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-
- status = nfserrno(-ENOMEM);
+ if (nfsd4_copy_is_async(copy)) {
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
goto out_err;
- if (!nfs4_init_copy_state(nn, copy))
- goto out_err;
+ async_copy->cp_nn = nn;
+ INIT_LIST_HEAD(&async_copy->copies);
refcount_set(&async_copy->refcount, 1);
- memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
- sizeof(copy->cp_res.cb_stateid));
+ async_copy->cp_ttl = NFSD_COPY_INITIAL_TTL;
+ /* Arbitrary cap on number of pending async copy operations */
+ if (atomic_inc_return(&nn->pending_async_copies) >
+ (int)rqstp->rq_pool->sp_nrthreads)
+ goto out_dec_async_copy_err;
+ async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+ if (!async_copy->cp_src)
+ goto out_dec_async_copy_err;
+ if (!nfs4_init_copy_state(nn, copy))
+ goto out_dec_async_copy_err;
+ memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
+ sizeof(result->cb_stateid));
dup_copy_fields(copy, async_copy);
+ memcpy(async_copy->cp_cb_offload.co_referring_sessionid.data,
+ cstate->session->se_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN);
+ async_copy->cp_cb_offload.co_referring_slotid = cstate->slot->sl_index;
+ async_copy->cp_cb_offload.co_referring_seqno = cstate->slot->sl_seqid;
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
if (IS_ERR(async_copy->copy_task))
- goto out_err;
+ goto out_dec_async_copy_err;
spin_lock(&async_copy->cp_clp->async_lock);
list_add(&async_copy->copies,
&async_copy->cp_clp->async_copies);
@@ -1696,36 +2026,59 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
wake_up_process(async_copy->copy_task);
status = nfs_ok;
} else {
- status = nfsd4_do_copy(copy, 1);
+ status = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ copy->nf_dst->nf_file, true);
}
out:
+ trace_nfsd_copy_done(copy, status);
+ release_copy_files(copy);
return status;
+out_dec_async_copy_err:
+ if (async_copy)
+ atomic_dec(&nn->pending_async_copies);
out_err:
+ if (nfsd4_ssc_is_inter(copy)) {
+ /*
+ * Source's vfsmount of inter-copy will be unmounted
+ * by the laundromat. Use copy instead of async_copy
+ * since async_copy->ss_nsui might not be set yet.
+ */
+ refcount_dec(&copy->ss_nsui->nsui_refcnt);
+ }
if (async_copy)
cleanup_async_copy(async_copy);
- status = nfserrno(-ENOMEM);
- if (!copy->cp_intra)
- nfsd4_interssc_disconnect(copy->ss_mnt);
+ status = nfserr_jukebox;
goto out;
}
-struct nfsd4_copy *
-find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
+static struct nfsd4_copy *
+find_async_copy_locked(struct nfs4_client *clp, stateid_t *stateid)
{
struct nfsd4_copy *copy;
- spin_lock(&clp->async_lock);
+ lockdep_assert_held(&clp->async_lock);
+
list_for_each_entry(copy, &clp->async_copies, copies) {
- if (memcmp(&copy->cp_stateid.stid, stateid, NFS4_STATEID_SIZE))
+ if (memcmp(&copy->cp_stateid.cs_stid, stateid, NFS4_STATEID_SIZE))
continue;
- refcount_inc(&copy->refcount);
- spin_unlock(&clp->async_lock);
return copy;
}
- spin_unlock(&clp->async_lock);
return NULL;
}
+static struct nfsd4_copy *
+find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
+{
+ struct nfsd4_copy *copy;
+
+ spin_lock(&clp->async_lock);
+ copy = find_async_copy_locked(clp, stateid);
+ if (copy)
+ refcount_inc(&copy->refcount);
+ spin_unlock(&clp->async_lock);
+ return copy;
+}
+
static __be32
nfsd4_offload_cancel(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -1753,7 +2106,7 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_copy_notify *cn = &u->copy_notify;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- struct nfs4_stid *stid;
+ struct nfs4_stid *stid = NULL;
struct nfs4_cpntf_state *cps;
struct nfs4_client *clp = cstate->clp;
@@ -1762,24 +2115,26 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&stid);
if (status)
return status;
+ if (!stid)
+ return nfserr_bad_stateid;
- cn->cpn_sec = nn->nfsd4_lease;
- cn->cpn_nsec = 0;
+ cn->cpn_lease_time.tv_sec = nn->nfsd4_lease;
+ cn->cpn_lease_time.tv_nsec = 0;
status = nfserrno(-ENOMEM);
cps = nfs4_alloc_init_cpntf_state(nn, stid);
if (!cps)
goto out;
- memcpy(&cn->cpn_cnr_stateid, &cps->cp_stateid.stid, sizeof(stateid_t));
+ memcpy(&cn->cpn_cnr_stateid, &cps->cp_stateid.cs_stid, sizeof(stateid_t));
memcpy(&cps->cp_p_stateid, &stid->sc_stateid, sizeof(stateid_t));
memcpy(&cps->cp_p_clid, &clp->cl_clientid, sizeof(clientid_t));
/* For now, only return one server address in cpn_src, the
* address used by the client to connect to this server.
*/
- cn->cpn_src.nl4_type = NL4_NETADDR;
+ cn->cpn_src->nl4_type = NL4_NETADDR;
status = nfsd4_set_netaddr((struct sockaddr *)&rqstp->rq_daddr,
- &cn->cpn_src.u.nl4_addr);
+ &cn->cpn_src->u.nl4_addr);
WARN_ON_ONCE(status);
if (status) {
nfs4_put_cpntf_state(nn, cps);
@@ -1800,10 +2155,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
WR_STATE, &nf, NULL);
- if (status != nfs_ok) {
- dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
+ if (status != nfs_ok)
return status;
- }
status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, nf->nf_file,
fallocate->falloc_offset,
@@ -1812,22 +2165,29 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd_file_put(nf);
return status;
}
+
static __be32
nfsd4_offload_status(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
- __be32 status = 0;
+ __be32 status = nfs_ok;
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
- copy = find_async_copy(clp, &os->stateid);
+ os->completed = false;
+ spin_lock(&clp->async_lock);
+ copy = find_async_copy_locked(clp, &os->stateid);
if (copy) {
os->count = copy->cp_res.wr_bytes_written;
- nfs4_put_copy(copy);
+ if (test_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags)) {
+ os->completed = true;
+ os->status = copy->nfserr;
+ }
} else
status = nfserr_bad_stateid;
+ spin_unlock(&clp->async_lock);
return status;
}
@@ -1859,10 +2219,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&seek->seek_stateid,
RD_STATE, &nf, NULL);
- if (status) {
- dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
+ if (status)
return status;
- }
switch (seek->seek_whence) {
case NFS4_CONTENT_DATA:
@@ -1974,6 +2332,49 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status == nfserr_same ? nfs_ok : status;
}
+static __be32
+nfsd4_get_dir_delegation(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+ struct nfs4_delegation *dd;
+ struct nfsd_file *nf;
+ __be32 status;
+
+ status = nfsd_file_acquire_dir(rqstp, &cstate->current_fh, &nf);
+ if (status != nfs_ok)
+ return status;
+
+ /*
+ * RFC 8881, section 18.39.3 says:
+ *
+ * "The server may refuse to grant the delegation. In that case, the
+ * server will return NFS4ERR_DIRDELEG_UNAVAIL."
+ *
+ * This is sub-optimal, since it means that the server would need to
+ * abort compound processing just because the delegation wasn't
+ * available. RFC8881bis should change this to allow the server to
+ * return NFS4_OK with a non-fatal status of GDD4_UNAVAIL in this
+ * situation.
+ */
+ dd = nfsd_get_dir_deleg(cstate, gdd, nf);
+ nfsd_file_put(nf);
+ if (IS_ERR(dd)) {
+ int err = PTR_ERR(dd);
+
+ if (err != -EAGAIN)
+ return nfserrno(err);
+ gdd->gddrnf_status = GDD4_UNAVAIL;
+ return nfs_ok;
+ }
+
+ gdd->gddrnf_status = GDD4_OK;
+ memcpy(&gdd->gddr_stateid, &dd->dl_stid.sc_stateid, sizeof(gdd->gddr_stateid));
+ nfs4_put_stid(&dd->dl_stid);
+ return nfs_ok;
+}
+
#ifdef CONFIG_NFSD_PNFS
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
@@ -2016,7 +2417,9 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
return nfserr_noent;
}
- exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
+ exp = rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+ rqstp->rq_client, rqstp->rq_gssclient,
+ map->fsid_type, map->fsid);
if (IS_ERR(exp)) {
dprintk("%s: could not find device id\n", __func__);
return nfserr_noent;
@@ -2054,7 +2457,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
- int accmode = NFSD_MAY_READ_IF_EXEC;
+ int accmode = NFSD_MAY_READ_IF_EXEC | NFSD_MAY_OWNER_OVERRIDE;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
@@ -2111,7 +2514,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
- nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
+ nfserr = ops->proc_layoutget(rqstp, d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
@@ -2135,16 +2538,17 @@ static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct net *net = SVC_NET(rqstp);
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
- loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
- nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
+ nfserr = fh_verify(rqstp, current_fh, 0,
+ NFSD_MAY_WRITE | NFSD_MAY_OWNER_OVERRIDE);
if (nfserr)
goto out;
@@ -2154,43 +2558,50 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
goto out;
inode = d_inode(current_fh->fh_dentry);
- nfserr = nfserr_inval;
- if (new_size <= seg->offset) {
- dprintk("pnfsd: last write before layout segment\n");
- goto out;
+ lcp->lc_size_chg = false;
+ if (lcp->lc_newoffset) {
+ loff_t new_size = lcp->lc_last_wr + 1;
+
+ nfserr = nfserr_inval;
+ if (new_size <= seg->offset)
+ goto out;
+ if (new_size > seg->offset + seg->length)
+ goto out;
+
+ if (new_size > i_size_read(inode)) {
+ lcp->lc_size_chg = true;
+ lcp->lc_newsize = new_size;
+ }
}
- if (new_size > seg->offset + seg->length) {
- dprintk("pnfsd: last write beyond layout segment\n");
+
+ nfserr = nfserr_grace;
+ if (locks_in_grace(net) && !lcp->lc_reclaim)
goto out;
- }
- if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
- dprintk("pnfsd: layoutcommit beyond EOF\n");
+ nfserr = nfserr_no_grace;
+ if (!locks_in_grace(net) && lcp->lc_reclaim)
goto out;
- }
- nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
- false, lcp->lc_layout_type,
- &ls);
- if (nfserr) {
- trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
- /* fixup error code as per RFC5661 */
- if (nfserr == nfserr_bad_stateid)
- nfserr = nfserr_badlayout;
- goto out;
+ if (!lcp->lc_reclaim) {
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate,
+ &lcp->lc_sid, false, lcp->lc_layout_type, &ls);
+ if (nfserr) {
+ trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
+ /* fixup error code as per RFC5661 */
+ if (nfserr == nfserr_bad_stateid)
+ nfserr = nfserr_badlayout;
+ goto out;
+ }
+
+ /* LAYOUTCOMMIT does not require any serialization */
+ mutex_unlock(&ls->ls_mutex);
}
- /* LAYOUTCOMMIT does not require any serialization */
- mutex_unlock(&ls->ls_mutex);
+ nfserr = ops->proc_layoutcommit(inode, rqstp, lcp);
- if (new_size > i_size_read(inode)) {
- lcp->lc_size_chg = 1;
- lcp->lc_newsize = new_size;
- } else {
- lcp->lc_size_chg = 0;
+ if (!lcp->lc_reclaim) {
+ nfsd4_file_mark_deleg_written(ls->ls_stid.sc_file);
+ nfs4_put_stid(&ls->ls_stid);
}
-
- nfserr = ops->proc_layoutcommit(inode, lcp);
- nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
@@ -2313,10 +2724,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
return rpc_success;
}
-static inline void nfsd4_increment_op_stats(u32 opnum)
+static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum)
{
if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
+ percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]);
}
static const struct nfsd4_operation nfsd4_ops[];
@@ -2433,7 +2844,7 @@ check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
return;
}
putfh = (struct nfsd4_putfh *)&saved_op->u;
- if (!copy->cp_intra)
+ if (nfsd4_ssc_is_inter(copy))
putfh->no_verify = true;
}
}
@@ -2486,9 +2897,6 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
status = nfserr_minor_vers_mismatch;
if (nfsd_minorversion(nn, args->minorversion, NFSD_TEST) <= 0)
goto out;
- status = nfserr_resource;
- if (args->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
- goto out;
status = nfs41_check_op_ordering(args);
if (status) {
@@ -2501,10 +2909,20 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
rqstp->rq_lease_breaker = (void **)&cstate->clp;
- trace_nfsd_compound(rqstp, args->opcnt);
+ trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt);
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
+ if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) {
+ /* If there are still more operations to process,
+ * stop here and report NFS4ERR_RESOURCE. */
+ if (cstate->minorversion == 0 &&
+ args->client_opcnt > resp->opcnt) {
+ op->status = nfserr_resource;
+ goto encode_op;
+ }
+ }
+
/*
* The XDR decode routines may have pre-set op->status;
* for example, if there is a miscellaneous XDR error
@@ -2528,13 +2946,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
goto encode_op;
}
- fh_clear_wcc(current_fh);
+ fh_clear_pre_post_attrs(current_fh);
/* If op is non-idempotent */
if (op->opdesc->op_flags & OP_MODIFIES_SOMETHING) {
/*
* Don't execute this op if we couldn't encode a
- * succesful reply:
+ * successful reply:
*/
u32 plen = op->opdesc->op_rsize_bop(rqstp, op);
/*
@@ -2552,6 +2970,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
if (op->opdesc->op_get_currentstateid)
op->opdesc->op_get_currentstateid(cstate, &op->u);
op->status = op->opdesc->op_func(rqstp, cstate, &op->u);
+ trace_nfsd_compound_op_err(rqstp, op->opnum, op->status);
/* Only from SEQUENCE */
if (cstate->status == nfserr_replay_cache) {
@@ -2568,7 +2987,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
if (current_fh->fh_export &&
need_wrongsec_check(rqstp))
- op->status = check_nfsd_access(current_fh->fh_export, rqstp);
+ op->status = check_nfsd_access(current_fh->fh_export, rqstp, false);
}
encode_op:
if (op->status == nfserr_replay_me) {
@@ -2580,11 +2999,11 @@ encode_op:
status = op->status;
}
- trace_nfsd_compound_status(args->opcnt, resp->opcnt, status,
- nfsd4_op_name(op->opnum));
+ trace_nfsd_compound_status(args->client_opcnt, resp->opcnt,
+ status, nfsd4_op_name(op->opnum));
nfsd4_cstate_clear_replay(cstate);
- nfsd4_increment_op_stats(op->opnum);
+ nfsd4_increment_op_stats(nn, op->opnum);
}
fh_put(current_fh);
@@ -2615,28 +3034,49 @@ out:
#define op_encode_channel_attrs_maxsz (6 + 1 + 1)
-static inline u32 nfsd4_only_status_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+/*
+ * The _rsize() helpers are invoked by the NFSv4 COMPOUND decoder, which
+ * is called before sunrpc sets rq_res.buflen. Thus we have to compute
+ * the maximum payload size here, based on transport limits and the size
+ * of the remaining space in the rq_pages array.
+ */
+static u32 nfsd4_max_payload(const struct svc_rqst *rqstp)
+{
+ u32 buflen;
+
+ buflen = (rqstp->rq_page_end - rqstp->rq_next_page) * PAGE_SIZE;
+ buflen -= rqstp->rq_auth_slack;
+ buflen -= rqstp->rq_res.head[0].iov_len;
+ return min_t(u32, buflen, svc_max_payload(rqstp));
+}
+
+static u32 nfsd4_only_status_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size) * sizeof(__be32);
}
-static inline u32 nfsd4_status_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_status_stateid_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz)* sizeof(__be32);
}
-static inline u32 nfsd4_access_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_access_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
/* ac_supported, ac_resp_access */
return (op_encode_hdr_size + 2)* sizeof(__be32);
}
-static inline u32 nfsd4_commit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_commit_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32);
}
-static inline u32 nfsd4_create_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_create_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
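
To make the arithmetic in nfsd4_max_payload() concrete (illustrative numbers only, assuming 4 KiB pages): with 260 pages remaining between rq_next_page and rq_page_end, no auth slack, and 200 bytes already consumed in rq_res.head[0], buflen = 260 * 4096 - 0 - 200 = 1,064,760 bytes; min_t() then clamps that to svc_max_payload(), so on a transport whose maximum payload is 1 MiB the helper returns 1,048,576.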
@@ -2647,17 +3087,17 @@ static inline u32 nfsd4_create_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op
* the op prematurely if the estimate is too large. We may turn off splice
* reads unnecessarily.
*/
-static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_getattr_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 *bmap = op->u.getattr.ga_bmval;
+ const u32 *bmap = op->u.getattr.ga_bmval;
u32 bmap0 = bmap[0], bmap1 = bmap[1], bmap2 = bmap[2];
u32 ret = 0;
if (bmap0 & FATTR4_WORD0_ACL)
- return svc_max_payload(rqstp);
+ return nfsd4_max_payload(rqstp);
if (bmap0 & FATTR4_WORD0_FS_LOCATIONS)
- return svc_max_payload(rqstp);
+ return nfsd4_max_payload(rqstp);
if (bmap1 & FATTR4_WORD1_OWNER) {
ret += IDMAP_NAMESZ + 4;
@@ -2685,24 +3125,28 @@ static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp,
return ret;
}
-static inline u32 nfsd4_getfh_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_getfh_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + NFS4_FHSIZE;
}
-static inline u32 nfsd4_link_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_link_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
-static inline u32 nfsd4_lock_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_lock_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_lock_denied_maxsz)
* sizeof(__be32);
}
-static inline u32 nfsd4_open_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_open_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_stateid_maxsz
+ op_encode_change_info_maxsz + 1
@@ -2710,20 +3154,18 @@ static inline u32 nfsd4_open_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+ op_encode_delegation_maxsz) * sizeof(__be32);
}
-static inline u32 nfsd4_read_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_read_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 maxcount = 0, rlen = 0;
-
- maxcount = svc_max_payload(rqstp);
- rlen = min(op->u.read.rd_length, maxcount);
+ u32 rlen = min(op->u.read.rd_length, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + 2 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
-static inline u32 nfsd4_read_plus_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_read_plus_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 maxcount = svc_max_payload(rqstp);
- u32 rlen = min(op->u.read.rd_length, maxcount);
+ u32 rlen = min(op->u.read.rd_length, nfsd4_max_payload(rqstp));
/*
* If we detect that the file changed during hole encoding, then we
* recover by encoding the remaining reply as data. This means we need
@@ -2734,70 +3176,77 @@ static inline u32 nfsd4_read_plus_rsize(struct svc_rqst *rqstp, struct nfsd4_op
return (op_encode_hdr_size + 2 + seg_len + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
-static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_readdir_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 maxcount = 0, rlen = 0;
-
- maxcount = svc_max_payload(rqstp);
- rlen = min(op->u.readdir.rd_maxcount, maxcount);
+ u32 rlen = min(op->u.readdir.rd_maxcount, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + op_encode_verifier_maxsz +
XDR_QUADLEN(rlen)) * sizeof(__be32);
}
-static inline u32 nfsd4_readlink_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_readlink_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1) * sizeof(__be32) + PAGE_SIZE;
}
-static inline u32 nfsd4_remove_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_remove_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
-static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_rename_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz
+ op_encode_change_info_maxsz) * sizeof(__be32);
}
-static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_sequence_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
}
-static inline u32 nfsd4_test_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_test_stateid_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 1 + op->u.test_stateid.ts_num_ids)
* sizeof(__be32);
}
-static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_setattr_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32);
}
-static inline u32 nfsd4_secinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_secinfo_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + RPC_AUTH_MAXFLAVOR *
(4 + XDR_QUADLEN(GSS_OID_MAX_LEN))) * sizeof(__be32);
}
-static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_setclientid_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
sizeof(__be32);
}
-static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_write_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + op_encode_verifier_maxsz) * sizeof(__be32);
}
-static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_exchange_id_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
1 + 1 + /* eir_flags, spr_how */\
@@ -2811,14 +3260,16 @@ static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_o
0 /* ignored eir_server_impl_id contents */) * sizeof(__be32);
}
-static inline u32 nfsd4_bind_conn_to_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_bind_conn_to_session_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* bctsr_sessid */\
2 /* bctsr_dir, use_conn_in_rdma_mode */) * sizeof(__be32);
}
-static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_create_session_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + /* sessionid */\
@@ -2827,7 +3278,8 @@ static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd
op_encode_channel_attrs_maxsz) * sizeof(__be32);
}
-static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_copy_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* wr_callback */ +
@@ -2839,16 +3291,16 @@ static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
1 /* cr_synchronous */) * sizeof(__be32);
}
-static inline u32 nfsd4_offload_status_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_offload_status_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
2 /* osr_count */ +
1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32);
}
-static inline u32 nfsd4_copy_notify_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_copy_notify_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
3 /* cnr_lease_time */ +
@@ -2862,13 +3314,23 @@ static inline u32 nfsd4_copy_notify_rsize(struct svc_rqst *rqstp,
* sizeof(__be32);
}
-#ifdef CONFIG_NFSD_PNFS
-static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_get_dir_delegation_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 maxcount = 0, rlen = 0;
+ return (op_encode_hdr_size +
+ 1 /* gddr_status */ +
+ op_encode_verifier_maxsz +
+ op_encode_stateid_maxsz +
+ 2 /* gddr_notification */ +
+ 2 /* gddr_child_attributes */ +
+ 2 /* gddr_dir_attributes */);
+}
- maxcount = svc_max_payload(rqstp);
- rlen = min(op->u.getdeviceinfo.gd_maxcount, maxcount);
+#ifdef CONFIG_NFSD_PNFS
+static u32 nfsd4_getdeviceinfo_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
+{
+ u32 rlen = min(op->u.getdeviceinfo.gd_maxcount, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size +
1 /* gd_layout_type*/ +
@@ -2881,7 +3343,8 @@ static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4
* so we need to define an arbitrary upper bound here.
*/
#define MAX_LAYOUT_SIZE 128
-static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_layoutget_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* logr_return_on_close */ +
@@ -2890,14 +3353,16 @@ static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op
MAX_LAYOUT_SIZE) * sizeof(__be32);
}
-static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_layoutcommit_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* locr_newsize */ +
2 /* ns_size */) * sizeof(__be32);
}
-static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_layoutreturn_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* lrs_stateid */ +
@@ -2906,41 +3371,36 @@ static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_
#endif /* CONFIG_NFSD_PNFS */
-static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+static u32 nfsd4_seek_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + 3) * sizeof(__be32);
}
-static inline u32 nfsd4_getxattr_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_getxattr_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 maxcount, rlen;
-
- maxcount = svc_max_payload(rqstp);
- rlen = min_t(u32, XATTR_SIZE_MAX, maxcount);
+ u32 rlen = min_t(u32, XATTR_SIZE_MAX, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + 1 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
-static inline u32 nfsd4_setxattr_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_setxattr_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
}
-static inline u32 nfsd4_listxattrs_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_listxattrs_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
- u32 maxcount, rlen;
-
- maxcount = svc_max_payload(rqstp);
- rlen = min(op->u.listxattrs.lsxa_maxcount, maxcount);
+ u32 rlen = min(op->u.listxattrs.lsxa_maxcount, nfsd4_max_payload(rqstp));
return (op_encode_hdr_size + 4 + XDR_QUADLEN(rlen)) * sizeof(__be32);
}
-static inline u32 nfsd4_removexattr_rsize(struct svc_rqst *rqstp,
- struct nfsd4_op *op)
+static u32 nfsd4_removexattr_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
{
return (op_encode_hdr_size + op_encode_change_info_maxsz)
* sizeof(__be32);
@@ -3000,6 +3460,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCK] = {
.op_func = nfsd4_lock,
+ .op_release = nfsd4_lock_release,
.op_flags = OP_MODIFIES_SOMETHING |
OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCK",
@@ -3008,6 +3469,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCKT] = {
.op_func = nfsd4_lockt,
+ .op_release = nfsd4_lockt_release,
.op_flags = OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCKT",
.op_rsize_bop = nfsd4_lock_rsize,
@@ -3181,6 +3643,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
/* NFSv4.1 operations */
[OP_EXCHANGE_ID] = {
.op_func = nfsd4_exchange_id,
+ .op_release = nfsd4_exchange_id_release,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
| OP_MODIFIES_SOMETHING,
.op_name = "OP_EXCHANGE_ID",
@@ -3252,6 +3715,12 @@ static const struct nfsd4_operation nfsd4_ops[] = {
.op_get_currentstateid = nfsd4_get_freestateid,
.op_rsize_bop = nfsd4_only_status_rsize,
},
+ [OP_GET_DIR_DELEGATION] = {
+ .op_func = nfsd4_get_dir_delegation,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_GET_DIR_DELEGATION",
+ .op_rsize_bop = nfsd4_get_dir_delegation_rsize,
+ },
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
.op_func = nfsd4_getdeviceinfo,
@@ -3378,7 +3847,8 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
u32 opiter;
- if (!cstate->minorversion)
+ if (rqstp->rq_procinfo != &nfsd_version4.vs_proc[NFSPROC4_COMPOUND] ||
+ cstate->minorversion == 0)
return false;
if (cstate->spo_must_allowed)
@@ -3429,6 +3899,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
@@ -3439,20 +3910,22 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_decode = nfs4svc_decode_compoundargs,
.pc_encode = nfs4svc_encode_compoundres,
.pc_argsize = sizeof(struct nfsd4_compoundargs),
+ .pc_argzero = offsetof(struct nfsd4_compoundargs, iops),
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
- .pc_xdrressize = NFSD_BUFSIZE/4,
+ .pc_xdrressize = 3+NFSSVC_MAXBLKSIZE/4,
.pc_name = "COMPOUND",
},
};
-static unsigned int nfsd_count3[ARRAY_SIZE(nfsd_procedures4)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfsd_count4[ARRAY_SIZE(nfsd_procedures4)]);
const struct svc_version nfsd_version4 = {
.vs_vers = 4,
- .vs_nproc = 2,
+ .vs_nproc = ARRAY_SIZE(nfsd_procedures4),
.vs_proc = nfsd_procedures4,
- .vs_count = nfsd_count3,
+ .vs_count = nfsd_count4,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS4_SVC_XDRSIZE,
.vs_rpcb_optnl = true,
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 6fedc49726bf..441dfbfe2d2b 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -32,7 +32,8 @@
*
*/
-#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha2.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
@@ -66,6 +67,7 @@ struct nfsd4_client_tracking_ops {
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops;
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2;
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
/* Globals */
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
@@ -81,110 +83,38 @@ nfs4_save_creds(const struct cred **original_creds)
new->fsuid = GLOBAL_ROOT_UID;
new->fsgid = GLOBAL_ROOT_GID;
*original_creds = override_creds(new);
- put_cred(new);
return 0;
}
static void
nfs4_reset_creds(const struct cred *original)
{
- revert_creds(original);
+ put_cred(revert_creds(original));
}
static void
-md5_to_hex(char *out, char *md5)
+nfs4_make_rec_clidname(char dname[HEXDIR_LEN], const struct xdr_netobj *clname)
{
- int i;
-
- for (i=0; i<16; i++) {
- unsigned char c = md5[i];
-
- *out++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
- *out++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
- }
- *out = '\0';
-}
-
-static int
-nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
-{
- struct xdr_netobj cksum;
- struct crypto_shash *tfm;
- int status;
+ u8 digest[MD5_DIGEST_SIZE];
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
- tfm = crypto_alloc_shash("md5", 0, 0);
- if (IS_ERR(tfm)) {
- status = PTR_ERR(tfm);
- goto out_no_tfm;
- }
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL) {
- status = -ENOMEM;
- goto out;
- }
+ md5(clname->data, clname->len, digest);
- status = crypto_shash_tfm_digest(tfm, clname->data, clname->len,
- cksum.data);
- if (status)
- goto out;
-
- md5_to_hex(dname, cksum.data);
-
- status = 0;
-out:
- kfree(cksum.data);
- crypto_free_shash(tfm);
-out_no_tfm:
- return status;
-}
-
-/*
- * If we had an error generating the recdir name for the legacy tracker
- * then warn the admin. If the error doesn't appear to be transient,
- * then disable recovery tracking.
- */
-static void
-legacy_recdir_name_error(struct nfs4_client *clp, int error)
-{
- printk(KERN_ERR "NFSD: unable to generate recoverydir "
- "name (%d).\n", error);
-
- /*
- * if the algorithm just doesn't exist, then disable the recovery
- * tracker altogether. The crypto libs will generally return this if
- * FIPS is enabled as well.
- */
- if (error == -ENOENT) {
- printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
- "Reboot recovery will not function correctly!\n");
- nfsd4_client_tracking_exit(clp->net);
- }
+ static_assert(HEXDIR_LEN == 2 * MD5_DIGEST_SIZE + 1);
+ sprintf(dname, "%*phN", MD5_DIGEST_SIZE, digest);
}
static void
__nfsd4_create_reclaim_record_grace(struct nfs4_client *clp,
- const char *dname, int len, struct nfsd_net *nn)
+ char *dname, struct nfsd_net *nn)
{
- struct xdr_netobj name;
+ struct xdr_netobj name = { .len = strlen(dname), .data = dname };
struct xdr_netobj princhash = { .len = 0, .data = NULL };
struct nfs4_client_reclaim *crp;
- name.data = kmemdup(dname, len, GFP_KERNEL);
- if (!name.data) {
- dprintk("%s: failed to allocate memory for name.data!\n",
- __func__);
- return;
- }
- name.len = len;
crp = nfs4_client_to_reclaim(name, princhash, nn);
- if (!crp) {
- kfree(name.data);
- return;
- }
crp->cr_clp = clp;
}
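
The "%*phN" conversion used in the new nfs4_make_rec_clidname() is the kernel's hex-dump printf extension: the '*' field width gives the number of bytes and the 'N' variant means no separator, so a 16-byte MD5 digest becomes exactly the 32 lowercase hex characters that HEXDIR_LEN accommodates. A minimal sketch with an assumed digest value:

	u8 digest[MD5_DIGEST_SIZE] = { 0xde, 0xad, 0xbe, 0xef /* ...12 more bytes... */ };
	char dname[HEXDIR_LEN];		/* 2 * MD5_DIGEST_SIZE + 1 */

	sprintf(dname, "%*phN", MD5_DIGEST_SIZE, digest);
	/* dname now reads "deadbeef..." -- 32 hex characters plus the NUL. */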
@@ -202,9 +132,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
if (!nn->rec_file)
return;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return legacy_recdir_name_error(clp, status);
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
status = nfs4_save_creds(&original_cred);
if (status < 0)
@@ -215,13 +143,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
goto out_creds;
dir = nn->rec_file->f_path.dentry;
- /* lock the parent */
- inode_lock(d_inode(dir));
- dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1);
+ dentry = start_creating(&nop_mnt_idmap, dir, &QSTR(dname));
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
- goto out_unlock;
+ goto out;
}
if (d_really_is_positive(dentry))
/*
@@ -232,16 +158,16 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
* In the 4.0 case, we should never get here; but we may
* as well be forgiving and just succeed silently.
*/
- goto out_put;
- status = vfs_mkdir(&init_user_ns, d_inode(dir), dentry, S_IRWXU);
-out_put:
- dput(dentry);
-out_unlock:
- inode_unlock(d_inode(dir));
+ goto out_end;
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), dentry, 0700, NULL);
+ if (IS_ERR(dentry))
+ status = PTR_ERR(dentry);
+out_end:
+ end_creating(dentry);
+out:
if (status == 0) {
if (nn->in_grace)
- __nfsd4_create_reclaim_record_grace(clp, dname,
- HEXDIR_LEN, nn);
+ __nfsd4_create_reclaim_record_grace(clp, dname, nn);
vfs_fsync(nn->rec_file, 0);
} else {
printk(KERN_ERR "NFSD: failed to write recovery record"
@@ -254,7 +180,7 @@ out_creds:
nfs4_reset_creds(original_cred);
}
-typedef int (recdir_func)(struct dentry *, struct dentry *, struct nfsd_net *);
+typedef int (recdir_func)(struct dentry *, char *, struct nfsd_net *);
struct name_list {
char name[HEXDIR_LEN];
@@ -266,7 +192,7 @@ struct nfs4_dir_ctx {
struct list_head names;
};
-static int
+static bool
nfsd4_build_namelist(struct dir_context *__ctx, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
@@ -275,14 +201,14 @@ nfsd4_build_namelist(struct dir_context *__ctx, const char *name, int namlen,
struct name_list *entry;
if (namlen != HEXDIR_LEN - 1)
- return 0;
+ return true;
entry = kmalloc(sizeof(struct name_list), GFP_KERNEL);
if (entry == NULL)
- return -ENOMEM;
+ return false;
memcpy(entry->name, name, HEXDIR_LEN - 1);
entry->name[HEXDIR_LEN - 1] = '\0';
list_add(&entry->list, &ctx->names);
- return 0;
+ return true;
}
static int
@@ -308,23 +234,14 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
}
status = iterate_dir(nn->rec_file, &ctx.ctx);
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
- if (!status) {
- struct dentry *dentry;
- dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
- if (IS_ERR(dentry)) {
- status = PTR_ERR(dentry);
- break;
- }
- status = f(dir, dentry, nn);
- dput(dentry);
- }
+ if (!status)
+ status = f(dir, entry->name, nn);
+
list_del(&entry->list);
kfree(entry);
}
- inode_unlock(d_inode(dir));
nfs4_reset_creds(original_cred);
list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
@@ -336,28 +253,20 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
}
static int
-nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn)
+nfsd4_unlink_clid_dir(char *name, struct nfsd_net *nn)
{
struct dentry *dir, *dentry;
int status;
- dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
+ dprintk("NFSD: nfsd4_unlink_clid_dir. name %s\n", name);
dir = nn->rec_file->f_path.dentry;
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
- dentry = lookup_one_len(name, dir, namlen);
- if (IS_ERR(dentry)) {
- status = PTR_ERR(dentry);
- goto out_unlock;
- }
- status = -ENOENT;
- if (d_really_is_negative(dentry))
- goto out;
- status = vfs_rmdir(&init_user_ns, d_inode(dir), dentry);
-out:
- dput(dentry);
-out_unlock:
- inode_unlock(d_inode(dir));
+ dentry = start_removing(&nop_mnt_idmap, dir, &QSTR(name));
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ status = vfs_rmdir(&nop_mnt_idmap, d_inode(dir), dentry, NULL);
+ end_removing(dentry);
return status;
}
@@ -392,9 +301,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
if (!nn->rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return legacy_recdir_name_error(clp, status);
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
status = mnt_want_write_file(nn->rec_file);
if (status)
@@ -405,7 +312,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
if (status < 0)
goto out_drop_write;
- status = nfsd4_unlink_clid_dir(dname, HEXDIR_LEN-1, nn);
+ status = nfsd4_unlink_clid_dir(dname, nn);
nfs4_reset_creds(original_cred);
if (status == 0) {
vfs_fsync(nn->rec_file, 0);
@@ -422,18 +329,19 @@ out:
}
static int
-purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
+purge_old(struct dentry *parent, char *cname, struct nfsd_net *nn)
{
int status;
+ struct dentry *child;
struct xdr_netobj name;
- if (child->d_name.len != HEXDIR_LEN - 1) {
- printk("%s: illegal name %pd in recovery directory\n",
- __func__, child);
+ if (strlen(cname) != HEXDIR_LEN - 1) {
+ printk("%s: illegal name %s in recovery directory\n",
+ __func__, cname);
/* Keep trying; maybe the others are OK: */
return 0;
}
- name.data = kmemdup_nul(child->d_name.name, child->d_name.len, GFP_KERNEL);
+ name.data = kstrdup(cname, GFP_KERNEL);
if (!name.data) {
dprintk("%s: failed to allocate memory for name.data!\n",
__func__);
@@ -443,10 +351,17 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
if (nfs4_has_reclaimed_state(name, nn))
goto out_free;
- status = vfs_rmdir(&init_user_ns, d_inode(parent), child);
- if (status)
- printk("failed to remove client recovery directory %pd\n",
- child);
+ inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
+ child = lookup_one(&nop_mnt_idmap, &QSTR(cname), parent);
+ if (!IS_ERR(child)) {
+ status = vfs_rmdir(&nop_mnt_idmap, d_inode(parent), child, NULL);
+ if (status)
+ printk("failed to remove client recovery directory %pd\n",
+ child);
+ dput(child);
+ }
+ inode_unlock(d_inode(parent));
+
out_free:
kfree(name.data);
out:
@@ -477,27 +392,18 @@ out:
}
static int
-load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
+load_recdir(struct dentry *parent, char *cname, struct nfsd_net *nn)
{
- struct xdr_netobj name;
+ struct xdr_netobj name = { .len = HEXDIR_LEN, .data = cname };
struct xdr_netobj princhash = { .len = 0, .data = NULL };
- if (child->d_name.len != HEXDIR_LEN - 1) {
- printk("%s: illegal name %pd in recovery directory\n",
- __func__, child);
+ if (strlen(cname) != HEXDIR_LEN - 1) {
+ printk("%s: illegal name %s in recovery directory\n",
+ __func__, cname);
/* Keep trying; maybe the others are OK: */
return 0;
}
- name.data = kmemdup_nul(child->d_name.name, child->d_name.len, GFP_KERNEL);
- if (!name.data) {
- dprintk("%s: failed to allocate memory for name.data!\n",
- __func__);
- goto out;
- }
- name.len = HEXDIR_LEN;
- if (!nfs4_client_to_reclaim(name, princhash, nn))
- kfree(name.data);
-out:
+ nfs4_client_to_reclaim(name, princhash, nn);
return 0;
}
@@ -658,7 +564,8 @@ nfs4_reset_recoverydir(char *recdir)
return status;
status = -ENOTDIR;
if (d_is_dir(path.dentry)) {
- strcpy(user_recovery_dirname, recdir);
+ strscpy(user_recovery_dirname, recdir,
+ sizeof(user_recovery_dirname));
status = 0;
}
path_put(&path);
@@ -674,7 +581,6 @@ nfs4_recoverydir(void)
static int
nfsd4_check_legacy_client(struct nfs4_client *clp)
{
- int status;
char dname[HEXDIR_LEN];
struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
@@ -684,11 +590,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status) {
- legacy_recdir_name_error(clp, status);
- return status;
- }
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
/* look for it in the reclaim hashtable otherwise */
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
@@ -720,6 +622,7 @@ static const struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
.version = 1,
.msglen = 0,
};
+#endif /* CONFIG_NFSD_LEGACY_CLIENT_TRACKING */
/* Globals */
#define NFSD_PIPE_DIR "nfsd"
@@ -731,8 +634,9 @@ struct cld_net {
spinlock_t cn_lock;
struct list_head cn_list;
unsigned int cn_xid;
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
bool cn_has_legacy;
- struct crypto_shash *cn_tfm;
+#endif
};
struct cld_upcall {
@@ -792,8 +696,9 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
{
uint8_t cmd, princhashlen;
struct xdr_netobj name, princhash = { .len = 0, .data = NULL };
+ char *namecopy __free(kfree) = NULL;
+ char *princhashcopy __free(kfree) = NULL;
uint16_t namelen;
- struct cld_net *cn = nn->cld_net;
if (get_user(cmd, &cmsg->cm_cmd)) {
dprintk("%s: error when copying cmd from userspace", __func__);
@@ -806,17 +711,23 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
ci = &cmsg->cm_u.cm_clntinfo;
if (get_user(namelen, &ci->cc_name.cn_len))
return -EFAULT;
- name.data = memdup_user(&ci->cc_name.cn_id, namelen);
- if (IS_ERR_OR_NULL(name.data))
- return -EFAULT;
+ if (namelen == 0 || namelen > NFS4_OPAQUE_LIMIT) {
+ dprintk("%s: invalid namelen (%u)", __func__, namelen);
+ return -EINVAL;
+ }
+ namecopy = memdup_user(&ci->cc_name.cn_id, namelen);
+ if (IS_ERR(namecopy))
+ return PTR_ERR(namecopy);
+ name.data = namecopy;
name.len = namelen;
get_user(princhashlen, &ci->cc_princhash.cp_len);
if (princhashlen > 0) {
- princhash.data = memdup_user(
- &ci->cc_princhash.cp_data,
- princhashlen);
- if (IS_ERR_OR_NULL(princhash.data))
- return -EFAULT;
+ princhashcopy = memdup_user(
+ &ci->cc_princhash.cp_data,
+ princhashlen);
+ if (IS_ERR(princhashcopy))
+ return PTR_ERR(princhashcopy);
+ princhash.data = princhashcopy;
princhash.len = princhashlen;
} else
princhash.len = 0;
@@ -826,21 +737,27 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
cnm = &cmsg->cm_u.cm_name;
if (get_user(namelen, &cnm->cn_len))
return -EFAULT;
- name.data = memdup_user(&cnm->cn_id, namelen);
- if (IS_ERR_OR_NULL(name.data))
- return -EFAULT;
+ if (namelen == 0 || namelen > NFS4_OPAQUE_LIMIT) {
+ dprintk("%s: invalid namelen (%u)", __func__, namelen);
+ return -EINVAL;
+ }
+ namecopy = memdup_user(&cnm->cn_id, namelen);
+ if (IS_ERR(namecopy))
+ return PTR_ERR(namecopy);
+ name.data = namecopy;
name.len = namelen;
}
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
if (name.len > 5 && memcmp(name.data, "hash:", 5) == 0) {
+ struct cld_net *cn = nn->cld_net;
+
name.len = name.len - 5;
- memmove(name.data, name.data + 5, name.len);
+ name.data = name.data + 5;
cn->cn_has_legacy = true;
}
- if (!nfs4_client_to_reclaim(name, princhash, nn)) {
- kfree(name.data);
- kfree(princhash.data);
+#endif
+ if (!nfs4_client_to_reclaim(name, princhash, nn))
return -EFAULT;
- }
return nn->client_tracking_ops->msglen;
}
return -EFAULT;
@@ -929,38 +846,32 @@ static const struct rpc_pipe_ops cld_upcall_ops = {
.destroy_msg = cld_pipe_destroy_msg,
};
-static struct dentry *
+static int
nfsd4_cld_register_sb(struct super_block *sb, struct rpc_pipe *pipe)
{
- struct dentry *dir, *dentry;
+ struct dentry *dir;
+ int err;
dir = rpc_d_lookup_sb(sb, NFSD_PIPE_DIR);
if (dir == NULL)
- return ERR_PTR(-ENOENT);
- dentry = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
+ return -ENOENT;
+ err = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
dput(dir);
- return dentry;
-}
-
-static void
-nfsd4_cld_unregister_sb(struct rpc_pipe *pipe)
-{
- if (pipe->dentry)
- rpc_unlink(pipe->dentry);
+ return err;
}
-static struct dentry *
+static int
nfsd4_cld_register_net(struct net *net, struct rpc_pipe *pipe)
{
struct super_block *sb;
- struct dentry *dentry;
+ int err;
sb = rpc_get_sb_net(net);
if (!sb)
- return NULL;
- dentry = nfsd4_cld_register_sb(sb, pipe);
+ return 0;
+ err = nfsd4_cld_register_sb(sb, pipe);
rpc_put_sb_net(net);
- return dentry;
+ return err;
}
static void
@@ -970,7 +881,7 @@ nfsd4_cld_unregister_net(struct net *net, struct rpc_pipe *pipe)
sb = rpc_get_sb_net(net);
if (sb) {
- nfsd4_cld_unregister_sb(pipe);
+ rpc_unlink(pipe);
rpc_put_sb_net(net);
}
}
@@ -980,7 +891,6 @@ static int
__nfsd4_init_cld_pipe(struct net *net)
{
int ret;
- struct dentry *dentry;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn;
@@ -1001,14 +911,13 @@ __nfsd4_init_cld_pipe(struct net *net)
spin_lock_init(&cn->cn_lock);
INIT_LIST_HEAD(&cn->cn_list);
- dentry = nfsd4_cld_register_net(net, cn->cn_pipe);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
+ ret = nfsd4_cld_register_net(net, cn->cn_pipe);
+ if (unlikely(ret))
goto err_destroy_data;
- }
- cn->cn_pipe->dentry = dentry;
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
cn->cn_has_legacy = false;
+#endif
nn->cld_net = cn;
return 0;
@@ -1040,8 +949,6 @@ nfsd4_remove_cld_pipe(struct net *net)
nfsd4_cld_unregister_net(net, cn->cn_pipe);
rpc_destroy_pipe_data(cn->cn_pipe);
- if (cn->cn_tfm)
- crypto_free_shash(cn->cn_tfm);
kfree(nn->cld_net);
nn->cld_net = NULL;
}
@@ -1135,8 +1042,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
struct cld_msg_v2 *cmsg;
- struct crypto_shash *tfm = cn->cn_tfm;
- struct xdr_netobj cksum;
char *principal = NULL;
/* Don't upcall if it's already stored */
@@ -1159,22 +1064,9 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal) {
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal),
- cksum.data);
- if (ret) {
- kfree(cksum.data);
- goto out;
- }
- cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = cksum.len;
- memcpy(cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data,
- cksum.data, cksum.len);
- kfree(cksum.data);
+ sha256(principal, strlen(principal),
+ cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data);
+ cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = SHA256_DIGEST_SIZE;
} else
cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0;
@@ -1184,7 +1076,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
}
-out:
free_cld_upcall(cup);
out_err:
if (ret)
@@ -1280,10 +1171,6 @@ nfsd4_cld_check(struct nfs4_client *clp)
{
struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- struct cld_net *cn = nn->cld_net;
- int status;
- char dname[HEXDIR_LEN];
- struct xdr_netobj name;
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
@@ -1294,10 +1181,12 @@ nfsd4_cld_check(struct nfs4_client *clp)
if (crp)
goto found;
- if (cn->cn_has_legacy) {
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return -ENOENT;
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
+ if (nn->cld_net->cn_has_legacy) {
+ char dname[HEXDIR_LEN];
+ struct xdr_netobj name;
+
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
@@ -1312,6 +1201,7 @@ nfsd4_cld_check(struct nfs4_client *clp)
goto found;
}
+#endif
return -ENOENT;
found:
crp->cr_clp = clp;
@@ -1321,14 +1211,11 @@ found:
static int
nfsd4_cld_check_v2(struct nfs4_client *clp)
{
- struct nfs4_client_reclaim *crp;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
struct cld_net *cn = nn->cld_net;
- int status;
- char dname[HEXDIR_LEN];
- struct xdr_netobj name;
- struct crypto_shash *tfm = cn->cn_tfm;
- struct xdr_netobj cksum;
+#endif
+ struct nfs4_client_reclaim *crp;
char *principal = NULL;
/* did we already find that this client is stable? */
@@ -1340,10 +1227,12 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
if (crp)
goto found;
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
if (cn->cn_has_legacy) {
- status = nfs4_make_rec_clidname(dname, &clp->cl_name);
- if (status)
- return -ENOENT;
+ struct xdr_netobj name;
+ char dname[HEXDIR_LEN];
+
+ nfs4_make_rec_clidname(dname, &clp->cl_name);
name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL);
if (!name.data) {
@@ -1358,31 +1247,22 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
goto found;
}
+#endif
return -ENOENT;
found:
if (crp->cr_princhash.len) {
+ u8 digest[SHA256_DIGEST_SIZE];
+
if (clp->cl_cred.cr_raw_principal)
principal = clp->cl_cred.cr_raw_principal;
else if (clp->cl_cred.cr_principal)
principal = clp->cl_cred.cr_principal;
if (principal == NULL)
return -ENOENT;
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL)
- return -ENOENT;
- status = crypto_shash_tfm_digest(tfm, principal,
- strlen(principal), cksum.data);
- if (status) {
- kfree(cksum.data);
- return -ENOENT;
- }
- if (memcmp(crp->cr_princhash.data, cksum.data,
- crp->cr_princhash.len)) {
- kfree(cksum.data);
+ sha256(principal, strlen(principal), digest);
+ if (memcmp(crp->cr_princhash.data, digest,
+ crp->cr_princhash.len))
return -ENOENT;
- }
- kfree(cksum.data);
}
crp->cr_clp = clp;
return 0;
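
The bare sha256() call above is the one-shot helper from <crypto/sha2.h> (backed by lib/crypto): it fills exactly SHA256_DIGEST_SIZE bytes and cannot fail, which is why the shash allocation, the separate digest buffer, and the error unwinding in the deleted lines could simply go away. Roughly:

	/* One-shot library digest; no crypto_alloc_shash()/crypto_free_shash() needed. */
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(principal, strlen(principal), digest);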
@@ -1562,7 +1442,6 @@ nfsd4_cld_tracking_init(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
bool running;
int retries = 10;
- struct crypto_shash *tfm;
status = nfs4_cld_state_init(net);
if (status)
@@ -1587,12 +1466,6 @@ nfsd4_cld_tracking_init(struct net *net)
status = -ETIMEDOUT;
goto err_remove;
}
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- status = PTR_ERR(tfm);
- goto err_remove;
- }
- nn->cld_net->cn_tfm = tfm;
status = nfsd4_cld_get_version(nn);
if (status == -EOPNOTSUPP)
@@ -1661,6 +1534,7 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2 = {
.msglen = sizeof(struct cld_msg_v2),
};
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
/* upcall via usermodehelper */
static char cltrack_prog[PATH_MAX] = "/sbin/nfsdcltrack";
module_param_string(cltrack_prog, cltrack_prog, sizeof(cltrack_prog),
@@ -1731,11 +1605,7 @@ nfsd4_cltrack_legacy_recdir(const struct xdr_netobj *name)
return NULL;
}
- copied = nfs4_make_rec_clidname(result + copied, name);
- if (copied) {
- kfree(result);
- return NULL;
- }
+ nfs4_make_rec_clidname(result + copied, name);
return result;
}
@@ -1878,10 +1748,7 @@ nfsd4_cltrack_upcall_lock(struct nfs4_client *clp)
static void
nfsd4_cltrack_upcall_unlock(struct nfs4_client *clp)
{
- smp_mb__before_atomic();
- clear_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
- smp_mb__after_atomic();
- wake_up_bit(&clp->cl_flags, NFSD4_CLIENT_UPCALL_LOCK);
+ clear_and_wake_up_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
}
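
clear_and_wake_up_bit() is the generic helper from <linux/wait_bit.h>; it roughly expands to the sequence below, preserving the release ordering that the removed open-coded barriers were providing:

	clear_bit_unlock(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
	smp_mb__after_atomic();	/* pairs with the waiter; see wake_up_bit() */
	wake_up_bit(&clp->cl_flags, NFSD4_CLIENT_UPCALL_LOCK);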
static void
@@ -2005,28 +1872,10 @@ static const struct nfsd4_client_tracking_ops nfsd4_umh_tracking_ops = {
.msglen = 0,
};
-int
-nfsd4_client_tracking_init(struct net *net)
+static inline int check_for_legacy_methods(int status, struct net *net)
{
- int status;
- struct path path;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- /* just run the init if the method is already decided */
- if (nn->client_tracking_ops)
- goto do_init;
-
- /* First, try to use nfsdcld */
- nn->client_tracking_ops = &nfsd4_cld_tracking_ops;
- status = nn->client_tracking_ops->init(net);
- if (!status)
- return status;
- if (status != -ETIMEDOUT) {
- nn->client_tracking_ops = &nfsd4_cld_tracking_ops_v0;
- status = nn->client_tracking_ops->init(net);
- if (!status)
- return status;
- }
+ struct path path;
/*
* Next, try the UMH upcall.
@@ -2043,20 +1892,51 @@ nfsd4_client_tracking_init(struct net *net)
nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
if (!status) {
- status = d_is_dir(path.dentry);
+ status = !d_is_dir(path.dentry);
path_put(&path);
- if (!status) {
- status = -EINVAL;
- goto out;
- }
+ if (status)
+ return -ENOTDIR;
+ }
+ return status;
+}
+#else
+static inline int check_for_legacy_methods(int status, struct net *net)
+{
+ return status;
+}
+#endif /* CONFIG_NFSD_LEGACY_CLIENT_TRACKING */
+
+int
+nfsd4_client_tracking_init(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int status;
+
+ /* just run the init if the method is already decided */
+ if (nn->client_tracking_ops)
+ goto do_init;
+
+ /* First, try to use nfsdcld */
+ nn->client_tracking_ops = &nfsd4_cld_tracking_ops;
+ status = nn->client_tracking_ops->init(net);
+ if (!status)
+ return status;
+ if (status != -ETIMEDOUT) {
+ nn->client_tracking_ops = &nfsd4_cld_tracking_ops_v0;
+ status = nn->client_tracking_ops->init(net);
+ if (!status)
+ return status;
}
+ status = check_for_legacy_methods(status, net);
+ if (status)
+ goto out;
do_init:
status = nn->client_tracking_ops->init(net);
out:
if (status) {
- printk(KERN_WARNING "NFSD: Unable to initialize client "
- "recovery tracking! (%d)\n", status);
+ pr_warn("NFSD: Unable to initialize client recovery tracking! (%d)\n", status);
+ pr_warn("NFSD: Is nfsdcld running? If not, enable CONFIG_NFSD_LEGACY_CLIENT_TRACKING.\n");
nn->client_tracking_ops = NULL;
}
return status;
@@ -2117,7 +1997,6 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
struct net *net = sb->s_fs_info;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
- struct dentry *dentry;
int ret = 0;
if (!try_module_get(THIS_MODULE))
@@ -2130,16 +2009,10 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
switch (event) {
case RPC_PIPEFS_MOUNT:
- dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- break;
- }
- cn->cn_pipe->dentry = dentry;
+ ret = nfsd4_cld_register_sb(sb, cn->cn_pipe);
break;
case RPC_PIPEFS_UMOUNT:
- if (cn->cn_pipe->dentry)
- nfsd4_cld_unregister_sb(cn->cn_pipe);
+ rpc_unlink(cn->cn_pipe);
break;
default:
ret = -ENOTSUPP;
@@ -2156,6 +2029,7 @@ static struct notifier_block nfsd4_cld_block = {
int
register_cld_notifier(void)
{
+ WARN_ON(!nfsd_net_id);
return rpc_pipefs_notifier_register(&nfsd4_cld_block);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index bfad94c70b84..808c24fb5c9a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -44,7 +44,9 @@
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
+#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>
+
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
@@ -57,7 +59,7 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
-#define all_ones {{~0,~0},~0}
+#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
.si_opaque = all_ones,
@@ -84,6 +86,8 @@ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
+static void nfsd4_file_hash_remove(struct nfs4_file *fi);
+static void deleg_reaper(struct nfsd_net *nn);
/* Locking: */
@@ -124,17 +128,35 @@ static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
+static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;
+
+static struct workqueue_struct *laundry_wq;
+
+int nfsd4_create_laundry_wq(void)
+{
+ int rc = 0;
+
+ laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
+ if (laundry_wq == NULL)
+ rc = -ENOMEM;
+ return rc;
+}
+
+void nfsd4_destroy_laundry_wq(void)
+{
+ destroy_workqueue(laundry_wq);
+}
static bool is_session_dead(struct nfsd4_session *ses)
{
- return ses->se_flags & NFS4_SESSION_DEAD;
+ return ses->se_dead;
}
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
if (atomic_read(&ses->se_ref) > ref_held_by_me)
return nfserr_jukebox;
- ses->se_flags |= NFS4_SESSION_DEAD;
+ ses->se_dead = true;
return nfs_ok;
}
@@ -143,6 +165,13 @@ static bool is_client_expired(struct nfs4_client *clp)
return clp->cl_time == 0;
}
+static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
+ struct nfs4_client *clp)
+{
+ if (clp->cl_state != NFSD4_ACTIVE)
+ atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
+}
+
static __be32 get_client_locked(struct nfs4_client *clp)
{
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
@@ -152,6 +181,8 @@ static __be32 get_client_locked(struct nfs4_client *clp)
if (is_client_expired(clp))
return nfserr_expired;
atomic_inc(&clp->cl_rpc_users);
+ nfsd4_dec_courtesy_client_count(nn, clp);
+ clp->cl_state = NFSD4_ACTIVE;
return nfs_ok;
}
@@ -172,6 +203,8 @@ renew_client_locked(struct nfs4_client *clp)
list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = ktime_get_boottime_seconds();
+ nfsd4_dec_courtesy_client_count(nn, clp);
+ clp->cl_state = NFSD4_ACTIVE;
}
static void put_client_renew_locked(struct nfs4_client *clp)
@@ -246,6 +279,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
+ WARN_ON(list_empty(&cur->nbl_lru));
list_del_init(&cur->nbl_lru);
found = cur;
break;
@@ -265,12 +299,13 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
nbl = find_blocked_lock(lo, fh, nn);
if (!nbl) {
- nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
+ nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
INIT_LIST_HEAD(&nbl->nbl_list);
INIT_LIST_HEAD(&nbl->nbl_lru);
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
+ kref_init(&nbl->nbl_kref);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
&nfsd4_cb_notify_lock_ops,
NFSPROC4_CLNT_CB_NOTIFY_LOCK);
@@ -280,14 +315,23 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
}
static void
-free_blocked_lock(struct nfsd4_blocked_lock *nbl)
+free_nbl(struct kref *kref)
{
- locks_delete_block(&nbl->nbl_lock);
+ struct nfsd4_blocked_lock *nbl;
+
+ nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
locks_release_private(&nbl->nbl_lock);
kfree(nbl);
}
static void
+free_blocked_lock(struct nfsd4_blocked_lock *nbl)
+{
+ locks_delete_block(&nbl->nbl_lock);
+ kref_put(&nbl->nbl_kref, free_nbl);
+}
+
+static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
@@ -302,6 +346,7 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
struct nfsd4_blocked_lock,
nbl_list);
list_del_init(&nbl->nbl_list);
+ WARN_ON(list_empty(&nbl->nbl_lru));
list_move(&nbl->nbl_lru, &reaplist);
}
spin_unlock(&nn->blocked_locks_lock);
@@ -326,6 +371,8 @@ nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
+ trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
+
/*
* Since this is just an optimization, we don't try very hard if it
* turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
@@ -353,6 +400,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
.prepare = nfsd4_cb_notify_lock_prepare,
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
+ .opcode = OP_CB_NOTIFY_LOCK,
};
/*
@@ -360,11 +408,13 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
* st_{access,deny}_bmap field of the stateid, in order to track not
* only what share bits are currently in force, but also what
* combinations of share bits previous opens have used. This allows us
- * to enforce the recommendation of rfc 3530 14.2.19 that the server
- * return an error if the client attempt to downgrade to a combination
- * of share bits not explicable by closing some of its previous opens.
+ * to enforce the recommendation in
+ * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
+ * the server return an error if the client attempt to downgrade to a
+ * combination of share bits not explicable by closing some of its
+ * previous opens.
*
- * XXX: This enforcement is actually incomplete, since we don't keep
+ * This enforcement is arguably incomplete, since we don't keep
* track of access/deny bit combinations; so, e.g., we allow:
*
* OPEN allow read, deny write
@@ -372,6 +422,10 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
* DOWNGRADE allow read, deny none
*
* which we should reject.
+ *
+ * But you could also argue that our current code is already overkill,
+ * since it only exists to return NFS4ERR_INVAL on incorrect client
+ * behavior.
*/
static unsigned int
bmap_to_share_mode(unsigned long bmap)
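The comment above describes recording, per stateid, which share modes earlier OPENs actually used so that an OPEN_DOWNGRADE to a combination that was never used can be rejected. A minimal userspace sketch of that bookkeeping follows, assuming only the standard NFSv4 share values (READ=1, WRITE=2, BOTH=3); the names are illustrative, not the nfsd ones.

#include <stdbool.h>

#define SHARE_READ  1
#define SHARE_WRITE 2
#define SHARE_BOTH  3

/* Bit i is set once an OPEN with share mode i has been seen. */
static void record_open(unsigned long *bmap, unsigned int mode)
{
	*bmap |= 1UL << mode;
}

/* OR the recorded modes back together: the effective access/deny mode. */
static unsigned int bmap_to_mode(unsigned long bmap)
{
	unsigned int mode = 0;

	for (unsigned int i = SHARE_READ; i <= SHARE_BOTH; i++)
		if (bmap & (1UL << i))
			mode |= i;
	return mode;
}

/* A downgrade to 'want' is only explicable if some previous OPEN used it. */
static bool downgrade_explicable(unsigned long bmap, unsigned int want)
{
	return bmap & (1UL << want);
}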
@@ -488,7 +542,7 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
}
static struct nfs4_openowner *
-find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
+find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_stateowner *so;
@@ -505,18 +559,6 @@ find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
return NULL;
}
-static struct nfs4_openowner *
-find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
- struct nfs4_client *clp)
-{
- struct nfs4_openowner *oo;
-
- spin_lock(&clp->cl_lock);
- oo = find_openstateowner_str_locked(hashval, open, clp);
- spin_unlock(&clp->cl_lock);
- return oo;
-}
-
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
@@ -530,45 +572,27 @@ opaque_hashval(const void *ptr, int nbytes)
return x;
}
-static void nfsd4_free_file_rcu(struct rcu_head *rcu)
-{
- struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
-
- kmem_cache_free(file_slab, fp);
-}
-
void
put_nfs4_file(struct nfs4_file *fi)
{
- might_lock(&state_lock);
-
- if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
- hlist_del_rcu(&fi->fi_hash);
- spin_unlock(&state_lock);
+ if (refcount_dec_and_test(&fi->fi_ref)) {
+ nfsd4_file_hash_remove(fi);
WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
- call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
+ kfree_rcu(fi, fi_rcu);
}
}
static struct nfsd_file *
-__nfs4_get_fd(struct nfs4_file *f, int oflag)
-{
- if (f->fi_fds[oflag])
- return nfsd_file_get(f->fi_fds[oflag]);
- return NULL;
-}
-
-static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
struct nfsd_file *ret;
lockdep_assert_held(&f->fi_lock);
- ret = __nfs4_get_fd(f, O_WRONLY);
+ ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
if (!ret)
- ret = __nfs4_get_fd(f, O_RDWR);
+ ret = nfsd_file_get(f->fi_fds[O_RDWR]);
return ret;
}
@@ -591,9 +615,9 @@ find_readable_file_locked(struct nfs4_file *f)
lockdep_assert_held(&f->fi_lock);
- ret = __nfs4_get_fd(f, O_RDONLY);
+ ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
if (!ret)
- ret = __nfs4_get_fd(f, O_RDWR);
+ ret = nfsd_file_get(f->fi_fds[O_RDWR]);
return ret;
}
@@ -617,25 +641,27 @@ find_any_file(struct nfs4_file *f)
if (!f)
return NULL;
spin_lock(&f->fi_lock);
- ret = __nfs4_get_fd(f, O_RDWR);
+ ret = nfsd_file_get(f->fi_fds[O_RDWR]);
if (!ret) {
- ret = __nfs4_get_fd(f, O_WRONLY);
+ ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
if (!ret)
- ret = __nfs4_get_fd(f, O_RDONLY);
+ ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
}
spin_unlock(&f->fi_lock);
return ret;
}
-static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
+static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
- struct nfsd_file *ret = NULL;
+ lockdep_assert_held(&f->fi_lock);
- spin_lock(&f->fi_lock);
- if (f->fi_deleg_file)
- ret = nfsd_file_get(f->fi_deleg_file);
- spin_unlock(&f->fi_lock);
- return ret;
+ if (f->fi_fds[O_RDWR])
+ return f->fi_fds[O_RDWR];
+ if (f->fi_fds[O_WRONLY])
+ return f->fi_fds[O_WRONLY];
+ if (f->fi_fds[O_RDONLY])
+ return f->fi_fds[O_RDONLY];
+ return NULL;
}
static atomic_long_t num_delegations;
@@ -658,20 +684,72 @@ static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
return ret & OWNER_HASH_MASK;
}
-/* hash table for nfs4_file */
-#define FILE_HASH_BITS 8
-#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
+static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
+
+static const struct rhashtable_params nfs4_file_rhash_params = {
+ .key_len = sizeof_field(struct nfs4_file, fi_inode),
+ .key_offset = offsetof(struct nfs4_file, fi_inode),
+ .head_offset = offsetof(struct nfs4_file, fi_rlist),
+
+ /*
+ * Start with a single page hash table to reduce resizing churn
+ * on light workloads.
+ */
+ .min_size = 256,
+ .automatic_shrinking = true,
+};
-static unsigned int file_hashval(struct svc_fh *fh)
+/*
+ * Check if courtesy clients have conflicting access and resolve it if possible
+ *
+ * access: is op_share_access if share_access is true.
+ * Check if access mode, op_share_access, would conflict with
+ * the current deny mode of the file 'fp'.
+ * access: is op_share_deny if share_access is false.
+ * Check if the deny mode, op_share_deny, would conflict with
+ * current access of the file 'fp'.
+ * stp: skip checking this entry.
+ * new_stp: normal open, not open upgrade.
+ *
+ * Function returns:
+ * false - access/deny mode conflict with normal client.
+ * true - no conflict or conflict with courtesy client(s) is resolved.
+ */
+static bool
+nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
+ struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
- struct inode *inode = d_inode(fh->fh_dentry);
+ struct nfs4_ol_stateid *st;
+ bool resolvable = true;
+ unsigned char bmap;
+ struct nfsd_net *nn;
+ struct nfs4_client *clp;
- /* XXX: why not (here & in file cache) use inode? */
- return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
+ lockdep_assert_held(&fp->fi_lock);
+ list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
+ /* ignore lock stateid */
+ if (st->st_openstp)
+ continue;
+ if (st == stp && new_stp)
+ continue;
+ /* check file access against deny mode or vice versa */
+ bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
+ if (!(access & bmap_to_share_mode(bmap)))
+ continue;
+ clp = st->st_stid.sc_client;
+ if (try_to_expire_client(clp))
+ continue;
+ resolvable = false;
+ break;
+ }
+ if (resolvable) {
+ clp = stp->st_stid.sc_client;
+ nn = net_generic(clp->net, nfsd_net_id);
+ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+ }
+ return resolvable;
}
-static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
-
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
@@ -856,15 +934,6 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
spin_lock_init(&stid->sc_lock);
INIT_LIST_HEAD(&stid->sc_cp_list);
- /*
- * It shouldn't be a problem to reuse an opaque stateid value.
- * I don't think it is for 4.1. But with 4.0 I worry that, for
- * example, a stray write retransmission could be accepted by
- * the server when it should have been rejected. Therefore,
- * adopt a trick from the sctp code to attempt to maximize the
- * amount of time until an id is reused, by ensuring they always
- * "increase" (mod INT_MAX):
- */
return stid;
out_free:
kmem_cache_free(slab, stid);
@@ -875,23 +944,23 @@ out_free:
* Create a unique stateid_t to represent each COPY.
*/
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
- unsigned char sc_type)
+ unsigned char cs_type)
{
int new_id;
- stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
- stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
- stid->sc_type = sc_type;
+ stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
+ stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
idr_preload(GFP_KERNEL);
spin_lock(&nn->s2s_cp_lock);
new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
- stid->stid.si_opaque.so_id = new_id;
- stid->stid.si_generation = 1;
+ stid->cs_stid.si_opaque.so_id = new_id;
+ stid->cs_stid.si_generation = 1;
spin_unlock(&nn->s2s_cp_lock);
idr_preload_end();
if (new_id < 0)
return 0;
+ stid->cs_type = cs_type;
return 1;
}
@@ -909,7 +978,7 @@ struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
if (!cps)
return NULL;
cps->cpntf_time = ktime_get_boottime_seconds();
- refcount_set(&cps->cp_stateid.sc_count, 1);
+ refcount_set(&cps->cp_stateid.cs_count, 1);
if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
goto out_free;
spin_lock(&nn->s2s_cp_lock);
@@ -925,11 +994,12 @@ void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
struct nfsd_net *nn;
- WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
+ if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
+ return;
nn = net_generic(copy->cp_clp->net, nfsd_net_id);
spin_lock(&nn->s2s_cp_lock);
idr_remove(&nn->s2s_cp_stateids,
- copy->cp_stateid.stid.si_opaque.so_id);
+ copy->cp_stateid.cs_stid.si_opaque.so_id);
spin_unlock(&nn->s2s_cp_lock);
}
@@ -959,8 +1029,20 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
return openlockstateid(stid);
}
+/*
+ * As the sc_free callback of deleg, this may be called by nfs4_put_stid
+ * in nfsd_break_one_deleg.
+ * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
+ * this function mustn't ever sleep.
+ */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
+ struct nfs4_delegation *dp = delegstateid(stid);
+
+ WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
+ WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
+ WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
+ WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
kmem_cache_free(deleg_slab, stid);
atomic_long_dec(&num_delegations);
}
@@ -974,7 +1056,8 @@ static void nfs4_free_deleg(struct nfs4_stid *stid)
* When a delegation is recalled, the filehandle is stored in the "new"
* filter.
* Every 30 seconds we swap the filters and clear the "new" one,
- * unless both are empty of course.
+ * unless both are empty of course. This results in delegations for a
+ * given filehandle being blocked for between 30 and 60 seconds.
*
* Each filter is 256 bits. We hash the filehandle to 32bit and use the
* low 3 bytes as hash-table indices.
@@ -1003,9 +1086,9 @@ static int delegation_blocked(struct knfsd_fh *fh)
if (ktime_get_seconds() - bd->swap_time > 30) {
bd->entries -= bd->old_entries;
bd->old_entries = bd->entries;
+ bd->new = 1-bd->new;
memset(bd->set[bd->new], 0,
sizeof(bd->set[0]));
- bd->new = 1-bd->new;
bd->swap_time = ktime_get_seconds();
}
spin_unlock(&blocked_delegations_lock);
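The two hunks above concern the "blocked delegations" filter described in the earlier comment: inserts go into the "new" generation, lookups consult both, and every 30 seconds the generations swap; the reordering in this hunk makes the generation that is about to receive new entries the one that gets cleared. A simplified userspace model of the idea, with illustrative names and sizes rather than the kernel's:

#include <stdbool.h>
#include <string.h>
#include <time.h>

#define GEN_SLOTS 4096		/* illustrative size, not the kernel's */

struct blocked_filter {
	bool set[2][GEN_SLOTS];	/* two generations of "recently recalled" */
	int new_gen;		/* generation currently taking inserts */
	time_t swap_time;
};

static void maybe_swap(struct blocked_filter *f)
{
	if (time(NULL) - f->swap_time > 30) {
		f->new_gen = 1 - f->new_gen;
		memset(f->set[f->new_gen], 0, sizeof(f->set[0]));
		f->swap_time = time(NULL);
	}
}

static void block_hash(struct blocked_filter *f, unsigned int hash)
{
	maybe_swap(f);
	f->set[f->new_gen][hash % GEN_SLOTS] = true;
}

static bool is_blocked(struct blocked_filter *f, unsigned int hash)
{
	maybe_swap(f);
	return f->set[0][hash % GEN_SLOTS] || f->set[1][hash % GEN_SLOTS];
}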
@@ -1043,21 +1126,22 @@ static void block_delegations(struct knfsd_fh *fh)
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
- struct svc_fh *current_fh,
- struct nfs4_clnt_odstate *odstate)
+ struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
struct nfs4_delegation *dp;
+ struct nfs4_stid *stid;
long n;
dprintk("NFSD alloc_init_deleg\n");
n = atomic_long_inc_return(&num_delegations);
if (n < 0 || n > max_delegations)
goto out_dec;
- if (delegation_blocked(&current_fh->fh_handle))
+ if (delegation_blocked(&fp->fi_fhandle))
goto out_dec;
- dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
- if (dp == NULL)
+ stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
+ if (stid == NULL)
goto out_dec;
+ dp = delegstateid(stid);
/*
* delegation seqid's are never incremented. The 4.1 special
@@ -1070,10 +1154,14 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
INIT_LIST_HEAD(&dp->dl_recall_lru);
dp->dl_clnt_odstate = odstate;
get_clnt_odstate(odstate);
- dp->dl_type = NFS4_OPEN_DELEGATE_READ;
+ dp->dl_type = dl_type;
dp->dl_retries = 1;
+ dp->dl_recalled = false;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
&nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
+ nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
+ &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
+ dp->dl_cb_fattr.ncf_file_modified = false;
get_nfs4_file(fp);
dp->dl_stid.sc_file = fp;
return dp;
@@ -1095,6 +1183,8 @@ nfs4_put_stid(struct nfs4_stid *s)
return;
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+ if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
+ atomic_dec(&s->sc_client->cl_admin_revoked);
nfs4_free_cpntf_statelist(clp->net, s);
spin_unlock(&clp->cl_lock);
s->sc_free(s);
@@ -1116,15 +1206,56 @@ nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
static void put_deleg_file(struct nfs4_file *fp)
{
+ struct nfsd_file *rnf = NULL;
struct nfsd_file *nf = NULL;
spin_lock(&fp->fi_lock);
- if (--fp->fi_delegees == 0)
+ if (--fp->fi_delegees == 0) {
swap(nf, fp->fi_deleg_file);
+ swap(rnf, fp->fi_rdeleg_file);
+ }
spin_unlock(&fp->fi_lock);
if (nf)
nfsd_file_put(nf);
+ if (rnf)
+ nfs4_file_put_access(fp, NFS4_SHARE_ACCESS_READ);
+}
+
+static void nfsd4_finalize_deleg_timestamps(struct nfs4_delegation *dp, struct file *f)
+{
+ struct iattr ia = { .ia_valid = ATTR_ATIME | ATTR_CTIME | ATTR_MTIME };
+ struct inode *inode = file_inode(f);
+ int ret;
+
+ /* don't do anything if FMODE_NOCMTIME isn't set */
+ if ((READ_ONCE(f->f_mode) & FMODE_NOCMTIME) == 0)
+ return;
+
+ spin_lock(&f->f_lock);
+ f->f_mode &= ~FMODE_NOCMTIME;
+ spin_unlock(&f->f_lock);
+
+ /* was it never written? */
+ if (!dp->dl_written)
+ return;
+
+ /* did it get a setattr for the timestamps at some point? */
+ if (dp->dl_setattr)
+ return;
+
+ /* Stamp everything to "now" */
+ inode_lock(inode);
+ ret = notify_change(&nop_mnt_idmap, f->f_path.dentry, &ia, NULL);
+ inode_unlock(inode);
+ if (ret) {
+ struct inode *inode = file_inode(f);
+
+ pr_notice_ratelimited("Unable to update timestamps on inode %02x:%02x:%lu: %d\n",
+ MAJOR(inode->i_sb->s_dev),
+ MINOR(inode->i_sb->s_dev),
+ inode->i_ino, ret);
+ }
}
static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
@@ -1134,7 +1265,8 @@ static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
WARN_ON_ONCE(!fp->fi_delegees);
- vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
+ nfsd4_finalize_deleg_timestamps(dp, nf->nf_file);
+ kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
put_deleg_file(fp);
}
@@ -1145,11 +1277,6 @@ static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
nfs4_put_stid(&dp->dl_stid);
}
-void nfs4_unhash_stid(struct nfs4_stid *s)
-{
- s->sc_type = 0;
-}
-
/**
* nfs4_delegation_exists - Discover if this delegation already exists
* @clp: a pointer to the nfs4_client we're granting a delegation to
@@ -1197,27 +1324,39 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
lockdep_assert_held(&state_lock);
lockdep_assert_held(&fp->fi_lock);
+ lockdep_assert_held(&clp->cl_lock);
if (nfs4_delegation_exists(clp, fp))
return -EAGAIN;
refcount_inc(&dp->dl_stid.sc_count);
- dp->dl_stid.sc_type = NFS4_DELEG_STID;
+ dp->dl_stid.sc_type = SC_TYPE_DELEG;
list_add(&dp->dl_perfile, &fp->fi_delegations);
list_add(&dp->dl_perclnt, &clp->cl_delegations);
return 0;
}
+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+ return !(list_empty(&dp->dl_perfile));
+}
+
static bool
-unhash_delegation_locked(struct nfs4_delegation *dp)
+unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
lockdep_assert_held(&state_lock);
- if (list_empty(&dp->dl_perfile))
+ if (!delegation_hashed(dp))
return false;
- dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
+ if (statusmask == SC_STATUS_REVOKED &&
+ dp->dl_stid.sc_client->cl_minorversion == 0)
+ statusmask = SC_STATUS_CLOSED;
+ dp->dl_stid.sc_status |= statusmask;
+ if (statusmask & SC_STATUS_ADMIN_REVOKED)
+ atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked);
+
/* Ensure that deleg break won't try to requeue it */
++dp->dl_time;
spin_lock(&fp->fi_lock);
@@ -1233,30 +1372,59 @@ static void destroy_delegation(struct nfs4_delegation *dp)
bool unhashed;
spin_lock(&state_lock);
- unhashed = unhash_delegation_locked(dp);
+ unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED);
spin_unlock(&state_lock);
if (unhashed)
destroy_unhashed_deleg(dp);
}
+/**
+ * revoke_delegation - perform nfs4 delegation structure cleanup
+ * @dp: pointer to the delegation
+ *
+ * This function assumes that it's called either from the administrative
+ * interface (nfsd4_revoke_states()) that's revoking a specific delegation
+ * stateid or it's called from a laundromat thread (nfsd4_landromat()) that
+ * determined that this specific state has expired and needs to be revoked
+ * (both mark state with the appropriate stid sc_status mode). It is also
+ * assumed that a reference was taken on the @dp state.
+ *
+ * If this function finds that the @dp state is SC_STATUS_FREED it means
+ * that a FREE_STATEID operation for this stateid has been processed and
+ * we can proceed to removing it from recalled list. However, if @dp state
+ * isn't marked SC_STATUS_FREED, it means we need to place it on the cl_revoked
+ * list and wait for the FREE_STATEID to arrive from the client. At the same
+ * time, we need to mark it as SC_STATUS_FREEABLE to indicate to the
+ * nfsd4_free_stateid() function that this stateid has already been added
+ * to the cl_revoked list and that nfsd4_free_stateid() is now responsible
+ * for removing it from the list. Inspection of where the delegation state
+ * is in the revocation process is protected by the clp->cl_lock.
+ */
static void revoke_delegation(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_stid.sc_client;
WARN_ON(!list_empty(&dp->dl_recall_lru));
+ WARN_ON_ONCE(dp->dl_stid.sc_client->cl_minorversion > 0 &&
+ !(dp->dl_stid.sc_status &
+ (SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)));
- if (clp->cl_minorversion) {
- dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
- refcount_inc(&dp->dl_stid.sc_count);
- spin_lock(&clp->cl_lock);
- list_add(&dp->dl_recall_lru, &clp->cl_revoked);
- spin_unlock(&clp->cl_lock);
+ trace_nfsd_stid_revoke(&dp->dl_stid);
+
+ spin_lock(&clp->cl_lock);
+ if (dp->dl_stid.sc_status & SC_STATUS_FREED) {
+ list_del_init(&dp->dl_recall_lru);
+ goto out;
}
+ list_add(&dp->dl_recall_lru, &clp->cl_revoked);
+ dp->dl_stid.sc_status |= SC_STATUS_FREEABLE;
+out:
+ spin_unlock(&clp->cl_lock);
destroy_unhashed_deleg(dp);
}
-/*
- * SETCLIENTID state
+/*
+ * SETCLIENTID state
*/
static unsigned int clientid_hashval(u32 id)
@@ -1277,11 +1445,16 @@ static void
recalculate_deny_mode(struct nfs4_file *fp)
{
struct nfs4_ol_stateid *stp;
+ u32 old_deny;
spin_lock(&fp->fi_lock);
+ old_deny = fp->fi_share_deny;
fp->fi_share_deny = 0;
- list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
+ list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
+ if (fp->fi_share_deny == old_deny)
+ break;
+ }
spin_unlock(&fp->fi_lock);
}
@@ -1369,6 +1542,8 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
+ if (!list_empty(&stid->sc_cp_list))
+ nfs4_free_cpntf_statelist(stid->sc_client->net, stid);
kmem_cache_free(stateid_slab, stid);
}
@@ -1408,6 +1583,8 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+ if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
+ atomic_dec(&s->sc_client->cl_admin_revoked);
list_add(&stp->st_locks, reaplist);
}
@@ -1418,7 +1595,7 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
if (!unhash_ol_stateid(stp))
return false;
list_del_init(&stp->st_locks);
- nfs4_unhash_stid(&stp->st_stid);
+ stp->st_stid.sc_status |= SC_STATUS_CLOSED;
return true;
}
@@ -1476,7 +1653,7 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
while (!list_empty(&open_stp->st_locks)) {
stp = list_entry(open_stp->st_locks.next,
struct nfs4_ol_stateid, st_locks);
- WARN_ON(!unhash_lock_stateid(stp));
+ unhash_lock_stateid(stp);
put_ol_stateid_locked(stp, reaplist);
}
}
@@ -1497,12 +1674,21 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
LIST_HEAD(reaplist);
spin_lock(&stp->st_stid.sc_client->cl_lock);
+ stp->st_stid.sc_status |= SC_STATUS_CLOSED;
if (unhash_open_stateid(stp, &reaplist))
put_ol_stateid_locked(stp, &reaplist);
spin_unlock(&stp->st_stid.sc_client->cl_lock);
free_ol_stateid_reaplist(&reaplist);
}
+static bool nfs4_openowner_unhashed(struct nfs4_openowner *oo)
+{
+ lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+
+ return list_empty(&oo->oo_owner.so_strhash) &&
+ list_empty(&oo->oo_perclient);
+}
+
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
struct nfs4_client *clp = oo->oo_owner.so_client;
@@ -1534,9 +1720,7 @@ static void release_openowner(struct nfs4_openowner *oo)
{
struct nfs4_ol_stateid *stp;
struct nfs4_client *clp = oo->oo_owner.so_client;
- struct list_head reaplist;
-
- INIT_LIST_HEAD(&reaplist);
+ LIST_HEAD(reaplist);
spin_lock(&clp->cl_lock);
unhash_openowner_locked(oo);
@@ -1552,6 +1736,137 @@ static void release_openowner(struct nfs4_openowner *oo)
nfs4_put_stateowner(&oo->oo_owner);
}
+static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
+ struct super_block *sb,
+ unsigned int sc_types)
+{
+ unsigned long id, tmp;
+ struct nfs4_stid *stid;
+
+ spin_lock(&clp->cl_lock);
+ idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
+ if ((stid->sc_type & sc_types) &&
+ stid->sc_status == 0 &&
+ stid->sc_file->fi_inode->i_sb == sb) {
+ refcount_inc(&stid->sc_count);
+ break;
+ }
+ spin_unlock(&clp->cl_lock);
+ return stid;
+}
+
+/**
+ * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
+ * @net: used to identify instance of nfsd (there is one per net namespace)
+ * @sb: super_block used to identify target filesystem
+ *
+ * All nfs4 states (open, lock, delegation, layout) held by the server instance
+ * and associated with a file on the given filesystem will be revoked resulting
+ * in any files being closed and so all references from nfsd to the filesystem
+ * being released. Thus nfsd will no longer prevent the filesystem from being
+ * unmounted.
+ *
+ * The clients which own the states will subsequently be notified that the
+ * states have been "admin-revoked".
+ */
+void nfsd4_revoke_states(struct net *net, struct super_block *sb)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ unsigned int idhashval;
+ unsigned int sc_types;
+
+ sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT;
+
+ spin_lock(&nn->client_lock);
+ for (idhashval = 0; idhashval < CLIENT_HASH_MASK; idhashval++) {
+ struct list_head *head = &nn->conf_id_hashtbl[idhashval];
+ struct nfs4_client *clp;
+ retry:
+ list_for_each_entry(clp, head, cl_idhash) {
+ struct nfs4_stid *stid = find_one_sb_stid(clp, sb,
+ sc_types);
+ if (stid) {
+ struct nfs4_ol_stateid *stp;
+ struct nfs4_delegation *dp;
+ struct nfs4_layout_stateid *ls;
+
+ spin_unlock(&nn->client_lock);
+ switch (stid->sc_type) {
+ case SC_TYPE_OPEN:
+ stp = openlockstateid(stid);
+ mutex_lock_nested(&stp->st_mutex,
+ OPEN_STATEID_MUTEX);
+
+ spin_lock(&clp->cl_lock);
+ if (stid->sc_status == 0) {
+ stid->sc_status |=
+ SC_STATUS_ADMIN_REVOKED;
+ atomic_inc(&clp->cl_admin_revoked);
+ spin_unlock(&clp->cl_lock);
+ release_all_access(stp);
+ } else
+ spin_unlock(&clp->cl_lock);
+ mutex_unlock(&stp->st_mutex);
+ break;
+ case SC_TYPE_LOCK:
+ stp = openlockstateid(stid);
+ mutex_lock_nested(&stp->st_mutex,
+ LOCK_STATEID_MUTEX);
+ spin_lock(&clp->cl_lock);
+ if (stid->sc_status == 0) {
+ struct nfs4_lockowner *lo =
+ lockowner(stp->st_stateowner);
+ struct nfsd_file *nf;
+
+ stid->sc_status |=
+ SC_STATUS_ADMIN_REVOKED;
+ atomic_inc(&clp->cl_admin_revoked);
+ spin_unlock(&clp->cl_lock);
+ nf = find_any_file(stp->st_stid.sc_file);
+ if (nf) {
+ get_file(nf->nf_file);
+ filp_close(nf->nf_file,
+ (fl_owner_t)lo);
+ nfsd_file_put(nf);
+ }
+ release_all_access(stp);
+ } else
+ spin_unlock(&clp->cl_lock);
+ mutex_unlock(&stp->st_mutex);
+ break;
+ case SC_TYPE_DELEG:
+ refcount_inc(&stid->sc_count);
+ dp = delegstateid(stid);
+ spin_lock(&state_lock);
+ if (!unhash_delegation_locked(
+ dp, SC_STATUS_ADMIN_REVOKED))
+ dp = NULL;
+ spin_unlock(&state_lock);
+ if (dp)
+ revoke_delegation(dp);
+ break;
+ case SC_TYPE_LAYOUT:
+ ls = layoutstateid(stid);
+ nfsd4_close_layout(ls);
+ break;
+ }
+ nfs4_put_stid(stid);
+ spin_lock(&nn->client_lock);
+ if (clp->cl_minorversion == 0)
+ /* Allow cleanup after a lease period.
+ * store_release ensures cleanup will
+ * see any newly revoked states if it
+ * sees the time updated.
+ */
+ nn->nfs40_last_revoke =
+ ktime_get_boottime_seconds();
+ goto retry;
+ }
+ }
+ }
+ spin_unlock(&nn->client_lock);
+}
+
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
@@ -1623,114 +1938,145 @@ gen_sessionid(struct nfsd4_session *ses)
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
+static struct shrinker *nfsd_slot_shrinker;
+static DEFINE_SPINLOCK(nfsd_session_list_lock);
+static LIST_HEAD(nfsd_session_list);
+/* The sum of "target_slots-1" on every session. The shrinker can push this
+ * down, though it can take a little while for the memory to actually
+ * be freed. The "-1" is because we can never free slot 0 while the
+ * session is active.
+ */
+static atomic_t nfsd_total_target_slots = ATOMIC_INIT(0);
+
static void
-free_session_slots(struct nfsd4_session *ses)
+free_session_slots(struct nfsd4_session *ses, int from)
{
int i;
- for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
- free_svc_cred(&ses->se_slots[i]->sl_cred);
- kfree(ses->se_slots[i]);
+ if (from >= ses->se_fchannel.maxreqs)
+ return;
+
+ for (i = from; i < ses->se_fchannel.maxreqs; i++) {
+ struct nfsd4_slot *slot = xa_load(&ses->se_slots, i);
+
+ /*
+ * Save the seqid in case we reactivate this slot.
+ * This will never require a memory allocation so the GFP
+ * flag is irrelevant.
+ */
+ xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0);
+ free_svc_cred(&slot->sl_cred);
+ kfree(slot);
+ }
+ ses->se_fchannel.maxreqs = from;
+ if (ses->se_target_maxslots > from) {
+ int new_target = from ?: 1;
+ atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots);
+ ses->se_target_maxslots = new_target;
}
}
-/*
- * We don't actually need to cache the rpc and session headers, so we
- * can allocate a little less for each slot:
+/**
+ * reduce_session_slots - reduce the target max-slots of a session if possible
+ * @ses: The session to affect
+ * @dec: how much to decrease the target by
+ *
+ * This interface can be used by a shrinker to reduce the target max-slots
+ * for a session so that some slots can eventually be freed.
+ * It uses spin_trylock() as it may be called in a context where another
+ * spinlock is held that has a dependency on client_lock. As shrinkers are
+ * best-effort, skipping a session if client_lock is already held has no
+ * great cost.
+ *
+ * Return value:
+ * The number of slots that the target was reduced by.
*/
-static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
+static int
+reduce_session_slots(struct nfsd4_session *ses, int dec)
{
- u32 size;
+ struct nfsd_net *nn = net_generic(ses->se_client->net,
+ nfsd_net_id);
+ int ret = 0;
- if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
- size = 0;
- else
- size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
- return size + sizeof(struct nfsd4_slot);
+ if (ses->se_target_maxslots <= 1)
+ return ret;
+ if (!spin_trylock(&nn->client_lock))
+ return ret;
+ ret = min(dec, ses->se_target_maxslots-1);
+ ses->se_target_maxslots -= ret;
+ atomic_sub(ret, &nfsd_total_target_slots);
+ ses->se_slot_gen += 1;
+ if (ses->se_slot_gen == 0) {
+ int i;
+ ses->se_slot_gen = 1;
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
+ struct nfsd4_slot *slot = xa_load(&ses->se_slots, i);
+ slot->sl_generation = 0;
+ }
+ }
+ spin_unlock(&nn->client_lock);
+ return ret;
}
-/*
- * XXX: If we run out of reserved DRC memory we could (up to a point)
- * re-negotiate active sessions and reduce their slot usage to make
- * room for new connections. For now we just fail the create session.
- */
-static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
+static struct nfsd4_slot *nfsd4_alloc_slot(struct nfsd4_channel_attrs *fattrs,
+ int index, gfp_t gfp)
{
- u32 slotsize = slot_bytes(ca);
- u32 num = ca->maxreqs;
- unsigned long avail, total_avail;
- unsigned int scale_factor;
+ struct nfsd4_slot *slot;
+ size_t size;
- spin_lock(&nfsd_drc_lock);
- if (nfsd_drc_max_mem > nfsd_drc_mem_used)
- total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
- else
- /* We have handed out more space than we chose in
- * set_max_drc() to allow. That isn't really a
- * problem as long as that doesn't make us think we
- * have lots more due to integer overflow.
- */
- total_avail = 0;
- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
/*
- * Never use more than a fraction of the remaining memory,
- * unless it's the only way to give this client a slot.
- * The chosen fraction is either 1/8 or 1/number of threads,
- * whichever is smaller. This ensures there are adequate
- * slots to support multiple clients per thread.
- * Give the client one slot even if that would require
- * over-allocation--it is better than failure.
+ * The RPC and NFS session headers are never saved in
+ * the slot reply cache buffer.
*/
- scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
-
- avail = clamp_t(unsigned long, avail, slotsize,
- total_avail/scale_factor);
- num = min_t(int, num, avail / slotsize);
- num = max_t(int, num, 1);
- nfsd_drc_mem_used += num * slotsize;
- spin_unlock(&nfsd_drc_lock);
-
- return num;
-}
-
-static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
-{
- int slotsize = slot_bytes(ca);
+ size = fattrs->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ ?
+ 0 : fattrs->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
- spin_lock(&nfsd_drc_lock);
- nfsd_drc_mem_used -= slotsize * ca->maxreqs;
- spin_unlock(&nfsd_drc_lock);
+ slot = kzalloc(struct_size(slot, sl_data, size), gfp);
+ if (!slot)
+ return NULL;
+ slot->sl_index = index;
+ return slot;
}
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
struct nfsd4_channel_attrs *battrs)
{
int numslots = fattrs->maxreqs;
- int slotsize = slot_bytes(fattrs);
struct nfsd4_session *new;
- int mem, i;
-
- BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
- + sizeof(struct nfsd4_session) > PAGE_SIZE);
- mem = numslots * sizeof(struct nfsd4_slot *);
+ struct nfsd4_slot *slot;
+ int i;
- new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
- /* allocate each struct nfsd4_slot and data cache in one piece */
- for (i = 0; i < numslots; i++) {
- new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
- if (!new->se_slots[i])
- goto out_free;
- }
+ xa_init(&new->se_slots);
- memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
- memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
+ slot = nfsd4_alloc_slot(fattrs, 0, GFP_KERNEL);
+ if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL)))
+ goto out_free;
+ for (i = 1; i < numslots; i++) {
+ const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+ slot = nfsd4_alloc_slot(fattrs, i, gfp);
+ if (!slot)
+ break;
+ if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) {
+ kfree(slot);
+ break;
+ }
+ }
+ fattrs->maxreqs = i;
+ memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
+ new->se_target_maxslots = i;
+ atomic_add(i - 1, &nfsd_total_target_slots);
+ new->se_cb_slot_avail = ~0U;
+ new->se_cb_highest_slot = min(battrs->maxreqs - 1,
+ NFSD_BC_SLOT_TABLE_SIZE - 1);
+ spin_lock_init(&new->se_lock);
return new;
out_free:
- while (i--)
- kfree(new->se_slots[i]);
+ kfree(slot);
+ xa_destroy(&new->se_slots);
kfree(new);
return NULL;
}
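alloc_session() above replaces the fixed trailing array of slot pointers with an xarray keyed by slot index: slot 0 is allocated with GFP_KERNEL so session creation can always succeed with at least one slot, and further slots are opportunistic. A condensed sketch of the same idiom, using an illustrative structure and names rather than the nfsd types:

#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_slot_table {
	struct xarray slots;
	unsigned int nr_slots;
};

static int demo_slot_table_init(struct demo_slot_table *t, unsigned int want)
{
	unsigned int i;

	xa_init(&t->slots);
	for (i = 0; i < want; i++) {
		/* Only the first slot is allowed to block/retry hard. */
		gfp_t gfp = i ? GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN
			      : GFP_KERNEL;
		void *slot = kzalloc(64, gfp);	/* stand-in payload */

		if (!slot)
			break;
		if (xa_is_err(xa_store(&t->slots, i, slot, gfp))) {
			kfree(slot);
			break;
		}
	}
	t->nr_slots = i;	/* table size is however far we got */
	return i ? 0 : -ENOMEM;
}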
@@ -1836,17 +2182,47 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
static void __free_session(struct nfsd4_session *ses)
{
- free_session_slots(ses);
+ free_session_slots(ses, 0);
+ xa_destroy(&ses->se_slots);
kfree(ses);
}
static void free_session(struct nfsd4_session *ses)
{
nfsd4_del_conns(ses);
- nfsd4_put_drc_mem(&ses->se_fchannel);
__free_session(ses);
}
+static unsigned long
+nfsd_slot_count(struct shrinker *s, struct shrink_control *sc)
+{
+ unsigned long cnt = atomic_read(&nfsd_total_target_slots);
+
+ return cnt ? cnt : SHRINK_EMPTY;
+}
+
+static unsigned long
+nfsd_slot_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ struct nfsd4_session *ses;
+ unsigned long scanned = 0;
+ unsigned long freed = 0;
+
+ spin_lock(&nfsd_session_list_lock);
+ list_for_each_entry(ses, &nfsd_session_list, se_all_sessions) {
+ freed += reduce_session_slots(ses, 1);
+ scanned += 1;
+ if (scanned >= sc->nr_to_scan) {
+ /* Move starting point for next scan */
+ list_move(&nfsd_session_list, &ses->se_all_sessions);
+ break;
+ }
+ }
+ spin_unlock(&nfsd_session_list_lock);
+ sc->nr_scanned = scanned;
+ return freed;
+}
+
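nfsd_slot_count() and nfsd_slot_scan() follow the usual shrinker contract: count returns a cheap estimate (or SHRINK_EMPTY), scan releases up to sc->nr_to_scan objects and reports what it examined and freed. Wiring them to the nfsd_slot_shrinker pointer declared above would look roughly like the sketch below; the helper name and the "nfsd-session-slots" string are assumptions, not code from this patch.

static int nfsd_slot_shrinker_setup(void)
{
	/* Assumed registration path using the pointer-based shrinker API. */
	nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-session-slots");
	if (!nfsd_slot_shrinker)
		return -ENOMEM;
	nfsd_slot_shrinker->count_objects = nfsd_slot_count;
	nfsd_slot_shrinker->scan_objects = nfsd_slot_scan;
	shrinker_register(nfsd_slot_shrinker);
	return 0;
}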
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
int idx;
@@ -1857,17 +2233,24 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
INIT_LIST_HEAD(&new->se_conns);
- new->se_cb_seq_nr = 1;
- new->se_flags = cses->flags;
+ atomic_set(&new->se_ref, 0);
+ new->se_dead = false;
new->se_cb_prog = cses->callback_prog;
new->se_cb_sec = cses->cb_sec;
- atomic_set(&new->se_ref, 0);
+
+ for (idx = 0; idx < NFSD_BC_SLOT_TABLE_SIZE; ++idx)
+ new->se_cb_seq_nr[idx] = 1;
+
idx = hash_sessionid(&new->se_sessionid);
list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
spin_lock(&clp->cl_lock);
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
+ spin_lock(&nfsd_session_list_lock);
+ list_add_tail(&new->se_all_sessions, &nfsd_session_list);
+ spin_unlock(&nfsd_session_list_lock);
+
{
struct sockaddr *sa = svc_addr(rqstp);
/*
@@ -1937,6 +2320,9 @@ unhash_session(struct nfsd4_session *ses)
spin_lock(&ses->se_client->cl_lock);
list_del(&ses->se_perclnt);
spin_unlock(&ses->se_client->cl_lock);
+ spin_lock(&nfsd_session_list_lock);
+ list_del(&ses->se_all_sessions);
+ spin_unlock(&nfsd_session_list_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
@@ -1954,16 +2340,16 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
return 1;
}
-/*
- * XXX Should we use a slab cache ?
- * This type of memory management is somewhat inefficient, but we use it
- * anyway since SETCLIENTID is not a common operation.
- */
-static struct nfs4_client *alloc_client(struct xdr_netobj name)
+static struct nfs4_client *alloc_client(struct xdr_netobj name,
+ struct nfsd_net *nn)
{
struct nfs4_client *clp;
int i;
+ if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients &&
+ atomic_read(&nn->nfsd_courtesy_clients) > 0)
+ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+
clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
if (clp == NULL)
return NULL;
@@ -1975,12 +2361,19 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
GFP_KERNEL);
if (!clp->cl_ownerstr_hashtbl)
goto err_no_hashtbl;
+ clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
+ if (!clp->cl_callback_wq)
+ goto err_no_callback_wq;
+
for (i = 0; i < OWNER_HASH_SIZE; i++)
INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
INIT_LIST_HEAD(&clp->cl_sessions);
idr_init(&clp->cl_stateids);
atomic_set(&clp->cl_rpc_users, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+ clp->cl_state = NFSD4_ACTIVE;
+ atomic_inc(&nn->nfs4_client_count);
+ atomic_set(&clp->cl_delegs_in_recall, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
@@ -1994,6 +2387,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
+err_no_callback_wq:
+ kfree(clp->cl_ownerstr_hashtbl);
err_no_hashtbl:
kfree(clp->cl_name.data);
err_no_name:
@@ -2007,11 +2402,13 @@ static void __free_client(struct kref *k)
struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
free_svc_cred(&clp->cl_cred);
+ destroy_workqueue(clp->cl_callback_wq);
kfree(clp->cl_ownerstr_hashtbl);
kfree(clp->cl_name.data);
kfree(clp->cl_nii_domain.data);
kfree(clp->cl_nii_name.data);
idr_destroy(&clp->cl_stateids);
+ kfree(clp->cl_ra);
kmem_cache_free(client_slab, clp);
}
@@ -2061,8 +2458,12 @@ unhash_client_locked(struct nfs4_client *clp)
}
list_del_init(&clp->cl_lru);
spin_lock(&clp->cl_lock);
- list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ spin_lock(&nfsd_session_list_lock);
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) {
list_del_init(&ses->se_hash);
+ list_del_init(&ses->se_all_sessions);
+ }
+ spin_unlock(&nfsd_session_list_lock);
spin_unlock(&clp->cl_lock);
}
@@ -2078,7 +2479,11 @@ unhash_client(struct nfs4_client *clp)
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
- if (atomic_read(&clp->cl_rpc_users))
+ int users = atomic_read(&clp->cl_rpc_users);
+
+ trace_nfsd_mark_client_expired(clp, users);
+
+ if (users)
return nfserr_jukebox;
unhash_client_locked(clp);
return nfs_ok;
@@ -2087,16 +2492,16 @@ static __be32 mark_client_expired_locked(struct nfs4_client *clp)
static void
__destroy_client(struct nfs4_client *clp)
{
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
int i;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
- struct list_head reaplist;
+ LIST_HEAD(reaplist);
- INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
while (!list_empty(&clp->cl_delegations)) {
dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
- WARN_ON(!unhash_delegation_locked(dp));
+ unhash_delegation_locked(dp, SC_STATUS_CLOSED);
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
@@ -2130,6 +2535,8 @@ __destroy_client(struct nfs4_client *clp)
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+ atomic_add_unless(&nn->nfs4_client_count, -1, 0);
+ nfsd4_dec_courtesy_client_count(nn, clp);
free_client(clp);
wake_up_all(&expiry_wq);
}
@@ -2326,14 +2733,16 @@ find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
}
static struct nfs4_stid *
-find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
+find_stateid_by_type(struct nfs4_client *cl, stateid_t *t,
+ unsigned short typemask, unsigned short ok_states)
{
struct nfs4_stid *s;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, t);
if (s != NULL) {
- if (typemask & s->sc_type)
+ if ((s->sc_status & ~ok_states) == 0 &&
+ (typemask & s->sc_type))
refcount_inc(&s->sc_count);
else
s = NULL;
@@ -2353,9 +2762,9 @@ static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
- seq_printf(m, "\"");
+ seq_puts(m, "\"");
seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
- seq_printf(m, "\"");
+ seq_puts(m, "\"");
}
static const char *cb_state2str(int state)
@@ -2375,7 +2784,8 @@ static const char *cb_state2str(int state)
static int client_info_show(struct seq_file *m, void *v)
{
- struct inode *inode = m->private;
+ struct inode *inode = file_inode(m->file);
+ struct nfsd4_session *ses;
struct nfs4_client *clp;
u64 clid;
@@ -2385,40 +2795,49 @@ static int client_info_show(struct seq_file *m, void *v)
memcpy(&clid, &clp->cl_clientid, sizeof(clid));
seq_printf(m, "clientid: 0x%llx\n", clid);
seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
- if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
+
+ if (clp->cl_state == NFSD4_COURTESY)
+ seq_puts(m, "status: courtesy\n");
+ else if (clp->cl_state == NFSD4_EXPIRABLE)
+ seq_puts(m, "status: expirable\n");
+ else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
seq_puts(m, "status: confirmed\n");
else
seq_puts(m, "status: unconfirmed\n");
- seq_printf(m, "name: ");
+ seq_printf(m, "seconds from last renew: %lld\n",
+ ktime_get_boottime_seconds() - clp->cl_time);
+ seq_puts(m, "name: ");
seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
if (clp->cl_nii_domain.data) {
- seq_printf(m, "Implementation domain: ");
+ seq_puts(m, "Implementation domain: ");
seq_quote_mem(m, clp->cl_nii_domain.data,
clp->cl_nii_domain.len);
- seq_printf(m, "\nImplementation name: ");
+ seq_puts(m, "\nImplementation name: ");
seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
}
seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
- seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
+ seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
+ seq_printf(m, "admin-revoked states: %d\n",
+ atomic_read(&clp->cl_admin_revoked));
+ spin_lock(&clp->cl_lock);
+ seq_printf(m, "session slots:");
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ seq_printf(m, " %u", ses->se_fchannel.maxreqs);
+ seq_printf(m, "\nsession target slots:");
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ seq_printf(m, " %u", ses->se_target_maxslots);
+ spin_unlock(&clp->cl_lock);
+ seq_puts(m, "\n");
+
drop_client(clp);
return 0;
}
-static int client_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, client_info_show, inode);
-}
-
-static const struct file_operations client_info_fops = {
- .open = client_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(client_info);
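DEFINE_SHOW_ATTRIBUTE(client_info) generates the open helper and file_operations that the removed lines spelled out by hand. Its expansion is approximately the following (see <linux/seq_file.h> for the authoritative definition); note that it passes inode->i_private to single_open(), which is consistent with client_info_show() above now deriving the inode from m->file instead of m->private.

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(client_info). */
static int client_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, client_info_show, inode->i_private);
}

static const struct file_operations client_info_fops = {
	.owner		= THIS_MODULE,
	.open		= client_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};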
static void *states_start(struct seq_file *s, loff_t *pos)
__acquires(&clp->cl_lock)
@@ -2461,7 +2880,7 @@ static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
- struct inode *inode = f->nf_inode;
+ struct inode *inode = file_inode(f->nf_file);
seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
MAJOR(inode->i_sb->s_dev),
@@ -2471,7 +2890,7 @@ static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
- seq_printf(s, "owner: ");
+ seq_puts(s, "owner: ");
seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}
@@ -2489,18 +2908,13 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
struct nfs4_stateowner *oo;
unsigned int access, deny;
- if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
- return 0; /* XXX: or SEQ_SKIP? */
ols = openlockstateid(st);
oo = ols->st_stateowner;
nf = st->sc_file;
- file = find_any_file(nf);
- if (!file)
- return 0;
- seq_printf(s, "- ");
+ seq_puts(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
- seq_printf(s, ": { type: open, ");
+ seq_puts(s, ": { type: open, ");
access = bmap_to_share_mode(ols->st_access_bmap);
deny = bmap_to_share_mode(ols->st_deny_bmap);
@@ -2512,14 +2926,22 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
- nfs4_show_superblock(s, file);
- seq_printf(s, ", ");
- nfs4_show_fname(s, file);
- seq_printf(s, ", ");
+ if (nf) {
+ spin_lock(&nf->fi_lock);
+ file = find_any_file_locked(nf);
+ if (file) {
+ nfs4_show_superblock(s, file);
+ seq_puts(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_puts(s, ", ");
+ }
+ spin_unlock(&nf->fi_lock);
+ } else
+ seq_puts(s, "closed, ");
nfs4_show_owner(s, oo);
- seq_printf(s, " }\n");
- nfsd_file_put(file);
-
+ if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
+ seq_puts(s, ", admin-revoked");
+ seq_puts(s, " }\n");
return 0;
}
@@ -2533,33 +2955,50 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
ols = openlockstateid(st);
oo = ols->st_stateowner;
nf = st->sc_file;
- file = find_any_file(nf);
- if (!file)
- return 0;
- seq_printf(s, "- ");
+ seq_puts(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
- seq_printf(s, ": { type: lock, ");
+ seq_puts(s, ": { type: lock, ");
- /*
- * Note: a lock stateid isn't really the same thing as a lock,
- * it's the locking state held by one owner on a file, and there
- * may be multiple (or no) lock ranges associated with it.
- * (Same for the matter is true of open stateids.)
- */
+ spin_lock(&nf->fi_lock);
+ file = find_any_file_locked(nf);
+ if (file) {
+ /*
+ * Note: a lock stateid isn't really the same thing as a lock,
+ * it's the locking state held by one owner on a file, and there
+ * may be multiple (or no) lock ranges associated with it.
+ * (The same, for that matter, is true of open stateids.)
+ */
- nfs4_show_superblock(s, file);
- /* XXX: open stateid? */
- seq_printf(s, ", ");
- nfs4_show_fname(s, file);
- seq_printf(s, ", ");
+ nfs4_show_superblock(s, file);
+ /* XXX: open stateid? */
+ seq_puts(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_puts(s, ", ");
+ }
nfs4_show_owner(s, oo);
- seq_printf(s, " }\n");
- nfsd_file_put(file);
-
+ if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
+ seq_puts(s, ", admin-revoked");
+ seq_puts(s, " }\n");
+ spin_unlock(&nf->fi_lock);
return 0;
}
+static char *nfs4_show_deleg_type(u32 dl_type)
+{
+ switch (dl_type) {
+ case OPEN_DELEGATE_READ:
+ return "r";
+ case OPEN_DELEGATE_WRITE:
+ return "w";
+ case OPEN_DELEGATE_READ_ATTRS_DELEG:
+ return "ra";
+ case OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ return "wa";
+ }
+ return "?";
+}
+
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_delegation *ds;
@@ -2568,26 +3007,27 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
ds = delegstateid(st);
nf = st->sc_file;
- file = find_deleg_file(nf);
- if (!file)
- return 0;
- seq_printf(s, "- ");
+ seq_puts(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
- seq_printf(s, ": { type: deleg, ");
+ seq_puts(s, ": { type: deleg, ");
- /* Kinda dead code as long as we only support read delegs: */
- seq_printf(s, "access: %s, ",
- ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
+ seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type));
/* XXX: lease time, whether it's being recalled. */
- nfs4_show_superblock(s, file);
- seq_printf(s, ", ");
- nfs4_show_fname(s, file);
- seq_printf(s, " }\n");
- nfsd_file_put(file);
-
+ spin_lock(&nf->fi_lock);
+ file = nf->fi_deleg_file;
+ if (file) {
+ seq_puts(s, ", ");
+ nfs4_show_superblock(s, file);
+ seq_puts(s, ", ");
+ nfs4_show_fname(s, file);
+ }
+ spin_unlock(&nf->fi_lock);
+ if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
+ seq_puts(s, ", admin-revoked");
+ seq_puts(s, " }\n");
return 0;
}
@@ -2597,18 +3037,25 @@ static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
struct nfsd_file *file;
ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
- file = ls->ls_file;
- seq_printf(s, "- ");
+ seq_puts(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
- seq_printf(s, ": { type: layout, ");
+ seq_puts(s, ": { type: layout");
/* XXX: What else would be useful? */
- nfs4_show_superblock(s, file);
- seq_printf(s, ", ");
- nfs4_show_fname(s, file);
- seq_printf(s, " }\n");
+ spin_lock(&ls->ls_stid.sc_file->fi_lock);
+ file = ls->ls_file;
+ if (file) {
+ seq_puts(s, ", ");
+ nfs4_show_superblock(s, file);
+ seq_puts(s, ", ");
+ nfs4_show_fname(s, file);
+ }
+ spin_unlock(&ls->ls_stid.sc_file->fi_lock);
+ if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
+ seq_puts(s, ", admin-revoked");
+ seq_puts(s, " }\n");
return 0;
}
@@ -2618,13 +3065,13 @@ static int states_show(struct seq_file *s, void *v)
struct nfs4_stid *st = v;
switch (st->sc_type) {
- case NFS4_OPEN_STID:
+ case SC_TYPE_OPEN:
return nfs4_show_open(s, st);
- case NFS4_LOCK_STID:
+ case SC_TYPE_LOCK:
return nfs4_show_lock(s, st);
- case NFS4_DELEG_STID:
+ case SC_TYPE_DELEG:
return nfs4_show_deleg(s, st);
- case NFS4_LAYOUT_STID:
+ case SC_TYPE_LAYOUT:
return nfs4_show_layout(s, st);
default:
return 0; /* XXX: or SEQ_SKIP? */
@@ -2664,7 +3111,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
/* XXX: alternatively, we could get/drop in seq start/stop */
drop_client(clp);
- return 0;
+ return seq_release(inode, file);
}
static const struct file_operations client_states_fops = {
@@ -2735,6 +3182,88 @@ static const struct tree_descr client_files[] = {
[3] = {""},
};
+static int
+nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
+ struct rpc_task *task)
+{
+ trace_nfsd_cb_recall_any_done(cb, task);
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ rpc_delay(task, 2 * HZ);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static void
+nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
+{
+ struct nfs4_client *clp = cb->cb_clp;
+
+ drop_client(clp);
+}
+
+static int
+nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task);
+ ncf->ncf_cb_status = task->tk_status;
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ rpc_delay(task, 2 * HZ);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static void
+nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ nfs4_put_stid(&dp->dl_stid);
+}
+
+static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
+ .done = nfsd4_cb_recall_any_done,
+ .release = nfsd4_cb_recall_any_release,
+ .opcode = OP_CB_RECALL_ANY,
+};
+
+static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
+ .done = nfsd4_cb_getattr_done,
+ .release = nfsd4_cb_getattr_release,
+ .opcode = OP_CB_GETATTR,
+};
+
+static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
+{
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ncf->ncf_getattr.cb_flags))
+ return;
+
+ /* set to proper status when nfsd4_cb_getattr_done runs */
+ ncf->ncf_cb_status = NFS4ERR_IO;
+
+ /* ensure that wake_bit is done when RUNNING is cleared */
+ set_bit(NFSD4_CALLBACK_WAKE, &ncf->ncf_getattr.cb_flags);
+
+ refcount_inc(&dp->dl_stid.sc_count);
+ nfsd4_run_cb(&ncf->ncf_getattr);
+}
+
static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
@@ -2745,7 +3274,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct dentry *dentries[ARRAY_SIZE(client_files)];
- clp = alloc_client(name);
+ clp = alloc_client(name, nn);
if (clp == NULL)
return NULL;
@@ -2758,7 +3287,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
kref_init(&clp->cl_nfsdfs.cl_ref);
nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
clp->cl_time = ktime_get_boottime_seconds();
- clear_bit(0, &clp->cl_cb_slot_busy);
copy_verf(clp, verf);
memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
clp->cl_cb_session = NULL;
@@ -2772,6 +3300,14 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
free_client(clp);
return NULL;
}
+ clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
+ if (!clp->cl_ra) {
+ free_client(clp);
+ return NULL;
+ }
+ clp->cl_ra_time = 0;
+ nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
+ NFSPROC4_CLNT_CB_RECALL_ANY);
return clp;
}
@@ -2951,7 +3487,20 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
struct nfsd4_slot *slot = resp->cstate.slot;
unsigned int base;
- dprintk("--> %s slot %p\n", __func__, slot);
+ /*
+ * RFC 5661 Section 2.10.6.1.2:
+ *
+ * Any time SEQUENCE ... returns an error ... [t]he replier MUST NOT
+ * modify the reply cache entry for the slot whenever an error is
+ * returned from SEQUENCE ...
+ *
+ * Because nfsd4_store_cache_entry is called only by
+ * nfsd4_sequence_done(), nfsd4_store_cache_entry() is called only
+ * when a SEQUENCE operation was part of the COMPOUND.
+ * nfs41_check_op_ordering() ensures SEQUENCE is the first op.
+ */
+ if (resp->opcnt == 1 && resp->cstate.status != nfs_ok)
+ return;
slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
slot->sl_opcnt = resp->opcnt;
@@ -2959,7 +3508,7 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
free_svc_cred(&slot->sl_cred);
copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
- if (!nfsd4_cache_this(resp)) {
+ if (!(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
slot->sl_flags &= ~NFSD4_SLOT_CACHED;
return;
}
@@ -2974,41 +3523,6 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
}
/*
- * Encode the replay sequence operation from the slot values.
- * If cachethis is FALSE encode the uncached rep error on the next
- * operation which sets resp->p and increments resp->opcnt for
- * nfs4svc_encode_compoundres.
- *
- */
-static __be32
-nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
- struct nfsd4_compoundres *resp)
-{
- struct nfsd4_op *op;
- struct nfsd4_slot *slot = resp->cstate.slot;
-
- /* Encode the replayed sequence operation */
- op = &args->ops[resp->opcnt - 1];
- nfsd4_encode_operation(resp, op);
-
- if (slot->sl_flags & NFSD4_SLOT_CACHED)
- return op->status;
- if (args->opcnt == 1) {
- /*
- * The original operation wasn't a solo sequence--we
- * always cache those--so this retry must not match the
- * original:
- */
- op->status = nfserr_seq_false_retry;
- } else {
- op = &args->ops[resp->opcnt++];
- op->status = nfserr_retry_uncached_rep;
- nfsd4_encode_operation(resp, op);
- }
- return op->status;
-}
-
-/*
* The sequence operation is not cached because we can use the slot and
* session values.
*/
@@ -3016,17 +3530,30 @@ static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq)
{
+ struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
struct nfsd4_slot *slot = resp->cstate.slot;
struct xdr_stream *xdr = resp->xdr;
__be32 *p;
- __be32 status;
dprintk("--> %s slot %p\n", __func__, slot);
- status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
- if (status)
- return status;
+ /* Always encode the SEQUENCE response. */
+ nfsd4_encode_operation(resp, &args->ops[0]);
+ if (args->opcnt == 1)
+ /* A solo SEQUENCE - nothing was cached */
+ return args->ops[0].status;
+
+ if (!(slot->sl_flags & NFSD4_SLOT_CACHED)) {
+ /* We weren't asked to cache this. */
+ struct nfsd4_op *op;
+ op = &args->ops[resp->opcnt++];
+ op->status = nfserr_retry_uncached_rep;
+ nfsd4_encode_operation(resp, op);
+ return op->status;
+ }
+
+ /* return reply from cache */
p = xdr_reserve_space(xdr, slot->sl_datalen);
if (!p) {
WARN_ON_ONCE(1);
@@ -3077,7 +3604,7 @@ static bool client_has_state(struct nfs4_client *clp)
#endif
|| !list_empty(&clp->cl_delegations)
|| !list_empty(&clp->cl_sessions)
- || !list_empty(&clp->async_copies);
+ || nfsd4_has_active_async_copies(clp);
}
static __be32 copy_impl_id(struct nfs4_client *clp,
@@ -3115,6 +3642,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__func__, rqstp, exid, exid->clname.len, exid->clname.data,
addr_str, exid->flags, exid->spa_how);
+ exid->server_impl_name = kasprintf(GFP_KERNEL, "%s %s %s %s",
+ utsname()->sysname, utsname()->release,
+ utsname()->version, utsname()->machine);
+ if (!exid->server_impl_name)
+ return nfserr_jukebox;
+
if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
return nfserr_inval;
@@ -3240,6 +3773,9 @@ out_new:
new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
+ /* Contrived initial CREATE_SESSION response */
+ new->cl_cs_slot.sl_status = nfserr_seq_misordered;
+
add_to_unconfirmed(new);
swap(new, conf);
out_copy:
@@ -3249,6 +3785,23 @@ out_copy:
exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
nfsd4_set_ex_flags(conf, exid);
+ exid->nii_domain.len = sizeof("kernel.org") - 1;
+ exid->nii_domain.data = "kernel.org";
+
+ /*
+ * Note that RFC 8881 places no length limit on
+ * nii_name, but this implementation permits no
+ * more than NFS4_OPAQUE_LIMIT bytes.
+ */
+ exid->nii_name.len = strlen(exid->server_impl_name);
+ if (exid->nii_name.len > NFS4_OPAQUE_LIMIT)
+ exid->nii_name.len = NFS4_OPAQUE_LIMIT;
+ exid->nii_name.data = exid->server_impl_name;
+
+ /* just send zeros - the date is in nii_name */
+ exid->nii_time.tv_sec = 0;
+ exid->nii_time.tv_nsec = 0;
+
dprintk("nfsd4_exchange_id seqid %d flags %x\n",
conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
status = nfs_ok;
@@ -3265,14 +3818,18 @@ out_nolock:
return status;
}
-static __be32
-check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
+void
+nfsd4_exchange_id_release(union nfsd4_op_u *u)
{
- dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
- slot_seqid);
+ struct nfsd4_exchange_id *exid = &u->exchange_id;
+ kfree(exid->server_impl_name);
+}
+
+static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, u8 flags)
+{
/* The slot is in use, and no response has been sent. */
- if (slot_inuse) {
+ if (flags & NFSD4_SLOT_INUSE) {
if (seqid == slot_seqid)
return nfserr_jukebox;
else
@@ -3281,6 +3838,8 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
/* Note unsigned 32-bit arithmetic handles wraparound: */
if (likely(seqid == slot_seqid + 1))
return nfs_ok;
+ if ((flags & NFSD4_SLOT_REUSED) && seqid == 1)
+ return nfs_ok;
if (seqid == slot_seqid)
return nfserr_replay_cache;
return nfserr_seq_misordered;
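/*
 * Illustrative sketch (not part of the patch above): a stand-alone,
 * user-space model of the slot seqid rules that check_slot_seqid()
 * implements. The enum names and the tiny test harness are made up for
 * illustration only; the decision logic mirrors the function above,
 * including the unsigned wraparound of seqid == slot_seqid + 1.
 */
#include <stdint.h>
#include <stdio.h>

#define SLOT_INUSE	0x01	/* request on this slot still executing */
#define SLOT_REUSED	0x02	/* slot was freed and later recreated */

enum seq_check { SEQ_OK, SEQ_REPLAY, SEQ_MISORDERED, SEQ_DELAY, SEQ_FALSE_RETRY };

static enum seq_check model_check_slot_seqid(uint32_t seqid, uint32_t slot_seqid,
					     uint8_t flags)
{
	if (flags & SLOT_INUSE)			/* no response sent yet */
		return seqid == slot_seqid ? SEQ_DELAY : SEQ_FALSE_RETRY;
	if (seqid == slot_seqid + 1)		/* normal progression */
		return SEQ_OK;
	if ((flags & SLOT_REUSED) && seqid == 1) /* first use of a recreated slot */
		return SEQ_OK;
	if (seqid == slot_seqid)		/* retransmission -> replay cache */
		return SEQ_REPLAY;
	return SEQ_MISORDERED;
}

int main(void)
{
	printf("%d\n", model_check_slot_seqid(5, 4, 0));		/* SEQ_OK */
	printf("%d\n", model_check_slot_seqid(1, 9, SLOT_REUSED));	/* SEQ_OK */
	printf("%d\n", model_check_slot_seqid(4, 4, 0));		/* SEQ_REPLAY */
	printf("%d\n", model_check_slot_seqid(4, 4, SLOT_INUSE));	/* SEQ_DELAY */
	return 0;
}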
@@ -3339,17 +3898,6 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
- /*
- * Note decreasing slot size below client's request may make it
- * difficult for client to function correctly, whereas
- * decreasing the number of slots will (just?) affect
- * performance. When short on memory we therefore prefer to
- * decrease number of slots instead of their size. Clients that
- * request larger slots than they need will get poor results:
- * Note that we always allow at least one slot, because our
- * accounting is soft and provides no guarantees either way.
- */
- ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
return nfs_ok;
}
@@ -3410,10 +3958,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
struct nfsd4_create_session *cr_ses = &u->create_session;
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
+ struct nfsd4_clid_slot *cs_slot;
struct nfs4_client *old = NULL;
struct nfsd4_session *new;
struct nfsd4_conn *conn;
- struct nfsd4_clid_slot *cs_slot = NULL;
__be32 status = 0;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -3427,76 +3975,89 @@ nfsd4_create_session(struct svc_rqst *rqstp,
return status;
status = check_backchannel_attrs(&cr_ses->back_channel);
if (status)
- goto out_release_drc_mem;
+ goto out_err;
status = nfserr_jukebox;
new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
if (!new)
- goto out_release_drc_mem;
+ goto out_err;
conn = alloc_conn_from_crses(rqstp, cr_ses);
if (!conn)
goto out_free_session;
spin_lock(&nn->client_lock);
+
+ /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */
unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
conf = find_confirmed_client(&cr_ses->clientid, true, nn);
- WARN_ON_ONCE(conf && unconf);
+ if (!conf && !unconf) {
+ status = nfserr_stale_clientid;
+ goto out_free_conn;
+ }
+ /* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */
+ if (conf) {
+ cs_slot = &conf->cl_cs_slot;
+ trace_nfsd_slot_seqid_conf(conf, cr_ses);
+ } else {
+ cs_slot = &unconf->cl_cs_slot;
+ trace_nfsd_slot_seqid_unconf(unconf, cr_ses);
+ }
+ status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
+ switch (status) {
+ case nfs_ok:
+ cs_slot->sl_seqid++;
+ cr_ses->seqid = cs_slot->sl_seqid;
+ break;
+ case nfserr_replay_cache:
+ status = nfsd4_replay_create_session(cr_ses, cs_slot);
+ fallthrough;
+ case nfserr_jukebox:
+ /* The server MUST NOT cache NFS4ERR_DELAY */
+ goto out_free_conn;
+ default:
+ goto out_cache_error;
+ }
+
+ /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */
if (conf) {
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(conf, rqstp))
- goto out_free_conn;
- cs_slot = &conf->cl_cs_slot;
- status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
- if (status) {
- if (status == nfserr_replay_cache)
- status = nfsd4_replay_create_session(cr_ses, cs_slot);
- goto out_free_conn;
- }
- } else if (unconf) {
+ goto out_cache_error;
+ } else {
status = nfserr_clid_inuse;
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
trace_nfsd_clid_cred_mismatch(unconf, rqstp);
- goto out_free_conn;
+ goto out_cache_error;
}
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(unconf, rqstp))
- goto out_free_conn;
- cs_slot = &unconf->cl_cs_slot;
- status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
- if (status) {
- /* an unconfirmed replay returns misordered */
- status = nfserr_seq_misordered;
- goto out_free_conn;
- }
+ goto out_cache_error;
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = mark_client_expired_locked(old);
- if (status) {
- old = NULL;
- goto out_free_conn;
- }
+ if (status)
+ goto out_expired_error;
trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
conf = unconf;
- } else {
- status = nfserr_stale_clientid;
- goto out_free_conn;
}
+
+ /* RFC 8881 Section 18.36.4 Phase 4: Session creation. */
status = nfs_ok;
/* Persistent sessions are not supported */
cr_ses->flags &= ~SESSION4_PERSIST;
/* Upshifting from TCP to RDMA is not supported */
cr_ses->flags &= ~SESSION4_RDMA;
+ /* Report the correct number of backchannel slots */
+ cr_ses->back_channel.maxreqs = new->se_cb_highest_slot + 1;
init_session(rqstp, new, conf, cr_ses);
nfsd4_get_session_locked(new);
memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
- cs_slot->sl_seqid++;
- cr_ses->seqid = cs_slot->sl_seqid;
/* cache solo and embedded create sessions under the client_lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
@@ -3509,15 +4070,25 @@ nfsd4_create_session(struct svc_rqst *rqstp,
if (old)
expire_client(old);
return status;
+
+out_expired_error:
+ /*
+ * Revert the slot seq_nr change so the server will process
+ * the client's resend instead of returning a cached response.
+ */
+ if (status == nfserr_jukebox) {
+ cs_slot->sl_seqid--;
+ cr_ses->seqid = cs_slot->sl_seqid;
+ goto out_free_conn;
+ }
+out_cache_error:
+ nfsd4_cache_create_session(cr_ses, cs_slot, status);
out_free_conn:
spin_unlock(&nn->client_lock);
free_conn(conn);
- if (old)
- expire_client(old);
out_free_session:
__free_session(new);
-out_release_drc_mem:
- nfsd4_put_drc_mem(&cr_ses->fore_channel);
+out_err:
return status;
}
@@ -3770,6 +4341,36 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
return true;
}
+/*
+ * Note that the response is constructed here both for the case
+ * of a new SEQUENCE request and for a replayed SEQUENCE request.
+ * We do not cache SEQUENCE responses as SEQUENCE is idempotent.
+ */
+static void nfsd4_construct_sequence_response(struct nfsd4_session *session,
+ struct nfsd4_sequence *seq)
+{
+ struct nfs4_client *clp = session->se_client;
+
+ seq->maxslots_response = max(session->se_target_maxslots,
+ seq->maxslots);
+ seq->target_maxslots = session->se_target_maxslots;
+
+ switch (clp->cl_cb_state) {
+ case NFSD4_CB_DOWN:
+ seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
+ break;
+ case NFSD4_CB_FAULT:
+ seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
+ break;
+ default:
+ seq->status_flags = 0;
+ }
+ if (!list_empty(&clp->cl_revoked))
+ seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
+ if (atomic_read(&clp->cl_admin_revoked))
+ seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
+}
+
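/*
 * Illustrative sketch (not part of the patch above): how
 * nfsd4_construct_sequence_response() composes seq->status_flags. The
 * flag values and helper here are invented for the example; the real bits
 * are the SEQ4_STATUS_* definitions from the NFSv4.1 protocol headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define F_CB_PATH_DOWN		0x01
#define F_BACKCHANNEL_FAULT	0x02
#define F_RECALLABLE_REVOKED	0x04
#define F_ADMIN_REVOKED		0x08

enum cb_state { CB_UP, CB_DOWN, CB_FAULT };

static unsigned int seq_status_flags(enum cb_state cb, bool revoked, bool admin_revoked)
{
	unsigned int flags = 0;

	if (cb == CB_DOWN)
		flags = F_CB_PATH_DOWN;
	else if (cb == CB_FAULT)
		flags = F_BACKCHANNEL_FAULT;
	if (revoked)
		flags |= F_RECALLABLE_REVOKED;
	if (admin_revoked)
		flags |= F_ADMIN_REVOKED;
	return flags;
}

int main(void)
{
	printf("0x%x\n", seq_status_flags(CB_DOWN, true, false));	/* 0x5 */
	printf("0x%x\n", seq_status_flags(CB_UP, false, true));	/* 0x8 */
	return 0;
}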
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -3815,16 +4416,14 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out_put_session;
- slot = session->se_slots[seq->slotid];
+ slot = xa_load(&session->se_slots, seq->slotid);
dprintk("%s: slotid %d\n", __func__, seq->slotid);
- /* We do not negotiate the number of slots yet, so set the
- * maxslots to the session maxreqs which is used to encode
- * sr_highest_slotid and the sr_target_slot id to maxslots */
- seq->maxslots = session->se_fchannel.maxreqs;
+ trace_nfsd_slot_seqid_sequence(clp, seq, slot);
- status = check_slot_seqid(seq->seqid, slot->sl_seqid,
- slot->sl_flags & NFSD4_SLOT_INUSE);
+ nfsd4_construct_sequence_response(session, seq);
+
+ status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags);
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
@@ -3849,6 +4448,12 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out_put_session;
+ if (session->se_target_maxslots < session->se_fchannel.maxreqs &&
+ slot->sl_generation == session->se_slot_gen &&
+ seq->maxslots <= session->se_target_maxslots)
+ /* Client acknowledged our reduced maxreqs */
+ free_session_slots(session, session->se_target_maxslots);
+
buflen = (seq->cachethis) ?
session->se_fchannel.maxresp_cached :
session->se_fchannel.maxresp_sz;
@@ -3856,12 +4461,14 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfserr_rep_too_big;
if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
goto out_put_session;
- svc_reserve(rqstp, buflen);
+ svc_reserve_auth(rqstp, buflen);
status = nfs_ok;
- /* Success! bump slot seqid */
+ /* Success! accept new slot seqid */
slot->sl_seqid = seq->seqid;
+ slot->sl_flags &= ~NFSD4_SLOT_REUSED;
slot->sl_flags |= NFSD4_SLOT_INUSE;
+ slot->sl_generation = session->se_slot_gen;
if (seq->cachethis)
slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
else
@@ -3871,19 +4478,49 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
cstate->session = session;
cstate->clp = clp;
-out:
- switch (clp->cl_cb_state) {
- case NFSD4_CB_DOWN:
- seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
- break;
- case NFSD4_CB_FAULT:
- seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
- break;
- default:
- seq->status_flags = 0;
+ /*
+ * If the client ever uses the highest available slot,
+ * gently try to allocate another 20%. This allows
+ * fairly quick growth without grossly over-shooting what
+ * the client might use.
+ */
+ if (seq->slotid == session->se_fchannel.maxreqs - 1 &&
+ session->se_target_maxslots >= session->se_fchannel.maxreqs &&
+ session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) {
+ int s = session->se_fchannel.maxreqs;
+ int cnt = DIV_ROUND_UP(s, 5);
+ void *prev_slot;
+
+ do {
+ /*
+ * GFP_NOWAIT both allows allocation under a
+ * spinlock, and only succeeds if there is
+ * plenty of memory.
+ */
+ slot = nfsd4_alloc_slot(&session->se_fchannel, s,
+ GFP_NOWAIT);
+ prev_slot = xa_load(&session->se_slots, s);
+ if (xa_is_value(prev_slot) && slot) {
+ slot->sl_seqid = xa_to_value(prev_slot);
+ slot->sl_flags |= NFSD4_SLOT_REUSED;
+ }
+ if (slot &&
+ !xa_is_err(xa_store(&session->se_slots, s, slot,
+ GFP_NOWAIT))) {
+ s += 1;
+ session->se_fchannel.maxreqs = s;
+ atomic_add(s - session->se_target_maxslots,
+ &nfsd_total_target_slots);
+ session->se_target_maxslots = s;
+ } else {
+ kfree(slot);
+ slot = NULL;
+ }
+ } while (slot && --cnt > 0);
}
- if (!list_empty(&clp->cl_revoked))
- seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
+
+out:
+ trace_nfsd_seq4_status(rqstp, seq);
out_no_session:
if (conn)
free_conn(conn);
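/*
 * Illustrative sketch (not part of the patch above): the arithmetic of the
 * "grow by roughly 20%" policy in nfsd4_sequence() when the client touches
 * its highest slot. This assumes every GFP_NOWAIT allocation succeeds; the
 * starting slot count is made up for the example.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int maxreqs = 8;	/* hypothetical current session slot count */

	for (int round = 1; round <= 5; round++) {
		int cnt = DIV_ROUND_UP(maxreqs, 5);	/* ~20%, at least one */

		maxreqs += cnt;
		printf("after growth round %d: %d slots\n", round, maxreqs);
	}
	return 0;	/* prints 10, 12, 15, 18, 22 */
}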
@@ -4098,17 +4735,25 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
status = nfs_ok;
if (conf) {
- old = unconf;
- unhash_client_locked(old);
- nfsd4_change_callback(conf, &unconf->cl_cb_conn);
- } else {
+ if (get_client_locked(conf) == nfs_ok) {
+ old = unconf;
+ unhash_client_locked(old);
+ nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+ } else {
+ conf = NULL;
+ }
+ }
+
+ if (!conf) {
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = nfserr_clid_inuse;
if (client_has_state(old)
&& !same_creds(&unconf->cl_cred,
- &old->cl_cred))
+ &old->cl_cred)) {
+ old = NULL;
goto out;
+ }
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
@@ -4116,10 +4761,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
trace_nfsd_clid_replaced(&old->cl_clientid);
}
+ status = get_client_locked(unconf);
+ if (status != nfs_ok) {
+ old = NULL;
+ goto out;
+ }
move_to_confirmed(unconf);
conf = unconf;
}
- get_client_locked(conf);
spin_unlock(&nn->client_lock);
if (conf == unconf)
fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
@@ -4139,11 +4788,9 @@ static struct nfs4_file *nfsd4_alloc_file(void)
}
/* OPEN Share state helper functions */
-static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
- struct nfs4_file *fp)
-{
- lockdep_assert_held(&state_lock);
+static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
+{
refcount_set(&fp->fi_ref, 1);
spin_lock_init(&fp->fi_lock);
INIT_LIST_HEAD(&fp->fi_stateids);
@@ -4151,6 +4798,7 @@ static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
INIT_LIST_HEAD(&fp->fi_clnt_odstate);
fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
fp->fi_deleg_file = NULL;
+ fp->fi_rdeleg_file = NULL;
fp->fi_had_conflict = false;
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
@@ -4161,7 +4809,6 @@ static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
INIT_LIST_HEAD(&fp->fi_lo_states);
atomic_set(&fp->fi_lo_recalls, 0);
#endif
- hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
@@ -4179,32 +4826,25 @@ nfsd4_free_slabs(void)
int
nfsd4_init_slabs(void)
{
- client_slab = kmem_cache_create("nfsd4_clients",
- sizeof(struct nfs4_client), 0, 0, NULL);
+ client_slab = KMEM_CACHE(nfs4_client, 0);
if (client_slab == NULL)
goto out;
- openowner_slab = kmem_cache_create("nfsd4_openowners",
- sizeof(struct nfs4_openowner), 0, 0, NULL);
+ openowner_slab = KMEM_CACHE(nfs4_openowner, 0);
if (openowner_slab == NULL)
goto out_free_client_slab;
- lockowner_slab = kmem_cache_create("nfsd4_lockowners",
- sizeof(struct nfs4_lockowner), 0, 0, NULL);
+ lockowner_slab = KMEM_CACHE(nfs4_lockowner, 0);
if (lockowner_slab == NULL)
goto out_free_openowner_slab;
- file_slab = kmem_cache_create("nfsd4_files",
- sizeof(struct nfs4_file), 0, 0, NULL);
+ file_slab = KMEM_CACHE(nfs4_file, 0);
if (file_slab == NULL)
goto out_free_lockowner_slab;
- stateid_slab = kmem_cache_create("nfsd4_stateids",
- sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
+ stateid_slab = KMEM_CACHE(nfs4_ol_stateid, 0);
if (stateid_slab == NULL)
goto out_free_file_slab;
- deleg_slab = kmem_cache_create("nfsd4_delegations",
- sizeof(struct nfs4_delegation), 0, 0, NULL);
+ deleg_slab = KMEM_CACHE(nfs4_delegation, 0);
if (deleg_slab == NULL)
goto out_free_stateid_slab;
- odstate_slab = kmem_cache_create("nfsd4_odstate",
- sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
+ odstate_slab = KMEM_CACHE(nfs4_clnt_odstate, 0);
if (odstate_slab == NULL)
goto out_free_deleg_slab;
return 0;
@@ -4225,21 +4865,76 @@ out:
return -ENOMEM;
}
+static unsigned long
+nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct nfsd_net *nn = shrink->private_data;
+ long count;
+
+ count = atomic_read(&nn->nfsd_courtesy_clients);
+ if (!count)
+ count = atomic_long_read(&num_delegations);
+ if (count)
+ queue_work(laundry_wq, &nn->nfsd_shrinker_work);
+ return (unsigned long)count;
+}
+
+static unsigned long
+nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ return SHRINK_STOP;
+}
+
+void
+nfsd4_init_leases_net(struct nfsd_net *nn)
+{
+ struct sysinfo si;
+ u64 max_clients;
+
+ nn->nfsd4_lease = 90; /* default lease time */
+ nn->nfsd4_grace = 90;
+ nn->somebody_reclaimed = false;
+ nn->track_reclaim_completes = false;
+ nn->clverifier_counter = get_random_u32();
+ nn->clientid_base = get_random_u32();
+ nn->clientid_counter = nn->clientid_base + 1;
+ nn->s2s_cp_cl_id = nn->clientid_counter++;
+
+ atomic_set(&nn->nfs4_client_count, 0);
+ si_meminfo(&si);
+ max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
+ max_clients *= NFS4_CLIENTS_PER_GB;
+ nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
+
+ atomic_set(&nn->nfsd_courtesy_clients, 0);
+}
+
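/*
 * Illustrative sketch (not part of the patch above): the memory-based
 * client limit computed in nfsd4_init_leases_net(). NFS4_CLIENTS_PER_GB is
 * assumed here to be 1024; adjust if the real constant differs. The page
 * count and page size in main() are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define NFS4_CLIENTS_PER_GB	1024

static unsigned int nfs4_max_clients(uint64_t totalram, unsigned int mem_unit)
{
	uint64_t gb = totalram * mem_unit / (1024ULL * 1024 * 1024);
	unsigned int max_clients = gb * NFS4_CLIENTS_PER_GB;

	/* Never advertise fewer than one GiB's worth of clients. */
	if (max_clients < NFS4_CLIENTS_PER_GB)
		max_clients = NFS4_CLIENTS_PER_GB;
	return max_clients;
}

int main(void)
{
	/* e.g. 4 GiB of RAM in 4 KiB pages -> 4096 clients */
	printf("%u\n", nfs4_max_clients(1048576, 4096));
	/* e.g. 512 MiB of RAM -> rounds down to 0 GiB, clamped to 1024 */
	printf("%u\n", nfs4_max_clients(131072, 4096));
	return 0;
}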
+enum rp_lock {
+ RP_UNLOCKED,
+ RP_LOCKED,
+ RP_UNHASHED,
+};
+
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
- mutex_init(&rp->rp_mutex);
+ rp->rp_locked = RP_UNLOCKED;
}
-static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
- struct nfs4_stateowner *so)
+static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
+ struct nfs4_stateowner *so)
{
if (!nfsd4_has_session(cstate)) {
- mutex_lock(&so->so_replay.rp_mutex);
+ wait_var_event(&so->so_replay.rp_locked,
+ cmpxchg(&so->so_replay.rp_locked,
+ RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
+ if (so->so_replay.rp_locked == RP_UNHASHED)
+ return -EAGAIN;
cstate->replay_owner = nfs4_get_stateowner(so);
}
+ return 0;
}
void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
@@ -4248,7 +4943,7 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
- mutex_unlock(&so->so_replay.rp_mutex);
+ store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED);
nfs4_put_stateowner(so);
}
}
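/*
 * Illustrative sketch (not part of the patch above): the three-state
 * replay "lock" that replaces rp_mutex, modelled with a C11 atomic and
 * compare-and-swap. Where this sketch spins, nfsd sleeps in
 * wait_var_event(); the unhashed state tells a would-be acquirer that the
 * owner is going away and it should retry its lookup, as in
 * nfsd4_cstate_assign_replay() returning -EAGAIN.
 */
#include <stdatomic.h>
#include <stdio.h>

enum rp_state { S_UNLOCKED, S_LOCKED, S_UNHASHED };

static _Atomic int rp_locked = S_UNLOCKED;

static int assign_replay(void)	/* 0 on success, -1 if owner is unhashed */
{
	int old = S_UNLOCKED;

	while (!atomic_compare_exchange_strong(&rp_locked, &old, S_LOCKED)) {
		if (old == S_UNHASHED)
			return -1;	/* owner going away: retry the lookup */
		old = S_UNLOCKED;	/* S_LOCKED: nfsd would sleep here */
	}
	return 0;
}

static void clear_replay(void)
{
	/* store_release_wake_up() in nfsd: unlock and wake any waiter */
	atomic_store(&rp_locked, S_UNLOCKED);
}

int main(void)
{
	printf("%d\n", assign_replay());	/* 0: took the lock */
	clear_replay();
	atomic_store(&rp_locked, S_UNHASHED);	/* move_to_close_lru() path */
	printf("%d\n", assign_replay());	/* -1: owner going away */
	return 0;
}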
@@ -4314,7 +5009,8 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
continue;
if (local->st_stateowner != &oo->oo_owner)
continue;
- if (local->st_stid.sc_type == NFS4_OPEN_STID) {
+ if (local->st_stid.sc_type == SC_TYPE_OPEN &&
+ !local->st_stid.sc_status) {
ret = local;
refcount_inc(&ret->st_stid.sc_count);
break;
@@ -4323,22 +5019,75 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
return ret;
}
-static __be32
-nfsd4_verify_open_stid(struct nfs4_stid *s)
+static void nfsd4_drop_revoked_stid(struct nfs4_stid *s)
+ __releases(&s->sc_client->cl_lock)
{
- __be32 ret = nfs_ok;
+ struct nfs4_client *cl = s->sc_client;
+ LIST_HEAD(reaplist);
+ struct nfs4_ol_stateid *stp;
+ struct nfs4_delegation *dp;
+ bool unhashed;
switch (s->sc_type) {
- default:
+ case SC_TYPE_OPEN:
+ stp = openlockstateid(s);
+ if (unhash_open_stateid(stp, &reaplist))
+ put_ol_stateid_locked(stp, &reaplist);
+ spin_unlock(&cl->cl_lock);
+ free_ol_stateid_reaplist(&reaplist);
break;
- case 0:
- case NFS4_CLOSED_STID:
- case NFS4_CLOSED_DELEG_STID:
- ret = nfserr_bad_stateid;
+ case SC_TYPE_LOCK:
+ stp = openlockstateid(s);
+ unhashed = unhash_lock_stateid(stp);
+ spin_unlock(&cl->cl_lock);
+ if (unhashed)
+ nfs4_put_stid(s);
break;
- case NFS4_REVOKED_DELEG_STID:
- ret = nfserr_deleg_revoked;
+ case SC_TYPE_DELEG:
+ dp = delegstateid(s);
+ list_del_init(&dp->dl_recall_lru);
+ spin_unlock(&cl->cl_lock);
+ nfs4_put_stid(s);
+ break;
+ default:
+ spin_unlock(&cl->cl_lock);
+ }
+}
+
+static void nfsd40_drop_revoked_stid(struct nfs4_client *cl,
+ stateid_t *stid)
+{
+ /* NFSv4.0 has no way for the client to tell the server
+ * that it can forget an admin-revoked stateid.
+ * So we keep it around until the first time that the
+ * client uses it, and drop it the first time
+ * nfserr_admin_revoked is returned.
+ * For v4.1 and later we wait until explicitly told
+ * to free the stateid.
+ */
+ if (cl->cl_minorversion == 0) {
+ struct nfs4_stid *st;
+
+ spin_lock(&cl->cl_lock);
+ st = find_stateid_locked(cl, stid);
+ if (st)
+ nfsd4_drop_revoked_stid(st);
+ else
+ spin_unlock(&cl->cl_lock);
}
+}
+
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+ __be32 ret = nfs_ok;
+
+ if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
+ ret = nfserr_admin_revoked;
+ else if (s->sc_status & SC_STATUS_REVOKED)
+ ret = nfserr_deleg_revoked;
+ else if (s->sc_status & SC_STATUS_CLOSED)
+ ret = nfserr_bad_stateid;
return ret;
}
@@ -4350,6 +5099,10 @@ nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
ret = nfsd4_verify_open_stid(&stp->st_stid);
+ if (ret == nfserr_admin_revoked)
+ nfsd40_drop_revoked_stid(stp->st_stid.sc_client,
+ &stp->st_stid.sc_stateid);
+
if (ret != nfs_ok)
mutex_unlock(&stp->st_mutex);
return ret;
@@ -4371,34 +5124,46 @@ nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
}
static struct nfs4_openowner *
-alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
- struct nfsd4_compound_state *cstate)
+find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
+ struct nfsd4_compound_state *cstate)
{
struct nfs4_client *clp = cstate->clp;
- struct nfs4_openowner *oo, *ret;
+ struct nfs4_openowner *oo, *new = NULL;
- oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
- if (!oo)
- return NULL;
- oo->oo_owner.so_ops = &openowner_ops;
- oo->oo_owner.so_is_open_owner = 1;
- oo->oo_owner.so_seqid = open->op_seqid;
- oo->oo_flags = 0;
- if (nfsd4_has_session(cstate))
- oo->oo_flags |= NFS4_OO_CONFIRMED;
- oo->oo_time = 0;
- oo->oo_last_closed_stid = NULL;
- INIT_LIST_HEAD(&oo->oo_close_lru);
+retry:
spin_lock(&clp->cl_lock);
- ret = find_openstateowner_str_locked(strhashval, open, clp);
- if (ret == NULL) {
- hash_openowner(oo, clp, strhashval);
- ret = oo;
- } else
- nfs4_free_stateowner(&oo->oo_owner);
-
+ oo = find_openstateowner_str(strhashval, open, clp);
+ if (!oo && new) {
+ hash_openowner(new, clp, strhashval);
+ spin_unlock(&clp->cl_lock);
+ return new;
+ }
spin_unlock(&clp->cl_lock);
- return ret;
+
+ if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) {
+ /* Replace unconfirmed owners without checking for replay. */
+ release_openowner(oo);
+ oo = NULL;
+ }
+ if (oo) {
+ if (new)
+ nfs4_free_stateowner(&new->oo_owner);
+ return oo;
+ }
+
+ new = alloc_stateowner(openowner_slab, &open->op_owner, clp);
+ if (!new)
+ return NULL;
+ new->oo_owner.so_ops = &openowner_ops;
+ new->oo_owner.so_is_open_owner = 1;
+ new->oo_owner.so_seqid = open->op_seqid;
+ new->oo_flags = 0;
+ if (nfsd4_has_session(cstate))
+ new->oo_flags |= NFS4_OO_CONFIRMED;
+ new->oo_time = 0;
+ new->oo_last_closed_stid = NULL;
+ INIT_LIST_HEAD(&new->oo_close_lru);
+ goto retry;
}
static struct nfs4_ol_stateid *
@@ -4418,13 +5183,19 @@ retry:
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
+ if (nfs4_openowner_unhashed(oo)) {
+ mutex_unlock(&stp->st_mutex);
+ stp = NULL;
+ goto out_unlock;
+ }
+
retstp = nfsd4_find_existing_open(fp, open);
if (retstp)
goto out_unlock;
open->op_stp = NULL;
refcount_inc(&stp->st_stid.sc_count);
- stp->st_stid.sc_type = NFS4_OPEN_STID;
+ stp->st_stid.sc_type = SC_TYPE_OPEN;
INIT_LIST_HEAD(&stp->st_locks);
stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
get_nfs4_file(fp);
@@ -4474,7 +5245,10 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* Wait for the refcount to drop to 2. Since it has been unhashed,
* there should be no danger of the refcount going back up again at
* this point.
+ * Some threads with a reference might be waiting for rp_locked,
+ * so tell them to stop waiting.
*/
+ store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
@@ -4493,71 +5267,80 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
nfs4_put_stid(&last->st_stid);
}
-/* search file_hashtbl[] for file */
-static struct nfs4_file *
-find_file_locked(struct svc_fh *fh, unsigned int hashval)
+static noinline_for_stack struct nfs4_file *
+nfsd4_file_hash_lookup(const struct svc_fh *fhp)
{
- struct nfs4_file *fp;
+ struct inode *inode = d_inode(fhp->fh_dentry);
+ struct rhlist_head *tmp, *list;
+ struct nfs4_file *fi;
- hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
- lockdep_is_held(&state_lock)) {
- if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
- if (refcount_inc_not_zero(&fp->fi_ref))
- return fp;
+ rcu_read_lock();
+ list = rhltable_lookup(&nfs4_file_rhltable, &inode,
+ nfs4_file_rhash_params);
+ rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
+ if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
+ if (refcount_inc_not_zero(&fi->fi_ref)) {
+ rcu_read_unlock();
+ return fi;
+ }
}
}
+ rcu_read_unlock();
return NULL;
}
-static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
- unsigned int hashval)
+/*
+ * On hash insertion, identify entries with the same inode but
+ * distinct filehandles. They will all be on the list returned
+ * by rhltable_lookup().
+ *
+ * inode->i_lock prevents racing insertions from adding an entry
+ * for the same inode/fhp pair twice.
+ */
+static noinline_for_stack struct nfs4_file *
+nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
{
- struct nfs4_file *fp;
+ struct inode *inode = d_inode(fhp->fh_dentry);
+ struct rhlist_head *tmp, *list;
struct nfs4_file *ret = NULL;
bool alias_found = false;
+ struct nfs4_file *fi;
+ int err;
- spin_lock(&state_lock);
- hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
- lockdep_is_held(&state_lock)) {
- if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
- if (refcount_inc_not_zero(&fp->fi_ref))
- ret = fp;
- } else if (d_inode(fh->fh_dentry) == fp->fi_inode)
- fp->fi_aliased = alias_found = true;
- }
- if (likely(ret == NULL)) {
- nfsd4_init_file(fh, hashval, new);
- new->fi_aliased = alias_found;
- ret = new;
+ rcu_read_lock();
+ spin_lock(&inode->i_lock);
+
+ list = rhltable_lookup(&nfs4_file_rhltable, &inode,
+ nfs4_file_rhash_params);
+ rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
+ if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
+ if (refcount_inc_not_zero(&fi->fi_ref))
+ ret = fi;
+ } else
+ fi->fi_aliased = alias_found = true;
}
- spin_unlock(&state_lock);
- return ret;
-}
+ if (ret)
+ goto out_unlock;
-static struct nfs4_file * find_file(struct svc_fh *fh)
-{
- struct nfs4_file *fp;
- unsigned int hashval = file_hashval(fh);
+ nfsd4_file_init(fhp, new);
+ err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
+ nfs4_file_rhash_params);
+ if (err)
+ goto out_unlock;
- rcu_read_lock();
- fp = find_file_locked(fh, hashval);
+ new->fi_aliased = alias_found;
+ ret = new;
+
+out_unlock:
+ spin_unlock(&inode->i_lock);
rcu_read_unlock();
- return fp;
+ return ret;
}
-static struct nfs4_file *
-find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
+static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
{
- struct nfs4_file *fp;
- unsigned int hashval = file_hashval(fh);
-
- rcu_read_lock();
- fp = find_file_locked(fh, hashval);
- rcu_read_unlock();
- if (fp)
- return fp;
-
- return insert_file(new, fh, hashval);
+ rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
+ nfs4_file_rhash_params);
}
/*
@@ -4570,9 +5353,10 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
struct nfs4_file *fp;
__be32 ret = nfs_ok;
- fp = find_file(current_fh);
+ fp = nfsd4_file_hash_lookup(current_fh);
if (!fp)
return ret;
+
/* Check for conflicting share reservations */
spin_lock(&fp->fi_lock);
if (fp->fi_share_deny & deny_type)
@@ -4582,6 +5366,35 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
return ret;
}
+static bool nfsd4_deleg_present(const struct inode *inode)
+{
+ struct file_lock_context *ctx = locks_inode_context(inode);
+
+ return ctx && !list_empty_careful(&ctx->flc_lease);
+}
+
+/**
+ * nfsd_wait_for_delegreturn - wait for delegations to be returned
+ * @rqstp: the RPC transaction being executed
+ * @inode: in-core inode of the file being waited for
+ *
+ * The timeout prevents deadlock if all nfsd threads happen to be
+ * tied up waiting for returning delegations.
+ *
+ * Return values:
+ * %true: delegation was returned
+ * %false: timed out waiting for delegreturn
+ */
+bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
+{
+ long __maybe_unused timeo;
+
+ timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
+ NFSD_DELEGRETURN_TIMEOUT);
+ trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
+ return timeo > 0;
+}
+
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
@@ -4598,7 +5411,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
* queued for a lease break. Don't queue it again.
*/
spin_lock(&state_lock);
- if (dp->dl_time == 0) {
+ if (delegation_hashed(dp) && dp->dl_time == 0) {
dp->dl_time = ktime_get_boottime_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
@@ -4610,9 +5423,11 @@ static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
{
struct nfs4_delegation *dp = cb_to_delegation(cb);
- if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
- dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
- return 1;
+ trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
+
+ if (dp->dl_stid.sc_status)
+ /* CLOSED or REVOKED */
+ return 1;
switch (task->tk_status) {
case 0:
@@ -4647,31 +5462,48 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
.prepare = nfsd4_cb_recall_prepare,
.done = nfsd4_cb_recall_done,
.release = nfsd4_cb_recall_release,
+ .opcode = OP_CB_RECALL,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
+ bool queued;
+
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &dp->dl_recall.cb_flags))
+ return;
+
/*
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
* callback (and since the lease code is serialized by the
- * i_lock) we know the server hasn't removed the lease yet, and
+ * flc_lock) we know the server hasn't removed the lease yet, and
* we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
- nfsd4_run_cb(&dp->dl_recall);
+ queued = nfsd4_run_cb(&dp->dl_recall);
+ WARN_ON_ONCE(!queued);
+ if (!queued)
+ refcount_dec(&dp->dl_stid.sc_count);
}
-/* Called from break_lease() with i_lock held. */
+/* Called from break_lease() with flc_lock held. */
static bool
-nfsd_break_deleg_cb(struct file_lock *fl)
+nfsd_break_deleg_cb(struct file_lease *fl)
{
- bool ret = false;
- struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
+ struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner;
struct nfs4_file *fp = dp->dl_stid.sc_file;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
+ struct nfsd_net *nn;
trace_nfsd_cb_recall(&dp->dl_stid);
+ dp->dl_recalled = true;
+ atomic_inc(&clp->cl_delegs_in_recall);
+ if (try_to_expire_client(clp)) {
+ nn = net_generic(clp->net, nfsd_net_id);
+ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+ }
+
/*
* We don't want the locks code to timeout the lease for us;
* we'll remove it ourself if a delegation isn't returned
@@ -4679,40 +5511,48 @@ nfsd_break_deleg_cb(struct file_lock *fl)
*/
fl->fl_break_time = 0;
- spin_lock(&fp->fi_lock);
fp->fi_had_conflict = true;
nfsd_break_one_deleg(dp);
- spin_unlock(&fp->fi_lock);
- return ret;
+ return false;
}
-static bool nfsd_breaker_owns_lease(struct file_lock *fl)
+/**
+ * nfsd_breaker_owns_lease - Check if lease conflict was resolved
+ * @fl: Lock state to check
+ *
+ * Return values:
+ * %true: Lease conflict was resolved
+ * %false: Lease conflict was not resolved.
+ */
+static bool nfsd_breaker_owns_lease(struct file_lease *fl)
{
- struct nfs4_delegation *dl = fl->fl_owner;
+ struct nfs4_delegation *dl = fl->c.flc_owner;
struct svc_rqst *rqst;
struct nfs4_client *clp;
- if (!i_am_nfsd())
- return NULL;
- rqst = kthread_data(current);
- /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
- if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
- return NULL;
+ rqst = nfsd_current_rqst();
+ if (!nfsd_v4client(rqst))
+ return false;
clp = *(rqst->rq_lease_breaker);
return dl->dl_stid.sc_client == clp;
}
static int
-nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
+nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
struct list_head *dispose)
{
- if (arg & F_UNLCK)
+ struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
+
+ if (arg & F_UNLCK) {
+ if (dp->dl_recalled)
+ atomic_dec(&clp->cl_delegs_in_recall);
return lease_modify(onlist, arg, dispose);
- else
+ } else
return -EAGAIN;
}
-static const struct lock_manager_operations nfsd_lease_mng_ops = {
+static const struct lease_manager_operations nfsd_lease_mng_ops = {
.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
@@ -4787,27 +5627,19 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
clp = cstate->clp;
strhashval = ownerstr_hashval(&open->op_owner);
- oo = find_openstateowner_str(strhashval, open, clp);
+retry:
+ oo = find_or_alloc_open_stateowner(strhashval, open, cstate);
open->op_openowner = oo;
- if (!oo) {
- goto new_owner;
- }
- if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
- /* Replace unconfirmed owners without checking for replay. */
- release_openowner(oo);
- open->op_openowner = NULL;
- goto new_owner;
+ if (!oo)
+ return nfserr_jukebox;
+ if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) {
+ nfs4_put_stateowner(&oo->oo_owner);
+ goto retry;
}
status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
if (status)
return status;
- goto alloc_stateid;
-new_owner:
- oo = alloc_init_open_stateowner(strhashval, open, cstate);
- if (oo == NULL)
- return nfserr_jukebox;
- open->op_openowner = oo;
-alloc_stateid:
+
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
@@ -4825,7 +5657,7 @@ alloc_stateid:
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
- if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
+ if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type))
return nfserr_openmode;
else
return nfs_ok;
@@ -4836,12 +5668,12 @@ static int share_access_to_flags(u32 share_access)
return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
-static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
+static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl,
+ stateid_t *s)
{
struct nfs4_stid *ret;
- ret = find_stateid_by_type(cl, s,
- NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
+ ret = find_stateid_by_type(cl, s, SC_TYPE_DELEG, SC_STATUS_REVOKED);
if (!ret)
return NULL;
return delegstateid(ret);
@@ -4864,10 +5696,15 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
if (deleg == NULL)
goto out;
- if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
+ if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) {
nfs4_put_stid(&deleg->dl_stid);
- if (cl->cl_minorversion)
- status = nfserr_deleg_revoked;
+ status = nfserr_admin_revoked;
+ goto out;
+ }
+ if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) {
+ nfs4_put_stid(&deleg->dl_stid);
+ nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid);
+ status = nfserr_deleg_revoked;
goto out;
}
flags = share_access_to_flags(open->op_share_access);
@@ -4905,16 +5742,19 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
.ia_valid = ATTR_SIZE,
.ia_size = 0,
};
+ struct nfsd_attrs attrs = {
+ .na_iattr = &iattr,
+ };
if (!open->op_truncate)
return 0;
if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
- return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
+ return nfsd_setattr(rqstp, fh, &attrs, NULL);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
- struct nfsd4_open *open)
+ struct nfsd4_open *open, bool new_stp)
{
struct nfsd_file *nf = NULL;
__be32 status;
@@ -4930,6 +5770,13 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
*/
status = nfs4_file_check_deny(fp, open->op_share_deny);
if (status != nfs_ok) {
+ if (status != nfserr_share_denied) {
+ spin_unlock(&fp->fi_lock);
+ goto out;
+ }
+ if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
+ stp, open->op_share_deny, false))
+ status = nfserr_jukebox;
spin_unlock(&fp->fi_lock);
goto out;
}
@@ -4937,6 +5784,13 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
/* set access to the file */
status = nfs4_file_get_access(fp, open->op_share_access);
if (status != nfs_ok) {
+ if (status != nfserr_share_denied) {
+ spin_unlock(&fp->fi_lock);
+ goto out;
+ }
+ if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
+ stp, open->op_share_access, true))
+ status = nfserr_jukebox;
spin_unlock(&fp->fi_lock);
goto out;
}
@@ -4952,9 +5806,12 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
if (!fp->fi_fds[oflag]) {
spin_unlock(&fp->fi_lock);
- status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
- if (status)
+
+ status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
+ open->op_filp, &nf);
+ if (status != nfs_ok)
goto out_put_access;
+
spin_lock(&fp->fi_lock);
if (!fp->fi_fds[oflag]) {
fp->fi_fds[oflag] = nf;
@@ -4983,21 +5840,30 @@ out_put_access:
}
static __be32
-nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
+nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
+ struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
+ struct nfsd4_open *open)
{
__be32 status;
unsigned char old_deny_bmap = stp->st_deny_bmap;
if (!test_access(open->op_share_access, stp))
- return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
+ return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
/* test and set deny mode */
spin_lock(&fp->fi_lock);
status = nfs4_file_check_deny(fp, open->op_share_deny);
- if (status == nfs_ok) {
+ switch (status) {
+ case nfs_ok:
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |=
- (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
+ (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
+ break;
+ case nfserr_share_denied:
+ if (nfs4_resolve_deny_conflicts_locked(fp, false,
+ stp, open->op_share_deny, false))
+ status = nfserr_jukebox;
+ break;
}
spin_unlock(&fp->fi_lock);
@@ -5023,21 +5889,19 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
-static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
- int flag)
+static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp)
{
- struct file_lock *fl;
+ struct file_lease *fl;
- fl = locks_alloc_lock();
+ fl = locks_alloc_lease();
if (!fl)
return NULL;
fl->fl_lmops = &nfsd_lease_mng_ops;
- fl->fl_flags = FL_DELEG;
- fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
- fl->fl_end = OFFSET_MAX;
- fl->fl_owner = (fl_owner_t)dp;
- fl->fl_pid = current->tgid;
- fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
+ fl->c.flc_flags = FL_DELEG;
+ fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK;
+ fl->c.flc_owner = (fl_owner_t)dp;
+ fl->c.flc_pid = current->tgid;
+ fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
return fl;
}
@@ -5046,7 +5910,7 @@ static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
{
struct nfs4_ol_stateid *st;
struct file *f = fp->fi_deleg_file->nf_file;
- struct inode *ino = locks_inode(f);
+ struct inode *ino = file_inode(f);
int writes;
writes = atomic_read(&ino->i_writecount);
@@ -5100,14 +5964,76 @@ static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
return 0;
}
+/*
+ * It's possible that, between opening the dentry and setting the delegation,
+ * the file has been renamed or unlinked. Redo the lookup to verify that this
+ * hasn't happened.
+ */
+static int
+nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
+ struct svc_fh *parent)
+{
+ struct svc_export *exp;
+ struct dentry *child;
+ __be32 err;
+
+ err = nfsd_lookup_dentry(open->op_rqstp, parent,
+ open->op_fname, open->op_fnamelen,
+ &exp, &child);
+
+ if (err)
+ return -EAGAIN;
+
+ exp_put(exp);
+ dput(child);
+ if (child != file_dentry(fp->fi_deleg_file->nf_file))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * We avoid breaking delegations held by a client due to its own activity, but
+ * clearing setuid/setgid bits on a write is an implicit activity and the client
+ * may not notice and continue using the old mode. Avoid giving out a delegation
+ * on setuid/setgid files when the client is requesting an open for write.
+ */
+static int
+nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
+{
+ struct inode *inode = file_inode(nf->nf_file);
+
+ if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
+ (inode->i_mode & (S_ISUID|S_ISGID)))
+ return -EAGAIN;
+ return 0;
+}
+
+#ifdef CONFIG_NFSD_V4_DELEG_TIMESTAMPS
+static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open)
+{
+ return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
+}
+#else /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */
+static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open)
+{
+ return false;
+}
+#endif /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */
+
static struct nfs4_delegation *
-nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
- struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
+nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ struct svc_fh *parent)
{
- int status = 0;
+ bool deleg_ts = nfsd4_want_deleg_timestamps(open);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
+ struct nfs4_file *fp = stp->st_stid.sc_file;
+ struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
- struct nfsd_file *nf;
- struct file_lock *fl;
+ struct nfsd_file *nf = NULL;
+ struct file_lease *fl;
+ int status = 0;
+ u32 dl_type;
/*
* The fi_had_conflict and nfs_get_existing_delegation checks
@@ -5117,19 +6043,55 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
- nf = find_readable_file(fp);
- if (!nf) {
- /*
- * We probably could attempt another open and get a read
- * delegation, but for now, don't bother until the
- * client actually sends us one.
- */
+ /*
+ * Try for a write delegation first. RFC8881 section 10.4 says:
+ *
+ * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
+ * on its own, all opens."
+ *
+ * Furthermore, section 9.1.2 says:
+ *
+ * "In the case of READ, the server may perform the corresponding
+ * check on the access mode, or it may choose to allow READ for
+ * OPEN4_SHARE_ACCESS_WRITE, to accommodate clients whose WRITE
+ * implementation may unavoidably do reads (e.g., due to buffer
+ * cache constraints)."
+ *
+ * We choose to offer a write delegation for OPEN with the
+ * OPEN4_SHARE_ACCESS_WRITE access mode to accommodate such clients.
+ */
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
+ nf = find_writeable_file(fp);
+ dl_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : OPEN_DELEGATE_WRITE;
+ }
+
+ /*
+ * If the file is being opened O_RDONLY, or we couldn't get an O_RDWR
+ * file for some reason, then try for a read delegation instead.
+ */
+ if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
+ nf = find_readable_file(fp);
+ dl_type = deleg_ts ? OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ;
+ }
+
+ if (!nf)
return ERR_PTR(-EAGAIN);
+
+ /*
+ * File delegations and associated locks cannot be recovered if the
+ * export is from an NFS proxy server.
+ */
+ if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) {
+ nfsd_file_put(nf);
+ return ERR_PTR(-EOPNOTSUPP);
}
+
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
status = -EAGAIN;
+ else if (nfsd4_verify_setuid_write(open, nf))
+ status = -EAGAIN;
else if (!fp->fi_deleg_file) {
fp->fi_deleg_file = nf;
/* increment early to prevent fi_deleg_file from being
@@ -5146,30 +6108,49 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
return ERR_PTR(status);
status = -ENOMEM;
- dp = alloc_init_deleg(clp, fp, fh, odstate);
+ dp = alloc_init_deleg(clp, fp, odstate, dl_type);
if (!dp)
goto out_delegees;
- fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
+ fl = nfs4_alloc_init_lease(dp);
if (!fl)
goto out_clnt_odstate;
- status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
+ status = kernel_setlease(fp->fi_deleg_file->nf_file,
+ fl->c.flc_type, &fl, NULL);
if (fl)
- locks_free_lock(fl);
+ locks_free_lease(fl);
if (status)
goto out_clnt_odstate;
+
+ if (parent) {
+ status = nfsd4_verify_deleg_dentry(open, fp, parent);
+ if (status)
+ goto out_unlock;
+ }
+
status = nfsd4_check_conflicting_opens(clp, fp);
if (status)
goto out_unlock;
+ /*
+ * Now that the deleg is set, check again to ensure that nothing
+ * raced in and changed the mode while we weren't looking.
+ */
+ status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
+ if (status)
+ goto out_unlock;
+
+ status = -EAGAIN;
+ if (fp->fi_had_conflict)
+ goto out_unlock;
+
spin_lock(&state_lock);
+ spin_lock(&clp->cl_lock);
spin_lock(&fp->fi_lock);
- if (fp->fi_had_conflict)
- status = -EAGAIN;
- else
- status = hash_delegation_locked(dp, fp);
+ status = hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
+ spin_unlock(&clp->cl_lock);
spin_unlock(&state_lock);
if (status)
@@ -5177,7 +6158,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
return dp;
out_unlock:
- vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
+ kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_stid(&dp->dl_stid);
@@ -5188,51 +6169,124 @@ out_delegees:
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
if (status == -EAGAIN)
open->op_why_no_deleg = WND4_CONTENTION;
else {
open->op_why_no_deleg = WND4_RESOURCE;
switch (open->op_deleg_want) {
- case NFS4_SHARE_WANT_READ_DELEG:
- case NFS4_SHARE_WANT_WRITE_DELEG:
- case NFS4_SHARE_WANT_ANY_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_READ_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG:
break;
- case NFS4_SHARE_WANT_CANCEL:
+ case OPEN4_SHARE_ACCESS_WANT_CANCEL:
open->op_why_no_deleg = WND4_CANCELLED;
break;
- case NFS4_SHARE_WANT_NO_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_NO_DELEG:
WARN_ON_ONCE(1);
}
}
}
+static bool
+nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
+ struct kstat *stat)
+{
+ struct nfsd_file *nf = find_writeable_file(dp->dl_stid.sc_file);
+ struct path path;
+ int rc;
+
+ if (!nf)
+ return false;
+
+ path.mnt = currentfh->fh_export->ex_path.mnt;
+ path.dentry = file_dentry(nf->nf_file);
+
+ rc = vfs_getattr(&path, stat,
+ STATX_MODE | STATX_SIZE | STATX_ATIME |
+ STATX_MTIME | STATX_CTIME | STATX_CHANGE_COOKIE,
+ AT_STATX_SYNC_AS_STAT);
+
+ nfsd_file_put(nf);
+ return rc == 0;
+}
+
+/*
+ * Add NFS4_SHARE_ACCESS_READ to the write delegation granted on OPEN
+ * with NFS4_SHARE_ACCESS_WRITE by allocating a separate nfsd_file and
+ * struct file to be used for reads with the delegation stateid.
+ */
+static bool
+nfsd4_add_rdaccess_to_wrdeleg(struct svc_rqst *rqstp, struct nfsd4_open *open,
+ struct svc_fh *fh, struct nfs4_ol_stateid *stp)
+{
+ struct nfs4_file *fp;
+ struct nfsd_file *nf = NULL;
+
+ if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) ==
+ NFS4_SHARE_ACCESS_WRITE) {
+ if (nfsd_file_acquire_opened(rqstp, fh, NFSD_MAY_READ, NULL, &nf))
+ return (false);
+ fp = stp->st_stid.sc_file;
+ spin_lock(&fp->fi_lock);
+ __nfs4_file_get_access(fp, NFS4_SHARE_ACCESS_READ);
+ fp = stp->st_stid.sc_file;
+ fp->fi_fds[O_RDONLY] = nf;
+ fp->fi_rdeleg_file = nf;
+ spin_unlock(&fp->fi_lock);
+ }
+ return true;
+}
+
/*
- * Attempt to hand out a delegation.
+ * The Linux NFS server does not offer write delegations to NFSv4.0
+ * clients in order to avoid conflicts between write delegations and
+ * GETATTRs requesting CHANGE or SIZE attributes.
+ *
+ * With NFSv4.1 and later minorversions, the SEQUENCE operation that
+ * begins each COMPOUND contains a client ID. Delegation recall can
+ * be avoided when the server recognizes that the client sending a
+ * GETATTR also holds the write delegation it conflicts with.
+ *
+ * However, the NFSv4.0 protocol does not enable a server to
+ * determine that a GETATTR originated from the client holding the
+ * conflicting delegation versus coming from some other client. Per
+ * RFC 7530 Section 16.7.5, the server must recall or send a
+ * CB_GETATTR even when the GETATTR originates from the client that
+ * holds the conflicting delegation.
*
- * Note we don't support write delegations, and won't until the vfs has
- * proper support for them.
+ * An NFSv4.0 client can trigger a pathological situation if it
+ * always sends a DELEGRETURN preceded by a conflicting GETATTR in
+ * the same COMPOUND. COMPOUND execution will always stop at the
+ * GETATTR and the DELEGRETURN will never get executed. The server
+ * eventually revokes the delegation, which can result in loss of
+ * open or lock state.
*/
static void
-nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
- struct nfs4_ol_stateid *stp)
+nfs4_open_delegation(struct svc_rqst *rqstp, struct nfsd4_open *open,
+ struct nfs4_ol_stateid *stp, struct svc_fh *currentfh,
+ struct svc_fh *fh)
{
- struct nfs4_delegation *dp;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
+ bool deleg_ts = nfsd4_want_deleg_timestamps(open);
struct nfs4_client *clp = stp->st_stid.sc_client;
- int cb_up;
+ struct svc_fh *parent = NULL;
+ struct nfs4_delegation *dp;
+ struct kstat stat;
int status = 0;
+ int cb_up;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
- open->op_recall = 0;
+ open->op_recall = false;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!cb_up)
- open->op_recall = 1;
- if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
- goto out_no_deleg;
+ open->op_recall = true;
break;
case NFS4_OPEN_CLAIM_NULL:
+ parent = currentfh;
+ fallthrough;
case NFS4_OPEN_CLAIM_FH:
/*
* Let's not give out any delegations till everyone's
@@ -5243,27 +6297,49 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
goto out_no_deleg;
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out_no_deleg;
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
+ !clp->cl_minorversion)
+ goto out_no_deleg;
break;
default:
goto out_no_deleg;
}
- dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
+ dp = nfs4_set_delegation(open, stp, parent);
if (IS_ERR(dp))
goto out_no_deleg;
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
- trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
- open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
+ struct file *f = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
+
+ if (!nfsd4_add_rdaccess_to_wrdeleg(rqstp, open, fh, stp) ||
+ !nfs4_delegation_stat(dp, currentfh, &stat)) {
+ nfs4_put_stid(&dp->dl_stid);
+ destroy_delegation(dp);
+ goto out_no_deleg;
+ }
+ open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG :
+ OPEN_DELEGATE_WRITE;
+ dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
+ dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat);
+ dp->dl_atime = stat.atime;
+ dp->dl_ctime = stat.ctime;
+ dp->dl_mtime = stat.mtime;
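+ /*
+ * Suppress local c/mtime updates on writes through the delegation
+ * file: while the write delegation is outstanding, the client
+ * supplies the timestamps (see cb_getattr_update_times() and the
+ * FATTR4_WORD2_TIME_DELEG_* decoding in nfs4xdr.c).
+ */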
+ spin_lock(&f->f_lock);
+ f->f_mode |= FMODE_NOCMTIME;
+ spin_unlock(&f->f_lock);
+ trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
+ } else {
+ open->op_delegate_type = deleg_ts && nfs4_delegation_stat(dp, currentfh, &stat) ?
+ OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ;
+ dp->dl_atime = stat.atime;
+ trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
+ }
nfs4_put_stid(&dp->dl_stid);
return;
out_no_deleg:
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
- if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
- open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
- dprintk("NFSD: WARNING: refusing delegation reclaim\n");
- open->op_recall = 1;
- }
+ open->op_delegate_type = OPEN_DELEGATE_NONE;
/* 4.1 client asking for a delegation? */
if (open->op_deleg_want)
@@ -5274,21 +6350,44 @@ out_no_deleg:
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
struct nfs4_delegation *dp)
{
- if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
- dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
- open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
- } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
- dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
- open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ if (deleg_is_write(dp->dl_type)) {
+ if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
+ } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ }
}
/* Otherwise the client must be confused wanting a delegation
* it already has, therefore we don't return
- * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
+ * OPEN_DELEGATE_NONE_EXT and reason.
*/
}
+/* Are we returning only a delegation stateid? */
+static bool open_xor_delegation(struct nfsd4_open *open)
+{
+ if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION))
+ return false;
+ /* Did we actually get a delegation? */
+ if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type))
+ return false;
+ return true;
+}
+
+/**
+ * nfsd4_process_open2 - finish open processing
+ * @rqstp: the RPC transaction being executed
+ * @current_fh: NFSv4 COMPOUND's current filehandle
+ * @open: OPEN arguments
+ *
+ * If successful, (1) truncate the file if open->op_truncate was
+ * set, (2) set open->op_stateid, (3) set open->op_delegation.
+ *
+ * Returns %nfs_ok on success; otherwise an nfs4stat value in
+ * network byte order is returned.
+ */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
@@ -5305,11 +6404,27 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* and check for delegations in the process of being recalled.
* If not found, create the nfs4_file struct
*/
- fp = find_or_add_file(open->op_file, current_fh);
+ fp = nfsd4_file_hash_insert(open->op_file, current_fh);
+ if (unlikely(!fp))
+ return nfserr_jukebox;
if (fp != open->op_file) {
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
+ if (dp && nfsd4_is_deleg_cur(open) &&
+ (dp->dl_stid.sc_file != fp)) {
+ /*
+ * RFC8881 section 8.2.4 mandates the server to return
+ * NFS4ERR_BAD_STATEID if the selected table entry does
+ * not match the current filehandle. However, returning
+ * NFS4ERR_BAD_STATEID in the OPEN can cause the client
+ * to repeatedly retry the operation with the same
+ * stateid, since the stateid itself is valid. To avoid
+ * this situation, NFSD returns NFS4ERR_INVAL instead.
+ */
+ status = nfserr_inval;
+ goto out;
+ }
stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
@@ -5320,6 +6435,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
if (!stp) {
stp = init_open_stateid(fp, open);
+ if (!stp) {
+ status = nfserr_jukebox;
+ goto out;
+ }
+
if (!open->op_stp)
new_stp = true;
}
@@ -5338,9 +6458,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
goto out;
}
} else {
- status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
+ status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
if (status) {
- stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
goto out;
@@ -5356,8 +6475,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
- if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_WANTED;
goto nodeleg;
}
@@ -5367,13 +6486,25 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
- nfs4_open_delegation(current_fh, open, stp);
+ nfs4_open_delegation(rqstp, open, stp,
+ &resp->cstate.current_fh, current_fh);
+
+ /*
+ * If there is an existing open stateid, it must be updated and
+ * returned. Only respect WANT_OPEN_XOR_DELEGATION when a new
+ * open stateid would have to be created.
+ */
+ if (new_stp && open_xor_delegation(open)) {
+ memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid));
+ open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID;
+ release_open_stateid(stp);
+ }
nodeleg:
status = nfs_ok;
trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
- if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
+ if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp &&
open->op_deleg_want)
nfsd4_deleg_xgrade_none_ext(open, dp);
@@ -5384,7 +6515,7 @@ out:
/*
* To finish the open response, we just need to set the rflags.
*/
- open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
+ open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
if (nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
@@ -5401,12 +6532,8 @@ out:
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
- if (open->op_openowner) {
- struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
-
- nfsd4_cstate_assign_replay(cstate, so);
- nfs4_put_stateowner(so);
- }
+ if (open->op_openowner)
+ nfs4_put_stateowner(&open->op_openowner->oo_owner);
if (open->op_file)
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
@@ -5516,7 +6643,6 @@ void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
init_waitqueue_head(&nn->nfsd_ssc_waitq);
}
-EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
/*
* This is called when nfsd is being shutdown, after all inter_ssc
@@ -5572,10 +6698,168 @@ static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
}
#endif
+/* Check if any lock belonging to this lockowner has any blockers */
+static bool
+nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
+{
+ struct file_lock_context *ctx;
+ struct nfs4_ol_stateid *stp;
+ struct nfs4_file *nf;
+
+ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
+ nf = stp->st_stid.sc_file;
+ ctx = locks_inode_context(nf->fi_inode);
+ if (!ctx)
+ continue;
+ if (locks_owner_has_blockers(ctx, lo))
+ return true;
+ }
+ return false;
+}
+
+static bool
+nfs4_anylock_blockers(struct nfs4_client *clp)
+{
+ int i;
+ struct nfs4_stateowner *so;
+ struct nfs4_lockowner *lo;
+
+ if (atomic_read(&clp->cl_delegs_in_recall))
+ return true;
+ spin_lock(&clp->cl_lock);
+ for (i = 0; i < OWNER_HASH_SIZE; i++) {
+ list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
+ so_strhash) {
+ if (so->so_is_open_owner)
+ continue;
+ lo = lockowner(so);
+ if (nfs4_lockowner_has_blockers(lo)) {
+ spin_unlock(&clp->cl_lock);
+ return true;
+ }
+ }
+ }
+ spin_unlock(&clp->cl_lock);
+ return false;
+}
+
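+/*
+ * Scan client_lru (oldest first) and collect expired clients to destroy.
+ * Clients already marked NFSD4_EXPIRABLE are always reaped. A client whose
+ * lease has expired becomes a courtesy client and is reaped only if it no
+ * longer holds state, is blocking another lock request, or falls within
+ * this run's trim budget (NFSD_CLIENT_MAX_TRIM_PER_RUN, nonzero only when
+ * the client count has reached nn->nfs4_max_clients).
+ */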
+static void
+nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
+ struct laundry_time *lt)
+{
+ unsigned int maxreap, reapcnt = 0;
+ struct list_head *pos, *next;
+ struct nfs4_client *clp;
+
+ maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
+ NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
+ INIT_LIST_HEAD(reaplist);
+ spin_lock(&nn->client_lock);
+ list_for_each_safe(pos, next, &nn->client_lru) {
+ clp = list_entry(pos, struct nfs4_client, cl_lru);
+ if (clp->cl_state == NFSD4_EXPIRABLE)
+ goto exp_client;
+ if (!state_expired(lt, clp->cl_time))
+ break;
+ if (!atomic_read(&clp->cl_rpc_users)) {
+ if (clp->cl_state == NFSD4_ACTIVE)
+ atomic_inc(&nn->nfsd_courtesy_clients);
+ clp->cl_state = NFSD4_COURTESY;
+ }
+ if (!client_has_state(clp))
+ goto exp_client;
+ if (!nfs4_anylock_blockers(clp))
+ if (reapcnt >= maxreap)
+ continue;
+exp_client:
+ if (!mark_client_expired_locked(clp)) {
+ list_add(&clp->cl_lru, reaplist);
+ reapcnt++;
+ }
+ }
+ spin_unlock(&nn->client_lock);
+}
+
+static void
+nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
+ struct list_head *reaplist)
+{
+ unsigned int maxreap = 0, reapcnt = 0;
+ struct list_head *pos, *next;
+ struct nfs4_client *clp;
+
+ maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
+ INIT_LIST_HEAD(reaplist);
+
+ spin_lock(&nn->client_lock);
+ list_for_each_safe(pos, next, &nn->client_lru) {
+ clp = list_entry(pos, struct nfs4_client, cl_lru);
+ if (clp->cl_state == NFSD4_ACTIVE)
+ break;
+ if (reapcnt >= maxreap)
+ break;
+ if (!mark_client_expired_locked(clp)) {
+ list_add(&clp->cl_lru, reaplist);
+ reapcnt++;
+ }
+ }
+ spin_unlock(&nn->client_lock);
+}
+
+static void
+nfs4_process_client_reaplist(struct list_head *reaplist)
+{
+ struct list_head *pos, *next;
+ struct nfs4_client *clp;
+
+ list_for_each_safe(pos, next, reaplist) {
+ clp = list_entry(pos, struct nfs4_client, cl_lru);
+ trace_nfsd_clid_purged(&clp->cl_clientid);
+ list_del_init(&clp->cl_lru);
+ expire_client(clp);
+ }
+}
+
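+/*
+ * NFSv4.0 clients cannot be told that state was administratively revoked,
+ * so once nfs40_last_revoke is at or before this run's cutoff, free any
+ * remaining SC_STATUS_ADMIN_REVOKED stateids. nfsd4_drop_revoked_stid()
+ * drops ->cl_lock and ->client_lock must be released around it, so the
+ * scan restarts from the head of client_lru after each drop.
+ */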
+static void nfs40_clean_admin_revoked(struct nfsd_net *nn,
+ struct laundry_time *lt)
+{
+ struct nfs4_client *clp;
+
+ spin_lock(&nn->client_lock);
+ if (nn->nfs40_last_revoke == 0 ||
+ nn->nfs40_last_revoke > lt->cutoff) {
+ spin_unlock(&nn->client_lock);
+ return;
+ }
+ nn->nfs40_last_revoke = 0;
+
+retry:
+ list_for_each_entry(clp, &nn->client_lru, cl_lru) {
+ unsigned long id, tmp;
+ struct nfs4_stid *stid;
+
+ if (atomic_read(&clp->cl_admin_revoked) == 0)
+ continue;
+
+ spin_lock(&clp->cl_lock);
+ idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
+ if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
+ refcount_inc(&stid->sc_count);
+ spin_unlock(&nn->client_lock);
+ /* this function drops ->cl_lock */
+ nfsd4_drop_revoked_stid(stid);
+ nfs4_put_stid(stid);
+ spin_lock(&nn->client_lock);
+ goto retry;
+ }
+ spin_unlock(&clp->cl_lock);
+ }
+ spin_unlock(&nn->client_lock);
+}
+
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
- struct nfs4_client *clp;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct nfs4_ol_stateid *stp;
@@ -5594,39 +6878,28 @@ nfs4_laundromat(struct nfsd_net *nn)
goto out;
}
nfsd4_end_grace(nn);
- INIT_LIST_HEAD(&reaplist);
spin_lock(&nn->s2s_cp_lock);
idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
- if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
+ if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
state_expired(&lt, cps->cpntf_time))
_free_cpntf_state_locked(nn, cps);
}
spin_unlock(&nn->s2s_cp_lock);
+ nfsd4_async_copy_reaper(nn);
+ nfs4_get_client_reaplist(nn, &reaplist, &lt);
+ nfs4_process_client_reaplist(&reaplist);
+
+ nfs40_clean_admin_revoked(nn, &lt);
- spin_lock(&nn->client_lock);
- list_for_each_safe(pos, next, &nn->client_lru) {
- clp = list_entry(pos, struct nfs4_client, cl_lru);
- if (!state_expired(&lt, clp->cl_time))
- break;
- if (mark_client_expired_locked(clp))
- continue;
- list_add(&clp->cl_lru, &reaplist);
- }
- spin_unlock(&nn->client_lock);
- list_for_each_safe(pos, next, &reaplist) {
- clp = list_entry(pos, struct nfs4_client, cl_lru);
- trace_nfsd_clid_purged(&clp->cl_clientid);
- list_del_init(&clp->cl_lru);
- expire_client(clp);
- }
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
if (!state_expired(&lt, dp->dl_time))
break;
- WARN_ON(!unhash_delegation_locked(dp));
+ refcount_inc(&dp->dl_stid.sc_count);
+ unhash_delegation_locked(dp, SC_STATUS_REVOKED);
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
@@ -5685,11 +6958,12 @@ nfs4_laundromat(struct nfsd_net *nn)
/* service the server-to-server copy delayed unmount list */
nfsd4_ssc_expire_umount(nn);
#endif
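+ /* Over the delegation limit: ask clients to return some voluntarily */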
+ if (atomic_long_read(&num_delegations) >= max_delegations)
+ deleg_reaper(nn);
out:
return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
}
-static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);
static void
@@ -5704,6 +6978,60 @@ laundromat_main(struct work_struct *laundry)
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
+static void
+courtesy_client_reaper(struct nfsd_net *nn)
+{
+ struct list_head reaplist;
+
+ nfs4_get_courtesy_client_reaplist(nn, &reaplist);
+ nfs4_process_client_reaplist(&reaplist);
+}
+
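+/*
+ * Ask active clients to voluntarily return delegations by sending a
+ * CB_RECALL_ANY for read and write data delegations. A client is skipped
+ * if it holds no delegations, already has a recall or CB_RECALL_ANY in
+ * progress, was asked within the last 5 seconds, or its backchannel is
+ * down.
+ */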
+static void
+deleg_reaper(struct nfsd_net *nn)
+{
+ struct list_head *pos, *next;
+ struct nfs4_client *clp;
+
+ spin_lock(&nn->client_lock);
+ list_for_each_safe(pos, next, &nn->client_lru) {
+ clp = list_entry(pos, struct nfs4_client, cl_lru);
+
+ if (clp->cl_state != NFSD4_ACTIVE)
+ continue;
+ if (list_empty(&clp->cl_delegations))
+ continue;
+ if (atomic_read(&clp->cl_delegs_in_recall))
+ continue;
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags))
+ continue;
+ if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)
+ continue;
+ if (clp->cl_cb_state != NFSD4_CB_UP)
+ continue;
+
+ /* release in nfsd4_cb_recall_any_release */
+ kref_get(&clp->cl_nfsdfs.cl_ref);
+ clp->cl_ra_time = ktime_get_boottime_seconds();
+ clp->cl_ra->ra_keep = 0;
+ clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
+ BIT(RCA4_TYPE_MASK_WDATA_DLG);
+ trace_nfsd_cb_recall_any(clp->cl_ra);
+ nfsd4_run_cb(&clp->cl_ra->ra_cb);
+ }
+ spin_unlock(&nn->client_lock);
+}
+
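+/*
+ * Work item behind the "nfsd-client" shrinker: relieve memory pressure by
+ * expiring courtesy clients and asking active clients to return unneeded
+ * delegations.
+ */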
+static void
+nfsd4_state_shrinker_worker(struct work_struct *work)
+{
+ struct nfsd_net *nn = container_of(work, struct nfsd_net,
+ nfsd_shrinker_work);
+
+ courtesy_client_reaper(nn);
+ deleg_reaper(nn);
+}
+
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
@@ -5782,6 +7110,9 @@ static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_sti
if (ret == nfs_ok)
ret = check_stateid_generation(in, &s->sc_stateid, has_session);
spin_unlock(&s->sc_lock);
+ if (ret == nfserr_admin_revoked)
+ nfsd40_drop_revoked_stid(s->sc_client,
+ &s->sc_stateid);
return ret;
}
@@ -5801,8 +7132,6 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
- if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
- return status;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
@@ -5810,45 +7139,52 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
if (status)
goto out_unlock;
+ status = nfsd4_verify_open_stid(s);
+ if (status)
+ goto out_unlock;
+
switch (s->sc_type) {
- case NFS4_DELEG_STID:
+ case SC_TYPE_DELEG:
status = nfs_ok;
break;
- case NFS4_REVOKED_DELEG_STID:
- status = nfserr_deleg_revoked;
- break;
- case NFS4_OPEN_STID:
- case NFS4_LOCK_STID:
+ case SC_TYPE_OPEN:
+ case SC_TYPE_LOCK:
status = nfsd4_check_openowner_confirmed(openlockstateid(s));
break;
default:
printk("unknown stateid type %x\n", s->sc_type);
- fallthrough;
- case NFS4_CLOSED_STID:
- case NFS4_CLOSED_DELEG_STID:
status = nfserr_bad_stateid;
}
out_unlock:
spin_unlock(&cl->cl_lock);
+ if (status == nfserr_admin_revoked)
+ nfsd40_drop_revoked_stid(cl, stateid);
return status;
}
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
- stateid_t *stateid, unsigned char typemask,
+ stateid_t *stateid,
+ unsigned short typemask, unsigned short statusmask,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
+ struct nfs4_stid *stid;
bool return_revoked = false;
/*
* only return revoked delegations if explicitly asked.
* otherwise we report revoked or bad_stateid status.
*/
- if (typemask & NFS4_REVOKED_DELEG_STID)
+ if (statusmask & SC_STATUS_REVOKED)
return_revoked = true;
- else if (typemask & NFS4_DELEG_STID)
- typemask |= NFS4_REVOKED_DELEG_STID;
+ if (typemask & SC_TYPE_DELEG)
+ /* Always allow REVOKED for DELEG so we can
+ * return the appropriate error.
+ */
+ statusmask |= SC_STATUS_REVOKED;
+
+ statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
@@ -5861,38 +7197,41 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
}
if (status)
return status;
- *s = find_stateid_by_type(cstate->clp, stateid, typemask);
- if (!*s)
- return nfserr_bad_stateid;
- if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
- nfs4_put_stid(*s);
- if (cstate->minorversion)
- return nfserr_deleg_revoked;
+ stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask);
+ if (!stid)
return nfserr_bad_stateid;
+ if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) {
+ nfs4_put_stid(stid);
+ return nfserr_deleg_revoked;
+ }
+ if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
+ nfsd40_drop_revoked_stid(cstate->clp, stateid);
+ nfs4_put_stid(stid);
+ return nfserr_admin_revoked;
}
+ *s = stid;
return nfs_ok;
}
static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
- if (!s)
+ struct nfsd_file *ret = NULL;
+
+ if (!s || s->sc_status)
return NULL;
switch (s->sc_type) {
- case NFS4_DELEG_STID:
- if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
- return NULL;
- return nfsd_file_get(s->sc_file->fi_deleg_file);
- case NFS4_OPEN_STID:
- case NFS4_LOCK_STID:
+ case SC_TYPE_DELEG:
+ case SC_TYPE_OPEN:
+ case SC_TYPE_LOCK:
if (flags & RD_STATE)
- return find_readable_file(s->sc_file);
+ ret = find_readable_file(s->sc_file);
else
- return find_writeable_file(s->sc_file);
+ ret = find_writeable_file(s->sc_file);
}
- return NULL;
+ return ret;
}
static __be32
@@ -5916,7 +7255,8 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
nf = nfs4_find_file(s, flags);
if (nf) {
- status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
+ status = nfsd_permission(&rqstp->rq_cred,
+ fhp->fh_export, fhp->fh_dentry,
acc | NFSD_MAY_OWNER_OVERRIDE);
if (status) {
nfsd_file_put(nf);
@@ -5934,12 +7274,12 @@ out:
static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
- WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
- if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
+ WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
+ if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
return;
list_del(&cps->cp_list);
idr_remove(&nn->s2s_cp_stateids,
- cps->cp_stateid.stid.si_opaque.so_id);
+ cps->cp_stateid.cs_stid.si_opaque.so_id);
kfree(cps);
}
/*
@@ -5961,12 +7301,12 @@ __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
if (cps_t) {
state = container_of(cps_t, struct nfs4_cpntf_state,
cp_stateid);
- if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
+ if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
state = NULL;
goto unlock;
}
if (!clp)
- refcount_inc(&state->cp_stateid.sc_count);
+ refcount_inc(&state->cp_stateid.cs_count);
else
_free_cpntf_state_locked(nn, state);
}
@@ -5974,7 +7314,7 @@ unlock:
spin_unlock(&nn->s2s_cp_lock);
if (!state)
return nfserr_bad_stateid;
- if (!clp && state)
+ if (!clp)
*cps = state;
return 0;
}
@@ -5998,7 +7338,8 @@ static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
goto out;
*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
- NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
+ SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
+ 0);
if (*stid)
status = nfs_ok;
else
@@ -6017,8 +7358,19 @@ void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
spin_unlock(&nn->s2s_cp_lock);
}
-/*
- * Checks for stateid operations
+/**
+ * nfs4_preprocess_stateid_op - find and prep stateid for an operation
+ * @rqstp: incoming request from client
+ * @cstate: current compound state
+ * @fhp: filehandle associated with requested stateid
+ * @stateid: stateid (provided by client)
+ * @flags: flags describing type of operation to be done
+ * @nfp: optional nfsd_file return pointer (may be NULL)
+ * @cstid: optional returned nfs4_stid pointer (may be NULL)
+ *
+ * Given info from the client, look up a nfs4_stid for the operation. On
+ * success, it returns a reference to the nfs4_stid and/or the nfsd_file
+ * associated with it.
*/
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
@@ -6040,8 +7392,8 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
}
status = nfsd4_lookup_stateid(cstate, stateid,
- NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
- &s, nn);
+ SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
+ 0, &s, nn);
if (status == nfserr_bad_stateid)
status = find_cpntf_state(nn, stateid, &s);
if (status)
@@ -6052,16 +7404,13 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
goto out;
switch (s->sc_type) {
- case NFS4_DELEG_STID:
+ case SC_TYPE_DELEG:
status = nfs4_check_delegmode(delegstateid(s), flags);
break;
- case NFS4_OPEN_STID:
- case NFS4_LOCK_STID:
+ case SC_TYPE_OPEN:
+ case SC_TYPE_LOCK:
status = nfs4_check_olstateid(openlockstateid(s), flags);
break;
- default:
- status = nfserr_bad_stateid;
- break;
}
if (status)
goto out;
@@ -6140,34 +7489,42 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
- if (!s)
+ if (!s || s->sc_status & SC_STATUS_CLOSED)
goto out_unlock;
+ if (s->sc_status & SC_STATUS_ADMIN_REVOKED) {
+ nfsd4_drop_revoked_stid(s);
+ ret = nfs_ok;
+ goto out;
+ }
spin_lock(&s->sc_lock);
switch (s->sc_type) {
- case NFS4_DELEG_STID:
+ case SC_TYPE_DELEG:
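+ /*
+ * FREE_STATEID may be used to clean up a revoked delegation:
+ * mark it closed, take it off the recall LRU (if still queued),
+ * and release the reference.
+ */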
+ if (s->sc_status & SC_STATUS_REVOKED) {
+ s->sc_status |= SC_STATUS_CLOSED;
+ spin_unlock(&s->sc_lock);
+ dp = delegstateid(s);
+ if (s->sc_status & SC_STATUS_FREEABLE)
+ list_del_init(&dp->dl_recall_lru);
+ s->sc_status |= SC_STATUS_FREED;
+ spin_unlock(&cl->cl_lock);
+ nfs4_put_stid(s);
+ ret = nfs_ok;
+ goto out;
+ }
ret = nfserr_locks_held;
break;
- case NFS4_OPEN_STID:
+ case SC_TYPE_OPEN:
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
break;
ret = nfserr_locks_held;
break;
- case NFS4_LOCK_STID:
+ case SC_TYPE_LOCK:
spin_unlock(&s->sc_lock);
refcount_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
- case NFS4_REVOKED_DELEG_STID:
- spin_unlock(&s->sc_lock);
- dp = delegstateid(s);
- list_del_init(&dp->dl_recall_lru);
- spin_unlock(&cl->cl_lock);
- nfs4_put_stid(s);
- ret = nfs_ok;
- goto out;
- /* Default falls through and returns nfserr_bad_stateid */
}
spin_unlock(&s->sc_lock);
out_unlock:
@@ -6203,12 +7560,24 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
return status;
}
-/*
- * Checks for sequence id mutating operations.
+/**
+ * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
+ * @cstate: compound state
+ * @seqid: seqid (provided by client)
+ * @stateid: stateid (provided by client)
+ * @typemask: mask of allowable types for this operation
+ * @statusmask: mask of allowed states: 0 or SC_STATUS_CLOSED
+ * @stpp: return pointer for the stateid found
+ * @nn: net namespace for request
+ *
+ * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
+ * return it in @stpp. On a nfs_ok return, the returned stateid will
+ * have its st_mutex locked.
*/
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
- stateid_t *stateid, char typemask,
+ stateid_t *stateid,
+ unsigned short typemask, unsigned short statusmask,
struct nfs4_ol_stateid **stpp,
struct nfsd_net *nn)
{
@@ -6219,11 +7588,16 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
trace_nfsd_preprocess(seqid, stateid);
*stpp = NULL;
- status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
+retry:
+ status = nfsd4_lookup_stateid(cstate, stateid,
+ typemask, statusmask, &s, nn);
if (status)
return status;
stp = openlockstateid(s);
- nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
+ if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) {
+ nfs4_put_stateowner(stp->st_stateowner);
+ goto retry;
+ }
status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
if (!status)
@@ -6241,7 +7615,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
struct nfs4_ol_stateid *stp;
status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
- NFS4_OPEN_STID, &stp, nn);
+ SC_TYPE_OPEN, 0, &stp, nn);
if (status)
return status;
oo = openowner(stp->st_stateowner);
@@ -6272,8 +7646,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status;
status = nfs4_preprocess_seqid_op(cstate,
- oc->oc_seqid, &oc->oc_req_stateid,
- NFS4_OPEN_STID, &stp, nn);
+ oc->oc_seqid, &oc->oc_req_stateid,
+ SC_TYPE_OPEN, 0, &stp, nn);
if (status)
goto out;
oo = openowner(stp->st_stateowner);
@@ -6365,11 +7739,12 @@ out:
return status;
}
-static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
struct nfs4_client *clp = s->st_stid.sc_client;
bool unhashed;
LIST_HEAD(reaplist);
+ struct nfs4_ol_stateid *stp;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
@@ -6378,12 +7753,14 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
if (unhashed)
put_ol_stateid_locked(s, &reaplist);
spin_unlock(&clp->cl_lock);
+ list_for_each_entry(stp, &reaplist, st_locks)
+ nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
free_ol_stateid_reaplist(&reaplist);
+ return false;
} else {
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
- if (unhashed)
- move_to_close_lru(s, clp->net);
+ return unhashed;
}
}
@@ -6399,19 +7776,22 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ bool need_move_to_close_list;
- dprintk("NFSD: nfsd4_close on file %pd\n",
+ dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
- &close->cl_stateid,
- NFS4_OPEN_STID|NFS4_CLOSED_STID,
- &stp, nn);
+ &close->cl_stateid,
+ SC_TYPE_OPEN, SC_STATUS_CLOSED,
+ &stp, nn);
nfsd4_bump_seqid(cstate, status);
if (status)
- goto out;
+ goto out;
- stp->st_stid.sc_type = NFS4_CLOSED_STID;
+ spin_lock(&stp->st_stid.sc_client->cl_lock);
+ stp->st_stid.sc_status |= SC_STATUS_CLOSED;
+ spin_unlock(&stp->st_stid.sc_client->cl_lock);
/*
* Technically we don't _really_ have to increment or copy it, since
@@ -6421,8 +7801,10 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- nfsd4_close_open_stateid(stp);
+ need_move_to_close_list = nfsd4_close_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
+ if (need_move_to_close_list)
+ move_to_close_lru(stp, net);
/* v4.1+ suggests that we send a special stateid in here, since the
* clients should just ignore this anyway. Since this is not useful
@@ -6450,10 +7832,11 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
+ status = fh_verify(rqstp, &cstate->current_fh, 0, 0);
+ if (status)
return status;
- status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
+ status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn);
if (status)
goto out;
dp = delegstateid(s);
@@ -6461,7 +7844,10 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto put_stateid;
+ trace_nfsd_deleg_return(stateid);
destroy_delegation(dp);
+ smp_mb__after_atomic();
+ wake_up_var(d_inode(cstate->current_fh.fh_dentry));
put_stateid:
nfs4_put_stid(&dp->dl_stid);
out:
@@ -6497,7 +7883,7 @@ nfs4_transform_lock_offset(struct file_lock *lock)
}
static fl_owner_t
-nfsd4_fl_get_owner(fl_owner_t owner)
+nfsd4_lm_get_owner(fl_owner_t owner)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
@@ -6506,7 +7892,7 @@ nfsd4_fl_get_owner(fl_owner_t owner)
}
static void
-nfsd4_fl_put_owner(fl_owner_t owner)
+nfsd4_lm_put_owner(fl_owner_t owner)
{
struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
@@ -6514,10 +7900,33 @@ nfsd4_fl_put_owner(fl_owner_t owner)
nfs4_put_stateowner(&lo->lo_owner);
}
+/* return true if the client owning this lock is expirable; if so, also schedule the laundromat */
+static bool
+nfsd4_lm_lock_expirable(struct file_lock *cfl)
+{
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner;
+ struct nfs4_client *clp = lo->lo_owner.so_client;
+ struct nfsd_net *nn;
+
+ if (try_to_expire_client(clp)) {
+ nn = net_generic(clp->net, nfsd_net_id);
+ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+ return true;
+ }
+ return false;
+}
+
+/* wait for the laundromat (scheduled by nfsd4_lm_lock_expirable) to complete */
+static void
+nfsd4_lm_expire_lock(void)
+{
+ flush_workqueue(laundry_wq);
+}
+
static void
nfsd4_lm_notify(struct file_lock *fl)
{
- struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner;
struct net *net = lo->lo_owner.so_client->net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfsd4_blocked_lock *nbl = container_of(fl,
@@ -6535,14 +7944,17 @@ nfsd4_lm_notify(struct file_lock *fl)
if (queue) {
trace_nfsd_cb_notify_lock(lo, nbl);
- nfsd4_run_cb(&nbl->nbl_cb);
+ nfsd4_try_run_cb(&nbl->nbl_cb);
}
}
static const struct lock_manager_operations nfsd_posix_mng_ops = {
+ .lm_mod_owner = THIS_MODULE,
.lm_notify = nfsd4_lm_notify,
- .lm_get_owner = nfsd4_fl_get_owner,
- .lm_put_owner = nfsd4_fl_put_owner,
+ .lm_get_owner = nfsd4_lm_get_owner,
+ .lm_put_owner = nfsd4_lm_put_owner,
+ .lm_lock_expirable = nfsd4_lm_lock_expirable,
+ .lm_expire_lock = nfsd4_lm_expire_lock,
};
static inline void
@@ -6551,7 +7963,7 @@ nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
struct nfs4_lockowner *lo;
if (fl->fl_lmops == &nfsd_posix_mng_ops) {
- lo = (struct nfs4_lockowner *) fl->fl_owner;
+ lo = (struct nfs4_lockowner *) fl->c.flc_owner;
xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
GFP_KERNEL);
if (!deny->ld_owner.data)
@@ -6570,7 +7982,7 @@ nevermind:
if (fl->fl_end != NFS4_MAX_UINT64)
deny->ld_length = fl->fl_end - fl->fl_start + 1;
deny->ld_type = NFS4_READ_LT;
- if (fl->fl_type != F_RDLCK)
+ if (fl->c.flc_type != F_RDLCK)
deny->ld_type = NFS4_WRITE_LT;
}
@@ -6692,7 +8104,7 @@ retry:
if (retstp)
goto out_found;
refcount_inc(&stp->st_stid.sc_count);
- stp->st_stid.sc_type = NFS4_LOCK_STID;
+ stp->st_stid.sc_type = SC_TYPE_LOCK;
stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
@@ -6831,13 +8243,12 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
- struct super_block *sb;
__be32 status = 0;
int lkflg;
int err;
bool new = false;
- unsigned char fl_type;
- unsigned int fl_flags = FL_POSIX;
+ unsigned char type;
+ unsigned int flags = FL_POSIX;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -6848,12 +8259,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (check_lock_length(lock->lk_offset, lock->lk_length))
return nfserr_inval;
- if ((status = fh_verify(rqstp, &cstate->current_fh,
- S_IFREG, NFSD_MAY_LOCK))) {
- dprintk("NFSD: nfsd4_lock: permission denied!\n");
+ status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
+ if (status != nfs_ok)
return status;
+ if (exportfs_cannot_lock(cstate->current_fh.fh_dentry->d_sb->s_export_op)) {
+ status = nfserr_notsupp;
+ goto out;
}
- sb = cstate->current_fh.fh_dentry->d_sb;
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -6879,9 +8291,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&lock_stp, &new);
} else {
status = nfs4_preprocess_seqid_op(cstate,
- lock->lk_old_lock_seqid,
- &lock->lk_old_lock_stateid,
- NFS4_LOCK_STID, &lock_stp, nn);
+ lock->lk_old_lock_seqid,
+ &lock->lk_old_lock_stateid,
+ SC_TYPE_LOCK, 0, &lock_stp,
+ nn);
}
if (status)
goto out;
@@ -6900,14 +8313,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
if (lock->lk_reclaim)
- fl_flags |= FL_RECLAIM;
+ flags |= FL_RECLAIM;
fp = lock_stp->st_stid.sc_file;
switch (lock->lk_type) {
case NFS4_READW_LT:
- if (nfsd4_has_session(cstate) &&
- !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
- fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_READ_LT:
spin_lock(&fp->fi_lock);
@@ -6915,12 +8325,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (nf)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
spin_unlock(&fp->fi_lock);
- fl_type = F_RDLCK;
+ type = F_RDLCK;
break;
case NFS4_WRITEW_LT:
- if (nfsd4_has_session(cstate) &&
- !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
- fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_WRITE_LT:
spin_lock(&fp->fi_lock);
@@ -6928,7 +8335,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (nf)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
spin_unlock(&fp->fi_lock);
- fl_type = F_WRLCK;
+ type = F_WRLCK;
break;
default:
status = nfserr_inval;
@@ -6940,6 +8347,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
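+ /*
+ * Blocking (READW/WRITEW) requests from clients with sessions can be
+ * queued (FL_SLEEP) and completed later via a CB_NOTIFY_LOCK
+ * callback, provided the filesystem supports asynchronous locks.
+ */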
+ if (lock->lk_type & (NFS4_READW_LT | NFS4_WRITEW_LT) &&
+ nfsd4_has_session(cstate) &&
+ locks_can_async_lock(nf->nf_file->f_op))
+ flags |= FL_SLEEP;
+
nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
if (!nbl) {
dprintk("NFSD: %s: unable to allocate block!\n", __func__);
@@ -6948,11 +8360,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
file_lock = &nbl->nbl_lock;
- file_lock->fl_type = fl_type;
- file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
- file_lock->fl_pid = current->tgid;
- file_lock->fl_file = nf->nf_file;
- file_lock->fl_flags = fl_flags;
+ file_lock->c.flc_type = type;
+ file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
+ file_lock->c.flc_pid = current->tgid;
+ file_lock->c.flc_file = nf->nf_file;
+ file_lock->c.flc_flags = flags;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = lock->lk_offset;
file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
@@ -6965,11 +8377,12 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
- if (fl_flags & FL_SLEEP) {
+ if (flags & FL_SLEEP) {
nbl->nbl_time = ktime_get_boottime_seconds();
spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
+ kref_get(&nbl->nbl_kref);
spin_unlock(&nn->blocked_locks_lock);
}
@@ -6982,6 +8395,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nn->somebody_reclaimed = true;
break;
case FILE_LOCK_DEFERRED:
+ kref_put(&nbl->nbl_kref, free_nbl);
nbl = NULL;
fallthrough;
case -EAGAIN: /* conflock holds conflicting lock */
@@ -7000,10 +8414,15 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
out:
if (nbl) {
/* dequeue it if we queued it before */
- if (fl_flags & FL_SLEEP) {
+ if (flags & FL_SLEEP) {
spin_lock(&nn->blocked_locks_lock);
- list_del_init(&nbl->nbl_list);
- list_del_init(&nbl->nbl_lru);
+ if (!list_empty(&nbl->nbl_list) &&
+ !list_empty(&nbl->nbl_lru)) {
+ list_del_init(&nbl->nbl_list);
+ list_del_init(&nbl->nbl_lru);
+ kref_put(&nbl->nbl_kref, free_nbl);
+ }
+ /* otherwise nbl is already linked to a reaplist via one of these lists */
spin_unlock(&nn->blocked_locks_lock);
}
free_blocked_lock(nbl);
@@ -7036,6 +8455,14 @@ out:
return status;
}
+void nfsd4_lock_release(union nfsd4_op_u *u)
+{
+ struct nfsd4_lock *lock = &u->lock;
+ struct nfsd4_lock_denied *deny = &lock->lk_denied;
+
+ kfree(deny->ld_owner.data);
+}
+
/*
* The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
* so we do a temporary open here just to get an open file to pass to
@@ -7044,21 +8471,22 @@ out:
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
struct nfsd_file *nf;
+ struct inode *inode;
__be32 err;
err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
if (err)
return err;
- fh_lock(fhp); /* to block new leases till after test_lock: */
- err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
- NFSD_MAY_READ));
+ inode = fhp->fh_dentry->d_inode;
+ inode_lock(inode); /* to block new leases till after test_lock: */
+ err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
if (err)
goto out;
- lock->fl_file = nf->nf_file;
+ lock->c.flc_file = nf->nf_file;
err = nfserrno(vfs_test_lock(nf->nf_file, lock));
- lock->fl_file = NULL;
+ lock->c.flc_file = NULL;
out:
- fh_unlock(fhp);
+ inode_unlock(inode);
nfsd_file_put(nf);
return err;
}
@@ -7101,11 +8529,11 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
switch (lockt->lt_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
- file_lock->fl_type = F_RDLCK;
+ file_lock->c.flc_type = F_RDLCK;
break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
- file_lock->fl_type = F_WRLCK;
+ file_lock->c.flc_type = F_WRLCK;
break;
default:
dprintk("NFSD: nfs4_lockt: bad lock type!\n");
@@ -7115,9 +8543,9 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
if (lo)
- file_lock->fl_owner = (fl_owner_t)lo;
- file_lock->fl_pid = current->tgid;
- file_lock->fl_flags = FL_POSIX;
+ file_lock->c.flc_owner = (fl_owner_t)lo;
+ file_lock->c.flc_pid = current->tgid;
+ file_lock->c.flc_flags = FL_POSIX;
file_lock->fl_start = lockt->lt_offset;
file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
@@ -7128,7 +8556,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- if (file_lock->fl_type != F_UNLCK) {
+ if (file_lock->c.flc_type != F_UNLCK) {
status = nfserr_denied;
nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
}
@@ -7140,6 +8568,14 @@ out:
return status;
}
+void nfsd4_lockt_release(union nfsd4_op_u *u)
+{
+ struct nfsd4_lockt *lockt = &u->lockt;
+ struct nfsd4_lock_denied *deny = &lockt->lt_denied;
+
+ kfree(deny->ld_owner.data);
+}
+
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -7160,8 +8596,8 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_inval;
status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
- &locku->lu_stateid, NFS4_LOCK_STID,
- &stp, nn);
+ &locku->lu_stateid, SC_TYPE_LOCK, 0,
+ &stp, nn);
if (status)
goto out;
nf = find_any_file(stp->st_stid.sc_file);
@@ -7169,6 +8605,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_lock_range;
goto put_stateid;
}
+ if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) {
+ status = nfserr_notsupp;
+ goto put_file;
+ }
+
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
@@ -7176,11 +8617,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto put_file;
}
- file_lock->fl_type = F_UNLCK;
- file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
- file_lock->fl_pid = current->tgid;
- file_lock->fl_file = nf->nf_file;
- file_lock->fl_flags = FL_POSIX;
+ file_lock->c.flc_type = F_UNLCK;
+ file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
+ file_lock->c.flc_pid = current->tgid;
+ file_lock->c.flc_file = nf->nf_file;
+ file_lock->c.flc_flags = FL_POSIX;
file_lock->fl_lmops = &nfsd_posix_mng_ops;
file_lock->fl_start = locku->lu_offset;
@@ -7220,49 +8661,64 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
struct file_lock *fl;
int status = false;
- struct nfsd_file *nf = find_any_file(fp);
+ struct nfsd_file *nf;
struct inode *inode;
struct file_lock_context *flctx;
+ spin_lock(&fp->fi_lock);
+ nf = find_any_file_locked(fp);
if (!nf) {
/* Any valid lock stateid should have some sort of access */
WARN_ON_ONCE(1);
- return status;
+ goto out;
}
- inode = locks_inode(nf->nf_file);
- flctx = inode->i_flctx;
+ inode = file_inode(nf->nf_file);
+ flctx = locks_inode_context(inode);
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
spin_lock(&flctx->flc_lock);
- list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
- if (fl->fl_owner == (fl_owner_t)lowner) {
+ for_each_file_lock(fl, &flctx->flc_posix) {
+ if (fl->c.flc_owner == (fl_owner_t)lowner) {
status = true;
break;
}
}
spin_unlock(&flctx->flc_lock);
}
- nfsd_file_put(nf);
+out:
+ spin_unlock(&fp->fi_lock);
return status;
}
+/**
+ * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
+ * @rqstp: RPC transaction
+ * @cstate: NFSv4 COMPOUND state
+ * @u: RELEASE_LOCKOWNER arguments
+ *
+ * Check if there are any locks still held and if not, free the lockowner
+ * and any lock state that is owned.
+ *
+ * Return values:
+ * %nfs_ok: lockowner released or not found
+ * %nfserr_locks_held: lockowner still in use
+ * %nfserr_stale_clientid: clientid no longer active
+ * %nfserr_expired: clientid not recognized
+ */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
clientid_t *clid = &rlockowner->rl_clientid;
- struct nfs4_stateowner *sop;
- struct nfs4_lockowner *lo = NULL;
struct nfs4_ol_stateid *stp;
- struct xdr_netobj *owner = &rlockowner->rl_owner;
- unsigned int hashval = ownerstr_hashval(owner);
- __be32 status;
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfs4_lockowner *lo;
struct nfs4_client *clp;
- LIST_HEAD (reaplist);
+ LIST_HEAD(reaplist);
+ __be32 status;
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
@@ -7270,48 +8726,36 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
status = set_client(clid, cstate, nn);
if (status)
return status;
-
clp = cstate->clp;
- /* Find the matching lock stateowner */
- spin_lock(&clp->cl_lock);
- list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
- so_strhash) {
-
- if (sop->so_is_open_owner || !same_owner_str(sop, owner))
- continue;
-
- /* see if there are still any locks associated with it */
- lo = lockowner(sop);
- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
- if (check_for_locks(stp->st_stid.sc_file, lo)) {
- status = nfserr_locks_held;
- spin_unlock(&clp->cl_lock);
- return status;
- }
- }
- nfs4_get_stateowner(sop);
- break;
- }
+ spin_lock(&clp->cl_lock);
+ lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
if (!lo) {
spin_unlock(&clp->cl_lock);
- return status;
+ return nfs_ok;
}
+ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
+ if (check_for_locks(stp->st_stid.sc_file, lo)) {
+ spin_unlock(&clp->cl_lock);
+ nfs4_put_stateowner(&lo->lo_owner);
+ return nfserr_locks_held;
+ }
+ }
unhash_lockowner_locked(lo);
while (!list_empty(&lo->lo_owner.so_stateids)) {
stp = list_first_entry(&lo->lo_owner.so_stateids,
struct nfs4_ol_stateid,
st_perstateowner);
- WARN_ON(!unhash_lock_stateid(stp));
+ unhash_lock_stateid(stp);
put_ol_stateid_locked(stp, &reaplist);
}
spin_unlock(&clp->cl_lock);
+
free_ol_stateid_reaplist(&reaplist);
remove_blocked_locks(lo);
nfs4_put_stateowner(&lo->lo_owner);
-
- return status;
+ return nfs_ok;
}
static inline struct nfs4_client_reclaim *
@@ -7331,9 +8775,6 @@ nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
/*
* failure => all reset bets are off, nfserr_no_grace...
- *
- * The caller is responsible for freeing name.data if NULL is returned (it
- * will be freed in nfs4_remove_reclaim_record in the normal case).
*/
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
@@ -7342,6 +8783,22 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
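+ /*
+ * Duplicate name (and princhash) so the caller keeps ownership of its
+ * own buffers; the copies are freed in nfs4_remove_reclaim_record(),
+ * or below if allocation fails.
+ */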
+ name.data = kmemdup(name.data, name.len, GFP_KERNEL);
+ if (!name.data) {
+ dprintk("%s: failed to allocate memory for name.data!\n",
+ __func__);
+ return NULL;
+ }
+ if (princhash.len) {
+ princhash.data = kmemdup(princhash.data, princhash.len, GFP_KERNEL);
+ if (!princhash.data) {
+ dprintk("%s: failed to allocate memory for princhash.data!\n",
+ __func__);
+ kfree(name.data);
+ return NULL;
+ }
+ } else
+ princhash.data = NULL;
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
@@ -7353,6 +8810,9 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
crp->cr_princhash.len = princhash.len;
crp->cr_clp = NULL;
nn->reclaim_str_hashtbl_size++;
+ } else {
+ kfree(name.data);
+ kfree(princhash.data);
}
return crp;
}
@@ -7472,15 +8932,30 @@ static int nfs4_state_create_net(struct net *net)
spin_lock_init(&nn->client_lock);
spin_lock_init(&nn->s2s_cp_lock);
idr_init(&nn->s2s_cp_stateids);
+ atomic_set(&nn->pending_async_copies, 0);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
+ INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
get_net(net);
+ nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
+ if (!nn->nfsd_client_shrinker)
+ goto err_shrinker;
+
+ nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
+ nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
+ nn->nfsd_client_shrinker->private_data = nn;
+
+ shrinker_register(nn->nfsd_client_shrinker);
+
return 0;
+err_shrinker:
+ put_net(net);
+ kfree(nn->sessionid_hashtbl);
err_sessionid:
kfree(nn->unconf_id_hashtbl);
err_unconf_id:
@@ -7546,28 +9021,26 @@ skip_grace:
}
/* initialization to perform when the nfsd service is started: */
-
int
nfs4_state_start(void)
{
int ret;
- laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
- if (laundry_wq == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- ret = nfsd4_create_callback_queue();
+ ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
if (ret)
- goto out_free_laundry;
+ return ret;
+
+ nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot");
+ if (!nfsd_slot_shrinker) {
+ rhltable_destroy(&nfs4_file_rhltable);
+ return -ENOMEM;
+ }
+ nfsd_slot_shrinker->count_objects = nfsd_slot_count;
+ nfsd_slot_shrinker->scan_objects = nfsd_slot_scan;
+ shrinker_register(nfsd_slot_shrinker);
set_max_delegations();
return 0;
-
-out_free_laundry:
- destroy_workqueue(laundry_wq);
-out:
- return ret;
}
void
@@ -7577,6 +9050,8 @@ nfs4_state_shutdown_net(struct net *net)
struct list_head *pos, *next, reaplist;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ shrinker_free(nn->nfsd_client_shrinker);
+ cancel_work_sync(&nn->nfsd_shrinker_work);
cancel_delayed_work_sync(&nn->laundromat_work);
locks_end_grace(&nn->nfsd4_manager);
@@ -7584,7 +9059,7 @@ nfs4_state_shutdown_net(struct net *net)
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
- WARN_ON(!unhash_delegation_locked(dp));
+ unhash_delegation_locked(dp, SC_STATUS_CLOSED);
list_add(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&state_lock);
@@ -7604,8 +9079,8 @@ nfs4_state_shutdown_net(struct net *net)
void
nfs4_state_shutdown(void)
{
- destroy_workqueue(laundry_wq);
- nfsd4_destroy_callback_queue();
+ rhltable_destroy(&nfs4_file_rhltable);
+ shrinker_free(nfsd_slot_shrinker);
}
static void
@@ -7721,3 +9196,274 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
{
get_stateid(cstate, &u->write.wr_stateid);
}
+
+/**
+ * nfsd4_vet_deleg_time - vet and set the timespec for a delegated timestamp update
+ * @req: timestamp from the client
+ * @orig: original timestamp in the inode
+ * @now: current time
+ *
+ * Given a timestamp from the client response, check it against the
+ * current timestamp in the inode and the current time. Returns true
+ * if the inode's timestamp needs to be updated, and false otherwise.
+ * @req may also be changed if the timestamp needs to be clamped.
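+ *
+ * For example, with an inode mtime of 100s, a current time of 150s, and
+ * a client-supplied mtime of 200s, the request is clamped to 150s and
+ * true is returned; a client-supplied mtime of 50s leaves the inode
+ * untouched and false is returned.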
+ */
+bool nfsd4_vet_deleg_time(struct timespec64 *req, const struct timespec64 *orig,
+ const struct timespec64 *now)
+{
+
+ /*
+ * "When the time presented is before the original time, then the
+ * update is ignored." Also no need to update if there is no change.
+ */
+ if (timespec64_compare(req, orig) <= 0)
+ return false;
+
+ /*
+ * "When the time presented is in the future, the server can either
+ * clamp the new time to the current time, or it may
+ * return NFS4ERR_DELAY to the client, allowing it to retry."
+ */
+ if (timespec64_compare(req, now) > 0)
+ *req = *now;
+
+ return true;
+}
+
+static int cb_getattr_update_times(struct dentry *dentry, struct nfs4_delegation *dp)
+{
+ struct inode *inode = d_inode(dentry);
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+ struct iattr attrs = { };
+ int ret;
+
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ struct timespec64 now = current_time(inode);
+
+ attrs.ia_atime = ncf->ncf_cb_atime;
+ attrs.ia_mtime = ncf->ncf_cb_mtime;
+
+ if (nfsd4_vet_deleg_time(&attrs.ia_atime, &dp->dl_atime, &now))
+ attrs.ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
+
+ if (nfsd4_vet_deleg_time(&attrs.ia_mtime, &dp->dl_mtime, &now)) {
+ attrs.ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
+ attrs.ia_ctime = attrs.ia_mtime;
+ if (nfsd4_vet_deleg_time(&attrs.ia_ctime, &dp->dl_ctime, &now))
+ attrs.ia_valid |= ATTR_CTIME | ATTR_CTIME_SET;
+ }
+ } else {
+ attrs.ia_valid |= ATTR_MTIME | ATTR_CTIME;
+ }
+
+ if (!attrs.ia_valid)
+ return 0;
+
+ attrs.ia_valid |= ATTR_DELEG;
+ inode_lock(inode);
+ ret = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ return ret;
+}
+
+/**
+ * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
+ * @rqstp: RPC transaction context
+ * @dentry: dentry of inode to be checked for a conflict
+ * @pdp: returned WRITE delegation, if one was found
+ *
+ * This function is called when there is a conflict between a write
+ * delegation and a change/size GETATTR from another client. The server
+ * must either use the CB_GETATTR to get the current values of the
+ * attributes from the client that holds the delegation or recall the
+ * delegation before replying to the GETATTR. See RFC 8881 section
+ * 18.7.4.
+ *
+ * Returns 0 if there is no conflict; otherwise an nfs_stat
+ * code is returned. If @pdp is set to a non-NULL value, then the
+ * caller must put the reference.
+ */
+__be32
+nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
+ struct nfs4_delegation **pdp)
+{
+ __be32 status;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct file_lock_context *ctx;
+ struct nfs4_delegation *dp = NULL;
+ struct file_lease *fl;
+ struct nfs4_cb_fattr *ncf;
+ struct inode *inode = d_inode(dentry);
+
+ ctx = locks_inode_context(inode);
+ if (!ctx)
+ return nfs_ok;
+
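+ /*
+ * Sentinel: the inode has a write lease that is not an nfsd delegation,
+ * so it must be broken via the normal lease-break path rather than by
+ * sending a CB_GETATTR to an NFS client.
+ */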
+#define NON_NFSD_LEASE ((void *)1)
+
+ spin_lock(&ctx->flc_lock);
+ for_each_file_lock(fl, &ctx->flc_lease) {
+ if (fl->c.flc_flags == FL_LAYOUT)
+ continue;
+ if (fl->c.flc_type == F_WRLCK) {
+ if (fl->fl_lmops == &nfsd_lease_mng_ops)
+ dp = fl->c.flc_owner;
+ else
+ dp = NON_NFSD_LEASE;
+ }
+ break;
+ }
+ if (dp == NULL || dp == NON_NFSD_LEASE ||
+ dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
+ spin_unlock(&ctx->flc_lock);
+ if (dp == NON_NFSD_LEASE) {
+ status = nfserrno(nfsd_open_break_lease(inode,
+ NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ return status;
+ }
+ return 0;
+ }
+
+ nfsd_stats_wdeleg_getattr_inc(nn);
+ refcount_inc(&dp->dl_stid.sc_count);
+ ncf = &dp->dl_cb_fattr;
+ nfs4_cb_getattr(&dp->dl_cb_fattr);
+ spin_unlock(&ctx->flc_lock);
+
+ wait_on_bit_timeout(&ncf->ncf_getattr.cb_flags, NFSD4_CALLBACK_RUNNING,
+ TASK_UNINTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
+ if (ncf->ncf_cb_status) {
+ /* Recall delegation only if client didn't respond */
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ goto out_status;
+ }
+ if (!ncf->ncf_file_modified &&
+ (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
+ ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
+ ncf->ncf_file_modified = true;
+ if (ncf->ncf_file_modified) {
+ int err;
+
+ /*
+ * Per section 10.4.3 of RFC 8881, the server would
+ * not update the file's metadata with the client's
+ * modified size
+ */
+ err = cb_getattr_update_times(dentry, dp);
+ if (err) {
+ status = nfserrno(err);
+ goto out_status;
+ }
+ ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
+ *pdp = dp;
+ return nfs_ok;
+ }
+ status = nfs_ok;
+out_status:
+ nfs4_put_stid(&dp->dl_stid);
+ return status;
+}
+
+/**
+ * nfsd_get_dir_deleg - attempt to get a directory delegation
+ * @cstate: compound state
+ * @gdd: GET_DIR_DELEGATION arg/resp structure
+ * @nf: nfsd_file opened on the directory
+ *
+ * Given a GET_DIR_DELEGATION request @gdd, attempt to acquire a delegation
+ * on the directory to which @nf refers. Note that this does not set up any
+ * sort of async notifications for the delegation.
+ */
+struct nfs4_delegation *
+nfsd_get_dir_deleg(struct nfsd4_compound_state *cstate,
+ struct nfsd4_get_dir_delegation *gdd,
+ struct nfsd_file *nf)
+{
+ struct nfs4_client *clp = cstate->clp;
+ struct nfs4_delegation *dp;
+ struct file_lease *fl;
+ struct nfs4_file *fp, *rfp;
+ int status = 0;
+
+ fp = nfsd4_alloc_file();
+ if (!fp)
+ return ERR_PTR(-ENOMEM);
+
+ nfsd4_file_init(&cstate->current_fh, fp);
+
+ rfp = nfsd4_file_hash_insert(fp, &cstate->current_fh);
+ if (unlikely(!rfp)) {
+ put_nfs4_file(fp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (rfp != fp) {
+ put_nfs4_file(fp);
+ fp = rfp;
+ }
+
+ /* if this client already has one, return that it's unavailable */
+ spin_lock(&state_lock);
+ spin_lock(&fp->fi_lock);
+ /* existing delegation? */
+ if (nfs4_delegation_exists(clp, fp)) {
+ status = -EAGAIN;
+ } else if (!fp->fi_deleg_file) {
+ fp->fi_deleg_file = nfsd_file_get(nf);
+ fp->fi_delegees = 1;
+ } else {
+ ++fp->fi_delegees;
+ }
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&state_lock);
+
+ if (status) {
+ put_nfs4_file(fp);
+ return ERR_PTR(status);
+ }
+
+ /* Try to set up the lease */
+ status = -ENOMEM;
+ dp = alloc_init_deleg(clp, fp, NULL, NFS4_OPEN_DELEGATE_READ);
+ if (!dp)
+ goto out_delegees;
+
+ fl = nfs4_alloc_init_lease(dp);
+ if (!fl)
+ goto out_put_stid;
+
+ status = kernel_setlease(nf->nf_file,
+ fl->c.flc_type, &fl, NULL);
+ if (fl)
+ locks_free_lease(fl);
+ if (status)
+ goto out_put_stid;
+
+ /*
+ * Now, try to hash it. This can fail if we race another nfsd task
+ * trying to set a delegation on the same file. If that happens,
+ * then just say UNAVAIL.
+ */
+ spin_lock(&state_lock);
+ spin_lock(&clp->cl_lock);
+ spin_lock(&fp->fi_lock);
+ status = hash_delegation_locked(dp, fp);
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&clp->cl_lock);
+ spin_unlock(&state_lock);
+
+ if (!status)
+ return dp;
+
+ /* Something failed. Drop the lease and clean up the stid */
+ kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
+out_put_stid:
+ nfs4_put_stid(&dp->dl_stid);
+out_delegees:
+ put_deleg_file(fp);
+ return ERR_PTR(status);
+}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b2a1d969a172..30ce5851fe4c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -42,6 +42,8 @@
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/xattr.h>
+#include <linux/vmalloc.h>
+
#include <uapi/linux/xattr.h>
#include "idmap.h"
@@ -53,6 +55,7 @@
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
+#include "nfs4xdr_gen.h"
#include "trace.h"
@@ -116,11 +119,11 @@ static int zero_clientid(clientid_t *clid)
* operation described in @argp finishes.
*/
static void *
-svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
+svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, size_t len)
{
struct svcxdr_tmpbuf *tb;
- tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL);
+ tb = kmalloc(struct_size(tb, buf, len), GFP_KERNEL);
if (!tb)
return NULL;
tb->next = argp->to_free;
@@ -136,9 +139,9 @@ svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
* buffer might end on a page boundary.
*/
static char *
-svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
+svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, size_t len)
{
- char *p = svcxdr_tmpalloc(argp, len + 1);
+ char *p = svcxdr_tmpalloc(argp, size_add(len, 1));
if (!p)
return NULL;
@@ -148,7 +151,7 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
}
static void *
-svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
+svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, size_t len)
{
__be32 *tmp;
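/*
 * A brief note on the size_t conversions above (not part of the patch):
 * struct_size() and size_add() from <linux/overflow.h> saturate at SIZE_MAX
 * instead of wrapping on overflow, so an absurd length taken from the wire
 * makes the allocation fail cleanly rather than returning an undersized
 * buffer. A minimal sketch of the difference, assuming a hostile length:
 *
 *	size_t len    = SIZE_MAX;
 *	size_t unsafe = len + 1;           // wraps to 0
 *	size_t safe   = size_add(len, 1);  // saturates at SIZE_MAX
 *	// kmalloc(unsafe, ...) may "succeed" with far too little memory;
 *	// kmalloc(safe, ...) simply fails and returns NULL.
 */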
@@ -277,24 +280,10 @@ nfsd4_decode_verifier4(struct nfsd4_compoundargs *argp, nfs4_verifier *verf)
static __be32
nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen)
{
- u32 i, count;
- __be32 *p;
+ ssize_t status;
- if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
- return nfserr_bad_xdr;
- /* request sanity */
- if (count > 1000)
- return nfserr_bad_xdr;
- p = xdr_inline_decode(argp->xdr, count << 2);
- if (!p)
- return nfserr_bad_xdr;
- i = 0;
- while (i < count)
- bmval[i++] = be32_to_cpup(p++);
- while (i < bmlen)
- bmval[i++] = 0;
-
- return nfs_ok;
+ status = xdr_stream_decode_uint32_array(argp->xdr, bmval, bmlen);
+ return status == -EBADMSG ? nfserr_bad_xdr : nfs_ok;
}
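/*
 * Note on the helper used above (not part of the patch):
 * xdr_stream_decode_uint32_array() decodes a counted array of 32-bit words
 * into @bmval, zero-filling any tail words when the sender supplied fewer
 * than @bmlen. A malformed or truncated encoding yields -EBADMSG, mapped to
 * nfserr_bad_xdr here; a client that sends *more* bitmap words than the
 * server understands yields -EMSGSIZE (the extra words are consumed), which
 * this function maps to success so unknown future attribute words are
 * silently ignored.
 */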
static __be32
@@ -484,6 +473,15 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
return nfserr_bad_xdr;
}
}
+ if (bmval[1] & FATTR4_WORD1_TIME_CREATE) {
+ struct timespec64 ts;
+
+ /* No Linux filesystem supports setting this attribute. */
+ bmval[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ status = nfsd4_decode_nfstime4(argp, &ts);
+ if (status)
+ return status;
+ }
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
u32 set_it;
@@ -523,6 +521,27 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
*umask = mask & S_IRWXUGO;
iattr->ia_valid |= ATTR_MODE;
}
+ if (bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
+ fattr4_time_deleg_access access;
+
+ if (!xdrgen_decode_fattr4_time_deleg_access(argp->xdr, &access))
+ return nfserr_bad_xdr;
+ iattr->ia_atime.tv_sec = access.seconds;
+ iattr->ia_atime.tv_nsec = access.nseconds;
+ iattr->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET | ATTR_DELEG;
+ }
+ if (bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ fattr4_time_deleg_modify modify;
+
+ if (!xdrgen_decode_fattr4_time_deleg_modify(argp->xdr, &modify))
+ return nfserr_bad_xdr;
+ iattr->ia_mtime.tv_sec = modify.seconds;
+ iattr->ia_mtime.tv_nsec = modify.nseconds;
+ iattr->ia_ctime.tv_sec = modify.seconds;
+ iattr->ia_ctime.tv_nsec = modify.nseconds;
+ iattr->ia_valid |= ATTR_CTIME | ATTR_CTIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET | ATTR_DELEG;
+ }
/* request sanity: did attrlist4 contain the expected number of words? */
if (attrlist4_count != xdr_stream_pos(argp->xdr) - starting_pos)
@@ -569,23 +588,13 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp,
}
#ifdef CONFIG_NFSD_PNFS
-static __be32
-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
- struct nfsd4_deviceid *devid)
-{
- __be32 *p;
-
- p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
- if (!p)
- return nfserr_bad_xdr;
- memcpy(devid, p, sizeof(*devid));
- return nfs_ok;
-}
static __be32
nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutcommit *lcp)
{
+ u32 len;
+
if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_layout_type) < 0)
return nfserr_bad_xdr;
if (lcp->lc_layout_type < LAYOUT_NFSV4_1_FILES)
@@ -593,13 +602,10 @@ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
if (lcp->lc_layout_type >= LAYOUT_TYPE_MAX)
return nfserr_bad_xdr;
- if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_up_len) < 0)
+ if (xdr_stream_decode_u32(argp->xdr, &len) < 0)
+ return nfserr_bad_xdr;
+ if (!xdr_stream_subsegment(argp->xdr, &lcp->lc_up_layout, len))
return nfserr_bad_xdr;
- if (lcp->lc_up_len > 0) {
- lcp->lc_up_layout = xdr_inline_decode(argp->xdr, lcp->lc_up_len);
- if (!lcp->lc_up_layout)
- return nfserr_bad_xdr;
- }
return nfs_ok;
}
@@ -773,16 +779,18 @@ nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
static __be32
nfsd4_decode_access(struct nfsd4_compoundargs *argp,
- struct nfsd4_access *access)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_access *access = &u->access;
if (xdr_stream_decode_u32(argp->xdr, &access->ac_req_access) < 0)
return nfserr_bad_xdr;
return nfs_ok;
}
static __be32
-nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
+nfsd4_decode_close(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_close *close = &u->close;
if (xdr_stream_decode_u32(argp->xdr, &close->cl_seqid) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_stateid4(argp, &close->cl_stateid);
@@ -790,20 +798,24 @@ nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
static __be32
-nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
+nfsd4_decode_commit(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_commit *commit = &u->commit;
if (xdr_stream_decode_u64(argp->xdr, &commit->co_offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &commit->co_count) < 0)
return nfserr_bad_xdr;
+ memset(&commit->co_verf, 0, sizeof(commit->co_verf));
return nfs_ok;
}
static __be32
-nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
+nfsd4_decode_create(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_create *create = &u->create;
__be32 *p, status;
+ memset(create, 0, sizeof(*create));
if (xdr_stream_decode_u32(argp->xdr, &create->cr_type) < 0)
return nfserr_bad_xdr;
switch (create->cr_type) {
@@ -845,21 +857,26 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
}
static inline __be32
-nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
+nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_delegreturn *dr = &u->delegreturn;
return nfsd4_decode_stateid4(argp, &dr->dr_stateid);
}
static inline __be32
-nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
+nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_getattr *getattr = &u->getattr;
+ memset(getattr, 0, sizeof(*getattr));
return nfsd4_decode_bitmap4(argp, getattr->ga_bmval,
ARRAY_SIZE(getattr->ga_bmval));
}
static __be32
-nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
+nfsd4_decode_link(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_link *link = &u->link;
+ memset(link, 0, sizeof(*link));
return nfsd4_decode_component4(argp, &link->li_name, &link->li_namelen);
}
@@ -906,8 +923,10 @@ nfsd4_decode_locker4(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
}
static __be32
-nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
+nfsd4_decode_lock(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_lock *lock = &u->lock;
+ memset(lock, 0, sizeof(*lock));
if (xdr_stream_decode_u32(argp->xdr, &lock->lk_type) < 0)
return nfserr_bad_xdr;
if ((lock->lk_type < NFS4_READ_LT) || (lock->lk_type > NFS4_WRITEW_LT))
@@ -922,8 +941,10 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
}
static __be32
-nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
+nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_lockt *lockt = &u->lockt;
+ memset(lockt, 0, sizeof(*lockt));
if (xdr_stream_decode_u32(argp->xdr, &lockt->lt_type) < 0)
return nfserr_bad_xdr;
if ((lockt->lt_type < NFS4_READ_LT) || (lockt->lt_type > NFS4_WRITEW_LT))
@@ -937,8 +958,9 @@ nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
}
static __be32
-nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
+nfsd4_decode_locku(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_locku *locku = &u->locku;
__be32 status;
if (xdr_stream_decode_u32(argp->xdr, &locku->lu_type) < 0)
@@ -959,8 +981,9 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
}
static __be32
-nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
+nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_lookup *lookup = &u->lookup;
return nfsd4_decode_component4(argp, &lookup->lo_name, &lookup->lo_len);
}
@@ -1052,13 +1075,13 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *sh
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
- switch (w & NFS4_SHARE_WANT_MASK) {
- case NFS4_SHARE_WANT_NO_PREFERENCE:
- case NFS4_SHARE_WANT_READ_DELEG:
- case NFS4_SHARE_WANT_WRITE_DELEG:
- case NFS4_SHARE_WANT_ANY_DELEG:
- case NFS4_SHARE_WANT_NO_DELEG:
- case NFS4_SHARE_WANT_CANCEL:
+ switch (w & NFS4_SHARE_WANT_TYPE_MASK) {
+ case OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE:
+ case OPEN4_SHARE_ACCESS_WANT_READ_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_NO_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
@@ -1140,16 +1163,14 @@ nfsd4_decode_open_claim4(struct nfsd4_compoundargs *argp,
}
static __be32
-nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+nfsd4_decode_open(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_open *open = &u->open;
__be32 status;
u32 dummy;
- memset(open->op_bmval, 0, sizeof(open->op_bmval));
- open->op_iattr.ia_valid = 0;
- open->op_openowner = NULL;
+ memset(open, 0, sizeof(*open));
- open->op_xdr_error = 0;
if (xdr_stream_decode_u32(argp->xdr, &open->op_seqid) < 0)
return nfserr_bad_xdr;
/* deleg_want is ignored */
@@ -1171,8 +1192,10 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
}
static __be32
-nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
+nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_open_confirm *open_conf = &u->open_confirm;
__be32 status;
if (argp->minorversion >= 1)
@@ -1184,14 +1207,19 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con
if (xdr_stream_decode_u32(argp->xdr, &open_conf->oc_seqid) < 0)
return nfserr_bad_xdr;
+ memset(&open_conf->oc_resp_stateid, 0,
+ sizeof(open_conf->oc_resp_stateid));
return nfs_ok;
}
static __be32
-nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
+nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_open_downgrade *open_down = &u->open_downgrade;
__be32 status;
+ memset(open_down, 0, sizeof(*open_down));
status = nfsd4_decode_stateid4(argp, &open_down->od_stateid);
if (status)
return status;
@@ -1206,8 +1234,9 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
}
static __be32
-nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
+nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_putfh *putfh = &u->putfh;
__be32 *p;
if (xdr_stream_decode_u32(argp->xdr, &putfh->pf_fhlen) < 0)
@@ -1221,22 +1250,17 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
if (!putfh->pf_fhval)
return nfserr_jukebox;
+ putfh->no_verify = false;
return nfs_ok;
}
static __be32
-nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
-{
- if (argp->minorversion == 0)
- return nfs_ok;
- return nfserr_notsupp;
-}
-
-static __be32
-nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
+nfsd4_decode_read(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_read *read = &u->read;
__be32 status;
+ memset(read, 0, sizeof(*read));
status = nfsd4_decode_stateid4(argp, &read->rd_stateid);
if (status)
return status;
@@ -1249,10 +1273,12 @@ nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
}
static __be32
-nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
+nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_readdir *readdir = &u->readdir;
__be32 status;
+ memset(readdir, 0, sizeof(*readdir));
if (xdr_stream_decode_u64(argp->xdr, &readdir->rd_cookie) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_verifier4(argp, &readdir->rd_verf);
@@ -1270,16 +1296,20 @@ nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *read
}
static __be32
-nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
+nfsd4_decode_remove(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_remove *remove = &u->remove;
+ memset(&remove->rm_cinfo, 0, sizeof(remove->rm_cinfo));
return nfsd4_decode_component4(argp, &remove->rm_name, &remove->rm_namelen);
}
static __be32
-nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
+nfsd4_decode_rename(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_rename *rename = &u->rename;
__be32 status;
+ memset(rename, 0, sizeof(*rename));
status = nfsd4_decode_component4(argp, &rename->rn_sname, &rename->rn_snamelen);
if (status)
return status;
@@ -1287,23 +1317,28 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
}
static __be32
-nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
+nfsd4_decode_renew(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ clientid_t *clientid = &u->renew;
return nfsd4_decode_clientid4(argp, clientid);
}
static __be32
nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
- struct nfsd4_secinfo *secinfo)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_secinfo *secinfo = &u->secinfo;
+ secinfo->si_exp = NULL;
return nfsd4_decode_component4(argp, &secinfo->si_name, &secinfo->si_namelen);
}
static __be32
-nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
+nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_setattr *setattr = &u->setattr;
__be32 status;
+ memset(setattr, 0, sizeof(*setattr));
status = nfsd4_decode_stateid4(argp, &setattr->sa_stateid);
if (status)
return status;
@@ -1314,10 +1349,13 @@ nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *seta
}
static __be32
-nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
+nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_setclientid *setclientid = &u->setclientid;
__be32 *p, status;
+ memset(setclientid, 0, sizeof(*setclientid));
+
if (argp->minorversion >= 1)
return nfserr_notsupp;
@@ -1355,8 +1393,10 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
}
static __be32
-nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
+nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setclientid_confirm *scd_c = &u->setclientid_confirm;
__be32 status;
if (argp->minorversion >= 1)
@@ -1370,10 +1410,13 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
/* Also used for NVERIFY */
static __be32
-nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
+nfsd4_decode_verify(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_verify *verify = &u->verify;
__be32 *p, status;
+ memset(verify, 0, sizeof(*verify));
+
status = nfsd4_decode_bitmap4(argp, verify->ve_bmval,
ARRAY_SIZE(verify->ve_bmval));
if (status)
@@ -1395,8 +1438,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
}
static __be32
-nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+nfsd4_decode_write(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_write *write = &u->write;
__be32 status;
status = nfsd4_decode_stateid4(argp, &write->wr_stateid);
@@ -1413,12 +1457,17 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
if (!xdr_stream_subsegment(argp->xdr, &write->wr_payload, write->wr_buflen))
return nfserr_bad_xdr;
+ write->wr_bytes_written = 0;
+ write->wr_how_written = 0;
+ memset(&write->wr_verifier, 0, sizeof(write->wr_verifier));
return nfs_ok;
}
static __be32
-nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
+nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
__be32 status;
if (argp->minorversion >= 1)
@@ -1435,18 +1484,24 @@ nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_rel
return nfs_ok;
}
-static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
+static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
+ memset(bc, 0, sizeof(*bc));
if (xdr_stream_decode_u32(argp->xdr, &bc->bc_cb_program) < 0)
return nfserr_bad_xdr;
return nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
}
-static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
+static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
u32 use_conn_in_rdma_mode;
__be32 status;
+ memset(bcts, 0, sizeof(*bcts));
status = nfsd4_decode_sessionid4(argp, &bcts->sessionid);
if (status)
return status;
@@ -1584,10 +1639,12 @@ nfsd4_decode_nfs_impl_id4(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
- struct nfsd4_exchange_id *exid)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_exchange_id *exid = &u->exchange_id;
__be32 status;
+ memset(exid, 0, sizeof(*exid));
status = nfsd4_decode_verifier4(argp, &exid->verifier);
if (status)
return status;
@@ -1636,10 +1693,12 @@ nfsd4_decode_channel_attrs4(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
- struct nfsd4_create_session *sess)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_create_session *sess = &u->create_session;
__be32 status;
+ memset(sess, 0, sizeof(*sess));
status = nfsd4_decode_clientid4(argp, &sess->clientid);
if (status)
return status;
@@ -1655,35 +1714,64 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
return status;
if (xdr_stream_decode_u32(argp->xdr, &sess->callback_prog) < 0)
return nfserr_bad_xdr;
- status = nfsd4_decode_cb_sec(argp, &sess->cb_sec);
- if (status)
- return status;
-
- return nfs_ok;
+ return nfsd4_decode_cb_sec(argp, &sess->cb_sec);
}
static __be32
nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
- struct nfsd4_destroy_session *destroy_session)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_destroy_session *destroy_session = &u->destroy_session;
return nfsd4_decode_sessionid4(argp, &destroy_session->sessionid);
}
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
- struct nfsd4_free_stateid *free_stateid)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
return nfsd4_decode_stateid4(argp, &free_stateid->fr_stateid);
}
+static __be32
+nfsd4_decode_get_dir_delegation(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+ __be32 status;
+
+ memset(gdd, 0, sizeof(*gdd));
+
+ if (xdr_stream_decode_bool(argp->xdr, &gdd->gdda_signal_deleg_avail) < 0)
+ return nfserr_bad_xdr;
+ status = nfsd4_decode_bitmap4(argp, gdd->gdda_notification_types,
+ ARRAY_SIZE(gdd->gdda_notification_types));
+ if (status)
+ return status;
+ status = nfsd4_decode_nfstime4(argp, &gdd->gdda_child_attr_delay);
+ if (status)
+ return status;
+ status = nfsd4_decode_nfstime4(argp, &gdd->gdda_dir_attr_delay);
+ if (status)
+ return status;
+ status = nfsd4_decode_bitmap4(argp, gdd->gdda_child_attributes,
+ ARRAY_SIZE(gdd->gdda_child_attributes));
+ if (status)
+ return status;
+ return nfsd4_decode_bitmap4(argp, gdd->gdda_dir_attributes,
+ ARRAY_SIZE(gdd->gdda_dir_attributes));
+}
+
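/*
 * For reference (not part of the patch), the decode order above follows the
 * GET_DIR_DELEGATION4args layout from RFC 8881:
 *
 *	bool		gdda_signal_deleg_avail;
 *	bitmap4		gdda_notification_types;
 *	attr_notice4	gdda_child_attr_delay;	(an nfstime4)
 *	attr_notice4	gdda_dir_attr_delay;	(an nfstime4)
 *	bitmap4		gdda_child_attributes;
 *	bitmap4		gdda_dir_attributes;
 */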
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
- struct nfsd4_getdeviceinfo *gdev)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
__be32 status;
- status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
+ memset(gdev, 0, sizeof(*gdev));
+ status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
@@ -1699,10 +1787,12 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
- struct nfsd4_layoutcommit *lcp)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
__be32 *p, status;
+ memset(lcp, 0, sizeof(*lcp));
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_seg.offset) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_seg.length) < 0)
@@ -1712,7 +1802,7 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
status = nfsd4_decode_stateid4(argp, &lcp->lc_sid);
if (status)
return status;
- if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_newoffset) < 0)
+ if (xdr_stream_decode_bool(argp->xdr, &lcp->lc_newoffset) < 0)
return nfserr_bad_xdr;
if (lcp->lc_newoffset) {
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_last_wr) < 0)
@@ -1734,10 +1824,12 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
- struct nfsd4_layoutget *lgp)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_layoutget *lgp = &u->layoutget;
__be32 status;
+ memset(lgp, 0, sizeof(*lgp));
if (xdr_stream_decode_u32(argp->xdr, &lgp->lg_signal) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lgp->lg_layout_type) < 0)
@@ -1761,8 +1853,10 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
- struct nfsd4_layoutreturn *lrp)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
+ memset(lrp, 0, sizeof(*lrp));
if (xdr_stream_decode_bool(argp->xdr, &lrp->lr_reclaim) < 0)
return nfserr_bad_xdr;
if (xdr_stream_decode_u32(argp->xdr, &lrp->lr_layout_type) < 0)
@@ -1774,17 +1868,21 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
#endif /* CONFIG_NFSD_PNFS */
static __be32 nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
- struct nfsd4_secinfo_no_name *sin)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_secinfo_no_name *sin = &u->secinfo_no_name;
if (xdr_stream_decode_u32(argp->xdr, &sin->sin_style) < 0)
return nfserr_bad_xdr;
+
+ sin->sin_exp = NULL;
return nfs_ok;
}
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
- struct nfsd4_sequence *seq)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_sequence *seq = &u->sequence;
__be32 *p, status;
status = nfsd4_decode_sessionid4(argp, &seq->sessionid);
@@ -1795,19 +1893,24 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
return nfserr_bad_xdr;
seq->seqid = be32_to_cpup(p++);
seq->slotid = be32_to_cpup(p++);
- seq->maxslots = be32_to_cpup(p++);
+ /* sa_highest_slotid counts from 0 but maxslots counts from 1 ... */
+ seq->maxslots = be32_to_cpup(p++) + 1;
seq->cachethis = be32_to_cpup(p);
+ seq->status_flags = 0;
return nfs_ok;
}
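/*
 * Worked example for the "+ 1" above (not part of the patch):
 * sa_highest_slotid on the wire is a zero-based slot index, so a value of 7
 * refers to slot index 7, i.e. the eighth slot. Storing it as a slot count
 * in seq->maxslots therefore requires adding 1.
 */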
static __be32
-nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
+nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid;
__be32 status;
u32 i;
+ memset(test_stateid, 0, sizeof(*test_stateid));
if (xdr_stream_decode_u32(argp->xdr, &test_stateid->ts_num_ids) < 0)
return nfserr_bad_xdr;
@@ -1815,7 +1918,7 @@ nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_sta
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = svcxdr_tmpalloc(argp, sizeof(*stateid));
if (!stateid)
- return nfserrno(-ENOMEM); /* XXX: not jukebox? */
+ return nfserr_jukebox;
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid4(argp, &stateid->ts_id_stateid);
@@ -1827,14 +1930,16 @@ nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_sta
}
static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp,
- struct nfsd4_destroy_clientid *dc)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
return nfsd4_decode_clientid4(argp, &dc->clientid);
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp,
- struct nfsd4_reclaim_complete *rc)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
if (xdr_stream_decode_bool(argp->xdr, &rc->rca_one_fs) < 0)
return nfserr_bad_xdr;
return nfs_ok;
@@ -1842,8 +1947,9 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
- struct nfsd4_fallocate *fallocate)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_fallocate *fallocate = &u->allocate;
__be32 status;
status = nfsd4_decode_stateid4(argp, &fallocate->falloc_stateid);
@@ -1899,12 +2005,14 @@ static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp,
}
static __be32
-nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
+nfsd4_decode_copy(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_copy *copy = &u->copy;
+ u32 consecutive, i, count, sync;
struct nl4_server *ns_dummy;
- u32 consecutive, i, count;
__be32 status;
+ memset(copy, 0, sizeof(*copy));
status = nfsd4_decode_stateid4(argp, &copy->cp_src_stateid);
if (status)
return status;
@@ -1920,25 +2028,28 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
/* ca_consecutive: we always do consecutive copies */
if (xdr_stream_decode_u32(argp->xdr, &consecutive) < 0)
return nfserr_bad_xdr;
- if (xdr_stream_decode_u32(argp->xdr, &copy->cp_synchronous) < 0)
+ if (xdr_stream_decode_bool(argp->xdr, &sync) < 0)
return nfserr_bad_xdr;
+ nfsd4_copy_set_sync(copy, sync);
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
- copy->cp_intra = false;
+ copy->cp_src = svcxdr_tmpalloc(argp, sizeof(*copy->cp_src));
+ if (copy->cp_src == NULL)
+ return nfserr_jukebox;
if (count == 0) { /* intra-server copy */
- copy->cp_intra = true;
+ __set_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
return nfs_ok;
}
/* decode all the supplied server addresses but use only the first */
- status = nfsd4_decode_nl4_server(argp, &copy->cp_src);
+ status = nfsd4_decode_nl4_server(argp, copy->cp_src);
if (status)
return status;
ns_dummy = kmalloc(sizeof(struct nl4_server), GFP_KERNEL);
if (ns_dummy == NULL)
- return nfserrno(-ENOMEM); /* XXX: jukebox? */
+ return nfserr_jukebox;
for (i = 0; i < count - 1; i++) {
status = nfsd4_decode_nl4_server(argp, ns_dummy);
if (status) {
@@ -1953,26 +2064,39 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
static __be32
nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
- struct nfsd4_copy_notify *cn)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_copy_notify *cn = &u->copy_notify;
__be32 status;
+ memset(cn, 0, sizeof(*cn));
+ cn->cpn_src = svcxdr_tmpalloc(argp, sizeof(*cn->cpn_src));
+ if (cn->cpn_src == NULL)
+ return nfserr_jukebox;
+ cn->cpn_dst = svcxdr_tmpalloc(argp, sizeof(*cn->cpn_dst));
+ if (cn->cpn_dst == NULL)
+ return nfserr_jukebox;
+
status = nfsd4_decode_stateid4(argp, &cn->cpn_src_stateid);
if (status)
return status;
- return nfsd4_decode_nl4_server(argp, &cn->cpn_dst);
+ return nfsd4_decode_nl4_server(argp, cn->cpn_dst);
}
static __be32
nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
- struct nfsd4_offload_status *os)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_offload_status *os = &u->offload_status;
+ os->count = 0;
+ os->status = 0;
return nfsd4_decode_stateid4(argp, &os->stateid);
}
static __be32
-nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
+nfsd4_decode_seek(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_seek *seek = &u->seek;
__be32 status;
status = nfsd4_decode_stateid4(argp, &seek->seek_stateid);
@@ -1983,12 +2107,15 @@ nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
if (xdr_stream_decode_u32(argp->xdr, &seek->seek_whence) < 0)
return nfserr_bad_xdr;
+ seek->seek_eof = 0;
+ seek->seek_pos = 0;
return nfs_ok;
}
static __be32
-nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
+nfsd4_decode_clone(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
+ struct nfsd4_clone *clone = &u->clone;
__be32 status;
status = nfsd4_decode_stateid4(argp, &clone->cl_src_stateid);
@@ -2021,7 +2148,7 @@ nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
*/
static __be32
nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct xdr_buf *xdr,
- char **bufp, u32 buflen)
+ char **bufp, size_t buflen)
{
struct page **pages = xdr->pages;
struct kvec *head = xdr->head;
@@ -2113,11 +2240,13 @@ nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep)
*/
static __be32
nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp,
- struct nfsd4_getxattr *getxattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_getxattr *getxattr = &u->getxattr;
__be32 status;
u32 maxcount;
+ memset(getxattr, 0, sizeof(*getxattr));
status = nfsd4_decode_xattr_name(argp, &getxattr->getxa_name);
if (status)
return status;
@@ -2126,17 +2255,19 @@ nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp,
maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount);
getxattr->getxa_len = maxcount;
-
- return status;
+ return nfs_ok;
}
static __be32
nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp,
- struct nfsd4_setxattr *setxattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setxattr *setxattr = &u->setxattr;
u32 flags, maxcount, size;
__be32 status;
+ memset(setxattr, 0, sizeof(*setxattr));
+
if (xdr_stream_decode_u32(argp->xdr, &flags) < 0)
return nfserr_bad_xdr;
@@ -2171,10 +2302,13 @@ nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp,
- struct nfsd4_listxattrs *listxattrs)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_listxattrs *listxattrs = &u->listxattrs;
u32 maxcount;
+ memset(listxattrs, 0, sizeof(*listxattrs));
+
if (xdr_stream_decode_u64(argp->xdr, &listxattrs->lsxa_cookie) < 0)
return nfserr_bad_xdr;
@@ -2200,112 +2334,114 @@ nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp,
- struct nfsd4_removexattr *removexattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_removexattr *removexattr = &u->removexattr;
+ memset(removexattr, 0, sizeof(*removexattr));
return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name);
}
static __be32
-nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
+nfsd4_decode_noop(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
{
return nfs_ok;
}
static __be32
-nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
+nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
{
return nfserr_notsupp;
}
-typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
+typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u);
static const nfsd4_dec nfsd4_dec_ops[] = {
- [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
- [OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
- [OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
- [OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
- [OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
- [OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
- [OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
- [OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
- [OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
- [OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
- [OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
- [OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_open_confirm,
- [OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
- [OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
- [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_putpubfh,
- [OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_READ] = (nfsd4_dec)nfsd4_decode_read,
- [OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
- [OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
- [OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
- [OP_RENEW] = (nfsd4_dec)nfsd4_decode_renew,
- [OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
- [OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
- [OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_setclientid,
- [OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
- [OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
- [OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
- [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
+ [OP_ACCESS] = nfsd4_decode_access,
+ [OP_CLOSE] = nfsd4_decode_close,
+ [OP_COMMIT] = nfsd4_decode_commit,
+ [OP_CREATE] = nfsd4_decode_create,
+ [OP_DELEGPURGE] = nfsd4_decode_notsupp,
+ [OP_DELEGRETURN] = nfsd4_decode_delegreturn,
+ [OP_GETATTR] = nfsd4_decode_getattr,
+ [OP_GETFH] = nfsd4_decode_noop,
+ [OP_LINK] = nfsd4_decode_link,
+ [OP_LOCK] = nfsd4_decode_lock,
+ [OP_LOCKT] = nfsd4_decode_lockt,
+ [OP_LOCKU] = nfsd4_decode_locku,
+ [OP_LOOKUP] = nfsd4_decode_lookup,
+ [OP_LOOKUPP] = nfsd4_decode_noop,
+ [OP_NVERIFY] = nfsd4_decode_verify,
+ [OP_OPEN] = nfsd4_decode_open,
+ [OP_OPENATTR] = nfsd4_decode_notsupp,
+ [OP_OPEN_CONFIRM] = nfsd4_decode_open_confirm,
+ [OP_OPEN_DOWNGRADE] = nfsd4_decode_open_downgrade,
+ [OP_PUTFH] = nfsd4_decode_putfh,
+ [OP_PUTPUBFH] = nfsd4_decode_noop,
+ [OP_PUTROOTFH] = nfsd4_decode_noop,
+ [OP_READ] = nfsd4_decode_read,
+ [OP_READDIR] = nfsd4_decode_readdir,
+ [OP_READLINK] = nfsd4_decode_noop,
+ [OP_REMOVE] = nfsd4_decode_remove,
+ [OP_RENAME] = nfsd4_decode_rename,
+ [OP_RENEW] = nfsd4_decode_renew,
+ [OP_RESTOREFH] = nfsd4_decode_noop,
+ [OP_SAVEFH] = nfsd4_decode_noop,
+ [OP_SECINFO] = nfsd4_decode_secinfo,
+ [OP_SETATTR] = nfsd4_decode_setattr,
+ [OP_SETCLIENTID] = nfsd4_decode_setclientid,
+ [OP_SETCLIENTID_CONFIRM] = nfsd4_decode_setclientid_confirm,
+ [OP_VERIFY] = nfsd4_decode_verify,
+ [OP_WRITE] = nfsd4_decode_write,
+ [OP_RELEASE_LOCKOWNER] = nfsd4_decode_release_lockowner,
/* new operations for NFSv4.1 */
- [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
- [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
- [OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
- [OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
- [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
- [OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
- [OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BACKCHANNEL_CTL] = nfsd4_decode_backchannel_ctl,
+ [OP_BIND_CONN_TO_SESSION] = nfsd4_decode_bind_conn_to_session,
+ [OP_EXCHANGE_ID] = nfsd4_decode_exchange_id,
+ [OP_CREATE_SESSION] = nfsd4_decode_create_session,
+ [OP_DESTROY_SESSION] = nfsd4_decode_destroy_session,
+ [OP_FREE_STATEID] = nfsd4_decode_free_stateid,
+ [OP_GET_DIR_DELEGATION] = nfsd4_decode_get_dir_delegation,
#ifdef CONFIG_NFSD_PNFS
- [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_getdeviceinfo,
- [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_layoutcommit,
- [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_layoutget,
- [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_layoutreturn,
+ [OP_GETDEVICEINFO] = nfsd4_decode_getdeviceinfo,
+ [OP_GETDEVICELIST] = nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = nfsd4_decode_layoutcommit,
+ [OP_LAYOUTGET] = nfsd4_decode_layoutget,
+ [OP_LAYOUTRETURN] = nfsd4_decode_layoutreturn,
#else
- [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICEINFO] = nfsd4_decode_notsupp,
+ [OP_GETDEVICELIST] = nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = nfsd4_decode_notsupp,
+ [OP_LAYOUTGET] = nfsd4_decode_notsupp,
+ [OP_LAYOUTRETURN] = nfsd4_decode_notsupp,
#endif
- [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
- [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
- [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
- [OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_destroy_clientid,
- [OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
+ [OP_SECINFO_NO_NAME] = nfsd4_decode_secinfo_no_name,
+ [OP_SEQUENCE] = nfsd4_decode_sequence,
+ [OP_SET_SSV] = nfsd4_decode_notsupp,
+ [OP_TEST_STATEID] = nfsd4_decode_test_stateid,
+ [OP_WANT_DELEGATION] = nfsd4_decode_notsupp,
+ [OP_DESTROY_CLIENTID] = nfsd4_decode_destroy_clientid,
+ [OP_RECLAIM_COMPLETE] = nfsd4_decode_reclaim_complete,
/* new operations for NFSv4.2 */
- [OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
- [OP_COPY] = (nfsd4_dec)nfsd4_decode_copy,
- [OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_copy_notify,
- [OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
- [OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_offload_status,
- [OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_offload_status,
- [OP_READ_PLUS] = (nfsd4_dec)nfsd4_decode_read,
- [OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek,
- [OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_CLONE] = (nfsd4_dec)nfsd4_decode_clone,
+ [OP_ALLOCATE] = nfsd4_decode_fallocate,
+ [OP_COPY] = nfsd4_decode_copy,
+ [OP_COPY_NOTIFY] = nfsd4_decode_copy_notify,
+ [OP_DEALLOCATE] = nfsd4_decode_fallocate,
+ [OP_IO_ADVISE] = nfsd4_decode_notsupp,
+ [OP_LAYOUTERROR] = nfsd4_decode_notsupp,
+ [OP_LAYOUTSTATS] = nfsd4_decode_notsupp,
+ [OP_OFFLOAD_CANCEL] = nfsd4_decode_offload_status,
+ [OP_OFFLOAD_STATUS] = nfsd4_decode_offload_status,
+ [OP_READ_PLUS] = nfsd4_decode_read,
+ [OP_SEEK] = nfsd4_decode_seek,
+ [OP_WRITE_SAME] = nfsd4_decode_notsupp,
+ [OP_CLONE] = nfsd4_decode_clone,
/* RFC 8276 extended attributes operations */
- [OP_GETXATTR] = (nfsd4_dec)nfsd4_decode_getxattr,
- [OP_SETXATTR] = (nfsd4_dec)nfsd4_decode_setxattr,
- [OP_LISTXATTRS] = (nfsd4_dec)nfsd4_decode_listxattrs,
- [OP_REMOVEXATTR] = (nfsd4_dec)nfsd4_decode_removexattr,
+ [OP_GETXATTR] = nfsd4_decode_getxattr,
+ [OP_SETXATTR] = nfsd4_decode_setxattr,
+ [OP_LISTXATTRS] = nfsd4_decode_listxattrs,
+ [OP_REMOVEXATTR] = nfsd4_decode_removexattr,
};
static inline bool
@@ -2352,22 +2488,15 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0)
return false;
- if (xdr_stream_decode_u32(argp->xdr, &argp->opcnt) < 0)
+ if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0)
return false;
-
- /*
- * NFS4ERR_RESOURCE is a more helpful error than GARBAGE_ARGS
- * here, so we return success at the xdr level so that
- * nfsd4_proc can handle this is an NFS-level error.
- */
- if (argp->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
- return true;
+ argp->opcnt = min_t(u32, argp->client_opcnt,
+ NFSD_MAX_OPS_PER_COMPOUND);
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
- argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
+ argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops));
if (!argp->ops) {
argp->ops = argp->iops;
- dprintk("nfsd: couldn't allocate room for COMPOUND\n");
return false;
}
}
@@ -2378,10 +2507,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
for (i = 0; i < argp->opcnt; i++) {
op = &argp->ops[i];
op->replay = NULL;
+ op->opdesc = NULL;
if (xdr_stream_decode_u32(argp->xdr, &op->opnum) < 0)
return false;
if (nfsd4_opnum_in_range(argp, op)) {
+ op->opdesc = OPDESC(op);
op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
if (op->status != nfs_ok)
trace_nfsd_compound_decode_err(argp->rqstp,
@@ -2392,7 +2523,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
op->opnum = OP_ILLEGAL;
op->status = nfserr_op_illegal;
}
- op->opdesc = OPDESC(op);
+
/*
* We'll try to cache the result in the DRC if any one
* op in the compound wants to be cached:
@@ -2421,57 +2552,72 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
- svc_reserve(argp->rqstp, max_reply + readbytes);
+ svc_reserve_auth(argp->rqstp, max_reply + readbytes);
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
+ argp->splice_ok = nfsd_read_splice_ok(argp->rqstp);
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
- clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
+ argp->splice_ok = false;
return true;
}
-static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
- struct svc_export *exp)
+static __be32 nfsd4_encode_nfs_fh4(struct xdr_stream *xdr,
+ struct knfsd_fh *fh_handle)
{
- if (exp->ex_flags & NFSEXP_V4ROOT) {
- *p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
- *p++ = 0;
- } else
- p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
- return p;
+ return nfsd4_encode_opaque(xdr, fh_handle->fh_raw, fh_handle->fh_size);
}
-/*
- * ctime (in NFSv4, time_metadata) is not writeable, and the client
- * doesn't really care what resolution could theoretically be stored by
- * the filesystem.
- *
- * The client cares how close together changes can be while still
- * guaranteeing ctime changes. For most filesystems (which have
- * timestamps with nanosecond fields) that is limited by the resolution
- * of the time returned from current_time() (which I'm assuming to be
- * 1/HZ).
- */
-static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
+/* This is a frequently-encoded type; open-coded for speed */
+static __be32 nfsd4_encode_nfstime4(struct xdr_stream *xdr,
+ const struct timespec64 *tv)
{
- struct timespec64 ts;
- u32 ns;
+ __be32 *p;
- ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
- ts = ns_to_timespec64(ns);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 3);
+ if (!p)
+ return nfserr_resource;
+ p = xdr_encode_hyper(p, tv->tv_sec);
+ *p = cpu_to_be32(tv->tv_nsec);
+ return nfs_ok;
+}
- p = xdr_encode_hyper(p, ts.tv_sec);
- *p++ = cpu_to_be32(ts.tv_nsec);
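/*
 * Not part of the patch: nfstime4 on the wire is a 64-bit seconds field
 * followed by a 32-bit nseconds field, 12 bytes in total, which is why the
 * open-coded encoder above reserves XDR_UNIT * 3 (three 4-byte XDR words).
 */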
+static __be32 nfsd4_encode_specdata4(struct xdr_stream *xdr,
+ unsigned int major, unsigned int minor)
+{
+ __be32 status;
- return p;
+ status = nfsd4_encode_uint32_t(xdr, major);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_uint32_t(xdr, minor);
}
-static __be32 *encode_cinfo(__be32 *p, struct nfsd4_change_info *c)
+static __be32
+nfsd4_encode_change_info4(struct xdr_stream *xdr, const struct nfsd4_change_info *c)
{
- *p++ = cpu_to_be32(c->atomic);
- p = xdr_encode_hyper(p, c->before_change);
- p = xdr_encode_hyper(p, c->after_change);
- return p;
+ __be32 status;
+
+ status = nfsd4_encode_bool(xdr, c->atomic);
+ if (status != nfs_ok)
+ return status;
+ status = nfsd4_encode_changeid4(xdr, c->before_change);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_changeid4(xdr, c->after_change);
+}
+
+static __be32 nfsd4_encode_netaddr4(struct xdr_stream *xdr,
+ const struct nfs42_netaddr *addr)
+{
+ __be32 status;
+
+ /* na_r_netid */
+ status = nfsd4_encode_opaque(xdr, addr->netid, addr->netid_len);
+ if (status != nfs_ok)
+ return status;
+ /* na_r_addr */
+ return nfsd4_encode_opaque(xdr, addr->addr, addr->addr_len);
}
/* Encode as an array of strings the string given with components
@@ -2484,10 +2630,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
__be32 *p;
__be32 pathlen;
int pathlen_offset;
- int strlen, count=0;
char *str, *end, *next;
-
- dprintk("nfsd4_encode_components(%s)\n", components);
+ int count = 0;
pathlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
@@ -2514,15 +2658,11 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
- strlen = end - str;
- if (strlen) {
- p = xdr_reserve_space(xdr, strlen + 4);
- if (!p)
+ if (end > str) {
+ if (xdr_stream_encode_opaque(xdr, str, end - str) < 0)
return nfserr_resource;
- p = xdr_encode_opaque(p, str, strlen);
count++;
- }
- else
+ } else
end++;
if (found_esc)
end = next;
@@ -2543,9 +2683,6 @@ static __be32 nfsd4_encode_components(struct xdr_stream *xdr, char sep,
return nfsd4_encode_components_esc(xdr, sep, components, 0, 0);
}
-/*
- * encode a location element of a fs_locations structure
- */
static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
struct nfsd4_fs_location *location)
{
@@ -2558,18 +2695,14 @@ static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
status = nfsd4_encode_components(xdr, '/', location->path);
if (status)
return status;
- return 0;
+ return nfs_ok;
}
-/*
- * Encode a path in RFC3530 'pathname4' format
- */
-static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
- const struct path *root,
- const struct path *path)
+static __be32 nfsd4_encode_pathname4(struct xdr_stream *xdr,
+ const struct path *root,
+ const struct path *path)
{
struct path cur = *path;
- __be32 *p;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
__be32 err = nfserr_jukebox;
@@ -2600,24 +2733,19 @@ static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
components[ncomponents++] = cur.dentry;
cur.dentry = dget_parent(cur.dentry);
}
+
err = nfserr_resource;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ if (xdr_stream_encode_u32(xdr, ncomponents) != XDR_UNIT)
goto out_free;
- *p++ = cpu_to_be32(ncomponents);
-
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
- unsigned int len;
spin_lock(&dentry->d_lock);
- len = dentry->d_name.len;
- p = xdr_reserve_space(xdr, len + 4);
- if (!p) {
+ if (xdr_stream_encode_opaque(xdr, dentry->d_name.name,
+ dentry->d_name.len) < 0) {
spin_unlock(&dentry->d_lock);
goto out_free;
}
- p = xdr_encode_opaque(p, dentry->d_name.name, len);
dprintk("/%pd", dentry);
spin_unlock(&dentry->d_lock);
dput(dentry);
@@ -2634,89 +2762,59 @@ out_free:
return err;
}
-static __be32 nfsd4_encode_fsloc_fsroot(struct xdr_stream *xdr,
- struct svc_rqst *rqstp, const struct path *path)
+static __be32 nfsd4_encode_fs_locations4(struct xdr_stream *xdr,
+ struct svc_rqst *rqstp,
+ struct svc_export *exp)
{
+ struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
struct svc_export *exp_ps;
- __be32 res;
+ unsigned int i;
+ __be32 status;
+ /* fs_root */
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
- res = nfsd4_encode_path(xdr, &exp_ps->ex_path, path);
+ status = nfsd4_encode_pathname4(xdr, &exp_ps->ex_path, &exp->ex_path);
exp_put(exp_ps);
- return res;
-}
-
-/*
- * encode a fs_locations structure
- */
-static __be32 nfsd4_encode_fs_locations(struct xdr_stream *xdr,
- struct svc_rqst *rqstp, struct svc_export *exp)
-{
- __be32 status;
- int i;
- __be32 *p;
- struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
-
- status = nfsd4_encode_fsloc_fsroot(xdr, rqstp, &exp->ex_path);
- if (status)
+ if (status != nfs_ok)
return status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+
+ /* locations<> */
+ if (xdr_stream_encode_u32(xdr, fslocs->locations_count) != XDR_UNIT)
return nfserr_resource;
- *p++ = cpu_to_be32(fslocs->locations_count);
- for (i=0; i<fslocs->locations_count; i++) {
+ for (i = 0; i < fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(xdr, &fslocs->locations[i]);
- if (status)
+ if (status != nfs_ok)
return status;
}
- return 0;
-}
-static u32 nfs4_file_type(umode_t mode)
-{
- switch (mode & S_IFMT) {
- case S_IFIFO: return NF4FIFO;
- case S_IFCHR: return NF4CHR;
- case S_IFDIR: return NF4DIR;
- case S_IFBLK: return NF4BLK;
- case S_IFLNK: return NF4LNK;
- case S_IFREG: return NF4REG;
- case S_IFSOCK: return NF4SOCK;
- default: return NF4BAD;
- }
+ return nfs_ok;
}
-static inline __be32
-nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- struct nfs4_ace *ace)
+static __be32 nfsd4_encode_nfsace4(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ struct nfs4_ace *ace)
{
+ __be32 status;
+
+ /* type */
+ status = nfsd4_encode_acetype4(xdr, ace->type);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* flag */
+ status = nfsd4_encode_aceflag4(xdr, ace->flag);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* access mask */
+ status = nfsd4_encode_acemask4(xdr, ace->access_mask & NFS4_ACE_MASK_ALL);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* who */
if (ace->whotype != NFS4_ACL_WHO_NAMED)
return nfs4_acl_write_who(xdr, ace->whotype);
- else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+ if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
return nfsd4_encode_group(xdr, rqstp, ace->who_gid);
- else
- return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
-}
-
-static inline __be32
-nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
-{
- __be32 *p;
- unsigned long i = hweight_long(layout_types);
-
- p = xdr_reserve_space(xdr, 4 + 4 * i);
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(i);
-
- for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
- if (layout_types & (1 << i))
- *p++ = cpu_to_be32(i);
-
- return 0;
+ return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -2727,11 +2825,11 @@ nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- void *context, int len)
+ const struct lsm_context *context)
{
__be32 *p;
- p = xdr_reserve_space(xdr, len + 4 + 4 + 4);
+ p = xdr_reserve_space(xdr, context->len + 4 + 4 + 4);
if (!p)
return nfserr_resource;
@@ -2741,13 +2839,13 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
*/
*p++ = cpu_to_be32(0); /* lfs */
*p++ = cpu_to_be32(0); /* pi */
- p = xdr_encode_opaque(p, context, len);
+ p = xdr_encode_opaque(p, context->context, context->len);
return 0;
}
#else
static inline __be32
nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- void *context, int len)
+ struct lsm_context *context)
{ return 0; }
#endif
@@ -2769,9 +2867,10 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32
}
-static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
+static int nfsd4_get_mounted_on_ino(struct svc_export *exp, u64 *pino)
{
struct path path = exp->ex_path;
+ struct kstat stat;
int err;
path_get(&path);
@@ -2779,18 +2878,20 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
if (path.dentry != path.mnt->mnt_root)
break;
}
- err = vfs_getattr(&path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
+ err = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
path_put(&path);
+ if (!err)
+ *pino = stat.ino;
return err;
}
static __be32
-nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
+nfsd4_encode_bitmap4(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
- p = xdr_reserve_space(xdr, 16);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
@@ -2798,85 +2899,778 @@ nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
- p = xdr_reserve_space(xdr, 12);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
- p = xdr_reserve_space(xdr, 8);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 2);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
- return 0;
+ return nfs_ok;
out_resource:
return nfserr_resource;
}
+struct nfsd4_fattr_args {
+ struct svc_rqst *rqstp;
+ struct svc_fh *fhp;
+ struct svc_export *exp;
+ struct dentry *dentry;
+ struct kstat stat;
+ struct kstatfs statfs;
+ struct nfs4_acl *acl;
+ u64 change_attr;
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ struct lsm_context context;
+#endif
+ u32 rdattr_err;
+ bool contextsupport;
+ bool ignore_crossmnt;
+};
+
+typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args);
+
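/*
 * Illustrative sketch (not part of this hunk): the nfsd4_enc_attr callbacks
 * defined below slot into a dispatch table indexed by FATTR4 bit number,
 * roughly of this shape; the exact table contents and the encoding loop
 * appear in the remainder of the patch:
 *
 *	static const nfsd4_enc_attr example_fattr4_encode_ops[] = {
 *		[FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs,
 *		[FATTR4_TYPE]            = nfsd4_encode_fattr4_type,
 *		// ... one entry per supported attribute ...
 *	};
 *
 *	for_each_set_bit(bit, attrmask, ARRAY_SIZE(example_fattr4_encode_ops)) {
 *		status = example_fattr4_encode_ops[bit](xdr, &args);
 *		if (status != nfs_ok)
 *			break;
 *	}
 */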
+static __be32 nfsd4_encode_fattr4__inval(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfserr_inval;
+}
+
+static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4__true(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_bool(xdr, true);
+}
+
+static __be32 nfsd4_encode_fattr4__false(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_bool(xdr, false);
+}
+
+static __be32 nfsd4_encode_fattr4_supported_attrs(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd4_compoundres *resp = args->rqstp->rq_resp;
+ u32 minorversion = resp->cstate.minorversion;
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
+ if (!IS_POSIXACL(d_inode(args->dentry)))
+ supp[0] &= ~FATTR4_WORD0_ACL;
+ if (!args->contextsupport)
+ supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+
+ return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+}
+
+static __be32 nfsd4_encode_fattr4_type(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!p)
+ return nfserr_resource;
+
+ switch (args->stat.mode & S_IFMT) {
+ case S_IFIFO:
+ *p = cpu_to_be32(NF4FIFO);
+ break;
+ case S_IFCHR:
+ *p = cpu_to_be32(NF4CHR);
+ break;
+ case S_IFDIR:
+ *p = cpu_to_be32(NF4DIR);
+ break;
+ case S_IFBLK:
+ *p = cpu_to_be32(NF4BLK);
+ break;
+ case S_IFLNK:
+ *p = cpu_to_be32(NF4LNK);
+ break;
+ case S_IFREG:
+ *p = cpu_to_be32(NF4REG);
+ break;
+ case S_IFSOCK:
+ *p = cpu_to_be32(NF4SOCK);
+ break;
+ default:
+ return nfserr_serverfault;
+ }
+
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_fh_expire_type(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u32 mask;
+
+ mask = NFS4_FH_PERSISTENT;
+ if (!(args->exp->ex_flags & NFSEXP_NOSUBTREECHECK))
+ mask |= NFS4_FH_VOL_RENAME;
+ return nfsd4_encode_uint32_t(xdr, mask);
+}
+
+static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ const struct svc_export *exp = args->exp;
+
+ if (unlikely(exp->ex_flags & NFSEXP_V4ROOT)) {
+ u32 flush_time = convert_to_wallclock(exp->cd->flush_time);
+
+ if (xdr_stream_encode_u32(xdr, flush_time) != XDR_UNIT)
+ return nfserr_resource;
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+ return nfsd4_encode_changeid4(xdr, args->change_attr);
+}
+
+static __be32 nfsd4_encode_fattr4_size(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->stat.size);
+}
+
+static __be32 nfsd4_encode_fattr4_fsid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT * 2 + XDR_UNIT * 2);
+ if (!p)
+ return nfserr_resource;
+
+ if (unlikely(args->exp->ex_fslocs.migrated)) {
+ p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
+ xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
+ return nfs_ok;
+ }
+ switch (fsid_source(args->fhp)) {
+ case FSIDSOURCE_FSID:
+ p = xdr_encode_hyper(p, (u64)args->exp->ex_fsid);
+ xdr_encode_hyper(p, (u64)0);
+ break;
+ case FSIDSOURCE_DEV:
+ *p++ = xdr_zero;
+ *p++ = cpu_to_be32(MAJOR(args->stat.dev));
+ *p++ = xdr_zero;
+ *p = cpu_to_be32(MINOR(args->stat.dev));
+ break;
+ case FSIDSOURCE_UUID:
+ xdr_encode_opaque_fixed(p, args->exp->ex_uuid, EX_UUID_LEN);
+ break;
+ }
+
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_lease_time(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(args->rqstp), nfsd_net_id);
+
+ return nfsd4_encode_nfs_lease4(xdr, nn->nfsd4_lease);
+}
+
+static __be32 nfsd4_encode_fattr4_rdattr_error(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->rdattr_err);
+}
+
+static __be32 nfsd4_encode_fattr4_aclsupport(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u32 mask;
+
+ mask = 0;
+ if (IS_POSIXACL(d_inode(args->dentry)))
+ mask = ACL4_SUPPORT_ALLOW_ACL | ACL4_SUPPORT_DENY_ACL;
+ return nfsd4_encode_uint32_t(xdr, mask);
+}
+
+static __be32 nfsd4_encode_fattr4_acl(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfs4_acl *acl = args->acl;
+ struct nfs4_ace *ace;
+ __be32 status;
+
+ /* nfsace4<> */
+ if (!acl) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ } else {
+ if (xdr_stream_encode_u32(xdr, acl->naces) != XDR_UNIT)
+ return nfserr_resource;
+ for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
+ status = nfsd4_encode_nfsace4(xdr, args->rqstp, ace);
+ if (status != nfs_ok)
+ return status;
+ }
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_filehandle(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfs_fh4(xdr, &args->fhp->fh_handle);
+}
+
+static __be32 nfsd4_encode_fattr4_fileid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->stat.ino);
+}
+
+static __be32 nfsd4_encode_fattr4_files_avail(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_ffree);
+}
+
+static __be32 nfsd4_encode_fattr4_files_free(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_ffree);
+}
+
+static __be32 nfsd4_encode_fattr4_files_total(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_files);
+}
+
+static __be32 nfsd4_encode_fattr4_fs_locations(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_fs_locations4(xdr, args->rqstp, args->exp);
+}
+
+static __be32 nfsd4_encode_fattr4_maxfilesize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct super_block *sb = args->exp->ex_path.mnt->mnt_sb;
+
+ return nfsd4_encode_uint64_t(xdr, sb->s_maxbytes);
+}
+
+static __be32 nfsd4_encode_fattr4_maxlink(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, 255);
+}
+
+static __be32 nfsd4_encode_fattr4_maxname(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->statfs.f_namelen);
+}
+
+static __be32 nfsd4_encode_fattr4_maxread(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, svc_max_payload(args->rqstp));
+}
+
+static __be32 nfsd4_encode_fattr4_maxwrite(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, svc_max_payload(args->rqstp));
+}
+
+static __be32 nfsd4_encode_fattr4_mode(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_mode4(xdr, args->stat.mode & S_IALLUGO);
+}
+
+static __be32 nfsd4_encode_fattr4_numlinks(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->stat.nlink);
+}
+
+static __be32 nfsd4_encode_fattr4_owner(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_user(xdr, args->rqstp, args->stat.uid);
+}
+
+static __be32 nfsd4_encode_fattr4_owner_group(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_group(xdr, args->rqstp, args->stat.gid);
+}
+
+static __be32 nfsd4_encode_fattr4_rawdev(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_specdata4(xdr, MAJOR(args->stat.rdev),
+ MINOR(args->stat.rdev));
+}
+
+static __be32 nfsd4_encode_fattr4_space_avail(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 avail = (u64)args->statfs.f_bavail * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, avail);
+}
+
+static __be32 nfsd4_encode_fattr4_space_free(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 free = (u64)args->statfs.f_bfree * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, free);
+}
+
+static __be32 nfsd4_encode_fattr4_space_total(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 total = (u64)args->statfs.f_blocks * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, total);
+}
+
+static __be32 nfsd4_encode_fattr4_space_used(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, (u64)args->stat.blocks << 9);
+}
+
+static __be32 nfsd4_encode_fattr4_time_access(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.atime);
+}
+
+static __be32 nfsd4_encode_fattr4_time_create(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.btime);
+}
+
+/*
+ * ctime (in NFSv4, time_metadata) is not writeable, and the client
+ * doesn't really care what resolution could theoretically be stored by
+ * the filesystem.
+ *
+ * The client cares how close together changes can be while still
+ * guaranteeing ctime changes. For most filesystems (which have
+ * timestamps with nanosecond fields) that is limited by the resolution
+ * of the time returned from current_time() (which I'm assuming to be
+ * 1/HZ).
+ */
+static __be32 nfsd4_encode_fattr4_time_delta(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ const struct inode *inode = d_inode(args->dentry);
+ u32 ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ return nfsd4_encode_nfstime4(xdr, &ts);
+}
+
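As a worked example of the comment above: on a kernel built with HZ=250 (an illustrative configuration, not a fixed default), NSEC_PER_SEC/HZ is 4,000,000 ns, so against a filesystem whose s_time_gran is 1 ns the helper reports a time_delta of 0.004 s — the effective ctime granularity is bounded by the tick-derived resolution of current_time(), not by the on-disk timestamp resolution.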
+static __be32 nfsd4_encode_fattr4_time_metadata(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.ctime);
+}
+
+static __be32 nfsd4_encode_fattr4_time_modify(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.mtime);
+}
+
+static __be32 nfsd4_encode_fattr4_mounted_on_fileid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 ino;
+ int err;
+
+ if (!args->ignore_crossmnt &&
+ args->dentry == args->exp->ex_path.mnt->mnt_root) {
+ err = nfsd4_get_mounted_on_ino(args->exp, &ino);
+ if (err)
+ return nfserrno(err);
+ } else
+ ino = args->stat.ino;
+
+ return nfsd4_encode_uint64_t(xdr, ino);
+}
+
+#ifdef CONFIG_NFSD_PNFS
+
+static __be32 nfsd4_encode_fattr4_fs_layout_types(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ unsigned long mask = args->exp->ex_layout_types;
+ int i;
+
+ /* Hamming weight of @mask is the number of layout types to return */
+ if (xdr_stream_encode_u32(xdr, hweight_long(mask)) != XDR_UNIT)
+ return nfserr_resource;
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (mask & BIT(i)) {
+ /* layouttype4 */
+ if (xdr_stream_encode_u32(xdr, i) != XDR_UNIT)
+ return nfserr_resource;
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_layout_types(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ unsigned long mask = args->exp->ex_layout_types;
+ int i;
+
+ /* Hamming weight of @mask is the number of layout types to return */
+ if (xdr_stream_encode_u32(xdr, hweight_long(mask)) != XDR_UNIT)
+ return nfserr_resource;
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (mask & BIT(i)) {
+ /* layouttype4 */
+ if (xdr_stream_encode_u32(xdr, i) != XDR_UNIT)
+ return nfserr_resource;
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_layout_blksize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->stat.blksize);
+}
+
+#endif
+
+static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd4_compoundres *resp = args->rqstp->rq_resp;
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[resp->cstate.minorversion], sizeof(supp));
+ supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+ supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+ supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+ return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+}
+
+/*
+ * Copied from generic_remap_checks/generic_remap_file_range_prep.
+ *
+ * These generic functions use the file system's s_blocksize, but
+ * individual file systems aren't required to use
+ * generic_remap_file_range_prep. Until there is a mechanism for
+ * determining a particular file system's (or file's) clone block
+ * size, this is the best NFSD can do.
+ */
+static __be32 nfsd4_encode_fattr4_clone_blksize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct inode *inode = d_inode(args->dentry);
+
+ return nfsd4_encode_uint32_t(xdr, inode->i_sb->s_blocksize);
+}
+
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_security_label(xdr, args->rqstp, &args->context);
+}
+#endif
+
+static __be32 nfsd4_encode_fattr4_xattr_support(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ int err = xattr_supports_user_prefix(d_inode(args->dentry));
+
+ return nfsd4_encode_bool(xdr, err == 0);
+}
+
+#define NFSD_OA_SHARE_ACCESS (BIT(OPEN_ARGS_SHARE_ACCESS_READ) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WRITE) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_BOTH))
+
+#define NFSD_OA_SHARE_DENY (BIT(OPEN_ARGS_SHARE_DENY_NONE) | \
+ BIT(OPEN_ARGS_SHARE_DENY_READ) | \
+ BIT(OPEN_ARGS_SHARE_DENY_WRITE) | \
+ BIT(OPEN_ARGS_SHARE_DENY_BOTH))
+
+#define NFSD_OA_SHARE_ACCESS_WANT (BIT(OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION))
+
+#define NFSD_OA_OPEN_CLAIM (BIT(OPEN_ARGS_OPEN_CLAIM_NULL) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_PREVIOUS) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV)| \
+ BIT(OPEN_ARGS_OPEN_CLAIM_FH) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH))
+
+#define NFSD_OA_CREATE_MODE (BIT(OPEN_ARGS_CREATEMODE_UNCHECKED4) | \
+ BIT(OPEN_ARGS_CREATE_MODE_GUARDED) | \
+ BIT(OPEN_ARGS_CREATEMODE_EXCLUSIVE4) | \
+ BIT(OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1))
+
+static uint32_t oa_share_access = NFSD_OA_SHARE_ACCESS;
+static uint32_t oa_share_deny = NFSD_OA_SHARE_DENY;
+static uint32_t oa_share_access_want = NFSD_OA_SHARE_ACCESS_WANT;
+static uint32_t oa_open_claim = NFSD_OA_OPEN_CLAIM;
+static uint32_t oa_create_mode = NFSD_OA_CREATE_MODE;
+
+static const struct open_arguments4 nfsd_open_arguments = {
+ .oa_share_access = { .count = 1, .element = &oa_share_access },
+ .oa_share_deny = { .count = 1, .element = &oa_share_deny },
+ .oa_share_access_want = { .count = 1, .element = &oa_share_access_want },
+ .oa_open_claim = { .count = 1, .element = &oa_open_claim },
+ .oa_create_mode = { .count = 1, .element = &oa_create_mode },
+};
+
+static __be32 nfsd4_encode_fattr4_open_arguments(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ if (!xdrgen_encode_fattr4_open_arguments(xdr, &nfsd_open_arguments))
+ return nfserr_resource;
+ return nfs_ok;
+}
+
+static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
+ [FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs,
+ [FATTR4_TYPE] = nfsd4_encode_fattr4_type,
+ [FATTR4_FH_EXPIRE_TYPE] = nfsd4_encode_fattr4_fh_expire_type,
+ [FATTR4_CHANGE] = nfsd4_encode_fattr4_change,
+ [FATTR4_SIZE] = nfsd4_encode_fattr4_size,
+ [FATTR4_LINK_SUPPORT] = nfsd4_encode_fattr4__true,
+ [FATTR4_SYMLINK_SUPPORT] = nfsd4_encode_fattr4__true,
+ [FATTR4_NAMED_ATTR] = nfsd4_encode_fattr4__false,
+ [FATTR4_FSID] = nfsd4_encode_fattr4_fsid,
+ [FATTR4_UNIQUE_HANDLES] = nfsd4_encode_fattr4__true,
+ [FATTR4_LEASE_TIME] = nfsd4_encode_fattr4_lease_time,
+ [FATTR4_RDATTR_ERROR] = nfsd4_encode_fattr4_rdattr_error,
+ [FATTR4_ACL] = nfsd4_encode_fattr4_acl,
+ [FATTR4_ACLSUPPORT] = nfsd4_encode_fattr4_aclsupport,
+ [FATTR4_ARCHIVE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CANSETTIME] = nfsd4_encode_fattr4__true,
+ [FATTR4_CASE_INSENSITIVE] = nfsd4_encode_fattr4__false,
+ [FATTR4_CASE_PRESERVING] = nfsd4_encode_fattr4__true,
+ [FATTR4_CHOWN_RESTRICTED] = nfsd4_encode_fattr4__true,
+ [FATTR4_FILEHANDLE] = nfsd4_encode_fattr4_filehandle,
+ [FATTR4_FILEID] = nfsd4_encode_fattr4_fileid,
+ [FATTR4_FILES_AVAIL] = nfsd4_encode_fattr4_files_avail,
+ [FATTR4_FILES_FREE] = nfsd4_encode_fattr4_files_free,
+ [FATTR4_FILES_TOTAL] = nfsd4_encode_fattr4_files_total,
+ [FATTR4_FS_LOCATIONS] = nfsd4_encode_fattr4_fs_locations,
+ [FATTR4_HIDDEN] = nfsd4_encode_fattr4__noop,
+ [FATTR4_HOMOGENEOUS] = nfsd4_encode_fattr4__true,
+ [FATTR4_MAXFILESIZE] = nfsd4_encode_fattr4_maxfilesize,
+ [FATTR4_MAXLINK] = nfsd4_encode_fattr4_maxlink,
+ [FATTR4_MAXNAME] = nfsd4_encode_fattr4_maxname,
+ [FATTR4_MAXREAD] = nfsd4_encode_fattr4_maxread,
+ [FATTR4_MAXWRITE] = nfsd4_encode_fattr4_maxwrite,
+ [FATTR4_MIMETYPE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MODE] = nfsd4_encode_fattr4_mode,
+ [FATTR4_NO_TRUNC] = nfsd4_encode_fattr4__true,
+ [FATTR4_NUMLINKS] = nfsd4_encode_fattr4_numlinks,
+ [FATTR4_OWNER] = nfsd4_encode_fattr4_owner,
+ [FATTR4_OWNER_GROUP] = nfsd4_encode_fattr4_owner_group,
+ [FATTR4_QUOTA_AVAIL_HARD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_QUOTA_AVAIL_SOFT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_QUOTA_USED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RAWDEV] = nfsd4_encode_fattr4_rawdev,
+ [FATTR4_SPACE_AVAIL] = nfsd4_encode_fattr4_space_avail,
+ [FATTR4_SPACE_FREE] = nfsd4_encode_fattr4_space_free,
+ [FATTR4_SPACE_TOTAL] = nfsd4_encode_fattr4_space_total,
+ [FATTR4_SPACE_USED] = nfsd4_encode_fattr4_space_used,
+ [FATTR4_SYSTEM] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_ACCESS] = nfsd4_encode_fattr4_time_access,
+ [FATTR4_TIME_ACCESS_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_BACKUP] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_CREATE] = nfsd4_encode_fattr4_time_create,
+ [FATTR4_TIME_DELTA] = nfsd4_encode_fattr4_time_delta,
+ [FATTR4_TIME_METADATA] = nfsd4_encode_fattr4_time_metadata,
+ [FATTR4_TIME_MODIFY] = nfsd4_encode_fattr4_time_modify,
+ [FATTR4_TIME_MODIFY_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MOUNTED_ON_FILEID] = nfsd4_encode_fattr4_mounted_on_fileid,
+ [FATTR4_DIR_NOTIF_DELAY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_DIRENT_NOTIF_DELAY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_DACL] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SACL] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CHANGE_POLICY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_FS_STATUS] = nfsd4_encode_fattr4__noop,
+
+#ifdef CONFIG_NFSD_PNFS
+ [FATTR4_FS_LAYOUT_TYPES] = nfsd4_encode_fattr4_fs_layout_types,
+ [FATTR4_LAYOUT_HINT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_TYPES] = nfsd4_encode_fattr4_layout_types,
+ [FATTR4_LAYOUT_BLKSIZE] = nfsd4_encode_fattr4_layout_blksize,
+ [FATTR4_LAYOUT_ALIGNMENT] = nfsd4_encode_fattr4__noop,
+#else
+ [FATTR4_FS_LAYOUT_TYPES] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_HINT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_TYPES] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_BLKSIZE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_ALIGNMENT] = nfsd4_encode_fattr4__noop,
+#endif
+
+ [FATTR4_FS_LOCATIONS_INFO] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MDSTHRESHOLD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_GET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTEVT_GET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTEVT_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_HOLD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MODE_SET_MASKED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SUPPATTR_EXCLCREAT] = nfsd4_encode_fattr4_suppattr_exclcreat,
+ [FATTR4_FS_CHARSET_CAP] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4_clone_blksize,
+ [FATTR4_SPACE_FREED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CHANGE_ATTR_TYPE] = nfsd4_encode_fattr4__noop,
+
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ [FATTR4_SEC_LABEL] = nfsd4_encode_fattr4_sec_label,
+#else
+ [FATTR4_SEC_LABEL] = nfsd4_encode_fattr4__noop,
+#endif
+
+ [FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop,
+ [FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support,
+ [FATTR4_TIME_DELEG_ACCESS] = nfsd4_encode_fattr4__inval,
+ [FATTR4_TIME_DELEG_MODIFY] = nfsd4_encode_fattr4__inval,
+ [FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments,
+};
+
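The encoder table above is consumed by the bitmap-driven dispatch loop added to nfsd4_encode_fattr4() later in this patch: each set bit in the requested attribute mask indexes one encoder callback, so attributes are always emitted in ascending bit order as the NFSv4 XDR layout requires. The following is a minimal user-space sketch of that pattern only — the attribute numbers, stub encoders, and loop are hypothetical stand-ins, not the kernel API:

	/* sketch.c: table-driven attribute dispatch keyed by a bitmap */
	#include <stdio.h>
	#include <stdint.h>

	typedef int (*enc_attr_fn)(void);

	static int enc_type(void) { printf("encode TYPE\n"); return 0; }
	static int enc_size(void) { printf("encode SIZE\n"); return 0; }
	static int enc_fsid(void) { printf("encode FSID\n"); return 0; }

	enum { ATTR_TYPE = 1, ATTR_SIZE = 4, ATTR_FSID = 8 };

	static const enc_attr_fn enc_ops[] = {
		[ATTR_TYPE] = enc_type,
		[ATTR_SIZE] = enc_size,
		[ATTR_FSID] = enc_fsid,
	};

	int main(void)
	{
		uint64_t attrmask = (1ULL << ATTR_TYPE) | (1ULL << ATTR_FSID);
		unsigned int bit;

		/* Walk set bits in ascending order, mirroring for_each_set_bit() */
		for (bit = 0; bit < sizeof(enc_ops) / sizeof(enc_ops[0]); bit++) {
			if (!(attrmask & (1ULL << bit)) || !enc_ops[bit])
				continue;
			if (enc_ops[bit]())	/* stop on the first encoder error */
				return 1;
		}
		return 0;
	}

Unsupported or unrequested bits simply never reach an encoder, which is why the table can leave gaps (NULL entries) for attribute numbers NFSD does not implement.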
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*/
static __be32
-nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
- struct svc_export *exp,
- struct dentry *dentry, u32 *bmval,
- struct svc_rqst *rqstp, int ignore_crossmnt)
+nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ struct svc_fh *fhp, struct svc_export *exp,
+ struct dentry *dentry, const u32 *bmval,
+ int ignore_crossmnt)
{
- u32 bmval0 = bmval[0];
- u32 bmval1 = bmval[1];
- u32 bmval2 = bmval[2];
- struct kstat stat;
+ DECLARE_BITMAP(attr_bitmap, ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
+ struct nfs4_delegation *dp = NULL;
+ struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
- struct kstatfs statfs;
- __be32 *p;
int starting_len = xdr->buf->len;
- int attrlen_offset;
- __be32 attrlen;
- u32 dummy;
- u64 dummy64;
- u32 rdattr_err = 0;
- __be32 status;
+ unsigned int attrlen_offset;
+ __be32 attrlen, status;
+ u32 attrmask[3];
int err;
- struct nfs4_acl *acl = NULL;
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- void *context = NULL;
- int contextlen;
-#endif
- bool contextsupport = false;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ unsigned long bit;
+
+ WARN_ON_ONCE(bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1);
+ WARN_ON_ONCE(!nfsd_attrs_supported(minorversion, bmval));
+
+ args.rqstp = rqstp;
+ args.exp = exp;
+ args.dentry = dentry;
+ args.ignore_crossmnt = (ignore_crossmnt != 0);
+ args.acl = NULL;
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ args.context.context = NULL;
+#endif
- BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
- BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
+ /*
+ * Make a local copy of the attribute bitmap that can be modified.
+ */
+ attrmask[0] = bmval[0];
+ attrmask[1] = bmval[1];
+ attrmask[2] = bmval[2];
+ args.rdattr_err = 0;
if (exp->ex_fslocs.migrated) {
- status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
+ status = fattr_handle_absent_fs(&attrmask[0], &attrmask[1],
+ &attrmask[2], &args.rdattr_err);
+ if (status)
+ goto out;
+ }
+ if ((attrmask[0] & (FATTR4_WORD0_CHANGE |
+ FATTR4_WORD0_SIZE)) ||
+ (attrmask[1] & (FATTR4_WORD1_TIME_ACCESS |
+ FATTR4_WORD1_TIME_MODIFY |
+ FATTR4_WORD1_TIME_METADATA))) {
+ status = nfsd4_deleg_getattr_conflict(rqstp, dentry, &dp);
if (status)
goto out;
}
- err = vfs_getattr(&path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
+ err = vfs_getattr(&path, &args.stat,
+ STATX_BASIC_STATS | STATX_BTIME | STATX_CHANGE_COOKIE,
+ AT_STATX_SYNC_AS_STAT);
+ if (dp) {
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+
+ if (ncf->ncf_file_modified) {
+ ++ncf->ncf_initial_cinfo;
+ args.stat.size = ncf->ncf_cur_fsize;
+ if (!timespec64_is_epoch(&ncf->ncf_cb_mtime))
+ args.stat.mtime = ncf->ncf_cb_mtime;
+ }
+ args.change_attr = ncf->ncf_initial_cinfo;
+
+ if (!timespec64_is_epoch(&ncf->ncf_cb_atime))
+ args.stat.atime = ncf->ncf_cb_atime;
+
+ nfs4_put_stid(&dp->dl_stid);
+ } else {
+ args.change_attr = nfsd4_change_attribute(&args.stat);
+ }
+
if (err)
goto out_nfserr;
- if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+
+ if (!(args.stat.result_mask & STATX_BTIME))
+ /* underlying FS does not offer btime so we can't share it */
+ attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ if ((attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
- (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ (attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
- err = vfs_statfs(&path, &statfs);
+ err = vfs_statfs(&path, &args.statfs);
if (err)
goto out_nfserr;
}
- if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
+ if ((attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+ !fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
if (!tempfh)
@@ -2885,12 +3679,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
status = fh_compose(tempfh, exp, dentry, NULL);
if (status)
goto out;
- fhp = tempfh;
- }
- if (bmval0 & FATTR4_WORD0_ACL) {
- err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
+ args.fhp = tempfh;
+ } else
+ args.fhp = fhp;
+
+ if (attrmask[0] & FATTR4_WORD0_ACL) {
+ err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
- bmval0 &= ~FATTR4_WORD0_ACL;
+ attrmask[0] &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
@@ -2898,457 +3694,54 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
goto out_nfserr;
}
+ args.contextsupport = false;
+
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
- bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+ attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
- &context, &contextlen);
+ &args.context);
else
err = -EOPNOTSUPP;
- contextsupport = (err == 0);
- if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
+ args.contextsupport = (err == 0);
+ if (attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
- bmval2 &= ~FATTR4_WORD2_SECURITY_LABEL;
+ attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
}
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
- status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
+ /* attrmask */
+ status = nfsd4_encode_bitmap4(xdr, attrmask[0], attrmask[1],
+ attrmask[2]);
if (status)
goto out;
+ /* attr_vals */
attrlen_offset = xdr->buf->len;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
goto out_resource;
- p++; /* to be backfilled later */
-
- if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
- u32 supp[3];
-
- memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
-
- if (!IS_POSIXACL(dentry->d_inode))
- supp[0] &= ~FATTR4_WORD0_ACL;
- if (!contextsupport)
- supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
- if (!supp[2]) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(supp[0]);
- *p++ = cpu_to_be32(supp[1]);
- } else {
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(supp[0]);
- *p++ = cpu_to_be32(supp[1]);
- *p++ = cpu_to_be32(supp[2]);
- }
- }
- if (bmval0 & FATTR4_WORD0_TYPE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- dummy = nfs4_file_type(stat.mode);
- if (dummy == NF4BAD) {
- status = nfserr_serverfault;
+ bitmap_from_arr32(attr_bitmap, attrmask,
+ ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
+ for_each_set_bit(bit, attr_bitmap,
+ ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
+ status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
+ if (status != nfs_ok)
goto out;
- }
- *p++ = cpu_to_be32(dummy);
- }
- if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
- *p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
- else
- *p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
- NFS4_FH_VOL_RENAME);
- }
- if (bmval0 & FATTR4_WORD0_CHANGE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = encode_change(p, &stat, d_inode(dentry), exp);
- }
- if (bmval0 & FATTR4_WORD0_SIZE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, stat.size);
- }
- if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_FSID) {
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- goto out_resource;
- if (exp->ex_fslocs.migrated) {
- p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
- p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
- } else switch(fsid_source(fhp)) {
- case FSIDSOURCE_FSID:
- p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
- p = xdr_encode_hyper(p, (u64)0);
- break;
- case FSIDSOURCE_DEV:
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(MAJOR(stat.dev));
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(MINOR(stat.dev));
- break;
- case FSIDSOURCE_UUID:
- p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
- EX_UUID_LEN);
- break;
- }
- }
- if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(nn->nfsd4_lease);
- }
- if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(rdattr_err);
- }
- if (bmval0 & FATTR4_WORD0_ACL) {
- struct nfs4_ace *ace;
-
- if (acl == NULL) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
-
- *p++ = cpu_to_be32(0);
- goto out_acl;
- }
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(acl->naces);
-
- for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
- p = xdr_reserve_space(xdr, 4*3);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(ace->type);
- *p++ = cpu_to_be32(ace->flag);
- *p++ = cpu_to_be32(ace->access_mask &
- NFS4_ACE_MASK_ALL);
- status = nfsd4_encode_aclname(xdr, rqstp, ace);
- if (status)
- goto out;
- }
- }
-out_acl:
- if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
- ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
}
- if (bmval0 & FATTR4_WORD0_CANSETTIME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
- p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
- if (!p)
- goto out_resource;
- p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw,
- fhp->fh_handle.fh_size);
- }
- if (bmval0 & FATTR4_WORD0_FILEID) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, stat.ino);
- }
- if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
- }
- if (bmval0 & FATTR4_WORD0_FILES_FREE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
- }
- if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_files);
- }
- if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
- status = nfsd4_encode_fs_locations(xdr, rqstp, exp);
- if (status)
- goto out;
- }
- if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
- }
- if (bmval0 & FATTR4_WORD0_MAXLINK) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(255);
- }
- if (bmval0 & FATTR4_WORD0_MAXNAME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(statfs.f_namelen);
- }
- if (bmval0 & FATTR4_WORD0_MAXREAD) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
- }
- if (bmval0 & FATTR4_WORD0_MAXWRITE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
- }
- if (bmval1 & FATTR4_WORD1_MODE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.mode & S_IALLUGO);
- }
- if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval1 & FATTR4_WORD1_NUMLINKS) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.nlink);
- }
- if (bmval1 & FATTR4_WORD1_OWNER) {
- status = nfsd4_encode_user(xdr, rqstp, stat.uid);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
- status = nfsd4_encode_group(xdr, rqstp, stat.gid);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_RAWDEV) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
- *p++ = cpu_to_be32((u32) MINOR(stat.rdev));
- }
- if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_USED) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)stat.blocks << 9;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (s64)stat.atime.tv_sec);
- *p++ = cpu_to_be32(stat.atime.tv_nsec);
- }
- if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- p = encode_time_delta(p, d_inode(dentry));
- }
- if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (s64)stat.ctime.tv_sec);
- *p++ = cpu_to_be32(stat.ctime.tv_nsec);
- }
- if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (s64)stat.mtime.tv_sec);
- *p++ = cpu_to_be32(stat.mtime.tv_nsec);
- }
- if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
- struct kstat parent_stat;
- u64 ino = stat.ino;
-
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- /*
- * Get parent's attributes if not ignoring crossmount
- * and this is the root of a cross-mounted filesystem.
- */
- if (ignore_crossmnt == 0 &&
- dentry == exp->ex_path.mnt->mnt_root) {
- err = get_parent_attributes(exp, &parent_stat);
- if (err)
- goto out_nfserr;
- ino = parent_stat.ino;
- }
- p = xdr_encode_hyper(p, ino);
- }
-#ifdef CONFIG_NFSD_PNFS
- if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
- if (status)
- goto out;
- }
-
- if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
- if (status)
- goto out;
- }
-
- if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.blksize);
- }
-#endif /* CONFIG_NFSD_PNFS */
- if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
- u32 supp[3];
-
- memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
- supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
- supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
- supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
-
- status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
- if (status)
- goto out;
- }
-
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
- status = nfsd4_encode_security_label(xdr, rqstp, context,
- contextlen);
- if (status)
- goto out;
- }
-#endif
-
- if (bmval2 & FATTR4_WORD2_XATTR_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- err = xattr_supported_namespace(d_inode(dentry),
- XATTR_USER_PREFIX);
- *p++ = cpu_to_be32(err == 0);
- }
-
- attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
- write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
+ attrlen = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
+ write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, XDR_UNIT);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (context)
- security_release_secctx(context, contextlen);
+ if (args.context.context)
+ security_release_secctx(&args.context);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
- kfree(acl);
+ kfree(args.acl);
if (tempfh) {
fh_put(tempfh);
kfree(tempfh);
@@ -3389,12 +3782,28 @@ __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
__be32 ret;
svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
- ret = nfsd4_encode_fattr(&xdr, fhp, exp, dentry, bmval, rqstp,
- ignore_crossmnt);
+ ret = nfsd4_encode_fattr4(rqstp, &xdr, fhp, exp, dentry, bmval,
+ ignore_crossmnt);
*p = xdr.p;
return ret;
}
+/*
+ * The buffer space for this field was reserved during a previous
+ * call to nfsd4_encode_entry4().
+ */
+static void nfsd4_encode_entry4_nfs_cookie4(const struct nfsd4_readdir *readdir,
+ u64 offset)
+{
+ __be64 cookie = cpu_to_be64(offset);
+ struct xdr_stream *xdr = readdir->xdr;
+
+ if (!readdir->cookie_offset)
+ return;
+ write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset, &cookie,
+ sizeof(cookie));
+}
+
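The helper above completes a two-phase encoding: while an entry is being emitted, space for its nfs_cookie4 is reserved with a placeholder, and only when the *next* entry arrives (and its offset is known) is the placeholder overwritten. A small stand-alone sketch of the reserve-then-backfill idea, using a plain byte buffer instead of the kernel's xdr_stream and entirely hypothetical helper names:

	/* cookie_backfill.c: reserve a placeholder, patch it in later */
	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl */

	static unsigned char buf[64];
	static size_t buflen;

	/* Phase 1: emit a placeholder cookie, remembering where it lives. */
	static size_t reserve_cookie(void)
	{
		size_t at = buflen;
		uint32_t placeholder[2] = { htonl(0xffffffff), htonl(0xffffffff) };

		memcpy(buf + buflen, placeholder, sizeof(placeholder));
		buflen += sizeof(placeholder);
		return at;
	}

	/* Phase 2: backfill the real 64-bit offset once it is known. */
	static void backfill_cookie(size_t at, uint64_t offset)
	{
		uint32_t words[2] = {
			htonl((uint32_t)(offset >> 32)),
			htonl((uint32_t)(offset & 0xffffffff)),
		};

		memcpy(buf + at, words, sizeof(words));
	}

	int main(void)
	{
		size_t slot = reserve_cookie();	/* while encoding entry N */
		/* ... entry N's name and attributes would be encoded here ... */
		backfill_cookie(slot, 42);	/* once entry N+1 supplies the offset */
		printf("encoded %zu bytes\n", buflen);
		return 0;
	}

In the patch itself the reservation half appears further down in nfsd4_encode_entry4(), which records cookie_offset before encoding the entry body.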
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
@@ -3405,15 +3814,17 @@ static inline int attributes_need_mount(u32 *bmval)
}
static __be32
-nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
- const char *name, int namlen)
+nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name,
+ int namlen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
__be32 nfserr;
int ignore_crossmnt = 0;
- dentry = lookup_positive_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
+ dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
+ &QSTR_LEN(name, namlen),
+ cd->rd_fhp->fh_dentry);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
@@ -3443,39 +3854,40 @@ nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
nfserr = nfserrno(err);
goto out_put;
}
- nfserr = check_nfsd_access(exp, cd->rd_rqstp);
+ nfserr = check_nfsd_access(exp, cd->rd_rqstp, false);
if (nfserr)
goto out_put;
}
out_encode:
- nfserr = nfsd4_encode_fattr(xdr, NULL, exp, dentry, cd->rd_bmval,
- cd->rd_rqstp, ignore_crossmnt);
+ nfserr = nfsd4_encode_fattr4(cd->rd_rqstp, cd->xdr, NULL, exp, dentry,
+ cd->rd_bmval, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
-static __be32 *
-nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
+static __be32
+nfsd4_encode_entry4_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return NULL;
- *p++ = htonl(2);
- *p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
- *p++ = htonl(0); /* bmval1 */
+ __be32 status;
- *p++ = htonl(4); /* attribute length */
- *p++ = nfserr; /* no htonl */
- return p;
+ /* attrmask */
+ status = nfsd4_encode_bitmap4(xdr, FATTR4_WORD0_RDATTR_ERROR, 0, 0);
+ if (status != nfs_ok)
+ return status;
+ /* attr_vals */
+ if (xdr_stream_encode_u32(xdr, XDR_UNIT) != XDR_UNIT)
+ return nfserr_resource;
+ /* rdattr_error */
+ if (xdr_stream_encode_be32(xdr, nfserr) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
}
static int
-nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+nfsd4_encode_entry4(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
@@ -3486,8 +3898,6 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
- __be64 wire_offset;
- __be32 *p;
 /* In nfsv4, "." and ".." never make it onto the wire. */
if (name && isdotent(name, namlen)) {
@@ -3495,24 +3905,19 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
return 0;
}
- if (cd->cookie_offset) {
- wire_offset = cpu_to_be64(offset);
- write_bytes_to_xdr_buf(xdr->buf, cd->cookie_offset,
- &wire_offset, 8);
- }
+ /* Encode the previous entry's cookie value */
+ nfsd4_encode_entry4_nfs_cookie4(cd, offset);
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ if (xdr_stream_encode_item_present(xdr) != XDR_UNIT)
goto fail;
- *p++ = xdr_one; /* mark entry present */
+
+ /* Reserve send buffer space for this entry's cookie value. */
cookie_offset = xdr->buf->len;
- p = xdr_reserve_space(xdr, 3*4 + namlen);
- if (!p)
+ if (nfsd4_encode_nfs_cookie4(xdr, OFFSET_MAX) != nfs_ok)
goto fail;
- p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
- p = xdr_encode_array(p, name, namlen); /* name length & name */
-
- nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
+ if (nfsd4_encode_component4(xdr, name, namlen) != nfs_ok)
+ goto fail;
+ nfserr = nfsd4_encode_entry4_fattr(cd, name, namlen);
switch (nfserr) {
case nfs_ok:
break;
@@ -3522,6 +3927,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
case nfserr_noent:
xdr_truncate_encode(xdr, start_offset);
goto skip_entry;
+ case nfserr_jukebox:
+ /*
+ * The pseudoroot should only display dentries that lead to
+ * exports. If we get EJUKEBOX here, then we can't tell whether
+ * this entry should be included. Just fail the whole READDIR
+ * with NFS4ERR_DELAY in that case, and hope that the situation
+ * will resolve itself by the client's next attempt.
+ */
+ if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT)
+ goto fail;
+ fallthrough;
default:
/*
* If the client requested the RDATTR_ERROR attribute,
@@ -3532,8 +3948,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
- p = nfsd4_encode_rdattr_error(xdr, nfserr);
- if (p == NULL) {
+ if (nfsd4_encode_entry4_rdattr_error(xdr, nfserr)) {
nfserr = nfserr_toosmall;
goto fail;
}
@@ -3568,307 +3983,427 @@ fail:
}
static __be32
-nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+nfsd4_encode_verifier4(struct xdr_stream *xdr, const nfs4_verifier *verf)
{
__be32 *p;
- p = xdr_reserve_space(xdr, sizeof(stateid_t));
+ p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
- *p++ = cpu_to_be32(sid->si_generation);
- p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
- sizeof(stateid_opaque_t));
- return 0;
+ memcpy(p, verf->data, sizeof(verf->data));
+ return nfs_ok;
}
static __be32
-nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
+nfsd4_encode_clientid4(struct xdr_stream *xdr, const clientid_t *clientid)
{
- struct xdr_stream *xdr = resp->xdr;
__be32 *p;
- p = xdr_reserve_space(xdr, 8);
+ p = xdr_reserve_space(xdr, sizeof(__be64));
if (!p)
return nfserr_resource;
- *p++ = cpu_to_be32(access->ac_supported);
- *p++ = cpu_to_be32(access->ac_resp_access);
- return 0;
+ memcpy(p, clientid, sizeof(*clientid));
+ return nfs_ok;
}
-static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
+/* This is a frequently-encoded item; open-coded for speed */
+static __be32
+nfsd4_encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
- struct xdr_stream *xdr = resp->xdr;
__be32 *p;
- p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
+ p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
if (!p)
return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(bcts->dir);
- /* Upshifting from TCP to RDMA is not supported */
- *p++ = cpu_to_be32(0);
- return 0;
+ *p++ = cpu_to_be32(sid->si_generation);
+ memcpy(p, &sid->si_opaque, sizeof(sid->si_opaque));
+ return nfs_ok;
}
static __be32
-nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
+nfsd4_encode_sessionid4(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sessionid)
{
+ return nfsd4_encode_opaque_fixed(xdr, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN);
+}
+
+static __be32
+nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_access *access = &u->access;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- return nfsd4_encode_stateid(xdr, &close->cl_stateid);
+ /* supported */
+ status = nfsd4_encode_uint32_t(xdr, access->ac_supported);
+ if (status != nfs_ok)
+ return status;
+ /* access */
+ return nfsd4_encode_uint32_t(xdr, access->ac_resp_access);
}
+static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* bctsr_sessid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &bcts->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* bctsr_dir */
+ if (xdr_stream_encode_u32(xdr, bcts->dir) != XDR_UNIT)
+ return nfserr_resource;
+ /* bctsr_use_conn_in_rdma_mode */
+ return nfsd4_encode_bool(xdr, false);
+}
static __be32
-nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
+nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_close *close = &u->close;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, commit->co_verf.data,
- NFS4_VERIFIER_SIZE);
- return 0;
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &close->cl_stateid);
+}
+
+
+static __be32
+nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_commit *commit = &u->commit;
+
+ return nfsd4_encode_verifier4(resp->xdr, &commit->co_verf);
}
static __be32
-nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
+nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_create *create = &u->create;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- encode_cinfo(p, &create->cr_cinfo);
- return nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
- create->cr_bmval[1], create->cr_bmval[2]);
+ /* cinfo */
+ nfserr = nfsd4_encode_change_info4(xdr, &create->cr_cinfo);
+ if (nfserr)
+ return nfserr;
+ /* attrset */
+ return nfsd4_encode_bitmap4(xdr, create->cr_bmval[0],
+ create->cr_bmval[1], create->cr_bmval[2]);
}
static __be32
-nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr)
+nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_getattr *getattr = &u->getattr;
struct svc_fh *fhp = getattr->ga_fhp;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
- getattr->ga_bmval, resp->rqstp, 0);
+ /* obj_attributes */
+ return nfsd4_encode_fattr4(resp->rqstp, xdr, fhp, fhp->fh_export,
+ fhp->fh_dentry, getattr->ga_bmval, 0);
}
static __be32
-nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp)
+nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
struct xdr_stream *xdr = resp->xdr;
- struct svc_fh *fhp = *fhpp;
- unsigned int len;
- __be32 *p;
+ struct svc_fh *fhp = u->getfh;
- len = fhp->fh_handle.fh_size;
- p = xdr_reserve_space(xdr, len + 4);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw, len);
- return 0;
+ /* object */
+ return nfsd4_encode_nfs_fh4(xdr, &fhp->fh_handle);
}
-/*
-* Including all fields other than the name, a LOCK4denied structure requires
-* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
-*/
static __be32
-nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
+nfsd4_encode_lock_owner4(struct xdr_stream *xdr, const clientid_t *clientid,
+ const struct xdr_netobj *owner)
{
- struct xdr_netobj *conf = &ld->ld_owner;
- __be32 *p;
+ __be32 status;
-again:
- p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
- if (!p) {
- /*
- * Don't fail to return the result just because we can't
- * return the conflicting open:
- */
- if (conf->len) {
- kfree(conf->data);
- conf->len = 0;
- conf->data = NULL;
- goto again;
- }
+ /* clientid */
+ status = nfsd4_encode_clientid4(xdr, clientid);
+ if (status != nfs_ok)
+ return status;
+ /* owner */
+ return nfsd4_encode_opaque(xdr, owner->data, owner->len);
+}
+
+static __be32
+nfsd4_encode_lock4denied(struct xdr_stream *xdr,
+ const struct nfsd4_lock_denied *ld)
+{
+ __be32 status;
+
+ /* offset */
+ status = nfsd4_encode_offset4(xdr, ld->ld_start);
+ if (status != nfs_ok)
+ return status;
+ /* length */
+ status = nfsd4_encode_length4(xdr, ld->ld_length);
+ if (status != nfs_ok)
+ return status;
+ /* locktype */
+ if (xdr_stream_encode_u32(xdr, ld->ld_type) != XDR_UNIT)
return nfserr_resource;
- }
- p = xdr_encode_hyper(p, ld->ld_start);
- p = xdr_encode_hyper(p, ld->ld_length);
- *p++ = cpu_to_be32(ld->ld_type);
- if (conf->len) {
- p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
- p = xdr_encode_opaque(p, conf->data, conf->len);
- kfree(conf->data);
- } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
- p = xdr_encode_hyper(p, (u64)0); /* clientid */
- *p++ = cpu_to_be32(0); /* length of owner name */
- }
- return nfserr_denied;
+ /* owner */
+ return nfsd4_encode_lock_owner4(xdr, &ld->ld_clientid,
+ &ld->ld_owner);
}
static __be32
-nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
+nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_lock *lock = &u->lock;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- if (!nfserr)
- nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
- else if (nfserr == nfserr_denied)
- nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-
- return nfserr;
+ switch (nfserr) {
+ case nfs_ok:
+ /* resok4 */
+ status = nfsd4_encode_stateid4(xdr, &lock->lk_resp_stateid);
+ break;
+ case nfserr_denied:
+ /* denied */
+ status = nfsd4_encode_lock4denied(xdr, &lock->lk_denied);
+ break;
+ default:
+ return nfserr;
+ }
+ return status != nfs_ok ? status : nfserr;
}
static __be32
-nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt)
+nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_lockt *lockt = &u->lockt;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- if (nfserr == nfserr_denied)
- nfsd4_encode_lock_denied(xdr, &lockt->lt_denied);
+ if (nfserr == nfserr_denied) {
+ /* denied */
+ status = nfsd4_encode_lock4denied(xdr, &lockt->lt_denied);
+ if (status != nfs_ok)
+ return status;
+ }
return nfserr;
}
static __be32
-nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
+nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_locku *locku = &u->locku;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &locku->lu_stateid);
+ /* lock_stateid */
+ return nfsd4_encode_stateid4(xdr, &locku->lu_stateid);
}
static __be32
-nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
+nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_link *link = &u->link;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- p = encode_cinfo(p, &link->li_cinfo);
- return 0;
+ return nfsd4_encode_change_info4(xdr, &link->li_cinfo);
}
-
+/*
+ * This implementation does not yet support returning an ACE in an
+ * OPEN that offers a delegation.
+ */
static __be32
-nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
+nfsd4_encode_open_nfsace4(struct xdr_stream *xdr)
{
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
- if (nfserr)
- return nfserr;
- p = xdr_reserve_space(xdr, 24);
- if (!p)
+ /* type */
+ status = nfsd4_encode_acetype4(xdr, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* flag */
+ status = nfsd4_encode_aceflag4(xdr, 0);
+ if (status != nfs_ok)
return nfserr_resource;
- p = encode_cinfo(p, &open->op_cinfo);
- *p++ = cpu_to_be32(open->op_rflags);
+ /* access mask */
+ status = nfsd4_encode_acemask4(xdr, 0);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* who - empty for now */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+}
- nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
- open->op_bmval[2]);
- if (nfserr)
- return nfserr;
+static __be32
+nfsd4_encode_open_read_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
+{
+ __be32 status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* stateid */
+ status = nfsd4_encode_stateid4(xdr, &open->op_delegate_stateid);
+ if (status != nfs_ok)
+ return status;
+ /* recall */
+ status = nfsd4_encode_bool(xdr, open->op_recall);
+ if (status != nfs_ok)
+ return status;
+ /* permissions */
+ return nfsd4_encode_open_nfsace4(xdr);
+}
+
+static __be32
+nfsd4_encode_nfs_space_limit4(struct xdr_stream *xdr, u64 filesize)
+{
+ /* limitby */
+ if (xdr_stream_encode_u32(xdr, NFS4_LIMIT_SIZE) != XDR_UNIT)
return nfserr_resource;
+ /* filesize */
+ return nfsd4_encode_uint64_t(xdr, filesize);
+}
- *p++ = cpu_to_be32(open->op_delegate_type);
- switch (open->op_delegate_type) {
- case NFS4_OPEN_DELEGATE_NONE:
- break;
- case NFS4_OPEN_DELEGATE_READ:
- nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
- if (nfserr)
- return nfserr;
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_recall);
+static __be32
+nfsd4_encode_open_write_delegation4(struct xdr_stream *xdr,
+ struct nfsd4_open *open)
+{
+ __be32 status;
- /*
- * TODO: ACE's in delegations
- */
- *p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
+ /* stateid */
+ status = nfsd4_encode_stateid4(xdr, &open->op_delegate_stateid);
+ if (status != nfs_ok)
+ return status;
+ /* recall */
+ status = nfsd4_encode_bool(xdr, open->op_recall);
+ if (status != nfs_ok)
+ return status;
+ /* space_limit */
+ status = nfsd4_encode_nfs_space_limit4(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_open_nfsace4(xdr);
+}
+
+static __be32
+nfsd4_encode_open_none_delegation4(struct xdr_stream *xdr,
+ struct nfsd4_open *open)
+{
+ __be32 status = nfs_ok;
+
+ /* ond_why */
+ if (xdr_stream_encode_u32(xdr, open->op_why_no_deleg) != XDR_UNIT)
+ return nfserr_resource;
+ switch (open->op_why_no_deleg) {
+ case WND4_CONTENTION:
+ /* ond_server_will_push_deleg */
+ status = nfsd4_encode_bool(xdr, false);
break;
- case NFS4_OPEN_DELEGATE_WRITE:
- nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
- if (nfserr)
- return nfserr;
- p = xdr_reserve_space(xdr, 32);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0);
+ case WND4_RESOURCE:
+ /* ond_server_will_signal_avail */
+ status = nfsd4_encode_bool(xdr, false);
+ }
+ return status;
+}
- /*
- * TODO: space_limit's in delegations
- */
- *p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
- *p++ = cpu_to_be32(~(u32)0);
- *p++ = cpu_to_be32(~(u32)0);
+static __be32
+nfsd4_encode_open_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
+{
+ __be32 status;
- /*
- * TODO: ACE's in delegations
- */
- *p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
+ /* delegation_type */
+ if (xdr_stream_encode_u32(xdr, open->op_delegate_type) != XDR_UNIT)
+ return nfserr_resource;
+ switch (open->op_delegate_type) {
+ case OPEN_DELEGATE_NONE:
+ status = nfs_ok;
break;
- case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
- switch (open->op_why_no_deleg) {
- case WND4_CONTENTION:
- case WND4_RESOURCE:
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_why_no_deleg);
- /* deleg signaling not supported yet: */
- *p++ = cpu_to_be32(0);
- break;
- default:
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_why_no_deleg);
- }
+ case OPEN_DELEGATE_READ:
+ case OPEN_DELEGATE_READ_ATTRS_DELEG:
+ /* read */
+ status = nfsd4_encode_open_read_delegation4(xdr, open);
+ break;
+ case OPEN_DELEGATE_WRITE:
+ case OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ /* write */
+ status = nfsd4_encode_open_write_delegation4(xdr, open);
+ break;
+ case OPEN_DELEGATE_NONE_EXT:
+ /* od_whynone */
+ status = nfsd4_encode_open_none_delegation4(xdr, open);
break;
default:
- BUG();
+ status = nfserr_serverfault;
}
- /* XXX save filehandle here */
- return 0;
+
+ return status;
+}
+
+static __be32
+nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_open *open = &u->open;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &open->op_stateid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* cinfo */
+ nfserr = nfsd4_encode_change_info4(xdr, &open->op_cinfo);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* rflags */
+ nfserr = nfsd4_encode_uint32_t(xdr, open->op_rflags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* attrset */
+ nfserr = nfsd4_encode_bitmap4(xdr, open->op_bmval[0],
+ open->op_bmval[1], open->op_bmval[2]);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* delegation */
+ return nfsd4_encode_open_delegation4(xdr, open);
}
static __be32
-nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
+nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_open_confirm *oc = &u->open_confirm;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &oc->oc_resp_stateid);
}
static __be32
-nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
+nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_open_downgrade *od = &u->open_downgrade;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &od->od_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &od->od_stateid);
}
+/*
+ * This function assumes that the READ it encodes is the only READ
+ * operation in the COMPOUND. If there are multiple READs,
+ * nfsd4_encode_readv() is used instead.
+ */
static __be32 nfsd4_encode_splice_read(
struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
@@ -3877,19 +4412,33 @@ static __be32 nfsd4_encode_splice_read(
struct xdr_stream *xdr = resp->xdr;
struct xdr_buf *buf = xdr->buf;
int status, space_left;
- u32 eof;
__be32 nfserr;
- __be32 *p = xdr->p - 2;
- /* Make sure there will be room for padding if needed */
- if (xdr->end - xdr->p < 1)
+ /*
+ * Splice read doesn't work if encoding has already wandered
+ * into the XDR buf's page array.
+ */
+ if (unlikely(xdr->buf->page_len)) {
+ WARN_ON_ONCE(1);
+ return nfserr_serverfault;
+ }
+
+ /*
+ * Make sure there is room at the end of buf->head for
+ * svcxdr_encode_opaque_pages() to create a tail buffer
+ * to XDR-pad the payload.
+ */
+ if (xdr->iov != xdr->buf->head || xdr->end - xdr->p < 1)
return nfserr_resource;
nfserr = nfsd_splice_read(read->rd_rqstp, read->rd_fhp,
- file, read->rd_offset, &maxcount, &eof);
+ file, read->rd_offset, &maxcount,
+ &read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
goto out_err;
+ svcxdr_encode_opaque_pages(read->rd_rqstp, xdr, buf->pages,
+ buf->page_base, maxcount);
status = svc_encode_result_payload(read->rd_rqstp,
buf->head[0].iov_len, maxcount);
if (status) {
@@ -3897,34 +4446,19 @@ static __be32 nfsd4_encode_splice_read(
goto out_err;
}
- *(p++) = htonl(eof);
- *(p++) = htonl(maxcount);
-
- buf->page_len = maxcount;
- buf->len += maxcount;
- xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1)
- / PAGE_SIZE;
-
- /* Use rest of head for padding and remaining ops: */
- buf->tail[0].iov_base = xdr->p;
- buf->tail[0].iov_len = 0;
- xdr->iov = buf->tail;
- if (maxcount&3) {
- int pad = 4 - (maxcount&3);
-
- *(xdr->p++) = 0;
-
- buf->tail[0].iov_base += maxcount&3;
- buf->tail[0].iov_len = pad;
- buf->len += pad;
- }
-
+ /*
+ * Prepare to encode subsequent operations.
+ *
+ * xdr_truncate_encode() is not safe to use after a successful
+ * splice read has been done, so the following stream
+ * manipulations are open-coded.
+ */
space_left = min_t(int, (void *)xdr->end - (void *)xdr->p,
buf->buflen - buf->len);
buf->buflen = buf->len + space_left;
xdr->end = (__be32 *)((void *)xdr->end + space_left);
- return 0;
+ return nfs_ok;
out_err:
/*
@@ -3938,110 +4472,108 @@ out_err:
static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct nfsd4_read *read,
- struct file *file, unsigned long maxcount)
+ unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
- u32 eof;
- int starting_len = xdr->buf->len - 8;
+ unsigned int base = xdr->buf->page_len & ~PAGE_MASK;
+ unsigned int starting_len = xdr->buf->len;
+ __be32 zero = xdr_zero;
__be32 nfserr;
- __be32 tmp;
- int pad;
- read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, maxcount);
- if (read->rd_vlen < 0)
- return nfserr_resource;
-
- nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
- resp->rqstp->rq_vec, read->rd_vlen, &maxcount,
- &eof);
+ nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, read->rd_nf,
+ read->rd_offset, &maxcount, base,
+ &read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
return nfserr;
- if (svc_encode_result_payload(resp->rqstp, starting_len + 8, maxcount))
- return nfserr_io;
- xdr_truncate_encode(xdr, starting_len + 8 + xdr_align_size(maxcount));
- tmp = htonl(eof);
- write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4);
- tmp = htonl(maxcount);
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+ /*
+ * svcxdr_encode_opaque_pages() is not used here because
+ * we don't want to encode subsequent results in this
+ * COMPOUND into the xdr->buf's tail, but rather those
+ * results should follow the NFS READ payload in the
+ * buf's pages.
+ */
+ if (xdr_reserve_space_vec(xdr, maxcount) < 0)
+ return nfserr_resource;
- tmp = xdr_zero;
- pad = (maxcount&3) ? 4 - (maxcount&3) : 0;
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 8 + maxcount,
- &tmp, pad);
- return 0;
+ /*
+ * Mark the buffer location of the NFS READ payload so that
+ * direct placement-capable transports send only the
+ * payload bytes out-of-band.
+ */
+ if (svc_encode_result_payload(resp->rqstp, starting_len, maxcount))
+ return nfserr_io;
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + maxcount, &zero,
+ xdr_pad_size(maxcount));
+ return nfs_ok;
}
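
nfsd4_encode_readv() ends by zero-filling up to the next 4-byte boundary, because XDR requires variable-length opaque data to be padded to a multiple of XDR_UNIT. A minimal user-space sketch of the same arithmetic that xdr_align_size()/xdr_pad_size() perform; the helper names are local to the sketch.

#include <stdio.h>

#define XDR_UNIT 4u

/* Round len up to the next XDR_UNIT boundary. */
static unsigned int align_size(unsigned int len)
{
        return (len + XDR_UNIT - 1) & ~(XDR_UNIT - 1);
}

/* Number of zero bytes needed to pad len out to that boundary. */
static unsigned int pad_size(unsigned int len)
{
        return align_size(len) - len;
}

int main(void)
{
        unsigned int lens[] = { 0, 1, 5, 8, 13 };

        for (unsigned int i = 0; i < 5; i++)
                printf("len=%u pad=%u aligned=%u\n",
                       lens[i], pad_size(lens[i]), align_size(lens[i]));
        return 0;
}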
static __be32
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_read *read)
+ union nfsd4_op_u *u)
{
- unsigned long maxcount;
+ struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp;
+ struct nfsd4_read *read = &u->read;
struct xdr_stream *xdr = resp->xdr;
+ bool splice_ok = argp->splice_ok;
+ unsigned int eof_offset;
+ unsigned long maxcount;
+ __be32 wire_data[2];
struct file *file;
- int starting_len = xdr->buf->len;
- __be32 *p;
if (nfserr)
return nfserr;
+
+ eof_offset = xdr->buf->len;
file = read->rd_nf->nf_file;
- p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
- if (!p) {
- WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
- return nfserr_resource;
- }
- if (resp->xdr->buf->page_len &&
- test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
- WARN_ON_ONCE(1);
+ /* Reserve space for the eof flag and byte count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2))) {
+ WARN_ON_ONCE(splice_ok);
return nfserr_resource;
}
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
- if (file->f_op->splice_read &&
- test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
+ if (file->f_op->splice_read && splice_ok)
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
- nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
-
- if (nfserr)
- xdr_truncate_encode(xdr, starting_len);
+ nfserr = nfsd4_encode_readv(resp, read, maxcount);
+ if (nfserr) {
+ xdr_truncate_encode(xdr, eof_offset);
+ return nfserr;
+ }
- return nfserr;
+ wire_data[0] = read->rd_eof ? xdr_one : xdr_zero;
+ wire_data[1] = cpu_to_be32(read->rd_length);
+ write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2);
+ return nfs_ok;
}
static __be32
-nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
+nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- int maxcount;
- __be32 wire_count;
- int zero = 0;
+ struct nfsd4_readlink *readlink = &u->readlink;
+ __be32 *p, wire_count, zero = xdr_zero;
struct xdr_stream *xdr = resp->xdr;
- int length_offset = xdr->buf->len;
- int status;
- __be32 *p;
+ unsigned int length_offset;
+ int maxcount, status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* linktext4.count */
+ length_offset = xdr->buf->len;
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
return nfserr_resource;
- maxcount = PAGE_SIZE;
+ /* linktext4.data */
+ maxcount = PAGE_SIZE;
p = xdr_reserve_space(xdr, maxcount);
if (!p)
return nfserr_resource;
- /*
- * XXX: By default, vfs_readlink() will truncate symlinks if they
- * would overflow the buffer. Is this kosher in NFSv4? If not, one
- * easy fix is: if vfs_readlink() precisely fills the buffer, assume
- * that truncation occurred, and return NFS4ERR_RESOURCE.
- */
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
(char *)p, &maxcount);
if (nfserr == nfserr_isdir)
@@ -4055,145 +4587,178 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
goto out_err;
}
- wire_count = htonl(maxcount);
- write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
- xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
- if (maxcount & 3)
- write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
- &zero, 4 - (maxcount&3));
- return 0;
+ wire_count = cpu_to_be32(maxcount);
+ write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, XDR_UNIT);
+ xdr_truncate_encode(xdr, length_offset + 4 + xdr_align_size(maxcount));
+ write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero,
+ xdr_pad_size(maxcount));
+ return nfs_ok;
out_err:
xdr_truncate_encode(xdr, length_offset);
return nfserr;
}
-static __be32
-nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir)
+static __be32 nfsd4_encode_dirlist4(struct xdr_stream *xdr,
+ struct nfsd4_readdir *readdir,
+ u32 max_payload)
{
- int maxcount;
- int bytes_left;
+ int bytes_left, maxcount, starting_len = xdr->buf->len;
loff_t offset;
- __be64 wire_offset;
- struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
- if (!p)
- return nfserr_resource;
-
- /* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- xdr->buf->head[0].iov_len = (char *)xdr->p -
- (char *)xdr->buf->head[0].iov_base;
+ __be32 status;
/*
* Number of bytes left for directory entries allowing for the
- * final 8 bytes of the readdir and a following failed op:
+ * final 8 bytes of the readdir and a following failed op.
*/
- bytes_left = xdr->buf->buflen - xdr->buf->len
- - COMPOUND_ERR_SLACK_SPACE - 8;
- if (bytes_left < 0) {
- nfserr = nfserr_resource;
- goto err_no_verf;
- }
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
+ bytes_left = xdr->buf->buflen - xdr->buf->len -
+ COMPOUND_ERR_SLACK_SPACE - XDR_UNIT * 2;
+ if (bytes_left < 0)
+ return nfserr_resource;
+ maxcount = min_t(u32, readdir->rd_maxcount, max_payload);
+
/*
- * Note the rfc defines rd_maxcount as the size of the
- * READDIR4resok structure, which includes the verifier above
- * and the 8 bytes encoded at the end of this function:
+ * The RFC defines rd_maxcount as the size of the
+ * READDIR4resok structure, which includes the verifier
+ * and the 8 bytes encoded at the end of this function.
*/
- if (maxcount < 16) {
- nfserr = nfserr_toosmall;
- goto err_no_verf;
- }
- maxcount = min_t(int, maxcount-16, bytes_left);
+ if (maxcount < XDR_UNIT * 4)
+ return nfserr_toosmall;
+ maxcount = min_t(int, maxcount - XDR_UNIT * 4, bytes_left);
- /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+ /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0 */
if (!readdir->rd_dircount)
- readdir->rd_dircount = svc_max_payload(resp->rqstp);
+ readdir->rd_dircount = max_payload;
+ /* *entries */
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
readdir->cookie_offset = 0;
-
offset = readdir->rd_cookie;
- nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
- &offset,
- &readdir->common, nfsd4_encode_dirent);
- if (nfserr == nfs_ok &&
- readdir->common.err == nfserr_toosmall &&
- xdr->buf->len == starting_len + 8) {
- /* nothing encoded; which limit did we hit?: */
- if (maxcount - 16 < bytes_left)
- /* It was the fault of rd_maxcount: */
- nfserr = nfserr_toosmall;
- else
- /* We ran out of buffer space: */
- nfserr = nfserr_resource;
+ status = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp, &offset,
+ &readdir->common, nfsd4_encode_entry4);
+ if (status)
+ return status;
+ if (readdir->common.err == nfserr_toosmall &&
+ xdr->buf->len == starting_len) {
+ /* No entries were encoded. Which limit did we hit? */
+ if (maxcount - XDR_UNIT * 4 < bytes_left)
+ /* It was the fault of rd_maxcount */
+ return nfserr_toosmall;
+ /* We ran out of buffer space */
+ return nfserr_resource;
}
- if (nfserr)
- goto err_no_verf;
+ /* Encode the final entry's cookie value */
+ nfsd4_encode_entry4_nfs_cookie4(readdir, offset);
+ /* No entries follow */
+ if (xdr_stream_encode_item_absent(xdr) != XDR_UNIT)
+ return nfserr_resource;
- if (readdir->cookie_offset) {
- wire_offset = cpu_to_be64(offset);
- write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset,
- &wire_offset, 8);
- }
+ /* eof */
+ return nfsd4_encode_bool(xdr, readdir->common.err == nfserr_eof);
+}
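
nfsd4_encode_dirlist4() budgets entry space twice: once against the reply buffer (leaving slack for the trailing two words and a possible error op) and once against the client's rd_maxcount minus the 16 bytes of READDIR4resok overhead. A small sketch of that arithmetic, not part of the patch; the COMPOUND_ERR_SLACK_SPACE value below is assumed for the illustration.

#include <stdio.h>

#define XDR_UNIT 4
#define COMPOUND_ERR_SLACK_SPACE 16     /* value assumed for the sketch */

/* Returns the byte budget for directory entries, or a negative value
 * when the request cannot be satisfied (mirroring the toosmall and
 * resource error paths above). */
static int entry_budget(int buflen, int buf_used, unsigned int rd_maxcount,
                        unsigned int max_payload)
{
        int bytes_left = buflen - buf_used -
                         COMPOUND_ERR_SLACK_SPACE - XDR_UNIT * 2;
        int maxcount;

        if (bytes_left < 0)
                return -1;                      /* no room in the reply */
        maxcount = rd_maxcount < max_payload ? rd_maxcount : max_payload;
        if (maxcount < XDR_UNIT * 4)
                return -2;                      /* rd_maxcount too small */
        maxcount -= XDR_UNIT * 4;               /* verifier + trailing words */
        return maxcount < bytes_left ? maxcount : bytes_left;
}

int main(void)
{
        printf("budget = %d\n", entry_budget(4096, 512, 1024, 1048576));
        return 0;
}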
- p = xdr_reserve_space(xdr, 8);
- if (!p) {
- WARN_ON_ONCE(1);
- goto err_no_verf;
- }
- *p++ = 0; /* no more entries */
- *p++ = htonl(readdir->common.err == nfserr_eof);
+static __be32
+nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_readdir *readdir = &u->readdir;
+ struct xdr_stream *xdr = resp->xdr;
+ int starting_len = xdr->buf->len;
- return 0;
-err_no_verf:
- xdr_truncate_encode(xdr, starting_len);
+ /* cookieverf */
+ nfserr = nfsd4_encode_verifier4(xdr, &readdir->rd_verf);
+ if (nfserr != nfs_ok)
+ return nfserr;
+
+ /* reply */
+ nfserr = nfsd4_encode_dirlist4(xdr, readdir, svc_max_payload(resp->rqstp));
+ if (nfserr != nfs_ok)
+ xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
static __be32
-nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
+nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_remove *remove = &u->remove;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- p = encode_cinfo(p, &remove->rm_cinfo);
- return 0;
+ return nfsd4_encode_change_info4(xdr, &remove->rm_cinfo);
}
static __be32
-nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
+nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_rename *rename = &u->rename;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 40);
- if (!p)
+ nfserr = nfsd4_encode_change_info4(xdr, &rename->rn_sinfo);
+ if (nfserr)
+ return nfserr;
+ return nfsd4_encode_change_info4(xdr, &rename->rn_tinfo);
+}
+
+static __be32
+nfsd4_encode_rpcsec_gss_info(struct xdr_stream *xdr,
+ struct rpcsec_gss_info *info)
+{
+ __be32 status;
+
+ /* oid */
+ if (xdr_stream_encode_opaque(xdr, info->oid.data, info->oid.len) < 0)
return nfserr_resource;
- p = encode_cinfo(p, &rename->rn_sinfo);
- p = encode_cinfo(p, &rename->rn_tinfo);
- return 0;
+ /* qop */
+ status = nfsd4_encode_qop4(xdr, info->qop);
+ if (status != nfs_ok)
+ return status;
+ /* service */
+ if (xdr_stream_encode_u32(xdr, info->service) != XDR_UNIT)
+ return nfserr_resource;
+
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_secinfo4(struct xdr_stream *xdr, rpc_authflavor_t pf,
+ u32 *supported)
+{
+ struct rpcsec_gss_info info;
+ __be32 status;
+
+ if (rpcauth_get_gssinfo(pf, &info) == 0) {
+ (*supported)++;
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
+ if (status != nfs_ok)
+ return status;
+ /* flavor_info */
+ status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
+ if (status != nfs_ok)
+ return status;
+ } else if (pf < RPC_AUTH_MAXFLAVOR) {
+ (*supported)++;
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, pf);
+ if (status != nfs_ok)
+ return status;
+ }
+ return nfs_ok;
}
static __be32
-nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
+nfsd4_encode_SECINFO4resok(struct xdr_stream *xdr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- __be32 *p, *flavorsp;
- static bool report = true;
+ unsigned int count_offset;
+ __be32 status, wire_count;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
@@ -4215,727 +4780,735 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
}
}
- supported = 0;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ count_offset = xdr->buf->len;
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
return nfserr_resource;
- flavorsp = p++; /* to be backfilled later */
-
- for (i = 0; i < nflavs; i++) {
- rpc_authflavor_t pf = flavs[i].pseudoflavor;
- struct rpcsec_gss_info info;
- if (rpcauth_get_gssinfo(pf, &info) == 0) {
- supported++;
- p = xdr_reserve_space(xdr, 4 + 4 +
- XDR_LEN(info.oid.len) + 4 + 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(RPC_AUTH_GSS);
- p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
- *p++ = cpu_to_be32(info.qop);
- *p++ = cpu_to_be32(info.service);
- } else if (pf < RPC_AUTH_MAXFLAVOR) {
- supported++;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(pf);
- } else {
- if (report)
- pr_warn("NFS: SECINFO: security flavor %u "
- "is not supported\n", pf);
- }
+ for (i = 0, supported = 0; i < nflavs; i++) {
+ status = nfsd4_encode_secinfo4(xdr, flavs[i].pseudoflavor,
+ &supported);
+ if (status != nfs_ok)
+ return status;
}
- if (nflavs != supported)
- report = false;
- *flavorsp = htonl(supported);
+ wire_count = cpu_to_be32(supported);
+ write_bytes_to_xdr_buf(xdr->buf, count_offset, &wire_count,
+ XDR_UNIT);
return 0;
}
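
nfsd4_encode_SECINFO4resok() cannot know how many flavors it will encode until it has walked the export's flavor list, so it reserves the count word, encodes the entries, and backfills the count at the remembered offset. A simplified stand-alone sketch of that reserve-then-backfill pattern, not part of the patch; the buffer helpers here are hypothetical.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char buf[64];
static size_t buflen;

static size_t reserve_u32(void)
{
        size_t off = buflen;

        buflen += 4;                    /* leave a hole for the count */
        return off;
}

static void put_u32_at(size_t off, uint32_t val)
{
        uint32_t be = htonl(val);

        memcpy(buf + off, &be, sizeof(be));
}

static void append_u32(uint32_t val)
{
        put_u32_at(buflen, val);
        buflen += 4;
}

int main(void)
{
        size_t count_offset = reserve_u32();
        uint32_t flavors[] = { 1, 6, 390003 }; /* illustrative pseudoflavors */
        uint32_t supported = 0;

        for (int i = 0; i < 3; i++) {
                append_u32(flavors[i]);
                supported++;
        }
        put_u32_at(count_offset, supported);    /* backfill the count */

        printf("encoded %zu bytes, %u flavors\n", buflen, supported);
        return 0;
}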
static __be32
nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_secinfo *secinfo)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_secinfo *secinfo = &u->secinfo;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp);
+ return nfsd4_encode_SECINFO4resok(xdr, secinfo->si_exp);
}
static __be32
nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_secinfo_no_name *secinfo)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_secinfo_no_name *secinfo = &u->secinfo_no_name;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
+ return nfsd4_encode_SECINFO4resok(xdr, secinfo->sin_exp);
}
-/*
- * The SETATTR encode routine is special -- it always encodes a bitmap,
- * regardless of the error status.
- */
static __be32
-nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
+nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ struct nfsd4_setattr *setattr = &u->setattr;
+ __be32 status;
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- return nfserr_resource;
- if (nfserr) {
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- }
- else {
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(setattr->sa_bmval[0]);
- *p++ = cpu_to_be32(setattr->sa_bmval[1]);
- *p++ = cpu_to_be32(setattr->sa_bmval[2]);
+ switch (nfserr) {
+ case nfs_ok:
+ /* attrsset */
+ status = nfsd4_encode_bitmap4(resp->xdr, setattr->sa_bmval[0],
+ setattr->sa_bmval[1],
+ setattr->sa_bmval[2]);
+ break;
+ default:
+ /* attrsset */
+ status = nfsd4_encode_bitmap4(resp->xdr, 0, 0, 0);
}
- return nfserr;
+ return status != nfs_ok ? status : nfserr;
}
static __be32
-nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
+nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setclientid *scd = &u->setclientid;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
if (!nfserr) {
- p = xdr_reserve_space(xdr, 8 + NFS4_VERIFIER_SIZE);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, &scd->se_clientid, 8);
- p = xdr_encode_opaque_fixed(p, &scd->se_confirm,
- NFS4_VERIFIER_SIZE);
- }
- else if (nfserr == nfserr_clid_inuse) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
+ nfserr = nfsd4_encode_clientid4(xdr, &scd->se_clientid);
+ if (nfserr != nfs_ok)
+ goto out;
+ nfserr = nfsd4_encode_verifier4(xdr, &scd->se_confirm);
+ } else if (nfserr == nfserr_clid_inuse) {
+ /* empty network id */
+ if (xdr_stream_encode_u32(xdr, 0) < 0) {
+ nfserr = nfserr_resource;
+ goto out;
+ }
+ /* empty universal address */
+ if (xdr_stream_encode_u32(xdr, 0) < 0) {
+ nfserr = nfserr_resource;
+ goto out;
+ }
}
+out:
return nfserr;
}
static __be32
-nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
+nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_write *write = &u->write;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 16);
- if (!p)
+ /* count */
+ nfserr = nfsd4_encode_count4(xdr, write->wr_bytes_written);
+ if (nfserr)
+ return nfserr;
+ /* committed */
+ if (xdr_stream_encode_u32(xdr, write->wr_how_written) != XDR_UNIT)
return nfserr_resource;
- *p++ = cpu_to_be32(write->wr_bytes_written);
- *p++ = cpu_to_be32(write->wr_how_written);
- p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
- NFS4_VERIFIER_SIZE);
- return 0;
+ /* writeverf */
+ return nfsd4_encode_verifier4(xdr, &write->wr_verifier);
}
static __be32
-nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_exchange_id *exid)
+nfsd4_encode_state_protect_ops4(struct xdr_stream *xdr,
+ struct nfsd4_exchange_id *exid)
{
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- char *major_id;
- char *server_scope;
- int major_id_sz;
- int server_scope_sz;
- uint64_t minor_id = 0;
- struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
-
- major_id = nn->nfsd_name;
- major_id_sz = strlen(nn->nfsd_name);
- server_scope = nn->nfsd_name;
- server_scope_sz = strlen(nn->nfsd_name);
-
- p = xdr_reserve_space(xdr,
- 8 /* eir_clientid */ +
- 4 /* eir_sequenceid */ +
- 4 /* eir_flags */ +
- 4 /* spr_how */);
- if (!p)
- return nfserr_resource;
+ __be32 status;
- p = xdr_encode_opaque_fixed(p, &exid->clientid, 8);
- *p++ = cpu_to_be32(exid->seqid);
- *p++ = cpu_to_be32(exid->flags);
+ /* spo_must_enforce */
+ status = nfsd4_encode_bitmap4(xdr, exid->spo_must_enforce[0],
+ exid->spo_must_enforce[1],
+ exid->spo_must_enforce[2]);
+ if (status != nfs_ok)
+ return status;
+ /* spo_must_allow */
+ return nfsd4_encode_bitmap4(xdr, exid->spo_must_allow[0],
+ exid->spo_must_allow[1],
+ exid->spo_must_allow[2]);
+}
- *p++ = cpu_to_be32(exid->spa_how);
+static __be32
+nfsd4_encode_state_protect4_r(struct xdr_stream *xdr, struct nfsd4_exchange_id *exid)
+{
+ __be32 status;
+ if (xdr_stream_encode_u32(xdr, exid->spa_how) != XDR_UNIT)
+ return nfserr_resource;
switch (exid->spa_how) {
case SP4_NONE:
+ status = nfs_ok;
break;
case SP4_MACH_CRED:
- /* spo_must_enforce bitmap: */
- nfserr = nfsd4_encode_bitmap(xdr,
- exid->spo_must_enforce[0],
- exid->spo_must_enforce[1],
- exid->spo_must_enforce[2]);
- if (nfserr)
- return nfserr;
- /* spo_must_allow bitmap: */
- nfserr = nfsd4_encode_bitmap(xdr,
- exid->spo_must_allow[0],
- exid->spo_must_allow[1],
- exid->spo_must_allow[2]);
- if (nfserr)
- return nfserr;
+ /* spr_mach_ops */
+ status = nfsd4_encode_state_protect_ops4(xdr, exid);
break;
default:
- WARN_ON_ONCE(1);
+ status = nfserr_serverfault;
}
+ return status;
+}
- p = xdr_reserve_space(xdr,
- 8 /* so_minor_id */ +
- 4 /* so_major_id.len */ +
- (XDR_QUADLEN(major_id_sz) * 4) +
- 4 /* eir_server_scope.len */ +
- (XDR_QUADLEN(server_scope_sz) * 4) +
- 4 /* eir_server_impl_id.count (0) */);
- if (!p)
- return nfserr_resource;
+static __be32
+nfsd4_encode_server_owner4(struct xdr_stream *xdr, struct svc_rqst *rqstp)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ __be32 status;
- /* The server_owner struct */
- p = xdr_encode_hyper(p, minor_id); /* Minor id */
- /* major id */
- p = xdr_encode_opaque(p, major_id, major_id_sz);
+ /* so_minor_id */
+ status = nfsd4_encode_uint64_t(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ /* so_major_id */
+ return nfsd4_encode_opaque(xdr, nn->nfsd_name, strlen(nn->nfsd_name));
+}
- /* Server scope */
- p = xdr_encode_opaque(p, server_scope, server_scope_sz);
+static __be32
+nfsd4_encode_nfs_impl_id4(struct xdr_stream *xdr, struct nfsd4_exchange_id *exid)
+{
+ __be32 status;
- /* Implementation id */
- *p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
- return 0;
+ /* nii_domain */
+ status = nfsd4_encode_opaque(xdr, exid->nii_domain.data,
+ exid->nii_domain.len);
+ if (status != nfs_ok)
+ return status;
+ /* nii_name */
+ status = nfsd4_encode_opaque(xdr, exid->nii_name.data,
+ exid->nii_name.len);
+ if (status != nfs_ok)
+ return status;
+ /* nii_time */
+ return nfsd4_encode_nfstime4(xdr, &exid->nii_time);
}
static __be32
-nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_create_session *sess)
+nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
+ struct nfsd4_exchange_id *exid = &u->exchange_id;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 24);
- if (!p)
+ /* eir_clientid */
+ nfserr = nfsd4_encode_clientid4(xdr, &exid->clientid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_sequenceid */
+ nfserr = nfsd4_encode_sequenceid4(xdr, exid->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, exid->flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_state_protect */
+ nfserr = nfsd4_encode_state_protect4_r(xdr, exid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_owner */
+ nfserr = nfsd4_encode_server_owner4(xdr, resp->rqstp);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_scope */
+ nfserr = nfsd4_encode_opaque(xdr, nn->nfsd_name,
+ strlen(nn->nfsd_name));
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_impl_id<1> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(sess->seqid);
- *p++ = cpu_to_be32(sess->flags);
+ nfserr = nfsd4_encode_nfs_impl_id4(xdr, exid);
+ if (nfserr != nfs_ok)
+ return nfserr;
- p = xdr_reserve_space(xdr, 28);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0); /* headerpadsz */
- *p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
- *p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
- *p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
- *p++ = cpu_to_be32(sess->fore_channel.maxops);
- *p++ = cpu_to_be32(sess->fore_channel.maxreqs);
- *p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
-
- if (sess->fore_channel.nr_rdma_attrs) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
- }
+ return nfs_ok;
+}
- p = xdr_reserve_space(xdr, 28);
- if (!p)
+static __be32
+nfsd4_encode_channel_attrs4(struct xdr_stream *xdr,
+ const struct nfsd4_channel_attrs *attrs)
+{
+ __be32 status;
+
+ /* ca_headerpadsize */
+ status = nfsd4_encode_count4(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxrequestsize */
+ status = nfsd4_encode_count4(xdr, attrs->maxreq_sz);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxresponsesize */
+ status = nfsd4_encode_count4(xdr, attrs->maxresp_sz);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxresponsesize_cached */
+ status = nfsd4_encode_count4(xdr, attrs->maxresp_cached);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxoperations */
+ status = nfsd4_encode_count4(xdr, attrs->maxops);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxrequests */
+ status = nfsd4_encode_count4(xdr, attrs->maxreqs);
+ if (status != nfs_ok)
+ return status;
+ /* ca_rdma_ird<1> */
+ if (xdr_stream_encode_u32(xdr, attrs->nr_rdma_attrs) != XDR_UNIT)
return nfserr_resource;
- *p++ = cpu_to_be32(0); /* headerpadsz */
- *p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
- *p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
- *p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
- *p++ = cpu_to_be32(sess->back_channel.maxops);
- *p++ = cpu_to_be32(sess->back_channel.maxreqs);
- *p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
-
- if (sess->back_channel.nr_rdma_attrs) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
- }
- return 0;
+ if (attrs->nr_rdma_attrs)
+ return nfsd4_encode_uint32_t(xdr, attrs->rdma_attrs);
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_create_session *sess = &u->create_session;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* csr_sessionid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &sess->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_sequence */
+ nfserr = nfsd4_encode_sequenceid4(xdr, sess->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, sess->flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_fore_chan_attrs */
+ nfserr = nfsd4_encode_channel_attrs4(xdr, &sess->fore_channel);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_back_chan_attrs */
+ return nfsd4_encode_channel_attrs4(xdr, &sess->back_channel);
}
static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_sequence *seq)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_sequence *seq = &u->sequence;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(seq->seqid);
- *p++ = cpu_to_be32(seq->slotid);
+ /* sr_sessionid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &seq->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_sequenceid */
+ nfserr = nfsd4_encode_sequenceid4(xdr, seq->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->slotid);
+ if (nfserr != nfs_ok)
+ return nfserr;
 /* Note slot IDs are numbered from zero: */
- *p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
- *p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
- *p++ = cpu_to_be32(seq->status_flags);
+ /* sr_highest_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots_response - 1);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_target_highest_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->target_maxslots - 1);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_status_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, seq->status_flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
resp->cstate.data_offset = xdr->buf->len; /* DRC cache data pointer */
- return 0;
+ return nfs_ok;
}
static __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_test_stateid *test_stateid)
+ union nfsd4_op_u *u)
{
- struct xdr_stream *xdr = resp->xdr;
+ struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid, *next;
- __be32 *p;
+ struct xdr_stream *xdr = resp->xdr;
- p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
- if (!p)
+ /* tsr_status_codes<> */
+ if (xdr_stream_encode_u32(xdr, test_stateid->ts_num_ids) != XDR_UNIT)
return nfserr_resource;
- *p++ = htonl(test_stateid->ts_num_ids);
-
- list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
- *p++ = stateid->ts_id_status;
+ list_for_each_entry_safe(stateid, next,
+ &test_stateid->ts_stateid_list, ts_id_list) {
+ if (xdr_stream_encode_be32(xdr, stateid->ts_id_status) != XDR_UNIT)
+ return nfserr_resource;
}
+ return nfs_ok;
+}
- return 0;
+static __be32
+nfsd4_encode_get_dir_delegation(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+ struct xdr_stream *xdr = resp->xdr;
+ __be32 status = nfserr_resource;
+
+ switch(gdd->gddrnf_status) {
+ case GDD4_OK:
+ if (xdr_stream_encode_u32(xdr, GDD4_OK) != XDR_UNIT)
+ break;
+ status = nfsd4_encode_verifier4(xdr, &gdd->gddr_cookieverf);
+ if (status)
+ break;
+ status = nfsd4_encode_stateid4(xdr, &gdd->gddr_stateid);
+ if (status)
+ break;
+ status = nfsd4_encode_bitmap4(xdr, gdd->gddr_notification[0], 0, 0);
+ if (status)
+ break;
+ status = nfsd4_encode_bitmap4(xdr, gdd->gddr_child_attributes[0],
+ gdd->gddr_child_attributes[1],
+ gdd->gddr_child_attributes[2]);
+ if (status)
+ break;
+ status = nfsd4_encode_bitmap4(xdr, gdd->gddr_dir_attributes[0],
+ gdd->gddr_dir_attributes[1],
+ gdd->gddr_dir_attributes[2]);
+ break;
+ default:
+ pr_warn("nfsd: bad gddrnf_status (%u)\n", gdd->gddrnf_status);
+ gdd->gddrnf_will_signal_deleg_avail = 0;
+ fallthrough;
+ case GDD4_UNAVAIL:
+ if (xdr_stream_encode_u32(xdr, GDD4_UNAVAIL) != XDR_UNIT)
+ break;
+ status = nfsd4_encode_bool(xdr, gdd->gddrnf_will_signal_deleg_avail);
+ break;
+ }
+ return status;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
-nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_getdeviceinfo *gdev)
+nfsd4_encode_device_addr4(struct xdr_stream *xdr,
+ const struct nfsd4_getdeviceinfo *gdev)
{
- struct xdr_stream *xdr = resp->xdr;
+ u32 needed_len, starting_len = xdr->buf->len;
const struct nfsd4_layout_ops *ops;
- u32 starting_len = xdr->buf->len, needed_len;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* da_layout_type */
+ if (xdr_stream_encode_u32(xdr, gdev->gd_layout_type) != XDR_UNIT)
return nfserr_resource;
-
- *p++ = cpu_to_be32(gdev->gd_layout_type);
-
- /* If maxcount is 0 then just update notifications */
- if (gdev->gd_maxcount != 0) {
- ops = nfsd4_layout_ops[gdev->gd_layout_type];
- nfserr = ops->encode_getdeviceinfo(xdr, gdev);
- if (nfserr) {
- /*
- * We don't bother to burden the layout drivers with
- * enforcing gd_maxcount, just tell the client to
- * come back with a bigger buffer if it's not enough.
- */
- if (xdr->buf->len + 4 > gdev->gd_maxcount)
- goto toosmall;
- return nfserr;
- }
+ /* da_addr_body */
+ ops = nfsd4_layout_ops[gdev->gd_layout_type];
+ status = ops->encode_getdeviceinfo(xdr, gdev);
+ if (status != nfs_ok) {
+ /*
+ * Don't burden the layout drivers with enforcing
+ * gd_maxcount. Just tell the client to come back
+ * with a bigger buffer if it's not enough.
+ */
+ if (xdr->buf->len + XDR_UNIT > gdev->gd_maxcount)
+ goto toosmall;
+ return status;
}
- if (gdev->gd_notify_types) {
- p = xdr_reserve_space(xdr, 4 + 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1); /* bitmap length */
- *p++ = cpu_to_be32(gdev->gd_notify_types);
- } else {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = 0;
- }
+ return nfs_ok;
- return 0;
toosmall:
- dprintk("%s: maxcount too small\n", __func__);
- needed_len = xdr->buf->len + 4 /* notifications */;
+ needed_len = xdr->buf->len + XDR_UNIT; /* notifications */
xdr_truncate_encode(xdr, starting_len);
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(needed_len);
+
+ status = nfsd4_encode_count4(xdr, needed_len);
+ if (status != nfs_ok)
+ return status;
return nfserr_toosmall;
}
static __be32
-nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_layoutget *lgp)
+nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
struct xdr_stream *xdr = resp->xdr;
- const struct nfsd4_layout_ops *ops;
- __be32 *p;
- p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(1); /* we always set return-on-close */
- *p++ = cpu_to_be32(lgp->lg_sid.si_generation);
- p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
- sizeof(stateid_opaque_t));
+ /* gdir_device_addr */
+ nfserr = nfsd4_encode_device_addr4(xdr, gdev);
+ if (nfserr)
+ return nfserr;
+ /* gdir_notification */
+ return nfsd4_encode_bitmap4(xdr, gdev->gd_notify_types, 0, 0);
+}
- *p++ = cpu_to_be32(1); /* we always return a single layout */
- p = xdr_encode_hyper(p, lgp->lg_seg.offset);
- p = xdr_encode_hyper(p, lgp->lg_seg.length);
- *p++ = cpu_to_be32(lgp->lg_seg.iomode);
- *p++ = cpu_to_be32(lgp->lg_layout_type);
+static __be32
+nfsd4_encode_layout4(struct xdr_stream *xdr, const struct nfsd4_layoutget *lgp)
+{
+ const struct nfsd4_layout_ops *ops = nfsd4_layout_ops[lgp->lg_layout_type];
+ __be32 status;
- ops = nfsd4_layout_ops[lgp->lg_layout_type];
+ /* lo_offset */
+ status = nfsd4_encode_offset4(xdr, lgp->lg_seg.offset);
+ if (status != nfs_ok)
+ return status;
+ /* lo_length */
+ status = nfsd4_encode_length4(xdr, lgp->lg_seg.length);
+ if (status != nfs_ok)
+ return status;
+ /* lo_iomode */
+ if (xdr_stream_encode_u32(xdr, lgp->lg_seg.iomode) != XDR_UNIT)
+ return nfserr_resource;
+ /* lo_content */
+ if (xdr_stream_encode_u32(xdr, lgp->lg_layout_type) != XDR_UNIT)
+ return nfserr_resource;
return ops->encode_layoutget(xdr, lgp);
}
static __be32
-nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_layoutcommit *lcp)
+nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_layoutget *lgp = &u->layoutget;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* logr_return_on_close */
+ nfserr = nfsd4_encode_bool(xdr, true);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* logr_stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &lgp->lg_sid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* logr_layout<> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
return nfserr_resource;
- *p++ = cpu_to_be32(lcp->lc_size_chg);
- if (lcp->lc_size_chg) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_hyper(p, lcp->lc_newsize);
- }
+ return nfsd4_encode_layout4(xdr, lgp);
+}
- return 0;
+static __be32
+nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* ns_sizechanged */
+ nfserr = nfsd4_encode_bool(xdr, lcp->lc_size_chg);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ if (lcp->lc_size_chg)
+ /* ns_size */
+ return nfsd4_encode_length4(xdr, lcp->lc_newsize);
+ return nfs_ok;
}
static __be32
nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_layoutreturn *lrp)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(lrp->lrs_present);
+ /* lrs_present */
+ nfserr = nfsd4_encode_bool(xdr, lrp->lrs_present);
+ if (nfserr != nfs_ok)
+ return nfserr;
if (lrp->lrs_present)
- return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
- return 0;
+ /* lrs_stateid */
+ return nfsd4_encode_stateid4(xdr, &lrp->lr_sid);
+ return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
-nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
- struct nfsd42_write_res *write, bool sync)
+nfsd4_encode_write_response4(struct xdr_stream *xdr,
+ const struct nfsd4_copy *copy)
{
- __be32 *p;
- p = xdr_reserve_space(resp->xdr, 4);
- if (!p)
- return nfserr_resource;
+ const struct nfsd42_write_res *write = &copy->cp_res;
+ u32 count = nfsd4_copy_is_sync(copy) ? 0 : 1;
+ __be32 status;
- if (sync)
- *p++ = cpu_to_be32(0);
- else {
- __be32 nfserr;
- *p++ = cpu_to_be32(1);
- nfserr = nfsd4_encode_stateid(resp->xdr, &write->cb_stateid);
- if (nfserr)
- return nfserr;
+ /* wr_callback_id<1> */
+ if (xdr_stream_encode_u32(xdr, count) != XDR_UNIT)
+ return nfserr_resource;
+ if (count) {
+ status = nfsd4_encode_stateid4(xdr, &write->cb_stateid);
+ if (status != nfs_ok)
+ return status;
}
- p = xdr_reserve_space(resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
- if (!p)
+
+ /* wr_count */
+ status = nfsd4_encode_length4(xdr, write->wr_bytes_written);
+ if (status != nfs_ok)
+ return status;
+ /* wr_committed */
+ if (xdr_stream_encode_u32(xdr, write->wr_stable_how) != XDR_UNIT)
return nfserr_resource;
+ /* wr_writeverf */
+ return nfsd4_encode_verifier4(xdr, &write->wr_verifier);
+}
- p = xdr_encode_hyper(p, write->wr_bytes_written);
- *p++ = cpu_to_be32(write->wr_stable_how);
- p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
- NFS4_VERIFIER_SIZE);
- return nfs_ok;
+static __be32 nfsd4_encode_copy_requirements4(struct xdr_stream *xdr,
+ const struct nfsd4_copy *copy)
+{
+ __be32 status;
+
+ /* cr_consecutive */
+ status = nfsd4_encode_bool(xdr, true);
+ if (status != nfs_ok)
+ return status;
+ /* cr_synchronous */
+ return nfsd4_encode_bool(xdr, nfsd4_copy_is_sync(copy));
}
static __be32
-nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
+nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- struct xdr_stream *xdr = resp->xdr;
- struct nfs42_netaddr *addr;
- __be32 *p;
+ struct nfsd4_copy *copy = &u->copy;
- p = xdr_reserve_space(xdr, 4);
- *p++ = cpu_to_be32(ns->nl4_type);
+ nfserr = nfsd4_encode_write_response4(resp->xdr, copy);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ return nfsd4_encode_copy_requirements4(resp->xdr, copy);
+}
+
+static __be32
+nfsd4_encode_netloc4(struct xdr_stream *xdr, const struct nl4_server *ns)
+{
+ __be32 status;
+ if (xdr_stream_encode_u32(xdr, ns->nl4_type) != XDR_UNIT)
+ return nfserr_resource;
switch (ns->nl4_type) {
case NL4_NETADDR:
- addr = &ns->u.nl4_addr;
-
- /* netid_len, netid, uaddr_len, uaddr (port included
- * in RPCBIND_MAXUADDRLEN)
- */
- p = xdr_reserve_space(xdr,
- 4 /* netid len */ +
- (XDR_QUADLEN(addr->netid_len) * 4) +
- 4 /* uaddr len */ +
- (XDR_QUADLEN(addr->addr_len) * 4));
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(addr->netid_len);
- p = xdr_encode_opaque_fixed(p, addr->netid,
- addr->netid_len);
- *p++ = cpu_to_be32(addr->addr_len);
- p = xdr_encode_opaque_fixed(p, addr->addr,
- addr->addr_len);
+ /* nl_addr */
+ status = nfsd4_encode_netaddr4(xdr, &ns->u.nl4_addr);
break;
default:
- WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR);
- return nfserr_inval;
+ status = nfserr_serverfault;
}
-
- return 0;
+ return status;
}
static __be32
-nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_copy *copy)
+nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- __be32 *p;
+ struct nfsd4_copy_notify *cn = &u->copy_notify;
+ struct xdr_stream *xdr = resp->xdr;
- nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
- !!copy->cp_synchronous);
+ /* cnr_lease_time */
+ nfserr = nfsd4_encode_nfstime4(xdr, &cn->cpn_lease_time);
if (nfserr)
return nfserr;
-
- p = xdr_reserve_space(resp->xdr, 4 + 4);
- *p++ = xdr_one; /* cr_consecutive */
- *p++ = cpu_to_be32(copy->cp_synchronous);
- return 0;
+ /* cnr_stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &cn->cpn_cnr_stateid);
+ if (nfserr)
+ return nfserr;
+ /* cnr_source_server<> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ return nfsd4_encode_netloc4(xdr, cn->cpn_src);
}
static __be32
nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_offload_status *os)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_offload_status *os = &u->offload_status;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 8 + 4);
- if (!p)
+ /* osr_count */
+ nfserr = nfsd4_encode_length4(xdr, os->count);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* osr_complete<1> */
+ if (os->completed) {
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ if (xdr_stream_encode_be32(xdr, os->status) != XDR_UNIT)
+ return nfserr_resource;
+ } else if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
- p = xdr_encode_hyper(p, os->count);
- *p++ = cpu_to_be32(0);
- return nfserr;
+ return nfs_ok;
}
static __be32
nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
- struct nfsd4_read *read,
- unsigned long *maxcount, u32 *eof,
- loff_t *pos)
+ struct nfsd4_read *read)
{
- struct xdr_stream *xdr = resp->xdr;
+ struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp;
struct file *file = read->rd_nf->nf_file;
- int starting_len = xdr->buf->len;
- loff_t hole_pos;
- __be32 nfserr;
- __be32 *p, tmp;
- __be64 tmp64;
+ struct xdr_stream *xdr = resp->xdr;
+ bool splice_ok = argp->splice_ok;
+ unsigned int offset_offset;
+ __be32 nfserr, wire_count;
+ unsigned long maxcount;
+ __be64 wire_offset;
- hole_pos = pos ? *pos : vfs_llseek(file, read->rd_offset, SEEK_HOLE);
- if (hole_pos > read->rd_offset)
- *maxcount = min_t(unsigned long, *maxcount, hole_pos - read->rd_offset);
- *maxcount = min_t(unsigned long, *maxcount, (xdr->buf->buflen - xdr->buf->len));
+ if (xdr_stream_encode_u32(xdr, NFS4_CONTENT_DATA) != XDR_UNIT)
+ return nfserr_io;
- /* Content type, offset, byte count */
- p = xdr_reserve_space(xdr, 4 + 8 + 4);
- if (!p)
- return nfserr_resource;
+ offset_offset = xdr->buf->len;
- read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, *maxcount);
- if (read->rd_vlen < 0)
- return nfserr_resource;
+ /* Reserve space for the byte offset and count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 3)))
+ return nfserr_io;
+ xdr_commit_encode(xdr);
+
+ maxcount = min_t(unsigned long, read->rd_length,
+ (xdr->buf->buflen - xdr->buf->len));
- nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
- resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
+ if (file->f_op->splice_read && splice_ok)
+ nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
+ else
+ nfserr = nfsd4_encode_readv(resp, read, maxcount);
if (nfserr)
return nfserr;
- xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
-
- tmp = htonl(NFS4_CONTENT_DATA);
- write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
- tmp64 = cpu_to_be64(read->rd_offset);
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp64, 8);
- tmp = htonl(*maxcount);
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp, 4);
-
- tmp = xdr_zero;
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
- xdr_pad_size(*maxcount));
- return nfs_ok;
-}
-static __be32
-nfsd4_encode_read_plus_hole(struct nfsd4_compoundres *resp,
- struct nfsd4_read *read,
- unsigned long *maxcount, u32 *eof)
-{
- struct file *file = read->rd_nf->nf_file;
- loff_t data_pos = vfs_llseek(file, read->rd_offset, SEEK_DATA);
- loff_t f_size = i_size_read(file_inode(file));
- unsigned long count;
- __be32 *p;
-
- if (data_pos == -ENXIO)
- data_pos = f_size;
- else if (data_pos <= read->rd_offset || (data_pos < f_size && data_pos % PAGE_SIZE))
- return nfsd4_encode_read_plus_data(resp, read, maxcount, eof, &f_size);
- count = data_pos - read->rd_offset;
-
- /* Content type, offset, byte count */
- p = xdr_reserve_space(resp->xdr, 4 + 8 + 8);
- if (!p)
- return nfserr_resource;
-
- *p++ = htonl(NFS4_CONTENT_HOLE);
- p = xdr_encode_hyper(p, read->rd_offset);
- p = xdr_encode_hyper(p, count);
-
- *eof = (read->rd_offset + count) >= f_size;
- *maxcount = min_t(unsigned long, count, *maxcount);
+ wire_offset = cpu_to_be64(read->rd_offset);
+ write_bytes_to_xdr_buf(xdr->buf, offset_offset, &wire_offset,
+ XDR_UNIT * 2);
+ wire_count = cpu_to_be32(read->rd_length);
+ write_bytes_to_xdr_buf(xdr->buf, offset_offset + XDR_UNIT * 2,
+ &wire_count, XDR_UNIT);
return nfs_ok;
}
static __be32
nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_read *read)
+ union nfsd4_op_u *u)
{
- unsigned long maxcount, count;
+ struct nfsd4_read *read = &u->read;
+ struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
- struct file *file;
- int starting_len = xdr->buf->len;
- int last_segment = xdr->buf->len;
- int segments = 0;
- __be32 *p, tmp;
- bool is_data;
- loff_t pos;
- u32 eof;
+ unsigned int eof_offset;
+ __be32 wire_data[2];
+ u32 segments = 0;
if (nfserr)
return nfserr;
- file = read->rd_nf->nf_file;
- /* eof flag, segment count */
- p = xdr_reserve_space(xdr, 4 + 4);
- if (!p)
- return nfserr_resource;
- xdr_commit_encode(xdr);
+ eof_offset = xdr->buf->len;
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
- (xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
- count = maxcount;
+ /* Reserve space for the eof flag and segment count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2)))
+ return nfserr_io;
+ xdr_commit_encode(xdr);
- eof = read->rd_offset >= i_size_read(file_inode(file));
- if (eof)
+ read->rd_eof = read->rd_offset >= i_size_read(file_inode(file));
+ if (read->rd_eof)
goto out;
- pos = vfs_llseek(file, read->rd_offset, SEEK_HOLE);
- is_data = pos > read->rd_offset;
-
- while (count > 0 && !eof) {
- maxcount = count;
- if (is_data)
- nfserr = nfsd4_encode_read_plus_data(resp, read, &maxcount, &eof,
- segments == 0 ? &pos : NULL);
- else
- nfserr = nfsd4_encode_read_plus_hole(resp, read, &maxcount, &eof);
- if (nfserr)
- goto out;
- count -= maxcount;
- read->rd_offset += maxcount;
- is_data = !is_data;
- last_segment = xdr->buf->len;
- segments++;
+ nfserr = nfsd4_encode_read_plus_data(resp, read);
+ if (nfserr) {
+ xdr_truncate_encode(xdr, eof_offset);
+ return nfserr;
}
-out:
- if (nfserr && segments == 0)
- xdr_truncate_encode(xdr, starting_len);
- else {
- if (nfserr) {
- xdr_truncate_encode(xdr, last_segment);
- nfserr = nfs_ok;
- eof = 0;
- }
- tmp = htonl(eof);
- write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
- tmp = htonl(segments);
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
- }
+ segments++;
+out:
+ wire_data[0] = read->rd_eof ? xdr_one : xdr_zero;
+ wire_data[1] = cpu_to_be32(segments);
+ write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2);
return nfserr;
}
static __be32
-nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_copy_notify *cn)
+nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
+ struct nfsd4_seek *seek = &u->seek;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- if (nfserr)
- return nfserr;
-
- /* 8 sec, 4 nsec */
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- return nfserr_resource;
- /* cnr_lease_time */
- p = xdr_encode_hyper(p, cn->cpn_sec);
- *p++ = cpu_to_be32(cn->cpn_nsec);
-
- /* cnr_stateid */
- nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid);
- if (nfserr)
+ /* sr_eof */
+ nfserr = nfsd4_encode_bool(xdr, seek->seek_eof);
+ if (nfserr != nfs_ok)
return nfserr;
-
- /* cnr_src.nl_nsvr */
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(1);
-
- return nfsd42_encode_nl4_server(resp, &cn->cpn_src);
+ /* sr_offset */
+ return nfsd4_encode_offset4(xdr, seek->seek_pos);
}
static __be32
-nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_seek *seek)
-{
- __be32 *p;
-
- p = xdr_reserve_space(resp->xdr, 4 + 8);
- *p++ = cpu_to_be32(seek->seek_eof);
- p = xdr_encode_hyper(p, seek->seek_pos);
-
- return 0;
-}
-
-static __be32
-nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
+nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *p)
{
return nfserr;
}
@@ -4986,8 +5559,9 @@ nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen)
static __be32
nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_getxattr *getxattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_getxattr *getxattr = &u->getxattr;
struct xdr_stream *xdr = resp->xdr;
__be32 *p, err;
@@ -5010,18 +5584,12 @@ nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
static __be32
nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_setxattr *setxattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_setxattr *setxattr = &u->setxattr;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
-
- encode_cinfo(p, &setxattr->setxa_cinfo);
- return 0;
+ return nfsd4_encode_change_info4(xdr, &setxattr->setxa_cinfo);
}
/*
@@ -5035,24 +5603,20 @@ nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs,
/*
* If the cookie is larger than the maximum number we can fit
- * in either the buffer we just got back from vfs_listxattr, or,
- * XDR-encoded, in the return buffer, it's invalid.
+ * in the buffer we just got back from vfs_listxattr, it's invalid.
*/
if (cookie > (listxattrs->lsxa_len) / (XATTR_USER_PREFIX_LEN + 2))
return nfserr_badcookie;
- if (cookie > (listxattrs->lsxa_maxcount /
- (XDR_QUADLEN(XATTR_USER_PREFIX_LEN + 2) + 4)))
- return nfserr_badcookie;
-
*offsetp = (u32)cookie;
return 0;
}
static __be32
nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_listxattrs *listxattrs)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_listxattrs *listxattrs = &u->listxattrs;
struct xdr_stream *xdr = resp->xdr;
u32 cookie_offset, count_offset, eof;
u32 left, xdrleft, slen, count;
@@ -5060,6 +5624,7 @@ nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
u64 cookie;
char *sp;
__be32 status, tmp;
+ __be64 wire_cookie;
__be32 *p;
u32 nuser;
@@ -5075,7 +5640,7 @@ nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
*/
cookie_offset = xdr->buf->len;
count_offset = cookie_offset + 8;
- p = xdr_reserve_space(xdr, 12);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p) {
status = nfserr_resource;
goto out;
@@ -5086,7 +5651,8 @@ nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
sp = listxattrs->lsxa_buf;
nuser = 0;
- xdrleft = listxattrs->lsxa_maxcount;
+ /* Bytes left is maxcount - 8 (cookie) - 4 (array count) */
+ xdrleft = listxattrs->lsxa_maxcount - XDR_UNIT * 3;
while (left > 0 && xdrleft > 0) {
slen = strlen(sp);
@@ -5099,7 +5665,8 @@ nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
slen -= XATTR_USER_PREFIX_LEN;
xdrlen = 4 + ((slen + 3) & ~3);
- if (xdrlen > xdrleft) {
+ /* Check if both entry and eof can fit in the XDR buffer */
+ if (xdrlen + XDR_UNIT > xdrleft) {
if (count == 0) {
/*
* Can't even fit the first attribute name.
@@ -5151,7 +5718,8 @@ wreof:
cookie = offset + count;
- write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &cookie, 8);
+ wire_cookie = cpu_to_be64(cookie);
+ write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &wire_cookie, 8);
tmp = cpu_to_be32(count);
write_bytes_to_xdr_buf(xdr->buf, count_offset, &tmp, 4);
out:
@@ -5162,20 +5730,15 @@ out:
static __be32
nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr,
- struct nfsd4_removexattr *removexattr)
+ union nfsd4_op_u *u)
{
+ struct nfsd4_removexattr *removexattr = &u->removexattr;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- p = encode_cinfo(p, &removexattr->rmxa_cinfo);
- return 0;
+ return nfsd4_encode_change_info4(xdr, &removexattr->rmxa_cinfo);
}
-typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
+typedef __be32(*nfsd4_enc)(struct nfsd4_compoundres *, __be32, union nfsd4_op_u *u);
/*
* Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1
@@ -5183,93 +5746,93 @@ typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
* done in the decoding phase.
*/
static const nfsd4_enc nfsd4_enc_ops[] = {
- [OP_ACCESS] = (nfsd4_enc)nfsd4_encode_access,
- [OP_CLOSE] = (nfsd4_enc)nfsd4_encode_close,
- [OP_COMMIT] = (nfsd4_enc)nfsd4_encode_commit,
- [OP_CREATE] = (nfsd4_enc)nfsd4_encode_create,
- [OP_DELEGPURGE] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_DELEGRETURN] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_GETATTR] = (nfsd4_enc)nfsd4_encode_getattr,
- [OP_GETFH] = (nfsd4_enc)nfsd4_encode_getfh,
- [OP_LINK] = (nfsd4_enc)nfsd4_encode_link,
- [OP_LOCK] = (nfsd4_enc)nfsd4_encode_lock,
- [OP_LOCKT] = (nfsd4_enc)nfsd4_encode_lockt,
- [OP_LOCKU] = (nfsd4_enc)nfsd4_encode_locku,
- [OP_LOOKUP] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_OPEN] = (nfsd4_enc)nfsd4_encode_open,
- [OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm,
- [OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade,
- [OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_PUTPUBFH] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_PUTROOTFH] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_READ] = (nfsd4_enc)nfsd4_encode_read,
- [OP_READDIR] = (nfsd4_enc)nfsd4_encode_readdir,
- [OP_READLINK] = (nfsd4_enc)nfsd4_encode_readlink,
- [OP_REMOVE] = (nfsd4_enc)nfsd4_encode_remove,
- [OP_RENAME] = (nfsd4_enc)nfsd4_encode_rename,
- [OP_RENEW] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_RESTOREFH] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_SAVEFH] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_SECINFO] = (nfsd4_enc)nfsd4_encode_secinfo,
- [OP_SETATTR] = (nfsd4_enc)nfsd4_encode_setattr,
- [OP_SETCLIENTID] = (nfsd4_enc)nfsd4_encode_setclientid,
- [OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_VERIFY] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_WRITE] = (nfsd4_enc)nfsd4_encode_write,
- [OP_RELEASE_LOCKOWNER] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_ACCESS] = nfsd4_encode_access,
+ [OP_CLOSE] = nfsd4_encode_close,
+ [OP_COMMIT] = nfsd4_encode_commit,
+ [OP_CREATE] = nfsd4_encode_create,
+ [OP_DELEGPURGE] = nfsd4_encode_noop,
+ [OP_DELEGRETURN] = nfsd4_encode_noop,
+ [OP_GETATTR] = nfsd4_encode_getattr,
+ [OP_GETFH] = nfsd4_encode_getfh,
+ [OP_LINK] = nfsd4_encode_link,
+ [OP_LOCK] = nfsd4_encode_lock,
+ [OP_LOCKT] = nfsd4_encode_lockt,
+ [OP_LOCKU] = nfsd4_encode_locku,
+ [OP_LOOKUP] = nfsd4_encode_noop,
+ [OP_LOOKUPP] = nfsd4_encode_noop,
+ [OP_NVERIFY] = nfsd4_encode_noop,
+ [OP_OPEN] = nfsd4_encode_open,
+ [OP_OPENATTR] = nfsd4_encode_noop,
+ [OP_OPEN_CONFIRM] = nfsd4_encode_open_confirm,
+ [OP_OPEN_DOWNGRADE] = nfsd4_encode_open_downgrade,
+ [OP_PUTFH] = nfsd4_encode_noop,
+ [OP_PUTPUBFH] = nfsd4_encode_noop,
+ [OP_PUTROOTFH] = nfsd4_encode_noop,
+ [OP_READ] = nfsd4_encode_read,
+ [OP_READDIR] = nfsd4_encode_readdir,
+ [OP_READLINK] = nfsd4_encode_readlink,
+ [OP_REMOVE] = nfsd4_encode_remove,
+ [OP_RENAME] = nfsd4_encode_rename,
+ [OP_RENEW] = nfsd4_encode_noop,
+ [OP_RESTOREFH] = nfsd4_encode_noop,
+ [OP_SAVEFH] = nfsd4_encode_noop,
+ [OP_SECINFO] = nfsd4_encode_secinfo,
+ [OP_SETATTR] = nfsd4_encode_setattr,
+ [OP_SETCLIENTID] = nfsd4_encode_setclientid,
+ [OP_SETCLIENTID_CONFIRM] = nfsd4_encode_noop,
+ [OP_VERIFY] = nfsd4_encode_noop,
+ [OP_WRITE] = nfsd4_encode_write,
+ [OP_RELEASE_LOCKOWNER] = nfsd4_encode_noop,
/* NFSv4.1 operations */
- [OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
- [OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id,
- [OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session,
- [OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_BACKCHANNEL_CTL] = nfsd4_encode_noop,
+ [OP_BIND_CONN_TO_SESSION] = nfsd4_encode_bind_conn_to_session,
+ [OP_EXCHANGE_ID] = nfsd4_encode_exchange_id,
+ [OP_CREATE_SESSION] = nfsd4_encode_create_session,
+ [OP_DESTROY_SESSION] = nfsd4_encode_noop,
+ [OP_FREE_STATEID] = nfsd4_encode_noop,
+ [OP_GET_DIR_DELEGATION] = nfsd4_encode_get_dir_delegation,
#ifdef CONFIG_NFSD_PNFS
- [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_getdeviceinfo,
- [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_layoutcommit,
- [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_layoutget,
- [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_layoutreturn,
+ [OP_GETDEVICEINFO] = nfsd4_encode_getdeviceinfo,
+ [OP_GETDEVICELIST] = nfsd4_encode_noop,
+ [OP_LAYOUTCOMMIT] = nfsd4_encode_layoutcommit,
+ [OP_LAYOUTGET] = nfsd4_encode_layoutget,
+ [OP_LAYOUTRETURN] = nfsd4_encode_layoutreturn,
#else
- [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_GETDEVICEINFO] = nfsd4_encode_noop,
+ [OP_GETDEVICELIST] = nfsd4_encode_noop,
+ [OP_LAYOUTCOMMIT] = nfsd4_encode_noop,
+ [OP_LAYOUTGET] = nfsd4_encode_noop,
+ [OP_LAYOUTRETURN] = nfsd4_encode_noop,
#endif
- [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
- [OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
- [OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_test_stateid,
- [OP_WANT_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_DESTROY_CLIENTID] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_RECLAIM_COMPLETE] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_SECINFO_NO_NAME] = nfsd4_encode_secinfo_no_name,
+ [OP_SEQUENCE] = nfsd4_encode_sequence,
+ [OP_SET_SSV] = nfsd4_encode_noop,
+ [OP_TEST_STATEID] = nfsd4_encode_test_stateid,
+ [OP_WANT_DELEGATION] = nfsd4_encode_noop,
+ [OP_DESTROY_CLIENTID] = nfsd4_encode_noop,
+ [OP_RECLAIM_COMPLETE] = nfsd4_encode_noop,
/* NFSv4.2 operations */
- [OP_ALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_COPY] = (nfsd4_enc)nfsd4_encode_copy,
- [OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_copy_notify,
- [OP_DEALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_IO_ADVISE] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_LAYOUTSTATS] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_OFFLOAD_CANCEL] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_offload_status,
- [OP_READ_PLUS] = (nfsd4_enc)nfsd4_encode_read_plus,
- [OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek,
- [OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_CLONE] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_ALLOCATE] = nfsd4_encode_noop,
+ [OP_COPY] = nfsd4_encode_copy,
+ [OP_COPY_NOTIFY] = nfsd4_encode_copy_notify,
+ [OP_DEALLOCATE] = nfsd4_encode_noop,
+ [OP_IO_ADVISE] = nfsd4_encode_noop,
+ [OP_LAYOUTERROR] = nfsd4_encode_noop,
+ [OP_LAYOUTSTATS] = nfsd4_encode_noop,
+ [OP_OFFLOAD_CANCEL] = nfsd4_encode_noop,
+ [OP_OFFLOAD_STATUS] = nfsd4_encode_offload_status,
+ [OP_READ_PLUS] = nfsd4_encode_read_plus,
+ [OP_SEEK] = nfsd4_encode_seek,
+ [OP_WRITE_SAME] = nfsd4_encode_noop,
+ [OP_CLONE] = nfsd4_encode_noop,
/* RFC 8276 extended attributes operations */
- [OP_GETXATTR] = (nfsd4_enc)nfsd4_encode_getxattr,
- [OP_SETXATTR] = (nfsd4_enc)nfsd4_encode_setxattr,
- [OP_LISTXATTRS] = (nfsd4_enc)nfsd4_encode_listxattrs,
- [OP_REMOVEXATTR] = (nfsd4_enc)nfsd4_encode_removexattr,
+ [OP_GETXATTR] = nfsd4_encode_getxattr,
+ [OP_SETXATTR] = nfsd4_encode_setxattr,
+ [OP_LISTXATTRS] = nfsd4_encode_listxattrs,
+ [OP_REMOVEXATTR] = nfsd4_encode_removexattr,
};
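/*
 * Aside: with the encoder typedef switched to take a union nfsd4_op_u *,
 * the table above can hold the encoder functions directly instead of
 * casting each one through (nfsd4_enc). A minimal userspace sketch of the
 * same tagged-union dispatch pattern (names invented for illustration,
 * not kernel code):
 */
#include <stdio.h>

union op_result {
	struct { int fd; } getfh;
	struct { long count; } read;
};

typedef int (*op_encoder)(union op_result *u);

static int encode_getfh(union op_result *u) { return printf("fh=%d\n", u->getfh.fd); }
static int encode_read(union op_result *u)  { return printf("count=%ld\n", u->read.count); }

static const op_encoder encoders[] = {
	[0] = encode_getfh,	/* no function-pointer casts needed */
	[1] = encode_read,
};

int main(void)
{
	union op_result u = { .read = { .count = 42 } };

	return encoders[1](&u) < 0;
}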
/*
@@ -5301,6 +5864,23 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 respsize)
return nfserr_rep_too_big;
}
+static __be32 nfsd4_map_status(__be32 status, u32 minor)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_wrong_type:
+ /* RFC 8881 - 15.1.2.9 */
+ if (minor == 0)
+ status = nfserr_inval;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_symlink;
+ break;
+ }
+ return status;
+}
+
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
@@ -5308,17 +5888,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
struct nfs4_stateowner *so = resp->cstate.replay_owner;
struct svc_rqst *rqstp = resp->rqstp;
const struct nfsd4_operation *opdesc = op->opdesc;
- int post_err_offset;
+ unsigned int op_status_offset;
nfsd4_enc encoder;
- __be32 *p;
- p = xdr_reserve_space(xdr, 8);
- if (!p) {
- WARN_ON_ONCE(1);
- return;
- }
- *p++ = cpu_to_be32(op->opnum);
- post_err_offset = xdr->buf->len;
+ if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT)
+ goto release;
+ op_status_offset = xdr->buf->len;
+ if (!xdr_reserve_space(xdr, XDR_UNIT))
+ goto release;
if (op->opnum == OP_ILLEGAL)
goto status;
@@ -5331,8 +5908,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
op->status = encoder(resp, op->status, &op->u);
if (op->status)
trace_nfsd_compound_encode_err(rqstp, op->opnum, op->status);
- if (opdesc && opdesc->op_release)
- opdesc->op_release(&op->u);
xdr_commit_encode(xdr);
/* nfsd4_check_resp_size guarantees enough room for error status */
@@ -5361,42 +5936,49 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
* bug if we had to do this on a non-idempotent op:
*/
warn_on_nonidempotent_op(op);
- xdr_truncate_encode(xdr, post_err_offset);
- }
- if (so) {
- int len = xdr->buf->len - post_err_offset;
+ xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
+ } else if (so) {
+ int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
- read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
+ read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
so->so_replay.rp_buf, len);
}
status:
- /* Note that op->status is already in network byte order: */
- write_bytes_to_xdr_buf(xdr->buf, post_err_offset - 4, &op->status, 4);
+ op->status = nfsd4_map_status(op->status,
+ resp->cstate.minorversion);
+ write_bytes_to_xdr_buf(xdr->buf, op_status_offset,
+ &op->status, XDR_UNIT);
+release:
+ if (opdesc && opdesc->op_release)
+ opdesc->op_release(&op->u);
+
+ /*
+ * Account for pages consumed while encoding this operation.
+ * The xdr_stream primitives don't manage rq_next_page.
+ */
+ rqstp->rq_next_page = xdr->page_ptr + 1;
}
-/*
- * Encode the reply stored in the stateowner reply cache
- *
- * XDR note: do not encode rp->rp_buflen: the buffer contains the
- * previously sent already encoded operation.
+/**
+ * nfsd4_encode_replay - encode a result stored in the stateowner reply cache
+ * @xdr: send buffer's XDR stream
+ * @op: operation being replayed
+ *
+ * @op->replay->rp_buf contains the previously-sent already-encoded result.
*/
-void
-nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
+void nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
{
- __be32 *p;
struct nfs4_replay *rp = op->replay;
- p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
- if (!p) {
- WARN_ON_ONCE(1);
- return;
- }
- *p++ = cpu_to_be32(op->opnum);
- *p++ = rp->rp_status; /* already xdr'ed */
+ trace_nfsd_stateowner_replay(op->opnum, rp);
- p = xdr_encode_opaque_fixed(p, rp->rp_buf, rp->rp_buflen);
+ if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT)
+ return;
+ if (xdr_stream_encode_be32(xdr, rp->rp_status) != XDR_UNIT)
+ return;
+ xdr_stream_encode_opaque_fixed(xdr, rp->rp_buf, rp->rp_buflen);
}
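/*
 * Note the split in nfsd4_encode_replay() above: xdr_stream_encode_u32()
 * takes a host-order value and converts it (used for op->opnum), while
 * xdr_stream_encode_be32() writes an already-big-endian word verbatim.
 * rp->rp_status was cached "already xdr'ed" (see the deleted comment), so
 * byte-swapping it a second time would corrupt the replayed status.
 */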
void nfsd4_release_compoundargs(struct svc_rqst *rqstp)
@@ -5404,7 +5986,7 @@ void nfsd4_release_compoundargs(struct svc_rqst *rqstp)
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
- kfree(args->ops);
+ vfree(args->ops);
args->ops = args->iops;
}
while (args->to_free) {
@@ -5433,12 +6015,8 @@ bool
nfs4svc_encode_compoundres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
- struct xdr_buf *buf = xdr->buf;
__be32 *p;
- WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len +
- buf->tail[0].iov_len);
-
/*
* Send buffer space for the following items is reserved
* at the top of nfsd4_proc_compound().
@@ -5446,9 +6024,6 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
p = resp->statusp;
*p++ = resp->cstate.status;
-
- rqstp->rq_next_page = xdr->page_ptr + 1;
-
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
p += XDR_QUADLEN(resp->taglen);
diff --git a/fs/nfsd/nfs4xdr_gen.c b/fs/nfsd/nfs4xdr_gen.c
new file mode 100644
index 000000000000..a17b5d8e60b3
--- /dev/null
+++ b/fs/nfsd/nfs4xdr_gen.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+// Generated by xdrgen. Manual edits will be lost.
+// XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x
+// XDR specification modification time: Mon Oct 14 09:10:13 2024
+
+#include <linux/sunrpc/svc.h>
+
+#include "nfs4xdr_gen.h"
+
+static bool __maybe_unused
+xdrgen_decode_int64_t(struct xdr_stream *xdr, int64_t *ptr)
+{
+ return xdrgen_decode_hyper(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_uint32_t(struct xdr_stream *xdr, uint32_t *ptr)
+{
+ return xdrgen_decode_unsigned_int(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_bitmap4(struct xdr_stream *xdr, bitmap4 *ptr)
+{
+ if (xdr_stream_decode_u32(xdr, &ptr->count) < 0)
+ return false;
+ for (u32 i = 0; i < ptr->count; i++)
+ if (!xdrgen_decode_uint32_t(xdr, &ptr->element[i]))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_nfstime4(struct xdr_stream *xdr, struct nfstime4 *ptr)
+{
+ if (!xdrgen_decode_int64_t(xdr, &ptr->seconds))
+ return false;
+ if (!xdrgen_decode_uint32_t(xdr, &ptr->nseconds))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_fattr4_offline(struct xdr_stream *xdr, fattr4_offline *ptr)
+{
+ return xdrgen_decode_bool(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_arguments4(struct xdr_stream *xdr, struct open_arguments4 *ptr)
+{
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_deny))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access_want))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_open_claim))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_create_mode))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+bool
+xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr)
+{
+ return xdrgen_decode_open_arguments4(xdr, ptr);
+};
+
+bool
+xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr)
+{
+ return xdrgen_decode_nfstime4(xdr, ptr);
+};
+
+bool
+xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr)
+{
+ return xdrgen_decode_nfstime4(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_encode_int64_t(struct xdr_stream *xdr, const int64_t value)
+{
+ return xdrgen_encode_hyper(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_uint32_t(struct xdr_stream *xdr, const uint32_t value)
+{
+ return xdrgen_encode_unsigned_int(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_bitmap4(struct xdr_stream *xdr, const bitmap4 value)
+{
+ if (xdr_stream_encode_u32(xdr, value.count) != XDR_UNIT)
+ return false;
+ for (u32 i = 0; i < value.count; i++)
+ if (!xdrgen_encode_uint32_t(xdr, value.element[i]))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_nfstime4(struct xdr_stream *xdr, const struct nfstime4 *value)
+{
+ if (!xdrgen_encode_int64_t(xdr, value->seconds))
+ return false;
+ if (!xdrgen_encode_uint32_t(xdr, value->nseconds))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_fattr4_offline(struct xdr_stream *xdr, const fattr4_offline value)
+{
+ return xdrgen_encode_bool(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_arguments4(struct xdr_stream *xdr, const struct open_arguments4 *value)
+{
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_deny))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access_want))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_open_claim))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_create_mode))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+bool
+xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value)
+{
+ return xdrgen_encode_open_arguments4(xdr, value);
+};
+
+bool
+xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value)
+{
+ return xdrgen_encode_nfstime4(xdr, value);
+};
+
+bool
+xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value)
+{
+ return xdrgen_encode_nfstime4(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
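/*
 * Userspace sketch (assumption: plain flat buffers rather than an
 * xdr_stream) of the wire layout the generated bitmap4 helpers above
 * produce: a 4-byte big-endian word count followed by that many 32-bit
 * words, each in big-endian. Names below are illustrative only.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static size_t encode_bitmap(uint8_t *buf, const uint32_t *words, uint32_t count)
{
	uint32_t be = htonl(count);
	size_t off = 0;

	memcpy(buf + off, &be, 4);
	off += 4;
	for (uint32_t i = 0; i < count; i++) {
		be = htonl(words[i]);
		memcpy(buf + off, &be, 4);
		off += 4;
	}
	return off;		/* bytes consumed; always a multiple of XDR_UNIT (4) */
}

int main(void)
{
	uint32_t attrs[2] = { 0x00100000, 0x00000002 };
	uint8_t buf[64];
	size_t len = encode_bitmap(buf, attrs, 2);

	printf("encoded %zu bytes\n", len);	/* 12: count word plus two elements */
	return 0;
}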
diff --git a/fs/nfsd/nfs4xdr_gen.h b/fs/nfsd/nfs4xdr_gen.h
new file mode 100644
index 000000000000..41a0033b7256
--- /dev/null
+++ b/fs/nfsd/nfs4xdr_gen.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
+/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+
+#ifndef _LINUX_XDRGEN_NFS4_1_DECL_H
+#define _LINUX_XDRGEN_NFS4_1_DECL_H
+
+#include <linux/types.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+#include <linux/sunrpc/xdrgen/_builtins.h>
+#include <linux/sunrpc/xdrgen/nfs4_1.h>
+
+bool xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr);
+bool xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value);
+
+bool xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr);
+bool xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value);
+
+bool xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr);
+bool xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value);
+
+#endif /* _LINUX_XDRGEN_NFS4_1_DECL_H */
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 6e0b6f3148dc..ab13ee9c7fd8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -27,7 +27,7 @@
* cache size, the idea being that when the cache is at its maximum number
* of entries, then this should be the average number of entries per bucket.
*/
-#define TARGET_BUCKET_SIZE 64
+#define TARGET_BUCKET_SIZE 8
struct nfsd_drc_bucket {
struct rb_root rb_head;
@@ -84,17 +84,11 @@ nfsd_hashsize(unsigned int limit)
return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
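/*
 * Worked example (assuming max_drc_entries comes out to 1024 for a given
 * amount of RAM): with TARGET_BUCKET_SIZE now 8, nfsd_hashsize() returns
 * roundup_pow_of_two(1024 / 8) = 128 buckets and maskbits becomes 7,
 * versus only 16 buckets with the old target of 64, so a full cache
 * averages about 8 entries per bucket instead of 64.
 */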
-static u32
-nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
+static struct nfsd_cacherep *
+nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
+ struct nfsd_net *nn)
{
- return hash_32(be32_to_cpu(xid), nn->maskbits);
-}
-
-static struct svc_cacherep *
-nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
- struct nfsd_net *nn)
-{
- struct svc_cacherep *rp;
+ struct nfsd_cacherep *rp;
rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
if (rp) {
@@ -116,36 +110,63 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
return rp;
}
-static void
-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
- struct nfsd_net *nn)
+static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
- if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
- nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
+ if (rp->c_type == RC_REPLBUFF)
kfree(rp->c_replvec.iov_base);
+ kmem_cache_free(drc_slab, rp);
+}
+
+static unsigned long
+nfsd_cacherep_dispose(struct list_head *dispose)
+{
+ struct nfsd_cacherep *rp;
+ unsigned long freed = 0;
+
+ while (!list_empty(dispose)) {
+ rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
+ list_del(&rp->c_lru);
+ nfsd_cacherep_free(rp);
+ freed++;
}
+ return freed;
+}
+
+static void
+nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+ struct nfsd_cacherep *rp)
+{
+ if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
+ nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
if (rp->c_state != RC_UNUSED) {
rb_erase(&rp->c_node, &b->rb_head);
list_del(&rp->c_lru);
atomic_dec(&nn->num_drc_entries);
nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
}
- kmem_cache_free(drc_slab, rp);
}
static void
-nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
+ struct nfsd_net *nn)
+{
+ nfsd_cacherep_unlink_locked(nn, b, rp);
+ nfsd_cacherep_free(rp);
+}
+
+static void
+nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
struct nfsd_net *nn)
{
spin_lock(&b->cache_lock);
- nfsd_reply_cache_free_locked(b, rp, nn);
+ nfsd_cacherep_unlink_locked(nn, b, rp);
spin_unlock(&b->cache_lock);
+ nfsd_cacherep_free(rp);
}
int nfsd_drc_slab_create(void)
{
- drc_slab = kmem_cache_create("nfsd_drc",
- sizeof(struct svc_cacherep), 0, 0, NULL);
+ drc_slab = KMEM_CACHE(nfsd_cacherep, 0);
return drc_slab ? 0: -ENOMEM;
}
@@ -154,43 +175,33 @@ void nfsd_drc_slab_free(void)
kmem_cache_destroy(drc_slab);
}
-static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
-{
- return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
-}
-
-static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
-{
- nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
-}
-
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
unsigned int i;
- int status = 0;
nn->max_drc_entries = nfsd_cache_size_limit();
atomic_set(&nn->num_drc_entries, 0);
hashsize = nfsd_hashsize(nn->max_drc_entries);
nn->maskbits = ilog2(hashsize);
- status = nfsd_reply_cache_stats_init(nn);
- if (status)
- goto out_nomem;
-
- nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
- nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
- nn->nfsd_reply_cache_shrinker.seeks = 1;
- status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
- if (status)
- goto out_stats_destroy;
-
nn->drc_hashtbl = kvzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
if (!nn->drc_hashtbl)
+ return -ENOMEM;
+
+ nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
+ nn->nfsd_name);
+ if (!nn->nfsd_reply_cache_shrinker)
goto out_shrinker;
+ nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
+ nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
+ nn->nfsd_reply_cache_shrinker->seeks = 1;
+ nn->nfsd_reply_cache_shrinker->private_data = nn;
+
+ shrinker_register(nn->nfsd_reply_cache_shrinker);
+
for (i = 0; i < hashsize; i++) {
INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
@@ -199,26 +210,22 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
return 0;
out_shrinker:
- unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
-out_stats_destroy:
- nfsd_reply_cache_stats_destroy(nn);
-out_nomem:
+ kvfree(nn->drc_hashtbl);
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
return -ENOMEM;
}
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
- struct svc_cacherep *rp;
+ struct nfsd_cacherep *rp;
unsigned int i;
- nfsd_reply_cache_stats_destroy(nn);
- unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+ shrinker_free(nn->nfsd_reply_cache_shrinker);
for (i = 0; i < nn->drc_hashsize; i++) {
struct list_head *head = &nn->drc_hashtbl[i].lru_head;
while (!list_empty(head)) {
- rp = list_first_entry(head, struct svc_cacherep, c_lru);
+ rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
rp, nn);
}
@@ -230,111 +237,149 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
}
-/*
- * Move cache entry to end of LRU list, and queue the cleaner to run if it's
- * not already scheduled.
- */
static void
-lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
+lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
rp->c_timestamp = jiffies;
list_move_tail(&rp->c_lru, &b->lru_head);
}
-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
- unsigned int max)
+static noinline struct nfsd_drc_bucket *
+nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
+{
+ unsigned int hash = hash_32((__force u32)xid, nn->maskbits);
+
+ return &nn->drc_hashtbl[hash];
+}
+
+/*
+ * Remove and return no more than @max expired entries in bucket @b.
+ * If @max is zero, do not limit the number of removed entries.
+ */
+static void
+nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+ unsigned int max, struct list_head *dispose)
{
- struct svc_cacherep *rp, *tmp;
- long freed = 0;
+ unsigned long expiry = jiffies - RC_EXPIRE;
+ struct nfsd_cacherep *rp, *tmp;
+ unsigned int freed = 0;
+
+ lockdep_assert_held(&b->cache_lock);
+ /* The bucket LRU is ordered oldest-first. */
list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
- /*
- * Don't free entries attached to calls that are still
- * in-progress, but do keep scanning the list.
- */
- if (rp->c_state == RC_INPROG)
- continue;
if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
- time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
+ time_before(expiry, rp->c_timestamp))
break;
- nfsd_reply_cache_free_locked(b, rp, nn);
- if (max && freed++ > max)
+
+ nfsd_cacherep_unlink_locked(nn, b, rp);
+ list_add(&rp->c_lru, dispose);
+
+ if (max && ++freed > max)
break;
}
- return freed;
}
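/*
 * Generic userspace sketch of the "unlink under the lock, free outside
 * the lock" pattern used above: expired nodes are moved to a private
 * dispose list while the bucket lock is held, then freed after it is
 * dropped so that the free path never runs under the spinlock. All names
 * below are invented for illustration; they are not kernel APIs.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int expired;
};

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *bucket;

static void prune_bucket(void)
{
	struct node *dispose = NULL, **pp, *n;

	pthread_mutex_lock(&bucket_lock);
	pp = &bucket;
	while ((n = *pp) != NULL) {
		if (!n->expired) {
			pp = &n->next;
			continue;
		}
		*pp = n->next;		/* unlink while the lock is held */
		n->next = dispose;	/* park on the private dispose list */
		dispose = n;
	}
	pthread_mutex_unlock(&bucket_lock);

	while (dispose) {		/* free with the lock dropped */
		n = dispose;
		dispose = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			break;
		n->expired = i & 1;
		n->next = bucket;
		bucket = n;
	}
	prune_bucket();
	return 0;
}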
-static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
+/**
+ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Returns the total number of entries in the duplicate reply cache. To
+ * keep things simple and quick, this is not the number of expired entries
+ * in the cache (ie, the number that would be removed by a call to
+ * nfsd_reply_cache_scan).
+ */
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
- return prune_bucket(b, nn, 3);
+ struct nfsd_net *nn = shrink->private_data;
+
+ return atomic_read(&nn->num_drc_entries);
}
-/*
- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
- * Also prune the oldest ones when the total exceeds the max number of entries.
+/**
+ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Free expired entries on each bucket's LRU list until we've released
+ * nr_to_scan freed objects. Nothing will be released if the cache
+ * has not exceeded its max_drc_entries limit.

+ *
+ * Returns the number of entries released by this call.
*/
-static long
-prune_cache_entries(struct nfsd_net *nn)
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
+ struct nfsd_net *nn = shrink->private_data;
+ unsigned long freed = 0;
+ LIST_HEAD(dispose);
unsigned int i;
- long freed = 0;
for (i = 0; i < nn->drc_hashsize; i++) {
struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
if (list_empty(&b->lru_head))
continue;
+
spin_lock(&b->cache_lock);
- freed += prune_bucket(b, nn, 0);
+ nfsd_prune_bucket_locked(nn, b, 0, &dispose);
spin_unlock(&b->cache_lock);
+
+ freed += nfsd_cacherep_dispose(&dispose);
+ if (freed > sc->nr_to_scan)
+ break;
}
return freed;
}
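/*
 * The two callbacks above are wired up in nfsd_reply_cache_init() via the
 * dynamic shrinker API this patch switches to: shrinker_alloc() creates
 * the object, ->count_objects, ->scan_objects, ->seeks and ->private_data
 * are filled in, shrinker_register() publishes it, and shrinker_free() in
 * nfsd_reply_cache_shutdown() tears it down. Storing the nfsd_net pointer
 * in ->private_data is what lets these callbacks drop the old
 * container_of() on an embedded struct shrinker.
 */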
-static unsigned long
-nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct nfsd_net *nn = container_of(shrink,
- struct nfsd_net, nfsd_reply_cache_shrinker);
-
- return atomic_read(&nn->num_drc_entries);
-}
-
-static unsigned long
-nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct nfsd_net *nn = container_of(shrink,
- struct nfsd_net, nfsd_reply_cache_shrinker);
-
- return prune_cache_entries(nn);
-}
-/*
- * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+/**
+ * nfsd_cache_csum - Checksum incoming NFS Call arguments
+ * @buf: buffer containing a whole RPC Call message
+ * @start: starting byte of the NFS Call header
+ * @remaining: size of the NFS Call header, in bytes
+ *
+ * Compute a weak checksum of the leading bytes of an NFS procedure
+ * call header to help verify that a retransmitted Call matches an
+ * entry in the duplicate reply cache.
+ *
+ * To avoid assumptions about how the RPC message is laid out in
+ * @buf and what else it might contain (eg, a GSS MIC suffix), the
+ * caller passes us the exact location and length of the NFS Call
+ * header.
+ *
+ * Returns a 32-bit checksum value, as defined in RFC 793.
*/
-static __wsum
-nfsd_cache_csum(struct svc_rqst *rqstp)
+static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
+ unsigned int remaining)
{
+ unsigned int base, len;
+ struct xdr_buf subbuf;
+ __wsum csum = 0;
+ void *p;
int idx;
- unsigned int base;
- __wsum csum;
- struct xdr_buf *buf = &rqstp->rq_arg;
- const unsigned char *p = buf->head[0].iov_base;
- size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
- RC_CSUMLEN);
- size_t len = min(buf->head[0].iov_len, csum_len);
+
+ if (remaining > RC_CSUMLEN)
+ remaining = RC_CSUMLEN;
+ if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
+ return csum;
/* rq_arg.head first */
- csum = csum_partial(p, len, 0);
- csum_len -= len;
+ if (subbuf.head[0].iov_len) {
+ len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
+ csum = csum_partial(subbuf.head[0].iov_base, len, csum);
+ remaining -= len;
+ }
/* Continue into page array */
- idx = buf->page_base / PAGE_SIZE;
- base = buf->page_base & ~PAGE_MASK;
- while (csum_len) {
- p = page_address(buf->pages[idx]) + base;
- len = min_t(size_t, PAGE_SIZE - base, csum_len);
+ idx = subbuf.page_base / PAGE_SIZE;
+ base = subbuf.page_base & ~PAGE_MASK;
+ while (remaining) {
+ p = page_address(subbuf.pages[idx]) + base;
+ len = min_t(unsigned int, PAGE_SIZE - base, remaining);
csum = csum_partial(p, len, csum);
- csum_len -= len;
+ remaining -= len;
base = 0;
++idx;
}
@@ -342,8 +387,8 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
}
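/*
 * Userspace analogue (assumption: one flat buffer rather than an xdr_buf)
 * of the weak checksum idea above: sum the leading bytes of the Call as
 * 16-bit big-endian words with end-around carry, RFC 1071 style. The
 * kernel's csum_partial() keeps a 32-bit running sum instead, but the
 * intent is the same: a cheap fingerprint of the first RC_CSUMLEN bytes
 * of the request.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t weak_csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)buf[0] << 8 | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t call[] = "GETATTR example payload";

	printf("csum=0x%04x\n", weak_csum(call, sizeof(call) - 1));
	return 0;
}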
static int
-nfsd_cache_key_cmp(const struct svc_cacherep *key,
- const struct svc_cacherep *rp, struct nfsd_net *nn)
+nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
+ const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
key->c_key.k_csum != rp->c_key.k_csum) {
@@ -359,11 +404,11 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
* Must be called with cache_lock held. Returns the found entry or
* inserts an empty key on failure.
*/
-static struct svc_cacherep *
-nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
+static struct nfsd_cacherep *
+nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
struct nfsd_net *nn)
{
- struct svc_cacherep *rp, *ret = key;
+ struct nfsd_cacherep *rp, *ret = key;
struct rb_node **p = &b->rb_head.rb_node,
*parent = NULL;
unsigned int entries = 0;
@@ -372,7 +417,7 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
while (*p != NULL) {
++entries;
parent = *p;
- rp = rb_entry(parent, struct svc_cacherep, c_node);
+ rp = rb_entry(parent, struct nfsd_cacherep, c_node);
cmp = nfsd_cache_key_cmp(key, rp, nn);
if (cmp < 0)
@@ -397,14 +442,15 @@ out:
nn->longest_chain_cachesize,
atomic_read(&nn->num_drc_entries));
}
-
- lru_put_end(b, ret);
return ret;
}
/**
* nfsd_cache_lookup - Find an entry in the duplicate reply cache
* @rqstp: Incoming Call to find
+ * @start: starting byte in @rqstp->rq_arg of the NFS Call header
+ * @len: size of the NFS Call header, in bytes
+ * @cacherep: OUT: DRC entry for this request
*
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
@@ -417,59 +463,55 @@ out:
* %RC_REPLY: Reply from cache
* %RC_DROPIT: Do not process the request further
*/
-int nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
+ unsigned int len, struct nfsd_cacherep **cacherep)
{
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- struct svc_cacherep *rp, *found;
- __be32 xid = rqstp->rq_xid;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_cacherep *rp, *found;
__wsum csum;
- u32 hash = nfsd_cache_hash(xid, nn);
- struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
+ struct nfsd_drc_bucket *b;
int type = rqstp->rq_cachetype;
+ LIST_HEAD(dispose);
int rtn = RC_DOIT;
- rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
- nfsd_stats_rc_nocache_inc();
+ nfsd_stats_rc_nocache_inc(nn);
goto out;
}
- csum = nfsd_cache_csum(rqstp);
+ csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
/*
* Since the common case is a cache miss followed by an insert,
* preallocate an entry.
*/
- rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
+ rp = nfsd_cacherep_alloc(rqstp, csum, nn);
if (!rp)
goto out;
+ b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
spin_lock(&b->cache_lock);
found = nfsd_cache_insert(b, rp, nn);
- if (found != rp) {
- nfsd_reply_cache_free_locked(NULL, rp, nn);
- rp = found;
+ if (found != rp)
goto found_entry;
- }
-
- nfsd_stats_rc_misses_inc();
- rqstp->rq_cacherep = rp;
+ *cacherep = rp;
rp->c_state = RC_INPROG;
+ nfsd_prune_bucket_locked(nn, b, 3, &dispose);
+ spin_unlock(&b->cache_lock);
+
+ nfsd_cacherep_dispose(&dispose);
+ nfsd_stats_rc_misses_inc(nn);
atomic_inc(&nn->num_drc_entries);
nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
-
- nfsd_prune_bucket(b, nn);
-
-out_unlock:
- spin_unlock(&b->cache_lock);
-out:
- return rtn;
+ goto out;
found_entry:
/* We found a matching entry which is either in progress or done. */
- nfsd_stats_rc_hits_inc();
+ nfsd_reply_cache_free_locked(NULL, rp, nn);
+ nfsd_stats_rc_hits_inc(nn);
rtn = RC_DROPIT;
+ rp = found;
/* Request being processed */
if (rp->c_state == RC_INPROG)
@@ -486,7 +528,7 @@ found_entry:
case RC_NOCACHE:
break;
case RC_REPLSTAT:
- svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
+ xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
rtn = RC_REPLY;
break;
case RC_REPLBUFF:
@@ -500,14 +542,18 @@ found_entry:
out_trace:
trace_nfsd_drc_found(nn, rqstp, rtn);
- goto out_unlock;
+out_unlock:
+ spin_unlock(&b->cache_lock);
+out:
+ return rtn;
}
/**
* nfsd_cache_update - Update an entry in the duplicate reply cache.
* @rqstp: svc_rqst with a finished Reply
+ * @rp: IN: DRC entry for this request
* @cachetype: which cache to update
- * @statp: Reply's status code
+ * @statp: pointer to Reply's NFS status code, or NULL
*
* This is called from nfsd_dispatch when the procedure has been
* executed and the complete reply is in rqstp->rq_res.
@@ -523,12 +569,11 @@ out_trace:
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
-void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- struct svc_cacherep *rp = rqstp->rq_cacherep;
struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
- u32 hash;
struct nfsd_drc_bucket *b;
int len;
size_t bufsize = 0;
@@ -536,8 +581,7 @@ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
if (!rp)
return;
- hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
- b = &nn->drc_hashtbl[hash];
+ b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
len >>= 2;
@@ -579,24 +623,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
return;
}
-/*
- * Copy cached reply to current reply buffer. Should always fit.
- * FIXME as reply is in a page, we should just attach the page, and
- * keep a refcount....
- */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
- struct kvec *vec = &rqstp->rq_res.head[0];
-
- if (vec->iov_len + data->iov_len > PAGE_SIZE) {
- printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
- data->iov_len);
- return 0;
- }
- memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
- vec->iov_len += data->iov_len;
- return 1;
+ __be32 *p;
+
+ p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
+ if (unlikely(!p))
+ return false;
+ memcpy(p, data->iov_base, data->iov_len);
+ xdr_commit_encode(&rqstp->rq_res_stream);
+ return true;
}
/*
@@ -604,33 +641,26 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
* scraping this file for info should test the labels to ensure they're
* getting the correct field.
*/
-static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
+int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
- struct nfsd_net *nn = m->private;
+ struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
+ nfsd_net_id);
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
atomic_read(&nn->num_drc_entries));
seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
seq_printf(m, "mem usage: %lld\n",
- percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
seq_printf(m, "cache hits: %lld\n",
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
seq_printf(m, "cache misses: %lld\n",
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
seq_printf(m, "not cached: %lld\n",
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
seq_printf(m, "payload misses: %lld\n",
- percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
return 0;
}
-
-int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
-{
- struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
- nfsd_net_id);
-
- return single_open(file, nfsd_reply_cache_stats_show, nn);
-}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index af8531c3854a..5ce9a49e76ba 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -14,10 +14,11 @@
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/gss_api.h>
-#include <linux/sunrpc/gss_krb5_enctypes.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/sunrpc/svc.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
+#include <linux/nfslocalio.h>
#include "idmap.h"
#include "nfsd.h"
@@ -25,6 +26,9 @@
#include "state.h"
#include "netns.h"
#include "pnfs.h"
+#include "filecache.h"
+#include "trace.h"
+#include "netlink.h"
/*
* We have a single directory with several nodes in it.
@@ -44,18 +48,11 @@ enum {
NFSD_Versions,
NFSD_Ports,
NFSD_MaxBlkSize,
- NFSD_MaxConnections,
- NFSD_SupportedEnctypes,
- /*
- * The below MUST come last. Otherwise we leave a hole in nfsd_files[]
- * with !CONFIG_NFSD_V4 and simple_fill_super() goes oops
- */
-#ifdef CONFIG_NFSD_V4
+ NFSD_Filecache,
NFSD_Leasetime,
NFSD_Gracetime,
NFSD_RecoveryDir,
NFSD_V4EndGrace,
-#endif
NFSD_MaxReserved
};
@@ -70,11 +67,12 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size);
static ssize_t write_versions(struct file *file, char *buf, size_t size);
static ssize_t write_ports(struct file *file, char *buf, size_t size);
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
-static ssize_t write_maxconn(struct file *file, char *buf, size_t size);
#ifdef CONFIG_NFSD_V4
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
+#endif
static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size);
#endif
@@ -87,11 +85,12 @@ static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
[NFSD_Versions] = write_versions,
[NFSD_Ports] = write_ports,
[NFSD_MaxBlkSize] = write_maxblksize,
- [NFSD_MaxConnections] = write_maxconn,
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = write_leasetime,
[NFSD_Gracetime] = write_gracetime,
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
[NFSD_RecoveryDir] = write_recoverydir,
+#endif
[NFSD_V4EndGrace] = write_v4_end_grace,
#endif
};
@@ -109,12 +108,12 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu
if (IS_ERR(data))
return PTR_ERR(data);
- rv = write_op[ino](file, data, size);
- if (rv >= 0) {
- simple_transaction_set(file, rv);
- rv = size;
- }
- return rv;
+ rv = write_op[ino](file, data, size);
+ if (rv < 0)
+ return rv;
+
+ simple_transaction_set(file, rv);
+ return size;
}
static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
@@ -153,18 +152,6 @@ static int exports_net_open(struct net *net, struct file *file)
return 0;
}
-static int exports_proc_open(struct inode *inode, struct file *file)
-{
- return exports_net_open(current->nsproxy->net_ns, file);
-}
-
-static const struct proc_ops exports_proc_ops = {
- .proc_open = exports_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = seq_release,
-};
-
static int exports_nfsd_open(struct inode *inode, struct file *file)
{
return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -183,51 +170,25 @@ static int export_features_show(struct seq_file *m, void *v)
return 0;
}
-static int export_features_open(struct inode *inode, struct file *file)
-{
- return single_open(file, export_features_show, NULL);
-}
+DEFINE_SHOW_ATTRIBUTE(export_features);
-static const struct file_operations export_features_operations = {
- .open = export_features_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
-static int supported_enctypes_show(struct seq_file *m, void *v)
+static int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
- seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
- return 0;
-}
+ struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
-static int supported_enctypes_open(struct inode *inode, struct file *file)
-{
- return single_open(file, supported_enctypes_show, NULL);
+ return svc_pool_stats_open(&nn->nfsd_info, file);
}
-static const struct file_operations supported_enctypes_ops = {
- .open = supported_enctypes_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
-
static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = nfsd_pool_stats_release,
+ .release = seq_release,
};
-static const struct file_operations reply_cache_stats_operations = {
- .open = nfsd_reply_cache_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats);
+
+DEFINE_SHOW_ATTRIBUTE(nfsd_file_cache_stats);
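/*
 * DEFINE_SHOW_ATTRIBUTE() comes from <linux/seq_file.h> and replaces the
 * hand-rolled open handlers and file_operations deleted above. Roughly
 * (paraphrased, not the exact macro text), DEFINE_SHOW_ATTRIBUTE(name)
 * expands to:
 */
static int name_open(struct inode *inode, struct file *file)
{
	return single_open(file, name_show, inode->i_private);
}

static const struct file_operations name_fops = {
	.owner   = THIS_MODULE,
	.open    = name_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};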
/*----------------------------------------------------------------------------*/
/*
@@ -275,6 +236,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
if (rpc_pton(net, fo_path, size, sap, salen) == 0)
return -EINVAL;
+ trace_nfsd_ctl_unlock_ip(net, buf);
return nlmsvc_unlock_all_by_ip(sap);
}
@@ -308,7 +270,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
fo_path = buf;
if (qword_get(&buf, fo_path, size) < 0)
return -EINVAL;
-
+ trace_nfsd_ctl_unlock_fs(netns(file), fo_path);
error = kern_path(fo_path, 0, &path);
if (error)
return error;
@@ -323,6 +285,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
* 3. Is that directory the root of an exported file system?
*/
error = nlmsvc_unlock_all_by_sb(path.dentry->d_sb);
+ nfsd4_revoke_states(netns(file), path.dentry->d_sb);
path_put(&path);
return error;
@@ -369,7 +332,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
len = qword_get(&mesg, dname, size);
if (len <= 0)
return -EINVAL;
-
+
path = dname+len+1;
len = qword_get(&mesg, path, size);
if (len <= 0)
@@ -383,15 +346,17 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
return -EINVAL;
maxsize = min(maxsize, NFS3_FHSIZE);
- if (qword_get(&mesg, mesg, size)>0)
+ if (qword_get(&mesg, mesg, size) > 0)
return -EINVAL;
+ trace_nfsd_ctl_filehandle(netns(file), dname, path, maxsize);
+
/* we have all the words, they are in buf.. */
dom = unix_domain_find(dname);
if (!dom)
return -ENOMEM;
- len = exp_rootfh(netns(file), dom, path, &fh, maxsize);
+ len = exp_rootfh(netns(file), dom, path, &fh, maxsize);
auth_domain_put(dom);
if (len)
return len;
@@ -444,7 +409,10 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return rv;
if (newthreads < 0)
return -EINVAL;
- rv = nfsd_svc(newthreads, net, file->f_cred);
+ trace_nfsd_ctl_threads(net, newthreads);
+ mutex_lock(&nfsd_mutex);
+ rv = nfsd_svc(1, &newthreads, net, file->f_cred, NULL);
+ mutex_unlock(&nfsd_mutex);
if (rv < 0)
return rv;
} else
@@ -463,8 +431,8 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
* OR
*
* Input:
- * buf: C string containing whitespace-
- * separated unsigned integer values
+ * buf: C string containing whitespace-
+ * separated unsigned integer values
* representing the number of NFSD
* threads to start in each pool
* size: non-zero length of C string in @buf
@@ -516,7 +484,16 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
rv = -EINVAL;
if (nthreads[i] < 0)
goto out_free;
+ trace_nfsd_ctl_pool_threads(net, i, nthreads[i]);
}
+
+ /*
+ * There must always be a thread in pool 0; the admin
+ * can't shut down NFS completely using pool_threads.
+ */
+ if (nthreads[0] == 0)
+ nthreads[0] = 1;
+
rv = nfsd_set_nrthreads(i, nthreads, net);
if (rv)
goto out_free;
@@ -571,7 +548,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
char *sep;
struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
- if (size>0) {
+ if (size > 0) {
if (nn->nfsd_serv)
/* Cannot change versions without updating
* nn->nfsd_serv->sv_xdrsize, and reallocing
@@ -581,6 +558,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
if (buf[size-1] != '\n')
return -EINVAL;
buf[size-1] = 0;
+ trace_nfsd_ctl_version(netns(file), buf);
vers = mesg;
len = qword_get(&mesg, vers, size);
@@ -602,7 +580,9 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
switch(num) {
+#ifdef CONFIG_NFSD_V2
case 2:
+#endif
case 3:
nfsd_vers(nn, num, cmd);
break;
@@ -622,7 +602,9 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
}
break;
default:
- return -EINVAL;
+ /* Ignore requests to disable non-existent versions */
+ if (cmd == NFSD_SET)
+ return -EINVAL;
}
vers += len + 1;
} while ((len = qword_get(&mesg, vers, size)) > 0);
@@ -633,7 +615,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
}
/* Now write current state into reply buffer */
- len = 0;
sep = "";
remaining = SIMPLE_TRANSACTION_LIMIT;
for (num=2 ; num <= 4 ; num++) {
@@ -679,11 +660,11 @@ out:
* OR
*
* Input:
- * buf: C string containing whitespace-
- * separated positive or negative
- * integer values representing NFS
- * protocol versions to enable ("+n")
- * or disable ("-n")
+ * buf: C string containing whitespace-
+ * separated positive or negative
+ * integer values representing NFS
+ * protocol versions to enable ("+n")
+ * or disable ("-n")
* size: non-zero length of C string in @buf
* Output:
* On success: status of zero or more protocol versions has
@@ -727,33 +708,28 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
char *mesg = buf;
int fd, err;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
err = get_int(&mesg, &fd);
if (err != 0 || fd < 0)
return -EINVAL;
-
- if (svc_alien_sock(net, fd)) {
- printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__);
- return -EINVAL;
- }
+ trace_nfsd_ctl_ports_addfd(net, fd);
err = nfsd_create_serv(net);
if (err != 0)
return err;
- err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
- if (err < 0) {
- nfsd_destroy(net);
- return err;
- }
+ serv = nn->nfsd_serv;
+ err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+
+ if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
+ nfsd_destroy_serv(net);
- /* Decrease the count, but don't shut down the service */
- nn->nfsd_serv->sv_nrthreads--;
return err;
}
/*
- * A transport listener is added by writing it's transport name and
+ * A transport listener is added by writing its transport name and
* a port number.
*/
static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cred *cred)
@@ -762,41 +738,41 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
struct svc_xprt *xprt;
int port, err;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
if (sscanf(buf, "%15s %5u", transport, &port) != 2)
return -EINVAL;
if (port < 1 || port > USHRT_MAX)
return -EINVAL;
+ trace_nfsd_ctl_ports_addxprt(net, transport, port);
err = nfsd_create_serv(net);
if (err != 0)
return err;
- err = svc_create_xprt(nn->nfsd_serv, transport, net,
- PF_INET, port, SVC_SOCK_ANONYMOUS, cred);
+ serv = nn->nfsd_serv;
+ err = svc_xprt_create(serv, transport, net,
+ PF_INET, port, SVC_SOCK_ANONYMOUS, cred);
if (err < 0)
goto out_err;
- err = svc_create_xprt(nn->nfsd_serv, transport, net,
- PF_INET6, port, SVC_SOCK_ANONYMOUS, cred);
+ err = svc_xprt_create(serv, transport, net,
+ PF_INET6, port, SVC_SOCK_ANONYMOUS, cred);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_close;
- /* Decrease the count, but don't shut down the service */
- nn->nfsd_serv->sv_nrthreads--;
return 0;
out_close:
- xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
+ xprt = svc_find_xprt(serv, transport, net, PF_INET, port);
if (xprt != NULL) {
- svc_close_xprt(xprt);
+ svc_xprt_close(xprt);
svc_xprt_put(xprt);
}
out_err:
- if (!list_empty(&nn->nfsd_serv->sv_permsocks))
- nn->nfsd_serv->sv_nrthreads--;
- else
- nfsd_destroy(net);
+ if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
+ nfsd_destroy_serv(net);
+
return err;
}
@@ -881,9 +857,9 @@ int nfsd_max_blksize;
* OR
*
* Input:
- * buf: C string containing an unsigned
- * integer value representing the new
- * NFS blksize
+ * buf: C string containing an unsigned
+ * integer value representing the new
+ * NFS blksize
* size: non-zero length of C string in @buf
* Output:
* On success: passed-in buffer filled with '\n'-terminated C string
@@ -902,6 +878,8 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
int rv = get_int(&mesg, &bsize);
if (rv)
return rv;
+ trace_nfsd_ctl_maxblksize(netns(file), bsize);
+
/* force bsize into allowed range and
* required alignment.
*/
@@ -921,47 +899,11 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize);
}
-/*
- * write_maxconn - Set or report the current max number of connections
- *
- * Input:
- * buf: ignored
- * size: zero
- * OR
- *
- * Input:
- * buf: C string containing an unsigned
- * integer value representing the new
- * number of max connections
- * size: non-zero length of C string in @buf
- * Output:
- * On success: passed-in buffer filled with '\n'-terminated C string
- * containing numeric value of max_connections setting
- * for this net namespace;
- * return code is the size in bytes of the string
- * On error: return code is zero or a negative errno value
- */
-static ssize_t write_maxconn(struct file *file, char *buf, size_t size)
-{
- char *mesg = buf;
- struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
- unsigned int maxconn = nn->max_connections;
-
- if (size > 0) {
- int rv = get_uint(&mesg, &maxconn);
-
- if (rv)
- return rv;
- nn->max_connections = maxconn;
- }
-
- return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%u\n", maxconn);
-}
-
#ifdef CONFIG_NFSD_V4
static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
time64_t *time, struct nfsd_net *nn)
{
+ struct dentry *dentry = file_dentry(file);
char *mesg = buf;
int rv, i;
@@ -971,6 +913,9 @@ static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
rv = get_int(&mesg, &i);
if (rv)
return rv;
+ trace_nfsd_ctl_time(netns(file), dentry->d_name.name,
+ dentry->d_name.len, i);
+
/*
* Some sanity checking. We don't have a reason for
* these particular numbers, but problems with the
@@ -1045,6 +990,7 @@ static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
return nfsd4_write_time(file, buf, size, &nn->nfsd4_grace, nn);
}
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
struct nfsd_net *nn)
{
@@ -1063,6 +1009,7 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
len = qword_get(&mesg, recdir, size);
if (len <= 0)
return -EINVAL;
+ trace_nfsd_ctl_recoverydir(netns(file), recdir);
status = nfs4_reset_recoverydir(recdir);
if (status)
@@ -1104,6 +1051,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
mutex_unlock(&nfsd_mutex);
return rv;
}
+#endif
/*
* write_v4_end_grace - release grace period for nfsd's v4.x lock manager
@@ -1114,7 +1062,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
* OR
*
* Input:
- * buf: any value
+ * buf: any value
* size: non-zero length of C string in @buf
* Output:
* passed-in buffer filled with "Y" or "N" with a newline
@@ -1136,6 +1084,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
case '1':
if (!nn->nfsd_serv)
return -EBUSY;
+ trace_nfsd_end_grace(netns(file));
nfsd4_end_grace(nn);
break;
default:
@@ -1154,154 +1103,138 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
* populating the filesystem.
*/
-/* Basically copying rpc_get_inode. */
static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
- if (!inode)
- return NULL;
- /* Following advice from simple_fill_super documentation: */
- inode->i_ino = iunique(sb, NFSD_MaxReserved);
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- switch (mode & S_IFMT) {
- case S_IFDIR:
- inode->i_fop = &simple_dir_operations;
- inode->i_op = &simple_dir_inode_operations;
- inc_nlink(inode);
- break;
- default:
- break;
+ if (inode) {
+ /* Following advice from simple_fill_super documentation: */
+ inode->i_ino = iunique(sb, NFSD_MaxReserved);
+ inode->i_mode = mode;
+ simple_inode_init_ts(inode);
}
return inode;
}
-static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
+static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *ncl, char *name)
{
+ struct inode *dir = parent->d_inode;
+ struct dentry *dentry;
struct inode *inode;
- inode = nfsd_get_inode(dir->i_sb, mode);
+ inode = nfsd_get_inode(parent->d_sb, S_IFDIR | 0600);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(inode);
+ return dentry;
+ }
+ inode->i_fop = &simple_dir_operations;
+ inode->i_op = &simple_dir_inode_operations;
+ inc_nlink(inode);
if (ncl) {
inode->i_private = ncl;
kref_get(&ncl->cl_ref);
}
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
- return 0;
+ simple_done_creating(dentry);
+ return dentry; // borrowed
}
-static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *ncl, char *name)
+#if IS_ENABLED(CONFIG_SUNRPC_GSS)
+/*
+ * @content is assumed to be a NUL-terminated string that lives
+ * longer than the symlink itself.
+ */
+static void _nfsd_symlink(struct dentry *parent, const char *name,
+ const char *content)
{
struct inode *dir = parent->d_inode;
+ struct inode *inode;
struct dentry *dentry;
- int ret = -ENOMEM;
- inode_lock(dir);
- dentry = d_alloc_name(parent, name);
- if (!dentry)
- goto out_err;
- ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
- if (ret)
- goto out_err;
-out:
- inode_unlock(dir);
- return dentry;
-out_err:
- dput(dentry);
- dentry = ERR_PTR(ret);
- goto out;
+ inode = nfsd_get_inode(dir->i_sb, S_IFLNK | 0777);
+ if (!inode)
+ return;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(inode);
+ return;
+ }
+
+ inode->i_op = &simple_symlink_inode_operations;
+ inode->i_link = (char *)content;
+ inode->i_size = strlen(content);
+
+ d_make_persistent(dentry, inode);
+ fsnotify_create(dir, dentry);
+ simple_done_creating(dentry);
+}
+#else
+static inline void _nfsd_symlink(struct dentry *parent, const char *name,
+ const char *content)
+{
}
-static void clear_ncl(struct inode *inode)
+#endif
+
+static void clear_ncl(struct dentry *dentry)
{
+ struct inode *inode = d_inode(dentry);
struct nfsdfs_client *ncl = inode->i_private;
+ spin_lock(&inode->i_lock);
inode->i_private = NULL;
+ spin_unlock(&inode->i_lock);
kref_put(&ncl->cl_ref, ncl->cl_release);
}
-static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
-{
- struct nfsdfs_client *nc = inode->i_private;
-
- if (nc)
- kref_get(&nc->cl_ref);
- return nc;
-}
-
struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
{
struct nfsdfs_client *nc;
- inode_lock_shared(inode);
- nc = __get_nfsdfs_client(inode);
- inode_unlock_shared(inode);
+ spin_lock(&inode->i_lock);
+ nc = inode->i_private;
+ if (nc)
+ kref_get(&nc->cl_ref);
+ spin_unlock(&inode->i_lock);
return nc;
}
-/* from __rpc_unlink */
-static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
-{
- int ret;
-
- clear_ncl(d_inode(dentry));
- dget(dentry);
- ret = simple_unlink(dir, dentry);
- d_delete(dentry);
- dput(dentry);
- WARN_ON_ONCE(ret);
-}
-
-static void nfsdfs_remove_files(struct dentry *root)
-{
- struct dentry *dentry, *tmp;
-
- list_for_each_entry_safe(dentry, tmp, &root->d_subdirs, d_child) {
- if (!simple_positive(dentry)) {
- WARN_ON_ONCE(1); /* I think this can't happen? */
- continue;
- }
- nfsdfs_remove_file(d_inode(root), dentry);
- }
-}
/* XXX: cut'n'paste from simple_fill_super; figure out if we could share
* code instead. */
-static int nfsdfs_create_files(struct dentry *root,
+static int nfsdfs_create_files(struct dentry *root,
const struct tree_descr *files,
+ struct nfsdfs_client *ncl,
struct dentry **fdentries)
{
struct inode *dir = d_inode(root);
- struct inode *inode;
struct dentry *dentry;
- int i;
- inode_lock(dir);
- for (i = 0; files->name && files->name[0]; i++, files++) {
- dentry = d_alloc_name(root, files->name);
- if (!dentry)
- goto out;
- inode = nfsd_get_inode(d_inode(root)->i_sb,
- S_IFREG | files->mode);
- if (!inode) {
- dput(dentry);
- goto out;
+ for (int i = 0; files->name && files->name[0]; i++, files++) {
+ struct inode *inode = nfsd_get_inode(root->d_sb,
+ S_IFREG | files->mode);
+ if (!inode)
+ return -ENOMEM;
+ dentry = simple_start_creating(root, files->name);
+ if (IS_ERR(dentry)) {
+ iput(inode);
+ return PTR_ERR(dentry);
}
+ kref_get(&ncl->cl_ref);
inode->i_fop = files->ops;
- inode->i_private = __get_nfsdfs_client(dir);
- d_add(dentry, inode);
+ inode->i_private = ncl;
+ d_make_persistent(dentry, inode);
fsnotify_create(dir, dentry);
if (fdentries)
- fdentries[i] = dentry;
+ fdentries[i] = dentry; // borrowed
+ simple_done_creating(dentry);
}
- inode_unlock(dir);
return 0;
-out:
- nfsdfs_remove_files(root);
- inode_unlock(dir);
- return -ENOMEM;
}
/* on success, returns positive number unique to that client. */
@@ -1319,7 +1252,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn,
dentry = nfsd_mkdir(nn->nfsd_client_dir, ncl, name);
if (IS_ERR(dentry)) /* XXX: tossing errors? */
return NULL;
- ret = nfsdfs_create_files(dentry, files, fdentries);
+ ret = nfsdfs_create_files(dentry, files, ncl, fdentries);
if (ret) {
nfsd_client_rmdir(dentry);
return NULL;
@@ -1330,20 +1263,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn,
/* Taken from __rpc_rmdir: */
void nfsd_client_rmdir(struct dentry *dentry)
{
- struct inode *dir = d_inode(dentry->d_parent);
- struct inode *inode = d_inode(dentry);
- int ret;
-
- inode_lock(dir);
- nfsdfs_remove_files(dentry);
- clear_ncl(inode);
- dget(dentry);
- ret = simple_rmdir(dir, dentry);
- WARN_ON_ONCE(ret);
- fsnotify_rmdir(dir, dentry);
- d_delete(dentry);
- dput(dentry);
- inode_unlock(dir);
+ simple_recursive_removal(dentry, clear_ncl);
}
static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
@@ -1358,7 +1278,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
/* Per-export io stats use same ops as exports file */
[NFSD_Export_Stats] = {"export_stats", &exports_nfsd_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
- &export_features_operations, S_IRUGO},
+ &export_features_fops, S_IRUGO},
[NFSD_FO_UnlockIP] = {"unlock_ip",
&transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_FO_UnlockFS] = {"unlock_filesystem",
@@ -1367,18 +1287,18 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
[NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Pool_Stats] = {"pool_stats", &pool_stats_operations, S_IRUGO},
- [NFSD_Reply_Cache_Stats] = {"reply_cache_stats", &reply_cache_stats_operations, S_IRUGO},
+ [NFSD_Reply_Cache_Stats] = {"reply_cache_stats",
+ &nfsd_reply_cache_stats_fops, S_IRUGO},
[NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
- [NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO},
-#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
- [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
-#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
+ [NFSD_Filecache] = {"filecache", &nfsd_file_cache_stats_fops, S_IRUGO},
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
#endif
/* last one */ {""}
@@ -1387,6 +1307,8 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
ret = simple_fill_super(sb, 0x6e667364, nfsd_files);
if (ret)
return ret;
+ _nfsd_symlink(sb->s_root, "supported_krb5_enctypes",
+ "/proc/net/rpc/gss_krb5_enctypes");
dentry = nfsd_mkdir(sb->s_root, NULL, "clients");
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -1424,7 +1346,7 @@ static void nfsd_umount(struct super_block *sb)
nfsd_shutdown_threads(net);
- kill_litter_super(sb);
+ kill_anon_super(sb);
put_net(net);
}
@@ -1437,6 +1359,19 @@ static struct file_system_type nfsd_fs_type = {
MODULE_ALIAS_FS("nfsd");
#ifdef CONFIG_PROC_FS
+
+static int exports_proc_open(struct inode *inode, struct file *file)
+{
+ return exports_net_open(current->nsproxy->net_ns, file);
+}
+
+static const struct proc_ops exports_proc_ops = {
+ .proc_open = exports_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+};
+
static int create_proc_exports_entry(void)
{
struct proc_dir_entry *entry;
@@ -1460,10 +1395,755 @@ static int create_proc_exports_entry(void)
unsigned int nfsd_net_id;
-static __net_init int nfsd_init_net(struct net *net)
+static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct nfsd_genl_rqstp *genl_rqstp)
+{
+ void *hdr;
+ u32 i;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &nfsd_nl_family, 0, NFSD_CMD_RPC_STATUS_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, genl_rqstp->rq_xid) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, genl_rqstp->rq_flags) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, genl_rqstp->rq_prog) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, genl_rqstp->rq_proc) ||
+ nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, genl_rqstp->rq_vers) ||
+ nla_put_s64(skb, NFSD_A_RPC_STATUS_SERVICE_TIME,
+ ktime_to_us(genl_rqstp->rq_stime),
+ NFSD_A_RPC_STATUS_PAD))
+ return -ENOBUFS;
+
+ switch (genl_rqstp->rq_saddr.sa_family) {
+ case AF_INET: {
+ const struct sockaddr_in *s_in, *d_in;
+
+ s_in = (const struct sockaddr_in *)&genl_rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in *)&genl_rqstp->rq_daddr;
+ if (nla_put_in_addr(skb, NFSD_A_RPC_STATUS_SADDR4,
+ s_in->sin_addr.s_addr) ||
+ nla_put_in_addr(skb, NFSD_A_RPC_STATUS_DADDR4,
+ d_in->sin_addr.s_addr) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT,
+ s_in->sin_port) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT,
+ d_in->sin_port))
+ return -ENOBUFS;
+ break;
+ }
+ case AF_INET6: {
+ const struct sockaddr_in6 *s_in, *d_in;
+
+ s_in = (const struct sockaddr_in6 *)&genl_rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in6 *)&genl_rqstp->rq_daddr;
+ if (nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_SADDR6,
+ &s_in->sin6_addr) ||
+ nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_DADDR6,
+ &d_in->sin6_addr) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT,
+ s_in->sin6_port) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT,
+ d_in->sin6_port))
+ return -ENOBUFS;
+ break;
+ }
+ }
+
+ for (i = 0; i < genl_rqstp->rq_opcnt; i++)
+ if (nla_put_u32(skb, NFSD_A_RPC_STATUS_COMPOUND_OPS,
+ genl_rqstp->rq_opnum[i]))
+ return -ENOBUFS;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+}
+
+/**
+ * nfsd_nl_rpc_status_get_dumpit - Handle rpc_status_get dumpit
+ * @skb: reply buffer
+ * @cb: netlink metadata and command arguments
+ *
+ * Returns the size of the reply or a negative errno.
+ */
+int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int i, ret, rqstp_index = 0;
+ struct nfsd_net *nn;
+
+ mutex_lock(&nfsd_mutex);
+
+ nn = net_generic(sock_net(skb->sk), nfsd_net_id);
+ if (!nn->nfsd_serv) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ rcu_read_lock();
+
+ for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
+ struct svc_rqst *rqstp;
+
+ if (i < cb->args[0]) /* already consumed */
+ continue;
+
+ rqstp_index = 0;
+ list_for_each_entry_rcu(rqstp,
+ &nn->nfsd_serv->sv_pools[i].sp_all_threads,
+ rq_all) {
+ struct nfsd_genl_rqstp genl_rqstp;
+ unsigned int status_counter;
+
+ if (rqstp_index++ < cb->args[1]) /* already consumed */
+ continue;
+ /*
+ * Acquire rq_status_counter before parsing the rqst
+ * fields. rq_status_counter is set to an odd value in
+ * order to notify consumers that the rqstp fields are
+ * meaningful.
+ */
+ status_counter =
+ smp_load_acquire(&rqstp->rq_status_counter);
+ if (!(status_counter & 1))
+ continue;
+
+ genl_rqstp.rq_xid = rqstp->rq_xid;
+ genl_rqstp.rq_flags = rqstp->rq_flags;
+ genl_rqstp.rq_vers = rqstp->rq_vers;
+ genl_rqstp.rq_prog = rqstp->rq_prog;
+ genl_rqstp.rq_proc = rqstp->rq_proc;
+ genl_rqstp.rq_stime = rqstp->rq_stime;
+ genl_rqstp.rq_opcnt = 0;
+ memcpy(&genl_rqstp.rq_daddr, svc_daddr(rqstp),
+ sizeof(struct sockaddr));
+ memcpy(&genl_rqstp.rq_saddr, svc_addr(rqstp),
+ sizeof(struct sockaddr));
+
+#ifdef CONFIG_NFSD_V4
+ if (rqstp->rq_vers == NFS4_VERSION &&
+ rqstp->rq_proc == NFSPROC4_COMPOUND) {
+ /* NFSv4 compound */
+ struct nfsd4_compoundargs *args;
+ int j;
+
+ args = rqstp->rq_argp;
+ genl_rqstp.rq_opcnt = min_t(u32, args->opcnt,
+ ARRAY_SIZE(genl_rqstp.rq_opnum));
+ for (j = 0; j < genl_rqstp.rq_opcnt; j++)
+ genl_rqstp.rq_opnum[j] =
+ args->ops[j].opnum;
+ }
+#endif /* CONFIG_NFSD_V4 */
+
+ /*
+ * Acquire rq_status_counter before reporting the rqst
+ * fields to the user.
+ */
+ if (smp_load_acquire(&rqstp->rq_status_counter) !=
+ status_counter)
+ continue;
+
+ ret = nfsd_genl_rpc_status_compose_msg(skb, cb,
+ &genl_rqstp);
+ if (ret)
+ goto out;
+ }
+ }
+
+ cb->args[0] = i;
+ cb->args[1] = rqstp_index;
+ ret = skb->len;
+out:
+ rcu_read_unlock();
+out_unlock:
+ mutex_unlock(&nfsd_mutex);
+
+ return ret;
+}
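/*
 * Aside (not part of the patch): the dumpit loop above relies on an
 * even/odd counter to snapshot rqstp fields without locking.  The writer
 * makes rq_status_counter odd while a request's fields are valid and
 * bumps it again when the slot is reused; the reader copies the fields
 * only if the counter is odd and unchanged across the copy.  Below is a
 * minimal user-space model of that same pattern, using C11 atomics; the
 * struct and function names here are made up for illustration only.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct record {
	_Atomic unsigned int counter;	/* odd => fields are meaningful */
	int payload;
};

static bool snapshot_record(const struct record *rec, int *out)
{
	unsigned int before, after;

	before = atomic_load_explicit(&rec->counter, memory_order_acquire);
	if (!(before & 1))
		return false;		/* record not currently valid */

	*out = rec->payload;		/* copy the fields */

	after = atomic_load_explicit(&rec->counter, memory_order_acquire);
	return after == before;		/* discard if the writer moved on */
}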
+
+/**
+ * nfsd_nl_threads_set_doit - set the number of running threads
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ int *nthreads, nrpools = 0, i, ret = -EOPNOTSUPP, rem;
+ struct net *net = genl_info_net(info);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ const struct nlattr *attr;
+ const char *scope = NULL;
+
+ if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_THREADS))
+ return -EINVAL;
+
+ /* count number of SERVER_THREADS values */
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_THREADS, info->nlhdr,
+ GENL_HDRLEN, rem)
+ nrpools++;
+
+ mutex_lock(&nfsd_mutex);
+
+ nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL);
+ if (!nthreads) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ i = 0;
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_THREADS, info->nlhdr,
+ GENL_HDRLEN, rem) {
+ nthreads[i++] = nla_get_u32(attr);
+ if (i >= nrpools)
+ break;
+ }
+
+ if (info->attrs[NFSD_A_SERVER_GRACETIME] ||
+ info->attrs[NFSD_A_SERVER_LEASETIME] ||
+ info->attrs[NFSD_A_SERVER_SCOPE]) {
+ ret = -EBUSY;
+ if (nn->nfsd_serv && nn->nfsd_serv->sv_nrthreads)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ attr = info->attrs[NFSD_A_SERVER_GRACETIME];
+ if (attr) {
+ u32 gracetime = nla_get_u32(attr);
+
+ if (gracetime < 10 || gracetime > 3600)
+ goto out_unlock;
+
+ nn->nfsd4_grace = gracetime;
+ }
+
+ attr = info->attrs[NFSD_A_SERVER_LEASETIME];
+ if (attr) {
+ u32 leasetime = nla_get_u32(attr);
+
+ if (leasetime < 10 || leasetime > 3600)
+ goto out_unlock;
+
+ nn->nfsd4_lease = leasetime;
+ }
+
+ attr = info->attrs[NFSD_A_SERVER_SCOPE];
+ if (attr)
+ scope = nla_data(attr);
+ }
+
+ ret = nfsd_svc(nrpools, nthreads, net, get_current_cred(), scope);
+ if (ret > 0)
+ ret = 0;
+out_unlock:
+ mutex_unlock(&nfsd_mutex);
+ kfree(nthreads);
+ return ret;
+}
+
+/**
+ * nfsd_nl_threads_get_doit - get the number of running threads
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ mutex_lock(&nfsd_mutex);
+
+ err = nla_put_u32(skb, NFSD_A_SERVER_GRACETIME,
+ nn->nfsd4_grace) ||
+ nla_put_u32(skb, NFSD_A_SERVER_LEASETIME,
+ nn->nfsd4_lease) ||
+ nla_put_string(skb, NFSD_A_SERVER_SCOPE,
+ nn->nfsd_name);
+ if (err)
+ goto err_unlock;
+
+ if (nn->nfsd_serv) {
+ int i;
+
+ for (i = 0; i < nfsd_nrpools(net); ++i) {
+ struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
+
+ err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
+ sp->sp_nrthreads);
+ if (err)
+ goto err_unlock;
+ }
+ } else {
+ err = nla_put_u32(skb, NFSD_A_SERVER_THREADS, 0);
+ if (err)
+ goto err_unlock;
+ }
+
+ mutex_unlock(&nfsd_mutex);
+
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+
+err_unlock:
+ mutex_unlock(&nfsd_mutex);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_version_set_doit - set the nfs enabled versions
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct nlattr *attr;
+ struct nfsd_net *nn;
+ int i, rem;
+
+ if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_PROTO_VERSION))
+ return -EINVAL;
+
+ mutex_lock(&nfsd_mutex);
+
+ nn = net_generic(genl_info_net(info), nfsd_net_id);
+ if (nn->nfsd_serv) {
+ mutex_unlock(&nfsd_mutex);
+ return -EBUSY;
+ }
+
+ /* clear current supported versions. */
+ nfsd_vers(nn, 2, NFSD_CLEAR);
+ nfsd_vers(nn, 3, NFSD_CLEAR);
+ for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
+ nfsd_minorversion(nn, i, NFSD_CLEAR);
+
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_PROTO_VERSION, info->nlhdr,
+ GENL_HDRLEN, rem) {
+ struct nlattr *tb[NFSD_A_VERSION_MAX + 1];
+ u32 major, minor = 0;
+ bool enabled;
+
+ if (nla_parse_nested(tb, NFSD_A_VERSION_MAX, attr,
+ nfsd_version_nl_policy, info->extack) < 0)
+ continue;
+
+ if (!tb[NFSD_A_VERSION_MAJOR])
+ continue;
+
+ major = nla_get_u32(tb[NFSD_A_VERSION_MAJOR]);
+ if (tb[NFSD_A_VERSION_MINOR])
+ minor = nla_get_u32(tb[NFSD_A_VERSION_MINOR]);
+
+ enabled = nla_get_flag(tb[NFSD_A_VERSION_ENABLED]);
+
+ switch (major) {
+ case 4:
+ nfsd_minorversion(nn, minor, enabled ? NFSD_SET : NFSD_CLEAR);
+ break;
+ case 3:
+ case 2:
+ if (!minor)
+ nfsd_vers(nn, major, enabled ? NFSD_SET : NFSD_CLEAR);
+ break;
+ default:
+ break;
+ }
+ }
+
+ mutex_unlock(&nfsd_mutex);
+
+ return 0;
+}
+
+/**
+ * nfsd_nl_version_get_doit - get the enabled status for all supported nfs versions
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfsd_net *nn;
+ int i, err;
+ void *hdr;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ mutex_lock(&nfsd_mutex);
+ nn = net_generic(genl_info_net(info), nfsd_net_id);
+
+ for (i = 2; i <= 4; i++) {
+ int j;
+
+ for (j = 0; j <= NFSD_SUPPORTED_MINOR_VERSION; j++) {
+ struct nlattr *attr;
+
+ /* Don't record any versions the kernel doesn't have
+ * compiled in
+ */
+ if (!nfsd_support_version(i))
+ continue;
+
+ /* NFSv{2,3} do not support minor numbers */
+ if (i < 4 && j)
+ continue;
+
+ attr = nla_nest_start(skb,
+ NFSD_A_SERVER_PROTO_VERSION);
+ if (!attr) {
+ err = -EINVAL;
+ goto err_nfsd_unlock;
+ }
+
+ if (nla_put_u32(skb, NFSD_A_VERSION_MAJOR, i) ||
+ nla_put_u32(skb, NFSD_A_VERSION_MINOR, j)) {
+ err = -EINVAL;
+ goto err_nfsd_unlock;
+ }
+
+ /* Set the enabled flag if the version is enabled */
+ if (nfsd_vers(nn, i, NFSD_TEST) &&
+ (i < 4 || nfsd_minorversion(nn, j, NFSD_TEST)) &&
+ nla_put_flag(skb, NFSD_A_VERSION_ENABLED)) {
+ err = -EINVAL;
+ goto err_nfsd_unlock;
+ }
+
+ nla_nest_end(skb, attr);
+ }
+ }
+
+ mutex_unlock(&nfsd_mutex);
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+
+err_nfsd_unlock:
+ mutex_unlock(&nfsd_mutex);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_listener_set_doit - configure the set of NFS listener sockets
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct svc_xprt *xprt, *tmp;
+ const struct nlattr *attr;
+ struct svc_serv *serv;
+ LIST_HEAD(permsocks);
+ struct nfsd_net *nn;
+ bool delete = false;
+ int err, rem;
+
+ mutex_lock(&nfsd_mutex);
+
+ err = nfsd_create_serv(net);
+ if (err) {
+ mutex_unlock(&nfsd_mutex);
+ return err;
+ }
+
+ nn = net_generic(net, nfsd_net_id);
+ serv = nn->nfsd_serv;
+
+ spin_lock_bh(&serv->sv_lock);
+
+ /* Move all of the old listener sockets to a temp list */
+ list_splice_init(&serv->sv_permsocks, &permsocks);
+
+ /*
+ * Walk the list of server_socks from userland and move any that match
+ * back to sv_permsocks
+ */
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_SOCK_ADDR, info->nlhdr,
+ GENL_HDRLEN, rem) {
+ struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
+ const char *xcl_name;
+ struct sockaddr *sa;
+
+ if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
+ nfsd_sock_nl_policy, info->extack) < 0)
+ continue;
+
+ if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME])
+ continue;
+
+ if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa))
+ continue;
+
+ xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]);
+ sa = nla_data(tb[NFSD_A_SOCK_ADDR]);
+
+ /* Put back any matching sockets */
+ list_for_each_entry_safe(xprt, tmp, &permsocks, xpt_list) {
+ /* This shouldn't be possible */
+ if (WARN_ON_ONCE(xprt->xpt_net != net)) {
+ list_move(&xprt->xpt_list, &serv->sv_permsocks);
+ continue;
+ }
+
+ /* If everything matches, put it back */
+ if (!strcmp(xprt->xpt_class->xcl_name, xcl_name) &&
+ rpc_cmp_addr_port(sa, (struct sockaddr *)&xprt->xpt_local)) {
+ list_move(&xprt->xpt_list, &serv->sv_permsocks);
+ break;
+ }
+ }
+ }
+
+ /*
+ * If there are listener transports remaining on the permsocks list,
+ * it means we were asked to remove a listener.
+ */
+ if (!list_empty(&permsocks)) {
+ list_splice_init(&permsocks, &serv->sv_permsocks);
+ delete = true;
+ }
+ spin_unlock_bh(&serv->sv_lock);
+
+ /* Do not remove listeners while there are active threads. */
+ if (serv->sv_nrthreads) {
+ err = -EBUSY;
+ goto out_unlock_mtx;
+ }
+
+ /*
+ * Since we can't delete an arbitrary llist entry, destroy the
+ * remaining listeners and recreate the list.
+ */
+ if (delete)
+ svc_xprt_destroy_all(serv, net, false);
+
+ /* walk list of addrs again, open any that still don't exist */
+ nlmsg_for_each_attr_type(attr, NFSD_A_SERVER_SOCK_ADDR, info->nlhdr,
+ GENL_HDRLEN, rem) {
+ struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
+ const char *xcl_name;
+ struct sockaddr *sa;
+ int ret;
+
+ if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
+ nfsd_sock_nl_policy, info->extack) < 0)
+ continue;
+
+ if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME])
+ continue;
+
+ if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa))
+ continue;
+
+ xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]);
+ sa = nla_data(tb[NFSD_A_SOCK_ADDR]);
+
+ xprt = svc_find_listener(serv, xcl_name, net, sa);
+ if (xprt) {
+ if (delete)
+ WARN_ONCE(1, "Transport type=%s already exists\n",
+ xcl_name);
+ svc_xprt_put(xprt);
+ continue;
+ }
+
+ ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0,
+ get_current_cred());
+ /* always save the latest error */
+ if (ret < 0)
+ err = ret;
+ }
+
+ if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
+ nfsd_destroy_serv(net);
+
+out_unlock_mtx:
+ mutex_unlock(&nfsd_mutex);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_listener_get_doit - report the NFS server's listener sockets
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct svc_xprt *xprt;
+ struct svc_serv *serv;
+ struct nfsd_net *nn;
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ mutex_lock(&nfsd_mutex);
+ nn = net_generic(genl_info_net(info), nfsd_net_id);
+
+ /* no nfs server? Just send empty socket list */
+ if (!nn->nfsd_serv)
+ goto out_unlock_mtx;
+
+ serv = nn->nfsd_serv;
+ spin_lock_bh(&serv->sv_lock);
+ list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
+ struct nlattr *attr;
+
+ attr = nla_nest_start(skb, NFSD_A_SERVER_SOCK_ADDR);
+ if (!attr) {
+ err = -EINVAL;
+ goto err_serv_unlock;
+ }
+
+ if (nla_put_string(skb, NFSD_A_SOCK_TRANSPORT_NAME,
+ xprt->xpt_class->xcl_name) ||
+ nla_put(skb, NFSD_A_SOCK_ADDR,
+ sizeof(struct sockaddr_storage),
+ &xprt->xpt_local)) {
+ err = -EINVAL;
+ goto err_serv_unlock;
+ }
+
+ nla_nest_end(skb, attr);
+ }
+ spin_unlock_bh(&serv->sv_lock);
+out_unlock_mtx:
+ mutex_unlock(&nfsd_mutex);
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+
+err_serv_unlock:
+ spin_unlock_bh(&serv->sv_lock);
+ mutex_unlock(&nfsd_mutex);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_pool_mode_set_doit - set the current pool mode
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_pool_mode_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NFSD_A_POOL_MODE_MODE))
+ return -EINVAL;
+
+ attr = info->attrs[NFSD_A_POOL_MODE_MODE];
+ return sunrpc_set_pool_mode(nla_data(attr));
+}
+
+/**
+ * nfsd_nl_pool_mode_get_doit - get info about pool_mode
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_pool_mode_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ char buf[16];
+ void *hdr;
+ int err;
+
+ if (sunrpc_get_pool_mode(buf, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+ return -ERANGE;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = -EMSGSIZE;
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr)
+ goto err_free_msg;
+
+ err = nla_put_string(skb, NFSD_A_POOL_MODE_MODE, buf) |
+ nla_put_u32(skb, NFSD_A_POOL_MODE_NPOOLS, nfsd_nrpools(net));
+ if (err)
+ goto err_free_msg;
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_reply(skb, info);
+
+err_free_msg:
+ nlmsg_free(skb);
+ return err;
+}
+
+/**
+ * nfsd_net_init - Prepare the nfsd_net portion of a new net namespace
+ * @net: a freshly-created network namespace
+ *
+ * This information stays around as long as the network namespace is
+ * alive whether or not there is an NFSD instance running in the
+ * namespace.
+ *
+ * Returns zero on success, or a negative errno otherwise.
+ */
+static __net_init int nfsd_net_init(struct net *net)
{
- int retval;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int retval;
+ int i;
retval = nfsd_export_init(net);
if (retval)
@@ -1471,27 +2151,36 @@ static __net_init int nfsd_init_net(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
- nn->nfsd_versions = NULL;
- nn->nfsd4_minorversions = NULL;
- retval = nfsd_reply_cache_init(nn);
+ retval = percpu_counter_init_many(nn->counter, 0, GFP_KERNEL,
+ NFSD_STATS_COUNTERS_NUM);
if (retval)
- goto out_drc_error;
- nn->nfsd4_lease = 90; /* default lease time */
- nn->nfsd4_grace = 90;
- nn->somebody_reclaimed = false;
- nn->track_reclaim_completes = false;
- nn->clverifier_counter = prandom_u32();
- nn->clientid_base = prandom_u32();
- nn->clientid_counter = nn->clientid_base + 1;
- nn->s2s_cp_cl_id = nn->clientid_counter++;
-
- atomic_set(&nn->ntf_refcnt, 0);
- init_waitqueue_head(&nn->ntf_wq);
- seqlock_init(&nn->boot_lock);
+ goto out_repcache_error;
+ memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
+ nn->nfsd_svcstats.program = &nfsd_programs[0];
+ if (!nfsd_proc_stat_init(net)) {
+ retval = -ENOMEM;
+ goto out_proc_error;
+ }
+
+ for (i = 0; i < sizeof(nn->nfsd_versions); i++)
+ nn->nfsd_versions[i] = nfsd_support_version(i);
+ for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
+ nn->nfsd4_minorversions[i] = nfsd_support_version(4);
+ nn->nfsd_info.mutex = &nfsd_mutex;
+ nn->nfsd_serv = NULL;
+ nfsd4_init_leases_net(nn);
+ get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ seqlock_init(&nn->writeverf_lock);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ spin_lock_init(&nn->local_clients_lock);
+ INIT_LIST_HEAD(&nn->local_clients);
+#endif
return 0;
-out_drc_error:
+out_proc_error:
+ percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
+out_repcache_error:
nfsd_idmap_shutdown(net);
out_idmap_error:
nfsd_export_shutdown(net);
@@ -1499,19 +2188,44 @@ out_export_error:
return retval;
}
-static __net_exit void nfsd_exit_net(struct net *net)
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/**
+ * nfsd_net_pre_exit - Disconnect localio clients from net namespace
+ * @net: a network namespace that is about to be destroyed
+ *
+ * This invalidates ->net pointers held by localio clients
+ * while they can still safely access nn->counter.
+ */
+static __net_exit void nfsd_net_pre_exit(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- nfsd_reply_cache_shutdown(nn);
+ nfs_localio_invalidate_clients(&nn->local_clients,
+ &nn->local_clients_lock);
+}
+#endif
+
+/**
+ * nfsd_net_exit - Release the nfsd_net portion of a net namespace
+ * @net: a network namespace that is about to be destroyed
+ *
+ */
+static __net_exit void nfsd_net_exit(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfsd_proc_stat_shutdown(net);
+ percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
- nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
}
static struct pernet_operations nfsd_net_ops = {
- .init = nfsd_init_net,
- .exit = nfsd_exit_net,
+ .init = nfsd_net_init,
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ .pre_exit = nfsd_net_pre_exit,
+#endif
+ .exit = nfsd_net_exit,
.id = &nfsd_net_id,
.size = sizeof(struct nfsd_net),
};
@@ -1519,68 +2233,79 @@ static struct pernet_operations nfsd_net_ops = {
static int __init init_nfsd(void)
{
int retval;
- printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
- retval = register_cld_notifier();
- if (retval)
- return retval;
+ nfsd_debugfs_init();
+
retval = nfsd4_init_slabs();
if (retval)
- goto out_unregister_notifier;
+ return retval;
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
- retval = nfsd_stat_init(); /* Statistics */
- if (retval)
- goto out_free_pnfs;
retval = nfsd_drc_slab_create();
if (retval)
- goto out_free_stat;
+ goto out_free_pnfs;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
- retval = create_proc_exports_entry();
- if (retval)
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
goto out_free_lockd;
+ retval = register_cld_notifier();
+ if (retval)
+ goto out_free_subsys;
+ retval = nfsd4_create_laundry_wq();
+ if (retval)
+ goto out_free_cld;
retval = register_filesystem(&nfsd_fs_type);
if (retval)
- goto out_free_exports;
- retval = register_pernet_subsys(&nfsd_net_ops);
- if (retval < 0)
+ goto out_free_nfsd4;
+ retval = genl_register_family(&nfsd_nl_family);
+ if (retval)
+ goto out_free_filesystem;
+ retval = create_proc_exports_entry();
+ if (retval)
goto out_free_all;
+ nfsd_localio_ops_init();
+
return 0;
out_free_all:
+ genl_unregister_family(&nfsd_nl_family);
+out_free_filesystem:
unregister_filesystem(&nfsd_fs_type);
-out_free_exports:
- remove_proc_entry("fs/nfs/exports", NULL);
- remove_proc_entry("fs/nfs", NULL);
+out_free_nfsd4:
+ nfsd4_destroy_laundry_wq();
+out_free_cld:
+ unregister_cld_notifier();
+out_free_subsys:
+ unregister_pernet_subsys(&nfsd_net_ops);
out_free_lockd:
nfsd_lockd_shutdown();
nfsd_drc_slab_free();
-out_free_stat:
- nfsd_stat_shutdown();
out_free_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
-out_unregister_notifier:
- unregister_cld_notifier();
+ nfsd_debugfs_exit();
return retval;
}
static void __exit exit_nfsd(void)
{
- unregister_pernet_subsys(&nfsd_net_ops);
- nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
- nfsd_stat_shutdown();
+ genl_unregister_family(&nfsd_nl_family);
+ unregister_filesystem(&nfsd_fs_type);
+ nfsd4_destroy_laundry_wq();
+ unregister_cld_notifier();
+ unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_drc_slab_free();
nfsd_lockd_shutdown();
nfsd4_free_slabs();
nfsd4_exit_pnfs();
- unregister_filesystem(&nfsd_fs_type);
- unregister_cld_notifier();
+ nfsd_debugfs_exit();
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_DESCRIPTION("In-kernel NFS server");
MODULE_LICENSE("GPL");
module_init(init_nfsd)
module_exit(exit_nfsd)
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 498e5a489826..e4263326ca4a 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -23,9 +23,7 @@
#include <uapi/linux/nfsd/debug.h>
-#include "netns.h"
#include "export.h"
-#include "stats.h"
#undef ifdebug
#ifdef CONFIG_SUNRPC_DEBUG
@@ -37,39 +35,50 @@
/*
* nfsd version
*/
+#define NFSD_MINVERS 2
+#define NFSD_MAXVERS 4
#define NFSD_SUPPORTED_MINOR_VERSION 2
-/*
- * Maximum blocksizes supported by daemon under various circumstances.
- */
-#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
-/* NFSv2 is limited by the protocol specification, see RFC 1094 */
-#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
+bool nfsd_support_version(int vers);
+#include "netns.h"
+#include "stats.h"
/*
- * Largest number of bytes we need to allocate for an NFS
- * call or reply. Used to control buffer sizes. We use
- * the length of v3 WRITE, READDIR and READDIR replies
- * which are an RPC header, up to 26 XDR units of reply
- * data, and some page data.
- *
- * Note that accuracy here doesn't matter too much as the
- * size is rounded up to a page size when allocating space.
+ * Default and maximum payload size (NFS READ or WRITE), in bytes.
+ * The default is historical, and the maximum is an implementation
+ * limit.
*/
-#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
+enum {
+ NFSSVC_DEFBLKSIZE = 1 * 1024 * 1024,
+ NFSSVC_MAXBLKSIZE = RPCSVC_MAXPAYLOAD,
+};
struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
};
+/* Maximum number of operations per session compound */
+#define NFSD_MAX_OPS_PER_COMPOUND 200
+
+struct nfsd_genl_rqstp {
+ struct sockaddr rq_daddr;
+ struct sockaddr rq_saddr;
+ unsigned long rq_flags;
+ ktime_t rq_stime;
+ __be32 rq_xid;
+ u32 rq_vers;
+ u32 rq_prog;
+ u32 rq_proc;
+
+ /* NFSv4 compound */
+ u32 rq_opcnt;
+ u32 rq_opnum[16];
+};
-extern struct svc_program nfsd_program;
-extern const struct svc_version nfsd_version2, nfsd_version3,
- nfsd_version4;
+extern struct svc_program nfsd_programs[];
+extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
extern struct mutex nfsd_mutex;
-extern spinlock_t nfsd_drc_lock;
-extern unsigned long nfsd_drc_max_mem;
-extern unsigned long nfsd_drc_mem_used;
+extern atomic_t nfsd_th_cnt; /* number of available threads */
extern const struct seq_operations nfs_exports_op;
@@ -86,20 +95,17 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp,
/*
* Function prototypes.
*/
-int nfsd_svc(int nrservs, struct net *net, const struct cred *cred);
-int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp);
+int nfsd_svc(int n, int *nservers, struct net *net,
+ const struct cred *cred, const char *scope);
+int nfsd_dispatch(struct svc_rqst *rqstp);
int nfsd_nrthreads(struct net *);
int nfsd_nrpools(struct net *);
int nfsd_get_nrthreads(int n, int *, struct net *);
int nfsd_set_nrthreads(int n, int *, struct net *);
-int nfsd_pool_stats_open(struct inode *, struct file *);
-int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_shutdown_threads(struct net *net);
-void nfsd_destroy(struct net *net);
-
-bool i_am_nfsd(void);
+struct svc_rqst *nfsd_current_rqst(void);
struct nfsdfs_client {
struct kref cl_ref;
@@ -127,6 +133,10 @@ extern const struct svc_version nfsd_acl_version3;
#endif
#endif
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+extern const struct svc_version localio_version1;
+#endif
+
struct nfsd_net;
enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
@@ -134,12 +144,33 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
void nfsd_reset_versions(struct nfsd_net *nn);
int nfsd_create_serv(struct net *net);
+void nfsd_destroy_serv(struct net *net);
+
+#ifdef CONFIG_DEBUG_FS
+void nfsd_debugfs_init(void);
+void nfsd_debugfs_exit(void);
+#else
+static inline void nfsd_debugfs_init(void) {}
+static inline void nfsd_debugfs_exit(void) {}
+#endif
+
+extern bool nfsd_disable_splice_read __read_mostly;
+
+enum {
+ /* Any new NFSD_IO enum value must be added at the end */
+ NFSD_IO_BUFFERED,
+ NFSD_IO_DONTCACHE,
+ NFSD_IO_DIRECT,
+};
+
+extern u64 nfsd_io_cache_read __read_mostly;
+extern u64 nfsd_io_cache_write __read_mostly;
extern int nfsd_max_blksize;
static inline int nfsd_v4client(struct svc_rqst *rq)
{
- return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
+ return rq && rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
}
static inline struct user_namespace *
nfsd_user_namespace(const struct svc_rqst *rqstp)
@@ -162,6 +193,9 @@ void nfs4_state_shutdown_net(struct net *net);
int nfs4_reset_recoverydir(char *recdir);
char * nfs4_recoverydir(void);
bool nfsd4_spo_must_allow(struct svc_rqst *rqstp);
+int nfsd4_create_laundry_wq(void);
+void nfsd4_destroy_laundry_wq(void);
+bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode);
#else
static inline int nfsd4_init_slabs(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
@@ -175,6 +209,13 @@ static inline bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
{
return false;
}
+static inline int nfsd4_create_laundry_wq(void) { return 0; };
+static inline void nfsd4_destroy_laundry_wq(void) {};
+static inline bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp,
+ struct inode *inode)
+{
+ return false;
+}
#endif
/*
@@ -204,7 +245,6 @@ void nfsd_lockd_shutdown(void);
#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
-#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
@@ -249,9 +289,11 @@ void nfsd_lockd_shutdown(void);
#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
+#define nfserr_admin_revoked cpu_to_be32(NFS4ERR_ADMIN_REVOKED)
#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
+#define nfserr_delay cpu_to_be32(NFS4ERR_DELAY)
#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
@@ -300,17 +342,30 @@ void nfsd_lockd_shutdown(void);
#define nfserr_xattr2big cpu_to_be32(NFS4ERR_XATTR2BIG)
#define nfserr_noxattr cpu_to_be32(NFS4ERR_NOXATTR)
-/* error codes for internal use */
-/* if a request fails due to kmalloc failure, it gets dropped.
- * Client should resend eventually
+/*
+ * Error codes for internal use. We use an enum to choose numbers that are
+ * not already assigned, then convert to be32, resulting in a number that
+ * cannot conflict with any existing be32 nfserr value.
*/
-#define nfserr_dropit cpu_to_be32(30000)
+enum {
/* end-of-file indicator in readdir */
-#define nfserr_eof cpu_to_be32(30001)
+ NFSERR_EOF = NFS4ERR_FIRST_FREE,
+#define nfserr_eof cpu_to_be32(NFSERR_EOF)
+
/* replay detected */
-#define nfserr_replay_me cpu_to_be32(11001)
+ NFSERR_REPLAY_ME,
+#define nfserr_replay_me cpu_to_be32(NFSERR_REPLAY_ME)
+
/* nfs41 replay detected */
-#define nfserr_replay_cache cpu_to_be32(11002)
+ NFSERR_REPLAY_CACHE,
+#define nfserr_replay_cache cpu_to_be32(NFSERR_REPLAY_CACHE)
+
+/* symlink found where dir expected - handled differently to
+ * other symlink found errors by NFSv3.
+ */
+ NFSERR_SYMLINK_NOT_DIR,
+#define nfserr_symlink_not_dir cpu_to_be32(NFSERR_SYMLINK_NOT_DIR)
+};
/* Check for dir entries '.' and '..' */
#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
@@ -336,16 +391,20 @@ void nfsd_lockd_shutdown(void);
#define COMPOUND_ERR_SLACK_SPACE 16 /* OP_SETATTR */
#define NFSD_LAUNDROMAT_MINTIMEOUT 1 /* seconds */
+#define NFSD_COURTESY_CLIENT_TIMEOUT (24 * 60 * 60) /* seconds */
+#define NFSD_CLIENT_MAX_TRIM_PER_RUN 128
+#define NFS4_CLIENTS_PER_GB 1024
+#define NFSD_DELEGRETURN_TIMEOUT (HZ / 34) /* 30ms */
+#define NFSD_CB_GETATTR_TIMEOUT NFSD_DELEGRETURN_TIMEOUT
/*
- * The following attributes are currently not supported by the NFSv4 server:
+ * The following attributes are not implemented by NFSD:
* ARCHIVE (deprecated anyway)
* HIDDEN (unlikely to be supported any time soon)
* MIMETYPE (unlikely to be supported any time soon)
* QUOTA_* (will be supported in a forthcoming patch)
* SYSTEM (unlikely to be supported any time soon)
* TIME_BACKUP (unlikely to be supported any time soon)
- * TIME_CREATE (unlikely to be supported any time soon)
*/
#define NFSD4_SUPPORTED_ATTRS_WORD0 \
(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
@@ -364,7 +423,7 @@ void nfsd_lockd_shutdown(void);
| FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
| FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
| FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_ACCESS_SET \
- | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
+ | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_CREATE \
| FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
#define NFSD4_SUPPORTED_ATTRS_WORD2 0
@@ -399,8 +458,12 @@ void nfsd_lockd_shutdown(void);
#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
FATTR4_WORD2_MODE_UMASK | \
+ FATTR4_WORD2_CLONE_BLKSIZE | \
NFSD4_2_SECURITY_ATTRS | \
- FATTR4_WORD2_XATTR_SUPPORT)
+ FATTR4_WORD2_XATTR_SUPPORT | \
+ FATTR4_WORD2_TIME_DELEG_ACCESS | \
+ FATTR4_WORD2_TIME_DELEG_MODIFY | \
+ FATTR4_WORD2_OPEN_ARGUMENTS)
extern const u32 nfsd_suppattrs[3][3];
@@ -460,7 +523,8 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)
(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL)
#define NFSD_WRITEABLE_ATTRS_WORD1 \
(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
- | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
+ | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_CREATE \
+ | FATTR4_WORD1_TIME_MODIFY_SET)
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#define MAYBE_FATTR4_WORD2_SECURITY_LABEL \
FATTR4_WORD2_SECURITY_LABEL
@@ -469,7 +533,10 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)
#endif
#define NFSD_WRITEABLE_ATTRS_WORD2 \
(FATTR4_WORD2_MODE_UMASK \
- | MAYBE_FATTR4_WORD2_SECURITY_LABEL)
+ | MAYBE_FATTR4_WORD2_SECURITY_LABEL \
+ | FATTR4_WORD2_TIME_DELEG_ACCESS \
+ | FATTR4_WORD2_TIME_DELEG_MODIFY \
+ )
#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
NFSD_WRITEABLE_ATTRS_WORD0
@@ -490,12 +557,16 @@ extern void unregister_cld_notifier(void);
extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
#endif
+extern void nfsd4_init_leases_net(struct nfsd_net *nn);
+
#else /* CONFIG_NFSD_V4 */
static inline int nfsd4_is_junction(struct dentry *dentry)
{
return 0;
}
+static inline void nfsd4_init_leases_net(struct nfsd_net *nn) { };
+
#define register_cld_notifier() 0
#define unregister_cld_notifier() do { } while(0)
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index f3779fa72c89..ed85dd43da18 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -40,7 +40,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
/* make sure parents give x permission to user */
int err;
parent = dget_parent(tdentry);
- err = inode_permission(&init_user_ns,
+ err = inode_permission(&nop_mnt_idmap,
d_inode(parent), MAY_EXEC);
if (err < 0) {
dput(parent);
@@ -62,8 +62,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
- umode_t requested)
+nfsd_mode_check(struct dentry *dentry, umode_t requested)
{
umode_t mode = d_inode(dentry)->i_mode & S_IFMT;
@@ -76,36 +75,36 @@ nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
}
return nfs_ok;
}
- /*
- * v4 has an error more specific than err_notdir which we should
- * return in preference to err_notdir:
- */
- if (rqstp->rq_vers == 4 && mode == S_IFLNK)
+ if (mode == S_IFLNK) {
+ if (requested == S_IFDIR)
+ return nfserr_symlink_not_dir;
return nfserr_symlink;
+ }
if (requested == S_IFDIR)
return nfserr_notdir;
if (mode == S_IFDIR)
return nfserr_isdir;
- return nfserr_inval;
+ return nfserr_wrong_type;
}
-static bool nfsd_originating_port_ok(struct svc_rqst *rqstp, int flags)
+static bool nfsd_originating_port_ok(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
+ struct svc_export *exp)
{
- if (flags & NFSEXP_INSECURE_PORT)
+ if (nfsexp_flags(cred, exp) & NFSEXP_INSECURE_PORT)
return true;
/* We don't require gss requests to use low ports: */
- if (rqstp->rq_cred.cr_flavor >= RPC_AUTH_GSS)
+ if (cred->cr_flavor >= RPC_AUTH_GSS)
return true;
return test_bit(RQ_SECURE, &rqstp->rq_flags);
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
struct svc_export *exp)
{
- int flags = nfsexp_flags(rqstp, exp);
-
/* Check if the request originated from a secure port. */
- if (!nfsd_originating_port_ok(rqstp, flags)) {
+ if (rqstp && !nfsd_originating_port_ok(rqstp, cred, exp)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -113,23 +112,15 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
}
/* Set user creds for this exportpoint */
- return nfserrno(nfsd_setuser(rqstp, exp));
+ return nfserrno(nfsd_setuser(cred, exp));
}
-static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
- struct dentry *dentry, struct svc_export *exp)
+static inline __be32 check_pseudo_root(struct dentry *dentry,
+ struct svc_export *exp)
{
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return nfs_ok;
/*
- * v2/v3 clients have no need for the V4ROOT export--they use
- * the mount protocl instead; also, further V4ROOT checks may be
- * in v4-specific code, in which case v2/v3 clients could bypass
- * them.
- */
- if (!nfsd_v4client(rqstp))
- return nfserr_stale;
- /*
* We're exposing only the directories and symlinks that have to be
* traversed on the way to real exports:
*/
@@ -151,7 +142,11 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
* dentry. On success, the results are used to set fh_export and
* fh_dentry.
*/
-static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp)
{
struct knfsd_fh *fh = &fhp->fh_handle;
struct fid *fid = NULL;
@@ -162,10 +157,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
int len;
__be32 error;
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
- if (rqstp->rq_vers == 4 && fh->fh_size == 0)
+ error = nfserr_badhandle;
+ if (fh->fh_size == 0)
return nfserr_nofilehandle;
if (fh->fh_version != 1)
@@ -179,6 +172,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
if (len == 0)
return error;
if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
+ u32 *fsid = fh_fsid(fh);
+
/* deprecated, convert to type 3 */
len = key_len(FSID_ENCODE_DEV)/4;
fh->fh_fsid_type = FSID_ENCODE_DEV;
@@ -188,15 +183,17 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
* confuses sparse, so we must use __force here to
* keep it from complaining.
*/
- fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fh->fh_fsid[0]),
- ntohl((__force __be32)fh->fh_fsid[1])));
- fh->fh_fsid[1] = fh->fh_fsid[2];
+ fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fsid[0]),
+ ntohl((__force __be32)fsid[1])));
+ fsid[1] = fsid[2];
}
data_left -= len;
if (data_left < 0)
return error;
- exp = rqst_exp_find(rqstp, fh->fh_fsid_type, fh->fh_fsid);
- fid = (struct fid *)(fh->fh_fsid + len);
+ exp = rqst_exp_find(rqstp ? &rqstp->rq_chandle : NULL,
+ net, client, gssclient,
+ fh->fh_fsid_type, fh_fsid(fh));
+ fid = (struct fid *)(fh_fsid(fh) + len);
error = nfserr_stale;
if (IS_ERR(exp)) {
@@ -227,9 +224,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
- put_cred(new);
} else {
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
}
@@ -237,9 +233,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
/*
* Look up the dentry using the NFS file handle.
*/
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
+ error = nfserr_badhandle;
fileid_type = fh->fh_fileid_type;
@@ -247,7 +241,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
dentry = dget(exp->ex_path.dentry);
else {
dentry = exportfs_decode_fh_raw(exp->ex_path.mnt, fid,
- data_left, fileid_type,
+ data_left, fileid_type, 0,
nfsd_acceptable, exp);
if (IS_ERR_OR_NULL(dentry)) {
trace_nfsd_set_fh_dentry_badhandle(rqstp, fhp,
@@ -275,22 +269,30 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
dentry);
}
- fhp->fh_dentry = dentry;
- fhp->fh_export = exp;
-
- switch (rqstp->rq_vers) {
- case 4:
+ switch (fhp->fh_maxsize) {
+ case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
fhp->fh_no_atomic_attr = true;
+ fhp->fh_64bit_cookies = true;
break;
- case 3:
+ case NFS3_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOWCC)
fhp->fh_no_wcc = true;
+ fhp->fh_64bit_cookies = true;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
+ goto out;
break;
- case 2:
+ case NFS_FHSIZE:
fhp->fh_no_wcc = true;
+ if (EX_WGATHER(exp))
+ fhp->fh_use_wgather = true;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
+ goto out;
}
+ fhp->fh_dentry = dentry;
+ fhp->fh_export = exp;
+
return 0;
out:
exp_put(exp);
@@ -298,48 +300,42 @@ out:
}
/**
- * fh_verify - filehandle lookup and access checking
- * @rqstp: pointer to current rpc request
+ * __fh_verify - filehandle lookup and access checking
+ * @rqstp: RPC transaction context, or NULL
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @gssclient: RPC GSS auth domain, or NULL
* @fhp: filehandle to be verified
* @type: expected type of object pointed to by filehandle
* @access: type of access needed to object
*
- * Look up a dentry from the on-the-wire filehandle, check the client's
- * access to the export, and set the current task's credentials.
- *
- * Regardless of success or failure of fh_verify(), fh_put() should be
- * called on @fhp when the caller is finished with the filehandle.
- *
- * fh_verify() may be called multiple times on a given filehandle, for
- * example, when processing an NFSv4 compound. The first call will look
- * up a dentry using the on-the-wire filehandle. Subsequent calls will
- * skip the lookup and just perform the other checks and possibly change
- * the current task's credentials.
- *
- * @type specifies the type of object expected using one of the S_IF*
- * constants defined in include/linux/stat.h. The caller may use zero
- * to indicate that it doesn't care, or a negative integer to indicate
- * that it expects something not of the given type.
- *
- * @access is formed from the NFSD_MAY_* constants defined in
- * fs/nfsd/vfs.h.
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
*/
-__be32
-fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+static __be32
+__fh_verify(struct svc_rqst *rqstp,
+ struct net *net, struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp, umode_t type, int access)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *exp = NULL;
+ bool may_bypass_gss = false;
struct dentry *dentry;
__be32 error;
- dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp));
-
if (!fhp->fh_dentry) {
- error = nfsd_set_fh_dentry(rqstp, fhp);
+ error = nfsd_set_fh_dentry(rqstp, net, cred, client,
+ gssclient, fhp);
if (error)
goto out;
}
dentry = fhp->fh_dentry;
exp = fhp->fh_export;
+
+ trace_nfsd_fh_verify(rqstp, fhp, type, access);
+
/*
* We still have to do all these permission checks, even when
* fh_dentry is already set:
@@ -356,25 +352,44 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
* (for example, if different id-squashing options are in
* effect on the new filesystem).
*/
- error = check_pseudo_root(rqstp, dentry, exp);
+ error = check_pseudo_root(dentry, exp);
if (error)
goto out;
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
- error = nfsd_mode_check(rqstp, dentry, type);
+ error = nfsd_mode_check(dentry, type);
if (error)
goto out;
/*
- * pseudoflavor restrictions are not enforced on NLM,
- * which clients virtually always use auth_sys for,
- * even while using RPCSEC_GSS for NFS.
+ * If rqstp is NULL, this is a LOCALIO request which will only
+ * ever use a filehandle/credential pair for which access has
+ * been affirmed (by ACCESS or OPEN NFS requests) over the
+ * wire. Skip both the xprtsec policy and the security flavor
+ * checks.
*/
- if (access & NFSD_MAY_LOCK || access & NFSD_MAY_BYPASS_GSS)
- goto skip_pseudoflavor_check;
+ if (!rqstp)
+ goto check_permissions;
+
+ if ((access & NFSD_MAY_NLM) && (exp->ex_flags & NFSEXP_NOAUTHNLM))
+ /* NLM is allowed to fully bypass authentication */
+ goto out;
+
+ /*
+ * NLM is allowed to bypass the xprtsec policy check because lockd
+ * doesn't support xprtsec.
+ */
+ if (!(access & NFSD_MAY_NLM)) {
+ error = check_xprtsec_policy(exp, rqstp);
+ if (error)
+ goto out;
+ }
+
+ if (access & NFSD_MAY_BYPASS_GSS)
+ may_bypass_gss = true;
/*
* Clients may expect to be able to use auth_sys during mount,
* even if they use gss for everything else; see section 2.3.2
@@ -382,28 +397,81 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
*/
if (access & NFSD_MAY_BYPASS_GSS_ON_ROOT
&& exp->ex_path.dentry == dentry)
- goto skip_pseudoflavor_check;
+ may_bypass_gss = true;
- error = check_nfsd_access(exp, rqstp);
+ error = check_security_flavor(exp, rqstp, may_bypass_gss);
if (error)
goto out;
-skip_pseudoflavor_check:
- /* Finally, check access permissions. */
- error = nfsd_permission(rqstp, exp, dentry, access);
+ svc_xprt_set_valid(rqstp->rq_xprt);
- if (error) {
- dprintk("fh_verify: %pd2 permission failure, "
- "acc=%x, error=%d\n",
- dentry,
- access, ntohl(error));
- }
+check_permissions:
+ /* Finally, check access permissions. */
+ error = nfsd_permission(cred, exp, dentry, access);
out:
+ trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
if (error == nfserr_stale)
- nfsd_stats_fh_stale_inc(exp);
+ nfsd_stats_fh_stale_inc(nn, exp);
return error;
}
+/**
+ * fh_verify_local - filehandle lookup and access checking
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * This API can be used by callers who do not have an RPC
+ * transaction context (ie are not running in an nfsd thread).
+ *
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
+ */
+__be32
+fh_verify_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ umode_t type, int access)
+{
+ return __fh_verify(NULL, net, cred, client, NULL,
+ fhp, type, access);
+}
+
+/**
+ * fh_verify - filehandle lookup and access checking
+ * @rqstp: pointer to current rpc request
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * Look up a dentry from the on-the-wire filehandle, check the client's
+ * access to the export, and set the current task's credentials.
+ *
+ * Regardless of success or failure of fh_verify(), fh_put() should be
+ * called on @fhp when the caller is finished with the filehandle.
+ *
+ * fh_verify() may be called multiple times on a given filehandle, for
+ * example, when processing an NFSv4 compound. The first call will look
+ * up a dentry using the on-the-wire filehandle. Subsequent calls will
+ * skip the lookup and just perform the other checks and possibly change
+ * the current task's credentials.
+ *
+ * @type specifies the type of object expected using one of the S_IF*
+ * constants defined in include/linux/stat.h. The caller may use zero
+ * to indicate that it doesn't care, or a negative integer to indicate
+ * that it expects something not of the given type.
+ *
+ * @access is formed from the NFSD_MAY_* constants defined in
+ * fs/nfsd/vfs.h.
+ */
+__be32
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+{
+ return __fh_verify(rqstp, SVC_NET(rqstp), &rqstp->rq_cred,
+ rqstp->rq_client, rqstp->rq_gssclient,
+ fhp, type, access);
+}
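For orientation, a minimal sketch (not part of the patch) of how the two entry points are meant to be driven; example_wire_op() and example_localio_op() are invented names, and the operation bodies are elided:

/*
 * Sketch only: verify/operate/put pattern for the two fh_verify variants.
 */
static __be32 example_wire_op(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	__be32 status;

	/* Ordinary RPC path: credentials and client come from @rqstp. */
	status = fh_verify(rqstp, fhp, S_IFREG, NFSD_MAY_READ);
	if (status != nfs_ok)
		goto out;
	/* ... operate on fhp->fh_dentry ... */
out:
	/* fh_put() is required whether or not fh_verify() succeeded. */
	fh_put(fhp);
	return status;
}

static __be32 example_localio_op(struct net *net, struct svc_cred *cred,
				 struct auth_domain *client, struct svc_fh *fhp)
{
	__be32 status;

	/* LOCALIO path: no svc_rqst, so the caller supplies cred/client. */
	status = fh_verify_local(net, cred, client, fhp, S_IFREG, NFSD_MAY_READ);
	if (status != nfs_ok)
		goto out;
	/* ... local I/O against fhp->fh_dentry ... */
out:
	fh_put(fhp);
	return status;
}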
/*
* Compose a file handle for an NFS reply.
@@ -417,12 +485,15 @@ static void _fh_update(struct svc_fh *fhp, struct svc_export *exp,
{
if (dentry != exp->ex_path.dentry) {
struct fid *fid = (struct fid *)
- (fhp->fh_handle.fh_fsid + fhp->fh_handle.fh_size/4 - 1);
+ (fh_fsid(&fhp->fh_handle) + fhp->fh_handle.fh_size/4 - 1);
int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
- int subtreecheck = !(exp->ex_flags & NFSEXP_NOSUBTREECHECK);
+ int fh_flags = (exp->ex_flags & NFSEXP_NOSUBTREECHECK) ? 0 :
+ EXPORT_FH_CONNECTABLE;
+ int fileid_type =
+ exportfs_encode_fh(dentry, fid, &maxsize, fh_flags);
fhp->fh_handle.fh_fileid_type =
- exportfs_encode_fh(dentry, fid, &maxsize, subtreecheck);
+ fileid_type > 0 ? fileid_type : FILEID_INVALID;
fhp->fh_handle.fh_size += maxsize * 4;
} else {
fhp->fh_handle.fh_fileid_type = FILEID_ROOT;
@@ -548,7 +619,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
if (ref_fh == fhp)
fh_put(ref_fh);
- if (fhp->fh_locked || fhp->fh_dentry) {
+ if (fhp->fh_dentry) {
printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
dentry);
}
@@ -565,7 +636,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
fhp->fh_handle.fh_auth_type = 0;
mk_fsid(fhp->fh_handle.fh_fsid_type,
- fhp->fh_handle.fh_fsid,
+ fh_fsid(&fhp->fh_handle),
ex_dev,
d_inode(exp->ex_path.dentry)->i_ino,
exp->ex_fsid, exp->ex_uuid);
@@ -574,7 +645,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
_fh_update(fhp, exp, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
fh_put(fhp);
- return nfserr_opnotsupp;
+ return nfserr_stale;
}
return 0;
@@ -600,7 +671,7 @@ fh_update(struct svc_fh *fhp)
_fh_update(fhp, fhp->fh_export, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
- return nfserr_opnotsupp;
+ return nfserr_stale;
return 0;
out_bad:
printk(KERN_ERR "fh_update: fh not verified!\n");
@@ -611,6 +682,111 @@ out_negative:
return nfserr_serverfault;
}
+/**
+ * fh_getattr - Retrieve attributes on a local file
+ * @fhp: File handle of target file
+ * @stat: Caller-supplied kstat buffer to be filled in
+ *
+ * Returns nfs_ok on success, or an NFS status code on failure.
+ */
+__be32 fh_getattr(const struct svc_fh *fhp, struct kstat *stat)
+{
+ struct path p = {
+ .mnt = fhp->fh_export->ex_path.mnt,
+ .dentry = fhp->fh_dentry,
+ };
+ struct inode *inode = d_inode(p.dentry);
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (S_ISREG(inode->i_mode))
+ request_mask |= (STATX_DIOALIGN | STATX_DIO_READ_ALIGN);
+
+ if (fhp->fh_maxsize == NFS4_FHSIZE)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+
+ return nfserrno(vfs_getattr(&p, stat, request_mask,
+ AT_STATX_SYNC_AS_STAT));
+}
+
+/**
+ * fh_fill_pre_attrs - Fill in pre-op attributes
+ * @fhp: file handle to be updated
+ *
+ */
+__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp)
+{
+ bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
+ struct kstat stat;
+ __be32 err;
+
+ if (fhp->fh_no_wcc || fhp->fh_pre_saved)
+ return nfs_ok;
+
+ err = fh_getattr(fhp, &stat);
+ if (err)
+ return err;
+
+ if (v4)
+ fhp->fh_pre_change = nfsd4_change_attribute(&stat);
+
+ fhp->fh_pre_mtime = stat.mtime;
+ fhp->fh_pre_ctime = stat.ctime;
+ fhp->fh_pre_size = stat.size;
+ fhp->fh_pre_saved = true;
+ return nfs_ok;
+}
+
+/**
+ * fh_fill_post_attrs - Fill in post-op attributes
+ * @fhp: file handle to be updated
+ *
+ */
+__be32 fh_fill_post_attrs(struct svc_fh *fhp)
+{
+ bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
+ __be32 err;
+
+ if (fhp->fh_no_wcc)
+ return nfs_ok;
+
+ if (fhp->fh_post_saved)
+ printk("nfsd: inode locked twice during operation.\n");
+
+ err = fh_getattr(fhp, &fhp->fh_post_attr);
+ if (err)
+ return err;
+
+ fhp->fh_post_saved = true;
+ if (v4)
+ fhp->fh_post_change =
+ nfsd4_change_attribute(&fhp->fh_post_attr);
+ return nfs_ok;
+}
+
+/**
+ * fh_fill_both_attrs - Fill pre-op and post-op attributes
+ * @fhp: file handle to be updated
+ *
+ * This is used when the directory wasn't changed, but wcc attributes
+ * are needed anyway.
+ */
+__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp)
+{
+ __be32 err;
+
+ err = fh_fill_post_attrs(fhp);
+ if (err)
+ return err;
+
+ fhp->fh_pre_change = fhp->fh_post_change;
+ fhp->fh_pre_mtime = fhp->fh_post_attr.mtime;
+ fhp->fh_pre_ctime = fhp->fh_post_attr.ctime;
+ fhp->fh_pre_size = fhp->fh_post_attr.size;
+ fhp->fh_pre_saved = true;
+ return nfs_ok;
+}
+
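As a rough illustration (not taken from the patch), the __must_check fill helpers are meant to bracket a directory-modifying operation; example_wcc_bracket() is an invented name and the modification step is elided:

/* Sketch: wcc bracketing around a directory change on an already-verified fh. */
static __be32 example_wcc_bracket(struct svc_fh *dirfhp)
{
	__be32 status;

	status = fh_fill_pre_attrs(dirfhp);	/* saves pre-op size/mtime/ctime */
	if (status != nfs_ok)
		return status;

	/* ... create, remove, or rename an entry under dirfhp ... */

	/* Post-op attributes; fh_put() later resets both saved sets via
	 * fh_clear_pre_post_attrs(). */
	return fh_fill_post_attrs(dirfhp);
}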
/*
* Release a file handle.
*/
@@ -620,10 +796,9 @@ fh_put(struct svc_fh *fhp)
struct dentry * dentry = fhp->fh_dentry;
struct svc_export * exp = fhp->fh_export;
if (dentry) {
- fh_unlock(fhp);
fhp->fh_dentry = NULL;
dput(dentry);
- fh_clear_wcc(fhp);
+ fh_clear_pre_post_attrs(fhp);
}
fh_drop_write(fhp);
if (exp) {
@@ -642,7 +817,7 @@ char * SVCFH_fmt(struct svc_fh *fhp)
struct knfsd_fh *fh = &fhp->fh_handle;
static char buf[2+1+1+64*3+1];
- if (fh->fh_size < 0 || fh->fh_size> 64)
+ if (fh->fh_size > 64)
return "bad-fh";
sprintf(buf, "%d: %*ph", fh->fh_size, fh->fh_size, fh->fh_raw);
return buf;
@@ -675,3 +850,44 @@ enum fsid_source fsid_source(const struct svc_fh *fhp)
return FSIDSOURCE_UUID;
return FSIDSOURCE_DEV;
}
+
+/**
+ * nfsd4_change_attribute - Generate an NFSv4 change_attribute value
+ * @stat: inode attributes
+ *
+ * Caller must fill in @stat before calling, typically by invoking
+ * vfs_getattr() with STATX_MODE, STATX_CTIME, and STATX_CHANGE_COOKIE.
+ * Returns an unsigned 64-bit changeid4 value (RFC 8881 Section 3.2).
+ *
+ * We could use i_version alone as the change attribute. However, i_version
+ * can go backwards on a regular file after an unclean shutdown. On its own
+ * that doesn't necessarily cause a problem, but if i_version goes backwards
+ * and then is incremented again it could reuse a value that was previously
+ * used before boot, and a client who queried the two values might incorrectly
+ * assume nothing changed.
+ *
+ * By using both ctime and the i_version counter we guarantee that as long as
+ * time doesn't go backwards we never reuse an old value. If the filesystem
+ * advertises STATX_ATTR_CHANGE_MONOTONIC, then this mitigation is not
+ * needed.
+ *
+ * Likewise, we only need to do this for regular files. For directories, we
+ * assume that the new change attr is always logged to stable storage in some
+ * fashion before the results can be seen.
+ */
+u64 nfsd4_change_attribute(const struct kstat *stat)
+{
+ u64 chattr;
+
+ if (stat->result_mask & STATX_CHANGE_COOKIE) {
+ chattr = stat->change_cookie;
+ if (S_ISREG(stat->mode) &&
+ !(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
+ chattr += (u64)stat->ctime.tv_sec << 30;
+ chattr += stat->ctime.tv_nsec;
+ }
+ } else {
+ chattr = time_to_chattr(&stat->ctime);
+ }
+ return chattr;
+}
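A worked example of the arithmetic above, assuming a filesystem that reports STATX_CHANGE_COOKIE but not STATX_ATTR_CHANGE_MONOTONIC; the values and the example_change_attribute() wrapper are invented:

/* Sketch only: how the pieces combine for a regular file. */
static u64 example_change_attribute(void)
{
	struct kstat st = {
		.result_mask	= STATX_CHANGE_COOKIE,
		.mode		= S_IFREG | 0644,
		.change_cookie	= 42,	/* i_version-style counter */
		.ctime		= { .tv_sec = 1700000000, .tv_nsec = 123 },
	};

	/*
	 * Returns 42 + (1700000000ULL << 30) + 123: ctime dominates, so the
	 * counter only has to stay monotonic between ctime ticks for the
	 * result to keep moving forward.
	 */
	return nfsd4_change_attribute(&st);
}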
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index d11e4b6870d6..5ef7191f8ad8 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -14,6 +14,8 @@
#include <linux/exportfs.h>
#include <linux/nfs4.h>
+#include "export.h"
+
/*
* The file handle starts with a sequence of four-byte words.
* The first word contains a version number (1) and three descriptor bytes
@@ -49,18 +51,19 @@ struct knfsd_fh {
* Points to the current size while
* building a new file handle.
*/
- union {
- char fh_raw[NFS4_FHSIZE];
- struct {
- u8 fh_version; /* == 1 */
- u8 fh_auth_type; /* deprecated */
- u8 fh_fsid_type;
- u8 fh_fileid_type;
- u32 fh_fsid[]; /* flexible-array member */
- };
- };
+ u8 fh_raw[NFS4_FHSIZE];
};
+#define fh_version fh_raw[0]
+#define fh_auth_type fh_raw[1]
+#define fh_fsid_type fh_raw[2]
+#define fh_fileid_type fh_raw[3]
+
+static inline u32 *fh_fsid(const struct knfsd_fh *fh)
+{
+ return (u32 *)&fh->fh_raw[4];
+}
+
static inline __u32 ino_t_to_u32(ino_t ino)
{
return (__u32) ino;
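The union removed above encoded the same layout that the new macros and the fh_fsid() accessor expose; a small sketch with an invented dump helper shows the byte positions:

/* Sketch: the first four bytes of fh_raw carry the old header fields. */
static void example_dump_fh(const struct knfsd_fh *fh)
{
	pr_info("version=%u auth=%u fsid_type=%u fileid_type=%u fsid[0]=%08x\n",
		fh->fh_version,		/* fh_raw[0] */
		fh->fh_auth_type,	/* fh_raw[1] */
		fh->fh_fsid_type,	/* fh_raw[2] */
		fh->fh_fileid_type,	/* fh_raw[3] */
		fh->fh_size >= 8 ? fh_fsid(fh)[0] : 0);
}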
@@ -81,7 +84,6 @@ typedef struct svc_fh {
struct dentry * fh_dentry; /* validated dentry */
struct svc_export * fh_export; /* export pointer */
- bool fh_locked; /* inode locked by us */
bool fh_want_write; /* remount protection taken */
bool fh_no_wcc; /* no wcc data needed */
bool fh_no_atomic_attr;
@@ -89,12 +91,13 @@ typedef struct svc_fh {
* wcc data is not atomic with
* operation
*/
+ bool fh_use_wgather; /* NFSv2 wgather option */
+ bool fh_64bit_cookies;/* readdir cookie size */
int fh_flags; /* FH flags */
-#ifdef CONFIG_NFSD_V3
bool fh_post_saved; /* post-op attrs saved */
bool fh_pre_saved; /* pre-op attrs saved */
- /* Pre-op attributes saved during fh_lock */
+ /* Pre-op attributes saved when inode is locked */
__u64 fh_pre_size; /* size before operation */
struct timespec64 fh_pre_mtime; /* mtime before oper */
struct timespec64 fh_pre_ctime; /* ctime before oper */
@@ -104,10 +107,9 @@ typedef struct svc_fh {
*/
u64 fh_pre_change;
- /* Post-op attributes saved in fh_unlock */
+ /* Post-op attributes saved in fh_fill_post_attrs() */
struct kstat fh_post_attr; /* full attrs after operation */
u64 fh_post_change; /* nfsv4 change; see above */
-#endif /* CONFIG_NFSD_V3 */
} svc_fh;
#define NFSD4_FH_FOREIGN (1<<0)
#define SET_FH_FLAG(c, f) ((c)->fh_flags |= (f))
@@ -218,21 +220,24 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
* Function prototypes
*/
__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
+__be32 fh_verify_local(struct net *, struct svc_cred *, struct auth_domain *,
+ struct svc_fh *, umode_t, int);
+__be32 fh_getattr(const struct svc_fh *fhp, struct kstat *stat);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
static __inline__ struct svc_fh *
-fh_copy(struct svc_fh *dst, struct svc_fh *src)
+fh_copy(struct svc_fh *dst, const struct svc_fh *src)
{
- WARN_ON(src->fh_dentry || src->fh_locked);
-
+ WARN_ON(src->fh_dentry);
+
*dst = *src;
return dst;
}
static inline void
-fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
+fh_copy_shallow(struct knfsd_fh *dst, const struct knfsd_fh *src)
{
dst->fh_size = src->fh_size;
memcpy(&dst->fh_raw, &src->fh_raw, src->fh_size);
@@ -246,7 +251,8 @@ fh_init(struct svc_fh *fhp, int maxsize)
return fhp;
}
-static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+static inline bool fh_match(const struct knfsd_fh *fh1,
+ const struct knfsd_fh *fh2)
{
if (fh1->fh_size != fh2->fh_size)
return false;
@@ -255,127 +261,79 @@ static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
return true;
}
-static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+static inline bool fh_fsid_match(const struct knfsd_fh *fh1,
+ const struct knfsd_fh *fh2)
{
+ u32 *fsid1 = fh_fsid(fh1);
+ u32 *fsid2 = fh_fsid(fh2);
+
if (fh1->fh_fsid_type != fh2->fh_fsid_type)
return false;
- if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type)) != 0)
+ if (memcmp(fsid1, fsid2, key_len(fh1->fh_fsid_type)) != 0)
return false;
return true;
}
-#ifdef CONFIG_CRC32
/**
- * knfsd_fh_hash - calculate the crc32 hash for the filehandle
- * @fh - pointer to filehandle
+ * fh_want_write - Get write access to an export
+ * @fhp: File handle of file to be written
*
- * returns a crc32 hash for the filehandle that is compatible with
- * the one displayed by "wireshark".
- */
-static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
-{
- return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size);
-}
-#else
-static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_NFSD_V3
-/*
- * The wcc data stored in current_fh should be cleared
- * between compound ops.
- */
-static inline void
-fh_clear_wcc(struct svc_fh *fhp)
-{
- fhp->fh_post_saved = false;
- fhp->fh_pre_saved = false;
-}
-
-/*
- * We could use i_version alone as the change attribute. However,
- * i_version can go backwards after a reboot. On its own that doesn't
- * necessarily cause a problem, but if i_version goes backwards and then
- * is incremented again it could reuse a value that was previously used
- * before boot, and a client who queried the two values might
- * incorrectly assume nothing changed.
+ * Caller must invoke fh_drop_write() when its write operation
+ * is complete.
*
- * By using both ctime and the i_version counter we guarantee that as
- * long as time doesn't go backwards we never reuse an old value.
+ * Returns 0 if the file handle's export can be written to. Otherwise
+ * the export is not prepared for updates, and the returned negative
+ * errno value reflects the reason for the failure.
*/
-static inline u64 nfsd4_change_attribute(struct kstat *stat,
- struct inode *inode)
+static inline int fh_want_write(struct svc_fh *fhp)
{
- if (inode->i_sb->s_export_op->fetch_iversion)
- return inode->i_sb->s_export_op->fetch_iversion(inode);
- else if (IS_I_VERSION(inode)) {
- u64 chattr;
-
- chattr = stat->ctime.tv_sec;
- chattr <<= 30;
- chattr += stat->ctime.tv_nsec;
- chattr += inode_query_iversion(inode);
- return chattr;
- } else
- return time_to_chattr(&stat->ctime);
+ int ret;
+
+ if (fhp->fh_want_write)
+ return 0;
+ ret = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ if (!ret)
+ fhp->fh_want_write = true;
+ return ret;
}
-extern void fill_pre_wcc(struct svc_fh *fhp);
-extern void fill_post_wcc(struct svc_fh *fhp);
-#else
-#define fh_clear_wcc(ignored)
-#define fill_pre_wcc(ignored)
-#define fill_post_wcc(notused)
-#endif /* CONFIG_NFSD_V3 */
-
-
-/*
- * Lock a file handle/inode
- * NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
- * so, any changes here should be reflected there.
+/**
+ * fh_drop_write - Release write access on an export
+ * @fhp: File handle of file on which fh_want_write() was previously called
*/
-
-static inline void
-fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
+static inline void fh_drop_write(struct svc_fh *fhp)
{
- struct dentry *dentry = fhp->fh_dentry;
- struct inode *inode;
-
- BUG_ON(!dentry);
-
- if (fhp->fh_locked) {
- printk(KERN_WARNING "fh_lock: %pd2 already locked!\n",
- dentry);
- return;
+ if (fhp->fh_want_write) {
+ fhp->fh_want_write = false;
+ mnt_drop_write(fhp->fh_export->ex_path.mnt);
}
-
- inode = d_inode(dentry);
- inode_lock_nested(inode, subclass);
- fill_pre_wcc(fhp);
- fhp->fh_locked = true;
}
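A hedged sketch of the intended pairing (example_update() and the modification step are invented): fh_want_write() is idempotent thanks to the fh_want_write flag, and fh_drop_write() is a no-op when write access was never taken, which is why fh_put() can call it unconditionally.

/* Sketch: take and release write access around an update. */
static __be32 example_update(struct svc_fh *fhp)
{
	int ret;

	ret = fh_want_write(fhp);	/* negative errno on failure */
	if (ret)
		return nfserrno(ret);

	/* ... modify the file or directory behind fhp ... */

	fh_drop_write(fhp);		/* pairs with the successful fh_want_write() */
	return nfs_ok;
}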
-static inline void
-fh_lock(struct svc_fh *fhp)
+/**
+ * knfsd_fh_hash - calculate the crc32 hash for the filehandle
+ * @fh: pointer to filehandle
+ *
+ * Returns a crc32 hash for the filehandle that is compatible with
+ * the one displayed by Wireshark.
+ */
+static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
{
- fh_lock_nested(fhp, I_MUTEX_NORMAL);
+ return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size);
}
-/*
- * Unlock a file handle/inode
+/**
+ * fh_clear_pre_post_attrs - Reset pre/post attributes
+ * @fhp: file handle to be updated
+ *
*/
-static inline void
-fh_unlock(struct svc_fh *fhp)
+static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
{
- if (fhp->fh_locked) {
- fill_post_wcc(fhp);
- inode_unlock(d_inode(fhp->fh_dentry));
- fhp->fh_locked = false;
- }
+ fhp->fh_post_saved = false;
+ fhp->fh_pre_saved = false;
}
+u64 nfsd4_change_attribute(const struct kstat *stat);
+__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp);
+__be32 fh_fill_post_attrs(struct svc_fh *fhp);
+__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp);
#endif /* _LINUX_NFSD_NFSFH_H */
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index eea5b59b6a6c..481e789a7697 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -10,9 +10,35 @@
#include "cache.h"
#include "xdr.h"
#include "vfs.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static __be32 nfsd_map_status(__be32 status)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_nofilehandle:
+ case nfserr_badhandle:
+ status = nfserr_stale;
+ break;
+ case nfserr_wrongsec:
+ case nfserr_xdev:
+ case nfserr_file_open:
+ status = nfserr_acces;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_notdir;
+ break;
+ case nfserr_symlink:
+ case nfserr_wrong_type:
+ status = nfserr_inval;
+ break;
+ }
+ return status;
+}
+
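Each NFSv2 procedure below funnels its result through this helper immediately before returning, so status codes that exist only in NFSv3/NFSv4 (nfserr_wrongsec, nfserr_badhandle, and so on) are squashed to the nearest NFSv2-visible error. A sketch of the calling pattern; the procedure body is invented:

/* Sketch: an NFSv2 procedure maps its status just before replying. */
static __be32 example_nfsd_proc(struct svc_rqst *rqstp)
{
	struct nfsd_fhandle *argp = rqstp->rq_argp;
	struct nfsd_attrstat *resp = rqstp->rq_resp;

	fh_copy(&resp->fh, &argp->fh);
	resp->status = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
	/* e.g. nfserr_wrongsec from fh_verify() becomes nfserr_acces here */
	resp->status = nfsd_map_status(resp->status);
	return rpc_success;
}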
static __be32
nfsd_proc_null(struct svc_rqst *rqstp)
{
@@ -29,7 +55,7 @@ nfsd_proc_getattr(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
- dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
+ trace_nfsd_vfs_getattr(rqstp, &argp->fh);
fh_copy(&resp->fh, &argp->fh);
resp->status = fh_verify(rqstp, &resp->fh, 0,
@@ -38,6 +64,7 @@ nfsd_proc_getattr(struct svc_rqst *rqstp)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -51,6 +78,9 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
struct nfsd_sattrargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
struct iattr *iap = &argp->attrs;
+ struct nfsd_attrs attrs = {
+ .na_iattr = iap,
+ };
struct svc_fh *fhp;
dprintk("nfsd: SETATTR %s, valid=%x, size=%ld\n",
@@ -90,7 +120,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
if (delta < 0)
delta = -delta;
if (delta < MAX_TOUCH_TIME_ERROR &&
- setattr_prepare(&init_user_ns, fhp->fh_dentry, iap) != 0) {
+ setattr_prepare(&nop_mnt_idmap, fhp->fh_dentry, iap) != 0) {
/*
* Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
* This will cause notify_change to set these times
@@ -100,12 +130,13 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
}
}
- resp->status = nfsd_setattr(rqstp, fhp, iap, 0, (time64_t)0);
+ resp->status = nfsd_setattr(rqstp, fhp, &attrs, NULL);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -140,6 +171,7 @@ nfsd_proc_lookup(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -161,6 +193,7 @@ nfsd_proc_readlink(struct svc_rqst *rqstp)
page_address(resp->page), &resp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -173,27 +206,16 @@ nfsd_proc_read(struct svc_rqst *rqstp)
{
struct nfsd_readargs *argp = rqstp->rq_argp;
struct nfsd_readres *resp = rqstp->rq_resp;
- unsigned int len;
u32 eof;
- int v;
dprintk("nfsd: READ %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->offset);
- argp->count = min_t(u32, argp->count, NFSSVC_MAXBLKSIZE_V2);
+ argp->count = min_t(u32, argp->count, NFS_MAXDATA);
+ argp->count = min_t(u32, argp->count, rqstp->rq_res.buflen);
- v = 0;
- len = argp->count;
resp->pages = rqstp->rq_next_page;
- while (len > 0) {
- struct page *page = *(rqstp->rq_next_page++);
-
- rqstp->rq_vec[v].iov_base = page_address(page);
- rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
- len -= rqstp->rq_vec[v].iov_len;
- v++;
- }
/* Obtain buffer pointer for payload. 19 is 1 word for
* status, 17 words for fattr, and 1 word for the byte count.
@@ -203,11 +225,12 @@ nfsd_proc_read(struct svc_rqst *rqstp)
resp->count = argp->count;
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
- rqstp->rq_vec, v, &resp->count, &eof);
+ &resp->count, &eof);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
- return rpc_drop_reply;
+ set_bit(RQ_DROPME, &rqstp->rq_flags);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -228,26 +251,19 @@ nfsd_proc_write(struct svc_rqst *rqstp)
struct nfsd_writeargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
unsigned long cnt = argp->len;
- unsigned int nvecs;
- dprintk("nfsd: WRITE %s %d bytes at %d\n",
+ dprintk("nfsd: WRITE %s %u bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->len, argp->offset);
- nvecs = svc_fill_write_vector(rqstp, &argp->payload);
- if (!nvecs) {
- resp->status = nfserr_io;
- goto out;
- }
-
- resp->status = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
- argp->offset, rqstp->rq_vec, nvecs,
- &cnt, NFS_DATA_SYNC, NULL);
+ fh_copy(&resp->fh, &argp->fh);
+ resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
+ &argp->payload, &cnt, NFS_DATA_SYNC, NULL);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
- return rpc_drop_reply;
-out:
+ set_bit(RQ_DROPME, &rqstp->rq_flags);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -265,15 +281,15 @@ nfsd_proc_create(struct svc_rqst *rqstp)
svc_fh *dirfhp = &argp->fh;
svc_fh *newfhp = &resp->fh;
struct iattr *attr = &argp->attrs;
+ struct nfsd_attrs attrs = {
+ .na_iattr = attr,
+ };
struct inode *inode;
struct dentry *dchild;
int type, mode;
int hosterr;
dev_t rdev = 0, wanted = new_decode_dev(attr->ia_size);
- dprintk("nfsd: CREATE %s %.*s\n",
- SVCFH_fmt(dirfhp), argp->len, argp->name);
-
/* First verify the parent file handle */
resp->status = fh_verify(rqstp, dirfhp, S_IFDIR, NFSD_MAY_EXEC);
if (resp->status != nfs_ok)
@@ -290,17 +306,16 @@ nfsd_proc_create(struct svc_rqst *rqstp)
goto done;
}
- fh_lock_nested(dirfhp, I_MUTEX_PARENT);
- dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
+ dchild = start_creating(&nop_mnt_idmap, dirfhp->fh_dentry,
+ &QSTR_LEN(argp->name, argp->len));
if (IS_ERR(dchild)) {
resp->status = nfserrno(PTR_ERR(dchild));
- goto out_unlock;
+ goto out_write;
}
fh_init(newfhp, NFS_FHSIZE);
resp->status = fh_compose(newfhp, dirfhp->fh_export, dchild, dirfhp);
if (!resp->status && d_really_is_negative(dchild))
resp->status = nfserr_noent;
- dput(dchild);
if (resp->status) {
if (resp->status != nfserr_noent)
goto out_unlock;
@@ -310,7 +325,7 @@ nfsd_proc_create(struct svc_rqst *rqstp)
*/
resp->status = nfserr_acces;
if (!newfhp->fh_dentry) {
- printk(KERN_WARNING
+ printk(KERN_WARNING
"nfsd_proc_create: file handle not verified\n");
goto out_unlock;
}
@@ -341,10 +356,11 @@ nfsd_proc_create(struct svc_rqst *rqstp)
* echo thing > device-special-file-or-pipe
* by doing a CREATE with type==0
*/
- resp->status = nfsd_permission(rqstp,
- newfhp->fh_export,
- newfhp->fh_dentry,
- NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
+ resp->status = nfsd_permission(
+ &rqstp->rq_cred,
+ newfhp->fh_export,
+ newfhp->fh_dentry,
+ NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
if (resp->status && resp->status != nfserr_rofs)
goto out_unlock;
}
@@ -389,9 +405,11 @@ nfsd_proc_create(struct svc_rqst *rqstp)
resp->status = nfs_ok;
if (!inode) {
/* File doesn't exist. Create it and set attrs */
- resp->status = nfsd_create_locked(rqstp, dirfhp, argp->name,
- argp->len, attr, type, rdev,
- newfhp);
+ resp->status = nfsd_create_locked(rqstp, dirfhp, &attrs, type,
+ rdev, newfhp);
+ /* nfsd_create_locked() unlocked the parent */
+ dput(dchild);
+ goto out_write;
} else if (type == S_IFREG) {
dprintk("nfsd: existing %s, valid=%x, size=%ld\n",
argp->name, attr->ia_valid, (long) attr->ia_size);
@@ -401,13 +419,13 @@ nfsd_proc_create(struct svc_rqst *rqstp)
*/
attr->ia_valid &= ATTR_SIZE;
if (attr->ia_valid)
- resp->status = nfsd_setattr(rqstp, newfhp, attr, 0,
- (time64_t)0);
+ resp->status = nfsd_setattr(rqstp, newfhp, &attrs,
+ NULL);
}
out_unlock:
- /* We don't really need to unlock, as fh_put does it. */
- fh_unlock(dirfhp);
+ end_creating(dchild);
+out_write:
fh_drop_write(dirfhp);
done:
fh_put(dirfhp);
@@ -415,6 +433,7 @@ done:
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -424,13 +443,11 @@ nfsd_proc_remove(struct svc_rqst *rqstp)
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: REMOVE %s %.*s\n", SVCFH_fmt(&argp->fh),
- argp->len, argp->name);
-
/* Unlink. -SIFDIR means file must not be a directory */
resp->status = nfsd_unlink(rqstp, &argp->fh, -S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -440,15 +457,11 @@ nfsd_proc_rename(struct svc_rqst *rqstp)
struct nfsd_renameargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: RENAME %s %.*s -> \n",
- SVCFH_fmt(&argp->ffh), argp->flen, argp->fname);
- dprintk("nfsd: -> %s %.*s\n",
- SVCFH_fmt(&argp->tfh), argp->tlen, argp->tname);
-
resp->status = nfsd_rename(rqstp, &argp->ffh, argp->fname, argp->flen,
&argp->tfh, argp->tname, argp->tlen);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -458,17 +471,11 @@ nfsd_proc_link(struct svc_rqst *rqstp)
struct nfsd_linkargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: LINK %s ->\n",
- SVCFH_fmt(&argp->ffh));
- dprintk("nfsd: %s %.*s\n",
- SVCFH_fmt(&argp->tfh),
- argp->tlen,
- argp->tname);
-
resp->status = nfsd_link(rqstp, &argp->tfh, argp->tname, argp->tlen,
&argp->ffh);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -477,6 +484,9 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
{
struct nfsd_symlinkargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
struct svc_fh newfh;
if (argp->tlen > NFS_MAXPATHLEN) {
@@ -492,18 +502,15 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
goto out;
}
- dprintk("nfsd: SYMLINK %s %.*s -> %.*s\n",
- SVCFH_fmt(&argp->ffh), argp->flen, argp->fname,
- argp->tlen, argp->tname);
-
fh_init(&newfh, NFS_FHSIZE);
resp->status = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen,
- argp->tname, &newfh);
+ argp->tname, &attrs, &newfh);
kfree(argp->tname);
fh_put(&argp->ffh);
fh_put(&newfh);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -516,8 +523,9 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
{
struct nfsd_createargs *argp = rqstp->rq_argp;
struct nfsd_diropres *resp = rqstp->rq_resp;
-
- dprintk("nfsd: MKDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
if (resp->fh.fh_dentry) {
printk(KERN_WARNING
@@ -527,13 +535,14 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
argp->attrs.ia_valid &= ~ATTR_SIZE;
fh_init(&resp->fh, NFS_FHSIZE);
resp->status = nfsd_create(rqstp, &argp->fh, argp->name, argp->len,
- &argp->attrs, S_IFDIR, 0, &resp->fh);
+ &attrs, S_IFDIR, 0, &resp->fh);
fh_put(&argp->fh);
if (resp->status != nfs_ok)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -546,39 +555,29 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
struct nfsd_diropargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
- dprintk("nfsd: RMDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
-
resp->status = nfsd_unlink(rqstp, &argp->fh, S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd_readdirres *resp,
- int count)
+ u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
- count = min_t(u32, count, PAGE_SIZE);
-
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
- buf->buflen = count - sizeof(__be32) * 2;
+ buf->buflen = clamp(count, (u32)(XDR_UNIT * 2), (u32)PAGE_SIZE);
+ buf->buflen -= XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page++;
- /* This is xdr_init_encode(), but it assumes that
- * the head kvec has already been consumed. */
- xdr_set_scratch_buffer(xdr, NULL, 0);
- xdr->buf = buf;
- xdr->page_ptr = buf->pages;
- xdr->iov = NULL;
- xdr->p = page_address(*buf->pages);
- xdr->end = xdr->p + (PAGE_SIZE >> 2);
- xdr->rqst = NULL;
+ xdr_init_encode_pages(xdr, buf);
}
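To make the clamp above concrete, a worked example (XDR_UNIT is 4 bytes, so the reserved NULL-pointer-plus-eof space is 8 bytes):

/*
 * Worked examples of the buflen computation above:
 *   count = 0     -> clamp(0, 8, 4096)     = 8    -> buflen = 0
 *   count = 300   -> clamp(300, 8, 4096)   = 300  -> buflen = 292
 *   count = 65536 -> clamp(65536, 8, 4096) = 4096 -> buflen = 4088
 */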
/*
@@ -591,9 +590,7 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
struct nfsd_readdirres *resp = rqstp->rq_resp;
loff_t offset;
- dprintk("nfsd: READDIR %s %d bytes at %d\n",
- SVCFH_fmt(&argp->fh),
- argp->count, argp->cookie);
+ trace_nfsd_vfs_readdir(rqstp, &argp->fh, argp->count, argp->cookie);
nfsd_init_dirlist_pages(rqstp, resp, argp->count);
@@ -605,6 +602,7 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
nfssvc_encode_nfscookie(resp, offset);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -617,11 +615,10 @@ nfsd_proc_statfs(struct svc_rqst *rqstp)
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_statfsres *resp = rqstp->rq_resp;
- dprintk("nfsd: STATFS %s\n", SVCFH_fmt(&argp->fh));
-
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats,
NFSD_MAY_BYPASS_GSS_ON_ROOT);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -640,6 +637,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
@@ -651,6 +649,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
@@ -662,6 +661,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_sattrargs),
+ .pc_argzero = sizeof(struct nfsd_sattrargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
@@ -672,6 +672,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
@@ -683,6 +684,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_diropres,
.pc_release = nfssvc_release_diropres,
.pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_argzero = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+AT,
@@ -693,6 +695,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_readlinkres,
.pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
@@ -704,9 +707,10 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_readres,
.pc_release = nfssvc_release_readres,
.pc_argsize = sizeof(struct nfsd_readargs),
+ .pc_argzero = sizeof(struct nfsd_readargs),
.pc_ressize = sizeof(struct nfsd_readres),
.pc_cachetype = RC_NOCACHE,
- .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ .pc_xdrressize = ST+AT+1+NFS_MAXDATA/4,
.pc_name = "READ",
},
[NFSPROC_WRITECACHE] = {
@@ -714,6 +718,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_voidarg,
.pc_encode = nfssvc_encode_voidres,
.pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_argzero = sizeof(struct nfsd_voidargs),
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
@@ -725,6 +730,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_attrstatres,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_writeargs),
+ .pc_argzero = sizeof(struct nfsd_writeargs),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
@@ -736,6 +742,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_diropres,
.pc_release = nfssvc_release_diropres,
.pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_argzero = sizeof(struct nfsd_createargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
@@ -746,6 +753,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_diropargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_argzero = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
@@ -756,6 +764,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_renameargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_renameargs),
+ .pc_argzero = sizeof(struct nfsd_renameargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
@@ -766,6 +775,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_linkargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_linkargs),
+ .pc_argzero = sizeof(struct nfsd_linkargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
@@ -776,6 +786,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_symlinkargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_symlinkargs),
+ .pc_argzero = sizeof(struct nfsd_symlinkargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
@@ -787,6 +798,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_encode = nfssvc_encode_diropres,
.pc_release = nfssvc_release_diropres,
.pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_argzero = sizeof(struct nfsd_createargs),
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
@@ -797,6 +809,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_diropargs,
.pc_encode = nfssvc_encode_statres,
.pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_argzero = sizeof(struct nfsd_diropargs),
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
@@ -807,6 +820,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_readdirargs,
.pc_encode = nfssvc_encode_readdirres,
.pc_argsize = sizeof(struct nfsd_readdirargs),
+ .pc_argzero = sizeof(struct nfsd_readdirargs),
.pc_ressize = sizeof(struct nfsd_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIR",
@@ -816,6 +830,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_statfsres,
.pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_argzero = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_statfsres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+5,
@@ -823,72 +838,13 @@ static const struct svc_procedure nfsd_procedures2[18] = {
},
};
-
-static unsigned int nfsd_count2[ARRAY_SIZE(nfsd_procedures2)];
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ nfsd_count2[ARRAY_SIZE(nfsd_procedures2)]);
const struct svc_version nfsd_version2 = {
.vs_vers = 2,
- .vs_nproc = 18,
+ .vs_nproc = ARRAY_SIZE(nfsd_procedures2),
.vs_proc = nfsd_procedures2,
.vs_count = nfsd_count2,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS2_SVC_XDRSIZE,
};
-
-/*
- * Map errnos to NFS errnos.
- */
-__be32
-nfserrno (int errno)
-{
- static struct {
- __be32 nfserr;
- int syserr;
- } nfs_errtbl[] = {
- { nfs_ok, 0 },
- { nfserr_perm, -EPERM },
- { nfserr_noent, -ENOENT },
- { nfserr_io, -EIO },
- { nfserr_nxio, -ENXIO },
- { nfserr_fbig, -E2BIG },
- { nfserr_acces, -EACCES },
- { nfserr_exist, -EEXIST },
- { nfserr_xdev, -EXDEV },
- { nfserr_mlink, -EMLINK },
- { nfserr_nodev, -ENODEV },
- { nfserr_notdir, -ENOTDIR },
- { nfserr_isdir, -EISDIR },
- { nfserr_inval, -EINVAL },
- { nfserr_fbig, -EFBIG },
- { nfserr_nospc, -ENOSPC },
- { nfserr_rofs, -EROFS },
- { nfserr_mlink, -EMLINK },
- { nfserr_nametoolong, -ENAMETOOLONG },
- { nfserr_notempty, -ENOTEMPTY },
-#ifdef EDQUOT
- { nfserr_dquot, -EDQUOT },
-#endif
- { nfserr_stale, -ESTALE },
- { nfserr_jukebox, -ETIMEDOUT },
- { nfserr_jukebox, -ERESTARTSYS },
- { nfserr_jukebox, -EAGAIN },
- { nfserr_jukebox, -EWOULDBLOCK },
- { nfserr_jukebox, -ENOMEM },
- { nfserr_io, -ETXTBSY },
- { nfserr_notsupp, -EOPNOTSUPP },
- { nfserr_toosmall, -ETOOSMALL },
- { nfserr_serverfault, -ESERVERFAULT },
- { nfserr_serverfault, -ENFILE },
- { nfserr_io, -EUCLEAN },
- { nfserr_perm, -ENOKEY },
- { nfserr_no_grace, -ENOGRACE},
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
- if (nfs_errtbl[i].syserr == errno)
- return nfs_errtbl[i].nfserr;
- }
- WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
- return nfserr_io;
-}
-
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 80431921e5d7..b08ae85d53ef 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -12,12 +12,14 @@
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>
+#include <linux/siphash.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
+#include <linux/nfslocalio.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
@@ -33,7 +35,7 @@
#define NFSDDBG_FACILITY NFSDDBG_SVC
-extern struct svc_program nfsd_program;
+atomic_t nfsd_th_cnt = ATOMIC_INIT(0);
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int nfsd_acl_rpcbind_set(struct net *,
@@ -55,18 +57,8 @@ static __be32 nfsd_init_request(struct svc_rqst *,
struct svc_process_info *);
/*
- * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
- * of the svc_serv struct. In particular, ->sv_nrthreads but also to some
- * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt
- *
- * If (out side the lock) nn->nfsd_serv is non-NULL, then it must point to a
- * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
- * of nfsd threads must exist and each must listed in ->sp_all_threads in each
- * entry of ->sv_pools[].
- *
- * Transitions of the thread count between zero and non-zero are of particular
- * interest since the svc_serv needs to be created and initialized at that
- * point, or freed.
+ * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
+ * of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
*
* Finally, the nfsd_mutex also protects some of the global variables that are
* accessed when nfsd starts and that are settable via the write_* routines in
@@ -78,146 +70,97 @@ static __be32 nfsd_init_request(struct svc_rqst *,
*/
DEFINE_MUTEX(nfsd_mutex);
-/*
- * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used.
- * nfsd_drc_max_pages limits the total amount of memory available for
- * version 4.1 DRC caches.
- * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage.
- */
-DEFINE_SPINLOCK(nfsd_drc_lock);
-unsigned long nfsd_drc_max_mem;
-unsigned long nfsd_drc_mem_used;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+static const struct svc_version *localio_versions[] = {
+ [1] = &localio_version1,
+};
+
+#define NFSD_LOCALIO_NRVERS ARRAY_SIZE(localio_versions)
+
+#endif /* CONFIG_NFS_LOCALIO */
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
-static struct svc_stat nfsd_acl_svcstats;
static const struct svc_version *nfsd_acl_version[] = {
+# if defined(CONFIG_NFSD_V2_ACL)
[2] = &nfsd_acl_version2,
+# endif
+# if defined(CONFIG_NFSD_V3_ACL)
[3] = &nfsd_acl_version3,
+# endif
};
-#define NFSD_ACL_MINVERS 2
+#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
-static struct svc_program nfsd_acl_program = {
- .pg_prog = NFS_ACL_PROGRAM,
- .pg_nvers = NFSD_ACL_NRVERS,
- .pg_vers = nfsd_acl_version,
- .pg_name = "nfsacl",
- .pg_class = "nfsd",
- .pg_stats = &nfsd_acl_svcstats,
- .pg_authenticate = &svc_set_client,
- .pg_init_request = nfsd_acl_init_request,
- .pg_rpcbind_set = nfsd_acl_rpcbind_set,
-};
-
-static struct svc_stat nfsd_acl_svcstats = {
- .program = &nfsd_acl_program,
-};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
-static const struct svc_version *nfsd_version[] = {
+static const struct svc_version *nfsd_version[NFSD_MAXVERS+1] = {
+#if defined(CONFIG_NFSD_V2)
[2] = &nfsd_version2,
-#if defined(CONFIG_NFSD_V3)
- [3] = &nfsd_version3,
#endif
+ [3] = &nfsd_version3,
#if defined(CONFIG_NFSD_V4)
[4] = &nfsd_version4,
#endif
};
-#define NFSD_MINVERS 2
-#define NFSD_NRVERS ARRAY_SIZE(nfsd_version)
-
-struct svc_program nfsd_program = {
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
- .pg_next = &nfsd_acl_program,
-#endif
+struct svc_program nfsd_programs[] = {
+ {
.pg_prog = NFS_PROGRAM, /* program number */
- .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */
+ .pg_nvers = NFSD_MAXVERS+1, /* nr of entries in nfsd_version */
.pg_vers = nfsd_version, /* version table */
.pg_name = "nfsd", /* program name */
.pg_class = "nfsd", /* authentication class */
- .pg_stats = &nfsd_svcstats, /* version table */
- .pg_authenticate = &svc_set_client, /* export authentication */
+ .pg_authenticate = svc_set_client, /* export authentication */
.pg_init_request = nfsd_init_request,
.pg_rpcbind_set = nfsd_rpcbind_set,
+ },
+#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+ {
+ .pg_prog = NFS_ACL_PROGRAM,
+ .pg_nvers = NFSD_ACL_NRVERS,
+ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = nfsd_acl_init_request,
+ .pg_rpcbind_set = nfsd_acl_rpcbind_set,
+ },
+#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ {
+ .pg_prog = NFS_LOCALIO_PROGRAM,
+ .pg_nvers = NFSD_LOCALIO_NRVERS,
+ .pg_vers = localio_versions,
+ .pg_name = "nfslocalio",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+ }
+#endif /* CONFIG_NFS_LOCALIO */
};
-static bool
-nfsd_support_version(int vers)
+bool nfsd_support_version(int vers)
{
- if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
+ if (vers >= NFSD_MINVERS && vers <= NFSD_MAXVERS)
return nfsd_version[vers] != NULL;
return false;
}
-static bool *
-nfsd_alloc_versions(void)
-{
- bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
- unsigned i;
-
- if (vers) {
- /* All compiled versions are enabled by default */
- for (i = 0; i < NFSD_NRVERS; i++)
- vers[i] = nfsd_support_version(i);
- }
- return vers;
-}
-
-static bool *
-nfsd_alloc_minorversions(void)
-{
- bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
- sizeof(bool), GFP_KERNEL);
- unsigned i;
-
- if (vers) {
- /* All minor versions are enabled by default */
- for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
- vers[i] = nfsd_support_version(4);
- }
- return vers;
-}
-
-void
-nfsd_netns_free_versions(struct nfsd_net *nn)
-{
- kfree(nn->nfsd_versions);
- kfree(nn->nfsd4_minorversions);
- nn->nfsd_versions = NULL;
- nn->nfsd4_minorversions = NULL;
-}
-
-static void
-nfsd_netns_init_versions(struct nfsd_net *nn)
-{
- if (!nn->nfsd_versions) {
- nn->nfsd_versions = nfsd_alloc_versions();
- nn->nfsd4_minorversions = nfsd_alloc_minorversions();
- if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
- nfsd_netns_free_versions(nn);
- }
-}
-
int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
- if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
+ if (vers < NFSD_MINVERS || vers > NFSD_MAXVERS)
return 0;
switch(change) {
case NFSD_SET:
- if (nn->nfsd_versions)
- nn->nfsd_versions[vers] = nfsd_support_version(vers);
+ nn->nfsd_versions[vers] = nfsd_support_version(vers);
break;
case NFSD_CLEAR:
- nfsd_netns_init_versions(nn);
- if (nn->nfsd_versions)
- nn->nfsd_versions[vers] = false;
+ nn->nfsd_versions[vers] = false;
break;
case NFSD_TEST:
- if (nn->nfsd_versions)
- return nn->nfsd_versions[vers];
- fallthrough;
+ return nn->nfsd_versions[vers];
case NFSD_AVAIL:
return nfsd_support_version(vers);
}
@@ -244,23 +187,16 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
switch(change) {
case NFSD_SET:
- if (nn->nfsd4_minorversions) {
- nfsd_vers(nn, 4, NFSD_SET);
- nn->nfsd4_minorversions[minorversion] =
- nfsd_vers(nn, 4, NFSD_TEST);
- }
+ nfsd_vers(nn, 4, NFSD_SET);
+ nn->nfsd4_minorversions[minorversion] =
+ nfsd_vers(nn, 4, NFSD_TEST);
break;
case NFSD_CLEAR:
- nfsd_netns_init_versions(nn);
- if (nn->nfsd4_minorversions) {
- nn->nfsd4_minorversions[minorversion] = false;
- nfsd_adjust_nfsd_versions4(nn);
- }
+ nn->nfsd4_minorversions[minorversion] = false;
+ nfsd_adjust_nfsd_versions4(nn);
break;
case NFSD_TEST:
- if (nn->nfsd4_minorversions)
- return nn->nfsd4_minorversions[minorversion];
- return nfsd_vers(nn, 4, NFSD_TEST);
+ return nn->nfsd4_minorversions[minorversion];
case NFSD_AVAIL:
return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
nfsd_vers(nn, 4, NFSD_AVAIL);
@@ -268,6 +204,34 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
return 0;
}
+bool nfsd_net_try_get(struct net *net) __must_hold(rcu)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ return (nn && percpu_ref_tryget_live(&nn->nfsd_net_ref));
+}
+
+void nfsd_net_put(struct net *net) __must_hold(rcu)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ percpu_ref_put(&nn->nfsd_net_ref);
+}
+
+static void nfsd_net_done(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
+
+ complete(&nn->nfsd_net_confirm_done);
+}
+
+static void nfsd_net_free(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
+
+ complete(&nn->nfsd_net_free_done);
+}
+
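The callbacks and completions above implement the usual two-phase percpu_ref teardown; a condensed sketch of the lifecycle (the init half appears in nfsd_create_serv() and the teardown half in nfsd_shutdown_net() later in this patch):

/* Sketch: lifetime of nn->nfsd_net_ref, condensed from this patch. */
static int example_net_ref_lifecycle(struct nfsd_net *nn)
{
	int err;

	/* Start-up: the reference begins in fast-path percpu mode. */
	err = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free, 0, GFP_KERNEL);
	if (err)
		return err;
	init_completion(&nn->nfsd_net_confirm_done);
	init_completion(&nn->nfsd_net_free_done);

	/* Readers bracket short sections with nfsd_net_try_get()/nfsd_net_put(). */

	/* Shutdown: stop new gets and wait for the switch to atomic mode ... */
	percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done);
	wait_for_completion(&nn->nfsd_net_confirm_done);
	/* ... then wait for the last put and release the ref. */
	wait_for_completion(&nn->nfsd_net_free_done);
	percpu_ref_exit(&nn->nfsd_net_ref);
	return 0;
}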
/*
* Maximum number of nfsd processes
*/
@@ -285,27 +249,6 @@ int nfsd_nrthreads(struct net *net)
return rv;
}
-static int nfsd_init_socks(struct net *net, const struct cred *cred)
-{
- int error;
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- if (!list_empty(&nn->nfsd_serv->sv_permsocks))
- return 0;
-
- error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
- SVC_SOCK_DEFAULTS, cred);
- if (error < 0)
- return error;
-
- error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
- SVC_SOCK_DEFAULTS, cred);
- if (error < 0)
- return error;
-
- return 0;
-}
-
static int nfsd_users = 0;
static int nfsd_startup_generic(void)
@@ -345,35 +288,63 @@ static bool nfsd_needs_lockd(struct nfsd_net *nn)
return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}
-void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn)
+/**
+ * nfsd_copy_write_verifier - Atomically copy a write verifier
+ * @verf: buffer in which to receive the verifier cookie
+ * @nn: NFS net namespace
+ *
+ * This function provides a wait-free mechanism for copying the
+ * namespace's write verifier without tearing it.
+ */
+void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
{
- int seq = 0;
+ unsigned int seq;
do {
- read_seqbegin_or_lock(&nn->boot_lock, &seq);
- /*
- * This is opaque to client, so no need to byte-swap. Use
- * __force to keep sparse happy. y2038 time_t overflow is
- * irrelevant in this usage
- */
- verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
- verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
- } while (need_seqretry(&nn->boot_lock, seq));
- done_seqretry(&nn->boot_lock, seq);
+ seq = read_seqbegin(&nn->writeverf_lock);
+ memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
+ } while (read_seqretry(&nn->writeverf_lock, seq));
}
-static void nfsd_reset_boot_verifier_locked(struct nfsd_net *nn)
+static void nfsd_reset_write_verifier_locked(struct nfsd_net *nn)
{
- ktime_get_real_ts64(&nn->nfssvc_boot);
+ struct timespec64 now;
+ u64 verf;
+
+ /*
+ * Because the time value is hashed, y2038 time_t overflow
+ * is irrelevant in this usage.
+ */
+ ktime_get_raw_ts64(&now);
+ verf = siphash_2u64(now.tv_sec, now.tv_nsec, &nn->siphash_key);
+ memcpy(nn->writeverf, &verf, sizeof(nn->writeverf));
}
-void nfsd_reset_boot_verifier(struct nfsd_net *nn)
+/**
+ * nfsd_reset_write_verifier - Generate a new write verifier
+ * @nn: NFS net namespace
+ *
+ * This function updates the ->writeverf field of @nn. This field
+ * contains an opaque cookie that, according to Section 18.32.3 of
+ * RFC 8881, "the client can use to determine whether a server has
+ * changed instance state (e.g., server restart) between a call to
+ * WRITE and a subsequent call to either WRITE or COMMIT. This
+ * cookie MUST be unchanged during a single instance of the NFSv4.1
+ * server and MUST be unique between instances of the NFSv4.1
+ * server."
+ */
+void nfsd_reset_write_verifier(struct nfsd_net *nn)
{
- write_seqlock(&nn->boot_lock);
- nfsd_reset_boot_verifier_locked(nn);
- write_sequnlock(&nn->boot_lock);
+ write_seqlock(&nn->writeverf_lock);
+ nfsd_reset_write_verifier_locked(nn);
+ write_sequnlock(&nn->writeverf_lock);
}
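The helpers above follow the standard seqlock reader-retry pattern: the writer holds the write side around nfsd_reset_write_verifier_locked(), and readers loop until they observe an unchanged sequence count. A minimal generic sketch of the same pattern, with invented names:

/* Sketch: torn-free copy of an 8-byte cookie guarded by a seqlock. */
static DEFINE_SEQLOCK(example_lock);
static __be32 example_verf[2];

static void example_read(__be32 out[2])
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&example_lock);
		memcpy(out, example_verf, sizeof(example_verf));
	} while (read_seqretry(&example_lock, seq));
}

static void example_write(const __be32 in[2])
{
	write_seqlock(&example_lock);
	memcpy(example_verf, in, sizeof(example_verf));
	write_sequnlock(&example_lock);
}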
+/*
+ * Crank up a set of per-namespace resources for a new NFSD instance,
+ * including lockd, a duplicate reply cache, an open file cache
+ * instance, and a cache of NFSv4 state objects.
+ */
static int nfsd_startup_net(struct net *net, const struct cred *cred)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -385,9 +356,12 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
ret = nfsd_startup_generic();
if (ret)
return ret;
- ret = nfsd_init_socks(net, cred);
- if (ret)
+
+ if (list_empty(&nn->nfsd_serv->sv_permsocks)) {
+ pr_warn("NFSD: Failed to start, no listeners configured.\n");
+ ret = -EIO;
goto out_socks;
+ }
if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
ret = lockd_up(net, cred);
@@ -399,16 +373,23 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
ret = nfsd_file_cache_start_net(net);
if (ret)
goto out_lockd;
- ret = nfs4_state_start_net(net);
+
+ ret = nfsd_reply_cache_init(nn);
if (ret)
goto out_filecache;
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
nfsd4_ssc_init_umount_work(nn);
#endif
+ ret = nfs4_state_start_net(net);
+ if (ret)
+ goto out_reply_cache;
+
nn->nfsd_net_up = true;
return 0;
+out_reply_cache:
+ nfsd_reply_cache_shutdown(nn);
out_filecache:
nfsd_file_cache_shutdown_net(net);
out_lockd:
@@ -425,16 +406,29 @@ static void nfsd_shutdown_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- nfsd_file_cache_shutdown_net(net);
+ if (!nn->nfsd_net_up)
+ return;
+
+ percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done);
+ wait_for_completion(&nn->nfsd_net_confirm_done);
+
+ nfsd_export_flush(net);
nfs4_state_shutdown_net(net);
+ nfsd_reply_cache_shutdown(nn);
+ nfsd_file_cache_shutdown_net(net);
if (nn->lockd_up) {
lockd_down(net);
nn->lockd_up = false;
}
+
+ wait_for_completion(&nn->nfsd_net_free_done);
+ percpu_ref_exit(&nn->nfsd_net_ref);
+
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
+static DEFINE_SPINLOCK(nfsd_notifier_lock);
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -444,18 +438,17 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in sin;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nn->ntf_refcnt))
+ if (event != NETDEV_DOWN || !nn->nfsd_serv)
goto out;
+ spin_lock(&nfsd_notifier_lock);
if (nn->nfsd_serv) {
dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
}
- atomic_dec(&nn->ntf_refcnt);
- wake_up(&nn->ntf_wq);
+ spin_unlock(&nfsd_notifier_lock);
out:
return NOTIFY_DONE;
@@ -475,10 +468,10 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in6 sin6;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nn->ntf_refcnt))
+ if (event != NETDEV_DOWN || !nn->nfsd_serv)
goto out;
+ spin_lock(&nfsd_notifier_lock);
if (nn->nfsd_serv) {
dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
@@ -487,8 +480,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
}
- atomic_dec(&nn->ntf_refcnt);
- wake_up(&nn->ntf_wq);
+ spin_unlock(&nfsd_notifier_lock);
+
out:
return NOTIFY_DONE;
}
@@ -501,11 +494,21 @@ static struct notifier_block nfsd_inet6addr_notifier = {
/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
-static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+/**
+ * nfsd_destroy_serv - tear down NFSD's svc_serv for a namespace
+ * @net: network namespace the NFS service is associated with
+ */
+void nfsd_destroy_serv(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv = nn->nfsd_serv;
+
+ lockdep_assert_held(&nfsd_mutex);
+
+ spin_lock(&nfsd_notifier_lock);
+ nn->nfsd_serv = NULL;
+ spin_unlock(&nfsd_notifier_lock);
- atomic_dec(&nn->ntf_refcnt);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -513,32 +516,27 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
- wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
/*
* write_ports can create the server without actually starting
- * any threads--if we get shut down before any threads are
- * started, then nfsd_last_thread will be run before any of this
+ * any threads. If we get shut down before any threads are
+ * started, then nfsd_destroy_serv will be run before any of this
* other initialization has been done except the rpcb information.
*/
- svc_rpcb_cleanup(serv, net);
- if (!nn->nfsd_net_up)
- return;
-
+ svc_xprt_destroy_all(serv, net, true);
nfsd_shutdown_net(net);
- pr_info("nfsd: last server has exited, flushing export cache\n");
- nfsd_export_flush(net);
+ svc_destroy(&serv);
}
void nfsd_reset_versions(struct nfsd_net *nn)
{
int i;
- for (i = 0; i < NFSD_NRVERS; i++)
+ for (i = 0; i <= NFSD_MAXVERS; i++)
if (nfsd_vers(nn, i, NFSD_TEST))
return;
- for (i = 0; i < NFSD_NRVERS; i++)
+ for (i = 0; i <= NFSD_MAXVERS; i++)
if (i != 4)
nfsd_vers(nn, i, NFSD_SET);
else {
@@ -548,27 +546,6 @@ void nfsd_reset_versions(struct nfsd_net *nn)
}
}
-/*
- * Each session guarantees a negotiated per slot memory cache for replies
- * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
- * NFSv4.1 server might want to use more memory for a DRC than a machine
- * with mutiple services.
- *
- * Impose a hard limit on the number of pages for the DRC which varies
- * according to the machines free pages. This is of course only a default.
- *
- * For now this is a #defined shift which could be under admin control
- * in the future.
- */
-static void set_max_drc(void)
-{
- #define NFSD_DRC_SIZE_SHIFT 7
- nfsd_drc_max_mem = (nr_free_buffer_pages()
- >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
- nfsd_drc_mem_used = 0;
- dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
-}
-
static int nfsd_get_default_max_blksize(void)
{
struct sysinfo i;
@@ -584,30 +561,12 @@ static int nfsd_get_default_max_blksize(void)
*/
target >>= 12;
- ret = NFSSVC_MAXBLKSIZE;
+ ret = NFSSVC_DEFBLKSIZE;
while (ret > target && ret >= 8*1024*2)
ret /= 2;
return ret;
}
-static const struct svc_serv_ops nfsd_thread_sv_ops = {
- .svo_shutdown = nfsd_last_thread,
- .svo_function = nfsd,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads,
- .svo_module = THIS_MODULE,
-};
-
-static void nfsd_complete_shutdown(struct net *net)
-{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- WARN_ON(!mutex_is_locked(&nfsd_mutex));
-
- nn->nfsd_serv = NULL;
- complete(&nn->nfsd_shutdown_complete);
-}
-
void nfsd_shutdown_threads(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -620,48 +579,54 @@ void nfsd_shutdown_threads(struct net *net)
return;
}
- svc_get(serv);
/* Kill outstanding nfsd threads */
- serv->sv_ops->svo_setup(serv, NULL, 0);
- nfsd_destroy(net);
+ svc_set_num_threads(serv, NULL, 0);
+ nfsd_destroy_serv(net);
mutex_unlock(&nfsd_mutex);
- /* Wait for shutdown of nfsd_serv to complete */
- wait_for_completion(&nn->nfsd_shutdown_complete);
}
-bool i_am_nfsd(void)
+struct svc_rqst *nfsd_current_rqst(void)
{
- return kthread_func(current) == nfsd;
+ if (kthread_func(current) == nfsd)
+ return kthread_data(current);
+ return NULL;
}
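
[Editor's sketch, not part of the patch: a hedged illustration of the new helper. Unlike the old i_am_nfsd() boolean, nfsd_current_rqst() also hands back the calling nfsd thread's svc_rqst; the wrapper name below is hypothetical.]

	static bool nfsd_example_in_nfsd_thread(void)
	{
		struct svc_rqst *rqstp = nfsd_current_rqst();

		/* NULL means the current task is not an nfsd thread */
		return rqstp != NULL;
	}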
int nfsd_create_serv(struct net *net)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
WARN_ON(!mutex_is_locked(&nfsd_mutex));
- if (nn->nfsd_serv) {
- svc_get(nn->nfsd_serv);
+ if (nn->nfsd_serv)
return 0;
- }
+
+ error = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free,
+ 0, GFP_KERNEL);
+ if (error)
+ return error;
+ init_completion(&nn->nfsd_net_free_done);
+ init_completion(&nn->nfsd_net_confirm_done);
+
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions(nn);
- nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
- &nfsd_thread_sv_ops);
- if (nn->nfsd_serv == NULL)
+ serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs),
+ &nn->nfsd_svcstats,
+ nfsd_max_blksize, nfsd);
+ if (serv == NULL)
return -ENOMEM;
- init_completion(&nn->nfsd_shutdown_complete);
- nn->nfsd_serv->sv_maxconn = nn->max_connections;
- error = svc_bind(nn->nfsd_serv, net);
+ error = svc_bind(serv, net);
if (error < 0) {
- svc_destroy(nn->nfsd_serv);
- nfsd_complete_shutdown(net);
+ svc_destroy(&serv);
return error;
}
+ spin_lock(&nfsd_notifier_lock);
+ nn->nfsd_serv = serv;
+ spin_unlock(&nfsd_notifier_lock);
- set_max_drc();
/* check if the notifier is already set */
if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
register_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -669,8 +634,7 @@ int nfsd_create_serv(struct net *net)
register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
- atomic_inc(&nn->ntf_refcnt);
- nfsd_reset_boot_verifier(nn);
+ nfsd_reset_write_verifier(nn);
return 0;
}
@@ -686,29 +650,29 @@ int nfsd_nrpools(struct net *net)
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
- int i = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv = nn->nfsd_serv;
+ int i;
- if (nn->nfsd_serv != NULL) {
- for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
- nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
- }
-
+ if (serv)
+ for (i = 0; i < serv->sv_nrpools && i < n; i++)
+ nthreads[i] = serv->sv_pools[i].sp_nrthreads;
return 0;
}
-void nfsd_destroy(struct net *net)
-{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- int destroy = (nn->nfsd_serv->sv_nrthreads == 1);
-
- if (destroy)
- svc_shutdown_net(nn->nfsd_serv, net);
- svc_destroy(nn->nfsd_serv);
- if (destroy)
- nfsd_complete_shutdown(net);
-}
-
+/**
+ * nfsd_set_nrthreads - set the number of running threads in the net's service
+ * @n: number of array members in @nthreads
+ * @nthreads: array of thread counts for each pool
+ * @net: network namespace to operate within
+ *
+ * This function alters the number of running threads for the given network
+ * namespace in each pool. If passed an array longer than the number of
+ * pools, the extra pool settings are ignored. If passed an array shorter
+ * than the number of pools, the missing values are interpreted as 0's.
+ *
+ * Returns 0 on success or a negative errno on error.
+ */
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
@@ -716,11 +680,18 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
int err = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- WARN_ON(!mutex_is_locked(&nfsd_mutex));
+ lockdep_assert_held(&nfsd_mutex);
if (nn->nfsd_serv == NULL || n <= 0)
return 0;
+ /*
+ * Special case: When n == 1, pass in NULL for the pool, so that the
+ * change is distributed equally among all pools.
+ */
+ if (n == 1)
+ return svc_set_num_threads(nn->nfsd_serv, NULL, nthreads[0]);
+
if (n > nn->nfsd_serv->sv_nrpools)
n = nn->nfsd_serv->sv_nrpools;
@@ -733,7 +704,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
if (tot > NFSD_MAXSERVS) {
/* total too large: scale down requested numbers */
for (i = 0; i < n && tot > 0; i++) {
- int new = nthreads[i] * NFSD_MAXSERVS / tot;
+ int new = nthreads[i] * NFSD_MAXSERVS / tot;
tot -= (nthreads[i] - new);
nthreads[i] = new;
}
@@ -743,75 +714,68 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
}
}
- /*
- * There must always be a thread in pool 0; the admin
- * can't shut down NFS completely using pool_threads.
- */
- if (nthreads[0] == 0)
- nthreads[0] = 1;
-
/* apply the new numbers */
- svc_get(nn->nfsd_serv);
for (i = 0; i < n; i++) {
- err = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
- &nn->nfsd_serv->sv_pools[i], nthreads[i]);
+ err = svc_set_num_threads(nn->nfsd_serv,
+ &nn->nfsd_serv->sv_pools[i],
+ nthreads[i]);
if (err)
- break;
+ goto out;
}
- nfsd_destroy(net);
+
+ /* Anything left undefined in the array is treated as 0 */
+ for (i = n; i < nn->nfsd_serv->sv_nrpools; ++i) {
+ err = svc_set_num_threads(nn->nfsd_serv,
+ &nn->nfsd_serv->sv_pools[i],
+ 0);
+ if (err)
+ goto out;
+ }
+out:
return err;
}
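
[Editor's sketch, not part of the patch: a hedged example of a control-path caller of nfsd_set_nrthreads(). Entries beyond the number of pools are ignored, pools without an entry are brought down to zero threads, and the caller is assumed to hold nfsd_mutex; the wrapper name is hypothetical.]

	static int nfsd_example_two_pool_split(struct net *net, int total)
	{
		int nthreads[2];

		/* split "total" across two pools; any further pools drop to 0 */
		nthreads[0] = total - total / 2;
		nthreads[1] = total / 2;
		return nfsd_set_nrthreads(ARRAY_SIZE(nthreads), nthreads, net);
	}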
-/*
- * Adjust the number of threads and return the new number of threads.
- * This is also the function that starts the server if necessary, if
- * this is the first time nrservs is nonzero.
+/**
+ * nfsd_svc - start up or shut down the nfsd server
+ * @n: number of array members in @nthreads
+ * @nthreads: array of thread counts for each pool
+ * @net: network namespace to operate within
+ * @cred: credentials to use for xprt creation
+ * @scope: server scope value (defaults to nodename)
+ *
+ * Adjust the number of threads in each pool and return the new
+ * total number of threads in the service.
*/
int
-nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+nfsd_svc(int n, int *nthreads, struct net *net, const struct cred *cred, const char *scope)
{
int error;
- bool nfsd_up_before;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
- mutex_lock(&nfsd_mutex);
- dprintk("nfsd: creating service\n");
-
- nrservs = max(nrservs, 0);
- nrservs = min(nrservs, NFSD_MAXSERVS);
- error = 0;
+ lockdep_assert_held(&nfsd_mutex);
- if (nrservs == 0 && nn->nfsd_serv == NULL)
- goto out;
+ dprintk("nfsd: creating service\n");
- strlcpy(nn->nfsd_name, utsname()->nodename,
+ strscpy(nn->nfsd_name, scope ? scope : utsname()->nodename,
sizeof(nn->nfsd_name));
error = nfsd_create_serv(net);
if (error)
goto out;
-
- nfsd_up_before = nn->nfsd_net_up;
+ serv = nn->nfsd_serv;
error = nfsd_startup_net(net, cred);
if (error)
- goto out_destroy;
- error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
- NULL, nrservs);
+ goto out_put;
+ error = nfsd_set_nrthreads(n, nthreads, net);
if (error)
- goto out_shutdown;
- /* We are holding a reference to nn->nfsd_serv which
- * we don't want to count in the return value,
- * so subtract 1
- */
- error = nn->nfsd_serv->sv_nrthreads - 1;
-out_shutdown:
- if (error < 0 && !nfsd_up_before)
- nfsd_shutdown_net(net);
-out_destroy:
- nfsd_destroy(net); /* Release server */
+ goto out_put;
+ error = serv->sv_nrthreads;
+out_put:
+ if (serv->sv_nrthreads == 0)
+ nfsd_destroy_serv(net);
out:
- mutex_unlock(&nfsd_mutex);
return error;
}
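
[Editor's sketch, not part of the patch: a hedged example of the new nfsd_svc() calling convention. The caller must hold nfsd_mutex, a NULL scope falls back to the nodename, and a single-entry array spreads the count evenly across pools; the wrapper name is hypothetical.]

	static int nfsd_example_start_server(struct net *net, const struct cred *cred)
	{
		int nthreads[1] = { 8 };

		lockdep_assert_held(&nfsd_mutex);
		/* returns the resulting thread count, or a negative errno */
		return nfsd_svc(1, nthreads, net, cred, NULL);
	}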
@@ -892,17 +856,17 @@ nfsd_init_request(struct svc_rqst *rqstp,
if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
return svc_generic_init_request(rqstp, progp, ret);
- ret->mismatch.lovers = NFSD_NRVERS;
- for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
+ ret->mismatch.lovers = NFSD_MAXVERS + 1;
+ for (i = NFSD_MINVERS; i <= NFSD_MAXVERS; i++) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.lovers = i;
break;
}
}
- if (ret->mismatch.lovers == NFSD_NRVERS)
+ if (ret->mismatch.lovers > NFSD_MAXVERS)
return rpc_prog_unavail;
ret->mismatch.hivers = NFSD_MINVERS;
- for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
+ for (i = NFSD_MAXVERS; i >= NFSD_MINVERS; i--) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.hivers = i;
break;
@@ -921,79 +885,37 @@ nfsd(void *vrqstp)
struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
struct net *net = perm_sock->xpt_net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- int err;
-
- /* Lock module and set up kernel thread */
- mutex_lock(&nfsd_mutex);
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
- * umask as defined by the client instead of init's umask. */
- if (unshare_fs_struct() < 0) {
- printk("Unable to start nfsd thread: out of memory\n");
- goto out;
- }
+ * umask as defined by the client instead of init's umask.
+ */
+ svc_thread_init_status(rqstp, unshare_fs_struct());
current->fs->umask = 0;
- /*
- * thread is spawned with all signals set to SIG_IGN, re-enable
- * the ones that will bring down the thread
- */
- allow_signal(SIGKILL);
- allow_signal(SIGHUP);
- allow_signal(SIGINT);
- allow_signal(SIGQUIT);
-
- nfsdstats.th_cnt++;
- mutex_unlock(&nfsd_mutex);
+ atomic_inc(&nfsd_th_cnt);
set_freezable();
/*
* The main request loop
*/
- for (;;) {
- /* Update sv_maxconn if it has changed */
- rqstp->rq_server->sv_maxconn = nn->max_connections;
-
- /*
- * Find a socket with data available and call its
- * recvfrom routine.
- */
- while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
- ;
- if (err == -EINTR)
- break;
- validate_process_creds();
- svc_process(rqstp);
- validate_process_creds();
+ while (!svc_thread_should_stop(rqstp)) {
+ svc_recv(rqstp);
+ nfsd_file_net_dispose(nn);
}
- /* Clear signals before calling svc_exit_thread() */
- flush_signals(current);
-
- mutex_lock(&nfsd_mutex);
- nfsdstats.th_cnt --;
-
-out:
- rqstp->rq_server = NULL;
+ atomic_dec(&nfsd_th_cnt);
/* Release the thread */
svc_exit_thread(rqstp);
-
- nfsd_destroy(net);
-
- /* Release module */
- mutex_unlock(&nfsd_mutex);
- module_put_and_exit(0);
return 0;
}
/**
- * nfsd_dispatch - Process an NFS or NFSACL Request
+ * nfsd_dispatch - Process an NFS or NFSACL or LOCALIO Request
* @rqstp: incoming request
- * @statp: pointer to location of accept_stat field in RPC Reply buffer
*
* This RPC dispatcher integrates the NFS server's duplicate reply cache.
*
@@ -1001,9 +923,13 @@ out:
* %0: Processing complete; do not send a Reply
* %1: Processing complete; send Reply in rqstp->rq_res
*/
-int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+int nfsd_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *proc = rqstp->rq_procinfo;
+ __be32 *statp = rqstp->rq_accept_statp;
+ struct nfsd_cacherep *rp;
+ unsigned int start, len;
+ __be32 *nfs_reply;
/*
* Give the xdr decoder a chance to change this if it wants
@@ -1011,11 +937,27 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*/
rqstp->rq_cachetype = proc->pc_cachetype;
- svcxdr_init_decode(rqstp);
+ /*
+ * ->pc_decode advances the argument stream past the NFS
+ * Call header, so grab the header's starting location and
+ * size now for the call to nfsd_cache_lookup().
+ */
+ start = xdr_stream_pos(&rqstp->rq_arg_stream);
+ len = xdr_stream_remaining(&rqstp->rq_arg_stream);
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
- switch (nfsd_cache_lookup(rqstp)) {
+ /*
+ * Release rq_status_counter, setting it to an odd value, once the RPC
+ * request has been properly parsed. rq_status_counter tells consumers
+ * whether the rqstp fields are stable (rq_status_counter is odd) or
+ * not meaningful (rq_status_counter is even).
+ */
+ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter | 1);
+
+ rp = NULL;
+ switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
case RC_DOIT:
break;
case RC_REPLY:
@@ -1024,20 +966,21 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
goto out_dropit;
}
- /*
- * Need to grab the location to store the status, as
- * NFSv4 does some encoding while processing
- */
- svcxdr_init_encode(rqstp);
-
+ nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
*statp = proc->pc_func(rqstp);
- if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
+ if (test_bit(RQ_DROPME, &rqstp->rq_flags))
goto out_update_drop;
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
- nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
+ /*
+ * Release rq_status_counter, setting it to an even value, once the RPC
+ * request has been fully processed.
+ */
+ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter + 1);
+
+ nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, nfs_reply);
out_cached_reply:
return 1;
@@ -1047,13 +990,13 @@ out_decode_err:
return 1;
out_update_drop:
- nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+ nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
out_dropit:
return 0;
out_encode_err:
trace_nfsd_cant_encode_err(rqstp);
- nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+ nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
*statp = rpc_system_err;
return 1;
}
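
[Editor's sketch, not part of the patch: a hedged view of the consumer side of the rq_status_counter protocol described above, assuming the counter is an unsigned int as the smp_store_release() calls suggest. A reader samples the counter, trusts the rqstp fields only while the value is odd, and re-reads the counter afterwards to confirm nothing changed in between; the function name is hypothetical.]

	static bool nfsd_example_rqstp_stable(struct svc_rqst *rqstp,
					      unsigned int *seen)
	{
		*seen = smp_load_acquire(&rqstp->rq_status_counter);

		/* odd: the request has been parsed and its fields are stable */
		return (*seen & 1) != 0;
	}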
@@ -1085,32 +1028,3 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
return true;
}
-
-int nfsd_pool_stats_open(struct inode *inode, struct file *file)
-{
- int ret;
- struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
-
- mutex_lock(&nfsd_mutex);
- if (nn->nfsd_serv == NULL) {
- mutex_unlock(&nfsd_mutex);
- return -ENODEV;
- }
- /* bump up the psudo refcount while traversing */
- svc_get(nn->nfsd_serv);
- ret = svc_pool_stats_open(nn->nfsd_serv, file);
- mutex_unlock(&nfsd_mutex);
- return ret;
-}
-
-int nfsd_pool_stats_release(struct inode *inode, struct file *file)
-{
- int ret = seq_release(inode, file);
- struct net *net = inode->i_sb->s_fs_info;
-
- mutex_lock(&nfsd_mutex);
- /* this function really, really should have been called svc_put() */
- nfsd_destroy(net);
- mutex_unlock(&nfsd_mutex);
- return ret;
-}
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index aba8520b4b8b..fc262ceafca9 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -336,12 +336,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, struct xdr_stream *xdr)
/* opaque data */
if (xdr_stream_decode_u32(xdr, &args->len) < 0)
return false;
- if (args->len > NFSSVC_MAXBLKSIZE_V2)
- return false;
- if (!xdr_stream_subsegment(xdr, &args->payload, args->len))
+ if (args->len > NFS_MAXDATA)
return false;
- return true;
+ return xdr_stream_subsegment(xdr, &args->payload, args->len);
}
bool
@@ -470,7 +468,8 @@ nfssvc_encode_readlinkres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
case nfs_ok:
if (xdr_stream_encode_u32(xdr, resp->len) < 0)
return false;
- xdr_write_pages(xdr, &resp->page, 0, resp->len);
+ svcxdr_encode_opaque_pages(rqstp, xdr, &resp->page, 0,
+ resp->len);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->len) < 0)
return false;
break;
@@ -493,8 +492,9 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
if (xdr_stream_encode_u32(xdr, resp->count) < 0)
return false;
- xdr_write_pages(xdr, resp->pages, rqstp->rq_res.page_base,
- resp->count);
+ svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages,
+ rqstp->rq_res.page_base,
+ resp->count);
if (svc_encode_result_payload(rqstp, head->iov_len, resp->count) < 0)
return false;
break;
@@ -513,7 +513,8 @@ nfssvc_encode_readdirres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return false;
switch (resp->status) {
case nfs_ok:
- xdr_write_pages(xdr, dirlist->pages, 0, dirlist->len);
+ svcxdr_encode_opaque_pages(rqstp, xdr, dirlist->pages, 0,
+ dirlist->len);
/* no more entries */
if (xdr_stream_encode_item_absent(xdr) < 0)
return false;
@@ -539,7 +540,7 @@ nfssvc_encode_statfsres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
p = xdr_reserve_space(xdr, XDR_UNIT * 5);
if (!p)
return false;
- *p++ = cpu_to_be32(NFSSVC_MAXBLKSIZE_V2);
+ *p++ = cpu_to_be32(NFS_MAXDATA);
*p++ = cpu_to_be32(stat->f_bsize);
*p++ = cpu_to_be32(stat->f_blocks);
*p++ = cpu_to_be32(stat->f_bfree);
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 4f4282d4eeca..db9af780438b 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -27,17 +27,19 @@ struct nfsd4_layout_ops {
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdevp);
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdevp);
+ const struct nfsd4_getdeviceinfo *gdevp);
- __be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
- struct nfsd4_layoutget *lgp);
- __be32 (*encode_layoutget)(struct xdr_stream *,
- struct nfsd4_layoutget *lgp);
+ __be32 (*proc_layoutget)(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *lgp);
+ __be32 (*encode_layoutget)(struct xdr_stream *xdr,
+ const struct nfsd4_layoutget *lgp);
__be32 (*proc_layoutcommit)(struct inode *inode,
+ struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp);
- void (*fence_client)(struct nfs4_layout_stateid *ls);
+ void (*fence_client)(struct nfs4_layout_stateid *ls,
+ struct nfsd_file *file);
};
extern const struct nfsd4_layout_ops *nfsd4_layout_ops[];
@@ -72,11 +74,13 @@ void nfsd4_setup_layout_type(struct svc_export *exp);
void nfsd4_return_all_client_layouts(struct nfs4_client *);
void nfsd4_return_all_file_layouts(struct nfs4_client *clp,
struct nfs4_file *fp);
+void nfsd4_close_layout(struct nfs4_layout_stateid *ls);
int nfsd4_init_pnfs(void);
void nfsd4_exit_pnfs(void);
#else
struct nfs4_client;
struct nfs4_file;
+struct nfs4_layout_stateid;
static inline void nfsd4_setup_layout_type(struct svc_export *exp)
{
@@ -89,6 +93,9 @@ static inline void nfsd4_return_all_file_layouts(struct nfs4_client *clp,
struct nfs4_file *fp)
{
}
+static inline void nfsd4_close_layout(struct nfs4_layout_stateid *ls)
+{
+}
static inline void nfsd4_exit_pnfs(void)
{
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e73bdbb1634a..b052c1effdc5 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
+#include <crypto/md5.h>
#include <linux/idr.h>
#include <linux/refcount.h>
#include <linux/sunrpc/svc_xprt.h>
@@ -57,28 +58,50 @@ typedef struct {
} stateid_t;
typedef struct {
- stateid_t stid;
+ stateid_t cs_stid;
#define NFS4_COPY_STID 1
#define NFS4_COPYNOTIFY_STID 2
- unsigned char sc_type;
- refcount_t sc_count;
+ unsigned char cs_type;
+ refcount_t cs_count;
} copy_stateid_t;
+struct nfsd4_referring_call {
+ struct list_head __list;
+
+ u32 rc_sequenceid;
+ u32 rc_slotid;
+};
+
+struct nfsd4_referring_call_list {
+ struct list_head __list;
+
+ struct nfs4_sessionid rcl_sessionid;
+ int __nr_referring_calls;
+ struct list_head rcl_referring_calls;
+};
+
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
+#define NFSD4_CALLBACK_RUNNING (0)
+#define NFSD4_CALLBACK_WAKE (1)
+#define NFSD4_CALLBACK_REQUEUE (2)
+ unsigned long cb_flags;
const struct nfsd4_callback_ops *cb_ops;
struct work_struct cb_work;
int cb_seq_status;
int cb_status;
- bool cb_need_restart;
- bool cb_holds_slot;
+ int cb_held_slot;
+
+ int cb_nr_referring_call_list;
+ struct list_head cb_referring_call_list;
};
struct nfsd4_callback_ops {
void (*prepare)(struct nfsd4_callback *);
int (*done)(struct nfsd4_callback *, struct rpc_task *);
void (*release)(struct nfsd4_callback *);
+ uint32_t opcode;
};
/*
@@ -88,17 +111,36 @@ struct nfsd4_callback_ops {
*/
struct nfs4_stid {
refcount_t sc_count;
-#define NFS4_OPEN_STID 1
-#define NFS4_LOCK_STID 2
-#define NFS4_DELEG_STID 4
-/* For an open stateid kept around *only* to process close replays: */
-#define NFS4_CLOSED_STID 8
+
+ /* A new stateid is added to the cl_stateids idr early, before it
+ * is fully initialised. Its sc_type is then zero. After
+ * initialisation, sc_type is set under cl_lock and then
+ * never changes.
+ */
+#define SC_TYPE_OPEN BIT(0)
+#define SC_TYPE_LOCK BIT(1)
+#define SC_TYPE_DELEG BIT(2)
+#define SC_TYPE_LAYOUT BIT(3)
+ unsigned short sc_type;
+
+/* state_lock protects sc_status for delegation stateids.
+ * ->cl_lock protects sc_status for open and lock stateids.
+ * ->st_mutex also protects sc_status for open stateids.
+ * ->ls_lock protects sc_status for layout stateids.
+ */
+/*
+ * For an open stateid kept around *only* to process close replays.
+ * For deleg stateid, kept in idr until last reference is dropped.
+ */
+#define SC_STATUS_CLOSED BIT(0)
/* For a deleg stateid kept around only to process free_stateid's: */
-#define NFS4_REVOKED_DELEG_STID 16
-#define NFS4_CLOSED_DELEG_STID 32
-#define NFS4_LAYOUT_STID 64
+#define SC_STATUS_REVOKED BIT(1)
+#define SC_STATUS_ADMIN_REVOKED BIT(2)
+#define SC_STATUS_FREEABLE BIT(3)
+#define SC_STATUS_FREED BIT(4)
+ unsigned short sc_status;
+
struct list_head sc_cp_list;
- unsigned char sc_type;
stateid_t sc_stateid;
spinlock_t sc_lock;
struct nfs4_client *sc_client;
@@ -118,6 +160,36 @@ struct nfs4_cpntf_state {
};
/*
+ * RFC 7862 Section 4.8 states:
+ *
+ * | A copy offload stateid will be valid until either (A) the client
+ * | or server restarts or (B) the client returns the resource by
+ * | issuing an OFFLOAD_CANCEL operation or the client replies to a
+ * | CB_OFFLOAD operation.
+ *
+ * Because a client might not reply to a CB_OFFLOAD, or a reply
+ * might get lost due to connection loss, NFSD purges async copy
+ * state after a short period to prevent it from accumulating
+ * over time.
+ */
+#define NFSD_COPY_INITIAL_TTL 10
+
+struct nfs4_cb_fattr {
+ struct nfsd4_callback ncf_getattr;
+ u32 ncf_cb_status;
+
+ /* from CB_GETATTR reply */
+ u64 ncf_cb_change;
+ u64 ncf_cb_fsize;
+ struct timespec64 ncf_cb_mtime;
+ struct timespec64 ncf_cb_atime;
+
+ bool ncf_file_modified;
+ u64 ncf_initial_cinfo;
+ u64 ncf_cur_fsize;
+};
+
+/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
* returned by the client:
@@ -144,13 +216,43 @@ struct nfs4_delegation {
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
struct nfs4_clnt_odstate *dl_clnt_odstate;
- u32 dl_type;
time64_t dl_time;
-/* For recall: */
+ u32 dl_type;
+ /* For recall: */
int dl_retries;
struct nfsd4_callback dl_recall;
+ bool dl_recalled;
+ bool dl_written;
+ bool dl_setattr;
+
+ /* for CB_GETATTR */
+ struct nfs4_cb_fattr dl_cb_fattr;
+
+ /* For delegated timestamps */
+ struct timespec64 dl_atime;
+ struct timespec64 dl_mtime;
+ struct timespec64 dl_ctime;
};
+static inline bool deleg_is_read(u32 dl_type)
+{
+ return (dl_type == OPEN_DELEGATE_READ || dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG);
+}
+
+static inline bool deleg_is_write(u32 dl_type)
+{
+ return (dl_type == OPEN_DELEGATE_WRITE || dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG);
+}
+
+static inline bool deleg_attrs_deleg(u32 dl_type)
+{
+ return dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG ||
+ dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG;
+}
+
+bool nfsd4_vet_deleg_time(struct timespec64 *cb, const struct timespec64 *orig,
+ const struct timespec64 *now);
+
#define cb_to_delegation(cb) \
container_of(cb, struct nfs4_delegation, dl_recall)
@@ -171,10 +273,11 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
return container_of(s, struct nfs4_delegation, dl_stid);
}
-/* Maximum number of slots per session. 160 is useful for long haul TCP */
-#define NFSD_MAX_SLOTS_PER_SESSION 160
-/* Maximum number of operations per session compound */
-#define NFSD_MAX_OPS_PER_COMPOUND 16
+/* Maximum number of slots per session. This is for sanity checking only.
+ * It could be increased if we had a mechanism to shut down misbehaving clients.
+ * A large number can be needed to get good throughput on high-latency servers.
+ */
+#define NFSD_MAX_SLOTS_PER_SESSION 2048
/* Maximum session per slot cache size */
#define NFSD_SLOT_CACHE_SIZE 2048
/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
@@ -186,12 +289,15 @@ struct nfsd4_slot {
u32 sl_seqid;
__be32 sl_status;
struct svc_cred sl_cred;
+ u32 sl_index;
u32 sl_datalen;
u16 sl_opcnt;
+ u16 sl_generation;
#define NFSD4_SLOT_INUSE (1 << 0)
#define NFSD4_SLOT_CACHETHIS (1 << 1)
#define NFSD4_SLOT_INITIALIZED (1 << 2)
#define NFSD4_SLOT_CACHED (1 << 3)
+#define NFSD4_SLOT_REUSED (1 << 4)
u8 sl_flags;
char sl_data[];
};
@@ -250,6 +356,9 @@ struct nfsd4_conn {
unsigned char cn_flags;
};
+/* Maximum number of slots that nfsd will use in the backchannel */
+#define NFSD_BC_SLOT_TABLE_SIZE (sizeof(u32) * 8)
+
/*
* Representation of a v4.1+ session. These are refcounted in a similar fashion
* to the nfs4_client. References are only taken when the server is actively
@@ -257,20 +366,23 @@ struct nfsd4_conn {
*/
struct nfsd4_session {
atomic_t se_ref;
+ spinlock_t se_lock;
+ u32 se_cb_slot_avail; /* bitmap of available slots */
+ u32 se_cb_highest_slot; /* highest slot client wants */
+ u32 se_cb_prog;
struct list_head se_hash; /* hash by sessionid */
struct list_head se_perclnt;
-/* See SESSION4_PERSIST, etc. for standard flags; this is internal-only: */
-#define NFS4_SESSION_DEAD 0x010
- u32 se_flags;
+ struct list_head se_all_sessions;/* global list of sessions */
struct nfs4_client *se_client;
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
- struct nfsd4_channel_attrs se_bchannel;
struct nfsd4_cb_sec se_cb_sec;
struct list_head se_conns;
- u32 se_cb_prog;
- u32 se_cb_seq_nr;
- struct nfsd4_slot *se_slots[]; /* forward channel slots */
+ u32 se_cb_seq_nr[NFSD_BC_SLOT_TABLE_SIZE];
+ struct xarray se_slots; /* forward channel slots */
+ u16 se_slot_gen;
+ bool se_dead;
+ u32 se_target_maxslots;
};
/* formatted contents of nfs4_sessionid */
@@ -280,7 +392,30 @@ struct nfsd4_sessionid {
u32 reserved;
};
-#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
+/* Length of MD5 digest as hex, plus terminating '\0' */
+#define HEXDIR_LEN (2 * MD5_DIGEST_SIZE + 1)
+
+/*
+ * State Meaning Where set
+ * --------------------------------------------------------------------------
+ * | NFSD4_ACTIVE | Confirmed, active | Default |
+ * |------------------- ----------------------------------------------------|
+ * | NFSD4_COURTESY | Courtesy state. | nfs4_get_client_reaplist |
+ * | | Lease/lock/share | |
+ * | | reservation conflict | |
+ * | | can cause Courtesy | |
+ * | | client to be expired | |
+ * |------------------------------------------------------------------------|
+ * | NFSD4_EXPIRABLE | Courtesy client to be| nfs4_laundromat |
+ * | | expired by Laundromat| try_to_expire_client |
+ * | | due to conflict | |
+ * |------------------------------------------------------------------------|
+ */
+enum {
+ NFSD4_ACTIVE = 0,
+ NFSD4_COURTESY,
+ NFSD4_EXPIRABLE,
+};
/*
* struct nfs4_client - one per client. Clientids live here.
@@ -296,8 +431,9 @@ struct nfsd4_sessionid {
* 0. If they are not renewed within a lease period, they become eligible for
* destruction by the laundromat.
*
- * These objects can also be destroyed prematurely by the fault injection code,
- * or if the client sends certain forms of SETCLIENTID or EXCHANGE_ID updates.
+ * These objects can also be destroyed if the client sends certain forms of
+ * SETCLIENTID or EXCHANGE_ID operations.
+ *
* Care is taken *not* to do this however when the objects have an elevated
* refcount.
*
@@ -305,7 +441,7 @@ struct nfsd4_sessionid {
*
* o Each nfs4_clients is also hashed by name (the opaque quantity initially
* sent by the client to identify itself).
- *
+ *
* o cl_perclient list is used to ensure no dangling stateowner references
* when we expire the nfs4_client
*/
@@ -330,6 +466,7 @@ struct nfs4_client {
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
u32 cl_minorversion;
+ atomic_t cl_admin_revoked; /* count of admin-revoked states */
/* NFSv4.1 client implementation id: */
struct xdr_netobj cl_nii_domain;
struct xdr_netobj cl_nii_name;
@@ -346,6 +483,8 @@ struct nfs4_client {
#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
1 << NFSD4_CLIENT_CB_KILL)
unsigned long cl_flags;
+
+ struct workqueue_struct *cl_callback_wq;
const struct cred *cl_cb_cred;
struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident;
@@ -376,15 +515,18 @@ struct nfs4_client {
*/
struct dentry *cl_nfsd_info_dentry;
- /* for nfs41 callbacks */
- /* We currently support a single back channel with a single slot */
- unsigned long cl_cb_slot_busy;
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
struct net *net;
struct list_head async_copies; /* list of async copies */
spinlock_t async_lock; /* lock for async copies */
atomic_t cl_cb_inflight; /* Outstanding callbacks */
+
+ unsigned int cl_state;
+ atomic_t cl_delegs_in_recall;
+
+ struct nfsd4_cb_recall_any *cl_ra;
+ time64_t cl_ra_time;
};
/* struct nfs4_client_reset
@@ -417,7 +559,7 @@ struct nfs4_replay {
unsigned int rp_buflen;
char *rp_buf;
struct knfsd_fh rp_openfh;
- struct mutex rp_mutex;
+ int rp_locked;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
@@ -510,16 +652,13 @@ struct nfs4_clnt_odstate {
* inode can have multiple filehandles associated with it, so there is
* (potentially) a many to one relationship between this struct and struct
* inode.
- *
- * These are hashed by filehandle in the file_hashtbl, which is protected by
- * the global state_lock spinlock.
*/
struct nfs4_file {
refcount_t fi_ref;
struct inode * fi_inode;
bool fi_aliased;
spinlock_t fi_lock;
- struct hlist_node fi_hash; /* hash on fi_fhandle */
+ struct rhlist_head fi_rlist;
struct list_head fi_stateids;
union {
struct list_head fi_delegations;
@@ -538,6 +677,7 @@ struct nfs4_file {
atomic_t fi_access[2];
u32 fi_share_deny;
struct nfsd_file *fi_deleg_file;
+ struct nfsd_file *fi_rdeleg_file;
int fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
@@ -568,6 +708,10 @@ struct nfs4_ol_stateid {
struct list_head st_locks;
struct nfs4_stateowner *st_stateowner;
struct nfs4_clnt_odstate *st_clnt_odstate;
+/*
+ * These bitmasks use 3 separate bits for READ, ALLOW, and BOTH; see the
+ * comment above bmap_to_share_mode() for explanation:
+ */
unsigned char st_access_bmap;
unsigned char st_deny_bmap;
struct nfs4_ol_stateid *st_openstp;
@@ -609,6 +753,8 @@ enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_OFFLOAD,
NFSPROC4_CLNT_CB_SEQUENCE,
NFSPROC4_CLNT_CB_NOTIFY_LOCK,
+ NFSPROC4_CLNT_CB_RECALL_ANY,
+ NFSPROC4_CLNT_CB_GETATTR,
};
/* Returns true iff a is later than b: */
@@ -629,6 +775,7 @@ struct nfsd4_blocked_lock {
struct file_lock nbl_lock;
struct knfsd_fh nbl_fh;
struct nfsd4_callback nbl_cb;
+ struct kref nbl_kref;
};
struct nfsd4_compound_state;
@@ -640,15 +787,15 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
stateid_t *stateid, int flags, struct nfsd_file **filp,
struct nfs4_stid **cstid);
__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
- stateid_t *stateid, unsigned char typemask,
- struct nfs4_stid **s, struct nfsd_net *nn);
+ stateid_t *stateid, unsigned short typemask,
+ unsigned short statusmask,
+ struct nfs4_stid **s, struct nfsd_net *nn);
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *));
int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
void nfs4_free_copy_state(struct nfsd4_copy *copy);
struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
struct nfs4_stid *p_stid);
-void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *);
@@ -659,22 +806,29 @@ extern __be32 nfs4_check_open_reclaim(struct nfs4_client *);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+extern void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
+ struct nfs4_sessionid *sessionid,
+ u32 slotid, u32 seqno);
+extern void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb);
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
-extern void nfsd4_run_cb(struct nfsd4_callback *cb);
-extern int nfsd4_create_callback_queue(void);
-extern void nfsd4_destroy_callback_queue(void);
+extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
+
+static inline void nfsd4_try_run_cb(struct nfsd4_callback *cb)
+{
+ if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags))
+ WARN_ON_ONCE(!nfsd4_run_cb(cb));
+}
+
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
-extern void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp);
+void nfsd4_async_copy_reaper(struct nfsd_net *nn);
+bool nfsd4_has_active_async_copies(struct nfs4_client *clp);
extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name,
struct xdr_netobj princhash, struct nfsd_net *nn);
extern bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn);
void put_nfs4_file(struct nfs4_file *fi);
-extern void nfs4_put_copy(struct nfsd4_copy *copy);
-extern struct nfsd4_copy *
-find_async_copy(struct nfs4_client *clp, stateid_t *staetid);
extern void nfs4_put_cpntf_state(struct nfsd_net *nn,
struct nfs4_cpntf_state *cps);
extern __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
@@ -686,6 +840,14 @@ static inline void get_nfs4_file(struct nfs4_file *fi)
}
struct nfsd_file *find_any_file(struct nfs4_file *f);
+#ifdef CONFIG_NFSD_V4
+void nfsd4_revoke_states(struct net *net, struct super_block *sb);
+#else
+static inline void nfsd4_revoke_states(struct net *net, struct super_block *sb)
+{
+}
+#endif
+
/* grace period management */
void nfsd4_end_grace(struct nfsd_net *nn);
@@ -697,4 +859,17 @@ extern void nfsd4_client_record_remove(struct nfs4_client *clp);
extern int nfsd4_client_record_check(struct nfs4_client *clp);
extern void nfsd4_record_grace_done(struct nfsd_net *nn);
+static inline bool try_to_expire_client(struct nfs4_client *clp)
+{
+ cmpxchg(&clp->cl_state, NFSD4_COURTESY, NFSD4_EXPIRABLE);
+ return clp->cl_state == NFSD4_EXPIRABLE;
+}
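+
+[Editor's sketch, not part of the patch: a hedged example of how a reaper path might use try_to_expire_client(). The cmpxchg() only promotes a COURTESY client to EXPIRABLE, so an ACTIVE client is left untouched and the helper reports false; the wrapper name is hypothetical.]
+
+	static bool nfsd_example_mark_for_reaping(struct nfs4_client *clp)
+	{
+		if (!try_to_expire_client(clp))
+			return false;	/* still active; leave it alone */
+		/* the caller would now queue clp on its reaplist for expiry */
+		return true;
+	}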
+
+extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
+ struct dentry *dentry, struct nfs4_delegation **pdp);
+
+struct nfsd4_get_dir_delegation;
+struct nfs4_delegation *nfsd_get_dir_deleg(struct nfsd4_compound_state *cstate,
+ struct nfsd4_get_dir_delegation *gdd,
+ struct nfsd_file *nf);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 1d3b881e7382..f7eaf95e20fc 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -27,25 +27,22 @@
#include "nfsd.h"
-struct nfsd_stats nfsdstats;
-struct svc_stat nfsd_svcstats = {
- .program = &nfsd_program,
-};
-
-static int nfsd_proc_show(struct seq_file *seq, void *v)
+static int nfsd_show(struct seq_file *seq, void *v)
{
+ struct net *net = pde_data(file_inode(seq->file));
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]),
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]),
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]),
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]),
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]),
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));
/* thread usage: */
- seq_printf(seq, "th %u 0", nfsdstats.th_cnt);
+ seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt));
/* deprecated thread usage histogram stats */
for (i = 0; i < 10; i++)
@@ -55,16 +52,18 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
/* show my rpc info */
- svc_seq_show(seq, &nfsd_svcstats);
+ svc_seq_show(seq, &nn->nfsd_svcstats);
#ifdef CONFIG_NFSD_V4
/* Show count for individual nfsv4 operations */
/* Writing operation numbers 0 1 2 also for maintaining uniformity */
- seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1);
for (i = 0; i <= LAST_NFS4_OP; i++) {
seq_printf(seq, " %lld",
- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)]));
}
+ seq_printf(seq, "\nwdeleg_getattr %lld",
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_WDELEG_GETATTR]));
seq_putc(seq, '\n');
#endif
@@ -72,75 +71,16 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
return 0;
}
-static int nfsd_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nfsd_proc_show, NULL);
-}
-
-static const struct proc_ops nfsd_proc_ops = {
- .proc_open = nfsd_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
-};
-
-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
-{
- int i, err = 0;
-
- for (i = 0; !err && i < num; i++)
- err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
-
- if (!err)
- return 0;
-
- for (; i > 0; i--)
- percpu_counter_destroy(&counters[i-1]);
-
- return err;
-}
-
-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- percpu_counter_set(&counters[i], 0);
-}
-
-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- percpu_counter_destroy(&counters[i]);
-}
+DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-static int nfsd_stat_counters_init(void)
+struct proc_dir_entry *nfsd_proc_stat_init(struct net *net)
{
- return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
-}
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-static void nfsd_stat_counters_destroy(void)
-{
- nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
-}
-
-int nfsd_stat_init(void)
-{
- int err;
-
- err = nfsd_stat_counters_init();
- if (err)
- return err;
-
- svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
-
- return 0;
+ return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
}
-void nfsd_stat_shutdown(void)
+void nfsd_proc_stat_shutdown(struct net *net)
{
- nfsd_stat_counters_destroy();
- svc_proc_unregister(&init_net, "nfsd");
+ svc_proc_unregister(net, "nfsd");
}
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index 51ecda852e23..e4efb0e4e56d 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -10,89 +10,67 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
+struct proc_dir_entry *nfsd_proc_stat_init(struct net *net);
+void nfsd_proc_stat_shutdown(struct net *net);
-enum {
- NFSD_STATS_RC_HITS, /* repcache hits */
- NFSD_STATS_RC_MISSES, /* repcache misses */
- NFSD_STATS_RC_NOCACHE, /* uncached reqs */
- NFSD_STATS_FH_STALE, /* FH stale error */
- NFSD_STATS_IO_READ, /* bytes returned to read requests */
- NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
-#ifdef CONFIG_NFSD_V4
- NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
- NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
-#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
-#endif
- NFSD_STATS_COUNTERS_NUM
-};
-
-struct nfsd_stats {
- struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
-
- /* Protected by nfsd_mutex */
- unsigned int th_cnt; /* number of available threads */
-};
-
-
-extern struct nfsd_stats nfsdstats;
-
-extern struct svc_stat nfsd_svcstats;
-
-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
-int nfsd_stat_init(void);
-void nfsd_stat_shutdown(void);
-
-static inline void nfsd_stats_rc_hits_inc(void)
+static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
{
- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
+ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]);
}
-static inline void nfsd_stats_rc_misses_inc(void)
+static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn)
{
- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
+ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]);
}
-static inline void nfsd_stats_rc_nocache_inc(void)
+static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn)
{
- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
+ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]);
}
-static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
+static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn,
+ struct svc_export *exp)
{
- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
- if (exp)
- percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
+ percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]);
+ if (exp && exp->ex_stats)
+ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
}
-static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
+static inline void nfsd_stats_io_read_add(struct nfsd_net *nn,
+ struct svc_export *exp, s64 amount)
{
- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
- if (exp)
- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
+ percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
}
-static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
+static inline void nfsd_stats_io_write_add(struct nfsd_net *nn,
+ struct svc_export *exp, s64 amount)
{
- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
- if (exp)
- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
+ percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
}
static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
{
- percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
+ percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]);
}
static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
{
- percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
+ percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
}
static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
{
- percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
+ percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
}
+#ifdef CONFIG_NFSD_V4
+static inline void nfsd_stats_wdeleg_getattr_inc(struct nfsd_net *nn)
+{
+ percpu_counter_inc(&nn->counter[NFSD_STATS_WDELEG_GETATTR]);
+}
+#endif
#endif /* _NFSD_STATS_H */
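
[Editor's sketch, not part of the patch: a hedged illustration of the per-namespace accounting pattern above. Call sites now pass the nfsd_net explicitly, and the helpers tolerate exports with no attached stats; the wrapper name is hypothetical.]

	static void nfsd_example_account_read(struct nfsd_net *nn,
					      struct svc_export *exp,
					      s64 bytes)
	{
		/* bumps the net-wide counter, and the per-export one if present */
		nfsd_stats_io_read_add(nn, exp, bytes);
	}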
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index f1e0d3c51bc2..5ae2a611e57f 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -9,87 +9,87 @@
#define _NFSD_TRACE_H
#include <linux/tracepoint.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/xprt.h>
+#include <trace/misc/fs.h>
+#include <trace/misc/nfs.h>
+#include <trace/misc/sunrpc.h>
#include "export.h"
#include "nfsfh.h"
+#include "xdr4.h"
-#define NFSD_TRACE_PROC_ARG_FIELDS \
+#define NFSD_TRACE_PROC_CALL_FIELDS(r) \
__field(unsigned int, netns_ino) \
__field(u32, xid) \
- __array(unsigned char, server, sizeof(struct sockaddr_in6)) \
- __array(unsigned char, client, sizeof(struct sockaddr_in6))
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen)
-#define NFSD_TRACE_PROC_ARG_ASSIGNMENTS \
+#define NFSD_TRACE_PROC_CALL_ASSIGNMENTS(r) \
do { \
- __entry->netns_ino = SVC_NET(rqstp)->ns.inum; \
- __entry->xid = be32_to_cpu(rqstp->rq_xid); \
- memcpy(__entry->server, &rqstp->rq_xprt->xpt_local, \
- rqstp->rq_xprt->xpt_locallen); \
- memcpy(__entry->client, &rqstp->rq_xprt->xpt_remote, \
- rqstp->rq_xprt->xpt_remotelen); \
- } while (0);
-
-#define NFSD_TRACE_PROC_RES_FIELDS \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __entry->netns_ino = SVC_NET(r)->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
+ } while (0)
+
+#define NFSD_TRACE_PROC_RES_FIELDS(r) \
__field(unsigned int, netns_ino) \
__field(u32, xid) \
__field(unsigned long, status) \
- __array(unsigned char, server, sizeof(struct sockaddr_in6)) \
- __array(unsigned char, client, sizeof(struct sockaddr_in6))
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen)
-#define NFSD_TRACE_PROC_RES_ASSIGNMENTS(error) \
+#define NFSD_TRACE_PROC_RES_ASSIGNMENTS(r, error) \
do { \
- __entry->netns_ino = SVC_NET(rqstp)->ns.inum; \
- __entry->xid = be32_to_cpu(rqstp->rq_xid); \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __entry->netns_ino = SVC_NET(r)->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
__entry->status = be32_to_cpu(error); \
- memcpy(__entry->server, &rqstp->rq_xprt->xpt_local, \
- rqstp->rq_xprt->xpt_locallen); \
- memcpy(__entry->client, &rqstp->rq_xprt->xpt_remote, \
- rqstp->rq_xprt->xpt_remotelen); \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
} while (0);
-TRACE_EVENT(nfsd_garbage_args_err,
+DECLARE_EVENT_CLASS(nfsd_xdr_err_class,
TP_PROTO(
const struct svc_rqst *rqstp
),
TP_ARGS(rqstp),
TP_STRUCT__entry(
- NFSD_TRACE_PROC_ARG_FIELDS
-
+ __field(unsigned int, netns_ino)
+ __field(u32, xid)
__field(u32, vers)
__field(u32, proc)
+ __sockaddr(server, rqstp->rq_xprt->xpt_locallen)
+ __sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
),
TP_fast_assign(
- NFSD_TRACE_PROC_ARG_ASSIGNMENTS
+ const struct svc_xprt *xprt = rqstp->rq_xprt;
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->vers = rqstp->rq_vers;
__entry->proc = rqstp->rq_proc;
+ __assign_sockaddr(server, &xprt->xpt_local, xprt->xpt_locallen);
+ __assign_sockaddr(client, &xprt->xpt_remote, xprt->xpt_remotelen);
),
TP_printk("xid=0x%08x vers=%u proc=%u",
__entry->xid, __entry->vers, __entry->proc
)
);
-TRACE_EVENT(nfsd_cant_encode_err,
- TP_PROTO(
- const struct svc_rqst *rqstp
- ),
- TP_ARGS(rqstp),
- TP_STRUCT__entry(
- NFSD_TRACE_PROC_ARG_FIELDS
+#define DEFINE_NFSD_XDR_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_xdr_err_class, nfsd_##name##_err, \
+ TP_PROTO(const struct svc_rqst *rqstp), \
+ TP_ARGS(rqstp))
- __field(u32, vers)
- __field(u32, proc)
- ),
- TP_fast_assign(
- NFSD_TRACE_PROC_ARG_ASSIGNMENTS
-
- __entry->vers = rqstp->rq_vers;
- __entry->proc = rqstp->rq_proc;
- ),
- TP_printk("xid=0x%08x vers=%u proc=%u",
- __entry->xid, __entry->vers, __entry->proc
- )
-);
+DEFINE_NFSD_XDR_ERR_EVENT(garbage_args);
+DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
#define show_nfsd_may_flags(x) \
__print_flags(x, "|", \
@@ -98,29 +98,37 @@ TRACE_EVENT(nfsd_cant_encode_err,
{ NFSD_MAY_READ, "READ" }, \
{ NFSD_MAY_SATTR, "SATTR" }, \
{ NFSD_MAY_TRUNC, "TRUNC" }, \
- { NFSD_MAY_LOCK, "LOCK" }, \
+ { NFSD_MAY_NLM, "NLM" }, \
{ NFSD_MAY_OWNER_OVERRIDE, "OWNER_OVERRIDE" }, \
{ NFSD_MAY_LOCAL_ACCESS, "LOCAL_ACCESS" }, \
{ NFSD_MAY_BYPASS_GSS_ON_ROOT, "BYPASS_GSS_ON_ROOT" }, \
{ NFSD_MAY_NOT_BREAK_LEASE, "NOT_BREAK_LEASE" }, \
{ NFSD_MAY_BYPASS_GSS, "BYPASS_GSS" }, \
{ NFSD_MAY_READ_IF_EXEC, "READ_IF_EXEC" }, \
- { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" })
+ { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" }, \
+ { NFSD_MAY_LOCALIO, "LOCALIO" })
TRACE_EVENT(nfsd_compound,
- TP_PROTO(const struct svc_rqst *rqst,
- u32 args_opcnt),
- TP_ARGS(rqst, args_opcnt),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ const char *tag,
+ u32 taglen,
+ u32 opcnt
+ ),
+ TP_ARGS(rqst, tag, taglen, opcnt),
TP_STRUCT__entry(
__field(u32, xid)
- __field(u32, args_opcnt)
+ __field(u32, opcnt)
+ __string_len(tag, tag, taglen)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->args_opcnt = args_opcnt;
+ __entry->opcnt = opcnt;
+ __assign_str(tag);
),
- TP_printk("xid=0x%08x opcnt=%u",
- __entry->xid, __entry->args_opcnt)
+ TP_printk("xid=0x%08x opcnt=%u tag=%s",
+ __entry->xid, __entry->opcnt, __get_str(tag)
+ )
)
TRACE_EVENT(nfsd_compound_status,
@@ -139,7 +147,7 @@ TRACE_EVENT(nfsd_compound_status,
__entry->args_opcnt = args_opcnt;
__entry->resp_opcnt = resp_opcnt;
__entry->status = be32_to_cpu(status);
- __assign_str(name, name);
+ __assign_str(name);
),
TP_printk("op=%u/%u %s status=%d",
__entry->resp_opcnt, __entry->args_opcnt,
@@ -156,14 +164,14 @@ TRACE_EVENT(nfsd_compound_decode_err,
),
TP_ARGS(rqstp, args_opcnt, resp_opcnt, opnum, status),
TP_STRUCT__entry(
- NFSD_TRACE_PROC_RES_FIELDS
+ NFSD_TRACE_PROC_RES_FIELDS(rqstp)
__field(u32, args_opcnt)
__field(u32, resp_opcnt)
__field(u32, opnum)
),
TP_fast_assign(
- NFSD_TRACE_PROC_RES_ASSIGNMENTS(status)
+ NFSD_TRACE_PROC_RES_ASSIGNMENTS(rqstp, status)
__entry->args_opcnt = args_opcnt;
__entry->resp_opcnt = resp_opcnt;
@@ -174,7 +182,7 @@ TRACE_EVENT(nfsd_compound_decode_err,
__entry->opnum, __entry->status)
);
-TRACE_EVENT(nfsd_compound_encode_err,
+DECLARE_EVENT_CLASS(nfsd_compound_err_class,
TP_PROTO(
const struct svc_rqst *rqstp,
u32 opnum,
@@ -182,12 +190,12 @@ TRACE_EVENT(nfsd_compound_encode_err,
),
TP_ARGS(rqstp, opnum, status),
TP_STRUCT__entry(
- NFSD_TRACE_PROC_RES_FIELDS
+ NFSD_TRACE_PROC_RES_FIELDS(rqstp)
__field(u32, opnum)
),
TP_fast_assign(
- NFSD_TRACE_PROC_RES_ASSIGNMENTS(status)
+ NFSD_TRACE_PROC_RES_ASSIGNMENTS(rqstp, status)
__entry->opnum = opnum;
),
@@ -195,6 +203,110 @@ TRACE_EVENT(nfsd_compound_encode_err,
__entry->opnum, __entry->status)
);
+#define DEFINE_NFSD_COMPOUND_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_compound_err_class, nfsd_compound_##name##_err, \
+ TP_PROTO( \
+ const struct svc_rqst *rqstp, \
+ u32 opnum, \
+ __be32 status \
+ ), \
+ TP_ARGS(rqstp, opnum, status))
+
+DEFINE_NFSD_COMPOUND_ERR_EVENT(op);
+DEFINE_NFSD_COMPOUND_ERR_EVENT(encode);
+
+#define show_fs_file_type(x) \
+ __print_symbolic(x, \
+ { S_IFLNK, "LNK" }, \
+ { S_IFREG, "REG" }, \
+ { S_IFDIR, "DIR" }, \
+ { S_IFCHR, "CHR" }, \
+ { S_IFBLK, "BLK" }, \
+ { S_IFIFO, "FIFO" }, \
+ { S_IFSOCK, "SOCK" })
+
+TRACE_EVENT_CONDITION(nfsd_fh_verify,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ umode_t type,
+ int access
+ ),
+ TP_ARGS(rqstp, fhp, type, access),
+ TP_CONDITION(rqstp != NULL),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __sockaddr(server, rqstp->rq_xprt->xpt_locallen)
+ __sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
+ __field(u32, xid)
+ __field(u32, fh_hash)
+ __field(const void *, inode)
+ __field(unsigned long, type)
+ __field(unsigned long, access)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = SVC_NET(rqstp)->ns.inum;
+ __assign_sockaddr(server, &rqstp->rq_xprt->xpt_local,
+ rqstp->rq_xprt->xpt_locallen);
+ __assign_sockaddr(client, &rqstp->rq_xprt->xpt_remote,
+ rqstp->rq_xprt->xpt_remotelen);
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __entry->inode = d_inode(fhp->fh_dentry);
+ __entry->type = type;
+ __entry->access = access;
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x type=%s access=%s",
+ __entry->xid, __entry->fh_hash,
+ show_fs_file_type(__entry->type),
+ show_nfsd_may_flags(__entry->access)
+ )
+);
+
+TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ umode_t type,
+ int access,
+ __be32 error
+ ),
+ TP_ARGS(rqstp, fhp, type, access, error),
+ TP_CONDITION(rqstp != NULL && error),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __sockaddr(server, rqstp->rq_xprt->xpt_locallen)
+ __sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
+ __field(u32, xid)
+ __field(u32, fh_hash)
+ __field(const void *, inode)
+ __field(unsigned long, type)
+ __field(unsigned long, access)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = SVC_NET(rqstp)->ns.inum;
+ __assign_sockaddr(server, &rqstp->rq_xprt->xpt_local,
+ rqstp->rq_xprt->xpt_locallen);
+ __assign_sockaddr(client, &rqstp->rq_xprt->xpt_remote,
+ rqstp->rq_xprt->xpt_remotelen);
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ if (fhp->fh_dentry)
+ __entry->inode = d_inode(fhp->fh_dentry);
+ else
+ __entry->inode = NULL;
+ __entry->type = type;
+ __entry->access = access;
+ __entry->error = be32_to_cpu(error);
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x type=%s access=%s error=%d",
+ __entry->xid, __entry->fh_hash,
+ show_fs_file_type(__entry->type),
+ show_nfsd_may_flags(__entry->access),
+ __entry->error
+ )
+);
DECLARE_EVENT_CLASS(nfsd_fh_err_class,
TP_PROTO(struct svc_rqst *rqstp,
@@ -216,12 +328,13 @@ DECLARE_EVENT_CLASS(nfsd_fh_err_class,
__entry->status)
)
-#define DEFINE_NFSD_FH_ERR_EVENT(name) \
-DEFINE_EVENT(nfsd_fh_err_class, nfsd_##name, \
- TP_PROTO(struct svc_rqst *rqstp, \
- struct svc_fh *fhp, \
- int status), \
- TP_ARGS(rqstp, fhp, status))
+#define DEFINE_NFSD_FH_ERR_EVENT(name) \
+DEFINE_EVENT_CONDITION(nfsd_fh_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *fhp, \
+ int status), \
+ TP_ARGS(rqstp, fhp, status), \
+ TP_CONDITION(rqstp != NULL))
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badexport);
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badhandle);
@@ -231,7 +344,7 @@ TRACE_EVENT(nfsd_exp_find_key,
int status),
TP_ARGS(key, status),
TP_STRUCT__entry(
- __field(int, fsidtype)
+ __field(u8, fsidtype)
__array(u32, fsid, 6)
__string(auth_domain, key->ek_client->name)
__field(int, status)
@@ -239,7 +352,7 @@ TRACE_EVENT(nfsd_exp_find_key,
TP_fast_assign(
__entry->fsidtype = key->ek_fsidtype;
memcpy(__entry->fsid, key->ek_fsid, 4*6);
- __assign_str(auth_domain, key->ek_client->name);
+ __assign_str(auth_domain);
__entry->status = status;
),
TP_printk("fsid=%x::%s domain=%s status=%d",
@@ -254,7 +367,7 @@ TRACE_EVENT(nfsd_expkey_update,
TP_PROTO(const struct svc_expkey *key, const char *exp_path),
TP_ARGS(key, exp_path),
TP_STRUCT__entry(
- __field(int, fsidtype)
+ __field(u8, fsidtype)
__array(u32, fsid, 6)
__string(auth_domain, key->ek_client->name)
__string(path, exp_path)
@@ -263,8 +376,8 @@ TRACE_EVENT(nfsd_expkey_update,
TP_fast_assign(
__entry->fsidtype = key->ek_fsidtype;
memcpy(__entry->fsid, key->ek_fsid, 4*6);
- __assign_str(auth_domain, key->ek_client->name);
- __assign_str(path, exp_path);
+ __assign_str(auth_domain);
+ __assign_str(path);
__entry->cache = !test_bit(CACHE_NEGATIVE, &key->h.flags);
),
TP_printk("fsid=%x::%s domain=%s path=%s cache=%s",
@@ -286,8 +399,8 @@ TRACE_EVENT(nfsd_exp_get_by_name,
__field(int, status)
),
TP_fast_assign(
- __assign_str(path, key->ex_path.dentry->d_name.name);
- __assign_str(auth_domain, key->ex_client->name);
+ __assign_str(path);
+ __assign_str(auth_domain);
__entry->status = status;
),
TP_printk("path=%s domain=%s status=%d",
@@ -306,8 +419,8 @@ TRACE_EVENT(nfsd_export_update,
__field(bool, cache)
),
TP_fast_assign(
- __assign_str(path, key->ex_path.dentry->d_name.name);
- __assign_str(auth_domain, key->ex_client->name);
+ __assign_str(path);
+ __assign_str(auth_domain);
__entry->cache = !test_bit(CACHE_NEGATIVE, &key->h.flags);
),
TP_printk("path=%s domain=%s cache=%s",
@@ -320,14 +433,14 @@ TRACE_EVENT(nfsd_export_update,
DECLARE_EVENT_CLASS(nfsd_io_class,
TP_PROTO(struct svc_rqst *rqstp,
struct svc_fh *fhp,
- loff_t offset,
- unsigned long len),
+ u64 offset,
+ u32 len),
TP_ARGS(rqstp, fhp, offset, len),
TP_STRUCT__entry(
__field(u32, xid)
__field(u32, fh_hash)
- __field(loff_t, offset)
- __field(unsigned long, len)
+ __field(u64, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
@@ -335,7 +448,7 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
__entry->offset = offset;
__entry->len = len;
),
- TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu",
+ TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u",
__entry->xid, __entry->fh_hash,
__entry->offset, __entry->len)
)
@@ -344,19 +457,24 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
TP_PROTO(struct svc_rqst *rqstp, \
struct svc_fh *fhp, \
- loff_t offset, \
- unsigned long len), \
+ u64 offset, \
+ u32 len), \
TP_ARGS(rqstp, fhp, offset, len))
DEFINE_NFSD_IO_EVENT(read_start);
DEFINE_NFSD_IO_EVENT(read_splice);
DEFINE_NFSD_IO_EVENT(read_vector);
+DEFINE_NFSD_IO_EVENT(read_direct);
DEFINE_NFSD_IO_EVENT(read_io_done);
DEFINE_NFSD_IO_EVENT(read_done);
DEFINE_NFSD_IO_EVENT(write_start);
DEFINE_NFSD_IO_EVENT(write_opened);
+DEFINE_NFSD_IO_EVENT(write_direct);
+DEFINE_NFSD_IO_EVENT(write_vector);
DEFINE_NFSD_IO_EVENT(write_io_done);
DEFINE_NFSD_IO_EVENT(write_done);
+DEFINE_NFSD_IO_EVENT(commit_start);
+DEFINE_NFSD_IO_EVENT(commit_done);
DECLARE_EVENT_CLASS(nfsd_err_class,
TP_PROTO(struct svc_rqst *rqstp,
@@ -406,17 +524,90 @@ TRACE_EVENT(nfsd_dirent,
TP_fast_assign(
__entry->fh_hash = fhp ? knfsd_fh_hash(&fhp->fh_handle) : 0;
__entry->ino = ino;
- __assign_str_len(name, name, namlen)
+ __assign_str(name);
),
TP_printk("fh_hash=0x%08x ino=%llu name=%s",
__entry->fh_hash, __entry->ino, __get_str(name)
)
)
+DECLARE_EVENT_CLASS(nfsd_copy_err_class,
+ TP_PROTO(struct svc_rqst *rqstp,
+ struct svc_fh *src_fhp,
+ loff_t src_offset,
+ struct svc_fh *dst_fhp,
+ loff_t dst_offset,
+ u64 count,
+ int status),
+ TP_ARGS(rqstp, src_fhp, src_offset, dst_fhp, dst_offset, count, status),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, src_fh_hash)
+ __field(loff_t, src_offset)
+ __field(u32, dst_fh_hash)
+ __field(loff_t, dst_offset)
+ __field(u64, count)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->src_fh_hash = knfsd_fh_hash(&src_fhp->fh_handle);
+ __entry->src_offset = src_offset;
+ __entry->dst_fh_hash = knfsd_fh_hash(&dst_fhp->fh_handle);
+ __entry->dst_offset = dst_offset;
+ __entry->count = count;
+ __entry->status = status;
+ ),
+ TP_printk("xid=0x%08x src_fh_hash=0x%08x src_offset=%lld "
+ "dst_fh_hash=0x%08x dst_offset=%lld "
+ "count=%llu status=%d",
+ __entry->xid, __entry->src_fh_hash, __entry->src_offset,
+ __entry->dst_fh_hash, __entry->dst_offset,
+ (unsigned long long)__entry->count,
+ __entry->status)
+)
+
+#define DEFINE_NFSD_COPY_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_copy_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *src_fhp, \
+ loff_t src_offset, \
+ struct svc_fh *dst_fhp, \
+ loff_t dst_offset, \
+ u64 count, \
+ int status), \
+ TP_ARGS(rqstp, src_fhp, src_offset, dst_fhp, dst_offset, \
+ count, status))
+
+DEFINE_NFSD_COPY_ERR_EVENT(clone_file_range_err);
+
#include "state.h"
#include "filecache.h"
#include "vfs.h"
+TRACE_EVENT(nfsd_delegret_wakeup,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ long timeo
+ ),
+ TP_ARGS(rqstp, inode, timeo),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, inode)
+ __field(long, timeo)
+ ),
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->inode = inode;
+ __entry->timeo = timeo;
+ ),
+ TP_printk("xid=0x%08x inode=%p%s",
+ __entry->xid, __entry->inode,
+ __entry->timeo == 0 ? " (timed out)" : ""
+ )
+);
+
DECLARE_EVENT_CLASS(nfsd_stateid_class,
TP_PROTO(stateid_t *stp),
TP_ARGS(stp),
@@ -457,7 +648,8 @@ DEFINE_STATEID_EVENT(layout_recall_release);
DEFINE_STATEID_EVENT(open);
DEFINE_STATEID_EVENT(deleg_read);
-DEFINE_STATEID_EVENT(deleg_recall);
+DEFINE_STATEID_EVENT(deleg_write);
+DEFINE_STATEID_EVENT(deleg_return);
DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
TP_PROTO(u32 seqid, const stateid_t *stp),
@@ -489,6 +681,193 @@ DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \
DEFINE_STATESEQID_EVENT(preprocess);
DEFINE_STATESEQID_EVENT(open_confirm);
+#define show_stid_type(x) \
+ __print_flags(x, "|", \
+ { SC_TYPE_OPEN, "OPEN" }, \
+ { SC_TYPE_LOCK, "LOCK" }, \
+ { SC_TYPE_DELEG, "DELEG" }, \
+ { SC_TYPE_LAYOUT, "LAYOUT" })
+
+#define show_stid_status(x) \
+ __print_flags(x, "|", \
+ { SC_STATUS_CLOSED, "CLOSED" }, \
+ { SC_STATUS_REVOKED, "REVOKED" }, \
+ { SC_STATUS_ADMIN_REVOKED, "ADMIN_REVOKED" })
+
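The two helpers above are only evaluated when the trace buffer is read: __print_flags() stores nothing extra in the ring buffer, it just walks the {value, name} table at format time and joins the names of all set bits with the given delimiter. The short userspace sketch below is not kernel code and only approximates that formatting; the SC_TYPE_* values are illustrative stand-ins (the real definitions live in fs/nfsd/state.h), but it shows what show_stid_type() would render for a combined mask.

#include <stdio.h>

/* Illustrative stand-ins; the real SC_TYPE_* bits are defined in fs/nfsd/state.h. */
#define SC_TYPE_OPEN   0x01
#define SC_TYPE_LOCK   0x02
#define SC_TYPE_DELEG  0x04
#define SC_TYPE_LAYOUT 0x08

struct flag_name { unsigned long flag; const char *name; };

/* Approximates __print_flags(x, "|", ...): print the name of every set bit. */
static void show_flags(unsigned long x, const struct flag_name *tbl, size_t n)
{
	const char *sep = "";
	for (size_t i = 0; i < n; i++) {
		if (x & tbl[i].flag) {
			printf("%s%s", sep, tbl[i].name);
			sep = "|";
		}
	}
	putchar('\n');
}

int main(void)
{
	static const struct flag_name stid_types[] = {
		{ SC_TYPE_OPEN,   "OPEN"   },
		{ SC_TYPE_LOCK,   "LOCK"   },
		{ SC_TYPE_DELEG,  "DELEG"  },
		{ SC_TYPE_LAYOUT, "LAYOUT" },
	};

	/* Prints "OPEN|DELEG", matching the type=%s column in the event output. */
	show_flags(SC_TYPE_OPEN | SC_TYPE_DELEG,
		   stid_types, sizeof(stid_types) / sizeof(stid_types[0]));
	return 0;
}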
+DECLARE_EVENT_CLASS(nfsd_stid_class,
+ TP_PROTO(
+ const struct nfs4_stid *stid
+ ),
+ TP_ARGS(stid),
+ TP_STRUCT__entry(
+ __field(unsigned long, sc_type)
+ __field(unsigned long, sc_status)
+ __field(int, sc_count)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ ),
+ TP_fast_assign(
+ const stateid_t *stp = &stid->sc_stateid;
+
+ __entry->sc_type = stid->sc_type;
+ __entry->sc_status = stid->sc_status;
+ __entry->sc_count = refcount_read(&stid->sc_count);
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ ),
+ TP_printk("client %08x:%08x stateid %08x:%08x ref=%d type=%s state=%s",
+ __entry->cl_boot, __entry->cl_id,
+ __entry->si_id, __entry->si_generation,
+ __entry->sc_count, show_stid_type(__entry->sc_type),
+ show_stid_status(__entry->sc_status)
+ )
+);
+
+#define DEFINE_STID_EVENT(name) \
+DEFINE_EVENT(nfsd_stid_class, nfsd_stid_##name, \
+ TP_PROTO(const struct nfs4_stid *stid), \
+ TP_ARGS(stid))
+
+DEFINE_STID_EVENT(revoke);
+
+TRACE_EVENT(nfsd_stateowner_replay,
+ TP_PROTO(
+ u32 opnum,
+ const struct nfs4_replay *rp
+ ),
+ TP_ARGS(opnum, rp),
+ TP_STRUCT__entry(
+ __field(unsigned long, status)
+ __field(u32, opnum)
+ ),
+ TP_fast_assign(
+ __entry->status = be32_to_cpu(rp->rp_status);
+ __entry->opnum = opnum;
+ ),
+ TP_printk("opnum=%u status=%lu",
+ __entry->opnum, __entry->status)
+);
+
+TRACE_EVENT_CONDITION(nfsd_seq4_status,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct nfsd4_sequence *sequence
+ ),
+ TP_ARGS(rqstp, sequence),
+ TP_CONDITION(sequence->status_flags),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __field(u32, xid)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, seqno)
+ __field(u32, reserved)
+ __field(unsigned long, status_flags)
+ ),
+ TP_fast_assign(
+ const struct nfsd4_sessionid *sid =
+ (struct nfsd4_sessionid *)&sequence->sessionid;
+
+ __entry->netns_ino = SVC_NET(rqstp)->ns.inum;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->cl_boot = sid->clientid.cl_boot;
+ __entry->cl_id = sid->clientid.cl_id;
+ __entry->seqno = sid->sequence;
+ __entry->reserved = sid->reserved;
+ __entry->status_flags = sequence->status_flags;
+ ),
+ TP_printk("xid=0x%08x sessionid=%08x:%08x:%08x:%08x status_flags=%s",
+ __entry->xid, __entry->cl_boot, __entry->cl_id,
+ __entry->seqno, __entry->reserved,
+ show_nfs4_seq4_status(__entry->status_flags)
+ )
+);
+
+DECLARE_EVENT_CLASS(nfsd_cs_slot_class,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfsd4_create_session *cs
+ ),
+ TP_ARGS(clp, cs),
+ TP_STRUCT__entry(
+ __field(u32, seqid)
+ __field(u32, slot_seqid)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ const struct nfsd4_clid_slot *slot = &clp->cl_cs_slot;
+
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen);
+ __entry->seqid = cs->seqid;
+ __entry->slot_seqid = slot->sl_seqid;
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->seqid, __entry->slot_seqid
+ )
+);
+
+#define DEFINE_CS_SLOT_EVENT(name) \
+DEFINE_EVENT(nfsd_cs_slot_class, nfsd_##name, \
+ TP_PROTO( \
+ const struct nfs4_client *clp, \
+ const struct nfsd4_create_session *cs \
+ ), \
+ TP_ARGS(clp, cs))
+
+DEFINE_CS_SLOT_EVENT(slot_seqid_conf);
+DEFINE_CS_SLOT_EVENT(slot_seqid_unconf);
+
+#define show_nfs_slot_flags(val) \
+ __print_flags(val, "|", \
+ { NFSD4_SLOT_INUSE, "INUSE" }, \
+ { NFSD4_SLOT_CACHETHIS, "CACHETHIS" }, \
+ { NFSD4_SLOT_INITIALIZED, "INITIALIZED" }, \
+ { NFSD4_SLOT_CACHED, "CACHED" }, \
+ { NFSD4_SLOT_REUSED, "REUSED" })
+
+TRACE_EVENT(nfsd_slot_seqid_sequence,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfsd4_sequence *seq,
+ const struct nfsd4_slot *slot
+ ),
+ TP_ARGS(clp, seq, slot),
+ TP_STRUCT__entry(
+ __field(u32, seqid)
+ __field(u32, slot_seqid)
+ __field(u32, slot_index)
+ __field(unsigned long, slot_flags)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen);
+ __entry->seqid = seq->seqid;
+ __entry->slot_seqid = slot->sl_seqid;
+ __entry->slot_index = seq->slotid;
+ __entry->slot_flags = slot->sl_flags;
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x idx=%u seqid=%u slot_seqid=%u flags=%s",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->slot_index, __entry->seqid, __entry->slot_seqid,
+ show_nfs_slot_flags(__entry->slot_flags)
+ )
+);
+
DECLARE_EVENT_CLASS(nfsd_clientid_class,
TP_PROTO(const clientid_t *clid),
TP_ARGS(clid),
@@ -518,6 +897,30 @@ DEFINE_CLIENTID_EVENT(purged);
DEFINE_CLIENTID_EVENT(renew);
DEFINE_CLIENTID_EVENT(stale);
+TRACE_EVENT(nfsd_mark_client_expired,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ int cl_rpc_users
+ ),
+ TP_ARGS(clp, cl_rpc_users),
+ TP_STRUCT__entry(
+ __field(int, cl_rpc_users)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ __entry->cl_rpc_users = cl_rpc_users;
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x cl_rpc_users=%d",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->cl_rpc_users)
+);
+
DECLARE_EVENT_CLASS(nfsd_net_class,
TP_PROTO(const struct nfsd_net *nn),
TP_ARGS(nn),
@@ -538,6 +941,34 @@ DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
DEFINE_NET_EVENT(grace_start);
DEFINE_NET_EVENT(grace_complete);
+TRACE_EVENT(nfsd_writeverf_reset,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_rqst *rqstp,
+ int error
+ ),
+ TP_ARGS(nn, rqstp, error),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(u32, xid)
+ __field(int, error)
+ __array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->error = error;
+
+ /* avoid seqlock inside TP_fast_assign */
+ memcpy(__entry->verifier, nn->writeverf,
+ NFS4_VERIFIER_SIZE);
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x error=%d new verifier=0x%s",
+ __entry->boot_time, __entry->xid, __entry->error,
+ __print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
+ )
+);
+
TRACE_EVENT(nfsd_clid_cred_mismatch,
TP_PROTO(
const struct nfs4_client *clp,
@@ -549,20 +980,21 @@ TRACE_EVENT(nfsd_clid_cred_mismatch,
__field(u32, cl_id)
__field(unsigned long, cl_flavor)
__field(unsigned long, new_flavor)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, rqstp->rq_xprt->xpt_remotelen)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->cl_flavor = clp->cl_cred.cr_flavor;
__entry->new_flavor = rqstp->rq_cred.cr_flavor;
- memcpy(__entry->addr, &rqstp->rq_xprt->xpt_remote,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &rqstp->rq_xprt->xpt_remote,
+ rqstp->rq_xprt->xpt_remotelen);
),
TP_printk("client %08x:%08x flavor=%s, conflict=%s from addr=%pISpc",
__entry->cl_boot, __entry->cl_id,
show_nfsd_authflavor(__entry->cl_flavor),
- show_nfsd_authflavor(__entry->new_flavor), __entry->addr
+ show_nfsd_authflavor(__entry->new_flavor),
+ __get_sockaddr(addr)
)
)
@@ -578,7 +1010,7 @@ TRACE_EVENT(nfsd_clid_verf_mismatch,
__field(u32, cl_id)
__array(unsigned char, cl_verifier, NFS4_VERIFIER_SIZE)
__array(unsigned char, new_verifier, NFS4_VERIFIER_SIZE)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, rqstp->rq_xprt->xpt_remotelen)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
@@ -587,14 +1019,14 @@ TRACE_EVENT(nfsd_clid_verf_mismatch,
NFS4_VERIFIER_SIZE);
memcpy(__entry->new_verifier, (void *)verf,
NFS4_VERIFIER_SIZE);
- memcpy(__entry->addr, &rqstp->rq_xprt->xpt_remote,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &rqstp->rq_xprt->xpt_remote,
+ rqstp->rq_xprt->xpt_remotelen);
),
TP_printk("client %08x:%08x verf=0x%s, updated=0x%s from addr=%pISpc",
__entry->cl_boot, __entry->cl_id,
__print_hex_str(__entry->cl_verifier, NFS4_VERIFIER_SIZE),
__print_hex_str(__entry->new_verifier, NFS4_VERIFIER_SIZE),
- __entry->addr
+ __get_sockaddr(addr)
)
);
@@ -607,7 +1039,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
__field(unsigned long, flavor)
__array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
- __string_len(name, name, clp->cl_name.len)
+ __string_len(name, clp->cl_name.data, clp->cl_name.len)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
@@ -617,7 +1049,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
__entry->flavor = clp->cl_cred.cr_flavor;
memcpy(__entry->verifier, (void *)&clp->cl_verifier,
NFS4_VERIFIER_SIZE);
- __assign_str_len(name, clp->cl_name.data, clp->cl_name.len);
+ __assign_str(name);
),
TP_printk("addr=%pISpc name='%s' verifier=0x%s flavor=%s client=%08x:%08x",
__entry->addr, __get_str(name),
@@ -637,25 +1069,18 @@ DEFINE_CLID_EVENT(confirmed_r);
/*
* from fs/nfsd/filecache.h
*/
-TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
-TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
-TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
-TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_WRITE);
-TRACE_DEFINE_ENUM(NFSD_FILE_REFERENCED);
-
#define show_nf_flags(val) \
__print_flags(val, "|", \
{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
{ 1 << NFSD_FILE_PENDING, "PENDING" }, \
- { 1 << NFSD_FILE_BREAK_READ, "BREAK_READ" }, \
- { 1 << NFSD_FILE_BREAK_WRITE, "BREAK_WRITE" }, \
- { 1 << NFSD_FILE_REFERENCED, "REFERENCED"})
+ { 1 << NFSD_FILE_REFERENCED, "REFERENCED" }, \
+ { 1 << NFSD_FILE_RECENT, "RECENT" }, \
+ { 1 << NFSD_FILE_GC, "GC" })
DECLARE_EVENT_CLASS(nfsd_file_class,
TP_PROTO(struct nfsd_file *nf),
TP_ARGS(nf),
TP_STRUCT__entry(
- __field(unsigned int, nf_hashval)
__field(void *, nf_inode)
__field(int, nf_ref)
__field(unsigned long, nf_flags)
@@ -663,15 +1088,13 @@ DECLARE_EVENT_CLASS(nfsd_file_class,
__field(struct file *, nf_file)
),
TP_fast_assign(
- __entry->nf_hashval = nf->nf_hashval;
__entry->nf_inode = nf->nf_inode;
__entry->nf_ref = refcount_read(&nf->nf_ref);
__entry->nf_flags = nf->nf_flags;
__entry->nf_may = nf->nf_may;
__entry->nf_file = nf->nf_file;
),
- TP_printk("hash=0x%x inode=%p ref=%d flags=%s may=%s file=%p",
- __entry->nf_hashval,
+ TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p",
__entry->nf_inode,
__entry->nf_ref,
show_nf_flags(__entry->nf_flags),
@@ -684,34 +1107,86 @@ DEFINE_EVENT(nfsd_file_class, name, \
TP_PROTO(struct nfsd_file *nf), \
TP_ARGS(nf))
-DEFINE_NFSD_FILE_EVENT(nfsd_file_alloc);
-DEFINE_NFSD_FILE_EVENT(nfsd_file_put_final);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_free);
DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash);
DEFINE_NFSD_FILE_EVENT(nfsd_file_put);
-DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_release_locked);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_closing);
+
+TRACE_EVENT(nfsd_file_alloc,
+ TP_PROTO(
+ const struct nfsd_file *nf
+ ),
+ TP_ARGS(nf),
+ TP_STRUCT__entry(
+ __field(const void *, nf_inode)
+ __field(unsigned long, nf_flags)
+ __field(unsigned long, nf_may)
+ __field(unsigned int, nf_ref)
+ ),
+ TP_fast_assign(
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_may = nf->nf_may;
+ ),
+ TP_printk("inode=%p ref=%u flags=%s may=%s",
+ __entry->nf_inode, __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nfsd_may_flags(__entry->nf_may)
+ )
+);
+
+TRACE_EVENT(nfsd_file_get_dio_attrs,
+ TP_PROTO(
+ const struct inode *inode,
+ const struct kstat *stat
+ ),
+ TP_ARGS(inode, stat),
+ TP_STRUCT__entry(
+ __field(const void *, inode)
+ __field(unsigned long, mask)
+ __field(u32, mem_align)
+ __field(u32, offset_align)
+ __field(u32, read_offset_align)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->mask = stat->result_mask;
+ __entry->mem_align = stat->dio_mem_align;
+ __entry->offset_align = stat->dio_offset_align;
+ __entry->read_offset_align = stat->dio_read_offset_align;
+ ),
+ TP_printk("inode=%p flags=%s mem_align=%u offset_align=%u read_offset_align=%u",
+ __entry->inode, show_statx_mask(__entry->mask),
+ __entry->mem_align, __entry->offset_align,
+ __entry->read_offset_align
+ )
+);
TRACE_EVENT(nfsd_file_acquire,
- TP_PROTO(struct svc_rqst *rqstp, unsigned int hash,
- struct inode *inode, unsigned int may_flags,
- struct nfsd_file *nf, __be32 status),
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ unsigned int may_flags,
+ const struct nfsd_file *nf,
+ __be32 status
+ ),
- TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
+ TP_ARGS(rqstp, inode, may_flags, nf, status),
TP_STRUCT__entry(
__field(u32, xid)
- __field(unsigned int, hash)
- __field(void *, inode)
+ __field(const void *, inode)
__field(unsigned long, may_flags)
- __field(int, nf_ref)
+ __field(unsigned int, nf_ref)
__field(unsigned long, nf_flags)
__field(unsigned long, nf_may)
- __field(struct file *, nf_file)
+ __field(const void *, nf_file)
__field(u32, status)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqstp->rq_xid);
- __entry->hash = hash;
+ __entry->xid = rqstp ? be32_to_cpu(rqstp->rq_xid) : 0;
__entry->inode = inode;
__entry->may_flags = may_flags;
__entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0;
@@ -721,39 +1196,131 @@ TRACE_EVENT(nfsd_file_acquire,
__entry->status = be32_to_cpu(status);
),
- TP_printk("xid=0x%x hash=0x%x inode=%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=%p status=%u",
- __entry->xid, __entry->hash, __entry->inode,
+ TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p status=%u",
+ __entry->xid, __entry->inode,
show_nfsd_may_flags(__entry->may_flags),
__entry->nf_ref, show_nf_flags(__entry->nf_flags),
show_nfsd_may_flags(__entry->nf_may),
- __entry->nf_file, __entry->status)
+ __entry->nf_file, __entry->status
+ )
);
-DECLARE_EVENT_CLASS(nfsd_file_search_class,
- TP_PROTO(struct inode *inode, unsigned int hash, int found),
- TP_ARGS(inode, hash, found),
+TRACE_EVENT(nfsd_file_insert_err,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ unsigned int may_flags,
+ long error
+ ),
+ TP_ARGS(rqstp, inode, may_flags, error),
TP_STRUCT__entry(
- __field(struct inode *, inode)
- __field(unsigned int, hash)
- __field(int, found)
+ __field(u32, xid)
+ __field(const void *, inode)
+ __field(unsigned long, may_flags)
+ __field(long, error)
),
TP_fast_assign(
+ __entry->xid = rqstp ? be32_to_cpu(rqstp->rq_xid) : 0;
__entry->inode = inode;
- __entry->hash = hash;
- __entry->found = found;
+ __entry->may_flags = may_flags;
+ __entry->error = error;
),
- TP_printk("hash=0x%x inode=%p found=%d", __entry->hash,
- __entry->inode, __entry->found)
+ TP_printk("xid=0x%x inode=%p may_flags=%s error=%ld",
+ __entry->xid, __entry->inode,
+ show_nfsd_may_flags(__entry->may_flags),
+ __entry->error
+ )
);
-#define DEFINE_NFSD_FILE_SEARCH_EVENT(name) \
-DEFINE_EVENT(nfsd_file_search_class, name, \
- TP_PROTO(struct inode *inode, unsigned int hash, int found), \
- TP_ARGS(inode, hash, found))
+TRACE_EVENT(nfsd_file_cons_err,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ unsigned int may_flags,
+ const struct nfsd_file *nf
+ ),
+ TP_ARGS(rqstp, inode, may_flags, nf),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, inode)
+ __field(unsigned long, may_flags)
+ __field(unsigned int, nf_ref)
+ __field(unsigned long, nf_flags)
+ __field(unsigned long, nf_may)
+ __field(const void *, nf_file)
+ ),
+ TP_fast_assign(
+ __entry->xid = rqstp ? be32_to_cpu(rqstp->rq_xid) : 0;
+ __entry->inode = inode;
+ __entry->may_flags = may_flags;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_may = nf->nf_may;
+ __entry->nf_file = nf->nf_file;
+ ),
+ TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
+ __entry->xid, __entry->inode,
+ show_nfsd_may_flags(__entry->may_flags), __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
+ )
+);
+
+DECLARE_EVENT_CLASS(nfsd_file_open_class,
+ TP_PROTO(const struct nfsd_file *nf, __be32 status),
+ TP_ARGS(nf, status),
+ TP_STRUCT__entry(
+ __field(void *, nf_inode) /* cannot be dereferenced */
+ __field(int, nf_ref)
+ __field(unsigned long, nf_flags)
+ __field(unsigned long, nf_may)
+ __field(void *, nf_file) /* cannot be dereferenced */
+ ),
+ TP_fast_assign(
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_may = nf->nf_may;
+ __entry->nf_file = nf->nf_file;
+ ),
+ TP_printk("inode=%p ref=%d flags=%s may=%s file=%p",
+ __entry->nf_inode,
+ __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nfsd_may_flags(__entry->nf_may),
+ __entry->nf_file)
+)
-DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode_sync);
-DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode);
-DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_is_cached);
+#define DEFINE_NFSD_FILE_OPEN_EVENT(name) \
+DEFINE_EVENT(nfsd_file_open_class, name, \
+ TP_PROTO( \
+ const struct nfsd_file *nf, \
+ __be32 status \
+ ), \
+ TP_ARGS(nf, status))
+
+DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_open);
+DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_opened);
+
+TRACE_EVENT(nfsd_file_is_cached,
+ TP_PROTO(
+ const struct inode *inode,
+ int found
+ ),
+ TP_ARGS(inode, found),
+ TP_STRUCT__entry(
+ __field(const struct inode *, inode)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->found = found;
+ ),
+ TP_printk("inode=%p is %scached",
+ __entry->inode,
+ __entry->found ? "" : "not "
+ )
+);
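Once built in, each event above appears under tracefs and can be switched on individually. A minimal userspace sketch follows, assuming tracefs is mounted at /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing) and that these events are grouped under "nfsd", the usual TRACE_SYSTEM for fs/nfsd/trace.h; it needs root privileges.

#include <stdio.h>

#define EV_ENABLE  "/sys/kernel/tracing/events/nfsd/nfsd_file_is_cached/enable"
#define TRACE_PIPE "/sys/kernel/tracing/trace_pipe"

int main(void)
{
	FILE *f = fopen(EV_ENABLE, "w");
	char line[512];

	if (!f) {
		perror("enable");
		return 1;
	}
	fputs("1\n", f);		/* turn on this one event */
	fclose(f);

	f = fopen(TRACE_PIPE, "r");	/* blocking stream of formatted events */
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "... nfsd_file_is_cached: inode=... is cached" */
	fclose(f);
	return 0;
}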
TRACE_EVENT(nfsd_file_fsnotify_handle_event,
TP_PROTO(struct inode *inode, u32 mask),
@@ -774,6 +1341,90 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
__entry->nlink, __entry->mode, __entry->mask)
);
+DECLARE_EVENT_CLASS(nfsd_file_gc_class,
+ TP_PROTO(
+ const struct nfsd_file *nf
+ ),
+ TP_ARGS(nf),
+ TP_STRUCT__entry(
+ __field(void *, nf_inode)
+ __field(void *, nf_file)
+ __field(int, nf_ref)
+ __field(unsigned long, nf_flags)
+ ),
+ TP_fast_assign(
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_file = nf->nf_file;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_flags = nf->nf_flags;
+ ),
+ TP_printk("inode=%p ref=%d nf_flags=%s nf_file=%p",
+ __entry->nf_inode, __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ __entry->nf_file
+ )
+);
+
+#define DEFINE_NFSD_FILE_GC_EVENT(name) \
+DEFINE_EVENT(nfsd_file_gc_class, name, \
+ TP_PROTO( \
+ const struct nfsd_file *nf \
+ ), \
+ TP_ARGS(nf))
+
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_aged);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_disposed);
+
+DECLARE_EVENT_CLASS(nfsd_file_lruwalk_class,
+ TP_PROTO(
+ unsigned long removed,
+ unsigned long remaining
+ ),
+ TP_ARGS(removed, remaining),
+ TP_STRUCT__entry(
+ __field(unsigned long, removed)
+ __field(unsigned long, remaining)
+ ),
+ TP_fast_assign(
+ __entry->removed = removed;
+ __entry->remaining = remaining;
+ ),
+ TP_printk("%lu entries removed, %lu remaining",
+ __entry->removed, __entry->remaining)
+);
+
+#define DEFINE_NFSD_FILE_LRUWALK_EVENT(name) \
+DEFINE_EVENT(nfsd_file_lruwalk_class, name, \
+ TP_PROTO( \
+ unsigned long removed, \
+ unsigned long remaining \
+ ), \
+ TP_ARGS(removed, remaining))
+
+DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_removed);
+DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_shrinker_removed);
+
+TRACE_EVENT(nfsd_file_close,
+ TP_PROTO(
+ const struct inode *inode
+ ),
+ TP_ARGS(inode),
+ TP_STRUCT__entry(
+ __field(const void *, inode)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ ),
+ TP_printk("inode=%p",
+ __entry->inode
+ )
+);
+
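For context, every TRACE_EVENT/DEFINE_EVENT in this file expands into a static inline trace_<event_name>() helper that nfsd call sites invoke with the TP_PROTO arguments; when the event is disabled it reduces to a patched-out branch. A purely illustrative call-site sketch: the surrounding function is hypothetical, only the trace_nfsd_file_close() name and its const struct inode * argument follow from the event defined above.

/* Hypothetical helper, for illustration only: how a caller fires the event. */
static void example_evict_cached_file(const struct inode *inode)
{
	/* ... drop whatever cached state nfsd holds for this inode ... */
	trace_nfsd_file_close(inode);	/* records "inode=%p" when the event is enabled */
}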
#include "cache.h"
TRACE_DEFINE_ENUM(RC_DROPIT);
@@ -812,8 +1463,8 @@ TRACE_EVENT(nfsd_drc_found,
TRACE_EVENT(nfsd_drc_mismatch,
TP_PROTO(
const struct nfsd_net *nn,
- const struct svc_cacherep *key,
- const struct svc_cacherep *rp
+ const struct nfsd_cacherep *key,
+ const struct nfsd_cacherep *rp
),
TP_ARGS(nn, key, rp),
TP_STRUCT__entry(
@@ -844,18 +1495,17 @@ TRACE_EVENT(nfsd_cb_args,
__field(u32, cl_id)
__field(u32, prog)
__field(u32, ident)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, conn->cb_addrlen)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->prog = conn->cb_prog;
__entry->ident = conn->cb_ident;
- memcpy(__entry->addr, &conn->cb_addr,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &conn->cb_addr, conn->cb_addrlen);
),
TP_printk("addr=%pISpc client %08x:%08x prog=%u ident=%u",
- __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->prog, __entry->ident)
);
@@ -887,17 +1537,17 @@ DECLARE_EVENT_CLASS(nfsd_cb_class,
__field(unsigned long, state)
__field(u32, cl_boot)
__field(u32, cl_id)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
__entry->state = clp->cl_cb_state;
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
- memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x state=%s",
- __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
show_cb_state(__entry->state))
);
@@ -906,10 +1556,14 @@ DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name, \
TP_PROTO(const struct nfs4_client *clp), \
TP_ARGS(clp))
-DEFINE_NFSD_CB_EVENT(state);
+DEFINE_NFSD_CB_EVENT(start);
+DEFINE_NFSD_CB_EVENT(new_state);
DEFINE_NFSD_CB_EVENT(probe);
DEFINE_NFSD_CB_EVENT(lost);
DEFINE_NFSD_CB_EVENT(shutdown);
+DEFINE_NFSD_CB_EVENT(rpc_prepare);
+DEFINE_NFSD_CB_EVENT(rpc_done);
+DEFINE_NFSD_CB_EVENT(rpc_release);
TRACE_DEFINE_ENUM(RPC_AUTH_NULL);
TRACE_DEFINE_ENUM(RPC_AUTH_UNIX);
@@ -937,20 +1591,20 @@ TRACE_EVENT(nfsd_cb_setup,
__field(u32, cl_boot)
__field(u32, cl_id)
__field(unsigned long, authflavor)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
- __array(unsigned char, netid, 8)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ __string(netid, netid)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
- strlcpy(__entry->netid, netid, sizeof(__entry->netid));
+ __assign_str(netid);
__entry->authflavor = authflavor;
- memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x proto=%s flavor=%s",
- __entry->addr, __entry->cl_boot, __entry->cl_id,
- __entry->netid, show_nfsd_authflavor(__entry->authflavor))
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __get_str(netid), show_nfsd_authflavor(__entry->authflavor))
);
TRACE_EVENT(nfsd_cb_setup_err,
@@ -963,30 +1617,169 @@ TRACE_EVENT(nfsd_cb_setup_err,
__field(long, error)
__field(u32, cl_boot)
__field(u32, cl_id)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
__entry->error = error;
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
- memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x error=%ld",
- __entry->addr, __entry->cl_boot, __entry->cl_id, __entry->error)
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->error)
);
-TRACE_EVENT(nfsd_cb_recall,
+/* Not a real opcode, but there is no 0 operation. */
+#define _CB_NULL 0
+
+#define show_nfsd_cb_opcode(val) \
+ __print_symbolic(val, \
+ { _CB_NULL, "CB_NULL" }, \
+ { OP_CB_GETATTR, "CB_GETATTR" }, \
+ { OP_CB_RECALL, "CB_RECALL" }, \
+ { OP_CB_LAYOUTRECALL, "CB_LAYOUTRECALL" }, \
+ { OP_CB_RECALL_ANY, "CB_RECALL_ANY" }, \
+ { OP_CB_NOTIFY_LOCK, "CB_NOTIFY_LOCK" }, \
+ { OP_CB_OFFLOAD, "CB_OFFLOAD" })
+
+DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfsd4_callback *cb
+ ),
+ TP_ARGS(clp, cb),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(const void *, cb)
+ __field(unsigned long, opcode)
+ __field(bool, need_restart)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __entry->cb = cb;
+ __entry->opcode = cb->cb_ops ? cb->cb_ops->opcode : _CB_NULL;
+ __entry->need_restart = test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x cb=%p%s opcode=%s",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id, __entry->cb,
+ __entry->need_restart ? " (need restart)" : " (first try)",
+ show_nfsd_cb_opcode(__entry->opcode)
+ )
+);
+
+#define DEFINE_NFSD_CB_LIFETIME_EVENT(name) \
+DEFINE_EVENT(nfsd_cb_lifetime_class, nfsd_cb_##name, \
+ TP_PROTO( \
+ const struct nfs4_client *clp, \
+ const struct nfsd4_callback *cb \
+ ), \
+ TP_ARGS(clp, cb))
+
+DEFINE_NFSD_CB_LIFETIME_EVENT(queue);
+DEFINE_NFSD_CB_LIFETIME_EVENT(destroy);
+DEFINE_NFSD_CB_LIFETIME_EVENT(restart);
+DEFINE_NFSD_CB_LIFETIME_EVENT(bc_update);
+DEFINE_NFSD_CB_LIFETIME_EVENT(bc_shutdown);
+
+TRACE_EVENT(nfsd_cb_seq_status,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct nfsd4_callback *cb
+ ),
+ TP_ARGS(task, cb),
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, seqno)
+ __field(u32, reserved)
+ __field(int, tk_status)
+ __field(int, seq_status)
+ ),
+ TP_fast_assign(
+ const struct nfs4_client *clp = cb->cb_clp;
+ const struct nfsd4_session *session = clp->cl_cb_session;
+ const struct nfsd4_sessionid *sid =
+ (struct nfsd4_sessionid *)&session->se_sessionid;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
+ __entry->cl_boot = sid->clientid.cl_boot;
+ __entry->cl_id = sid->clientid.cl_id;
+ __entry->seqno = sid->sequence;
+ __entry->reserved = sid->reserved;
+ __entry->tk_status = task->tk_status;
+ __entry->seq_status = cb->cb_seq_status;
+ ),
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " sessionid=%08x:%08x:%08x:%08x tk_status=%d seq_status=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->cl_boot, __entry->cl_id,
+ __entry->seqno, __entry->reserved,
+ __entry->tk_status, __entry->seq_status
+ )
+);
+
+TRACE_EVENT(nfsd_cb_free_slot,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct nfsd4_callback *cb
+ ),
+ TP_ARGS(task, cb),
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, seqno)
+ __field(u32, reserved)
+ __field(u32, slot_seqno)
+ ),
+ TP_fast_assign(
+ const struct nfs4_client *clp = cb->cb_clp;
+ const struct nfsd4_session *session = clp->cl_cb_session;
+ const struct nfsd4_sessionid *sid =
+ (struct nfsd4_sessionid *)&session->se_sessionid;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
+ __entry->cl_boot = sid->clientid.cl_boot;
+ __entry->cl_id = sid->clientid.cl_id;
+ __entry->seqno = sid->sequence;
+ __entry->reserved = sid->reserved;
+ __entry->slot_seqno = session->se_cb_seq_nr[cb->cb_held_slot];
+ ),
+ TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
+ " sessionid=%08x:%08x:%08x:%08x new slot seqno=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->cl_boot, __entry->cl_id,
+ __entry->seqno, __entry->reserved,
+ __entry->slot_seqno
+ )
+);
+
+TRACE_EVENT_CONDITION(nfsd_cb_recall,
TP_PROTO(
const struct nfs4_stid *stid
),
TP_ARGS(stid),
+ TP_CONDITION(stid->sc_client),
TP_STRUCT__entry(
__field(u32, cl_boot)
__field(u32, cl_id)
__field(u32, si_id)
__field(u32, si_generation)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, stid->sc_client->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
const stateid_t *stp = &stid->sc_stateid;
@@ -996,14 +1789,11 @@ TRACE_EVENT(nfsd_cb_recall,
__entry->cl_id = stp->si_opaque.so_clid.cl_id;
__entry->si_id = stp->si_opaque.so_id;
__entry->si_generation = stp->si_generation;
- if (clp)
- memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
- sizeof(struct sockaddr_in6));
- else
- memset(__entry->addr, 0, sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x stateid %08x:%08x",
- __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->si_id, __entry->si_generation)
);
@@ -1017,7 +1807,7 @@ TRACE_EVENT(nfsd_cb_notify_lock,
__field(u32, cl_boot)
__field(u32, cl_id)
__field(u32, fh_hash)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, lo->lo_owner.so_client->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
const struct nfs4_client *clp = lo->lo_owner.so_client;
@@ -1025,11 +1815,11 @@ TRACE_EVENT(nfsd_cb_notify_lock,
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->fh_hash = knfsd_fh_hash(&nbl->nbl_fh);
- memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x fh_hash=0x%08x",
- __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->fh_hash)
);
@@ -1050,7 +1840,7 @@ TRACE_EVENT(nfsd_cb_offload,
__field(u32, fh_hash)
__field(int, status)
__field(u64, count)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
@@ -1060,15 +1850,810 @@ TRACE_EVENT(nfsd_cb_offload,
__entry->fh_hash = knfsd_fh_hash(fh);
__entry->status = be32_to_cpu(status);
__entry->count = count;
- memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
- sizeof(struct sockaddr_in6));
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x stateid %08x:%08x fh_hash=0x%08x count=%llu status=%d",
- __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->si_id, __entry->si_generation,
__entry->fh_hash, __entry->count, __entry->status)
);
+TRACE_EVENT(nfsd_cb_recall_any,
+ TP_PROTO(
+ const struct nfsd4_cb_recall_any *ra
+ ),
+ TP_ARGS(ra),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, keep)
+ __field(unsigned long, bmval0)
+ __sockaddr(addr, ra->ra_cb.cb_clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = ra->ra_cb.cb_clp->cl_clientid.cl_boot;
+ __entry->cl_id = ra->ra_cb.cb_clp->cl_clientid.cl_id;
+ __entry->keep = ra->ra_keep;
+ __entry->bmval0 = ra->ra_bmval[0];
+ __assign_sockaddr(addr, &ra->ra_cb.cb_clp->cl_addr,
+ ra->ra_cb.cb_clp->cl_cb_conn.cb_addrlen);
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x keep=%u bmval0=%s",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->keep, show_rca_mask(__entry->bmval0)
+ )
+);
+
+DECLARE_EVENT_CLASS(nfsd_cb_done_class,
+ TP_PROTO(
+ const stateid_t *stp,
+ const struct rpc_task *task
+ ),
+ TP_ARGS(stp, task),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ __entry->status = task->tk_status;
+ ),
+ TP_printk("client %08x:%08x stateid %08x:%08x status=%d",
+ __entry->cl_boot, __entry->cl_id, __entry->si_id,
+ __entry->si_generation, __entry->status
+ )
+);
+
+#define DEFINE_NFSD_CB_DONE_EVENT(name) \
+DEFINE_EVENT(nfsd_cb_done_class, name, \
+ TP_PROTO( \
+ const stateid_t *stp, \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(stp, task))
+
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_recall_done);
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_notify_lock_done);
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_layout_done);
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_offload_done);
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_getattr_done);
+
+TRACE_EVENT(nfsd_cb_recall_any_done,
+ TP_PROTO(
+ const struct nfsd4_callback *cb,
+ const struct rpc_task *task
+ ),
+ TP_ARGS(cb, task),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->status = task->tk_status;
+ __entry->cl_boot = cb->cb_clp->cl_clientid.cl_boot;
+ __entry->cl_id = cb->cb_clp->cl_clientid.cl_id;
+ ),
+ TP_printk("client %08x:%08x status=%d",
+ __entry->cl_boot, __entry->cl_id, __entry->status
+ )
+);
+
+TRACE_EVENT(nfsd_ctl_unlock_ip,
+ TP_PROTO(
+ const struct net *net,
+ const char *address
+ ),
+ TP_ARGS(net, address),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __string(address, address)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = net->ns.inum;
+ __assign_str(address);
+ ),
+ TP_printk("address=%s",
+ __get_str(address)
+ )
+);
+
+TRACE_EVENT(nfsd_ctl_unlock_fs,
+ TP_PROTO(
+ const struct net *net,
+ const char *path
+ ),
+ TP_ARGS(net, path),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __string(path, path)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = net->ns.inum;
+ __assign_str(path);
+ ),
+ TP_printk("path=%s",
+ __get_str(path)
+ )
+);
+
+TRACE_EVENT(nfsd_ctl_filehandle,
+ TP_PROTO(
+ const struct net *net,
+ const char *domain,
+ const char *path,
+ int maxsize
+ ),
+ TP_ARGS(net, domain, path, maxsize),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __field(int, maxsize)
+ __string(domain, domain)
+ __string(path, path)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = net->ns.inum;
+ __entry->maxsize = maxsize;
+ __assign_str(domain);
+ __assign_str(path);
+ ),
+ TP_printk("domain=%s path=%s maxsize=%d",
+ __get_str(domain), __get_str(path), __entry->maxsize
+ )
+);
+
+TRACE_EVENT(nfsd_ctl_threads,
+ TP_PROTO(
+ const struct net *net,
+ int newthreads
+ ),
+ TP_ARGS(net, newthreads),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __field(int, newthreads)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = net->ns.inum;
+ __entry->newthreads = newthreads;
+ ),
+ TP_printk("newthreads=%d",
+ __entry-